changeset 0:772086c29cc7

Initial import.
author Matti Hamalainen <ccr@tnsp.org>
date Wed, 16 Nov 2016 11:16:33 +0200
parents
children c04221f4d596
files ChangeLog Makefile README VERSION bpgdec.c bpgenc.c bpgenc.h bpgview.c config.h doc/bpg_spec.txt emscripten.diff html/bpgdec.js html/bpgdec8.js html/bpgdec8a.js html/clock.bpg html/index.html html/lena512color.bpg jctvc/Makefile jctvc/TAppEncCfg.cpp jctvc/TAppEncCfg.h jctvc/TAppEncTop.cpp jctvc/TAppEncTop.h jctvc/TLibCommon/AccessUnit.h jctvc/TLibCommon/CommonDef.h jctvc/TLibCommon/ContextModel.cpp jctvc/TLibCommon/ContextModel.h jctvc/TLibCommon/ContextModel3DBuffer.cpp jctvc/TLibCommon/ContextModel3DBuffer.h jctvc/TLibCommon/ContextTables.h jctvc/TLibCommon/Debug.cpp jctvc/TLibCommon/Debug.h jctvc/TLibCommon/NAL.h jctvc/TLibCommon/SEI.cpp jctvc/TLibCommon/SEI.h jctvc/TLibCommon/TComBitCounter.h jctvc/TLibCommon/TComBitStream.cpp jctvc/TLibCommon/TComBitStream.h jctvc/TLibCommon/TComCABACTables.cpp jctvc/TLibCommon/TComCABACTables.h jctvc/TLibCommon/TComChromaFormat.cpp jctvc/TLibCommon/TComChromaFormat.h jctvc/TLibCommon/TComCodingStatistics.h jctvc/TLibCommon/TComDataCU.cpp jctvc/TLibCommon/TComDataCU.h jctvc/TLibCommon/TComInterpolationFilter.cpp jctvc/TLibCommon/TComInterpolationFilter.h jctvc/TLibCommon/TComList.h jctvc/TLibCommon/TComLoopFilter.cpp jctvc/TLibCommon/TComLoopFilter.h jctvc/TLibCommon/TComMotionInfo.cpp jctvc/TLibCommon/TComMotionInfo.h jctvc/TLibCommon/TComMv.h jctvc/TLibCommon/TComPattern.cpp jctvc/TLibCommon/TComPattern.h jctvc/TLibCommon/TComPic.cpp jctvc/TLibCommon/TComPic.h jctvc/TLibCommon/TComPicSym.cpp jctvc/TLibCommon/TComPicSym.h jctvc/TLibCommon/TComPicYuv.cpp jctvc/TLibCommon/TComPicYuv.h jctvc/TLibCommon/TComPicYuvMD5.cpp jctvc/TLibCommon/TComPrediction.cpp jctvc/TLibCommon/TComPrediction.h jctvc/TLibCommon/TComRdCost.cpp jctvc/TLibCommon/TComRdCost.h jctvc/TLibCommon/TComRdCostWeightPrediction.cpp jctvc/TLibCommon/TComRdCostWeightPrediction.h jctvc/TLibCommon/TComRectangle.h jctvc/TLibCommon/TComRom.cpp jctvc/TLibCommon/TComRom.h jctvc/TLibCommon/TComSampleAdaptiveOffset.cpp jctvc/TLibCommon/TComSampleAdaptiveOffset.h 
jctvc/TLibCommon/TComSlice.cpp jctvc/TLibCommon/TComSlice.h jctvc/TLibCommon/TComTU.cpp jctvc/TLibCommon/TComTU.h jctvc/TLibCommon/TComTrQuant.cpp jctvc/TLibCommon/TComTrQuant.h jctvc/TLibCommon/TComWeightPrediction.cpp jctvc/TLibCommon/TComWeightPrediction.h jctvc/TLibCommon/TComYuv.cpp jctvc/TLibCommon/TComYuv.h jctvc/TLibCommon/TypeDef.h jctvc/TLibEncoder/AnnexBwrite.h jctvc/TLibEncoder/NALwrite.cpp jctvc/TLibEncoder/NALwrite.h jctvc/TLibEncoder/SEIwrite.cpp jctvc/TLibEncoder/SEIwrite.h jctvc/TLibEncoder/SyntaxElementWriter.cpp jctvc/TLibEncoder/SyntaxElementWriter.h jctvc/TLibEncoder/TEncAnalyze.cpp jctvc/TLibEncoder/TEncAnalyze.h jctvc/TLibEncoder/TEncBinCoder.h jctvc/TLibEncoder/TEncBinCoderCABAC.cpp jctvc/TLibEncoder/TEncBinCoderCABAC.h jctvc/TLibEncoder/TEncBinCoderCABACCounter.cpp jctvc/TLibEncoder/TEncBinCoderCABACCounter.h jctvc/TLibEncoder/TEncCavlc.cpp jctvc/TLibEncoder/TEncCavlc.h jctvc/TLibEncoder/TEncCfg.h jctvc/TLibEncoder/TEncCu.cpp jctvc/TLibEncoder/TEncCu.h jctvc/TLibEncoder/TEncEntropy.cpp jctvc/TLibEncoder/TEncEntropy.h jctvc/TLibEncoder/TEncGOP.cpp jctvc/TLibEncoder/TEncGOP.h jctvc/TLibEncoder/TEncPic.cpp jctvc/TLibEncoder/TEncPic.h jctvc/TLibEncoder/TEncPreanalyzer.cpp jctvc/TLibEncoder/TEncPreanalyzer.h jctvc/TLibEncoder/TEncRateCtrl.cpp jctvc/TLibEncoder/TEncRateCtrl.h jctvc/TLibEncoder/TEncSampleAdaptiveOffset.cpp jctvc/TLibEncoder/TEncSampleAdaptiveOffset.h jctvc/TLibEncoder/TEncSbac.cpp jctvc/TLibEncoder/TEncSbac.h jctvc/TLibEncoder/TEncSearch.cpp jctvc/TLibEncoder/TEncSearch.h jctvc/TLibEncoder/TEncSlice.cpp jctvc/TLibEncoder/TEncSlice.h jctvc/TLibEncoder/TEncTop.cpp jctvc/TLibEncoder/TEncTop.h jctvc/TLibEncoder/WeightPredAnalysis.cpp jctvc/TLibEncoder/WeightPredAnalysis.h jctvc/TLibVideoIO/TVideoIOYuv.cpp jctvc/TLibVideoIO/TVideoIOYuv.h jctvc/encmain.cpp jctvc/encoder_intra_main.cfg jctvc/libmd5/MD5.h jctvc/libmd5/libmd5.c jctvc/libmd5/libmd5.h jctvc/program_options_lite.cpp jctvc/program_options_lite.h jctvc_glue.cpp 
libavcodec/avcodec.h libavcodec/bit_depth_template.c libavcodec/bswapdsp.h libavcodec/bytestream.h libavcodec/cabac.c libavcodec/cabac.h libavcodec/cabac_functions.h libavcodec/cabac_tablegen.h libavcodec/get_bits.h libavcodec/golomb.c libavcodec/golomb.h libavcodec/hevc.c libavcodec/hevc.h libavcodec/hevc_cabac.c libavcodec/hevc_filter.c libavcodec/hevc_mvs.c libavcodec/hevc_ps.c libavcodec/hevc_refs.c libavcodec/hevc_sei.c libavcodec/hevcdsp.c libavcodec/hevcdsp.h libavcodec/hevcdsp_template.c libavcodec/hevcpred.c libavcodec/hevcpred.h libavcodec/hevcpred_template.c libavcodec/internal.h libavcodec/mathops.h libavcodec/old_codec_ids.h libavcodec/put_bits.h libavcodec/rnd_avg.h libavcodec/thread.h libavcodec/utils.c libavcodec/version.h libavcodec/videodsp.c libavcodec/videodsp.h libavcodec/videodsp_template.c libavutil/adler32.h libavutil/aes.h libavutil/atomic.h libavutil/atomic_gcc.h libavutil/atomic_suncc.h libavutil/atomic_win32.h libavutil/attributes.h libavutil/audio_fifo.h libavutil/audioconvert.h libavutil/avassert.h libavutil/avconfig.h libavutil/avstring.h libavutil/avutil.h libavutil/base64.h libavutil/blowfish.h libavutil/bprint.h libavutil/bswap.h libavutil/buffer.c libavutil/buffer.h libavutil/buffer_internal.h libavutil/cast5.h libavutil/channel_layout.h libavutil/colorspace.h libavutil/common.h libavutil/cpu.h libavutil/cpu_internal.h libavutil/crc.h libavutil/des.h libavutil/dict.h libavutil/display.h libavutil/downmix_info.h libavutil/dynarray.h libavutil/error.h libavutil/eval.h libavutil/ffversion.h libavutil/fifo.h libavutil/file.h libavutil/fixed_dsp.h libavutil/float_dsp.h libavutil/frame.c libavutil/frame.h libavutil/hash.h libavutil/hmac.h libavutil/imgutils.h libavutil/integer.h libavutil/internal.h libavutil/intfloat.h libavutil/intmath.h libavutil/intreadwrite.h libavutil/lfg.h libavutil/libm.h libavutil/lls.h libavutil/log.h libavutil/log2_tab.c libavutil/lzo.h libavutil/macros.h libavutil/mathematics.h libavutil/md5.c 
libavutil/md5.h libavutil/mem.c libavutil/mem.h libavutil/motion_vector.h libavutil/murmur3.h libavutil/old_pix_fmts.h libavutil/opencl.h libavutil/opencl_internal.h libavutil/opt.h libavutil/parseutils.h libavutil/pca.h libavutil/pixdesc.c libavutil/pixdesc.h libavutil/pixelutils.h libavutil/pixfmt.h libavutil/qsort.h libavutil/random_seed.h libavutil/rational.h libavutil/rc4.h libavutil/replaygain.h libavutil/ripemd.h libavutil/samplefmt.h libavutil/sha.h libavutil/sha512.h libavutil/softfloat.h libavutil/stereo3d.h libavutil/threadmessage.h libavutil/time.h libavutil/time_internal.h libavutil/timecode.h libavutil/timer.h libavutil/timestamp.h libavutil/tree.h libavutil/version.h libavutil/x86_cpu.h libavutil/xga_font_data.h libavutil/xtea.h libbpg.c libbpg.h post.js pre.js tmalloc.c x265/COPYING x265/build/README.txt x265/build/linux/make-Makefiles.bash x265/build/linux/multilib.sh x265/build/msys/make-Makefiles.sh x265/build/msys/make-x86_64-w64-mingw32-Makefiles.sh x265/build/msys/multilib.sh x265/build/msys/toolchain-mingw32.cmake x265/build/msys/toolchain-x86_64-w64-mingw32.cmake x265/build/vc10-x86/build-all.bat x265/build/vc10-x86/make-solutions.bat x265/build/vc10-x86_64/build-all.bat x265/build/vc10-x86_64/make-solutions.bat x265/build/vc10-x86_64/multilib.bat x265/build/vc11-x86/build-all.bat x265/build/vc11-x86/make-solutions.bat x265/build/vc11-x86_64/build-all.bat x265/build/vc11-x86_64/make-solutions.bat x265/build/vc11-x86_64/multilib.bat x265/build/vc12-x86/build-all.bat x265/build/vc12-x86/make-solutions.bat x265/build/vc12-x86_64/build-all.bat x265/build/vc12-x86_64/make-solutions.bat x265/build/vc12-x86_64/multilib.bat x265/build/vc9-x86/build-all.bat x265/build/vc9-x86/make-solutions.bat x265/build/vc9-x86_64/build-all.bat x265/build/vc9-x86_64/make-solutions.bat x265/build/vc9-x86_64/multilib.bat x265/build/xcode/make-project.sh x265/doc/intra/intra-16x16.txt x265/doc/intra/intra-32x32.txt x265/doc/intra/intra-4x4.txt 
x265/doc/intra/intra-8x8.txt x265/doc/reST/Makefile x265/doc/reST/api.rst x265/doc/reST/cli.rst x265/doc/reST/conf.py x265/doc/reST/index.rst x265/doc/reST/introduction.rst x265/doc/reST/lossless.rst x265/doc/reST/presets.rst x265/doc/reST/threading.rst x265/doc/reST/x265.rst x265/doc/uncrustify/codingstyle.cfg x265/readme.rst x265/source/CMakeLists.txt x265/source/cmake/CMakeASM_YASMInformation.cmake x265/source/cmake/CMakeDetermineASM_YASMCompiler.cmake x265/source/cmake/CMakeTestASM_YASMCompiler.cmake x265/source/cmake/FindNuma.cmake x265/source/cmake/FindVLD.cmake x265/source/cmake/FindYasm.cmake x265/source/cmake/clean-generated.cmake x265/source/cmake/cmake_uninstall.cmake.in x265/source/cmake/version.cmake x265/source/common/CMakeLists.txt x265/source/common/bitstream.cpp x265/source/common/bitstream.h x265/source/common/common.cpp x265/source/common/common.h x265/source/common/constants.cpp x265/source/common/constants.h x265/source/common/contexts.h x265/source/common/cpu.cpp x265/source/common/cpu.h x265/source/common/cudata.cpp x265/source/common/cudata.h x265/source/common/dct.cpp x265/source/common/deblock.cpp x265/source/common/deblock.h x265/source/common/frame.cpp x265/source/common/frame.h x265/source/common/framedata.cpp x265/source/common/framedata.h x265/source/common/intrapred.cpp x265/source/common/ipfilter.cpp x265/source/common/loopfilter.cpp x265/source/common/lowres.cpp x265/source/common/lowres.h x265/source/common/md5.cpp x265/source/common/md5.h x265/source/common/mv.h x265/source/common/param.cpp x265/source/common/param.h x265/source/common/piclist.cpp x265/source/common/piclist.h x265/source/common/picyuv.cpp x265/source/common/picyuv.h x265/source/common/pixel.cpp x265/source/common/predict.cpp x265/source/common/predict.h x265/source/common/primitives.cpp x265/source/common/primitives.h x265/source/common/quant.cpp x265/source/common/quant.h x265/source/common/scalinglist.cpp x265/source/common/scalinglist.h 
x265/source/common/shortyuv.cpp x265/source/common/shortyuv.h x265/source/common/slice.cpp x265/source/common/slice.h x265/source/common/threading.cpp x265/source/common/threading.h x265/source/common/threadpool.cpp x265/source/common/threadpool.h x265/source/common/vec/dct-sse3.cpp x265/source/common/vec/dct-sse41.cpp x265/source/common/vec/dct-ssse3.cpp x265/source/common/vec/vec-primitives.cpp x265/source/common/version.cpp x265/source/common/wavefront.cpp x265/source/common/wavefront.h x265/source/common/winxp.cpp x265/source/common/winxp.h x265/source/common/x86/README.txt x265/source/common/x86/asm-primitives.cpp x265/source/common/x86/blockcopy8.asm x265/source/common/x86/blockcopy8.h x265/source/common/x86/const-a.asm x265/source/common/x86/cpu-a.asm x265/source/common/x86/dct8.asm x265/source/common/x86/dct8.h x265/source/common/x86/intrapred.h x265/source/common/x86/intrapred16.asm x265/source/common/x86/intrapred8.asm x265/source/common/x86/intrapred8_allangs.asm x265/source/common/x86/ipfilter16.asm x265/source/common/x86/ipfilter8.asm x265/source/common/x86/ipfilter8.h x265/source/common/x86/loopfilter.asm x265/source/common/x86/loopfilter.h x265/source/common/x86/mc-a.asm x265/source/common/x86/mc-a2.asm x265/source/common/x86/mc.h x265/source/common/x86/pixel-32.asm x265/source/common/x86/pixel-a.asm x265/source/common/x86/pixel-util.h x265/source/common/x86/pixel-util8.asm x265/source/common/x86/pixel.h x265/source/common/x86/pixeladd8.asm x265/source/common/x86/sad-a.asm x265/source/common/x86/sad16-a.asm x265/source/common/x86/ssd-a.asm x265/source/common/x86/x86inc.asm x265/source/common/x86/x86util.asm x265/source/common/yuv.cpp x265/source/common/yuv.h x265/source/compat/getopt/LGPL.txt x265/source/compat/getopt/getopt.c x265/source/compat/getopt/getopt.h x265/source/compat/msvc/stdint.h x265/source/encoder/CMakeLists.txt x265/source/encoder/analysis.cpp x265/source/encoder/analysis.h x265/source/encoder/api.cpp x265/source/encoder/bitcost.cpp 
x265/source/encoder/bitcost.h x265/source/encoder/dpb.cpp x265/source/encoder/dpb.h x265/source/encoder/encoder.cpp x265/source/encoder/encoder.h x265/source/encoder/entropy.cpp x265/source/encoder/entropy.h x265/source/encoder/frameencoder.cpp x265/source/encoder/frameencoder.h x265/source/encoder/framefilter.cpp x265/source/encoder/framefilter.h x265/source/encoder/level.cpp x265/source/encoder/level.h x265/source/encoder/motion.cpp x265/source/encoder/motion.h x265/source/encoder/nal.cpp x265/source/encoder/nal.h x265/source/encoder/ratecontrol.cpp x265/source/encoder/ratecontrol.h x265/source/encoder/rdcost.h x265/source/encoder/reference.cpp x265/source/encoder/reference.h x265/source/encoder/sao.cpp x265/source/encoder/sao.h x265/source/encoder/search.cpp x265/source/encoder/search.h x265/source/encoder/sei.cpp x265/source/encoder/sei.h x265/source/encoder/slicetype.cpp x265/source/encoder/slicetype.h x265/source/encoder/weightPrediction.cpp x265/source/input/input.cpp x265/source/input/input.h x265/source/input/y4m.cpp x265/source/input/y4m.h x265/source/input/yuv.cpp x265/source/input/yuv.h x265/source/output/output.cpp x265/source/output/output.h x265/source/output/raw.cpp x265/source/output/raw.h x265/source/output/reconplay.cpp x265/source/output/reconplay.h x265/source/output/y4m.cpp x265/source/output/y4m.h x265/source/output/yuv.cpp x265/source/output/yuv.h x265/source/profile/CMakeLists.txt x265/source/profile/PPA/CMakeLists.txt x265/source/profile/PPA/ppa.cpp x265/source/profile/PPA/ppa.h x265/source/profile/PPA/ppaApi.h x265/source/profile/cpuEvents.h x265/source/profile/vtune/CMakeLists.txt x265/source/profile/vtune/vtune.cpp x265/source/profile/vtune/vtune.h x265/source/test/CMakeLists.txt x265/source/test/checkasm-a.asm x265/source/test/intrapredharness.cpp x265/source/test/intrapredharness.h x265/source/test/ipfilterharness.cpp x265/source/test/ipfilterharness.h x265/source/test/mbdstharness.cpp x265/source/test/mbdstharness.h 
x265/source/test/pixelharness.cpp x265/source/test/pixelharness.h x265/source/test/rate-control-tests.txt x265/source/test/regression-tests.txt x265/source/test/smoke-tests.txt x265/source/test/testbench.cpp x265/source/test/testharness.h x265/source/x265-extras.cpp x265/source/x265-extras.h x265/source/x265.cpp x265/source/x265.def.in x265/source/x265.h x265/source/x265.pc.in x265/source/x265.rc.in x265/source/x265_config.h.in x265/source/x265cli.h x265_glue.c
diffstat 508 files changed, 343256 insertions(+), 0 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ChangeLog	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,60 @@
+version 0.9.7:
+
+- Increased the memory size of the JS decoder to 32 MiB and avoided
+  polluting the global namespace.
+- Published the Emscripten patches which reduce the size of the
+  generated JS code.
+
+version 0.9.6:
+
+- Faster encoding (x265 is the default encoder and is built in bpgenc).
+- Added monochrome support to x265.
+- Fixed metadata handling.
+
+version 0.9.5:
+
+- Added animation support.
+- Added bpgview utility.
+- bpgenc: fixed support of some JPEG parameter combinations.
+- Fixed JS 8 bit only decoder and renamed it to bpgdec8.js.
+- libbpg: added CMYK output format.
+
+version 0.9.4:
+
+- Modified alpha plane encoding to allow progressive display and
+  streaming encoding. This change is incompatible, so images
+  containing alpha from the previous versions of the format cannot be
+  decoded.
+
+- Added 4:2:2 and 4:2:0 chroma formats with MPEG2 chroma sample position.
+
+version 0.9.3:
+
+- Fixed small palette PNG.
+- Added support for BT 709 and BT 2020 YCbCr.
+- Added limited range color support.
+- Changed CMYK signalling.
+- Added premultiplied alpha support.
+- Specified the output RGB color space if no color profile is present.
+- Reduced the size of the js decoder.
+- Removed buffer overflows.
+
+version 0.9.2:
+
+- Fixed encoding of paletted PNG.
+- Reduced memory usage in the decoder.
+- Added faster 8 bit only Javascript decoder.
+- bpgenc: added '-e' option to explicitly select the encoder.
+- bpgenc: set default bit depth to 8.
+- bpgenc: added lossless support with x265.
+- js decoder: handle width and height attributes.
+
+version 0.9.1:
+
+- Added new meta data tags: ICC profile, XMP and thumbnail.
+- Disabled metadata copying by default.
+- Use same chroma pixel position as JPEG for 4:2:2 and 4:2:0.
+
+version 0.9:
+
+- Initial release.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Makefile	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,249 @@
+# libbpg Makefile
+# 
+# Compile options:
+#
+# Enable compilation of Javascript decoder with Emscripten
+#USE_EMCC=y
+# Enable x265 for the encoder
+USE_X265=y
+# Enable the JCTVC code (best quality but slow) for the encoder
+#USE_JCTVC=y
+# Compile bpgview (SDL and SDL_image libraries needed)
+USE_BPGVIEW=y
+# Enable it to use bit depths > 12 (need more tests to validate encoder)
+#USE_JCTVC_HIGH_BIT_DEPTH=y
+# Enable the cross compilation for Windows
+#CONFIG_WIN32=y
+# Enable for compilation on MacOS X
+#CONFIG_APPLE=y
+# Installation prefix
+prefix=/usr/local
+
+
+#################################
+
+ifdef CONFIG_WIN32
+CROSS_PREFIX:=x86_64-w64-mingw32-
+#CROSS_PREFIX=i686-w64-mingw32-
+EXE:=.exe
+else
+CROSS_PREFIX:=
+EXE:=
+endif
+
+CC=$(CROSS_PREFIX)gcc
+CXX=$(CROSS_PREFIX)g++
+AR=$(CROSS_PREFIX)ar
+EMCC=emcc
+
+PWD:=$(shell pwd)
+
+CFLAGS:=-Os -Wall -MMD -fno-asynchronous-unwind-tables -fdata-sections -ffunction-sections -fno-math-errno -fno-signed-zeros -fno-tree-vectorize -fomit-frame-pointer
+CFLAGS+=-D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -D_REENTRANT
+CFLAGS+=-I.
+CFLAGS+=-DCONFIG_BPG_VERSION=\"$(shell cat VERSION)\"
+ifdef USE_JCTVC_HIGH_BIT_DEPTH
+CFLAGS+=-DRExt__HIGH_BIT_DEPTH_SUPPORT
+endif
+
+# Emscripten config
+EMLDFLAGS:=-s "EXPORTED_FUNCTIONS=['_bpg_decoder_open','_bpg_decoder_decode','_bpg_decoder_get_info','_bpg_decoder_start','_bpg_decoder_get_frame_duration','_bpg_decoder_get_line','_bpg_decoder_close','_malloc','_free']"
+EMLDFLAGS+=-s NO_FILESYSTEM=1 -s NO_BROWSER=1
+#EMLDFLAGS+=-O1 --pre-js pre.js --post-js post.js
+# Note: the closure compiler is disabled because it adds unwanted global symbols
+EMLDFLAGS+=-O3 --memory-init-file 0 --closure 0 --pre-js pre.js --post-js post.js
+EMCFLAGS:=$(CFLAGS)
+
+LDFLAGS=-g
+ifdef CONFIG_APPLE
+LDFLAGS+=-Wl,-dead_strip
+else
+LDFLAGS+=-Wl,--gc-sections
+endif
+CFLAGS+=-g
+CXXFLAGS=$(CFLAGS)
+
+PROGS=bpgdec$(EXE) bpgenc$(EXE)
+ifdef USE_BPGVIEW
+PROGS+=bpgview$(EXE)
+endif
+ifdef USE_EMCC
+PROGS+=bpgdec.js bpgdec8.js bpgdec8a.js
+endif
+
+all: $(PROGS)
+
+LIBBPG_OBJS:=$(addprefix libavcodec/, \
+hevc_cabac.o  hevc_filter.o  hevc.o         hevcpred.o  hevc_refs.o\
+hevcdsp.o     hevc_mvs.o     hevc_ps.o   hevc_sei.o\
+utils.o cabac.o golomb.o videodsp.o )
+LIBBPG_OBJS+=$(addprefix libavutil/, mem.o buffer.o log2_tab.o frame.o pixdesc.o md5.o )
+LIBBPG_OBJS+=libbpg.o
+
+LIBBPG_JS_OBJS:=$(patsubst %.o, %.js.o, $(LIBBPG_OBJS)) tmalloc.js.o
+
+LIBBPG_JS8_OBJS:=$(patsubst %.o, %.js8.o, $(LIBBPG_OBJS)) tmalloc.js8.o
+
+LIBBPG_JS8A_OBJS:=$(patsubst %.o, %.js8a.o, $(LIBBPG_OBJS)) tmalloc.js8a.o
+
+$(LIBBPG_OBJS): CFLAGS+=-D_ISOC99_SOURCE -D_POSIX_C_SOURCE=200112 -D_XOPEN_SOURCE=600 -DHAVE_AV_CONFIG_H -std=c99 -D_GNU_SOURCE=1 -DUSE_VAR_BIT_DEPTH -DUSE_PRED
+
+$(LIBBPG_JS_OBJS): EMCFLAGS+=-D_ISOC99_SOURCE -D_POSIX_C_SOURCE=200112 -D_XOPEN_SOURCE=600 -DHAVE_AV_CONFIG_H -std=c99 -D_GNU_SOURCE=1 -DUSE_VAR_BIT_DEPTH
+
+$(LIBBPG_JS8_OBJS): EMCFLAGS+=-D_ISOC99_SOURCE -D_POSIX_C_SOURCE=200112 -D_XOPEN_SOURCE=600 -DHAVE_AV_CONFIG_H -std=c99 -D_GNU_SOURCE=1
+
+$(LIBBPG_JS8A_OBJS): EMCFLAGS+=-D_ISOC99_SOURCE -D_POSIX_C_SOURCE=200112 -D_XOPEN_SOURCE=600 -DHAVE_AV_CONFIG_H -std=c99 -D_GNU_SOURCE=1 -DUSE_PRED
+
+BPGENC_OBJS:=bpgenc.o
+BPGENC_LIBS:=
+
+ifdef USE_X265
+
+X265_LIBS:=./x265.out/8bit/libx265.a ./x265.out/10bit/libx265.a ./x265.out/12bit/libx265.a
+BPGENC_OBJS+=x265_glue.o $(X265_LIBS)
+
+bpgenc.o: CFLAGS+=-DUSE_X265
+x265_glue.o: CFLAGS+=-I./x265/source -I./x265.out/8bit
+x265_glue.o: $(X265_LIBS)
+
+ifdef CONFIG_WIN32
+CMAKE_OPTS:=-DCMAKE_TOOLCHAIN_FILE=../../x265/build/msys/toolchain-x86_64-w64-mingw32.cmake
+else
+CMAKE_OPTS:=
+endif
+
+x265.out:
+	mkdir -p x265.out/8bit x265.out/10bit x265.out/12bit
+	cd x265.out/12bit && cmake ../../x265/source $(CMAKE_OPTS) -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF -DMAIN12=ON
+	cd x265.out/10bit && cmake ../../x265/source $(CMAKE_OPTS) -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF -DMAIN10=ON
+	cd x265.out/8bit && cmake ../../x265/source $(CMAKE_OPTS) -DLINKED_10BIT=ON -DLINKED_12BIT=ON -DENABLE_SHARED=OFF -DENABLE_CLI=OFF
+
+# use this target to manually rebuild x265
+x265_make: | x265.out
+	$(MAKE) -C x265.out/12bit
+	$(MAKE) -C x265.out/10bit
+	$(MAKE) -C x265.out/8bit
+
+x265_clean:
+	rm -rf x265.out
+
+$(X265_LIBS): x265_make
+
+else
+
+x265_clean:
+
+endif # USE_X265
+
+ifdef USE_JCTVC
+JCTVC_OBJS=$(addprefix jctvc/TLibEncoder/, SyntaxElementWriter.o TEncSbac.o \
+TEncBinCoderCABACCounter.o TEncGOP.o\
+TEncSampleAdaptiveOffset.o TEncBinCoderCABAC.o TEncAnalyze.o\
+TEncEntropy.o TEncTop.o SEIwrite.o TEncPic.o TEncRateCtrl.o\
+WeightPredAnalysis.o TEncSlice.o TEncCu.o NALwrite.o TEncCavlc.o\
+TEncSearch.o TEncPreanalyzer.o)
+JCTVC_OBJS+=jctvc/TLibVideoIO/TVideoIOYuv.o
+JCTVC_OBJS+=$(addprefix jctvc/TLibCommon/, TComWeightPrediction.o TComLoopFilter.o\
+TComBitStream.o TComMotionInfo.o TComSlice.o ContextModel3DBuffer.o\
+TComPic.o TComRdCostWeightPrediction.o TComTU.o TComPicSym.o\
+TComPicYuv.o TComYuv.o TComTrQuant.o TComInterpolationFilter.o\
+ContextModel.o TComSampleAdaptiveOffset.o SEI.o TComPrediction.o\
+TComDataCU.o TComChromaFormat.o Debug.o TComRom.o\
+TComPicYuvMD5.o TComRdCost.o TComPattern.o TComCABACTables.o)
+JCTVC_OBJS+=jctvc/libmd5/libmd5.o
+JCTVC_OBJS+=jctvc/TAppEncCfg.o jctvc/TAppEncTop.o jctvc/program_options_lite.o 
+
+$(JCTVC_OBJS) jctvc_glue.o: CFLAGS+=-I$(PWD)/jctvc -Wno-sign-compare
+
+jctvc/libjctvc.a: $(JCTVC_OBJS)
+	$(AR) rcs $@ $^
+
+BPGENC_OBJS+=jctvc_glue.o jctvc/libjctvc.a
+
+bpgenc.o: CFLAGS+=-DUSE_JCTVC
+endif # USE_JCTVC
+
+
+ifdef CONFIG_WIN32
+
+BPGDEC_LIBS:=-lpng -lz
+BPGENC_LIBS+=-lpng -ljpeg -lz
+BPGVIEW_LIBS:=-lmingw32 -lSDLmain -lSDL_image -lSDL -mwindows
+
+else
+
+ifdef CONFIG_APPLE
+LIBS:=
+else
+LIBS:=-lrt
+endif # !CONFIG_APPLE 
+LIBS+=-lm -lpthread
+
+BPGDEC_LIBS:=-lpng $(LIBS)
+BPGENC_LIBS+=-lpng -ljpeg $(LIBS)
+BPGVIEW_LIBS:=-lSDL_image -lSDL $(LIBS)
+
+endif #!CONFIG_WIN32
+
+bpgenc.o: CFLAGS+=-Wno-unused-but-set-variable
+
+libbpg.a: $(LIBBPG_OBJS) 
+	$(AR) rcs $@ $^
+
+bpgdec$(EXE): bpgdec.o libbpg.a
+	$(CC) $(LDFLAGS) -o $@ $^ $(BPGDEC_LIBS)
+
+bpgenc$(EXE): $(BPGENC_OBJS)
+	$(CXX) $(LDFLAGS) -o $@ $^ $(BPGENC_LIBS)
+
+bpgview$(EXE): bpgview.o libbpg.a
+	$(CC) $(LDFLAGS) -o $@ $^ $(BPGVIEW_LIBS)
+
+bpgdec.js: $(LIBBPG_JS_OBJS) post.js
+	$(EMCC) $(EMLDFLAGS) -s TOTAL_MEMORY=33554432 -o $@ $(LIBBPG_JS_OBJS)
+
+bpgdec8.js: $(LIBBPG_JS8_OBJS) post.js
+	$(EMCC) $(EMLDFLAGS) -s TOTAL_MEMORY=33554432 -o $@ $(LIBBPG_JS8_OBJS)
+
+bpgdec8a.js: $(LIBBPG_JS8A_OBJS) post.js
+	$(EMCC) $(EMLDFLAGS) -s TOTAL_MEMORY=33554432 -o $@ $(LIBBPG_JS8A_OBJS)
+
+size:
+	strip bpgdec
+	size bpgdec libbpg.o libavcodec/*.o libavutil/*.o | sort -n
+	gzip < bpgdec | wc
+
+install: bpgenc bpgdec
+	install -s -m 755 $^ $(prefix)/bin
+
+CLEAN_DIRS=doc html libavcodec libavutil \
+     jctvc jctvc/TLibEncoder jctvc/TLibVideoIO jctvc/TLibCommon jctvc/libmd5
+
+clean: x265_clean
+	rm -f $(PROGS) *.o *.a *.d *~ $(addsuffix /*.o, $(CLEAN_DIRS)) \
+          $(addsuffix /*.d, $(CLEAN_DIRS)) $(addsuffix /*~, $(CLEAN_DIRS)) \
+          $(addsuffix /*.a, $(CLEAN_DIRS))
+
+%.o: %.c
+	$(CC) $(CFLAGS) -c -o $@ $<
+
+%.o: %.cpp
+	$(CXX) $(CXXFLAGS) -c -o $@ $<
+
+%.js.o: %.c
+	$(EMCC) $(EMCFLAGS) -c -o $@ $<
+
+%.js8.o: %.c
+	$(EMCC) $(EMCFLAGS) -c -o $@ $<
+
+%.js8a.o: %.c
+	$(EMCC) $(EMCFLAGS) -c -o $@ $<
+
+-include $(wildcard *.d)
+-include $(wildcard libavcodec/*.d)
+-include $(wildcard libavutil/*.d)
+-include $(wildcard jctvc/*.d)
+-include $(wildcard jctvc/TLibEncoder/*.d)
+-include $(wildcard jctvc/TLibVideoIO/*.d)
+-include $(wildcard jctvc/TLibCommon/*.d)
+-include $(wildcard jctvc/libmd5/*.d)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/README	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,263 @@
+BPG Image library and utilities
+-------------------------------
+
+1) Quick introduction
+---------------------
+
+- Edit the Makefile to change the compile options (the default compile
+  options should be OK for Linux). Type 'make' to compile and 'make
+  install' to install the compiled binaries.
+
+- bpgview: in order to compile it you need to install the SDL and
+  SDL_image libraries.
+
+- Emscripten usage: in order to generate the Javascript decoder, you
+  must install Emscripten and enable its use in the Makefile.
+
+- An HTML demonstration (with a precompiled Javascript decoder) is
+  available in html/index.html (if you use Chrome and want to use
+  file:// to access it, launch Chrome with the option
+  --allow-file-access-from-files). 
+
+- The BPG file format is specified in doc/bpg_spec.txt.
+
+2) Compilation and Installation Notes
+-------------------------------------
+
+2.1) Linux
+----------
+
+  - Edit the Makefile to change the compile options (the default
+  compile options should be OK). Type 'make' to compile and 'make
+  install' to install the compiled binaries.
+ 
+  - Use 'make -j N' where N is the number of CPU cores to compile faster.
+
+  - The following packages must be installed: SDL-devel
+  SDL_image-devel yasm. It is recommended to use yasm version >= 1.3.0
+  to have a faster compilation.
+ 
+  - Only a 64 bit target is supported because x265 needs it for bit
+    depths > 8.
+
+2.2) Windows
+------------
+
+  - Only cross-compilation from Linux is supported.
+
+  - The following packages need to be installed: mingw64-gcc
+    mingw64-libpng mingw64-libjpeg-turbo mingw64-SDL mingw64-SDL_image
+    yasm. It is recommended to use yasm version >= 1.3.0 to have a
+    faster compilation.
+
+  - Only a 64 bit target is supported because x265 needs it for bit
+    depths > 8.
+
+3) BPG encoder
+--------------
+
+The BPG command line encoder is 'bpgenc'. It takes JPEG or PNG images
+as input.
+
+- Speed: by default bpgenc uses the x265. You can compile the much
+  slower but more efficient JCTVC encoder and select it with the '-e
+  jctvc' option. With x265 you can select the encoding speed with the
+  '-m' option (1 = fast, but larger image, 9 = slower but smaller
+  image).
+
+- Bit depth: the default bit depth is 8. You can increase it to 10
+  ('-b 10' option) to slightly increase the compression ratio. For web
+  publishing it is generally not a good idea because the Javascript
+  decoder uses more memory. The compiled x265 encoder supports the bit
+  depth of 8, 10 and 12. The slower JCTVC encoder can be compiled to
+  support higher bit depths (up to 14) by enabling the Makefile
+  define: USE_JCTVC_HIGH_BIT_DEPTH.
+
+- Lossless compression is supported as a bonus through the HEVC
+  lossless capabilities. Use a PNG input in this case unless you know
+  what you are doing! In the case of a JPEG input, the compression is
+  lossless relative to the JPEG YCbCr data, not the RGB data. In any
+  case, the bit depth should match that of your picture, otherwise the
+  file size increases a lot. By default the lossless mode sets the bit
+  depth to 8 bits. The preferred color space is set to "rgb". Notes:
+  
+    - lossless mode is less tested than the lossy mode but it usually
+      gives better results than PNG on photographic images.
+
+    - the JCTVC encoder gives smaller images than the x265 encoder
+      with lossless compression.
+
+- There is a small difference of interpretation of the quantizer
+  parameter (-q option) between the x265 and JCTVC encoder.
+
+- Color space and chroma format:
+
+    * For JPEG input, the color space of the input image is not
+      modified (it is YCbCr, RGB, YCbCrK or CMYK). The chroma is
+      subsampled according to the preferred chroma format ('-f'
+      option).
+
+    * For PNG input, the input image is converted to the preferred
+      color space ('-c' option). Its chroma is then subsampled
+      according to the preferred chroma format.
+
+    * grayscale images are kept unmodified.
+
+- Premultiplied alpha: by default bpgenc uses non-premultiplied alpha
+  to preserve the color components. However, premultiplied alpha
+  ('-premul' option) usually gives a better compression at the expense
+  of a loss in the color components. This loss is not an issue if the
+  image is not edited.
+
+- Animations: with the '-a' option, animations can be encoded from a
+  sequence of PNG or JPEG images, indexed from 1 or 0. For example:
+
+  ./bpgenc -a anim%2d.png -fps 25 -loop 0 -o anim.bpg
+
+  generates an animation from anim01.png, anim02.png, etc... The frame
+  rate is specified with '-fps' and the number of loops with '-loop'
+  (0 = infinite). If a different delay per image is needed as in some
+  animated GIFs, a text file can be specified with the '-delayfile'
+  option. It contains one number per image giving its duration in
+  centiseconds. All durations are rounded to a multiple of '1/fps', so
+  it is important to set a consistent frame rate.
+  
+  The necessary frames and delay file can be generated from animated
+  GIFs with the ImageMagick tools:
+  
+  convert -coalesce anim.gif anim%d.png
+ 
+  identify -format "%T\n" anim.gif > anim.txt
+  
+  In order to reduce the file size, the frame rate can be chosen so
+  that most frames have a frame period of 1 (hence if anim.txt
+  contains only frame durations of 5 centiseconds, then choose a frame
+  rate of 20 frames/s).
+
+  As GIFs use paletted colors and 1 bit transparency, it is always
+  better to start from the source material (e.g. PNG files) to have
+  the best quality.
+
+  A BPG decoder not supporting animations only displays the first
+  frame.
+
+- By default, bpgenc does not copy the metadata. You can copy them
+  with the '-keepmetadata' option. For JPEG input, EXIF, ICCP and XMP
+  are copied. For PNG input, ICCP is copied.
+
+- Objective comparisons: x265 is tuned by default for SSIM. The JCTVC
+  encoder is tuned for PSNR only, not for SSIM, so you should use PSNR
+  when making objective comparisons with other formats.
+
+4) BPG decoder
+--------------
+
+The BPG command line decoder is bpgdec. It outputs a PNG or PPM
+image. Use a PPM output to get the fastest speed.
+
+- With the '-i' option, you have information about the BPG image (and
+no decoded image is output).
+
+- The '-b' option selects the bit depth (8 or 16) of the PNG
+  output. It is independent of the internal BPG bit depth.
+
+5) BPG viewer
+-------------
+
+The BPG image viewer uses the SDL library to display BPG images and
+other image formats supported by the SDL_image library. The available
+keys are displayed by launching bpgview without parameters. bpgview
+supports BPG animations.
+
+6) BPG decoding library
+-----------------------
+
+BPG images can be decoded in any program with the libbpg
+library.
+
+The API is not considered stable yet so that the library is only
+provided as a static one.
+
+Currently there is no similar library for encoding so you should
+invoke the bpgenc utility.
+
+7) Javascript decoder
+---------------------
+
+The following Javascript decoders are available, sorted by increasing size:
+
+             > 8 bits   animations
+bpgdec8.js   no         no
+bpgdec.js    yes        no
+bpgdec8a.js  no         yes
+
+
+The 8 bit only decoders are a little faster and consume less memory
+(16 MB instead of 32 MB by default; you can change the memory
+configuration in the Makefile if you want to handle larger images).
+
+The Javascript decoder substitutes all the <img> tags with a source
+having a .bpg extension with a <canvas> tag and decodes the BPG image
+into it. Stylesheets are supported (the 'id' and 'class' attributes
+are preserved). The 'width' and 'height' attributes are supported only
+with pixel units.
+
+The image data is downloaded with the XMLHttpRequest object. So the
+BPG images and the BPG Javascript decoder must be in the same domain
+unless Cross-Origin Resource Sharing is used.
+
+When animations are displayed, all the frames are stored in memory, so
+animations with a large number of frames and large resolutions should
+be avoided, as with animated GIFs.
+
+asm.js gives an interesting speed boost, so we hope that more browsers
+will support this Javascript subset.
+
+8) FFmpeg modifications
+-----------------------
+
+- Completed support of chroma_format_idc = 0 (monochrome mode).
+
+- Fixed RDPCM support (intra predictions).
+
+- Reduced memory usage for the SAO loop filter.
+
+- Generated the IDCT coefficients dynamically to reduce the code size.
+
+- Added a 'dynamic bit depth' mode where all the bit depths from 8 to
+  14 are supported without code duplication but slower decoding.
+
+- Added a modified SPS header to reduce the size of the BPG decoder
+  (an alternate solution is to generate standard VPS and SPS headers
+  from the BPG header).
+
+- Added defines to keep only the HEVC intra code and suppress the
+  parsing of all the irrelevant NAL units.
+
+- Stripped FFmpeg from all codecs except HEVC and the necessary
+  support code.
+
+9) x265 modifications
+---------------------
+
+- Support of monochrome format (some parts not used by BPG may be
+  missing).
+
+- Support of static build.
+
+10) Licensing
+-------------
+
+- libbpg and bpgdec are released under the LGPL license (the FFmpeg
+  part is under the LGPL, the BPG specific part is released under the
+  BSD license).
+
+- bpgenc is released under the GPL version 2 license. The BPG specific
+  code is released under the BSD license. The JCTVC code is released
+  under the BSD license. The x265 code is released under the GPL
+  version 2 license.
+
+- BPG relies on the HEVC compression technology which may be protected
+  by patents in some countries. Most devices already include or will
+  include hardware HEVC support, so we suggest to use it if patents
+  are an issue.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/VERSION	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1 @@
+0.9.7
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bpgdec.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,354 @@
+/*
+ * BPG decoder command line utility
+ *
+ * Copyright (c) 2014 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <getopt.h>
+#include <inttypes.h>
+
+/* define it to include PNG output */
+#define USE_PNG
+
+#ifdef USE_PNG
+#include <png.h>
+#endif
+
+#include "libbpg.h"
+
+static void ppm_save(BPGDecoderContext *img, const char *filename)
+{   /* Write the decoded image to 'filename' as a binary PPM (P6, 8-bit RGB). */
+    BPGImageInfo img_info_s, *img_info = &img_info_s;
+    FILE *f;
+    int w, h, y;
+    uint8_t *rgb_line;
+
+    bpg_decoder_get_info(img, img_info);
+    /* NOTE(review): malloc() and fwrite() results are unchecked below -- confirm policy */
+    w = img_info->width;
+    h = img_info->height;
+
+    rgb_line = malloc(3 * w);
+
+    f = fopen(filename,"wb");
+    if (!f) {
+        fprintf(stderr, "%s: I/O error\n", filename);
+        exit(1);
+    }
+        /* PPM header: magic, dimensions, maximum component value */
+    fprintf(f, "P6\n%d %d\n%d\n", w, h, 255);
+    /* decode and write one packed RGB24 line at a time */
+    bpg_decoder_start(img, BPG_OUTPUT_FORMAT_RGB24);
+    for (y = 0; y < h; y++) {
+        bpg_decoder_get_line(img, rgb_line);
+        fwrite(rgb_line, 1, w * 3, f);
+    }
+    fclose(f);
+
+    free(rgb_line);
+}
+
+#ifdef USE_PNG
+static void png_write_data (png_structp png_ptr, png_bytep data,
+                            png_size_t length)
+{   /* libpng write callback: forward 'data' to the FILE set via png_set_write_fn(). */
+    FILE *f;
+    int ret; /* NOTE(review): int vs png_size_t comparison below -- fine for sane lengths */
+
+    f = png_get_io_ptr(png_ptr);
+    ret = fwrite(data, 1, length, f);
+    if (ret != length)
+	png_error(png_ptr, "PNG Write Error");
+}
+
+static void png_save(BPGDecoderContext *img, const char *filename, int bit_depth)
+{   /* Write the decoded image to 'filename' as RGB(A) PNG, 8 or 16 bits/component. */
+    BPGImageInfo img_info_s, *img_info = &img_info_s;
+    FILE *f;
+    png_structp png_ptr;
+    png_infop info_ptr;
+    png_bytep row_pointer;
+    int y, color_type, bpp;
+    BPGDecoderOutputFormat out_fmt;
+
+    if (bit_depth != 8 && bit_depth != 16) {
+        fprintf(stderr, "Only bit_depth = 8 or 16 are supported for PNG output\n");
+        exit(1);
+    }
+
+    bpg_decoder_get_info(img, img_info);
+
+    f = fopen(filename, "wb");
+    if (!f) {
+        fprintf(stderr, "%s: I/O error\n", filename);
+        exit(1);
+    }
+    /* NOTE(review): png_create_write_struct_2()/png_create_info_struct() returns unchecked */
+    png_ptr = png_create_write_struct_2(PNG_LIBPNG_VER_STRING,
+                                        NULL,
+                                        NULL,  /* error */
+                                        NULL, /* warning */
+                                        NULL,
+                                        NULL,
+                                        NULL);
+    info_ptr = png_create_info_struct(png_ptr);
+    png_set_write_fn(png_ptr, (png_voidp)f, &png_write_data, NULL);
+    /* libpng reports errors via longjmp(); any later write failure lands here */
+    if (setjmp(png_jmpbuf(png_ptr)) != 0) {
+        fprintf(stderr, "PNG write error\n");
+        exit(1);
+    }
+
+    if (img_info->has_alpha)
+        color_type = PNG_COLOR_TYPE_RGB_ALPHA;
+    else
+        color_type = PNG_COLOR_TYPE_RGB;
+        /* IHDR: dimensions, requested depth, non-interlaced */
+    png_set_IHDR(png_ptr, info_ptr, img_info->width, img_info->height,
+                 bit_depth, color_type, PNG_INTERLACE_NONE,
+                 PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
+
+    png_write_info(png_ptr, info_ptr);
+
+#if __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
+    if (bit_depth == 16) {
+        png_set_swap(png_ptr); /* PNG stores 16-bit samples big-endian */
+    }
+#endif
+    /* pick the decoder output format matching depth and alpha */
+    if (bit_depth == 16) {
+        if (img_info->has_alpha)
+            out_fmt = BPG_OUTPUT_FORMAT_RGBA64;
+        else
+            out_fmt = BPG_OUTPUT_FORMAT_RGB48;
+    } else {
+        if (img_info->has_alpha)
+            out_fmt = BPG_OUTPUT_FORMAT_RGBA32;
+        else
+            out_fmt = BPG_OUTPUT_FORMAT_RGB24;
+    }
+    
+    bpg_decoder_start(img, out_fmt);
+
+    bpp = (3 + img_info->has_alpha) * (bit_depth / 8);
+    row_pointer = (png_bytep)png_malloc(png_ptr, img_info->width * bpp);
+    for (y = 0; y < img_info->height; y++) {
+        bpg_decoder_get_line(img, row_pointer);
+        png_write_row(png_ptr, row_pointer);
+    }
+    png_free(png_ptr, row_pointer);
+    
+    png_write_end(png_ptr, NULL);
+    
+    png_destroy_write_struct(&png_ptr, &info_ptr);
+
+    fclose(f);
+}
+#endif /* USE_PNG */
+
+static void bpg_show_info(const char *filename, int show_extensions)
+{   /* Print image parameters (and extension list if show_extensions) to stdout. */
+    uint8_t *buf;
+    int buf_len, ret, buf_len_max;
+    FILE *f;
+    BPGImageInfo p_s, *p = &p_s;
+    BPGExtensionData *first_md = NULL, *md; /* fix: stays NULL when extensions not requested */
+    static const char *format_str[6] = {
+        "Gray",
+        "4:2:0",
+        "4:2:2",
+        "4:4:4",
+        "4:2:0_video",
+        "4:2:2_video",
+    };
+    static const char *color_space_str[BPG_CS_COUNT] = {
+        "YCbCr",
+        "RGB",
+        "YCgCo",
+        "YCbCr_BT709",
+        "YCbCr_BT2020",
+    };
+    static const char *extension_tag_str[] = {
+        "Unknown",
+        "EXIF",
+        "ICC profile",
+        "XMP",
+        "Thumbnail",
+        "Animation control",
+    };
+        
+    f = fopen(filename, "rb");
+    if (!f) {
+        fprintf(stderr, "Could not open %s\n", filename);
+        exit(1);
+    }
+
+    if (show_extensions) {
+        fseek(f, 0, SEEK_END);
+        buf_len_max = ftell(f);
+        fseek(f, 0, SEEK_SET);
+    } else {
+        /* if no extensions are shown, we just need the header */
+        buf_len_max = BPG_DECODER_INFO_BUF_SIZE;
+    }
+    buf = malloc(buf_len_max);
+    buf_len = fread(buf, 1, buf_len_max, f);
+
+    ret = bpg_decoder_get_info_from_buf(p, show_extensions ? &first_md : NULL,
+                                        buf, buf_len);
+    free(buf);
+    fclose(f);
+    if (ret < 0) {
+        fprintf(stderr, "Not a BPG image\n");
+        exit(1);
+    }
+    printf("size=%dx%d color_space=%s",
+           p->width, p->height,
+           p->format == BPG_FORMAT_GRAY ? "Gray" : color_space_str[p->color_space]);
+    if (p->has_w_plane) {
+        printf(" w_plane=%d", p->has_w_plane);
+    }
+    if (p->has_alpha) {
+        printf(" alpha=%d premul=%d", 
+               p->has_alpha, p->premultiplied_alpha);
+    }
+    printf(" format=%s limited_range=%d bit_depth=%d animation=%d\n",
+           format_str[p->format],
+           p->limited_range,
+           p->bit_depth,
+           p->has_animation);
+    /* first_md was only filled in when show_extensions was requested */
+    if (first_md) {
+        const char *tag_name;
+        printf("Extension data:\n");
+        for(md = first_md; md != NULL; md = md->next) {
+            if (md->tag <= 5) /* known tags; see extension_tag_str */
+                tag_name = extension_tag_str[md->tag];
+            else
+                tag_name = extension_tag_str[0];
+            printf("  tag=%d (%s) length=%d\n",
+                   md->tag, tag_name, md->buf_len);
+        }
+        bpg_decoder_free_extension_data(first_md);
+    }
+}
+
+static void help(void)
+{   /* Print usage to stdout and exit with status 1. */
+    printf("BPG Image Decoder version " CONFIG_BPG_VERSION "\n"
+           "usage: bpgdec [options] infile\n"
+           "Options:\n"
+           "-o outfile.[ppm|png]   set the output filename (default = out.png)\n"
+           "-b bit_depth           PNG output only: use bit_depth per component (8 or 16, default = 8)\n"
+           "-i                     display information about the image\n");
+    exit(1);
+}
+
+int main(int argc, char **argv)
+{   /* Decode a BPG file to PPM or PNG (chosen by output extension), or print info. */
+    FILE *f;
+    BPGDecoderContext *img;
+    uint8_t *buf;
+    int buf_len, bit_depth, c, show_info;
+    const char *outfilename, *filename, *p;
+    /* defaults: PNG output at 8 bits/component */
+    outfilename = "out.png";
+    bit_depth = 8;
+    show_info = 0;
+    for(;;) {
+        c = getopt(argc, argv, "ho:b:i");
+        if (c == -1)
+            break;
+        switch(c) {
+        case 'h':
+        show_help:
+            help();
+            break;
+        case 'o':
+            outfilename = optarg;
+            break;
+        case 'b':
+            bit_depth = atoi(optarg);
+            break;
+        case 'i':
+            show_info = 1;
+            break;
+        default:
+            exit(1);
+        }
+    }
+
+    if (optind >= argc)
+        goto show_help;
+
+    filename = argv[optind++];
+
+    if (show_info) {
+        bpg_show_info(filename, 1);
+        return 0;
+    }
+    /* slurp the whole file into memory for the decoder */
+    f = fopen(filename, "rb");
+    if (!f) {
+        fprintf(stderr, "Could not open %s\n", filename);
+        exit(1);
+    }
+
+    fseek(f, 0, SEEK_END);
+    buf_len = ftell(f);
+    fseek(f, 0, SEEK_SET);
+    /* NOTE(review): malloc() result unchecked; buf_len is int (no >2GB files) */
+    buf = malloc(buf_len);
+    if (fread(buf, 1, buf_len, f) != buf_len) {
+        fprintf(stderr, "Error while reading file\n");
+        exit(1);
+    }
+    
+    fclose(f);
+
+    img = bpg_decoder_open();
+
+    if (bpg_decoder_decode(img, buf, buf_len) < 0) {
+        fprintf(stderr, "Could not decode image\n");
+        exit(1);
+    }
+    free(buf);
+    /* NOTE(review): strrchr/strcasecmp need <string.h>/<strings.h>, not included -- confirm */
+#ifdef USE_PNG
+    p = strrchr(outfilename, '.');
+    if (p)
+        p++;
+    /* any extension other than .ppm gets PNG output */
+    if (p && strcasecmp(p, "ppm") != 0) {
+        png_save(img, outfilename, bit_depth);
+    } else 
+#endif
+    {
+        ppm_save(img, outfilename);
+    }
+
+    bpg_decoder_close(img);
+
+    return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bpgenc.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2967 @@
+/*
+ * BPG encoder
+ *
+ * Copyright (c) 2014 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <getopt.h>
+#include <math.h>
+#include <assert.h>
+
+#include <png.h>
+#include <jpeglib.h>
+
+#include "bpgenc.h"
+
+typedef uint16_t PIXEL;
+
+static void put_ue(uint8_t **pp, uint32_t v);
+
+static inline int clamp_pix(int a, int pixel_max)
+{   /* Clamp 'a' into [0, pixel_max]. */
+    if (a < 0)
+        return 0;
+    else if (a > pixel_max)
+        return pixel_max;
+    else
+        return a;
+}
+
+static inline int sub_mod_int(int a, int b, int m)
+{   /* (a - b) mod m, assuming 0 <= a, b < m. */
+    a -= b;
+    if (a < 0)
+        a += m;
+    return a;
+}
+
+static inline int add_mod_int(int a, int b, int m)
+{   /* (a + b) mod m, assuming 0 <= a, b < m. */
+    a += b;
+    if (a >= m)
+        a -= m;
+    return a;
+}
+
+typedef struct {
+    int c_shift;               /* fixed-point shift shared by all coefficients */
+    int c_rnd;                 /* rounding term: 1 << (c_shift - 1) */
+    int c_0_25, c_0_5, c_one;  /* 0.25, 0.5 and 1.0 in fixed point */
+    int rgb_to_ycc[3 * 3];     /* fixed-point RGB->YCbCr matrix, row major */
+    int y_one;                 /* 1.0 scaled for luma (smaller in limited range) */
+    int y_offset;              /* rounding plus black-level offset for luma */
+    int bit_depth;             /* output bit depth */
+    int pixel_max;             /* (1 << bit_depth) - 1 */
+    int c_center;              /* chroma zero point: 1 << (bit_depth - 1) */
+} ColorConvertState;
+
+static void convert_init(ColorConvertState *s, int in_bit_depth, 
+                         int out_bit_depth, BPGColorSpaceEnum color_space,
+                         int limited_range)
+{   /* Precompute fixed-point coefficients for RGB -> color_space conversion. */
+    double k_r, k_b, mult, mult_y, mult_c;
+    int in_pixel_max, out_pixel_max, c_shift, i;
+    double rgb_to_ycc[3 * 3];
+
+    /* XXX: could use one more bit */
+    c_shift = 31 - out_bit_depth;
+    in_pixel_max = (1 << in_bit_depth) - 1;
+    out_pixel_max = (1 << out_bit_depth) - 1;
+    mult = (double)out_pixel_max * (1 << c_shift) / (double)in_pixel_max;
+    //    printf("mult=%f c_shift=%d\n", mult, c_shift);
+    if (limited_range) { /* 219 (luma) / 224 (chroma) excursions, scaled to depth */
+        mult_y = (double)(219 << (out_bit_depth - 8)) * (1 << c_shift) / 
+            (double)in_pixel_max;
+        mult_c = (double)(224 << (out_bit_depth - 8)) * (1 << c_shift) / 
+            (double)in_pixel_max;
+    } else {
+        mult_y = mult;
+        mult_c = mult;
+    }
+    switch(color_space) {
+    case BPG_CS_YCbCr:
+        k_r = 0.299;
+        k_b = 0.114;
+        goto convert_ycc;
+        
+    case BPG_CS_YCbCr_BT709:
+        k_r = 0.2126; 
+        k_b = 0.0722;
+        goto convert_ycc;
+        
+    case BPG_CS_YCbCr_BT2020:
+        k_r = 0.2627;
+        k_b = 0.0593;
+    convert_ycc: /* build the 3x3 matrix from the k_r/k_b luma coefficients */
+        rgb_to_ycc[0] = k_r;
+        rgb_to_ycc[1] = 1 - k_r - k_b;
+        rgb_to_ycc[2] = k_b;
+        rgb_to_ycc[3] = -0.5 * k_r / (1 - k_b);
+        rgb_to_ycc[4] = -0.5 * (1 - k_r - k_b) / (1 - k_b);
+        rgb_to_ycc[5] = 0.5;
+        rgb_to_ycc[6] = 0.5;
+        rgb_to_ycc[7] = -0.5 * (1 - k_r - k_b) / (1 - k_r);
+        rgb_to_ycc[8] = -0.5 * k_b / (1 - k_r);
+        
+        for(i = 0; i < 3; i++)
+            s->rgb_to_ycc[i] = lrint(rgb_to_ycc[i] * mult_y);
+        for(i = 3; i < 9; i++)
+            s->rgb_to_ycc[i] = lrint(rgb_to_ycc[i] * mult_c);
+        break;
+    case BPG_CS_YCgCo:
+        s->c_0_25 = lrint(0.25 * mult_y);
+        s->c_0_5 = lrint(0.5 * mult_y);
+        break;
+    default:
+        break;
+    }
+    /* common fixed-point parameters */
+    s->c_one = lrint(mult);
+    s->c_shift = c_shift;
+    s->c_rnd = (1 << (c_shift - 1));
+    if (limited_range) {
+        s->y_offset = s->c_rnd + (16 << (c_shift + out_bit_depth - 8)); /* 16 = black level */
+        s->y_one = lrint(mult_y);
+    } else {
+        s->y_offset = s->c_rnd;
+        s->y_one = s->c_one;
+    }
+
+    s->bit_depth = out_bit_depth;
+    s->c_center = 1 << (out_bit_depth - 1);
+    s->pixel_max = out_pixel_max;
+}
+
+/* 8 bit input */
+static void rgb24_to_ycc(ColorConvertState *s,
+                         PIXEL *y_ptr, PIXEL *cb_ptr, PIXEL *cr_ptr,
+                         const void *src1, int n, int incr)
+{   /* Convert n packed 8-bit RGB pixels (step 'incr' bytes) to planar YCbCr. */
+    const uint8_t *src = src1;
+    int i, r, g, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, shift, rnd, center;
+    int pixel_max, y_offset;
+    /* copy state into locals for the inner loop */
+    c0 = s->rgb_to_ycc[0];
+    c1 = s->rgb_to_ycc[1];
+    c2 = s->rgb_to_ycc[2];
+    c3 = s->rgb_to_ycc[3];
+    c4 = s->rgb_to_ycc[4];
+    c5 = s->rgb_to_ycc[5];
+    c6 = s->rgb_to_ycc[6];
+    c7 = s->rgb_to_ycc[7];
+    c8 = s->rgb_to_ycc[8];
+    shift = s->c_shift;
+    rnd = s->c_rnd;
+    y_offset = s->y_offset;
+    center = s->c_center;
+    pixel_max = s->pixel_max;
+    for(i = 0; i < n; i++) {
+        r = src[0];
+        g = src[1];
+        b = src[2];
+        y_ptr[i] = clamp_pix((c0 * r + c1 * g + c2 * b +
+                              y_offset) >> shift, pixel_max);
+        cb_ptr[i] = clamp_pix(((c3 * r + c4 * g + c5 * b + 
+                                rnd) >> shift) + center, pixel_max);
+        cr_ptr[i] = clamp_pix(((c6 * r + c7 * g + c8 * b + 
+                                rnd) >> shift) + center, pixel_max);
+        src += incr;
+    }
+}
+
+static void rgb24_to_rgb(ColorConvertState *s,
+                         PIXEL *y_ptr, PIXEL *cb_ptr, PIXEL *cr_ptr,
+                         const void *src1, int n, int incr)
+{   /* "RGB" color space: rescale only; planes get G, B, R respectively. */
+    const uint8_t *src = src1;
+    int i, r, g, b, c, shift, rnd;
+
+    c = s->y_one;
+    shift = s->c_shift;
+    rnd = s->y_offset;
+    for(i = 0; i < n; i++) {
+        r = src[0];
+        g = src[1];
+        b = src[2];
+        y_ptr[i] = (c * g + rnd) >> shift;
+        cb_ptr[i] = (c * b + rnd) >> shift;
+        cr_ptr[i] = (c * r + rnd) >> shift;
+        src += incr;
+    }
+}
+
+static void rgb24_to_ycgco(ColorConvertState *s,
+                           PIXEL *y_ptr, PIXEL *cb_ptr, PIXEL *cr_ptr,
+                           const void *src1, int n, int incr)
+{   /* Convert n packed 8-bit RGB pixels to planar YCgCo. */
+    const uint8_t *src = src1;
+    int i, r, g, b, t1, t2, pixel_max, c_0_5, c_0_25, rnd, shift, center;
+    int y_offset;
+    /* copy state into locals for the inner loop */
+    c_0_25 = s->c_0_25;
+    c_0_5 = s->c_0_5;
+    rnd = s->c_rnd;
+    shift = s->c_shift;
+    pixel_max = s->pixel_max;
+    center = s->c_center;
+    y_offset = s->y_offset;
+    for(i = 0; i < n; i++) {
+        r = src[0];
+        g = src[1];
+        b = src[2];
+        t1 = c_0_5 * g;
+        t2 = c_0_25 * (r + b);
+        y_ptr[i] = clamp_pix((t1 + t2 + y_offset) >> shift, pixel_max);
+        cb_ptr[i] = clamp_pix(((t1 - t2 + rnd) >> shift) + center, 
+                              pixel_max);
+        cr_ptr[i] = clamp_pix(((c_0_5 * (r - b) +
+                                rnd) >> shift) + center, pixel_max);
+        src += incr;
+    }
+}
+
+/* Note: used for alpha/W so no limited range */
+static void gray8_to_gray(ColorConvertState *s,
+                          PIXEL *y_ptr, const uint8_t *src, int n, int incr)
+{   /* Rescale n 8-bit gray samples (full range; uses c_one/c_rnd). */
+    int i, g, c, shift, rnd;
+
+    c = s->c_one;
+    shift = s->c_shift;
+    rnd = s->c_rnd;
+    for(i = 0; i < n; i++) {
+        g = src[0];
+        y_ptr[i] = (c * g + rnd) >> shift;
+        src += incr;
+    }
+}
+
+static void luma8_to_gray(ColorConvertState *s,
+                          PIXEL *y_ptr, const uint8_t *src, int n, int incr)
+{   /* Rescale n 8-bit luma samples (honors limited range via y_one/y_offset). */
+    int i, g, c, shift, rnd;
+
+    c = s->y_one;
+    shift = s->c_shift;
+    rnd = s->y_offset;
+    for(i = 0; i < n; i++) {
+        g = src[0];
+        y_ptr[i] = (c * g + rnd) >> shift;
+        src += incr;
+    }
+}
+
+/* 16 bit input */
+
+static void rgb48_to_ycc(ColorConvertState *s, 
+                         PIXEL *y_ptr, PIXEL *cb_ptr, PIXEL *cr_ptr,
+                         const void *src1, int n, int incr)
+{   /* 16-bit variant of rgb24_to_ycc: packed RGB48 to planar YCbCr. */
+    const uint16_t *src = src1;
+    int i, r, g, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, shift, rnd, center;
+    int pixel_max, y_offset;
+    /* copy state into locals for the inner loop */
+    c0 = s->rgb_to_ycc[0];
+    c1 = s->rgb_to_ycc[1];
+    c2 = s->rgb_to_ycc[2];
+    c3 = s->rgb_to_ycc[3];
+    c4 = s->rgb_to_ycc[4];
+    c5 = s->rgb_to_ycc[5];
+    c6 = s->rgb_to_ycc[6];
+    c7 = s->rgb_to_ycc[7];
+    c8 = s->rgb_to_ycc[8];
+    shift = s->c_shift;
+    rnd = s->c_rnd;
+    y_offset = s->y_offset;
+    center = s->c_center;
+    pixel_max = s->pixel_max;
+    for(i = 0; i < n; i++) {
+        r = src[0];
+        g = src[1];
+        b = src[2];
+        y_ptr[i] = clamp_pix((c0 * r + c1 * g + c2 * b +
+                              y_offset) >> shift, pixel_max);
+        cb_ptr[i] = clamp_pix(((c3 * r + c4 * g + c5 * b + 
+                                rnd) >> shift) + center, pixel_max);
+        cr_ptr[i] = clamp_pix(((c6 * r + c7 * g + c8 * b + 
+                                rnd) >> shift) + center, pixel_max);
+        src += incr;
+    }
+}
+
+static void rgb48_to_ycgco(ColorConvertState *s, 
+                           PIXEL *y_ptr, PIXEL *cb_ptr, PIXEL *cr_ptr,
+                           const void *src1, int n, int incr)
+{   /* 16-bit variant of rgb24_to_ycgco: packed RGB48 to planar YCgCo. */
+    const uint16_t *src = src1;
+    int i, r, g, b, t1, t2, pixel_max, c_0_5, c_0_25, rnd, shift, center;
+    int y_offset;
+    /* copy state into locals for the inner loop */
+    c_0_25 = s->c_0_25;
+    c_0_5 = s->c_0_5;
+    rnd = s->c_rnd;
+    y_offset = s->y_offset;
+    shift = s->c_shift;
+    pixel_max = s->pixel_max;
+    center = s->c_center;
+    for(i = 0; i < n; i++) {
+        r = src[0];
+        g = src[1];
+        b = src[2];
+        t1 = c_0_5 * g;
+        t2 = c_0_25 * (r + b);
+        y_ptr[i] = clamp_pix((t1 + t2 + y_offset) >> shift, pixel_max);
+        cb_ptr[i] = clamp_pix(((t1 - t2 + rnd) >> shift) + center, 
+                              pixel_max);
+        cr_ptr[i] = clamp_pix(((c_0_5 * (r - b) +
+                                rnd) >> shift) + center, pixel_max);
+        src += incr;
+    }
+}
+
+/* Note: used for alpha/W so no limited range */
+static void gray16_to_gray(ColorConvertState *s, 
+                           PIXEL *y_ptr, const uint16_t *src, int n, int incr)
+{   /* Rescale n 16-bit gray samples (full range; uses c_one/c_rnd). */
+    int i, g, c, shift, rnd;
+
+    c = s->c_one;
+    shift = s->c_shift;
+    rnd = s->c_rnd;
+    for(i = 0; i < n; i++) {
+        g = src[0];
+        y_ptr[i] = (c * g + rnd) >> shift;
+        src += incr;
+    }
+}
+
+static void luma16_to_gray(ColorConvertState *s, 
+                           PIXEL *y_ptr, const uint16_t *src, int n, int incr)
+{   /* Rescale n 16-bit luma samples (honors limited range via y_one/y_offset). */
+    int i, g, c, shift, rnd;
+
+    c = s->y_one;
+    shift = s->c_shift;
+    rnd = s->y_offset;
+    for(i = 0; i < n; i++) {
+        g = src[0];
+        y_ptr[i] = (c * g + rnd) >> shift;
+        src += incr;
+    }
+}
+
+static void rgb48_to_rgb(ColorConvertState *s, 
+                         PIXEL *y_ptr, PIXEL *cb_ptr, PIXEL *cr_ptr,
+                         const void *src1, int n, int incr)
+{   /* 16-bit "RGB" color space: planes get G, B, R (matches rgb24_to_rgb). */
+    const uint16_t *src = src1;
+
+    luma16_to_gray(s, y_ptr, src + 1, n, incr);
+    luma16_to_gray(s, cb_ptr, src + 2, n, incr);
+    luma16_to_gray(s, cr_ptr, src + 0, n, incr);
+}
+
+typedef void RGBConvertFunc(ColorConvertState *s, 
+                            PIXEL *y_ptr, PIXEL *cb_ptr, PIXEL *cr_ptr,
+                            const void *src, int n, int incr);
+/* converter dispatch: [0] = 8-bit input, [1] = 16-bit input; one column per color space */
+static RGBConvertFunc *rgb_to_cs[2][BPG_CS_COUNT] = {
+    {   /* 8-bit input */
+        rgb24_to_ycc,
+        rgb24_to_rgb,
+        rgb24_to_ycgco,
+        rgb24_to_ycc,    /* BT709: same code, different matrix in state */
+        rgb24_to_ycc,    /* BT2020 */
+    },
+    {   /* 16-bit input */
+        rgb48_to_ycc,
+        rgb48_to_rgb,
+        rgb48_to_ycgco,
+        rgb48_to_ycc,    /* BT709 */
+        rgb48_to_ycc,    /* BT2020 */
+    }
+};
+    
+/* val = 1.0 - val */
+static void gray_one_minus(ColorConvertState *s, PIXEL *y_ptr, int n)
+{   /* Invert n samples in place: y = pixel_max - y. */
+    int pixel_max = s->pixel_max;
+    int i;
+
+    for(i = 0; i < n; i++) {
+        y_ptr[i] = pixel_max - y_ptr[i];
+    }
+}
+
+/* val = -val for chroma */
+static void gray_neg_c(ColorConvertState *s, PIXEL *y_ptr, int n)
+{   /* Negate n chroma samples modulo (pixel_max + 1); 0 maps to pixel_max. */
+    int pixel_max = s->pixel_max;
+    int i, v;
+
+    for(i = 0; i < n; i++) {
+        v = y_ptr[i];
+        if (v == 0)
+            v = pixel_max;
+        else
+            v = pixel_max + 1 - v;
+        y_ptr[i] = v;
+    }
+}
+
+
+/* decimation */
+
+/* phase = 0 */
+#define DP0TAPS2 7
+#define DP0TAPS (2 * DP0TAPS2 + 1) /* fix: was self-referential (2 * DP0TAPS + 1) */
+#define DP0C0 64
+#define DP0C1 40
+#define DP0C3 (-11)
+#define DP0C5 4
+#define DP0C7 (-1)
+
+/* phase = 0.5 */
+#define DP1TAPS2 5
+#define DP1TAPS (2 * DP1TAPS2)
+#define DP1C0 57
+#define DP1C1 17
+#define DP1C2 (-8)
+#define DP1C3 (-4)
+#define DP1C4 2
+
+#define DTAPS_MAX 7 /* max of DP0TAPS2 (7) and DP1TAPS2 (5): edge padding bound */
+
+/* chroma aligned with luma samples */
+static void decimate2p0_simple(PIXEL *dst, PIXEL *src, int n, int bit_depth)
+{   /* Horizontal 2:1 decimation, phase 0, symmetric 15-tap filter, saturated. */
+    int n2, i, pixel_max;
+    pixel_max = (1 << bit_depth) - 1;
+    n2 = (n + 1) / 2;
+    for(i = 0; i < n2; i++) {
+        dst[i] = clamp_pix(((src[-7] + src[7]) * DP0C7 + 
+                            (src[-5] + src[5]) * DP0C5 + 
+                            (src[-3] + src[3]) * DP0C3 + 
+                            (src[-1] + src[1]) * DP0C1 + 
+                            src[0] * DP0C0 + 64) >> 7, pixel_max);
+        src += 2;
+    }
+}
+
+/* same with more precision and no saturation */
+static void decimate2p0_simple16(int16_t *dst, PIXEL *src, int n, int bit_depth)
+{   /* Phase-0 decimation keeping intermediate precision (16-bit output). */
+    int n2, i, shift, rnd;
+    shift = bit_depth - 7;
+    rnd = 1 << (shift - 1);
+    n2 = (n + 1) / 2;
+    for(i = 0; i < n2; i++) {
+        dst[i] = ((src[-7] + src[7]) * DP0C7 + 
+                  (src[-5] + src[5]) * DP0C5 + 
+                  (src[-3] + src[3]) * DP0C3 + 
+                  (src[-1] + src[1]) * DP0C1 + 
+                  src[0] * DP0C0 + rnd) >> shift;
+        src += 2; /* fix: re-indented; was flush-left, visually outside the loop */
+    }
+}
+
+
+/* chroma half way between luma samples */
+static void decimate2p1_simple(PIXEL *dst, PIXEL *src, int n, int bit_depth)
+{   /* Horizontal 2:1 decimation, phase 0.5, symmetric 10-tap filter, saturated. */
+    int n2, i, pixel_max;
+    pixel_max = (1 << bit_depth) - 1;
+    n2 = (n + 1) / 2;
+    for(i = 0; i < n2; i++) {
+        dst[i] = clamp_pix(((src[-4] + src[5]) * DP1C4 + 
+                            (src[-3] + src[4]) * DP1C3 + 
+                            (src[-2] + src[3]) * DP1C2 + 
+                            (src[-1] + src[2]) * DP1C1 + 
+                            (src[0] + src[1]) * DP1C0 + 64) >> 7, pixel_max);
+        src += 2;
+    }
+}
+
+/* same with more precision and no saturation */
+static void decimate2p1_simple16(int16_t *dst, PIXEL *src, int n, int bit_depth)
+{   /* Phase-0.5 decimation keeping intermediate precision (16-bit output). */
+    int n2, i, shift, rnd;
+    shift = bit_depth - 7;
+    rnd = 1 << (shift - 1);
+    n2 = (n + 1) / 2;
+    for(i = 0; i < n2; i++) {
+        dst[i] = ((src[-4] + src[5]) * DP1C4 + 
+                  (src[-3] + src[4]) * DP1C3 + 
+                  (src[-2] + src[3]) * DP1C2 + 
+                  (src[-1] + src[2]) * DP1C1 + 
+                  (src[0] + src[1]) * DP1C0 + rnd) >> shift;
+        src += 2;
+    }
+}
+
+static void decimate2_h(PIXEL *dst, PIXEL *src, int n, int bit_depth, int phase)
+{   /* Horizontally decimate one line by 2, replicating edge pixels for the filter. */
+    PIXEL *src1, v;
+    int d, i;
+
+    if (phase == 0) 
+        d = DP0TAPS2;
+    else
+        d = DP1TAPS2;
+    /* add edge pixels */
+    src1 = malloc(sizeof(PIXEL) * (n + 2 * d)); /* NOTE(review): malloc unchecked */
+    v = src[0];
+    for(i = 0; i < d; i++)
+        src1[i] = v;
+    memcpy(src1 + d, src, n * sizeof(PIXEL));
+    v = src[n - 1];
+    for(i = 0; i < d; i++)
+        src1[d + n + i] = v;
+    if (phase == 0)
+        decimate2p0_simple(dst, src1 + d, n, bit_depth);
+    else
+        decimate2p1_simple(dst, src1 + d, n, bit_depth);
+    free(src1);
+}
+
+/* src1 is a temporary buffer of length n + 2 * DTAPS */
+static void decimate2_h16(int16_t *dst, PIXEL *src, int n, PIXEL *src1,
+                          int bit_depth, int phase)
+{   /* Same as decimate2_h but high-precision output and caller-provided scratch. */
+    PIXEL v;
+    int d, i;
+
+    if (phase == 0) 
+        d = DP0TAPS2;
+    else
+        d = DP1TAPS2;
+    /* add edge pixels */
+    v = src[0];
+    for(i = 0; i < d; i++)
+        src1[i] = v;
+    memcpy(src1 + d, src, n * sizeof(PIXEL));
+    v = src[n - 1];
+    for(i = 0; i < d; i++)
+        src1[d + n + i] = v;
+    if (phase == 0)
+        decimate2p0_simple16(dst, src1 + d, n, bit_depth);
+    else
+        decimate2p1_simple16(dst, src1 + d, n, bit_depth);
+        
+}
+
+static void decimate2_v(PIXEL *dst, int16_t **src, int pos, int n,
+                        int bit_depth)
+{   /* Vertical phase-0.5 decimation over a circular buffer of DP1TAPS lines;
+       'pos' indexes the current center line; output is clamped to bit_depth. */
+    int16_t *src0, *src1, *src2, *src3, *src4, *src5, *srcm1, *srcm2, *srcm3, *srcm4;
+    int i, shift, offset, pixel_max;
+    /* resolve the 10 filter-tap lines around 'pos' (modulo DP1TAPS) */
+    pos = sub_mod_int(pos, 4, DP1TAPS);
+    srcm4 = src[pos];
+    pos = add_mod_int(pos, 1, DP1TAPS);
+    srcm3 = src[pos];
+    pos = add_mod_int(pos, 1, DP1TAPS);
+    srcm2 = src[pos];
+    pos = add_mod_int(pos, 1, DP1TAPS);
+    srcm1 = src[pos];
+    pos = add_mod_int(pos, 1, DP1TAPS);
+    src0 = src[pos];
+    pos = add_mod_int(pos, 1, DP1TAPS);
+    src1 = src[pos];
+    pos = add_mod_int(pos, 1, DP1TAPS);
+    src2 = src[pos];
+    pos = add_mod_int(pos, 1, DP1TAPS);
+    src3 = src[pos];
+    pos = add_mod_int(pos, 1, DP1TAPS);
+    src4 = src[pos];
+    pos = add_mod_int(pos, 1, DP1TAPS);
+    src5 = src[pos];
+    /* undo the precision added by decimate2_h16 plus the filter gain */
+    shift = 21 - bit_depth;
+    offset = 1 << (shift - 1);
+    pixel_max = (1 << bit_depth) - 1;
+    for(i = 0; i < n; i++) {
+        dst[i] = clamp_pix(((srcm4[i] + src5[i]) * DP1C4 + 
+                            (srcm3[i] + src4[i]) * DP1C3 + 
+                            (srcm2[i] + src3[i]) * DP1C2 + 
+                            (srcm1[i] + src2[i]) * DP1C1 + 
+                            (src0[i] + src1[i]) * DP1C0 + offset) >> shift, pixel_max);
+    }
+}
+
+/* Note: we do the horizontal decimation first to use less CPU cache */
+static void decimate2_hv(uint8_t *dst, int dst_linesize,
+                         uint8_t *src, int src_linesize, 
+                         int w, int h, int bit_depth, int h_phase)
+{   /* 2:1 decimation in both directions; horizontal pass feeds a circular
+       buffer of DP1TAPS high-precision lines consumed by the vertical pass. */
+    PIXEL *buf1;
+    int16_t *buf2[DP1TAPS];
+    int w2, pos, i, y, y1, y2;
+    /* NOTE(review): malloc() results are unchecked in this function */
+    w2 = (w + 1) / 2;
+
+    buf1 = malloc(sizeof(PIXEL) * (w + 2 * DTAPS_MAX));
+    /* init line buffer */
+    for(i = 0; i < DP1TAPS; i++) {
+        buf2[i] = malloc(sizeof(int16_t) * w2);
+        y = i;
+        if (y > DP1TAPS2)
+            y -= DP1TAPS;
+        if (y < 0) {
+            /* copy from first line */
+            memcpy(buf2[i], buf2[0], sizeof(int16_t) * w2);
+        } else if (y >= h) {
+            /* copy from last line (only happens for small height) */
+            memcpy(buf2[i], buf2[h - 1], sizeof(int16_t) * w2);
+        } else {
+            decimate2_h16(buf2[i], (PIXEL *)(src + src_linesize * y), w,
+                          buf1, bit_depth, h_phase);
+        }
+    }
+
+    for(y = 0; y < h; y++) {
+        pos = y % DP1TAPS;
+        if ((y & 1) == 0) {
+            /* filter one line */
+            y2 = y >> 1;
+            decimate2_v((PIXEL *)(dst + y2 * dst_linesize), buf2,
+                        pos, w2, bit_depth);
+        }
+        /* add a new line in the buffer */
+        y1 = y + DP1TAPS2 + 1;
+        pos = add_mod_int(pos, DP1TAPS2 + 1, DP1TAPS);
+        if (y1 >= h) {
+            /* copy last line */
+            memcpy(buf2[pos], buf2[sub_mod_int(pos, 1, DP1TAPS)],
+                   sizeof(int16_t) * w2);
+        } else {
+            /* horizontally decimate new line */
+            decimate2_h16(buf2[pos], (PIXEL *)(src + src_linesize * y1), w,
+                          buf1, bit_depth, h_phase);
+        }
+    }
+
+    for(i = 0; i < DP1TAPS; i++)
+        free(buf2[i]);
+    free(buf1);
+}
+
+static void get_plane_res(Image *img, int *pw, int *ph, int i)
+{   /* Return the resolution of plane i (chroma planes are subsampled per format). */
+    if (img->format == BPG_FORMAT_420 && (i == 1 || i == 2)) {
+        *pw = (img->w + 1) / 2;
+        *ph = (img->h + 1) / 2;
+    } else if (img->format == BPG_FORMAT_422 && (i == 1 || i == 2)) {
+        *pw = (img->w + 1) / 2;
+        *ph = img->h;
+    } else {
+        *pw = img->w;
+        *ph = img->h;
+    }
+}
+
+#define W_PAD 16
+
+Image *image_alloc(int w, int h, BPGImageFormatEnum format, int has_alpha,
+                   BPGColorSpaceEnum color_space, int bit_depth)
+{   /* Allocate an Image with 16-bit (PIXEL) planes padded to W_PAD multiples. */
+    Image *img;
+    int i, linesize, w1, h1, c_count;
+
+    img = malloc(sizeof(Image));
+    memset(img, 0, sizeof(*img));
+    /* NOTE(review): malloc() results are unchecked here */
+    img->w = w;
+    img->h = h;
+    img->format = format;
+    img->has_alpha = has_alpha;
+    img->bit_depth = bit_depth;
+    img->color_space = color_space;
+    img->pixel_shift = 1;
+    img->c_h_phase = 1;
+
+    if (img->format == BPG_FORMAT_GRAY)
+        c_count = 1;
+    else
+        c_count = 3;
+    if (has_alpha)
+        c_count++;
+    for(i = 0; i < c_count; i++) {
+        get_plane_res(img, &w1, &h1, i);
+        /* multiple of 16 pixels to add borders */
+        w1 = (w1 + (W_PAD - 1)) & ~(W_PAD - 1);
+        h1 = (h1 + (W_PAD - 1)) & ~(W_PAD - 1);
+        
+        linesize = w1 << img->pixel_shift;
+        img->data[i] = malloc(linesize * h1);
+        img->linesize[i] = linesize;
+    }
+    return img;
+}
+
+void image_free(Image *img)
+{   /* Free all planes (count depends on format/alpha) and the Image itself. */
+    int i, c_count;
+    if (img->format == BPG_FORMAT_GRAY)
+        c_count = 1;
+    else
+        c_count = 3;
+    if (img->has_alpha)
+        c_count++;
+    for(i = 0; i < c_count; i++)
+        free(img->data[i]);
+    free(img);
+}
+
+int image_ycc444_to_ycc422(Image *img, int h_phase)
+{   /* Convert 4:4:4 to 4:2:2 in place by horizontally decimating both chroma
+       planes; returns 0 on success, -1 if format/pixel layout is unsupported. */
+    uint8_t *data1;
+    int w1, h1, bpp, linesize1, i, y;
+
+    if (img->format != BPG_FORMAT_444 || img->pixel_shift != 1)
+        return -1;
+    bpp = 2;
+    w1 = (img->w + 1) / 2;
+    w1 = (w1 + (W_PAD - 1)) & ~(W_PAD - 1);
+    h1 = (img->h + (W_PAD - 1)) & ~(W_PAD - 1);
+    linesize1 = bpp * w1;
+    for(i = 1; i <= 2; i++) {
+        data1 = malloc(linesize1 * h1); /* NOTE(review): malloc unchecked */
+        for(y = 0; y < img->h; y++) {
+            decimate2_h((PIXEL *)(data1 + y * linesize1),
+                        (PIXEL *)(img->data[i] + y * img->linesize[i]),
+                        img->w, img->bit_depth, h_phase);
+        }
+        free(img->data[i]);
+        img->data[i] = data1;
+        img->linesize[i] = linesize1;
+    }
+    img->format = BPG_FORMAT_422;
+    img->c_h_phase = h_phase;
+    return 0;
+}
+
+int image_ycc444_to_ycc420(Image *img, int h_phase)
+{
+    uint8_t *data1;
+    int w1, h1, bpp, linesize1, i;
+
+    if (img->format != BPG_FORMAT_444 || img->pixel_shift != 1)
+        return -1;
+    bpp = 2;
+    w1 = (img->w + 1) / 2;
+    h1 = (img->h + 1) / 2;
+    w1 = (w1 + (W_PAD - 1)) & ~(W_PAD - 1);
+    h1 = (h1 + (W_PAD - 1)) & ~(W_PAD - 1);
+    linesize1 = bpp * w1;
+    for(i = 1; i <= 2; i++) {
+        data1 = malloc(linesize1 * h1);
+        decimate2_hv(data1, linesize1,
+                     img->data[i], img->linesize[i],
+                     img->w, img->h, img->bit_depth, h_phase);
+        free(img->data[i]);
+        img->data[i] = data1;
+        img->linesize[i] = linesize1;
+    }
+    img->format = BPG_FORMAT_420;
+    img->c_h_phase = h_phase;
+    return 0;
+}
+
/* duplicate right and bottom samples so that the image has a width
   and height multiple of cb_size (power of two). img->w and img->h
   are updated to the padded size.
   NOTE(review): the planes were allocated rounded up to W_PAD (16)
   samples only, so this assumes cb_size <= W_PAD — confirm at call
   sites. */
void image_pad(Image *img, int cb_size)
{
    int w1, h1, x, y, c_count, c_w, c_h, c_w1, c_h1, h_shift, v_shift, c_idx;
    PIXEL *ptr, v, *ptr1;

    /* only 16 bit per sample storage is handled */
    assert(img->pixel_shift == 1);
    if (cb_size <= 1)
        return;
    /* padded image size (cb_size is a power of two) */
    w1 = (img->w + cb_size - 1) & ~(cb_size - 1);
    h1 = (img->h + cb_size - 1) & ~(cb_size - 1);
    
    if (img->format == BPG_FORMAT_GRAY)
        c_count = 1;
    else
        c_count = 3;
    if (img->has_alpha)
        c_count++;
    for(c_idx = 0; c_idx < c_count; c_idx++) {
        /* chroma subsampling shifts of this plane */
        if (img->format == BPG_FORMAT_420 && 
            (c_idx == 1 || c_idx == 2)) {
            h_shift = 1;
            v_shift = 1;
        } else if (img->format == BPG_FORMAT_422 && 
                   (c_idx == 1 || c_idx == 2)) {
            h_shift = 1;
            v_shift = 0;
        } else {
            h_shift = 0;
            v_shift = 0;
        }

        /* current and padded plane sizes */
        c_w = (img->w + h_shift) >> h_shift;
        c_h = (img->h + v_shift) >> v_shift;
        c_w1 = w1 >> h_shift;
        c_h1 = h1 >> v_shift;

        /* pad horizontally: replicate the last column of each row */
        for(y = 0; y < c_h; y++) {
            ptr = (PIXEL *)(img->data[c_idx] + img->linesize[c_idx] * y);
            v = ptr[c_w - 1];
            for(x = c_w; x < c_w1; x++) {
                ptr[x] = v;
            }
        }

        /* pad vertically: replicate the last (already padded) row */
        ptr1 = (PIXEL *)(img->data[c_idx] + img->linesize[c_idx] * (c_h - 1));
        for(y = c_h; y < c_h1; y++) {
            ptr = (PIXEL *)(img->data[c_idx] + img->linesize[c_idx] * y);
            memcpy(ptr, ptr1, c_w1 * sizeof(PIXEL));
        }
    }
    img->w = w1;
    img->h = h1;
}
+
+/* convert the 16 bit components to 8 bits */
+void image_convert16to8(Image *img)
+{
+    int w, h, stride, y, x, c_count, i;
+    uint8_t *plane;
+
+    if (img->bit_depth > 8 || img->pixel_shift != 1)
+        return;
+    if (img->format == BPG_FORMAT_GRAY)
+        c_count = 1;
+    else
+        c_count = 3;
+    if (img->has_alpha)
+        c_count++;
+    for(i = 0; i < c_count; i++) {
+        get_plane_res(img, &w, &h, i);
+        stride = w;
+        plane = malloc(stride * h);
+        for(y = 0; y < h; y++) {
+            const uint16_t *src;
+            uint8_t *dst;
+            dst = plane + stride * y;
+            src = (uint16_t *)(img->data[i] + img->linesize[i] * y);
+            for(x = 0; x < w; x++)
+                dst[x] = src[x];
+        }
+        free(img->data[i]);
+        img->data[i] = plane;
+        img->linesize[i] = stride;
+    }
+    img->pixel_shift = 0;
+}
+
/* One metadata chunk (EXIF, ICC profile, XMP, ...) extracted from the
   input image; chunks are kept in a singly linked list freed with
   bpg_md_free(). */
typedef struct BPGMetaData {
    uint32_t tag; /* BPG_EXTENSION_TAG_xxx value */
    uint8_t *buf; /* payload, owned by this chunk */
    int buf_len;  /* payload length in bytes */
    struct BPGMetaData *next; /* next chunk or NULL */
} BPGMetaData;
+
+BPGMetaData *bpg_md_alloc(uint32_t tag)
+{
+    BPGMetaData *md;
+    md = malloc(sizeof(BPGMetaData));
+    memset(md, 0, sizeof(*md));
+    md->tag = tag;
+    return md;
+}
+
+void bpg_md_free(BPGMetaData *md)
+{
+    BPGMetaData *md_next;
+
+    while (md != NULL) {
+        md_next = md->next;
+        free(md->buf);
+        free(md);
+        md = md_next;
+    }
+}
+
/* Load a PNG image from 'f' and convert it to the planar format of
 * 'Image' with samples at 'out_bit_depth' bits.
 *
 * - color_space: target color space for RGB sources; grayscale PNGs
 *   force BPG_CS_YCbCr.
 * - limited_range / premultiplied_alpha: recorded in the returned
 *   image; premultiplied_alpha also makes libpng pre-multiply.
 *
 * On success return a new Image (free with image_free()) and store in
 * *pmd the metadata list (currently only the ICC profile) or NULL.
 * Return NULL on libpng error.
 *
 * NOTE(review): on the setjmp error path *pmd is left unmodified, and
 * 'img'/'rows' leak if libpng longjmps after they are allocated —
 * acceptable for a command line tool, but confirm for library use. */
Image *read_png(BPGMetaData **pmd,
                FILE *f, BPGColorSpaceEnum color_space, int out_bit_depth,
                int limited_range, int premultiplied_alpha)
{
    png_structp png_ptr;
    png_infop info_ptr;
    int bit_depth, color_type;
    Image *img;
    uint8_t **rows;
    int y, has_alpha, linesize, bpp;
    BPGImageFormatEnum format;
    ColorConvertState cvt_s, *cvt = &cvt_s;
    BPGMetaData *md, **plast_md, *first_md;
    
    png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING,
                                     NULL, NULL, NULL);
    if (png_ptr == NULL) {
        return NULL;
    }
    
    info_ptr = png_create_info_struct(png_ptr);
    if (info_ptr == NULL) {
       png_destroy_read_struct(&png_ptr, NULL, NULL);
       return NULL;
    }
    
    /* libpng reports fatal errors with longjmp() */
    if (setjmp(png_jmpbuf(png_ptr))) {
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        return NULL;
    }
    
    png_init_io(png_ptr, f);
    
    png_read_info(png_ptr, info_ptr);
    
    bit_depth   = png_get_bit_depth(png_ptr, info_ptr);
    color_type = png_get_color_type(png_ptr, info_ptr);

    /* expand palette and 1/2/4 bit grayscale to 8 bit samples */
    switch (color_type) {
    case PNG_COLOR_TYPE_PALETTE:
        png_set_palette_to_rgb(png_ptr);
        bit_depth = 8;
        break;
    case PNG_COLOR_TYPE_GRAY:
    case PNG_COLOR_TYPE_GRAY_ALPHA:
        if (bit_depth < 8) {
            png_set_expand_gray_1_2_4_to_8(png_ptr);
            bit_depth = 8;
        }
        break;
    }
    assert(bit_depth == 8 || bit_depth == 16);

    /* PNG stores 16 bit samples in big endian order: byte swap them on
       little endian hosts so rows[] holds native uint16_t values */
#if __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
    if (bit_depth == 16) {
        png_set_swap(png_ptr);
    }
#endif

    if (color_type == PNG_COLOR_TYPE_GRAY ||
        color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
        format = BPG_FORMAT_GRAY;
        color_space = BPG_CS_YCbCr;
    } else {
        format = BPG_FORMAT_444;
    }
    
    has_alpha = (color_type == PNG_COLOR_TYPE_GRAY_ALPHA ||
                 color_type == PNG_COLOR_TYPE_RGB_ALPHA);

    /* a tRNS chunk is converted to a full alpha channel */
    if (png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS)) {
        png_set_tRNS_to_alpha(png_ptr);
        has_alpha = 1;
    }

    if (premultiplied_alpha) {
        png_set_alpha_mode(png_ptr, PNG_ALPHA_ASSOCIATED, PNG_GAMMA_LINEAR);
    }

    img = image_alloc(png_get_image_width(png_ptr, info_ptr),
                      png_get_image_height(png_ptr, info_ptr),
                      format, has_alpha, color_space,
                      out_bit_depth);
    img->limited_range = limited_range;
    img->premultiplied_alpha = premultiplied_alpha;

    /* read the whole image as packed rows, then convert plane by
       plane below */
    rows = malloc(sizeof(rows[0]) * img->h);
    if (format == BPG_FORMAT_GRAY)
        bpp = (1 + has_alpha) * (bit_depth / 8);
    else
        bpp = (3 + has_alpha) * (bit_depth / 8);
    linesize = bpp * img->w;
    for (y = 0; y < img->h; y++) {
        rows[y] = malloc(linesize);
    }
    
    png_read_image(png_ptr, rows);
    
    convert_init(cvt, bit_depth, out_bit_depth, color_space, limited_range);

    if (format != BPG_FORMAT_GRAY) {
        int idx;
        RGBConvertFunc *convert_func;

        /* idx selects the 8 or 16 bit source variant */
        idx = (bit_depth == 16);
        convert_func = rgb_to_cs[idx][color_space];
        
        /* convert packed RGB(A) rows to the Y/Cb/Cr (or RGB) planes;
           the alpha samples are extracted separately into plane 3 */
        for (y = 0; y < img->h; y++) {
            convert_func(cvt, (PIXEL *)(img->data[0] + y * img->linesize[0]),
                         (PIXEL *)(img->data[1] + y * img->linesize[1]),
                         (PIXEL *)(img->data[2] + y * img->linesize[2]),
                         rows[y], img->w, 3 + has_alpha);
            if (has_alpha) {
                if (idx) {
                    gray16_to_gray(cvt, (PIXEL *)(img->data[3] + y * img->linesize[3]),
                                   (uint16_t *)rows[y] + 3, img->w, 4);
                } else {
                    gray8_to_gray(cvt, (PIXEL *)(img->data[3] + y * img->linesize[3]),
                                  rows[y] + 3, img->w, 4);
                }
            }
        }
    } else {
        /* grayscale: plane 0 is luma, plane 1 (if any) is alpha */
        if (bit_depth == 16) {
            for (y = 0; y < img->h; y++) {
                luma16_to_gray(cvt, (PIXEL *)(img->data[0] + y * img->linesize[0]),
                               (uint16_t *)rows[y], img->w, 1 + has_alpha);
                if (has_alpha) {
                    gray16_to_gray(cvt, (PIXEL *)(img->data[1] + y * img->linesize[1]),
                                   (uint16_t *)rows[y] + 1, img->w, 2);
                }
            }
        } else {
            for (y = 0; y < img->h; y++) {
                luma8_to_gray(cvt, (PIXEL *)(img->data[0] + y * img->linesize[0]),
                              rows[y], img->w, 1 + has_alpha);
                if (has_alpha) {
                    gray8_to_gray(cvt, (PIXEL *)(img->data[1] + y * img->linesize[1]),
                                  rows[y] + 1, img->w, 2);
                }
            }
        }
    }

    for (y = 0; y < img->h; y++) {
        free(rows[y]);
    }
    free(rows);
        
    png_read_end(png_ptr, info_ptr);
    
    /* get the ICC profile if present */
    first_md = NULL;
    plast_md = &first_md;
    {
        png_charp name;
        int comp_type;
        png_bytep iccp_buf;
        png_uint_32 iccp_buf_len;
        
        if (png_get_iCCP(png_ptr, info_ptr,
                         &name, &comp_type, &iccp_buf, &iccp_buf_len) == 
            PNG_INFO_iCCP) {
            md = bpg_md_alloc(BPG_EXTENSION_TAG_ICCP);
            md->buf_len = iccp_buf_len;
            md->buf = malloc(iccp_buf_len);
            memcpy(md->buf, iccp_buf, iccp_buf_len);
            *plast_md = md;
            plast_md = &md->next;
        }
    }

    png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
    
    *pmd = first_md;
    return img;
}
+
+static BPGMetaData *jpeg_get_metadata(jpeg_saved_marker_ptr first_marker)
+{
+    static const char app1_exif[] = "Exif";
+    static const char app1_xmp[] = "http://ns.adobe.com/xap/1.0/";
+    static const char app2_iccp[] = "ICC_PROFILE";
+    jpeg_saved_marker_ptr marker;
+    BPGMetaData *md, **plast_md, *first_md;
+    int has_exif, has_xmp, l, iccp_chunk_count, i;
+    jpeg_saved_marker_ptr iccp_chunks[256];
+    
+    iccp_chunk_count = 0;
+    has_exif = 0;
+    has_xmp = 0;
+    first_md = NULL;
+    plast_md = &first_md;
+    for (marker = first_marker; marker != NULL; marker = marker->next) {
+#if 0
+        printf("marker=APP%d len=%d\n", 
+               marker->marker - JPEG_APP0, marker->data_length);
+#endif
+        if (!has_exif && marker->marker == JPEG_APP0 + 1 &&
+            marker->data_length > sizeof(app1_exif) &&
+            !memcmp(marker->data, app1_exif, sizeof(app1_exif))) {
+            md = bpg_md_alloc(BPG_EXTENSION_TAG_EXIF);
+            l = sizeof(app1_exif);
+            md->buf_len = marker->data_length - l;
+            md->buf = malloc(md->buf_len);
+            memcpy(md->buf, marker->data + l, md->buf_len);
+            *plast_md = md;
+            plast_md = &md->next;
+            has_exif = 1;
+        } else if (!has_xmp && marker->marker == JPEG_APP0 + 1 &&
+                   marker->data_length > sizeof(app1_xmp) &&
+                   !memcmp(marker->data, app1_xmp, sizeof(app1_xmp)) && 
+                   !has_xmp) {
+            md = bpg_md_alloc(BPG_EXTENSION_TAG_XMP);
+            l = sizeof(app1_xmp);
+            md->buf_len = marker->data_length - l;
+            md->buf = malloc(md->buf_len);
+            memcpy(md->buf, marker->data + l, md->buf_len);
+            *plast_md = md;
+            plast_md = &md->next;
+            has_xmp = 1;
+        } else if (marker->marker == JPEG_APP0 + 2 &&
+                   marker->data_length > (sizeof(app2_iccp) + 2) &&
+                   !memcmp(marker->data, app2_iccp, sizeof(app2_iccp))) {
+            int chunk_count, chunk_index;
+            l = sizeof(app2_iccp);
+            chunk_index = marker->data[l];
+            chunk_count = marker->data[l];
+            if (chunk_index == 0 || chunk_count == 0) 
+                continue;
+            if (iccp_chunk_count == 0) {
+                iccp_chunk_count = chunk_count;
+                for(i = 0; i < chunk_count; i++) {
+                    iccp_chunks[i] = NULL;
+                }
+            } else {
+                if (chunk_count != iccp_chunk_count)
+                    continue;
+            }
+            if (chunk_index > iccp_chunk_count)
+                continue;
+            iccp_chunks[chunk_index - 1] = marker;
+        }
+    }
+
+    if (iccp_chunk_count != 0) {
+        int len, hlen, idx;
+        /* check that no chunk are missing */
+        len = 0;
+        hlen = sizeof(app2_iccp) + 2;
+        for(i = 0; i < iccp_chunk_count; i++) {
+            if (!iccp_chunks[i])
+                break;
+            len += iccp_chunks[i]->data_length - hlen;
+        }
+        if (i == iccp_chunk_count) {
+            md = bpg_md_alloc(BPG_EXTENSION_TAG_ICCP);
+            md->buf_len = len;
+            md->buf = malloc(md->buf_len);
+            idx = 0;
+            for(i = 0; i < iccp_chunk_count; i++) {
+                l = iccp_chunks[i]->data_length - hlen;
+                memcpy(md->buf + idx, iccp_chunks[i]->data + hlen, l);
+                idx += l;
+            }
+            assert(idx == len);
+            *plast_md = md;
+            plast_md = &md->next;
+        }
+    }
+    return first_md;
+}
+
/* Load a JPEG image from 'f' into a new Image with samples converted
 * to 'out_bit_depth' bits. The JPEG subsampling (444/422/420) and
 * color space are kept when the component sampling factors match one
 * of the supported layouts; otherwise libjpeg is asked to output
 * upsampled 4:4:4 data. CMYK/YCCK inputs get a fourth W plane
 * (has_w_plane = 1). *pmd receives the metadata list (EXIF, XMP, ICC
 * profile) or NULL. Return the image, or NULL if the JPEG parameters
 * are not supported. */
Image *read_jpeg(BPGMetaData **pmd, FILE *f, 
                 int out_bit_depth)
{
    struct jpeg_decompress_struct cinfo;
    struct jpeg_error_mgr jerr;
    int w, h, w1, i, y_h, c_h, y, v_shift, c_w, y1, idx, c_idx, h_shift;
    int h1, plane_idx[4], has_alpha, has_w_plane;
    Image *img;
    BPGImageFormatEnum format;
    BPGColorSpaceEnum color_space;
    ColorConvertState cvt_s, *cvt = &cvt_s;
    BPGMetaData *first_md = NULL;
    uint32_t comp_hv;

    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_decompress(&cinfo);

    /* keep APP1 (EXIF/XMP) and APP2 (ICC) markers for jpeg_get_metadata() */
    jpeg_save_markers(&cinfo, JPEG_APP0 + 1, 65535);
    jpeg_save_markers(&cinfo, JPEG_APP0 + 2, 65535);

    jpeg_stdio_src(&cinfo, f);
    
    jpeg_read_header(&cinfo, TRUE);
    
    cinfo.raw_data_out = TRUE;
    cinfo.do_fancy_upsampling = TRUE;
    
    w = cinfo.image_width;
    h = cinfo.image_height;

    has_w_plane = 0;
    /* pack the h/v sampling factors of each component into one byte
       per component (high nibble = h, low nibble = v) so that a whole
       layout can be matched with a single compare below */
    comp_hv = 0;
    if (cinfo.num_components < 1 || cinfo.num_components > 4)
        goto unsupported;
    for(i = 0; i < cinfo.num_components; i++) {
        comp_hv |= cinfo.comp_info[i].h_samp_factor << (i * 8 + 4);
        comp_hv |= cinfo.comp_info[i].v_samp_factor << (i * 8);
    }
    switch(cinfo.jpeg_color_space) {
    case JCS_GRAYSCALE:
        if (cinfo.num_components != 1 || comp_hv != 0x11)
            goto unsupported;
        format = BPG_FORMAT_GRAY;
        color_space = BPG_CS_YCbCr;
        break;
    case JCS_YCbCr:
        if (cinfo.num_components != 3)
            goto unsupported;
        switch(comp_hv) {
        case 0x111111:
            format = BPG_FORMAT_444;
            break;
        case 0x111121:
            format = BPG_FORMAT_422;
            break;
        case 0x111122:
            format = BPG_FORMAT_420;
            break;
        default:
            /* unusual sampling: let libjpeg upsample to 4:4:4 */
            cinfo.raw_data_out = FALSE;
            format = BPG_FORMAT_444;
            cinfo.out_color_space = JCS_YCbCr;
            break;
        }
        color_space = BPG_CS_YCbCr;
        break;
    case JCS_RGB:
        if (cinfo.num_components != 3)
            goto unsupported;
        format = BPG_FORMAT_444;
        color_space = BPG_CS_RGB;
        cinfo.raw_data_out = FALSE;
        cinfo.out_color_space = JCS_RGB;
        break;
    case JCS_YCCK:
        if (cinfo.num_components != 4)
            goto unsupported;
        switch(comp_hv) {
        case 0x11111111:
            format = BPG_FORMAT_444;
            color_space = BPG_CS_YCbCr;
            break;
        case 0x22111121:
            format = BPG_FORMAT_422;
            color_space = BPG_CS_YCbCr;
            break;
        case 0x22111122:
            format = BPG_FORMAT_420;
            color_space = BPG_CS_YCbCr;
            break;
        default:
            cinfo.raw_data_out = FALSE;
            format = BPG_FORMAT_444;
            cinfo.out_color_space = JCS_CMYK;
            color_space = BPG_CS_RGB;
            break;
        }
        has_w_plane = 1;
        break;
    case JCS_CMYK:
        if (cinfo.num_components != 4)
            goto unsupported;
        format = BPG_FORMAT_444;
        color_space = BPG_CS_RGB;
        has_w_plane = 1;
        cinfo.raw_data_out = FALSE;
        cinfo.out_color_space = JCS_CMYK;
        break;
    default:
    unsupported:
        fprintf(stderr, "Unsupported JPEG parameters (cs=%d n_comp=%d comp_hv=%x)\n",
                cinfo.jpeg_color_space, cinfo.num_components, comp_hv);
        img = NULL;
        goto the_end;
    }

    v_shift = (format == BPG_FORMAT_420);
    h_shift = (format == BPG_FORMAT_422 || format == BPG_FORMAT_420);
    /* the 4th component (K of CMYK/YCCK) is stored as the alpha plane */
    has_alpha = (cinfo.num_components == 4);
    img = image_alloc(w, h, format, has_alpha, color_space, out_bit_depth);
    img->has_w_plane = has_w_plane;

    convert_init(cvt, 8, out_bit_depth, color_space, 0);

    jpeg_start_decompress(&cinfo);

    /* map JPEG component order to Image plane order (RGB sources are
       stored as G/B/R planes) */
    if (color_space == BPG_CS_RGB) {
        plane_idx[0] = 2;
        plane_idx[1] = 0;
        plane_idx[2] = 1;
    } else {
        plane_idx[0] = 0;
        plane_idx[1] = 1;
        plane_idx[2] = 2;
    }
    plane_idx[3] = 3;
        
    if (cinfo.raw_data_out) {
        /* raw (non upsampled) MCU rows are read plane by plane.
           NOTE(review): 16 rows per plane assumes max_v_samp_factor
           <= 2, which holds for all layouts accepted above — confirm
           if new layouts are added. */
        JSAMPROW rows[4][16];
        JSAMPROW *plane_pointer[4];

        y_h = 8 * cinfo.max_v_samp_factor;
        if (cinfo.num_components == 1) {
            c_h = 0;
            c_w = 0;
        } else {
            c_h = 8;
            if (h_shift)
                c_w = (w + 1) / 2;
            else
                c_w = w;
        }
        /* row buffers rounded up to a multiple of 16 bytes */
        w1 = (w + 15) & ~15;
        for(c_idx = 0; c_idx < cinfo.num_components; c_idx++) {
            if (c_idx == 1 || c_idx == 2) {
                h1 = c_h;
            } else {
                h1 = y_h;
            }
            for(i = 0; i < h1; i++) {
                rows[c_idx][i] = malloc(w1);
            }
            plane_pointer[c_idx] = rows[c_idx];
        }
        
        while (cinfo.output_scanline < cinfo.output_height) {
            y = cinfo.output_scanline;
            jpeg_read_raw_data(&cinfo, plane_pointer, y_h);
            
            for(c_idx = 0; c_idx < cinfo.num_components; c_idx++) {
                if (c_idx == 1 || c_idx == 2) {
                    h1 = c_h;
                    w1 = c_w;
                    y1 = (y >> v_shift);
                } else {
                    h1 = y_h;
                    w1 = img->w;
                    y1 = y;
                }
                idx = plane_idx[c_idx];
                for(i = 0; i < h1; i++) {
                    PIXEL *ptr;
                    ptr = (PIXEL *)(img->data[idx] + 
                                    img->linesize[idx] * (y1 + i));
                    gray8_to_gray(cvt, ptr, rows[c_idx][i], w1, 1);
                    if (color_space == BPG_CS_YCbCr && has_w_plane) {
                        /* negate color (YCCK stores inverted values) */
                        if (c_idx == 0) {
                            gray_one_minus(cvt, ptr, w1);
                        } else if (c_idx <= 2) {
                            gray_neg_c(cvt, ptr, w1);
                        }
                    }
                }
            }
        }
    
        for(c_idx = 0; c_idx < cinfo.num_components; c_idx++) {
            if (c_idx == 1 || c_idx == 2) {
                h1 = c_h;
            } else {
                h1 = y_h;
            }
            for(i = 0; i < h1; i++) {
                free(rows[c_idx][i]);
            }
        }
    } else {
        /* upsampled path: read interleaved scanlines and deinterleave
           them into the planes */
        JSAMPROW rows[1];
        uint8_t *buf;
        int c_count;

        c_count = 3 + has_w_plane;
        buf = malloc(c_count * w);
        rows[0] = buf;
        while (cinfo.output_scanline < cinfo.output_height) {
            y = cinfo.output_scanline;
            jpeg_read_scanlines(&cinfo, rows, 1);

            for(c_idx = 0; c_idx < c_count; c_idx++) {
                idx = plane_idx[c_idx];
                gray8_to_gray(cvt, (PIXEL *)(img->data[idx] + 
                                             img->linesize[idx] * y),
                              buf + c_idx, w, c_count);
            }
        }
        free(buf);
    }
    
    first_md = jpeg_get_metadata(cinfo.marker_list);

 the_end:
    jpeg_finish_decompress(&cinfo);
    
    jpeg_destroy_decompress(&cinfo);
    *pmd = first_md;
    return img;
}
+
+Image *load_image(BPGMetaData **pmd, const char *infilename,
+                  BPGColorSpaceEnum color_space, int bit_depth,
+                  int limited_range, int premultiplied_alpha)
+{
+    FILE *f;
+    int is_png;
+    Image *img;
+    BPGMetaData *md;
+
+    *pmd = NULL;
+
+    f = fopen(infilename, "rb");
+    if (!f)
+        return NULL;
+    {
+        uint8_t buf[8];
+        if (fread(buf, 1, 8, f) == 8 && 
+            png_sig_cmp(buf, 0, 8) == 0)
+            is_png = 1;
+        else
+            is_png = 0;
+        fseek(f, 0, SEEK_SET);
+    }
+    
+    if (is_png) {
+        img = read_png(&md, f, color_space, bit_depth, limited_range,
+                       premultiplied_alpha);
+    } else {
+        img = read_jpeg(&md, f, bit_depth);
+    }
+    fclose(f);
+    *pmd = md;
+    return img;
+}
+
+void save_yuv1(Image *img, FILE *f)
+{
+    int c_w, c_h, i, c_count, y;
+
+    if (img->format == BPG_FORMAT_GRAY)
+        c_count = 1;
+    else
+        c_count = 3;
+    for(i = 0; i < c_count; i++) {
+        get_plane_res(img, &c_w, &c_h, i);
+        for(y = 0; y < c_h; y++) {
+            fwrite(img->data[i] + y * img->linesize[i], 
+                   1, c_w << img->pixel_shift, f);
+        }
+    }
+}
+
+void save_yuv(Image *img, const char *filename)
+{
+    FILE *f;
+
+    f = fopen(filename, "wb");
+    if (!f) {
+        fprintf(stderr, "Could not open %s\n", filename);
+        exit(1);
+    }
+    save_yuv1(img, f);
+    fclose(f);
+}
+
/* Return the offset just past the end of the NAL starting at buf[0]
   (i.e. the position of the next start code, or buf_len), or -1 if
   buf does not begin with a 3 or 4 byte start code or is too short to
   hold the 2 byte NAL header. */
static int find_nal_end(const uint8_t *buf, int buf_len)
{
    int idx;

    idx = 0;
    if (buf_len >= 4 &&
        buf[0] == 0 && buf[1] == 0 && buf[2] == 0 && buf[3] == 1) {
        idx = 4; /* 4 byte start code */
    } else if (buf_len >= 3 &&
               buf[0] == 0 && buf[1] == 0 && buf[2] == 1) {
        idx = 3; /* 3 byte start code */
    } else {
        return -1;
    }
    /* NAL header (2 bytes) must be present */
    if (idx + 2 > buf_len)
        return -1;
    /* find the next start code (or the end of the buffer) */
    for(;;) {
        if (idx + 2 >= buf_len) {
            idx = buf_len;
            break;
        }
        if (buf[idx] == 0 && buf[idx + 1] == 0 && buf[idx + 2] == 1)
            break;
        if (idx + 3 < buf_len &&
            buf[idx] == 0 && buf[idx + 1] == 0 && buf[idx + 2] == 0 && buf[idx + 3] == 1)
            break;
        idx++;
    }
    return idx;
}

/* Extract the first NAL of 'buf' into a newly allocated buffer
   (*pnal_buf, *pnal_len), removing the start code and the emulation
   prevention bytes (00 00 03 -> 00 00). The caller must free
   *pnal_buf. Return the position of the end of the NAL in 'buf' or
   -1 if error. */
static int extract_nal(uint8_t **pnal_buf, int *pnal_len, 
                       const uint8_t *buf, int buf_len)
{
    int idx, start, end, len;
    uint8_t *nal_buf;
    int nal_len;

    end = find_nal_end(buf, buf_len);
    if (end < 0)
        return -1;
    if (buf[2] == 1)
        start = 3; /* 3 byte start code */
    else
        start = 4;
    len = end - start;
    
    nal_buf = malloc(len);
    nal_len = 0;
    idx = start;
    /* copy the payload, unescaping the emulation prevention bytes
       (the duplicate, unreachable copy loop that followed this one
       was removed: idx >= end always holds when it is reached) */
    while (idx < end) {
        if (idx + 2 < end && buf[idx] == 0 && buf[idx + 1] == 0 && buf[idx + 2] == 3) {
            nal_buf[nal_len++] = 0;
            nal_buf[nal_len++] = 0;
            idx += 3;
        } else {
            nal_buf[nal_len++] = buf[idx++];
        }
    }
    *pnal_buf = nal_buf;
    *pnal_len = nal_len;
    return idx;
}
+
/* big endian variable length 7 bit encoding: each byte carries 7
   value bits, the high bit is set on all bytes except the last one.
   1 to 5 bytes are written and *pp is advanced past them. */
static void put_ue(uint8_t **pp, uint32_t v)
{
    uint8_t *q = *pp;
    int nb_bytes, k;

    /* number of 7 bit groups needed to represent v (1 to 5) */
    nb_bytes = 1;
    while (nb_bytes < 5 && v >= (uint32_t)(1 << (7 * nb_bytes)))
        nb_bytes++;
    for (k = nb_bytes - 1; k >= 1; k--)
        *q++ = ((v >> (7 * k)) & 0x7f) | 0x80;
    *q++ = v & 0x7f;
    *pp = q;
}
+
/* Big endian bit reader; bits past the end of the buffer read as 0. */
typedef struct {
    const uint8_t *buf;
    int idx;     /* current bit position */
    int buf_len; /* buffer length in bytes */
} GetBitState;

static void init_get_bits(GetBitState *s, const uint8_t *buf, int buf_len)
{
    s->buf = buf;
    s->idx = 0;
    s->buf_len = buf_len;
}

static void skip_bits(GetBitState *s, int n)
{
    s->idx += n;
}

/* 1 <= n <= 25. return '0' bits if past the end of the buffer. */
static uint32_t get_bits(GetBitState *s, int n)
{
    const uint8_t *buf = s->buf;
    int byte_pos, k;
    uint32_t w;

    byte_pos = s->idx >> 3;
    if (byte_pos + 3 < s->buf_len) {
        /* fast path: 4 whole bytes are available */
        w = ((uint32_t)buf[byte_pos] << 24) | (buf[byte_pos + 1] << 16) |
            (buf[byte_pos + 2] << 8) | buf[byte_pos + 3];
    } else {
        /* near the end: at most 3 bytes remain, missing ones read 0 */
        w = 0;
        for (k = 0; k < 3; k++) {
            if (byte_pos + k < s->buf_len)
                w |= (uint32_t)buf[byte_pos + k] << (24 - k * 8);
        }
    }
    w = (w >> (32 - (s->idx & 7) - n)) & (((uint32_t)1 << n) - 1);
    s->idx += n;
    return w;
}

/* 1 <= n <= 32 */
static uint32_t get_bits_long(GetBitState *s, int n)
{
    uint32_t hi;

    if (n <= 25)
        return get_bits(s, n);
    /* read the high 16 bits first, then the remaining low bits */
    hi = get_bits(s, 16) << (n - 16);
    return hi | get_bits(s, n - 16);
}

/* Exp-Golomb decoding; at most 32 bits are supported. Returns
   0xffffffff after 32 leading zero bits. */
static uint32_t get_ue_golomb(GetBitState *s)
{
    int nb_zeros = 0;

    while (!get_bits(s, 1)) {
        nb_zeros++;
        if (nb_zeros == 32)
            return 0xffffffff;
    }
    if (nb_zeros == 0)
        return 0;
    return (((uint32_t)1 << nb_zeros) | get_bits_long(s, nb_zeros)) - 1;
}
+
/* Big endian bit writer; the destination buffer must be zero
   initialized since bits are OR-ed in. */
typedef struct {
    uint8_t *buf;
    int idx; /* current bit position */
} PutBitState;

static void init_put_bits(PutBitState *s, uint8_t *buf)
{
    s->buf = buf;
    s->idx = 0;
}

static void put_bit(PutBitState *s, int bit)
{
    s->buf[s->idx >> 3] |= bit << (7 - (s->idx & 7));
    s->idx++;
}

/* append the n low bits of v, most significant bit first */
static void put_bits(PutBitState *s, int n, uint32_t v)
{
    while (n > 0) {
        n--;
        put_bit(s, (v >> n) & 1);
    }
}

/* append v as Exp-Golomb: (nbits-1) zero bits then v+1 on nbits bits */
static void put_ue_golomb(PutBitState *s, uint32_t v)
{
    uint32_t t;
    int nb_bits;

    v++;
    nb_bits = 0;
    for (t = v; t != 0; t >>= 1)
        nb_bits++;
    if (nb_bits > 1)
        put_bits(s, nb_bits - 1, 0);
    put_bits(s, nb_bits, v);
}
+
/* simple growable byte buffer */
typedef struct {
    uint8_t *buf;
    int size; /* allocated size in bytes */
    int len;  /* used length in bytes */
} DynBuf;

static void dyn_buf_init(DynBuf *s)
{
    s->buf = NULL;
    s->size = 0;
    s->len = 0;
}

/* Ensure that at least 'size' bytes are allocated (growing by at
   least 1.5x to amortize reallocations). Return 0 on success, -1 on
   allocation failure (the buffer is left unchanged). */
static int dyn_buf_resize(DynBuf *s, int size)
{
    uint8_t *p;
    int cap;

    if (size <= s->size)
        return 0;
    cap = s->size + s->size / 2;
    if (cap < size)
        cap = size;
    p = realloc(s->buf, cap);
    if (p == NULL)
        return -1;
    s->buf = p;
    s->size = cap;
    return 0;
}
+
+/* suppress the VPS NAL and keep only the useful part of the SPS
+   header. The decoder can rebuild a valid HEVC stream if needed. */
+static int build_modified_sps(uint8_t **pout_buf, int *pout_buf_len,
+                              const uint8_t *buf, int buf_len)
+{
+    int nal_unit_type, nal_len, idx, i, ret, msps_buf_len;
+    int out_buf_len, out_buf_len_max;
+    uint8_t *nal_buf, *msps_buf, *out_buf;
+    GetBitState gb_s, *gb = &gb_s;
+    PutBitState pb_s, *pb = &pb_s;
+    uint8_t *p;
+
+    idx = extract_nal(&nal_buf, &nal_len, buf, buf_len);
+    if (idx < 0)
+        return -1;
+    if (nal_len < 2) {
+        free(nal_buf);
+        return -1;
+    }
+    nal_unit_type = (nal_buf[0] >> 1) & 0x3f;
+    free(nal_buf);
+    if (nal_unit_type != 32)  {
+        fprintf(stderr, "expecting VPS nal (%d)\n", nal_unit_type);
+        return -1; /* expect VPS nal */
+    }
+
+    ret = extract_nal(&nal_buf, &nal_len, buf + idx, buf_len);
+    if (ret < 0)
+        return -1;
+    idx += ret;
+    if (nal_len < 2)
+        return -1;
+    nal_unit_type = (nal_buf[0] >> 1) & 0x3f;
+    if (nal_unit_type != 33) {
+        fprintf(stderr, "expecting SPS nal (%d)\n", nal_unit_type);
+        return -1; /* expect SPS nal */
+    }
+
+    /* skip the initial part of the SPS up to and including
+       log2_min_cb_size */
+    {
+        int vps_id, max_sub_layers, profile_idc, sps_id;
+        int chroma_format_idc, width, height, bit_depth_luma, bit_depth_chroma;
+        int log2_max_poc_lsb, sublayer_ordering_info, log2_min_cb_size;
+        int log2_diff_max_min_coding_block_size, log2_min_tb_size;
+        int log2_diff_max_min_transform_block_size;
+        int max_transform_hierarchy_depth_inter;
+        int max_transform_hierarchy_depth_intra;
+        int scaling_list_enable_flag, amp_enabled_flag, sao_enabled;
+        int pcm_enabled_flag, nb_st_rps;
+        int long_term_ref_pics_present_flag, sps_strong_intra_smoothing_enable_flag, vui_present;
+        int sps_temporal_mvp_enabled_flag;
+        int pcm_sample_bit_depth_luma_minus1;
+        int pcm_sample_bit_depth_chroma_minus1;
+        int log2_min_pcm_luma_coding_block_size_minus3;
+        int log2_diff_max_min_pcm_luma_coding_block_size;
+        int pcm_loop_filter_disabled_flag;
+        int sps_extension_flag, sps_range_extension_flag, sps_extension_7bits;
+        int sps_range_extension_flags;
+
+        init_get_bits(gb, nal_buf, nal_len);
+        skip_bits(gb, 16); /* nal header */
+        vps_id = get_bits(gb, 4);
+        if (vps_id != 0) {
+            fprintf(stderr, "VPS id 0 expected\n");
+            return -1;
+        }
+        max_sub_layers = get_bits(gb, 3);
+        if (max_sub_layers != 0) {
+            fprintf(stderr, "max_sub_layers == 0 expected\n");
+            return -1;
+        }
+        skip_bits(gb, 1); /* temporal_id_nesting_flag */
+        /* profile tier level */
+        skip_bits(gb, 2); /* profile_space */
+        skip_bits(gb, 1); /* tier_flag */
+        profile_idc = get_bits(gb, 5);
+        for(i = 0; i < 32; i++) {
+            skip_bits(gb, 1); /* profile_compatibility_flag */
+        }
+        skip_bits(gb, 1); /* progressive_source_flag */
+        skip_bits(gb, 1); /* interlaced_source_flag */
+        skip_bits(gb, 1); /* non_packed_constraint_flag */
+        skip_bits(gb, 1); /* frame_only_constraint_flag */
+        skip_bits(gb, 44); /*  XXX_reserved_zero_44 */
+        skip_bits(gb, 8); /* level_idc */
+
+        sps_id = get_ue_golomb(gb);
+        if (sps_id != 0) {
+            fprintf(stderr, "SPS id 0 expected (%d)\n", sps_id);
+            return -1;
+        }
+        chroma_format_idc = get_ue_golomb(gb);
+        if (chroma_format_idc == 3) {
+            get_bits(gb, 1); /* separate_colour_plane_flag */
+        }
+        width = get_ue_golomb(gb);
+        height = get_ue_golomb(gb);
+        /* pic conformance_flag */
+        if (get_bits(gb, 1)) {
+            get_ue_golomb(gb); /* left_offset */
+            get_ue_golomb(gb); /* right_offset */
+            get_ue_golomb(gb); /* top_offset */
+            get_ue_golomb(gb); /* bottom_offset */
+        }
+        bit_depth_luma = get_ue_golomb(gb) + 8;
+        bit_depth_chroma = get_ue_golomb(gb) + 8;
+        log2_max_poc_lsb = get_ue_golomb(gb) + 4;
+        if (log2_max_poc_lsb != 8) {
+            fprintf(stderr, "log2_max_poc_lsb must be 8 (%d)\n", log2_max_poc_lsb);
+            return -1;
+        }
+        sublayer_ordering_info = get_bits(gb, 1);
+        get_ue_golomb(gb); /* max_dec_pic_buffering */
+        get_ue_golomb(gb); /* num_reorder_pics */
+        get_ue_golomb(gb); /* max_latency_increase */
+        
+        log2_min_cb_size = get_ue_golomb(gb) + 3;
+        log2_diff_max_min_coding_block_size = get_ue_golomb(gb);
+        log2_min_tb_size = get_ue_golomb(gb) + 2;
+        log2_diff_max_min_transform_block_size = get_ue_golomb(gb);
+               
+        max_transform_hierarchy_depth_inter = get_ue_golomb(gb);
+        max_transform_hierarchy_depth_intra = get_ue_golomb(gb);
+        if (max_transform_hierarchy_depth_inter != max_transform_hierarchy_depth_intra) {
+            fprintf(stderr, "max_transform_hierarchy_depth_inter must be the same as max_transform_hierarchy_depth_intra (%d %d)\n", max_transform_hierarchy_depth_inter, max_transform_hierarchy_depth_intra);
+            return -1;
+        }
+
+        scaling_list_enable_flag = get_bits(gb, 1);
+        if (scaling_list_enable_flag != 0) {
+            fprintf(stderr, "scaling_list_enable_flag must be 0\n");
+            return -1;
+        }
+        amp_enabled_flag = get_bits(gb, 1);
+        if (!amp_enabled_flag) {
+            fprintf(stderr, "amp_enabled_flag must be set\n");
+            return -1;
+        }
+        sao_enabled = get_bits(gb, 1);
+        pcm_enabled_flag = get_bits(gb, 1);
+        if (pcm_enabled_flag) {
+            pcm_sample_bit_depth_luma_minus1 = get_bits(gb, 4);
+            pcm_sample_bit_depth_chroma_minus1 = get_bits(gb, 4);
+            log2_min_pcm_luma_coding_block_size_minus3 = get_ue_golomb(gb);
+            log2_diff_max_min_pcm_luma_coding_block_size = get_ue_golomb(gb);
+            pcm_loop_filter_disabled_flag = get_bits(gb, 1);
+        }
+        nb_st_rps = get_ue_golomb(gb);
+        if (nb_st_rps != 0) {
+            fprintf(stderr, "nb_st_rps must be 0 (%d)\n", nb_st_rps);
+            return -1;
+        }
+        long_term_ref_pics_present_flag = get_bits(gb, 1);
+        if (long_term_ref_pics_present_flag) {
+            fprintf(stderr, "nlong_term_ref_pics_present_flag must be 0 (%d)\n", nb_st_rps);
+            return -1;
+        }
+        sps_temporal_mvp_enabled_flag = get_bits(gb, 1);
+        if (!sps_temporal_mvp_enabled_flag) {
+            fprintf(stderr, "sps_temporal_mvp_enabled_flag must be set\n");
+            return -1;
+        }
+        sps_strong_intra_smoothing_enable_flag = get_bits(gb, 1);
+        vui_present = get_bits(gb, 1);
+        if (vui_present) {
+            int sar_present, sar_idx, overscan_info_present_flag;
+            int video_signal_type_present_flag, chroma_loc_info_present_flag;
+            int default_display_window_flag, vui_timing_info_present_flag;
+            int vui_poc_proportional_to_timing_flag;
+            int vui_hrd_parameters_present_flag, bitstream_restriction_flag;
+
+            sar_present = get_bits(gb, 1);
+            if (sar_present) {
+                sar_idx = get_bits(gb, 8);
+                if (sar_idx == 255) {
+                    skip_bits(gb, 16); /* sar_num */ 
+                    skip_bits(gb, 16); /* sar_den */ 
+                }
+            }
+            
+            overscan_info_present_flag = get_bits(gb, 1);
+            if (overscan_info_present_flag) {
+                skip_bits(gb, 1); /* overscan_appropriate_flag */
+            }
+
+            video_signal_type_present_flag = get_bits(gb, 1);
+            if (video_signal_type_present_flag) {
+                fprintf(stderr, "video_signal_type_present_flag must be 0\n");
+                return -1;
+            }
+            chroma_loc_info_present_flag = get_bits(gb, 1);
+            if (chroma_loc_info_present_flag) {
+                get_ue_golomb(gb);
+                get_ue_golomb(gb);
+            }
+            skip_bits(gb, 1); /* neutra_chroma_indication_flag */
+            skip_bits(gb, 1);
+            skip_bits(gb, 1);
+            default_display_window_flag = get_bits(gb, 1);
+            if (default_display_window_flag) {
+                fprintf(stderr, "default_display_window_flag must be 0\n");
+                return -1;
+            }
+            vui_timing_info_present_flag = get_bits(gb, 1);
+            if (vui_timing_info_present_flag) {
+                skip_bits(gb, 32);
+                skip_bits(gb, 32);
+                vui_poc_proportional_to_timing_flag = get_bits(gb, 1);
+                if (vui_poc_proportional_to_timing_flag) {
+                    get_ue_golomb(gb);
+                }
+                vui_hrd_parameters_present_flag = get_bits(gb, 1);
+                if (vui_hrd_parameters_present_flag) {
+                    fprintf(stderr, "vui_hrd_parameters_present_flag must be 0\n");
+                    return -1;
+                }
+            }
+            bitstream_restriction_flag = get_bits(gb, 1);
+            if (bitstream_restriction_flag) {
+                skip_bits(gb, 1);
+                skip_bits(gb, 1);
+                skip_bits(gb, 1);
+                get_ue_golomb(gb);
+                get_ue_golomb(gb);
+                get_ue_golomb(gb);
+                get_ue_golomb(gb);
+                get_ue_golomb(gb);
+            }
+        }
+        sps_extension_flag = get_bits(gb, 1);
+        sps_range_extension_flag = 0;
+        sps_range_extension_flags = 0;
+        if (sps_extension_flag) {
+            sps_range_extension_flag = get_bits(gb, 1);
+            sps_extension_7bits = get_bits(gb, 7);
+            if (sps_extension_7bits != 0) {
+                fprintf(stderr, "sps_extension_7bits must be 0\n");
+                return -1;
+            }
+            if (sps_range_extension_flag) {
+                sps_range_extension_flags = get_bits(gb, 9);
+                if (sps_range_extension_flags & ((1 << (8 - 3)) | 
+                                                 (1 << (8 - 4)) | 
+                                                 (1 << (8 - 6)) | 
+                                                 (1 << (8 - 8)))) {
+                    fprintf(stderr, "unsupported range extensions (0x%x)\n",
+                            sps_range_extension_flags);
+                    return -1;
+                }
+            }
+        }
+
+        /* build the modified SPS */
+        msps_buf = malloc(nal_len + 32);
+        memset(msps_buf, 0, nal_len + 16);
+        
+        init_put_bits(pb, msps_buf);
+        put_ue_golomb(pb, log2_min_cb_size - 3);
+        put_ue_golomb(pb, log2_diff_max_min_coding_block_size);
+        put_ue_golomb(pb, log2_min_tb_size - 2);
+        put_ue_golomb(pb, log2_diff_max_min_transform_block_size);
+        put_ue_golomb(pb, max_transform_hierarchy_depth_intra);
+        put_bits(pb, 1, sao_enabled);
+        put_bits(pb, 1, pcm_enabled_flag);
+        if (pcm_enabled_flag) {
+            put_bits(pb, 4, pcm_sample_bit_depth_luma_minus1);
+            put_bits(pb, 4, pcm_sample_bit_depth_chroma_minus1);
+            put_ue_golomb(pb, log2_min_pcm_luma_coding_block_size_minus3);
+            put_ue_golomb(pb, log2_diff_max_min_pcm_luma_coding_block_size);
+            put_bits(pb, 1, pcm_loop_filter_disabled_flag);
+        }
+        put_bits(pb, 1, sps_strong_intra_smoothing_enable_flag);
+        put_bits(pb, 1, sps_extension_flag);
+        if (sps_extension_flag) {
+            put_bits(pb, 1, sps_range_extension_flag);
+            put_bits(pb, 7, 0);
+            if (sps_range_extension_flag) {
+                put_bits(pb, 9, sps_range_extension_flags);
+            }
+        }
+        msps_buf_len = (pb->idx + 7) >> 3;
+
+        out_buf_len_max = 5 + msps_buf_len;
+        out_buf = malloc(out_buf_len_max);
+
+        //        printf("msps_n_bits=%d\n", pb->idx);
+        p = out_buf;
+        put_ue(&p, msps_buf_len); /* header length */
+
+        memcpy(p, msps_buf, msps_buf_len);
+        p += msps_buf_len;
+        
+        out_buf_len = p - out_buf;
+        free(msps_buf);
+        free(nal_buf);
+    }
+    *pout_buf = out_buf;
+    *pout_buf_len = out_buf_len;
+    return idx;
+}
+
+static int add_frame_duration_sei(DynBuf *out_buf, uint16_t frame_ticks)
+{
+    uint8_t nal_buf[128], *q;
+    int nut, nal_len;
+
+    q = nal_buf;
+    *q++ = 0x00;
+    *q++ = 0x00;
+    *q++ = 0x01;
+    nut = 39; /* prefix SEI NUT */
+    *q++ = (nut << 1);
+    *q++ = 1;
+    *q++ = 0xff;  /* payload_type = 257 */
+    *q++ = 0x02;
+    *q++ = 2; /* payload_size = 2 */
+    *q++ = frame_ticks >> 8;
+    *q++ = frame_ticks;
+    *q++ = 0x80; /* extra '1' bit and align to byte */
+    /* Note: the 0x00 0x00 b pattern with b <= 3 cannot happen, so no
+       need to escape */
+    nal_len = q - nal_buf;
+    if (dyn_buf_resize(out_buf, out_buf->len + nal_len) < 0)
+        return -1;
+    memcpy(out_buf->buf + out_buf->len, nal_buf, nal_len);
+    out_buf->len += nal_len;
+    return 0;
+}
+
+static int build_modified_hevc(uint8_t **pout_buf, 
+                               const uint8_t *cbuf, int cbuf_len,
+                               const uint8_t *abuf, int abuf_len,
+                               const uint16_t *frame_duration_tab,
+                               int frame_count)
+{
+    DynBuf out_buf_s, *out_buf = &out_buf_s;
+    uint8_t *msps;
+    const uint8_t *nal_buf;
+    int msps_len, cidx, aidx, is_alpha, nal_len, first_nal, start, l, frame_num;
+    
+    dyn_buf_init(out_buf);
+    
+    /* add alpha MSPS */
+    aidx = 0; /* avoids warning */
+    if (abuf) {
+        aidx = build_modified_sps(&msps, &msps_len, abuf, abuf_len);
+        if (aidx < 0)
+            goto fail;
+        if (dyn_buf_resize(out_buf, out_buf->len + msps_len) < 0)
+            goto fail;
+        memcpy(out_buf->buf + out_buf->len, msps, msps_len);
+        out_buf->len += msps_len;
+        free(msps);
+    }
+    
+    /* add color MSPS */
+    cidx = build_modified_sps(&msps, &msps_len, cbuf, cbuf_len);
+    if (cidx < 0)
+        goto fail;
+    if (dyn_buf_resize(out_buf, out_buf->len + msps_len) < 0)
+        goto fail;
+    memcpy(out_buf->buf + out_buf->len, msps, msps_len);
+    out_buf->len += msps_len;
+    free(msps);
+
+    /* add the remaining NALs, alternating between alpha (if present)
+       and color. */
+    is_alpha = (abuf != NULL);
+    first_nal = 1;
+    frame_num = 0;
+    for(;;) {
+        if (!is_alpha) {
+            if (cidx >= cbuf_len) {
+                if (abuf) {
+                    fprintf(stderr, "Incorrect number of alpha NALs\n");
+                    goto fail;
+                }
+                break;
+            }
+            nal_buf = cbuf + cidx;
+            nal_len = find_nal_end(nal_buf, cbuf_len - cidx);
+            //            printf("cidx=%d/%d nal_len=%d\n", cidx, cbuf_len, nal_len);
+            if (nal_len < 0)
+                goto fail;
+            cidx += nal_len;
+        } else {
+            if (aidx >= abuf_len) 
+                break;
+            nal_buf = abuf + aidx;
+            nal_len = find_nal_end(nal_buf, abuf_len - aidx);
+            //            printf("aidx=%d/%d nal_len=%d\n", aidx, abuf_len, nal_len);
+            if (nal_len < 0)
+                goto fail;
+            aidx += nal_len;
+        }
+        start = 3 + (nal_buf[2] == 0);
+        if (!is_alpha) {
+            int nut;
+            /* add SEI NAL for the frame duration (animation case) */
+            nut = (nal_buf[start] >> 1) & 0x3f;
+            if ((nut <= 9 || (nut >= 16 && nut <= 21)) &&  
+                start + 2 < nal_len && (nal_buf[start + 2] & 0x80)) {
+                int frame_ticks;
+                assert(frame_num < frame_count);
+                frame_ticks = frame_duration_tab[frame_num];
+                if (frame_ticks > 1) {
+                    add_frame_duration_sei(out_buf, frame_ticks);
+                }
+                frame_num++;
+            }
+        }
+        if (first_nal) {
+            /* skip first start code */
+            l = start;
+        } else {
+            l = 0;
+        }
+        if (dyn_buf_resize(out_buf, out_buf->len + nal_len - l) < 0)
+            goto fail;
+        //        printf("add nal len=%d\n", nal_len - l);
+        memcpy(out_buf->buf + out_buf->len, nal_buf + l, nal_len - l);
+        if (is_alpha) {
+            /* set nul_layer_id of alpha to '1' */
+            out_buf->buf[out_buf->len + (start - l) + 1] |= 1 << 3;
+        }
+        out_buf->len += nal_len - l;
+
+        if (abuf) {
+            is_alpha ^= 1;
+        }
+        first_nal = 0;
+    }
+    *pout_buf = out_buf->buf;
+    return out_buf->len;
+ fail:
+    free(out_buf->buf);
+    return -1;
+}
+
/* Available HEVC encoder back ends; the set of members depends on the
   compile time configuration (USE_X265 / USE_JCTVC). */
typedef enum {
#if defined(USE_X265)
    HEVC_ENCODER_X265,
#endif
#if defined(USE_JCTVC)
    HEVC_ENCODER_JCTVC,
#endif

    HEVC_ENCODER_COUNT, /* number of compiled-in encoders */
} HEVCEncoderEnum;
+
/* Human readable encoder names, indexed by HEVCEncoderEnum. The order
   must match hevc_encoder_tab. */
static char *hevc_encoder_name[HEVC_ENCODER_COUNT] = {
#if defined(USE_X265)
    "x265",
#endif
#if defined(USE_JCTVC)
    "jctvc",
#endif
};
+
/* Encoder vtables, indexed by HEVCEncoderEnum. The order must match
   hevc_encoder_name. */
static HEVCEncoder *hevc_encoder_tab[HEVC_ENCODER_COUNT] = {
#if defined(USE_X265)
    &x265_hevc_encoder,
#endif
#if defined(USE_JCTVC)
    &jctvc_encoder,
#endif
};
+
/* BPG file magic: the bytes 'B' 'P' 'G' 0xfb */
#define IMAGE_HEADER_MAGIC 0x425047fb

/* command line defaults */
#define DEFAULT_OUTFILENAME "out.bpg"
#define DEFAULT_QP 29
#define DEFAULT_BIT_DEPTH 8

/* maximum supported bit depth depends on the JCTVC build configuration */
#ifdef RExt__HIGH_BIT_DEPTH_SUPPORT
#define BIT_DEPTH_MAX 14
#else
#define BIT_DEPTH_MAX 12
#endif
#define DEFAULT_COMPRESS_LEVEL 8
+
+
typedef struct BPGEncoderContext BPGEncoderContext;

/* User visible encoding parameters. Allocate with
   bpg_encoder_param_alloc() so the defaults are filled in. */
typedef struct BPGEncoderParameters {
    int qp; /* 0 ... 51 */
    int alpha_qp; /* -1 ... 51. -1 means same as qp */
    int lossless; /* true if lossless compression (qp and alpha_qp are
                     ignored) */
    BPGImageFormatEnum preferred_chroma_format;
    int sei_decoded_picture_hash; /* 0, 1 */
    int compress_level; /* 1 ... 9 */
    int verbose;
    HEVCEncoderEnum encoder_type; /* which HEVC back end to use */
    int animated; /* 0 ... 1: if true, encode as animated image */
    uint16_t loop_count; /* animations: number of loops. 0=infinite */
    /* animations: the frame delay is a multiple of
       frame_delay_num/frame_delay_den seconds */
    uint16_t frame_delay_num;
    uint16_t frame_delay_den;
} BPGEncoderParameters;
+
/* Output callback; returns the number of bytes written (a return value
   different from buf_len is treated as a write error by the callers). */
typedef int BPGEncoderWriteFunc(void *opaque, const uint8_t *buf, int buf_len);

/* Internal encoder state (opaque to API users). */
struct BPGEncoderContext {
    BPGEncoderParameters params;       /* private copy of the user parameters */
    BPGMetaData *first_md;             /* linked list of extension data to write */
    HEVCEncoder *encoder;              /* selected HEVC back end vtable */
    int frame_count;                   /* number of frames encoded so far */
    HEVCEncoderContext *enc_ctx;       /* color plane encoder */
    HEVCEncoderContext *alpha_enc_ctx; /* alpha plane encoder, NULL if no alpha */
    int frame_ticks;                   /* duration (in ticks) for the next frame(s) */
    uint16_t *frame_duration_tab;      /* per-frame duration in ticks */
    int frame_duration_tab_size;       /* allocated entries in frame_duration_tab */
};
+
/* Allocate 'size' bytes of zeroed memory. Returns NULL on allocation
   failure (same contract as malloc). */
void *mallocz(size_t size)
{
    /* calloc both allocates and clears, matching malloc + memset */
    return calloc(1, size);
}
+
+BPGEncoderParameters *bpg_encoder_param_alloc(void)
+{
+    BPGEncoderParameters *p;
+    p = mallocz(sizeof(BPGEncoderParameters));
+    if (!p)
+        return NULL;
+    p->qp = DEFAULT_QP;
+    p->alpha_qp = -1;
+    p->preferred_chroma_format = BPG_FORMAT_420;
+    p->compress_level = DEFAULT_COMPRESS_LEVEL;
+    p->frame_delay_num = 1;
+    p->frame_delay_den = 25;
+    p->loop_count = 0;
+    return p;
+}
+
/* Free a parameter block obtained from bpg_encoder_param_alloc().
   Passing NULL is a no-op (free() semantics). */
void bpg_encoder_param_free(BPGEncoderParameters *p)
{
    free(p);
}
+
/* Create an encoder context from the given parameters. The parameters
   are copied, so 'p' may be freed after this call. Returns NULL on
   allocation failure. */
BPGEncoderContext *bpg_encoder_open(BPGEncoderParameters *p)
{
    BPGEncoderContext *s;

    s = mallocz(sizeof(BPGEncoderContext));
    if (!s)
        return NULL;
    s->params = *p; /* private copy */
    s->encoder = hevc_encoder_tab[s->params.encoder_type];
    s->frame_ticks = 1; /* default frame duration: one tick */
    return s;
}
+
/* Attach a list of extension (metadata) blocks to be written in the
   file header. Ownership of 'md' is transferred to the context: it is
   freed after being written, or by bpg_encoder_close(). */
void bpg_encoder_set_extension_data(BPGEncoderContext *s, 
                                    BPGMetaData *md)
{
    s->first_md = md;
}
+
/* Flush the HEVC encoder(s), build the final BPG HEVC payload and write
   it through 'write_func'. Called once after the last frame has been
   fed to the encoder. Exits the process on any error; returns 0. */
static int bpg_encoder_encode_trailer(BPGEncoderContext *s, 
                                      BPGEncoderWriteFunc *write_func,
                                      void *opaque)
{
    uint8_t *out_buf, *alpha_buf, *hevc_buf;
    int out_buf_len, alpha_buf_len, hevc_buf_len;

    /* close() flushes the encoder and hands back the whole bitstream */
    out_buf_len = s->encoder->close(s->enc_ctx, &out_buf);
    if (out_buf_len < 0) {
        fprintf(stderr, "Error while encoding picture\n");
        exit(1);
    }
    s->enc_ctx = NULL; /* the encoder context is released by close() */
    
    alpha_buf = NULL;
    alpha_buf_len = 0;
    if (s->alpha_enc_ctx) {
        alpha_buf_len = s->encoder->close(s->alpha_enc_ctx, &alpha_buf);
        if (alpha_buf_len < 0) {
            fprintf(stderr, "Error while encoding picture (alpha plane)\n");
            exit(1);
        }
        s->alpha_enc_ctx = NULL;
    }
    
    /* merge the color and alpha streams and insert the frame duration
       SEI messages */
    hevc_buf = NULL;
    hevc_buf_len = build_modified_hevc(&hevc_buf, out_buf, out_buf_len,
                                       alpha_buf, alpha_buf_len,
                                       s->frame_duration_tab, s->frame_count);
    if (hevc_buf_len < 0) {
        fprintf(stderr, "Error while creating HEVC data\n");
        exit(1);
    }
    free(out_buf);
    free(alpha_buf);
    
    /* a short write is treated as an error */
    if (write_func(opaque, hevc_buf, hevc_buf_len) != hevc_buf_len) {
        fprintf(stderr, "Error while writing HEVC data\n");
        exit(1);
    }
    free(hevc_buf);
    return 0;
}
+
+int bpg_encoder_set_frame_duration(BPGEncoderContext *s, int frame_ticks)
+{
+    if (frame_ticks >= 1 && frame_ticks <= 65535) {
+        s->frame_ticks = frame_ticks;
+        return 0;
+    } else {
+        return -1;
+    }
+}
+
+/* Warning: currently 'img' is modified. When encoding animations, img
+   = NULL indicates the end of the stream. */
+int bpg_encoder_encode(BPGEncoderContext *s, Image *img,
+                       BPGEncoderWriteFunc *write_func,
+                       void *opaque)
+{
+    const BPGEncoderParameters *p = &s->params;
+    Image *img_alpha;
+    HEVCEncodeParams ep_s, *ep = &ep_s;
+    uint8_t *extension_buf;
+    int extension_buf_len;
+    int cb_size, width, height;
+
+    if (p->animated && !img) {
+        return bpg_encoder_encode_trailer(s, write_func, opaque);
+    }
+
+    /* extract the alpha plane */
+    if (img->has_alpha) {
+        int c_idx;
+
+        img_alpha = malloc(sizeof(Image));
+        memset(img_alpha, 0, sizeof(*img_alpha));
+        if (img->format == BPG_FORMAT_GRAY)
+            c_idx = 1;
+        else
+            c_idx = 3;
+
+        img_alpha->w = img->w;
+        img_alpha->h = img->h;
+        img_alpha->format = BPG_FORMAT_GRAY;
+        img_alpha->has_alpha = 0;
+        img_alpha->color_space = BPG_CS_YCbCr;
+        img_alpha->bit_depth = img->bit_depth;
+        img_alpha->pixel_shift = img->pixel_shift;
+        img_alpha->data[0] = img->data[c_idx];
+        img_alpha->linesize[0] = img->linesize[c_idx];
+        
+        img->data[c_idx] = NULL;
+        img->has_alpha = 0;
+    } else {
+        img_alpha = NULL;
+    }
+
+    if (img->format == BPG_FORMAT_444 && img->color_space != BPG_CS_RGB) {
+        if (p->preferred_chroma_format == BPG_FORMAT_420 ||
+            p->preferred_chroma_format == BPG_FORMAT_420_VIDEO) {
+            int c_h_phase = (p->preferred_chroma_format == BPG_FORMAT_420);
+            if (image_ycc444_to_ycc420(img, c_h_phase) != 0)
+                goto error_convert;
+        } else if (p->preferred_chroma_format == BPG_FORMAT_422 ||
+                   p->preferred_chroma_format == BPG_FORMAT_422_VIDEO) {
+            int c_h_phase = (p->preferred_chroma_format == BPG_FORMAT_422);
+            if (image_ycc444_to_ycc422(img, c_h_phase) != 0)  {
+            error_convert:
+                fprintf(stderr, "Cannot convert image\n");
+                exit(1);
+            }
+        }
+    }
+
+    cb_size = 8; /* XXX: should make it configurable. We assume the
+                    HEVC encoder uses the same value */
+    width = img->w;
+    height = img->h;
+    image_pad(img, cb_size);
+    if (img_alpha)
+        image_pad(img_alpha, cb_size);
+
+    /* convert to the allocated pixel width to 8 bit if needed by the
+       HEVC encoder */
+    if (img->bit_depth == 8) {
+        image_convert16to8(img);
+        if (img_alpha)
+            image_convert16to8(img_alpha);
+    }
+        
+    if (s->frame_count == 0) {
+        memset(ep, 0, sizeof(*ep));
+        ep->qp = p->qp;
+        ep->width = img->w;
+        ep->height = img->h;
+        ep->chroma_format = img->format;
+        ep->bit_depth = img->bit_depth;
+        ep->intra_only = !p->animated;
+        ep->lossless = p->lossless;
+        ep->sei_decoded_picture_hash = p->sei_decoded_picture_hash;
+        ep->compress_level = p->compress_level;
+        ep->verbose = p->verbose;
+
+        s->enc_ctx = s->encoder->open(ep);
+        if (!s->enc_ctx) {
+            fprintf(stderr, "Error while opening encoder\n");
+            exit(1);
+        }
+
+        if (img_alpha) {
+            if (p->alpha_qp < 0)
+                ep->qp = p->qp;
+            else
+                ep->qp = p->alpha_qp;
+            ep->chroma_format = 0;
+            
+            s->alpha_enc_ctx = s->encoder->open(ep);
+            if (!s->alpha_enc_ctx) {
+                fprintf(stderr, "Error while opening alpha encoder\n");
+                exit(1);
+            }
+        }
+
+        /* prepare the extension data */
+        if (p->animated) {
+            BPGMetaData *md;
+            uint8_t buf[15], *q;
+
+            md = bpg_md_alloc(BPG_EXTENSION_TAG_ANIM_CONTROL);
+            q = buf;
+            put_ue(&q, p->loop_count);
+            put_ue(&q, p->frame_delay_num);
+            put_ue(&q, p->frame_delay_den);
+            md->buf_len = q - buf;
+            md->buf = malloc(md->buf_len);
+            memcpy(md->buf, buf, md->buf_len);
+            md->next = s->first_md;
+            s->first_md = md;
+        }
+
+        extension_buf = NULL;
+        extension_buf_len = 0;
+        if (s->first_md) {
+            BPGMetaData *md1;
+            int max_len;
+            uint8_t *q;
+            
+            max_len = 0;
+            for(md1 = s->first_md; md1 != NULL; md1 = md1->next) {
+                max_len += md1->buf_len + 5 * 2;
+            }
+            extension_buf = malloc(max_len);
+            q = extension_buf;
+            for(md1 = s->first_md; md1 != NULL; md1 = md1->next) {
+                put_ue(&q, md1->tag);
+                put_ue(&q, md1->buf_len);
+                memcpy(q, md1->buf, md1->buf_len);
+                q += md1->buf_len;
+            }
+            extension_buf_len = q - extension_buf;
+            
+            bpg_md_free(s->first_md);
+            s->first_md = NULL;
+        }
+    
+        {
+            uint8_t img_header[128], *q;
+            int v, has_alpha, has_extension, alpha2_flag, alpha1_flag, format;
+            
+            has_alpha = (img_alpha != NULL);
+            has_extension = (extension_buf_len > 0);
+            
+            
+            if (has_alpha) {
+                if (img->has_w_plane) {
+                    alpha1_flag = 0;
+                    alpha2_flag = 1;
+                } else {
+                    alpha1_flag = 1;
+                    alpha2_flag = img->premultiplied_alpha;
+                }
+            } else {
+                alpha1_flag = 0;
+                alpha2_flag = 0;
+            }
+            
+            q = img_header;
+            *q++ = (IMAGE_HEADER_MAGIC >> 24) & 0xff;
+            *q++ = (IMAGE_HEADER_MAGIC >> 16) & 0xff;
+            *q++ = (IMAGE_HEADER_MAGIC >> 8) & 0xff;
+            *q++ = (IMAGE_HEADER_MAGIC >> 0) & 0xff;
+
+            if (img->c_h_phase == 0 && img->format == BPG_FORMAT_420)
+                format = BPG_FORMAT_420_VIDEO;
+            else if (img->c_h_phase == 0 && img->format == BPG_FORMAT_422)
+                format = BPG_FORMAT_422_VIDEO;
+            else
+                format = img->format;
+            v = (format << 5) | (alpha1_flag << 4) | (img->bit_depth - 8);
+            *q++ = v;
+            v = (img->color_space << 4) | (has_extension << 3) |
+                (alpha2_flag << 2) | (img->limited_range << 1) |
+                p->animated;
+            *q++ = v;
+            put_ue(&q, width);
+            put_ue(&q, height);
+            
+            put_ue(&q, 0); /* zero length means up to the end of the file */
+            if (has_extension) {
+                put_ue(&q, extension_buf_len); /* extension data length */
+            }
+            
+            write_func(opaque, img_header, q - img_header);
+            
+            if (has_extension) {
+                if (write_func(opaque, extension_buf, extension_buf_len) != extension_buf_len) {
+                    fprintf(stderr, "Error while writing extension data\n");
+                    exit(1);
+                }
+                free(extension_buf);
+            }
+        }
+    }
+
+    /* store the frame duration */
+    if ((s->frame_count + 1) > s->frame_duration_tab_size) {
+        s->frame_duration_tab_size = (s->frame_duration_tab_size * 3) / 2;
+        if (s->frame_duration_tab_size < (s->frame_count + 1))
+            s->frame_duration_tab_size = (s->frame_count + 1);
+        s->frame_duration_tab = realloc(s->frame_duration_tab, 
+                                        sizeof(s->frame_duration_tab) * s->frame_duration_tab_size);
+    }
+    s->frame_duration_tab[s->frame_count] = s->frame_ticks;
+
+    s->encoder->encode(s->enc_ctx, img);
+    
+    if (img_alpha) {
+        s->encoder->encode(s->alpha_enc_ctx, img_alpha);
+        image_free(img_alpha);
+    }
+    
+    s->frame_count++;
+
+    if (!p->animated)
+        bpg_encoder_encode_trailer(s, write_func, opaque);
+
+    return 0;
+}
+
/* Free the encoder context and any pending extension data.
   NOTE(review): still-open HEVC contexts (enc_ctx / alpha_enc_ctx) are
   not closed here -- they are normally released when the trailer is
   encoded; confirm they cannot leak when encoding is aborted early. */
void bpg_encoder_close(BPGEncoderContext *s)
{
    free(s->frame_duration_tab);
    bpg_md_free(s->first_md);
    free(s);
}
+
/* BPGEncoderWriteFunc adapter: 'opaque' is a stdio stream. Returns the
   number of bytes actually written. */
static int my_write_func(void *opaque, const uint8_t *buf, int buf_len)
{
    return fwrite(buf, 1, buf_len, (FILE *)opaque);
}
+
/* Expand the pattern 'str' into 'buf' (of size 'buf_size'), replacing
   "%d" or "%Nd" (N = zero padded field width) with the number 'n' and
   "%%" with a literal '%'. Returns 0 on success, -1 if the pattern
   contains an unsupported conversion. The output is silently truncated
   if it does not fit. */
static int get_filename_num(char *buf, int buf_size, const char *str, int n)
{
    const char *p;
    char *q;
    int l, c;

    q = buf;
    p = str;
    for(;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            /* parse the optional decimal field width */
            l = 0;
            for(;;) {
                c = *p;
                if (c < '0' || c > '9')
                    break;
                l = l * 10 + (c - '0');
                p++;
            }
            c = *p++;
            if (c == '%') {
                goto add_char; /* "%%" -> literal '%' */
            } else if (c != 'd') {
                return -1; /* only '%d' and '%%' are supported */
            }
            /* cast to unsigned to match the %u conversion exactly */
            snprintf(q, buf + buf_size - q, "%0*u", l, (unsigned int)n);
            q += strlen(q);
            
        } else {
        add_char:
            /* copy a literal character, always leaving room for the NUL */
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    *q = '\0';
    return 0;
}
+
/* Print the usage information on stdout and exit with status 1.
   'is_full' also shows the advanced options. Never returns. */
void help(int is_full)
{
    char hevc_encoders[128];
    int i;

    /* build the space separated list of compiled-in encoders */
    hevc_encoders[0] = '\0';
    for(i = 0; i < HEVC_ENCODER_COUNT; i++) {
        if (i != 0)
            strcat(hevc_encoders, " ");
        strcat(hevc_encoders, hevc_encoder_name[i]);
    }
        
    printf("BPG Image Encoder version " CONFIG_BPG_VERSION "\n"
           "usage: bpgenc [options] infile.[jpg|png]\n"
           "\n"
           "Main options:\n"
           "-h                   show the full help (including the advanced options)\n"
           "-o outfile           set output filename (default = %s)\n"
           "-q qp                set quantizer parameter (smaller gives better quality,\n" 
           "                     range: 0-51, default = %d)\n"
           "-f cfmt              set the preferred chroma format (420, 422, 444,\n"
           "                     default=420)\n"
           "-c color_space       set the preferred color space (ycbcr, rgb, ycgco,\n"
           "                     ycbcr_bt709, ycbcr_bt2020, default=ycbcr)\n"
           "-b bit_depth         set the bit depth (8 to %d, default = %d)\n"
           "-lossless            enable lossless mode\n"
           "-e encoder           select the HEVC encoder (%s, default = %s)\n"
           "-m level             select the compression level (1=fast, 9=slow, default = %d)\n"
           "\n"
           "Animation options:\n"
           "-a                   generate animations from a sequence of images. Use %%d or\n"
           "                     %%Nd (N = number of digits) in the filename to specify the\n"
           "                     image index, starting from 0 or 1.\n"
           "-fps N               set the frame rate (default = 25)\n"
           "-loop N              set the number of times the animation is played. 0 means\n"
           "                     infinite (default = 0)\n"
           "-delayfile file      text file containing one number per image giving the\n"
           "                     display delay per image in centiseconds.\n"
           , DEFAULT_OUTFILENAME, DEFAULT_QP, BIT_DEPTH_MAX, DEFAULT_BIT_DEPTH,
           hevc_encoders, hevc_encoder_name[0], DEFAULT_COMPRESS_LEVEL);

    if (is_full) {
        printf("\nAdvanced options:\n"
           "-alphaq              set quantizer parameter for the alpha channel (default = same as -q value)\n"
           "-premul              store the color with premultiplied alpha\n"
           "-limitedrange        encode the color data with the limited range of video\n"
           "-hash                include MD5 hash in HEVC bitstream\n"
           "-keepmetadata        keep the metadata (from JPEG: EXIF, ICC profile, XMP, from PNG: ICC profile)\n"
           "-v                   show debug messages\n"
               );
    }

    exit(1);
}
+
/* Long command line options. The order is significant: main() dispatches
   on the array index returned through 'option_index', so entries must
   not be reordered. All entries leave .flag == NULL and .val == 0, so
   getopt_long_only() returns 0 when one of them matches. */
struct option long_opts[] = {
    { .name = "hash",         .has_arg = no_argument },       /* 0 */
    { .name = "keepmetadata", .has_arg = no_argument },       /* 1 */
    { .name = "alphaq",       .has_arg = required_argument }, /* 2 */
    { .name = "lossless",     .has_arg = no_argument },       /* 3 */
    { .name = "limitedrange", .has_arg = no_argument },       /* 4 */
    { .name = "premul",       .has_arg = no_argument },       /* 5 */
    { .name = "loop",         .has_arg = required_argument }, /* 6 */
    { .name = "fps",          .has_arg = required_argument }, /* 7 */
    { .name = "delayfile",    .has_arg = required_argument }, /* 8 */
    { .name = NULL },                                         /* terminator */
};
+
+/*
+ * bpgenc entry point: parse the command line, load the input image(s)
+ * and drive the BPG encoder.  With -a (animated), `infilename` is a
+ * filename pattern expanded per frame number (see get_filename_num).
+ */
+int main(int argc, char **argv)
+{
+    const char *infilename, *outfilename, *frame_delay_file;
+    Image *img;
+    FILE *f;
+    int c, option_index;
+    int keep_metadata;
+    int bit_depth, i, limited_range, premultiplied_alpha;
+    BPGColorSpaceEnum color_space;
+    BPGMetaData *md;
+    BPGEncoderContext *enc_ctx;
+    BPGEncoderParameters *p;
+
+    /* NOTE(review): allocation result is not checked for NULL */
+    p = bpg_encoder_param_alloc();
+
+    outfilename = DEFAULT_OUTFILENAME;
+    color_space = BPG_CS_YCbCr;
+    keep_metadata = 0;
+    bit_depth = DEFAULT_BIT_DEPTH;
+    limited_range = 0;
+    premultiplied_alpha = 0;
+    frame_delay_file = NULL;
+    
+    for(;;) {
+        c = getopt_long_only(argc, argv, "q:o:hf:c:vm:b:e:a", long_opts, &option_index);
+        if (c == -1)
+            break;
+        switch(c) {
+        case 0:
+            /* long option: option_index selects the entry in long_opts[] */
+            switch(option_index) {
+            case 0: /* -hash */
+                p->sei_decoded_picture_hash = 1;
+                break;
+            case 1: /* -keepmetadata */
+                keep_metadata = 1;
+                break;
+            case 2: /* -alphaq */
+                p->alpha_qp = atoi(optarg);
+                if (p->alpha_qp < 0 || p->alpha_qp > 51) {
+                    fprintf(stderr, "alpha_qp must be between 0 and 51\n");
+                    exit(1);
+                }
+                break;
+            case 3: /* -lossless: forces RGB 4:4:4, 8 bits, full range */
+                p->lossless = 1;
+                color_space = BPG_CS_RGB;
+                p->preferred_chroma_format = BPG_FORMAT_444;
+                bit_depth = 8;
+                limited_range = 0;
+                break;
+            case 4: /* -limitedrange */
+                limited_range = 1;
+                break;
+            case 5: /* -premul */
+                premultiplied_alpha = 1;
+                break;
+            case 6: /* -loop */
+                p->loop_count = strtoul(optarg, NULL, 0);
+                break;
+            case 7: /* -fps: one tick per frame, fps given as denominator */
+                p->frame_delay_num = 1;
+                p->frame_delay_den = strtoul(optarg, NULL, 0);
+                if (p->frame_delay_den == 0) {
+                    fprintf(stderr, "invalid frame rate\n");
+                    exit(1);
+                }
+                break;
+            case 8: /* -delayfile */
+                frame_delay_file = optarg;
+                break;
+            default:
+                goto show_help;
+            }
+            break;
+        case 'h':
+        show_help:
+            help(1);
+            break;
+        case 'q':
+            p->qp = atoi(optarg);
+            if (p->qp < 0 || p->qp > 51) {
+                fprintf(stderr, "qp must be between 0 and 51\n");
+                exit(1);
+            }
+            break;
+        case 'o':
+            outfilename = optarg;
+            break;
+        case 'f':
+            if (!strcmp(optarg, "420")) {
+                p->preferred_chroma_format = BPG_FORMAT_420;
+            } else if (!strcmp(optarg, "422")) {
+                p->preferred_chroma_format = BPG_FORMAT_422;
+            } else if (!strcmp(optarg, "444")) {
+                p->preferred_chroma_format = BPG_FORMAT_444;
+            } else if (!strcmp(optarg, "422_video")) {
+                p->preferred_chroma_format = BPG_FORMAT_422_VIDEO;
+            } else if (!strcmp(optarg, "420_video")) {
+                p->preferred_chroma_format = BPG_FORMAT_420_VIDEO;
+            } else {
+                fprintf(stderr, "Invalid chroma format\n");
+                exit(1);
+            }
+            break;
+        case 'c':
+            if (!strcmp(optarg, "ycbcr")) {
+                color_space = BPG_CS_YCbCr;
+            } else if (!strcmp(optarg, "rgb")) {
+                /* RGB implies no chroma subsampling */
+                color_space = BPG_CS_RGB;
+                p->preferred_chroma_format = BPG_FORMAT_444;
+            } else if (!strcmp(optarg, "ycgco")) {
+                color_space = BPG_CS_YCgCo;
+            } else if (!strcmp(optarg, "ycbcr_bt709")) {
+                color_space = BPG_CS_YCbCr_BT709;
+            } else if (!strcmp(optarg, "ycbcr_bt2020")) {
+                color_space = BPG_CS_YCbCr_BT2020;
+            } else {
+                fprintf(stderr, "Invalid color space format\n");
+                exit(1);
+            }
+            break;
+        case 'm':
+            /* compression effort, silently clamped to 1..9 */
+            p->compress_level = atoi(optarg);
+            if (p->compress_level < 1)
+                p->compress_level = 1;
+            else if (p->compress_level > 9)
+                p->compress_level = 9;
+            break;
+        case 'b':
+            bit_depth = atoi(optarg);
+            if (bit_depth < 8 || bit_depth > BIT_DEPTH_MAX) {
+                fprintf(stderr, "Invalid bit depth (range: 8 to %d)\n",
+                        BIT_DEPTH_MAX);
+                exit(1);
+            }
+            break;
+        case 'v':
+            p->verbose++;
+            break;
+        case 'e':
+            /* select the HEVC back-end by name */
+            for(i = 0; i < HEVC_ENCODER_COUNT; i++) {
+                if (!strcmp(optarg, hevc_encoder_name[i]))
+                    break;
+            }
+            if (i == HEVC_ENCODER_COUNT) {
+                fprintf(stderr, "Unsupported encoder. Available ones are:");
+                for(i = 0; i < HEVC_ENCODER_COUNT; i++) {
+                    fprintf(stderr, " %s", hevc_encoder_name[i]);
+                }
+                fprintf(stderr, "\n");
+                exit(1);
+            }
+            p->encoder_type = i;
+            break;
+        case 'a':
+            p->animated = 1;
+            break;
+        default:
+            exit(1);
+        }
+    }
+
+    if (optind >= argc) 
+        help(0);
+    infilename = argv[optind];
+
+    f = fopen(outfilename, "wb");
+    if (!f) {
+        perror(outfilename);
+        exit(1);
+    }
+
+    enc_ctx = bpg_encoder_open(p);
+    if (!enc_ctx) {
+        fprintf(stderr, "Could not open BPG encoder\n");
+        exit(1);
+    }
+
+    if (p->animated) {
+        int frame_num, first_frame, frame_ticks;
+        char filename[1024];
+        FILE *f1;
+
+        /* optional per-frame delay list, one value per line */
+        if (frame_delay_file) {
+            f1 = fopen(frame_delay_file, "r");
+            if (!f1) {
+                fprintf(stderr, "Could not open '%s'\n", frame_delay_file);
+                exit(1);
+            }
+        } else {
+            f1 = NULL;
+        }
+
+        first_frame = 1;
+        for(frame_num = 0; ; frame_num++) {
+            if (get_filename_num(filename, sizeof(filename), infilename, frame_num) < 0) {
+                fprintf(stderr, "Invalid filename syntax: '%s'\n", infilename);
+                exit(1);
+            }
+            img = load_image(&md, filename, color_space, bit_depth, limited_range,
+                             premultiplied_alpha);
+            if (!img) {
+                if (frame_num == 0)
+                    continue; /* accept to start at 0 or 1 */
+                if (first_frame) {
+                    fprintf(stderr, "Could not read '%s'\n", filename);
+                    exit(1);
+                } else {
+                    /* end of the frame sequence */
+                    break;
+                }
+            }
+            frame_ticks = 1;
+            if (f1) {
+                float fdelay;
+                /* delay values are interpreted in 1/100ths of a second
+                   and converted into frame_delay_num/den ticks */
+                if (fscanf(f1, "%f", &fdelay) == 1) {
+                    frame_ticks = lrint(fdelay * p->frame_delay_den / (p->frame_delay_num * 100));
+                    if (frame_ticks < 1)
+                        frame_ticks = 1;
+                }
+            }
+            
+            if (p->verbose)
+                printf("Encoding '%s' ticks=%d\n", filename, frame_ticks);
+            
+            /* metadata is only attached to the first frame */
+            if (keep_metadata && first_frame) {
+                bpg_encoder_set_extension_data(enc_ctx, md);
+            } else {
+                bpg_md_free(md);
+            }
+            bpg_encoder_set_frame_duration(enc_ctx, frame_ticks);
+            bpg_encoder_encode(enc_ctx, img, my_write_func, f);
+            image_free(img);
+
+            first_frame = 0;
+        }
+        if (f1)
+            fclose(f1);
+        /* end of stream */
+        bpg_encoder_encode(enc_ctx, NULL, my_write_func, f);
+    } else {
+        img = load_image(&md, infilename, color_space, bit_depth, limited_range,
+                         premultiplied_alpha);
+        if (!img) {
+            fprintf(stderr, "Could not read '%s'\n", infilename);
+            exit(1);
+        }
+        
+        if (!keep_metadata && md) {
+            bpg_md_free(md);
+            md = NULL;
+        }
+        
+        bpg_encoder_set_extension_data(enc_ctx, md);
+        
+        bpg_encoder_encode(enc_ctx, img, my_write_func, f);
+        image_free(img);
+    }
+
+    /* NOTE(review): fclose() result unchecked on a written file */
+    fclose(f);
+    
+    bpg_encoder_close(enc_ctx);
+    
+    bpg_encoder_param_free(p);
+
+    return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bpgenc.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,79 @@
+/*
+ * BPG encoder
+ *
+ * Copyright (c) 2014 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "libbpg.h"
+
+typedef struct {
+    int w, h;
+    BPGImageFormatEnum format; /* x_VIDEO values are forbidden here */
+    uint8_t c_h_phase; /* 4:2:2 or 4:2:0 : give the horizontal chroma
+                          position. 0=MPEG2, 1=JPEG. */
+    uint8_t has_alpha;
+    uint8_t has_w_plane;
+    uint8_t limited_range;
+    uint8_t premultiplied_alpha;
+    BPGColorSpaceEnum color_space;
+    uint8_t bit_depth;
+    uint8_t pixel_shift; /* (1 << pixel_shift) bytes per pixel */
+    uint8_t *data[4];
+    int linesize[4];
+} Image;
+
+typedef struct {
+    int width;
+    int height;
+    int chroma_format; /* 0-3 */
+    int bit_depth; /* 8-14 */
+    int intra_only; /* 0-1 */
+
+    int qp; /* quantizer 0-51 */
+    int lossless; /* 0-1 lossless mode */
+    int sei_decoded_picture_hash; /* 0=no hash, 1=MD5 hash */
+    int compress_level; /* 1-9 */
+    int verbose;
+} HEVCEncodeParams;
+
+typedef struct HEVCEncoderContext HEVCEncoderContext; 
+
+typedef struct {
+    HEVCEncoderContext *(*open)(const HEVCEncodeParams *params);
+    int (*encode)(HEVCEncoderContext *s, Image *img);
+    int (*close)(HEVCEncoderContext *s, uint8_t **pbuf);
+} HEVCEncoder;
+
+extern HEVCEncoder jctvc_encoder;
+extern HEVCEncoder x265_hevc_encoder;
+
+int x265_encode_picture(uint8_t **pbuf, Image *img, 
+                        const HEVCEncodeParams *params);
+void save_yuv1(Image *img, FILE *f);
+void save_yuv(Image *img, const char *filename);
+
+#ifdef __cplusplus
+}
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bpgview.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,538 @@
+/*
+ * BPG viewer
+ *
+ * Copyright (c) 2014-2015 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <getopt.h>
+#include <inttypes.h>
+#ifdef WIN32
+#include <windows.h>
+#endif
+#include <SDL/SDL.h>
+#include <SDL/SDL_image.h>
+
+#include "libbpg.h"
+
+/* Background drawn behind/around the image. */
+typedef enum {
+    BG_BLACK,
+    BG_TILED,
+} BackgroundTypeEnum;
+
+/* One decoded animation frame. */
+typedef struct {
+    SDL_Surface *img;
+    int delay; /* in ms */
+} Frame;
+
+/* Global viewer state. */
+typedef struct {
+    int screen_w, screen_h; /* current display mode size (from SDL_GetVideoInfo) */
+    int win_w, win_h;       /* saved window size while in full screen */
+    SDL_Surface *screen;
+
+    int img_w, img_h;       /* size of the currently loaded image */
+    int frame_count;
+    Frame *frames;
+    int frame_index; /* index of the current frame */
+    int loop_counter;
+    int loop_count;  /* 0 = loop forever (see SDL_USEREVENT handling) */
+    SDL_TimerID frame_timer_id; /* 0 when no animation timer is armed */
+
+    int is_full_screen;
+    int pos_x, pos_y;       /* top-left blit position of the image */
+    BackgroundTypeEnum background_type;
+} DispContext;
+
+static uint32_t timer_cb(uint32_t interval, void *param);
+
static inline int clamp_int(int val, int min_val, int max_val)
{
    /* Return val constrained to the closed interval [min_val, max_val]. */
    if (val > max_val)
        return max_val;
    return (val < min_val) ? min_val : val;
}
+
+Frame *bpg_load(FILE *f, int *pframe_count, int *ploop_count)
+{
+    BPGDecoderContext *s;
+    BPGImageInfo bi_s, *bi = &bi_s;
+    uint8_t *buf;
+    int len, y;
+    SDL_Surface *img;
+    Frame *frames;
+    uint32_t rmask, gmask, bmask, amask;
+    int frame_count, i, delay_num, delay_den;
+
+    fseek(f, 0, SEEK_END);
+    len = ftell(f);
+    fseek(f, 0, SEEK_SET);
+    if (len < 0)
+        return NULL;
+    buf = malloc(len);
+    if (!buf)
+        return NULL;
+    if (fread(buf, 1, len, f) != len)
+        return NULL;
+    
+    frames = NULL;
+    frame_count = 0;
+
+    s = bpg_decoder_open();
+    if (bpg_decoder_decode(s, buf, len) < 0) 
+        goto fail;
+    bpg_decoder_get_info(s, bi);
+#if SDL_BYTEORDER == SDL_BIG_ENDIAN
+    rmask = 0xff000000;
+    gmask = 0x00ff0000;
+    bmask = 0x0000ff00;
+    amask = 0x000000ff;
+#else
+    rmask = 0x000000ff;
+    gmask = 0x0000ff00;
+    bmask = 0x00ff0000;
+    amask = 0xff000000;
+#endif
+    for(;;) {
+        if (bpg_decoder_start(s, BPG_OUTPUT_FORMAT_RGBA32) < 0)
+            break;
+        bpg_decoder_get_frame_duration(s, &delay_num, &delay_den);
+        frames = realloc(frames, sizeof(frames[0]) * (frame_count + 1));
+        img = SDL_CreateRGBSurface(SDL_HWSURFACE, bi->width, bi->height, 32,
+                                   rmask, gmask, bmask, amask);
+        if (!img) 
+            goto fail;
+    
+        SDL_LockSurface(img);
+        for(y = 0; y < bi->height; y++) {
+            bpg_decoder_get_line(s, (uint8_t *)img->pixels + y * img->pitch);
+        }
+        SDL_UnlockSurface(img);
+        frames[frame_count].img = img;
+        frames[frame_count].delay = (delay_num * 1000) / delay_den;
+        frame_count++;
+    }
+    bpg_decoder_close(s);
+    *pframe_count = frame_count;
+    *ploop_count = bi->loop_count;
+    return frames;
+ fail:
+    bpg_decoder_close(s);
+    for(i = 0; i < frame_count; i++) {
+        SDL_FreeSurface(frames[i].img);
+    }
+    free(frames);
+    *pframe_count = 0;
+    return NULL;
+}
+
+/* (Re)arm the single-shot animation timer with the current frame's
+   delay; timer_cb() will post an SDL_USEREVENT when it fires. */
+static void restart_frame_timer(DispContext *dc)
+{
+    if (dc->frame_timer_id) {
+        /* XXX: the SDL timer API is not safe, so we remove the timer even if it already expired */
+        SDL_RemoveTimer(dc->frame_timer_id);
+        dc->frame_timer_id = 0;
+    }
+    dc->frame_timer_id = 
+        SDL_AddTimer(dc->frames[dc->frame_index].delay, timer_cb, NULL);
+}
+
+int load_image(DispContext *dc, const char *filename)
+{
+    SDL_Surface *img;
+    Frame *frames;
+    FILE *f;
+    uint8_t buf[BPG_DECODER_INFO_BUF_SIZE];
+    int len, i, frame_count, loop_count;
+    BPGImageInfo bi;
+
+    f = fopen(filename, "rb");
+    if (!f)
+        goto fail;
+    len = fread(buf, 1, sizeof(buf), f);
+    if (bpg_decoder_get_info_from_buf(&bi, NULL, buf, len) >= 0) {
+        fseek(f, 0, SEEK_SET);
+        frames = bpg_load(f, &frame_count, &loop_count);
+        if (!frames)
+            goto fail;
+        fclose(f);
+    } else {
+        /* use SDL image loader */
+        img = IMG_Load(filename);
+        if (!img) {
+        fail:
+            fprintf(stderr, "Could not load '%s'\n", filename);
+            return -1;
+        }
+        frame_count = 1;
+        frames = malloc(sizeof(dc->frames[0]) * frame_count);
+        frames[0].img = img;
+        frames[0].delay = 0;
+        loop_count = 1;
+    }
+
+    for(i = 0; i < dc->frame_count; i++) {
+        SDL_FreeSurface(dc->frames[i].img);
+    }
+    free(dc->frames);
+    if (dc->frame_timer_id) {
+        SDL_RemoveTimer(dc->frame_timer_id);
+        dc->frame_timer_id = 0;
+    }
+    
+    dc->frame_count = frame_count;
+    dc->frames = frames;
+    dc->frame_index = 0;
+    dc->loop_counter = 0;
+    dc->loop_count = loop_count;
+    dc->img_w = dc->frames[0].img->w;
+    dc->img_h = dc->frames[0].img->h;
+
+    /* start the animation timer if needed */
+    if (dc->frame_count > 1) {
+        restart_frame_timer(dc);
+    }
+    return 0;
+}
+
+void center_image(DispContext *dc)
+{
+    dc->pos_x = clamp_int((dc->screen->w - dc->img_w) / 2, -32767, 32768);
+    dc->pos_y = clamp_int((dc->screen->h - dc->img_h) / 2, -32767, 32768);
+}
+
+void draw_image(DispContext *dc)
+{
+    SDL_Rect r;
+
+    r.x = 0;
+    r.y = 0;
+    r.w = dc->screen->w;
+    r.h = dc->screen->h;
+    SDL_FillRect(dc->screen, &r, SDL_MapRGB(dc->screen->format, 0x00, 0x00, 0x00));
+
+    if (dc->background_type == BG_TILED) {
+        int x, y, tw, w, h, x2, y2, w1, h1, x1, y1;
+        uint32_t bgcolors[2];
+
+        tw = 16;
+        w = dc->img_w;
+        h = dc->img_h;
+        w1 = (w + tw - 1) / tw;
+        h1 = (h + tw - 1) / tw;
+        bgcolors[0] = SDL_MapRGB(dc->screen->format, 100, 100, 100);
+        bgcolors[1] = SDL_MapRGB(dc->screen->format, 150, 150, 150);
+        for(y = 0; y < h1; y++) {
+            for(x = 0; x < w1; x++) {
+                x1 = x * tw;
+                y1 = y * tw;
+                x2 = x1 + tw;
+                y2 = y1 + tw;
+                if (x2 > w)
+                    x2 = w;
+                if (y2 > h)
+                    y2 = h;
+                r.x = x1 + dc->pos_x;
+                r.y = y1 + dc->pos_y;
+                r.w = x2 - x1;
+                r.h = y2 - y1;
+                SDL_FillRect(dc->screen, &r, bgcolors[(x ^ y) & 1]);
+            }
+        }
+    }
+
+    r.x = dc->pos_x;
+    r.y = dc->pos_y;
+    r.w = 0;
+    r.h = 0;
+    SDL_BlitSurface (dc->frames[dc->frame_index].img, NULL, dc->screen, &r);
+
+    SDL_Flip(dc->screen);
+}
+
+void pan_image(DispContext *dc, int dx, int dy)
+{
+    int dw, dh;
+
+    dw = dc->img_w - dc->screen->w;
+    dh = dc->img_h - dc->screen->h;
+    if (dw > 0) {
+        dc->pos_x += dx;
+        if (dc->pos_x < -dw)
+            dc->pos_x = -dw;
+        else if (dc->pos_x > 0)
+            dc->pos_x = 0;
+    }
+    if (dh > 0) {
+        dc->pos_y += dy;
+        if (dc->pos_y < -dh)
+            dc->pos_y = -dh;
+        else if (dc->pos_y > 0)
+            dc->pos_y = 0;
+    }
+    draw_image(dc);
+}
+
+static void set_caption(DispContext *dc, char **argv,
+                        int image_index, int image_count)
+{
+    char buf[1024];
+    const char *filename;
+    filename = argv[image_index];
+    snprintf(buf, sizeof(buf), "bpgview [%d of %d] - %s",
+             image_index + 1, image_count, filename);
+    SDL_WM_SetCaption(buf, buf);
+}
+
+static void open_window(DispContext *dc, int w, int h, int is_full_screen)
+{
+    int flags;
+
+    flags = SDL_DOUBLEBUF | SDL_HWSURFACE | SDL_HWACCEL;
+    if (is_full_screen)
+        flags |= SDL_FULLSCREEN;
+    else
+        flags |= SDL_RESIZABLE;
+
+    dc->screen = SDL_SetVideoMode(w, h, 32, flags);
+    if (!dc->screen) {
+        fprintf(stderr, "Could not init screen\n");
+        exit(1);
+    }
+}
+
+static uint32_t timer_cb(uint32_t interval, void *param)
+{
+    SDL_Event event;
+    SDL_UserEvent userevent;
+
+    userevent.type = SDL_USEREVENT;
+    userevent.code = 0;
+    userevent.data1 = NULL;
+    userevent.data2 = NULL;
+
+    event.type = SDL_USEREVENT;
+    event.user = userevent;
+
+    SDL_PushEvent(&event);
+    return 0;
+}
+
+#define DEFAULT_W 640
+#define DEFAULT_H 480
+
+static void help(void)
+{
+    const char *str;
+    str = "BPG Image Viewer version " CONFIG_BPG_VERSION "\n"
+           "usage: bpgview infile...\n"
+           "\n"
+           "Keys:\n"
+           "q, ESC         quit\n"
+           "n, SPACE       next image\n"
+           "p              previous image\n"
+           "arrows         pan\n"
+           "c              center\n"
+           "b              toggle background type\n";
+#ifdef WIN32
+    MessageBox(NULL, str, "Error", MB_ICONERROR | MB_OK);
+    exit(1);
+#else
+    printf("%s", str);
+    exit(1);
+#endif
+}
+
+/*
+ * bpgview entry point: parse options, load the first image, open an
+ * SDL window and run the event loop (keyboard navigation, panning,
+ * full-screen toggle, animation via SDL_USEREVENT).
+ */
+int main(int argc, char **argv)
+{
+    int c, image_index, image_count, incr, i;
+    SDL_Event event;
+    DispContext dc_s, *dc = &dc_s;
+    const SDL_VideoInfo *vi;
+
+    for(;;) {
+        c = getopt(argc, argv, "h");
+        if (c == -1)
+            break;
+        switch(c) {
+        case 'h':
+        show_help:
+            help();
+            break;
+        default:
+            exit(1);
+        }
+    }
+
+    if (optind >= argc)
+        goto show_help;
+
+    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER) < 0) {
+        fprintf(stderr, "Could not init SDL\n");
+        exit(1);
+    }
+    memset(dc, 0, sizeof(*dc));
+
+    /* remember the display mode size for full-screen and clamping */
+    vi = SDL_GetVideoInfo();
+    dc->screen_w = vi->current_w;
+    dc->screen_h = vi->current_h;
+    dc->is_full_screen = 0;
+
+    image_count = argc - optind;
+    image_index = 0;
+    if (load_image(dc, argv[optind + image_index]) < 0)
+        exit(1);
+    dc->background_type = BG_TILED;
+
+    {
+        /* choose the initial window size from the first image */
+        int w, h;
+
+        if (image_count > 1 || (dc->img_w < 256 || dc->img_h < 256)) {
+            w = DEFAULT_W;
+            h = DEFAULT_H;
+        } else {
+            w = clamp_int(dc->img_w, 32, dc->screen_w);
+            h = clamp_int(dc->img_h, 32, dc->screen_h);
+        }
+        open_window(dc, w, h, 0);
+        set_caption(dc, argv + optind, image_index, image_count);
+    }
+
+    center_image(dc);
+    draw_image(dc);
+
+    SDL_EnableKeyRepeat(SDL_DEFAULT_REPEAT_DELAY, SDL_DEFAULT_REPEAT_INTERVAL);
+
+    /* main event loop */
+    for(;;) {
+        if (!SDL_WaitEvent(&event))
+            continue;
+        switch(event.type) {
+        case SDL_KEYDOWN:
+            switch (event.key.keysym.sym) {
+            case SDLK_ESCAPE:
+            case SDLK_q:
+                goto done;
+            case SDLK_SPACE: /* next image */
+            case SDLK_n:
+                incr = 1;
+                goto prev_next;
+            case SDLK_p: /* previous image */
+                incr = -1;
+            prev_next:
+                if (image_count > 1) {
+                    /* try each remaining file once, wrapping around,
+                       until one loads; give up if none does */
+                    for(i = 0; i < image_count; i++) {
+                        image_index += incr;
+                        if (image_index < 0)
+                            image_index = image_count - 1;
+                        else if (image_index >= image_count)
+                            image_index = 0;
+                        if (load_image(dc, argv[optind + image_index]) == 0)
+                            break;
+                    }
+                    if (i == image_count)
+                        exit(1);
+                    set_caption(dc, argv + optind, image_index, image_count);
+                    center_image(dc);
+                    draw_image(dc);
+                }
+                break;
+            case SDLK_LEFT:
+                pan_image(dc, 32, 0);
+                break;
+            case SDLK_RIGHT:
+                pan_image(dc, -32, 0);
+                break;
+            case SDLK_UP:
+                pan_image(dc, 0, 32);
+                break;
+            case SDLK_DOWN:
+                pan_image(dc, 0, -32);
+                break;
+            case SDLK_c: /* re-center */
+                center_image(dc);
+                draw_image(dc);
+                break;
+            case SDLK_b: /* toggle BG_BLACK / BG_TILED */
+                dc->background_type ^= 1;
+                draw_image(dc);
+                break;
+            case SDLK_f: /* toggle full screen */
+                dc->is_full_screen ^= 1;
+                if (dc->is_full_screen) {
+                    /* save old windows size */
+                    dc->win_w = dc->screen->w;
+                    dc->win_h = dc->screen->h;
+                    open_window(dc, dc->screen_w, dc->screen_h, 1);
+                } else {
+                    open_window(dc, dc->win_w, dc->win_h, 0);
+                }
+                center_image(dc);
+                draw_image(dc);
+                break;
+            default:
+                break;
+            }
+            break;
+        case SDL_VIDEORESIZE:
+            {
+                open_window(dc, event.resize.w, event.resize.h, 0);
+                center_image(dc);
+                draw_image(dc);
+            }
+            break;
+        case SDL_QUIT:
+            goto done;
+        case SDL_MOUSEMOTION:
+            /* drag with any button held to pan */
+            if (event.motion.state) {
+                pan_image(dc, event.motion.xrel, event.motion.yrel);
+            }
+            break;
+        case SDL_USEREVENT:
+            /* animation tick posted by timer_cb() */
+            if (dc->frame_count > 1) {
+                /* show next frame */
+                if (dc->frame_index == (dc->frame_count - 1)) {
+                    /* loop_count == 0 means loop forever */
+                    if (dc->loop_count == 0 ||
+                        dc->loop_counter < (dc->loop_count - 1)) {
+                        dc->frame_index = 0;
+                        dc->loop_counter++;
+                    } else {
+                        break;
+                    }
+                } else {
+                    dc->frame_index++;
+                }
+                draw_image(dc);
+                restart_frame_timer(dc);
+            }
+            break;
+        default:
+            break;
+        }
+    }
+ done: 
+
+    SDL_FreeSurface(dc->screen);
+    return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/config.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1834 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-asm --enable-small --disable-pthreads --disable-everything --enable-decoder=hevc --enable-demuxer=hevc --enable-protocol=file --disable-ffserver --disable-ffprobe --disable-doc --enable-parser=hevc"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2014
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "gcc 4.7.2 (GCC) 20120921 (Red Hat 4.7.2-2)"
+#define av_restrict restrict
+#define EXTERN_PREFIX ""
+#define EXTERN_ASM 
+#define BUILDSUF ""
+#define SLIBSUF ".so"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 0
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 0
+#define ARCH_X86_32 0
+#define ARCH_X86_64 0
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 0
+#define HAVE_NEON 0
+#define HAVE_VFP 0
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AMD3DNOW 0
+#define HAVE_AMD3DNOWEXT 0
+#define HAVE_AVX 0
+#define HAVE_AVX2 0
+#define HAVE_FMA3 0
+#define HAVE_FMA4 0
+#define HAVE_MMX 0
+#define HAVE_MMXEXT 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
+#define HAVE_SSE3 0
+#define HAVE_SSE4 0
+#define HAVE_SSE42 0
+#define HAVE_SSSE3 0
+#define HAVE_XOP 0
+#define HAVE_CPUNOP 0
+#define HAVE_I686 0
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPSDSPR1 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_LOONGSON 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 0
+#define HAVE_NEON_EXTERNAL 0
+#define HAVE_VFP_EXTERNAL 0
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AMD3DNOW_EXTERNAL 0
+#define HAVE_AMD3DNOWEXT_EXTERNAL 0
+#define HAVE_AVX_EXTERNAL 0
+#define HAVE_AVX2_EXTERNAL 0
+#define HAVE_FMA3_EXTERNAL 0
+#define HAVE_FMA4_EXTERNAL 0
+#define HAVE_MMX_EXTERNAL 0
+#define HAVE_MMXEXT_EXTERNAL 0
+#define HAVE_SSE_EXTERNAL 0
+#define HAVE_SSE2_EXTERNAL 0
+#define HAVE_SSE3_EXTERNAL 0
+#define HAVE_SSE4_EXTERNAL 0
+#define HAVE_SSE42_EXTERNAL 0
+#define HAVE_SSSE3_EXTERNAL 0
+#define HAVE_XOP_EXTERNAL 0
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPSDSPR1_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_LOONGSON_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 0
+#define HAVE_NEON_INLINE 0
+#define HAVE_VFP_INLINE 0
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPSDSPR1_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_LOONGSON_INLINE 0
+#define HAVE_ALIGNED_STACK 0
+#define HAVE_FAST_64BIT 0
+#define HAVE_FAST_CLZ 0
+#define HAVE_FAST_CMOV 0
+#define HAVE_LOCAL_ALIGNED_8 1
+#define HAVE_LOCAL_ALIGNED_16 1
+#define HAVE_LOCAL_ALIGNED_32 1
+#define HAVE_SIMD_ALIGN_16 0
+#define HAVE_ATOMICS_GCC 1
+#define HAVE_ATOMICS_SUNCC 0
+#define HAVE_ATOMICS_WIN32 0
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_ATOMIC_COMPARE_EXCHANGE 1
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 1
+#define HAVE_RDTSC 0
+#define HAVE_SARESTART 1
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_YASM 0
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 0
+#define HAVE_INCOMPATIBLE_LIBAV_ABI 0
+#define HAVE_ALSA_ASOUNDLIB_H 0
+#define HAVE_ALTIVEC_H 0
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 1
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CL_CL_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DLFCN_H 1
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_MACH_MACH_TIME_H 0
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_MALLOC_H 0
+#define HAVE_OPENJPEG_1_5_OPENJPEG_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SNDIO_H 0
+#define HAVE_SOUNDCARD_H 0
+#define HAVE_SYS_MMAN_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 1
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 0
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COSF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_COTASKMEMFREE 0
+#define HAVE_CRYPTGENRANDOM 0
+#define HAVE_DLOPEN 1
+#define HAVE_FCNTL 1
+#define HAVE_FLT_LIM 1
+#define HAVE_FORK 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSERVBYPORT 1
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 1
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_JACK_PORT_GET_LATENCY_RANGE 0
+#define HAVE_KBHIT 0
+#define HAVE_LOCALTIME_R 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MEMALIGN 0
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_POSIX_MEMALIGN 0
+#define HAVE_PTHREAD_CANCEL 0
+#define HAVE_SCHED_GETAFFINITY 1
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 1
+#define HAVE_USLEEP 1
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_PTHREADS 0
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_ATTRIBUTE_MAY_ALIAS 1
+#define HAVE_ATTRIBUTE_PACKED 1
+#define HAVE_EBP_AVAILABLE 1
+#define HAVE_EBX_AVAILABLE 1
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 1
+#define HAVE_SYMVER_ASM_LABEL 0
+#define HAVE_SYMVER_GNU_ASM 1
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 1
+#define HAVE_CONDITION_VARIABLE_PTR 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 1
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 1
+#define HAVE_ATOMICS_NATIVE 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_DXVA2API_COBJ 0
+#define HAVE_DXVA2_LIB 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_LIBDC1394_1 0
+#define HAVE_LIBDC1394_2 0
+#define HAVE_MAKEINFO 1
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_SDL 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_THREADS 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_XLIB 1
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_DEMUXERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_HWACCELS 0
+#define CONFIG_INDEVS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_PARSERS 1
+#define CONFIG_PROTOCOLS 1
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODING_ENCODING_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 1
+#define CONFIG_FILTER_AUDIO_EXAMPLE 1
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 1
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 1
+#define CONFIG_METADATA_EXAMPLE 1
+#define CONFIG_MUXING_EXAMPLE 1
+#define CONFIG_REMUXING_EXAMPLE 1
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 1
+#define CONFIG_SCALING_VIDEO_EXAMPLE 1
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 1
+#define CONFIG_TRANSCODING_EXAMPLE 1
+#define CONFIG_AVISYNTH 0
+#define CONFIG_BZLIB 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_FREI0R 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_ICONV 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LIBAACPLUS 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBFAAC 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBNUT 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBQUVI 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSCHROEDINGER 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSTAGEFRIGHT_H264 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUTVIDEO 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBVO_AACENC 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWAVPACK 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXCB 1
+#define CONFIG_LIBXCB_SHM 1
+#define CONFIG_LIBXCB_XFIXES 1
+#define CONFIG_LIBXVID 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LZMA 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENCL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_SDL 1
+#define CONFIG_X11GRAB 0
+#define CONFIG_XLIB 1
+#define CONFIG_ZLIB 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 0
+#define CONFIG_SMALL 1
+#define CONFIG_STATIC 1
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_DXVA2 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDA 0
+#define CONFIG_VDPAU 0
+#define CONFIG_XVMC 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_AVDEVICE 1
+#define CONFIG_AVFILTER 1
+#define CONFIG_AVFORMAT 1
+#define CONFIG_AVRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_POSTPROC 0
+#define CONFIG_SWRESAMPLE 1
+#define CONFIG_SWSCALE 1
+#define CONFIG_FFPLAY 1
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFSERVER 0
+#define CONFIG_FFMPEG 1
+#define CONFIG_DCT 0
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 0
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_LZO 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_INCOMPATIBLE_LIBAV_ABI 0
+#define CONFIG_MEMALIGN_HACK 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_PIC 0
+#define CONFIG_POD2MAN 1
+#define CONFIG_RAISE_MAJOR 0
+#define CONFIG_THUMB 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 1
+#define CONFIG_CABAC 1
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 0
+#define CONFIG_FAANIDCT 0
+#define CONFIG_FDCTDSP 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GOLOMB 1
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PRED 0
+#define CONFIG_H264QPEL 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 0
+#define CONFIG_IIRFILTER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 0
+#define CONFIG_MPEGAUDIODSP 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_NETTLE 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VIDEODSP 1
+#define CONFIG_VP3DSP 0
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_AAC_ADTSTOASC_BSF 0
+#define CONFIG_CHOMP_BSF 0
+#define CONFIG_DUMP_EXTRADATA_BSF 0
+#define CONFIG_H264_MP4TOANNEXB_BSF 0
+#define CONFIG_IMX_DUMP_HEADER_BSF 0
+#define CONFIG_MJPEG2JPEG_BSF 0
+#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0
+#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0
+#define CONFIG_MOV2TEXTSUB_BSF 0
+#define CONFIG_NOISE_BSF 0
+#define CONFIG_REMOVE_EXTRADATA_BSF 0
+#define CONFIG_TEXT2MOVSUB_BSF 0
+#define CONFIG_AASC_DECODER 0
+#define CONFIG_AIC_DECODER 0
+#define CONFIG_ALIAS_PIX_DECODER 0
+#define CONFIG_AMV_DECODER 0
+#define CONFIG_ANM_DECODER 0
+#define CONFIG_ANSI_DECODER 0
+#define CONFIG_ASV1_DECODER 0
+#define CONFIG_ASV2_DECODER 0
+#define CONFIG_AURA_DECODER 0
+#define CONFIG_AURA2_DECODER 0
+#define CONFIG_AVRP_DECODER 0
+#define CONFIG_AVRN_DECODER 0
+#define CONFIG_AVS_DECODER 0
+#define CONFIG_AVUI_DECODER 0
+#define CONFIG_AYUV_DECODER 0
+#define CONFIG_BETHSOFTVID_DECODER 0
+#define CONFIG_BFI_DECODER 0
+#define CONFIG_BINK_DECODER 0
+#define CONFIG_BMP_DECODER 0
+#define CONFIG_BMV_VIDEO_DECODER 0
+#define CONFIG_BRENDER_PIX_DECODER 0
+#define CONFIG_C93_DECODER 0
+#define CONFIG_CAVS_DECODER 0
+#define CONFIG_CDGRAPHICS_DECODER 0
+#define CONFIG_CDXL_DECODER 0
+#define CONFIG_CINEPAK_DECODER 0
+#define CONFIG_CLJR_DECODER 0
+#define CONFIG_CLLC_DECODER 0
+#define CONFIG_COMFORTNOISE_DECODER 0
+#define CONFIG_CPIA_DECODER 0
+#define CONFIG_CSCD_DECODER 0
+#define CONFIG_CYUV_DECODER 0
+#define CONFIG_DFA_DECODER 0
+#define CONFIG_DIRAC_DECODER 0
+#define CONFIG_DNXHD_DECODER 0
+#define CONFIG_DPX_DECODER 0
+#define CONFIG_DSICINVIDEO_DECODER 0
+#define CONFIG_DVVIDEO_DECODER 0
+#define CONFIG_DXA_DECODER 0
+#define CONFIG_DXTORY_DECODER 0
+#define CONFIG_EACMV_DECODER 0
+#define CONFIG_EAMAD_DECODER 0
+#define CONFIG_EATGQ_DECODER 0
+#define CONFIG_EATGV_DECODER 0
+#define CONFIG_EATQI_DECODER 0
+#define CONFIG_EIGHTBPS_DECODER 0
+#define CONFIG_EIGHTSVX_EXP_DECODER 0
+#define CONFIG_EIGHTSVX_FIB_DECODER 0
+#define CONFIG_ESCAPE124_DECODER 0
+#define CONFIG_ESCAPE130_DECODER 0
+#define CONFIG_EXR_DECODER 0
+#define CONFIG_FFV1_DECODER 0
+#define CONFIG_FFVHUFF_DECODER 0
+#define CONFIG_FIC_DECODER 0
+#define CONFIG_FLASHSV_DECODER 0
+#define CONFIG_FLASHSV2_DECODER 0
+#define CONFIG_FLIC_DECODER 0
+#define CONFIG_FLV_DECODER 0
+#define CONFIG_FOURXM_DECODER 0
+#define CONFIG_FRAPS_DECODER 0
+#define CONFIG_FRWU_DECODER 0
+#define CONFIG_G2M_DECODER 0
+#define CONFIG_GIF_DECODER 0
+#define CONFIG_H261_DECODER 0
+#define CONFIG_H263_DECODER 0
+#define CONFIG_H263I_DECODER 0
+#define CONFIG_H263P_DECODER 0
+#define CONFIG_H264_DECODER 0
+#define CONFIG_H264_CRYSTALHD_DECODER 0
+#define CONFIG_H264_VDA_DECODER 0
+#define CONFIG_H264_VDPAU_DECODER 0
+#define CONFIG_HEVC_DECODER 1
+#define CONFIG_HNM4_VIDEO_DECODER 0
+#define CONFIG_HUFFYUV_DECODER 0
+#define CONFIG_IDCIN_DECODER 0
+#define CONFIG_IFF_BYTERUN1_DECODER 0
+#define CONFIG_IFF_ILBM_DECODER 0
+#define CONFIG_INDEO2_DECODER 0
+#define CONFIG_INDEO3_DECODER 0
+#define CONFIG_INDEO4_DECODER 0
+#define CONFIG_INDEO5_DECODER 0
+#define CONFIG_INTERPLAY_VIDEO_DECODER 0
+#define CONFIG_JPEG2000_DECODER 0
+#define CONFIG_JPEGLS_DECODER 0
+#define CONFIG_JV_DECODER 0
+#define CONFIG_KGV1_DECODER 0
+#define CONFIG_KMVC_DECODER 0
+#define CONFIG_LAGARITH_DECODER 0
+#define CONFIG_LOCO_DECODER 0
+#define CONFIG_MDEC_DECODER 0
+#define CONFIG_MIMIC_DECODER 0
+#define CONFIG_MJPEG_DECODER 0
+#define CONFIG_MJPEGB_DECODER 0
+#define CONFIG_MMVIDEO_DECODER 0
+#define CONFIG_MOTIONPIXELS_DECODER 0
+#define CONFIG_MPEG_XVMC_DECODER 0
+#define CONFIG_MPEG1VIDEO_DECODER 0
+#define CONFIG_MPEG2VIDEO_DECODER 0
+#define CONFIG_MPEG4_DECODER 0
+#define CONFIG_MPEG4_CRYSTALHD_DECODER 0
+#define CONFIG_MPEG4_VDPAU_DECODER 0
+#define CONFIG_MPEGVIDEO_DECODER 0
+#define CONFIG_MPEG_VDPAU_DECODER 0
+#define CONFIG_MPEG1_VDPAU_DECODER 0
+#define CONFIG_MPEG2_CRYSTALHD_DECODER 0
+#define CONFIG_MSA1_DECODER 0
+#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0
+#define CONFIG_MSMPEG4V1_DECODER 0
+#define CONFIG_MSMPEG4V2_DECODER 0
+#define CONFIG_MSMPEG4V3_DECODER 0
+#define CONFIG_MSRLE_DECODER 0
+#define CONFIG_MSS1_DECODER 0
+#define CONFIG_MSS2_DECODER 0
+#define CONFIG_MSVIDEO1_DECODER 0
+#define CONFIG_MSZH_DECODER 0
+#define CONFIG_MTS2_DECODER 0
+#define CONFIG_MVC1_DECODER 0
+#define CONFIG_MVC2_DECODER 0
+#define CONFIG_MXPEG_DECODER 0
+#define CONFIG_NUV_DECODER 0
+#define CONFIG_PAF_VIDEO_DECODER 0
+#define CONFIG_PAM_DECODER 0
+#define CONFIG_PBM_DECODER 0
+#define CONFIG_PCX_DECODER 0
+#define CONFIG_PGM_DECODER 0
+#define CONFIG_PGMYUV_DECODER 0
+#define CONFIG_PICTOR_DECODER 0
+#define CONFIG_PNG_DECODER 0
+#define CONFIG_PPM_DECODER 0
+#define CONFIG_PRORES_DECODER 0
+#define CONFIG_PRORES_LGPL_DECODER 0
+#define CONFIG_PTX_DECODER 0
+#define CONFIG_QDRAW_DECODER 0
+#define CONFIG_QPEG_DECODER 0
+#define CONFIG_QTRLE_DECODER 0
+#define CONFIG_R10K_DECODER 0
+#define CONFIG_R210_DECODER 0
+#define CONFIG_RAWVIDEO_DECODER 0
+#define CONFIG_RL2_DECODER 0
+#define CONFIG_ROQ_DECODER 0
+#define CONFIG_RPZA_DECODER 0
+#define CONFIG_RV10_DECODER 0
+#define CONFIG_RV20_DECODER 0
+#define CONFIG_RV30_DECODER 0
+#define CONFIG_RV40_DECODER 0
+#define CONFIG_S302M_DECODER 0
+#define CONFIG_SANM_DECODER 0
+#define CONFIG_SGI_DECODER 0
+#define CONFIG_SGIRLE_DECODER 0
+#define CONFIG_SMACKER_DECODER 0
+#define CONFIG_SMC_DECODER 0
+#define CONFIG_SMVJPEG_DECODER 0
+#define CONFIG_SNOW_DECODER 0
+#define CONFIG_SP5X_DECODER 0
+#define CONFIG_SUNRAST_DECODER 0
+#define CONFIG_SVQ1_DECODER 0
+#define CONFIG_SVQ3_DECODER 0
+#define CONFIG_TARGA_DECODER 0
+#define CONFIG_TARGA_Y216_DECODER 0
+#define CONFIG_THEORA_DECODER 0
+#define CONFIG_THP_DECODER 0
+#define CONFIG_TIERTEXSEQVIDEO_DECODER 0
+#define CONFIG_TIFF_DECODER 0
+#define CONFIG_TMV_DECODER 0
+#define CONFIG_TRUEMOTION1_DECODER 0
+#define CONFIG_TRUEMOTION2_DECODER 0
+#define CONFIG_TSCC_DECODER 0
+#define CONFIG_TSCC2_DECODER 0
+#define CONFIG_TXD_DECODER 0
+#define CONFIG_ULTI_DECODER 0
+#define CONFIG_UTVIDEO_DECODER 0
+#define CONFIG_V210_DECODER 0
+#define CONFIG_V210X_DECODER 0
+#define CONFIG_V308_DECODER 0
+#define CONFIG_V408_DECODER 0
+#define CONFIG_V410_DECODER 0
+#define CONFIG_VB_DECODER 0
+#define CONFIG_VBLE_DECODER 0
+#define CONFIG_VC1_DECODER 0
+#define CONFIG_VC1_CRYSTALHD_DECODER 0
+#define CONFIG_VC1_VDPAU_DECODER 0
+#define CONFIG_VC1IMAGE_DECODER 0
+#define CONFIG_VCR1_DECODER 0
+#define CONFIG_VMDVIDEO_DECODER 0
+#define CONFIG_VMNC_DECODER 0
+#define CONFIG_VP3_DECODER 0
+#define CONFIG_VP5_DECODER 0
+#define CONFIG_VP6_DECODER 0
+#define CONFIG_VP6A_DECODER 0
+#define CONFIG_VP6F_DECODER 0
+#define CONFIG_VP7_DECODER 0
+#define CONFIG_VP8_DECODER 0
+#define CONFIG_VP9_DECODER 0
+#define CONFIG_VQA_DECODER 0
+#define CONFIG_WEBP_DECODER 0
+#define CONFIG_WMV1_DECODER 0
+#define CONFIG_WMV2_DECODER 0
+#define CONFIG_WMV3_DECODER 0
+#define CONFIG_WMV3_CRYSTALHD_DECODER 0
+#define CONFIG_WMV3_VDPAU_DECODER 0
+#define CONFIG_WMV3IMAGE_DECODER 0
+#define CONFIG_WNV1_DECODER 0
+#define CONFIG_XAN_WC3_DECODER 0
+#define CONFIG_XAN_WC4_DECODER 0
+#define CONFIG_XBM_DECODER 0
+#define CONFIG_XFACE_DECODER 0
+#define CONFIG_XL_DECODER 0
+#define CONFIG_XWD_DECODER 0
+#define CONFIG_Y41P_DECODER 0
+#define CONFIG_YOP_DECODER 0
+#define CONFIG_YUV4_DECODER 0
+#define CONFIG_ZERO12V_DECODER 0
+#define CONFIG_ZEROCODEC_DECODER 0
+#define CONFIG_ZLIB_DECODER 0
+#define CONFIG_ZMBV_DECODER 0
+#define CONFIG_AAC_DECODER 0
+#define CONFIG_AAC_LATM_DECODER 0
+#define CONFIG_AC3_DECODER 0
+#define CONFIG_AC3_FIXED_DECODER 0
+#define CONFIG_ALAC_DECODER 0
+#define CONFIG_ALS_DECODER 0
+#define CONFIG_AMRNB_DECODER 0
+#define CONFIG_AMRWB_DECODER 0
+#define CONFIG_APE_DECODER 0
+#define CONFIG_ATRAC1_DECODER 0
+#define CONFIG_ATRAC3_DECODER 0
+#define CONFIG_ATRAC3P_DECODER 0
+#define CONFIG_BINKAUDIO_DCT_DECODER 0
+#define CONFIG_BINKAUDIO_RDFT_DECODER 0
+#define CONFIG_BMV_AUDIO_DECODER 0
+#define CONFIG_COOK_DECODER 0
+#define CONFIG_DCA_DECODER 0
+#define CONFIG_DSD_LSBF_DECODER 0
+#define CONFIG_DSD_MSBF_DECODER 0
+#define CONFIG_DSD_LSBF_PLANAR_DECODER 0
+#define CONFIG_DSD_MSBF_PLANAR_DECODER 0
+#define CONFIG_DSICINAUDIO_DECODER 0
+#define CONFIG_EAC3_DECODER 0
+#define CONFIG_EVRC_DECODER 0
+#define CONFIG_FFWAVESYNTH_DECODER 0
+#define CONFIG_FLAC_DECODER 0
+#define CONFIG_G723_1_DECODER 0
+#define CONFIG_G729_DECODER 0
+#define CONFIG_GSM_DECODER 0
+#define CONFIG_GSM_MS_DECODER 0
+#define CONFIG_IAC_DECODER 0
+#define CONFIG_IMC_DECODER 0
+#define CONFIG_MACE3_DECODER 0
+#define CONFIG_MACE6_DECODER 0
+#define CONFIG_METASOUND_DECODER 0
+#define CONFIG_MLP_DECODER 0
+#define CONFIG_MP1_DECODER 0
+#define CONFIG_MP1FLOAT_DECODER 0
+#define CONFIG_MP2_DECODER 0
+#define CONFIG_MP2FLOAT_DECODER 0
+#define CONFIG_MP3_DECODER 0
+#define CONFIG_MP3FLOAT_DECODER 0
+#define CONFIG_MP3ADU_DECODER 0
+#define CONFIG_MP3ADUFLOAT_DECODER 0
+#define CONFIG_MP3ON4_DECODER 0
+#define CONFIG_MP3ON4FLOAT_DECODER 0
+#define CONFIG_MPC7_DECODER 0
+#define CONFIG_MPC8_DECODER 0
+#define CONFIG_NELLYMOSER_DECODER 0
+#define CONFIG_ON2AVC_DECODER 0
+#define CONFIG_OPUS_DECODER 0
+#define CONFIG_PAF_AUDIO_DECODER 0
+#define CONFIG_QCELP_DECODER 0
+#define CONFIG_QDM2_DECODER 0
+#define CONFIG_RA_144_DECODER 0
+#define CONFIG_RA_288_DECODER 0
+#define CONFIG_RALF_DECODER 0
+#define CONFIG_SHORTEN_DECODER 0
+#define CONFIG_SIPR_DECODER 0
+#define CONFIG_SMACKAUD_DECODER 0
+#define CONFIG_SONIC_DECODER 0
+#define CONFIG_TAK_DECODER 0
+#define CONFIG_TRUEHD_DECODER 0
+#define CONFIG_TRUESPEECH_DECODER 0
+#define CONFIG_TTA_DECODER 0
+#define CONFIG_TWINVQ_DECODER 0
+#define CONFIG_VMDAUDIO_DECODER 0
+#define CONFIG_VORBIS_DECODER 0
+#define CONFIG_WAVPACK_DECODER 0
+#define CONFIG_WMALOSSLESS_DECODER 0
+#define CONFIG_WMAPRO_DECODER 0
+#define CONFIG_WMAV1_DECODER 0
+#define CONFIG_WMAV2_DECODER 0
+#define CONFIG_WMAVOICE_DECODER 0
+#define CONFIG_WS_SND1_DECODER 0
+#define CONFIG_PCM_ALAW_DECODER 0
+#define CONFIG_PCM_BLURAY_DECODER 0
+#define CONFIG_PCM_DVD_DECODER 0
+#define CONFIG_PCM_F32BE_DECODER 0
+#define CONFIG_PCM_F32LE_DECODER 0
+#define CONFIG_PCM_F64BE_DECODER 0
+#define CONFIG_PCM_F64LE_DECODER 0
+#define CONFIG_PCM_LXF_DECODER 0
+#define CONFIG_PCM_MULAW_DECODER 0
+#define CONFIG_PCM_S8_DECODER 0
+#define CONFIG_PCM_S8_PLANAR_DECODER 0
+#define CONFIG_PCM_S16BE_DECODER 0
+#define CONFIG_PCM_S16BE_PLANAR_DECODER 0
+#define CONFIG_PCM_S16LE_DECODER 0
+#define CONFIG_PCM_S16LE_PLANAR_DECODER 0
+#define CONFIG_PCM_S24BE_DECODER 0
+#define CONFIG_PCM_S24DAUD_DECODER 0
+#define CONFIG_PCM_S24LE_DECODER 0
+#define CONFIG_PCM_S24LE_PLANAR_DECODER 0
+#define CONFIG_PCM_S32BE_DECODER 0
+#define CONFIG_PCM_S32LE_DECODER 0
+#define CONFIG_PCM_S32LE_PLANAR_DECODER 0
+#define CONFIG_PCM_U8_DECODER 0
+#define CONFIG_PCM_U16BE_DECODER 0
+#define CONFIG_PCM_U16LE_DECODER 0
+#define CONFIG_PCM_U24BE_DECODER 0
+#define CONFIG_PCM_U24LE_DECODER 0
+#define CONFIG_PCM_U32BE_DECODER 0
+#define CONFIG_PCM_U32LE_DECODER 0
+#define CONFIG_PCM_ZORK_DECODER 0
+#define CONFIG_INTERPLAY_DPCM_DECODER 0
+#define CONFIG_ROQ_DPCM_DECODER 0
+#define CONFIG_SOL_DPCM_DECODER 0
+#define CONFIG_XAN_DPCM_DECODER 0
+#define CONFIG_ADPCM_4XM_DECODER 0
+#define CONFIG_ADPCM_ADX_DECODER 0
+#define CONFIG_ADPCM_AFC_DECODER 0
+#define CONFIG_ADPCM_CT_DECODER 0
+#define CONFIG_ADPCM_DTK_DECODER 0
+#define CONFIG_ADPCM_EA_DECODER 0
+#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0
+#define CONFIG_ADPCM_EA_R1_DECODER 0
+#define CONFIG_ADPCM_EA_R2_DECODER 0
+#define CONFIG_ADPCM_EA_R3_DECODER 0
+#define CONFIG_ADPCM_EA_XAS_DECODER 0
+#define CONFIG_ADPCM_G722_DECODER 0
+#define CONFIG_ADPCM_G726_DECODER 0
+#define CONFIG_ADPCM_G726LE_DECODER 0
+#define CONFIG_ADPCM_IMA_AMV_DECODER 0
+#define CONFIG_ADPCM_IMA_APC_DECODER 0
+#define CONFIG_ADPCM_IMA_DK3_DECODER 0
+#define CONFIG_ADPCM_IMA_DK4_DECODER 0
+#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0
+#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0
+#define CONFIG_ADPCM_IMA_ISS_DECODER 0
+#define CONFIG_ADPCM_IMA_OKI_DECODER 0
+#define CONFIG_ADPCM_IMA_QT_DECODER 0
+#define CONFIG_ADPCM_IMA_RAD_DECODER 0
+#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0
+#define CONFIG_ADPCM_IMA_WAV_DECODER 0
+#define CONFIG_ADPCM_IMA_WS_DECODER 0
+#define CONFIG_ADPCM_MS_DECODER 0
+#define CONFIG_ADPCM_SBPRO_2_DECODER 0
+#define CONFIG_ADPCM_SBPRO_3_DECODER 0
+#define CONFIG_ADPCM_SBPRO_4_DECODER 0
+#define CONFIG_ADPCM_SWF_DECODER 0
+#define CONFIG_ADPCM_THP_DECODER 0
+#define CONFIG_ADPCM_VIMA_DECODER 0
+#define CONFIG_ADPCM_XA_DECODER 0
+#define CONFIG_ADPCM_YAMAHA_DECODER 0
+#define CONFIG_VIMA_DECODER 0
+#define CONFIG_SSA_DECODER 0
+#define CONFIG_ASS_DECODER 0
+#define CONFIG_DVBSUB_DECODER 0
+#define CONFIG_DVDSUB_DECODER 0
+#define CONFIG_JACOSUB_DECODER 0
+#define CONFIG_MICRODVD_DECODER 0
+#define CONFIG_MOVTEXT_DECODER 0
+#define CONFIG_MPL2_DECODER 0
+#define CONFIG_PGSSUB_DECODER 0
+#define CONFIG_PJS_DECODER 0
+#define CONFIG_REALTEXT_DECODER 0
+#define CONFIG_SAMI_DECODER 0
+#define CONFIG_SRT_DECODER 0
+#define CONFIG_STL_DECODER 0
+#define CONFIG_SUBRIP_DECODER 0
+#define CONFIG_SUBVIEWER_DECODER 0
+#define CONFIG_SUBVIEWER1_DECODER 0
+#define CONFIG_TEXT_DECODER 0
+#define CONFIG_VPLAYER_DECODER 0
+#define CONFIG_WEBVTT_DECODER 0
+#define CONFIG_XSUB_DECODER 0
+#define CONFIG_LIBCELT_DECODER 0
+#define CONFIG_LIBFDK_AAC_DECODER 0
+#define CONFIG_LIBGSM_DECODER 0
+#define CONFIG_LIBGSM_MS_DECODER 0
+#define CONFIG_LIBILBC_DECODER 0
+#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0
+#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0
+#define CONFIG_LIBOPENJPEG_DECODER 0
+#define CONFIG_LIBOPUS_DECODER 0
+#define CONFIG_LIBSCHROEDINGER_DECODER 0
+#define CONFIG_LIBSPEEX_DECODER 0
+#define CONFIG_LIBSTAGEFRIGHT_H264_DECODER 0
+#define CONFIG_LIBUTVIDEO_DECODER 0
+#define CONFIG_LIBVORBIS_DECODER 0
+#define CONFIG_LIBVPX_VP8_DECODER 0
+#define CONFIG_LIBVPX_VP9_DECODER 0
+#define CONFIG_LIBZVBI_TELETEXT_DECODER 0
+#define CONFIG_BINTEXT_DECODER 0
+#define CONFIG_XBIN_DECODER 0
+#define CONFIG_IDF_DECODER 0
+#define CONFIG_AAC_DEMUXER 0
+#define CONFIG_AC3_DEMUXER 0
+#define CONFIG_ACT_DEMUXER 0
+#define CONFIG_ADF_DEMUXER 0
+#define CONFIG_ADP_DEMUXER 0
+#define CONFIG_ADX_DEMUXER 0
+#define CONFIG_AEA_DEMUXER 0
+#define CONFIG_AFC_DEMUXER 0
+#define CONFIG_AIFF_DEMUXER 0
+#define CONFIG_AMR_DEMUXER 0
+#define CONFIG_ANM_DEMUXER 0
+#define CONFIG_APC_DEMUXER 0
+#define CONFIG_APE_DEMUXER 0
+#define CONFIG_AQTITLE_DEMUXER 0
+#define CONFIG_ASF_DEMUXER 0
+#define CONFIG_ASS_DEMUXER 0
+#define CONFIG_AST_DEMUXER 0
+#define CONFIG_AU_DEMUXER 0
+#define CONFIG_AVI_DEMUXER 0
+#define CONFIG_AVISYNTH_DEMUXER 0
+#define CONFIG_AVR_DEMUXER 0
+#define CONFIG_AVS_DEMUXER 0
+#define CONFIG_BETHSOFTVID_DEMUXER 0
+#define CONFIG_BFI_DEMUXER 0
+#define CONFIG_BINTEXT_DEMUXER 0
+#define CONFIG_BINK_DEMUXER 0
+#define CONFIG_BIT_DEMUXER 0
+#define CONFIG_BMV_DEMUXER 0
+#define CONFIG_BRSTM_DEMUXER 0
+#define CONFIG_BOA_DEMUXER 0
+#define CONFIG_C93_DEMUXER 0
+#define CONFIG_CAF_DEMUXER 0
+#define CONFIG_CAVSVIDEO_DEMUXER 0
+#define CONFIG_CDG_DEMUXER 0
+#define CONFIG_CDXL_DEMUXER 0
+#define CONFIG_CINE_DEMUXER 0
+#define CONFIG_CONCAT_DEMUXER 0
+#define CONFIG_DATA_DEMUXER 0
+#define CONFIG_DAUD_DEMUXER 0
+#define CONFIG_DFA_DEMUXER 0
+#define CONFIG_DIRAC_DEMUXER 0
+#define CONFIG_DNXHD_DEMUXER 0
+#define CONFIG_DSF_DEMUXER 0
+#define CONFIG_DSICIN_DEMUXER 0
+#define CONFIG_DTS_DEMUXER 0
+#define CONFIG_DTSHD_DEMUXER 0
+#define CONFIG_DV_DEMUXER 0
+#define CONFIG_DXA_DEMUXER 0
+#define CONFIG_EA_DEMUXER 0
+#define CONFIG_EA_CDATA_DEMUXER 0
+#define CONFIG_EAC3_DEMUXER 0
+#define CONFIG_EPAF_DEMUXER 0
+#define CONFIG_FFM_DEMUXER 0
+#define CONFIG_FFMETADATA_DEMUXER 0
+#define CONFIG_FILMSTRIP_DEMUXER 0
+#define CONFIG_FLAC_DEMUXER 0
+#define CONFIG_FLIC_DEMUXER 0
+#define CONFIG_FLV_DEMUXER 0
+#define CONFIG_LIVE_FLV_DEMUXER 0
+#define CONFIG_FOURXM_DEMUXER 0
+#define CONFIG_FRM_DEMUXER 0
+#define CONFIG_G722_DEMUXER 0
+#define CONFIG_G723_1_DEMUXER 0
+#define CONFIG_G729_DEMUXER 0
+#define CONFIG_GIF_DEMUXER 0
+#define CONFIG_GSM_DEMUXER 0
+#define CONFIG_GXF_DEMUXER 0
+#define CONFIG_H261_DEMUXER 0
+#define CONFIG_H263_DEMUXER 0
+#define CONFIG_H264_DEMUXER 0
+#define CONFIG_HEVC_DEMUXER 1
+#define CONFIG_HLS_DEMUXER 0
+#define CONFIG_HNM_DEMUXER 0
+#define CONFIG_ICO_DEMUXER 0
+#define CONFIG_IDCIN_DEMUXER 0
+#define CONFIG_IDF_DEMUXER 0
+#define CONFIG_IFF_DEMUXER 0
+#define CONFIG_ILBC_DEMUXER 0
+#define CONFIG_IMAGE2_DEMUXER 0
+#define CONFIG_IMAGE2PIPE_DEMUXER 0
+#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0
+#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0
+#define CONFIG_INGENIENT_DEMUXER 0
+#define CONFIG_IPMOVIE_DEMUXER 0
+#define CONFIG_IRCAM_DEMUXER 0
+#define CONFIG_ISS_DEMUXER 0
+#define CONFIG_IV8_DEMUXER 0
+#define CONFIG_IVF_DEMUXER 0
+#define CONFIG_JACOSUB_DEMUXER 0
+#define CONFIG_JV_DEMUXER 0
+#define CONFIG_LATM_DEMUXER 0
+#define CONFIG_LMLM4_DEMUXER 0
+#define CONFIG_LOAS_DEMUXER 0
+#define CONFIG_LRC_DEMUXER 0
+#define CONFIG_LVF_DEMUXER 0
+#define CONFIG_LXF_DEMUXER 0
+#define CONFIG_M4V_DEMUXER 0
+#define CONFIG_MATROSKA_DEMUXER 0
+#define CONFIG_MGSTS_DEMUXER 0
+#define CONFIG_MICRODVD_DEMUXER 0
+#define CONFIG_MJPEG_DEMUXER 0
+#define CONFIG_MLP_DEMUXER 0
+#define CONFIG_MLV_DEMUXER 0
+#define CONFIG_MM_DEMUXER 0
+#define CONFIG_MMF_DEMUXER 0
+#define CONFIG_MOV_DEMUXER 0
+#define CONFIG_MP3_DEMUXER 0
+#define CONFIG_MPC_DEMUXER 0
+#define CONFIG_MPC8_DEMUXER 0
+#define CONFIG_MPEGPS_DEMUXER 0
+#define CONFIG_MPEGTS_DEMUXER 0
+#define CONFIG_MPEGTSRAW_DEMUXER 0
+#define CONFIG_MPEGVIDEO_DEMUXER 0
+#define CONFIG_MPL2_DEMUXER 0
+#define CONFIG_MPSUB_DEMUXER 0
+#define CONFIG_MSNWC_TCP_DEMUXER 0
+#define CONFIG_MTV_DEMUXER 0
+#define CONFIG_MV_DEMUXER 0
+#define CONFIG_MVI_DEMUXER 0
+#define CONFIG_MXF_DEMUXER 0
+#define CONFIG_MXG_DEMUXER 0
+#define CONFIG_NC_DEMUXER 0
+#define CONFIG_NISTSPHERE_DEMUXER 0
+#define CONFIG_NSV_DEMUXER 0
+#define CONFIG_NUT_DEMUXER 0
+#define CONFIG_NUV_DEMUXER 0
+#define CONFIG_OGG_DEMUXER 0
+#define CONFIG_OMA_DEMUXER 0
+#define CONFIG_PAF_DEMUXER 0
+#define CONFIG_PCM_ALAW_DEMUXER 0
+#define CONFIG_PCM_MULAW_DEMUXER 0
+#define CONFIG_PCM_F64BE_DEMUXER 0
+#define CONFIG_PCM_F64LE_DEMUXER 0
+#define CONFIG_PCM_F32BE_DEMUXER 0
+#define CONFIG_PCM_F32LE_DEMUXER 0
+#define CONFIG_PCM_S32BE_DEMUXER 0
+#define CONFIG_PCM_S32LE_DEMUXER 0
+#define CONFIG_PCM_S24BE_DEMUXER 0
+#define CONFIG_PCM_S24LE_DEMUXER 0
+#define CONFIG_PCM_S16BE_DEMUXER 0
+#define CONFIG_PCM_S16LE_DEMUXER 0
+#define CONFIG_PCM_S8_DEMUXER 0
+#define CONFIG_PCM_U32BE_DEMUXER 0
+#define CONFIG_PCM_U32LE_DEMUXER 0
+#define CONFIG_PCM_U24BE_DEMUXER 0
+#define CONFIG_PCM_U24LE_DEMUXER 0
+#define CONFIG_PCM_U16BE_DEMUXER 0
+#define CONFIG_PCM_U16LE_DEMUXER 0
+#define CONFIG_PCM_U8_DEMUXER 0
+#define CONFIG_PJS_DEMUXER 0
+#define CONFIG_PMP_DEMUXER 0
+#define CONFIG_PVA_DEMUXER 0
+#define CONFIG_PVF_DEMUXER 0
+#define CONFIG_QCP_DEMUXER 0
+#define CONFIG_R3D_DEMUXER 0
+#define CONFIG_RAWVIDEO_DEMUXER 0
+#define CONFIG_REALTEXT_DEMUXER 0
+#define CONFIG_REDSPARK_DEMUXER 0
+#define CONFIG_RL2_DEMUXER 0
+#define CONFIG_RM_DEMUXER 0
+#define CONFIG_ROQ_DEMUXER 0
+#define CONFIG_RPL_DEMUXER 0
+#define CONFIG_RSD_DEMUXER 0
+#define CONFIG_RSO_DEMUXER 0
+#define CONFIG_RTP_DEMUXER 0
+#define CONFIG_RTSP_DEMUXER 0
+#define CONFIG_SAMI_DEMUXER 0
+#define CONFIG_SAP_DEMUXER 0
+#define CONFIG_SBG_DEMUXER 0
+#define CONFIG_SDP_DEMUXER 0
+#define CONFIG_SDR2_DEMUXER 0
+#define CONFIG_SEGAFILM_DEMUXER 0
+#define CONFIG_SHORTEN_DEMUXER 0
+#define CONFIG_SIFF_DEMUXER 0
+#define CONFIG_SLN_DEMUXER 0
+#define CONFIG_SMACKER_DEMUXER 0
+#define CONFIG_SMJPEG_DEMUXER 0
+#define CONFIG_SMUSH_DEMUXER 0
+#define CONFIG_SOL_DEMUXER 0
+#define CONFIG_SOX_DEMUXER 0
+#define CONFIG_SPDIF_DEMUXER 0
+#define CONFIG_SRT_DEMUXER 0
+#define CONFIG_STR_DEMUXER 0
+#define CONFIG_STL_DEMUXER 0
+#define CONFIG_SUBVIEWER1_DEMUXER 0
+#define CONFIG_SUBVIEWER_DEMUXER 0
+#define CONFIG_SUP_DEMUXER 0
+#define CONFIG_SWF_DEMUXER 0
+#define CONFIG_TAK_DEMUXER 0
+#define CONFIG_TEDCAPTIONS_DEMUXER 0
+#define CONFIG_THP_DEMUXER 0
+#define CONFIG_TIERTEXSEQ_DEMUXER 0
+#define CONFIG_TMV_DEMUXER 0
+#define CONFIG_TRUEHD_DEMUXER 0
+#define CONFIG_TTA_DEMUXER 0
+#define CONFIG_TXD_DEMUXER 0
+#define CONFIG_TTY_DEMUXER 0
+#define CONFIG_VC1_DEMUXER 0
+#define CONFIG_VC1T_DEMUXER 0
+#define CONFIG_VIVO_DEMUXER 0
+#define CONFIG_VMD_DEMUXER 0
+#define CONFIG_VOBSUB_DEMUXER 0
+#define CONFIG_VOC_DEMUXER 0
+#define CONFIG_VPLAYER_DEMUXER 0
+#define CONFIG_VQF_DEMUXER 0
+#define CONFIG_W64_DEMUXER 0
+#define CONFIG_WAV_DEMUXER 0
+#define CONFIG_WC3_DEMUXER 0
+#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0
+#define CONFIG_WEBVTT_DEMUXER 0
+#define CONFIG_WSAUD_DEMUXER 0
+#define CONFIG_WSVQA_DEMUXER 0
+#define CONFIG_WTV_DEMUXER 0
+#define CONFIG_WV_DEMUXER 0
+#define CONFIG_XA_DEMUXER 0
+#define CONFIG_XBIN_DEMUXER 0
+#define CONFIG_XMV_DEMUXER 0
+#define CONFIG_XWMA_DEMUXER 0
+#define CONFIG_YOP_DEMUXER 0
+#define CONFIG_YUV4MPEGPIPE_DEMUXER 0
+#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0
+#define CONFIG_LIBGME_DEMUXER 0
+#define CONFIG_LIBMODPLUG_DEMUXER 0
+#define CONFIG_LIBNUT_DEMUXER 0
+#define CONFIG_LIBQUVI_DEMUXER 0
+#define CONFIG_A64MULTI_ENCODER 0
+#define CONFIG_A64MULTI5_ENCODER 0
+#define CONFIG_ALIAS_PIX_ENCODER 0
+#define CONFIG_AMV_ENCODER 0
+#define CONFIG_ASV1_ENCODER 0
+#define CONFIG_ASV2_ENCODER 0
+#define CONFIG_AVRP_ENCODER 0
+#define CONFIG_AVUI_ENCODER 0
+#define CONFIG_AYUV_ENCODER 0
+#define CONFIG_BMP_ENCODER 0
+#define CONFIG_CINEPAK_ENCODER 0
+#define CONFIG_CLJR_ENCODER 0
+#define CONFIG_COMFORTNOISE_ENCODER 0
+#define CONFIG_DNXHD_ENCODER 0
+#define CONFIG_DPX_ENCODER 0
+#define CONFIG_DVVIDEO_ENCODER 0
+#define CONFIG_FFV1_ENCODER 0
+#define CONFIG_FFVHUFF_ENCODER 0
+#define CONFIG_FLASHSV_ENCODER 0
+#define CONFIG_FLASHSV2_ENCODER 0
+#define CONFIG_FLV_ENCODER 0
+#define CONFIG_GIF_ENCODER 0
+#define CONFIG_H261_ENCODER 0
+#define CONFIG_H263_ENCODER 0
+#define CONFIG_H263P_ENCODER 0
+#define CONFIG_HUFFYUV_ENCODER 0
+#define CONFIG_JPEG2000_ENCODER 0
+#define CONFIG_JPEGLS_ENCODER 0
+#define CONFIG_LJPEG_ENCODER 0
+#define CONFIG_MJPEG_ENCODER 0
+#define CONFIG_MPEG1VIDEO_ENCODER 0
+#define CONFIG_MPEG2VIDEO_ENCODER 0
+#define CONFIG_MPEG4_ENCODER 0
+#define CONFIG_MSMPEG4V2_ENCODER 0
+#define CONFIG_MSMPEG4V3_ENCODER 0
+#define CONFIG_MSVIDEO1_ENCODER 0
+#define CONFIG_PAM_ENCODER 0
+#define CONFIG_PBM_ENCODER 0
+#define CONFIG_PCX_ENCODER 0
+#define CONFIG_PGM_ENCODER 0
+#define CONFIG_PGMYUV_ENCODER 0
+#define CONFIG_PNG_ENCODER 0
+#define CONFIG_PPM_ENCODER 0
+#define CONFIG_PRORES_ENCODER 0
+#define CONFIG_PRORES_AW_ENCODER 0
+#define CONFIG_PRORES_KS_ENCODER 0
+#define CONFIG_QTRLE_ENCODER 0
+#define CONFIG_R10K_ENCODER 0
+#define CONFIG_R210_ENCODER 0
+#define CONFIG_RAWVIDEO_ENCODER 0
+#define CONFIG_ROQ_ENCODER 0
+#define CONFIG_RV10_ENCODER 0
+#define CONFIG_RV20_ENCODER 0
+#define CONFIG_S302M_ENCODER 0
+#define CONFIG_SGI_ENCODER 0
+#define CONFIG_SNOW_ENCODER 0
+#define CONFIG_SUNRAST_ENCODER 0
+#define CONFIG_SVQ1_ENCODER 0
+#define CONFIG_TARGA_ENCODER 0
+#define CONFIG_TIFF_ENCODER 0
+#define CONFIG_UTVIDEO_ENCODER 0
+#define CONFIG_V210_ENCODER 0
+#define CONFIG_V308_ENCODER 0
+#define CONFIG_V408_ENCODER 0
+#define CONFIG_V410_ENCODER 0
+#define CONFIG_WMV1_ENCODER 0
+#define CONFIG_WMV2_ENCODER 0
+#define CONFIG_XBM_ENCODER 0
+#define CONFIG_XFACE_ENCODER 0
+#define CONFIG_XWD_ENCODER 0
+#define CONFIG_Y41P_ENCODER 0
+#define CONFIG_YUV4_ENCODER 0
+#define CONFIG_ZLIB_ENCODER 0
+#define CONFIG_ZMBV_ENCODER 0
+#define CONFIG_AAC_ENCODER 0
+#define CONFIG_AC3_ENCODER 0
+#define CONFIG_AC3_FIXED_ENCODER 0
+#define CONFIG_ALAC_ENCODER 0
+#define CONFIG_DCA_ENCODER 0
+#define CONFIG_EAC3_ENCODER 0
+#define CONFIG_FLAC_ENCODER 0
+#define CONFIG_G723_1_ENCODER 0
+#define CONFIG_MP2_ENCODER 0
+#define CONFIG_MP2FIXED_ENCODER 0
+#define CONFIG_NELLYMOSER_ENCODER 0
+#define CONFIG_RA_144_ENCODER 0
+#define CONFIG_SONIC_ENCODER 0
+#define CONFIG_SONIC_LS_ENCODER 0
+#define CONFIG_TTA_ENCODER 0
+#define CONFIG_VORBIS_ENCODER 0
+#define CONFIG_WAVPACK_ENCODER 0
+#define CONFIG_WMAV1_ENCODER 0
+#define CONFIG_WMAV2_ENCODER 0
+#define CONFIG_PCM_ALAW_ENCODER 0
+#define CONFIG_PCM_F32BE_ENCODER 0
+#define CONFIG_PCM_F32LE_ENCODER 0
+#define CONFIG_PCM_F64BE_ENCODER 0
+#define CONFIG_PCM_F64LE_ENCODER 0
+#define CONFIG_PCM_MULAW_ENCODER 0
+#define CONFIG_PCM_S8_ENCODER 0
+#define CONFIG_PCM_S8_PLANAR_ENCODER 0
+#define CONFIG_PCM_S16BE_ENCODER 0
+#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S16LE_ENCODER 0
+#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S24BE_ENCODER 0
+#define CONFIG_PCM_S24DAUD_ENCODER 0
+#define CONFIG_PCM_S24LE_ENCODER 0
+#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S32BE_ENCODER 0
+#define CONFIG_PCM_S32LE_ENCODER 0
+#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0
+#define CONFIG_PCM_U8_ENCODER 0
+#define CONFIG_PCM_U16BE_ENCODER 0
+#define CONFIG_PCM_U16LE_ENCODER 0
+#define CONFIG_PCM_U24BE_ENCODER 0
+#define CONFIG_PCM_U24LE_ENCODER 0
+#define CONFIG_PCM_U32BE_ENCODER 0
+#define CONFIG_PCM_U32LE_ENCODER 0
+#define CONFIG_ROQ_DPCM_ENCODER 0
+#define CONFIG_ADPCM_ADX_ENCODER 0
+#define CONFIG_ADPCM_G722_ENCODER 0
+#define CONFIG_ADPCM_G726_ENCODER 0
+#define CONFIG_ADPCM_IMA_QT_ENCODER 0
+#define CONFIG_ADPCM_IMA_WAV_ENCODER 0
+#define CONFIG_ADPCM_MS_ENCODER 0
+#define CONFIG_ADPCM_SWF_ENCODER 0
+#define CONFIG_ADPCM_YAMAHA_ENCODER 0
+#define CONFIG_SSA_ENCODER 0
+#define CONFIG_ASS_ENCODER 0
+#define CONFIG_DVBSUB_ENCODER 0
+#define CONFIG_DVDSUB_ENCODER 0
+#define CONFIG_MOVTEXT_ENCODER 0
+#define CONFIG_SRT_ENCODER 0
+#define CONFIG_SUBRIP_ENCODER 0
+#define CONFIG_WEBVTT_ENCODER 0
+#define CONFIG_XSUB_ENCODER 0
+#define CONFIG_LIBFAAC_ENCODER 0
+#define CONFIG_LIBFDK_AAC_ENCODER 0
+#define CONFIG_LIBGSM_ENCODER 0
+#define CONFIG_LIBGSM_MS_ENCODER 0
+#define CONFIG_LIBILBC_ENCODER 0
+#define CONFIG_LIBMP3LAME_ENCODER 0
+#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0
+#define CONFIG_LIBOPENJPEG_ENCODER 0
+#define CONFIG_LIBOPUS_ENCODER 0
+#define CONFIG_LIBSCHROEDINGER_ENCODER 0
+#define CONFIG_LIBSHINE_ENCODER 0
+#define CONFIG_LIBSPEEX_ENCODER 0
+#define CONFIG_LIBTHEORA_ENCODER 0
+#define CONFIG_LIBTWOLAME_ENCODER 0
+#define CONFIG_LIBUTVIDEO_ENCODER 0
+#define CONFIG_LIBVO_AACENC_ENCODER 0
+#define CONFIG_LIBVO_AMRWBENC_ENCODER 0
+#define CONFIG_LIBVORBIS_ENCODER 0
+#define CONFIG_LIBVPX_VP8_ENCODER 0
+#define CONFIG_LIBVPX_VP9_ENCODER 0
+#define CONFIG_LIBWAVPACK_ENCODER 0
+#define CONFIG_LIBWEBP_ENCODER 0
+#define CONFIG_LIBX264_ENCODER 0
+#define CONFIG_LIBX264RGB_ENCODER 0
+#define CONFIG_LIBX265_ENCODER 0
+#define CONFIG_LIBXAVS_ENCODER 0
+#define CONFIG_LIBXVID_ENCODER 0
+#define CONFIG_LIBAACPLUS_ENCODER 0
+#define CONFIG_ADELAY_FILTER 0
+#define CONFIG_AECHO_FILTER 0
+#define CONFIG_AEVAL_FILTER 0
+#define CONFIG_AFADE_FILTER 0
+#define CONFIG_AFORMAT_FILTER 1
+#define CONFIG_AINTERLEAVE_FILTER 0
+#define CONFIG_ALLPASS_FILTER 0
+#define CONFIG_AMERGE_FILTER 0
+#define CONFIG_AMIX_FILTER 0
+#define CONFIG_ANULL_FILTER 1
+#define CONFIG_APAD_FILTER 0
+#define CONFIG_APERMS_FILTER 0
+#define CONFIG_APHASER_FILTER 0
+#define CONFIG_ARESAMPLE_FILTER 0
+#define CONFIG_ASELECT_FILTER 0
+#define CONFIG_ASENDCMD_FILTER 0
+#define CONFIG_ASETNSAMPLES_FILTER 0
+#define CONFIG_ASETPTS_FILTER 0
+#define CONFIG_ASETRATE_FILTER 0
+#define CONFIG_ASETTB_FILTER 0
+#define CONFIG_ASHOWINFO_FILTER 0
+#define CONFIG_ASPLIT_FILTER 0
+#define CONFIG_ASTATS_FILTER 0
+#define CONFIG_ASTREAMSYNC_FILTER 0
+#define CONFIG_ASYNCTS_FILTER 0
+#define CONFIG_ATEMPO_FILTER 0
+#define CONFIG_ATRIM_FILTER 1
+#define CONFIG_AZMQ_FILTER 0
+#define CONFIG_BANDPASS_FILTER 0
+#define CONFIG_BANDREJECT_FILTER 0
+#define CONFIG_BASS_FILTER 0
+#define CONFIG_BIQUAD_FILTER 0
+#define CONFIG_BS2B_FILTER 0
+#define CONFIG_CHANNELMAP_FILTER 0
+#define CONFIG_CHANNELSPLIT_FILTER 0
+#define CONFIG_COMPAND_FILTER 0
+#define CONFIG_EARWAX_FILTER 0
+#define CONFIG_EBUR128_FILTER 0
+#define CONFIG_EQUALIZER_FILTER 0
+#define CONFIG_FLANGER_FILTER 0
+#define CONFIG_HIGHPASS_FILTER 0
+#define CONFIG_JOIN_FILTER 0
+#define CONFIG_LADSPA_FILTER 0
+#define CONFIG_LOWPASS_FILTER 0
+#define CONFIG_PAN_FILTER 0
+#define CONFIG_REPLAYGAIN_FILTER 0
+#define CONFIG_RESAMPLE_FILTER 0
+#define CONFIG_SILENCEDETECT_FILTER 0
+#define CONFIG_SILENCEREMOVE_FILTER 0
+#define CONFIG_TREBLE_FILTER 0
+#define CONFIG_VOLUME_FILTER 0
+#define CONFIG_VOLUMEDETECT_FILTER 0
+#define CONFIG_AEVALSRC_FILTER 0
+#define CONFIG_ANULLSRC_FILTER 0
+#define CONFIG_FLITE_FILTER 0
+#define CONFIG_SINE_FILTER 0
+#define CONFIG_ANULLSINK_FILTER 0
+#define CONFIG_ALPHAEXTRACT_FILTER 0
+#define CONFIG_ALPHAMERGE_FILTER 0
+#define CONFIG_ASS_FILTER 0
+#define CONFIG_BBOX_FILTER 0
+#define CONFIG_BLACKDETECT_FILTER 0
+#define CONFIG_BLACKFRAME_FILTER 0
+#define CONFIG_BLEND_FILTER 0
+#define CONFIG_BOXBLUR_FILTER 0
+#define CONFIG_CODECVIEW_FILTER 0
+#define CONFIG_COLORBALANCE_FILTER 0
+#define CONFIG_COLORCHANNELMIXER_FILTER 0
+#define CONFIG_COLORMATRIX_FILTER 0
+#define CONFIG_COPY_FILTER 0
+#define CONFIG_CROP_FILTER 1
+#define CONFIG_CROPDETECT_FILTER 0
+#define CONFIG_CURVES_FILTER 0
+#define CONFIG_DCTDNOIZ_FILTER 0
+#define CONFIG_DECIMATE_FILTER 0
+#define CONFIG_DEJUDDER_FILTER 0
+#define CONFIG_DELOGO_FILTER 0
+#define CONFIG_DESHAKE_FILTER 0
+#define CONFIG_DRAWBOX_FILTER 0
+#define CONFIG_DRAWGRID_FILTER 0
+#define CONFIG_DRAWTEXT_FILTER 0
+#define CONFIG_EDGEDETECT_FILTER 0
+#define CONFIG_ELBG_FILTER 0
+#define CONFIG_EXTRACTPLANES_FILTER 0
+#define CONFIG_FADE_FILTER 0
+#define CONFIG_FIELD_FILTER 0
+#define CONFIG_FIELDMATCH_FILTER 0
+#define CONFIG_FIELDORDER_FILTER 0
+#define CONFIG_FORMAT_FILTER 1
+#define CONFIG_FPS_FILTER 0
+#define CONFIG_FRAMEPACK_FILTER 0
+#define CONFIG_FRAMESTEP_FILTER 0
+#define CONFIG_FREI0R_FILTER 0
+#define CONFIG_GEQ_FILTER 0
+#define CONFIG_GRADFUN_FILTER 0
+#define CONFIG_HALDCLUT_FILTER 0
+#define CONFIG_HFLIP_FILTER 1
+#define CONFIG_HISTEQ_FILTER 0
+#define CONFIG_HISTOGRAM_FILTER 0
+#define CONFIG_HQDN3D_FILTER 0
+#define CONFIG_HQX_FILTER 0
+#define CONFIG_HUE_FILTER 0
+#define CONFIG_IDET_FILTER 0
+#define CONFIG_IL_FILTER 0
+#define CONFIG_INTERLACE_FILTER 0
+#define CONFIG_INTERLEAVE_FILTER 0
+#define CONFIG_KERNDEINT_FILTER 0
+#define CONFIG_LENSCORRECTION_FILTER 0
+#define CONFIG_LUT3D_FILTER 0
+#define CONFIG_LUT_FILTER 0
+#define CONFIG_LUTRGB_FILTER 0
+#define CONFIG_LUTYUV_FILTER 0
+#define CONFIG_MCDEINT_FILTER 0
+#define CONFIG_MERGEPLANES_FILTER 0
+#define CONFIG_MP_FILTER 0
+#define CONFIG_MPDECIMATE_FILTER 0
+#define CONFIG_NEGATE_FILTER 0
+#define CONFIG_NOFORMAT_FILTER 0
+#define CONFIG_NOISE_FILTER 0
+#define CONFIG_NULL_FILTER 1
+#define CONFIG_OCV_FILTER 0
+#define CONFIG_OVERLAY_FILTER 0
+#define CONFIG_OWDENOISE_FILTER 0
+#define CONFIG_PAD_FILTER 0
+#define CONFIG_PERMS_FILTER 0
+#define CONFIG_PERSPECTIVE_FILTER 0
+#define CONFIG_PHASE_FILTER 0
+#define CONFIG_PIXDESCTEST_FILTER 0
+#define CONFIG_PP_FILTER 0
+#define CONFIG_PSNR_FILTER 0
+#define CONFIG_PULLUP_FILTER 0
+#define CONFIG_REMOVELOGO_FILTER 0
+#define CONFIG_ROTATE_FILTER 1
+#define CONFIG_SAB_FILTER 0
+#define CONFIG_SCALE_FILTER 0
+#define CONFIG_SELECT_FILTER 0
+#define CONFIG_SENDCMD_FILTER 0
+#define CONFIG_SEPARATEFIELDS_FILTER 0
+#define CONFIG_SETDAR_FILTER 0
+#define CONFIG_SETFIELD_FILTER 0
+#define CONFIG_SETPTS_FILTER 1
+#define CONFIG_SETSAR_FILTER 0
+#define CONFIG_SETTB_FILTER 0
+#define CONFIG_SHOWINFO_FILTER 0
+#define CONFIG_SHUFFLEPLANES_FILTER 0
+#define CONFIG_SIGNALSTATS_FILTER 0
+#define CONFIG_SMARTBLUR_FILTER 0
+#define CONFIG_SPLIT_FILTER 0
+#define CONFIG_SPP_FILTER 0
+#define CONFIG_STEREO3D_FILTER 0
+#define CONFIG_SUBTITLES_FILTER 0
+#define CONFIG_SUPER2XSAI_FILTER 0
+#define CONFIG_SWAPUV_FILTER 0
+#define CONFIG_TELECINE_FILTER 0
+#define CONFIG_THUMBNAIL_FILTER 0
+#define CONFIG_TILE_FILTER 0
+#define CONFIG_TINTERLACE_FILTER 0
+#define CONFIG_TRANSPOSE_FILTER 1
+#define CONFIG_TRIM_FILTER 1
+#define CONFIG_UNSHARP_FILTER 0
+#define CONFIG_VFLIP_FILTER 1
+#define CONFIG_VIDSTABDETECT_FILTER 0
+#define CONFIG_VIDSTABTRANSFORM_FILTER 0
+#define CONFIG_VIGNETTE_FILTER 0
+#define CONFIG_W3FDIF_FILTER 0
+#define CONFIG_XBR_FILTER 0
+#define CONFIG_YADIF_FILTER 0
+#define CONFIG_ZMQ_FILTER 0
+#define CONFIG_ZOOMPAN_FILTER 0
+#define CONFIG_CELLAUTO_FILTER 0
+#define CONFIG_COLOR_FILTER 0
+#define CONFIG_FREI0R_SRC_FILTER 0
+#define CONFIG_HALDCLUTSRC_FILTER 0
+#define CONFIG_LIFE_FILTER 0
+#define CONFIG_MANDELBROT_FILTER 0
+#define CONFIG_MPTESTSRC_FILTER 0
+#define CONFIG_NULLSRC_FILTER 0
+#define CONFIG_RGBTESTSRC_FILTER 0
+#define CONFIG_SMPTEBARS_FILTER 0
+#define CONFIG_SMPTEHDBARS_FILTER 0
+#define CONFIG_TESTSRC_FILTER 0
+#define CONFIG_NULLSINK_FILTER 0
+#define CONFIG_AVECTORSCOPE_FILTER 0
+#define CONFIG_CONCAT_FILTER 0
+#define CONFIG_SHOWCQT_FILTER 0
+#define CONFIG_SHOWSPECTRUM_FILTER 0
+#define CONFIG_SHOWWAVES_FILTER 0
+#define CONFIG_AMOVIE_FILTER 0
+#define CONFIG_MOVIE_FILTER 0
+#define CONFIG_H263_VAAPI_HWACCEL 0
+#define CONFIG_H263_VDPAU_HWACCEL 0
+#define CONFIG_H264_DXVA2_HWACCEL 0
+#define CONFIG_H264_VAAPI_HWACCEL 0
+#define CONFIG_H264_VDA_HWACCEL 0
+#define CONFIG_H264_VDA_OLD_HWACCEL 0
+#define CONFIG_H264_VDPAU_HWACCEL 0
+#define CONFIG_MPEG1_XVMC_HWACCEL 0
+#define CONFIG_MPEG1_VDPAU_HWACCEL 0
+#define CONFIG_MPEG2_XVMC_HWACCEL 0
+#define CONFIG_MPEG2_DXVA2_HWACCEL 0
+#define CONFIG_MPEG2_VAAPI_HWACCEL 0
+#define CONFIG_MPEG2_VDPAU_HWACCEL 0
+#define CONFIG_MPEG4_VAAPI_HWACCEL 0
+#define CONFIG_MPEG4_VDPAU_HWACCEL 0
+#define CONFIG_VC1_DXVA2_HWACCEL 0
+#define CONFIG_VC1_VAAPI_HWACCEL 0
+#define CONFIG_VC1_VDPAU_HWACCEL 0
+#define CONFIG_WMV3_DXVA2_HWACCEL 0
+#define CONFIG_WMV3_VAAPI_HWACCEL 0
+#define CONFIG_WMV3_VDPAU_HWACCEL 0
+#define CONFIG_ALSA_INDEV 0
+#define CONFIG_AVFOUNDATION_INDEV 0
+#define CONFIG_BKTR_INDEV 0
+#define CONFIG_DECKLINK_INDEV 0
+#define CONFIG_DSHOW_INDEV 0
+#define CONFIG_DV1394_INDEV 0
+#define CONFIG_FBDEV_INDEV 0
+#define CONFIG_GDIGRAB_INDEV 0
+#define CONFIG_IEC61883_INDEV 0
+#define CONFIG_JACK_INDEV 0
+#define CONFIG_LAVFI_INDEV 0
+#define CONFIG_OPENAL_INDEV 0
+#define CONFIG_OSS_INDEV 0
+#define CONFIG_PULSE_INDEV 0
+#define CONFIG_QTKIT_INDEV 0
+#define CONFIG_SNDIO_INDEV 0
+#define CONFIG_V4L2_INDEV 0
+#define CONFIG_VFWCAP_INDEV 0
+#define CONFIG_X11GRAB_INDEV 0
+#define CONFIG_X11GRAB_XCB_INDEV 0
+#define CONFIG_LIBCDIO_INDEV 0
+#define CONFIG_LIBDC1394_INDEV 0
+#define CONFIG_A64_MUXER 0
+#define CONFIG_AC3_MUXER 0
+#define CONFIG_ADTS_MUXER 0
+#define CONFIG_ADX_MUXER 0
+#define CONFIG_AIFF_MUXER 0
+#define CONFIG_AMR_MUXER 0
+#define CONFIG_ASF_MUXER 0
+#define CONFIG_ASS_MUXER 0
+#define CONFIG_AST_MUXER 0
+#define CONFIG_ASF_STREAM_MUXER 0
+#define CONFIG_AU_MUXER 0
+#define CONFIG_AVI_MUXER 0
+#define CONFIG_AVM2_MUXER 0
+#define CONFIG_BIT_MUXER 0
+#define CONFIG_CAF_MUXER 0
+#define CONFIG_CAVSVIDEO_MUXER 0
+#define CONFIG_CRC_MUXER 0
+#define CONFIG_DATA_MUXER 0
+#define CONFIG_DAUD_MUXER 0
+#define CONFIG_DIRAC_MUXER 0
+#define CONFIG_DNXHD_MUXER 0
+#define CONFIG_DTS_MUXER 0
+#define CONFIG_DV_MUXER 0
+#define CONFIG_EAC3_MUXER 0
+#define CONFIG_F4V_MUXER 0
+#define CONFIG_FFM_MUXER 0
+#define CONFIG_FFMETADATA_MUXER 0
+#define CONFIG_FILMSTRIP_MUXER 0
+#define CONFIG_FLAC_MUXER 0
+#define CONFIG_FLV_MUXER 0
+#define CONFIG_FRAMECRC_MUXER 0
+#define CONFIG_FRAMEMD5_MUXER 0
+#define CONFIG_G722_MUXER 0
+#define CONFIG_G723_1_MUXER 0
+#define CONFIG_GIF_MUXER 0
+#define CONFIG_GXF_MUXER 0
+#define CONFIG_H261_MUXER 0
+#define CONFIG_H263_MUXER 0
+#define CONFIG_H264_MUXER 0
+#define CONFIG_HDS_MUXER 0
+#define CONFIG_HEVC_MUXER 0
+#define CONFIG_HLS_MUXER 0
+#define CONFIG_ICO_MUXER 0
+#define CONFIG_ILBC_MUXER 0
+#define CONFIG_IMAGE2_MUXER 0
+#define CONFIG_IMAGE2PIPE_MUXER 0
+#define CONFIG_IPOD_MUXER 0
+#define CONFIG_IRCAM_MUXER 0
+#define CONFIG_ISMV_MUXER 0
+#define CONFIG_IVF_MUXER 0
+#define CONFIG_JACOSUB_MUXER 0
+#define CONFIG_LATM_MUXER 0
+#define CONFIG_LRC_MUXER 0
+#define CONFIG_M4V_MUXER 0
+#define CONFIG_MD5_MUXER 0
+#define CONFIG_MATROSKA_MUXER 0
+#define CONFIG_MATROSKA_AUDIO_MUXER 0
+#define CONFIG_MICRODVD_MUXER 0
+#define CONFIG_MJPEG_MUXER 0
+#define CONFIG_MLP_MUXER 0
+#define CONFIG_MMF_MUXER 0
+#define CONFIG_MOV_MUXER 0
+#define CONFIG_MP2_MUXER 0
+#define CONFIG_MP3_MUXER 0
+#define CONFIG_MP4_MUXER 0
+#define CONFIG_MPEG1SYSTEM_MUXER 0
+#define CONFIG_MPEG1VCD_MUXER 0
+#define CONFIG_MPEG1VIDEO_MUXER 0
+#define CONFIG_MPEG2DVD_MUXER 0
+#define CONFIG_MPEG2SVCD_MUXER 0
+#define CONFIG_MPEG2VIDEO_MUXER 0
+#define CONFIG_MPEG2VOB_MUXER 0
+#define CONFIG_MPEGTS_MUXER 0
+#define CONFIG_MPJPEG_MUXER 0
+#define CONFIG_MXF_MUXER 0
+#define CONFIG_MXF_D10_MUXER 0
+#define CONFIG_NULL_MUXER 0
+#define CONFIG_NUT_MUXER 0
+#define CONFIG_OGA_MUXER 0
+#define CONFIG_OGG_MUXER 0
+#define CONFIG_OMA_MUXER 0
+#define CONFIG_OPUS_MUXER 0
+#define CONFIG_PCM_ALAW_MUXER 0
+#define CONFIG_PCM_MULAW_MUXER 0
+#define CONFIG_PCM_F64BE_MUXER 0
+#define CONFIG_PCM_F64LE_MUXER 0
+#define CONFIG_PCM_F32BE_MUXER 0
+#define CONFIG_PCM_F32LE_MUXER 0
+#define CONFIG_PCM_S32BE_MUXER 0
+#define CONFIG_PCM_S32LE_MUXER 0
+#define CONFIG_PCM_S24BE_MUXER 0
+#define CONFIG_PCM_S24LE_MUXER 0
+#define CONFIG_PCM_S16BE_MUXER 0
+#define CONFIG_PCM_S16LE_MUXER 0
+#define CONFIG_PCM_S8_MUXER 0
+#define CONFIG_PCM_U32BE_MUXER 0
+#define CONFIG_PCM_U32LE_MUXER 0
+#define CONFIG_PCM_U24BE_MUXER 0
+#define CONFIG_PCM_U24LE_MUXER 0
+#define CONFIG_PCM_U16BE_MUXER 0
+#define CONFIG_PCM_U16LE_MUXER 0
+#define CONFIG_PCM_U8_MUXER 0
+#define CONFIG_PSP_MUXER 0
+#define CONFIG_RAWVIDEO_MUXER 0
+#define CONFIG_RM_MUXER 0
+#define CONFIG_ROQ_MUXER 0
+#define CONFIG_RSO_MUXER 0
+#define CONFIG_RTP_MUXER 0
+#define CONFIG_RTSP_MUXER 0
+#define CONFIG_SAP_MUXER 0
+#define CONFIG_SEGMENT_MUXER 0
+#define CONFIG_STREAM_SEGMENT_MUXER 0
+#define CONFIG_SMJPEG_MUXER 0
+#define CONFIG_SMOOTHSTREAMING_MUXER 0
+#define CONFIG_SOX_MUXER 0
+#define CONFIG_SPX_MUXER 0
+#define CONFIG_SPDIF_MUXER 0
+#define CONFIG_SRT_MUXER 0
+#define CONFIG_SWF_MUXER 0
+#define CONFIG_TEE_MUXER 0
+#define CONFIG_TG2_MUXER 0
+#define CONFIG_TGP_MUXER 0
+#define CONFIG_MKVTIMESTAMP_V2_MUXER 0
+#define CONFIG_TRUEHD_MUXER 0
+#define CONFIG_UNCODEDFRAMECRC_MUXER 0
+#define CONFIG_VC1_MUXER 0
+#define CONFIG_VC1T_MUXER 0
+#define CONFIG_VOC_MUXER 0
+#define CONFIG_W64_MUXER 0
+#define CONFIG_WAV_MUXER 0
+#define CONFIG_WEBM_MUXER 0
+#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0
+#define CONFIG_WEBP_MUXER 0
+#define CONFIG_WEBVTT_MUXER 0
+#define CONFIG_WTV_MUXER 0
+#define CONFIG_WV_MUXER 0
+#define CONFIG_YUV4MPEGPIPE_MUXER 0
+#define CONFIG_LIBNUT_MUXER 0
+#define CONFIG_ALSA_OUTDEV 0
+#define CONFIG_CACA_OUTDEV 0
+#define CONFIG_DECKLINK_OUTDEV 0
+#define CONFIG_FBDEV_OUTDEV 0
+#define CONFIG_OPENGL_OUTDEV 0
+#define CONFIG_OSS_OUTDEV 0
+#define CONFIG_PULSE_OUTDEV 0
+#define CONFIG_SDL_OUTDEV 0
+#define CONFIG_SNDIO_OUTDEV 0
+#define CONFIG_V4L2_OUTDEV 0
+#define CONFIG_XV_OUTDEV 0
+#define CONFIG_AAC_PARSER 0
+#define CONFIG_AAC_LATM_PARSER 0
+#define CONFIG_AC3_PARSER 0
+#define CONFIG_ADX_PARSER 0
+#define CONFIG_BMP_PARSER 0
+#define CONFIG_CAVSVIDEO_PARSER 0
+#define CONFIG_COOK_PARSER 0
+#define CONFIG_DCA_PARSER 0
+#define CONFIG_DIRAC_PARSER 0
+#define CONFIG_DNXHD_PARSER 0
+#define CONFIG_DPX_PARSER 0
+#define CONFIG_DVBSUB_PARSER 0
+#define CONFIG_DVDSUB_PARSER 0
+#define CONFIG_DVD_NAV_PARSER 0
+#define CONFIG_FLAC_PARSER 0
+#define CONFIG_GSM_PARSER 0
+#define CONFIG_H261_PARSER 0
+#define CONFIG_H263_PARSER 0
+#define CONFIG_H264_PARSER 0
+#define CONFIG_HEVC_PARSER 0
+#define CONFIG_MJPEG_PARSER 0
+#define CONFIG_MLP_PARSER 0
+#define CONFIG_MPEG4VIDEO_PARSER 0
+#define CONFIG_MPEGAUDIO_PARSER 0
+#define CONFIG_MPEGVIDEO_PARSER 0
+#define CONFIG_OPUS_PARSER 0
+#define CONFIG_PNG_PARSER 0
+#define CONFIG_PNM_PARSER 0
+#define CONFIG_RV30_PARSER 0
+#define CONFIG_RV40_PARSER 0
+#define CONFIG_TAK_PARSER 0
+#define CONFIG_VC1_PARSER 0
+#define CONFIG_VORBIS_PARSER 0
+#define CONFIG_VP3_PARSER 0
+#define CONFIG_VP8_PARSER 0
+#define CONFIG_VP9_PARSER 0
+#define CONFIG_BLURAY_PROTOCOL 0
+#define CONFIG_CACHE_PROTOCOL 0
+#define CONFIG_CONCAT_PROTOCOL 0
+#define CONFIG_CRYPTO_PROTOCOL 0
+#define CONFIG_DATA_PROTOCOL 0
+#define CONFIG_FFRTMPCRYPT_PROTOCOL 0
+#define CONFIG_FFRTMPHTTP_PROTOCOL 0
+#define CONFIG_FILE_PROTOCOL 1
+#define CONFIG_FTP_PROTOCOL 0
+#define CONFIG_GOPHER_PROTOCOL 0
+#define CONFIG_HLS_PROTOCOL 0
+#define CONFIG_HTTP_PROTOCOL 0
+#define CONFIG_HTTPPROXY_PROTOCOL 0
+#define CONFIG_HTTPS_PROTOCOL 0
+#define CONFIG_ICECAST_PROTOCOL 0
+#define CONFIG_MMSH_PROTOCOL 0
+#define CONFIG_MMST_PROTOCOL 0
+#define CONFIG_MD5_PROTOCOL 0
+#define CONFIG_PIPE_PROTOCOL 0
+#define CONFIG_RTMP_PROTOCOL 0
+#define CONFIG_RTMPE_PROTOCOL 0
+#define CONFIG_RTMPS_PROTOCOL 0
+#define CONFIG_RTMPT_PROTOCOL 0
+#define CONFIG_RTMPTE_PROTOCOL 0
+#define CONFIG_RTMPTS_PROTOCOL 0
+#define CONFIG_RTP_PROTOCOL 0
+#define CONFIG_SCTP_PROTOCOL 0
+#define CONFIG_SRTP_PROTOCOL 0
+#define CONFIG_SUBFILE_PROTOCOL 0
+#define CONFIG_TCP_PROTOCOL 0
+#define CONFIG_TLS_PROTOCOL 0
+#define CONFIG_UDP_PROTOCOL 0
+#define CONFIG_UDPLITE_PROTOCOL 0
+#define CONFIG_UNIX_PROTOCOL 0
+#define CONFIG_LIBRTMP_PROTOCOL 0
+#define CONFIG_LIBRTMPE_PROTOCOL 0
+#define CONFIG_LIBRTMPS_PROTOCOL 0
+#define CONFIG_LIBRTMPT_PROTOCOL 0
+#define CONFIG_LIBRTMPTE_PROTOCOL 0
+#define CONFIG_LIBSSH_PROTOCOL 0
+#define CONFIG_LIBSMBCLIENT_PROTOCOL 0
+
+/* HEVC decoder options */
+//#define USE_MD5 /* include MD5 SEI check */
+#define USE_MSPS /* support modified SPS header to simplify decoder */
+//#define USE_VAR_BIT_DEPTH /* support all bit depths with reduced code size */
+#define USE_SAO_SMALL_BUFFER /* reduce the memory used by SAO */
+//#define USE_PRED           /* allow non intra frames */
+//#define USE_FULL           /* include HEVC code not relevant for BPG decoding */
+//#define USE_FUNC_PTR /* use function pointers for dsp */
+//#define USE_AV_LOG /* include av_log() */
+#define USE_FRAME_DURATION_SEI /* for animations */
+//#define USE_BIPRED /* allow bi-prediction */
+
+#endif /* FFMPEG_CONFIG_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/bpg_spec.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,517 @@
+BPG Specification
+
+version 0.9.5
+
+Copyright (c) 2014-2015 Fabrice Bellard
+
+1) Introduction
+---------------
+
+BPG is a lossy and lossless picture compression format based on HEVC
+[1]. It supports grayscale, YCbCr, RGB, YCgCo color spaces with an
+optional alpha channel. CMYK is supported by reusing the alpha channel
+to encode an additional white component. The bit depth of each
+component is from 8 to 14 bits. The color values are stored either in
+full range (JPEG case) or limited range (video case). The YCbCr color
+space is either BT 601 (JPEG case), BT 709 or BT 2020.
+
+The chroma can be subsampled by a factor of two in horizontal or both
+in horizontal and vertical directions (4:4:4, 4:2:2 or 4:2:0 chroma
+formats are supported). In order to be able to transcode JPEG images
+or video frames without modification to the chroma, both JPEG and
+MPEG2 chroma sample positions are supported.
+
+Progressive decoding and display is supported by interleaving the
+alpha and color data.
+
+Arbitrary metadata (such as EXIF, ICC profile, XMP) are supported.
+
+Animations are supported as an optional feature. Decoders not
+supporting animation display the first frame of the animation.
+
+2) Bitstream conventions
+------------------------
+
+The bit stream is byte aligned and bit fields are read from most
+significant to least significant bit in each byte.
+
+- u(n) is an unsigned integer stored on n bits.
+
+- ue7(n) is an unsigned integer of at most n bits stored on a variable
+  number of bytes. All the bytes except the last one have a '1' as
+  their first bit. The unsigned integer is represented as the
+  concatenation of the remaining 7 bit codewords. Only the shortest
+  encoding for a given unsigned integer shall be accepted by the
+  decoder (i.e. the first byte is never 0x80). Example:
+
+  Encoded bytes       Unsigned integer value
+  0x08                8
+  0x84 0x1e           542
+  0xac 0xbe 0x17      728855
+
+- ue(v) : unsigned integer 0-th order Exp-Golomb-coded (see HEVC
+  specification).
+
+- b(8) is an arbitrary byte.
+
+3) File format
+--------------
+
+3.1) Syntax
+-----------
+
+heic_file() {
+
+     file_magic                                                  u(32)
+
+     pixel_format                                                u(3)
+     alpha1_flag                                                 u(1)
+     bit_depth_minus_8                                           u(4)
+
+     color_space                                                 u(4)
+     extension_present_flag                                      u(1)
+     alpha2_flag                                                 u(1)
+     limited_range_flag                                          u(1)
+     animation_flag                                              u(1)
+     
+     picture_width                                               ue7(32)
+     picture_height                                              ue7(32)
+     
+     picture_data_length                                         ue7(32)
+     if (extension_present_flag) {
+         extension_data_length                                   ue7(32)
+         extension_data()
+     }
+
+     hevc_header_and_data()
+}
+
+extension_data() 
+{
+     while (more_bytes()) {
+         extension_tag                                           ue7(32)
+         extension_tag_length                                    ue7(32)
+         if (extension_tag == 5) {
+             animation_control_extension(extension_tag_length)
+         } else {
+             for(j = 0; j < extension_tag_length; j++) {
+                 extension_tag_data_byte                         b(8)
+             }
+         }
+     }
+}
+
+animation_control_extension(payload_length)
+{
+    loop_count                                                   ue7(16)
+    frame_period_num                                             ue7(16)
+    frame_period_den                                             ue7(16)
+    while (more_bytes()) {
+        dummy_byte                                               b(8)
+    }
+}
+
+hevc_header_and_data()
+{
+     if (alpha1_flag || alpha2_flag) {
+         hevc_header()
+     }
+     hevc_header()
+     hevc_data()
+}
+
+hevc_header()
+{
+     hevc_header_length                                          ue7(32)
+     log2_min_luma_coding_block_size_minus3                      ue(v)
+     log2_diff_max_min_luma_coding_block_size                    ue(v)
+     log2_min_transform_block_size_minus2                        ue(v)
+     log2_diff_max_min_transform_block_size                      ue(v)
+     max_transform_hierarchy_depth_intra                         ue(v)
+     sample_adaptive_offset_enabled_flag                         u(1)
+     pcm_enabled_flag                                            u(1)
+     if (pcm_enabled_flag) {
+         pcm_sample_bit_depth_luma_minus1                        u(4)
+         pcm_sample_bit_depth_chroma_minus1                      u(4)
+         log2_min_pcm_luma_coding_block_size_minus3              ue(v)
+         log2_diff_max_min_pcm_luma_coding_block_size            ue(v)
+         pcm_loop_filter_disabled_flag                           u(1)
+     }
+     strong_intra_smoothing_enabled_flag                         u(1)
+     sps_extension_present_flag                                  u(1)
+     if (sps_extension_present_flag) {
+         sps_range_extension_flag                                u(1)
+         sps_extension_7bits                                     u(7)     
+     }
+     if (sps_range_extension_flag) {
+         transform_skip_rotation_enabled_flag                    u(1)
+         transform_skip_context_enabled_flag                     u(1)
+         implicit_rdpcm_enabled_flag                             u(1)
+         explicit_rdpcm_enabled_flag                             u(1)
+         extended_precision_processing_flag                      u(1)
+         intra_smoothing_disabled_flag                           u(1)
+         high_precision_offsets_enabled_flag                     u(1)
+         persistent_rice_adaptation_enabled_flag                 u(1)
+         cabac_bypass_alignment_enabled_flag                     u(1)
+     }
+     trailing_bits                                               u(v)
+}
+
+hevc_data() 
+{
+     for(i = 0; i < v; i++) {
+         hevc_data_byte                                          b(8)
+     }
+}
+
+frame_duration_sei(payloadSize)
+{
+     frame_duration                                              u(16)
+}
+
+3.2) Semantics
+--------------
+
+     'file_magic' is defined as 0x425047fb.
+
+     'pixel_format' indicates the chroma subsampling:
+
+       0 : Grayscale
+       1 : 4:2:0. Chroma at position (0.5, 0.5) (JPEG chroma position)
+       2 : 4:2:2. Chroma at position (0.5, 0) (JPEG chroma position)
+       3 : 4:4:4
+       4 : 4:2:0. Chroma at position (0, 0.5) (MPEG2 chroma position)
+       5 : 4:2:2. Chroma at position (0, 0) (MPEG2 chroma position)
+
+       The other values are reserved.
+       
+     'alpha1_flag' and 'alpha2_flag' give information about the alpha plane:
+
+       alpha1_flag=0 alpha2_flag=0: no alpha plane.
+
+       alpha1_flag=1 alpha2_flag=0: alpha present. The color is not
+       premultiplied.
+        
+       alpha1_flag=1 alpha2_flag=1: alpha present. The color is
+       premultiplied. The resulting non-premultiplied R', G', B' shall
+       be recovered as:
+          
+         if A != 0 
+           R' = min(R / A, 1), G' = min(G / A, 1), B' = min(B / A, 1)
+         else
+           R' = G' = B' = 1 .
+         
+       alpha1_flag=0 alpha2_flag=1: the alpha plane is present and
+       contains the W color component (CMYK color). The resulting CMYK
+       data can be recovered as follows:
+
+         C = (1 - R), M = (1 - G), Y = (1 - B), K = (1 - W) .
+     
+       In case no color profile is specified, the sRGB color R'G'B'
+       shall be computed as: 
+
+         R' = R * W, G' = G * W, B' = B * W .
+
+     'bit_depth_minus_8' is the number of bits used for each component
+     minus 8. In this version of the specification, bit_depth_minus_8
+     <= 6.
+
+     'extension_present_flag' indicates that extension data are
+     present.
+
+     'color_space' specifies how to convert the color planes to
+     RGB. It must be 0 when pixel_format = 0 (grayscale):
+
+       0 : YCbCr (BT 601, same as JPEG and HEVC matrix_coeffs = 5)
+       1 : RGB (component order: G B R)
+       2 : YCgCo (same as HEVC matrix_coeffs = 8)
+       3 : YCbCr (BT 709, same as HEVC matrix_coeffs = 1)
+       4 : YCbCr (BT 2020 non constant luminance system, same as HEVC
+       matrix_coeffs = 9)
+       5 : reserved for BT 2020 constant luminance system, not
+       supported in this version of the specification.
+
+       The other values are reserved.
+
+       YCbCr is defined using the BT 601, BT 709 or BT 2020 conversion
+       matrices.
+
+       For RGB, G is stored as the Y plane. B in the Cb plane and R in
+       the Cr plane.
+
+       YCgCo is defined as HEVC matrix_coeffs = 8. Y is stored in the
+       Y plane. Cg in the Cb plane and Co in the Cr plane.
+       
+       If no color profile is present, the RGB output data are assumed
+       to be in the sRGB color space [6].
+
+     'limited_range_flag': opposite of the HEVC video_full_range_flag.
+     The value zero indicates that the full range of each color
+     component is used. The value one indicates that a limited range
+     is used:
+
+          - (16 << (bit_depth - 8)) to (235 << (bit_depth - 8)) for Y
+     and G, B, R,
+          - (16 << (bit_depth - 8)) to (240 << (bit_depth - 8)) for Cb and Cr.
+
+     For the YCgCo color space, the range limitation shall be done on
+     the RGB data.
+
+     The alpha (or W) plane always uses the full range.
+
+     'animation_flag'. The value '1' indicates that more than one
+     frame are encoded in the hevc data. The animation control
+     extension must be present. If the decoder does not support
+     animations, it shall decode the first frame only and ignore the
+     animation information.
+
+     'picture_width' is the picture width in pixels. The value 0 is
+     not allowed.
+
+     'picture_height' is the picture height in pixels. The value 0 is
+     not allowed.
+
+     'picture_data_length' is the picture data length in bytes. The
+     special value of zero indicates that the picture data goes up to
+     the end of the file.
+
+     'extension_data_length' is the extension data length in bytes.
+
+     'extension_data()' is the extension data.
+
+     'extension_tag' is the extension tag. The following values are defined:
+
+       1: EXIF data.
+
+       2: ICC profile (see [4])
+
+       3: XMP (see [5])
+
+       4: Thumbnail (the thumbnail shall be a lower resolution version
+       of the image and stored in BPG format).
+
+       5: Animation control data.
+
+     The decoder shall ignore the tags it does not support.
+
+     'extension_tag_length' is the length in bytes of the extension tag.
+
+     'loop_count' gives the number of times the animation shall be
+     played. The value of 0 means infinite.
+     
+     'frame_period_num' and 'frame_period_den' encode the default
+     delay between each frame as frame_period_num/frame_period_den
+     seconds. The value of 0 for 'frame_period_num' or
+     'frame_period_den' is forbidden.
+     
+     'hevc_header_length' is the length in bytes of the following data
+     up to and including 'trailing_bits'.
+     
+     'log2_min_luma_coding_block_size_minus3',
+     'log2_diff_max_min_luma_coding_block_size',
+     'log2_min_transform_block_size_minus2',
+     'log2_diff_max_min_transform_block_size',
+     'max_transform_hierarchy_depth_intra',
+     'sample_adaptive_offset_enabled_flag', 'pcm_enabled_flag',
+     'pcm_sample_bit_depth_luma_minus1',
+     'pcm_sample_bit_depth_chroma_minus1',
+     'log2_min_pcm_luma_coding_block_size_minus3',
+     'log2_diff_max_min_pcm_luma_coding_block_size',
+     'pcm_loop_filter_disabled_flag',
+     'strong_intra_smoothing_enabled_flag', 'sps_extension_flag',
+     'sps_extension_present_flag', 'sps_range_extension_flag',
+     'transform_skip_rotation_enabled_flag',
+     'transform_skip_context_enabled_flag',
+     'implicit_rdpcm_enabled_flag', 'explicit_rdpcm_enabled_flag',
+     'extended_precision_processing_flag',
+     'intra_smoothing_disabled_flag',
+     'high_precision_offsets_enabled_flag',
+     'persistent_rice_adaptation_enabled_flag',
+     'cabac_bypass_alignment_enabled_flag' are
+     the corresponding fields of the HEVC SPS syntax element.
+         
+     'trailing_bits' has a value of 0 and has a length from 0 to 7
+     bits so that the next data is byte aligned.
+
+     'hevc_data()' contains the corresponding HEVC picture data,
+     excluding the first NAL start code (i.e. the first 0x00 0x00 0x01
+     or 0x00 0x00 0x00 0x01 bytes). The VPS and SPS NALs shall not be
+     included in the HEVC picture data. The decoder can recover the
+     necessary fields from the header by doing the following
+     assumptions:
+
+     - vps_video_parameter_set_id = 0
+     - sps_video_parameter_set_id = 0
+     - sps_max_sub_layers = 1
+     - sps_seq_parameter_set_id = 0
+     - chroma_format_idc: for picture data: 
+         chroma_format_idc = pixel_format
+       for alpha data: 
+         chroma_format_idc = 0.
+     - separate_colour_plane_flag = 0
+     - pic_width_in_luma_samples = ceil(picture_width/cb_size) * cb_size
+     - pic_height_in_luma_samples = ceil(picture_height/cb_size) * cb_size
+       with cb_size = 1 << log2_min_luma_coding_block_size
+     - bit_depth_luma_minus8 = bit_depth_minus_8
+     - bit_depth_chroma_minus8 = bit_depth_minus_8
+     - max_transform_hierarchy_depth_inter = max_transform_hierarchy_depth_intra
+     - scaling_list_enabled_flag = 0
+     - log2_max_pic_order_cnt_lsb_minus4 = 4
+     - amp_enabled_flag = 1
+     - sps_temporal_mvp_enabled_flag = 1
+     
+
+     Alpha data encoding:
+
+     - If alpha data is present, all the corresponding NALs have
+       nuh_layer_id = 1. NALs for color data shall have nuh_layer_id =
+       0.
+     - Alpha data shall use the same tile sizes as color data and
+       shall have the same entropy_coding_sync_enabled_flag value as
+       color data.
+     - Alpha slices shall use the same number of coding units as color
+       slices and should be interleaved with color slices. alpha NALs
+       shall come before the corresponding color NALs.
+
+     Animation encoding:
+
+     - The optional prefix SEI with payloadType = 257 (defined in
+       frame_duration_sei()) specifies that the image must be repeated
+       'frame_duration' times. 'frame_duration' shall not be zero. If
+       the frame duration SEI is not present for a given frame,
+       frame_duration = 1 shall be assumed by the decoder. If alpha
+       data is present, the frame duration SEI shall be present only
+       for the color data.
+     
+3.3) HEVC Profile
+-----------------
+
+Conforming HEVC bit streams shall conform to the Main 4:4:4 16 Still
+Picture, Level 8.5 of the HEVC specification with the following
+modifications.
+
+- separate_colour_plane_flag shall be 0 when present.
+
+- bit_depth_luma_minus8 <= 6
+
+- bit_depth_chroma_minus8 = bit_depth_luma_minus8
+
+- explicit_rdpcm_enabled_flag = 0 (does not matter for intra frames)
+
+- extended_precision_processing_flag = 0
+
+- cabac_bypass_alignment_enabled_flag = 0
+
+- high_precision_offsets_enabled_flag = 0 (does not matter for intra frames)
+
+- If the encoded image is larger than the size indicated by
+picture_width and picture_height, the lower right part of the decoded
+image shall be cropped. If a horizontal (resp. vertical) decimation by
+two is done for the chroma and the width (resp. height) is n
+pixels, ceil(n/2) pixels must be kept as the resulting chroma
+information.
+
+When animations are present, the next frames shall be encoded with the
+following changes:
+
+- P slices are allowed (but B slices are not allowed).
+
+- Only the previous picture can be used as reference (hence a DPB size
+  of 2 pictures).
+
+4) Design choices
+-----------------
+
+(This section is informative)
+
+- Our design principle was to keep the format as simple as possible
+  while taking the HEVC codec as basis. Our main metric to evaluate
+  the simplicity was the size of a software decoder which outputs 32
+  bit RGBA pixel data.
+
+- Pixel formats: we wanted to be able to convert JPEG images to BPG
+  with as little loss as possible. So supporting the same color space
+  (BT 601 YCbCr) with the same range (full range) and most of the
+  allowed JPEG chroma formats (4:4:4, 4:2:2, 4:2:0 or grayscale) was
+  mandatory to avoid going back to RGB or doing a subsampling or
+  interpolation.
+
+- Alpha support: alpha support is mandatory. We chose to use a
+  separate HEVC monochrome plane to handle it instead of another
+  format to simplify the decoder. The color is either
+  non-premultiplied or premultiplied. Premultiplied alpha usually
+  gives a better compression. Non-premultiplied alpha is supported in
+  case no loss is needed on the color components. In order to allow
+  progressive display, the alpha and color data are interleaved (the
+  nuh_layer_id NAL field is 0 for color data and 1 for alpha
+  data). The alpha and color slices should contain the same number of
+  coding units and each alpha slice should come before the
+  corresponding color slice. Since alpha slices are usually smaller
+  than color slices, it allows a progressive display even if there is
+  a single slice.
+
+- Color spaces: In addition to YCbCr, RGB is supported for the high
+  quality or lossless cases. YCgCo is supported because it may give
+  slightly better results than YCbCr for high quality images. CMYK is
+  supported so that JPEGs containing this color space can be
+  converted. The alpha plane is used to store the W (1-K) plane. The
+  data is stored with inverted components (1-X) so that the conversion
+  to RGB is simplified. The support of the BT 709 and BT 2020 (non
+  constant luminance) YCbCr encodings and of the limited range color
+  values were added to reduce the losses when converting video frames.
+
+- Bit depth: we decided to support the HEVC bit depths 8 to 14. The
+  added complexity is small and it allows to support high quality
+  pictures from cameras.
+
+- Picture file format: keeping a completely standard HEVC stream would
+  have meant a more difficult parsing for the picture header which is
+  a problem for the various image utilities to get the basic picture
+  information (pixel format, width, height). So we added a small
+  header before the HEVC bit stream. The picture header is byte
+  oriented so it is easy to parse.
+
+- HEVC bit stream: the standard HEVC headers (the VPS and SPS NALs)
+  give an overhead of about 60 bytes for no added value in the case of
+  picture compression. Since the alpha plane uses a different HEVC bit
+  stream, it also adds the same overhead again. So we removed the VPS
+  and SPS NALs and added a very small header with the equivalent
+  information (typically 4 bytes). We also removed the first NAL start
+  code which is not useful. It is still possible to reconstruct a
+  standard HEVC stream to feed an unmodified hardware decoder if needed.
+
+- Extensions: the metadata are stored at the beginning of the file so
+  that they can be read at the same time as the header. Since metadata
+  tend to evolve faster than the image formats, we left room for
+  extension by using a (tag, length) representation. The decoder can
+  easily skip all the metadata because their length is explicitly
+  stored in the image header.
+
+- Animations: they are interesting compared to WebM or MP4 short
+  videos for the following reasons:
+    * transparency is supported
+    * lossless encoding is supported
+    * the decoding resources are smaller than with a generic video
+      player because only two frames need to be stored (DPB size = 2).
+    * the animations are expected to be small so the decoder can cache
+      all the decoded frames in memory.
+    * the animation can be decoded as a still image if the decoder
+      does not support animations.
+  Compared to the other animated image formats (GIF, APNG, WebP), the
+  compression ratio is usually much higher because of the HEVC inter
+  frame prediction.
+
+5) References
+-------------
+
+[1] High efficiency video coding (HEVC) version 2 (ITU-T Recommendation H.265)
+
+[2] JPEG File Interchange Format version 1.02 ( http://www.w3.org/Graphics/JPEG/jfif3.pdf )
+
+[3] EXIF version 2.2 (JEITA CP-3451)
+
+[4] The International Color Consortium ( http://www.color.org/ )
+
+[5] Extensible Metadata Platform (XMP) http://www.adobe.com/devnet/xmp.html
+
+[6] sRGB color space, IEC 61966-2-1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/emscripten.diff	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,656 @@
+diff --git a/src/fastLong.js b/src/fastLong.js
+index 2b70b2f..977c7ec 100644
+--- a/src/fastLong.js
++++ b/src/fastLong.js
+@@ -12,47 +12,6 @@ function ___muldsi3($a, $b) {
+   $12 = Math_imul($11, $1) | 0;
+   return (tempRet0 = (($8 >>> 16) + (Math_imul($11, $6) | 0) | 0) + ((($8 & 65535) + $12 | 0) >>> 16) | 0, 0 | ($8 + $12 << 16 | $3 & 65535)) | 0;
+ }
+-function ___divdi3($a$0, $a$1, $b$0, $b$1) {
+-  $a$0 = $a$0 | 0;
+-  $a$1 = $a$1 | 0;
+-  $b$0 = $b$0 | 0;
+-  $b$1 = $b$1 | 0;
+-  var $1$0 = 0, $1$1 = 0, $2$0 = 0, $2$1 = 0, $4$0 = 0, $4$1 = 0, $6$0 = 0, $7$0 = 0, $7$1 = 0, $8$0 = 0, $10$0 = 0;
+-  $1$0 = $a$1 >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
+-  $1$1 = (($a$1 | 0) < 0 ? -1 : 0) >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
+-  $2$0 = $b$1 >> 31 | (($b$1 | 0) < 0 ? -1 : 0) << 1;
+-  $2$1 = (($b$1 | 0) < 0 ? -1 : 0) >> 31 | (($b$1 | 0) < 0 ? -1 : 0) << 1;
+-  $4$0 = _i64Subtract($1$0 ^ $a$0, $1$1 ^ $a$1, $1$0, $1$1) | 0;
+-  $4$1 = tempRet0;
+-  $6$0 = _i64Subtract($2$0 ^ $b$0, $2$1 ^ $b$1, $2$0, $2$1) | 0;
+-  $7$0 = $2$0 ^ $1$0;
+-  $7$1 = $2$1 ^ $1$1;
+-  $8$0 = ___udivmoddi4($4$0, $4$1, $6$0, tempRet0, 0) | 0;
+-  $10$0 = _i64Subtract($8$0 ^ $7$0, tempRet0 ^ $7$1, $7$0, $7$1) | 0;
+-  return (tempRet0 = tempRet0, $10$0) | 0;
+-}
+-function ___remdi3($a$0, $a$1, $b$0, $b$1) {
+-  $a$0 = $a$0 | 0;
+-  $a$1 = $a$1 | 0;
+-  $b$0 = $b$0 | 0;
+-  $b$1 = $b$1 | 0;
+-  var $rem = 0, $1$0 = 0, $1$1 = 0, $2$0 = 0, $2$1 = 0, $4$0 = 0, $4$1 = 0, $6$0 = 0, $10$0 = 0, $10$1 = 0, __stackBase__ = 0;
+-  __stackBase__ = STACKTOP;
+-  STACKTOP = STACKTOP + 8 | 0;
+-  $rem = __stackBase__ | 0;
+-  $1$0 = $a$1 >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
+-  $1$1 = (($a$1 | 0) < 0 ? -1 : 0) >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
+-  $2$0 = $b$1 >> 31 | (($b$1 | 0) < 0 ? -1 : 0) << 1;
+-  $2$1 = (($b$1 | 0) < 0 ? -1 : 0) >> 31 | (($b$1 | 0) < 0 ? -1 : 0) << 1;
+-  $4$0 = _i64Subtract($1$0 ^ $a$0, $1$1 ^ $a$1, $1$0, $1$1) | 0;
+-  $4$1 = tempRet0;
+-  $6$0 = _i64Subtract($2$0 ^ $b$0, $2$1 ^ $b$1, $2$0, $2$1) | 0;
+-  ___udivmoddi4($4$0, $4$1, $6$0, tempRet0, $rem) | 0;
+-  $10$0 = _i64Subtract(HEAP32[$rem >> 2] ^ $1$0, HEAP32[$rem + 4 >> 2] ^ $1$1, $1$0, $1$1) | 0;
+-  $10$1 = tempRet0;
+-  STACKTOP = __stackBase__;
+-  return (tempRet0 = $10$1, $10$0) | 0;
+-}
+ function ___muldi3($a$0, $a$1, $b$0, $b$1) {
+   $a$0 = $a$0 | 0;
+   $a$1 = $a$1 | 0;
+@@ -66,234 +25,5 @@ function ___muldi3($a$0, $a$1, $b$0, $b$1) {
+   $2 = Math_imul($a$1, $y_sroa_0_0_extract_trunc) | 0;
+   return (tempRet0 = ((Math_imul($b$1, $x_sroa_0_0_extract_trunc) | 0) + $2 | 0) + $1$1 | $1$1 & 0, 0 | $1$0 & -1) | 0;
+ }
+-function ___udivdi3($a$0, $a$1, $b$0, $b$1) {
+-  $a$0 = $a$0 | 0;
+-  $a$1 = $a$1 | 0;
+-  $b$0 = $b$0 | 0;
+-  $b$1 = $b$1 | 0;
+-  var $1$0 = 0;
+-  $1$0 = ___udivmoddi4($a$0, $a$1, $b$0, $b$1, 0) | 0;
+-  return (tempRet0 = tempRet0, $1$0) | 0;
+-}
+-function ___uremdi3($a$0, $a$1, $b$0, $b$1) {
+-  $a$0 = $a$0 | 0;
+-  $a$1 = $a$1 | 0;
+-  $b$0 = $b$0 | 0;
+-  $b$1 = $b$1 | 0;
+-  var $rem = 0, __stackBase__ = 0;
+-  __stackBase__ = STACKTOP;
+-  STACKTOP = STACKTOP + 8 | 0;
+-  $rem = __stackBase__ | 0;
+-  ___udivmoddi4($a$0, $a$1, $b$0, $b$1, $rem) | 0;
+-  STACKTOP = __stackBase__;
+-  return (tempRet0 = HEAP32[$rem + 4 >> 2] | 0, HEAP32[$rem >> 2] | 0) | 0;
+-}
+-function ___udivmoddi4($a$0, $a$1, $b$0, $b$1, $rem) {
+-  $a$0 = $a$0 | 0;
+-  $a$1 = $a$1 | 0;
+-  $b$0 = $b$0 | 0;
+-  $b$1 = $b$1 | 0;
+-  $rem = $rem | 0;
+-  var $n_sroa_0_0_extract_trunc = 0, $n_sroa_1_4_extract_shift$0 = 0, $n_sroa_1_4_extract_trunc = 0, $d_sroa_0_0_extract_trunc = 0, $d_sroa_1_4_extract_shift$0 = 0, $d_sroa_1_4_extract_trunc = 0, $4 = 0, $17 = 0, $37 = 0, $49 = 0, $51 = 0, $57 = 0, $58 = 0, $66 = 0, $78 = 0, $86 = 0, $88 = 0, $89 = 0, $91 = 0, $92 = 0, $95 = 0, $105 = 0, $117 = 0, $119 = 0, $125 = 0, $126 = 0, $130 = 0, $q_sroa_1_1_ph = 0, $q_sroa_0_1_ph = 0, $r_sroa_1_1_ph = 0, $r_sroa_0_1_ph = 0, $sr_1_ph = 0, $d_sroa_0_0_insert_insert99$0 = 0, $d_sroa_0_0_insert_insert99$1 = 0, $137$0 = 0, $137$1 = 0, $carry_0203 = 0, $sr_1202 = 0, $r_sroa_0_1201 = 0, $r_sroa_1_1200 = 0, $q_sroa_0_1199 = 0, $q_sroa_1_1198 = 0, $147 = 0, $149 = 0, $r_sroa_0_0_insert_insert42$0 = 0, $r_sroa_0_0_insert_insert42$1 = 0, $150$1 = 0, $151$0 = 0, $152 = 0, $154$0 = 0, $r_sroa_0_0_extract_trunc = 0, $r_sroa_1_4_extract_trunc = 0, $155 = 0, $carry_0_lcssa$0 = 0, $carry_0_lcssa$1 = 0, $r_sroa_0_1_lcssa = 0, $r_sroa_1_1_lcssa = 0, $q_sroa_0_1_lcssa = 0, $q_sroa_1_1_lcssa = 0, $q_sroa_0_0_insert_ext75$0 = 0, $q_sroa_0_0_insert_ext75$1 = 0, $q_sroa_0_0_insert_insert77$1 = 0, $_0$0 = 0, $_0$1 = 0;
+-  $n_sroa_0_0_extract_trunc = $a$0;
+-  $n_sroa_1_4_extract_shift$0 = $a$1;
+-  $n_sroa_1_4_extract_trunc = $n_sroa_1_4_extract_shift$0;
+-  $d_sroa_0_0_extract_trunc = $b$0;
+-  $d_sroa_1_4_extract_shift$0 = $b$1;
+-  $d_sroa_1_4_extract_trunc = $d_sroa_1_4_extract_shift$0;
+-  if (($n_sroa_1_4_extract_trunc | 0) == 0) {
+-    $4 = ($rem | 0) != 0;
+-    if (($d_sroa_1_4_extract_trunc | 0) == 0) {
+-      if ($4) {
+-        HEAP32[$rem >> 2] = ($n_sroa_0_0_extract_trunc >>> 0) % ($d_sroa_0_0_extract_trunc >>> 0);
+-        HEAP32[$rem + 4 >> 2] = 0;
+-      }
+-      $_0$1 = 0;
+-      $_0$0 = ($n_sroa_0_0_extract_trunc >>> 0) / ($d_sroa_0_0_extract_trunc >>> 0) >>> 0;
+-      return (tempRet0 = $_0$1, $_0$0) | 0;
+-    } else {
+-      if (!$4) {
+-        $_0$1 = 0;
+-        $_0$0 = 0;
+-        return (tempRet0 = $_0$1, $_0$0) | 0;
+-      }
+-      HEAP32[$rem >> 2] = $a$0 & -1;
+-      HEAP32[$rem + 4 >> 2] = $a$1 & 0;
+-      $_0$1 = 0;
+-      $_0$0 = 0;
+-      return (tempRet0 = $_0$1, $_0$0) | 0;
+-    }
+-  }
+-  $17 = ($d_sroa_1_4_extract_trunc | 0) == 0;
+-  do {
+-    if (($d_sroa_0_0_extract_trunc | 0) == 0) {
+-      if ($17) {
+-        if (($rem | 0) != 0) {
+-          HEAP32[$rem >> 2] = ($n_sroa_1_4_extract_trunc >>> 0) % ($d_sroa_0_0_extract_trunc >>> 0);
+-          HEAP32[$rem + 4 >> 2] = 0;
+-        }
+-        $_0$1 = 0;
+-        $_0$0 = ($n_sroa_1_4_extract_trunc >>> 0) / ($d_sroa_0_0_extract_trunc >>> 0) >>> 0;
+-        return (tempRet0 = $_0$1, $_0$0) | 0;
+-      }
+-      if (($n_sroa_0_0_extract_trunc | 0) == 0) {
+-        if (($rem | 0) != 0) {
+-          HEAP32[$rem >> 2] = 0;
+-          HEAP32[$rem + 4 >> 2] = ($n_sroa_1_4_extract_trunc >>> 0) % ($d_sroa_1_4_extract_trunc >>> 0);
+-        }
+-        $_0$1 = 0;
+-        $_0$0 = ($n_sroa_1_4_extract_trunc >>> 0) / ($d_sroa_1_4_extract_trunc >>> 0) >>> 0;
+-        return (tempRet0 = $_0$1, $_0$0) | 0;
+-      }
+-      $37 = $d_sroa_1_4_extract_trunc - 1 | 0;
+-      if (($37 & $d_sroa_1_4_extract_trunc | 0) == 0) {
+-        if (($rem | 0) != 0) {
+-          HEAP32[$rem >> 2] = 0 | $a$0 & -1;
+-          HEAP32[$rem + 4 >> 2] = $37 & $n_sroa_1_4_extract_trunc | $a$1 & 0;
+-        }
+-        $_0$1 = 0;
+-        $_0$0 = $n_sroa_1_4_extract_trunc >>> ((_llvm_cttz_i32($d_sroa_1_4_extract_trunc | 0) | 0) >>> 0);
+-        return (tempRet0 = $_0$1, $_0$0) | 0;
+-      }
+-      $49 = _llvm_ctlz_i32($d_sroa_1_4_extract_trunc | 0) | 0;
+-      $51 = $49 - (_llvm_ctlz_i32($n_sroa_1_4_extract_trunc | 0) | 0) | 0;
+-      if ($51 >>> 0 <= 30) {
+-        $57 = $51 + 1 | 0;
+-        $58 = 31 - $51 | 0;
+-        $sr_1_ph = $57;
+-        $r_sroa_0_1_ph = $n_sroa_1_4_extract_trunc << $58 | $n_sroa_0_0_extract_trunc >>> ($57 >>> 0);
+-        $r_sroa_1_1_ph = $n_sroa_1_4_extract_trunc >>> ($57 >>> 0);
+-        $q_sroa_0_1_ph = 0;
+-        $q_sroa_1_1_ph = $n_sroa_0_0_extract_trunc << $58;
+-        break;
+-      }
+-      if (($rem | 0) == 0) {
+-        $_0$1 = 0;
+-        $_0$0 = 0;
+-        return (tempRet0 = $_0$1, $_0$0) | 0;
+-      }
+-      HEAP32[$rem >> 2] = 0 | $a$0 & -1;
+-      HEAP32[$rem + 4 >> 2] = $n_sroa_1_4_extract_shift$0 | $a$1 & 0;
+-      $_0$1 = 0;
+-      $_0$0 = 0;
+-      return (tempRet0 = $_0$1, $_0$0) | 0;
+-    } else {
+-      if (!$17) {
+-        $117 = _llvm_ctlz_i32($d_sroa_1_4_extract_trunc | 0) | 0;
+-        $119 = $117 - (_llvm_ctlz_i32($n_sroa_1_4_extract_trunc | 0) | 0) | 0;
+-        if ($119 >>> 0 <= 31) {
+-          $125 = $119 + 1 | 0;
+-          $126 = 31 - $119 | 0;
+-          $130 = $119 - 31 >> 31;
+-          $sr_1_ph = $125;
+-          $r_sroa_0_1_ph = $n_sroa_0_0_extract_trunc >>> ($125 >>> 0) & $130 | $n_sroa_1_4_extract_trunc << $126;
+-          $r_sroa_1_1_ph = $n_sroa_1_4_extract_trunc >>> ($125 >>> 0) & $130;
+-          $q_sroa_0_1_ph = 0;
+-          $q_sroa_1_1_ph = $n_sroa_0_0_extract_trunc << $126;
+-          break;
+-        }
+-        if (($rem | 0) == 0) {
+-          $_0$1 = 0;
+-          $_0$0 = 0;
+-          return (tempRet0 = $_0$1, $_0$0) | 0;
+-        }
+-        HEAP32[$rem >> 2] = 0 | $a$0 & -1;
+-        HEAP32[$rem + 4 >> 2] = $n_sroa_1_4_extract_shift$0 | $a$1 & 0;
+-        $_0$1 = 0;
+-        $_0$0 = 0;
+-        return (tempRet0 = $_0$1, $_0$0) | 0;
+-      }
+-      $66 = $d_sroa_0_0_extract_trunc - 1 | 0;
+-      if (($66 & $d_sroa_0_0_extract_trunc | 0) != 0) {
+-        $86 = (_llvm_ctlz_i32($d_sroa_0_0_extract_trunc | 0) | 0) + 33 | 0;
+-        $88 = $86 - (_llvm_ctlz_i32($n_sroa_1_4_extract_trunc | 0) | 0) | 0;
+-        $89 = 64 - $88 | 0;
+-        $91 = 32 - $88 | 0;
+-        $92 = $91 >> 31;
+-        $95 = $88 - 32 | 0;
+-        $105 = $95 >> 31;
+-        $sr_1_ph = $88;
+-        $r_sroa_0_1_ph = $91 - 1 >> 31 & $n_sroa_1_4_extract_trunc >>> ($95 >>> 0) | ($n_sroa_1_4_extract_trunc << $91 | $n_sroa_0_0_extract_trunc >>> ($88 >>> 0)) & $105;
+-        $r_sroa_1_1_ph = $105 & $n_sroa_1_4_extract_trunc >>> ($88 >>> 0);
+-        $q_sroa_0_1_ph = $n_sroa_0_0_extract_trunc << $89 & $92;
+-        $q_sroa_1_1_ph = ($n_sroa_1_4_extract_trunc << $89 | $n_sroa_0_0_extract_trunc >>> ($95 >>> 0)) & $92 | $n_sroa_0_0_extract_trunc << $91 & $88 - 33 >> 31;
+-        break;
+-      }
+-      if (($rem | 0) != 0) {
+-        HEAP32[$rem >> 2] = $66 & $n_sroa_0_0_extract_trunc;
+-        HEAP32[$rem + 4 >> 2] = 0;
+-      }
+-      if (($d_sroa_0_0_extract_trunc | 0) == 1) {
+-        $_0$1 = $n_sroa_1_4_extract_shift$0 | $a$1 & 0;
+-        $_0$0 = 0 | $a$0 & -1;
+-        return (tempRet0 = $_0$1, $_0$0) | 0;
+-      } else {
+-        $78 = _llvm_cttz_i32($d_sroa_0_0_extract_trunc | 0) | 0;
+-        $_0$1 = 0 | $n_sroa_1_4_extract_trunc >>> ($78 >>> 0);
+-        $_0$0 = $n_sroa_1_4_extract_trunc << 32 - $78 | $n_sroa_0_0_extract_trunc >>> ($78 >>> 0) | 0;
+-        return (tempRet0 = $_0$1, $_0$0) | 0;
+-      }
+-    }
+-  } while (0);
+-  if (($sr_1_ph | 0) == 0) {
+-    $q_sroa_1_1_lcssa = $q_sroa_1_1_ph;
+-    $q_sroa_0_1_lcssa = $q_sroa_0_1_ph;
+-    $r_sroa_1_1_lcssa = $r_sroa_1_1_ph;
+-    $r_sroa_0_1_lcssa = $r_sroa_0_1_ph;
+-    $carry_0_lcssa$1 = 0;
+-    $carry_0_lcssa$0 = 0;
+-  } else {
+-    $d_sroa_0_0_insert_insert99$0 = 0 | $b$0 & -1;
+-    $d_sroa_0_0_insert_insert99$1 = $d_sroa_1_4_extract_shift$0 | $b$1 & 0;
+-    $137$0 = _i64Add($d_sroa_0_0_insert_insert99$0, $d_sroa_0_0_insert_insert99$1, -1, -1) | 0;
+-    $137$1 = tempRet0;
+-    $q_sroa_1_1198 = $q_sroa_1_1_ph;
+-    $q_sroa_0_1199 = $q_sroa_0_1_ph;
+-    $r_sroa_1_1200 = $r_sroa_1_1_ph;
+-    $r_sroa_0_1201 = $r_sroa_0_1_ph;
+-    $sr_1202 = $sr_1_ph;
+-    $carry_0203 = 0;
+-    while (1) {
+-      $147 = $q_sroa_0_1199 >>> 31 | $q_sroa_1_1198 << 1;
+-      $149 = $carry_0203 | $q_sroa_0_1199 << 1;
+-      $r_sroa_0_0_insert_insert42$0 = 0 | ($r_sroa_0_1201 << 1 | $q_sroa_1_1198 >>> 31);
+-      $r_sroa_0_0_insert_insert42$1 = $r_sroa_0_1201 >>> 31 | $r_sroa_1_1200 << 1 | 0;
+-      _i64Subtract($137$0, $137$1, $r_sroa_0_0_insert_insert42$0, $r_sroa_0_0_insert_insert42$1) | 0;
+-      $150$1 = tempRet0;
+-      $151$0 = $150$1 >> 31 | (($150$1 | 0) < 0 ? -1 : 0) << 1;
+-      $152 = $151$0 & 1;
+-      $154$0 = _i64Subtract($r_sroa_0_0_insert_insert42$0, $r_sroa_0_0_insert_insert42$1, $151$0 & $d_sroa_0_0_insert_insert99$0, ((($150$1 | 0) < 0 ? -1 : 0) >> 31 | (($150$1 | 0) < 0 ? -1 : 0) << 1) & $d_sroa_0_0_insert_insert99$1) | 0;
+-      $r_sroa_0_0_extract_trunc = $154$0;
+-      $r_sroa_1_4_extract_trunc = tempRet0;
+-      $155 = $sr_1202 - 1 | 0;
+-      if (($155 | 0) == 0) {
+-        break;
+-      } else {
+-        $q_sroa_1_1198 = $147;
+-        $q_sroa_0_1199 = $149;
+-        $r_sroa_1_1200 = $r_sroa_1_4_extract_trunc;
+-        $r_sroa_0_1201 = $r_sroa_0_0_extract_trunc;
+-        $sr_1202 = $155;
+-        $carry_0203 = $152;
+-      }
+-    }
+-    $q_sroa_1_1_lcssa = $147;
+-    $q_sroa_0_1_lcssa = $149;
+-    $r_sroa_1_1_lcssa = $r_sroa_1_4_extract_trunc;
+-    $r_sroa_0_1_lcssa = $r_sroa_0_0_extract_trunc;
+-    $carry_0_lcssa$1 = 0;
+-    $carry_0_lcssa$0 = $152;
+-  }
+-  $q_sroa_0_0_insert_ext75$0 = $q_sroa_0_1_lcssa;
+-  $q_sroa_0_0_insert_ext75$1 = 0;
+-  $q_sroa_0_0_insert_insert77$1 = $q_sroa_1_1_lcssa | $q_sroa_0_0_insert_ext75$1;
+-  if (($rem | 0) != 0) {
+-    HEAP32[$rem >> 2] = 0 | $r_sroa_0_1_lcssa;
+-    HEAP32[$rem + 4 >> 2] = $r_sroa_1_1_lcssa | 0;
+-  }
+-  $_0$1 = (0 | $q_sroa_0_0_insert_ext75$0) >>> 31 | $q_sroa_0_0_insert_insert77$1 << 1 | ($q_sroa_0_0_insert_ext75$1 << 1 | $q_sroa_0_0_insert_ext75$0 >>> 31) & 0 | $carry_0_lcssa$1;
+-  $_0$0 = ($q_sroa_0_0_insert_ext75$0 << 1 | 0 >>> 31) & -2 | $carry_0_lcssa$0;
+-  return (tempRet0 = $_0$1, $_0$0) | 0;
+-}
+ // =======================================================================
+ 
+diff --git a/src/jsifier.js b/src/jsifier.js
+index a717b79..51bab6f 100644
+--- a/src/jsifier.js
++++ b/src/jsifier.js
+@@ -1883,8 +1883,14 @@ function JSify(data, functionsOnly) {
+       }
+       // these may be duplicated in side modules and the main module without issue
+       print(read('fastLong.js'));
++      if (!USE_BPG_STRIP)
++        print(read('fastLongDiv.js'));
+       print('// EMSCRIPTEN_END_FUNCS\n');
+-      print(read('long.js'));
++      if (PRECISE_I64_MATH == 2) {
++        print('var i64Math = null;');
++      } else {
++        print(read('long.js'));
++      }
+     } else {
+       print('// EMSCRIPTEN_END_FUNCS\n');
+       print('// Warning: printing of i64 values may be slightly rounded! No deep i64 math used, so precise i64 code not included');
+diff --git a/src/postamble.js b/src/postamble.js
+index bac17ab..06800ee 100644
+--- a/src/postamble.js
++++ b/src/postamble.js
+@@ -52,6 +52,7 @@ dependenciesFulfilled = function runCaller() {
+   if (!Module['calledRun']) dependenciesFulfilled = runCaller; // try this again later, after new deps are fulfilled
+ }
+ 
++#if !USE_BPG_STRIP
+ Module['callMain'] = Module.callMain = function callMain(args) {
+   assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on __ATMAIN__)');
+   assert(__ATPRERUN__.length == 0, 'cannot call main when preRun functions remain to be called');
+@@ -108,6 +109,7 @@ Module['callMain'] = Module.callMain = function callMain(args) {
+     calledMain = true;
+   }
+ }
++#endif
+ 
+ {{GLOBAL_VARS}}
+ 
+@@ -142,9 +144,11 @@ function run(args) {
+       Module.printErr('pre-main prep time: ' + (Date.now() - preloadStartTime) + ' ms');
+     }
+ 
++#if !USE_BPG_STRIP
+     if (Module['_main'] && shouldRunNow) {
+       Module['callMain'](args);
+     }
++#endif
+ 
+     postRun();
+   }
+diff --git a/src/preamble.js b/src/preamble.js
+index a1bc48e..9a2e36b 100644
+--- a/src/preamble.js
++++ b/src/preamble.js
+@@ -633,6 +633,8 @@ function allocate(slab, types, allocator, ptr) {
+ }
+ Module['allocate'] = allocate;
+ 
++#if !USE_BPG_STRIP
++
+ function Pointer_stringify(ptr, /* optional */ length) {
+   if (length === 0) return '';
+   // TODO: use TextDecoder
+@@ -924,6 +926,11 @@ function demangle(func) {
+ function demangleAll(text) {
+   return text.replace(/__Z[\w\d_]+/g, function(x) { var y = demangle(x); return x === y ? x : (x + ' [' + y + ']') });
+ }
++#else
++function demangleAll(text) {
++    return text;
++}
++#endif
+ 
+ function jsStackTrace() {
+   var err = new Error();
+diff --git a/src/settings.js b/src/settings.js
+index eb34d2d..49a0a11 100644
+--- a/src/settings.js
++++ b/src/settings.js
+@@ -594,6 +594,8 @@ var USE_SDL = 1; // Specify the SDL version that is being linked against.
+                  // 2 is a port of the SDL C code on emscripten-ports
+ var USE_SDL_IMAGE = 1; // Specify the SDL_image version that is being linked against. Must match USE_SDL
+ 
++// strip unused code by BPG
++var USE_BPG_STRIP = 1;
+ 
+ // Compiler debugging options
+ var DEBUG_TAGS_SHOWING = [];
+diff --git a/src/fastLongDiv.js b/src/fastLongDiv.js
+new file mode 100644
+index 0000000..d1e5b49
+--- /dev/null
++++ b/src/fastLongDiv.js
+@@ -0,0 +1,273 @@
++// ======== compiled code from system/lib/compiler-rt , see readme therein
++function ___divdi3($a$0, $a$1, $b$0, $b$1) {
++  $a$0 = $a$0 | 0;
++  $a$1 = $a$1 | 0;
++  $b$0 = $b$0 | 0;
++  $b$1 = $b$1 | 0;
++  var $1$0 = 0, $1$1 = 0, $2$0 = 0, $2$1 = 0, $4$0 = 0, $4$1 = 0, $6$0 = 0, $7$0 = 0, $7$1 = 0, $8$0 = 0, $10$0 = 0;
++  $1$0 = $a$1 >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
++  $1$1 = (($a$1 | 0) < 0 ? -1 : 0) >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
++  $2$0 = $b$1 >> 31 | (($b$1 | 0) < 0 ? -1 : 0) << 1;
++  $2$1 = (($b$1 | 0) < 0 ? -1 : 0) >> 31 | (($b$1 | 0) < 0 ? -1 : 0) << 1;
++  $4$0 = _i64Subtract($1$0 ^ $a$0, $1$1 ^ $a$1, $1$0, $1$1) | 0;
++  $4$1 = tempRet0;
++  $6$0 = _i64Subtract($2$0 ^ $b$0, $2$1 ^ $b$1, $2$0, $2$1) | 0;
++  $7$0 = $2$0 ^ $1$0;
++  $7$1 = $2$1 ^ $1$1;
++  $8$0 = ___udivmoddi4($4$0, $4$1, $6$0, tempRet0, 0) | 0;
++  $10$0 = _i64Subtract($8$0 ^ $7$0, tempRet0 ^ $7$1, $7$0, $7$1) | 0;
++  return (tempRet0 = tempRet0, $10$0) | 0;
++}
++function ___remdi3($a$0, $a$1, $b$0, $b$1) {
++  $a$0 = $a$0 | 0;
++  $a$1 = $a$1 | 0;
++  $b$0 = $b$0 | 0;
++  $b$1 = $b$1 | 0;
++  var $rem = 0, $1$0 = 0, $1$1 = 0, $2$0 = 0, $2$1 = 0, $4$0 = 0, $4$1 = 0, $6$0 = 0, $10$0 = 0, $10$1 = 0, __stackBase__ = 0;
++  __stackBase__ = STACKTOP;
++  STACKTOP = STACKTOP + 8 | 0;
++  $rem = __stackBase__ | 0;
++  $1$0 = $a$1 >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
++  $1$1 = (($a$1 | 0) < 0 ? -1 : 0) >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
++  $2$0 = $b$1 >> 31 | (($b$1 | 0) < 0 ? -1 : 0) << 1;
++  $2$1 = (($b$1 | 0) < 0 ? -1 : 0) >> 31 | (($b$1 | 0) < 0 ? -1 : 0) << 1;
++  $4$0 = _i64Subtract($1$0 ^ $a$0, $1$1 ^ $a$1, $1$0, $1$1) | 0;
++  $4$1 = tempRet0;
++  $6$0 = _i64Subtract($2$0 ^ $b$0, $2$1 ^ $b$1, $2$0, $2$1) | 0;
++  ___udivmoddi4($4$0, $4$1, $6$0, tempRet0, $rem) | 0;
++  $10$0 = _i64Subtract(HEAP32[$rem >> 2] ^ $1$0, HEAP32[$rem + 4 >> 2] ^ $1$1, $1$0, $1$1) | 0;
++  $10$1 = tempRet0;
++  STACKTOP = __stackBase__;
++  return (tempRet0 = $10$1, $10$0) | 0;
++}
++function ___udivdi3($a$0, $a$1, $b$0, $b$1) {
++  $a$0 = $a$0 | 0;
++  $a$1 = $a$1 | 0;
++  $b$0 = $b$0 | 0;
++  $b$1 = $b$1 | 0;
++  var $1$0 = 0;
++  $1$0 = ___udivmoddi4($a$0, $a$1, $b$0, $b$1, 0) | 0;
++  return (tempRet0 = tempRet0, $1$0) | 0;
++}
++function ___uremdi3($a$0, $a$1, $b$0, $b$1) {
++  $a$0 = $a$0 | 0;
++  $a$1 = $a$1 | 0;
++  $b$0 = $b$0 | 0;
++  $b$1 = $b$1 | 0;
++  var $rem = 0, __stackBase__ = 0;
++  __stackBase__ = STACKTOP;
++  STACKTOP = STACKTOP + 8 | 0;
++  $rem = __stackBase__ | 0;
++  ___udivmoddi4($a$0, $a$1, $b$0, $b$1, $rem) | 0;
++  STACKTOP = __stackBase__;
++  return (tempRet0 = HEAP32[$rem + 4 >> 2] | 0, HEAP32[$rem >> 2] | 0) | 0;
++}
++function ___udivmoddi4($a$0, $a$1, $b$0, $b$1, $rem) {
++  $a$0 = $a$0 | 0;
++  $a$1 = $a$1 | 0;
++  $b$0 = $b$0 | 0;
++  $b$1 = $b$1 | 0;
++  $rem = $rem | 0;
++  var $n_sroa_0_0_extract_trunc = 0, $n_sroa_1_4_extract_shift$0 = 0, $n_sroa_1_4_extract_trunc = 0, $d_sroa_0_0_extract_trunc = 0, $d_sroa_1_4_extract_shift$0 = 0, $d_sroa_1_4_extract_trunc = 0, $4 = 0, $17 = 0, $37 = 0, $49 = 0, $51 = 0, $57 = 0, $58 = 0, $66 = 0, $78 = 0, $86 = 0, $88 = 0, $89 = 0, $91 = 0, $92 = 0, $95 = 0, $105 = 0, $117 = 0, $119 = 0, $125 = 0, $126 = 0, $130 = 0, $q_sroa_1_1_ph = 0, $q_sroa_0_1_ph = 0, $r_sroa_1_1_ph = 0, $r_sroa_0_1_ph = 0, $sr_1_ph = 0, $d_sroa_0_0_insert_insert99$0 = 0, $d_sroa_0_0_insert_insert99$1 = 0, $137$0 = 0, $137$1 = 0, $carry_0203 = 0, $sr_1202 = 0, $r_sroa_0_1201 = 0, $r_sroa_1_1200 = 0, $q_sroa_0_1199 = 0, $q_sroa_1_1198 = 0, $147 = 0, $149 = 0, $r_sroa_0_0_insert_insert42$0 = 0, $r_sroa_0_0_insert_insert42$1 = 0, $150$1 = 0, $151$0 = 0, $152 = 0, $154$0 = 0, $r_sroa_0_0_extract_trunc = 0, $r_sroa_1_4_extract_trunc = 0, $155 = 0, $carry_0_lcssa$0 = 0, $carry_0_lcssa$1 = 0, $r_sroa_0_1_lcssa = 0, $r_sroa_1_1_lcssa = 0, $q_sroa_0_1_lcssa = 0, $q_sroa_1_1_lcssa = 0, $q_sroa_0_0_insert_ext75$0 = 0, $q_sroa_0_0_insert_ext75$1 = 0, $q_sroa_0_0_insert_insert77$1 = 0, $_0$0 = 0, $_0$1 = 0;
++  $n_sroa_0_0_extract_trunc = $a$0;
++  $n_sroa_1_4_extract_shift$0 = $a$1;
++  $n_sroa_1_4_extract_trunc = $n_sroa_1_4_extract_shift$0;
++  $d_sroa_0_0_extract_trunc = $b$0;
++  $d_sroa_1_4_extract_shift$0 = $b$1;
++  $d_sroa_1_4_extract_trunc = $d_sroa_1_4_extract_shift$0;
++  if (($n_sroa_1_4_extract_trunc | 0) == 0) {
++    $4 = ($rem | 0) != 0;
++    if (($d_sroa_1_4_extract_trunc | 0) == 0) {
++      if ($4) {
++        HEAP32[$rem >> 2] = ($n_sroa_0_0_extract_trunc >>> 0) % ($d_sroa_0_0_extract_trunc >>> 0);
++        HEAP32[$rem + 4 >> 2] = 0;
++      }
++      $_0$1 = 0;
++      $_0$0 = ($n_sroa_0_0_extract_trunc >>> 0) / ($d_sroa_0_0_extract_trunc >>> 0) >>> 0;
++      return (tempRet0 = $_0$1, $_0$0) | 0;
++    } else {
++      if (!$4) {
++        $_0$1 = 0;
++        $_0$0 = 0;
++        return (tempRet0 = $_0$1, $_0$0) | 0;
++      }
++      HEAP32[$rem >> 2] = $a$0 & -1;
++      HEAP32[$rem + 4 >> 2] = $a$1 & 0;
++      $_0$1 = 0;
++      $_0$0 = 0;
++      return (tempRet0 = $_0$1, $_0$0) | 0;
++    }
++  }
++  $17 = ($d_sroa_1_4_extract_trunc | 0) == 0;
++  do {
++    if (($d_sroa_0_0_extract_trunc | 0) == 0) {
++      if ($17) {
++        if (($rem | 0) != 0) {
++          HEAP32[$rem >> 2] = ($n_sroa_1_4_extract_trunc >>> 0) % ($d_sroa_0_0_extract_trunc >>> 0);
++          HEAP32[$rem + 4 >> 2] = 0;
++        }
++        $_0$1 = 0;
++        $_0$0 = ($n_sroa_1_4_extract_trunc >>> 0) / ($d_sroa_0_0_extract_trunc >>> 0) >>> 0;
++        return (tempRet0 = $_0$1, $_0$0) | 0;
++      }
++      if (($n_sroa_0_0_extract_trunc | 0) == 0) {
++        if (($rem | 0) != 0) {
++          HEAP32[$rem >> 2] = 0;
++          HEAP32[$rem + 4 >> 2] = ($n_sroa_1_4_extract_trunc >>> 0) % ($d_sroa_1_4_extract_trunc >>> 0);
++        }
++        $_0$1 = 0;
++        $_0$0 = ($n_sroa_1_4_extract_trunc >>> 0) / ($d_sroa_1_4_extract_trunc >>> 0) >>> 0;
++        return (tempRet0 = $_0$1, $_0$0) | 0;
++      }
++      $37 = $d_sroa_1_4_extract_trunc - 1 | 0;
++      if (($37 & $d_sroa_1_4_extract_trunc | 0) == 0) {
++        if (($rem | 0) != 0) {
++          HEAP32[$rem >> 2] = 0 | $a$0 & -1;
++          HEAP32[$rem + 4 >> 2] = $37 & $n_sroa_1_4_extract_trunc | $a$1 & 0;
++        }
++        $_0$1 = 0;
++        $_0$0 = $n_sroa_1_4_extract_trunc >>> ((_llvm_cttz_i32($d_sroa_1_4_extract_trunc | 0) | 0) >>> 0);
++        return (tempRet0 = $_0$1, $_0$0) | 0;
++      }
++      $49 = _llvm_ctlz_i32($d_sroa_1_4_extract_trunc | 0) | 0;
++      $51 = $49 - (_llvm_ctlz_i32($n_sroa_1_4_extract_trunc | 0) | 0) | 0;
++      if ($51 >>> 0 <= 30) {
++        $57 = $51 + 1 | 0;
++        $58 = 31 - $51 | 0;
++        $sr_1_ph = $57;
++        $r_sroa_0_1_ph = $n_sroa_1_4_extract_trunc << $58 | $n_sroa_0_0_extract_trunc >>> ($57 >>> 0);
++        $r_sroa_1_1_ph = $n_sroa_1_4_extract_trunc >>> ($57 >>> 0);
++        $q_sroa_0_1_ph = 0;
++        $q_sroa_1_1_ph = $n_sroa_0_0_extract_trunc << $58;
++        break;
++      }
++      if (($rem | 0) == 0) {
++        $_0$1 = 0;
++        $_0$0 = 0;
++        return (tempRet0 = $_0$1, $_0$0) | 0;
++      }
++      HEAP32[$rem >> 2] = 0 | $a$0 & -1;
++      HEAP32[$rem + 4 >> 2] = $n_sroa_1_4_extract_shift$0 | $a$1 & 0;
++      $_0$1 = 0;
++      $_0$0 = 0;
++      return (tempRet0 = $_0$1, $_0$0) | 0;
++    } else {
++      if (!$17) {
++        $117 = _llvm_ctlz_i32($d_sroa_1_4_extract_trunc | 0) | 0;
++        $119 = $117 - (_llvm_ctlz_i32($n_sroa_1_4_extract_trunc | 0) | 0) | 0;
++        if ($119 >>> 0 <= 31) {
++          $125 = $119 + 1 | 0;
++          $126 = 31 - $119 | 0;
++          $130 = $119 - 31 >> 31;
++          $sr_1_ph = $125;
++          $r_sroa_0_1_ph = $n_sroa_0_0_extract_trunc >>> ($125 >>> 0) & $130 | $n_sroa_1_4_extract_trunc << $126;
++          $r_sroa_1_1_ph = $n_sroa_1_4_extract_trunc >>> ($125 >>> 0) & $130;
++          $q_sroa_0_1_ph = 0;
++          $q_sroa_1_1_ph = $n_sroa_0_0_extract_trunc << $126;
++          break;
++        }
++        if (($rem | 0) == 0) {
++          $_0$1 = 0;
++          $_0$0 = 0;
++          return (tempRet0 = $_0$1, $_0$0) | 0;
++        }
++        HEAP32[$rem >> 2] = 0 | $a$0 & -1;
++        HEAP32[$rem + 4 >> 2] = $n_sroa_1_4_extract_shift$0 | $a$1 & 0;
++        $_0$1 = 0;
++        $_0$0 = 0;
++        return (tempRet0 = $_0$1, $_0$0) | 0;
++      }
++      $66 = $d_sroa_0_0_extract_trunc - 1 | 0;
++      if (($66 & $d_sroa_0_0_extract_trunc | 0) != 0) {
++        $86 = (_llvm_ctlz_i32($d_sroa_0_0_extract_trunc | 0) | 0) + 33 | 0;
++        $88 = $86 - (_llvm_ctlz_i32($n_sroa_1_4_extract_trunc | 0) | 0) | 0;
++        $89 = 64 - $88 | 0;
++        $91 = 32 - $88 | 0;
++        $92 = $91 >> 31;
++        $95 = $88 - 32 | 0;
++        $105 = $95 >> 31;
++        $sr_1_ph = $88;
++        $r_sroa_0_1_ph = $91 - 1 >> 31 & $n_sroa_1_4_extract_trunc >>> ($95 >>> 0) | ($n_sroa_1_4_extract_trunc << $91 | $n_sroa_0_0_extract_trunc >>> ($88 >>> 0)) & $105;
++        $r_sroa_1_1_ph = $105 & $n_sroa_1_4_extract_trunc >>> ($88 >>> 0);
++        $q_sroa_0_1_ph = $n_sroa_0_0_extract_trunc << $89 & $92;
++        $q_sroa_1_1_ph = ($n_sroa_1_4_extract_trunc << $89 | $n_sroa_0_0_extract_trunc >>> ($95 >>> 0)) & $92 | $n_sroa_0_0_extract_trunc << $91 & $88 - 33 >> 31;
++        break;
++      }
++      if (($rem | 0) != 0) {
++        HEAP32[$rem >> 2] = $66 & $n_sroa_0_0_extract_trunc;
++        HEAP32[$rem + 4 >> 2] = 0;
++      }
++      if (($d_sroa_0_0_extract_trunc | 0) == 1) {
++        $_0$1 = $n_sroa_1_4_extract_shift$0 | $a$1 & 0;
++        $_0$0 = 0 | $a$0 & -1;
++        return (tempRet0 = $_0$1, $_0$0) | 0;
++      } else {
++        $78 = _llvm_cttz_i32($d_sroa_0_0_extract_trunc | 0) | 0;
++        $_0$1 = 0 | $n_sroa_1_4_extract_trunc >>> ($78 >>> 0);
++        $_0$0 = $n_sroa_1_4_extract_trunc << 32 - $78 | $n_sroa_0_0_extract_trunc >>> ($78 >>> 0) | 0;
++        return (tempRet0 = $_0$1, $_0$0) | 0;
++      }
++    }
++  } while (0);
++  if (($sr_1_ph | 0) == 0) {
++    $q_sroa_1_1_lcssa = $q_sroa_1_1_ph;
++    $q_sroa_0_1_lcssa = $q_sroa_0_1_ph;
++    $r_sroa_1_1_lcssa = $r_sroa_1_1_ph;
++    $r_sroa_0_1_lcssa = $r_sroa_0_1_ph;
++    $carry_0_lcssa$1 = 0;
++    $carry_0_lcssa$0 = 0;
++  } else {
++    $d_sroa_0_0_insert_insert99$0 = 0 | $b$0 & -1;
++    $d_sroa_0_0_insert_insert99$1 = $d_sroa_1_4_extract_shift$0 | $b$1 & 0;
++    $137$0 = _i64Add($d_sroa_0_0_insert_insert99$0, $d_sroa_0_0_insert_insert99$1, -1, -1) | 0;
++    $137$1 = tempRet0;
++    $q_sroa_1_1198 = $q_sroa_1_1_ph;
++    $q_sroa_0_1199 = $q_sroa_0_1_ph;
++    $r_sroa_1_1200 = $r_sroa_1_1_ph;
++    $r_sroa_0_1201 = $r_sroa_0_1_ph;
++    $sr_1202 = $sr_1_ph;
++    $carry_0203 = 0;
++    while (1) {
++      $147 = $q_sroa_0_1199 >>> 31 | $q_sroa_1_1198 << 1;
++      $149 = $carry_0203 | $q_sroa_0_1199 << 1;
++      $r_sroa_0_0_insert_insert42$0 = 0 | ($r_sroa_0_1201 << 1 | $q_sroa_1_1198 >>> 31);
++      $r_sroa_0_0_insert_insert42$1 = $r_sroa_0_1201 >>> 31 | $r_sroa_1_1200 << 1 | 0;
++      _i64Subtract($137$0, $137$1, $r_sroa_0_0_insert_insert42$0, $r_sroa_0_0_insert_insert42$1) | 0;
++      $150$1 = tempRet0;
++      $151$0 = $150$1 >> 31 | (($150$1 | 0) < 0 ? -1 : 0) << 1;
++      $152 = $151$0 & 1;
++      $154$0 = _i64Subtract($r_sroa_0_0_insert_insert42$0, $r_sroa_0_0_insert_insert42$1, $151$0 & $d_sroa_0_0_insert_insert99$0, ((($150$1 | 0) < 0 ? -1 : 0) >> 31 | (($150$1 | 0) < 0 ? -1 : 0) << 1) & $d_sroa_0_0_insert_insert99$1) | 0;
++      $r_sroa_0_0_extract_trunc = $154$0;
++      $r_sroa_1_4_extract_trunc = tempRet0;
++      $155 = $sr_1202 - 1 | 0;
++      if (($155 | 0) == 0) {
++        break;
++      } else {
++        $q_sroa_1_1198 = $147;
++        $q_sroa_0_1199 = $149;
++        $r_sroa_1_1200 = $r_sroa_1_4_extract_trunc;
++        $r_sroa_0_1201 = $r_sroa_0_0_extract_trunc;
++        $sr_1202 = $155;
++        $carry_0203 = $152;
++      }
++    }
++    $q_sroa_1_1_lcssa = $147;
++    $q_sroa_0_1_lcssa = $149;
++    $r_sroa_1_1_lcssa = $r_sroa_1_4_extract_trunc;
++    $r_sroa_0_1_lcssa = $r_sroa_0_0_extract_trunc;
++    $carry_0_lcssa$1 = 0;
++    $carry_0_lcssa$0 = $152;
++  }
++  $q_sroa_0_0_insert_ext75$0 = $q_sroa_0_1_lcssa;
++  $q_sroa_0_0_insert_ext75$1 = 0;
++  $q_sroa_0_0_insert_insert77$1 = $q_sroa_1_1_lcssa | $q_sroa_0_0_insert_ext75$1;
++  if (($rem | 0) != 0) {
++    HEAP32[$rem >> 2] = 0 | $r_sroa_0_1_lcssa;
++    HEAP32[$rem + 4 >> 2] = $r_sroa_1_1_lcssa | 0;
++  }
++  $_0$1 = (0 | $q_sroa_0_0_insert_ext75$0) >>> 31 | $q_sroa_0_0_insert_insert77$1 << 1 | ($q_sroa_0_0_insert_ext75$1 << 1 | $q_sroa_0_0_insert_ext75$0 >>> 31) & 0 | $carry_0_lcssa$1;
++  $_0$0 = ($q_sroa_0_0_insert_ext75$0 << 1 | 0 >>> 31) & -2 | $carry_0_lcssa$0;
++  return (tempRet0 = $_0$1, $_0$0) | 0;
++}
++// =======================================================================
++
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/html/bpgdec.js	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,23 @@
+((function(){var Module={};var Module;if(!Module)Module=(typeof Module!=="undefined"?Module:null)||{};var moduleOverrides={};for(var key in Module){if(Module.hasOwnProperty(key)){moduleOverrides[key]=Module[key]}}var ENVIRONMENT_IS_NODE=typeof process==="object"&&typeof require==="function";var ENVIRONMENT_IS_WEB=typeof window==="object";var ENVIRONMENT_IS_WORKER=typeof importScripts==="function";var ENVIRONMENT_IS_SHELL=!ENVIRONMENT_IS_WEB&&!ENVIRONMENT_IS_NODE&&!ENVIRONMENT_IS_WORKER;if(ENVIRONMENT_IS_NODE){if(!Module["print"])Module["print"]=function print(x){process["stdout"].write(x+"\n")};if(!Module["printErr"])Module["printErr"]=function printErr(x){process["stderr"].write(x+"\n")};var nodeFS=require("fs");var nodePath=require("path");Module["read"]=function read(filename,binary){filename=nodePath["normalize"](filename);var ret=nodeFS["readFileSync"](filename);if(!ret&&filename!=nodePath["resolve"](filename)){filename=path.join(__dirname,"..","src",filename);ret=nodeFS["readFileSync"](filename)}if(ret&&!binary)ret=ret.toString();return ret};Module["readBinary"]=function readBinary(filename){return Module["read"](filename,true)};Module["load"]=function load(f){globalEval(read(f))};Module["thisProgram"]=process["argv"][1].replace(/\\/g,"/");Module["arguments"]=process["argv"].slice(2);if(typeof module!=="undefined"){module["exports"]=Module}process["on"]("uncaughtException",(function(ex){if(!(ex instanceof ExitStatus)){throw ex}}))}else if(ENVIRONMENT_IS_SHELL){if(!Module["print"])Module["print"]=print;if(typeof printErr!="undefined")Module["printErr"]=printErr;if(typeof read!="undefined"){Module["read"]=read}else{Module["read"]=function read(){throw"no read() available (jsc?)"}}Module["readBinary"]=function readBinary(f){if(typeof readbuffer==="function"){return new Uint8Array(readbuffer(f))}var data=read(f,"binary");assert(typeof data==="object");return data};if(typeof scriptArgs!="undefined"){Module["arguments"]=scriptArgs}else if(typeof 
arguments!="undefined"){Module["arguments"]=arguments}this["Module"]=Module}else if(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER){Module["read"]=function read(url){var xhr=new XMLHttpRequest;xhr.open("GET",url,false);xhr.send(null);return xhr.responseText};if(typeof arguments!="undefined"){Module["arguments"]=arguments}if(typeof console!=="undefined"){if(!Module["print"])Module["print"]=function print(x){console.log(x)};if(!Module["printErr"])Module["printErr"]=function printErr(x){console.log(x)}}else{var TRY_USE_DUMP=false;if(!Module["print"])Module["print"]=TRY_USE_DUMP&&typeof dump!=="undefined"?(function(x){dump(x)}):(function(x){})}if(ENVIRONMENT_IS_WEB){window["Module"]=Module}else{Module["load"]=importScripts}}else{throw"Unknown runtime environment. Where are we?"}function globalEval(x){eval.call(null,x)}if(!Module["load"]&&Module["read"]){Module["load"]=function load(f){globalEval(Module["read"](f))}}if(!Module["print"]){Module["print"]=(function(){})}if(!Module["printErr"]){Module["printErr"]=Module["print"]}if(!Module["arguments"]){Module["arguments"]=[]}if(!Module["thisProgram"]){Module["thisProgram"]="./this.program"}Module.print=Module["print"];Module.printErr=Module["printErr"];Module["preRun"]=[];Module["postRun"]=[];for(var key in moduleOverrides){if(moduleOverrides.hasOwnProperty(key)){Module[key]=moduleOverrides[key]}}var Runtime={setTempRet0:(function(value){tempRet0=value}),getTempRet0:(function(){return tempRet0}),stackSave:(function(){return STACKTOP}),stackRestore:(function(stackTop){STACKTOP=stackTop}),getNativeTypeSize:(function(type){switch(type){case"i1":case"i8":return 1;case"i16":return 2;case"i32":return 4;case"i64":return 8;case"float":return 4;case"double":return 8;default:{if(type[type.length-1]==="*"){return Runtime.QUANTUM_SIZE}else if(type[0]==="i"){var bits=parseInt(type.substr(1));assert(bits%8===0);return bits/8}else{return 0}}}}),getNativeFieldSize:(function(type){return 
Math.max(Runtime.getNativeTypeSize(type),Runtime.QUANTUM_SIZE)}),STACK_ALIGN:16,getAlignSize:(function(type,size,vararg){if(!vararg&&(type=="i64"||type=="double"))return 8;if(!type)return Math.min(size,8);return Math.min(size||(type?Runtime.getNativeFieldSize(type):0),Runtime.QUANTUM_SIZE)}),dynCall:(function(sig,ptr,args){if(args&&args.length){if(!args.splice)args=Array.prototype.slice.call(args);args.splice(0,0,ptr);return Module["dynCall_"+sig].apply(null,args)}else{return Module["dynCall_"+sig].call(null,ptr)}}),functionPointers:[],addFunction:(function(func){for(var i=0;i<Runtime.functionPointers.length;i++){if(!Runtime.functionPointers[i]){Runtime.functionPointers[i]=func;return 2*(1+i)}}throw"Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS."}),removeFunction:(function(index){Runtime.functionPointers[(index-2)/2]=null}),getAsmConst:(function(code,numArgs){if(!Runtime.asmConstCache)Runtime.asmConstCache={};var func=Runtime.asmConstCache[code];if(func)return func;var args=[];for(var i=0;i<numArgs;i++){args.push(String.fromCharCode(36)+i)}var source=Pointer_stringify(code);if(source[0]==='"'){if(source.indexOf('"',1)===source.length-1){source=source.substr(1,source.length-2)}else{abort("invalid EM_ASM input |"+source+"|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. 
}, input) (to input values)")}}try{var evalled=eval("(function(Module, FS) { return function("+args.join(",")+"){ "+source+" } })")(Module,typeof FS!=="undefined"?FS:null)}catch(e){Module.printErr("error in executing inline EM_ASM code: "+e+" on: \n\n"+source+"\n\nwith args |"+args+"| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)");throw e}return Runtime.asmConstCache[code]=evalled}),warnOnce:(function(text){if(!Runtime.warnOnce.shown)Runtime.warnOnce.shown={};if(!Runtime.warnOnce.shown[text]){Runtime.warnOnce.shown[text]=1;Module.printErr(text)}}),funcWrappers:{},getFuncWrapper:(function(func,sig){assert(sig);if(!Runtime.funcWrappers[sig]){Runtime.funcWrappers[sig]={}}var sigCache=Runtime.funcWrappers[sig];if(!sigCache[func]){sigCache[func]=function dynCall_wrapper(){return Runtime.dynCall(sig,func,arguments)}}return sigCache[func]}),UTF8Processor:(function(){var buffer=[];var needed=0;this.processCChar=(function(code){code=code&255;if(buffer.length==0){if((code&128)==0){return String.fromCharCode(code)}buffer.push(code);if((code&224)==192){needed=1}else if((code&240)==224){needed=2}else{needed=3}return""}if(needed){buffer.push(code);needed--;if(needed>0)return""}var c1=buffer[0];var c2=buffer[1];var c3=buffer[2];var c4=buffer[3];var ret;if(buffer.length==2){ret=String.fromCharCode((c1&31)<<6|c2&63)}else if(buffer.length==3){ret=String.fromCharCode((c1&15)<<12|(c2&63)<<6|c3&63)}else{var codePoint=(c1&7)<<18|(c2&63)<<12|(c3&63)<<6|c4&63;ret=String.fromCharCode(((codePoint-65536)/1024|0)+55296,(codePoint-65536)%1024+56320)}buffer.length=0;return ret});this.processJSString=function processJSString(string){string=unescape(encodeURIComponent(string));var ret=[];for(var i=0;i<string.length;i++){ret.push(string.charCodeAt(i))}return ret}}),getCompilerSetting:(function(name){throw"You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work"}),stackAlloc:(function(size){var 
ret=STACKTOP;STACKTOP=STACKTOP+size|0;STACKTOP=STACKTOP+15&-16;return ret}),staticAlloc:(function(size){var ret=STATICTOP;STATICTOP=STATICTOP+size|0;STATICTOP=STATICTOP+15&-16;return ret}),dynamicAlloc:(function(size){var ret=DYNAMICTOP;DYNAMICTOP=DYNAMICTOP+size|0;DYNAMICTOP=DYNAMICTOP+15&-16;if(DYNAMICTOP>=TOTAL_MEMORY)enlargeMemory();return ret}),alignMemory:(function(size,quantum){var ret=size=Math.ceil(size/(quantum?quantum:16))*(quantum?quantum:16);return ret}),makeBigInt:(function(low,high,unsigned){var ret=unsigned?+(low>>>0)+ +(high>>>0)*+4294967296:+(low>>>0)+ +(high|0)*+4294967296;return ret}),GLOBAL_BASE:8,QUANTUM_SIZE:4,__dummy__:0};Module["Runtime"]=Runtime;var __THREW__=0;var ABORT=false;var EXITSTATUS=0;var undef=0;var tempValue,tempInt,tempBigInt,tempInt2,tempBigInt2,tempPair,tempBigIntI,tempBigIntR,tempBigIntS,tempBigIntP,tempBigIntD,tempDouble,tempFloat;var tempI64,tempI64b;var tempRet0,tempRet1,tempRet2,tempRet3,tempRet4,tempRet5,tempRet6,tempRet7,tempRet8,tempRet9;function assert(condition,text){if(!condition){abort("Assertion failed: "+text)}}var globalScope=this;function getCFunc(ident){var func=Module["_"+ident];if(!func){try{func=eval("_"+ident)}catch(e){}}assert(func,"Cannot call unknown function "+ident+" (perhaps LLVM optimizations or closure removed it?)");return func}var cwrap,ccall;((function(){var stack=0;var JSfuncs={"stackSave":(function(){stack=Runtime.stackSave()}),"stackRestore":(function(){Runtime.stackRestore(stack)}),"arrayToC":(function(arr){var ret=Runtime.stackAlloc(arr.length);writeArrayToMemory(arr,ret);return ret}),"stringToC":(function(str){var ret=0;if(str!==null&&str!==undefined&&str!==0){ret=Runtime.stackAlloc((str.length<<2)+1);writeStringToMemory(str,ret)}return ret})};var toC={"string":JSfuncs["stringToC"],"array":JSfuncs["arrayToC"]};ccall=function ccallFunc(ident,returnType,argTypes,args){var func=getCFunc(ident);var cArgs=[];if(args){for(var i=0;i<args.length;i++){var 
converter=toC[argTypes[i]];if(converter){if(stack===0)stack=Runtime.stackSave();cArgs[i]=converter(args[i])}else{cArgs[i]=args[i]}}}var ret=func.apply(null,cArgs);if(returnType==="string")ret=Pointer_stringify(ret);if(stack!==0)JSfuncs["stackRestore"]();return ret};var sourceRegex=/^function\s*\(([^)]*)\)\s*{\s*([^*]*?)[\s;]*(?:return\s*(.*?)[;\s]*)?}$/;function parseJSFunc(jsfunc){var parsed=jsfunc.toString().match(sourceRegex).slice(1);return{arguments:parsed[0],body:parsed[1],returnValue:parsed[2]}}var JSsource={};for(var fun in JSfuncs){if(JSfuncs.hasOwnProperty(fun)){JSsource[fun]=parseJSFunc(JSfuncs[fun])}}cwrap=function cwrap(ident,returnType,argTypes){argTypes=argTypes||[];var cfunc=getCFunc(ident);var numericArgs=argTypes.every((function(type){return type==="number"}));var numericRet=returnType!=="string";if(numericRet&&numericArgs){return cfunc}var argNames=argTypes.map((function(x,i){return"$"+i}));var funcstr="(function("+argNames.join(",")+") {";var nargs=argTypes.length;if(!numericArgs){funcstr+=JSsource["stackSave"].body+";";for(var i=0;i<nargs;i++){var arg=argNames[i],type=argTypes[i];if(type==="number")continue;var convertCode=JSsource[type+"ToC"];funcstr+="var "+convertCode.arguments+" = "+arg+";";funcstr+=convertCode.body+";";funcstr+=arg+"="+convertCode.returnValue+";"}}var cfuncname=parseJSFunc((function(){return cfunc})).returnValue;funcstr+="var ret = "+cfuncname+"("+argNames.join(",")+");";if(!numericRet){var strgfy=parseJSFunc((function(){return Pointer_stringify})).returnValue;funcstr+="ret = "+strgfy+"(ret);"}if(!numericArgs){funcstr+=JSsource["stackRestore"].body+";"}funcstr+="return ret})";return eval(funcstr)}}))();Module["cwrap"]=cwrap;Module["ccall"]=ccall;function 
setValue(ptr,value,type,noSafe){type=type||"i8";if(type.charAt(type.length-1)==="*")type="i32";switch(type){case"i1":HEAP8[ptr>>0]=value;break;case"i8":HEAP8[ptr>>0]=value;break;case"i16":HEAP16[ptr>>1]=value;break;case"i32":HEAP32[ptr>>2]=value;break;case"i64":tempI64=[value>>>0,(tempDouble=value,+Math_abs(tempDouble)>=+1?tempDouble>+0?(Math_min(+Math_floor(tempDouble/+4294967296),+4294967295)|0)>>>0:~~+Math_ceil((tempDouble- +(~~tempDouble>>>0))/+4294967296)>>>0:0)],HEAP32[ptr>>2]=tempI64[0],HEAP32[ptr+4>>2]=tempI64[1];break;case"float":HEAPF32[ptr>>2]=value;break;case"double":HEAPF64[ptr>>3]=value;break;default:abort("invalid type for setValue: "+type)}}Module["setValue"]=setValue;function getValue(ptr,type,noSafe){type=type||"i8";if(type.charAt(type.length-1)==="*")type="i32";switch(type){case"i1":return HEAP8[ptr>>0];case"i8":return HEAP8[ptr>>0];case"i16":return HEAP16[ptr>>1];case"i32":return HEAP32[ptr>>2];case"i64":return HEAP32[ptr>>2];case"float":return HEAPF32[ptr>>2];case"double":return HEAPF64[ptr>>3];default:abort("invalid type for setValue: "+type)}return null}Module["getValue"]=getValue;var ALLOC_NORMAL=0;var ALLOC_STACK=1;var ALLOC_STATIC=2;var ALLOC_DYNAMIC=3;var ALLOC_NONE=4;Module["ALLOC_NORMAL"]=ALLOC_NORMAL;Module["ALLOC_STACK"]=ALLOC_STACK;Module["ALLOC_STATIC"]=ALLOC_STATIC;Module["ALLOC_DYNAMIC"]=ALLOC_DYNAMIC;Module["ALLOC_NONE"]=ALLOC_NONE;function allocate(slab,types,allocator,ptr){var zeroinit,size;if(typeof slab==="number"){zeroinit=true;size=slab}else{zeroinit=false;size=slab.length}var singleType=typeof types==="string"?types:null;var ret;if(allocator==ALLOC_NONE){ret=ptr}else{ret=[_malloc,Runtime.stackAlloc,Runtime.staticAlloc,Runtime.dynamicAlloc][allocator===undefined?ALLOC_STATIC:allocator](Math.max(size,singleType?1:types.length))}if(zeroinit){var ptr=ret,stop;assert((ret&3)==0);stop=ret+(size&~3);for(;ptr<stop;ptr+=4){HEAP32[ptr>>2]=0}stop=ret+size;while(ptr<stop){HEAP8[ptr++>>0]=0}return 
ret}if(singleType==="i8"){if(slab.subarray||slab.slice){HEAPU8.set(slab,ret)}else{HEAPU8.set(new Uint8Array(slab),ret)}return ret}var i=0,type,typeSize,previousType;while(i<size){var curr=slab[i];if(typeof curr==="function"){curr=Runtime.getFunctionIndex(curr)}type=singleType||types[i];if(type===0){i++;continue}if(type=="i64")type="i32";setValue(ret+i,curr,type);if(previousType!==type){typeSize=Runtime.getNativeTypeSize(type);previousType=type}i+=typeSize}return ret}Module["allocate"]=allocate;function demangleAll(text){return text}function jsStackTrace(){var err=new Error;if(!err.stack){try{throw new Error(0)}catch(e){err=e}if(!err.stack){return"(no stack trace available)"}}return err.stack.toString()}function stackTrace(){return demangleAll(jsStackTrace())}Module["stackTrace"]=stackTrace;var PAGE_SIZE=4096;function alignMemoryPage(x){return x+4095&-4096}var HEAP;var HEAP8,HEAPU8,HEAP16,HEAPU16,HEAP32,HEAPU32,HEAPF32,HEAPF64;var STATIC_BASE=0,STATICTOP=0,staticSealed=false;var STACK_BASE=0,STACKTOP=0,STACK_MAX=0;var DYNAMIC_BASE=0,DYNAMICTOP=0;function enlargeMemory(){abort("Cannot enlarge memory arrays. 
Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value "+TOTAL_MEMORY+", (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.")}var TOTAL_STACK=Module["TOTAL_STACK"]||5242880;var TOTAL_MEMORY=Module["TOTAL_MEMORY"]||33554432;var FAST_MEMORY=Module["FAST_MEMORY"]||2097152;var totalMemory=64*1024;while(totalMemory<TOTAL_MEMORY||totalMemory<2*TOTAL_STACK){if(totalMemory<16*1024*1024){totalMemory*=2}else{totalMemory+=16*1024*1024}}if(totalMemory!==TOTAL_MEMORY){Module.printErr("increasing TOTAL_MEMORY to "+totalMemory+" to be compliant with the asm.js spec");TOTAL_MEMORY=totalMemory}assert(typeof Int32Array!=="undefined"&&typeof Float64Array!=="undefined"&&!!(new Int32Array(1))["subarray"]&&!!(new Int32Array(1))["set"],"JS engine does not provide full typed array support");var buffer=new ArrayBuffer(TOTAL_MEMORY);HEAP8=new Int8Array(buffer);HEAP16=new Int16Array(buffer);HEAP32=new Int32Array(buffer);HEAPU8=new Uint8Array(buffer);HEAPU16=new Uint16Array(buffer);HEAPU32=new Uint32Array(buffer);HEAPF32=new Float32Array(buffer);HEAPF64=new Float64Array(buffer);HEAP32[0]=255;assert(HEAPU8[0]===255&&HEAPU8[3]===0,"Typed arrays 2 must be run on a little-endian system");Module["HEAP"]=HEAP;Module["buffer"]=buffer;Module["HEAP8"]=HEAP8;Module["HEAP16"]=HEAP16;Module["HEAP32"]=HEAP32;Module["HEAPU8"]=HEAPU8;Module["HEAPU16"]=HEAPU16;Module["HEAPU32"]=HEAPU32;Module["HEAPF32"]=HEAPF32;Module["HEAPF64"]=HEAPF64;function callRuntimeCallbacks(callbacks){while(callbacks.length>0){var callback=callbacks.shift();if(typeof callback=="function"){callback();continue}var func=callback.func;if(typeof func==="number"){if(callback.arg===undefined){Runtime.dynCall("v",func)}else{Runtime.dynCall("vi",func,[callback.arg])}}else{func(callback.arg===undefined?null:callback.arg)}}}var __ATPRERUN__=[];var __ATINIT__=[];var __ATMAIN__=[];var __ATEXIT__=[];var 
__ATPOSTRUN__=[];var runtimeInitialized=false;var runtimeExited=false;function preRun(){if(Module["preRun"]){if(typeof Module["preRun"]=="function")Module["preRun"]=[Module["preRun"]];while(Module["preRun"].length){addOnPreRun(Module["preRun"].shift())}}callRuntimeCallbacks(__ATPRERUN__)}function ensureInitRuntime(){if(runtimeInitialized)return;runtimeInitialized=true;callRuntimeCallbacks(__ATINIT__)}function preMain(){callRuntimeCallbacks(__ATMAIN__)}function exitRuntime(){callRuntimeCallbacks(__ATEXIT__);runtimeExited=true}function postRun(){if(Module["postRun"]){if(typeof Module["postRun"]=="function")Module["postRun"]=[Module["postRun"]];while(Module["postRun"].length){addOnPostRun(Module["postRun"].shift())}}callRuntimeCallbacks(__ATPOSTRUN__)}function addOnPreRun(cb){__ATPRERUN__.unshift(cb)}Module["addOnPreRun"]=Module.addOnPreRun=addOnPreRun;function addOnInit(cb){__ATINIT__.unshift(cb)}Module["addOnInit"]=Module.addOnInit=addOnInit;function addOnPreMain(cb){__ATMAIN__.unshift(cb)}Module["addOnPreMain"]=Module.addOnPreMain=addOnPreMain;function addOnExit(cb){__ATEXIT__.unshift(cb)}Module["addOnExit"]=Module.addOnExit=addOnExit;function addOnPostRun(cb){__ATPOSTRUN__.unshift(cb)}Module["addOnPostRun"]=Module.addOnPostRun=addOnPostRun;function intArrayFromString(stringy,dontAddNull,length){var ret=(new Runtime.UTF8Processor).processJSString(stringy);if(length){ret.length=length}if(!dontAddNull){ret.push(0)}return ret}Module["intArrayFromString"]=intArrayFromString;function intArrayToString(array){var ret=[];for(var i=0;i<array.length;i++){var chr=array[i];if(chr>255){chr&=255}ret.push(String.fromCharCode(chr))}return ret.join("")}Module["intArrayToString"]=intArrayToString;function writeStringToMemory(string,buffer,dontAddNull){var array=intArrayFromString(string,dontAddNull);var i=0;while(i<array.length){var chr=array[i];HEAP8[buffer+i>>0]=chr;i=i+1}}Module["writeStringToMemory"]=writeStringToMemory;function writeArrayToMemory(array,buffer){for(var 
i=0;i<array.length;i++){HEAP8[buffer+i>>0]=array[i]}}Module["writeArrayToMemory"]=writeArrayToMemory;function writeAsciiToMemory(str,buffer,dontAddNull){for(var i=0;i<str.length;i++){HEAP8[buffer+i>>0]=str.charCodeAt(i)}if(!dontAddNull)HEAP8[buffer+str.length>>0]=0}Module["writeAsciiToMemory"]=writeAsciiToMemory;function unSign(value,bits,ignore){if(value>=0){return value}return bits<=32?2*Math.abs(1<<bits-1)+value:Math.pow(2,bits)+value}function reSign(value,bits,ignore){if(value<=0){return value}var half=bits<=32?Math.abs(1<<bits-1):Math.pow(2,bits-1);if(value>=half&&(bits<=32||value>half)){value=-2*half+value}return value}if(!Math["imul"]||Math["imul"](4294967295,5)!==-5)Math["imul"]=function imul(a,b){var ah=a>>>16;var al=a&65535;var bh=b>>>16;var bl=b&65535;return al*bl+(ah*bl+al*bh<<16)|0};Math.imul=Math["imul"];var Math_abs=Math.abs;var Math_cos=Math.cos;var Math_sin=Math.sin;var Math_tan=Math.tan;var Math_acos=Math.acos;var Math_asin=Math.asin;var Math_atan=Math.atan;var Math_atan2=Math.atan2;var Math_exp=Math.exp;var Math_log=Math.log;var Math_sqrt=Math.sqrt;var Math_ceil=Math.ceil;var Math_floor=Math.floor;var Math_pow=Math.pow;var Math_imul=Math.imul;var Math_fround=Math.fround;var Math_min=Math.min;var runDependencies=0;var runDependencyWatcher=null;var dependenciesFulfilled=null;function addRunDependency(id){runDependencies++;if(Module["monitorRunDependencies"]){Module["monitorRunDependencies"](runDependencies)}}Module["addRunDependency"]=addRunDependency;function removeRunDependency(id){runDependencies--;if(Module["monitorRunDependencies"]){Module["monitorRunDependencies"](runDependencies)}if(runDependencies==0){if(runDependencyWatcher!==null){clearInterval(runDependencyWatcher);runDependencyWatcher=null}if(dependenciesFulfilled){var callback=dependenciesFulfilled;dependenciesFulfilled=null;callback()}}}Module["removeRunDependency"]=removeRunDependency;Module["preloadedImages"]={};Module["preloadedAudios"]={};var 
memoryInitializer=null;STATIC_BASE=8;STATICTOP=STATIC_BASE+6112;__ATINIT__.push();allocate([0,0,1,0,1,2,0,1,2,3,1,2,3,2,3,3,0,1,0,2,1,0,3,2,1,0,3,2,1,3,2,3,0,0,1,0,1,2,0,1,2,3,0,1,2,3,4,0,1,2,3,4,5,0,1,2,3,4,5,6,0,1,2,3,4,5,6,7,1,2,3,4,5,6,7,2,3,4,5,6,7,3,4,5,6,7,4,5,6,7,5,6,7,6,7,7,0,1,0,2,1,0,3,2,1,0,4,3,2,1,0,5,4,3,2,1,0,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,7,6,5,4,3,2,7,6,5,4,3,7,6,5,4,7,6,5,7,6,7,40,45,51,57,64,72,0,0,29,0,0,0,30,0,0,0,31,0,0,0,32,0,0,0,33,0,0,0,33,0,0,0,34,0,0,0,34,0,0,0,35,0,0,0,35,0,0,0,36,0,0,0,36,0,0,0,37,0,0,0,37,0,0,0,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,7,7,7,8,8,8,8,8,8,9,9,9,9,9,9,10,10,10,10,10,10,11,11,11,11,11,11,12,12,0,0,0,0,0,0,0,2,5,9,1,4,8,12,3,7,11,14,6,10,13,15,0,0,0,0,0,0,0,0,0,2,1,3,0,0,0,0,0,2,5,9,14,20,27,35,1,4,8,13,19,26,34,42,3,7,12,18,25,33,41,48,6,11,17,24,32,40,47,53,10,16,23,31,39,46,52,57,15,22,30,38,45,51,56,60,21,29,37,44,50,55,59,62,28,36,43,49,54,58,61,63,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,0,1,2,3,16,17,18,19,4,5,6,7,20,21,22,23,8,9,10,11,24,25,26,27,12,13,14,15,28,29,30,31,32,33,34,35,48,49,50,51,36,37,38,39,52,53,54,55,40,41,42,43,56,57,58,59,44,45,46,47,60,61,62,63,0,1,4,5,2,3,4,5,6,6,8,8,7,7,8,8,1,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,0,2,1,0,0,2,1,0,0,2,1,0,0,2,1,0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,153,200,139,141,157,154,154,154,154,154,154,154,154,184,154,154,154,184,63,139,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,153,138,138,111,141,94,138,182,154,139,139,139,139,139,139,110,110,124,125,140,153,125,127,140,109,111,143,127,111,79,108,123,63,110,110,124,125,140,153,125,127,140,109,111,143,127,111,79,108,123,63,91,171,134,141,111,111,125,110,110,94,124,108,124,107,125,141,179,153,
125,107,125,141,179,153,125,107,125,141,179,153,125,140,139,182,182,152,136,152,136,153,136,139,111,136,139,111,141,111,140,92,137,138,140,152,138,139,153,74,149,92,139,107,122,152,140,179,166,182,140,227,122,197,138,153,136,167,152,152,154,154,154,154,154,154,154,154,154,154,154,154,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,153,185,107,139,126,154,197,185,201,154,154,154,149,154,139,154,154,154,152,139,110,122,95,79,63,31,31,153,153,153,153,140,198,140,198,168,79,124,138,94,153,111,149,107,167,154,139,139,139,139,139,139,125,110,94,110,95,79,125,111,110,78,110,111,111,95,94,108,123,108,125,110,94,110,95,79,125,111,110,78,110,111,111,95,94,108,123,108,121,140,61,154,155,154,139,153,139,123,123,63,153,166,183,140,136,153,154,166,183,140,136,153,154,166,183,140,136,153,154,170,153,123,123,107,121,107,121,167,151,183,140,151,183,140,140,140,154,196,196,167,154,152,167,182,182,134,149,136,153,121,136,137,169,194,166,167,154,167,137,182,107,167,91,122,107,167,154,154,154,154,154,154,154,154,154,154,154,154,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,153,160,107,139,126,154,197,185,201,154,154,154,134,154,139,154,154,183,152,139,154,137,95,79,63,31,31,153,153,153,153,169,198,169,198,168,79,224,167,122,153,111,149,92,167,154,139,139,139,139,139,139,125,110,124,110,95,94,125,111,111,79,125,126,111,111,79,108,123,93,125,110,124,110,95,94,125,111,111,79,125,126,111,111,79,108,123,93,121,140,61,154,170,154,139,153,139,123,123,63,124,166,183,140,136,153,154,166,183,140,136,153,154,166,183,140,136,153,154,170,153,138,138,122,121,122,121,167,151,183,140,151,183,140,140,140,154,196,167,167,154,152,167,182,182,134,149,136,153,121,136,122,169,208,166,167,154,152,167,182,107,167,91,107,107,167,154,154,154,154,154,154,154,154,154,154,154,154,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,7,8,9,10,11,12,13,14,15,16,17,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,2,2
,2,2,3,3,3,3,4,4,4,5,5,6,6,7,8,9,10,11,13,14,16,18,20,22,24,0,0,29,30,31,32,33,33,34,34,35,35,36,36,37,37,0,0,104,101,118,99,0,0,0,0,128,5,0,0,0,0,0,0,0,0,0,0,53,54,50,72,34,48,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,176,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,26,10,1,0,0,0,0,0,1,2,2,2,2,3,5,7,8,10,12,13,15,17,18,19,20,21,22,23,23,24,24,25,25,26,27,27,28,28,29,29,30,31,0,0,0,0,0,7,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,32,26,21,17,13,9,5,2,0,254,251,247,243,239,235,230,224,230,235,239,243,247,251,254,0,2,5,9,13,17,21,26,32,0,0,0,0,0,0,0,0,240,154,249,114,252,138,253,30,254,122,254,197,254,0,255,197,254,122,254,30,254,138,253,114,252,154,249,0,240,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,64,90,90,90,89,88,87,85,83,82,80,78,75,73,70,67,64,61,57,54,50,46,43,38,36,31,25,22,18,13,9,4,1,2,0,3,4,0,0,0,255,0,1,0,0,255,0,1,255,255,1,1,1,255,255,1,16,16,16,16,17,18,21,24,16,16,16,16,17,19,22,25,16,16,17,18,20,22,25,29,16,16,18,21,24,27,31,36,17,17,20,24,30,35,41,47,18,19,22,27,35,44,54,65,21,22,25,31,41,54,70,88,24,25,29,36,47,65,88,115,16,16,16,16,17,18,20,24,16,16,16,17,18,20,24,25,16,16,17,18,20,24,25,28,16,17,18,20,24,25,28,33,17,18,20,24,25,28,33,41,18,20,24,25,28,33,41,54,20,24,25,28,33,41,54,71,24,25,28,33,41,54,71,91,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,176,208,240,128,167,197,227,128,158,187,216,123,150,178,205,116,142,169,195,111,135,160,185,105,128,152,175,100,122,144,166,95,116,137,158,90,110,130,150,85,104,123,142,81,99,117,135,77,94,111,128,73,89,105,122,69,85,100,116,66,80,95,110,62,76,90,104,59,72,86,99,56,69,81,94,53,65,77,89,51,62,73,85,48,59,69,80,46,56,66,76,43,53,63,72,41,50,59,69,39,48,56,65,37,45,54,62,35,43,51,59,33,41,48,56,32,39,46,53,30,37,43,50,29,35,41,48,27,33,39,45,26,31,37,43,24,30,35,41,23,28,33,39,22,27,32,37,21,26,30,35,20,24,29,33,19,23,27,31,18,22,26,30,17,21,25,28,16,20,23,27,15,19,22,25,14,18,21,24,14,17,20,23,13,16,19,22,12,15,18,21,12,14,17,20,11,14,16,19,11,13,15,18,10,12,15,17,10,12,14,16,9,11,13,15,9,11,12,14,8,10,12,14,8,9,11,13,7,9,11,12,7,9,10,12,7,8,10,11,6,8,9,11,6,7,9,10,6,7,8,9,2,2,2,2,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,62,63,0,0,1,2,2,4,4,5,6,7,8,9,9,11,11,12,13,13,15,15,16,16,18,18,19,19,21,21,22,22,23,24,24,25,26,26,27,27,28,29,29,30,30,30,31,32,32,33,33,33,34,34,35,35,35,36,36,36,37,37,37,38,38,63,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,6,6,6,6,7,7,7,7,8,8,8,0,255,255,255,127,0,0,0,0,0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,54,0,0,0,0,0,0,0,3,1,1,0,36,120,37,120,38,120,0,0,0,0,0,0,56,0,0,0,0,0,0,0,3,1,0,16,36,120,37,120,38,120,0,0,0,0,0,0,58,0,0,0,0,0,
0,0,3,0,0,16,36,120,37,120,38,120,0,0,0,0,0,0,32,0,0,0,0,0,0,0,1,0,0,0,36,120,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,3,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"i8",ALLOC_NONE,Runtime.GLOBAL_BASE);var tempDoublePtr=Runtime.alignMemory(allocate(12,"i8",ALLOC_STATIC),8);assert(tempDoublePtr%8==0);function copyTempFloat(ptr){HEAP8[tempDoublePtr]=HEAP8[ptr];HEAP8[tempDoublePtr+1]=HEAP8[ptr+1];HEAP8[tempDoublePtr+2]=HEAP8[ptr+2];HEAP8[tempDoublePtr+3]=HEAP8[ptr+3]}function copyTempDouble(ptr){HEAP8[tempDoublePtr]=HEAP8[ptr];HEAP8[tempDoublePtr+1]=HEAP8[ptr+1];HEAP8[tempDoublePtr+2]=HEAP8[ptr+2];HEAP8[tempDoublePtr+3]=HEAP8[ptr+3];HEAP8[tempDoublePtr+4]=HEAP8[ptr+4];HEAP8[tempDoublePtr+5]=HEAP8[ptr+5];HEAP8[tempDoublePtr+6]=HEAP8[ptr+6];HEAP8[tempDoublePtr+7]=HEAP8[ptr+7]}Module["_bitshift64Ashr"]=_bitshift64Ashr;Module["_i64Subtract"]=_i64Subtract;function _sbrk(bytes){var self=_sbrk;if(!self.called){DYNAMICTOP=alignMemoryPage(DYNAMICTOP);self.called=true;assert(Runtime.dynamicAlloc);self.alloc=Runtime.dynamicAlloc;Runtime.dynamicAlloc=(function(){abort("cannot dynamically allocate, sbrk now has control")})}var ret=DYNAMICTOP;if(bytes!=0)self.alloc(bytes);return ret}Module["_i64Add"]=_i64Add;Module["_strlen"]=_strlen;Module["_memset"]=_memset;Module["_bitshift64Shl"]=_bitshift64Shl;function _abort(){Module["abort"]()}Module["_llvm_bswap_i32"]=_llvm_bswap_i32;function _rint(x){if(Math.abs(x%1)!==.5)return Math.round(x);return x+x%2+(x<0?1:-1)}function _lrint(){return _rint.apply(null,arguments)}function _emscripten_memcpy_big(dest,src,num){HEAPU8.set(HEAPU8.subarray(src,src+num),dest);return dest}Module["_memcpy"]=_memcpy;STACK_BASE=STACKTOP=Runtime.alignMemory(STATICTOP);staticSealed=true;STACK_MAX=STACK_BASE+TOTAL_STACK;DYNAMIC_BASE=DYNAMICTOP=Runtime.alignMemory(STACK_MAX);assert(DYNAMIC_BASE<TOTAL_MEMORY,"TOTAL_MEMORY not big enough for stack");var 
ctlz_i8=allocate([8,7,6,6,5,5,5,5,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"i8",ALLOC_DYNAMIC);var cttz_i8=allocate([8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0],"i8",ALLOC_DYNAMIC);function invoke_iiii(index,a1,a2,a3){try{return Module["dynCall_iiii"](index,a1,a2,a3)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10){try{Module["dynCall_viiiiiiiiii"](index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiiii(index,a1,a2,a3,a4,a5,a6,a7){try{Module["dynCall_viiiiiii"](index,a1,a2,a3,a4,a5,a6,a7)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13){try{Module["dynCall_viiiiiiiiiiiii"](index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function 
invoke_vi(index,a1){try{Module["dynCall_vi"](index,a1)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_vii(index,a1,a2){try{Module["dynCall_vii"](index,a1,a2)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iiiiiii(index,a1,a2,a3,a4,a5,a6){try{return Module["dynCall_iiiiiii"](index,a1,a2,a3,a4,a5,a6)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_ii(index,a1){try{return Module["dynCall_ii"](index,a1)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viii(index,a1,a2,a3){try{Module["dynCall_viii"](index,a1,a2,a3)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iiiii(index,a1,a2,a3,a4){try{return Module["dynCall_iiiii"](index,a1,a2,a3,a4)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiii(index,a1,a2,a3,a4,a5,a6){try{Module["dynCall_viiiiii"](index,a1,a2,a3,a4,a5,a6)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iii(index,a1,a2){try{return Module["dynCall_iii"](index,a1,a2)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iiiiii(index,a1,a2,a3,a4,a5){try{return Module["dynCall_iiiiii"](index,a1,a2,a3,a4,a5)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiii(index,a1,a2,a3,a4){try{Module["dynCall_viiii"](index,a1,a2,a3,a4)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw 
e;asm["setThrew"](1,0)}}Module.asmGlobalArg={"Math":Math,"Int8Array":Int8Array,"Int16Array":Int16Array,"Int32Array":Int32Array,"Uint8Array":Uint8Array,"Uint16Array":Uint16Array,"Uint32Array":Uint32Array,"Float32Array":Float32Array,"Float64Array":Float64Array};Module.asmLibraryArg={"abort":abort,"assert":assert,"min":Math_min,"invoke_iiii":invoke_iiii,"invoke_viiiiiiiiii":invoke_viiiiiiiiii,"invoke_viiiiiii":invoke_viiiiiii,"invoke_viiiiiiiiiiiii":invoke_viiiiiiiiiiiii,"invoke_vi":invoke_vi,"invoke_vii":invoke_vii,"invoke_iiiiiii":invoke_iiiiiii,"invoke_ii":invoke_ii,"invoke_viii":invoke_viii,"invoke_iiiii":invoke_iiiii,"invoke_viiiiii":invoke_viiiiii,"invoke_iii":invoke_iii,"invoke_iiiiii":invoke_iiiiii,"invoke_viiii":invoke_viiii,"_sbrk":_sbrk,"_lrint":_lrint,"_abort":_abort,"_emscripten_memcpy_big":_emscripten_memcpy_big,"_rint":_rint,"STACKTOP":STACKTOP,"STACK_MAX":STACK_MAX,"tempDoublePtr":tempDoublePtr,"ABORT":ABORT,"cttz_i8":cttz_i8,"ctlz_i8":ctlz_i8,"NaN":NaN,"Infinity":Infinity};// EMSCRIPTEN_START_ASM
+var asm=(function(global,env,buffer) {
+"use asm";var a=new global.Int8Array(buffer);var b=new global.Int16Array(buffer);var c=new global.Int32Array(buffer);var d=new global.Uint8Array(buffer);var e=new global.Uint16Array(buffer);var f=new global.Uint32Array(buffer);var g=new global.Float32Array(buffer);var h=new global.Float64Array(buffer);var i=env.STACKTOP|0;var j=env.STACK_MAX|0;var k=env.tempDoublePtr|0;var l=env.ABORT|0;var m=env.cttz_i8|0;var n=env.ctlz_i8|0;var o=0;var p=0;var q=0;var r=0;var s=+env.NaN,t=+env.Infinity;var u=0,v=0,w=0,x=0,y=0.0,z=0,A=0,B=0,C=0.0;var D=0;var E=0;var F=0;var G=0;var H=0;var I=0;var J=0;var K=0;var L=0;var M=0;var N=global.Math.floor;var O=global.Math.abs;var P=global.Math.sqrt;var Q=global.Math.pow;var R=global.Math.cos;var S=global.Math.sin;var T=global.Math.tan;var U=global.Math.acos;var V=global.Math.asin;var W=global.Math.atan;var X=global.Math.atan2;var Y=global.Math.exp;var Z=global.Math.log;var _=global.Math.ceil;var $=global.Math.imul;var aa=env.abort;var ba=env.assert;var ca=env.min;var da=env.invoke_iiii;var ea=env.invoke_viiiiiiiiii;var fa=env.invoke_viiiiiii;var ga=env.invoke_viiiiiiiiiiiii;var ha=env.invoke_vi;var ia=env.invoke_vii;var ja=env.invoke_iiiiiii;var ka=env.invoke_ii;var la=env.invoke_viii;var ma=env.invoke_iiiii;var na=env.invoke_viiiiii;var oa=env.invoke_iii;var pa=env.invoke_iiiiii;var qa=env.invoke_viiii;var ra=env._sbrk;var sa=env._lrint;var ta=env._abort;var ua=env._emscripten_memcpy_big;var va=env._rint;var wa=0.0;
+// EMSCRIPTEN_START_FUNCS
+function La(a){a=a|0;var b=0;b=i;i=i+a|0;i=i+15&-16;return b|0}function Ma(){return i|0}function Na(a){a=a|0;i=a}function Oa(a,b){a=a|0;b=b|0;if(!o){o=a;p=b}}function Pa(b){b=b|0;a[k>>0]=a[b>>0];a[k+1>>0]=a[b+1>>0];a[k+2>>0]=a[b+2>>0];a[k+3>>0]=a[b+3>>0]}function Qa(b){b=b|0;a[k>>0]=a[b>>0];a[k+1>>0]=a[b+1>>0];a[k+2>>0]=a[b+2>>0];a[k+3>>0]=a[b+3>>0];a[k+4>>0]=a[b+4>>0];a[k+5>>0]=a[b+5>>0];a[k+6>>0]=a[b+6>>0];a[k+7>>0]=a[b+7>>0]}function Ra(a){a=a|0;D=a}function Sa(){return D|0}function Ta(b,d){b=b|0;d=d|0;var e=0,f=0;e=i;if(!(a[(c[b+204>>2]|0)+43>>0]|0)){i=e;return}f=c[(c[b+200>>2]|0)+13128>>2]|0;d=(d|0)%(f|0)|0;if((d|0)!=2?!((f|0)==2&(d|0)==0):0){i=e;return}fe(c[b+152>>2]|0,c[b+136>>2]|0,199)|0;i=e;return}function Ua(b,d){b=b|0;d=d|0;var e=0,f=0,g=0,h=0,j=0;e=i;g=b+204|0;f=c[g>>2]|0;if((c[(c[f+1668>>2]|0)+(c[b+2500>>2]<<2)>>2]|0)==(d|0)){Va(b);f=b+1449|0;if(a[f>>0]|0){j=c[g>>2]|0;if((a[j+42>>0]|0)!=0?(j=c[j+1676>>2]|0,(c[j+(d<<2)>>2]|0)!=(c[j+(d+ -1<<2)>>2]|0)):0)h=5}else h=5;if((h|0)==5)Wa(b);if(a[b+1448>>0]|0){i=e;return}if(!(a[(c[g>>2]|0)+43>>0]|0)){i=e;return}g=c[(c[b+200>>2]|0)+13128>>2]|0;if((d|0)%(g|0)|0){i=e;return}if((g|0)==1){Wa(b);i=e;return}if((a[f>>0]|0)!=1){i=e;return}fe(c[b+136>>2]|0,c[b+152>>2]|0,199)|0;i=e;return}if((a[f+42>>0]|0)!=0?(j=c[f+1676>>2]|0,(c[j+(d<<2)>>2]|0)!=(c[j+(d+ -1<<2)>>2]|0)):0){if((a[b+141>>0]|0)==1)Xa(c[b+136>>2]|0);else Va(b);Wa(b);f=c[g>>2]|0}if(!(a[f+43>>0]|0)){i=e;return}f=b+200|0;if((d|0)%(c[(c[f>>2]|0)+13128>>2]|0)|0){i=e;return}d=b+136|0;Ya((c[d>>2]|0)+224|0)|0;if((a[b+141>>0]|0)==1)Xa(c[d>>2]|0);else Va(b);if((c[(c[f>>2]|0)+13128>>2]|0)==1){Wa(b);i=e;return}else{fe(c[d>>2]|0,c[b+152>>2]|0,199)|0;i=e;return}}function Va(a){a=a|0;var b=0,d=0,e=0,f=0,g=0,h=0;b=i;e=a+136|0;a=c[e>>2]|0;d=a+204|0;ad(d,1);g=a+212|0;f=c[g>>2]|0;h=0-f&7;if(h){ad(d,h);f=c[g>>2]|0}Yc((c[e>>2]|0)+224|0,(c[d>>2]|0)+((f|0)/8|0)|0,(7-f+(c[a+216>>2]|0)|0)/8|0);i=b;return}function Wa(b){b=b|0;var 
e=0,f=0,g=0,h=0,j=0,k=0,l=0;g=i;f=c[b+1440>>2]|0;e=2-f|0;e=(a[b+2060>>0]|0)==0|(f|0)==2?e:e^3;f=b+2112|0;b=b+136|0;h=0;do{j=d[680+(e*199|0)+h>>0]|0;l=a[f>>0]|0;k=l<<24>>24;if(l<<24>>24<0)k=0;else k=(k|0)>51?51:k;j=((j<<3&120)+ -16+(($(k,((j>>>4)*5|0)+ -45|0)|0)>>4)<<1)+ -127|0;j=j>>31^j;if((j|0)>124)j=j&1|124;a[(c[b>>2]|0)+h>>0]=j;h=h+1|0}while((h|0)!=199);a[(c[b>>2]|0)+199>>0]=0;a[(c[b>>2]|0)+200>>0]=0;a[(c[b>>2]|0)+201>>0]=0;a[(c[b>>2]|0)+202>>0]=0;i=g;return}function Xa(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;d=a+224|0;e=c[a+240>>2]|0;f=c[d>>2]|0;e=(f&1|0)==0?e:e+ -1|0;e=(f&511|0)==0?e:e+ -1|0;a=(c[a+244>>2]|0)-e|0;if((a|0)<0){i=b;return}Yc(d,e,a);i=b;return}function Ya(a){a=a|0;var b=0,d=0,e=0,f=0,g=0;b=i;f=a+4|0;d=c[f>>2]|0;e=d+ -2|0;c[f>>2]=e;g=c[a>>2]|0;if((g|0)>=(e<<17|0)){g=(c[a+16>>2]|0)-(c[a+12>>2]|0)|0;i=b;return g|0}d=(d+ -258|0)>>>31;c[f>>2]=e<<d;g=g<<d;c[a>>2]=g;if(g&65535){g=0;i=b;return g|0}yb(a);g=0;i=b;return g|0}function Za(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=_a(a+224|0,a)|0;i=b;return a|0}function _a(b,e){b=b|0;e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0;f=i;h=d[e>>0]|0;j=b+4|0;k=c[j>>2]|0;l=d[2880+((k<<1&384)+(h|512))>>0]|0;k=k-l|0;m=k<<17;n=c[b>>2]|0;g=m-n>>31;c[b>>2]=n-(g&m);c[j>>2]=(g&l-k)+k;h=g^h;a[e>>0]=a[h+4032>>0]|0;e=h&1;h=c[j>>2]|0;g=d[2880+h>>0]|0;c[j>>2]=h<<g;g=c[b>>2]<<g;c[b>>2]=g;if(g&65535){i=f;return e|0}j=b+16|0;h=c[j>>2]|0;c[b>>2]=(((d[h+1>>0]|0)<<1|(d[h>>0]|0)<<9)+ -65535<<7-(d[2880+((g+ -1^g)>>15)>>0]|0))+g;if(h>>>0>=(c[b+20>>2]|0)>>>0){i=f;return e|0}c[j>>2]=h+2;i=f;return e|0}function $a(a){a=a|0;var b=0,d=0;b=i;a=a+136|0;d=c[a>>2]|0;if(!(_a(d+224|0,d+1|0)|0)){d=0;i=b;return d|0}d=(ab((c[a>>2]|0)+224|0)|0)==0;d=d?1:2;i=b;return d|0}function ab(a){a=a|0;var b=0,d=0,e=0;b=i;d=c[a>>2]<<1;c[a>>2]=d;if(!(d&65534)){yb(a);d=c[a>>2]|0}e=c[a+4>>2]<<17;if((d|0)<(e|0)){e=0;i=b;return e|0}c[a>>2]=d-e;e=1;i=b;return e|0}function bb(a){a=a|0;var 
b=0,d=0;b=i;d=a+136|0;a=(ab((c[d>>2]|0)+224|0)|0)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=ab((c[d>>2]|0)+224|0)|0|a;i=b;return a|0}function cb(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;d=c[(c[a+200>>2]|0)+52>>2]|0;d=(d|0)>10?31:(1<<d+ -5)+ -1|0;e=a+136|0;if((d|0)>0)a=0;else{f=0;i=b;return f|0}while(1){f=a+1|0;if(!(ab((c[e>>2]|0)+224|0)|0)){d=4;break}if((f|0)<(d|0))a=f;else{a=f;d=4;break}}if((d|0)==4){i=b;return a|0}return 0}function db(a){a=a|0;var b=0;b=i;a=ab((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function eb(a){a=a|0;var b=0,d=0;b=i;d=a+136|0;a=(ab((c[d>>2]|0)+224|0)|0)<<1;a=ab((c[d>>2]|0)+224|0)|0|a;i=b;return a|0}function fb(a){a=a|0;var b=0;b=i;a=Ya((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function gb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+5|0)|0;i=b;return a|0}function hb(a){a=a|0;var b=0,d=0,e=0,f=0,g=0,h=0;b=i;a=a+136|0;g=9;e=0;while(1){h=c[a>>2]|0;f=e;e=e+1|0;if(!(_a(h+224|0,h+g|0)|0)){e=f;g=0;break}if((e|0)>=5){f=0;g=0;d=4;break}else g=10}do if((d|0)==4){while(1){d=0;if(!(ab((c[a>>2]|0)+224|0)|0)){d=5;break}g=(1<<f)+g|0;f=f+1|0;if((f|0)<31)d=4;else break}if((d|0)==5)if(!f)break;do{f=f+ -1|0;g=((ab((c[a>>2]|0)+224|0)|0)<<f)+g|0}while((f|0)!=0)}while(0);i=b;return g+e|0}function ib(a){a=a|0;var b=0;b=i;a=ab((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function jb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+176|0)|0;i=b;return a|0}function kb(b){b=b|0;var d=0,e=0,f=0,g=0,h=0;d=i;e=a[(c[b+204>>2]|0)+1633>>0]|0;e=(e&255)<5?5:e&255;f=b+136|0;if(!e){g=0;i=d;return g|0}else b=0;while(1){h=c[f>>2]|0;g=b+1|0;if(!(_a(h+224|0,h+177|0)|0)){e=4;break}if((g|0)<(e|0))b=g;else{b=g;e=4;break}}if((e|0)==4){i=d;return b|0}return 0}function lb(b,e,f,g){b=b|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0;j=i;k=c[b+200>>2]|0;n=(1<<c[k+13080>>2])+ -1|0;l=n&g;m=c[k+13064>>2]|0;h=f>>m;m=g>>m;g=c[b+136>>2]|0;if((a[g+308>>0]|0)==0?(n&f|0)==0:0)f=0;else{f=h+ 
-1+($(c[k+13140>>2]|0,m)|0)|0;f=d[(c[b+4336>>2]|0)+f>>0]|0}if((a[g+309>>0]|0)==0&(l|0)==0){n=0;m=(f|0)>(e|0);m=m&1;n=(n|0)>(e|0);n=n&1;f=g+224|0;m=m|2;n=m+n|0;n=g+n|0;n=_a(f,n)|0;i=j;return n|0}n=($(c[k+13140>>2]|0,m+ -1|0)|0)+h|0;n=d[(c[b+4336>>2]|0)+n>>0]|0;m=(f|0)>(e|0);m=m&1;n=(n|0)>(e|0);n=n&1;f=g+224|0;m=m|2;n=m+n|0;n=g+n|0;n=_a(f,n)|0;i=j;return n|0}function mb(a,b){a=a|0;b=b|0;var d=0;d=i;b=c[a+136>>2]|0;b=(_a(b+224|0,b+13|0)|0)==0;i=d;return(b?3:0)|0}function nb(a){a=a|0;var b=0;b=i;a=Ya((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function ob(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+17|0)|0;i=b;return a|0}function pb(a){a=a|0;var b=0,d=0,e=0;b=i;e=a+136|0;d=0;while(1){a=d+1|0;if(!(ab((c[e>>2]|0)+224|0)|0)){a=d;d=4;break}if((a|0)<2)d=a;else{d=4;break}}if((d|0)==4){i=b;return a|0}return 0}function qb(a){a=a|0;var b=0,d=0;b=i;d=a+136|0;a=(ab((c[d>>2]|0)+224|0)|0)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=ab((c[d>>2]|0)+224|0)|0|a;i=b;return a|0}function rb(a){a=a|0;var b=0,d=0;b=i;a=a+136|0;d=c[a>>2]|0;if(!(_a(d+224|0,d+18|0)|0)){d=4;i=b;return d|0}d=(ab((c[a>>2]|0)+224|0)|0)<<1;d=ab((c[a>>2]|0)+224|0)|0|d;i=b;return d|0}function sb(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+(42-b)|0)|0;i=d;return a|0}function tb(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+(b+42)|0)|0;i=d;return a|0}function ub(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+((b|0)==0|40)|0)|0;i=d;return a|0}function vb(a,b){a=a|0;b=b|0;var d=0,e=0,f=0,g=0;d=i;a=a+136|0;f=(b<<2)+166|0;e=0;while(1){g=c[a>>2]|0;b=e+1|0;if(!(_a(g+224|0,g+(f+e)|0)|0)){b=e;a=4;break}if((b|0)<4)e=b;else{a=4;break}}if((a|0)==4){i=d;return b|0}return 0}function wb(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+(b+174)|0)|0;i=d;return a|0}function xb(f,g,h,j,k,l){f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var 
m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0,qa=0,ra=0,sa=0,ta=0,ua=0,va=0,wa=0,xa=0,ya=0,za=0,Aa=0,Ba=0,Da=0,Ea=0,Ga=0,Ha=0,Ia=0,Ja=0,La=0,Ma=0,Na=0;o=i;i=i+96|0;v=o+24|0;u=o+8|0;s=o;t=f+136|0;p=c[t>>2]|0;q=c[f+160>>2]|0;m=c[q+(l<<2)+32>>2]|0;n=f+200|0;T=c[n>>2]|0;h=$(h>>c[T+(l<<2)+13180>>2],m)|0;g=(c[q+(l<<2)>>2]|0)+(h+(g>>c[T+(l<<2)+13168>>2]<<c[T+56>>2]))|0;T=(l|0)!=0;h=p+320|0;q=T?p+11680|0:h;w=v+0|0;r=w+64|0;do{a[w>>0]=0;w=w+1|0}while((w|0)<(r|0));S=1<<j;y=(l|0)==0;w=c[(y?p+288|0:p+292|0)>>2]|0;r=S<<j;ce(q|0,0,r<<1|0)|0;z=p+31256|0;if(!(a[z>>0]|0)){A=a[p+272>>0]|0;C=f+204|0;Ma=c[C>>2]|0;if((a[Ma+21>>0]|0)!=0?(d[Ma+1629>>0]|0)>=(j|0):0){F=c[t>>2]|0;F=_a(F+224|0,F+(T&1|46)|0)|0}else F=0;if(y){B=c[n>>2]|0;G=B;B=(c[B+13192>>2]|0)+A|0}else{B=c[C>>2]|0;if((l|0)==1)B=(c[f+2072>>2]|0)+(c[B+28>>2]|0)+(a[p+302>>0]|0)|0;else B=(c[f+2076>>2]|0)+(c[B+32>>2]|0)+(a[p+303>>0]|0)|0;E=B+A|0;G=c[n>>2]|0;A=c[G+13192>>2]|0;B=0-A|0;if((E|0)>=(B|0))B=(E|0)>57?57:E;do if((c[G+4>>2]|0)==1){if((B|0)>=30)if((B|0)>43){B=B+ -6|0;break}else{B=c[176+(B+ -30<<2)>>2]|0;break}}else B=(B|0)>51?51:B;while(0);B=A+B|0}A=(c[G+52>>2]|0)+j|0;E=A+ -5|0;A=1<<A+ -6;B=d[168+(d[232+B>>0]|0)>>0]<<d[312+B>>0];if((a[G+634>>0]|0)!=0?!((F|0)!=0&(j|0)>2):0){H=c[C>>2]|0;H=(a[H+68>>0]|0)==0?G+635|0:H+69|0;G=((c[p+31244>>2]|0)!=1?3:0)+l|0;C=H+((j+ -2|0)*384|0)+(G<<6)|0;if((j|0)>3)ia=a[H+((j+ -4|0)*6|0)+G+1536>>0]|0;else ia=16}else{ia=16;C=0}}else{A=0;ia=0;B=0;C=0;E=0;F=0}J=(j<<1)+ -1|0;if(y){G=(j*3|0)+ -6+(j+ -1>>2)|0;I=j+1>>2}else{G=15;I=j+ -2|0}if((J|0)>0){K=G+52|0;H=0;while(1){Ma=c[t>>2]|0;L=H+1|0;if(!(_a(Ma+224|0,Ma+(K+(H>>I))|0)|0))break;if((L|0)<(J|0))H=L;else{H=L;break}}L=G+70|0;G=0;while(1){Ma=c[t>>2]|0;K=G+1|0;if(!(_a(Ma+224|0,Ma+(L+(G>>I))|0)|0))break;if((K|0)<(J|0))G=K;else{G=K;break}}if((H|0)>3){I=(H>>1)+ 
-1|0;K=ab((c[t>>2]|0)+224|0)|0;if((I|0)>1){J=1;do{K=ab((c[t>>2]|0)+224|0)|0|K<<1;J=J+1|0}while((J|0)!=(I|0))}H=K+((H&1|2)<<I)|0}if((G|0)>3){J=(G>>1)+ -1|0;K=ab((c[t>>2]|0)+224|0)|0;if((J|0)>1){I=1;do{K=ab((c[t>>2]|0)+224|0)|0|K<<1;I=I+1|0}while((I|0)!=(J|0))}I=H;L=K+((G&1|2)<<J)|0}else{I=H;L=G}}else{I=0;L=0}do if((k|0)!=2){M=I>>2;N=L>>2;if((k|0)==1){G=I;H=L;P=d[536+(L<<3)+I>>0]|0;I=488;L=504;J=496;K=520;break}else if(k){J=I;K=L;x=49;break}P=d[(I&3)+(392+((L&3)<<2))>>0]|0;if((S|0)==8){G=I;H=L;P=(d[416+(N<<1)+M>>0]<<4)+P|0;I=496;L=8;J=488;K=24;break}else if((S|0)==16){G=I;H=L;P=(d[392+(N<<2)+M>>0]<<4)+P|0;I=8;L=8;J=24;K=24;break}else if((S|0)==4){G=I;H=L;I=408;L=8;J=408;K=24;break}else{G=I;H=L;P=(d[424+(N<<3)+M>>0]<<4)+P|0;I=40;L=8;J=104;K=24;break}}else{J=L;K=I;M=L>>2;N=I>>2;x=49}while(0);if((x|0)==49){G=J;H=K;P=d[536+(J<<3)+K>>0]|0;I=496;L=520;J=488;K=504}O=P+1|0;Q=P>>4;if((Q|0)>-1){P=(1<<j+ -2)+ -1|0;l=(l|0)>0;R=l?90:88;S=S+ -1>>2;W=T?27:0;U=(j|0)==2;T=W+3|0;V=(j|0)==3;Z=(k|0)==0?9:15;_=y?0:27;da=(F|0)==0;aa=y?42:43;fa=y?40:41;ba=y?2:0;ea=p+31244|0;ca=w&-17;ga=f+204|0;Y=((B|0)<0)<<31>>31;X=((A|0)<0)<<31>>31;ha=(F|0)!=0&(j|0)>2;k=(j|0)<4;ia=ia&255;ma=(y&1)<<1;ja=ma|1;ra=1;ka=Q;oa=0;xa=16;while(1){na=ka<<4;wa=a[I+ka>>0]|0;ta=wa&255;va=a[J+ka>>0]|0;ua=va&255;la=(ka|0)>0;if((ka|0)<(Q|0)&la){if((ta|0)<(P|0))pa=d[v+(ta+1<<3)+ua>>0]|0;else pa=0;if((ua|0)<(P|0))pa=(d[ua+1+(v+(ta<<3))>>0]|0)+pa|0;ya=c[t>>2]|0;ya=(_a(ya+224|0,ya+(((pa|0)>1?1:pa)+R)|0)|0)&255;a[v+(ta<<3)+ua>>0]=ya;qa=1}else{if(!((ta|0)==(M|0)&(ua|0)==(N|0)))if(!(wa<<24>>24))ya=va<<24>>24==0&1;else ya=0;else ya=1;a[v+(ta<<3)+ua>>0]=ya;qa=0}na=O-na|0;pa=(ka|0)==(Q|0);if(pa){a[u>>0]=na+255;sa=na+ -2|0;na=1}else{sa=15;na=0}if((ta|0)<(S|0))za=(a[v+(ta+1<<3)+ua>>0]|0)!=0&1;else za=0;if((ua|0)<(S|0))za=((a[ua+1+(v+(ta<<3))>>0]|0)!=0&1)<<1|za;do if(ya<<24>>24!=0&(sa|0)>-1){if(!(c[(c[n>>2]|0)+13100>>2]|0))if(U){wa=600;va=W}else x=73;else if(da){ya=(a[z>>0]|0)!=0;if(ya|U){wa=ya?664:600;va=ya?fa:W}else 
x=73}else{wa=664;va=fa}do if((x|0)==73){x=0;ya=(za<<4)+616|0;if(!y){wa=ya;va=W+(V?9:12)|0;break}va=(va|wa)<<24>>24==0?W:T;if(V){wa=ya;va=va+Z|0;break}else{wa=ya;va=va+21|0;break}}while(0);if((sa|0)>0){ya=va+92|0;do{Ma=c[t>>2]|0;if(_a(Ma+224|0,Ma+(ya+(d[wa+((d[K+sa>>0]<<2)+(d[L+sa>>0]|0))>>0]|0))|0)|0){a[u+(na&255)>>0]=sa;qa=0;na=na+1<<24>>24}sa=sa+ -1|0}while((sa|0)>0)}if(qa){a[u+(na&255)>>0]=0;qa=na+1<<24>>24;break}if(c[(c[n>>2]|0)+13100>>2]|0)if(da?(a[z>>0]|0)==0:0)x=87;else qa=aa;else x=87;if((x|0)==87){x=0;qa=(ka|0)==0?_:va+2|0}Ma=c[t>>2]|0;if((_a(Ma+224|0,Ma+(qa+92)|0)|0)==1){a[u+(na&255)>>0]=0;qa=na+1<<24>>24}else qa=na}else qa=na;while(0);na=qa&255;a:do if(qa<<24>>24){qa=la?ba:0;if(!(c[(c[n>>2]|0)+13116>>2]|0))Ea=0;else{if(da?(a[z>>0]|0)==0:0)oa=ma;else oa=ja;Ea=(d[p+oa+199>>0]|0)>>>2}sa=qa|(ra|0)==0&(pa^1)&1;Ba=a[u>>0]|0;va=Ba&255;qa=na>>>0>8?8:na;if(!qa){pa=-1;ra=1}else{ya=sa<<2;pa=-1;ra=1;wa=0;do{Ma=ra+ya|0;La=c[t>>2]|0;Ma=(_a(La+224|0,La+((l?Ma+16|0:Ma)+136)|0)|0)&255;a[s+wa>>0]=Ma;if(!(Ma<<24>>24))ra=((ra+ -1|0)>>>0<2&1)+ra|0;else{pa=(pa|0)==-1?wa:pa;ra=0}wa=wa+1|0}while((wa|0)<(qa|0))}wa=na+ -1|0;qa=a[u+wa>>0]|0;ya=qa&255;do if(!(a[z>>0]|0)){if((c[ea>>2]|0)==1?!((c[(c[n>>2]|0)+13104>>2]|0)==0|da|(ca|0)!=10):0){va=0;break}va=(va-ya|0)>3&1}else va=0;while(0);if((pa|0)!=-1){La=c[t>>2]|0;La=_a(La+224|0,La+((l?sa|4:sa)|160)|0)|0;Ma=s+pa|0;a[Ma>>0]=(d[Ma>>0]|0)+La}sa=(va|0)==0;if((a[(c[ga>>2]|0)+4>>0]|0)==0|sa){wa=0;va=0;do{va=ab((c[t>>2]|0)+224|0)|0|va<<1;wa=wa+1|0}while((wa|0)<(na|0));za=va<<16-na}else{va=wa&255;if(!((wa&255)<<24>>24))ya=0;else{wa=0;ya=0;do{ya=ab((c[t>>2]|0)+224|0)|0|ya<<1;wa=wa+1|0}while((wa|0)<(va|0))}za=ya<<17-na}ta=ta<<2;va=ua<<2;ua=p+oa+199|0;wa=0;Ga=0;Aa=xa;Da=0;while(1){xa=Ba&255;ya=(d[L+xa>>0]|0)+ta|0;xa=(d[K+xa>>0]|0)+va|0;b:do 
if((wa|0)<8){Ha=(d[s+wa>>0]|0)+1|0;Ma=(wa|0)==(pa|0);if((Ha|0)==((Ma?3:2)|0)&0==((Ma?0:0)|0))Ia=0;else{Ia=0;break}while(1){Ja=Ia+1|0;if(!(ab((c[t>>2]|0)+224|0)|0)){x=120;break}if((Ja|0)<31)Ia=Ja;else{x=124;break}}do if((x|0)==120){x=0;if((Ia|0)>=3){Ja=Ia;x=124;break}if((Ea|0)>0){Ja=0;La=0;do{La=ab((c[t>>2]|0)+224|0)|0|La<<1;Ja=Ja+1|0}while((Ja|0)!=(Ea|0))}else La=0;Ja=La+(Ia<<Ea)|0}while(0);if((x|0)==124){x=0;Ia=Ja+ -3|0;if((Ia+Ea|0)>0){La=Ea+ -3+Ja|0;Ja=0;Ma=0;do{Ma=ab((c[t>>2]|0)+224|0)|0|Ma<<1;Ja=Ja+1|0}while((Ja|0)!=(La|0))}else Ma=0;Ja=Ma+((1<<Ia)+2<<Ea)|0}Ha=ae(Ja|0,((Ja|0)<0)<<31>>31|0,Ha|0,0)|0;Ia=D;La=3<<Ea;Na=((La|0)<0)<<31>>31;Ma=c[(c[n>>2]|0)+13116>>2]|0;do if((Ia|0)>(Na|0)|(Ia|0)==(Na|0)&Ha>>>0>La>>>0){La=Ea+1|0;if(Ma){Ea=La;break}Ea=(Ea|0)>3?4:La;break b}while(0);if(!((Ma|0)!=0&(Ga|0)==0))break;Ga=a[ua>>0]|0;La=(Ga&255)>>>2;if((Ja|0)>=(3<<La|0)){a[ua>>0]=Ga+1<<24>>24;Ga=1;break}if((Ja<<1|0)>=(1<<La|0)|Ga<<24>>24==0){Ga=1;break}a[ua>>0]=Ga+ -1<<24>>24;Ga=1}else{Ha=0;while(1){Ia=Ha+1|0;if(!(ab((c[t>>2]|0)+224|0)|0)){x=138;break}if((Ia|0)<31)Ha=Ia;else{x=142;break}}do if((x|0)==138){x=0;if((Ha|0)>=3){Ia=Ha;x=142;break}if((Ea|0)>0){Ia=0;Ja=0;do{Ja=ab((c[t>>2]|0)+224|0)|0|Ja<<1;Ia=Ia+1|0}while((Ia|0)!=(Ea|0))}else Ja=0;Ja=Ja+(Ha<<Ea)|0}while(0);if((x|0)==142){x=0;Ha=Ia+ -3|0;if((Ha+Ea|0)>0){Ja=Ea+ -3+Ia|0;Ia=0;La=0;do{La=ab((c[t>>2]|0)+224|0)|0|La<<1;Ia=Ia+1|0}while((Ia|0)!=(Ja|0))}else La=0;Ja=La+((1<<Ha)+2<<Ea)|0}Ha=Ja+1|0;Ia=((Ha|0)<0)<<31>>31;Ma=c[(c[n>>2]|0)+13116>>2]|0;do if((Ja|0)>=(3<<Ea|0)){La=Ea+1|0;if(Ma){Ea=La;break}Ea=(Ea|0)>3?4:La;break b}while(0);if(!((Ma|0)!=0&(Ga|0)==0))break;La=a[ua>>0]|0;Ga=(La&255)>>>2;if((Ja|0)>=(3<<Ga|0)){a[ua>>0]=La+1<<24>>24;Ga=1;break}if((Ja<<1|0)>=(1<<Ga|0)|La<<24>>24==0){Ga=1;break}a[ua>>0]=La+ -1<<24>>24;Ga=1}while(0);do 
if(!((a[(c[ga>>2]|0)+4>>0]|0)==0|sa)){Da=ae(Ha|0,Ia|0,Da|0,0)|0;if(Ba<<24>>24!=qa<<24>>24)break;Na=(Da&1|0)==0;Ma=$d(0,0,Ha|0,Ia|0)|0;Ha=Na?Ha:Ma;Ia=Na?Ia:D}while(0);Na=(za&32768|0)==0;Ba=$d(0,0,Ha|0,Ia|0)|0;Ba=Na?Ha:Ba;Ha=Na?Ia:D;za=za<<1&131070;Ia=Ba&65535;do if(!(a[z>>0]|0)){do if(!((a[(c[n>>2]|0)+634>>0]|0)==0|ha)){if(!((xa|ya|0)!=0|k)){Aa=ia;break}if((j|0)==3)Aa=(xa<<3)+ya|0;else if((j|0)==4)Aa=(xa>>>1<<3)+(ya>>>1)|0;else if((j|0)==5)Aa=(xa>>>2<<3)+(ya>>>2)|0;else Aa=(xa<<2)+ya|0;Aa=d[C+Aa>>0]|0}while(0);Ba=ke(Ba|0,Ha|0,B|0,Y|0)|0;Ba=ke(Ba|0,D|0,Aa|0,((Aa|0)<0)<<31>>31|0)|0;Ba=ae(Ba|0,D|0,A|0,X|0)|0;Ba=_d(Ba|0,D|0,E|0)|0;Ha=D;if((Ha|0)<0){Ia=(Ba&-32768|0)==-32768&(Ha&268435455|0)==268435455?Ba&65535:-32768;break}else{Ia=Ha>>>0>0|(Ha|0)==0&Ba>>>0>32767?32767:Ba&65535;break}}while(0);b[q+((xa<<j)+ya<<1)>>1]=Ia;wa=wa+1|0;if((wa|0)>=(na|0)){xa=Aa;break a}Ba=a[u+wa>>0]|0}}while(0);if(la)ka=ka+ -1|0;else break}}do if(a[z>>0]|0){if((c[(c[n>>2]|0)+13104>>2]|0)!=0?(w&-17|0)==10:0)Fa[c[f+2632>>2]&7](q,j&65535,(w|0)==26&1)}else{if(F){s=c[n>>2]|0;if((c[s+13096>>2]|0)!=0&(j|0)==2?(c[p+31244>>2]|0)==1:0){t=0;do{La=q+(15-t<<1)|0;Ma=b[La>>1]|0;Na=q+(t<<1)|0;b[La>>1]=b[Na>>1]|0;b[Na>>1]=Ma;t=t+1|0}while((t|0)!=8)}t=j&65535;Fa[c[f+2628>>2]&7](q,t,c[s+52>>2]|0);if(!(c[(c[n>>2]|0)+13104>>2]|0))break;if((c[p+31244>>2]|0)!=1)break;if((w&-17|0)!=10)break;Fa[c[f+2632>>2]&7](q,t,(w|0)==26&1);break}if(y&(c[p+31244>>2]|0)==1&(j|0)==2){Ca[c[f+2636>>2]&7](q,c[(c[n>>2]|0)+52>>2]|0);break}s=(G|0)>(H|0)?G:H;if(!s){Ca[c[f+(j+ -2<<2)+2656>>2]&7](q,c[(c[n>>2]|0)+52>>2]|0);break}t=H+4+G|0;do if((s|0)>=4){if((s|0)<8){t=(t|0)<8?t:8;break}if((s|0)<12)t=(t|0)<24?t:24}else t=(t|0)<4?t:4;while(0);Fa[c[f+(j+ -2<<2)+2640>>2]&7](q,t,c[(c[n>>2]|0)+52>>2]|0)}while(0);if(!(a[p+304>>0]|0)){Ma=j+ -2|0;Ma=f+(Ma<<2)+2612|0;Ma=c[Ma>>2]|0;Na=c[n>>2]|0;Na=Na+52|0;Na=c[Na>>2]|0;Ka[Ma&7](g,q,m,Na);i=o;return}if((r|0)<=0){Ma=j+ 
-2|0;Ma=f+(Ma<<2)+2612|0;Ma=c[Ma>>2]|0;Na=c[n>>2]|0;Na=Na+52|0;Na=c[Na>>2]|0;Ka[Ma&7](g,q,m,Na);i=o;return}p=c[p+284>>2]|0;s=0;do{Na=q+(s<<1)|0;b[Na>>1]=(($(b[h+(s<<1)>>1]|0,p)|0)>>>3)+(e[Na>>1]|0);s=s+1|0}while((s|0)!=(r|0));Ma=j+ -2|0;Ma=f+(Ma<<2)+2612|0;Ma=c[Ma>>2]|0;Na=c[n>>2]|0;Na=Na+52|0;Na=c[Na>>2]|0;Ka[Ma&7](g,q,m,Na);i=o;return}function yb(a){a=a|0;var b=0,e=0,f=0;b=i;f=a+16|0;e=c[f>>2]|0;c[a>>2]=(c[a>>2]|0)+ -65535+((d[e+1>>0]|0)<<1|(d[e>>0]|0)<<9);if(e>>>0>=(c[a+20>>2]|0)>>>0){i=b;return}c[f>>2]=e+2;i=b;return}function zb(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0;f=i;h=b+136|0;n=c[h>>2]|0;g=b+200|0;j=c[g>>2]|0;m=c[j+13080>>2]|0;q=(1<<m)+ -1|0;m=-1<<m-(c[(c[b+204>>2]|0)+24>>2]|0);o=m&d;p=m&e;k=c[j+13140>>2]|0;j=c[j+13064>>2]|0;l=o>>j;j=p>>j;if(!(q&d))o=0;else o=(o&q|0)!=0;if(!(q&e))p=0;else p=(p&q|0)!=0;q=n+203|0;if((a[q>>0]|0)==0?(m&(e|d)|0)!=0:0)d=c[n+276>>2]|0;else{a[q>>0]=(a[n+300>>0]|0)==0&1;d=a[b+2112>>0]|0}if(o){e=l+ -1+($(j,k)|0)|0;e=a[(c[b+4316>>2]|0)+e>>0]|0}else e=d;if(p){d=($(j+ -1|0,k)|0)+l|0;d=a[(c[b+4316>>2]|0)+d>>0]|0}b=e+1+d>>1;h=c[h>>2]|0;j=c[h+280>>2]|0;if(!j){a[h+272>>0]=b;i=f;return}g=c[(c[g>>2]|0)+13192>>2]|0;b=j+52+b+(g<<1)|0;if((b|0)>0)j=b;else j=-52-g+1+b|0;a[h+272>>0]=b-g-j+((j|0)%(g+52|0)|0);i=f;return}function Ab(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0;g=i;j=c[b+136>>2]|0;k=b+200|0;do 
if((e|0)>0&(e&7|0)==0){if(((a[b+2062>>0]|0)==0?(c[j+31312>>2]&4|0)!=0:0)?((e|0)%(1<<c[(c[k>>2]|0)+13080>>2]|0)|0|0)==0:0)break;if(((a[(c[b+204>>2]|0)+53>>0]|0)==0?(c[j+31312>>2]&8|0)!=0:0)?((e|0)%(1<<c[(c[k>>2]|0)+13080>>2]|0)|0|0)==0:0)break;h=1<<f;if((h|0)>0){l=b+2596|0;m=b+4320|0;n=0;do{o=n+d+($(c[l>>2]|0,e)|0)>>2;a[(c[m>>2]|0)+o>>0]=2;n=n+4|0}while((n|0)<(h|0))}}while(0);if(!((d|0)>0&(d&7|0)==0)){i=g;return}if(((a[b+2062>>0]|0)==0?(c[j+31312>>2]&1|0)!=0:0)?((d|0)%(1<<c[(c[k>>2]|0)+13080>>2]|0)|0|0)==0:0){i=g;return}if(((a[(c[b+204>>2]|0)+53>>0]|0)==0?(c[j+31312>>2]&2|0)!=0:0)?((d|0)%(1<<c[(c[k>>2]|0)+13080>>2]|0)|0|0)==0:0){i=g;return}h=1<<f;if((h|0)<=0){i=g;return}j=b+2596|0;b=b+4324|0;k=0;do{o=($(c[j>>2]|0,k+e|0)|0)+d>>2;a[(c[b>>2]|0)+o>>0]=2;k=k+4|0}while((k|0)<(h|0));i=g;return}function Bb(e,f,g,h){e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0;j=i;i=i+32|0;o=j+8|0;x=j;n=j+18|0;s=j+16|0;l=e+200|0;K=c[l>>2]|0;v=c[K+13120>>2]|0;k=(v-h|0)<=(f|0);b[n>>1]=0;b[s>>1]=0;w=c[K+13080>>2]|0;u=1<<w;w=($(g>>w,c[K+13128>>2]|0)|0)+(f>>w)|0;t=c[e+2508>>2]|0;m=c[t+(w<<3)+4>>2]|0;y=c[t+(w<<3)>>2]|0;if((c[K+68>>2]|0)!=0?(a[K+13056>>0]|0)!=0:0)p=1;else p=(a[(c[e+204>>2]|0)+40>>0]|0)!=0;r=c[K+52>>2]|0;q=(f|0)!=0;if(q){w=w+ -1|0;z=c[t+(w<<3)>>2]|0;w=c[t+(w<<3)+4>>2]|0}else{z=0;w=0}t=u+f|0;t=(t|0)>(v|0)?v:t;u=u+g|0;A=c[K+13124>>2]|0;u=(u|0)>(A|0)?A:u;A=(t|0)==(v|0)?t:t+ -8|0;v=(u|0)>(g|0);if(v){K=q?f:8;N=(K|0)<(t|0);Q=q?f+ -8|0:0;F=e+2596|0;P=e+4320|0;I=e+4316|0;C=x+4|0;D=e+160|0;E=n+1|0;R=s+1|0;L=e+4300|0;M=e+4284|0;J=e+4324|0;G=e+4304|0;H=e+4288|0;B=(Q|0)>=(A|0);U=y;T=m;O=g;do{if(N){W=O+4|0;X=T+ -2&-2;V=K;do{_=c[F>>2]|0;da=($(_,O)|0)+V>>2;aa=c[J>>2]|0;da=a[aa+da>>0]|0;ca=da&255;_=a[aa+(($(_,W)|0)+V>>2)>>0]|0;aa=_&255;da=da<<24>>24!=0;_=_<<24>>24==0;do if(!(_&(da^1))){S=V+ 
-1|0;Y=c[l>>2]|0;ba=c[Y+13064>>2]|0;Z=$(O>>ba,c[Y+13140>>2]|0)|0;ea=c[I>>2]|0;ba=(a[ea+(Z+(S>>ba))>>0]|0)+1+(a[ea+(Z+(V>>ba))>>0]|0)>>1;Z=ba+U|0;if((Z|0)<0)Z=0;else Z=(Z|0)>51?51:Z;Z=d[1280+Z>>0]|0;if(da){ca=(ca<<1)+X+ba|0;if((ca|0)<0)ca=0;else ca=(ca|0)>53?53:ca;ca=d[1336+ca>>0]|0}else ca=0;c[x>>2]=ca;if(_)_=0;else{_=(aa<<1)+X+ba|0;if((_|0)<0)_=0;else _=(_|0)>53?53:_;_=d[1336+_>>0]|0}c[C>>2]=_;da=c[D>>2]|0;_=c[da+32>>2]|0;ea=$(_,O)|0;Y=(c[da>>2]|0)+((V<<c[Y+56>>2])+ea)|0;if(p){a[n>>0]=Gb(e,S,O)|0;a[E>>0]=Gb(e,S,W)|0;a[s>>0]=Gb(e,V,O)|0;a[R>>0]=Gb(e,V,W)|0;za[c[G>>2]&7](Y,_,Z,x,n,s,r);break}else{za[c[H>>2]&7](Y,_,Z,x,n,s,r);break}}while(0);V=V+8|0}while((V|0)<(t|0))}if(!((O|0)==0|B)){S=O+ -1|0;W=T;T=Q;do{Z=$(c[F>>2]|0,O)|0;_=c[P>>2]|0;ba=a[_+(Z+T>>2)>>0]|0;ca=ba&255;V=T+4|0;Z=a[_+(Z+V>>2)>>0]|0;_=Z&255;ba=ba<<24>>24!=0;Z=Z<<24>>24==0;do if(!(Z&(ba^1))){X=c[l>>2]|0;W=c[X+13064>>2]|0;aa=T>>W;U=c[X+13140>>2]|0;ea=($(S>>W,U)|0)+aa|0;Y=c[I>>2]|0;aa=(a[Y+ea>>0]|0)+1+(a[Y+(($(O>>W,U)|0)+aa)>>0]|0)>>1;U=(T|0)>=(f|0);W=U?m:w;U=U?y:z;Y=aa+U|0;if((Y|0)<0)Y=0;else Y=(Y|0)>51?51:Y;Y=d[1280+Y>>0]|0;if(ba){ba=(ca<<1)+(W+ -2&-2)+aa|0;if((ba|0)<0)ba=0;else ba=(ba|0)>53?53:ba;ba=d[1336+ba>>0]|0}else ba=0;c[x>>2]=ba;if(Z)Z=0;else{Z=(_<<1)+(W+ -2&-2)+aa|0;if((Z|0)<0)Z=0;else Z=(Z|0)>53?53:Z;Z=d[1336+Z>>0]|0}c[C>>2]=Z;da=c[D>>2]|0;Z=c[da+32>>2]|0;ea=$(Z,O)|0;X=(c[da>>2]|0)+((T<<c[X+56>>2])+ea)|0;if(p){a[n>>0]=Gb(e,T,S)|0;a[E>>0]=Gb(e,V,S)|0;a[s>>0]=Gb(e,T,O)|0;a[R>>0]=Gb(e,V,O)|0;za[c[L>>2]&7](X,Z,Y,x,n,s,r);break}else{za[c[M>>2]&7](X,Z,Y,x,n,s,r);break}}while(0);T=T+8|0}while((T|0)<(A|0));T=W}O=O+8|0}while((O|0)<(u|0));K=c[l>>2]|0}else 
T=m;if(c[K+4>>2]|0){D=q?w:m;G=e+2596|0;F=e+4320|0;w=e+4316|0;y=o+4|0;x=e+160|0;C=n+1|0;B=s+1|0;E=e+4308|0;H=e+4292|0;I=e+4324|0;A=e+4312|0;z=e+4296|0;J=1;do{P=1<<c[K+(J<<2)+13168>>2];Q=1<<c[K+(J<<2)+13180>>2];if(v){O=P<<3;M=q?f:O;L=(M|0)<(t|0);K=Q<<3;N=q?f-O|0:0;P=P<<2;Q=Q<<2;R=g;do{if(L){S=R+Q|0;U=M;do{X=c[G>>2]|0;Z=($(X,R)|0)+U>>2;ea=c[I>>2]|0;Z=(a[ea+Z>>0]|0)==2;X=(a[ea+(($(X,S)|0)+U>>2)>>0]|0)==2;do if(Z|X){V=U+ -1|0;W=c[l>>2]|0;ea=c[W+13064>>2]|0;_=V>>ea;Y=c[W+13140>>2]|0;ba=$(R>>ea,Y)|0;aa=c[w>>2]|0;ca=U>>ea;Y=$(S>>ea,Y)|0;Y=(a[aa+(Y+_)>>0]|0)+1+(a[aa+(Y+ca)>>0]|0)>>1;if(Z)Z=Hb(e,(a[aa+(ba+ca)>>0]|0)+1+(a[aa+(ba+_)>>0]|0)>>1,J,T)|0;else Z=0;c[o>>2]=Z;if(X)X=Hb(e,Y,J,T)|0;else X=0;c[y>>2]=X;da=c[x>>2]|0;X=c[da+(J<<2)+32>>2]|0;ea=$(X,R>>c[W+(J<<2)+13180>>2])|0;W=(c[da+(J<<2)>>2]|0)+((U>>c[W+(J<<2)+13168>>2]<<c[W+56>>2])+ea)|0;if(p){a[n>>0]=Gb(e,V,R)|0;a[C>>0]=Gb(e,V,S)|0;a[s>>0]=Gb(e,U,R)|0;a[B>>0]=Gb(e,U,S)|0;Ha[c[A>>2]&3](W,X,o,n,s,r);break}else{Ha[c[z>>2]&3](W,X,o,n,s,r);break}}while(0);U=U+O|0}while((U|0)<(t|0))}if(R){V=t-((t|0)==(c[(c[l>>2]|0)+13120>>2]|0)?0:O)|0;if((N|0)<(V|0)){U=R+ -1|0;T=N;do{X=$(c[G>>2]|0,R)|0;ea=c[F>>2]|0;S=T+P|0;Y=(a[ea+(X+T>>2)>>0]|0)==2;X=(a[ea+(X+S>>2)>>0]|0)==2;do if(Y|X){if(Y){ea=c[l>>2]|0;da=c[ea+13064>>2]|0;Z=T>>da;ea=c[ea+13140>>2]|0;ba=($(U>>da,ea)|0)+Z|0;ca=c[w>>2]|0;Z=(a[ca+ba>>0]|0)+1+(a[ca+(($(R>>da,ea)|0)+Z)>>0]|0)>>1}else Z=0;if(X){ea=c[l>>2]|0;da=c[ea+13064>>2]|0;W=S>>da;ea=c[ea+13140>>2]|0;ba=($(U>>da,ea)|0)+W|0;ca=c[w>>2]|0;W=(a[ca+ba>>0]|0)+1+(a[ca+(($(R>>da,ea)|0)+W)>>0]|0)>>1}else W=0;if(Y)Y=Hb(e,Z,J,D)|0;else Y=0;c[o>>2]=Y;if(X)W=Hb(e,W,J,m)|0;else W=0;c[y>>2]=W;ea=c[l>>2]|0;da=c[x>>2]|0;X=c[da+(J<<2)+32>>2]|0;W=$(X,R>>c[ea+13184>>2])|0;W=(c[da+(J<<2)>>2]|0)+((T>>c[ea+13172>>2]<<c[ea+56>>2])+W)|0;if(p){a[n>>0]=Gb(e,T,U)|0;a[C>>0]=Gb(e,S,U)|0;a[s>>0]=Gb(e,T,R)|0;a[B>>0]=Gb(e,S,R)|0;Ha[c[E>>2]&3](W,X,o,n,s,r);break}else{Ha[c[H>>2]&3](W,X,o,n,s,r);break}}while(0);T=T+O|0}while((T|0)<(V|0));T=D}else 
T=D}R=R+K|0}while((R|0)<(u|0))}J=J+1|0;K=c[l>>2]|0}while((J|0)!=3)}if(!(a[K+12941>>0]|0)){if((a[e+140>>0]&1)==0|k^1){i=j;return}i=j;return}n=(c[K+13124>>2]|0)-h|0;l=(g|0)==0;m=(f|0)==0;if(!(l|m))Cb(e,f-h|0,g-h|0);n=(n|0)>(g|0);if(!(m|n))Cb(e,f-h|0,g);k=k^1;!(l|k)?(Cb(e,f,g-h|0),(a[e+140>>0]&1)!=0):0;if(n|k){i=j;return}Cb(e,f,g);if(!(a[e+140>>0]&1)){i=j;return}i=j;return}function Cb(e,f,g){e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0;h=i;i=i+48|0;l=h+24|0;r=h+42|0;s=h+40|0;p=h+16|0;k=h+8|0;t=h;o=e+200|0;S=c[o>>2]|0;y=c[S+13080>>2]|0;j=f>>y;y=g>>y;G=S+13128|0;n=($(y,c[G>>2]|0)|0)+j|0;M=c[e+204>>2]|0;L=M+1668|0;N=c[(c[L>>2]|0)+(n<<2)>>2]|0;A=e+2504|0;m=c[A>>2]|0;q=m+(n*148|0)|0;b[r>>1]=0;b[s>>1]=0;c[p>>2]=0;F=($(c[G>>2]|0,y)|0)+j|0;F=a[(c[e+4352>>2]|0)+F>>0]|0;if((a[M+42>>0]|0)!=0?(a[M+53>>0]|0)==0:0){R=1;O=1}else{R=F<<24>>24==0&1;O=0}D=(j|0)==0;c[l>>2]=D&1;I=(y|0)==0;u=l+4|0;c[u>>2]=I&1;H=(j|0)==((c[G>>2]|0)+ -1|0);z=l+8|0;c[z>>2]=H&1;E=(y|0)==((c[S+13132>>2]|0)+ -1|0);x=l+12|0;c[x>>2]=E&1;if(R<<24>>24){if(D)J=0;else{if(O){J=c[M+1676>>2]|0;J=(c[J+(N<<2)>>2]|0)!=(c[J+(c[(c[L>>2]|0)+(n+ -1<<2)>>2]<<2)>>2]|0)&1}else J=0;if(F<<24>>24==0?(pa=$(c[G>>2]|0,y)|0,oa=c[e+4328>>2]|0,(c[oa+(pa+j<<2)>>2]|0)!=(c[oa+(j+ -1+pa<<2)>>2]|0)):0)K=1;else K=J;a[r>>0]=K}if(H)K=0;else{if(O){K=c[M+1676>>2]|0;K=(c[K+(N<<2)>>2]|0)!=(c[K+(c[(c[L>>2]|0)+(n+1<<2)>>2]<<2)>>2]|0)&1}else K=0;if(F<<24>>24==0?(pa=$(c[G>>2]|0,y)|0,oa=c[e+4328>>2]|0,(c[oa+(pa+j<<2)>>2]|0)!=(c[oa+(j+1+pa<<2)>>2]|0)):0)P=1;else P=K;a[r+1>>0]=P}if(I)P=0;else{if(O){P=c[M+1676>>2]|0;P=(c[P+(N<<2)>>2]|0)!=(c[P+(c[(c[L>>2]|0)+(n-(c[G>>2]|0)<<2)>>2]<<2)>>2]|0)&1}else P=0;if(F<<24>>24==0?(pa=c[G>>2]|0,na=($(pa,y)|0)+j|0,oa=c[e+4328>>2]|0,(c[oa+(na<<2)>>2]|0)!=(c[oa+(($(pa,y+ 
-1|0)|0)+j<<2)>>2]|0)):0)Q=1;else Q=P;a[s>>0]=Q}if(E)L=0;else{if(O){pa=c[M+1676>>2]|0;L=(c[pa+(N<<2)>>2]|0)!=(c[pa+(c[(c[L>>2]|0)+((c[G>>2]|0)+n<<2)>>2]<<2)>>2]|0)&1}else L=0;if(F<<24>>24==0?(pa=c[G>>2]|0,na=($(pa,y)|0)+j|0,oa=c[e+4328>>2]|0,(c[oa+(na<<2)>>2]|0)!=(c[oa+(($(pa,y+1|0)|0)+j<<2)>>2]|0)):0)M=1;else M=L;a[s+1>>0]=M}if(!D)if(I)B=47;else{if(!(F<<24>>24)){pa=c[G>>2]|0;na=($(pa,y)|0)+j|0;oa=c[e+4328>>2]|0;if(J<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(j+ -1+($(pa,y+ -1|0)|0)<<2)>>2]|0):0)B=38;else M=1}else if(!(J<<24>>24))B=38;else M=1;if((B|0)==38)M=P<<24>>24!=0&1;a[p>>0]=M;B=40}else B=40;if((B|0)==40)if(!I){if(!H){if(!(F<<24>>24)){pa=c[G>>2]|0;na=($(pa,y)|0)+j|0;oa=c[e+4328>>2]|0;if(K<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(j+1+($(pa,y+ -1|0)|0)<<2)>>2]|0):0)B=45;else I=1}else if(!(K<<24>>24))B=45;else I=1;if((B|0)==45)I=P<<24>>24!=0&1;a[p+1>>0]=I;B=47}}else B=47;if((B|0)==47?!(H|E):0){if(!(F<<24>>24)){pa=c[G>>2]|0;na=($(pa,y)|0)+j|0;oa=c[e+4328>>2]|0;if(K<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(j+1+($(pa,y+1|0)|0)<<2)>>2]|0):0)B=51;else H=1}else if(!(K<<24>>24))B=51;else H=1;if((B|0)==51)H=L<<24>>24!=0&1;a[p+2>>0]=H}if(!(D|E)){if(!(F<<24>>24)){pa=c[G>>2]|0;na=($(pa,y)|0)+j|0;oa=c[e+4328>>2]|0;if(J<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(j+ -1+($(pa,y+1|0)|0)<<2)>>2]|0):0)B=57;else D=1}else if(!(J<<24>>24))B=57;else D=1;if((B|0)==57)D=L<<24>>24!=0&1;a[p+3>>0]=D}}N=(c[S+4>>2]|0)!=0?3:1;E=e+160|0;D=e+168|0;F=e+2672|0;P=y<<1;H=P+ -1|0;G=k+4|0;O=y+ -1|0;J=j+1|0;L=j+ -1|0;P=P+2|0;Q=t+4|0;M=y+1|0;I=j<<1;K=I+ 
-1|0;I=I+2|0;R=e+((R&255)<<2)+2676|0;na=S;_=0;while(1){ka=c[na+(_<<2)+13168>>2]|0;V=f>>ka;ha=c[na+(_<<2)+13180>>2]|0;aa=g>>ha;ba=c[E>>2]|0;W=c[ba+(_<<2)+32>>2]|0;S=1<<c[na+13080>>2];Z=S>>ka;Y=S>>ha;ka=c[na+13120>>2]>>ka;ca=ka-V|0;Z=(Z|0)>(ca|0)?ca:Z;ha=c[na+13124>>2]>>ha;ca=ha-aa|0;Y=(Y|0)>(ca|0)?ca:Y;ca=$(W,aa)|0;fa=c[na+56>>2]|0;ca=(V<<fa)+ca|0;ba=c[ba+(_<<2)>>2]|0;X=ba+ca|0;S=S+2<<fa;ea=c[D>>2]|0;ga=1<<fa;da=S+ga|0;U=ea+da|0;T=m+(n*148|0)+_+142|0;ia=d[T>>0]|0;if((ia|0)==2){ja=c[l>>2]|0;ia=c[z>>2]|0;la=c[x>>2]|0;do if(!(c[u>>2]|0)){pa=1-ja|0;oa=pa<<fa;ma=ga-oa|0;c[k>>2]=ba+(ca-W-oa);c[G>>2]=(c[e+(_<<2)+172>>2]|0)+(($(ka,H)|0)+V-pa<<fa);do if((ja|0)!=1){oa=ea+ma|0;pa=L+($(c[na+13128>>2]|0,O)|0)|0;pa=c[k+(((a[(c[A>>2]|0)+(pa*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0;if(!fa){a[oa>>0]=a[pa>>0]|0;na=c[o>>2]|0;oa=ga;break}else{b[oa>>1]=b[pa>>1]|0;oa=ga;break}}else oa=0;while(0);pa=($(c[na+13128>>2]|0,O)|0)+j|0;na=Z<<fa;fe(ea+(oa+ma)|0,(c[k+(((a[(c[A>>2]|0)+(pa*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0)+oa|0,na|0)|0;if((ia|0)!=1){pa=oa+na|0;oa=J+($(c[(c[o>>2]|0)+13128>>2]|0,O)|0)|0;na=ea+(pa+ma)|0;ma=(c[k+(((a[(c[A>>2]|0)+(oa*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0)+pa|0;if(!fa){a[na>>0]=a[ma>>0]|0;break}else{b[na>>1]=b[ma>>1]|0;break}}}while(0);do if(!la){pa=1-ja|0;oa=pa<<fa;la=($(Y,S)|0)+da-oa|0;c[t>>2]=ba+(($(Y,W)|0)+ca-oa);c[Q>>2]=(c[e+(_<<2)+172>>2]|0)+(($(ka,P)|0)+V-pa<<fa);do if((ja|0)!=1){ka=ea+la|0;ma=L+($(c[(c[o>>2]|0)+13128>>2]|0,M)|0)|0;ma=c[t+(((a[(c[A>>2]|0)+(ma*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0;if(!fa){a[ka>>0]=a[ma>>0]|0;ma=ga;break}else{b[ka>>1]=b[ma>>1]|0;ma=ga;break}}else ma=0;while(0);pa=($(c[(c[o>>2]|0)+13128>>2]|0,M)|0)+j|0;ka=Z<<fa;fe(ea+(ma+la)|0,(c[t+(((a[(c[A>>2]|0)+(pa*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0)+ma|0,ka|0)|0;if((ia|0)!=1){pa=ma+ka|0;oa=J+($(c[(c[o>>2]|0)+13128>>2]|0,M)|0)|0;ka=ea+(pa+la)|0;la=(c[t+(((a[(c[A>>2]|0)+(oa*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0)+pa|0;if(!fa){a[ka>>0]=a[la>>0]|0;break}else{b[ka>>1]=b[la>>1]|0;break}}}while(0);do 
if(!ja){pa=L+($(c[(c[o>>2]|0)+13128>>2]|0,y)|0)|0;if((a[(c[A>>2]|0)+(pa*148|0)+_+142>>0]|0)==3){la=ea+S|0;ja=(c[e+(_<<2)+184>>2]|0)+(($(ha,K)|0)+aa<<fa)|0;ka=(Y|0)>0;if(!fa){if(ka)ka=0;else{ja=0;break}while(1){a[la>>0]=a[ja>>0]|0;ka=ka+1|0;if((ka|0)==(Y|0)){ja=0;break}else{la=la+S|0;ja=ja+ga|0}}}else{if(ka)ka=0;else{ja=0;break}while(1){b[la>>1]=b[ja>>1]|0;ka=ka+1|0;if((ka|0)==(Y|0)){ja=0;break}else{la=la+S|0;ja=ja+ga|0}}}}else ja=1}else ja=0;while(0);do if(!ia){pa=J+($(c[(c[o>>2]|0)+13128>>2]|0,y)|0)|0;if((a[(c[A>>2]|0)+(pa*148|0)+_+142>>0]|0)==3){ia=ea+((Z<<fa)+da)|0;ha=(c[e+(_<<2)+184>>2]|0)+(($(ha,I)|0)+aa<<fa)|0;ka=(Y|0)>0;if(!fa){if(ka)B=0;else break;while(1){a[ia>>0]=a[ha>>0]|0;B=B+1|0;if((B|0)==(Y|0)){C=0;B=96;break}else{ia=ia+S|0;ha=ha+ga|0}}}else{if(ka)B=0;else break;while(1){b[ia>>1]=b[ha>>1]|0;B=B+1|0;if((B|0)==(Y|0)){C=0;B=96;break}else{ia=ia+S|0;ha=ha+ga|0}}}}else{C=1;B=96}}else{C=0;B=96}while(0);if((B|0)==96?(B=0,v=ja<<fa,w=ja+Z+C<<fa,(Y|0)>0):0){da=ea+(da-v)|0;ea=0;ba=ba+(ca-v)|0;while(1){fe(da|0,ba|0,w|0)|0;ea=ea+1|0;if((ea|0)==(Y|0))break;else{da=da+S|0;ba=ba+W|0}}}Eb(e,X,W,V,aa,Z,Y,_,j,y);Aa[c[R>>2]&3](X,U,W,S,q,l,Z,Y,_,r,s,p,c[(c[o>>2]|0)+52>>2]|0);Fb(e,X,U,W,S,f,g,Z,Y,_);a[T>>0]=3}else if((ia|0)==1){ca=Z<<fa;if((Y|0)>0){ba=U;da=0;ea=X;while(1){fe(ba|0,ea|0,ca|0)|0;da=da+1|0;if((da|0)==(Y|0))break;else{ba=ba+S|0;ea=ea+W|0}}}Eb(e,X,W,V,aa,Z,Y,_,j,y);ya[c[F>>2]&1](X,U,W,S,q,l,Z,Y,_,c[(c[o>>2]|0)+52>>2]|0);Fb(e,X,U,W,S,f,g,Z,Y,_);a[T>>0]=3}_=_+1|0;if((_|0)>=(N|0))break;na=c[o>>2]|0}i=h;return}function Db(a,b,d,e){a=a|0;b=b|0;d=d|0;e=e|0;var f=0,g=0,h=0,j=0,k=0;f=i;h=c[a+200>>2]|0;k=((c[h+13120>>2]|0)-e|0)>(b|0);h=((c[h+13124>>2]|0)-e|0)>(d|0);j=(d|0)==0;g=(b|0)==0;if(!(j|g))Bb(a,b-e|0,d-e|0,e);if(!(j|k))Bb(a,b,d-e|0,e);if(g|h){i=f;return}Bb(a,b-e|0,d,e);i=f;return}function Eb(d,e,f,g,h,j,k,l,m,n){d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var 
o=0,p=0,q=0,r=0,s=0,t=0,u=0;o=i;q=c[d+200>>2]|0;p=c[q+56>>2]|0;s=c[q+13120>>2]>>c[q+(l<<2)+13168>>2];q=c[q+13124>>2]>>c[q+(l<<2)+13180>>2];u=d+(l<<2)+172|0;t=n<<1;r=j<<p;fe((c[u>>2]|0)+(($(s,t)|0)+g<<p)|0,e|0,r|0)|0;fe((c[u>>2]|0)+(($(s,t|1)|0)+g<<p)|0,e+($(k+ -1|0,f)|0)|0,r|0)|0;d=d+(l<<2)+184|0;r=c[d>>2]|0;l=m<<1;t=r+(($(q,l)|0)+h<<p)|0;m=1<<p;n=(p|0)==0;g=(k|0)>0;if(n){if(g){r=t;t=0;s=e;while(1){a[r>>0]=a[s>>0]|0;t=t+1|0;if((t|0)==(k|0))break;else{r=r+m|0;s=s+f|0}}r=c[d>>2]|0}}else if(g){d=0;s=e;while(1){b[t>>1]=b[s>>1]|0;d=d+1|0;if((d|0)==(k|0))break;else{t=t+m|0;s=s+f|0}}}h=r+(($(q,l|1)|0)+h<<p)|0;j=e+(j+ -1<<p)|0;if(n){if(g)p=0;else{i=o;return}while(1){a[h>>0]=a[j>>0]|0;p=p+1|0;if((p|0)==(k|0))break;else{h=h+m|0;j=j+f|0}}i=o;return}else{if(g)p=0;else{i=o;return}while(1){b[h>>1]=b[j>>1]|0;p=p+1|0;if((p|0)==(k|0))break;else{h=h+m|0;j=j+f|0}}i=o;return}}function Fb(b,d,e,f,g,h,j,k,l,m){b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;var n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0;n=i;t=c[b+200>>2]|0;if(!(a[(c[b+204>>2]|0)+40>>0]|0)){if(!(a[t+13056>>0]|0)){i=n;return}if(!(c[t+68>>2]|0)){i=n;return}}p=b+200|0;C=c[t+13084>>2]|0;v=1<<C;o=c[t+(m<<2)+13168>>2]|0;s=c[t+(m<<2)+13180>>2]|0;m=h>>C;z=j>>C;k=k+h>>C;l=l+j>>C;t=v>>o<<c[t+56>>2];if((z|0)>=(l|0)){i=n;return}u=(m|0)<(k|0);b=b+4348|0;v=v>>s;w=(v|0)>0;do{if(u){x=z-j|0;y=m;do{A=c[p>>2]|0;C=($(c[A+13156>>2]|0,z)|0)+y|0;if((a[(c[b>>2]|0)+C>>0]|0)!=0?(r=c[A+13084>>2]|0,q=x<<r>>s,r=y-h<<r>>o<<c[A+56>>2],w):0){C=e+(($(q,g)|0)+r)|0;A=0;B=d+(($(q,f)|0)+r)|0;while(1){fe(B|0,C|0,t|0)|0;A=A+1|0;if((A|0)==(v|0))break;else{C=C+g|0;B=B+f|0}}}y=y+1|0}while((y|0)!=(k|0))}z=z+1|0}while((z|0)!=(l|0));i=n;return}function Gb(a,b,e){a=a|0;b=b|0;e=e|0;var f=0,g=0,h=0;f=i;g=c[a+200>>2]|0;h=c[g+13084>>2]|0;if((e|b|0)<0){e=2;i=f;return e|0}b=b>>h;e=e>>h;h=c[g+13156>>2]|0;if((b|0)>=(h|0)){e=2;i=f;return e|0}if((e|0)>=(c[g+13160>>2]|0)){e=2;i=f;return 
e|0}e=($(h,e)|0)+b|0;e=d[(c[a+4348>>2]|0)+e>>0]|0;i=f;return e|0}function Hb(b,e,f,g){b=b|0;e=e|0;f=f|0;g=g|0;var h=0,j=0;h=i;j=c[b+204>>2]|0;e=(c[((f|0)==1?j+28|0:j+32|0)>>2]|0)+e|0;if((e|0)<0)e=0;else e=(e|0)>57?57:e;do if((c[(c[b+200>>2]|0)+4>>2]|0)==1){if((e|0)>=30)if((e|0)>43){e=e+ -6|0;break}else{e=d[1392+(e+ -30)>>0]|0;break}}else if((e|0)<0)e=0;else e=(e|0)>51?51:e;while(0);g=g+2+e|0;if((g|0)<0){j=0;j=1336+j|0;j=a[j>>0]|0;j=j&255;i=h;return j|0}j=(g|0)>53?53:g;j=1336+j|0;j=a[j>>0]|0;j=j&255;i=h;return j|0}function Ib(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0;g=i;k=b+4376|0;c[k>>2]=0;a:do if((e|0)>1){m=0;while(1){if(!(a[d+m>>0]|0)){if((m|0)>0){l=m+ -1|0;l=(a[d+l>>0]|0)==0?l:m}else l=m;m=l+2|0;if(((m|0)<(e|0)?(a[d+(l+1)>>0]|0)==0:0)?(j=a[d+m>>0]|0,(j&255)<4):0)break}else l=m;m=l+2|0;if((l+3|0)>=(e|0))break a}m=l;e=j<<24>>24==3?e:l}else m=0;while(0);if((m|0)>=(e+ -1|0)){c[f+12>>2]=d;c[f+8>>2]=e;q=e;i=g;return q|0}nd(f,f+4|0,e+32|0);j=c[f>>2]|0;if(!j){q=-12;i=g;return q|0}fe(j|0,d|0,m|0)|0;o=m+2|0;b:do if((o|0)<(e|0)){l=b+4384|0;b=b+4380|0;n=m;c:while(1){p=d+o|0;q=a[p>>0]|0;do if((q&255)<=3){p=a[d+m>>0]|0;if(!(p<<24>>24))if(!(a[d+(m+1)>>0]|0)){if(q<<24>>24!=3){e=m;break b}o=n+1|0;a[j+n>>0]=0;n=n+2|0;a[j+o>>0]=0;m=m+3|0;q=(c[k>>2]|0)+1|0;c[k>>2]=q;p=c[l>>2]|0;if((p|0)<(q|0)){p=p<<1;c[l>>2]=p;ld(b,p,4)|0;p=c[b>>2]|0;if(!p){f=-12;break c}}else{p=c[b>>2]|0;if(!p)break}c[p+((c[k>>2]|0)+ -1<<2)>>2]=o}else{p=0;h=26}else h=26}else{a[j+n>>0]=a[d+m>>0]|0;a[j+(n+1)>>0]=a[d+(m+1)>>0]|0;p=a[p>>0]|0;n=n+2|0;m=o;h=26}while(0);if((h|0)==26){h=0;a[j+n>>0]=p;n=n+1|0;m=m+1|0}o=m+2|0;if((o|0)>=(e|0)){h=15;break b}}i=g;return f|0}else{n=m;h=15}while(0);if((h|0)==15)if((m|0)<(e|0)){h=e+n|0;k=m;while(1){a[j+n>>0]=a[d+k>>0]|0;k=k+1|0;if((k|0)==(e|0))break;else n=n+1|0}n=h-m|0}else e=m;h=j+n+0|0;d=h+32|0;do{a[h>>0]=0;h=h+1|0}while((h|0)<(d|0));c[f+12>>2]=j;c[f+8>>2]=n;q=e;i=g;return q|0}function Jb(b){b=b|0;var 
d=0,e=0,f=0,g=0,h=0;e=i;f=b+60|0;d=c[f>>2]|0;Zc();ac();f=c[f>>2]|0;c[f+4>>2]=b;g=md(31328)|0;c[f+136>>2]=g;if((((g|0)!=0?(c[f+72>>2]=g,c[f+8>>2]=f,g=fd(199)|0,c[f+152>>2]=g,(g|0)!=0):0)?(g=wd()|0,c[f+164>>2]=g,(g|0)!=0):0)?(h=wd()|0,c[f+2524>>2]=h,(h|0)!=0):0){c[f+2528>>2]=h;c[f+2592>>2]=2147483647;a[f+4469>>0]=1;c[f+2584>>2]=0;c[d+4368>>2]=0;c[d+4520>>2]=0;f=b+808|0;if(!(c[f>>2]&2))a[d+141>>0]=1;else a[d+141>>0]=c[b+800>>2];if((c[f>>2]&1|0)!=0?(c[b+800>>2]|0)>1:0){a[d+140>>0]=1;h=0;i=e;return h|0}a[d+140>>0]=2;h=0;i=e;return h|0}Lb(b)|0;h=-12;i=e;return h|0}function Kb(f,g,h,j){f=f|0;g=g|0;h=h|0;j=j|0;var k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0,qa=0,ra=0,sa=0,ua=0,va=0,wa=0,xa=0,ya=0,za=0,Aa=0,Ba=0,Ca=0,Ea=0,Fa=0,Ga=0,Ha=0,Ia=0,Ja=0,Ka=0,La=0,Ma=0,Na=0,Oa=0,Pa=0,Qa=0,Ra=0,Sa=0,Ta=0,Ua=0,Va=0,Wa=0,Xa=0,Ya=0,Za=0,_a=0,$a=0,ab=0;k=i;i=i+16|0;q=k+8|0;n=k;m=c[f+60>>2]|0;l=j+28|0;K=c[l>>2]|0;if(!K){g=$b(m,g,1)|0;if((g|0)<0){_a=g;i=k;return _a|0}c[h>>2]=g;_a=0;i=k;return _a|0}r=m+2520|0;c[r>>2]=0;f=m+4524|0;b[f>>1]=1;J=c[j+24>>2]|0;c[r>>2]=0;G=m+2584|0;A=m+2588|0;c[A>>2]=c[G>>2];c[G>>2]=0;w=m+4408|0;c[w>>2]=0;a:do if((K|0)>3){H=m+4470|0;F=m+4412|0;z=m+4404|0;v=m+4388|0;C=m+4396|0;x=m+4392|0;E=m+4384|0;y=m+4380|0;j=m+4376|0;u=m+136|0;t=m+2512|0;I=m+4480|0;while(1){B=(a[H>>0]|0)==0;if(B){while(1){L=J+1|0;if(((a[J>>0]|0)==0?(a[L>>0]|0)==0:0)?(a[J+2>>0]|0)==1:0)break;if((K|0)<5){p=-1094995529;o=180;break a}J=L;K=K+ -1|0}J=J+3|0;M=0;K=K+ -3|0}else{L=c[I>>2]|0;if((L|0)>0){N=0;M=0;do{N=d[J+M>>0]|N<<8;M=M+1|0}while((M|0)!=(L|0));M=N}else M=0;K=K-L|0;if((M|0)>(K|0)){p=-1094995529;o=180;break a}J=J+L|0}B=B?K:M;L=c[F>>2]|0;N=c[w>>2]|0;if((L|0)<(N+1|0)){L=L+1|0;M=kd(c[z>>2]|0,L,16)|0;if(!M){p=-12;o=180;break 
a}c[z>>2]=M;N=c[F>>2]|0;ce(M+(N<<4)|0,0,L-N<<4|0)|0;ld(v,L,4)|0;ld(C,L,4)|0;ld(x,L,4)|0;N=c[C>>2]|0;c[N+(c[F>>2]<<2)>>2]=1024;N=od(c[N+(c[F>>2]<<2)>>2]|0,4)|0;c[(c[x>>2]|0)+(c[F>>2]<<2)>>2]=N;c[F>>2]=L;N=c[w>>2]|0}c[E>>2]=c[(c[C>>2]|0)+(N<<2)>>2];c[y>>2]=c[(c[x>>2]|0)+(N<<2)>>2];M=c[z>>2]|0;L=Ib(m,J,B,M+(N<<4)|0)|0;c[(c[v>>2]|0)+(c[w>>2]<<2)>>2]=c[j>>2];c[(c[C>>2]|0)+(c[w>>2]<<2)>>2]=c[E>>2];Za=c[y>>2]|0;_a=c[w>>2]|0;c[w>>2]=_a+1;c[(c[x>>2]|0)+(_a<<2)>>2]=Za;if((L|0)<0){p=L;o=180;break a}Za=c[u>>2]|0;Xa=c[M+(N<<4)+12>>2]|0;Ya=c[M+(N<<4)+8>>2]|0;Ya=Ya>>>0>268435455?-8:Ya<<3;_a=Ya>>>0>2147483639|(Xa|0)==0;Ya=_a?0:Ya;Xa=_a?0:Xa;B=_a?-1094995529:0;c[Za+204>>2]=Xa;c[Za+216>>2]=Ya;c[Za+220>>2]=Ya+8;c[Za+208>>2]=Xa+(Ya>>3);c[Za+212>>2]=0;if(_a){p=B;o=180;break a}Ob(m)|0;if(((c[t>>2]|0)+ -36|0)>>>0<2)c[G>>2]=1;K=K-L|0;if((K|0)<=3)break;else J=J+L|0}if((c[w>>2]|0)>0){la=m+4|0;Ga=m+1448|0;aa=m+2046|0;_=m+1428|0;Fa=m+204|0;wa=m+200|0;Ka=m+1449|0;La=m+1432|0;Pa=m+1436|0;Qa=m+2580|0;Ia=m+156|0;ra=m+1440|0;I=m+1450|0;L=m+1620|0;va=m+2572|0;K=m+2516|0;M=m+2576|0;W=m+2056|0;X=m+2057|0;N=m+2058|0;P=m+2052|0;O=m+2048|0;Na=m+2068|0;S=m+2072|0;Q=m+2076|0;T=m+2080|0;Y=m+2061|0;V=m+2084|0;U=m+2088|0;Z=m+2062|0;J=m+1451|0;Oa=m+2108|0;Ja=m+2112|0;Ma=m+2500|0;na=m+2592|0;oa=m+2604|0;pa=m+4416|0;Ha=q+4|0;xa=m+4320|0;za=m+2596|0;ya=m+2600|0;Aa=m+4324|0;Ba=m+4344|0;Ca=m+4348|0;Ea=m+4328|0;sa=m+160|0;qa=m+140|0;ua=m+164|0;R=m+2096|0;F=m+2100|0;E=m+2104|0;G=m+141|0;H=m+4368|0;ca=m+2504|0;ba=m+2508|0;ea=m+4332|0;da=m+4336|0;fa=m+4340|0;ha=m+4352|0;ga=m+4316|0;ia=m+2608|0;ka=m+196|0;ma=m+4364|0;ja=m+168|0;C=0;b:while(1){c[j>>2]=c[(c[v>>2]|0)+(C<<2)>>2];c[y>>2]=c[(c[x>>2]|0)+(C<<2)>>2];Za=c[z>>2]|0;Ya=c[Za+(C<<4)+12>>2]|0;Za=c[Za+(C<<4)+8>>2]|0;_a=c[u>>2]|0;Za=Za>>>0>268435455?-8:Za<<3;Ra=Za>>>0>2147483639|(Ya|0)==0;Za=Ra?0:Za;Ya=Ra?0:Ya;c[_a+204>>2]=Ya;c[_a+216>>2]=Za;c[_a+220>>2]=Za+8;c[_a+208>>2]=Ya+(Za>>3);c[_a+212>>2]=0;c:do if(Ra){s=Ra?-1094995529:0;o=178}else{Ra=Ob(m)|0;d:do 
if((Ra|0)>=0){if(!Ra)break c;switch(c[t>>2]|0){case 37:case 36:{b[ma>>1]=(e[ma>>1]|0)+1&255;c[na>>2]=2147483647;break c};case 48:{Ra=Dc(m)|0;if((Ra|0)<0)break d;else break c};case 34:{Ra=Ec(m)|0;if((Ra|0)<0)break d;else break c};case 40:case 39:{Ra=Gc(m)|0;if((Ra|0)<0)break d;else break c};case 9:case 8:case 7:case 6:case 21:case 20:case 19:case 18:case 17:case 16:case 5:case 4:case 3:case 2:case 0:case 1:{Ra=c[u>>2]|0;Sa=Ra+204|0;_a=(bd(Sa)|0)&255;a[Ga>>0]=_a;Ta=c[t>>2]|0;if(!((Ta+ -16|0)>>>0>4|_a<<24>>24==0)?(b[ma>>1]=(e[ma>>1]|0)+1&255,c[na>>2]=2147483647,(Ta+ -19|0)>>>0<2):0){Yb(m);Ta=c[t>>2]|0}a[aa>>0]=0;if((Ta+ -16|0)>>>0<8)a[aa>>0]=bd(Sa)|0;Ta=dd(Sa)|0;c[_>>2]=Ta;if(Ta>>>0>255){p=B;o=180;break a}Ta=c[m+(Ta<<2)+400>>2]|0;if(!Ta){p=B;o=180;break a}if(!(a[Ga>>0]|0)){Wa=c[Ta+4>>2]|0;if((c[Fa>>2]|0)!=(Wa|0)){p=B;o=180;break a}}else Wa=c[Ta+4>>2]|0;c[Fa>>2]=Wa;Ua=c[t>>2]|0;Va=(Ua|0)==21;if(Va?(c[A>>2]|0)==1:0)a[aa>>0]=1;Ta=c[wa>>2]|0;Wa=c[(c[m+(c[Wa>>2]<<2)+272>>2]|0)+4>>2]|0;if((Ta|0)!=(Wa|0)){c[wa>>2]=Wa;e:do if(Ta){if((Ua+ -16|0)>>>0>7|Va)break;do if((c[Wa+13120>>2]|0)==(c[Ta+13120>>2]|0)){if((c[Wa+13124>>2]|0)!=(c[Ta+13124>>2]|0))break;if((c[Wa+76+(((c[Wa+72>>2]|0)+ -1|0)*12|0)>>2]|0)==(c[Ta+(((c[Ta+72>>2]|0)+ -1|0)*12|0)+76>>2]|0))break e}while(0);a[aa>>0]=0}while(0);Yb(m);Ta=c[wa>>2]|0;Nb(m);Va=c[Ta+13064>>2]|0;Wa=Ta+13120|0;ab=c[Wa>>2]|0;Xa=Ta+13124|0;$a=c[Xa>>2]|0;Va=$(($a>>Va)+1|0,(ab>>Va)+1|0)|0;Ua=$(c[Ta+13132>>2]|0,c[Ta+13128>>2]|0)|0;_a=Ta+13156|0;Za=Ta+13160|0;Ya=$(c[Za>>2]|0,c[_a>>2]|0)|0;c[za>>2]=(ab>>2)+1;c[ya>>2]=($a>>2)+1;c[ca>>2]=pd(Ua,148)|0;$a=pd(Ua,8)|0;c[ba>>2]=$a;if((c[ca>>2]|0)==0|($a|0)==0){o=71;break b}$a=Ta+13144|0;ab=Ta+13140|0;c[ea>>2]=fd($(c[ab>>2]|0,c[$a>>2]|0)|0)|0;ab=od(c[$a>>2]|0,c[ab>>2]|0)|0;c[da>>2]=ab;if((c[ea>>2]|0)==0|(ab|0)==0){o=71;break b}c[Ba>>2]=od(c[Ta+13148>>2]|0,c[Ta+13152>>2]|0)|0;c[fa>>2]=md(Ya)|0;Ya=fd($((c[Za>>2]|0)+1|0,(c[_a>>2]|0)+1|0)|0)|0;c[Ca>>2]=Ya;if(!(c[fa>>2]|0)){o=71;break 
b}if((c[Ba>>2]|0)==0|(Ya|0)==0){o=71;break b}c[ha>>2]=fd(Ua)|0;c[Ea>>2]=od(Va,4)|0;ab=od(Va,1)|0;c[ga>>2]=ab;if(!ab){o=71;break b}if(!(c[ha>>2]|0)){o=71;break b}if(!(c[Ea>>2]|0)){o=71;break b}c[xa>>2]=pd(c[za>>2]|0,c[ya>>2]|0)|0;ab=pd(c[za>>2]|0,c[ya>>2]|0)|0;c[Aa>>2]=ab;if((c[xa>>2]|0)==0|(ab|0)==0){o=71;break b}ab=c[la>>2]|0;c[ab+124>>2]=c[Wa>>2];c[ab+128>>2]=c[Xa>>2];c[ab+116>>2]=c[Ta+12>>2];c[ab+120>>2]=c[Ta+16>>2];c[ab+136>>2]=c[Ta+60>>2];c[ab+172>>2]=c[Ta+(((c[Ta+72>>2]|0)+ -1|0)*12|0)+80>>2];ab=Ta+160|0;c[q+0>>2]=c[ab+0>>2];c[q+4>>2]=c[ab+4>>2];if(!(c[Ta+176>>2]|0)){Ua=c[la>>2]|0;c[Ua+392>>2]=1}else{Ua=c[la>>2]|0;c[Ua+392>>2]=(c[Ta+184>>2]|0)!=0?2:1}if(!(c[Ta+188>>2]|0)){c[Ua+380>>2]=2;c[Ua+384>>2]=2;c[Ua+388>>2]=2}else{c[Ua+380>>2]=d[Ta+192>>0];c[Ua+384>>2]=d[Ta+193>>0];c[Ua+388>>2]=d[Ta+194>>0]}bc(ia,c[Ta+52>>2]|0);if(a[Ta+12941>>0]|0){Ua=c[wa>>2]|0;Va=(c[Ua+4>>2]|0)!=0?3:1;ab=(1<<c[Ua+13080>>2])+2|0;ab=$(ab,ab)|0;c[ja>>2]=fd(ab<<c[Ua+56>>2])|0;Ua=0;do{ab=c[wa>>2]|0;$a=c[ab+13124>>2]>>c[ab+(Ua<<2)+13180>>2];_a=$(c[ab+13120>>2]>>c[ab+(Ua<<2)+13168>>2]<<1,c[ab+13132>>2]|0)|0;c[m+(Ua<<2)+172>>2]=fd(_a<<c[ab+56>>2])|0;ab=c[wa>>2]|0;$a=$($a<<1,c[ab+13128>>2]|0)|0;c[m+(Ua<<2)+184>>2]=fd($a<<c[ab+56>>2])|0;Ua=Ua+1|0}while((Ua|0)<(Va|0))}c[wa>>2]=Ta;c[ka>>2]=c[(c[m+(c[Ta>>2]<<2)+208>>2]|0)+4>>2];b[ma>>1]=(e[ma>>1]|0)+1&255;c[na>>2]=2147483647}ab=c[la>>2]|0;c[ab+832>>2]=d[Ta+302>>0];c[ab+836>>2]=d[Ta+335>>0];a[Ka>>0]=0;do if(!(a[Ga>>0]|0)){if(a[(c[Fa>>2]|0)+41>>0]|0){a[Ka>>0]=bd(Sa)|0;Ta=c[wa>>2]|0}Ta=($(c[Ta+13128>>2]<<1,c[Ta+13132>>2]|0)|0)+ -2|0;Ua=Ta>>>0>65535;Ta=Ua?Ta>>>16:Ta;Ua=Ua?16:0;if(Ta&65280){Ua=Ua|8;Ta=Ta>>>8}Ta=_c(Sa,(d[4680+Ta>>0]|0)+Ua|0)|0;c[La>>2]=Ta;ab=c[wa>>2]|0;if(Ta>>>0>=($(c[ab+13132>>2]|0,c[ab+13128>>2]|0)|0)>>>0){p=B;o=180;break a}if(a[Ka>>0]|0)if(!(a[Ia>>0]|0)){p=B;o=180;break a}else break;else{c[Pa>>2]=Ta;c[Qa>>2]=(c[Qa>>2]|0)+1;o=82;break}}else{c[Pa>>2]=0;c[La>>2]=0;c[Qa>>2]=0;a[Ia>>0]=0;o=82}while(0);f:do 
if((o|0)==82){o=0;a[Ia>>0]=0;if((c[(c[Fa>>2]|0)+1624>>2]|0)>0){Ta=0;do{ad(Sa,1);Ta=Ta+1|0}while((Ta|0)<(c[(c[Fa>>2]|0)+1624>>2]|0))}Ta=dd(Sa)|0;c[ra>>2]=Ta;if(Ta>>>0>=3){p=B;o=180;break a}if(!((Ta|0)==2?1:((c[t>>2]|0)+ -16|0)>>>0>7)){p=B;o=180;break a}a[I>>0]=1;if(a[(c[Fa>>2]|0)+39>>0]|0)a[I>>0]=bd(Sa)|0;if(a[(c[wa>>2]|0)+8>>0]|0)a[J>>0]=_c(Sa,2)|0;if(((c[t>>2]|0)+ -19|0)>>>0>=2){o=91;break b}c[L>>2]=0;c[va>>2]=0;if(!(c[K>>2]|0))c[M>>2]=0;do if(a[(c[wa>>2]|0)+12941>>0]|0){a[W>>0]=bd(Sa)|0;if(!(c[(c[wa>>2]|0)+4>>2]|0)){a[X>>0]=0;a[N>>0]=0;break}else{ab=(bd(Sa)|0)&255;a[N>>0]=ab;a[X>>0]=ab;break}}else{a[W>>0]=0;a[X>>0]=0;a[N>>0]=0}while(0);c[P>>2]=0;c[O>>2]=0;c[Na>>2]=ed(Sa)|0;Ta=c[Fa>>2]|0;if(!(a[Ta+36>>0]|0)){c[S>>2]=0;c[Q>>2]=0}else{c[S>>2]=ed(Sa)|0;c[Q>>2]=ed(Sa)|0;Ta=c[Fa>>2]|0}if(!(a[Ta+1631>>0]|0))a[T>>0]=0;else{a[T>>0]=bd(Sa)|0;Ta=c[Fa>>2]|0}g:do if(!(a[Ta+55>>0]|0)){a[Y>>0]=0;c[V>>2]=0;c[U>>2]=0}else{do if(a[Ta+56>>0]|0){if(!(bd(Sa)|0)){Ta=c[Fa>>2]|0;break}ab=(bd(Sa)|0)&255;a[Y>>0]=ab;if(ab<<24>>24)break g;c[V>>2]=(ed(Sa)|0)<<1;c[U>>2]=(ed(Sa)|0)<<1;break g}while(0);a[Y>>0]=a[Ta+57>>0]|0;c[V>>2]=c[Ta+60>>2];c[U>>2]=c[Ta+64>>2]}while(0);Ta=a[(c[Fa>>2]|0)+54>>0]|0;h:do if(Ta<<24>>24){do if(!(a[W>>0]|0)){if(a[X>>0]|0)break;if(a[Y>>0]|0)break h}while(0);a[Z>>0]=bd(Sa)|0;break f}while(0);a[Z>>0]=Ta}while(0);c[Oa>>2]=0;ab=c[Fa>>2]|0;if(!((a[ab+42>>0]|0)==0?(a[ab+43>>0]|0)==0:0))o=122;i:do if((o|0)==122){o=0;ab=dd(Sa)|0;c[Oa>>2]=ab;if((ab|0)<=0){c[H>>2]=0;break}Ta=(dd(Sa)|0)+1|0;Ua=Ta>>4;Ta=Ta&15;jd(R);jd(F);jd(E);c[R>>2]=od(c[Oa>>2]|0,4)|0;c[F>>2]=od(c[Oa>>2]|0,4)|0;Va=od(c[Oa>>2]|0,4)|0;c[E>>2]=Va;if(!(c[R>>2]|0)){o=127;break b}if((c[F>>2]|0)==0|(Va|0)==0){o=127;break b}if((c[Oa>>2]|0)>0){Xa=(Ua|0)>0;Wa=(Ta|0)==0;Va=0;do{if(Xa){Ya=0;Za=0;do{Za=(_c(Sa,16)|0)+(Za<<16)|0;Ya=Ya+1|0}while((Ya|0)!=(Ua|0))}else Za=0;if(!Wa)Za=(_c(Sa,Ta)|0)+(Za<<Ta)|0;c[(c[R>>2]|0)+(Va<<2)>>2]=Za+1;Va=Va+1|0}while((Va|0)<(c[Oa>>2]|0))}do 
if((d[G>>0]|0)>1){ab=c[Fa>>2]|0;if((c[ab+48>>2]|0)<=1?(c[ab+44>>2]|0)<=1:0)break;c[H>>2]=0;a[G>>0]=1;break i}while(0);c[H>>2]=0}while(0);Ta=c[Fa>>2]|0;if(a[Ta+1628>>0]|0){Ta=dd(Sa)|0;$a=de(Ta|0,0,3)|0;Za=D;ab=(c[Ra+216>>2]|0)-(c[Ra+212>>2]|0)|0;_a=((ab|0)<0)<<31>>31;if((Za|0)>(_a|0)|(Za|0)==(_a|0)&$a>>>0>ab>>>0){p=B;o=180;break a}if(Ta){Ua=0;do{ad(Sa,8);Ua=Ua+1|0}while((Ua|0)!=(Ta|0))}Ta=c[Fa>>2]|0}Sa=(c[Ta+16>>2]|0)+26+(c[Na>>2]|0)|0;a[Ja>>0]=Sa;Sa=Sa<<24;if((Sa|0)>855638016){p=B;o=180;break a}if((Sa>>24|0)<(0-(c[(c[wa>>2]|0)+13192>>2]|0)|0)){p=B;o=180;break a}ab=c[La>>2]|0;c[Ma>>2]=ab;if((ab|0)==0?(a[Ka>>0]|0)!=0:0){p=B;o=180;break a}if(((c[Ra+216>>2]|0)-(c[Ra+212>>2]|0)|0)<0){p=B;o=180;break a}a[(c[u>>2]|0)+203>>0]=(a[Ka>>0]|0)==0&1;if(!(a[(c[Fa>>2]|0)+22>>0]|0))a[(c[u>>2]|0)+272>>0]=a[Ja>>0]|0;a[Ia>>0]=1;a[(c[u>>2]|0)+302>>0]=0;a[(c[u>>2]|0)+303>>0]=0;Sa=c[na>>2]|0;Ra=c[t>>2]|0;j:do if((Sa|0)==2147483647)switch(Ra|0){case 18:case 16:case 17:case 21:{Sa=c[va>>2]|0;c[na>>2]=Sa;break j};case 20:case 19:{c[na>>2]=-2147483648;Sa=-2147483648;break j};default:{Sa=2147483647;break j}}while(0);do if((Ra+ -8|0)>>>0<2){if((c[va>>2]|0)<=(Sa|0)){c[oa>>2]=0;break c}if((Ra|0)!=9)break;c[na>>2]=-2147483648}while(0);k:do if(!(a[Ga>>0]|0)){if(!(c[r>>2]|0)){Ra=0;break d}}else{Sa=c[u>>2]|0;$a=c[wa>>2]|0;Ra=c[$a+13064>>2]|0;ab=c[$a+13120>>2]>>Ra;Ra=(c[$a+13124>>2]>>Ra)+1|0;ce(c[xa>>2]|0,0,$(c[ya>>2]|0,c[za>>2]|0)|0)|0;ce(c[Aa>>2]|0,0,$(c[ya>>2]|0,c[za>>2]|0)|0)|0;$a=c[wa>>2]|0;ce(c[Ba>>2]|0,0,$(c[$a+13152>>2]|0,c[$a+13148>>2]|0)|0)|0;$a=c[wa>>2]|0;ce(c[Ca>>2]|0,0,$((c[$a+13160>>2]|0)+1|0,(c[$a+13156>>2]|0)+1|0)|0)|0;ce(c[Ea>>2]|0,-1,$((ab<<2)+4|0,Ra)|0)|0;c[oa>>2]=0;c[pa>>2]=c[t>>2];Ra=c[Fa>>2]|0;if(a[Ra+42>>0]|0)c[Sa+312>>2]=c[c[Ra+1648>>2]>>2]<<c[(c[wa>>2]|0)+13080>>2];Ra=_b(m,sa,c[va>>2]|0)|0;do if((Ra|0)>=0){c[(c[c[r>>2]>>2]|0)+80>>2]=((c[t>>2]|0)+ -16|0)>>>0<8&1;c[(c[sa>>2]|0)+84>>2]=3-(c[ra>>2]|0);yd(c[ua>>2]|0);Ra=$b(m,c[ua>>2]|0,0)|0;if((Ra|0)<0)break;break 
k}while(0);if(!(c[r>>2]|0)){o=167;break b}c[r>>2]=0;if((Ra|0)<0){p=B;break a}}while(0);if((c[t>>2]|0)!=(c[pa>>2]|0)){p=B;o=180;break a}c[q>>2]=0;c[Ha>>2]=1;Ra=c[la>>2]|0;Da[c[Ra+816>>2]&1](Ra,1,q,n,1,4)|0;Ra=c[n>>2]|0;ab=c[wa>>2]|0;if((Ra|0)>=($(c[ab+13132>>2]|0,c[ab+13128>>2]|0)|0))c[oa>>2]=1;if((Ra|0)<0)break d;else break c};default:break c}}while(0);s=(c[(c[la>>2]|0)+688>>2]&8|0)==0?0:Ra;o=178}while(0);if((o|0)==178?(o=0,(s|0)<0):0){p=B;o=180;break a}C=C+1|0;if((C|0)>=(c[w>>2]|0)){p=B;o=180;break a}}if((o|0)==71){Nb(m);Nb(m);c[wa>>2]=0;p=B;o=180;break}else if((o|0)==91)ta();else if((o|0)==127){c[Oa>>2]=0;p=B;o=180;break}else if((o|0)==167){c[r>>2]=0;p=B;break}}else{p=B;o=180}}else{p=0;o=180}while(0);if((p|0)<0){ab=p;i=k;return ab|0}n=m+2604|0;if(c[n>>2]|0)c[n>>2]=0;m=c[m+164>>2]|0;if(c[m+304>>2]|0){ab=m+128|0;c[ab>>2]=e[f>>1];c[ab+4>>2]=0;zd(g,m);c[h>>2]=1}ab=c[l>>2]|0;i=k;return ab|0}function Lb(b){b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0;d=i;e=c[b+60>>2]|0;Nb(e);b=e+4412|0;f=e+4392|0;if((c[b>>2]|0)>0){g=0;do{jd((c[f>>2]|0)+(g<<2)|0);g=g+1|0}while((g|0)<(c[b>>2]|0))}jd(e+4396|0);jd(e+4388|0);jd(f);jd(e+152|0);jd(e+168|0);jd(e+172|0);jd(e+184|0);jd(e+176|0);jd(e+188|0);jd(e+180|0);jd(e+192|0);xd(e+164|0);g=e+2524|0;Xb(e,g,-1);xd(g);g=e+208|0;f=0;do{vd(g+(f<<2)|0);f=f+1|0}while((f|0)!=16);g=e+272|0;f=0;do{vd(g+(f<<2)|0);f=f+1|0}while((f|0)!=32);f=e+400|0;g=0;do{vd(f+(g<<2)|0);g=g+1|0}while((g|0)!=256);c[e+200>>2]=0;c[e+204>>2]=0;c[e+196>>2]=0;vd(e+1424|0);jd(e+2096|0);jd(e+2100|0);jd(e+2104|0);h=e+141|0;l=a[h>>0]|0;f=e+72|0;if((l&255)>1){g=e+8|0;j=1;do{k=f+(j<<2)|0;if(c[k>>2]|0){jd(k);jd(g+(j<<2)|0);l=a[h>>0]|0}j=j+1|0}while((j|0)<(l&255|0))}g=e+136|0;if((c[g>>2]|0)==(c[f>>2]|0))c[g>>2]=0;jd(f);f=e+4404|0;if((c[b>>2]|0)<=0){jd(f);c[b>>2]=0;i=d;return 0}e=0;do{jd((c[f>>2]|0)+(e<<4)|0);e=e+1|0}while((e|0)<(c[b>>2]|0));jd(f);c[b>>2]=0;i=d;return 0}function Mb(a){a=a|0;var b=0;b=i;a=c[a+60>>2]|0;Zb(a);c[a+2592>>2]=2147483647;i=b;return}function Nb(a){a=a|0;var 
b=0;b=i;jd(a+2504|0);jd(a+2508|0);jd(a+4332|0);jd(a+4336|0);jd(a+4340|0);jd(a+4344|0);jd(a+4348|0);jd(a+4316|0);jd(a+4328|0);jd(a+4352|0);jd(a+4320|0);jd(a+4324|0);jd(a+2096|0);jd(a+2104|0);jd(a+2100|0);i=b;return}function Ob(a){a=a|0;var b=0,d=0,e=0;b=i;d=(c[a+136>>2]|0)+204|0;if(bd(d)|0){e=-1094995529;i=b;return e|0}c[a+2512>>2]=_c(d,6)|0;e=_c(d,6)|0;d=(_c(d,3)|0)+ -1|0;c[a+2516>>2]=d;if((d|0)<0){e=-1094995529;i=b;return e|0}e=(e|0)==0&1;i=b;return e|0}function Pb(e,f){e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0;f=i;h=c[e+60>>2]|0;k=h+200|0;E=c[k>>2]|0;e=1<<c[E+13080>>2];l=h+204|0;n=c[l>>2]|0;A=c[(c[n+1668>>2]|0)+(c[h+2500>>2]<<2)>>2]|0;m=(a[h+1449>>0]|0)==0;if(!A)if(m)g=4;else{W=-1094995529;i=f;return W|0}else if(!m){m=h+4328|0;r=h+1436|0;if((c[(c[m>>2]|0)+(c[(c[n+1672>>2]|0)+(A+ -1<<2)>>2]<<2)>>2]|0)!=(c[r>>2]|0)){W=-1094995529;i=f;return W|0}}else g=4;if((g|0)==4){m=h+4328|0;r=h+1436|0}q=e+ -1|0;s=h+136|0;p=h+2504|0;y=h+2056|0;o=h+2057|0;w=h+2084|0;x=h+2508|0;v=h+2088|0;u=h+2062|0;t=h+4352|0;z=0;n=0;do{if((A|0)>=(c[E+13136>>2]|0))break;G=c[l>>2]|0;B=c[(c[G+1672>>2]|0)+(A<<2)>>2]|0;J=E+13120|0;I=E+13080|0;H=c[I>>2]|0;n=q+(c[J>>2]|0)>>H;z=((B|0)%(n|0)|0)<<H;n=((B|0)/(n|0)|0)<<H;C=c[s>>2]|0;H=1<<H;F=c[r>>2]|0;D=B-F|0;c[(c[m>>2]|0)+(B<<2)>>2]=F;do if(!(a[G+43>>0]|0)){if(!(a[G+42>>0]|0)){c[C+312>>2]=c[J>>2];G=E;break}if((A|0)!=0?(W=c[G+1676>>2]|0,(c[W+(A<<2)>>2]|0)!=(c[W+(A+ -1<<2)>>2]|0)):0){W=c[I>>2]|0;c[C+312>>2]=(c[(c[G+1648>>2]|0)+(c[(c[G+1664>>2]|0)+(z>>W<<2)>>2]<<2)>>2]<<W)+z;a[C+203>>0]=1;G=c[k>>2]|0}else G=E}else{if((z|0)==0?(H+ -1&n|0)==0:0){a[C+203>>0]=1;E=c[k>>2]|0}c[C+312>>2]=c[E+13120>>2];G=E}while(0);E=H+n|0;H=c[G+13124>>2]|0;c[C+316>>2]=(E|0)>(H|0)?H:E;E=C+31312|0;c[E>>2]=0;H=c[l>>2]|0;if(!(a[H+42>>0]|0)){if((B|0)==(F|0)){c[E>>2]=1;F=1}else 
F=0;if((D|0)<(c[G+13128>>2]|0)){F=F|4;c[E>>2]=F}}else{if((z|0)>0){W=c[H+1676>>2]|0;I=B+ -1|0;if((c[W+(A<<2)>>2]|0)==(c[W+(c[(c[H+1668>>2]|0)+(I<<2)>>2]<<2)>>2]|0))F=0;else{c[E>>2]=2;F=2}W=c[m>>2]|0;if((c[W+(B<<2)>>2]|0)!=(c[W+(I<<2)>>2]|0)){F=F|1;c[E>>2]=F}}else F=0;if((n|0)>0){W=c[H+1676>>2]|0;I=G+13128|0;G=c[I>>2]|0;if((c[W+(A<<2)>>2]|0)!=(c[W+(c[(c[H+1668>>2]|0)+(B-G<<2)>>2]<<2)>>2]|0)){F=F|8;c[E>>2]=F;G=c[I>>2]|0}W=c[m>>2]|0;if((c[W+(B<<2)>>2]|0)!=(c[W+(B-G<<2)>>2]|0)){F=F|4;c[E>>2]=F}}}E=(z|0)>0;if(E&(D|0)>0)G=(F>>>1&1^1)&255;else G=0;a[C+308>>0]=G;if((n|0)>0){if((D|0)<(c[(c[k>>2]|0)+13128>>2]|0))F=0;else F=(F>>>3&1^1)&255;a[C+309>>0]=F;F=c[(c[k>>2]|0)+13128>>2]|0;if((D+1|0)<(F|0))F=0;else{W=c[l>>2]|0;V=c[W+1676>>2]|0;F=(c[V+(A<<2)>>2]|0)==(c[V+(c[(c[W+1668>>2]|0)+(B+1-F<<2)>>2]<<2)>>2]|0)&1}a[C+310>>0]=F;if(E?(j=c[(c[k>>2]|0)+13128>>2]|0,(D|0)>(j|0)):0){D=c[l>>2]|0;W=c[D+1676>>2]|0;D=(c[W+(A<<2)>>2]|0)==(c[W+(c[(c[D+1668>>2]|0)+(B+ -1-j<<2)>>2]<<2)>>2]|0)&1}else D=0}else{a[C+309>>0]=0;a[C+310>>0]=0;D=0}a[C+311>>0]=D;Ua(h,A);D=c[k>>2]|0;E=c[D+13080>>2]|0;F=z>>E;E=n>>E;G=c[s>>2]|0;D=($(c[D+13128>>2]|0,E)|0)+F|0;C=c[p>>2]|0;if((a[y>>0]|0)==0?(a[o>>0]|0)==0:0){M=0;H=0}else{if((F|0)>0?(a[G+308>>0]|0)!=0:0)M=Za(h)|0;else M=0;if((E|0)>0&(M|0)==0)if(!(a[G+309>>0]|0)){M=0;H=0}else{M=0;H=(Za(h)|0)!=0}else H=0}I=(c[(c[k>>2]|0)+4>>2]|0)!=0?3:1;L=C+(D*148|0)+143|0;G=C+(D*148|0)+144|0;K=C+(D*148|0)+104|0;J=C+(D*148|0)+108|0;R=(M|0)==0;S=R&(H^1);M=E+ -1|0;O=F+ -1|0;P=0;do{Q=c[l>>2]|0;Q=d[((P|0)==0?Q+1644|0:Q+1645|0)>>0]|0;a:do if(a[h+P+2056>>0]|0){T=(P|0)==2;do if(!T){if(S){U=($a(h)|0)&255;N=C+(D*148|0)+P+142|0;a[N>>0]=U;break}if(!R){U=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;U=a[(c[p>>2]|0)+(U*148|0)+P+142>>0]|0;N=C+(D*148|0)+P+142|0;a[N>>0]=U;break}if(H){U=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;U=a[(c[p>>2]|0)+(U*148|0)+P+142>>0]|0;N=C+(D*148|0)+P+142|0;a[N>>0]=U;break}else{a[C+(D*148|0)+P+142>>0]=0;break 
a}}else{U=a[L>>0]|0;a[G>>0]=U;c[J>>2]=c[K>>2];N=G}while(0);if(U<<24>>24){U=0;do{do if(!S){if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(U<<2)>>2];break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(U<<2)>>2];break}else{c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=0;break}}else c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=cb(h)|0;while(0);U=U+1|0}while((U|0)!=4);do if((a[N>>0]|0)==1){T=0;do{do if(c[C+(D*148|0)+(P<<4)+(T<<2)>>2]|0){if(S){c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=db(h)|0;break}if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(T<<2)+48>>2];break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(T<<2)+48>>2];break}else{c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=0;break}}else c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=0;while(0);T=T+1|0}while((T|0)!=4);if(S){a[C+(D*148|0)+P+96>>0]=bb(h)|0;break}if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;a[C+(D*148|0)+P+96>>0]=a[(c[p>>2]|0)+(W*148|0)+P+96>>0]|0;break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;a[C+(D*148|0)+P+96>>0]=a[(c[p>>2]|0)+(W*148|0)+P+96>>0]|0;break}else{a[C+(D*148|0)+P+96>>0]=0;break}}else if(!T){if(S){c[C+(D*148|0)+(P<<2)+100>>2]=eb(h)|0;break}if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;c[C+(D*148|0)+(P<<2)+100>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<2)+100>>2];break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;c[C+(D*148|0)+(P<<2)+100>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<2)+100>>2];break}else{c[C+(D*148|0)+(P<<2)+100>>2]=0;break}}while(0);b[C+(D*148|0)+(P*10|0)+112>>1]=0;T=0;do{W=c[C+(D*148|0)+(P<<4)+(T<<2)>>2]|0;V=T;T=T+1|0;U=C+(D*148|0)+(P*10|0)+(T<<1)+112|0;b[U>>1]=W;if((a[N>>0]|0)==2){if((V|0)>1){W=0-W|0;b[U>>1]=W}}else if(c[C+(D*148|0)+(P<<4)+(V<<2)+48>>2]|0){W=0-W|0;b[U>>1]=W}b[U>>1]=W<<16>>16<<Q}while((T|0)!=4)}}else 
a[C+(D*148|0)+P+142>>0]=0;while(0);P=P+1|0}while((P|0)<(I|0));C=c[x>>2]|0;c[C+(B<<3)>>2]=c[w>>2];c[C+(B<<3)+4>>2]=c[v>>2];a[(c[t>>2]|0)+B>>0]=a[u>>0]|0;C=Qb(h,z,n,c[(c[k>>2]|0)+13080>>2]|0,0)|0;if((C|0)<0){g=108;break}A=A+1|0;Ta(h,A);Db(h,z,n,e);E=c[k>>2]|0}while((C|0)!=0);if((g|0)==108){c[(c[m>>2]|0)+(B<<2)>>2]=-1;W=C;i=f;return W|0}if((z+e|0)<(c[E+13120>>2]|0)){W=A;i=f;return W|0}if((n+e|0)<(c[E+13124>>2]|0)){W=A;i=f;return W|0}Bb(h,z,n,e);W=A;i=f;return W|0}function Qb(b,e,f,g,h){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0;j=i;i=i+32|0;z=j;B=j+20|0;A=b+136|0;r=c[A>>2]|0;l=1<<g;m=b+200|0;s=c[m>>2]|0;q=b+204|0;t=c[q>>2]|0;p=(1<<(c[s+13080>>2]|0)-(c[t+24>>2]|0))+ -1|0;c[r+31232>>2]=h;k=l+e|0;if(((k|0)<=(c[s+13120>>2]|0)?(l+f|0)<=(c[s+13124>>2]|0):0)?(c[s+13064>>2]|0)>>>0<g>>>0:0){s=lb(b,h,e,f)|0;t=c[q>>2]|0}else s=(c[s+13064>>2]|0)>>>0<g>>>0&1;if((a[t+22>>0]|0)!=0?((c[(c[m>>2]|0)+13080>>2]|0)-(c[t+24>>2]|0)|0)>>>0<=g>>>0:0){a[r+300>>0]=0;c[r+280>>2]=0}if((a[b+2080>>0]|0)!=0?((c[(c[m>>2]|0)+13080>>2]|0)-(d[(c[q>>2]|0)+1632>>0]|0)|0)>>>0<=g>>>0:0)a[r+301>>0]=0;if(s){n=l>>1;o=n+e|0;q=n+f|0;g=g+ -1|0;h=h+1|0;s=Qb(b,e,f,g,h)|0;if((s|0)<0){X=s;i=j;return X|0}if(s){if((o|0)<(c[(c[m>>2]|0)+13120>>2]|0)){s=Qb(b,o,f,g,h)|0;if((s|0)<0){X=s;i=j;return X|0}}if(s){if((q|0)<(c[(c[m>>2]|0)+13124>>2]|0)){s=Qb(b,e,q,g,h)|0;if((s|0)<0){X=s;i=j;return X|0}}if(s){X=c[m>>2]|0;if((o|0)<(c[X+13120>>2]|0)?(q|0)<(c[X+13124>>2]|0):0){s=Qb(b,o,q,g,h)|0;if((s|0)<0){X=s;i=j;return X|0}}}else s=0}else s=0}else s=0;if((p&k|0)==0?(p&l+f|0)==0:0)c[r+276>>2]=a[r+272>>0];if(!s){X=0;i=j;return X|0}k=c[m>>2]|0;if((o+n|0)<(c[k+13120>>2]|0))k=1;else k=(q+n|0)<(c[k+13124>>2]|0);X=k&1;i=j;return 
X|0}p=c[A>>2]|0;s=c[m>>2]|0;r=c[s+13064>>2]|0;h=c[s+13140>>2]|0;s=1<<(c[s+13080>>2]|0)-(c[(c[q>>2]|0)+24>>2]|0);c[p+31236>>2]=e;c[p+31240>>2]=f;x=p+31252|0;a[x>>0]=1;v=p+31244|0;c[v>>2]=1;E=p+31248|0;c[E>>2]=0;w=p+31254|0;a[w>>0]=0;y=p+31253|0;a[y>>0]=0;t=($(f>>r,h)|0)+(e>>r)|0;H=b+4332|0;a[(c[H>>2]|0)+t>>0]=0;X=p+31268|0;a[X>>0]=1;a[X+1>>0]=1;a[X+2>>0]=1;a[X+3>>0]=1;r=l>>r;s=s+ -1|0;if(a[(c[q>>2]|0)+40>>0]|0){X=(gb(b)|0)&255;a[p+31256>>0]=X;if(X<<24>>24)Rb(b,e,f,g)}else a[p+31256>>0]=0;u=(r|0)>0;if(u){G=t;F=0;while(1){ce((c[H>>2]|0)+G|0,0,r|0)|0;F=F+1|0;if((F|0)==(r|0))break;else G=G+h|0}}if((c[v>>2]|0)==1?(c[(c[m>>2]|0)+13064>>2]|0)!=(g|0):0)D=c[E>>2]|0;else{F=mb(b,g)|0;c[E>>2]=F;E=c[v>>2]|0;if((F|0)==3)G=(E|0)==1&1;else G=0;a[w>>0]=G;if((E|0)==1)D=F;else ta()}if((((D|0)==0?(C=c[m>>2]|0,(c[C+68>>2]|0)!=0):0)?(c[C+13048>>2]|0)>>>0<=g>>>0:0)?(c[C+13052>>2]|0)>>>0>=g>>>0:0){C=(nb(b)|0)&255;a[y>>0]=C}else C=a[y>>0]|0;do if(!(C<<24>>24)){C=c[A>>2]|0;E=(c[C+31248>>2]|0)==3;D=E?2:1;H=0;do{F=H<<1;G=0;do{a[B+(G+F)>>0]=ob(b)|0;G=G+1|0}while((G|0)<(D|0));H=H+1|0}while((H|0)<(D|0));Q=l>>(E&1);P=C+31264|0;R=z+4|0;E=z+8|0;F=b+4340|0;O=C+31260|0;N=0;do{J=N<<1;H=($(N,Q)|0)+f|0;I=0;do{L=I+J|0;U=(a[B+L>>0]|0)==0;if(U)c[P>>2]=qb(b)|0;else c[O>>2]=pb(b)|0;V=($(I,Q)|0)+e|0;T=c[A>>2]|0;X=c[m>>2]|0;S=c[X+13084>>2]|0;M=V>>S;K=H>>S;G=c[X+13156>>2]|0;S=Q>>S;X=c[X+13080>>2]|0;W=(1<<X)+ -1|0;V=W&V;if((a[T+309>>0]|0)==0?(W&H|0)==0:0)W=1;else{W=($(K+ -1|0,G)|0)+M|0;W=d[(c[F>>2]|0)+W>>0]|0}if((a[T+308>>0]|0)==0&(V|0)==0)V=1;else{V=M+ -1+($(K,G)|0)|0;V=d[(c[F>>2]|0)+V>>0]|0}X=(H>>X<<X|0)<(H|0)?W:1;do 
if((V|0)==(X|0))if(V>>>0<2){c[z>>2]=0;c[R>>2]=1;c[E>>2]=26;V=0;X=1;W=26;break}else{c[z>>2]=V;X=(V+29&31)+2|0;c[R>>2]=X;W=(V+31&31)+2|0;c[E>>2]=W;break}else{c[z>>2]=V;c[R>>2]=X;if(!((V|0)==0|(X|0)==0)){c[E>>2]=0;W=0;break}if((V|0)==1|(X|0)==1){c[E>>2]=26;W=26;break}else{c[E>>2]=1;W=1;break}}while(0);if(U){if((V|0)>(X|0)){c[R>>2]=V;U=X&255;c[z>>2]=U}else{U=V;V=X}if((U|0)>(W|0)){c[E>>2]=U;X=W&255;c[z>>2]=X;W=U;U=X}if((V|0)>(W|0)){c[E>>2]=V;X=W&255;c[R>>2]=X}else{X=V;V=W}T=c[T+31264>>2]|0;T=((T|0)>=(U|0)&1)+T|0;T=((T|0)>=(X|0)&1)+T|0;T=((T|0)>=(V|0)&1)+T|0}else T=c[z+(c[T+31260>>2]<<2)>>2]|0;S=(S|0)==0?1:S;T=T&255;if((S|0)>0){U=0;do{X=($(U+K|0,G)|0)+M|0;ce((c[F>>2]|0)+X|0,T|0,S|0)|0;U=U+1|0}while((U|0)<(S|0))}a[C+L+31268>>0]=T;I=I+1|0}while((I|0)<(D|0));N=N+1|0}while((N|0)<(D|0));z=c[(c[m>>2]|0)+4>>2]|0;if((z|0)==3){B=0;do{z=B<<1;E=0;do{G=rb(b)|0;F=E+z|0;a[C+F+31281>>0]=G;A=a[C+F+31268>>0]|0;do if((G|0)!=4){G=a[1528+G>>0]|0;F=C+F+31277|0;if(A<<24>>24==G<<24>>24){a[F>>0]=34;break}else{a[F>>0]=G;break}}else a[C+F+31277>>0]=A;while(0);E=E+1|0}while((E|0)<(D|0));B=B+1|0}while((B|0)<(D|0))}else if((z|0)==2){A=rb(b)|0;a[C+31281>>0]=A;z=a[C+31268>>0]|0;if((A|0)==4)z=z&255;else{X=a[1528+A>>0]|0;z=z<<24>>24==X<<24>>24?34:X&255}a[C+31277>>0]=a[1536+z>>0]|0;break}else if(z){A=rb(b)|0;z=a[C+31268>>0]|0;if((A|0)==4){a[C+31277>>0]=z;break}A=a[1528+A>>0]|0;B=C+31277|0;if(z<<24>>24==A<<24>>24){a[B>>0]=34;break}else{a[B>>0]=A;break}}else 
break}else{G=c[m>>2]|0;B=c[G+13084>>2]|0;E=l>>B;C=c[G+13156>>2]|0;D=e>>B;B=f>>B;E=(E|0)==0?1:E;if((E|0)>0){F=b+4340|0;G=0;do{X=($(G+B|0,C)|0)+D|0;ce((c[F>>2]|0)+X|0,1,E|0)|0;G=G+1|0}while((G|0)<(E|0));G=c[m>>2]|0}K=c[A>>2]|0;V=c[b+160>>2]|0;A=c[V+32>>2]|0;D=$(A,f)|0;H=c[G+56>>2]|0;D=(c[V>>2]|0)+((e<<H)+D)|0;E=c[V+36>>2]|0;J=c[G+13184>>2]|0;B=$(f>>J,E)|0;I=c[G+13172>>2]|0;B=(c[V+4>>2]|0)+((e>>I<<H)+B)|0;C=c[V+40>>2]|0;X=c[G+13188>>2]|0;F=$(f>>X,C)|0;W=c[G+13176>>2]|0;F=(c[V+8>>2]|0)+((e>>W<<H)+F)|0;H=$(d[G+13044>>0]|0,l<<g)|0;J=($(l>>W,l>>X)|0)+($(l>>I,l>>J)|0)|0;G=($(d[G+13045>>0]|0,J)|0)+H|0;H=K+224|0;J=G+7>>3;I=c[K+240>>2]|0;X=c[H>>2]|0;I=(X&1|0)==0?I:I+ -1|0;I=(X&511|0)==0?I:I+ -1|0;K=(c[K+244>>2]|0)-I|0;if((K|0)<(J|0))I=0;else Yc(H,I+J|0,K-J|0);if(!(a[b+2061>>0]|0))Ab(b,e,f,g);X=G>>>0>2147483639|(I|0)==0;W=X?0:G;V=X?0:I;c[z>>2]=V;c[z+12>>2]=W;c[z+16>>2]=W+8;c[z+4>>2]=V+(W+7>>3);c[z+8>>2]=0;if(X)z=-1094995529;else{W=b+2608|0;X=c[m>>2]|0;za[c[W>>2]&7](D,A,l,l,z,d[X+13044>>0]|0,c[X+52>>2]|0);X=c[m>>2]|0;za[c[W>>2]&7](B,E,l>>c[X+13172>>2],l>>c[X+13184>>2],z,d[X+13045>>0]|0,c[X+52>>2]|0);X=c[m>>2]|0;za[c[W>>2]&7](F,C,l>>c[X+13176>>2],l>>c[X+13188>>2],z,d[X+13045>>0]|0,c[X+52>>2]|0);z=0}if(a[(c[m>>2]|0)+13056>>0]|0)Rb(b,e,f,g);if((z|0)<0){X=z;i=j;return X|0}}while(0);do if(!(a[y>>0]|0)){if(!(a[x>>0]|0)){if(a[b+2061>>0]|0)break;Ab(b,e,f,g);break}x=c[m>>2]|0;if((c[v>>2]|0)==1)v=(d[w>>0]|0)+(c[x+13092>>2]|0)|0;else v=c[x+13088>>2]|0;a[p+31255>>0]=v;v=Sb(b,e,f,e,f,e,f,g,g,0,0,1520,1520)|0;if((v|0)<0){X=v;i=j;return X|0}}while(0);if((a[(c[q>>2]|0)+22>>0]|0)!=0?(a[p+300>>0]|0)==0:0)zb(b,e,f,g);if(u){q=b+4316|0;g=p+272|0;u=0;while(1){ce((c[q>>2]|0)+t|0,a[g>>0]|0,r|0)|0;u=u+1|0;if((u|0)==(r|0))break;else 
t=t+h|0}}if((s&k|0)==0?(s&l+f|0)==0:0)c[p+276>>2]=a[p+272>>0];q=c[m>>2]|0;X=c[q+13064>>2]|0;g=l>>X;r=e>>X;e=f>>X;if((g|0)>0?(n=b+4336|0,o=c[p+31232>>2]&255,X=($(c[q+13140>>2]|0,e)|0)+r|0,ce((c[n>>2]|0)+X|0,o|0,g|0)|0,(g|0)!=1):0){p=1;do{X=($(c[(c[m>>2]|0)+13140>>2]|0,p+e|0)|0)+r|0;ce((c[n>>2]|0)+X|0,o|0,g|0)|0;p=p+1|0}while((p|0)!=(g|0))}e=c[m>>2]|0;m=1<<c[e+13080>>2];if(((k|0)%(m|0)|0|0)!=0?(k|0)<(c[e+13120>>2]|0):0){X=1;i=j;return X|0}X=l+f|0;if(((X|0)%(m|0)|0|0)!=0?(X|0)<(c[e+13124>>2]|0):0){X=1;i=j;return X|0}X=(fb(b)|0)==0&1;i=j;return X|0}function Rb(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0;g=i;m=1<<f;n=c[b+200>>2]|0;l=c[n+13084>>2]|0;f=c[n+13156>>2]|0;k=m+d|0;j=c[n+13120>>2]|0;m=m+e|0;n=c[n+13124>>2]|0;h=e>>l;e=((m|0)>(n|0)?n:m)>>l;if((h|0)>=(e|0)){i=g;return}d=d>>l;j=((k|0)>(j|0)?j:k)>>l;k=(d|0)<(j|0);b=b+4348|0;do{if(k){m=$(h,f)|0;l=d;do{a[(c[b>>2]|0)+(l+m)>>0]=2;l=l+1|0}while((l|0)!=(j|0))}h=h+1|0}while((h|0)!=(e|0));i=g;return}function Sb(e,f,g,h,j,k,l,m,n,o,p,q,r){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;q=q|0;r=r|0;var s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0;s=i;i=i+16|0;w=s+8|0;t=s;z=e+136|0;v=c[z>>2]|0;K=c[q>>2]|0;c[w>>2]=K;A=c[q+4>>2]|0;G=w+4|0;c[G>>2]=A;L=c[r>>2]|0;c[t>>2]=L;q=c[r+4>>2]|0;x=t+4|0;c[x>>2]=q;y=a[v+31254>>0]|0;do if(y<<24>>24){if((o|0)==1){c[v+288>>2]=d[v+p+31268>>0];if((c[(c[e+200>>2]|0)+4>>2]|0)==3){c[v+292>>2]=d[v+p+31277>>0];c[v+296>>2]=d[v+p+31281>>0];break}else{c[v+292>>2]=d[v+31277>>0];c[v+296>>2]=d[v+31281>>0];break}}}else{c[v+288>>2]=d[v+31268>>0];c[v+292>>2]=d[v+31277>>0];c[v+296>>2]=d[v+31281>>0]}while(0);r=e+200|0;I=c[r>>2]|0;B=(c[I+13076>>2]|0)>>>0<n>>>0;if(((!B?(c[I+13072>>2]|0)>>>0<n>>>0:0)?(d[v+31255>>0]|0)>(o|0):0)?!(y<<24>>24!=0&(o|0)==0):0)y=(sb(e,n)|0)&255;else{if((c[I+13088>>2]|0)==0?(c[v+31244>>2]|0)==0:0)I=(o|0)==0&(c[v+31248>>2]|0)!=0;else I=0;if(B)y=1;else 
y=(y<<24>>24!=0&(o|0)==0|I)&1}B=(n|0)>2;I=c[(c[r>>2]|0)+4>>2]|0;if(B)if(!I){J=q;M=A}else E=20;else if((I|0)==3)E=20;else{J=q;M=A}do if((E|0)==20){I=(o|0)==0;if(!((K|0)==0&(I^1))){K=tb(e,o)|0;c[w>>2]=K;if((c[(c[r>>2]|0)+4>>2]|0)==2?y<<24>>24==0|(n|0)==3:0){A=tb(e,o)|0;c[G>>2]=A}if(!I)E=25}else{K=0;E=25}if((E|0)==25)if(!L){L=0;J=q;M=A;break}L=tb(e,o)|0;c[t>>2]=L;if((c[(c[r>>2]|0)+4>>2]|0)==2?y<<24>>24==0|(n|0)==3:0){J=tb(e,o)|0;c[x>>2]=J;M=A}else{J=q;M=A}}while(0);if(!(y<<24>>24)){A=c[r>>2]|0;q=c[A+13072>>2]|0;y=1<<q;x=c[A+13148>>2]|0;if(((o|0)==0?(c[v+31244>>2]|0)!=1:0)&(K|0)==0&(L|0)==0)if((c[A+4>>2]|0)==2?(M|J|0)!=0:0)E=37;else o=1;else E=37;if((E|0)==37){o=ub(e,o)|0;A=c[r>>2]|0}G=c[z>>2]|0;A=n-(c[A+13172>>2]|0)|0;z=G+31244|0;if((c[z>>2]|0)==1){I=1<<n;Cc(e,f,g,I,I);Ub(e,f,g,n,0)}I=(o|0)!=0;K=(K|L|0)==0;do if(I)if(K)E=46;else{F=0;E=48}else if(K){K=c[r>>2]|0;L=c[K+4>>2]|0;if((L|0)==2){if(M){E=46;break}if(J){M=0;E=46;break}}if(!((c[z>>2]|0)!=1|(L|0)==0)){if(B|(L|0)==3){t=1<<(c[K+13172>>2]|0)+A;w=1<<(c[K+13184>>2]|0)+A;Cc(e,f,g,t,w);Ub(e,f,g,A,1);Ub(e,f,g,A,2);if((c[(c[r>>2]|0)+4>>2]|0)!=2)break;M=(1<<A)+g|0;Cc(e,f,M,t,w);Ub(e,f,M,A,1);Ub(e,f,M,A,2);break}if((p|0)==3?(H=1<<n+1,F=1<<(c[K+13184>>2]|0)+n,Cc(e,h,j,H,F),Ub(e,h,j,n,1),Ub(e,h,j,n,2),(c[(c[r>>2]|0)+4>>2]|0)==2):0){M=(1<<n)+j|0;Cc(e,h,M,H,F);Ub(e,h,M,n,1);Ub(e,h,M,n,2)}}}else{F=0;E=48}while(0);if((E|0)==46)if((c[(c[r>>2]|0)+4>>2]|0)==2){F=(M|J|0)==0;E=48}else{F=1;E=48}a:do if((E|0)==48){E=e+204|0;do if((a[(c[E>>2]|0)+22>>0]|0)!=0?(D=G+300|0,(a[D>>0]|0)==0):0){M=hb(e)|0;H=G+280|0;c[H>>2]=M;if(M){M=(ib(e)|0)==1;J=c[H>>2]|0;if(M){J=0-J|0;c[H>>2]=J}}else J=0;a[D>>0]=1;M=(c[(c[r>>2]|0)+13192>>2]|0)/2|0;if((J|0)<(-26-M|0)|(J|0)>(M+25|0)){M=-1094995529;i=s;return 
M|0}else{zb(e,k,l,m);break}}while(0);if((!((a[e+2080>>0]|0)==0|F)?(a[G+31256>>0]|0)==0:0)?(C=G+301|0,(a[C>>0]|0)==0):0){if(!(jb(e)|0)){a[G+302>>0]=0;a[G+303>>0]=0}else{k=c[E>>2]|0;if(!(a[k+1633>>0]|0))l=0;else{l=kb(e)|0;k=c[E>>2]|0}a[G+302>>0]=a[k+l+1634>>0]|0;a[G+303>>0]=a[(c[E>>2]|0)+l+1639>>0]|0}a[C>>0]=1}if((c[z>>2]|0)==1&(n|0)<4){k=c[G+288>>2]|0;if((k+ -6|0)>>>0<9)m=2;else m=(k+ -22|0)>>>0<9&1;k=c[G+292>>2]|0;if((k+ -6|0)>>>0<9)k=2;else k=(k+ -22|0)>>>0<9&1}else{m=0;k=0}l=G+304|0;a[l>>0]=0;if(I)xb(e,f,g,n,m,0);m=c[r>>2]|0;C=c[m+4>>2]|0;if(C){if(!(B|(C|0)==3)){if((p|0)!=3)break;p=1<<n+1;A=1<<(c[m+13184>>2]|0)+n;l=0;do{if((c[z>>2]|0)==1){M=(l<<n)+j|0;Cc(e,h,M,p,A);Ub(e,h,M,n,1)}if(c[w+(l<<2)>>2]|0)xb(e,h,(l<<n)+j|0,n,k,1);l=l+1|0}while((l|0)<(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0));w=0;while(1){if((c[z>>2]|0)==1){M=(w<<n)+j|0;Cc(e,h,M,p,A);Ub(e,h,M,n,2)}if(c[t+(w<<2)>>2]|0)xb(e,h,(w<<n)+j|0,n,k,2);w=w+1|0;if((w|0)>=(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0))break a}}j=1<<(c[m+13172>>2]|0)+A;h=1<<(c[m+13184>>2]|0)+A;do if((a[(c[E>>2]|0)+1630>>0]|0)==0|I^1)a[l>>0]=0;else{if(c[z>>2]|0){M=(c[G+296>>2]|0)==4;a[l>>0]=M&1;if(!M)break}else a[l>>0]=1;Tb(e,0)}while(0);m=e+160|0;C=G+320|0;D=G+11680|0;E=1<<A<<A;B=(E|0)>0;p=e+(A+ -2<<2)+2612|0;F=G+284|0;J=0;do{if((c[z>>2]|0)==1){M=(J<<A)+g|0;Cc(e,f,M,j,h);Ub(e,f,M,A,1)}do if(!(c[w+(J<<2)>>2]|0)){if(!(a[l>>0]|0))break;M=c[m>>2]|0;G=c[M+36>>2]|0;H=c[r>>2]|0;I=$(g>>c[H+13184>>2],G)|0;I=(c[M+4>>2]|0)+(I+(f>>c[H+13172>>2]<<c[H+56>>2]))|0;if(B){J=0;do{b[D+(J<<1)>>1]=($(b[C+(J<<1)>>1]|0,c[F>>2]|0)|0)>>>3;J=J+1|0}while((J|0)!=(E|0));J=E}else J=0;Ka[c[p>>2]&7](I,D,G,c[H+52>>2]|0)}else xb(e,f,(J<<A)+g|0,A,k,1);while(0);J=J+1|0}while((J|0)<(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0));if(!(a[l>>0]|0))I=0;else{Tb(e,1);I=0}do{if((c[z>>2]|0)==1){M=(I<<A)+g|0;Cc(e,f,M,j,h);Ub(e,f,M,A,2)}do 
if(!(c[t+(I<<2)>>2]|0)){if(!(a[l>>0]|0))break;M=c[m>>2]|0;G=c[M+40>>2]|0;w=c[r>>2]|0;H=$(g>>c[w+13188>>2],G)|0;H=(c[M+8>>2]|0)+(H+(f>>c[w+13176>>2]<<c[w+56>>2]))|0;if(B){I=0;do{b[D+(I<<1)>>1]=($(b[C+(I<<1)>>1]|0,c[F>>2]|0)|0)>>>3;I=I+1|0}while((I|0)!=(E|0));I=E}else I=0;Ka[c[p>>2]&7](H,D,G,c[w+52>>2]|0)}else xb(e,f,(I<<A)+g|0,A,k,2);while(0);I=I+1|0}while((I|0)<(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0))}}while(0);if((o|0)!=0?(u=1<<n,(u|0)>0):0){t=e+4344|0;r=0;do{w=$(r+g>>q,x)|0;o=0;do{a[(c[t>>2]|0)+((o+f>>q)+w)>>0]=1;o=o+y|0}while((o|0)<(u|0));r=r+y|0}while((r|0)<(u|0))}if(((a[e+2061>>0]|0)==0?(Ab(e,f,g,n),(a[(c[e+204>>2]|0)+40>>0]|0)!=0):0)?(a[v+31256>>0]|0)!=0:0)Rb(e,f,g,n)}else{u=n+ -1|0;n=1<<u;v=n+f|0;n=n+g|0;r=o+1|0;q=Sb(e,f,g,f,g,k,l,m,u,r,0,w,t)|0;if((q|0)<0){M=q;i=s;return M|0}q=Sb(e,v,g,f,g,k,l,m,u,r,1,w,t)|0;if((q|0)<0){M=q;i=s;return M|0}q=Sb(e,f,n,f,g,k,l,m,u,r,2,w,t)|0;if((q|0)<0){M=q;i=s;return M|0}f=Sb(e,v,n,f,g,k,l,m,u,r,3,w,t)|0;if((f|0)<0){M=f;i=s;return M|0}}M=0;i=s;return M|0}function Tb(a,b){a=a|0;b=b|0;var d=0,e=0,f=0;d=i;e=c[a+136>>2]|0;f=vb(a,b)|0;if(!f){c[e+284>>2]=0;i=d;return}else{c[e+284>>2]=1-((wb(a,b)|0)<<1)<<f+ -1;i=d;return}}function Ub(d,f,g,h,j){d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;var k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0,qa=0,ra=0;l=i;i=i+528|0;v=l+390|0;A=l+260|0;y=l+130|0;x=l;t=c[d+136>>2]|0;s=c[d+200>>2]|0;q=c[s+52>>2]|0;V=c[s+(j<<2)+13168>>2]|0;U=c[s+(j<<2)+13180>>2]|0;k=1<<h;ma=k<<V;X=c[s+13072>>2]|0;oa=k<<U;_=c[s+13164>>2]|0;W=f>>X&_;ia=g>>X&_;Y=_+2|0;aa=($(ia,Y)|0)+W|0;ba=c[d+204>>2]|0;ca=c[ba+1684>>2]|0;aa=c[ca+(aa<<2)>>2]|0;n=c[d+160>>2]|0;d=(c[n+(j<<2)+32>>2]|0)>>>1;n=c[n+(j<<2)>>2]|0;m=($(d,g>>U)|0)+(f>>V)|0;o=n+(m<<1)|0;p=(j|0)==0;r=c[(p?t+288|0:t+292|0)>>2]|0;w=v+2|0;B=y+2|0;z=A+2|0;u=x+2|0;if(!(c[t+31288>>2]|0))na=0;else 
na=(aa|0)>(c[ca+(W+ -1+($(_&ia+(oa>>X),Y)|0)<<2)>>2]|0);la=na&1;da=c[t+31292>>2]|0;M=c[t+31300>>2]|0;Z=c[t+31296>>2]|0;if(!(c[t+31304>>2]|0))ja=0;else ja=(aa|0)>(c[ca+(($(Y,ia+ -1|0)|0)+(_&W+(ma>>X))<<2)>>2]|0);W=ja&1;ca=(oa<<1)+g|0;_=s+13124|0;ia=c[_>>2]|0;X=oa+g|0;ca=((ca|0)>(ia|0)?ia:ca)-X>>U;ia=(ma<<1)+f|0;aa=s+13120|0;ra=c[aa>>2]|0;Y=ma+f|0;ia=((ia|0)>(ra|0)?ra:ia)-Y>>V;ba=ba+20|0;if((a[ba>>0]|0)==1){ka=c[s+13084>>2]|0;pa=oa>>ka;ma=ma>>ka;qa=(1<<ka)+ -1|0;oa=qa&g;ma=((ma|0)==0&1)+ma|0;qa=(qa&f|0)!=0;if(!(qa|na^1)){na=(c[s+13160>>2]|0)-(X>>ka)|0;na=(pa|0)>(na|0)?na:pa;if((na|0)>0){la=0;ra=0;do{la=la|1;ra=ra+2|0}while((ra|0)<(na|0))}else la=0}if(!((da|0)!=1|qa)){ra=(c[s+13160>>2]|0)-(g>>ka)|0;pa=(pa|0)>(ra|0)?ra:pa;if((pa|0)>0){da=0;na=0;do{da=da|1;na=na+2|0}while((na|0)<(pa|0))}else da=0}na=(oa|0)!=0;if(!((Z|0)!=1|na)){oa=(c[s+13156>>2]|0)-(f>>ka)|0;oa=(ma|0)>(oa|0)?oa:ma;if((oa|0)>0){Z=0;pa=0;do{Z=Z|1;pa=pa+2|0}while((pa|0)<(oa|0))}else Z=0}if(!(na|ja^1)){ka=(c[s+13156>>2]|0)-(Y>>ka)|0;ka=(ma|0)>(ka|0)?ka:ma;if((ka|0)>0){W=0;ja=0;do{W=W|1;ja=ja+2|0}while((ja|0)<(ka|0))}else W=0}ka=w+0|0;ja=ka+128|0;do{b[ka>>1]=32896;ka=ka+2|0}while((ka|0)<(ja|0));ka=B+0|0;ja=ka+128|0;do{b[ka>>1]=32896;ka=ka+2|0}while((ka|0)<(ja|0));b[y>>1]=128;ma=W}else ma=W;ka=(M|0)!=0;if(ka){ra=b[n+(m+~d<<1)>>1]|0;b[v>>1]=ra;b[y>>1]=ra}ja=(Z|0)!=0;if(ja)fe(B|0,n+(m-d<<1)|0,k<<1|0)|0;W=(ma|0)!=0;if(W?(ha=k+1|0,fe(y+(ha<<1)|0,n+(k-d+m<<1)|0,k<<1|0)|0,fa=ke(e[n+(k+ -1-d+m+ia<<1)>>1]|0,0,65537,65537)|0,ga=D,ea=k-ia|0,(ea|0)>0):0){ia=ia+ha|0;ha=0;do{ra=y+(ia+ha<<1)|0;qa=ra;b[qa>>1]=fa;b[qa+2>>1]=fa>>>16;ra=ra+4|0;b[ra>>1]=ga;b[ra+2>>1]=ga>>>16;ha=ha+4|0}while((ha|0)<(ea|0))}ea=(da|0)!=0;if(ea&(k|0)>0){fa=m+ -1|0;ga=0;do{ra=ga;ga=ga+1|0;b[v+(ga<<1)>>1]=b[n+(fa+($(ra,d)|0)<<1)>>1]|0}while((ga|0)!=(k|0))}fa=(la|0)!=0;if(fa){ia=ca+k|0;ha=m+ -1|0;if((ca|0)>0){ga=k;do{ra=ga;ga=ga+1|0;b[v+(ga<<1)>>1]=b[n+(ha+($(ra,d)|0)<<1)>>1]|0}while((ga|0)<(ia|0))}ia=ke(e[n+(ha+($(ia+ 
-1|0,d)|0)<<1)>>1]|0,0,65537,65537)|0;ha=D;ga=k-ca|0;if((ga|0)>0){ca=k+1+ca|0;na=0;do{ra=v+(ca+na<<1)|0;qa=ra;b[qa>>1]=ia;b[qa+2>>1]=ia>>>16;ra=ra+4|0;b[ra>>1]=ha;b[ra+2>>1]=ha>>>16;na=na+4|0}while((na|0)<(ga|0))}}do if((a[ba>>0]|0)==1?(ra=la|da,S=(ra|0)==0,ra=ra|M,T=(ra|0)==0,(Z|ma|ra|0)!=0):0){ba=k<<1;ca=c[aa>>2]|0;if(((ba<<V)+f|0)<(ca|0))aa=ba;else aa=ca-f>>V;_=c[_>>2]|0;if(((ba<<U)+g|0)>=(_|0))ba=_-g>>U;if(!W)if((Y|0)<(ca|0))V=k;else V=ca-f>>V;else V=aa;if(!fa)if((X|0)<(_|0))U=k;else U=_-g>>U;else U=ba;X=b[y>>1]|0;if(T)b[v>>1]=X;b[v>>1]=X;if(!S){S=0;while(1)if((S|0)<(U|0))S=S+4|0;else break}if(!ea?(R=ke(X&65535|0,0,65537,65537)|0,Q=D,(k|0)>0):0){S=0;do{ra=v+((S|1)<<1)|0;qa=ra;b[qa>>1]=R;b[qa+2>>1]=R>>>16;ra=ra+4|0;b[ra>>1]=Q;b[ra+2>>1]=Q>>>16;S=S+4|0}while((S|0)<(k|0))}do if(!fa){Q=ke(e[v+(k<<1)>>1]|0,0,65537,65537)|0;S=D;if((k|0)<=0)break;T=k+1|0;R=0;do{ra=v+(T+R<<1)|0;qa=ra;b[qa>>1]=Q;b[qa+2>>1]=Q>>>16;ra=ra+4|0;b[ra>>1]=S;b[ra+2>>1]=S>>>16;R=R+4|0}while((R|0)<(k|0))}while(0);g=(g|0)==0;if((f|0)==0&(U|0)>0){f=0;do{ra=v+((f|1)<<1)|0;qa=ra;b[qa>>1]=0;b[qa+2>>1]=0>>>16;ra=ra+4|0;b[ra>>1]=0;b[ra+2>>1]=0>>>16;f=f+4|0}while((f|0)<(U|0))}b[y>>1]=b[v>>1]|0;if(g)break;else f=0;while(1)if((f|0)<(V|0))f=f+4|0;else break}while(0);a:do if(!fa){if(ea){P=ke(e[v+(k<<1)>>1]|0,0,65537,65537)|0;f=D;if((k|0)<=0){P=84;break}g=k+1|0;Q=0;while(1){ra=v+(g+Q<<1)|0;qa=ra;b[qa>>1]=P;b[qa+2>>1]=P>>>16;ra=ra+4|0;b[ra>>1]=f;b[ra+2>>1]=f>>>16;Q=Q+4|0;if((Q|0)>=(k|0)){P=84;break a}}}if(ka){f=ke(e[v>>1]|0,0,65537,65537)|0;P=D;O=k<<1;if((k|0)>0)N=0;else{P=87;break}while(1){ra=v+((N|1)<<1)|0;qa=ra;b[qa>>1]=f;b[qa+2>>1]=f>>>16;ra=ra+4|0;b[ra>>1]=P;b[ra+2>>1]=P>>>16;N=N+4|0;if((N|0)>=(O|0)){P=87;break a}}}if(ja){N=b[B>>1]|0;b[v>>1]=N;N=ke(N&65535|0,0,65537,65537)|0;O=D;M=k<<1;if((k|0)>0)P=0;else{P=89;break}while(1){ra=v+((P|1)<<1)|0;qa=ra;b[qa>>1]=N;b[qa+2>>1]=N>>>16;ra=ra+4|0;b[ra>>1]=O;b[ra+2>>1]=O>>>16;P=P+4|0;if((P|0)>=(M|0)){P=89;break a}}}if(!W){g=1<<q+ 
-1;b[v>>1]=g;R=ke(g&65535|0,0,65537,65537)|0;Q=D;P=k<<1;f=(k|0)>0;if(f)S=0;else{P=84;break}do{ra=y+((S|1)<<1)|0;qa=ra;b[qa>>1]=R;b[qa+2>>1]=R>>>16;ra=ra+4|0;b[ra>>1]=Q;b[ra+2>>1]=Q>>>16;S=S+4|0}while((S|0)<(P|0));g=ke(g&65535|0,0,65537,65537)|0;Q=D;if(f)f=0;else{P=84;break}while(1){ra=v+((f|1)<<1)|0;qa=ra;b[qa>>1]=g;b[qa+2>>1]=g>>>16;ra=ra+4|0;b[ra>>1]=Q;b[ra+2>>1]=Q>>>16;f=f+4|0;if((f|0)>=(P|0)){P=84;break a}}}M=y+(k+1<<1)|0;O=b[M>>1]|0;N=ke(O&65535|0,0,65537,65537)|0;L=D;K=(k|0)>0;if(K)O=0;else{b[v>>1]=O;break}do{ra=y+((O|1)<<1)|0;qa=ra;b[qa>>1]=N;b[qa+2>>1]=N>>>16;ra=ra+4|0;b[ra>>1]=L;b[ra+2>>1]=L>>>16;O=O+4|0}while((O|0)<(k|0));M=b[M>>1]|0;b[v>>1]=M;M=ke(M&65535|0,0,65537,65537)|0;L=D;N=k<<1;if(K){K=0;do{ra=v+((K|1)<<1)|0;qa=ra;b[qa>>1]=M;b[qa+2>>1]=M>>>16;ra=ra+4|0;b[ra>>1]=L;b[ra+2>>1]=L>>>16;K=K+4|0}while((K|0)<(N|0));P=92}else P=92}else P=84;while(0);if((P|0)==84)if((da|0)==0?(N=ke(e[v+(k+1<<1)>>1]|0,0,65537,65537)|0,O=D,(k|0)>0):0){P=0;do{ra=v+((P|1)<<1)|0;qa=ra;b[qa>>1]=N;b[qa+2>>1]=N>>>16;ra=ra+4|0;b[ra>>1]=O;b[ra+2>>1]=O>>>16;P=P+4|0}while((P|0)<(k|0));P=87}else P=87;if((P|0)==87)if(!M){b[v>>1]=b[w>>1]|0;P=89}else P=89;if((P|0)==89)if((Z|0)==0?(K=ke(e[v>>1]|0,0,65537,65537)|0,L=D,(k|0)>0):0){M=0;do{ra=y+((M|1)<<1)|0;qa=ra;b[qa>>1]=K;b[qa+2>>1]=K>>>16;ra=ra+4|0;b[ra>>1]=L;b[ra+2>>1]=L>>>16;M=M+4|0}while((M|0)<(k|0));P=92}else P=92;if(((P|0)==92?!W:0)?(J=ke(e[y+(k<<1)>>1]|0,0,65537,65537)|0,I=D,(k|0)>0):0){K=k+1|0;L=0;do{ra=y+(K+L<<1)|0;qa=ra;b[qa>>1]=J;b[qa+2>>1]=J>>>16;ra=ra+4|0;b[ra>>1]=I;b[ra+2>>1]=I>>>16;L=L+4|0}while((L|0)<(k|0))}I=b[v>>1]|0;b[y>>1]=I;b:do if(!(c[s+13112>>2]|0)){if(p){if((r|0)==1|(k|0)==4){u=B;break}}else if(((r|0)==1?1:(c[s+4>>2]|0)!=3)|(k|0)==4){u=B;break}ra=r+ -26|0;ra=(ra|0)>-1?ra:26-r|0;qa=r+ -10|0;qa=(qa|0)>-1?qa:10-r|0;if((((ra|0)>(qa|0)?qa:ra)|0)>(c[1576+(h+ -3<<2)>>2]|0)){J=1<<q+ 
-5;if((p&(a[s+13061>>0]|0)!=0&(h|0)==5?(G=I&65535,H=b[y+128>>1]|0,F=H&65535,ra=F+G-(e[y+64>>1]<<1)|0,(((ra|0)>-1?ra:0-ra|0)|0)<(J|0)):0)?(C=v+128|0,E=b[C>>1]|0,ra=(E&65535)+G-(e[v+64>>1]<<1)|0,(((ra|0)>-1?ra:0-ra|0)|0)<(J|0)):0){b[x>>1]=I;b[x+128>>1]=H;y=0;do{ra=y;y=y+1|0;b[x+(y<<1)>>1]=(($(G,63-ra|0)|0)+32+($(F,y)|0)|0)>>>6}while((y|0)!=63);y=0;while(1){x=y+1|0;b[v+(x<<1)>>1]=(($(I&65535,63-y|0)|0)+32+($(E&65535,x)|0)|0)>>>6;if((x|0)==63)break b;I=b[v>>1]|0;E=b[C>>1]|0;y=x}}C=k<<1;H=b[v+(C<<1)>>1]|0;b[A+(C<<1)>>1]=H;F=b[y+(C<<1)>>1]|0;b[x+(C<<1)>>1]=F;C=C+ -2|0;E=(C|0)>-1;if(E){G=C;while(1){ra=G+1|0;qa=H;H=b[v+(ra<<1)>>1]|0;b[A+(ra<<1)>>1]=((qa&65535)+2+((H&65535)<<1)+(e[v+(G<<1)>>1]|0)|0)>>>2;if((G|0)<=0)break;else G=G+ -1|0}}ra=((e[w>>1]|0)+2+((I&65535)<<1)+(e[B>>1]|0)|0)>>>2&65535;b[A>>1]=ra;b[x>>1]=ra;if(E)while(1){ra=C+1|0;qa=F;F=b[y+(ra<<1)>>1]|0;b[x+(ra<<1)>>1]=((qa&65535)+2+((F&65535)<<1)+(e[y+(C<<1)>>1]|0)|0)>>>2;if((C|0)<=0){w=z;break}else C=C+ -1|0}else w=z}else u=B}else u=B;while(0);if(!r){Vb(o,u,w,d,h);i=l;return}else if((r|0)==1){if((k|0)>0){j=k;q=0;do{j=(e[w+(q<<1)>>1]|0)+j+(e[u+(q<<1)>>1]|0)|0;q=q+1|0}while((q|0)!=(k|0));r=j>>h+1;s=ke(r|0,((r|0)<0)<<31>>31|0,65537,65537)|0;t=D;q=0;do{j=($(q,d)|0)+m|0;h=0;do{ra=n+(j+h<<1)|0;qa=ra;a[qa>>0]=s;a[qa+1>>0]=s>>8;a[qa+2>>0]=s>>16;a[qa+3>>0]=s>>24;ra=ra+4|0;a[ra>>0]=t;a[ra+1>>0]=t>>8;a[ra+2>>0]=t>>16;a[ra+3>>0]=t>>24;h=h+4|0}while((h|0)<(k|0));q=q+1|0}while((q|0)!=(k|0))}else r=k>>h+1;if(!(p&(k|0)<32)){i=l;return}b[o>>1]=((r<<1)+2+(e[w>>1]|0)+(e[u>>1]|0)|0)>>>2;if((k|0)<=1){i=l;return}o=(r*3|0)+2|0;p=1;do{b[n+(p+m<<1)>>1]=((e[u+(p<<1)>>1]|0)+o|0)>>>2;p=p+1|0}while((p|0)!=(k|0));p=1;do{b[n+(($(p,d)|0)+m<<1)>>1]=((e[w+(p<<1)>>1]|0)+o|0)>>>2;p=p+1|0}while((p|0)!=(k|0));i=l;return}else{if(!(c[s+13104>>2]|0))m=0;else m=(a[t+31256>>0]|0)!=0;Wb(o,u,w,d,j,r,k,m&1,q);i=l;return}}function Vb(a,c,d,f,g){a=a|0;c=c|0;d=d|0;f=f|0;g=g|0;var 
h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0;m=i;j=1<<g;if((j|0)<=0){i=m;return}l=j+ -1|0;h=c+(j<<1)|0;k=d+(j<<1)|0;g=g+1|0;n=0;do{o=d+(n<<1)|0;p=l-n|0;q=$(n,f)|0;n=n+1|0;r=0;do{v=$(e[o>>1]|0,l-r|0)|0;s=r;r=r+1|0;u=$(e[h>>1]|0,r)|0;t=$(e[c+(s<<1)>>1]|0,p)|0;b[a+(s+q<<1)>>1]=v+j+u+t+($(e[k>>1]|0,n)|0)>>g}while((r|0)!=(j|0))}while((n|0)!=(j|0));i=m;return}function Wb(c,f,g,h,j,k,l,m,n){c=c|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0;o=i;i=i+208|0;r=o;p=a[1592+(k+ -2)>>0]|0;q=r+(l<<1)|0;s=($(p,l)|0)>>5;if((k|0)>17){u=f+ -2|0;t=k+ -11|0;if(t>>>0<15&(s|0)<-1){if((l|0)>=0){u=0;do{w=f+(u+ -1<<1)|0;y=w;y=d[y>>0]|d[y+1>>0]<<8|d[y+2>>0]<<16|d[y+3>>0]<<24;w=w+4|0;w=d[w>>0]|d[w+1>>0]<<8|d[w+2>>0]<<16|d[w+3>>0]<<24;x=r+(u+l<<1)|0;v=x;b[v>>1]=y;b[v+2>>1]=y>>>16;x=x+4|0;b[x>>1]=w;b[x+2>>1]=w>>>16;u=u+4|0}while((u|0)<=(l|0))}if((s|0)<0){t=b[1632+(t<<1)>>1]|0;do{b[r+(s+l<<1)>>1]=b[g+((($(t,s)|0)+128>>8)+ -1<<1)>>1]|0;s=s+1|0}while((s|0)!=0)}}else q=u;r=(l|0)>0;if(r){s=0;do{w=s;s=s+1|0;v=$(s,p)|0;t=v>>5;v=v&31;if(!v){t=t+1|0;v=$(w,h)|0;u=0;do{x=q+(t+u<<1)|0;z=x;z=d[z>>0]|d[z+1>>0]<<8|d[z+2>>0]<<16|d[z+3>>0]<<24;x=x+4|0;x=d[x>>0]|d[x+1>>0]<<8|d[x+2>>0]<<16|d[x+3>>0]<<24;y=c+(u+v<<1)|0;w=y;a[w>>0]=z;a[w+1>>0]=z>>8;a[w+2>>0]=z>>16;a[w+3>>0]=z>>24;y=y+4|0;a[y>>0]=x;a[y+1>>0]=x>>8;a[y+2>>0]=x>>16;a[y+3>>0]=x>>24;u=u+4|0}while((u|0)<(l|0))}else{u=32-v|0;w=$(w,h)|0;x=0;do{z=x+t|0;y=$(e[q+(z+1<<1)>>1]|0,u)|0;b[c+(x+w<<1)>>1]=(y+16+($(e[q+(z+2<<1)>>1]|0,v)|0)|0)>>>5;z=x|1;y=z+t|0;A=$(e[q+(y+1<<1)>>1]|0,u)|0;b[c+(z+w<<1)>>1]=(A+16+($(e[q+(y+2<<1)>>1]|0,v)|0)|0)>>>5;z=x|2;y=z+t|0;A=$(e[q+(y+1<<1)>>1]|0,u)|0;b[c+(z+w<<1)>>1]=(A+16+($(e[q+(y+2<<1)>>1]|0,v)|0)|0)>>>5;z=x|3;y=z+t|0;A=$(e[q+(y+1<<1)>>1]|0,u)|0;b[c+(z+w<<1)>>1]=(A+16+($(e[q+(y+2<<1)>>1]|0,v)|0)|0)>>>5;x=x+4|0}while((x|0)<(l|0))}}while((s|0)!=(l|0))}if(!((k|0)==26&(j|0)==0&(l|0)<32&(m|0)==0&r)){i=o;return}j=g+ -2|0;k=1<<n;m=0-k|0;k=k+ 
-1|0;n=0;do{p=((e[g+(n<<1)>>1]|0)-(e[j>>1]|0)>>1)+(e[f>>1]|0)|0;if(p&m)p=0-p>>31&k;b[c+(($(n,h)|0)<<1)>>1]=p;n=n+1|0}while((n|0)!=(l|0));i=o;return}u=g+ -2|0;t=k+ -11|0;if(t>>>0<15&(s|0)<-1){if((l|0)>=0){u=0;do{z=g+(u+ -1<<1)|0;x=z;x=d[x>>0]|d[x+1>>0]<<8|d[x+2>>0]<<16|d[x+3>>0]<<24;z=z+4|0;z=d[z>>0]|d[z+1>>0]<<8|d[z+2>>0]<<16|d[z+3>>0]<<24;A=r+(u+l<<1)|0;y=A;b[y>>1]=x;b[y+2>>1]=x>>>16;A=A+4|0;b[A>>1]=z;b[A+2>>1]=z>>>16;u=u+4|0}while((u|0)<=(l|0))}if((s|0)<0){t=b[1632+(t<<1)>>1]|0;do{b[r+(s+l<<1)>>1]=b[f+((($(t,s)|0)+128>>8)+ -1<<1)>>1]|0;s=s+1|0}while((s|0)!=0)}}else q=u;r=(l|0)>0;if(r){t=0;do{s=t;t=t+1|0;w=$(t,p)|0;x=w>>5;w=w&31;if(!w){v=x+1|0;u=0;do{b[c+(($(u,h)|0)+s<<1)>>1]=b[q+(v+u<<1)>>1]|0;u=u+1|0}while((u|0)!=(l|0))}else{v=32-w|0;u=0;do{A=u+x|0;z=$(e[q+(A+1<<1)>>1]|0,v)|0;b[c+(($(u,h)|0)+s<<1)>>1]=(z+16+($(e[q+(A+2<<1)>>1]|0,w)|0)|0)>>>5;u=u+1|0}while((u|0)!=(l|0))}}while((t|0)!=(l|0))}if(!((k|0)==10&(j|0)==0&(l|0)<32&(m|0)==0&r)){i=o;return}h=f+ -2|0;j=1<<n;n=0-j|0;j=j+ -1|0;k=0;do{m=((e[f+(k<<1)>>1]|0)-(e[h>>1]|0)>>1)+(e[g>>1]|0)|0;if(m&n)m=0-m>>31&j;b[c+(k<<1)>>1]=m;m=k|1;p=((e[f+(m<<1)>>1]|0)-(e[h>>1]|0)>>1)+(e[g>>1]|0)|0;if(p&n)p=0-p>>31&j;b[c+(m<<1)>>1]=p;m=k|2;p=((e[f+(m<<1)>>1]|0)-(e[h>>1]|0)>>1)+(e[g>>1]|0)|0;if(p&n)p=0-p>>31&j;b[c+(m<<1)>>1]=p;m=k|3;p=((e[f+(m<<1)>>1]|0)-(e[h>>1]|0)>>1)+(e[g>>1]|0)|0;if(p&n)p=0-p>>31&j;b[c+(m<<1)>>1]=p;k=k+4|0}while((k|0)<(l|0));i=o;return}function Xb(b,e,f){b=b|0;e=e|0;f=f|0;var g=0,h=0;g=i;h=c[e>>2]|0;if(!h){i=g;return}if(!(c[h+304>>2]|0)){i=g;return}h=e+46|0;f=(d[h>>0]|0)&(f^255)&255;a[h>>0]=f;if(f<<24>>24){i=g;return}Uc(c[b+4>>2]|0,e+4|0);c[e+24>>2]=0;i=g;return}function Yb(a){a=a|0;var b=0;b=i;Xb(a,a+2524|0,6);i=b;return}function Zb(a){a=a|0;var b=0;b=i;Xb(a,a+2524|0,-1);i=b;return}function _b(d,e,f){d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0;g=i;h=d+4364|0;if(((c[(c[d+2524>>2]|0)+304>>2]|0)!=0?(b[d+2568>>1]|0)==(b[h>>1]|0):0)?(c[d+2544>>2]|0)==(f|0):0){j=-1094995529;i=g;return 
j|0}j=d+2524|0;if(c[(c[j>>2]|0)+304>>2]|0){j=-12;i=g;return j|0}if((Tc(c[d+4>>2]|0,d+2528|0,1)|0)<0){j=-12;i=g;return j|0}k=d+200|0;m=c[k>>2]|0;c[d+2540>>2]=$(c[m+13132>>2]|0,c[m+13128>>2]|0)|0;m=d+4520|0;l=c[j>>2]|0;c[l+244>>2]=(c[m>>2]|0)==1&1;c[l+240>>2]=((c[m>>2]|0)+ -1|0)>>>0<2&1;c[e>>2]=l;c[d+2520>>2]=j;a[d+2570>>0]=(a[d+1450>>0]|0)==0?2:3;c[d+2544>>2]=f;b[d+2568>>1]=b[h>>1]|0;j=d+2552|0;f=(c[k>>2]|0)+20|0;c[j+0>>2]=c[f+0>>2];c[j+4>>2]=c[f+4>>2];c[j+8>>2]=c[f+8>>2];c[j+12>>2]=c[f+12>>2];j=0;i=g;return j|0}function $b(d,e,f){d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0;g=i;l=d+2046|0;k=d+2572|0;h=d+4366|0;n=(f|0)==0;m=d+4364|0;f=d+200|0;while(1){if((a[l>>0]|0)==1){p=d+2524|0;o=d+2570|0;if(((a[o>>0]&8)==0?(c[d+2544>>2]|0)!=(c[k>>2]|0):0)?(b[d+2568>>1]|0)==(b[h>>1]|0):0)Xb(d,p,1)}else o=d+2570|0;if(!(a[o>>0]&1))o=0;else o=(b[d+2568>>1]|0)==(b[h>>1]|0)&1;if(((n?(b[h>>1]|0)==(b[m>>1]|0):0)?(j=c[f>>2]|0,(j|0)!=0):0)?(o|0)<=(c[j+(((c[j+72>>2]|0)+ -1|0)*12|0)+80>>2]|0):0){d=0;h=21;break}if(o){h=15;break}o=b[h>>1]|0;if(o<<16>>16==(b[m>>1]|0)){d=0;h=21;break}b[h>>1]=(o&65535)+1&255}if((h|0)==15){h=d+2524|0;e=Ad(e,c[h>>2]|0)|0;if(!(a[d+2570>>0]&8))Xb(d,h,1);else Xb(d,h,9);p=(e|0)<0?e:1;i=g;return p|0}else if((h|0)==21){i=g;return d|0}return 0}function ac(){var b=0,c=0,d=0,e=0,f=0;b=i;if(!(a[1664]|0))c=0;else{i=b;return}do{d=0;do{f=($(d<<1|1,c)|0)&127;e=f>>>0>63;f=e?f+ -64|0:f;e=e?-1:1;if((f|0)>31){f=64-f|0;e=0-e|0}a[1664+(c<<5)+d>>0]=$(a[2688+f>>0]|0,e)|0;d=d+1|0}while((d|0)!=32);c=c+1|0}while((c|0)!=32);i=b;return}function bc(a,b){a=a|0;b=b|0;c[a>>2]=4;c[a+4>>2]=1;c[a+8>>2]=2;c[a+12>>2]=3;c[a+16>>2]=4;c[a+20>>2]=1;c[a+24>>2]=2;c[a+28>>2]=1;c[a+32>>2]=3;c[a+36>>2]=4;c[a+40>>2]=5;c[a+44>>2]=6;c[a+48>>2]=2;c[a+52>>2]=3;c[a+56>>2]=4;c[a+60>>2]=5;c[a+64>>2]=1;c[a+68>>2]=1;c[a+72>>2]=2;c[a+1676>>2]=5;c[a+1680>>2]=6;c[a+1684>>2]=1;c[a+1688>>2]=2;c[a+1692>>2]=5;c[a+1696>>2]=6;c[a+1700>>2]=1;c[a+1704>>2]=2;return}function 
cc(a,c,d,e,f,g,h){a=a|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0;j=i;c=c>>>1;if((e|0)<=0){i=j;return}k=(d|0)>0;h=h-g|0;l=0;while(1){if(k){m=0;do{b[a+(m<<1)>>1]=(_c(f,g)|0)<<h;m=m+1|0}while((m|0)!=(d|0))}l=l+1|0;if((l|0)==(e|0))break;else a=a+(c<<1)|0}i=j;return}function dc(a,c,d,f){a=a|0;c=c|0;d=d|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0;g=i;d=d>>>1;f=1<<f;h=0-f|0;f=f+ -1|0;j=0;while(1){l=c;m=0;while(1){k=a+(m<<1)|0;n=(b[l>>1]|0)+(e[k>>1]|0)|0;if(n&h)n=0-n>>31&f;b[k>>1]=n;m=m+1|0;if((m|0)==4)break;else l=l+2|0}j=j+1|0;if((j|0)==4)break;else{c=c+8|0;a=a+(d<<1)|0}}i=g;return}
+
+
+
+function ec(a,c,d,f){a=a|0;c=c|0;d=d|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0;g=i;d=d>>>1;f=1<<f;h=0-f|0;f=f+ -1|0;j=0;while(1){l=c;m=0;while(1){k=a+(m<<1)|0;n=(b[l>>1]|0)+(e[k>>1]|0)|0;if(n&h)n=0-n>>31&f;b[k>>1]=n;m=m+1|0;if((m|0)==8)break;else l=l+2|0}j=j+1|0;if((j|0)==8)break;else{c=c+16|0;a=a+(d<<1)|0}}i=g;return}function fc(a,c,d,f){a=a|0;c=c|0;d=d|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0;g=i;d=d>>>1;f=1<<f;h=0-f|0;f=f+ -1|0;j=0;while(1){l=c;m=0;while(1){k=a+(m<<1)|0;n=(b[l>>1]|0)+(e[k>>1]|0)|0;if(n&h)n=0-n>>31&f;b[k>>1]=n;m=m+1|0;if((m|0)==16)break;else l=l+2|0}j=j+1|0;if((j|0)==16)break;else{c=c+32|0;a=a+(d<<1)|0}}i=g;return}function gc(a,c,d,f){a=a|0;c=c|0;d=d|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0;g=i;d=d>>>1;f=1<<f;h=0-f|0;f=f+ -1|0;j=0;while(1){l=c;m=0;while(1){k=a+(m<<1)|0;n=(b[l>>1]|0)+(e[k>>1]|0)|0;if(n&h)n=0-n>>31&f;b[k>>1]=n;m=m+1|0;if((m|0)==32)break;else l=l+2|0}j=j+1|0;if((j|0)==32)break;else{c=c+64|0;a=a+(d<<1)|0}}i=g;return}function hc(a,c,d){a=a|0;c=c|0;d=d|0;var e=0,f=0,g=0,h=0,j=0;e=i;c=c<<16>>16;d=15-d-c|0;c=1<<c;if((d|0)>0){f=1<<d+ -1;if((c|0)>0){h=a;a=0}else{i=e;return}while(1){j=h;g=0;while(1){b[j>>1]=(b[j>>1]|0)+f>>d;g=g+1|0;if((g|0)==(c|0))break;else j=j+2|0}a=a+1|0;if((a|0)==(c|0))break;else h=h+(c<<1)|0}i=e;return}if((c|0)<=0){i=e;return}d=0-d|0;f=0;while(1){g=a;h=0;while(1){b[g>>1]=b[g>>1]<<d;h=h+1|0;if((h|0)==(c|0))break;else g=g+2|0}f=f+1|0;if((f|0)==(c|0))break;else a=a+(c<<1)|0}i=e;return}function ic(a,c,d){a=a|0;c=c|0;d=d|0;var f=0,g=0,h=0,j=0,k=0;f=i;c=1<<(c<<16>>16);if(d){d=c+ -1|0;if((d|0)<=0){i=f;return}g=(c|0)>0;h=0;do{if(g){j=0;do{k=a+(j+c<<1)|0;b[k>>1]=(e[k>>1]|0)+(e[a+(j<<1)>>1]|0);j=j+1|0}while((j|0)!=(c|0))}a=a+(c<<1)|0;h=h+1|0}while((h|0)!=(d|0));i=f;return}if((c|0)<=0){i=f;return}d=(c|0)>1;h=0;while(1){if(d){j=b[a>>1]|0;g=1;do{k=a+(g<<1)|0;j=(e[k>>1]|0)+(j&65535)&65535;b[k>>1]=j;g=g+1|0}while((g|0)!=(c|0))}h=h+1|0;if((h|0)==(c|0))break;else a=a+(c<<1)|0}i=f;return}function jc(a,c){a=a|0;c=c|0;var 
d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0;d=i;e=0;g=a;while(1){q=b[g>>1]|0;n=g+16|0;o=b[n>>1]|0;h=o+q|0;f=g+24|0;p=b[f>>1]|0;m=p+o|0;j=q-p|0;l=g+8|0;k=(b[l>>1]|0)*74|0;p=((q-o+p|0)*74|0)+64|0;o=p>>7;if((o+32768|0)>>>0>65535)o=p>>31^32767;b[n>>1]=o;o=(h*29|0)+64+(m*55|0)+k|0;n=o>>7;if((n+32768|0)>>>0>65535)n=o>>31^32767;b[g>>1]=n;m=($(m,-29)|0)+64+(j*55|0)+k|0;n=m>>7;if((n+32768|0)>>>0>65535)n=m>>31^32767;b[l>>1]=n;j=(h*55|0)+64+(j*29|0)-k|0;h=j>>7;if((h+32768|0)>>>0>65535)h=j>>31^32767;b[f>>1]=h;e=e+1|0;if((e|0)==4)break;else g=g+2|0}e=20-c|0;c=1<<e+ -1;g=0;while(1){p=b[a>>1]|0;n=a+4|0;q=b[n>>1]|0;l=q+p|0;f=a+6|0;o=b[f>>1]|0;m=o+q|0;h=p-o|0;k=a+2|0;j=(b[k>>1]|0)*74|0;o=((p-q+o|0)*74|0)+c>>e;if((o+32768|0)>>>0>65535)o=o>>31^32767;b[n>>1]=o;n=(l*29|0)+c+(m*55|0)+j>>e;if((n+32768|0)>>>0>65535)n=n>>31^32767;b[a>>1]=n;m=($(m,-29)|0)+c+(h*55|0)+j>>e;if((m+32768|0)>>>0>65535)m=m>>31^32767;b[k>>1]=m;h=(l*55|0)+c+(h*29|0)-j>>e;if((h+32768|0)>>>0>65535)h=h>>31^32767;b[f>>1]=h;g=g+1|0;if((g|0)==4)break;else a=a+8|0}i=d;return}function kc(a,c,d){a=a|0;c=c|0;d=d|0;var e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0;c=i;e=0;g=a;while(1){n=b[g>>1]<<6;k=g+16|0;m=b[k>>1]<<6;j=m+n|0;m=n-m|0;n=g+8|0;l=b[n>>1]|0;f=g+24|0;o=b[f>>1]|0;h=(o*36|0)+(l*83|0)|0;l=($(o,-83)|0)+(l*36|0)|0;o=j+64+h|0;p=o>>7;if((p+32768|0)>>>0>65535)p=o>>31^32767;b[g>>1]=p;p=m+64+l|0;o=p>>7;if((o+32768|0)>>>0>65535)o=p>>31^32767;b[n>>1]=o;l=m-l+64|0;m=l>>7;if((m+32768|0)>>>0>65535)m=l>>31^32767;b[k>>1]=m;j=j-h+64|0;h=j>>7;if((h+32768|0)>>>0>65535)h=j>>31^32767;b[f>>1]=h;e=e+1|0;if((e|0)==4)break;else g=g+2|0}e=20-d|0;d=1<<e+ 
-1;g=0;while(1){n=b[a>>1]<<6;k=a+4|0;o=b[k>>1]<<6;m=a+2|0;l=b[m>>1]|0;f=a+6|0;j=b[f>>1]|0;h=(j*36|0)+(l*83|0)|0;l=($(j,-83)|0)+(l*36|0)|0;j=o+n+d|0;p=j+h>>e;if((p+32768|0)>>>0>65535)p=p>>31^32767;b[a>>1]=p;n=n-o+d|0;o=n+l>>e;if((o+32768|0)>>>0>65535)o=o>>31^32767;b[m>>1]=o;l=n-l>>e;if((l+32768|0)>>>0>65535)l=l>>31^32767;b[k>>1]=l;h=j-h>>e;if((h+32768|0)>>>0>65535)h=h>>31^32767;b[f>>1]=h;g=g+1|0;if((g|0)==4)break;else a=a+8|0}i=c;return}function lc(d,e,f){d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0;j=i;i=i+64|0;r=j+48|0;p=j+32|0;g=j+16|0;h=j;q=(e|0)>8;s=e+4|0;k=r+4|0;l=r+8|0;m=r+12|0;o=0;s=(s|0)>8?8:s;n=d;while(1){c[p+0>>2]=0;c[p+4>>2]=0;c[p+8>>2]=0;c[p+12>>2]=0;w=(s|0)>1;t=0;do{if(w){v=p+(t<<2)|0;u=c[v>>2]|0;x=1;do{u=($(b[n+(x<<3<<1)>>1]|0,a[1664+(x<<2<<5)+t>>0]|0)|0)+u|0;x=x+2|0}while((x|0)<(s|0));c[v>>2]=u}t=t+1|0}while((t|0)!=4);w=b[n>>1]<<6;v=b[n+64>>1]<<6;x=v+w|0;v=w-v|0;w=b[n+32>>1]|0;u=b[n+96>>1]|0;t=(u*36|0)+(w*83|0)|0;w=($(u,-83)|0)+(w*36|0)|0;u=t+x|0;c[r>>2]=u;c[k>>2]=w+v;c[l>>2]=v-w;c[m>>2]=x-t;t=0;while(1){v=c[p+(t<<2)>>2]|0;w=u+64+v|0;x=w>>7;if((x+32768|0)>>>0>65535)x=w>>31^32767;b[n+(t<<3<<1)>>1]=x;v=u-v+64|0;u=v>>7;if((u+32768|0)>>>0>65535)u=v>>31^32767;b[n+(7-t<<3<<1)>>1]=u;t=t+1|0;if((t|0)==4)break;u=c[r+(t<<2)>>2]|0}if((s|0)<8)s=(o&3|0)==0&(o|0)!=0?s+ -4|0:s;o=o+1|0;if((o|0)==8)break;else n=n+2|0}k=q?8:e;l=20-f|0;m=1<<l+ 
-1;f=(k|0)>1;n=g+4|0;o=g+8|0;e=g+12|0;p=0;while(1){c[h+0>>2]=0;c[h+4>>2]=0;c[h+8>>2]=0;c[h+12>>2]=0;t=0;do{if(f){q=h+(t<<2)|0;s=c[q>>2]|0;r=1;do{s=($(b[d+(r<<1)>>1]|0,a[1664+(r<<2<<5)+t>>0]|0)|0)+s|0;r=r+2|0}while((r|0)<(k|0));c[q>>2]=s}t=t+1|0}while((t|0)!=4);w=b[d>>1]<<6;v=b[d+8>>1]<<6;x=v+w|0;v=w-v|0;w=b[d+4>>1]|0;s=b[d+12>>1]|0;q=(s*36|0)+(w*83|0)|0;w=($(s,-83)|0)+(w*36|0)|0;s=q+x|0;c[g>>2]=s;c[n>>2]=w+v;c[o>>2]=v-w;c[e>>2]=x-q;q=0;while(1){r=c[h+(q<<2)>>2]|0;s=s+m|0;t=s+r>>l;if((t+32768|0)>>>0>65535)t=t>>31^32767;b[d+(q<<1)>>1]=t;r=s-r>>l;if((r+32768|0)>>>0>65535)r=r>>31^32767;b[d+(7-q<<1)>>1]=r;q=q+1|0;if((q|0)==4)break;s=c[g+(q<<2)>>2]|0}p=p+1|0;if((p|0)==8)break;else d=d+16|0}i=j;return}function mc(d,e,f){d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0;g=i;i=i+192|0;t=g+160|0;u=g+128|0;v=g+112|0;s=g+96|0;j=g+64|0;l=g+32|0;h=g+16|0;k=g;m=(e|0)>16;w=e+4|0;n=v+4|0;o=v+8|0;p=v+12|0;r=0;w=(w|0)>16?16:w;q=d;while(1){c[u+0>>2]=0;c[u+4>>2]=0;c[u+8>>2]=0;c[u+12>>2]=0;c[u+16>>2]=0;c[u+20>>2]=0;c[u+24>>2]=0;c[u+28>>2]=0;A=(w|0)>1;B=0;do{if(A){z=u+(B<<2)|0;x=c[z>>2]|0;y=1;do{x=($(b[q+(y<<4<<1)>>1]|0,a[1664+(y<<1<<5)+B>>0]|0)|0)+x|0;y=y+2|0}while((y|0)<(w|0));c[z>>2]=x}B=B+1|0}while((B|0)!=8);c[s+0>>2]=0;c[s+4>>2]=0;c[s+8>>2]=0;c[s+12>>2]=0;y=0;do{x=s+(y<<2)|0;A=c[x>>2]|0;z=1;do{A=($(b[q+(z<<5<<1)>>1]|0,a[1664+(z<<2<<5)+y>>0]|0)|0)+A|0;z=z+2|0}while((z|0)<8);c[x>>2]=A;y=y+1|0}while((y|0)!=4);A=b[q>>1]<<6;z=b[q+256>>1]<<6;B=z+A|0;z=A-z|0;A=b[q+128>>1]|0;x=b[q+384>>1]|0;y=(x*36|0)+(A*83|0)|0;A=($(x,-83)|0)+(A*36|0)|0;x=y+B|0;c[v>>2]=x;c[n>>2]=A+z;c[o>>2]=z-A;c[p>>2]=B-y;y=0;while(1){B=c[s+(y<<2)>>2]|0;c[t+(y<<2)>>2]=B+x;c[t+(7-y<<2)>>2]=x-B;y=y+1|0;if((y|0)==4){x=0;break}x=c[v+(y<<2)>>2]|0}do{z=c[t+(x<<2)>>2]|0;y=c[u+(x<<2)>>2]|0;B=z+64+y|0;A=B>>7;if((A+32768|0)>>>0>65535)A=B>>31^32767;b[q+(x<<4<<1)>>1]=A;y=z-y+64|0;z=y>>7;if((z+32768|0)>>>0>65535)z=y>>31^32767;b[q+(15-x<<4<<1)>>1]=z;x=x+1|0}while((x|0)!=8);if((w|0)
<16)w=(r&3|0)==0&(r|0)!=0?w+ -4|0:w;r=r+1|0;if((r|0)==16)break;else q=q+2|0}m=m?16:e;f=20-f|0;n=1<<f+ -1;q=(m|0)>1;o=h+4|0;r=h+8|0;p=h+12|0;s=0;while(1){c[l+0>>2]=0;c[l+4>>2]=0;c[l+8>>2]=0;c[l+12>>2]=0;c[l+16>>2]=0;c[l+20>>2]=0;c[l+24>>2]=0;c[l+28>>2]=0;v=0;do{if(q){e=l+(v<<2)|0;u=c[e>>2]|0;t=1;do{u=($(b[d+(t<<1)>>1]|0,a[1664+(t<<1<<5)+v>>0]|0)|0)+u|0;t=t+2|0}while((t|0)<(m|0));c[e>>2]=u}v=v+1|0}while((v|0)!=8);c[k+0>>2]=0;c[k+4>>2]=0;c[k+8>>2]=0;c[k+12>>2]=0;e=0;do{t=k+(e<<2)|0;v=c[t>>2]|0;u=1;do{v=($(b[d+(u<<1<<1)>>1]|0,a[1664+(u<<2<<5)+e>>0]|0)|0)+v|0;u=u+2|0}while((u|0)<8);c[t>>2]=v;e=e+1|0}while((e|0)!=4);A=b[d>>1]<<6;z=b[d+16>>1]<<6;B=z+A|0;z=A-z|0;A=b[d+8>>1]|0;t=b[d+24>>1]|0;u=(t*36|0)+(A*83|0)|0;A=($(t,-83)|0)+(A*36|0)|0;t=u+B|0;c[h>>2]=t;c[o>>2]=A+z;c[r>>2]=z-A;c[p>>2]=B-u;u=0;while(1){B=c[k+(u<<2)>>2]|0;c[j+(u<<2)>>2]=B+t;c[j+(7-u<<2)>>2]=t-B;u=u+1|0;if((u|0)==4){t=0;break}t=c[h+(u<<2)>>2]|0}do{u=c[l+(t<<2)>>2]|0;v=(c[j+(t<<2)>>2]|0)+n|0;e=v+u>>f;if((e+32768|0)>>>0>65535)e=e>>31^32767;b[d+(t<<1)>>1]=e;u=v-u>>f;if((u+32768|0)>>>0>65535)u=u>>31^32767;b[d+(15-t<<1)>>1]=u;t=t+1|0}while((t|0)!=8);s=s+1|0;if((s|0)==16)break;else d=d+32|0}i=g;return}function nc(d,e,f){d=d|0;e=e|0;f=f|0;var 
g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0;g=i;i=i+320|0;h=g+256|0;n=g+192|0;o=g+160|0;s=g+128|0;u=g+112|0;t=g+96|0;m=g+64|0;k=g+32|0;j=g+16|0;l=g;q=(e|0)>32;y=e+4|0;v=u+4|0;w=u+8|0;x=u+12|0;p=0;y=(y|0)>32?32:y;r=d;while(1){z=n+0|0;A=z+64|0;do{c[z>>2]=0;z=z+4|0}while((z|0)<(A|0));z=(y|0)>1;C=0;do{if(z){B=n+(C<<2)|0;A=c[B>>2]|0;D=1;do{A=($(b[r+(D<<5<<1)>>1]|0,a[1664+(D<<5)+C>>0]|0)|0)+A|0;D=D+2|0}while((D|0)<(y|0));c[B>>2]=A}C=C+1|0}while((C|0)!=16);c[s+0>>2]=0;c[s+4>>2]=0;c[s+8>>2]=0;c[s+12>>2]=0;c[s+16>>2]=0;c[s+20>>2]=0;c[s+24>>2]=0;c[s+28>>2]=0;z=(y|0)/2|0;A=(y|0)>3;B=0;do{if(A){C=s+(B<<2)|0;D=c[C>>2]|0;E=1;do{D=($(b[r+(E<<6<<1)>>1]|0,a[1664+(E<<1<<5)+B>>0]|0)|0)+D|0;E=E+2|0}while((E|0)<(z|0));c[C>>2]=D}B=B+1|0}while((B|0)!=8);c[t+0>>2]=0;c[t+4>>2]=0;c[t+8>>2]=0;c[t+12>>2]=0;C=0;do{z=t+(C<<2)|0;B=c[z>>2]|0;A=1;do{B=($(b[r+(A<<7<<1)>>1]|0,a[1664+(A<<2<<5)+C>>0]|0)|0)+B|0;A=A+2|0}while((A|0)<8);c[z>>2]=B;C=C+1|0}while((C|0)!=4);D=b[r>>1]<<6;C=b[r+1024>>1]<<6;E=C+D|0;C=D-C|0;D=b[r+512>>1]|0;z=b[r+1536>>1]|0;A=(z*36|0)+(D*83|0)|0;D=($(z,-83)|0)+(D*36|0)|0;z=A+E|0;c[u>>2]=z;c[v>>2]=D+C;c[w>>2]=C-D;c[x>>2]=E-A;A=0;while(1){E=c[t+(A<<2)>>2]|0;c[o+(A<<2)>>2]=E+z;c[o+(7-A<<2)>>2]=z-E;A=A+1|0;if((A|0)==4){z=0;break}z=c[u+(A<<2)>>2]|0}do{D=c[o+(z<<2)>>2]|0;E=c[s+(z<<2)>>2]|0;c[h+(z<<2)>>2]=E+D;c[h+(15-z<<2)>>2]=D-E;z=z+1|0}while((z|0)!=8);z=0;do{A=c[h+(z<<2)>>2]|0;B=c[n+(z<<2)>>2]|0;D=A+64+B|0;C=D>>7;if((C+32768|0)>>>0>65535)C=D>>31^32767;b[r+(z<<5<<1)>>1]=C;A=A-B+64|0;B=A>>7;if((B+32768|0)>>>0>65535)B=A>>31^32767;b[r+(31-z<<5<<1)>>1]=B;z=z+1|0}while((z|0)!=16);if((y|0)<32)y=(p&3|0)==0&(p|0)!=0?y+ -4|0:y;p=p+1|0;if((p|0)==32)break;else r=r+2|0}o=q?32:e;e=20-f|0;s=1<<e+ 
-1;f=(o|0)>1;r=(o|0)/2|0;q=(o|0)>3;p=j+4|0;t=j+8|0;u=j+12|0;v=0;while(1){z=n+0|0;A=z+64|0;do{c[z>>2]=0;z=z+4|0}while((z|0)<(A|0));z=0;do{if(f){w=n+(z<<2)|0;y=c[w>>2]|0;x=1;do{y=($(b[d+(x<<1)>>1]|0,a[1664+(x<<5)+z>>0]|0)|0)+y|0;x=x+2|0}while((x|0)<(o|0));c[w>>2]=y}z=z+1|0}while((z|0)!=16);c[k+0>>2]=0;c[k+4>>2]=0;c[k+8>>2]=0;c[k+12>>2]=0;c[k+16>>2]=0;c[k+20>>2]=0;c[k+24>>2]=0;c[k+28>>2]=0;z=0;do{if(q){w=k+(z<<2)|0;y=c[w>>2]|0;x=1;do{E=x<<1;y=($(b[d+(E<<1)>>1]|0,a[1664+(E<<5)+z>>0]|0)|0)+y|0;x=x+2|0}while((x|0)<(r|0));c[w>>2]=y}z=z+1|0}while((z|0)!=8);c[l+0>>2]=0;c[l+4>>2]=0;c[l+8>>2]=0;c[l+12>>2]=0;w=0;do{x=l+(w<<2)|0;z=c[x>>2]|0;y=1;do{E=y<<2;z=($(b[d+(E<<1)>>1]|0,a[1664+(E<<5)+w>>0]|0)|0)+z|0;y=y+2|0}while((y|0)<8);c[x>>2]=z;w=w+1|0}while((w|0)!=4);D=b[d>>1]<<6;C=b[d+32>>1]<<6;E=C+D|0;C=D-C|0;D=b[d+16>>1]|0;w=b[d+48>>1]|0;x=(w*36|0)+(D*83|0)|0;D=($(w,-83)|0)+(D*36|0)|0;w=x+E|0;c[j>>2]=w;c[p>>2]=D+C;c[t>>2]=C-D;c[u>>2]=E-x;x=0;while(1){E=c[l+(x<<2)>>2]|0;c[m+(x<<2)>>2]=E+w;c[m+(7-x<<2)>>2]=w-E;x=x+1|0;if((x|0)==4){w=0;break}w=c[j+(x<<2)>>2]|0}do{D=c[m+(w<<2)>>2]|0;E=c[k+(w<<2)>>2]|0;c[h+(w<<2)>>2]=E+D;c[h+(15-w<<2)>>2]=D-E;w=w+1|0}while((w|0)!=8);w=0;do{x=c[n+(w<<2)>>2]|0;y=(c[h+(w<<2)>>2]|0)+s|0;z=y+x>>e;if((z+32768|0)>>>0>65535)z=z>>31^32767;b[d+(w<<1)>>1]=z;x=y-x>>e;if((x+32768|0)>>>0>65535)x=x>>31^32767;b[d+(31-w<<1)>>1]=x;w=w+1|0}while((w|0)!=16);v=v+1|0;if((v|0)==32)break;else d=d+64|0}i=g;return}function oc(a,c){a=a|0;c=c|0;var d=0,e=0,f=0;d=i;c=14-c|0;c=((b[a>>1]|0)+1>>1)+(1<<c+ -1)>>c&65535;e=0;do{f=e<<2;b[a+(f<<1)>>1]=c;b[a+((f|1)<<1)>>1]=c;b[a+((f|2)<<1)>>1]=c;b[a+((f|3)<<1)>>1]=c;e=e+1|0}while((e|0)!=4);i=d;return}function pc(a,c){a=a|0;c=c|0;var d=0,e=0,f=0;d=i;c=14-c|0;c=((b[a>>1]|0)+1>>1)+(1<<c+ -1)>>c&65535;e=0;do{f=e<<3;b[a+(f<<1)>>1]=c;b[a+((f|1)<<1)>>1]=c;b[a+((f|2)<<1)>>1]=c;b[a+((f|3)<<1)>>1]=c;b[a+((f|4)<<1)>>1]=c;b[a+((f|5)<<1)>>1]=c;b[a+((f|6)<<1)>>1]=c;b[a+((f|7)<<1)>>1]=c;e=e+1|0}while((e|0)!=8);i=d;return}function qc(a,c){a=a|0;c=c|0;var 
d=0,e=0,f=0,g=0;d=i;e=14-c|0;e=((b[a>>1]|0)+1>>1)+(1<<e+ -1)>>e&65535;c=0;do{f=c<<4;g=0;do{b[a+(g+f<<1)>>1]=e;g=g+1|0}while((g|0)!=16);c=c+1|0}while((c|0)!=16);i=d;return}function rc(a,c){a=a|0;c=c|0;var d=0,e=0,f=0,g=0;d=i;e=14-c|0;e=((b[a>>1]|0)+1>>1)+(1<<e+ -1)>>e&65535;c=0;do{f=c<<5;g=0;do{b[a+(g+f<<1)>>1]=e;g=g+1|0}while((g|0)!=32);c=c+1|0}while((c|0)!=32);i=d;return}function sc(a,f,g,h,j,k,l,m,n,o){a=a|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;var p=0,q=0,r=0,s=0,t=0;p=i;i=i+128|0;k=p;r=k+0|0;q=r+128|0;do{c[r>>2]=0;r=r+4|0}while((r|0)<(q|0));t=d[j+n+96>>0]|0;g=g>>>1;c[k+((t&31)<<2)>>2]=b[j+(n*10|0)+114>>1];c[k+((t+1&31)<<2)>>2]=b[j+(n*10|0)+116>>1];c[k+((t+2&31)<<2)>>2]=b[j+(n*10|0)+118>>1];c[k+((t+3&31)<<2)>>2]=b[j+(n*10|0)+120>>1];j=o+ -5|0;h=h>>>1;if((m|0)<=0){i=p;return}n=(l|0)>0;o=1<<o;q=0-o|0;o=o+ -1|0;r=0;while(1){if(n){s=0;do{t=e[f+(s<<1)>>1]|0;t=t+(c[k+(t>>>j<<2)>>2]|0)|0;if(t&q)t=0-t>>31&o;b[a+(s<<1)>>1]=t;s=s+1|0}while((s|0)!=(l|0))}r=r+1|0;if((r|0)==(m|0))break;else{a=a+(g<<1)|0;f=f+(h<<1)|0}}i=p;return}function tc(a,d,f,g,h,j,k,l,m,n,o,p,q){a=a|0;d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;q=q|0;var r=0,s=0,t=0,u=0,v=0,w=0;n=i;o=h+(m*10|0)+112|0;r=c[h+(m<<2)+100>>2]|0;f=f>>>1;g=g>>>1;if((r|0)!=1){if(c[j>>2]|0){u=b[o>>1]|0;if((l|0)>0){p=1<<q;t=0-p|0;p=p+ -1|0;s=0;do{v=(e[d+(($(s,g)|0)<<1)>>1]|0)+u|0;if(v&t)v=0-v>>31&p;b[a+(($(s,f)|0)<<1)>>1]=v;s=s+1|0}while((s|0)!=(l|0));p=1}else p=1}else p=0;if(c[j+8>>2]|0){s=b[o>>1]|0;k=k+ -1|0;if((l|0)>0){v=1<<q;u=0-v|0;v=v+ -1|0;t=0;do{w=(e[d+(($(t,g)|0)+k<<1)>>1]|0)+s|0;if(w&u)w=0-w>>31&v;b[a+(($(t,f)|0)+k<<1)>>1]=w;t=t+1|0}while((t|0)!=(l|0))}}if(!r){u=l;v=p;w=0;t=k;Bc(a,d,f,g,h,t,u,m,v,w,q);i=n;return}}else p=0;if(c[j+4>>2]|0){r=b[o>>1]|0;if((p|0)<(k|0)){s=1<<q;t=0-s|0;s=s+ -1|0;u=p;do{v=(e[d+(u<<1)>>1]|0)+r|0;if(v&t)v=0-v>>31&s;b[a+(u<<1)>>1]=v;u=u+1|0}while((u|0)!=(k|0));r=1}else r=1}else 
r=0;if(!(c[j+12>>2]|0)){u=l;v=p;w=r;t=k;Bc(a,d,f,g,h,t,u,m,v,w,q);i=n;return}j=b[o>>1]|0;t=l+ -1|0;o=$(t,f)|0;u=$(t,g)|0;if((p|0)>=(k|0)){u=t;v=p;w=r;t=k;Bc(a,d,f,g,h,t,u,m,v,w,q);i=n;return}s=1<<q;l=0-s|0;s=s+ -1|0;v=p;do{w=(e[d+(v+u<<1)>>1]|0)+j|0;if(w&l)w=0-w>>31&s;b[a+(v+o<<1)>>1]=w;v=v+1|0}while((v|0)!=(k|0));Bc(a,d,f,g,h,k,t,m,p,r,q);i=n;return}function uc(d,f,g,h,j,k,l,m,n,o,p,q,r){d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;q=q|0;r=r|0;var s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0;t=i;D=j+(n*10|0)+112|0;C=c[j+(n<<2)+100>>2]|0;g=g>>>1;h=h>>>1;B=(C|0)!=1;if(B){if(c[k>>2]|0){I=b[D>>1]|0;if((m|0)>0){H=1<<r;G=0-H|0;H=H+ -1|0;F=0;do{J=(e[f+(($(F,h)|0)<<1)>>1]|0)+I|0;if(J&G)J=0-J>>31&H;b[d+(($(F,g)|0)<<1)>>1]=J;F=F+1|0}while((F|0)!=(m|0));F=1}else F=1}else F=0;if(c[k+8>>2]|0){G=b[D>>1]|0;l=l+ -1|0;if((m|0)>0){J=1<<r;I=0-J|0;J=J+ -1|0;H=0;do{K=(e[f+(($(H,h)|0)+l<<1)>>1]|0)+G|0;if(K&I)K=0-K>>31&J;b[d+(($(H,g)|0)+l<<1)>>1]=K;H=H+1|0}while((H|0)!=(m|0))}}if(!C){D=1;G=0}else E=15}else{F=0;E=15}if((E|0)==15){if(c[k+4>>2]|0){H=b[D>>1]|0;if((F|0)<(l|0)){I=1<<r;G=0-I|0;I=I+ -1|0;E=F;do{J=(e[f+(E<<1)>>1]|0)+H|0;if(J&G)J=0-J>>31&I;b[d+(E<<1)>>1]=J;E=E+1|0}while((E|0)!=(l|0));G=1}else G=1}else G=0;if(c[k+12>>2]|0){D=b[D>>1]|0;m=m+ -1|0;I=$(m,g)|0;H=$(m,h)|0;if((F|0)<(l|0)){K=1<<r;J=0-K|0;K=K+ -1|0;E=F;do{L=(e[f+(E+H<<1)>>1]|0)+D|0;if(L&J)L=0-L>>31&K;b[d+(E+I<<1)>>1]=L;E=E+1|0}while((E|0)!=(l|0));D=0}else D=0}else D=0}Bc(d,f,g,h,j,l,m,n,F,G,r);r=(C|0)==2;if((a[q>>0]|0)==0&r?(c[k>>2]|0)==0:0)j=(c[k+4>>2]|0)==0;else j=0;H=j&1;j=q+1|0;C=(C|0)==3;if((a[j>>0]|0)==0&C?(c[k+4>>2]|0)==0:0)n=(c[k+8>>2]|0)==0;else n=0;I=n&1;n=q+2|0;if((a[n>>0]|0)==0&r?(c[k+8>>2]|0)==0:0)E=(c[k+12>>2]|0)==0;else E=0;J=E&1;E=q+3|0;if((a[E>>0]|0)==0&C?(c[k>>2]|0)==0:0)k=(c[k+12>>2]|0)==0;else 
k=0;k=k&1;B=B^1;if(!((a[o>>0]|0)==0|B)?(A=H+G|0,z=m-k|0,(A|0)<(z|0)):0)do{b[d+(($(A,g)|0)<<1)>>1]=b[f+(($(A,h)|0)<<1)>>1]|0;A=A+1|0}while((A|0)!=(z|0));if(!((a[o+1>>0]|0)==0|B)?(y=I+G|0,x=m-J|0,(y|0)<(x|0)):0){z=l+ -1|0;do{b[d+(z+($(y,g)|0)<<1)>>1]=b[f+(z+($(y,h)|0)<<1)>>1]|0;y=y+1|0}while((y|0)!=(x|0))}if(!((a[p>>0]|0)==0|D)?(w=H+F|0,v=l-I|0,(w|0)<(v|0)):0)do{b[d+(w<<1)>>1]=b[f+(w<<1)>>1]|0;w=w+1|0}while((w|0)!=(v|0));if(!((a[p+1>>0]|0)==0|D)?(u=k+F|0,s=l-J|0,(u|0)<(s|0)):0){v=m+ -1|0;p=$(v,h)|0;v=$(v,g)|0;do{b[d+(u+v<<1)>>1]=b[f+(u+p<<1)>>1]|0;u=u+1|0}while((u|0)!=(s|0))}if((a[q>>0]|0)!=0&r)b[d>>1]=b[f>>1]|0;if((a[j>>0]|0)!=0&C){L=l+ -1|0;b[d+(L<<1)>>1]=b[f+(L<<1)>>1]|0}if((a[n>>0]|0)!=0&r){L=m+ -1|0;K=l+ -1|0;b[d+(K+($(L,g)|0)<<1)>>1]=b[f+(K+($(L,h)|0)<<1)>>1]|0}if(!((a[E>>0]|0)!=0&C)){i=t;return}L=m+ -1|0;b[d+(($(L,g)|0)<<1)>>1]=b[f+(($(L,h)|0)<<1)>>1]|0;i=t;return}function vc(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;var h=0;h=i;Ac(a,b,2,c,d,e,f,g);i=h;return}function wc(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;var h=0;h=i;Ac(a,2,b,c,d,e,f,g);i=h;return}function xc(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;var g=0;g=i;zc(a,b,2,c,d,e,f);i=g;return}function yc(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;var g=0;g=i;zc(a,2,b,c,d,e,f);i=g;return}function zc(d,f,g,h,j,k,l){d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0;m=i;f=f>>>1;p=g>>>1;o=l+ -8|0;g=$(f,-2)|0;n=0-f|0;r=1<<l;q=0-r|0;r=r+ -1|0;l=0;while(1){s=c[h+(l<<2)>>2]<<o;if((s|0)>=1){v=0-s|0;u=(a[j+l>>0]|0)==0;t=(a[k+l>>0]|0)==0;x=0;w=d;while(1){y=w+(n<<1)|0;B=e[y>>1]|0;z=e[w>>1]|0;A=(e[w+(g<<1)>>1]|0)+4-(e[w+(f<<1)>>1]|0)+(z-B<<2)>>3;if((A|0)<(v|0))A=v;else A=(A|0)>(s|0)?s:A;if(u){B=A+B|0;if(B&q)B=0-B>>31&r;b[y>>1]=B}if(t){y=z-A|0;if(y&q)y=0-y>>31&r;b[w>>1]=y}x=x+1|0;if((x|0)==4)break;else w=w+(p<<1)|0}}l=l+1|0;if((l|0)==2)break;else d=d+(p<<2<<1)|0}i=m;return}function 
Ac(d,f,g,h,j,k,l,m){d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;var n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0;n=i;f=f>>>1;g=g>>>1;o=m+ -8|0;y=h<<o;r=$(f,-3)|0;s=$(f,-2)|0;w=0-f|0;h=f<<1;C=g*3|0;A=C+r|0;B=C+s|0;z=C-f|0;D=C+h|0;E=C+f|0;x=y>>3;u=y>>2;q=$(f,-4)|0;p=f*3|0;v=C+q|0;F=(g+f|0)*3|0;t=(y>>1)+y>>3;H=1<<m;G=0-H|0;H=H+ -1|0;m=g<<2;J=g<<2;I=0;do{Y=b[d+(r<<1)>>1]|0;X=b[d+(s<<1)>>1]|0;W=b[d+(w<<1)>>1]|0;da=W&65535;R=(Y&65535)-((X&65535)<<1)+da|0;R=(R|0)>-1?R:0-R|0;T=b[d+(h<<1)>>1]|0;U=b[d+(f<<1)>>1]|0;V=b[d>>1]|0;M=V&65535;S=(T&65535)-((U&65535)<<1)+M|0;S=(S|0)>-1?S:0-S|0;ba=e[d+(z<<1)>>1]|0;Q=(e[d+(A<<1)>>1]|0)-((e[d+(B<<1)>>1]|0)<<1)+ba|0;Q=(Q|0)>-1?Q:0-Q|0;ca=e[d+(C<<1)>>1]|0;Z=(e[d+(D<<1)>>1]|0)-((e[d+(E<<1)>>1]|0)<<1)+ca|0;Z=(Z|0)>-1?Z:0-Z|0;L=S+R|0;aa=Z+Q|0;K=c[j+(I<<2)>>2]<<o;_=a[k+I>>0]|0;P=a[l+I>>0]|0;do if((aa+L|0)<(y|0)){N=(K*5|0)+1>>1;ea=(e[d+(q<<1)>>1]|0)-da|0;O=b[d+(p<<1)>>1]|0;fa=(O&65535)-M|0;if((((((((fa|0)>-1?fa:0-fa|0)+((ea|0)>-1?ea:0-ea|0)|0)<(x|0)?(fa=da-M|0,(((fa|0)>-1?fa:0-fa|0)|0)<(N|0)):0)?(fa=(e[d+(v<<1)>>1]|0)-ba|0,ea=(e[d+(F<<1)>>1]|0)-ca|0,(((ea|0)>-1?ea:0-ea|0)+((fa|0)>-1?fa:0-fa|0)|0)<(x|0)):0)?(fa=ba-ca|0,(((fa|0)>-1?fa:0-fa|0)|0)<(N|0)):0)?(L<<1|0)<(u|0):0)?(aa<<1|0)<(u|0):0){K=K<<1;L=_<<24>>24==0;M=0-K|0;N=P<<24>>24==0;ba=O;P=1;O=d;while(1){Z=O+(r<<1)|0;Y=Y&65535;_=O+(s<<1)|0;X=X&65535;aa=O+(w<<1)|0;R=W&65535;V=V&65535;W=O+(f<<1)|0;U=U&65535;Q=O+(h<<1)|0;S=T&65535;T=ba&65535;if(L){ba=e[O+(q<<1)>>1]|0;ca=(Y+4+U+(R+X+V<<1)>>3)-R|0;if((ca|0)<(M|0))ca=M;else ca=(ca|0)>(K|0)?K:ca;b[aa>>1]=ca+R;aa=((Y+2+X+R+V|0)>>>2)-X|0;if((aa|0)<(M|0))aa=M;else aa=(aa|0)>(K|0)?K:aa;b[_>>1]=aa+X;_=((Y*3|0)+4+X+R+V+(ba<<1)>>3)-Y|0;if((_|0)<(M|0))_=M;else _=(_|0)>(K|0)?K:_;b[Z>>1]=_+Y}if(N){X=(X+4+S+(V+R+U<<1)>>3)-V|0;if((X|0)<(M|0))X=M;else 
X=(X|0)>(K|0)?K:X;b[O>>1]=X+V;X=((R+2+V+U+S|0)>>>2)-U|0;if((X|0)<(M|0))X=M;else X=(X|0)>(K|0)?K:X;b[W>>1]=X+U;R=(R+4+V+U+(S*3|0)+(T<<1)>>3)-S|0;if((R|0)<(M|0))R=M;else R=(R|0)>(K|0)?K:R;b[Q>>1]=R+S}Q=O+(g<<1)|0;if((P|0)==4)break;Y=b[O+(g+r<<1)>>1]|0;X=b[O+(g+s<<1)>>1]|0;W=b[O+(g-f<<1)>>1]|0;V=b[Q>>1]|0;U=b[O+(g+f<<1)>>1]|0;T=b[O+(g+h<<1)>>1]|0;ba=b[O+(g+p<<1)>>1]|0;P=P+1|0;O=Q}d=d+(J<<1)|0;break}L=K>>1;N=K*10|0;M=0-K|0;O=_<<24>>24!=0;P=P<<24>>24!=0;Q=(Q+R|0)<(t|0)&(O^1);R=0-L|0;S=(Z+S|0)<(t|0)&(P^1);_=V;ba=U;U=1;V=d;while(1){aa=Y&65535;Y=V+(s<<1)|0;Z=X&65535;da=V+(w<<1)|0;ca=W&65535;_=_&65535;W=V+(f<<1)|0;X=ba&65535;T=T&65535;ba=((_-ca|0)*9|0)+8+($(X-Z|0,-3)|0)>>4;if((((ba|0)>-1?ba:0-ba|0)|0)<(N|0)){if((ba|0)<(M|0))ba=M;else ba=(ba|0)>(K|0)?K:ba;if(!O){ea=ba+ca|0;if(ea&G)ea=0-ea>>31&H;b[da>>1]=ea}if(!P){da=_-ba|0;if(da&G)da=0-da>>31&H;b[V>>1]=da}if(Q){aa=((aa+1+ca|0)>>>1)-Z+ba>>1;if((aa|0)<(R|0))aa=R;else aa=(aa|0)>(L|0)?L:aa;Z=aa+Z|0;if(Z&G)Z=0-Z>>31&H;b[Y>>1]=Z}if(S){T=((_+1+T|0)>>>1)-X-ba>>1;if((T|0)<(R|0))T=R;else T=(T|0)>(L|0)?L:T;T=T+X|0;if(T&G)T=0-T>>31&H;b[W>>1]=T}}Z=V+(g<<1)|0;if((U|0)==4)break;Y=b[V+(g+r<<1)>>1]|0;X=b[V+(g+s<<1)>>1]|0;W=b[V+(g-f<<1)>>1]|0;_=b[Z>>1]|0;ba=b[V+(g+f<<1)>>1]|0;T=b[V+(g+h<<1)>>1]|0;U=U+1|0;V=Z}d=d+(J<<1)|0}else d=d+(m<<1)|0;while(0);I=I+1|0}while((I|0)!=2);i=n;return}function Bc(e,f,g,h,j,k,l,m,n,o,p){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;var q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0;t=i;x=c[j+(m<<2)+100>>2]|0;r=a[2728+(x<<2)>>0]|0;q=a[2730+(x<<2)>>0]|0;if((o|0)>=(l|0)){i=t;return}s=(n|0)<(k|0);v=1<<p;u=0-v|0;v=v+ -1|0;w=o;p=$((a[2729+(x<<2)>>0]|0)+o|0,h)|0;y=$((a[2731+(x<<2)>>0]|0)+o|0,h)|0;x=$(o,g)|0;z=$(o,h)|0;while(1){if(s){o=p+r|0;A=y+q|0;B=n;do{C=b[f+(B+z<<1)>>1]|0;D=b[f+(o+B<<1)>>1]|0;if((C&65535)>(D&65535))D=3;else D=((C<<16>>16!=D<<16>>16)<<31>>31)+2|0;E=b[f+(A+B<<1)>>1]|0;if((C&65535)>(E&65535))E=1;else 
E=(C<<16>>16!=E<<16>>16)<<31>>31;C=(b[j+(m*10|0)+(d[2720+(E+D)>>0]<<1)+112>>1]|0)+(C&65535)|0;if(C&u)C=0-C>>31&v;b[e+(B+x<<1)>>1]=C;B=B+1|0}while((B|0)!=(k|0))}w=w+1|0;if((w|0)==(l|0))break;else{p=p+h|0;y=y+h|0;x=x+g|0;z=z+h|0}}i=t;return}function Cc(b,e,f,g,h){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0;j=i;k=c[b+136>>2]|0;l=(c[b+200>>2]|0)+13080|0;r=(1<<c[l>>2])+ -1|0;o=r&e;n=r&f;q=(n|0)!=0|(a[k+309>>0]|0)!=0;m=q&1;c[k+31296>>2]=m;p=(o|0)!=0|(a[k+308>>0]|0)!=0;b=p&1;c[k+31292>>2]=b;if(!(r&(f|e)))p=d[k+311>>0]|0;else p=p&q&1;c[k+31300>>2]=p;if((o+g|0)==(1<<c[l>>2]|0))m=(a[k+310>>0]|0)!=0&(n|0)==0&1;c[k+31308>>2]=m;if(!m){q=0;q=q&1;r=k+31304|0;c[r>>2]=q;r=h+f|0;q=k+316|0;q=c[q>>2]|0;q=(r|0)<(q|0);q=q?b:0;r=k+31288|0;c[r>>2]=q;i=j;return}q=(g+e|0)<(c[k+312>>2]|0);q=q&1;r=k+31304|0;c[r>>2]=q;r=h+f|0;q=k+316|0;q=c[q>>2]|0;q=(r|0)<(q|0);q=q?b:0;r=k+31288|0;c[r>>2]=q;i=j;return}function Dc(b){b=b|0;var e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0;f=i;i=i+16|0;e=f;h=c[b+136>>2]|0;j=h+204|0;k=td(13196)|0;c[e>>2]=k;if(!k){u=-12;i=f;return u|0}k=c[k+4>>2]|0;m=td(468)|0;if(!m){u=-12;i=f;return u|0}l=c[m+4>>2]|0;c[l+4>>2]=1;o=l+8|0;c[o>>2]=1;a[l>>0]=0;c[l+348>>2]=1;q=l+352|0;n=l+380|0;r=l+408|0;p=0;do{c[q+(p<<2)>>2]=1;c[n+(p<<2)>>2]=0;c[r+(p<<2)>>2]=-1;p=p+1|0}while((p|0)<(c[o>>2]|0));c[l+436>>2]=0;c[l+440>>2]=1;a[l+444>>0]=0;p=b+208|0;vd(p);c[p>>2]=m;c[k>>2]=0;p=k+72|0;c[p>>2]=1;u=_c(j,8)|0;q=k+4|0;c[q>>2]=u;do if((u|0)<=3){a[k+8>>0]=0;o=k+13120|0;c[o>>2]=cd(j,32)|0;m=cd(j,32)|0;n=k+13124|0;c[n>>2]=m;m=Qc(c[o>>2]|0,m,0,c[b+4>>2]|0)|0;if((m|0)>=0){l=k+52|0;c[l>>2]=(_c(j,8)|0)+8;q=c[q>>2]|0;if((q|0)==1){c[k+60>>2]=54;q=54}else if(!q){c[k+60>>2]=32;q=32}else 
if((q|0)==2){c[k+60>>2]=56;q=56}else{c[k+60>>2]=58;q=58}c[k+56>>2]=1;q=Bd(q)|0;if(q){c[k+13180>>2]=0;c[k+13168>>2]=0;u=d[q+5>>0]|0;c[k+13172>>2]=u;c[k+13176>>2]=u;u=d[q+6>>0]|0;c[k+13184>>2]=u;c[k+13188>>2]=u;c[k+64>>2]=8;if((c[p>>2]|0)>0){q=k+76|0;r=0;do{c[q+(r*12|0)>>2]=1;c[q+(r*12|0)+4>>2]=0;c[q+(r*12|0)+8>>2]=-1;r=r+1|0}while((r|0)<(c[p>>2]|0))}t=(dd(j)|0)+3|0;u=k+13064|0;c[u>>2]=t;t=1<<t;s=t+ -1|0;t=0-t|0;c[o>>2]=s+(c[o>>2]|0)&t;c[n>>2]=s+(c[n>>2]|0)&t;t=k+13068|0;c[t>>2]=dd(j)|0;s=k+13072|0;c[s>>2]=(dd(j)|0)+2;p=dd(j)|0;q=c[s>>2]|0;r=k+13076|0;c[r>>2]=q+p;if(q>>>0<(c[u>>2]|0)>>>0){v=dd(j)|0;p=k+13092|0;c[p>>2]=v;q=k+13088|0;c[q>>2]=v;a[k+12940>>0]=1;a[k+12941>>0]=bd(j)|0;v=bd(j)|0;c[k+68>>2]=v;if(v){v=k+13044|0;a[v>>0]=(_c(j,4)|0)+1;a[k+13045>>0]=(_c(j,4)|0)+1;w=(dd(j)|0)+3|0;c[k+13048>>2]=w;c[k+13052>>2]=w+(dd(j)|0);if((d[v>>0]|0|0)>(c[l>>2]|0)){m=-1094995529;break}a[k+13056>>0]=bd(j)|0}c[k+2184>>2]=0;a[k+12942>>0]=0;a[k+13060>>0]=1;a[k+13061>>0]=bd(j)|0;c[k+160>>2]=0;c[k+164>>2]=1;if((bd(j)|0)!=0?(w=bd(j)|0,ad(j,7),(w|0)!=0):0){c[k+13096>>2]=bd(j)|0;c[k+13100>>2]=bd(j)|0;c[k+13104>>2]=bd(j)|0;c[k+13108>>2]=bd(j)|0;bd(j)|0;c[k+13112>>2]=bd(j)|0;bd(j)|0;c[k+13116>>2]=bd(j)|0;bd(j)|0}j=c[o>>2]|0;c[k+12>>2]=j;o=c[n>>2]|0;c[k+16>>2]=o;u=c[u>>2]|0;w=(c[t>>2]|0)+u|0;c[k+13080>>2]=w;t=u+ -1|0;c[k+13084>>2]=t;n=1<<w;v=j+ -1+n>>w;c[k+13128>>2]=v;n=o+ -1+n>>w;c[k+13132>>2]=n;c[k+13136>>2]=$(n,v)|0;c[k+13140>>2]=j>>u;c[k+13144>>2]=o>>u;v=c[s>>2]|0;c[k+13148>>2]=j>>v;c[k+13152>>2]=o>>v;c[k+13156>>2]=j>>t;c[k+13160>>2]=o>>t;v=w-v|0;c[k+13164>>2]=(1<<v)+ -1;c[k+13192>>2]=((c[l>>2]|0)*6|0)+ -48;u=(1<<u)+ -1|0;if((((((u&j|0)==0?!((o&u|0)!=0|w>>>0>6):0)?(c[q>>2]|0)>>>0<=v>>>0:0)?(c[p>>2]|0)>>>0<=v>>>0:0)?(c[r>>2]|0)>>>0<=(w>>>0>5?5:w)>>>0:0)?((c[h+216>>2]|0)-(c[h+212>>2]|0)|0)>=0:0){h=b+272|0;j=c[h>>2]|0;if((j|0)!=0?(w=c[e>>2]|0,(Yd(c[j+4>>2]|0,c[w+4>>2]|0,c[w+8>>2]|0)|0)==0):0){vd(e);w=0;i=f;return w|0}else j=0;do{k=b+(j<<2)+400|0;l=c[k>>2]|0;do 
if(l){if(c[c[l+4>>2]>>2]|0)break;vd(k)}while(0);j=j+1|0}while((j|0)!=256);j=c[h>>2]|0;do if((j|0)!=0?(g=b+200|0,(c[g>>2]|0)==(c[j+4>>2]|0)):0){v=b+1424|0;vd(v);w=ud(c[h>>2]|0)|0;c[v>>2]=w;if(w)break;c[g>>2]=0}while(0);vd(h);c[h>>2]=c[e>>2];w=0;i=f;return w|0}}else m=-1094995529}else m=-22}}else m=-1094995529;while(0);vd(e);w=m;i=f;return w|0}function Ec(b){b=b|0;var e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,E=0,F=0,G=0,H=0,I=0;f=i;i=i+16|0;e=f+4|0;j=f;l=b+136|0;g=c[l>>2]|0;n=g+204|0;h=md(1692)|0;c[j>>2]=h;if(!h){I=-12;i=f;return I|0}I=qd(h,1692,6,0,0)|0;c[e>>2]=I;if(!I){jd(j);I=-12;i=f;return I|0}a[(c[j>>2]|0)+53>>0]=1;h=c[j>>2]|0;c[h+44>>2]=1;c[h+48>>2]=1;a[h+52>>0]=1;a[(c[j>>2]|0)+57>>0]=0;h=c[j>>2]|0;c[h+60>>2]=0;c[h+64>>2]=0;a[h+1629>>0]=2;h=dd(n)|0;a:do if((h>>>0<=255?(k=dd(n)|0,c[c[j>>2]>>2]=k,k>>>0<=31):0)?(m=c[b+(k<<2)+272>>2]|0,(m|0)!=0):0){k=c[m+4>>2]|0;I=(bd(n)|0)&255;a[(c[j>>2]|0)+41>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+39>>0]=I;I=_c(n,3)|0;c[(c[j>>2]|0)+1624>>2]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+4>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+5>>0]=I;I=(dd(n)|0)+1|0;c[(c[j>>2]|0)+8>>2]=I;I=(dd(n)|0)+1|0;c[(c[j>>2]|0)+12>>2]=I;I=ed(n)|0;c[(c[j>>2]|0)+16>>2]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+20>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+21>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+22>>0]=I;I=c[j>>2]|0;c[I+24>>2]=0;if(a[I+22>>0]|0){I=dd(n)|0;c[(c[j>>2]|0)+24>>2]=I}I=ed(n)|0;c[(c[j>>2]|0)+28>>2]=I;if((I+12|0)>>>0<=24?(I=ed(n)|0,c[(c[j>>2]|0)+32>>2]=I,(I+12|0)>>>0<=24):0){I=(bd(n)|0)&255;a[(c[j>>2]|0)+36>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+37>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+38>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+40>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+42>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+43>>0]=I;if(a[(c[j>>2]|0)+42>>0]|0){m=(dd(n)|0)+1|0;c[(c[j>>2]|0)+44>>2]=m;m=(dd(n)|0)+1|0;o=c[j>>2]|0;c[o+48>>2]=m;o=c[o+44>>2]|0;if(!o){b=-1094995529;break}if((m|0)==0?1:(o|0)>=(c[k+13120>>2]|0)){b=-1094995529;break}if((m|0)>=(c[k+13124>>2]|0
)){b=-1094995529;break}m=od(o,4)|0;c[(c[j>>2]|0)+1648>>2]=m;m=od(c[(c[j>>2]|0)+48>>2]|0,4)|0;c[(c[j>>2]|0)+1652>>2]=m;m=c[j>>2]|0;if(!(c[m+1648>>2]|0)){b=-12;break}if(!(c[m+1652>>2]|0)){b=-12;break}p=(bd(n)|0)&255;a[(c[j>>2]|0)+52>>0]=p;p=c[j>>2]|0;if(!(a[p+52>>0]|0)){q=(c[p+44>>2]|0)+ -1|0;if((q|0)>0){o=0;m=0;r=0;do{q=(dd(n)|0)+1|0;p=c[j>>2]|0;c[(c[p+1648>>2]|0)+(r<<2)>>2]=q;o=ae(q|0,0,o|0,m|0)|0;m=D;r=r+1|0;q=(c[p+44>>2]|0)+ -1|0}while((r|0)<(q|0))}else{m=0;o=0}r=c[k+13128>>2]|0;s=((r|0)<0)<<31>>31;if(!(m>>>0<s>>>0|(m|0)==(s|0)&o>>>0<r>>>0)){b=-1094995529;break}I=$d(r|0,s|0,o|0,m|0)|0;c[(c[p+1648>>2]|0)+(q<<2)>>2]=I;q=(c[p+48>>2]|0)+ -1|0;if((q|0)>0){p=0;o=0;r=0;do{q=(dd(n)|0)+1|0;m=c[j>>2]|0;c[(c[m+1652>>2]|0)+(r<<2)>>2]=q;p=ae(q|0,0,p|0,o|0)|0;o=D;r=r+1|0;q=(c[m+48>>2]|0)+ -1|0}while((r|0)<(q|0))}else{m=p;o=0;p=0}r=c[k+13132>>2]|0;s=((r|0)<0)<<31>>31;if(!(o>>>0<s>>>0|(o|0)==(s|0)&p>>>0<r>>>0)){b=-1094995529;break}I=$d(r|0,s|0,p|0,o|0)|0;c[(c[m+1652>>2]|0)+(q<<2)>>2]=I}I=(bd(n)|0)&255;a[(c[j>>2]|0)+53>>0]=I}I=(bd(n)|0)&255;a[(c[j>>2]|0)+54>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+55>>0]=I;if((a[(c[j>>2]|0)+55>>0]|0)!=0?(I=(bd(n)|0)&255,a[(c[j>>2]|0)+56>>0]=I,I=(bd(n)|0)&255,a[(c[j>>2]|0)+57>>0]=I,(a[(c[j>>2]|0)+57>>0]|0)==0):0){m=(ed(n)|0)<<1;c[(c[j>>2]|0)+60>>2]=m;m=(ed(n)|0)<<1;I=c[j>>2]|0;c[I+64>>2]=m;if(((c[I+60>>2]|0)+13|0)>>>0>26){b=-1094995529;break}if((m+13|0)>>>0>26){b=-1094995529;break}}p=(bd(n)|0)&255;a[(c[j>>2]|0)+68>>0]=p;p=c[j>>2]|0;if(a[p+68>>0]|0){q=0;do{o=p+(q<<6)+69|0;m=o+16|0;do{a[o>>0]=16;o=o+1|0}while((o|0)<(m|0));a[p+q+1605>>0]=16;a[p+q+1611>>0]=16;q=q+1|0}while((q|0)!=6);o=p+453|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+517|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+581|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+645|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+709|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|
0;q=q+1|0}while((o|0)<(m|0));o=p+773|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+837|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+901|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+965|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1029|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1093|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1157|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1221|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1285|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1349|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1413|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1477|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1541|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));m=c[j>>2]|0;v=(c[l>>2]|0)+204|0;w=0;do{p=(w|0)>0?64:16;q=(w|0)>1;o=w+ -2|0;x=(w|0)==3?3:1;r=1<<(w<<1)+4;t=(r|0)>0;s=(w|0)==0;r=(r|0)<64?r:64;u=0;do{if(!(((bd(v)|0)&255)<<24>>24)){y=dd(v)|0;if(y){if(u>>>0<y>>>0){b=-1094995529;break a}y=u-y|0;fe(m+(w*384|0)+(u<<6)+69|0,m+(w*384|0)+(y<<6)+69|0,p|0)|0;if(q)a[m+(o*6|0)+u+1605>>0]=a[m+(o*6|0)+y+1605>>0]|0}}else{if(q){z=(ed(v)|0)+8|0;a[m+(o*6|0)+u+1605>>0]=z}else z=8;if(t){y=0;do{if(s)A=(d[24+y>>0]<<2)+(d[8+y>>0]|0)|0;else 
A=(d[104+y>>0]<<3)+(d[40+y>>0]|0)|0;z=(z+256+(ed(v)|0)|0)%256|0;a[m+(w*384|0)+(u<<6)+A+69>>0]=z;y=y+1|0}while((y|0)!=(r|0))}}u=u+x|0}while((u|0)<6);w=w+1|0}while((w|0)<4);if((c[k+4>>2]|0)==3){o=0;do{a[m+o+1285>>0]=a[m+o+901>>0]|0;a[m+o+1349>>0]=a[m+o+965>>0]|0;a[m+o+1477>>0]=a[m+o+1093>>0]|0;a[m+o+1541>>0]=a[m+o+1157>>0]|0;o=o+1|0}while((o|0)!=64);a[m+1612>>0]=a[m+1606>>0]|0;a[m+1613>>0]=a[m+1607>>0]|0;a[m+1615>>0]=a[m+1609>>0]|0;a[m+1616>>0]=a[m+1610>>0]|0}}I=(bd(n)|0)&255;a[(c[j>>2]|0)+1617>>0]=I;I=(dd(n)|0)+2|0;c[(c[j>>2]|0)+1620>>2]=I;m=k+13080|0;if(I>>>0<=(c[m>>2]|0)>>>0){I=(bd(n)|0)&255;a[(c[j>>2]|0)+1628>>0]=I;do if((bd(n)|0)!=0?(I=bd(n)|0,_c(n,7)|0,(I|0)!=0):0){n=c[j>>2]|0;p=(c[l>>2]|0)+204|0;if(a[n+21>>0]|0)a[n+1629>>0]=(dd(p)|0)+2;a[n+1630>>0]=bd(p)|0;I=(bd(p)|0)&255;a[n+1631>>0]=I;if(I<<24>>24){a[n+1632>>0]=dd(p)|0;I=dd(p)|0;o=n+1633|0;a[o>>0]=I;if((I&255)>>>0<5)l=0;else break;while(1){a[n+l+1634>>0]=ed(p)|0;a[n+l+1639>>0]=ed(p)|0;if((l|0)<(d[o>>0]|0))l=l+1|0;else break}}a[n+1644>>0]=dd(p)|0;a[n+1645>>0]=dd(p)|0}while(0);l=od((c[(c[j>>2]|0)+44>>2]|0)+1|0,4)|0;c[(c[j>>2]|0)+1656>>2]=l;l=od((c[(c[j>>2]|0)+48>>2]|0)+1|0,4)|0;c[(c[j>>2]|0)+1660>>2]=l;l=k+13128|0;o=od(c[l>>2]|0,4)|0;c[(c[j>>2]|0)+1664>>2]=o;o=c[j>>2]|0;n=c[o+1656>>2]|0;if(((n|0)!=0?(c[o+1660>>2]|0)!=0:0)?(c[o+1664>>2]|0)!=0:0){if(a[o+52>>0]|0){p=c[o+1648>>2]|0;if(!p){o=od(c[o+44>>2]|0,4)|0;c[(c[j>>2]|0)+1648>>2]=o;o=od(c[(c[j>>2]|0)+48>>2]|0,4)|0;c[(c[j>>2]|0)+1652>>2]=o;o=c[j>>2]|0;p=c[o+1648>>2]|0;if(!p){b=-12;break}}n=c[o+1652>>2]|0;if(!n){b=-12;break}q=o+44|0;s=c[q>>2]|0;if((s|0)>0){r=0;do{I=r;r=r+1|0;H=c[l>>2]|0;c[p+(I<<2)>>2]=(($(H,r)|0)/(s|0)|0)-(($(H,I)|0)/(s|0)|0);s=c[q>>2]|0}while((r|0)<(s|0))}q=o+48|0;s=c[q>>2]|0;if((s|0)>0){p=k+13132|0;r=0;do{I=r;r=r+1|0;H=c[p>>2]|0;c[n+(I<<2)>>2]=(($(H,r)|0)/(s|0)|0)-(($(H,I)|0)/(s|0)|0);s=c[q>>2]|0}while((r|0)<(s|0))}n=c[o+1656>>2]|0}c[n>>2]=0;q=o+44|0;if((c[q>>2]|0)>0){p=c[o+1648>>2]|0;r=0;s=0;do{r=(c[p+(s<<2)>>2]|0)+r|0;s=s+1|0;c[n+(s<<2)>>2]=r
}while((s|0)<(c[q>>2]|0))}s=c[o+1660>>2]|0;c[s>>2]=0;r=o+48|0;if((c[r>>2]|0)>0){q=c[o+1652>>2]|0;t=0;p=0;do{t=(c[q+(p<<2)>>2]|0)+t|0;p=p+1|0;c[s+(p<<2)>>2]=t}while((p|0)<(c[r>>2]|0))}r=c[l>>2]|0;if((r|0)>0){o=c[o+1664>>2]|0;p=0;q=0;do{q=(p>>>0>(c[n+(q<<2)>>2]|0)>>>0&1)+q|0;c[o+(p<<2)>>2]=q;p=p+1|0;r=c[l>>2]|0}while((p|0)<(r|0))}x=$(c[k+13132>>2]|0,r)|0;n=od(x,4)|0;c[(c[j>>2]|0)+1668>>2]=n;n=od(x,4)|0;c[(c[j>>2]|0)+1672>>2]=n;n=od(x,4)|0;c[(c[j>>2]|0)+1676>>2]=n;n=k+13164|0;q=(c[n>>2]|0)+2|0;q=od($(q,q)|0,4)|0;c[(c[j>>2]|0)+1688>>2]=q;q=c[j>>2]|0;p=c[q+1668>>2]|0;if(!p){b=-12;break}w=c[q+1672>>2]|0;if(!w){b=-12;break}o=c[q+1676>>2]|0;if(!o){b=-12;break}if(!(c[q+1688>>2]|0)){b=-12;break}if((x|0)>0){B=q+44|0;r=q+48|0;s=c[q+1660>>2]|0;v=c[q+1648>>2]|0;u=c[q+1656>>2]|0;t=q+1652|0;A=0;do{C=c[l>>2]|0;y=(A|0)%(C|0)|0;z=(A|0)/(C|0)|0;G=c[B>>2]|0;E=0;while(1){if((E|0)>=(G|0)){E=0;break}F=E+1|0;if(y>>>0<(c[u+(F<<2)>>2]|0)>>>0)break;else E=F}H=c[r>>2]|0;F=0;while(1){if((F|0)>=(H|0)){F=0;break}G=F+1|0;if(z>>>0<(c[s+(G<<2)>>2]|0)>>>0)break;else F=G}if((E|0)>0){G=c[(c[t>>2]|0)+(F<<2)>>2]|0;H=0;I=0;do{I=($(c[v+(H<<2)>>2]|0,G)|0)+I|0;H=H+1|0}while((H|0)!=(E|0))}else I=0;if((F|0)>0){G=c[t>>2]|0;H=0;do{I=($(c[G+(H<<2)>>2]|0,C)|0)+I|0;H=H+1|0}while((H|0)!=(F|0))}H=$(c[v+(E<<2)>>2]|0,z-(c[s+(F<<2)>>2]|0)|0)|0;I=I+y+H-(c[u+(E<<2)>>2]|0)|0;c[p+(A<<2)>>2]=I;c[w+(I<<2)>>2]=A;A=A+1|0}while((A|0)!=(x|0))}else r=q+48|0;x=c[r>>2]|0;if((x|0)>0){s=q+44|0;t=q+1660|0;q=q+1656|0;z=c[s>>2]|0;u=0;w=0;while(1){v=u;u=u+1|0;if((z|0)>0){x=c[t>>2]|0;y=x+(u<<2)|0;G=c[y>>2]|0;B=z;z=0;do{E=c[x+(v<<2)>>2]|0;A=z;z=z+1|0;if(E>>>0<G>>>0){B=c[q>>2]|0;C=B+(z<<2)|0;F=c[C>>2]|0;do{H=c[B+(A<<2)>>2]|0;if(H>>>0<F>>>0){do{c[o+(c[p+(($(c[l>>2]|0,E)|0)+H<<2)>>2]<<2)>>2]=w;H=H+1|0;F=c[C>>2]|0}while(H>>>0<F>>>0);G=c[y>>2]|0}E=E+1|0}while(E>>>0<G>>>0);B=c[s>>2]|0}w=w+1|0}while((z|0)<(B|0));v=c[r>>2]|0;z=B}else v=x;if((u|0)>=(v|0))break;else x=v}}else 
w=0;o=od(w,4)|0;c[(c[j>>2]|0)+1680>>2]=o;o=c[j>>2]|0;p=c[o+1680>>2]|0;if(!p){b=-12;break}r=o+48|0;u=c[r>>2]|0;if((u|0)>0){q=o+44|0;t=c[q>>2]|0;s=0;do{if((t|0)>0){u=c[o+1660>>2]|0;v=c[o+1656>>2]|0;w=0;do{I=$(c[l>>2]|0,c[u+(s<<2)>>2]|0)|0;c[p+(($(t,s)|0)+w<<2)>>2]=(c[v+(w<<2)>>2]|0)+I;w=w+1|0;t=c[q>>2]|0}while((w|0)<(t|0));u=c[r>>2]|0}s=s+1|0}while((s|0)<(u|0))}k=(c[m>>2]|0)-(c[k+13072>>2]|0)|0;v=c[n>>2]|0;c[o+1684>>2]=(c[o+1688>>2]|0)+(v+3<<2);p=v+2|0;if((p|0)>0){m=c[(c[j>>2]|0)+1688>>2]|0;o=0;do{c[m+(($(p,o)|0)<<2)>>2]=-1;c[m+(o<<2)>>2]=-1;o=o+1|0;v=c[n>>2]|0;p=v+2|0}while((o|0)<(p|0))}if((v|0)>-1){m=c[j>>2]|0;j=m+1668|0;p=k<<1;o=(k|0)>0;m=m+1684|0;q=0;while(1){if((v|0)>-1){r=q>>k;t=c[j>>2]|0;s=c[m>>2]|0;u=0;while(1){z=c[t+(($(c[l>>2]|0,r)|0)+(u>>k)<<2)>>2]<<p;if(o){w=0;do{y=1<<w;if(!(y&q))x=0;else x=y<<1<<w;z=((y&u|0)==0?0:y<<w)+z+x|0;w=w+1|0}while((w|0)!=(k|0))}c[s+(($(v+2|0,q)|0)+u<<2)>>2]=z;v=c[n>>2]|0;if((u|0)<(v|0))u=u+1|0;else break}}if((q|0)<(v|0))q=q+1|0;else break}}if(((c[g+216>>2]|0)-(c[g+212>>2]|0)|0)<0){b=0;break}I=b+(h<<2)+400|0;vd(I);c[I>>2]=c[e>>2];I=0;i=f;return I|0}else b=-12}else b=-1094995529}else b=-1094995529}else b=-1094995529;while(0);vd(e);I=b;i=f;return I|0}function Fc(a,b){a=a|0;b=b|0;var d=0;a=i;i=i+16|0;d=a;c[d>>2]=b;jd(b+1648|0);jd(b+1652|0);jd(b+1656|0);jd(b+1660|0);jd(b+1664|0);jd(b+1668|0);jd(b+1672|0);jd(b+1680|0);jd(b+1676|0);jd(b+1688|0);jd(d);i=a;return}function Gc(a){a=a|0;var d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0;d=i;e=a+136|0;g=a+2512|0;f=a+4524|0;while(1){h=(c[e>>2]|0)+204|0;j=0;do{k=_c(h,8)|0;j=k+j|0}while((k|0)==255);k=0;do{l=_c(h,8)|0;k=l+k|0}while((l|0)==255);do if((c[g>>2]|0)==39)if((j|0)==257){b[f>>1]=_c(h,16)|0;break}else if((j|0)==256){Hc(a);break}else{ad(h,k<<3);break}else if((j|0)==132){Hc(a);break}else{ad(h,k<<3);break}while(0);h=c[e>>2]|0;if(((c[h+216>>2]|0)-(c[h+212>>2]|0)|0)<=0){a=15;break}if(($c(h+204|0,8)|0)==128){a=15;break}}if((a|0)==15){i=d;return 1}return 0}function Hc(b){b=b|0;var 
d=0,e=0,f=0,g=0,h=0,j=0;d=i;e=(c[b+136>>2]|0)+204|0;g=(_c(e,8)|0)&255;f=b+4468|0;h=0;do{if((g|0)==1)ad(e,16);else if(!g){a[f>>0]=1;j=0;do{a[b+(h<<4)+j+4420>>0]=_c(e,8)|0;j=j+1|0}while((j|0)!=16)}else if((g|0)==2)ad(e,32);h=h+1|0}while((h|0)!=3);i=d;return}function Ic(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0,g=0,h=0;d=i;f=c[b+52>>2]|0;e=a+60|0;if((f|0)>0){if((c[e>>2]|0)==0?(f=md(f)|0,c[e>>2]=f,(f|0)==0):0){f=-12;i=d;return f|0}}else c[e>>2]=0;f=a+12|0;c[f>>2]=b;c[a+424>>2]=0;c[a+800>>2]=1;h=a+912|0;g=a+936|0;c[h+0>>2]=0;c[h+4>>2]=0;c[h+8>>2]=0;c[h+12>>2]=0;c[g>>2]=0;c[g+4>>2]=-2147483648;g=a+928|0;c[g>>2]=0;c[g+4>>2]=-2147483648;a=Ea[c[b+76>>2]&3](a)|0;if((a|0)>=0){h=0;i=d;return h|0}jd(e);c[f>>2]=0;h=a;i=d;return h|0}function Jc(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;if(!a){i=b;return 0}e=a+12|0;f=c[e>>2]|0;if((f|0)!=0?(d=c[f+92>>2]|0,(d|0)!=0):0)Ea[d&3](a)|0;c[a+796>>2]=0;jd(a+60|0);c[e>>2]=0;c[a+808>>2]=0;i=b;return 0}function Kc(a,b,d,e,f,g){a=a|0;b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0;h=i;if((f|0)<=0){i=h;return 0}j=(e|0)==0;k=0;do{l=d+($(k,g)|0)|0;l=Ia[b&1](a,l)|0;if(!j)c[e+(k<<2)>>2]=l;k=k+1|0}while((k|0)!=(f|0));i=h;return 0}function Lc(a,b,d,e,f){a=a|0;b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0;g=i;if((f|0)<=0){i=g;return 0}h=(e|0)==0;j=0;do{k=Ga[b&1](a,d,j,0)|0;if(!h)c[e+(j<<2)>>2]=k;j=j+1|0}while((j|0)!=(f|0));i=g;return 0}function Mc(b,f,g){b=b|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0;g=i;h=Bd(c[f+76>>2]|0)|0;b=h+4|0;if(!(a[b>>0]|0)){p=0;i=g;return p|0}k=f+64|0;l=h+5|0;m=f+68|0;n=h+6|0;j=0;while(1){p=($((((e[h+(j<<1)+8>>1]|0)>>>11&15)+8|0)>>>3,c[k>>2]|0)|0)+31&-32;if((j+ -1|0)>>>0<2){p=0-(0-p>>d[l>>0])|0;c[f+(j<<2)+32>>2]=p;o=0-(0-((c[m>>2]|0)+31&-32)>>d[n>>0])|0}else{c[f+(j<<2)+32>>2]=p;o=(c[m>>2]|0)+31&-32}o=sd(($(p,o)|0)+32|0)|0;c[f+(j<<2)+304>>2]=o;if(!o){b=-1;f=8;break}c[f+(j<<2)>>2]=c[o+4>>2];j=j+1|0;if((j|0)>=(d[b>>0]|0)){b=0;f=8;break}}if((f|0)==8){i=g;return b|0}return 0}function Nc(a,b){a=a|0;b=b|0;var 
d=0,e=0,f=0,g=0;d=i;ce(a|0,0,976)|0;e=(b|0)!=0;if(e){c[a+8>>2]=c[b+8>>2];c[a+48>>2]=c[b+12>>2]}else c[a+8>>2]=-1;c[a+100>>2]=0;c[a+104>>2]=1;c[a+888>>2]=0;c[a+892>>2]=1;c[a+896>>2]=0;c[a+900>>2]=1;c[a+476>>2]=1;c[a+816>>2]=1;c[a+820>>2]=1;c[a+220>>2]=0;c[a+224>>2]=1;c[a+136>>2]=-1;c[a+416>>2]=-1;g=a+696|0;c[g>>2]=0;c[g+4>>2]=-2147483648;if((e?(f=c[b+52>>2]|0,(f|0)!=0):0)?(g=md(f)|0,c[a+60>>2]=g,(g|0)==0):0){g=-12;i=d;return g|0}g=0;i=d;return g|0}function Oc(a){a=a|0;var b=0,c=0;b=i;c=fd(976)|0;if(c){if((Nc(c,a)|0)<0){id(c);c=0}}else c=0;i=b;return c|0}function Pc(a,b,d,e){a=a|0;b=b|0;d=d|0;e=e|0;var f=0,g=0,h=0,j=0,k=0;f=i;i=i+80|0;g=f;k=g+0|0;j=e+0|0;h=k+80|0;do{c[k>>2]=c[j>>2];k=k+4|0;j=j+4|0}while((k|0)<(h|0));h=a+12|0;j=c[h>>2]|0;if(!j){k=-22;i=f;return k|0}if(c[j+8>>2]|0){k=-22;i=f;return k|0}c[d>>2]=0;j=c[a+124>>2]|0;k=c[a+128>>2]|0;if(!j){if(k){k=-22;i=f;return k|0}}else{if(!((j|0)>0&(k|0)>0)){k=-22;i=f;return k|0}if((j+128|0)>>>0>=(268435455/((k+128|0)>>>0)|0)>>>0){k=-22;i=f;return k|0}}yd(b);h=c[h>>2]|0;if(((c[h+16>>2]&32|0)==0?(c[e+28>>2]|0)==0:0)?(c[a+808>>2]&1|0)==0:0){k=0;i=f;return k|0}g=Ga[c[h+88>>2]&1](a,b,d,g)|0;if(!(c[d>>2]|0)){yd(b);k=g;i=f;return k|0}else{k=a+424|0;c[k>>2]=(c[k>>2]|0)+1;k=g;i=f;return k|0}return 0}function Qc(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;c=i;if((a|0)>0&(b|0)>0?(a+128|0)>>>0<(268435455/((b+128|0)>>>0)|0)>>>0:0){d=0;i=c;return d|0}d=-22;i=c;return d|0}function Rc(a,b){a=a|0;b=b|0;return 0}function Sc(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0;f=i;e=a+8|0;if(!(c[e>>2]|0)){g=c[a+116>>2]|0;h=a+120|0;j=c[h>>2]|0;if(!((g|0)>0&(j|0)>0)){l=-22;i=f;return l|0}if((g+128|0)>>>0>=(268435455/((j+128|0)>>>0)|0)>>>0){l=-22;i=f;return l|0}j=c[a+136>>2]|0;if((j|0)<0){l=-22;i=f;return l|0}k=b+64|0;l=b+68|0;if((c[k>>2]|0)>=1?(c[l>>2]|0)>=1:0)g=1;else{m=a+792|0;n=0-(0-(c[a+124>>2]|0)>>c[m>>2])|0;c[k>>2]=(g|0)>(n|0)?g:n;k=c[h>>2]|0;g=0-(0-(c[a+128>>2]|0)>>c[m>>2])|0;c[l>>2]=(k|0)>(g|0)?k:g;g=0}c[b+76>>2]=j}else 
g=1;d=xa[c[a+476>>2]&1](a,b,d)|0;if(c[e>>2]|g){n=d;i=f;return n|0}c[b+64>>2]=c[a+116>>2];c[b+68>>2]=c[a+120>>2];n=d;i=f;return n|0}function Tc(a,b,d){a=a|0;b=b|0;d=d|0;var e=0;e=i;c[b+4>>2]=a;a=Sc(a,c[b>>2]|0,d)|0;i=e;return a|0}function Uc(a,b){a=a|0;b=b|0;a=i;b=c[b>>2]|0;if(b)yd(b);i=a;return}function Vc(a){a=a|0;return}function Wc(a,b,c){a=a|0;b=b|0;c=c|0;return}function Xc(a){a=a|0;var b=0,d=0;b=i;d=a+8|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+16|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+64|0;c[d>>2]=-1;c[d+4>>2]=-1;d=a+72|0;c[d>>2]=0;c[d+4>>2]=0;d=a+32|0;c[a>>2]=0;c[d+0>>2]=0;c[d+4>>2]=0;c[d+8>>2]=0;c[d+12>>2]=0;c[d+16>>2]=0;i=b;return}function Yc(a,b,e){a=a|0;b=b|0;e=e|0;var f=0,g=0,h=0;g=a+16|0;c[a+12>>2]=b;c[a+20>>2]=b+e;h=b+1|0;c[g>>2]=h;e=(d[b>>0]|0)<<18;c[a>>2]=e;f=b+2|0;c[g>>2]=f;e=(d[h>>0]|0)<<10|e;c[a>>2]=e;c[g>>2]=b+3;c[a>>2]=(d[f>>0]|0)<<2|e|2;c[a+4>>2]=510;return}function Zc(){var b=0,e=0,f=0,g=0,h=0,j=0;b=i;if(!(c[718]|0))e=0;else{i=b;return}while(1)if(e){g=(e&65280|0)==0;a[2880+e>>0]=(g?8:0)-(d[4680+(g?e:e>>>8)>>0]|0);e=e+1|0;if((e|0)==512){e=0;break}else continue}else{a[2880]=9;e=1;continue}while(1){f=e<<1;g=0;do{j=a[4224+(e<<2)+g>>0]|0;h=(g<<7)+f|0;a[(h|1)+3392>>0]=j;a[h+3392>>0]=j;g=g+1|0}while((g|0)!=4);j=(d[4480+e>>0]|0)<<1;a[f+4032>>0]=j;a[f+4033>>0]=j|1;if(e){h=(d[4544+e>>0]|0)<<1;j=128-f|0;a[j+3903>>0]=h;a[j+3902>>0]=h|1;e=e+1|0;if((e|0)==64)break;else continue}else{e=128-f|0;a[e+3903>>0]=1;a[e+3902>>0]=0;e=1;continue}}g=4160|0;f=4608|0;e=g+63|0;do{a[g>>0]=a[f>>0]|0;g=g+1|0;f=f+1|0}while((g|0)<(e|0));c[718]=1;i=b;return}function _c(a,b){a=a|0;b=b|0;var e=0,f=0,g=0,h=0;e=i;f=a+8|0;h=c[f>>2]|0;g=c[a+16>>2]|0;a=(c[a>>2]|0)+(h>>>3)|0;a=(ee(d[a>>0]|d[a+1>>0]<<8|d[a+2>>0]<<16|d[a+3>>0]<<24|0)|0)<<(h&7)>>>(32-b|0);b=h+b|0;c[f>>2]=g>>>0>b>>>0?b:g;i=e;return a|0}function $c(a,b){a=a|0;b=b|0;var e=0,f=0;e=i;f=c[a+8>>2]|0;a=(c[a>>2]|0)+(f>>>3)|0;a=(ee(d[a>>0]|d[a+1>>0]<<8|d[a+2>>0]<<16|d[a+3>>0]<<24|0)|0)<<(f&7)>>>(32-b|0);i=e;return a|0}function 
ad(a,b){a=a|0;b=b|0;var d=0;d=a+8|0;a=c[a+16>>2]|0;b=(c[d>>2]|0)+b|0;c[d>>2]=a>>>0>b>>>0?b:a;return}function bd(a){a=a|0;var b=0,e=0,f=0;e=a+8|0;f=c[e>>2]|0;b=(d[(c[a>>2]|0)+(f>>>3)>>0]|0)<<(f&7)>>>7&1;c[e>>2]=((f|0)<(c[a+16>>2]|0)&1)+f;return b|0}function cd(a,b){a=a|0;b=b|0;var e=0,f=0,g=0,h=0,j=0,k=0;e=i;if(!b){j=0;i=e;return j|0}f=a+8|0;h=c[f>>2]|0;g=c[a+16>>2]|0;j=c[a>>2]|0;a=j+(h>>>3)|0;a=(ee(d[a>>0]|d[a+1>>0]<<8|d[a+2>>0]<<16|d[a+3>>0]<<24|0)|0)<<(h&7);if((b|0)<26){j=h+b|0;c[f>>2]=g>>>0>j>>>0?j:g;j=a>>>(32-b|0);i=e;return j|0}else{k=h+16|0;k=g>>>0>k>>>0?k:g;c[f>>2]=k;h=b+ -16|0;j=j+(k>>>3)|0;j=(ee(d[j>>0]|d[j+1>>0]<<8|d[j+2>>0]<<16|d[j+3>>0]<<24|0)|0)<<(k&7)>>>(48-b|0);b=k+h|0;c[f>>2]=g>>>0>b>>>0?b:g;j=j|a>>>16<<h;i=e;return j|0}return 0}function dd(a){a=a|0;var b=0,e=0,f=0,g=0,h=0,j=0;b=i;i=i+32|0;e=b;c[e+0>>2]=c[a+0>>2];c[e+4>>2]=c[a+4>>2];c[e+8>>2]=c[a+8>>2];c[e+12>>2]=c[a+12>>2];c[e+16>>2]=c[a+16>>2];e=cd(e,32)|0;f=e>>>0>65535;e=f?e>>>16:e;f=f?16:0;if(e&65280){f=f|8;e=e>>>8}j=31-f-(d[4680+e>>0]|0)|0;g=a+8|0;f=c[g>>2]|0;e=0-f|0;h=(c[a+16>>2]|0)-f|0;if((j|0)<(e|0)){h=e;h=h+f|0;c[g>>2]=h;j=j+1|0;j=cd(a,j)|0;j=j+ -1|0;i=b;return j|0}h=(h|0)<(j|0)?h:j;h=h+f|0;c[g>>2]=h;j=j+1|0;j=cd(a,j)|0;j=j+ -1|0;i=b;return j|0}function ed(a){a=a|0;var b=0;b=i;a=dd(a)|0;if(!(a&1)){a=0-(a>>>1)|0;i=b;return a|0}else{a=(a+1|0)>>>1;i=b;return a|0}return 0}function fd(a){a=a|0;var b=0,d=0,e=0;b=i;d=c[1168]|0;if((d+ -32|0)>>>0>=a>>>0){e=Vd(a)|0;if((e|0)==0&(a|0)==0)if((d|0)==32)e=0;else e=Vd(1)|0}else e=0;i=b;return e|0}function gd(a,b){a=a|0;b=b|0;var d=0;d=i;if(((c[1168]|0)+ -32|0)>>>0<b>>>0){b=0;i=d;return b|0}b=Xd(a,((b|0)==0&1)+b|0)|0;i=d;return b|0}function hd(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;e=i;f=$(d,b)|0;if((d|b)>>>0>65535&(d|0)!=0?((f>>>0)/(d>>>0)|0|0)!=(b|0):0){Wd(a);d=0;i=e;return d|0}if(((c[1168]|0)+ -32|0)>>>0<f>>>0)b=0;else b=Xd(a,((f|0)==0&1)+f|0)|0;if((b|0)!=0|(f|0)==0){d=b;i=e;return d|0}Wd(a);d=0;i=e;return d|0}function id(a){a=a|0;var 
b=0;b=i;Wd(a);i=b;return}function jd(a){a=a|0;var b=0;b=i;Wd(c[a>>2]|0);c[a>>2]=0;i=b;return}function kd(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;e=i;if(((d|0)!=0?(2147483647/(d>>>0)|0)>>>0>b>>>0:0)?(f=$(d,b)|0,((c[1168]|0)+ -32|0)>>>0>=f>>>0):0)a=Xd(a,((f|0)==0&1)+f|0)|0;else a=0;i=e;return a|0}function ld(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;f=i;e=hd(c[a>>2]|0,b,d)|0;c[a>>2]=e;i=f;return((e|0)!=0|(b|0)==0|(d|0)==0?0:-12)|0}function md(a){a=a|0;var b=0,c=0;c=i;b=fd(a)|0;if(b)ce(b|0,0,a|0)|0;i=c;return b|0}function nd(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;e=i;if((c[b>>2]|0)>>>0>d>>>0){i=e;return}f=((d*17|0)>>>4)+32|0;d=f>>>0>d>>>0?f:d;Wd(c[a>>2]|0);f=fd(d)|0;c[a>>2]=f;c[b>>2]=(f|0)==0?0:d;i=e;return}function od(a,b){a=a|0;b=b|0;var c=0;c=i;if((b|0)!=0?(2147483647/(b>>>0)|0)>>>0>a>>>0:0)b=fd($(b,a)|0)|0;else b=0;i=c;return b|0}function pd(a,b){a=a|0;b=b|0;var c=0,d=0,e=0;c=i;if(((b|0)!=0?(2147483647/(b>>>0)|0)>>>0>a>>>0:0)?(e=$(b,a)|0,d=fd(e)|0,(d|0)!=0):0)ce(d|0,0,e|0)|0;else d=0;i=c;return d|0}function qd(a,b,d,e,f){a=a|0;b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0;g=i;i=i+16|0;h=g;j=md(24)|0;c[h>>2]=j;if(!j){f=0;i=g;return f|0}c[j>>2]=a;c[j+4>>2]=b;c[j+12>>2]=(d|0)!=0?d:7;c[j+16>>2]=e;c[j+8>>2]=1;if(f&1){f=(c[h>>2]|0)+20|0;c[f>>2]=c[f>>2]|1}j=md(12)|0;if(!j){jd(h);f=0;i=g;return f|0}else{c[j>>2]=c[h>>2];c[j+4>>2]=a;c[j+8>>2]=b;f=j;i=g;return f|0}return 0}function rd(a,b){a=a|0;b=b|0;a=i;id(b);i=a;return}function sd(a){a=a|0;var b=0,d=0,e=0;b=i;i=i+16|0;d=b;e=fd(a)|0;c[d>>2]=e;if(e){a=qd(e,a,7,0,0)|0;if(!a){jd(d);a=0}}else a=0;i=b;return a|0}function td(a){a=a|0;var b=0,d=0;b=i;d=sd(a)|0;if(!d){d=0;i=b;return d|0}ce(c[d+4>>2]|0,0,a|0)|0;i=b;return d|0}function ud(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;i=i+16|0;e=b;d=md(12)|0;if(!d){e=0;i=b;return e|0}c[d+0>>2]=c[a+0>>2];c[d+4>>2]=c[a+4>>2];c[d+8>>2]=c[a+8>>2];f=(c[a>>2]|0)+8|0;a=c[f>>2]|0;c[f>>2]=a+1;c[e>>2]=a+1;e=d;i=b;return e|0}function vd(a){a=a|0;var 
b=0,d=0,e=0,f=0;b=i;i=i+16|0;e=b+4|0;d=b;if(!a){i=b;return}f=c[a>>2]|0;if(!f){i=b;return}f=c[f>>2]|0;c[d>>2]=f;jd(a);a=f+8|0;f=c[a>>2]|0;c[a>>2]=f+ -1;c[e>>2]=f+ -1;if(c[e>>2]|0){i=b;return}f=c[d>>2]|0;Ca[c[f+12>>2]&7](c[f+16>>2]|0,c[f>>2]|0);jd(d);i=b;return}function wd(){var a=0,b=0,d=0;a=i;b=md(400)|0;if(!b){b=0;i=a;return b|0}ce(b|0,0,400)|0;d=b+136|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+144|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+128|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+360|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+376|0;c[d>>2]=0;c[d+4>>2]=0;d=b+368|0;c[d>>2]=-1;c[d+4>>2]=-1;c[b+392>>2]=-1;c[b+80>>2]=1;c[b+120>>2]=0;c[b+124>>2]=1;c[b+76>>2]=-1;c[b+344>>2]=2;c[b+348>>2]=2;c[b+352>>2]=2;c[b+340>>2]=0;c[b+356>>2]=0;i=a;return b|0}function xd(a){a=a|0;var b=0,d=0;b=i;if((a|0)!=0?(d=c[a>>2]|0,(d|0)!=0):0){yd(d);jd(a)}i=b;return}function yd(a){a=a|0;var b=0,d=0;b=i;vd(a+304|0);vd(a+308|0);vd(a+312|0);vd(a+316|0);vd(a+320|0);vd(a+324|0);vd(a+328|0);vd(a+332|0);ce(a|0,0,400)|0;d=a+136|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+144|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+128|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+360|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+376|0;c[d>>2]=0;c[d+4>>2]=0;d=a+368|0;c[d>>2]=-1;c[d+4>>2]=-1;c[a+392>>2]=-1;c[a+80>>2]=1;c[a+120>>2]=0;c[a+124>>2]=1;c[a+76>>2]=-1;c[a+344>>2]=2;c[a+348>>2]=2;c[a+352>>2]=2;c[a+340>>2]=0;c[a+356>>2]=0;i=b;return}function zd(a,b){a=a|0;b=b|0;var d=0;d=i;fe(a|0,b|0,400)|0;ce(b|0,0,400)|0;a=b+136|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+144|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+128|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+360|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+376|0;c[a>>2]=0;c[a+4>>2]=0;a=b+368|0;c[a>>2]=-1;c[a+4>>2]=-1;c[b+392>>2]=-1;c[b+80>>2]=1;c[b+120>>2]=0;c[b+124>>2]=1;c[b+76>>2]=-1;c[b+344>>2]=2;c[b+348>>2]=2;c[b+352>>2]=2;c[b+340>>2]=0;c[b+356>>2]=0;i=d;return}function Ad(a,b){a=a|0;b=b|0;var 
d=0,e=0,f=0,g=0,h=0,j=0;d=i;c[a+76>>2]=c[b+76>>2];c[a+64>>2]=c[b+64>>2];c[a+68>>2]=c[b+68>>2];c[a+388>>2]=c[b+388>>2];j=b+296|0;h=c[j+4>>2]|0;f=a+296|0;c[f>>2]=c[j>>2];c[f+4>>2]=h;c[a+72>>2]=c[b+72>>2];f=c[b+304>>2]|0;if(!f)ta();else{e=f;g=0}while(1){if((e|0)!=0?(j=ud(e)|0,c[a+(g<<2)+304>>2]=j,(j|0)==0):0){e=5;break}g=g+1|0;if(g>>>0>=8){e=8;break}e=c[b+(g<<2)+304>>2]|0}if((e|0)==5){yd(a);j=-12;i=d;return j|0}else if((e|0)==8){c[a+0>>2]=c[b+0>>2];c[a+4>>2]=c[b+4>>2];c[a+8>>2]=c[b+8>>2];c[a+12>>2]=c[b+12>>2];c[a+16>>2]=c[b+16>>2];c[a+20>>2]=c[b+20>>2];c[a+24>>2]=c[b+24>>2];c[a+28>>2]=c[b+28>>2];j=a+32|0;h=b+32|0;c[j+0>>2]=c[h+0>>2];c[j+4>>2]=c[h+4>>2];c[j+8>>2]=c[h+8>>2];c[j+12>>2]=c[h+12>>2];c[j+16>>2]=c[h+16>>2];c[j+20>>2]=c[h+20>>2];c[j+24>>2]=c[h+24>>2];c[j+28>>2]=c[h+28>>2];j=0;i=d;return j|0}return 0}function Bd(a){a=a|0;var b=0,d=0,e=0,f=0;d=i;e=0;while(1){f=e+1|0;if((c[4936+(e*24|0)>>2]|0)==(a|0))break;if(f>>>0<4)e=f;else{e=0;b=5;break}}if((b|0)==5){i=d;return e|0}f=4940+(e*24|0)|0;i=d;return f|0}function Cd(b,d,e){b=b|0;d=d|0;e=e|0;var f=0,g=0;f=i;g=(c[b+24>>2]|0)==0?1:3;if((g|0)>(e|0)){b=c[b+8>>2]|0;c[d>>2]=c[b+(e<<2)+32>>2];b=c[b+(e<<2)>>2]|0;i=f;return b|0}if((a[b+29>>0]|0)!=0&(g|0)==(e|0)){b=c[b+12>>2]|0;c[d>>2]=c[b+32>>2];b=c[b>>2]|0;i=f;return b|0}else{c[d>>2]=0;b=0;i=f;return b|0}return 0}function Dd(d,e){d=d|0;e=e|0;var f=0,g=0,h=0;f=i;if(!(c[d+8>>2]|0)){h=-1;i=f;return h|0}c[e>>2]=c[d+16>>2];c[e+4>>2]=c[d+20>>2];a[e+8>>0]=c[d+24>>2];g=d+31|0;if(!(a[d+29>>0]|0))h=0;else h=(a[g>>0]|0)==0&1;a[e+9>>0]=h;a[e+12>>0]=a[d+33>>0]|0;a[e+13>>0]=a[g>>0]|0;a[e+14>>0]=a[d+32>>0]|0;a[e+10>>0]=c[d+36>>2];a[e+11>>0]=a[d+30>>0]|0;a[e+15>>0]=a[d+34>>0]|0;b[e+16>>1]=b[d+48>>1]|0;h=0;i=f;return h|0}function Ed(b,e){b=b|0;e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0.0,p=0,q=0.0,r=0.0,s=0.0,t=0,u=0.0,v=0,w=0.0,x=0.0,y=0.0;f=i;if(!(c[b+8>>2]|0)){t=-1;i=f;return t|0}g=b+68|0;if((a[g>>0]|0)!=0|e>>>0>1){t=-1;i=f;return 
t|0}a[b+76>>0]=(e|0)==1&1;l=b+77|0;a[l>>0]=0;a[b+78>>0]=0;h=b+24|0;if(((c[h>>2]|0)+ -1|0)>>>0<2?(t=b+16|0,p=c[t>>2]|0,j=b+84|0,c[j>>2]=(p+1|0)/2|0,c[b+88>>2]=((c[b+20>>2]|0)+1|0)/2|0,c[b+124>>2]=fd(p<<1)|0,c[b+128>>2]=fd(c[t>>2]<<1)|0,c[b+196>>2]=fd((c[j>>2]<<1)+14|0)|0,(c[h>>2]|0)==1):0){m=0;do{c[b+(m<<2)+132>>2]=fd(c[j>>2]<<1)|0;c[b+(m<<2)+164>>2]=fd(c[j>>2]<<1)|0;m=m+1|0}while((m|0)!=8)}j=d[b+30>>0]|0;v=(a[l>>0]|0)!=0?16:8;l=b+36|0;t=c[l>>2]|0;m=a[b+32>>0]|0;n=m&255;p=30-v|0;o=+((1<<v)+ -1|0)*+(1<<p|0);q=o/+((1<<j)+ -1|0);m=m<<24>>24!=0;if(m){v=j+ -8|0;r=o/+(224<<v|0);o=o/+(219<<v|0)}else{r=q;o=q}if(!t){u=.114;s=.299;k=11}else if((t|0)==3){u=.0722;s=.2126;k=11}else if((t|0)==4){u=.0593;s=.2627;k=11}if((k|0)==11){y=1.0-s;c[b+220>>2]=sa(+(r*y*2.0))|0;w=1.0-u;x=w-s;c[b+224>>2]=sa(+(r*(u*2.0*w/x)))|0;c[b+228>>2]=sa(+(r*(s*2.0*y/x)))|0;c[b+232>>2]=sa(+(r*w*2.0))|0}k=sa(+q)|0;c[b+208>>2]=k;c[b+200>>2]=p;t=1<<p+ -1;p=b+204|0;c[p>>2]=t;c[b+236>>2]=1<<j+ -1;if(m){v=sa(+o)|0;c[b+212>>2]=v;v=$(v,-16<<j+ -8)|0;c[b+216>>2]=v+(c[p>>2]|0)}else{c[b+212>>2]=k;c[b+216>>2]=t}c[b+240>>2]=j;c[b+244>>2]=n;if(!(c[h>>2]|0))c[b+248>>2]=7;else c[b+248>>2]=c[6064+(c[l>>2]<<2)>>2];a[g>>0]=1;c[b+72>>2]=e;c[b+92>>2]=Cd(b,b+108|0,0)|0;if(!(c[h>>2]|0))e=1;else{c[b+96>>2]=Cd(b,b+112|0,1)|0;c[b+100>>2]=Cd(b,b+116|0,2)|0;e=3}if(!(a[b+29>>0]|0))c[b+104>>2]=0;else c[b+104>>2]=Cd(b,b+120|0,e)|0;c[b+80>>2]=0;v=0;i=f;return v|0}function Fd(a,b,d){a=a|0;b=b|0;d=d|0;c[b>>2]=0;c[d>>2]=1;return}function Gd(f,g){f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0;k=i;j=f+80|0;n=c[j>>2]|0;if(n>>>0>=(c[f+20>>2]|0)>>>0){w=-1;i=k;return w|0}h=c[f+16>>2]|0;o=(c[f+92>>2]|0)+($(c[f+108>>2]|0,n)|0)|0;l=f+76|0;if(!(a[l>>0]|0))m=(a[f+78>>0]|0)!=0?4:3;else 
m=4;p=c[f+24>>2]|0;if((p|0)==2){w=(c[f+96>>2]|0)+($(c[f+112>>2]|0,n)|0)|0;r=(c[f+100>>2]|0)+($(c[f+116>>2]|0,n)|0)|0;v=f+124|0;s=f+30|0;t=f+28|0;u=f+196|0;Id(c[v>>2]|0,w,h,d[s>>0]|0,d[t>>0]|0,c[u>>2]|0);w=f+128|0;Id(c[w>>2]|0,r,h,d[s>>0]|0,d[t>>0]|0,c[u>>2]|0);za[c[f+248>>2]&7](f+200|0,g,o,c[v>>2]|0,c[w>>2]|0,h,m)}else if(!p)za[c[f+248>>2]&7](f+200|0,g,o,0,0,h,m);else if((p|0)==1){if(!n){v=f+96|0;p=f+112|0;q=f+100|0;r=f+116|0;s=f+84|0;t=f+88|0;u=0;do{w=(u|0)>4?u+ -8|0:u;if((w|0)<0)w=0;else{x=c[t>>2]|0;w=(w|0)<(x|0)?w:x+ -1|0}y=(c[v>>2]|0)+($(c[p>>2]|0,w)|0)|0;x=(c[q>>2]|0)+($(c[r>>2]|0,w)|0)|0;fe(c[f+(u<<2)+132>>2]|0,y|0,c[s>>2]<<1|0)|0;fe(c[f+(u<<2)+164>>2]|0,x|0,c[s>>2]<<1|0)|0;u=u+1|0}while((u|0)!=8)}p=n>>1;q=(p|0)%8|0;y=n&1;s=f+124|0;v=f+196|0;w=f+30|0;x=f+28|0;Hd(c[s>>2]|0,f+132|0,h,q,c[v>>2]|0,d[w>>0]|0,y,d[x>>0]|0);r=f+128|0;Hd(c[r>>2]|0,f+164|0,h,q,c[v>>2]|0,d[w>>0]|0,y,d[x>>0]|0);if(y){w=(q+5|0)%8|0;v=p+5|0;x=c[f+88>>2]|0;x=(v|0)<(x|0)?v:x+ -1|0;v=(c[f+96>>2]|0)+($(x,c[f+112>>2]|0)|0)|0;x=(c[f+100>>2]|0)+($(c[f+116>>2]|0,x)|0)|0;y=f+84|0;fe(c[f+(w<<2)+132>>2]|0,v|0,c[y>>2]<<1|0)|0;fe(c[f+(w<<2)+164>>2]|0,x|0,c[y>>2]<<1|0)|0}za[c[f+248>>2]&7](f+200|0,g,o,c[s>>2]|0,c[r>>2]|0,h,m)}else if((p|0)==3){x=(c[f+96>>2]|0)+($(c[f+112>>2]|0,n)|0)|0;y=(c[f+100>>2]|0)+($(c[f+116>>2]|0,n)|0)|0;za[c[f+248>>2]&7](f+200|0,g,o,x,y,h,m)}else{y=-1;i=k;return y|0}a:do if(!(a[f+31>>0]|0)){if(a[l>>0]|0){if(!(a[f+29>>0]|0)){if((h|0)<=0)break;f=g+3|0;g=0;while(1){a[f>>0]=-1;g=g+1|0;if((g|0)==(h|0))break a;else f=f+4|0}}l=(c[f+104>>2]|0)+($(c[f+120>>2]|0,n)|0)|0;p=g+3|0;if((c[f+240>>2]|0)==8){if((h|0)>0){m=0;while(1){a[p>>0]=b[l+(m<<1)>>1];m=m+1|0;if((m|0)==(h|0))break;else p=p+4|0}}}else{m=c[f+208>>2]|0;n=c[f+204>>2]|0;o=c[f+200>>2]|0;if((h|0)>0){q=0;while(1){a[p>>0]=($(e[l+(q<<1)>>1]|0,m)|0)+n>>o;q=q+1|0;if((q|0)==(h|0))break;else 
p=p+4|0}}}if(a[f+33>>0]|0){if(!(c[1258]|0)){c[1258]=1;f=1;do{c[5040+(f<<2)>>2]=(((f|0)/2|0)+16711808|0)/(f|0)|0;f=f+1|0}while((f|0)!=256)}if((h|0)>0){f=0;while(1){l=a[g+3>>0]|0;if(!(l<<24>>24)){a[g>>0]=-1;a[g+1>>0]=-1;a[g+2>>0]=-1}else{m=c[5040+((l&255)<<2)>>2]|0;n=a[g>>0]|0;if((n&255)<(l&255))n=(($(n&255,m)|0)+32768|0)>>>16&255;else n=-1;a[g>>0]=n;n=g+1|0;o=a[n>>0]|0;if((o&255)<(l&255))o=(($(o&255,m)|0)+32768|0)>>>16&255;else o=-1;a[n>>0]=o;n=g+2|0;o=a[n>>0]|0;if((o&255)<(l&255))l=(($(o&255,m)|0)+32768|0)>>>16&255;else l=-1;a[n>>0]=l}f=f+1|0;if((f|0)==(h|0))break;else g=g+4|0}}}}}else{n=(c[f+104>>2]|0)+($(c[f+120>>2]|0,n)|0)|0;q=c[f+240>>2]|0;p=1<<q+ -1;r=(h|0)>0;if(r){o=g;f=0;while(1){x=e[n+(f<<1)>>1]|0;a[o>>0]=($(d[o>>0]|0,x)|0)+p>>q;y=o+1|0;a[y>>0]=($(d[y>>0]|0,x)|0)+p>>q;y=o+2|0;a[y>>0]=($(d[y>>0]|0,x)|0)+p>>q;f=f+1|0;if((f|0)==(h|0))break;else o=o+m|0}}if(!((a[l>>0]|0)==0|r^1)){g=g+3|0;f=0;while(1){a[g>>0]=-1;f=f+1|0;if((f|0)==(h|0))break;else g=g+4|0}}}while(0);c[j>>2]=(c[j>>2]|0)+1;y=0;i=k;return y|0}function Hd(a,d,f,g,h,j,k,l){a=a|0;d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0;m=i;o=c[d+((g+5&7)<<2)>>2]|0;s=c[d+((g+6&7)<<2)>>2]|0;p=c[d+((g+7&7)<<2)>>2]|0;r=c[d+((g&7)<<2)>>2]|0;q=c[d+((g+1&7)<<2)>>2]|0;n=c[d+((g+2&7)<<2)>>2]|0;d=c[d+((g+3&7)<<2)>>2]|0;t=j+ -8|0;u=1<<t>>1;g=(f+1|0)/2|0;v=(f|0)>0;if(!k){if(v){k=0;do{x=$(e[s+(k<<1)>>1]|0,-6)|0;y=$(e[q+(k<<1)>>1]|0,-10)|0;b[h+(k+3<<1)>>1]=(e[o+(k<<1)>>1]<<1)+u+x+((e[p+(k<<1)>>1]|0)*18|0)+((e[r+(k<<1)>>1]|0)*57|0)+y+(e[n+(k<<1)>>1]<<2)-(e[d+(k<<1)>>1]|0)>>t;k=k+1|0}while((k|0)<(g|0))}}else 
if(v){k=0;do{x=$(e[p+(k<<1)>>1]|0,-10)|0;y=$(e[n+(k<<1)>>1]|0,-6)|0;b[h+(k+3<<1)>>1]=u-(e[o+(k<<1)>>1]|0)+(e[s+(k<<1)>>1]<<2)+x+((e[r+(k<<1)>>1]|0)*57|0)+((e[q+(k<<1)>>1]|0)*18|0)+y+(e[d+(k<<1)>>1]<<1)>>t;k=k+1|0}while((k|0)<(g|0))}n=h+6|0;y=b[n>>1]|0;b[h>>1]=y;p=h+2|0;b[p>>1]=y;o=h+4|0;b[o>>1]=y;y=b[h+(g+2<<1)>>1]|0;b[h+(g+3<<1)>>1]=y;b[h+(g+4<<1)>>1]=y;b[h+(g+5<<1)>>1]=y;b[h+(g+6<<1)>>1]=y;g=(1<<j)+ -1|0;if(!l){o=14-j|0;l=1<<o>>1;s=20-j|0;r=1<<s+ -1;if((f|0)>1){q=f+ -2|0;j=q>>>1;p=j<<1;d=a;while(1){t=(b[n>>1]|0)+l>>o;if((t|0)<0)t=0;else t=((t|0)>(g|0)?g:t)&65535;b[d>>1]=t;y=$((b[n+4>>1]|0)+(b[n+ -2>>1]|0)|0,-11)|0;t=n;n=n+2|0;t=r-(b[t+ -6>>1]|0)-(b[t+8>>1]|0)+((b[t+6>>1]|0)+(b[t+ -4>>1]|0)<<2)+y+(((b[n>>1]|0)+(b[t>>1]|0)|0)*40|0)>>s;if((t|0)<0)t=0;else t=((t|0)>(g|0)?g:t)&65535;b[d+2>>1]=t;f=f+ -2|0;if((f|0)<=1)break;else d=d+4|0}a=a+(p+2<<1)|0;f=q-p|0;n=h+(j+4<<1)|0}if(!f){i=m;return}h=(b[n>>1]|0)+l>>o;if((h|0)<0)h=0;else h=((h|0)>(g|0)?g:h)&65535;b[a>>1]=h;i=m;return}else{j=20-j|0;l=1<<j+ -1;k=b[h>>1]|0;u=b[p>>1]|0;t=b[o>>1]|0;d=b[n>>1]|0;r=b[h+8>>1]|0;s=b[h+10>>1]|0;if((f|0)>1){q=f+ -2|0;o=q>>>1;p=o<<1;w=a;while(1){v=b[n+6>>1]|0;x=d*57|0;y=(s<<2)+l+($(r,-10)|0)+x+(t*18|0)+($(u,-6)|0)+(k<<1)-v>>j;if((y|0)<0)y=0;else y=((y|0)>(g|0)?g:y)&65535;b[w>>1]=y;k=($(s,-6)|0)+l+(r*18|0)+x+($(t,-10)|0)-k+(u<<2)+(v<<1)>>j;if((k|0)<0)k=0;else k=((k|0)>(g|0)?g:k)&65535;b[w+2>>1]=k;f=f+ -2|0;if((f|0)<=1)break;else{A=s;z=r;x=d;y=t;k=u;s=v;w=w+4|0;n=n+2|0;r=A;d=z;t=x;u=y}}k=u;u=t;t=d;d=r;r=s;s=v;a=a+(p+2<<1)|0;f=q-p|0;n=h+(o+4<<1)|0}if(!f){i=m;return}h=(s<<2)+l+($(r,-10)|0)+(d*57|0)+(t*18|0)+($(u,-6)|0)+(k<<1)-(b[n+6>>1]|0)>>j;if((h|0)<0)h=0;else h=((h|0)>(g|0)?g:h)&65535;b[a>>1]=h;i=m;return}}function Id(a,c,d,f,g,h){a=a|0;c=c|0;d=d|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0;j=i;v=(d+1|0)/2|0;k=h+6|0;fe(k|0,c|0,v<<1|0)|0;u=b[c>>1]|0;b[h>>1]=u;m=h+2|0;b[m>>1]=u;l=h+4|0;b[l>>1]=u;c=b[c+(v+ 
-1<<1)>>1]|0;b[h+(v+3<<1)>>1]=c;b[h+(v+4<<1)>>1]=c;b[h+(v+5<<1)>>1]=c;b[h+(v+6<<1)>>1]=c;c=(1<<f)+ -1|0;if(!g){if((d|0)>1){g=d+ -2|0;l=g>>>1;m=l<<1;f=a;while(1){b[f>>1]=b[k>>1]|0;v=$((e[k+4>>1]|0)+(e[k+ -2>>1]|0)|0,-11)|0;n=k;k=k+2|0;n=32-(e[n+ -6>>1]|0)-(e[n+8>>1]|0)+((e[n+6>>1]|0)+(e[n+ -4>>1]|0)<<2)+v+(((e[k>>1]|0)+(e[n>>1]|0)|0)*40|0)>>6;if((n|0)<0)n=0;else n=((n|0)>(c|0)?c:n)&65535;b[f+2>>1]=n;d=d+ -2|0;if((d|0)<=1)break;else f=f+4|0}a=a+(m+2<<1)|0;d=g-m|0;k=h+(l+4<<1)|0}if(!d){i=j;return}b[a>>1]=b[k>>1]|0;i=j;return}r=e[h>>1]|0;f=e[m>>1]|0;q=e[l>>1]|0;p=e[k>>1]|0;o=e[h+8>>1]|0;n=e[h+10>>1]|0;if((d|0)>1){m=d+ -2|0;l=m>>>1;g=l<<1;t=a;while(1){s=e[k+6>>1]|0;u=p*57|0;v=(n<<2)+32+($(o,-10)|0)+u+(q*18|0)+($(f,-6)|0)+(r<<1)-s>>6;if((v|0)<0)v=0;else v=((v|0)>(c|0)?c:v)&65535;b[t>>1]=v;r=($(n,-6)|0)+32+(o*18|0)+u+($(q,-10)|0)-r+(f<<2)+(s<<1)>>6;if((r|0)<0)r=0;else r=((r|0)>(c|0)?c:r)&65535;b[t+2>>1]=r;d=d+ -2|0;if((d|0)<=1)break;else{x=n;w=o;u=p;v=q;r=f;n=s;t=t+4|0;k=k+2|0;o=x;p=w;q=u;f=v}}r=f;f=q;q=p;p=o;o=n;n=s;a=a+(g+2<<1)|0;d=m-g|0;k=h+(l+4<<1)|0}if(!d){i=j;return}h=(n<<2)+32+($(o,-10)|0)+(p*57|0)+(q*18|0)+($(f,-6)|0)+(r<<1)-(e[k+6>>1]|0)>>6;if((h|0)<0)h=0;else h=((h|0)>(c|0)?c:h)&65535;b[a>>1]=h;i=j;return}function Jd(){var a=0,b=0;a=i;b=md(252)|0;if(!b)b=0;i=a;return b|0}function Kd(e,f,g){e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0;k=i;i=i+80|0;n=k+72|0;l=k+60|0;h=k+48|0;u=k+44|0;x=k+40|0;t=k+36|0;p=k;y=a[e+40>>0]|0;a:do 
if(((((((g|0)>=6?(a[f>>0]|0)==66:0)?(a[f+1>>0]|0)==80:0)?(a[f+2>>0]|0)==71:0)?(a[f+3>>0]|0)==-5:0)?(R=a[f+4>>0]|0,N=R&255,J=N>>>5,c[p+8>>2]=J,(R&255)<=191):0)?(R=(N&15)+8|0,a[p+13>>0]=R,(R&255)>>>0<=14):0){L=a[f+5>>0]|0;v=L&255;M=v>>>4;c[p+24>>2]=M;A=v&8;R=v>>>2&1;a[p+16>>0]=v>>>1&1;r=p+17|0;a[r>>0]=v&1;v=p+18|0;b[v>>1]=0;s=p+20|0;b[s>>1]=0;w=p+22|0;b[w>>1]=0;O=p+12|0;a[O>>0]=0;Q=p+14|0;a[Q>>0]=0;P=p+15|0;a[P>>0]=0;if(!(N&16))if(!R)N=0;else{a[O>>0]=1;a[Q>>0]=1;N=1}else{a[O>>0]=1;a[P>>0]=R;N=0}if((((((((((L&255)<=79?(J|0)!=0|(M|0)==0:0)?!(N<<24>>24!=0&(J|0)==0):0)?(F=Qd(p,f+6|0,g+ -6|0)|0,(F|0)>=0):0)?(C=(c[p>>2]|0)>>>0>1073741823?-1:F,(C|0)>=0):0)?(E=C+6|0,z=p+4|0,I=Qd(z,f+E|0,g-E|0)|0,(I|0)>=0):0)?(H=c[z>>2]|0,D=H>>>0>1073741823?-1:I,(D|0)>=0):0)?(G=D+E|0,!((c[p>>2]|0)==0|(H|0)==0)):0)?(q=p+28|0,K=Qd(q,f+G|0,g-G|0)|0,(K|0)>=0):0)?(B=(c[q>>2]|0)>>>0>1073741823?-1:K,(B|0)>=0):0){z=B+G|0;c[n>>2]=0;do if(!A){c[p+32>>2]=0;m=48}else{A=Qd(n,f+z|0,g-z|0)|0;if((A|0)<0){z=-1;break a}B=c[n>>2]|0;A=B>>>0>1073741823?-1:A;if((A|0)<0){z=-1;break a}C=A+z|0;A=p+32|0;c[A>>2]=0;z=C+B|0;if((z|0)>(g|0)){z=-1;break a}y=y<<24>>24!=0;if(!y?(a[r>>0]|0)==0:0)break;if((C|0)>=(z|0)){z=C;m=48;break}while(1){B=Qd(l,f+C|0,z-C|0)|0;if((B|0)<0){z=-1;break a}C=B+C|0;D=Qd(h,f+C|0,z-C|0)|0;if((D|0)<0){z=-1;break a}B=c[h>>2]|0;D=B>>>0>1073741823?-1:D;if((D|0)<0){z=-1;break a}D=D+C|0;C=D+B|0;if(C>>>0>z>>>0){z=-1;break a}do if(a[r>>0]|0){if((c[l>>2]|0)!=5)break;F=Qd(u,f+D|0,z-D|0)|0;if((F|0)<0){z=-1;break a}E=c[u>>2]|0;F=E>>>0>1073741823?-1:F;if((F|0)<0){z=-1;break a}G=F+D|0;H=Qd(x,f+G|0,z-G|0)|0;if((H|0)<0){z=-1;break a}F=c[x>>2]|0;H=F>>>0>1073741823?-1:H;if((H|0)<0){z=-1;break a}R=H+G|0;if((Qd(t,f+R|0,z-R|0)|0)<0){z=-1;break a}G=c[t>>2]|0;if(!((F&65535|0)==(F|0)&((G>>>0>1073741823|(F|0)==0|(G|0)==0)^1))){z=-1;break a}if((G&65535|0)!=(G|0)){z=-1;break a}if((E&65535|0)!=(E|0)){z=-1;break 
a}b[v>>1]=E;b[s>>1]=F;b[w>>1]=G}while(0);if(y){P=fd(16)|0;c[P>>2]=c[l>>2];Q=P+4|0;c[Q>>2]=B;R=P+12|0;c[R>>2]=0;c[A>>2]=P;A=fd(B)|0;c[P+8>>2]=A;fe(A|0,f+D|0,c[Q>>2]|0)|0;A=R}if((C|0)>=(z|0)){z=C;m=48;break}}}while(0);do if((m|0)==48){if(!(a[r>>0]|0))break;if(!(b[s>>1]|0)){z=-1;break a}}while(0);if(c[q>>2]|0)break;c[q>>2]=g-z}else z=-1}else z=-1;while(0);if((z|0)<0){R=z;i=k;return R|0}u=c[p>>2]|0;v=c[p+4>>2]|0;B=c[p+12>>2]|0;y=B&255;A=c[p+24>>2]|0;w=(B&65535)>>>8;t=w&255;w=w&65535;q=e+16|0;c[q>>2]=u;r=e+20|0;c[r>>2]=v;x=c[p+8>>2]|0;C=e+24|0;c[C>>2]=x;s=B>>>24&255;B=B>>>16&255;if((x|0)==4){c[C>>2]=1;a[e+28>>0]=0;x=1}else if((x|0)==5){c[C>>2]=2;a[e+28>>0]=0;x=2}else{c[C>>2]=x;a[e+28>>0]=1}a[e+29>>0]=y;a[e+33>>0]=s;a[e+31>>0]=B;s=c[p+16>>2]|0;a[e+32>>0]=s;c[e+36>>2]=A;a[e+30>>0]=t;a[e+34>>0]=(s&65535)>>>8;b[e+48>>1]=s>>>16;s=c[p+20>>2]|0;b[e+50>>1]=s;b[e+52>>1]=s>>>16;s=e+44|0;c[s>>2]=c[p+32>>2];do if(((c[p+28>>2]|0)+z|0)>>>0<=g>>>0){A=f+z|0;g=g-z|0;c[l>>2]=0;c[l+4>>2]=0;p=l+8|0;c[p>>2]=0;c[h>>2]=0;c[h+4>>2]=0;t=h+8|0;c[t>>2]=0;if(!(y<<24>>24)){z=A;y=g}else{y=Nd(l,e+12|0,e+4|0,A,g,u,v,0,w)|0;if((y|0)<0)break;z=f+(y+z)|0;y=g-y|0}f=e+8|0;A=Nd(h,f,e,z,y,u,v,x,w)|0;if((A|0)>=0){u=y-A|0;v=e+4|0;y=c[v>>2]|0;w=(y|0)!=0;c[n>>2]=0;x=n+4|0;c[x>>2]=0;b:do if((u|0)>0){y=n+((w&1)<<2)|0;D=0;H=0;z=z+A|0;A=u;G=1;c:while(1){B=(G|0)!=0;if((A|0)<((B?5:2)|0)){n=-1;break b}if(B)F=0;else F=(a[z+2>>0]|0)==0?4:3;if((A|0)<(F+3|0)){n=-1;break b}C=z+F|0;B=d[C>>0]|0;E=B<<5&32|(d[z+(F+1)>>0]|0)>>>3;B=B>>>1&63;do if((B+ -32|0)>>>0<4|(B|0)==39|B>>>0>40)if(D)if(!(c[y>>2]|0))B=H;else break c;else{B=H;D=0}else if((B>>>0<10|(B+ -16|0)>>>0<6?(o=F+2|0,(o|0)<(A|0)):0)?(a[z+o>>0]|0)<0:0){if((H|0)!=0?(c[y>>2]|0)!=0:0)break c;if(w&(E|0)==1){c[x>>2]=1;B=H;D=H;break}else{c[n>>2]=1;B=1;D=1;break}}else B=H;while(0);do if((G|0)!=1){if(((((A|0)>3?(a[z>>0]|0)==0:0)?(a[z+1>>0]|0)==0:0)?(a[z+2>>0]|0)==0:0)?(a[z+3>>0]|0)==1:0){G=4;break}if((A|0)<=2){n=-1;break b}if(a[z>>0]|0){n=-1;break b}if(a[z+1>>0]|0){n=-1;break 
b}if((a[z+2>>0]|0)==1)G=3;else{n=-1;break b}}else G=0;while(0);H=G+2|0;if((H|0)>(A|0)){n=-1;break b}d:do if((H|0)<(A|0))while(1){K=(a[z+G>>0]|0)==0;do if(K){if(a[z+(G+1)>>0]|0)break;if((a[z+H>>0]|0)==1)break d}while(0);J=H;H=G+3|0;if((H|0)>=(A|0)){G=A;break d}I=G+1|0;if(!K){G=I;continue}if(a[z+I>>0]|0){G=I;continue}if(a[z+J>>0]|0){G=I;continue}J=(a[z+H>>0]|0)==1;if(J){G=J?G:A;break}else G=I}else G=A;while(0);if((G|0)<0){n=-1;break b}H=G-F|0;F=H+3|0;E=w&(E|0)==1;J=E?l:h;I=J+8|0;if((Od(J,(c[I>>2]|0)+F|0)|0)<0){n=-1;break b}K=c[J>>2]|0;J=c[I>>2]|0;a[K+J>>0]=0;a[K+(J+1)>>0]=0;a[K+(J+2)>>0]=1;fe(K+(J+3)|0,C|0,H|0)|0;if(E){R=K+(J+4)|0;a[R>>0]=d[R>>0]&7}c[I>>2]=J+F;A=A-G|0;if((A|0)>0){H=B;z=z+G|0;G=0}else break}y=c[v>>2]|0;m=105}else{A=u;m=105}while(0);do if((m|0)==105){if(y){if((Od(l,(c[p>>2]|0)+32|0)|0)<0){n=-1;break}if((Pd(c[v>>2]|0,c[e+12>>2]|0,c[l>>2]|0,c[p>>2]|0)|0)<0){n=-1;break}}if((Od(h,(c[t>>2]|0)+32|0)|0)<0)n=-1;else{n=(Pd(c[e>>2]|0,c[f>>2]|0,c[h>>2]|0,c[t>>2]|0)|0)<0;n=n?-1:u-A|0}}while(0);id(c[l>>2]|0);id(c[h>>2]|0);if((((n|0)>=0?(g-u+n|0)>=0:0)?(Ld(e),j=c[f>>2]|0,(c[j+64>>2]|0)>=(c[q>>2]|0)):0)?(c[j+68>>2]|0)>=(c[r>>2]|0):0){c[e+80>>2]=-1;R=0;i=k;return R|0}}}while(0);xd(e+8|0);xd(e+12|0);c[s>>2]=0;R=-1;i=k;return R|0}function Ld(a){a=a|0;var b=0,d=0,e=0;b=i;d=a+4|0;e=c[d>>2]|0;if(e){Jc(e)|0;id(c[d>>2]|0);c[d>>2]=0}d=c[a>>2]|0;if(!d){i=b;return}Jc(d)|0;id(c[a>>2]|0);c[a>>2]=0;i=b;return}function Md(a){a=a|0;var b=0,d=0;b=i;id(c[a+124>>2]|0);id(c[a+128>>2]|0);d=0;do{id(c[a+(d<<2)+132>>2]|0);id(c[a+(d<<2)+164>>2]|0);d=d+1|0}while((d|0)!=8);id(c[a+196>>2]|0);id(c[a+56>>2]|0);Ld(a);xd(a+8|0);xd(a+12|0);id(a);i=b;return}function Nd(b,d,e,f,g,h,j,k,l){b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0;n=i;i=i+16|0;q=n+4|0;m=n;p=Qd(q,f,g)|0;if((p|0)<0){t=-1;i=n;return t|0}r=c[q>>2]|0;t=r>>>0>1073741823?-1:p;if((t|0)<0){t=-1;i=n;return t|0}s=g-t|0;if(r>>>0>s>>>0){t=-1;i=n;return 
t|0}q=r+10|0;p=fd(q)|0;a[p>>0]=k;a[p+1>>0]=h>>>24;a[p+2>>0]=h>>>16;a[p+3>>0]=h>>>8;a[p+4>>0]=h;a[p+5>>0]=j>>>24;a[p+6>>0]=j>>>16;a[p+7>>0]=j>>>8;a[p+8>>0]=j;a[p+9>>0]=l+248;fe(p+10|0,f+t|0,r|0)|0;l=s-r|0;k=fd(10-r+(q<<1)+l|0)|0;a[k>>0]=0;a[k+1>>0]=0;a[k+2>>0]=0;a[k+3>>0]=1;a[k+4>>0]=96;a[k+5>>0]=1;if((q|0)>0){r=0;f=6;do{j=r+1|0;h=a[p+r>>0]|0;if((j|0)<(q|0)&h<<24>>24==0)if(!(a[p+j>>0]|0)){a[k+f>>0]=0;a[k+(f+1)>>0]=0;a[k+(f+2)>>0]=3;r=r+2|0;f=f+3|0}else{h=0;o=8}else o=8;if((o|0)==8){o=0;a[k+f>>0]=h;r=j;f=f+1|0}}while((r|0)<(q|0));if(!f){f=0;o=12}else o=11}else{f=6;o=11}if((o|0)==11)if(!(a[k+(f+ -1)>>0]|0))o=12;if((o|0)==12){a[k+f>>0]=-128;f=f+1|0}id(p);o=g-l|0;if((o|0)<0){t=-1;i=n;return t|0}g=b+8|0;if((Od(b,(c[g>>2]|0)+f|0)|0)<0){id(k);t=-1;i=n;return t|0}fe((c[b>>2]|0)+(c[g>>2]|0)|0,k|0,f|0)|0;c[g>>2]=(c[g>>2]|0)+f;id(k);b=Oc(1416)|0;if(!b){t=-1;i=n;return t|0}g=wd()|0;c[m>>2]=g;if(!g){t=-1;i=n;return t|0}t=b+688|0;c[t>>2]=c[t>>2]|1;if((Ic(b,1416,0)|0)<0){xd(m);t=-1;i=n;return t|0}else{c[e>>2]=b;c[d>>2]=g;t=o;i=n;return t|0}return 0}function Od(a,b){a=a|0;b=b|0;var d=0,e=0,f=0;d=i;e=a+4|0;f=c[e>>2]|0;if((f|0)<(b|0)){f=(f*3|0)/2|0;f=(f|0)<(b|0)?b:f;b=gd(c[a>>2]|0,f)|0;if(!b)a=-1;else{c[a>>2]=b;c[e>>2]=f;a=0}}else a=0;i=d;return a|0}function Pd(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0;j=i;i=i+96|0;h=j;g=j+80|0;Xc(h);c[h+24>>2]=e;c[h+28>>2]=f;e=e+f+0|0;f=e+32|0;do{a[e>>0]=0;e=e+1|0}while((e|0)<(f|0));e=(Pc(b,d,g,h)|0)<0;i=j;return(e|(c[g>>2]|0)==0)<<31>>31|0}function Qd(b,e,f){b=b|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0;g=i;a:do if((f|0)>=1){j=a[e>>0]|0;h=j&255;if(j<<24>>24>-1){c[b>>2]=h;b=1;break}if(j<<24>>24!=-128){j=e+1|0;h=h&127;while(1){if((f|0)<2){b=-1;break a}k=j;j=j+1|0;k=d[k>>0]|0;h=k&127|h<<7;if(!(k&128))break;else f=f+ -1|0}c[b>>2]=h;b=j-e|0}else b=-1}else b=-1;while(0);i=g;return b|0}function Rd(d,f,g,h,j,k,l){d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var 
m=0,n=0,o=0;j=i;if((c[d+40>>2]|0)==8?(c[d+44>>2]|0)==0:0){if((k|0)>0)h=0;else{i=j;return}while(1){o=b[g+(h<<1)>>1]&255;a[f>>0]=o;a[f+1>>0]=o;a[f+2>>0]=o;h=h+1|0;if((h|0)==(k|0))break;else f=f+l|0}i=j;return}m=c[d+12>>2]|0;h=c[d+16>>2]|0;d=c[d>>2]|0;if((k|0)>0)n=0;else{i=j;return}while(1){o=($(e[g+(n<<1)>>1]|0,m)|0)+h>>d;if((o|0)<0)o=0;else o=(o|0)>255?-1:o&255;a[f>>0]=o;a[f+1>>0]=o;a[f+2>>0]=o;n=n+1|0;if((n|0)==(k|0))break;else f=f+l|0}i=j;return}function Sd(b,d,f,g,h,j,k){b=b|0;d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0;q=i;s=c[b+20>>2]|0;n=c[b+24>>2]|0;o=c[b+28>>2]|0;l=c[b+32>>2]|0;p=c[b+12>>2]|0;r=c[b+16>>2]|0;m=c[b>>2]|0;b=c[b+36>>2]|0;if((j|0)>0)t=0;else{i=q;return}while(1){v=$(e[f+(t<<1)>>1]|0,p)|0;u=(e[g+(t<<1)>>1]|0)-b|0;w=(e[h+(t<<1)>>1]|0)-b|0;v=v+r|0;x=v+($(w,s)|0)>>m;if((x|0)<0)x=0;else x=(x|0)>255?-1:x&255;a[d>>0]=x;w=v-($(u,n)|0)-($(w,o)|0)>>m;if((w|0)<0)w=0;else w=(w|0)>255?-1:w&255;a[d+1>>0]=w;u=v+($(u,l)|0)>>m;if((u|0)<0)u=0;else u=(u|0)>255?-1:u&255;a[d+2>>0]=u;t=t+1|0;if((t|0)==(j|0))break;else d=d+k|0}i=q;return}function Td(d,f,g,h,j,k,l){d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0;m=i;if((c[d+40>>2]|0)==8?(c[d+44>>2]|0)==0:0){if((k|0)>0)n=0;else{i=m;return}while(1){a[f>>0]=b[j+(n<<1)>>1];a[f+1>>0]=b[g+(n<<1)>>1];a[f+2>>0]=b[h+(n<<1)>>1];n=n+1|0;if((n|0)==(k|0))break;else f=f+l|0}i=m;return}o=c[d+12>>2]|0;n=c[d+16>>2]|0;d=c[d>>2]|0;if((k|0)>0)p=0;else{i=m;return}while(1){q=($(e[j+(p<<1)>>1]|0,o)|0)+n>>d;if((q|0)<0)q=0;else q=(q|0)>255?-1:q&255;a[f>>0]=q;q=($(e[g+(p<<1)>>1]|0,o)|0)+n>>d;if((q|0)<0)q=0;else q=(q|0)>255?-1:q&255;a[f+1>>0]=q;q=($(e[h+(p<<1)>>1]|0,o)|0)+n>>d;if((q|0)<0)q=0;else q=(q|0)>255?-1:q&255;a[f+2>>0]=q;p=p+1|0;if((p|0)==(k|0))break;else f=f+l|0}i=m;return}function Ud(b,d,f,g,h,j,k){b=b|0;d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var 
l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0;o=i;l=c[b+12>>2]|0;m=c[b+16>>2]|0;n=c[b>>2]|0;b=c[b+36>>2]|0;if((j|0)>0)p=0;else{i=o;return}while(1){t=e[f+(p<<1)>>1]|0;s=(e[g+(p<<1)>>1]|0)-b|0;r=(e[h+(p<<1)>>1]|0)-b|0;q=t-s|0;u=($(q+r|0,l)|0)+m>>n;if((u|0)<0)u=0;else u=(u|0)>255?-1:u&255;a[d>>0]=u;s=($(s+t|0,l)|0)+m>>n;if((s|0)<0)s=0;else s=(s|0)>255?-1:s&255;a[d+1>>0]=s;q=($(q-r|0,l)|0)+m>>n;if((q|0)<0)q=0;else q=(q|0)>255?-1:q&255;a[d+2>>0]=q;p=p+1|0;if((p|0)==(j|0))break;else d=d+k|0}i=o;return}function Vd(b){b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0,k=0;d=i;if((b|0)==0|b>>>0>2147483583){k=0;i=d;return k|0}do if(!(c[1523]|0)){e=ra(64)|0;if((e|0)==(-1|0)){k=0;i=d;return k|0}else{c[1524]=ra(0)|0;c[1523]=6088;c[1522]=6088;c[1527]=6104;c[1526]=6104;k=e+16|0;a[e+15>>0]=-86;j=c[1527]|0;c[1527]=k;c[k>>2]=6104;c[e+20>>2]=j;c[j>>2]=k;j=e+24|0;k=c[1523]|0;c[1523]=j;c[j>>2]=6088;c[e+28>>2]=k;c[k>>2]=j;break}}while(0);e=b+40&-32;h=c[1524]|0;g=c[1522]|0;k=6092|0;while(1){f=c[k>>2]|0;b=f+ -8|0;k=c[f+ -4>>2]|0;if((k|0)==6104)j=h;else j=k;j=j-b|0;if(e>>>0<j>>>0){h=12;break}if((f|0)==(g|0)){h=10;break}k=f+4|0;if((e|0)==(j|0)){h=15;break}}do if((h|0)==10)if((ra(e+32-j|0)|0)==(-1|0)){k=0;i=d;return k|0}else{c[1524]=ra(0)|0;k=c[g+ -4>>2]|0;f=g;h=12;break}else if((h|0)==15){j=c[f>>2]|0;k=c[k>>2]|0;c[j+4>>2]=k;c[k>>2]=j}while(0);if((h|0)==12){h=b+e|0;c[f+ -4>>2]=h;c[h>>2]=b;c[b+(e|4)>>2]=k;c[k>>2]=h;h=b+(e|8)|0;k=f+4|0;j=c[k>>2]|0;c[k>>2]=h;c[h>>2]=f;c[b+(e|12)>>2]=j;c[j>>2]=h;a[b+(e+ -1)>>0]=-86;j=c[f>>2]|0;k=c[k>>2]|0;c[j+4>>2]=k;c[k>>2]=j}a[b+ -1>>0]=85;k=f;i=d;return k|0}function Wd(b){b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0;d=i;if(!b){i=d;return}g=b+ -8|0;e=c[1523]|0;c[1523]=b;c[b>>2]=6088;f=b+4|0;c[f>>2]=e;c[e>>2]=b;a[b+ -9>>0]=-86;e=c[g>>2]|0;if((e|0)!=6104?(a[e+ -1>>0]|0)==-86:0){g=c[b+ -4>>2]|0;c[e+4>>2]=g;c[g>>2]=e;b=c[b>>2]|0;g=c[f>>2]|0;c[b+4>>2]=g;c[g>>2]=b}else e=g;b=c[e+4>>2]|0;if((b|0)==6104){i=d;return}if((a[b+ 
-1>>0]|0)!=-86){i=d;return}g=c[b>>2]|0;h=c[b+4>>2]|0;c[g+4>>2]=h;c[h>>2]=g;h=e+8|0;g=c[h>>2]|0;j=e+12|0;f=c[j>>2]|0;c[g+4>>2]=f;c[f>>2]=g;f=b+8|0;g=b+12|0;e=c[g>>2]|0;c[g>>2]=h;c[h>>2]=f;c[j>>2]=e;c[e>>2]=h;f=c[f>>2]|0;g=c[g>>2]|0;c[f+4>>2]=g;c[g>>2]=f;i=d;return}function Xd(a,b){a=a|0;b=b|0;var d=0,e=0,f=0;d=i;do if(a){if(!b){Wd(a);e=0;break}e=Vd(b)|0;if(!e)e=0;else{f=(c[a+ -4>>2]|0)-a+ -1|0;fe(e|0,a|0,(f>>>0>b>>>0?b:f)|0)|0;Wd(a)}}else e=Vd(b)|0;while(0);i=d;return e|0}function Yd(b,c,d){b=b|0;c=c|0;d=d|0;var e=0,f=0,g=0,h=0;f=i;if(!d){h=0;i=f;return h|0}while(1){g=a[b>>0]|0;h=a[c>>0]|0;if(g<<24>>24!=h<<24>>24)break;d=d+ -1|0;if(!d){b=0;e=5;break}else{b=b+1|0;c=c+1|0}}if((e|0)==5){i=f;return b|0}h=(g&255)-(h&255)|0;i=f;return h|0}function Zd(){}function _d(a,b,c){a=a|0;b=b|0;c=c|0;if((c|0)<32){D=b>>c;return a>>>c|(b&(1<<c)-1)<<32-c}D=(b|0)<0?-1:0;return b>>c-32|0}function $d(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;b=b-d-(c>>>0>a>>>0|0)>>>0;return(D=b,a-c>>>0|0)|0}function ae(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;c=a+c>>>0;return(D=b+d+(c>>>0<a>>>0|0)>>>0,c|0)|0}function be(b){b=b|0;var c=0;c=b;while(a[c>>0]|0)c=c+1|0;return c-b|0}function ce(b,d,e){b=b|0;d=d|0;e=e|0;var f=0,g=0,h=0,i=0;f=b+e|0;if((e|0)>=20){d=d&255;i=b&3;h=d|d<<8|d<<16|d<<24;g=f&~3;if(i){i=b+4-i|0;while((b|0)<(i|0)){a[b>>0]=d;b=b+1|0}}while((b|0)<(g|0)){c[b>>2]=h;b=b+4|0}}while((b|0)<(f|0)){a[b>>0]=d;b=b+1|0}return b-e|0}function de(a,b,c){a=a|0;b=b|0;c=c|0;if((c|0)<32){D=b<<c|(a&(1<<c)-1<<32-c)>>>32-c;return a<<c}D=a<<c-32;return 0}function ee(a){a=a|0;return(a&255)<<24|(a>>8&255)<<16|(a>>16&255)<<8|a>>>24|0}function fe(b,d,e){b=b|0;d=d|0;e=e|0;var f=0;if((e|0)>=4096)return ua(b|0,d|0,e|0)|0;f=b|0;if((b&3)==(d&3)){while(b&3){if(!e)return f|0;a[b>>0]=a[d>>0]|0;b=b+1|0;d=d+1|0;e=e-1|0}while((e|0)>=4){c[b>>2]=c[d>>2];b=b+4|0;d=d+4|0;e=e-4|0}}while((e|0)>0){a[b>>0]=a[d>>0]|0;b=b+1|0;d=d+1|0;e=e-1|0}return f|0}function ge(a,b,c){a=a|0;b=b|0;c=c|0;if((c|0)<32){D=b>>>c;return a>>>c|(b&(1<<c)-1)<<32-c}D=0;return 
b>>>c-32|0}function he(b){b=b|0;var c=0;c=a[n+(b>>>24)>>0]|0;if((c|0)<8)return c|0;c=a[n+(b>>16&255)>>0]|0;if((c|0)<8)return c+8|0;c=a[n+(b>>8&255)>>0]|0;if((c|0)<8)return c+16|0;return(a[n+(b&255)>>0]|0)+24|0}function ie(b){b=b|0;var c=0;c=a[m+(b&255)>>0]|0;if((c|0)<8)return c|0;c=a[m+(b>>8&255)>>0]|0;if((c|0)<8)return c+8|0;c=a[m+(b>>16&255)>>0]|0;if((c|0)<8)return c+16|0;return(a[m+(b>>>24)>>0]|0)+24|0}function je(a,b){a=a|0;b=b|0;var c=0,d=0,e=0,f=0;f=a&65535;d=b&65535;c=$(d,f)|0;e=a>>>16;d=(c>>>16)+($(d,e)|0)|0;b=b>>>16;a=$(b,f)|0;return(D=(d>>>16)+($(b,e)|0)+(((d&65535)+a|0)>>>16)|0,d+a<<16|c&65535|0)|0}function ke(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;var e=0,f=0;e=a;f=c;a=je(e,f)|0;c=D;return(D=($(b,f)|0)+($(d,e)|0)+c|c&0,a|0|0)|0}function le(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;return xa[a&1](b|0,c|0,d|0)|0}function me(a,b,c,d,e,f,g,h,i,j,k){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0;ya[a&1](b|0,c|0,d|0,e|0,f|0,g|0,h|0,i|0,j|0,k|0)}function ne(a,b,c,d,e,f,g,h){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;za[a&7](b|0,c|0,d|0,e|0,f|0,g|0,h|0)}function oe(a,b,c,d,e,f,g,h,i,j,k,l,m,n){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;Aa[a&3](b|0,c|0,d|0,e|0,f|0,g|0,h|0,i|0,j|0,k|0,l|0,m|0,n|0)}function pe(a,b){a=a|0;b=b|0;Ba[a&1](b|0)}function qe(a,b,c){a=a|0;b=b|0;c=c|0;Ca[a&7](b|0,c|0)}function re(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;return Da[a&1](b|0,c|0,d|0,e|0,f|0,g|0)|0}function se(a,b){a=a|0;b=b|0;return Ea[a&3](b|0)|0}function te(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;Fa[a&7](b|0,c|0,d|0)}function ue(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;return Ga[a&1](b|0,c|0,d|0,e|0)|0}function ve(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;Ha[a&3](b|0,c|0,d|0,e|0,f|0,g|0)}function we(a,b,c){a=a|0;b=b|0;c=c|0;return Ia[a&1](b|0,c|0)|0}function xe(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;return Ja[a&1](b|0,c|0,d|0,e|0,f|0)|0}function 
ye(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;Ka[a&7](b|0,c|0,d|0,e|0)}function ze(a,b,c){a=a|0;b=b|0;c=c|0;aa(0);return 0}function Ae(a,b,c,d,e,f,g,h,i,j){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;aa(1)}function Be(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;aa(2)}function Ce(a,b,c,d,e,f,g,h,i,j,k,l,m){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0;l=l|0;m=m|0;aa(3)}function De(a){a=a|0;aa(4)}function Ee(a,b){a=a|0;b=b|0;aa(5)}function Fe(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;aa(6);return 0}function Ge(a){a=a|0;aa(7);return 0}function He(a,b,c){a=a|0;b=b|0;c=c|0;aa(8)}function Ie(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;aa(9);return 0}function Je(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;aa(10)}function Ke(a,b){a=a|0;b=b|0;aa(11);return 0}function Le(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;aa(12);return 0}function Me(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;aa(13)}
+
+
+
+
+// EMSCRIPTEN_END_FUNCS
+var xa=[ze,Mc];var ya=[Ae,sc];var za=[Be,Sd,Td,Ud,cc,vc,wc,Rd];var Aa=[Ce,tc,uc,Ce];var Ba=[De,Mb];var Ca=[Ee,jc,oc,pc,qc,rc,Fc,rd];var Da=[Fe,Kc];var Ea=[Ge,Jb,Lb,Ge];var Fa=[He,hc,ic,kc,lc,mc,nc,He];var Ga=[Ie,Kb];var Ha=[Je,xc,yc,Je];var Ia=[Ke,Pb];var Ja=[Le,Lc];var Ka=[Me,dc,ec,fc,gc,Me,Me,Me];return{_i64Subtract:$d,_free:Wd,_bpg_decoder_decode:Kd,_bpg_decoder_start:Ed,_realloc:Xd,_i64Add:ae,_bpg_decoder_open:Jd,_bitshift64Ashr:_d,_strlen:be,_bpg_decoder_get_info:Dd,_memset:ce,_malloc:Vd,_memcpy:fe,_bpg_decoder_get_line:Gd,_bpg_decoder_close:Md,_bpg_decoder_get_frame_duration:Fd,_llvm_bswap_i32:ee,_bitshift64Shl:de,runPostSets:Zd,stackAlloc:La,stackSave:Ma,stackRestore:Na,setThrew:Oa,setTempRet0:Ra,getTempRet0:Sa,dynCall_iiii:le,dynCall_viiiiiiiiii:me,dynCall_viiiiiii:ne,dynCall_viiiiiiiiiiiii:oe,dynCall_vi:pe,dynCall_vii:qe,dynCall_iiiiiii:re,dynCall_ii:se,dynCall_viii:te,dynCall_iiiii:ue,dynCall_viiiiii:ve,dynCall_iii:we,dynCall_iiiiii:xe,dynCall_viiii:ye}})
+
+
+// EMSCRIPTEN_END_ASM
+(Module.asmGlobalArg,Module.asmLibraryArg,buffer);var _i64Subtract=Module["_i64Subtract"]=asm["_i64Subtract"];var _free=Module["_free"]=asm["_free"];var _bpg_decoder_decode=Module["_bpg_decoder_decode"]=asm["_bpg_decoder_decode"];var _bpg_decoder_start=Module["_bpg_decoder_start"]=asm["_bpg_decoder_start"];var _realloc=Module["_realloc"]=asm["_realloc"];var _i64Add=Module["_i64Add"]=asm["_i64Add"];var _bpg_decoder_open=Module["_bpg_decoder_open"]=asm["_bpg_decoder_open"];var _bitshift64Ashr=Module["_bitshift64Ashr"]=asm["_bitshift64Ashr"];var _strlen=Module["_strlen"]=asm["_strlen"];var _bpg_decoder_get_info=Module["_bpg_decoder_get_info"]=asm["_bpg_decoder_get_info"];var _memset=Module["_memset"]=asm["_memset"];var _malloc=Module["_malloc"]=asm["_malloc"];var _memcpy=Module["_memcpy"]=asm["_memcpy"];var _bpg_decoder_get_line=Module["_bpg_decoder_get_line"]=asm["_bpg_decoder_get_line"];var _bpg_decoder_close=Module["_bpg_decoder_close"]=asm["_bpg_decoder_close"];var _bpg_decoder_get_frame_duration=Module["_bpg_decoder_get_frame_duration"]=asm["_bpg_decoder_get_frame_duration"];var _llvm_bswap_i32=Module["_llvm_bswap_i32"]=asm["_llvm_bswap_i32"];var _bitshift64Shl=Module["_bitshift64Shl"]=asm["_bitshift64Shl"];var runPostSets=Module["runPostSets"]=asm["runPostSets"];var dynCall_iiii=Module["dynCall_iiii"]=asm["dynCall_iiii"];var dynCall_viiiiiiiiii=Module["dynCall_viiiiiiiiii"]=asm["dynCall_viiiiiiiiii"];var dynCall_viiiiiii=Module["dynCall_viiiiiii"]=asm["dynCall_viiiiiii"];var dynCall_viiiiiiiiiiiii=Module["dynCall_viiiiiiiiiiiii"]=asm["dynCall_viiiiiiiiiiiii"];var dynCall_vi=Module["dynCall_vi"]=asm["dynCall_vi"];var dynCall_vii=Module["dynCall_vii"]=asm["dynCall_vii"];var dynCall_iiiiiii=Module["dynCall_iiiiiii"]=asm["dynCall_iiiiiii"];var dynCall_ii=Module["dynCall_ii"]=asm["dynCall_ii"];var dynCall_viii=Module["dynCall_viii"]=asm["dynCall_viii"];var dynCall_iiiii=Module["dynCall_iiiii"]=asm["dynCall_iiiii"];var 
dynCall_viiiiii=Module["dynCall_viiiiii"]=asm["dynCall_viiiiii"];var dynCall_iii=Module["dynCall_iii"]=asm["dynCall_iii"];var dynCall_iiiiii=Module["dynCall_iiiiii"]=asm["dynCall_iiiiii"];var dynCall_viiii=Module["dynCall_viiii"]=asm["dynCall_viiii"];Runtime.stackAlloc=asm["stackAlloc"];Runtime.stackSave=asm["stackSave"];Runtime.stackRestore=asm["stackRestore"];Runtime.setTempRet0=asm["setTempRet0"];Runtime.getTempRet0=asm["getTempRet0"];var i64Math=null;if(memoryInitializer){if(typeof Module["locateFile"]==="function"){memoryInitializer=Module["locateFile"](memoryInitializer)}else if(Module["memoryInitializerPrefixURL"]){memoryInitializer=Module["memoryInitializerPrefixURL"]+memoryInitializer}if(ENVIRONMENT_IS_NODE||ENVIRONMENT_IS_SHELL){var data=Module["readBinary"](memoryInitializer);HEAPU8.set(data,STATIC_BASE)}else{addRunDependency("memory initializer");Browser.asyncLoad(memoryInitializer,(function(data){HEAPU8.set(data,STATIC_BASE);removeRunDependency("memory initializer")}),(function(data){throw"could not load memory initializer "+memoryInitializer}))}}function ExitStatus(status){this.name="ExitStatus";this.message="Program terminated with exit("+status+")";this.status=status}ExitStatus.prototype=new Error;ExitStatus.prototype.constructor=ExitStatus;var initialStackTop;var preloadStartTime=null;var calledMain=false;dependenciesFulfilled=function runCaller(){if(!Module["calledRun"]&&shouldRunNow)run();if(!Module["calledRun"])dependenciesFulfilled=runCaller};function run(args){args=args||Module["arguments"];if(preloadStartTime===null)preloadStartTime=Date.now();if(runDependencies>0){return}preRun();if(runDependencies>0)return;if(Module["calledRun"])return;function doRun(){if(Module["calledRun"])return;Module["calledRun"]=true;if(ABORT)return;ensureInitRuntime();preMain();if(ENVIRONMENT_IS_WEB&&preloadStartTime!==null){Module.printErr("pre-main prep time: "+(Date.now()-preloadStartTime)+" 
ms")}postRun()}if(Module["setStatus"]){Module["setStatus"]("Running...");setTimeout((function(){setTimeout((function(){Module["setStatus"]("")}),1);doRun()}),1)}else{doRun()}}Module["run"]=Module.run=run;function exit(status){if(Module["noExitRuntime"]){return}ABORT=true;EXITSTATUS=status;STACKTOP=initialStackTop;exitRuntime();if(ENVIRONMENT_IS_NODE){process["stdout"]["once"]("drain",(function(){process["exit"](status)}));console.log(" ");setTimeout((function(){process["exit"](status)}),500)}else if(ENVIRONMENT_IS_SHELL&&typeof quit==="function"){quit(status)}throw new ExitStatus(status)}Module["exit"]=Module.exit=exit;function abort(text){if(text){Module.print(text);Module.printErr(text)}ABORT=true;EXITSTATUS=1;var extra="\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.";throw"abort() at "+stackTrace()+extra}Module["abort"]=Module.abort=abort;if(Module["preInit"]){if(typeof Module["preInit"]=="function")Module["preInit"]=[Module["preInit"]];while(Module["preInit"].length>0){Module["preInit"].pop()()}}var 
shouldRunNow=true;if(Module["noInitialRun"]){shouldRunNow=false}run();window["BPGDecoder"]=(function(ctx){this.ctx=ctx;this["imageData"]=null;this["onload"]=null;this["frames"]=null;this["loop_count"]=0});window["BPGDecoder"].prototype={malloc:Module["cwrap"]("malloc","number",["number"]),free:Module["cwrap"]("free","void",["number"]),bpg_decoder_open:Module["cwrap"]("bpg_decoder_open","number",[]),bpg_decoder_decode:Module["cwrap"]("bpg_decoder_decode","number",["number","array","number"]),bpg_decoder_get_info:Module["cwrap"]("bpg_decoder_get_info","number",["number","number"]),bpg_decoder_start:Module["cwrap"]("bpg_decoder_start","number",["number","number"]),bpg_decoder_get_frame_duration:Module["cwrap"]("bpg_decoder_get_frame_duration","void",["number","number","number"]),bpg_decoder_get_line:Module["cwrap"]("bpg_decoder_get_line","number",["number","number"]),bpg_decoder_close:Module["cwrap"]("bpg_decoder_close","void",["number"]),load:(function(url){var request=new XMLHttpRequest;var this1=this;request.open("get",url,true);request.responseType="arraybuffer";request.onload=(function(event){this1._onload(request,event)});request.send()}),_onload:(function(request,event){var data=request.response;var array=new Uint8Array(data);var img,w,h,img_info_buf,cimg,p0,rgba_line,w4,frame_count;var heap8,heap16,heap32,dst,v,i,y,func,duration,frames,loop_count;img=this.bpg_decoder_open();if(this.bpg_decoder_decode(img,array,array.length)<0){console.log("could not decode 
image");return}img_info_buf=this.malloc(5*4);this.bpg_decoder_get_info(img,img_info_buf);heap8=Module["HEAPU8"];heap16=Module["HEAPU16"];heap32=Module["HEAPU32"];w=heap32[img_info_buf>>2];h=heap32[img_info_buf+4>>2];loop_count=heap16[img_info_buf+16>>1];w4=w*4;rgba_line=this.malloc(w4);frame_count=0;frames=[];for(;;){if(this.bpg_decoder_start(img,1)<0)break;this.bpg_decoder_get_frame_duration(img,img_info_buf,img_info_buf+4);duration=heap32[img_info_buf>>2]*1e3/heap32[img_info_buf+4>>2];cimg=this.ctx.createImageData(w,h);dst=cimg.data;p0=0;for(y=0;y<h;y++){this.bpg_decoder_get_line(img,rgba_line);for(i=0;i<w4;i=i+1|0){dst[p0]=heap8[rgba_line+i|0]|0;p0=p0+1|0}}frames[frame_count++]={"img":cimg,"duration":duration}}this.free(rgba_line);this.free(img_info_buf);this.bpg_decoder_close(img);this["loop_count"]=loop_count;this["frames"]=frames;this["imageData"]=frames[0]["img"];if(this["onload"])this["onload"]()})};window.onload=(function(){var i,n,el,tab,tab1,url,dec,canvas,id,style,ctx,dw,dh;tab=document.images;n=tab.length;tab1=[];for(i=0;i<n;i++){el=tab[i];url=el.src;if(url.substr(-4,4).toLowerCase()==".bpg"){tab1[tab1.length]=el}}n=tab1.length;for(i=0;i<n;i++){el=tab1[i];url=el.src;canvas=document.createElement("canvas");if(el.id)canvas.id=el.id;if(el.className)canvas.className=el.className;dw=el.getAttribute("width")|0;if(dw){canvas.style.width=dw+"px"}dh=el.getAttribute("height")|0;if(dh){canvas.style.height=dh+"px"}el.parentNode.replaceChild(canvas,el);ctx=canvas.getContext("2d");dec=new BPGDecoder(ctx);dec.onload=(function(canvas,ctx){var dec=this;var frames=this["frames"];var imageData=frames[0]["img"];function next_frame(){var 
frame_index=dec.frame_index;if(++frame_index>=frames.length){if(dec["loop_count"]==0||dec.loop_counter<dec["loop_count"]){frame_index=0;dec.loop_counter++}else{frame_index=-1}}if(frame_index>=0){dec.frame_index=frame_index;ctx.putImageData(frames[frame_index]["img"],0,0);setTimeout(next_frame,frames[frame_index]["duration"])}}canvas.width=imageData.width;canvas.height=imageData.height;ctx.putImageData(imageData,0,0);if(frames.length>1){dec.frame_index=0;dec.loop_counter=0;setTimeout(next_frame,frames[0]["duration"])}}).bind(dec,canvas,ctx);dec.load(url)}})}))()
+
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/html/bpgdec8.js	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,23 @@
+((function(){var Module={};var Module;if(!Module)Module=(typeof Module!=="undefined"?Module:null)||{};var moduleOverrides={};for(var key in Module){if(Module.hasOwnProperty(key)){moduleOverrides[key]=Module[key]}}var ENVIRONMENT_IS_NODE=typeof process==="object"&&typeof require==="function";var ENVIRONMENT_IS_WEB=typeof window==="object";var ENVIRONMENT_IS_WORKER=typeof importScripts==="function";var ENVIRONMENT_IS_SHELL=!ENVIRONMENT_IS_WEB&&!ENVIRONMENT_IS_NODE&&!ENVIRONMENT_IS_WORKER;if(ENVIRONMENT_IS_NODE){if(!Module["print"])Module["print"]=function print(x){process["stdout"].write(x+"\n")};if(!Module["printErr"])Module["printErr"]=function printErr(x){process["stderr"].write(x+"\n")};var nodeFS=require("fs");var nodePath=require("path");Module["read"]=function read(filename,binary){filename=nodePath["normalize"](filename);var ret=nodeFS["readFileSync"](filename);if(!ret&&filename!=nodePath["resolve"](filename)){filename=path.join(__dirname,"..","src",filename);ret=nodeFS["readFileSync"](filename)}if(ret&&!binary)ret=ret.toString();return ret};Module["readBinary"]=function readBinary(filename){return Module["read"](filename,true)};Module["load"]=function load(f){globalEval(read(f))};Module["thisProgram"]=process["argv"][1].replace(/\\/g,"/");Module["arguments"]=process["argv"].slice(2);if(typeof module!=="undefined"){module["exports"]=Module}process["on"]("uncaughtException",(function(ex){if(!(ex instanceof ExitStatus)){throw ex}}))}else if(ENVIRONMENT_IS_SHELL){if(!Module["print"])Module["print"]=print;if(typeof printErr!="undefined")Module["printErr"]=printErr;if(typeof read!="undefined"){Module["read"]=read}else{Module["read"]=function read(){throw"no read() available (jsc?)"}}Module["readBinary"]=function readBinary(f){if(typeof readbuffer==="function"){return new Uint8Array(readbuffer(f))}var data=read(f,"binary");assert(typeof data==="object");return data};if(typeof scriptArgs!="undefined"){Module["arguments"]=scriptArgs}else if(typeof 
arguments!="undefined"){Module["arguments"]=arguments}this["Module"]=Module}else if(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER){Module["read"]=function read(url){var xhr=new XMLHttpRequest;xhr.open("GET",url,false);xhr.send(null);return xhr.responseText};if(typeof arguments!="undefined"){Module["arguments"]=arguments}if(typeof console!=="undefined"){if(!Module["print"])Module["print"]=function print(x){console.log(x)};if(!Module["printErr"])Module["printErr"]=function printErr(x){console.log(x)}}else{var TRY_USE_DUMP=false;if(!Module["print"])Module["print"]=TRY_USE_DUMP&&typeof dump!=="undefined"?(function(x){dump(x)}):(function(x){})}if(ENVIRONMENT_IS_WEB){window["Module"]=Module}else{Module["load"]=importScripts}}else{throw"Unknown runtime environment. Where are we?"}function globalEval(x){eval.call(null,x)}if(!Module["load"]&&Module["read"]){Module["load"]=function load(f){globalEval(Module["read"](f))}}if(!Module["print"]){Module["print"]=(function(){})}if(!Module["printErr"]){Module["printErr"]=Module["print"]}if(!Module["arguments"]){Module["arguments"]=[]}if(!Module["thisProgram"]){Module["thisProgram"]="./this.program"}Module.print=Module["print"];Module.printErr=Module["printErr"];Module["preRun"]=[];Module["postRun"]=[];for(var key in moduleOverrides){if(moduleOverrides.hasOwnProperty(key)){Module[key]=moduleOverrides[key]}}var Runtime={setTempRet0:(function(value){tempRet0=value}),getTempRet0:(function(){return tempRet0}),stackSave:(function(){return STACKTOP}),stackRestore:(function(stackTop){STACKTOP=stackTop}),getNativeTypeSize:(function(type){switch(type){case"i1":case"i8":return 1;case"i16":return 2;case"i32":return 4;case"i64":return 8;case"float":return 4;case"double":return 8;default:{if(type[type.length-1]==="*"){return Runtime.QUANTUM_SIZE}else if(type[0]==="i"){var bits=parseInt(type.substr(1));assert(bits%8===0);return bits/8}else{return 0}}}}),getNativeFieldSize:(function(type){return 
Math.max(Runtime.getNativeTypeSize(type),Runtime.QUANTUM_SIZE)}),STACK_ALIGN:16,getAlignSize:(function(type,size,vararg){if(!vararg&&(type=="i64"||type=="double"))return 8;if(!type)return Math.min(size,8);return Math.min(size||(type?Runtime.getNativeFieldSize(type):0),Runtime.QUANTUM_SIZE)}),dynCall:(function(sig,ptr,args){if(args&&args.length){if(!args.splice)args=Array.prototype.slice.call(args);args.splice(0,0,ptr);return Module["dynCall_"+sig].apply(null,args)}else{return Module["dynCall_"+sig].call(null,ptr)}}),functionPointers:[],addFunction:(function(func){for(var i=0;i<Runtime.functionPointers.length;i++){if(!Runtime.functionPointers[i]){Runtime.functionPointers[i]=func;return 2*(1+i)}}throw"Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS."}),removeFunction:(function(index){Runtime.functionPointers[(index-2)/2]=null}),getAsmConst:(function(code,numArgs){if(!Runtime.asmConstCache)Runtime.asmConstCache={};var func=Runtime.asmConstCache[code];if(func)return func;var args=[];for(var i=0;i<numArgs;i++){args.push(String.fromCharCode(36)+i)}var source=Pointer_stringify(code);if(source[0]==='"'){if(source.indexOf('"',1)===source.length-1){source=source.substr(1,source.length-2)}else{abort("invalid EM_ASM input |"+source+"|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. 
}, input) (to input values)")}}try{var evalled=eval("(function(Module, FS) { return function("+args.join(",")+"){ "+source+" } })")(Module,typeof FS!=="undefined"?FS:null)}catch(e){Module.printErr("error in executing inline EM_ASM code: "+e+" on: \n\n"+source+"\n\nwith args |"+args+"| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)");throw e}return Runtime.asmConstCache[code]=evalled}),warnOnce:(function(text){if(!Runtime.warnOnce.shown)Runtime.warnOnce.shown={};if(!Runtime.warnOnce.shown[text]){Runtime.warnOnce.shown[text]=1;Module.printErr(text)}}),funcWrappers:{},getFuncWrapper:(function(func,sig){assert(sig);if(!Runtime.funcWrappers[sig]){Runtime.funcWrappers[sig]={}}var sigCache=Runtime.funcWrappers[sig];if(!sigCache[func]){sigCache[func]=function dynCall_wrapper(){return Runtime.dynCall(sig,func,arguments)}}return sigCache[func]}),UTF8Processor:(function(){var buffer=[];var needed=0;this.processCChar=(function(code){code=code&255;if(buffer.length==0){if((code&128)==0){return String.fromCharCode(code)}buffer.push(code);if((code&224)==192){needed=1}else if((code&240)==224){needed=2}else{needed=3}return""}if(needed){buffer.push(code);needed--;if(needed>0)return""}var c1=buffer[0];var c2=buffer[1];var c3=buffer[2];var c4=buffer[3];var ret;if(buffer.length==2){ret=String.fromCharCode((c1&31)<<6|c2&63)}else if(buffer.length==3){ret=String.fromCharCode((c1&15)<<12|(c2&63)<<6|c3&63)}else{var codePoint=(c1&7)<<18|(c2&63)<<12|(c3&63)<<6|c4&63;ret=String.fromCharCode(((codePoint-65536)/1024|0)+55296,(codePoint-65536)%1024+56320)}buffer.length=0;return ret});this.processJSString=function processJSString(string){string=unescape(encodeURIComponent(string));var ret=[];for(var i=0;i<string.length;i++){ret.push(string.charCodeAt(i))}return ret}}),getCompilerSetting:(function(name){throw"You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work"}),stackAlloc:(function(size){var 
ret=STACKTOP;STACKTOP=STACKTOP+size|0;STACKTOP=STACKTOP+15&-16;return ret}),staticAlloc:(function(size){var ret=STATICTOP;STATICTOP=STATICTOP+size|0;STATICTOP=STATICTOP+15&-16;return ret}),dynamicAlloc:(function(size){var ret=DYNAMICTOP;DYNAMICTOP=DYNAMICTOP+size|0;DYNAMICTOP=DYNAMICTOP+15&-16;if(DYNAMICTOP>=TOTAL_MEMORY)enlargeMemory();return ret}),alignMemory:(function(size,quantum){var ret=size=Math.ceil(size/(quantum?quantum:16))*(quantum?quantum:16);return ret}),makeBigInt:(function(low,high,unsigned){var ret=unsigned?+(low>>>0)+ +(high>>>0)*+4294967296:+(low>>>0)+ +(high|0)*+4294967296;return ret}),GLOBAL_BASE:8,QUANTUM_SIZE:4,__dummy__:0};Module["Runtime"]=Runtime;var __THREW__=0;var ABORT=false;var EXITSTATUS=0;var undef=0;var tempValue,tempInt,tempBigInt,tempInt2,tempBigInt2,tempPair,tempBigIntI,tempBigIntR,tempBigIntS,tempBigIntP,tempBigIntD,tempDouble,tempFloat;var tempI64,tempI64b;var tempRet0,tempRet1,tempRet2,tempRet3,tempRet4,tempRet5,tempRet6,tempRet7,tempRet8,tempRet9;function assert(condition,text){if(!condition){abort("Assertion failed: "+text)}}var globalScope=this;function getCFunc(ident){var func=Module["_"+ident];if(!func){try{func=eval("_"+ident)}catch(e){}}assert(func,"Cannot call unknown function "+ident+" (perhaps LLVM optimizations or closure removed it?)");return func}var cwrap,ccall;((function(){var stack=0;var JSfuncs={"stackSave":(function(){stack=Runtime.stackSave()}),"stackRestore":(function(){Runtime.stackRestore(stack)}),"arrayToC":(function(arr){var ret=Runtime.stackAlloc(arr.length);writeArrayToMemory(arr,ret);return ret}),"stringToC":(function(str){var ret=0;if(str!==null&&str!==undefined&&str!==0){ret=Runtime.stackAlloc((str.length<<2)+1);writeStringToMemory(str,ret)}return ret})};var toC={"string":JSfuncs["stringToC"],"array":JSfuncs["arrayToC"]};ccall=function ccallFunc(ident,returnType,argTypes,args){var func=getCFunc(ident);var cArgs=[];if(args){for(var i=0;i<args.length;i++){var 
converter=toC[argTypes[i]];if(converter){if(stack===0)stack=Runtime.stackSave();cArgs[i]=converter(args[i])}else{cArgs[i]=args[i]}}}var ret=func.apply(null,cArgs);if(returnType==="string")ret=Pointer_stringify(ret);if(stack!==0)JSfuncs["stackRestore"]();return ret};var sourceRegex=/^function\s*\(([^)]*)\)\s*{\s*([^*]*?)[\s;]*(?:return\s*(.*?)[;\s]*)?}$/;function parseJSFunc(jsfunc){var parsed=jsfunc.toString().match(sourceRegex).slice(1);return{arguments:parsed[0],body:parsed[1],returnValue:parsed[2]}}var JSsource={};for(var fun in JSfuncs){if(JSfuncs.hasOwnProperty(fun)){JSsource[fun]=parseJSFunc(JSfuncs[fun])}}cwrap=function cwrap(ident,returnType,argTypes){argTypes=argTypes||[];var cfunc=getCFunc(ident);var numericArgs=argTypes.every((function(type){return type==="number"}));var numericRet=returnType!=="string";if(numericRet&&numericArgs){return cfunc}var argNames=argTypes.map((function(x,i){return"$"+i}));var funcstr="(function("+argNames.join(",")+") {";var nargs=argTypes.length;if(!numericArgs){funcstr+=JSsource["stackSave"].body+";";for(var i=0;i<nargs;i++){var arg=argNames[i],type=argTypes[i];if(type==="number")continue;var convertCode=JSsource[type+"ToC"];funcstr+="var "+convertCode.arguments+" = "+arg+";";funcstr+=convertCode.body+";";funcstr+=arg+"="+convertCode.returnValue+";"}}var cfuncname=parseJSFunc((function(){return cfunc})).returnValue;funcstr+="var ret = "+cfuncname+"("+argNames.join(",")+");";if(!numericRet){var strgfy=parseJSFunc((function(){return Pointer_stringify})).returnValue;funcstr+="ret = "+strgfy+"(ret);"}if(!numericArgs){funcstr+=JSsource["stackRestore"].body+";"}funcstr+="return ret})";return eval(funcstr)}}))();Module["cwrap"]=cwrap;Module["ccall"]=ccall;function 
setValue(ptr,value,type,noSafe){type=type||"i8";if(type.charAt(type.length-1)==="*")type="i32";switch(type){case"i1":HEAP8[ptr>>0]=value;break;case"i8":HEAP8[ptr>>0]=value;break;case"i16":HEAP16[ptr>>1]=value;break;case"i32":HEAP32[ptr>>2]=value;break;case"i64":tempI64=[value>>>0,(tempDouble=value,+Math_abs(tempDouble)>=+1?tempDouble>+0?(Math_min(+Math_floor(tempDouble/+4294967296),+4294967295)|0)>>>0:~~+Math_ceil((tempDouble- +(~~tempDouble>>>0))/+4294967296)>>>0:0)],HEAP32[ptr>>2]=tempI64[0],HEAP32[ptr+4>>2]=tempI64[1];break;case"float":HEAPF32[ptr>>2]=value;break;case"double":HEAPF64[ptr>>3]=value;break;default:abort("invalid type for setValue: "+type)}}Module["setValue"]=setValue;function getValue(ptr,type,noSafe){type=type||"i8";if(type.charAt(type.length-1)==="*")type="i32";switch(type){case"i1":return HEAP8[ptr>>0];case"i8":return HEAP8[ptr>>0];case"i16":return HEAP16[ptr>>1];case"i32":return HEAP32[ptr>>2];case"i64":return HEAP32[ptr>>2];case"float":return HEAPF32[ptr>>2];case"double":return HEAPF64[ptr>>3];default:abort("invalid type for setValue: "+type)}return null}Module["getValue"]=getValue;var ALLOC_NORMAL=0;var ALLOC_STACK=1;var ALLOC_STATIC=2;var ALLOC_DYNAMIC=3;var ALLOC_NONE=4;Module["ALLOC_NORMAL"]=ALLOC_NORMAL;Module["ALLOC_STACK"]=ALLOC_STACK;Module["ALLOC_STATIC"]=ALLOC_STATIC;Module["ALLOC_DYNAMIC"]=ALLOC_DYNAMIC;Module["ALLOC_NONE"]=ALLOC_NONE;function allocate(slab,types,allocator,ptr){var zeroinit,size;if(typeof slab==="number"){zeroinit=true;size=slab}else{zeroinit=false;size=slab.length}var singleType=typeof types==="string"?types:null;var ret;if(allocator==ALLOC_NONE){ret=ptr}else{ret=[_malloc,Runtime.stackAlloc,Runtime.staticAlloc,Runtime.dynamicAlloc][allocator===undefined?ALLOC_STATIC:allocator](Math.max(size,singleType?1:types.length))}if(zeroinit){var ptr=ret,stop;assert((ret&3)==0);stop=ret+(size&~3);for(;ptr<stop;ptr+=4){HEAP32[ptr>>2]=0}stop=ret+size;while(ptr<stop){HEAP8[ptr++>>0]=0}return 
ret}if(singleType==="i8"){if(slab.subarray||slab.slice){HEAPU8.set(slab,ret)}else{HEAPU8.set(new Uint8Array(slab),ret)}return ret}var i=0,type,typeSize,previousType;while(i<size){var curr=slab[i];if(typeof curr==="function"){curr=Runtime.getFunctionIndex(curr)}type=singleType||types[i];if(type===0){i++;continue}if(type=="i64")type="i32";setValue(ret+i,curr,type);if(previousType!==type){typeSize=Runtime.getNativeTypeSize(type);previousType=type}i+=typeSize}return ret}Module["allocate"]=allocate;function demangleAll(text){return text}function jsStackTrace(){var err=new Error;if(!err.stack){try{throw new Error(0)}catch(e){err=e}if(!err.stack){return"(no stack trace available)"}}return err.stack.toString()}function stackTrace(){return demangleAll(jsStackTrace())}Module["stackTrace"]=stackTrace;var PAGE_SIZE=4096;function alignMemoryPage(x){return x+4095&-4096}var HEAP;var HEAP8,HEAPU8,HEAP16,HEAPU16,HEAP32,HEAPU32,HEAPF32,HEAPF64;var STATIC_BASE=0,STATICTOP=0,staticSealed=false;var STACK_BASE=0,STACKTOP=0,STACK_MAX=0;var DYNAMIC_BASE=0,DYNAMICTOP=0;function enlargeMemory(){abort("Cannot enlarge memory arrays. 
Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value "+TOTAL_MEMORY+", (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.")}var TOTAL_STACK=Module["TOTAL_STACK"]||5242880;var TOTAL_MEMORY=Module["TOTAL_MEMORY"]||33554432;var FAST_MEMORY=Module["FAST_MEMORY"]||2097152;var totalMemory=64*1024;while(totalMemory<TOTAL_MEMORY||totalMemory<2*TOTAL_STACK){if(totalMemory<16*1024*1024){totalMemory*=2}else{totalMemory+=16*1024*1024}}if(totalMemory!==TOTAL_MEMORY){Module.printErr("increasing TOTAL_MEMORY to "+totalMemory+" to be compliant with the asm.js spec");TOTAL_MEMORY=totalMemory}assert(typeof Int32Array!=="undefined"&&typeof Float64Array!=="undefined"&&!!(new Int32Array(1))["subarray"]&&!!(new Int32Array(1))["set"],"JS engine does not provide full typed array support");var buffer=new ArrayBuffer(TOTAL_MEMORY);HEAP8=new Int8Array(buffer);HEAP16=new Int16Array(buffer);HEAP32=new Int32Array(buffer);HEAPU8=new Uint8Array(buffer);HEAPU16=new Uint16Array(buffer);HEAPU32=new Uint32Array(buffer);HEAPF32=new Float32Array(buffer);HEAPF64=new Float64Array(buffer);HEAP32[0]=255;assert(HEAPU8[0]===255&&HEAPU8[3]===0,"Typed arrays 2 must be run on a little-endian system");Module["HEAP"]=HEAP;Module["buffer"]=buffer;Module["HEAP8"]=HEAP8;Module["HEAP16"]=HEAP16;Module["HEAP32"]=HEAP32;Module["HEAPU8"]=HEAPU8;Module["HEAPU16"]=HEAPU16;Module["HEAPU32"]=HEAPU32;Module["HEAPF32"]=HEAPF32;Module["HEAPF64"]=HEAPF64;function callRuntimeCallbacks(callbacks){while(callbacks.length>0){var callback=callbacks.shift();if(typeof callback=="function"){callback();continue}var func=callback.func;if(typeof func==="number"){if(callback.arg===undefined){Runtime.dynCall("v",func)}else{Runtime.dynCall("vi",func,[callback.arg])}}else{func(callback.arg===undefined?null:callback.arg)}}}var __ATPRERUN__=[];var __ATINIT__=[];var __ATMAIN__=[];var __ATEXIT__=[];var 
__ATPOSTRUN__=[];var runtimeInitialized=false;var runtimeExited=false;function preRun(){if(Module["preRun"]){if(typeof Module["preRun"]=="function")Module["preRun"]=[Module["preRun"]];while(Module["preRun"].length){addOnPreRun(Module["preRun"].shift())}}callRuntimeCallbacks(__ATPRERUN__)}function ensureInitRuntime(){if(runtimeInitialized)return;runtimeInitialized=true;callRuntimeCallbacks(__ATINIT__)}function preMain(){callRuntimeCallbacks(__ATMAIN__)}function exitRuntime(){callRuntimeCallbacks(__ATEXIT__);runtimeExited=true}function postRun(){if(Module["postRun"]){if(typeof Module["postRun"]=="function")Module["postRun"]=[Module["postRun"]];while(Module["postRun"].length){addOnPostRun(Module["postRun"].shift())}}callRuntimeCallbacks(__ATPOSTRUN__)}function addOnPreRun(cb){__ATPRERUN__.unshift(cb)}Module["addOnPreRun"]=Module.addOnPreRun=addOnPreRun;function addOnInit(cb){__ATINIT__.unshift(cb)}Module["addOnInit"]=Module.addOnInit=addOnInit;function addOnPreMain(cb){__ATMAIN__.unshift(cb)}Module["addOnPreMain"]=Module.addOnPreMain=addOnPreMain;function addOnExit(cb){__ATEXIT__.unshift(cb)}Module["addOnExit"]=Module.addOnExit=addOnExit;function addOnPostRun(cb){__ATPOSTRUN__.unshift(cb)}Module["addOnPostRun"]=Module.addOnPostRun=addOnPostRun;function intArrayFromString(stringy,dontAddNull,length){var ret=(new Runtime.UTF8Processor).processJSString(stringy);if(length){ret.length=length}if(!dontAddNull){ret.push(0)}return ret}Module["intArrayFromString"]=intArrayFromString;function intArrayToString(array){var ret=[];for(var i=0;i<array.length;i++){var chr=array[i];if(chr>255){chr&=255}ret.push(String.fromCharCode(chr))}return ret.join("")}Module["intArrayToString"]=intArrayToString;function writeStringToMemory(string,buffer,dontAddNull){var array=intArrayFromString(string,dontAddNull);var i=0;while(i<array.length){var chr=array[i];HEAP8[buffer+i>>0]=chr;i=i+1}}Module["writeStringToMemory"]=writeStringToMemory;function writeArrayToMemory(array,buffer){for(var 
i=0;i<array.length;i++){HEAP8[buffer+i>>0]=array[i]}}Module["writeArrayToMemory"]=writeArrayToMemory;function writeAsciiToMemory(str,buffer,dontAddNull){for(var i=0;i<str.length;i++){HEAP8[buffer+i>>0]=str.charCodeAt(i)}if(!dontAddNull)HEAP8[buffer+str.length>>0]=0}Module["writeAsciiToMemory"]=writeAsciiToMemory;function unSign(value,bits,ignore){if(value>=0){return value}return bits<=32?2*Math.abs(1<<bits-1)+value:Math.pow(2,bits)+value}function reSign(value,bits,ignore){if(value<=0){return value}var half=bits<=32?Math.abs(1<<bits-1):Math.pow(2,bits-1);if(value>=half&&(bits<=32||value>half)){value=-2*half+value}return value}if(!Math["imul"]||Math["imul"](4294967295,5)!==-5)Math["imul"]=function imul(a,b){var ah=a>>>16;var al=a&65535;var bh=b>>>16;var bl=b&65535;return al*bl+(ah*bl+al*bh<<16)|0};Math.imul=Math["imul"];var Math_abs=Math.abs;var Math_cos=Math.cos;var Math_sin=Math.sin;var Math_tan=Math.tan;var Math_acos=Math.acos;var Math_asin=Math.asin;var Math_atan=Math.atan;var Math_atan2=Math.atan2;var Math_exp=Math.exp;var Math_log=Math.log;var Math_sqrt=Math.sqrt;var Math_ceil=Math.ceil;var Math_floor=Math.floor;var Math_pow=Math.pow;var Math_imul=Math.imul;var Math_fround=Math.fround;var Math_min=Math.min;var runDependencies=0;var runDependencyWatcher=null;var dependenciesFulfilled=null;function addRunDependency(id){runDependencies++;if(Module["monitorRunDependencies"]){Module["monitorRunDependencies"](runDependencies)}}Module["addRunDependency"]=addRunDependency;function removeRunDependency(id){runDependencies--;if(Module["monitorRunDependencies"]){Module["monitorRunDependencies"](runDependencies)}if(runDependencies==0){if(runDependencyWatcher!==null){clearInterval(runDependencyWatcher);runDependencyWatcher=null}if(dependenciesFulfilled){var callback=dependenciesFulfilled;dependenciesFulfilled=null;callback()}}}Module["removeRunDependency"]=removeRunDependency;Module["preloadedImages"]={};Module["preloadedAudios"]={};var 
memoryInitializer=null;STATIC_BASE=8;STATICTOP=STATIC_BASE+6112;__ATINIT__.push();allocate([0,0,1,0,1,2,0,1,2,3,1,2,3,2,3,3,0,1,0,2,1,0,3,2,1,0,3,2,1,3,2,3,0,0,1,0,1,2,0,1,2,3,0,1,2,3,4,0,1,2,3,4,5,0,1,2,3,4,5,6,0,1,2,3,4,5,6,7,1,2,3,4,5,6,7,2,3,4,5,6,7,3,4,5,6,7,4,5,6,7,5,6,7,6,7,7,0,1,0,2,1,0,3,2,1,0,4,3,2,1,0,5,4,3,2,1,0,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,7,6,5,4,3,2,7,6,5,4,3,7,6,5,4,7,6,5,7,6,7,40,45,51,57,64,72,0,0,29,0,0,0,30,0,0,0,31,0,0,0,32,0,0,0,33,0,0,0,33,0,0,0,34,0,0,0,34,0,0,0,35,0,0,0,35,0,0,0,36,0,0,0,36,0,0,0,37,0,0,0,37,0,0,0,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,7,7,7,8,8,8,8,8,8,9,9,9,9,9,9,10,10,10,10,10,10,11,11,11,11,11,11,12,12,0,0,0,0,0,0,0,2,5,9,1,4,8,12,3,7,11,14,6,10,13,15,0,0,0,0,0,0,0,0,0,2,1,3,0,0,0,0,0,2,5,9,14,20,27,35,1,4,8,13,19,26,34,42,3,7,12,18,25,33,41,48,6,11,17,24,32,40,47,53,10,16,23,31,39,46,52,57,15,22,30,38,45,51,56,60,21,29,37,44,50,55,59,62,28,36,43,49,54,58,61,63,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,0,1,2,3,16,17,18,19,4,5,6,7,20,21,22,23,8,9,10,11,24,25,26,27,12,13,14,15,28,29,30,31,32,33,34,35,48,49,50,51,36,37,38,39,52,53,54,55,40,41,42,43,56,57,58,59,44,45,46,47,60,61,62,63,0,1,4,5,2,3,4,5,6,6,8,8,7,7,8,8,1,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,0,2,1,0,0,2,1,0,0,2,1,0,0,2,1,0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,153,200,139,141,157,154,154,154,154,154,154,154,154,184,154,154,154,184,63,139,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,153,138,138,111,141,94,138,182,154,139,139,139,139,139,139,110,110,124,125,140,153,125,127,140,109,111,143,127,111,79,108,123,63,110,110,124,125,140,153,125,127,140,109,111,143,127,111,79,108,123,63,91,171,134,141,111,111,125,110,110,94,124,108,124,107,125,141,179,153,
125,107,125,141,179,153,125,107,125,141,179,153,125,140,139,182,182,152,136,152,136,153,136,139,111,136,139,111,141,111,140,92,137,138,140,152,138,139,153,74,149,92,139,107,122,152,140,179,166,182,140,227,122,197,138,153,136,167,152,152,154,154,154,154,154,154,154,154,154,154,154,154,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,153,185,107,139,126,154,197,185,201,154,154,154,149,154,139,154,154,154,152,139,110,122,95,79,63,31,31,153,153,153,153,140,198,140,198,168,79,124,138,94,153,111,149,107,167,154,139,139,139,139,139,139,125,110,94,110,95,79,125,111,110,78,110,111,111,95,94,108,123,108,125,110,94,110,95,79,125,111,110,78,110,111,111,95,94,108,123,108,121,140,61,154,155,154,139,153,139,123,123,63,153,166,183,140,136,153,154,166,183,140,136,153,154,166,183,140,136,153,154,170,153,123,123,107,121,107,121,167,151,183,140,151,183,140,140,140,154,196,196,167,154,152,167,182,182,134,149,136,153,121,136,137,169,194,166,167,154,167,137,182,107,167,91,122,107,167,154,154,154,154,154,154,154,154,154,154,154,154,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,153,160,107,139,126,154,197,185,201,154,154,154,134,154,139,154,154,183,152,139,154,137,95,79,63,31,31,153,153,153,153,169,198,169,198,168,79,224,167,122,153,111,149,92,167,154,139,139,139,139,139,139,125,110,124,110,95,94,125,111,111,79,125,126,111,111,79,108,123,93,125,110,124,110,95,94,125,111,111,79,125,126,111,111,79,108,123,93,121,140,61,154,170,154,139,153,139,123,123,63,124,166,183,140,136,153,154,166,183,140,136,153,154,166,183,140,136,153,154,170,153,138,138,122,121,122,121,167,151,183,140,151,183,140,140,140,154,196,167,167,154,152,167,182,182,134,149,136,153,121,136,122,169,208,166,167,154,152,167,182,107,167,91,107,107,167,154,154,154,154,154,154,154,154,154,154,154,154,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,7,8,9,10,11,12,13,14,15,16,17,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,2,2
,2,2,3,3,3,3,4,4,4,5,5,6,6,7,8,9,10,11,13,14,16,18,20,22,24,0,0,29,30,31,32,33,33,34,34,35,35,36,36,37,37,0,0,104,101,118,99,0,0,0,0,128,5,0,0,0,0,0,0,0,0,0,0,53,54,50,72,34,48,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,176,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,26,10,1,0,0,0,0,0,1,2,2,2,2,3,5,7,8,10,12,13,15,17,18,19,20,21,22,23,23,24,24,25,25,26,27,27,28,28,29,29,30,31,0,0,0,0,0,7,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,32,26,21,17,13,9,5,2,0,254,251,247,243,239,235,230,224,230,235,239,243,247,251,254,0,2,5,9,13,17,21,26,32,0,0,0,0,0,0,0,0,240,154,249,114,252,138,253,30,254,122,254,197,254,0,255,197,254,122,254,30,254,138,253,114,252,154,249,0,240,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,64,90,90,90,89,88,87,85,83,82,80,78,75,73,70,67,64,61,57,54,50,46,43,38,36,31,25,22,18,13,9,4,1,2,0,3,4,0,0,0,255,0,1,0,0,255,0,1,255,255,1,1,1,255,255,1,16,16,16,16,17,18,21,24,16,16,16,16,17,19,22,25,16,16,17,18,20,22,25,29,16,16,18,21,24,27,31,36,17,17,20,24,30,35,41,47,18,19,22,27,35,44,54,65,21,22,25,31,41,54,70,88,24,25,29,36,47,65,88,115,16,16,16,16,17,18,20,24,16,16,16,17,18,20,24,25,16,16,17,18,20,24,25,28,16,17,18,20,24,25,28,33,17,18,20,24,25,28,33,41,18,20,24,25,28,33,41,54,20,24,25,28,33,41,54,71,24,25,28,33,41,54,71,91,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,176,208,240,128,167,197,227,128,158,187,216,123,150,178,205,116,142,169,195,111,135,160,185,105,128,152,175,100,122,144,166,95,116,137,158,90,110,130,150,85,104,123,142,81,99,117,135,77,94,111,128,73,89,105,122,69,85,100,116,66,80,95,110,62,76,90,104,59,72,86,99,56,69,81,94,53,65,77,89,51,62,73,85,48,59,69,80,46,56,66,76,43,53,63,72,41,50,59,69,39,48,56,65,37,45,54,62,35,43,51,59,33,41,48,56,32,39,46,53,30,37,43,50,29,35,41,48,27,33,39,45,26,31,37,43,24,30,35,41,23,28,33,39,22,27,32,37,21,26,30,35,20,24,29,33,19,23,27,31,18,22,26,30,17,21,25,28,16,20,23,27,15,19,22,25,14,18,21,24,14,17,20,23,13,16,19,22,12,15,18,21,12,14,17,20,11,14,16,19,11,13,15,18,10,12,15,17,10,12,14,16,9,11,13,15,9,11,12,14,8,10,12,14,8,9,11,13,7,9,11,12,7,9,10,12,7,8,10,11,6,8,9,11,6,7,9,10,6,7,8,9,2,2,2,2,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,62,63,0,0,1,2,2,4,4,5,6,7,8,9,9,11,11,12,13,13,15,15,16,16,18,18,19,19,21,21,22,22,23,24,24,25,26,26,27,27,28,29,29,30,30,30,31,32,32,33,33,33,34,34,35,35,35,36,36,36,37,37,37,38,38,63,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,6,6,6,6,7,7,7,7,8,8,8,0,255,255,255,127,0,0,0,0,0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,0,0,0,0,0,0,0,0,3,1,1,0,36,56,37,56,38,56,0,0,0,0,0,0,4,0,0,0,0,0,0,0,3,1,0,16,36,56,37,56,38,56,0,0,0,0,0,0,5,0,0,0,0,0,0,0,3,0,0
,16,36,56,37,56,38,56,0,0,0,0,0,0,8,0,0,0,0,0,0,0,1,0,0,0,36,56,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,3,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"i8",ALLOC_NONE,Runtime.GLOBAL_BASE);var tempDoublePtr=Runtime.alignMemory(allocate(12,"i8",ALLOC_STATIC),8);assert(tempDoublePtr%8==0);function copyTempFloat(ptr){HEAP8[tempDoublePtr]=HEAP8[ptr];HEAP8[tempDoublePtr+1]=HEAP8[ptr+1];HEAP8[tempDoublePtr+2]=HEAP8[ptr+2];HEAP8[tempDoublePtr+3]=HEAP8[ptr+3]}function copyTempDouble(ptr){HEAP8[tempDoublePtr]=HEAP8[ptr];HEAP8[tempDoublePtr+1]=HEAP8[ptr+1];HEAP8[tempDoublePtr+2]=HEAP8[ptr+2];HEAP8[tempDoublePtr+3]=HEAP8[ptr+3];HEAP8[tempDoublePtr+4]=HEAP8[ptr+4];HEAP8[tempDoublePtr+5]=HEAP8[ptr+5];HEAP8[tempDoublePtr+6]=HEAP8[ptr+6];HEAP8[tempDoublePtr+7]=HEAP8[ptr+7]}Module["_bitshift64Ashr"]=_bitshift64Ashr;Module["_i64Subtract"]=_i64Subtract;function _sbrk(bytes){var self=_sbrk;if(!self.called){DYNAMICTOP=alignMemoryPage(DYNAMICTOP);self.called=true;assert(Runtime.dynamicAlloc);self.alloc=Runtime.dynamicAlloc;Runtime.dynamicAlloc=(function(){abort("cannot dynamically allocate, sbrk now has control")})}var ret=DYNAMICTOP;if(bytes!=0)self.alloc(bytes);return ret}Module["_i64Add"]=_i64Add;Module["_strlen"]=_strlen;Module["_memset"]=_memset;Module["_bitshift64Shl"]=_bitshift64Shl;function _abort(){Module["abort"]()}Module["_llvm_bswap_i32"]=_llvm_bswap_i32;function _rint(x){if(Math.abs(x%1)!==.5)return Math.round(x);return x+x%2+(x<0?1:-1)}function _lrint(){return _rint.apply(null,arguments)}function _emscripten_memcpy_big(dest,src,num){HEAPU8.set(HEAPU8.subarray(src,src+num),dest);return dest}Module["_memcpy"]=_memcpy;STACK_BASE=STACKTOP=Runtime.alignMemory(STATICTOP);staticSealed=true;STACK_MAX=STACK_BASE+TOTAL_STACK;DYNAMIC_BASE=DYNAMICTOP=Runtime.alignMemory(STACK_MAX);assert(DYNAMIC_BASE<TOTAL_MEMORY,"TOTAL_MEMORY not big enough for stack");var 
ctlz_i8=allocate([8,7,6,6,5,5,5,5,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"i8",ALLOC_DYNAMIC);var cttz_i8=allocate([8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0],"i8",ALLOC_DYNAMIC);function invoke_iiii(index,a1,a2,a3){try{return Module["dynCall_iiii"](index,a1,a2,a3)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiii(index,a1,a2,a3,a4,a5){try{Module["dynCall_viiiii"](index,a1,a2,a3,a4,a5)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_vi(index,a1){try{Module["dynCall_vi"](index,a1)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_vii(index,a1,a2){try{Module["dynCall_vii"](index,a1,a2)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iiiiiii(index,a1,a2,a3,a4,a5,a6){try{return Module["dynCall_iiiiiii"](index,a1,a2,a3,a4,a5,a6)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function 
invoke_viiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12){try{Module["dynCall_viiiiiiiiiiii"](index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_ii(index,a1){try{return Module["dynCall_ii"](index,a1)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viii(index,a1,a2,a3){try{Module["dynCall_viii"](index,a1,a2,a3)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9){try{Module["dynCall_viiiiiiiii"](index,a1,a2,a3,a4,a5,a6,a7,a8,a9)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iiiii(index,a1,a2,a3,a4){try{return Module["dynCall_iiiii"](index,a1,a2,a3,a4)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiii(index,a1,a2,a3,a4,a5,a6){try{Module["dynCall_viiiiii"](index,a1,a2,a3,a4,a5,a6)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iii(index,a1,a2){try{return Module["dynCall_iii"](index,a1,a2)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iiiiii(index,a1,a2,a3,a4,a5){try{return Module["dynCall_iiiiii"](index,a1,a2,a3,a4,a5)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiiii(index,a1,a2,a3,a4,a5,a6,a7){try{Module["dynCall_viiiiiii"](index,a1,a2,a3,a4,a5,a6,a7)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw 
e;asm["setThrew"](1,0)}}Module.asmGlobalArg={"Math":Math,"Int8Array":Int8Array,"Int16Array":Int16Array,"Int32Array":Int32Array,"Uint8Array":Uint8Array,"Uint16Array":Uint16Array,"Uint32Array":Uint32Array,"Float32Array":Float32Array,"Float64Array":Float64Array};Module.asmLibraryArg={"abort":abort,"assert":assert,"min":Math_min,"invoke_iiii":invoke_iiii,"invoke_viiiii":invoke_viiiii,"invoke_vi":invoke_vi,"invoke_vii":invoke_vii,"invoke_iiiiiii":invoke_iiiiiii,"invoke_viiiiiiiiiiii":invoke_viiiiiiiiiiii,"invoke_ii":invoke_ii,"invoke_viii":invoke_viii,"invoke_viiiiiiiii":invoke_viiiiiiiii,"invoke_iiiii":invoke_iiiii,"invoke_viiiiii":invoke_viiiiii,"invoke_iii":invoke_iii,"invoke_iiiiii":invoke_iiiiii,"invoke_viiiiiii":invoke_viiiiiii,"_sbrk":_sbrk,"_lrint":_lrint,"_abort":_abort,"_emscripten_memcpy_big":_emscripten_memcpy_big,"_rint":_rint,"STACKTOP":STACKTOP,"STACK_MAX":STACK_MAX,"tempDoublePtr":tempDoublePtr,"ABORT":ABORT,"cttz_i8":cttz_i8,"ctlz_i8":ctlz_i8,"NaN":NaN,"Infinity":Infinity};// EMSCRIPTEN_START_ASM
+var asm=(function(global,env,buffer) {
+"use asm";var a=new global.Int8Array(buffer);var b=new global.Int16Array(buffer);var c=new global.Int32Array(buffer);var d=new global.Uint8Array(buffer);var e=new global.Uint16Array(buffer);var f=new global.Uint32Array(buffer);var g=new global.Float32Array(buffer);var h=new global.Float64Array(buffer);var i=env.STACKTOP|0;var j=env.STACK_MAX|0;var k=env.tempDoublePtr|0;var l=env.ABORT|0;var m=env.cttz_i8|0;var n=env.ctlz_i8|0;var o=0;var p=0;var q=0;var r=0;var s=+env.NaN,t=+env.Infinity;var u=0,v=0,w=0,x=0,y=0.0,z=0,A=0,B=0,C=0.0;var D=0;var E=0;var F=0;var G=0;var H=0;var I=0;var J=0;var K=0;var L=0;var M=0;var N=global.Math.floor;var O=global.Math.abs;var P=global.Math.sqrt;var Q=global.Math.pow;var R=global.Math.cos;var S=global.Math.sin;var T=global.Math.tan;var U=global.Math.acos;var V=global.Math.asin;var W=global.Math.atan;var X=global.Math.atan2;var Y=global.Math.exp;var Z=global.Math.log;var _=global.Math.ceil;var $=global.Math.imul;var aa=env.abort;var ba=env.assert;var ca=env.min;var da=env.invoke_iiii;var ea=env.invoke_viiiii;var fa=env.invoke_vi;var ga=env.invoke_vii;var ha=env.invoke_iiiiiii;var ia=env.invoke_viiiiiiiiiiii;var ja=env.invoke_ii;var ka=env.invoke_viii;var la=env.invoke_viiiiiiiii;var ma=env.invoke_iiiii;var na=env.invoke_viiiiii;var oa=env.invoke_iii;var pa=env.invoke_iiiiii;var qa=env.invoke_viiiiiii;var ra=env._sbrk;var sa=env._lrint;var ta=env._abort;var ua=env._emscripten_memcpy_big;var va=env._rint;var wa=0.0;
+// EMSCRIPTEN_START_FUNCS
+function La(a){a=a|0;var b=0;b=i;i=i+a|0;i=i+15&-16;return b|0}function Ma(){return i|0}function Na(a){a=a|0;i=a}function Oa(a,b){a=a|0;b=b|0;if(!o){o=a;p=b}}function Pa(b){b=b|0;a[k>>0]=a[b>>0];a[k+1>>0]=a[b+1>>0];a[k+2>>0]=a[b+2>>0];a[k+3>>0]=a[b+3>>0]}function Qa(b){b=b|0;a[k>>0]=a[b>>0];a[k+1>>0]=a[b+1>>0];a[k+2>>0]=a[b+2>>0];a[k+3>>0]=a[b+3>>0];a[k+4>>0]=a[b+4>>0];a[k+5>>0]=a[b+5>>0];a[k+6>>0]=a[b+6>>0];a[k+7>>0]=a[b+7>>0]}function Ra(a){a=a|0;D=a}function Sa(){return D|0}function Ta(b,d){b=b|0;d=d|0;var e=0,f=0;e=i;if(!(a[(c[b+204>>2]|0)+43>>0]|0)){i=e;return}f=c[(c[b+200>>2]|0)+13128>>2]|0;d=(d|0)%(f|0)|0;if((d|0)!=2?!((f|0)==2&(d|0)==0):0){i=e;return}fe(c[b+152>>2]|0,c[b+136>>2]|0,199)|0;i=e;return}function Ua(b,d){b=b|0;d=d|0;var e=0,f=0,g=0,h=0,j=0;e=i;g=b+204|0;f=c[g>>2]|0;if((c[(c[f+1668>>2]|0)+(c[b+2500>>2]<<2)>>2]|0)==(d|0)){Va(b);f=b+1449|0;if(a[f>>0]|0){j=c[g>>2]|0;if((a[j+42>>0]|0)!=0?(j=c[j+1676>>2]|0,(c[j+(d<<2)>>2]|0)!=(c[j+(d+ -1<<2)>>2]|0)):0)h=5}else h=5;if((h|0)==5)Wa(b);if(a[b+1448>>0]|0){i=e;return}if(!(a[(c[g>>2]|0)+43>>0]|0)){i=e;return}g=c[(c[b+200>>2]|0)+13128>>2]|0;if((d|0)%(g|0)|0){i=e;return}if((g|0)==1){Wa(b);i=e;return}if((a[f>>0]|0)!=1){i=e;return}fe(c[b+136>>2]|0,c[b+152>>2]|0,199)|0;i=e;return}if((a[f+42>>0]|0)!=0?(j=c[f+1676>>2]|0,(c[j+(d<<2)>>2]|0)!=(c[j+(d+ -1<<2)>>2]|0)):0){if((a[b+141>>0]|0)==1)Xa(c[b+136>>2]|0);else Va(b);Wa(b);f=c[g>>2]|0}if(!(a[f+43>>0]|0)){i=e;return}f=b+200|0;if((d|0)%(c[(c[f>>2]|0)+13128>>2]|0)|0){i=e;return}d=b+136|0;Ya((c[d>>2]|0)+224|0)|0;if((a[b+141>>0]|0)==1)Xa(c[d>>2]|0);else Va(b);if((c[(c[f>>2]|0)+13128>>2]|0)==1){Wa(b);i=e;return}else{fe(c[d>>2]|0,c[b+152>>2]|0,199)|0;i=e;return}}function Va(a){a=a|0;var b=0,d=0,e=0,f=0,g=0,h=0;b=i;e=a+136|0;a=c[e>>2]|0;d=a+204|0;ad(d,1);g=a+212|0;f=c[g>>2]|0;h=0-f&7;if(h){ad(d,h);f=c[g>>2]|0}Yc((c[e>>2]|0)+224|0,(c[d>>2]|0)+((f|0)/8|0)|0,(7-f+(c[a+216>>2]|0)|0)/8|0);i=b;return}function Wa(b){b=b|0;var 
e=0,f=0,g=0,h=0,j=0,k=0,l=0;g=i;f=c[b+1440>>2]|0;e=2-f|0;e=(a[b+2060>>0]|0)==0|(f|0)==2?e:e^3;f=b+2112|0;b=b+136|0;h=0;do{j=d[680+(e*199|0)+h>>0]|0;l=a[f>>0]|0;k=l<<24>>24;if(l<<24>>24<0)k=0;else k=(k|0)>51?51:k;j=((j<<3&120)+ -16+(($(k,((j>>>4)*5|0)+ -45|0)|0)>>4)<<1)+ -127|0;j=j>>31^j;if((j|0)>124)j=j&1|124;a[(c[b>>2]|0)+h>>0]=j;h=h+1|0}while((h|0)!=199);a[(c[b>>2]|0)+199>>0]=0;a[(c[b>>2]|0)+200>>0]=0;a[(c[b>>2]|0)+201>>0]=0;a[(c[b>>2]|0)+202>>0]=0;i=g;return}function Xa(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;d=a+224|0;e=c[a+240>>2]|0;f=c[d>>2]|0;e=(f&1|0)==0?e:e+ -1|0;e=(f&511|0)==0?e:e+ -1|0;a=(c[a+244>>2]|0)-e|0;if((a|0)<0){i=b;return}Yc(d,e,a);i=b;return}function Ya(a){a=a|0;var b=0,d=0,e=0,f=0,g=0;b=i;f=a+4|0;d=c[f>>2]|0;e=d+ -2|0;c[f>>2]=e;g=c[a>>2]|0;if((g|0)>=(e<<17|0)){g=(c[a+16>>2]|0)-(c[a+12>>2]|0)|0;i=b;return g|0}d=(d+ -258|0)>>>31;c[f>>2]=e<<d;g=g<<d;c[a>>2]=g;if(g&65535){g=0;i=b;return g|0}yb(a);g=0;i=b;return g|0}function Za(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=_a(a+224|0,a)|0;i=b;return a|0}function _a(b,e){b=b|0;e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0;f=i;h=d[e>>0]|0;j=b+4|0;k=c[j>>2]|0;l=d[2880+((k<<1&384)+(h|512))>>0]|0;k=k-l|0;m=k<<17;n=c[b>>2]|0;g=m-n>>31;c[b>>2]=n-(g&m);c[j>>2]=(g&l-k)+k;h=g^h;a[e>>0]=a[h+4032>>0]|0;e=h&1;h=c[j>>2]|0;g=d[2880+h>>0]|0;c[j>>2]=h<<g;g=c[b>>2]<<g;c[b>>2]=g;if(g&65535){i=f;return e|0}j=b+16|0;h=c[j>>2]|0;c[b>>2]=(((d[h+1>>0]|0)<<1|(d[h>>0]|0)<<9)+ -65535<<7-(d[2880+((g+ -1^g)>>15)>>0]|0))+g;if(h>>>0>=(c[b+20>>2]|0)>>>0){i=f;return e|0}c[j>>2]=h+2;i=f;return e|0}function $a(a){a=a|0;var b=0,d=0;b=i;a=a+136|0;d=c[a>>2]|0;if(!(_a(d+224|0,d+1|0)|0)){d=0;i=b;return d|0}d=(ab((c[a>>2]|0)+224|0)|0)==0;d=d?1:2;i=b;return d|0}function ab(a){a=a|0;var b=0,d=0,e=0;b=i;d=c[a>>2]<<1;c[a>>2]=d;if(!(d&65534)){yb(a);d=c[a>>2]|0}e=c[a+4>>2]<<17;if((d|0)<(e|0)){e=0;i=b;return e|0}c[a>>2]=d-e;e=1;i=b;return e|0}function bb(a){a=a|0;var 
b=0,d=0;b=i;d=a+136|0;a=(ab((c[d>>2]|0)+224|0)|0)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=ab((c[d>>2]|0)+224|0)|0|a;i=b;return a|0}function cb(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;d=c[(c[a+200>>2]|0)+52>>2]|0;d=(d|0)>10?31:(1<<d+ -5)+ -1|0;e=a+136|0;if((d|0)>0)a=0;else{f=0;i=b;return f|0}while(1){f=a+1|0;if(!(ab((c[e>>2]|0)+224|0)|0)){d=4;break}if((f|0)<(d|0))a=f;else{a=f;d=4;break}}if((d|0)==4){i=b;return a|0}return 0}function db(a){a=a|0;var b=0;b=i;a=ab((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function eb(a){a=a|0;var b=0,d=0;b=i;d=a+136|0;a=(ab((c[d>>2]|0)+224|0)|0)<<1;a=ab((c[d>>2]|0)+224|0)|0|a;i=b;return a|0}function fb(a){a=a|0;var b=0;b=i;a=Ya((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function gb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+5|0)|0;i=b;return a|0}function hb(a){a=a|0;var b=0,d=0,e=0,f=0,g=0,h=0;b=i;a=a+136|0;g=9;e=0;while(1){h=c[a>>2]|0;f=e;e=e+1|0;if(!(_a(h+224|0,h+g|0)|0)){e=f;g=0;break}if((e|0)>=5){f=0;g=0;d=4;break}else g=10}do if((d|0)==4){while(1){d=0;if(!(ab((c[a>>2]|0)+224|0)|0)){d=5;break}g=(1<<f)+g|0;f=f+1|0;if((f|0)<31)d=4;else break}if((d|0)==5)if(!f)break;do{f=f+ -1|0;g=((ab((c[a>>2]|0)+224|0)|0)<<f)+g|0}while((f|0)!=0)}while(0);i=b;return g+e|0}function ib(a){a=a|0;var b=0;b=i;a=ab((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function jb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+176|0)|0;i=b;return a|0}function kb(b){b=b|0;var d=0,e=0,f=0,g=0,h=0;d=i;e=a[(c[b+204>>2]|0)+1633>>0]|0;e=(e&255)<5?5:e&255;f=b+136|0;if(!e){g=0;i=d;return g|0}else b=0;while(1){h=c[f>>2]|0;g=b+1|0;if(!(_a(h+224|0,h+177|0)|0)){e=4;break}if((g|0)<(e|0))b=g;else{b=g;e=4;break}}if((e|0)==4){i=d;return b|0}return 0}function lb(b,e,f,g){b=b|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0;j=i;k=c[b+200>>2]|0;n=(1<<c[k+13080>>2])+ -1|0;l=n&g;m=c[k+13064>>2]|0;h=f>>m;m=g>>m;g=c[b+136>>2]|0;if((a[g+308>>0]|0)==0?(n&f|0)==0:0)f=0;else{f=h+ 
-1+($(c[k+13140>>2]|0,m)|0)|0;f=d[(c[b+4336>>2]|0)+f>>0]|0}if((a[g+309>>0]|0)==0&(l|0)==0){n=0;m=(f|0)>(e|0);m=m&1;n=(n|0)>(e|0);n=n&1;f=g+224|0;m=m|2;n=m+n|0;n=g+n|0;n=_a(f,n)|0;i=j;return n|0}n=($(c[k+13140>>2]|0,m+ -1|0)|0)+h|0;n=d[(c[b+4336>>2]|0)+n>>0]|0;m=(f|0)>(e|0);m=m&1;n=(n|0)>(e|0);n=n&1;f=g+224|0;m=m|2;n=m+n|0;n=g+n|0;n=_a(f,n)|0;i=j;return n|0}function mb(a,b){a=a|0;b=b|0;var d=0;d=i;b=c[a+136>>2]|0;b=(_a(b+224|0,b+13|0)|0)==0;i=d;return(b?3:0)|0}function nb(a){a=a|0;var b=0;b=i;a=Ya((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function ob(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+17|0)|0;i=b;return a|0}function pb(a){a=a|0;var b=0,d=0,e=0;b=i;e=a+136|0;d=0;while(1){a=d+1|0;if(!(ab((c[e>>2]|0)+224|0)|0)){a=d;d=4;break}if((a|0)<2)d=a;else{d=4;break}}if((d|0)==4){i=b;return a|0}return 0}function qb(a){a=a|0;var b=0,d=0;b=i;d=a+136|0;a=(ab((c[d>>2]|0)+224|0)|0)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=(ab((c[d>>2]|0)+224|0)|0|a)<<1;a=ab((c[d>>2]|0)+224|0)|0|a;i=b;return a|0}function rb(a){a=a|0;var b=0,d=0;b=i;a=a+136|0;d=c[a>>2]|0;if(!(_a(d+224|0,d+18|0)|0)){d=4;i=b;return d|0}d=(ab((c[a>>2]|0)+224|0)|0)<<1;d=ab((c[a>>2]|0)+224|0)|0|d;i=b;return d|0}function sb(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+(42-b)|0)|0;i=d;return a|0}function tb(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+(b+42)|0)|0;i=d;return a|0}function ub(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+((b|0)==0|40)|0)|0;i=d;return a|0}function vb(a,b){a=a|0;b=b|0;var d=0,e=0,f=0,g=0;d=i;a=a+136|0;f=(b<<2)+166|0;e=0;while(1){g=c[a>>2]|0;b=e+1|0;if(!(_a(g+224|0,g+(f+e)|0)|0)){b=e;a=4;break}if((b|0)<4)e=b;else{a=4;break}}if((a|0)==4){i=d;return b|0}return 0}function wb(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=_a(a+224|0,a+(b+174)|0)|0;i=d;return a|0}function xb(f,g,h,j,k,l){f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var 
m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0,qa=0,ra=0,sa=0,ta=0,ua=0,va=0,wa=0,xa=0,ya=0,Ba=0,Ca=0,Da=0,Fa=0,Ga=0,Ha=0,Ia=0,Ja=0,Ka=0,La=0,Ma=0,Na=0;n=i;i=i+96|0;v=n+24|0;s=n+8|0;t=n;u=f+136|0;o=c[u>>2]|0;p=c[f+160>>2]|0;m=c[p+(l<<2)+32>>2]|0;r=f+200|0;T=c[r>>2]|0;h=$(h>>c[T+(l<<2)+13180>>2],m)|0;h=(c[p+(l<<2)>>2]|0)+(h+(g>>c[T+(l<<2)+13168>>2]<<c[T+56>>2]))|0;T=(l|0)!=0;g=o+320|0;p=T?o+11680|0:g;x=v+0|0;q=x+64|0;do{a[x>>0]=0;x=x+1|0}while((x|0)<(q|0));S=1<<j;y=(l|0)==0;x=c[(y?o+288|0:o+292|0)>>2]|0;q=S<<j;ce(p|0,0,q<<1|0)|0;z=o+31256|0;if(!(a[z>>0]|0)){A=a[o+272>>0]|0;C=f+204|0;Ma=c[C>>2]|0;if((a[Ma+21>>0]|0)!=0?(d[Ma+1629>>0]|0)>=(j|0):0){F=c[u>>2]|0;F=_a(F+224|0,F+(T&1|46)|0)|0}else F=0;if(y){B=c[r>>2]|0;G=B;B=(c[B+13192>>2]|0)+A|0}else{B=c[C>>2]|0;if((l|0)==1)B=(c[f+2072>>2]|0)+(c[B+28>>2]|0)+(a[o+302>>0]|0)|0;else B=(c[f+2076>>2]|0)+(c[B+32>>2]|0)+(a[o+303>>0]|0)|0;B=B+A|0;G=c[r>>2]|0;A=c[G+13192>>2]|0;E=0-A|0;if((B|0)>=(E|0))E=(B|0)>57?57:B;do if((c[G+4>>2]|0)==1){if((E|0)>=30)if((E|0)>43){E=E+ -6|0;break}else{E=c[176+(E+ -30<<2)>>2]|0;break}}else E=(E|0)>51?51:E;while(0);B=A+E|0}A=(c[G+52>>2]|0)+j|0;E=A+ -5|0;A=1<<A+ -6;B=d[168+(d[232+B>>0]|0)>>0]<<d[312+B>>0];if((a[G+634>>0]|0)!=0?!((F|0)!=0&(j|0)>2):0){H=c[C>>2]|0;G=(a[H+68>>0]|0)==0?G+635|0:H+69|0;H=((c[o+31244>>2]|0)!=1?3:0)+l|0;C=G+((j+ -2|0)*384|0)+(H<<6)|0;if((j|0)>3)ia=a[G+((j+ -4|0)*6|0)+H+1536>>0]|0;else ia=16}else{ia=16;C=0}}else{A=0;ia=0;B=0;C=0;E=0;F=0}I=(j<<1)+ -1|0;if(y){G=(j*3|0)+ -6+(j+ -1>>2)|0;J=j+1>>2}else{G=15;J=j+ -2|0}if((I|0)>0){L=G+52|0;H=0;while(1){Ma=c[u>>2]|0;K=H+1|0;if(!(_a(Ma+224|0,Ma+(L+(H>>J))|0)|0))break;if((K|0)<(I|0))H=K;else{H=K;break}}K=G+70|0;G=0;while(1){Ma=c[u>>2]|0;L=G+1|0;if(!(_a(Ma+224|0,Ma+(K+(G>>J))|0)|0))break;if((L|0)<(I|0))G=L;else{G=L;break}}if((H|0)>3){I=(H>>1)+ 
-1|0;K=ab((c[u>>2]|0)+224|0)|0;if((I|0)>1){J=1;do{K=ab((c[u>>2]|0)+224|0)|0|K<<1;J=J+1|0}while((J|0)!=(I|0))}H=K+((H&1|2)<<I)|0}if((G|0)>3){J=(G>>1)+ -1|0;K=ab((c[u>>2]|0)+224|0)|0;if((J|0)>1){I=1;do{K=ab((c[u>>2]|0)+224|0)|0|K<<1;I=I+1|0}while((I|0)!=(J|0))}I=H;L=K+((G&1|2)<<J)|0}else{I=H;L=G}}else{I=0;L=0}do if((k|0)!=2){M=I>>2;N=L>>2;if((k|0)==1){G=I;H=L;P=d[536+(L<<3)+I>>0]|0;J=488;K=504;L=496;I=520;break}else if(k){J=I;K=L;w=49;break}P=d[(I&3)+(392+((L&3)<<2))>>0]|0;if((S|0)==8){G=I;H=L;P=(d[416+(N<<1)+M>>0]<<4)+P|0;J=496;K=8;L=488;I=24;break}else if((S|0)==16){G=I;H=L;P=(d[392+(N<<2)+M>>0]<<4)+P|0;J=8;K=8;L=24;I=24;break}else if((S|0)==4){G=I;H=L;J=408;K=8;L=408;I=24;break}else{G=I;H=L;P=(d[424+(N<<3)+M>>0]<<4)+P|0;J=40;K=8;L=104;I=24;break}}else{J=L;K=I;M=L>>2;N=I>>2;w=49}while(0);if((w|0)==49){G=J;H=K;P=d[536+(J<<3)+K>>0]|0;J=496;K=520;L=488;I=504}O=P+1|0;P=P>>4;if((P|0)>-1){Q=(1<<j+ -2)+ -1|0;R=(l|0)>0;l=R?90:88;S=S+ -1>>2;W=T?27:0;U=(j|0)==2;T=W+3|0;V=(j|0)==3;Z=(k|0)==0?9:15;_=y?0:27;da=(F|0)==0;aa=y?42:43;fa=y?40:41;ba=y?2:0;ea=o+31244|0;ca=x&-17;ga=f+204|0;Y=((B|0)<0)<<31>>31;X=((A|0)<0)<<31>>31;ha=(F|0)!=0&(j|0)>2;k=(j|0)<4;ia=ia&255;ma=(y&1)<<1;ja=ma|1;ra=1;ka=P;oa=0;xa=16;while(1){na=ka<<4;wa=a[J+ka>>0]|0;ta=wa&255;va=a[L+ka>>0]|0;ua=va&255;la=(ka|0)>0;if((ka|0)<(P|0)&la){if((ta|0)<(Q|0))pa=d[v+(ta+1<<3)+ua>>0]|0;else pa=0;if((ua|0)<(Q|0))pa=(d[ua+1+(v+(ta<<3))>>0]|0)+pa|0;ya=c[u>>2]|0;ya=(_a(ya+224|0,ya+(((pa|0)>1?1:pa)+l)|0)|0)&255;a[v+(ta<<3)+ua>>0]=ya;qa=1}else{if(!((ta|0)==(M|0)&(ua|0)==(N|0)))if(!(wa<<24>>24))ya=va<<24>>24==0&1;else ya=0;else ya=1;a[v+(ta<<3)+ua>>0]=ya;qa=0}na=O-na|0;pa=(ka|0)==(P|0);if(pa){a[s>>0]=na+255;sa=na+ -2|0;na=1}else{sa=15;na=0}if((ta|0)<(S|0))Ba=(a[v+(ta+1<<3)+ua>>0]|0)!=0&1;else Ba=0;if((ua|0)<(S|0))Ba=((a[ua+1+(v+(ta<<3))>>0]|0)!=0&1)<<1|Ba;do if(ya<<24>>24!=0&(sa|0)>-1){if(!(c[(c[r>>2]|0)+13100>>2]|0))if(U){wa=600;va=W}else w=73;else if(da){ya=(a[z>>0]|0)!=0;if(ya|U){wa=ya?664:600;va=ya?fa:W}else 
w=73}else{wa=664;va=fa}do if((w|0)==73){w=0;ya=(Ba<<4)+616|0;if(!y){wa=ya;va=W+(V?9:12)|0;break}va=(va|wa)<<24>>24==0?W:T;if(V){wa=ya;va=va+Z|0;break}else{wa=ya;va=va+21|0;break}}while(0);if((sa|0)>0){ya=va+92|0;do{Ma=c[u>>2]|0;if(_a(Ma+224|0,Ma+(ya+(d[wa+((d[I+sa>>0]<<2)+(d[K+sa>>0]|0))>>0]|0))|0)|0){a[s+(na&255)>>0]=sa;qa=0;na=na+1<<24>>24}sa=sa+ -1|0}while((sa|0)>0)}if(qa){a[s+(na&255)>>0]=0;qa=na+1<<24>>24;break}if(c[(c[r>>2]|0)+13100>>2]|0)if(da?(a[z>>0]|0)==0:0)w=87;else qa=aa;else w=87;if((w|0)==87){w=0;qa=(ka|0)==0?_:va+2|0}Ma=c[u>>2]|0;if((_a(Ma+224|0,Ma+(qa+92)|0)|0)==1){a[s+(na&255)>>0]=0;qa=na+1<<24>>24}else qa=na}else qa=na;while(0);na=qa&255;a:do if(qa<<24>>24){qa=la?ba:0;if(!(c[(c[r>>2]|0)+13116>>2]|0))Ga=0;else{if(da?(a[z>>0]|0)==0:0)oa=ma;else oa=ja;Ga=(d[o+oa+199>>0]|0)>>>2}sa=qa|(ra|0)==0&(pa^1)&1;Da=a[s>>0]|0;va=Da&255;qa=na>>>0>8?8:na;if(!qa){pa=-1;ra=1}else{ya=sa<<2;pa=-1;ra=1;wa=0;do{Ma=ra+ya|0;La=c[u>>2]|0;Ma=(_a(La+224|0,La+((R?Ma+16|0:Ma)+136)|0)|0)&255;a[t+wa>>0]=Ma;if(!(Ma<<24>>24))ra=((ra+ -1|0)>>>0<2&1)+ra|0;else{pa=(pa|0)==-1?wa:pa;ra=0}wa=wa+1|0}while((wa|0)<(qa|0))}wa=na+ -1|0;qa=a[s+wa>>0]|0;ya=qa&255;do if(!(a[z>>0]|0)){if((c[ea>>2]|0)==1?!((c[(c[r>>2]|0)+13104>>2]|0)==0|da|(ca|0)!=10):0){va=0;break}va=(va-ya|0)>3&1}else va=0;while(0);if((pa|0)!=-1){La=c[u>>2]|0;La=_a(La+224|0,La+((R?sa|4:sa)|160)|0)|0;Ma=t+pa|0;a[Ma>>0]=(d[Ma>>0]|0)+La}sa=(va|0)==0;if((a[(c[ga>>2]|0)+4>>0]|0)==0|sa){wa=0;va=0;do{va=ab((c[u>>2]|0)+224|0)|0|va<<1;wa=wa+1|0}while((wa|0)<(na|0));Ba=va<<16-na}else{va=wa&255;if(!((wa&255)<<24>>24))ya=0;else{wa=0;ya=0;do{ya=ab((c[u>>2]|0)+224|0)|0|ya<<1;wa=wa+1|0}while((wa|0)<(va|0))}Ba=ya<<17-na}ta=ta<<2;va=ua<<2;ua=o+oa+199|0;wa=0;Ha=0;Ca=xa;Fa=0;while(1){xa=Da&255;ya=(d[K+xa>>0]|0)+ta|0;xa=(d[I+xa>>0]|0)+va|0;b:do 
if((wa|0)<8){Ia=(d[t+wa>>0]|0)+1|0;Ma=(wa|0)==(pa|0);if((Ia|0)==((Ma?3:2)|0)&0==((Ma?0:0)|0))Ja=0;else{Ja=0;break}while(1){Ka=Ja+1|0;if(!(ab((c[u>>2]|0)+224|0)|0)){w=120;break}if((Ka|0)<31)Ja=Ka;else{w=124;break}}do if((w|0)==120){w=0;if((Ja|0)>=3){Ka=Ja;w=124;break}if((Ga|0)>0){Ka=0;La=0;do{La=ab((c[u>>2]|0)+224|0)|0|La<<1;Ka=Ka+1|0}while((Ka|0)!=(Ga|0))}else La=0;Ka=La+(Ja<<Ga)|0}while(0);if((w|0)==124){w=0;Ja=Ka+ -3|0;if((Ja+Ga|0)>0){La=Ga+ -3+Ka|0;Ka=0;Ma=0;do{Ma=ab((c[u>>2]|0)+224|0)|0|Ma<<1;Ka=Ka+1|0}while((Ka|0)!=(La|0))}else Ma=0;Ka=Ma+((1<<Ja)+2<<Ga)|0}Ia=ae(Ka|0,((Ka|0)<0)<<31>>31|0,Ia|0,0)|0;Ja=D;La=3<<Ga;Na=((La|0)<0)<<31>>31;Ma=c[(c[r>>2]|0)+13116>>2]|0;do if((Ja|0)>(Na|0)|(Ja|0)==(Na|0)&Ia>>>0>La>>>0){La=Ga+1|0;if(Ma){Ga=La;break}Ga=(Ga|0)>3?4:La;break b}while(0);if(!((Ma|0)!=0&(Ha|0)==0))break;Ha=a[ua>>0]|0;La=(Ha&255)>>>2;if((Ka|0)>=(3<<La|0)){a[ua>>0]=Ha+1<<24>>24;Ha=1;break}if((Ka<<1|0)>=(1<<La|0)|Ha<<24>>24==0){Ha=1;break}a[ua>>0]=Ha+ -1<<24>>24;Ha=1}else{Ia=0;while(1){Ja=Ia+1|0;if(!(ab((c[u>>2]|0)+224|0)|0)){w=138;break}if((Ja|0)<31)Ia=Ja;else{w=142;break}}do if((w|0)==138){w=0;if((Ia|0)>=3){Ja=Ia;w=142;break}if((Ga|0)>0){Ja=0;Ka=0;do{Ka=ab((c[u>>2]|0)+224|0)|0|Ka<<1;Ja=Ja+1|0}while((Ja|0)!=(Ga|0))}else Ka=0;Ka=Ka+(Ia<<Ga)|0}while(0);if((w|0)==142){w=0;Ia=Ja+ -3|0;if((Ia+Ga|0)>0){Ka=Ga+ -3+Ja|0;Ja=0;La=0;do{La=ab((c[u>>2]|0)+224|0)|0|La<<1;Ja=Ja+1|0}while((Ja|0)!=(Ka|0))}else La=0;Ka=La+((1<<Ia)+2<<Ga)|0}Ia=Ka+1|0;Ja=((Ia|0)<0)<<31>>31;Ma=c[(c[r>>2]|0)+13116>>2]|0;do if((Ka|0)>=(3<<Ga|0)){La=Ga+1|0;if(Ma){Ga=La;break}Ga=(Ga|0)>3?4:La;break b}while(0);if(!((Ma|0)!=0&(Ha|0)==0))break;La=a[ua>>0]|0;Ha=(La&255)>>>2;if((Ka|0)>=(3<<Ha|0)){a[ua>>0]=La+1<<24>>24;Ha=1;break}if((Ka<<1|0)>=(1<<Ha|0)|La<<24>>24==0){Ha=1;break}a[ua>>0]=La+ -1<<24>>24;Ha=1}while(0);do 
if(!((a[(c[ga>>2]|0)+4>>0]|0)==0|sa)){Fa=ae(Ia|0,Ja|0,Fa|0,0)|0;if(Da<<24>>24!=qa<<24>>24)break;Na=(Fa&1|0)==0;Ma=$d(0,0,Ia|0,Ja|0)|0;Ia=Na?Ia:Ma;Ja=Na?Ja:D}while(0);Na=(Ba&32768|0)==0;Da=$d(0,0,Ia|0,Ja|0)|0;Da=Na?Ia:Da;Ia=Na?Ja:D;Ba=Ba<<1&131070;Ja=Da&65535;do if(!(a[z>>0]|0)){do if(!((a[(c[r>>2]|0)+634>>0]|0)==0|ha)){if(!((xa|ya|0)!=0|k)){Ca=ia;break}if((j|0)==3)Ca=(xa<<3)+ya|0;else if((j|0)==4)Ca=(xa>>>1<<3)+(ya>>>1)|0;else if((j|0)==5)Ca=(xa>>>2<<3)+(ya>>>2)|0;else Ca=(xa<<2)+ya|0;Ca=d[C+Ca>>0]|0}while(0);Da=ke(Da|0,Ia|0,B|0,Y|0)|0;Da=ke(Da|0,D|0,Ca|0,((Ca|0)<0)<<31>>31|0)|0;Da=ae(Da|0,D|0,A|0,X|0)|0;Da=_d(Da|0,D|0,E|0)|0;Ia=D;if((Ia|0)<0){Ja=(Da&-32768|0)==-32768&(Ia&268435455|0)==268435455?Da&65535:-32768;break}else{Ja=Ia>>>0>0|(Ia|0)==0&Da>>>0>32767?32767:Da&65535;break}}while(0);b[p+((xa<<j)+ya<<1)>>1]=Ja;wa=wa+1|0;if((wa|0)>=(na|0)){xa=Ca;break a}Da=a[s+wa>>0]|0}}while(0);if(la)ka=ka+ -1|0;else break}}do if(a[z>>0]|0){if((c[(c[r>>2]|0)+13104>>2]|0)!=0?(x&-17|0)==10:0)Ea[c[f+2632>>2]&7](p,j&65535,(x|0)==26&1)}else{if(F){if(((j|0)==2?(c[(c[r>>2]|0)+13096>>2]|0)!=0:0)?(c[o+31244>>2]|0)==1:0){s=0;do{La=p+(15-s<<1)|0;Ma=b[La>>1]|0;Na=p+(s<<1)|0;b[La>>1]=b[Na>>1]|0;b[Na>>1]=Ma;s=s+1|0}while((s|0)!=8)}s=j&65535;Aa[c[f+2628>>2]&7](p,s);if(!(c[(c[r>>2]|0)+13104>>2]|0))break;if((c[o+31244>>2]|0)!=1)break;if((x&-17|0)!=10)break;Ea[c[f+2632>>2]&7](p,s,(x|0)==26&1);break}if(y&(c[o+31244>>2]|0)==1&(j|0)==2){za[c[f+2636>>2]&7](p);break}r=(G|0)>(H|0)?G:H;if(!r){za[c[f+(j+ -2<<2)+2656>>2]&7](p);break}s=H+4+G|0;do if((r|0)>=4){if((r|0)<8){s=(s|0)<8?s:8;break}if((r|0)<12)s=(s|0)<24?s:24}else s=(s|0)<4?s:4;while(0);Aa[c[f+(j+ -2<<2)+2640>>2]&7](p,s)}while(0);if(!(a[o+304>>0]|0)){Na=j+ -2|0;Na=f+(Na<<2)+2612|0;Na=c[Na>>2]|0;Ea[Na&7](h,p,m);i=n;return}if((q|0)<=0){Na=j+ -2|0;Na=f+(Na<<2)+2612|0;Na=c[Na>>2]|0;Ea[Na&7](h,p,m);i=n;return}o=c[o+284>>2]|0;r=0;do{Na=p+(r<<1)|0;b[Na>>1]=(($(b[g+(r<<1)>>1]|0,o)|0)>>>3)+(e[Na>>1]|0);r=r+1|0}while((r|0)!=(q|0));Na=j+ 
-2|0;Na=f+(Na<<2)+2612|0;Na=c[Na>>2]|0;Ea[Na&7](h,p,m);i=n;return}function yb(a){a=a|0;var b=0,e=0,f=0;b=i;f=a+16|0;e=c[f>>2]|0;c[a>>2]=(c[a>>2]|0)+ -65535+((d[e+1>>0]|0)<<1|(d[e>>0]|0)<<9);if(e>>>0>=(c[a+20>>2]|0)>>>0){i=b;return}c[f>>2]=e+2;i=b;return}function zb(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0;f=i;h=b+136|0;n=c[h>>2]|0;g=b+200|0;j=c[g>>2]|0;m=c[j+13080>>2]|0;q=(1<<m)+ -1|0;m=-1<<m-(c[(c[b+204>>2]|0)+24>>2]|0);o=m&d;p=m&e;k=c[j+13140>>2]|0;j=c[j+13064>>2]|0;l=o>>j;j=p>>j;if(!(q&d))o=0;else o=(o&q|0)!=0;if(!(q&e))p=0;else p=(p&q|0)!=0;q=n+203|0;if((a[q>>0]|0)==0?(m&(e|d)|0)!=0:0)d=c[n+276>>2]|0;else{a[q>>0]=(a[n+300>>0]|0)==0&1;d=a[b+2112>>0]|0}if(o){e=l+ -1+($(j,k)|0)|0;e=a[(c[b+4316>>2]|0)+e>>0]|0}else e=d;if(p){d=($(j+ -1|0,k)|0)+l|0;d=a[(c[b+4316>>2]|0)+d>>0]|0}b=e+1+d>>1;h=c[h>>2]|0;j=c[h+280>>2]|0;if(!j){a[h+272>>0]=b;i=f;return}g=c[(c[g>>2]|0)+13192>>2]|0;b=j+52+b+(g<<1)|0;if((b|0)>0)j=b;else j=-52-g+1+b|0;a[h+272>>0]=b-g-j+((j|0)%(g+52|0)|0);i=f;return}function Ab(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0;g=i;j=c[b+136>>2]|0;k=b+200|0;do if((e|0)>0&(e&7|0)==0){if(((a[b+2062>>0]|0)==0?(c[j+31312>>2]&4|0)!=0:0)?((e|0)%(1<<c[(c[k>>2]|0)+13080>>2]|0)|0|0)==0:0)break;if(((a[(c[b+204>>2]|0)+53>>0]|0)==0?(c[j+31312>>2]&8|0)!=0:0)?((e|0)%(1<<c[(c[k>>2]|0)+13080>>2]|0)|0|0)==0:0)break;h=1<<f;if((h|0)>0){l=b+2596|0;m=b+4320|0;n=0;do{o=n+d+($(c[l>>2]|0,e)|0)>>2;a[(c[m>>2]|0)+o>>0]=2;n=n+4|0}while((n|0)<(h|0))}}while(0);if(!((d|0)>0&(d&7|0)==0)){i=g;return}if(((a[b+2062>>0]|0)==0?(c[j+31312>>2]&1|0)!=0:0)?((d|0)%(1<<c[(c[k>>2]|0)+13080>>2]|0)|0|0)==0:0){i=g;return}if(((a[(c[b+204>>2]|0)+53>>0]|0)==0?(c[j+31312>>2]&2|0)!=0:0)?((d|0)%(1<<c[(c[k>>2]|0)+13080>>2]|0)|0|0)==0:0){i=g;return}h=1<<f;if((h|0)<=0){i=g;return}j=b+2596|0;b=b+4324|0;k=0;do{o=($(c[j>>2]|0,k+e|0)|0)+d>>2;a[(c[b>>2]|0)+o>>0]=2;k=k+4|0}while((k|0)<(h|0));i=g;return}function Bb(e,f,g,h){e=e|0;f=f|0;g=g|0;h=h|0;var 
j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0;j=i;i=i+32|0;o=j+8|0;w=j;n=j+18|0;r=j+16|0;l=e+200|0;J=c[l>>2]|0;u=c[J+13120>>2]|0;k=(u-h|0)<=(f|0);b[n>>1]=0;b[r>>1]=0;v=c[J+13080>>2]|0;t=1<<v;v=($(g>>v,c[J+13128>>2]|0)|0)+(f>>v)|0;s=c[e+2508>>2]|0;m=c[s+(v<<3)+4>>2]|0;x=c[s+(v<<3)>>2]|0;if((c[J+68>>2]|0)!=0?(a[J+13056>>0]|0)!=0:0)p=1;else p=(a[(c[e+204>>2]|0)+40>>0]|0)!=0;q=(f|0)!=0;if(q){v=v+ -1|0;y=c[s+(v<<3)>>2]|0;v=c[s+(v<<3)+4>>2]|0}else{y=0;v=0}s=t+f|0;s=(s|0)>(u|0)?u:s;t=t+g|0;z=c[J+13124>>2]|0;t=(t|0)>(z|0)?z:t;z=(s|0)==(u|0)?s:s+ -8|0;u=(t|0)>(g|0);if(u){J=q?f:8;M=(J|0)<(s|0);P=q?f+ -8|0:0;E=e+2596|0;O=e+4320|0;H=e+4316|0;B=w+4|0;C=e+160|0;D=n+1|0;Q=r+1|0;K=e+4300|0;L=e+4284|0;I=e+4324|0;F=e+4304|0;G=e+4288|0;A=(P|0)>=(z|0);T=x;S=m;N=g;do{if(M){R=N+4|0;W=S+ -2&-2;U=J;do{Z=c[E>>2]|0;ba=($(Z,N)|0)+U>>2;_=c[I>>2]|0;ba=a[_+ba>>0]|0;ca=ba&255;Z=a[_+(($(Z,R)|0)+U>>2)>>0]|0;_=Z&255;ba=ba<<24>>24!=0;Z=Z<<24>>24==0;do if(!(Z&(ba^1))){V=U+ -1|0;X=c[l>>2]|0;aa=c[X+13064>>2]|0;Y=$(N>>aa,c[X+13140>>2]|0)|0;da=c[H>>2]|0;aa=(a[da+(Y+(V>>aa))>>0]|0)+1+(a[da+(Y+(U>>aa))>>0]|0)>>1;Y=aa+T|0;if((Y|0)<0)Y=0;else Y=(Y|0)>51?51:Y;Y=d[1280+Y>>0]|0;if(ba){ba=(ca<<1)+W+aa|0;if((ba|0)<0)ba=0;else ba=(ba|0)>53?53:ba;ba=d[1336+ba>>0]|0}else ba=0;c[w>>2]=ba;if(Z)Z=0;else{Z=(_<<1)+W+aa|0;if((Z|0)<0)Z=0;else Z=(Z|0)>53?53:Z;Z=d[1336+Z>>0]|0}c[B>>2]=Z;ca=c[C>>2]|0;Z=c[ca+32>>2]|0;da=$(Z,N)|0;X=(c[ca>>2]|0)+((U<<c[X+56>>2])+da)|0;if(p){a[n>>0]=Gb(e,V,N)|0;a[D>>0]=Gb(e,V,R)|0;a[r>>0]=Gb(e,U,N)|0;a[Q>>0]=Gb(e,U,R)|0;Ha[c[F>>2]&3](X,Z,Y,w,n,r);break}else{Ha[c[G>>2]&3](X,Z,Y,w,n,r);break}}while(0);U=U+8|0}while((U|0)<(s|0))}if(!((N|0)==0|A)){R=N+ -1|0;V=S;S=P;do{Y=$(c[E>>2]|0,N)|0;Z=c[O>>2]|0;aa=a[Z+(Y+S>>2)>>0]|0;ba=aa&255;U=S+4|0;Y=a[Z+(Y+U>>2)>>0]|0;Z=Y&255;aa=aa<<24>>24!=0;Y=Y<<24>>24==0;do 
if(!(Y&(aa^1))){W=c[l>>2]|0;V=c[W+13064>>2]|0;_=S>>V;T=c[W+13140>>2]|0;da=($(R>>V,T)|0)+_|0;X=c[H>>2]|0;_=(a[X+da>>0]|0)+1+(a[X+(($(N>>V,T)|0)+_)>>0]|0)>>1;T=(S|0)>=(f|0);V=T?m:v;T=T?x:y;X=_+T|0;if((X|0)<0)X=0;else X=(X|0)>51?51:X;X=d[1280+X>>0]|0;if(aa){aa=(ba<<1)+(V+ -2&-2)+_|0;if((aa|0)<0)aa=0;else aa=(aa|0)>53?53:aa;aa=d[1336+aa>>0]|0}else aa=0;c[w>>2]=aa;if(Y)Y=0;else{Y=(Z<<1)+(V+ -2&-2)+_|0;if((Y|0)<0)Y=0;else Y=(Y|0)>53?53:Y;Y=d[1336+Y>>0]|0}c[B>>2]=Y;ca=c[C>>2]|0;Y=c[ca+32>>2]|0;da=$(Y,N)|0;W=(c[ca>>2]|0)+((S<<c[W+56>>2])+da)|0;if(p){a[n>>0]=Gb(e,S,R)|0;a[D>>0]=Gb(e,U,R)|0;a[r>>0]=Gb(e,S,N)|0;a[Q>>0]=Gb(e,U,N)|0;Ha[c[K>>2]&3](W,Y,X,w,n,r);break}else{Ha[c[L>>2]&3](W,Y,X,w,n,r);break}}while(0);S=S+8|0}while((S|0)<(z|0));S=V}N=N+8|0}while((N|0)<(t|0));J=c[l>>2]|0}else S=m;if(c[J+4>>2]|0){C=q?v:m;F=e+2596|0;D=e+4320|0;v=e+4316|0;x=o+4|0;w=e+160|0;B=n+1|0;A=r+1|0;G=e+4308|0;E=e+4292|0;H=e+4324|0;z=e+4312|0;y=e+4296|0;I=1;do{O=1<<c[J+(I<<2)+13168>>2];P=1<<c[J+(I<<2)+13180>>2];if(u){N=O<<3;L=q?f:N;K=(L|0)<(s|0);J=P<<3;M=q?f-N|0:0;O=O<<2;P=P<<2;Q=g;do{if(K){R=Q+P|0;T=L;do{W=c[F>>2]|0;Y=($(W,Q)|0)+T>>2;da=c[H>>2]|0;Y=(a[da+Y>>0]|0)==2;W=(a[da+(($(W,R)|0)+T>>2)>>0]|0)==2;do if(Y|W){U=T+ -1|0;V=c[l>>2]|0;da=c[V+13064>>2]|0;_=U>>da;X=c[V+13140>>2]|0;Z=$(Q>>da,X)|0;aa=c[v>>2]|0;ba=T>>da;X=$(R>>da,X)|0;X=(a[aa+(X+_)>>0]|0)+1+(a[aa+(X+ba)>>0]|0)>>1;if(Y)Y=Hb(e,(a[aa+(Z+ba)>>0]|0)+1+(a[aa+(Z+_)>>0]|0)>>1,I,S)|0;else Y=0;c[o>>2]=Y;if(W)W=Hb(e,X,I,S)|0;else W=0;c[x>>2]=W;ca=c[w>>2]|0;W=c[ca+(I<<2)+32>>2]|0;da=$(W,Q>>c[V+(I<<2)+13180>>2])|0;V=(c[ca+(I<<2)>>2]|0)+((T>>c[V+(I<<2)+13168>>2]<<c[V+56>>2])+da)|0;if(p){a[n>>0]=Gb(e,U,Q)|0;a[B>>0]=Gb(e,U,R)|0;a[r>>0]=Gb(e,T,Q)|0;a[A>>0]=Gb(e,T,R)|0;ya[c[z>>2]&3](V,W,o,n,r);break}else{ya[c[y>>2]&3](V,W,o,n,r);break}}while(0);T=T+N|0}while((T|0)<(s|0))}if(Q){U=s-((s|0)==(c[(c[l>>2]|0)+13120>>2]|0)?0:N)|0;if((M|0)<(U|0)){T=Q+ -1|0;S=M;do{W=$(c[F>>2]|0,Q)|0;da=c[D>>2]|0;R=S+O|0;X=(a[da+(W+S>>2)>>0]|0)==2;W=(a[da+(W+R>>2)>>0]|0)==2;do 
if(X|W){if(X){da=c[l>>2]|0;ca=c[da+13064>>2]|0;Y=S>>ca;da=c[da+13140>>2]|0;aa=($(T>>ca,da)|0)+Y|0;ba=c[v>>2]|0;Y=(a[ba+aa>>0]|0)+1+(a[ba+(($(Q>>ca,da)|0)+Y)>>0]|0)>>1}else Y=0;if(W){da=c[l>>2]|0;ca=c[da+13064>>2]|0;V=R>>ca;da=c[da+13140>>2]|0;aa=($(T>>ca,da)|0)+V|0;ba=c[v>>2]|0;V=(a[ba+aa>>0]|0)+1+(a[ba+(($(Q>>ca,da)|0)+V)>>0]|0)>>1}else V=0;if(X)X=Hb(e,Y,I,C)|0;else X=0;c[o>>2]=X;if(W)V=Hb(e,V,I,m)|0;else V=0;c[x>>2]=V;da=c[l>>2]|0;ca=c[w>>2]|0;V=c[ca+(I<<2)+32>>2]|0;W=$(V,Q>>c[da+13184>>2])|0;W=(c[ca+(I<<2)>>2]|0)+((S>>c[da+13172>>2]<<c[da+56>>2])+W)|0;if(p){a[n>>0]=Gb(e,S,T)|0;a[B>>0]=Gb(e,R,T)|0;a[r>>0]=Gb(e,S,Q)|0;a[A>>0]=Gb(e,R,Q)|0;ya[c[G>>2]&3](W,V,o,n,r);break}else{ya[c[E>>2]&3](W,V,o,n,r);break}}while(0);S=S+N|0}while((S|0)<(U|0));S=C}else S=C}Q=Q+J|0}while((Q|0)<(t|0))}I=I+1|0;J=c[l>>2]|0}while((I|0)!=3)}if(!(a[J+12941>>0]|0)){if((a[e+140>>0]&1)==0|k^1){i=j;return}i=j;return}n=(c[J+13124>>2]|0)-h|0;l=(g|0)==0;m=(f|0)==0;if(!(l|m))Cb(e,f-h|0,g-h|0);n=(n|0)>(g|0);if(!(m|n))Cb(e,f-h|0,g);k=k^1;!(l|k)?(Cb(e,f,g-h|0),(a[e+140>>0]&1)!=0):0;if(n|k){i=j;return}Cb(e,f,g);if(!(a[e+140>>0]&1)){i=j;return}i=j;return}function Cb(e,f,g){e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0;v=i;i=i+48|0;j=v+24|0;h=v+42|0;u=v+40|0;r=v+16|0;n=v+8|0;m=v;k=e+200|0;S=c[k>>2]|0;s=c[S+13080>>2]|0;q=f>>s;s=g>>s;D=S+13128|0;p=($(s,c[D>>2]|0)|0)+q|0;L=c[e+204>>2]|0;N=L+1668|0;M=c[(c[N>>2]|0)+(p<<2)>>2]|0;l=e+2504|0;o=c[l>>2]|0;t=o+(p*148|0)|0;b[h>>1]=0;b[u>>1]=0;c[r>>2]=0;E=($(c[D>>2]|0,s)|0)+q|0;E=a[(c[e+4352>>2]|0)+E>>0]|0;if((a[L+42>>0]|0)!=0?(a[L+53>>0]|0)==0:0){R=1;O=1}else{R=E<<24>>24==0&1;O=0}G=(q|0)==0;c[j>>2]=G&1;I=(s|0)==0;A=j+4|0;c[A>>2]=I&1;H=(q|0)==((c[D>>2]|0)+ -1|0);z=j+8|0;c[z>>2]=H&1;F=(s|0)==((c[S+13132>>2]|0)+ 
-1|0);w=j+12|0;c[w>>2]=F&1;if(R<<24>>24){if(G)J=0;else{if(O){J=c[L+1676>>2]|0;J=(c[J+(M<<2)>>2]|0)!=(c[J+(c[(c[N>>2]|0)+(p+ -1<<2)>>2]<<2)>>2]|0)&1}else J=0;if(E<<24>>24==0?(pa=$(c[D>>2]|0,s)|0,oa=c[e+4328>>2]|0,(c[oa+(pa+q<<2)>>2]|0)!=(c[oa+(q+ -1+pa<<2)>>2]|0)):0)K=1;else K=J;a[h>>0]=K}if(H)K=0;else{if(O){K=c[L+1676>>2]|0;K=(c[K+(M<<2)>>2]|0)!=(c[K+(c[(c[N>>2]|0)+(p+1<<2)>>2]<<2)>>2]|0)&1}else K=0;if(E<<24>>24==0?(pa=$(c[D>>2]|0,s)|0,oa=c[e+4328>>2]|0,(c[oa+(pa+q<<2)>>2]|0)!=(c[oa+(q+1+pa<<2)>>2]|0)):0)P=1;else P=K;a[h+1>>0]=P}if(I)P=0;else{if(O){P=c[L+1676>>2]|0;P=(c[P+(M<<2)>>2]|0)!=(c[P+(c[(c[N>>2]|0)+(p-(c[D>>2]|0)<<2)>>2]<<2)>>2]|0)&1}else P=0;if(E<<24>>24==0?(pa=c[D>>2]|0,na=($(pa,s)|0)+q|0,oa=c[e+4328>>2]|0,(c[oa+(na<<2)>>2]|0)!=(c[oa+(($(pa,s+ -1|0)|0)+q<<2)>>2]|0)):0)Q=1;else Q=P;a[u>>0]=Q}if(F)L=0;else{if(O){L=c[L+1676>>2]|0;L=(c[L+(M<<2)>>2]|0)!=(c[L+(c[(c[N>>2]|0)+((c[D>>2]|0)+p<<2)>>2]<<2)>>2]|0)&1}else L=0;if(E<<24>>24==0?(pa=c[D>>2]|0,na=($(pa,s)|0)+q|0,oa=c[e+4328>>2]|0,(c[oa+(na<<2)>>2]|0)!=(c[oa+(($(pa,s+1|0)|0)+q<<2)>>2]|0)):0)M=1;else M=L;a[u+1>>0]=M}if(!G)if(I)B=47;else{if(!(E<<24>>24)){pa=c[D>>2]|0;na=($(pa,s)|0)+q|0;oa=c[e+4328>>2]|0;if(J<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(q+ -1+($(pa,s+ -1|0)|0)<<2)>>2]|0):0)B=38;else M=1}else if(!(J<<24>>24))B=38;else M=1;if((B|0)==38)M=P<<24>>24!=0&1;a[r>>0]=M;B=40}else B=40;if((B|0)==40)if(!I){if(!H){if(!(E<<24>>24)){pa=c[D>>2]|0;na=($(pa,s)|0)+q|0;oa=c[e+4328>>2]|0;if(K<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(q+1+($(pa,s+ -1|0)|0)<<2)>>2]|0):0)B=45;else I=1}else if(!(K<<24>>24))B=45;else I=1;if((B|0)==45)I=P<<24>>24!=0&1;a[r+1>>0]=I;B=47}}else B=47;if((B|0)==47?!(H|F):0){if(!(E<<24>>24)){pa=c[D>>2]|0;na=($(pa,s)|0)+q|0;oa=c[e+4328>>2]|0;if(K<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(q+1+($(pa,s+1|0)|0)<<2)>>2]|0):0)B=51;else H=1}else if(!(K<<24>>24))B=51;else 
H=1;if((B|0)==51)H=L<<24>>24!=0&1;a[r+2>>0]=H}if(!(G|F)){if(!(E<<24>>24)){pa=c[D>>2]|0;na=($(pa,s)|0)+q|0;oa=c[e+4328>>2]|0;if(J<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(q+ -1+($(pa,s+1|0)|0)<<2)>>2]|0):0)B=57;else D=1}else if(!(J<<24>>24))B=57;else D=1;if((B|0)==57)D=L<<24>>24!=0&1;a[r+3>>0]=D}}O=(c[S+4>>2]|0)!=0?3:1;I=e+160|0;H=e+168|0;P=e+2672|0;J=s<<1;G=J+ -1|0;D=n+4|0;E=s+ -1|0;Q=q+1|0;L=q+ -1|0;J=J+2|0;F=m+4|0;K=s+1|0;N=q<<1;M=N+ -1|0;N=N+2|0;R=e+((R&255)<<2)+2676|0;na=S;Y=0;while(1){ka=c[na+(Y<<2)+13168>>2]|0;_=f>>ka;ha=c[na+(Y<<2)+13180>>2]|0;Z=g>>ha;ba=c[I>>2]|0;S=c[ba+(Y<<2)+32>>2]|0;U=1<<c[na+13080>>2];X=U>>ka;W=U>>ha;ka=c[na+13120>>2]>>ka;ca=ka-_|0;X=(X|0)>(ca|0)?ca:X;ha=c[na+13124>>2]>>ha;ca=ha-Z|0;W=(W|0)>(ca|0)?ca:W;ca=$(S,Z)|0;ga=c[na+56>>2]|0;ca=(_<<ga)+ca|0;ba=c[ba+(Y<<2)>>2]|0;aa=ba+ca|0;U=U+2<<ga;da=c[H>>2]|0;fa=1<<ga;ea=U+fa|0;V=da+ea|0;T=o+(p*148|0)+Y+142|0;ia=d[T>>0]|0;if((ia|0)==1){ea=X<<ga;if((W|0)>0){da=V;ba=0;ca=aa;while(1){fe(da|0,ca|0,ea|0)|0;ba=ba+1|0;if((ba|0)==(W|0))break;else{da=da+U|0;ca=ca+S|0}}}Eb(e,aa,S,_,Z,X,W,Y,q,s);Fa[c[P>>2]&1](aa,V,S,U,t,j,X,W,Y);Fb(e,aa,V,S,U,f,g,X,W,Y);a[T>>0]=3}else if((ia|0)==2){ja=c[j>>2]|0;ia=c[z>>2]|0;la=c[w>>2]|0;do if(!(c[A>>2]|0)){pa=1-ja|0;oa=pa<<ga;ma=fa-oa|0;c[n>>2]=ba+(ca-S-oa);c[D>>2]=(c[e+(Y<<2)+172>>2]|0)+(($(ka,G)|0)+_-pa<<ga);do if((ja|0)!=1){oa=da+ma|0;pa=L+($(c[na+13128>>2]|0,E)|0)|0;pa=c[n+(((a[(c[l>>2]|0)+(pa*148|0)+Y+142>>0]|0)==3&1)<<2)>>2]|0;if(!ga){a[oa>>0]=a[pa>>0]|0;na=c[k>>2]|0;oa=fa;break}else{b[oa>>1]=b[pa>>1]|0;oa=fa;break}}else oa=0;while(0);pa=($(c[na+13128>>2]|0,E)|0)+q|0;na=X<<ga;fe(da+(oa+ma)|0,(c[n+(((a[(c[l>>2]|0)+(pa*148|0)+Y+142>>0]|0)==3&1)<<2)>>2]|0)+oa|0,na|0)|0;if((ia|0)!=1){na=oa+na|0;pa=Q+($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)|0;ma=da+(na+ma)|0;na=(c[n+(((a[(c[l>>2]|0)+(pa*148|0)+Y+142>>0]|0)==3&1)<<2)>>2]|0)+na|0;if(!ga){a[ma>>0]=a[na>>0]|0;break}else{b[ma>>1]=b[na>>1]|0;break}}}while(0);do 
if(!la){pa=1-ja|0;oa=pa<<ga;la=($(W,U)|0)+ea-oa|0;c[m>>2]=ba+(($(W,S)|0)+ca-oa);c[F>>2]=(c[e+(Y<<2)+172>>2]|0)+(($(ka,J)|0)+_-pa<<ga);do if((ja|0)!=1){ka=da+la|0;ma=L+($(c[(c[k>>2]|0)+13128>>2]|0,K)|0)|0;ma=c[m+(((a[(c[l>>2]|0)+(ma*148|0)+Y+142>>0]|0)==3&1)<<2)>>2]|0;if(!ga){a[ka>>0]=a[ma>>0]|0;ka=fa;break}else{b[ka>>1]=b[ma>>1]|0;ka=fa;break}}else ka=0;while(0);pa=($(c[(c[k>>2]|0)+13128>>2]|0,K)|0)+q|0;ma=X<<ga;fe(da+(ka+la)|0,(c[m+(((a[(c[l>>2]|0)+(pa*148|0)+Y+142>>0]|0)==3&1)<<2)>>2]|0)+ka|0,ma|0)|0;if((ia|0)!=1){ka=ka+ma|0;pa=Q+($(c[(c[k>>2]|0)+13128>>2]|0,K)|0)|0;la=da+(ka+la)|0;ka=(c[m+(((a[(c[l>>2]|0)+(pa*148|0)+Y+142>>0]|0)==3&1)<<2)>>2]|0)+ka|0;if(!ga){a[la>>0]=a[ka>>0]|0;break}else{b[la>>1]=b[ka>>1]|0;break}}}while(0);do if(!ja){pa=L+($(c[(c[k>>2]|0)+13128>>2]|0,s)|0)|0;if((a[(c[l>>2]|0)+(pa*148|0)+Y+142>>0]|0)==3){la=da+U|0;ja=(c[e+(Y<<2)+184>>2]|0)+(($(ha,M)|0)+Z<<ga)|0;ka=(W|0)>0;if(!ga){if(ka)ka=0;else{ja=0;break}while(1){a[la>>0]=a[ja>>0]|0;ka=ka+1|0;if((ka|0)==(W|0)){ja=0;break}else{la=la+U|0;ja=ja+fa|0}}}else{if(ka)ka=0;else{ja=0;break}while(1){b[la>>1]=b[ja>>1]|0;ka=ka+1|0;if((ka|0)==(W|0)){ja=0;break}else{la=la+U|0;ja=ja+fa|0}}}}else ja=1}else ja=0;while(0);do if(!ia){pa=Q+($(c[(c[k>>2]|0)+13128>>2]|0,s)|0)|0;if((a[(c[l>>2]|0)+(pa*148|0)+Y+142>>0]|0)==3){ia=da+((X<<ga)+ea)|0;ha=(c[e+(Y<<2)+184>>2]|0)+(($(ha,N)|0)+Z<<ga)|0;ka=(W|0)>0;if(!ga){if(ka)B=0;else break;while(1){a[ia>>0]=a[ha>>0]|0;B=B+1|0;if((B|0)==(W|0)){C=0;B=96;break}else{ia=ia+U|0;ha=ha+fa|0}}}else{if(ka)B=0;else 
break;while(1){b[ia>>1]=b[ha>>1]|0;B=B+1|0;if((B|0)==(W|0)){C=0;B=96;break}else{ia=ia+U|0;ha=ha+fa|0}}}}else{C=1;B=96}}else{C=0;B=96}while(0);if((B|0)==96?(B=0,x=ja<<ga,y=ja+X+C<<ga,(W|0)>0):0){ea=da+(ea-x)|0;da=0;ba=ba+(ca-x)|0;while(1){fe(ea|0,ba|0,y|0)|0;da=da+1|0;if((da|0)==(W|0))break;else{ea=ea+U|0;ba=ba+S|0}}}Eb(e,aa,S,_,Z,X,W,Y,q,s);Ca[c[R>>2]&3](aa,V,S,U,t,j,X,W,Y,h,u,r);Fb(e,aa,V,S,U,f,g,X,W,Y);a[T>>0]=3}Y=Y+1|0;if((Y|0)>=(O|0))break;na=c[k>>2]|0}i=v;return}function Db(a,b,d,e){a=a|0;b=b|0;d=d|0;e=e|0;var f=0,g=0,h=0,j=0,k=0;f=i;h=c[a+200>>2]|0;k=((c[h+13120>>2]|0)-e|0)>(b|0);h=((c[h+13124>>2]|0)-e|0)>(d|0);j=(d|0)==0;g=(b|0)==0;if(!(j|g))Bb(a,b-e|0,d-e|0,e);if(!(j|k))Bb(a,b,d-e|0,e);if(g|h){i=f;return}Bb(a,b-e|0,d,e);i=f;return}function Eb(d,e,f,g,h,j,k,l,m,n){d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var o=0,p=0,q=0,r=0,s=0,t=0,u=0;o=i;q=c[d+200>>2]|0;p=c[q+56>>2]|0;s=c[q+13120>>2]>>c[q+(l<<2)+13168>>2];q=c[q+13124>>2]>>c[q+(l<<2)+13180>>2];u=d+(l<<2)+172|0;t=n<<1;r=j<<p;fe((c[u>>2]|0)+(($(s,t)|0)+g<<p)|0,e|0,r|0)|0;fe((c[u>>2]|0)+(($(s,t|1)|0)+g<<p)|0,e+($(k+ -1|0,f)|0)|0,r|0)|0;d=d+(l<<2)+184|0;r=c[d>>2]|0;l=m<<1;t=r+(($(q,l)|0)+h<<p)|0;m=1<<p;n=(p|0)==0;g=(k|0)>0;if(n){if(g){r=t;t=0;s=e;while(1){a[r>>0]=a[s>>0]|0;t=t+1|0;if((t|0)==(k|0))break;else{r=r+m|0;s=s+f|0}}r=c[d>>2]|0}}else if(g){d=0;s=e;while(1){b[t>>1]=b[s>>1]|0;d=d+1|0;if((d|0)==(k|0))break;else{t=t+m|0;s=s+f|0}}}h=r+(($(q,l|1)|0)+h<<p)|0;j=e+(j+ -1<<p)|0;if(n){if(g)p=0;else{i=o;return}while(1){a[h>>0]=a[j>>0]|0;p=p+1|0;if((p|0)==(k|0))break;else{h=h+m|0;j=j+f|0}}i=o;return}else{if(g)p=0;else{i=o;return}while(1){b[h>>1]=b[j>>1]|0;p=p+1|0;if((p|0)==(k|0))break;else{h=h+m|0;j=j+f|0}}i=o;return}}function Fb(b,d,e,f,g,h,j,k,l,m){b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;var 
n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0;n=i;t=c[b+200>>2]|0;if(!(a[(c[b+204>>2]|0)+40>>0]|0)){if(!(a[t+13056>>0]|0)){i=n;return}if(!(c[t+68>>2]|0)){i=n;return}}p=b+200|0;C=c[t+13084>>2]|0;v=1<<C;o=c[t+(m<<2)+13168>>2]|0;s=c[t+(m<<2)+13180>>2]|0;m=h>>C;z=j>>C;k=k+h>>C;l=l+j>>C;t=v>>o<<c[t+56>>2];if((z|0)>=(l|0)){i=n;return}u=(m|0)<(k|0);b=b+4348|0;v=v>>s;w=(v|0)>0;do{if(u){x=z-j|0;y=m;do{A=c[p>>2]|0;C=($(c[A+13156>>2]|0,z)|0)+y|0;if((a[(c[b>>2]|0)+C>>0]|0)!=0?(r=c[A+13084>>2]|0,q=x<<r>>s,r=y-h<<r>>o<<c[A+56>>2],w):0){C=e+(($(q,g)|0)+r)|0;A=0;B=d+(($(q,f)|0)+r)|0;while(1){fe(B|0,C|0,t|0)|0;A=A+1|0;if((A|0)==(v|0))break;else{C=C+g|0;B=B+f|0}}}y=y+1|0}while((y|0)!=(k|0))}z=z+1|0}while((z|0)!=(l|0));i=n;return}function Gb(a,b,e){a=a|0;b=b|0;e=e|0;var f=0,g=0,h=0;f=i;g=c[a+200>>2]|0;h=c[g+13084>>2]|0;if((e|b|0)<0){e=2;i=f;return e|0}b=b>>h;e=e>>h;h=c[g+13156>>2]|0;if((b|0)>=(h|0)){e=2;i=f;return e|0}if((e|0)>=(c[g+13160>>2]|0)){e=2;i=f;return e|0}e=($(h,e)|0)+b|0;e=d[(c[a+4348>>2]|0)+e>>0]|0;i=f;return e|0}function Hb(b,e,f,g){b=b|0;e=e|0;f=f|0;g=g|0;var h=0,j=0;h=i;j=c[b+204>>2]|0;e=(c[((f|0)==1?j+28|0:j+32|0)>>2]|0)+e|0;if((e|0)<0)e=0;else e=(e|0)>57?57:e;do if((c[(c[b+200>>2]|0)+4>>2]|0)==1){if((e|0)>=30)if((e|0)>43){e=e+ -6|0;break}else{e=d[1392+(e+ -30)>>0]|0;break}}else if((e|0)<0)e=0;else e=(e|0)>51?51:e;while(0);g=g+2+e|0;if((g|0)<0){j=0;j=1336+j|0;j=a[j>>0]|0;j=j&255;i=h;return j|0}j=(g|0)>53?53:g;j=1336+j|0;j=a[j>>0]|0;j=j&255;i=h;return j|0}function Ib(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0;g=i;k=b+4376|0;c[k>>2]=0;a:do if((e|0)>1){m=0;while(1){if(!(a[d+m>>0]|0)){if((m|0)>0){l=m+ -1|0;l=(a[d+l>>0]|0)==0?l:m}else l=m;m=l+2|0;if(((m|0)<(e|0)?(a[d+(l+1)>>0]|0)==0:0)?(j=a[d+m>>0]|0,(j&255)<4):0)break}else l=m;m=l+2|0;if((l+3|0)>=(e|0))break a}m=l;e=j<<24>>24==3?e:l}else m=0;while(0);if((m|0)>=(e+ -1|0)){c[f+12>>2]=d;c[f+8>>2]=e;q=e;i=g;return q|0}nd(f,f+4|0,e+32|0);j=c[f>>2]|0;if(!j){q=-12;i=g;return 
q|0}fe(j|0,d|0,m|0)|0;o=m+2|0;b:do if((o|0)<(e|0)){l=b+4384|0;b=b+4380|0;n=m;c:while(1){p=d+o|0;q=a[p>>0]|0;do if((q&255)<=3){p=a[d+m>>0]|0;if(!(p<<24>>24))if(!(a[d+(m+1)>>0]|0)){if(q<<24>>24!=3){e=m;break b}o=n+1|0;a[j+n>>0]=0;n=n+2|0;a[j+o>>0]=0;m=m+3|0;q=(c[k>>2]|0)+1|0;c[k>>2]=q;p=c[l>>2]|0;if((p|0)<(q|0)){p=p<<1;c[l>>2]=p;ld(b,p,4)|0;p=c[b>>2]|0;if(!p){f=-12;break c}}else{p=c[b>>2]|0;if(!p)break}c[p+((c[k>>2]|0)+ -1<<2)>>2]=o}else{p=0;h=26}else h=26}else{a[j+n>>0]=a[d+m>>0]|0;a[j+(n+1)>>0]=a[d+(m+1)>>0]|0;p=a[p>>0]|0;n=n+2|0;m=o;h=26}while(0);if((h|0)==26){h=0;a[j+n>>0]=p;n=n+1|0;m=m+1|0}o=m+2|0;if((o|0)>=(e|0)){h=15;break b}}i=g;return f|0}else{n=m;h=15}while(0);if((h|0)==15)if((m|0)<(e|0)){h=e+n|0;k=m;while(1){a[j+n>>0]=a[d+k>>0]|0;k=k+1|0;if((k|0)==(e|0))break;else n=n+1|0}n=h-m|0}else e=m;h=j+n+0|0;d=h+32|0;do{a[h>>0]=0;h=h+1|0}while((h|0)<(d|0));c[f+12>>2]=j;c[f+8>>2]=n;q=e;i=g;return q|0}function Jb(b){b=b|0;var d=0,e=0,f=0,g=0,h=0;e=i;f=b+60|0;d=c[f>>2]|0;Zc();ac();f=c[f>>2]|0;c[f+4>>2]=b;g=md(31328)|0;c[f+136>>2]=g;if((((g|0)!=0?(c[f+72>>2]=g,c[f+8>>2]=f,g=fd(199)|0,c[f+152>>2]=g,(g|0)!=0):0)?(g=wd()|0,c[f+164>>2]=g,(g|0)!=0):0)?(h=wd()|0,c[f+2524>>2]=h,(h|0)!=0):0){c[f+2528>>2]=h;c[f+2592>>2]=2147483647;a[f+4469>>0]=1;c[f+2584>>2]=0;c[d+4368>>2]=0;c[d+4520>>2]=0;f=b+808|0;if(!(c[f>>2]&2))a[d+141>>0]=1;else a[d+141>>0]=c[b+800>>2];if((c[f>>2]&1|0)!=0?(c[b+800>>2]|0)>1:0){a[d+140>>0]=1;h=0;i=e;return h|0}a[d+140>>0]=2;h=0;i=e;return h|0}Lb(b)|0;h=-12;i=e;return h|0}function Kb(f,g,h,j){f=f|0;g=g|0;h=h|0;j=j|0;var 
k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0,qa=0,ra=0,sa=0,ua=0,va=0,wa=0,xa=0,ya=0,za=0,Aa=0,Ca=0,Da=0,Ea=0,Fa=0,Ga=0,Ha=0,Ia=0,Ja=0,Ka=0,La=0,Ma=0,Na=0,Oa=0,Pa=0,Qa=0,Ra=0,Sa=0,Ta=0,Ua=0,Va=0,Wa=0,Xa=0,Ya=0,Za=0,_a=0,$a=0,ab=0;k=i;i=i+16|0;q=k+8|0;n=k;m=c[f+60>>2]|0;l=j+28|0;K=c[l>>2]|0;if(!K){g=$b(m,g,1)|0;if((g|0)<0){_a=g;i=k;return _a|0}c[h>>2]=g;_a=0;i=k;return _a|0}r=m+2520|0;c[r>>2]=0;f=m+4524|0;b[f>>1]=1;J=c[j+24>>2]|0;c[r>>2]=0;G=m+2584|0;A=m+2588|0;c[A>>2]=c[G>>2];c[G>>2]=0;w=m+4408|0;c[w>>2]=0;a:do if((K|0)>3){H=m+4470|0;F=m+4412|0;z=m+4404|0;v=m+4388|0;C=m+4396|0;x=m+4392|0;E=m+4384|0;y=m+4380|0;j=m+4376|0;u=m+136|0;t=m+2512|0;I=m+4480|0;while(1){B=(a[H>>0]|0)==0;if(B){while(1){L=J+1|0;if(((a[J>>0]|0)==0?(a[L>>0]|0)==0:0)?(a[J+2>>0]|0)==1:0)break;if((K|0)<5){p=-1094995529;o=180;break a}J=L;K=K+ -1|0}J=J+3|0;M=0;K=K+ -3|0}else{L=c[I>>2]|0;if((L|0)>0){N=0;M=0;do{N=d[J+M>>0]|N<<8;M=M+1|0}while((M|0)!=(L|0));M=N}else M=0;K=K-L|0;if((M|0)>(K|0)){p=-1094995529;o=180;break a}J=J+L|0}B=B?K:M;L=c[F>>2]|0;N=c[w>>2]|0;if((L|0)<(N+1|0)){L=L+1|0;M=kd(c[z>>2]|0,L,16)|0;if(!M){p=-12;o=180;break a}c[z>>2]=M;N=c[F>>2]|0;ce(M+(N<<4)|0,0,L-N<<4|0)|0;ld(v,L,4)|0;ld(C,L,4)|0;ld(x,L,4)|0;N=c[C>>2]|0;c[N+(c[F>>2]<<2)>>2]=1024;N=od(c[N+(c[F>>2]<<2)>>2]|0,4)|0;c[(c[x>>2]|0)+(c[F>>2]<<2)>>2]=N;c[F>>2]=L;N=c[w>>2]|0}c[E>>2]=c[(c[C>>2]|0)+(N<<2)>>2];c[y>>2]=c[(c[x>>2]|0)+(N<<2)>>2];M=c[z>>2]|0;L=Ib(m,J,B,M+(N<<4)|0)|0;c[(c[v>>2]|0)+(c[w>>2]<<2)>>2]=c[j>>2];c[(c[C>>2]|0)+(c[w>>2]<<2)>>2]=c[E>>2];Za=c[y>>2]|0;_a=c[w>>2]|0;c[w>>2]=_a+1;c[(c[x>>2]|0)+(_a<<2)>>2]=Za;if((L|0)<0){p=L;o=180;break 
a}Za=c[u>>2]|0;Xa=c[M+(N<<4)+12>>2]|0;Ya=c[M+(N<<4)+8>>2]|0;Ya=Ya>>>0>268435455?-8:Ya<<3;_a=Ya>>>0>2147483639|(Xa|0)==0;Ya=_a?0:Ya;Xa=_a?0:Xa;B=_a?-1094995529:0;c[Za+204>>2]=Xa;c[Za+216>>2]=Ya;c[Za+220>>2]=Ya+8;c[Za+208>>2]=Xa+(Ya>>3);c[Za+212>>2]=0;if(_a){p=B;o=180;break a}Ob(m)|0;if(((c[t>>2]|0)+ -36|0)>>>0<2)c[G>>2]=1;K=K-L|0;if((K|0)<=3)break;else J=J+L|0}if((c[w>>2]|0)>0){la=m+4|0;Aa=m+1448|0;ba=m+2046|0;_=m+1428|0;za=m+204|0;qa=m+200|0;Ia=m+1449|0;Ja=m+1432|0;Na=m+1436|0;Oa=m+2580|0;Ga=m+156|0;Da=m+1440|0;I=m+1450|0;L=m+1620|0;pa=m+2572|0;K=m+2516|0;M=m+2576|0;W=m+2056|0;X=m+2057|0;N=m+2058|0;P=m+2052|0;O=m+2048|0;La=m+2068|0;S=m+2072|0;Q=m+2076|0;T=m+2080|0;Y=m+2061|0;V=m+2084|0;U=m+2088|0;Z=m+2062|0;J=m+1451|0;Ma=m+2108|0;Ha=m+2112|0;Ka=m+2500|0;Ca=m+2592|0;ma=m+2604|0;na=m+4416|0;aa=q+4|0;ra=m+4320|0;ua=m+2596|0;sa=m+2600|0;va=m+4324|0;wa=m+4344|0;xa=m+4348|0;ya=m+4328|0;oa=m+160|0;Fa=m+140|0;Ea=m+164|0;R=m+2096|0;F=m+2100|0;E=m+2104|0;G=m+141|0;H=m+4368|0;da=m+2504|0;ca=m+2508|0;fa=m+4332|0;ea=m+4336|0;ga=m+4340|0;ia=m+4352|0;ha=m+4316|0;ja=m+2608|0;Pa=m+196|0;Qa=m+4364|0;ka=m+168|0;C=0;b:while(1){c[j>>2]=c[(c[v>>2]|0)+(C<<2)>>2];c[y>>2]=c[(c[x>>2]|0)+(C<<2)>>2];Za=c[z>>2]|0;Ya=c[Za+(C<<4)+12>>2]|0;Za=c[Za+(C<<4)+8>>2]|0;_a=c[u>>2]|0;Za=Za>>>0>268435455?-8:Za<<3;Ra=Za>>>0>2147483639|(Ya|0)==0;Za=Ra?0:Za;Ya=Ra?0:Ya;c[_a+204>>2]=Ya;c[_a+216>>2]=Za;c[_a+220>>2]=Za+8;c[_a+208>>2]=Ya+(Za>>3);c[_a+212>>2]=0;c:do if(Ra){s=Ra?-1094995529:0;o=178}else{Ra=Ob(m)|0;d:do if((Ra|0)>=0){if(!Ra)break c;switch(c[t>>2]|0){case 48:{Ra=Dc(m)|0;if((Ra|0)<0)break d;else break c};case 34:{Ra=Ec(m)|0;if((Ra|0)<0)break d;else break c};case 40:case 39:{Ra=Gc(m)|0;if((Ra|0)<0)break d;else break c};case 9:case 8:case 7:case 6:case 21:case 20:case 19:case 18:case 17:case 16:case 5:case 4:case 3:case 2:case 0:case 1:{Ra=c[u>>2]|0;Sa=Ra+204|0;_a=(bd(Sa)|0)&255;a[Aa>>0]=_a;Ta=c[t>>2]|0;if(!((Ta+ -16|0)>>>0>4|_a<<24>>24==0)?(b[Qa>>1]=(e[Qa>>1]|0)+1&255,c[Ca>>2]=2147483647,(Ta+ 
-19|0)>>>0<2):0){Yb(m);Ta=c[t>>2]|0}a[ba>>0]=0;if((Ta+ -16|0)>>>0<8)a[ba>>0]=bd(Sa)|0;Ta=dd(Sa)|0;c[_>>2]=Ta;if(Ta>>>0>255){p=B;o=180;break a}Ta=c[m+(Ta<<2)+400>>2]|0;if(!Ta){p=B;o=180;break a}if(!(a[Aa>>0]|0)){Wa=c[Ta+4>>2]|0;if((c[za>>2]|0)!=(Wa|0)){p=B;o=180;break a}}else Wa=c[Ta+4>>2]|0;c[za>>2]=Wa;Ua=c[t>>2]|0;Va=(Ua|0)==21;if(Va?(c[A>>2]|0)==1:0)a[ba>>0]=1;Ta=c[qa>>2]|0;Wa=c[(c[m+(c[Wa>>2]<<2)+272>>2]|0)+4>>2]|0;if((Ta|0)!=(Wa|0)){c[qa>>2]=Wa;e:do if(Ta){if((Ua+ -16|0)>>>0>7|Va)break;do if((c[Wa+13120>>2]|0)==(c[Ta+13120>>2]|0)){if((c[Wa+13124>>2]|0)!=(c[Ta+13124>>2]|0))break;if((c[Wa+76+(((c[Wa+72>>2]|0)+ -1|0)*12|0)>>2]|0)==(c[Ta+(((c[Ta+72>>2]|0)+ -1|0)*12|0)+76>>2]|0))break e}while(0);a[ba>>0]=0}while(0);Yb(m);Ta=c[qa>>2]|0;Nb(m);Va=c[Ta+13064>>2]|0;Wa=Ta+13120|0;ab=c[Wa>>2]|0;Xa=Ta+13124|0;$a=c[Xa>>2]|0;Va=$(($a>>Va)+1|0,(ab>>Va)+1|0)|0;Ua=$(c[Ta+13132>>2]|0,c[Ta+13128>>2]|0)|0;_a=Ta+13156|0;Za=Ta+13160|0;Ya=$(c[Za>>2]|0,c[_a>>2]|0)|0;c[ua>>2]=(ab>>2)+1;c[sa>>2]=($a>>2)+1;c[da>>2]=pd(Ua,148)|0;$a=pd(Ua,8)|0;c[ca>>2]=$a;if((c[da>>2]|0)==0|($a|0)==0){o=71;break b}$a=Ta+13144|0;ab=Ta+13140|0;c[fa>>2]=fd($(c[ab>>2]|0,c[$a>>2]|0)|0)|0;ab=od(c[$a>>2]|0,c[ab>>2]|0)|0;c[ea>>2]=ab;if((c[fa>>2]|0)==0|(ab|0)==0){o=71;break b}c[wa>>2]=od(c[Ta+13148>>2]|0,c[Ta+13152>>2]|0)|0;c[ga>>2]=md(Ya)|0;Ya=fd($((c[Za>>2]|0)+1|0,(c[_a>>2]|0)+1|0)|0)|0;c[xa>>2]=Ya;if(!(c[ga>>2]|0)){o=71;break b}if((c[wa>>2]|0)==0|(Ya|0)==0){o=71;break b}c[ia>>2]=fd(Ua)|0;c[ya>>2]=od(Va,4)|0;ab=od(Va,1)|0;c[ha>>2]=ab;if(!ab){o=71;break b}if(!(c[ia>>2]|0)){o=71;break b}if(!(c[ya>>2]|0)){o=71;break b}c[ra>>2]=pd(c[ua>>2]|0,c[sa>>2]|0)|0;ab=pd(c[ua>>2]|0,c[sa>>2]|0)|0;c[va>>2]=ab;if((c[ra>>2]|0)==0|(ab|0)==0){o=71;break b}ab=c[la>>2]|0;c[ab+124>>2]=c[Wa>>2];c[ab+128>>2]=c[Xa>>2];c[ab+116>>2]=c[Ta+12>>2];c[ab+120>>2]=c[Ta+16>>2];c[ab+136>>2]=c[Ta+60>>2];c[ab+172>>2]=c[Ta+(((c[Ta+72>>2]|0)+ 
-1|0)*12|0)+80>>2];ab=Ta+160|0;c[q+0>>2]=c[ab+0>>2];c[q+4>>2]=c[ab+4>>2];if(!(c[Ta+176>>2]|0)){Ua=c[la>>2]|0;c[Ua+392>>2]=1}else{Ua=c[la>>2]|0;c[Ua+392>>2]=(c[Ta+184>>2]|0)!=0?2:1}if(!(c[Ta+188>>2]|0)){c[Ua+380>>2]=2;c[Ua+384>>2]=2;c[Ua+388>>2]=2}else{c[Ua+380>>2]=d[Ta+192>>0];c[Ua+384>>2]=d[Ta+193>>0];c[Ua+388>>2]=d[Ta+194>>0]}bc(ja,c[Ta+52>>2]|0);if(a[Ta+12941>>0]|0){Ua=c[qa>>2]|0;Va=(c[Ua+4>>2]|0)!=0?3:1;ab=(1<<c[Ua+13080>>2])+2|0;ab=$(ab,ab)|0;c[ka>>2]=fd(ab<<c[Ua+56>>2])|0;Ua=0;do{ab=c[qa>>2]|0;$a=c[ab+13124>>2]>>c[ab+(Ua<<2)+13180>>2];_a=$(c[ab+13120>>2]>>c[ab+(Ua<<2)+13168>>2]<<1,c[ab+13132>>2]|0)|0;c[m+(Ua<<2)+172>>2]=fd(_a<<c[ab+56>>2])|0;ab=c[qa>>2]|0;$a=$($a<<1,c[ab+13128>>2]|0)|0;c[m+(Ua<<2)+184>>2]=fd($a<<c[ab+56>>2])|0;Ua=Ua+1|0}while((Ua|0)<(Va|0))}c[qa>>2]=Ta;c[Pa>>2]=c[(c[m+(c[Ta>>2]<<2)+208>>2]|0)+4>>2];b[Qa>>1]=(e[Qa>>1]|0)+1&255;c[Ca>>2]=2147483647}ab=c[la>>2]|0;c[ab+832>>2]=d[Ta+302>>0];c[ab+836>>2]=d[Ta+335>>0];a[Ia>>0]=0;do if(!(a[Aa>>0]|0)){if(a[(c[za>>2]|0)+41>>0]|0){a[Ia>>0]=bd(Sa)|0;Ta=c[qa>>2]|0}Ta=($(c[Ta+13128>>2]<<1,c[Ta+13132>>2]|0)|0)+ -2|0;Ua=Ta>>>0>65535;Ta=Ua?Ta>>>16:Ta;Ua=Ua?16:0;if(Ta&65280){Ua=Ua|8;Ta=Ta>>>8}Ta=_c(Sa,(d[4680+Ta>>0]|0)+Ua|0)|0;c[Ja>>2]=Ta;ab=c[qa>>2]|0;if(Ta>>>0>=($(c[ab+13132>>2]|0,c[ab+13128>>2]|0)|0)>>>0){p=B;o=180;break a}if(a[Ia>>0]|0)if(!(a[Ga>>0]|0)){p=B;o=180;break a}else break;else{c[Na>>2]=Ta;c[Oa>>2]=(c[Oa>>2]|0)+1;o=82;break}}else{c[Na>>2]=0;c[Ja>>2]=0;c[Oa>>2]=0;a[Ga>>0]=0;o=82}while(0);f:do if((o|0)==82){o=0;a[Ga>>0]=0;if((c[(c[za>>2]|0)+1624>>2]|0)>0){Ta=0;do{ad(Sa,1);Ta=Ta+1|0}while((Ta|0)<(c[(c[za>>2]|0)+1624>>2]|0))}Ta=dd(Sa)|0;c[Da>>2]=Ta;if(Ta>>>0>=3){p=B;o=180;break a}if(!((Ta|0)==2?1:((c[t>>2]|0)+ -16|0)>>>0>7)){p=B;o=180;break a}a[I>>0]=1;if(a[(c[za>>2]|0)+39>>0]|0)a[I>>0]=bd(Sa)|0;if(a[(c[qa>>2]|0)+8>>0]|0)a[J>>0]=_c(Sa,2)|0;if(((c[t>>2]|0)+ -19|0)>>>0>=2){o=91;break b}c[L>>2]=0;c[pa>>2]=0;if(!(c[K>>2]|0))c[M>>2]=0;do 
if(a[(c[qa>>2]|0)+12941>>0]|0){a[W>>0]=bd(Sa)|0;if(!(c[(c[qa>>2]|0)+4>>2]|0)){a[X>>0]=0;a[N>>0]=0;break}else{ab=(bd(Sa)|0)&255;a[N>>0]=ab;a[X>>0]=ab;break}}else{a[W>>0]=0;a[X>>0]=0;a[N>>0]=0}while(0);c[P>>2]=0;c[O>>2]=0;c[La>>2]=ed(Sa)|0;Ta=c[za>>2]|0;if(!(a[Ta+36>>0]|0)){c[S>>2]=0;c[Q>>2]=0}else{c[S>>2]=ed(Sa)|0;c[Q>>2]=ed(Sa)|0;Ta=c[za>>2]|0}if(!(a[Ta+1631>>0]|0))a[T>>0]=0;else{a[T>>0]=bd(Sa)|0;Ta=c[za>>2]|0}g:do if(!(a[Ta+55>>0]|0)){a[Y>>0]=0;c[V>>2]=0;c[U>>2]=0}else{do if(a[Ta+56>>0]|0){if(!(bd(Sa)|0)){Ta=c[za>>2]|0;break}ab=(bd(Sa)|0)&255;a[Y>>0]=ab;if(ab<<24>>24)break g;c[V>>2]=(ed(Sa)|0)<<1;c[U>>2]=(ed(Sa)|0)<<1;break g}while(0);a[Y>>0]=a[Ta+57>>0]|0;c[V>>2]=c[Ta+60>>2];c[U>>2]=c[Ta+64>>2]}while(0);Ta=a[(c[za>>2]|0)+54>>0]|0;h:do if(Ta<<24>>24){do if(!(a[W>>0]|0)){if(a[X>>0]|0)break;if(a[Y>>0]|0)break h}while(0);a[Z>>0]=bd(Sa)|0;break f}while(0);a[Z>>0]=Ta}while(0);c[Ma>>2]=0;ab=c[za>>2]|0;if(!((a[ab+42>>0]|0)==0?(a[ab+43>>0]|0)==0:0))o=122;i:do if((o|0)==122){o=0;ab=dd(Sa)|0;c[Ma>>2]=ab;if((ab|0)<=0){c[H>>2]=0;break}Ta=(dd(Sa)|0)+1|0;Ua=Ta>>4;Ta=Ta&15;jd(R);jd(F);jd(E);c[R>>2]=od(c[Ma>>2]|0,4)|0;c[F>>2]=od(c[Ma>>2]|0,4)|0;Va=od(c[Ma>>2]|0,4)|0;c[E>>2]=Va;if(!(c[R>>2]|0)){o=127;break b}if((c[F>>2]|0)==0|(Va|0)==0){o=127;break b}if((c[Ma>>2]|0)>0){Xa=(Ua|0)>0;Wa=(Ta|0)==0;Va=0;do{if(Xa){Ya=0;Za=0;do{Za=(_c(Sa,16)|0)+(Za<<16)|0;Ya=Ya+1|0}while((Ya|0)!=(Ua|0))}else Za=0;if(!Wa)Za=(_c(Sa,Ta)|0)+(Za<<Ta)|0;c[(c[R>>2]|0)+(Va<<2)>>2]=Za+1;Va=Va+1|0}while((Va|0)<(c[Ma>>2]|0))}do if((d[G>>0]|0)>1){ab=c[za>>2]|0;if((c[ab+48>>2]|0)<=1?(c[ab+44>>2]|0)<=1:0)break;c[H>>2]=0;a[G>>0]=1;break i}while(0);c[H>>2]=0}while(0);Ta=c[za>>2]|0;if(a[Ta+1628>>0]|0){Ta=dd(Sa)|0;$a=de(Ta|0,0,3)|0;Za=D;ab=(c[Ra+216>>2]|0)-(c[Ra+212>>2]|0)|0;_a=((ab|0)<0)<<31>>31;if((Za|0)>(_a|0)|(Za|0)==(_a|0)&$a>>>0>ab>>>0){p=B;o=180;break 
a}if(Ta){Ua=0;do{ad(Sa,8);Ua=Ua+1|0}while((Ua|0)!=(Ta|0))}Ta=c[za>>2]|0}Sa=(c[Ta+16>>2]|0)+26+(c[La>>2]|0)|0;a[Ha>>0]=Sa;Sa=Sa<<24;if((Sa|0)>855638016){p=B;o=180;break a}if((Sa>>24|0)<(0-(c[(c[qa>>2]|0)+13192>>2]|0)|0)){p=B;o=180;break a}ab=c[Ja>>2]|0;c[Ka>>2]=ab;if((ab|0)==0?(a[Ia>>0]|0)!=0:0){p=B;o=180;break a}if(((c[Ra+216>>2]|0)-(c[Ra+212>>2]|0)|0)<0){p=B;o=180;break a}a[(c[u>>2]|0)+203>>0]=(a[Ia>>0]|0)==0&1;if(!(a[(c[za>>2]|0)+22>>0]|0))a[(c[u>>2]|0)+272>>0]=a[Ha>>0]|0;a[Ga>>0]=1;a[(c[u>>2]|0)+302>>0]=0;a[(c[u>>2]|0)+303>>0]=0;Sa=c[Ca>>2]|0;Ra=c[t>>2]|0;j:do if((Sa|0)==2147483647)switch(Ra|0){case 18:case 16:case 17:case 21:{Sa=c[pa>>2]|0;c[Ca>>2]=Sa;break j};case 20:case 19:{c[Ca>>2]=-2147483648;Sa=-2147483648;break j};default:{Sa=2147483647;break j}}while(0);do if((Ra+ -8|0)>>>0<2){if((c[pa>>2]|0)<=(Sa|0)){c[ma>>2]=0;break c}if((Ra|0)!=9)break;c[Ca>>2]=-2147483648}while(0);k:do if(!(a[Aa>>0]|0)){if(!(c[r>>2]|0)){Ra=0;break d}}else{Sa=c[u>>2]|0;$a=c[qa>>2]|0;Ra=c[$a+13064>>2]|0;ab=c[$a+13120>>2]>>Ra;Ra=(c[$a+13124>>2]>>Ra)+1|0;ce(c[ra>>2]|0,0,$(c[sa>>2]|0,c[ua>>2]|0)|0)|0;ce(c[va>>2]|0,0,$(c[sa>>2]|0,c[ua>>2]|0)|0)|0;$a=c[qa>>2]|0;ce(c[wa>>2]|0,0,$(c[$a+13152>>2]|0,c[$a+13148>>2]|0)|0)|0;$a=c[qa>>2]|0;ce(c[xa>>2]|0,0,$((c[$a+13160>>2]|0)+1|0,(c[$a+13156>>2]|0)+1|0)|0)|0;ce(c[ya>>2]|0,-1,$((ab<<2)+4|0,Ra)|0)|0;c[ma>>2]=0;c[na>>2]=c[t>>2];Ra=c[za>>2]|0;if(a[Ra+42>>0]|0)c[Sa+312>>2]=c[c[Ra+1648>>2]>>2]<<c[(c[qa>>2]|0)+13080>>2];Ra=_b(m,oa,c[pa>>2]|0)|0;do if((Ra|0)>=0){c[(c[c[r>>2]>>2]|0)+80>>2]=((c[t>>2]|0)+ -16|0)>>>0<8&1;c[(c[oa>>2]|0)+84>>2]=3-(c[Da>>2]|0);yd(c[Ea>>2]|0);Ra=$b(m,c[Ea>>2]|0,0)|0;if((Ra|0)<0)break;break k}while(0);if(!(c[r>>2]|0)){o=167;break b}c[r>>2]=0;if((Ra|0)<0){p=B;break a}}while(0);if((c[t>>2]|0)!=(c[na>>2]|0)){p=B;o=180;break a}c[q>>2]=0;c[aa>>2]=1;Ra=c[la>>2]|0;Ba[c[Ra+816>>2]&1](Ra,1,q,n,1,4)|0;Ra=c[n>>2]|0;ab=c[qa>>2]|0;if((Ra|0)>=($(c[ab+13132>>2]|0,c[ab+13128>>2]|0)|0))c[ma>>2]=1;if((Ra|0)<0)break d;else break c};case 37:case 
36:{b[Qa>>1]=(e[Qa>>1]|0)+1&255;c[Ca>>2]=2147483647;break c};default:break c}}while(0);s=(c[(c[la>>2]|0)+688>>2]&8|0)==0?0:Ra;o=178}while(0);if((o|0)==178?(o=0,(s|0)<0):0){p=B;o=180;break a}C=C+1|0;if((C|0)>=(c[w>>2]|0)){p=B;o=180;break a}}if((o|0)==71){Nb(m);Nb(m);c[qa>>2]=0;p=B;o=180;break}else if((o|0)==91)ta();else if((o|0)==127){c[Ma>>2]=0;p=B;o=180;break}else if((o|0)==167){c[r>>2]=0;p=B;break}}else{p=B;o=180}}else{p=0;o=180}while(0);if((p|0)<0){ab=p;i=k;return ab|0}n=m+2604|0;if(c[n>>2]|0)c[n>>2]=0;m=c[m+164>>2]|0;if(c[m+304>>2]|0){ab=m+128|0;c[ab>>2]=e[f>>1];c[ab+4>>2]=0;zd(g,m);c[h>>2]=1}ab=c[l>>2]|0;i=k;return ab|0}function Lb(b){b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0;d=i;e=c[b+60>>2]|0;Nb(e);b=e+4412|0;f=e+4392|0;if((c[b>>2]|0)>0){g=0;do{jd((c[f>>2]|0)+(g<<2)|0);g=g+1|0}while((g|0)<(c[b>>2]|0))}jd(e+4396|0);jd(e+4388|0);jd(f);jd(e+152|0);jd(e+168|0);jd(e+172|0);jd(e+184|0);jd(e+176|0);jd(e+188|0);jd(e+180|0);jd(e+192|0);xd(e+164|0);g=e+2524|0;Xb(e,g,-1);xd(g);g=e+208|0;f=0;do{vd(g+(f<<2)|0);f=f+1|0}while((f|0)!=16);g=e+272|0;f=0;do{vd(g+(f<<2)|0);f=f+1|0}while((f|0)!=32);f=e+400|0;g=0;do{vd(f+(g<<2)|0);g=g+1|0}while((g|0)!=256);c[e+200>>2]=0;c[e+204>>2]=0;c[e+196>>2]=0;vd(e+1424|0);jd(e+2096|0);jd(e+2100|0);jd(e+2104|0);h=e+141|0;l=a[h>>0]|0;f=e+72|0;if((l&255)>1){g=e+8|0;j=1;do{k=f+(j<<2)|0;if(c[k>>2]|0){jd(k);jd(g+(j<<2)|0);l=a[h>>0]|0}j=j+1|0}while((j|0)<(l&255|0))}g=e+136|0;if((c[g>>2]|0)==(c[f>>2]|0))c[g>>2]=0;jd(f);f=e+4404|0;if((c[b>>2]|0)<=0){jd(f);c[b>>2]=0;i=d;return 0}e=0;do{jd((c[f>>2]|0)+(e<<4)|0);e=e+1|0}while((e|0)<(c[b>>2]|0));jd(f);c[b>>2]=0;i=d;return 0}function Mb(a){a=a|0;var b=0;b=i;a=c[a+60>>2]|0;Zb(a);c[a+2592>>2]=2147483647;i=b;return}function Nb(a){a=a|0;var b=0;b=i;jd(a+2504|0);jd(a+2508|0);jd(a+4332|0);jd(a+4336|0);jd(a+4340|0);jd(a+4344|0);jd(a+4348|0);jd(a+4316|0);jd(a+4328|0);jd(a+4352|0);jd(a+4320|0);jd(a+4324|0);jd(a+2096|0);jd(a+2104|0);jd(a+2100|0);i=b;return}function Ob(a){a=a|0;var 
b=0,d=0,e=0;b=i;d=(c[a+136>>2]|0)+204|0;if(bd(d)|0){e=-1094995529;i=b;return e|0}c[a+2512>>2]=_c(d,6)|0;e=_c(d,6)|0;d=(_c(d,3)|0)+ -1|0;c[a+2516>>2]=d;if((d|0)<0){e=-1094995529;i=b;return e|0}e=(e|0)==0&1;i=b;return e|0}function Pb(e,f){e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0;f=i;h=c[e+60>>2]|0;k=h+200|0;E=c[k>>2]|0;e=1<<c[E+13080>>2];l=h+204|0;n=c[l>>2]|0;A=c[(c[n+1668>>2]|0)+(c[h+2500>>2]<<2)>>2]|0;m=(a[h+1449>>0]|0)==0;if(!A)if(m)g=4;else{W=-1094995529;i=f;return W|0}else if(!m){m=h+4328|0;r=h+1436|0;if((c[(c[m>>2]|0)+(c[(c[n+1672>>2]|0)+(A+ -1<<2)>>2]<<2)>>2]|0)!=(c[r>>2]|0)){W=-1094995529;i=f;return W|0}}else g=4;if((g|0)==4){m=h+4328|0;r=h+1436|0}q=e+ -1|0;s=h+136|0;p=h+2504|0;y=h+2056|0;o=h+2057|0;w=h+2084|0;x=h+2508|0;v=h+2088|0;u=h+2062|0;t=h+4352|0;z=0;n=0;do{if((A|0)>=(c[E+13136>>2]|0))break;G=c[l>>2]|0;B=c[(c[G+1672>>2]|0)+(A<<2)>>2]|0;J=E+13120|0;I=E+13080|0;H=c[I>>2]|0;n=q+(c[J>>2]|0)>>H;z=((B|0)%(n|0)|0)<<H;n=((B|0)/(n|0)|0)<<H;C=c[s>>2]|0;H=1<<H;F=c[r>>2]|0;D=B-F|0;c[(c[m>>2]|0)+(B<<2)>>2]=F;do if(!(a[G+43>>0]|0)){if(!(a[G+42>>0]|0)){c[C+312>>2]=c[J>>2];G=E;break}if((A|0)!=0?(W=c[G+1676>>2]|0,(c[W+(A<<2)>>2]|0)!=(c[W+(A+ -1<<2)>>2]|0)):0){W=c[I>>2]|0;c[C+312>>2]=(c[(c[G+1648>>2]|0)+(c[(c[G+1664>>2]|0)+(z>>W<<2)>>2]<<2)>>2]<<W)+z;a[C+203>>0]=1;G=c[k>>2]|0}else G=E}else{if((z|0)==0?(H+ -1&n|0)==0:0){a[C+203>>0]=1;E=c[k>>2]|0}c[C+312>>2]=c[E+13120>>2];G=E}while(0);E=H+n|0;H=c[G+13124>>2]|0;c[C+316>>2]=(E|0)>(H|0)?H:E;E=C+31312|0;c[E>>2]=0;H=c[l>>2]|0;if(!(a[H+42>>0]|0)){if((B|0)==(F|0)){c[E>>2]=1;F=1}else F=0;if((D|0)<(c[G+13128>>2]|0)){F=F|4;c[E>>2]=F}}else{if((z|0)>0){W=c[H+1676>>2]|0;I=B+ -1|0;if((c[W+(A<<2)>>2]|0)==(c[W+(c[(c[H+1668>>2]|0)+(I<<2)>>2]<<2)>>2]|0))F=0;else{c[E>>2]=2;F=2}W=c[m>>2]|0;if((c[W+(B<<2)>>2]|0)!=(c[W+(I<<2)>>2]|0)){F=F|1;c[E>>2]=F}}else 
F=0;if((n|0)>0){W=c[H+1676>>2]|0;I=G+13128|0;G=c[I>>2]|0;if((c[W+(A<<2)>>2]|0)!=(c[W+(c[(c[H+1668>>2]|0)+(B-G<<2)>>2]<<2)>>2]|0)){F=F|8;c[E>>2]=F;G=c[I>>2]|0}W=c[m>>2]|0;if((c[W+(B<<2)>>2]|0)!=(c[W+(B-G<<2)>>2]|0)){F=F|4;c[E>>2]=F}}}E=(z|0)>0;if(E&(D|0)>0)G=(F>>>1&1^1)&255;else G=0;a[C+308>>0]=G;if((n|0)>0){if((D|0)<(c[(c[k>>2]|0)+13128>>2]|0))F=0;else F=(F>>>3&1^1)&255;a[C+309>>0]=F;F=c[(c[k>>2]|0)+13128>>2]|0;if((D+1|0)<(F|0))F=0;else{W=c[l>>2]|0;V=c[W+1676>>2]|0;F=(c[V+(A<<2)>>2]|0)==(c[V+(c[(c[W+1668>>2]|0)+(B+1-F<<2)>>2]<<2)>>2]|0)&1}a[C+310>>0]=F;if(E?(j=c[(c[k>>2]|0)+13128>>2]|0,(D|0)>(j|0)):0){D=c[l>>2]|0;W=c[D+1676>>2]|0;D=(c[W+(A<<2)>>2]|0)==(c[W+(c[(c[D+1668>>2]|0)+(B+ -1-j<<2)>>2]<<2)>>2]|0)&1}else D=0}else{a[C+309>>0]=0;a[C+310>>0]=0;D=0}a[C+311>>0]=D;Ua(h,A);D=c[k>>2]|0;E=c[D+13080>>2]|0;F=z>>E;E=n>>E;G=c[s>>2]|0;D=($(c[D+13128>>2]|0,E)|0)+F|0;C=c[p>>2]|0;if((a[y>>0]|0)==0?(a[o>>0]|0)==0:0){M=0;H=0}else{if((F|0)>0?(a[G+308>>0]|0)!=0:0)M=Za(h)|0;else M=0;if((E|0)>0&(M|0)==0)if(!(a[G+309>>0]|0)){M=0;H=0}else{M=0;H=(Za(h)|0)!=0}else H=0}I=(c[(c[k>>2]|0)+4>>2]|0)!=0?3:1;L=C+(D*148|0)+143|0;G=C+(D*148|0)+144|0;K=C+(D*148|0)+104|0;J=C+(D*148|0)+108|0;R=(M|0)==0;S=R&(H^1);M=E+ -1|0;O=F+ -1|0;P=0;do{Q=c[l>>2]|0;Q=d[((P|0)==0?Q+1644|0:Q+1645|0)>>0]|0;a:do if(a[h+P+2056>>0]|0){T=(P|0)==2;do if(!T){if(S){U=($a(h)|0)&255;N=C+(D*148|0)+P+142|0;a[N>>0]=U;break}if(!R){U=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;U=a[(c[p>>2]|0)+(U*148|0)+P+142>>0]|0;N=C+(D*148|0)+P+142|0;a[N>>0]=U;break}if(H){U=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;U=a[(c[p>>2]|0)+(U*148|0)+P+142>>0]|0;N=C+(D*148|0)+P+142|0;a[N>>0]=U;break}else{a[C+(D*148|0)+P+142>>0]=0;break a}}else{U=a[L>>0]|0;a[G>>0]=U;c[J>>2]=c[K>>2];N=G}while(0);if(U<<24>>24){U=0;do{do 
if(!S){if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(U<<2)>>2];break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(U<<2)>>2];break}else{c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=0;break}}else c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=cb(h)|0;while(0);U=U+1|0}while((U|0)!=4);do if((a[N>>0]|0)==1){T=0;do{do if(c[C+(D*148|0)+(P<<4)+(T<<2)>>2]|0){if(S){c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=db(h)|0;break}if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(T<<2)+48>>2];break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(T<<2)+48>>2];break}else{c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=0;break}}else c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=0;while(0);T=T+1|0}while((T|0)!=4);if(S){a[C+(D*148|0)+P+96>>0]=bb(h)|0;break}if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;a[C+(D*148|0)+P+96>>0]=a[(c[p>>2]|0)+(W*148|0)+P+96>>0]|0;break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;a[C+(D*148|0)+P+96>>0]=a[(c[p>>2]|0)+(W*148|0)+P+96>>0]|0;break}else{a[C+(D*148|0)+P+96>>0]=0;break}}else if(!T){if(S){c[C+(D*148|0)+(P<<2)+100>>2]=eb(h)|0;break}if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;c[C+(D*148|0)+(P<<2)+100>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<2)+100>>2];break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;c[C+(D*148|0)+(P<<2)+100>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<2)+100>>2];break}else{c[C+(D*148|0)+(P<<2)+100>>2]=0;break}}while(0);b[C+(D*148|0)+(P*10|0)+112>>1]=0;T=0;do{W=c[C+(D*148|0)+(P<<4)+(T<<2)>>2]|0;V=T;T=T+1|0;U=C+(D*148|0)+(P*10|0)+(T<<1)+112|0;b[U>>1]=W;if((a[N>>0]|0)==2){if((V|0)>1){W=0-W|0;b[U>>1]=W}}else if(c[C+(D*148|0)+(P<<4)+(V<<2)+48>>2]|0){W=0-W|0;b[U>>1]=W}b[U>>1]=W<<16>>16<<Q}while((T|0)!=4)}}else 
a[C+(D*148|0)+P+142>>0]=0;while(0);P=P+1|0}while((P|0)<(I|0));C=c[x>>2]|0;c[C+(B<<3)>>2]=c[w>>2];c[C+(B<<3)+4>>2]=c[v>>2];a[(c[t>>2]|0)+B>>0]=a[u>>0]|0;C=Qb(h,z,n,c[(c[k>>2]|0)+13080>>2]|0,0)|0;if((C|0)<0){g=108;break}A=A+1|0;Ta(h,A);Db(h,z,n,e);E=c[k>>2]|0}while((C|0)!=0);if((g|0)==108){c[(c[m>>2]|0)+(B<<2)>>2]=-1;W=C;i=f;return W|0}if((z+e|0)<(c[E+13120>>2]|0)){W=A;i=f;return W|0}if((n+e|0)<(c[E+13124>>2]|0)){W=A;i=f;return W|0}Bb(h,z,n,e);W=A;i=f;return W|0}function Qb(b,e,f,g,h){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0;j=i;i=i+32|0;z=j;B=j+20|0;A=b+136|0;r=c[A>>2]|0;l=1<<g;m=b+200|0;s=c[m>>2]|0;q=b+204|0;t=c[q>>2]|0;p=(1<<(c[s+13080>>2]|0)-(c[t+24>>2]|0))+ -1|0;c[r+31232>>2]=h;k=l+e|0;if(((k|0)<=(c[s+13120>>2]|0)?(l+f|0)<=(c[s+13124>>2]|0):0)?(c[s+13064>>2]|0)>>>0<g>>>0:0){s=lb(b,h,e,f)|0;t=c[q>>2]|0}else s=(c[s+13064>>2]|0)>>>0<g>>>0&1;if((a[t+22>>0]|0)!=0?((c[(c[m>>2]|0)+13080>>2]|0)-(c[t+24>>2]|0)|0)>>>0<=g>>>0:0){a[r+300>>0]=0;c[r+280>>2]=0}if((a[b+2080>>0]|0)!=0?((c[(c[m>>2]|0)+13080>>2]|0)-(d[(c[q>>2]|0)+1632>>0]|0)|0)>>>0<=g>>>0:0)a[r+301>>0]=0;if(s){o=l>>1;n=o+e|0;q=o+f|0;g=g+ -1|0;h=h+1|0;s=Qb(b,e,f,g,h)|0;if((s|0)<0){X=s;i=j;return X|0}if(s){if((n|0)<(c[(c[m>>2]|0)+13120>>2]|0)){s=Qb(b,n,f,g,h)|0;if((s|0)<0){X=s;i=j;return X|0}}if(s){if((q|0)<(c[(c[m>>2]|0)+13124>>2]|0)){s=Qb(b,e,q,g,h)|0;if((s|0)<0){X=s;i=j;return X|0}}if(s){X=c[m>>2]|0;if((n|0)<(c[X+13120>>2]|0)?(q|0)<(c[X+13124>>2]|0):0){s=Qb(b,n,q,g,h)|0;if((s|0)<0){X=s;i=j;return X|0}}}else s=0}else s=0}else s=0;if((p&k|0)==0?(p&l+f|0)==0:0)c[r+276>>2]=a[r+272>>0];if(!s){X=0;i=j;return X|0}k=c[m>>2]|0;if((n+o|0)<(c[k+13120>>2]|0))k=1;else k=(q+o|0)<(c[k+13124>>2]|0);X=k&1;i=j;return 
X|0}p=c[A>>2]|0;s=c[m>>2]|0;r=c[s+13064>>2]|0;h=c[s+13140>>2]|0;s=1<<(c[s+13080>>2]|0)-(c[(c[q>>2]|0)+24>>2]|0);c[p+31236>>2]=e;c[p+31240>>2]=f;x=p+31252|0;a[x>>0]=1;v=p+31244|0;c[v>>2]=1;E=p+31248|0;c[E>>2]=0;w=p+31254|0;a[w>>0]=0;y=p+31253|0;a[y>>0]=0;t=($(f>>r,h)|0)+(e>>r)|0;H=b+4332|0;a[(c[H>>2]|0)+t>>0]=0;X=p+31268|0;a[X>>0]=1;a[X+1>>0]=1;a[X+2>>0]=1;a[X+3>>0]=1;r=l>>r;s=s+ -1|0;if(a[(c[q>>2]|0)+40>>0]|0){X=(gb(b)|0)&255;a[p+31256>>0]=X;if(X<<24>>24)Rb(b,e,f,g)}else a[p+31256>>0]=0;u=(r|0)>0;if(u){G=t;F=0;while(1){ce((c[H>>2]|0)+G|0,0,r|0)|0;F=F+1|0;if((F|0)==(r|0))break;else G=G+h|0}}if((c[v>>2]|0)==1?(c[(c[m>>2]|0)+13064>>2]|0)!=(g|0):0)D=c[E>>2]|0;else{F=mb(b,g)|0;c[E>>2]=F;E=c[v>>2]|0;if((F|0)==3)G=(E|0)==1&1;else G=0;a[w>>0]=G;if((E|0)==1)D=F;else ta()}if((((D|0)==0?(C=c[m>>2]|0,(c[C+68>>2]|0)!=0):0)?(c[C+13048>>2]|0)>>>0<=g>>>0:0)?(c[C+13052>>2]|0)>>>0>=g>>>0:0){C=(nb(b)|0)&255;a[y>>0]=C}else C=a[y>>0]|0;do if(!(C<<24>>24)){C=c[A>>2]|0;E=(c[C+31248>>2]|0)==3;D=E?2:1;H=0;do{F=H<<1;G=0;do{a[B+(G+F)>>0]=ob(b)|0;G=G+1|0}while((G|0)<(D|0));H=H+1|0}while((H|0)<(D|0));Q=l>>(E&1);P=C+31264|0;R=z+4|0;E=z+8|0;F=b+4340|0;O=C+31260|0;N=0;do{J=N<<1;H=($(N,Q)|0)+f|0;I=0;do{L=I+J|0;U=(a[B+L>>0]|0)==0;if(U)c[P>>2]=qb(b)|0;else c[O>>2]=pb(b)|0;V=($(I,Q)|0)+e|0;T=c[A>>2]|0;X=c[m>>2]|0;S=c[X+13084>>2]|0;M=V>>S;K=H>>S;G=c[X+13156>>2]|0;S=Q>>S;X=c[X+13080>>2]|0;W=(1<<X)+ -1|0;V=W&V;if((a[T+309>>0]|0)==0?(W&H|0)==0:0)W=1;else{W=($(K+ -1|0,G)|0)+M|0;W=d[(c[F>>2]|0)+W>>0]|0}if((a[T+308>>0]|0)==0&(V|0)==0)V=1;else{V=M+ -1+($(K,G)|0)|0;V=d[(c[F>>2]|0)+V>>0]|0}X=(H>>X<<X|0)<(H|0)?W:1;do 
if((V|0)==(X|0))if(V>>>0<2){c[z>>2]=0;c[R>>2]=1;c[E>>2]=26;V=0;X=1;W=26;break}else{c[z>>2]=V;X=(V+29&31)+2|0;c[R>>2]=X;W=(V+31&31)+2|0;c[E>>2]=W;break}else{c[z>>2]=V;c[R>>2]=X;if(!((V|0)==0|(X|0)==0)){c[E>>2]=0;W=0;break}if((V|0)==1|(X|0)==1){c[E>>2]=26;W=26;break}else{c[E>>2]=1;W=1;break}}while(0);if(U){if((V|0)>(X|0)){c[R>>2]=V;U=X&255;c[z>>2]=U}else{U=V;V=X}if((U|0)>(W|0)){c[E>>2]=U;X=W&255;c[z>>2]=X;W=U;U=X}if((V|0)>(W|0)){c[E>>2]=V;X=W&255;c[R>>2]=X}else{X=V;V=W}T=c[T+31264>>2]|0;T=((T|0)>=(U|0)&1)+T|0;T=((T|0)>=(X|0)&1)+T|0;T=((T|0)>=(V|0)&1)+T|0}else T=c[z+(c[T+31260>>2]<<2)>>2]|0;S=(S|0)==0?1:S;T=T&255;if((S|0)>0){U=0;do{X=($(U+K|0,G)|0)+M|0;ce((c[F>>2]|0)+X|0,T|0,S|0)|0;U=U+1|0}while((U|0)<(S|0))}a[C+L+31268>>0]=T;I=I+1|0}while((I|0)<(D|0));N=N+1|0}while((N|0)<(D|0));z=c[(c[m>>2]|0)+4>>2]|0;if((z|0)==3){B=0;do{z=B<<1;E=0;do{G=rb(b)|0;F=E+z|0;a[C+F+31281>>0]=G;A=a[C+F+31268>>0]|0;do if((G|0)!=4){G=a[1528+G>>0]|0;F=C+F+31277|0;if(A<<24>>24==G<<24>>24){a[F>>0]=34;break}else{a[F>>0]=G;break}}else a[C+F+31277>>0]=A;while(0);E=E+1|0}while((E|0)<(D|0));B=B+1|0}while((B|0)<(D|0))}else if(!z)break;else 
if((z|0)!=2){A=rb(b)|0;z=a[C+31268>>0]|0;if((A|0)==4){a[C+31277>>0]=z;break}A=a[1528+A>>0]|0;B=C+31277|0;if(z<<24>>24==A<<24>>24){a[B>>0]=34;break}else{a[B>>0]=A;break}}else{z=rb(b)|0;a[C+31281>>0]=z;A=a[C+31268>>0]|0;if((z|0)==4)z=A&255;else{z=a[1528+z>>0]|0;z=A<<24>>24==z<<24>>24?34:z&255}a[C+31277>>0]=a[1536+z>>0]|0;break}}else{G=c[m>>2]|0;B=c[G+13084>>2]|0;E=l>>B;C=c[G+13156>>2]|0;D=e>>B;B=f>>B;E=(E|0)==0?1:E;if((E|0)>0){F=b+4340|0;G=0;do{X=($(G+B|0,C)|0)+D|0;ce((c[F>>2]|0)+X|0,1,E|0)|0;G=G+1|0}while((G|0)<(E|0));G=c[m>>2]|0}K=c[A>>2]|0;V=c[b+160>>2]|0;A=c[V+32>>2]|0;D=$(A,f)|0;H=c[G+56>>2]|0;D=(c[V>>2]|0)+((e<<H)+D)|0;E=c[V+36>>2]|0;J=c[G+13184>>2]|0;B=$(f>>J,E)|0;I=c[G+13172>>2]|0;B=(c[V+4>>2]|0)+((e>>I<<H)+B)|0;C=c[V+40>>2]|0;X=c[G+13188>>2]|0;F=$(f>>X,C)|0;W=c[G+13176>>2]|0;F=(c[V+8>>2]|0)+((e>>W<<H)+F)|0;H=$(d[G+13044>>0]|0,l<<g)|0;J=($(l>>W,l>>X)|0)+($(l>>I,l>>J)|0)|0;G=($(d[G+13045>>0]|0,J)|0)+H|0;H=K+224|0;J=G+7>>3;I=c[K+240>>2]|0;X=c[H>>2]|0;I=(X&1|0)==0?I:I+ -1|0;I=(X&511|0)==0?I:I+ -1|0;K=(c[K+244>>2]|0)-I|0;if((K|0)<(J|0))I=0;else Yc(H,I+J|0,K-J|0);if(!(a[b+2061>>0]|0))Ab(b,e,f,g);X=G>>>0>2147483639|(I|0)==0;W=X?0:G;V=X?0:I;c[z>>2]=V;c[z+12>>2]=W;c[z+16>>2]=W+8;c[z+4>>2]=V+(W+7>>3);c[z+8>>2]=0;if(X)z=-1094995529;else{W=b+2608|0;Ha[c[W>>2]&3](D,A,l,l,z,d[(c[m>>2]|0)+13044>>0]|0);X=c[m>>2]|0;Ha[c[W>>2]&3](B,E,l>>c[X+13172>>2],l>>c[X+13184>>2],z,d[X+13045>>0]|0);X=c[m>>2]|0;Ha[c[W>>2]&3](F,C,l>>c[X+13176>>2],l>>c[X+13188>>2],z,d[X+13045>>0]|0);z=0}if(a[(c[m>>2]|0)+13056>>0]|0)Rb(b,e,f,g);if((z|0)<0){X=z;i=j;return X|0}}while(0);do if(!(a[y>>0]|0)){if(!(a[x>>0]|0)){if(a[b+2061>>0]|0)break;Ab(b,e,f,g);break}x=c[m>>2]|0;if((c[v>>2]|0)==1)v=(d[w>>0]|0)+(c[x+13092>>2]|0)|0;else v=c[x+13088>>2]|0;a[p+31255>>0]=v;v=Sb(b,e,f,e,f,e,f,g,g,0,0,1520,1520)|0;if((v|0)<0){X=v;i=j;return 
X|0}}while(0);if((a[(c[q>>2]|0)+22>>0]|0)!=0?(a[p+300>>0]|0)==0:0)zb(b,e,f,g);if(u){q=b+4316|0;g=p+272|0;u=0;while(1){ce((c[q>>2]|0)+t|0,a[g>>0]|0,r|0)|0;u=u+1|0;if((u|0)==(r|0))break;else t=t+h|0}}if((s&k|0)==0?(s&l+f|0)==0:0)c[p+276>>2]=a[p+272>>0];q=c[m>>2]|0;X=c[q+13064>>2]|0;g=l>>X;r=e>>X;e=f>>X;if((g|0)>0?(n=b+4336|0,o=c[p+31232>>2]&255,X=($(c[q+13140>>2]|0,e)|0)+r|0,ce((c[n>>2]|0)+X|0,o|0,g|0)|0,(g|0)!=1):0){p=1;do{X=($(c[(c[m>>2]|0)+13140>>2]|0,p+e|0)|0)+r|0;ce((c[n>>2]|0)+X|0,o|0,g|0)|0;p=p+1|0}while((p|0)!=(g|0))}e=c[m>>2]|0;m=1<<c[e+13080>>2];if(((k|0)%(m|0)|0|0)!=0?(k|0)<(c[e+13120>>2]|0):0){X=1;i=j;return X|0}X=l+f|0;if(((X|0)%(m|0)|0|0)!=0?(X|0)<(c[e+13124>>2]|0):0){X=1;i=j;return X|0}X=(fb(b)|0)==0&1;i=j;return X|0}function Rb(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0;g=i;m=1<<f;n=c[b+200>>2]|0;l=c[n+13084>>2]|0;f=c[n+13156>>2]|0;k=m+d|0;j=c[n+13120>>2]|0;m=m+e|0;n=c[n+13124>>2]|0;h=e>>l;e=((m|0)>(n|0)?n:m)>>l;if((h|0)>=(e|0)){i=g;return}d=d>>l;j=((k|0)>(j|0)?j:k)>>l;k=(d|0)<(j|0);b=b+4348|0;do{if(k){m=$(h,f)|0;l=d;do{a[(c[b>>2]|0)+(l+m)>>0]=2;l=l+1|0}while((l|0)!=(j|0))}h=h+1|0}while((h|0)!=(e|0));i=g;return}function Sb(e,f,g,h,j,k,l,m,n,o,p,q,r){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;q=q|0;r=r|0;var s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0;s=i;i=i+16|0;w=s+8|0;t=s;z=e+136|0;v=c[z>>2]|0;K=c[q>>2]|0;c[w>>2]=K;M=c[q+4>>2]|0;A=w+4|0;c[A>>2]=M;y=c[r>>2]|0;c[t>>2]=y;J=c[r+4>>2]|0;x=t+4|0;c[x>>2]=J;q=a[v+31254>>0]|0;do 
if(q<<24>>24){if((o|0)==1){c[v+288>>2]=d[v+p+31268>>0];if((c[(c[e+200>>2]|0)+4>>2]|0)==3){c[v+292>>2]=d[v+p+31277>>0];c[v+296>>2]=d[v+p+31281>>0];break}else{c[v+292>>2]=d[v+31277>>0];c[v+296>>2]=d[v+31281>>0];break}}}else{c[v+288>>2]=d[v+31268>>0];c[v+292>>2]=d[v+31277>>0];c[v+296>>2]=d[v+31281>>0]}while(0);r=e+200|0;G=c[r>>2]|0;B=(c[G+13076>>2]|0)>>>0<n>>>0;if(((!B?(c[G+13072>>2]|0)>>>0<n>>>0:0)?(d[v+31255>>0]|0)>(o|0):0)?!(q<<24>>24!=0&(o|0)==0):0)q=(sb(e,n)|0)&255;else{if((c[G+13088>>2]|0)==0?(c[v+31244>>2]|0)==0:0)G=(o|0)==0&(c[v+31248>>2]|0)!=0;else G=0;if(B)q=1;else q=(q<<24>>24!=0&(o|0)==0|G)&1}B=(n|0)>2;G=c[(c[r>>2]|0)+4>>2]|0;if(B)if(!G)L=y;else E=20;else if((G|0)==3)E=20;else L=y;do if((E|0)==20){G=(o|0)==0;if(!((K|0)==0&(G^1))){K=tb(e,o)|0;c[w>>2]=K;if((c[(c[r>>2]|0)+4>>2]|0)==2?q<<24>>24==0|(n|0)==3:0){M=tb(e,o)|0;c[A>>2]=M}if(!G)E=25}else{K=0;E=25}if((E|0)==25)if(!y){L=0;break}L=tb(e,o)|0;c[t>>2]=L;if((c[(c[r>>2]|0)+4>>2]|0)==2?q<<24>>24==0|(n|0)==3:0){J=tb(e,o)|0;c[x>>2]=J}}while(0);if(!(q<<24>>24)){A=c[r>>2]|0;y=c[A+13072>>2]|0;x=1<<y;q=c[A+13148>>2]|0;if(((o|0)==0?(c[v+31244>>2]|0)!=1:0)&(K|0)==0&(L|0)==0)if((c[A+4>>2]|0)==2?(M|J|0)!=0:0)E=37;else o=1;else E=37;if((E|0)==37){o=ub(e,o)|0;A=c[r>>2]|0}G=c[z>>2]|0;A=n-(c[A+13172>>2]|0)|0;z=G+31244|0;if((c[z>>2]|0)==1){I=1<<n;Cc(e,f,g,I,I);Ub(e,f,g,n,0)}I=(o|0)!=0;K=(K|L|0)==0;do if(I)if(K)E=46;else{F=0;E=48}else if(K){K=c[r>>2]|0;L=c[K+4>>2]|0;if((L|0)==2){if(M){E=46;break}if(J){M=0;E=46;break}}if(!((c[z>>2]|0)!=1|(L|0)==0)){if(B|(L|0)==3){t=1<<(c[K+13172>>2]|0)+A;w=1<<(c[K+13184>>2]|0)+A;Cc(e,f,g,t,w);Ub(e,f,g,A,1);Ub(e,f,g,A,2);if((c[(c[r>>2]|0)+4>>2]|0)!=2)break;M=(1<<A)+g|0;Cc(e,f,M,t,w);Ub(e,f,M,A,1);Ub(e,f,M,A,2);break}if((p|0)==3?(F=1<<n+1,H=1<<(c[K+13184>>2]|0)+n,Cc(e,h,j,F,H),Ub(e,h,j,n,1),Ub(e,h,j,n,2),(c[(c[r>>2]|0)+4>>2]|0)==2):0){M=(1<<n)+j|0;Cc(e,h,M,F,H);Ub(e,h,M,n,1);Ub(e,h,M,n,2)}}}else{F=0;E=48}while(0);if((E|0)==46)if((c[(c[r>>2]|0)+4>>2]|0)==2){F=(M|J|0)==0;E=48}else{F=1;E=48}a:do 
if((E|0)==48){E=e+204|0;do if((a[(c[E>>2]|0)+22>>0]|0)!=0?(D=G+300|0,(a[D>>0]|0)==0):0){M=hb(e)|0;J=G+280|0;c[J>>2]=M;if(M){M=(ib(e)|0)==1;H=c[J>>2]|0;if(M){H=0-H|0;c[J>>2]=H}}else H=0;a[D>>0]=1;M=(c[(c[r>>2]|0)+13192>>2]|0)/2|0;if((H|0)<(-26-M|0)|(H|0)>(M+25|0)){M=-1094995529;i=s;return M|0}else{zb(e,k,l,m);break}}while(0);if((!((a[e+2080>>0]|0)==0|F)?(a[G+31256>>0]|0)==0:0)?(C=G+301|0,(a[C>>0]|0)==0):0){if(!(jb(e)|0)){a[G+302>>0]=0;a[G+303>>0]=0}else{l=c[E>>2]|0;if(!(a[l+1633>>0]|0))m=0;else{m=kb(e)|0;l=c[E>>2]|0}a[G+302>>0]=a[l+m+1634>>0]|0;a[G+303>>0]=a[(c[E>>2]|0)+m+1639>>0]|0}a[C>>0]=1}if((c[z>>2]|0)==1&(n|0)<4){m=c[G+288>>2]|0;if((m+ -6|0)>>>0<9)k=2;else k=(m+ -22|0)>>>0<9&1;m=c[G+292>>2]|0;if((m+ -6|0)>>>0<9)m=2;else m=(m+ -22|0)>>>0<9&1}else{k=0;m=0}l=G+304|0;a[l>>0]=0;if(I)xb(e,f,g,n,k,0);k=c[r>>2]|0;C=c[k+4>>2]|0;if(C){if(!(B|(C|0)==3)){if((p|0)!=3)break;p=1<<n+1;A=1<<(c[k+13184>>2]|0)+n;l=0;do{if((c[z>>2]|0)==1){M=(l<<n)+j|0;Cc(e,h,M,p,A);Ub(e,h,M,n,1)}if(c[w+(l<<2)>>2]|0)xb(e,h,(l<<n)+j|0,n,m,1);l=l+1|0}while((l|0)<(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0));w=0;while(1){if((c[z>>2]|0)==1){M=(w<<n)+j|0;Cc(e,h,M,p,A);Ub(e,h,M,n,2)}if(c[t+(w<<2)>>2]|0)xb(e,h,(w<<n)+j|0,n,m,2);w=w+1|0;if((w|0)>=(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0))break a}}h=1<<(c[k+13172>>2]|0)+A;j=1<<(c[k+13184>>2]|0)+A;do if((a[(c[E>>2]|0)+1630>>0]|0)==0|I^1)a[l>>0]=0;else{if(c[z>>2]|0){M=(c[G+296>>2]|0)==4;a[l>>0]=M&1;if(!M)break}else a[l>>0]=1;Tb(e,0)}while(0);p=e+160|0;E=G+320|0;D=G+11680|0;C=1<<A<<A;k=(C|0)>0;B=e+(A+ -2<<2)+2612|0;F=G+284|0;I=0;do{if((c[z>>2]|0)==1){M=(I<<A)+g|0;Cc(e,f,M,h,j);Ub(e,f,M,A,1)}do if(!(c[w+(I<<2)>>2]|0)){if(!(a[l>>0]|0))break;L=c[p>>2]|0;G=c[L+36>>2]|0;H=c[r>>2]|0;M=$(g>>c[H+13184>>2],G)|0;H=(c[L+4>>2]|0)+(M+(f>>c[H+13172>>2]<<c[H+56>>2]))|0;if(k){I=0;do{b[D+(I<<1)>>1]=($(b[E+(I<<1)>>1]|0,c[F>>2]|0)|0)>>>3;I=I+1|0}while((I|0)!=(C|0));I=C}else I=0;Ea[c[B>>2]&7](H,D,G)}else 
xb(e,f,(I<<A)+g|0,A,m,1);while(0);I=I+1|0}while((I|0)<(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0));if(!(a[l>>0]|0))H=0;else{Tb(e,1);H=0}do{if((c[z>>2]|0)==1){M=(H<<A)+g|0;Cc(e,f,M,h,j);Ub(e,f,M,A,2)}do if(!(c[t+(H<<2)>>2]|0)){if(!(a[l>>0]|0))break;L=c[p>>2]|0;w=c[L+40>>2]|0;G=c[r>>2]|0;M=$(g>>c[G+13188>>2],w)|0;G=(c[L+8>>2]|0)+(M+(f>>c[G+13176>>2]<<c[G+56>>2]))|0;if(k){H=0;do{b[D+(H<<1)>>1]=($(b[E+(H<<1)>>1]|0,c[F>>2]|0)|0)>>>3;H=H+1|0}while((H|0)!=(C|0));H=C}else H=0;Ea[c[B>>2]&7](G,D,w)}else xb(e,f,(H<<A)+g|0,A,m,2);while(0);H=H+1|0}while((H|0)<(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0))}}while(0);if((o|0)!=0?(u=1<<n,(u|0)>0):0){t=e+4344|0;r=0;do{w=$(r+g>>y,q)|0;h=0;do{a[(c[t>>2]|0)+((h+f>>y)+w)>>0]=1;h=h+x|0}while((h|0)<(u|0));r=r+x|0}while((r|0)<(u|0))}if(((a[e+2061>>0]|0)==0?(Ab(e,f,g,n),(a[(c[e+204>>2]|0)+40>>0]|0)!=0):0)?(a[v+31256>>0]|0)!=0:0)Rb(e,f,g,n)}else{v=n+ -1|0;u=1<<v;n=u+f|0;u=u+g|0;r=o+1|0;q=Sb(e,f,g,f,g,k,l,m,v,r,0,w,t)|0;if((q|0)<0){M=q;i=s;return M|0}q=Sb(e,n,g,f,g,k,l,m,v,r,1,w,t)|0;if((q|0)<0){M=q;i=s;return M|0}q=Sb(e,f,u,f,g,k,l,m,v,r,2,w,t)|0;if((q|0)<0){M=q;i=s;return M|0}f=Sb(e,n,u,f,g,k,l,m,v,r,3,w,t)|0;if((f|0)<0){M=f;i=s;return M|0}}M=0;i=s;return M|0}function Tb(a,b){a=a|0;b=b|0;var d=0,e=0,f=0;d=i;e=c[a+136>>2]|0;f=vb(a,b)|0;if(!f){c[e+284>>2]=0;i=d;return}else{c[e+284>>2]=1-((wb(a,b)|0)<<1)<<f+ -1;i=d;return}}function Ub(b,e,f,g,h){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;var 
j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0;j=i;i=i+272|0;t=j+195|0;z=j+130|0;w=j+65|0;v=j;r=c[b+136>>2]|0;q=c[b+200>>2]|0;O=c[q+(h<<2)+13168>>2]|0;N=c[q+(h<<2)+13180>>2]|0;k=1<<g;ea=k<<O;V=c[q+13072>>2]|0;ga=k<<N;T=c[q+13164>>2]|0;P=e>>V&T;R=f>>V&T;S=T+2|0;Q=($(R,S)|0)+P|0;U=c[b+204>>2]|0;aa=c[U+1684>>2]|0;Q=c[aa+(Q<<2)>>2]|0;l=c[b+160>>2]|0;b=c[l+(h<<2)+32>>2]|0;l=c[l+(h<<2)>>2]|0;m=($(b,f>>N)|0)+(e>>O)|0;n=l+m|0;o=(h|0)==0;p=c[(o?r+288|0:r+292|0)>>2]|0;u=t+1|0;y=w+1|0;x=z+1|0;s=v+1|0;if(!(c[r+31288>>2]|0))fa=0;else fa=(Q|0)>(c[aa+(P+ -1+($(T&R+(ga>>V),S)|0)<<2)>>2]|0);da=fa&1;_=c[r+31292>>2]|0;H=c[r+31300>>2]|0;Z=c[r+31296>>2]|0;if(!(c[r+31304>>2]|0))ba=0;else ba=(Q|0)>(c[aa+(($(S,R+ -1|0)|0)+(T&P+(ea>>V))<<2)>>2]|0);S=ba&1;V=(ga<<1)+f|0;R=q+13124|0;aa=c[R>>2]|0;P=ga+f|0;V=((V|0)>(aa|0)?aa:V)-P>>N;aa=(ea<<1)+e|0;T=q+13120|0;ja=c[T>>2]|0;Q=ea+e|0;aa=((aa|0)>(ja|0)?ja:aa)-Q>>O;U=U+20|0;if((a[U>>0]|0)==1){ca=c[q+13084>>2]|0;ha=ga>>ca;ea=ea>>ca;ia=(1<<ca)+ -1|0;ga=ia&f;ea=((ea|0)==0&1)+ea|0;ia=(ia&e|0)!=0;if(!(ia|fa^1)){fa=(c[q+13160>>2]|0)-(P>>ca)|0;fa=(ha|0)>(fa|0)?fa:ha;if((fa|0)>0){da=0;ja=0;do{da=da|1;ja=ja+2|0}while((ja|0)<(fa|0))}else da=0}if(!((_|0)!=1|ia)){fa=(c[q+13160>>2]|0)-(f>>ca)|0;fa=(ha|0)>(fa|0)?fa:ha;if((fa|0)>0){_=0;ha=0;do{_=_|1;ha=ha+2|0}while((ha|0)<(fa|0))}else _=0}fa=(ga|0)!=0;if(!((Z|0)!=1|fa)){ga=(c[q+13156>>2]|0)-(e>>ca)|0;ga=(ea|0)>(ga|0)?ga:ea;if((ga|0)>0){Z=0;ha=0;do{Z=Z|1;ha=ha+2|0}while((ha|0)<(ga|0))}else Z=0}if(!(fa|ba^1)){ca=(c[q+13156>>2]|0)-(Q>>ca)|0;ca=(ea|0)>(ca|0)?ca:ea;if((ca|0)>0){S=0;ba=0;do{S=S|1;ba=ba+2|0}while((ba|0)<(ca|0))}else S=0}ba=u+0|0;ca=ba+64|0;do{a[ba>>0]=128;ba=ba+1|0}while((ba|0)<(ca|0));ba=w+0|0;ca=ba+65|0;do{a[ba>>0]=128;ba=ba+1|0}while((ba|0)<(ca|0));ea=S}else 
ea=S;ba=(H|0)!=0;if(ba){ja=a[l+(m+~b)>>0]|0;a[t>>0]=ja;a[w>>0]=ja}ca=(Z|0)!=0;if(ca)fe(y|0,l+(m-b)|0,k|0)|0;S=(ea|0)!=0;if(S?(Y=k+1|0,fe(w+Y|0,l+(k-b+m)|0,k|0)|0,X=$(d[l+(k+ -1-b+m+aa)>>0]|0,16843009)|0,W=k-aa|0,(W|0)>0):0){Y=aa+Y|0;aa=0;do{ja=w+(Y+aa)|0;a[ja>>0]=X;a[ja+1>>0]=X>>8;a[ja+2>>0]=X>>16;a[ja+3>>0]=X>>24;aa=aa+4|0}while((aa|0)<(W|0))}W=(_|0)!=0;if(W&(k|0)>0){Y=m+ -1|0;X=0;do{ja=X;X=X+1|0;a[t+X>>0]=a[l+(Y+($(ja,b)|0))>>0]|0}while((X|0)!=(k|0))}X=(da|0)!=0;if(X){aa=V+k|0;fa=m+ -1|0;if((V|0)>0){Y=k;do{ja=Y;Y=Y+1|0;a[t+Y>>0]=a[l+(fa+($(ja,b)|0))>>0]|0}while((Y|0)<(aa|0))}Y=$(d[l+(fa+($(aa+ -1|0,b)|0))>>0]|0,16843009)|0;aa=k-V|0;if((aa|0)>0){V=k+1+V|0;fa=0;do{ja=t+(V+fa)|0;a[ja>>0]=Y;a[ja+1>>0]=Y>>8;a[ja+2>>0]=Y>>16;a[ja+3>>0]=Y>>24;fa=fa+4|0}while((fa|0)<(aa|0))}}do if((a[U>>0]|0)==1?(ja=da|_,L=(ja|0)==0,ja=ja|H,M=(ja|0)==0,(Z|ea|ja|0)!=0):0){U=k<<1;V=c[T>>2]|0;if(((U<<O)+e|0)<(V|0))T=U;else T=V-e>>O;R=c[R>>2]|0;if(((U<<N)+f|0)>=(R|0))U=R-f>>N;if(!S)if((Q|0)<(V|0))O=k;else O=V-e>>O;else O=T;if(!X)if((P|0)<(R|0))U=k;else U=R-f>>N;N=a[w>>0]|0;if(M)a[t>>0]=N;a[t>>0]=N;if(!L){L=0;while(1)if((L|0)<(U|0))L=L+4|0;else break}if(!W?(K=$(N&255,16843009)|0,(k|0)>0):0){L=0;do{ja=t+(L|1)|0;a[ja>>0]=K;a[ja+1>>0]=K>>8;a[ja+2>>0]=K>>16;a[ja+3>>0]=K>>24;L=L+4|0}while((L|0)<(k|0))}do if(!X){L=$(d[t+k>>0]|0,16843009)|0;if((k|0)<=0)break;K=k+1|0;M=0;do{ja=t+(K+M)|0;a[ja>>0]=L;a[ja+1>>0]=L>>8;a[ja+2>>0]=L>>16;a[ja+3>>0]=L>>24;M=M+4|0}while((M|0)<(k|0))}while(0);f=(f|0)==0;if((e|0)==0&(U|0)>0){e=0;do{ja=t+(e|1)|0;a[ja>>0]=0;a[ja+1>>0]=0;a[ja+2>>0]=0;a[ja+3>>0]=0;e=e+4|0}while((e|0)<(U|0))}a[w>>0]=a[t>>0]|0;if(f)break;else e=0;while(1)if((e|0)<(O|0))e=e+4|0;else break}while(0);a:do if(!X){if(W){f=$(d[t+k>>0]|0,16843009)|0;if((k|0)<=0){J=84;break}J=k+1|0;e=0;while(1){ja=t+(J+e)|0;a[ja>>0]=f;a[ja+1>>0]=f>>8;a[ja+2>>0]=f>>16;a[ja+3>>0]=f>>24;e=e+4|0;if((e|0)>=(k|0)){J=84;break 
a}}}if(ba){e=$(d[t>>0]|0,16843009)|0;J=k<<1;if((k|0)>0)I=0;else{J=87;break}while(1){ja=t+(I|1)|0;a[ja>>0]=e;a[ja+1>>0]=e>>8;a[ja+2>>0]=e>>16;a[ja+3>>0]=e>>24;I=I+4|0;if((I|0)>=(J|0)){J=87;break a}}}if(ca){I=a[y>>0]|0;a[t>>0]=I;I=$(I&255,16843009)|0;H=k<<1;if((k|0)>0)J=0;else{J=89;break}while(1){ja=t+(J|1)|0;a[ja>>0]=I;a[ja+1>>0]=I>>8;a[ja+2>>0]=I>>16;a[ja+3>>0]=I>>24;J=J+4|0;if((J|0)>=(H|0)){J=89;break a}}}if(!S){a[t>>0]=-128;J=k<<1;f=(k|0)>0;if(f)e=0;else{J=84;break}do{ja=w+(e|1)|0;a[ja>>0]=-2139062144;a[ja+1>>0]=-2139062144>>8;a[ja+2>>0]=-2139062144>>16;a[ja+3>>0]=-2139062144>>24;e=e+4|0}while((e|0)<(J|0));if(f)e=0;else{J=84;break}while(1){ja=t+(e|1)|0;a[ja>>0]=-2139062144;a[ja+1>>0]=-2139062144>>8;a[ja+2>>0]=-2139062144>>16;a[ja+3>>0]=-2139062144>>24;e=e+4|0;if((e|0)>=(J|0)){J=84;break a}}}H=w+(k+1)|0;e=a[H>>0]|0;I=$(e&255,16843009)|0;G=(k|0)>0;if(G)J=0;else{a[t>>0]=e;break}do{ja=w+(J|1)|0;a[ja>>0]=I;a[ja+1>>0]=I>>8;a[ja+2>>0]=I>>16;a[ja+3>>0]=I>>24;J=J+4|0}while((J|0)<(k|0));I=a[H>>0]|0;a[t>>0]=I;I=$(I&255,16843009)|0;H=k<<1;if(G){G=0;do{ja=t+(G|1)|0;a[ja>>0]=I;a[ja+1>>0]=I>>8;a[ja+2>>0]=I>>16;a[ja+3>>0]=I>>24;G=G+4|0}while((G|0)<(H|0));J=92}else J=92}else J=84;while(0);if((J|0)==84)if((_|0)==0?(I=$(d[t+(k+1)>>0]|0,16843009)|0,(k|0)>0):0){J=0;do{ja=t+(J|1)|0;a[ja>>0]=I;a[ja+1>>0]=I>>8;a[ja+2>>0]=I>>16;a[ja+3>>0]=I>>24;J=J+4|0}while((J|0)<(k|0));J=87}else J=87;if((J|0)==87)if(!H){a[t>>0]=a[u>>0]|0;J=89}else J=89;if((J|0)==89)if((Z|0)==0?(G=$(d[t>>0]|0,16843009)|0,(k|0)>0):0){H=0;do{ja=w+(H|1)|0;a[ja>>0]=G;a[ja+1>>0]=G>>8;a[ja+2>>0]=G>>16;a[ja+3>>0]=G>>24;H=H+4|0}while((H|0)<(k|0));J=92}else J=92;if(((J|0)==92?!S:0)?(F=$(d[w+k>>0]|0,16843009)|0,(k|0)>0):0){H=k+1|0;G=0;do{ja=w+(H+G)|0;a[ja>>0]=F;a[ja+1>>0]=F>>8;a[ja+2>>0]=F>>16;a[ja+3>>0]=F>>24;G=G+4|0}while((G|0)<(k|0))}F=a[t>>0]|0;a[w>>0]=F;b:do if(!(c[q+13112>>2]|0)){if(o){if((p|0)==1|(k|0)==4){s=y;break}}else if(((p|0)==1?1:(c[q+4>>2]|0)!=3)|(k|0)==4){s=y;break}ja=p+ -26|0;ja=(ja|0)>-1?ja:26-p|0;ia=p+ 
-10|0;ia=(ia|0)>-1?ia:10-p|0;if((((ja|0)>(ia|0)?ia:ja)|0)>(c[1576+(g+ -3<<2)>>2]|0)){if((o&(a[q+13061>>0]|0)!=0&(g|0)==5?(D=F&255,E=a[w+64>>0]|0,C=E&255,ja=C+D-(d[w+32>>0]<<1)|0,(((ja|0)>-1?ja:0-ja|0)|0)<8):0)?(A=t+64|0,B=a[A>>0]|0,ja=(B&255)+D-(d[t+32>>0]<<1)|0,(((ja|0)>-1?ja:0-ja|0)|0)<8):0){a[v>>0]=F;a[v+64>>0]=E;w=0;do{ja=w;w=w+1|0;a[v+w>>0]=(($(D,63-ja|0)|0)+32+($(C,w)|0)|0)>>>6}while((w|0)!=63);w=0;while(1){v=w+1|0;a[t+v>>0]=(($(F&255,63-w|0)|0)+32+($(B&255,v)|0)|0)>>>6;if((v|0)==63)break b;F=a[t>>0]|0;B=a[A>>0]|0;w=v}}A=k<<1;D=a[t+A>>0]|0;a[z+A>>0]=D;B=a[w+A>>0]|0;a[v+A>>0]=B;A=A+ -2|0;C=(A|0)>-1;if(C){E=A;while(1){ja=E+1|0;ia=D;D=a[t+ja>>0]|0;a[z+ja>>0]=((ia&255)+2+((D&255)<<1)+(d[t+E>>0]|0)|0)>>>2;if((E|0)<=0)break;else E=E+ -1|0}}ja=((d[u>>0]|0)+2+((F&255)<<1)+(d[y>>0]|0)|0)>>>2&255;a[z>>0]=ja;a[v>>0]=ja;if(C)while(1){ja=A+1|0;ia=B;B=a[w+ja>>0]|0;a[v+ja>>0]=((ia&255)+2+((B&255)<<1)+(d[w+A>>0]|0)|0)>>>2;if((A|0)<=0){u=x;break}else A=A+ -1|0}else u=x}else s=y}else s=y;while(0);if(!p){Vb(n,s,u,b,g);i=j;return}else if((p|0)==1){if((k|0)>0){p=k;h=0;do{p=(d[u+h>>0]|0)+p+(d[s+h>>0]|0)|0;h=h+1|0}while((h|0)!=(k|0));q=p>>g+1;r=$(q,16843009)|0;g=0;do{p=($(g,b)|0)+m|0;h=0;do{ja=l+(p+h)|0;a[ja>>0]=r;a[ja+1>>0]=r>>8;a[ja+2>>0]=r>>16;a[ja+3>>0]=r>>24;h=h+4|0}while((h|0)<(k|0));g=g+1|0}while((g|0)!=(k|0))}else q=k>>g+1;if(!(o&(k|0)<32)){i=j;return}a[n>>0]=((q<<1)+2+(d[u>>0]|0)+(d[s>>0]|0)|0)>>>2;if((k|0)<=1){i=j;return}n=(q*3|0)+2|0;o=1;do{a[l+(o+m)>>0]=((d[s+o>>0]|0)+n|0)>>>2;o=o+1|0}while((o|0)!=(k|0));o=1;do{a[l+(($(o,b)|0)+m)>>0]=((d[u+o>>0]|0)+n|0)>>>2;o=o+1|0}while((o|0)!=(k|0));i=j;return}else{if(!(c[q+13104>>2]|0))l=0;else l=(a[r+31256>>0]|0)!=0;Wb(n,s,u,b,h,p,k,l&1);i=j;return}}function Vb(b,c,e,f,g){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0;m=i;j=1<<g;if((j|0)<=0){i=m;return}l=j+ 
-1|0;h=c+j|0;k=e+j|0;g=g+1|0;n=0;do{o=e+n|0;p=l-n|0;q=$(n,f)|0;n=n+1|0;r=0;do{v=$(d[o>>0]|0,l-r|0)|0;s=r;r=r+1|0;u=$(d[h>>0]|0,r)|0;t=$(d[c+s>>0]|0,p)|0;a[b+(s+q)>>0]=v+j+u+t+($(d[k>>0]|0,n)|0)>>g}while((r|0)!=(j|0))}while((n|0)!=(j|0));i=m;return}function Wb(c,e,f,g,h,j,k,l){c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0;m=i;i=i+112|0;o=m;n=a[1592+(j+ -2)>>0]|0;p=o+k|0;q=($(n,k)|0)>>5;if((j|0)>17){s=e+ -1|0;r=j+ -11|0;if(r>>>0<15&(q|0)<-1){if((k|0)>=0){s=0;do{u=e+(s+ -1)|0;u=d[u>>0]|d[u+1>>0]<<8|d[u+2>>0]<<16|d[u+3>>0]<<24;v=o+(s+k)|0;a[v>>0]=u;a[v+1>>0]=u>>8;a[v+2>>0]=u>>16;a[v+3>>0]=u>>24;s=s+4|0}while((s|0)<=(k|0))}if((q|0)<0){r=b[1632+(r<<1)>>1]|0;do{a[o+(q+k)>>0]=a[f+((($(r,q)|0)+128>>8)+ -1)>>0]|0;q=q+1|0}while((q|0)!=0)}}else p=s;o=(k|0)>0;if(o){q=0;do{u=q;q=q+1|0;s=$(q,n)|0;r=s>>5;s=s&31;if(!s){r=r+1|0;s=$(u,g)|0;t=0;do{u=p+(r+t)|0;u=d[u>>0]|d[u+1>>0]<<8|d[u+2>>0]<<16|d[u+3>>0]<<24;v=c+(t+s)|0;a[v>>0]=u;a[v+1>>0]=u>>8;a[v+2>>0]=u>>16;a[v+3>>0]=u>>24;t=t+4|0}while((t|0)<(k|0))}else{t=32-s|0;v=$(u,g)|0;u=0;do{w=u+r|0;x=$(d[p+(w+1)>>0]|0,t)|0;a[c+(u+v)>>0]=(x+16+($(d[p+(w+2)>>0]|0,s)|0)|0)>>>5;w=u|1;x=w+r|0;y=$(d[p+(x+1)>>0]|0,t)|0;a[c+(w+v)>>0]=(y+16+($(d[p+(x+2)>>0]|0,s)|0)|0)>>>5;w=u|2;x=w+r|0;y=$(d[p+(x+1)>>0]|0,t)|0;a[c+(w+v)>>0]=(y+16+($(d[p+(x+2)>>0]|0,s)|0)|0)>>>5;w=u|3;x=w+r|0;y=$(d[p+(x+1)>>0]|0,t)|0;a[c+(w+v)>>0]=(y+16+($(d[p+(x+2)>>0]|0,s)|0)|0)>>>5;u=u+4|0}while((u|0)<(k|0))}}while((q|0)!=(k|0))}if(!((j|0)==26&(h|0)==0&(k|0)<32&(l|0)==0&o)){i=m;return}j=f+ -1|0;n=0;do{h=((d[f+n>>0]|0)-(d[j>>0]|0)>>1)+(d[e>>0]|0)|0;if(h>>>0>255)h=0-h>>31;a[c+($(n,g)|0)>>0]=h;n=n+1|0}while((n|0)!=(k|0));i=m;return}s=f+ -1|0;r=j+ -11|0;if(r>>>0<15&(q|0)<-1){if((k|0)>=0){s=0;do{x=f+(s+ 
-1)|0;x=d[x>>0]|d[x+1>>0]<<8|d[x+2>>0]<<16|d[x+3>>0]<<24;y=o+(s+k)|0;a[y>>0]=x;a[y+1>>0]=x>>8;a[y+2>>0]=x>>16;a[y+3>>0]=x>>24;s=s+4|0}while((s|0)<=(k|0))}if((q|0)<0){r=b[1632+(r<<1)>>1]|0;do{a[o+(q+k)>>0]=a[e+((($(r,q)|0)+128>>8)+ -1)>>0]|0;q=q+1|0}while((q|0)!=0)}}else p=s;q=(k|0)>0;if(q){o=0;do{r=o;o=o+1|0;t=$(o,n)|0;u=t>>5;t=t&31;if(!t){s=u+1|0;t=0;do{a[c+(($(t,g)|0)+r)>>0]=a[p+(s+t)>>0]|0;t=t+1|0}while((t|0)!=(k|0))}else{s=32-t|0;v=0;do{y=v+u|0;x=$(d[p+(y+1)>>0]|0,s)|0;a[c+(($(v,g)|0)+r)>>0]=(x+16+($(d[p+(y+2)>>0]|0,t)|0)|0)>>>5;v=v+1|0}while((v|0)!=(k|0))}}while((o|0)!=(k|0))}if(!((j|0)==10&(h|0)==0&(k|0)<32&(l|0)==0&q)){i=m;return}g=e+ -1|0;n=0;do{j=((d[e+n>>0]|0)-(d[g>>0]|0)>>1)+(d[f>>0]|0)|0;if(j>>>0>255)j=0-j>>31;a[c+n>>0]=j;j=n|1;h=((d[e+j>>0]|0)-(d[g>>0]|0)>>1)+(d[f>>0]|0)|0;if(h>>>0>255)h=0-h>>31;a[c+j>>0]=h;j=n|2;h=((d[e+j>>0]|0)-(d[g>>0]|0)>>1)+(d[f>>0]|0)|0;if(h>>>0>255)h=0-h>>31;a[c+j>>0]=h;j=n|3;h=((d[e+j>>0]|0)-(d[g>>0]|0)>>1)+(d[f>>0]|0)|0;if(h>>>0>255)h=0-h>>31;a[c+j>>0]=h;n=n+4|0}while((n|0)<(k|0));i=m;return}function Xb(b,e,f){b=b|0;e=e|0;f=f|0;var g=0,h=0;g=i;h=c[e>>2]|0;if(!h){i=g;return}if(!(c[h+304>>2]|0)){i=g;return}h=e+46|0;f=(d[h>>0]|0)&(f^255)&255;a[h>>0]=f;if(f<<24>>24){i=g;return}Uc(c[b+4>>2]|0,e+4|0);c[e+24>>2]=0;i=g;return}function Yb(a){a=a|0;var b=0;b=i;Xb(a,a+2524|0,6);i=b;return}function Zb(a){a=a|0;var b=0;b=i;Xb(a,a+2524|0,-1);i=b;return}function _b(d,e,f){d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0;g=i;h=d+4364|0;if(((c[(c[d+2524>>2]|0)+304>>2]|0)!=0?(b[d+2568>>1]|0)==(b[h>>1]|0):0)?(c[d+2544>>2]|0)==(f|0):0){j=-1094995529;i=g;return j|0}j=d+2524|0;if(c[(c[j>>2]|0)+304>>2]|0){j=-12;i=g;return j|0}if((Tc(c[d+4>>2]|0,d+2528|0,1)|0)<0){j=-12;i=g;return j|0}k=d+200|0;m=c[k>>2]|0;c[d+2540>>2]=$(c[m+13132>>2]|0,c[m+13128>>2]|0)|0;m=d+4520|0;l=c[j>>2]|0;c[l+244>>2]=(c[m>>2]|0)==1&1;c[l+240>>2]=((c[m>>2]|0)+ 
-1|0)>>>0<2&1;c[e>>2]=l;c[d+2520>>2]=j;a[d+2570>>0]=(a[d+1450>>0]|0)==0?2:3;c[d+2544>>2]=f;b[d+2568>>1]=b[h>>1]|0;j=d+2552|0;f=(c[k>>2]|0)+20|0;c[j+0>>2]=c[f+0>>2];c[j+4>>2]=c[f+4>>2];c[j+8>>2]=c[f+8>>2];c[j+12>>2]=c[f+12>>2];j=0;i=g;return j|0}function $b(d,e,f){d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0;g=i;l=d+2046|0;k=d+2572|0;h=d+4366|0;n=(f|0)==0;m=d+4364|0;f=d+200|0;while(1){if((a[l>>0]|0)==1){p=d+2524|0;o=d+2570|0;if(((a[o>>0]&8)==0?(c[d+2544>>2]|0)!=(c[k>>2]|0):0)?(b[d+2568>>1]|0)==(b[h>>1]|0):0)Xb(d,p,1)}else o=d+2570|0;if(!(a[o>>0]&1))o=0;else o=(b[d+2568>>1]|0)==(b[h>>1]|0)&1;if(((n?(b[h>>1]|0)==(b[m>>1]|0):0)?(j=c[f>>2]|0,(j|0)!=0):0)?(o|0)<=(c[j+(((c[j+72>>2]|0)+ -1|0)*12|0)+80>>2]|0):0){d=0;h=21;break}if(o){h=15;break}o=b[h>>1]|0;if(o<<16>>16==(b[m>>1]|0)){d=0;h=21;break}b[h>>1]=(o&65535)+1&255}if((h|0)==15){h=d+2524|0;e=Ad(e,c[h>>2]|0)|0;if(!(a[d+2570>>0]&8))Xb(d,h,1);else Xb(d,h,9);p=(e|0)<0?e:1;i=g;return p|0}else if((h|0)==21){i=g;return d|0}return 0}function ac(){var b=0,c=0,d=0,e=0,f=0;b=i;if(!(a[1664]|0))c=0;else{i=b;return}do{d=0;do{f=($(d<<1|1,c)|0)&127;e=f>>>0>63;f=e?f+ -64|0:f;e=e?-1:1;if((f|0)>31){f=64-f|0;e=0-e|0}a[1664+(c<<5)+d>>0]=$(a[2688+f>>0]|0,e)|0;d=d+1|0}while((d|0)!=32);c=c+1|0}while((c|0)!=32);i=b;return}function bc(a,b){a=a|0;b=b|0;c[a>>2]=1;c[a+4>>2]=1;c[a+8>>2]=2;c[a+12>>2]=3;c[a+16>>2]=4;c[a+20>>2]=1;c[a+24>>2]=5;c[a+28>>2]=2;c[a+32>>2]=2;c[a+36>>2]=3;c[a+40>>2]=4;c[a+44>>2]=5;c[a+48>>2]=3;c[a+52>>2]=4;c[a+56>>2]=5;c[a+60>>2]=6;c[a+64>>2]=1;c[a+68>>2]=1;c[a+72>>2]=2;c[a+1676>>2]=2;c[a+1680>>2]=3;c[a+1684>>2]=1;c[a+1688>>2]=2;c[a+1692>>2]=2;c[a+1696>>2]=3;c[a+1700>>2]=1;c[a+1704>>2]=2;return}function cc(b,c,d,e,f,g){b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0;h=i;if((e|0)<=0){i=h;return}k=(d|0)>0;j=8-g|0;m=0;while(1){if(k){l=0;do{a[b+l>>0]=(_c(f,g)|0)<<j;l=l+1|0}while((l|0)!=(d|0))}m=m+1|0;if((m|0)==(e|0))break;else b=b+c|0}i=h;return}function dc(c,e,f){c=c|0;e=e|0;f=f|0;var 
g=0,h=0,j=0,k=0,l=0,m=0;g=i;h=0;while(1){j=e;k=0;while(1){l=c+k|0;m=(b[j>>1]|0)+(d[l>>0]|0)|0;if(m>>>0>255)m=0-m>>31;a[l>>0]=m;k=k+1|0;if((k|0)==4)break;else j=j+2|0}h=h+1|0;if((h|0)==4)break;else{e=e+8|0;c=c+f|0}}i=g;return}function ec(c,e,f){c=c|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0;g=i;h=0;while(1){j=e;k=0;while(1){l=c+k|0;m=(b[j>>1]|0)+(d[l>>0]|0)|0;if(m>>>0>255)m=0-m>>31;a[l>>0]=m;k=k+1|0;if((k|0)==8)break;else j=j+2|0}h=h+1|0;if((h|0)==8)break;else{e=e+16|0;c=c+f|0}}i=g;return}function fc(c,e,f){c=c|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0;g=i;h=0;while(1){j=e;k=0;while(1){l=c+k|0;m=(b[j>>1]|0)+(d[l>>0]|0)|0;if(m>>>0>255)m=0-m>>31;a[l>>0]=m;k=k+1|0;if((k|0)==16)break;else j=j+2|0}h=h+1|0;if((h|0)==16)break;else{e=e+32|0;c=c+f|0}}i=g;return}function gc(c,e,f){c=c|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0;g=i;h=0;while(1){j=e;k=0;while(1){l=c+k|0;m=(b[j>>1]|0)+(d[l>>0]|0)|0;if(m>>>0>255)m=0-m>>31;a[l>>0]=m;k=k+1|0;if((k|0)==32)break;else j=j+2|0}h=h+1|0;if((h|0)==32)break;else{e=e+64|0;c=c+f|0}}i=g;return}function hc(a,c){a=a|0;c=c|0;var d=0,e=0,f=0,g=0,h=0,j=0;d=i;c=c<<16>>16;e=7-c|0;c=1<<c;if((e|0)>0){f=1<<e+ -1;if((c|0)>0)g=0;else{i=d;return}while(1){h=a;j=0;while(1){b[h>>1]=(b[h>>1]|0)+f>>e;j=j+1|0;if((j|0)==(c|0))break;else h=h+2|0}g=g+1|0;if((g|0)==(c|0))break;else a=a+(c<<1)|0}i=d;return}if((c|0)<=0){i=d;return}e=0-e|0;f=0;while(1){g=a;h=0;while(1){b[g>>1]=b[g>>1]<<e;h=h+1|0;if((h|0)==(c|0))break;else g=g+2|0}f=f+1|0;if((f|0)==(c|0))break;else a=a+(c<<1)|0}i=d;return}
+
+
+
+function ic(a,c,d){a=a|0;c=c|0;d=d|0;var f=0,g=0,h=0,j=0,k=0;f=i;c=1<<(c<<16>>16);if(d){d=c+ -1|0;if((d|0)<=0){i=f;return}g=(c|0)>0;h=0;do{if(g){j=0;do{k=a+(j+c<<1)|0;b[k>>1]=(e[k>>1]|0)+(e[a+(j<<1)>>1]|0);j=j+1|0}while((j|0)!=(c|0))}a=a+(c<<1)|0;h=h+1|0}while((h|0)!=(d|0));i=f;return}if((c|0)<=0){i=f;return}d=(c|0)>1;h=0;while(1){if(d){j=b[a>>1]|0;g=1;do{k=a+(g<<1)|0;j=(e[k>>1]|0)+(j&65535)&65535;b[k>>1]=j;g=g+1|0}while((g|0)!=(c|0))}h=h+1|0;if((h|0)==(c|0))break;else a=a+(c<<1)|0}i=f;return}function jc(a){a=a|0;var c=0,d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0;c=i;e=0;d=a;while(1){p=b[d>>1]|0;m=d+16|0;n=b[m>>1]|0;g=n+p|0;f=d+24|0;o=b[f>>1]|0;l=o+n|0;j=p-o|0;h=d+8|0;k=(b[h>>1]|0)*74|0;o=((p-n+o|0)*74|0)+64|0;n=o>>7;if((n+32768|0)>>>0>65535)n=o>>31^32767;b[m>>1]=n;m=(g*29|0)+64+(l*55|0)+k|0;n=m>>7;if((n+32768|0)>>>0>65535)n=m>>31^32767;b[d>>1]=n;l=($(l,-29)|0)+64+(j*55|0)+k|0;m=l>>7;if((m+32768|0)>>>0>65535)m=l>>31^32767;b[h>>1]=m;g=(g*55|0)+64+(j*29|0)-k|0;h=g>>7;if((h+32768|0)>>>0>65535)h=g>>31^32767;b[f>>1]=h;e=e+1|0;if((e|0)==4){d=0;break}else d=d+2|0}while(1){p=b[a>>1]|0;l=a+4|0;m=b[l>>1]|0;g=m+p|0;e=a+6|0;n=b[e>>1]|0;k=n+m|0;h=p-n|0;f=a+2|0;j=(b[f>>1]|0)*74|0;n=((p-m+n|0)*74|0)+2048|0;m=n>>12;if((m+32768|0)>>>0>65535)m=n>>31^32767;b[l>>1]=m;l=(g*29|0)+2048+(k*55|0)+j|0;m=l>>12;if((m+32768|0)>>>0>65535)m=l>>31^32767;b[a>>1]=m;k=($(k,-29)|0)+2048+(h*55|0)+j|0;l=k>>12;if((l+32768|0)>>>0>65535)l=k>>31^32767;b[f>>1]=l;f=(g*55|0)+2048+(h*29|0)-j|0;g=f>>12;if((g+32768|0)>>>0>65535)g=f>>31^32767;b[e>>1]=g;d=d+1|0;if((d|0)==4)break;else a=a+8|0}i=c;return}function kc(a,c){a=a|0;c=c|0;var 
d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0;c=i;f=0;e=a;while(1){l=b[e>>1]<<6;j=e+16|0;k=b[j>>1]<<6;g=k+l|0;k=l-k|0;l=e+8|0;m=b[l>>1]|0;d=e+24|0;n=b[d>>1]|0;h=(n*36|0)+(m*83|0)|0;m=($(n,-83)|0)+(m*36|0)|0;n=g+64+h|0;o=n>>7;if((o+32768|0)>>>0>65535)o=n>>31^32767;b[e>>1]=o;o=k+64+m|0;n=o>>7;if((n+32768|0)>>>0>65535)n=o>>31^32767;b[l>>1]=n;l=k-m+64|0;k=l>>7;if((k+32768|0)>>>0>65535)k=l>>31^32767;b[j>>1]=k;h=g-h+64|0;g=h>>7;if((g+32768|0)>>>0>65535)g=h>>31^32767;b[d>>1]=g;f=f+1|0;if((f|0)==4){e=0;break}else e=e+2|0}while(1){k=b[a>>1]<<6;h=a+4|0;l=b[h>>1]<<6;g=l+k|0;l=k-l|0;k=a+2|0;j=b[k>>1]|0;d=a+6|0;m=b[d>>1]|0;f=(m*36|0)+(j*83|0)|0;j=($(m,-83)|0)+(j*36|0)|0;m=g+2048+f|0;n=m>>12;if((n+32768|0)>>>0>65535)n=m>>31^32767;b[a>>1]=n;m=l+2048+j|0;n=m>>12;if((n+32768|0)>>>0>65535)n=m>>31^32767;b[k>>1]=n;k=l-j+2048|0;j=k>>12;if((j+32768|0)>>>0>65535)j=k>>31^32767;b[h>>1]=j;f=g-f+2048|0;g=f>>12;if((g+32768|0)>>>0>65535)g=f>>31^32767;b[d>>1]=g;e=e+1|0;if((e|0)==4)break;else a=a+8|0}i=c;return}function lc(d,e){d=d|0;e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0;h=i;i=i+64|0;j=h+48|0;p=h+32|0;f=h+16|0;g=h;q=(e|0)>8;r=e+4|0;k=j+4|0;l=j+8|0;m=j+12|0;o=0;r=(r|0)>8?8:r;n=d;while(1){c[p+0>>2]=0;c[p+4>>2]=0;c[p+8>>2]=0;c[p+12>>2]=0;w=(r|0)>1;s=0;do{if(w){t=p+(s<<2)|0;v=c[t>>2]|0;u=1;do{v=($(b[n+(u<<3<<1)>>1]|0,a[1664+(u<<2<<5)+s>>0]|0)|0)+v|0;u=u+2|0}while((u|0)<(r|0));c[t>>2]=v}s=s+1|0}while((s|0)!=4);v=b[n>>1]<<6;u=b[n+64>>1]<<6;w=u+v|0;u=v-u|0;v=b[n+32>>1]|0;t=b[n+96>>1]|0;s=(t*36|0)+(v*83|0)|0;v=($(t,-83)|0)+(v*36|0)|0;t=s+w|0;c[j>>2]=t;c[k>>2]=v+u;c[l>>2]=u-v;c[m>>2]=w-s;s=0;while(1){u=c[p+(s<<2)>>2]|0;v=t+64+u|0;w=v>>7;if((w+32768|0)>>>0>65535)w=v>>31^32767;b[n+(s<<3<<1)>>1]=w;t=t-u+64|0;u=t>>7;if((u+32768|0)>>>0>65535)u=t>>31^32767;b[n+(7-s<<3<<1)>>1]=u;s=s+1|0;if((s|0)==4)break;t=c[j+(s<<2)>>2]|0}if((r|0)<8)r=(o&3|0)==0&(o|0)!=0?r+ -4|0:r;o=o+1|0;if((o|0)==8)break;else 
n=n+2|0}j=q?8:e;n=(j|0)>1;k=f+4|0;l=f+8|0;m=f+12|0;o=0;while(1){c[g+0>>2]=0;c[g+4>>2]=0;c[g+8>>2]=0;c[g+12>>2]=0;e=0;do{if(n){r=g+(e<<2)|0;p=c[r>>2]|0;q=1;do{p=($(b[d+(q<<1)>>1]|0,a[1664+(q<<2<<5)+e>>0]|0)|0)+p|0;q=q+2|0}while((q|0)<(j|0));c[r>>2]=p}e=e+1|0}while((e|0)!=4);v=b[d>>1]<<6;u=b[d+8>>1]<<6;w=u+v|0;u=v-u|0;v=b[d+4>>1]|0;p=b[d+12>>1]|0;e=(p*36|0)+(v*83|0)|0;v=($(p,-83)|0)+(v*36|0)|0;p=e+w|0;c[f>>2]=p;c[k>>2]=v+u;c[l>>2]=u-v;c[m>>2]=w-e;e=0;while(1){q=c[g+(e<<2)>>2]|0;r=p+2048+q|0;s=r>>12;if((s+32768|0)>>>0>65535)s=r>>31^32767;b[d+(e<<1)>>1]=s;p=p-q+2048|0;q=p>>12;if((q+32768|0)>>>0>65535)q=p>>31^32767;b[d+(7-e<<1)>>1]=q;e=e+1|0;if((e|0)==4)break;p=c[f+(e<<2)>>2]|0}o=o+1|0;if((o|0)==8)break;else d=d+16|0}i=h;return}function mc(d,e){d=d|0;e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0;j=i;i=i+192|0;t=j+160|0;u=j+128|0;m=j+112|0;l=j+96|0;g=j+64|0;h=j+32|0;f=j+16|0;k=j;s=(e|0)>16;v=e+4|0;n=m+4|0;o=m+8|0;p=m+12|0;r=0;v=(v|0)>16?16:v;q=d;while(1){c[u+0>>2]=0;c[u+4>>2]=0;c[u+8>>2]=0;c[u+12>>2]=0;c[u+16>>2]=0;c[u+20>>2]=0;c[u+24>>2]=0;c[u+28>>2]=0;A=(v|0)>1;z=0;do{if(A){y=u+(z<<2)|0;w=c[y>>2]|0;x=1;do{w=($(b[q+(x<<4<<1)>>1]|0,a[1664+(x<<1<<5)+z>>0]|0)|0)+w|0;x=x+2|0}while((x|0)<(v|0));c[y>>2]=w}z=z+1|0}while((z|0)!=8);c[l+0>>2]=0;c[l+4>>2]=0;c[l+8>>2]=0;c[l+12>>2]=0;z=0;do{x=l+(z<<2)|0;w=c[x>>2]|0;y=1;do{w=($(b[q+(y<<5<<1)>>1]|0,a[1664+(y<<2<<5)+z>>0]|0)|0)+w|0;y=y+2|0}while((y|0)<8);c[x>>2]=w;z=z+1|0}while((z|0)!=4);z=b[q>>1]<<6;y=b[q+256>>1]<<6;A=y+z|0;y=z-y|0;z=b[q+128>>1]|0;w=b[q+384>>1]|0;x=(w*36|0)+(z*83|0)|0;z=($(w,-83)|0)+(z*36|0)|0;w=x+A|0;c[m>>2]=w;c[n>>2]=z+y;c[o>>2]=y-z;c[p>>2]=A-x;x=0;while(1){A=c[l+(x<<2)>>2]|0;c[t+(x<<2)>>2]=A+w;c[t+(7-x<<2)>>2]=w-A;x=x+1|0;if((x|0)==4){w=0;break}w=c[m+(x<<2)>>2]|0}do{x=c[t+(w<<2)>>2]|0;y=c[u+(w<<2)>>2]|0;A=x+64+y|0;z=A>>7;if((z+32768|0)>>>0>65535)z=A>>31^32767;b[q+(w<<4<<1)>>1]=z;x=x-y+64|0;y=x>>7;if((y+32768|0)>>>0>65535)y=x>>31^32767;b[q+(15-w<<4<<1)>>1]=y;w=w+1|0}whi
le((w|0)!=8);if((v|0)<16)v=(r&3|0)==0&(r|0)!=0?v+ -4|0:v;r=r+1|0;if((r|0)==16)break;else q=q+2|0}o=s?16:e;p=(o|0)>1;l=f+4|0;m=f+8|0;n=f+12|0;q=0;while(1){c[h+0>>2]=0;c[h+4>>2]=0;c[h+8>>2]=0;c[h+12>>2]=0;c[h+16>>2]=0;c[h+20>>2]=0;c[h+24>>2]=0;c[h+28>>2]=0;r=0;do{if(p){e=h+(r<<2)|0;t=c[e>>2]|0;s=1;do{t=($(b[d+(s<<1)>>1]|0,a[1664+(s<<1<<5)+r>>0]|0)|0)+t|0;s=s+2|0}while((s|0)<(o|0));c[e>>2]=t}r=r+1|0}while((r|0)!=8);c[k+0>>2]=0;c[k+4>>2]=0;c[k+8>>2]=0;c[k+12>>2]=0;t=0;do{r=k+(t<<2)|0;s=c[r>>2]|0;e=1;do{s=($(b[d+(e<<1<<1)>>1]|0,a[1664+(e<<2<<5)+t>>0]|0)|0)+s|0;e=e+2|0}while((e|0)<8);c[r>>2]=s;t=t+1|0}while((t|0)!=4);z=b[d>>1]<<6;y=b[d+16>>1]<<6;A=y+z|0;y=z-y|0;z=b[d+8>>1]|0;r=b[d+24>>1]|0;e=(r*36|0)+(z*83|0)|0;z=($(r,-83)|0)+(z*36|0)|0;r=e+A|0;c[f>>2]=r;c[l>>2]=z+y;c[m>>2]=y-z;c[n>>2]=A-e;e=0;while(1){A=c[k+(e<<2)>>2]|0;c[g+(e<<2)>>2]=A+r;c[g+(7-e<<2)>>2]=r-A;e=e+1|0;if((e|0)==4){r=0;break}r=c[f+(e<<2)>>2]|0}do{e=c[g+(r<<2)>>2]|0;s=c[h+(r<<2)>>2]|0;u=e+2048+s|0;t=u>>12;if((t+32768|0)>>>0>65535)t=u>>31^32767;b[d+(r<<1)>>1]=t;e=e-s+2048|0;s=e>>12;if((s+32768|0)>>>0>65535)s=e>>31^32767;b[d+(15-r<<1)>>1]=s;r=r+1|0}while((r|0)!=8);q=q+1|0;if((q|0)==16)break;else d=d+32|0}i=j;return}function nc(d,e){d=d|0;e=e|0;var 
f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0;m=i;i=i+320|0;g=m+256|0;l=m+192|0;o=m+160|0;s=m+128|0;u=m+112|0;t=m+96|0;f=m+64|0;j=m+32|0;h=m+16|0;k=m;q=(e|0)>32;x=e+4|0;v=u+4|0;w=u+8|0;n=u+12|0;p=0;x=(x|0)>32?32:x;r=d;while(1){y=l+0|0;z=y+64|0;do{c[y>>2]=0;y=y+4|0}while((y|0)<(z|0));B=(x|0)>1;A=0;do{if(B){z=l+(A<<2)|0;y=c[z>>2]|0;C=1;do{y=($(b[r+(C<<5<<1)>>1]|0,a[1664+(C<<5)+A>>0]|0)|0)+y|0;C=C+2|0}while((C|0)<(x|0));c[z>>2]=y}A=A+1|0}while((A|0)!=16);c[s+0>>2]=0;c[s+4>>2]=0;c[s+8>>2]=0;c[s+12>>2]=0;c[s+16>>2]=0;c[s+20>>2]=0;c[s+24>>2]=0;c[s+28>>2]=0;y=(x|0)/2|0;z=(x|0)>3;A=0;do{if(z){D=s+(A<<2)|0;B=c[D>>2]|0;C=1;do{B=($(b[r+(C<<6<<1)>>1]|0,a[1664+(C<<1<<5)+A>>0]|0)|0)+B|0;C=C+2|0}while((C|0)<(y|0));c[D>>2]=B}A=A+1|0}while((A|0)!=8);c[t+0>>2]=0;c[t+4>>2]=0;c[t+8>>2]=0;c[t+12>>2]=0;A=0;do{B=t+(A<<2)|0;z=c[B>>2]|0;y=1;do{z=($(b[r+(y<<7<<1)>>1]|0,a[1664+(y<<2<<5)+A>>0]|0)|0)+z|0;y=y+2|0}while((y|0)<8);c[B>>2]=z;A=A+1|0}while((A|0)!=4);C=b[r>>1]<<6;B=b[r+1024>>1]<<6;D=B+C|0;B=C-B|0;C=b[r+512>>1]|0;y=b[r+1536>>1]|0;z=(y*36|0)+(C*83|0)|0;C=($(y,-83)|0)+(C*36|0)|0;y=z+D|0;c[u>>2]=y;c[v>>2]=C+B;c[w>>2]=B-C;c[n>>2]=D-z;z=0;while(1){D=c[t+(z<<2)>>2]|0;c[o+(z<<2)>>2]=D+y;c[o+(7-z<<2)>>2]=y-D;z=z+1|0;if((z|0)==4){y=0;break}y=c[u+(z<<2)>>2]|0}do{C=c[o+(y<<2)>>2]|0;D=c[s+(y<<2)>>2]|0;c[g+(y<<2)>>2]=D+C;c[g+(15-y<<2)>>2]=C-D;y=y+1|0}while((y|0)!=8);y=0;do{z=c[g+(y<<2)>>2]|0;A=c[l+(y<<2)>>2]|0;B=z+64+A|0;C=B>>7;if((C+32768|0)>>>0>65535)C=B>>31^32767;b[r+(y<<5<<1)>>1]=C;z=z-A+64|0;A=z>>7;if((A+32768|0)>>>0>65535)A=z>>31^32767;b[r+(31-y<<5<<1)>>1]=A;y=y+1|0}while((y|0)!=16);if((x|0)<32)x=(p&3|0)==0&(p|0)!=0?x+ -4|0:x;p=p+1|0;if((p|0)==32)break;else 
r=r+2|0}p=q?32:e;o=(p|0)>1;n=(p|0)/2|0;q=(p|0)>3;s=h+4|0;r=h+8|0;e=h+12|0;t=0;while(1){y=l+0|0;z=y+64|0;do{c[y>>2]=0;y=y+4|0}while((y|0)<(z|0));v=0;do{if(o){w=l+(v<<2)|0;u=c[w>>2]|0;x=1;do{u=($(b[d+(x<<1)>>1]|0,a[1664+(x<<5)+v>>0]|0)|0)+u|0;x=x+2|0}while((x|0)<(p|0));c[w>>2]=u}v=v+1|0}while((v|0)!=16);c[j+0>>2]=0;c[j+4>>2]=0;c[j+8>>2]=0;c[j+12>>2]=0;c[j+16>>2]=0;c[j+20>>2]=0;c[j+24>>2]=0;c[j+28>>2]=0;x=0;do{if(q){u=j+(x<<2)|0;w=c[u>>2]|0;v=1;do{D=v<<1;w=($(b[d+(D<<1)>>1]|0,a[1664+(D<<5)+x>>0]|0)|0)+w|0;v=v+2|0}while((v|0)<(n|0));c[u>>2]=w}x=x+1|0}while((x|0)!=8);c[k+0>>2]=0;c[k+4>>2]=0;c[k+8>>2]=0;c[k+12>>2]=0;u=0;do{v=k+(u<<2)|0;x=c[v>>2]|0;w=1;do{D=w<<2;x=($(b[d+(D<<1)>>1]|0,a[1664+(D<<5)+u>>0]|0)|0)+x|0;w=w+2|0}while((w|0)<8);c[v>>2]=x;u=u+1|0}while((u|0)!=4);C=b[d>>1]<<6;B=b[d+32>>1]<<6;D=B+C|0;B=C-B|0;C=b[d+16>>1]|0;u=b[d+48>>1]|0;v=(u*36|0)+(C*83|0)|0;C=($(u,-83)|0)+(C*36|0)|0;u=v+D|0;c[h>>2]=u;c[s>>2]=C+B;c[r>>2]=B-C;c[e>>2]=D-v;v=0;while(1){D=c[k+(v<<2)>>2]|0;c[f+(v<<2)>>2]=D+u;c[f+(7-v<<2)>>2]=u-D;v=v+1|0;if((v|0)==4){u=0;break}u=c[h+(v<<2)>>2]|0}do{C=c[f+(u<<2)>>2]|0;D=c[j+(u<<2)>>2]|0;c[g+(u<<2)>>2]=D+C;c[g+(15-u<<2)>>2]=C-D;u=u+1|0}while((u|0)!=8);u=0;do{v=c[g+(u<<2)>>2]|0;w=c[l+(u<<2)>>2]|0;x=v+2048+w|0;y=x>>12;if((y+32768|0)>>>0>65535)y=x>>31^32767;b[d+(u<<1)>>1]=y;v=v-w+2048|0;w=v>>12;if((w+32768|0)>>>0>65535)w=v>>31^32767;b[d+(31-u<<1)>>1]=w;u=u+1|0}while((u|0)!=16);t=t+1|0;if((t|0)==32)break;else d=d+64|0}i=m;return}function oc(a){a=a|0;var c=0,d=0,e=0,f=0;c=i;d=((((b[a>>1]|0)+1|0)>>>1)+32|0)>>>6&65535;e=0;do{f=e<<2;b[a+(f<<1)>>1]=d;b[a+((f|1)<<1)>>1]=d;b[a+((f|2)<<1)>>1]=d;b[a+((f|3)<<1)>>1]=d;e=e+1|0}while((e|0)!=4);i=c;return}function pc(a){a=a|0;var c=0,d=0,e=0,f=0;c=i;d=((((b[a>>1]|0)+1|0)>>>1)+32|0)>>>6&65535;e=0;do{f=e<<3;b[a+(f<<1)>>1]=d;b[a+((f|1)<<1)>>1]=d;b[a+((f|2)<<1)>>1]=d;b[a+((f|3)<<1)>>1]=d;b[a+((f|4)<<1)>>1]=d;b[a+((f|5)<<1)>>1]=d;b[a+((f|6)<<1)>>1]=d;b[a+((f|7)<<1)>>1]=d;e=e+1|0}while((e|0)!=8);i=c;return}function qc(a){a=a|0;var 
c=0,d=0,e=0,f=0,g=0;c=i;e=((((b[a>>1]|0)+1|0)>>>1)+32|0)>>>6&65535;d=0;do{f=d<<4;g=0;do{b[a+(g+f<<1)>>1]=e;g=g+1|0}while((g|0)!=16);d=d+1|0}while((d|0)!=16);i=c;return}function rc(a){a=a|0;var c=0,d=0,e=0,f=0,g=0;c=i;e=((((b[a>>1]|0)+1|0)>>>1)+32|0)>>>6&65535;d=0;do{f=d<<5;g=0;do{b[a+(g+f<<1)>>1]=e;g=g+1|0}while((g|0)!=32);d=d+1|0}while((d|0)!=32);i=c;return}function sc(e,f,g,h,j,k,l,m,n){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var o=0,p=0,q=0;o=i;i=i+128|0;k=o;q=k+0|0;p=q+128|0;do{c[q>>2]=0;q=q+4|0}while((q|0)<(p|0));q=d[j+n+96>>0]|0;c[k+((q&31)<<2)>>2]=b[j+(n*10|0)+114>>1];c[k+((q+1&31)<<2)>>2]=b[j+(n*10|0)+116>>1];c[k+((q+2&31)<<2)>>2]=b[j+(n*10|0)+118>>1];c[k+((q+3&31)<<2)>>2]=b[j+(n*10|0)+120>>1];if((m|0)<=0){i=o;return}n=(l|0)>0;j=0;while(1){if(n){p=0;do{q=d[f+p>>0]|0;q=q+(c[k+(q>>>3<<2)>>2]|0)|0;if(q>>>0>255)q=0-q>>31;a[e+p>>0]=q;p=p+1|0}while((p|0)!=(l|0))}j=j+1|0;if((j|0)==(m|0))break;else{e=e+g|0;f=f+h|0}}i=o;return}function tc(e,f,g,h,j,k,l,m,n,o,p,q){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;q=q|0;var r=0,s=0,t=0,u=0;p=i;o=j+(n*10|0)+112|0;r=c[j+(n<<2)+100>>2]|0;if((r|0)!=1){if(c[k>>2]|0){q=b[o>>1]|0;if((m|0)>0){s=0;do{t=(d[f+($(s,h)|0)>>0]|0)+q|0;if(t>>>0>255)t=0-t>>31;a[e+($(s,g)|0)>>0]=t;s=s+1|0}while((s|0)!=(m|0));q=1}else q=1}else q=0;if(c[k+8>>2]|0){s=b[o>>1]|0;l=l+ -1|0;if((m|0)>0){t=0;do{u=(d[f+(($(t,h)|0)+l)>>0]|0)+s|0;if(u>>>0>255)u=0-u>>31;a[e+(($(t,g)|0)+l)>>0]=u;t=t+1|0}while((t|0)!=(m|0))}}if(!r){s=m;t=q;u=0;r=l;Bc(e,f,g,h,j,r,s,n,t,u);i=p;return}}else q=0;if(c[k+4>>2]|0){r=b[o>>1]|0;if((q|0)<(l|0)){s=q;do{t=(d[f+s>>0]|0)+r|0;if(t>>>0>255)t=0-t>>31;a[e+s>>0]=t;s=s+1|0}while((s|0)!=(l|0));r=1}else r=1}else r=0;if(!(c[k+12>>2]|0)){s=m;t=q;u=r;r=l;Bc(e,f,g,h,j,r,s,n,t,u);i=p;return}k=b[o>>1]|0;o=m+ 
-1|0;t=$(o,g)|0;m=$(o,h)|0;if((q|0)<(l|0))s=q;else{s=o;t=q;u=r;r=l;Bc(e,f,g,h,j,r,s,n,t,u);i=p;return}do{u=(d[f+(s+m)>>0]|0)+k|0;if(u>>>0>255)u=0-u>>31;a[e+(s+t)>>0]=u;s=s+1|0}while((s|0)!=(l|0));Bc(e,f,g,h,j,l,o,n,q,r);i=p;return}function uc(e,f,g,h,j,k,l,m,n,o,p,q){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;q=q|0;var r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0;s=i;C=j+(n*10|0)+112|0;B=c[j+(n<<2)+100>>2]|0;A=(B|0)!=1;if(A){if(c[k>>2]|0){D=b[C>>1]|0;if((m|0)>0){E=0;do{G=(d[f+($(E,h)|0)>>0]|0)+D|0;if(G>>>0>255)G=0-G>>31;a[e+($(E,g)|0)>>0]=G;E=E+1|0}while((E|0)!=(m|0));D=1}else D=1}else D=0;if(c[k+8>>2]|0){E=b[C>>1]|0;l=l+ -1|0;if((m|0)>0){G=0;do{H=(d[f+(($(G,h)|0)+l)>>0]|0)+E|0;if(H>>>0>255)H=0-H>>31;a[e+(($(G,g)|0)+l)>>0]=H;G=G+1|0}while((G|0)!=(m|0))}}if(!B){C=1;E=0}else F=13}else{D=0;F=13}if((F|0)==13){if(c[k+4>>2]|0){F=b[C>>1]|0;if((D|0)<(l|0)){E=D;do{G=(d[f+E>>0]|0)+F|0;if(G>>>0>255)G=0-G>>31;a[e+E>>0]=G;E=E+1|0}while((E|0)!=(l|0));E=1}else E=1}else E=0;if(c[k+12>>2]|0){C=b[C>>1]|0;m=m+ -1|0;G=$(m,g)|0;H=$(m,h)|0;if((D|0)<(l|0)){F=D;do{I=(d[f+(F+H)>>0]|0)+C|0;if(I>>>0>255)I=0-I>>31;a[e+(F+G)>>0]=I;F=F+1|0}while((F|0)!=(l|0));C=0}else C=0}else C=0}Bc(e,f,g,h,j,l,m,n,D,E);j=(B|0)==2;if((a[q>>0]|0)==0&j?(c[k>>2]|0)==0:0)n=(c[k+4>>2]|0)==0;else n=0;H=n&1;n=q+1|0;B=(B|0)==3;if((a[n>>0]|0)==0&B?(c[k+4>>2]|0)==0:0)F=(c[k+8>>2]|0)==0;else F=0;J=F&1;F=q+2|0;if((a[F>>0]|0)==0&j?(c[k+8>>2]|0)==0:0)G=(c[k+12>>2]|0)==0;else G=0;I=G&1;G=q+3|0;if((a[G>>0]|0)==0&B?(c[k>>2]|0)==0:0)k=(c[k+12>>2]|0)==0;else k=0;k=k&1;A=A^1;if(!((a[o>>0]|0)==0|A)?(z=H+E|0,y=m-k|0,(z|0)<(y|0)):0)do{a[e+($(z,g)|0)>>0]=a[f+($(z,h)|0)>>0]|0;z=z+1|0}while((z|0)!=(y|0));if(!((a[o+1>>0]|0)==0|A)?(x=J+E|0,w=m-I|0,(x|0)<(w|0)):0){o=l+ 
-1|0;do{a[e+(o+($(x,g)|0))>>0]=a[f+(o+($(x,h)|0))>>0]|0;x=x+1|0}while((x|0)!=(w|0))}if(!((a[p>>0]|0)==0|C)?(v=H+D|0,u=l-J|0,(v|0)<(u|0)):0)do{a[e+v>>0]=a[f+v>>0]|0;v=v+1|0}while((v|0)!=(u|0));if(!((a[p+1>>0]|0)==0|C)?(t=k+D|0,r=l-I|0,(t|0)<(r|0)):0){u=m+ -1|0;p=$(u,h)|0;u=$(u,g)|0;do{a[e+(t+u)>>0]=a[f+(t+p)>>0]|0;t=t+1|0}while((t|0)!=(r|0))}if((a[q>>0]|0)!=0&j)a[e>>0]=a[f>>0]|0;if((a[n>>0]|0)!=0&B){J=l+ -1|0;a[e+J>>0]=a[f+J>>0]|0}if((a[F>>0]|0)!=0&j){J=m+ -1|0;I=l+ -1|0;a[e+(I+($(J,g)|0))>>0]=a[f+(I+($(J,h)|0))>>0]|0}if(!((a[G>>0]|0)!=0&B)){i=s;return}J=m+ -1|0;a[e+($(J,g)|0)>>0]=a[f+($(J,h)|0)>>0]|0;i=s;return}function vc(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;var g=0;g=i;Ac(a,b,1,c,d,e,f);i=g;return}function wc(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;var g=0;g=i;Ac(a,1,b,c,d,e,f);i=g;return}function xc(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;var f=0;f=i;zc(a,b,1,c,d,e);i=f;return}function yc(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;var f=0;f=i;zc(a,1,b,c,d,e);i=f;return}function zc(b,e,f,g,h,j){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;var k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0;m=i;l=$(e,-2)|0;k=0-e|0;n=0;while(1){p=c[g+(n<<2)>>2]|0;if((p|0)>=1){s=0-p|0;r=(a[h+n>>0]|0)==0;q=(a[j+n>>0]|0)==0;o=0;t=b;while(1){v=t+k|0;x=d[v>>0]|0;u=d[t>>0]|0;w=(d[t+l>>0]|0)+4-(d[t+e>>0]|0)+(u-x<<2)>>3;if((w|0)<(s|0))w=s;else w=(w|0)>(p|0)?p:w;if(r){x=w+x|0;if(x>>>0>255)x=0-x>>31;a[v>>0]=x}if(q){u=u-w|0;if(u>>>0>255)u=0-u>>31;a[t>>0]=u}o=o+1|0;if((o|0)==4)break;else t=t+f|0}}n=n+1|0;if((n|0)==2)break;else b=b+(f<<2)|0}i=m;return}function Ac(b,e,f,g,h,j,k){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var 
l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0;t=i;o=$(e,-3)|0;p=$(e,-2)|0;q=0-e|0;r=e<<1;D=f*3|0;B=D+o|0;C=D+p|0;A=D-e|0;E=D+r|0;l=D+e|0;y=g>>3;v=g>>2;n=$(e,-4)|0;s=e*3|0;w=D+n|0;x=(f+e|0)*3|0;u=(g>>1)+g>>3;z=f<<2;m=f<<2;F=0;do{U=a[b+o>>0]|0;T=a[b+p>>0]|0;S=a[b+q>>0]|0;J=S&255;N=(U&255)-((T&255)<<1)+J|0;N=(N|0)>-1?N:0-N|0;P=a[b+r>>0]|0;Q=a[b+e>>0]|0;R=a[b>>0]|0;X=R&255;V=(P&255)-((Q&255)<<1)+X|0;V=(V|0)>-1?V:0-V|0;_=d[b+A>>0]|0;M=(d[b+B>>0]|0)-((d[b+C>>0]|0)<<1)+_|0;M=(M|0)>-1?M:0-M|0;Y=d[b+D>>0]|0;O=(d[b+E>>0]|0)-((d[b+l>>0]|0)<<1)+Y|0;O=(O|0)>-1?O:0-O|0;H=V+N|0;I=O+M|0;G=c[h+(F<<2)>>2]|0;W=a[j+F>>0]|0;L=a[k+F>>0]|0;do if((I+H|0)<(g|0)){Z=(G*5|0)+1>>1;aa=(d[b+n>>0]|0)-J|0;K=a[b+s>>0]|0;ba=(K&255)-X|0;if((((((((ba|0)>-1?ba:0-ba|0)+((aa|0)>-1?aa:0-aa|0)|0)<(y|0)?(ba=J-X|0,(((ba|0)>-1?ba:0-ba|0)|0)<(Z|0)):0)?(ba=(d[b+w>>0]|0)-_|0,aa=(d[b+x>>0]|0)-Y|0,(((aa|0)>-1?aa:0-aa|0)+((ba|0)>-1?ba:0-ba|0)|0)<(y|0)):0)?(ba=_-Y|0,(((ba|0)>-1?ba:0-ba|0)|0)<(Z|0)):0)?(H<<1|0)<(v|0):0)?(I<<1|0)<(v|0):0){G=G<<1;H=W<<24>>24==0;I=0-G|0;J=L<<24>>24==0;Y=K;K=1;L=b;while(1){V=L+o|0;U=U&255;W=L+p|0;T=T&255;X=L+q|0;N=S&255;R=R&255;S=L+e|0;Q=Q&255;M=L+r|0;O=P&255;P=Y&255;if(H){Y=d[L+n>>0]|0;Z=(U+4+Q+(N+T+R<<1)>>3)-N|0;if((Z|0)<(I|0))Z=I;else Z=(Z|0)>(G|0)?G:Z;a[X>>0]=Z+N;X=((U+2+T+N+R|0)>>>2)-T|0;if((X|0)<(I|0))X=I;else X=(X|0)>(G|0)?G:X;a[W>>0]=X+T;W=((U*3|0)+4+T+N+R+(Y<<1)>>3)-U|0;if((W|0)<(I|0))W=I;else W=(W|0)>(G|0)?G:W;a[V>>0]=W+U}if(J){T=(T+4+O+(R+N+Q<<1)>>3)-R|0;if((T|0)<(I|0))T=I;else T=(T|0)>(G|0)?G:T;a[L>>0]=T+R;T=((N+2+R+Q+O|0)>>>2)-Q|0;if((T|0)<(I|0))T=I;else T=(T|0)>(G|0)?G:T;a[S>>0]=T+Q;N=(N+4+R+Q+(O*3|0)+(P<<1)>>3)-O|0;if((N|0)<(I|0))N=I;else 
N=(N|0)>(G|0)?G:N;a[M>>0]=N+O}M=L+f|0;if((K|0)==4)break;U=a[L+(o+f)>>0]|0;T=a[L+(p+f)>>0]|0;S=a[L+(f-e)>>0]|0;R=a[M>>0]|0;Q=a[L+(f+e)>>0]|0;P=a[L+(r+f)>>0]|0;Y=a[L+(s+f)>>0]|0;K=K+1|0;L=M}b=b+m|0;break}H=G>>1;I=G*10|0;J=0-G|0;K=W<<24>>24!=0;L=L<<24>>24!=0;M=(M+N|0)<(u|0)&(K^1);N=0-H|0;O=(O+V|0)<(u|0)&(L^1);V=T;W=R;Y=Q;Q=1;R=b;while(1){X=U&255;T=R+p|0;V=V&255;_=R+q|0;Z=S&255;W=W&255;S=R+e|0;U=Y&255;P=P&255;Y=((W-Z|0)*9|0)+8+($(U-V|0,-3)|0)>>4;if((((Y|0)>-1?Y:0-Y|0)|0)<(I|0)){if((Y|0)<(J|0))Y=J;else Y=(Y|0)>(G|0)?G:Y;if(!K){aa=Y+Z|0;if(aa>>>0>255)aa=0-aa>>31;a[_>>0]=aa}if(!L){_=W-Y|0;if(_>>>0>255)_=0-_>>31;a[R>>0]=_}if(M){X=((X+1+Z|0)>>>1)-V+Y>>1;if((X|0)<(N|0))X=N;else X=(X|0)>(H|0)?H:X;V=X+V|0;if(V>>>0>255)V=0-V>>31;a[T>>0]=V}if(O){P=((W+1+P|0)>>>1)-U-Y>>1;if((P|0)<(N|0))P=N;else P=(P|0)>(H|0)?H:P;P=P+U|0;if(P>>>0>255)P=0-P>>31;a[S>>0]=P}}T=R+f|0;if((Q|0)==4)break;U=a[R+(o+f)>>0]|0;V=a[R+(p+f)>>0]|0;S=a[R+(f-e)>>0]|0;W=a[T>>0]|0;Y=a[R+(f+e)>>0]|0;P=a[R+(r+f)>>0]|0;Q=Q+1|0;R=T}b=b+m|0}else b=b+z|0;while(0);F=F+1|0}while((F|0)!=2);i=t;return}function Bc(e,f,g,h,j,k,l,m,n,o){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;var p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0;t=i;v=c[j+(m<<2)+100>>2]|0;q=a[2728+(v<<2)>>0]|0;r=a[2730+(v<<2)>>0]|0;if((o|0)>=(l|0)){i=t;return}u=(n|0)<(k|0);s=o;p=$((a[2729+(v<<2)>>0]|0)+o|0,h)|0;v=$((a[2731+(v<<2)>>0]|0)+o|0,h)|0;w=$(o,g)|0;o=$(o,h)|0;while(1){if(u){y=p+q|0;x=v+r|0;z=n;do{A=a[f+(z+o)>>0]|0;B=a[f+(y+z)>>0]|0;if((A&255)>(B&255))B=3;else B=((A<<24>>24!=B<<24>>24)<<31>>31)+2|0;C=a[f+(x+z)>>0]|0;if((A&255)>(C&255))C=1;else C=(A<<24>>24!=C<<24>>24)<<31>>31;A=(b[j+(m*10|0)+(d[2720+(C+B)>>0]<<1)+112>>1]|0)+(A&255)|0;if(A>>>0>255)A=0-A>>31;a[e+(z+w)>>0]=A;z=z+1|0}while((z|0)!=(k|0))}s=s+1|0;if((s|0)==(l|0))break;else{p=p+h|0;v=v+h|0;w=w+g|0;o=o+h|0}}i=t;return}function Cc(b,e,f,g,h){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0;j=i;k=c[b+136>>2]|0;l=(c[b+200>>2]|0)+13080|0;r=(1<<c[l>>2])+ 
-1|0;o=r&e;n=r&f;q=(n|0)!=0|(a[k+309>>0]|0)!=0;m=q&1;c[k+31296>>2]=m;p=(o|0)!=0|(a[k+308>>0]|0)!=0;b=p&1;c[k+31292>>2]=b;if(!(r&(f|e)))p=d[k+311>>0]|0;else p=p&q&1;c[k+31300>>2]=p;if((o+g|0)==(1<<c[l>>2]|0))m=(a[k+310>>0]|0)!=0&(n|0)==0&1;c[k+31308>>2]=m;if(!m){q=0;q=q&1;r=k+31304|0;c[r>>2]=q;r=h+f|0;q=k+316|0;q=c[q>>2]|0;q=(r|0)<(q|0);q=q?b:0;r=k+31288|0;c[r>>2]=q;i=j;return}q=(g+e|0)<(c[k+312>>2]|0);q=q&1;r=k+31304|0;c[r>>2]=q;r=h+f|0;q=k+316|0;q=c[q>>2]|0;q=(r|0)<(q|0);q=q?b:0;r=k+31288|0;c[r>>2]=q;i=j;return}function Dc(b){b=b|0;var e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0;f=i;i=i+16|0;e=f;h=c[b+136>>2]|0;g=h+204|0;j=td(13196)|0;c[e>>2]=j;if(!j){t=-12;i=f;return t|0}j=c[j+4>>2]|0;l=td(468)|0;if(!l){t=-12;i=f;return t|0}k=c[l+4>>2]|0;c[k+4>>2]=1;n=k+8|0;c[n>>2]=1;a[k>>0]=0;c[k+348>>2]=1;p=k+352|0;m=k+380|0;q=k+408|0;o=0;do{c[p+(o<<2)>>2]=1;c[m+(o<<2)>>2]=0;c[q+(o<<2)>>2]=-1;o=o+1|0}while((o|0)<(c[n>>2]|0));c[k+436>>2]=0;c[k+440>>2]=1;a[k+444>>0]=0;o=b+208|0;vd(o);c[o>>2]=l;c[j>>2]=0;o=j+72|0;c[o>>2]=1;t=_c(g,8)|0;p=j+4|0;c[p>>2]=t;do if((t|0)<=3){a[j+8>>0]=0;n=j+13120|0;c[n>>2]=cd(g,32)|0;k=cd(g,32)|0;m=j+13124|0;c[m>>2]=k;k=Qc(c[n>>2]|0,k,0,c[b+4>>2]|0)|0;if((k|0)>=0){t=_c(g,8)|0;l=j+52|0;c[l>>2]=t+8;if(!t){p=c[p>>2]|0;if((p|0)==1){c[j+60>>2]=0;p=0}else if((p|0)==2){c[j+60>>2]=4;p=4}else if(!p){c[j+60>>2]=8;p=8}else{c[j+60>>2]=5;p=5}c[j+56>>2]=0;p=Bd(p)|0;if(p){c[j+13180>>2]=0;c[j+13168>>2]=0;t=d[p+5>>0]|0;c[j+13172>>2]=t;c[j+13176>>2]=t;t=d[p+6>>0]|0;c[j+13184>>2]=t;c[j+13188>>2]=t;c[j+64>>2]=8;if((c[o>>2]|0)>0){p=j+76|0;q=0;do{c[p+(q*12|0)>>2]=1;c[p+(q*12|0)+4>>2]=0;c[p+(q*12|0)+8>>2]=-1;q=q+1|0}while((q|0)<(c[o>>2]|0))}s=(dd(g)|0)+3|0;t=j+13064|0;c[t>>2]=s;s=1<<s;r=s+ 
-1|0;s=0-s|0;c[n>>2]=r+(c[n>>2]|0)&s;c[m>>2]=r+(c[m>>2]|0)&s;s=j+13068|0;c[s>>2]=dd(g)|0;r=j+13072|0;c[r>>2]=(dd(g)|0)+2;o=dd(g)|0;p=c[r>>2]|0;q=j+13076|0;c[q>>2]=p+o;if(p>>>0<(c[t>>2]|0)>>>0){u=dd(g)|0;o=j+13092|0;c[o>>2]=u;p=j+13088|0;c[p>>2]=u;a[j+12940>>0]=1;a[j+12941>>0]=bd(g)|0;u=bd(g)|0;c[j+68>>2]=u;if(u){u=j+13044|0;a[u>>0]=(_c(g,4)|0)+1;a[j+13045>>0]=(_c(g,4)|0)+1;v=(dd(g)|0)+3|0;c[j+13048>>2]=v;c[j+13052>>2]=v+(dd(g)|0);if((d[u>>0]|0|0)>(c[l>>2]|0)){k=-1094995529;break}a[j+13056>>0]=bd(g)|0}c[j+2184>>2]=0;a[j+12942>>0]=0;a[j+13060>>0]=1;a[j+13061>>0]=bd(g)|0;c[j+160>>2]=0;c[j+164>>2]=1;if((bd(g)|0)!=0?(v=bd(g)|0,ad(g,7),(v|0)!=0):0){c[j+13096>>2]=bd(g)|0;c[j+13100>>2]=bd(g)|0;c[j+13104>>2]=bd(g)|0;c[j+13108>>2]=bd(g)|0;bd(g)|0;c[j+13112>>2]=bd(g)|0;bd(g)|0;c[j+13116>>2]=bd(g)|0;bd(g)|0}g=c[n>>2]|0;c[j+12>>2]=g;n=c[m>>2]|0;c[j+16>>2]=n;t=c[t>>2]|0;v=(c[s>>2]|0)+t|0;c[j+13080>>2]=v;s=t+ -1|0;c[j+13084>>2]=s;m=1<<v;u=g+ -1+m>>v;c[j+13128>>2]=u;m=n+ -1+m>>v;c[j+13132>>2]=m;c[j+13136>>2]=$(m,u)|0;c[j+13140>>2]=g>>t;c[j+13144>>2]=n>>t;u=c[r>>2]|0;c[j+13148>>2]=g>>u;c[j+13152>>2]=n>>u;c[j+13156>>2]=g>>s;c[j+13160>>2]=n>>s;u=v-u|0;c[j+13164>>2]=(1<<u)+ -1;c[j+13192>>2]=((c[l>>2]|0)*6|0)+ -48;t=(1<<t)+ -1|0;if((((((t&g|0)==0?!((n&t|0)!=0|v>>>0>6):0)?(c[p>>2]|0)>>>0<=u>>>0:0)?(c[o>>2]|0)>>>0<=u>>>0:0)?(c[q>>2]|0)>>>0<=(v>>>0>5?5:v)>>>0:0)?((c[h+216>>2]|0)-(c[h+212>>2]|0)|0)>=0:0){g=b+272|0;h=c[g>>2]|0;if((h|0)!=0?(v=c[e>>2]|0,(Yd(c[h+4>>2]|0,c[v+4>>2]|0,c[v+8>>2]|0)|0)==0):0){vd(e);v=0;i=f;return v|0}else h=0;do{j=b+(h<<2)+400|0;k=c[j>>2]|0;do if(k){if(c[c[k+4>>2]>>2]|0)break;vd(j)}while(0);h=h+1|0}while((h|0)!=256);h=c[g>>2]|0;do if(h){j=b+200|0;if((c[j>>2]|0)!=(c[h+4>>2]|0))break;u=b+1424|0;vd(u);v=ud(c[g>>2]|0)|0;c[u>>2]=v;if(v)break;c[j>>2]=0}while(0);vd(g);c[g>>2]=c[e>>2];v=0;i=f;return v|0}}else k=-1094995529}else k=-22}else k=-1094995529}}else k=-1094995529;while(0);vd(e);v=k;i=f;return v|0}function Ec(b){b=b|0;var 
e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,E=0,F=0,G=0,H=0,I=0;f=i;i=i+16|0;e=f+4|0;j=f;l=b+136|0;g=c[l>>2]|0;n=g+204|0;h=md(1692)|0;c[j>>2]=h;if(!h){I=-12;i=f;return I|0}I=qd(h,1692,6,0,0)|0;c[e>>2]=I;if(!I){jd(j);I=-12;i=f;return I|0}a[(c[j>>2]|0)+53>>0]=1;h=c[j>>2]|0;c[h+44>>2]=1;c[h+48>>2]=1;a[h+52>>0]=1;a[(c[j>>2]|0)+57>>0]=0;h=c[j>>2]|0;c[h+60>>2]=0;c[h+64>>2]=0;a[h+1629>>0]=2;h=dd(n)|0;a:do if((h>>>0<=255?(k=dd(n)|0,c[c[j>>2]>>2]=k,k>>>0<=31):0)?(m=c[b+(k<<2)+272>>2]|0,(m|0)!=0):0){k=c[m+4>>2]|0;I=(bd(n)|0)&255;a[(c[j>>2]|0)+41>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+39>>0]=I;I=_c(n,3)|0;c[(c[j>>2]|0)+1624>>2]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+4>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+5>>0]=I;I=(dd(n)|0)+1|0;c[(c[j>>2]|0)+8>>2]=I;I=(dd(n)|0)+1|0;c[(c[j>>2]|0)+12>>2]=I;I=ed(n)|0;c[(c[j>>2]|0)+16>>2]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+20>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+21>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+22>>0]=I;I=c[j>>2]|0;c[I+24>>2]=0;if(a[I+22>>0]|0){I=dd(n)|0;c[(c[j>>2]|0)+24>>2]=I}I=ed(n)|0;c[(c[j>>2]|0)+28>>2]=I;if((I+12|0)>>>0<=24?(I=ed(n)|0,c[(c[j>>2]|0)+32>>2]=I,(I+12|0)>>>0<=24):0){I=(bd(n)|0)&255;a[(c[j>>2]|0)+36>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+37>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+38>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+40>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+42>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+43>>0]=I;if(a[(c[j>>2]|0)+42>>0]|0){m=(dd(n)|0)+1|0;c[(c[j>>2]|0)+44>>2]=m;m=(dd(n)|0)+1|0;o=c[j>>2]|0;c[o+48>>2]=m;o=c[o+44>>2]|0;if(!o){b=-1094995529;break}if((m|0)==0?1:(o|0)>=(c[k+13120>>2]|0)){b=-1094995529;break}if((m|0)>=(c[k+13124>>2]|0)){b=-1094995529;break}m=od(o,4)|0;c[(c[j>>2]|0)+1648>>2]=m;m=od(c[(c[j>>2]|0)+48>>2]|0,4)|0;c[(c[j>>2]|0)+1652>>2]=m;m=c[j>>2]|0;if(!(c[m+1648>>2]|0)){b=-12;break}if(!(c[m+1652>>2]|0)){b=-12;break}p=(bd(n)|0)&255;a[(c[j>>2]|0)+52>>0]=p;p=c[j>>2]|0;if(!(a[p+52>>0]|0)){q=(c[p+44>>2]|0)+ 
-1|0;if((q|0)>0){o=0;m=0;r=0;do{q=(dd(n)|0)+1|0;p=c[j>>2]|0;c[(c[p+1648>>2]|0)+(r<<2)>>2]=q;o=ae(q|0,0,o|0,m|0)|0;m=D;r=r+1|0;q=(c[p+44>>2]|0)+ -1|0}while((r|0)<(q|0))}else{m=0;o=0}r=c[k+13128>>2]|0;s=((r|0)<0)<<31>>31;if(!(m>>>0<s>>>0|(m|0)==(s|0)&o>>>0<r>>>0)){b=-1094995529;break}I=$d(r|0,s|0,o|0,m|0)|0;c[(c[p+1648>>2]|0)+(q<<2)>>2]=I;q=(c[p+48>>2]|0)+ -1|0;if((q|0)>0){p=0;o=0;r=0;do{q=(dd(n)|0)+1|0;m=c[j>>2]|0;c[(c[m+1652>>2]|0)+(r<<2)>>2]=q;p=ae(q|0,0,p|0,o|0)|0;o=D;r=r+1|0;q=(c[m+48>>2]|0)+ -1|0}while((r|0)<(q|0))}else{m=p;o=0;p=0}r=c[k+13132>>2]|0;s=((r|0)<0)<<31>>31;if(!(o>>>0<s>>>0|(o|0)==(s|0)&p>>>0<r>>>0)){b=-1094995529;break}I=$d(r|0,s|0,p|0,o|0)|0;c[(c[m+1652>>2]|0)+(q<<2)>>2]=I}I=(bd(n)|0)&255;a[(c[j>>2]|0)+53>>0]=I}I=(bd(n)|0)&255;a[(c[j>>2]|0)+54>>0]=I;I=(bd(n)|0)&255;a[(c[j>>2]|0)+55>>0]=I;if((a[(c[j>>2]|0)+55>>0]|0)!=0?(I=(bd(n)|0)&255,a[(c[j>>2]|0)+56>>0]=I,I=(bd(n)|0)&255,a[(c[j>>2]|0)+57>>0]=I,(a[(c[j>>2]|0)+57>>0]|0)==0):0){m=(ed(n)|0)<<1;c[(c[j>>2]|0)+60>>2]=m;m=(ed(n)|0)<<1;I=c[j>>2]|0;c[I+64>>2]=m;if(((c[I+60>>2]|0)+13|0)>>>0>26){b=-1094995529;break}if((m+13|0)>>>0>26){b=-1094995529;break}}p=(bd(n)|0)&255;a[(c[j>>2]|0)+68>>0]=p;p=c[j>>2]|0;if(a[p+68>>0]|0){q=0;do{o=p+(q<<6)+69|0;m=o+16|0;do{a[o>>0]=16;o=o+1|0}while((o|0)<(m|0));a[p+q+1605>>0]=16;a[p+q+1611>>0]=16;q=q+1|0}while((q|0)!=6);o=p+453|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+517|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+581|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+645|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+709|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+773|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+837|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+901|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+965|0;q=
2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1029|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1093|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1157|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1221|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1285|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1349|0;q=2744;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1413|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1477|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1541|0;q=2808;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));m=c[j>>2]|0;v=(c[l>>2]|0)+204|0;w=0;do{p=(w|0)>0?64:16;q=(w|0)>1;o=w+ -2|0;x=(w|0)==3?3:1;r=1<<(w<<1)+4;t=(r|0)>0;s=(w|0)==0;r=(r|0)<64?r:64;u=0;do{if(!(((bd(v)|0)&255)<<24>>24)){y=dd(v)|0;if(y){if(u>>>0<y>>>0){b=-1094995529;break a}y=u-y|0;fe(m+(w*384|0)+(u<<6)+69|0,m+(w*384|0)+(y<<6)+69|0,p|0)|0;if(q)a[m+(o*6|0)+u+1605>>0]=a[m+(o*6|0)+y+1605>>0]|0}}else{if(q){z=(ed(v)|0)+8|0;a[m+(o*6|0)+u+1605>>0]=z}else z=8;if(t){y=0;do{if(s)A=(d[24+y>>0]<<2)+(d[8+y>>0]|0)|0;else A=(d[104+y>>0]<<3)+(d[40+y>>0]|0)|0;z=(z+256+(ed(v)|0)|0)%256|0;a[m+(w*384|0)+(u<<6)+A+69>>0]=z;y=y+1|0}while((y|0)!=(r|0))}}u=u+x|0}while((u|0)<6);w=w+1|0}while((w|0)<4);if((c[k+4>>2]|0)==3){o=0;do{a[m+o+1285>>0]=a[m+o+901>>0]|0;a[m+o+1349>>0]=a[m+o+965>>0]|0;a[m+o+1477>>0]=a[m+o+1093>>0]|0;a[m+o+1541>>0]=a[m+o+1157>>0]|0;o=o+1|0}while((o|0)!=64);a[m+1612>>0]=a[m+1606>>0]|0;a[m+1613>>0]=a[m+1607>>0]|0;a[m+1615>>0]=a[m+1609>>0]|0;a[m+1616>>0]=a[m+1610>>0]|0}}I=(bd(n)|0)&255;a[(c[j>>2]|0)+1617>>0]=I;I=(dd(n)|0)+2|0;c[(c[j>>2]|0)+1620>>2]=I;m=k+13080|0;if(I>>>0<=(c[m>>2]|0)>>>0){I=(bd(n)|0)&255;a[(c[j>>2]|0)+1628>>0]=I;do 
if((bd(n)|0)!=0?(I=bd(n)|0,_c(n,7)|0,(I|0)!=0):0){n=c[j>>2]|0;p=(c[l>>2]|0)+204|0;if(a[n+21>>0]|0)a[n+1629>>0]=(dd(p)|0)+2;a[n+1630>>0]=bd(p)|0;I=(bd(p)|0)&255;a[n+1631>>0]=I;if(I<<24>>24){a[n+1632>>0]=dd(p)|0;I=dd(p)|0;o=n+1633|0;a[o>>0]=I;if((I&255)>>>0<5)l=0;else break;while(1){a[n+l+1634>>0]=ed(p)|0;a[n+l+1639>>0]=ed(p)|0;if((l|0)<(d[o>>0]|0))l=l+1|0;else break}}a[n+1644>>0]=dd(p)|0;a[n+1645>>0]=dd(p)|0}while(0);l=od((c[(c[j>>2]|0)+44>>2]|0)+1|0,4)|0;c[(c[j>>2]|0)+1656>>2]=l;l=od((c[(c[j>>2]|0)+48>>2]|0)+1|0,4)|0;c[(c[j>>2]|0)+1660>>2]=l;l=k+13128|0;o=od(c[l>>2]|0,4)|0;c[(c[j>>2]|0)+1664>>2]=o;o=c[j>>2]|0;n=c[o+1656>>2]|0;if(((n|0)!=0?(c[o+1660>>2]|0)!=0:0)?(c[o+1664>>2]|0)!=0:0){if(a[o+52>>0]|0){p=c[o+1648>>2]|0;if(!p){o=od(c[o+44>>2]|0,4)|0;c[(c[j>>2]|0)+1648>>2]=o;o=od(c[(c[j>>2]|0)+48>>2]|0,4)|0;c[(c[j>>2]|0)+1652>>2]=o;o=c[j>>2]|0;p=c[o+1648>>2]|0;if(!p){b=-12;break}}n=c[o+1652>>2]|0;if(!n){b=-12;break}q=o+44|0;s=c[q>>2]|0;if((s|0)>0){r=0;do{I=r;r=r+1|0;H=c[l>>2]|0;c[p+(I<<2)>>2]=(($(H,r)|0)/(s|0)|0)-(($(H,I)|0)/(s|0)|0);s=c[q>>2]|0}while((r|0)<(s|0))}q=o+48|0;s=c[q>>2]|0;if((s|0)>0){p=k+13132|0;r=0;do{I=r;r=r+1|0;H=c[p>>2]|0;c[n+(I<<2)>>2]=(($(H,r)|0)/(s|0)|0)-(($(H,I)|0)/(s|0)|0);s=c[q>>2]|0}while((r|0)<(s|0))}n=c[o+1656>>2]|0}c[n>>2]=0;q=o+44|0;if((c[q>>2]|0)>0){p=c[o+1648>>2]|0;r=0;s=0;do{r=(c[p+(s<<2)>>2]|0)+r|0;s=s+1|0;c[n+(s<<2)>>2]=r}while((s|0)<(c[q>>2]|0))}s=c[o+1660>>2]|0;c[s>>2]=0;r=o+48|0;if((c[r>>2]|0)>0){q=c[o+1652>>2]|0;t=0;p=0;do{t=(c[q+(p<<2)>>2]|0)+t|0;p=p+1|0;c[s+(p<<2)>>2]=t}while((p|0)<(c[r>>2]|0))}r=c[l>>2]|0;if((r|0)>0){o=c[o+1664>>2]|0;p=0;q=0;do{q=(p>>>0>(c[n+(q<<2)>>2]|0)>>>0&1)+q|0;c[o+(p<<2)>>2]=q;p=p+1|0;r=c[l>>2]|0}while((p|0)<(r|0))}x=$(c[k+13132>>2]|0,r)|0;n=od(x,4)|0;c[(c[j>>2]|0)+1668>>2]=n;n=od(x,4)|0;c[(c[j>>2]|0)+1672>>2]=n;n=od(x,4)|0;c[(c[j>>2]|0)+1676>>2]=n;n=k+13164|0;q=(c[n>>2]|0)+2|0;q=od($(q,q)|0,4)|0;c[(c[j>>2]|0)+1688>>2]=q;q=c[j>>2]|0;p=c[q+1668>>2]|0;if(!p){b=-12;break}w=c[q+1672>>2]|0;if(!w){b=-12;break}o=c[
q+1676>>2]|0;if(!o){b=-12;break}if(!(c[q+1688>>2]|0)){b=-12;break}if((x|0)>0){B=q+44|0;r=q+48|0;s=c[q+1660>>2]|0;v=c[q+1648>>2]|0;u=c[q+1656>>2]|0;t=q+1652|0;A=0;do{C=c[l>>2]|0;y=(A|0)%(C|0)|0;z=(A|0)/(C|0)|0;G=c[B>>2]|0;E=0;while(1){if((E|0)>=(G|0)){E=0;break}F=E+1|0;if(y>>>0<(c[u+(F<<2)>>2]|0)>>>0)break;else E=F}H=c[r>>2]|0;F=0;while(1){if((F|0)>=(H|0)){F=0;break}G=F+1|0;if(z>>>0<(c[s+(G<<2)>>2]|0)>>>0)break;else F=G}if((E|0)>0){G=c[(c[t>>2]|0)+(F<<2)>>2]|0;H=0;I=0;do{I=($(c[v+(H<<2)>>2]|0,G)|0)+I|0;H=H+1|0}while((H|0)!=(E|0))}else I=0;if((F|0)>0){G=c[t>>2]|0;H=0;do{I=($(c[G+(H<<2)>>2]|0,C)|0)+I|0;H=H+1|0}while((H|0)!=(F|0))}H=$(c[v+(E<<2)>>2]|0,z-(c[s+(F<<2)>>2]|0)|0)|0;I=I+y+H-(c[u+(E<<2)>>2]|0)|0;c[p+(A<<2)>>2]=I;c[w+(I<<2)>>2]=A;A=A+1|0}while((A|0)!=(x|0))}else r=q+48|0;x=c[r>>2]|0;if((x|0)>0){s=q+44|0;t=q+1660|0;q=q+1656|0;z=c[s>>2]|0;u=0;w=0;while(1){v=u;u=u+1|0;if((z|0)>0){x=c[t>>2]|0;y=x+(u<<2)|0;G=c[y>>2]|0;B=z;z=0;do{E=c[x+(v<<2)>>2]|0;A=z;z=z+1|0;if(E>>>0<G>>>0){B=c[q>>2]|0;C=B+(z<<2)|0;F=c[C>>2]|0;do{H=c[B+(A<<2)>>2]|0;if(H>>>0<F>>>0){do{c[o+(c[p+(($(c[l>>2]|0,E)|0)+H<<2)>>2]<<2)>>2]=w;H=H+1|0;F=c[C>>2]|0}while(H>>>0<F>>>0);G=c[y>>2]|0}E=E+1|0}while(E>>>0<G>>>0);B=c[s>>2]|0}w=w+1|0}while((z|0)<(B|0));v=c[r>>2]|0;z=B}else v=x;if((u|0)>=(v|0))break;else x=v}}else 
w=0;o=od(w,4)|0;c[(c[j>>2]|0)+1680>>2]=o;o=c[j>>2]|0;p=c[o+1680>>2]|0;if(!p){b=-12;break}r=o+48|0;u=c[r>>2]|0;if((u|0)>0){q=o+44|0;t=c[q>>2]|0;s=0;do{if((t|0)>0){u=c[o+1660>>2]|0;v=c[o+1656>>2]|0;w=0;do{I=$(c[l>>2]|0,c[u+(s<<2)>>2]|0)|0;c[p+(($(t,s)|0)+w<<2)>>2]=(c[v+(w<<2)>>2]|0)+I;w=w+1|0;t=c[q>>2]|0}while((w|0)<(t|0));u=c[r>>2]|0}s=s+1|0}while((s|0)<(u|0))}k=(c[m>>2]|0)-(c[k+13072>>2]|0)|0;v=c[n>>2]|0;c[o+1684>>2]=(c[o+1688>>2]|0)+(v+3<<2);p=v+2|0;if((p|0)>0){m=c[(c[j>>2]|0)+1688>>2]|0;o=0;do{c[m+(($(p,o)|0)<<2)>>2]=-1;c[m+(o<<2)>>2]=-1;o=o+1|0;v=c[n>>2]|0;p=v+2|0}while((o|0)<(p|0))}if((v|0)>-1){m=c[j>>2]|0;j=m+1668|0;p=k<<1;o=(k|0)>0;m=m+1684|0;q=0;while(1){if((v|0)>-1){r=q>>k;t=c[j>>2]|0;s=c[m>>2]|0;u=0;while(1){z=c[t+(($(c[l>>2]|0,r)|0)+(u>>k)<<2)>>2]<<p;if(o){w=0;do{y=1<<w;if(!(y&q))x=0;else x=y<<1<<w;z=((y&u|0)==0?0:y<<w)+z+x|0;w=w+1|0}while((w|0)!=(k|0))}c[s+(($(v+2|0,q)|0)+u<<2)>>2]=z;v=c[n>>2]|0;if((u|0)<(v|0))u=u+1|0;else break}}if((q|0)<(v|0))q=q+1|0;else break}}if(((c[g+216>>2]|0)-(c[g+212>>2]|0)|0)<0){b=0;break}I=b+(h<<2)+400|0;vd(I);c[I>>2]=c[e>>2];I=0;i=f;return I|0}else b=-12}else b=-1094995529}else b=-1094995529}else b=-1094995529;while(0);vd(e);I=b;i=f;return I|0}function Fc(a,b){a=a|0;b=b|0;var d=0;a=i;i=i+16|0;d=a;c[d>>2]=b;jd(b+1648|0);jd(b+1652|0);jd(b+1656|0);jd(b+1660|0);jd(b+1664|0);jd(b+1668|0);jd(b+1672|0);jd(b+1680|0);jd(b+1676|0);jd(b+1688|0);jd(d);i=a;return}function Gc(a){a=a|0;var d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0;d=i;e=a+136|0;g=a+2512|0;f=a+4524|0;while(1){h=(c[e>>2]|0)+204|0;j=0;do{k=_c(h,8)|0;j=k+j|0}while((k|0)==255);k=0;do{l=_c(h,8)|0;k=l+k|0}while((l|0)==255);do if((c[g>>2]|0)==39)if((j|0)==256){Hc(a);break}else if((j|0)==257){b[f>>1]=_c(h,16)|0;break}else{ad(h,k<<3);break}else if((j|0)==132){Hc(a);break}else{ad(h,k<<3);break}while(0);h=c[e>>2]|0;if(((c[h+216>>2]|0)-(c[h+212>>2]|0)|0)<=0){a=15;break}if(($c(h+204|0,8)|0)==128){a=15;break}}if((a|0)==15){i=d;return 1}return 0}function Hc(b){b=b|0;var 
d=0,e=0,f=0,g=0,h=0,j=0;d=i;e=(c[b+136>>2]|0)+204|0;g=(_c(e,8)|0)&255;f=b+4468|0;h=0;do{if((g|0)==2)ad(e,32);else if(!g){a[f>>0]=1;j=0;do{a[b+(h<<4)+j+4420>>0]=_c(e,8)|0;j=j+1|0}while((j|0)!=16)}else if((g|0)==1)ad(e,16);h=h+1|0}while((h|0)!=3);i=d;return}function Ic(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0,g=0,h=0;d=i;f=c[b+52>>2]|0;e=a+60|0;if((f|0)>0){if((c[e>>2]|0)==0?(f=md(f)|0,c[e>>2]=f,(f|0)==0):0){f=-12;i=d;return f|0}}else c[e>>2]=0;f=a+12|0;c[f>>2]=b;c[a+424>>2]=0;c[a+800>>2]=1;h=a+912|0;g=a+936|0;c[h+0>>2]=0;c[h+4>>2]=0;c[h+8>>2]=0;c[h+12>>2]=0;c[g>>2]=0;c[g+4>>2]=-2147483648;g=a+928|0;c[g>>2]=0;c[g+4>>2]=-2147483648;a=Da[c[b+76>>2]&3](a)|0;if((a|0)>=0){h=0;i=d;return h|0}jd(e);c[f>>2]=0;h=a;i=d;return h|0}function Jc(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;if(!a){i=b;return 0}e=a+12|0;f=c[e>>2]|0;if((f|0)!=0?(d=c[f+92>>2]|0,(d|0)!=0):0)Da[d&3](a)|0;c[a+796>>2]=0;jd(a+60|0);c[e>>2]=0;c[a+808>>2]=0;i=b;return 0}function Kc(a,b,d,e,f,g){a=a|0;b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0;h=i;if((f|0)<=0){i=h;return 0}j=(e|0)==0;k=0;do{l=d+($(k,g)|0)|0;l=Ia[b&1](a,l)|0;if(!j)c[e+(k<<2)>>2]=l;k=k+1|0}while((k|0)!=(f|0));i=h;return 0}function Lc(a,b,d,e,f){a=a|0;b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0;g=i;if((f|0)<=0){i=g;return 0}h=(e|0)==0;j=0;do{k=Ga[b&1](a,d,j,0)|0;if(!h)c[e+(j<<2)>>2]=k;j=j+1|0}while((j|0)!=(f|0));i=g;return 0}function Mc(b,f,g){b=b|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0;g=i;h=Bd(c[f+76>>2]|0)|0;b=h+4|0;if(!(a[b>>0]|0)){p=0;i=g;return p|0}k=f+64|0;l=h+5|0;m=f+68|0;n=h+6|0;j=0;while(1){p=($((((e[h+(j<<1)+8>>1]|0)>>>11&15)+8|0)>>>3,c[k>>2]|0)|0)+31&-32;if((j+ -1|0)>>>0<2){p=0-(0-p>>d[l>>0])|0;c[f+(j<<2)+32>>2]=p;o=0-(0-((c[m>>2]|0)+31&-32)>>d[n>>0])|0}else{c[f+(j<<2)+32>>2]=p;o=(c[m>>2]|0)+31&-32}o=sd(($(p,o)|0)+32|0)|0;c[f+(j<<2)+304>>2]=o;if(!o){b=-1;f=8;break}c[f+(j<<2)>>2]=c[o+4>>2];j=j+1|0;if((j|0)>=(d[b>>0]|0)){b=0;f=8;break}}if((f|0)==8){i=g;return b|0}return 0}function Nc(a,b){a=a|0;b=b|0;var 
d=0,e=0,f=0,g=0;d=i;ce(a|0,0,976)|0;e=(b|0)!=0;if(e){c[a+8>>2]=c[b+8>>2];c[a+48>>2]=c[b+12>>2]}else c[a+8>>2]=-1;c[a+100>>2]=0;c[a+104>>2]=1;c[a+888>>2]=0;c[a+892>>2]=1;c[a+896>>2]=0;c[a+900>>2]=1;c[a+476>>2]=1;c[a+816>>2]=1;c[a+820>>2]=1;c[a+220>>2]=0;c[a+224>>2]=1;c[a+136>>2]=-1;c[a+416>>2]=-1;g=a+696|0;c[g>>2]=0;c[g+4>>2]=-2147483648;if((e?(f=c[b+52>>2]|0,(f|0)!=0):0)?(g=md(f)|0,c[a+60>>2]=g,(g|0)==0):0){g=-12;i=d;return g|0}g=0;i=d;return g|0}function Oc(a){a=a|0;var b=0,c=0;b=i;c=fd(976)|0;if(c){if((Nc(c,a)|0)<0){id(c);c=0}}else c=0;i=b;return c|0}function Pc(a,b,d,e){a=a|0;b=b|0;d=d|0;e=e|0;var f=0,g=0,h=0,j=0,k=0;f=i;i=i+80|0;g=f;k=g+0|0;j=e+0|0;h=k+80|0;do{c[k>>2]=c[j>>2];k=k+4|0;j=j+4|0}while((k|0)<(h|0));h=a+12|0;j=c[h>>2]|0;if(!j){k=-22;i=f;return k|0}if(c[j+8>>2]|0){k=-22;i=f;return k|0}c[d>>2]=0;j=c[a+124>>2]|0;k=c[a+128>>2]|0;if(!j){if(k){k=-22;i=f;return k|0}}else{if(!((j|0)>0&(k|0)>0)){k=-22;i=f;return k|0}if((j+128|0)>>>0>=(268435455/((k+128|0)>>>0)|0)>>>0){k=-22;i=f;return k|0}}yd(b);h=c[h>>2]|0;if(((c[h+16>>2]&32|0)==0?(c[e+28>>2]|0)==0:0)?(c[a+808>>2]&1|0)==0:0){k=0;i=f;return k|0}g=Ga[c[h+88>>2]&1](a,b,d,g)|0;if(!(c[d>>2]|0)){yd(b);k=g;i=f;return k|0}else{k=a+424|0;c[k>>2]=(c[k>>2]|0)+1;k=g;i=f;return k|0}return 0}function Qc(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;c=i;if((a|0)>0&(b|0)>0?(a+128|0)>>>0<(268435455/((b+128|0)>>>0)|0)>>>0:0){d=0;i=c;return d|0}d=-22;i=c;return d|0}function Rc(a,b){a=a|0;b=b|0;return 0}function Sc(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0;f=i;e=a+8|0;if(!(c[e>>2]|0)){g=c[a+116>>2]|0;h=a+120|0;j=c[h>>2]|0;if(!((g|0)>0&(j|0)>0)){l=-22;i=f;return l|0}if((g+128|0)>>>0>=(268435455/((j+128|0)>>>0)|0)>>>0){l=-22;i=f;return l|0}j=c[a+136>>2]|0;if((j|0)<0){l=-22;i=f;return l|0}k=b+64|0;l=b+68|0;if((c[k>>2]|0)>=1?(c[l>>2]|0)>=1:0)g=1;else{m=a+792|0;n=0-(0-(c[a+124>>2]|0)>>c[m>>2])|0;c[k>>2]=(g|0)>(n|0)?g:n;k=c[h>>2]|0;g=0-(0-(c[a+128>>2]|0)>>c[m>>2])|0;c[l>>2]=(k|0)>(g|0)?k:g;g=0}c[b+76>>2]=j}else 
g=1;d=xa[c[a+476>>2]&1](a,b,d)|0;if(c[e>>2]|g){n=d;i=f;return n|0}c[b+64>>2]=c[a+116>>2];c[b+68>>2]=c[a+120>>2];n=d;i=f;return n|0}function Tc(a,b,d){a=a|0;b=b|0;d=d|0;var e=0;e=i;c[b+4>>2]=a;a=Sc(a,c[b>>2]|0,d)|0;i=e;return a|0}function Uc(a,b){a=a|0;b=b|0;a=i;b=c[b>>2]|0;if(b)yd(b);i=a;return}function Vc(a){a=a|0;return}function Wc(a,b,c){a=a|0;b=b|0;c=c|0;return}function Xc(a){a=a|0;var b=0,d=0;b=i;d=a+8|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+16|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+64|0;c[d>>2]=-1;c[d+4>>2]=-1;d=a+72|0;c[d>>2]=0;c[d+4>>2]=0;d=a+32|0;c[a>>2]=0;c[d+0>>2]=0;c[d+4>>2]=0;c[d+8>>2]=0;c[d+12>>2]=0;c[d+16>>2]=0;i=b;return}function Yc(a,b,e){a=a|0;b=b|0;e=e|0;var f=0,g=0,h=0;g=a+16|0;c[a+12>>2]=b;c[a+20>>2]=b+e;h=b+1|0;c[g>>2]=h;e=(d[b>>0]|0)<<18;c[a>>2]=e;f=b+2|0;c[g>>2]=f;e=(d[h>>0]|0)<<10|e;c[a>>2]=e;c[g>>2]=b+3;c[a>>2]=(d[f>>0]|0)<<2|e|2;c[a+4>>2]=510;return}function Zc(){var b=0,e=0,f=0,g=0,h=0,j=0;b=i;if(!(c[718]|0))e=0;else{i=b;return}while(1)if(e){g=(e&65280|0)==0;a[2880+e>>0]=(g?8:0)-(d[4680+(g?e:e>>>8)>>0]|0);e=e+1|0;if((e|0)==512){e=0;break}else continue}else{a[2880]=9;e=1;continue}while(1){f=e<<1;g=0;do{j=a[4224+(e<<2)+g>>0]|0;h=(g<<7)+f|0;a[(h|1)+3392>>0]=j;a[h+3392>>0]=j;g=g+1|0}while((g|0)!=4);j=(d[4480+e>>0]|0)<<1;a[f+4032>>0]=j;a[f+4033>>0]=j|1;if(e){h=(d[4544+e>>0]|0)<<1;j=128-f|0;a[j+3903>>0]=h;a[j+3902>>0]=h|1;e=e+1|0;if((e|0)==64)break;else continue}else{e=128-f|0;a[e+3903>>0]=1;a[e+3902>>0]=0;e=1;continue}}g=4160|0;f=4608|0;e=g+63|0;do{a[g>>0]=a[f>>0]|0;g=g+1|0;f=f+1|0}while((g|0)<(e|0));c[718]=1;i=b;return}function _c(a,b){a=a|0;b=b|0;var e=0,f=0,g=0,h=0;e=i;f=a+8|0;h=c[f>>2]|0;g=c[a+16>>2]|0;a=(c[a>>2]|0)+(h>>>3)|0;a=(ee(d[a>>0]|d[a+1>>0]<<8|d[a+2>>0]<<16|d[a+3>>0]<<24|0)|0)<<(h&7)>>>(32-b|0);b=h+b|0;c[f>>2]=g>>>0>b>>>0?b:g;i=e;return a|0}function $c(a,b){a=a|0;b=b|0;var e=0,f=0;e=i;f=c[a+8>>2]|0;a=(c[a>>2]|0)+(f>>>3)|0;a=(ee(d[a>>0]|d[a+1>>0]<<8|d[a+2>>0]<<16|d[a+3>>0]<<24|0)|0)<<(f&7)>>>(32-b|0);i=e;return a|0}function 
ad(a,b){a=a|0;b=b|0;var d=0;d=a+8|0;a=c[a+16>>2]|0;b=(c[d>>2]|0)+b|0;c[d>>2]=a>>>0>b>>>0?b:a;return}function bd(a){a=a|0;var b=0,e=0,f=0;e=a+8|0;f=c[e>>2]|0;b=(d[(c[a>>2]|0)+(f>>>3)>>0]|0)<<(f&7)>>>7&1;c[e>>2]=((f|0)<(c[a+16>>2]|0)&1)+f;return b|0}function cd(a,b){a=a|0;b=b|0;var e=0,f=0,g=0,h=0,j=0,k=0;e=i;if(!b){j=0;i=e;return j|0}f=a+8|0;h=c[f>>2]|0;g=c[a+16>>2]|0;j=c[a>>2]|0;a=j+(h>>>3)|0;a=(ee(d[a>>0]|d[a+1>>0]<<8|d[a+2>>0]<<16|d[a+3>>0]<<24|0)|0)<<(h&7);if((b|0)<26){j=h+b|0;c[f>>2]=g>>>0>j>>>0?j:g;j=a>>>(32-b|0);i=e;return j|0}else{k=h+16|0;k=g>>>0>k>>>0?k:g;c[f>>2]=k;h=b+ -16|0;j=j+(k>>>3)|0;j=(ee(d[j>>0]|d[j+1>>0]<<8|d[j+2>>0]<<16|d[j+3>>0]<<24|0)|0)<<(k&7)>>>(48-b|0);b=k+h|0;c[f>>2]=g>>>0>b>>>0?b:g;j=j|a>>>16<<h;i=e;return j|0}return 0}function dd(a){a=a|0;var b=0,e=0,f=0,g=0,h=0,j=0;b=i;i=i+32|0;e=b;c[e+0>>2]=c[a+0>>2];c[e+4>>2]=c[a+4>>2];c[e+8>>2]=c[a+8>>2];c[e+12>>2]=c[a+12>>2];c[e+16>>2]=c[a+16>>2];e=cd(e,32)|0;f=e>>>0>65535;e=f?e>>>16:e;f=f?16:0;if(e&65280){f=f|8;e=e>>>8}j=31-f-(d[4680+e>>0]|0)|0;g=a+8|0;f=c[g>>2]|0;e=0-f|0;h=(c[a+16>>2]|0)-f|0;if((j|0)<(e|0)){h=e;h=h+f|0;c[g>>2]=h;j=j+1|0;j=cd(a,j)|0;j=j+ -1|0;i=b;return j|0}h=(h|0)<(j|0)?h:j;h=h+f|0;c[g>>2]=h;j=j+1|0;j=cd(a,j)|0;j=j+ -1|0;i=b;return j|0}function ed(a){a=a|0;var b=0;b=i;a=dd(a)|0;if(!(a&1)){a=0-(a>>>1)|0;i=b;return a|0}else{a=(a+1|0)>>>1;i=b;return a|0}return 0}function fd(a){a=a|0;var b=0,d=0,e=0;b=i;d=c[1168]|0;if((d+ -32|0)>>>0>=a>>>0){e=Vd(a)|0;if((e|0)==0&(a|0)==0)if((d|0)==32)e=0;else e=Vd(1)|0}else e=0;i=b;return e|0}function gd(a,b){a=a|0;b=b|0;var d=0;d=i;if(((c[1168]|0)+ -32|0)>>>0<b>>>0){b=0;i=d;return b|0}b=Xd(a,((b|0)==0&1)+b|0)|0;i=d;return b|0}function hd(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;e=i;f=$(d,b)|0;if((d|b)>>>0>65535&(d|0)!=0?((f>>>0)/(d>>>0)|0|0)!=(b|0):0){Wd(a);d=0;i=e;return d|0}if(((c[1168]|0)+ -32|0)>>>0<f>>>0)b=0;else b=Xd(a,((f|0)==0&1)+f|0)|0;if((b|0)!=0|(f|0)==0){d=b;i=e;return d|0}Wd(a);d=0;i=e;return d|0}function id(a){a=a|0;var 
b=0;b=i;Wd(a);i=b;return}function jd(a){a=a|0;var b=0;b=i;Wd(c[a>>2]|0);c[a>>2]=0;i=b;return}function kd(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;e=i;if(((d|0)!=0?(2147483647/(d>>>0)|0)>>>0>b>>>0:0)?(f=$(d,b)|0,((c[1168]|0)+ -32|0)>>>0>=f>>>0):0)a=Xd(a,((f|0)==0&1)+f|0)|0;else a=0;i=e;return a|0}function ld(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;f=i;e=hd(c[a>>2]|0,b,d)|0;c[a>>2]=e;i=f;return((e|0)!=0|(b|0)==0|(d|0)==0?0:-12)|0}function md(a){a=a|0;var b=0,c=0;c=i;b=fd(a)|0;if(b)ce(b|0,0,a|0)|0;i=c;return b|0}function nd(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;e=i;if((c[b>>2]|0)>>>0>d>>>0){i=e;return}f=((d*17|0)>>>4)+32|0;d=f>>>0>d>>>0?f:d;Wd(c[a>>2]|0);f=fd(d)|0;c[a>>2]=f;c[b>>2]=(f|0)==0?0:d;i=e;return}function od(a,b){a=a|0;b=b|0;var c=0;c=i;if((b|0)!=0?(2147483647/(b>>>0)|0)>>>0>a>>>0:0)b=fd($(b,a)|0)|0;else b=0;i=c;return b|0}function pd(a,b){a=a|0;b=b|0;var c=0,d=0,e=0;c=i;if(((b|0)!=0?(2147483647/(b>>>0)|0)>>>0>a>>>0:0)?(e=$(b,a)|0,d=fd(e)|0,(d|0)!=0):0)ce(d|0,0,e|0)|0;else d=0;i=c;return d|0}function qd(a,b,d,e,f){a=a|0;b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0;g=i;i=i+16|0;h=g;j=md(24)|0;c[h>>2]=j;if(!j){f=0;i=g;return f|0}c[j>>2]=a;c[j+4>>2]=b;c[j+12>>2]=(d|0)!=0?d:7;c[j+16>>2]=e;c[j+8>>2]=1;if(f&1){f=(c[h>>2]|0)+20|0;c[f>>2]=c[f>>2]|1}j=md(12)|0;if(!j){jd(h);f=0;i=g;return f|0}else{c[j>>2]=c[h>>2];c[j+4>>2]=a;c[j+8>>2]=b;f=j;i=g;return f|0}return 0}function rd(a,b){a=a|0;b=b|0;a=i;id(b);i=a;return}function sd(a){a=a|0;var b=0,d=0,e=0;b=i;i=i+16|0;d=b;e=fd(a)|0;c[d>>2]=e;if(e){a=qd(e,a,7,0,0)|0;if(!a){jd(d);a=0}}else a=0;i=b;return a|0}function td(a){a=a|0;var b=0,d=0;b=i;d=sd(a)|0;if(!d){d=0;i=b;return d|0}ce(c[d+4>>2]|0,0,a|0)|0;i=b;return d|0}function ud(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;i=i+16|0;e=b;d=md(12)|0;if(!d){e=0;i=b;return e|0}c[d+0>>2]=c[a+0>>2];c[d+4>>2]=c[a+4>>2];c[d+8>>2]=c[a+8>>2];f=(c[a>>2]|0)+8|0;a=c[f>>2]|0;c[f>>2]=a+1;c[e>>2]=a+1;e=d;i=b;return e|0}function vd(a){a=a|0;var 
b=0,d=0,e=0,f=0;b=i;i=i+16|0;e=b+4|0;d=b;if(!a){i=b;return}f=c[a>>2]|0;if(!f){i=b;return}f=c[f>>2]|0;c[d>>2]=f;jd(a);a=f+8|0;f=c[a>>2]|0;c[a>>2]=f+ -1;c[e>>2]=f+ -1;if(c[e>>2]|0){i=b;return}f=c[d>>2]|0;Aa[c[f+12>>2]&7](c[f+16>>2]|0,c[f>>2]|0);jd(d);i=b;return}function wd(){var a=0,b=0,d=0;a=i;b=md(400)|0;if(!b){b=0;i=a;return b|0}ce(b|0,0,400)|0;d=b+136|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+144|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+128|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+360|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+376|0;c[d>>2]=0;c[d+4>>2]=0;d=b+368|0;c[d>>2]=-1;c[d+4>>2]=-1;c[b+392>>2]=-1;c[b+80>>2]=1;c[b+120>>2]=0;c[b+124>>2]=1;c[b+76>>2]=-1;c[b+344>>2]=2;c[b+348>>2]=2;c[b+352>>2]=2;c[b+340>>2]=0;c[b+356>>2]=0;i=a;return b|0}function xd(a){a=a|0;var b=0,d=0;b=i;if((a|0)!=0?(d=c[a>>2]|0,(d|0)!=0):0){yd(d);jd(a)}i=b;return}function yd(a){a=a|0;var b=0,d=0;b=i;vd(a+304|0);vd(a+308|0);vd(a+312|0);vd(a+316|0);vd(a+320|0);vd(a+324|0);vd(a+328|0);vd(a+332|0);ce(a|0,0,400)|0;d=a+136|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+144|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+128|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+360|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+376|0;c[d>>2]=0;c[d+4>>2]=0;d=a+368|0;c[d>>2]=-1;c[d+4>>2]=-1;c[a+392>>2]=-1;c[a+80>>2]=1;c[a+120>>2]=0;c[a+124>>2]=1;c[a+76>>2]=-1;c[a+344>>2]=2;c[a+348>>2]=2;c[a+352>>2]=2;c[a+340>>2]=0;c[a+356>>2]=0;i=b;return}function zd(a,b){a=a|0;b=b|0;var d=0;d=i;fe(a|0,b|0,400)|0;ce(b|0,0,400)|0;a=b+136|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+144|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+128|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+360|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+376|0;c[a>>2]=0;c[a+4>>2]=0;a=b+368|0;c[a>>2]=-1;c[a+4>>2]=-1;c[b+392>>2]=-1;c[b+80>>2]=1;c[b+120>>2]=0;c[b+124>>2]=1;c[b+76>>2]=-1;c[b+344>>2]=2;c[b+348>>2]=2;c[b+352>>2]=2;c[b+340>>2]=0;c[b+356>>2]=0;i=d;return}function Ad(a,b){a=a|0;b=b|0;var 
d=0,e=0,f=0,g=0,h=0,j=0;d=i;c[a+76>>2]=c[b+76>>2];c[a+64>>2]=c[b+64>>2];c[a+68>>2]=c[b+68>>2];c[a+388>>2]=c[b+388>>2];j=b+296|0;h=c[j+4>>2]|0;f=a+296|0;c[f>>2]=c[j>>2];c[f+4>>2]=h;c[a+72>>2]=c[b+72>>2];f=c[b+304>>2]|0;if(!f)ta();else{e=f;g=0}while(1){if((e|0)!=0?(j=ud(e)|0,c[a+(g<<2)+304>>2]=j,(j|0)==0):0){e=5;break}g=g+1|0;if(g>>>0>=8){e=8;break}e=c[b+(g<<2)+304>>2]|0}if((e|0)==5){yd(a);j=-12;i=d;return j|0}else if((e|0)==8){c[a+0>>2]=c[b+0>>2];c[a+4>>2]=c[b+4>>2];c[a+8>>2]=c[b+8>>2];c[a+12>>2]=c[b+12>>2];c[a+16>>2]=c[b+16>>2];c[a+20>>2]=c[b+20>>2];c[a+24>>2]=c[b+24>>2];c[a+28>>2]=c[b+28>>2];j=a+32|0;h=b+32|0;c[j+0>>2]=c[h+0>>2];c[j+4>>2]=c[h+4>>2];c[j+8>>2]=c[h+8>>2];c[j+12>>2]=c[h+12>>2];c[j+16>>2]=c[h+16>>2];c[j+20>>2]=c[h+20>>2];c[j+24>>2]=c[h+24>>2];c[j+28>>2]=c[h+28>>2];j=0;i=d;return j|0}return 0}function Bd(a){a=a|0;var b=0,d=0,e=0,f=0;d=i;e=0;while(1){f=e+1|0;if((c[4936+(e*24|0)>>2]|0)==(a|0))break;if(f>>>0<4)e=f;else{e=0;b=5;break}}if((b|0)==5){i=d;return e|0}f=4940+(e*24|0)|0;i=d;return f|0}function Cd(b,d,e){b=b|0;d=d|0;e=e|0;var f=0,g=0;f=i;g=(c[b+24>>2]|0)==0?1:3;if((g|0)>(e|0)){b=c[b+8>>2]|0;c[d>>2]=c[b+(e<<2)+32>>2];b=c[b+(e<<2)>>2]|0;i=f;return b|0}if((a[b+29>>0]|0)!=0&(g|0)==(e|0)){b=c[b+12>>2]|0;c[d>>2]=c[b+32>>2];b=c[b>>2]|0;i=f;return b|0}else{c[d>>2]=0;b=0;i=f;return b|0}return 0}function Dd(d,e){d=d|0;e=e|0;var f=0,g=0,h=0;f=i;if(!(c[d+8>>2]|0)){h=-1;i=f;return h|0}c[e>>2]=c[d+16>>2];c[e+4>>2]=c[d+20>>2];a[e+8>>0]=c[d+24>>2];g=d+31|0;if(!(a[d+29>>0]|0))h=0;else h=(a[g>>0]|0)==0&1;a[e+9>>0]=h;a[e+12>>0]=a[d+33>>0]|0;a[e+13>>0]=a[g>>0]|0;a[e+14>>0]=a[d+32>>0]|0;a[e+10>>0]=c[d+36>>2];a[e+11>>0]=a[d+30>>0]|0;a[e+15>>0]=a[d+34>>0]|0;b[e+16>>1]=b[d+48>>1]|0;h=0;i=f;return h|0}function Ed(b,e){b=b|0;e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0.0,p=0,q=0.0,r=0.0,s=0.0,t=0,u=0.0,v=0,w=0.0,x=0.0,y=0.0;f=i;if(!(c[b+8>>2]|0)){t=-1;i=f;return t|0}g=b+68|0;if((a[g>>0]|0)!=0|e>>>0>1){t=-1;i=f;return 
t|0}a[b+76>>0]=(e|0)==1&1;l=b+77|0;a[l>>0]=0;a[b+78>>0]=0;h=b+24|0;if(((c[h>>2]|0)+ -1|0)>>>0<2?(t=b+16|0,p=c[t>>2]|0,j=b+84|0,c[j>>2]=(p+1|0)/2|0,c[b+88>>2]=((c[b+20>>2]|0)+1|0)/2|0,c[b+124>>2]=fd(p)|0,c[b+128>>2]=fd(c[t>>2]|0)|0,c[b+196>>2]=fd((c[j>>2]<<1)+14|0)|0,(c[h>>2]|0)==1):0){m=0;do{c[b+(m<<2)+132>>2]=fd(c[j>>2]|0)|0;c[b+(m<<2)+164>>2]=fd(c[j>>2]|0)|0;m=m+1|0}while((m|0)!=8)}j=d[b+30>>0]|0;v=(a[l>>0]|0)!=0?16:8;l=b+36|0;t=c[l>>2]|0;m=a[b+32>>0]|0;n=m&255;p=30-v|0;o=+((1<<v)+ -1|0)*+(1<<p|0);q=o/+((1<<j)+ -1|0);m=m<<24>>24!=0;if(m){v=j+ -8|0;r=o/+(224<<v|0);o=o/+(219<<v|0)}else{r=q;o=q}if(!t){u=.114;s=.299;k=11}else if((t|0)==3){u=.0722;s=.2126;k=11}else if((t|0)==4){u=.0593;s=.2627;k=11}if((k|0)==11){y=1.0-s;c[b+220>>2]=sa(+(r*y*2.0))|0;w=1.0-u;x=w-s;c[b+224>>2]=sa(+(r*(u*2.0*w/x)))|0;c[b+228>>2]=sa(+(r*(s*2.0*y/x)))|0;c[b+232>>2]=sa(+(r*w*2.0))|0}k=sa(+q)|0;c[b+208>>2]=k;c[b+200>>2]=p;t=1<<p+ -1;p=b+204|0;c[p>>2]=t;c[b+236>>2]=1<<j+ -1;if(m){v=sa(+o)|0;c[b+212>>2]=v;v=$(v,-16<<j+ -8)|0;c[b+216>>2]=v+(c[p>>2]|0)}else{c[b+212>>2]=k;c[b+216>>2]=t}c[b+240>>2]=j;c[b+244>>2]=n;if(!(c[h>>2]|0))c[b+248>>2]=4;else c[b+248>>2]=c[6064+(c[l>>2]<<2)>>2];a[g>>0]=1;c[b+72>>2]=e;c[b+92>>2]=Cd(b,b+108|0,0)|0;if(!(c[h>>2]|0))e=1;else{c[b+96>>2]=Cd(b,b+112|0,1)|0;c[b+100>>2]=Cd(b,b+116|0,2)|0;e=3}if(!(a[b+29>>0]|0))c[b+104>>2]=0;else c[b+104>>2]=Cd(b,b+120|0,e)|0;c[b+80>>2]=0;v=0;i=f;return v|0}function Fd(a,b,d){a=a|0;b=b|0;d=d|0;c[b>>2]=0;c[d>>2]=1;return}function Gd(b,e){b=b|0;e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0;g=i;h=b+80|0;l=c[h>>2]|0;if(l>>>0>=(c[b+20>>2]|0)>>>0){u=-1;i=g;return u|0}f=c[b+16>>2]|0;m=(c[b+92>>2]|0)+($(c[b+108>>2]|0,l)|0)|0;j=b+76|0;if(!(a[j>>0]|0))k=(a[b+78>>0]|0)!=0?4:3;else k=4;n=c[b+24>>2]|0;if(!n)Ka[c[b+248>>2]&7](b+200|0,e,m,0,0,f,k);else 
if((n|0)==2){u=(c[b+96>>2]|0)+($(c[b+112>>2]|0,l)|0)|0;p=(c[b+100>>2]|0)+($(c[b+116>>2]|0,l)|0)|0;t=b+124|0;q=b+30|0;r=b+28|0;s=b+196|0;Id(c[t>>2]|0,u,f,d[q>>0]|0,d[r>>0]|0,c[s>>2]|0);u=b+128|0;Id(c[u>>2]|0,p,f,d[q>>0]|0,d[r>>0]|0,c[s>>2]|0);Ka[c[b+248>>2]&7](b+200|0,e,m,c[t>>2]|0,c[u>>2]|0,f,k)}else if((n|0)==3){t=(c[b+96>>2]|0)+($(c[b+112>>2]|0,l)|0)|0;u=(c[b+100>>2]|0)+($(c[b+116>>2]|0,l)|0)|0;Ka[c[b+248>>2]&7](b+200|0,e,m,t,u,f,k)}else if((n|0)==1){if(!l){n=b+96|0;o=b+112|0;p=b+100|0;q=b+116|0;r=b+84|0;s=b+88|0;t=0;do{u=(t|0)>4?t+ -8|0:t;if((u|0)<0)u=0;else{v=c[s>>2]|0;u=(u|0)<(v|0)?u:v+ -1|0}w=(c[n>>2]|0)+($(c[o>>2]|0,u)|0)|0;v=(c[p>>2]|0)+($(c[q>>2]|0,u)|0)|0;fe(c[b+(t<<2)+132>>2]|0,w|0,c[r>>2]|0)|0;fe(c[b+(t<<2)+164>>2]|0,v|0,c[r>>2]|0)|0;t=t+1|0}while((t|0)!=8)}o=l>>1;q=(o|0)%8|0;w=l&1;n=b+124|0;t=b+196|0;u=b+30|0;v=b+28|0;Hd(c[n>>2]|0,b+132|0,f,q,c[t>>2]|0,d[u>>0]|0,w,d[v>>0]|0);p=b+128|0;Hd(c[p>>2]|0,b+164|0,f,q,c[t>>2]|0,d[u>>0]|0,w,d[v>>0]|0);if(w){u=(q+5|0)%8|0;t=o+5|0;v=c[b+88>>2]|0;v=(t|0)<(v|0)?t:v+ -1|0;t=(c[b+96>>2]|0)+($(v,c[b+112>>2]|0)|0)|0;v=(c[b+100>>2]|0)+($(c[b+116>>2]|0,v)|0)|0;w=b+84|0;fe(c[b+(u<<2)+132>>2]|0,t|0,c[w>>2]|0)|0;fe(c[b+(u<<2)+164>>2]|0,v|0,c[w>>2]|0)|0}Ka[c[b+248>>2]&7](b+200|0,e,m,c[n>>2]|0,c[p>>2]|0,f,k)}else{w=-1;i=g;return w|0}a:do if(!(a[b+31>>0]|0)){if(a[j>>0]|0){if(!(a[b+29>>0]|0)){if((f|0)<=0)break;b=e+3|0;e=0;while(1){a[b>>0]=-1;e=e+1|0;if((e|0)==(f|0))break a;else b=b+4|0}}j=c[b+104>>2]|0;k=$(c[b+120>>2]|0,l)|0;o=e+3|0;if((c[b+240>>2]|0)==8){if((f|0)>0){l=0;while(1){a[o>>0]=a[j+(l+k)>>0]|0;l=l+1|0;if((l|0)==(f|0))break;else o=o+4|0}}}else{l=c[b+208>>2]|0;m=c[b+204>>2]|0;n=c[b+200>>2]|0;if((f|0)>0){p=0;while(1){a[o>>0]=($(d[j+(p+k)>>0]|0,l)|0)+m>>n;p=p+1|0;if((p|0)==(f|0))break;else 
o=o+4|0}}}if(a[b+33>>0]|0){if(!(c[1258]|0)){c[1258]=1;b=1;do{c[5040+(b<<2)>>2]=(((b|0)/2|0)+16711808|0)/(b|0)|0;b=b+1|0}while((b|0)!=256)}if((f|0)>0){b=0;while(1){k=a[e+3>>0]|0;if(!(k<<24>>24)){a[e>>0]=-1;a[e+1>>0]=-1;a[e+2>>0]=-1}else{j=c[5040+((k&255)<<2)>>2]|0;l=a[e>>0]|0;if((l&255)<(k&255))l=(($(l&255,j)|0)+32768|0)>>>16&255;else l=-1;a[e>>0]=l;l=e+1|0;m=a[l>>0]|0;if((m&255)<(k&255))m=(($(m&255,j)|0)+32768|0)>>>16&255;else m=-1;a[l>>0]=m;l=e+2|0;m=a[l>>0]|0;if((m&255)<(k&255))j=(($(m&255,j)|0)+32768|0)>>>16&255;else j=-1;a[l>>0]=j}b=b+1|0;if((b|0)==(f|0))break;else e=e+4|0}}}}}else{m=c[b+104>>2]|0;l=$(c[b+120>>2]|0,l)|0;b=c[b+240>>2]|0;q=1<<b+ -1;p=(f|0)>0;if(p){o=e;n=0;while(1){v=d[m+(n+l)>>0]|0;a[o>>0]=($(d[o>>0]|0,v)|0)+q>>b;w=o+1|0;a[w>>0]=($(d[w>>0]|0,v)|0)+q>>b;w=o+2|0;a[w>>0]=($(d[w>>0]|0,v)|0)+q>>b;n=n+1|0;if((n|0)==(f|0))break;else o=o+k|0}}if(!((a[j>>0]|0)==0|p^1)){e=e+3|0;b=0;while(1){a[e>>0]=-1;b=b+1|0;if((b|0)==(f|0))break;else e=e+4|0}}}while(0);c[h>>2]=(c[h>>2]|0)+1;w=0;i=g;return w|0}function Hd(e,f,g,h,j,k,l,m){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;var n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0;n=i;p=c[f+((h+5&7)<<2)>>2]|0;t=c[f+((h+6&7)<<2)>>2]|0;q=c[f+((h+7&7)<<2)>>2]|0;s=c[f+((h&7)<<2)>>2]|0;r=c[f+((h+1&7)<<2)>>2]|0;o=c[f+((h+2&7)<<2)>>2]|0;f=c[f+((h+3&7)<<2)>>2]|0;u=k+ -8|0;v=1<<u>>1;h=(g+1|0)/2|0;w=(g|0)>0;if(!l){if(w){l=0;do{y=$(d[t+l>>0]|0,-6)|0;z=$(d[r+l>>0]|0,-10)|0;b[j+(l+3<<1)>>1]=(d[p+l>>0]<<1)+v+y+((d[q+l>>0]|0)*18|0)+((d[s+l>>0]|0)*57|0)+z+(d[o+l>>0]<<2)-(d[f+l>>0]|0)>>u;l=l+1|0}while((l|0)<(h|0))}}else if(w){l=0;do{y=$(d[q+l>>0]|0,-10)|0;z=$(d[o+l>>0]|0,-6)|0;b[j+(l+3<<1)>>1]=v-(d[p+l>>0]|0)+(d[t+l>>0]<<2)+y+((d[s+l>>0]|0)*57|0)+((d[r+l>>0]|0)*18|0)+z+(d[f+l>>0]<<1)>>u;l=l+1|0}while((l|0)<(h|0))}o=j+6|0;z=b[o>>1]|0;b[j>>1]=z;q=j+2|0;b[q>>1]=z;p=j+4|0;b[p>>1]=z;z=b[j+(h+2<<1)>>1]|0;b[j+(h+3<<1)>>1]=z;b[j+(h+4<<1)>>1]=z;b[j+(h+5<<1)>>1]=z;b[j+(h+6<<1)>>1]=z;h=(1<<k)+ 
-1|0;if(!m){p=14-k|0;m=1<<p>>1;t=20-k|0;s=1<<t+ -1;if((g|0)>1){r=g+ -2|0;k=r>>>1;q=k<<1;f=e;while(1){u=(b[o>>1]|0)+m>>p;if((u|0)<0)u=0;else u=((u|0)>(h|0)?h:u)&255;a[f>>0]=u;z=$((b[o+4>>1]|0)+(b[o+ -2>>1]|0)|0,-11)|0;u=o;o=o+2|0;u=s-(b[u+ -6>>1]|0)-(b[u+8>>1]|0)+((b[u+6>>1]|0)+(b[u+ -4>>1]|0)<<2)+z+(((b[o>>1]|0)+(b[u>>1]|0)|0)*40|0)>>t;if((u|0)<0)u=0;else u=((u|0)>(h|0)?h:u)&255;a[f+1>>0]=u;g=g+ -2|0;if((g|0)<=1)break;else f=f+2|0}e=e+(q+2)|0;g=r-q|0;o=j+(k+4<<1)|0}if(!g){i=n;return}j=(b[o>>1]|0)+m>>p;if((j|0)<0)j=0;else j=((j|0)>(h|0)?h:j)&255;a[e>>0]=j;i=n;return}else{k=20-k|0;m=1<<k+ -1;l=b[j>>1]|0;v=b[q>>1]|0;u=b[p>>1]|0;f=b[o>>1]|0;s=b[j+8>>1]|0;t=b[j+10>>1]|0;if((g|0)>1){r=g+ -2|0;p=r>>>1;q=p<<1;x=e;while(1){w=b[o+6>>1]|0;y=f*57|0;z=(t<<2)+m+($(s,-10)|0)+y+(u*18|0)+($(v,-6)|0)+(l<<1)-w>>k;if((z|0)<0)z=0;else z=((z|0)>(h|0)?h:z)&255;a[x>>0]=z;l=($(t,-6)|0)+m+(s*18|0)+y+($(u,-10)|0)-l+(v<<2)+(w<<1)>>k;if((l|0)<0)l=0;else l=((l|0)>(h|0)?h:l)&255;a[x+1>>0]=l;g=g+ -2|0;if((g|0)<=1)break;else{B=t;A=s;y=f;z=u;l=v;t=w;x=x+2|0;o=o+2|0;s=B;f=A;u=y;v=z}}l=v;v=u;u=f;f=s;s=t;t=w;e=e+(q+2)|0;g=r-q|0;o=j+(p+4<<1)|0}if(!g){i=n;return}j=(t<<2)+m+($(s,-10)|0)+(f*57|0)+(u*18|0)+($(v,-6)|0)+(l<<1)-(b[o+6>>1]|0)>>k;if((j|0)<0)j=0;else j=((j|0)>(h|0)?h:j)&255;a[e>>0]=j;i=n;return}}function Id(b,c,e,f,g,h){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0;j=i;v=(e+1|0)/2|0;k=h+3|0;fe(k|0,c|0,v|0)|0;ce(h|0,a[c>>0]|0,3)|0;ce(h+(v+3)|0,a[c+(v+ -1)>>0]|0,4)|0;c=(1<<f)+ -1|0;if(!g){if((e|0)>1){f=e+ -2|0;g=f>>>1;l=g<<1;m=b;while(1){a[m>>0]=a[k>>0]|0;v=$((d[k+2>>0]|0)+(d[k+ -1>>0]|0)|0,-11)|0;n=k;k=k+1|0;n=32-(d[n+ -3>>0]|0)-(d[n+4>>0]|0)+((d[n+3>>0]|0)+(d[n+ -2>>0]|0)<<2)+v+(((d[k>>0]|0)+(d[n>>0]|0)|0)*40|0)>>6;if((n|0)<0)n=0;else n=((n|0)>(c|0)?c:n)&255;a[m+1>>0]=n;e=e+ -2|0;if((e|0)<=1)break;else 
m=m+2|0}b=b+(l+2)|0;e=f-l|0;k=h+(g+4)|0}if(!e){i=j;return}a[b>>0]=a[k>>0]|0;i=j;return}q=d[h>>0]|0;r=d[h+1>>0]|0;m=d[h+2>>0]|0;p=d[k>>0]|0;o=d[h+4>>0]|0;n=d[h+5>>0]|0;if((e|0)>1){f=e+ -2|0;g=f>>>1;l=g<<1;t=b;while(1){s=d[k+3>>0]|0;u=p*57|0;v=(n<<2)+32+($(o,-10)|0)+u+(m*18|0)+($(r,-6)|0)+(q<<1)-s>>6;if((v|0)<0)v=0;else v=((v|0)>(c|0)?c:v)&255;a[t>>0]=v;q=($(n,-6)|0)+32+(o*18|0)+u+($(m,-10)|0)-q+(r<<2)+(s<<1)>>6;if((q|0)<0)q=0;else q=((q|0)>(c|0)?c:q)&255;a[t+1>>0]=q;e=e+ -2|0;if((e|0)<=1)break;else{x=n;w=o;u=p;v=m;q=r;n=s;t=t+2|0;k=k+1|0;o=x;p=w;m=u;r=v}}q=r;r=m;m=p;p=o;o=n;n=s;b=b+(l+2)|0;e=f-l|0;k=h+(g+4)|0}if(!e){i=j;return}h=(n<<2)+32+($(o,-10)|0)+(p*57|0)+(m*18|0)+($(r,-6)|0)+(q<<1)-(d[k+3>>0]|0)>>6;if((h|0)<0)h=0;else h=((h|0)>(c|0)?c:h)&255;a[b>>0]=h;i=j;return}function Jd(){var a=0,b=0;a=i;b=md(252)|0;if(!b)b=0;i=a;return b|0}function Kd(e,f,g){e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0;k=i;i=i+80|0;n=k+72|0;l=k+60|0;h=k+48|0;u=k+44|0;x=k+40|0;t=k+36|0;p=k;y=a[e+40>>0]|0;a:do if(((((((g|0)>=6?(a[f>>0]|0)==66:0)?(a[f+1>>0]|0)==80:0)?(a[f+2>>0]|0)==71:0)?(a[f+3>>0]|0)==-5:0)?(R=a[f+4>>0]|0,N=R&255,J=N>>>5,c[p+8>>2]=J,(R&255)<=191):0)?(R=(N&15)+8|0,a[p+13>>0]=R,(R&255)>>>0<=14):0){L=a[f+5>>0]|0;v=L&255;M=v>>>4;c[p+24>>2]=M;A=v&8;R=v>>>2&1;a[p+16>>0]=v>>>1&1;r=p+17|0;a[r>>0]=v&1;v=p+18|0;b[v>>1]=0;s=p+20|0;b[s>>1]=0;w=p+22|0;b[w>>1]=0;O=p+12|0;a[O>>0]=0;Q=p+14|0;a[Q>>0]=0;P=p+15|0;a[P>>0]=0;if(!(N&16))if(!R)N=0;else{a[O>>0]=1;a[Q>>0]=1;N=1}else{a[O>>0]=1;a[P>>0]=R;N=0}if((((((((((L&255)<=79?(J|0)!=0|(M|0)==0:0)?!(N<<24>>24!=0&(J|0)==0):0)?(F=Qd(p,f+6|0,g+ 
-6|0)|0,(F|0)>=0):0)?(C=(c[p>>2]|0)>>>0>1073741823?-1:F,(C|0)>=0):0)?(E=C+6|0,z=p+4|0,I=Qd(z,f+E|0,g-E|0)|0,(I|0)>=0):0)?(H=c[z>>2]|0,D=H>>>0>1073741823?-1:I,(D|0)>=0):0)?(G=D+E|0,!((c[p>>2]|0)==0|(H|0)==0)):0)?(q=p+28|0,K=Qd(q,f+G|0,g-G|0)|0,(K|0)>=0):0)?(B=(c[q>>2]|0)>>>0>1073741823?-1:K,(B|0)>=0):0){z=B+G|0;c[n>>2]=0;do if(!A){c[p+32>>2]=0;m=48}else{A=Qd(n,f+z|0,g-z|0)|0;if((A|0)<0){z=-1;break a}B=c[n>>2]|0;A=B>>>0>1073741823?-1:A;if((A|0)<0){z=-1;break a}C=A+z|0;A=p+32|0;c[A>>2]=0;z=C+B|0;if((z|0)>(g|0)){z=-1;break a}y=y<<24>>24!=0;if(!y?(a[r>>0]|0)==0:0)break;if((C|0)>=(z|0)){z=C;m=48;break}while(1){B=Qd(l,f+C|0,z-C|0)|0;if((B|0)<0){z=-1;break a}C=B+C|0;D=Qd(h,f+C|0,z-C|0)|0;if((D|0)<0){z=-1;break a}B=c[h>>2]|0;D=B>>>0>1073741823?-1:D;if((D|0)<0){z=-1;break a}D=D+C|0;C=D+B|0;if(C>>>0>z>>>0){z=-1;break a}do if(a[r>>0]|0){if((c[l>>2]|0)!=5)break;F=Qd(u,f+D|0,z-D|0)|0;if((F|0)<0){z=-1;break a}E=c[u>>2]|0;F=E>>>0>1073741823?-1:F;if((F|0)<0){z=-1;break a}G=F+D|0;H=Qd(x,f+G|0,z-G|0)|0;if((H|0)<0){z=-1;break a}F=c[x>>2]|0;H=F>>>0>1073741823?-1:H;if((H|0)<0){z=-1;break a}R=H+G|0;if((Qd(t,f+R|0,z-R|0)|0)<0){z=-1;break a}G=c[t>>2]|0;if(!((F&65535|0)==(F|0)&((G>>>0>1073741823|(F|0)==0|(G|0)==0)^1))){z=-1;break a}if((G&65535|0)!=(G|0)){z=-1;break a}if((E&65535|0)!=(E|0)){z=-1;break a}b[v>>1]=E;b[s>>1]=F;b[w>>1]=G}while(0);if(y){P=fd(16)|0;c[P>>2]=c[l>>2];Q=P+4|0;c[Q>>2]=B;R=P+12|0;c[R>>2]=0;c[A>>2]=P;A=fd(B)|0;c[P+8>>2]=A;fe(A|0,f+D|0,c[Q>>2]|0)|0;A=R}if((C|0)>=(z|0)){z=C;m=48;break}}}while(0);do if((m|0)==48){if(!(a[r>>0]|0))break;if(!(b[s>>1]|0)){z=-1;break a}}while(0);if(c[q>>2]|0)break;c[q>>2]=g-z}else z=-1}else z=-1;while(0);if((z|0)<0){R=z;i=k;return R|0}u=c[p>>2]|0;v=c[p+4>>2]|0;B=c[p+12>>2]|0;y=B&255;A=c[p+24>>2]|0;w=(B&65535)>>>8;t=w&255;w=w&65535;q=e+16|0;c[q>>2]=u;r=e+20|0;c[r>>2]=v;x=c[p+8>>2]|0;C=e+24|0;c[C>>2]=x;s=B>>>24&255;B=B>>>16&255;if((x|0)==5){c[C>>2]=2;a[e+28>>0]=0;x=2}else 
if((x|0)==4){c[C>>2]=1;a[e+28>>0]=0;x=1}else{c[C>>2]=x;a[e+28>>0]=1}a[e+29>>0]=y;a[e+33>>0]=s;a[e+31>>0]=B;s=c[p+16>>2]|0;a[e+32>>0]=s;c[e+36>>2]=A;a[e+30>>0]=t;a[e+34>>0]=(s&65535)>>>8;b[e+48>>1]=s>>>16;s=c[p+20>>2]|0;b[e+50>>1]=s;b[e+52>>1]=s>>>16;s=e+44|0;c[s>>2]=c[p+32>>2];do if(((c[p+28>>2]|0)+z|0)>>>0<=g>>>0){A=f+z|0;g=g-z|0;c[l>>2]=0;c[l+4>>2]=0;p=l+8|0;c[p>>2]=0;c[h>>2]=0;c[h+4>>2]=0;t=h+8|0;c[t>>2]=0;if(!(y<<24>>24)){z=A;y=g}else{y=Nd(l,e+12|0,e+4|0,A,g,u,v,0,w)|0;if((y|0)<0)break;z=f+(y+z)|0;y=g-y|0}f=e+8|0;A=Nd(h,f,e,z,y,u,v,x,w)|0;if((A|0)>=0){u=y-A|0;v=e+4|0;y=c[v>>2]|0;w=(y|0)!=0;c[n>>2]=0;x=n+4|0;c[x>>2]=0;b:do if((u|0)>0){y=n+((w&1)<<2)|0;D=0;H=0;z=z+A|0;A=u;G=1;c:while(1){B=(G|0)!=0;if((A|0)<((B?5:2)|0)){n=-1;break b}if(B)F=0;else F=(a[z+2>>0]|0)==0?4:3;if((A|0)<(F+3|0)){n=-1;break b}C=z+F|0;B=d[C>>0]|0;E=B<<5&32|(d[z+(F+1)>>0]|0)>>>3;B=B>>>1&63;do if((B+ -32|0)>>>0<4|(B|0)==39|B>>>0>40)if(D)if(!(c[y>>2]|0))B=H;else break c;else{B=H;D=0}else if((B>>>0<10|(B+ -16|0)>>>0<6?(o=F+2|0,(o|0)<(A|0)):0)?(a[z+o>>0]|0)<0:0){if((H|0)!=0?(c[y>>2]|0)!=0:0)break c;if(w&(E|0)==1){c[x>>2]=1;B=H;D=H;break}else{c[n>>2]=1;B=1;D=1;break}}else B=H;while(0);do if((G|0)!=1){if(((((A|0)>3?(a[z>>0]|0)==0:0)?(a[z+1>>0]|0)==0:0)?(a[z+2>>0]|0)==0:0)?(a[z+3>>0]|0)==1:0){G=4;break}if((A|0)<=2){n=-1;break b}if(a[z>>0]|0){n=-1;break b}if(a[z+1>>0]|0){n=-1;break b}if((a[z+2>>0]|0)==1)G=3;else{n=-1;break b}}else G=0;while(0);H=G+2|0;if((H|0)>(A|0)){n=-1;break b}d:do if((H|0)<(A|0))while(1){K=(a[z+G>>0]|0)==0;do if(K){if(a[z+(G+1)>>0]|0)break;if((a[z+H>>0]|0)==1)break d}while(0);J=H;H=G+3|0;if((H|0)>=(A|0)){G=A;break d}I=G+1|0;if(!K){G=I;continue}if(a[z+I>>0]|0){G=I;continue}if(a[z+J>>0]|0){G=I;continue}J=(a[z+H>>0]|0)==1;if(J){G=J?G:A;break}else G=I}else G=A;while(0);if((G|0)<0){n=-1;break b}H=G-F|0;F=H+3|0;E=w&(E|0)==1;J=E?l:h;I=J+8|0;if((Od(J,(c[I>>2]|0)+F|0)|0)<0){n=-1;break 
b}K=c[J>>2]|0;J=c[I>>2]|0;a[K+J>>0]=0;a[K+(J+1)>>0]=0;a[K+(J+2)>>0]=1;fe(K+(J+3)|0,C|0,H|0)|0;if(E){R=K+(J+4)|0;a[R>>0]=d[R>>0]&7}c[I>>2]=J+F;A=A-G|0;if((A|0)>0){H=B;z=z+G|0;G=0}else break}y=c[v>>2]|0;m=105}else{A=u;m=105}while(0);do if((m|0)==105){if(y){if((Od(l,(c[p>>2]|0)+32|0)|0)<0){n=-1;break}if((Pd(c[v>>2]|0,c[e+12>>2]|0,c[l>>2]|0,c[p>>2]|0)|0)<0){n=-1;break}}if((Od(h,(c[t>>2]|0)+32|0)|0)<0)n=-1;else{n=(Pd(c[e>>2]|0,c[f>>2]|0,c[h>>2]|0,c[t>>2]|0)|0)<0;n=n?-1:u-A|0}}while(0);id(c[l>>2]|0);id(c[h>>2]|0);if((((n|0)>=0?(g-u+n|0)>=0:0)?(Ld(e),j=c[f>>2]|0,(c[j+64>>2]|0)>=(c[q>>2]|0)):0)?(c[j+68>>2]|0)>=(c[r>>2]|0):0){c[e+80>>2]=-1;R=0;i=k;return R|0}}}while(0);xd(e+8|0);xd(e+12|0);c[s>>2]=0;R=-1;i=k;return R|0}function Ld(a){a=a|0;var b=0,d=0,e=0;b=i;d=a+4|0;e=c[d>>2]|0;if(e){Jc(e)|0;id(c[d>>2]|0);c[d>>2]=0}d=c[a>>2]|0;if(!d){i=b;return}Jc(d)|0;id(c[a>>2]|0);c[a>>2]=0;i=b;return}function Md(a){a=a|0;var b=0,d=0;b=i;id(c[a+124>>2]|0);id(c[a+128>>2]|0);d=0;do{id(c[a+(d<<2)+132>>2]|0);id(c[a+(d<<2)+164>>2]|0);d=d+1|0}while((d|0)!=8);id(c[a+196>>2]|0);id(c[a+56>>2]|0);Ld(a);xd(a+8|0);xd(a+12|0);id(a);i=b;return}function Nd(b,d,e,f,g,h,j,k,l){b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0;n=i;i=i+16|0;q=n+4|0;m=n;p=Qd(q,f,g)|0;if((p|0)<0){t=-1;i=n;return t|0}r=c[q>>2]|0;t=r>>>0>1073741823?-1:p;if((t|0)<0){t=-1;i=n;return t|0}s=g-t|0;if(r>>>0>s>>>0){t=-1;i=n;return t|0}q=r+10|0;p=fd(q)|0;a[p>>0]=k;a[p+1>>0]=h>>>24;a[p+2>>0]=h>>>16;a[p+3>>0]=h>>>8;a[p+4>>0]=h;a[p+5>>0]=j>>>24;a[p+6>>0]=j>>>16;a[p+7>>0]=j>>>8;a[p+8>>0]=j;a[p+9>>0]=l+248;fe(p+10|0,f+t|0,r|0)|0;l=s-r|0;k=fd(10-r+(q<<1)+l|0)|0;a[k>>0]=0;a[k+1>>0]=0;a[k+2>>0]=0;a[k+3>>0]=1;a[k+4>>0]=96;a[k+5>>0]=1;if((q|0)>0){r=0;f=6;do{j=r+1|0;h=a[p+r>>0]|0;if((j|0)<(q|0)&h<<24>>24==0)if(!(a[p+j>>0]|0)){a[k+f>>0]=0;a[k+(f+1)>>0]=0;a[k+(f+2)>>0]=3;r=r+2|0;f=f+3|0}else{h=0;o=8}else o=8;if((o|0)==8){o=0;a[k+f>>0]=h;r=j;f=f+1|0}}while((r|0)<(q|0));if(!f){f=0;o=12}else 
o=11}else{f=6;o=11}if((o|0)==11)if(!(a[k+(f+ -1)>>0]|0))o=12;if((o|0)==12){a[k+f>>0]=-128;f=f+1|0}id(p);o=g-l|0;if((o|0)<0){t=-1;i=n;return t|0}g=b+8|0;if((Od(b,(c[g>>2]|0)+f|0)|0)<0){id(k);t=-1;i=n;return t|0}fe((c[b>>2]|0)+(c[g>>2]|0)|0,k|0,f|0)|0;c[g>>2]=(c[g>>2]|0)+f;id(k);b=Oc(1416)|0;if(!b){t=-1;i=n;return t|0}g=wd()|0;c[m>>2]=g;if(!g){t=-1;i=n;return t|0}t=b+688|0;c[t>>2]=c[t>>2]|1;if((Ic(b,1416,0)|0)<0){xd(m);t=-1;i=n;return t|0}else{c[e>>2]=b;c[d>>2]=g;t=o;i=n;return t|0}return 0}function Od(a,b){a=a|0;b=b|0;var d=0,e=0,f=0;d=i;e=a+4|0;f=c[e>>2]|0;if((f|0)<(b|0)){f=(f*3|0)/2|0;f=(f|0)<(b|0)?b:f;b=gd(c[a>>2]|0,f)|0;if(!b)a=-1;else{c[a>>2]=b;c[e>>2]=f;a=0}}else a=0;i=d;return a|0}function Pd(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0;j=i;i=i+96|0;h=j;g=j+80|0;Xc(h);c[h+24>>2]=e;c[h+28>>2]=f;e=e+f+0|0;f=e+32|0;do{a[e>>0]=0;e=e+1|0}while((e|0)<(f|0));e=(Pc(b,d,g,h)|0)<0;i=j;return(e|(c[g>>2]|0)==0)<<31>>31|0}function Qd(b,e,f){b=b|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0;g=i;a:do if((f|0)>=1){j=a[e>>0]|0;h=j&255;if(j<<24>>24>-1){c[b>>2]=h;b=1;break}if(j<<24>>24!=-128){j=e+1|0;h=h&127;while(1){if((f|0)<2){b=-1;break a}k=j;j=j+1|0;k=d[k>>0]|0;h=k&127|h<<7;if(!(k&128))break;else f=f+ -1|0}c[b>>2]=h;b=j-e|0}else b=-1}else b=-1;while(0);i=g;return b|0}function Rd(b,e,f,g,h,j,k){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0;h=i;if((c[b+40>>2]|0)==8?(c[b+44>>2]|0)==0:0){if((j|0)>0)g=0;else{i=h;return}while(1){n=a[f+g>>0]|0;a[e>>0]=n;a[e+1>>0]=n;a[e+2>>0]=n;g=g+1|0;if((g|0)==(j|0))break;else e=e+k|0}i=h;return}l=c[b+12>>2]|0;g=c[b+16>>2]|0;b=c[b>>2]|0;if((j|0)>0)m=0;else{i=h;return}while(1){n=($(d[f+m>>0]|0,l)|0)+g>>b;if((n|0)<0)n=0;else n=(n|0)>255?-1:n&255;a[e>>0]=n;a[e+1>>0]=n;a[e+2>>0]=n;m=m+1|0;if((m|0)==(j|0))break;else e=e+k|0}i=h;return}function Sd(b,e,f,g,h,j,k){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var 
l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0;q=i;s=c[b+20>>2]|0;n=c[b+24>>2]|0;o=c[b+28>>2]|0;l=c[b+32>>2]|0;p=c[b+12>>2]|0;r=c[b+16>>2]|0;m=c[b>>2]|0;b=c[b+36>>2]|0;if((j|0)>0)t=0;else{i=q;return}while(1){v=$(d[f+t>>0]|0,p)|0;u=(d[g+t>>0]|0)-b|0;w=(d[h+t>>0]|0)-b|0;v=v+r|0;x=v+($(w,s)|0)>>m;if((x|0)<0)x=0;else x=(x|0)>255?-1:x&255;a[e>>0]=x;w=v-($(u,n)|0)-($(w,o)|0)>>m;if((w|0)<0)w=0;else w=(w|0)>255?-1:w&255;a[e+1>>0]=w;u=v+($(u,l)|0)>>m;if((u|0)<0)u=0;else u=(u|0)>255?-1:u&255;a[e+2>>0]=u;t=t+1|0;if((t|0)==(j|0))break;else e=e+k|0}i=q;return}function Td(b,e,f,g,h,j,k){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0;l=i;if((c[b+40>>2]|0)==8?(c[b+44>>2]|0)==0:0){if((j|0)>0)m=0;else{i=l;return}while(1){a[e>>0]=a[h+m>>0]|0;a[e+1>>0]=a[f+m>>0]|0;a[e+2>>0]=a[g+m>>0]|0;m=m+1|0;if((m|0)==(j|0))break;else e=e+k|0}i=l;return}n=c[b+12>>2]|0;m=c[b+16>>2]|0;b=c[b>>2]|0;if((j|0)>0)o=0;else{i=l;return}while(1){p=($(d[h+o>>0]|0,n)|0)+m>>b;if((p|0)<0)p=0;else p=(p|0)>255?-1:p&255;a[e>>0]=p;p=($(d[f+o>>0]|0,n)|0)+m>>b;if((p|0)<0)p=0;else p=(p|0)>255?-1:p&255;a[e+1>>0]=p;p=($(d[g+o>>0]|0,n)|0)+m>>b;if((p|0)<0)p=0;else p=(p|0)>255?-1:p&255;a[e+2>>0]=p;o=o+1|0;if((o|0)==(j|0))break;else e=e+k|0}i=l;return}function Ud(b,e,f,g,h,j,k){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0;o=i;l=c[b+12>>2]|0;m=c[b+16>>2]|0;n=c[b>>2]|0;b=c[b+36>>2]|0;if((j|0)>0)p=0;else{i=o;return}while(1){t=d[f+p>>0]|0;s=(d[g+p>>0]|0)-b|0;r=(d[h+p>>0]|0)-b|0;q=t-s|0;u=($(q+r|0,l)|0)+m>>n;if((u|0)<0)u=0;else u=(u|0)>255?-1:u&255;a[e>>0]=u;s=($(s+t|0,l)|0)+m>>n;if((s|0)<0)s=0;else s=(s|0)>255?-1:s&255;a[e+1>>0]=s;q=($(q-r|0,l)|0)+m>>n;if((q|0)<0)q=0;else q=(q|0)>255?-1:q&255;a[e+2>>0]=q;p=p+1|0;if((p|0)==(j|0))break;else e=e+k|0}i=o;return}function Vd(b){b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0,k=0;d=i;if((b|0)==0|b>>>0>2147483583){k=0;i=d;return k|0}do if(!(c[1523]|0)){e=ra(64)|0;if((e|0)==(-1|0)){k=0;i=d;return 
k|0}else{c[1524]=ra(0)|0;c[1523]=6088;c[1522]=6088;c[1527]=6104;c[1526]=6104;k=e+16|0;a[e+15>>0]=-86;j=c[1527]|0;c[1527]=k;c[k>>2]=6104;c[e+20>>2]=j;c[j>>2]=k;j=e+24|0;k=c[1523]|0;c[1523]=j;c[j>>2]=6088;c[e+28>>2]=k;c[k>>2]=j;break}}while(0);e=b+40&-32;h=c[1524]|0;g=c[1522]|0;k=6092|0;while(1){f=c[k>>2]|0;b=f+ -8|0;k=c[f+ -4>>2]|0;if((k|0)==6104)j=h;else j=k;j=j-b|0;if(e>>>0<j>>>0){h=12;break}if((f|0)==(g|0)){h=10;break}k=f+4|0;if((e|0)==(j|0)){h=15;break}}do if((h|0)==10)if((ra(e+32-j|0)|0)==(-1|0)){k=0;i=d;return k|0}else{c[1524]=ra(0)|0;k=c[g+ -4>>2]|0;f=g;h=12;break}else if((h|0)==15){j=c[f>>2]|0;k=c[k>>2]|0;c[j+4>>2]=k;c[k>>2]=j}while(0);if((h|0)==12){h=b+e|0;c[f+ -4>>2]=h;c[h>>2]=b;c[b+(e|4)>>2]=k;c[k>>2]=h;h=b+(e|8)|0;k=f+4|0;j=c[k>>2]|0;c[k>>2]=h;c[h>>2]=f;c[b+(e|12)>>2]=j;c[j>>2]=h;a[b+(e+ -1)>>0]=-86;j=c[f>>2]|0;k=c[k>>2]|0;c[j+4>>2]=k;c[k>>2]=j}a[b+ -1>>0]=85;k=f;i=d;return k|0}function Wd(b){b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0;d=i;if(!b){i=d;return}g=b+ -8|0;e=c[1523]|0;c[1523]=b;c[b>>2]=6088;f=b+4|0;c[f>>2]=e;c[e>>2]=b;a[b+ -9>>0]=-86;e=c[g>>2]|0;if((e|0)!=6104?(a[e+ -1>>0]|0)==-86:0){g=c[b+ -4>>2]|0;c[e+4>>2]=g;c[g>>2]=e;b=c[b>>2]|0;g=c[f>>2]|0;c[b+4>>2]=g;c[g>>2]=b}else e=g;b=c[e+4>>2]|0;if((b|0)==6104){i=d;return}if((a[b+ -1>>0]|0)!=-86){i=d;return}g=c[b>>2]|0;h=c[b+4>>2]|0;c[g+4>>2]=h;c[h>>2]=g;h=e+8|0;g=c[h>>2]|0;j=e+12|0;f=c[j>>2]|0;c[g+4>>2]=f;c[f>>2]=g;f=b+8|0;g=b+12|0;e=c[g>>2]|0;c[g>>2]=h;c[h>>2]=f;c[j>>2]=e;c[e>>2]=h;f=c[f>>2]|0;g=c[g>>2]|0;c[f+4>>2]=g;c[g>>2]=f;i=d;return}function Xd(a,b){a=a|0;b=b|0;var d=0,e=0,f=0;d=i;do if(a){if(!b){Wd(a);e=0;break}e=Vd(b)|0;if(!e)e=0;else{f=(c[a+ -4>>2]|0)-a+ -1|0;fe(e|0,a|0,(f>>>0>b>>>0?b:f)|0)|0;Wd(a)}}else e=Vd(b)|0;while(0);i=d;return e|0}function Yd(b,c,d){b=b|0;c=c|0;d=d|0;var e=0,f=0,g=0,h=0;f=i;if(!d){h=0;i=f;return h|0}while(1){g=a[b>>0]|0;h=a[c>>0]|0;if(g<<24>>24!=h<<24>>24)break;d=d+ -1|0;if(!d){b=0;e=5;break}else{b=b+1|0;c=c+1|0}}if((e|0)==5){i=f;return b|0}h=(g&255)-(h&255)|0;i=f;return 
h|0}function Zd(){}function _d(a,b,c){a=a|0;b=b|0;c=c|0;if((c|0)<32){D=b>>c;return a>>>c|(b&(1<<c)-1)<<32-c}D=(b|0)<0?-1:0;return b>>c-32|0}function $d(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;b=b-d-(c>>>0>a>>>0|0)>>>0;return(D=b,a-c>>>0|0)|0}function ae(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;c=a+c>>>0;return(D=b+d+(c>>>0<a>>>0|0)>>>0,c|0)|0}function be(b){b=b|0;var c=0;c=b;while(a[c>>0]|0)c=c+1|0;return c-b|0}function ce(b,d,e){b=b|0;d=d|0;e=e|0;var f=0,g=0,h=0,i=0;f=b+e|0;if((e|0)>=20){d=d&255;i=b&3;h=d|d<<8|d<<16|d<<24;g=f&~3;if(i){i=b+4-i|0;while((b|0)<(i|0)){a[b>>0]=d;b=b+1|0}}while((b|0)<(g|0)){c[b>>2]=h;b=b+4|0}}while((b|0)<(f|0)){a[b>>0]=d;b=b+1|0}return b-e|0}function de(a,b,c){a=a|0;b=b|0;c=c|0;if((c|0)<32){D=b<<c|(a&(1<<c)-1<<32-c)>>>32-c;return a<<c}D=a<<c-32;return 0}function ee(a){a=a|0;return(a&255)<<24|(a>>8&255)<<16|(a>>16&255)<<8|a>>>24|0}function fe(b,d,e){b=b|0;d=d|0;e=e|0;var f=0;if((e|0)>=4096)return ua(b|0,d|0,e|0)|0;f=b|0;if((b&3)==(d&3)){while(b&3){if(!e)return f|0;a[b>>0]=a[d>>0]|0;b=b+1|0;d=d+1|0;e=e-1|0}while((e|0)>=4){c[b>>2]=c[d>>2];b=b+4|0;d=d+4|0;e=e-4|0}}while((e|0)>0){a[b>>0]=a[d>>0]|0;b=b+1|0;d=d+1|0;e=e-1|0}return f|0}function ge(a,b,c){a=a|0;b=b|0;c=c|0;if((c|0)<32){D=b>>>c;return a>>>c|(b&(1<<c)-1)<<32-c}D=0;return b>>>c-32|0}function he(b){b=b|0;var c=0;c=a[n+(b>>>24)>>0]|0;if((c|0)<8)return c|0;c=a[n+(b>>16&255)>>0]|0;if((c|0)<8)return c+8|0;c=a[n+(b>>8&255)>>0]|0;if((c|0)<8)return c+16|0;return(a[n+(b&255)>>0]|0)+24|0}function ie(b){b=b|0;var c=0;c=a[m+(b&255)>>0]|0;if((c|0)<8)return c|0;c=a[m+(b>>8&255)>>0]|0;if((c|0)<8)return c+8|0;c=a[m+(b>>16&255)>>0]|0;if((c|0)<8)return c+16|0;return(a[m+(b>>>24)>>0]|0)+24|0}function je(a,b){a=a|0;b=b|0;var c=0,d=0,e=0,f=0;f=a&65535;d=b&65535;c=$(d,f)|0;e=a>>>16;d=(c>>>16)+($(d,e)|0)|0;b=b>>>16;a=$(b,f)|0;return(D=(d>>>16)+($(b,e)|0)+(((d&65535)+a|0)>>>16)|0,d+a<<16|c&65535|0)|0}function ke(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;var 
e=0,f=0;e=a;f=c;a=je(e,f)|0;c=D;return(D=($(b,f)|0)+($(d,e)|0)+c|c&0,a|0|0)|0}function le(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;return xa[a&1](b|0,c|0,d|0)|0}function me(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;ya[a&3](b|0,c|0,d|0,e|0,f|0)}function ne(a,b){a=a|0;b=b|0;za[a&7](b|0)}function oe(a,b,c){a=a|0;b=b|0;c=c|0;Aa[a&7](b|0,c|0)}function pe(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;return Ba[a&1](b|0,c|0,d|0,e|0,f|0,g|0)|0}function qe(a,b,c,d,e,f,g,h,i,j,k,l,m){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0;l=l|0;m=m|0;Ca[a&3](b|0,c|0,d|0,e|0,f|0,g|0,h|0,i|0,j|0,k|0,l|0,m|0)}function re(a,b){a=a|0;b=b|0;return Da[a&3](b|0)|0}function se(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;Ea[a&7](b|0,c|0,d|0)}function te(a,b,c,d,e,f,g,h,i,j){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;Fa[a&1](b|0,c|0,d|0,e|0,f|0,g|0,h|0,i|0,j|0)}function ue(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;return Ga[a&1](b|0,c|0,d|0,e|0)|0}function ve(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;Ha[a&3](b|0,c|0,d|0,e|0,f|0,g|0)}function we(a,b,c){a=a|0;b=b|0;c=c|0;return Ia[a&1](b|0,c|0)|0}function xe(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;return Ja[a&1](b|0,c|0,d|0,e|0,f|0)|0}function ye(a,b,c,d,e,f,g,h){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;Ka[a&7](b|0,c|0,d|0,e|0,f|0,g|0,h|0)}function ze(a,b,c){a=a|0;b=b|0;c=c|0;aa(0);return 0}function Ae(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;aa(1)}function Be(a){a=a|0;aa(2)}function Ce(a,b){a=a|0;b=b|0;aa(3)}function De(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;aa(4);return 0}function Ee(a,b,c,d,e,f,g,h,i,j,k,l){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0;l=l|0;aa(5)}function Fe(a){a=a|0;aa(6);return 0}function Ge(a,b,c){a=a|0;b=b|0;c=c|0;aa(7)}function He(a,b,c,d,e,f,g,h,i){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;aa(8)}function Ie(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;aa(9);return 0}function 
Je(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;aa(10)}function Ke(a,b){a=a|0;b=b|0;aa(11);return 0}function Le(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;aa(12);return 0}function Me(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;aa(13)}
+
+
+
+
+// EMSCRIPTEN_END_FUNCS
+var xa=[ze,Mc];var ya=[Ae,xc,yc,Ae];var za=[Be,Mb,jc,oc,pc,qc,rc,Be];var Aa=[Ce,hc,kc,lc,mc,nc,Fc,rd];var Ba=[De,Kc];var Ca=[Ee,tc,uc,Ee];var Da=[Fe,Jb,Lb,Fe];var Ea=[Ge,dc,ec,fc,gc,ic,Ge,Ge];var Fa=[He,sc];var Ga=[Ie,Kb];var Ha=[Je,cc,vc,wc];var Ia=[Ke,Pb];var Ja=[Le,Lc];var Ka=[Me,Sd,Td,Ud,Rd,Me,Me,Me];return{_i64Subtract:$d,_free:Wd,_bpg_decoder_decode:Kd,_bpg_decoder_start:Ed,_realloc:Xd,_i64Add:ae,_bpg_decoder_open:Jd,_bitshift64Ashr:_d,_strlen:be,_bpg_decoder_get_info:Dd,_memset:ce,_malloc:Vd,_memcpy:fe,_bpg_decoder_get_line:Gd,_bpg_decoder_close:Md,_bpg_decoder_get_frame_duration:Fd,_llvm_bswap_i32:ee,_bitshift64Shl:de,runPostSets:Zd,stackAlloc:La,stackSave:Ma,stackRestore:Na,setThrew:Oa,setTempRet0:Ra,getTempRet0:Sa,dynCall_iiii:le,dynCall_viiiii:me,dynCall_vi:ne,dynCall_vii:oe,dynCall_iiiiiii:pe,dynCall_viiiiiiiiiiii:qe,dynCall_ii:re,dynCall_viii:se,dynCall_viiiiiiiii:te,dynCall_iiiii:ue,dynCall_viiiiii:ve,dynCall_iii:we,dynCall_iiiiii:xe,dynCall_viiiiiii:ye}})
+
+
+// EMSCRIPTEN_END_ASM
+(Module.asmGlobalArg,Module.asmLibraryArg,buffer);var _i64Subtract=Module["_i64Subtract"]=asm["_i64Subtract"];var _free=Module["_free"]=asm["_free"];var _bpg_decoder_decode=Module["_bpg_decoder_decode"]=asm["_bpg_decoder_decode"];var _bpg_decoder_start=Module["_bpg_decoder_start"]=asm["_bpg_decoder_start"];var _realloc=Module["_realloc"]=asm["_realloc"];var _i64Add=Module["_i64Add"]=asm["_i64Add"];var _bpg_decoder_open=Module["_bpg_decoder_open"]=asm["_bpg_decoder_open"];var _bitshift64Ashr=Module["_bitshift64Ashr"]=asm["_bitshift64Ashr"];var _strlen=Module["_strlen"]=asm["_strlen"];var _bpg_decoder_get_info=Module["_bpg_decoder_get_info"]=asm["_bpg_decoder_get_info"];var _memset=Module["_memset"]=asm["_memset"];var _malloc=Module["_malloc"]=asm["_malloc"];var _memcpy=Module["_memcpy"]=asm["_memcpy"];var _bpg_decoder_get_line=Module["_bpg_decoder_get_line"]=asm["_bpg_decoder_get_line"];var _bpg_decoder_close=Module["_bpg_decoder_close"]=asm["_bpg_decoder_close"];var _bpg_decoder_get_frame_duration=Module["_bpg_decoder_get_frame_duration"]=asm["_bpg_decoder_get_frame_duration"];var _llvm_bswap_i32=Module["_llvm_bswap_i32"]=asm["_llvm_bswap_i32"];var _bitshift64Shl=Module["_bitshift64Shl"]=asm["_bitshift64Shl"];var runPostSets=Module["runPostSets"]=asm["runPostSets"];var dynCall_iiii=Module["dynCall_iiii"]=asm["dynCall_iiii"];var dynCall_viiiii=Module["dynCall_viiiii"]=asm["dynCall_viiiii"];var dynCall_vi=Module["dynCall_vi"]=asm["dynCall_vi"];var dynCall_vii=Module["dynCall_vii"]=asm["dynCall_vii"];var dynCall_iiiiiii=Module["dynCall_iiiiiii"]=asm["dynCall_iiiiiii"];var dynCall_viiiiiiiiiiii=Module["dynCall_viiiiiiiiiiii"]=asm["dynCall_viiiiiiiiiiii"];var dynCall_ii=Module["dynCall_ii"]=asm["dynCall_ii"];var dynCall_viii=Module["dynCall_viii"]=asm["dynCall_viii"];var dynCall_viiiiiiiii=Module["dynCall_viiiiiiiii"]=asm["dynCall_viiiiiiiii"];var dynCall_iiiii=Module["dynCall_iiiii"]=asm["dynCall_iiiii"];var 
dynCall_viiiiii=Module["dynCall_viiiiii"]=asm["dynCall_viiiiii"];var dynCall_iii=Module["dynCall_iii"]=asm["dynCall_iii"];var dynCall_iiiiii=Module["dynCall_iiiiii"]=asm["dynCall_iiiiii"];var dynCall_viiiiiii=Module["dynCall_viiiiiii"]=asm["dynCall_viiiiiii"];Runtime.stackAlloc=asm["stackAlloc"];Runtime.stackSave=asm["stackSave"];Runtime.stackRestore=asm["stackRestore"];Runtime.setTempRet0=asm["setTempRet0"];Runtime.getTempRet0=asm["getTempRet0"];var i64Math=null;if(memoryInitializer){if(typeof Module["locateFile"]==="function"){memoryInitializer=Module["locateFile"](memoryInitializer)}else if(Module["memoryInitializerPrefixURL"]){memoryInitializer=Module["memoryInitializerPrefixURL"]+memoryInitializer}if(ENVIRONMENT_IS_NODE||ENVIRONMENT_IS_SHELL){var data=Module["readBinary"](memoryInitializer);HEAPU8.set(data,STATIC_BASE)}else{addRunDependency("memory initializer");Browser.asyncLoad(memoryInitializer,(function(data){HEAPU8.set(data,STATIC_BASE);removeRunDependency("memory initializer")}),(function(data){throw"could not load memory initializer "+memoryInitializer}))}}function ExitStatus(status){this.name="ExitStatus";this.message="Program terminated with exit("+status+")";this.status=status}ExitStatus.prototype=new Error;ExitStatus.prototype.constructor=ExitStatus;var initialStackTop;var preloadStartTime=null;var calledMain=false;dependenciesFulfilled=function runCaller(){if(!Module["calledRun"]&&shouldRunNow)run();if(!Module["calledRun"])dependenciesFulfilled=runCaller};function run(args){args=args||Module["arguments"];if(preloadStartTime===null)preloadStartTime=Date.now();if(runDependencies>0){return}preRun();if(runDependencies>0)return;if(Module["calledRun"])return;function doRun(){if(Module["calledRun"])return;Module["calledRun"]=true;if(ABORT)return;ensureInitRuntime();preMain();if(ENVIRONMENT_IS_WEB&&preloadStartTime!==null){Module.printErr("pre-main prep time: "+(Date.now()-preloadStartTime)+" 
ms")}postRun()}if(Module["setStatus"]){Module["setStatus"]("Running...");setTimeout((function(){setTimeout((function(){Module["setStatus"]("")}),1);doRun()}),1)}else{doRun()}}Module["run"]=Module.run=run;function exit(status){if(Module["noExitRuntime"]){return}ABORT=true;EXITSTATUS=status;STACKTOP=initialStackTop;exitRuntime();if(ENVIRONMENT_IS_NODE){process["stdout"]["once"]("drain",(function(){process["exit"](status)}));console.log(" ");setTimeout((function(){process["exit"](status)}),500)}else if(ENVIRONMENT_IS_SHELL&&typeof quit==="function"){quit(status)}throw new ExitStatus(status)}Module["exit"]=Module.exit=exit;function abort(text){if(text){Module.print(text);Module.printErr(text)}ABORT=true;EXITSTATUS=1;var extra="\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.";throw"abort() at "+stackTrace()+extra}Module["abort"]=Module.abort=abort;if(Module["preInit"]){if(typeof Module["preInit"]=="function")Module["preInit"]=[Module["preInit"]];while(Module["preInit"].length>0){Module["preInit"].pop()()}}var 
shouldRunNow=true;if(Module["noInitialRun"]){shouldRunNow=false}run();window["BPGDecoder"]=(function(ctx){this.ctx=ctx;this["imageData"]=null;this["onload"]=null;this["frames"]=null;this["loop_count"]=0});window["BPGDecoder"].prototype={malloc:Module["cwrap"]("malloc","number",["number"]),free:Module["cwrap"]("free","void",["number"]),bpg_decoder_open:Module["cwrap"]("bpg_decoder_open","number",[]),bpg_decoder_decode:Module["cwrap"]("bpg_decoder_decode","number",["number","array","number"]),bpg_decoder_get_info:Module["cwrap"]("bpg_decoder_get_info","number",["number","number"]),bpg_decoder_start:Module["cwrap"]("bpg_decoder_start","number",["number","number"]),bpg_decoder_get_frame_duration:Module["cwrap"]("bpg_decoder_get_frame_duration","void",["number","number","number"]),bpg_decoder_get_line:Module["cwrap"]("bpg_decoder_get_line","number",["number","number"]),bpg_decoder_close:Module["cwrap"]("bpg_decoder_close","void",["number"]),load:(function(url){var request=new XMLHttpRequest;var this1=this;request.open("get",url,true);request.responseType="arraybuffer";request.onload=(function(event){this1._onload(request,event)});request.send()}),_onload:(function(request,event){var data=request.response;var array=new Uint8Array(data);var img,w,h,img_info_buf,cimg,p0,rgba_line,w4,frame_count;var heap8,heap16,heap32,dst,v,i,y,func,duration,frames,loop_count;img=this.bpg_decoder_open();if(this.bpg_decoder_decode(img,array,array.length)<0){console.log("could not decode 
image");return}img_info_buf=this.malloc(5*4);this.bpg_decoder_get_info(img,img_info_buf);heap8=Module["HEAPU8"];heap16=Module["HEAPU16"];heap32=Module["HEAPU32"];w=heap32[img_info_buf>>2];h=heap32[img_info_buf+4>>2];loop_count=heap16[img_info_buf+16>>1];w4=w*4;rgba_line=this.malloc(w4);frame_count=0;frames=[];for(;;){if(this.bpg_decoder_start(img,1)<0)break;this.bpg_decoder_get_frame_duration(img,img_info_buf,img_info_buf+4);duration=heap32[img_info_buf>>2]*1e3/heap32[img_info_buf+4>>2];cimg=this.ctx.createImageData(w,h);dst=cimg.data;p0=0;for(y=0;y<h;y++){this.bpg_decoder_get_line(img,rgba_line);for(i=0;i<w4;i=i+1|0){dst[p0]=heap8[rgba_line+i|0]|0;p0=p0+1|0}}frames[frame_count++]={"img":cimg,"duration":duration}}this.free(rgba_line);this.free(img_info_buf);this.bpg_decoder_close(img);this["loop_count"]=loop_count;this["frames"]=frames;this["imageData"]=frames[0]["img"];if(this["onload"])this["onload"]()})};window.onload=(function(){var i,n,el,tab,tab1,url,dec,canvas,id,style,ctx,dw,dh;tab=document.images;n=tab.length;tab1=[];for(i=0;i<n;i++){el=tab[i];url=el.src;if(url.substr(-4,4).toLowerCase()==".bpg"){tab1[tab1.length]=el}}n=tab1.length;for(i=0;i<n;i++){el=tab1[i];url=el.src;canvas=document.createElement("canvas");if(el.id)canvas.id=el.id;if(el.className)canvas.className=el.className;dw=el.getAttribute("width")|0;if(dw){canvas.style.width=dw+"px"}dh=el.getAttribute("height")|0;if(dh){canvas.style.height=dh+"px"}el.parentNode.replaceChild(canvas,el);ctx=canvas.getContext("2d");dec=new BPGDecoder(ctx);dec.onload=(function(canvas,ctx){var dec=this;var frames=this["frames"];var imageData=frames[0]["img"];function next_frame(){var 
frame_index=dec.frame_index;if(++frame_index>=frames.length){if(dec["loop_count"]==0||dec.loop_counter<dec["loop_count"]){frame_index=0;dec.loop_counter++}else{frame_index=-1}}if(frame_index>=0){dec.frame_index=frame_index;ctx.putImageData(frames[frame_index]["img"],0,0);setTimeout(next_frame,frames[frame_index]["duration"])}}canvas.width=imageData.width;canvas.height=imageData.height;ctx.putImageData(imageData,0,0);if(frames.length>1){dec.frame_index=0;dec.loop_counter=0;setTimeout(next_frame,frames[0]["duration"])}}).bind(dec,canvas,ctx);dec.load(url)}})}))()
+
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/html/bpgdec8a.js	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,27 @@
+((function(){var Module={};var Module;if(!Module)Module=(typeof Module!=="undefined"?Module:null)||{};var moduleOverrides={};for(var key in Module){if(Module.hasOwnProperty(key)){moduleOverrides[key]=Module[key]}}var ENVIRONMENT_IS_NODE=typeof process==="object"&&typeof require==="function";var ENVIRONMENT_IS_WEB=typeof window==="object";var ENVIRONMENT_IS_WORKER=typeof importScripts==="function";var ENVIRONMENT_IS_SHELL=!ENVIRONMENT_IS_WEB&&!ENVIRONMENT_IS_NODE&&!ENVIRONMENT_IS_WORKER;if(ENVIRONMENT_IS_NODE){if(!Module["print"])Module["print"]=function print(x){process["stdout"].write(x+"\n")};if(!Module["printErr"])Module["printErr"]=function printErr(x){process["stderr"].write(x+"\n")};var nodeFS=require("fs");var nodePath=require("path");Module["read"]=function read(filename,binary){filename=nodePath["normalize"](filename);var ret=nodeFS["readFileSync"](filename);if(!ret&&filename!=nodePath["resolve"](filename)){filename=path.join(__dirname,"..","src",filename);ret=nodeFS["readFileSync"](filename)}if(ret&&!binary)ret=ret.toString();return ret};Module["readBinary"]=function readBinary(filename){return Module["read"](filename,true)};Module["load"]=function load(f){globalEval(read(f))};Module["thisProgram"]=process["argv"][1].replace(/\\/g,"/");Module["arguments"]=process["argv"].slice(2);if(typeof module!=="undefined"){module["exports"]=Module}process["on"]("uncaughtException",(function(ex){if(!(ex instanceof ExitStatus)){throw ex}}))}else if(ENVIRONMENT_IS_SHELL){if(!Module["print"])Module["print"]=print;if(typeof printErr!="undefined")Module["printErr"]=printErr;if(typeof read!="undefined"){Module["read"]=read}else{Module["read"]=function read(){throw"no read() available (jsc?)"}}Module["readBinary"]=function readBinary(f){if(typeof readbuffer==="function"){return new Uint8Array(readbuffer(f))}var data=read(f,"binary");assert(typeof data==="object");return data};if(typeof scriptArgs!="undefined"){Module["arguments"]=scriptArgs}else if(typeof 
arguments!="undefined"){Module["arguments"]=arguments}this["Module"]=Module}else if(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER){Module["read"]=function read(url){var xhr=new XMLHttpRequest;xhr.open("GET",url,false);xhr.send(null);return xhr.responseText};if(typeof arguments!="undefined"){Module["arguments"]=arguments}if(typeof console!=="undefined"){if(!Module["print"])Module["print"]=function print(x){console.log(x)};if(!Module["printErr"])Module["printErr"]=function printErr(x){console.log(x)}}else{var TRY_USE_DUMP=false;if(!Module["print"])Module["print"]=TRY_USE_DUMP&&typeof dump!=="undefined"?(function(x){dump(x)}):(function(x){})}if(ENVIRONMENT_IS_WEB){window["Module"]=Module}else{Module["load"]=importScripts}}else{throw"Unknown runtime environment. Where are we?"}function globalEval(x){eval.call(null,x)}if(!Module["load"]&&Module["read"]){Module["load"]=function load(f){globalEval(Module["read"](f))}}if(!Module["print"]){Module["print"]=(function(){})}if(!Module["printErr"]){Module["printErr"]=Module["print"]}if(!Module["arguments"]){Module["arguments"]=[]}if(!Module["thisProgram"]){Module["thisProgram"]="./this.program"}Module.print=Module["print"];Module.printErr=Module["printErr"];Module["preRun"]=[];Module["postRun"]=[];for(var key in moduleOverrides){if(moduleOverrides.hasOwnProperty(key)){Module[key]=moduleOverrides[key]}}var Runtime={setTempRet0:(function(value){tempRet0=value}),getTempRet0:(function(){return tempRet0}),stackSave:(function(){return STACKTOP}),stackRestore:(function(stackTop){STACKTOP=stackTop}),getNativeTypeSize:(function(type){switch(type){case"i1":case"i8":return 1;case"i16":return 2;case"i32":return 4;case"i64":return 8;case"float":return 4;case"double":return 8;default:{if(type[type.length-1]==="*"){return Runtime.QUANTUM_SIZE}else if(type[0]==="i"){var bits=parseInt(type.substr(1));assert(bits%8===0);return bits/8}else{return 0}}}}),getNativeFieldSize:(function(type){return 
Math.max(Runtime.getNativeTypeSize(type),Runtime.QUANTUM_SIZE)}),STACK_ALIGN:16,getAlignSize:(function(type,size,vararg){if(!vararg&&(type=="i64"||type=="double"))return 8;if(!type)return Math.min(size,8);return Math.min(size||(type?Runtime.getNativeFieldSize(type):0),Runtime.QUANTUM_SIZE)}),dynCall:(function(sig,ptr,args){if(args&&args.length){if(!args.splice)args=Array.prototype.slice.call(args);args.splice(0,0,ptr);return Module["dynCall_"+sig].apply(null,args)}else{return Module["dynCall_"+sig].call(null,ptr)}}),functionPointers:[],addFunction:(function(func){for(var i=0;i<Runtime.functionPointers.length;i++){if(!Runtime.functionPointers[i]){Runtime.functionPointers[i]=func;return 2*(1+i)}}throw"Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS."}),removeFunction:(function(index){Runtime.functionPointers[(index-2)/2]=null}),getAsmConst:(function(code,numArgs){if(!Runtime.asmConstCache)Runtime.asmConstCache={};var func=Runtime.asmConstCache[code];if(func)return func;var args=[];for(var i=0;i<numArgs;i++){args.push(String.fromCharCode(36)+i)}var source=Pointer_stringify(code);if(source[0]==='"'){if(source.indexOf('"',1)===source.length-1){source=source.substr(1,source.length-2)}else{abort("invalid EM_ASM input |"+source+"|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. 
}, input) (to input values)")}}try{var evalled=eval("(function(Module, FS) { return function("+args.join(",")+"){ "+source+" } })")(Module,typeof FS!=="undefined"?FS:null)}catch(e){Module.printErr("error in executing inline EM_ASM code: "+e+" on: \n\n"+source+"\n\nwith args |"+args+"| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)");throw e}return Runtime.asmConstCache[code]=evalled}),warnOnce:(function(text){if(!Runtime.warnOnce.shown)Runtime.warnOnce.shown={};if(!Runtime.warnOnce.shown[text]){Runtime.warnOnce.shown[text]=1;Module.printErr(text)}}),funcWrappers:{},getFuncWrapper:(function(func,sig){assert(sig);if(!Runtime.funcWrappers[sig]){Runtime.funcWrappers[sig]={}}var sigCache=Runtime.funcWrappers[sig];if(!sigCache[func]){sigCache[func]=function dynCall_wrapper(){return Runtime.dynCall(sig,func,arguments)}}return sigCache[func]}),UTF8Processor:(function(){var buffer=[];var needed=0;this.processCChar=(function(code){code=code&255;if(buffer.length==0){if((code&128)==0){return String.fromCharCode(code)}buffer.push(code);if((code&224)==192){needed=1}else if((code&240)==224){needed=2}else{needed=3}return""}if(needed){buffer.push(code);needed--;if(needed>0)return""}var c1=buffer[0];var c2=buffer[1];var c3=buffer[2];var c4=buffer[3];var ret;if(buffer.length==2){ret=String.fromCharCode((c1&31)<<6|c2&63)}else if(buffer.length==3){ret=String.fromCharCode((c1&15)<<12|(c2&63)<<6|c3&63)}else{var codePoint=(c1&7)<<18|(c2&63)<<12|(c3&63)<<6|c4&63;ret=String.fromCharCode(((codePoint-65536)/1024|0)+55296,(codePoint-65536)%1024+56320)}buffer.length=0;return ret});this.processJSString=function processJSString(string){string=unescape(encodeURIComponent(string));var ret=[];for(var i=0;i<string.length;i++){ret.push(string.charCodeAt(i))}return ret}}),getCompilerSetting:(function(name){throw"You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work"}),stackAlloc:(function(size){var 
ret=STACKTOP;STACKTOP=STACKTOP+size|0;STACKTOP=STACKTOP+15&-16;return ret}),staticAlloc:(function(size){var ret=STATICTOP;STATICTOP=STATICTOP+size|0;STATICTOP=STATICTOP+15&-16;return ret}),dynamicAlloc:(function(size){var ret=DYNAMICTOP;DYNAMICTOP=DYNAMICTOP+size|0;DYNAMICTOP=DYNAMICTOP+15&-16;if(DYNAMICTOP>=TOTAL_MEMORY)enlargeMemory();return ret}),alignMemory:(function(size,quantum){var ret=size=Math.ceil(size/(quantum?quantum:16))*(quantum?quantum:16);return ret}),makeBigInt:(function(low,high,unsigned){var ret=unsigned?+(low>>>0)+ +(high>>>0)*+4294967296:+(low>>>0)+ +(high|0)*+4294967296;return ret}),GLOBAL_BASE:8,QUANTUM_SIZE:4,__dummy__:0};Module["Runtime"]=Runtime;var __THREW__=0;var ABORT=false;var EXITSTATUS=0;var undef=0;var tempValue,tempInt,tempBigInt,tempInt2,tempBigInt2,tempPair,tempBigIntI,tempBigIntR,tempBigIntS,tempBigIntP,tempBigIntD,tempDouble,tempFloat;var tempI64,tempI64b;var tempRet0,tempRet1,tempRet2,tempRet3,tempRet4,tempRet5,tempRet6,tempRet7,tempRet8,tempRet9;function assert(condition,text){if(!condition){abort("Assertion failed: "+text)}}var globalScope=this;function getCFunc(ident){var func=Module["_"+ident];if(!func){try{func=eval("_"+ident)}catch(e){}}assert(func,"Cannot call unknown function "+ident+" (perhaps LLVM optimizations or closure removed it?)");return func}var cwrap,ccall;((function(){var stack=0;var JSfuncs={"stackSave":(function(){stack=Runtime.stackSave()}),"stackRestore":(function(){Runtime.stackRestore(stack)}),"arrayToC":(function(arr){var ret=Runtime.stackAlloc(arr.length);writeArrayToMemory(arr,ret);return ret}),"stringToC":(function(str){var ret=0;if(str!==null&&str!==undefined&&str!==0){ret=Runtime.stackAlloc((str.length<<2)+1);writeStringToMemory(str,ret)}return ret})};var toC={"string":JSfuncs["stringToC"],"array":JSfuncs["arrayToC"]};ccall=function ccallFunc(ident,returnType,argTypes,args){var func=getCFunc(ident);var cArgs=[];if(args){for(var i=0;i<args.length;i++){var 
converter=toC[argTypes[i]];if(converter){if(stack===0)stack=Runtime.stackSave();cArgs[i]=converter(args[i])}else{cArgs[i]=args[i]}}}var ret=func.apply(null,cArgs);if(returnType==="string")ret=Pointer_stringify(ret);if(stack!==0)JSfuncs["stackRestore"]();return ret};var sourceRegex=/^function\s*\(([^)]*)\)\s*{\s*([^*]*?)[\s;]*(?:return\s*(.*?)[;\s]*)?}$/;function parseJSFunc(jsfunc){var parsed=jsfunc.toString().match(sourceRegex).slice(1);return{arguments:parsed[0],body:parsed[1],returnValue:parsed[2]}}var JSsource={};for(var fun in JSfuncs){if(JSfuncs.hasOwnProperty(fun)){JSsource[fun]=parseJSFunc(JSfuncs[fun])}}cwrap=function cwrap(ident,returnType,argTypes){argTypes=argTypes||[];var cfunc=getCFunc(ident);var numericArgs=argTypes.every((function(type){return type==="number"}));var numericRet=returnType!=="string";if(numericRet&&numericArgs){return cfunc}var argNames=argTypes.map((function(x,i){return"$"+i}));var funcstr="(function("+argNames.join(",")+") {";var nargs=argTypes.length;if(!numericArgs){funcstr+=JSsource["stackSave"].body+";";for(var i=0;i<nargs;i++){var arg=argNames[i],type=argTypes[i];if(type==="number")continue;var convertCode=JSsource[type+"ToC"];funcstr+="var "+convertCode.arguments+" = "+arg+";";funcstr+=convertCode.body+";";funcstr+=arg+"="+convertCode.returnValue+";"}}var cfuncname=parseJSFunc((function(){return cfunc})).returnValue;funcstr+="var ret = "+cfuncname+"("+argNames.join(",")+");";if(!numericRet){var strgfy=parseJSFunc((function(){return Pointer_stringify})).returnValue;funcstr+="ret = "+strgfy+"(ret);"}if(!numericArgs){funcstr+=JSsource["stackRestore"].body+";"}funcstr+="return ret})";return eval(funcstr)}}))();Module["cwrap"]=cwrap;Module["ccall"]=ccall;function 
setValue(ptr,value,type,noSafe){type=type||"i8";if(type.charAt(type.length-1)==="*")type="i32";switch(type){case"i1":HEAP8[ptr>>0]=value;break;case"i8":HEAP8[ptr>>0]=value;break;case"i16":HEAP16[ptr>>1]=value;break;case"i32":HEAP32[ptr>>2]=value;break;case"i64":tempI64=[value>>>0,(tempDouble=value,+Math_abs(tempDouble)>=+1?tempDouble>+0?(Math_min(+Math_floor(tempDouble/+4294967296),+4294967295)|0)>>>0:~~+Math_ceil((tempDouble- +(~~tempDouble>>>0))/+4294967296)>>>0:0)],HEAP32[ptr>>2]=tempI64[0],HEAP32[ptr+4>>2]=tempI64[1];break;case"float":HEAPF32[ptr>>2]=value;break;case"double":HEAPF64[ptr>>3]=value;break;default:abort("invalid type for setValue: "+type)}}Module["setValue"]=setValue;function getValue(ptr,type,noSafe){type=type||"i8";if(type.charAt(type.length-1)==="*")type="i32";switch(type){case"i1":return HEAP8[ptr>>0];case"i8":return HEAP8[ptr>>0];case"i16":return HEAP16[ptr>>1];case"i32":return HEAP32[ptr>>2];case"i64":return HEAP32[ptr>>2];case"float":return HEAPF32[ptr>>2];case"double":return HEAPF64[ptr>>3];default:abort("invalid type for setValue: "+type)}return null}Module["getValue"]=getValue;var ALLOC_NORMAL=0;var ALLOC_STACK=1;var ALLOC_STATIC=2;var ALLOC_DYNAMIC=3;var ALLOC_NONE=4;Module["ALLOC_NORMAL"]=ALLOC_NORMAL;Module["ALLOC_STACK"]=ALLOC_STACK;Module["ALLOC_STATIC"]=ALLOC_STATIC;Module["ALLOC_DYNAMIC"]=ALLOC_DYNAMIC;Module["ALLOC_NONE"]=ALLOC_NONE;function allocate(slab,types,allocator,ptr){var zeroinit,size;if(typeof slab==="number"){zeroinit=true;size=slab}else{zeroinit=false;size=slab.length}var singleType=typeof types==="string"?types:null;var ret;if(allocator==ALLOC_NONE){ret=ptr}else{ret=[_malloc,Runtime.stackAlloc,Runtime.staticAlloc,Runtime.dynamicAlloc][allocator===undefined?ALLOC_STATIC:allocator](Math.max(size,singleType?1:types.length))}if(zeroinit){var ptr=ret,stop;assert((ret&3)==0);stop=ret+(size&~3);for(;ptr<stop;ptr+=4){HEAP32[ptr>>2]=0}stop=ret+size;while(ptr<stop){HEAP8[ptr++>>0]=0}return 
ret}if(singleType==="i8"){if(slab.subarray||slab.slice){HEAPU8.set(slab,ret)}else{HEAPU8.set(new Uint8Array(slab),ret)}return ret}var i=0,type,typeSize,previousType;while(i<size){var curr=slab[i];if(typeof curr==="function"){curr=Runtime.getFunctionIndex(curr)}type=singleType||types[i];if(type===0){i++;continue}if(type=="i64")type="i32";setValue(ret+i,curr,type);if(previousType!==type){typeSize=Runtime.getNativeTypeSize(type);previousType=type}i+=typeSize}return ret}Module["allocate"]=allocate;function demangleAll(text){return text}function jsStackTrace(){var err=new Error;if(!err.stack){try{throw new Error(0)}catch(e){err=e}if(!err.stack){return"(no stack trace available)"}}return err.stack.toString()}function stackTrace(){return demangleAll(jsStackTrace())}Module["stackTrace"]=stackTrace;var PAGE_SIZE=4096;function alignMemoryPage(x){return x+4095&-4096}var HEAP;var HEAP8,HEAPU8,HEAP16,HEAPU16,HEAP32,HEAPU32,HEAPF32,HEAPF64;var STATIC_BASE=0,STATICTOP=0,staticSealed=false;var STACK_BASE=0,STACKTOP=0,STACK_MAX=0;var DYNAMIC_BASE=0,DYNAMICTOP=0;function enlargeMemory(){abort("Cannot enlarge memory arrays. 
Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value "+TOTAL_MEMORY+", (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.")}var TOTAL_STACK=Module["TOTAL_STACK"]||5242880;var TOTAL_MEMORY=Module["TOTAL_MEMORY"]||33554432;var FAST_MEMORY=Module["FAST_MEMORY"]||2097152;var totalMemory=64*1024;while(totalMemory<TOTAL_MEMORY||totalMemory<2*TOTAL_STACK){if(totalMemory<16*1024*1024){totalMemory*=2}else{totalMemory+=16*1024*1024}}if(totalMemory!==TOTAL_MEMORY){Module.printErr("increasing TOTAL_MEMORY to "+totalMemory+" to be compliant with the asm.js spec");TOTAL_MEMORY=totalMemory}assert(typeof Int32Array!=="undefined"&&typeof Float64Array!=="undefined"&&!!(new Int32Array(1))["subarray"]&&!!(new Int32Array(1))["set"],"JS engine does not provide full typed array support");var buffer=new ArrayBuffer(TOTAL_MEMORY);HEAP8=new Int8Array(buffer);HEAP16=new Int16Array(buffer);HEAP32=new Int32Array(buffer);HEAPU8=new Uint8Array(buffer);HEAPU16=new Uint16Array(buffer);HEAPU32=new Uint32Array(buffer);HEAPF32=new Float32Array(buffer);HEAPF64=new Float64Array(buffer);HEAP32[0]=255;assert(HEAPU8[0]===255&&HEAPU8[3]===0,"Typed arrays 2 must be run on a little-endian system");Module["HEAP"]=HEAP;Module["buffer"]=buffer;Module["HEAP8"]=HEAP8;Module["HEAP16"]=HEAP16;Module["HEAP32"]=HEAP32;Module["HEAPU8"]=HEAPU8;Module["HEAPU16"]=HEAPU16;Module["HEAPU32"]=HEAPU32;Module["HEAPF32"]=HEAPF32;Module["HEAPF64"]=HEAPF64;function callRuntimeCallbacks(callbacks){while(callbacks.length>0){var callback=callbacks.shift();if(typeof callback=="function"){callback();continue}var func=callback.func;if(typeof func==="number"){if(callback.arg===undefined){Runtime.dynCall("v",func)}else{Runtime.dynCall("vi",func,[callback.arg])}}else{func(callback.arg===undefined?null:callback.arg)}}}var __ATPRERUN__=[];var __ATINIT__=[];var __ATMAIN__=[];var __ATEXIT__=[];var 
__ATPOSTRUN__=[];var runtimeInitialized=false;var runtimeExited=false;function preRun(){if(Module["preRun"]){if(typeof Module["preRun"]=="function")Module["preRun"]=[Module["preRun"]];while(Module["preRun"].length){addOnPreRun(Module["preRun"].shift())}}callRuntimeCallbacks(__ATPRERUN__)}function ensureInitRuntime(){if(runtimeInitialized)return;runtimeInitialized=true;callRuntimeCallbacks(__ATINIT__)}function preMain(){callRuntimeCallbacks(__ATMAIN__)}function exitRuntime(){callRuntimeCallbacks(__ATEXIT__);runtimeExited=true}function postRun(){if(Module["postRun"]){if(typeof Module["postRun"]=="function")Module["postRun"]=[Module["postRun"]];while(Module["postRun"].length){addOnPostRun(Module["postRun"].shift())}}callRuntimeCallbacks(__ATPOSTRUN__)}function addOnPreRun(cb){__ATPRERUN__.unshift(cb)}Module["addOnPreRun"]=Module.addOnPreRun=addOnPreRun;function addOnInit(cb){__ATINIT__.unshift(cb)}Module["addOnInit"]=Module.addOnInit=addOnInit;function addOnPreMain(cb){__ATMAIN__.unshift(cb)}Module["addOnPreMain"]=Module.addOnPreMain=addOnPreMain;function addOnExit(cb){__ATEXIT__.unshift(cb)}Module["addOnExit"]=Module.addOnExit=addOnExit;function addOnPostRun(cb){__ATPOSTRUN__.unshift(cb)}Module["addOnPostRun"]=Module.addOnPostRun=addOnPostRun;function intArrayFromString(stringy,dontAddNull,length){var ret=(new Runtime.UTF8Processor).processJSString(stringy);if(length){ret.length=length}if(!dontAddNull){ret.push(0)}return ret}Module["intArrayFromString"]=intArrayFromString;function intArrayToString(array){var ret=[];for(var i=0;i<array.length;i++){var chr=array[i];if(chr>255){chr&=255}ret.push(String.fromCharCode(chr))}return ret.join("")}Module["intArrayToString"]=intArrayToString;function writeStringToMemory(string,buffer,dontAddNull){var array=intArrayFromString(string,dontAddNull);var i=0;while(i<array.length){var chr=array[i];HEAP8[buffer+i>>0]=chr;i=i+1}}Module["writeStringToMemory"]=writeStringToMemory;function writeArrayToMemory(array,buffer){for(var 
i=0;i<array.length;i++){HEAP8[buffer+i>>0]=array[i]}}Module["writeArrayToMemory"]=writeArrayToMemory;function writeAsciiToMemory(str,buffer,dontAddNull){for(var i=0;i<str.length;i++){HEAP8[buffer+i>>0]=str.charCodeAt(i)}if(!dontAddNull)HEAP8[buffer+str.length>>0]=0}Module["writeAsciiToMemory"]=writeAsciiToMemory;function unSign(value,bits,ignore){if(value>=0){return value}return bits<=32?2*Math.abs(1<<bits-1)+value:Math.pow(2,bits)+value}function reSign(value,bits,ignore){if(value<=0){return value}var half=bits<=32?Math.abs(1<<bits-1):Math.pow(2,bits-1);if(value>=half&&(bits<=32||value>half)){value=-2*half+value}return value}if(!Math["imul"]||Math["imul"](4294967295,5)!==-5)Math["imul"]=function imul(a,b){var ah=a>>>16;var al=a&65535;var bh=b>>>16;var bl=b&65535;return al*bl+(ah*bl+al*bh<<16)|0};Math.imul=Math["imul"];var Math_abs=Math.abs;var Math_cos=Math.cos;var Math_sin=Math.sin;var Math_tan=Math.tan;var Math_acos=Math.acos;var Math_asin=Math.asin;var Math_atan=Math.atan;var Math_atan2=Math.atan2;var Math_exp=Math.exp;var Math_log=Math.log;var Math_sqrt=Math.sqrt;var Math_ceil=Math.ceil;var Math_floor=Math.floor;var Math_pow=Math.pow;var Math_imul=Math.imul;var Math_fround=Math.fround;var Math_min=Math.min;var runDependencies=0;var runDependencyWatcher=null;var dependenciesFulfilled=null;function addRunDependency(id){runDependencies++;if(Module["monitorRunDependencies"]){Module["monitorRunDependencies"](runDependencies)}}Module["addRunDependency"]=addRunDependency;function removeRunDependency(id){runDependencies--;if(Module["monitorRunDependencies"]){Module["monitorRunDependencies"](runDependencies)}if(runDependencies==0){if(runDependencyWatcher!==null){clearInterval(runDependencyWatcher);runDependencyWatcher=null}if(dependenciesFulfilled){var callback=dependenciesFulfilled;dependenciesFulfilled=null;callback()}}}Module["removeRunDependency"]=removeRunDependency;Module["preloadedImages"]={};Module["preloadedAudios"]={};var 
memoryInitializer=null;STATIC_BASE=8;STATICTOP=STATIC_BASE+6304;__ATINIT__.push();allocate([0,0,1,0,1,2,0,1,2,3,1,2,3,2,3,3,0,1,0,2,1,0,3,2,1,0,3,2,1,3,2,3,0,0,1,0,1,2,0,1,2,3,0,1,2,3,4,0,1,2,3,4,5,0,1,2,3,4,5,6,0,1,2,3,4,5,6,7,1,2,3,4,5,6,7,2,3,4,5,6,7,3,4,5,6,7,4,5,6,7,5,6,7,6,7,7,0,1,0,2,1,0,3,2,1,0,4,3,2,1,0,5,4,3,2,1,0,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,7,6,5,4,3,2,7,6,5,4,3,7,6,5,4,7,6,5,7,6,7,40,45,51,57,64,72,0,0,29,0,0,0,30,0,0,0,31,0,0,0,32,0,0,0,33,0,0,0,33,0,0,0,34,0,0,0,34,0,0,0,35,0,0,0,35,0,0,0,36,0,0,0,36,0,0,0,37,0,0,0,37,0,0,0,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,7,7,7,8,8,8,8,8,8,9,9,9,9,9,9,10,10,10,10,10,10,11,11,11,11,11,11,12,12,0,0,0,0,0,0,0,2,5,9,1,4,8,12,3,7,11,14,6,10,13,15,0,0,0,0,0,0,0,0,0,2,1,3,0,0,0,0,0,2,5,9,14,20,27,35,1,4,8,13,19,26,34,42,3,7,12,18,25,33,41,48,6,11,17,24,32,40,47,53,10,16,23,31,39,46,52,57,15,22,30,38,45,51,56,60,21,29,37,44,50,55,59,62,28,36,43,49,54,58,61,63,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,0,1,2,3,16,17,18,19,4,5,6,7,20,21,22,23,8,9,10,11,24,25,26,27,12,13,14,15,28,29,30,31,32,33,34,35,48,49,50,51,36,37,38,39,52,53,54,55,40,41,42,43,56,57,58,59,44,45,46,47,60,61,62,63,0,1,4,5,2,3,4,5,6,6,8,8,7,7,8,8,1,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,2,2,2,2,1,1,1,1,0,0,0,0,0,0,0,0,2,1,0,0,2,1,0,0,2,1,0,0,2,1,0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,153,200,139,141,157,154,154,154,154,154,154,154,154,184,154,154,154,184,63,139,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,153,138,138,111,141,94,138,182,154,139,139,139,139,139,139,110,110,124,125,140,153,125,127,140,109,111,143,127,111,79,108,123,63,110,110,124,125,140,153,125,127,140,109,111,143,127,111,79,108,123,63,91,171,134,141,111,111,125,110,110,94,124,108,124,107,125,141,179,153,
125,107,125,141,179,153,125,107,125,141,179,153,125,140,139,182,182,152,136,152,136,153,136,139,111,136,139,111,141,111,140,92,137,138,140,152,138,139,153,74,149,92,139,107,122,152,140,179,166,182,140,227,122,197,138,153,136,167,152,152,154,154,154,154,154,154,154,154,154,154,154,154,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,153,185,107,139,126,154,197,185,201,154,154,154,149,154,139,154,154,154,152,139,110,122,95,79,63,31,31,153,153,153,153,140,198,140,198,168,79,124,138,94,153,111,149,107,167,154,139,139,139,139,139,139,125,110,94,110,95,79,125,111,110,78,110,111,111,95,94,108,123,108,125,110,94,110,95,79,125,111,110,78,110,111,111,95,94,108,123,108,121,140,61,154,155,154,139,153,139,123,123,63,153,166,183,140,136,153,154,166,183,140,136,153,154,166,183,140,136,153,154,170,153,123,123,107,121,107,121,167,151,183,140,151,183,140,140,140,154,196,196,167,154,152,167,182,182,134,149,136,153,121,136,137,169,194,166,167,154,167,137,182,107,167,91,122,107,167,154,154,154,154,154,154,154,154,154,154,154,154,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,153,160,107,139,126,154,197,185,201,154,154,154,134,154,139,154,154,183,152,139,154,137,95,79,63,31,31,153,153,153,153,169,198,169,198,168,79,224,167,122,153,111,149,92,167,154,139,139,139,139,139,139,125,110,124,110,95,94,125,111,111,79,125,126,111,111,79,108,123,93,125,110,124,110,95,94,125,111,111,79,125,126,111,111,79,108,123,93,121,140,61,154,170,154,139,153,139,123,123,63,124,166,183,140,136,153,154,166,183,140,136,153,154,166,183,140,136,153,154,170,153,138,138,122,121,122,121,167,151,183,140,151,183,140,140,140,154,196,167,167,154,152,167,182,182,134,149,136,153,121,136,122,169,208,166,167,154,152,167,182,107,167,91,107,107,167,154,154,154,154,154,154,154,154,154,154,154,154,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,7,8,9,10,11,12,13,14,15,16,17,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,2,2
,2,2,3,3,3,3,4,4,4,5,5,6,6,7,8,9,10,11,13,14,16,18,20,22,24,0,0,29,30,31,32,33,33,34,34,35,35,36,36,37,37,0,0,0,0,0,0,1,0,2,0,3,0,0,0,4,0,0,0,5,0,0,0,0,0,0,0,6,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,104,101,118,99,0,0,0,0,200,5,0,0,0,0,0,0,0,0,0,0,53,54,50,72,34,48,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,100,30,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,26,10,1,0,0,0,0,0,1,2,2,2,2,3,5,7,8,10,12,13,15,17,18,19,20,21,22,23,23,24,24,25,25,26,27,27,28,28,29,29,30,31,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,32,26,21,17,13,9,5,2,0,254,251,247,243,239,235,230,224,230,235,239,243,247,251,254,0,2,5,9,13,17,21,26,32,0,0,0,0,0,0,0,0,240,154,249,114,252,138,253,30,254,122,254,197,254,0,255,197,254,122,254,30,254,138,253,114,252,154,249,0,240,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,64,90,90,90,89,88,87,85,83,82,80,78,75,73,70,67,64,61,57,54,50,46,43,38,36,31,25,22,18,13,9,4,254,58,10,254,252,54,16,254,250,46,28,252,252,36,36,252,252,28,46,250,254,16,54,252,254,10,58,254,0,0,0,0,255,4,246,58,17,251,1,0,255,4,246,58,17,251,1,0,255,4,245,40,40,245,4,255,255,4,245,40,40,245,4,255,0,1,251,17,58,246,4,255,0,1,251,17,58,246,4,255,1,2,0,3,4,0,0,0,255,0,1,0,0,255,0,1,255,255,1,1,1,255,255,1,0,1,1,0,0,2,2,0,1,2,2,1,0,3,3,0,1,3,3,1,2,3,3,2,16,16,16,16,17,18,21,24,16,16,16,16,17,19,22,25,16,16,17,18,20,22,25,29,16,16,18,21,24,27,31,36,17,17,20,24,30,35,41,47,18,19,22,27,35,44,54,65,21,22,25,31,41,54,70,88,24,25,29,36,47,65,88,115,16,16,16,16,17,18,20,24,16,16,16,17,18,20,24,25,16,16,17,18,20,24,25,28,16,17,18,20,24,25,28,33,17,18,20,24,25,28,33,41,18,20,24,25,28,33,41,54,20,24,25,28,33,41,54,71,24,25,28,33,41,54,71,91,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,176,208,240,128,167,197,227,128,158,187,216,123,150,178,205,116,142,169,195,111,135,160,185,105,128,152,175,100,122,144,166,95,116,137,158,90,110,130,150,85,104,123,142,81,99,117,135,77,94,111,128,73,89,105,122,69,85,100,116,66,80,95,110,62,76,90,104,59,72,86,99,56,69,81,94,53,65,77,89,51,62,73,85,48,59,69,80,46,56,66,76,43,53,63,72,41,50,59,69,39,48,56,65,37,45,54,62,35,43,51,59,33,41,48,56,32,39,46,53,30,37,43,50,29,35,41,48,27,33,39,45,26,31,37,43,24,30,35,41,23,28,33,39,22,27,32,37,21,26,30,35,20,24,29,33,19,23,27,31,18,22,26,30,17,21,25,28,16,20,23,27,15,19,22,25,14,18,21,24,14,17,20,23,13,16,19,22,12,15,18,21,12,14,17,20,11,14,16,19,11,13,15,18,10,12,15,17,10,12,14,16,9,11,13,15,9,11,12,14,8,10,12,14,8,9,11,13,7,9,11,12,7,9,10,12,7,8,10,11,6,8,9,11,6,7,9,10,6,7,8,9,2,2,2,2,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,62,63,0,0,1,2,2,4,4,5,6,7,8,9,9,11,11,12,13,13,15,15,16,16,18,18,19,19,21,21,22,22,23,24,24,25,26,26,27,27,28,29,29,30,30,30,31,32,32,33,33,33,34,34,35,35,35,36,36,36,37,37,37,38,38,63,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,6,6,6,6,7,7,7,7,8,8,8,0,255,255,255,127,0,0,0,0,0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,0,0,0,0,0,0,0,0,3,1,1,0,36,56,37,56,38,56,0,0,0,0,0,0,4,0,0,0,0,0,0,0,3,1,0,16,36,56,37,56,38,56,0,0,0,0,0,0,5,0,0,0,0,0,0,0,3,0,0,16,36,56,37,56,38,56,0,0,0,0,0,0,8,0,0,0,0,0,0,0,1,0,0,0,36,56,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,3,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"i8",ALLOC_NONE,Runtime.GLOBAL_BASE);var tempDoublePtr=Runtime.alignMemory(allocate(12,"i8",ALLOC_STATIC),8);assert(tempDoublePtr%8==0);function copyTempFloat(ptr){HEAP8[tempDoublePtr]=HEAP8[ptr];HEAP8[tempDoublePtr+1]=HEAP8[ptr+1];HEAP8[tempDoublePtr+2]=HEAP8[ptr+2];HEAP8[tempDoublePtr+3]=HEAP8[ptr+3]}function copyTempDouble(ptr){HEAP8[tempDoublePtr]=HEAP8[ptr];HEAP8[tempDoublePtr+1]=HEAP8[ptr+1];HEAP8[tempDoublePtr+2]=HEAP8[ptr+2];HEAP8[tempDoublePtr+3]=HEAP8[ptr+3];HEAP8[tempDoublePtr+4]=HEAP8[ptr+4];HEAP8[tempDoublePtr+5]=HEAP8[ptr+5];HEAP8[tempDoublePtr+6]=HEAP8[ptr+6];HEAP8[tempDoublePtr+7]=HEAP8[ptr+7]}Module["_bitshift64Ashr"]=_bitshift64Ashr;Module["_i64Subtract"]=_i64Subtract;function _sbrk(bytes){var self=_sbrk;if(!self.called){DYNAMICTOP=alignMemoryPage(DYNAMICTOP);self.called=true;assert(Runtime.dynamicAlloc);self.alloc=Runtime.dynamicAlloc;Runtime.dynamicAlloc=(function(){abort("cannot dynamically allocate, sbrk now has control")})}var ret=DYNAMICTOP;if(bytes!=0)self.alloc(bytes);return ret}Module["_i64Add"]=_i64Add;Module["_strlen"]=_strlen;Module["_memset"]=_memset;Module["_bitshift64Shl"]=_bitshift64Shl;function _abort(){Module["abort"]()}Module["_llvm_bswap_i32"]=_llvm_bswap_i32;function _rint(x){if(Math.abs(x%1)!==.5)return 
Math.round(x);return x+x%2+(x<0?1:-1)}function _lrint(){return _rint.apply(null,arguments)}function _emscripten_memcpy_big(dest,src,num){HEAPU8.set(HEAPU8.subarray(src,src+num),dest);return dest}Module["_memcpy"]=_memcpy;STACK_BASE=STACKTOP=Runtime.alignMemory(STATICTOP);staticSealed=true;STACK_MAX=STACK_BASE+TOTAL_STACK;DYNAMIC_BASE=DYNAMICTOP=Runtime.alignMemory(STACK_MAX);assert(DYNAMIC_BASE<TOTAL_MEMORY,"TOTAL_MEMORY not big enough for stack");var ctlz_i8=allocate([8,7,6,6,5,5,5,5,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"i8",ALLOC_DYNAMIC);var cttz_i8=allocate([8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0],"i8",ALLOC_DYNAMIC);function invoke_iiii(index,a1,a2,a3){try{return Module["dynCall_iiii"](index,a1,a2,a3)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11){try{Module["dynCall_viiiiiiiiiii"](index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function 
invoke_viiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10){try{Module["dynCall_viiiiiiiiii"](index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiii(index,a1,a2,a3,a4,a5){try{Module["dynCall_viiiii"](index,a1,a2,a3,a4,a5)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_vi(index,a1){try{Module["dynCall_vi"](index,a1)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_vii(index,a1,a2){try{Module["dynCall_vii"](index,a1,a2)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iiiiiii(index,a1,a2,a3,a4,a5,a6){try{return Module["dynCall_iiiiiii"](index,a1,a2,a3,a4,a5,a6)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9){try{Module["dynCall_viiiiiiiii"](index,a1,a2,a3,a4,a5,a6,a7,a8,a9)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12){try{Module["dynCall_viiiiiiiiiiii"](index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_ii(index,a1){try{return Module["dynCall_ii"](index,a1)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viii(index,a1,a2,a3){try{Module["dynCall_viii"](index,a1,a2,a3)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8){try{Module["dynCall_viiiiiiii"](index,a1,a2,a3,a4,a5,a6,a7,a8)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iiiii(index,a1,a2,a3,a4){try{return Module["dynCall_iiiii"](index,a1,a2,a3,a4)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function 
invoke_viiiiii(index,a1,a2,a3,a4,a5,a6){try{Module["dynCall_viiiiii"](index,a1,a2,a3,a4,a5,a6)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iii(index,a1,a2){try{return Module["dynCall_iii"](index,a1,a2)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_iiiiii(index,a1,a2,a3,a4,a5){try{return Module["dynCall_iiiiii"](index,a1,a2,a3,a4,a5)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}function invoke_viiiiiii(index,a1,a2,a3,a4,a5,a6,a7){try{Module["dynCall_viiiiiii"](index,a1,a2,a3,a4,a5,a6,a7)}catch(e){if(typeof e!=="number"&&e!=="longjmp")throw e;asm["setThrew"](1,0)}}Module.asmGlobalArg={"Math":Math,"Int8Array":Int8Array,"Int16Array":Int16Array,"Int32Array":Int32Array,"Uint8Array":Uint8Array,"Uint16Array":Uint16Array,"Uint32Array":Uint32Array,"Float32Array":Float32Array,"Float64Array":Float64Array};Module.asmLibraryArg={"abort":abort,"assert":assert,"min":Math_min,"invoke_iiii":invoke_iiii,"invoke_viiiiiiiiiii":invoke_viiiiiiiiiii,"invoke_viiiiiiiiii":invoke_viiiiiiiiii,"invoke_viiiii":invoke_viiiii,"invoke_vi":invoke_vi,"invoke_vii":invoke_vii,"invoke_iiiiiii":invoke_iiiiiii,"invoke_viiiiiiiii":invoke_viiiiiiiii,"invoke_viiiiiiiiiiii":invoke_viiiiiiiiiiii,"invoke_ii":invoke_ii,"invoke_viii":invoke_viii,"invoke_viiiiiiii":invoke_viiiiiiii,"invoke_iiiii":invoke_iiiii,"invoke_viiiiii":invoke_viiiiii,"invoke_iii":invoke_iii,"invoke_iiiiii":invoke_iiiiii,"invoke_viiiiiii":invoke_viiiiiii,"_sbrk":_sbrk,"_lrint":_lrint,"_abort":_abort,"_emscripten_memcpy_big":_emscripten_memcpy_big,"_rint":_rint,"STACKTOP":STACKTOP,"STACK_MAX":STACK_MAX,"tempDoublePtr":tempDoublePtr,"ABORT":ABORT,"cttz_i8":cttz_i8,"ctlz_i8":ctlz_i8,"NaN":NaN,"Infinity":Infinity};// EMSCRIPTEN_START_ASM
+var asm=(function(global,env,buffer) {
+"use asm";var a=new global.Int8Array(buffer);var b=new global.Int16Array(buffer);var c=new global.Int32Array(buffer);var d=new global.Uint8Array(buffer);var e=new global.Uint16Array(buffer);var f=new global.Uint32Array(buffer);var g=new global.Float32Array(buffer);var h=new global.Float64Array(buffer);var i=env.STACKTOP|0;var j=env.STACK_MAX|0;var k=env.tempDoublePtr|0;var l=env.ABORT|0;var m=env.cttz_i8|0;var n=env.ctlz_i8|0;var o=0;var p=0;var q=0;var r=0;var s=+env.NaN,t=+env.Infinity;var u=0,v=0,w=0,x=0,y=0.0,z=0,A=0,B=0,C=0.0;var D=0;var E=0;var F=0;var G=0;var H=0;var I=0;var J=0;var K=0;var L=0;var M=0;var N=global.Math.floor;var O=global.Math.abs;var P=global.Math.sqrt;var Q=global.Math.pow;var R=global.Math.cos;var S=global.Math.sin;var T=global.Math.tan;var U=global.Math.acos;var V=global.Math.asin;var W=global.Math.atan;var X=global.Math.atan2;var Y=global.Math.exp;var Z=global.Math.log;var _=global.Math.ceil;var $=global.Math.imul;var aa=env.abort;var ba=env.assert;var ca=env.min;var da=env.invoke_iiii;var ea=env.invoke_viiiiiiiiiii;var fa=env.invoke_viiiiiiiiii;var ga=env.invoke_viiiii;var ha=env.invoke_vi;var ia=env.invoke_vii;var ja=env.invoke_iiiiiii;var ka=env.invoke_viiiiiiiii;var la=env.invoke_viiiiiiiiiiii;var ma=env.invoke_ii;var na=env.invoke_viii;var oa=env.invoke_viiiiiiii;var pa=env.invoke_iiiii;var qa=env.invoke_viiiiii;var ra=env.invoke_iii;var sa=env.invoke_iiiiii;var ta=env.invoke_viiiiiii;var ua=env._sbrk;var va=env._lrint;var wa=env._abort;var xa=env._emscripten_memcpy_big;var ya=env._rint;var za=0.0;
+// EMSCRIPTEN_START_FUNCS
+function ic(e,f,g,h,j,k,l,m,n,o,p,q,r){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;q=q|0;r=r|0;var s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0;s=i;i=i+16|0;w=s+8|0;t=s;z=e+136|0;v=c[z>>2]|0;K=c[q>>2]|0;c[w>>2]=K;M=c[q+4>>2]|0;A=w+4|0;c[A>>2]=M;y=c[r>>2]|0;c[t>>2]=y;J=c[r+4>>2]|0;x=t+4|0;c[x>>2]=J;q=a[v+31254>>0]|0;do if(q<<24>>24){if((o|0)==1){c[v+288>>2]=d[v+p+31268>>0];if((c[(c[e+200>>2]|0)+4>>2]|0)==3){c[v+292>>2]=d[v+p+31277>>0];c[v+296>>2]=d[v+p+31281>>0];break}else{c[v+292>>2]=d[v+31277>>0];c[v+296>>2]=d[v+31281>>0];break}}}else{c[v+288>>2]=d[v+31268>>0];c[v+292>>2]=d[v+31277>>0];c[v+296>>2]=d[v+31281>>0]}while(0);r=e+200|0;G=c[r>>2]|0;B=(c[G+13076>>2]|0)>>>0<n>>>0;if(((!B?(c[G+13072>>2]|0)>>>0<n>>>0:0)?(d[v+31255>>0]|0)>(o|0):0)?!(q<<24>>24!=0&(o|0)==0):0)q=(Gb(e,n)|0)&255;else{if((c[G+13088>>2]|0)==0?(c[v+31244>>2]|0)==0:0)G=(o|0)==0&(c[v+31248>>2]|0)!=0;else G=0;if(B)q=1;else q=(q<<24>>24!=0&(o|0)==0|G)&1}B=(n|0)>2;G=c[(c[r>>2]|0)+4>>2]|0;if(B)if(!G)L=y;else E=20;else if((G|0)==3)E=20;else L=y;do if((E|0)==20){G=(o|0)==0;if(!((K|0)==0&(G^1))){K=Hb(e,o)|0;c[w>>2]=K;if((c[(c[r>>2]|0)+4>>2]|0)==2?q<<24>>24==0|(n|0)==3:0){M=Hb(e,o)|0;c[A>>2]=M}if(!G)E=25}else{K=0;E=25}if((E|0)==25)if(!y){L=0;break}L=Hb(e,o)|0;c[t>>2]=L;if((c[(c[r>>2]|0)+4>>2]|0)==2?q<<24>>24==0|(n|0)==3:0){J=Hb(e,o)|0;c[x>>2]=J}}while(0);if(!(q<<24>>24)){A=c[r>>2]|0;y=c[A+13072>>2]|0;x=1<<y;q=c[A+13148>>2]|0;if(((o|0)==0?(c[v+31244>>2]|0)!=1:0)&(K|0)==0&(L|0)==0)if((c[A+4>>2]|0)==2?(M|J|0)!=0:0)E=37;else o=1;else E=37;if((E|0)==37){o=Ib(e,o)|0;A=c[r>>2]|0}G=c[z>>2]|0;A=n-(c[A+13172>>2]|0)|0;z=G+31244|0;if((c[z>>2]|0)==1){I=1<<n;qd(e,f,g,I,I);mc(e,f,g,n,0)}I=(o|0)!=0;K=(K|L|0)==0;do if(I)if(K)E=46;else{F=0;E=48}else 
if(K){K=c[r>>2]|0;L=c[K+4>>2]|0;if((L|0)==2){if(M){E=46;break}if(J){M=0;E=46;break}}if(!((c[z>>2]|0)!=1|(L|0)==0)){if(B|(L|0)==3){t=1<<(c[K+13172>>2]|0)+A;w=1<<(c[K+13184>>2]|0)+A;qd(e,f,g,t,w);mc(e,f,g,A,1);mc(e,f,g,A,2);if((c[(c[r>>2]|0)+4>>2]|0)!=2)break;M=(1<<A)+g|0;qd(e,f,M,t,w);mc(e,f,M,A,1);mc(e,f,M,A,2);break}if((p|0)==3?(F=1<<n+1,H=1<<(c[K+13184>>2]|0)+n,qd(e,h,j,F,H),mc(e,h,j,n,1),mc(e,h,j,n,2),(c[(c[r>>2]|0)+4>>2]|0)==2):0){M=(1<<n)+j|0;qd(e,h,M,F,H);mc(e,h,M,n,1);mc(e,h,M,n,2)}}}else{F=0;E=48}while(0);if((E|0)==46)if((c[(c[r>>2]|0)+4>>2]|0)==2){F=(M|J|0)==0;E=48}else{F=1;E=48}a:do if((E|0)==48){E=e+204|0;do if((a[(c[E>>2]|0)+22>>0]|0)!=0?(D=G+300|0,(a[D>>0]|0)==0):0){M=ob(e)|0;J=G+280|0;c[J>>2]=M;if(M){M=(pb(e)|0)==1;H=c[J>>2]|0;if(M){H=0-H|0;c[J>>2]=H}}else H=0;a[D>>0]=1;M=(c[(c[r>>2]|0)+13192>>2]|0)/2|0;if((H|0)<(-26-M|0)|(H|0)>(M+25|0)){M=-1094995529;i=s;return M|0}else{Ob(e,k,l,m);break}}while(0);if((!((a[e+3068>>0]|0)==0|F)?(a[G+31256>>0]|0)==0:0)?(C=G+301|0,(a[C>>0]|0)==0):0){if(!(qb(e)|0)){a[G+302>>0]=0;a[G+303>>0]=0}else{l=c[E>>2]|0;if(!(a[l+1633>>0]|0))m=0;else{m=rb(e)|0;l=c[E>>2]|0}a[G+302>>0]=a[l+m+1634>>0]|0;a[G+303>>0]=a[(c[E>>2]|0)+m+1639>>0]|0}a[C>>0]=1}if((c[z>>2]|0)==1&(n|0)<4){m=c[G+288>>2]|0;if((m+ -6|0)>>>0<9)k=2;else k=(m+ -22|0)>>>0<9&1;m=c[G+292>>2]|0;if((m+ -6|0)>>>0<9)m=2;else m=(m+ -22|0)>>>0<9&1}else{k=0;m=0}l=G+304|0;a[l>>0]=0;if(I)Lb(e,f,g,n,k,0);k=c[r>>2]|0;C=c[k+4>>2]|0;if(C){if(!(B|(C|0)==3)){if((p|0)!=3)break;p=1<<n+1;A=1<<(c[k+13184>>2]|0)+n;l=0;do{if((c[z>>2]|0)==1){M=(l<<n)+j|0;qd(e,h,M,p,A);mc(e,h,M,n,1)}if(c[w+(l<<2)>>2]|0)Lb(e,h,(l<<n)+j|0,n,m,1);l=l+1|0}while((l|0)<(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0));w=0;while(1){if((c[z>>2]|0)==1){M=(w<<n)+j|0;qd(e,h,M,p,A);mc(e,h,M,n,2)}if(c[t+(w<<2)>>2]|0)Lb(e,h,(w<<n)+j|0,n,m,2);w=w+1|0;if((w|0)>=(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0))break a}}h=1<<(c[k+13172>>2]|0)+A;j=1<<(c[k+13184>>2]|0)+A;do 
if((a[(c[E>>2]|0)+1630>>0]|0)==0|I^1)a[l>>0]=0;else{if(c[z>>2]|0){M=(c[G+296>>2]|0)==4;a[l>>0]=M&1;if(!M)break}else a[l>>0]=1;jc(e,0)}while(0);p=e+160|0;E=G+320|0;D=G+11680|0;C=1<<A<<A;k=(C|0)>0;B=e+(A+ -2<<2)+5856|0;F=G+284|0;I=0;do{if((c[z>>2]|0)==1){M=(I<<A)+g|0;qd(e,f,M,h,j);mc(e,f,M,A,1)}do if(!(c[w+(I<<2)>>2]|0)){if(!(a[l>>0]|0))break;L=c[p>>2]|0;G=c[L+36>>2]|0;H=c[r>>2]|0;M=$(g>>c[H+13184>>2],G)|0;H=(c[L+4>>2]|0)+(M+(f>>c[H+13172>>2]<<c[H+56>>2]))|0;if(k){I=0;do{b[D+(I<<1)>>1]=($(b[E+(I<<1)>>1]|0,c[F>>2]|0)|0)>>>3;I=I+1|0}while((I|0)!=(C|0));I=C}else I=0;Ka[c[B>>2]&7](H,D,G)}else Lb(e,f,(I<<A)+g|0,A,m,1);while(0);I=I+1|0}while((I|0)<(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0));if(!(a[l>>0]|0))H=0;else{jc(e,1);H=0}do{if((c[z>>2]|0)==1){M=(H<<A)+g|0;qd(e,f,M,h,j);mc(e,f,M,A,2)}do if(!(c[t+(H<<2)>>2]|0)){if(!(a[l>>0]|0))break;L=c[p>>2]|0;w=c[L+40>>2]|0;G=c[r>>2]|0;M=$(g>>c[G+13188>>2],w)|0;G=(c[L+8>>2]|0)+(M+(f>>c[G+13176>>2]<<c[G+56>>2]))|0;if(k){H=0;do{b[D+(H<<1)>>1]=($(b[E+(H<<1)>>1]|0,c[F>>2]|0)|0)>>>3;H=H+1|0}while((H|0)!=(C|0));H=C}else H=0;Ka[c[B>>2]&7](G,D,w)}else Lb(e,f,(H<<A)+g|0,A,m,2);while(0);H=H+1|0}while((H|0)<(((c[(c[r>>2]|0)+4>>2]|0)==2?2:1)|0))}}while(0);if((o|0)!=0?(u=1<<n,(u|0)>0):0){t=e+7596|0;r=0;do{w=$(r+g>>y,q)|0;h=0;do{a[(c[t>>2]|0)+((h+f>>y)+w)>>0]=1;h=h+x|0}while((h|0)<(u|0));r=r+x|0}while((r|0)<(u|0))}if(((a[e+3049>>0]|0)==0?(Pb(e,f,g,n),(a[(c[e+204>>2]|0)+40>>0]|0)!=0):0)?(a[v+31256>>0]|0)!=0:0)fc(e,f,g,n)}else{v=n+ -1|0;u=1<<v;n=u+f|0;u=u+g|0;r=o+1|0;q=ic(e,f,g,f,g,k,l,m,v,r,0,w,t)|0;if((q|0)<0){M=q;i=s;return M|0}q=ic(e,n,g,f,g,k,l,m,v,r,1,w,t)|0;if((q|0)<0){M=q;i=s;return M|0}q=ic(e,f,u,f,g,k,l,m,v,r,2,w,t)|0;if((q|0)<0){M=q;i=s;return M|0}f=ic(e,n,u,f,g,k,l,m,v,r,3,w,t)|0;if((f|0)<0){M=f;i=s;return M|0}}M=0;i=s;return M|0}function jc(a,b){a=a|0;b=b|0;var d=0,e=0,f=0;d=i;e=c[a+136>>2]|0;f=Jb(a,b)|0;if(!f){c[e+284>>2]=0;i=d;return}else{c[e+284>>2]=1-((Kb(a,b)|0)<<1)<<f+ -1;i=d;return}}function 
kc(e,f,g,h,j,k,l,m,n,o,p){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;var q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0;q=i;v=c[e+136>>2]|0;t=c[h>>2]|0;u=c[h+32>>2]|0;y=c[e+200>>2]|0;w=c[y+13120>>2]|0;x=c[y+13124>>2]|0;z=b[j>>1]|0;h=z&3;A=b[j+2>>1]|0;j=A&3;r=c[e+2428>>2]|0;if((r|0)==1)r=(a[(c[e+204>>2]|0)+37>>0]|0)!=0;else if(!r)r=(a[(c[e+204>>2]|0)+38>>0]|0)!=0;else r=0;s=d[1408+m>>0]|0;k=(z<<16>>16>>2)+k|0;l=(A<<16>>16>>2)+l|0;A=$(l,u)|0;y=c[y+56>>2]|0;A=A+(k<<y)|0;z=t+A|0;if(!((!((k|0)<3|(l|0)<4)?(k|0)<(-4-m+w|0):0)?(l|0)<(-4-n+x|0):0)){B=80<<y;z=3<<y;A=t+(A+($(u,-3)|0)-z)|0;Ca[c[e+7560>>2]&1](v+320|0,A,B,u,m+7|0,n+7|0,k+ -3|0,l+ -3|0,w,x);z=v+((B*3|0)+z)+320|0;u=B}t=(h|0)!=0&1;v=(j|0)!=0&1;if(r){Ba[c[e+(s<<4)+(v<<3)+(t<<2)+6248>>2]&7](f,g,z,u,n,d[e+3101>>0]|0,o,p,h,j,m);i=q;return}else{La[c[e+(s<<4)+(v<<3)+(t<<2)+6088>>2]&7](f,g,z,u,n,h,j,m);i=q;return}}function lc(e,f,g,h,j,k,l,m,n,o,p,q,r){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;q=q|0;r=r|0;var s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0;s=i;z=c[e+136>>2]|0;B=c[e+200>>2]|0;y=c[B+13172>>2]|0;x=c[B+13120>>2]>>y;A=c[B+13184>>2]|0;w=c[B+13124>>2]>>A;t=c[e+2428>>2]|0;if((t|0)==1)u=(a[(c[e+204>>2]|0)+37>>0]|0)!=0;else if(!t)u=(a[(c[e+204>>2]|0)+38>>0]|0)!=0;else u=0;t=d[1408+n>>0]|0;E=b[p+(k<<2)>>1]|0;D=y+2|0;v=E&(1<<D)+ -1;k=b[p+(k<<2)+2>>1]|0;C=A+2|0;p=k&(1<<C)+ -1;y=v<<1-y;A=p<<1-A;l=(E>>D)+l|0;m=(k>>C)+m|0;C=$(m,j)|0;k=c[B+56>>2]|0;C=C+(l<<k)|0;B=h+C|0;if(!((!((l|0)<1|(m|0)<2)?(l|0)<(-2-n+x|0):0)?(m|0)<(-2-o+w|0):0)){E=80<<k;B=1<<k;Ca[c[e+7560>>2]&1](z+320|0,h+(C-j-B)|0,E,j,n+3|0,o+3|0,l+ -1|0,m+ -1|0,x,w);B=z+(E+B)+320|0;j=E}v=(v|0)!=0&1;w=(p|0)!=0&1;if(u){Ba[c[e+(t<<4)+(w<<3)+(v<<2)+7048>>2]&7](f,g,B,j,o,b[e+3102>>1]|0,q,r,y,A,n);i=s;return}else{La[c[e+(t<<4)+(w<<3)+(v<<2)+6888>>2]&7](f,g,B,j,o,y,A,n);i=s;return}}function mc(b,e,f,g,h){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;var 
j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0;j=i;i=i+272|0;u=j+195|0;A=j+130|0;y=j+65|0;w=j;s=c[b+136>>2]|0;r=c[b+200>>2]|0;L=c[r+(h<<2)+13168>>2]|0;M=c[r+(h<<2)+13180>>2]|0;l=1<<g;fa=l<<L;Z=c[r+13072>>2]|0;ha=l<<M;ba=c[r+13164>>2]|0;N=e>>Z&ba;aa=f>>Z&ba;_=ba+2|0;Y=($(aa,_)|0)+N|0;T=c[b+204>>2]|0;U=c[T+1684>>2]|0;Y=c[U+(Y<<2)>>2]|0;n=c[b+160>>2]|0;k=c[n+(h<<2)+32>>2]|0;n=c[n+(h<<2)>>2]|0;m=($(k,f>>M)|0)+(e>>L)|0;o=n+m|0;K=c[r+13156>>2]|0;p=(h|0)==0;q=c[(p?s+288|0:s+292|0)>>2]|0;v=u+1|0;z=y+1|0;x=A+1|0;t=w+1|0;if(!(c[s+31288>>2]|0))ja=0;else ja=(Y|0)>(c[U+(N+ -1+($(ba&aa+(ha>>Z),_)|0)<<2)>>2]|0);ga=ja&1;R=c[s+31292>>2]|0;S=c[s+31300>>2]|0;P=c[s+31296>>2]|0;if(!(c[s+31304>>2]|0))U=0;else U=(Y|0)>(c[U+(($(_,aa+ -1|0)|0)+(ba&N+(fa>>Z))<<2)>>2]|0);N=U&1;aa=(ha<<1)+f|0;ba=r+13124|0;ea=c[ba>>2]|0;Z=ha+f|0;aa=((aa|0)>(ea|0)?ea:aa)-Z>>M;ea=(fa<<1)+e|0;Y=r+13120|0;ca=c[Y>>2]|0;_=fa+e|0;ea=((ea|0)>(ca|0)?ca:ea)-_>>L;ca=T+20|0;if((a[ca>>0]|0)==1){T=c[r+13084>>2]|0;ia=ha>>T;fa=fa>>T;ka=(1<<T)+ -1|0;ha=ka&f;fa=((fa|0)==0&1)+fa|0;ka=(ka&e|0)!=0;if(!(ka|ja^1)){ma=e+ -1>>T;la=Z>>T;na=(c[r+13160>>2]|0)-la|0;na=(ia|0)>(na|0)?na:ia;if((na|0)>0){oa=c[(c[b+3508>>2]|0)+16>>2]|0;ga=0;ja=0;do{ga=(a[oa+((($(ja+la|0,K)|0)+ma|0)*12|0)+10>>0]|0)==0|ga;ja=ja+2|0}while((ja|0)<(na|0))}else ga=0}if(!((R|0)!=1|ka)){ja=e+ -1>>T;ka=f>>T;ma=(c[r+13160>>2]|0)-ka|0;ma=(ia|0)>(ma|0)?ma:ia;if((ma|0)>0){ia=c[(c[b+3508>>2]|0)+16>>2]|0;R=0;la=0;do{R=(a[ia+((($(la+ka|0,K)|0)+ja|0)*12|0)+10>>0]|0)==0|R;la=la+2|0}while((la|0)<(ma|0))}else R=0}if((S|0)==1){S=($(f+ -1>>T,K)|0)+(e+ -1>>T)|0;S=(a[(c[(c[b+3508>>2]|0)+16>>2]|0)+(S*12|0)+10>>0]|0)==0&1}ha=(ha|0)!=0;if(!((P|0)!=1|ha)){P=e>>T;ia=K-P|0;ia=(fa|0)>(ia|0)?ia:fa;if((ia|0)>0){ja=($(f+ 
-1>>T,K)|0)+P|0;la=c[(c[b+3508>>2]|0)+16>>2]|0;P=0;ka=0;do{P=(a[la+((ja+ka|0)*12|0)+10>>0]|0)==0|P;ka=ka+2|0}while((ka|0)<(ia|0))}else P=0}if(!(ha|U^1)){N=_>>T;U=K-N|0;U=(fa|0)>(U|0)?U:fa;if((U|0)>0){ha=($(f+ -1>>T,K)|0)+N|0;fa=c[(c[b+3508>>2]|0)+16>>2]|0;N=0;T=0;do{N=(a[fa+((ha+T|0)*12|0)+10>>0]|0)==0|N;T=T+2|0}while((T|0)<(U|0))}else N=0}U=v+0|0;T=U+64|0;do{a[U>>0]=128;U=U+1|0}while((U|0)<(T|0));U=y+0|0;T=U+65|0;do{a[U>>0]=128;U=U+1|0}while((U|0)<(T|0));fa=N}else fa=N;T=(S|0)!=0;if(T){oa=a[n+(m+~k)>>0]|0;a[u>>0]=oa;a[y>>0]=oa}U=(P|0)!=0;if(U)mf(z|0,n+(m-k)|0,l|0)|0;N=(fa|0)!=0;if(N?(da=l+1|0,mf(y+da|0,n+(l-k+m)|0,l|0)|0,W=$(d[n+(l+ -1-k+m+ea)>>0]|0,16843009)|0,V=l-ea|0,(V|0)>0):0){ea=ea+da|0;da=0;do{oa=y+(ea+da)|0;a[oa>>0]=W;a[oa+1>>0]=W>>8;a[oa+2>>0]=W>>16;a[oa+3>>0]=W>>24;da=da+4|0}while((da|0)<(V|0))}V=(R|0)!=0;if(V&(l|0)>0){da=m+ -1|0;W=0;do{oa=W;W=W+1|0;a[u+W>>0]=a[n+(da+($(oa,k)|0))>>0]|0}while((W|0)!=(l|0))}W=(ga|0)!=0;if(W){ea=aa+l|0;da=m+ -1|0;if((aa|0)>0){ha=l;do{oa=ha;ha=ha+1|0;a[u+ha>>0]=a[n+(da+($(oa,k)|0))>>0]|0}while((ha|0)<(ea|0))}ia=$(d[n+(da+($(ea+ -1|0,k)|0))>>0]|0,16843009)|0;ha=l-aa|0;if((ha|0)>0){ea=l+1+aa|0;da=0;do{oa=u+(ea+da)|0;a[oa>>0]=ia;a[oa+1>>0]=ia>>8;a[oa+2>>0]=ia>>16;a[oa+3>>0]=ia>>24;da=da+4|0}while((da|0)<(ha|0))}}do if((a[ca>>0]|0)==1?(oa=ga|R,Q=(oa|0)!=0,oa=oa|S,X=(oa|0)==0,(P|fa|oa|0)!=0):0){da=l<<1;ca=c[Y>>2]|0;if(((da<<L)+e|0)<(ca|0))Y=da;else Y=ca-e>>L;ba=c[ba>>2]|0;if(((da<<M)+f|0)>=(ba|0))da=ba-f>>M;aa=(W?aa:0)+l|0;if(!N)if((_|0)<(ca|0))Y=l;else Y=ca-e>>L;if(!W)if((Z|0)<(ba|0))Z=l;else Z=ba-f>>M;else Z=da;do if(X){do if((Y|0)>0){ba=c[r+13084>>2]|0;aa=$((-1<<M)+f>>ba,K)|0;_=c[(c[b+3508>>2]|0)+16>>2]|0;X=0;while(1){ca=X+1|0;if(!(a[_+((aa+((X<<L)+e>>ba)|0)*12|0)+10>>0]|0))break;if((ca|0)<(Y|0))X=ca;else{X=ca;break}}if((X|0)>0)if((e|0)>0){if((X|0)<=-1)break;aa=(-1<<M)+f|0;_=c[b+3508>>2]|0;do{ba=X;X=X+ 
-1|0;oa=c[r+13084>>2]|0;oa=($(aa>>oa,K)|0)+((X<<L)+e>>oa)|0;if(a[(c[_+16>>2]|0)+(oa*12|0)+10>>0]|0)a[y+ba>>0]=a[y+(ba+1)>>0]|0}while((ba|0)>0)}else{aa=(-1<<M)+f|0;_=c[b+3508>>2]|0;do{ba=X;X=X+ -1|0;oa=c[r+13084>>2]|0;oa=($(aa>>oa,K)|0)+((X<<L)+e>>oa)|0;if(a[(c[_+16>>2]|0)+(oa*12|0)+10>>0]|0)a[y+ba>>0]=a[y+(ba+1)>>0]|0}while((X|0)>0);a[y>>0]=a[z>>0]|0;break}}while(0);X=a[y>>0]|0;a[u>>0]=X}else{ca=(-1<<L)+e|0;ea=b+3508|0;while(1){oa=aa;aa=aa+ -1|0;X=c[r+13084>>2]|0;if((oa|0)<=0){J=59;break}_=(aa<<M)+f|0;oa=($(_>>X,K)|0)+(ca>>X)|0;da=c[ea>>2]|0;ba=c[da+16>>2]|0;if(!(a[ba+(oa*12|0)+10>>0]|0)){ea=_;break}}if((J|0)==59){ba=c[ea>>2]|0;da=ba;ba=c[ba+16>>2]|0;ea=(aa<<M)+f|0}_=r+13084|0;aa=da+16|0;if(!(a[ba+((($(ea>>X,K)|0)+(ca>>X)|0)*12|0)+10>>0]|0)){X=a[y>>0]|0;break}if((Y|0)>0){ca=$((-1<<M)+f>>X,K)|0;da=0;while(1){ea=da+1|0;if(!(a[ba+((ca+((da<<L)+e>>X)|0)*12|0)+10>>0]|0))break;if((ea|0)<(Y|0))da=ea;else{da=ea;break}}if((da|0)>-1)J=68}else{da=0;J=68}a:do if((J|0)==68){ca=(-1<<M)+f|0;while(1){ea=da+ -1|0;if(a[ba+((($(ca>>X,K)|0)+((ea<<L)+e>>X)|0)*12|0)+10>>0]|0)a[y+da>>0]=a[y+(da+1)>>0]|0;if((da|0)<=0)break a;X=c[_>>2]|0;ba=c[aa>>2]|0;da=ea}}while(0);X=a[y>>0]|0;a[u>>0]=X}while(0);a[u>>0]=X;if(Q&(Z|0)>0){aa=$(X&255,16843009)|0;Q=(-1<<L)+e|0;X=c[b+3508>>2]|0;_=0;do{oa=c[r+13084>>2]|0;oa=($((_<<M)+f>>oa,K)|0)+(Q>>oa)|0;if(!(a[(c[X+16>>2]|0)+(oa*12|0)+10>>0]|0))aa=$(d[u+((_|3)+1)>>0]|0,16843009)|0;else{oa=u+(_|1)|0;a[oa>>0]=aa;a[oa+1>>0]=aa>>8;a[oa+2>>0]=aa>>16;a[oa+3>>0]=aa>>24}_=_+4|0}while((_|0)<(Z|0))}if(!V?(O=$(d[u>>0]|0,16843009)|0,(l|0)>0):0){Q=0;do{oa=u+(Q|1)|0;a[oa>>0]=O;a[oa+1>>0]=O>>8;a[oa+2>>0]=O>>16;a[oa+3>>0]=O>>24;Q=Q+4|0}while((Q|0)<(l|0))}do if(!W){O=$(d[u+l>>0]|0,16843009)|0;if((l|0)<=0)break;Q=l+1|0;X=0;do{oa=u+(Q+X)|0;a[oa>>0]=O;a[oa+1>>0]=O>>8;a[oa+2>>0]=O>>16;a[oa+3>>0]=O>>24;X=X+4|0}while((X|0)<(l|0))}while(0);Q=(e|0)==0;O=(f|0)==0;_=(Z|0)>0;b:do if(Q|O){if(Q){if(_)Q=0;else 
break;while(1){oa=u+(Q|1)|0;a[oa>>0]=0;a[oa+1>>0]=0;a[oa+2>>0]=0;a[oa+3>>0]=0;Q=Q+4|0;if((Q|0)>=(Z|0))break b}}if(!_)break;_=$(d[u+Z>>0]|0,16843009)|0;X=(-1<<L)+e|0;Q=c[b+3508>>2]|0;Z=Z+ -1|0;do{oa=c[r+13084>>2]|0;oa=($((Z+ -3<<M)+f>>oa,K)|0)+(X>>oa)|0;aa=u+(Z+ -2)|0;if(!(a[(c[Q+16>>2]|0)+(oa*12|0)+10>>0]|0))_=$(d[aa>>0]|0,16843009)|0;else{a[aa>>0]=_;a[aa+1>>0]=_>>8;a[aa+2>>0]=_>>16;a[aa+3>>0]=_>>24}Z=Z+ -4|0}while((Z|0)>-1)}else{Q=(-1<<L)+e|0;X=r+13084|0;ba=c[X>>2]|0;ca=Q>>ba;if(_){aa=$(d[u+Z>>0]|0,16843009)|0;_=c[b+3508>>2]|0;Z=Z+ -1|0;do{oa=($((Z+ -3<<M)+f>>ba,K)|0)+ca|0;ca=u+(Z+ -2)|0;if(!(a[(c[_+16>>2]|0)+(oa*12|0)+10>>0]|0))aa=$(d[ca>>0]|0,16843009)|0;else{a[ca>>0]=aa;a[ca+1>>0]=aa>>8;a[ca+2>>0]=aa>>16;a[ca+3>>0]=aa>>24;ba=c[X>>2]|0}Z=Z+ -4|0;ca=Q>>ba}while((Z|0)>-1)}else _=c[b+3508>>2]|0;oa=($((-1<<M)+f>>ba,K)|0)+ca|0;if(!(a[(c[_+16>>2]|0)+(oa*12|0)+10>>0]|0))break;a[u>>0]=a[v>>0]|0}while(0);Q=a[u>>0]|0;a[y>>0]=Q;if(!((Y|0)>0&(O^1)))break;O=$(Q&255,16843009)|0;M=(-1<<M)+f|0;b=c[b+3508>>2]|0;f=0;do{oa=c[r+13084>>2]|0;oa=($(M>>oa,K)|0)+((f<<L)+e>>oa)|0;if(!(a[(c[b+16>>2]|0)+(oa*12|0)+10>>0]|0))O=$(d[y+((f|3)+1)>>0]|0,16843009)|0;else{oa=y+(f|1)|0;a[oa>>0]=O;a[oa+1>>0]=O>>8;a[oa+2>>0]=O>>16;a[oa+3>>0]=O>>24}f=f+4|0}while((f|0)<(Y|0))}while(0);c:do if(!W){if(V){J=$(d[u+l>>0]|0,16843009)|0;if((l|0)<=0){J=148;break}K=l+1|0;L=0;while(1){oa=u+(K+L)|0;a[oa>>0]=J;a[oa+1>>0]=J>>8;a[oa+2>>0]=J>>16;a[oa+3>>0]=J>>24;L=L+4|0;if((L|0)>=(l|0)){J=148;break c}}}if(T){J=$(d[u>>0]|0,16843009)|0;K=l<<1;if((l|0)>0)I=0;else{J=151;break}while(1){oa=u+(I|1)|0;a[oa>>0]=J;a[oa+1>>0]=J>>8;a[oa+2>>0]=J>>16;a[oa+3>>0]=J>>24;I=I+4|0;if((I|0)>=(K|0)){J=151;break c}}}if(U){J=a[z>>0]|0;a[u>>0]=J;J=$(J&255,16843009)|0;I=l<<1;if((l|0)>0)K=0;else{J=153;break}while(1){oa=u+(K|1)|0;a[oa>>0]=J;a[oa+1>>0]=J>>8;a[oa+2>>0]=J>>16;a[oa+3>>0]=J>>24;K=K+4|0;if((K|0)>=(I|0)){J=153;break 
c}}}if(!N){a[u>>0]=-128;J=l<<1;L=(l|0)>0;if(L)K=0;else{J=148;break}do{oa=y+(K|1)|0;a[oa>>0]=-2139062144;a[oa+1>>0]=-2139062144>>8;a[oa+2>>0]=-2139062144>>16;a[oa+3>>0]=-2139062144>>24;K=K+4|0}while((K|0)<(J|0));if(L)K=0;else{J=148;break}while(1){oa=u+(K|1)|0;a[oa>>0]=-2139062144;a[oa+1>>0]=-2139062144>>8;a[oa+2>>0]=-2139062144>>16;a[oa+3>>0]=-2139062144>>24;K=K+4|0;if((K|0)>=(J|0)){J=148;break c}}}I=y+(l+1)|0;L=a[I>>0]|0;K=$(L&255,16843009)|0;H=(l|0)>0;if(H)J=0;else{a[u>>0]=L;break}do{oa=y+(J|1)|0;a[oa>>0]=K;a[oa+1>>0]=K>>8;a[oa+2>>0]=K>>16;a[oa+3>>0]=K>>24;J=J+4|0}while((J|0)<(l|0));I=a[I>>0]|0;a[u>>0]=I;I=$(I&255,16843009)|0;J=l<<1;if(H){H=0;do{oa=u+(H|1)|0;a[oa>>0]=I;a[oa+1>>0]=I>>8;a[oa+2>>0]=I>>16;a[oa+3>>0]=I>>24;H=H+4|0}while((H|0)<(J|0));J=156}else J=156}else J=148;while(0);if((J|0)==148)if((R|0)==0?(I=$(d[u+(l+1)>>0]|0,16843009)|0,(l|0)>0):0){J=0;do{oa=u+(J|1)|0;a[oa>>0]=I;a[oa+1>>0]=I>>8;a[oa+2>>0]=I>>16;a[oa+3>>0]=I>>24;J=J+4|0}while((J|0)<(l|0));J=151}else J=151;if((J|0)==151)if(!S){a[u>>0]=a[v>>0]|0;J=153}else J=153;if((J|0)==153)if((P|0)==0?(H=$(d[u>>0]|0,16843009)|0,(l|0)>0):0){I=0;do{oa=y+(I|1)|0;a[oa>>0]=H;a[oa+1>>0]=H>>8;a[oa+2>>0]=H>>16;a[oa+3>>0]=H>>24;I=I+4|0}while((I|0)<(l|0));J=156}else J=156;if(((J|0)==156?!N:0)?(G=$(d[y+l>>0]|0,16843009)|0,(l|0)>0):0){I=l+1|0;H=0;do{oa=y+(I+H)|0;a[oa>>0]=G;a[oa+1>>0]=G>>8;a[oa+2>>0]=G>>16;a[oa+3>>0]=G>>24;H=H+4|0}while((H|0)<(l|0))}G=a[u>>0]|0;a[y>>0]=G;d:do if(!(c[r+13112>>2]|0)){if(p){if((q|0)==1|(l|0)==4){t=z;break}}else if(((q|0)==1?1:(c[r+4>>2]|0)!=3)|(l|0)==4){t=z;break}oa=q+ -26|0;oa=(oa|0)>-1?oa:26-q|0;na=q+ -10|0;na=(na|0)>-1?na:10-q|0;if((((oa|0)>(na|0)?na:oa)|0)>(c[1664+(g+ 
-3<<2)>>2]|0)){if((p&(a[r+13061>>0]|0)!=0&(g|0)==5?(E=G&255,F=a[y+64>>0]|0,D=F&255,oa=D+E-(d[y+32>>0]<<1)|0,(((oa|0)>-1?oa:0-oa|0)|0)<8):0)?(B=u+64|0,C=a[B>>0]|0,oa=(C&255)+E-(d[u+32>>0]<<1)|0,(((oa|0)>-1?oa:0-oa|0)|0)<8):0){a[w>>0]=G;a[w+64>>0]=F;x=0;do{oa=x;x=x+1|0;a[w+x>>0]=(($(E,63-oa|0)|0)+32+($(D,x)|0)|0)>>>6}while((x|0)!=63);x=0;while(1){w=x+1|0;a[u+w>>0]=(($(G&255,63-x|0)|0)+32+($(C&255,w)|0)|0)>>>6;if((w|0)==63)break d;G=a[u>>0]|0;C=a[B>>0]|0;x=w}}B=l<<1;F=a[u+B>>0]|0;a[A+B>>0]=F;C=a[y+B>>0]|0;a[w+B>>0]=C;B=B+ -2|0;D=(B|0)>-1;if(D){E=B;while(1){oa=E+1|0;na=F;F=a[u+oa>>0]|0;a[A+oa>>0]=((na&255)+2+((F&255)<<1)+(d[u+E>>0]|0)|0)>>>2;if((E|0)<=0)break;else E=E+ -1|0}}oa=((d[v>>0]|0)+2+((G&255)<<1)+(d[z>>0]|0)|0)>>>2&255;a[A>>0]=oa;a[w>>0]=oa;if(D)while(1){oa=B+1|0;na=C;C=a[y+oa>>0]|0;a[w+oa>>0]=((na&255)+2+((C&255)<<1)+(d[y+B>>0]|0)|0)>>>2;if((B|0)<=0){v=x;break}else B=B+ -1|0}else v=x}else t=z}else t=z;while(0);if(!q){nc(o,t,v,k,g);i=j;return}else if((q|0)==1){if((l|0)>0){q=l;h=0;do{q=(d[v+h>>0]|0)+q+(d[t+h>>0]|0)|0;h=h+1|0}while((h|0)!=(l|0));q=q>>g+1;h=$(q,16843009)|0;r=0;do{s=($(r,k)|0)+m|0;g=0;do{oa=n+(s+g)|0;a[oa>>0]=h;a[oa+1>>0]=h>>8;a[oa+2>>0]=h>>16;a[oa+3>>0]=h>>24;g=g+4|0}while((g|0)<(l|0));r=r+1|0}while((r|0)!=(l|0))}else q=l>>g+1;if(!(p&(l|0)<32)){i=j;return}a[o>>0]=((q<<1)+2+(d[v>>0]|0)+(d[t>>0]|0)|0)>>>2;if((l|0)<=1){i=j;return}o=(q*3|0)+2|0;p=1;do{a[n+(p+m)>>0]=((d[t+p>>0]|0)+o|0)>>>2;p=p+1|0}while((p|0)!=(l|0));p=1;do{a[n+(($(p,k)|0)+m)>>0]=((d[v+p>>0]|0)+o|0)>>>2;p=p+1|0}while((p|0)!=(l|0));i=j;return}else{if(!(c[r+13104>>2]|0))m=0;else m=(a[s+31256>>0]|0)!=0;oc(o,t,v,k,h,q,l,m&1);i=j;return}}function nc(b,c,e,f,g){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0;m=i;j=1<<g;if((j|0)<=0){i=m;return}l=j+ 
-1|0;h=c+j|0;k=e+j|0;g=g+1|0;n=0;do{o=e+n|0;p=l-n|0;q=$(n,f)|0;n=n+1|0;r=0;do{v=$(d[o>>0]|0,l-r|0)|0;s=r;r=r+1|0;u=$(d[h>>0]|0,r)|0;t=$(d[c+s>>0]|0,p)|0;a[b+(s+q)>>0]=v+j+u+t+($(d[k>>0]|0,n)|0)>>g}while((r|0)!=(j|0))}while((n|0)!=(j|0));i=m;return}function oc(c,e,f,g,h,j,k,l){c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0;m=i;i=i+112|0;o=m;n=a[1680+(j+ -2)>>0]|0;p=o+k|0;q=($(n,k)|0)>>5;if((j|0)>17){s=e+ -1|0;r=j+ -11|0;if(r>>>0<15&(q|0)<-1){if((k|0)>=0){s=0;do{u=e+(s+ -1)|0;u=d[u>>0]|d[u+1>>0]<<8|d[u+2>>0]<<16|d[u+3>>0]<<24;v=o+(s+k)|0;a[v>>0]=u;a[v+1>>0]=u>>8;a[v+2>>0]=u>>16;a[v+3>>0]=u>>24;s=s+4|0}while((s|0)<=(k|0))}if((q|0)<0){r=b[1720+(r<<1)>>1]|0;do{a[o+(q+k)>>0]=a[f+((($(r,q)|0)+128>>8)+ -1)>>0]|0;q=q+1|0}while((q|0)!=0)}}else p=s;o=(k|0)>0;if(o){q=0;do{u=q;q=q+1|0;s=$(q,n)|0;r=s>>5;s=s&31;if(!s){r=r+1|0;s=$(u,g)|0;t=0;do{u=p+(r+t)|0;u=d[u>>0]|d[u+1>>0]<<8|d[u+2>>0]<<16|d[u+3>>0]<<24;v=c+(t+s)|0;a[v>>0]=u;a[v+1>>0]=u>>8;a[v+2>>0]=u>>16;a[v+3>>0]=u>>24;t=t+4|0}while((t|0)<(k|0))}else{t=32-s|0;v=$(u,g)|0;u=0;do{w=u+r|0;x=$(d[p+(w+1)>>0]|0,t)|0;a[c+(u+v)>>0]=(x+16+($(d[p+(w+2)>>0]|0,s)|0)|0)>>>5;w=u|1;x=w+r|0;y=$(d[p+(x+1)>>0]|0,t)|0;a[c+(w+v)>>0]=(y+16+($(d[p+(x+2)>>0]|0,s)|0)|0)>>>5;w=u|2;x=w+r|0;y=$(d[p+(x+1)>>0]|0,t)|0;a[c+(w+v)>>0]=(y+16+($(d[p+(x+2)>>0]|0,s)|0)|0)>>>5;w=u|3;x=w+r|0;y=$(d[p+(x+1)>>0]|0,t)|0;a[c+(w+v)>>0]=(y+16+($(d[p+(x+2)>>0]|0,s)|0)|0)>>>5;u=u+4|0}while((u|0)<(k|0))}}while((q|0)!=(k|0))}if(!((j|0)==26&(h|0)==0&(k|0)<32&(l|0)==0&o)){i=m;return}j=f+ -1|0;n=0;do{h=((d[f+n>>0]|0)-(d[j>>0]|0)>>1)+(d[e>>0]|0)|0;if(h>>>0>255)h=0-h>>31;a[c+($(n,g)|0)>>0]=h;n=n+1|0}while((n|0)!=(k|0));i=m;return}s=f+ -1|0;r=j+ -11|0;if(r>>>0<15&(q|0)<-1){if((k|0)>=0){s=0;do{x=f+(s+ 
-1)|0;x=d[x>>0]|d[x+1>>0]<<8|d[x+2>>0]<<16|d[x+3>>0]<<24;y=o+(s+k)|0;a[y>>0]=x;a[y+1>>0]=x>>8;a[y+2>>0]=x>>16;a[y+3>>0]=x>>24;s=s+4|0}while((s|0)<=(k|0))}if((q|0)<0){r=b[1720+(r<<1)>>1]|0;do{a[o+(q+k)>>0]=a[e+((($(r,q)|0)+128>>8)+ -1)>>0]|0;q=q+1|0}while((q|0)!=0)}}else p=s;q=(k|0)>0;if(q){o=0;do{r=o;o=o+1|0;t=$(o,n)|0;u=t>>5;t=t&31;if(!t){s=u+1|0;t=0;do{a[c+(($(t,g)|0)+r)>>0]=a[p+(s+t)>>0]|0;t=t+1|0}while((t|0)!=(k|0))}else{s=32-t|0;v=0;do{y=v+u|0;x=$(d[p+(y+1)>>0]|0,s)|0;a[c+(($(v,g)|0)+r)>>0]=(x+16+($(d[p+(y+2)>>0]|0,t)|0)|0)>>>5;v=v+1|0}while((v|0)!=(k|0))}}while((o|0)!=(k|0))}if(!((j|0)==10&(h|0)==0&(k|0)<32&(l|0)==0&q)){i=m;return}g=e+ -1|0;n=0;do{j=((d[e+n>>0]|0)-(d[g>>0]|0)>>1)+(d[f>>0]|0)|0;if(j>>>0>255)j=0-j>>31;a[c+n>>0]=j;j=n|1;h=((d[e+j>>0]|0)-(d[g>>0]|0)>>1)+(d[f>>0]|0)|0;if(h>>>0>255)h=0-h>>31;a[c+j>>0]=h;j=n|2;h=((d[e+j>>0]|0)-(d[g>>0]|0)>>1)+(d[f>>0]|0)|0;if(h>>>0>255)h=0-h>>31;a[c+j>>0]=h;j=n|3;h=((d[e+j>>0]|0)-(d[g>>0]|0)>>1)+(d[f>>0]|0)|0;if(h>>>0>255)h=0-h>>31;a[c+j>>0]=h;n=n+4|0}while((n|0)<(k|0));i=m;return}function pc(b,e,f){b=b|0;e=e|0;f=f|0;var g=0,h=0;g=i;h=c[e>>2]|0;if(!h){i=g;return}if(!(c[h+304>>2]|0)){i=g;return}h=e+70|0;f=(d[h>>0]|0)&(f^255)&255;a[h>>0]=f;if(f<<24>>24){i=g;return}Qd(c[b+4>>2]|0,e+4|0);ue(e+56|0);c[e+16>>2]=0;ue(e+64|0);ue(e+60|0);c[e+24>>2]=0;c[e+20>>2]=0;c[e+36>>2]=0;i=g;return}function qc(a,b,d,e){a=a|0;b=b|0;d=d|0;e=e|0;var f=0,g=0;g=c[a+200>>2]|0;f=c[g+13080>>2]|0;e=($(e>>f,c[g+13128>>2]|0)|0)+(d>>f)|0;return c[(c[b+24>>2]|0)+(c[(c[(c[a+204>>2]|0)+1668>>2]|0)+(e<<2)>>2]<<2)>>2]|0}function rc(a){a=a|0;var b=0,c=0;b=i;c=0;do{pc(a,a+(c*72|0)+3512|0,6);c=c+1|0}while((c|0)!=32);i=b;return}function sc(a){a=a|0;var b=0,c=0;b=i;c=0;do{pc(a,a+(c*72|0)+3512|0,-1);c=c+1|0}while((c|0)!=32);i=b;return}function tc(d,e,f){d=d|0;e=e|0;f=f|0;var 
g=0,h=0,j=0,k=0;g=i;h=d+7616|0;k=0;do{if(((c[(c[d+(k*72|0)+3512>>2]|0)+304>>2]|0)!=0?(b[d+(k*72|0)+3580>>1]|0)==(b[h>>1]|0):0)?(c[d+(k*72|0)+3544>>2]|0)==(f|0):0){e=-1094995529;j=8;break}k=k+1|0}while(k>>>0<32);if((j|0)==8){i=g;return e|0}j=uc(d)|0;if(!j){k=-12;i=g;return k|0}c[e>>2]=c[j>>2];c[d+3508>>2]=j;a[j+70>>0]=(a[d+2438>>0]|0)==0?2:3;c[j+32>>2]=f;b[j+68>>1]=b[h>>1]|0;k=j+40|0;j=(c[d+200>>2]|0)+20|0;c[k+0>>2]=c[j+0>>2];c[k+4>>2]=c[j+4>>2];c[k+8>>2]=c[j+8>>2];c[k+12>>2]=c[j+12>>2];k=0;i=g;return k|0}function uc(a){a=a|0;var b=0,d=0,e=0,f=0,g=0,h=0,j=0,k=0;b=i;f=0;while(1){d=a+(f*72|0)+3512|0;j=f+1|0;if(!(c[(c[d>>2]|0)+304>>2]|0))break;if(j>>>0<32)f=j;else{a=0;e=13;break}}if((e|0)==13){i=b;return a|0}if((Pd(c[a+4>>2]|0,a+(f*72|0)+3516|0,1)|0)<0){j=0;i=b;return j|0}k=c[a+200>>2]|0;j=a+(f*72|0)+3540|0;c[j>>2]=$(c[k+13132>>2]|0,c[k+13128>>2]|0)|0;k=se((c[a+7660>>2]|0)*392|0)|0;e=a+(f*72|0)+3576|0;c[e>>2]=k;if(((k|0)!=0?(g=ye(c[a+1428>>2]|0)|0,c[a+(f*72|0)+3568>>2]=g,(g|0)!=0):0)?(c[a+(f*72|0)+3528>>2]=c[g+4>>2],h=ye(c[a+1432>>2]|0)|0,c[a+(f*72|0)+3572>>2]=h,(h|0)!=0):0){h=c[h+4>>2]|0;f=a+(f*72|0)+3536|0;c[f>>2]=h;g=c[j>>2]|0;a:do if((g|0)>0){j=0;while(1){c[h+(j<<2)>>2]=c[(c[e>>2]|0)+4>>2];j=j+1|0;if((j|0)>=(g|0))break a;h=c[f>>2]|0}}while(0);j=a+7772|0;k=c[d>>2]|0;c[k+244>>2]=(c[j>>2]|0)==1&1;c[k+240>>2]=((c[j>>2]|0)+ -1|0)>>>0<2&1;k=d;i=b;return k|0}pc(a,d,-1);k=0;i=b;return k|0}function vc(d,e,f){d=d|0;e=e|0;f=f|0;var 
g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0;g=i;k=d+3034|0;j=d+5816|0;h=d+7618|0;f=(f|0)==0;n=d+7616|0;m=d+200|0;o=0;while(1){if((a[k>>0]|0)==1){q=0;do{p=d+(q*72|0)+3512|0;if(((a[d+(q*72|0)+3582>>0]&8)==0?(c[d+(q*72|0)+3544>>2]|0)!=(c[j>>2]|0):0)?(b[d+(q*72|0)+3580>>1]|0)==(b[h>>1]|0):0)pc(d,p,1);q=q+1|0}while((q|0)!=32);p=0;r=2147483647;q=0}else{p=0;r=2147483647;q=0}do{if((a[d+(p*72|0)+3582>>0]&1)!=0?(b[d+(p*72|0)+3580>>1]|0)==(b[h>>1]|0):0){s=c[d+(p*72|0)+3544>>2]|0;t=(s|0)<(r|0);o=t?p:o;r=t?s:r;q=q+1|0}p=p+1|0}while((p|0)!=32);if(((f?(b[h>>1]|0)==(b[n>>1]|0):0)?(l=c[m>>2]|0,(l|0)!=0):0)?(q|0)<=(c[l+(((c[l+72>>2]|0)+ -1|0)*12|0)+80>>2]|0):0){d=0;h=23;break}if(q){h=17;break}p=b[h>>1]|0;if(p<<16>>16==(b[n>>1]|0)){d=0;h=23;break}b[h>>1]=(p&65535)+1&255}if((h|0)==17){h=d+(o*72|0)+3512|0;e=Fe(e,c[h>>2]|0)|0;if(!(a[d+(o*72|0)+3582>>0]&8))pc(d,h,1);else pc(d,h,9);t=(e|0)<0?e:1;i=g;return t|0}else if((h|0)==23){i=g;return d|0}return 0}function wc(e){e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0;g=i;f=e+7618|0;h=e+5816|0;k=0;j=0;do{if((a[e+(j*72|0)+3582>>0]|0)!=0?(b[e+(j*72|0)+3580>>1]|0)==(b[f>>1]|0):0)k=((c[e+(j*72|0)+3544>>2]|0)!=(c[h>>2]|0)&1)+k|0;j=j+1|0}while((j|0)!=32);j=c[e+200>>2]|0;if(!j){i=g;return}if((k|0)<(c[j+(((c[j+72>>2]|0)+ -1|0)*12|0)+76>>2]|0)){i=g;return}else{k=0;j=2147483647}do{l=a[e+(k*72|0)+3582>>0]|0;if(l<<24>>24!=0?(b[e+(k*72|0)+3580>>1]|0)==(b[f>>1]|0):0){m=c[e+(k*72|0)+3544>>2]|0;j=(l<<24>>24==1?(m|0)!=(c[h>>2]|0):0)&(m|0)<(j|0)?m:j}k=k+1|0}while((k|0)!=32);l=0;do{k=e+(l*72|0)+3582|0;h=d[k>>0]|0;if(((h&1|0)!=0?(b[e+(l*72|0)+3580>>1]|0)==(b[f>>1]|0):0)?(c[e+(l*72|0)+3544>>2]|0)<=(j|0):0)a[k>>0]=h|8;l=l+1|0}while((l|0)!=32);i=g;return}function xc(b){b=b|0;var 
d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0;d=i;i=i+208|0;e=d+12|0;g=d;l=(c[b+2428>>2]|0)==0;f=b+3508|0;k=c[f>>2]|0;o=c[k+28>>2]|0;j=c[(c[(c[b+204>>2]|0)+1668>>2]|0)+(c[b+2420>>2]<<2)>>2]|0;n=c[b+5824>>2]|0;m=k+64|0;r=c[m>>2]|0;if(n>>>0>=(((c[r+8>>2]|0)>>>0)/392|0)>>>0){A=-1094995529;i=d;return A|0}p=k+24|0;if((j|0)<(o|0)?(c[(c[p>>2]|0)+(j<<2)>>2]=(c[r+4>>2]|0)+(n*392|0),q=j+1|0,(q|0)!=(o|0)):0)do{c[(c[p>>2]|0)+(q<<2)>>2]=(c[(c[m>>2]|0)+4>>2]|0)+(n*392|0);q=q+1|0}while((q|0)!=(o|0));c[k+20>>2]=c[(c[p>>2]|0)+(j<<2)>>2];if(((c[b+1824>>2]|0)+(c[b+1628>>2]|0)|0)==(0-(c[b+2216>>2]|0)|0)){A=-1094995529;i=d;return A|0}q=l?2:1;n=g+4|0;m=g+8|0;o=e+192|0;r=b+3051|0;s=b+3052|0;k=0;l=0;a:while(1){jf(e|0,0,196)|0;j=c[(c[f>>2]|0)+20>>2]|0;u=j+(k*196|0)|0;p=l<<24>>24!=0&1;c[g>>2]=p;c[n>>2]=p^1;c[m>>2]=3;p=b+(k<<2)+3036|0;t=c[p>>2]|0;if(!t)A=0;else{A=0;v=0;while(1){y=c[g+(v<<2)>>2]|0;x=c[b+(y*196|0)+1628>>2]|0;b:do if((x|0)>0){w=(v|0)==2&1;z=0;do{if((A|0)>=16)break b;c[e+(A<<2)+64>>2]=c[b+(y*196|0)+(z<<2)+1500>>2];c[e+(c[o>>2]<<2)>>2]=c[b+(y*196|0)+(z<<2)+1436>>2];c[e+(c[o>>2]<<2)+128>>2]=w;A=(c[o>>2]|0)+1|0;c[o>>2]=A;z=z+1|0}while((z|0)<(x|0))}while(0);v=v+1|0;if((v|0)!=3)continue;if(A>>>0<t>>>0)v=0;else break}}if(a[b+k+3032>>0]|0){if(t){t=j+(k*196|0)+192|0;v=0;do{u=c[b+(k<<7)+(v<<2)+2776>>2]|0;if((u|0)>=(A|0)){e=-1094995529;b=24;break a}c[j+(k*196|0)+(v<<2)+64>>2]=c[e+(u<<2)+64>>2];c[j+(k*196|0)+(v<<2)>>2]=c[e+(u<<2)>>2];c[j+(k*196|0)+(v<<2)+128>>2]=c[e+(u<<2)+128>>2];c[t>>2]=(c[t>>2]|0)+1;v=v+1|0}while(v>>>0<(c[p>>2]|0)>>>0)}}else{mf(u|0,e|0,196)|0;A=j+(k*196|0)+192|0;z=c[A>>2]|0;y=c[p>>2]|0;c[A>>2]=z>>>0>y>>>0?y:z}if((a[r>>0]|0)==l<<24>>24?(h=c[s>>2]|0,h>>>0<(c[j+(k*196|0)+192>>2]|0)>>>0):0)c[(c[f>>2]|0)+36>>2]=c[j+(k*196|0)+(h<<2)>>2];l=l+1<<24>>24;k=l&255;if(k>>>0>=q>>>0){e=0;b=24;break}}if((b|0)==24){i=d;return e|0}return 0}function yc(b){b=b|0;var 
e=0,f=0,g=0,h=0,j=0,k=0;e=i;f=c[b+2608>>2]|0;if(!f){c[b+1824>>2]=0;c[b+1628>>2]=0;k=0;i=e;return k|0}h=b+3508|0;g=0;do{if((b+(g*72|0)+3512|0)!=(c[h>>2]|0)){k=b+(g*72|0)+3582|0;a[k>>0]=d[k>>0]&249}g=g+1|0}while((g|0)!=32);c[b+1628>>2]=0;c[b+1824>>2]=0;c[b+2020>>2]=0;c[b+2216>>2]=0;c[b+2412>>2]=0;j=f+4|0;a:do if((c[j>>2]|0)>0){g=b+5816|0;h=0;while(1){if(!(a[f+h+136>>0]|0))k=2;else k=h>>>0>=(c[f>>2]|0)>>>0&1;k=zc(b,b+(k*196|0)+1436|0,(c[f+(h<<2)+8>>2]|0)+(c[g>>2]|0)|0,2)|0;h=h+1|0;if((k|0)<0)break;if((h|0)>=(c[j>>2]|0))break a}i=e;return k|0}while(0);f=b+2772|0;b:do if(!(a[f>>0]|0))f=0;else{g=0;while(1){k=zc(b,b+(((a[b+g+2740>>0]|0)!=0?3:4)*196|0)+1436|0,c[b+(g<<2)+2612>>2]|0,4)|0;g=g+1|0;if((k|0)<0)break;if((g|0)>=(d[f>>0]|0)){f=0;break b}}i=e;return k|0}while(0);do{pc(b,b+(f*72|0)+3512|0,0);f=f+1|0}while((f|0)!=32);k=0;i=e;return k|0}function zc(e,f,g,h){e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0;j=i;k=e+200|0;o=(1<<c[(c[k>>2]|0)+64>>2])+ -1|0;l=e+7616|0;q=0;while(1){m=e+(q*72|0)+3512|0;if(((c[(c[m>>2]|0)+304>>2]|0)!=0?(b[e+(q*72|0)+3580>>1]|0)==(b[l>>1]|0):0)?(c[e+(q*72|0)+3544>>2]&o|0)==(g|0):0)break;q=q+1|0;if(q>>>0>=32){q=0;p=6;break}}a:do if((p|0)==6)while(1){m=e+(q*72|0)+3512|0;if((c[(c[m>>2]|0)+304>>2]|0)!=0?(b[e+(q*72|0)+3580>>1]|0)==(b[l>>1]|0):0){p=c[e+(q*72|0)+3544>>2]|0;if((p|0)==(g|0))break a;if((p&o|0)==(g|0))break a}q=q+1|0;if(q>>>0>=32){m=0;break}else p=6}while(0);if((m|0)==(c[e+3508>>2]|0)){q=-1094995529;i=j;return q|0}if(!m){m=uc(e)|0;if(!m){q=-12;i=j;return q|0}p=c[k>>2]|0;o=c[m>>2]|0;if(!(c[p+56>>2]|0)){o=c[o+304>>2]|0;if((o|0)!=0?(jf(c[o+4>>2]|0,1<<(c[p+52>>2]|0)+ -1&255|0,c[o+8>>2]|0)|0,n=c[(c[m>>2]|0)+308>>2]|0,(n|0)!=0):0){o=1;do{o=o+1|0;jf(c[n+4>>2]|0,1<<(c[(c[k>>2]|0)+52>>2]|0)+ -1&255|0,c[n+8>>2]|0)|0;n=c[(c[m>>2]|0)+(o<<2)+304>>2]|0}while((n|0)!=0)}}else if(c[o>>2]|0){n=0;do{if((c[p+13124>>2]>>c[p+(n<<2)+13180>>2]|0)>0){o=0;do{if((c[p+13120>>2]>>c[p+(n<<2)+13168>>2]|0)>0){q=p;p=0;do{r=1<<(c[q+52>>2]|0)+ 
-1&65535;q=c[m>>2]|0;q=(c[q+(n<<2)>>2]|0)+(($(c[q+(n<<2)+32>>2]|0,o)|0)+(p<<1))|0;a[q>>0]=r;a[q+1>>0]=r>>8;p=p+1|0;q=c[k>>2]|0}while((p|0)<(c[q+13120>>2]>>c[q+(n<<2)+13168>>2]|0));p=q}o=o+1|0}while((o|0)<(c[p+13124>>2]>>c[p+(n<<2)+13180>>2]|0));o=c[m>>2]|0}n=n+1|0}while((c[o+(n<<2)>>2]|0)!=0)}c[m+32>>2]=g;b[m+68>>1]=b[l>>1]|0;a[m+70>>0]=0}r=f+192|0;c[f+(c[r>>2]<<2)+64>>2]=c[m+32>>2];c[f+(c[r>>2]<<2)>>2]=m;c[r>>2]=(c[r>>2]|0)+1;r=m+70|0;a[r>>0]=d[r>>0]&249|h;r=0;i=j;return r|0}function Ac(a,b){a=a|0;b=b|0;var d=0,e=0,f=0,g=0;d=i;e=1<<c[(c[a+200>>2]|0)+64>>2];f=c[a+5820>>2]|0;g=(f|0)%(e|0)|0;f=f-g|0;if((g|0)>(b|0)?(g-b|0)>=((e|0)/2|0|0):0)f=f+e|0;else if((g|0)<(b|0))f=f-((b-g|0)>((e|0)/2|0|0)?e:0)|0;i=d;return(((c[a+3500>>2]|0)+ -16|0)>>>0<3?0:f)+b|0}function Bc(b){b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0;d=i;e=c[b+2608>>2]|0;if(e){j=c[e>>2]|0;if(!j){g=0;h=0}else{g=j>>>0>1;h=0;f=0;do{f=((a[e+h+136>>0]|0)!=0&1)+f|0;h=h+1|0}while(h>>>0<j>>>0);g=g?j:1;h=f}f=c[e+4>>2]|0;if((g|0)<(f|0))do{h=((a[e+g+136>>0]|0)!=0&1)+h|0;g=g+1|0}while((g|0)<(f|0))}else h=0;e=a[b+2772>>0]|0;if(!(e<<24>>24)){j=h;i=d;return j|0}f=e&255;e=0;do{h=((a[b+e+2740>>0]|0)!=0&1)+h|0;e=e+1|0}while((e|0)<(f|0));i=d;return h|0}function Cc(){var b=0,c=0,d=0,e=0,f=0;b=i;if(!(a[1752]|0))c=0;else{i=b;return}do{d=0;do{f=($(d<<1|1,c)|0)&127;e=f>>>0>63;f=e?f+ -64|0:f;e=e?-1:1;if((f|0)>31){f=64-f|0;e=0-e|0}a[1752+(c<<5)+d>>0]=$(a[2776+f>>0]|0,e)|0;d=d+1|0}while((d|0)!=32);c=c+1|0}while((c|0)!=32);i=b;return}function 
Dc(a,b){a=a|0;b=b|0;c[a>>2]=1;c[a+4>>2]=1;c[a+8>>2]=2;c[a+12>>2]=3;c[a+16>>2]=4;c[a+20>>2]=1;c[a+24>>2]=5;c[a+28>>2]=2;c[a+32>>2]=2;c[a+36>>2]=3;c[a+40>>2]=4;c[a+44>>2]=5;c[a+48>>2]=3;c[a+52>>2]=4;c[a+56>>2]=5;c[a+60>>2]=6;c[a+236>>2]=1;c[a+252>>2]=1;c[a+268>>2]=1;c[a+284>>2]=1;c[a+300>>2]=1;c[a+316>>2]=1;c[a+332>>2]=1;c[a+348>>2]=1;c[a+364>>2]=1;c[a+380>>2]=1;c[a+240>>2]=2;c[a+256>>2]=2;c[a+272>>2]=2;c[a+288>>2]=2;c[a+304>>2]=2;c[a+320>>2]=2;c[a+336>>2]=2;c[a+352>>2]=2;c[a+368>>2]=2;c[a+384>>2]=2;c[a+244>>2]=3;c[a+260>>2]=3;c[a+276>>2]=3;c[a+292>>2]=3;c[a+308>>2]=3;c[a+324>>2]=3;c[a+340>>2]=3;c[a+356>>2]=3;c[a+372>>2]=3;c[a+388>>2]=3;c[a+248>>2]=4;c[a+264>>2]=4;c[a+280>>2]=4;c[a+296>>2]=4;c[a+312>>2]=4;c[a+328>>2]=4;c[a+344>>2]=4;c[a+360>>2]=4;c[a+376>>2]=4;c[a+392>>2]=4;c[a+396>>2]=1;c[a+412>>2]=1;c[a+428>>2]=1;c[a+444>>2]=1;c[a+460>>2]=1;c[a+476>>2]=1;c[a+492>>2]=1;c[a+508>>2]=1;c[a+524>>2]=1;c[a+540>>2]=1;c[a+400>>2]=2;c[a+416>>2]=2;c[a+432>>2]=2;c[a+448>>2]=2;c[a+464>>2]=2;c[a+480>>2]=2;c[a+496>>2]=2;c[a+512>>2]=2;c[a+528>>2]=2;c[a+544>>2]=2;c[a+404>>2]=3;c[a+420>>2]=3;c[a+436>>2]=3;c[a+452>>2]=3;c[a+468>>2]=3;c[a+484>>2]=3;c[a+500>>2]=3;c[a+516>>2]=3;c[a+532>>2]=3;c[a+548>>2]=3;c[a+408>>2]=4;c[a+424>>2]=4;c[a+440>>2]=4;c[a+456>>2]=4;c[a+472>>2]=4;c[a+488>>2]=4;c[a+504>>2]=4;c[a+520>>2]=4;c[a+536>>2]=4;c[a+552>>2]=4;c[a+1036>>2]=1;c[a+1052>>2]=1;c[a+1068>>2]=1;c[a+1084>>2]=1;c[a+1100>>2]=1;c[a+1116>>2]=1;c[a+1132>>2]=1;c[a+1148>>2]=1;c[a+1164>>2]=1;c[a+1180>>2]=1;c[a+1040>>2]=5;c[a+1056>>2]=5;c[a+1072>>2]=5;c[a+1088>>2]=5;c[a+1104>>2]=5;c[a+1120>>2]=5;c[a+1136>>2]=5;c[a+1152>>2]=5;c[a+1168>>2]=5;c[a+1184>>2]=5;c[a+1044>>2]=6;c[a+1060>>2]=6;c[a+1076>>2]=6;c[a+1092>>2]=6;c[a+1108>>2]=6;c[a+1124>>2]=6;c[a+1140>>2]=6;c[a+1156>>2]=6;c[a+1172>>2]=6;c[a+1188>>2]=6;c[a+1048>>2]=7;c[a+1064>>2]=7;c[a+1080>>2]=7;c[a+1096>>2]=7;c[a+1112>>2]=7;c[a+1128>>2]=7;c[a+1144>>2]=7;c[a+1160>>2]=7;c[a+1176>>2]=7;c[a+1192>>2]=7;c[a+1196>>2]=1;c[a+1212>>2]=1;c[a+1228>>2]=1;c[a+1244>>2]
=1;c[a+1260>>2]=1;c[a+1276>>2]=1;c[a+1292>>2]=1;c[a+1308>>2]=1;c[a+1324>>2]=1;c[a+1340>>2]=1;c[a+1200>>2]=5;c[a+1216>>2]=5;c[a+1232>>2]=5;c[a+1248>>2]=5;c[a+1264>>2]=5;c[a+1280>>2]=5;c[a+1296>>2]=5;c[a+1312>>2]=5;c[a+1328>>2]=5;c[a+1344>>2]=5;c[a+1204>>2]=6;c[a+1220>>2]=6;c[a+1236>>2]=6;c[a+1252>>2]=6;c[a+1268>>2]=6;c[a+1284>>2]=6;c[a+1300>>2]=6;c[a+1316>>2]=6;c[a+1332>>2]=6;c[a+1348>>2]=6;c[a+1208>>2]=7;c[a+1224>>2]=7;c[a+1240>>2]=7;c[a+1256>>2]=7;c[a+1272>>2]=7;c[a+1288>>2]=7;c[a+1304>>2]=7;c[a+1320>>2]=7;c[a+1336>>2]=7;c[a+1352>>2]=7;c[a+64>>2]=1;c[a+68>>2]=1;c[a+72>>2]=2;c[a+1676>>2]=2;c[a+1680>>2]=3;c[a+1684>>2]=1;c[a+1688>>2]=2;c[a+1692>>2]=2;c[a+1696>>2]=3;c[a+1700>>2]=1;c[a+1704>>2]=2;return}function Ec(b,c,d,e,f,g){b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0;h=i;if((e|0)<=0){i=h;return}k=(d|0)>0;j=8-g|0;m=0;while(1){if(k){l=0;do{a[b+l>>0]=(Xd(f,g)|0)<<j;l=l+1|0}while((l|0)!=(d|0))}m=m+1|0;if((m|0)==(e|0))break;else b=b+c|0}i=h;return}function Fc(c,e,f){c=c|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0;g=i;h=0;while(1){j=e;k=0;while(1){l=c+k|0;m=(b[j>>1]|0)+(d[l>>0]|0)|0;if(m>>>0>255)m=0-m>>31;a[l>>0]=m;k=k+1|0;if((k|0)==4)break;else j=j+2|0}h=h+1|0;if((h|0)==4)break;else{e=e+8|0;c=c+f|0}}i=g;return}function Gc(c,e,f){c=c|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0;g=i;h=0;while(1){j=e;k=0;while(1){l=c+k|0;m=(b[j>>1]|0)+(d[l>>0]|0)|0;if(m>>>0>255)m=0-m>>31;a[l>>0]=m;k=k+1|0;if((k|0)==8)break;else j=j+2|0}h=h+1|0;if((h|0)==8)break;else{e=e+16|0;c=c+f|0}}i=g;return}function Hc(c,e,f){c=c|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0;g=i;h=0;while(1){j=e;k=0;while(1){l=c+k|0;m=(b[j>>1]|0)+(d[l>>0]|0)|0;if(m>>>0>255)m=0-m>>31;a[l>>0]=m;k=k+1|0;if((k|0)==16)break;else j=j+2|0}h=h+1|0;if((h|0)==16)break;else{e=e+32|0;c=c+f|0}}i=g;return}function Ic(c,e,f){c=c|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0;g=i;h=0;while(1){j=e;k=0;while(1){l=c+k|0;m=(b[j>>1]|0)+(d[l>>0]|0)|0;if(m>>>0>255)m=0-m>>31;a[l>>0]=m;k=k+1|0;if((k|0)==32)break;else 
j=j+2|0}h=h+1|0;if((h|0)==32)break;else{e=e+64|0;c=c+f|0}}i=g;return}function Jc(a,c){a=a|0;c=c|0;var d=0,e=0,f=0,g=0,h=0,j=0;d=i;c=c<<16>>16;e=7-c|0;c=1<<c;if((e|0)>0){f=1<<e+ -1;if((c|0)>0)g=0;else{i=d;return}while(1){h=a;j=0;while(1){b[h>>1]=(b[h>>1]|0)+f>>e;j=j+1|0;if((j|0)==(c|0))break;else h=h+2|0}g=g+1|0;if((g|0)==(c|0))break;else a=a+(c<<1)|0}i=d;return}if((c|0)<=0){i=d;return}e=0-e|0;f=0;while(1){g=a;h=0;while(1){b[g>>1]=b[g>>1]<<e;h=h+1|0;if((h|0)==(c|0))break;else g=g+2|0}f=f+1|0;if((f|0)==(c|0))break;else a=a+(c<<1)|0}i=d;return}function Kc(a,c,d){a=a|0;c=c|0;d=d|0;var f=0,g=0,h=0,j=0,k=0;f=i;c=1<<(c<<16>>16);if(d){d=c+ -1|0;if((d|0)<=0){i=f;return}g=(c|0)>0;h=0;do{if(g){j=0;do{k=a+(j+c<<1)|0;b[k>>1]=(e[k>>1]|0)+(e[a+(j<<1)>>1]|0);j=j+1|0}while((j|0)!=(c|0))}a=a+(c<<1)|0;h=h+1|0}while((h|0)!=(d|0));i=f;return}if((c|0)<=0){i=f;return}d=(c|0)>1;h=0;while(1){if(d){j=b[a>>1]|0;g=1;do{k=a+(g<<1)|0;j=(e[k>>1]|0)+(j&65535)&65535;b[k>>1]=j;g=g+1|0}while((g|0)!=(c|0))}h=h+1|0;if((h|0)==(c|0))break;else a=a+(c<<1)|0}i=f;return}function Lc(a){a=a|0;var c=0,d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0;c=i;e=0;d=a;while(1){p=b[d>>1]|0;m=d+16|0;n=b[m>>1]|0;g=n+p|0;f=d+24|0;o=b[f>>1]|0;l=o+n|0;j=p-o|0;h=d+8|0;k=(b[h>>1]|0)*74|0;o=((p-n+o|0)*74|0)+64|0;n=o>>7;if((n+32768|0)>>>0>65535)n=o>>31^32767;b[m>>1]=n;m=(g*29|0)+64+(l*55|0)+k|0;n=m>>7;if((n+32768|0)>>>0>65535)n=m>>31^32767;b[d>>1]=n;l=($(l,-29)|0)+64+(j*55|0)+k|0;m=l>>7;if((m+32768|0)>>>0>65535)m=l>>31^32767;b[h>>1]=m;g=(g*55|0)+64+(j*29|0)-k|0;h=g>>7;if((h+32768|0)>>>0>65535)h=g>>31^32767;b[f>>1]=h;e=e+1|0;if((e|0)==4){d=0;break}else 
d=d+2|0}while(1){p=b[a>>1]|0;l=a+4|0;m=b[l>>1]|0;g=m+p|0;e=a+6|0;n=b[e>>1]|0;k=n+m|0;h=p-n|0;f=a+2|0;j=(b[f>>1]|0)*74|0;n=((p-m+n|0)*74|0)+2048|0;m=n>>12;if((m+32768|0)>>>0>65535)m=n>>31^32767;b[l>>1]=m;l=(g*29|0)+2048+(k*55|0)+j|0;m=l>>12;if((m+32768|0)>>>0>65535)m=l>>31^32767;b[a>>1]=m;k=($(k,-29)|0)+2048+(h*55|0)+j|0;l=k>>12;if((l+32768|0)>>>0>65535)l=k>>31^32767;b[f>>1]=l;f=(g*55|0)+2048+(h*29|0)-j|0;g=f>>12;if((g+32768|0)>>>0>65535)g=f>>31^32767;b[e>>1]=g;d=d+1|0;if((d|0)==4)break;else a=a+8|0}i=c;return}function Mc(a,c){a=a|0;c=c|0;var d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0;c=i;f=0;e=a;while(1){l=b[e>>1]<<6;j=e+16|0;k=b[j>>1]<<6;g=k+l|0;k=l-k|0;l=e+8|0;m=b[l>>1]|0;d=e+24|0;n=b[d>>1]|0;h=(n*36|0)+(m*83|0)|0;m=($(n,-83)|0)+(m*36|0)|0;n=g+64+h|0;o=n>>7;if((o+32768|0)>>>0>65535)o=n>>31^32767;b[e>>1]=o;o=k+64+m|0;n=o>>7;if((n+32768|0)>>>0>65535)n=o>>31^32767;b[l>>1]=n;l=k-m+64|0;k=l>>7;if((k+32768|0)>>>0>65535)k=l>>31^32767;b[j>>1]=k;h=g-h+64|0;g=h>>7;if((g+32768|0)>>>0>65535)g=h>>31^32767;b[d>>1]=g;f=f+1|0;if((f|0)==4){e=0;break}else e=e+2|0}while(1){k=b[a>>1]<<6;h=a+4|0;l=b[h>>1]<<6;g=l+k|0;l=k-l|0;k=a+2|0;j=b[k>>1]|0;d=a+6|0;m=b[d>>1]|0;f=(m*36|0)+(j*83|0)|0;j=($(m,-83)|0)+(j*36|0)|0;m=g+2048+f|0;n=m>>12;if((n+32768|0)>>>0>65535)n=m>>31^32767;b[a>>1]=n;m=l+2048+j|0;n=m>>12;if((n+32768|0)>>>0>65535)n=m>>31^32767;b[k>>1]=n;k=l-j+2048|0;j=k>>12;if((j+32768|0)>>>0>65535)j=k>>31^32767;b[h>>1]=j;f=g-f+2048|0;g=f>>12;if((g+32768|0)>>>0>65535)g=f>>31^32767;b[d>>1]=g;e=e+1|0;if((e|0)==4)break;else a=a+8|0}i=c;return}function Nc(d,e){d=d|0;e=e|0;var 
f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0;h=i;i=i+64|0;j=h+48|0;p=h+32|0;f=h+16|0;g=h;q=(e|0)>8;r=e+4|0;k=j+4|0;l=j+8|0;m=j+12|0;o=0;r=(r|0)>8?8:r;n=d;while(1){c[p+0>>2]=0;c[p+4>>2]=0;c[p+8>>2]=0;c[p+12>>2]=0;w=(r|0)>1;s=0;do{if(w){t=p+(s<<2)|0;v=c[t>>2]|0;u=1;do{v=($(b[n+(u<<3<<1)>>1]|0,a[1752+(u<<2<<5)+s>>0]|0)|0)+v|0;u=u+2|0}while((u|0)<(r|0));c[t>>2]=v}s=s+1|0}while((s|0)!=4);v=b[n>>1]<<6;u=b[n+64>>1]<<6;w=u+v|0;u=v-u|0;v=b[n+32>>1]|0;t=b[n+96>>1]|0;s=(t*36|0)+(v*83|0)|0;v=($(t,-83)|0)+(v*36|0)|0;t=s+w|0;c[j>>2]=t;c[k>>2]=v+u;c[l>>2]=u-v;c[m>>2]=w-s;s=0;while(1){u=c[p+(s<<2)>>2]|0;v=t+64+u|0;w=v>>7;if((w+32768|0)>>>0>65535)w=v>>31^32767;b[n+(s<<3<<1)>>1]=w;t=t-u+64|0;u=t>>7;if((u+32768|0)>>>0>65535)u=t>>31^32767;b[n+(7-s<<3<<1)>>1]=u;s=s+1|0;if((s|0)==4)break;t=c[j+(s<<2)>>2]|0}if((r|0)<8)r=(o&3|0)==0&(o|0)!=0?r+ -4|0:r;o=o+1|0;if((o|0)==8)break;else n=n+2|0}j=q?8:e;n=(j|0)>1;k=f+4|0;l=f+8|0;m=f+12|0;o=0;while(1){c[g+0>>2]=0;c[g+4>>2]=0;c[g+8>>2]=0;c[g+12>>2]=0;e=0;do{if(n){r=g+(e<<2)|0;p=c[r>>2]|0;q=1;do{p=($(b[d+(q<<1)>>1]|0,a[1752+(q<<2<<5)+e>>0]|0)|0)+p|0;q=q+2|0}while((q|0)<(j|0));c[r>>2]=p}e=e+1|0}while((e|0)!=4);v=b[d>>1]<<6;u=b[d+8>>1]<<6;w=u+v|0;u=v-u|0;v=b[d+4>>1]|0;p=b[d+12>>1]|0;e=(p*36|0)+(v*83|0)|0;v=($(p,-83)|0)+(v*36|0)|0;p=e+w|0;c[f>>2]=p;c[k>>2]=v+u;c[l>>2]=u-v;c[m>>2]=w-e;e=0;while(1){q=c[g+(e<<2)>>2]|0;r=p+2048+q|0;s=r>>12;if((s+32768|0)>>>0>65535)s=r>>31^32767;b[d+(e<<1)>>1]=s;p=p-q+2048|0;q=p>>12;if((q+32768|0)>>>0>65535)q=p>>31^32767;b[d+(7-e<<1)>>1]=q;e=e+1|0;if((e|0)==4)break;p=c[f+(e<<2)>>2]|0}o=o+1|0;if((o|0)==8)break;else d=d+16|0}i=h;return}function Oc(d,e){d=d|0;e=e|0;var 
f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0;j=i;i=i+192|0;t=j+160|0;u=j+128|0;m=j+112|0;l=j+96|0;g=j+64|0;h=j+32|0;f=j+16|0;k=j;s=(e|0)>16;v=e+4|0;n=m+4|0;o=m+8|0;p=m+12|0;r=0;v=(v|0)>16?16:v;q=d;while(1){c[u+0>>2]=0;c[u+4>>2]=0;c[u+8>>2]=0;c[u+12>>2]=0;c[u+16>>2]=0;c[u+20>>2]=0;c[u+24>>2]=0;c[u+28>>2]=0;A=(v|0)>1;z=0;do{if(A){y=u+(z<<2)|0;w=c[y>>2]|0;x=1;do{w=($(b[q+(x<<4<<1)>>1]|0,a[1752+(x<<1<<5)+z>>0]|0)|0)+w|0;x=x+2|0}while((x|0)<(v|0));c[y>>2]=w}z=z+1|0}while((z|0)!=8);c[l+0>>2]=0;c[l+4>>2]=0;c[l+8>>2]=0;c[l+12>>2]=0;z=0;do{x=l+(z<<2)|0;w=c[x>>2]|0;y=1;do{w=($(b[q+(y<<5<<1)>>1]|0,a[1752+(y<<2<<5)+z>>0]|0)|0)+w|0;y=y+2|0}while((y|0)<8);c[x>>2]=w;z=z+1|0}while((z|0)!=4);z=b[q>>1]<<6;y=b[q+256>>1]<<6;A=y+z|0;y=z-y|0;z=b[q+128>>1]|0;w=b[q+384>>1]|0;x=(w*36|0)+(z*83|0)|0;z=($(w,-83)|0)+(z*36|0)|0;w=x+A|0;c[m>>2]=w;c[n>>2]=z+y;c[o>>2]=y-z;c[p>>2]=A-x;x=0;while(1){A=c[l+(x<<2)>>2]|0;c[t+(x<<2)>>2]=A+w;c[t+(7-x<<2)>>2]=w-A;x=x+1|0;if((x|0)==4){w=0;break}w=c[m+(x<<2)>>2]|0}do{x=c[t+(w<<2)>>2]|0;y=c[u+(w<<2)>>2]|0;A=x+64+y|0;z=A>>7;if((z+32768|0)>>>0>65535)z=A>>31^32767;b[q+(w<<4<<1)>>1]=z;x=x-y+64|0;y=x>>7;if((y+32768|0)>>>0>65535)y=x>>31^32767;b[q+(15-w<<4<<1)>>1]=y;w=w+1|0}while((w|0)!=8);if((v|0)<16)v=(r&3|0)==0&(r|0)!=0?v+ -4|0:v;r=r+1|0;if((r|0)==16)break;else 
q=q+2|0}o=s?16:e;p=(o|0)>1;l=f+4|0;m=f+8|0;n=f+12|0;q=0;while(1){c[h+0>>2]=0;c[h+4>>2]=0;c[h+8>>2]=0;c[h+12>>2]=0;c[h+16>>2]=0;c[h+20>>2]=0;c[h+24>>2]=0;c[h+28>>2]=0;r=0;do{if(p){e=h+(r<<2)|0;t=c[e>>2]|0;s=1;do{t=($(b[d+(s<<1)>>1]|0,a[1752+(s<<1<<5)+r>>0]|0)|0)+t|0;s=s+2|0}while((s|0)<(o|0));c[e>>2]=t}r=r+1|0}while((r|0)!=8);c[k+0>>2]=0;c[k+4>>2]=0;c[k+8>>2]=0;c[k+12>>2]=0;t=0;do{r=k+(t<<2)|0;s=c[r>>2]|0;e=1;do{s=($(b[d+(e<<1<<1)>>1]|0,a[1752+(e<<2<<5)+t>>0]|0)|0)+s|0;e=e+2|0}while((e|0)<8);c[r>>2]=s;t=t+1|0}while((t|0)!=4);z=b[d>>1]<<6;y=b[d+16>>1]<<6;A=y+z|0;y=z-y|0;z=b[d+8>>1]|0;r=b[d+24>>1]|0;e=(r*36|0)+(z*83|0)|0;z=($(r,-83)|0)+(z*36|0)|0;r=e+A|0;c[f>>2]=r;c[l>>2]=z+y;c[m>>2]=y-z;c[n>>2]=A-e;e=0;while(1){A=c[k+(e<<2)>>2]|0;c[g+(e<<2)>>2]=A+r;c[g+(7-e<<2)>>2]=r-A;e=e+1|0;if((e|0)==4){r=0;break}r=c[f+(e<<2)>>2]|0}do{e=c[g+(r<<2)>>2]|0;s=c[h+(r<<2)>>2]|0;u=e+2048+s|0;t=u>>12;if((t+32768|0)>>>0>65535)t=u>>31^32767;b[d+(r<<1)>>1]=t;e=e-s+2048|0;s=e>>12;if((s+32768|0)>>>0>65535)s=e>>31^32767;b[d+(15-r<<1)>>1]=s;r=r+1|0}while((r|0)!=8);q=q+1|0;if((q|0)==16)break;else d=d+32|0}i=j;return}function Pc(d,e){d=d|0;e=e|0;var 
f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0;m=i;i=i+320|0;g=m+256|0;l=m+192|0;o=m+160|0;s=m+128|0;u=m+112|0;t=m+96|0;f=m+64|0;j=m+32|0;h=m+16|0;k=m;q=(e|0)>32;x=e+4|0;v=u+4|0;w=u+8|0;n=u+12|0;p=0;x=(x|0)>32?32:x;r=d;while(1){y=l+0|0;z=y+64|0;do{c[y>>2]=0;y=y+4|0}while((y|0)<(z|0));B=(x|0)>1;A=0;do{if(B){z=l+(A<<2)|0;y=c[z>>2]|0;C=1;do{y=($(b[r+(C<<5<<1)>>1]|0,a[1752+(C<<5)+A>>0]|0)|0)+y|0;C=C+2|0}while((C|0)<(x|0));c[z>>2]=y}A=A+1|0}while((A|0)!=16);c[s+0>>2]=0;c[s+4>>2]=0;c[s+8>>2]=0;c[s+12>>2]=0;c[s+16>>2]=0;c[s+20>>2]=0;c[s+24>>2]=0;c[s+28>>2]=0;y=(x|0)/2|0;z=(x|0)>3;A=0;do{if(z){D=s+(A<<2)|0;B=c[D>>2]|0;C=1;do{B=($(b[r+(C<<6<<1)>>1]|0,a[1752+(C<<1<<5)+A>>0]|0)|0)+B|0;C=C+2|0}while((C|0)<(y|0));c[D>>2]=B}A=A+1|0}while((A|0)!=8);c[t+0>>2]=0;c[t+4>>2]=0;c[t+8>>2]=0;c[t+12>>2]=0;A=0;do{B=t+(A<<2)|0;z=c[B>>2]|0;y=1;do{z=($(b[r+(y<<7<<1)>>1]|0,a[1752+(y<<2<<5)+A>>0]|0)|0)+z|0;y=y+2|0}while((y|0)<8);c[B>>2]=z;A=A+1|0}while((A|0)!=4);C=b[r>>1]<<6;B=b[r+1024>>1]<<6;D=B+C|0;B=C-B|0;C=b[r+512>>1]|0;y=b[r+1536>>1]|0;z=(y*36|0)+(C*83|0)|0;C=($(y,-83)|0)+(C*36|0)|0;y=z+D|0;c[u>>2]=y;c[v>>2]=C+B;c[w>>2]=B-C;c[n>>2]=D-z;z=0;while(1){D=c[t+(z<<2)>>2]|0;c[o+(z<<2)>>2]=D+y;c[o+(7-z<<2)>>2]=y-D;z=z+1|0;if((z|0)==4){y=0;break}y=c[u+(z<<2)>>2]|0}do{C=c[o+(y<<2)>>2]|0;D=c[s+(y<<2)>>2]|0;c[g+(y<<2)>>2]=D+C;c[g+(15-y<<2)>>2]=C-D;y=y+1|0}while((y|0)!=8);y=0;do{z=c[g+(y<<2)>>2]|0;A=c[l+(y<<2)>>2]|0;B=z+64+A|0;C=B>>7;if((C+32768|0)>>>0>65535)C=B>>31^32767;b[r+(y<<5<<1)>>1]=C;z=z-A+64|0;A=z>>7;if((A+32768|0)>>>0>65535)A=z>>31^32767;b[r+(31-y<<5<<1)>>1]=A;y=y+1|0}while((y|0)!=16);if((x|0)<32)x=(p&3|0)==0&(p|0)!=0?x+ -4|0:x;p=p+1|0;if((p|0)==32)break;else 
r=r+2|0}p=q?32:e;o=(p|0)>1;n=(p|0)/2|0;q=(p|0)>3;s=h+4|0;r=h+8|0;e=h+12|0;t=0;while(1){y=l+0|0;z=y+64|0;do{c[y>>2]=0;y=y+4|0}while((y|0)<(z|0));v=0;do{if(o){w=l+(v<<2)|0;u=c[w>>2]|0;x=1;do{u=($(b[d+(x<<1)>>1]|0,a[1752+(x<<5)+v>>0]|0)|0)+u|0;x=x+2|0}while((x|0)<(p|0));c[w>>2]=u}v=v+1|0}while((v|0)!=16);c[j+0>>2]=0;c[j+4>>2]=0;c[j+8>>2]=0;c[j+12>>2]=0;c[j+16>>2]=0;c[j+20>>2]=0;c[j+24>>2]=0;c[j+28>>2]=0;x=0;do{if(q){u=j+(x<<2)|0;w=c[u>>2]|0;v=1;do{D=v<<1;w=($(b[d+(D<<1)>>1]|0,a[1752+(D<<5)+x>>0]|0)|0)+w|0;v=v+2|0}while((v|0)<(n|0));c[u>>2]=w}x=x+1|0}while((x|0)!=8);c[k+0>>2]=0;c[k+4>>2]=0;c[k+8>>2]=0;c[k+12>>2]=0;u=0;do{v=k+(u<<2)|0;x=c[v>>2]|0;w=1;do{D=w<<2;x=($(b[d+(D<<1)>>1]|0,a[1752+(D<<5)+u>>0]|0)|0)+x|0;w=w+2|0}while((w|0)<8);c[v>>2]=x;u=u+1|0}while((u|0)!=4);C=b[d>>1]<<6;B=b[d+32>>1]<<6;D=B+C|0;B=C-B|0;C=b[d+16>>1]|0;u=b[d+48>>1]|0;v=(u*36|0)+(C*83|0)|0;C=($(u,-83)|0)+(C*36|0)|0;u=v+D|0;c[h>>2]=u;c[s>>2]=C+B;c[r>>2]=B-C;c[e>>2]=D-v;v=0;while(1){D=c[k+(v<<2)>>2]|0;c[f+(v<<2)>>2]=D+u;c[f+(7-v<<2)>>2]=u-D;v=v+1|0;if((v|0)==4){u=0;break}u=c[h+(v<<2)>>2]|0}do{C=c[f+(u<<2)>>2]|0;D=c[j+(u<<2)>>2]|0;c[g+(u<<2)>>2]=D+C;c[g+(15-u<<2)>>2]=C-D;u=u+1|0}while((u|0)!=8);u=0;do{v=c[g+(u<<2)>>2]|0;w=c[l+(u<<2)>>2]|0;x=v+2048+w|0;y=x>>12;if((y+32768|0)>>>0>65535)y=x>>31^32767;b[d+(u<<1)>>1]=y;v=v-w+2048|0;w=v>>12;if((w+32768|0)>>>0>65535)w=v>>31^32767;b[d+(31-u<<1)>>1]=w;u=u+1|0}while((u|0)!=16);t=t+1|0;if((t|0)==32)break;else d=d+64|0}i=m;return}function Qc(a){a=a|0;var c=0,d=0,e=0,f=0;c=i;d=((((b[a>>1]|0)+1|0)>>>1)+32|0)>>>6&65535;e=0;do{f=e<<2;b[a+(f<<1)>>1]=d;b[a+((f|1)<<1)>>1]=d;b[a+((f|2)<<1)>>1]=d;b[a+((f|3)<<1)>>1]=d;e=e+1|0}while((e|0)!=4);i=c;return}function Rc(a){a=a|0;var c=0,d=0,e=0,f=0;c=i;d=((((b[a>>1]|0)+1|0)>>>1)+32|0)>>>6&65535;e=0;do{f=e<<3;b[a+(f<<1)>>1]=d;b[a+((f|1)<<1)>>1]=d;b[a+((f|2)<<1)>>1]=d;b[a+((f|3)<<1)>>1]=d;b[a+((f|4)<<1)>>1]=d;b[a+((f|5)<<1)>>1]=d;b[a+((f|6)<<1)>>1]=d;b[a+((f|7)<<1)>>1]=d;e=e+1|0}while((e|0)!=8);i=c;return}function Sc(a){a=a|0;var 
c=0,d=0,e=0,f=0,g=0;c=i;e=((((b[a>>1]|0)+1|0)>>>1)+32|0)>>>6&65535;d=0;do{f=d<<4;g=0;do{b[a+(g+f<<1)>>1]=e;g=g+1|0}while((g|0)!=16);d=d+1|0}while((d|0)!=16);i=c;return}function Tc(a){a=a|0;var c=0,d=0,e=0,f=0,g=0;c=i;e=((((b[a>>1]|0)+1|0)>>>1)+32|0)>>>6&65535;d=0;do{f=d<<5;g=0;do{b[a+(g+f<<1)>>1]=e;g=g+1|0}while((g|0)!=32);d=d+1|0}while((d|0)!=32);i=c;return}function Uc(a,b,c,d,e,f,g,h){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;f=i;if((e|0)>0)g=0;else{i=f;return}while(1){mf(a|0,c|0,h|0)|0;g=g+1|0;if((g|0)==(e|0))break;else{a=a+b|0;c=c+d|0}}i=f;return}function Vc(b,c,e,f,g,h,j,k){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0;j=i;s=h+ -1|0;l=2840+(s<<4)|0;if((g|0)<=0){i=j;return}h=(k|0)>0;p=2841+(s<<4)|0;q=2842+(s<<4)|0;r=2843+(s<<4)|0;o=2844+(s<<4)|0;n=2845+(s<<4)|0;m=2846+(s<<4)|0;s=2847+(s<<4)|0;w=0;while(1){if(h){t=a[l>>0]|0;y=a[p>>0]|0;v=a[q>>0]|0;A=a[r>>0]|0;B=a[o>>0]|0;C=a[n>>0]|0;D=a[m>>0]|0;z=a[s>>0]|0;u=0;do{K=$(d[e+(u+ -3)>>0]|0,t)|0;J=$(d[e+(u+ -2)>>0]|0,y)|0;I=$(d[e+(u+ -1)>>0]|0,v)|0;H=$(d[e+u>>0]|0,A)|0;x=u;u=u+1|0;G=$(d[e+u>>0]|0,B)|0;F=$(d[e+(x+2)>>0]|0,C)|0;E=$(d[e+(x+3)>>0]|0,D)|0;E=K+32+J+I+H+G+F+E+($(d[e+(x+4)>>0]|0,z)|0)>>6;if(E>>>0>255)E=0-E>>31;a[b+x>>0]=E}while((u|0)!=(k|0))}w=w+1|0;if((w|0)==(g|0))break;else{b=b+c|0;e=e+f|0}}i=j;return}function Wc(b,c,e,f,g,h,j,k){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0;h=i;u=j+ 
-1|0;j=2840+(u<<4)|0;if((g|0)<=0){i=h;return}l=(k|0)>0;q=f*3|0;t=2841+(u<<4)|0;r=f<<1;s=2842+(u<<4)|0;p=2843+(u<<4)|0;o=2844+(u<<4)|0;n=2845+(u<<4)|0;m=2846+(u<<4)|0;v=2847+(u<<4)|0;u=f<<2;B=0;while(1){if(l){y=a[j>>0]|0;D=a[t>>0]|0;E=a[s>>0]|0;F=a[p>>0]|0;C=a[o>>0]|0;z=a[n>>0]|0;x=a[m>>0]|0;w=a[v>>0]|0;A=0;do{M=$(d[e+(A-q)>>0]|0,y)|0;L=$(d[e+(A-r)>>0]|0,D)|0;K=$(d[e+(A-f)>>0]|0,E)|0;J=$(d[e+A>>0]|0,F)|0;I=$(d[e+(A+f)>>0]|0,C)|0;H=$(d[e+(A+r)>>0]|0,z)|0;G=$(d[e+(A+q)>>0]|0,x)|0;G=M+32+L+K+J+I+H+G+($(d[e+(A+u)>>0]|0,w)|0)>>6;if(G>>>0>255)G=0-G>>31;a[b+A>>0]=G;A=A+1|0}while((A|0)!=(k|0))}B=B+1|0;if((B|0)==(g|0))break;else{b=b+c|0;e=e+f|0}}i=h;return}function Xc(c,d,e,f,g,h,j,k){c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0;l=i;i=i+9088|0;m=l;v=h+ -1|0;n=2840+(v<<4)|0;h=g+7|0;if((h|0)>0){t=(k|0)>0;p=2841+(v<<4)|0;o=2842+(v<<4)|0;u=2843+(v<<4)|0;q=2844+(v<<4)|0;r=2845+(v<<4)|0;s=2846+(v<<4)|0;v=2847+(v<<4)|0;y=e+($(f,-3)|0)|0;z=m;A=0;while(1){if(t){E=a[n>>0]|0;D=a[p>>0]|0;F=a[o>>0]|0;G=a[u>>0]|0;H=a[q>>0]|0;I=a[r>>0]|0;e=a[s>>0]|0;K=a[v>>0]|0;w=a[y+ -2>>0]|0;x=a[y+ -1>>0]|0;B=a[y>>0]|0;C=a[y+1>>0]|0;M=a[y+2>>0]|0;L=a[y+3>>0]|0;N=a[y+ -3>>0]|0;J=0;while(1){O=($(w&255,D)|0)+($(N&255,E)|0)+($(x&255,F)|0)+($(B&255,G)|0)+($(C&255,H)|0)+($(M&255,I)|0)+($(L&255,e)|0)|0;N=a[y+(J+4)>>0]|0;b[z+(J<<1)>>1]=O+($(N&255,K)|0);J=J+1|0;if((J|0)==(k|0))break;else{S=L;R=M;Q=C;P=B;O=x;L=N;N=w;M=S;C=R;B=Q;x=P;w=O}}}A=A+1|0;if((A|0)==(h|0))break;else{y=y+f|0;z=z+128|0}}}s=j+ -1|0;j=2840+(s<<4)|0;if((g|0)<=0){i=l;return}f=(k|0)>0;h=2841+(s<<4)|0;n=2842+(s<<4)|0;o=2843+(s<<4)|0;p=2844+(s<<4)|0;q=2845+(s<<4)|0;r=2846+(s<<4)|0;s=2847+(s<<4)|0;u=m+384|0;v=0;while(1){if(f){e=a[j>>0]|0;m=a[h>>0]|0;x=a[n>>0]|0;y=a[o>>0]|0;z=a[p>>0]|0;A=a[q>>0]|0;B=a[r>>0]|0;w=a[s>>0]|0;t=0;do{C=$(b[u+(t+ -192<<1)>>1]|0,e)|0;C=($(b[u+(t+ -128<<1)>>1]|0,m)|0)+C|0;C=C+($(b[u+(t+ 
-64<<1)>>1]|0,x)|0)|0;C=C+($(b[u+(t<<1)>>1]|0,y)|0)|0;C=C+($(b[u+(t+64<<1)>>1]|0,z)|0)|0;C=C+($(b[u+(t+128<<1)>>1]|0,A)|0)|0;C=C+($(b[u+(t+192<<1)>>1]|0,B)|0)|0;C=(C+($(b[u+(t+256<<1)>>1]|0,w)|0)>>6)+32>>6;if(C>>>0>255)C=0-C>>31;a[c+t>>0]=C;t=t+1|0}while((t|0)!=(k|0))}v=v+1|0;if((v|0)==(g|0))break;else{c=c+d|0;u=u+128|0}}i=l;return}function Yc(b,c,e,f,g,h,j,k,l,m,n){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var o=0,p=0,q=0,r=0;m=i;h=h+6|0;l=1<<h>>1;if((g|0)<=0){i=m;return}o=(n|0)>0;j=j<<6;q=0;while(1){if(o){p=0;do{r=(($(j,d[e+p>>0]|0)|0)+l>>h)+k|0;if(r>>>0>255)r=0-r>>31;a[b+p>>0]=r;p=p+1|0}while((p|0)!=(n|0))}q=q+1|0;if((q|0)==(g|0))break;else{b=b+c|0;e=e+f|0}}i=m;return}function Zc(b,c,e,f,g,h,j,k,l,m,n){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0;m=i;w=l+ -1|0;l=2840+(w<<4)|0;s=h+6|0;p=1<<s>>1;if((g|0)<=0){i=m;return}o=(n|0)>0;v=2841+(w<<4)|0;u=2842+(w<<4)|0;t=2843+(w<<4)|0;h=2844+(w<<4)|0;r=2845+(w<<4)|0;q=2846+(w<<4)|0;w=2847+(w<<4)|0;x=0;while(1){if(o){C=a[l>>0]|0;y=a[v>>0]|0;E=a[u>>0]|0;F=a[t>>0]|0;G=a[h>>0]|0;H=a[r>>0]|0;D=a[q>>0]|0;z=a[w>>0]|0;A=0;do{I=$(d[e+(A+ -3)>>0]|0,C)|0;I=($(d[e+(A+ -2)>>0]|0,y)|0)+I|0;I=I+($(d[e+(A+ -1)>>0]|0,E)|0)|0;I=I+($(d[e+A>>0]|0,F)|0)|0;B=A;A=A+1|0;I=I+($(d[e+A>>0]|0,G)|0)|0;I=I+($(d[e+(B+2)>>0]|0,H)|0)|0;I=I+($(d[e+(B+3)>>0]|0,D)|0)|0;I=(($(I+($(d[e+(B+4)>>0]|0,z)|0)|0,j)|0)+p>>s)+k|0;if(I>>>0>255)I=0-I>>31;a[b+B>>0]=I}while((A|0)!=(n|0))}x=x+1|0;if((x|0)==(g|0))break;else{b=b+c|0;e=e+f|0}}i=m;return}function _c(b,c,e,f,g,h,j,k,l,m,n){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0;l=i;y=m+ 
-1|0;m=2840+(y<<4)|0;u=h+6|0;p=1<<u>>1;if((g|0)<=0){i=l;return}o=(n|0)>0;v=f*3|0;x=2841+(y<<4)|0;w=f<<1;h=2842+(y<<4)|0;t=2843+(y<<4)|0;s=2844+(y<<4)|0;r=2845+(y<<4)|0;q=2846+(y<<4)|0;z=2847+(y<<4)|0;y=f<<2;G=0;while(1){if(o){D=a[m>>0]|0;I=a[x>>0]|0;J=a[h>>0]|0;H=a[t>>0]|0;E=a[s>>0]|0;C=a[r>>0]|0;B=a[q>>0]|0;A=a[z>>0]|0;F=0;do{K=$(d[e+(F-v)>>0]|0,D)|0;K=($(d[e+(F-w)>>0]|0,I)|0)+K|0;K=K+($(d[e+(F-f)>>0]|0,J)|0)|0;K=K+($(d[e+F>>0]|0,H)|0)|0;K=K+($(d[e+(F+f)>>0]|0,E)|0)|0;K=K+($(d[e+(F+w)>>0]|0,C)|0)|0;K=K+($(d[e+(F+v)>>0]|0,B)|0)|0;K=(($(K+($(d[e+(F+y)>>0]|0,A)|0)|0,j)|0)+p>>u)+k|0;if(K>>>0>255)K=0-K>>31;a[b+F>>0]=K;F=F+1|0}while((F|0)!=(n|0))}G=G+1|0;if((G|0)==(g|0))break;else{b=b+c|0;e=e+f|0}}i=l;return}function $c(c,d,e,f,g,h,j,k,l,m,n){c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0;o=i;i=i+9088|0;q=o;p=h+6|0;h=1<<p>>1;z=l+ -1|0;l=2840+(z<<4)|0;s=g+7|0;if((s|0)>0){x=(n|0)>0;r=2841+(z<<4)|0;y=2842+(z<<4)|0;t=2843+(z<<4)|0;u=2844+(z<<4)|0;v=2845+(z<<4)|0;w=2846+(z<<4)|0;z=2847+(z<<4)|0;D=e+($(f,-3)|0)|0;E=q;F=0;while(1){if(x){J=a[l>>0]|0;I=a[r>>0]|0;K=a[y>>0]|0;L=a[t>>0]|0;B=a[u>>0]|0;N=a[v>>0]|0;O=a[w>>0]|0;P=a[z>>0]|0;C=a[D+ -2>>0]|0;M=a[D+ -1>>0]|0;e=a[D>>0]|0;G=a[D+1>>0]|0;H=a[D+2>>0]|0;Q=a[D+3>>0]|0;R=a[D+ -3>>0]|0;A=0;while(1){S=($(C&255,I)|0)+($(R&255,J)|0)+($(M&255,K)|0)+($(e&255,L)|0)+($(G&255,B)|0)+($(H&255,N)|0)+($(Q&255,O)|0)|0;R=a[D+(A+4)>>0]|0;b[E+(A<<1)>>1]=S+($(R&255,P)|0);A=A+1|0;if((A|0)==(n|0))break;else{W=Q;V=H;U=G;T=e;S=M;Q=R;R=C;H=W;G=V;e=U;M=T;C=S}}}F=F+1|0;if((F|0)==(s|0))break;else{D=D+f|0;E=E+128|0}}}w=m+ 
-1|0;r=2840+(w<<4)|0;if((g|0)<=0){i=o;return}l=(n|0)>0;s=2841+(w<<4)|0;t=2842+(w<<4)|0;u=2843+(w<<4)|0;v=2844+(w<<4)|0;m=2845+(w<<4)|0;f=2846+(w<<4)|0;w=2847+(w<<4)|0;z=q+384|0;q=0;while(1){if(l){A=a[r>>0]|0;B=a[s>>0]|0;C=a[t>>0]|0;D=a[u>>0]|0;E=a[v>>0]|0;F=a[m>>0]|0;y=a[f>>0]|0;x=a[w>>0]|0;e=0;do{G=$(b[z+(e+ -192<<1)>>1]|0,A)|0;G=($(b[z+(e+ -128<<1)>>1]|0,B)|0)+G|0;G=G+($(b[z+(e+ -64<<1)>>1]|0,C)|0)|0;G=G+($(b[z+(e<<1)>>1]|0,D)|0)|0;G=G+($(b[z+(e+64<<1)>>1]|0,E)|0)|0;G=G+($(b[z+(e+128<<1)>>1]|0,F)|0)|0;G=G+($(b[z+(e+192<<1)>>1]|0,y)|0)|0;G=(($(G+($(b[z+(e+256<<1)>>1]|0,x)|0)>>6,j)|0)+h>>p)+k|0;if(G>>>0>255)G=0-G>>31;a[c+e>>0]=G;e=e+1|0}while((e|0)!=(n|0))}q=q+1|0;if((q|0)==(g|0))break;else{c=c+d|0;z=z+128|0}}i=o;return}function ad(b,c,e,f,g,h,j,k){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0;j=i;o=h+ -1|0;h=2808+(o<<2)|0;if((g|0)<=0){i=j;return}l=(k|0)>0;n=2809+(o<<2)|0;m=2810+(o<<2)|0;o=2811+(o<<2)|0;s=0;while(1){if(l){p=a[h>>0]|0;u=a[n>>0]|0;v=a[m>>0]|0;t=a[o>>0]|0;r=0;do{y=$(d[e+(r+ -1)>>0]|0,p)|0;x=$(d[e+r>>0]|0,u)|0;q=r;r=r+1|0;w=$(d[e+r>>0]|0,v)|0;w=y+32+x+w+($(d[e+(q+2)>>0]|0,t)|0)>>6;if(w>>>0>255)w=0-w>>31;a[b+q>>0]=w}while((r|0)!=(k|0))}s=s+1|0;if((s|0)==(g|0))break;else{b=b+c|0;e=e+f|0}}i=j;return}function bd(b,c,e,f,g,h,j,k){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0;h=i;o=j+ -1|0;j=2808+(o<<2)|0;if((g|0)<=0){i=h;return}l=(k|0)>0;n=2809+(o<<2)|0;m=2810+(o<<2)|0;p=2811+(o<<2)|0;o=f<<1;t=0;while(1){if(l){u=a[j>>0]|0;v=a[n>>0]|0;s=a[m>>0]|0;r=a[p>>0]|0;q=0;do{y=$(d[e+(q-f)>>0]|0,u)|0;x=$(d[e+q>>0]|0,v)|0;w=$(d[e+(q+f)>>0]|0,s)|0;w=y+32+x+w+($(d[e+(q+o)>>0]|0,r)|0)>>6;if(w>>>0>255)w=0-w>>31;a[b+q>>0]=w;q=q+1|0}while((q|0)!=(k|0))}t=t+1|0;if((t|0)==(g|0))break;else{b=b+c|0;e=e+f|0}}i=h;return}function cd(c,d,e,f,g,h,j,k){c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var 
l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0;l=i;i=i+8576|0;m=l;r=h+ -1|0;h=2808+(r<<2)|0;n=g+3|0;if((n|0)>0){q=(k|0)>0;o=2809+(r<<2)|0;p=2810+(r<<2)|0;r=2811+(r<<2)|0;u=e+(0-f)|0;v=m;w=0;while(1){if(q){A=a[h>>0]|0;e=a[o>>0]|0;y=a[p>>0]|0;x=a[r>>0]|0;B=a[u+ -1>>0]|0;s=a[u>>0]|0;t=a[u+1>>0]|0;z=0;while(1){C=($(s&255,e)|0)+($(B&255,A)|0)+($(t&255,y)|0)|0;B=a[u+(z+2)>>0]|0;b[v+(z<<1)>>1]=C+($(B&255,x)|0);z=z+1|0;if((z|0)==(k|0))break;else{D=t;C=s;t=B;s=D;B=C}}}w=w+1|0;if((w|0)==(n|0))break;else{u=u+f|0;v=v+128|0}}}o=j+ -1|0;f=2808+(o<<2)|0;if((g|0)<=0){i=l;return}j=(k|0)>0;h=2809+(o<<2)|0;n=2810+(o<<2)|0;o=2811+(o<<2)|0;q=m+128|0;r=0;while(1){if(j){e=a[f>>0]|0;s=a[h>>0]|0;t=a[n>>0]|0;p=a[o>>0]|0;m=0;do{u=$(b[q+(m+ -64<<1)>>1]|0,e)|0;u=($(b[q+(m<<1)>>1]|0,s)|0)+u|0;u=u+($(b[q+(m+64<<1)>>1]|0,t)|0)|0;u=(u+($(b[q+(m+128<<1)>>1]|0,p)|0)>>6)+32>>6;if(u>>>0>255)u=0-u>>31;a[c+m>>0]=u;m=m+1|0}while((m|0)!=(k|0))}r=r+1|0;if((r|0)==(g|0))break;else{c=c+d|0;q=q+128|0}}i=l;return}function dd(b,c,e,f,g,h,j,k,l,m,n){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0;m=i;s=l+ -1|0;l=2808+(s<<2)|0;h=h+6|0;o=1<<h>>1;if((g|0)<=0){i=m;return}r=(n|0)>0;p=2809+(s<<2)|0;q=2810+(s<<2)|0;s=2811+(s<<2)|0;x=0;while(1){if(r){y=a[l>>0]|0;t=a[p>>0]|0;u=a[q>>0]|0;z=a[s>>0]|0;w=0;do{A=$(d[e+(w+ -1)>>0]|0,y)|0;A=($(d[e+w>>0]|0,t)|0)+A|0;v=w;w=w+1|0;A=A+($(d[e+w>>0]|0,u)|0)|0;A=(($(A+($(d[e+(v+2)>>0]|0,z)|0)|0,j)|0)+o>>h)+k|0;if(A>>>0>255)A=0-A>>31;a[b+v>>0]=A}while((w|0)!=(n|0))}x=x+1|0;if((x|0)==(g|0))break;else{b=b+c|0;e=e+f|0}}i=m;return}function ed(b,c,e,f,g,h,j,k,l,m,n){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0;l=i;s=m+ 
-1|0;m=2808+(s<<2)|0;h=h+6|0;o=1<<h>>1;if((g|0)<=0){i=l;return}p=(n|0)>0;q=2809+(s<<2)|0;r=2810+(s<<2)|0;s=2811+(s<<2)|0;t=f<<1;y=0;while(1){if(p){x=a[m>>0]|0;u=a[q>>0]|0;z=a[r>>0]|0;w=a[s>>0]|0;v=0;do{A=$(d[e+(v-f)>>0]|0,x)|0;A=($(d[e+v>>0]|0,u)|0)+A|0;A=A+($(d[e+(v+f)>>0]|0,z)|0)|0;A=(($(A+($(d[e+(v+t)>>0]|0,w)|0)|0,j)|0)+o>>h)+k|0;if(A>>>0>255)A=0-A>>31;a[b+v>>0]=A;v=v+1|0}while((v|0)!=(n|0))}y=y+1|0;if((y|0)==(g|0))break;else{b=b+c|0;e=e+f|0}}i=l;return}function fd(c,d,e,f,g,h,j,k,l,m,n){c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0;o=i;i=i+8576|0;p=o;v=l+ -1|0;q=2808+(v<<2)|0;h=h+6|0;l=1<<h>>1;t=g+3|0;if((t|0)>0){u=(n|0)>0;s=2809+(v<<2)|0;r=2810+(v<<2)|0;v=2811+(v<<2)|0;A=e+(0-f)|0;w=p;x=0;while(1){if(u){B=a[q>>0]|0;E=a[s>>0]|0;C=a[r>>0]|0;D=a[v>>0]|0;F=a[A+ -1>>0]|0;y=a[A>>0]|0;z=a[A+1>>0]|0;e=0;while(1){G=($(y&255,E)|0)+($(F&255,B)|0)+($(z&255,C)|0)|0;F=a[A+(e+2)>>0]|0;b[w+(e<<1)>>1]=G+($(F&255,D)|0);e=e+1|0;if((e|0)==(n|0))break;else{H=z;G=y;z=F;y=H;F=G}}}x=x+1|0;if((x|0)==(t|0))break;else{A=A+f|0;w=w+128|0}}}s=m+ -1|0;m=2808+(s<<2)|0;if((g|0)<=0){i=o;return}r=(n|0)>0;q=2809+(s<<2)|0;f=2810+(s<<2)|0;s=2811+(s<<2)|0;u=p+128|0;p=0;while(1){if(r){e=a[m>>0]|0;w=a[q>>0]|0;x=a[f>>0]|0;v=a[s>>0]|0;t=0;do{y=$(b[u+(t+ -64<<1)>>1]|0,e)|0;y=($(b[u+(t<<1)>>1]|0,w)|0)+y|0;y=y+($(b[u+(t+64<<1)>>1]|0,x)|0)|0;y=(($(y+($(b[u+(t+128<<1)>>1]|0,v)|0)>>6,j)|0)+l>>h)+k|0;if(y>>>0>255)y=0-y>>31;a[c+t>>0]=y;t=t+1|0}while((t|0)!=(n|0))}p=p+1|0;if((p|0)==(g|0))break;else{c=c+d|0;u=u+128|0}}i=o;return}function gd(e,f,g,h,j,k,l,m,n){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var 
o=0,p=0,q=0;o=i;i=i+128|0;k=o;q=k+0|0;p=q+128|0;do{c[q>>2]=0;q=q+4|0}while((q|0)<(p|0));q=d[j+n+96>>0]|0;c[k+((q&31)<<2)>>2]=b[j+(n*10|0)+114>>1];c[k+((q+1&31)<<2)>>2]=b[j+(n*10|0)+116>>1];c[k+((q+2&31)<<2)>>2]=b[j+(n*10|0)+118>>1];c[k+((q+3&31)<<2)>>2]=b[j+(n*10|0)+120>>1];if((m|0)<=0){i=o;return}n=(l|0)>0;j=0;while(1){if(n){p=0;do{q=d[f+p>>0]|0;q=q+(c[k+(q>>>3<<2)>>2]|0)|0;if(q>>>0>255)q=0-q>>31;a[e+p>>0]=q;p=p+1|0}while((p|0)!=(l|0))}j=j+1|0;if((j|0)==(m|0))break;else{e=e+g|0;f=f+h|0}}i=o;return}function hd(e,f,g,h,j,k,l,m,n,o,p,q){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;q=q|0;var r=0,s=0,t=0,u=0;p=i;o=j+(n*10|0)+112|0;r=c[j+(n<<2)+100>>2]|0;if((r|0)!=1){if(c[k>>2]|0){q=b[o>>1]|0;if((m|0)>0){s=0;do{t=(d[f+($(s,h)|0)>>0]|0)+q|0;if(t>>>0>255)t=0-t>>31;a[e+($(s,g)|0)>>0]=t;s=s+1|0}while((s|0)!=(m|0));q=1}else q=1}else q=0;if(c[k+8>>2]|0){s=b[o>>1]|0;l=l+ -1|0;if((m|0)>0){t=0;do{u=(d[f+(($(t,h)|0)+l)>>0]|0)+s|0;if(u>>>0>255)u=0-u>>31;a[e+(($(t,g)|0)+l)>>0]=u;t=t+1|0}while((t|0)!=(m|0))}}if(!r){s=m;t=q;u=0;r=l;pd(e,f,g,h,j,r,s,n,t,u);i=p;return}}else q=0;if(c[k+4>>2]|0){r=b[o>>1]|0;if((q|0)<(l|0)){s=q;do{t=(d[f+s>>0]|0)+r|0;if(t>>>0>255)t=0-t>>31;a[e+s>>0]=t;s=s+1|0}while((s|0)!=(l|0));r=1}else r=1}else r=0;if(!(c[k+12>>2]|0)){s=m;t=q;u=r;r=l;pd(e,f,g,h,j,r,s,n,t,u);i=p;return}k=b[o>>1]|0;o=m+ -1|0;t=$(o,g)|0;m=$(o,h)|0;if((q|0)<(l|0))s=q;else{s=o;t=q;u=r;r=l;pd(e,f,g,h,j,r,s,n,t,u);i=p;return}do{u=(d[f+(s+m)>>0]|0)+k|0;if(u>>>0>255)u=0-u>>31;a[e+(s+t)>>0]=u;s=s+1|0}while((s|0)!=(l|0));pd(e,f,g,h,j,l,o,n,q,r);i=p;return}function id(e,f,g,h,j,k,l,m,n,o,p,q){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;p=p|0;q=q|0;var r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0;s=i;C=j+(n*10|0)+112|0;B=c[j+(n<<2)+100>>2]|0;A=(B|0)!=1;if(A){if(c[k>>2]|0){D=b[C>>1]|0;if((m|0)>0){E=0;do{G=(d[f+($(E,h)|0)>>0]|0)+D|0;if(G>>>0>255)G=0-G>>31;a[e+($(E,g)|0)>>0]=G;E=E+1|0}while((E|0)!=(m|0));D=1}else D=1}else 
D=0;if(c[k+8>>2]|0){E=b[C>>1]|0;l=l+ -1|0;if((m|0)>0){G=0;do{H=(d[f+(($(G,h)|0)+l)>>0]|0)+E|0;if(H>>>0>255)H=0-H>>31;a[e+(($(G,g)|0)+l)>>0]=H;G=G+1|0}while((G|0)!=(m|0))}}if(!B){C=1;E=0}else F=13}else{D=0;F=13}if((F|0)==13){if(c[k+4>>2]|0){F=b[C>>1]|0;if((D|0)<(l|0)){E=D;do{G=(d[f+E>>0]|0)+F|0;if(G>>>0>255)G=0-G>>31;a[e+E>>0]=G;E=E+1|0}while((E|0)!=(l|0));E=1}else E=1}else E=0;if(c[k+12>>2]|0){C=b[C>>1]|0;m=m+ -1|0;G=$(m,g)|0;H=$(m,h)|0;if((D|0)<(l|0)){F=D;do{I=(d[f+(F+H)>>0]|0)+C|0;if(I>>>0>255)I=0-I>>31;a[e+(F+G)>>0]=I;F=F+1|0}while((F|0)!=(l|0));C=0}else C=0}else C=0}pd(e,f,g,h,j,l,m,n,D,E);j=(B|0)==2;if((a[q>>0]|0)==0&j?(c[k>>2]|0)==0:0)n=(c[k+4>>2]|0)==0;else n=0;H=n&1;n=q+1|0;B=(B|0)==3;if((a[n>>0]|0)==0&B?(c[k+4>>2]|0)==0:0)F=(c[k+8>>2]|0)==0;else F=0;J=F&1;F=q+2|0;if((a[F>>0]|0)==0&j?(c[k+8>>2]|0)==0:0)G=(c[k+12>>2]|0)==0;else G=0;I=G&1;G=q+3|0;if((a[G>>0]|0)==0&B?(c[k>>2]|0)==0:0)k=(c[k+12>>2]|0)==0;else k=0;k=k&1;A=A^1;if(!((a[o>>0]|0)==0|A)?(z=H+E|0,y=m-k|0,(z|0)<(y|0)):0)do{a[e+($(z,g)|0)>>0]=a[f+($(z,h)|0)>>0]|0;z=z+1|0}while((z|0)!=(y|0));if(!((a[o+1>>0]|0)==0|A)?(x=J+E|0,w=m-I|0,(x|0)<(w|0)):0){o=l+ -1|0;do{a[e+(o+($(x,g)|0))>>0]=a[f+(o+($(x,h)|0))>>0]|0;x=x+1|0}while((x|0)!=(w|0))}if(!((a[p>>0]|0)==0|C)?(v=H+D|0,u=l-J|0,(v|0)<(u|0)):0)do{a[e+v>>0]=a[f+v>>0]|0;v=v+1|0}while((v|0)!=(u|0));if(!((a[p+1>>0]|0)==0|C)?(t=k+D|0,r=l-I|0,(t|0)<(r|0)):0){u=m+ -1|0;p=$(u,h)|0;u=$(u,g)|0;do{a[e+(t+u)>>0]=a[f+(t+p)>>0]|0;t=t+1|0}while((t|0)!=(r|0))}if((a[q>>0]|0)!=0&j)a[e>>0]=a[f>>0]|0;if((a[n>>0]|0)!=0&B){J=l+ -1|0;a[e+J>>0]=a[f+J>>0]|0}if((a[F>>0]|0)!=0&j){J=m+ -1|0;I=l+ -1|0;a[e+(I+($(J,g)|0))>>0]=a[f+(I+($(J,h)|0))>>0]|0}if(!((a[G>>0]|0)!=0&B)){i=s;return}J=m+ -1|0;a[e+($(J,g)|0)>>0]=a[f+($(J,h)|0)>>0]|0;i=s;return}function jd(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;var g=0;g=i;od(a,b,1,c,d,e,f);i=g;return}function kd(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;var g=0;g=i;od(a,1,b,c,d,e,f);i=g;return}function 
ld(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;var f=0;f=i;nd(a,b,1,c,d,e);i=f;return}function md(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;var f=0;f=i;nd(a,1,b,c,d,e);i=f;return}function nd(b,e,f,g,h,j){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;var k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0;m=i;l=$(e,-2)|0;k=0-e|0;n=0;while(1){p=c[g+(n<<2)>>2]|0;if((p|0)>=1){s=0-p|0;r=(a[h+n>>0]|0)==0;q=(a[j+n>>0]|0)==0;o=0;t=b;while(1){v=t+k|0;x=d[v>>0]|0;u=d[t>>0]|0;w=(d[t+l>>0]|0)+4-(d[t+e>>0]|0)+(u-x<<2)>>3;if((w|0)<(s|0))w=s;else w=(w|0)>(p|0)?p:w;if(r){x=w+x|0;if(x>>>0>255)x=0-x>>31;a[v>>0]=x}if(q){u=u-w|0;if(u>>>0>255)u=0-u>>31;a[t>>0]=u}o=o+1|0;if((o|0)==4)break;else t=t+f|0}}n=n+1|0;if((n|0)==2)break;else b=b+(f<<2)|0}i=m;return}function od(b,e,f,g,h,j,k){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0;t=i;o=$(e,-3)|0;p=$(e,-2)|0;q=0-e|0;r=e<<1;D=f*3|0;B=D+o|0;C=D+p|0;A=D-e|0;E=D+r|0;l=D+e|0;y=g>>3;v=g>>2;n=$(e,-4)|0;s=e*3|0;w=D+n|0;x=(f+e|0)*3|0;u=(g>>1)+g>>3;z=f<<2;m=f<<2;F=0;do{U=a[b+o>>0]|0;T=a[b+p>>0]|0;S=a[b+q>>0]|0;J=S&255;N=(U&255)-((T&255)<<1)+J|0;N=(N|0)>-1?N:0-N|0;P=a[b+r>>0]|0;Q=a[b+e>>0]|0;R=a[b>>0]|0;X=R&255;V=(P&255)-((Q&255)<<1)+X|0;V=(V|0)>-1?V:0-V|0;_=d[b+A>>0]|0;M=(d[b+B>>0]|0)-((d[b+C>>0]|0)<<1)+_|0;M=(M|0)>-1?M:0-M|0;Y=d[b+D>>0]|0;O=(d[b+E>>0]|0)-((d[b+l>>0]|0)<<1)+Y|0;O=(O|0)>-1?O:0-O|0;H=V+N|0;I=O+M|0;G=c[h+(F<<2)>>2]|0;W=a[j+F>>0]|0;L=a[k+F>>0]|0;do 
if((I+H|0)<(g|0)){Z=(G*5|0)+1>>1;aa=(d[b+n>>0]|0)-J|0;K=a[b+s>>0]|0;ba=(K&255)-X|0;if((((((((ba|0)>-1?ba:0-ba|0)+((aa|0)>-1?aa:0-aa|0)|0)<(y|0)?(ba=J-X|0,(((ba|0)>-1?ba:0-ba|0)|0)<(Z|0)):0)?(ba=(d[b+w>>0]|0)-_|0,aa=(d[b+x>>0]|0)-Y|0,(((aa|0)>-1?aa:0-aa|0)+((ba|0)>-1?ba:0-ba|0)|0)<(y|0)):0)?(ba=_-Y|0,(((ba|0)>-1?ba:0-ba|0)|0)<(Z|0)):0)?(H<<1|0)<(v|0):0)?(I<<1|0)<(v|0):0){G=G<<1;H=W<<24>>24==0;I=0-G|0;J=L<<24>>24==0;Y=K;K=1;L=b;while(1){V=L+o|0;U=U&255;W=L+p|0;T=T&255;X=L+q|0;N=S&255;R=R&255;S=L+e|0;Q=Q&255;M=L+r|0;O=P&255;P=Y&255;if(H){Y=d[L+n>>0]|0;Z=(U+4+Q+(N+T+R<<1)>>3)-N|0;if((Z|0)<(I|0))Z=I;else Z=(Z|0)>(G|0)?G:Z;a[X>>0]=Z+N;X=((U+2+T+N+R|0)>>>2)-T|0;if((X|0)<(I|0))X=I;else X=(X|0)>(G|0)?G:X;a[W>>0]=X+T;W=((U*3|0)+4+T+N+R+(Y<<1)>>3)-U|0;if((W|0)<(I|0))W=I;else W=(W|0)>(G|0)?G:W;a[V>>0]=W+U}if(J){T=(T+4+O+(R+N+Q<<1)>>3)-R|0;if((T|0)<(I|0))T=I;else T=(T|0)>(G|0)?G:T;a[L>>0]=T+R;T=((N+2+R+Q+O|0)>>>2)-Q|0;if((T|0)<(I|0))T=I;else T=(T|0)>(G|0)?G:T;a[S>>0]=T+Q;N=(N+4+R+Q+(O*3|0)+(P<<1)>>3)-O|0;if((N|0)<(I|0))N=I;else N=(N|0)>(G|0)?G:N;a[M>>0]=N+O}M=L+f|0;if((K|0)==4)break;U=a[L+(o+f)>>0]|0;T=a[L+(p+f)>>0]|0;S=a[L+(f-e)>>0]|0;R=a[M>>0]|0;Q=a[L+(f+e)>>0]|0;P=a[L+(r+f)>>0]|0;Y=a[L+(s+f)>>0]|0;K=K+1|0;L=M}b=b+m|0;break}H=G>>1;I=G*10|0;J=0-G|0;K=W<<24>>24!=0;L=L<<24>>24!=0;M=(M+N|0)<(u|0)&(K^1);N=0-H|0;O=(O+V|0)<(u|0)&(L^1);V=T;W=R;Y=Q;Q=1;R=b;while(1){X=U&255;T=R+p|0;V=V&255;_=R+q|0;Z=S&255;W=W&255;S=R+e|0;U=Y&255;P=P&255;Y=((W-Z|0)*9|0)+8+($(U-V|0,-3)|0)>>4;if((((Y|0)>-1?Y:0-Y|0)|0)<(I|0)){if((Y|0)<(J|0))Y=J;else Y=(Y|0)>(G|0)?G:Y;if(!K){aa=Y+Z|0;if(aa>>>0>255)aa=0-aa>>31;a[_>>0]=aa}if(!L){_=W-Y|0;if(_>>>0>255)_=0-_>>31;a[R>>0]=_}if(M){X=((X+1+Z|0)>>>1)-V+Y>>1;if((X|0)<(N|0))X=N;else X=(X|0)>(H|0)?H:X;V=X+V|0;if(V>>>0>255)V=0-V>>31;a[T>>0]=V}if(O){P=((W+1+P|0)>>>1)-U-Y>>1;if((P|0)<(N|0))P=N;else 
P=(P|0)>(H|0)?H:P;P=P+U|0;if(P>>>0>255)P=0-P>>31;a[S>>0]=P}}T=R+f|0;if((Q|0)==4)break;U=a[R+(o+f)>>0]|0;V=a[R+(p+f)>>0]|0;S=a[R+(f-e)>>0]|0;W=a[T>>0]|0;Y=a[R+(f+e)>>0]|0;P=a[R+(r+f)>>0]|0;Q=Q+1|0;R=T}b=b+m|0}else b=b+z|0;while(0);F=F+1|0}while((F|0)!=2);i=t;return}function pd(e,f,g,h,j,k,l,m,n,o){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;var p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0;t=i;v=c[j+(m<<2)+100>>2]|0;q=a[2896+(v<<2)>>0]|0;r=a[2898+(v<<2)>>0]|0;if((o|0)>=(l|0)){i=t;return}u=(n|0)<(k|0);s=o;p=$((a[2897+(v<<2)>>0]|0)+o|0,h)|0;v=$((a[2899+(v<<2)>>0]|0)+o|0,h)|0;w=$(o,g)|0;o=$(o,h)|0;while(1){if(u){y=p+q|0;x=v+r|0;z=n;do{A=a[f+(z+o)>>0]|0;B=a[f+(y+z)>>0]|0;if((A&255)>(B&255))B=3;else B=((A<<24>>24!=B<<24>>24)<<31>>31)+2|0;C=a[f+(x+z)>>0]|0;if((A&255)>(C&255))C=1;else C=(A<<24>>24!=C<<24>>24)<<31>>31;A=(b[j+(m*10|0)+(d[2888+(C+B)>>0]<<1)+112>>1]|0)+(A&255)|0;if(A>>>0>255)A=0-A>>31;a[e+(z+w)>>0]=A;z=z+1|0}while((z|0)!=(k|0))}s=s+1|0;if((s|0)==(l|0))break;else{p=p+h|0;v=v+h|0;w=w+g|0;o=o+h|0}}i=t;return}function qd(b,e,f,g,h){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0;j=i;k=c[b+136>>2]|0;l=(c[b+200>>2]|0)+13080|0;r=(1<<c[l>>2])+ -1|0;o=r&e;n=r&f;q=(n|0)!=0|(a[k+309>>0]|0)!=0;m=q&1;c[k+31296>>2]=m;p=(o|0)!=0|(a[k+308>>0]|0)!=0;b=p&1;c[k+31292>>2]=b;if(!(r&(f|e)))p=d[k+311>>0]|0;else p=p&q&1;c[k+31300>>2]=p;if((o+g|0)==(1<<c[l>>2]|0))m=(a[k+310>>0]|0)!=0&(n|0)==0&1;c[k+31308>>2]=m;if(!m){q=0;q=q&1;r=k+31304|0;c[r>>2]=q;r=h+f|0;q=k+316|0;q=c[q>>2]|0;q=(r|0)<(q|0);q=q?b:0;r=k+31288|0;c[r>>2]=q;i=j;return}q=(g+e|0)<(c[k+312>>2]|0);q=q&1;r=k+31304|0;c[r>>2]=q;r=h+f|0;q=k+316|0;q=c[q>>2]|0;q=(r|0)<(q|0);q=q?b:0;r=k+31288|0;c[r>>2]=q;i=j;return}function rd(e,f,g,h,j,k,l,m,n){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var 
o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0,qa=0,ra=0,sa=0,ta=0,ua=0,va=0,wa=0,xa=0,ya=0,za=0,Aa=0,Ba=0,Ca=0;o=i;i=i+80|0;t=o+4|0;s=o;xa=o+11&-4;p=xa;q=e+136|0;r=c[q>>2]|0;G=e+204|0;if((1<<k|0)==8?(c[(c[G>>2]|0)+1620>>2]|0)>2:0){v=8;u=8;l=0;Aa=1;f=c[r+31236>>2]|0;g=c[r+31240>>2]|0}else{v=j;u=h;Aa=0}qd(e,f,g,u,v);ya=c[q>>2]|0;A=c[e+3508>>2]|0;k=c[A+20>>2]|0;A=c[A+16>>2]|0;Z=e+200|0;Ba=c[Z>>2]|0;V=c[Ba+13156>>2]|0;la=c[ya+31288>>2]|0;za=c[ya+31292>>2]|0;_=c[ya+31300>>2]|0;wa=c[ya+31296>>2]|0;ta=c[ya+31308>>2]|0;X=f+ -1|0;ma=g+v|0;W=ma+ -1|0;ua=f+u|0;S=ua+ -1|0;Y=g+ -1|0;q=e+2428|0;r=c[e+3036>>2]|0;if((c[q>>2]|0)!=1){Ca=c[e+3040>>2]|0;r=r>>>0>Ca>>>0?Ca:r}l=(Aa|0)==0&(l|0)==1;if(l?(va=c[ya+31248>>2]|0,(va|0)==7|(va|0)==6|(va|0)==2):0){l=va;xa=0;ya=0;va=14}else va=7;do if((va|0)==7){Aa=c[(c[G>>2]|0)+1620>>2]&255;if((X>>Aa|0)==(f>>Aa|0))Aa=(W>>Aa|0)==(g>>Aa|0);else Aa=0;if(!(Aa|(za|0)==0)){za=c[Ba+13084>>2]|0;za=($(W>>za,V)|0)+(X>>za)|0;Ca=(a[A+(za*12|0)+10>>0]|0)!=0;Aa=Ca&1;if(Ca){Ba=A+(za*12|0)|0;Ca=xa;c[Ca+0>>2]=c[Ba+0>>2];c[Ca+4>>2]=c[Ba+4>>2];c[Ca+8>>2]=c[Ba+8>>2];if(!m)break;else{xa=Aa;va=1}}else{xa=Aa;va=0}}else{xa=0;va=0}if(l){l=c[ya+31248>>2]|0;ya=va;va=14}else{ya=va;va=15}}while(0);if((va|0)==14)if((l|0)==5|(l|0)==4|(l|0)==1){wa=0;va=30}else va=15;a:do if((va|0)==15){l=c[(c[G>>2]|0)+1620>>2]&255;if((S>>l|0)==(f>>l|0))l=(Y>>l|0)==(g>>l|0);else l=0;if(!(l|(wa|0)==0)){za=c[(c[Z>>2]|0)+13084>>2]|0;Aa=($(Y>>za,V)|0)+(S>>za)|0;Ba=a[A+(Aa*12|0)+10>>0]|0;Ca=Ba<<24>>24!=0;wa=Ca&1;if(Ca){l=A+(Aa*12|0)|0;do 
if((xa|0)!=0?(Ca=($(W>>za,V)|0)+(X>>za)|0,oa=A+(Ca*12|0)|0,na=d[l>>0]|d[l+1>>0]<<8|d[l+2>>0]<<16|d[l+3>>0]<<24,qa=A+(Aa*12|0)+4|0,qa=d[qa>>0]|d[qa+1>>0]<<8|d[qa+2>>0]<<16|d[qa+3>>0]<<24,sa=A+(Aa*12|0)+8|0,sa=d[sa>>0]|d[sa+1>>0]<<8,oa=d[oa>>0]|d[oa+1>>0]<<8|d[oa+2>>0]<<16|d[oa+3>>0]<<24,pa=A+(Ca*12|0)+4|0,pa=d[pa>>0]|d[pa+1>>0]<<8|d[pa+2>>0]<<16|d[pa+3>>0]<<24,ra=A+(Ca*12|0)+8|0,ra=d[ra>>0]|d[ra+1>>0]<<8,Ba<<24>>24==(a[A+(Ca*12|0)+10>>0]|0)):0){if(Ba<<24>>24==1)na=((na|0)==(oa|0)?(sa&255)<<24>>24==(ra&255)<<24>>24:0)&1;else if(Ba<<24>>24==3){if((sa&255)<<24>>24==(ra&255)<<24>>24)na=((na|0)==(oa|0)?((sa&65535)>>>8&255)<<24>>24==((ra&65535)>>>8&255)<<24>>24:0)&(qa|0)==(pa|0);else na=0;na=na&1}else if(Ba<<24>>24==2)na=((qa|0)==(pa|0)?((sa&65535)>>>8&255)<<24>>24==((ra&65535)>>>8&255)<<24>>24:0)&1;else break;if(na){va=30;break a}}while(0);Ca=p+(ya*12|0)|0;c[Ca+0>>2]=c[l+0>>2];c[Ca+4>>2]=c[l+4>>2];c[Ca+8>>2]=c[l+8>>2];if((ya|0)!=(m|0)){ya=ya+1|0;va=30}}else va=30}else{wa=0;va=30}}while(0);b:do if((va|0)==30){c:do if(((ta|0)!=0?(ka=c[Z>>2]|0,da=c[ka+13084>>2]|0,ba=$(Y>>da,V)|0,ca=ba+(ua>>da)|0,aa=a[A+(ca*12|0)+10>>0]|0,aa<<24>>24!=0):0)?(ua|0)<(c[ka+13120>>2]|0):0){Ca=c[ka+13080>>2]|0;if(((Y>>Ca|0)>=(g>>Ca|0)?(ua>>Ca|0)>=(f>>Ca|0):0)?(za=c[ka+13072>>2]|0,Aa=c[ka+13164>>2]|0,l=Aa+2|0,Ca=($(g>>za&Aa,l)|0)+(f>>za&Aa)|0,Ba=c[(c[G>>2]|0)+1684>>2]|0,(c[Ba+(($(Y>>za&Aa,l)|0)+(ua>>za&Aa)<<2)>>2]|0)>(c[Ba+(Ca<<2)>>2]|0)):0)break;Ca=c[(c[G>>2]|0)+1620>>2]&255;if((ua>>Ca|0)==(f>>Ca|0)?(Y>>Ca|0)==(g>>Ca|0):0)break;ka=A+(ca*12|0)|0;do 
if((wa|0)!=0?(Ca=ba+(S>>da)|0,fa=A+(Ca*12|0)|0,ea=d[ka>>0]|d[ka+1>>0]<<8|d[ka+2>>0]<<16|d[ka+3>>0]<<24,ia=A+(ca*12|0)+4|0,ia=d[ia>>0]|d[ia+1>>0]<<8|d[ia+2>>0]<<16|d[ia+3>>0]<<24,ga=A+(ca*12|0)+8|0,ga=d[ga>>0]|d[ga+1>>0]<<8,fa=d[fa>>0]|d[fa+1>>0]<<8|d[fa+2>>0]<<16|d[fa+3>>0]<<24,ja=A+(Ca*12|0)+4|0,ja=d[ja>>0]|d[ja+1>>0]<<8|d[ja+2>>0]<<16|d[ja+3>>0]<<24,ha=A+(Ca*12|0)+8|0,ha=d[ha>>0]|d[ha+1>>0]<<8,aa<<24>>24==(a[A+(Ca*12|0)+10>>0]|0)):0){if(aa<<24>>24==1)aa=((ea|0)==(fa|0)?(ga&255)<<24>>24==(ha&255)<<24>>24:0)&1;else if(aa<<24>>24==2)aa=((ia|0)==(ja|0)?((ga&65535)>>>8&255)<<24>>24==((ha&65535)>>>8&255)<<24>>24:0)&1;else if(aa<<24>>24==3){if((ga&255)<<24>>24==(ha&255)<<24>>24)aa=((ea|0)==(fa|0)?((ga&65535)>>>8&255)<<24>>24==((ha&65535)>>>8&255)<<24>>24:0)&(ia|0)==(ja|0);else aa=0;aa=aa&1}else break;if(aa)break c}while(0);Ca=p+(ya*12|0)|0;c[Ca+0>>2]=c[ka+0>>2];c[Ca+4>>2]=c[ka+4>>2];c[Ca+8>>2]=c[ka+8>>2];if((ya|0)==(m|0))break b;ya=ya+1|0}while(0);d:do if(((la|0)!=0?(U=c[Z>>2]|0,N=c[U+13084>>2]|0,O=X>>N,J=($(ma>>N,V)|0)+O|0,M=a[A+(J*12|0)+10>>0]|0,M<<24>>24!=0):0)?(ma|0)<(c[U+13124>>2]|0):0){Ca=c[U+13080>>2]|0;if(((ma>>Ca|0)>=(g>>Ca|0)?(X>>Ca|0)>=(f>>Ca|0):0)?(za=c[U+13072>>2]|0,Aa=c[U+13164>>2]|0,l=Aa+2|0,Ca=($(g>>za&Aa,l)|0)+(f>>za&Aa)|0,Ba=c[(c[G>>2]|0)+1684>>2]|0,(c[Ba+(($(ma>>za&Aa,l)|0)+(X>>za&Aa)<<2)>>2]|0)>(c[Ba+(Ca<<2)>>2]|0)):0)break;Ca=c[(c[G>>2]|0)+1620>>2]&255;if((X>>Ca|0)==(f>>Ca|0)?(ma>>Ca|0)==(g>>Ca|0):0)break;U=A+(J*12|0)|0;do 
if((xa|0)!=0?(Ca=($(W>>N,V)|0)+O|0,R=A+(Ca*12|0)|0,Q=d[U>>0]|d[U+1>>0]<<8|d[U+2>>0]<<16|d[U+3>>0]<<24,P=A+(J*12|0)+4|0,P=d[P>>0]|d[P+1>>0]<<8|d[P+2>>0]<<16|d[P+3>>0]<<24,T=A+(J*12|0)+8|0,T=d[T>>0]|d[T+1>>0]<<8,R=d[R>>0]|d[R+1>>0]<<8|d[R+2>>0]<<16|d[R+3>>0]<<24,L=A+(Ca*12|0)+4|0,L=d[L>>0]|d[L+1>>0]<<8|d[L+2>>0]<<16|d[L+3>>0]<<24,K=A+(Ca*12|0)+8|0,K=d[K>>0]|d[K+1>>0]<<8,M<<24>>24==(a[A+(Ca*12|0)+10>>0]|0)):0){if(M<<24>>24==3){if((T&255)<<24>>24==(K&255)<<24>>24)J=((Q|0)==(R|0)?((T&65535)>>>8&255)<<24>>24==((K&65535)>>>8&255)<<24>>24:0)&(P|0)==(L|0);else J=0;J=J&1}else if(M<<24>>24==1)J=((Q|0)==(R|0)?(T&255)<<24>>24==(K&255)<<24>>24:0)&1;else if(M<<24>>24==2)J=((P|0)==(L|0)?((T&65535)>>>8&255)<<24>>24==((K&65535)>>>8&255)<<24>>24:0)&1;else break;if(J)break d}while(0);Ca=p+(ya*12|0)|0;c[Ca+0>>2]=c[U+0>>2];c[Ca+4>>2]=c[U+4>>2];c[Ca+8>>2]=c[U+8>>2];if((ya|0)==(m|0))break b;ya=ya+1|0}while(0);e:do if((_|0)!=0?(z=c[(c[Z>>2]|0)+13084>>2]|0,y=$(Y>>z,V)|0,F=X>>z,x=y+F|0,w=a[A+(x*12|0)+10>>0]|0,w<<24>>24!=0):0){Ca=c[(c[G>>2]|0)+1620>>2]&255;if((X>>Ca|0)==(f>>Ca|0)?(Y>>Ca|0)==(g>>Ca|0):0)break;do if((xa|0)!=0?(E=A+(x*12|0)|0,Ca=($(W>>z,V)|0)+F|0,B=A+(Ca*12|0)|0,E=d[E>>0]|d[E+1>>0]<<8|d[E+2>>0]<<16|d[E+3>>0]<<24,D=A+(x*12|0)+4|0,D=d[D>>0]|d[D+1>>0]<<8|d[D+2>>0]<<16|d[D+3>>0]<<24,I=A+(x*12|0)+8|0,I=d[I>>0]|d[I+1>>0]<<8,B=d[B>>0]|d[B+1>>0]<<8|d[B+2>>0]<<16|d[B+3>>0]<<24,H=A+(Ca*12|0)+4|0,H=d[H>>0]|d[H+1>>0]<<8|d[H+2>>0]<<16|d[H+3>>0]<<24,C=A+(Ca*12|0)+8|0,C=d[C>>0]|d[C+1>>0]<<8,w<<24>>24==(a[A+(Ca*12|0)+10>>0]|0)):0){if(w<<24>>24==3){if((I&255)<<24>>24==(C&255)<<24>>24)B=((E|0)==(B|0)?((I&65535)>>>8&255)<<24>>24==((C&65535)>>>8&255)<<24>>24:0)&(D|0)==(H|0);else B=0;B=B&1}else if(w<<24>>24==1)B=((E|0)==(B|0)?(I&255)<<24>>24==(C&255)<<24>>24:0)&1;else if(w<<24>>24==2)B=((D|0)==(H|0)?((I&65535)>>>8&255)<<24>>24==((C&65535)>>>8&255)<<24>>24:0)&1;else break;if(B)break 
e}while(0);if(wa){B=A+(x*12|0)|0;Ca=y+(S>>z)|0;C=A+(Ca*12|0)|0;z=d[B>>0]|d[B+1>>0]<<8|d[B+2>>0]<<16|d[B+3>>0]<<24;y=A+(x*12|0)+4|0;y=d[y>>0]|d[y+1>>0]<<8|d[y+2>>0]<<16|d[y+3>>0]<<24;x=A+(x*12|0)+8|0;x=d[x>>0]|d[x+1>>0]<<8;C=d[C>>0]|d[C+1>>0]<<8|d[C+2>>0]<<16|d[C+3>>0]<<24;D=A+(Ca*12|0)+4|0;D=d[D>>0]|d[D+1>>0]<<8|d[D+2>>0]<<16|d[D+3>>0]<<24;E=A+(Ca*12|0)+8|0;E=d[E>>0]|d[E+1>>0]<<8;do if(w<<24>>24==(a[A+(Ca*12|0)+10>>0]|0))if(w<<24>>24==1){w=((z|0)==(C|0)?(x&255)<<24>>24==(E&255)<<24>>24:0)&1;break}else if(w<<24>>24==2){w=((y|0)==(D|0)?((x&65535)>>>8&255)<<24>>24==((E&65535)>>>8&255)<<24>>24:0)&1;break}else if(w<<24>>24==3){if((x&255)<<24>>24==(E&255)<<24>>24)w=((z|0)==(C|0)?((x&65535)>>>8&255)<<24>>24==((E&65535)>>>8&255)<<24>>24:0)&(y|0)==(D|0);else w=0;w=w&1;break}else{w=0;break}else w=0;while(0);if((w|0)!=0|(ya|0)==4)break}else{if((ya|0)==4){ya=4;break}B=A+(x*12|0)|0}Ca=p+(ya*12|0)|0;c[Ca+0>>2]=c[B+0>>2];c[Ca+4>>2]=c[B+4>>2];c[Ca+8>>2]=c[B+8>>2];if((ya|0)==(m|0))break b;ya=ya+1|0}while(0);w=e+3080|0;if((a[e+3035>>0]|0)!=0?ya>>>0<(c[w>>2]|0)>>>0:0){c[t>>2]=0;c[s>>2]=0;x=vd(e,f,g,u,v,0,t,0)|0;if(!(c[q>>2]|0))e=vd(e,f,g,u,v,0,s,1)|0;else e=0;if(e|x){a[p+(ya*12|0)+10>>0]=(e<<1)+x;b[p+(ya*12|0)+8>>1]=0;c[p+(ya*12|0)>>2]=c[t>>2];c[p+(ya*12|0)+4>>2]=c[s>>2];if((ya|0)==(m|0))break;ya=ya+1|0}}e=c[w>>2]|0;f:do if((c[q>>2]|0)==0&(ya|0)>1&ya>>>0<e>>>0){s=$(ya+ -1|0,ya)|0;t=0;while(1){if((t|0)>=(s|0))break f;Ba=d[2912+(t<<1)>>0]|0;Ca=d[2913+(t<<1)>>0]|0;f=c[p+(Ba*12|0)>>2]|0;g=a[p+(Ba*12|0)+8>>0]|0;v=c[p+(Ca*12|0)+4>>2]|0;u=a[p+(Ca*12|0)+9>>0]|0;if(((a[p+(Ba*12|0)+10>>0]&1)!=0?(a[p+(Ca*12|0)+10>>0]&2)!=0:0)?!((f|0)==(v|0)?(c[k+(g<<24>>24<<2)+64>>2]|0)==(c[k+(u<<24>>24<<2)+260>>2]|0):0):0){a[p+(ya*12|0)+8>>0]=g;a[p+(ya*12|0)+9>>0]=u;a[p+(ya*12|0)+10>>0]=3;c[p+(ya*12|0)>>2]=f;c[p+(ya*12|0)+4>>2]=v;if((ya|0)==(m|0))break b;e=c[w>>2]|0;ya=ya+1|0}if(ya>>>0<e>>>0)t=t+1|0;else 
break}}while(0);if(ya>>>0<e>>>0){k=0;while(1){a[p+(ya*12|0)+10>>0]=((c[q>>2]|0)==0&1)<<1|1;c[p+(ya*12|0)>>2]=0;c[p+(ya*12|0)+4>>2]=0;Ca=(k|0)<(r|0)?k&255:0;a[p+(ya*12|0)+8>>0]=Ca;a[p+(ya*12|0)+9>>0]=Ca;if((ya|0)==(m|0))break b;ya=ya+1|0;if(ya>>>0>=(c[w>>2]|0)>>>0)break b;k=k+1|0}}}while(0);q=p+(m*12|0)|0;p=p+(m*12|0)+10|0;if((a[p>>0]|0)!=3){c[n+0>>2]=c[q+0>>2];c[n+4>>2]=c[q+4>>2];c[n+8>>2]=c[q+8>>2];i=o;return}if((j+h|0)!=12){c[n+0>>2]=c[q+0>>2];c[n+4>>2]=c[q+4>>2];c[n+8>>2]=c[q+8>>2];i=o;return}a[p>>0]=1;c[n+0>>2]=c[q+0>>2];c[n+4>>2]=c[q+4>>2];c[n+8>>2]=c[q+8>>2];i=o;return}function sd(d,e,f,g,h,j,k,l,m,n,o){d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;o=o|0;var p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0;k=i;i=i+32|0;j=k;r=k+16|0;q=k+12|0;l=k+8|0;H=c[d+136>>2]|0;z=c[(c[d+3508>>2]|0)+16>>2]|0;u=d+200|0;F=c[u>>2]|0;A=c[F+13156>>2]|0;x=j;c[x>>2]=0;c[x+4>>2]=0;x=c[H+31292>>2]|0;B=c[H+31300>>2]|0;C=c[H+31296>>2]|0;E=c[H+31308>>2]|0;p=a[m+o+8>>0]|0;s=(o|0)==0&1;t=e+ -1|0;w=h+f|0;if(((c[H+31288>>2]|0)!=0?(H=c[F+13084>>2]|0,(a[z+((($(w>>H,A)|0)+(t>>H)|0)*12|0)+10>>0]|0)!=0):0)?(w|0)<(c[F+13124>>2]|0):0){H=c[F+13080>>2]|0;if((w>>H|0)>=(f>>H|0)?(t>>H|0)>=(e>>H|0):0){I=c[F+13072>>2]|0;D=c[F+13164>>2]|0;J=D+2|0;G=($(f>>I&D,J)|0)+(e>>I&D)|0;H=c[(c[d+204>>2]|0)+1684>>2]|0;G=(c[H+(($(w>>I&D,J)|0)+(t>>I&D)<<2)>>2]|0)<=(c[H+(G<<2)>>2]|0)}else G=1}else G=0;D=w+ -1|0;if(!x)H=0;else{H=c[F+13084>>2]|0;H=(a[z+((($(D>>H,A)|0)+(t>>H)|0)*12|0)+10>>0]|0)!=0}x=G|H;if(G){J=c[F+13084>>2]|0;if((td(d,t>>J,w>>J,o,r,o,p)|0)==0?(J=c[(c[u>>2]|0)+13084>>2]|0,(td(d,t>>J,w>>J,s,r,o,p)|0)==0):0)v=12;else F=1}else v=12;do 
if((v|0)==12){if(H){J=c[(c[u>>2]|0)+13084>>2]|0;if(td(d,t>>J,D>>J,o,r,o,p)|0){F=1;break}J=c[(c[u>>2]|0)+13084>>2]|0;if(td(d,t>>J,D>>J,s,r,o,p)|0){F=1;break}}if(G){J=c[(c[u>>2]|0)+13084>>2]|0;if(ud(d,t>>J,w>>J,o,r,o,p)|0){F=1;break}J=c[(c[u>>2]|0)+13084>>2]|0;if(ud(d,t>>J,w>>J,s,r,o,p)|0){F=1;break}}if(H){J=c[(c[u>>2]|0)+13084>>2]|0;if(ud(d,t>>J,D>>J,o,r,o,p)|0){F=1;break}J=c[(c[u>>2]|0)+13084>>2]|0;if(ud(d,t>>J,D>>J,s,r,o,p)|0){F=1;break}}F=0}while(0);D=g+e|0;w=f+ -1|0;if(((E|0)!=0?(y=c[u>>2]|0,J=c[y+13084>>2]|0,(a[z+((($(w>>J,A)|0)+(D>>J)|0)*12|0)+10>>0]|0)!=0):0)?(D|0)<(c[y+13120>>2]|0):0){J=c[y+13080>>2]|0;if((w>>J|0)>=(f>>J|0)?(D>>J|0)>=(e>>J|0):0){H=c[y+13072>>2]|0;I=c[y+13164>>2]|0;G=I+2|0;E=($(f>>H&I,G)|0)+(e>>H&I)|0;J=c[(c[d+204>>2]|0)+1684>>2]|0;E=(c[J+(($(w>>H&I,G)|0)+(D>>H&I)<<2)>>2]|0)<=(c[J+(E<<2)>>2]|0)}else E=1}else E=0;y=D+ -1|0;if(!C)C=0;else{C=c[(c[u>>2]|0)+13084>>2]|0;C=(a[z+((($(w>>C,A)|0)+(y>>C)|0)*12|0)+10>>0]|0)!=0}if(!B)z=0;else{J=c[(c[u>>2]|0)+13084>>2]|0;z=(a[z+((($(w>>J,A)|0)+(t>>J)|0)*12|0)+10>>0]|0)!=0}if(E){J=c[(c[u>>2]|0)+13084>>2]|0;if((td(d,D>>J,w>>J,o,q,o,p)|0)==0?(J=c[(c[u>>2]|0)+13084>>2]|0,(td(d,D>>J,w>>J,s,q,o,p)|0)==0):0)v=35;else A=1}else v=35;do if((v|0)==35){if(C){J=c[(c[u>>2]|0)+13084>>2]|0;if(td(d,y>>J,w>>J,o,q,o,p)|0){A=1;break}J=c[(c[u>>2]|0)+13084>>2]|0;if(td(d,y>>J,w>>J,s,q,o,p)|0){A=1;break}}if(z){J=c[(c[u>>2]|0)+13084>>2]|0;if(td(d,t>>J,w>>J,o,q,o,p)|0){A=1;break}J=c[(c[u>>2]|0)+13084>>2]|0;if(td(d,t>>J,w>>J,s,q,o,p)|0){A=1;break}}A=0}while(0);do if(!x){if(A){c[r>>2]=c[q>>2];F=1}if(E){A=c[(c[u>>2]|0)+13084>>2]|0;A=ud(d,D>>A,w>>A,o,q,o,p)|0;if(A)break;A=c[(c[u>>2]|0)+13084>>2]|0;A=ud(d,D>>A,w>>A,s,q,o,p)|0}else 
A=0;if(C&(A|0)==0){A=c[(c[u>>2]|0)+13084>>2]|0;A=ud(d,y>>A,w>>A,o,q,o,p)|0;if(A)break;A=c[(c[u>>2]|0)+13084>>2]|0;A=ud(d,y>>A,w>>A,s,q,o,p)|0}if(z&(A|0)==0){A=c[(c[u>>2]|0)+13084>>2]|0;A=ud(d,t>>A,w>>A,o,q,o,p)|0;if(!A){A=c[(c[u>>2]|0)+13084>>2]|0;A=ud(d,t>>A,w>>A,s,q,o,p)|0}}}while(0);t=(F|0)!=0;if(t){c[j>>2]=c[r>>2];s=1}else s=0;do if(A){if(t){J=c[r>>2]|0;q=c[q>>2]|0;if((J&65535)<<16>>16==(q&65535)<<16>>16?(J>>>16&65535)<<16>>16==(q>>>16&65535)<<16>>16:0)break}else q=c[q>>2]|0;r=s+1|0;c[j+(s<<2)>>2]=q;if(r>>>0<2)s=r;else{J=m+(o<<2)|0;I=j+(n<<2)|0;I=c[I>>2]|0;b[J>>1]=I;b[J+2>>1]=I>>>16;i=k;return}}while(0);if(!((a[d+3035>>0]|0)!=0&(s|0)==(n|0))){J=m+(o<<2)|0;I=j+(n<<2)|0;I=c[I>>2]|0;b[J>>1]=I;b[J+2>>1]=I>>>16;i=k;return}if(!(vd(d,e,f,g,h,p,l,o)|0)){J=m+(o<<2)|0;I=j+(n<<2)|0;I=c[I>>2]|0;b[J>>1]=I;b[J+2>>1]=I>>>16;i=k;return}c[j+(n<<2)>>2]=c[l>>2];J=m+(o<<2)|0;I=j+(n<<2)|0;I=c[I>>2]|0;b[J>>1]=I;b[J+2>>1]=I>>>16;i=k;return}function td(d,f,g,h,j,k,l){d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0;o=i;n=c[d+3508>>2]|0;m=c[n+16>>2]|0;n=c[n+20>>2]|0;f=($(c[(c[d+200>>2]|0)+13156>>2]|0,g)|0)+f|0;if(!(a[m+(f*12|0)+10>>0]&1<<h)){d=0;i=o;return d|0}if((c[n+(h*196|0)+(a[m+(f*12|0)+h+8>>0]<<2)+64>>2]|0)!=(c[n+(k*196|0)+(l<<2)+64>>2]|0)){d=0;i=o;return d|0}d=m+(f*12|0)+(h<<2)|0;d=e[d>>1]|e[d+2>>1]<<16;b[j>>1]=d;b[j+2>>1]=d>>>16;d=1;i=o;return d|0}function ud(d,f,g,h,j,k,l){d=d|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0;m=i;o=d+3508|0;n=c[o>>2]|0;p=c[n+16>>2]|0;n=c[n+20>>2]|0;g=($(c[(c[d+200>>2]|0)+13156>>2]|0,g)|0)+f|0;if(!(a[p+(g*12|0)+10>>0]&1<<h)){g=0;i=m;return g|0}f=c[n+(k*196|0)+(l<<2)+128>>2]|0;if((c[n+(h*196|0)+(a[p+(g*12|0)+h+8>>0]<<2)+128>>2]|0)!=(f|0)){g=0;i=m;return g|0}n=p+(g*12|0)+(h<<2)|0;n=e[n>>1]|e[n+2>>1]<<16;b[j>>1]=n;b[j+2>>1]=n>>>16;if(f){g=1;i=m;return g|0}p=c[o>>2]|0;f=c[p+20>>2]|0;h=c[f+(h*196|0)+(a[(c[p+16>>2]|0)+(g*12|0)+h+8>>0]<<2)+64>>2]|0;k=c[f+(k*196|0)+(l<<2)+64>>2]|0;if((h|0)==(k|0)){g=1;i=m;return 
g|0}d=c[d+5816>>2]|0;l=(d|0)!=(h|0)?d-h|0:1;d=d-k|0;if((l+128|0)>>>0>255)k=l>>31^127;else k=l;l=k<<24>>24;if((d+128|0)>>>0>255)d=d>>31^127;g=(l|0)/2|0;d=($(d<<24>>24,(((k&255)<<24>>24>-2?g:0-g|0)+16384|0)/(l|0)|0)|0)+32>>6;if((d|0)<-4096)d=-4096;else d=(d|0)>4095?4095:d;k=$(n<<16>>16,d)|0;k=k+127+(k>>>31)|0;l=k>>8;if((l+32768|0)>>>0>65535)l=k>>31^32767;b[j>>1]=l;n=$(n>>16,d)|0;n=n+127+(n>>>31)|0;d=n>>8;if((d+32768|0)>>>0>65535)d=n>>31^32767;b[j+2>>1]=d;g=1;i=m;return g|0}function vd(d,e,f,g,h,j,k,l){d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0;s=i;i=i+32|0;o=s+12|0;m=s;p=d+200|0;u=c[p>>2]|0;r=c[u+13156>>2]|0;n=c[(c[d+3508>>2]|0)+36>>2]|0;if(!n){b[k>>1]=0;b[k+2>>1]=0>>>16;w=0;i=s;return w|0}t=c[n+16>>2]|0;q=c[n+32>>2]|0;v=g+e|0;w=h+f|0;if(!t){w=0;i=s;return w|0}x=c[u+13080>>2]|0;if(((f>>x|0)==(w>>x|0)?(w|0)<(c[u+13124>>2]|0):0)?(v|0)<(c[u+13120>>2]|0):0){v=v&-16;w=w&-16;if((a[d+140>>0]|0)==1)u=c[p>>2]|0;u=c[u+13084>>2]|0;u=t+((($(w>>u,r)|0)+(v>>u)|0)*12|0)|0;c[m+0>>2]=c[u+0>>2];c[m+4>>2]=c[u+4>>2];c[m+8>>2]=c[u+8>>2];u=qc(d,n,v,w)|0;c[o+0>>2]=c[m+0>>2];c[o+4>>2]=c[m+4>>2];c[o+8>>2]=c[m+8>>2];u=wd(d,o,j,k,l,q,u)|0;if(u){x=u;i=s;return x|0}}e=(g>>1)+e&-16;h=(h>>1)+f&-16;x=c[(c[p>>2]|0)+13084>>2]|0;x=t+((($(h>>x,r)|0)+(e>>x)|0)*12|0)|0;c[m+0>>2]=c[x+0>>2];c[m+4>>2]=c[x+4>>2];c[m+8>>2]=c[x+8>>2];x=qc(d,n,e,h)|0;c[o+0>>2]=c[m+0>>2];c[o+4>>2]=c[m+4>>2];c[o+8>>2]=c[m+8>>2];x=wd(d,o,j,k,l,q,x)|0;i=s;return x|0}function wd(b,d,e,f,g,h,j){b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;var k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0;m=i;k=c[(c[b+3508>>2]|0)+20>>2]|0;n=a[d+10>>0]|0;if(!(n<<24>>24)){r=0;i=m;return r|0}if(!(n&1)){r=xd(f,d+4|0,h,c[b+5816>>2]|0,k,g,e,j,1,a[d+9>>0]|0)|0;i=m;return r|0}if(n<<24>>24==1){r=xd(f,d,h,c[b+5816>>2]|0,k,g,e,j,0,a[d+8>>0]|0)|0;i=m;return r|0}else if(n<<24>>24==3){n=b+5816|0;o=c[k+192>>2]|0;a:do if((o|0)>0){p=c[n>>2]|0;q=0;while(1){if((c[k+(q<<2)+64>>2]|0)>(p|0)){p=1;break 
a}q=q+1|0;if((q|0)>=(o|0)){p=0;break}}}else p=0;while(0);o=c[k+388>>2]|0;b:do if((o|0)>0){q=c[n>>2]|0;r=0;while(1){if((c[k+(r<<2)+260>>2]|0)>(q|0))break b;r=r+1|0;if((r|0)>=(o|0)){l=21;break}}}else l=21;while(0);if((l|0)==21?(p|0)==0:0)if(!g){r=xd(f,d,h,c[n>>2]|0,k,0,e,j,0,a[d+8>>0]|0)|0;i=m;return r|0}else{r=xd(f,d+4|0,h,c[n>>2]|0,k,g,e,j,1,a[d+9>>0]|0)|0;i=m;return r|0}if((a[b+3051>>0]|0)==1){r=xd(f,d,h,c[n>>2]|0,k,g,e,j,0,a[d+8>>0]|0)|0;i=m;return r|0}else{r=xd(f,d+4|0,h,c[n>>2]|0,k,g,e,j,1,a[d+9>>0]|0)|0;i=m;return r|0}}else{r=0;i=m;return r|0}return 0}
+
+
+
+function Ra(a){a=a|0;var b=0;b=i;i=i+a|0;i=i+15&-16;return b|0}function Sa(){return i|0}function Ta(a){a=a|0;i=a}function Ua(a,b){a=a|0;b=b|0;if(!o){o=a;p=b}}function Va(b){b=b|0;a[k>>0]=a[b>>0];a[k+1>>0]=a[b+1>>0];a[k+2>>0]=a[b+2>>0];a[k+3>>0]=a[b+3>>0]}function Wa(b){b=b|0;a[k>>0]=a[b>>0];a[k+1>>0]=a[b+1>>0];a[k+2>>0]=a[b+2>>0];a[k+3>>0]=a[b+3>>0];a[k+4>>0]=a[b+4>>0];a[k+5>>0]=a[b+5>>0];a[k+6>>0]=a[b+6>>0];a[k+7>>0]=a[b+7>>0]}function Xa(a){a=a|0;D=a}function Ya(){return D|0}function Za(b,d){b=b|0;d=d|0;var e=0,f=0;e=i;if(!(a[(c[b+204>>2]|0)+43>>0]|0)){i=e;return}f=c[(c[b+200>>2]|0)+13128>>2]|0;d=(d|0)%(f|0)|0;if((d|0)!=2?!((f|0)==2&(d|0)==0):0){i=e;return}mf(c[b+152>>2]|0,c[b+136>>2]|0,199)|0;i=e;return}function _a(b,d){b=b|0;d=d|0;var e=0,f=0,g=0,h=0,j=0;e=i;g=b+204|0;f=c[g>>2]|0;if((c[(c[f+1668>>2]|0)+(c[b+3488>>2]<<2)>>2]|0)==(d|0)){$a(b);f=b+2437|0;if(a[f>>0]|0){j=c[g>>2]|0;if((a[j+42>>0]|0)!=0?(j=c[j+1676>>2]|0,(c[j+(d<<2)>>2]|0)!=(c[j+(d+ -1<<2)>>2]|0)):0)h=5}else h=5;if((h|0)==5)ab(b);if(a[b+2436>>0]|0){i=e;return}if(!(a[(c[g>>2]|0)+43>>0]|0)){i=e;return}g=c[(c[b+200>>2]|0)+13128>>2]|0;if((d|0)%(g|0)|0){i=e;return}if((g|0)==1){ab(b);i=e;return}if((a[f>>0]|0)!=1){i=e;return}mf(c[b+136>>2]|0,c[b+152>>2]|0,199)|0;i=e;return}if((a[f+42>>0]|0)!=0?(j=c[f+1676>>2]|0,(c[j+(d<<2)>>2]|0)!=(c[j+(d+ -1<<2)>>2]|0)):0){if((a[b+141>>0]|0)==1)bb(c[b+136>>2]|0);else $a(b);ab(b);f=c[g>>2]|0}if(!(a[f+43>>0]|0)){i=e;return}f=b+200|0;if((d|0)%(c[(c[f>>2]|0)+13128>>2]|0)|0){i=e;return}d=b+136|0;cb((c[d>>2]|0)+224|0)|0;if((a[b+141>>0]|0)==1)bb(c[d>>2]|0);else $a(b);if((c[(c[f>>2]|0)+13128>>2]|0)==1){ab(b);i=e;return}else{mf(c[d>>2]|0,c[b+152>>2]|0,199)|0;i=e;return}}function $a(a){a=a|0;var b=0,d=0,e=0,f=0,g=0,h=0;b=i;e=a+136|0;a=c[e>>2]|0;d=a+204|0;Zd(d,1);g=a+212|0;f=c[g>>2]|0;h=0-f&7;if(h){Zd(d,h);f=c[g>>2]|0}Vd((c[e>>2]|0)+224|0,(c[d>>2]|0)+((f|0)/8|0)|0,(7-f+(c[a+216>>2]|0)|0)/8|0);i=b;return}function ab(b){b=b|0;var 
e=0,f=0,g=0,h=0,j=0,k=0,l=0;g=i;f=c[b+2428>>2]|0;e=2-f|0;e=(a[b+3048>>0]|0)==0|(f|0)==2?e:e^3;f=b+3100|0;b=b+136|0;h=0;do{j=d[680+(e*199|0)+h>>0]|0;l=a[f>>0]|0;k=l<<24>>24;if(l<<24>>24<0)k=0;else k=(k|0)>51?51:k;j=((j<<3&120)+ -16+(($(k,((j>>>4)*5|0)+ -45|0)|0)>>4)<<1)+ -127|0;j=j>>31^j;if((j|0)>124)j=j&1|124;a[(c[b>>2]|0)+h>>0]=j;h=h+1|0}while((h|0)!=199);a[(c[b>>2]|0)+199>>0]=0;a[(c[b>>2]|0)+200>>0]=0;a[(c[b>>2]|0)+201>>0]=0;a[(c[b>>2]|0)+202>>0]=0;i=g;return}function bb(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;d=a+224|0;e=c[a+240>>2]|0;f=c[d>>2]|0;e=(f&1|0)==0?e:e+ -1|0;e=(f&511|0)==0?e:e+ -1|0;a=(c[a+244>>2]|0)-e|0;if((a|0)<0){i=b;return}Vd(d,e,a);i=b;return}function cb(a){a=a|0;var b=0,d=0,e=0,f=0,g=0;b=i;f=a+4|0;d=c[f>>2]|0;e=d+ -2|0;c[f>>2]=e;g=c[a>>2]|0;if((g|0)>=(e<<17|0)){g=(c[a+16>>2]|0)-(c[a+12>>2]|0)|0;i=b;return g|0}d=(d+ -258|0)>>>31;c[f>>2]=e<<d;g=g<<d;c[a>>2]=g;if(g&65535){g=0;i=b;return g|0}Nb(a);g=0;i=b;return g|0}function db(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=eb(a+224|0,a)|0;i=b;return a|0}function eb(b,e){b=b|0;e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0;f=i;h=d[e>>0]|0;j=b+4|0;k=c[j>>2]|0;l=d[3072+((k<<1&384)+(h|512))>>0]|0;k=k-l|0;m=k<<17;n=c[b>>2]|0;g=m-n>>31;c[b>>2]=n-(g&m);c[j>>2]=(g&l-k)+k;h=g^h;a[e>>0]=a[h+4224>>0]|0;e=h&1;h=c[j>>2]|0;g=d[3072+h>>0]|0;c[j>>2]=h<<g;g=c[b>>2]<<g;c[b>>2]=g;if(g&65535){i=f;return e|0}j=b+16|0;h=c[j>>2]|0;c[b>>2]=(((d[h+1>>0]|0)<<1|(d[h>>0]|0)<<9)+ -65535<<7-(d[3072+((g+ -1^g)>>15)>>0]|0))+g;if(h>>>0>=(c[b+20>>2]|0)>>>0){i=f;return e|0}c[j>>2]=h+2;i=f;return e|0}function fb(a){a=a|0;var b=0,d=0;b=i;a=a+136|0;d=c[a>>2]|0;if(!(eb(d+224|0,d+1|0)|0)){d=0;i=b;return d|0}d=(gb((c[a>>2]|0)+224|0)|0)==0;d=d?1:2;i=b;return d|0}function gb(a){a=a|0;var b=0,d=0,e=0;b=i;d=c[a>>2]<<1;c[a>>2]=d;if(!(d&65534)){Nb(a);d=c[a>>2]|0}e=c[a+4>>2]<<17;if((d|0)<(e|0)){e=0;i=b;return e|0}c[a>>2]=d-e;e=1;i=b;return e|0}function hb(a){a=a|0;var 
b=0,d=0;b=i;d=a+136|0;a=(gb((c[d>>2]|0)+224|0)|0)<<1;a=(gb((c[d>>2]|0)+224|0)|0|a)<<1;a=(gb((c[d>>2]|0)+224|0)|0|a)<<1;a=(gb((c[d>>2]|0)+224|0)|0|a)<<1;a=gb((c[d>>2]|0)+224|0)|0|a;i=b;return a|0}function ib(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;d=c[(c[a+200>>2]|0)+52>>2]|0;d=(d|0)>10?31:(1<<d+ -5)+ -1|0;e=a+136|0;if((d|0)>0)a=0;else{f=0;i=b;return f|0}while(1){f=a+1|0;if(!(gb((c[e>>2]|0)+224|0)|0)){d=4;break}if((f|0)<(d|0))a=f;else{a=f;d=4;break}}if((d|0)==4){i=b;return a|0}return 0}function jb(a){a=a|0;var b=0;b=i;a=gb((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function kb(a){a=a|0;var b=0,d=0;b=i;d=a+136|0;a=(gb((c[d>>2]|0)+224|0)|0)<<1;a=gb((c[d>>2]|0)+224|0)|0|a;i=b;return a|0}function lb(a){a=a|0;var b=0;b=i;a=cb((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function mb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+5|0)|0;i=b;return a|0}function nb(b,d,e,f,g){b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0;h=i;l=c[b+200>>2]|0;j=c[l+13140>>2]|0;l=(1<<c[l+13080>>2])+ -1|0;k=l&e;e=c[b+136>>2]|0;if((a[e+308>>0]|0)==0?(l&d|0)==0:0)d=0;else{d=f+ -1+($(j,g)|0)|0;d=(a[(c[b+7584>>2]|0)+d>>0]|0)!=0&1}if((a[e+309>>0]|0)==0&(k|0)==0){l=d;d=e+224|0;l=l+6|0;l=e+l|0;l=eb(d,l)|0;i=h;return l|0}l=($(j,g+ -1|0)|0)+f|0;l=((a[(c[b+7584>>2]|0)+l>>0]|0)!=0&1)+d|0;d=e+224|0;l=l+6|0;l=e+l|0;l=eb(d,l)|0;i=h;return l|0}function ob(a){a=a|0;var b=0,d=0,e=0,f=0,g=0,h=0;b=i;a=a+136|0;g=9;e=0;while(1){h=c[a>>2]|0;f=e;e=e+1|0;if(!(eb(h+224|0,h+g|0)|0)){e=f;g=0;break}if((e|0)>=5){f=0;g=0;d=4;break}else g=10}do if((d|0)==4){while(1){d=0;if(!(gb((c[a>>2]|0)+224|0)|0)){d=5;break}g=(1<<f)+g|0;f=f+1|0;if((f|0)<31)d=4;else break}if((d|0)==5)if(!f)break;do{f=f+ -1|0;g=((gb((c[a>>2]|0)+224|0)|0)<<f)+g|0}while((f|0)!=0)}while(0);i=b;return g+e|0}function pb(a){a=a|0;var b=0;b=i;a=gb((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function qb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+176|0)|0;i=b;return a|0}function rb(b){b=b|0;var 
d=0,e=0,f=0,g=0,h=0;d=i;e=a[(c[b+204>>2]|0)+1633>>0]|0;e=(e&255)<5?5:e&255;f=b+136|0;if(!e){g=0;i=d;return g|0}else b=0;while(1){h=c[f>>2]|0;g=b+1|0;if(!(eb(h+224|0,h+177|0)|0)){e=4;break}if((g|0)<(e|0))b=g;else{b=g;e=4;break}}if((e|0)==4){i=d;return b|0}return 0}function sb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+12|0)|0;i=b;return a|0}function tb(b,e,f,g){b=b|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0;j=i;k=c[b+200>>2]|0;n=(1<<c[k+13080>>2])+ -1|0;l=n&g;m=c[k+13064>>2]|0;h=f>>m;m=g>>m;g=c[b+136>>2]|0;if((a[g+308>>0]|0)==0?(n&f|0)==0:0)f=0;else{f=h+ -1+($(c[k+13140>>2]|0,m)|0)|0;f=d[(c[b+7588>>2]|0)+f>>0]|0}if((a[g+309>>0]|0)==0&(l|0)==0){n=0;m=(f|0)>(e|0);m=m&1;n=(n|0)>(e|0);n=n&1;f=g+224|0;m=m|2;n=m+n|0;n=g+n|0;n=eb(f,n)|0;i=j;return n|0}n=($(c[k+13140>>2]|0,m+ -1|0)|0)+h|0;n=d[(c[b+7588>>2]|0)+n>>0]|0;m=(f|0)>(e|0);m=m&1;n=(n|0)>(e|0);n=n&1;f=g+224|0;m=m|2;n=m+n|0;n=g+n|0;n=eb(f,n)|0;i=j;return n|0}function ub(b,d){b=b|0;d=d|0;var e=0,f=0,g=0;e=i;f=b+136|0;g=c[f>>2]|0;do if(!(eb(g+224|0,g+13|0)|0)){b=c[b+200>>2]|0;if((c[b+13064>>2]|0)==(d|0)){b=c[f>>2]|0;if((c[b+31244>>2]|0)==1){f=3;break}if(eb(b+224|0,b+14|0)|0){f=1;break}if((d|0)==3){f=2;break}f=c[f>>2]|0;f=(eb(f+224|0,f+15|0)|0)==0;f=f?3:2;break}g=(a[b+12940>>0]|0)==0;d=c[f>>2]|0;d=(eb(d+224|0,d+14|0)|0)!=0;if(g){f=d?1:2;break}b=c[f>>2]|0;b=(eb(b+224|0,b+16|0)|0)!=0;if(d){if(b){f=1;break}f=(gb((c[f>>2]|0)+224|0)|0)==0;f=f?4:5;break}else{if(b){f=2;break}f=(gb((c[f>>2]|0)+224|0)|0)==0;f=f?6:7;break}}else f=0;while(0);i=e;return f|0}function vb(a){a=a|0;var b=0;b=i;a=cb((c[a+136>>2]|0)+224|0)|0;i=b;return a|0}function wb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+17|0)|0;i=b;return a|0}function xb(a){a=a|0;var b=0,d=0,e=0;b=i;e=a+136|0;d=0;while(1){a=d+1|0;if(!(gb((c[e>>2]|0)+224|0)|0)){a=d;d=4;break}if((a|0)<2)d=a;else{d=4;break}}if((d|0)==4){i=b;return a|0}return 0}function yb(a){a=a|0;var 
b=0,d=0;b=i;d=a+136|0;a=(gb((c[d>>2]|0)+224|0)|0)<<1;a=(gb((c[d>>2]|0)+224|0)|0|a)<<1;a=(gb((c[d>>2]|0)+224|0)|0|a)<<1;a=(gb((c[d>>2]|0)+224|0)|0|a)<<1;a=gb((c[d>>2]|0)+224|0)|0|a;i=b;return a|0}function zb(a){a=a|0;var b=0,d=0;b=i;a=a+136|0;d=c[a>>2]|0;if(!(eb(d+224|0,d+18|0)|0)){d=4;i=b;return d|0}d=(gb((c[a>>2]|0)+224|0)|0)<<1;d=gb((c[a>>2]|0)+224|0)|0|d;i=b;return d|0}function Ab(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;d=a+136|0;e=c[d>>2]|0;e=eb(e+224|0,e+21|0)|0;if(!e){f=0;i=b;return f|0}a=a+3080|0;if(e>>>0>=((c[a>>2]|0)+ -1|0)>>>0){f=e;i=b;return f|0}while(1){f=e+1|0;if(!(gb((c[d>>2]|0)+224|0)|0)){d=5;break}if(f>>>0<((c[a>>2]|0)+ -1|0)>>>0)e=f;else{e=f;d=5;break}}if((d|0)==5){i=b;return e|0}return 0}function Bb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+20|0)|0;i=b;return a|0}function Cb(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0,g=0;e=i;g=a+136|0;a=c[g>>2]|0;f=a+224|0;if((d+b|0)==12){g=eb(f,a+26|0)|0;i=e;return g|0}if(eb(f,a+((c[a+31232>>2]|0)+22)|0)|0){g=2;i=e;return g|0}g=c[g>>2]|0;g=eb(g+224|0,g+26|0)|0;i=e;return g|0}function Db(a,b){a=a|0;b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0;d=i;b=b+ -1|0;e=(b|0)>2;f=e?2:b;a=a+136|0;if((f|0)>0)h=0;else{h=0;i=d;return h|0}while(1){j=c[a>>2]|0;g=h+1|0;if(!(eb(j+224|0,j+(h+27)|0)|0)){g=h;break}if((g|0)<(f|0))h=g;else break}if((g|0)!=2|e^1){j=g;i=d;return j|0}else e=2;while(1){f=e+1|0;if(!(gb((c[a>>2]|0)+224|0)|0)){b=7;break}if((f|0)<(b|0))e=f;else{e=f;b=7;break}}if((b|0)==7){i=d;return e|0}return 0}function Eb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+35|0)|0;i=b;return a|0}function Fb(a){a=a|0;var b=0;b=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+36|0)|0;i=b;return a|0}function Gb(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+(42-b)|0)|0;i=d;return a|0}function Hb(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+(b+42)|0)|0;i=d;return a|0}function Ib(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+((b|0)==0|40)|0)|0;i=d;return a|0}function Jb(a,b){a=a|0;b=b|0;var 
d=0,e=0,f=0,g=0;d=i;a=a+136|0;f=(b<<2)+166|0;e=0;while(1){g=c[a>>2]|0;b=e+1|0;if(!(eb(g+224|0,g+(f+e)|0)|0)){b=e;a=4;break}if((b|0)<4)e=b;else{a=4;break}}if((a|0)==4){i=d;return b|0}return 0}function Kb(a,b){a=a|0;b=b|0;var d=0;d=i;a=c[a+136>>2]|0;a=eb(a+224|0,a+(b+174)|0)|0;i=d;return a|0}function Lb(f,g,h,j,k,l){f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0,qa=0,ra=0,sa=0,ta=0,ua=0,va=0,wa=0,xa=0,ya=0,za=0,Aa=0,Ba=0,Ca=0,Da=0,Ga=0,Ha=0,Ia=0,Ja=0,La=0,Ma=0,Na=0,Oa=0,Pa=0,Qa=0;m=i;i=i+96|0;w=m+24|0;t=m+8|0;v=m;s=f+136|0;p=c[s>>2]|0;x=c[f+160>>2]|0;n=c[x+(l<<2)+32>>2]|0;r=f+200|0;o=c[r>>2]|0;W=$(h>>c[o+(l<<2)+13180>>2],n)|0;o=(c[x+(l<<2)>>2]|0)+(W+(g>>c[o+(l<<2)+13168>>2]<<c[o+56>>2]))|0;W=(l|0)!=0;g=p+320|0;h=W?p+11680|0:g;x=w+0|0;q=x+64|0;do{a[x>>0]=0;x=x+1|0}while((x|0)<(q|0));V=1<<j;y=(l|0)==0;x=c[(y?p+288|0:p+292|0)>>2]|0;q=V<<j;jf(h|0,0,q<<1|0)|0;z=p+31256|0;if(!(a[z>>0]|0)){A=a[p+272>>0]|0;C=f+204|0;Pa=c[C>>2]|0;if((a[Pa+21>>0]|0)!=0?(d[Pa+1629>>0]|0)>=(j|0):0){I=c[s>>2]|0;I=eb(I+224|0,I+(W&1|46)|0)|0}else I=0;if(y){B=c[r>>2]|0;E=B;B=(c[B+13192>>2]|0)+A|0}else{B=c[C>>2]|0;if((l|0)==1)B=(c[f+3060>>2]|0)+(c[B+28>>2]|0)+(a[p+302>>0]|0)|0;else B=(c[f+3064>>2]|0)+(c[B+32>>2]|0)+(a[p+303>>0]|0)|0;B=B+A|0;E=c[r>>2]|0;A=c[E+13192>>2]|0;F=0-A|0;if((B|0)<(F|0))B=F;else B=(B|0)>57?57:B;do if((c[E+4>>2]|0)==1){if((B|0)>=30)if((B|0)>43){B=B+ -6|0;break}else{B=c[176+(B+ -30<<2)>>2]|0;break}}else B=(B|0)>51?51:B;while(0);B=A+B|0}A=(c[E+52>>2]|0)+j|0;H=A+ -5|0;A=1<<A+ -6;B=d[168+(d[232+B>>0]|0)>>0]<<d[312+B>>0];if((a[E+634>>0]|0)!=0?!((I|0)!=0&(j|0)>2):0){F=c[C>>2]|0;F=(a[F+68>>0]|0)==0?E+635|0:F+69|0;C=((c[p+31244>>2]|0)!=1?3:0)+l|0;E=F+((j+ -2|0)*384|0)+(C<<6)|0;if((j|0)>3)la=a[F+((j+ -4|0)*6|0)+C+1536>>0]|0;else 
la=16}else{la=16;E=0}}else{A=0;la=0;B=0;E=0;H=0;I=0}F=p+31244|0;do if((c[F>>2]|0)==0?(c[(c[r>>2]|0)+13108>>2]|0)!=0:0){if((I|0)==0?(a[z>>0]|0)==0:0){C=0;G=0;break}G=c[s>>2]|0;C=W&1;G=eb(G+224|0,G+(C|48)|0)|0;if(G){Pa=c[s>>2]|0;C=eb(Pa+224|0,Pa+(C|50)|0)|0}else{C=0;G=0}}else{C=0;G=0}while(0);L=(j<<1)+ -1|0;if(y){J=(j*3|0)+ -6+(j+ -1>>2)|0;M=j+1>>2}else{J=15;M=j+ -2|0}if((L|0)>0){O=J+52|0;K=0;while(1){Pa=c[s>>2]|0;N=K+1|0;if(!(eb(Pa+224|0,Pa+(O+(K>>M))|0)|0))break;if((N|0)<(L|0))K=N;else{K=N;break}}N=J+70|0;J=0;while(1){Pa=c[s>>2]|0;O=J+1|0;if(!(eb(Pa+224|0,Pa+(N+(J>>M))|0)|0))break;if((O|0)<(L|0))J=O;else{J=O;break}}if((K|0)>3){L=(K>>1)+ -1|0;N=gb((c[s>>2]|0)+224|0)|0;if((L|0)>1){M=1;do{N=gb((c[s>>2]|0)+224|0)|0|N<<1;M=M+1|0}while((M|0)!=(L|0))}K=N+((K&1|2)<<L)|0}if((J|0)>3){M=(J>>1)+ -1|0;N=gb((c[s>>2]|0)+224|0)|0;if((M|0)>1){L=1;do{N=gb((c[s>>2]|0)+224|0)|0|N<<1;L=L+1|0}while((L|0)!=(M|0))}L=K;O=N+((J&1|2)<<M)|0}else{L=K;O=J}}else{L=0;O=0}do if((k|0)!=2){P=L>>2;Q=O>>2;if((k|0)==1){J=L;K=O;S=d[536+(O<<3)+L>>0]|0;L=488;M=504;N=496;O=520;break}else if(k){N=L;M=O;u=55;break}S=d[(L&3)+(392+((O&3)<<2))>>0]|0;if((V|0)==8){J=L;K=O;S=(d[416+(Q<<1)+P>>0]<<4)+S|0;L=496;M=8;N=488;O=24;break}else if((V|0)==16){J=L;K=O;S=(d[392+(Q<<2)+P>>0]<<4)+S|0;L=8;M=8;N=24;O=24;break}else if((V|0)==4){J=L;K=O;L=408;M=8;N=408;O=24;break}else{J=L;K=O;S=(d[424+(Q<<3)+P>>0]<<4)+S|0;L=40;M=8;N=104;O=24;break}}else{N=O;M=L;P=O>>2;Q=L>>2;u=55}while(0);if((u|0)==55){J=N;K=M;S=d[536+(N<<3)+M>>0]|0;L=496;M=520;N=488;O=504}R=S+1|0;T=S>>4;if((T|0)>-1){S=(1<<j+ -2)+ -1|0;U=(l|0)>0;l=U?90:88;V=V+ 
-1>>2;W=W?27:0;Z=(j|0)==2;Y=W+3|0;X=(j|0)==3;k=(k|0)==0?9:15;ja=y?0:27;ga=(I|0)==0;da=y?42:43;aa=y?40:41;ea=y?2:0;ia=(G|0)==0;ha=(x&-17|0)!=10&ia;ka=f+204|0;ca=((B|0)<0)<<31>>31;ba=((A|0)<0)<<31>>31;_=(I|0)!=0&(j|0)>2;fa=(j|0)<4;pa=la&255;ma=(y&1)<<1;la=ma|1;ta=1;oa=T;ra=0;Aa=16;while(1){qa=oa<<4;za=a[L+oa>>0]|0;wa=za&255;ya=a[N+oa>>0]|0;xa=ya&255;na=(oa|0)>0;if((oa|0)<(T|0)&na){if((wa|0)<(S|0))sa=d[w+(wa+1<<3)+xa>>0]|0;else sa=0;if((xa|0)<(S|0))sa=(d[xa+1+(w+(wa<<3))>>0]|0)+sa|0;Ba=c[s>>2]|0;Ba=(eb(Ba+224|0,Ba+(((sa|0)>1?1:sa)+l)|0)|0)&255;a[w+(wa<<3)+xa>>0]=Ba;ua=1}else{if(!((wa|0)==(P|0)&(xa|0)==(Q|0)))if(!(za<<24>>24))Ba=ya<<24>>24==0&1;else Ba=0;else Ba=1;a[w+(wa<<3)+xa>>0]=Ba;ua=0}qa=R-qa|0;sa=(oa|0)==(T|0);if(sa){a[t>>0]=qa+255;va=qa+ -2|0;qa=1}else{va=15;qa=0}if((wa|0)<(V|0))Ca=(a[w+(wa+1<<3)+xa>>0]|0)!=0&1;else Ca=0;if((xa|0)<(V|0))Ca=((a[xa+1+(w+(wa<<3))>>0]|0)!=0&1)<<1|Ca;do if(Ba<<24>>24!=0&(va|0)>-1){if(!(c[(c[r>>2]|0)+13100>>2]|0))if(Z){ya=600;za=W}else u=79;else if(ga){Ba=(a[z>>0]|0)!=0;if(Ba|Z){ya=Ba?664:600;za=Ba?aa:W}else u=79}else{ya=664;za=aa}do if((u|0)==79){u=0;Ba=(Ca<<4)+616|0;if(!y){ya=Ba;za=W+(X?9:12)|0;break}za=(ya|za)<<24>>24==0?W:Y;if(X){ya=Ba;za=za+k|0;break}else{ya=Ba;za=za+21|0;break}}while(0);if((va|0)>0){Ba=za+92|0;do{Pa=c[s>>2]|0;if(eb(Pa+224|0,Pa+(Ba+(d[ya+((d[O+va>>0]<<2)+(d[M+va>>0]|0))>>0]|0))|0)|0){a[t+(qa&255)>>0]=va;ua=0;qa=qa+1<<24>>24}va=va+ -1|0}while((va|0)>0)}if(ua){a[t+(qa&255)>>0]=0;ua=qa+1<<24>>24;break}if(c[(c[r>>2]|0)+13100>>2]|0)if(ga?(a[z>>0]|0)==0:0)u=93;else ua=da;else u=93;if((u|0)==93){u=0;ua=(oa|0)==0?ja:za+2|0}Pa=c[s>>2]|0;if((eb(Pa+224|0,Pa+(ua+92)|0)|0)==1){a[t+(qa&255)>>0]=0;ua=qa+1<<24>>24}else ua=qa}else ua=qa;while(0);qa=ua&255;a:do if(ua<<24>>24){ua=na?ea:0;if(!(c[(c[r>>2]|0)+13116>>2]|0))Ia=0;else{if(ga?(a[z>>0]|0)==0:0)ra=ma;else 
ra=la;Ia=(d[p+ra+199>>0]|0)>>>2}va=ua|(ta|0)==0&(sa^1)&1;Ga=a[t>>0]|0;ya=Ga&255;za=qa>>>0>8?8:qa;if(!za){sa=-1;ta=1}else{Ba=va<<2;sa=-1;ta=1;ua=0;do{Pa=ta+Ba|0;Oa=c[s>>2]|0;Pa=(eb(Oa+224|0,Oa+((U?Pa+16|0:Pa)+136)|0)|0)&255;a[v+ua>>0]=Pa;if(!(Pa<<24>>24))ta=((ta+ -1|0)>>>0<2&1)+ta|0;else{sa=(sa|0)==-1?ua:sa;ta=0}ua=ua+1|0}while((ua|0)<(za|0))}za=qa+ -1|0;ua=a[t+za>>0]|0;Ba=ua&255;b:do if(!(a[z>>0]|0)){do if((c[F>>2]|0)==1){if((c[(c[r>>2]|0)+13104>>2]|0)==0|ga){u=113;break}if(!ha){ya=0;break b}}else u=113;while(0);if((u|0)==113?(u=0,!ia):0){ya=0;break}ya=(ya-Ba|0)>3&1}else ya=0;while(0);if((sa|0)!=-1){Oa=c[s>>2]|0;Oa=eb(Oa+224|0,Oa+((U?va|4:va)|160)|0)|0;Pa=v+sa|0;a[Pa>>0]=(d[Pa>>0]|0)+Oa}va=(ya|0)==0;if((a[(c[ka>>2]|0)+4>>0]|0)==0|va){za=0;ya=0;do{ya=gb((c[s>>2]|0)+224|0)|0|ya<<1;za=za+1|0}while((za|0)<(qa|0));Ca=ya<<16-qa}else{ya=za&255;if(!((za&255)<<24>>24))Ba=0;else{za=0;Ba=0;do{Ba=gb((c[s>>2]|0)+224|0)|0|Ba<<1;za=za+1|0}while((za|0)<(ya|0))}Ca=Ba<<17-qa}wa=wa<<2;xa=xa<<2;ya=p+ra+199|0;za=0;Ja=0;Da=Aa;Ha=0;while(1){Aa=Ga&255;Ba=(d[M+Aa>>0]|0)+wa|0;Aa=(d[O+Aa>>0]|0)+xa|0;c:do if((za|0)<8){La=(d[v+za>>0]|0)+1|0;Pa=(za|0)==(sa|0);if((La|0)==((Pa?3:2)|0)&0==((Pa?0:0)|0))Ma=0;else{Ma=0;break}while(1){Na=Ma+1|0;if(!(gb((c[s>>2]|0)+224|0)|0)){u=128;break}if((Na|0)<31)Ma=Na;else{u=132;break}}do if((u|0)==128){u=0;if((Ma|0)>=3){Na=Ma;u=132;break}if((Ia|0)>0){Na=0;Oa=0;do{Oa=gb((c[s>>2]|0)+224|0)|0|Oa<<1;Na=Na+1|0}while((Na|0)!=(Ia|0))}else Oa=0;Na=Oa+(Ma<<Ia)|0}while(0);if((u|0)==132){u=0;Ma=Na+ -3|0;if((Ma+Ia|0)>0){Oa=Ia+ -3+Na|0;Na=0;Pa=0;do{Pa=gb((c[s>>2]|0)+224|0)|0|Pa<<1;Na=Na+1|0}while((Na|0)!=(Oa|0))}else Pa=0;Na=Pa+((1<<Ma)+2<<Ia)|0}La=gf(Na|0,((Na|0)<0)<<31>>31|0,La|0,0)|0;Ma=D;Oa=3<<Ia;Qa=((Oa|0)<0)<<31>>31;Pa=c[(c[r>>2]|0)+13116>>2]|0;do if((Ma|0)>(Qa|0)|(Ma|0)==(Qa|0)&La>>>0>Oa>>>0){Oa=Ia+1|0;if(Pa){Ia=Oa;break}Ia=(Ia|0)>3?4:Oa;break 
c}while(0);if(!((Pa|0)!=0&(Ja|0)==0))break;Oa=a[ya>>0]|0;Ja=(Oa&255)>>>2;if((Na|0)>=(3<<Ja|0)){a[ya>>0]=Oa+1<<24>>24;Ja=1;break}if((Na<<1|0)>=(1<<Ja|0)|Oa<<24>>24==0){Ja=1;break}a[ya>>0]=Oa+ -1<<24>>24;Ja=1}else{La=0;while(1){Ma=La+1|0;if(!(gb((c[s>>2]|0)+224|0)|0)){u=146;break}if((Ma|0)<31)La=Ma;else{u=150;break}}do if((u|0)==146){u=0;if((La|0)>=3){Ma=La;u=150;break}if((Ia|0)>0){Ma=0;Na=0;do{Na=gb((c[s>>2]|0)+224|0)|0|Na<<1;Ma=Ma+1|0}while((Ma|0)!=(Ia|0))}else Na=0;Na=Na+(La<<Ia)|0}while(0);if((u|0)==150){u=0;La=Ma+ -3|0;if((La+Ia|0)>0){Ma=Ia+ -3+Ma|0;Na=0;Oa=0;do{Oa=gb((c[s>>2]|0)+224|0)|0|Oa<<1;Na=Na+1|0}while((Na|0)!=(Ma|0))}else Oa=0;Na=Oa+((1<<La)+2<<Ia)|0}La=Na+1|0;Ma=((La|0)<0)<<31>>31;Pa=c[(c[r>>2]|0)+13116>>2]|0;do if((Na|0)>=(3<<Ia|0)){Oa=Ia+1|0;if(Pa){Ia=Oa;break}Ia=(Ia|0)>3?4:Oa;break c}while(0);if(!((Pa|0)!=0&(Ja|0)==0))break;Oa=a[ya>>0]|0;Ja=(Oa&255)>>>2;if((Na|0)>=(3<<Ja|0)){a[ya>>0]=Oa+1<<24>>24;Ja=1;break}if((Na<<1|0)>=(1<<Ja|0)|Oa<<24>>24==0){Ja=1;break}a[ya>>0]=Oa+ -1<<24>>24;Ja=1}while(0);do if(!((a[(c[ka>>2]|0)+4>>0]|0)==0|va)){Ha=gf(La|0,Ma|0,Ha|0,0)|0;if(Ga<<24>>24!=ua<<24>>24)break;Qa=(Ha&1|0)==0;Pa=ff(0,0,La|0,Ma|0)|0;La=Qa?La:Pa;Ma=Qa?Ma:D}while(0);Qa=(Ca&32768|0)==0;Ga=ff(0,0,La|0,Ma|0)|0;Ga=Qa?La:Ga;La=Qa?Ma:D;Ca=Ca<<1&131070;Ma=Ga&65535;do if(!(a[z>>0]|0)){do if(!((a[(c[r>>2]|0)+634>>0]|0)==0|_)){if(!((Aa|Ba|0)!=0|fa)){Da=pa;break}if((j|0)==3)Da=(Aa<<3)+Ba|0;else if((j|0)==4)Da=(Aa>>>1<<3)+(Ba>>>1)|0;else if((j|0)==5)Da=(Aa>>>2<<3)+(Ba>>>2)|0;else Da=(Aa<<2)+Ba|0;Da=d[E+Da>>0]|0}while(0);Ga=rf(Ga|0,La|0,B|0,ca|0)|0;Ga=rf(Ga|0,D|0,Da|0,((Da|0)<0)<<31>>31|0)|0;Ga=gf(Ga|0,D|0,A|0,ba|0)|0;Ga=ef(Ga|0,D|0,H|0)|0;La=D;if((La|0)<0){Ma=(Ga&-32768|0)==-32768&(La&268435455|0)==268435455?Ga&65535:-32768;break}else{Ma=La>>>0>0|(La|0)==0&Ga>>>0>32767?32767:Ga&65535;break}}while(0);b[h+((Aa<<j)+Ba<<1)>>1]=Ma;za=za+1|0;if((za|0)>=(qa|0)){Aa=Da;break a}Ga=a[t+za>>0]|0}}while(0);if(na)oa=oa+ -1|0;else break}}do 
if(!(a[z>>0]|0)){if(I){if(((j|0)==2?(c[(c[r>>2]|0)+13096>>2]|0)!=0:0)?(c[F>>2]|0)==1:0){s=0;do{Oa=h+(15-s<<1)|0;Pa=b[Oa>>1]|0;Qa=h+(s<<1)|0;b[Oa>>1]=b[Qa>>1]|0;b[Qa>>1]=Pa;s=s+1|0}while((s|0)!=8)}s=j&65535;Fa[c[f+5872>>2]&15](h,s);if(!G){if(!(c[(c[r>>2]|0)+13104>>2]|0))break;if((c[F>>2]|0)!=1)break;if((x&-17|0)!=10)break;C=(x|0)==26&1}Ka[c[f+5876>>2]&7](h,s,C);break}if(y&(c[F>>2]|0)==1&(j|0)==2){Ea[c[f+5880>>2]&7](h);break}r=(J|0)>(K|0)?J:K;if(!r){Ea[c[f+(j+ -2<<2)+5900>>2]&7](h);break}s=K+4+J|0;do if((r|0)>=4){if((r|0)<8){s=(s|0)<8?s:8;break}if((r|0)<12)s=(s|0)<24?s:24}else s=(s|0)<4?s:4;while(0);Fa[c[f+(j+ -2<<2)+5884>>2]&15](h,s)}else{r=(c[(c[r>>2]|0)+13104>>2]|0)==0;if(!G){if(r)break;if((x&-17|0)==10)u=185;else break}else if(!r)u=185;if((u|0)==185)C=(x|0)==26&1;Ka[c[f+5876>>2]&7](h,j&65535,C)}while(0);if(!(a[p+304>>0]|0)){Qa=j+ -2|0;Qa=f+(Qa<<2)+5856|0;Qa=c[Qa>>2]|0;Ka[Qa&7](o,h,n);i=m;return}if((q|0)<=0){Qa=j+ -2|0;Qa=f+(Qa<<2)+5856|0;Qa=c[Qa>>2]|0;Ka[Qa&7](o,h,n);i=m;return}p=c[p+284>>2]|0;r=0;do{Qa=h+(r<<1)|0;b[Qa>>1]=(($(b[g+(r<<1)>>1]|0,p)|0)>>>3)+(e[Qa>>1]|0);r=r+1|0}while((r|0)!=(q|0));Qa=j+ -2|0;Qa=f+(Qa<<2)+5856|0;Qa=c[Qa>>2]|0;Ka[Qa&7](o,h,n);i=m;return}function Mb(a,d,e,f){a=a|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0;f=i;e=a+136|0;a=c[e>>2]|0;d=eb(a+224|0,a+31|0)|0;g=c[e>>2]|0;g=eb(g+224|0,g+31|0)|0;if(!d)d=0;else{l=c[e>>2]|0;d=(eb(l+224|0,l+34|0)|0)+d|0}if(!g)g=0;else{l=c[e>>2]|0;g=(eb(l+224|0,l+34|0)|0)+g|0}if((d|0)==2){k=1;j=2;while(1){if(!(gb((c[e>>2]|0)+224|0)|0)){d=7;break}j=(1<<k)+j|0;k=k+1|0;if((k|0)>=31){d=9;break}}if((d|0)==7){h=c[e>>2]|0;l=h+224|0;if(!k)k=l;else{h=l;d=10}}else if((d|0)==9){h=(c[e>>2]|0)+224|0;d=10}if((d|0)==10)while(1){k=k+ -1|0;j=((gb(h)|0)<<k)+j|0;h=c[e>>2]|0;d=h+224|0;if(!k){k=d;break}else{h=d;d=10}}d=c[k>>2]<<1;c[k>>2]=d;if(!(d&65534)){Nb(k);d=c[k>>2]|0}m=c[h+228>>2]<<17;h=d-m|0;l=h>>31;c[k>>2]=(l&m)+h;b[a+31272>>1]=(l^0-j)-l}else 
if((d|0)==1){h=c[e>>2]|0;d=h+224|0;j=c[d>>2]<<1;c[d>>2]=j;if(!(j&65534)){Nb(d);j=c[d>>2]|0}k=c[h+228>>2]<<17;l=j-k|0;m=l>>31;c[d>>2]=(m&k)+l;b[a+31272>>1]=(m^65535)-m}else if(!d)b[a+31272>>1]=0;if((g|0)==1){d=c[e>>2]|0;e=d+224|0;g=c[e>>2]<<1;c[e>>2]=g;if(!(g&65534)){Nb(e);g=c[e>>2]|0}k=c[d+228>>2]<<17;l=g-k|0;m=l>>31;c[e>>2]=(m&k)+l;b[a+31274>>1]=(m^65535)-m;i=f;return}else if(!g){b[a+31274>>1]=0;i=f;return}else if((g|0)==2){h=1;j=2;while(1){if(!(gb((c[e>>2]|0)+224|0)|0)){d=20;break}j=(1<<h)+j|0;h=h+1|0;if((h|0)>=31){d=22;break}}if((d|0)==20){k=c[e>>2]|0;g=k+224|0;if(!h)e=k;else d=23}else if((d|0)==22){g=(c[e>>2]|0)+224|0;d=23}if((d|0)==23)while(1){h=h+ -1|0;j=((gb(g)|0)<<h)+j|0;d=c[e>>2]|0;g=d+224|0;if(!h){e=d;break}else d=23}d=c[g>>2]<<1;c[g>>2]=d;if(!(d&65534)){Nb(g);d=c[g>>2]|0}k=c[e+228>>2]<<17;l=d-k|0;m=l>>31;c[g>>2]=(m&k)+l;b[a+31274>>1]=(m^0-j)-m;i=f;return}else{i=f;return}}function Nb(a){a=a|0;var b=0,e=0,f=0;b=i;f=a+16|0;e=c[f>>2]|0;c[a>>2]=(c[a>>2]|0)+ -65535+((d[e+1>>0]|0)<<1|(d[e>>0]|0)<<9);if(e>>>0>=(c[a+20>>2]|0)>>>0){i=b;return}c[f>>2]=e+2;i=b;return}function Ob(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0;f=i;h=b+136|0;n=c[h>>2]|0;g=b+200|0;j=c[g>>2]|0;m=c[j+13080>>2]|0;q=(1<<m)+ -1|0;m=-1<<m-(c[(c[b+204>>2]|0)+24>>2]|0);o=m&d;p=m&e;k=c[j+13140>>2]|0;j=c[j+13064>>2]|0;l=o>>j;j=p>>j;if(!(q&d))o=0;else o=(o&q|0)!=0;if(!(q&e))p=0;else p=(p&q|0)!=0;q=n+203|0;if((a[q>>0]|0)==0?(m&(e|d)|0)!=0:0)d=c[n+276>>2]|0;else{a[q>>0]=(a[n+300>>0]|0)==0&1;d=a[b+3100>>0]|0}if(o){e=l+ -1+($(j,k)|0)|0;e=a[(c[b+7568>>2]|0)+e>>0]|0}else e=d;if(p){d=($(j+ -1|0,k)|0)+l|0;d=a[(c[b+7568>>2]|0)+d>>0]|0}b=e+1+d>>1;h=c[h>>2]|0;j=c[h+280>>2]|0;if(!j){a[h+272>>0]=b;i=f;return}g=c[(c[g>>2]|0)+13192>>2]|0;b=j+52+b+(g<<1)|0;if((b|0)>0)j=b;else j=-52-g+1+b|0;a[h+272>>0]=b-g-j+((j|0)%(g+52|0)|0);i=f;return}function Pb(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var 
g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0;k=i;t=c[b+136>>2]|0;r=b+200|0;v=c[r>>2]|0;g=c[v+13084>>2]|0;p=c[v+13072>>2]|0;h=c[v+13156>>2]|0;l=c[v+13148>>2]|0;o=b+3508|0;u=c[o>>2]|0;j=c[u+16>>2]|0;q=$(h,e>>g)|0;m=d>>g;n=(a[j+((q+m|0)*12|0)+10>>0]|0)==0;do if((e|0)>0&(e&7|0)==0){if(((a[b+3050>>0]|0)==0?(c[t+31312>>2]&4|0)!=0:0)?((e|0)%(1<<c[v+13080>>2]|0)|0|0)==0:0)break;if((a[(c[b+204>>2]|0)+53>>0]|0)==0?(w=c[t+31312>>2]|0,(w&8|0)!=0):0){if(!((e|0)%(1<<c[v+13080>>2]|0)|0))break}else w=c[t+31312>>2]|0;if(!(w&4)){u=c[u+20>>2]|0;x=e+ -1|0}else{x=e+ -1|0;u=qc(b,u,d,x)|0}w=1<<f;if((w|0)>0){v=$(h,x>>g)|0;B=$(l,x>>p)|0;A=b+7596|0;C=$(l,e>>p)|0;x=b+5840|0;y=b+7572|0;D=0;do{z=D+d|0;J=z>>g;G=z>>p;I=J+v|0;E=j+(I*12|0)|0;J=J+q|0;F=j+(J*12|0)|0;K=c[A>>2]|0;H=a[K+(G+B)>>0]|0;G=a[K+(G+C)>>0]|0;if((a[j+(J*12|0)+10>>0]|0)!=0?(a[j+(I*12|0)+10>>0]|0)!=0:0)if(!((G|H)<<24>>24))E=(Qb(b,F,E,u)|0)&255;else E=1;else E=2;K=($(c[x>>2]|0,e)|0)+z>>2;a[(c[y>>2]|0)+K>>0]=E;D=D+4|0}while((D|0)<(w|0))}}while(0);do if((d|0)>0&(d&7|0)==0){if(((a[b+3050>>0]|0)==0?(c[t+31312>>2]&1|0)!=0:0)?((d|0)%(1<<c[(c[r>>2]|0)+13080>>2]|0)|0|0)==0:0)break;if((a[(c[b+204>>2]|0)+53>>0]|0)==0?(s=c[t+31312>>2]|0,(s&2|0)!=0):0){if(!((d|0)%(1<<c[(c[r>>2]|0)+13080>>2]|0)|0))break}else s=c[t+31312>>2]|0;q=c[o>>2]|0;if(!(s&1)){r=c[q+20>>2]|0;s=d+ -1|0}else{s=d+ -1|0;r=qc(b,q,s,e)|0}q=s>>g;w=s>>p;x=d>>p;z=1<<f;if((z|0)>0){v=b+7596|0;y=b+5840|0;u=b+7576|0;t=0;do{s=t+e|0;J=$(s>>g,h)|0;K=J+q|0;A=j+(K*12|0)|0;J=J+m|0;B=j+(J*12|0)|0;C=$(s>>p,l)|0;I=c[v>>2]|0;D=a[I+(C+w)>>0]|0;C=a[I+(C+x)>>0]|0;if((a[j+(J*12|0)+10>>0]|0)!=0?(a[j+(K*12|0)+10>>0]|0)!=0:0)if(!((C|D)<<24>>24))A=(Qb(b,B,A,r)|0)&255;else A=1;else A=2;K=($(c[y>>2]|0,s)|0)+d>>2;a[(c[u>>2]|0)+K>>0]=A;t=t+4|0}while((t|0)<(z|0))}}while(0);if((g|0)>=(f|0)|n){i=k;return}l=c[(c[o>>2]|0)+20>>2]|0;m=1<<f;f=(m|0)>8;if(f){q=b+5840|0;r=b+7572|0;s=8;do{n=s+e|0;t=$(n+ 
-1>>g,h)|0;o=$(n>>g,h)|0;p=0;do{K=p+d|0;J=K>>g;J=(Qb(b,j+((J+o|0)*12|0)|0,j+((J+t|0)*12|0)|0,l)|0)&255;K=($(c[q>>2]|0,n)|0)+K>>2;a[(c[r>>2]|0)+K>>0]=J;p=p+4|0}while((p|0)<(m|0));s=s+8|0}while((s|0)<(m|0))}if((m|0)<=0){i=k;return}n=b+5840|0;o=b+7576|0;p=0;do{q=p+e|0;if(f){r=$(q>>g,h)|0;s=8;do{K=s+d|0;J=(Qb(b,j+(((K>>g)+r|0)*12|0)|0,j+(((K+ -1>>g)+r|0)*12|0)|0,l)|0)&255;K=($(c[n>>2]|0,q)|0)+K>>2;a[(c[o>>2]|0)+K>>0]=J;s=s+8|0}while((s|0)<(m|0))}p=p+4|0}while((p|0)<(m|0));i=k;return}function Qb(d,e,f,g){d=d|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0;h=i;m=a[e+10>>0]|0;k=a[f+10>>0]|0;l=k<<24>>24==3;if(m<<24>>24!=3){if(l){m=1;i=h;return m|0}if(!(m&1)){l=e+4|0;j=e+6|0;e=(c[(c[d+3508>>2]|0)+20>>2]|0)+(a[e+9>>0]<<2)+260|0}else{l=e;j=e+2|0;e=(c[(c[d+3508>>2]|0)+20>>2]|0)+(a[e+8>>0]<<2)+64|0}if(!(k&1)){k=f+4|0;d=f+6|0;f=g+(a[f+9>>0]<<2)+260|0}else{k=f;d=f+2|0;f=g+(a[f+8>>0]<<2)+64|0}if((c[e>>2]|0)!=(c[f>>2]|0)){m=1;i=h;return m|0}m=(b[l>>1]|0)-(b[k>>1]|0)|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}m=(b[j>>1]|0)-(b[d>>1]|0)|0;m=(((m|0)>-1?m:0-m|0)|0)>3&1;i=h;return m|0}if(!l){m=1;i=h;return m|0}l=c[(c[d+3508>>2]|0)+20>>2]|0;k=c[l+(a[e+8>>0]<<2)+64>>2]|0;d=c[g+(a[f+8>>0]<<2)+64>>2]|0;m=(k|0)==(d|0);a:do if(m){do if((k|0)==(c[l+(a[e+9>>0]<<2)+260>>2]|0)){if((k|0)!=(c[g+(a[f+9>>0]<<2)+260>>2]|0))if(m)break;else{j=16;break a}g=b[f>>1]|0;j=b[e>>1]|0;m=g-j|0;if(!((((((m|0)>-1?m:0-m|0)|0)<=3?(m=(b[f+2>>1]|0)-(b[e+2>>1]|0)|0,(((m|0)>-1?m:0-m|0)|0)<=3):0)?(m=(b[f+4>>1]|0)-(b[e+4>>1]|0)|0,(((m|0)>-1?m:0-m|0)|0)<=3):0)?(m=(b[f+6>>1]|0)-(b[e+6>>1]|0)|0,(((m|0)>-1?m:0-m|0)|0)<=3):0)){m=(b[f+4>>1]|0)-j|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}m=(b[f+6>>1]|0)-(b[e+2>>1]|0)|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}m=g-(b[e+4>>1]|0)|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}m=(b[f+2>>1]|0)-(b[e+6>>1]|0)|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}}m=0;i=h;return 
m|0}while(0);g=c[g+(a[f+9>>0]<<2)+260>>2]|0;if((g|0)==(c[l+(a[e+9>>0]<<2)+260>>2]|0)){m=(b[f>>1]|0)-(b[e>>1]|0)|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}m=(b[f+2>>1]|0)-(b[e+2>>1]|0)|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}m=(b[f+4>>1]|0)-(b[e+4>>1]|0)|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}else{m=(b[f+6>>1]|0)-(b[e+6>>1]|0)|0;i=h;return(((m|0)>-1?m:0-m|0)|0)>3|0}}}else j=16;while(0);if((j|0)==16)g=c[g+(a[f+9>>0]<<2)+260>>2]|0;if((g|0)!=(k|0)){m=1;i=h;return m|0}if((d|0)!=(c[l+(a[e+9>>0]<<2)+260>>2]|0)){m=1;i=h;return m|0}m=(b[f+4>>1]|0)-(b[e>>1]|0)|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}m=(b[f+6>>1]|0)-(b[e+2>>1]|0)|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}m=(b[f>>1]|0)-(b[e+4>>1]|0)|0;if((((m|0)>-1?m:0-m|0)|0)>3){m=1;i=h;return m|0}m=(b[f+2>>1]|0)-(b[e+6>>1]|0)|0;m=(((m|0)>-1?m:0-m|0)|0)>3&1;i=h;return m|0}function Rb(e,f,g,h){e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0;j=i;i=i+32|0;o=j+8|0;w=j;n=j+18|0;r=j+16|0;l=e+200|0;J=c[l>>2]|0;u=c[J+13120>>2]|0;k=(u-h|0)<=(f|0);b[n>>1]=0;b[r>>1]=0;v=c[J+13080>>2]|0;t=1<<v;v=($(g>>v,c[J+13128>>2]|0)|0)+(f>>v)|0;s=c[e+3496>>2]|0;m=c[s+(v<<3)+4>>2]|0;x=c[s+(v<<3)>>2]|0;if((c[J+68>>2]|0)!=0?(a[J+13056>>0]|0)!=0:0)p=1;else p=(a[(c[e+204>>2]|0)+40>>0]|0)!=0;q=(f|0)!=0;if(q){v=v+ -1|0;y=c[s+(v<<3)>>2]|0;v=c[s+(v<<3)+4>>2]|0}else{y=0;v=0}s=t+f|0;s=(s|0)>(u|0)?u:s;t=t+g|0;z=c[J+13124>>2]|0;t=(t|0)>(z|0)?z:t;z=(s|0)==(u|0)?s:s+ -8|0;u=(t|0)>(g|0);if(u){J=q?f:8;M=(J|0)<(s|0);P=q?f+ -8|0:0;E=e+5840|0;O=e+7572|0;H=e+7568|0;B=w+4|0;C=e+160|0;D=n+1|0;Q=r+1|0;K=e+7544|0;L=e+7528|0;I=e+7576|0;F=e+7548|0;G=e+7532|0;A=(P|0)>=(z|0);T=x;S=m;N=g;do{if(M){R=N+4|0;W=S+ -2&-2;U=J;do{Z=c[E>>2]|0;ba=($(Z,N)|0)+U>>2;_=c[I>>2]|0;ba=a[_+ba>>0]|0;ca=ba&255;Z=a[_+(($(Z,R)|0)+U>>2)>>0]|0;_=Z&255;ba=ba<<24>>24!=0;Z=Z<<24>>24==0;do 
if(!(Z&(ba^1))){V=U+ -1|0;X=c[l>>2]|0;aa=c[X+13064>>2]|0;Y=$(N>>aa,c[X+13140>>2]|0)|0;da=c[H>>2]|0;aa=(a[da+(Y+(V>>aa))>>0]|0)+1+(a[da+(Y+(U>>aa))>>0]|0)>>1;Y=aa+T|0;if((Y|0)<0)Y=0;else Y=(Y|0)>51?51:Y;Y=d[1280+Y>>0]|0;if(ba){ba=(ca<<1)+W+aa|0;if((ba|0)<0)ba=0;else ba=(ba|0)>53?53:ba;ba=d[1336+ba>>0]|0}else ba=0;c[w>>2]=ba;if(Z)Z=0;else{Z=(_<<1)+W+aa|0;if((Z|0)<0)Z=0;else Z=(Z|0)>53?53:Z;Z=d[1336+Z>>0]|0}c[B>>2]=Z;ca=c[C>>2]|0;Z=c[ca+32>>2]|0;da=$(Z,N)|0;X=(c[ca>>2]|0)+((U<<c[X+56>>2])+da)|0;if(p){a[n>>0]=Wb(e,V,N)|0;a[D>>0]=Wb(e,V,R)|0;a[r>>0]=Wb(e,U,N)|0;a[Q>>0]=Wb(e,U,R)|0;Na[c[F>>2]&3](X,Z,Y,w,n,r);break}else{Na[c[G>>2]&3](X,Z,Y,w,n,r);break}}while(0);U=U+8|0}while((U|0)<(s|0))}if(!((N|0)==0|A)){R=N+ -1|0;V=S;S=P;do{Y=$(c[E>>2]|0,N)|0;Z=c[O>>2]|0;aa=a[Z+(Y+S>>2)>>0]|0;ba=aa&255;U=S+4|0;Y=a[Z+(Y+U>>2)>>0]|0;Z=Y&255;aa=aa<<24>>24!=0;Y=Y<<24>>24==0;do if(!(Y&(aa^1))){W=c[l>>2]|0;V=c[W+13064>>2]|0;_=S>>V;T=c[W+13140>>2]|0;da=($(R>>V,T)|0)+_|0;X=c[H>>2]|0;_=(a[X+da>>0]|0)+1+(a[X+(($(N>>V,T)|0)+_)>>0]|0)>>1;T=(S|0)>=(f|0);V=T?m:v;T=T?x:y;X=_+T|0;if((X|0)<0)X=0;else X=(X|0)>51?51:X;X=d[1280+X>>0]|0;if(aa){aa=(ba<<1)+(V+ -2&-2)+_|0;if((aa|0)<0)aa=0;else aa=(aa|0)>53?53:aa;aa=d[1336+aa>>0]|0}else aa=0;c[w>>2]=aa;if(Y)Y=0;else{Y=(Z<<1)+(V+ -2&-2)+_|0;if((Y|0)<0)Y=0;else Y=(Y|0)>53?53:Y;Y=d[1336+Y>>0]|0}c[B>>2]=Y;ca=c[C>>2]|0;Y=c[ca+32>>2]|0;da=$(Y,N)|0;W=(c[ca>>2]|0)+((S<<c[W+56>>2])+da)|0;if(p){a[n>>0]=Wb(e,S,R)|0;a[D>>0]=Wb(e,U,R)|0;a[r>>0]=Wb(e,S,N)|0;a[Q>>0]=Wb(e,U,N)|0;Na[c[K>>2]&3](W,Y,X,w,n,r);break}else{Na[c[L>>2]&3](W,Y,X,w,n,r);break}}while(0);S=S+8|0}while((S|0)<(z|0));S=V}N=N+8|0}while((N|0)<(t|0));J=c[l>>2]|0}else 
S=m;if(c[J+4>>2]|0){C=q?v:m;F=e+5840|0;D=e+7572|0;v=e+7568|0;x=o+4|0;w=e+160|0;B=n+1|0;A=r+1|0;G=e+7552|0;E=e+7536|0;H=e+7576|0;z=e+7556|0;y=e+7540|0;I=1;do{O=1<<c[J+(I<<2)+13168>>2];P=1<<c[J+(I<<2)+13180>>2];if(u){N=O<<3;L=q?f:N;K=(L|0)<(s|0);J=P<<3;M=q?f-N|0:0;O=O<<2;P=P<<2;Q=g;do{if(K){R=Q+P|0;T=L;do{W=c[F>>2]|0;Y=($(W,Q)|0)+T>>2;da=c[H>>2]|0;Y=(a[da+Y>>0]|0)==2;W=(a[da+(($(W,R)|0)+T>>2)>>0]|0)==2;do if(Y|W){U=T+ -1|0;V=c[l>>2]|0;da=c[V+13064>>2]|0;_=U>>da;X=c[V+13140>>2]|0;Z=$(Q>>da,X)|0;aa=c[v>>2]|0;ba=T>>da;X=$(R>>da,X)|0;X=(a[aa+(X+_)>>0]|0)+1+(a[aa+(X+ba)>>0]|0)>>1;if(Y)Y=Xb(e,(a[aa+(Z+ba)>>0]|0)+1+(a[aa+(Z+_)>>0]|0)>>1,I,S)|0;else Y=0;c[o>>2]=Y;if(W)W=Xb(e,X,I,S)|0;else W=0;c[x>>2]=W;ca=c[w>>2]|0;W=c[ca+(I<<2)+32>>2]|0;da=$(W,Q>>c[V+(I<<2)+13180>>2])|0;V=(c[ca+(I<<2)>>2]|0)+((T>>c[V+(I<<2)+13168>>2]<<c[V+56>>2])+da)|0;if(p){a[n>>0]=Wb(e,U,Q)|0;a[B>>0]=Wb(e,U,R)|0;a[r>>0]=Wb(e,T,Q)|0;a[A>>0]=Wb(e,T,R)|0;Da[c[z>>2]&3](V,W,o,n,r);break}else{Da[c[y>>2]&3](V,W,o,n,r);break}}while(0);T=T+N|0}while((T|0)<(s|0))}if(Q){U=s-((s|0)==(c[(c[l>>2]|0)+13120>>2]|0)?0:N)|0;if((M|0)<(U|0)){T=Q+ -1|0;S=M;do{W=$(c[F>>2]|0,Q)|0;da=c[D>>2]|0;R=S+O|0;X=(a[da+(W+S>>2)>>0]|0)==2;W=(a[da+(W+R>>2)>>0]|0)==2;do if(X|W){if(X){da=c[l>>2]|0;ca=c[da+13064>>2]|0;Y=S>>ca;da=c[da+13140>>2]|0;aa=($(T>>ca,da)|0)+Y|0;ba=c[v>>2]|0;Y=(a[ba+aa>>0]|0)+1+(a[ba+(($(Q>>ca,da)|0)+Y)>>0]|0)>>1}else Y=0;if(W){da=c[l>>2]|0;ca=c[da+13064>>2]|0;V=R>>ca;da=c[da+13140>>2]|0;aa=($(T>>ca,da)|0)+V|0;ba=c[v>>2]|0;V=(a[ba+aa>>0]|0)+1+(a[ba+(($(Q>>ca,da)|0)+V)>>0]|0)>>1}else V=0;if(X)X=Xb(e,Y,I,C)|0;else X=0;c[o>>2]=X;if(W)V=Xb(e,V,I,m)|0;else V=0;c[x>>2]=V;da=c[l>>2]|0;ca=c[w>>2]|0;V=c[ca+(I<<2)+32>>2]|0;W=$(V,Q>>c[da+13184>>2])|0;W=(c[ca+(I<<2)>>2]|0)+((S>>c[da+13172>>2]<<c[da+56>>2])+W)|0;if(p){a[n>>0]=Wb(e,S,T)|0;a[B>>0]=Wb(e,R,T)|0;a[r>>0]=Wb(e,S,Q)|0;a[A>>0]=Wb(e,R,Q)|0;Da[c[G>>2]&3](W,V,o,n,r);break}else{Da[c[E>>2]&3](W,V,o,n,r);break}}while(0);S=S+N|0}while((S|0)<(U|0));S=C}else 
S=C}Q=Q+J|0}while((Q|0)<(t|0))}I=I+1|0;J=c[l>>2]|0}while((I|0)!=3)}if(!(a[J+12941>>0]|0)){if((a[e+140>>0]&1)==0|k^1){i=j;return}i=j;return}n=(c[J+13124>>2]|0)-h|0;l=(g|0)==0;m=(f|0)==0;if(!(l|m))Sb(e,f-h|0,g-h|0);n=(n|0)>(g|0);if(!(m|n))Sb(e,f-h|0,g);k=k^1;!(l|k)?(Sb(e,f,g-h|0),(a[e+140>>0]&1)!=0):0;if(n|k){i=j;return}Sb(e,f,g);if(!(a[e+140>>0]&1)){i=j;return}i=j;return}function Sb(e,f,g){e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0;h=i;i=i+48|0;l=h+24|0;r=h+42|0;s=h+40|0;p=h+16|0;k=h+8|0;t=h;o=e+200|0;S=c[o>>2]|0;y=c[S+13080>>2]|0;j=f>>y;y=g>>y;G=S+13128|0;n=($(y,c[G>>2]|0)|0)+j|0;M=c[e+204>>2]|0;L=M+1668|0;N=c[(c[L>>2]|0)+(n<<2)>>2]|0;A=e+3492|0;m=c[A>>2]|0;q=m+(n*148|0)|0;b[r>>1]=0;b[s>>1]=0;c[p>>2]=0;F=($(c[G>>2]|0,y)|0)+j|0;F=a[(c[e+7604>>2]|0)+F>>0]|0;if((a[M+42>>0]|0)!=0?(a[M+53>>0]|0)==0:0){R=1;O=1}else{R=F<<24>>24==0&1;O=0}D=(j|0)==0;c[l>>2]=D&1;I=(y|0)==0;u=l+4|0;c[u>>2]=I&1;H=(j|0)==((c[G>>2]|0)+ -1|0);z=l+8|0;c[z>>2]=H&1;E=(y|0)==((c[S+13132>>2]|0)+ -1|0);x=l+12|0;c[x>>2]=E&1;if(R<<24>>24){if(D)J=0;else{if(O){J=c[M+1676>>2]|0;J=(c[J+(N<<2)>>2]|0)!=(c[J+(c[(c[L>>2]|0)+(n+ -1<<2)>>2]<<2)>>2]|0)&1}else J=0;if(F<<24>>24==0?(pa=$(c[G>>2]|0,y)|0,oa=c[e+7580>>2]|0,(c[oa+(pa+j<<2)>>2]|0)!=(c[oa+(j+ -1+pa<<2)>>2]|0)):0)K=1;else K=J;a[r>>0]=K}if(H)K=0;else{if(O){K=c[M+1676>>2]|0;K=(c[K+(N<<2)>>2]|0)!=(c[K+(c[(c[L>>2]|0)+(n+1<<2)>>2]<<2)>>2]|0)&1}else K=0;if(F<<24>>24==0?(pa=$(c[G>>2]|0,y)|0,oa=c[e+7580>>2]|0,(c[oa+(pa+j<<2)>>2]|0)!=(c[oa+(j+1+pa<<2)>>2]|0)):0)P=1;else P=K;a[r+1>>0]=P}if(I)P=0;else{if(O){P=c[M+1676>>2]|0;P=(c[P+(N<<2)>>2]|0)!=(c[P+(c[(c[L>>2]|0)+(n-(c[G>>2]|0)<<2)>>2]<<2)>>2]|0)&1}else P=0;if(F<<24>>24==0?(pa=c[G>>2]|0,na=($(pa,y)|0)+j|0,oa=c[e+7580>>2]|0,(c[oa+(na<<2)>>2]|0)!=(c[oa+(($(pa,y+ 
-1|0)|0)+j<<2)>>2]|0)):0)Q=1;else Q=P;a[s>>0]=Q}if(E)L=0;else{if(O){pa=c[M+1676>>2]|0;L=(c[pa+(N<<2)>>2]|0)!=(c[pa+(c[(c[L>>2]|0)+((c[G>>2]|0)+n<<2)>>2]<<2)>>2]|0)&1}else L=0;if(F<<24>>24==0?(pa=c[G>>2]|0,na=($(pa,y)|0)+j|0,oa=c[e+7580>>2]|0,(c[oa+(na<<2)>>2]|0)!=(c[oa+(($(pa,y+1|0)|0)+j<<2)>>2]|0)):0)M=1;else M=L;a[s+1>>0]=M}if(!D)if(I)B=47;else{if(!(F<<24>>24)){pa=c[G>>2]|0;na=($(pa,y)|0)+j|0;oa=c[e+7580>>2]|0;if(J<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(j+ -1+($(pa,y+ -1|0)|0)<<2)>>2]|0):0)B=38;else M=1}else if(!(J<<24>>24))B=38;else M=1;if((B|0)==38)M=P<<24>>24!=0&1;a[p>>0]=M;B=40}else B=40;if((B|0)==40)if(!I){if(!H){if(!(F<<24>>24)){pa=c[G>>2]|0;na=($(pa,y)|0)+j|0;oa=c[e+7580>>2]|0;if(K<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(j+1+($(pa,y+ -1|0)|0)<<2)>>2]|0):0)B=45;else I=1}else if(!(K<<24>>24))B=45;else I=1;if((B|0)==45)I=P<<24>>24!=0&1;a[p+1>>0]=I;B=47}}else B=47;if((B|0)==47?!(H|E):0){if(!(F<<24>>24)){pa=c[G>>2]|0;na=($(pa,y)|0)+j|0;oa=c[e+7580>>2]|0;if(K<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(j+1+($(pa,y+1|0)|0)<<2)>>2]|0):0)B=51;else H=1}else if(!(K<<24>>24))B=51;else H=1;if((B|0)==51)H=L<<24>>24!=0&1;a[p+2>>0]=H}if(!(D|E)){if(!(F<<24>>24)){pa=c[G>>2]|0;na=($(pa,y)|0)+j|0;oa=c[e+7580>>2]|0;if(J<<24>>24==0?(c[oa+(na<<2)>>2]|0)==(c[oa+(j+ -1+($(pa,y+1|0)|0)<<2)>>2]|0):0)B=57;else D=1}else if(!(J<<24>>24))B=57;else D=1;if((B|0)==57)D=L<<24>>24!=0&1;a[p+3>>0]=D}}N=(c[S+4>>2]|0)!=0?3:1;E=e+160|0;D=e+168|0;F=e+5916|0;P=y<<1;H=P+ -1|0;G=k+4|0;O=y+ -1|0;J=j+1|0;L=j+ -1|0;P=P+2|0;Q=t+4|0;M=y+1|0;I=j<<1;K=I+ 
-1|0;I=I+2|0;R=e+((R&255)<<2)+5920|0;na=S;_=0;while(1){ka=c[na+(_<<2)+13168>>2]|0;V=f>>ka;ha=c[na+(_<<2)+13180>>2]|0;aa=g>>ha;ba=c[E>>2]|0;W=c[ba+(_<<2)+32>>2]|0;S=1<<c[na+13080>>2];Z=S>>ka;Y=S>>ha;ka=c[na+13120>>2]>>ka;ca=ka-V|0;Z=(Z|0)>(ca|0)?ca:Z;ha=c[na+13124>>2]>>ha;ca=ha-aa|0;Y=(Y|0)>(ca|0)?ca:Y;ca=$(W,aa)|0;fa=c[na+56>>2]|0;ca=(V<<fa)+ca|0;ba=c[ba+(_<<2)>>2]|0;X=ba+ca|0;S=S+2<<fa;ea=c[D>>2]|0;ga=1<<fa;da=S+ga|0;U=ea+da|0;T=m+(n*148|0)+_+142|0;ia=d[T>>0]|0;if((ia|0)==2){ja=c[l>>2]|0;ia=c[z>>2]|0;la=c[x>>2]|0;do if(!(c[u>>2]|0)){pa=1-ja|0;oa=pa<<fa;ma=ga-oa|0;c[k>>2]=ba+(ca-W-oa);c[G>>2]=(c[e+(_<<2)+172>>2]|0)+(($(ka,H)|0)+V-pa<<fa);do if((ja|0)!=1){oa=ea+ma|0;pa=L+($(c[na+13128>>2]|0,O)|0)|0;pa=c[k+(((a[(c[A>>2]|0)+(pa*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0;if(!fa){a[oa>>0]=a[pa>>0]|0;na=c[o>>2]|0;oa=ga;break}else{b[oa>>1]=b[pa>>1]|0;oa=ga;break}}else oa=0;while(0);pa=($(c[na+13128>>2]|0,O)|0)+j|0;na=Z<<fa;mf(ea+(oa+ma)|0,(c[k+(((a[(c[A>>2]|0)+(pa*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0)+oa|0,na|0)|0;if((ia|0)!=1){pa=oa+na|0;oa=J+($(c[(c[o>>2]|0)+13128>>2]|0,O)|0)|0;na=ea+(pa+ma)|0;ma=(c[k+(((a[(c[A>>2]|0)+(oa*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0)+pa|0;if(!fa){a[na>>0]=a[ma>>0]|0;break}else{b[na>>1]=b[ma>>1]|0;break}}}while(0);do if(!la){pa=1-ja|0;oa=pa<<fa;la=($(Y,S)|0)+da-oa|0;c[t>>2]=ba+(($(Y,W)|0)+ca-oa);c[Q>>2]=(c[e+(_<<2)+172>>2]|0)+(($(ka,P)|0)+V-pa<<fa);do if((ja|0)!=1){ka=ea+la|0;ma=L+($(c[(c[o>>2]|0)+13128>>2]|0,M)|0)|0;ma=c[t+(((a[(c[A>>2]|0)+(ma*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0;if(!fa){a[ka>>0]=a[ma>>0]|0;ma=ga;break}else{b[ka>>1]=b[ma>>1]|0;ma=ga;break}}else ma=0;while(0);pa=($(c[(c[o>>2]|0)+13128>>2]|0,M)|0)+j|0;ka=Z<<fa;mf(ea+(ma+la)|0,(c[t+(((a[(c[A>>2]|0)+(pa*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0)+ma|0,ka|0)|0;if((ia|0)!=1){pa=ma+ka|0;oa=J+($(c[(c[o>>2]|0)+13128>>2]|0,M)|0)|0;ka=ea+(pa+la)|0;la=(c[t+(((a[(c[A>>2]|0)+(oa*148|0)+_+142>>0]|0)==3&1)<<2)>>2]|0)+pa|0;if(!fa){a[ka>>0]=a[la>>0]|0;break}else{b[ka>>1]=b[la>>1]|0;break}}}while(0);do 
if(!ja){pa=L+($(c[(c[o>>2]|0)+13128>>2]|0,y)|0)|0;if((a[(c[A>>2]|0)+(pa*148|0)+_+142>>0]|0)==3){la=ea+S|0;ja=(c[e+(_<<2)+184>>2]|0)+(($(ha,K)|0)+aa<<fa)|0;ka=(Y|0)>0;if(!fa){if(ka)ka=0;else{ja=0;break}while(1){a[la>>0]=a[ja>>0]|0;ka=ka+1|0;if((ka|0)==(Y|0)){ja=0;break}else{la=la+S|0;ja=ja+ga|0}}}else{if(ka)ka=0;else{ja=0;break}while(1){b[la>>1]=b[ja>>1]|0;ka=ka+1|0;if((ka|0)==(Y|0)){ja=0;break}else{la=la+S|0;ja=ja+ga|0}}}}else ja=1}else ja=0;while(0);do if(!ia){pa=J+($(c[(c[o>>2]|0)+13128>>2]|0,y)|0)|0;if((a[(c[A>>2]|0)+(pa*148|0)+_+142>>0]|0)==3){ia=ea+((Z<<fa)+da)|0;ha=(c[e+(_<<2)+184>>2]|0)+(($(ha,I)|0)+aa<<fa)|0;ka=(Y|0)>0;if(!fa){if(ka)B=0;else break;while(1){a[ia>>0]=a[ha>>0]|0;B=B+1|0;if((B|0)==(Y|0)){C=0;B=96;break}else{ia=ia+S|0;ha=ha+ga|0}}}else{if(ka)B=0;else break;while(1){b[ia>>1]=b[ha>>1]|0;B=B+1|0;if((B|0)==(Y|0)){C=0;B=96;break}else{ia=ia+S|0;ha=ha+ga|0}}}}else{C=1;B=96}}else{C=0;B=96}while(0);if((B|0)==96?(B=0,v=ja<<fa,w=ja+Z+C<<fa,(Y|0)>0):0){da=ea+(da-v)|0;ea=0;ba=ba+(ca-v)|0;while(1){mf(da|0,ba|0,w|0)|0;ea=ea+1|0;if((ea|0)==(Y|0))break;else{da=da+S|0;ba=ba+W|0}}}Ub(e,X,W,V,aa,Z,Y,_,j,y);Ia[c[R>>2]&3](X,U,W,S,q,l,Z,Y,_,r,s,p);Vb(e,X,U,W,S,f,g,Z,Y,_);a[T>>0]=3}else if((ia|0)==1){ca=Z<<fa;if((Y|0)>0){ba=U;da=0;ea=X;while(1){mf(ba|0,ea|0,ca|0)|0;da=da+1|0;if((da|0)==(Y|0))break;else{ba=ba+S|0;ea=ea+W|0}}}Ub(e,X,W,V,aa,Z,Y,_,j,y);Ha[c[F>>2]&1](X,U,W,S,q,l,Z,Y,_);Vb(e,X,U,W,S,f,g,Z,Y,_);a[T>>0]=3}_=_+1|0;if((_|0)>=(N|0))break;na=c[o>>2]|0}i=h;return}function Tb(a,b,d,e){a=a|0;b=b|0;d=d|0;e=e|0;var f=0,g=0,h=0,j=0,k=0;f=i;h=c[a+200>>2]|0;k=((c[h+13120>>2]|0)-e|0)>(b|0);h=((c[h+13124>>2]|0)-e|0)>(d|0);j=(d|0)==0;g=(b|0)==0;if(!(j|g))Rb(a,b-e|0,d-e|0,e);if(!(j|k))Rb(a,b,d-e|0,e);if(g|h){i=f;return}Rb(a,b-e|0,d,e);i=f;return}function Ub(d,e,f,g,h,j,k,l,m,n){d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;n=n|0;var 
o=0,p=0,q=0,r=0,s=0,t=0,u=0;o=i;q=c[d+200>>2]|0;p=c[q+56>>2]|0;s=c[q+13120>>2]>>c[q+(l<<2)+13168>>2];q=c[q+13124>>2]>>c[q+(l<<2)+13180>>2];u=d+(l<<2)+172|0;t=n<<1;r=j<<p;mf((c[u>>2]|0)+(($(s,t)|0)+g<<p)|0,e|0,r|0)|0;mf((c[u>>2]|0)+(($(s,t|1)|0)+g<<p)|0,e+($(k+ -1|0,f)|0)|0,r|0)|0;d=d+(l<<2)+184|0;r=c[d>>2]|0;l=m<<1;t=r+(($(q,l)|0)+h<<p)|0;m=1<<p;n=(p|0)==0;g=(k|0)>0;if(n){if(g){r=t;t=0;s=e;while(1){a[r>>0]=a[s>>0]|0;t=t+1|0;if((t|0)==(k|0))break;else{r=r+m|0;s=s+f|0}}r=c[d>>2]|0}}else if(g){d=0;s=e;while(1){b[t>>1]=b[s>>1]|0;d=d+1|0;if((d|0)==(k|0))break;else{t=t+m|0;s=s+f|0}}}h=r+(($(q,l|1)|0)+h<<p)|0;j=e+(j+ -1<<p)|0;if(n){if(g)p=0;else{i=o;return}while(1){a[h>>0]=a[j>>0]|0;p=p+1|0;if((p|0)==(k|0))break;else{h=h+m|0;j=j+f|0}}i=o;return}else{if(g)p=0;else{i=o;return}while(1){b[h>>1]=b[j>>1]|0;p=p+1|0;if((p|0)==(k|0))break;else{h=h+m|0;j=j+f|0}}i=o;return}}function Vb(b,d,e,f,g,h,j,k,l,m){b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;var n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0;n=i;t=c[b+200>>2]|0;if(!(a[(c[b+204>>2]|0)+40>>0]|0)){if(!(a[t+13056>>0]|0)){i=n;return}if(!(c[t+68>>2]|0)){i=n;return}}p=b+200|0;C=c[t+13084>>2]|0;v=1<<C;o=c[t+(m<<2)+13168>>2]|0;s=c[t+(m<<2)+13180>>2]|0;m=h>>C;z=j>>C;k=k+h>>C;l=l+j>>C;t=v>>o<<c[t+56>>2];if((z|0)>=(l|0)){i=n;return}u=(m|0)<(k|0);b=b+7600|0;v=v>>s;w=(v|0)>0;do{if(u){x=z-j|0;y=m;do{A=c[p>>2]|0;C=($(c[A+13156>>2]|0,z)|0)+y|0;if((a[(c[b>>2]|0)+C>>0]|0)!=0?(r=c[A+13084>>2]|0,q=x<<r>>s,r=y-h<<r>>o<<c[A+56>>2],w):0){C=e+(($(q,g)|0)+r)|0;A=0;B=d+(($(q,f)|0)+r)|0;while(1){mf(B|0,C|0,t|0)|0;A=A+1|0;if((A|0)==(v|0))break;else{C=C+g|0;B=B+f|0}}}y=y+1|0}while((y|0)!=(k|0))}z=z+1|0}while((z|0)!=(l|0));i=n;return}function Wb(a,b,e){a=a|0;b=b|0;e=e|0;var f=0,g=0,h=0;f=i;g=c[a+200>>2]|0;h=c[g+13084>>2]|0;if((e|b|0)<0){e=2;i=f;return e|0}b=b>>h;e=e>>h;h=c[g+13156>>2]|0;if((b|0)>=(h|0)){e=2;i=f;return e|0}if((e|0)>=(c[g+13160>>2]|0)){e=2;i=f;return 
e|0}e=($(h,e)|0)+b|0;e=d[(c[a+7600>>2]|0)+e>>0]|0;i=f;return e|0}function Xb(b,e,f,g){b=b|0;e=e|0;f=f|0;g=g|0;var h=0,j=0;h=i;j=c[b+204>>2]|0;e=(c[((f|0)==1?j+28|0:j+32|0)>>2]|0)+e|0;if((e|0)<0)e=0;else e=(e|0)>57?57:e;do if((c[(c[b+200>>2]|0)+4>>2]|0)==1){if((e|0)>=30)if((e|0)>43){e=e+ -6|0;break}else{e=d[1392+(e+ -30)>>0]|0;break}}else if((e|0)<0)e=0;else e=(e|0)>51?51:e;while(0);g=g+2+e|0;if((g|0)<0){j=0;j=1336+j|0;j=a[j>>0]|0;j=j&255;i=h;return j|0}j=(g|0)>53?53:g;j=1336+j|0;j=a[j>>0]|0;j=j&255;i=h;return j|0}function Yb(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0;g=i;k=b+7628|0;c[k>>2]=0;a:do if((e|0)>1){m=0;while(1){if(!(a[d+m>>0]|0)){if((m|0)>0){l=m+ -1|0;l=(a[d+l>>0]|0)==0?l:m}else l=m;m=l+2|0;if(((m|0)<(e|0)?(a[d+(l+1)>>0]|0)==0:0)?(j=a[d+m>>0]|0,(j&255)<4):0)break}else l=m;m=l+2|0;if((l+3|0)>=(e|0))break a}m=l;e=j<<24>>24==3?e:l}else m=0;while(0);if((m|0)>=(e+ -1|0)){c[f+12>>2]=d;c[f+8>>2]=e;q=e;i=g;return q|0}me(f,f+4|0,e+32|0);j=c[f>>2]|0;if(!j){q=-12;i=g;return q|0}mf(j|0,d|0,m|0)|0;o=m+2|0;b:do if((o|0)<(e|0)){l=b+7636|0;b=b+7632|0;n=m;c:while(1){p=d+o|0;q=a[p>>0]|0;do if((q&255)<=3){p=a[d+m>>0]|0;if(!(p<<24>>24))if(!(a[d+(m+1)>>0]|0)){if(q<<24>>24!=3){e=m;break b}o=n+1|0;a[j+n>>0]=0;n=n+2|0;a[j+o>>0]=0;m=m+3|0;q=(c[k>>2]|0)+1|0;c[k>>2]=q;p=c[l>>2]|0;if((p|0)<(q|0)){p=p<<1;c[l>>2]=p;ke(b,p,4)|0;p=c[b>>2]|0;if(!p){f=-12;break c}}else{p=c[b>>2]|0;if(!p)break}c[p+((c[k>>2]|0)+ -1<<2)>>2]=o}else{p=0;h=26}else h=26}else{a[j+n>>0]=a[d+m>>0]|0;a[j+(n+1)>>0]=a[d+(m+1)>>0]|0;p=a[p>>0]|0;n=n+2|0;m=o;h=26}while(0);if((h|0)==26){h=0;a[j+n>>0]=p;n=n+1|0;m=m+1|0}o=m+2|0;if((o|0)>=(e|0)){h=15;break b}}i=g;return f|0}else{n=m;h=15}while(0);if((h|0)==15)if((m|0)<(e|0)){h=e+n|0;k=m;while(1){a[j+n>>0]=a[d+k>>0]|0;k=k+1|0;if((k|0)==(e|0))break;else n=n+1|0}n=h-m|0}else e=m;h=j+n+0|0;d=h+32|0;do{a[h>>0]=0;h=h+1|0}while((h|0)<(d|0));c[f+12>>2]=j;c[f+8>>2]=n;q=e;i=g;return q|0}function Zb(b){b=b|0;var 
d=0,e=0,f=0,g=0,h=0,j=0;d=i;f=b+60|0;e=c[f>>2]|0;Wd();Cc();f=c[f>>2]|0;c[f+4>>2]=b;g=le(31328)|0;c[f+136>>2]=g;a:do if(((g|0)!=0?(c[f+72>>2]=g,c[f+8>>2]=f,j=ee(199)|0,c[f+152>>2]=j,(j|0)!=0):0)?(j=Be()|0,c[f+164>>2]=j,(j|0)!=0):0){g=f+3512|0;h=0;do{j=Be()|0;c[g+(h*72|0)>>2]=j;if(!j)break a;c[g+(h*72|0)+4>>2]=j;h=h+1|0}while(h>>>0<32);c[f+5836>>2]=2147483647;a[f+7721>>0]=1;c[f+5828>>2]=0;c[e+7620>>2]=0;c[e+7772>>2]=0;f=b+808|0;if(!(c[f>>2]&2))a[e+141>>0]=1;else a[e+141>>0]=c[b+800>>2];if((c[f>>2]&1|0)!=0?(c[b+800>>2]|0)>1:0){a[e+140>>0]=1;j=0;i=d;return j|0}a[e+140>>0]=2;j=0;i=d;return j|0}while(0);$b(b)|0;j=-12;i=d;return j|0}function _b(f,g,h,j){f=f|0;g=g|0;h=h|0;j=j|0;var k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0,aa=0,ba=0,ca=0,da=0,ea=0,fa=0,ga=0,ha=0,ia=0,ja=0,ka=0,la=0,ma=0,na=0,oa=0,pa=0,qa=0,ra=0,sa=0,ta=0,ua=0,va=0,wa=0,xa=0,ya=0,za=0,Aa=0,Ba=0,Ca=0,Da=0,Ea=0,Fa=0,Ha=0,Ia=0,Ja=0,Ka=0,La=0,Ma=0,Na=0,Oa=0,Pa=0,Qa=0,Ra=0,Sa=0,Ta=0,Ua=0,Va=0,Wa=0,Xa=0,Ya=0,Za=0,_a=0,$a=0,ab=0,bb=0,cb=0,db=0,eb=0,fb=0,gb=0,hb=0,ib=0,jb=0,kb=0,lb=0,mb=0,nb=0,ob=0,pb=0,qb=0;k=i;i=i+48|0;o=k+8|0;n=k+32|0;r=k+16|0;p=k;m=c[f+60>>2]|0;f=j+28|0;L=c[f>>2]|0;if(!L){g=vc(m,g,1)|0;if((g|0)<0){pb=g;i=k;return pb|0}c[h>>2]=g;pb=0;i=k;return pb|0}q=m+3508|0;c[q>>2]=0;l=m+7776|0;b[l>>1]=1;K=c[j+24>>2]|0;c[q>>2]=0;G=m+5828|0;s=m+5832|0;c[s>>2]=c[G>>2];c[G>>2]=0;w=m+7660|0;c[w>>2]=0;a:do if((L|0)>3){J=m+7722|0;H=m+7664|0;A=m+7656|0;j=m+7640|0;E=m+7648|0;y=m+7644|0;F=m+7636|0;z=m+7632|0;x=m+7628|0;u=m+136|0;v=m+3500|0;I=m+7732|0;while(1){C=(a[J>>0]|0)==0;if(C){while(1){M=K+1|0;if(((a[K>>0]|0)==0?(a[M>>0]|0)==0:0)?(a[K+2>>0]|0)==1:0)break;if((L|0)<5){C=-1094995529;break a}K=M;L=L+ -1|0}K=K+3|0;N=0;L=L+ -3|0}else{M=c[I>>2]|0;if((M|0)>0){O=0;N=0;do{O=d[K+N>>0]|O<<8;N=N+1|0}while((N|0)!=(M|0));N=O}else N=0;L=L-M|0;if((N|0)>(L|0)){C=-1094995529;break 
a}K=K+M|0}C=C?L:N;M=c[H>>2]|0;O=c[w>>2]|0;if((M|0)<(O+1|0)){M=M+1|0;N=je(c[A>>2]|0,M,16)|0;if(!N){C=-12;break a}c[A>>2]=N;O=c[H>>2]|0;jf(N+(O<<4)|0,0,M-O<<4|0)|0;ke(j,M,4)|0;ke(E,M,4)|0;ke(y,M,4)|0;O=c[E>>2]|0;c[O+(c[H>>2]<<2)>>2]=1024;O=ne(c[O+(c[H>>2]<<2)>>2]|0,4)|0;c[(c[y>>2]|0)+(c[H>>2]<<2)>>2]=O;c[H>>2]=M;O=c[w>>2]|0}c[F>>2]=c[(c[E>>2]|0)+(O<<2)>>2];c[z>>2]=c[(c[y>>2]|0)+(O<<2)>>2];N=c[A>>2]|0;M=Yb(m,K,C,N+(O<<4)|0)|0;c[(c[j>>2]|0)+(c[w>>2]<<2)>>2]=c[x>>2];c[(c[E>>2]|0)+(c[w>>2]<<2)>>2]=c[F>>2];ob=c[z>>2]|0;pb=c[w>>2]|0;c[w>>2]=pb+1;c[(c[y>>2]|0)+(pb<<2)>>2]=ob;if((M|0)<0){C=M;break a}ob=c[u>>2]|0;mb=c[N+(O<<4)+12>>2]|0;nb=c[N+(O<<4)+8>>2]|0;nb=nb>>>0>268435455?-8:nb<<3;pb=nb>>>0>2147483639|(mb|0)==0;nb=pb?0:nb;mb=pb?0:mb;C=pb?-1094995529:0;c[ob+204>>2]=mb;c[ob+216>>2]=nb;c[ob+220>>2]=nb+8;c[ob+208>>2]=mb+(nb>>3);c[ob+212>>2]=0;if(pb)break a;cc(m)|0;if(((c[v>>2]|0)+ -36|0)>>>0<2)c[G>>2]=1;L=L-M|0;if((L|0)<=3)break;else K=K+M|0}if((c[w>>2]|0)>0){ma=m+4|0;Ba=m+2436|0;bb=m+3034|0;db=m+2416|0;Aa=m+204|0;sa=m+200|0;Ia=m+2437|0;Ja=m+2420|0;Na=m+2424|0;Oa=m+5824|0;Fa=m+156|0;Da=m+2428|0;N=m+2438|0;P=m+2608|0;ra=m+5816|0;U=m+3504|0;V=m+5820|0;ba=m+3044|0;ca=m+3045|0;W=m+3046|0;ja=m+3040|0;ha=m+3036|0;fa=m+3032|0;ia=m+3033|0;M=m+3047|0;eb=m+3048|0;la=m+3052|0;ka=m+3035|0;L=m+3080|0;E=m+3101|0;K=m+3102|0;ga=m+3051|0;La=m+3056|0;Y=m+3060|0;X=m+3064|0;Z=m+3068|0;da=m+3049|0;aa=m+3072|0;_=m+3076|0;ea=m+3050|0;R=m+2432|0;Q=m+2440|0;T=m+2772|0;O=m+2439|0;Ma=m+3096|0;Ha=m+3100|0;Ka=m+3488|0;Ca=m+5836|0;na=m+5848|0;oa=m+7668|0;cb=o+4|0;ta=m+7572|0;va=m+5840|0;ua=m+5844|0;wa=m+7576|0;xa=m+7596|0;ya=m+7600|0;za=m+7580|0;qa=m+160|0;pa=m+140|0;Ea=m+164|0;J=m+3084|0;G=m+3088|0;F=m+3092|0;H=m+141|0;I=m+7620|0;$a=m+3492|0;ab=m+3496|0;Za=m+7584|0;_a=m+7588|0;Ya=m+7592|0;Wa=m+7604|0;Xa=m+7568|0;Ua=m+1428|0;Va=m+1432|0;Sa=m+5852|0;Ta=m+7560|0;Pa=m+196|0;Qa=m+7616|0;Ra=m+168|0;S=0;b:while(1){c[x>>2]=c[(c[j>>2]|0)+(S<<2)>>2];c[z>>2]=c[(c[y>>2]|0)+(S<<2)>>2];ob=c[A>>2]|0;nb=c[ob+(S<<4)+12>>
2]|0;ob=c[ob+(S<<4)+8>>2]|0;pb=c[u>>2]|0;ob=ob>>>0>268435455?-8:ob<<3;fb=ob>>>0>2147483639|(nb|0)==0;ob=fb?0:ob;nb=fb?0:nb;c[pb+204>>2]=nb;c[pb+216>>2]=ob;c[pb+220>>2]=ob+8;c[pb+208>>2]=nb+(ob>>3);c[pb+212>>2]=0;c:do if(fb){t=fb?-1094995529:0;B=272}else{fb=cc(m)|0;d:do if((fb|0)>=0){if(!fb)break c;switch(c[v>>2]|0){case 9:case 8:case 7:case 6:case 21:case 20:case 19:case 18:case 17:case 16:case 5:case 4:case 3:case 2:case 0:case 1:{fb=c[u>>2]|0;gb=fb+204|0;pb=(_d(gb)|0)&255;a[Ba>>0]=pb;hb=c[v>>2]|0;if(!((hb+ -16|0)>>>0>4|pb<<24>>24==0)?(b[Qa>>1]=(e[Qa>>1]|0)+1&255,c[Ca>>2]=2147483647,(hb+ -19|0)>>>0<2):0){rc(m);hb=c[v>>2]|0}a[bb>>0]=0;if((hb+ -16|0)>>>0<8)a[bb>>0]=_d(gb)|0;hb=ae(gb)|0;c[db>>2]=hb;if(hb>>>0>255)break a;hb=c[m+(hb<<2)+400>>2]|0;if(!hb)break a;if(!(a[Ba>>0]|0)){kb=c[hb+4>>2]|0;if((c[Aa>>2]|0)!=(kb|0))break a}else kb=c[hb+4>>2]|0;c[Aa>>2]=kb;jb=c[v>>2]|0;ib=(jb|0)==21;if(ib?(c[s>>2]|0)==1:0)a[bb>>0]=1;hb=c[sa>>2]|0;kb=c[(c[m+(c[kb>>2]<<2)+272>>2]|0)+4>>2]|0;if((hb|0)!=(kb|0)){c[sa>>2]=kb;e:do if(hb){if((jb+ -16|0)>>>0>7|ib)break;do if((c[kb+13120>>2]|0)==(c[hb+13120>>2]|0)){if((c[kb+13124>>2]|0)!=(c[hb+13124>>2]|0))break;if((c[kb+76+(((c[kb+72>>2]|0)+ -1|0)*12|0)>>2]|0)==(c[hb+(((c[hb+72>>2]|0)+ -1|0)*12|0)+76>>2]|0))break e}while(0);a[bb>>0]=0}while(0);rc(m);hb=c[sa>>2]|0;bc(m);ib=c[hb+13064>>2]|0;jb=hb+13120|0;qb=c[jb>>2]|0;kb=hb+13124|0;pb=c[kb>>2]|0;ib=$((pb>>ib)+1|0,(qb>>ib)+1|0)|0;mb=$(c[hb+13132>>2]|0,c[hb+13128>>2]|0)|0;ob=hb+13156|0;nb=hb+13160|0;lb=$(c[nb>>2]|0,c[ob>>2]|0)|0;c[va>>2]=(qb>>2)+1;c[ua>>2]=(pb>>2)+1;c[$a>>2]=oe(mb,148)|0;pb=oe(mb,8)|0;c[ab>>2]=pb;if((c[$a>>2]|0)==0|(pb|0)==0){B=72;break b}pb=hb+13144|0;qb=hb+13140|0;c[Za>>2]=ee($(c[qb>>2]|0,c[pb>>2]|0)|0)|0;qb=ne(c[pb>>2]|0,c[qb>>2]|0)|0;c[_a>>2]=qb;if((c[Za>>2]|0)==0|(qb|0)==0){B=72;break b}c[xa>>2]=ne(c[hb+13148>>2]|0,c[hb+13152>>2]|0)|0;c[Ya>>2]=le(lb)|0;nb=ee($((c[nb>>2]|0)+1|0,(c[ob>>2]|0)+1|0)|0)|0;c[ya>>2]=nb;if(!(c[Ya>>2]|0)){B=72;break 
b}if((c[xa>>2]|0)==0|(nb|0)==0){B=72;break b}c[Wa>>2]=ee(mb)|0;c[za>>2]=ne(ib,4)|0;qb=ne(ib,1)|0;c[Xa>>2]=qb;if(!qb){B=72;break b}if(!(c[Wa>>2]|0)){B=72;break b}if(!(c[za>>2]|0)){B=72;break b}c[ta>>2]=oe(c[va>>2]|0,c[ua>>2]|0)|0;qb=oe(c[va>>2]|0,c[ua>>2]|0)|0;c[wa>>2]=qb;if((c[ta>>2]|0)==0|(qb|0)==0){B=72;break b}c[Ua>>2]=ve(lb*12|0,3)|0;qb=ve(mb*392|0,3)|0;c[Va>>2]=qb;if((c[Ua>>2]|0)==0|(qb|0)==0){B=72;break b}qb=c[ma>>2]|0;c[qb+124>>2]=c[jb>>2];c[qb+128>>2]=c[kb>>2];c[qb+116>>2]=c[hb+12>>2];c[qb+120>>2]=c[hb+16>>2];c[qb+136>>2]=c[hb+60>>2];c[qb+172>>2]=c[hb+(((c[hb+72>>2]|0)+ -1|0)*12|0)+80>>2];qb=hb+160|0;c[o+0>>2]=c[qb+0>>2];c[o+4>>2]=c[qb+4>>2];if(!(c[hb+176>>2]|0)){ib=c[ma>>2]|0;c[ib+392>>2]=1}else{ib=c[ma>>2]|0;c[ib+392>>2]=(c[hb+184>>2]|0)!=0?2:1}if(!(c[hb+188>>2]|0)){c[ib+380>>2]=2;c[ib+384>>2]=2;c[ib+388>>2]=2}else{c[ib+380>>2]=d[hb+192>>0];c[ib+384>>2]=d[hb+193>>0];c[ib+388>>2]=d[hb+194>>0]}qb=hb+52|0;Dc(Sa,c[qb>>2]|0);de(Ta,c[qb>>2]|0);if(a[hb+12941>>0]|0){ib=c[sa>>2]|0;jb=(c[ib+4>>2]|0)!=0?3:1;qb=(1<<c[ib+13080>>2])+2|0;qb=$(qb,qb)|0;c[Ra>>2]=ee(qb<<c[ib+56>>2])|0;ib=0;do{qb=c[sa>>2]|0;pb=c[qb+13124>>2]>>c[qb+(ib<<2)+13180>>2];ob=$(c[qb+13120>>2]>>c[qb+(ib<<2)+13168>>2]<<1,c[qb+13132>>2]|0)|0;c[m+(ib<<2)+172>>2]=ee(ob<<c[qb+56>>2])|0;qb=c[sa>>2]|0;pb=$(pb<<1,c[qb+13128>>2]|0)|0;c[m+(ib<<2)+184>>2]=ee(pb<<c[qb+56>>2])|0;ib=ib+1|0}while((ib|0)<(jb|0))}c[sa>>2]=hb;c[Pa>>2]=c[(c[m+(c[hb>>2]<<2)+208>>2]|0)+4>>2];b[Qa>>1]=(e[Qa>>1]|0)+1&255;c[Ca>>2]=2147483647}qb=c[ma>>2]|0;c[qb+832>>2]=d[hb+302>>0];c[qb+836>>2]=d[hb+335>>0];a[Ia>>0]=0;do if(!(a[Ba>>0]|0)){if(a[(c[Aa>>2]|0)+41>>0]|0){a[Ia>>0]=_d(gb)|0;hb=c[sa>>2]|0}ib=($(c[hb+13128>>2]<<1,c[hb+13132>>2]|0)|0)+ -2|0;hb=ib>>>0>65535;ib=hb?ib>>>16:ib;hb=hb?16:0;if(ib&65280){hb=hb|8;ib=ib>>>8}hb=Xd(gb,(d[4872+ib>>0]|0)+hb|0)|0;c[Ja>>2]=hb;qb=c[sa>>2]|0;if(hb>>>0>=($(c[qb+13132>>2]|0,c[qb+13128>>2]|0)|0)>>>0)break a;if(a[Ia>>0]|0)if(!(a[Fa>>0]|0))break a;else 
break;else{c[Na>>2]=hb;c[Oa>>2]=(c[Oa>>2]|0)+1;B=83;break}}else{c[Na>>2]=0;c[Ja>>2]=0;c[Oa>>2]=0;a[Fa>>0]=0;B=83}while(0);f:do if((B|0)==83){B=0;a[Fa>>0]=0;if((c[(c[Aa>>2]|0)+1624>>2]|0)>0){hb=0;do{Zd(gb,1);hb=hb+1|0}while((hb|0)<(c[(c[Aa>>2]|0)+1624>>2]|0))}hb=ae(gb)|0;c[Da>>2]=hb;if(hb>>>0>=3)break a;if(!((hb|0)==2?1:((c[v>>2]|0)+ -16|0)>>>0>7))break a;a[N>>0]=1;if(a[(c[Aa>>2]|0)+39>>0]|0)a[N>>0]=_d(gb)|0;if(a[(c[sa>>2]|0)+8>>0]|0)a[O>>0]=Xd(gb,2)|0;do if(((c[v>>2]|0)+ -19|0)>>>0>=2){hb=Xd(gb,c[(c[sa>>2]|0)+64>>2]|0)|0;c[R>>2]=hb;hb=Ac(m,hb)|0;do if(!(a[Ba>>0]|0)){ib=c[ra>>2]|0;if((hb|0)==(ib|0))break;if(!(c[(c[ma>>2]|0)+688>>2]&8))hb=ib;else break a}while(0);c[ra>>2]=hb;qb=(_d(gb)|0)==0;ib=c[sa>>2]|0;if(qb){if((yd(m,Q,ib,1)|0)<0)break a;c[P>>2]=Q;ib=c[sa>>2]|0}else{hb=c[ib+2184>>2]|0;if(!hb)break a;hb=(hb<<1)+ -2|0;jb=hb>>>0>65535;hb=jb?hb>>>16:hb;jb=jb?16:0;if(hb&65280){jb=jb|8;hb=hb>>>8}hb=(d[4872+hb>>0]|0)+jb|0;if((hb|0)>0){hb=Xd(gb,hb)|0;ib=c[sa>>2]|0}else hb=0;c[P>>2]=ib+(hb*168|0)+2188}hb=ib+64|0;jb=c[hb>>2]|0;a[T>>0]=0;do if(a[ib+12942>>0]|0){kb=ib+13040|0;if(!(a[kb>>0]|0))lb=0;else lb=ae(gb)|0;mb=ae(gb)|0;qb=gf(mb|0,0,lb|0,0)|0;pb=D;if(pb>>>0>0|(pb|0)==0&qb>>>0>32)if(!(c[(c[ma>>2]|0)+688>>2]&8))break;else break a;qb=mb+lb|0;a[T>>0]=qb;if(!(qb&255))break;else{mb=0;nb=0}do{if(mb>>>0<lb>>>0){ob=a[kb>>0]|0;if((ob&255)>1){ob=((ob&255)<<1)+ -2|0;pb=ob>>>0>65535;ob=pb?ob>>>16:ob;pb=pb?16:0;if(ob&65280){pb=pb|8;ob=ob>>>8}ob=(Xd(gb,(d[4872+ob>>0]|0)+pb|0)|0)&255}else ob=0;c[m+(mb<<2)+2612>>2]=e[ib+(ob<<1)+12944>>1];a[m+mb+2740>>0]=a[ib+ob+13008>>0]|0}else{c[m+(mb<<2)+2612>>2]=Xd(gb,c[hb>>2]|0)|0;a[m+mb+2740>>0]=_d(gb)|0}if(((_d(gb)|0)&255)<<24>>24){qb=ae(gb)|0;nb=qb+((mb|0)==0|(mb|0)==(lb|0)?0:nb)|0;qb=m+(mb<<2)+2612|0;c[qb>>2]=(c[ra>>2]|0)-(nb<<jb)-(c[R>>2]|0)+(c[qb>>2]|0)}mb=mb+1|0}while((mb|0)<(d[T>>0]|0))}while(0);if(!(a[(c[sa>>2]|0)+13060>>0]|0)){a[ka>>0]=0;break}else{a[ka>>0]=_d(gb)|0;break}}else{c[P>>2]=0;c[ra>>2]=0}while(0);g:do 
if(!(c[U>>2]|0)){switch(c[v>>2]|0){case 9:case 8:case 7:case 6:case 4:case 2:case 0:break g;default:{}}c[V>>2]=c[ra>>2]}while(0);do if(a[(c[sa>>2]|0)+12941>>0]|0){a[ba>>0]=_d(gb)|0;if(!(c[(c[sa>>2]|0)+4>>2]|0)){a[ca>>0]=0;a[W>>0]=0;break}else{qb=(_d(gb)|0)&255;a[W>>0]=qb;a[ca>>0]=qb;break}}else{a[ba>>0]=0;a[ca>>0]=0;a[W>>0]=0}while(0);c[ja>>2]=0;c[ha>>2]=0;ib=c[Da>>2]|0;if(ib>>>0<2){hb=c[Aa>>2]|0;c[ha>>2]=c[hb+8>>2];if(!ib)c[ja>>2]=c[hb+12>>2];do if(!(_d(gb)|0))B=138;else{hb=(ae(gb)|0)+1|0;c[ha>>2]=hb;if(c[Da>>2]|0)break;c[ja>>2]=(ae(gb)|0)+1;B=138}while(0);if((B|0)==138){B=0;hb=c[ha>>2]|0}if(hb>>>0>16)break a;if((c[ja>>2]|0)>>>0>16)break a;a[fa>>0]=0;a[ia>>0]=0;jb=Bc(m)|0;if(!jb)break a;do if((jb|0)>1?(a[(c[Aa>>2]|0)+1617>>0]|0)!=0:0){qb=(_d(gb)|0)&255;a[fa>>0]=qb;do if(qb<<24>>24){if(!(c[ha>>2]|0))break;qb=(jb<<1)+ -2|0;hb=qb>>>0>65535;qb=hb?qb>>>16:qb;hb=hb?16:0;ib=(qb&65280|0)==0;hb=(d[4872+(ib?qb:qb>>>8)>>0]|0)+(ib?hb:hb|8)|0;ib=0;do{c[m+(ib<<2)+2776>>2]=Xd(gb,hb)|0;ib=ib+1|0}while(ib>>>0<(c[ha>>2]|0)>>>0)}while(0);if(c[Da>>2]|0)break;qb=_d(gb)|0;a[ia>>0]=qb;if((qb&255|0)!=1){B=152;break}if(!(c[ja>>2]|0)){B=152;break}qb=(jb<<1)+ -2|0;hb=qb>>>0>65535;qb=hb?qb>>>16:qb;hb=hb?16:0;B=(qb&65280|0)==0;hb=(d[4872+(B?qb:qb>>>8)>>0]|0)+(B?hb:hb|8)|0;B=0;do{c[m+(B<<2)+2904>>2]=Xd(gb,hb)|0;B=B+1|0}while(B>>>0<(c[ja>>2]|0)>>>0);B=152}else B=152;while(0);do if((B|0)==152){if(c[Da>>2]|0)break;a[M>>0]=_d(gb)|0}while(0);if(!(a[(c[Aa>>2]|0)+5>>0]|0))B=0;else B=(_d(gb)|0)&255;a[eb>>0]=B;c[la>>2]=0;do if(a[ka>>0]|0){a[ga>>0]=0;if(!(c[Da>>2]|0)){B=(_d(gb)|0)==0&1;a[ga>>0]=B}else B=0;if((c[m+((B&255)<<2)+3036>>2]|0)>>>0<=1)break;qb=ae(gb)|0;c[la>>2]=qb;if(qb>>>0>=(c[m+(d[ga>>0]<<2)+3036>>2]|0)>>>0)break a}while(0);hb=c[Aa>>2]|0;if((a[hb+37>>0]|0)!=0?(c[Da>>2]|0)==1:0)B=165;else B=163;do if((B|0)==163){B=0;if(!(a[hb+38>>0]|0))break;if(!(c[Da>>2]|0))B=165}while(0);do if((B|0)==165){B=0;a[E>>0]=ae(gb)|0;if(c[(c[sa>>2]|0)+4>>2]|0){hb=be(gb)|0;hb=(d[E>>0]|0)+hb|0;if((hb|0)<0)hb=0;else 
hb=(hb|0)>7?7:hb&65535;b[K>>1]=hb}if(!(c[ha>>2]|0))hb=0;else{ib=0;do{qb=(_d(gb)|0)&255;a[n+ib>>0]=qb;if(!(qb<<24>>24)){b[m+(ib<<1)+3104>>1]=1<<d[E>>0];b[m+(ib<<1)+3296>>1]=0}ib=ib+1|0;hb=c[ha>>2]|0}while(ib>>>0<hb>>>0)}ib=(hb|0)==0;if(!(c[(c[sa>>2]|0)+4>>2]|0)){if(ib)break;jf(r|0,0,(hb>>>0>1?hb:1)|0)|0}else{if(ib)break;else hb=0;do{a[r+hb>>0]=_d(gb)|0;hb=hb+1|0;ib=c[ha>>2]|0}while(hb>>>0<ib>>>0);hb=ib}if(!hb)break;else ib=0;do{if(a[n+ib>>0]|0){qb=be(gb)|0;b[m+(ib<<1)+3104>>1]=(1<<d[E>>0])+qb;b[m+(ib<<1)+3296>>1]=be(gb)|0}if(!(a[r+ib>>0]|0)){qb=1<<b[K>>1]&65535;b[m+(ib<<2)+3136>>1]=qb;b[m+(ib<<2)+3328>>1]=0;b[m+(ib<<2)+3138>>1]=qb;b[m+(ib<<2)+3330>>1]=0}else{hb=0;do{jb=be(gb)|0;qb=be(gb)|0;jb=(1<<b[K>>1])+jb|0;b[m+(ib<<2)+(hb<<1)+3136>>1]=jb;jb=qb-(jb<<16>>9>>b[K>>1])+128|0;if((jb|0)<-128)jb=-128;else jb=(jb|0)>127?127:jb&65535;b[m+(ib<<2)+(hb<<1)+3328>>1]=jb;hb=hb+1|0}while((hb|0)!=2)}ib=ib+1|0}while(ib>>>0<(c[ha>>2]|0)>>>0)}while(0);pb=ae(gb)|0;qb=5-pb|0;c[L>>2]=qb;if((pb|0)==5|qb>>>0>5)break a}c[La>>2]=be(gb)|0;hb=c[Aa>>2]|0;if(!(a[hb+36>>0]|0)){c[Y>>2]=0;c[X>>2]=0}else{c[Y>>2]=be(gb)|0;c[X>>2]=be(gb)|0;hb=c[Aa>>2]|0}if(!(a[hb+1631>>0]|0))a[Z>>0]=0;else{a[Z>>0]=_d(gb)|0;hb=c[Aa>>2]|0}h:do if(!(a[hb+55>>0]|0)){a[da>>0]=0;c[aa>>2]=0;c[_>>2]=0}else{do if(a[hb+56>>0]|0){if(!(_d(gb)|0)){hb=c[Aa>>2]|0;break}qb=(_d(gb)|0)&255;a[da>>0]=qb;if(qb<<24>>24)break h;c[aa>>2]=(be(gb)|0)<<1;c[_>>2]=(be(gb)|0)<<1;break h}while(0);a[da>>0]=a[hb+57>>0]|0;c[aa>>2]=c[hb+60>>2];c[_>>2]=c[hb+64>>2]}while(0);hb=a[(c[Aa>>2]|0)+54>>0]|0;i:do if(hb<<24>>24){do if(!(a[ba>>0]|0)){if(a[ca>>0]|0)break;if(a[da>>0]|0)break i}while(0);a[ea>>0]=_d(gb)|0;break f}while(0);a[ea>>0]=hb}while(0);c[Ma>>2]=0;qb=c[Aa>>2]|0;if(!((a[qb+42>>0]|0)==0?(a[qb+43>>0]|0)==0:0))B=211;j:do 
if((B|0)==211){B=0;qb=ae(gb)|0;c[Ma>>2]=qb;if((qb|0)<=0){c[I>>2]=0;break}hb=(ae(gb)|0)+1|0;ib=hb>>4;hb=hb&15;ie(J);ie(G);ie(F);c[J>>2]=ne(c[Ma>>2]|0,4)|0;c[G>>2]=ne(c[Ma>>2]|0,4)|0;jb=ne(c[Ma>>2]|0,4)|0;c[F>>2]=jb;if(!(c[J>>2]|0)){B=216;break b}if((c[G>>2]|0)==0|(jb|0)==0){B=216;break b}if((c[Ma>>2]|0)>0){lb=(ib|0)>0;kb=(hb|0)==0;jb=0;do{if(lb){mb=0;nb=0;do{nb=(Xd(gb,16)|0)+(nb<<16)|0;mb=mb+1|0}while((mb|0)!=(ib|0))}else nb=0;if(!kb)nb=(Xd(gb,hb)|0)+(nb<<hb)|0;c[(c[J>>2]|0)+(jb<<2)>>2]=nb+1;jb=jb+1|0}while((jb|0)<(c[Ma>>2]|0))}do if((d[H>>0]|0)>1){qb=c[Aa>>2]|0;if((c[qb+48>>2]|0)<=1?(c[qb+44>>2]|0)<=1:0)break;c[I>>2]=0;a[H>>0]=1;break j}while(0);c[I>>2]=0}while(0);hb=c[Aa>>2]|0;if(a[hb+1628>>0]|0){hb=ae(gb)|0;pb=kf(hb|0,0,3)|0;nb=D;qb=(c[fb+216>>2]|0)-(c[fb+212>>2]|0)|0;ob=((qb|0)<0)<<31>>31;if((nb|0)>(ob|0)|(nb|0)==(ob|0)&pb>>>0>qb>>>0)break a;if(hb){ib=0;do{Zd(gb,8);ib=ib+1|0}while((ib|0)!=(hb|0))}hb=c[Aa>>2]|0}gb=(c[hb+16>>2]|0)+26+(c[La>>2]|0)|0;a[Ha>>0]=gb;gb=gb<<24;if((gb|0)>855638016)break a;if((gb>>24|0)<(0-(c[(c[sa>>2]|0)+13192>>2]|0)|0))break a;qb=c[Ja>>2]|0;c[Ka>>2]=qb;if((qb|0)==0?(a[Ia>>0]|0)!=0:0)break a;if(((c[fb+216>>2]|0)-(c[fb+212>>2]|0)|0)<0)break a;a[(c[u>>2]|0)+203>>0]=(a[Ia>>0]|0)==0&1;if(!(a[(c[Aa>>2]|0)+22>>0]|0))a[(c[u>>2]|0)+272>>0]=a[Ha>>0]|0;a[Fa>>0]=1;a[(c[u>>2]|0)+302>>0]=0;a[(c[u>>2]|0)+303>>0]=0;gb=c[Ca>>2]|0;fb=c[v>>2]|0;k:do if((gb|0)==2147483647)switch(fb|0){case 18:case 16:case 17:case 21:{gb=c[ra>>2]|0;c[Ca>>2]=gb;break k};case 20:case 19:{c[Ca>>2]=-2147483648;gb=-2147483648;break k};default:{gb=2147483647;break k}}while(0);do if((fb+ -8|0)>>>0<2){if((c[ra>>2]|0)<=(gb|0)){c[na>>2]=0;break c}if((fb|0)!=9)break;c[Ca>>2]=-2147483648}while(0);l:do if(!(a[Ba>>0]|0)){if(!(c[q>>2]|0)){fb=0;break 
d}}else{fb=c[u>>2]|0;pb=c[sa>>2]|0;gb=c[pb+13064>>2]|0;qb=c[pb+13120>>2]>>gb;gb=(c[pb+13124>>2]>>gb)+1|0;jf(c[ta>>2]|0,0,$(c[ua>>2]|0,c[va>>2]|0)|0)|0;jf(c[wa>>2]|0,0,$(c[ua>>2]|0,c[va>>2]|0)|0)|0;pb=c[sa>>2]|0;jf(c[xa>>2]|0,0,$(c[pb+13152>>2]|0,c[pb+13148>>2]|0)|0)|0;pb=c[sa>>2]|0;jf(c[ya>>2]|0,0,$((c[pb+13160>>2]|0)+1|0,(c[pb+13156>>2]|0)+1|0)|0)|0;jf(c[za>>2]|0,-1,$((qb<<2)+4|0,gb)|0)|0;c[na>>2]=0;c[oa>>2]=c[v>>2];gb=c[Aa>>2]|0;if(a[gb+42>>0]|0)c[fb+312>>2]=c[c[gb+1648>>2]>>2]<<c[(c[sa>>2]|0)+13080>>2];fb=tc(m,qa,c[ra>>2]|0)|0;do if((fb|0)<0)t=fb;else{fb=yc(m)|0;if((fb|0)<0){t=fb;break}qb=((c[v>>2]|0)+ -16|0)>>>0<8;c[(c[c[q>>2]>>2]|0)+80>>2]=qb&1;c[(c[qa>>2]|0)+84>>2]=3-(c[Da>>2]|0);if(!qb)wc(m);De(c[Ea>>2]|0);fb=vc(m,c[Ea>>2]|0,0)|0;if((fb|0)<0){t=fb;break}fb=c[v>>2]|0;break l}while(0);do if((c[q>>2]|0)!=0?(a[pa>>0]|0)!=1:0)break;while(0);c[q>>2]=0;B=272;break c}while(0);if((fb|0)!=(c[oa>>2]|0))break a;do if(!(a[Ia>>0]|0)){if((c[Da>>2]|0)==2)break;fb=xc(m)|0;if((fb|0)<0)break d}while(0);c[o>>2]=0;c[cb>>2]=1;fb=c[ma>>2]|0;Ga[c[fb+816>>2]&1](fb,1,o,p,1,4)|0;fb=c[p>>2]|0;qb=c[sa>>2]|0;if((fb|0)>=($(c[qb+13132>>2]|0,c[qb+13128>>2]|0)|0))c[na>>2]=1;if((fb|0)<0)break d;else break c};case 48:{fb=zd(m)|0;if((fb|0)<0)break d;else break c};case 34:{fb=Ad(m)|0;if((fb|0)<0)break d;else break c};case 40:case 39:{fb=Cd(m)|0;if((fb|0)<0)break d;else break c};case 37:case 36:{b[Qa>>1]=(e[Qa>>1]|0)+1&255;c[Ca>>2]=2147483647;break c};default:break c}}while(0);t=(c[(c[ma>>2]|0)+688>>2]&8|0)==0?0:fb;B=272}while(0);if((B|0)==272?(B=0,(t|0)<0):0)break a;S=S+1|0;if((S|0)>=(c[w>>2]|0))break a}if((B|0)==72){bc(m);bc(m);c[sa>>2]=0;break}else if((B|0)==216){c[Ma>>2]=0;break}}}else C=0;while(0);if((C|0)<0){qb=C;i=k;return qb|0}n=m+5848|0;if(c[n>>2]|0)c[n>>2]=0;m=c[m+164>>2]|0;if(c[m+304>>2]|0){qb=m+128|0;c[qb>>2]=e[l>>1];c[qb+4>>2]=0;Ee(g,m);c[h>>2]=1}qb=c[f>>2]|0;i=k;return qb|0}function $b(b){b=b|0;var 
d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0;d=i;e=c[b+60>>2]|0;bc(e);b=e+7664|0;f=e+7644|0;if((c[b>>2]|0)>0){g=0;do{ie((c[f>>2]|0)+(g<<2)|0);g=g+1|0}while((g|0)<(c[b>>2]|0))}ie(e+7648|0);ie(e+7640|0);ie(f);ie(e+152|0);ie(e+168|0);ie(e+172|0);ie(e+184|0);ie(e+176|0);ie(e+188|0);ie(e+180|0);ie(e+192|0);Ce(e+164|0);g=e+3512|0;f=0;do{l=g+(f*72|0)|0;pc(e,l,-1);Ce(l);f=f+1|0}while((f|0)!=32);g=e+208|0;f=0;do{ue(g+(f<<2)|0);f=f+1|0}while((f|0)!=16);g=e+272|0;f=0;do{ue(g+(f<<2)|0);f=f+1|0}while((f|0)!=32);g=e+400|0;f=0;do{ue(g+(f<<2)|0);f=f+1|0}while((f|0)!=256);c[e+200>>2]=0;c[e+204>>2]=0;c[e+196>>2]=0;ue(e+1424|0);ie(e+3084|0);ie(e+3088|0);ie(e+3092|0);h=e+141|0;l=a[h>>0]|0;f=e+72|0;if((l&255)>1){g=e+8|0;j=1;do{k=f+(j<<2)|0;if(c[k>>2]|0){ie(k);ie(g+(j<<2)|0);l=a[h>>0]|0}j=j+1|0}while((j|0)<(l&255|0))}g=e+136|0;if((c[g>>2]|0)==(c[f>>2]|0))c[g>>2]=0;ie(f);f=e+7656|0;if((c[b>>2]|0)<=0){ie(f);c[b>>2]=0;i=d;return 0}e=0;do{ie((c[f>>2]|0)+(e<<4)|0);e=e+1|0}while((e|0)<(c[b>>2]|0));ie(f);c[b>>2]=0;i=d;return 0}function ac(a){a=a|0;var b=0;b=i;a=c[a+60>>2]|0;sc(a);c[a+5836>>2]=2147483647;i=b;return}function bc(a){a=a|0;var b=0;b=i;ie(a+3492|0);ie(a+3496|0);ie(a+7584|0);ie(a+7588|0);ie(a+7592|0);ie(a+7596|0);ie(a+7600|0);ie(a+7568|0);ie(a+7580|0);ie(a+7604|0);ie(a+7572|0);ie(a+7576|0);ie(a+3084|0);ie(a+3092|0);ie(a+3088|0);we(a+1428|0);we(a+1432|0);i=b;return}function cc(a){a=a|0;var b=0,d=0,e=0;b=i;d=(c[a+136>>2]|0)+204|0;if(_d(d)|0){e=-1094995529;i=b;return e|0}c[a+3500>>2]=Xd(d,6)|0;e=Xd(d,6)|0;d=(Xd(d,3)|0)+ -1|0;c[a+3504>>2]=d;if((d|0)<0){e=-1094995529;i=b;return e|0}e=(e|0)==0&1;i=b;return e|0}function dc(e,f){e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0;f=i;h=c[e+60>>2]|0;k=h+200|0;E=c[k>>2]|0;e=1<<c[E+13080>>2];l=h+204|0;n=c[l>>2]|0;A=c[(c[n+1668>>2]|0)+(c[h+3488>>2]<<2)>>2]|0;m=(a[h+2437>>0]|0)==0;if(!A)if(m)g=4;else{W=-1094995529;i=f;return 
W|0}else if(!m){m=h+7580|0;r=h+2424|0;if((c[(c[m>>2]|0)+(c[(c[n+1672>>2]|0)+(A+ -1<<2)>>2]<<2)>>2]|0)!=(c[r>>2]|0)){W=-1094995529;i=f;return W|0}}else g=4;if((g|0)==4){m=h+7580|0;r=h+2424|0}q=e+ -1|0;s=h+136|0;p=h+3492|0;y=h+3044|0;o=h+3045|0;w=h+3072|0;x=h+3496|0;v=h+3076|0;u=h+3050|0;t=h+7604|0;z=0;n=0;do{if((A|0)>=(c[E+13136>>2]|0))break;G=c[l>>2]|0;B=c[(c[G+1672>>2]|0)+(A<<2)>>2]|0;J=E+13120|0;I=E+13080|0;H=c[I>>2]|0;n=q+(c[J>>2]|0)>>H;z=((B|0)%(n|0)|0)<<H;n=((B|0)/(n|0)|0)<<H;C=c[s>>2]|0;H=1<<H;F=c[r>>2]|0;D=B-F|0;c[(c[m>>2]|0)+(B<<2)>>2]=F;do if(!(a[G+43>>0]|0)){if(!(a[G+42>>0]|0)){c[C+312>>2]=c[J>>2];G=E;break}if((A|0)!=0?(W=c[G+1676>>2]|0,(c[W+(A<<2)>>2]|0)!=(c[W+(A+ -1<<2)>>2]|0)):0){W=c[I>>2]|0;c[C+312>>2]=(c[(c[G+1648>>2]|0)+(c[(c[G+1664>>2]|0)+(z>>W<<2)>>2]<<2)>>2]<<W)+z;a[C+203>>0]=1;G=c[k>>2]|0}else G=E}else{if((z|0)==0?(H+ -1&n|0)==0:0){a[C+203>>0]=1;E=c[k>>2]|0}c[C+312>>2]=c[E+13120>>2];G=E}while(0);E=H+n|0;H=c[G+13124>>2]|0;c[C+316>>2]=(E|0)>(H|0)?H:E;E=C+31312|0;c[E>>2]=0;H=c[l>>2]|0;if(!(a[H+42>>0]|0)){if((B|0)==(F|0)){c[E>>2]=1;F=1}else F=0;if((D|0)<(c[G+13128>>2]|0)){F=F|4;c[E>>2]=F}}else{if((z|0)>0){W=c[H+1676>>2]|0;I=B+ -1|0;if((c[W+(A<<2)>>2]|0)==(c[W+(c[(c[H+1668>>2]|0)+(I<<2)>>2]<<2)>>2]|0))F=0;else{c[E>>2]=2;F=2}W=c[m>>2]|0;if((c[W+(B<<2)>>2]|0)!=(c[W+(I<<2)>>2]|0)){F=F|1;c[E>>2]=F}}else F=0;if((n|0)>0){W=c[H+1676>>2]|0;I=G+13128|0;G=c[I>>2]|0;if((c[W+(A<<2)>>2]|0)!=(c[W+(c[(c[H+1668>>2]|0)+(B-G<<2)>>2]<<2)>>2]|0)){F=F|8;c[E>>2]=F;G=c[I>>2]|0}W=c[m>>2]|0;if((c[W+(B<<2)>>2]|0)!=(c[W+(B-G<<2)>>2]|0)){F=F|4;c[E>>2]=F}}}E=(z|0)>0;if(E&(D|0)>0)G=(F>>>1&1^1)&255;else G=0;a[C+308>>0]=G;if((n|0)>0){if((D|0)<(c[(c[k>>2]|0)+13128>>2]|0))F=0;else 
F=(F>>>3&1^1)&255;a[C+309>>0]=F;F=c[(c[k>>2]|0)+13128>>2]|0;if((D+1|0)<(F|0))F=0;else{W=c[l>>2]|0;V=c[W+1676>>2]|0;F=(c[V+(A<<2)>>2]|0)==(c[V+(c[(c[W+1668>>2]|0)+(B+1-F<<2)>>2]<<2)>>2]|0)&1}a[C+310>>0]=F;if(E?(j=c[(c[k>>2]|0)+13128>>2]|0,(D|0)>(j|0)):0){D=c[l>>2]|0;W=c[D+1676>>2]|0;D=(c[W+(A<<2)>>2]|0)==(c[W+(c[(c[D+1668>>2]|0)+(B+ -1-j<<2)>>2]<<2)>>2]|0)&1}else D=0}else{a[C+309>>0]=0;a[C+310>>0]=0;D=0}a[C+311>>0]=D;_a(h,A);D=c[k>>2]|0;E=c[D+13080>>2]|0;F=z>>E;E=n>>E;G=c[s>>2]|0;D=($(c[D+13128>>2]|0,E)|0)+F|0;C=c[p>>2]|0;if((a[y>>0]|0)==0?(a[o>>0]|0)==0:0){M=0;H=0}else{if((F|0)>0?(a[G+308>>0]|0)!=0:0)M=db(h)|0;else M=0;if((E|0)>0&(M|0)==0)if(!(a[G+309>>0]|0)){M=0;H=0}else{M=0;H=(db(h)|0)!=0}else H=0}I=(c[(c[k>>2]|0)+4>>2]|0)!=0?3:1;L=C+(D*148|0)+143|0;G=C+(D*148|0)+144|0;K=C+(D*148|0)+104|0;J=C+(D*148|0)+108|0;R=(M|0)==0;S=R&(H^1);M=E+ -1|0;O=F+ -1|0;P=0;do{Q=c[l>>2]|0;Q=d[((P|0)==0?Q+1644|0:Q+1645|0)>>0]|0;a:do if(a[h+P+3044>>0]|0){T=(P|0)==2;do if(!T){if(S){U=(fb(h)|0)&255;N=C+(D*148|0)+P+142|0;a[N>>0]=U;break}if(!R){U=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;U=a[(c[p>>2]|0)+(U*148|0)+P+142>>0]|0;N=C+(D*148|0)+P+142|0;a[N>>0]=U;break}if(H){U=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;U=a[(c[p>>2]|0)+(U*148|0)+P+142>>0]|0;N=C+(D*148|0)+P+142|0;a[N>>0]=U;break}else{a[C+(D*148|0)+P+142>>0]=0;break a}}else{U=a[L>>0]|0;a[G>>0]=U;c[J>>2]=c[K>>2];N=G}while(0);if(U<<24>>24){U=0;do{do if(!S){if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(U<<2)>>2];break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(U<<2)>>2];break}else{c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=0;break}}else c[C+(D*148|0)+(P<<4)+(U<<2)>>2]=ib(h)|0;while(0);U=U+1|0}while((U|0)!=4);do if((a[N>>0]|0)==1){T=0;do{do 
if(c[C+(D*148|0)+(P<<4)+(T<<2)>>2]|0){if(S){c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=jb(h)|0;break}if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(T<<2)+48>>2];break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<4)+(T<<2)+48>>2];break}else{c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=0;break}}else c[C+(D*148|0)+(P<<4)+(T<<2)+48>>2]=0;while(0);T=T+1|0}while((T|0)!=4);if(S){a[C+(D*148|0)+P+96>>0]=hb(h)|0;break}if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;a[C+(D*148|0)+P+96>>0]=a[(c[p>>2]|0)+(W*148|0)+P+96>>0]|0;break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;a[C+(D*148|0)+P+96>>0]=a[(c[p>>2]|0)+(W*148|0)+P+96>>0]|0;break}else{a[C+(D*148|0)+P+96>>0]=0;break}}else if(!T){if(S){c[C+(D*148|0)+(P<<2)+100>>2]=kb(h)|0;break}if(!R){W=($(c[(c[k>>2]|0)+13128>>2]|0,E)|0)+O|0;c[C+(D*148|0)+(P<<2)+100>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<2)+100>>2];break}if(H){W=($(c[(c[k>>2]|0)+13128>>2]|0,M)|0)+F|0;c[C+(D*148|0)+(P<<2)+100>>2]=c[(c[p>>2]|0)+(W*148|0)+(P<<2)+100>>2];break}else{c[C+(D*148|0)+(P<<2)+100>>2]=0;break}}while(0);b[C+(D*148|0)+(P*10|0)+112>>1]=0;T=0;do{W=c[C+(D*148|0)+(P<<4)+(T<<2)>>2]|0;V=T;T=T+1|0;U=C+(D*148|0)+(P*10|0)+(T<<1)+112|0;b[U>>1]=W;if((a[N>>0]|0)==2){if((V|0)>1){W=0-W|0;b[U>>1]=W}}else if(c[C+(D*148|0)+(P<<4)+(V<<2)+48>>2]|0){W=0-W|0;b[U>>1]=W}b[U>>1]=W<<16>>16<<Q}while((T|0)!=4)}}else a[C+(D*148|0)+P+142>>0]=0;while(0);P=P+1|0}while((P|0)<(I|0));C=c[x>>2]|0;c[C+(B<<3)>>2]=c[w>>2];c[C+(B<<3)+4>>2]=c[v>>2];a[(c[t>>2]|0)+B>>0]=a[u>>0]|0;C=ec(h,z,n,c[(c[k>>2]|0)+13080>>2]|0,0)|0;if((C|0)<0){g=108;break}A=A+1|0;Za(h,A);Tb(h,z,n,e);E=c[k>>2]|0}while((C|0)!=0);if((g|0)==108){c[(c[m>>2]|0)+(B<<2)>>2]=-1;W=C;i=f;return W|0}if((z+e|0)<(c[E+13120>>2]|0)){W=A;i=f;return W|0}if((n+e|0)<(c[E+13124>>2]|0)){W=A;i=f;return W|0}Rb(h,z,n,e);W=A;i=f;return W|0}function ec(b,e,f,g,h){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;var 
j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0,S=0,T=0,U=0,V=0,W=0,X=0,Y=0,Z=0,_=0;j=i;i=i+32|0;z=j;C=j+20|0;B=b+136|0;p=c[B>>2]|0;k=1<<g;m=b+200|0;s=c[m>>2]|0;q=b+204|0;t=c[q>>2]|0;r=(1<<(c[s+13080>>2]|0)-(c[t+24>>2]|0))+ -1|0;c[p+31232>>2]=h;l=k+e|0;if(((l|0)<=(c[s+13120>>2]|0)?(k+f|0)<=(c[s+13124>>2]|0):0)?(c[s+13064>>2]|0)>>>0<g>>>0:0){s=tb(b,h,e,f)|0;t=c[q>>2]|0}else s=(c[s+13064>>2]|0)>>>0<g>>>0&1;if((a[t+22>>0]|0)!=0?((c[(c[m>>2]|0)+13080>>2]|0)-(c[t+24>>2]|0)|0)>>>0<=g>>>0:0){a[p+300>>0]=0;c[p+280>>2]=0}if((a[b+3068>>0]|0)!=0?((c[(c[m>>2]|0)+13080>>2]|0)-(d[(c[q>>2]|0)+1632>>0]|0)|0)>>>0<=g>>>0:0)a[p+301>>0]=0;if(s){n=k>>1;q=n+e|0;o=n+f|0;g=g+ -1|0;h=h+1|0;s=ec(b,e,f,g,h)|0;if((s|0)<0){_=s;i=j;return _|0}if(s){if((q|0)<(c[(c[m>>2]|0)+13120>>2]|0)){s=ec(b,q,f,g,h)|0;if((s|0)<0){_=s;i=j;return _|0}}if(s){if((o|0)<(c[(c[m>>2]|0)+13124>>2]|0)){s=ec(b,e,o,g,h)|0;if((s|0)<0){_=s;i=j;return _|0}}if(s){_=c[m>>2]|0;if((q|0)<(c[_+13120>>2]|0)?(o|0)<(c[_+13124>>2]|0):0){s=ec(b,q,o,g,h)|0;if((s|0)<0){_=s;i=j;return _|0}}}else s=0}else s=0}else s=0;if((r&l|0)==0?(r&k+f|0)==0:0)c[p+276>>2]=a[p+272>>0];if(!s){_=0;i=j;return _|0}k=c[m>>2]|0;if((q+n|0)<(c[k+13120>>2]|0))k=1;else k=(o+n|0)<(c[k+13124>>2]|0);_=k&1;i=j;return _|0}p=c[B>>2]|0;s=c[m>>2]|0;r=c[s+13064>>2]|0;h=c[s+13140>>2]|0;G=e>>r;F=f>>r;s=1<<(c[s+13080>>2]|0)-(c[(c[q>>2]|0)+24>>2]|0);c[p+31236>>2]=e;c[p+31240>>2]=f;y=p+31252|0;a[y>>0]=1;v=p+31244|0;c[v>>2]=1;x=p+31248|0;c[x>>2]=0;u=p+31254|0;a[u>>0]=0;w=p+31253|0;a[w>>0]=0;t=($(F,h)|0)+G|0;E=b+7584|0;a[(c[E>>2]|0)+t>>0]=0;_=p+31268|0;a[_>>0]=1;a[_+1>>0]=1;a[_+2>>0]=1;a[_+3>>0]=1;r=k>>r;s=s+ -1|0;if(a[(c[q>>2]|0)+40>>0]|0){_=(mb(b)|0)&255;a[p+31256>>0]=_;if(_<<24>>24)fc(b,e,f,g)}else a[p+31256>>0]=0;A=b+2428|0;if((c[A>>2]|0)==2){if((r|0)>0){G=t;F=0;while(1){jf((c[E>>2]|0)+G|0,0,r|0)|0;F=F+1|0;if((F|0)==(r|0))break;else 
G=G+h|0}}}else{H=nb(b,e,f,G,F)|0;I=H&255;if((r|0)>0){F=t;G=0;while(1){jf((c[E>>2]|0)+F|0,I|0,r|0)|0;G=G+1|0;if((G|0)==(r|0))break;else F=F+h|0}}c[v>>2]=(H&255|0)!=0?2:0}do if(!(a[(c[E>>2]|0)+t>>0]|0)){if((c[A>>2]|0)==2)A=c[v>>2]|0;else{A=sb(b)|0;c[v>>2]=A}if((A|0)==1?(c[(c[m>>2]|0)+13064>>2]|0)!=(g|0):0){E=c[x>>2]|0;A=53}else A=50;a:do if((A|0)==50){E=ub(b,g)|0;c[x>>2]=E;G=c[v>>2]|0;if((E|0)==3)F=(G|0)==1&1;else F=0;a[u>>0]=F;if((G|0)==1)A=53;else{hc(b,e,f,g);switch(c[x>>2]|0){case 0:{gc(b,e,f,k,k,g,0);break a};case 1:{_=(k|0)/2|0;gc(b,e,f,k,_,g,0);gc(b,e,_+f|0,k,_,g,1);break a};case 2:{_=(k|0)/2|0;gc(b,e,f,_,k,g,0);gc(b,_+e|0,f,_,k,g,1);break a};case 4:{_=(k|0)/4|0;gc(b,e,f,k,_,g,0);gc(b,e,_+f|0,k,(3<<g|0)/4|0,g,1);break a};case 5:{_=(3<<g|0)/4|0;gc(b,e,f,k,_,g,0);gc(b,e,_+f|0,k,(k|0)/4|0,g,1);break a};case 6:{_=(k|0)/4|0;gc(b,e,f,_,k,g,0);gc(b,_+e|0,f,(3<<g|0)/4|0,k,g,1);break a};case 7:{_=(3<<g|0)/4|0;gc(b,e,f,_,k,g,0);gc(b,_+e|0,f,(k|0)/4|0,k,g,1);break a};case 3:{_=(k|0)/2|0;gc(b,e,f,_,_,g,0);Y=_+e|0;gc(b,Y,f,_,_,g,1);Z=_+f|0;gc(b,e,Z,_,_,g,2);gc(b,Y,Z,_,_,g,3);break a};default:break a}}}while(0);do if((A|0)==53){if((((E|0)==0?(D=c[m>>2]|0,(c[D+68>>2]|0)!=0):0)?(c[D+13048>>2]|0)>>>0<=g>>>0:0)?(c[D+13052>>2]|0)>>>0>=g>>>0:0){D=(vb(b)|0)&255;a[w>>0]=D}else D=a[w>>0]|0;if(D<<24>>24){hc(b,e,f,g);I=c[B>>2]|0;Y=c[b+160>>2]|0;G=c[Y+32>>2]|0;F=$(G,f)|0;J=c[m>>2]|0;E=c[J+56>>2]|0;F=(c[Y>>2]|0)+((e<<E)+F)|0;B=c[Y+36>>2]|0;L=c[J+13184>>2]|0;H=$(f>>L,B)|0;K=c[J+13172>>2]|0;H=(c[Y+4>>2]|0)+((e>>K<<E)+H)|0;D=c[Y+40>>2]|0;_=c[J+13188>>2]|0;C=$(f>>_,D)|0;Z=c[J+13176>>2]|0;C=(c[Y+8>>2]|0)+((e>>Z<<E)+C)|0;E=$(d[J+13044>>0]|0,k<<g)|0;L=($(k>>Z,k>>_)|0)+($(k>>K,k>>L)|0)|0;E=($(d[J+13045>>0]|0,L)|0)+E|0;L=I+224|0;J=E+7>>3;K=c[I+240>>2]|0;_=c[L>>2]|0;K=(_&1|0)==0?K:K+ -1|0;K=(_&511|0)==0?K:K+ -1|0;I=(c[I+244>>2]|0)-K|0;if((I|0)<(J|0))K=0;else 
Vd(L,K+J|0,I-J|0);if(!(a[b+3049>>0]|0))Pb(b,e,f,g);_=E>>>0>2147483639|(K|0)==0;Z=_?0:E;Y=_?0:K;c[z>>2]=Y;c[z+12>>2]=Z;c[z+16>>2]=Z+8;c[z+4>>2]=Y+(Z+7>>3);c[z+8>>2]=0;if(_)z=-1094995529;else{Z=b+5852|0;Na[c[Z>>2]&3](F,G,k,k,z,d[(c[m>>2]|0)+13044>>0]|0);_=c[m>>2]|0;Na[c[Z>>2]&3](H,B,k>>c[_+13172>>2],k>>c[_+13184>>2],z,d[_+13045>>0]|0);_=c[m>>2]|0;Na[c[Z>>2]&3](C,D,k>>c[_+13176>>2],k>>c[_+13188>>2],z,d[_+13045>>0]|0);z=0}if(a[(c[m>>2]|0)+13056>>0]|0)fc(b,e,f,g);if((z|0)>=0)break;i=j;return z|0}D=c[B>>2]|0;G=(c[D+31248>>2]|0)==3;E=G?2:1;F=0;do{H=F<<1;I=0;do{a[C+(I+H)>>0]=wb(b)|0;I=I+1|0}while((I|0)<(E|0));F=F+1|0}while((F|0)<(E|0));K=k>>(G&1);J=D+31264|0;L=b+3508|0;T=z+4|0;Q=z+8|0;M=b+7592|0;I=D+31260|0;H=0;do{F=H<<1;G=($(H,K)|0)+f|0;R=0;do{P=R+F|0;X=(a[C+P>>0]|0)==0;if(X)c[J>>2]=yb(b)|0;else c[I>>2]=xb(b)|0;U=($(R,K)|0)+e|0;W=c[B>>2]|0;Y=c[m>>2]|0;V=c[Y+13084>>2]|0;O=U>>V;S=G>>V;N=c[Y+13156>>2]|0;V=K>>V;Y=c[Y+13080>>2]|0;_=(1<<Y)+ -1|0;U=_&U;if((a[W+309>>0]|0)==0?(_&G|0)==0:0)_=1;else{_=($(S+ -1|0,N)|0)+O|0;_=d[(c[M>>2]|0)+_>>0]|0}if((a[W+308>>0]|0)==0&(U|0)==0)Z=1;else{Z=O+ -1+($(S,N)|0)|0;Z=d[(c[M>>2]|0)+Z>>0]|0}U=c[(c[L>>2]|0)+16>>2]|0;_=(G>>Y<<Y|0)<(G|0)?_:1;do if((Z|0)==(_|0))if(Z>>>0<2){c[z>>2]=0;c[T>>2]=1;c[Q>>2]=26;Z=0;_=1;Y=26;break}else{c[z>>2]=Z;_=(Z+29&31)+2|0;c[T>>2]=_;Y=(Z+31&31)+2|0;c[Q>>2]=Y;break}else{c[z>>2]=Z;c[T>>2]=_;if(!((Z|0)==0|(_|0)==0)){c[Q>>2]=0;Y=0;break}if((Z|0)==1|(_|0)==1){c[Q>>2]=26;Y=26;break}else{c[Q>>2]=1;Y=1;break}}while(0);if(X){if((Z|0)>(_|0)){c[T>>2]=Z;X=_&255;c[z>>2]=X}else{X=Z;Z=_}if((X|0)>(Y|0)){c[Q>>2]=X;_=Y&255;c[z>>2]=_;Y=X;X=_}if((Z|0)>(Y|0)){c[Q>>2]=Z;_=Y&255;c[T>>2]=_;Y=Z}else _=Z;W=c[W+31264>>2]|0;W=((W|0)>=(X|0)&1)+W|0;W=((W|0)>=(_|0)&1)+W|0;W=((W|0)>=(Y|0)&1)+W|0}else 
W=c[z+(c[W+31260>>2]<<2)>>2]|0;V=(V|0)==0?1:V;W=W&255;if((V|0)>0){X=0;do{Y=($(X+S|0,N)|0)+O|0;jf((c[M>>2]|0)+Y|0,W|0,V|0)|0;Y=X+O|0;Z=0;do{a[U+((Y+($(Z+S|0,N)|0)|0)*12|0)+10>>0]=0;Z=Z+1|0}while((Z|0)<(V|0));X=X+1|0}while((X|0)<(V|0))}a[D+P+31268>>0]=W;R=R+1|0}while((R|0)<(E|0));H=H+1|0}while((H|0)<(E|0));z=c[(c[m>>2]|0)+4>>2]|0;if((z|0)==3){z=0;do{B=z<<1;C=0;do{G=zb(b)|0;H=C+B|0;a[D+H+31281>>0]=G;F=a[D+H+31268>>0]|0;do if((G|0)!=4){G=a[1600+G>>0]|0;H=D+H+31277|0;if(F<<24>>24==G<<24>>24){a[H>>0]=34;break}else{a[H>>0]=G;break}}else a[D+H+31277>>0]=F;while(0);C=C+1|0}while((C|0)<(E|0));z=z+1|0}while((z|0)<(E|0))}else if(!z)break;else if((z|0)!=2){B=zb(b)|0;z=a[D+31268>>0]|0;if((B|0)==4){a[D+31277>>0]=z;break}B=a[1600+B>>0]|0;C=D+31277|0;if(z<<24>>24==B<<24>>24){a[C>>0]=34;break}else{a[C>>0]=B;break}}else{B=zb(b)|0;a[D+31281>>0]=B;z=a[D+31268>>0]|0;if((B|0)==4)z=z&255;else{_=a[1600+B>>0]|0;z=z<<24>>24==_<<24>>24?34:_&255}a[D+31277>>0]=a[1608+z>>0]|0;break}}while(0);if(!(a[w>>0]|0)){do if((c[v>>2]|0)==1)A=139;else{if((c[x>>2]|0)==0?(a[p+31276>>0]|0)!=0:0){A=139;break}w=(Fb(b)|0)&255;a[y>>0]=w}while(0);if((A|0)==139)w=a[y>>0]|0;if(!(w<<24>>24)){if(a[b+3049>>0]|0)break;Pb(b,e,f,g);break}w=c[m>>2]|0;if((c[v>>2]|0)==1)u=(d[u>>0]|0)+(c[w+13092>>2]|0)|0;else u=c[w+13088>>2]|0;a[p+31255>>0]=u;u=ic(b,e,f,e,f,e,f,g,g,0,0,1592,1592)|0;if((u|0)<0){_=u;i=j;return _|0}}}else{gc(b,e,f,k,k,g,0);hc(b,e,f,g);if(!(a[b+3049>>0]|0))Pb(b,e,f,g)}while(0);if((a[(c[q>>2]|0)+22>>0]|0)!=0?(a[p+300>>0]|0)==0:0)Ob(b,e,f,g);if((r|0)>0){q=b+7568|0;g=p+272|0;u=0;while(1){jf((c[q>>2]|0)+t|0,a[g>>0]|0,r|0)|0;u=u+1|0;if((u|0)==(r|0))break;else 
t=t+h|0}}if((s&l|0)==0?(s&k+f|0)==0:0)c[p+276>>2]=a[p+272>>0];r=c[m>>2]|0;_=c[r+13064>>2]|0;g=k>>_;q=e>>_;e=f>>_;if((g|0)>0?(n=b+7588|0,o=c[p+31232>>2]&255,_=($(c[r+13140>>2]|0,e)|0)+q|0,jf((c[n>>2]|0)+_|0,o|0,g|0)|0,(g|0)!=1):0){p=1;do{_=($(c[(c[m>>2]|0)+13140>>2]|0,p+e|0)|0)+q|0;jf((c[n>>2]|0)+_|0,o|0,g|0)|0;p=p+1|0}while((p|0)!=(g|0))}m=c[m>>2]|0;e=1<<c[m+13080>>2];if(((l|0)%(e|0)|0|0)!=0?(l|0)<(c[m+13120>>2]|0):0){_=1;i=j;return _|0}_=k+f|0;if(((_|0)%(e|0)|0|0)!=0?(_|0)<(c[m+13124>>2]|0):0){_=1;i=j;return _|0}_=(lb(b)|0)==0&1;i=j;return _|0}function fc(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0;g=i;m=1<<f;n=c[b+200>>2]|0;l=c[n+13084>>2]|0;f=c[n+13156>>2]|0;k=m+d|0;j=c[n+13120>>2]|0;m=m+e|0;n=c[n+13124>>2]|0;h=e>>l;e=((m|0)>(n|0)?n:m)>>l;if((h|0)>=(e|0)){i=g;return}d=d>>l;j=((k|0)>(j|0)?j:k)>>l;k=(d|0)<(j|0);b=b+7600|0;do{if(k){m=$(h,f)|0;l=d;do{a[(c[b>>2]|0)+(l+m)>>0]=2;l=l+1|0}while((l|0)!=(j|0))}h=h+1|0}while((h|0)!=(e|0));i=g;return}function gc(f,g,h,j,k,l,m){f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;var n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0;r=i;i=i+16|0;n=r;x=c[f+136>>2]|0;c[n+0>>2]=c[412];c[n+4>>2]=c[413];c[n+8>>2]=c[414];s=f+200|0;A=c[s>>2]|0;w=c[A+13156>>2]|0;u=c[f+3508>>2]|0;v=c[u+16>>2]|0;u=c[u+20>>2]|0;o=f+160|0;z=c[o>>2]|0;t=$(c[z+32>>2]|0,h>>c[A+13180>>2])|0;B=c[A+56>>2]|0;t=(c[z>>2]|0)+((g>>c[A+13168>>2]<<B)+t)|0;q=$(h>>c[A+13184>>2],c[z+36>>2]|0)|0;q=(c[z+4>>2]|0)+((g>>c[A+13172>>2]<<B)+q)|0;p=$(h>>c[A+13188>>2],c[z+40>>2]|0)|0;p=(c[z+8>>2]|0)+((g>>c[A+13176>>2]<<B)+p)|0;B=c[A+13064>>2]|0;B=($(h>>B,c[A+13140>>2]|0)|0)+(g>>B)|0;a:do if(!(a[(c[f+7584>>2]|0)+B>>0]|0)){B=(Bb(f)|0)&255;a[x+31276>>0]=B;if(B<<24>>24){if((c[f+3080>>2]|0)>>>0>1)x=Ab(f)|0;else x=0;rd(f,g,h,j,k,l,m,x,n);A=c[(c[s>>2]|0)+13084>>2]|0;m=g>>A;l=h>>A;if((k>>A|0)>0)x=0;else 
break;while(1){if((j>>A|0)>0){z=($(x+l|0,w)|0)+m|0;y=0;do{A=v+((z+y|0)*12|0)|0;c[A+0>>2]=c[n+0>>2];c[A+4>>2]=c[n+4>>2];c[A+8>>2]=c[n+8>>2];y=y+1|0;A=c[(c[s>>2]|0)+13084>>2]|0}while((y|0)<(j>>A|0))}x=x+1|0;if((x|0)>=(k>>A|0))break a}}qd(f,g,h,j,k);y=n+10|0;a[y>>0]=0;if(!(c[f+2428>>2]|0)){A=Cb(f,j,k)|0;if((A|0)==1){A=1;z=22}else z=19}else{A=0;z=19}if((z|0)==19){B=c[f+3036>>2]|0;if(B)a[n+8>>0]=Db(f,B)|0;a[y>>0]=1;Mb(f,g,h,0);sd(f,g,h,j,k,l,m,0,n,Eb(f)|0,0);B=c[n>>2]|0;b[n>>1]=B+(e[x+31272>>1]|0);b[n+2>>1]=(B>>>16)+(e[x+31274>>1]|0);if(A)z=22}if((z|0)==22){z=c[f+3040>>2]|0;if(z)a[n+9>>0]=Db(f,z)|0;if((a[f+3047>>0]|0)==1&(A|0)==2)c[x+31272>>2]=0;else Mb(f,g,h,1);a[y>>0]=(d[y>>0]|0)+2;sd(f,g,h,j,k,l,m,0,n,Eb(f)|0,1);A=n+4|0;B=c[A>>2]|0;b[A>>1]=B+(e[x+31272>>1]|0);b[n+6>>1]=(B>>>16)+(e[x+31274>>1]|0)}A=c[(c[s>>2]|0)+13084>>2]|0;l=g>>A;m=h>>A;if((k>>A|0)>0){x=0;do{if((j>>A|0)>0){y=($(x+m|0,w)|0)+l|0;z=0;do{A=v+((y+z|0)*12|0)|0;c[A+0>>2]=c[n+0>>2];c[A+4>>2]=c[n+4>>2];c[A+8>>2]=c[n+8>>2];z=z+1|0;A=c[(c[s>>2]|0)+13084>>2]|0}while((z|0)<(j>>A|0))}x=x+1|0}while((x|0)<(k>>A|0))}}else{if((c[f+3080>>2]|0)>>>0>1)x=Ab(f)|0;else x=0;y=1<<l;rd(f,g,h,y,y,l,m,x,n);y=c[(c[s>>2]|0)+13084>>2]|0;l=g>>y;m=h>>y;if((k>>y|0)>0){x=0;do{if((j>>y|0)>0){A=($(x+m|0,w)|0)+l|0;z=0;do{y=v+((A+z|0)*12|0)|0;c[y+0>>2]=c[n+0>>2];c[y+4>>2]=c[n+4>>2];c[y+8>>2]=c[n+8>>2];z=z+1|0;y=c[(c[s>>2]|0)+13084>>2]|0}while((z|0)<(j>>y|0))}x=x+1|0}while((x|0)<(k>>y|0))}}while(0);w=a[n+10>>0]|0;if(w&1){w=c[n+8>>2]|0;v=c[u+(w<<24>>24<<2)>>2]|0;if(!v){i=r;return}else w=w>>>16&255}else v=0;if(w&2){u=c[u+(a[n+9>>0]<<2)+196>>2]|0;if(!u){i=r;return}}else 
u=0;if(w<<24>>24==1){u=n+8|0;B=a[u>>0]|0;kc(f,t,c[(c[o>>2]|0)+32>>2]|0,c[v>>2]|0,n,g,h,j,k,b[f+(B<<1)+3104>>1]|0,b[f+(B<<1)+3296>>1]|0);s=c[s>>2]|0;if(!(c[s+4>>2]|0)){i=r;return}z=c[s+13172>>2]|0;x=g>>z;A=c[s+13184>>2]|0;y=h>>A;z=j>>z;A=k>>A;B=c[v>>2]|0;m=a[u>>0]|0;lc(f,q,c[(c[o>>2]|0)+36>>2]|0,c[B+4>>2]|0,c[B+36>>2]|0,0,x,y,z,A,n,b[f+(m<<2)+3136>>1]|0,b[f+(m<<2)+3328>>1]|0);m=c[v>>2]|0;B=a[u>>0]|0;lc(f,p,c[(c[o>>2]|0)+40>>2]|0,c[m+8>>2]|0,c[m+40>>2]|0,0,x,y,z,A,n,b[f+(B<<2)+3138>>1]|0,b[f+(B<<2)+3330>>1]|0);i=r;return}else if(w<<24>>24==3)wa();else if(w<<24>>24==2){v=n+9|0;B=a[v>>0]|0;kc(f,t,c[(c[o>>2]|0)+32>>2]|0,c[u>>2]|0,n+4|0,g,h,j,k,b[f+(B<<1)+3264>>1]|0,b[f+(B<<1)+3392>>1]|0);s=c[s>>2]|0;if(!(c[s+4>>2]|0)){i=r;return}z=c[s+13172>>2]|0;x=g>>z;A=c[s+13184>>2]|0;y=h>>A;z=j>>z;A=k>>A;B=c[u>>2]|0;m=a[v>>0]|0;lc(f,q,c[(c[o>>2]|0)+36>>2]|0,c[B+4>>2]|0,c[B+36>>2]|0,1,x,y,z,A,n,b[f+(m<<2)+3200>>1]|0,b[f+(m<<2)+3424>>1]|0);m=c[u>>2]|0;B=a[v>>0]|0;lc(f,p,c[(c[o>>2]|0)+40>>2]|0,c[m+8>>2]|0,c[m+40>>2]|0,1,x,y,z,A,n,b[f+(B<<2)+3202>>1]|0,b[f+(B<<2)+3426>>1]|0);i=r;return}else{i=r;return}}function hc(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0;g=i;k=c[b+136>>2]|0;h=c[b+200>>2]|0;j=c[h+13084>>2]|0;l=1<<f>>j;h=c[h+13156>>2]|0;f=c[(c[b+3508>>2]|0)+16>>2]|0;d=d>>j;j=e>>j;e=(l|0)==0?1:l;l=(e|0)>0;if(l){b=b+7592|0;m=0;do{n=($(m+j|0,h)|0)+d|0;jf((c[b>>2]|0)+n|0,1,e|0)|0;m=m+1|0}while((m|0)<(e|0))}if((c[k+31244>>2]|0)!=1|l^1){i=g;return}else k=0;do{b=($(k+j|0,h)|0)+d|0;l=0;do{a[f+((b+l|0)*12|0)+10>>0]=0;l=l+1|0}while((l|0)<(e|0));k=k+1|0}while((k|0)<(e|0));i=g;return}
+
+
+
+function xd(a,d,e,f,g,h,j,k,l,m){a=a|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;var n=0,o=0;n=i;o=c[g+(h*196|0)+(j<<2)+128>>2]|0;if((o|0)!=(c[k+(l*196|0)+(m<<2)+128>>2]|0)){b[a>>1]=0;b[a+2>>1]=0;k=0;i=n;return k|0}k=c[k+(l*196|0)+(m<<2)+64>>2]|0;m=e-k|0;f=f-(c[g+(h*196|0)+(j<<2)+64>>2]|0)|0;if((o|0)==0?(m|0)!=(f|0)&(k|0)!=(e|0):0){if((m+128|0)>>>0>255)m=m>>31^127;e=m<<24>>24;if((f+128|0)>>>0>255)f=f>>31^127;k=(e|0)/2|0;e=($(f<<24>>24,(((m&255)<<24>>24>-2?k:0-k|0)+16384|0)/(e|0)|0)|0)+32>>6;if((e|0)<-4096)e=-4096;else e=(e|0)>4095?4095:e;f=$(b[d>>1]|0,e)|0;f=f+127+(f>>>31)|0;o=f>>8;if((o+32768|0)>>>0>65535)o=f>>31^32767;b[a>>1]=o;d=$(b[d+2>>1]|0,e)|0;d=d+127+(d>>>31)|0;e=d>>8;if((e+32768|0)>>>0>65535)e=d>>31^32767;b[a+2>>1]=e;k=1;i=n;return k|0}b[a>>1]=b[d>>1]|0;b[a+2>>1]=b[d+2>>1]|0;k=1;i=n;return k|0}function yd(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0,l=0,m=0,n=0;g=i;b=(c[b+136>>2]|0)+204|0;j=e+2188|0;if(((j|0)!=(d|0)?(h=e+2184|0,(c[h>>2]|0)!=0):0)?((_d(b)|0)&255)<<24>>24!=0:0){do if(f){f=(ae(b)|0)+1|0;h=c[h>>2]|0;if(f>>>0>h>>>0){n=-1094995529;i=g;return n|0}else{h=h-f|0;break}}else h=((d-j|0)/168|0)+ -1|0;while(0);f=_d(b)|0;j=(ae(b)|0)+1|0;if((j|0)==0|j>>>0>32768){n=-1094995529;i=g;return n|0}l=$(1-(f<<1&510)|0,j)|0;f=e+(h*168|0)+2192|0;if((c[f>>2]|0)<0){c[d+4>>2]=0;c[d>>2]=0;n=0;i=g;return n|0}else{k=0;j=0;m=0}while(1){n=_d(b)|0;a[d+j+136>>0]=n;if(!((n&255|0)==0?((_d(b)|0)&255)<<24>>24==0:0)){if((k|0)<(c[f>>2]|0))n=(c[e+(h*168|0)+(k<<2)+2196>>2]|0)+l|0;else n=l;c[d+(j<<2)+8>>2]=n;j=j+1|0;m=(n>>>31)+m|0}if((k|0)<(c[f>>2]|0))k=k+1|0;else break}b=d+4|0;c[b>>2]=j;c[d>>2]=m;if((j|0)>1){e=1;do{h=c[d+(e<<2)+8>>2]|0;f=a[d+e+136>>0]|0;if((e|0)>0){k=e;do{m=k;k=k+ -1|0;l=d+(k<<2)+8|0;j=c[l>>2]|0;if((h|0)<(j|0)){c[d+(m<<2)+8>>2]=j;n=d+k+136|0;a[d+m+136>>0]=a[n>>0]|0;c[l>>2]=h;a[n>>0]=f}}while((k|0)>0);j=c[b>>2]|0}e=e+1|0}while((e|0)<(j|0));m=c[d>>2]|0}if(m>>>0>1)b=0;else{n=0;i=g;return n|0}do{m=m+ 
-1|0;n=d+(b<<2)+8|0;j=c[n>>2]|0;f=d+b+136|0;l=a[f>>0]|0;k=d+(m<<2)+8|0;c[n>>2]=c[k>>2];n=d+m+136|0;a[f>>0]=a[n>>0]|0;c[k>>2]=j;a[n>>0]=l;b=b+1|0}while(b>>>0<(c[d>>2]|0)>>>1>>>0);d=0;i=g;return d|0}c[d>>2]=ae(b)|0;e=ae(b)|0;h=c[d>>2]|0;if(h>>>0>15|e>>>0>15){n=-1094995529;i=g;return n|0}n=h+e|0;c[d+4>>2]=n;if(!n){n=0;i=g;return n|0}if(h){f=0;h=0;do{h=h+ -1-(ae(b)|0)|0;c[d+(f<<2)+8>>2]=h;a[d+f+136>>0]=_d(b)|0;f=f+1|0}while(f>>>0<(c[d>>2]|0)>>>0)}if(!e){n=0;i=g;return n|0}else{f=0;h=0}do{h=h+1+(ae(b)|0)|0;c[d+((c[d>>2]|0)+f<<2)+8>>2]=h;n=(_d(b)|0)&255;a[d+((c[d>>2]|0)+f)+136>>0]=n;f=f+1|0}while((f|0)!=(e|0));d=0;i=g;return d|0}function zd(b){b=b|0;var e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0;f=i;i=i+16|0;e=f;h=c[b+136>>2]|0;g=h+204|0;j=se(13196)|0;c[e>>2]=j;if(!j){t=-12;i=f;return t|0}j=c[j+4>>2]|0;l=se(468)|0;if(!l){t=-12;i=f;return t|0}k=c[l+4>>2]|0;c[k+4>>2]=1;n=k+8|0;c[n>>2]=1;a[k>>0]=0;c[k+348>>2]=1;p=k+352|0;m=k+380|0;q=k+408|0;o=0;do{c[p+(o<<2)>>2]=1;c[m+(o<<2)>>2]=0;c[q+(o<<2)>>2]=-1;o=o+1|0}while((o|0)<(c[n>>2]|0));c[k+436>>2]=0;c[k+440>>2]=1;a[k+444>>0]=0;o=b+208|0;ue(o);c[o>>2]=l;c[j>>2]=0;o=j+72|0;c[o>>2]=1;t=Xd(g,8)|0;p=j+4|0;c[p>>2]=t;do if((t|0)<=3){a[j+8>>0]=0;n=j+13120|0;c[n>>2]=$d(g,32)|0;k=$d(g,32)|0;m=j+13124|0;c[m>>2]=k;k=Md(c[n>>2]|0,k,0,c[b+4>>2]|0)|0;if((k|0)>=0){t=Xd(g,8)|0;l=j+52|0;c[l>>2]=t+8;if(!t){p=c[p>>2]|0;if(!p){c[j+60>>2]=8;p=8}else if((p|0)==1){c[j+60>>2]=0;p=0}else if((p|0)==2){c[j+60>>2]=4;p=4}else{c[j+60>>2]=5;p=5}c[j+56>>2]=0;p=Ge(p)|0;if(p){c[j+13180>>2]=0;c[j+13168>>2]=0;t=d[p+5>>0]|0;c[j+13172>>2]=t;c[j+13176>>2]=t;t=d[p+6>>0]|0;c[j+13184>>2]=t;c[j+13188>>2]=t;c[j+64>>2]=8;if((c[o>>2]|0)>0){p=j+76|0;q=0;do{c[p+(q*12|0)>>2]=1;c[p+(q*12|0)+4>>2]=0;c[p+(q*12|0)+8>>2]=-1;q=q+1|0}while((q|0)<(c[o>>2]|0))}s=(ae(g)|0)+3|0;t=j+13064|0;c[t>>2]=s;s=1<<s;r=s+ 
-1|0;s=0-s|0;c[n>>2]=r+(c[n>>2]|0)&s;c[m>>2]=r+(c[m>>2]|0)&s;s=j+13068|0;c[s>>2]=ae(g)|0;r=j+13072|0;c[r>>2]=(ae(g)|0)+2;o=ae(g)|0;p=c[r>>2]|0;q=j+13076|0;c[q>>2]=p+o;if(p>>>0<(c[t>>2]|0)>>>0){u=ae(g)|0;o=j+13092|0;c[o>>2]=u;p=j+13088|0;c[p>>2]=u;a[j+12940>>0]=1;a[j+12941>>0]=_d(g)|0;u=_d(g)|0;c[j+68>>2]=u;if(u){u=j+13044|0;a[u>>0]=(Xd(g,4)|0)+1;a[j+13045>>0]=(Xd(g,4)|0)+1;v=(ae(g)|0)+3|0;c[j+13048>>2]=v;c[j+13052>>2]=v+(ae(g)|0);if((d[u>>0]|0|0)>(c[l>>2]|0)){k=-1094995529;break}a[j+13056>>0]=_d(g)|0}c[j+2184>>2]=0;a[j+12942>>0]=0;a[j+13060>>0]=1;a[j+13061>>0]=_d(g)|0;c[j+160>>2]=0;c[j+164>>2]=1;if((_d(g)|0)!=0?(v=_d(g)|0,Zd(g,7),(v|0)!=0):0){c[j+13096>>2]=_d(g)|0;c[j+13100>>2]=_d(g)|0;c[j+13104>>2]=_d(g)|0;c[j+13108>>2]=_d(g)|0;_d(g)|0;c[j+13112>>2]=_d(g)|0;_d(g)|0;c[j+13116>>2]=_d(g)|0;_d(g)|0}g=c[n>>2]|0;c[j+12>>2]=g;n=c[m>>2]|0;c[j+16>>2]=n;t=c[t>>2]|0;v=(c[s>>2]|0)+t|0;c[j+13080>>2]=v;s=t+ -1|0;c[j+13084>>2]=s;m=1<<v;u=g+ -1+m>>v;c[j+13128>>2]=u;m=n+ -1+m>>v;c[j+13132>>2]=m;c[j+13136>>2]=$(m,u)|0;c[j+13140>>2]=g>>t;c[j+13144>>2]=n>>t;u=c[r>>2]|0;c[j+13148>>2]=g>>u;c[j+13152>>2]=n>>u;c[j+13156>>2]=g>>s;c[j+13160>>2]=n>>s;u=v-u|0;c[j+13164>>2]=(1<<u)+ -1;c[j+13192>>2]=((c[l>>2]|0)*6|0)+ -48;t=(1<<t)+ -1|0;if((((((t&g|0)==0?!((n&t|0)!=0|v>>>0>6):0)?(c[p>>2]|0)>>>0<=u>>>0:0)?(c[o>>2]|0)>>>0<=u>>>0:0)?(c[q>>2]|0)>>>0<=(v>>>0>5?5:v)>>>0:0)?((c[h+216>>2]|0)-(c[h+212>>2]|0)|0)>=0:0){g=b+272|0;h=c[g>>2]|0;if((h|0)!=0?(v=c[e>>2]|0,(cf(c[h+4>>2]|0,c[v+4>>2]|0,c[v+8>>2]|0)|0)==0):0){ue(e);v=0;i=f;return v|0}else h=0;do{j=b+(h<<2)+400|0;k=c[j>>2]|0;do if(k){if(c[c[k+4>>2]>>2]|0)break;ue(j)}while(0);h=h+1|0}while((h|0)!=256);h=c[g>>2]|0;do if(h){j=b+200|0;if((c[j>>2]|0)!=(c[h+4>>2]|0))break;u=b+1424|0;ue(u);v=te(c[g>>2]|0)|0;c[u>>2]=v;if(v)break;c[j>>2]=0}while(0);ue(g);c[g>>2]=c[e>>2];v=0;i=f;return v|0}}else k=-1094995529}else k=-22}else k=-1094995529}}else k=-1094995529;while(0);ue(e);v=k;i=f;return v|0}function Ad(b){b=b|0;var 
e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,E=0,F=0,G=0,H=0,I=0;f=i;i=i+16|0;e=f+4|0;j=f;l=b+136|0;g=c[l>>2]|0;n=g+204|0;h=le(1692)|0;c[j>>2]=h;if(!h){I=-12;i=f;return I|0}I=pe(h,1692,6,0,0)|0;c[e>>2]=I;if(!I){ie(j);I=-12;i=f;return I|0}a[(c[j>>2]|0)+53>>0]=1;h=c[j>>2]|0;c[h+44>>2]=1;c[h+48>>2]=1;a[h+52>>0]=1;a[(c[j>>2]|0)+57>>0]=0;h=c[j>>2]|0;c[h+60>>2]=0;c[h+64>>2]=0;a[h+1629>>0]=2;h=ae(n)|0;a:do if((h>>>0<=255?(k=ae(n)|0,c[c[j>>2]>>2]=k,k>>>0<=31):0)?(m=c[b+(k<<2)+272>>2]|0,(m|0)!=0):0){k=c[m+4>>2]|0;I=(_d(n)|0)&255;a[(c[j>>2]|0)+41>>0]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+39>>0]=I;I=Xd(n,3)|0;c[(c[j>>2]|0)+1624>>2]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+4>>0]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+5>>0]=I;I=(ae(n)|0)+1|0;c[(c[j>>2]|0)+8>>2]=I;I=(ae(n)|0)+1|0;c[(c[j>>2]|0)+12>>2]=I;I=be(n)|0;c[(c[j>>2]|0)+16>>2]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+20>>0]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+21>>0]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+22>>0]=I;I=c[j>>2]|0;c[I+24>>2]=0;if(a[I+22>>0]|0){I=ae(n)|0;c[(c[j>>2]|0)+24>>2]=I}I=be(n)|0;c[(c[j>>2]|0)+28>>2]=I;if((I+12|0)>>>0<=24?(I=be(n)|0,c[(c[j>>2]|0)+32>>2]=I,(I+12|0)>>>0<=24):0){I=(_d(n)|0)&255;a[(c[j>>2]|0)+36>>0]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+37>>0]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+38>>0]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+40>>0]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+42>>0]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+43>>0]=I;if(a[(c[j>>2]|0)+42>>0]|0){m=(ae(n)|0)+1|0;c[(c[j>>2]|0)+44>>2]=m;m=(ae(n)|0)+1|0;o=c[j>>2]|0;c[o+48>>2]=m;o=c[o+44>>2]|0;if(!o){b=-1094995529;break}if((m|0)==0?1:(o|0)>=(c[k+13120>>2]|0)){b=-1094995529;break}if((m|0)>=(c[k+13124>>2]|0)){b=-1094995529;break}m=ne(o,4)|0;c[(c[j>>2]|0)+1648>>2]=m;m=ne(c[(c[j>>2]|0)+48>>2]|0,4)|0;c[(c[j>>2]|0)+1652>>2]=m;m=c[j>>2]|0;if(!(c[m+1648>>2]|0)){b=-12;break}if(!(c[m+1652>>2]|0)){b=-12;break}p=(_d(n)|0)&255;a[(c[j>>2]|0)+52>>0]=p;p=c[j>>2]|0;if(!(a[p+52>>0]|0)){q=(c[p+44>>2]|0)+ 
-1|0;if((q|0)>0){o=0;m=0;r=0;do{q=(ae(n)|0)+1|0;p=c[j>>2]|0;c[(c[p+1648>>2]|0)+(r<<2)>>2]=q;o=gf(q|0,0,o|0,m|0)|0;m=D;r=r+1|0;q=(c[p+44>>2]|0)+ -1|0}while((r|0)<(q|0))}else{m=0;o=0}r=c[k+13128>>2]|0;s=((r|0)<0)<<31>>31;if(!(m>>>0<s>>>0|(m|0)==(s|0)&o>>>0<r>>>0)){b=-1094995529;break}I=ff(r|0,s|0,o|0,m|0)|0;c[(c[p+1648>>2]|0)+(q<<2)>>2]=I;q=(c[p+48>>2]|0)+ -1|0;if((q|0)>0){p=0;o=0;r=0;do{q=(ae(n)|0)+1|0;m=c[j>>2]|0;c[(c[m+1652>>2]|0)+(r<<2)>>2]=q;p=gf(q|0,0,p|0,o|0)|0;o=D;r=r+1|0;q=(c[m+48>>2]|0)+ -1|0}while((r|0)<(q|0))}else{m=p;o=0;p=0}r=c[k+13132>>2]|0;s=((r|0)<0)<<31>>31;if(!(o>>>0<s>>>0|(o|0)==(s|0)&p>>>0<r>>>0)){b=-1094995529;break}I=ff(r|0,s|0,p|0,o|0)|0;c[(c[m+1652>>2]|0)+(q<<2)>>2]=I}I=(_d(n)|0)&255;a[(c[j>>2]|0)+53>>0]=I}I=(_d(n)|0)&255;a[(c[j>>2]|0)+54>>0]=I;I=(_d(n)|0)&255;a[(c[j>>2]|0)+55>>0]=I;if((a[(c[j>>2]|0)+55>>0]|0)!=0?(I=(_d(n)|0)&255,a[(c[j>>2]|0)+56>>0]=I,I=(_d(n)|0)&255,a[(c[j>>2]|0)+57>>0]=I,(a[(c[j>>2]|0)+57>>0]|0)==0):0){m=(be(n)|0)<<1;c[(c[j>>2]|0)+60>>2]=m;m=(be(n)|0)<<1;I=c[j>>2]|0;c[I+64>>2]=m;if(((c[I+60>>2]|0)+13|0)>>>0>26){b=-1094995529;break}if((m+13|0)>>>0>26){b=-1094995529;break}}p=(_d(n)|0)&255;a[(c[j>>2]|0)+68>>0]=p;p=c[j>>2]|0;if(a[p+68>>0]|0){q=0;do{o=p+(q<<6)+69|0;m=o+16|0;do{a[o>>0]=16;o=o+1|0}while((o|0)<(m|0));a[p+q+1605>>0]=16;a[p+q+1611>>0]=16;q=q+1|0}while((q|0)!=6);o=p+453|0;q=2936;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+517|0;q=2936;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+581|0;q=2936;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+645|0;q=3e3;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+709|0;q=3e3;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+773|0;q=3e3;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+837|0;q=2936;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+901|0;q=2936;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+965|0;q=293
6;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1029|0;q=3e3;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1093|0;q=3e3;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1157|0;q=3e3;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1221|0;q=2936;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1285|0;q=2936;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1349|0;q=2936;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1413|0;q=3e3;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1477|0;q=3e3;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));o=p+1541|0;q=3e3;m=o+64|0;do{a[o>>0]=a[q>>0]|0;o=o+1|0;q=q+1|0}while((o|0)<(m|0));m=c[j>>2]|0;v=(c[l>>2]|0)+204|0;w=0;do{p=(w|0)>0?64:16;q=(w|0)>1;o=w+ -2|0;x=(w|0)==3?3:1;r=1<<(w<<1)+4;t=(r|0)>0;s=(w|0)==0;r=(r|0)<64?r:64;u=0;do{if(!(((_d(v)|0)&255)<<24>>24)){y=ae(v)|0;if(y){if(u>>>0<y>>>0){b=-1094995529;break a}y=u-y|0;mf(m+(w*384|0)+(u<<6)+69|0,m+(w*384|0)+(y<<6)+69|0,p|0)|0;if(q)a[m+(o*6|0)+u+1605>>0]=a[m+(o*6|0)+y+1605>>0]|0}}else{if(q){z=(be(v)|0)+8|0;a[m+(o*6|0)+u+1605>>0]=z}else z=8;if(t){y=0;do{if(s)A=(d[24+y>>0]<<2)+(d[8+y>>0]|0)|0;else A=(d[104+y>>0]<<3)+(d[40+y>>0]|0)|0;z=(z+256+(be(v)|0)|0)%256|0;a[m+(w*384|0)+(u<<6)+A+69>>0]=z;y=y+1|0}while((y|0)!=(r|0))}}u=u+x|0}while((u|0)<6);w=w+1|0}while((w|0)<4);if((c[k+4>>2]|0)==3){o=0;do{a[m+o+1285>>0]=a[m+o+901>>0]|0;a[m+o+1349>>0]=a[m+o+965>>0]|0;a[m+o+1477>>0]=a[m+o+1093>>0]|0;a[m+o+1541>>0]=a[m+o+1157>>0]|0;o=o+1|0}while((o|0)!=64);a[m+1612>>0]=a[m+1606>>0]|0;a[m+1613>>0]=a[m+1607>>0]|0;a[m+1615>>0]=a[m+1609>>0]|0;a[m+1616>>0]=a[m+1610>>0]|0}}I=(_d(n)|0)&255;a[(c[j>>2]|0)+1617>>0]=I;I=(ae(n)|0)+2|0;c[(c[j>>2]|0)+1620>>2]=I;m=k+13080|0;if(I>>>0<=(c[m>>2]|0)>>>0){I=(_d(n)|0)&255;a[(c[j>>2]|0)+1628>>0]=I;do 
if((_d(n)|0)!=0?(I=_d(n)|0,Xd(n,7)|0,(I|0)!=0):0){n=c[j>>2]|0;p=(c[l>>2]|0)+204|0;if(a[n+21>>0]|0)a[n+1629>>0]=(ae(p)|0)+2;a[n+1630>>0]=_d(p)|0;I=(_d(p)|0)&255;a[n+1631>>0]=I;if(I<<24>>24){a[n+1632>>0]=ae(p)|0;I=ae(p)|0;o=n+1633|0;a[o>>0]=I;if((I&255)>>>0<5)l=0;else break;while(1){a[n+l+1634>>0]=be(p)|0;a[n+l+1639>>0]=be(p)|0;if((l|0)<(d[o>>0]|0))l=l+1|0;else break}}a[n+1644>>0]=ae(p)|0;a[n+1645>>0]=ae(p)|0}while(0);l=ne((c[(c[j>>2]|0)+44>>2]|0)+1|0,4)|0;c[(c[j>>2]|0)+1656>>2]=l;l=ne((c[(c[j>>2]|0)+48>>2]|0)+1|0,4)|0;c[(c[j>>2]|0)+1660>>2]=l;l=k+13128|0;o=ne(c[l>>2]|0,4)|0;c[(c[j>>2]|0)+1664>>2]=o;o=c[j>>2]|0;n=c[o+1656>>2]|0;if(((n|0)!=0?(c[o+1660>>2]|0)!=0:0)?(c[o+1664>>2]|0)!=0:0){if(a[o+52>>0]|0){p=c[o+1648>>2]|0;if(!p){o=ne(c[o+44>>2]|0,4)|0;c[(c[j>>2]|0)+1648>>2]=o;o=ne(c[(c[j>>2]|0)+48>>2]|0,4)|0;c[(c[j>>2]|0)+1652>>2]=o;o=c[j>>2]|0;p=c[o+1648>>2]|0;if(!p){b=-12;break}}n=c[o+1652>>2]|0;if(!n){b=-12;break}q=o+44|0;s=c[q>>2]|0;if((s|0)>0){r=0;do{I=r;r=r+1|0;H=c[l>>2]|0;c[p+(I<<2)>>2]=(($(H,r)|0)/(s|0)|0)-(($(H,I)|0)/(s|0)|0);s=c[q>>2]|0}while((r|0)<(s|0))}q=o+48|0;s=c[q>>2]|0;if((s|0)>0){p=k+13132|0;r=0;do{I=r;r=r+1|0;H=c[p>>2]|0;c[n+(I<<2)>>2]=(($(H,r)|0)/(s|0)|0)-(($(H,I)|0)/(s|0)|0);s=c[q>>2]|0}while((r|0)<(s|0))}n=c[o+1656>>2]|0}c[n>>2]=0;q=o+44|0;if((c[q>>2]|0)>0){p=c[o+1648>>2]|0;r=0;s=0;do{r=(c[p+(s<<2)>>2]|0)+r|0;s=s+1|0;c[n+(s<<2)>>2]=r}while((s|0)<(c[q>>2]|0))}s=c[o+1660>>2]|0;c[s>>2]=0;r=o+48|0;if((c[r>>2]|0)>0){q=c[o+1652>>2]|0;t=0;p=0;do{t=(c[q+(p<<2)>>2]|0)+t|0;p=p+1|0;c[s+(p<<2)>>2]=t}while((p|0)<(c[r>>2]|0))}r=c[l>>2]|0;if((r|0)>0){o=c[o+1664>>2]|0;p=0;q=0;do{q=(p>>>0>(c[n+(q<<2)>>2]|0)>>>0&1)+q|0;c[o+(p<<2)>>2]=q;p=p+1|0;r=c[l>>2]|0}while((p|0)<(r|0))}x=$(c[k+13132>>2]|0,r)|0;n=ne(x,4)|0;c[(c[j>>2]|0)+1668>>2]=n;n=ne(x,4)|0;c[(c[j>>2]|0)+1672>>2]=n;n=ne(x,4)|0;c[(c[j>>2]|0)+1676>>2]=n;n=k+13164|0;q=(c[n>>2]|0)+2|0;q=ne($(q,q)|0,4)|0;c[(c[j>>2]|0)+1688>>2]=q;q=c[j>>2]|0;p=c[q+1668>>2]|0;if(!p){b=-12;break}w=c[q+1672>>2]|0;if(!w){b=-12;break}o=c[
q+1676>>2]|0;if(!o){b=-12;break}if(!(c[q+1688>>2]|0)){b=-12;break}if((x|0)>0){B=q+44|0;r=q+48|0;s=c[q+1660>>2]|0;v=c[q+1648>>2]|0;u=c[q+1656>>2]|0;t=q+1652|0;A=0;do{C=c[l>>2]|0;y=(A|0)%(C|0)|0;z=(A|0)/(C|0)|0;G=c[B>>2]|0;E=0;while(1){if((E|0)>=(G|0)){E=0;break}F=E+1|0;if(y>>>0<(c[u+(F<<2)>>2]|0)>>>0)break;else E=F}H=c[r>>2]|0;F=0;while(1){if((F|0)>=(H|0)){F=0;break}G=F+1|0;if(z>>>0<(c[s+(G<<2)>>2]|0)>>>0)break;else F=G}if((E|0)>0){G=c[(c[t>>2]|0)+(F<<2)>>2]|0;H=0;I=0;do{I=($(c[v+(H<<2)>>2]|0,G)|0)+I|0;H=H+1|0}while((H|0)!=(E|0))}else I=0;if((F|0)>0){G=c[t>>2]|0;H=0;do{I=($(c[G+(H<<2)>>2]|0,C)|0)+I|0;H=H+1|0}while((H|0)!=(F|0))}H=$(c[v+(E<<2)>>2]|0,z-(c[s+(F<<2)>>2]|0)|0)|0;I=I+y+H-(c[u+(E<<2)>>2]|0)|0;c[p+(A<<2)>>2]=I;c[w+(I<<2)>>2]=A;A=A+1|0}while((A|0)!=(x|0))}else r=q+48|0;x=c[r>>2]|0;if((x|0)>0){s=q+44|0;t=q+1660|0;q=q+1656|0;z=c[s>>2]|0;u=0;w=0;while(1){v=u;u=u+1|0;if((z|0)>0){x=c[t>>2]|0;y=x+(u<<2)|0;G=c[y>>2]|0;B=z;z=0;do{E=c[x+(v<<2)>>2]|0;A=z;z=z+1|0;if(E>>>0<G>>>0){B=c[q>>2]|0;C=B+(z<<2)|0;F=c[C>>2]|0;do{H=c[B+(A<<2)>>2]|0;if(H>>>0<F>>>0){do{c[o+(c[p+(($(c[l>>2]|0,E)|0)+H<<2)>>2]<<2)>>2]=w;H=H+1|0;F=c[C>>2]|0}while(H>>>0<F>>>0);G=c[y>>2]|0}E=E+1|0}while(E>>>0<G>>>0);B=c[s>>2]|0}w=w+1|0}while((z|0)<(B|0));v=c[r>>2]|0;z=B}else v=x;if((u|0)>=(v|0))break;else x=v}}else 
w=0;o=ne(w,4)|0;c[(c[j>>2]|0)+1680>>2]=o;o=c[j>>2]|0;p=c[o+1680>>2]|0;if(!p){b=-12;break}r=o+48|0;u=c[r>>2]|0;if((u|0)>0){q=o+44|0;t=c[q>>2]|0;s=0;do{if((t|0)>0){u=c[o+1660>>2]|0;v=c[o+1656>>2]|0;w=0;do{I=$(c[l>>2]|0,c[u+(s<<2)>>2]|0)|0;c[p+(($(t,s)|0)+w<<2)>>2]=(c[v+(w<<2)>>2]|0)+I;w=w+1|0;t=c[q>>2]|0}while((w|0)<(t|0));u=c[r>>2]|0}s=s+1|0}while((s|0)<(u|0))}k=(c[m>>2]|0)-(c[k+13072>>2]|0)|0;v=c[n>>2]|0;c[o+1684>>2]=(c[o+1688>>2]|0)+(v+3<<2);p=v+2|0;if((p|0)>0){m=c[(c[j>>2]|0)+1688>>2]|0;o=0;do{c[m+(($(p,o)|0)<<2)>>2]=-1;c[m+(o<<2)>>2]=-1;o=o+1|0;v=c[n>>2]|0;p=v+2|0}while((o|0)<(p|0))}if((v|0)>-1){m=c[j>>2]|0;j=m+1668|0;p=k<<1;o=(k|0)>0;m=m+1684|0;q=0;while(1){if((v|0)>-1){r=q>>k;t=c[j>>2]|0;s=c[m>>2]|0;u=0;while(1){z=c[t+(($(c[l>>2]|0,r)|0)+(u>>k)<<2)>>2]<<p;if(o){w=0;do{y=1<<w;if(!(y&q))x=0;else x=y<<1<<w;z=((y&u|0)==0?0:y<<w)+z+x|0;w=w+1|0}while((w|0)!=(k|0))}c[s+(($(v+2|0,q)|0)+u<<2)>>2]=z;v=c[n>>2]|0;if((u|0)<(v|0))u=u+1|0;else break}}if((q|0)<(v|0))q=q+1|0;else break}}if(((c[g+216>>2]|0)-(c[g+212>>2]|0)|0)<0){b=0;break}I=b+(h<<2)+400|0;ue(I);c[I>>2]=c[e>>2];I=0;i=f;return I|0}else b=-12}else b=-1094995529}else b=-1094995529}else b=-1094995529;while(0);ue(e);I=b;i=f;return I|0}function Bd(a,b){a=a|0;b=b|0;var d=0;a=i;i=i+16|0;d=a;c[d>>2]=b;ie(b+1648|0);ie(b+1652|0);ie(b+1656|0);ie(b+1660|0);ie(b+1664|0);ie(b+1668|0);ie(b+1672|0);ie(b+1680|0);ie(b+1676|0);ie(b+1688|0);ie(d);i=a;return}function Cd(a){a=a|0;var d=0,e=0,f=0,g=0,h=0,j=0,k=0,l=0;d=i;e=a+136|0;g=a+3500|0;f=a+7776|0;while(1){h=(c[e>>2]|0)+204|0;j=0;do{k=Xd(h,8)|0;j=k+j|0}while((k|0)==255);k=0;do{l=Xd(h,8)|0;k=l+k|0}while((l|0)==255);do if((c[g>>2]|0)==39)if((j|0)==257){b[f>>1]=Xd(h,16)|0;break}else if((j|0)==256){Dd(a);break}else{Zd(h,k<<3);break}else if((j|0)==132){Dd(a);break}else{Zd(h,k<<3);break}while(0);h=c[e>>2]|0;if(((c[h+216>>2]|0)-(c[h+212>>2]|0)|0)<=0){a=15;break}if((Yd(h+204|0,8)|0)==128){a=15;break}}if((a|0)==15){i=d;return 1}return 0}function Dd(b){b=b|0;var 
d=0,e=0,f=0,g=0,h=0,j=0;d=i;e=(c[b+136>>2]|0)+204|0;g=(Xd(e,8)|0)&255;f=b+7720|0;h=0;do{if((g|0)==1)Zd(e,16);else if(!g){a[f>>0]=1;j=0;do{a[b+(h<<4)+j+7672>>0]=Xd(e,8)|0;j=j+1|0}while((j|0)!=16)}else if((g|0)==2)Zd(e,32);h=h+1|0}while((h|0)!=3);i=d;return}function Ed(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0,g=0,h=0;d=i;f=c[b+52>>2]|0;e=a+60|0;if((f|0)>0){if((c[e>>2]|0)==0?(f=le(f)|0,c[e>>2]=f,(f|0)==0):0){f=-12;i=d;return f|0}}else c[e>>2]=0;f=a+12|0;c[f>>2]=b;c[a+424>>2]=0;c[a+800>>2]=1;h=a+912|0;g=a+936|0;c[h+0>>2]=0;c[h+4>>2]=0;c[h+8>>2]=0;c[h+12>>2]=0;c[g>>2]=0;c[g+4>>2]=-2147483648;g=a+928|0;c[g>>2]=0;c[g+4>>2]=-2147483648;a=Ja[c[b+76>>2]&7](a)|0;if((a|0)>=0){h=0;i=d;return h|0}ie(e);c[f>>2]=0;h=a;i=d;return h|0}function Fd(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;if(!a){i=b;return 0}e=a+12|0;f=c[e>>2]|0;if((f|0)!=0?(d=c[f+92>>2]|0,(d|0)!=0):0)Ja[d&7](a)|0;c[a+796>>2]=0;ie(a+60|0);c[e>>2]=0;c[a+808>>2]=0;i=b;return 0}function Gd(a,b,d,e,f,g){a=a|0;b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0;h=i;if((f|0)<=0){i=h;return 0}j=(e|0)==0;k=0;do{l=d+($(k,g)|0)|0;l=Oa[b&1](a,l)|0;if(!j)c[e+(k<<2)>>2]=l;k=k+1|0}while((k|0)!=(f|0));i=h;return 0}function Hd(a,b,d,e,f){a=a|0;b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0,k=0;g=i;if((f|0)<=0){i=g;return 0}h=(e|0)==0;j=0;do{k=Ma[b&1](a,d,j,0)|0;if(!h)c[e+(j<<2)>>2]=k;j=j+1|0}while((j|0)!=(f|0));i=g;return 0}function Id(b,f,g){b=b|0;f=f|0;g=g|0;var h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0;g=i;h=Ge(c[f+76>>2]|0)|0;b=h+4|0;if(!(a[b>>0]|0)){p=0;i=g;return p|0}k=f+64|0;l=h+5|0;m=f+68|0;n=h+6|0;j=0;while(1){p=($((((e[h+(j<<1)+8>>1]|0)>>>11&15)+8|0)>>>3,c[k>>2]|0)|0)+31&-32;if((j+ -1|0)>>>0<2){p=0-(0-p>>d[l>>0])|0;c[f+(j<<2)+32>>2]=p;o=0-(0-((c[m>>2]|0)+31&-32)>>d[n>>0])|0}else{c[f+(j<<2)+32>>2]=p;o=(c[m>>2]|0)+31&-32}o=re(($(p,o)|0)+32|0)|0;c[f+(j<<2)+304>>2]=o;if(!o){b=-1;f=8;break}c[f+(j<<2)>>2]=c[o+4>>2];j=j+1|0;if((j|0)>=(d[b>>0]|0)){b=0;f=8;break}}if((f|0)==8){i=g;return b|0}return 0}function Jd(a,b){a=a|0;b=b|0;var 
d=0,e=0,f=0,g=0;d=i;jf(a|0,0,976)|0;e=(b|0)!=0;if(e){c[a+8>>2]=c[b+8>>2];c[a+48>>2]=c[b+12>>2]}else c[a+8>>2]=-1;c[a+100>>2]=0;c[a+104>>2]=1;c[a+888>>2]=0;c[a+892>>2]=1;c[a+896>>2]=0;c[a+900>>2]=1;c[a+476>>2]=1;c[a+816>>2]=1;c[a+820>>2]=1;c[a+220>>2]=0;c[a+224>>2]=1;c[a+136>>2]=-1;c[a+416>>2]=-1;g=a+696|0;c[g>>2]=0;c[g+4>>2]=-2147483648;if((e?(f=c[b+52>>2]|0,(f|0)!=0):0)?(g=le(f)|0,c[a+60>>2]=g,(g|0)==0):0){g=-12;i=d;return g|0}g=0;i=d;return g|0}function Kd(a){a=a|0;var b=0,c=0;b=i;c=ee(976)|0;if(c){if((Jd(c,a)|0)<0){he(c);c=0}}else c=0;i=b;return c|0}function Ld(a,b,d,e){a=a|0;b=b|0;d=d|0;e=e|0;var f=0,g=0,h=0,j=0,k=0;f=i;i=i+80|0;g=f;k=g+0|0;j=e+0|0;h=k+80|0;do{c[k>>2]=c[j>>2];k=k+4|0;j=j+4|0}while((k|0)<(h|0));h=a+12|0;j=c[h>>2]|0;if(!j){k=-22;i=f;return k|0}if(c[j+8>>2]|0){k=-22;i=f;return k|0}c[d>>2]=0;j=c[a+124>>2]|0;k=c[a+128>>2]|0;if(!j){if(k){k=-22;i=f;return k|0}}else{if(!((j|0)>0&(k|0)>0)){k=-22;i=f;return k|0}if((j+128|0)>>>0>=(268435455/((k+128|0)>>>0)|0)>>>0){k=-22;i=f;return k|0}}De(b);h=c[h>>2]|0;if(((c[h+16>>2]&32|0)==0?(c[e+28>>2]|0)==0:0)?(c[a+808>>2]&1|0)==0:0){k=0;i=f;return k|0}g=Ma[c[h+88>>2]&1](a,b,d,g)|0;if(!(c[d>>2]|0)){De(b);k=g;i=f;return k|0}else{k=a+424|0;c[k>>2]=(c[k>>2]|0)+1;k=g;i=f;return k|0}return 0}function Md(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;c=i;if((a|0)>0&(b|0)>0?(a+128|0)>>>0<(268435455/((b+128|0)>>>0)|0)>>>0:0){d=0;i=c;return d|0}d=-22;i=c;return d|0}function Nd(a,b){a=a|0;b=b|0;return 0}function Od(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0;f=i;e=a+8|0;if(!(c[e>>2]|0)){g=c[a+116>>2]|0;h=a+120|0;j=c[h>>2]|0;if(!((g|0)>0&(j|0)>0)){l=-22;i=f;return l|0}if((g+128|0)>>>0>=(268435455/((j+128|0)>>>0)|0)>>>0){l=-22;i=f;return l|0}j=c[a+136>>2]|0;if((j|0)<0){l=-22;i=f;return l|0}k=b+64|0;l=b+68|0;if((c[k>>2]|0)>=1?(c[l>>2]|0)>=1:0)g=1;else{m=a+792|0;n=0-(0-(c[a+124>>2]|0)>>c[m>>2])|0;c[k>>2]=(g|0)>(n|0)?g:n;k=c[h>>2]|0;g=0-(0-(c[a+128>>2]|0)>>c[m>>2])|0;c[l>>2]=(k|0)>(g|0)?k:g;g=0}c[b+76>>2]=j}else 
g=1;d=Aa[c[a+476>>2]&1](a,b,d)|0;if(c[e>>2]|g){n=d;i=f;return n|0}c[b+64>>2]=c[a+116>>2];c[b+68>>2]=c[a+120>>2];n=d;i=f;return n|0}function Pd(a,b,d){a=a|0;b=b|0;d=d|0;var e=0;e=i;c[b+4>>2]=a;a=Od(a,c[b>>2]|0,d)|0;i=e;return a|0}function Qd(a,b){a=a|0;b=b|0;a=i;b=c[b>>2]|0;if(b)De(b);i=a;return}function Rd(a){a=a|0;return}function Sd(a,b,c){a=a|0;b=b|0;c=c|0;return}function Td(a,b,c){a=a|0;b=b|0;c=c|0;return}function Ud(a){a=a|0;var b=0,d=0;b=i;d=a+8|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+16|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+64|0;c[d>>2]=-1;c[d+4>>2]=-1;d=a+72|0;c[d>>2]=0;c[d+4>>2]=0;d=a+32|0;c[a>>2]=0;c[d+0>>2]=0;c[d+4>>2]=0;c[d+8>>2]=0;c[d+12>>2]=0;c[d+16>>2]=0;i=b;return}function Vd(a,b,e){a=a|0;b=b|0;e=e|0;var f=0,g=0,h=0;g=a+16|0;c[a+12>>2]=b;c[a+20>>2]=b+e;h=b+1|0;c[g>>2]=h;e=(d[b>>0]|0)<<18;c[a>>2]=e;f=b+2|0;c[g>>2]=f;e=(d[h>>0]|0)<<10|e;c[a>>2]=e;c[g>>2]=b+3;c[a>>2]=(d[f>>0]|0)<<2|e|2;c[a+4>>2]=510;return}function Wd(){var b=0,e=0,f=0,g=0,h=0,j=0;b=i;if(!(c[766]|0))e=0;else{i=b;return}while(1)if(e){g=(e&65280|0)==0;a[3072+e>>0]=(g?8:0)-(d[4872+(g?e:e>>>8)>>0]|0);e=e+1|0;if((e|0)==512){e=0;break}else continue}else{a[3072]=9;e=1;continue}while(1){f=e<<1;g=0;do{j=a[4416+(e<<2)+g>>0]|0;h=(g<<7)+f|0;a[(h|1)+3584>>0]=j;a[h+3584>>0]=j;g=g+1|0}while((g|0)!=4);j=(d[4672+e>>0]|0)<<1;a[f+4224>>0]=j;a[f+4225>>0]=j|1;if(e){h=(d[4736+e>>0]|0)<<1;j=128-f|0;a[j+4095>>0]=h;a[j+4094>>0]=h|1;e=e+1|0;if((e|0)==64)break;else continue}else{e=128-f|0;a[e+4095>>0]=1;a[e+4094>>0]=0;e=1;continue}}g=4352|0;f=4800|0;e=g+63|0;do{a[g>>0]=a[f>>0]|0;g=g+1|0;f=f+1|0}while((g|0)<(e|0));c[766]=1;i=b;return}function Xd(a,b){a=a|0;b=b|0;var e=0,f=0,g=0,h=0;e=i;f=a+8|0;h=c[f>>2]|0;g=c[a+16>>2]|0;a=(c[a>>2]|0)+(h>>>3)|0;a=(lf(d[a>>0]|d[a+1>>0]<<8|d[a+2>>0]<<16|d[a+3>>0]<<24|0)|0)<<(h&7)>>>(32-b|0);b=h+b|0;c[f>>2]=g>>>0>b>>>0?b:g;i=e;return a|0}function Yd(a,b){a=a|0;b=b|0;var 
e=0,f=0;e=i;f=c[a+8>>2]|0;a=(c[a>>2]|0)+(f>>>3)|0;a=(lf(d[a>>0]|d[a+1>>0]<<8|d[a+2>>0]<<16|d[a+3>>0]<<24|0)|0)<<(f&7)>>>(32-b|0);i=e;return a|0}function Zd(a,b){a=a|0;b=b|0;var d=0;d=a+8|0;a=c[a+16>>2]|0;b=(c[d>>2]|0)+b|0;c[d>>2]=a>>>0>b>>>0?b:a;return}function _d(a){a=a|0;var b=0,e=0,f=0;e=a+8|0;f=c[e>>2]|0;b=(d[(c[a>>2]|0)+(f>>>3)>>0]|0)<<(f&7)>>>7&1;c[e>>2]=((f|0)<(c[a+16>>2]|0)&1)+f;return b|0}function $d(a,b){a=a|0;b=b|0;var e=0,f=0,g=0,h=0,j=0,k=0;e=i;if(!b){j=0;i=e;return j|0}f=a+8|0;h=c[f>>2]|0;g=c[a+16>>2]|0;j=c[a>>2]|0;a=j+(h>>>3)|0;a=(lf(d[a>>0]|d[a+1>>0]<<8|d[a+2>>0]<<16|d[a+3>>0]<<24|0)|0)<<(h&7);if((b|0)<26){j=h+b|0;c[f>>2]=g>>>0>j>>>0?j:g;j=a>>>(32-b|0);i=e;return j|0}else{k=h+16|0;k=g>>>0>k>>>0?k:g;c[f>>2]=k;h=b+ -16|0;j=j+(k>>>3)|0;j=(lf(d[j>>0]|d[j+1>>0]<<8|d[j+2>>0]<<16|d[j+3>>0]<<24|0)|0)<<(k&7)>>>(48-b|0);b=k+h|0;c[f>>2]=g>>>0>b>>>0?b:g;j=j|a>>>16<<h;i=e;return j|0}return 0}function ae(a){a=a|0;var b=0,e=0,f=0,g=0,h=0,j=0;b=i;i=i+32|0;e=b;c[e+0>>2]=c[a+0>>2];c[e+4>>2]=c[a+4>>2];c[e+8>>2]=c[a+8>>2];c[e+12>>2]=c[a+12>>2];c[e+16>>2]=c[a+16>>2];e=$d(e,32)|0;f=e>>>0>65535;e=f?e>>>16:e;f=f?16:0;if(e&65280){f=f|8;e=e>>>8}j=31-f-(d[4872+e>>0]|0)|0;g=a+8|0;f=c[g>>2]|0;e=0-f|0;h=(c[a+16>>2]|0)-f|0;if((j|0)<(e|0)){h=e;h=h+f|0;c[g>>2]=h;j=j+1|0;j=$d(a,j)|0;j=j+ -1|0;i=b;return j|0}h=(h|0)<(j|0)?h:j;h=h+f|0;c[g>>2]=h;j=j+1|0;j=$d(a,j)|0;j=j+ -1|0;i=b;return j|0}function be(a){a=a|0;var b=0;b=i;a=ae(a)|0;if(!(a&1)){a=0-(a>>>1)|0;i=b;return a|0}else{a=(a+1|0)>>>1;i=b;return a|0}return 0}function ce(b,c,d,e,f,g,h,j,k,l){b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0;m=i;if((k|0)==0|(l|0)==0){i=m;return}if((j|0)<(l|0)){if((j|0)<=(0-g|0)){y=1-g|0;c=c+($(y-j|0,e)|0)|0;j=y}}else{y=l+ -1|0;c=c+($(y-j|0,e)|0)|0;j=y}if((h|0)<(k|0)){if((h|0)<=(0-f|0)){y=1-f|0;c=c+(y-h)|0;h=y}}else{y=k+ 
-1|0;c=c+(y-h)|0;h=y}s=(j|0)>0;v=s?0:0-j|0;r=(h|0)>0;n=r?0:0-h|0;t=l-j|0;t=(t|0)<(g|0)?t:g;o=k-h|0;k=(o|0)<(f|0);o=k?o:f;p=o-n|0;q=c+(n+($(v,e)|0))|0;y=b+n|0;if((v|0)>0){u=(s?j:0)-j|0;w=(r?h:0)+($(u,d)|0)-h|0;x=0;while(1){mf(y|0,q|0,p|0)|0;x=x+1|0;if((x|0)>=(v|0))break;else y=y+d|0}b=b+w|0}else{b=y;u=0}if((u|0)<(t|0)){l=j+ -1-l|0;y=~g;y=(l|0)>(y|0)?l:y;l=~y;t=$(~u-y|0,d)|0;j=(r?h:0)+($((s?j+ -1|0:-1)-j-u-y|0,e)|0)-h|0;h=b;while(1){mf(h|0,q|0,p|0)|0;u=u+1|0;if((u|0)==(l|0))break;else{h=h+d|0;q=q+e|0}}b=b+t|0;q=c+j|0;u=l}l=q+(0-e)|0;if((u|0)<(g|0)){e=$(g-u|0,d)|0;c=b;while(1){mf(c|0,l|0,p|0)|0;u=u+1|0;if((u|0)==(g|0))break;else c=c+d|0}b=b+e|0}if(!g){i=m;return}e=(n|0)>0;p=o+ -1|0;c=b+(0-(($(g,d)|0)+n))|0;while(1){g=g+ -1|0;if(e){q=c+n|0;l=0;do{a[c+l>>0]=a[q>>0]|0;l=l+1|0}while((l|0)<(n|0))}if(k){l=c+p|0;q=o;do{a[c+q>>0]=a[l>>0]|0;q=q+1|0}while((q|0)<(f|0))}if(!g)break;else c=c+d|0}i=m;return}function de(a,b){a=a|0;b=b|0;c[a>>2]=1;return}function ee(a){a=a|0;var b=0,d=0,e=0;b=i;d=c[1216]|0;if((d+ -32|0)>>>0>=a>>>0){e=$e(a)|0;if((e|0)==0&(a|0)==0)if((d|0)==32)e=0;else e=$e(1)|0}else e=0;i=b;return e|0}function fe(a,b){a=a|0;b=b|0;var d=0;d=i;if(((c[1216]|0)+ -32|0)>>>0<b>>>0){b=0;i=d;return b|0}b=bf(a,((b|0)==0&1)+b|0)|0;i=d;return b|0}function ge(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;e=i;f=$(d,b)|0;if((d|b)>>>0>65535&(d|0)!=0?((f>>>0)/(d>>>0)|0|0)!=(b|0):0){af(a);d=0;i=e;return d|0}if(((c[1216]|0)+ -32|0)>>>0<f>>>0)b=0;else b=bf(a,((f|0)==0&1)+f|0)|0;if((b|0)!=0|(f|0)==0){d=b;i=e;return d|0}af(a);d=0;i=e;return d|0}function he(a){a=a|0;var b=0;b=i;af(a);i=b;return}function ie(a){a=a|0;var b=0;b=i;af(c[a>>2]|0);c[a>>2]=0;i=b;return}function je(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;e=i;if(((d|0)!=0?(2147483647/(d>>>0)|0)>>>0>b>>>0:0)?(f=$(d,b)|0,((c[1216]|0)+ -32|0)>>>0>=f>>>0):0)a=bf(a,((f|0)==0&1)+f|0)|0;else a=0;i=e;return a|0}function ke(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;f=i;e=ge(c[a>>2]|0,b,d)|0;c[a>>2]=e;i=f;return((e|0)!=0|(b|0)==0|(d|0)==0?0:-12)|0}function 
le(a){a=a|0;var b=0,c=0;c=i;b=ee(a)|0;if(b)jf(b|0,0,a|0)|0;i=c;return b|0}function me(a,b,d){a=a|0;b=b|0;d=d|0;var e=0,f=0;e=i;if((c[b>>2]|0)>>>0>d>>>0){i=e;return}f=((d*17|0)>>>4)+32|0;d=f>>>0>d>>>0?f:d;af(c[a>>2]|0);f=ee(d)|0;c[a>>2]=f;c[b>>2]=(f|0)==0?0:d;i=e;return}function ne(a,b){a=a|0;b=b|0;var c=0;c=i;if((b|0)!=0?(2147483647/(b>>>0)|0)>>>0>a>>>0:0)b=ee($(b,a)|0)|0;else b=0;i=c;return b|0}function oe(a,b){a=a|0;b=b|0;var c=0,d=0,e=0;c=i;if(((b|0)!=0?(2147483647/(b>>>0)|0)>>>0>a>>>0:0)?(e=$(b,a)|0,d=ee(e)|0,(d|0)!=0):0)jf(d|0,0,e|0)|0;else d=0;i=c;return d|0}function pe(a,b,d,e,f){a=a|0;b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0;g=i;i=i+16|0;h=g;j=le(24)|0;c[h>>2]=j;if(!j){f=0;i=g;return f|0}c[j>>2]=a;c[j+4>>2]=b;c[j+12>>2]=(d|0)!=0?d:7;c[j+16>>2]=e;c[j+8>>2]=1;if(f&1){f=(c[h>>2]|0)+20|0;c[f>>2]=c[f>>2]|1}j=le(12)|0;if(!j){ie(h);f=0;i=g;return f|0}else{c[j>>2]=c[h>>2];c[j+4>>2]=a;c[j+8>>2]=b;f=j;i=g;return f|0}return 0}function qe(a,b){a=a|0;b=b|0;a=i;he(b);i=a;return}function re(a){a=a|0;var b=0,d=0,e=0;b=i;i=i+16|0;d=b;e=ee(a)|0;c[d>>2]=e;if(e){a=pe(e,a,7,0,0)|0;if(!a){ie(d);a=0}}else a=0;i=b;return a|0}function se(a){a=a|0;var b=0,d=0;b=i;d=re(a)|0;if(!d){d=0;i=b;return d|0}jf(c[d+4>>2]|0,0,a|0)|0;i=b;return d|0}function te(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;i=i+16|0;e=b;d=le(12)|0;if(!d){e=0;i=b;return e|0}c[d+0>>2]=c[a+0>>2];c[d+4>>2]=c[a+4>>2];c[d+8>>2]=c[a+8>>2];f=(c[a>>2]|0)+8|0;a=c[f>>2]|0;c[f>>2]=a+1;c[e>>2]=a+1;e=d;i=b;return e|0}function ue(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;i=i+16|0;e=b+4|0;d=b;if(!a){i=b;return}f=c[a>>2]|0;if(!f){i=b;return}f=c[f>>2]|0;c[d>>2]=f;ie(a);a=f+8|0;f=c[a>>2]|0;c[a>>2]=f+ -1;c[e>>2]=f+ -1;if(c[e>>2]|0){i=b;return}f=c[d>>2]|0;Fa[c[f+12>>2]&15](c[f+16>>2]|0,c[f>>2]|0);ie(d);i=b;return}function ve(a,b){a=a|0;b=b|0;var d=0,e=0;d=i;e=le(20)|0;if(!e){b=0;i=d;return b|0}c[e+12>>2]=a;c[e+16>>2]=(b|0)!=0?b:4;c[e+4>>2]=1;b=e;i=d;return b|0}function we(a){a=a|0;var 
b=0,d=0,e=0,f=0;b=i;i=i+16|0;d=b;if(!a){i=b;return}e=c[a>>2]|0;if(!e){i=b;return}c[a>>2]=0;f=e+4|0;a=c[f>>2]|0;c[f>>2]=a+ -1;c[d>>2]=a+ -1;if(c[d>>2]|0){i=b;return}xe(e);i=b;return}function xe(a){a=a|0;var b=0,d=0,e=0,f=0;b=i;i=i+16|0;d=b+4|0;e=b;c[d>>2]=a;if(!(c[a>>2]|0)){ie(d);i=b;return}do{f=c[a>>2]|0;c[e>>2]=f;c[c[d>>2]>>2]=c[f+16>>2];f=c[e>>2]|0;Fa[c[f+8>>2]&15](c[f+4>>2]|0,c[f>>2]|0);ie(e);a=c[d>>2]|0}while((c[a>>2]|0)!=0);ie(d);i=b;return}function ye(a){a=a|0;var b=0,d=0,e=0,f=0,g=0,h=0,j=0,k=0;d=i;i=i+16|0;b=d+4|0;e=d;f=c[a>>2]|0;j=0;while(1){if((f|0)==(j|0)){f=4;break}g=c[a>>2]|0;if((g|0)==(f|0))c[a>>2]=0;if(!g){f=5;break}else{j=f;f=g}}if((f|0)==4)if(!j)f=5;a:do if((f|0)==5){g=a+4|0;f=a+8|0;b:do if((c[g>>2]|0)<=(c[f>>2]|0))c:while(1){c[b>>2]=c[g>>2];j=c[b>>2]|0;c[b>>2]=c[f>>2];if((j|0)>(c[b>>2]|0))break b;h=c[a>>2]|0;j=0;while(1){if((h|0)==(j|0))break;j=c[a>>2]|0;if((j|0)==(h|0))c[a>>2]=0;if(!j)continue c;else{k=h;h=j;j=k}}if(j)break a}while(0);j=Ja[c[a+16>>2]&7](c[a+12>>2]|0)|0;c[e>>2]=j;if(!j){k=0;i=d;return k|0}h=le(20)|0;if(!h){ue(e);k=0;i=d;return k|0}else{c[h>>2]=c[c[j>>2]>>2];c[h+4>>2]=c[(c[j>>2]|0)+16>>2];c[h+8>>2]=c[(c[j>>2]|0)+12>>2];c[h+12>>2]=a;c[(c[j>>2]|0)+16>>2]=h;c[(c[c[e>>2]>>2]|0)+12>>2]=8;k=c[g>>2]|0;c[g>>2]=k+1;c[b>>2]=k+1;k=c[f>>2]|0;c[f>>2]=k+1;c[b>>2]=k+1;k=c[e>>2]|0;i=d;return k|0}}while(0);e=j+16|0;ze(c[e>>2]|0);c[e>>2]=0;e=pe(c[j>>2]|0,c[a+12>>2]|0,8,j,0)|0;if(!e){ze(j);k=0;i=d;return k|0}else{j=a+4|0;k=c[j>>2]|0;c[j>>2]=k+1;c[b>>2]=k+1;k=e;i=d;return k|0}return 0}function ze(a){a=a|0;var b=0,d=0,e=0,f=0,g=0,h=0;d=i;if(!a){i=d;return}b=c[a+12>>2]|0;f=a+16|0;if(!(c[f>>2]|0))e=a;else 
do{e=c[f>>2]|0;f=e+16|0}while((c[f>>2]|0)!=0);f=c[b>>2]|0;if(!f)c[b>>2]=a;if(!f){i=d;return}do{f=c[b>>2]|0;g=0;while(1){if((f|0)==(g|0))break;g=c[b>>2]|0;if((g|0)==(f|0))c[b>>2]=0;if(!g){g=0;break}else{h=f;f=g;g=h}}f=e+16|0;c[f>>2]=g;if(c[f>>2]|0)do{e=c[f>>2]|0;f=e+16|0}while((c[f>>2]|0)!=0);f=c[b>>2]|0;if(!f)c[b>>2]=a}while((f|0)!=0);i=d;return}function Ae(a,b){a=a|0;b=b|0;var d=0,e=0,f=0;b=i;i=i+16|0;e=b;d=c[a+12>>2]|0;ze(a);f=d+4|0;a=c[f>>2]|0;c[f>>2]=a+ -1;c[e>>2]=a+ -1;if(c[e>>2]|0){i=b;return}xe(d);i=b;return}function Be(){var a=0,b=0,d=0;a=i;b=le(400)|0;if(!b){b=0;i=a;return b|0}jf(b|0,0,400)|0;d=b+136|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+144|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+128|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+360|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=b+376|0;c[d>>2]=0;c[d+4>>2]=0;d=b+368|0;c[d>>2]=-1;c[d+4>>2]=-1;c[b+392>>2]=-1;c[b+80>>2]=1;c[b+120>>2]=0;c[b+124>>2]=1;c[b+76>>2]=-1;c[b+344>>2]=2;c[b+348>>2]=2;c[b+352>>2]=2;c[b+340>>2]=0;c[b+356>>2]=0;i=a;return b|0}function Ce(a){a=a|0;var b=0,d=0;b=i;if((a|0)!=0?(d=c[a>>2]|0,(d|0)!=0):0){De(d);ie(a)}i=b;return}function De(a){a=a|0;var b=0,d=0;b=i;ue(a+304|0);ue(a+308|0);ue(a+312|0);ue(a+316|0);ue(a+320|0);ue(a+324|0);ue(a+328|0);ue(a+332|0);jf(a|0,0,400)|0;d=a+136|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+144|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+128|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+360|0;c[d>>2]=0;c[d+4>>2]=-2147483648;d=a+376|0;c[d>>2]=0;c[d+4>>2]=0;d=a+368|0;c[d>>2]=-1;c[d+4>>2]=-1;c[a+392>>2]=-1;c[a+80>>2]=1;c[a+120>>2]=0;c[a+124>>2]=1;c[a+76>>2]=-1;c[a+344>>2]=2;c[a+348>>2]=2;c[a+352>>2]=2;c[a+340>>2]=0;c[a+356>>2]=0;i=b;return}function Ee(a,b){a=a|0;b=b|0;var 
d=0;d=i;mf(a|0,b|0,400)|0;jf(b|0,0,400)|0;a=b+136|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+144|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+128|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+360|0;c[a>>2]=0;c[a+4>>2]=-2147483648;a=b+376|0;c[a>>2]=0;c[a+4>>2]=0;a=b+368|0;c[a>>2]=-1;c[a+4>>2]=-1;c[b+392>>2]=-1;c[b+80>>2]=1;c[b+120>>2]=0;c[b+124>>2]=1;c[b+76>>2]=-1;c[b+344>>2]=2;c[b+348>>2]=2;c[b+352>>2]=2;c[b+340>>2]=0;c[b+356>>2]=0;i=d;return}function Fe(a,b){a=a|0;b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0;d=i;c[a+76>>2]=c[b+76>>2];c[a+64>>2]=c[b+64>>2];c[a+68>>2]=c[b+68>>2];c[a+388>>2]=c[b+388>>2];j=b+296|0;h=c[j+4>>2]|0;f=a+296|0;c[f>>2]=c[j>>2];c[f+4>>2]=h;c[a+72>>2]=c[b+72>>2];f=c[b+304>>2]|0;if(!f)wa();else{e=f;g=0}while(1){if((e|0)!=0?(j=te(e)|0,c[a+(g<<2)+304>>2]=j,(j|0)==0):0){e=5;break}g=g+1|0;if(g>>>0>=8){e=8;break}e=c[b+(g<<2)+304>>2]|0}if((e|0)==5){De(a);j=-12;i=d;return j|0}else if((e|0)==8){c[a+0>>2]=c[b+0>>2];c[a+4>>2]=c[b+4>>2];c[a+8>>2]=c[b+8>>2];c[a+12>>2]=c[b+12>>2];c[a+16>>2]=c[b+16>>2];c[a+20>>2]=c[b+20>>2];c[a+24>>2]=c[b+24>>2];c[a+28>>2]=c[b+28>>2];j=a+32|0;h=b+32|0;c[j+0>>2]=c[h+0>>2];c[j+4>>2]=c[h+4>>2];c[j+8>>2]=c[h+8>>2];c[j+12>>2]=c[h+12>>2];c[j+16>>2]=c[h+16>>2];c[j+20>>2]=c[h+20>>2];c[j+24>>2]=c[h+24>>2];c[j+28>>2]=c[h+28>>2];j=0;i=d;return j|0}return 0}function Ge(a){a=a|0;var b=0,d=0,e=0,f=0;d=i;e=0;while(1){f=e+1|0;if((c[5128+(e*24|0)>>2]|0)==(a|0))break;if(f>>>0<4)e=f;else{e=0;b=5;break}}if((b|0)==5){i=d;return e|0}f=5132+(e*24|0)|0;i=d;return f|0}function He(b,d,e){b=b|0;d=d|0;e=e|0;var f=0,g=0;f=i;g=(c[b+24>>2]|0)==0?1:3;if((g|0)>(e|0)){b=c[b+8>>2]|0;c[d>>2]=c[b+(e<<2)+32>>2];b=c[b+(e<<2)>>2]|0;i=f;return b|0}if((a[b+29>>0]|0)!=0&(g|0)==(e|0)){b=c[b+12>>2]|0;c[d>>2]=c[b+32>>2];b=c[b>>2]|0;i=f;return b|0}else{c[d>>2]=0;b=0;i=f;return b|0}return 0}function Ie(d,e){d=d|0;e=e|0;var f=0,g=0,h=0;f=i;if(!(c[d+8>>2]|0)){h=-1;i=f;return h|0}c[e>>2]=c[d+16>>2];c[e+4>>2]=c[d+20>>2];a[e+8>>0]=c[d+24>>2];g=d+31|0;if(!(a[d+29>>0]|0))h=0;else 
h=(a[g>>0]|0)==0&1;a[e+9>>0]=h;a[e+12>>0]=a[d+33>>0]|0;a[e+13>>0]=a[g>>0]|0;a[e+14>>0]=a[d+32>>0]|0;a[e+10>>0]=c[d+36>>2];a[e+11>>0]=a[d+30>>0]|0;a[e+15>>0]=a[d+34>>0]|0;b[e+16>>1]=b[d+48>>1]|0;h=0;i=f;return h|0}function Je(b,e){b=b|0;e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0.0,p=0,q=0.0,r=0.0,s=0.0,t=0.0,u=0,v=0,w=0.0,x=0.0,y=0.0;f=i;i=i+32|0;k=f+12|0;h=f;if(!(c[b+8>>2]|0)){u=-1;i=f;return u|0}g=b+68|0;do if(a[g>>0]|0){if(!(a[b+34>>0]|0)){u=-1;i=f;return u|0}if(!(a[b+41>>0]|0)){u=-1;i=f;return u|0}if((c[b+72>>2]|0)!=(e|0)){u=-1;i=f;return u|0}e=b+60|0;j=c[e>>2]|0;g=c[b+64>>2]|0;if((j|0)>=(g|0)){u=-1;i=f;return u|0}u=(c[b+56>>2]|0)+j|0;c[k>>2]=0;c[k+4>>2]=0;c[k+8>>2]=0;c[h>>2]=0;c[h+4>>2]=0;c[h+8>>2]=0;g=Te(b,k,h,u,g-j|0,0)|0;he(c[k>>2]|0);he(c[h>>2]|0);if((g|0)<0){u=-1;i=f;return u|0}else{c[e>>2]=(c[e>>2]|0)+g;h=b+24|0;break}}else{if(e>>>0>1){u=-1;i=f;return u|0}a[b+76>>0]=(e|0)==1&1;m=b+77|0;a[m>>0]=0;a[b+78>>0]=0;h=b+24|0;if(((c[h>>2]|0)+ -1|0)>>>0<2?(u=b+16|0,p=c[u>>2]|0,l=b+84|0,c[l>>2]=(p+1|0)/2|0,c[b+88>>2]=((c[b+20>>2]|0)+1|0)/2|0,c[b+124>>2]=ee(p)|0,c[b+128>>2]=ee(c[u>>2]|0)|0,c[b+196>>2]=ee((c[l>>2]<<1)+14|0)|0,(c[h>>2]|0)==1):0){k=0;do{c[b+(k<<2)+132>>2]=ee(c[l>>2]|0)|0;c[b+(k<<2)+164>>2]=ee(c[l>>2]|0)|0;k=k+1|0}while((k|0)!=8)}k=d[b+30>>0]|0;v=(a[m>>0]|0)!=0?16:8;l=b+36|0;u=c[l>>2]|0;m=a[b+32>>0]|0;n=m&255;p=30-v|0;o=+((1<<v)+ -1|0)*+(1<<p|0);q=o/+((1<<k)+ -1|0);m=m<<24>>24!=0;if(m){v=k+ -8|0;r=o/+(224<<v|0);o=o/+(219<<v|0)}else{r=q;o=q}if((u|0)==4){s=.0593;t=.2627;j=12}else if((u|0)==3){s=.0722;t=.2126;j=12}else if(!u){s=.114;t=.299;j=12}if((j|0)==12){y=1.0-t;c[b+220>>2]=va(+(r*y*2.0))|0;w=1.0-s;x=w-t;c[b+224>>2]=va(+(r*(s*2.0*w/x)))|0;c[b+228>>2]=va(+(r*(t*2.0*y/x)))|0;c[b+232>>2]=va(+(r*w*2.0))|0}j=va(+q)|0;c[b+208>>2]=j;c[b+200>>2]=p;p=1<<p+ -1;u=b+204|0;c[u>>2]=p;c[b+236>>2]=1<<k+ -1;if(m){v=va(+o)|0;c[b+212>>2]=v;v=$(v,-16<<k+ 
-8)|0;c[b+216>>2]=v+(c[u>>2]|0)}else{c[b+212>>2]=j;c[b+216>>2]=p}c[b+240>>2]=k;c[b+244>>2]=n;if(!(c[h>>2]|0))c[b+248>>2]=4;else c[b+248>>2]=c[6256+(c[l>>2]<<2)>>2];a[g>>0]=1;c[b+72>>2]=e}while(0);c[b+92>>2]=He(b,b+108|0,0)|0;if(!(c[h>>2]|0))e=1;else{c[b+96>>2]=He(b,b+112|0,1)|0;c[b+100>>2]=He(b,b+116|0,2)|0;e=3}if(!(a[b+29>>0]|0))c[b+104>>2]=0;else c[b+104>>2]=He(b,b+120|0,e)|0;c[b+80>>2]=0;v=0;i=f;return v|0}function Ke(b,d,f){b=b|0;d=d|0;f=f|0;var g=0,h=0;g=i;h=c[b+8>>2]|0;if((h|0)!=0?(a[b+34>>0]|0)!=0:0){h=h+128|0;h=rf(e[b+50>>1]|0,0,c[h>>2]|0,c[h+4>>2]|0)|0;c[d>>2]=h;d=e[b+52>>1]|0;c[f>>2]=d;i=g;return}c[d>>2]=0;d=1;c[f>>2]=d;i=g;return}function Le(b,e){b=b|0;e=e|0;var f=0,g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0;g=i;h=b+80|0;l=c[h>>2]|0;if(l>>>0>=(c[b+20>>2]|0)>>>0){u=-1;i=g;return u|0}f=c[b+16>>2]|0;m=(c[b+92>>2]|0)+($(c[b+108>>2]|0,l)|0)|0;j=b+76|0;if(!(a[j>>0]|0))k=(a[b+78>>0]|0)!=0?4:3;else k=4;n=c[b+24>>2]|0;if((n|0)==2){u=(c[b+96>>2]|0)+($(c[b+112>>2]|0,l)|0)|0;p=(c[b+100>>2]|0)+($(c[b+116>>2]|0,l)|0)|0;t=b+124|0;q=b+30|0;r=b+28|0;s=b+196|0;Ne(c[t>>2]|0,u,f,d[q>>0]|0,d[r>>0]|0,c[s>>2]|0);u=b+128|0;Ne(c[u>>2]|0,p,f,d[q>>0]|0,d[r>>0]|0,c[s>>2]|0);Qa[c[b+248>>2]&7](b+200|0,e,m,c[t>>2]|0,c[u>>2]|0,f,k)}else if(!n)Qa[c[b+248>>2]&7](b+200|0,e,m,0,0,f,k);else if((n|0)==1){if(!l){n=b+96|0;o=b+112|0;p=b+100|0;q=b+116|0;r=b+84|0;s=b+88|0;t=0;do{u=(t|0)>4?t+ -8|0:t;if((u|0)<0)u=0;else{v=c[s>>2]|0;u=(u|0)<(v|0)?u:v+ -1|0}w=(c[n>>2]|0)+($(c[o>>2]|0,u)|0)|0;v=(c[p>>2]|0)+($(c[q>>2]|0,u)|0)|0;mf(c[b+(t<<2)+132>>2]|0,w|0,c[r>>2]|0)|0;mf(c[b+(t<<2)+164>>2]|0,v|0,c[r>>2]|0)|0;t=t+1|0}while((t|0)!=8)}o=l>>1;q=(o|0)%8|0;w=l&1;n=b+124|0;t=b+196|0;u=b+30|0;v=b+28|0;Me(c[n>>2]|0,b+132|0,f,q,c[t>>2]|0,d[u>>0]|0,w,d[v>>0]|0);p=b+128|0;Me(c[p>>2]|0,b+164|0,f,q,c[t>>2]|0,d[u>>0]|0,w,d[v>>0]|0);if(w){u=(q+5|0)%8|0;t=o+5|0;v=c[b+88>>2]|0;v=(t|0)<(v|0)?t:v+ 
-1|0;t=(c[b+96>>2]|0)+($(v,c[b+112>>2]|0)|0)|0;v=(c[b+100>>2]|0)+($(c[b+116>>2]|0,v)|0)|0;w=b+84|0;mf(c[b+(u<<2)+132>>2]|0,t|0,c[w>>2]|0)|0;mf(c[b+(u<<2)+164>>2]|0,v|0,c[w>>2]|0)|0}Qa[c[b+248>>2]&7](b+200|0,e,m,c[n>>2]|0,c[p>>2]|0,f,k)}else if((n|0)==3){v=(c[b+96>>2]|0)+($(c[b+112>>2]|0,l)|0)|0;w=(c[b+100>>2]|0)+($(c[b+116>>2]|0,l)|0)|0;Qa[c[b+248>>2]&7](b+200|0,e,m,v,w,f,k)}else{w=-1;i=g;return w|0}a:do if(!(a[b+31>>0]|0)){if(a[j>>0]|0){if(!(a[b+29>>0]|0)){if((f|0)<=0)break;b=e+3|0;e=0;while(1){a[b>>0]=-1;e=e+1|0;if((e|0)==(f|0))break a;else b=b+4|0}}j=c[b+104>>2]|0;k=$(c[b+120>>2]|0,l)|0;o=e+3|0;if((c[b+240>>2]|0)==8){if((f|0)>0){l=0;while(1){a[o>>0]=a[j+(l+k)>>0]|0;l=l+1|0;if((l|0)==(f|0))break;else o=o+4|0}}}else{l=c[b+208>>2]|0;m=c[b+204>>2]|0;n=c[b+200>>2]|0;if((f|0)>0){p=0;while(1){a[o>>0]=($(d[j+(p+k)>>0]|0,l)|0)+m>>n;p=p+1|0;if((p|0)==(f|0))break;else o=o+4|0}}}if(a[b+33>>0]|0){if(!(c[1306]|0)){c[1306]=1;b=1;do{c[5232+(b<<2)>>2]=(((b|0)/2|0)+16711808|0)/(b|0)|0;b=b+1|0}while((b|0)!=256)}if((f|0)>0){b=0;while(1){k=a[e+3>>0]|0;if(!(k<<24>>24)){a[e>>0]=-1;a[e+1>>0]=-1;a[e+2>>0]=-1}else{j=c[5232+((k&255)<<2)>>2]|0;l=a[e>>0]|0;if((l&255)<(k&255))l=(($(l&255,j)|0)+32768|0)>>>16&255;else l=-1;a[e>>0]=l;l=e+1|0;m=a[l>>0]|0;if((m&255)<(k&255))m=(($(m&255,j)|0)+32768|0)>>>16&255;else m=-1;a[l>>0]=m;l=e+2|0;m=a[l>>0]|0;if((m&255)<(k&255))j=(($(m&255,j)|0)+32768|0)>>>16&255;else j=-1;a[l>>0]=j}b=b+1|0;if((b|0)==(f|0))break;else e=e+4|0}}}}}else{m=c[b+104>>2]|0;l=$(c[b+120>>2]|0,l)|0;b=c[b+240>>2]|0;q=1<<b+ -1;p=(f|0)>0;if(p){o=e;n=0;while(1){v=d[m+(n+l)>>0]|0;a[o>>0]=($(d[o>>0]|0,v)|0)+q>>b;w=o+1|0;a[w>>0]=($(d[w>>0]|0,v)|0)+q>>b;w=o+2|0;a[w>>0]=($(d[w>>0]|0,v)|0)+q>>b;n=n+1|0;if((n|0)==(f|0))break;else o=o+k|0}}if(!((a[j>>0]|0)==0|p^1)){e=e+3|0;b=0;while(1){a[e>>0]=-1;b=b+1|0;if((b|0)==(f|0))break;else e=e+4|0}}}while(0);c[h>>2]=(c[h>>2]|0)+1;w=0;i=g;return w|0}function Me(e,f,g,h,j,k,l,m){e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;m=m|0;var 
n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0;n=i;p=c[f+((h+5&7)<<2)>>2]|0;t=c[f+((h+6&7)<<2)>>2]|0;q=c[f+((h+7&7)<<2)>>2]|0;s=c[f+((h&7)<<2)>>2]|0;r=c[f+((h+1&7)<<2)>>2]|0;o=c[f+((h+2&7)<<2)>>2]|0;f=c[f+((h+3&7)<<2)>>2]|0;u=k+ -8|0;v=1<<u>>1;h=(g+1|0)/2|0;w=(g|0)>0;if(!l){if(w){l=0;do{y=$(d[t+l>>0]|0,-6)|0;z=$(d[r+l>>0]|0,-10)|0;b[j+(l+3<<1)>>1]=(d[p+l>>0]<<1)+v+y+((d[q+l>>0]|0)*18|0)+((d[s+l>>0]|0)*57|0)+z+(d[o+l>>0]<<2)-(d[f+l>>0]|0)>>u;l=l+1|0}while((l|0)<(h|0))}}else if(w){l=0;do{y=$(d[q+l>>0]|0,-10)|0;z=$(d[o+l>>0]|0,-6)|0;b[j+(l+3<<1)>>1]=v-(d[p+l>>0]|0)+(d[t+l>>0]<<2)+y+((d[s+l>>0]|0)*57|0)+((d[r+l>>0]|0)*18|0)+z+(d[f+l>>0]<<1)>>u;l=l+1|0}while((l|0)<(h|0))}o=j+6|0;z=b[o>>1]|0;b[j>>1]=z;q=j+2|0;b[q>>1]=z;p=j+4|0;b[p>>1]=z;z=b[j+(h+2<<1)>>1]|0;b[j+(h+3<<1)>>1]=z;b[j+(h+4<<1)>>1]=z;b[j+(h+5<<1)>>1]=z;b[j+(h+6<<1)>>1]=z;h=(1<<k)+ -1|0;if(!m){p=14-k|0;m=1<<p>>1;t=20-k|0;s=1<<t+ -1;if((g|0)>1){r=g+ -2|0;k=r>>>1;q=k<<1;f=e;while(1){u=(b[o>>1]|0)+m>>p;if((u|0)<0)u=0;else u=((u|0)>(h|0)?h:u)&255;a[f>>0]=u;z=$((b[o+4>>1]|0)+(b[o+ -2>>1]|0)|0,-11)|0;u=o;o=o+2|0;u=s-(b[u+ -6>>1]|0)-(b[u+8>>1]|0)+((b[u+6>>1]|0)+(b[u+ -4>>1]|0)<<2)+z+(((b[o>>1]|0)+(b[u>>1]|0)|0)*40|0)>>t;if((u|0)<0)u=0;else u=((u|0)>(h|0)?h:u)&255;a[f+1>>0]=u;g=g+ -2|0;if((g|0)<=1)break;else f=f+2|0}e=e+(q+2)|0;g=r-q|0;o=j+(k+4<<1)|0}if(!g){i=n;return}j=(b[o>>1]|0)+m>>p;if((j|0)<0)j=0;else j=((j|0)>(h|0)?h:j)&255;a[e>>0]=j;i=n;return}else{k=20-k|0;m=1<<k+ -1;l=b[j>>1]|0;v=b[q>>1]|0;u=b[p>>1]|0;f=b[o>>1]|0;s=b[j+8>>1]|0;t=b[j+10>>1]|0;if((g|0)>1){r=g+ -2|0;p=r>>>1;q=p<<1;x=e;while(1){w=b[o+6>>1]|0;y=f*57|0;z=(t<<2)+m+($(s,-10)|0)+y+(u*18|0)+($(v,-6)|0)+(l<<1)-w>>k;if((z|0)<0)z=0;else z=((z|0)>(h|0)?h:z)&255;a[x>>0]=z;l=($(t,-6)|0)+m+(s*18|0)+y+($(u,-10)|0)-l+(v<<2)+(w<<1)>>k;if((l|0)<0)l=0;else l=((l|0)>(h|0)?h:l)&255;a[x+1>>0]=l;g=g+ 
-2|0;if((g|0)<=1)break;else{B=t;A=s;y=f;z=u;l=v;t=w;x=x+2|0;o=o+2|0;s=B;f=A;u=y;v=z}}l=v;v=u;u=f;f=s;s=t;t=w;e=e+(q+2)|0;g=r-q|0;o=j+(p+4<<1)|0}if(!g){i=n;return}j=(t<<2)+m+($(s,-10)|0)+(f*57|0)+(u*18|0)+($(v,-6)|0)+(l<<1)-(b[o+6>>1]|0)>>k;if((j|0)<0)j=0;else j=((j|0)>(h|0)?h:j)&255;a[e>>0]=j;i=n;return}}function Ne(b,c,e,f,g,h){b=b|0;c=c|0;e=e|0;f=f|0;g=g|0;h=h|0;var j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0;j=i;v=(e+1|0)/2|0;k=h+3|0;mf(k|0,c|0,v|0)|0;jf(h|0,a[c>>0]|0,3)|0;jf(h+(v+3)|0,a[c+(v+ -1)>>0]|0,4)|0;c=(1<<f)+ -1|0;if(!g){if((e|0)>1){f=e+ -2|0;g=f>>>1;l=g<<1;m=b;while(1){a[m>>0]=a[k>>0]|0;v=$((d[k+2>>0]|0)+(d[k+ -1>>0]|0)|0,-11)|0;n=k;k=k+1|0;n=32-(d[n+ -3>>0]|0)-(d[n+4>>0]|0)+((d[n+3>>0]|0)+(d[n+ -2>>0]|0)<<2)+v+(((d[k>>0]|0)+(d[n>>0]|0)|0)*40|0)>>6;if((n|0)<0)n=0;else n=((n|0)>(c|0)?c:n)&255;a[m+1>>0]=n;e=e+ -2|0;if((e|0)<=1)break;else m=m+2|0}b=b+(l+2)|0;e=f-l|0;k=h+(g+4)|0}if(!e){i=j;return}a[b>>0]=a[k>>0]|0;i=j;return}q=d[h>>0]|0;r=d[h+1>>0]|0;m=d[h+2>>0]|0;p=d[k>>0]|0;o=d[h+4>>0]|0;n=d[h+5>>0]|0;if((e|0)>1){f=e+ -2|0;g=f>>>1;l=g<<1;t=b;while(1){s=d[k+3>>0]|0;u=p*57|0;v=(n<<2)+32+($(o,-10)|0)+u+(m*18|0)+($(r,-6)|0)+(q<<1)-s>>6;if((v|0)<0)v=0;else v=((v|0)>(c|0)?c:v)&255;a[t>>0]=v;q=($(n,-6)|0)+32+(o*18|0)+u+($(m,-10)|0)-q+(r<<2)+(s<<1)>>6;if((q|0)<0)q=0;else q=((q|0)>(c|0)?c:q)&255;a[t+1>>0]=q;e=e+ -2|0;if((e|0)<=1)break;else{x=n;w=o;u=p;v=m;q=r;n=s;t=t+2|0;k=k+1|0;o=x;p=w;m=u;r=v}}q=r;r=m;m=p;p=o;o=n;n=s;b=b+(l+2)|0;e=f-l|0;k=h+(g+4)|0}if(!e){i=j;return}h=(n<<2)+32+($(o,-10)|0)+(p*57|0)+(m*18|0)+($(r,-6)|0)+(q<<1)-(d[k+3>>0]|0)>>6;if((h|0)<0)h=0;else h=((h|0)>(c|0)?c:h)&255;a[b>>0]=h;i=j;return}function Oe(){var a=0,b=0;a=i;b=le(252)|0;if(!b)b=0;i=a;return b|0}function Pe(d,e,f){d=d|0;e=e|0;f=f|0;var 
g=0,h=0,j=0,k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0,B=0,C=0,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=0,O=0,P=0,Q=0,R=0;g=i;i=i+80|0;h=g+64|0;j=g+52|0;u=g+48|0;s=g+44|0;r=g+40|0;w=g+36|0;n=g;y=a[d+40>>0]|0;a:do if(((((((f|0)>=6?(a[e>>0]|0)==66:0)?(a[e+1>>0]|0)==80:0)?(a[e+2>>0]|0)==71:0)?(a[e+3>>0]|0)==-5:0)?(R=a[e+4>>0]|0,P=R&255,z=P>>>5,c[n+8>>2]=z,(R&255)<=191):0)?(R=(P&15)+8|0,a[n+13>>0]=R,(R&255)>>>0<=14):0){H=a[e+5>>0]|0;v=H&255;I=v>>>4;c[n+24>>2]=I;A=v&8;N=v>>>2&1;a[n+16>>0]=v>>>1&1;p=n+17|0;a[p>>0]=v&1;v=n+18|0;b[v>>1]=0;q=n+20|0;b[q>>1]=0;t=n+22|0;b[t>>1]=0;Q=n+12|0;a[Q>>0]=0;R=n+14|0;a[R>>0]=0;O=n+15|0;a[O>>0]=0;if(!(P&16))if(!N)N=0;else{a[Q>>0]=1;a[R>>0]=1;N=1}else{a[Q>>0]=1;a[O>>0]=N;N=0}if((((((((((H&255)<=79?(z|0)!=0|(I|0)==0:0)?!(N<<24>>24!=0&(z|0)==0):0)?(G=We(n,e+6|0,f+ -6|0)|0,(G|0)>=0):0)?(F=(c[n>>2]|0)>>>0>1073741823?-1:G,(F|0)>=0):0)?(L=F+6|0,J=n+4|0,E=We(J,e+L|0,f-L|0)|0,(E|0)>=0):0)?(B=c[J>>2]|0,K=B>>>0>1073741823?-1:E,(K|0)>=0):0)?(C=K+L|0,!((c[n>>2]|0)==0|(B|0)==0)):0)?(o=n+28|0,D=We(o,e+C|0,f-C|0)|0,(D|0)>=0):0)?(M=(c[o>>2]|0)>>>0>1073741823?-1:D,(M|0)>=0):0){z=M+C|0;c[h>>2]=0;do if(!A){c[n+32>>2]=0;x=48}else{A=We(h,e+z|0,f-z|0)|0;if((A|0)<0){z=-1;break a}C=c[h>>2]|0;A=C>>>0>1073741823?-1:A;if((A|0)<0){z=-1;break a}B=A+z|0;A=n+32|0;c[A>>2]=0;z=B+C|0;if((z|0)>(f|0)){z=-1;break a}y=y<<24>>24!=0;if(!y?(a[p>>0]|0)==0:0)break;if((B|0)<(z|0))x=A;else{z=B;x=48;break}while(1){A=We(j,e+B|0,z-B|0)|0;if((A|0)<0){z=-1;break a}B=A+B|0;C=We(u,e+B|0,z-B|0)|0;if((C|0)<0){z=-1;break a}A=c[u>>2]|0;C=A>>>0>1073741823?-1:C;if((C|0)<0){z=-1;break a}C=C+B|0;B=C+A|0;if(B>>>0>z>>>0){z=-1;break a}do if(a[p>>0]|0){if((c[j>>2]|0)!=5)break;E=We(s,e+C|0,z-C|0)|0;if((E|0)<0){z=-1;break a}D=c[s>>2]|0;E=D>>>0>1073741823?-1:E;if((E|0)<0){z=-1;break a}F=E+C|0;G=We(r,e+F|0,z-F|0)|0;if((G|0)<0){z=-1;break a}E=c[r>>2]|0;G=E>>>0>1073741823?-1:G;if((G|0)<0){z=-1;break a}R=G+F|0;if((We(w,e+R|0,z-R|0)|0)<0){z=-1;break 
a}F=c[w>>2]|0;if(!((E&65535|0)==(E|0)&((F>>>0>1073741823|(E|0)==0|(F|0)==0)^1))){z=-1;break a}if((F&65535|0)!=(F|0)){z=-1;break a}if((D&65535|0)!=(D|0)){z=-1;break a}b[v>>1]=D;b[q>>1]=E;b[t>>1]=F}while(0);if(y){P=ee(16)|0;c[P>>2]=c[j>>2];Q=P+4|0;c[Q>>2]=A;R=P+12|0;c[R>>2]=0;c[x>>2]=P;x=ee(A)|0;c[P+8>>2]=x;mf(x|0,e+C|0,c[Q>>2]|0)|0;x=R}if((B|0)>=(z|0)){z=B;x=48;break}}}while(0);do if((x|0)==48){if(!(a[p>>0]|0))break;if(!(b[q>>1]|0)){z=-1;break a}}while(0);if(c[o>>2]|0)break;c[o>>2]=f-z}else z=-1}else z=-1;while(0);if((z|0)<0){R=z;i=g;return R|0}q=c[n>>2]|0;r=c[n+4>>2]|0;u=c[n+12>>2]|0;v=u&255;w=c[n+24>>2]|0;s=(u&65535)>>>8;x=s&255;s=s&65535;o=d+16|0;c[o>>2]=q;p=d+20|0;c[p>>2]=r;t=c[n+8>>2]|0;A=d+24|0;c[A>>2]=t;y=u>>>24&255;u=u>>>16&255;if((t|0)==5){c[A>>2]=2;a[d+28>>0]=0;t=2}else if((t|0)==4){c[A>>2]=1;a[d+28>>0]=0;t=1}else{c[A>>2]=t;a[d+28>>0]=1}a[d+29>>0]=v;a[d+33>>0]=y;a[d+31>>0]=u;u=c[n+16>>2]|0;a[d+32>>0]=u;c[d+36>>2]=w;a[d+30>>0]=x;w=d+34|0;a[w>>0]=(u&65535)>>>8;b[d+48>>1]=u>>>16;u=c[n+20>>2]|0;b[d+50>>1]=u;b[d+52>>1]=u>>>16;u=d+44|0;c[u>>2]=c[n+32>>2];do if(((c[n+28>>2]|0)+z|0)>>>0<=f>>>0){x=e+z|0;n=f-z|0;c[h>>2]=0;c[h+4>>2]=0;c[h+8>>2]=0;c[j>>2]=0;c[j+4>>2]=0;c[j+8>>2]=0;if(!(v<<24>>24))y=n;else{v=Se(h,d+12|0,d+4|0,x,n,q,r,0,s)|0;if((v|0)<0)break;x=e+(v+z)|0;y=n-v|0}v=d+8|0;q=Se(j,v,d,x,y,q,r,t,s)|0;if(((q|0)>=0?(l=y-q|0,k=Te(d,h,j,x+q|0,l,1)|0,he(c[h>>2]|0),he(c[j>>2]|0),(k|0)>=0):0)?(m=k+(n-l)|0,(m|0)>=0):0){h=m+z|0;a[d+41>>0]=1;if(!(a[w>>0]|0))Qe(d);else{j=f-h|0;f=ee(j)|0;c[d+56>>2]=f;if(!f)break;mf(f|0,e+h|0,j|0)|0;c[d+64>>2]=j;c[d+60>>2]=0}R=c[v>>2]|0;if((c[R+64>>2]|0)>=(c[o>>2]|0)?(c[R+68>>2]|0)>=(c[p>>2]|0):0){c[d+80>>2]=-1;R=0;i=g;return R|0}}}while(0);Ce(d+8|0);Ce(d+12|0);c[u>>2]=0;R=-1;i=g;return R|0}function Qe(a){a=a|0;var b=0,d=0,e=0;b=i;d=a+4|0;e=c[d>>2]|0;if(e){Fd(e)|0;he(c[d>>2]|0);c[d>>2]=0}d=c[a>>2]|0;if(!d){i=b;return}Fd(d)|0;he(c[a>>2]|0);c[a>>2]=0;i=b;return}function Re(a){a=a|0;var 
b=0,d=0;b=i;he(c[a+124>>2]|0);he(c[a+128>>2]|0);d=0;do{he(c[a+(d<<2)+132>>2]|0);he(c[a+(d<<2)+164>>2]|0);d=d+1|0}while((d|0)!=8);he(c[a+196>>2]|0);he(c[a+56>>2]|0);Qe(a);Ce(a+8|0);Ce(a+12|0);he(a);i=b;return}function Se(b,d,e,f,g,h,j,k,l){b=b|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;l=l|0;var m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0;n=i;i=i+16|0;q=n+4|0;m=n;p=We(q,f,g)|0;if((p|0)<0){t=-1;i=n;return t|0}r=c[q>>2]|0;t=r>>>0>1073741823?-1:p;if((t|0)<0){t=-1;i=n;return t|0}s=g-t|0;if(r>>>0>s>>>0){t=-1;i=n;return t|0}q=r+10|0;p=ee(q)|0;a[p>>0]=k;a[p+1>>0]=h>>>24;a[p+2>>0]=h>>>16;a[p+3>>0]=h>>>8;a[p+4>>0]=h;a[p+5>>0]=j>>>24;a[p+6>>0]=j>>>16;a[p+7>>0]=j>>>8;a[p+8>>0]=j;a[p+9>>0]=l+248;mf(p+10|0,f+t|0,r|0)|0;l=s-r|0;k=ee(10-r+(q<<1)+l|0)|0;a[k>>0]=0;a[k+1>>0]=0;a[k+2>>0]=0;a[k+3>>0]=1;a[k+4>>0]=96;a[k+5>>0]=1;if((q|0)>0){r=0;f=6;do{j=r+1|0;h=a[p+r>>0]|0;if((j|0)<(q|0)&h<<24>>24==0)if(!(a[p+j>>0]|0)){a[k+f>>0]=0;a[k+(f+1)>>0]=0;a[k+(f+2)>>0]=3;r=r+2|0;f=f+3|0}else{h=0;o=8}else o=8;if((o|0)==8){o=0;a[k+f>>0]=h;r=j;f=f+1|0}}while((r|0)<(q|0));if(!f){f=0;o=12}else o=11}else{f=6;o=11}if((o|0)==11)if(!(a[k+(f+ -1)>>0]|0))o=12;if((o|0)==12){a[k+f>>0]=-128;f=f+1|0}he(p);o=g-l|0;if((o|0)<0){t=-1;i=n;return t|0}g=b+8|0;if((Ue(b,(c[g>>2]|0)+f|0)|0)<0){he(k);t=-1;i=n;return t|0}mf((c[b>>2]|0)+(c[g>>2]|0)|0,k|0,f|0)|0;c[g>>2]=(c[g>>2]|0)+f;he(k);b=Kd(1488)|0;if(!b){t=-1;i=n;return t|0}g=Be()|0;c[m>>2]=g;if(!g){t=-1;i=n;return t|0}t=b+688|0;c[t>>2]=c[t>>2]|1;if((Ed(b,1488,0)|0)<0){Ce(m);t=-1;i=n;return t|0}else{c[e>>2]=b;c[d>>2]=g;t=o;i=n;return t|0}return 0}function Te(b,e,f,g,h,j){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;var k=0,l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0,y=0,z=0,A=0;k=i;i=i+16|0;p=k;l=b+4|0;n=(c[l>>2]|0)!=0;c[p>>2]=0;o=p+4|0;c[o>>2]=0;a:do if((h|0)>0){q=p+((n&1)<<2)|0;t=0;x=0;r=h;w=(j|0)!=0;b:while(1){if((r|0)<((w?5:2)|0)){m=48;break}if(w)v=0;else v=(a[g+2>>0]|0)==0?4:3;if((r|0)<(v+3|0)){m=48;break}s=g+v|0;j=d[s>>0]|0;u=j<<5&32|(d[g+(v+1)>>0]|0)>>>3;j=j>>>1&63;do if((j+ 
-32|0)>>>0<4|(j|0)==39|j>>>0>40)if(t)if(!(c[q>>2]|0))j=x;else break a;else{j=x;t=0}else if((j>>>0<10|(j+ -16|0)>>>0<6?(m=v+2|0,(m|0)<(r|0)):0)?(a[g+m>>0]|0)<0:0){if((x|0)!=0?(c[q>>2]|0)!=0:0)break a;if(n&(u|0)==1){c[o>>2]=1;j=x;t=x;break}else{c[p>>2]=1;j=1;t=1;break}}else j=x;while(0);do if(!w){if(((((r|0)>3?(a[g>>0]|0)==0:0)?(a[g+1>>0]|0)==0:0)?(a[g+2>>0]|0)==0:0)?(a[g+3>>0]|0)==1:0){w=4;break}if((r|0)<=2){m=48;break b}if(a[g>>0]|0){m=48;break b}if(a[g+1>>0]|0){m=48;break b}if((a[g+2>>0]|0)==1)w=3;else{m=48;break b}}else w=0;while(0);x=w+2|0;if((x|0)>(r|0)){m=48;break}c:do if((x|0)<(r|0))while(1){z=(a[g+w>>0]|0)==0;if((z?(a[g+(w+1)>>0]|0)==0:0)?(a[g+x>>0]|0)==1:0)break c;A=x;x=w+3|0;if((x|0)>=(r|0)){w=r;break c}y=w+1|0;if(!z){w=y;continue}if(a[g+y>>0]|0){w=y;continue}if(a[g+A>>0]|0){w=y;continue}z=(a[g+x>>0]|0)==1;if(z){w=z?w:r;break}else w=y}else w=r;while(0);if((w|0)<0){m=48;break}v=w-v|0;x=v+3|0;u=n&(u|0)==1;z=u?e:f;y=z+8|0;if((Ue(z,(c[y>>2]|0)+x|0)|0)<0){m=48;break}A=c[z>>2]|0;z=c[y>>2]|0;a[A+z>>0]=0;a[A+(z+1)>>0]=0;a[A+(z+2)>>0]=1;mf(A+(z+3)|0,s|0,v|0)|0;if(u){A=A+(z+4)|0;a[A>>0]=d[A>>0]&7}c[y>>2]=(c[y>>2]|0)+x;r=r-w|0;if((r|0)>0){x=j;g=g+w|0;w=0}else break a}if((m|0)==48){i=k;return-1}}else r=h;while(0);if(c[l>>2]|0){m=e+8|0;if((Ue(e,(c[m>>2]|0)+32|0)|0)<0){i=k;return-1}if((Ve(c[l>>2]|0,c[b+12>>2]|0,c[e>>2]|0,c[m>>2]|0)|0)<0){i=k;return-1}}l=f+8|0;if((Ue(f,(c[l>>2]|0)+32|0)|0)<0){i=k;return-1}else{A=(Ve(c[b>>2]|0,c[b+8>>2]|0,c[f>>2]|0,c[l>>2]|0)|0)<0;i=k;return(A?-1:h-r|0)|0}return 0}function Ue(a,b){a=a|0;b=b|0;var d=0,e=0,f=0;d=i;e=a+4|0;f=c[e>>2]|0;if((f|0)<(b|0)){f=(f*3|0)/2|0;f=(f|0)<(b|0)?b:f;b=fe(c[a>>2]|0,f)|0;if(!b)a=-1;else{c[a>>2]=b;c[e>>2]=f;a=0}}else a=0;i=d;return a|0}function Ve(b,d,e,f){b=b|0;d=d|0;e=e|0;f=f|0;var g=0,h=0,j=0;j=i;i=i+96|0;h=j;g=j+80|0;Ud(h);c[h+24>>2]=e;c[h+28>>2]=f;e=e+f+0|0;f=e+32|0;do{a[e>>0]=0;e=e+1|0}while((e|0)<(f|0));e=(Ld(b,d,g,h)|0)<0;i=j;return(e|(c[g>>2]|0)==0)<<31>>31|0}function We(b,e,f){b=b|0;e=e|0;f=f|0;var 
g=0,h=0,j=0,k=0;g=i;a:do if((f|0)>=1){j=a[e>>0]|0;h=j&255;if(j<<24>>24>-1){c[b>>2]=h;b=1;break}if(j<<24>>24!=-128){j=e+1|0;h=h&127;while(1){if((f|0)<2){b=-1;break a}k=j;j=j+1|0;k=d[k>>0]|0;h=k&127|h<<7;if(!(k&128))break;else f=f+ -1|0}c[b>>2]=h;b=j-e|0}else b=-1}else b=-1;while(0);i=g;return b|0}function Xe(b,e,f,g,h,j,k){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0;h=i;if((c[b+40>>2]|0)==8?(c[b+44>>2]|0)==0:0){if((j|0)>0)g=0;else{i=h;return}while(1){n=a[f+g>>0]|0;a[e>>0]=n;a[e+1>>0]=n;a[e+2>>0]=n;g=g+1|0;if((g|0)==(j|0))break;else e=e+k|0}i=h;return}l=c[b+12>>2]|0;g=c[b+16>>2]|0;b=c[b>>2]|0;if((j|0)>0)m=0;else{i=h;return}while(1){n=($(d[f+m>>0]|0,l)|0)+g>>b;if((n|0)<0)n=0;else n=(n|0)>255?-1:n&255;a[e>>0]=n;a[e+1>>0]=n;a[e+2>>0]=n;m=m+1|0;if((m|0)==(j|0))break;else e=e+k|0}i=h;return}function Ye(b,e,f,g,h,j,k){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0,v=0,w=0,x=0;q=i;s=c[b+20>>2]|0;n=c[b+24>>2]|0;o=c[b+28>>2]|0;l=c[b+32>>2]|0;p=c[b+12>>2]|0;r=c[b+16>>2]|0;m=c[b>>2]|0;b=c[b+36>>2]|0;if((j|0)>0)t=0;else{i=q;return}while(1){v=$(d[f+t>>0]|0,p)|0;u=(d[g+t>>0]|0)-b|0;w=(d[h+t>>0]|0)-b|0;v=v+r|0;x=v+($(w,s)|0)>>m;if((x|0)<0)x=0;else x=(x|0)>255?-1:x&255;a[e>>0]=x;w=v-($(u,n)|0)-($(w,o)|0)>>m;if((w|0)<0)w=0;else w=(w|0)>255?-1:w&255;a[e+1>>0]=w;u=v+($(u,l)|0)>>m;if((u|0)<0)u=0;else u=(u|0)>255?-1:u&255;a[e+2>>0]=u;t=t+1|0;if((t|0)==(j|0))break;else e=e+k|0}i=q;return}function Ze(b,e,f,g,h,j,k){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0;l=i;if((c[b+40>>2]|0)==8?(c[b+44>>2]|0)==0:0){if((j|0)>0)m=0;else{i=l;return}while(1){a[e>>0]=a[h+m>>0]|0;a[e+1>>0]=a[f+m>>0]|0;a[e+2>>0]=a[g+m>>0]|0;m=m+1|0;if((m|0)==(j|0))break;else e=e+k|0}i=l;return}n=c[b+12>>2]|0;m=c[b+16>>2]|0;b=c[b>>2]|0;if((j|0)>0)o=0;else{i=l;return}while(1){p=($(d[h+o>>0]|0,n)|0)+m>>b;if((p|0)<0)p=0;else p=(p|0)>255?-1:p&255;a[e>>0]=p;p=($(d[f+o>>0]|0,n)|0)+m>>b;if((p|0)<0)p=0;else 
p=(p|0)>255?-1:p&255;a[e+1>>0]=p;p=($(d[g+o>>0]|0,n)|0)+m>>b;if((p|0)<0)p=0;else p=(p|0)>255?-1:p&255;a[e+2>>0]=p;o=o+1|0;if((o|0)==(j|0))break;else e=e+k|0}i=l;return}function _e(b,e,f,g,h,j,k){b=b|0;e=e|0;f=f|0;g=g|0;h=h|0;j=j|0;k=k|0;var l=0,m=0,n=0,o=0,p=0,q=0,r=0,s=0,t=0,u=0;o=i;l=c[b+12>>2]|0;m=c[b+16>>2]|0;n=c[b>>2]|0;b=c[b+36>>2]|0;if((j|0)>0)p=0;else{i=o;return}while(1){t=d[f+p>>0]|0;s=(d[g+p>>0]|0)-b|0;r=(d[h+p>>0]|0)-b|0;q=t-s|0;u=($(q+r|0,l)|0)+m>>n;if((u|0)<0)u=0;else u=(u|0)>255?-1:u&255;a[e>>0]=u;s=($(s+t|0,l)|0)+m>>n;if((s|0)<0)s=0;else s=(s|0)>255?-1:s&255;a[e+1>>0]=s;q=($(q-r|0,l)|0)+m>>n;if((q|0)<0)q=0;else q=(q|0)>255?-1:q&255;a[e+2>>0]=q;p=p+1|0;if((p|0)==(j|0))break;else e=e+k|0}i=o;return}function $e(b){b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0,k=0;d=i;if((b|0)==0|b>>>0>2147483583){k=0;i=d;return k|0}do if(!(c[1571]|0)){e=ua(64)|0;if((e|0)==(-1|0)){k=0;i=d;return k|0}else{c[1572]=ua(0)|0;c[1571]=6280;c[1570]=6280;c[1575]=6296;c[1574]=6296;k=e+16|0;a[e+15>>0]=-86;j=c[1575]|0;c[1575]=k;c[k>>2]=6296;c[e+20>>2]=j;c[j>>2]=k;j=e+24|0;k=c[1571]|0;c[1571]=j;c[j>>2]=6280;c[e+28>>2]=k;c[k>>2]=j;break}}while(0);e=b+40&-32;h=c[1572]|0;g=c[1570]|0;k=6284|0;while(1){f=c[k>>2]|0;b=f+ -8|0;k=c[f+ -4>>2]|0;if((k|0)==6296)j=h;else j=k;j=j-b|0;if(e>>>0<j>>>0){h=12;break}if((f|0)==(g|0)){h=10;break}k=f+4|0;if((e|0)==(j|0)){h=15;break}}do if((h|0)==10)if((ua(e+32-j|0)|0)==(-1|0)){k=0;i=d;return k|0}else{c[1572]=ua(0)|0;k=c[g+ -4>>2]|0;f=g;h=12;break}else if((h|0)==15){j=c[f>>2]|0;k=c[k>>2]|0;c[j+4>>2]=k;c[k>>2]=j}while(0);if((h|0)==12){h=b+e|0;c[f+ -4>>2]=h;c[h>>2]=b;c[b+(e|4)>>2]=k;c[k>>2]=h;h=b+(e|8)|0;k=f+4|0;j=c[k>>2]|0;c[k>>2]=h;c[h>>2]=f;c[b+(e|12)>>2]=j;c[j>>2]=h;a[b+(e+ -1)>>0]=-86;j=c[f>>2]|0;k=c[k>>2]|0;c[j+4>>2]=k;c[k>>2]=j}a[b+ -1>>0]=85;k=f;i=d;return k|0}function af(b){b=b|0;var d=0,e=0,f=0,g=0,h=0,j=0;d=i;if(!b){i=d;return}g=b+ -8|0;e=c[1571]|0;c[1571]=b;c[b>>2]=6280;f=b+4|0;c[f>>2]=e;c[e>>2]=b;a[b+ -9>>0]=-86;e=c[g>>2]|0;if((e|0)!=6296?(a[e+ 
-1>>0]|0)==-86:0){g=c[b+ -4>>2]|0;c[e+4>>2]=g;c[g>>2]=e;b=c[b>>2]|0;g=c[f>>2]|0;c[b+4>>2]=g;c[g>>2]=b}else e=g;b=c[e+4>>2]|0;if((b|0)==6296){i=d;return}if((a[b+ -1>>0]|0)!=-86){i=d;return}g=c[b>>2]|0;h=c[b+4>>2]|0;c[g+4>>2]=h;c[h>>2]=g;h=e+8|0;g=c[h>>2]|0;j=e+12|0;f=c[j>>2]|0;c[g+4>>2]=f;c[f>>2]=g;f=b+8|0;g=b+12|0;e=c[g>>2]|0;c[g>>2]=h;c[h>>2]=f;c[j>>2]=e;c[e>>2]=h;f=c[f>>2]|0;g=c[g>>2]|0;c[f+4>>2]=g;c[g>>2]=f;i=d;return}function bf(a,b){a=a|0;b=b|0;var d=0,e=0,f=0;d=i;do if(a){if(!b){af(a);e=0;break}e=$e(b)|0;if(!e)e=0;else{f=(c[a+ -4>>2]|0)-a+ -1|0;mf(e|0,a|0,(f>>>0>b>>>0?b:f)|0)|0;af(a)}}else e=$e(b)|0;while(0);i=d;return e|0}function cf(b,c,d){b=b|0;c=c|0;d=d|0;var e=0,f=0,g=0,h=0;f=i;if(!d){h=0;i=f;return h|0}while(1){g=a[b>>0]|0;h=a[c>>0]|0;if(g<<24>>24!=h<<24>>24)break;d=d+ -1|0;if(!d){b=0;e=5;break}else{b=b+1|0;c=c+1|0}}if((e|0)==5){i=f;return b|0}h=(g&255)-(h&255)|0;i=f;return h|0}function df(){}function ef(a,b,c){a=a|0;b=b|0;c=c|0;if((c|0)<32){D=b>>c;return a>>>c|(b&(1<<c)-1)<<32-c}D=(b|0)<0?-1:0;return b>>c-32|0}function ff(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;b=b-d-(c>>>0>a>>>0|0)>>>0;return(D=b,a-c>>>0|0)|0}function gf(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;c=a+c>>>0;return(D=b+d+(c>>>0<a>>>0|0)>>>0,c|0)|0}function hf(b){b=b|0;var c=0;c=b;while(a[c>>0]|0)c=c+1|0;return c-b|0}function jf(b,d,e){b=b|0;d=d|0;e=e|0;var f=0,g=0,h=0,i=0;f=b+e|0;if((e|0)>=20){d=d&255;i=b&3;h=d|d<<8|d<<16|d<<24;g=f&~3;if(i){i=b+4-i|0;while((b|0)<(i|0)){a[b>>0]=d;b=b+1|0}}while((b|0)<(g|0)){c[b>>2]=h;b=b+4|0}}while((b|0)<(f|0)){a[b>>0]=d;b=b+1|0}return b-e|0}function kf(a,b,c){a=a|0;b=b|0;c=c|0;if((c|0)<32){D=b<<c|(a&(1<<c)-1<<32-c)>>>32-c;return a<<c}D=a<<c-32;return 0}function lf(a){a=a|0;return(a&255)<<24|(a>>8&255)<<16|(a>>16&255)<<8|a>>>24|0}function mf(b,d,e){b=b|0;d=d|0;e=e|0;var f=0;if((e|0)>=4096)return xa(b|0,d|0,e|0)|0;f=b|0;if((b&3)==(d&3)){while(b&3){if(!e)return 
f|0;a[b>>0]=a[d>>0]|0;b=b+1|0;d=d+1|0;e=e-1|0}while((e|0)>=4){c[b>>2]=c[d>>2];b=b+4|0;d=d+4|0;e=e-4|0}}while((e|0)>0){a[b>>0]=a[d>>0]|0;b=b+1|0;d=d+1|0;e=e-1|0}return f|0}function nf(a,b,c){a=a|0;b=b|0;c=c|0;if((c|0)<32){D=b>>>c;return a>>>c|(b&(1<<c)-1)<<32-c}D=0;return b>>>c-32|0}function of(b){b=b|0;var c=0;c=a[n+(b>>>24)>>0]|0;if((c|0)<8)return c|0;c=a[n+(b>>16&255)>>0]|0;if((c|0)<8)return c+8|0;c=a[n+(b>>8&255)>>0]|0;if((c|0)<8)return c+16|0;return(a[n+(b&255)>>0]|0)+24|0}function pf(b){b=b|0;var c=0;c=a[m+(b&255)>>0]|0;if((c|0)<8)return c|0;c=a[m+(b>>8&255)>>0]|0;if((c|0)<8)return c+8|0;c=a[m+(b>>16&255)>>0]|0;if((c|0)<8)return c+16|0;return(a[m+(b>>>24)>>0]|0)+24|0}function qf(a,b){a=a|0;b=b|0;var c=0,d=0,e=0,f=0;f=a&65535;d=b&65535;c=$(d,f)|0;e=a>>>16;d=(c>>>16)+($(d,e)|0)|0;b=b>>>16;a=$(b,f)|0;return(D=(d>>>16)+($(b,e)|0)+(((d&65535)+a|0)>>>16)|0,d+a<<16|c&65535|0)|0}function rf(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;var e=0,f=0;e=a;f=c;a=qf(e,f)|0;c=D;return(D=($(b,f)|0)+($(d,e)|0)+c|c&0,a|0|0)|0}function sf(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;return Aa[a&1](b|0,c|0,d|0)|0}function tf(a,b,c,d,e,f,g,h,i,j,k,l){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0;l=l|0;Ba[a&7](b|0,c|0,d|0,e|0,f|0,g|0,h|0,i|0,j|0,k|0,l|0)}function uf(a,b,c,d,e,f,g,h,i,j,k){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0;Ca[a&1](b|0,c|0,d|0,e|0,f|0,g|0,h|0,i|0,j|0,k|0)}function vf(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;Da[a&3](b|0,c|0,d|0,e|0,f|0)}function wf(a,b){a=a|0;b=b|0;Ea[a&7](b|0)}function xf(a,b,c){a=a|0;b=b|0;c=c|0;Fa[a&15](b|0,c|0)}function yf(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;return Ga[a&1](b|0,c|0,d|0,e|0,f|0,g|0)|0}function zf(a,b,c,d,e,f,g,h,i,j){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;Ha[a&1](b|0,c|0,d|0,e|0,f|0,g|0,h|0,i|0,j|0)}function 
Af(a,b,c,d,e,f,g,h,i,j,k,l,m){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0;l=l|0;m=m|0;Ia[a&3](b|0,c|0,d|0,e|0,f|0,g|0,h|0,i|0,j|0,k|0,l|0,m|0)}function Bf(a,b){a=a|0;b=b|0;return Ja[a&7](b|0)|0}function Cf(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;Ka[a&7](b|0,c|0,d|0)}function Df(a,b,c,d,e,f,g,h,i){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;La[a&7](b|0,c|0,d|0,e|0,f|0,g|0,h|0,i|0)}function Ef(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;return Ma[a&1](b|0,c|0,d|0,e|0)|0}function Ff(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;Na[a&3](b|0,c|0,d|0,e|0,f|0,g|0)}function Gf(a,b,c){a=a|0;b=b|0;c=c|0;return Oa[a&1](b|0,c|0)|0}function Hf(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;return Pa[a&1](b|0,c|0,d|0,e|0,f|0)|0}function If(a,b,c,d,e,f,g,h){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;Qa[a&7](b|0,c|0,d|0,e|0,f|0,g|0,h|0)}function Jf(a,b,c){a=a|0;b=b|0;c=c|0;aa(0);return 0}function Kf(a,b,c,d,e,f,g,h,i,j,k){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0;aa(1)}function Lf(a,b,c,d,e,f,g,h,i,j){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;aa(2)}function Mf(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;aa(3)}function Nf(a){a=a|0;aa(4)}function Of(a,b){a=a|0;b=b|0;aa(5)}function Pf(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;aa(6);return 0}function Qf(a,b,c,d,e,f,g,h,i){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;aa(7)}function Rf(a,b,c,d,e,f,g,h,i,j,k,l){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0;l=l|0;aa(8)}function Sf(a){a=a|0;aa(9);return 0}function Tf(a,b,c){a=a|0;b=b|0;c=c|0;aa(10)}function Uf(a,b,c,d,e,f,g,h){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;aa(11)}function Vf(a,b,c,d){a=a|0;b=b|0;c=c|0;d=d|0;aa(12);return 0}function Wf(a,b,c,d,e,f){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;aa(13)}function Xf(a,b){a=a|0;b=b|0;aa(14);return 0}function Yf(a,b,c,d,e){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;aa(15);return 0}function 
Zf(a,b,c,d,e,f,g){a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;aa(16)}
+
+
+
+
+// EMSCRIPTEN_END_FUNCS
+var Aa=[Jf,Id];var Ba=[Kf,Yc,Zc,_c,$c,dd,ed,fd];var Ca=[Lf,ce];var Da=[Mf,ld,md,Mf];var Ea=[Nf,ac,Lc,Qc,Rc,Sc,Tc,Nf];var Fa=[Of,Jc,Mc,Nc,Oc,Pc,Bd,qe,Ae,Of,Of,Of,Of,Of,Of,Of];var Ga=[Pf,Gd];var Ha=[Qf,gd];var Ia=[Rf,hd,id,Rf];var Ja=[Sf,Zb,$b,se,re,Sf,Sf,Sf];var Ka=[Tf,Fc,Gc,Hc,Ic,Kc,Tf,Tf];var La=[Uf,Uc,Vc,Wc,Xc,ad,bd,cd];var Ma=[Vf,_b];var Na=[Wf,Ec,jd,kd];var Oa=[Xf,dc];var Pa=[Yf,Hd];var Qa=[Zf,Ye,Ze,_e,Xe,Zf,Zf,Zf];return{_i64Subtract:ff,_free:af,_bpg_decoder_decode:Pe,_bpg_decoder_start:Je,_realloc:bf,_i64Add:gf,_bpg_decoder_open:Oe,_bitshift64Ashr:ef,_strlen:hf,_bpg_decoder_get_info:Ie,_memset:jf,_malloc:$e,_memcpy:mf,_bpg_decoder_get_line:Le,_bpg_decoder_close:Re,_bpg_decoder_get_frame_duration:Ke,_llvm_bswap_i32:lf,_bitshift64Shl:kf,runPostSets:df,stackAlloc:Ra,stackSave:Sa,stackRestore:Ta,setThrew:Ua,setTempRet0:Xa,getTempRet0:Ya,dynCall_iiii:sf,dynCall_viiiiiiiiiii:tf,dynCall_viiiiiiiiii:uf,dynCall_viiiii:vf,dynCall_vi:wf,dynCall_vii:xf,dynCall_iiiiiii:yf,dynCall_viiiiiiiii:zf,dynCall_viiiiiiiiiiii:Af,dynCall_ii:Bf,dynCall_viii:Cf,dynCall_viiiiiiii:Df,dynCall_iiiii:Ef,dynCall_viiiiii:Ff,dynCall_iii:Gf,dynCall_iiiiii:Hf,dynCall_viiiiiii:If}})
+
+
+// EMSCRIPTEN_END_ASM
+(Module.asmGlobalArg,Module.asmLibraryArg,buffer);var _i64Subtract=Module["_i64Subtract"]=asm["_i64Subtract"];var _free=Module["_free"]=asm["_free"];var _bpg_decoder_decode=Module["_bpg_decoder_decode"]=asm["_bpg_decoder_decode"];var _bpg_decoder_start=Module["_bpg_decoder_start"]=asm["_bpg_decoder_start"];var _realloc=Module["_realloc"]=asm["_realloc"];var _i64Add=Module["_i64Add"]=asm["_i64Add"];var _bpg_decoder_open=Module["_bpg_decoder_open"]=asm["_bpg_decoder_open"];var _bitshift64Ashr=Module["_bitshift64Ashr"]=asm["_bitshift64Ashr"];var _strlen=Module["_strlen"]=asm["_strlen"];var _bpg_decoder_get_info=Module["_bpg_decoder_get_info"]=asm["_bpg_decoder_get_info"];var _memset=Module["_memset"]=asm["_memset"];var _malloc=Module["_malloc"]=asm["_malloc"];var _memcpy=Module["_memcpy"]=asm["_memcpy"];var _bpg_decoder_get_line=Module["_bpg_decoder_get_line"]=asm["_bpg_decoder_get_line"];var _bpg_decoder_close=Module["_bpg_decoder_close"]=asm["_bpg_decoder_close"];var _bpg_decoder_get_frame_duration=Module["_bpg_decoder_get_frame_duration"]=asm["_bpg_decoder_get_frame_duration"];var _llvm_bswap_i32=Module["_llvm_bswap_i32"]=asm["_llvm_bswap_i32"];var _bitshift64Shl=Module["_bitshift64Shl"]=asm["_bitshift64Shl"];var runPostSets=Module["runPostSets"]=asm["runPostSets"];var dynCall_iiii=Module["dynCall_iiii"]=asm["dynCall_iiii"];var dynCall_viiiiiiiiiii=Module["dynCall_viiiiiiiiiii"]=asm["dynCall_viiiiiiiiiii"];var dynCall_viiiiiiiiii=Module["dynCall_viiiiiiiiii"]=asm["dynCall_viiiiiiiiii"];var dynCall_viiiii=Module["dynCall_viiiii"]=asm["dynCall_viiiii"];var dynCall_vi=Module["dynCall_vi"]=asm["dynCall_vi"];var dynCall_vii=Module["dynCall_vii"]=asm["dynCall_vii"];var dynCall_iiiiiii=Module["dynCall_iiiiiii"]=asm["dynCall_iiiiiii"];var dynCall_viiiiiiiii=Module["dynCall_viiiiiiiii"]=asm["dynCall_viiiiiiiii"];var dynCall_viiiiiiiiiiii=Module["dynCall_viiiiiiiiiiii"]=asm["dynCall_viiiiiiiiiiii"];var dynCall_ii=Module["dynCall_ii"]=asm["dynCall_ii"];var 
dynCall_viii=Module["dynCall_viii"]=asm["dynCall_viii"];var dynCall_viiiiiiii=Module["dynCall_viiiiiiii"]=asm["dynCall_viiiiiiii"];var dynCall_iiiii=Module["dynCall_iiiii"]=asm["dynCall_iiiii"];var dynCall_viiiiii=Module["dynCall_viiiiii"]=asm["dynCall_viiiiii"];var dynCall_iii=Module["dynCall_iii"]=asm["dynCall_iii"];var dynCall_iiiiii=Module["dynCall_iiiiii"]=asm["dynCall_iiiiii"];var dynCall_viiiiiii=Module["dynCall_viiiiiii"]=asm["dynCall_viiiiiii"];Runtime.stackAlloc=asm["stackAlloc"];Runtime.stackSave=asm["stackSave"];Runtime.stackRestore=asm["stackRestore"];Runtime.setTempRet0=asm["setTempRet0"];Runtime.getTempRet0=asm["getTempRet0"];var i64Math=null;if(memoryInitializer){if(typeof Module["locateFile"]==="function"){memoryInitializer=Module["locateFile"](memoryInitializer)}else if(Module["memoryInitializerPrefixURL"]){memoryInitializer=Module["memoryInitializerPrefixURL"]+memoryInitializer}if(ENVIRONMENT_IS_NODE||ENVIRONMENT_IS_SHELL){var data=Module["readBinary"](memoryInitializer);HEAPU8.set(data,STATIC_BASE)}else{addRunDependency("memory initializer");Browser.asyncLoad(memoryInitializer,(function(data){HEAPU8.set(data,STATIC_BASE);removeRunDependency("memory initializer")}),(function(data){throw"could not load memory initializer "+memoryInitializer}))}}function ExitStatus(status){this.name="ExitStatus";this.message="Program terminated with exit("+status+")";this.status=status}ExitStatus.prototype=new Error;ExitStatus.prototype.constructor=ExitStatus;var initialStackTop;var preloadStartTime=null;var calledMain=false;dependenciesFulfilled=function runCaller(){if(!Module["calledRun"]&&shouldRunNow)run();if(!Module["calledRun"])dependenciesFulfilled=runCaller};function run(args){args=args||Module["arguments"];if(preloadStartTime===null)preloadStartTime=Date.now();if(runDependencies>0){return}preRun();if(runDependencies>0)return;if(Module["calledRun"])return;function 
doRun(){if(Module["calledRun"])return;Module["calledRun"]=true;if(ABORT)return;ensureInitRuntime();preMain();if(ENVIRONMENT_IS_WEB&&preloadStartTime!==null){Module.printErr("pre-main prep time: "+(Date.now()-preloadStartTime)+" ms")}postRun()}if(Module["setStatus"]){Module["setStatus"]("Running...");setTimeout((function(){setTimeout((function(){Module["setStatus"]("")}),1);doRun()}),1)}else{doRun()}}Module["run"]=Module.run=run;function exit(status){if(Module["noExitRuntime"]){return}ABORT=true;EXITSTATUS=status;STACKTOP=initialStackTop;exitRuntime();if(ENVIRONMENT_IS_NODE){process["stdout"]["once"]("drain",(function(){process["exit"](status)}));console.log(" ");setTimeout((function(){process["exit"](status)}),500)}else if(ENVIRONMENT_IS_SHELL&&typeof quit==="function"){quit(status)}throw new ExitStatus(status)}Module["exit"]=Module.exit=exit;function abort(text){if(text){Module.print(text);Module.printErr(text)}ABORT=true;EXITSTATUS=1;var extra="\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.";throw"abort() at "+stackTrace()+extra}Module["abort"]=Module.abort=abort;if(Module["preInit"]){if(typeof Module["preInit"]=="function")Module["preInit"]=[Module["preInit"]];while(Module["preInit"].length>0){Module["preInit"].pop()()}}var 
shouldRunNow=true;if(Module["noInitialRun"]){shouldRunNow=false}run();window["BPGDecoder"]=(function(ctx){this.ctx=ctx;this["imageData"]=null;this["onload"]=null;this["frames"]=null;this["loop_count"]=0});window["BPGDecoder"].prototype={malloc:Module["cwrap"]("malloc","number",["number"]),free:Module["cwrap"]("free","void",["number"]),bpg_decoder_open:Module["cwrap"]("bpg_decoder_open","number",[]),bpg_decoder_decode:Module["cwrap"]("bpg_decoder_decode","number",["number","array","number"]),bpg_decoder_get_info:Module["cwrap"]("bpg_decoder_get_info","number",["number","number"]),bpg_decoder_start:Module["cwrap"]("bpg_decoder_start","number",["number","number"]),bpg_decoder_get_frame_duration:Module["cwrap"]("bpg_decoder_get_frame_duration","void",["number","number","number"]),bpg_decoder_get_line:Module["cwrap"]("bpg_decoder_get_line","number",["number","number"]),bpg_decoder_close:Module["cwrap"]("bpg_decoder_close","void",["number"]),load:(function(url){var request=new XMLHttpRequest;var this1=this;request.open("get",url,true);request.responseType="arraybuffer";request.onload=(function(event){this1._onload(request,event)});request.send()}),_onload:(function(request,event){var data=request.response;var array=new Uint8Array(data);var img,w,h,img_info_buf,cimg,p0,rgba_line,w4,frame_count;var heap8,heap16,heap32,dst,v,i,y,func,duration,frames,loop_count;img=this.bpg_decoder_open();if(this.bpg_decoder_decode(img,array,array.length)<0){console.log("could not decode 
image");return}img_info_buf=this.malloc(5*4);this.bpg_decoder_get_info(img,img_info_buf);heap8=Module["HEAPU8"];heap16=Module["HEAPU16"];heap32=Module["HEAPU32"];w=heap32[img_info_buf>>2];h=heap32[img_info_buf+4>>2];loop_count=heap16[img_info_buf+16>>1];w4=w*4;rgba_line=this.malloc(w4);frame_count=0;frames=[];for(;;){if(this.bpg_decoder_start(img,1)<0)break;this.bpg_decoder_get_frame_duration(img,img_info_buf,img_info_buf+4);duration=heap32[img_info_buf>>2]*1e3/heap32[img_info_buf+4>>2];cimg=this.ctx.createImageData(w,h);dst=cimg.data;p0=0;for(y=0;y<h;y++){this.bpg_decoder_get_line(img,rgba_line);for(i=0;i<w4;i=i+1|0){dst[p0]=heap8[rgba_line+i|0]|0;p0=p0+1|0}}frames[frame_count++]={"img":cimg,"duration":duration}}this.free(rgba_line);this.free(img_info_buf);this.bpg_decoder_close(img);this["loop_count"]=loop_count;this["frames"]=frames;this["imageData"]=frames[0]["img"];if(this["onload"])this["onload"]()})};window.onload=(function(){var i,n,el,tab,tab1,url,dec,canvas,id,style,ctx,dw,dh;tab=document.images;n=tab.length;tab1=[];for(i=0;i<n;i++){el=tab[i];url=el.src;if(url.substr(-4,4).toLowerCase()==".bpg"){tab1[tab1.length]=el}}n=tab1.length;for(i=0;i<n;i++){el=tab1[i];url=el.src;canvas=document.createElement("canvas");if(el.id)canvas.id=el.id;if(el.className)canvas.className=el.className;dw=el.getAttribute("width")|0;if(dw){canvas.style.width=dw+"px"}dh=el.getAttribute("height")|0;if(dh){canvas.style.height=dh+"px"}el.parentNode.replaceChild(canvas,el);ctx=canvas.getContext("2d");dec=new BPGDecoder(ctx);dec.onload=(function(canvas,ctx){var dec=this;var frames=this["frames"];var imageData=frames[0]["img"];function next_frame(){var 
frame_index=dec.frame_index;if(++frame_index>=frames.length){if(dec["loop_count"]==0||dec.loop_counter<dec["loop_count"]){frame_index=0;dec.loop_counter++}else{frame_index=-1}}if(frame_index>=0){dec.frame_index=frame_index;ctx.putImageData(frames[frame_index]["img"],0,0);setTimeout(next_frame,frames[frame_index]["duration"])}}canvas.width=imageData.width;canvas.height=imageData.height;ctx.putImageData(imageData,0,0);if(frames.length>1){dec.frame_index=0;dec.loop_counter=0;setTimeout(next_frame,frames[0]["duration"])}}).bind(dec,canvas,ctx);dec.load(url)}})}))()
+
+
+
+
Binary file html/clock.bpg has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/html/index.html	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,38 @@
+<html>
+<head>
+<meta charset="UTF-8"> 
+<!-- The following scripts are available (sorted by increasing size):
+     bpgdec8.js  : 8 bit only, no animation
+     bpgdec.js   : up to 14 bits, no animation
+     bpgdec8a.js : 8 bit only, animations
+-->
+<script type="text/javascript" src="bpgdec8a.js"></script>
+</head>
+<body>
+<h1>BPG Decoding Demo</h1>
+IMG tag:<br>
+<img src="lena512color.bpg">
+<p>
+IMG tag with animated image:<br>
+<img src="clock.bpg">
+<p>
+Dynamic loading in a canvas:<br>
+<canvas id="mycanvas" width="512" height="512"></canvas>
+<script>
+(function ()
+{
+    var img, canvas, ctx;
+
+    canvas = document.getElementById("mycanvas");
+    ctx = canvas.getContext("2d");
+
+    img = new BPGDecoder(ctx);
+    img.onload = function() {
+        /* draw the image to the canvas */
+        ctx.putImageData(this.imageData, 0, 0);
+    };
+    img.load("lena512color.bpg");
+})();
+</script>
+</body>
+</html>
Binary file html/lena512color.bpg has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/Makefile	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,54 @@
+CC=gcc
+CFLAGS=-m64 -Os -Wall -MMD -fno-asynchronous-unwind-tables -g -Wno-sign-compare -Wno-unused-but-set-variable
+LDFLAGS= -m64 -g
+CFLAGS+=-D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE
+PWD:=$(shell pwd)
+CFLAGS+=-I$(PWD)
+
+CXX=g++
+CXXFLAGS=$(CFLAGS)
+
+PROGS=TAppEncoder jsenc
+
+
+all: $(PROGS)
+
+ENC_OBJS=$(addprefix TLibEncoder/, SyntaxElementWriter.o TEncSbac.o \
+TEncBinCoderCABACCounter.o TEncGOP.o\
+TEncSampleAdaptiveOffset.o TEncBinCoderCABAC.o TEncAnalyze.o\
+TEncEntropy.o TEncTop.o SEIwrite.o TEncPic.o TEncRateCtrl.o\
+WeightPredAnalysis.o TEncSlice.o TEncCu.o NALwrite.o TEncCavlc.o\
+TEncSearch.o TEncPreanalyzer.o)
+ENC_OBJS+=TLibVideoIO/TVideoIOYuv.o
+ENC_OBJS+=$(addprefix TLibCommon/, TComWeightPrediction.o TComLoopFilter.o\
+TComBitStream.o TComMotionInfo.o TComSlice.o ContextModel3DBuffer.o\
+TComPic.o TComRdCostWeightPrediction.o TComTU.o TComPicSym.o\
+TComPicYuv.o TComYuv.o TComTrQuant.o TComInterpolationFilter.o\
+ContextModel.o TComSampleAdaptiveOffset.o SEI.o TComPrediction.o\
+TComDataCU.o TComChromaFormat.o Debug.o TComRom.o\
+TComPicYuvMD5.o TComRdCost.o TComPattern.o TComCABACTables.o)
+ENC_OBJS+=libmd5/libmd5.o
+ENC_OBJS+=TAppEncCfg.o TAppEncTop.o program_options_lite.o 
+
+TAppEncoder: $(ENC_OBJS) encmain.o
+	$(CXX) -o $@ $(LDFLAGS) $^ -lm
+
+jsenc: jsenc.o jctvc_glue.o $(ENC_OBJS)
+	$(CXX) -o $@ $(LDFLAGS) $^ -lpng -lm
+
+%.o: %.c
+	$(CC) $(CFLAGS) -c -o $@ $<
+
+%.o: %.cpp
+	$(CXX) $(CXXFLAGS) -c -o $@ $<
+
+clean:
+	rm -f *.o *.d *~ \
+	TLibEncoder/*.o TLibEncoder/*.d TLibEncoder/*~ \
+	TLibVideoIO/*.o TLibVideoIO/*.d TLibVideoIO/*~ \
+        TLibCommon/*.o TLibCommon/*.d TLibCommon/*~
+
+-include $(wildcard *.d)
+-include $(wildcard TLibEncoder/*.d)
+-include $(wildcard TLibVideoIO/*.d)
+-include $(wildcard TLibCommon/*.d)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TAppEncCfg.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2405 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TAppEncCfg.cpp
+    \brief    Handle encoder configuration parameters
+*/
+
+#include <stdlib.h>
+#include <cassert>
+#include <cstring>
+#include <string>
+#include <limits>
+#include "TLibCommon/TComRom.h"
+#include "TAppEncCfg.h"
+#include "program_options_lite.h"
+#include "TLibEncoder/TEncRateCtrl.h"
+#ifdef WIN32
+#define strdup _strdup
+#endif
+
+#define MACRO_TO_STRING_HELPER(val) #val
+#define MACRO_TO_STRING(val) MACRO_TO_STRING_HELPER(val)
+
+using namespace std;
+namespace po = df::program_options_lite;
+
+
+
+enum ExtendedProfileName // this is used for determining profile strings, where multiple profiles map to a single profile idc with various constraint flag combinations
+{
+  NONE = 0,
+  MAIN = 1,
+  MAIN10 = 2,
+  MAINSTILLPICTURE = 3,
+  MAINREXT = 4,
+  HIGHTHROUGHPUTREXT = 5, // Placeholder profile for development
+  // The following are RExt profiles, which would map to the MAINREXT profile idc.
+  // The enumeration indicates the bit-depth constraint in the bottom 2 digits
+  //                           the chroma format in the next digit
+  //                           the intra constraint in the top digit
+  MONOCHROME_8      = 1008,
+  MONOCHROME_12     = 1012,
+  MONOCHROME_16     = 1016,
+  MAIN_12           = 1112,
+  MAIN_422_10       = 1210,
+  MAIN_422_12       = 1212,
+  MAIN_444          = 1308,
+  MAIN_444_10       = 1310,
+  MAIN_444_12       = 1312,
+  MAIN_444_16       = 1316, // non-standard profile definition, used for development purposes
+  MAIN_INTRA        = 2108,
+  MAIN_10_INTRA     = 2110,
+  MAIN_12_INTRA     = 2112,
+  MAIN_422_10_INTRA = 2210,
+  MAIN_422_12_INTRA = 2212,
+  MAIN_444_INTRA    = 2308,
+  MAIN_444_10_INTRA = 2310,
+  MAIN_444_12_INTRA = 2312,
+  MAIN_444_16_INTRA = 2316
+};
+
+
+//! \ingroup TAppEncoder
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / initialization / destroy
+// ====================================================================================================================
+
+TAppEncCfg::TAppEncCfg()
+: m_pchInputFile()
+, m_pchBitstreamFile()
+, m_pchReconFile()
+, m_inputColourSpaceConvert(IPCOLOURSPACE_UNCHANGED)
+, m_snrInternalColourSpace(false)
+, m_outputInternalColourSpace(false)
+, m_pchdQPFile()
+, m_scalingListFile()
+{
+  m_aidQP = NULL;
+  m_startOfCodedInterval = NULL;
+  m_codedPivotValue = NULL;
+  m_targetPivotValue = NULL;
+}
+
+TAppEncCfg::~TAppEncCfg()
+{
+  if ( m_aidQP )
+  {
+    delete[] m_aidQP;
+  }
+  if ( m_startOfCodedInterval )
+  {
+    delete[] m_startOfCodedInterval;
+    m_startOfCodedInterval = NULL;
+  }
+   if ( m_codedPivotValue )
+  {
+    delete[] m_codedPivotValue;
+    m_codedPivotValue = NULL;
+  }
+  if ( m_targetPivotValue )
+  {
+    delete[] m_targetPivotValue;
+    m_targetPivotValue = NULL;
+  }
+
+  free(m_pchInputFile);
+  free(m_pchBitstreamFile);
+  free(m_pchReconFile);
+  free(m_pchdQPFile);
+  free(m_scalingListFile);
+}
+
+Void TAppEncCfg::create()
+{
+}
+
+Void TAppEncCfg::destroy()
+{
+}
+
+std::istringstream &operator>>(std::istringstream &in, GOPEntry &entry)     //input
+{
+  in>>entry.m_sliceType;
+  in>>entry.m_POC;
+  in>>entry.m_QPOffset;
+  in>>entry.m_QPFactor;
+  in>>entry.m_tcOffsetDiv2;
+  in>>entry.m_betaOffsetDiv2;
+  in>>entry.m_temporalId;
+  in>>entry.m_numRefPicsActive;
+  in>>entry.m_numRefPics;
+  for ( Int i = 0; i < entry.m_numRefPics; i++ )
+  {
+    in>>entry.m_referencePics[i];
+  }
+  in>>entry.m_interRPSPrediction;
+#if AUTO_INTER_RPS
+  if (entry.m_interRPSPrediction==1)
+  {
+    in>>entry.m_deltaRPS;
+    in>>entry.m_numRefIdc;
+    for ( Int i = 0; i < entry.m_numRefIdc; i++ )
+    {
+      in>>entry.m_refIdc[i];
+    }
+  }
+  else if (entry.m_interRPSPrediction==2)
+  {
+    in>>entry.m_deltaRPS;
+  }
+#else
+  if (entry.m_interRPSPrediction)
+  {
+    in>>entry.m_deltaRPS;
+    in>>entry.m_numRefIdc;
+    for ( Int i = 0; i < entry.m_numRefIdc; i++ )
+    {
+      in>>entry.m_refIdc[i];
+    }
+  }
+#endif
+  return in;
+}
+
+Bool confirmPara(Bool bflag, const Char* message);
+
+static inline ChromaFormat numberToChromaFormat(const Int val)
+{
+  switch (val)
+  {
+    case 400: return CHROMA_400; break;
+    case 420: return CHROMA_420; break;
+    case 422: return CHROMA_422; break;
+    case 444: return CHROMA_444; break;
+    default:  return NUM_CHROMA_FORMAT;
+  }
+}
+
+static const struct MapStrToProfile
+{
+  const Char* str;
+  Profile::Name value;
+}
+strToProfile[] =
+{
+  {"none",                 Profile::NONE               },
+  {"main",                 Profile::MAIN               },
+  {"main10",               Profile::MAIN10             },
+  {"main-still-picture",   Profile::MAINSTILLPICTURE   },
+  {"main-RExt",            Profile::MAINREXT           },
+  {"high-throughput-RExt", Profile::HIGHTHROUGHPUTREXT }
+};
+
+static const struct MapStrToExtendedProfile
+{
+  const Char* str;
+  ExtendedProfileName value;
+}
+strToExtendedProfile[] =
+{
+    {"none",               NONE             },
+    {"main",               MAIN             },
+    {"main10",             MAIN10           },
+    {"main-still-picture", MAINSTILLPICTURE },
+    {"main-RExt",          MAINREXT         },
+    {"high-throughput-RExt", HIGHTHROUGHPUTREXT },
+    {"monochrome",         MONOCHROME_8     },
+    {"monochrome12",       MONOCHROME_12    },
+    {"monochrome16",       MONOCHROME_16    },
+    {"main12",             MAIN_12          },
+    {"main_422_10",        MAIN_422_10      },
+    {"main_422_12",        MAIN_422_12      },
+    {"main_444",           MAIN_444         },
+    {"main_444_10",        MAIN_444_10      },
+    {"main_444_12",        MAIN_444_12      },
+    {"main_444_16",        MAIN_444_16      },
+    {"main_intra",         MAIN_INTRA       },
+    {"main_10_intra",      MAIN_10_INTRA    },
+    {"main_12_intra",      MAIN_12_INTRA    },
+    {"main_422_10_intra",  MAIN_422_10_INTRA},
+    {"main_422_12_intra",  MAIN_422_12_INTRA},
+    {"main_444_intra",     MAIN_444_INTRA   },
+    {"main_444_10_intra",  MAIN_444_10_INTRA},
+    {"main_444_12_intra",  MAIN_444_12_INTRA},
+    {"main_444_16_intra",  MAIN_444_16_INTRA}
+};
+
+static const ExtendedProfileName validRExtProfileNames[2/* intraConstraintFlag*/][4/* bit depth constraint 8=0, 10=1, 12=2, 16=3*/][4/*chroma format*/]=
+{
+    {
+        { MONOCHROME_8,  NONE,          NONE,              MAIN_444          }, // 8-bit  inter for 400, 420, 422 and 444
+        { NONE,          NONE,          MAIN_422_10,       MAIN_444_10       }, // 10-bit inter for 400, 420, 422 and 444
+        { MONOCHROME_12, MAIN_12,       MAIN_422_12,       MAIN_444_12       }, // 12-bit inter for 400, 420, 422 and 444
+        { MONOCHROME_16, NONE,          NONE,              MAIN_444_16       }  // 16-bit inter for 400, 420, 422 and 444 (the latter is non standard used for development)
+    },
+    {
+        { NONE,          MAIN_INTRA,    NONE,              MAIN_444_INTRA    }, // 8-bit  intra for 400, 420, 422 and 444
+        { NONE,          MAIN_10_INTRA, MAIN_422_10_INTRA, MAIN_444_10_INTRA }, // 10-bit intra for 400, 420, 422 and 444
+        { NONE,          MAIN_12_INTRA, MAIN_422_12_INTRA, MAIN_444_12_INTRA }, // 12-bit intra for 400, 420, 422 and 444
+        { NONE,          NONE,          NONE,              MAIN_444_16_INTRA }  // 16-bit intra for 400, 420, 422 and 444
+    }
+};
+
+static const struct MapStrToTier
+{
+  const Char* str;
+  Level::Tier value;
+}
+strToTier[] =
+{
+  {"main", Level::MAIN},
+  {"high", Level::HIGH},
+};
+
+static const struct MapStrToLevel
+{
+  const Char* str;
+  Level::Name value;
+}
+strToLevel[] =
+{
+  {"none",Level::NONE},
+  {"1",   Level::LEVEL1},
+  {"2",   Level::LEVEL2},
+  {"2.1", Level::LEVEL2_1},
+  {"3",   Level::LEVEL3},
+  {"3.1", Level::LEVEL3_1},
+  {"4",   Level::LEVEL4},
+  {"4.1", Level::LEVEL4_1},
+  {"5",   Level::LEVEL5},
+  {"5.1", Level::LEVEL5_1},
+  {"5.2", Level::LEVEL5_2},
+  {"6",   Level::LEVEL6},
+  {"6.1", Level::LEVEL6_1},
+  {"6.2", Level::LEVEL6_2},
+  {"8.5", Level::LEVEL8_5},
+};
+
+static const struct MapStrToCostMode
+{
+  const Char* str;
+  CostMode    value;
+}
+strToCostMode[] =
+{
+  {"lossy",                     COST_STANDARD_LOSSY},
+  {"sequence_level_lossless",   COST_SEQUENCE_LEVEL_LOSSLESS},
+  {"lossless",                  COST_LOSSLESS_CODING},
+  {"mixed_lossless_lossy",      COST_MIXED_LOSSLESS_LOSSY_CODING}
+};
+
+static const struct MapStrToScalingListMode
+{
+  const Char* str;
+  ScalingListMode value;
+}
+strToScalingListMode[] =
+{
+  {"0",       SCALING_LIST_OFF},
+  {"1",       SCALING_LIST_DEFAULT},
+  {"2",       SCALING_LIST_FILE_READ},
+  {"off",     SCALING_LIST_OFF},
+  {"default", SCALING_LIST_DEFAULT},
+  {"file",    SCALING_LIST_FILE_READ}
+};
+
+template<typename T, typename P>
+static std::string enumToString(P map[], UInt mapLen, const T val)
+{
+  for (UInt i = 0; i < mapLen; i++)
+  {
+    if (val == map[i].value)
+    {
+      return map[i].str;
+    }
+  }
+  return std::string();
+}
+
+template<typename T, typename P>
+static istream& readStrToEnum(P map[], UInt mapLen, istream &in, T &val)
+{
+  string str;
+  in >> str;
+
+  for (UInt i = 0; i < mapLen; i++)
+  {
+    if (str == map[i].str)
+    {
+      val = map[i].value;
+      goto found;
+    }
+  }
+  /* not found */
+  in.setstate(ios::failbit);
+found:
+  return in;
+}
+
+//inline to prevent compiler warnings for "unused static function"
+
+static inline istream& operator >> (istream &in, ExtendedProfileName &profile)
+{
+  return readStrToEnum(strToExtendedProfile, sizeof(strToExtendedProfile)/sizeof(*strToExtendedProfile), in, profile);
+}
+
+namespace Level
+{
+  static inline istream& operator >> (istream &in, Tier &tier)
+  {
+    return readStrToEnum(strToTier, sizeof(strToTier)/sizeof(*strToTier), in, tier);
+  }
+
+  static inline istream& operator >> (istream &in, Name &level)
+  {
+    return readStrToEnum(strToLevel, sizeof(strToLevel)/sizeof(*strToLevel), in, level);
+  }
+}
+
+static inline istream& operator >> (istream &in, CostMode &mode)
+{
+  return readStrToEnum(strToCostMode, sizeof(strToCostMode)/sizeof(*strToCostMode), in, mode);
+}
+
+static inline istream& operator >> (istream &in, ScalingListMode &mode)
+{
+  return readStrToEnum(strToScalingListMode, sizeof(strToScalingListMode)/sizeof(*strToScalingListMode), in, mode);
+}
+
+/// Helper for command-line options that accept a list of values, bundling the
+/// permitted per-value range and the permitted number of entries together
+/// with the parsed values themselves. Validation is performed by the
+/// corresponding stream-extraction operators.
+template <class T>
+struct SMultiValueInput
+{
+  const T              minValIncl;       // minimum permitted value (inclusive)
+  const T              maxValIncl;       // maximum permitted value (inclusive).
+                                         // NOTE(review): despite the original "Use 0 for unlimited"
+                                         // note, the parsers compare against maxValIncl directly;
+                                         // callers pass numeric_limits<T>::max() for "unlimited".
+  const std::size_t    minNumValuesIncl; // minimum permitted number of entries (inclusive)
+  const std::size_t    maxNumValuesIncl; // maximum permitted number of entries (inclusive). Use 0 for unlimited
+        std::vector<T> values;           // the parsed entries
+  SMultiValueInput() : minValIncl(0), maxValIncl(0), minNumValuesIncl(0), maxNumValuesIncl(0), values() { }
+  // defaults is taken by const reference (was non-const) so that temporaries
+  // and const vectors can be used to seed the value list.
+  SMultiValueInput(const std::vector<T> &defaults) : minValIncl(0), maxValIncl(0), minNumValuesIncl(0), maxNumValuesIncl(0), values(defaults) { }
+  SMultiValueInput(const T &minValue, const T &maxValue, std::size_t minNumberValues=0, std::size_t maxNumberValues=0)
+    : minValIncl(minValue), maxValIncl(maxValue), minNumValuesIncl(minNumberValues), maxNumValuesIncl(maxNumberValues), values()  { }
+  SMultiValueInput(const T &minValue, const T &maxValue, std::size_t minNumberValues, std::size_t maxNumberValues, const T* defValues, const UInt numDefValues)
+    : minValIncl(minValue), maxValIncl(maxValue), minNumValuesIncl(minNumberValues), maxNumValuesIncl(maxNumberValues), values(defValues, defValues+numDefValues)  { }
+  // Assignment replaces only the stored values; the range/count limits are const.
+  SMultiValueInput<T> &operator=(const std::vector<T> &userValues) { values=userValues; return *this; }
+  SMultiValueInput<T> &operator=(const SMultiValueInput<T> &userValues) { values=userValues.values; return *this; }
+};
+
+/// Extract a comma/whitespace-separated list of unsigned integers into a
+/// SMultiValueInput<UInt>, enforcing its per-value range and entry-count
+/// limits. On any violation the stream's failbit is set and parsing stops.
+static inline istream& operator >> (istream &in, SMultiValueInput<UInt> &values)
+{
+  values.values.clear();
+  string str;
+  in >> str;
+  if (!str.empty())
+  {
+    const Char *pStr=str.c_str();
+    // soak up any whitespace (cast to unsigned char avoids undefined
+    // behaviour when char is signed and negative - CERT STR37-C)
+    for(;isspace((unsigned char)*pStr);pStr++);
+
+    while (*pStr != 0)
+    {
+      Char *eptr;
+      UInt val=strtoul(pStr, &eptr, 0);
+      // each entry must end at end-of-string, whitespace or a comma
+      if (*eptr!=0 && !isspace((unsigned char)*eptr) && *eptr!=',')
+      {
+        in.setstate(ios::failbit);
+        break;
+      }
+      if (val<values.minValIncl || val>values.maxValIncl)
+      {
+        in.setstate(ios::failbit); // value outside permitted range
+        break;
+      }
+
+      if (values.maxNumValuesIncl != 0 && values.values.size() >= values.maxNumValuesIncl)
+      {
+        in.setstate(ios::failbit); // too many entries
+        break;
+      }
+      values.values.push_back(val);
+      // soak up any whitespace and up to 1 comma.
+      pStr=eptr;
+      for(;isspace((unsigned char)*pStr);pStr++);
+      if (*pStr == ',') pStr++;
+      for(;isspace((unsigned char)*pStr);pStr++);
+    }
+  }
+  if (values.values.size() < values.minNumValuesIncl)
+  {
+    in.setstate(ios::failbit); // fewer entries than required
+  }
+  return in;
+}
+
+/// Extract a comma/whitespace-separated list of signed integers into a
+/// SMultiValueInput<Int>, enforcing its per-value range and entry-count
+/// limits. On any violation the stream's failbit is set and parsing stops.
+static inline istream& operator >> (istream &in, SMultiValueInput<Int> &values)
+{
+  values.values.clear();
+  string str;
+  in >> str;
+  if (!str.empty())
+  {
+    const Char *pStr=str.c_str();
+    // soak up any whitespace (cast to unsigned char avoids undefined
+    // behaviour when char is signed and negative - CERT STR37-C)
+    for(;isspace((unsigned char)*pStr);pStr++);
+
+    while (*pStr != 0)
+    {
+      Char *eptr;
+      Int val=strtol(pStr, &eptr, 0);
+      // each entry must end at end-of-string, whitespace or a comma
+      if (*eptr!=0 && !isspace((unsigned char)*eptr) && *eptr!=',')
+      {
+        in.setstate(ios::failbit);
+        break;
+      }
+      if (val<values.minValIncl || val>values.maxValIncl)
+      {
+        in.setstate(ios::failbit); // value outside permitted range
+        break;
+      }
+
+      if (values.maxNumValuesIncl != 0 && values.values.size() >= values.maxNumValuesIncl)
+      {
+        in.setstate(ios::failbit); // too many entries
+        break;
+      }
+      values.values.push_back(val);
+      // soak up any whitespace and up to 1 comma.
+      pStr=eptr;
+      for(;isspace((unsigned char)*pStr);pStr++);
+      if (*pStr == ',') pStr++;
+      for(;isspace((unsigned char)*pStr);pStr++);
+    }
+  }
+  if (values.values.size() < values.minNumValuesIncl)
+  {
+    in.setstate(ios::failbit); // fewer entries than required
+  }
+  return in;
+}
+
+/// Extract a comma/whitespace-separated list of 0/1 flags into a
+/// SMultiValueInput<Bool>. Each token is parsed as a signed integer,
+/// range-checked, and stored as (val != 0). On any violation the stream's
+/// failbit is set and parsing stops.
+static inline istream& operator >> (istream &in, SMultiValueInput<Bool> &values)
+{
+  values.values.clear();
+  string str;
+  in >> str;
+  if (!str.empty())
+  {
+    const Char *pStr=str.c_str();
+    // soak up any whitespace (cast to unsigned char avoids undefined
+    // behaviour when char is signed and negative - CERT STR37-C)
+    for(;isspace((unsigned char)*pStr);pStr++);
+
+    while (*pStr != 0)
+    {
+      Char *eptr;
+      Int val=strtol(pStr, &eptr, 0);
+      // each entry must end at end-of-string, whitespace or a comma
+      if (*eptr!=0 && !isspace((unsigned char)*eptr) && *eptr!=',')
+      {
+        in.setstate(ios::failbit);
+        break;
+      }
+      if (val<Int(values.minValIncl) || val>Int(values.maxValIncl))
+      {
+        in.setstate(ios::failbit); // value outside permitted range
+        break;
+      }
+
+      if (values.maxNumValuesIncl != 0 && values.values.size() >= values.maxNumValuesIncl)
+      {
+        in.setstate(ios::failbit); // too many entries
+        break;
+      }
+      values.values.push_back(val!=0);
+      // soak up any whitespace and up to 1 comma.
+      pStr=eptr;
+      for(;isspace((unsigned char)*pStr);pStr++);
+      if (*pStr == ',') pStr++;
+      for(;isspace((unsigned char)*pStr);pStr++);
+    }
+  }
+  if (values.values.size() < values.minNumValuesIncl)
+  {
+    in.setstate(ios::failbit); // fewer entries than required
+  }
+  return in;
+}
+
+/** Automatically derive the RExt profile constraint fields when they were not
+ *  explicitly configured, following the profile table in JCTVC document Q1013.
+ *  \param bUsingGeneralRExtTools   true if general RExt coding tools are enabled
+ *  \param bUsingChromaQPAdjustment true if the chroma QP adjustment tool is enabled
+ *  \param bUsingExtendedPrecision  true if extended-precision processing is enabled
+ *  \param bIntraConstraintFlag     value of general_intra_constraint_flag
+ *  \param bitDepthConstraint       [out] selected bit-depth constraint
+ *  \param chromaFormatConstraint   [out] selected chroma-format constraint
+ *  \param maxBitDepth              maximum configured internal bit depth
+ *  \param chromaFormat             configured coding chroma format
+ */
+static Void
+automaticallySelectRExtProfile(const Bool bUsingGeneralRExtTools,
+                               const Bool bUsingChromaQPAdjustment,
+                               const Bool bUsingExtendedPrecision,
+                               const Bool bIntraConstraintFlag,
+                               UInt &bitDepthConstraint,
+                               ChromaFormat &chromaFormatConstraint,
+                               const Int  maxBitDepth,
+                               const ChromaFormat chromaFormat)
+{
+  // Try to choose profile, according to table in Q1013.
+  // Round the configured bit depth up to the nearest depth for which a
+  // profile exists: 8, 10, 12 or 16.
+  UInt trialBitDepthConstraint=maxBitDepth;
+  if (trialBitDepthConstraint<8) trialBitDepthConstraint=8;
+  else if (trialBitDepthConstraint==9 || trialBitDepthConstraint==11) trialBitDepthConstraint++;
+  else if (trialBitDepthConstraint>12) trialBitDepthConstraint=16;
+
+  // both format and bit depth constraints are unspecified
+  if (bUsingExtendedPrecision || trialBitDepthConstraint==16)
+  {
+    // 16-bit profiles: 4:4:4 except for the inter monochrome case.
+    bitDepthConstraint = 16;
+    chromaFormatConstraint = (!bIntraConstraintFlag && chromaFormat==CHROMA_400) ? CHROMA_400 : CHROMA_444;
+  }
+  else if (bUsingGeneralRExtTools)
+  {
+    // General RExt tools require a 4:4:4 profile, unless inter monochrome
+    // (which maps to the 16-bit 4:0:0 profile).
+    if (chromaFormat == CHROMA_400 && !bIntraConstraintFlag)
+    {
+      bitDepthConstraint = 16;
+      chromaFormatConstraint = CHROMA_400;
+    }
+    else
+    {
+      bitDepthConstraint = trialBitDepthConstraint;
+      chromaFormatConstraint = CHROMA_444;
+    }
+  }
+  else if (chromaFormat == CHROMA_400)
+  {
+    if (bIntraConstraintFlag)
+    {
+      chromaFormatConstraint = CHROMA_420; // there is no intra 4:0:0 profile.
+      bitDepthConstraint     = trialBitDepthConstraint;
+    }
+    else
+    {
+      chromaFormatConstraint = CHROMA_400;
+      bitDepthConstraint     = trialBitDepthConstraint == 8 ? 8 : 12;
+    }
+  }
+  else
+  {
+    // Default case: keep the coding chroma format, then adjust where no
+    // matching profile exists.
+    bitDepthConstraint = trialBitDepthConstraint;
+    chromaFormatConstraint = chromaFormat;
+    if (bUsingChromaQPAdjustment && chromaFormat == CHROMA_420) chromaFormatConstraint = CHROMA_422; // 4:2:0 cannot use the chroma qp tool.
+    if (chromaFormatConstraint == CHROMA_422 && bitDepthConstraint == 8) bitDepthConstraint = 10; // there is no 8-bit 4:2:2 profile.
+    if (chromaFormatConstraint == CHROMA_420 && !bIntraConstraintFlag) bitDepthConstraint = 12; // there is no 8 or 10-bit 4:2:0 inter RExt profile.
+  }
+}
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+/** \param  argc        number of arguments
+    \param  argv        array of arguments
+    \retval             true when success
+ */
+Bool TAppEncCfg::parseCfg( Int argc, Char* argv[] )
+{
+  Bool do_help = false;
+
+  string cfg_InputFile;
+  string cfg_BitstreamFile;
+  string cfg_ReconFile;
+  string cfg_dQPFile;
+  string cfg_ScalingListFile;
+
+  Int tmpChromaFormat;
+  Int tmpInputChromaFormat;
+  Int tmpConstraintChromaFormat;
+  string inputColourSpaceConvert;
+  ExtendedProfileName extendedProfile;
+  Int saoOffsetBitShift[MAX_NUM_CHANNEL_TYPE];
+
+  // Multi-value input fields:                                // minval, maxval (incl), min_entries, max_entries (incl) [, default values, number of default values]
+  SMultiValueInput<UInt> cfg_ColumnWidth                     (0, std::numeric_limits<UInt>::max(), 0, std::numeric_limits<UInt>::max());
+  SMultiValueInput<UInt> cfg_RowHeight                       (0, std::numeric_limits<UInt>::max(), 0, std::numeric_limits<UInt>::max());
+  SMultiValueInput<Int>  cfg_startOfCodedInterval            (std::numeric_limits<Int>::min(), std::numeric_limits<Int>::max(), 0, 1<<16);
+  SMultiValueInput<Int>  cfg_codedPivotValue                 (std::numeric_limits<Int>::min(), std::numeric_limits<Int>::max(), 0, 1<<16);
+  SMultiValueInput<Int>  cfg_targetPivotValue                (std::numeric_limits<Int>::min(), std::numeric_limits<Int>::max(), 0, 1<<16);
+
+  const UInt defaultInputKneeCodes[3]  = { 600, 800, 900 };
+  const UInt defaultOutputKneeCodes[3] = { 100, 250, 450 };
+  SMultiValueInput<UInt> cfg_kneeSEIInputKneePointValue      (1,  999, 0, 999, defaultInputKneeCodes,  sizeof(defaultInputKneeCodes )/sizeof(UInt));
+  SMultiValueInput<UInt> cfg_kneeSEIOutputKneePointValue     (0, 1000, 0, 999, defaultOutputKneeCodes, sizeof(defaultOutputKneeCodes)/sizeof(UInt));
+  const Int defaultPrimaryCodes[6]     = { 0,50000, 0,0, 50000,0 };
+  const Int defaultWhitePointCode[2]   = { 16667, 16667 };
+  SMultiValueInput<Int>  cfg_DisplayPrimariesCode            (0, 50000, 3, 3, defaultPrimaryCodes,   sizeof(defaultPrimaryCodes  )/sizeof(Int));
+  SMultiValueInput<Int>  cfg_DisplayWhitePointCode           (0, 50000, 2, 2, defaultWhitePointCode, sizeof(defaultWhitePointCode)/sizeof(Int));
+
+  SMultiValueInput<Bool> cfg_timeCodeSeiTimeStampFlag        (0,  1, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Bool> cfg_timeCodeSeiNumUnitFieldBasedFlag(0,  1, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Int>  cfg_timeCodeSeiCountingType         (0,  6, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Bool> cfg_timeCodeSeiFullTimeStampFlag    (0,  1, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Bool> cfg_timeCodeSeiDiscontinuityFlag    (0,  1, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Bool> cfg_timeCodeSeiCntDroppedFlag       (0,  1, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Int>  cfg_timeCodeSeiNumberOfFrames       (0,511, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Int>  cfg_timeCodeSeiSecondsValue         (0, 59, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Int>  cfg_timeCodeSeiMinutesValue         (0, 59, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Int>  cfg_timeCodeSeiHoursValue           (0, 23, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Bool> cfg_timeCodeSeiSecondsFlag          (0,  1, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Bool> cfg_timeCodeSeiMinutesFlag          (0,  1, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Bool> cfg_timeCodeSeiHoursFlag            (0,  1, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Int>  cfg_timeCodeSeiTimeOffsetLength     (0, 31, 0, MAX_TIMECODE_SEI_SETS);
+  SMultiValueInput<Int>  cfg_timeCodeSeiTimeOffsetValue      (std::numeric_limits<Int>::min(), std::numeric_limits<Int>::max(), 0, MAX_TIMECODE_SEI_SETS);
+
+  po::Options opts;
+  opts.addOptions()
+  ("help",                                            do_help,                                          false, "this help text")
+  ("c",    po::parseConfigFile, "configuration file name")
+
+  // File, I/O and source parameters
+  ("InputFile,i",                                     cfg_InputFile,                               string(""), "Original YUV input file name")
+  ("BitstreamFile,b",                                 cfg_BitstreamFile,                           string(""), "Bitstream output file name")
+  ("ReconFile,o",                                     cfg_ReconFile,                               string(""), "Reconstructed YUV output file name")
+  ("SourceWidth,-wdt",                                m_iSourceWidth,                                       0, "Source picture width")
+  ("SourceHeight,-hgt",                               m_iSourceHeight,                                      0, "Source picture height")
+  ("InputBitDepth",                                   m_inputBitDepth[CHANNEL_TYPE_LUMA],                   8, "Bit-depth of input file")
+  ("OutputBitDepth",                                  m_outputBitDepth[CHANNEL_TYPE_LUMA],                  0, "Bit-depth of output file (default:InternalBitDepth)")
+  ("MSBExtendedBitDepth",                             m_MSBExtendedBitDepth[CHANNEL_TYPE_LUMA],             0, "bit depth of luma component after addition of MSBs of value 0 (used for synthesising High Dynamic Range source material). (default:InputBitDepth)")
+  ("InternalBitDepth",                                m_internalBitDepth[CHANNEL_TYPE_LUMA],                0, "Bit-depth the codec operates at. (default:MSBExtendedBitDepth). If different to MSBExtendedBitDepth, source data will be converted")
+  ("InputBitDepthC",                                  m_inputBitDepth[CHANNEL_TYPE_CHROMA],                 0, "As per InputBitDepth but for chroma component. (default:InputBitDepth)")
+  ("OutputBitDepthC",                                 m_outputBitDepth[CHANNEL_TYPE_CHROMA],                0, "As per OutputBitDepth but for chroma component. (default:InternalBitDepthC)")
+  ("MSBExtendedBitDepthC",                            m_MSBExtendedBitDepth[CHANNEL_TYPE_CHROMA],           0, "As per MSBExtendedBitDepth but for chroma component. (default:MSBExtendedBitDepth)")
+  ("InternalBitDepthC",                               m_internalBitDepth[CHANNEL_TYPE_CHROMA],              0, "As per InternalBitDepth but for chroma component. (default:InternalBitDepth)")
+  ("ExtendedPrecision",                               m_useExtendedPrecision,                           false, "Increased internal accuracies to support high bit depths (not valid in V1 profiles)")
+  ("HighPrecisionPredictionWeighting",                m_useHighPrecisionPredictionWeighting,            false, "Use high precision option for weighted prediction (not valid in V1 profiles)")
+  ("InputColourSpaceConvert",                         inputColourSpaceConvert,                     string(""), "Colour space conversion to apply to input video. Permitted values are (empty string=UNCHANGED) " + getListOfColourSpaceConverts(true))
+  ("SNRInternalColourSpace",                          m_snrInternalColourSpace,                         false, "If true, then no colour space conversion is applied prior to SNR, otherwise inverse of input is applied.")
+  ("OutputInternalColourSpace",                       m_outputInternalColourSpace,                      false, "If true, then no colour space conversion is applied for reconstructed video, otherwise inverse of input is applied.")
+  ("InputChromaFormat",                               tmpInputChromaFormat,                               420, "InputChromaFormatIDC")
+  ("MSEBasedSequencePSNR",                            m_printMSEBasedSequencePSNR,                      false, "0 (default) emit sequence PSNR only as a linear average of the frame PSNRs, 1 = also emit a sequence PSNR based on an average of the frame MSEs")
+  ("PrintFrameMSE",                                   m_printFrameMSE,                                  false, "0 (default) emit only bit count and PSNRs for each frame, 1 = also emit MSE values")
+  ("PrintSequenceMSE",                                m_printSequenceMSE,                               false, "0 (default) emit only bit rate and PSNRs for the whole sequence, 1 = also emit MSE values")
+  ("CabacZeroWordPaddingEnabled",                     m_cabacZeroWordPaddingEnabled,                    false, "0 (default) do not add conforming cabac-zero-words to bit streams, 1 = add cabac-zero-words")
+  ("ChromaFormatIDC,-cf",                             tmpChromaFormat,                                      0, "ChromaFormatIDC (400|420|422|444 or set 0 (default) for same as InputChromaFormat)")
+  ("ConformanceMode",                                 m_conformanceWindowMode,                              0, "Deprecated alias of ConformanceWindowMode")
+  ("ConformanceWindowMode",                           m_conformanceWindowMode,                              0, "Window conformance mode (0: no window, 1:automatic padding, 2:padding, 3:conformance")
+  ("HorizontalPadding,-pdx",                          m_aiPad[0],                                           0, "Horizontal source padding for conformance window mode 2")
+  ("VerticalPadding,-pdy",                            m_aiPad[1],                                           0, "Vertical source padding for conformance window mode 2")
+  ("ConfLeft",                                        m_confWinLeft,                                        0, "Deprecated alias of ConfWinLeft")
+  ("ConfRight",                                       m_confWinRight,                                       0, "Deprecated alias of ConfWinRight")
+  ("ConfTop",                                         m_confWinTop,                                         0, "Deprecated alias of ConfWinTop")
+  ("ConfBottom",                                      m_confWinBottom,                                      0, "Deprecated alias of ConfWinBottom")
+  ("ConfWinLeft",                                     m_confWinLeft,                                        0, "Left offset for window conformance mode 3")
+  ("ConfWinRight",                                    m_confWinRight,                                       0, "Right offset for window conformance mode 3")
+  ("ConfWinTop",                                      m_confWinTop,                                         0, "Top offset for window conformance mode 3")
+  ("ConfWinBottom",                                   m_confWinBottom,                                      0, "Bottom offset for window conformance mode 3")
+  ("FrameRate,-fr",                                   m_iFrameRate,                                         0, "Frame rate")
+  ("FrameSkip,-fs",                                   m_FrameSkip,                                         0u, "Number of frames to skip at start of input YUV")
+  ("FramesToBeEncoded,f",                             m_framesToBeEncoded,                                  0, "Number of frames to be encoded (default=all)")
+
+  //Field coding parameters
+  ("FieldCoding",                                     m_isField,                                        false, "Signals if it's a field based coding")
+  ("TopFieldFirst, Tff",                              m_isTopFieldFirst,                                false, "In case of field based coding, signals whether if it's a top field first or not")
+
+  // Profile and level
+  ("Profile",                                         extendedProfile,                                   NONE, "Profile name to use for encoding. Use main (for main), main10 (for main10), main-still-picture, main-RExt (for Range Extensions profile), any of the RExt specific profile names, or none")
+  ("Level",                                           m_level,                                    Level::NONE, "Level limit to be used, eg 5.1, or none")
+  ("Tier",                                            m_levelTier,                                Level::MAIN, "Tier to use for interpretation of --Level (main or high only)")
+  ("MaxBitDepthConstraint",                           m_bitDepthConstraint,                                0u, "Bit depth to use for profile-constraint for RExt profiles. 0=automatically choose based upon other parameters")
+  ("MaxChromaFormatConstraint",                       tmpConstraintChromaFormat,                            0, "Chroma-format to use for the profile-constraint for RExt profiles. 0=automatically choose based upon other parameters")
+  ("IntraConstraintFlag",                             m_intraConstraintFlag,                            false, "Value of general_intra_constraint_flag to use for RExt profiles (not used if an explicit RExt sub-profile is specified)")
+  ("LowerBitRateConstraintFlag",                      m_lowerBitRateConstraintFlag,                      true, "Value of general_lower_bit_rate_constraint_flag to use for RExt profiles")
+
+  ("ProgressiveSource",                               m_progressiveSourceFlag,                          false, "Indicate that source is progressive")
+  ("InterlacedSource",                                m_interlacedSourceFlag,                           false, "Indicate that source is interlaced")
+  ("NonPackedSource",                                 m_nonPackedConstraintFlag,                        false, "Indicate that source does not contain frame packing")
+  ("FrameOnly",                                       m_frameOnlyConstraintFlag,                        false, "Indicate that the bitstream contains only frames")
+
+  // Unit definition parameters
+  ("MaxCUWidth",                                      m_uiMaxCUWidth,                                     64u)
+  ("MaxCUHeight",                                     m_uiMaxCUHeight,                                    64u)
+  // todo: remove defaults from MaxCUSize
+  ("MaxCUSize,s",                                     m_uiMaxCUWidth,                                     64u, "Maximum CU size")
+  ("MaxCUSize,s",                                     m_uiMaxCUHeight,                                    64u, "Maximum CU size")
+  ("MaxPartitionDepth,h",                             m_uiMaxCUDepth,                                      4u, "CU depth")
+
+  ("QuadtreeTULog2MaxSize",                           m_uiQuadtreeTULog2MaxSize,                           6u, "Maximum TU size in logarithm base 2")
+  ("QuadtreeTULog2MinSize",                           m_uiQuadtreeTULog2MinSize,                           2u, "Minimum TU size in logarithm base 2")
+
+  ("QuadtreeTUMaxDepthIntra",                         m_uiQuadtreeTUMaxDepthIntra,                         1u, "Depth of TU tree for intra CUs")
+  ("QuadtreeTUMaxDepthInter",                         m_uiQuadtreeTUMaxDepthInter,                         2u, "Depth of TU tree for inter CUs")
+
+  // Coding structure paramters
+  ("IntraPeriod,-ip",                                 m_iIntraPeriod,                                      -1, "Intra period in frames, (-1: only first frame)")
+#if ALLOW_RECOVERY_POINT_AS_RAP
+  ("DecodingRefreshType,-dr",                         m_iDecodingRefreshType,                               0, "Intra refresh type (0:none 1:CRA 2:IDR 3:RecPointSEI)")
+#else
+  ("DecodingRefreshType,-dr",                         m_iDecodingRefreshType,                               0, "Intra refresh type (0:none 1:CRA 2:IDR)")
+#endif
+  ("GOPSize,g",                                       m_iGOPSize,                                           1, "GOP size of temporal structure")
+
+  // motion search options
+  ("FastSearch",                                      m_iFastSearch,                                        1, "0:Full search  1:Diamond  2:PMVFAST")
+  ("SearchRange,-sr",                                 m_iSearchRange,                                      96, "Motion search range")
+  ("BipredSearchRange",                               m_bipredSearchRange,                                  4, "Motion search range for bipred refinement")
+  ("HadamardME",                                      m_bUseHADME,                                       true, "Hadamard ME for fractional-pel")
+  ("ASR",                                             m_bUseASR,                                        false, "Adaptive motion search range")
+
+  // Mode decision parameters
+  ("LambdaModifier0,-LM0",                            m_adLambdaModifier[ 0 ],                  ( Double )1.0, "Lambda modifier for temporal layer 0")
+  ("LambdaModifier1,-LM1",                            m_adLambdaModifier[ 1 ],                  ( Double )1.0, "Lambda modifier for temporal layer 1")
+  ("LambdaModifier2,-LM2",                            m_adLambdaModifier[ 2 ],                  ( Double )1.0, "Lambda modifier for temporal layer 2")
+  ("LambdaModifier3,-LM3",                            m_adLambdaModifier[ 3 ],                  ( Double )1.0, "Lambda modifier for temporal layer 3")
+  ("LambdaModifier4,-LM4",                            m_adLambdaModifier[ 4 ],                  ( Double )1.0, "Lambda modifier for temporal layer 4")
+  ("LambdaModifier5,-LM5",                            m_adLambdaModifier[ 5 ],                  ( Double )1.0, "Lambda modifier for temporal layer 5")
+  ("LambdaModifier6,-LM6",                            m_adLambdaModifier[ 6 ],                  ( Double )1.0, "Lambda modifier for temporal layer 6")
+
+  /* Quantization parameters */
+  ("QP,q",                                            m_fQP,                                             30.0, "Qp value, if value is float, QP is switched once during encoding")
+  ("DeltaQpRD,-dqr",                                  m_uiDeltaQpRD,                                       0u, "max dQp offset for slice")
+  ("MaxDeltaQP,d",                                    m_iMaxDeltaQP,                                        0, "max dQp offset for block")
+  ("MaxCuDQPDepth,-dqd",                              m_iMaxCuDQPDepth,                                     0, "max depth for a minimum CuDQP")
+  ("MaxCUChromaQpAdjustmentDepth",                    m_maxCUChromaQpAdjustmentDepth,                      -1, "Maximum depth for CU chroma Qp adjustment - set less than 0 to disable")
+
+  ("CbQpOffset,-cbqpofs",                             m_cbQpOffset,                                         0, "Chroma Cb QP Offset")
+  ("CrQpOffset,-crqpofs",                             m_crQpOffset,                                         0, "Chroma Cr QP Offset")
+
+#if ADAPTIVE_QP_SELECTION
+  ("AdaptiveQpSelection,-aqps",                       m_bUseAdaptQpSelect,                              false, "AdaptiveQpSelection")
+#endif
+
+  ("AdaptiveQP,-aq",                                  m_bUseAdaptiveQP,                                 false, "QP adaptation based on a psycho-visual model")
+  ("MaxQPAdaptationRange,-aqr",                       m_iQPAdaptationRange,                                 6, "QP adaptation range")
+  ("dQPFile,m",                                       cfg_dQPFile,                                 string(""), "dQP file name")
+  ("RDOQ",                                            m_useRDOQ,                                         true)
+  ("RDOQTS",                                          m_useRDOQTS,                                       true)
+  ("RDpenalty",                                       m_rdPenalty,                                          0,  "RD-penalty for 32x32 TU for intra in non-intra slices. 0:disabled  1:RD-penalty  2:maximum RD-penalty")
+
+  // Deblocking filter parameters
+  ("LoopFilterDisable",                               m_bLoopFilterDisable,                             false)
+  ("LoopFilterOffsetInPPS",                           m_loopFilterOffsetInPPS,                          false)
+  ("LoopFilterBetaOffset_div2",                       m_loopFilterBetaOffsetDiv2,                           0)
+  ("LoopFilterTcOffset_div2",                         m_loopFilterTcOffsetDiv2,                             0)
+  ("DeblockingFilterControlPresent",                  m_DeblockingFilterControlPresent,                 false)
+  ("DeblockingFilterMetric",                          m_DeblockingFilterMetric,                         false)
+
+  // Coding tools
+  ("AMP",                                             m_enableAMP,                                       true, "Enable asymmetric motion partitions")
+  ("CrossComponentPrediction",                        m_useCrossComponentPrediction,                    false, "Enable the use of cross-component prediction (not valid in V1 profiles)")
+  ("ReconBasedCrossCPredictionEstimate",              m_reconBasedCrossCPredictionEstimate,             false, "When determining the alpha value for cross-component prediction, use the decoded residual rather than the pre-transform encoder-side residual")
+  ("SaoLumaOffsetBitShift",                           saoOffsetBitShift[CHANNEL_TYPE_LUMA],                 0, "Specify the luma SAO bit-shift. If negative, automatically calculate a suitable value based upon bit depth and initial QP")
+  ("SaoChromaOffsetBitShift",                         saoOffsetBitShift[CHANNEL_TYPE_CHROMA],               0, "Specify the chroma SAO bit-shift. If negative, automatically calculate a suitable value based upon bit depth and initial QP")
+  ("TransformSkip",                                   m_useTransformSkip,                               false, "Intra transform skipping")
+  ("TransformSkipFast",                               m_useTransformSkipFast,                           false, "Fast intra transform skipping")
+  ("TransformSkipLog2MaxSize",                        m_transformSkipLog2MaxSize,                          2U, "Specify transform-skip maximum size. Minimum 2. (not valid in V1 profiles)")
+  ("ImplicitResidualDPCM",                            m_useResidualDPCM[RDPCM_SIGNAL_IMPLICIT],         false, "Enable implicitly signalled residual DPCM for intra (also known as sample-adaptive intra predict) (not valid in V1 profiles)")
+  ("ExplicitResidualDPCM",                            m_useResidualDPCM[RDPCM_SIGNAL_EXPLICIT],         false, "Enable explicitly signalled residual DPCM for inter (not valid in V1 profiles)")
+  ("ResidualRotation",                                m_useResidualRotation,                            false, "Enable rotation of transform-skipped and transquant-bypassed TUs through 180 degrees prior to entropy coding (not valid in V1 profiles)")
+  ("SingleSignificanceMapContext",                    m_useSingleSignificanceMapContext,                false, "Enable, for transform-skipped and transquant-bypassed TUs, the selection of a single significance map context variable for all coefficients (not valid in V1 profiles)")
+  ("GolombRiceParameterAdaptation",                   m_useGolombRiceParameterAdaptation,               false, "Enable the adaptation of the Golomb-Rice parameter over the course of each slice")
+  ("AlignCABACBeforeBypass",                          m_alignCABACBeforeBypass,                         false, "Align the CABAC engine to a defined fraction of a bit prior to coding bypass data. Must be 1 in high bit rate profile, 0 otherwise" )
+  ("SAO",                                             m_bUseSAO,                                         true, "Enable Sample Adaptive Offset")
+  ("MaxNumOffsetsPerPic",                             m_maxNumOffsetsPerPic,                             2048, "Max number of SAO offset per picture (Default: 2048)")
+  ("SAOLcuBoundary",                                  m_saoCtuBoundary,                                 false, "0: right/bottom CTU boundary areas skipped from SAO parameter estimation, 1: non-deblocked pixels are used for those areas")
+  ("SliceMode",                                       m_sliceMode,                                          0, "0: Disable all Recon slice limits, 1: Enforce max # of CTUs, 2: Enforce max # of bytes, 3:specify tiles per dependent slice")
+  ("SliceArgument",                                   m_sliceArgument,                                      0, "Depending on SliceMode being:"
+                                                                                                               "\t1: max number of CTUs per slice"
+                                                                                                               "\t2: max number of bytes per slice"
+                                                                                                               "\t3: max number of tiles per slice")
+  ("SliceSegmentMode",                                m_sliceSegmentMode,                                   0, "0: Disable all slice segment limits, 1: Enforce max # of CTUs, 2: Enforce max # of bytes, 3:specify tiles per dependent slice")
+  ("SliceSegmentArgument",                            m_sliceSegmentArgument,                               0, "Depending on SliceSegmentMode being:"
+                                                                                                               "\t1: max number of CTUs per slice segment"
+                                                                                                               "\t2: max number of bytes per slice segment"
+                                                                                                               "\t3: max number of tiles per slice segment")
+  ("LFCrossSliceBoundaryFlag",                        m_bLFCrossSliceBoundaryFlag,                       true)
+
+  ("ConstrainedIntraPred",                            m_bUseConstrainedIntraPred,                       false, "Constrained Intra Prediction")
+  ("PCMEnabledFlag",                                  m_usePCM,                                         false)
+  ("PCMLog2MaxSize",                                  m_pcmLog2MaxSize,                                    5u)
+  ("PCMLog2MinSize",                                  m_uiPCMLog2MinSize,                                  3u)
+
+  ("PCMInputBitDepthFlag",                            m_bPCMInputBitDepthFlag,                           true)
+  ("PCMFilterDisableFlag",                            m_bPCMFilterDisableFlag,                          false)
+  ("IntraReferenceSmoothing",                         m_enableIntraReferenceSmoothing,                   true, "0: Disable use of intra reference smoothing. 1: Enable use of intra reference smoothing (not valid in V1 profiles)")
+  ("WeightedPredP,-wpP",                              m_useWeightedPred,                                false, "Use weighted prediction in P slices")
+  ("WeightedPredB,-wpB",                              m_useWeightedBiPred,                              false, "Use weighted (bidirectional) prediction in B slices")
+  ("Log2ParallelMergeLevel",                          m_log2ParallelMergeLevel,                            2u, "Parallel merge estimation region")
+    //deprecated copies of renamed tile parameters
+  ("UniformSpacingIdc",                               m_tileUniformSpacingFlag,                         false,      "deprecated alias of TileUniformSpacing")
+  ("ColumnWidthArray",                                cfg_ColumnWidth,                        cfg_ColumnWidth, "deprecated alias of TileColumnWidthArray")
+  ("RowHeightArray",                                  cfg_RowHeight,                            cfg_RowHeight, "deprecated alias of TileRowHeightArray")
+
+  ("TileUniformSpacing",                              m_tileUniformSpacingFlag,                         false,      "Indicates that tile columns and rows are distributed uniformly")
+  ("NumTileColumnsMinus1",                            m_numTileColumnsMinus1,                               0,          "Number of tile columns in a picture minus 1")
+  ("NumTileRowsMinus1",                               m_numTileRowsMinus1,                                  0,          "Number of rows in a picture minus 1")
+  ("TileColumnWidthArray",                            cfg_ColumnWidth,                        cfg_ColumnWidth, "Array containing tile column width values in units of CTU")
+  ("TileRowHeightArray",                              cfg_RowHeight,                            cfg_RowHeight, "Array containing tile row height values in units of CTU")
+  ("LFCrossTileBoundaryFlag",                         m_bLFCrossTileBoundaryFlag,                        true, "1: cross-tile-boundary loop filtering. 0:non-cross-tile-boundary loop filtering")
+  ("WaveFrontSynchro",                                m_iWaveFrontSynchro,                                  0, "0: no synchro; 1 synchro with top-right-right")
+  ("ScalingList",                                     m_useScalingListId,                    SCALING_LIST_OFF, "0/off: no scaling list, 1/default: default scaling lists, 2/file: scaling lists specified in ScalingListFile")
+  ("ScalingListFile",                                 cfg_ScalingListFile,                         string(""), "Scaling list file name. Use an empty string to produce help.")
+  ("SignHideFlag,-SBH",                               m_signHideFlag,                                       1)
+  ("MaxNumMergeCand",                                 m_maxNumMergeCand,                                   5u, "Maximum number of merge candidates")
+  /* Misc. */
+  ("SEIDecodedPictureHash",                           m_decodedPictureHashSEIEnabled,                       0, "Control generation of decode picture hash SEI messages\n"
+                                                                                                               "\t3: checksum\n"
+                                                                                                               "\t2: CRC\n"
+                                                                                                               "\t1: use MD5\n"
+                                                                                                               "\t0: disable")
+  ("SEIpictureDigest",                                m_decodedPictureHashSEIEnabled,                       0, "deprecated alias for SEIDecodedPictureHash")
+  ("TMVPMode",                                        m_TMVPModeId,                                         1, "TMVP mode 0: TMVP disable for all slices. 1: TMVP enable for all slices (default) 2: TMVP enable for certain slices only")
+  ("FEN",                                             m_bUseFastEnc,                                    false, "fast encoder setting")
+  ("ECU",                                             m_bUseEarlyCU,                                    false, "Early CU setting")
+  ("FDM",                                             m_useFastDecisionForMerge,                         true, "Fast decision for Merge RD Cost")
+  ("CFM",                                             m_bUseCbfFastMode,                                false, "Cbf fast mode setting")
+  ("ESD",                                             m_useEarlySkipDetection,                          false, "Early SKIP detection setting")
+  ( "RateControl",                                    m_RCEnableRateControl,                            false, "Rate control: enable rate control" )
+  ( "TargetBitrate",                                  m_RCTargetBitrate,                                    0, "Rate control: target bit-rate" )
+  ( "KeepHierarchicalBit",                            m_RCKeepHierarchicalBit,                              0, "Rate control: 0: equal bit allocation; 1: fixed ratio bit allocation; 2: adaptive ratio bit allocation" )
+  ( "LCULevelRateControl",                            m_RCLCULevelRC,                                    true, "Rate control: true: CTU level RC; false: picture level RC" )
+  ( "RCLCUSeparateModel",                             m_RCUseLCUSeparateModel,                           true, "Rate control: use CTU level separate R-lambda model" )
+  ( "InitialQP",                                      m_RCInitialQP,                                        0, "Rate control: initial QP" )
+  ( "RCForceIntraQP",                                 m_RCForceIntraQP,                                 false, "Rate control: force intra QP to be equal to initial QP" )
+
+  ("TransquantBypassEnableFlag",                      m_TransquantBypassEnableFlag,                     false, "transquant_bypass_enable_flag indicator in PPS")
+  ("CUTransquantBypassFlagForce",                     m_CUTransquantBypassFlagForce,                    false, "Force transquant bypass mode, when transquant_bypass_enable_flag is enabled")
+  ("CostMode",                                        m_costMode,                         COST_STANDARD_LOSSY, "Use alternative cost functions: choose between 'lossy', 'sequence_level_lossless', 'lossless' (which forces QP to " MACRO_TO_STRING(LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP) ") and 'mixed_lossless_lossy' (which used QP'=" MACRO_TO_STRING(LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP_PRIME) " for pre-estimates of transquant-bypass blocks).")
+  ("RecalculateQPAccordingToLambda",                  m_recalculateQPAccordingToLambda,                 false, "Recalculate QP values according to lambda values. Do not suggest to be enabled in all intra case")
+  ("StrongIntraSmoothing,-sis",                       m_useStrongIntraSmoothing,                         true, "Enable strong intra smoothing for 32x32 blocks")
+  ("SEIActiveParameterSets",                          m_activeParameterSetsSEIEnabled,                      0, "Enable generation of active parameter sets SEI messages")
+  ("VuiParametersPresent,-vui",                       m_vuiParametersPresentFlag,                       false, "Enable generation of vui_parameters()")
+  ("AspectRatioInfoPresent",                          m_aspectRatioInfoPresentFlag,                     false, "Signals whether aspect_ratio_idc is present")
+  ("AspectRatioIdc",                                  m_aspectRatioIdc,                                     0, "aspect_ratio_idc")
+  ("SarWidth",                                        m_sarWidth,                                           0, "horizontal size of the sample aspect ratio")
+  ("SarHeight",                                       m_sarHeight,                                          0, "vertical size of the sample aspect ratio")
+  ("OverscanInfoPresent",                             m_overscanInfoPresentFlag,                        false, "Indicates whether conformant decoded pictures are suitable for display using overscan\n")
+  ("OverscanAppropriate",                             m_overscanAppropriateFlag,                        false, "Indicates whether conformant decoded pictures are suitable for display using overscan\n")
+  ("VideoSignalTypePresent",                          m_videoSignalTypePresentFlag,                     false, "Signals whether video_format, video_full_range_flag, and colour_description_present_flag are present")
+  ("VideoFormat",                                     m_videoFormat,                                        5, "Indicates representation of pictures")
+  ("VideoFullRange",                                  m_videoFullRangeFlag,                             false, "Indicates the black level and range of luma and chroma signals")
+  ("ColourDescriptionPresent",                        m_colourDescriptionPresentFlag,                   false, "Signals whether colour_primaries, transfer_characteristics and matrix_coefficients are present")
+  ("ColourPrimaries",                                 m_colourPrimaries,                                    2, "Indicates chromaticity coordinates of the source primaries")
+  ("TransferCharacteristics",                         m_transferCharacteristics,                            2, "Indicates the opto-electronic transfer characteristics of the source")
+  ("MatrixCoefficients",                              m_matrixCoefficients,                                 2, "Describes the matrix coefficients used in deriving luma and chroma from RGB primaries")
+  ("ChromaLocInfoPresent",                            m_chromaLocInfoPresentFlag,                       false, "Signals whether chroma_sample_loc_type_top_field and chroma_sample_loc_type_bottom_field are present")
+  ("ChromaSampleLocTypeTopField",                     m_chromaSampleLocTypeTopField,                        0, "Specifies the location of chroma samples for top field")
+  ("ChromaSampleLocTypeBottomField",                  m_chromaSampleLocTypeBottomField,                     0, "Specifies the location of chroma samples for bottom field")
+  ("NeutralChromaIndication",                         m_neutralChromaIndicationFlag,                    false, "Indicates that the value of all decoded chroma samples is equal to 1<<(BitDepthCr-1)")
+  ("DefaultDisplayWindowFlag",                        m_defaultDisplayWindowFlag,                       false, "Indicates the presence of the Default Window parameters")
+  ("DefDispWinLeftOffset",                            m_defDispWinLeftOffset,                               0, "Specifies the left offset of the default display window from the conformance window")
+  ("DefDispWinRightOffset",                           m_defDispWinRightOffset,                              0, "Specifies the right offset of the default display window from the conformance window")
+  ("DefDispWinTopOffset",                             m_defDispWinTopOffset,                                0, "Specifies the top offset of the default display window from the conformance window")
+  ("DefDispWinBottomOffset",                          m_defDispWinBottomOffset,                             0, "Specifies the bottom offset of the default display window from the conformance window")
+  ("FrameFieldInfoPresentFlag",                       m_frameFieldInfoPresentFlag,                      false, "Indicates that pic_struct and field coding related values are present in picture timing SEI messages")
+  ("PocProportionalToTimingFlag",                     m_pocProportionalToTimingFlag,                    false, "Indicates that the POC value is proportional to the output time w.r.t. first picture in CVS")
+  ("NumTicksPocDiffOneMinus1",                        m_numTicksPocDiffOneMinus1,                           0, "Number of ticks minus 1 that for a POC difference of one")
+  ("BitstreamRestriction",                            m_bitstreamRestrictionFlag,                       false, "Signals whether bitstream restriction parameters are present")
+  ("TilesFixedStructure",                             m_tilesFixedStructureFlag,                        false, "Indicates that each active picture parameter set has the same values of the syntax elements related to tiles")
+  ("MotionVectorsOverPicBoundaries",                  m_motionVectorsOverPicBoundariesFlag,             false, "Indicates that no samples outside the picture boundaries are used for inter prediction")
+  ("MaxBytesPerPicDenom",                             m_maxBytesPerPicDenom,                                2, "Indicates a number of bytes not exceeded by the sum of the sizes of the VCL NAL units associated with any coded picture")
+  ("MaxBitsPerMinCuDenom",                            m_maxBitsPerMinCuDenom,                               1, "Indicates an upper bound for the number of bits of coding_unit() data")
+  ("Log2MaxMvLengthHorizontal",                       m_log2MaxMvLengthHorizontal,                         15, "Indicate the maximum absolute value of a decoded horizontal MV component in quarter-pel luma units")
+  ("Log2MaxMvLengthVertical",                         m_log2MaxMvLengthVertical,                           15, "Indicate the maximum absolute value of a decoded vertical MV component in quarter-pel luma units")
+  ("SEIRecoveryPoint",                                m_recoveryPointSEIEnabled,                            0, "Control generation of recovery point SEI messages")
+  ("SEIBufferingPeriod",                              m_bufferingPeriodSEIEnabled,                          0, "Control generation of buffering period SEI messages")
+  ("SEIPictureTiming",                                m_pictureTimingSEIEnabled,                            0, "Control generation of picture timing SEI messages")
+  ("SEIToneMappingInfo",                              m_toneMappingInfoSEIEnabled,                      false, "Control generation of Tone Mapping SEI messages")
+  ("SEIToneMapId",                                    m_toneMapId,                                          0, "Specifies Id of Tone Mapping SEI message for a given session")
+  ("SEIToneMapCancelFlag",                            m_toneMapCancelFlag,                              false, "Indicates that Tone Mapping SEI message cancels the persistence or follows")
+  ("SEIToneMapPersistenceFlag",                       m_toneMapPersistenceFlag,                          true, "Specifies the persistence of the Tone Mapping SEI message")
+  ("SEIToneMapCodedDataBitDepth",                     m_toneMapCodedDataBitDepth,                           8, "Specifies Coded Data BitDepth of Tone Mapping SEI messages")
+  ("SEIToneMapTargetBitDepth",                        m_toneMapTargetBitDepth,                              8, "Specifies Output BitDepth of Tone mapping function")
+  ("SEIToneMapModelId",                               m_toneMapModelId,                                     0, "Specifies Model utilized for mapping coded data into target_bit_depth range\n"
+                                                                                                               "\t0:  linear mapping with clipping\n"
+                                                                                                               "\t1:  sigmoidal mapping\n"
+                                                                                                               "\t2:  user-defined table mapping\n"
+                                                                                                               "\t3:  piece-wise linear mapping\n"
+                                                                                                               "\t4:  luminance dynamic range information ")
+  ("SEIToneMapMinValue",                              m_toneMapMinValue,                                    0, "Specifies the minimum value in mode 0")
+  ("SEIToneMapMaxValue",                              m_toneMapMaxValue,                                 1023, "Specifies the maximum value in mode 0")
+  ("SEIToneMapSigmoidMidpoint",                       m_sigmoidMidpoint,                                  512, "Specifies the centre point in mode 1")
+  ("SEIToneMapSigmoidWidth",                          m_sigmoidWidth,                                     960, "Specifies the distance between 5% and 95% values of the target_bit_depth in mode 1")
+  ("SEIToneMapStartOfCodedInterval",                  cfg_startOfCodedInterval,      cfg_startOfCodedInterval, "Array of user-defined mapping table")
+  ("SEIToneMapNumPivots",                             m_numPivots,                                          0, "Specifies the number of pivot points in mode 3")
+  ("SEIToneMapCodedPivotValue",                       cfg_codedPivotValue,                cfg_codedPivotValue, "Array of pivot point")
+  ("SEIToneMapTargetPivotValue",                      cfg_targetPivotValue,              cfg_targetPivotValue, "Array of pivot point")
+  ("SEIToneMapCameraIsoSpeedIdc",                     m_cameraIsoSpeedIdc,                                  0, "Indicates the camera ISO speed for daylight illumination")
+  ("SEIToneMapCameraIsoSpeedValue",                   m_cameraIsoSpeedValue,                              400, "Specifies the camera ISO speed for daylight illumination of Extended_ISO")
+  ("SEIToneMapExposureIndexIdc",                      m_exposureIndexIdc,                                   0, "Indicates the exposure index setting of the camera")
+  ("SEIToneMapExposureIndexValue",                    m_exposureIndexValue,                               400, "Specifies the exposure index setting of the camera of Extended_ISO")
+  ("SEIToneMapExposureCompensationValueSignFlag",     m_exposureCompensationValueSignFlag,                  0, "Specifies the sign of ExposureCompensationValue")
+  ("SEIToneMapExposureCompensationValueNumerator",    m_exposureCompensationValueNumerator,                 0, "Specifies the numerator of ExposureCompensationValue")
+  ("SEIToneMapExposureCompensationValueDenomIdc",     m_exposureCompensationValueDenomIdc,                  2, "Specifies the denominator of ExposureCompensationValue")
+  ("SEIToneMapRefScreenLuminanceWhite",               m_refScreenLuminanceWhite,                          350, "Specifies reference screen brightness setting in units of candela per square metre")
+  ("SEIToneMapExtendedRangeWhiteLevel",               m_extendedRangeWhiteLevel,                          800, "Indicates the luminance dynamic range")
+  ("SEIToneMapNominalBlackLevelLumaCodeValue",        m_nominalBlackLevelLumaCodeValue,                    16, "Specifies luma sample value of the nominal black level assigned decoded pictures")
+  ("SEIToneMapNominalWhiteLevelLumaCodeValue",        m_nominalWhiteLevelLumaCodeValue,                   235, "Specifies luma sample value of the nominal white level assigned decoded pictures")
+  ("SEIToneMapExtendedWhiteLevelLumaCodeValue",       m_extendedWhiteLevelLumaCodeValue,                  300, "Specifies luma sample value of the extended dynamic range assigned decoded pictures")
+  ("SEIChromaSamplingFilterHint",                     m_chromaSamplingFilterSEIenabled,                 false, "Control generation of the chroma sampling filter hint SEI message")
+  ("SEIChromaSamplingHorizontalFilterType",           m_chromaSamplingHorFilterIdc,                         2, "Defines the Index of the chroma sampling horizontal filter\n"
+                                                                                                               "\t0: unspecified  - Chroma filter is unknown or is determined by the application"
+                                                                                                               "\t1: User-defined - Filter coefficients are specified in the chroma sampling filter hint SEI message"
+                                                                                                               "\t2: Standards-defined - ITU-T Rec. T.800 | ISO/IEC15444-1, 5/3 filter")
+  ("SEIChromaSamplingVerticalFilterType",             m_chromaSamplingVerFilterIdc,                         2, "Defines the Index of the chroma sampling vertical filter\n"
+                                                                                                               "\t0: unspecified  - Chroma filter is unknown or is determined by the application"
+                                                                                                               "\t1: User-defined - Filter coefficients are specified in the chroma sampling filter hint SEI message"
+                                                                                                               "\t2: Standards-defined - ITU-T Rec. T.800 | ISO/IEC15444-1, 5/3 filter")
+  ("SEIFramePacking",                                 m_framePackingSEIEnabled,                             0, "Control generation of frame packing SEI messages")
+  ("SEIFramePackingType",                             m_framePackingSEIType,                                0, "Define frame packing arrangement\n"
+                                                                                                               "\t3: side by side - frames are displayed horizontally\n"
+                                                                                                               "\t4: top bottom - frames are displayed vertically\n"
+                                                                                                               "\t5: frame alternation - one frame is alternated with the other")
+  ("SEIFramePackingId",                               m_framePackingSEIId,                                  0, "Id of frame packing SEI message for a given session")
+  ("SEIFramePackingQuincunx",                         m_framePackingSEIQuincunx,                            0, "Indicate the presence of a Quincunx type video frame")
+  ("SEIFramePackingInterpretation",                   m_framePackingSEIInterpretation,                      0, "Indicate the interpretation of the frame pair\n"
+                                                                                                               "\t0: unspecified\n"
+                                                                                                               "\t1: stereo pair, frame0 represents left view\n"
+                                                                                                               "\t2: stereo pair, frame0 represents right view")
+  ("SEISegmentedRectFramePacking",                    m_segmentedRectFramePackingSEIEnabled,                0, "Controls generation of segmented rectangular frame packing SEI messages")
+  ("SEISegmentedRectFramePackingCancel",              m_segmentedRectFramePackingSEICancel,             false, "If equal to 1, cancels the persistence of any previous SRFPA SEI message")
+  ("SEISegmentedRectFramePackingType",                m_segmentedRectFramePackingSEIType,                   0, "Specifies the arrangement of the frames in the reconstructed picture")
+  ("SEISegmentedRectFramePackingPersistence",         m_segmentedRectFramePackingSEIPersistence,        false, "If equal to 0, the SEI applies to the current frame only")
+  ("SEIDisplayOrientation",                           m_displayOrientationSEIAngle,                         0, "Control generation of display orientation SEI messages\n"
+                                                                                                               "\tN: 0 < N < (2^16 - 1) enable display orientation SEI message with anticlockwise_rotation = N and display_orientation_repetition_period = 1\n"
+                                                                                                               "\t0: disable")
+  ("SEITemporalLevel0Index",                          m_temporalLevel0IndexSEIEnabled,                      0, "Control generation of temporal level 0 index SEI messages")
+  ("SEIGradualDecodingRefreshInfo",                   m_gradualDecodingRefreshInfoEnabled,                  0, "Control generation of gradual decoding refresh information SEI message")
+  ("SEINoDisplay",                                    m_noDisplaySEITLayer,                                 0, "Control generation of no display SEI message\n"
+                                                                                                               "\tN: 0 < N enable no display SEI message for temporal layer N or higher\n"
+                                                                                                               "\t0: disable")
+  ("SEIDecodingUnitInfo",                             m_decodingUnitInfoSEIEnabled,                         0, "Control generation of decoding unit information SEI message.")
+  ("SEISOPDescription",                               m_SOPDescriptionSEIEnabled,                           0, "Control generation of SOP description SEI messages")
+  ("SEIScalableNesting",                              m_scalableNestingSEIEnabled,                          0, "Control generation of scalable nesting SEI messages")
+  ("SEITempMotionConstrainedTileSets",                m_tmctsSEIEnabled,                                false, "Control generation of temporal motion constrained tile sets SEI message")
+  ("SEITimeCodeEnabled",                              m_timeCodeSEIEnabled,                             false, "Control generation of time code information SEI message")
+  ("SEITimeCodeNumClockTs",                           m_timeCodeSEINumTs,                                   0, "Number of clock time sets [0..3]")
+  ("SEITimeCodeTimeStampFlag",                        cfg_timeCodeSeiTimeStampFlag,          cfg_timeCodeSeiTimeStampFlag,         "Time stamp flag associated to each time set")
+  ("SEITimeCodeFieldBasedFlag",                       cfg_timeCodeSeiNumUnitFieldBasedFlag,  cfg_timeCodeSeiNumUnitFieldBasedFlag, "Field based flag associated to each time set")
+  ("SEITimeCodeCountingType",                         cfg_timeCodeSeiCountingType,           cfg_timeCodeSeiCountingType,          "Counting type associated to each time set")
+  ("SEITimeCodeFullTsFlag",                           cfg_timeCodeSeiFullTimeStampFlag,      cfg_timeCodeSeiFullTimeStampFlag,     "Full time stamp flag associated to each time set")
+  ("SEITimeCodeDiscontinuityFlag",                    cfg_timeCodeSeiDiscontinuityFlag,      cfg_timeCodeSeiDiscontinuityFlag,     "Discontinuity flag associated to each time set")
+  ("SEITimeCodeCntDroppedFlag",                       cfg_timeCodeSeiCntDroppedFlag,         cfg_timeCodeSeiCntDroppedFlag,        "Counter dropped flag associated to each time set")
+  ("SEITimeCodeNumFrames",                            cfg_timeCodeSeiNumberOfFrames,         cfg_timeCodeSeiNumberOfFrames,        "Number of frames associated to each time set")
+  ("SEITimeCodeSecondsValue",                         cfg_timeCodeSeiSecondsValue,           cfg_timeCodeSeiSecondsValue,          "Seconds value for each time set")
+  ("SEITimeCodeMinutesValue",                         cfg_timeCodeSeiMinutesValue,           cfg_timeCodeSeiMinutesValue,          "Minutes value for each time set")
+  ("SEITimeCodeHoursValue",                           cfg_timeCodeSeiHoursValue,             cfg_timeCodeSeiHoursValue,            "Hours value for each time set")
+  ("SEITimeCodeSecondsFlag",                          cfg_timeCodeSeiSecondsFlag,            cfg_timeCodeSeiSecondsFlag,           "Flag to signal seconds value presence in each time set")
+  ("SEITimeCodeMinutesFlag",                          cfg_timeCodeSeiMinutesFlag,            cfg_timeCodeSeiMinutesFlag,           "Flag to signal minutes value presence in each time set")
+  ("SEITimeCodeHoursFlag",                            cfg_timeCodeSeiHoursFlag,              cfg_timeCodeSeiHoursFlag,             "Flag to signal hours value presence in each time set")
+  ("SEITimeCodeOffsetLength",                         cfg_timeCodeSeiTimeOffsetLength,       cfg_timeCodeSeiTimeOffsetLength,      "Time offset length associated to each time set")
+  ("SEITimeCodeTimeOffset",                           cfg_timeCodeSeiTimeOffsetValue,        cfg_timeCodeSeiTimeOffsetValue,       "Time offset associated to each time set")
+  ("SEIKneeFunctionInfo",                             m_kneeSEIEnabled,                                 false, "Control generation of Knee function SEI messages")
+  ("SEIKneeFunctionId",                               m_kneeSEIId,                                          0, "Specifies Id of Knee function SEI message for a given session")
+  ("SEIKneeFunctionCancelFlag",                       m_kneeSEICancelFlag,                              false, "Indicates that Knee function SEI message cancels the persistence or follows")
+  ("SEIKneeFunctionPersistenceFlag",                  m_kneeSEIPersistenceFlag,                          true, "Specifies the persistence of the Knee function SEI message")
+  ("SEIKneeFunctionInputDrange",                      m_kneeSEIInputDrange,                              1000, "Specifies the peak luminance level for the input picture of Knee function SEI messages")
+  ("SEIKneeFunctionInputDispLuminance",               m_kneeSEIInputDispLuminance,                        100, "Specifies the expected display brightness for the input picture of Knee function SEI messages")
+  ("SEIKneeFunctionOutputDrange",                     m_kneeSEIOutputDrange,                             4000, "Specifies the peak luminance level for the output picture of Knee function SEI messages")
+  ("SEIKneeFunctionOutputDispLuminance",              m_kneeSEIOutputDispLuminance,                       800, "Specifies the expected display brightness for the output picture of Knee function SEI messages")
+  ("SEIKneeFunctionNumKneePointsMinus1",              m_kneeSEINumKneePointsMinus1,                         2, "Specifies the number of knee points - 1")
+  ("SEIKneeFunctionInputKneePointValue",              cfg_kneeSEIInputKneePointValue,   cfg_kneeSEIInputKneePointValue, "Array of input knee point")
+  ("SEIKneeFunctionOutputKneePointValue",             cfg_kneeSEIOutputKneePointValue, cfg_kneeSEIOutputKneePointValue, "Array of output knee point")
+  ("SEIMasteringDisplayColourVolume",                 m_masteringDisplay.colourVolumeSEIEnabled,         false, "Control generation of mastering display colour volume SEI messages")
+  ("SEIMasteringDisplayMaxLuminance",                 m_masteringDisplay.maxLuminance,                  10000u, "Specifies the mastering display maximum luminance value in units of 1/10000 candela per square metre (32-bit code value)")
+  ("SEIMasteringDisplayMinLuminance",                 m_masteringDisplay.minLuminance,                      0u, "Specifies the mastering display minimum luminance value in units of 1/10000 candela per square metre (32-bit code value)")
+  ("SEIMasteringDisplayPrimaries",                    cfg_DisplayPrimariesCode,       cfg_DisplayPrimariesCode, "Mastering display primaries for all three colour planes in CIE xy coordinates in increments of 1/50000 (results in the ranges 0 to 50000 inclusive)")
+  ("SEIMasteringDisplayWhitePoint",                   cfg_DisplayWhitePointCode,     cfg_DisplayWhitePointCode, "Mastering display white point CIE xy coordinates in normalised increments of 1/50000 (e.g. 0.333 = 16667)")
+  ("Verbose",                                m_verboseLevel,                                       1, "verbose level")
+    
+  ;
+
+  for(Int i=1; i<MAX_GOP+1; i++) {
+    std::ostringstream cOSS;
+    cOSS<<"Frame"<<i;
+    opts.addOptions()(cOSS.str(), m_GOPList[i-1], GOPEntry());
+  }
+  po::setDefaults(opts);
+  const list<const Char*>& argv_unhandled = po::scanArgv(opts, argc, (const Char**) argv);
+
+  for (list<const Char*>::const_iterator it = argv_unhandled.begin(); it != argv_unhandled.end(); it++)
+  {
+    fprintf(stderr, "Unhandled argument ignored: `%s'\n", *it);
+  }
+
+  if (argc == 1 || do_help)
+  {
+    /* argc == 1: no options have been specified */
+    po::doHelp(cout, opts);
+    return false;
+  }
+
+  /*
+   * Set any derived parameters
+   */
+  /* convert std::string to c string for compatibility */
+  m_pchInputFile = cfg_InputFile.empty() ? NULL : strdup(cfg_InputFile.c_str());
+  m_pchBitstreamFile = cfg_BitstreamFile.empty() ? NULL : strdup(cfg_BitstreamFile.c_str());
+  m_pchReconFile = cfg_ReconFile.empty() ? NULL : strdup(cfg_ReconFile.c_str());
+  m_pchdQPFile = cfg_dQPFile.empty() ? NULL : strdup(cfg_dQPFile.c_str());
+
+  if(m_isField)
+  {
+    //Frame height
+    m_iSourceHeightOrg = m_iSourceHeight;
+    //Field height
+    m_iSourceHeight = m_iSourceHeight >> 1;
+    //number of fields to encode
+    m_framesToBeEncoded *= 2;
+  }
+
+  if( !m_tileUniformSpacingFlag && m_numTileColumnsMinus1 > 0 )
+  {
+    if (cfg_ColumnWidth.values.size() > m_numTileColumnsMinus1)
+    {
+      printf( "The number of columns whose width are defined is larger than the allowed number of columns.\n" );
+      exit( EXIT_FAILURE );
+    }
+    else if (cfg_ColumnWidth.values.size() < m_numTileColumnsMinus1)
+    {
+      printf( "The width of some columns is not defined.\n" );
+      exit( EXIT_FAILURE );
+    }
+    else
+    {
+      m_tileColumnWidth.resize(m_numTileColumnsMinus1);
+      for(UInt i=0; i<cfg_ColumnWidth.values.size(); i++)
+        m_tileColumnWidth[i]=cfg_ColumnWidth.values[i];
+    }
+  }
+  else
+  {
+    m_tileColumnWidth.clear();
+  }
+
+  if( !m_tileUniformSpacingFlag && m_numTileRowsMinus1 > 0 )
+  {
+    if (cfg_RowHeight.values.size() > m_numTileRowsMinus1)
+    {
+      printf( "The number of rows whose height are defined is larger than the allowed number of rows.\n" );
+      exit( EXIT_FAILURE );
+    }
+    else if (cfg_RowHeight.values.size() < m_numTileRowsMinus1)
+    {
+      printf( "The height of some rows is not defined.\n" );
+      exit( EXIT_FAILURE );
+    }
+    else
+    {
+      m_tileRowHeight.resize(m_numTileRowsMinus1);
+      for(UInt i=0; i<cfg_RowHeight.values.size(); i++)
+        m_tileRowHeight[i]=cfg_RowHeight.values[i];
+    }
+  }
+  else
+  {
+    m_tileRowHeight.clear();
+  }
+
+  m_scalingListFile = cfg_ScalingListFile.empty() ? NULL : strdup(cfg_ScalingListFile.c_str());
+
+  /* rules for input, output and internal bitdepths as per help text */
+  if (m_MSBExtendedBitDepth[CHANNEL_TYPE_LUMA  ] == 0) { m_MSBExtendedBitDepth[CHANNEL_TYPE_LUMA  ] = m_inputBitDepth      [CHANNEL_TYPE_LUMA  ]; }
+  if (m_MSBExtendedBitDepth[CHANNEL_TYPE_CHROMA] == 0) { m_MSBExtendedBitDepth[CHANNEL_TYPE_CHROMA] = m_MSBExtendedBitDepth[CHANNEL_TYPE_LUMA  ]; }
+  if (m_internalBitDepth   [CHANNEL_TYPE_LUMA  ] == 0) { m_internalBitDepth   [CHANNEL_TYPE_LUMA  ] = m_MSBExtendedBitDepth[CHANNEL_TYPE_LUMA  ]; }
+  if (m_internalBitDepth   [CHANNEL_TYPE_CHROMA] == 0) { m_internalBitDepth   [CHANNEL_TYPE_CHROMA] = m_internalBitDepth   [CHANNEL_TYPE_LUMA  ]; }
+  if (m_inputBitDepth      [CHANNEL_TYPE_CHROMA] == 0) { m_inputBitDepth      [CHANNEL_TYPE_CHROMA] = m_inputBitDepth      [CHANNEL_TYPE_LUMA  ]; }
+  if (m_outputBitDepth     [CHANNEL_TYPE_LUMA  ] == 0) { m_outputBitDepth     [CHANNEL_TYPE_LUMA  ] = m_internalBitDepth   [CHANNEL_TYPE_LUMA  ]; }
+  if (m_outputBitDepth     [CHANNEL_TYPE_CHROMA] == 0) { m_outputBitDepth     [CHANNEL_TYPE_CHROMA] = m_internalBitDepth   [CHANNEL_TYPE_CHROMA]; }
+
+  m_InputChromaFormatIDC = numberToChromaFormat(tmpInputChromaFormat);
+  m_chromaFormatIDC      = ((tmpChromaFormat == 0) ? (m_InputChromaFormatIDC) : (numberToChromaFormat(tmpChromaFormat)));
+
+  if (extendedProfile >= 1000 && extendedProfile <= 2316)
+  {
+    m_profile = Profile::MAINREXT;
+    if (m_bitDepthConstraint != 0 || tmpConstraintChromaFormat != 0)
+    {
+      fprintf(stderr, "Error: The bit depth and chroma format constraints are not used when an explicit RExt profile is specified\n");
+      exit(EXIT_FAILURE);
+    }
+    m_bitDepthConstraint     = (extendedProfile%100);
+    m_intraConstraintFlag    = (extendedProfile>=2000);
+    switch ((extendedProfile/100)%10)
+    {
+      case 0:  tmpConstraintChromaFormat=400; break;
+      case 1:  tmpConstraintChromaFormat=420; break;
+      case 2:  tmpConstraintChromaFormat=422; break;
+      default: tmpConstraintChromaFormat=444; break;
+    }
+  }
+  else
+  {
+    m_profile = Profile::Name(extendedProfile);
+  }
+
+  if (m_profile == Profile::HIGHTHROUGHPUTREXT )
+  {
+    if (m_bitDepthConstraint == 0) m_bitDepthConstraint = 16;
+    m_chromaFormatConstraint = (tmpConstraintChromaFormat == 0) ? CHROMA_444 : numberToChromaFormat(tmpConstraintChromaFormat);
+  }
+  else if (m_profile == Profile::MAINREXT)
+  {
+    if (m_bitDepthConstraint == 0 && tmpConstraintChromaFormat == 0)
+    {
+      // produce a valid combination, if possible.
+      const Bool bUsingGeneralRExtTools  = m_useResidualRotation                    ||
+                                           m_useSingleSignificanceMapContext        ||
+                                           m_useResidualDPCM[RDPCM_SIGNAL_IMPLICIT] ||
+                                           m_useResidualDPCM[RDPCM_SIGNAL_EXPLICIT] ||
+                                           !m_enableIntraReferenceSmoothing         ||
+                                           m_useGolombRiceParameterAdaptation       ||
+                                           m_transformSkipLog2MaxSize!=2;
+      const Bool bUsingChromaQPAdjustment= m_maxCUChromaQpAdjustmentDepth >= 0;
+      const Bool bUsingExtendedPrecision = m_useExtendedPrecision;
+      m_chromaFormatConstraint = NUM_CHROMA_FORMAT;
+      automaticallySelectRExtProfile(bUsingGeneralRExtTools,
+                                     bUsingChromaQPAdjustment,
+                                     bUsingExtendedPrecision,
+                                     m_intraConstraintFlag,
+                                     m_bitDepthConstraint,
+                                     m_chromaFormatConstraint,
+                                     m_chromaFormatIDC==CHROMA_400 ? m_internalBitDepth[CHANNEL_TYPE_LUMA] : std::max(m_internalBitDepth[CHANNEL_TYPE_LUMA], m_internalBitDepth[CHANNEL_TYPE_CHROMA]),
+                                     m_chromaFormatIDC);
+    }
+    else if (m_bitDepthConstraint == 0 || tmpConstraintChromaFormat == 0)
+    {
+      fprintf(stderr, "Error: The bit depth and chroma format constraints must either both be specified or both be configured automatically\n");
+      exit(EXIT_FAILURE);
+    }
+    else
+    {
+      m_chromaFormatConstraint = numberToChromaFormat(tmpConstraintChromaFormat);
+    }
+  }
+  else
+  {
+    m_chromaFormatConstraint = (tmpConstraintChromaFormat == 0) ? m_chromaFormatIDC : numberToChromaFormat(tmpConstraintChromaFormat);
+    m_bitDepthConstraint = (m_profile == Profile::MAIN10?10:8);
+  }
+
+
+  m_inputColourSpaceConvert = stringToInputColourSpaceConvert(inputColourSpaceConvert, true);
+
+  switch (m_conformanceWindowMode)
+  {
+  case 0:
+    {
+      // no conformance or padding
+      m_confWinLeft = m_confWinRight = m_confWinTop = m_confWinBottom = 0;
+      m_aiPad[1] = m_aiPad[0] = 0;
+      break;
+    }
+  case 1:
+    {
+      // automatic padding to minimum CU size
+      Int minCuSize = m_uiMaxCUHeight >> (m_uiMaxCUDepth - 1);
+      if (m_iSourceWidth % minCuSize)
+      {
+        m_aiPad[0] = m_confWinRight  = ((m_iSourceWidth / minCuSize) + 1) * minCuSize - m_iSourceWidth;
+        m_iSourceWidth  += m_confWinRight;
+      }
+      if (m_iSourceHeight % minCuSize)
+      {
+        m_aiPad[1] = m_confWinBottom = ((m_iSourceHeight / minCuSize) + 1) * minCuSize - m_iSourceHeight;
+        m_iSourceHeight += m_confWinBottom;
+        if ( m_isField )
+        {
+          m_iSourceHeightOrg += m_confWinBottom << 1;
+          m_aiPad[1] = m_confWinBottom << 1;
+        }
+      }
+      if (m_aiPad[0] % TComSPS::getWinUnitX(m_chromaFormatIDC) != 0)
+      {
+        fprintf(stderr, "Error: picture width is not an integer multiple of the specified chroma subsampling\n");
+        exit(EXIT_FAILURE);
+      }
+      if (m_aiPad[1] % TComSPS::getWinUnitY(m_chromaFormatIDC) != 0)
+      {
+        fprintf(stderr, "Error: picture height is not an integer multiple of the specified chroma subsampling\n");
+        exit(EXIT_FAILURE);
+      }
+      break;
+    }
+  case 2:
+    {
+      //padding
+      m_iSourceWidth  += m_aiPad[0];
+      m_iSourceHeight += m_aiPad[1];
+      m_confWinRight  = m_aiPad[0];
+      m_confWinBottom = m_aiPad[1];
+      break;
+    }
+  case 3:
+    {
+      // conformance
+      if ((m_confWinLeft == 0) && (m_confWinRight == 0) && (m_confWinTop == 0) && (m_confWinBottom == 0))
+      {
+        fprintf(stderr, "Warning: Conformance window enabled, but all conformance window parameters set to zero\n");
+      }
+      if ((m_aiPad[1] != 0) || (m_aiPad[0]!=0))
+      {
+        fprintf(stderr, "Warning: Conformance window enabled, padding parameters will be ignored\n");
+      }
+      m_aiPad[1] = m_aiPad[0] = 0;
+      break;
+    }
+  }
+
+  // allocate slice-based dQP values
+  m_aidQP = new Int[ m_framesToBeEncoded + m_iGOPSize + 1 ];
+  ::memset( m_aidQP, 0, sizeof(Int)*( m_framesToBeEncoded + m_iGOPSize + 1 ) );
+
+  // handling of floating-point QP values
+  // if QP is not integer, sequence is split into two sections having QP and QP+1
+  m_iQP = (Int)( m_fQP );
+  if ( m_iQP < m_fQP )
+  {
+    Int iSwitchPOC = (Int)( m_framesToBeEncoded - (m_fQP - m_iQP)*m_framesToBeEncoded + 0.5 );
+
+    iSwitchPOC = (Int)( (Double)iSwitchPOC / m_iGOPSize + 0.5 )*m_iGOPSize;
+    for ( Int i=iSwitchPOC; i<m_framesToBeEncoded + m_iGOPSize + 1; i++ )
+    {
+      m_aidQP[i] = 1;
+    }
+  }
+
+  for(UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    if (saoOffsetBitShift[ch]<0)
+    {
+      if (m_internalBitDepth[ch]>10)
+      {
+        m_saoOffsetBitShift[ch]=UInt(Clip3<Int>(0, m_internalBitDepth[ch]-10, Int(m_internalBitDepth[ch]-10 + 0.165*m_iQP - 3.22 + 0.5) ) );
+      }
+      else
+      {
+        m_saoOffsetBitShift[ch]=0;
+      }
+    }
+    else
+    {
+      m_saoOffsetBitShift[ch]=UInt(saoOffsetBitShift[ch]);
+    }
+  }
+
+  // reading external dQP description from file
+  if ( m_pchdQPFile )
+  {
+    FILE* fpt=fopen( m_pchdQPFile, "r" );
+    if ( fpt )
+    {
+      Int iValue;
+      Int iPOC = 0;
+      while ( iPOC < m_framesToBeEncoded )
+      {
+        if ( fscanf(fpt, "%d", &iValue ) == EOF ) break;
+        m_aidQP[ iPOC ] = iValue;
+        iPOC++;
+      }
+      fclose(fpt);
+    }
+  }
+  m_iWaveFrontSubstreams = m_iWaveFrontSynchro ? (m_iSourceHeight + m_uiMaxCUHeight - 1) / m_uiMaxCUHeight : 1;
+
+  if( m_masteringDisplay.colourVolumeSEIEnabled )
+  {
+    for(UInt idx=0; idx<6; idx++)
+    {
+      m_masteringDisplay.primaries[idx/2][idx%2] = UShort((cfg_DisplayPrimariesCode.values.size() > idx) ? cfg_DisplayPrimariesCode.values[idx] : 0);
+    }
+    for(UInt idx=0; idx<2; idx++)
+    {
+      m_masteringDisplay.whitePoint[idx] = UShort((cfg_DisplayWhitePointCode.values.size() > idx) ? cfg_DisplayWhitePointCode.values[idx] : 0);
+    }
+  }
+    
+  if( m_toneMappingInfoSEIEnabled && !m_toneMapCancelFlag )
+  {
+    if( m_toneMapModelId == 2 && !cfg_startOfCodedInterval.values.empty() )
+    {
+      const UInt num = 1u<< m_toneMapTargetBitDepth;
+      m_startOfCodedInterval = new Int[num];
+      for(UInt i=0; i<num; i++)
+      {
+        m_startOfCodedInterval[i] = cfg_startOfCodedInterval.values.size() > i ? cfg_startOfCodedInterval.values[i] : 0;
+      }
+    }
+    else
+    {
+      m_startOfCodedInterval = NULL;
+    }
+    if( ( m_toneMapModelId == 3 ) && ( m_numPivots > 0 ) )
+    {
+      if( !cfg_codedPivotValue.values.empty() && !cfg_targetPivotValue.values.empty() )
+      {
+        m_codedPivotValue  = new Int[m_numPivots];
+        m_targetPivotValue = new Int[m_numPivots];
+        for(UInt i=0; i<m_numPivots; i++)
+        {
+          m_codedPivotValue[i]  = cfg_codedPivotValue.values.size()  > i ? cfg_codedPivotValue.values [i] : 0;
+          m_targetPivotValue[i] = cfg_targetPivotValue.values.size() > i ? cfg_targetPivotValue.values[i] : 0;
+        }
+      }
+    }
+    else
+    {
+      m_codedPivotValue = NULL;
+      m_targetPivotValue = NULL;
+    }
+  }
+
+  if( m_kneeSEIEnabled && !m_kneeSEICancelFlag )
+  {
+    assert ( m_kneeSEINumKneePointsMinus1 >= 0 && m_kneeSEINumKneePointsMinus1 < 999 );
+    m_kneeSEIInputKneePoint  = new Int[m_kneeSEINumKneePointsMinus1+1];
+    m_kneeSEIOutputKneePoint = new Int[m_kneeSEINumKneePointsMinus1+1];
+    for(Int i=0; i<(m_kneeSEINumKneePointsMinus1+1); i++)
+    {
+      m_kneeSEIInputKneePoint[i]  = cfg_kneeSEIInputKneePointValue.values.size()  > i ? cfg_kneeSEIInputKneePointValue.values[i]  : 1;
+      m_kneeSEIOutputKneePoint[i] = cfg_kneeSEIOutputKneePointValue.values.size() > i ? cfg_kneeSEIOutputKneePointValue.values[i] : 0;
+    }
+  }
+
+  if(m_timeCodeSEIEnabled)
+  {
+    for(Int i = 0; i < m_timeCodeSEINumTs && i < MAX_TIMECODE_SEI_SETS; i++)
+    {
+      m_timeSetArray[i].clockTimeStampFlag    = cfg_timeCodeSeiTimeStampFlag        .values.size()>i ? cfg_timeCodeSeiTimeStampFlag        .values [i] : false;
+      m_timeSetArray[i].numUnitFieldBasedFlag = cfg_timeCodeSeiNumUnitFieldBasedFlag.values.size()>i ? cfg_timeCodeSeiNumUnitFieldBasedFlag.values [i] : 0;
+      m_timeSetArray[i].countingType          = cfg_timeCodeSeiCountingType         .values.size()>i ? cfg_timeCodeSeiCountingType         .values [i] : 0;
+      m_timeSetArray[i].fullTimeStampFlag     = cfg_timeCodeSeiFullTimeStampFlag    .values.size()>i ? cfg_timeCodeSeiFullTimeStampFlag    .values [i] : 0;
+      m_timeSetArray[i].discontinuityFlag     = cfg_timeCodeSeiDiscontinuityFlag    .values.size()>i ? cfg_timeCodeSeiDiscontinuityFlag    .values [i] : 0;
+      m_timeSetArray[i].cntDroppedFlag        = cfg_timeCodeSeiCntDroppedFlag       .values.size()>i ? cfg_timeCodeSeiCntDroppedFlag       .values [i] : 0;
+      m_timeSetArray[i].numberOfFrames        = cfg_timeCodeSeiNumberOfFrames       .values.size()>i ? cfg_timeCodeSeiNumberOfFrames       .values [i] : 0;
+      m_timeSetArray[i].secondsValue          = cfg_timeCodeSeiSecondsValue         .values.size()>i ? cfg_timeCodeSeiSecondsValue         .values [i] : 0;
+      m_timeSetArray[i].minutesValue          = cfg_timeCodeSeiMinutesValue         .values.size()>i ? cfg_timeCodeSeiMinutesValue         .values [i] : 0;
+      m_timeSetArray[i].hoursValue            = cfg_timeCodeSeiHoursValue           .values.size()>i ? cfg_timeCodeSeiHoursValue           .values [i] : 0;
+      m_timeSetArray[i].secondsFlag           = cfg_timeCodeSeiSecondsFlag          .values.size()>i ? cfg_timeCodeSeiSecondsFlag          .values [i] : 0;
+      m_timeSetArray[i].minutesFlag           = cfg_timeCodeSeiMinutesFlag          .values.size()>i ? cfg_timeCodeSeiMinutesFlag          .values [i] : 0;
+      m_timeSetArray[i].hoursFlag             = cfg_timeCodeSeiHoursFlag            .values.size()>i ? cfg_timeCodeSeiHoursFlag            .values [i] : 0;
+      m_timeSetArray[i].timeOffsetLength      = cfg_timeCodeSeiTimeOffsetLength     .values.size()>i ? cfg_timeCodeSeiTimeOffsetLength     .values [i] : 0;
+      m_timeSetArray[i].timeOffsetValue       = cfg_timeCodeSeiTimeOffsetValue      .values.size()>i ? cfg_timeCodeSeiTimeOffsetValue      .values [i] : 0;
+    }
+  }
+
+  // check validity of input parameters
+  xCheckParameter();
+
+  // set global variables
+  xSetGlobal();
+
+  // print-out parameters
+  if (m_verboseLevel) {
+    xPrintParameter();
+  }
+
+  return true;
+}
+
+
+// ====================================================================================================================
+// Private member functions
+// ====================================================================================================================
+
+Void TAppEncCfg::xCheckParameter()
+{
+  if (!m_decodedPictureHashSEIEnabled && 0)
+  {
+    fprintf(stderr, "******************************************************************\n");
+    fprintf(stderr, "** WARNING: --SEIDecodedPictureHash is now disabled by default. **\n");
+    fprintf(stderr, "**          Automatic verification of decoded pictures by a     **\n");
+    fprintf(stderr, "**          decoder requires this option to be enabled.         **\n");
+    fprintf(stderr, "******************************************************************\n");
+  }
+  if( m_profile==Profile::NONE && 0)
+  {
+    fprintf(stderr, "***************************************************************************\n");
+    fprintf(stderr, "** WARNING: For conforming bitstreams a valid Profile value must be set! **\n");
+    fprintf(stderr, "***************************************************************************\n");
+  }
+  if( m_level==Level::NONE && 0)
+  {
+    fprintf(stderr, "***************************************************************************\n");
+    fprintf(stderr, "** WARNING: For conforming bitstreams a valid Level value must be set!   **\n");
+    fprintf(stderr, "***************************************************************************\n");
+  }
+
+  Bool check_failed = false; /* abort if there is a fatal configuration problem */
+#define xConfirmPara(a,b) check_failed |= confirmPara(a,b)
+
+  const UInt maxBitDepth=(m_chromaFormatIDC==CHROMA_400) ? m_internalBitDepth[CHANNEL_TYPE_LUMA] : std::max(m_internalBitDepth[CHANNEL_TYPE_LUMA], m_internalBitDepth[CHANNEL_TYPE_CHROMA]);
+  xConfirmPara(m_bitDepthConstraint<maxBitDepth, "The internalBitDepth must not be greater than the bitDepthConstraint value");
+  xConfirmPara(m_chromaFormatConstraint<m_chromaFormatIDC, "The chroma format used must not be greater than the chromaFormatConstraint value");
+
+  if (m_profile==Profile::MAINREXT || m_profile==Profile::HIGHTHROUGHPUTREXT)
+  {
+    xConfirmPara(m_lowerBitRateConstraintFlag==false && m_intraConstraintFlag==false, "The lowerBitRateConstraint flag cannot be false when intraConstraintFlag is false");
+    xConfirmPara(m_alignCABACBeforeBypass && m_profile!=Profile::HIGHTHROUGHPUTREXT, "AlignCABACBeforeBypass must not be enabled unless the high throughput profile is being used.");
+    if (m_profile == Profile::MAINREXT)
+    {
+      const UInt intraIdx = m_intraConstraintFlag ? 1:0;
+      const UInt bitDepthIdx = (m_bitDepthConstraint == 8 ? 0 : (m_bitDepthConstraint ==10 ? 1 : (m_bitDepthConstraint == 12 ? 2 : (m_bitDepthConstraint == 16 ? 3 : 4 ))));
+      const UInt chromaFormatIdx = UInt(m_chromaFormatConstraint);
+      const Bool bValidProfile = (bitDepthIdx > 3 || chromaFormatIdx>3) ? false : (validRExtProfileNames[intraIdx][bitDepthIdx][chromaFormatIdx] != NONE);
+      xConfirmPara(!bValidProfile, "Invalid intra constraint flag, bit depth constraint flag and chroma format constraint flag combination for a RExt profile");
+      const Bool bUsingGeneralRExtTools  = m_useResidualRotation                    ||
+                                           m_useSingleSignificanceMapContext        ||
+                                           m_useResidualDPCM[RDPCM_SIGNAL_IMPLICIT] ||
+                                           m_useResidualDPCM[RDPCM_SIGNAL_EXPLICIT] ||
+                                           !m_enableIntraReferenceSmoothing         ||
+                                           m_useGolombRiceParameterAdaptation       ||
+                                           m_transformSkipLog2MaxSize!=2;
+      const Bool bUsingChromaQPTool      = m_maxCUChromaQpAdjustmentDepth >= 0;
+      const Bool bUsingExtendedPrecision = m_useExtendedPrecision;
+
+      xConfirmPara((m_chromaFormatConstraint==CHROMA_420 || m_chromaFormatConstraint==CHROMA_400) && bUsingChromaQPTool, "CU Chroma QP adjustment cannot be used for 4:0:0 or 4:2:0 RExt profiles");
+      xConfirmPara(m_bitDepthConstraint != 16 && bUsingExtendedPrecision, "Extended precision can only be used in 16-bit RExt profiles");
+      if (!(m_chromaFormatConstraint == CHROMA_400 && m_bitDepthConstraint == 16) && m_chromaFormatConstraint!=CHROMA_444)
+      {
+        xConfirmPara(bUsingGeneralRExtTools, "Combination of tools and profiles are not possible in the specified RExt profile.");
+      }
+      if (!m_intraConstraintFlag && m_bitDepthConstraint==16 && m_chromaFormatConstraint==CHROMA_444 && 0)
+      {
+        fprintf(stderr, "********************************************************************************************************\n");
+        fprintf(stderr, "** WARNING: The RExt constraint flags describe a non standard combination (used for development only) **\n");
+        fprintf(stderr, "********************************************************************************************************\n");
+      }
+    }
+    else
+    {
+      xConfirmPara( m_chromaFormatConstraint != CHROMA_444, "chroma format constraint must be 4:4:4 in the High Throughput 4:4:4 16-bit Intra profile.");
+      xConfirmPara( m_bitDepthConstraint     != 16,         "bit depth constraint must be 4:4:4 in the High Throughput 4:4:4 16-bit Intra profile.");
+      xConfirmPara( m_intraConstraintFlag    != 1,          "intra constraint flag must be 1 in the High Throughput 4:4:4 16-bit Intra profile.");
+    }
+  }
+  else
+  {
+    xConfirmPara(m_bitDepthConstraint!=((m_profile==Profile::MAIN10)?10:8), "BitDepthConstraint must be 8 for MAIN profile and 10 for MAIN10 profile.");
+    xConfirmPara(m_chromaFormatConstraint!=CHROMA_420, "ChromaFormatConstraint must be 420 for non main-RExt profiles.");
+    xConfirmPara(m_intraConstraintFlag==true, "IntraConstraintFlag must be false for non main_RExt profiles.");
+    xConfirmPara(m_lowerBitRateConstraintFlag==false, "LowerBitrateConstraintFlag must be true for non main-RExt profiles.");
+
+    xConfirmPara(m_useCrossComponentPrediction==true, "CrossComponentPrediction must not be used for non main-RExt profiles.");
+    xConfirmPara(m_transformSkipLog2MaxSize!=2, "Transform Skip Log2 Max Size must be 2 for V1 profiles.");
+    xConfirmPara(m_useResidualRotation==true, "UseResidualRotation must not be enabled for non main-RExt profiles.");
+    xConfirmPara(m_useSingleSignificanceMapContext==true, "UseSingleSignificanceMapContext must not be enabled for non main-RExt profiles.");
+    xConfirmPara(m_useResidualDPCM[RDPCM_SIGNAL_IMPLICIT]==true, "ImplicitResidualDPCM must not be enabled for non main-RExt profiles.");
+    xConfirmPara(m_useResidualDPCM[RDPCM_SIGNAL_EXPLICIT]==true, "ExplicitResidualDPCM must not be enabled for non main-RExt profiles.");
+    xConfirmPara(m_useGolombRiceParameterAdaptation==true, "GolombRiceParameterAdaption must not be enabled for non main-RExt profiles.");
+    xConfirmPara(m_useExtendedPrecision==true, "UseExtendedPrecision must not be enabled for non main-RExt profiles.");
+    xConfirmPara(m_useHighPrecisionPredictionWeighting==true, "UseHighPrecisionPredictionWeighting must not be enabled for non main-RExt profiles.");
+    xConfirmPara(m_enableIntraReferenceSmoothing==false, "EnableIntraReferenceSmoothing must be enabled for non main-RExt profiles.");
+    xConfirmPara(m_alignCABACBeforeBypass, "AlignCABACBeforeBypass cannot be enabled for non main-RExt profiles.");
+  }
+
+  // check range of parameters
+  xConfirmPara( m_inputBitDepth[CHANNEL_TYPE_LUMA  ] < 8,                                   "InputBitDepth must be at least 8" );
+  xConfirmPara( m_inputBitDepth[CHANNEL_TYPE_CHROMA] < 8,                                   "InputBitDepthC must be at least 8" );
+
+#if !RExt__HIGH_BIT_DEPTH_SUPPORT
+  if (m_useExtendedPrecision)
+  {
+    for (UInt channelType = 0; channelType < MAX_NUM_CHANNEL_TYPE; channelType++)
+    {
+      xConfirmPara((m_internalBitDepth[channelType] > 8) , "Model is not configured to support high enough internal accuracies - enable RExt__HIGH_BIT_DEPTH_SUPPORT to use increased precision internal data types etc...");
+    }
+  }
+  else
+  {
+    for (UInt channelType = 0; channelType < MAX_NUM_CHANNEL_TYPE; channelType++)
+    {
+      xConfirmPara((m_internalBitDepth[channelType] > 12) , "Model is not configured to support high enough internal accuracies - enable RExt__HIGH_BIT_DEPTH_SUPPORT to use increased precision internal data types etc...");
+    }
+  }
+#endif
+
+  xConfirmPara( (m_MSBExtendedBitDepth[CHANNEL_TYPE_LUMA  ] < m_inputBitDepth[CHANNEL_TYPE_LUMA  ]), "MSB-extended bit depth for luma channel (--MSBExtendedBitDepth) must be greater than or equal to input bit depth for luma channel (--InputBitDepth)" );
+  xConfirmPara( (m_MSBExtendedBitDepth[CHANNEL_TYPE_CHROMA] < m_inputBitDepth[CHANNEL_TYPE_CHROMA]), "MSB-extended bit depth for chroma channel (--MSBExtendedBitDepthC) must be greater than or equal to input bit depth for chroma channel (--InputBitDepthC)" );
+
+  xConfirmPara( m_saoOffsetBitShift[CHANNEL_TYPE_LUMA]   > (m_internalBitDepth[CHANNEL_TYPE_LUMA  ]<10?0:(m_internalBitDepth[CHANNEL_TYPE_LUMA  ]-10)), "SaoLumaOffsetBitShift must be in the range of 0 to InternalBitDepth-10, inclusive");
+  xConfirmPara( m_saoOffsetBitShift[CHANNEL_TYPE_CHROMA] > (m_internalBitDepth[CHANNEL_TYPE_CHROMA]<10?0:(m_internalBitDepth[CHANNEL_TYPE_CHROMA]-10)), "SaoChromaOffsetBitShift must be in the range of 0 to InternalBitDepthC-10, inclusive");
+
+  xConfirmPara( m_chromaFormatIDC >= NUM_CHROMA_FORMAT,                                     "ChromaFormatIDC must be either 400, 420, 422 or 444" );
+  std::string sTempIPCSC="InputColourSpaceConvert must be empty, "+getListOfColourSpaceConverts(true);
+  xConfirmPara( m_inputColourSpaceConvert >= NUMBER_INPUT_COLOUR_SPACE_CONVERSIONS,         sTempIPCSC.c_str() );
+  xConfirmPara( m_InputChromaFormatIDC >= NUM_CHROMA_FORMAT,                                "InputChromaFormatIDC must be either 400, 420, 422 or 444" );
+  xConfirmPara( m_iFrameRate <= 0,                                                          "Frame rate must be more than 1" );
+  xConfirmPara( m_framesToBeEncoded <= 0,                                                   "Total Number Of Frames encoded must be more than 0" );
+  xConfirmPara( m_iGOPSize < 1 ,                                                            "GOP Size must be greater or equal to 1" );
+  xConfirmPara( m_iGOPSize > 1 &&  m_iGOPSize % 2,                                          "GOP Size must be a multiple of 2, if GOP Size is greater than 1" );
+  xConfirmPara( (m_iIntraPeriod > 0 && m_iIntraPeriod < m_iGOPSize) || m_iIntraPeriod == 0, "Intra period must be more than GOP size, or -1 , not 0" );
+#if ALLOW_RECOVERY_POINT_AS_RAP
+  xConfirmPara( m_iDecodingRefreshType < 0 || m_iDecodingRefreshType > 3,                   "Decoding Refresh Type must be comprised between 0 and 3 included" );
+  if(m_iDecodingRefreshType == 3)
+  {
+    xConfirmPara( !m_recoveryPointSEIEnabled,                                               "When using RecoveryPointSEI messages as RA points, recoveryPointSEI must be enabled" );
+  }
+#else
+  xConfirmPara( m_iDecodingRefreshType < 0 || m_iDecodingRefreshType > 2,                   "Decoding Refresh Type must be equal to 0, 1 or 2" );
+#endif
+
+  if (m_isField)
+  {
+    if (!m_pictureTimingSEIEnabled)
+    {
+      fprintf(stderr, "****************************************************************************\n");
+      fprintf(stderr, "** WARNING: Picture Timing SEI should be enabled for field coding!        **\n");
+      fprintf(stderr, "****************************************************************************\n");
+    }
+  }
+  if ( m_bufferingPeriodSEIEnabled && !m_activeParameterSetsSEIEnabled)
+  {
+    fprintf(stderr, "****************************************************************************\n");
+    fprintf(stderr, "** WARNING: using buffering period SEI requires SPS activation with       **\n"); 
+    fprintf(stderr, "**          active parameter sets SEI. Enabling active parameter sets SEI **\n");
+    fprintf(stderr, "****************************************************************************\n");
+    m_activeParameterSetsSEIEnabled = 1;
+  }
+  if ( m_pictureTimingSEIEnabled && !m_activeParameterSetsSEIEnabled)
+  {
+    fprintf(stderr, "****************************************************************************\n");
+    fprintf(stderr, "** WARNING: using picture timing SEI requires SPS activation with active  **\n"); 
+    fprintf(stderr, "**          parameter sets SEI. Enabling active parameter sets SEI.       **\n");
+    fprintf(stderr, "****************************************************************************\n");
+    m_activeParameterSetsSEIEnabled = 1;
+  }
+
+  if(m_useCrossComponentPrediction && (m_chromaFormatIDC != CHROMA_444))
+  {
+    fprintf(stderr, "****************************************************************************\n");
+    fprintf(stderr, "** WARNING: Cross-component prediction is specified for 4:4:4 format only **\n");
+    fprintf(stderr, "****************************************************************************\n");
+
+    m_useCrossComponentPrediction = false;
+  }
+
+  if ( m_CUTransquantBypassFlagForce && m_bUseHADME)
+  {
+    fprintf(stderr, "****************************************************************************\n");
+    fprintf(stderr, "** WARNING: --HadamardME has been disabled due to the enabling of         **\n");
+    fprintf(stderr, "**          --CUTransquantBypassFlagForce                                 **\n");
+    fprintf(stderr, "****************************************************************************\n");
+    m_bUseHADME = false; // this has been disabled so that the lambda is calculated slightly differently for lossless modes (as a result of JCTVC-R0104).
+  }
+
+  xConfirmPara (m_transformSkipLog2MaxSize < 2, "Transform Skip Log2 Max Size must be at least 2 (4x4)");
+
+  if (m_transformSkipLog2MaxSize!=2 && m_useTransformSkipFast)
+  {
+    fprintf(stderr, "***************************************************************************\n");
+    fprintf(stderr, "** WARNING: Transform skip fast is enabled (which only tests NxN splits),**\n");
+    fprintf(stderr, "**          but transform skip log2 max size is not 2 (4x4)              **\n");
+    fprintf(stderr, "**          It may be better to disable transform skip fast mode         **\n");
+    fprintf(stderr, "***************************************************************************\n");
+  }
+
+  xConfirmPara( m_iQP <  -6 * (m_internalBitDepth[CHANNEL_TYPE_LUMA] - 8) || m_iQP > 51,    "QP exceeds supported range (-QpBDOffsety to 51)" );
+  xConfirmPara( m_loopFilterBetaOffsetDiv2 < -6 || m_loopFilterBetaOffsetDiv2 > 6,        "Loop Filter Beta Offset div. 2 exceeds supported range (-6 to 6)");
+  xConfirmPara( m_loopFilterTcOffsetDiv2 < -6 || m_loopFilterTcOffsetDiv2 > 6,            "Loop Filter Tc Offset div. 2 exceeds supported range (-6 to 6)");
+  xConfirmPara( m_iFastSearch < 0 || m_iFastSearch > 2,                                     "Fast Search Mode is not supported value (0:Full search  1:Diamond  2:PMVFAST)" );
+  xConfirmPara( m_iSearchRange < 0 ,                                                        "Search Range must be more than 0" );
+  xConfirmPara( m_bipredSearchRange < 0 ,                                                   "Search Range must be more than 0" );
+  xConfirmPara( m_iMaxDeltaQP > 7,                                                          "Absolute Delta QP exceeds supported range (0 to 7)" );
+  xConfirmPara( m_iMaxCuDQPDepth > m_uiMaxCUDepth - 1,                                          "Absolute depth for a minimum CuDQP exceeds maximum coding unit depth" );
+
+  xConfirmPara( m_cbQpOffset < -12,   "Min. Chroma Cb QP Offset is -12" );
+  xConfirmPara( m_cbQpOffset >  12,   "Max. Chroma Cb QP Offset is  12" );
+  xConfirmPara( m_crQpOffset < -12,   "Min. Chroma Cr QP Offset is -12" );
+  xConfirmPara( m_crQpOffset >  12,   "Max. Chroma Cr QP Offset is  12" );
+
+  xConfirmPara( m_iQPAdaptationRange <= 0,                                                  "QP Adaptation Range must be more than 0" );
+  if (m_iDecodingRefreshType == 2)
+  {
+    xConfirmPara( m_iIntraPeriod > 0 && m_iIntraPeriod <= m_iGOPSize ,                      "Intra period must be larger than GOP size for periodic IDR pictures");
+  }
+  xConfirmPara( (m_uiMaxCUWidth  >> m_uiMaxCUDepth) < 4,                                    "Minimum partition width size should be larger than or equal to 8");
+  xConfirmPara( (m_uiMaxCUHeight >> m_uiMaxCUDepth) < 4,                                    "Minimum partition height size should be larger than or equal to 8");
+  xConfirmPara( m_uiMaxCUWidth < 16,                                                        "Maximum partition width size should be larger than or equal to 16");
+  xConfirmPara( m_uiMaxCUHeight < 16,                                                       "Maximum partition height size should be larger than or equal to 16");
+  xConfirmPara( (m_iSourceWidth  % (m_uiMaxCUWidth  >> (m_uiMaxCUDepth-1)))!=0,             "Resulting coded frame width must be a multiple of the minimum CU size");
+  xConfirmPara( (m_iSourceHeight % (m_uiMaxCUHeight >> (m_uiMaxCUDepth-1)))!=0,             "Resulting coded frame height must be a multiple of the minimum CU size");
+
+  xConfirmPara( m_uiQuadtreeTULog2MinSize < 2,                                        "QuadtreeTULog2MinSize must be 2 or greater.");
+  xConfirmPara( m_uiQuadtreeTULog2MaxSize > 5,                                        "QuadtreeTULog2MaxSize must be 5 or smaller.");
+  xConfirmPara( m_uiQuadtreeTULog2MaxSize < m_uiQuadtreeTULog2MinSize,                "QuadtreeTULog2MaxSize must be greater than or equal to m_uiQuadtreeTULog2MinSize.");
+  xConfirmPara( (1<<m_uiQuadtreeTULog2MaxSize) > m_uiMaxCUWidth,                      "QuadtreeTULog2MaxSize must be log2(maxCUSize) or smaller.");
+  xConfirmPara( ( 1 << m_uiQuadtreeTULog2MinSize ) >= ( m_uiMaxCUWidth  >> (m_uiMaxCUDepth-1)), "QuadtreeTULog2MinSize must not be greater than or equal to minimum CU size" );
+  xConfirmPara( ( 1 << m_uiQuadtreeTULog2MinSize ) >= ( m_uiMaxCUHeight >> (m_uiMaxCUDepth-1)), "QuadtreeTULog2MinSize must not be greater than or equal to minimum CU size" );
+  xConfirmPara( m_uiQuadtreeTUMaxDepthInter < 1,                                                         "QuadtreeTUMaxDepthInter must be greater than or equal to 1" );
+  xConfirmPara( m_uiMaxCUWidth < ( 1 << (m_uiQuadtreeTULog2MinSize + m_uiQuadtreeTUMaxDepthInter - 1) ), "QuadtreeTUMaxDepthInter must be less than or equal to the difference between log2(maxCUSize) and QuadtreeTULog2MinSize plus 1" );
+  xConfirmPara( m_uiQuadtreeTUMaxDepthIntra < 1,                                                         "QuadtreeTUMaxDepthIntra must be greater than or equal to 1" );
+  xConfirmPara( m_uiMaxCUWidth < ( 1 << (m_uiQuadtreeTULog2MinSize + m_uiQuadtreeTUMaxDepthIntra - 1) ), "QuadtreeTUMaxDepthInter must be less than or equal to the difference between log2(maxCUSize) and QuadtreeTULog2MinSize plus 1" );
+
+  xConfirmPara(  m_maxNumMergeCand < 1,  "MaxNumMergeCand must be 1 or greater.");
+  xConfirmPara(  m_maxNumMergeCand > 5,  "MaxNumMergeCand must be 5 or smaller.");
+
+#if ADAPTIVE_QP_SELECTION
+  xConfirmPara( m_bUseAdaptQpSelect == true && m_iQP < 0,                                              "AdaptiveQpSelection must be disabled when QP < 0.");
+  xConfirmPara( m_bUseAdaptQpSelect == true && (m_cbQpOffset !=0 || m_crQpOffset != 0 ),               "AdaptiveQpSelection must be disabled when ChromaQpOffset is not equal to 0.");
+#endif
+
+  if( m_usePCM)
+  {
+    for (UInt channelType = 0; channelType < MAX_NUM_CHANNEL_TYPE; channelType++)
+    {
+      xConfirmPara(((m_MSBExtendedBitDepth[channelType] > m_internalBitDepth[channelType]) && m_bPCMInputBitDepthFlag), "PCM bit depth cannot be greater than internal bit depth (PCMInputBitDepthFlag cannot be used when InputBitDepth or MSBExtendedBitDepth > InternalBitDepth)");
+    }
+    xConfirmPara(  m_uiPCMLog2MinSize < 3,                                      "PCMLog2MinSize must be 3 or greater.");
+    xConfirmPara(  m_uiPCMLog2MinSize > 5,                                      "PCMLog2MinSize must be 5 or smaller.");
+    xConfirmPara(  m_pcmLog2MaxSize > 5,                                        "PCMLog2MaxSize must be 5 or smaller.");
+    xConfirmPara(  m_pcmLog2MaxSize < m_uiPCMLog2MinSize,                       "PCMLog2MaxSize must be equal to or greater than m_uiPCMLog2MinSize.");
+  }
+
+  xConfirmPara( m_sliceMode < 0 || m_sliceMode > 3, "SliceMode exceeds supported range (0 to 3)" );
+  if (m_sliceMode!=0)
+  {
+    xConfirmPara( m_sliceArgument < 1 ,         "SliceArgument should be larger than or equal to 1" );
+  }
+  xConfirmPara( m_sliceSegmentMode < 0 || m_sliceSegmentMode > 3, "SliceSegmentMode exceeds supported range (0 to 3)" );
+  if (m_sliceSegmentMode!=0)
+  {
+    xConfirmPara( m_sliceSegmentArgument < 1 ,         "SliceSegmentArgument should be larger than or equal to 1" );
+  }
+
+  Bool tileFlag = (m_numTileColumnsMinus1 > 0 || m_numTileRowsMinus1 > 0 );
+  if (m_profile!=Profile::HIGHTHROUGHPUTREXT)
+  {
+    xConfirmPara( tileFlag && m_iWaveFrontSynchro,            "Tile and Wavefront can not be applied together, except in the High Throughput Intra 4:4:4 16 profile");
+  }
+
+  xConfirmPara( m_iSourceWidth  % TComSPS::getWinUnitX(m_chromaFormatIDC) != 0, "Picture width must be an integer multiple of the specified chroma subsampling");
+  xConfirmPara( m_iSourceHeight % TComSPS::getWinUnitY(m_chromaFormatIDC) != 0, "Picture height must be an integer multiple of the specified chroma subsampling");
+
+  xConfirmPara( m_aiPad[0] % TComSPS::getWinUnitX(m_chromaFormatIDC) != 0, "Horizontal padding must be an integer multiple of the specified chroma subsampling");
+  xConfirmPara( m_aiPad[1] % TComSPS::getWinUnitY(m_chromaFormatIDC) != 0, "Vertical padding must be an integer multiple of the specified chroma subsampling");
+
+  xConfirmPara( m_confWinLeft   % TComSPS::getWinUnitX(m_chromaFormatIDC) != 0, "Left conformance window offset must be an integer multiple of the specified chroma subsampling");
+  xConfirmPara( m_confWinRight  % TComSPS::getWinUnitX(m_chromaFormatIDC) != 0, "Right conformance window offset must be an integer multiple of the specified chroma subsampling");
+  xConfirmPara( m_confWinTop    % TComSPS::getWinUnitY(m_chromaFormatIDC) != 0, "Top conformance window offset must be an integer multiple of the specified chroma subsampling");
+  xConfirmPara( m_confWinBottom % TComSPS::getWinUnitY(m_chromaFormatIDC) != 0, "Bottom conformance window offset must be an integer multiple of the specified chroma subsampling");
+
+  xConfirmPara( m_defaultDisplayWindowFlag && !m_vuiParametersPresentFlag, "VUI needs to be enabled for default display window");
+
+  if (m_defaultDisplayWindowFlag)
+  {
+    xConfirmPara( m_defDispWinLeftOffset   % TComSPS::getWinUnitX(m_chromaFormatIDC) != 0, "Left default display window offset must be an integer multiple of the specified chroma subsampling");
+    xConfirmPara( m_defDispWinRightOffset  % TComSPS::getWinUnitX(m_chromaFormatIDC) != 0, "Right default display window offset must be an integer multiple of the specified chroma subsampling");
+    xConfirmPara( m_defDispWinTopOffset    % TComSPS::getWinUnitY(m_chromaFormatIDC) != 0, "Top default display window offset must be an integer multiple of the specified chroma subsampling");
+    xConfirmPara( m_defDispWinBottomOffset % TComSPS::getWinUnitY(m_chromaFormatIDC) != 0, "Bottom default display window offset must be an integer multiple of the specified chroma subsampling");
+  }
+
+  // max CU width and height should be power of 2
+  UInt ui = m_uiMaxCUWidth;
+  while(ui)
+  {
+    ui >>= 1;
+    if( (ui & 1) == 1)
+      xConfirmPara( ui != 1 , "Width should be 2^n");
+  }
+  ui = m_uiMaxCUHeight;
+  while(ui)
+  {
+    ui >>= 1;
+    if( (ui & 1) == 1)
+      xConfirmPara( ui != 1 , "Height should be 2^n");
+  }
+
+  /* if this is an intra-only sequence, ie IntraPeriod=1, don't verify the GOP structure
+   * This permits the ability to omit a GOP structure specification */
+  if (m_iIntraPeriod == 1 && m_GOPList[0].m_POC == -1)
+  {
+    m_GOPList[0] = GOPEntry();
+    m_GOPList[0].m_QPFactor = 1;
+    m_GOPList[0].m_betaOffsetDiv2 = 0;
+    m_GOPList[0].m_tcOffsetDiv2 = 0;
+    m_GOPList[0].m_POC = 1;
+    m_GOPList[0].m_numRefPicsActive = 4;
+  }
+  else
+  {
+    xConfirmPara( m_intraConstraintFlag, "IntraConstraintFlag cannot be 1 for inter sequences");
+  }
+
+  Bool verifiedGOP=false;
+  Bool errorGOP=false;
+  Int checkGOP=1;
+  Int numRefs = m_isField ? 2 : 1;
+  Int refList[MAX_NUM_REF_PICS+1];
+  refList[0]=0;
+  if(m_isField)
+  {
+    refList[1] = 1;
+  }
+  Bool isOK[MAX_GOP];
+  for(Int i=0; i<MAX_GOP; i++)
+  {
+    isOK[i]=false;
+  }
+  Int numOK=0;
+  xConfirmPara( m_iIntraPeriod >=0&&(m_iIntraPeriod%m_iGOPSize!=0), "Intra period must be a multiple of GOPSize, or -1" );
+
+  for(Int i=0; i<m_iGOPSize; i++)
+  {
+    if(m_GOPList[i].m_POC==m_iGOPSize)
+    {
+      xConfirmPara( m_GOPList[i].m_temporalId!=0 , "The last frame in each GOP must have temporal ID = 0 " );
+    }
+  }
+
+  if ( (m_iIntraPeriod != 1) && !m_loopFilterOffsetInPPS && m_DeblockingFilterControlPresent && (!m_bLoopFilterDisable) )
+  {
+    for(Int i=0; i<m_iGOPSize; i++)
+    {
+      xConfirmPara( (m_GOPList[i].m_betaOffsetDiv2 + m_loopFilterBetaOffsetDiv2) < -6 || (m_GOPList[i].m_betaOffsetDiv2 + m_loopFilterBetaOffsetDiv2) > 6, "Loop Filter Beta Offset div. 2 for one of the GOP entries exceeds supported range (-6 to 6)" );
+      xConfirmPara( (m_GOPList[i].m_tcOffsetDiv2 + m_loopFilterTcOffsetDiv2) < -6 || (m_GOPList[i].m_tcOffsetDiv2 + m_loopFilterTcOffsetDiv2) > 6, "Loop Filter Tc Offset div. 2 for one of the GOP entries exceeds supported range (-6 to 6)" );
+    }
+  }
+
+  m_extraRPSs=0;
+  //start looping through frames in coding order until we can verify that the GOP structure is correct.
+  while(!verifiedGOP&&!errorGOP)
+  {
+    Int curGOP = (checkGOP-1)%m_iGOPSize;
+    Int curPOC = ((checkGOP-1)/m_iGOPSize)*m_iGOPSize + m_GOPList[curGOP].m_POC;
+    if(m_GOPList[curGOP].m_POC<0)
+    {
+      printf("\nError: found fewer Reference Picture Sets than GOPSize\n");
+      errorGOP=true;
+    }
+    else
+    {
+      //check that all reference pictures are available, or have a POC < 0 meaning they might be available in the next GOP.
+      Bool beforeI = false;
+      for(Int i = 0; i< m_GOPList[curGOP].m_numRefPics; i++)
+      {
+        Int absPOC = curPOC+m_GOPList[curGOP].m_referencePics[i];
+        if(absPOC < 0)
+        {
+          beforeI=true;
+        }
+        else
+        {
+          Bool found=false;
+          for(Int j=0; j<numRefs; j++)
+          {
+            if(refList[j]==absPOC)
+            {
+              found=true;
+              for(Int k=0; k<m_iGOPSize; k++)
+              {
+                if(absPOC%m_iGOPSize == m_GOPList[k].m_POC%m_iGOPSize)
+                {
+                  if(m_GOPList[k].m_temporalId==m_GOPList[curGOP].m_temporalId)
+                  {
+                    m_GOPList[k].m_refPic = true;
+                  }
+                  m_GOPList[curGOP].m_usedByCurrPic[i]=m_GOPList[k].m_temporalId<=m_GOPList[curGOP].m_temporalId;
+                }
+              }
+            }
+          }
+          if(!found)
+          {
+            printf("\nError: ref pic %d is not available for GOP frame %d\n",m_GOPList[curGOP].m_referencePics[i],curGOP+1);
+            errorGOP=true;
+          }
+        }
+      }
+      if(!beforeI&&!errorGOP)
+      {
+        //all ref frames were present
+        if(!isOK[curGOP])
+        {
+          numOK++;
+          isOK[curGOP]=true;
+          if(numOK==m_iGOPSize)
+          {
+            verifiedGOP=true;
+          }
+        }
+      }
+      else
+      {
+        //create a new GOPEntry for this frame containing all the reference pictures that were available (POC > 0)
+        m_GOPList[m_iGOPSize+m_extraRPSs]=m_GOPList[curGOP];
+        Int newRefs=0;
+        for(Int i = 0; i< m_GOPList[curGOP].m_numRefPics; i++)
+        {
+          Int absPOC = curPOC+m_GOPList[curGOP].m_referencePics[i];
+          if(absPOC>=0)
+          {
+            m_GOPList[m_iGOPSize+m_extraRPSs].m_referencePics[newRefs]=m_GOPList[curGOP].m_referencePics[i];
+            m_GOPList[m_iGOPSize+m_extraRPSs].m_usedByCurrPic[newRefs]=m_GOPList[curGOP].m_usedByCurrPic[i];
+            newRefs++;
+          }
+        }
+        Int numPrefRefs = m_GOPList[curGOP].m_numRefPicsActive;
+
+        for(Int offset = -1; offset>-checkGOP; offset--)
+        {
+          //step backwards in coding order and include any extra available pictures we might find useful to replace the ones with POC < 0.
+          Int offGOP = (checkGOP-1+offset)%m_iGOPSize;
+          Int offPOC = ((checkGOP-1+offset)/m_iGOPSize)*m_iGOPSize + m_GOPList[offGOP].m_POC;
+          if(offPOC>=0&&m_GOPList[offGOP].m_temporalId<=m_GOPList[curGOP].m_temporalId)
+          {
+            Bool newRef=false;
+            for(Int i=0; i<numRefs; i++)
+            {
+              if(refList[i]==offPOC)
+              {
+                newRef=true;
+              }
+            }
+            for(Int i=0; i<newRefs; i++)
+            {
+              if(m_GOPList[m_iGOPSize+m_extraRPSs].m_referencePics[i]==offPOC-curPOC)
+              {
+                newRef=false;
+              }
+            }
+            if(newRef)
+            {
+              Int insertPoint=newRefs;
+              //this picture can be added, find appropriate place in list and insert it.
+              if(m_GOPList[offGOP].m_temporalId==m_GOPList[curGOP].m_temporalId)
+              {
+                m_GOPList[offGOP].m_refPic = true;
+              }
+              for(Int j=0; j<newRefs; j++)
+              {
+                if(m_GOPList[m_iGOPSize+m_extraRPSs].m_referencePics[j]<offPOC-curPOC||m_GOPList[m_iGOPSize+m_extraRPSs].m_referencePics[j]>0)
+                {
+                  insertPoint = j;
+                  break;
+                }
+              }
+              Int prev = offPOC-curPOC;
+              Int prevUsed = m_GOPList[offGOP].m_temporalId<=m_GOPList[curGOP].m_temporalId;
+              for(Int j=insertPoint; j<newRefs+1; j++)
+              {
+                Int newPrev = m_GOPList[m_iGOPSize+m_extraRPSs].m_referencePics[j];
+                Int newUsed = m_GOPList[m_iGOPSize+m_extraRPSs].m_usedByCurrPic[j];
+                m_GOPList[m_iGOPSize+m_extraRPSs].m_referencePics[j]=prev;
+                m_GOPList[m_iGOPSize+m_extraRPSs].m_usedByCurrPic[j]=prevUsed;
+                prevUsed=newUsed;
+                prev=newPrev;
+              }
+              newRefs++;
+            }
+          }
+          if(newRefs>=numPrefRefs)
+          {
+            break;
+          }
+        }
+        m_GOPList[m_iGOPSize+m_extraRPSs].m_numRefPics=newRefs;
+        m_GOPList[m_iGOPSize+m_extraRPSs].m_POC = curPOC;
+        if (m_extraRPSs == 0)
+        {
+          m_GOPList[m_iGOPSize+m_extraRPSs].m_interRPSPrediction = 0;
+          m_GOPList[m_iGOPSize+m_extraRPSs].m_numRefIdc = 0;
+        }
+        else
+        {
+          Int rIdx =  m_iGOPSize + m_extraRPSs - 1;
+          Int refPOC = m_GOPList[rIdx].m_POC;
+          Int refPics = m_GOPList[rIdx].m_numRefPics;
+          Int newIdc=0;
+          for(Int i = 0; i<= refPics; i++)
+          {
+            Int deltaPOC = ((i != refPics)? m_GOPList[rIdx].m_referencePics[i] : 0);  // check if the reference abs POC is >= 0
+            Int absPOCref = refPOC+deltaPOC;
+            Int refIdc = 0;
+            for (Int j = 0; j < m_GOPList[m_iGOPSize+m_extraRPSs].m_numRefPics; j++)
+            {
+              if ( (absPOCref - curPOC) == m_GOPList[m_iGOPSize+m_extraRPSs].m_referencePics[j])
+              {
+                if (m_GOPList[m_iGOPSize+m_extraRPSs].m_usedByCurrPic[j])
+                {
+                  refIdc = 1;
+                }
+                else
+                {
+                  refIdc = 2;
+                }
+              }
+            }
+            m_GOPList[m_iGOPSize+m_extraRPSs].m_refIdc[newIdc]=refIdc;
+            newIdc++;
+          }
+          m_GOPList[m_iGOPSize+m_extraRPSs].m_interRPSPrediction = 1;
+          m_GOPList[m_iGOPSize+m_extraRPSs].m_numRefIdc = newIdc;
+          m_GOPList[m_iGOPSize+m_extraRPSs].m_deltaRPS = refPOC - m_GOPList[m_iGOPSize+m_extraRPSs].m_POC;
+        }
+        curGOP=m_iGOPSize+m_extraRPSs;
+        m_extraRPSs++;
+      }
+      numRefs=0;
+      for(Int i = 0; i< m_GOPList[curGOP].m_numRefPics; i++)
+      {
+        Int absPOC = curPOC+m_GOPList[curGOP].m_referencePics[i];
+        if(absPOC >= 0)
+        {
+          refList[numRefs]=absPOC;
+          numRefs++;
+        }
+      }
+      refList[numRefs]=curPOC;
+      numRefs++;
+    }
+    checkGOP++;
+  }
+  xConfirmPara(errorGOP,"Invalid GOP structure given");
+  m_maxTempLayer = 1;
+  for(Int i=0; i<m_iGOPSize; i++)
+  {
+    if(m_GOPList[i].m_temporalId >= m_maxTempLayer)
+    {
+      m_maxTempLayer = m_GOPList[i].m_temporalId+1;
+    }
+    xConfirmPara(m_GOPList[i].m_sliceType!='B' && m_GOPList[i].m_sliceType!='P' && m_GOPList[i].m_sliceType!='I', "Slice type must be equal to B or P or I");
+  }
+  for(Int i=0; i<MAX_TLAYER; i++)
+  {
+    m_numReorderPics[i] = 0;
+    m_maxDecPicBuffering[i] = 1;
+  }
+  for(Int i=0; i<m_iGOPSize; i++)
+  {
+    if(m_GOPList[i].m_numRefPics+1 > m_maxDecPicBuffering[m_GOPList[i].m_temporalId])
+    {
+      m_maxDecPicBuffering[m_GOPList[i].m_temporalId] = m_GOPList[i].m_numRefPics + 1;
+    }
+    Int highestDecodingNumberWithLowerPOC = 0;
+    for(Int j=0; j<m_iGOPSize; j++)
+    {
+      if(m_GOPList[j].m_POC <= m_GOPList[i].m_POC)
+      {
+        highestDecodingNumberWithLowerPOC = j;
+      }
+    }
+    Int numReorder = 0;
+    for(Int j=0; j<highestDecodingNumberWithLowerPOC; j++)
+    {
+      if(m_GOPList[j].m_temporalId <= m_GOPList[i].m_temporalId &&
+        m_GOPList[j].m_POC > m_GOPList[i].m_POC)
+      {
+        numReorder++;
+      }
+    }
+    if(numReorder > m_numReorderPics[m_GOPList[i].m_temporalId])
+    {
+      m_numReorderPics[m_GOPList[i].m_temporalId] = numReorder;
+    }
+  }
+  for(Int i=0; i<MAX_TLAYER-1; i++)
+  {
+    // a lower layer can not have higher value of m_numReorderPics than a higher layer
+    if(m_numReorderPics[i+1] < m_numReorderPics[i])
+    {
+      m_numReorderPics[i+1] = m_numReorderPics[i];
+    }
+    // the value of num_reorder_pics[ i ] shall be in the range of 0 to max_dec_pic_buffering[ i ] - 1, inclusive
+    if(m_numReorderPics[i] > m_maxDecPicBuffering[i] - 1)
+    {
+      m_maxDecPicBuffering[i] = m_numReorderPics[i] + 1;
+    }
+    // a lower layer can not have higher value of m_uiMaxDecPicBuffering than a higher layer
+    if(m_maxDecPicBuffering[i+1] < m_maxDecPicBuffering[i])
+    {
+      m_maxDecPicBuffering[i+1] = m_maxDecPicBuffering[i];
+    }
+  }
+
+  // the value of num_reorder_pics[ i ] shall be in the range of 0 to max_dec_pic_buffering[ i ] -  1, inclusive
+  if(m_numReorderPics[MAX_TLAYER-1] > m_maxDecPicBuffering[MAX_TLAYER-1] - 1)
+  {
+    m_maxDecPicBuffering[MAX_TLAYER-1] = m_numReorderPics[MAX_TLAYER-1] + 1;
+  }
+
+  if(m_vuiParametersPresentFlag && m_bitstreamRestrictionFlag)
+  {
+    Int PicSizeInSamplesY =  m_iSourceWidth * m_iSourceHeight;
+    if(tileFlag)
+    {
+      Int maxTileWidth = 0;
+      Int maxTileHeight = 0;
+      Int widthInCU = (m_iSourceWidth % m_uiMaxCUWidth) ? m_iSourceWidth/m_uiMaxCUWidth + 1: m_iSourceWidth/m_uiMaxCUWidth;
+      Int heightInCU = (m_iSourceHeight % m_uiMaxCUHeight) ? m_iSourceHeight/m_uiMaxCUHeight + 1: m_iSourceHeight/m_uiMaxCUHeight;
+      if(m_tileUniformSpacingFlag)
+      {
+        maxTileWidth = m_uiMaxCUWidth*((widthInCU+m_numTileColumnsMinus1)/(m_numTileColumnsMinus1+1));
+        maxTileHeight = m_uiMaxCUHeight*((heightInCU+m_numTileRowsMinus1)/(m_numTileRowsMinus1+1));
+        // if only the last tile-row is one treeblock higher than the others
+        // the maxTileHeight becomes smaller if the last row of treeblocks has lower height than the others
+        if(!((heightInCU-1)%(m_numTileRowsMinus1+1)))
+        {
+          maxTileHeight = maxTileHeight - m_uiMaxCUHeight + (m_iSourceHeight % m_uiMaxCUHeight);
+        }
+        // if only the last tile-column is one treeblock wider than the others
+        // the maxTileWidth becomes smaller if the last column of treeblocks has lower width than the others
+        if(!((widthInCU-1)%(m_numTileColumnsMinus1+1)))
+        {
+          maxTileWidth = maxTileWidth - m_uiMaxCUWidth + (m_iSourceWidth % m_uiMaxCUWidth);
+        }
+      }
+      else // not uniform spacing
+      {
+        if(m_numTileColumnsMinus1<1)
+        {
+          maxTileWidth = m_iSourceWidth;
+        }
+        else
+        {
+          Int accColumnWidth = 0;
+          for(Int col=0; col<(m_numTileColumnsMinus1); col++)
+          {
+            maxTileWidth = m_tileColumnWidth[col]>maxTileWidth ? m_tileColumnWidth[col]:maxTileWidth;
+            accColumnWidth += m_tileColumnWidth[col];
+          }
+          maxTileWidth = (widthInCU-accColumnWidth)>maxTileWidth ? m_uiMaxCUWidth*(widthInCU-accColumnWidth):m_uiMaxCUWidth*maxTileWidth;
+        }
+        if(m_numTileRowsMinus1<1)
+        {
+          maxTileHeight = m_iSourceHeight;
+        }
+        else
+        {
+          Int accRowHeight = 0;
+          for(Int row=0; row<(m_numTileRowsMinus1); row++)
+          {
+            maxTileHeight = m_tileRowHeight[row]>maxTileHeight ? m_tileRowHeight[row]:maxTileHeight;
+            accRowHeight += m_tileRowHeight[row];
+          }
+          maxTileHeight = (heightInCU-accRowHeight)>maxTileHeight ? m_uiMaxCUHeight*(heightInCU-accRowHeight):m_uiMaxCUHeight*maxTileHeight;
+        }
+      }
+      Int maxSizeInSamplesY = maxTileWidth*maxTileHeight;
+      m_minSpatialSegmentationIdc = 4*PicSizeInSamplesY/maxSizeInSamplesY-4;
+    }
+    else if(m_iWaveFrontSynchro)
+    {
+      m_minSpatialSegmentationIdc = 4*PicSizeInSamplesY/((2*m_iSourceHeight+m_iSourceWidth)*m_uiMaxCUHeight)-4;
+    }
+    else if(m_sliceMode == FIXED_NUMBER_OF_CTU)
+    {
+      m_minSpatialSegmentationIdc = 4*PicSizeInSamplesY/(m_sliceArgument*m_uiMaxCUWidth*m_uiMaxCUHeight)-4;
+    }
+    else
+    {
+      m_minSpatialSegmentationIdc = 0;
+    }
+  }
+
+  xConfirmPara( m_iWaveFrontSynchro < 0, "WaveFrontSynchro cannot be negative" );
+  xConfirmPara( m_iWaveFrontSubstreams <= 0, "WaveFrontSubstreams must be positive" );
+  xConfirmPara( m_iWaveFrontSubstreams > 1 && !m_iWaveFrontSynchro, "Must have WaveFrontSynchro > 0 in order to have WaveFrontSubstreams > 1" );
+
+  xConfirmPara( m_decodedPictureHashSEIEnabled<0 || m_decodedPictureHashSEIEnabled>3, "this hash type is not correct!\n");
+
+  if (m_toneMappingInfoSEIEnabled)
+  {
+    xConfirmPara( m_toneMapCodedDataBitDepth < 8 || m_toneMapCodedDataBitDepth > 14 , "SEIToneMapCodedDataBitDepth must be in rage 8 to 14");
+    xConfirmPara( m_toneMapTargetBitDepth < 1 || (m_toneMapTargetBitDepth > 16 && m_toneMapTargetBitDepth < 255) , "SEIToneMapTargetBitDepth must be in rage 1 to 16 or equal to 255");
+    xConfirmPara( m_toneMapModelId < 0 || m_toneMapModelId > 4 , "SEIToneMapModelId must be in rage 0 to 4");
+    xConfirmPara( m_cameraIsoSpeedValue == 0, "SEIToneMapCameraIsoSpeedValue shall not be equal to 0");
+    xConfirmPara( m_exposureIndexValue  == 0, "SEIToneMapExposureIndexValue shall not be equal to 0");
+    xConfirmPara( m_extendedRangeWhiteLevel < 100, "SEIToneMapExtendedRangeWhiteLevel should be greater than or equal to 100");
+    xConfirmPara( m_nominalBlackLevelLumaCodeValue >= m_nominalWhiteLevelLumaCodeValue, "SEIToneMapNominalWhiteLevelLumaCodeValue shall be greater than SEIToneMapNominalBlackLevelLumaCodeValue");
+    xConfirmPara( m_extendedWhiteLevelLumaCodeValue < m_nominalWhiteLevelLumaCodeValue, "SEIToneMapExtendedWhiteLevelLumaCodeValue shall be greater than or equal to SEIToneMapNominalWhiteLevelLumaCodeValue");
+  }
+
+  if (m_kneeSEIEnabled && !m_kneeSEICancelFlag)
+  {
+    xConfirmPara( m_kneeSEINumKneePointsMinus1 < 0 || m_kneeSEINumKneePointsMinus1 > 998, "SEIKneeFunctionNumKneePointsMinus1 must be in the range of 0 to 998");
+    for ( UInt i=0; i<=m_kneeSEINumKneePointsMinus1; i++ ){
+      xConfirmPara( m_kneeSEIInputKneePoint[i] < 1 || m_kneeSEIInputKneePoint[i] > 999, "SEIKneeFunctionInputKneePointValue must be in the range of 1 to 999");
+      xConfirmPara( m_kneeSEIOutputKneePoint[i] < 0 || m_kneeSEIOutputKneePoint[i] > 1000, "SEIKneeFunctionInputKneePointValue must be in the range of 0 to 1000");
+      if ( i > 0 )
+      {
+        xConfirmPara( m_kneeSEIInputKneePoint[i-1] >= m_kneeSEIInputKneePoint[i],  "The i-th SEIKneeFunctionInputKneePointValue must be greater than the (i-1)-th value");
+        xConfirmPara( m_kneeSEIOutputKneePoint[i-1] > m_kneeSEIOutputKneePoint[i],  "The i-th SEIKneeFunctionOutputKneePointValue must be greater than or equal to the (i-1)-th value");
+      }
+    }
+  }
+
+  if ( m_RCEnableRateControl )
+  {
+    if ( m_RCForceIntraQP )
+    {
+      if ( m_RCInitialQP == 0 )
+      {
+        printf( "\nInitial QP for rate control is not specified. Reset not to use force intra QP!" );
+        m_RCForceIntraQP = false;
+      }
+    }
+    xConfirmPara( m_uiDeltaQpRD > 0, "Rate control cannot be used together with slice level multiple-QP optimization!\n" );
+  }
+
+  xConfirmPara(!m_TransquantBypassEnableFlag && m_CUTransquantBypassFlagForce, "CUTransquantBypassFlagForce cannot be 1 when TransquantBypassEnableFlag is 0");
+
+  xConfirmPara(m_log2ParallelMergeLevel < 2, "Log2ParallelMergeLevel should be larger than or equal to 2");
+
+  if (m_framePackingSEIEnabled)
+  {
+    xConfirmPara(m_framePackingSEIType < 3 || m_framePackingSEIType > 5 , "SEIFramePackingType must be in rage 3 to 5");
+  }
+
+  if (m_segmentedRectFramePackingSEIEnabled)
+  {
+    xConfirmPara(m_framePackingSEIEnabled > 0 , "SEISegmentedRectFramePacking must be 0 when SEIFramePacking is 1");
+  }
+
+  if((m_numTileColumnsMinus1 <= 0) && (m_numTileRowsMinus1 <= 0) && m_tmctsSEIEnabled)
+  {
+    printf("SEITempMotionConstrainedTileSets is set to false to disable 'temporal_motion_constrained_tile_sets' SEI because there are no tiles enabled\n");
+    m_tmctsSEIEnabled = false;
+  }
+
+  if(m_timeCodeSEIEnabled)
+  {
+    xConfirmPara(m_timeCodeSEINumTs > MAX_TIMECODE_SEI_SETS, "Number of time sets cannot exceed 3");
+  }
+
+#undef xConfirmPara
+  if (check_failed)
+  {
+    exit(EXIT_FAILURE);
+  }
+}
+
+/** Propagate the validated per-instance configuration into the encoder's
+ *  file-scope globals (g_uiMaxCUWidth, g_uiMaxCUDepth, g_bitDepth, ...).
+ *  Must run after xCheckParameter(); note it also mutates m_uiMaxCUDepth.
+ *  \todo use of global variables should be removed later
+ */
+Void TAppEncCfg::xSetGlobal()
+{
+  // set max CU width & height
+  g_uiMaxCUWidth  = m_uiMaxCUWidth;
+  g_uiMaxCUHeight = m_uiMaxCUHeight;
+
+  // compute actual CU depth with respect to config depth and max transform size:
+  // grow the extra depth until the smallest CU is no larger than the minimum TU.
+  g_uiAddCUDepth  = 0;
+  while( (m_uiMaxCUWidth>>m_uiMaxCUDepth) > ( 1 << ( m_uiQuadtreeTULog2MinSize + g_uiAddCUDepth )  ) ) g_uiAddCUDepth++;
+
+  g_uiAddCUDepth+=getMaxCUDepthOffset(m_chromaFormatIDC, m_uiQuadtreeTULog2MinSize); // if minimum TU larger than 4x4, allow for additional part indices for 4:2:2 SubTUs.
+
+  // NOTE: order matters below — m_uiMaxCUDepth absorbs the extra depth
+  // before g_uiAddCUDepth gets its final +1 and g_uiMaxCUDepth is published.
+  m_uiMaxCUDepth += g_uiAddCUDepth;
+  g_uiAddCUDepth++;
+  g_uiMaxCUDepth = m_uiMaxCUDepth;
+
+  // set internal bit-depth and constants
+  for (UInt channelType = 0; channelType < MAX_NUM_CHANNEL_TYPE; channelType++)
+  {
+#if O0043_BEST_EFFORT_DECODING
+    g_bitDepthInStream[channelType] = g_bitDepth[channelType] = m_internalBitDepth[channelType];
+#else
+    g_bitDepth   [channelType] = m_internalBitDepth[channelType];
+#endif
+    // PCM samples keep the (MSB-extended) input depth only when the flag asks for it.
+    g_PCMBitDepth[channelType] = m_bPCMInputBitDepthFlag ? m_MSBExtendedBitDepth[channelType] : m_internalBitDepth[channelType];
+
+    // Extended precision widens the transform dynamic range with bit depth
+    // (at least 15 bits); otherwise it is fixed at 15 bits.
+    if (m_useExtendedPrecision) g_maxTrDynamicRange[channelType] = std::max<Int>(15, (g_bitDepth[channelType] + 6));
+    else                        g_maxTrDynamicRange[channelType] = 15;
+  }
+}
+
+/** Translate a Profile::Name enum value to its human-readable string by a
+ *  linear scan of the strToProfile lookup table.
+ *  An unknown profile is treated as a programming error: it is reported on
+ *  stderr and the process is terminated (assert in debug, exit otherwise).
+ */
+const Char *profileToString(const Profile::Name profile)
+{
+  static const UInt numberOfProfiles = sizeof(strToProfile)/sizeof(*strToProfile);
+
+  for (UInt profileIndex = 0; profileIndex < numberOfProfiles; profileIndex++)
+  {
+    if (strToProfile[profileIndex].value == profile) return strToProfile[profileIndex].str;
+  }
+
+  //if we get here, we didn't find this profile in the list - so there is an error
+  std::cerr << "ERROR: Unknown profile \"" << profile << "\" in profileToString" << std::endl;
+  assert(false);
+  exit(1);
+  return "";  // unreachable; satisfies compilers that require a return value here
+}
+
+/** Print the complete encoder configuration (source format, profile, coding
+ *  tools, rate control) to stdout in the HM "name : value" log format. */
+Void TAppEncCfg::xPrintParameter()
+{
+  printf("\n");
+  printf("Input          File               : %s\n", m_pchInputFile          );
+  printf("Bitstream      File               : %s\n", m_pchBitstreamFile      );
+  printf("Reconstruction File               : %s\n", m_pchReconFile          );
+  // "Real" format excludes the conformance-window cropping; "Internal" is the coded size.
+  printf("Real     Format                   : %dx%d %dHz\n", m_iSourceWidth - m_confWinLeft - m_confWinRight, m_iSourceHeight - m_confWinTop - m_confWinBottom, m_iFrameRate );
+  printf("Internal Format                   : %dx%d %dHz\n", m_iSourceWidth, m_iSourceHeight, m_iFrameRate );
+  printf("Sequence PSNR output              : %s\n", (m_printMSEBasedSequencePSNR ? "Linear average, MSE-based" : "Linear average only") );
+  printf("Sequence MSE output               : %s\n", (m_printSequenceMSE ? "Enabled" : "Disabled") );
+  printf("Frame MSE output                  : %s\n", (m_printFrameMSE    ? "Enabled" : "Disabled") );
+  printf("Cabac-zero-word-padding           : %s\n", (m_cabacZeroWordPaddingEnabled? "Enabled" : "Disabled") );
+  // Frame/field coding information: field indices are reported when interlaced input is coded as fields.
+  if (m_isField)
+  {
+    printf("Frame/Field                       : Field based coding\n");
+    printf("Field index                       : %u - %d (%d fields)\n", m_FrameSkip, m_FrameSkip+m_framesToBeEncoded-1, m_framesToBeEncoded );
+    printf("Field Order                       : %s field first\n", m_isTopFieldFirst?"Top":"Bottom");
+
+  }
+  else
+  {
+    printf("Frame/Field                       : Frame based coding\n");
+    printf("Frame index                       : %u - %d (%d frames)\n", m_FrameSkip, m_FrameSkip+m_framesToBeEncoded-1, m_framesToBeEncoded );
+  }
+  // For Range Extensions, derive the matching sub-profile name from the
+  // intra / bit-depth / chroma-format constraint flags and report it next to the profile.
+  if (m_profile == Profile::MAINREXT)
+  {
+    const UInt intraIdx = m_intraConstraintFlag ? 1:0;
+    const UInt bitDepthIdx = (m_bitDepthConstraint == 8 ? 0 : (m_bitDepthConstraint ==10 ? 1 : (m_bitDepthConstraint == 12 ? 2 : (m_bitDepthConstraint == 16 ? 3 : 4 ))));
+    const UInt chromaFormatIdx = UInt(m_chromaFormatConstraint);
+    const ExtendedProfileName validProfileName = (bitDepthIdx > 3 || chromaFormatIdx>3) ? NONE : validRExtProfileNames[intraIdx][bitDepthIdx][chromaFormatIdx];
+    std::string rextSubProfile;
+    if (validProfileName!=NONE) rextSubProfile=enumToString(strToExtendedProfile, sizeof(strToExtendedProfile)/sizeof(*strToExtendedProfile), validProfileName);
+    if (rextSubProfile == "main_444_16") rextSubProfile="main_444_16 [NON STANDARD]";
+    printf("Profile                           : %s (%s)\n", profileToString(m_profile), (rextSubProfile.empty())?"INVALID REXT PROFILE":rextSubProfile.c_str() );
+  }
+  else
+  {
+    printf("Profile                           : %s\n", profileToString(m_profile) );
+  }
+  printf("CU size / depth                   : %d / %d\n", m_uiMaxCUWidth, m_uiMaxCUDepth );
+  printf("RQT trans. size (min / max)       : %d / %d\n", 1 << m_uiQuadtreeTULog2MinSize, 1 << m_uiQuadtreeTULog2MaxSize );
+  printf("Max RQT depth inter               : %d\n", m_uiQuadtreeTUMaxDepthInter);
+  printf("Max RQT depth intra               : %d\n", m_uiQuadtreeTUMaxDepthIntra);
+  printf("Min PCM size                      : %d\n", 1 << m_uiPCMLog2MinSize);
+  printf("Motion search range               : %d\n", m_iSearchRange );
+  printf("Intra period                      : %d\n", m_iIntraPeriod );
+  printf("Decoding refresh type             : %d\n", m_iDecodingRefreshType );
+  printf("QP                                : %5.2f\n", m_fQP );
+  printf("Max dQP signaling depth           : %d\n", m_iMaxCuDQPDepth);
+
+  printf("Cb QP Offset                      : %d\n", m_cbQpOffset   );
+  printf("Cr QP Offset                      : %d\n", m_crQpOffset);
+  printf("Max CU chroma QP adjustment depth : %d\n", m_maxCUChromaQpAdjustmentDepth);
+  printf("QP adaptation                     : %d (range=%d)\n", m_bUseAdaptiveQP, (m_bUseAdaptiveQP ? m_iQPAdaptationRange : 0) );
+  printf("GOP size                          : %d\n", m_iGOPSize );
+  printf("Input bit depth                   : (Y:%d, C:%d)\n", m_inputBitDepth[CHANNEL_TYPE_LUMA], m_inputBitDepth[CHANNEL_TYPE_CHROMA] );
+  printf("MSB-extended bit depth            : (Y:%d, C:%d)\n", m_MSBExtendedBitDepth[CHANNEL_TYPE_LUMA], m_MSBExtendedBitDepth[CHANNEL_TYPE_CHROMA] );
+  printf("Internal bit depth                : (Y:%d, C:%d)\n", m_internalBitDepth[CHANNEL_TYPE_LUMA], m_internalBitDepth[CHANNEL_TYPE_CHROMA] );
+  printf("PCM sample bit depth              : (Y:%d, C:%d)\n", g_PCMBitDepth[CHANNEL_TYPE_LUMA],      g_PCMBitDepth[CHANNEL_TYPE_CHROMA] );
+  printf("Extended precision processing     : %s\n", (m_useExtendedPrecision                   ? "Enabled" : "Disabled") );
+  printf("Intra reference smoothing         : %s\n", (m_enableIntraReferenceSmoothing          ? "Enabled" : "Disabled") );
+  printf("Implicit residual DPCM            : %s\n", (m_useResidualDPCM[RDPCM_SIGNAL_IMPLICIT] ? "Enabled" : "Disabled") );
+  printf("Explicit residual DPCM            : %s\n", (m_useResidualDPCM[RDPCM_SIGNAL_EXPLICIT] ? "Enabled" : "Disabled") );
+  printf("Residual rotation                 : %s\n", (m_useResidualRotation                    ? "Enabled" : "Disabled") );
+  printf("Single significance map context   : %s\n", (m_useSingleSignificanceMapContext        ? "Enabled" : "Disabled") );
+  printf("Cross-component prediction        : %s\n", (m_useCrossComponentPrediction            ? (m_reconBasedCrossCPredictionEstimate ? "Enabled (reconstructed-residual-based estimate)" : "Enabled (encoder-side-residual-based estimate)") : "Disabled") );
+  printf("High-precision prediction weight  : %s\n", (m_useHighPrecisionPredictionWeighting    ? "Enabled" : "Disabled") );
+  printf("Golomb-Rice parameter adaptation  : %s\n", (m_useGolombRiceParameterAdaptation       ? "Enabled" : "Disabled") );
+  printf("CABAC bypass bit alignment        : %s\n", (m_alignCABACBeforeBypass                 ? "Enabled" : "Disabled") );
+  if (m_bUseSAO)
+  {
+    printf("Sao Luma Offset bit shifts        : %d\n", m_saoOffsetBitShift[CHANNEL_TYPE_LUMA]);
+    printf("Sao Chroma Offset bit shifts      : %d\n", m_saoOffsetBitShift[CHANNEL_TYPE_CHROMA]);
+  }
+
+  // NOTE(review): the "Cost function:" labels below already end in a colon, so the
+  // output contains a doubled ':' -- kept as-is to match upstream HM log output.
+  switch (m_costMode)
+  {
+    case COST_STANDARD_LOSSY:               printf("Cost function:                    : Lossy coding (default)\n"); break;
+    case COST_SEQUENCE_LEVEL_LOSSLESS:      printf("Cost function:                    : Sequence_level_lossless coding\n"); break;
+    case COST_LOSSLESS_CODING:              printf("Cost function:                    : Lossless coding with fixed QP of %d\n", LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP); break;
+    case COST_MIXED_LOSSLESS_LOSSY_CODING:  printf("Cost function:                    : Mixed_lossless_lossy coding with QP'=%d for lossless evaluation\n", LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP_PRIME); break;
+    default:                                printf("Cost function:                    : Unknown\n"); break;
+  }
+
+  printf("RateControl                       : %d\n", m_RCEnableRateControl );
+
+  // Rate-control sub-parameters are only meaningful (and printed) when RC is enabled.
+  if(m_RCEnableRateControl)
+  {
+    printf("TargetBitrate                     : %d\n", m_RCTargetBitrate );
+    printf("KeepHierarchicalBit               : %d\n", m_RCKeepHierarchicalBit );
+    printf("LCULevelRC                        : %d\n", m_RCLCULevelRC );
+    printf("UseLCUSeparateModel               : %d\n", m_RCUseLCUSeparateModel );
+    printf("InitialQP                         : %d\n", m_RCInitialQP );
+    printf("ForceIntraQP                      : %d\n", m_RCForceIntraQP );
+  }
+
+  printf("Max Num Merge Candidates          : %d\n", m_maxNumMergeCand);
+  printf("\n");
+
+  // Compact one-line summary of the enabled coding tools (HM "TOOL CFG" line).
+  printf("TOOL CFG: ");
+  printf("IBD:%d ", ((g_bitDepth[CHANNEL_TYPE_LUMA] > m_MSBExtendedBitDepth[CHANNEL_TYPE_LUMA]) || (g_bitDepth[CHANNEL_TYPE_CHROMA] > m_MSBExtendedBitDepth[CHANNEL_TYPE_CHROMA])));
+  printf("HAD:%d ", m_bUseHADME           );
+  printf("RDQ:%d ", m_useRDOQ            );
+  printf("RDQTS:%d ", m_useRDOQTS        );
+  printf("RDpenalty:%d ", m_rdPenalty  );
+  printf("SQP:%d ", m_uiDeltaQpRD         );
+  printf("ASR:%d ", m_bUseASR             );
+  printf("FEN:%d ", m_bUseFastEnc         );
+  printf("ECU:%d ", m_bUseEarlyCU         );
+  printf("FDM:%d ", m_useFastDecisionForMerge );
+  printf("CFM:%d ", m_bUseCbfFastMode         );
+  printf("ESD:%d ", m_useEarlySkipDetection  );
+  printf("RQT:%d ", 1     );
+  printf("TransformSkip:%d ",     m_useTransformSkip              );
+  printf("TransformSkipFast:%d ", m_useTransformSkipFast       );
+  printf("TransformSkipLog2MaxSize:%d ", m_transformSkipLog2MaxSize);
+  printf("Slice: M=%d ", m_sliceMode);
+  if (m_sliceMode!=NO_SLICES)
+  {
+    printf("A=%d ", m_sliceArgument);
+  }
+  printf("SliceSegment: M=%d ",m_sliceSegmentMode);
+  if (m_sliceSegmentMode!=NO_SLICES)
+  {
+    printf("A=%d ", m_sliceSegmentArgument);
+  }
+  printf("CIP:%d ", m_bUseConstrainedIntraPred);
+  printf("SAO:%d ", (m_bUseSAO)?(1):(0));
+  printf("PCM:%d ", (m_usePCM && (1<<m_uiPCMLog2MinSize) <= m_uiMaxCUWidth)? 1 : 0);
+
+  // NOTE(review): the "=1" string (no trailing space/newline) is upstream HM's exact
+  // output for the forced-bypass case -- preserved byte-for-byte.
+  if (m_TransquantBypassEnableFlag && m_CUTransquantBypassFlagForce)
+  {
+    printf("TransQuantBypassEnabled: =1");
+  }
+  else
+  {
+    printf("TransQuantBypassEnabled:%d ", (m_TransquantBypassEnableFlag)? 1:0 );
+  }
+
+  printf("WPP:%d ", (Int)m_useWeightedPred);
+  printf("WPB:%d ", (Int)m_useWeightedBiPred);
+  printf("PME:%d ", m_log2ParallelMergeLevel);
+  printf(" WaveFrontSynchro:%d WaveFrontSubstreams:%d",
+          m_iWaveFrontSynchro, m_iWaveFrontSubstreams);
+  printf(" ScalingList:%d ", m_useScalingListId );
+  printf("TMVPMode:%d ", m_TMVPModeId     );
+#if ADAPTIVE_QP_SELECTION
+  printf("AQpS:%d", m_bUseAdaptQpSelect   );
+#endif
+
+  printf(" SignBitHidingFlag:%d ", m_signHideFlag);
+  printf("RecalQP:%d", m_recalculateQPAccordingToLambda ? 1 : 0 );
+
+  printf("\n\n");
+
+  // Flush so the configuration is visible even when encoding output interleaves with it.
+  fflush(stdout);
+}
+
+Bool confirmPara(Bool bflag, const Char* message)
+{
+  if (!bflag)
+    return false;
+
+  printf("Error: %s\n",message);
+  return true;
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TAppEncCfg.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,374 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TAppEncCfg.h
+    \brief    Handle encoder configuration parameters (header)
+*/
+
+#ifndef __TAPPENCCFG__
+#define __TAPPENCCFG__
+
+#include "TLibCommon/CommonDef.h"
+
+#include "TLibEncoder/TEncCfg.h"
+#include <sstream>
+#include <vector>
+//! \ingroup TAppEncoder
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// encoder configuration class
+class TAppEncCfg
+{
+protected:
+  // file I/O
+  Char*     m_pchInputFile;                                   ///< source file name
+  Char*     m_pchBitstreamFile;                               ///< output bitstream file
+  Char*     m_pchReconFile;                                   ///< output reconstruction file
+  Double    m_adLambdaModifier[ MAX_TLAYER ];                 ///< Lambda modifier array for each temporal layer
+  // source specification
+  Int       m_iFrameRate;                                     ///< source frame-rates (Hz)
+  UInt      m_FrameSkip;                                   ///< number of skipped frames from the beginning
+  Int       m_iSourceWidth;                                   ///< source width in pixel
+  Int       m_iSourceHeight;                                  ///< source height in pixel (when interlaced = field height)
+
+  Int       m_iSourceHeightOrg;                               ///< original source height in pixel (when interlaced = frame height)
+
+  Bool      m_isField;                                        ///< enable field coding
+  Bool      m_isTopFieldFirst;
+
+  Int       m_conformanceWindowMode;
+  Int       m_confWinLeft;
+  Int       m_confWinRight;
+  Int       m_confWinTop;
+  Int       m_confWinBottom;
+  Int       m_framesToBeEncoded;                              ///< number of encoded frames
+  Int       m_aiPad[2];                                       ///< number of padded pixels for width and height
+  InputColourSpaceConversion m_inputColourSpaceConvert;       ///< colour space conversion to apply to input video
+  Bool      m_snrInternalColourSpace;                       ///< if true, then no colour space conversion is applied for snr calculation, otherwise inverse of input is applied.
+  Bool      m_outputInternalColourSpace;                    ///< if true, then no colour space conversion is applied for reconstructed video, otherwise inverse of input is applied.
+  ChromaFormat m_InputChromaFormatIDC;
+
+  Bool      m_printMSEBasedSequencePSNR;
+  Bool      m_printFrameMSE;
+  Bool      m_printSequenceMSE;
+  Bool      m_cabacZeroWordPaddingEnabled;
+
+  // profile/level
+  Profile::Name m_profile;
+  Level::Tier   m_levelTier;
+  Level::Name   m_level;
+  UInt          m_bitDepthConstraint;
+  ChromaFormat  m_chromaFormatConstraint;
+  Bool          m_intraConstraintFlag;
+  Bool          m_lowerBitRateConstraintFlag;
+  Bool m_progressiveSourceFlag;
+  Bool m_interlacedSourceFlag;
+  Bool m_nonPackedConstraintFlag;
+  Bool m_frameOnlyConstraintFlag;
+
+  // coding structure
+  Int       m_iIntraPeriod;                                   ///< period of I-slice (random access period)
+  Int       m_iDecodingRefreshType;                           ///< random access type
+  Int       m_iGOPSize;                                       ///< GOP size of hierarchical structure
+  Int       m_extraRPSs;                                      ///< extra RPSs added to handle CRA
+  GOPEntry  m_GOPList[MAX_GOP];                               ///< the coding structure entries from the config file
+  Int       m_numReorderPics[MAX_TLAYER];                     ///< total number of reorder pictures
+  Int       m_maxDecPicBuffering[MAX_TLAYER];                 ///< total number of pictures in the decoded picture buffer
+  Bool      m_useCrossComponentPrediction;                    ///< flag enabling the use of cross-component prediction
+  Bool      m_reconBasedCrossCPredictionEstimate;             ///< causes the alpha calculation in encoder search to be based on the decoded residual rather than the pre-transform encoder-side residual
+  UInt      m_saoOffsetBitShift[MAX_NUM_CHANNEL_TYPE];        ///< number of bits for the upward bit shift operation on the decoded SAO offsets
+  Bool      m_useTransformSkip;                               ///< flag for enabling intra transform skipping
+  Bool      m_useTransformSkipFast;                           ///< flag for enabling fast intra transform skipping
+  UInt      m_transformSkipLog2MaxSize;                       ///< transform-skip maximum size (minimum of 2)
+  Bool      m_useResidualRotation;                            ///< control flag for transform-skip/transquant-bypass residual rotation
+  Bool      m_useSingleSignificanceMapContext;                ///< control flag for transform-skip/transquant-bypass single significance map context
+  Bool      m_useResidualDPCM[NUMBER_OF_RDPCM_SIGNALLING_MODES];///< control flags for residual DPCM
+  Bool      m_enableAMP;
+  Bool      m_useGolombRiceParameterAdaptation;               ///< control flag for Golomb-Rice parameter adaptation over each slice
+  Bool      m_alignCABACBeforeBypass;
+
+  // coding quality
+  Double    m_fQP;                                            ///< QP value of key-picture (floating point)
+  Int       m_iQP;                                            ///< QP value of key-picture (integer)
+  Char*     m_pchdQPFile;                                     ///< QP offset for each slice (initialized from external file)
+  Int*      m_aidQP;                                          ///< array of slice QP values
+  Int       m_iMaxDeltaQP;                                    ///< max. |delta QP|
+  UInt      m_uiDeltaQpRD;                                    ///< dQP range for multi-pass slice QP optimization
+  Int       m_iMaxCuDQPDepth;                                 ///< Max. depth for a minimum CuDQPSize (0:default)
+  Int       m_maxCUChromaQpAdjustmentDepth;
+
+  Int       m_cbQpOffset;                                     ///< Chroma Cb QP Offset (0:default)
+  Int       m_crQpOffset;                                     ///< Chroma Cr QP Offset (0:default)
+
+#if ADAPTIVE_QP_SELECTION
+  Bool      m_bUseAdaptQpSelect;
+#endif
+  TComSEIMasteringDisplay m_masteringDisplay;
+
+  Bool      m_bUseAdaptiveQP;                                 ///< Flag for enabling QP adaptation based on a psycho-visual model
+  Int       m_iQPAdaptationRange;                             ///< dQP range by QP adaptation
+
+  Int       m_maxTempLayer;                                  ///< Max temporal layer
+
+  // coding unit (CU) definition
+  // TODO: Remove MaxCUWidth/MaxCUHeight and replace with MaxCUSize.
+  UInt      m_uiMaxCUWidth;                                   ///< max. CU width in pixel
+  UInt      m_uiMaxCUHeight;                                  ///< max. CU height in pixel
+  UInt      m_uiMaxCUDepth;                                   ///< max. CU depth
+
+  // transform unit (TU) definition
+  UInt      m_uiQuadtreeTULog2MaxSize;
+  UInt      m_uiQuadtreeTULog2MinSize;
+
+  UInt      m_uiQuadtreeTUMaxDepthInter;
+  UInt      m_uiQuadtreeTUMaxDepthIntra;
+
+  // coding tools (bit-depth)
+  Int       m_inputBitDepth   [MAX_NUM_CHANNEL_TYPE];         ///< bit-depth of input file
+  Int       m_outputBitDepth  [MAX_NUM_CHANNEL_TYPE];         ///< bit-depth of output file
+  Int       m_MSBExtendedBitDepth[MAX_NUM_CHANNEL_TYPE];      ///< bit-depth of input samples after MSB extension
+  Int       m_internalBitDepth[MAX_NUM_CHANNEL_TYPE];         ///< bit-depth codec operates at (input/output files will be converted)
+  Bool      m_useExtendedPrecision;
+  Bool      m_useHighPrecisionPredictionWeighting;
+
+  //coding tools (chroma format)
+  ChromaFormat m_chromaFormatIDC;
+
+  // coding tools (PCM bit-depth)
+  Bool      m_bPCMInputBitDepthFlag;                          ///< 0: PCM bit-depth is internal bit-depth. 1: PCM bit-depth is input bit-depth.
+
+  // coding tool (SAO)
+  Bool      m_bUseSAO;
+  Int       m_maxNumOffsetsPerPic;                            ///< SAO maximum number of offsets per picture
+  Bool      m_saoCtuBoundary;                                 ///< SAO parameter estimation using non-deblocked pixels for CTU bottom and right boundary areas
+  // coding tools (loop filter)
+  Bool      m_bLoopFilterDisable;                             ///< flag for using deblocking filter
+  Bool      m_loopFilterOffsetInPPS;                         ///< deblocking filter offsets in: 0 = slice header, 1 = PPS
+  Int       m_loopFilterBetaOffsetDiv2;                     ///< beta offset for deblocking filter
+  Int       m_loopFilterTcOffsetDiv2;                       ///< tc offset for deblocking filter
+  Bool      m_DeblockingFilterControlPresent;                 ///< deblocking filter control present flag in PPS
+  Bool      m_DeblockingFilterMetric;                         ///< blockiness metric in encoder
+
+  // coding tools (PCM)
+  Bool      m_usePCM;                                         ///< flag for using IPCM
+  UInt      m_pcmLog2MaxSize;                                 ///< log2 of maximum PCM block size
+  UInt      m_uiPCMLog2MinSize;                               ///< log2 of minimum PCM block size
+  Bool      m_bPCMFilterDisableFlag;                          ///< PCM filter disable flag
+  Bool      m_enableIntraReferenceSmoothing;                  ///< flag for enabling(default)/disabling intra reference smoothing/filtering
+
+  // coding tools (encoder-only parameters)
+  Bool      m_bUseASR;                                        ///< flag for using adaptive motion search range
+  Bool      m_bUseHADME;                                      ///< flag for using HAD in sub-pel ME
+  Bool      m_useRDOQ;                                       ///< flag for using RD optimized quantization
+  Bool      m_useRDOQTS;                                     ///< flag for using RD optimized quantization for transform skip
+  Int       m_rdPenalty;                                      ///< RD-penalty for 32x32 TU for intra in non-intra slices (0: no RD-penalty, 1: RD-penalty, 2: maximum RD-penalty)
+  Int       m_iFastSearch;                                    ///< ME mode, 0 = full, 1 = diamond, 2 = PMVFAST
+  Int       m_iSearchRange;                                   ///< ME search range
+  Int       m_bipredSearchRange;                              ///< ME search range for bipred refinement
+  Bool      m_bUseFastEnc;                                    ///< flag for using fast encoder setting
+  Bool      m_bUseEarlyCU;                                    ///< flag for using Early CU setting
+  Bool      m_useFastDecisionForMerge;                        ///< flag for using Fast Decision Merge RD-Cost
+  Bool      m_bUseCbfFastMode;                              ///< flag for using Cbf Fast PU Mode Decision
+  Bool      m_useEarlySkipDetection;                         ///< flag for using Early SKIP Detection
+  Int       m_sliceMode;                                     ///< 0: no slice limits, 1 : max number of CTBs per slice, 2: max number of bytes per slice,
+                                                             ///< 3: max number of tiles per slice
+  Int       m_sliceArgument;                                 ///< argument according to selected slice mode
+  Int       m_sliceSegmentMode;                              ///< 0: no slice segment limits, 1 : max number of CTBs per slice segment, 2: max number of bytes per slice segment,
+                                                             ///< 3: max number of tiles per slice segment
+  Int       m_sliceSegmentArgument;                          ///< argument according to selected slice segment mode
+
+  Bool      m_bLFCrossSliceBoundaryFlag;  ///< 1: filter across slice boundaries 0: do not filter across slice boundaries
+  Bool      m_bLFCrossTileBoundaryFlag;   ///< 1: filter across tile boundaries  0: do not filter across tile boundaries
+  Bool      m_tileUniformSpacingFlag;
+  Int       m_numTileColumnsMinus1;
+  Int       m_numTileRowsMinus1;
+  std::vector<Int> m_tileColumnWidth;
+  std::vector<Int> m_tileRowHeight;
+  Int       m_iWaveFrontSynchro; ///< 0: no WPP. >= 1: WPP is enabled, the "Top right" from which inheritance occurs is this LCU offset in the line above the current.
+  Int       m_iWaveFrontFlush; ///< enable(1)/disable(0) the CABAC flush at the end of each line of LCUs.
+  Int       m_iWaveFrontSubstreams; ///< If iWaveFrontSynchro, this is the number of substreams per frame (dependent tiles) or per tile (independent tiles).
+
+  Bool      m_bUseConstrainedIntraPred;                       ///< flag for using constrained intra prediction
+
+  Int       m_decodedPictureHashSEIEnabled;                    ///< Checksum(3)/CRC(2)/MD5(1)/disable(0) acting on decoded picture hash SEI message
+  Int       m_recoveryPointSEIEnabled;
+  Int       m_bufferingPeriodSEIEnabled;
+  Int       m_pictureTimingSEIEnabled;
+  Bool      m_toneMappingInfoSEIEnabled;
+  Bool      m_chromaSamplingFilterSEIenabled;
+  Int       m_chromaSamplingHorFilterIdc;
+  Int       m_chromaSamplingVerFilterIdc;
+  Int       m_toneMapId;
+  Bool      m_toneMapCancelFlag;
+  Bool      m_toneMapPersistenceFlag;
+  Int       m_toneMapCodedDataBitDepth;
+  Int       m_toneMapTargetBitDepth;
+  Int       m_toneMapModelId;
+  Int       m_toneMapMinValue;
+  Int       m_toneMapMaxValue;
+  Int       m_sigmoidMidpoint;
+  Int       m_sigmoidWidth;
+  Int       m_numPivots;
+  Int       m_cameraIsoSpeedIdc;
+  Int       m_cameraIsoSpeedValue;
+  Int       m_exposureIndexIdc;
+  Int       m_exposureIndexValue;
+  Int       m_exposureCompensationValueSignFlag;
+  Int       m_exposureCompensationValueNumerator;
+  Int       m_exposureCompensationValueDenomIdc;
+  Int       m_refScreenLuminanceWhite;
+  Int       m_extendedRangeWhiteLevel;
+  Int       m_nominalBlackLevelLumaCodeValue;
+  Int       m_nominalWhiteLevelLumaCodeValue;
+  Int       m_extendedWhiteLevelLumaCodeValue;
+  Int*      m_startOfCodedInterval;
+  Int*      m_codedPivotValue;
+  Int*      m_targetPivotValue;
+  Int       m_framePackingSEIEnabled;
+  Int       m_framePackingSEIType;
+  Int       m_framePackingSEIId;
+  Int       m_framePackingSEIQuincunx;
+  Int       m_framePackingSEIInterpretation;
+  Int       m_segmentedRectFramePackingSEIEnabled;
+  Bool      m_segmentedRectFramePackingSEICancel;
+  Int       m_segmentedRectFramePackingSEIType;
+  Bool      m_segmentedRectFramePackingSEIPersistence;
+  Int       m_displayOrientationSEIAngle;
+  Int       m_temporalLevel0IndexSEIEnabled;
+  Int       m_gradualDecodingRefreshInfoEnabled;
+  Int       m_noDisplaySEITLayer;
+  Int       m_decodingUnitInfoSEIEnabled;
+  Int       m_SOPDescriptionSEIEnabled;
+  Int       m_scalableNestingSEIEnabled;
+  Bool      m_tmctsSEIEnabled;
+  Bool      m_timeCodeSEIEnabled;
+  Int       m_timeCodeSEINumTs;
+  TComSEITimeSet m_timeSetArray[MAX_TIMECODE_SEI_SETS];
+  Bool      m_kneeSEIEnabled;
+  Int       m_kneeSEIId;
+  Bool      m_kneeSEICancelFlag;
+  Bool      m_kneeSEIPersistenceFlag;
+  Int       m_kneeSEIInputDrange;
+  Int       m_kneeSEIInputDispLuminance;
+  Int       m_kneeSEIOutputDrange;
+  Int       m_kneeSEIOutputDispLuminance;
+  Int       m_kneeSEINumKneePointsMinus1;
+  Int*      m_kneeSEIInputKneePoint;
+  Int*      m_kneeSEIOutputKneePoint;
+  // weighted prediction
+  Bool      m_useWeightedPred;                    ///< Use of weighted prediction in P slices
+  Bool      m_useWeightedBiPred;                  ///< Use of bi-directional weighted prediction in B slices
+
+  UInt      m_log2ParallelMergeLevel;                         ///< Parallel merge estimation region
+  UInt      m_maxNumMergeCand;                                ///< Max number of merge candidates
+
+  Int       m_TMVPModeId;
+  Int       m_signHideFlag;
+  Bool      m_RCEnableRateControl;                ///< enable rate control or not
+  Int       m_RCTargetBitrate;                    ///< target bitrate when rate control is enabled
+  Int       m_RCKeepHierarchicalBit;              ///< 0: equal bit allocation; 1: fixed ratio bit allocation; 2: adaptive ratio bit allocation
+  Bool      m_RCLCULevelRC;                       ///< true: LCU level rate control; false: picture level rate control NOTE: code-tidy - rename to m_RCCtuLevelRC
+  Bool      m_RCUseLCUSeparateModel;              ///< use separate R-lambda model at LCU level                        NOTE: code-tidy - rename to m_RCUseCtuSeparateModel
+  Int       m_RCInitialQP;                        ///< initial QP for rate control
+  Bool      m_RCForceIntraQP;                     ///< force all intra picture to use initial QP or not
+  ScalingListMode m_useScalingListId;                         ///< using quantization matrix
+  Char*     m_scalingListFile;                                ///< quantization matrix file name
+
+  Bool      m_TransquantBypassEnableFlag;                     ///< transquant_bypass_enable_flag setting in PPS.
+  Bool      m_CUTransquantBypassFlagForce;                    ///< if transquant_bypass_enable_flag, then, if true, all CU transquant bypass flags will be set to true.
+  CostMode  m_costMode;                                       ///< Cost mode to use
+
+  Bool      m_recalculateQPAccordingToLambda;                 ///< recalculate QP value according to the lambda value
+  Bool      m_useStrongIntraSmoothing;                        ///< enable strong intra smoothing for 32x32 blocks where the reference samples are flat
+  Int       m_activeParameterSetsSEIEnabled;
+
+  Bool      m_vuiParametersPresentFlag;                       ///< enable generation of VUI parameters
+  Bool      m_aspectRatioInfoPresentFlag;                     ///< Signals whether aspect_ratio_idc is present
+  Int       m_aspectRatioIdc;                                 ///< aspect_ratio_idc
+  Int       m_sarWidth;                                       ///< horizontal size of the sample aspect ratio
+  Int       m_sarHeight;                                      ///< vertical size of the sample aspect ratio
+  Bool      m_overscanInfoPresentFlag;                        ///< Signals whether overscan_appropriate_flag is present
+  Bool      m_overscanAppropriateFlag;                        ///< Indicates whether conformant decoded pictures are suitable for display using overscan
+  Bool      m_videoSignalTypePresentFlag;                     ///< Signals whether video_format, video_full_range_flag, and colour_description_present_flag are present
+  Int       m_videoFormat;                                    ///< Indicates representation of pictures
+  Bool      m_videoFullRangeFlag;                             ///< Indicates the black level and range of luma and chroma signals
+  Bool      m_colourDescriptionPresentFlag;                   ///< Signals whether colour_primaries, transfer_characteristics and matrix_coefficients are present
+  Int       m_colourPrimaries;                                ///< Indicates chromaticity coordinates of the source primaries
+  Int       m_transferCharacteristics;                        ///< Indicates the opto-electronic transfer characteristics of the source
+  Int       m_matrixCoefficients;                             ///< Describes the matrix coefficients used in deriving luma and chroma from RGB primaries
+  Bool      m_chromaLocInfoPresentFlag;                       ///< Signals whether chroma_sample_loc_type_top_field and chroma_sample_loc_type_bottom_field are present
+  Int       m_chromaSampleLocTypeTopField;                    ///< Specifies the location of chroma samples for top field
+  Int       m_chromaSampleLocTypeBottomField;                 ///< Specifies the location of chroma samples for bottom field
+  Bool      m_neutralChromaIndicationFlag;                    ///< Indicates that the value of all decoded chroma samples is equal to 1<<(BitDepthCr-1)
+  Bool      m_defaultDisplayWindowFlag;                       ///< Indicates the presence of the default window parameters
+  Int       m_defDispWinLeftOffset;                           ///< Specifies the left offset from the conformance window of the default window
+  Int       m_defDispWinRightOffset;                          ///< Specifies the right offset from the conformance window of the default window
+  Int       m_defDispWinTopOffset;                            ///< Specifies the top offset from the conformance window of the default window
+  Int       m_defDispWinBottomOffset;                         ///< Specifies the bottom offset from the conformance window of the default window
+  Bool      m_frameFieldInfoPresentFlag;                      ///< Indicates that pic_struct values are present in picture timing SEI messages
+  Bool      m_pocProportionalToTimingFlag;                    ///< Indicates that the POC value is proportional to the output time w.r.t. first picture in CVS
+  Int       m_numTicksPocDiffOneMinus1;                       ///< Number of ticks minus 1 that for a POC difference of one
+  Bool      m_bitstreamRestrictionFlag;                       ///< Signals whether bitstream restriction parameters are present
+  Bool      m_tilesFixedStructureFlag;                        ///< Indicates that each active picture parameter set has the same values of the syntax elements related to tiles
+  Bool      m_motionVectorsOverPicBoundariesFlag;             ///< Indicates that no samples outside the picture boundaries are used for inter prediction
+  Int       m_minSpatialSegmentationIdc;                      ///< Indicates the maximum size of the spatial segments in the pictures in the coded video sequence
+  Int       m_maxBytesPerPicDenom;                            ///< Indicates a number of bytes not exceeded by the sum of the sizes of the VCL NAL units associated with any coded picture
+  Int       m_maxBitsPerMinCuDenom;                           ///< Indicates an upper bound for the number of bits of coding_unit() data
+  Int       m_log2MaxMvLengthHorizontal;                      ///< Indicate the maximum absolute value of a decoded horizontal MV component in quarter-pel luma units
+  Int       m_log2MaxMvLengthVertical;                        ///< Indicate the maximum absolute value of a decoded vertical MV component in quarter-pel luma units
+
+  // internal member functions
+  Void  xSetGlobal      ();                                   ///< set global variables
+  Void  xCheckParameter ();                                   ///< check validity of configuration values
+  Void  xPrintParameter ();                                   ///< print configuration values
+  Void  xPrintUsage     ();                                   ///< print usage
+public:
+  TAppEncCfg();
+  virtual ~TAppEncCfg();
+
+public:
+  Void  create    ();                                         ///< create option handling class
+  Void  destroy   ();                                         ///< destroy option handling class
+  Bool  parseCfg  ( Int argc, Char* argv[] );                 ///< parse configuration file to fill member variables
+
+  Int       m_verboseLevel; /* verbose level */
+};// END CLASS DEFINITION TAppEncCfg
+
+//! \}
+
+#endif // __TAPPENCCFG__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TAppEncTop.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,695 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TAppEncTop.cpp
+    \brief    Encoder application class
+*/
+
+#include <list>
+#include <fstream>
+#include <stdlib.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <iomanip>
+
+#include "TAppEncTop.h"
+#include "TLibEncoder/AnnexBwrite.h"
+
+using namespace std;
+
+//! \ingroup TAppEncoder
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / initialization / destroy
+// ====================================================================================================================
+
+TAppEncTop::TAppEncTop()
+{
+  m_iFrameRcvd = 0;
+  m_totalBytes = 0;
+  m_essentialBytes = 0;
+}
+
+TAppEncTop::~TAppEncTop()
+{
+}
+
+Void TAppEncTop::xInitLibCfg()
+{
+  TComVPS vps;
+
+  vps.setMaxTLayers                                               ( m_maxTempLayer );
+  if (m_maxTempLayer == 1)
+  {
+    vps.setTemporalNestingFlag(true);
+  }
+  vps.setMaxLayers                                                ( 1 );
+  for(Int i = 0; i < MAX_TLAYER; i++)
+  {
+    vps.setNumReorderPics                                         ( m_numReorderPics[i], i );
+    vps.setMaxDecPicBuffering                                     ( m_maxDecPicBuffering[i], i );
+  }
+  m_cTEncTop.setVPS(&vps);
+
+  m_cTEncTop.setProfile                                           ( m_profile);
+  m_cTEncTop.setLevel                                             ( m_levelTier, m_level);
+  m_cTEncTop.setProgressiveSourceFlag                             ( m_progressiveSourceFlag);
+  m_cTEncTop.setInterlacedSourceFlag                              ( m_interlacedSourceFlag);
+  m_cTEncTop.setNonPackedConstraintFlag                           ( m_nonPackedConstraintFlag);
+  m_cTEncTop.setFrameOnlyConstraintFlag                           ( m_frameOnlyConstraintFlag);
+  m_cTEncTop.setBitDepthConstraintValue                           ( m_bitDepthConstraint );
+  m_cTEncTop.setChromaFormatConstraintValue                       ( m_chromaFormatConstraint );
+  m_cTEncTop.setIntraConstraintFlag                               ( m_intraConstraintFlag );
+  m_cTEncTop.setLowerBitRateConstraintFlag                        ( m_lowerBitRateConstraintFlag );
+
+  m_cTEncTop.setPrintMSEBasedSequencePSNR                         ( m_printMSEBasedSequencePSNR);
+  m_cTEncTop.setPrintFrameMSE                                     ( m_printFrameMSE);
+  m_cTEncTop.setPrintSequenceMSE                                  ( m_printSequenceMSE);
+  m_cTEncTop.setCabacZeroWordPaddingEnabled                       ( m_cabacZeroWordPaddingEnabled );
+
+  m_cTEncTop.setFrameRate                                         ( m_iFrameRate );
+  m_cTEncTop.setFrameSkip                                         ( m_FrameSkip );
+  m_cTEncTop.setSourceWidth                                       ( m_iSourceWidth );
+  m_cTEncTop.setSourceHeight                                      ( m_iSourceHeight );
+  m_cTEncTop.setConformanceWindow                                 ( m_confWinLeft, m_confWinRight, m_confWinTop, m_confWinBottom );
+  m_cTEncTop.setFramesToBeEncoded                                 ( m_framesToBeEncoded );
+
+  //====== Coding Structure ========
+  m_cTEncTop.setIntraPeriod                                       ( m_iIntraPeriod );
+  m_cTEncTop.setDecodingRefreshType                               ( m_iDecodingRefreshType );
+  m_cTEncTop.setGOPSize                                           ( m_iGOPSize );
+  m_cTEncTop.setGopList                                           ( m_GOPList );
+  m_cTEncTop.setExtraRPSs                                         ( m_extraRPSs );
+  for(Int i = 0; i < MAX_TLAYER; i++)
+  {
+    m_cTEncTop.setNumReorderPics                                  ( m_numReorderPics[i], i );
+    m_cTEncTop.setMaxDecPicBuffering                              ( m_maxDecPicBuffering[i], i );
+  }
+  for( UInt uiLoop = 0; uiLoop < MAX_TLAYER; ++uiLoop )
+  {
+    m_cTEncTop.setLambdaModifier                                  ( uiLoop, m_adLambdaModifier[ uiLoop ] );
+  }
+  m_cTEncTop.setQP                                                ( m_iQP );
+
+  m_cTEncTop.setPad                                               ( m_aiPad );
+
+  m_cTEncTop.setMaxTempLayer                                      ( m_maxTempLayer );
+  m_cTEncTop.setUseAMP( m_enableAMP );
+
+  //===== Slice ========
+
+  //====== Loop/Deblock Filter ========
+  m_cTEncTop.setLoopFilterDisable                                 ( m_bLoopFilterDisable       );
+  m_cTEncTop.setLoopFilterOffsetInPPS                             ( m_loopFilterOffsetInPPS );
+  m_cTEncTop.setLoopFilterBetaOffset                              ( m_loopFilterBetaOffsetDiv2  );
+  m_cTEncTop.setLoopFilterTcOffset                                ( m_loopFilterTcOffsetDiv2    );
+  m_cTEncTop.setDeblockingFilterControlPresent                    ( m_DeblockingFilterControlPresent);
+  m_cTEncTop.setDeblockingFilterMetric                            ( m_DeblockingFilterMetric );
+
+  //====== Motion search ========
+  m_cTEncTop.setFastSearch                                        ( m_iFastSearch  );
+  m_cTEncTop.setSearchRange                                       ( m_iSearchRange );
+  m_cTEncTop.setBipredSearchRange                                 ( m_bipredSearchRange );
+
+  //====== Quality control ========
+  m_cTEncTop.setMaxDeltaQP                                        ( m_iMaxDeltaQP  );
+  m_cTEncTop.setMaxCuDQPDepth                                     ( m_iMaxCuDQPDepth  );
+  m_cTEncTop.setMaxCUChromaQpAdjustmentDepth                      ( m_maxCUChromaQpAdjustmentDepth );
+  m_cTEncTop.setChromaCbQpOffset                                  ( m_cbQpOffset     );
+  m_cTEncTop.setChromaCrQpOffset                                  ( m_crQpOffset  );
+
+  m_cTEncTop.setChromaFormatIdc                                   ( m_chromaFormatIDC  );
+
+#if ADAPTIVE_QP_SELECTION
+  m_cTEncTop.setUseAdaptQpSelect                                  ( m_bUseAdaptQpSelect   );
+#endif
+
+  m_cTEncTop.setUseAdaptiveQP                                     ( m_bUseAdaptiveQP  );
+  m_cTEncTop.setQPAdaptationRange                                 ( m_iQPAdaptationRange );
+  m_cTEncTop.setUseExtendedPrecision                              ( m_useExtendedPrecision );
+  m_cTEncTop.setUseHighPrecisionPredictionWeighting               ( m_useHighPrecisionPredictionWeighting );
+  //====== Tool list ========
+  m_cTEncTop.setDeltaQpRD                                         ( m_uiDeltaQpRD  );
+  m_cTEncTop.setUseASR                                            ( m_bUseASR      );
+  m_cTEncTop.setUseHADME                                          ( m_bUseHADME    );
+  m_cTEncTop.setdQPs                                              ( m_aidQP        );
+  m_cTEncTop.setUseRDOQ                                           ( m_useRDOQ     );
+  m_cTEncTop.setUseRDOQTS                                         ( m_useRDOQTS   );
+  m_cTEncTop.setRDpenalty                                         ( m_rdPenalty );
+  m_cTEncTop.setQuadtreeTULog2MaxSize                             ( m_uiQuadtreeTULog2MaxSize );
+  m_cTEncTop.setQuadtreeTULog2MinSize                             ( m_uiQuadtreeTULog2MinSize );
+  m_cTEncTop.setQuadtreeTUMaxDepthInter                           ( m_uiQuadtreeTUMaxDepthInter );
+  m_cTEncTop.setQuadtreeTUMaxDepthIntra                           ( m_uiQuadtreeTUMaxDepthIntra );
+  m_cTEncTop.setUseFastEnc                                        ( m_bUseFastEnc  );
+  m_cTEncTop.setUseEarlyCU                                        ( m_bUseEarlyCU  );
+  m_cTEncTop.setUseFastDecisionForMerge                           ( m_useFastDecisionForMerge  );
+  m_cTEncTop.setUseCbfFastMode                                    ( m_bUseCbfFastMode  );
+  m_cTEncTop.setUseEarlySkipDetection                             ( m_useEarlySkipDetection );
+  m_cTEncTop.setUseCrossComponentPrediction                       ( m_useCrossComponentPrediction );
+  m_cTEncTop.setUseReconBasedCrossCPredictionEstimate             ( m_reconBasedCrossCPredictionEstimate );
+  m_cTEncTop.setSaoOffsetBitShift                                 ( CHANNEL_TYPE_LUMA  , m_saoOffsetBitShift[CHANNEL_TYPE_LUMA]   );
+  m_cTEncTop.setSaoOffsetBitShift                                 ( CHANNEL_TYPE_CHROMA, m_saoOffsetBitShift[CHANNEL_TYPE_CHROMA] );
+  m_cTEncTop.setUseTransformSkip                                  ( m_useTransformSkip      );
+  m_cTEncTop.setUseTransformSkipFast                              ( m_useTransformSkipFast  );
+  m_cTEncTop.setUseResidualRotation                               ( m_useResidualRotation   );
+  m_cTEncTop.setUseSingleSignificanceMapContext                   ( m_useSingleSignificanceMapContext   );
+  m_cTEncTop.setUseGolombRiceParameterAdaptation                  ( m_useGolombRiceParameterAdaptation );
+  m_cTEncTop.setAlignCABACBeforeBypass                            ( m_alignCABACBeforeBypass );
+  m_cTEncTop.setTransformSkipLog2MaxSize                          ( m_transformSkipLog2MaxSize  );
+  for (UInt signallingModeIndex = 0; signallingModeIndex < NUMBER_OF_RDPCM_SIGNALLING_MODES; signallingModeIndex++)
+  {
+    m_cTEncTop.setUseResidualDPCM                                 ( RDPCMSignallingMode(signallingModeIndex), m_useResidualDPCM[signallingModeIndex]);
+  }
+  m_cTEncTop.setUseConstrainedIntraPred                           ( m_bUseConstrainedIntraPred );
+  m_cTEncTop.setPCMLog2MinSize                                    ( m_uiPCMLog2MinSize);
+  m_cTEncTop.setUsePCM                                            ( m_usePCM );
+  m_cTEncTop.setPCMLog2MaxSize                                    ( m_pcmLog2MaxSize);
+  m_cTEncTop.setMaxNumMergeCand                                   ( m_maxNumMergeCand );
+
+
+  //====== Weighted Prediction ========
+  m_cTEncTop.setUseWP                                             ( m_useWeightedPred      );
+  m_cTEncTop.setWPBiPred                                          ( m_useWeightedBiPred   );
+  //====== Parallel Merge Estimation ========
+  m_cTEncTop.setLog2ParallelMergeLevelMinus2                      ( m_log2ParallelMergeLevel - 2 );
+
+  //====== Slice ========
+  m_cTEncTop.setSliceMode                                         ( (SliceConstraint) m_sliceMode );
+  m_cTEncTop.setSliceArgument                                     ( m_sliceArgument            );
+
+  //====== Dependent Slice ========
+  m_cTEncTop.setSliceSegmentMode                                  (  (SliceConstraint) m_sliceSegmentMode );
+  m_cTEncTop.setSliceSegmentArgument                              ( m_sliceSegmentArgument     );
+
+  if(m_sliceMode == NO_SLICES )
+  {
+    m_bLFCrossSliceBoundaryFlag = true;
+  }
+  m_cTEncTop.setLFCrossSliceBoundaryFlag                          ( m_bLFCrossSliceBoundaryFlag );
+  m_cTEncTop.setUseSAO                                            ( m_bUseSAO );
+  m_cTEncTop.setMaxNumOffsetsPerPic                               ( m_maxNumOffsetsPerPic);
+
+  m_cTEncTop.setSaoCtuBoundary                                    ( m_saoCtuBoundary);
+  m_cTEncTop.setPCMInputBitDepthFlag                              ( m_bPCMInputBitDepthFlag);
+  m_cTEncTop.setPCMFilterDisableFlag                              ( m_bPCMFilterDisableFlag);
+
+  m_cTEncTop.setDisableIntraReferenceSmoothing                    (!m_enableIntraReferenceSmoothing );
+  m_cTEncTop.setDecodedPictureHashSEIEnabled                      ( m_decodedPictureHashSEIEnabled );
+  m_cTEncTop.setRecoveryPointSEIEnabled                           ( m_recoveryPointSEIEnabled );
+  m_cTEncTop.setBufferingPeriodSEIEnabled                         ( m_bufferingPeriodSEIEnabled );
+  m_cTEncTop.setPictureTimingSEIEnabled                           ( m_pictureTimingSEIEnabled );
+  m_cTEncTop.setToneMappingInfoSEIEnabled                         ( m_toneMappingInfoSEIEnabled );
+  m_cTEncTop.setTMISEIToneMapId                                   ( m_toneMapId );
+  m_cTEncTop.setTMISEIToneMapCancelFlag                           ( m_toneMapCancelFlag );
+  m_cTEncTop.setTMISEIToneMapPersistenceFlag                      ( m_toneMapPersistenceFlag );
+  m_cTEncTop.setTMISEICodedDataBitDepth                           ( m_toneMapCodedDataBitDepth );
+  m_cTEncTop.setTMISEITargetBitDepth                              ( m_toneMapTargetBitDepth );
+  m_cTEncTop.setTMISEIModelID                                     ( m_toneMapModelId );
+  m_cTEncTop.setTMISEIMinValue                                    ( m_toneMapMinValue );
+  m_cTEncTop.setTMISEIMaxValue                                    ( m_toneMapMaxValue );
+  m_cTEncTop.setTMISEISigmoidMidpoint                             ( m_sigmoidMidpoint );
+  m_cTEncTop.setTMISEISigmoidWidth                                ( m_sigmoidWidth );
+  m_cTEncTop.setTMISEIStartOfCodedInterva                         ( m_startOfCodedInterval );
+  m_cTEncTop.setTMISEINumPivots                                   ( m_numPivots );
+  m_cTEncTop.setTMISEICodedPivotValue                             ( m_codedPivotValue );
+  m_cTEncTop.setTMISEITargetPivotValue                            ( m_targetPivotValue );
+  m_cTEncTop.setTMISEICameraIsoSpeedIdc                           ( m_cameraIsoSpeedIdc );
+  m_cTEncTop.setTMISEICameraIsoSpeedValue                         ( m_cameraIsoSpeedValue );
+  m_cTEncTop.setTMISEIExposureIndexIdc                            ( m_exposureIndexIdc );
+  m_cTEncTop.setTMISEIExposureIndexValue                          ( m_exposureIndexValue );
+  m_cTEncTop.setTMISEIExposureCompensationValueSignFlag           ( m_exposureCompensationValueSignFlag );
+  m_cTEncTop.setTMISEIExposureCompensationValueNumerator          ( m_exposureCompensationValueNumerator );
+  m_cTEncTop.setTMISEIExposureCompensationValueDenomIdc           ( m_exposureCompensationValueDenomIdc );
+  m_cTEncTop.setTMISEIRefScreenLuminanceWhite                     ( m_refScreenLuminanceWhite );
+  m_cTEncTop.setTMISEIExtendedRangeWhiteLevel                     ( m_extendedRangeWhiteLevel );
+  m_cTEncTop.setTMISEINominalBlackLevelLumaCodeValue              ( m_nominalBlackLevelLumaCodeValue );
+  m_cTEncTop.setTMISEINominalWhiteLevelLumaCodeValue              ( m_nominalWhiteLevelLumaCodeValue );
+  m_cTEncTop.setTMISEIExtendedWhiteLevelLumaCodeValue             ( m_extendedWhiteLevelLumaCodeValue );
+  m_cTEncTop.setChromaSamplingFilterHintEnabled                   ( m_chromaSamplingFilterSEIenabled );
+  m_cTEncTop.setChromaSamplingHorFilterIdc                        ( m_chromaSamplingHorFilterIdc );
+  m_cTEncTop.setChromaSamplingVerFilterIdc                        ( m_chromaSamplingVerFilterIdc );
+  m_cTEncTop.setFramePackingArrangementSEIEnabled                 ( m_framePackingSEIEnabled );
+  m_cTEncTop.setFramePackingArrangementSEIType                    ( m_framePackingSEIType );
+  m_cTEncTop.setFramePackingArrangementSEIId                      ( m_framePackingSEIId );
+  m_cTEncTop.setFramePackingArrangementSEIQuincunx                ( m_framePackingSEIQuincunx );
+  m_cTEncTop.setFramePackingArrangementSEIInterpretation          ( m_framePackingSEIInterpretation );
+  m_cTEncTop.setSegmentedRectFramePackingArrangementSEIEnabled    ( m_segmentedRectFramePackingSEIEnabled );
+  m_cTEncTop.setSegmentedRectFramePackingArrangementSEICancel     ( m_segmentedRectFramePackingSEICancel );
+  m_cTEncTop.setSegmentedRectFramePackingArrangementSEIType       ( m_segmentedRectFramePackingSEIType );
+  m_cTEncTop.setSegmentedRectFramePackingArrangementSEIPersistence( m_segmentedRectFramePackingSEIPersistence );
+  m_cTEncTop.setDisplayOrientationSEIAngle                        ( m_displayOrientationSEIAngle );
+  m_cTEncTop.setTemporalLevel0IndexSEIEnabled                     ( m_temporalLevel0IndexSEIEnabled );
+  m_cTEncTop.setGradualDecodingRefreshInfoEnabled                 ( m_gradualDecodingRefreshInfoEnabled );
+  m_cTEncTop.setNoDisplaySEITLayer                                ( m_noDisplaySEITLayer );
+  m_cTEncTop.setDecodingUnitInfoSEIEnabled                        ( m_decodingUnitInfoSEIEnabled );
+  m_cTEncTop.setSOPDescriptionSEIEnabled                          ( m_SOPDescriptionSEIEnabled );
+  m_cTEncTop.setScalableNestingSEIEnabled                         ( m_scalableNestingSEIEnabled );
+  m_cTEncTop.setTMCTSSEIEnabled                                   ( m_tmctsSEIEnabled );
+  m_cTEncTop.setTimeCodeSEIEnabled                                ( m_timeCodeSEIEnabled );
+  m_cTEncTop.setNumberOfTimeSets                                  ( m_timeCodeSEINumTs );
+  for(Int i = 0; i < m_timeCodeSEINumTs; i++) { m_cTEncTop.setTimeSet(m_timeSetArray[i], i); }
+  m_cTEncTop.setKneeSEIEnabled                                    ( m_kneeSEIEnabled );
+  m_cTEncTop.setKneeSEIId                                         ( m_kneeSEIId );
+  m_cTEncTop.setKneeSEICancelFlag                                 ( m_kneeSEICancelFlag );
+  m_cTEncTop.setKneeSEIPersistenceFlag                            ( m_kneeSEIPersistenceFlag );
+  m_cTEncTop.setKneeSEIInputDrange                                ( m_kneeSEIInputDrange );
+  m_cTEncTop.setKneeSEIInputDispLuminance                         ( m_kneeSEIInputDispLuminance );
+  m_cTEncTop.setKneeSEIOutputDrange                               ( m_kneeSEIOutputDrange );
+  m_cTEncTop.setKneeSEIOutputDispLuminance                        ( m_kneeSEIOutputDispLuminance );
+  m_cTEncTop.setKneeSEINumKneePointsMinus1                        ( m_kneeSEINumKneePointsMinus1 );
+  m_cTEncTop.setKneeSEIInputKneePoint                             ( m_kneeSEIInputKneePoint );
+  m_cTEncTop.setKneeSEIOutputKneePoint                            ( m_kneeSEIOutputKneePoint );
+  m_cTEncTop.setMasteringDisplaySEI                               ( m_masteringDisplay );
+
+  m_cTEncTop.setTileUniformSpacingFlag                            ( m_tileUniformSpacingFlag );
+  m_cTEncTop.setNumColumnsMinus1                                  ( m_numTileColumnsMinus1 );
+  m_cTEncTop.setNumRowsMinus1                                     ( m_numTileRowsMinus1 );
+  if(!m_tileUniformSpacingFlag)
+  {
+    m_cTEncTop.setColumnWidth                                     ( m_tileColumnWidth );
+    m_cTEncTop.setRowHeight                                       ( m_tileRowHeight );
+  }
+  m_cTEncTop.xCheckGSParameters();
+  Int uiTilesCount = (m_numTileRowsMinus1+1) * (m_numTileColumnsMinus1+1);
+  if(uiTilesCount == 1)
+  {
+    m_bLFCrossTileBoundaryFlag = true;
+  }
+  m_cTEncTop.setLFCrossTileBoundaryFlag                           ( m_bLFCrossTileBoundaryFlag );
+  m_cTEncTop.setWaveFrontSynchro                                  ( m_iWaveFrontSynchro );
+  m_cTEncTop.setWaveFrontSubstreams                               ( m_iWaveFrontSubstreams );
+  m_cTEncTop.setTMVPModeId                                        ( m_TMVPModeId );
+  m_cTEncTop.setUseScalingListId                                  ( m_useScalingListId  );
+  m_cTEncTop.setScalingListFile                                   ( m_scalingListFile   );
+  m_cTEncTop.setSignHideFlag                                      ( m_signHideFlag);
+  m_cTEncTop.setUseRateCtrl                                       ( m_RCEnableRateControl );
+  m_cTEncTop.setTargetBitrate                                     ( m_RCTargetBitrate );
+  m_cTEncTop.setKeepHierBit                                       ( m_RCKeepHierarchicalBit );
+  m_cTEncTop.setLCULevelRC                                        ( m_RCLCULevelRC );
+  m_cTEncTop.setUseLCUSeparateModel                               ( m_RCUseLCUSeparateModel );
+  m_cTEncTop.setInitialQP                                         ( m_RCInitialQP );
+  m_cTEncTop.setForceIntraQP                                      ( m_RCForceIntraQP );
+  m_cTEncTop.setTransquantBypassEnableFlag                        ( m_TransquantBypassEnableFlag );
+  m_cTEncTop.setCUTransquantBypassFlagForceValue                  ( m_CUTransquantBypassFlagForce );
+  m_cTEncTop.setCostMode                                          ( m_costMode );
+  m_cTEncTop.setUseRecalculateQPAccordingToLambda                 ( m_recalculateQPAccordingToLambda );
+  m_cTEncTop.setUseStrongIntraSmoothing                           ( m_useStrongIntraSmoothing );
+  m_cTEncTop.setActiveParameterSetsSEIEnabled                     ( m_activeParameterSetsSEIEnabled );
+  m_cTEncTop.setVuiParametersPresentFlag                          ( m_vuiParametersPresentFlag );
+  m_cTEncTop.setAspectRatioInfoPresentFlag                        ( m_aspectRatioInfoPresentFlag);
+  m_cTEncTop.setAspectRatioIdc                                    ( m_aspectRatioIdc );
+  m_cTEncTop.setSarWidth                                          ( m_sarWidth );
+  m_cTEncTop.setSarHeight                                         ( m_sarHeight );
+  m_cTEncTop.setOverscanInfoPresentFlag                           ( m_overscanInfoPresentFlag );
+  m_cTEncTop.setOverscanAppropriateFlag                           ( m_overscanAppropriateFlag );
+  m_cTEncTop.setVideoSignalTypePresentFlag                        ( m_videoSignalTypePresentFlag );
+  m_cTEncTop.setVideoFormat                                       ( m_videoFormat );
+  m_cTEncTop.setVideoFullRangeFlag                                ( m_videoFullRangeFlag );
+  m_cTEncTop.setColourDescriptionPresentFlag                      ( m_colourDescriptionPresentFlag );
+  m_cTEncTop.setColourPrimaries                                   ( m_colourPrimaries );
+  m_cTEncTop.setTransferCharacteristics                           ( m_transferCharacteristics );
+  m_cTEncTop.setMatrixCoefficients                                ( m_matrixCoefficients );
+  m_cTEncTop.setChromaLocInfoPresentFlag                          ( m_chromaLocInfoPresentFlag );
+  m_cTEncTop.setChromaSampleLocTypeTopField                       ( m_chromaSampleLocTypeTopField );
+  m_cTEncTop.setChromaSampleLocTypeBottomField                    ( m_chromaSampleLocTypeBottomField );
+  m_cTEncTop.setNeutralChromaIndicationFlag                       ( m_neutralChromaIndicationFlag );
+  m_cTEncTop.setDefaultDisplayWindow                              ( m_defDispWinLeftOffset, m_defDispWinRightOffset, m_defDispWinTopOffset, m_defDispWinBottomOffset );
+  m_cTEncTop.setFrameFieldInfoPresentFlag                         ( m_frameFieldInfoPresentFlag );
+  m_cTEncTop.setPocProportionalToTimingFlag                       ( m_pocProportionalToTimingFlag );
+  m_cTEncTop.setNumTicksPocDiffOneMinus1                          ( m_numTicksPocDiffOneMinus1    );
+  m_cTEncTop.setBitstreamRestrictionFlag                          ( m_bitstreamRestrictionFlag );
+  m_cTEncTop.setTilesFixedStructureFlag                           ( m_tilesFixedStructureFlag );
+  m_cTEncTop.setMotionVectorsOverPicBoundariesFlag                ( m_motionVectorsOverPicBoundariesFlag );
+  m_cTEncTop.setMinSpatialSegmentationIdc                         ( m_minSpatialSegmentationIdc );
+  m_cTEncTop.setMaxBytesPerPicDenom                               ( m_maxBytesPerPicDenom );
+  m_cTEncTop.setMaxBitsPerMinCuDenom                              ( m_maxBitsPerMinCuDenom );
+  m_cTEncTop.setLog2MaxMvLengthHorizontal                         ( m_log2MaxMvLengthHorizontal );
+  m_cTEncTop.setLog2MaxMvLengthVertical                           ( m_log2MaxMvLengthVertical );
+}
+
+Void TAppEncTop::xCreateLib()
+{
+  // Video I/O
+  m_cTVideoIOYuvInputFile.open( m_pchInputFile,     false, m_inputBitDepth, m_MSBExtendedBitDepth, m_internalBitDepth );  // read  mode
+  m_cTVideoIOYuvInputFile.skipFrames(m_FrameSkip, m_iSourceWidth - m_aiPad[0], m_iSourceHeight - m_aiPad[1], m_InputChromaFormatIDC);
+
+  if (m_pchReconFile)
+  {
+    m_cTVideoIOYuvReconFile.open(m_pchReconFile, true, m_outputBitDepth, m_outputBitDepth, m_internalBitDepth);  // write mode
+  }
+
+  // Neo Decoder
+  m_cTEncTop.create();
+}
+
+Void TAppEncTop::xDestroyLib()
+{
+  // Video I/O
+  m_cTVideoIOYuvInputFile.close();
+  m_cTVideoIOYuvReconFile.close();
+
+  // Neo Decoder
+  m_cTEncTop.destroy();
+}
+
+Void TAppEncTop::xInitLib(Bool isFieldCoding)
+{
+  m_cTEncTop.init(isFieldCoding);
+}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+/**
+ - create internal class
+ - initialize internal variable
+ - until the end of input YUV file, call encoding function in TEncTop class
+ - delete allocated buffers
+ - destroy internal class
+ .
+ */
+Void TAppEncTop::encode()
+{
+  fstream bitstreamFile(m_pchBitstreamFile, fstream::binary | fstream::out);
+  if (!bitstreamFile)
+  {
+    fprintf(stderr, "\nfailed to open bitstream file `%s' for writing\n", m_pchBitstreamFile);
+    exit(EXIT_FAILURE);
+  }
+
+  TComPicYuv*       pcPicYuvOrg = new TComPicYuv;
+  TComPicYuv*       pcPicYuvRec = NULL;
+
+  // initialize internal class & member variables
+  xInitLibCfg();
+  xCreateLib();
+  xInitLib(m_isField);
+
+  if (m_verboseLevel) {
+    printChromaFormat();
+  }
+
+  // main encoder loop
+  Int   iNumEncoded = 0;
+  Bool  bEos = false;
+
+  const InputColourSpaceConversion ipCSC  =  m_inputColourSpaceConvert;
+  const InputColourSpaceConversion snrCSC = (!m_snrInternalColourSpace) ? m_inputColourSpaceConvert : IPCOLOURSPACE_UNCHANGED;
+
+  list<AccessUnit> outputAccessUnits; ///< list of access units to write out.  is populated by the encoding process
+
+  TComPicYuv cPicYuvTrueOrg;
+
+  // allocate original YUV buffer
+  if( m_isField )
+  {
+    pcPicYuvOrg->create( m_iSourceWidth, m_iSourceHeightOrg, m_chromaFormatIDC, m_uiMaxCUWidth, m_uiMaxCUHeight, m_uiMaxCUDepth );
+  cPicYuvTrueOrg.create(m_iSourceWidth, m_iSourceHeightOrg, m_chromaFormatIDC, m_uiMaxCUWidth, m_uiMaxCUHeight, m_uiMaxCUDepth);
+  }
+  else
+  {
+    pcPicYuvOrg->create( m_iSourceWidth, m_iSourceHeight, m_chromaFormatIDC, m_uiMaxCUWidth, m_uiMaxCUHeight, m_uiMaxCUDepth );
+  cPicYuvTrueOrg.create(m_iSourceWidth, m_iSourceHeight, m_chromaFormatIDC, m_uiMaxCUWidth, m_uiMaxCUHeight, m_uiMaxCUDepth);
+  }
+
+  while ( !bEos )
+  {
+    // get buffers
+    xGetBuffer(pcPicYuvRec);
+
+    // read input YUV file
+    m_cTVideoIOYuvInputFile.read( pcPicYuvOrg, &cPicYuvTrueOrg, ipCSC, m_aiPad, m_InputChromaFormatIDC );
+
+    // increase number of received frames
+    m_iFrameRcvd++;
+
+    bEos = (m_isField && (m_iFrameRcvd == (m_framesToBeEncoded >> 1) )) || ( !m_isField && (m_iFrameRcvd == m_framesToBeEncoded) );
+
+    Bool flush = 0;
+    // if end of file (which is only detected on a read failure) flush the encoder of any queued pictures
+    if (m_cTVideoIOYuvInputFile.isEof())
+    {
+      flush = true;
+      bEos = true;
+      m_iFrameRcvd--;
+      m_cTEncTop.setFramesToBeEncoded(m_iFrameRcvd);
+    }
+
+    // call encoding function for one frame
+    if ( m_isField ) m_cTEncTop.encode( bEos, flush ? 0 : pcPicYuvOrg, flush ? 0 : &cPicYuvTrueOrg, snrCSC, m_cListPicYuvRec, outputAccessUnits, iNumEncoded, m_isTopFieldFirst );
+    else             m_cTEncTop.encode( bEos, flush ? 0 : pcPicYuvOrg, flush ? 0 : &cPicYuvTrueOrg, snrCSC, m_cListPicYuvRec, outputAccessUnits, iNumEncoded );
+
+    // write bistream to file if necessary
+    if ( iNumEncoded > 0 )
+    {
+      xWriteOutput(bitstreamFile, iNumEncoded, outputAccessUnits);
+      outputAccessUnits.clear();
+    }
+  }
+
+  if (m_verboseLevel) {
+    m_cTEncTop.printSummary(m_isField);
+  }
+
+  // delete original YUV buffer
+  pcPicYuvOrg->destroy();
+  delete pcPicYuvOrg;
+  pcPicYuvOrg = NULL;
+
+  // delete used buffers in encoder class
+  m_cTEncTop.deletePicBuffer();
+  cPicYuvTrueOrg.destroy();
+
+  // delete buffers & classes
+  xDeleteBuffer();
+  xDestroyLib();
+
+  if (m_verboseLevel) {
+    printRateSummary();
+  }
+
+  return;
+}
+
+// ====================================================================================================================
+// Protected member functions
+// ====================================================================================================================
+
+/**
+ - application has picture buffer list with size of GOP
+ - picture buffer list acts as ring buffer
+ - end of the list has the latest picture
+ .
+ */
+Void TAppEncTop::xGetBuffer( TComPicYuv*& rpcPicYuvRec)
+{
+  assert( m_iGOPSize > 0 );
+
+  // org. buffer
+  if ( m_cListPicYuvRec.size() >= (UInt)m_iGOPSize ) // buffer will be 1 element longer when using field coding, to maintain first field whilst processing second.
+  {
+    rpcPicYuvRec = m_cListPicYuvRec.popFront();
+
+  }
+  else
+  {
+    rpcPicYuvRec = new TComPicYuv;
+
+    rpcPicYuvRec->create( m_iSourceWidth, m_iSourceHeight, m_chromaFormatIDC, m_uiMaxCUWidth, m_uiMaxCUHeight, m_uiMaxCUDepth );
+
+  }
+  m_cListPicYuvRec.pushBack( rpcPicYuvRec );
+}
+
+Void TAppEncTop::xDeleteBuffer( )
+{
+  TComList<TComPicYuv*>::iterator iterPicYuvRec  = m_cListPicYuvRec.begin();
+
+  Int iSize = Int( m_cListPicYuvRec.size() );
+
+  for ( Int i = 0; i < iSize; i++ )
+  {
+    TComPicYuv*  pcPicYuvRec  = *(iterPicYuvRec++);
+    pcPicYuvRec->destroy();
+    delete pcPicYuvRec; pcPicYuvRec = NULL;
+  }
+
+}
+
+/** \param iNumEncoded  number of encoded frames
+ */
+Void TAppEncTop::xWriteOutput(std::ostream& bitstreamFile, Int iNumEncoded, const std::list<AccessUnit>& accessUnits)
+{
+  const InputColourSpaceConversion ipCSC = (!m_outputInternalColourSpace) ? m_inputColourSpaceConvert : IPCOLOURSPACE_UNCHANGED;
+
+  if (m_isField)
+  {
+    //Reinterlace fields
+    Int i;
+    TComList<TComPicYuv*>::iterator iterPicYuvRec = m_cListPicYuvRec.end();
+    list<AccessUnit>::const_iterator iterBitstream = accessUnits.begin();
+
+    for ( i = 0; i < iNumEncoded; i++ )
+    {
+      --iterPicYuvRec;
+    }
+
+    for ( i = 0; i < iNumEncoded/2; i++ )
+    {
+      TComPicYuv*  pcPicYuvRecTop  = *(iterPicYuvRec++);
+      TComPicYuv*  pcPicYuvRecBottom  = *(iterPicYuvRec++);
+
+      if (m_pchReconFile)
+      {
+        m_cTVideoIOYuvReconFile.write( pcPicYuvRecTop, pcPicYuvRecBottom, ipCSC, m_confWinLeft, m_confWinRight, m_confWinTop, m_confWinBottom, NUM_CHROMA_FORMAT, m_isTopFieldFirst );
+      }
+
+      const AccessUnit& auTop = *(iterBitstream++);
+      const vector<UInt>& statsTop = writeAnnexB(bitstreamFile, auTop);
+      rateStatsAccum(auTop, statsTop);
+
+      const AccessUnit& auBottom = *(iterBitstream++);
+      const vector<UInt>& statsBottom = writeAnnexB(bitstreamFile, auBottom);
+      rateStatsAccum(auBottom, statsBottom);
+    }
+  }
+  else
+  {
+    Int i;
+
+    TComList<TComPicYuv*>::iterator iterPicYuvRec = m_cListPicYuvRec.end();
+    list<AccessUnit>::const_iterator iterBitstream = accessUnits.begin();
+
+    for ( i = 0; i < iNumEncoded; i++ )
+    {
+      --iterPicYuvRec;
+    }
+
+    for ( i = 0; i < iNumEncoded; i++ )
+    {
+      TComPicYuv*  pcPicYuvRec  = *(iterPicYuvRec++);
+      if (m_pchReconFile)
+      {
+        m_cTVideoIOYuvReconFile.write( pcPicYuvRec, ipCSC, m_confWinLeft, m_confWinRight, m_confWinTop, m_confWinBottom );
+      }
+
+      const AccessUnit& au = *(iterBitstream++);
+      const vector<UInt>& stats = writeAnnexB(bitstreamFile, au);
+      rateStatsAccum(au, stats);
+    }
+  }
+}
+
+/**
+ *
+ */
+Void TAppEncTop::rateStatsAccum(const AccessUnit& au, const std::vector<UInt>& annexBsizes)
+{
+  AccessUnit::const_iterator it_au = au.begin();
+  vector<UInt>::const_iterator it_stats = annexBsizes.begin();
+
+  for (; it_au != au.end(); it_au++, it_stats++)
+  {
+    switch ((*it_au)->m_nalUnitType)
+    {
+    case NAL_UNIT_CODED_SLICE_TRAIL_R:
+    case NAL_UNIT_CODED_SLICE_TRAIL_N:
+    case NAL_UNIT_CODED_SLICE_TSA_R:
+    case NAL_UNIT_CODED_SLICE_TSA_N:
+    case NAL_UNIT_CODED_SLICE_STSA_R:
+    case NAL_UNIT_CODED_SLICE_STSA_N:
+    case NAL_UNIT_CODED_SLICE_BLA_W_LP:
+    case NAL_UNIT_CODED_SLICE_BLA_W_RADL:
+    case NAL_UNIT_CODED_SLICE_BLA_N_LP:
+    case NAL_UNIT_CODED_SLICE_IDR_W_RADL:
+    case NAL_UNIT_CODED_SLICE_IDR_N_LP:
+    case NAL_UNIT_CODED_SLICE_CRA:
+    case NAL_UNIT_CODED_SLICE_RADL_N:
+    case NAL_UNIT_CODED_SLICE_RADL_R:
+    case NAL_UNIT_CODED_SLICE_RASL_N:
+    case NAL_UNIT_CODED_SLICE_RASL_R:
+    case NAL_UNIT_VPS:
+    case NAL_UNIT_SPS:
+    case NAL_UNIT_PPS:
+      m_essentialBytes += *it_stats;
+      break;
+    default:
+      break;
+    }
+
+    m_totalBytes += *it_stats;
+  }
+}
+
+Void TAppEncTop::printRateSummary()
+{
+  Double time = (Double) m_iFrameRcvd / m_iFrameRate;
+  printf("Bytes written to file: %u (%.3f kbps)\n", m_totalBytes, 0.008 * m_totalBytes / time);
+#if VERBOSE_RATE
+  printf("Bytes for SPS/PPS/Slice (Incl. Annex B): %u (%.3f kbps)\n", m_essentialBytes, 0.008 * m_essentialBytes / time);
+#endif
+}
+
+Void TAppEncTop::printChromaFormat()
+{
+  std::cout << std::setw(43) << "Input ChromaFormatIDC = ";
+  switch (m_InputChromaFormatIDC)
+  {
+  case CHROMA_400:  std::cout << "  4:0:0"; break;
+  case CHROMA_420:  std::cout << "  4:2:0"; break;
+  case CHROMA_422:  std::cout << "  4:2:2"; break;
+  case CHROMA_444:  std::cout << "  4:4:4"; break;
+  default:
+    std::cerr << "Invalid";
+    exit(1);
+  }
+  std::cout << std::endl;
+
+  std::cout << std::setw(43) << "Output (internal) ChromaFormatIDC = ";
+  switch (m_cTEncTop.getChromaFormatIdc())
+  {
+  case CHROMA_400:  std::cout << "  4:0:0"; break;
+  case CHROMA_420:  std::cout << "  4:2:0"; break;
+  case CHROMA_422:  std::cout << "  4:2:2"; break;
+  case CHROMA_444:  std::cout << "  4:4:4"; break;
+  default:
+    std::cerr << "Invalid";
+    exit(1);
+  }
+  std::cout << "\n" << std::endl;
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TAppEncTop.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,103 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TAppEncTop.h
+    \brief    Encoder application class (header)
+*/
+
+#ifndef __TAPPENCTOP__
+#define __TAPPENCTOP__
+
+#include <list>
+#include <ostream>
+
+#include "TLibEncoder/TEncTop.h"
+#include "TLibVideoIO/TVideoIOYuv.h"
+#include "TLibCommon/AccessUnit.h"
+#include "TAppEncCfg.h"
+
+//! \ingroup TAppEncoder
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// encoder application class
+class TAppEncTop : public TAppEncCfg
+{
+private:
+  // class interface
+  TEncTop                    m_cTEncTop;                    ///< encoder class
+  TVideoIOYuv                m_cTVideoIOYuvInputFile;       ///< input YUV file
+  TVideoIOYuv                m_cTVideoIOYuvReconFile;       ///< output reconstruction file
+
+  TComList<TComPicYuv*>      m_cListPicYuvRec;              ///< list of reconstruction YUV files
+
+  Int                        m_iFrameRcvd;                  ///< number of received frames
+
+  UInt m_essentialBytes;                                    ///< bytes of slice + VPS/SPS/PPS NAL units (incl. Annex B), accumulated by rateStatsAccum()
+  UInt m_totalBytes;                                        ///< total bytes of all NAL units written to the bitstream
+
+protected:
+  // initialization
+  Void  xCreateLib        ();                               ///< create files & encoder class
+  Void  xInitLibCfg       ();                               ///< initialize internal variables
+  Void  xInitLib          (Bool isFieldCoding);             ///< initialize encoder class
+  Void  xDestroyLib       ();                               ///< destroy encoder class
+
+  /// obtain required buffers
+  Void xGetBuffer(TComPicYuv*& rpcPicYuvRec);
+
+  /// delete allocated buffers
+  Void  xDeleteBuffer     ();
+
+  // file I/O
+  Void xWriteOutput(std::ostream& bitstreamFile, Int iNumEncoded, const std::list<AccessUnit>& accessUnits); ///< write bitstream to file
+  Void rateStatsAccum(const AccessUnit& au, const std::vector<UInt>& stats); ///< accumulate per-NAL-unit byte counts into m_totalBytes / m_essentialBytes
+  Void printRateSummary();                                  ///< report total bytes written and average bitrate
+  Void printChromaFormat();                                 ///< log input and internal chroma format IDCs
+
+public:
+  TAppEncTop();
+  virtual ~TAppEncTop();
+
+  Void        encode      ();                               ///< main encoding function
+  TEncTop&    getTEncTop  ()   { return  m_cTEncTop; }      ///< return encoder class pointer reference
+
+};// END CLASS DEFINITION TAppEncTop
+
+//! \}
+
+#endif // __TAPPENCTOP__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/AccessUnit.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,76 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ \file     AccessUnit.h
+ \brief    Access Unit class (header)
+ */
+
+#pragma once
+
+#ifndef __ACCESSUNIT__
+#define __ACCESSUNIT__
+
+#include <list>
+#include "NAL.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+/**
+ * An AccessUnit is a list of one or more NAL units, according to the
+ * working draft.  All NAL units within the object belong to the same
+ * access unit.
+ *
+ * NALUnits held in the AccessUnit list are in EBSP format.  Attempting
+ * to insert an OutputNALUnit into the access unit will automatically cause
+ * the nalunit to have its headers written and anti-emulation performed.
+ *
+ * The AccessUnit owns all pointers stored within.  Destroying the
+ * AccessUnit will delete all contained objects.
+ */
+class AccessUnit : public std::list<NALUnitEBSP*> // NOTE: Should not inherit from STL.
+{
+public:
+  ~AccessUnit() // the AccessUnit owns every contained pointer: delete them all on destruction
+  {
+    for (AccessUnit::iterator it = this->begin(); it != this->end(); it++)
+    {
+      delete *it;
+    }
+  }
+};
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/CommonDef.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,299 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     CommonDef.h
+    \brief    Defines constants, macros and tool parameters
+*/
+
+#ifndef __COMMONDEF__
+#define __COMMONDEF__
+
+#include <algorithm>
+#include <iostream>
+#include <assert.h>
+
+#if _MSC_VER > 1000
+// disable "signed and unsigned mismatch"
+#pragma warning( disable : 4018 )
+// disable Bool coercion "performance warning"
+#pragma warning( disable : 4800 )
+#endif // _MSC_VER > 1000
+#include "TypeDef.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Version information
+// ====================================================================================================================
+
+#define NV_VERSION        "16.2"                 ///< Current software version
+
+// ====================================================================================================================
+// Platform information
+// ====================================================================================================================
+
+#ifdef __GNUC__
+#define NVM_COMPILEDBY  "[GCC %d.%d.%d]", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__
+#ifdef __IA64__
+#define NVM_ONARCH    "[on 64-bit] "
+#else
+#define NVM_ONARCH    "[on 32-bit] "
+#endif
+#endif
+
+#ifdef __INTEL_COMPILER
+#define NVM_COMPILEDBY  "[ICC %d]", __INTEL_COMPILER
+#elif  _MSC_VER
+#define NVM_COMPILEDBY  "[VS %d]", _MSC_VER
+#endif
+
+#ifndef NVM_COMPILEDBY
+#define NVM_COMPILEDBY "[Unk-CXX]"
+#endif
+
+#ifdef _WIN32
+#define NVM_ONOS        "[Windows]"
+#elif  __linux
+#define NVM_ONOS        "[Linux]"
+#elif  __CYGWIN__
+#define NVM_ONOS        "[Cygwin]"
+#elif __APPLE__
+#define NVM_ONOS        "[Mac OS X]"
+#else
+#define NVM_ONOS "[Unk-OS]"
+#endif
+
+#define NVM_BITS          "[%d bit] ", (sizeof(Void*) == 8 ? 64 : 32) ///< used for checking 64-bit O/S
+
+#ifndef NULL
+#define NULL              0
+#endif
+
+// ====================================================================================================================
+// Common constants
+// ====================================================================================================================
+
+#define _SUMMARY_OUT_               0           ///< print-out PSNR results of all slices to summary.txt
+#define _SUMMARY_PIC_               0           ///< print-out PSNR results for each slice type to summary.txt
+
+#define MAX_GOP                     64          ///< max. value of hierarchical GOP size
+
+#define MAX_NUM_REF_PICS            16          ///< max. number of pictures used for reference
+#define MAX_NUM_REF                 16          ///< max. number of entries in picture reference list
+
+#define MAX_UINT                    0xFFFFFFFFU ///< max. value of unsigned 32-bit integer
+#define MAX_INT                     2147483647  ///< max. value of signed 32-bit integer
+#define MAX_INT64                   0x7FFFFFFFFFFFFFFFLL  ///< max. value of signed 64-bit integer
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+#define MAX_INTERMEDIATE_INT        MAX_INT64
+#else
+#define MAX_INTERMEDIATE_INT        MAX_INT
+#endif
+
+#define MAX_DOUBLE                  1.7e+308    ///< max. value of Double-type value
+
+#define MIN_QP                      0
+#define MAX_QP                      51
+
+#define NOT_VALID                   -1
+
+// ====================================================================================================================
+// Macro functions
+// ====================================================================================================================
+
+extern Int g_bitDepth[MAX_NUM_CHANNEL_TYPE];
+
+template <typename T> inline T Clip3 (const T minVal, const T maxVal, const T a) { return std::min<T> (std::max<T> (minVal, a) , maxVal); }  ///< general min/max clip of a into [minVal, maxVal]
+template <typename T> inline T ClipBD(const T x, const Int bitDepth)             { return Clip3(T(0), T((1 << bitDepth)-1), x);           }  ///< clip to the representable sample range [0, 2^bitDepth - 1]
+template <typename T> inline T Clip  (const T x, const ChannelType type)         { return ClipBD(x, g_bitDepth[type]);                    }  ///< clip using the globally configured bit depth of the given channel type
+
+template <typename T> inline Void Check3( T minVal, T maxVal, T a)
+{
+  if ((a > maxVal) || (a < minVal))
+  {
+    std::cerr << "ERROR: Range check " << minVal << " >= " << a << " <= " << maxVal << " failed" << std::endl;
+    assert(false);
+    exit(1);
+  }
+}  ///< range assertion (not a clip): terminates the program if a lies outside [minVal, maxVal]
+
+#define DATA_ALIGN                  1                                                                 ///< use 32-bit aligned malloc/free
+#if     DATA_ALIGN && _WIN32 && ( _MSC_VER > 1300 )
+#define xMalloc( type, len )        _aligned_malloc( sizeof(type)*(len), 32 )
+#define xFree( ptr )                _aligned_free  ( ptr )
+#else
+#define xMalloc( type, len )        malloc   ( sizeof(type)*(len) )
+#define xFree( ptr )                free     ( ptr )
+#endif
+
+#define FATAL_ERROR_0(MESSAGE, EXITCODE)                      \
+{                                                             \
+  printf(MESSAGE);                                            \
+  exit(EXITCODE);                                             \
+}
+
+template <typename ValueType> inline ValueType leftShift       (const ValueType value, const Int shift) { return (shift >= 0) ? ( value                                  << shift) : ( value                                   >> -shift); }  ///< left shift; a negative shift shifts right instead
+template <typename ValueType> inline ValueType rightShift      (const ValueType value, const Int shift) { return (shift >= 0) ? ( value                                  >> shift) : ( value                                   << -shift); }  ///< right shift; a negative shift shifts left instead
+template <typename ValueType> inline ValueType leftShift_round (const ValueType value, const Int shift) { return (shift >= 0) ? ( value                                  << shift) : ((value + (ValueType(1) << (-shift - 1))) >> -shift); }  ///< as leftShift, but adds half (rounds) when the effective direction is right
+template <typename ValueType> inline ValueType rightShift_round(const ValueType value, const Int shift) { return (shift >= 0) ? ((value + (ValueType(1) << (shift - 1))) >> shift) : ( value                                   << -shift); }  ///< as rightShift with rounding; NOTE(review): shift == 0 evaluates 1 << -1 (undefined behavior) — confirm callers pass shift != 0
+#if O0043_BEST_EFFORT_DECODING
+// when shift = 0, returns value
+// when shift = 1, (value + 0 + value[1]) >> 1
+// when shift = 2, (value + 1 + value[2]) >> 2
+// when shift = 3, (value + 3 + value[3]) >> 3
+template <typename ValueType> inline ValueType rightShiftEvenRounding(const ValueType value, const UInt shift) { return (shift == 0) ? value : ((value + (1<<(shift-1))-1 + ((value>>shift)&1)) >> shift) ; }  ///< right shift with round-to-even tie-breaking (see table above)
+#endif
+
+// ====================================================================================================================
+// Coding tool configuration
+// ====================================================================================================================
+
+// AMVP: advanced motion vector prediction
+#define AMVP_MAX_NUM_CANDS          2           ///< max number of final candidates
+#define AMVP_MAX_NUM_CANDS_MEM      3           ///< max number of candidates
+// MERGE
+#define MRG_MAX_NUM_CANDS           5
+
+// Reference memory management
+#define DYN_REF_FREE                0           ///< dynamic free of reference memories
+
+// Explicit temporal layer QP offset
+#define MAX_TLAYER                  7           ///< max number of temporal layer
+#define HB_LAMBDA_FOR_LDC           1           ///< use of B-style lambda for non-key pictures in low-delay mode
+
+// Fast estimation of generalized B in low-delay mode
+#define GPB_SIMPLE                  1           ///< Simple GPB mode
+#if     GPB_SIMPLE
+#define GPB_SIMPLE_UNI              1           ///< Simple mode for uni-direction
+#endif
+
+// Fast ME using smoother MV assumption
+#define FASTME_SMOOTHER_MV          1           ///< reduce ME time using faster option
+
+// Adaptive search range depending on POC difference
+#define ADAPT_SR_SCALE              1           ///< division factor for adaptive search range
+
+#define CLIP_TO_709_RANGE           0
+
+// Early-skip threshold (encoder)
+#define EARLY_SKIP_THRES            1.50        ///< if RD < thres*avg[BestSkipRD]
+
+
+#define MAX_CHROMA_FORMAT_IDC      3
+
+// TODO: Existing names used for the different NAL unit types can be altered to better reflect the names in the spec.
+//       However, the names in the spec are not yet stable at this point. Once the names are stable, a cleanup
+//       effort can be done without use of macros to alter the names used to indicate the different NAL unit types.
+enum NalUnitType // HEVC NAL unit type codes; values match Table 7-1 of the H.265 spec
+{
+  NAL_UNIT_CODED_SLICE_TRAIL_N = 0, // 0
+  NAL_UNIT_CODED_SLICE_TRAIL_R,     // 1
+
+  NAL_UNIT_CODED_SLICE_TSA_N,       // 2
+  NAL_UNIT_CODED_SLICE_TSA_R,       // 3
+
+  NAL_UNIT_CODED_SLICE_STSA_N,      // 4
+  NAL_UNIT_CODED_SLICE_STSA_R,      // 5
+
+  NAL_UNIT_CODED_SLICE_RADL_N,      // 6
+  NAL_UNIT_CODED_SLICE_RADL_R,      // 7
+
+  NAL_UNIT_CODED_SLICE_RASL_N,      // 8
+  NAL_UNIT_CODED_SLICE_RASL_R,      // 9
+
+  NAL_UNIT_RESERVED_VCL_N10,        // 10..15: reserved non-IRAP VCL types
+  NAL_UNIT_RESERVED_VCL_R11,
+  NAL_UNIT_RESERVED_VCL_N12,
+  NAL_UNIT_RESERVED_VCL_R13,
+  NAL_UNIT_RESERVED_VCL_N14,
+  NAL_UNIT_RESERVED_VCL_R15,
+
+  NAL_UNIT_CODED_SLICE_BLA_W_LP,    // 16
+  NAL_UNIT_CODED_SLICE_BLA_W_RADL,  // 17
+  NAL_UNIT_CODED_SLICE_BLA_N_LP,    // 18
+  NAL_UNIT_CODED_SLICE_IDR_W_RADL,  // 19
+  NAL_UNIT_CODED_SLICE_IDR_N_LP,    // 20
+  NAL_UNIT_CODED_SLICE_CRA,         // 21
+  NAL_UNIT_RESERVED_IRAP_VCL22,     // 22..23: reserved IRAP VCL types
+  NAL_UNIT_RESERVED_IRAP_VCL23,
+
+  NAL_UNIT_RESERVED_VCL24,          // 24..31: reserved non-IRAP VCL types
+  NAL_UNIT_RESERVED_VCL25,
+  NAL_UNIT_RESERVED_VCL26,
+  NAL_UNIT_RESERVED_VCL27,
+  NAL_UNIT_RESERVED_VCL28,
+  NAL_UNIT_RESERVED_VCL29,
+  NAL_UNIT_RESERVED_VCL30,
+  NAL_UNIT_RESERVED_VCL31,
+
+  NAL_UNIT_VPS,                     // 32
+  NAL_UNIT_SPS,                     // 33
+  NAL_UNIT_PPS,                     // 34
+  NAL_UNIT_ACCESS_UNIT_DELIMITER,   // 35
+  NAL_UNIT_EOS,                     // 36
+  NAL_UNIT_EOB,                     // 37
+  NAL_UNIT_FILLER_DATA,             // 38
+  NAL_UNIT_PREFIX_SEI,              // 39
+  NAL_UNIT_SUFFIX_SEI,              // 40
+
+  NAL_UNIT_RESERVED_NVCL41,         // 41..47: reserved non-VCL types
+  NAL_UNIT_RESERVED_NVCL42,
+  NAL_UNIT_RESERVED_NVCL43,
+  NAL_UNIT_RESERVED_NVCL44,
+  NAL_UNIT_RESERVED_NVCL45,
+  NAL_UNIT_RESERVED_NVCL46,
+  NAL_UNIT_RESERVED_NVCL47,
+  NAL_UNIT_UNSPECIFIED_48,          // 48..63: unspecified (application use)
+  NAL_UNIT_UNSPECIFIED_49,
+  NAL_UNIT_UNSPECIFIED_50,
+  NAL_UNIT_UNSPECIFIED_51,
+  NAL_UNIT_UNSPECIFIED_52,
+  NAL_UNIT_UNSPECIFIED_53,
+  NAL_UNIT_UNSPECIFIED_54,
+  NAL_UNIT_UNSPECIFIED_55,
+  NAL_UNIT_UNSPECIFIED_56,
+  NAL_UNIT_UNSPECIFIED_57,
+  NAL_UNIT_UNSPECIFIED_58,
+  NAL_UNIT_UNSPECIFIED_59,
+  NAL_UNIT_UNSPECIFIED_60,
+  NAL_UNIT_UNSPECIFIED_61,
+  NAL_UNIT_UNSPECIFIED_62,
+  NAL_UNIT_UNSPECIFIED_63,
+  NAL_UNIT_INVALID,                 // 64: out-of-range sentinel, not a spec value
+};
+
+//! \}
+
+#endif // end of #ifndef  __COMMONDEF__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/ContextModel.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,129 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     ContextModel.cpp
+    \brief    context model class
+*/
+
+#include <algorithm>
+
+#include "ContextModel.h"
+
+using namespace std;
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+/**
+ - initialize context model with respect to QP and initialization value
+ .
+ \param  qp         input QP value
+ \param  initValue  8 bit initialization value
+ */
+Void ContextModel::init( Int qp, Int initValue )
+{
+  qp = Clip3(0, 51, qp);                                 // clamp QP to the legal HEVC range [0, 51]
+
+  Int  slope      = (initValue>>4)*5 - 45;               // upper 4 bits of initValue give the slope of the linear model
+  Int  offset     = ((initValue&15)<<3)-16;              // lower 4 bits give the offset
+  Int  initState  =  min( max( 1, ( ( ( slope * qp ) >> 4 ) + offset ) ), 126 );  // pre-context state, clipped to [1, 126]
+  UInt mpState    = (initState >= 64 );                  // MPS = 1 for the upper half of the state range
+  m_ucState       = ( (mpState? (initState - 64):(63 - initState)) <<1) + mpState; // pack as (probability state << 1) | MPS
+}
+
+const UChar ContextModel::m_aucNextStateMPS[ ContextModel::m_totalStates ] = // packed-state transition after coding the MPS: probability state advances; top states saturate
+{
+  2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+  18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+  34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+  50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+  66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+  82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+  98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
+  114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 124, 125, 126, 127
+};
+
+const UChar ContextModel::m_aucNextStateLPS[ ContextModel::m_totalStates ] = // packed-state transition after coding the LPS: state moves toward equiprobable; entries 0/1 flip the MPS
+{
+  1, 0, 0, 1, 2, 3, 4, 5, 4, 5, 8, 9, 8, 9, 10, 11,
+  12, 13, 14, 15, 16, 17, 18, 19, 18, 19, 22, 23, 22, 23, 24, 25,
+  26, 27, 26, 27, 30, 31, 30, 31, 32, 33, 32, 33, 36, 37, 36, 37,
+  38, 39, 38, 39, 42, 43, 42, 43, 44, 45, 44, 45, 46, 47, 48, 49,
+  48, 49, 50, 51, 52, 53, 52, 53, 54, 55, 54, 55, 56, 57, 58, 59,
+  58, 59, 60, 61, 60, 61, 60, 61, 62, 63, 64, 65, 64, 65, 66, 67,
+  66, 67, 66, 67, 68, 69, 68, 69, 70, 71, 70, 71, 70, 71, 72, 73,
+  72, 73, 72, 73, 74, 75, 74, 75, 74, 75, 76, 77, 76, 77, 126, 127
+};
+
+#if FAST_BIT_EST
+UChar ContextModel::m_nextState[ ContextModel::m_totalStates ][2 /*MPS = [0|1]*/]; // merged transition table, filled once by buildNextStateTable()
+
+Void ContextModel::buildNextStateTable() // precompute next packed state for every (state, bin) pair so update() is a single lookup
+{
+  for (Int i = 0; i < ContextModel::m_totalStates; i++)
+  {
+    for (Int j = 0; j < 2; j++)
+    {
+      m_nextState[i][j] = ((i&1) == j) ? m_aucNextStateMPS[i] : m_aucNextStateLPS[i]; // low bit of the packed state is the MPS: bin == MPS -> MPS transition, else LPS
+    }
+  }
+}
+#endif
+
+const Int ContextModel::m_entropyBits[ ContextModel::m_totalStates ] = // fixed-point bit-cost estimate per packed state (0x08000 for the equiprobable state suggests a 2^15 = 1-bit scale)
+{
+#if FAST_BIT_EST
+  // Corrected table, most notably for last state
+  0x07b23, 0x085f9, 0x074a0, 0x08cbc, 0x06ee4, 0x09354, 0x067f4, 0x09c1b, 0x060b0, 0x0a62a, 0x05a9c, 0x0af5b, 0x0548d, 0x0b955, 0x04f56, 0x0c2a9,
+  0x04a87, 0x0cbf7, 0x045d6, 0x0d5c3, 0x04144, 0x0e01b, 0x03d88, 0x0e937, 0x039e0, 0x0f2cd, 0x03663, 0x0fc9e, 0x03347, 0x10600, 0x03050, 0x10f95,
+  0x02d4d, 0x11a02, 0x02ad3, 0x12333, 0x0286e, 0x12cad, 0x02604, 0x136df, 0x02425, 0x13f48, 0x021f4, 0x149c4, 0x0203e, 0x1527b, 0x01e4d, 0x15d00,
+  0x01c99, 0x166de, 0x01b18, 0x17017, 0x019a5, 0x17988, 0x01841, 0x18327, 0x016df, 0x18d50, 0x015d9, 0x19547, 0x0147c, 0x1a083, 0x0138e, 0x1a8a3,
+  0x01251, 0x1b418, 0x01166, 0x1bd27, 0x01068, 0x1c77b, 0x00f7f, 0x1d18e, 0x00eda, 0x1d91a, 0x00e19, 0x1e254, 0x00d4f, 0x1ec9a, 0x00c90, 0x1f6e0,
+  0x00c01, 0x1fef8, 0x00b5f, 0x208b1, 0x00ab6, 0x21362, 0x00a15, 0x21e46, 0x00988, 0x2285d, 0x00934, 0x22ea8, 0x008a8, 0x239b2, 0x0081d, 0x24577,
+  0x007c9, 0x24ce6, 0x00763, 0x25663, 0x00710, 0x25e8f, 0x006a0, 0x26a26, 0x00672, 0x26f23, 0x005e8, 0x27ef8, 0x005ba, 0x284b5, 0x0055e, 0x29057,
+  0x0050c, 0x29bab, 0x004c1, 0x2a674, 0x004a7, 0x2aa5e, 0x0046f, 0x2b32f, 0x0041f, 0x2c0ad, 0x003e7, 0x2ca8d, 0x003ba, 0x2d323, 0x0010c, 0x3bfbb
+#else
+  0x08000, 0x08000, 0x076da, 0x089a0, 0x06e92, 0x09340, 0x0670a, 0x09cdf, 0x06029, 0x0a67f, 0x059dd, 0x0b01f, 0x05413, 0x0b9bf, 0x04ebf, 0x0c35f,
+  0x049d3, 0x0ccff, 0x04546, 0x0d69e, 0x0410d, 0x0e03e, 0x03d22, 0x0e9de, 0x0397d, 0x0f37e, 0x03619, 0x0fd1e, 0x032ee, 0x106be, 0x02ffa, 0x1105d,
+  0x02d37, 0x119fd, 0x02aa2, 0x1239d, 0x02836, 0x12d3d, 0x025f2, 0x136dd, 0x023d1, 0x1407c, 0x021d2, 0x14a1c, 0x01ff2, 0x153bc, 0x01e2f, 0x15d5c,
+  0x01c87, 0x166fc, 0x01af7, 0x1709b, 0x0197f, 0x17a3b, 0x0181d, 0x183db, 0x016d0, 0x18d7b, 0x01595, 0x1971b, 0x0146c, 0x1a0bb, 0x01354, 0x1aa5a,
+  0x0124c, 0x1b3fa, 0x01153, 0x1bd9a, 0x01067, 0x1c73a, 0x00f89, 0x1d0da, 0x00eb7, 0x1da79, 0x00df0, 0x1e419, 0x00d34, 0x1edb9, 0x00c82, 0x1f759,
+  0x00bda, 0x200f9, 0x00b3c, 0x20a99, 0x00aa5, 0x21438, 0x00a17, 0x21dd8, 0x00990, 0x22778, 0x00911, 0x23118, 0x00898, 0x23ab8, 0x00826, 0x24458,
+  0x007ba, 0x24df7, 0x00753, 0x25797, 0x006f2, 0x26137, 0x00696, 0x26ad7, 0x0063f, 0x27477, 0x005ed, 0x27e17, 0x0059f, 0x287b6, 0x00554, 0x29156,
+  0x0050e, 0x29af6, 0x004cc, 0x2a497, 0x0048d, 0x2ae35, 0x00451, 0x2b7d6, 0x00418, 0x2c176, 0x003e2, 0x2cb15, 0x003af, 0x2d4b5, 0x0037f, 0x2de55
+#endif
+};
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/ContextModel.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,108 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/** \file     ContextModel.h
+    \brief    context model class (header)
+*/
+
+#ifndef __CONTEXTMODEL__
+#define __CONTEXTMODEL__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include "CommonDef.h"
+#include "TComRom.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// context model class
+class ContextModel // single CABAC context: packed probability state + MPS, with bit-cost estimation tables
+{
+public:
+  ContextModel  ()                        { m_ucState = 0; m_binsCoded = 0; }
+  ~ContextModel ()                        {}
+
+  UChar getState  ()                { return ( m_ucState >> 1 ); }                    ///< get current probability state (upper 7 bits of the packed value)
+  UChar getMps    ()                { return ( m_ucState  & 1 ); }                    ///< get current MPS (low bit of the packed value)
+  Void  setStateAndMps( UChar ucState, UChar ucMPS) { m_ucState = (ucState << 1) + ucMPS; } ///< set state and MPS
+
+  Void init ( Int qp, Int initValue );   ///< initialize state with initial probability
+
+  Void updateLPS ()
+  {
+    m_ucState = m_aucNextStateLPS[ m_ucState ]; // transition after coding the least probable symbol
+  }
+
+  Void updateMPS ()
+  {
+    m_ucState = m_aucNextStateMPS[ m_ucState ]; // transition after coding the most probable symbol
+  }
+
+  Int getEntropyBits(Short val) { return m_entropyBits[m_ucState ^ val]; } ///< fixed-point bit-cost of coding bin 'val'; XOR with the MPS bit selects the MPS/LPS cost
+
+#if FAST_BIT_EST
+  Void update( Int binVal )
+  {
+    m_ucState = m_nextState[m_ucState][binVal]; // single-lookup transition built by buildNextStateTable()
+  }
+  static Void buildNextStateTable();
+  static Int getEntropyBitsTrm( Int val ) { return m_entropyBits[126 ^ val]; } ///< bit-cost for the terminating bin (fixed state 63, MPS 0)
+#endif
+  Void setBinsCoded(UInt val)   { m_binsCoded = val;  }
+  UInt getBinsCoded()           { return m_binsCoded;   }
+
+private:
+  UChar         m_ucState;                                                                  ///< internal state variable, packed as (probability state << 1) | MPS
+
+  static const  UInt  m_totalStates = (1 << CONTEXT_STATE_BITS) * 2; //*2 for MPS = [0|1]
+  static const  UChar m_aucNextStateMPS[m_totalStates];
+  static const  UChar m_aucNextStateLPS[m_totalStates];
+  static const  Int   m_entropyBits    [m_totalStates];
+#if FAST_BIT_EST
+  static UChar m_nextState[m_totalStates][2 /*MPS = [0|1]*/];
+#endif
+  UInt          m_binsCoded;                                                                ///< running count of bins coded with this context
+};
+
+//! \}
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/ContextModel3DBuffer.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,120 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     ContextModel3DBuffer.cpp
+    \brief    context model 3D buffer class
+*/
+
+#include "ContextModel3DBuffer.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / initialization / destroy
+// ====================================================================================================================
+
+/** Construct a Z x Y x X view over a caller-provided flat array of ContextModel.
+ *  No memory is allocated here: basePtr points into a pool owned by the caller,
+ *  and 'count' is advanced by the number of models this buffer claims so the
+ *  caller can carve the next buffer from the same pool.
+ */
+ContextModel3DBuffer::ContextModel3DBuffer( UInt uiSizeZ, UInt uiSizeY, UInt uiSizeX, ContextModel *basePtr, Int &count )
+: m_sizeX  ( uiSizeX )
+, m_sizeXY ( uiSizeX * uiSizeY )
+, m_sizeXYZ( uiSizeX * uiSizeY * uiSizeZ )
+{
+  // attach to the caller's pool (non-owning) and report how many entries we use
+  m_contextModel = basePtr;
+  count += m_sizeXYZ;
+}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+/**
+ * Initialize 3D buffer with respect to slice type, QP and given initial probability table
+ *
+ * \param  sliceType       slice type (selects the row of the initialization table)
+ * \param  qp              input QP value
+ * \param  ctxModel        initialization table: one row of m_sizeXYZ entries per slice type
+ */
+Void ContextModel3DBuffer::initBuffer( SliceType sliceType, Int qp, UChar* ctxModel )
+{
+  // advance to the row of init values belonging to this slice type
+  ctxModel += sliceType * m_sizeXYZ;
+
+  for ( Int n = 0; n < m_sizeXYZ; n++ )
+  {
+    m_contextModel[ n ].init( qp, ctxModel[ n ] );
+    m_contextModel[ n ].setBinsCoded( 0 );  // reset per-context bin counter
+  }
+}
+
+/**
+ * Calculate the cost of choosing a probability table based on the current probability of CABAC at encoder
+ *
+ * \param  sliceType     slice type (selects the row of the candidate init table)
+ * \param  qp            input QP value
+ * \param  ctxModel      candidate initialization table under evaluation
+ * \return accumulated expected entropy-bit cost (fixed-point units of ContextModel::getEntropyBits)
+ */
+UInt ContextModel3DBuffer::calcCost( SliceType sliceType, Int qp, UChar* ctxModel )
+{
+  UInt cost = 0;
+  ctxModel += sliceType * m_sizeXYZ;
+
+  for ( Int n = 0; n < m_sizeXYZ; n++ )
+  {
+    // candidate context initialized from the table under evaluation
+    ContextModel tmpContextModel;
+    tmpContextModel.init( qp, ctxModel[ n ] );
+
+    // Map the 64 CABAC states to their corresponding probability values
+    static Double aStateToProbLPS[] = {0.50000000, 0.47460857, 0.45050660, 0.42762859, 0.40591239, 0.38529900, 0.36573242, 0.34715948, 0.32952974, 0.31279528, 0.29691064, 0.28183267, 0.26752040, 0.25393496, 0.24103941, 0.22879875, 0.21717969, 0.20615069, 0.19568177, 0.18574449, 0.17631186, 0.16735824, 0.15885931, 0.15079198, 0.14313433, 0.13586556, 0.12896592, 0.12241667, 0.11620000, 0.11029903, 0.10469773, 0.09938088, 0.09433404, 0.08954349, 0.08499621, 0.08067986, 0.07658271, 0.07269362, 0.06900203, 0.06549791, 0.06217174, 0.05901448, 0.05601756, 0.05317283, 0.05047256, 0.04790942, 0.04547644, 0.04316702, 0.04097487, 0.03889405, 0.03691890, 0.03504406, 0.03326442, 0.03157516, 0.02997168, 0.02844963, 0.02700488, 0.02563349, 0.02433175, 0.02309612, 0.02192323, 0.02080991, 0.01975312, 0.01875000};
+
+    // LPS probability under the *current* (adapted) state of this context,
+    // not the candidate: we weight the candidate's bit costs by where the
+    // encoder's probabilities actually are right now.
+    Double probLPS          = aStateToProbLPS[ m_contextModel[ n ].getState() ];
+    Double prob0, prob1;
+    if (m_contextModel[ n ].getMps()==1)
+    {
+      prob0 = probLPS;
+      prob1 = 1.0-prob0;
+    }
+    else
+    {
+      prob1 = probLPS;
+      prob0 = 1.0-prob1;
+    }
+
+    // only contexts that actually coded bins contribute to the cost
+    if (m_contextModel[ n ].getBinsCoded()>0)
+    {
+      cost += (UInt) (prob0 * tmpContextModel.getEntropyBits( 0 ) + prob1 * tmpContextModel.getEntropyBits( 1 ));
+    }
+  }
+
+  return cost;
+}
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/ContextModel3DBuffer.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,98 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     ContextModel3DBuffer.h
+    \brief    context model 3D buffer class (header)
+*/
+
+#ifndef __CONTEXTMODEL3DBUFFER__
+#define __CONTEXTMODEL3DBUFFER__
+
+#include <stdio.h>
+#include <assert.h>
+#include <memory.h>
+
+#include "CommonDef.h"
+#include "ContextModel.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// Context model 3D buffer class: a non-owning Z x Y x X view over a flat
+/// array of ContextModel supplied at construction (see the constructor in
+/// ContextModel3DBuffer.cpp — no allocation or deallocation happens here).
+class ContextModel3DBuffer
+{
+protected:
+  ContextModel* m_contextModel; ///< array of context models (not owned)
+  const UInt    m_sizeX;        ///< X size of 3D buffer
+  const UInt    m_sizeXY;       ///< X times Y size of 3D buffer
+  const UInt    m_sizeXYZ;      ///< total size of 3D buffer
+
+public:
+  ContextModel3DBuffer  ( UInt uiSizeZ, UInt uiSizeY, UInt uiSizeX, ContextModel *basePtr, Int &count );
+  ~ContextModel3DBuffer () {}
+
+  // access functions; linear index is z*sizeXY + y*sizeX + x (no bounds check)
+  ContextModel& get( UInt uiZ, UInt uiY, UInt uiX )
+  {
+    return  m_contextModel[ uiZ * m_sizeXY + uiY * m_sizeX + uiX ];
+  }
+  ContextModel* get( UInt uiZ, UInt uiY )   ///< pointer to the start of row (z, y)
+  {
+    return &m_contextModel[ uiZ * m_sizeXY + uiY * m_sizeX ];
+  }
+  ContextModel* get( UInt uiZ )             ///< pointer to the start of plane z
+  {
+    return &m_contextModel[ uiZ * m_sizeXY ];
+  }
+
+  // initialization & copy functions
+  Void initBuffer( SliceType eSliceType, Int iQp, UChar* ctxModel );          ///< initialize 3D buffer by slice type & QP
+
+  UInt calcCost( SliceType sliceType, Int qp, UChar* ctxModel );      ///< determine cost of choosing a probability table based on current probabilities
+  /** copy from another buffer
+   * \param src buffer to copy from (must have the same total size; asserted)
+   */
+  Void copyFrom( const ContextModel3DBuffer* src )
+  {
+    assert( m_sizeXYZ == src->m_sizeXYZ );
+    ::memcpy( m_contextModel, src->m_contextModel, sizeof(ContextModel) * m_sizeXYZ );
+  }
+};
+
+//! \}
+
+#endif // __CONTEXTMODEL3DBUFFER__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/ContextTables.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,502 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     ContextTables.h
+    \brief    Defines constants and tables for SBAC
+    \todo     number of context models is not matched to actual use, should be fixed
+*/
+
+#ifndef __CONTEXTTABLES__
+#define __CONTEXTTABLES__
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Constants
+// ====================================================================================================================
+
+#define MAX_NUM_CTX_MOD             512       ///< maximum number of supported contexts
+
+#define NUM_SPLIT_FLAG_CTX            3       ///< number of context models for split flag
+#define NUM_SKIP_FLAG_CTX             3       ///< number of context models for skip flag
+
+#define NUM_MERGE_FLAG_EXT_CTX        1       ///< number of context models for merge flag of merge extended
+#define NUM_MERGE_IDX_EXT_CTX         1       ///< number of context models for merge index of merge extended
+
+#define NUM_PART_SIZE_CTX             4       ///< number of context models for partition size
+#define NUM_PRED_MODE_CTX             1       ///< number of context models for prediction mode
+
+#define NUM_ADI_CTX                   1       ///< number of context models for intra prediction
+
+#define NUM_CHROMA_PRED_CTX           2       ///< number of context models for intra prediction (chroma)
+#define NUM_INTER_DIR_CTX             5       ///< number of context models for inter prediction direction
+#define NUM_MV_RES_CTX                2       ///< number of context models for motion vector difference
+#define NUM_CHROMA_QP_ADJ_FLAG_CTX    1       ///< number of context models for chroma_qp_adjustment_flag
+#define NUM_CHROMA_QP_ADJ_IDC_CTX     1       ///< number of context models for chroma_qp_adjustment_idc
+
+#define NUM_REF_NO_CTX                2       ///< number of context models for reference index
+#define NUM_TRANS_SUBDIV_FLAG_CTX     3       ///< number of context models for transform subdivision flags
+#define NUM_QT_ROOT_CBF_CTX           1       ///< number of context models for QT ROOT CBF
+#define NUM_DELTA_QP_CTX              3       ///< number of context models for dQP
+
+#define NUM_SIG_CG_FLAG_CTX           2       ///< number of context models for MULTI_LEVEL_SIGNIFICANCE
+#define NUM_EXPLICIT_RDPCM_FLAG_CTX   1       ///< number of context models for the flag which specifies whether to use RDPCM on inter coded residues
+#define NUM_EXPLICIT_RDPCM_DIR_CTX    1       ///< number of context models for the flag which specifies which RDPCM direction is used on inter coded residues
+
+//--------------------------------------------------------------------------------------------------
+
+// context size definitions for significance map
+
+#define NUM_SIG_FLAG_CTX_LUMA        28       ///< number of context models for luma sig flag
+#define NUM_SIG_FLAG_CTX_CHROMA      16       ///< number of context models for chroma sig flag
+
+// NOTE: the per-set sizes below sum to the totals above: luma 9+12+6+1 = 28, chroma 9+3+3+1 = 16
+//                                                                                                           |----Luma-----|  |---Chroma----|
+static const UInt significanceMapContextSetStart         [MAX_NUM_CHANNEL_TYPE][CONTEXT_NUMBER_OF_TYPES] = { {0,  9, 21, 27}, {0,  9, 12, 15} };
+static const UInt significanceMapContextSetSize          [MAX_NUM_CHANNEL_TYPE][CONTEXT_NUMBER_OF_TYPES] = { {9, 12,  6,  1}, {9,  3,  3,  1} };
+static const UInt nonDiagonalScan8x8ContextOffset        [MAX_NUM_CHANNEL_TYPE]                          = {  6,               0              };
+static const UInt notFirstGroupNeighbourhoodContextOffset[MAX_NUM_CHANNEL_TYPE]                          = {  3,               0              };
+
+//------------------
+
+#define NEIGHBOURHOOD_00_CONTEXT_1_THRESHOLD_4x4  3
+#define NEIGHBOURHOOD_00_CONTEXT_2_THRESHOLD_4x4  1
+
+//------------------
+
+#define FIRST_SIG_FLAG_CTX_LUMA                   0
+#define FIRST_SIG_FLAG_CTX_CHROMA     (FIRST_SIG_FLAG_CTX_LUMA + NUM_SIG_FLAG_CTX_LUMA)
+
+#define NUM_SIG_FLAG_CTX              (NUM_SIG_FLAG_CTX_LUMA + NUM_SIG_FLAG_CTX_CHROMA)       ///< number of context models for sig flag
+
+//--------------------------------------------------------------------------------------------------
+
+// context size definitions for last significant coefficient position
+
+#define NUM_CTX_LAST_FLAG_SETS         2
+
+#define NUM_CTX_LAST_FLAG_XY          15      ///< number of context models for last coefficient position
+
+//--------------------------------------------------------------------------------------------------
+
+// context size definitions for greater-than-one and greater-than-two maps
+
+#define NUM_ONE_FLAG_CTX_PER_SET       4      ///< number of context models for greater than 1 flag in a set
+#define NUM_ABS_FLAG_CTX_PER_SET       1      ///< number of context models for greater than 2 flag in a set
+
+//------------------
+
+#define NUM_CTX_SETS_LUMA              4      ///< number of context model sets for luminance
+#define NUM_CTX_SETS_CHROMA            2      ///< number of context model sets for combined chrominance
+
+#define FIRST_CTX_SET_LUMA             0      ///< index of first luminance context set
+
+//------------------
+
+#define NUM_ONE_FLAG_CTX_LUMA         (NUM_ONE_FLAG_CTX_PER_SET * NUM_CTX_SETS_LUMA)           ///< number of context models for greater than 1 flag of luma
+#define NUM_ONE_FLAG_CTX_CHROMA       (NUM_ONE_FLAG_CTX_PER_SET * NUM_CTX_SETS_CHROMA)         ///< number of context models for greater than 1 flag of chroma
+
+#define NUM_ABS_FLAG_CTX_LUMA         (NUM_ABS_FLAG_CTX_PER_SET * NUM_CTX_SETS_LUMA)           ///< number of context models for greater than 2 flag of luma
+#define NUM_ABS_FLAG_CTX_CHROMA      (NUM_ABS_FLAG_CTX_PER_SET * NUM_CTX_SETS_CHROMA)         ///< number of context models for greater than 2 flag of chroma
+
+#define NUM_ONE_FLAG_CTX              (NUM_ONE_FLAG_CTX_LUMA + NUM_ONE_FLAG_CTX_CHROMA)        ///< number of context models for greater than 1 flag
+#define NUM_ABS_FLAG_CTX              (NUM_ABS_FLAG_CTX_LUMA + NUM_ABS_FLAG_CTX_CHROMA)        ///< number of context models for greater than 2 flag
+
+#define FIRST_CTX_SET_CHROMA          (FIRST_CTX_SET_LUMA + NUM_CTX_SETS_LUMA)                 ///< index of first chrominance context set
+
+//--------------------------------------------------------------------------------------------------
+
+// context size definitions for CBF
+
+#define NUM_QT_CBF_CTX_SETS           2
+
+#define NUM_QT_CBF_CTX_PER_SET        5       ///< number of context models for QT CBF
+
+#define FIRST_CBF_CTX_LUMA            0       ///< index of first luminance CBF context
+
+#define FIRST_CBF_CTX_CHROMA          (FIRST_CBF_CTX_LUMA + NUM_QT_CBF_CTX_PER_SET)  ///< index of first chrominance CBF context
+
+
+//--------------------------------------------------------------------------------------------------
+
+#define NUM_MVP_IDX_CTX               1       ///< number of context models for MVP index
+
+#define NUM_SAO_MERGE_FLAG_CTX        1       ///< number of context models for SAO merge flags
+#define NUM_SAO_TYPE_IDX_CTX          1       ///< number of context models for SAO type index
+
+#define NUM_TRANSFORMSKIP_FLAG_CTX    1       ///< number of context models for transform skipping
+
+#define NUM_CU_TRANSQUANT_BYPASS_FLAG_CTX  1
+
+#define NUM_CROSS_COMPONENT_PREDICTION_CTX 10
+
+#define CNU                          154      ///< dummy initialization value for unused context models 'Context model Not Used'
+
+
+// ====================================================================================================================
+// Tables
+// ====================================================================================================================
+
+// Initial probability tables. Each table below has one row per slice type
+// (NUMBER_OF_SLICE_TYPES); CNU marks contexts unused for that slice type.
+// NOTE(review): row order appears to be B, P, I, matching the *SLICE_* macro
+// ordering used later in this header — confirm against the reader of these tables.
+
+// initial probability for cu_transquant_bypass flag
+static const UChar
+INIT_CU_TRANSQUANT_BYPASS_FLAG[NUMBER_OF_SLICE_TYPES][NUM_CU_TRANSQUANT_BYPASS_FLAG_CTX] =
+{
+  { 154 },
+  { 154 },
+  { 154 },
+};
+
+// initial probability for split flag
+static const UChar
+INIT_SPLIT_FLAG[NUMBER_OF_SLICE_TYPES][NUM_SPLIT_FLAG_CTX] =
+{
+  { 107,  139,  126, },
+  { 107,  139,  126, },
+  { 139,  141,  157, },
+};
+
+static const UChar
+INIT_SKIP_FLAG[NUMBER_OF_SLICE_TYPES][NUM_SKIP_FLAG_CTX] =
+{
+  { 197,  185,  201, },
+  { 197,  185,  201, },
+  { CNU,  CNU,  CNU, },
+};
+
+static const UChar
+INIT_MERGE_FLAG_EXT[NUMBER_OF_SLICE_TYPES][NUM_MERGE_FLAG_EXT_CTX] =
+{
+  { 154, },
+  { 110, },
+  { CNU, },
+};
+
+static const UChar
+INIT_MERGE_IDX_EXT[NUMBER_OF_SLICE_TYPES][NUM_MERGE_IDX_EXT_CTX] =
+{
+  { 137, },
+  { 122, },
+  { CNU, },
+};
+
+static const UChar
+INIT_PART_SIZE[NUMBER_OF_SLICE_TYPES][NUM_PART_SIZE_CTX] =
+{
+  { 154,  139,  154, 154 },
+  { 154,  139,  154, 154 },
+  { 184,  CNU,  CNU, CNU },
+};
+
+static const UChar
+INIT_PRED_MODE[NUMBER_OF_SLICE_TYPES][NUM_PRED_MODE_CTX] =
+{
+  { 134, },
+  { 149, },
+  { CNU, },
+};
+
+static const UChar
+INIT_INTRA_PRED_MODE[NUMBER_OF_SLICE_TYPES][NUM_ADI_CTX] =
+{
+  { 183, },
+  { 154, },
+  { 184, },
+};
+
+static const UChar
+INIT_CHROMA_PRED_MODE[NUMBER_OF_SLICE_TYPES][NUM_CHROMA_PRED_CTX] =
+{
+  { 152,  139, },
+  { 152,  139, },
+  {  63,  139, },
+};
+
+static const UChar
+INIT_INTER_DIR[NUMBER_OF_SLICE_TYPES][NUM_INTER_DIR_CTX] =
+{
+  {  95,   79,   63,   31,  31, },
+  {  95,   79,   63,   31,  31, },
+  { CNU,  CNU,  CNU,  CNU, CNU, },
+};
+
+static const UChar
+INIT_MVD[NUMBER_OF_SLICE_TYPES][NUM_MV_RES_CTX] =
+{
+  { 169,  198, },
+  { 140,  198, },
+  { CNU,  CNU, },
+};
+
+static const UChar
+INIT_REF_PIC[NUMBER_OF_SLICE_TYPES][NUM_REF_NO_CTX] =
+{
+  { 153,  153 },
+  { 153,  153 },
+  { CNU,  CNU },
+};
+
+static const UChar
+INIT_DQP[NUMBER_OF_SLICE_TYPES][NUM_DELTA_QP_CTX] =
+{
+  { 154,  154,  154, },
+  { 154,  154,  154, },
+  { 154,  154,  154, },
+};
+
+static const UChar
+INIT_CHROMA_QP_ADJ_FLAG[NUMBER_OF_SLICE_TYPES][NUM_CHROMA_QP_ADJ_FLAG_CTX] =
+{
+  { 154, },
+  { 154, },
+  { 154, },
+};
+
+static const UChar
+INIT_CHROMA_QP_ADJ_IDC[NUMBER_OF_SLICE_TYPES][NUM_CHROMA_QP_ADJ_IDC_CTX] =
+{
+  { 154, },
+  { 154, },
+  { 154, },
+};
+
+//--------------------------------------------------------------------------------------------------
+
+//Initialisation for CBF
+
+//                                 |---------Luminance---------|
+#define BSLICE_LUMA_CBF_CONTEXT     153,  111,  CNU,  CNU,  CNU
+#define PSLICE_LUMA_CBF_CONTEXT     153,  111,  CNU,  CNU,  CNU
+#define ISLICE_LUMA_CBF_CONTEXT     111,  141,  CNU,  CNU,  CNU
+//                                 |--------Chrominance--------|
+#define BSLICE_CHROMA_CBF_CONTEXT   149,   92,  167,  154,  154
+#define PSLICE_CHROMA_CBF_CONTEXT   149,  107,  167,  154,  154
+#define ISLICE_CHROMA_CBF_CONTEXT    94,  138,  182,  154,  154
+
+
+// Each row concatenates the luma set followed by the chroma set
+// (NUM_QT_CBF_CTX_SETS * NUM_QT_CBF_CTX_PER_SET entries); rows are ordered B, P, I.
+static const UChar
+INIT_QT_CBF[NUMBER_OF_SLICE_TYPES][NUM_QT_CBF_CTX_SETS * NUM_QT_CBF_CTX_PER_SET] =
+{
+  { BSLICE_LUMA_CBF_CONTEXT, BSLICE_CHROMA_CBF_CONTEXT },
+  { PSLICE_LUMA_CBF_CONTEXT, PSLICE_CHROMA_CBF_CONTEXT },
+  { ISLICE_LUMA_CBF_CONTEXT, ISLICE_CHROMA_CBF_CONTEXT },
+};
+
+
+//--------------------------------------------------------------------------------------------------
+
+static const UChar
+INIT_QT_ROOT_CBF[NUMBER_OF_SLICE_TYPES][NUM_QT_ROOT_CBF_CTX] =
+{
+  {  79, },
+  {  79, },
+  { CNU, },
+};
+
+
+//--------------------------------------------------------------------------------------------------
+
+//Initialisation for last-significant-position
+
+//                                           |------------------------------Luminance----------------------------------|
+#define BSLICE_LUMA_LAST_POSITION_CONTEXT     125, 110, 124, 110,  95,  94, 125, 111, 111,  79, 125, 126, 111, 111,  79
+#define PSLICE_LUMA_LAST_POSITION_CONTEXT     125, 110,  94, 110,  95,  79, 125, 111, 110,  78, 110, 111, 111,  95,  94
+#define ISLICE_LUMA_LAST_POSITION_CONTEXT     110, 110, 124, 125, 140, 153, 125, 127, 140, 109, 111, 143, 127, 111,  79
+//                                           |------------------------------Chrominance--------------------------------|
+#define BSLICE_CHROMA_LAST_POSITION_CONTEXT   108, 123,  93, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU
+#define PSLICE_CHROMA_LAST_POSITION_CONTEXT   108, 123, 108, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU
+#define ISLICE_CHROMA_LAST_POSITION_CONTEXT   108, 123,  63, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU, CNU
+
+
+// Each row: NUM_CTX_LAST_FLAG_XY luma contexts followed by NUM_CTX_LAST_FLAG_XY chroma contexts.
+static const UChar
+INIT_LAST[NUMBER_OF_SLICE_TYPES][NUM_CTX_LAST_FLAG_SETS * NUM_CTX_LAST_FLAG_XY] =
+{
+  { BSLICE_LUMA_LAST_POSITION_CONTEXT, BSLICE_CHROMA_LAST_POSITION_CONTEXT },
+  { PSLICE_LUMA_LAST_POSITION_CONTEXT, PSLICE_CHROMA_LAST_POSITION_CONTEXT },
+  { ISLICE_LUMA_LAST_POSITION_CONTEXT, ISLICE_CHROMA_LAST_POSITION_CONTEXT },
+};
+
+
+//--------------------------------------------------------------------------------------------------
+
+// Coded-sub-block (coefficient-group) significance: each row holds two groups
+// of NUM_SIG_CG_FLAG_CTX values (2 * NUM_SIG_CG_FLAG_CTX entries per slice type).
+static const UChar
+INIT_SIG_CG_FLAG[NUMBER_OF_SLICE_TYPES][2 * NUM_SIG_CG_FLAG_CTX] =
+{
+  { 121,  140,
+    61,  154,
+  },
+  { 121,  140,
+    61,  154,
+  },
+  {  91,  171,
+    134,  141,
+  },
+};
+
+
+//--------------------------------------------------------------------------------------------------
+
+//Initialisation for significance map
+
+//                                          |-DC-|  |-----------------4x4------------------|  |------8x8 Diagonal Scan------|  |----8x8 Non-Diagonal Scan----|  |-NxN First group-|  |-NxN Other group-| |-Single context-|
+//                                          |    |  |                                      |  |-First Group-| |-Other Group-|  |-First Group-| |-Other Group-|  |                 |  |                 | |                |
+#define BSLICE_LUMA_SIGNIFICANCE_CONTEXT     170,    154, 139, 153, 139, 123, 123,  63, 124,   166, 183, 140,  136, 153, 154,   166, 183, 140,  136, 153, 154,   166,   183,   140,   136,   153,   154,        140
+#define PSLICE_LUMA_SIGNIFICANCE_CONTEXT     155,    154, 139, 153, 139, 123, 123,  63, 153,   166, 183, 140,  136, 153, 154,   166, 183, 140,  136, 153, 154,   166,   183,   140,   136,   153,   154,        140
+#define ISLICE_LUMA_SIGNIFICANCE_CONTEXT     111,    111, 125, 110, 110,  94, 124, 108, 124,   107, 125, 141,  179, 153, 125,   107, 125, 141,  179, 153, 125,   107,   125,   141,   179,   153,   125,        141
+
+//                                          |-DC-|  |-----------------4x4------------------|  |-8x8 Any group-|  |-NxN Any group-| |-Single context-|
+#define BSLICE_CHROMA_SIGNIFICANCE_CONTEXT   170,    153, 138, 138, 122, 121, 122, 121, 167,   151,  183,  140,   151,  183,  140,        140
+#define PSLICE_CHROMA_SIGNIFICANCE_CONTEXT   170,    153, 123, 123, 107, 121, 107, 121, 167,   151,  183,  140,   151,  183,  140,        140
+#define ISLICE_CHROMA_SIGNIFICANCE_CONTEXT   140,    139, 182, 182, 152, 136, 152, 136, 153,   136,  139,  111,   136,  139,  111,        111
+
+//------------------------------------------------
+
+// Each row: NUM_SIG_FLAG_CTX_LUMA (28) luma contexts followed by NUM_SIG_FLAG_CTX_CHROMA (16) chroma contexts.
+static const UChar
+INIT_SIG_FLAG[NUMBER_OF_SLICE_TYPES][NUM_SIG_FLAG_CTX] =
+{
+  { BSLICE_LUMA_SIGNIFICANCE_CONTEXT, BSLICE_CHROMA_SIGNIFICANCE_CONTEXT },
+  { PSLICE_LUMA_SIGNIFICANCE_CONTEXT, PSLICE_CHROMA_SIGNIFICANCE_CONTEXT },
+  { ISLICE_LUMA_SIGNIFICANCE_CONTEXT, ISLICE_CHROMA_SIGNIFICANCE_CONTEXT },
+};
+
+
+//--------------------------------------------------------------------------------------------------
+
+//Initialisation for greater-than-one flags and greater-than-two flags
+// (sets 0-3 are the luma context sets, sets 4-5 the chroma context sets)
+
+//                                 |------Set 0-------| |------Set 1-------| |------Set 2-------| |------Set 3-------|
+#define BSLICE_LUMA_ONE_CONTEXT     154, 196, 167, 167,  154, 152, 167, 182,  182, 134, 149, 136,  153, 121, 136, 122
+#define PSLICE_LUMA_ONE_CONTEXT     154, 196, 196, 167,  154, 152, 167, 182,  182, 134, 149, 136,  153, 121, 136, 137
+#define ISLICE_LUMA_ONE_CONTEXT     140,  92, 137, 138,  140, 152, 138, 139,  153,  74, 149,  92,  139, 107, 122, 152
+
+#define BSLICE_LUMA_ABS_CONTEXT     107,                 167,                  91,                 107
+#define PSLICE_LUMA_ABS_CONTEXT     107,                 167,                  91,                 122
+#define ISLICE_LUMA_ABS_CONTEXT     138,                 153,                 136,                 167
+
+//                                 |------Set 4-------| |------Set 5-------|
+#define BSLICE_CHROMA_ONE_CONTEXT   169, 208, 166, 167,  154, 152, 167, 182
+#define PSLICE_CHROMA_ONE_CONTEXT   169, 194, 166, 167,  154, 167, 137, 182
+#define ISLICE_CHROMA_ONE_CONTEXT   140, 179, 166, 182,  140, 227, 122, 197
+
+#define BSLICE_CHROMA_ABS_CONTEXT   107,                 167
+#define PSLICE_CHROMA_ABS_CONTEXT   107,                 167
+#define ISLICE_CHROMA_ABS_CONTEXT   152,                 152
+
+
+//------------------------------------------------
+
+static const UChar
+INIT_ONE_FLAG[NUMBER_OF_SLICE_TYPES][NUM_ONE_FLAG_CTX] =
+{
+  { BSLICE_LUMA_ONE_CONTEXT, BSLICE_CHROMA_ONE_CONTEXT },
+  { PSLICE_LUMA_ONE_CONTEXT, PSLICE_CHROMA_ONE_CONTEXT },
+  { ISLICE_LUMA_ONE_CONTEXT, ISLICE_CHROMA_ONE_CONTEXT },
+};
+
+static const UChar
+INIT_ABS_FLAG[NUMBER_OF_SLICE_TYPES][NUM_ABS_FLAG_CTX] =
+{
+  { BSLICE_LUMA_ABS_CONTEXT, BSLICE_CHROMA_ABS_CONTEXT },
+  { PSLICE_LUMA_ABS_CONTEXT, PSLICE_CHROMA_ABS_CONTEXT },
+  { ISLICE_LUMA_ABS_CONTEXT, ISLICE_CHROMA_ABS_CONTEXT },
+};
+
+
+//--------------------------------------------------------------------------------------------------
+
+static const UChar
+INIT_MVP_IDX[NUMBER_OF_SLICE_TYPES][NUM_MVP_IDX_CTX] =
+{
+  { 168, },
+  { 168, },
+  { CNU, },
+};
+
+static const UChar
+INIT_SAO_MERGE_FLAG[NUMBER_OF_SLICE_TYPES][NUM_SAO_MERGE_FLAG_CTX] =
+{
+  { 153,  },
+  { 153,  },
+  { 153,  },
+};
+
+static const UChar
+INIT_SAO_TYPE_IDX[NUMBER_OF_SLICE_TYPES][NUM_SAO_TYPE_IDX_CTX] =
+{
+  { 160, },
+  { 185, },
+  { 200, },
+};
+
+static const UChar
+INIT_TRANS_SUBDIV_FLAG[NUMBER_OF_SLICE_TYPES][NUM_TRANS_SUBDIV_FLAG_CTX] =
+{
+  { 224,  167,  122, },
+  { 124,  138,   94, },
+  { 153,  138,  138, },
+};
+
+// NOTE(review): the '2*' tables below hold two entries per slice type —
+// presumably one per channel type (luma/chroma); confirm against the reader.
+static const UChar
+INIT_TRANSFORMSKIP_FLAG[NUMBER_OF_SLICE_TYPES][2*NUM_TRANSFORMSKIP_FLAG_CTX] =
+{
+  { 139,  139},
+  { 139,  139},
+  { 139,  139},
+};
+
+static const UChar
+INIT_EXPLICIT_RDPCM_FLAG[NUMBER_OF_SLICE_TYPES][2*NUM_EXPLICIT_RDPCM_FLAG_CTX] =
+{
+  {139, 139},
+  {139, 139},
+  {CNU, CNU}
+};
+
+static const UChar
+INIT_EXPLICIT_RDPCM_DIR[NUMBER_OF_SLICE_TYPES][2*NUM_EXPLICIT_RDPCM_DIR_CTX] =
+{
+  {139, 139},
+  {139, 139},
+  {CNU, CNU}
+};
+
+static const UChar
+INIT_CROSS_COMPONENT_PREDICTION[NUMBER_OF_SLICE_TYPES][NUM_CROSS_COMPONENT_PREDICTION_CTX] =
+{
+  { 154, 154, 154, 154, 154, 154, 154, 154, 154, 154 },
+  { 154, 154, 154, 154, 154, 154, 154, 154, 154, 154 },
+  { 154, 154, 154, 154, 154, 154, 154, 154, 154, 154 },
+};
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/Debug.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,449 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     Debug.cpp
+    \brief    Defines types and objects for environment-variable-based debugging and feature control
+*/
+
+#include "Debug.h"
+#include <algorithm>
+#include <math.h>
+#include "TComDataCU.h"
+#include "TComPic.h"
+#include "TComYuv.h"
+
+static const UInt settingNameWidth  = 66;
+static const UInt settingHelpWidth  = 84;
+static const UInt settingValueWidth = 3;
+
+#ifdef DEBUG_STRING
+// these strings are used to reorder the debug output so that the encoder and decoder match.
+const Char *debug_reorder_data_inter_token[MAX_NUM_COMPONENT+1]
+ = {"Start of channel 0 inter debug\n", "Start of channel 1 inter debug\n", "Start of channel 2 inter debug\n", "End of inter residual debug\n"} ;
+const Char *partSizeToString[NUMBER_OF_PART_SIZES]={"2Nx2N(0)", "2NxN(1)", "Nx2N(2)", "NxN(3)", "2Nx(N/2+3N/2)(4)", "2Nx(3N/2+N/2)(5)", "(N/2+3N/2)x2N(6)", "(3N/2+N/2)x2N(7)"};
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------- //
+
+//EnvVar definition
+
+std::list<std::pair<std::string, std::string> > &EnvVar::getEnvVarList()
+{
+  static std::list<std::pair<std::string, std::string> > varInfoList;
+  return varInfoList;
+}
+
+std::list<EnvVar*> &EnvVar::getEnvVarInUse()
+{
+  static std::list<EnvVar*> varInUseList;
+  return varInUseList;
+}
+
+static inline Void printPair(const std::pair<std::string, std::string> &p)
+{
+  if (p.second=="")
+  {
+    std::cout << "\n" << std::setw(settingNameWidth) << p.first << "\n" << std::endl;
+  }
+  else
+  {
+    std::cout << std::setw(settingNameWidth) << p.first << ":   " << p.second << "\n" << std::endl;
+  }
+}
+
+static inline Void printVal(const EnvVar* env)
+{
+  std::cout << std::setw(settingNameWidth) << env->getName() << " = " << std::setw(settingValueWidth) << env->getInt() << " (string = " << std::setw(15) << env->getString() << ")" << std::endl;
+}
+
+//static inline Bool sameEnvName( const std::pair<std::string, std::string> &a,
+//                                const std::pair<std::string, std::string> &b )
+//{
+//  // only check env name
+//  return (a.first==b.first);
+//}
+
+Void EnvVar::printEnvVar()
+{
+//  getEnvVarList().unique(sameEnvName);
+  if (getEnvVarList().size()!=0)
+  {
+    std::cout << "--- Environment variables:\n" << std::endl;
+    for_each(getEnvVarList().begin(), getEnvVarList().end(), printPair);
+  }
+  std::cout << std::endl;
+}
+
+Void EnvVar::printEnvVarInUse()
+{
+  if (getEnvVarInUse().size()!=0)
+  {
+    std::cout << "RExt Environment variables set as follows: \n" << std::endl;
+    for_each(getEnvVarInUse().begin(), getEnvVarInUse().end(), printVal);
+  }
+  std::cout << std::endl;
+}
+
+EnvVar::EnvVar(const std::string &sName, const std::string &sDefault, const std::string &sHelp) :
+                                                                m_sName(sName),
+                                                                m_sHelp(sHelp),
+                                                                m_sVal(),
+                                                                m_dVal(0),
+                                                                m_iVal(0),
+                                                                m_bSet(false)
+{
+  if (getenv(m_sName.c_str()))
+  {
+    m_sVal = getenv(m_sName.c_str());
+    m_bSet = true;
+    getEnvVarInUse().push_back(this);
+  }
+  else m_sVal = sDefault;
+
+  m_dVal = strtod(m_sVal.c_str(), 0);
+  m_iVal = Int(m_dVal);
+
+  getEnvVarList().push_back( std::pair<std::string, std::string>(m_sName, indentNewLines(lineWrap(splitOnSettings(m_sHelp), settingHelpWidth), (settingNameWidth + 4))) );
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------- //
+
+// Debug environment variables:
+
+EnvVar Debug("-- Debugging","","");
+
+EnvVar DebugOptionList::DebugSBAC             ("DEBUG_SBAC",        "0", "Output debug data from SBAC entropy coder (coefficient data etc.)"                              );
+EnvVar DebugOptionList::DebugRQT              ("DEBUG_RQT",         "0", "Output RQT debug data from entropy coder"                                                       );
+EnvVar DebugOptionList::DebugPred             ("DEBUG_PRED",        "0", "Output prediction debug"                                                                        );
+EnvVar DebugOptionList::ForceLumaMode         ("FORCE_LUMA_MODE",   "0", "Force a particular intra direction for Luma (0-34)"                                             );
+EnvVar DebugOptionList::ForceChromaMode       ("FORCE_CHROMA_MODE", "0", "Force a particular intra direction for chroma (0-5)"                                            );
+
+#ifdef DEBUG_STRING
+EnvVar DebugOptionList::DebugString_Structure ("DEBUG_STRUCTURE",   "0", "Produce output on chosen structure                        bit0=intra, bit1=inter");
+EnvVar DebugOptionList::DebugString_Pred      ("DEBUG_PRED",        "0", "Produce output on prediction data.                        bit0=intra, bit1=inter");
+EnvVar DebugOptionList::DebugString_Resi      ("DEBUG_RESI",        "0", "Produce output on residual data.                          bit0=intra, bit1=inter");
+EnvVar DebugOptionList::DebugString_Reco      ("DEBUG_RECO",        "0", "Produce output on reconstructed data.                     bit0=intra, bit1=inter");
+EnvVar DebugOptionList::DebugString_InvTran   ("DEBUG_INV_QT",      "0", "Produce output on inverse-quantiser and transform stages. bit0=intra, bit1=inter");
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------- //
+
+//macro value printing function
+
+Void printMacroSettings()
+{
+  std::cout << "Non-environment-variable-controlled macros set as follows: \n" << std::endl;
+
+  //------------------------------------------------
+
+  //setting macros
+
+  PRINT_CONSTANT(RExt__DECODER_DEBUG_BIT_STATISTICS,                                settingNameWidth, settingValueWidth);
+  PRINT_CONSTANT(RExt__HIGH_BIT_DEPTH_SUPPORT,                                      settingNameWidth, settingValueWidth);
+  PRINT_CONSTANT(RExt__HIGH_PRECISION_FORWARD_TRANSFORM,                            settingNameWidth, settingValueWidth);
+
+  PRINT_CONSTANT(O0043_BEST_EFFORT_DECODING,                                        settingNameWidth, settingValueWidth);
+
+  PRINT_CONSTANT(RD_TEST_SAO_DISABLE_AT_PICTURE_LEVEL,                              settingNameWidth, settingValueWidth);
+
+  //------------------------------------------------
+
+  std::cout << std::endl;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------- //
+
+//Debugging
+
+UInt  g_debugCounter  = 0;
+Bool  g_printDebug    = false;
+Void* g_debugAddr     = NULL;
+
+#ifdef DEBUG_ENCODER_SEARCH_BINS
+const UInt debugEncoderSearchBinTargetLine = 0;
+const UInt debugEncoderSearchBinWindow     = 1000000;
+#endif
+
+#ifdef DEBUG_CABAC_BINS
+const UInt debugCabacBinTargetLine = 0;
+const UInt debugCabacBinWindow     = 1000000;
+#endif
+
+Void printSBACCoeffData(  const UInt          lastX,
+                          const UInt          lastY,
+                          const UInt          width,
+                          const UInt          height,
+                          const UInt          chan,
+                          const UInt          absPart,
+                          const UInt          scanIdx,
+                          const TCoeff *const pCoeff,
+                          const Bool          finalEncode
+                        )
+{
+  if (DebugOptionList::DebugSBAC.getInt()!=0 && finalEncode)
+  {
+    std::cout << "Size: " << width << "x" << height << ", Last X/Y: (" << lastX << ", " << lastY << "), absPartIdx: " << absPart << ", scanIdx: " << scanIdx << ", chan: " << chan << std::endl;
+    for (Int i=0; i<width*height; i++)
+    {
+      std::cout << std::setw(3) << pCoeff[i];// + dcVal;
+      if (i%width == width-1) std::cout << std::endl;
+      else                    std::cout << ",";
+    }
+    std::cout << std::endl;
+  }
+}
+
+Void printCbfArray( TComDataCU* pcCU  )
+{
+  const UInt CUSizeInParts = pcCU->getWidth(0)/4;
+  const UInt numValidComp=pcCU->getPic()->getNumberValidComponents();
+  for (UInt ch=0; ch<numValidComp; ch++)
+  {
+    const ComponentID compID=ComponentID(ch);
+    printf("channel: %d\n", ch);
+    for (Int y=0; y<CUSizeInParts; y++)
+    {
+      for (Int x=0; x<CUSizeInParts; x++)
+      {
+        printf(x+1==CUSizeInParts?"%3d\n":"%3d, ", pcCU->getCbf(compID)[g_auiRasterToZscan[y*CUSizeInParts + x]]);
+      }
+    }
+  }
+}
+
+UInt getDecimalWidth(const Double value)
+{
+  return (value == 0) ? 1 : (UInt(floor(log10(fabs(value)))) + ((value < 0) ? 2 : 1));
+                                                               //for the minus sign
+}
+
+UInt getZScanIndex(const UInt x, const UInt y)
+{
+  UInt remainingX = x;
+  UInt remainingY = y;
+  UInt offset     = 0;
+  UInt result     = 0;
+
+  while ((remainingX != 0) || (remainingY != 0))
+  {
+    result |= ((remainingX & 0x1) << offset) | ((remainingY & 0x1) << (offset + 1));
+
+    remainingX >>= 1;
+    remainingY >>= 1;
+    offset      += 2;
+  }
+
+  return result;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------- //
+
+//String manipulation functions for aligning and wrapping printed text
+
+
+std::string splitOnSettings(const std::string &input)
+{
+  std::string result = input;
+
+  std::string::size_type searchFromPosition = 0;
+
+  while (searchFromPosition < result.length())
+  {
+    //find the " = " that is used to define each setting
+    std::string::size_type equalsPosition = result.find(" = ", searchFromPosition);
+
+    if (equalsPosition == std::string::npos) break;
+
+    //then find the end of the numeric characters
+    std::string::size_type splitPosition = result.find_last_of("1234567890", equalsPosition);
+
+    //then find the last space before the first numeric character...
+    if (splitPosition != std::string::npos) splitPosition = result.find_last_of(' ', splitPosition);
+
+    //...and replace it with a new line
+    if (splitPosition != std::string::npos) result.replace(splitPosition, 1, 1, '\n');
+
+    //start the next search from the end of the " = " string
+    searchFromPosition = (equalsPosition + 3);
+  }
+
+  return result;
+}
+
+
+std::string lineWrap(const std::string &input, const UInt maximumLineLength)
+{
+  if (maximumLineLength == 0) return input;
+  std::string result = input;
+
+  std::string::size_type lineStartPosition = result.find_first_not_of(' '); //don't wrap any leading spaces in the string
+
+  while (lineStartPosition != std::string::npos)
+  {
+    //------------------------------------------------
+
+    const std::string::size_type searchFromPosition = lineStartPosition + maximumLineLength;
+
+    if (searchFromPosition >= result.length()) break;
+
+    //------------------------------------------------
+
+    //first check to see if there is another new line character before the maximum line length
+    //we can't use find for this unfortunately because it doesn't take both a beginning and an end for its search range
+    std::string::size_type nextLineStartPosition = std::string::npos;
+    for (std::string::size_type currentPosition = lineStartPosition; currentPosition <= searchFromPosition; currentPosition++)
+    {
+      if (result[currentPosition] == '\n') { nextLineStartPosition = currentPosition + 1; break; }
+    }
+
+    //------------------------------------------------
+
+    //if there ia another new line character before the maximum line length, we need to start this loop again from that position
+    if (nextLineStartPosition != std::string::npos) lineStartPosition = nextLineStartPosition;
+    else
+    {
+      std::string::size_type spacePosition = std::string::npos;
+
+      //search backwards for the last space character (must use signed Int because lineStartPosition can be 0)
+      for (Int currentPosition = Int(searchFromPosition); currentPosition >= Int(lineStartPosition); currentPosition--)
+      {
+        if (result[currentPosition] == ' ') { spacePosition = currentPosition; break; }
+      }
+
+      //if we didn't find a space searching backwards, we must hyphenate
+      if (spacePosition == std::string::npos)
+      {
+        result.insert(searchFromPosition, "-\n");
+        lineStartPosition = searchFromPosition + 2; //make sure the next search ignores the hyphen
+      }
+      else //if we found a space to split on, replace it with a new line character
+      {
+        result.replace(spacePosition, 1, 1, '\n');
+        lineStartPosition = spacePosition + 1;
+      }
+    }
+
+    //------------------------------------------------
+  }
+
+  return result;
+}
+
+
+std::string indentNewLines(const std::string &input, const UInt indentBy)
+{
+  std::string result = input;
+
+  const std::string indentString(indentBy, ' ');
+  std::string::size_type offset = 0;
+
+  while ((offset = result.find('\n', offset)) != std::string::npos)
+  {
+    if ((++offset) >= result.length()) break; //increment offset so we don't find the same \n again and do no indentation at the end
+    result.insert(offset, indentString);
+  }
+
+  return result;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------- //
+
+
+Void printBlockToStream( std::ostream &ss, const Char *pLinePrefix, TComYuv &src, const UInt numSubBlocksAcross, const UInt numSubBlocksUp, const UInt defWidth )
+{
+  const UInt numValidComp=src.getNumberValidComponents();
+
+  for (UInt ch=0; ch<numValidComp ; ch++)
+  {
+    const ComponentID compID = ComponentID(ch);
+    const UInt width  = src.getWidth(compID);
+    const UInt height = src.getHeight(compID);
+    const UInt stride = src.getStride(compID);
+    const Pel* blkSrc = src.getAddr(compID);
+    const UInt subBlockWidth=width/numSubBlocksAcross;
+    const UInt subBlockHeight=height/numSubBlocksUp;
+
+    ss << pLinePrefix << " compID: " << compID << "\n";
+    for (UInt y=0; y<height; y++)
+    {
+      if ((y%subBlockHeight)==0 && y!=0)
+        ss << pLinePrefix << '\n';
+
+      ss << pLinePrefix;
+      for (UInt x=0; x<width; x++)
+      {
+        if ((x%subBlockWidth)==0 && x!=0)
+          ss << std::setw(defWidth+2) << "";
+
+        ss << std::setw(defWidth) << blkSrc[y*stride + x] << ' ';
+      }
+      ss << '\n';
+    }
+    ss << pLinePrefix << " --- \n";
+  }
+}
+
+#ifdef DEBUG_STRING
+Int DebugStringGetPredModeMask(PredMode mode)
+{
+  return (mode==MODE_INTRA)?1:2;
+}
+
+Void DebugInterPredResiReco(std::string &sDebug, TComYuv &pred, TComYuv &resi, TComYuv &reco, Int predmode_mask)
+{
+  if (DebugOptionList::DebugString_Pred.getInt()&predmode_mask)
+  {
+    std::stringstream ss(std::stringstream::out);
+    printBlockToStream(ss, "###inter-pred: ", pred);
+    std::string debugTmp;
+    debugTmp=ss.str();
+    sDebug=debugTmp+sDebug;
+  }
+  if (DebugOptionList::DebugString_Resi.getInt()&predmode_mask)
+  {
+    std::stringstream ss(std::stringstream::out);
+    printBlockToStream(ss, "###inter-resi: ", resi);
+    sDebug+=ss.str();
+  }
+  if (DebugOptionList::DebugString_Reco.getInt()&predmode_mask)
+  {
+    std::stringstream ss(std::stringstream::out);
+    printBlockToStream(ss, "###inter-reco: ", reco);
+    sDebug+=ss.str();
+  }
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/Debug.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,266 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     Debug.h
+    \brief    Defines types and objects for environment-variable-based debugging and feature control
+*/
+
+#ifndef __DEBUG__
+#define __DEBUG__
+
+#include <iostream>
+#include <iomanip>
+#include <string>
+#include <list>
+#include <stdlib.h>
+#include <sstream>
+#include <TLibCommon/CommonDef.h>
+
+#ifdef DEBUG_STRING
+extern const Char *debug_reorder_data_inter_token[MAX_NUM_COMPONENT+1];
+extern const Char *partSizeToString[NUMBER_OF_PART_SIZES];
+#endif
+
+// ---------------------------------------------------------------------------------------------- //
+
+//constant print-out macro
+
+#define PRINT_CONSTANT(NAME, NAME_WIDTH, VALUE_WIDTH) std::cout << std::setw(NAME_WIDTH) << #NAME << " = " << std::setw(VALUE_WIDTH) << NAME << std::endl;
+
+// ---------------------------------------------------------------------------------------------- //
+
+// ---- Environment variables for test/debug ---- //
+
+class EnvVar
+{
+private:
+  std::string m_sName;
+  std::string m_sHelp;
+  std::string m_sVal;
+  Double      m_dVal;
+  Int         m_iVal;
+  Bool        m_bSet;
+
+public:
+
+  static std::list< std::pair<std::string, std::string> > &getEnvVarList();
+  static std::list<EnvVar*>                               &getEnvVarInUse();
+  static Void printEnvVar();
+  static Void printEnvVarInUse();
+
+  EnvVar(const std::string &sName, const std::string &sDefault, const std::string &sHelp);
+
+  Double              getDouble()   const       { return m_dVal;    }
+  Int                 getInt()      const       { return m_iVal;    }
+  const std::string  &getString()   const       { return m_sVal;    }
+  Bool                isSet()       const       { return m_bSet;    }
+  Bool                isTrue()      const       { return m_iVal!=0; }
+  const std::string  &getName()     const       { return m_sName;   }
+
+};
+
+
+// ---------------------------------------------------------------------------------------------- //
+
+// ---- Control switches for debugging and feature control ---- //
+
+namespace DebugOptionList
+{
+  extern EnvVar DebugSBAC;
+  extern EnvVar DebugRQT;
+  extern EnvVar DebugPred;
+  extern EnvVar ForceLumaMode;
+  extern EnvVar ForceChromaMode;
+
+#ifdef DEBUG_STRING
+  extern EnvVar DebugString_Structure;
+  extern EnvVar DebugString_Pred;
+  extern EnvVar DebugString_Resi;
+  extern EnvVar DebugString_Reco;
+  extern EnvVar DebugString_InvTran;
+#endif
+}
+
+// ---------------------------------------------------------------------------------------------- //
+
+Void printMacroSettings();
+
+// ---------------------------------------------------------------------------------------------- //
+
+//Debugging
+
+extern Bool g_bFinalEncode;
+extern UInt g_debugCounter;
+extern Bool g_printDebug;
+extern Void* g_debugAddr;
+
+#ifdef DEBUG_ENCODER_SEARCH_BINS
+extern const UInt debugEncoderSearchBinTargetLine;
+extern const UInt debugEncoderSearchBinWindow;
+#endif
+
+#ifdef DEBUG_CABAC_BINS
+extern const UInt debugCabacBinTargetLine;
+extern const UInt debugCabacBinWindow;
+#endif
+
+
+Void printSBACCoeffData(  const UInt          lastX,
+                          const UInt          lastY,
+                          const UInt          width,
+                          const UInt          height,
+                          const UInt          chan,
+                          const UInt          absPart,
+                          const UInt          scanIdx,
+                          const TCoeff *const pCoeff,
+                          const Bool          finalEncode=true
+                        );
+
+
+Void printCbfArray( class TComDataCU* pcCU  );
+
+UInt getDecimalWidth(const Double value);
+UInt getZScanIndex(const UInt x, const UInt y);
+
+//template specialisation for Char types to get it to render as a number
+template <typename ValueType> inline Void writeValueToStream       (const ValueType &value, std::ostream &stream, const UInt outputWidth) { stream << std::setw(outputWidth) <<      value;  }
+template <>                   inline Void writeValueToStream<Char >(const Char      &value, std::ostream &stream, const UInt outputWidth) { stream << std::setw(outputWidth) <<  Int(value); }
+template <>                   inline Void writeValueToStream<UChar>(const UChar     &value, std::ostream &stream, const UInt outputWidth) { stream << std::setw(outputWidth) << UInt(value); }
+
+template <typename ValueType>
+Void printBlock(const ValueType    *const source,
+                const UInt                width,
+                const UInt                height,
+                const UInt                stride,
+                const UInt                outputValueWidth = 0,         //if set to 0, the maximum output width will be calculated and used
+                const Bool                onlyPrintEdges   = false,     //print only the top row and left column for printing prediction reference samples
+                const Bool                printInZScan     = false,     //output values in Z-scan format (useful for values addressed by AbsPartIdxes)
+                const Int                 shiftLeftBy      = 0,         //set a negative value to right-shift instead
+                const Bool                printAverage     = false,     //also print the average of the values in the block
+                      std::ostream      & stream           = std::cout)
+{
+  //find the maximum output width
+  UInt outputWidth = outputValueWidth;
+
+  if (outputWidth == 0)
+  {
+    ValueType minimumValue = leftShift(source[0], shiftLeftBy);
+    ValueType maximumValue = minimumValue;
+
+    for (UInt y = 0; y < height; y++)
+      for (UInt x = 0; x < width; x++)
+      {
+        ValueType value = 0;
+
+        if (!onlyPrintEdges || (x == 0) || (y == 0))
+        {
+          value = leftShift(source[printInZScan ? getZScanIndex(x, y) : ((y * stride) + x)], shiftLeftBy);
+        }
+
+        if      (value < minimumValue) minimumValue = value;
+        else if (value > maximumValue) maximumValue = value;
+      }
+
+    outputWidth = std::max<UInt>(getDecimalWidth(Double(minimumValue)), getDecimalWidth(Double(maximumValue))) + 1; //+1 so the numbers don't run into each other
+  }
+
+  //------------------
+  //print out the block
+
+  ValueType valueSum = 0;
+
+  for (UInt y = 0; y < height; y++)
+  {
+    for (UInt x = 0; x < width; x++)
+    {
+      ValueType value = 0;
+
+      if (!onlyPrintEdges || (x == 0) || (y == 0))
+      {
+        value     = leftShift(source[printInZScan ? getZScanIndex(x, y) : ((y * stride) + x)], shiftLeftBy);
+        valueSum += value;
+      }
+
+      writeValueToStream(value, stream, outputWidth);
+    }
+    stream << "\n";
+  }
+
+  const Int valueCount = onlyPrintEdges ? Int((width + height) - 1) : Int(width * height);
+  if (printAverage) stream << "Average: " << (valueSum / valueCount) << "\n";
+  stream << "\n";
+}
+
+
+template <typename T>
+Void printBlockToStream( std::ostream &ss, const Char *pLinePrefix, const T * blkSrc, const UInt width, const UInt height, const UInt stride, const UInt subBlockWidth=0, const UInt subBlockHeight=0, const UInt defWidth=3 )
+{
+  for (UInt y=0; y<height; y++)
+  {
+    if (subBlockHeight!=0 && (y%subBlockHeight)==0 && y!=0)
+      ss << pLinePrefix << '\n';
+
+    ss << pLinePrefix;
+    for (UInt x=0; x<width; x++)
+    {
+      if (subBlockWidth!=0 && (x%subBlockWidth)==0 && x!=0)
+        ss << std::setw(defWidth+2) << "";
+
+      ss << std::setw(defWidth) << blkSrc[y*stride + x] << ' ';
+    }
+    ss << '\n';
+  }
+}
+
+class TComYuv;
+Void printBlockToStream( std::ostream &ss, const Char *pLinePrefix, TComYuv &src, const UInt numSubBlocksAcross=1, const UInt numSubBlocksUp=1, const UInt defWidth=3 );
+
+// ---------------------------------------------------------------------------------------------- //
+
+//String manipulation functions for aligning and wrapping printed text
+
+std::string splitOnSettings(const std::string &input);
+
+std::string lineWrap(const std::string &input, const UInt maximumLineLength);
+
+std::string indentNewLines(const std::string &input, const UInt indentBy);
+
+// ---------------------------------------------------------------------------------------------- //
+
+#ifdef DEBUG_STRING
+  Int DebugStringGetPredModeMask(PredMode mode);
+  Void DebugInterPredResiReco(std::string &sDebug, TComYuv &pred, TComYuv &resi, TComYuv &reco, Int predmode_mask);
+#endif
+
+
+#endif /* __DEBUG__ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/NAL.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,118 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#ifndef __NAL__
+#define __NAL__
+
+#include <vector>
+#include <sstream>
+#include "CommonDef.h"
+
+class TComOutputBitstream;
+
+/**
+ * Represents a single NALunit header and the associated RBSPayload
+ */
+struct NALUnit
+{
+  NalUnitType m_nalUnitType; ///< nal_unit_type
+  UInt        m_temporalId;  ///< temporal_id
+  UInt        m_reservedZero6Bits; ///< reserved_zero_6bits
+
+  /** construct an NALunit structure with given header values. */
+  NALUnit(
+    NalUnitType nalUnitType,
+    Int         temporalId = 0,
+    Int         reservedZero6Bits = 0)
+    :m_nalUnitType (nalUnitType)
+    ,m_temporalId  (temporalId)
+    ,m_reservedZero6Bits(reservedZero6Bits)
+  {}
+
+  /** default constructor - no initialization; must be perfomed by user */
+  NALUnit() {}
+
+  /** returns true if the NALunit is a slice NALunit */
+  Bool isSlice()
+  {
+    return m_nalUnitType == NAL_UNIT_CODED_SLICE_TRAIL_R
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_TRAIL_N
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_TSA_R
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_TSA_N
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_STSA_R
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_STSA_N
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA_W_LP
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA_W_RADL
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA_N_LP
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR_W_RADL
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR_N_LP
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_CRA
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_RADL_N
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_RADL_R
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_RASL_N
+        || m_nalUnitType == NAL_UNIT_CODED_SLICE_RASL_R;
+  }
+  Bool isSei()
+  {
+    return m_nalUnitType == NAL_UNIT_PREFIX_SEI
+        || m_nalUnitType == NAL_UNIT_SUFFIX_SEI;
+  }
+
+  Bool isVcl()
+  {
+    return ( (UInt)m_nalUnitType < 32 );
+  }
+};
+
+struct OutputNALUnit;
+
+/**
+ * A single NALunit, with complete payload in EBSP format.
+ */
+struct NALUnitEBSP : public NALUnit
+{
+  std::ostringstream m_nalUnitData;
+
+  /**
+   * convert the OutputNALUnit #nalu# into EBSP format by writing out
+   * the NALUnit header, then the rbsp_bytes including any
+   * emulation_prevention_three_byte symbols.
+   */
+  NALUnitEBSP(OutputNALUnit& nalu);
+};
+//! \}
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/SEI.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,126 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     SEI.cpp
+    \brief    helper functions for SEI handling
+*/
+
+#include "CommonDef.h"
+#include "SEI.h"
+
+//Table D-7 Meaning of camera iso sensitivity indicator and exposure index rating indicator
+Int  Table_exp_indicator[32] = {0, 10, 12, 16, 20, 25, 32, 40, 50, 64, 80, 100, 125, 160, 200, 250, 320, 400, 500, 640, 800, 1000, 1250, 1600, 2000, 2500, 3200, 4000, 5000, 6400, 8000, -1};
+
+SEIMessages getSeisByType(SEIMessages &seiList, SEI::PayloadType seiType)
+{
+  SEIMessages result;
+
+  for (SEIMessages::iterator it=seiList.begin(); it!=seiList.end(); it++)
+  {
+    if ((*it)->payloadType() == seiType)
+    {
+      result.push_back(*it);
+    }
+  }
+  return result;
+}
+
+SEIMessages extractSeisByType(SEIMessages &seiList, SEI::PayloadType seiType)
+{
+  SEIMessages result;
+
+  SEIMessages::iterator it=seiList.begin();
+  while ( it!=seiList.end() )
+  {
+    if ((*it)->payloadType() == seiType)
+    {
+      result.push_back(*it);
+      it = seiList.erase(it);
+    }
+    else
+    {
+      it++;
+    }
+  }
+  return result;
+}
+
+
+Void deleteSEIs (SEIMessages &seiList)
+{
+  for (SEIMessages::iterator it=seiList.begin(); it!=seiList.end(); it++)
+  {
+    delete (*it);
+  }
+  seiList.clear();
+}
+
+
+// Static member
+const Char *SEI::getSEIMessageString(SEI::PayloadType payloadType)
+{
+  switch (payloadType)
+  {
+    case SEI::BUFFERING_PERIOD:                     return "Buffering period";
+    case SEI::PICTURE_TIMING:                       return "Picture timing";
+    case SEI::PAN_SCAN_RECT:                        return "Pan-scan rectangle";                   // not currently decoded
+    case SEI::FILLER_PAYLOAD:                       return "Filler payload";                       // not currently decoded
+    case SEI::USER_DATA_REGISTERED_ITU_T_T35:       return "User data registered";                 // not currently decoded
+    case SEI::USER_DATA_UNREGISTERED:               return "User data unregistered";
+    case SEI::RECOVERY_POINT:                       return "Recovery point";
+    case SEI::SCENE_INFO:                           return "Scene information";                    // not currently decoded
+    case SEI::FULL_FRAME_SNAPSHOT:                  return "Picture snapshot";                     // not currently decoded
+    case SEI::PROGRESSIVE_REFINEMENT_SEGMENT_START: return "Progressive refinement segment start"; // not currently decoded
+    case SEI::PROGRESSIVE_REFINEMENT_SEGMENT_END:   return "Progressive refinement segment end";   // not currently decoded
+    case SEI::FILM_GRAIN_CHARACTERISTICS:           return "Film grain characteristics";           // not currently decoded
+    case SEI::POST_FILTER_HINT:                     return "Post filter hint";                     // not currently decoded
+    case SEI::TONE_MAPPING_INFO:                    return "Tone mapping information";
+    case SEI::KNEE_FUNCTION_INFO:                   return "Knee function information";
+    case SEI::FRAME_PACKING:                        return "Frame packing arrangement";
+    case SEI::DISPLAY_ORIENTATION:                  return "Display orientation";
+    case SEI::SOP_DESCRIPTION:                      return "Structure of pictures information";
+    case SEI::ACTIVE_PARAMETER_SETS:                return "Active parameter sets";
+    case SEI::DECODING_UNIT_INFO:                   return "Decoding unit information";
+    case SEI::TEMPORAL_LEVEL0_INDEX:                return "Temporal sub-layer zero index";
+    case SEI::DECODED_PICTURE_HASH:                 return "Decoded picture hash";
+    case SEI::SCALABLE_NESTING:                     return "Scalable nesting";
+    case SEI::REGION_REFRESH_INFO:                  return "Region refresh information";
+    case SEI::NO_DISPLAY:                           return "No display";
+    case SEI::TIME_CODE:                            return "Time code";
+    case SEI::MASTERING_DISPLAY_COLOUR_VOLUME:      return "Mastering display colour volume";
+    case SEI::SEGM_RECT_FRAME_PACKING:              return "Segmented rectangular frame packing arrangement";
+    case SEI::TEMP_MOTION_CONSTRAINED_TILE_SETS:    return "Temporal motion constrained tile sets";
+    case SEI::CHROMA_SAMPLING_FILTER_HINT:          return "Chroma sampling filter hint";
+    default:                                        return "Unknown";
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/SEI.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,591 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SEI__
+#define __SEI__
+
+#pragma once
+#include <list>
+#include <vector>
+#include <cstring>
+
+#include "TypeDef.h"
+#include "libmd5/MD5.h"
+
+//! \ingroup TLibCommon
+//! \{
+class TComSPS;
+
+/**
+ * Abstract class representing an SEI message with lightweight RTTI.
+ */
+class SEI
+{
+public:
+  enum PayloadType
+  {
+    BUFFERING_PERIOD                     = 0,
+    PICTURE_TIMING                       = 1,
+    PAN_SCAN_RECT                        = 2,
+    FILLER_PAYLOAD                       = 3,
+    USER_DATA_REGISTERED_ITU_T_T35       = 4,
+    USER_DATA_UNREGISTERED               = 5,
+    RECOVERY_POINT                       = 6,
+    SCENE_INFO                           = 9,
+    FULL_FRAME_SNAPSHOT                  = 15,
+    PROGRESSIVE_REFINEMENT_SEGMENT_START = 16,
+    PROGRESSIVE_REFINEMENT_SEGMENT_END   = 17,
+    FILM_GRAIN_CHARACTERISTICS           = 19,
+    POST_FILTER_HINT                     = 22,
+    TONE_MAPPING_INFO                    = 23,
+    FRAME_PACKING                        = 45,
+    DISPLAY_ORIENTATION                  = 47,
+    SOP_DESCRIPTION                      = 128,
+    ACTIVE_PARAMETER_SETS                = 129,
+    DECODING_UNIT_INFO                   = 130,
+    TEMPORAL_LEVEL0_INDEX                = 131,
+    DECODED_PICTURE_HASH                 = 132,
+    SCALABLE_NESTING                     = 133,
+    REGION_REFRESH_INFO                  = 134,
+    NO_DISPLAY                           = 135,
+    TIME_CODE                            = 136,
+    MASTERING_DISPLAY_COLOUR_VOLUME      = 137,
+    SEGM_RECT_FRAME_PACKING              = 138,
+    TEMP_MOTION_CONSTRAINED_TILE_SETS    = 139,
+    CHROMA_SAMPLING_FILTER_HINT          = 140,
+    KNEE_FUNCTION_INFO                   = 141
+  };
+
+  SEI() {}
+  virtual ~SEI() {}
+
+  static const Char *getSEIMessageString(SEI::PayloadType payloadType);
+
+  virtual PayloadType payloadType() const = 0;
+};
+
+static const UInt ISO_IEC_11578_LEN=16;
+
+class SEIuserDataUnregistered : public SEI
+{
+public:
+  PayloadType payloadType() const { return USER_DATA_UNREGISTERED; }
+
+  SEIuserDataUnregistered()
+    : userData(0)
+    {}
+
+  virtual ~SEIuserDataUnregistered()
+  {
+    delete[] userData; // userData is allocated with new UChar[]; plain delete is UB (mismatched new[]/delete)
+  }
+
+  UChar uuid_iso_iec_11578[ISO_IEC_11578_LEN];
+  UInt  userDataLength;
+  UChar *userData;
+};
+
+class SEIDecodedPictureHash : public SEI
+{
+public:
+  PayloadType payloadType() const { return DECODED_PICTURE_HASH; }
+
+  SEIDecodedPictureHash() {}
+  virtual ~SEIDecodedPictureHash() {}
+
+  enum Method
+  {
+    MD5,
+    CRC,
+    CHECKSUM,
+    RESERVED,
+  } method;
+
+  TComDigest m_digest;
+};
+
+class SEIActiveParameterSets : public SEI
+{
+public:
+  PayloadType payloadType() const { return ACTIVE_PARAMETER_SETS; }
+
+  SEIActiveParameterSets()
+    : activeVPSId            (0)
+    , m_selfContainedCvsFlag (false)
+    , m_noParameterSetUpdateFlag (false)
+    , numSpsIdsMinus1        (0)
+  {}
+  virtual ~SEIActiveParameterSets() {}
+
+  Int activeVPSId;
+  Bool m_selfContainedCvsFlag;
+  Bool m_noParameterSetUpdateFlag;
+  Int numSpsIdsMinus1;
+  std::vector<Int> activeSeqParameterSetId; 
+};
+
+class SEIBufferingPeriod : public SEI
+{
+public:
+  PayloadType payloadType() const { return BUFFERING_PERIOD; }
+
+  SEIBufferingPeriod()
+  : m_bpSeqParameterSetId (0)
+  , m_rapCpbParamsPresentFlag (false)
+  , m_cpbDelayOffset      (0)
+  , m_dpbDelayOffset      (0)
+  {
+    ::memset(m_initialCpbRemovalDelay, 0, sizeof(m_initialCpbRemovalDelay));
+    ::memset(m_initialCpbRemovalDelayOffset, 0, sizeof(m_initialCpbRemovalDelayOffset));
+    ::memset(m_initialAltCpbRemovalDelay, 0, sizeof(m_initialAltCpbRemovalDelay));
+    ::memset(m_initialAltCpbRemovalDelayOffset, 0, sizeof(m_initialAltCpbRemovalDelayOffset));
+  }
+  virtual ~SEIBufferingPeriod() {}
+
+  UInt m_bpSeqParameterSetId;
+  Bool m_rapCpbParamsPresentFlag;
+  UInt m_cpbDelayOffset;
+  UInt m_dpbDelayOffset;
+  UInt m_initialCpbRemovalDelay         [MAX_CPB_CNT][2];
+  UInt m_initialCpbRemovalDelayOffset   [MAX_CPB_CNT][2];
+  UInt m_initialAltCpbRemovalDelay      [MAX_CPB_CNT][2];
+  UInt m_initialAltCpbRemovalDelayOffset[MAX_CPB_CNT][2];
+  Bool m_concatenationFlag;
+  UInt m_auCpbRemovalDelayDelta;
+};
+class SEIPictureTiming : public SEI
+{
+public:
+  PayloadType payloadType() const { return PICTURE_TIMING; }
+
+  SEIPictureTiming()
+  : m_picStruct               (0)
+  , m_sourceScanType          (0)
+  , m_duplicateFlag           (false)
+  , m_picDpbOutputDuDelay     (0)
+  , m_numNalusInDuMinus1      (NULL)
+  , m_duCpbRemovalDelayMinus1 (NULL)
+  {}
+  virtual ~SEIPictureTiming()
+  {
+    if( m_numNalusInDuMinus1 != NULL )
+    {
+      delete[] m_numNalusInDuMinus1; // allocated with new UInt[]; must pair with delete[]
+    }
+    if( m_duCpbRemovalDelayMinus1  != NULL )
+    {
+      delete[] m_duCpbRemovalDelayMinus1; // allocated with new UInt[]; must pair with delete[]
+    }
+  }
+
+  UInt  m_picStruct;
+  UInt  m_sourceScanType;
+  Bool  m_duplicateFlag;
+
+  UInt  m_auCpbRemovalDelay;
+  UInt  m_picDpbOutputDelay;
+  UInt  m_picDpbOutputDuDelay;
+  UInt  m_numDecodingUnitsMinus1;
+  Bool  m_duCommonCpbRemovalDelayFlag;
+  UInt  m_duCommonCpbRemovalDelayMinus1;
+  UInt* m_numNalusInDuMinus1;
+  UInt* m_duCpbRemovalDelayMinus1;
+};
+
+class SEIDecodingUnitInfo : public SEI
+{
+public:
+  PayloadType payloadType() const { return DECODING_UNIT_INFO; }
+
+  SEIDecodingUnitInfo()
+    : m_decodingUnitIdx(0)
+    , m_duSptCpbRemovalDelay(0)
+    , m_dpbOutputDuDelayPresentFlag(false)
+    , m_picSptDpbOutputDuDelay(0)
+  {}
+  virtual ~SEIDecodingUnitInfo() {}
+  Int m_decodingUnitIdx;
+  Int m_duSptCpbRemovalDelay;
+  Bool m_dpbOutputDuDelayPresentFlag;
+  Int m_picSptDpbOutputDuDelay;
+};
+
+class SEIRecoveryPoint : public SEI
+{
+public:
+  PayloadType payloadType() const { return RECOVERY_POINT; }
+
+  SEIRecoveryPoint() {}
+  virtual ~SEIRecoveryPoint() {}
+
+  Int  m_recoveryPocCnt;
+  Bool m_exactMatchingFlag;
+  Bool m_brokenLinkFlag;
+};
+
+class SEIFramePacking : public SEI
+{
+public:
+  PayloadType payloadType() const { return FRAME_PACKING; }
+
+  SEIFramePacking() {}
+  virtual ~SEIFramePacking() {}
+
+  Int  m_arrangementId;
+  Bool m_arrangementCancelFlag;
+  Int  m_arrangementType;
+  Bool m_quincunxSamplingFlag;
+  Int  m_contentInterpretationType;
+  Bool m_spatialFlippingFlag;
+  Bool m_frame0FlippedFlag;
+  Bool m_fieldViewsFlag;
+  Bool m_currentFrameIsFrame0Flag;
+  Bool m_frame0SelfContainedFlag;
+  Bool m_frame1SelfContainedFlag;
+  Int  m_frame0GridPositionX;
+  Int  m_frame0GridPositionY;
+  Int  m_frame1GridPositionX;
+  Int  m_frame1GridPositionY;
+  Int  m_arrangementReservedByte;
+  Bool m_arrangementPersistenceFlag;
+  Bool m_upsampledAspectRatio;
+};
+
+class SEISegmentedRectFramePacking : public SEI
+{
+public:
+  PayloadType payloadType() const { return SEGM_RECT_FRAME_PACKING; }
+
+  SEISegmentedRectFramePacking() {}
+  virtual ~SEISegmentedRectFramePacking() {}
+
+  Bool m_arrangementCancelFlag;
+  Int  m_contentInterpretationType;
+  Bool m_arrangementPersistenceFlag;
+};
+
+class SEIDisplayOrientation : public SEI
+{
+public:
+  PayloadType payloadType() const { return DISPLAY_ORIENTATION; }
+
+  SEIDisplayOrientation()
+    : cancelFlag(true)
+    , persistenceFlag(0)
+    , extensionFlag(false)
+    {}
+  virtual ~SEIDisplayOrientation() {}
+
+  Bool cancelFlag;
+  Bool horFlip;
+  Bool verFlip;
+
+  UInt anticlockwiseRotation;
+  Bool persistenceFlag;
+  Bool extensionFlag;
+};
+
+class SEITemporalLevel0Index : public SEI
+{
+public:
+  PayloadType payloadType() const { return TEMPORAL_LEVEL0_INDEX; }
+
+  SEITemporalLevel0Index()
+    : tl0Idx(0)
+    , rapIdx(0)
+    {}
+  virtual ~SEITemporalLevel0Index() {}
+
+  UInt tl0Idx;
+  UInt rapIdx;
+};
+
+class SEIGradualDecodingRefreshInfo : public SEI
+{
+public:
+  PayloadType payloadType() const { return REGION_REFRESH_INFO; }
+
+  SEIGradualDecodingRefreshInfo()
+    : m_gdrForegroundFlag(0)
+  {}
+  virtual ~SEIGradualDecodingRefreshInfo() {}
+
+  Bool m_gdrForegroundFlag;
+};
+
+class SEINoDisplay : public SEI
+{
+public:
+  PayloadType payloadType() const { return NO_DISPLAY; }
+
+  SEINoDisplay()
+    : m_noDisplay(false)
+  {}
+  virtual ~SEINoDisplay() {}
+
+  Bool m_noDisplay;
+};
+
+class SEISOPDescription : public SEI
+{
+public:
+  PayloadType payloadType() const { return SOP_DESCRIPTION; }
+
+  SEISOPDescription() {}
+  virtual ~SEISOPDescription() {}
+
+  UInt m_sopSeqParameterSetId;
+  UInt m_numPicsInSopMinus1;
+
+  UInt m_sopDescVclNaluType[MAX_NUM_PICS_IN_SOP];
+  UInt m_sopDescTemporalId[MAX_NUM_PICS_IN_SOP];
+  UInt m_sopDescStRpsIdx[MAX_NUM_PICS_IN_SOP];
+  Int m_sopDescPocDelta[MAX_NUM_PICS_IN_SOP];
+};
+
+class SEIToneMappingInfo : public SEI
+{
+public:
+  PayloadType payloadType() const { return TONE_MAPPING_INFO; }
+  SEIToneMappingInfo() {}
+  virtual ~SEIToneMappingInfo() {}
+
+  Int    m_toneMapId;
+  Bool   m_toneMapCancelFlag;
+  Bool   m_toneMapPersistenceFlag;
+  Int    m_codedDataBitDepth;
+  Int    m_targetBitDepth;
+  Int    m_modelId;
+  Int    m_minValue;
+  Int    m_maxValue;
+  Int    m_sigmoidMidpoint;
+  Int    m_sigmoidWidth;
+  std::vector<Int> m_startOfCodedInterval;
+  Int    m_numPivots;
+  std::vector<Int> m_codedPivotValue;
+  std::vector<Int> m_targetPivotValue;
+  Int    m_cameraIsoSpeedIdc;
+  Int    m_cameraIsoSpeedValue;
+  Int    m_exposureIndexIdc;
+  Int    m_exposureIndexValue;
+  Int    m_exposureCompensationValueSignFlag;
+  Int    m_exposureCompensationValueNumerator;
+  Int    m_exposureCompensationValueDenomIdc;
+  Int    m_refScreenLuminanceWhite;
+  Int    m_extendedRangeWhiteLevel;
+  Int    m_nominalBlackLevelLumaCodeValue;
+  Int    m_nominalWhiteLevelLumaCodeValue;
+  Int    m_extendedWhiteLevelLumaCodeValue;
+};
+
+class SEIKneeFunctionInfo : public SEI
+{
+public:
+  PayloadType payloadType() const { return KNEE_FUNCTION_INFO; }
+  SEIKneeFunctionInfo() {}
+  virtual ~SEIKneeFunctionInfo() {}
+
+  Int   m_kneeId;
+  Bool  m_kneeCancelFlag;
+  Bool  m_kneePersistenceFlag;
+  Int   m_kneeInputDrange;
+  Int   m_kneeInputDispLuminance;
+  Int   m_kneeOutputDrange;
+  Int   m_kneeOutputDispLuminance;
+  Int   m_kneeNumKneePointsMinus1;
+  std::vector<Int> m_kneeInputKneePoint;
+  std::vector<Int> m_kneeOutputKneePoint;
+};
+
+class SEIChromaSamplingFilterHint : public SEI
+{
+public:
+  PayloadType payloadType() const {return CHROMA_SAMPLING_FILTER_HINT;}
+  SEIChromaSamplingFilterHint() : m_verChromaFilterIdc(0), m_horChromaFilterIdc(0) {} // dtor reads these idcs; zero-init so destroying a default-constructed object is not UB
+  virtual ~SEIChromaSamplingFilterHint() {
+    if(m_verChromaFilterIdc == 1)
+    {
+      for(Int i = 0; i < m_numVerticalFilters; i ++)
+      {
+        free(m_verFilterCoeff[i]);
+      }
+      free(m_verFilterCoeff);
+      free(m_verTapLengthMinus1);
+    }
+    if(m_horChromaFilterIdc == 1)
+    {
+      for(Int i = 0; i < m_numHorizontalFilters; i ++)
+      {
+        free(m_horFilterCoeff[i]);
+      }
+      free(m_horFilterCoeff);
+      free(m_horTapLengthMinus1);
+    }
+  }
+
+  Int   m_verChromaFilterIdc;
+  Int   m_horChromaFilterIdc;
+  Bool  m_verFilteringProcessFlag;
+  Int   m_targetFormatIdc;
+  Bool  m_perfectReconstructionFlag;
+  Int   m_numVerticalFilters;
+  Int*  m_verTapLengthMinus1;
+  Int** m_verFilterCoeff;
+  Int   m_numHorizontalFilters;
+  Int*  m_horTapLengthMinus1;
+  Int** m_horFilterCoeff;
+};
+
+class SEIMasteringDisplayColourVolume : public SEI
+{
+public:
+    PayloadType payloadType() const { return MASTERING_DISPLAY_COLOUR_VOLUME; }
+    SEIMasteringDisplayColourVolume() {}
+    virtual ~SEIMasteringDisplayColourVolume(){}
+    
+    TComSEIMasteringDisplay values;
+};
+
+typedef std::list<SEI*> SEIMessages;
+
+/// output a selection of SEI messages by payload type. Ownership stays in original message list.
+SEIMessages getSeisByType(SEIMessages &seiList, SEI::PayloadType seiType);
+
+/// remove a selection of SEI messages by payload type from the original list and return them in a new list.
+SEIMessages extractSeisByType(SEIMessages &seiList, SEI::PayloadType seiType);
+
+/// delete list of SEI messages (freeing the referenced objects)
+Void deleteSEIs (SEIMessages &seiList);
+
+class SEIScalableNesting : public SEI
+{
+public:
+  PayloadType payloadType() const { return SCALABLE_NESTING; }
+
+  SEIScalableNesting() {}
+  virtual ~SEIScalableNesting()
+  {
+    if (!m_callerOwnsSEIs)
+    {
+      deleteSEIs(m_nestedSEIs);
+    }
+  }
+
+  Bool  m_bitStreamSubsetFlag;
+  Bool  m_nestingOpFlag;
+  Bool  m_defaultOpFlag;                             //value valid if m_nestingOpFlag != 0
+  UInt  m_nestingNumOpsMinus1;                       // -"-
+  UInt  m_nestingMaxTemporalIdPlus1[MAX_TLAYER];     // -"-
+  UInt  m_nestingOpIdx[MAX_NESTING_NUM_OPS];         // -"-
+
+  Bool  m_allLayersFlag;                             //value valid if m_nestingOpFlag == 0
+  UInt  m_nestingNoOpMaxTemporalIdPlus1;             //value valid if m_nestingOpFlag == 0 and m_allLayersFlag == 0
+  UInt  m_nestingNumLayersMinus1;                    //value valid if m_nestingOpFlag == 0 and m_allLayersFlag == 0
+  UChar m_nestingLayerId[MAX_NESTING_NUM_LAYER];     //value valid if m_nestingOpFlag == 0 and m_allLayersFlag == 0. This can e.g. be a static array of 64 UChar values
+
+  Bool  m_callerOwnsSEIs;
+  SEIMessages m_nestedSEIs;
+};
+
+class SEITimeCode : public SEI
+{
+public:
+  PayloadType payloadType() const { return TIME_CODE; }
+  SEITimeCode() {}
+  virtual ~SEITimeCode(){}
+
+  UInt numClockTs;
+  TComSEITimeSet timeSetArray[MAX_TIMECODE_SEI_SETS];
+};
+
+//definition according to P1005_v1;
+class SEITempMotionConstrainedTileSets: public SEI
+{
+  struct TileSetData
+  {
+    protected:
+      std::vector<Int> m_top_left_tile_index;  //[tileSetIdx][tileIdx];
+      std::vector<Int> m_bottom_right_tile_index;
+
+    public:
+      Int     m_mcts_id;  
+      Bool    m_display_tile_set_flag;
+      Int     m_num_tile_rects_in_set; //_minus1;
+      Bool    m_exact_sample_value_match_flag;
+      Bool    m_mcts_tier_level_idc_present_flag;
+      Bool    m_mcts_tier_flag;
+      Int     m_mcts_level_idc;
+
+      Void setNumberOfTileRects(const Int number)
+      {
+        m_top_left_tile_index    .resize(number);
+        m_bottom_right_tile_index.resize(number);
+      }
+
+      Int  getNumberOfTileRects() const
+      {
+        assert(m_top_left_tile_index.size() == m_bottom_right_tile_index.size());
+        return Int(m_top_left_tile_index.size());
+      }
+
+            Int &topLeftTileIndex    (const Int tileRectIndex)       { return m_top_left_tile_index    [tileRectIndex]; }
+            Int &bottomRightTileIndex(const Int tileRectIndex)       { return m_bottom_right_tile_index[tileRectIndex]; }
+      const Int &topLeftTileIndex    (const Int tileRectIndex) const { return m_top_left_tile_index    [tileRectIndex]; }
+      const Int &bottomRightTileIndex(const Int tileRectIndex) const { return m_bottom_right_tile_index[tileRectIndex]; }
+  };
+
+protected:
+  std::vector<TileSetData> m_tile_set_data;
+
+public:
+
+  Bool    m_mc_all_tiles_exact_sample_value_match_flag;
+  Bool    m_each_tile_one_tile_set_flag;
+  Bool    m_limited_tile_set_display_flag;
+  Bool    m_max_mcs_tier_level_idc_present_flag;
+  Bool    m_max_mcts_tier_flag;
+  Int     m_max_mcts_level_idc;
+
+  PayloadType payloadType() const { return TEMP_MOTION_CONSTRAINED_TILE_SETS; }
+
+  Void setNumberOfTileSets(const Int number)       { m_tile_set_data.resize(number);     }
+  Int  getNumberOfTileSets()                 const { return Int(m_tile_set_data.size()); }
+
+        TileSetData &tileSetData (const Int index)       { return m_tile_set_data[index]; }
+  const TileSetData &tileSetData (const Int index) const { return m_tile_set_data[index]; }
+
+};
+
+#endif
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComBitCounter.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,71 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComBitCounter.h
+    \brief    Class for counting bits (header)
+*/
+
+#ifndef __TCOMBITCOUNTER__
+#define __TCOMBITCOUNTER__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include "TComBitStream.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// class for counting bits
+class TComBitCounter : public TComBitIf
+{
+protected:
+  UInt  m_uiBitCounter;
+
+public:
+  TComBitCounter()            {}
+  virtual ~TComBitCounter()   {}
+
+  Void        write                 ( UInt /*uiBits*/, UInt uiNumberOfBits )  { m_uiBitCounter += uiNumberOfBits; }
+  Void        resetBits             ()                                    { m_uiBitCounter = 0;               }
+  UInt getNumberOfWrittenBits() const { return m_uiBitCounter; }
+};
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComBitStream.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,390 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComBitStream.cpp
+    \brief    class for handling bitstream
+*/
+
+#include <stdint.h>
+#include <vector>
+#include "TComBitStream.h"
+#include <string.h>
+#include <memory.h>
+
+using namespace std;
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / create / destroy
+// ====================================================================================================================
+
+// Construct an empty output bitstream; clear() establishes the initial
+// state (empty byte FIFO, no held bits).
+TComOutputBitstream::TComOutputBitstream()
+{
+  clear();
+}
+
+TComOutputBitstream::~TComOutputBitstream()
+{
+}
+
+// Wrap an existing byte buffer for reading.  Ownership of *buf stays with
+// the caller unless deleteFifo() is invoked later to dispose of it.
+TComInputBitstream::TComInputBitstream(std::vector<uint8_t>* buf)
+{
+  m_fifo = buf;
+  m_fifo_idx = 0;       // index of the next byte to load from the buffer
+  m_held_bits = 0;      // remainder of the last loaded byte, not yet read
+  m_num_held_bits = 0;
+  m_numBitsRead = 0;    // running total reported by getNumBitsRead()
+}
+
+TComInputBitstream::~TComInputBitstream()
+{
+}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+// Raw pointer to the first byte of the accumulated bytestream.  Only valid
+// until the next write()/clear(); held (unflushed) bits are not included.
+Char* TComOutputBitstream::getByteStream() const
+{
+  return (Char*) &m_fifo.front();
+}
+
+// Number of complete bytes available from getByteStream().
+UInt TComOutputBitstream::getByteStreamLength()
+{
+  return UInt(m_fifo.size());
+}
+
+// Reset to the initial state: empty FIFO, no held bits.
+Void TComOutputBitstream::clear()
+{
+  m_fifo.clear();
+  m_held_bits = 0;
+  m_num_held_bits = 0;
+}
+
+/** Append the uiNumberOfBits least-significant bits of uiBits to the
+ *  bytestream.  Complete bytes are flushed into m_fifo; any sub-byte
+ *  remainder is kept msb-aligned in m_held_bits for the next call. */
+Void TComOutputBitstream::write   ( UInt uiBits, UInt uiNumberOfBits )
+{
+  assert( uiNumberOfBits <= 32 );
+  assert( uiNumberOfBits == 32 || (uiBits & (~0 << uiNumberOfBits)) == 0 );
+
+  /* any modulo 8 remainder of num_total_bits cannot be written this time,
+   * and will be held until next time. */
+  UInt num_total_bits = uiNumberOfBits + m_num_held_bits;
+  UInt next_num_held_bits = num_total_bits % 8;
+
+  /* form a byte aligned word (write_bits), by concatenating any held bits
+   * with the new bits, discarding the bits that will form the next_held_bits.
+   * eg: H = held bits, V = n new bits        /---- next_held_bits
+   * len(H)=7, len(V)=1: ... ---- HHHH HHHV . 0000 0000, next_num_held_bits=0
+   * len(H)=7, len(V)=2: ... ---- HHHH HHHV . V000 0000, next_num_held_bits=1
+   * if total_bits < 8, the value of v_ is not used */
+  UChar next_held_bits = uiBits << (8 - next_num_held_bits);
+
+  if (!(num_total_bits >> 3))
+  {
+    /* insufficient bits accumulated to write out, append new_held_bits to
+     * current held_bits */
+    /* NB, this requires that v only contains 0 in bit positions {31..n} */
+    m_held_bits |= next_held_bits;
+    m_num_held_bits = next_num_held_bits;
+    return;
+  }
+
+  /* topword serves to justify held_bits to align with the msb of uiBits */
+  UInt topword = (uiNumberOfBits - next_num_held_bits) & ~((1 << 3) -1);
+  UInt write_bits = (m_held_bits << topword) | (uiBits >> next_num_held_bits);
+
+  /* NB: intentional fall-through -- emit most significant bytes first */
+  switch (num_total_bits >> 3)
+  {
+  case 4: m_fifo.push_back(write_bits >> 24);
+  case 3: m_fifo.push_back(write_bits >> 16);
+  case 2: m_fifo.push_back(write_bits >> 8);
+  case 1: m_fifo.push_back(write_bits);
+  }
+
+  m_held_bits = next_held_bits;
+  m_num_held_bits = next_num_held_bits;
+}
+
+/** Pad with 1-bits up to the next byte boundary (no-op when aligned). */
+Void TComOutputBitstream::writeAlignOne()
+{
+  UInt num_bits = getNumBitsUntilByteAligned();
+  write((1 << num_bits) - 1, num_bits);
+  return;
+}
+
+/** Pad with 0-bits up to the next byte boundary by flushing the held
+ *  (msb-aligned, already zero-padded) bits as a whole byte. */
+Void TComOutputBitstream::writeAlignZero()
+{
+  if (0 == m_num_held_bits)
+  {
+    return;
+  }
+  m_fifo.push_back(m_held_bits);
+  m_held_bits = 0;
+  m_num_held_bits = 0;
+}
+
+/**
+ - add substream to the end of the current bitstream
+ .
+ \param  pcSubstream  substream to be added (not modified)
+ */
+Void   TComOutputBitstream::addSubstream( TComOutputBitstream* pcSubstream )
+{
+  UInt uiNumBits = pcSubstream->getNumberOfWrittenBits();
+
+  /* copy the substream's whole bytes first... */
+  const vector<uint8_t>& rbsp = pcSubstream->getFIFO();
+  for (vector<uint8_t>::const_iterator it = rbsp.begin(); it != rbsp.end();)
+  {
+    write(*it++, 8);
+  }
+  /* ...then any trailing partial byte held (msb-aligned) by the substream */
+  if (uiNumBits&0x7)
+  {
+    write(pcSubstream->getHeldBits()>>(8-(uiNumBits&0x7)), uiNumBits&0x7);
+  }
+}
+
+/** Write a single 1-bit followed by 0-bits up to the next byte boundary
+ *  (byte-alignment syntax; see readByteAlignment() for the inverse). */
+Void TComOutputBitstream::writeByteAlignment()
+{
+  write( 1, 1);
+  writeAlignZero();
+}
+
+/** Count occurrences of the three-byte pattern 00 00 {00,01,02,03} in the
+ *  current FIFO, i.e. positions that would need an emulation prevention
+ *  byte if the buffer were emitted as a raw NAL payload. */
+Int TComOutputBitstream::countStartCodeEmulations()
+{
+  UInt cnt = 0;
+  vector<uint8_t>& rbsp   = getFIFO();
+  for (vector<uint8_t>::iterator it = rbsp.begin(); it != rbsp.end();)
+  {
+    vector<uint8_t>::iterator found = it;
+    do
+    {
+      // find the next emulated 00 00 {00,01,02,03}
+      // NB, end()-1, prevents finding a trailing two byte sequence
+      found = search_n(found, rbsp.end()-1, 2, 0);
+      found++;
+      // if not found, found == end, otherwise found = second zero byte
+      // (search_n returns its 'last' argument, end()-1, on failure, so the
+      // increment lands exactly on end())
+      if (found == rbsp.end())
+      {
+        break;
+      }
+      if (*(++found) <= 3)   // byte after the two zeros completes the pattern
+      {
+        break;
+      }
+    } while (true);
+    it = found;              // resume the outer scan after this match
+    if (found != rbsp.end())
+    {
+      cnt++;
+    }
+  }
+  return cnt;
+}
+
+/**
+ * read #uiNumberOfBits# from bitstream without updating the bitstream
+ * state, storing the result in #ruiBits#.
+ *
+ * If reading #uiNumberOfBits# would overrun the bitstream buffer,
+ * the bitstream is effectively padded with sufficient zero-bits to
+ * avoid the overrun.
+ */
+Void TComInputBitstream::pseudoRead ( UInt uiNumberOfBits, UInt& ruiBits )
+{
+  /* save the reader state, perform a real read(), then restore */
+  UInt saved_num_held_bits = m_num_held_bits;
+  UChar saved_held_bits = m_held_bits;
+  UInt saved_fifo_idx = m_fifo_idx;
+
+  UInt num_bits_to_read = min(uiNumberOfBits, getNumBitsLeft());
+  read(num_bits_to_read, ruiBits);
+  ruiBits <<= (uiNumberOfBits - num_bits_to_read);   // zero-pad on overrun
+
+  /* NOTE(review): m_numBitsRead incremented by the nested read() is not
+   * restored here -- confirm that is intended. */
+  m_fifo_idx = saved_fifo_idx;
+  m_held_bits = saved_held_bits;
+  m_num_held_bits = saved_num_held_bits;
+}
+
+
+/** Extract uiNumberOfBits (<= 32) from the bytestream, msb-first, into
+ *  ruiBits.  Bits are served first from m_held_bits (the not-yet-consumed
+ *  remainder of the last loaded byte), then whole bytes are loaded from
+ *  the FIFO; any new remainder is stored back into m_held_bits. */
+Void TComInputBitstream::read (UInt uiNumberOfBits, UInt& ruiBits)
+{
+  assert( uiNumberOfBits <= 32 );
+
+  m_numBitsRead += uiNumberOfBits;
+
+  /* NB, bits are extracted from the MSB of each byte. */
+  UInt retval = 0;
+  if (uiNumberOfBits <= m_num_held_bits)
+  {
+    /* request satisfied entirely from the held bits:
+     * n=1, len(H)=7:   -VHH HHHH, shift_down=6, mask=0xfe
+     * n=3, len(H)=7:   -VVV HHHH, shift_down=4, mask=0xf8
+     */
+    retval = m_held_bits >> (m_num_held_bits - uiNumberOfBits);
+    retval &= ~(0xff << uiNumberOfBits);
+    m_num_held_bits -= uiNumberOfBits;
+    ruiBits = retval;
+    return;
+  }
+
+  /* all num_held_bits will go into retval
+   *   => need to mask leftover bits from previous extractions
+   *   => align retval with top of extracted word */
+  /* n=5, len(H)=3: ---- -VVV, mask=0x07, shift_up=5-3=2,
+   * n=9, len(H)=3: ---- -VVV, mask=0x07, shift_up=9-3=6 */
+  uiNumberOfBits -= m_num_held_bits;
+  retval = m_held_bits & ~(0xff << m_num_held_bits);
+  retval <<= uiNumberOfBits;
+
+  /* number of whole bytes that need to be loaded to form retval */
+  /* n=32, len(H)=0, load 4bytes, shift_down=0
+   * n=32, len(H)=1, load 4bytes, shift_down=1
+   * n=31, len(H)=1, load 4bytes, shift_down=1+1
+   * n=8,  len(H)=0, load 1byte,  shift_down=0
+   * n=8,  len(H)=3, load 1byte,  shift_down=3
+   * n=5,  len(H)=1, load 1byte,  shift_down=1+3
+   */
+  UInt aligned_word = 0;
+  UInt num_bytes_to_load = (uiNumberOfBits - 1) >> 3;
+  assert(m_fifo_idx + num_bytes_to_load < m_fifo->size());
+
+  /* NB: intentional fall-through -- assemble the bytes big-endian */
+  switch (num_bytes_to_load)
+  {
+  case 3: aligned_word  = (*m_fifo)[m_fifo_idx++] << 24;
+  case 2: aligned_word |= (*m_fifo)[m_fifo_idx++] << 16;
+  case 1: aligned_word |= (*m_fifo)[m_fifo_idx++] <<  8;
+  case 0: aligned_word |= (*m_fifo)[m_fifo_idx++];
+  }
+
+  /* resolve remainder bits */
+  UInt next_num_held_bits = (32 - uiNumberOfBits) % 8;
+
+  /* copy required part of aligned_word into retval */
+  retval |= aligned_word >> next_num_held_bits;
+
+  /* store held bits (UChar assignment keeps only the low byte, which is
+   * where the unconsumed remainder lives) */
+  m_num_held_bits = next_num_held_bits;
+  m_held_bits = aligned_word;
+
+  ruiBits = retval;
+}
+
+/**
+ * insert the contents of the bytealigned (and flushed) bitstream src
+ * into this at byte position pos.
+ */
+Void TComOutputBitstream::insertAt(const TComOutputBitstream& src, UInt pos)
+{
+  UInt src_bits = src.getNumberOfWrittenBits();
+  assert(0 == src_bits % 8);   // src must contain whole bytes only
+
+  vector<uint8_t>::iterator at = m_fifo.begin() + pos;
+  m_fifo.insert(at, src.m_fifo.begin(), src.m_fifo.end());
+}
+
+/** Consume bits one at a time until the read position is byte-aligned
+ *  (or the stream is exhausted); the bit values are discarded.
+ *  \return number of bits skipped */
+UInt TComInputBitstream::readOutTrailingBits ()
+{
+  UInt count=0;
+  UInt uiBits = 0;
+
+  while ( ( getNumBitsLeft() > 0 ) && (getNumBitsUntilByteAligned()!=0) )
+  {
+    count++;
+    read ( 1, uiBits );
+  }
+  return count;
+}
+//
+//TComOutputBitstream& TComOutputBitstream::operator= (const TComOutputBitstream& src)
+//{
+//  vector<uint8_t>::iterator at = m_fifo.begin();
+//  m_fifo.insert(at, src.m_fifo.begin(), src.m_fifo.end());
+//
+//  m_num_held_bits             = src.m_num_held_bits;
+//  m_held_bits                 = src.m_held_bits;
+//
+//  return *this;
+//}
+
+/**
+ - extract substream from the current bitstream
+ .
+ \param  uiNumBits    number of bits to transfer
+ \return newly allocated TComInputBitstream over a newly allocated buffer;
+         the caller owns both (release the buffer via deleteFifo())
+ */
+TComInputBitstream *TComInputBitstream::extractSubstream( UInt uiNumBits )
+{
+  UInt uiNumBytes = uiNumBits/8;
+  std::vector<uint8_t>* buf = new std::vector<uint8_t>;
+  UInt uiByte;
+  for (UInt ui = 0; ui < uiNumBytes; ui++)
+  {
+    read(8, uiByte);
+    buf->push_back(uiByte);
+  }
+  if (uiNumBits&0x7)
+  {
+    uiByte = 0;
+    read(uiNumBits&0x7, uiByte);
+    uiByte <<= 8-(uiNumBits&0x7);   // msb-align the partial final byte
+    buf->push_back(uiByte);
+  }
+  return new TComInputBitstream(buf);
+}
+
+/**
+ - delete internal fifo (for streams that own their buffer, e.g. those
+   returned by extractSubstream())
+ */
+Void TComInputBitstream::deleteFifo()
+{
+  delete m_fifo;
+  m_fifo = NULL;   // prevent dangling use after disposal
+}
+
+/** Read and validate byte-alignment syntax: a single 1-bit followed by
+ *  0-bits up to the next byte boundary (inverse of writeByteAlignment()).
+ *  \return total number of bits consumed (1 + padding) */
+UInt TComInputBitstream::readByteAlignment()
+{
+  UInt code = 0;
+  read( 1, code );
+  assert(code == 1);   // the alignment bit must be 1
+
+  UInt numBits = getNumBitsUntilByteAligned();
+  if(numBits)
+  {
+    assert(numBits <= getNumBitsLeft());
+    read( numBits, code );
+    assert(code == 0);   // the padding bits must all be 0
+  }
+  return numBits+1;
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComBitStream.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,228 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComBitStream.h
+    \brief    class for handling bitstream (header)
+*/
+
+#ifndef __TCOMBITSTREAM__
+#define __TCOMBITSTREAM__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include <stdint.h>
+#include <vector>
+#include <stdio.h>
+#include <assert.h>
+#include "CommonDef.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// pure virtual class for basic bit handling
+/// (bit-sink interface implemented by TComOutputBitstream and TComBitCounter)
+class TComBitIf
+{
+public:
+  virtual Void        writeAlignOne         () {};   // default: no-op
+  virtual Void        writeAlignZero        () {};   // default: no-op
+  virtual Void        write                 ( UInt uiBits, UInt uiNumberOfBits )  = 0;
+  virtual Void        resetBits             ()                                    = 0;
+  virtual UInt getNumberOfWrittenBits() const = 0;
+  virtual ~TComBitIf() {}
+};
+
+/**
+ * Model of a writable bitstream that accumulates bits to produce a
+ * bytestream.
+ */
+class TComOutputBitstream : public TComBitIf
+{
+  /**
+   * FIFO for storage of bytes.  Use:
+   *  - fifo.push_back(x) to append words
+   *  - fifo.clear() to empty the FIFO
+   *  - &fifo.front() to get a pointer to the data array.
+   *    NB, this pointer is only valid until the next push_back()/clear()
+   */
+  std::vector<uint8_t> m_fifo;
+
+  UInt m_num_held_bits; /// number of bits not flushed to bytestream.
+  UChar m_held_bits; /// the bits held and not flushed to bytestream.
+                             /// this value is always msb-aligned, bigendian.
+public:
+  // create / destroy
+  TComOutputBitstream();
+  ~TComOutputBitstream();
+
+  // interface for encoding
+  /**
+   * append uiNumberOfBits least significant bits of uiBits to
+   * the current bitstream
+   */
+  Void        write           ( UInt uiBits, UInt uiNumberOfBits );
+
+  /** insert 1-bits until the bitstream is byte-aligned */
+  Void        writeAlignOne   ();
+
+  /** insert zero bits until the bitstream is byte-aligned */
+  Void        writeAlignZero  ();
+
+  /** this function should never be called on an output bitstream; it only
+   *  exists to satisfy the TComBitIf interface */
+  Void resetBits() { assert(0); }
+
+  // utility functions
+
+  /**
+   * Return a pointer to the start of the byte-stream buffer.
+   * Pointer is valid until the next write/flush/reset call.
+   * NB, data is arranged such that subsequent bytes in the
+   * bytestream are stored in ascending addresses.
+   */
+  Char* getByteStream() const;
+
+  /**
+   * Return the number of valid bytes available from getByteStream()
+   */
+  UInt getByteStreamLength();
+
+  /**
+   * Reset all internal state.
+   */
+  Void clear();
+
+  /**
+   * returns the number of bits that need to be written to
+   * achieve byte alignment.
+   */
+  Int getNumBitsUntilByteAligned() { return (8 - m_num_held_bits) & 0x7; }
+
+  /**
+   * Return the number of bits that have been written since the last clear()
+   */
+  UInt getNumberOfWrittenBits() const { return UInt(m_fifo.size()) * 8 + m_num_held_bits; }
+
+  Void insertAt(const TComOutputBitstream& src, UInt pos);
+
+  /**
+   * Return a reference to the internal fifo
+   */
+  std::vector<uint8_t>& getFIFO() { return m_fifo; }
+
+  /** partial (msb-aligned) byte not yet flushed to the FIFO */
+  UChar getHeldBits  ()          { return m_held_bits;          }
+
+  //TComOutputBitstream& operator= (const TComOutputBitstream& src);
+  /** Return a reference to the internal fifo */
+  const std::vector<uint8_t>& getFIFO() const { return m_fifo; }
+
+  Void          addSubstream    ( TComOutputBitstream* pcSubstream );
+  Void writeByteAlignment();
+
+  //! returns the number of start code emulations contained in the current buffer
+  Int countStartCodeEmulations();
+};
+
+/**
+ * Model of an input bitstream that extracts bits from a predefined
+ * bytestream.
+ */
+class TComInputBitstream
+{
+  std::vector<uint8_t> *m_fifo; /// FIFO for storage of complete bytes
+  std::vector<UInt> m_emulationPreventionByteLocation; /// byte positions where EP bytes were stripped
+
+protected:
+  UInt m_fifo_idx; /// Read index into m_fifo
+
+  UInt m_num_held_bits; /// bits of the current byte not yet consumed
+  UChar m_held_bits;    /// unconsumed remainder of the last loaded byte
+  UInt  m_numBitsRead;  /// running total of bits handed out by read()
+
+public:
+  /**
+   * Create a new bitstream reader object that reads from #buf#.  Ownership
+   * of #buf# remains with the caller, although the constructed object
+   * will hold a reference to #buf# (see deleteFifo() for explicit disposal)
+   */
+  TComInputBitstream(std::vector<uint8_t>* buf);
+  ~TComInputBitstream();
+
+  // interface for decoding
+  Void        pseudoRead      ( UInt uiNumberOfBits, UInt& ruiBits );
+  Void        read            ( UInt uiNumberOfBits, UInt& ruiBits );
+  Void        readByte        ( UInt &ruiBits )
+  {
+    assert(m_fifo_idx < m_fifo->size());
+    ruiBits = (*m_fifo)[m_fifo_idx++];
+  }
+
+  Void        peekPreviousByte( UInt &byte )
+  {
+    assert(m_fifo_idx > 0);
+    byte = (*m_fifo)[m_fifo_idx - 1];
+  }
+
+  UInt        readOutTrailingBits ();
+  UChar getHeldBits  ()          { return m_held_bits;          }
+  // NOTE(review): the following assignment operator is declared but no
+  // definition appears in this translation unit -- confirm before use.
+  TComOutputBitstream& operator= (const TComOutputBitstream& src);
+  UInt  getByteLocation              ( )                     { return m_fifo_idx                    ; }
+
+  // Peek at bits in word-storage. Used in determining if we have completed reading of current bitstream and therefore slice in LCEC.
+  UInt        peekBits (UInt uiBits) { UInt tmp; pseudoRead(uiBits, tmp); return tmp; }
+
+  // utility functions
+  UInt read(UInt numberOfBits) { UInt tmp; read(numberOfBits, tmp); return tmp; }
+  UInt     readByte() { UInt tmp; readByte( tmp ); return tmp; }
+  UInt getNumBitsUntilByteAligned() { return m_num_held_bits & (0x7); }
+  UInt getNumBitsLeft() { return 8*((UInt)m_fifo->size() - m_fifo_idx) + m_num_held_bits; }
+  TComInputBitstream *extractSubstream( UInt uiNumBits ); // Read the nominated number of bits, and return as a bitstream.
+  Void                deleteFifo(); // Delete internal fifo of bitstream.
+  UInt  getNumBitsRead() { return m_numBitsRead; }
+  UInt readByteAlignment();
+
+  Void      pushEmulationPreventionByteLocation ( UInt pos )                  { m_emulationPreventionByteLocation.push_back( pos ); }
+  UInt      numEmulationPreventionBytesRead     ()                            { return (UInt) m_emulationPreventionByteLocation.size();    }
+  std::vector<UInt>  getEmulationPreventionByteLocation  ()                   { return m_emulationPreventionByteLocation;           }
+  UInt      getEmulationPreventionByteLocation  ( UInt idx )                  { return m_emulationPreventionByteLocation[ idx ];    }
+  Void      clearEmulationPreventionByteLocation()                            { m_emulationPreventionByteLocation.clear();          }
+  Void      setEmulationPreventionByteLocation  ( std::vector<UInt> vec )     { m_emulationPreventionByteLocation = vec;            }
+};
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComCABACTables.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,123 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComCABACTables.cpp
+ *  \brief    static class for CABAC tables
+ */
+
+#include "TComCABACTables.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+/* CABAC LPS range table: one row per probability state (64 rows here),
+ * four candidate range values per row.  Values presumably follow the
+ * HEVC specification's rangeTabLps -- confirm against the spec before
+ * modifying; the final row (2,2,2,2) is the terminal state. */
+const UChar TComCABACTables::sm_aucLPSTable[1 << CONTEXT_STATE_BITS][4] =
+{
+  { 128, 176, 208, 240},
+  { 128, 167, 197, 227},
+  { 128, 158, 187, 216},
+  { 123, 150, 178, 205},
+  { 116, 142, 169, 195},
+  { 111, 135, 160, 185},
+  { 105, 128, 152, 175},
+  { 100, 122, 144, 166},
+  {  95, 116, 137, 158},
+  {  90, 110, 130, 150},
+  {  85, 104, 123, 142},
+  {  81,  99, 117, 135},
+  {  77,  94, 111, 128},
+  {  73,  89, 105, 122},
+  {  69,  85, 100, 116},
+  {  66,  80,  95, 110},
+  {  62,  76,  90, 104},
+  {  59,  72,  86,  99},
+  {  56,  69,  81,  94},
+  {  53,  65,  77,  89},
+  {  51,  62,  73,  85},
+  {  48,  59,  69,  80},
+  {  46,  56,  66,  76},
+  {  43,  53,  63,  72},
+  {  41,  50,  59,  69},
+  {  39,  48,  56,  65},
+  {  37,  45,  54,  62},
+  {  35,  43,  51,  59},
+  {  33,  41,  48,  56},
+  {  32,  39,  46,  53},
+  {  30,  37,  43,  50},
+  {  29,  35,  41,  48},
+  {  27,  33,  39,  45},
+  {  26,  31,  37,  43},
+  {  24,  30,  35,  41},
+  {  23,  28,  33,  39},
+  {  22,  27,  32,  37},
+  {  21,  26,  30,  35},
+  {  20,  24,  29,  33},
+  {  19,  23,  27,  31},
+  {  18,  22,  26,  30},
+  {  17,  21,  25,  28},
+  {  16,  20,  23,  27},
+  {  15,  19,  22,  25},
+  {  14,  18,  21,  24},
+  {  14,  17,  20,  23},
+  {  13,  16,  19,  22},
+  {  12,  15,  18,  21},
+  {  12,  14,  17,  20},
+  {  11,  14,  16,  19},
+  {  11,  13,  15,  18},
+  {  10,  12,  15,  17},
+  {  10,  12,  14,  16},
+  {   9,  11,  13,  15},
+  {   9,  11,  12,  14},
+  {   8,  10,  12,  14},
+  {   8,   9,  11,  13},
+  {   7,   9,  11,  12},
+  {   7,   9,  10,  12},
+  {   7,   8,  10,  11},
+  {   6,   8,   9,  11},
+  {   6,   7,   9,  10},
+  {   6,   7,   8,   9},
+  {   2,   2,   2,   2}
+};
+
+/* CABAC renormalisation table: number of shifts needed per quantised
+ * range index; values decrease as the index (and thus the range) grows.
+ * Presumably matches the HEVC decoding process -- confirm against the
+ * spec before modifying. */
+const UChar TComCABACTables::sm_aucRenormTable[32] =
+{
+  6,  5,  4,  4,
+  3,  3,  3,  3,
+  2,  2,  2,  2,
+  2,  2,  2,  2,
+  1,  1,  1,  1,
+  1,  1,  1,  1,
+  1,  1,  1,  1,
+  1,  1,  1,  1
+};
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComCABACTables.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,61 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComCABACTables.h
+    \brief    static class for CABAC tables
+*/
+
+#ifndef __TCOMCABACTABLES__
+#define __TCOMCABACTABLES__
+
+#include "TLibCommon/CommonDef.h"
+#include "TLibCommon/TComRom.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+/**
+ * \brief static class for CABAC tables (holds only constant lookup data;
+ *        never instantiated)
+ */
+
+class TComCABACTables
+{
+public:
+  const static UChar  sm_aucLPSTable[1 << CONTEXT_STATE_BITS][4];  ///< LPS range values per probability state
+  const static UChar  sm_aucRenormTable[32];                       ///< renormalisation shift counts
+};
+
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComChromaFormat.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,142 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "TComChromaFormat.h"
+#include "TComPic.h"
+#include "TComDataCU.h"
+#include "TComTrQuant.h"
+#include "TComTU.h"
+
+
+
+
+//----------------------------------------------------------------------------------------------------------------------
+
+/** Map a colour-space-conversion name onto an InputColourSpaceConversion
+ *  value.  Accepted names differ between the forward and inverse
+ *  direction; an empty string means "UNCHANGED"; any unknown name yields
+ *  NUMBER_INPUT_COLOUR_SPACE_CONVERSIONS as an error marker. */
+InputColourSpaceConversion stringToInputColourSpaceConvert(const std::string &value, const Bool bIsForward)
+{
+  if (value.empty() || value=="UNCHANGED") return IPCOLOURSPACE_UNCHANGED;
+  if (bIsForward)
+  {
+    if (value=="YCbCrtoYYY")                 return IPCOLOURSPACE_YCbCrtoYYY;
+    if (value=="YCbCrtoYCrCb")               return IPCOLOURSPACE_YCbCrtoYCrCb;
+    if (value=="RGBtoGBR")                   return IPCOLOURSPACE_RGBtoGBR;
+  }
+  else
+  {
+    // inverse names map onto the same enum values as their forward pair
+    if (value=="YCrCbtoYCbCr")               return IPCOLOURSPACE_YCbCrtoYCrCb;
+    if (value=="GBRtoRGB")                   return IPCOLOURSPACE_RGBtoGBR;
+  }
+  return NUMBER_INPUT_COLOUR_SPACE_CONVERSIONS;
+}
+
+/** Human-readable list of the names accepted by
+ *  stringToInputColourSpaceConvert() for the given direction
+ *  (e.g. for help/usage messages). */
+std::string getListOfColourSpaceConverts(const Bool bIsForward)
+{
+  if (bIsForward)
+  {
+    return "UNCHANGED, YCbCrtoYCrCb, YCbCrtoYYY or RGBtoGBR";
+  }
+  else
+  {
+    return "UNCHANGED, YCrCbtoYCbCr or GBRtoRGB";
+  }
+}
+
+
+//----------------------------------------------------------------------------------------------------------------------
+
+/** Fill `result` with the entropy-coding parameters for one TU component:
+ *  coefficient scan type/orders, coefficient-group layout, and the first
+ *  significance-map context index. */
+Void getTUEntropyCodingParameters(      TUEntropyCodingParameters &result,
+                                        TComTU                    &rTu,
+                                  const ComponentID                component)
+{
+  //------------------------------------------------
+
+  //set the local parameters
+
+        TComDataCU    *const pcCU            = rTu.getCU();
+  const TComRectangle &      area            = rTu.getRect(component);
+  const UInt                 uiAbsPartIdx    = rTu.GetAbsPartIdxTU(component);
+  // NOTE(review): g_aucConvertToBit[x] appears to be log2(x) - 2, hence the
+  // "+ 2" to recover log2 of the block dimension -- confirm in TComRom
+  const UInt                 log2BlockWidth  = g_aucConvertToBit[area.width]  + 2;
+  const UInt                 log2BlockHeight = g_aucConvertToBit[area.height] + 2;
+  const ChannelType          channelType     = toChannelType(component);
+
+  result.scanType = COEFF_SCAN_TYPE(pcCU->getCoefScanIdx(uiAbsPartIdx, area.width, area.height, component));
+
+  //------------------------------------------------
+
+  //set the group layout (coefficient groups of MLS_CG_LOG2_WIDTH x MLS_CG_LOG2_HEIGHT)
+
+  result.widthInGroups  = area.width  >> MLS_CG_LOG2_WIDTH;
+  result.heightInGroups = area.height >> MLS_CG_LOG2_HEIGHT;
+
+  //------------------------------------------------
+
+  //set the scan orders
+
+  const UInt log2WidthInGroups  = g_aucConvertToBit[result.widthInGroups  * 4];
+  const UInt log2HeightInGroups = g_aucConvertToBit[result.heightInGroups * 4];
+
+  result.scan   = g_scanOrder[ SCAN_GROUPED_4x4 ][ result.scanType ][ log2BlockWidth    ][ log2BlockHeight    ];
+  result.scanCG = g_scanOrder[ SCAN_UNGROUPED   ][ result.scanType ][ log2WidthInGroups ][ log2HeightInGroups ];
+
+  //------------------------------------------------
+
+  //set the significance map context selection parameters
+
+  // single-context mode applies only for transquant-bypass / transform-skip
+  // blocks when the SPS enables it
+  if (pcCU->getSlice()->getSPS()->getUseSingleSignificanceMapContext()
+      && (pcCU->getCUTransquantBypass(uiAbsPartIdx) || (pcCU->getTransformSkip(uiAbsPartIdx, component) != 0)))
+  {
+    result.firstSignificanceMapContext = significanceMapContextSetStart[channelType][CONTEXT_TYPE_SINGLE];
+  }
+  else
+  {
+    // otherwise the context set depends on block size (4x4 / 8x8 / other)
+    if ((area.width == 4) && (area.height == 4))
+    {
+      result.firstSignificanceMapContext = significanceMapContextSetStart[channelType][CONTEXT_TYPE_4x4];
+    }
+    else if ((area.width == 8) && (area.height == 8))
+    {
+      result.firstSignificanceMapContext = significanceMapContextSetStart[channelType][CONTEXT_TYPE_8x8];
+      if (result.scanType != SCAN_DIAG) result.firstSignificanceMapContext += nonDiagonalScan8x8ContextOffset[channelType];
+    }
+    else
+    {
+      result.firstSignificanceMapContext = significanceMapContextSetStart[channelType][CONTEXT_TYPE_NxN];
+    }
+  }
+
+  //------------------------------------------------
+}
+
+
+//----------------------------------------------------------------------------------------------------------------------
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComChromaFormat.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,277 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TCOMCHROMAFORMAT__
+#define __TCOMCHROMAFORMAT__
+
+#include "CommonDef.h"
+#include "TComRectangle.h"
+#include "ContextTables.h"
+#include "TComRom.h"
+#include <iostream>
+#include <vector>
+#include <assert.h>
+#include "Debug.h"
+
+//======================================================================================================================
+//Chroma format utility functions  =====================================================================================
+//======================================================================================================================
+
+class TComDataCU;
+
+
+// --- Basic component/channel classification helpers ----------------------
+// ComponentID addresses an individual plane (Y/Cb/Cr); ChannelType only
+// distinguishes luma from chroma (both chroma planes share one entry).
+static inline ChannelType toChannelType             (const ComponentID id)                         { return (id==COMPONENT_Y)? CHANNEL_TYPE_LUMA : CHANNEL_TYPE_CHROMA; }
+static inline Bool        isLuma                    (const ComponentID id)                         { return (id==COMPONENT_Y);                                          }
+static inline Bool        isLuma                    (const ChannelType id)                         { return (id==CHANNEL_TYPE_LUMA);                                    }
+static inline Bool        isChroma                  (const ComponentID id)                         { return (id!=COMPONENT_Y);                                          }
+static inline Bool        isChroma                  (const ChannelType id)                         { return (id!=CHANNEL_TYPE_LUMA);                                    }
+// Subsampling shift of a channel relative to luma: 4:2:0 halves both axes,
+// 4:2:2 halves only the horizontal axis, 4:4:4 is not subsampled at all.
+static inline UInt        getChannelTypeScaleX      (const ChannelType id, const ChromaFormat fmt) { return (isLuma(id) || (fmt==CHROMA_444)) ? 0 : 1;                  }
+static inline UInt        getChannelTypeScaleY      (const ChannelType id, const ChromaFormat fmt) { return (isLuma(id) || (fmt!=CHROMA_420)) ? 0 : 1;                  }
+static inline UInt        getComponentScaleX        (const ComponentID id, const ChromaFormat fmt) { return getChannelTypeScaleX(toChannelType(id), fmt);               }
+static inline UInt        getComponentScaleY        (const ComponentID id, const ChromaFormat fmt) { return getChannelTypeScaleY(toChannelType(id), fmt);               }
+// 4:0:0 (monochrome) carries only the luma component/channel; every other
+// format carries the full component/channel set.
+static inline UInt        getNumberValidChannelTypes(const ChromaFormat fmt)                       { return (fmt==CHROMA_400) ? 1 : MAX_NUM_CHANNEL_TYPE;               }
+static inline UInt        getNumberValidComponents  (const ChromaFormat fmt)                       { return (fmt==CHROMA_400) ? 1 : MAX_NUM_COMPONENT;                  }
+static inline Bool        isChromaEnabled           (const ChromaFormat fmt)                       { return  fmt!=CHROMA_400;                                           }
+static inline ComponentID getFirstComponentOfChannel(const ChannelType id)                         { return (isLuma(id) ? COMPONENT_Y : COMPONENT_Cb);                  }
+
+InputColourSpaceConversion stringToInputColourSpaceConvert(const std::string &value, const Bool bIsForward);
+std::string getListOfColourSpaceConverts(const Bool bIsForward);
+
+//------------------------------------------------
+
+static inline UInt getTotalSamples(const UInt width, const UInt height, const ChromaFormat format)
+{
+  //Total number of samples over all planes for a picture of the given
+  //dimensions and chroma format: 4:2:0 adds half a luma plane per picture,
+  //4:2:2 adds one full plane, 4:4:4 adds two full planes.
+  const UInt lumaSamples = width * height;
+
+  if (format == CHROMA_400)
+  {
+    return lumaSamples;
+  }
+  else if (format == CHROMA_420)
+  {
+    return (lumaSamples * 3) >> 1;
+  }
+  else if (format == CHROMA_422)
+  {
+    return lumaSamples * 2;
+  }
+  else if (format == CHROMA_444)
+  {
+    return lumaSamples * 3;
+  }
+
+  //Unknown format: report and abort, matching the behaviour of the other
+  //format utilities in this header.
+  std::cerr << "ERROR: Unrecognised chroma format in getTotalSamples()" << std::endl;
+  exit(1);
+
+  return MAX_UINT;
+}
+
+//------------------------------------------------
+
+static inline UInt getTotalBits(const UInt width, const UInt height, const ChromaFormat format, const Int bitDepths[MAX_NUM_CHANNEL_TYPE])
+{
+  //Total number of bits required to store an uncompressed picture of the
+  //given dimensions, chroma format and per-channel-type bit depths.
+  const UInt samplesPerChannel = width * height;
+
+  switch (format)
+  {
+    case CHROMA_400: return  samplesPerChannel *  bitDepths[CHANNEL_TYPE_LUMA];                                              break;
+    case CHROMA_420: return (samplesPerChannel * (bitDepths[CHANNEL_TYPE_LUMA]*2 +   bitDepths[CHANNEL_TYPE_CHROMA]) ) >> 1; break;
+    case CHROMA_422: return  samplesPerChannel * (bitDepths[CHANNEL_TYPE_LUMA]   +   bitDepths[CHANNEL_TYPE_CHROMA]);        break;
+    case CHROMA_444: return  samplesPerChannel * (bitDepths[CHANNEL_TYPE_LUMA]   + 2*bitDepths[CHANNEL_TYPE_CHROMA]);        break;
+    default:
+      //Bug fix: the message previously said "getTotalSamples()" (copy-paste
+      //from the function above), misreporting where the failure occurred.
+      std::cerr << "ERROR: Unrecognised chroma format in getTotalBits()" << std::endl;
+      exit(1);
+      break;
+  }
+
+  return MAX_UINT;
+}
+
+
+//------------------------------------------------
+
+// In HM, a CU only has one chroma intra prediction direction, that corresponds to the top left luma intra prediction
+// even if the NxN PU split occurs when 4 sub-TUs exist for chroma.
+// Use this function to allow NxN PU splitting for chroma.
+
+static inline Bool enable4ChromaPUsInIntraNxNCU(const ChromaFormat chFmt)
+{
+  // Only 4:4:4 has one chroma sample per luma sample, so only there can each
+  // of the four NxN luma PUs carry its own chroma intra prediction direction.
+  return (chFmt == CHROMA_444);
+}
+
+
+//------------------------------------------------
+
+//returns the part index of the luma region that is co-located with the specified chroma region
+
+static inline UInt getChromasCorrespondingPULumaIdx(const UInt lumaZOrderIdxInCtu, const ChromaFormat chFmt)
+{
+  // When chroma NxN PU splitting is disabled, round the Z-order index down to
+  // the start of the containing CU by masking off the low intra-CU part bits,
+  // which is equivalent to (lumaZOrderIdxInCtu/numParts)*numParts.
+  return enable4ChromaPUsInIntraNxNCU(chFmt) ? lumaZOrderIdxInCtu : lumaZOrderIdxInCtu & (~((1<<(2*g_uiAddCUDepth))-1)); //(lumaZOrderIdxInCtu/numParts)*numParts;
+}
+
+//------------------------------------------------
+
+// If chroma format is 4:2:2 and a chroma-square-sub-tu is possible for the smallest TU, then increase the depth by 1 to allow for more parts.
+
+static inline UInt getMaxCUDepthOffset(const ChromaFormat chFmt, const UInt quadtreeTULog2MinSize)
+{
+  //Extra partition-depth needed for 4:2:2 when a square chroma sub-TU can
+  //exist at the smallest TU size (see comment above).
+  if ((chFmt == CHROMA_422) && (quadtreeTULog2MinSize > 2))
+  {
+    return 1;
+  }
+  return 0;
+}
+
+//======================================================================================================================
+//Intra prediction  ====================================================================================================
+//======================================================================================================================
+
+static inline Bool filterIntraReferenceSamples (const ChannelType chType, const ChromaFormat chFmt, const Bool intraReferenceSmoothingDisabled)
+{
+  //Intra reference-sample smoothing applies to luma always, and to chroma
+  //only in 4:4:4 - unless smoothing has been disabled altogether.
+  if (intraReferenceSmoothingDisabled)
+  {
+    return false;
+  }
+  return isLuma(chType) || (chFmt == CHROMA_444);
+}
+
+
+//======================================================================================================================
+//Transform and Quantisation  ==========================================================================================
+//======================================================================================================================
+
+static inline Bool TUCompRectHasAssociatedTransformSkipFlag(const TComRectangle &rectSamples, const UInt transformSkipLog2MaxSize)
+{
+  //A transform-skip flag is only signalled for TUs no wider than the
+  //configured maximum transform-skip size.
+  const UInt maxTransformSkipWidth = 1 << transformSkipLog2MaxSize;
+  return rectSamples.width <= maxTransformSkipWidth;
+}
+
+
+//------------------------------------------------
+
+// Shift applied during (de)quantisation so values fit within the transform
+// dynamic range: maxTrDynamicRange - bitDepth - log2(TU size).
+static inline Int getTransformShift(const ChannelType type, const UInt uiLog2TrSize)
+{
+#if O0043_BEST_EFFORT_DECODING
+  // Best-effort decoding: use the bit depth signalled in the bitstream
+  // rather than the internal coding bit depth.
+  return g_maxTrDynamicRange[type] - g_bitDepthInStream[type] - uiLog2TrSize;
+#else
+  return g_maxTrDynamicRange[type] - g_bitDepth[type] - uiLog2TrSize;
+#endif
+}
+
+
+//------------------------------------------------
+
+// Maps an (unclipped) chroma QP to its scaled value via the per-chroma-format
+// lookup table.
+static inline Int getScaledChromaQP(Int unscaledChromaQP, const ChromaFormat chFmt)
+{
+  // Clip into the mapping table's valid index range before the lookup.
+  return g_aucChromaScale[chFmt][Clip3(0, (chromaQPMappingTableSize - 1), unscaledChromaQP)];
+}
+
+
+//======================================================================================================================
+//Scaling lists  =======================================================================================================
+//======================================================================================================================
+
+static inline Int getScalingListType(const PredMode predMode, const ComponentID compID)
+{
+  //Scaling lists are indexed first by intra/inter group, then by component:
+  //intra lists occupy indices [0, MAX_NUM_COMPONENT), inter lists follow.
+  const Int interOffset = (predMode == MODE_INTER) ? MAX_NUM_COMPONENT : 0;
+  return interOffset + compID;
+}
+
+
+//------------------------------------------------
+
+
+//======================================================================================================================
+//Context variable selection  ==========================================================================================
+//======================================================================================================================
+
+//context variable source tables
+
+//First context index per channel type (luma, chroma) for each syntax-element
+//family; the FIRST_* constants are declared in ContextTables.h (included above).
+static const UInt significanceMapContextStartTable[MAX_NUM_CHANNEL_TYPE] = {FIRST_SIG_FLAG_CTX_LUMA, FIRST_SIG_FLAG_CTX_CHROMA};
+static const UInt contextSetStartTable            [MAX_NUM_CHANNEL_TYPE] = {FIRST_CTX_SET_LUMA,      FIRST_CTX_SET_CHROMA     };
+static const UInt CBFContextStartTable            [MAX_NUM_CHANNEL_TYPE] = {FIRST_CBF_CTX_LUMA,      FIRST_CBF_CTX_CHROMA     };
+
+
+//------------------------------------------------
+
+//Function for last-significant-coefficient context selection parameters
+
+// Derives the context-selection parameters (offsets and shifts) used when
+// coding the last-significant-coefficient (x, y) position of a TU.
+// Chroma uses a simple log2-size-based mapping, while luma uses the
+// finer-grained offsets/shifts defined by the HEVC standard.
+static inline Void getLastSignificantContextParameters (const ComponentID  component,
+                                                        const Int          width,
+                                                        const Int          height,
+                                                              Int         &result_offsetX,
+                                                              Int         &result_offsetY,
+                                                              Int         &result_shiftX,
+                                                              Int         &result_shiftY)
+{
+  // g_aucConvertToBit maps a block size to a log2-based index
+  // (presumably log2(size)-2 - confirm against TComRom).
+  const UInt convertedWidth  = g_aucConvertToBit[width];
+  const UInt convertedHeight = g_aucConvertToBit[height];
+
+  result_offsetX = (isChroma(component)) ? 0               : ((convertedWidth  * 3) + ((convertedWidth  + 1) >> 2));
+  result_offsetY = (isChroma(component)) ? 0               : ((convertedHeight * 3) + ((convertedHeight + 1) >> 2));
+  result_shiftX  = (isChroma(component)) ? convertedWidth  : ((convertedWidth  + 3) >> 2);
+  result_shiftY  = (isChroma(component)) ? convertedHeight : ((convertedHeight + 3) >> 2);
+}
+
+
+//------------------------------------------------
+
+//Function for significance map context index offset selection
+
+static inline UInt getSignificanceMapContextOffset (const ComponentID component)
+{
+  //Base significance-map context index for the component's channel type.
+  const ChannelType channel = toChannelType(component);
+  return significanceMapContextStartTable[channel];
+}
+
+
+//------------------------------------------------
+
+// Function for greater-than-one map/greater-than-two map context set selection
+
+static inline UInt getContextSetIndex (const ComponentID  component,
+                                       const UInt         subsetIndex,
+                                       const Bool         foundACoefficientGreaterThan1)
+{
+  //Select the greater-than-one / greater-than-two context set: start from the
+  //channel's base set, add 1 if a coefficient > 1 was already found, and add
+  //2 for luma subsets after the first.
+  UInt offset = foundACoefficientGreaterThan1 ? 1 : 0;
+
+  if (isLuma(component) && (subsetIndex > 0))
+  {
+    offset += 2;
+  }
+
+  return contextSetStartTable[toChannelType(component)] + offset;
+}
+
+
+//------------------------------------------------
+
+//Function for CBF context index offset
+
+// Returns the base CBF (coded block flag) context index for the channel type
+// (luma or chroma) of the given component.
+static inline UInt getCBFContextOffset (const ComponentID component)
+{
+  return CBFContextStartTable[toChannelType(component)];
+}
+
+
+//======================================================================================================================
+//Entropy coding parameters ============================================================================================
+//======================================================================================================================
+
+Void getTUEntropyCodingParameters(      TUEntropyCodingParameters &result,
+                                  class TComTU                    &rTu,
+                                  const ComponentID                component);
+
+
+//======================================================================================================================
+//End  =================================================================================================================
+//======================================================================================================================
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComCodingStatistics.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,470 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TCOMCODINGSTATISTICS__
+#define __TCOMCODINGSTATISTICS__
+
+#include "TypeDef.h"
+#include <stdio.h>
+#include <string>
+#include <map>
+#include <math.h>
+#include "TComChromaFormat.h"
+
+//Fixed-point scale applied to fractional CABAC bit counts
+//(see TComCodingStatistics::StatLogValue and UpdateCABACStat).
+static const Int64 TCOMCODINGSTATISTICS_ENTROPYSCALE=32768;
+
+
+//Categories of bit-usage statistics gathered by TComCodingStatistics.
+//The order here must match the name table in getName() below, which is
+//guarded by an assert on the table size.
+enum TComCodingStatisticsType
+{
+  STATS__NAL_UNIT_TOTAL_BODY,// This is a special case and is not included in the total sums.
+  STATS__NAL_UNIT_PACKING,
+  STATS__EMULATION_PREVENTION_3_BYTES,
+  STATS__NAL_UNIT_HEADER_BITS,
+  STATS__CABAC_INITIALISATION,
+  STATS__CABAC_BITS__TQ_BYPASS_FLAG,
+  STATS__CABAC_BITS__SKIP_FLAG,
+  STATS__CABAC_BITS__MERGE_FLAG,
+  STATS__CABAC_BITS__MERGE_INDEX,
+  STATS__CABAC_BITS__MVP_IDX,
+  STATS__CABAC_BITS__SPLIT_FLAG,
+  STATS__CABAC_BITS__PART_SIZE,
+  STATS__CABAC_BITS__PRED_MODE,
+  STATS__CABAC_BITS__INTRA_DIR_ANG,
+  STATS__CABAC_BITS__INTER_DIR,
+  STATS__CABAC_BITS__REF_FRM_IDX,
+  STATS__CABAC_BITS__MVD,
+  STATS__CABAC_BITS__MVD_EP,
+  STATS__CABAC_BITS__TRANSFORM_SUBDIV_FLAG,
+  STATS__CABAC_BITS__QT_ROOT_CBF,
+  STATS__CABAC_BITS__DELTA_QP_EP,
+  STATS__CABAC_BITS__CHROMA_QP_ADJUSTMENT,
+  STATS__CABAC_BITS__QT_CBF,
+  STATS__CABAC_BITS__CROSS_COMPONENT_PREDICTION,
+  STATS__CABAC_BITS__TRANSFORM_SKIP_FLAGS,
+
+  STATS__CABAC_BITS__LAST_SIG_X_Y,
+  STATS__CABAC_BITS__SIG_COEFF_GROUP_FLAG,
+  STATS__CABAC_BITS__SIG_COEFF_MAP_FLAG,
+  STATS__CABAC_BITS__GT1_FLAG,
+  STATS__CABAC_BITS__GT2_FLAG,
+  STATS__CABAC_BITS__SIGN_BIT,
+  STATS__CABAC_BITS__ESCAPE_BITS,
+
+  STATS__CABAC_BITS__SAO,
+  STATS__CABAC_TRM_BITS,
+  STATS__CABAC_FIXED_BITS,
+  STATS__CABAC_PCM_ALIGN_BITS,
+  STATS__CABAC_PCM_CODE_BITS,
+  STATS__BYTE_ALIGNMENT_BITS,
+  STATS__TRAILING_BITS,
+  STATS__EXPLICIT_RDPCM_BITS,
+  STATS__CABAC_EP_BIT_ALIGNMENT,
+  STATS__CABAC_BITS__ALIGNED_SIGN_BIT,
+  STATS__CABAC_BITS__ALIGNED_ESCAPE_BITS,
+  //Number of entries; also used as an "unclassified" sentinel row in the
+  //statistics arrays (which are sized STATS__NUM_STATS+1).
+  STATS__NUM_STATS
+};
+
+// Human-readable name of a statistic category.  The table below must stay in
+// one-to-one order with TComCodingStatisticsType; the assert verifies that
+// the table size matches the enum count.
+static inline const Char* getName(TComCodingStatisticsType name)
+{
+  static const Char *statNames[]=
+  {
+    "NAL_UNIT_TOTAL_BODY", // This is a special case and is not included in the total sums.
+    "NAL_UNIT_PACKING",
+    "EMULATION_PREVENTION_3_BYTES",
+    "NAL_UNIT_HEADER_BITS",
+    "CABAC_INITIALISATION-and-rounding",
+    "CABAC_BITS__TQ_BYPASS_FLAG",
+    "CABAC_BITS__SKIP_FLAG",
+    "CABAC_BITS__MERGE_FLAG",
+    "CABAC_BITS__MERGE_INDEX",
+    "CABAC_BITS__MVP_IDX",
+    "CABAC_BITS__SPLIT_FLAG",
+    "CABAC_BITS__PART_SIZE",
+    "CABAC_BITS__PRED_MODE",
+    "CABAC_BITS__INTRA_DIR_ANG",
+    "CABAC_BITS__INTER_DIR",
+    "CABAC_BITS__REF_FRM_IDX",
+    "CABAC_BITS__MVD",
+    "CABAC_BITS__MVD_EP",
+    "CABAC_BITS__TRANSFORM_SUBDIV_FLAG",
+    "CABAC_BITS__QT_ROOT_CBF",
+    "CABAC_BITS__DELTA_QP_EP",
+    "CABAC_BITS__CHROMA_QP_ADJUSTMENT",
+    "CABAC_BITS__QT_CBF",
+    "CABAC_BITS__CROSS_COMPONENT_PREDICTION",
+    "CABAC_BITS__TRANSFORM_SKIP_FLAGS",
+    "CABAC_BITS__LAST_SIG_X_Y",
+    "CABAC_BITS__SIG_COEFF_GROUP_FLAG",
+    "CABAC_BITS__SIG_COEFF_MAP_FLAG",
+    "CABAC_BITS__GT1_FLAG",
+    "CABAC_BITS__GT2_FLAG",
+    "CABAC_BITS__SIGN_BIT",
+    "CABAC_BITS__ESCAPE_BITS",
+    "CABAC_BITS__SAO",
+    "CABAC_TRM_BITS",
+    "CABAC_FIXED_BITS",
+    "CABAC_PCM_ALIGN_BITS",
+    "CABAC_PCM_CODE_BITS",
+    "BYTE_ALIGNMENT_BITS",
+    "TRAILING_BITS",
+    "EXPLICIT_RDPCM_BITS",
+    "CABAC_EP_BIT_ALIGNMENT",
+    "CABAC_BITS__ALIGNED_SIGN_BIT",
+    "CABAC_BITS__ALIGNED_ESCAPE_BITS"
+  };
+  assert(STATS__NUM_STATS == sizeof(statNames)/sizeof(Char *) && name < STATS__NUM_STATS);
+  return statNames[name];
+}
+
+//True for the statistic categories that count CABAC-aligned bins.
+static inline Bool isAlignedBins(TComCodingStatisticsType statT)
+{
+  return (statT == STATS__CABAC_BITS__ALIGNED_SIGN_BIT)
+      || (statT == STATS__CABAC_BITS__ALIGNED_ESCAPE_BITS);
+}
+
+//Each statistic row is split into sub-classes: seven width buckets (by log2
+//of the block width) for each of: unclassified, per-component (Y/Cb/Cr) and
+//per-channel-type (luma/chroma) - see TComCodingStatisticsClassType below.
+static const UInt CODING_STATS_NUM_WIDTHS=7;
+static const UInt CODING_STATS_NUM_SUBCLASSES=CODING_STATS_NUM_WIDTHS*(1+MAX_NUM_COMPONENT+MAX_NUM_CHANNEL_TYPE);
+
+// Key identifying one statistics bucket: a statistic type plus a packed
+// sub-class index.  The sub-class encodes
+//   (category * CODING_STATS_NUM_WIDTHS) + log2(width)
+// where category 0 is "unclassified", 1..MAX_NUM_COMPONENT address a single
+// component (Y/Cb/Cr) and the remaining entries address a channel type
+// (luma/chroma) - matching the strings in GetSubClassString().
+class TComCodingStatisticsClassType
+{
+public:
+
+  // Unclassified (no width, no component/channel information).
+  TComCodingStatisticsClassType(const TComCodingStatisticsType t)
+    : type(t), subClass(0)
+  { }
+  // Classified by block width only (log2w in [0, CODING_STATS_NUM_WIDTHS)).
+  TComCodingStatisticsClassType(const TComCodingStatisticsType t, const UInt log2w )
+    : type(t), subClass(log2w)
+  { }
+  TComCodingStatisticsClassType(const TComCodingStatisticsType t, const Int log2w )
+    : type(t), subClass(log2w)
+  { }
+  // Classified by component (Y/Cb/Cr), width bucket 0.
+  TComCodingStatisticsClassType(const TComCodingStatisticsType t, const ComponentID cid )
+    : type(t), subClass((cid+1)*CODING_STATS_NUM_WIDTHS)
+  { }
+  // Classified by channel type (luma/chroma), width bucket 0.
+  TComCodingStatisticsClassType(const TComCodingStatisticsType t, const ChannelType chid )
+    : type(t), subClass((chid+MAX_NUM_COMPONENT+1)*CODING_STATS_NUM_WIDTHS)
+  { }
+  // Classified by both width and component.
+  TComCodingStatisticsClassType(const TComCodingStatisticsType t, const UInt log2w, const ComponentID cid )
+    : type(t), subClass((cid+1)*CODING_STATS_NUM_WIDTHS + log2w)
+  { }
+  // Classified by both width and channel type.
+  TComCodingStatisticsClassType(const TComCodingStatisticsType t, const UInt log2w, const ChannelType chid )
+    : type(t), subClass((chid+MAX_NUM_COMPONENT+1)*CODING_STATS_NUM_WIDTHS + log2w)
+  { }
+
+  // Extracts the log2(width) bucket from a packed sub-class index.
+  static UInt GetSubClassWidth(const UInt subClass)
+  {
+    return subClass%CODING_STATS_NUM_WIDTHS;
+  }
+
+  // Extracts the category label ("-", "Y", "Cb", "Cr", "Luma" or "Chroma")
+  // from a packed sub-class index.
+  static const Char *GetSubClassString(const UInt subClass)
+  {
+    assert (subClass<CODING_STATS_NUM_SUBCLASSES);
+    static const Char *strings[1+MAX_NUM_COMPONENT+MAX_NUM_CHANNEL_TYPE]={"-", "Y", "Cb", "Cr", "Luma", "Chroma"};
+    return strings[subClass/CODING_STATS_NUM_WIDTHS];
+  }
+
+  TComCodingStatisticsType type;
+  UInt subClass;
+};
+
+
+
+// Singleton that accumulates bit-usage statistics for CABAC-coded and
+// equiprobable (EP) bins, keyed by statistic type and sub-class (block
+// width / component / channel type), plus named CAVLC/header counters.
+// The full report is printed from the destructor, i.e. when the static
+// singleton instance is destroyed at program exit.
+class TComCodingStatistics
+{
+  public:
+
+
+    // Lookup table of log2(i) scaled by TCOMCODINGSTATISTICS_ENTROPYSCALE,
+    // used by UpdateCABACStat() to convert CABAC range transitions into
+    // fractional bit counts without floating-point work per bin.
+    struct StatLogValue
+    {
+      UInt values[512+1];
+      StatLogValue()
+      {
+        const Double es=Double(TCOMCODINGSTATISTICS_ENTROPYSCALE);
+        values[0]=0;
+        for(UInt i=1; i<sizeof(values)/sizeof(UInt); i++)
+        {
+          values[i]=UInt(    log(Double(i))*es/log(2.0)  );
+        }
+      }
+    };
+
+    // One statistics accumulator: bit count, event count and value sum.
+    // For CABAC entries, 'bits' is stored in ENTROPYSCALE fixed-point units
+    // and divided back down when the report is printed.
+    struct SStat
+    {
+      SStat() : bits(0), count(0), sum(0) { }
+      Int64 bits;
+      Int64 count;
+      Int64 sum;
+      Void clear() { bits=0; count=0; sum=0; }
+
+      SStat &operator+=(const SStat &src)
+      { bits+=src.bits; count+=src.count; sum+=src.sum; return *this; }
+    };
+
+    // Complete statistics state.  Copyable, so callers can snapshot and
+    // restore it via GetStatistics()/SetStatistics().
+    class TComCodingStatisticsData
+    {
+      private:
+        SStat statistics[STATS__NUM_STATS+1][CODING_STATS_NUM_SUBCLASSES];
+        SStat statistics_ep[STATS__NUM_STATS+1][CODING_STATS_NUM_SUBCLASSES ];
+        std::map<std::string, SStat> mappings_ep;
+        friend class TComCodingStatistics;
+    };
+
+  private:
+
+    TComCodingStatisticsData data;
+
+    TComCodingStatistics() : data()
+    { }
+
+    // Prints one report row.  A width of 0 means "no width class" and is
+    // rendered as "-".  sep=='~' marks subtotal rows, which are bracketed.
+    // NOTE(review): the %lld specifiers assume Int64 is long long - confirm
+    // for every supported compiler/platform (PRId64 would be portable).
+    static Void OutputLine(const Char *pName, const Char sep, UInt width, const Char *pSubClassStr, const SStat &sCABAC, const SStat &sEP)
+    {
+      if (width==0)
+        OutputLine(pName, sep, "-", pSubClassStr, sCABAC, sEP);
+      else
+        printf("%c%-45s%c  %6d %6s %12lld %12lld %12lld %12lld %12lld %12lld %12lld (%12lld)%c\n",
+          sep=='~'?'[':' ', pName, sep, 1<<width, pSubClassStr,
+              sCABAC.count, sCABAC.sum, sCABAC.bits, sEP.count, sEP.sum, sEP.bits, sCABAC.bits+sEP.bits, (sCABAC.bits+sEP.bits)/8, sep=='~'?']':' ');
+    }
+    // As above, but with an arbitrary string in the width column.
+    static Void OutputLine(const Char *pName, const Char sep, const Char *pWidthString, const Char *pSubClassStr, const SStat &sCABAC, const SStat &sEP)
+    {
+      printf("%c%-45s%c  %6s %6s %12lld %12lld %12lld %12lld %12lld %12lld %12lld (%12lld)%c\n",
+          sep=='~'?'[':' ', pName, sep, pWidthString, pSubClassStr,
+              sCABAC.count, sCABAC.sum, sCABAC.bits, sEP.count, sEP.sum, sEP.bits, sCABAC.bits+sEP.bits, (sCABAC.bits+sEP.bits)/8, sep=='~'?']':' ');
+    }
+    // EP-only row (used for the CAVLC/header counters, which have no CABAC
+    // component); the CABAC columns are left blank.
+    static Void OutputLine(const Char *pName, const Char sep, const Char *pWidthString, const Char *pSubClassStr,  const SStat &sEP)
+    {
+      printf("%c%-45s%c  %6s %6s %12s %12s %12s %12lld %12lld %12lld %12lld (%12lld)%c\n",
+          sep=='~'?'[':' ', pName, sep, pWidthString, pSubClassStr,
+              "", "", "", sEP.count, sEP.sum, sEP.bits, sEP.bits, (sEP.bits)/8, sep=='~'?']':' ');
+    }
+
+    // Prints "--<text>" padded with dashes out to a fixed 168-column rule.
+    static Void OutputDashedLine(const Char *pText)
+    {
+      printf("--%s",pText);
+      UInt tot=0;
+      for(;pText[tot]!=0; tot++);
+      tot+=2;
+      for (; tot<168; tot++)
+      {
+        printf("-");
+      }
+      printf("\n");
+    }
+
+    // Prints the complete statistics report.  Runs when the singleton is
+    // destroyed (program exit).
+    ~TComCodingStatistics()
+    {
+      const Int64 es=TCOMCODINGSTATISTICS_ENTROPYSCALE;
+
+      // Compute the total fixed-point rounding loss across all CABAC
+      // counters and credit it (rounded) to STATS__CABAC_INITIALISATION so
+      // the printed columns still sum to the true total.
+      Int64 cr=0; // CABAC remainder, which is added to "STATS__CABAC_INITIALISATION"
+      {
+        Int64 totalCABACbits=0, roundedCABACbits=0;
+        for(Int i=STATS__NAL_UNIT_PACKING; i<STATS__NUM_STATS; i++)
+        {
+          for(UInt c=0; c<CODING_STATS_NUM_SUBCLASSES; c++)
+          {
+            totalCABACbits+=data.statistics[i][c].bits;
+            roundedCABACbits+=data.statistics[i][c].bits/es;
+          }
+        }
+        Int64 remainder=totalCABACbits - roundedCABACbits*es;
+        cr=(remainder+es/2)/es;
+      }
+
+      printf("Note %s will be excluded from the total as it should be the sum of all the other entries (except for %s)\n", getName(STATS__NAL_UNIT_TOTAL_BODY), getName(STATS__NAL_UNIT_PACKING));
+      printf(" %-45s-   Width   Type  CABAC Count    CABAC Sum   CABAC bits     EP Count       EP Sum      EP bits   Total bits ( Total bytes)\n", "Decoder statistics");
+
+      OutputDashedLine("");
+      SStat cabacTotalBits, epTotalBits;
+      SStat statTotals_cabac[CODING_STATS_NUM_SUBCLASSES];
+      SStat statTotals_ep[CODING_STATS_NUM_SUBCLASSES];
+
+      // Main pass: print every non-empty (type, sub-class) cell, accumulate
+      // per-type sub-totals, grand totals and per-sub-class totals for the
+      // breakdown tables below.
+      for(Int i=0; i<STATS__NUM_STATS; i++)
+      {
+        SStat cabacSubTotal, epSubTotal;
+        Bool bHadClassifiedEntry=false;
+        const Char *pName=getName(TComCodingStatisticsType(i));
+
+        for(UInt c=0; c<CODING_STATS_NUM_SUBCLASSES; c++)
+        {
+          SStat &sCABACorig=data.statistics[i][c];
+          SStat &sEP=data.statistics_ep[i][c];
+
+          if (sCABACorig.bits==0 && sEP.bits==0) continue;
+
+          // Scale the fixed-point CABAC bit count back to whole bits; the
+          // rounding remainder 'cr' is folded into the first non-empty
+          // CABAC_INITIALISATION cell.
+          SStat sCABAC;
+          {
+            Int64 thisCABACbits=sCABACorig.bits/es; if (i==STATS__CABAC_INITIALISATION && sCABACorig.bits!=0) { thisCABACbits+=cr; cr=0; }
+            sCABAC.bits=thisCABACbits; sCABAC.count=sCABACorig.count; sCABAC.sum=sCABACorig.sum;
+          }
+          UInt width=TComCodingStatisticsClassType::GetSubClassWidth(c);
+          OutputLine(pName, ':', width, TComCodingStatisticsClassType::GetSubClassString(c), sCABAC, sEP);
+          cabacSubTotal+=sCABAC;
+          epSubTotal+=sEP;
+          // NAL_UNIT_TOTAL_BODY is excluded from the totals - it already
+          // equals the sum of the other entries (see note printed above).
+          if (i!=STATS__NAL_UNIT_TOTAL_BODY)
+          {
+            cabacTotalBits+=sCABAC;
+            epTotalBits+=sEP;
+            statTotals_cabac[c]+=sCABAC;
+            statTotals_ep[c]+=sEP;
+          }
+          bHadClassifiedEntry=bHadClassifiedEntry||(c!=0);
+        }
+        // Only print a sub-total row when the type had classified entries.
+        if (bHadClassifiedEntry)
+        {
+          OutputLine(pName, '~', "~~ST~~", "~~ST~~", cabacSubTotal, epSubTotal);
+        }
+        if (i==STATS__NAL_UNIT_TOTAL_BODY)
+        {
+          OutputDashedLine("");
+        }
+      }
+      OutputDashedLine("");
+      OutputLine("CABAC Sub-total", '~', "~~ST~~", "~~ST~~", cabacTotalBits, epTotalBits);
+
+      // Named CAVLC/header counters (EP-only).
+      OutputDashedLine("CAVLC HEADER BITS");
+      SStat cavlcTotalBits;
+      for(std::map<std::string, SStat>::iterator it=data.mappings_ep.begin(); it!=data.mappings_ep.end(); it++)
+      {
+        SStat s=it->second;
+        cavlcTotalBits+=s;
+        OutputLine(it->first.c_str(), ':', "-", "-", s);
+      }
+
+      OutputDashedLine("");
+      OutputLine("CAVLC Header Sub-total", '~', "~~ST~~", "~~ST~~", cavlcTotalBits);
+
+      // Now output the breakdowns
+      OutputDashedLine("CABAC Break down by size");
+      for(UInt s=0; s<CODING_STATS_NUM_WIDTHS; s++)
+      {
+        SStat subTotalCabac, subTotalEP;
+        for(UInt c=0; c<CODING_STATS_NUM_SUBCLASSES; c+=CODING_STATS_NUM_WIDTHS)
+        {
+          subTotalCabac+=statTotals_cabac[c+s];
+          subTotalEP+=statTotals_ep[c+s];
+        }
+        if (subTotalCabac.bits!=0 || subTotalEP.bits!=0)
+        {
+          OutputLine("CABAC by size Sub-total", '=', s, "All", subTotalCabac, subTotalEP);
+        }
+      }
+      OutputDashedLine("Break down by component/Channel type");
+      for(UInt c=0; c<CODING_STATS_NUM_SUBCLASSES; c+=CODING_STATS_NUM_WIDTHS)
+      {
+        SStat subTotalCabac, subTotalEP;
+        for(UInt s=0; s<CODING_STATS_NUM_WIDTHS; s++)
+        {
+          subTotalCabac+=statTotals_cabac[c+s];
+          subTotalEP+=statTotals_ep[c+s];
+        }
+        if (subTotalCabac.bits!=0 || subTotalEP.bits!=0)
+        {
+          OutputLine("CABAC by type Sub-total", '=', "-", TComCodingStatisticsClassType::GetSubClassString(c), subTotalCabac, subTotalEP);
+        }
+      }
+      OutputDashedLine("Break down by size and component/Channel type");
+      for(UInt c=0; c<CODING_STATS_NUM_SUBCLASSES; c+=CODING_STATS_NUM_WIDTHS)
+      {
+        for(UInt s=0; s<CODING_STATS_NUM_WIDTHS; s++)
+        {
+          SStat subTotalCabac, subTotalEP;
+          subTotalCabac+=statTotals_cabac[c+s];
+          subTotalEP+=statTotals_ep[c+s];
+          if (subTotalCabac.bits!=0 || subTotalEP.bits!=0)
+          {
+            OutputLine("CABAC by size and type Sub-total", '=', s, TComCodingStatisticsClassType::GetSubClassString(c), subTotalCabac, subTotalEP);
+          }
+        }
+      }
+
+      OutputDashedLine("");
+      OutputLine("CABAC Sub-total", '~', "~~ST~~", "~~ST~~", cabacTotalBits, epTotalBits);
+      OutputLine("CAVLC Header Sub-total", '~', "~~ST~~", "~~ST~~", cavlcTotalBits);
+      OutputDashedLine("GRAND TOTAL");
+      epTotalBits+=cavlcTotalBits;
+      OutputLine("TOTAL", '~', "~~GT~~", "~~GT~~", cabacTotalBits, epTotalBits);
+    }
+
+
+  public:
+    // Meyers singleton: constructed on first use, destroyed (printing the
+    // report) at program exit.
+    static TComCodingStatistics& GetSingletonInstance()
+    {
+      static TComCodingStatistics inst;
+      return inst;
+    }
+
+    // Snapshot / restore the complete statistics state.
+    static const TComCodingStatisticsData &GetStatistics()         { return GetSingletonInstance().data; }
+    static Void SetStatistics(const TComCodingStatisticsData &src) { GetSingletonInstance().data=src; }
+
+    // Accessors for equiprobable-bin accumulators, either by classified
+    // statistic or by free-form name (used for CAVLC/header syntax).
+    static SStat &GetStatisticEP(const TComCodingStatisticsClassType &stat) { return GetSingletonInstance().data.statistics_ep[stat.type][stat.subClass]; }
+
+    static SStat &GetStatisticEP(const std::string &str) { return GetSingletonInstance().data.mappings_ep[str]; }
+
+    static SStat &GetStatisticEP(const Char *pKey) {return GetStatisticEP(std::string(pKey)); }
+
+    // Record 'numBits' EP bits (and the decoded value) against a bucket.
+    static Void IncrementStatisticEP(const TComCodingStatisticsClassType &stat, const Int numBits, const Int value)
+    {
+      SStat &s=GetStatisticEP(stat);
+      s.bits+=numBits;
+      s.count++;
+      s.sum+=value;
+    }
+
+    static Void IncrementStatisticEP(const std::string &str, const Int numBits, const Int value)
+    {
+      SStat &s=GetStatisticEP(str);
+      s.bits+=numBits;
+      s.count++;
+      s.sum+=value;
+    }
+
+    static Void IncrementStatisticEP(const Char *pKey, const Int numBits, const Int value)
+    {
+      SStat &s=GetStatisticEP(pKey);
+      s.bits+=numBits;
+      s.count++;
+      s.sum+=value;
+    }
+
+    // Precomputed scaled-log2 table shared by UpdateCABACStat().
+    StatLogValue values;
+
+    static Void UpdateCABACStat(const TComCodingStatisticsClassType &stat, UInt uiRangeBefore, UInt uiRangeAfter, Int val)
+    {
+      TComCodingStatistics &inst=GetSingletonInstance();
+      // doing rangeBefore*p(x)=rangeAfter
+      // p(x)=rangeAfter/rangeBefore
+      // entropy = -log2(p(x))=-log(p(x))/log(2) = -(log rangeAfter - log rangeBefore) / log(2) = (log rangeBefore / log 2 - log rangeAfter / log 2)
+      // The result is accumulated in ENTROPYSCALE fixed-point units.
+      SStat &s=inst.data.statistics[stat.type][stat.subClass];
+      s.bits+=inst.values.values[uiRangeBefore]-inst.values.values[uiRangeAfter];
+      s.count++;
+      s.sum+=val;
+    }
+};
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComDataCU.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,3354 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComDataCU.cpp
+    \brief    CU data structure
+    \todo     not all entities are documented
+*/
+
+#include "TComDataCU.h"
+#include "TComTU.h"
+#include "TComPic.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// Definition of the static per-component global ARL (adaptive reconstruction
+// level) coefficient buffers. They are lazily allocated in create() when
+// bGlobalRMARLBuffer is set and shared by all TComDataCU instances.
+#if ADAPTIVE_QP_SELECTION
+  TCoeff * TComDataCU::m_pcGlbArlCoeff[MAX_NUM_COMPONENT] = { NULL, NULL, NULL };
+#endif
+
+// ====================================================================================================================
+// Constructor / destructor / create / destroy
+// ====================================================================================================================
+
+// Default constructor: NULL-initialise every per-partition buffer pointer and
+// neighbour-CTU pointer. No memory is allocated here — allocation happens in
+// create(); the matching release is destroy().
+TComDataCU::TComDataCU()
+{
+  m_pcPic              = NULL;
+  m_pcSlice            = NULL;
+  m_puhDepth           = NULL;
+
+  m_skipFlag           = NULL;
+
+  m_pePartSize         = NULL;
+  m_pePredMode         = NULL;
+  m_CUTransquantBypass = NULL;
+  m_puhWidth           = NULL;
+  m_puhHeight          = NULL;
+  m_phQP               = NULL;
+  m_ChromaQpAdj        = NULL;
+  m_pbMergeFlag        = NULL;
+  m_puhMergeIndex      = NULL;
+  // One intra-direction array per channel type (luma / chroma).
+  for(UInt i=0; i<MAX_NUM_CHANNEL_TYPE; i++)
+  {
+    m_puhIntraDir[i]     = NULL;
+  }
+  m_puhInterDir        = NULL;
+  m_puhTrIdx           = NULL;
+
+  // Per-component (Y/Cb/Cr) buffers.
+  for (UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+  {
+    m_puhCbf[comp]                        = NULL;
+    m_crossComponentPredictionAlpha[comp] = NULL;
+    m_puhTransformSkip[comp]              = NULL;
+    m_pcTrCoeff[comp]                     = NULL;
+#if ADAPTIVE_QP_SELECTION
+    m_pcArlCoeff[comp]                    = NULL;
+#endif
+    m_pcIPCMSample[comp]                  = NULL;
+    m_explicitRdpcmMode[comp]             = NULL;
+  }
+#if ADAPTIVE_QP_SELECTION
+  // False until create() aliases m_pcArlCoeff to the shared global buffer.
+  m_ArlCoeffIsAliasedAllocation = false;
+#endif
+  m_pbIPCMFlag         = NULL;
+
+  m_pCtuAboveLeft      = NULL;
+  m_pCtuAboveRight     = NULL;
+  m_pCtuAbove          = NULL;
+  m_pCtuLeft           = NULL;
+
+  // Per-reference-picture-list (L0/L1) data.
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_apcCUColocated[i]  = NULL;
+    m_apiMVPIdx[i]       = NULL;
+    m_apiMVPNum[i]       = NULL;
+  }
+
+  m_bDecSubCu          = false;
+}
+
+// Destructor is intentionally empty: buffers are released explicitly through
+// destroy(), not on destruction.
+TComDataCU::~TComDataCU()
+{
+}
+
+// Allocate all per-partition and per-component buffers for a CU covering
+// uiWidth x uiHeight luma samples split into uiNumPartition minimal units.
+// When bDecSubCu is true this CU is a decoder sub-CU view: no buffers are
+// allocated here (they are aliased later, e.g. by copySubCU), only the MV
+// field partition count is set. Pair with destroy().
+Void TComDataCU::create( ChromaFormat chromaFormatIDC, UInt uiNumPartition, UInt uiWidth, UInt uiHeight, Bool bDecSubCu, Int unitSize
+#if ADAPTIVE_QP_SELECTION
+                        , Bool bGlobalRMARLBuffer
+#endif
+                        )
+{
+  m_bDecSubCu = bDecSubCu;
+
+  m_pcPic              = NULL;
+  m_pcSlice            = NULL;
+  m_uiNumPartition     = uiNumPartition;
+  m_unitSize = unitSize;
+
+  if ( !bDecSubCu )
+  {
+    m_phQP               = (Char*     )xMalloc(Char,     uiNumPartition);
+    m_puhDepth           = (UChar*    )xMalloc(UChar,    uiNumPartition);
+    m_puhWidth           = (UChar*    )xMalloc(UChar,    uiNumPartition);
+    m_puhHeight          = (UChar*    )xMalloc(UChar,    uiNumPartition);
+
+    m_ChromaQpAdj        = new UChar[ uiNumPartition ];
+    m_skipFlag           = new Bool[ uiNumPartition ];
+    m_pePartSize         = new Char[ uiNumPartition ];
+    // NUMBER_OF_PART_SIZES acts as the "unset" sentinel for partition size.
+    memset( m_pePartSize, NUMBER_OF_PART_SIZES,uiNumPartition * sizeof( *m_pePartSize ) );
+    m_pePredMode         = new Char[ uiNumPartition ];
+    m_CUTransquantBypass = new Bool[ uiNumPartition ];
+
+    m_pbMergeFlag        = (Bool*  )xMalloc(Bool,   uiNumPartition);
+    m_puhMergeIndex      = (UChar* )xMalloc(UChar,  uiNumPartition);
+
+    for (UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+    {
+      m_puhIntraDir[ch] = (UChar* )xMalloc(UChar,  uiNumPartition);
+    }
+    m_puhInterDir        = (UChar* )xMalloc(UChar,  uiNumPartition);
+
+    m_puhTrIdx           = (UChar* )xMalloc(UChar,  uiNumPartition);
+
+    for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+    {
+      const RefPicList rpl=RefPicList(i);
+      m_apiMVPIdx[rpl]       = new Char[ uiNumPartition ];
+      m_apiMVPNum[rpl]       = new Char[ uiNumPartition ];
+      // -1 marks "no MVP index assigned yet".
+      memset( m_apiMVPIdx[rpl], -1,uiNumPartition * sizeof( Char ) );
+    }
+
+    for (UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+    {
+      const ComponentID compID = ComponentID(comp);
+      // Chroma planes are subsampled: scale the sample count accordingly.
+      const UInt chromaShift = getComponentScaleX(compID, chromaFormatIDC) + getComponentScaleY(compID, chromaFormatIDC);
+      const UInt totalSize   = (uiWidth * uiHeight) >> chromaShift;
+
+      m_crossComponentPredictionAlpha[compID] = (Char*  )xMalloc(Char,   uiNumPartition);
+      m_puhTransformSkip[compID]              = (UChar* )xMalloc(UChar,  uiNumPartition);
+      m_explicitRdpcmMode[compID]             = (UChar* )xMalloc(UChar,  uiNumPartition);
+      m_puhCbf[compID]                        = (UChar* )xMalloc(UChar,  uiNumPartition);
+      m_pcTrCoeff[compID]                     = (TCoeff*)xMalloc(TCoeff, totalSize);
+      memset( m_pcTrCoeff[compID], 0, (totalSize * sizeof( TCoeff )) );
+
+#if ADAPTIVE_QP_SELECTION
+      if( bGlobalRMARLBuffer )
+      {
+        // Share one static ARL buffer across all CU instances (lazy-allocated).
+        if (m_pcGlbArlCoeff[compID] == NULL) m_pcGlbArlCoeff[compID] = (TCoeff*)xMalloc(TCoeff, totalSize);
+
+        m_pcArlCoeff[compID] = m_pcGlbArlCoeff[compID];
+        m_ArlCoeffIsAliasedAllocation = true;
+      }
+      else
+      {
+         m_pcArlCoeff[compID] = (TCoeff*)xMalloc(TCoeff, totalSize);
+      }
+#endif
+      m_pcIPCMSample[compID] = (Pel*   )xMalloc(Pel , totalSize);
+    }
+
+    m_pbIPCMFlag         = (Bool*  )xMalloc(Bool, uiNumPartition);
+
+    // Create motion vector fields (one per reference list).
+    for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+    {
+      m_acCUMvField[i].create( uiNumPartition );
+    }
+
+  }
+  else
+  {
+    // Decoder sub-CU: MV fields will alias a parent CU; only record the size.
+    for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+    {
+      m_acCUMvField[i].setNumPartition(uiNumPartition );
+    }
+  }
+
+  // Neighbour/colocated CTU pointers are set later (initCtu / copySubCU).
+
+  m_pCtuAboveLeft      = NULL;
+  m_pCtuAboveRight     = NULL;
+  m_pCtuAbove          = NULL;
+  m_pCtuLeft           = NULL;
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_apcCUColocated[i]  = NULL;
+  }
+}
+
+// Release every buffer allocated by create() and reset all pointers.
+// For decoder sub-CUs (m_bDecSubCu true) the buffers are aliases into a
+// parent CU, so nothing is freed — only the pointers are cleared.
+Void TComDataCU::destroy()
+{
+  // encoder-side buffer free
+  if ( !m_bDecSubCu )
+  {
+    if ( m_phQP               ) { xFree(m_phQP);                m_phQP               = NULL; }
+    if ( m_puhDepth           ) { xFree(m_puhDepth);            m_puhDepth           = NULL; }
+    if ( m_puhWidth           ) { xFree(m_puhWidth);            m_puhWidth           = NULL; }
+    if ( m_puhHeight          ) { xFree(m_puhHeight);           m_puhHeight          = NULL; }
+
+    if ( m_skipFlag           ) { delete[] m_skipFlag;          m_skipFlag          = NULL; }
+
+    if ( m_pePartSize         ) { delete[] m_pePartSize;        m_pePartSize         = NULL; }
+    if ( m_pePredMode         ) { delete[] m_pePredMode;        m_pePredMode         = NULL; }
+    if ( m_ChromaQpAdj        ) { delete[] m_ChromaQpAdj;       m_ChromaQpAdj        = NULL; }
+    if ( m_CUTransquantBypass ) { delete[] m_CUTransquantBypass;m_CUTransquantBypass = NULL; }
+    if ( m_puhInterDir        ) { xFree(m_puhInterDir);         m_puhInterDir        = NULL; }
+    if ( m_pbMergeFlag        ) { xFree(m_pbMergeFlag);         m_pbMergeFlag        = NULL; }
+    if ( m_puhMergeIndex      ) { xFree(m_puhMergeIndex);       m_puhMergeIndex      = NULL; }
+
+    for (UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+    {
+      xFree(m_puhIntraDir[ch]);
+      m_puhIntraDir[ch] = NULL;
+    }
+
+    if ( m_puhTrIdx           ) { xFree(m_puhTrIdx);            m_puhTrIdx          = NULL; }
+
+    for (UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+    {
+      if ( m_crossComponentPredictionAlpha[comp] ) { xFree(m_crossComponentPredictionAlpha[comp]); m_crossComponentPredictionAlpha[comp] = NULL; }
+      if ( m_puhTransformSkip[comp]              ) { xFree(m_puhTransformSkip[comp]);              m_puhTransformSkip[comp]              = NULL; }
+      if ( m_puhCbf[comp]                        ) { xFree(m_puhCbf[comp]);                        m_puhCbf[comp]                        = NULL; }
+      if ( m_pcTrCoeff[comp]                     ) { xFree(m_pcTrCoeff[comp]);                     m_pcTrCoeff[comp]                     = NULL; }
+      if ( m_explicitRdpcmMode[comp]             ) { xFree(m_explicitRdpcmMode[comp]);             m_explicitRdpcmMode[comp]             = NULL; }
+
+#if ADAPTIVE_QP_SELECTION
+      // Only free the per-instance ARL buffer; an aliased buffer points at
+      // the shared static storage freed below.
+      if (!m_ArlCoeffIsAliasedAllocation)
+      {
+        if ( m_pcArlCoeff[comp]     ) { xFree(m_pcArlCoeff[comp]);      m_pcArlCoeff[comp]    = NULL; }
+      }
+
+      // NOTE(review): this frees the *static* shared buffer from an instance
+      // method; presumably safe only because the pointer is nulled and later
+      // instances re-allocate in create() — confirm destroy order if multiple
+      // CU instances coexist.
+      if ( m_pcGlbArlCoeff[comp]  ) { xFree(m_pcGlbArlCoeff[comp]);   m_pcGlbArlCoeff[comp] = NULL; }
+#endif
+
+      if ( m_pcIPCMSample[comp]   ) { xFree(m_pcIPCMSample[comp]);    m_pcIPCMSample[comp]  = NULL; }
+    }
+    if ( m_pbIPCMFlag         ) { xFree(m_pbIPCMFlag   );       m_pbIPCMFlag        = NULL; }
+
+    for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+    {
+      const RefPicList rpl=RefPicList(i);
+      if ( m_apiMVPIdx[rpl]       ) { delete[] m_apiMVPIdx[rpl];      m_apiMVPIdx[rpl]      = NULL; }
+      if ( m_apiMVPNum[rpl]       ) { delete[] m_apiMVPNum[rpl];      m_apiMVPNum[rpl]      = NULL; }
+    }
+
+    for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+    {
+      const RefPicList rpl=RefPicList(i);
+      m_acCUMvField[rpl].destroy();
+    }
+  }
+
+  m_pcPic              = NULL;
+  m_pcSlice            = NULL;
+
+  m_pCtuAboveLeft      = NULL;
+  m_pCtuAboveRight     = NULL;
+  m_pCtuAbove          = NULL;
+  m_pCtuLeft           = NULL;
+
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_apcCUColocated[i]  = NULL;
+  }
+
+}
+
+// Returns true if pCU (may be NULL) is valid and its CTU lies in the same
+// tile as this CU, judged by the picture's tile-index map.
+Bool TComDataCU::CUIsFromSameTile            ( const TComDataCU *pCU /* Can be NULL */) const
+{
+  return pCU!=NULL &&
+         pCU->getSlice() != NULL &&
+         m_pcPic->getPicSym()->getTileIdxMap( pCU->getCtuRsAddr() ) == m_pcPic->getPicSym()->getTileIdxMap(getCtuRsAddr());
+}
+
+// Returns true if pCU (may be NULL) belongs to both the same slice (compared
+// via the slice's starting CTU address) and the same tile as this CU.
+Bool TComDataCU::CUIsFromSameSliceAndTile    ( const TComDataCU *pCU /* Can be NULL */) const
+{
+  return pCU!=NULL &&
+         pCU->getSlice() != NULL &&
+         pCU->getSlice()->getSliceCurStartCtuTsAddr() == getSlice()->getSliceCurStartCtuTsAddr() &&
+         m_pcPic->getPicSym()->getTileIdxMap( pCU->getCtuRsAddr() ) == m_pcPic->getPicSym()->getTileIdxMap(getCtuRsAddr())
+         ;
+}
+
+// Same-slice-and-tile test, additionally requiring the same CTU row when
+// wavefront parallel processing (entropy coding sync) is enabled in the PPS.
+Bool TComDataCU::CUIsFromSameSliceTileAndWavefrontRow( const TComDataCU *pCU /* Can be NULL */) const
+{
+  return CUIsFromSameSliceAndTile(pCU)
+         && (!getSlice()->getPPS()->getEntropyCodingSyncEnabledFlag() || getPic()->getCtu(getCtuRsAddr())->getCUPelY() == getPic()->getCtu(pCU->getCtuRsAddr())->getCUPelY());
+}
+
+// Returns true if the sub-CU at absPartIdx ends exactly on a CTU-granularity
+// boundary (or the picture edge) in both x and y — i.e. it is the last
+// sub-CU of its CTU.
+Bool TComDataCU::isLastSubCUOfCtu(const UInt absPartIdx)
+{
+  TComPic* pcPic = getPic();
+  TComSlice * pcSlice = pcPic->getSlice(pcPic->getCurrSliceIdx());
+
+  const UInt picWidth = pcSlice->getSPS()->getPicWidthInLumaSamples();
+  const UInt picHeight = pcSlice->getSPS()->getPicHeightInLumaSamples();
+  // Granularity is the maximum CU width (used for both axes below).
+  const UInt granularityWidth = g_uiMaxCUWidth;
+
+  // Convert z-scan part index to pixel position within the picture.
+  const UInt cuPosX = getCUPelX() + g_auiRasterToPelX[ g_auiZscanToRaster[absPartIdx] ];
+  const UInt cuPosY = getCUPelY() + g_auiRasterToPelY[ g_auiZscanToRaster[absPartIdx] ];
+
+  return (((cuPosX+getWidth( absPartIdx))%granularityWidth==0||(cuPosX+getWidth( absPartIdx)==picWidth ))
+       && ((cuPosY+getHeight(absPartIdx))%granularityWidth==0||(cuPosY+getHeight(absPartIdx)==picHeight)));
+}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+// --------------------------------------------------------------------------------------------------------------------
+// Initialization
+// --------------------------------------------------------------------------------------------------------------------
+
+/**
+ - initialize top-level CU (CTU)
+ - internal buffers are already created
+ - set values before encoding a CU
+ .
+ \param  pcPic      picture (TComPic) class pointer
+ \param  ctuRsAddr  CTU address in raster-scan order
+ */
+Void TComDataCU::initCtu( TComPic* pcPic, UInt ctuRsAddr )
+{
+
+  m_pcPic              = pcPic;
+  m_pcSlice            = pcPic->getSlice(pcPic->getCurrSliceIdx());
+  m_ctuRsAddr          = ctuRsAddr;
+  // Top-left pixel position of this CTU in the picture.
+  m_uiCUPelX           = ( ctuRsAddr % pcPic->getFrameWidthInCtus() ) * g_uiMaxCUWidth;
+  m_uiCUPelY           = ( ctuRsAddr / pcPic->getFrameWidthInCtus() ) * g_uiMaxCUHeight;
+  m_absZIdxInCtu       = 0;
+  m_dTotalCost         = MAX_DOUBLE;
+  m_uiTotalDistortion  = 0;
+  m_uiTotalBits        = 0;
+  m_uiTotalBins        = 0;
+  m_uiNumPartition     = pcPic->getNumPartitionsInCtu();
+
+  memset( m_skipFlag          , false,                      m_uiNumPartition * sizeof( *m_skipFlag ) );
+
+  // NUMBER_OF_* enum values act as "unset" sentinels.
+  memset( m_pePartSize        , NUMBER_OF_PART_SIZES,       m_uiNumPartition * sizeof( *m_pePartSize ) );
+  memset( m_pePredMode        , NUMBER_OF_PREDICTION_MODES, m_uiNumPartition * sizeof( *m_pePredMode ) );
+  memset( m_CUTransquantBypass, false,                      m_uiNumPartition * sizeof( *m_CUTransquantBypass) );
+  memset( m_puhDepth          , 0,                          m_uiNumPartition * sizeof( *m_puhDepth ) );
+  memset( m_puhTrIdx          , 0,                          m_uiNumPartition * sizeof( *m_puhTrIdx ) );
+  // NOTE(review): memset stores only the low byte of g_uiMaxCUWidth/Height —
+  // relies on the max CU size fitting in a UChar (<= 255, typically 64).
+  memset( m_puhWidth          , g_uiMaxCUWidth,             m_uiNumPartition * sizeof( *m_puhWidth ) );
+  memset( m_puhHeight         , g_uiMaxCUHeight,            m_uiNumPartition * sizeof( *m_puhHeight ) );
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    const RefPicList rpl=RefPicList(i);
+    memset( m_apiMVPIdx[rpl]  , -1,                         m_uiNumPartition * sizeof( *m_apiMVPIdx[rpl] ) );
+    memset( m_apiMVPNum[rpl]  , -1,                         m_uiNumPartition * sizeof( *m_apiMVPNum[rpl] ) );
+  }
+  memset( m_phQP              , getSlice()->getSliceQp(),   m_uiNumPartition * sizeof( *m_phQP ) );
+  memset( m_ChromaQpAdj       , 0,                          m_uiNumPartition * sizeof( *m_ChromaQpAdj ) );
+  for(UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+  {
+    memset( m_crossComponentPredictionAlpha[comp] , 0,                     m_uiNumPartition * sizeof( *m_crossComponentPredictionAlpha[comp] ) );
+    memset( m_puhTransformSkip[comp]              , 0,                     m_uiNumPartition * sizeof( *m_puhTransformSkip[comp]) );
+    memset( m_puhCbf[comp]                        , 0,                     m_uiNumPartition * sizeof( *m_puhCbf[comp] ) );
+    memset( m_explicitRdpcmMode[comp]             , NUMBER_OF_RDPCM_MODES, m_uiNumPartition * sizeof( *m_explicitRdpcmMode[comp] ) );
+  }
+  memset( m_pbMergeFlag       , false,                    m_uiNumPartition * sizeof( *m_pbMergeFlag ) );
+  memset( m_puhMergeIndex     , 0,                        m_uiNumPartition * sizeof( *m_puhMergeIndex ) );
+  for (UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    // Luma defaults to DC intra mode; chroma defaults to 0.
+    memset( m_puhIntraDir[ch] , ((ch==0) ? DC_IDX : 0),   m_uiNumPartition * sizeof( *(m_puhIntraDir[ch]) ) );
+  }
+  memset( m_puhInterDir       , 0,                        m_uiNumPartition * sizeof( *m_puhInterDir ) );
+  memset( m_pbIPCMFlag        , false,                    m_uiNumPartition * sizeof( *m_pbIPCMFlag ) );
+
+  const UInt numCoeffY    = g_uiMaxCUWidth*g_uiMaxCUHeight;
+  for (UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+  {
+    const UInt componentShift = m_pcPic->getComponentScaleX(ComponentID(comp)) + m_pcPic->getComponentScaleY(ComponentID(comp));
+    // Shift binds after the multiply: (sizeof(TCoeff)*numCoeffY) >> shift.
+    memset( m_pcTrCoeff[comp], 0, sizeof(TCoeff)* numCoeffY>>componentShift );
+#if ADAPTIVE_QP_SELECTION
+    memset( m_pcArlCoeff[comp], 0, sizeof(TCoeff)* numCoeffY>>componentShift );
+#endif
+  }
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_acCUMvField[i].clearMvField();
+  }
+
+  // Setting neighbor CU
+  m_pCtuLeft        = NULL;
+  m_pCtuAbove       = NULL;
+  m_pCtuAboveLeft   = NULL;
+  m_pCtuAboveRight  = NULL;
+
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_apcCUColocated[i]  = NULL;
+  }
+
+  UInt frameWidthInCtus = pcPic->getFrameWidthInCtus();
+  // Left neighbour exists unless this CTU starts a row.
+  if ( m_ctuRsAddr % frameWidthInCtus )
+  {
+    m_pCtuLeft = pcPic->getCtu( m_ctuRsAddr - 1 );
+  }
+
+  // Above neighbour exists unless this CTU is in the first row.
+  if ( m_ctuRsAddr / frameWidthInCtus )
+  {
+    m_pCtuAbove = pcPic->getCtu( m_ctuRsAddr - frameWidthInCtus );
+  }
+
+  if ( m_pCtuLeft && m_pCtuAbove )
+  {
+    m_pCtuAboveLeft = pcPic->getCtu( m_ctuRsAddr - frameWidthInCtus - 1 );
+  }
+
+  // Above-right exists if there is a row above and we are not in the last column.
+  if ( m_pCtuAbove && ( (m_ctuRsAddr%frameWidthInCtus) < (frameWidthInCtus-1) )  )
+  {
+    m_pCtuAboveRight = pcPic->getCtu( m_ctuRsAddr - frameWidthInCtus + 1 );
+  }
+
+  // Colocated CTU from the first reference picture of each active list.
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    const RefPicList rpl=RefPicList(i);
+    if ( getSlice()->getNumRefIdx( rpl ) > 0 )
+    {
+      m_apcCUColocated[rpl] = getSlice()->getRefPic( rpl, 0)->getCtu( m_ctuRsAddr );
+    }
+  }
+}
+
+
+/** initialize prediction data with enabling sub-CTU-level delta QP
+*\param  uiDepth  depth of the current CU
+*\param  qp     qp for the current CU
+*\param  bTransquantBypass  transquant-bypass flag applied to every partition
+*- set CU width and CU height according to depth
+*- set qp value according to input qp
+*- set last-coded qp value according to input last-coded qp
+*/
+Void TComDataCU::initEstData( const UInt uiDepth, const Int qp, const Bool bTransquantBypass )
+{
+  m_dTotalCost         = MAX_DOUBLE;
+  m_uiTotalDistortion  = 0;
+  m_uiTotalBits        = 0;
+  m_uiTotalBins        = 0;
+
+  // CU dimensions at this depth (halved per depth level).
+  UChar uhWidth  = g_uiMaxCUWidth  >> uiDepth;
+  UChar uhHeight = g_uiMaxCUHeight >> uiDepth;
+
+  // Reset every per-partition field to its "unset" default.
+  for (UInt ui = 0; ui < m_uiNumPartition; ui++)
+  {
+    for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+    {
+      const RefPicList rpl=RefPicList(i);
+      m_apiMVPIdx[rpl][ui]  = -1;
+      m_apiMVPNum[rpl][ui]  = -1;
+    }
+    m_puhDepth  [ui]    = uiDepth;
+    m_puhWidth  [ui]    = uhWidth;
+    m_puhHeight [ui]    = uhHeight;
+    m_puhTrIdx  [ui]    = 0;
+    for(UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+    {
+      m_crossComponentPredictionAlpha[comp][ui] = 0;
+      m_puhTransformSkip             [comp][ui] = 0;
+      m_explicitRdpcmMode            [comp][ui] = NUMBER_OF_RDPCM_MODES;
+    }
+    m_skipFlag[ui]      = false;
+    m_pePartSize[ui]    = NUMBER_OF_PART_SIZES;
+    m_pePredMode[ui]    = NUMBER_OF_PREDICTION_MODES;
+    m_CUTransquantBypass[ui] = bTransquantBypass;
+    m_pbIPCMFlag[ui]    = 0;
+    m_phQP[ui]          = qp;
+    m_ChromaQpAdj[ui]   = 0;
+    m_pbMergeFlag[ui]   = 0;
+    m_puhMergeIndex[ui] = 0;
+
+    for (UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+    {
+      // Luma defaults to DC intra mode; chroma defaults to 0.
+      m_puhIntraDir[ch][ui] = ((ch==0) ? DC_IDX : 0);
+    }
+
+    m_puhInterDir[ui] = 0;
+    for (UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+    {
+      m_puhCbf[comp][ui] = 0;
+    }
+  }
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_acCUMvField[i].clearMvField();
+  }
+
+  const UInt numCoeffY = uhWidth*uhHeight;
+
+  // Clear coefficient and IPCM sample buffers, scaled for chroma subsampling.
+  for (UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+  {
+    const ComponentID component = ComponentID(comp);
+    const UInt numCoeff = numCoeffY >> (getPic()->getComponentScaleX(component) + getPic()->getComponentScaleY(component));
+    memset( m_pcTrCoeff[comp],    0, numCoeff * sizeof( TCoeff ) );
+#if ADAPTIVE_QP_SELECTION
+    memset( m_pcArlCoeff[comp],   0, numCoeff * sizeof( TCoeff ) );
+#endif
+    memset( m_pcIPCMSample[comp], 0, numCoeff * sizeof( Pel) );
+  }
+}
+
+
+// Initialize this CU as one quadrant (uiPartUnitIdx in 0..3) of parent pcCU
+// at the given depth: derive position/size from the parent, reset all
+// per-partition data to defaults, and inherit the parent's neighbour and
+// colocated CTU pointers.
+Void TComDataCU::initSubCU( TComDataCU* pcCU, UInt uiPartUnitIdx, UInt uiDepth, Int qp )
+{
+  assert( uiPartUnitIdx<4 );
+
+  // Z-scan offset of this quadrant within the parent.
+  UInt uiPartOffset = ( pcCU->getTotalNumPart()>>2 )*uiPartUnitIdx;
+
+  m_pcPic              = pcCU->getPic();
+  m_pcSlice            = m_pcPic->getSlice(m_pcPic->getCurrSliceIdx());
+  m_ctuRsAddr          = pcCU->getCtuRsAddr();
+  m_absZIdxInCtu       = pcCU->getZorderIdxInCtu() + uiPartOffset;
+
+  // Quadrant position: bit 0 of uiPartUnitIdx selects x, bit 1 selects y.
+  m_uiCUPelX           = pcCU->getCUPelX() + ( g_uiMaxCUWidth>>uiDepth  )*( uiPartUnitIdx &  1 );
+  m_uiCUPelY           = pcCU->getCUPelY() + ( g_uiMaxCUHeight>>uiDepth  )*( uiPartUnitIdx >> 1 );
+
+  m_dTotalCost         = MAX_DOUBLE;
+  m_uiTotalDistortion  = 0;
+  m_uiTotalBits        = 0;
+  m_uiTotalBins        = 0;
+  m_uiNumPartition     = pcCU->getTotalNumPart() >> 2;
+
+  Int iSizeInUchar = sizeof( UChar  ) * m_uiNumPartition;
+  Int iSizeInBool  = sizeof( Bool   ) * m_uiNumPartition;
+  Int sizeInChar = sizeof( Char  ) * m_uiNumPartition;
+
+  memset( m_phQP,              qp,  sizeInChar );
+  memset( m_pbMergeFlag,        0, iSizeInBool  );
+  memset( m_puhMergeIndex,      0, iSizeInUchar );
+  for (UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    // Luma defaults to DC intra mode; chroma defaults to 0.
+    memset( m_puhIntraDir[ch],  ((ch==0) ? DC_IDX : 0), iSizeInUchar );
+  }
+
+  memset( m_puhInterDir,        0, iSizeInUchar );
+  memset( m_puhTrIdx,           0, iSizeInUchar );
+
+  for(UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+  {
+    memset( m_crossComponentPredictionAlpha[comp], 0, iSizeInUchar );
+    memset( m_puhTransformSkip[comp],              0, iSizeInUchar );
+    memset( m_puhCbf[comp],                        0, iSizeInUchar );
+    memset( m_explicitRdpcmMode[comp],             NUMBER_OF_RDPCM_MODES, iSizeInUchar );
+  }
+
+  memset( m_puhDepth,     uiDepth, iSizeInUchar );
+
+  UChar uhWidth  = g_uiMaxCUWidth  >> uiDepth;
+  UChar uhHeight = g_uiMaxCUHeight >> uiDepth;
+  memset( m_puhWidth,          uhWidth,  iSizeInUchar );
+  memset( m_puhHeight,         uhHeight, iSizeInUchar );
+  memset( m_pbIPCMFlag,        0, iSizeInBool  );
+  for (UInt ui = 0; ui < m_uiNumPartition; ui++)
+  {
+    m_skipFlag[ui]   = false;
+    m_pePartSize[ui] = NUMBER_OF_PART_SIZES;
+    m_pePredMode[ui] = NUMBER_OF_PREDICTION_MODES;
+    m_CUTransquantBypass[ui] = false;
+    m_ChromaQpAdj[ui] = 0;
+
+    for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+    {
+      const RefPicList rpl=RefPicList(i);
+      m_apiMVPIdx[rpl][ui] = -1;
+      m_apiMVPNum[rpl][ui] = -1;
+    }
+  }
+
+  // Clear coefficient and IPCM sample buffers, scaled for chroma subsampling.
+  const UInt numCoeffY    = uhWidth*uhHeight;
+  for (UInt ch=0; ch<MAX_NUM_COMPONENT; ch++)
+  {
+    const UInt componentShift = m_pcPic->getComponentScaleX(ComponentID(ch)) + m_pcPic->getComponentScaleY(ComponentID(ch));
+    memset( m_pcTrCoeff[ch],  0, sizeof(TCoeff)*(numCoeffY>>componentShift) );
+#if ADAPTIVE_QP_SELECTION
+    memset( m_pcArlCoeff[ch], 0, sizeof(TCoeff)*(numCoeffY>>componentShift) );
+#endif
+    memset( m_pcIPCMSample[ch], 0, sizeof(Pel)* (numCoeffY>>componentShift) );
+  }
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_acCUMvField[i].clearMvField();
+  }
+
+  // Inherit neighbour and colocated CTU pointers from the parent CU.
+  m_pCtuLeft        = pcCU->getCtuLeft();
+  m_pCtuAbove       = pcCU->getCtuAbove();
+  m_pCtuAboveLeft   = pcCU->getCtuAboveLeft();
+  m_pCtuAboveRight  = pcCU->getCtuAboveRight();
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_apcCUColocated[i] = pcCU->getCUColocated(RefPicList(i));
+  }
+}
+
+// Mark the partitions starting at uiAbsPartIdx with the depth/width/height
+// of a CU at uiDepth; used for parts lying outside the picture boundary.
+Void TComDataCU::setOutsideCUPart( UInt uiAbsPartIdx, UInt uiDepth )
+{
+  // A CU at depth d covers m_uiNumPartition >> (2*d) minimal partitions.
+  UInt uiNumPartition = m_uiNumPartition >> (uiDepth << 1);
+  UInt uiSizeInUchar = sizeof( UChar  ) * uiNumPartition;
+
+  UChar uhWidth  = g_uiMaxCUWidth  >> uiDepth;
+  UChar uhHeight = g_uiMaxCUHeight >> uiDepth;
+  memset( m_puhDepth    + uiAbsPartIdx,     uiDepth,  uiSizeInUchar );
+  memset( m_puhWidth    + uiAbsPartIdx,     uhWidth,  uiSizeInUchar );
+  memset( m_puhHeight   + uiAbsPartIdx,     uhHeight, uiSizeInUchar );
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Copy
+// --------------------------------------------------------------------------------------------------------------------
+
+// Make this CU an *alias* (pointer-offset view) into pcCU's buffers at
+// uiAbsPartIdx — no data is copied, so writes through this CU modify pcCU.
+// NOTE(review): the uiDepth parameter is not used in this body — confirm
+// against the declaration whether it is vestigial.
+Void TComDataCU::copySubCU( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  UInt uiPart = uiAbsPartIdx;
+
+  m_pcPic              = pcCU->getPic();
+  m_pcSlice            = pcCU->getSlice();
+  m_ctuRsAddr          = pcCU->getCtuRsAddr();
+  m_absZIdxInCtu       = uiAbsPartIdx;
+
+  // Pixel position derived from the z-scan part index.
+  m_uiCUPelX           = pcCU->getCUPelX() + g_auiRasterToPelX[ g_auiZscanToRaster[uiAbsPartIdx] ];
+  m_uiCUPelY           = pcCU->getCUPelY() + g_auiRasterToPelY[ g_auiZscanToRaster[uiAbsPartIdx] ];
+
+  // All per-partition arrays below alias the parent's storage at offset uiPart.
+  m_skipFlag=pcCU->getSkipFlag()          + uiPart;
+
+  m_phQP=pcCU->getQP()                    + uiPart;
+  m_ChromaQpAdj = pcCU->getChromaQpAdj()  + uiPart;
+  m_pePartSize = pcCU->getPartitionSize() + uiPart;
+  m_pePredMode=pcCU->getPredictionMode()  + uiPart;
+  m_CUTransquantBypass  = pcCU->getCUTransquantBypass()+uiPart;
+
+  m_pbMergeFlag         = pcCU->getMergeFlag()        + uiPart;
+  m_puhMergeIndex       = pcCU->getMergeIndex()       + uiPart;
+
+  for (UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    m_puhIntraDir[ch]   = pcCU->getIntraDir(ChannelType(ch)) + uiPart;
+  }
+
+  m_puhInterDir         = pcCU->getInterDir()         + uiPart;
+  m_puhTrIdx            = pcCU->getTransformIdx()     + uiPart;
+
+  for(UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+  {
+    m_crossComponentPredictionAlpha[comp] = pcCU->getCrossComponentPredictionAlpha(ComponentID(comp)) + uiPart;
+    m_puhTransformSkip[comp]              = pcCU->getTransformSkip(ComponentID(comp))                 + uiPart;
+    m_puhCbf[comp]                        = pcCU->getCbf(ComponentID(comp))                           + uiPart;
+    m_explicitRdpcmMode[comp]             = pcCU->getExplicitRdpcmMode(ComponentID(comp))             + uiPart;
+  }
+
+  m_puhDepth=pcCU->getDepth()                     + uiPart;
+  m_puhWidth=pcCU->getWidth()                     + uiPart;
+  m_puhHeight=pcCU->getHeight()                   + uiPart;
+
+  m_pbIPCMFlag         = pcCU->getIPCMFlag()        + uiPart;
+
+  // Neighbour/colocated pointers are shared with the parent CTU.
+  m_pCtuAboveLeft      = pcCU->getCtuAboveLeft();
+  m_pCtuAboveRight     = pcCU->getCtuAboveRight();
+  m_pCtuAbove          = pcCU->getCtuAbove();
+  m_pCtuLeft           = pcCU->getCtuLeft();
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    const RefPicList rpl=RefPicList(i);
+    m_apcCUColocated[rpl] = pcCU->getCUColocated(rpl);
+    m_apiMVPIdx[rpl]=pcCU->getMVPIdx(rpl)  + uiPart;
+    m_apiMVPNum[rpl]=pcCU->getMVPNum(rpl)  + uiPart;
+  }
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    const RefPicList rpl=RefPicList(i);
+    m_acCUMvField[rpl].linkToWithOffset( pcCU->getCUMvField(rpl), uiPart );
+  }
+
+  UInt uiMaxCuWidth=pcCU->getSlice()->getSPS()->getMaxCUWidth();
+  UInt uiMaxCuHeight=pcCU->getSlice()->getSPS()->getMaxCUHeight();
+
+  // Coefficient offset: samples per partition times the partition index.
+  UInt uiCoffOffset = uiMaxCuWidth*uiMaxCuHeight*uiAbsPartIdx/pcCU->getPic()->getNumPartitionsInCtu();
+
+  for (UInt ch=0; ch<MAX_NUM_COMPONENT; ch++)
+  {
+    const ComponentID component = ComponentID(ch);
+    const UInt componentShift   = m_pcPic->getComponentScaleX(component) + m_pcPic->getComponentScaleY(component);
+    const UInt offset           = uiCoffOffset >> componentShift;
+    m_pcTrCoeff[ch] = pcCU->getCoeff(component) + offset;
+#if ADAPTIVE_QP_SELECTION
+    m_pcArlCoeff[ch] = pcCU->getArlCoeff(component) + offset;
+#endif
+    m_pcIPCMSample[ch] = pcCU->getPCMSample(component) + offset;
+  }
+}
+
+// Copy inter prediction info from the biggest CU: like copySubCU, this sets
+// up pointer *aliases* into pcCU's buffers (no data copy), but only for the
+// fields needed for inter prediction, and only for the single reference
+// list eRefPicList.
+Void TComDataCU::copyInterPredInfoFrom    ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefPicList )
+{
+  m_pcPic              = pcCU->getPic();
+  m_pcSlice            = pcCU->getSlice();
+  m_ctuRsAddr          = pcCU->getCtuRsAddr();
+  m_absZIdxInCtu       = uiAbsPartIdx;
+
+  // Pixel position from the raster index of the z-scan part.
+  Int iRastPartIdx     = g_auiZscanToRaster[uiAbsPartIdx];
+  m_uiCUPelX           = pcCU->getCUPelX() + m_pcPic->getMinCUWidth ()*( iRastPartIdx % m_pcPic->getNumPartInCtuWidth() );
+  m_uiCUPelY           = pcCU->getCUPelY() + m_pcPic->getMinCUHeight()*( iRastPartIdx / m_pcPic->getNumPartInCtuWidth() );
+
+  m_pCtuAboveLeft      = pcCU->getCtuAboveLeft();
+  m_pCtuAboveRight     = pcCU->getCtuAboveRight();
+  m_pCtuAbove          = pcCU->getCtuAbove();
+  m_pCtuLeft           = pcCU->getCtuLeft();
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_apcCUColocated[i]  = pcCU->getCUColocated(RefPicList(i));
+  }
+
+  // Alias the parent's per-partition arrays at offset uiAbsPartIdx.
+  m_skipFlag           = pcCU->getSkipFlag ()             + uiAbsPartIdx;
+
+  m_pePartSize         = pcCU->getPartitionSize ()        + uiAbsPartIdx;
+  m_pePredMode         = pcCU->getPredictionMode()        + uiAbsPartIdx;
+  m_ChromaQpAdj        = pcCU->getChromaQpAdj()           + uiAbsPartIdx;
+  m_CUTransquantBypass = pcCU->getCUTransquantBypass()    + uiAbsPartIdx;
+  m_puhInterDir        = pcCU->getInterDir      ()        + uiAbsPartIdx;
+
+  m_puhDepth           = pcCU->getDepth ()                + uiAbsPartIdx;
+  m_puhWidth           = pcCU->getWidth ()                + uiAbsPartIdx;
+  m_puhHeight          = pcCU->getHeight()                + uiAbsPartIdx;
+
+  m_pbMergeFlag        = pcCU->getMergeFlag()             + uiAbsPartIdx;
+  m_puhMergeIndex      = pcCU->getMergeIndex()            + uiAbsPartIdx;
+
+  // Only the requested reference list's MVP data and MV field are linked.
+  m_apiMVPIdx[eRefPicList] = pcCU->getMVPIdx(eRefPicList) + uiAbsPartIdx;
+  m_apiMVPNum[eRefPicList] = pcCU->getMVPNum(eRefPicList) + uiAbsPartIdx;
+
+  m_acCUMvField[ eRefPicList ].linkToWithOffset( pcCU->getCUMvField(eRefPicList), uiAbsPartIdx );
+}
+
+// Copy small CU to bigger CU.
+// One of quarter parts overwritten by predicted sub part.
+/** Accumulate a coded sub-CU (one quadrant) into this parent CU.
+ * Per-partition arrays are copied in at the quadrant's partition offset,
+ * coefficient/PCM buffers at the corresponding sample offset, and the RD
+ * statistics (cost, distortion, bits, bins) are summed into the parent.
+ * \param pcCU          source sub-CU
+ * \param uiPartUnitIdx quadrant index (0..3) occupied by the sub-CU
+ * \param uiDepth       depth of the sub-CU; sizes the coefficient copy
+ */
+Void TComDataCU::copyPartFrom( TComDataCU* pcCU, UInt uiPartUnitIdx, UInt uiDepth )
+{
+  assert( uiPartUnitIdx<4 );
+
+  // RD statistics accumulate over the four quadrants.
+  m_dTotalCost         += pcCU->getTotalCost();
+  m_uiTotalDistortion  += pcCU->getTotalDistortion();
+  m_uiTotalBits        += pcCU->getTotalBits();
+
+  // Destination offset (in 4x4 partition units) of this quadrant.
+  UInt uiOffset         = pcCU->getTotalNumPart()*uiPartUnitIdx;
+  const UInt numValidComp=pcCU->getPic()->getNumberValidComponents();
+  const UInt numValidChan=pcCU->getPic()->getChromaFormat()==CHROMA_400 ? 1:2;
+
+  UInt uiNumPartition = pcCU->getTotalNumPart();
+  Int iSizeInUchar  = sizeof( UChar ) * uiNumPartition;
+  Int iSizeInBool   = sizeof( Bool  ) * uiNumPartition;
+
+  Int sizeInChar  = sizeof( Char ) * uiNumPartition;
+  memcpy( m_skipFlag   + uiOffset, pcCU->getSkipFlag(),       sizeof( *m_skipFlag )   * uiNumPartition );
+  memcpy( m_phQP       + uiOffset, pcCU->getQP(),             sizeInChar                        );
+  memcpy( m_pePartSize + uiOffset, pcCU->getPartitionSize(),  sizeof( *m_pePartSize ) * uiNumPartition );
+  memcpy( m_pePredMode + uiOffset, pcCU->getPredictionMode(), sizeof( *m_pePredMode ) * uiNumPartition );
+  memcpy( m_ChromaQpAdj + uiOffset, pcCU->getChromaQpAdj(),   sizeof( *m_ChromaQpAdj ) * uiNumPartition );
+  memcpy( m_CUTransquantBypass + uiOffset, pcCU->getCUTransquantBypass(), sizeof( *m_CUTransquantBypass ) * uiNumPartition );
+  memcpy( m_pbMergeFlag         + uiOffset, pcCU->getMergeFlag(),         iSizeInBool  );
+  memcpy( m_puhMergeIndex       + uiOffset, pcCU->getMergeIndex(),        iSizeInUchar );
+
+  // Intra direction is stored per channel type (luma, and chroma unless 4:0:0).
+  for (UInt ch=0; ch<numValidChan; ch++)
+  {
+    memcpy( m_puhIntraDir[ch]   + uiOffset, pcCU->getIntraDir(ChannelType(ch)), iSizeInUchar );
+  }
+
+  memcpy( m_puhInterDir         + uiOffset, pcCU->getInterDir(),          iSizeInUchar );
+  memcpy( m_puhTrIdx            + uiOffset, pcCU->getTransformIdx(),      iSizeInUchar );
+
+  // Per-component (Y/Cb/Cr) partition flags.
+  for(UInt comp=0; comp<numValidComp; comp++)
+  {
+    memcpy( m_crossComponentPredictionAlpha[comp] + uiOffset, pcCU->getCrossComponentPredictionAlpha(ComponentID(comp)), iSizeInUchar );
+    memcpy( m_puhTransformSkip[comp]              + uiOffset, pcCU->getTransformSkip(ComponentID(comp))                , iSizeInUchar );
+    memcpy( m_puhCbf[comp]                        + uiOffset, pcCU->getCbf(ComponentID(comp))                          , iSizeInUchar );
+    memcpy( m_explicitRdpcmMode[comp]             + uiOffset, pcCU->getExplicitRdpcmMode(ComponentID(comp))            , iSizeInUchar );
+  }
+
+  memcpy( m_puhDepth  + uiOffset, pcCU->getDepth(),  iSizeInUchar );
+  memcpy( m_puhWidth  + uiOffset, pcCU->getWidth(),  iSizeInUchar );
+  memcpy( m_puhHeight + uiOffset, pcCU->getHeight(), iSizeInUchar );
+
+  memcpy( m_pbIPCMFlag + uiOffset, pcCU->getIPCMFlag(), iSizeInBool );
+
+  // Neighbouring-CTU pointers are the same for every quadrant; take the sub-CU's.
+  m_pCtuAboveLeft      = pcCU->getCtuAboveLeft();
+  m_pCtuAboveRight     = pcCU->getCtuAboveRight();
+  m_pCtuAbove          = pcCU->getCtuAbove();
+  m_pCtuLeft           = pcCU->getCtuLeft();
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    const RefPicList rpl=RefPicList(i);
+    memcpy( m_apiMVPIdx[rpl] + uiOffset, pcCU->getMVPIdx(rpl), iSizeInUchar );
+    memcpy( m_apiMVPNum[rpl] + uiOffset, pcCU->getMVPNum(rpl), iSizeInUchar );
+    m_apcCUColocated[rpl] = pcCU->getCUColocated(rpl);
+  }
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    const RefPicList rpl=RefPicList(i);
+    m_acCUMvField[rpl].copyFrom( pcCU->getCUMvField( rpl ), pcCU->getTotalNumPart(), uiOffset );
+  }
+
+  // Coefficient and PCM buffers are per-sample: translate the quadrant index
+  // into a luma sample offset, then apply each component's subsampling shift.
+  const UInt numCoeffY = g_uiMaxCUWidth*g_uiMaxCUHeight >> (uiDepth<<1);
+  const UInt offsetY   = uiPartUnitIdx*numCoeffY;
+  for (UInt ch=0; ch<numValidComp; ch++)
+  {
+    const ComponentID component = ComponentID(ch);
+    const UInt componentShift   = m_pcPic->getComponentScaleX(component) + m_pcPic->getComponentScaleY(component);
+    const UInt offset           = offsetY>>componentShift;
+    memcpy( m_pcTrCoeff [ch] + offset, pcCU->getCoeff(component),    sizeof(TCoeff)*(numCoeffY>>componentShift) );
+#if ADAPTIVE_QP_SELECTION
+    memcpy( m_pcArlCoeff[ch] + offset, pcCU->getArlCoeff(component), sizeof(TCoeff)*(numCoeffY>>componentShift) );
+#endif
+    memcpy( m_pcIPCMSample[ch] + offset, pcCU->getPCMSample(component), sizeof(Pel)*(numCoeffY>>componentShift) );
+  }
+
+  m_uiTotalBins += pcCU->getTotalBins();
+}
+
+// Copy current predicted part to a CU in picture.
+// It is used to predict for next part
+/** Write this CU's per-partition data back into its CTU in the picture,
+ * at Z-order offset m_absZIdxInCtu, so subsequent CUs can predict from it.
+ * RD statistics overwrite (do not accumulate into) the CTU's values.
+ * \param uhDepth depth of this CU; sizes the coefficient/PCM copy
+ */
+Void TComDataCU::copyToPic( UChar uhDepth )
+{
+  TComDataCU* pCtu = m_pcPic->getCtu( m_ctuRsAddr );
+  const UInt numValidComp=pCtu->getPic()->getNumberValidComponents();
+  const UInt numValidChan=pCtu->getPic()->getChromaFormat()==CHROMA_400 ? 1:2;
+
+  pCtu->getTotalCost()       = m_dTotalCost;
+  pCtu->getTotalDistortion() = m_uiTotalDistortion;
+  pCtu->getTotalBits()       = m_uiTotalBits;
+
+  Int iSizeInUchar  = sizeof( UChar ) * m_uiNumPartition;
+  Int iSizeInBool   = sizeof( Bool  ) * m_uiNumPartition;
+  Int sizeInChar  = sizeof( Char ) * m_uiNumPartition;
+
+  memcpy( pCtu->getSkipFlag() + m_absZIdxInCtu, m_skipFlag, sizeof( *m_skipFlag ) * m_uiNumPartition );
+
+  memcpy( pCtu->getQP() + m_absZIdxInCtu, m_phQP, sizeInChar  );
+
+  memcpy( pCtu->getPartitionSize()  + m_absZIdxInCtu, m_pePartSize, sizeof( *m_pePartSize ) * m_uiNumPartition );
+  memcpy( pCtu->getPredictionMode() + m_absZIdxInCtu, m_pePredMode, sizeof( *m_pePredMode ) * m_uiNumPartition );
+  memcpy( pCtu->getChromaQpAdj() + m_absZIdxInCtu, m_ChromaQpAdj, sizeof( *m_ChromaQpAdj ) * m_uiNumPartition );
+  memcpy( pCtu->getCUTransquantBypass()+ m_absZIdxInCtu, m_CUTransquantBypass, sizeof( *m_CUTransquantBypass ) * m_uiNumPartition );
+  memcpy( pCtu->getMergeFlag()         + m_absZIdxInCtu, m_pbMergeFlag,         iSizeInBool  );
+  memcpy( pCtu->getMergeIndex()        + m_absZIdxInCtu, m_puhMergeIndex,       iSizeInUchar );
+  // Intra direction is per channel type (luma, and chroma unless 4:0:0).
+  for (UInt ch=0; ch<numValidChan; ch++)
+  {
+    memcpy( pCtu->getIntraDir(ChannelType(ch)) + m_absZIdxInCtu, m_puhIntraDir[ch], iSizeInUchar);
+  }
+
+  memcpy( pCtu->getInterDir()          + m_absZIdxInCtu, m_puhInterDir,         iSizeInUchar );
+  memcpy( pCtu->getTransformIdx()      + m_absZIdxInCtu, m_puhTrIdx,            iSizeInUchar );
+
+  for(UInt comp=0; comp<numValidComp; comp++)
+  {
+    memcpy( pCtu->getCrossComponentPredictionAlpha(ComponentID(comp)) + m_absZIdxInCtu, m_crossComponentPredictionAlpha[comp], iSizeInUchar );
+    memcpy( pCtu->getTransformSkip(ComponentID(comp))                 + m_absZIdxInCtu, m_puhTransformSkip[comp],              iSizeInUchar );
+    memcpy( pCtu->getCbf(ComponentID(comp))                           + m_absZIdxInCtu, m_puhCbf[comp],                        iSizeInUchar );
+    memcpy( pCtu->getExplicitRdpcmMode(ComponentID(comp))             + m_absZIdxInCtu, m_explicitRdpcmMode[comp],             iSizeInUchar );
+  }
+
+  memcpy( pCtu->getDepth()  + m_absZIdxInCtu, m_puhDepth,  iSizeInUchar );
+  memcpy( pCtu->getWidth()  + m_absZIdxInCtu, m_puhWidth,  iSizeInUchar );
+  memcpy( pCtu->getHeight() + m_absZIdxInCtu, m_puhHeight, iSizeInUchar );
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    const RefPicList rpl=RefPicList(i);
+    memcpy( pCtu->getMVPIdx(rpl) + m_absZIdxInCtu, m_apiMVPIdx[rpl], iSizeInUchar );
+    memcpy( pCtu->getMVPNum(rpl) + m_absZIdxInCtu, m_apiMVPNum[rpl], iSizeInUchar );
+  }
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    const RefPicList rpl=RefPicList(i);
+    m_acCUMvField[rpl].copyTo( pCtu->getCUMvField( rpl ), m_absZIdxInCtu );
+  }
+
+  memcpy( pCtu->getIPCMFlag() + m_absZIdxInCtu, m_pbIPCMFlag,         iSizeInBool  );
+
+  // Partition index -> sample offset (each partition covers minCU w x h samples);
+  // per-component copies apply the chroma subsampling shift.
+  const UInt numCoeffY    = (g_uiMaxCUWidth*g_uiMaxCUHeight)>>(uhDepth<<1);
+  const UInt offsetY      = m_absZIdxInCtu*m_pcPic->getMinCUWidth()*m_pcPic->getMinCUHeight();
+  for (UInt comp=0; comp<numValidComp; comp++)
+  {
+    const ComponentID component = ComponentID(comp);
+    const UInt componentShift   = m_pcPic->getComponentScaleX(component) + m_pcPic->getComponentScaleY(component);
+    memcpy( pCtu->getCoeff(component)   + (offsetY>>componentShift), m_pcTrCoeff[component], sizeof(TCoeff)*(numCoeffY>>componentShift) );
+#if ADAPTIVE_QP_SELECTION
+    memcpy( pCtu->getArlCoeff(component) + (offsetY>>componentShift), m_pcArlCoeff[component], sizeof(TCoeff)*(numCoeffY>>componentShift) );
+#endif
+    memcpy( pCtu->getPCMSample(component) + (offsetY>>componentShift), m_pcIPCMSample[component], sizeof(Pel)*(numCoeffY>>componentShift) );
+  }
+
+  pCtu->getTotalBins() = m_uiTotalBins;
+}
+
+/** Write one sub-part of this CU back into its CTU in the picture.
+ * Only uiQNumPart = m_uiNumPartition >> (2*uiPartDepth) partitions are
+ * copied, starting at sub-part uiPartIdx; coefficient/PCM buffers are
+ * copied at the matching sample offset.
+ * \param uhDepth     depth of this CU
+ * \param uiPartIdx   index of the sub-part within this CU
+ * \param uiPartDepth additional depth of the sub-part relative to this CU
+ */
+Void TComDataCU::copyToPic( UChar uhDepth, UInt uiPartIdx, UInt uiPartDepth )
+{
+  TComDataCU*   pCtu       = m_pcPic->getCtu( m_ctuRsAddr );
+  UInt          uiQNumPart  = m_uiNumPartition>>(uiPartDepth<<1);
+
+  // Z-order start of the sub-part, CU-relative and CTU-relative.
+  UInt uiPartStart          = uiPartIdx*uiQNumPart;
+  UInt uiPartOffset         = m_absZIdxInCtu + uiPartStart;
+
+  const UInt numValidComp=pCtu->getPic()->getNumberValidComponents();
+  const UInt numValidChan=pCtu->getPic()->getChromaFormat()==CHROMA_400 ? 1:2;
+
+  pCtu->getTotalCost()       = m_dTotalCost;
+  pCtu->getTotalDistortion() = m_uiTotalDistortion;
+  pCtu->getTotalBits()       = m_uiTotalBits;
+
+  Int iSizeInUchar  = sizeof( UChar  ) * uiQNumPart;
+  Int iSizeInBool   = sizeof( Bool   ) * uiQNumPart;
+  Int sizeInChar  = sizeof( Char ) * uiQNumPart;
+
+  // NOTE(review): sources are read from offset 0 of this CU's arrays, not
+  // from uiPartStart — presumably the caller has this CU's arrays holding
+  // the sub-part's data at the front; confirm against callers.
+  memcpy( pCtu->getSkipFlag()       + uiPartOffset, m_skipFlag,   sizeof( *m_skipFlag )   * uiQNumPart );
+
+  memcpy( pCtu->getQP() + uiPartOffset, m_phQP, sizeInChar );
+  memcpy( pCtu->getPartitionSize()  + uiPartOffset, m_pePartSize, sizeof( *m_pePartSize ) * uiQNumPart );
+  memcpy( pCtu->getPredictionMode() + uiPartOffset, m_pePredMode, sizeof( *m_pePredMode ) * uiQNumPart );
+
+  memcpy( pCtu->getCUTransquantBypass()+ uiPartOffset, m_CUTransquantBypass, sizeof( *m_CUTransquantBypass ) * uiQNumPart );
+  memcpy( pCtu->getMergeFlag()         + uiPartOffset, m_pbMergeFlag,         iSizeInBool  );
+  memcpy( pCtu->getMergeIndex()        + uiPartOffset, m_puhMergeIndex,       iSizeInUchar );
+  for (UInt ch=0; ch<numValidChan; ch++)
+  {
+    memcpy( pCtu->getIntraDir(ChannelType(ch)) + uiPartOffset, m_puhIntraDir[ch], iSizeInUchar );
+  }
+
+  memcpy( pCtu->getInterDir()          + uiPartOffset, m_puhInterDir,         iSizeInUchar );
+  memcpy( pCtu->getTransformIdx()      + uiPartOffset, m_puhTrIdx,            iSizeInUchar );
+
+  for(UInt comp=0; comp<numValidComp; comp++)
+  {
+    memcpy( pCtu->getCrossComponentPredictionAlpha(ComponentID(comp)) + uiPartOffset, m_crossComponentPredictionAlpha[comp], iSizeInUchar );
+    memcpy( pCtu->getTransformSkip(ComponentID(comp) )                + uiPartOffset, m_puhTransformSkip[comp],   iSizeInUchar );
+    memcpy( pCtu->getCbf(ComponentID(comp))                           + uiPartOffset, m_puhCbf[comp],             iSizeInUchar );
+    memcpy( pCtu->getExplicitRdpcmMode(ComponentID(comp) )            + uiPartOffset, m_explicitRdpcmMode[comp],  iSizeInUchar );
+  }
+
+  memcpy( pCtu->getDepth()  + uiPartOffset, m_puhDepth,  iSizeInUchar );
+  memcpy( pCtu->getWidth()  + uiPartOffset, m_puhWidth,  iSizeInUchar );
+  memcpy( pCtu->getHeight() + uiPartOffset, m_puhHeight, iSizeInUchar );
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    const RefPicList rpl=RefPicList(i);
+    memcpy( pCtu->getMVPIdx(rpl) + uiPartOffset, m_apiMVPIdx[rpl], iSizeInUchar );
+    memcpy( pCtu->getMVPNum(rpl) + uiPartOffset, m_apiMVPNum[rpl], iSizeInUchar );
+  }
+
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    const RefPicList rpl=RefPicList(i);
+    m_acCUMvField[rpl].copyTo( pCtu->getCUMvField( rpl ), m_absZIdxInCtu, uiPartStart, uiQNumPart );
+  }
+
+  memcpy( pCtu->getIPCMFlag() + uiPartOffset, m_pbIPCMFlag,         iSizeInBool  );
+
+  // Sub-part size in luma samples; offsets scaled by the subsampling shift.
+  const UInt numCoeffY    = (g_uiMaxCUWidth*g_uiMaxCUHeight)>>((uhDepth+uiPartDepth)<<1);
+  const UInt offsetY      = uiPartOffset*m_pcPic->getMinCUWidth()*m_pcPic->getMinCUHeight();
+  for (UInt comp=0; comp<numValidComp; comp++)
+  {
+    UInt componentShift = m_pcPic->getComponentScaleX(ComponentID(comp)) + m_pcPic->getComponentScaleY(ComponentID(comp));
+    memcpy( pCtu->getCoeff(ComponentID(comp)) + (offsetY>>componentShift), m_pcTrCoeff[comp], sizeof(TCoeff)*(numCoeffY>>componentShift) );
+#if ADAPTIVE_QP_SELECTION
+    memcpy( pCtu->getArlCoeff(ComponentID(comp)) + (offsetY>>componentShift), m_pcArlCoeff[comp], sizeof(TCoeff)*(numCoeffY>>componentShift) );
+#endif
+    memcpy( pCtu->getPCMSample(ComponentID(comp)) + (offsetY>>componentShift), m_pcIPCMSample[comp], sizeof(Pel)*(numCoeffY>>componentShift) );
+  }
+
+  pCtu->getTotalBins() = m_uiTotalBins;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Other public functions
+// --------------------------------------------------------------------------------------------------------------------
+
+/** Fetch the CU containing the PU immediately to the left of a partition.
+ * \param uiLPartUnitIdx (out) partition index of the left neighbour within
+ *                       the returned CU
+ * \param uiCurrPartUnitIdx CTU-relative Z-order index of the current partition
+ * \param bEnforceSliceRestriction reject a neighbour from another slice
+ * \param bEnforceTileRestriction  reject a neighbour from another tile
+ * \returns CU/CTU holding the neighbour, or NULL if unavailable
+ */
+TComDataCU* TComDataCU::getPULeft( UInt& uiLPartUnitIdx,
+                                   UInt uiCurrPartUnitIdx,
+                                   Bool bEnforceSliceRestriction,
+                                   Bool bEnforceTileRestriction )
+{
+  UInt uiAbsPartIdx       = g_auiZscanToRaster[uiCurrPartUnitIdx];
+  UInt uiAbsZorderCUIdx   = g_auiZscanToRaster[m_absZIdxInCtu];
+  const UInt numPartInCtuWidth = m_pcPic->getNumPartInCtuWidth();
+
+  // Not in the leftmost partition column of the CTU: neighbour is inside it.
+  if ( !RasterAddress::isZeroCol( uiAbsPartIdx, numPartInCtuWidth ) )
+  {
+    uiLPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdx - 1 ];
+    if ( RasterAddress::isEqualCol( uiAbsPartIdx, uiAbsZorderCUIdx, numPartInCtuWidth ) )
+    {
+      // Current partition sits on this CU's left edge -> neighbour belongs
+      // to another CU of the same CTU; return the CTU with a CTU-relative index.
+      return m_pcPic->getCtu( getCtuRsAddr() );
+    }
+    else
+    {
+      // Neighbour is inside this very CU; make the index CU-relative.
+      uiLPartUnitIdx -= m_absZIdxInCtu;
+      return this;
+    }
+  }
+
+  // Left CTU boundary: wrap to the rightmost column of the CTU to the left.
+  uiLPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdx + numPartInCtuWidth - 1 ];
+  if ( (bEnforceSliceRestriction && !CUIsFromSameSlice(m_pCtuLeft)) || (bEnforceTileRestriction && !CUIsFromSameTile(m_pCtuLeft)) )
+  {
+    return NULL;
+  }
+  return m_pCtuLeft;
+}
+
+
+/** Fetch the CU containing the PU immediately above a partition.
+ * \param uiAPartUnitIdx (out) partition index of the above neighbour within
+ *                       the returned CU
+ * \param uiCurrPartUnitIdx CTU-relative Z-order index of the current partition
+ * \param bEnforceSliceRestriction reject a neighbour from another slice
+ * \param planarAtCtuBoundary treat the above-CTU row as unavailable
+ *        (returns NULL at the top CTU boundary)
+ * \param bEnforceTileRestriction reject a neighbour from another tile
+ * \returns CU/CTU holding the neighbour, or NULL if unavailable
+ */
+TComDataCU* TComDataCU::getPUAbove( UInt& uiAPartUnitIdx,
+                                    UInt uiCurrPartUnitIdx,
+                                    Bool bEnforceSliceRestriction,
+                                    Bool planarAtCtuBoundary,
+                                    Bool bEnforceTileRestriction )
+{
+  UInt uiAbsPartIdx       = g_auiZscanToRaster[uiCurrPartUnitIdx];
+  UInt uiAbsZorderCUIdx   = g_auiZscanToRaster[m_absZIdxInCtu];
+  const UInt numPartInCtuWidth = m_pcPic->getNumPartInCtuWidth();
+
+  // Not in the top partition row of the CTU: neighbour is inside it.
+  if ( !RasterAddress::isZeroRow( uiAbsPartIdx, numPartInCtuWidth ) )
+  {
+    uiAPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdx - numPartInCtuWidth ];
+    if ( RasterAddress::isEqualRow( uiAbsPartIdx, uiAbsZorderCUIdx, numPartInCtuWidth ) )
+    {
+      // Partition is on this CU's top edge -> neighbour is another CU of
+      // the same CTU; return the CTU with a CTU-relative index.
+      return m_pcPic->getCtu( getCtuRsAddr() );
+    }
+    else
+    {
+      // Neighbour is inside this CU; make the index CU-relative.
+      uiAPartUnitIdx -= m_absZIdxInCtu;
+      return this;
+    }
+  }
+
+  if(planarAtCtuBoundary)
+  {
+    return NULL;
+  }
+
+  // Top CTU boundary: wrap to the bottom row of the CTU above.
+  uiAPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdx + m_pcPic->getNumPartitionsInCtu() - numPartInCtuWidth ];
+
+  if ( (bEnforceSliceRestriction && !CUIsFromSameSlice(m_pCtuAbove)) || (bEnforceTileRestriction && !CUIsFromSameTile(m_pCtuAbove)) )
+  {
+    return NULL;
+  }
+  return m_pCtuAbove;
+}
+
+/** Fetch the CU containing the PU diagonally above-left of a partition.
+ * Falls through CTU boundaries to m_pCtuAbove / m_pCtuLeft / m_pCtuAboveLeft.
+ * \param uiALPartUnitIdx (out) partition index of the neighbour within the
+ *                        returned CU
+ * \param uiCurrPartUnitIdx CTU-relative Z-order index of the current partition
+ * \param bEnforceSliceRestriction reject a neighbour from another slice/tile
+ * \returns CU/CTU holding the neighbour, or NULL if unavailable
+ */
+TComDataCU* TComDataCU::getPUAboveLeft( UInt& uiALPartUnitIdx, UInt uiCurrPartUnitIdx, Bool bEnforceSliceRestriction )
+{
+  UInt uiAbsPartIdx       = g_auiZscanToRaster[uiCurrPartUnitIdx];
+  UInt uiAbsZorderCUIdx   = g_auiZscanToRaster[m_absZIdxInCtu];
+  const UInt numPartInCtuWidth = m_pcPic->getNumPartInCtuWidth();
+
+  if ( !RasterAddress::isZeroCol( uiAbsPartIdx, numPartInCtuWidth ) )
+  {
+    if ( !RasterAddress::isZeroRow( uiAbsPartIdx, numPartInCtuWidth ) )
+    {
+      // Neighbour lies fully inside the current CTU.
+      uiALPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdx - numPartInCtuWidth - 1 ];
+      if ( RasterAddress::isEqualRowOrCol( uiAbsPartIdx, uiAbsZorderCUIdx, numPartInCtuWidth ) )
+      {
+        return m_pcPic->getCtu( getCtuRsAddr() );
+      }
+      else
+      {
+        // Neighbour is inside this CU; make the index CU-relative.
+        uiALPartUnitIdx -= m_absZIdxInCtu;
+        return this;
+      }
+    }
+    // Top CTU boundary only: neighbour is in the bottom row of the CTU above.
+    uiALPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdx + getPic()->getNumPartitionsInCtu() - numPartInCtuWidth - 1 ];
+    if ( bEnforceSliceRestriction && !CUIsFromSameSliceAndTile(m_pCtuAbove) )
+    {
+      return NULL;
+    }
+    return m_pCtuAbove;
+  }
+
+  if ( !RasterAddress::isZeroRow( uiAbsPartIdx, numPartInCtuWidth ) )
+  {
+    // Left CTU boundary only: neighbour is in the rightmost column of the left CTU.
+    uiALPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdx - 1 ];
+    if ( bEnforceSliceRestriction && !CUIsFromSameSliceAndTile(m_pCtuLeft) )
+    {
+      return NULL;
+    }
+    return m_pCtuLeft;
+  }
+
+  // Both boundaries: neighbour is the bottom-right partition of the above-left CTU.
+  uiALPartUnitIdx = g_auiRasterToZscan[ m_pcPic->getNumPartitionsInCtu() - 1 ];
+  if ( bEnforceSliceRestriction && !CUIsFromSameSliceAndTile(m_pCtuAboveLeft) )
+  {
+    return NULL;
+  }
+  return m_pCtuAboveLeft;
+}
+
+/** Fetch the CU containing the PU diagonally above-right of a partition.
+ * The neighbour is only available when it has already been coded in Z-order
+ * (checked via uiCurrPartUnitIdx) and lies inside the picture.
+ * \param uiARPartUnitIdx (out) partition index of the neighbour within the
+ *                        returned CU; MAX_UINT when unavailable
+ * \param uiCurrPartUnitIdx CTU-relative Z-order index of the current partition
+ * \param bEnforceSliceRestriction reject a neighbour from another slice/tile
+ * \returns CU/CTU holding the neighbour, or NULL if unavailable
+ */
+TComDataCU* TComDataCU::getPUAboveRight( UInt& uiARPartUnitIdx, UInt uiCurrPartUnitIdx, Bool bEnforceSliceRestriction )
+{
+  UInt uiAbsPartIdxRT     = g_auiZscanToRaster[uiCurrPartUnitIdx];
+  // Raster index of this CU's top-right partition (for the ownership test below).
+  UInt uiAbsZorderCUIdx   = g_auiZscanToRaster[ m_absZIdxInCtu ] + m_puhWidth[0] / m_pcPic->getMinCUWidth() - 1;
+  const UInt numPartInCtuWidth = m_pcPic->getNumPartInCtuWidth();
+
+  // Neighbour would fall outside the right picture border.
+  if( ( m_pcPic->getCtu(m_ctuRsAddr)->getCUPelX() + g_auiRasterToPelX[uiAbsPartIdxRT] + m_pcPic->getMinCUWidth() ) >= m_pcSlice->getSPS()->getPicWidthInLumaSamples() )
+  {
+    uiARPartUnitIdx = MAX_UINT;
+    return NULL;
+  }
+
+  if ( RasterAddress::lessThanCol( uiAbsPartIdxRT, numPartInCtuWidth - 1, numPartInCtuWidth ) )
+  {
+    if ( !RasterAddress::isZeroRow( uiAbsPartIdxRT, numPartInCtuWidth ) )
+    {
+      // Neighbour is inside this CTU; it must precede us in coding (Z) order.
+      if ( uiCurrPartUnitIdx > g_auiRasterToZscan[ uiAbsPartIdxRT - numPartInCtuWidth + 1 ] )
+      {
+        uiARPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdxRT - numPartInCtuWidth + 1 ];
+        if ( RasterAddress::isEqualRowOrCol( uiAbsPartIdxRT, uiAbsZorderCUIdx, numPartInCtuWidth ) )
+        {
+          return m_pcPic->getCtu( getCtuRsAddr() );
+        }
+        else
+        {
+          // Neighbour is inside this CU; make the index CU-relative.
+          uiARPartUnitIdx -= m_absZIdxInCtu;
+          return this;
+        }
+      }
+      uiARPartUnitIdx = MAX_UINT;
+      return NULL;
+    }
+    // Top CTU boundary: neighbour is in the bottom row of the CTU above.
+    uiARPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdxRT + m_pcPic->getNumPartitionsInCtu() - numPartInCtuWidth + 1 ];
+
+    if ( bEnforceSliceRestriction && !CUIsFromSameSliceAndTile(m_pCtuAbove) )
+    {
+      return NULL;
+    }
+    return m_pCtuAbove;
+  }
+
+  if ( !RasterAddress::isZeroRow( uiAbsPartIdxRT, numPartInCtuWidth ) )
+  {
+    // Right CTU edge but not the top row: above-right is not yet coded.
+    uiARPartUnitIdx = MAX_UINT;
+    return NULL;
+  }
+
+  // Top-right CTU corner: neighbour is the bottom-left partition of the above-right CTU.
+  uiARPartUnitIdx = g_auiRasterToZscan[ m_pcPic->getNumPartitionsInCtu() - numPartInCtuWidth ];
+
+  if ( bEnforceSliceRestriction && !CUIsFromSameSliceAndTile(m_pCtuAboveRight) )
+  {
+    return NULL;
+  }
+  return m_pCtuAboveRight;
+}
+
+/** Fetch the CU containing the PU diagonally below-left of a partition.
+ * Available only when already coded in Z-order and inside the picture;
+ * never crosses the bottom CTU boundary.
+ * \param uiBLPartUnitIdx (out) partition index of the neighbour within the
+ *                        returned CU; MAX_UINT when unavailable
+ * \param uiCurrPartUnitIdx CTU-relative Z-order index of the current partition
+ * \param bEnforceSliceRestriction reject a neighbour from another slice/tile
+ * \returns CU/CTU holding the neighbour, or NULL if unavailable
+ */
+TComDataCU* TComDataCU::getPUBelowLeft( UInt& uiBLPartUnitIdx, UInt uiCurrPartUnitIdx, Bool bEnforceSliceRestriction )
+{
+  UInt uiAbsPartIdxLB     = g_auiZscanToRaster[uiCurrPartUnitIdx];
+  const UInt numPartInCtuWidth = m_pcPic->getNumPartInCtuWidth();
+  // Raster index of this CU's bottom-left partition (for the ownership test below).
+  UInt uiAbsZorderCUIdxLB = g_auiZscanToRaster[ m_absZIdxInCtu ] + (m_puhHeight[0] / m_pcPic->getMinCUHeight() - 1)*numPartInCtuWidth;
+
+  // Neighbour would fall outside the bottom picture border.
+  if( ( m_pcPic->getCtu(m_ctuRsAddr)->getCUPelY() + g_auiRasterToPelY[uiAbsPartIdxLB] + m_pcPic->getMinCUHeight() ) >= m_pcSlice->getSPS()->getPicHeightInLumaSamples() )
+  {
+    uiBLPartUnitIdx = MAX_UINT;
+    return NULL;
+  }
+
+  if ( RasterAddress::lessThanRow( uiAbsPartIdxLB, m_pcPic->getNumPartInCtuHeight() - 1, numPartInCtuWidth ) )
+  {
+    if ( !RasterAddress::isZeroCol( uiAbsPartIdxLB, numPartInCtuWidth ) )
+    {
+      // Neighbour is inside this CTU; it must precede us in coding (Z) order.
+      if ( uiCurrPartUnitIdx > g_auiRasterToZscan[ uiAbsPartIdxLB + numPartInCtuWidth - 1 ] )
+      {
+        uiBLPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdxLB + numPartInCtuWidth - 1 ];
+        if ( RasterAddress::isEqualRowOrCol( uiAbsPartIdxLB, uiAbsZorderCUIdxLB, numPartInCtuWidth ) )
+        {
+          return m_pcPic->getCtu( getCtuRsAddr() );
+        }
+        else
+        {
+          // Neighbour is inside this CU; make the index CU-relative.
+          uiBLPartUnitIdx -= m_absZIdxInCtu;
+          return this;
+        }
+      }
+      uiBLPartUnitIdx = MAX_UINT;
+      return NULL;
+    }
+    // Left CTU boundary: neighbour is in the rightmost column of the left CTU.
+    uiBLPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdxLB + numPartInCtuWidth*2 - 1 ];
+    if ( bEnforceSliceRestriction && !CUIsFromSameSliceAndTile(m_pCtuLeft) )
+    {
+      return NULL;
+    }
+    return m_pCtuLeft;
+  }
+
+  // Bottom CTU row: below-left neighbour is never available.
+  uiBLPartUnitIdx = MAX_UINT;
+  return NULL;
+}
+
+/** Fetch the CU containing the partition uiPartUnitOffset rows below-left of
+ * the given partition (extended-reach variant used when gathering intra
+ * reference samples — "Adi" presumably refers to angular/DC intra; confirm).
+ * \param uiBLPartUnitIdx (out) neighbour partition index; MAX_UINT if unavailable
+ * \param uiCurrPartUnitIdx CTU-relative Z-order index of the current partition
+ * \param uiPartUnitOffset  distance below, in minimum-CU partition rows
+ * \param bEnforceSliceRestriction reject a neighbour from another slice/tile
+ * \returns CU/CTU holding the neighbour, or NULL if unavailable
+ */
+TComDataCU* TComDataCU::getPUBelowLeftAdi(UInt& uiBLPartUnitIdx,  UInt uiCurrPartUnitIdx, UInt uiPartUnitOffset, Bool bEnforceSliceRestriction)
+{
+  UInt uiAbsPartIdxLB     = g_auiZscanToRaster[uiCurrPartUnitIdx];
+  const UInt numPartInCtuWidth = m_pcPic->getNumPartInCtuWidth();
+  // Raster index of this CU's bottom-left partition.
+  UInt uiAbsZorderCUIdxLB = g_auiZscanToRaster[ m_absZIdxInCtu ] + ((m_puhHeight[0] / m_pcPic->getMinCUHeight()) - 1)*numPartInCtuWidth;
+
+  // Target sample would fall outside the bottom picture border.
+  if( ( m_pcPic->getCtu(m_ctuRsAddr)->getCUPelY() + g_auiRasterToPelY[uiAbsPartIdxLB] + (m_pcPic->getPicSym()->getMinCUHeight() * uiPartUnitOffset)) >= m_pcSlice->getSPS()->getPicHeightInLumaSamples())
+  {
+    uiBLPartUnitIdx = MAX_UINT;
+    return NULL;
+  }
+
+  if ( RasterAddress::lessThanRow( uiAbsPartIdxLB, m_pcPic->getNumPartInCtuHeight() - uiPartUnitOffset, numPartInCtuWidth ) )
+  {
+    if ( !RasterAddress::isZeroCol( uiAbsPartIdxLB, numPartInCtuWidth ) )
+    {
+      // Neighbour is inside this CTU; it must precede us in coding (Z) order.
+      if ( uiCurrPartUnitIdx > g_auiRasterToZscan[ uiAbsPartIdxLB + uiPartUnitOffset * numPartInCtuWidth - 1 ] )
+      {
+        uiBLPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdxLB + uiPartUnitOffset * numPartInCtuWidth - 1 ];
+        if ( RasterAddress::isEqualRowOrCol( uiAbsPartIdxLB, uiAbsZorderCUIdxLB, numPartInCtuWidth ) )
+        {
+          return m_pcPic->getCtu( getCtuRsAddr() );
+        }
+        else
+        {
+          // Neighbour is inside this CU; make the index CU-relative.
+          uiBLPartUnitIdx -= m_absZIdxInCtu;
+          return this;
+        }
+      }
+      uiBLPartUnitIdx = MAX_UINT;
+      return NULL;
+    }
+    // Left CTU boundary: neighbour is in the rightmost column of the left CTU.
+    uiBLPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdxLB + (1+uiPartUnitOffset) * numPartInCtuWidth - 1 ];
+    if ( bEnforceSliceRestriction && !CUIsFromSameSliceAndTile(m_pCtuLeft) )
+    {
+      return NULL;
+    }
+    return m_pCtuLeft;
+  }
+
+  // Reaches past the bottom CTU row: never available.
+  uiBLPartUnitIdx = MAX_UINT;
+  return NULL;
+}
+
+/** Fetch the CU containing the partition uiPartUnitOffset columns above-right
+ * of the given partition (extended-reach variant used when gathering intra
+ * reference samples — "Adi" presumably refers to angular/DC intra; confirm).
+ * \param uiARPartUnitIdx (out) neighbour partition index; MAX_UINT if unavailable
+ * \param uiCurrPartUnitIdx CTU-relative Z-order index of the current partition
+ * \param uiPartUnitOffset  distance to the right, in minimum-CU partition columns
+ * \param bEnforceSliceRestriction reject a neighbour from another slice/tile
+ * \returns CU/CTU holding the neighbour, or NULL if unavailable
+ */
+TComDataCU* TComDataCU::getPUAboveRightAdi(UInt&  uiARPartUnitIdx, UInt uiCurrPartUnitIdx, UInt uiPartUnitOffset, Bool bEnforceSliceRestriction)
+{
+  UInt uiAbsPartIdxRT     = g_auiZscanToRaster[uiCurrPartUnitIdx];
+  // Raster index of this CU's top-right partition.
+  UInt uiAbsZorderCUIdx   = g_auiZscanToRaster[ m_absZIdxInCtu ] + (m_puhWidth[0] / m_pcPic->getMinCUWidth()) - 1;
+  const UInt numPartInCtuWidth = m_pcPic->getNumPartInCtuWidth();
+
+  // Target sample would fall outside the right picture border.
+  // NOTE(review): uses getMinCUHeight() in this X/width bound — mirrors the
+  // Y check in getPUBelowLeftAdi; equivalent only if min CUs are square. Verify.
+  if( ( m_pcPic->getCtu(m_ctuRsAddr)->getCUPelX() + g_auiRasterToPelX[uiAbsPartIdxRT] + (m_pcPic->getPicSym()->getMinCUHeight() * uiPartUnitOffset)) >= m_pcSlice->getSPS()->getPicWidthInLumaSamples() )
+  {
+    uiARPartUnitIdx = MAX_UINT;
+    return NULL;
+  }
+
+  if ( RasterAddress::lessThanCol( uiAbsPartIdxRT, numPartInCtuWidth - uiPartUnitOffset, numPartInCtuWidth ) )
+  {
+    if ( !RasterAddress::isZeroRow( uiAbsPartIdxRT, numPartInCtuWidth ) )
+    {
+      // Neighbour is inside this CTU; it must precede us in coding (Z) order.
+      if ( uiCurrPartUnitIdx > g_auiRasterToZscan[ uiAbsPartIdxRT - numPartInCtuWidth + uiPartUnitOffset ] )
+      {
+        uiARPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdxRT - numPartInCtuWidth + uiPartUnitOffset ];
+        if ( RasterAddress::isEqualRowOrCol( uiAbsPartIdxRT, uiAbsZorderCUIdx, numPartInCtuWidth ) )
+        {
+          return m_pcPic->getCtu( getCtuRsAddr() );
+        }
+        else
+        {
+          // Neighbour is inside this CU; make the index CU-relative.
+          uiARPartUnitIdx -= m_absZIdxInCtu;
+          return this;
+        }
+      }
+      uiARPartUnitIdx = MAX_UINT;
+      return NULL;
+    }
+
+    // Top CTU boundary: neighbour is in the bottom row of the CTU above.
+    uiARPartUnitIdx = g_auiRasterToZscan[ uiAbsPartIdxRT + m_pcPic->getNumPartitionsInCtu() - numPartInCtuWidth + uiPartUnitOffset ];
+    if ( bEnforceSliceRestriction && !CUIsFromSameSliceAndTile(m_pCtuAbove) )
+    {
+      return NULL;
+    }
+    return m_pCtuAbove;
+  }
+
+  if ( !RasterAddress::isZeroRow( uiAbsPartIdxRT, numPartInCtuWidth ) )
+  {
+    uiARPartUnitIdx = MAX_UINT;
+    return NULL;
+  }
+
+  // Top-right CTU corner: neighbour is in the bottom row of the above-right CTU.
+  uiARPartUnitIdx = g_auiRasterToZscan[ m_pcPic->getNumPartitionsInCtu() - numPartInCtuWidth + uiPartUnitOffset-1 ];
+  if ( bEnforceSliceRestriction && !CUIsFromSameSliceAndTile(m_pCtuAboveRight) )
+  {
+    return NULL;
+  }
+  return m_pCtuAboveRight;
+}
+
+/** Get left QpMinCu
+*\param   uiLPartUnitIdx (out) Z-order index of the left-neighbour partition
+*\param   uiCurrAbsIdxInCtu current absolute partition index in the CTU
+*\returns TComDataCU* the current CTU, or NULL at the left CTU boundary
+*/
+TComDataCU* TComDataCU::getQpMinCuLeft( UInt& uiLPartUnitIdx, UInt uiCurrAbsIdxInCtu )
+{
+  const UInt numPartInCtuWidth = m_pcPic->getNumPartInCtuWidth();
+  const UInt shift = (g_uiMaxCUDepth - getSlice()->getPPS()->getMaxCuDQPDepth()) << 1;
+  // Round the Z-order index down to the top-left partition of its quantization group.
+  const UInt absZorderQpMinCUIdx = (uiCurrAbsIdxInCtu >> shift) << shift;
+  const UInt absRorderQpMinCUIdx = g_auiZscanToRaster[absZorderQpMinCUIdx];
+
+  // No in-CTU neighbour when the group touches the left CTU boundary.
+  if ( RasterAddress::isZeroCol(absRorderQpMinCUIdx, numPartInCtuWidth) )
+  {
+    return NULL;
+  }
+
+  // Index of the left neighbour relative to the group's top-left corner.
+  uiLPartUnitIdx = g_auiRasterToZscan[absRorderQpMinCUIdx - 1];
+
+  return m_pcPic->getCtu( getCtuRsAddr() );
+}
+
+/** Get Above QpMinCu
+*\param   uiAPartUnitIdx (out) Z-order index of the above-neighbour partition
+*\param   uiCurrAbsIdxInCtu current absolute partition index in the CTU
+*\returns TComDataCU* the current CTU, or NULL at the top CTU boundary
+*/
+TComDataCU* TComDataCU::getQpMinCuAbove( UInt& uiAPartUnitIdx, UInt uiCurrAbsIdxInCtu )
+{
+  const UInt numPartInCtuWidth = m_pcPic->getNumPartInCtuWidth();
+  const UInt shift = (g_uiMaxCUDepth - getSlice()->getPPS()->getMaxCuDQPDepth()) << 1;
+  // Round the Z-order index down to the top-left partition of its quantization group.
+  const UInt absZorderQpMinCUIdx = (uiCurrAbsIdxInCtu >> shift) << shift;
+  const UInt absRorderQpMinCUIdx = g_auiZscanToRaster[absZorderQpMinCUIdx];
+
+  // No in-CTU neighbour when the group touches the top CTU boundary.
+  if ( RasterAddress::isZeroRow( absRorderQpMinCUIdx, numPartInCtuWidth) )
+  {
+    return NULL;
+  }
+
+  // Index of the above neighbour relative to the group's top-left corner.
+  uiAPartUnitIdx = g_auiRasterToZscan[absRorderQpMinCUIdx - numPartInCtuWidth];
+
+  return m_pcPic->getCtu( getCtuRsAddr() );
+}
+
+
+
+/** Get reference QP: the rounded average of the left and above
+ * quantization-group neighbours' QPs; a missing neighbour falls back to
+ * the last coded QP.
+ * \param   uiCurrAbsIdxInCtu CU-relative partition index
+ * \returns Char   reference QP value
+ */
+Char TComDataCU::getRefQP( UInt uiCurrAbsIdxInCtu )
+{
+  UInt lPartIdx = MAX_UINT;
+  UInt aPartIdx = MAX_UINT;
+  TComDataCU* cULeft  = getQpMinCuLeft ( lPartIdx, m_absZIdxInCtu + uiCurrAbsIdxInCtu );
+  TComDataCU* cUAbove = getQpMinCuAbove( aPartIdx, m_absZIdxInCtu + uiCurrAbsIdxInCtu );
+  // (left + above + 1) >> 1 : average with rounding.
+  return (((cULeft? cULeft->getQP( lPartIdx ): getLastCodedQP( uiCurrAbsIdxInCtu )) + (cUAbove? cUAbove->getQP( aPartIdx ): getLastCodedQP( uiCurrAbsIdxInCtu )) + 1) >> 1);
+}
+
+/** Walk backwards from iAbsPartIdx-1 over partitions whose prediction mode
+ * is still unset (NUMBER_OF_PREDICTION_MODES) and return the first valid
+ * index, or a negative value if none precedes it within this CU.
+ */
+Int TComDataCU::getLastValidPartIdx( Int iAbsPartIdx )
+{
+  Int idx = iAbsPartIdx - 1;
+  while ( idx >= 0 && getPredictionMode( idx ) == NUMBER_OF_PREDICTION_MODES )
+  {
+    // Skip an entire un-coded CU at once: it spans numPart >> (2*depth) partitions.
+    idx -= m_uiNumPartition >> ( getDepth( idx ) << 1 );
+  }
+  return idx;
+}
+
+/** Return the QP of the last partition coded before uiAbsPartIdx.
+ * Searches backwards within this CU, then recurses into the enclosing CTU,
+ * then into the previous CTU of the same slice/tile/wavefront row, and
+ * finally falls back to the slice-level QP.
+ */
+Char TComDataCU::getLastCodedQP( UInt uiAbsPartIdx )
+{
+  // Mask the index down to the start of its quantization group.
+  UInt uiQUPartIdxMask = ~((1<<((g_uiMaxCUDepth - getSlice()->getPPS()->getMaxCuDQPDepth())<<1))-1);
+  Int iLastValidPartIdx = getLastValidPartIdx( uiAbsPartIdx&uiQUPartIdxMask ); // A idx will be invalid if it is off the right or bottom edge of the picture.
+  // If this CU is in the first CTU of the slice and there is no valid part before this one, use slice QP
+  if ( getPic()->getPicSym()->getCtuTsToRsAddrMap(getSlice()->getSliceCurStartCtuTsAddr()) == getCtuRsAddr() && Int(getZorderIdxInCtu())+iLastValidPartIdx<0)
+  {
+    return getSlice()->getSliceQp();
+  }
+  else if ( iLastValidPartIdx >= 0 )
+  {
+    // If there is a valid part within the current Sub-CU, use it
+    return getQP( iLastValidPartIdx );
+  }
+  else
+  {
+    if ( getZorderIdxInCtu() > 0 )
+    {
+      // If this wasn't the first sub-cu within the Ctu, explore the CTU itself.
+      return getPic()->getCtu( getCtuRsAddr() )->getLastCodedQP( getZorderIdxInCtu() ); // TODO - remove this recursion
+    }
+    else if ( getPic()->getPicSym()->getCtuRsToTsAddrMap(getCtuRsAddr()) > 0
+      && CUIsFromSameSliceTileAndWavefrontRow(getPic()->getCtu(getPic()->getPicSym()->getCtuTsToRsAddrMap(getPic()->getPicSym()->getCtuRsToTsAddrMap(getCtuRsAddr())-1))) )
+    {
+      // If this isn't the first Ctu (how can it be due to the first 'if'?), and the previous Ctu is from the same tile, examine the previous Ctu.
+      return getPic()->getCtu( getPic()->getPicSym()->getCtuTsToRsAddrMap(getPic()->getPicSym()->getCtuRsToTsAddrMap(getCtuRsAddr())-1) )->getLastCodedQP( getPic()->getNumPartitionsInCtu() );  // TODO - remove this recursion
+    }
+    else
+    {
+      // No other options available - use the slice-level QP.
+      return getSlice()->getSliceQp();
+    }
+  }
+}
+
+
+/** Check whether the CU is coded in lossless coding mode.
+ * True only when the PPS enables transquant bypass AND this partition
+ * actually uses it.
+ * \param   absPartIdx partition index within the CU
+ * \returns true if the CU is coded in lossless coding mode; false otherwise
+ */
+Bool TComDataCU::isLosslessCoded(UInt absPartIdx)
+{
+  if ( !getSlice()->getPPS()->getTransquantBypassEnableFlag() )
+  {
+    return false;
+  }
+  return getCUTransquantBypass( absPartIdx );
+}
+
+
+/** Get allowed chroma intra modes.
+ * Fills uiModeList with the four fixed candidates plus the derived mode
+ * DM_CHROMA_IDX; if the co-located luma mode duplicates a fixed candidate,
+ * that candidate is replaced by angular mode 34 so all entries stay distinct.
+ * \param uiAbsPartIdx partition index within the CU
+ * \param uiModeList   output array of NUM_CHROMA_MODE entries
+ */
+Void TComDataCU::getAllowedChromaDir( UInt uiAbsPartIdx, UInt uiModeList[NUM_CHROMA_MODE] )
+{
+  assert(4<NUM_CHROMA_MODE);
+  uiModeList[0] = PLANAR_IDX;
+  uiModeList[1] = VER_IDX;
+  uiModeList[2] = HOR_IDX;
+  uiModeList[3] = DC_IDX;
+  uiModeList[4] = DM_CHROMA_IDX;
+
+  const UInt lumaMode = getIntraDir( CHANNEL_TYPE_LUMA, uiAbsPartIdx );
+
+  // Replace at most one duplicated fixed candidate (DM slot excluded).
+  for( Int candIdx = 0; candIdx < NUM_CHROMA_MODE - 1; candIdx++ )
+  {
+    if( lumaMode == uiModeList[candIdx] )
+    {
+      uiModeList[candIdx] = 34; // VER+8 mode
+      break;
+    }
+  }
+}
+
+/** Get most probable intra modes
+*\param   uiAbsPartIdx    partition index within the CU
+*\param   uiIntraDirPred  pointer to the array for MPM storage
+*\param   compID          component whose channel type selects the direction read
+*\param   piMode          it is set with MPM mode in case both MPM are equal. It is used to restrict RD search at encode side.
+*\returns Number of MPM
+*/
+Int TComDataCU::getIntraDirPredictor( UInt uiAbsPartIdx, Int uiIntraDirPred[NUM_MOST_PROBABLE_MODES], const ComponentID compID, Int* piMode  )
+{
+  TComDataCU* pcCULeft, *pcCUAbove;
+  UInt        LeftPartIdx  = MAX_UINT;
+  UInt        AbovePartIdx = MAX_UINT;
+  Int         iLeftIntraDir, iAboveIntraDir;
+  Int         uiPredNum = 0;
+
+  const ChannelType chType = toChannelType(compID);
+  const ChromaFormat chForm = getPic()->getChromaFormat();
+  // Get intra direction of left PU
+  pcCULeft = getPULeft( LeftPartIdx, m_absZIdxInCtu + uiAbsPartIdx );
+
+  // For chroma, map to the co-located luma partition before reading.
+  if (isChroma(compID)) LeftPartIdx = getChromasCorrespondingPULumaIdx(LeftPartIdx, chForm);
+  // Missing or non-intra neighbour defaults to DC.
+  iLeftIntraDir  = pcCULeft ? ( pcCULeft->isIntra( LeftPartIdx ) ? pcCULeft->getIntraDir( chType, LeftPartIdx ) : DC_IDX ) : DC_IDX;
+
+  // Get intra direction of above PU
+  pcCUAbove = getPUAbove( AbovePartIdx, m_absZIdxInCtu + uiAbsPartIdx, true, true );
+
+  if (isChroma(compID)) AbovePartIdx = getChromasCorrespondingPULumaIdx(AbovePartIdx, chForm);
+  iAboveIntraDir = pcCUAbove ? ( pcCUAbove->isIntra( AbovePartIdx ) ? pcCUAbove->getIntraDir( chType, AbovePartIdx ) : DC_IDX ) : DC_IDX;
+
+  // Resolve the chroma "derived" mode to the neighbour's actual luma direction.
+  if (isChroma(chType))
+  {
+    if (iLeftIntraDir  == DM_CHROMA_IDX) iLeftIntraDir  = pcCULeft-> getIntraDir( CHANNEL_TYPE_LUMA, LeftPartIdx  );
+    if (iAboveIntraDir == DM_CHROMA_IDX) iAboveIntraDir = pcCUAbove->getIntraDir( CHANNEL_TYPE_LUMA, AbovePartIdx );
+  }
+
+  assert (2<NUM_MOST_PROBABLE_MODES);
+  uiPredNum = NUM_MOST_PROBABLE_MODES;
+  if(iLeftIntraDir == iAboveIntraDir)
+  {
+    if( piMode )
+    {
+      *piMode = 1;
+    }
+
+    if (iLeftIntraDir > 1) // angular modes
+    {
+      // Shared angular mode plus its two adjacent angular modes,
+      // wrapped within the angular range [2..33] via mod-32 arithmetic.
+      uiIntraDirPred[0] = iLeftIntraDir;
+      uiIntraDirPred[1] = ((iLeftIntraDir + 29) % 32) + 2;
+      uiIntraDirPred[2] = ((iLeftIntraDir - 1 ) % 32) + 2;
+    }
+    else //non-angular
+    {
+      uiIntraDirPred[0] = PLANAR_IDX;
+      uiIntraDirPred[1] = DC_IDX;
+      uiIntraDirPred[2] = VER_IDX;
+    }
+  }
+  else
+  {
+    if( piMode )
+    {
+      *piMode = 2;
+    }
+    // Two distinct neighbour modes; third MPM avoids duplicating either.
+    uiIntraDirPred[0] = iLeftIntraDir;
+    uiIntraDirPred[1] = iAboveIntraDir;
+
+    if (iLeftIntraDir && iAboveIntraDir ) //both modes are non-planar
+    {
+      uiIntraDirPred[2] = PLANAR_IDX;
+    }
+    else
+    {
+      uiIntraDirPred[2] =  (iLeftIntraDir+iAboveIntraDir)<2? VER_IDX : DC_IDX;
+    }
+  }
+  for (Int i=0; i<uiPredNum; i++)
+    assert(uiIntraDirPred[i] < 35);
+
+  return uiPredNum;
+}
+
+/** Derive the CABAC context for the split flag: counts how many of the
+ * left and above neighbouring PUs are coded deeper than uiDepth (0..2).
+ */
+UInt TComDataCU::getCtxSplitFlag( UInt uiAbsPartIdx, UInt uiDepth )
+{
+  UInt neighPartIdx = 0;
+  UInt ctx = 0;
+
+  // Left neighbour deeper than the current depth contributes one.
+  TComDataCU* pcNeigh = getPULeft( neighPartIdx, m_absZIdxInCtu + uiAbsPartIdx );
+  if ( pcNeigh && pcNeigh->getDepth( neighPartIdx ) > uiDepth )
+  {
+    ctx++;
+  }
+
+  // Above neighbour likewise.
+  pcNeigh = getPUAbove( neighPartIdx, m_absZIdxInCtu + uiAbsPartIdx );
+  if ( pcNeigh && pcNeigh->getDepth( neighPartIdx ) > uiDepth )
+  {
+    ctx++;
+  }
+
+  return ctx;
+}
+
+/** Derives the CABAC context index for the coded-block-flag of a TU.
+ *  Chroma uses the relative transform depth directly; luma uses context 1
+ *  only at the transform-tree root (relative depth 0), otherwise 0.
+ */
+UInt TComDataCU::getCtxQtCbf( TComTU &rTu, const ChannelType chType )
+{
+  const UInt transformDepth = rTu.GetTransformDepthRel();
+  return isChroma(chType) ? transformDepth : ( ( transformDepth == 0 ) ? 1 : 0 );
+}
+
+/** Computes the log2 of the minimum luma transform-block size reachable
+ *  inside the CU at absPartIdx, given the SPS-signalled maximum transform
+ *  hierarchy depth (intra or inter as appropriate) plus the implicit extra
+ *  split implied by NxN intra or non-2Nx2N inter partitioning. The result
+ *  is clamped to the SPS [QuadtreeTULog2MinSize, QuadtreeTULog2MaxSize]
+ *  range.
+ */
+UInt TComDataCU::getQuadtreeTULog2MinSizeInCU( UInt absPartIdx )
+{
+  UInt log2CbSize = g_aucConvertToBit[getWidth( absPartIdx )] + 2;
+  PartSize  partSize  = getPartitionSize( absPartIdx );
+  UInt quadtreeTUMaxDepth = isIntra( absPartIdx ) ? m_pcSlice->getSPS()->getQuadtreeTUMaxDepthIntra() : m_pcSlice->getSPS()->getQuadtreeTUMaxDepthInter();
+  Int intraSplitFlag = ( isIntra( absPartIdx ) && partSize == SIZE_NxN ) ? 1 : 0;
+  Int interSplitFlag = ((quadtreeTUMaxDepth == 1) && isInter( absPartIdx ) && (partSize != SIZE_2Nx2N) );
+
+  UInt log2MinTUSizeInCU = 0;
+  if (log2CbSize < (m_pcSlice->getSPS()->getQuadtreeTULog2MinSize() + quadtreeTUMaxDepth - 1 + interSplitFlag + intraSplitFlag) )
+  {
+    // when fully making use of signaled TUMaxDepth + inter/intraSplitFlag, resulting luma TB size is < QuadtreeTULog2MinSize
+    log2MinTUSizeInCU = m_pcSlice->getSPS()->getQuadtreeTULog2MinSize();
+  }
+  else
+  {
+    // when fully making use of signaled TUMaxDepth + inter/intraSplitFlag, resulting luma TB size is still >= QuadtreeTULog2MinSize
+    log2MinTUSizeInCU = log2CbSize - ( quadtreeTUMaxDepth - 1 + interSplitFlag + intraSplitFlag); // stop when trafoDepth == hierarchy_depth = splitFlag
+    if ( log2MinTUSizeInCU > m_pcSlice->getSPS()->getQuadtreeTULog2MaxSize())
+    {
+      // when fully making use of signaled TUMaxDepth + inter/intraSplitFlag, resulting luma TB size is still > QuadtreeTULog2MaxSize
+      log2MinTUSizeInCU = m_pcSlice->getSPS()->getQuadtreeTULog2MaxSize();
+    }
+  }
+  return log2MinTUSizeInCU;
+}
+
+/** Derives the CABAC context index for cu_skip_flag: counts how many of
+ *  the left/above neighbouring PUs are skip-coded. Missing neighbours
+ *  count as 0, so the result is in [0,2].
+ */
+UInt TComDataCU::getCtxSkipFlag( UInt uiAbsPartIdx )
+{
+  TComDataCU* pcTempCU;
+  UInt        uiTempPartIdx;
+  UInt        uiCtx = 0;
+
+  // Get BCBP of left PU
+  pcTempCU = getPULeft( uiTempPartIdx, m_absZIdxInCtu + uiAbsPartIdx );
+  uiCtx    = ( pcTempCU ) ? pcTempCU->isSkipped( uiTempPartIdx ) : 0;
+
+  // Get BCBP of above PU
+  pcTempCU = getPUAbove( uiTempPartIdx, m_absZIdxInCtu + uiAbsPartIdx );
+  uiCtx   += ( pcTempCU ) ? pcTempCU->isSkipped( uiTempPartIdx ) : 0;
+
+  return uiCtx;
+}
+
+/** Context index for inter_pred_idc: simply the CU depth at uiAbsPartIdx. */
+UInt TComDataCU::getCtxInterDir( UInt uiAbsPartIdx )
+{
+  return getDepth( uiAbsPartIdx );
+}
+
+
+/** Returns the root coded-block-flag of the transform quadtree: non-zero if
+ *  any of Y/Cb/Cr has a non-zero cbf at transform depth 0. Chroma components
+ *  are only consulted when the picture actually contains them.
+ */
+UChar TComDataCU::getQtRootCbf( UInt uiIdx )
+{
+  const UInt numberValidComponents = getPic()->getNumberValidComponents();
+  return getCbf( uiIdx, COMPONENT_Y, 0 )
+          || ((numberValidComponents > COMPONENT_Cb) && getCbf( uiIdx, COMPONENT_Cb, 0 ))
+          || ((numberValidComponents > COMPONENT_Cr) && getCbf( uiIdx, COMPONENT_Cr, 0 ));
+}
+
+/** Sets the cbf value of every component for all sub-partitions covered by
+ *  the partition at uiAbsPartIdx/uiDepth.
+ */
+Void TComDataCU::setCbfSubParts( const UInt uiCbf[MAX_NUM_COMPONENT], UInt uiAbsPartIdx, UInt uiDepth )
+{
+  UInt uiCurrPartNumb = m_pcPic->getNumPartitionsInCtu() >> (uiDepth << 1);
+  for(UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+  {
+    memset( m_puhCbf[comp] + uiAbsPartIdx, uiCbf[comp], sizeof( UChar ) * uiCurrPartNumb );
+  }
+}
+
+/** Sets the cbf value of one component for all sub-partitions covered by
+ *  the partition at uiAbsPartIdx/uiDepth.
+ */
+Void TComDataCU::setCbfSubParts( UInt uiCbf, ComponentID compID, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  UInt uiCurrPartNumb = m_pcPic->getNumPartitionsInCtu() >> (uiDepth << 1);
+  memset( m_puhCbf[compID] + uiAbsPartIdx, uiCbf, sizeof( UChar ) * uiCurrPartNumb );
+}
+
+/** Sets a coded block flag for all sub-partitions of a prediction unit,
+ * using the partition-size-aware setSubPart() helper.
+ * \param uiCbf       The value of the coded block flag to be set
+ * \param compID      Colour component whose cbf array is written
+ * \param uiAbsPartIdx Z-order index of the CU within the CTU
+ * \param uiPartIdx   Prediction-unit index within the CU
+ * \param uiDepth     CU depth (determines the number of covered partitions)
+ * \returns Void
+ */
+Void TComDataCU::setCbfSubParts ( UInt uiCbf, ComponentID compID, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth )
+{
+  setSubPart<UChar>( uiCbf, m_puhCbf[compID], uiAbsPartIdx, uiDepth, uiPartIdx );
+}
+
+/** Sets the cbf of one component for an explicit run of uiCoveredPartIdxes
+ *  consecutive Z-order partitions starting at uiAbsPartIdx.
+ */
+Void TComDataCU::setCbfPartRange ( UInt uiCbf, ComponentID compID, UInt uiAbsPartIdx, UInt uiCoveredPartIdxes )
+{
+  memset((m_puhCbf[compID] + uiAbsPartIdx), uiCbf, (sizeof(UChar) * uiCoveredPartIdxes));
+}
+
+/** ORs the given cbf bits into each of uiCoveredPartIdxes consecutive
+ *  Z-order partitions starting at uiAbsPartIdx (a memset cannot express a
+ *  bitwise OR, hence the explicit loop).
+ */
+Void TComDataCU::bitwiseOrCbfPartRange( UInt uiCbf, ComponentID compID, UInt uiAbsPartIdx, UInt uiCoveredPartIdxes )
+{
+  UChar *pCbfBase = m_puhCbf[compID] + uiAbsPartIdx;
+  for (UInt ui = 0; ui < uiCoveredPartIdxes; ui++)
+  {
+    pCbfBase[ui] |= uiCbf;
+  }
+}
+
+/** Sets the CU depth for all sub-partitions covered at uiAbsPartIdx/uiDepth. */
+Void TComDataCU::setDepthSubParts( UInt uiDepth, UInt uiAbsPartIdx )
+{
+  UInt uiCurrPartNumb = m_pcPic->getNumPartitionsInCtu() >> (uiDepth << 1);
+  memset( m_puhDepth + uiAbsPartIdx, uiDepth, sizeof(UChar)*uiCurrPartNumb );
+}
+
+/** True if the absolute Z-order address of uiAbsPartIdx is aligned to the
+ *  start of a CU at the given depth (i.e. it is the first partition of that
+ *  CU).
+ */
+Bool TComDataCU::isFirstAbsZorderIdxInDepth (UInt uiAbsPartIdx, UInt uiDepth)
+{
+  UInt uiPartNumb = m_pcPic->getNumPartitionsInCtu() >> (uiDepth << 1);
+  return (((m_absZIdxInCtu + uiAbsPartIdx)% uiPartNumb) == 0);
+}
+
+/** Sets the partition size for all sub-partitions covered at
+ *  uiAbsPartIdx/uiDepth. The assert guards the memset: it is only valid
+ *  because the PartSize array elements are one byte wide.
+ */
+Void TComDataCU::setPartSizeSubParts( PartSize eMode, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  assert( sizeof( *m_pePartSize) == 1 );
+  memset( m_pePartSize + uiAbsPartIdx, eMode, m_pcPic->getNumPartitionsInCtu() >> ( 2 * uiDepth ) );
+}
+
+/** Sets the transquant-bypass flag for all sub-partitions covered at
+ *  uiAbsPartIdx/uiDepth.
+ */
+Void TComDataCU::setCUTransquantBypassSubParts( Bool flag, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  memset( m_CUTransquantBypass + uiAbsPartIdx, flag, m_pcPic->getNumPartitionsInCtu() >> ( 2 * uiDepth ) );
+}
+
+/** Sets the skip flag for all sub-partitions covered at absPartIdx/depth.
+ *  The assert guards the memset (elements must be one byte wide).
+ */
+Void TComDataCU::setSkipFlagSubParts( Bool skip, UInt absPartIdx, UInt depth )
+{
+  assert( sizeof( *m_skipFlag) == 1 );
+  memset( m_skipFlag + absPartIdx, skip, m_pcPic->getNumPartitionsInCtu() >> ( 2 * depth ) );
+}
+
+/** Sets the prediction mode (intra/inter) for all sub-partitions covered at
+ *  uiAbsPartIdx/uiDepth. The assert guards the memset (one-byte elements).
+ */
+Void TComDataCU::setPredModeSubParts( PredMode eMode, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  assert( sizeof( *m_pePredMode) == 1 );
+  memset( m_pePredMode + uiAbsPartIdx, eMode, m_pcPic->getNumPartitionsInCtu() >> ( 2 * uiDepth ) );
+}
+
+/** Sets the chroma QP adjustment index for all sub-partitions covered at
+ *  absPartIdx/depth. The assert guards the memset (one-byte elements).
+ */
+Void TComDataCU::setChromaQpAdjSubParts( UChar val, Int absPartIdx, Int depth )
+{
+  assert( sizeof(*m_ChromaQpAdj) == 1 );
+  memset( m_ChromaQpAdj + absPartIdx, val, m_pcPic->getNumPartitionsInCtu() >> ( 2 * depth ) );
+}
+
+/** Recursively propagates a QP value into sub-CUs until the first leaf with
+ *  a non-zero cbf (in any valid component) is found. Leaves before that
+ *  point inherit qp; once foundNonZeroCbf becomes true the remaining
+ *  recursion does nothing (those CUs keep their own signalled QP).
+ */
+Void TComDataCU::setQPSubCUs( Int qp, UInt absPartIdx, UInt depth, Bool &foundNonZeroCbf )
+{
+  UInt currPartNumb = m_pcPic->getNumPartitionsInCtu() >> (depth << 1);
+  UInt currPartNumQ = currPartNumb >> 2;
+  const UInt numValidComp = m_pcPic->getNumberValidComponents();
+
+  if(!foundNonZeroCbf)
+  {
+    if(getDepth(absPartIdx) > depth)
+    {
+      // This CU is split further: recurse into its four quadrants.
+      for ( UInt partUnitIdx = 0; partUnitIdx < 4; partUnitIdx++ )
+      {
+        setQPSubCUs( qp, absPartIdx+partUnitIdx*currPartNumQ, depth+1, foundNonZeroCbf );
+      }
+    }
+    else
+    {
+      if(getCbf( absPartIdx, COMPONENT_Y ) || (numValidComp>COMPONENT_Cb && getCbf( absPartIdx, COMPONENT_Cb )) || (numValidComp>COMPONENT_Cr && getCbf( absPartIdx, COMPONENT_Cr) ) )
+      {
+        foundNonZeroCbf = true;
+      }
+      else
+      {
+        setQPSubParts(qp, absPartIdx, depth);
+      }
+    }
+  }
+}
+
+/** Sets the QP for all sub-partitions covered at uiAbsPartIdx/uiDepth. */
+Void TComDataCU::setQPSubParts( Int qp, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  const UInt numPart = m_pcPic->getNumPartitionsInCtu() >> (uiDepth << 1);
+  memset(m_phQP+uiAbsPartIdx, qp, numPart);
+}
+
+/** Sets the intra prediction direction of one channel (luma or chroma) for
+ *  all sub-partitions covered at absPartIdx/depth.
+ */
+Void TComDataCU::setIntraDirSubParts( const ChannelType channelType, const UInt dir, const UInt absPartIdx, const UInt depth )
+{
+  UInt numPart = m_pcPic->getNumPartitionsInCtu() >> (depth << 1);
+  memset( m_puhIntraDir[channelType] + absPartIdx, dir,sizeof(UChar)*numPart );
+}
+
+/** Fills the per-partition array puhBaseCtu with uiParameter over exactly
+ *  the Z-order ranges covered by prediction unit uiPUIdx of the CU at
+ *  uiCUAddr/uiCUDepth. Each case encodes the Z-order layout of one
+ *  PartSize: uiCurrPartNumQ is a quarter of the CU's partition count, and
+ *  the memset offsets/lengths map each PU's raster rectangle onto runs of
+ *  Z-order indices (asymmetric AMP modes need up to four disjoint runs).
+ */
+template<typename T>
+Void TComDataCU::setSubPart( T uiParameter, T* puhBaseCtu, UInt uiCUAddr, UInt uiCUDepth, UInt uiPUIdx )
+{
+  assert( sizeof(T) == 1 ); // Using memset() works only for types of size 1
+
+  UInt uiCurrPartNumQ = (m_pcPic->getNumPartitionsInCtu() >> (2 * uiCUDepth)) >> 2;
+  switch ( m_pePartSize[ uiCUAddr ] )
+  {
+    case SIZE_2Nx2N:
+      memset( puhBaseCtu + uiCUAddr, uiParameter, 4 * uiCurrPartNumQ );
+      break;
+    case SIZE_2NxN:
+      memset( puhBaseCtu + uiCUAddr, uiParameter, 2 * uiCurrPartNumQ );
+      break;
+    case SIZE_Nx2N:
+      // Left/right halves are two disjoint Z-order runs of one quarter each.
+      memset( puhBaseCtu + uiCUAddr, uiParameter, uiCurrPartNumQ );
+      memset( puhBaseCtu + uiCUAddr + 2 * uiCurrPartNumQ, uiParameter, uiCurrPartNumQ );
+      break;
+    case SIZE_NxN:
+      memset( puhBaseCtu + uiCUAddr, uiParameter, uiCurrPartNumQ );
+      break;
+    case SIZE_2NxnU:
+      if ( uiPUIdx == 0 )
+      {
+        memset( puhBaseCtu + uiCUAddr, uiParameter, (uiCurrPartNumQ >> 1) );
+        memset( puhBaseCtu + uiCUAddr + uiCurrPartNumQ, uiParameter, (uiCurrPartNumQ >> 1) );
+      }
+      else if ( uiPUIdx == 1 )
+      {
+        memset( puhBaseCtu + uiCUAddr, uiParameter, (uiCurrPartNumQ >> 1) );
+        memset( puhBaseCtu + uiCUAddr + uiCurrPartNumQ, uiParameter, ((uiCurrPartNumQ >> 1) + (uiCurrPartNumQ << 1)) );
+      }
+      else
+      {
+        assert(0);
+      }
+      break;
+    case SIZE_2NxnD:
+      if ( uiPUIdx == 0 )
+      {
+        memset( puhBaseCtu + uiCUAddr, uiParameter, ((uiCurrPartNumQ << 1) + (uiCurrPartNumQ >> 1)) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ << 1) + uiCurrPartNumQ, uiParameter, (uiCurrPartNumQ >> 1) );
+      }
+      else if ( uiPUIdx == 1 )
+      {
+        memset( puhBaseCtu + uiCUAddr, uiParameter, (uiCurrPartNumQ >> 1) );
+        memset( puhBaseCtu + uiCUAddr + uiCurrPartNumQ, uiParameter, (uiCurrPartNumQ >> 1) );
+      }
+      else
+      {
+        assert(0);
+      }
+      break;
+    case SIZE_nLx2N:
+      if ( uiPUIdx == 0 )
+      {
+        memset( puhBaseCtu + uiCUAddr, uiParameter, (uiCurrPartNumQ >> 2) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ >> 1), uiParameter, (uiCurrPartNumQ >> 2) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ << 1), uiParameter, (uiCurrPartNumQ >> 2) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ << 1) + (uiCurrPartNumQ >> 1), uiParameter, (uiCurrPartNumQ >> 2) );
+      }
+      else if ( uiPUIdx == 1 )
+      {
+        memset( puhBaseCtu + uiCUAddr, uiParameter, (uiCurrPartNumQ >> 2) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ >> 1), uiParameter, (uiCurrPartNumQ + (uiCurrPartNumQ >> 2)) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ << 1), uiParameter, (uiCurrPartNumQ >> 2) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ << 1) + (uiCurrPartNumQ >> 1), uiParameter, (uiCurrPartNumQ + (uiCurrPartNumQ >> 2)) );
+      }
+      else
+      {
+        assert(0);
+      }
+      break;
+    case SIZE_nRx2N:
+      if ( uiPUIdx == 0 )
+      {
+        memset( puhBaseCtu + uiCUAddr, uiParameter, (uiCurrPartNumQ + (uiCurrPartNumQ >> 2)) );
+        memset( puhBaseCtu + uiCUAddr + uiCurrPartNumQ + (uiCurrPartNumQ >> 1), uiParameter, (uiCurrPartNumQ >> 2) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ << 1), uiParameter, (uiCurrPartNumQ + (uiCurrPartNumQ >> 2)) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ << 1) + uiCurrPartNumQ + (uiCurrPartNumQ >> 1), uiParameter, (uiCurrPartNumQ >> 2) );
+      }
+      else if ( uiPUIdx == 1 )
+      {
+        memset( puhBaseCtu + uiCUAddr, uiParameter, (uiCurrPartNumQ >> 2) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ >> 1), uiParameter, (uiCurrPartNumQ >> 2) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ << 1), uiParameter, (uiCurrPartNumQ >> 2) );
+        memset( puhBaseCtu + uiCUAddr + (uiCurrPartNumQ << 1) + (uiCurrPartNumQ >> 1), uiParameter, (uiCurrPartNumQ >> 2) );
+      }
+      else
+      {
+        assert(0);
+      }
+      break;
+    default:
+      assert( 0 );
+      break;
+  }
+}
+
+/** Sets the merge flag over the sub-partitions of PU uiPartIdx. */
+Void TComDataCU::setMergeFlagSubParts ( Bool bMergeFlag, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth )
+{
+  setSubPart( bMergeFlag, m_pbMergeFlag, uiAbsPartIdx, uiDepth, uiPartIdx );
+}
+
+/** Sets the merge candidate index over the sub-partitions of PU uiPartIdx. */
+Void TComDataCU::setMergeIndexSubParts ( UInt uiMergeIndex, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth )
+{
+  setSubPart<UChar>( uiMergeIndex, m_puhMergeIndex, uiAbsPartIdx, uiDepth, uiPartIdx );
+}
+
+/** Sets the inter prediction direction over the sub-partitions of PU uiPartIdx. */
+Void TComDataCU::setInterDirSubParts( UInt uiDir, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth )
+{
+  setSubPart<UChar>( uiDir, m_puhInterDir, uiAbsPartIdx, uiDepth, uiPartIdx );
+}
+
+/** Sets the MVP candidate index for one reference list over the sub-partitions of PU uiPartIdx. */
+Void TComDataCU::setMVPIdxSubParts( Int iMVPIdx, RefPicList eRefPicList, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth )
+{
+  setSubPart<Char>( iMVPIdx, m_apiMVPIdx[eRefPicList], uiAbsPartIdx, uiDepth, uiPartIdx );
+}
+
+/** Sets the number of MVP candidates for one reference list over the sub-partitions of PU uiPartIdx. */
+Void TComDataCU::setMVPNumSubParts( Int iMVPNum, RefPicList eRefPicList, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth )
+{
+  setSubPart<Char>( iMVPNum, m_apiMVPNum[eRefPicList], uiAbsPartIdx, uiDepth, uiPartIdx );
+}
+
+
+/** Sets the transform index for all sub-partitions covered at
+ *  uiAbsPartIdx/uiDepth.
+ */
+Void TComDataCU::setTrIdxSubParts( UInt uiTrIdx, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  UInt uiCurrPartNumb = m_pcPic->getNumPartitionsInCtu() >> (uiDepth << 1);
+
+  memset( m_puhTrIdx + uiAbsPartIdx, uiTrIdx, sizeof(UChar)*uiCurrPartNumb );
+}
+
+/** Sets the transform-skip flag of every component for all sub-partitions
+ *  covered at uiAbsPartIdx/uiDepth.
+ */
+Void TComDataCU::setTransformSkipSubParts( const UInt useTransformSkip[MAX_NUM_COMPONENT], UInt uiAbsPartIdx, UInt uiDepth )
+{
+  UInt uiCurrPartNumb = m_pcPic->getNumPartitionsInCtu() >> (uiDepth << 1);
+
+  for(UInt i=0; i<MAX_NUM_COMPONENT; i++)
+  {
+    memset( m_puhTransformSkip[i] + uiAbsPartIdx, useTransformSkip[i], sizeof( UChar ) * uiCurrPartNumb );
+  }
+}
+
+/** Sets the transform-skip flag of one component for all sub-partitions
+ *  covered at uiAbsPartIdx/uiDepth.
+ */
+Void TComDataCU::setTransformSkipSubParts( UInt useTransformSkip, ComponentID compID, UInt uiAbsPartIdx, UInt uiDepth)
+{
+  UInt uiCurrPartNumb = m_pcPic->getNumPartitionsInCtu() >> (uiDepth << 1);
+
+  memset( m_puhTransformSkip[compID] + uiAbsPartIdx, useTransformSkip, sizeof( UChar ) * uiCurrPartNumb );
+}
+
+/** Sets the transform-skip flag of one component for an explicit run of
+ *  uiCoveredPartIdxes consecutive Z-order partitions starting at uiAbsPartIdx.
+ */
+Void TComDataCU::setTransformSkipPartRange ( UInt useTransformSkip, ComponentID compID, UInt uiAbsPartIdx, UInt uiCoveredPartIdxes )
+{
+  memset((m_puhTransformSkip[compID] + uiAbsPartIdx), useTransformSkip, (sizeof(UChar) * uiCoveredPartIdxes));
+}
+
+/** Sets the cross-component prediction alpha of one component for an
+ *  explicit run of uiCoveredPartIdxes consecutive partitions.
+ */
+Void TComDataCU::setCrossComponentPredictionAlphaPartRange( Char alphaValue, ComponentID compID, UInt uiAbsPartIdx, UInt uiCoveredPartIdxes )
+{
+  memset((m_crossComponentPredictionAlpha[compID] + uiAbsPartIdx), alphaValue, (sizeof(Char) * uiCoveredPartIdxes));
+}
+
+/** Sets the explicit RDPCM mode of one component for an explicit run of
+ *  uiCoveredPartIdxes consecutive partitions.
+ */
+Void TComDataCU::setExplicitRdpcmModePartRange ( UInt rdpcmMode, ComponentID compID, UInt uiAbsPartIdx, UInt uiCoveredPartIdxes )
+{
+  memset((m_explicitRdpcmMode[compID] + uiAbsPartIdx), rdpcmMode, (sizeof(UChar) * uiCoveredPartIdxes));
+}
+
+/** Sets the CU width and height for all sub-partitions covered at
+ *  uiAbsPartIdx/uiDepth.
+ */
+Void TComDataCU::setSizeSubParts( UInt uiWidth, UInt uiHeight, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  UInt uiCurrPartNumb = m_pcPic->getNumPartitionsInCtu() >> (uiDepth << 1);
+
+  memset( m_puhWidth  + uiAbsPartIdx, uiWidth,  sizeof(UChar)*uiCurrPartNumb );
+  memset( m_puhHeight + uiAbsPartIdx, uiHeight, sizeof(UChar)*uiCurrPartNumb );
+}
+
+/** Number of prediction units implied by the partition size stored at
+ *  uiAbsPartIdx: 1 for 2Nx2N, 4 for NxN, 2 for every rectangular or
+ *  asymmetric split.
+ */
+UChar TComDataCU::getNumPartitions(const UInt uiAbsPartIdx)
+{
+  switch ( m_pePartSize[uiAbsPartIdx] )
+  {
+    case SIZE_2Nx2N:
+      return 1;
+    case SIZE_NxN:
+      return 4;
+    case SIZE_2NxN:
+    case SIZE_Nx2N:
+    case SIZE_2NxnU:
+    case SIZE_2NxnD:
+    case SIZE_nLx2N:
+    case SIZE_nRx2N:
+      return 2;
+    default:
+      assert (0);
+      return 0;
+  }
+}
+
+/** Returns, for prediction unit uiPartIdx of this CU, its width/height in
+ *  samples and its Z-order address offset within the CU. The per-PartSize
+ *  offsets are fixed fractions of m_uiNumPartition (e.g. >>1 = half the CU,
+ *  >>3 = the quarter-height row of a 2NxnU split).
+ */
+Void TComDataCU::getPartIndexAndSize( UInt uiPartIdx, UInt& ruiPartAddr, Int& riWidth, Int& riHeight )
+{
+  switch ( m_pePartSize[0] )
+  {
+    case SIZE_2NxN:
+      riWidth = getWidth(0);      riHeight = getHeight(0) >> 1; ruiPartAddr = ( uiPartIdx == 0 )? 0 : m_uiNumPartition >> 1;
+      break;
+    case SIZE_Nx2N:
+      riWidth = getWidth(0) >> 1; riHeight = getHeight(0);      ruiPartAddr = ( uiPartIdx == 0 )? 0 : m_uiNumPartition >> 2;
+      break;
+    case SIZE_NxN:
+      riWidth = getWidth(0) >> 1; riHeight = getHeight(0) >> 1; ruiPartAddr = ( m_uiNumPartition >> 2 ) * uiPartIdx;
+      break;
+    case SIZE_2NxnU:
+      riWidth     = getWidth(0);
+      riHeight    = ( uiPartIdx == 0 ) ?  getHeight(0) >> 2 : ( getHeight(0) >> 2 ) + ( getHeight(0) >> 1 );
+      ruiPartAddr = ( uiPartIdx == 0 ) ? 0 : m_uiNumPartition >> 3;
+      break;
+    case SIZE_2NxnD:
+      riWidth     = getWidth(0);
+      riHeight    = ( uiPartIdx == 0 ) ?  ( getHeight(0) >> 2 ) + ( getHeight(0) >> 1 ) : getHeight(0) >> 2;
+      ruiPartAddr = ( uiPartIdx == 0 ) ? 0 : (m_uiNumPartition >> 1) + (m_uiNumPartition >> 3);
+      break;
+    case SIZE_nLx2N:
+      riWidth     = ( uiPartIdx == 0 ) ? getWidth(0) >> 2 : ( getWidth(0) >> 2 ) + ( getWidth(0) >> 1 );
+      riHeight    = getHeight(0);
+      ruiPartAddr = ( uiPartIdx == 0 ) ? 0 : m_uiNumPartition >> 4;
+      break;
+    case SIZE_nRx2N:
+      riWidth     = ( uiPartIdx == 0 ) ? ( getWidth(0) >> 2 ) + ( getWidth(0) >> 1 ) : getWidth(0) >> 2;
+      riHeight    = getHeight(0);
+      ruiPartAddr = ( uiPartIdx == 0 ) ? 0 : (m_uiNumPartition >> 2) + (m_uiNumPartition >> 4);
+      break;
+    default:
+      assert ( m_pePartSize[0] == SIZE_2Nx2N );
+      riWidth = getWidth(0);      riHeight = getHeight(0);      ruiPartAddr = 0;
+      break;
+  }
+}
+
+
+/** Copies the motion vector and reference index of pcCU at uiAbsPartIdx for
+ *  the given reference list into rcMvField. A NULL pcCU (out-of-boundary
+ *  neighbour) yields a zero MV with refIdx = NOT_VALID.
+ */
+Void TComDataCU::getMvField ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefPicList, TComMvField& rcMvField )
+{
+  if ( pcCU == NULL )  // OUT OF BOUNDARY
+  {
+    TComMv  cZeroMv;
+    rcMvField.setMvField( cZeroMv, NOT_VALID );
+    return;
+  }
+
+  TComCUMvField*  pcCUMvField = pcCU->getCUMvField( eRefPicList );
+  rcMvField.setMvField( pcCUMvField->getMv( uiAbsPartIdx ), pcCUMvField->getRefIdx( uiAbsPartIdx ) );
+}
+
+/** Derives the CTU-relative Z-order indices of the top-left and top-right
+ *  partitions of prediction unit uiPartIdx. The switch computes the PU
+ *  width in samples for each PartSize (asymmetric nLx2N/nRx2N widths depend
+ *  on which PU is asked for); the top-right index is then the top-left
+ *  raster address advanced by (PU width in min-CUs - 1) columns.
+ */
+Void TComDataCU::deriveLeftRightTopIdxGeneral ( UInt uiAbsPartIdx, UInt uiPartIdx, UInt& ruiPartIdxLT, UInt& ruiPartIdxRT )
+{
+  ruiPartIdxLT = m_absZIdxInCtu + uiAbsPartIdx;
+  UInt uiPUWidth = 0;
+
+  switch ( m_pePartSize[uiAbsPartIdx] )
+  {
+    case SIZE_2Nx2N: uiPUWidth = m_puhWidth[uiAbsPartIdx];  break;
+    case SIZE_2NxN:  uiPUWidth = m_puhWidth[uiAbsPartIdx];   break;
+    case SIZE_Nx2N:  uiPUWidth = m_puhWidth[uiAbsPartIdx]  >> 1;  break;
+    case SIZE_NxN:   uiPUWidth = m_puhWidth[uiAbsPartIdx]  >> 1; break;
+    case SIZE_2NxnU:   uiPUWidth = m_puhWidth[uiAbsPartIdx]; break;
+    case SIZE_2NxnD:   uiPUWidth = m_puhWidth[uiAbsPartIdx]; break;
+    case SIZE_nLx2N:
+      if ( uiPartIdx == 0 )
+      {
+        uiPUWidth = m_puhWidth[uiAbsPartIdx]  >> 2;
+      }
+      else if ( uiPartIdx == 1 )
+      {
+        uiPUWidth = (m_puhWidth[uiAbsPartIdx]  >> 1) + (m_puhWidth[uiAbsPartIdx]  >> 2);
+      }
+      else
+      {
+        assert(0);
+      }
+      break;
+    case SIZE_nRx2N:
+      if ( uiPartIdx == 0 )
+      {
+        uiPUWidth = (m_puhWidth[uiAbsPartIdx]  >> 1) + (m_puhWidth[uiAbsPartIdx]  >> 2);
+      }
+      else if ( uiPartIdx == 1 )
+      {
+        uiPUWidth = m_puhWidth[uiAbsPartIdx]  >> 2;
+      }
+      else
+      {
+        assert(0);
+      }
+      break;
+    default:
+      assert (0);
+      break;
+  }
+
+  ruiPartIdxRT = g_auiRasterToZscan [g_auiZscanToRaster[ ruiPartIdxLT ] + uiPUWidth / m_pcPic->getMinCUWidth() - 1 ];
+}
+
+/** Derives the CTU-relative Z-order index of the bottom-left partition of
+ *  prediction unit uiPartIdx. The switch computes the PU height in samples
+ *  for each PartSize (asymmetric 2NxnU/2NxnD heights depend on which PU is
+ *  asked for); the result is the PU's top-left raster address advanced by
+ *  (PU height in min-CUs - 1) rows.
+ */
+Void TComDataCU::deriveLeftBottomIdxGeneral( UInt uiAbsPartIdx, UInt uiPartIdx, UInt& ruiPartIdxLB )
+{
+  UInt uiPUHeight = 0;
+  switch ( m_pePartSize[uiAbsPartIdx] )
+  {
+    case SIZE_2Nx2N: uiPUHeight = m_puhHeight[uiAbsPartIdx];    break;
+    case SIZE_2NxN:  uiPUHeight = m_puhHeight[uiAbsPartIdx] >> 1;    break;
+    case SIZE_Nx2N:  uiPUHeight = m_puhHeight[uiAbsPartIdx];  break;
+    case SIZE_NxN:   uiPUHeight = m_puhHeight[uiAbsPartIdx] >> 1;    break;
+    case SIZE_2NxnU:
+      if ( uiPartIdx == 0 )
+      {
+        uiPUHeight = m_puhHeight[uiAbsPartIdx] >> 2;
+      }
+      else if ( uiPartIdx == 1 )
+      {
+        uiPUHeight = (m_puhHeight[uiAbsPartIdx] >> 1) + (m_puhHeight[uiAbsPartIdx] >> 2);
+      }
+      else
+      {
+        assert(0);
+      }
+      break;
+    case SIZE_2NxnD:
+      if ( uiPartIdx == 0 )
+      {
+        uiPUHeight = (m_puhHeight[uiAbsPartIdx] >> 1) + (m_puhHeight[uiAbsPartIdx] >> 2);
+      }
+      else if ( uiPartIdx == 1 )
+      {
+        uiPUHeight = m_puhHeight[uiAbsPartIdx] >> 2;
+      }
+      else
+      {
+        assert(0);
+      }
+      break;
+    case SIZE_nLx2N: uiPUHeight = m_puhHeight[uiAbsPartIdx];  break;
+    case SIZE_nRx2N: uiPUHeight = m_puhHeight[uiAbsPartIdx];  break;
+    default:
+      assert (0);
+      break;
+  }
+
+  ruiPartIdxLB      = g_auiRasterToZscan [g_auiZscanToRaster[ m_absZIdxInCtu + uiAbsPartIdx ] + ((uiPUHeight / m_pcPic->getMinCUHeight()) - 1)*m_pcPic->getNumPartInCtuWidth()];
+}
+
+/** Derives the CTU-relative Z-order indices of the top-left and top-right
+ *  partitions of PU uiPartIdx of this CU. Starts from the CU's own
+ *  top-left/top-right corners and adjusts both by a fixed fraction of
+ *  m_uiNumPartition according to the partition geometry (PU 0 of each
+ *  split keeps the CU corners).
+ */
+Void TComDataCU::deriveLeftRightTopIdx ( UInt uiPartIdx, UInt& ruiPartIdxLT, UInt& ruiPartIdxRT )
+{
+  ruiPartIdxLT = m_absZIdxInCtu;
+  ruiPartIdxRT = g_auiRasterToZscan [g_auiZscanToRaster[ ruiPartIdxLT ] + m_puhWidth[0] / m_pcPic->getMinCUWidth() - 1 ];
+
+  switch ( m_pePartSize[0] )
+  {
+    case SIZE_2Nx2N:                                                                                                                                break;
+    case SIZE_2NxN:
+      ruiPartIdxLT += ( uiPartIdx == 0 )? 0 : m_uiNumPartition >> 1; ruiPartIdxRT += ( uiPartIdx == 0 )? 0 : m_uiNumPartition >> 1;
+      break;
+    case SIZE_Nx2N:
+      ruiPartIdxLT += ( uiPartIdx == 0 )? 0 : m_uiNumPartition >> 2; ruiPartIdxRT -= ( uiPartIdx == 1 )? 0 : m_uiNumPartition >> 2;
+      break;
+    case SIZE_NxN:
+      ruiPartIdxLT += ( m_uiNumPartition >> 2 ) * uiPartIdx;         ruiPartIdxRT +=  ( m_uiNumPartition >> 2 ) * ( uiPartIdx - 1 );
+      break;
+    case SIZE_2NxnU:
+      ruiPartIdxLT += ( uiPartIdx == 0 )? 0 : m_uiNumPartition >> 3;
+      ruiPartIdxRT += ( uiPartIdx == 0 )? 0 : m_uiNumPartition >> 3;
+      break;
+    case SIZE_2NxnD:
+      ruiPartIdxLT += ( uiPartIdx == 0 )? 0 : ( m_uiNumPartition >> 1 ) + ( m_uiNumPartition >> 3 );
+      ruiPartIdxRT += ( uiPartIdx == 0 )? 0 : ( m_uiNumPartition >> 1 ) + ( m_uiNumPartition >> 3 );
+      break;
+    case SIZE_nLx2N:
+      ruiPartIdxLT += ( uiPartIdx == 0 )? 0 : m_uiNumPartition >> 4;
+      ruiPartIdxRT -= ( uiPartIdx == 1 )? 0 : ( m_uiNumPartition >> 2 ) + ( m_uiNumPartition >> 4 );
+      break;
+    case SIZE_nRx2N:
+      ruiPartIdxLT += ( uiPartIdx == 0 )? 0 : ( m_uiNumPartition >> 2 ) + ( m_uiNumPartition >> 4 );
+      ruiPartIdxRT -= ( uiPartIdx == 1 )? 0 : m_uiNumPartition >> 4;
+      break;
+    default:
+      assert (0);
+      break;
+  }
+
+}
+
+/** Derives the CTU-relative Z-order index of the bottom-left partition of
+ *  PU uiPartIdx. The anchor is the left column at half the CU height minus
+ *  one row; each case then adds (or, for 2NxnU PU 0, subtracts) the fixed
+ *  m_uiNumPartition fraction that lands on the PU's true bottom-left.
+ */
+Void TComDataCU::deriveLeftBottomIdx( UInt  uiPartIdx,      UInt&      ruiPartIdxLB )
+{
+  ruiPartIdxLB      = g_auiRasterToZscan [g_auiZscanToRaster[ m_absZIdxInCtu ] + ( ((m_puhHeight[0] / m_pcPic->getMinCUHeight())>>1) - 1)*m_pcPic->getNumPartInCtuWidth()];
+
+  switch ( m_pePartSize[0] )
+  {
+    case SIZE_2Nx2N:
+      ruiPartIdxLB += m_uiNumPartition >> 1;
+      break;
+    case SIZE_2NxN:
+      ruiPartIdxLB += ( uiPartIdx == 0 )? 0 : m_uiNumPartition >> 1;
+      break;
+    case SIZE_Nx2N:
+      ruiPartIdxLB += ( uiPartIdx == 0 )? m_uiNumPartition >> 1 : (m_uiNumPartition >> 2)*3;
+      break;
+    case SIZE_NxN:
+      ruiPartIdxLB += ( m_uiNumPartition >> 2 ) * uiPartIdx;
+      break;
+    case SIZE_2NxnU:
+      ruiPartIdxLB += ( uiPartIdx == 0 ) ? -((Int)m_uiNumPartition >> 3) : m_uiNumPartition >> 1;
+      break;
+    case SIZE_2NxnD:
+      ruiPartIdxLB += ( uiPartIdx == 0 ) ? (m_uiNumPartition >> 2) + (m_uiNumPartition >> 3): m_uiNumPartition >> 1;
+      break;
+    case SIZE_nLx2N:
+      ruiPartIdxLB += ( uiPartIdx == 0 ) ? m_uiNumPartition >> 1 : (m_uiNumPartition >> 1) + (m_uiNumPartition >> 4);
+      break;
+    case SIZE_nRx2N:
+      ruiPartIdxLB += ( uiPartIdx == 0 ) ? m_uiNumPartition >> 1 : (m_uiNumPartition >> 1) + (m_uiNumPartition >> 2) + (m_uiNumPartition >> 4);
+      break;
+    default:
+      assert (0);
+      break;
+  }
+}
+
+/** Derives the CTU-relative Z-order index of the bottom-right partition of
+ * a prediction unit. The anchor is the right column at half the CU height
+ * minus one row; each case adds the fixed m_uiNumPartition fraction for the
+ * PU's geometry.
+ * \param [in]  uiPartIdx    prediction-unit index within the CU
+ * \param [out] ruiPartIdxRB Z-order index of the PU's bottom-right partition
+ */
+Void TComDataCU::deriveRightBottomIdx( UInt  uiPartIdx,      UInt&      ruiPartIdxRB )
+{
+  ruiPartIdxRB      = g_auiRasterToZscan [g_auiZscanToRaster[ m_absZIdxInCtu ] + ( ((m_puhHeight[0] / m_pcPic->getMinCUHeight())>>1) - 1)*m_pcPic->getNumPartInCtuWidth() +  m_puhWidth[0] / m_pcPic->getMinCUWidth() - 1];
+
+  switch ( m_pePartSize[0] )
+  {
+    case SIZE_2Nx2N:
+      ruiPartIdxRB += m_uiNumPartition >> 1;
+      break;
+    case SIZE_2NxN:
+      ruiPartIdxRB += ( uiPartIdx == 0 )? 0 : m_uiNumPartition >> 1;
+      break;
+    case SIZE_Nx2N:
+      ruiPartIdxRB += ( uiPartIdx == 0 )? m_uiNumPartition >> 2 : (m_uiNumPartition >> 1);
+      break;
+    case SIZE_NxN:
+      ruiPartIdxRB += ( m_uiNumPartition >> 2 ) * ( uiPartIdx - 1 );
+      break;
+    case SIZE_2NxnU:
+      ruiPartIdxRB += ( uiPartIdx == 0 ) ? -((Int)m_uiNumPartition >> 3) : m_uiNumPartition >> 1;
+      break;
+    case SIZE_2NxnD:
+      ruiPartIdxRB += ( uiPartIdx == 0 ) ? (m_uiNumPartition >> 2) + (m_uiNumPartition >> 3): m_uiNumPartition >> 1;
+      break;
+    case SIZE_nLx2N:
+      ruiPartIdxRB += ( uiPartIdx == 0 ) ? (m_uiNumPartition >> 3) + (m_uiNumPartition >> 4): m_uiNumPartition >> 1;
+      break;
+    case SIZE_nRx2N:
+      ruiPartIdxRB += ( uiPartIdx == 0 ) ? (m_uiNumPartition >> 2) + (m_uiNumPartition >> 3) + (m_uiNumPartition >> 4) : m_uiNumPartition >> 1;
+      break;
+    default:
+      assert (0);
+      break;
+  }
+}
+
+/** Derives top-left/top-right partition indices for intra reference-sample
+ *  (ADI) derivation of the sub-block at uiPartOffset/uiPartDepth.
+ */
+Void TComDataCU::deriveLeftRightTopIdxAdi ( UInt& ruiPartIdxLT, UInt& ruiPartIdxRT, UInt uiPartOffset, UInt uiPartDepth )
+{
+  UInt uiNumPartInWidth = (m_puhWidth[0]/m_pcPic->getMinCUWidth())>>uiPartDepth;
+  ruiPartIdxLT = m_absZIdxInCtu + uiPartOffset;
+  ruiPartIdxRT = g_auiRasterToZscan[ g_auiZscanToRaster[ ruiPartIdxLT ] + uiNumPartInWidth - 1 ];
+}
+
+/** Derives the bottom-left partition index for intra reference-sample (ADI)
+ *  derivation: takes the last Z-order partition of the sub-block and steps
+ *  back (width-1) columns in raster order to reach its left edge.
+ */
+Void TComDataCU::deriveLeftBottomIdxAdi( UInt& ruiPartIdxLB, UInt uiPartOffset, UInt uiPartDepth )
+{
+  UInt uiAbsIdx;
+  UInt uiMinCuWidth, uiWidthInMinCus;
+
+  uiMinCuWidth    = getPic()->getMinCUWidth();
+  uiWidthInMinCus = (getWidth(0)/uiMinCuWidth)>>uiPartDepth;
+  uiAbsIdx        = getZorderIdxInCtu()+uiPartOffset+(m_uiNumPartition>>(uiPartDepth<<1))-1;
+  uiAbsIdx        = g_auiZscanToRaster[uiAbsIdx]-(uiWidthInMinCus-1);
+  ruiPartIdxLB    = g_auiRasterToZscan[uiAbsIdx];
+}
+
+/** True if this CU's motion at uiAbsPartIdx equals pcCandCU's motion at
+ *  uiCandAbsPartIdx: same inter direction, and for every reference list in
+ *  use (bit set in interDir) the same MV and reference index. Used for
+ *  merge-candidate pruning.
+ */
+Bool TComDataCU::hasEqualMotion( UInt uiAbsPartIdx, TComDataCU* pcCandCU, UInt uiCandAbsPartIdx )
+{
+  if ( getInterDir( uiAbsPartIdx ) != pcCandCU->getInterDir( uiCandAbsPartIdx ) )
+  {
+    return false;
+  }
+
+  for ( UInt uiRefListIdx = 0; uiRefListIdx < 2; uiRefListIdx++ )
+  {
+    if ( getInterDir( uiAbsPartIdx ) & ( 1 << uiRefListIdx ) )
+    {
+      if ( getCUMvField( RefPicList( uiRefListIdx ) )->getMv( uiAbsPartIdx )     != pcCandCU->getCUMvField( RefPicList( uiRefListIdx ) )->getMv( uiCandAbsPartIdx ) ||
+        getCUMvField( RefPicList( uiRefListIdx ) )->getRefIdx( uiAbsPartIdx ) != pcCandCU->getCUMvField( RefPicList( uiRefListIdx ) )->getRefIdx( uiCandAbsPartIdx ) )
+      {
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+/** Constructs a list of merging candidates
+ * \param uiAbsPartIdx
+ * \param uiPUIdx
+ * \param uiDepth
+ * \param pcMvFieldNeighbours
+ * \param puhInterDirNeighbours
+ * \param numValidMergeCand
+ */
+Void TComDataCU::getInterMergeCandidates( UInt uiAbsPartIdx, UInt uiPUIdx, TComMvField* pcMvFieldNeighbours, UChar* puhInterDirNeighbours, Int& numValidMergeCand, Int mrgCandIdx )
+{
+  UInt uiAbsPartAddr = m_absZIdxInCtu + uiAbsPartIdx;
+  Bool abCandIsInter[ MRG_MAX_NUM_CANDS ];
+  for( UInt ui = 0; ui < getSlice()->getMaxNumMergeCand(); ++ui )
+  {
+    abCandIsInter[ui] = false;
+    pcMvFieldNeighbours[ ( ui << 1 )     ].setRefIdx(NOT_VALID);
+    pcMvFieldNeighbours[ ( ui << 1 ) + 1 ].setRefIdx(NOT_VALID);
+  }
+  numValidMergeCand = getSlice()->getMaxNumMergeCand();
+  // compute the location of the current PU
+  Int xP, yP, nPSW, nPSH;
+  this->getPartPosition(uiPUIdx, xP, yP, nPSW, nPSH);
+
+  Int iCount = 0;
+
+  UInt uiPartIdxLT, uiPartIdxRT, uiPartIdxLB;
+  PartSize cCurPS = getPartitionSize( uiAbsPartIdx );
+  deriveLeftRightTopIdxGeneral( uiAbsPartIdx, uiPUIdx, uiPartIdxLT, uiPartIdxRT );
+  deriveLeftBottomIdxGeneral( uiAbsPartIdx, uiPUIdx, uiPartIdxLB );
+
+  //left
+  UInt uiLeftPartIdx = 0;
+  TComDataCU* pcCULeft = 0;
+  pcCULeft = getPULeft( uiLeftPartIdx, uiPartIdxLB );
+
+  Bool isAvailableA1 = pcCULeft &&
+                       pcCULeft->isDiffMER(xP -1, yP+nPSH-1, xP, yP) &&
+                       !( uiPUIdx == 1 && (cCurPS == SIZE_Nx2N || cCurPS == SIZE_nLx2N || cCurPS == SIZE_nRx2N) ) &&
+                       pcCULeft->isInter( uiLeftPartIdx ) ;
+
+  if ( isAvailableA1 )
+  {
+    abCandIsInter[iCount] = true;
+    // get Inter Dir
+    puhInterDirNeighbours[iCount] = pcCULeft->getInterDir( uiLeftPartIdx );
+    // get Mv from Left
+    pcCULeft->getMvField( pcCULeft, uiLeftPartIdx, REF_PIC_LIST_0, pcMvFieldNeighbours[iCount<<1] );
+    if ( getSlice()->isInterB() )
+    {
+      pcCULeft->getMvField( pcCULeft, uiLeftPartIdx, REF_PIC_LIST_1, pcMvFieldNeighbours[(iCount<<1)+1] );
+    }
+    if ( mrgCandIdx == iCount )
+    {
+      return;
+    }
+    iCount ++;
+  }
+
+  // early termination
+  if (iCount == getSlice()->getMaxNumMergeCand())
+  {
+    return;
+  }
+  // above
+  UInt uiAbovePartIdx = 0;
+  TComDataCU* pcCUAbove = 0;
+  pcCUAbove = getPUAbove( uiAbovePartIdx, uiPartIdxRT );
+
+  Bool isAvailableB1 = pcCUAbove &&
+                       pcCUAbove->isDiffMER(xP+nPSW-1, yP-1, xP, yP) &&
+                       !( uiPUIdx == 1 && (cCurPS == SIZE_2NxN || cCurPS == SIZE_2NxnU || cCurPS == SIZE_2NxnD) ) &&
+                       pcCUAbove->isInter( uiAbovePartIdx );
+
+  if ( isAvailableB1 && (!isAvailableA1 || !pcCULeft->hasEqualMotion( uiLeftPartIdx, pcCUAbove, uiAbovePartIdx ) ) )
+  {
+    abCandIsInter[iCount] = true;
+    // get Inter Dir
+    puhInterDirNeighbours[iCount] = pcCUAbove->getInterDir( uiAbovePartIdx );
+    // get Mv from Left
+    pcCUAbove->getMvField( pcCUAbove, uiAbovePartIdx, REF_PIC_LIST_0, pcMvFieldNeighbours[iCount<<1] );
+    if ( getSlice()->isInterB() )
+    {
+      pcCUAbove->getMvField( pcCUAbove, uiAbovePartIdx, REF_PIC_LIST_1, pcMvFieldNeighbours[(iCount<<1)+1] );
+    }
+    if ( mrgCandIdx == iCount )
+    {
+      return;
+    }
+    iCount ++;
+  }
+  // early termination
+  if (iCount == getSlice()->getMaxNumMergeCand())
+  {
+    return;
+  }
+
+  // above right
+  UInt uiAboveRightPartIdx = 0;
+  TComDataCU* pcCUAboveRight = 0;
+  pcCUAboveRight = getPUAboveRight( uiAboveRightPartIdx, uiPartIdxRT );
+
+  Bool isAvailableB0 = pcCUAboveRight &&
+                       pcCUAboveRight->isDiffMER(xP+nPSW, yP-1, xP, yP) &&
+                       pcCUAboveRight->isInter( uiAboveRightPartIdx );
+
+  if ( isAvailableB0 && ( !isAvailableB1 || !pcCUAbove->hasEqualMotion( uiAbovePartIdx, pcCUAboveRight, uiAboveRightPartIdx ) ) )
+  {
+    abCandIsInter[iCount] = true;
+    // get Inter Dir
+    puhInterDirNeighbours[iCount] = pcCUAboveRight->getInterDir( uiAboveRightPartIdx );
+    // get Mv from Left
+    pcCUAboveRight->getMvField( pcCUAboveRight, uiAboveRightPartIdx, REF_PIC_LIST_0, pcMvFieldNeighbours[iCount<<1] );
+    if ( getSlice()->isInterB() )
+    {
+      pcCUAboveRight->getMvField( pcCUAboveRight, uiAboveRightPartIdx, REF_PIC_LIST_1, pcMvFieldNeighbours[(iCount<<1)+1] );
+    }
+    if ( mrgCandIdx == iCount )
+    {
+      return;
+    }
+    iCount ++;
+  }
+  // early termination
+  if (iCount == getSlice()->getMaxNumMergeCand())
+  {
+    return;
+  }
+
+  //left bottom
+  UInt uiLeftBottomPartIdx = 0;
+  TComDataCU* pcCULeftBottom = 0;
+  pcCULeftBottom = this->getPUBelowLeft( uiLeftBottomPartIdx, uiPartIdxLB );
+
+  Bool isAvailableA0 = pcCULeftBottom &&
+                       pcCULeftBottom->isDiffMER(xP-1, yP+nPSH, xP, yP) &&
+                       pcCULeftBottom->isInter( uiLeftBottomPartIdx ) ;
+
+  if ( isAvailableA0 && ( !isAvailableA1 || !pcCULeft->hasEqualMotion( uiLeftPartIdx, pcCULeftBottom, uiLeftBottomPartIdx ) ) )
+  {
+    abCandIsInter[iCount] = true;
+    // get Inter Dir
+    puhInterDirNeighbours[iCount] = pcCULeftBottom->getInterDir( uiLeftBottomPartIdx );
+    // get Mv from Left
+    pcCULeftBottom->getMvField( pcCULeftBottom, uiLeftBottomPartIdx, REF_PIC_LIST_0, pcMvFieldNeighbours[iCount<<1] );
+    if ( getSlice()->isInterB() )
+    {
+      pcCULeftBottom->getMvField( pcCULeftBottom, uiLeftBottomPartIdx, REF_PIC_LIST_1, pcMvFieldNeighbours[(iCount<<1)+1] );
+    }
+    if ( mrgCandIdx == iCount )
+    {
+      return;
+    }
+    iCount ++;
+  }
+  // early termination
+  if (iCount == getSlice()->getMaxNumMergeCand())
+  {
+    return;
+  }
+
+  // above left
+  if( iCount < 4 )
+  {
+    UInt uiAboveLeftPartIdx = 0;
+    TComDataCU* pcCUAboveLeft = 0;
+    pcCUAboveLeft = getPUAboveLeft( uiAboveLeftPartIdx, uiAbsPartAddr );
+
+    Bool isAvailableB2 = pcCUAboveLeft &&
+                         pcCUAboveLeft->isDiffMER(xP-1, yP-1, xP, yP) &&
+                         pcCUAboveLeft->isInter( uiAboveLeftPartIdx );
+
+    if ( isAvailableB2 && ( !isAvailableA1 || !pcCULeft->hasEqualMotion( uiLeftPartIdx, pcCUAboveLeft, uiAboveLeftPartIdx ) )
+        && ( !isAvailableB1 || !pcCUAbove->hasEqualMotion( uiAbovePartIdx, pcCUAboveLeft, uiAboveLeftPartIdx ) ) )
+    {
+      abCandIsInter[iCount] = true;
+      // get Inter Dir
+      puhInterDirNeighbours[iCount] = pcCUAboveLeft->getInterDir( uiAboveLeftPartIdx );
+      // get Mv from Left
+      pcCUAboveLeft->getMvField( pcCUAboveLeft, uiAboveLeftPartIdx, REF_PIC_LIST_0, pcMvFieldNeighbours[iCount<<1] );
+      if ( getSlice()->isInterB() )
+      {
+        pcCUAboveLeft->getMvField( pcCUAboveLeft, uiAboveLeftPartIdx, REF_PIC_LIST_1, pcMvFieldNeighbours[(iCount<<1)+1] );
+      }
+      if ( mrgCandIdx == iCount )
+      {
+        return;
+      }
+      iCount ++;
+    }
+  }
+  // early termination
+  if (iCount == getSlice()->getMaxNumMergeCand())
+  {
+    return;
+  }
+
+  if ( getSlice()->getEnableTMVPFlag() )
+  {
+    //>> MTK colocated-RightBottom
+    UInt uiPartIdxRB;
+
+    deriveRightBottomIdx( uiPUIdx, uiPartIdxRB );
+
+    UInt uiAbsPartIdxTmp = g_auiZscanToRaster[uiPartIdxRB];
+    const UInt numPartInCtuWidth  = m_pcPic->getNumPartInCtuWidth();
+    const UInt numPartInCtuHeight = m_pcPic->getNumPartInCtuHeight();
+
+    TComMv cColMv;
+    Int iRefIdx;
+    Int ctuRsAddr = -1;
+
+    if (   ( ( m_pcPic->getCtu(m_ctuRsAddr)->getCUPelX() + g_auiRasterToPelX[uiAbsPartIdxTmp] + m_pcPic->getMinCUWidth () ) < m_pcSlice->getSPS()->getPicWidthInLumaSamples () )  // image boundary check
+        && ( ( m_pcPic->getCtu(m_ctuRsAddr)->getCUPelY() + g_auiRasterToPelY[uiAbsPartIdxTmp] + m_pcPic->getMinCUHeight() ) < m_pcSlice->getSPS()->getPicHeightInLumaSamples() ) )
+    {
+      if ( ( uiAbsPartIdxTmp % numPartInCtuWidth < numPartInCtuWidth - 1 ) &&           // is not at the last column of CTU
+        ( uiAbsPartIdxTmp / numPartInCtuWidth < numPartInCtuHeight - 1 ) )              // is not at the last row    of CTU
+      {
+        uiAbsPartAddr = g_auiRasterToZscan[ uiAbsPartIdxTmp + numPartInCtuWidth + 1 ];
+        ctuRsAddr = getCtuRsAddr();
+      }
+      else if ( uiAbsPartIdxTmp % numPartInCtuWidth < numPartInCtuWidth - 1 )           // is not at the last column of CTU But is last row of CTU
+      {
+        uiAbsPartAddr = g_auiRasterToZscan[ (uiAbsPartIdxTmp + numPartInCtuWidth + 1) % m_pcPic->getNumPartitionsInCtu() ];
+      }
+      else if ( uiAbsPartIdxTmp / numPartInCtuWidth < numPartInCtuHeight - 1 )          // is not at the last row of CTU But is last column of CTU
+      {
+        uiAbsPartAddr = g_auiRasterToZscan[ uiAbsPartIdxTmp + 1 ];
+        ctuRsAddr = getCtuRsAddr() + 1;
+      }
+      else //is the right bottom corner of CTU
+      {
+        uiAbsPartAddr = 0;
+      }
+    }
+
+    iRefIdx = 0;
+
+    Bool bExistMV = false;
+    UInt uiPartIdxCenter;
+    Int dir = 0;
+    UInt uiArrayAddr = iCount;
+    xDeriveCenterIdx( uiPUIdx, uiPartIdxCenter );
+    bExistMV = ctuRsAddr >= 0 && xGetColMVP( REF_PIC_LIST_0, ctuRsAddr, uiAbsPartAddr, cColMv, iRefIdx );
+    if( bExistMV == false )
+    {
+      bExistMV = xGetColMVP( REF_PIC_LIST_0, getCtuRsAddr(), uiPartIdxCenter,  cColMv, iRefIdx );
+    }
+    if( bExistMV )
+    {
+      dir |= 1;
+      pcMvFieldNeighbours[ 2 * uiArrayAddr ].setMvField( cColMv, iRefIdx );
+    }
+
+    if ( getSlice()->isInterB() )
+    {
+      bExistMV = ctuRsAddr >= 0 && xGetColMVP( REF_PIC_LIST_1, ctuRsAddr, uiAbsPartAddr, cColMv, iRefIdx);
+      if( bExistMV == false )
+      {
+        bExistMV = xGetColMVP( REF_PIC_LIST_1, getCtuRsAddr(), uiPartIdxCenter, cColMv, iRefIdx );
+      }
+      if( bExistMV )
+      {
+        dir |= 2;
+        pcMvFieldNeighbours[ 2 * uiArrayAddr + 1 ].setMvField( cColMv, iRefIdx );
+      }
+    }
+
+    if (dir != 0)
+    {
+      puhInterDirNeighbours[uiArrayAddr] = dir;
+      abCandIsInter[uiArrayAddr] = true;
+
+      if ( mrgCandIdx == iCount )
+      {
+        return;
+      }
+      iCount++;
+    }
+  }
+  // early termination
+  if (iCount == getSlice()->getMaxNumMergeCand())
+  {
+    return;
+  }
+
+  UInt uiArrayAddr = iCount;
+  UInt uiCutoff = uiArrayAddr;
+
+  if ( getSlice()->isInterB() )
+  {
+    static const UInt NUM_PRIORITY_LIST=12;
+    static const UInt uiPriorityList0[NUM_PRIORITY_LIST] = {0 , 1, 0, 2, 1, 2, 0, 3, 1, 3, 2, 3};
+    static const UInt uiPriorityList1[NUM_PRIORITY_LIST] = {1 , 0, 2, 0, 2, 1, 3, 0, 3, 1, 3, 2};
+
+    for (Int idx=0; idx<uiCutoff*(uiCutoff-1) && uiArrayAddr!= getSlice()->getMaxNumMergeCand(); idx++)
+    {
+      assert(idx<NUM_PRIORITY_LIST);
+      Int i = uiPriorityList0[idx];
+      Int j = uiPriorityList1[idx];
+      if (abCandIsInter[i] && abCandIsInter[j]&& (puhInterDirNeighbours[i]&0x1)&&(puhInterDirNeighbours[j]&0x2))
+      {
+        abCandIsInter[uiArrayAddr] = true;
+        puhInterDirNeighbours[uiArrayAddr] = 3;
+
+        // get Mv from cand[i] and cand[j]
+        pcMvFieldNeighbours[uiArrayAddr << 1].setMvField(pcMvFieldNeighbours[i<<1].getMv(), pcMvFieldNeighbours[i<<1].getRefIdx());
+        pcMvFieldNeighbours[( uiArrayAddr << 1 ) + 1].setMvField(pcMvFieldNeighbours[(j<<1)+1].getMv(), pcMvFieldNeighbours[(j<<1)+1].getRefIdx());
+
+        Int iRefPOCL0 = m_pcSlice->getRefPOC( REF_PIC_LIST_0, pcMvFieldNeighbours[(uiArrayAddr<<1)].getRefIdx() );
+        Int iRefPOCL1 = m_pcSlice->getRefPOC( REF_PIC_LIST_1, pcMvFieldNeighbours[(uiArrayAddr<<1)+1].getRefIdx() );
+        if (iRefPOCL0 == iRefPOCL1 && pcMvFieldNeighbours[(uiArrayAddr<<1)].getMv() == pcMvFieldNeighbours[(uiArrayAddr<<1)+1].getMv())
+        {
+          abCandIsInter[uiArrayAddr] = false;
+        }
+        else
+        {
+          uiArrayAddr++;
+        }
+      }
+    }
+  }
+  // early termination
+  if (uiArrayAddr == getSlice()->getMaxNumMergeCand())
+  {
+    return;
+  }
+
+  Int iNumRefIdx = (getSlice()->isInterB()) ? min(m_pcSlice->getNumRefIdx(REF_PIC_LIST_0), m_pcSlice->getNumRefIdx(REF_PIC_LIST_1)) : m_pcSlice->getNumRefIdx(REF_PIC_LIST_0);
+
+  Int r = 0;
+  Int refcnt = 0;
+  while (uiArrayAddr < getSlice()->getMaxNumMergeCand())
+  {
+    abCandIsInter[uiArrayAddr] = true;
+    puhInterDirNeighbours[uiArrayAddr] = 1;
+    pcMvFieldNeighbours[uiArrayAddr << 1].setMvField( TComMv(0, 0), r);
+
+    if ( getSlice()->isInterB() )
+    {
+      puhInterDirNeighbours[uiArrayAddr] = 3;
+      pcMvFieldNeighbours[(uiArrayAddr << 1) + 1].setMvField(TComMv(0, 0), r);
+    }
+    uiArrayAddr++;
+
+    if ( refcnt == iNumRefIdx - 1 )
+    {
+      r = 0;
+    }
+    else
+    {
+      ++r;
+      ++refcnt;
+    }
+  }
+  numValidMergeCand = uiArrayAddr;
+}
+
+/** Check whether a spatial neighbouring PU lies in a different motion
+ *  estimation (ME) region than the current PU. Two positions share an ME
+ *  region iff both coordinates agree after shifting by the parallel merge level.
+ * \param xN, yN   location of the upper-left corner pixel of the neighbouring PU
+ * \param xP, yP   location of the upper-left corner pixel of the current PU
+ * \returns true when the two PUs fall into different ME regions
+ */
+Bool TComDataCU::isDiffMER(Int xN, Int yN, Int xP, Int yP)
+{
+  // log2 of the parallel merge region size (PPS signals it minus 2).
+  const UInt uiShift = this->getSlice()->getPPS()->getLog2ParallelMergeLevelMinus2() + 2;
+
+  // Regions differ as soon as either coordinate differs at region granularity.
+  return ( ( xN >> uiShift ) != ( xP >> uiShift ) ) ||
+         ( ( yN >> uiShift ) != ( yP >> uiShift ) );
+}
+
+/** Calculate the location of the upper-left corner pixel and the size of the
+ *  current PU from the CU partition mode and the PU index.
+ * \param partIdx  PU index within a CU
+ * \param xP, yP   (output) location of the upper-left corner pixel of the current PU
+ * \param nPSW, nPSH  (output) width and height of the current PU
+ * \returns Void
+ */
+Void TComDataCU::getPartPosition( UInt partIdx, Int& xP, Int& yP, Int& nPSW, Int& nPSH)
+{
+  UInt col = m_uiCUPelX;
+  UInt row = m_uiCUPelY;
+
+  // Partition mode of the CU is stored at sub-part 0.
+  switch ( m_pePartSize[0] )
+  {
+  case SIZE_2NxN: // two horizontal halves
+    nPSW = getWidth(0);
+    nPSH = getHeight(0) >> 1;
+    xP   = col;
+    yP   = (partIdx ==0)? row: row + nPSH;
+    break;
+  case SIZE_Nx2N: // two vertical halves
+    nPSW = getWidth(0) >> 1;
+    nPSH = getHeight(0);
+    xP   = (partIdx ==0)? col: col + nPSW;
+    yP   = row;
+    break;
+  case SIZE_NxN: // four quadrants; partIdx bit0 selects column, bit1 selects row
+    nPSW = getWidth(0) >> 1;
+    nPSH = getHeight(0) >> 1;
+    xP   = col + (partIdx&0x1)*nPSW;
+    yP   = row + (partIdx>>1)*nPSH;
+    break;
+  case SIZE_2NxnU: // asymmetric: top quarter / bottom three-quarters
+    nPSW = getWidth(0);
+    nPSH = ( partIdx == 0 ) ?  getHeight(0) >> 2 : ( getHeight(0) >> 2 ) + ( getHeight(0) >> 1 );
+    xP   = col;
+    yP   = (partIdx ==0)? row: row + getHeight(0) - nPSH;
+
+    break;
+  case SIZE_2NxnD: // asymmetric: top three-quarters / bottom quarter
+    nPSW = getWidth(0);
+    nPSH = ( partIdx == 0 ) ?  ( getHeight(0) >> 2 ) + ( getHeight(0) >> 1 ) : getHeight(0) >> 2;
+    xP   = col;
+    yP   = (partIdx ==0)? row: row + getHeight(0) - nPSH;
+    break;
+  case SIZE_nLx2N: // asymmetric: left quarter / right three-quarters
+    nPSW = ( partIdx == 0 ) ? getWidth(0) >> 2 : ( getWidth(0) >> 2 ) + ( getWidth(0) >> 1 );
+    nPSH = getHeight(0);
+    xP   = (partIdx ==0)? col: col + getWidth(0) - nPSW;
+    yP   = row;
+    break;
+  case SIZE_nRx2N: // asymmetric: left three-quarters / right quarter
+    nPSW = ( partIdx == 0 ) ? ( getWidth(0) >> 2 ) + ( getWidth(0) >> 1 ) : getWidth(0) >> 2;
+    nPSH = getHeight(0);
+    xP   = (partIdx ==0)? col: col + getWidth(0) - nPSW;
+    yP   = row;
+    break;
+  default: // SIZE_2Nx2N: the single PU covers the whole CU
+    assert ( m_pePartSize[0] == SIZE_2Nx2N );
+    nPSW = getWidth(0);
+    nPSH = getHeight(0);
+    xP   = col ;
+    yP   = row ;
+
+    break;
+  }
+}
+
+/** Construct the list of AMVP (advanced motion vector prediction) candidates
+ *  for one PU: up to two spatial candidates (left group, then above group),
+ *  an optional temporal (collocated) candidate, duplicate pruning, and finally
+ *  zero-MV padding up to AMVP_MAX_NUM_CANDS entries.
+ * \param uiPartIdx   PU index within the CU
+ * \param uiPartAddr  first sub-partition address of the PU
+ * \param eRefPicList target reference picture list
+ * \param iRefIdx     target reference index (negative aborts with an empty list)
+ * \param pInfo       (output) candidate list
+ */
+Void TComDataCU::fillMvpCand ( UInt uiPartIdx, UInt uiPartAddr, RefPicList eRefPicList, Int iRefIdx, AMVPInfo* pInfo )
+{
+  TComMv cMvPred; // NOTE(review): unused in this function body
+  Bool bAddedSmvp = false;
+
+  pInfo->iN = 0;
+  if (iRefIdx < 0)
+  {
+    return;
+  }
+
+  //-- Get Spatial MV
+  UInt uiPartIdxLT, uiPartIdxRT, uiPartIdxLB;
+  const UInt numPartInCtuWidth  = m_pcPic->getNumPartInCtuWidth();
+  const UInt numPartInCtuHeight = m_pcPic->getNumPartInCtuHeight();
+  Bool bAdded = false;
+
+  deriveLeftRightTopIdx( uiPartIdx, uiPartIdxLT, uiPartIdxRT );
+  deriveLeftBottomIdx( uiPartIdx, uiPartIdxLB );
+
+  // Determine whether any left-group neighbour (below-left or left) is inter
+  // coded; if so, the scaled above-group search is skipped further below.
+  TComDataCU* tmpCU = NULL;
+  UInt idx;
+  tmpCU = getPUBelowLeft(idx, uiPartIdxLB);
+  bAddedSmvp = (tmpCU != NULL) && (tmpCU->isInter(idx));
+
+  if (!bAddedSmvp)
+  {
+    tmpCU = getPULeft(idx, uiPartIdxLB);
+    bAddedSmvp = (tmpCU != NULL) && (tmpCU->isInter(idx));
+  }
+
+  // Left predictor search: same-reference candidate first (below-left, left),
+  // then a POC-scaled candidate if none was found.
+  bAdded = xAddMVPCand( pInfo, eRefPicList, iRefIdx, uiPartIdxLB, MD_BELOW_LEFT);
+  if (!bAdded)
+  {
+    bAdded = xAddMVPCand( pInfo, eRefPicList, iRefIdx, uiPartIdxLB, MD_LEFT );
+  }
+
+  if(!bAdded)
+  {
+    bAdded = xAddMVPCandOrder( pInfo, eRefPicList, iRefIdx, uiPartIdxLB, MD_BELOW_LEFT);
+    if (!bAdded)
+    {
+      xAddMVPCandOrder( pInfo, eRefPicList, iRefIdx, uiPartIdxLB, MD_LEFT );
+    }
+  }
+
+  // Above predictor search: same-reference candidate (above-right, above,
+  // above-left order).
+  bAdded = xAddMVPCand( pInfo, eRefPicList, iRefIdx, uiPartIdxRT, MD_ABOVE_RIGHT);
+
+  if (!bAdded)
+  {
+    bAdded = xAddMVPCand( pInfo, eRefPicList, iRefIdx, uiPartIdxRT, MD_ABOVE);
+  }
+
+  if(!bAdded)
+  {
+    xAddMVPCand( pInfo, eRefPicList, iRefIdx, uiPartIdxLT, MD_ABOVE_LEFT);
+  }
+
+  // Scaled above-group candidates are only considered when no left-group
+  // neighbour was inter coded (bAddedSmvp).
+  if(!bAddedSmvp)
+  {
+    bAdded = xAddMVPCandOrder( pInfo, eRefPicList, iRefIdx, uiPartIdxRT, MD_ABOVE_RIGHT);
+    if (!bAdded)
+    {
+      bAdded = xAddMVPCandOrder( pInfo, eRefPicList, iRefIdx, uiPartIdxRT, MD_ABOVE);
+    }
+
+    if(!bAdded)
+    {
+      xAddMVPCandOrder( pInfo, eRefPicList, iRefIdx, uiPartIdxLT, MD_ABOVE_LEFT);
+    }
+  }
+
+  // Prune a duplicated second spatial candidate.
+  if ( pInfo->iN == 2 )
+  {
+    if ( pInfo->m_acMvCand[ 0 ] == pInfo->m_acMvCand[ 1 ] )
+    {
+      pInfo->iN = 1;
+    }
+  }
+
+  if ( getSlice()->getEnableTMVPFlag() )
+  {
+    // Get Temporal Motion Predictor
+    Int iRefIdx_Col = iRefIdx;
+    TComMv cColMv;
+    UInt uiPartIdxRB;
+    UInt uiAbsPartIdx;
+    UInt uiAbsPartAddr;
+
+    deriveRightBottomIdx( uiPartIdx, uiPartIdxRB );
+    uiAbsPartAddr = m_absZIdxInCtu + uiPartAddr;
+
+    //----  co-located RightBottom Temporal Predictor (H) ---//
+    // Locate the sub-part just below-right of the PU; ctuRsAddr stays -1 when
+    // that position is unusable (outside the picture, or in a CTU row below).
+    uiAbsPartIdx = g_auiZscanToRaster[uiPartIdxRB];
+    Int ctuRsAddr = -1;
+    if (  ( ( m_pcPic->getCtu(m_ctuRsAddr)->getCUPelX() + g_auiRasterToPelX[uiAbsPartIdx] + m_pcPic->getMinCUWidth () ) < m_pcSlice->getSPS()->getPicWidthInLumaSamples () )  // image boundary check
+       && ( ( m_pcPic->getCtu(m_ctuRsAddr)->getCUPelY() + g_auiRasterToPelY[uiAbsPartIdx] + m_pcPic->getMinCUHeight() ) < m_pcSlice->getSPS()->getPicHeightInLumaSamples() ) )
+    {
+      if ( ( uiAbsPartIdx % numPartInCtuWidth < numPartInCtuWidth - 1 ) &&  // is not at the last column of CTU
+           ( uiAbsPartIdx / numPartInCtuWidth < numPartInCtuHeight - 1 ) )  // is not at the last row    of CTU
+      {
+        uiAbsPartAddr = g_auiRasterToZscan[ uiAbsPartIdx + numPartInCtuWidth + 1 ];
+        ctuRsAddr = getCtuRsAddr();
+      }
+      else if ( uiAbsPartIdx % numPartInCtuWidth < numPartInCtuWidth - 1 )  // is not at the last column of CTU But is last row of CTU
+      {
+        uiAbsPartAddr = g_auiRasterToZscan[ (uiAbsPartIdx + numPartInCtuWidth + 1) % m_pcPic->getNumPartitionsInCtu() ];
+      }
+      else if ( uiAbsPartIdx / numPartInCtuWidth < numPartInCtuHeight - 1 ) // is not at the last row of CTU But is last column of CTU
+      {
+        uiAbsPartAddr = g_auiRasterToZscan[ uiAbsPartIdx + 1 ];
+        ctuRsAddr = getCtuRsAddr() + 1;
+      }
+      else //is the right bottom corner of CTU
+      {
+        uiAbsPartAddr = 0;
+      }
+    }
+    // Prefer the bottom-right collocated position (H); fall back to the
+    // PU-centre collocated position when H is unavailable.
+    if ( ctuRsAddr >= 0 && xGetColMVP( eRefPicList, ctuRsAddr, uiAbsPartAddr, cColMv, iRefIdx_Col ) )
+    {
+      pInfo->m_acMvCand[pInfo->iN++] = cColMv;
+    }
+    else
+    {
+      UInt uiPartIdxCenter;
+      xDeriveCenterIdx( uiPartIdx, uiPartIdxCenter );
+      if (xGetColMVP( eRefPicList, getCtuRsAddr(), uiPartIdxCenter,  cColMv, iRefIdx_Col ))
+      {
+        pInfo->m_acMvCand[pInfo->iN++] = cColMv;
+      }
+    }
+    //----  co-located RightBottom Temporal Predictor  ---//
+  }
+
+  // Clip the list length, then pad with zero MVs up to the fixed list size.
+  if (pInfo->iN > AMVP_MAX_NUM_CANDS)
+  {
+    pInfo->iN = AMVP_MAX_NUM_CANDS;
+  }
+
+  while (pInfo->iN < AMVP_MAX_NUM_CANDS)
+  {
+    pInfo->m_acMvCand[pInfo->iN].set(0,0);
+    pInfo->iN++;
+  }
+  return ;
+}
+
+
+/** Check the bi-prediction restriction for small PUs: when the CU is 8x8 and
+ *  the PU is smaller than 8 in either dimension (8x4 / 4x8), bi-prediction
+ *  is not allowed.
+ * \param puIdx  PU index within the CU
+ * \returns true when bi-prediction must not be used for this PU
+ */
+Bool TComDataCU::isBipredRestriction(UInt puIdx)
+{
+  Int  iWidth  = 0;
+  Int  iHeight = 0;
+  UInt uiPartAddr;
+
+  getPartIndexAndSize( puIdx, uiPartAddr, iWidth, iHeight );
+
+  // Restriction applies only inside 8x8 CUs with a sub-8 PU dimension.
+  return ( getWidth(0) == 8 ) && ( iWidth < 8 || iHeight < 8 );
+}
+
+
+/** Clip a motion vector (quarter-pel units) so that the referenced block stays
+ *  within the allowed padded picture area (picture plus an 8-sample margin).
+ * \param rcMv  motion vector, clipped in place
+ */
+Void TComDataCU::clipMv(TComMv& rcMv)
+{
+  const Int iMvShift = 2;  // quarter-pel precision
+  const Int iOffset  = 8;  // interpolation margin in luma samples
+
+  const Int iHorMax = ( m_pcSlice->getSPS()->getPicWidthInLumaSamples() + iOffset - m_uiCUPelX - 1 ) << iMvShift;
+  const Int iHorMin = ( -(Int)g_uiMaxCUWidth - iOffset - (Int)m_uiCUPelX + 1 ) << iMvShift;
+  const Int iVerMax = ( m_pcSlice->getSPS()->getPicHeightInLumaSamples() + iOffset - m_uiCUPelY - 1 ) << iMvShift;
+  const Int iVerMin = ( -(Int)g_uiMaxCUHeight - iOffset - (Int)m_uiCUPelY + 1 ) << iMvShift;
+
+  Int iHor = rcMv.getHor();
+  Int iVer = rcMv.getVer();
+  if ( iHor < iHorMin ) { iHor = iHorMin; }
+  if ( iHor > iHorMax ) { iHor = iHorMax; }
+  if ( iVer < iVerMin ) { iVer = iVerMin; }
+  if ( iVer > iVerMax ) { iVer = iVerMax; }
+  rcMv.setHor( iHor );
+  rcMv.setVer( iVer );
+}
+
+
+/** Map the intra PU width at a partition to a size index (log2(width) - 2,
+ *  capped at 6). NxN partitioning halves the effective PU width.
+ * \param uiAbsPartIdx  partition index
+ * \returns intra size index
+ */
+UInt TComDataCU::getIntraSizeIdx(UInt uiAbsPartIdx)
+{
+  // NxN splits the CU into four PUs of half width.
+  const UInt uiShift = ( m_pePartSize[uiAbsPartIdx] == SIZE_NxN ) ? 1 : 0;
+
+  // Count the significant bits of the width: 4 -> 3, 8 -> 4, ...
+  UInt uiSizeIdx = 0;
+  for ( UChar ucWidth = m_puhWidth[uiAbsPartIdx] >> uiShift; ucWidth != 0; ucWidth >>= 1 )
+  {
+    uiSizeIdx++;
+  }
+  uiSizeIdx -= 2;  // width 4 -> 1, 8 -> 2, ...
+  return ( uiSizeIdx > 6 ) ? 6 : uiSizeIdx;
+}
+
+/** Clear (zero) the coded-block-flag map of one colour component for a run of
+ *  consecutive sub-partitions.
+ * \param uiIdx       first sub-partition index
+ * \param compID      colour component whose Cbf entries are cleared
+ * \param uiNumParts  number of consecutive sub-partitions to clear
+ */
+Void TComDataCU::clearCbf( UInt uiIdx, ComponentID compID, UInt uiNumParts )
+{
+  memset( &m_puhCbf[compID][uiIdx], 0, sizeof(UChar)*uiNumParts);
+}
+
+/** Set the I_PCM flag for all sub-partitions of a partition.
+ * \param bIpcmFlag    I_PCM flag value to set
+ * \param uiAbsPartIdx partition index (first sub-partition to set)
+ * \param uiDepth      CU depth; determines how many sub-partitions are covered
+ * \returns Void
+ */
+Void TComDataCU::setIPCMFlagSubParts  (Bool bIpcmFlag, UInt uiAbsPartIdx, UInt uiDepth)
+{
+  // Number of minimum-size sub-partitions covered by a CU at this depth.
+  UInt uiCurrPartNumb = m_pcPic->getNumPartitionsInCtu() >> (uiDepth << 1);
+
+  memset(m_pbIPCMFlag + uiAbsPartIdx, bIpcmFlag, sizeof(Bool)*uiCurrPartNumb );
+}
+
+/** Query whether the block at the given partition index is coded in skip mode.
+ * \param uiPartIdx  partition (block) index
+ * \returns true when the block is skipped
+ */
+Bool TComDataCU::isSkipped( UInt uiPartIdx )
+{
+  // Skip state is tracked directly by the per-partition skip flag.
+  return getSkipFlag( uiPartIdx );
+}
+
+// ====================================================================================================================
+// Protected member functions
+// ====================================================================================================================
+
+/** Try to add one spatial AMVP candidate without MV scaling.
+ *  The neighbour's MV is accepted only when it references the same POC as the
+ *  target reference — first checked on the target list, then on the opposite
+ *  list (same picture reachable from either list).
+ * \param pInfo          (in/out) candidate list; a found MV is appended
+ * \param eRefPicList    target reference picture list
+ * \param iRefIdx        target reference index
+ * \param uiPartUnitIdx  partition index used to locate the neighbour
+ * \param eDir           which spatial neighbour to probe
+ * \returns true when a candidate was appended
+ */
+Bool TComDataCU::xAddMVPCand( AMVPInfo* pInfo, RefPicList eRefPicList, Int iRefIdx, UInt uiPartUnitIdx, MVP_DIR eDir )
+{
+  // Resolve the requested neighbour position to a CU (NULL when unavailable).
+  TComDataCU* pcTmpCU = NULL;
+  UInt uiIdx;
+  switch( eDir )
+  {
+    case MD_LEFT:
+    {
+      pcTmpCU = getPULeft(uiIdx, uiPartUnitIdx);
+      break;
+    }
+    case MD_ABOVE:
+    {
+      pcTmpCU = getPUAbove(uiIdx, uiPartUnitIdx);
+      break;
+    }
+    case MD_ABOVE_RIGHT:
+    {
+      pcTmpCU = getPUAboveRight(uiIdx, uiPartUnitIdx);
+      break;
+    }
+    case MD_BELOW_LEFT:
+    {
+      pcTmpCU = getPUBelowLeft(uiIdx, uiPartUnitIdx);
+      break;
+    }
+    case MD_ABOVE_LEFT:
+    {
+      pcTmpCU = getPUAboveLeft(uiIdx, uiPartUnitIdx);
+      break;
+    }
+    default:
+    {
+      break;
+    }
+  }
+
+  if ( pcTmpCU == NULL )
+  {
+    return false;
+  }
+
+  // Same list, same reference POC: take the neighbour's MV unscaled.
+  if ( pcTmpCU->getCUMvField(eRefPicList)->getRefIdx(uiIdx) >= 0 && m_pcSlice->getRefPic( eRefPicList, iRefIdx)->getPOC() == pcTmpCU->getSlice()->getRefPOC( eRefPicList, pcTmpCU->getCUMvField(eRefPicList)->getRefIdx(uiIdx) ))
+  {
+    TComMv cMvPred = pcTmpCU->getCUMvField(eRefPicList)->getMv(uiIdx);
+
+    pInfo->m_acMvCand[ pInfo->iN++] = cMvPred;
+    return true;
+  }
+
+  // Otherwise probe the opposite reference list.
+  RefPicList eRefPicList2nd = REF_PIC_LIST_0;
+  if(       eRefPicList == REF_PIC_LIST_0 )
+  {
+    eRefPicList2nd = REF_PIC_LIST_1;
+  }
+  else if ( eRefPicList == REF_PIC_LIST_1)
+  {
+    eRefPicList2nd = REF_PIC_LIST_0;
+  }
+
+
+  Int iCurrRefPOC = m_pcSlice->getRefPic( eRefPicList, iRefIdx)->getPOC();
+  Int iNeibRefPOC;
+
+
+  if( pcTmpCU->getCUMvField(eRefPicList2nd)->getRefIdx(uiIdx) >= 0 )
+  {
+    iNeibRefPOC = pcTmpCU->getSlice()->getRefPOC( eRefPicList2nd, pcTmpCU->getCUMvField(eRefPicList2nd)->getRefIdx(uiIdx) );
+    if( iNeibRefPOC == iCurrRefPOC ) // Same Reference Frame But Diff List//
+    {
+      TComMv cMvPred = pcTmpCU->getCUMvField(eRefPicList2nd)->getMv(uiIdx);
+      pInfo->m_acMvCand[ pInfo->iN++] = cMvPred;
+      return true;
+    }
+  }
+  return false;
+}
+
+/** Try to add one spatial AMVP candidate, allowing POC-distance scaling.
+ *  Probes the neighbour's MV on the target list first, then the opposite list.
+ *  The candidate is accepted only when the long-term/short-term status of the
+ *  neighbour's reference matches the target reference; short-term MVs are
+ *  scaled by the POC distance ratio, long-term MVs are taken as-is.
+ * \param pInfo          (in/out) candidate list; a found MV is appended
+ * \param eRefPicList    target reference picture list
+ * \param iRefIdx        target reference index
+ * \param uiPartUnitIdx  partition index used to locate the neighbour
+ * \param eDir           which spatial neighbour to probe
+ * \returns true when a candidate was appended
+ */
+Bool TComDataCU::xAddMVPCandOrder( AMVPInfo* pInfo, RefPicList eRefPicList, Int iRefIdx, UInt uiPartUnitIdx, MVP_DIR eDir )
+{
+  // Resolve the requested neighbour position to a CU (NULL when unavailable).
+  TComDataCU* pcTmpCU = NULL;
+  UInt uiIdx;
+  switch( eDir )
+  {
+  case MD_LEFT:
+    {
+      pcTmpCU = getPULeft(uiIdx, uiPartUnitIdx);
+      break;
+    }
+  case MD_ABOVE:
+    {
+      pcTmpCU = getPUAbove(uiIdx, uiPartUnitIdx);
+      break;
+    }
+  case MD_ABOVE_RIGHT:
+    {
+      pcTmpCU = getPUAboveRight(uiIdx, uiPartUnitIdx);
+      break;
+    }
+  case MD_BELOW_LEFT:
+    {
+      pcTmpCU = getPUBelowLeft(uiIdx, uiPartUnitIdx);
+      break;
+    }
+  case MD_ABOVE_LEFT:
+    {
+      pcTmpCU = getPUAboveLeft(uiIdx, uiPartUnitIdx);
+      break;
+    }
+  default:
+    {
+      break;
+    }
+  }
+
+  if ( pcTmpCU == NULL )
+  {
+    return false;
+  }
+
+  RefPicList eRefPicList2nd = REF_PIC_LIST_0;
+  if(       eRefPicList == REF_PIC_LIST_0 )
+  {
+    eRefPicList2nd = REF_PIC_LIST_1;
+  }
+  else if ( eRefPicList == REF_PIC_LIST_1)
+  {
+    eRefPicList2nd = REF_PIC_LIST_0;
+  }
+
+  Int iCurrPOC = m_pcSlice->getPOC();
+  Int iCurrRefPOC = m_pcSlice->getRefPic( eRefPicList, iRefIdx)->getPOC();
+  Int iNeibPOC = iCurrPOC; // spatial neighbour belongs to the current picture
+  Int iNeibRefPOC;
+  Bool bIsCurrRefLongTerm = m_pcSlice->getRefPic( eRefPicList, iRefIdx)->getIsLongTerm();
+  Bool bIsNeibRefLongTerm = false;
+
+  //---------------  V1 (END) ------------------//
+  // First attempt: neighbour MV on the same (target) reference list.
+  if( pcTmpCU->getCUMvField(eRefPicList)->getRefIdx(uiIdx) >= 0)
+  {
+    iNeibRefPOC = pcTmpCU->getSlice()->getRefPOC( eRefPicList, pcTmpCU->getCUMvField(eRefPicList)->getRefIdx(uiIdx) );
+    TComMv cMvPred = pcTmpCU->getCUMvField(eRefPicList)->getMv(uiIdx);
+    TComMv rcMv;
+
+    bIsNeibRefLongTerm = pcTmpCU->getSlice()->getRefPic( eRefPicList, pcTmpCU->getCUMvField(eRefPicList)->getRefIdx(uiIdx) )->getIsLongTerm();
+    // Long-term/short-term status must match; scaling is only valid between
+    // short-term references.
+    if ( bIsCurrRefLongTerm == bIsNeibRefLongTerm )
+    {
+      if ( bIsCurrRefLongTerm || bIsNeibRefLongTerm )
+      {
+        rcMv = cMvPred;
+      }
+      else
+      {
+        Int iScale = xGetDistScaleFactor( iCurrPOC, iCurrRefPOC, iNeibPOC, iNeibRefPOC );
+        if ( iScale == 4096 ) // unity scale: skip the fixed-point multiply
+        {
+          rcMv = cMvPred;
+        }
+        else
+        {
+          rcMv = cMvPred.scaleMv( iScale );
+        }
+      }
+
+      pInfo->m_acMvCand[ pInfo->iN++] = rcMv;
+      return true;
+    }
+  }
+  //---------------------- V2(END) --------------------//
+  // Second attempt: neighbour MV on the opposite reference list.
+  if( pcTmpCU->getCUMvField(eRefPicList2nd)->getRefIdx(uiIdx) >= 0)
+  {
+    iNeibRefPOC = pcTmpCU->getSlice()->getRefPOC( eRefPicList2nd, pcTmpCU->getCUMvField(eRefPicList2nd)->getRefIdx(uiIdx) );
+    TComMv cMvPred = pcTmpCU->getCUMvField(eRefPicList2nd)->getMv(uiIdx);
+    TComMv rcMv;
+
+    bIsNeibRefLongTerm = pcTmpCU->getSlice()->getRefPic( eRefPicList2nd, pcTmpCU->getCUMvField(eRefPicList2nd)->getRefIdx(uiIdx) )->getIsLongTerm();
+    if ( bIsCurrRefLongTerm == bIsNeibRefLongTerm )
+    {
+      if ( bIsCurrRefLongTerm || bIsNeibRefLongTerm )
+      {
+        rcMv = cMvPred;
+      }
+      else
+      {
+        Int iScale = xGetDistScaleFactor( iCurrPOC, iCurrRefPOC, iNeibPOC, iNeibRefPOC );
+        if ( iScale == 4096 ) // unity scale: skip the fixed-point multiply
+        {
+          rcMv = cMvPred;
+        }
+        else
+        {
+          rcMv = cMvPred.scaleMv( iScale );
+        }
+      }
+
+      pInfo->m_acMvCand[ pInfo->iN++] = rcMv;
+      return true;
+    }
+  }
+  //---------------------- V3(END) --------------------//
+  return false;
+}
+
+/** Fetch and (if needed) scale the collocated temporal motion vector predictor.
+ * \param eRefPicList    target reference list of the current PU
+ * \param ctuRsAddr      CTU raster-scan address in the collocated picture
+ * \param uiPartUnitIdx  sub-partition address within that CTU
+ * \param rcMv           (output) collocated MV, scaled to the target reference
+ * \param riRefIdx       target reference index (only read in this function)
+ * \returns true when a usable collocated MV was found
+ */
+Bool TComDataCU::xGetColMVP( RefPicList eRefPicList, Int ctuRsAddr, Int uiPartUnitIdx, TComMv& rcMv, Int& riRefIdx )
+{
+  UInt uiAbsPartAddr = uiPartUnitIdx;
+
+  RefPicList  eColRefPicList;
+  Int iColPOC, iColRefPOC, iCurrPOC, iCurrRefPOC, iScale;
+  TComMv cColMv;
+
+  // use coldir: the collocated picture is selected by the slice-level
+  // collocated_from_l0_flag / collocated_ref_idx.
+  TComPic *pColPic = getSlice()->getRefPic( RefPicList(getSlice()->isInterB() ? 1-getSlice()->getColFromL0Flag() : 0), getSlice()->getColRefIdx());
+  TComDataCU *pColCtu = pColPic->getCtu( ctuRsAddr );
+  // Bail out when the collocated CTU carries no decoded data.
+  if(pColCtu->getPic()==0||pColCtu->getPartitionSize(uiPartUnitIdx)==NUMBER_OF_PART_SIZES)
+  {
+    return false;
+  }
+  iCurrPOC = m_pcSlice->getPOC();
+  iColPOC = pColCtu->getSlice()->getPOC();
+
+  if (!pColCtu->isInter(uiAbsPartAddr))
+  {
+    return false;
+  }
+
+  // Choose which list of the collocated block to read from.
+  eColRefPicList = getSlice()->getCheckLDC() ? eRefPicList : RefPicList(getSlice()->getColFromL0Flag());
+
+  Int iColRefIdx = pColCtu->getCUMvField(RefPicList(eColRefPicList))->getRefIdx(uiAbsPartAddr);
+
+  // Fall back to the other list when the preferred one has no MV here.
+  if (iColRefIdx < 0 )
+  {
+    eColRefPicList = RefPicList(1 - eColRefPicList);
+    iColRefIdx = pColCtu->getCUMvField(RefPicList(eColRefPicList))->getRefIdx(uiAbsPartAddr);
+
+    if (iColRefIdx < 0 )
+    {
+      return false;
+    }
+  }
+
+  // Scale the vector.
+  iColRefPOC = pColCtu->getSlice()->getRefPOC(eColRefPicList, iColRefIdx);
+  cColMv = pColCtu->getCUMvField(eColRefPicList)->getMv(uiAbsPartAddr);
+
+  iCurrRefPOC = m_pcSlice->getRefPic(eRefPicList, riRefIdx)->getPOC();
+
+  Bool bIsCurrRefLongTerm = m_pcSlice->getRefPic(eRefPicList, riRefIdx)->getIsLongTerm();
+  Bool bIsColRefLongTerm = pColCtu->getSlice()->getIsUsedAsLongTerm(eColRefPicList, iColRefIdx);
+
+  // Long-term/short-term status of the two references must match; POC scaling
+  // is defined only between short-term references.
+  if ( bIsCurrRefLongTerm != bIsColRefLongTerm )
+  {
+    return false;
+  }
+
+  if ( bIsCurrRefLongTerm || bIsColRefLongTerm )
+  {
+    rcMv = cColMv;
+  }
+  else
+  {
+    iScale = xGetDistScaleFactor(iCurrPOC, iCurrRefPOC, iColPOC, iColRefPOC);
+    if ( iScale == 4096 ) // unity scale: skip the fixed-point multiply
+    {
+      rcMv = cColMv;
+    }
+    else
+    {
+      rcMv = cColMv.scaleMv( iScale );
+    }
+  }
+
+  return true;
+}
+
+/** Number of bits needed to signal a motion vector difference: the sum of the
+ *  per-component code lengths.
+ * \param cMvd  motion vector difference
+ * \returns total bit count for both components
+ */
+UInt TComDataCU::xGetMvdBits(TComMv cMvd)
+{
+  const UInt uiHorBits = xGetComponentBits( cMvd.getHor() );
+  const UInt uiVerBits = xGetComponentBits( cMvd.getVer() );
+  return uiHorBits + uiVerBits;
+}
+
+/** Bit length of one MVD component: the signed value is mapped to a strictly
+ *  positive code (non-positive -> odd, positive -> even), and the length grows
+ *  by two bits per additional significant bit of the code.
+ * \param iVal  component value
+ * \returns number of bits used to code iVal
+ */
+UInt TComDataCU::xGetComponentBits(Int iVal)
+{
+  // Map the signed value to a strictly positive code number.
+  UInt uiCode = ( iVal <= 0 ) ? ( ( -iVal ) << 1 ) + 1 : ( iVal << 1 );
+
+  assert ( uiCode );
+
+  UInt uiLength;
+  for ( uiLength = 1; uiCode != 1; uiCode >>= 1 )
+  {
+    uiLength += 2;
+  }
+
+  return uiLength;
+}
+
+
+/** Compute the POC-distance scaling factor used for motion vector scaling
+ *  (fixed point, 4096 == 1.0).
+ * \param iCurrPOC     POC of the current picture
+ * \param iCurrRefPOC  POC of the current reference picture
+ * \param iColPOC      POC of the collocated picture
+ * \param iColRefPOC   POC of the collocated reference picture
+ * \returns scale factor, clipped to [-4096, 4095]
+ */
+Int TComDataCU::xGetDistScaleFactor(Int iCurrPOC, Int iCurrRefPOC, Int iColPOC, Int iColRefPOC)
+{
+  const Int iDiffPocD = iColPOC  - iColRefPOC;  // collocated POC distance ("td")
+  const Int iDiffPocB = iCurrPOC - iCurrRefPOC; // current POC distance    ("tb")
+
+  // Equal distances need no scaling: return unity in Q12 fixed point.
+  if ( iDiffPocD == iDiffPocB )
+  {
+    return 4096;
+  }
+
+  const Int iTDB = Clip3( -128, 127, iDiffPocB );
+  const Int iTDD = Clip3( -128, 127, iDiffPocD );
+  const Int iX   = ( 0x4000 + abs( iTDD / 2 ) ) / iTDD;
+  return Clip3( -4096, 4095, ( iTDB * iX + 32 ) >> 6 );
+}
+
+/** Compute the Z-scan address of the centre sub-partition of a PU
+ *  (used to fetch the collocated centre MV for temporal prediction).
+ * \param uiPartIdx         PU index within the CU
+ * \param ruiPartIdxCenter  (output) Z-scan address of the PU centre
+ * \returns Void
+ */
+Void TComDataCU::xDeriveCenterIdx( UInt uiPartIdx, UInt& ruiPartIdxCenter )
+{
+  UInt uiPartAddr;
+  Int  iPartWidth;
+  Int  iPartHeight;
+  getPartIndexAndSize( uiPartIdx, uiPartAddr, iPartWidth, iPartHeight);
+
+  ruiPartIdxCenter = m_absZIdxInCtu+uiPartAddr; // partition origin.
+  // Step half the PU height down and half the PU width right (in raster
+  // space), then convert back to Z-scan order.
+  ruiPartIdxCenter = g_auiRasterToZscan[ g_auiZscanToRaster[ ruiPartIdxCenter ]
+                                        + ( iPartHeight/m_pcPic->getMinCUHeight()  )/2*m_pcPic->getNumPartInCtuWidth()
+                                        + ( iPartWidth/m_pcPic->getMinCUWidth()  )/2];
+}
+
+/** Sub-sample the CU motion fields by the AMVP decimation factor, reducing the
+ *  memory needed when this picture is later used as a collocated reference.
+ */
+Void TComDataCU::compressMV()
+{
+  const Int iScaleFactor = 4 * AMVP_DECIMATION_FACTOR / m_unitSize;
+
+  // Nothing to do when the unit size already meets the decimated grid.
+  if ( iScaleFactor <= 0 )
+  {
+    return;
+  }
+
+  for ( UInt ui = 0; ui < NUM_REF_PIC_LIST_01; ui++ )
+  {
+    m_acCUMvField[ui].compress( m_pePredMode, iScaleFactor );
+  }
+}
+
+/** Select the coefficient scan order for a TU using mode-dependent coefficient
+ *  scanning (MDCS). Only intra TUs within the MDCS size limits may use the
+ *  horizontal/vertical scans; everything else uses the diagonal scan.
+ * \param uiAbsPartIdx  TU partition index
+ * \param uiWidth       TU width for this component
+ * \param uiHeight      TU height for this component
+ * \param compID        colour component
+ * \returns SCAN_HOR, SCAN_VER or SCAN_DIAG
+ */
+UInt TComDataCU::getCoefScanIdx(const UInt uiAbsPartIdx, const UInt uiWidth, const UInt uiHeight, const ComponentID compID) const
+{
+  //------------------------------------------------
+
+  //this mechanism is available for intra only
+
+  if (!isIntra(uiAbsPartIdx)) return SCAN_DIAG;
+
+  //------------------------------------------------
+
+  //check that MDCS can be used for this TU
+
+  const ChromaFormat format = getPic()->getChromaFormat();
+
+  // Size limits are defined in luma terms; scale them down for chroma.
+  const UInt maximumWidth  = MDCS_MAXIMUM_WIDTH  >> getComponentScaleX(compID, format);
+  const UInt maximumHeight = MDCS_MAXIMUM_HEIGHT >> getComponentScaleY(compID, format);
+
+  if ((uiWidth > maximumWidth) || (uiHeight > maximumHeight)) return SCAN_DIAG;
+
+  //------------------------------------------------
+
+  //otherwise, select the appropriate mode
+
+  UInt uiDirMode  = getIntraDir(toChannelType(compID), uiAbsPartIdx);
+
+  // DM chroma inherits the collocated luma intra direction.
+  if (uiDirMode==DM_CHROMA_IDX)
+  {
+    uiDirMode = getIntraDir(CHANNEL_TYPE_LUMA, getChromasCorrespondingPULumaIdx(uiAbsPartIdx, getPic()->getChromaFormat()));
+  }
+
+  // 4:2:2 chroma remaps the angular mode to account for the non-square grid.
+  if (isChroma(compID) && (format == CHROMA_422))
+  {
+    uiDirMode = g_chroma422IntraAngleMappingTable[uiDirMode];
+  }
+
+  //------------------
+
+  // Near-vertical prediction -> horizontal scan; near-horizontal -> vertical.
+  if      (abs((Int)uiDirMode - VER_IDX) <= MDCS_ANGLE_LIMIT) return SCAN_HOR;
+  else if (abs((Int)uiDirMode - HOR_IDX) <= MDCS_ANGLE_LIMIT) return SCAN_VER;
+  else return SCAN_DIAG;
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComDataCU.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,570 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComDataCU.h
+    \brief    CU data structure (header)
+    \todo     not all entities are documented
+*/
+
+#ifndef __TCOMDATACU__
+#define __TCOMDATACU__
+
+#include <assert.h>
+
+// Include files
+#include "CommonDef.h"
+#include "TComMotionInfo.h"
+#include "TComSlice.h"
+#include "TComRdCost.h"
+#include "TComPattern.h"
+
+#include <algorithm>
+#include <vector>
+
+//! \ingroup TLibCommon
+//! \{
+
+class TComTU; // forward declaration
+
+static const UInt NUM_MOST_PROBABLE_MODES=3;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// CU data structure class
+class TComDataCU
+{
+private:
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // class pointers
+  // -------------------------------------------------------------------------------------------------------------------
+
+  TComPic*      m_pcPic;              ///< picture class pointer
+  TComSlice*    m_pcSlice;            ///< slice header pointer
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // CU description
+  // -------------------------------------------------------------------------------------------------------------------
+
+  UInt          m_ctuRsAddr;          ///< CTU (also known as LCU) address in a slice (Raster-scan address, as opposed to tile-scan/encoding order).
+  UInt          m_absZIdxInCtu;       ///< absolute address in a CTU. It's Z scan order
+  UInt          m_uiCUPelX;           ///< CU position in a pixel (X)
+  UInt          m_uiCUPelY;           ///< CU position in a pixel (Y)
+  UInt          m_uiNumPartition;     ///< total number of minimum partitions in a CU
+  UChar*        m_puhWidth;           ///< array of widths
+  UChar*        m_puhHeight;          ///< array of heights
+  UChar*        m_puhDepth;           ///< array of depths
+  Int           m_unitSize;           ///< size of a "minimum partition"
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // CU data
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Bool*          m_skipFlag;           ///< array of skip flags
+  Char*          m_pePartSize;         ///< array of partition sizes
+  Char*          m_pePredMode;         ///< array of prediction modes
+  Char*          m_crossComponentPredictionAlpha[MAX_NUM_COMPONENT]; ///< array of cross-component prediction alpha values
+  Bool*          m_CUTransquantBypass;   ///< array of cu_transquant_bypass flags
+  Char*          m_phQP;               ///< array of QP values
+  UChar*         m_ChromaQpAdj;        ///< array of chroma QP adjustments (indexed)
+  UInt           m_codedChromaQpAdj;   ///< coded chroma QP adjustment; NOTE(review): declared UInt but set/read as Char by the accessors below -- confirm intended width
+  UChar*         m_puhTrIdx;           ///< array of transform indices
+  UChar*         m_puhTransformSkip[MAX_NUM_COMPONENT];///< array of transform skipping flags
+  UChar*         m_puhCbf[MAX_NUM_COMPONENT];          ///< array of coded block flags (CBF)
+  TComCUMvField  m_acCUMvField[NUM_REF_PIC_LIST_01];    ///< array of motion vectors.
+  TCoeff*        m_pcTrCoeff[MAX_NUM_COMPONENT];       ///< array of transform coefficient buffers (0->Y, 1->Cb, 2->Cr)
+#if ADAPTIVE_QP_SELECTION
+  TCoeff*        m_pcArlCoeff[MAX_NUM_COMPONENT];  // ARL coefficient buffer (0->Y, 1->Cb, 2->Cr)
+  static TCoeff* m_pcGlbArlCoeff[MAX_NUM_COMPONENT]; // global ARL buffer
+  Bool           m_ArlCoeffIsAliasedAllocation;  ///< ARL coefficient buffer is an alias of the global buffer and must not be free()'d
+#endif
+
+  Pel*           m_pcIPCMSample[MAX_NUM_COMPONENT];    ///< PCM sample buffer (0->Y, 1->Cb, 2->Cr)
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // neighbour access variables
+  // -------------------------------------------------------------------------------------------------------------------
+
+  TComDataCU*   m_pCtuAboveLeft;      ///< pointer of above-left CTU.
+  TComDataCU*   m_pCtuAboveRight;     ///< pointer of above-right CTU.
+  TComDataCU*   m_pCtuAbove;          ///< pointer of above CTU.
+  TComDataCU*   m_pCtuLeft;           ///< pointer of left CTU
+  TComDataCU*   m_apcCUColocated[NUM_REF_PIC_LIST_01];  ///< pointer of temporally colocated CU's for both directions
+  TComMvField   m_cMvFieldA;          ///< motion vector of position A
+  TComMvField   m_cMvFieldB;          ///< motion vector of position B
+  TComMvField   m_cMvFieldC;          ///< motion vector of position C
+  TComMv        m_cMvPred;            ///< motion vector predictor
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // coding tool information
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Bool*         m_pbMergeFlag;        ///< array of merge flags
+  UChar*        m_puhMergeIndex;      ///< array of merge candidate indices
+#if AMP_MRG
+  Bool          m_bIsMergeAMP;        ///< merge flag for AMP (asymmetric motion partition) estimation
+#endif
+  UChar*        m_puhIntraDir[MAX_NUM_CHANNEL_TYPE]; // 0-> Luma, 1-> Chroma
+  UChar*        m_puhInterDir;        ///< array of inter directions
+  Char*         m_apiMVPIdx[NUM_REF_PIC_LIST_01];       ///< array of motion vector predictor candidates
+  Char*         m_apiMVPNum[NUM_REF_PIC_LIST_01];       ///< array of number of possible motion vectors predictors
+  Bool*         m_pbIPCMFlag;         ///< array of intra_pcm flags
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // misc. variables
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Bool          m_bDecSubCu;          ///< indicates decoder-mode
+  Double        m_dTotalCost;         ///< sum of partition RD costs
+  Distortion    m_uiTotalDistortion;  ///< sum of partition distortion
+  UInt          m_uiTotalBits;        ///< sum of partition bits
+  UInt          m_uiTotalBins;        ///< sum of partition bins
+  Char          m_codedQP;            ///< QP value cached via setCodedQP()
+  UChar*        m_explicitRdpcmMode[MAX_NUM_COMPONENT]; ///< Stores the explicit RDPCM mode for all TUs belonging to this CU
+
+protected:
+
+  /// add possible motion vector predictor candidates
+  Bool          xAddMVPCand           ( AMVPInfo* pInfo, RefPicList eRefPicList, Int iRefIdx, UInt uiPartUnitIdx, MVP_DIR eDir );
+  Bool          xAddMVPCandOrder      ( AMVPInfo* pInfo, RefPicList eRefPicList, Int iRefIdx, UInt uiPartUnitIdx, MVP_DIR eDir );
+
+  Void          deriveRightBottomIdx        ( UInt uiPartIdx, UInt& ruiPartIdxRB );
+  Bool          xGetColMVP( RefPicList eRefPicList, Int ctuRsAddr, Int uiPartUnitIdx, TComMv& rcMv, Int& riRefIdx ); ///< derive temporally colocated MV predictor
+
+  /// compute required bits to encode MVD (used in AMVP)
+  UInt          xGetMvdBits           ( TComMv cMvd );
+  UInt          xGetComponentBits     ( Int iVal );
+
+  /// compute scaling factor from POC difference
+  Int           xGetDistScaleFactor   ( Int iCurrPOC, Int iCurrRefPOC, Int iColPOC, Int iColRefPOC );
+
+  Void xDeriveCenterIdx( UInt uiPartIdx, UInt& ruiPartIdxCenter ); ///< compute the Z-scan index of the partition at the PU centre
+
+public:
+  TComDataCU();
+  virtual ~TComDataCU();
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // create / destroy / initialize / copy
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Void          create                ( ChromaFormat chromaFormatIDC, UInt uiNumPartition, UInt uiWidth, UInt uiHeight, Bool bDecSubCu, Int unitSize
+#if ADAPTIVE_QP_SELECTION
+    , Bool bGlobalRMARLBuffer = false
+#endif
+    );
+  Void          destroy               ();
+
+  Void          initCtu               ( TComPic* pcPic, UInt ctuRsAddr );
+  Void          initEstData           ( const UInt uiDepth, const Int qp, const Bool bTransquantBypass );
+  Void          initSubCU             ( TComDataCU* pcCU, UInt uiPartUnitIdx, UInt uiDepth, Int qp );
+  Void          setOutsideCUPart      ( UInt uiAbsPartIdx, UInt uiDepth );
+
+  Void          copySubCU             ( TComDataCU* pcCU, UInt uiPartUnitIdx, UInt uiDepth );
+  Void          copyInterPredInfoFrom ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefPicList );
+  Void          copyPartFrom          ( TComDataCU* pcCU, UInt uiPartUnitIdx, UInt uiDepth );
+
+  Void          copyToPic             ( UChar uiDepth );
+  Void          copyToPic             ( UChar uiDepth, UInt uiPartIdx, UInt uiPartDepth );
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // member functions for CU description
+  // -------------------------------------------------------------------------------------------------------------------
+
+  TComPic*        getPic              ()                        { return m_pcPic;           }
+  const TComPic*  getPic              () const                  { return m_pcPic;           }
+  TComSlice*       getSlice           ()                        { return m_pcSlice;         }
+  const TComSlice* getSlice           () const                  { return m_pcSlice;         }
+  UInt&         getCtuRsAddr          ()                        { return m_ctuRsAddr;       }
+  UInt          getCtuRsAddr          () const                  { return m_ctuRsAddr;       }
+  UInt          getZorderIdxInCtu     () const                  { return m_absZIdxInCtu;    }
+  UInt          getCUPelX             () const                  { return m_uiCUPelX;        }
+  UInt          getCUPelY             () const                  { return m_uiCUPelY;        }
+
+  UChar*        getDepth              ()                        { return m_puhDepth;        }
+  UChar         getDepth              ( UInt uiIdx ) const      { return m_puhDepth[uiIdx]; }
+  Void          setDepth              ( UInt uiIdx, UChar  uh ) { m_puhDepth[uiIdx] = uh;   }
+
+  Void          setDepthSubParts      ( UInt uiDepth, UInt uiAbsPartIdx );
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // member functions for CU data
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Char*         getPartitionSize      ()                        { return m_pePartSize;        }
+  PartSize      getPartitionSize      ( UInt uiIdx )            { return static_cast<PartSize>( m_pePartSize[uiIdx] ); }
+  Void          setPartitionSize      ( UInt uiIdx, PartSize uh){ m_pePartSize[uiIdx] = uh;   }
+  Void          setPartSizeSubParts   ( PartSize eMode, UInt uiAbsPartIdx, UInt uiDepth );
+  Void          setCUTransquantBypassSubParts( Bool flag, UInt uiAbsPartIdx, UInt uiDepth );
+
+  Bool*         getSkipFlag            ()                        { return m_skipFlag;          }
+  Bool          getSkipFlag            (UInt idx)                { return m_skipFlag[idx];     }
+  Void          setSkipFlag           ( UInt idx, Bool skip)     { m_skipFlag[idx] = skip;   }
+  Void          setSkipFlagSubParts   ( Bool skip, UInt absPartIdx, UInt depth );
+
+  Char*         getPredictionMode     ()                        { return m_pePredMode;        }
+  PredMode      getPredictionMode     ( UInt uiIdx )            { return static_cast<PredMode>( m_pePredMode[uiIdx] ); }
+  Void          setPredictionMode     ( UInt uiIdx, PredMode uh){ m_pePredMode[uiIdx] = uh;   }
+  Void          setPredModeSubParts   ( PredMode eMode, UInt uiAbsPartIdx, UInt uiDepth );
+
+  Char*         getCrossComponentPredictionAlpha( ComponentID compID )             { return m_crossComponentPredictionAlpha[compID];         }
+  Char          getCrossComponentPredictionAlpha( UInt uiIdx, ComponentID compID ) { return m_crossComponentPredictionAlpha[compID][uiIdx];  }
+
+  Bool*         getCUTransquantBypass ()                        { return m_CUTransquantBypass;        }
+  Bool          getCUTransquantBypass( UInt uiIdx )             { return m_CUTransquantBypass[uiIdx]; }
+
+  UChar*        getWidth              ()                        { return m_puhWidth;          }
+  UChar         getWidth              ( UInt uiIdx )            { return m_puhWidth[uiIdx];   }
+  Void          setWidth              ( UInt uiIdx, UChar  uh ) { m_puhWidth[uiIdx] = uh;     }
+
+  UChar*        getHeight             ()                        { return m_puhHeight;         }
+  UChar         getHeight             ( UInt uiIdx )            { return m_puhHeight[uiIdx];  }
+  Void          setHeight             ( UInt uiIdx, UChar  uh ) { m_puhHeight[uiIdx] = uh;    }
+
+  Void          setSizeSubParts       ( UInt uiWidth, UInt uiHeight, UInt uiAbsPartIdx, UInt uiDepth );
+
+  Char*         getQP                 ()                        { return m_phQP;              }
+  Char          getQP                 ( UInt uiIdx ) const      { return m_phQP[uiIdx];       }
+  Void          setQP                 ( UInt uiIdx, Char value ){ m_phQP[uiIdx] =  value;     }
+  Void          setQPSubParts         ( Int qp,   UInt uiAbsPartIdx, UInt uiDepth );
+  Int           getLastValidPartIdx   ( Int iAbsPartIdx );
+  Char          getLastCodedQP        ( UInt uiAbsPartIdx );
+  Void          setQPSubCUs           ( Int qp, UInt absPartIdx, UInt depth, Bool &foundNonZeroCbf );
+  Void          setCodedQP            ( Char qp )               { m_codedQP = qp;             }
+  Char          getCodedQP            ()                        { return m_codedQP;           }
+
+  UChar*        getChromaQpAdj        ()                        { return m_ChromaQpAdj;       }
+  UChar         getChromaQpAdj        (Int idx)           const { return m_ChromaQpAdj[idx];  }
+  Void          setChromaQpAdj        (Int idx, UChar val)      { m_ChromaQpAdj[idx] = val;   }
+  Void          setChromaQpAdjSubParts( UChar val, Int absPartIdx, Int depth );
+  Void          setCodedChromaQpAdj   ( Char qp )               { m_codedChromaQpAdj = qp;    } // NOTE(review): Char stored into UInt member -- verify signedness/width
+  Char          getCodedChromaQpAdj   ()                        { return m_codedChromaQpAdj;  } // NOTE(review): UInt narrowed to Char on return -- verify
+
+  Bool          isLosslessCoded       ( UInt absPartIdx );
+
+  UChar*        getTransformIdx       ()                        { return m_puhTrIdx;          }
+  UChar         getTransformIdx       ( UInt uiIdx )            { return m_puhTrIdx[uiIdx];   }
+  Void          setTrIdxSubParts      ( UInt uiTrIdx, UInt uiAbsPartIdx, UInt uiDepth );
+
+  UChar*        getTransformSkip      ( ComponentID compID )    { return m_puhTransformSkip[compID];}
+  UChar         getTransformSkip      ( UInt uiIdx, ComponentID compID)    { return m_puhTransformSkip[compID][uiIdx];}
+  Void          setTransformSkipSubParts  ( UInt useTransformSkip, ComponentID compID, UInt uiAbsPartIdx, UInt uiDepth);
+  Void          setTransformSkipSubParts  ( const UInt useTransformSkip[MAX_NUM_COMPONENT], UInt uiAbsPartIdx, UInt uiDepth );
+
+  UChar*        getExplicitRdpcmMode      ( ComponentID component ) { return m_explicitRdpcmMode[component]; }
+  UChar         getExplicitRdpcmMode      ( ComponentID component, UInt partIdx ) {return m_explicitRdpcmMode[component][partIdx]; }
+  Void          setExplicitRdpcmModePartRange ( UInt rdpcmMode, ComponentID compID, UInt uiAbsPartIdx, UInt uiCoveredPartIdxes );
+
+  Bool          isRDPCMEnabled         ( UInt uiAbsPartIdx )  { return getSlice()->getSPS()->getUseResidualDPCM(isIntra(uiAbsPartIdx) ? RDPCM_SIGNAL_IMPLICIT : RDPCM_SIGNAL_EXPLICIT); }
+
+  Void          setCrossComponentPredictionAlphaPartRange    ( Char alphaValue, ComponentID compID, UInt uiAbsPartIdx, UInt uiCoveredPartIdxes );
+  Void          setTransformSkipPartRange                    ( UInt useTransformSkip, ComponentID compID, UInt uiAbsPartIdx, UInt uiCoveredPartIdxes );
+
+  UInt          getQuadtreeTULog2MinSizeInCU( UInt uiIdx );
+
+  TComCUMvField* getCUMvField         ( RefPicList e )          { return  &m_acCUMvField[e];  }
+
+  TCoeff*       getCoeff              (ComponentID component)   { return m_pcTrCoeff[component]; }
+
+#if ADAPTIVE_QP_SELECTION
+  TCoeff*       getArlCoeff           ( ComponentID component ) { return m_pcArlCoeff[component]; }
+#endif
+  Pel*          getPCMSample          ( ComponentID component ) { return m_pcIPCMSample[component]; }
+
+  UChar         getCbf    ( UInt uiIdx, ComponentID eType )                  { return m_puhCbf[eType][uiIdx];  }
+  UChar*        getCbf    ( ComponentID eType )                              { return m_puhCbf[eType];         }
+  UChar         getCbf    ( UInt uiIdx, ComponentID eType, UInt uiTrDepth )  { return ( ( getCbf( uiIdx, eType ) >> uiTrDepth ) & 0x1 ); } // CBF bit for a given transform depth
+  Void          setCbf    ( UInt uiIdx, ComponentID eType, UChar uh )        { m_puhCbf[eType][uiIdx] = uh;    }
+  Void          clearCbf  ( UInt uiIdx, ComponentID eType, UInt uiNumParts );
+  UChar         getQtRootCbf          ( UInt uiIdx );
+
+  Void          setCbfSubParts        ( const UInt uiCbf[MAX_NUM_COMPONENT],  UInt uiAbsPartIdx, UInt uiDepth           );
+  Void          setCbfSubParts        ( UInt uiCbf, ComponentID compID, UInt uiAbsPartIdx, UInt uiDepth                    );
+  Void          setCbfSubParts        ( UInt uiCbf, ComponentID compID, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth    );
+
+  Void          setCbfPartRange       ( UInt uiCbf, ComponentID compID, UInt uiAbsPartIdx, UInt uiCoveredPartIdxes      );
+  Void          bitwiseOrCbfPartRange ( UInt uiCbf, ComponentID compID, UInt uiAbsPartIdx, UInt uiCoveredPartIdxes      );
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // member functions for coding tool information
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Bool*         getMergeFlag          ()                        { return m_pbMergeFlag;               }
+  Bool          getMergeFlag          ( UInt uiIdx )            { return m_pbMergeFlag[uiIdx];        }
+  Void          setMergeFlag          ( UInt uiIdx, Bool b )    { m_pbMergeFlag[uiIdx] = b;           }
+  Void          setMergeFlagSubParts  ( Bool bMergeFlag, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth );
+
+  UChar*        getMergeIndex         ()                        { return m_puhMergeIndex;                         }
+  UChar         getMergeIndex         ( UInt uiIdx )            { return m_puhMergeIndex[uiIdx];                  }
+  Void          setMergeIndex         ( UInt uiIdx, UInt uiMergeIndex ) { m_puhMergeIndex[uiIdx] = uiMergeIndex;  }
+  Void          setMergeIndexSubParts ( UInt uiMergeIndex, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth );
+  template <typename T>
+  Void          setSubPart            ( T bParameter, T* pbBaseCtu, UInt uiCUAddr, UInt uiCUDepth, UInt uiPUIdx );
+
+#if AMP_MRG
+  Void          setMergeAMP( Bool b )      { m_bIsMergeAMP = b; }
+  Bool          getMergeAMP( )             { return m_bIsMergeAMP; }
+#endif
+
+  UChar*        getIntraDir         ( const ChannelType channelType )                   const { return m_puhIntraDir[channelType];         }
+  UChar         getIntraDir         ( const ChannelType channelType, const UInt uiIdx ) const { return m_puhIntraDir[channelType][uiIdx];  }
+
+  Void          setIntraDirSubParts ( const ChannelType channelType,
+                                      const UInt uiDir,
+                                      const UInt uiAbsPartIdx,
+                                      const UInt uiDepth );
+
+  UChar*        getInterDir           ()                        { return m_puhInterDir;               }
+  UChar         getInterDir           ( UInt uiIdx )            { return m_puhInterDir[uiIdx];        }
+  Void          setInterDir           ( UInt uiIdx, UChar  uh ) { m_puhInterDir[uiIdx] = uh;          }
+  Void          setInterDirSubParts   ( UInt uiDir,  UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth );
+  Bool*         getIPCMFlag           ()                        { return m_pbIPCMFlag;               }
+  Bool          getIPCMFlag           (UInt uiIdx )             { return m_pbIPCMFlag[uiIdx];        }
+  Void          setIPCMFlag           (UInt uiIdx, Bool b )     { m_pbIPCMFlag[uiIdx] = b;           }
+  Void          setIPCMFlagSubParts   (Bool bIpcmFlag, UInt uiAbsPartIdx, UInt uiDepth);
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // member functions for accessing partition information
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Void          getPartIndexAndSize   ( UInt uiPartIdx, UInt& ruiPartAddr, Int& riWidth, Int& riHeight );
+  UChar         getNumPartitions      ( const UInt uiAbsPartIdx = 0 );
+  Bool          isFirstAbsZorderIdxInDepth (UInt uiAbsPartIdx, UInt uiDepth);
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // member functions for motion vector
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Void          getMvField            ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefPicList, TComMvField& rcMvField );
+
+  Void          fillMvpCand           ( UInt uiPartIdx, UInt uiPartAddr, RefPicList eRefPicList, Int iRefIdx, AMVPInfo* pInfo );
+  Bool          isDiffMER             ( Int xN, Int yN, Int xP, Int yP);
+  Void          getPartPosition       ( UInt partIdx, Int& xP, Int& yP, Int& nPSW, Int& nPSH);
+
+  Void          setMVPIdx             ( RefPicList eRefPicList, UInt uiIdx, Int iMVPIdx)  { m_apiMVPIdx[eRefPicList][uiIdx] = iMVPIdx;  }
+  Int           getMVPIdx             ( RefPicList eRefPicList, UInt uiIdx)               { return m_apiMVPIdx[eRefPicList][uiIdx];     }
+  Char*         getMVPIdx             ( RefPicList eRefPicList )                          { return m_apiMVPIdx[eRefPicList];            }
+
+  Void          setMVPNum             ( RefPicList eRefPicList, UInt uiIdx, Int iMVPNum ) { m_apiMVPNum[eRefPicList][uiIdx] = iMVPNum;  }
+  Int           getMVPNum             ( RefPicList eRefPicList, UInt uiIdx )              { return m_apiMVPNum[eRefPicList][uiIdx];     }
+  Char*         getMVPNum             ( RefPicList eRefPicList )                          { return m_apiMVPNum[eRefPicList];            }
+
+  Void          setMVPIdxSubParts     ( Int iMVPIdx, RefPicList eRefPicList, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth );
+  Void          setMVPNumSubParts     ( Int iMVPNum, RefPicList eRefPicList, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth );
+
+  Void          clipMv                ( TComMv&     rcMv     );
+  Void          getMvPredLeft         ( TComMv&     rcMvPred )   { rcMvPred = m_cMvFieldA.getMv(); }
+  Void          getMvPredAbove        ( TComMv&     rcMvPred )   { rcMvPred = m_cMvFieldB.getMv(); }
+  Void          getMvPredAboveRight   ( TComMv&     rcMvPred )   { rcMvPred = m_cMvFieldC.getMv(); }
+
+  Void          compressMV            (); ///< decimate the MV field (see TComDataCU.cpp)
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // utility functions for neighbouring information
+  // -------------------------------------------------------------------------------------------------------------------
+
+  TComDataCU*   getCtuLeft                  () { return m_pCtuLeft;       }
+  TComDataCU*   getCtuAbove                 () { return m_pCtuAbove;      }
+  TComDataCU*   getCtuAboveLeft             () { return m_pCtuAboveLeft;  }
+  TComDataCU*   getCtuAboveRight            () { return m_pCtuAboveRight; }
+  TComDataCU*   getCUColocated              ( RefPicList eRefPicList ) { return m_apcCUColocated[eRefPicList]; }
+  Bool          CUIsFromSameSlice           ( const TComDataCU *pCU /* Can be NULL */) const { return ( pCU!=NULL && pCU->getSlice()->getSliceCurStartCtuTsAddr() == getSlice()->getSliceCurStartCtuTsAddr() ); }
+  Bool          CUIsFromSameTile            ( const TComDataCU *pCU /* Can be NULL */) const;
+  Bool          CUIsFromSameSliceAndTile    ( const TComDataCU *pCU /* Can be NULL */) const;
+  Bool          CUIsFromSameSliceTileAndWavefrontRow( const TComDataCU *pCU /* Can be NULL */) const;
+  Bool          isLastSubCUOfCtu(const UInt absPartIdx);
+
+
+  TComDataCU*   getPULeft                   ( UInt&  uiLPartUnitIdx,
+                                              UInt uiCurrPartUnitIdx,
+                                              Bool bEnforceSliceRestriction=true,
+                                              Bool bEnforceTileRestriction=true );
+  TComDataCU*   getPUAbove                  ( UInt&  uiAPartUnitIdx,
+                                              UInt uiCurrPartUnitIdx,
+                                              Bool bEnforceSliceRestriction=true,
+                                              Bool planarAtCTUBoundary = false,
+                                              Bool bEnforceTileRestriction=true );
+  TComDataCU*   getPUAboveLeft              ( UInt&  uiALPartUnitIdx, UInt uiCurrPartUnitIdx, Bool bEnforceSliceRestriction=true );
+  TComDataCU*   getPUAboveRight             ( UInt&  uiARPartUnitIdx, UInt uiCurrPartUnitIdx, Bool bEnforceSliceRestriction=true );
+  TComDataCU*   getPUBelowLeft              ( UInt&  uiBLPartUnitIdx, UInt uiCurrPartUnitIdx, Bool bEnforceSliceRestriction=true );
+
+  TComDataCU*   getQpMinCuLeft              ( UInt&  uiLPartUnitIdx , UInt uiCurrAbsIdxInCtu );
+  TComDataCU*   getQpMinCuAbove             ( UInt&  uiAPartUnitIdx , UInt uiCurrAbsIdxInCtu );
+  Char          getRefQP                    ( UInt   uiCurrAbsIdxInCtu                       );
+
+  TComDataCU*   getPUAboveRightAdi          ( UInt&  uiARPartUnitIdx, UInt uiCurrPartUnitIdx, UInt uiPartUnitOffset = 1, Bool bEnforceSliceRestriction=true );
+  TComDataCU*   getPUBelowLeftAdi           ( UInt&  uiBLPartUnitIdx, UInt uiCurrPartUnitIdx, UInt uiPartUnitOffset = 1, Bool bEnforceSliceRestriction=true );
+
+  Void          deriveLeftRightTopIdx       ( UInt uiPartIdx, UInt& ruiPartIdxLT, UInt& ruiPartIdxRT );
+  Void          deriveLeftBottomIdx         ( UInt uiPartIdx, UInt& ruiPartIdxLB );
+
+  Void          deriveLeftRightTopIdxAdi    ( UInt& ruiPartIdxLT, UInt& ruiPartIdxRT, UInt uiPartOffset, UInt uiPartDepth );
+  Void          deriveLeftBottomIdxAdi      ( UInt& ruiPartIdxLB, UInt  uiPartOffset, UInt uiPartDepth ); // NOTE: Unused function.
+
+  Bool          hasEqualMotion              ( UInt uiAbsPartIdx, TComDataCU* pcCandCU, UInt uiCandAbsPartIdx );
+  Void          getInterMergeCandidates       ( UInt uiAbsPartIdx, UInt uiPUIdx, TComMvField* pcMFieldNeighbours, UChar* puhInterDirNeighbours, Int& numValidMergeCand, Int mrgCandIdx = -1 );
+
+  Void          deriveLeftRightTopIdxGeneral  ( UInt uiAbsPartIdx, UInt uiPartIdx, UInt& ruiPartIdxLT, UInt& ruiPartIdxRT );
+  Void          deriveLeftBottomIdxGeneral    ( UInt uiAbsPartIdx, UInt uiPartIdx, UInt& ruiPartIdxLB );
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // member functions for modes
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Bool          isIntra            ( UInt uiPartIdx )  const { return m_pePredMode[ uiPartIdx ] == MODE_INTRA;                                              }
+  Bool          isInter            ( UInt uiPartIdx )  const { return m_pePredMode[ uiPartIdx ] == MODE_INTER;                                              }
+  Bool          isSkipped          ( UInt uiPartIdx );                                                     ///< SKIP (no residual)
+  Bool          isBipredRestriction( UInt puIdx );
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // member functions for symbol prediction (most probable / mode conversion)
+  // -------------------------------------------------------------------------------------------------------------------
+
+  UInt          getIntraSizeIdx                 ( UInt uiAbsPartIdx                                       );
+
+  Void          getAllowedChromaDir             ( UInt uiAbsPartIdx, UInt* uiModeList );
+  Int           getIntraDirPredictor            ( UInt uiAbsPartIdx, Int uiIntraDirPred[NUM_MOST_PROBABLE_MODES], const ComponentID compID, Int* piMode = NULL );
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // member functions for SBAC context
+  // -------------------------------------------------------------------------------------------------------------------
+
+  UInt          getCtxSplitFlag                 ( UInt   uiAbsPartIdx, UInt uiDepth                   );
+  UInt          getCtxQtCbf                     ( TComTU &rTu, const ChannelType chType );
+
+  UInt          getCtxSkipFlag                  ( UInt   uiAbsPartIdx                                 );
+  UInt          getCtxInterDir                  ( UInt   uiAbsPartIdx                                 );
+
+  UInt&         getTotalBins            ()                            { return m_uiTotalBins;                              }
+  // -------------------------------------------------------------------------------------------------------------------
+  // member functions for RD cost storage
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Double&       getTotalCost()                  { return m_dTotalCost;        }
+  Distortion&   getTotalDistortion()            { return m_uiTotalDistortion; }
+  UInt&         getTotalBits()                  { return m_uiTotalBits;       }
+  UInt&         getTotalNumPart()               { return m_uiNumPartition;    }
+
+  UInt          getCoefScanIdx(const UInt uiAbsPartIdx, const UInt uiWidth, const UInt uiHeight, const ComponentID compID) const ;
+
+};
+
+namespace RasterAddress
+{
+  /** Check whether 2 addresses point to the same column
+   * \param addrA          First address in raster scan order
+   * \param addrB          Second address in raster scan order
+   * \param numUnitsPerRow Number of units in a row (must be a power of two for the bit-mask trick below)
+   * \return Result of test
+   */
+  static inline Bool isEqualCol( Int addrA, Int addrB, Int numUnitsPerRow )
+  {
+    // addrA % numUnitsPerRow == addrB % numUnitsPerRow (mask trick valid only when numUnitsPerRow is a power of two)
+    return (( addrA ^ addrB ) &  ( numUnitsPerRow - 1 ) ) == 0;
+  }
+
+  /** Check whether 2 addresses point to the same row
+   * \param addrA          First address in raster scan order
+   * \param addrB          Second address in raster scan order
+   * \param numUnitsPerRow Number of units in a row (must be a power of two for the bit-mask trick below)
+   * \return Result of test
+   */
+  static inline Bool isEqualRow( Int addrA, Int addrB, Int numUnitsPerRow )
+  {
+    // addrA / numUnitsPerRow == addrB / numUnitsPerRow (mask trick valid only when numUnitsPerRow is a power of two)
+    return (( addrA ^ addrB ) &~ ( numUnitsPerRow - 1 ) ) == 0;
+  }
+
+  /** Check whether 2 addresses point to the same row or column
+   * \param addrA          First address in raster scan order
+   * \param addrB          Second address in raster scan order
+   * \param numUnitsPerRow Number of units in a row (must be a power of two)
+   * \return Result of test
+   */
+  static inline Bool isEqualRowOrCol( Int addrA, Int addrB, Int numUnitsPerRow )
+  {
+    return isEqualCol( addrA, addrB, numUnitsPerRow ) | isEqualRow( addrA, addrB, numUnitsPerRow ); // bitwise |: both operands always evaluated (no short-circuit)
+  }
+
+  /** Check whether one address points to the first column
+   * \param addr           Address in raster scan order
+   * \param numUnitsPerRow Number of units in a row (must be a power of two)
+   * \return Result of test
+   */
+  static inline Bool isZeroCol( Int addr, Int numUnitsPerRow )
+  {
+    // addr % numUnitsPerRow == 0 (mask trick: power-of-two numUnitsPerRow only)
+    return ( addr & ( numUnitsPerRow - 1 ) ) == 0;
+  }
+
+  /** Check whether one address points to the first row
+   * \param addr           Address in raster scan order
+   * \param numUnitsPerRow Number of units in a row (must be a power of two)
+   * \return Result of test
+   */
+  static inline Bool isZeroRow( Int addr, Int numUnitsPerRow )
+  {
+    // addr / numUnitsPerRow == 0 (mask trick: power-of-two numUnitsPerRow only)
+    return ( addr &~ ( numUnitsPerRow - 1 ) ) == 0;
+  }
+
+  /** Check whether one address points to a column whose index is smaller than a given value
+   * \param addr           Address in raster scan order
+   * \param val            Given column index value
+   * \param numUnitsPerRow Number of units in a row (must be a power of two)
+   * \return Result of test
+   */
+  static inline Bool lessThanCol( Int addr, Int val, Int numUnitsPerRow )
+  {
+    // addr % numUnitsPerRow < val (mask trick: power-of-two numUnitsPerRow only)
+    return ( addr & ( numUnitsPerRow - 1 ) ) < val;
+  }
+
+  /** Check whether one address points to a row whose index is smaller than a given value
+   * \param addr           Address in raster scan order
+   * \param val            Given row index value
+   * \param numUnitsPerRow Number of units in a row (no power-of-two restriction: uses multiplication)
+   * \return Result of test
+   */
+  static inline Bool lessThanRow( Int addr, Int val, Int numUnitsPerRow )
+  {
+    // addr / numUnitsPerRow < val
+    return addr < val * numUnitsPerRow;
+  }
+}
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComInterpolationFilter.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,383 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ * \brief Implementation of TComInterpolationFilter class
+ */
+
+// ====================================================================================================================
+// Includes
+// ====================================================================================================================
+
+#include "TComRom.h"
+#include "TComInterpolationFilter.h"
+#include <assert.h>
+
+#include "TComChromaFormat.h"
+
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Tables
+// ====================================================================================================================
+
+const TFilterCoeff TComInterpolationFilter::m_lumaFilter[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][NTAPS_LUMA] =
+{
+  {  0, 0,   0, 64,  0,   0, 0,  0 },
+  { -1, 4, -10, 58, 17,  -5, 1,  0 },
+  { -1, 4, -11, 40, 40, -11, 4, -1 },
+  {  0, 1,  -5, 17, 58, -10, 4, -1 }
+};
+
+const TFilterCoeff TComInterpolationFilter::m_chromaFilter[CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][NTAPS_CHROMA] =
+{
+  {  0, 64,  0,  0 },
+  { -2, 58, 10, -2 },
+  { -4, 54, 16, -2 },
+  { -6, 46, 28, -4 },
+  { -4, 36, 36, -4 },
+  { -4, 28, 46, -6 },
+  { -2, 16, 54, -4 },
+  { -2, 10, 58, -2 }
+};
+
+// ====================================================================================================================
+// Private member functions
+// ====================================================================================================================
+
+/**
+ * \brief Apply unit FIR filter to a block of samples
+ *
+ * \param bitDepth   bitDepth of samples
+ * \param src        Pointer to source samples
+ * \param srcStride  Stride of source samples
+ * \param dst        Pointer to destination samples
+ * \param dstStride  Stride of destination samples
+ * \param width      Width of block
+ * \param height     Height of block
+ * \param isFirst    Flag indicating whether it is the first filtering operation
+ * \param isLast     Flag indicating whether it is the last filtering operation
+ */
+Void TComInterpolationFilter::filterCopy(Int bitDepth, const Pel *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, Bool isFirst, Bool isLast)
+{
+  Int row, col;
+
+  if ( isFirst == isLast ) // both or neither stage: samples are already in the target domain, plain copy
+  {
+    for (row = 0; row < height; row++)
+    {
+      for (col = 0; col < width; col++)
+      {
+        dst[col] = src[col];
+      }
+
+      src += srcStride;
+      dst += dstStride;
+    }
+  }
+  else if ( isFirst )
+  {
+    const Int shift = std::max<Int>(2, (IF_INTERNAL_PREC - bitDepth)); // scale up to internal precision (minimum shift of 2)
+
+    for (row = 0; row < height; row++)
+    {
+      for (col = 0; col < width; col++)
+      {
+        Pel val = leftShift_round(src[col], shift);
+        dst[col] = val - (Pel)IF_INTERNAL_OFFS; // subtract DC offset so the intermediate fits the signed Pel range
+      }
+
+      src += srcStride;
+      dst += dstStride;
+    }
+  }
+  else
+  {
+    const Int shift = std::max<Int>(2, (IF_INTERNAL_PREC - bitDepth)); // scale back down from internal precision
+
+    Pel maxVal = (1 << bitDepth) - 1;
+    Pel minVal = 0;
+    for (row = 0; row < height; row++)
+    {
+      for (col = 0; col < width; col++)
+      {
+        Pel val = src[ col ];
+        val = rightShift_round((val + IF_INTERNAL_OFFS), shift); // re-add offset, round, and clip to [0, maxVal]
+        if (val < minVal) val = minVal;
+        if (val > maxVal) val = maxVal;
+        dst[col] = val;
+      }
+
+      src += srcStride;
+      dst += dstStride;
+    }
+  }
+}
+
+/**
+ * \brief Apply FIR filter to a block of samples
+ *
+ * \tparam N          Number of taps
+ * \tparam isVertical Flag indicating filtering along vertical direction
+ * \tparam isFirst    Flag indicating whether it is the first filtering operation
+ * \tparam isLast     Flag indicating whether it is the last filtering operation
+ * \param  bitDepth   Bit depth of samples
+ * \param  src        Pointer to source samples
+ * \param  srcStride  Stride of source samples
+ * \param  dst        Pointer to destination samples
+ * \param  dstStride  Stride of destination samples
+ * \param  width      Width of block
+ * \param  height     Height of block
+ * \param  coeff      Pointer to filter taps
+ */
+template<Int N, Bool isVertical, Bool isFirst, Bool isLast>
+Void TComInterpolationFilter::filter(Int bitDepth, Pel const *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, TFilterCoeff const *coeff)
+{
+  Int row, col;
+
+  Pel c[8]; // local copy of the (up to 8) filter taps; unused entries for N<8 stay uninitialised but unread
+  c[0] = coeff[0];
+  c[1] = coeff[1];
+  if ( N >= 4 )
+  {
+    c[2] = coeff[2];
+    c[3] = coeff[3];
+  }
+  if ( N >= 6 )
+  {
+    c[4] = coeff[4];
+    c[5] = coeff[5];
+  }
+  if ( N == 8 )
+  {
+    c[6] = coeff[6];
+    c[7] = coeff[7];
+  }
+
+  Int cStride = ( isVertical ) ? srcStride : 1;
+  src -= ( N/2 - 1 ) * cStride; // step back so the filter window is centred on the current sample
+
+  Int offset;
+  Pel maxVal;
+  Int headRoom = std::max<Int>(2, (IF_INTERNAL_PREC - bitDepth));
+  Int shift    = IF_FILTER_PREC;
+  // with the current settings (IF_INTERNAL_PREC = 14 and IF_FILTER_PREC = 6), though headroom can be
+  // negative for bit depths greater than 14, shift will remain non-negative for bit depths of 8->20
+  assert(shift >= 0);
+
+  if ( isLast )
+  {
+    shift += (isFirst) ? 0 : headRoom;
+    offset = 1 << (shift - 1);
+    offset += (isFirst) ? 0 : IF_INTERNAL_OFFS << IF_FILTER_PREC;
+    maxVal = (1 << bitDepth) - 1;
+  }
+  else
+  {
+    shift -= (isFirst) ? headRoom : 0;
+    offset = (isFirst) ? -(IF_INTERNAL_OFFS << shift) : 0; // parenthesised: "-IF_INTERNAL_OFFS << shift" left-shifts a negative value, which is undefined behaviour in C++
+    maxVal = 0;
+  }
+
+  for (row = 0; row < height; row++)
+  {
+    for (col = 0; col < width; col++)
+    {
+      Int sum;
+
+      sum  = src[ col + 0 * cStride] * c[0];
+      sum += src[ col + 1 * cStride] * c[1];
+      if ( N >= 4 )
+      {
+        sum += src[ col + 2 * cStride] * c[2];
+        sum += src[ col + 3 * cStride] * c[3];
+      }
+      if ( N >= 6 )
+      {
+        sum += src[ col + 4 * cStride] * c[4];
+        sum += src[ col + 5 * cStride] * c[5];
+      }
+      if ( N == 8 )
+      {
+        sum += src[ col + 6 * cStride] * c[6];
+        sum += src[ col + 7 * cStride] * c[7];
+      }
+
+      Pel val = ( sum + offset ) >> shift;
+      if ( isLast ) // final stage clips to the valid sample range [0, maxVal]
+      {
+        val = ( val < 0 ) ? 0 : val;
+        val = ( val > maxVal ) ? maxVal : val;
+      }
+      dst[col] = val;
+    }
+
+    src += srcStride;
+    dst += dstStride;
+  }
+}
+
+/**
+ * \brief Filter a block of samples (horizontal)
+ *
+ * \tparam N          Number of taps
+ * \param  bitDepth   Bit depth of samples
+ * \param  src        Pointer to source samples
+ * \param  srcStride  Stride of source samples
+ * \param  dst        Pointer to destination samples
+ * \param  dstStride  Stride of destination samples
+ * \param  width      Width of block
+ * \param  height     Height of block
+ * \param  isLast     Flag indicating whether it is the last filtering operation
+ * \param  coeff      Pointer to filter taps
+ */
+template<Int N>
+Void TComInterpolationFilter::filterHor(Int bitDepth, Pel *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, Bool isLast, TFilterCoeff const *coeff)
+{
+  if ( !isLast ) // horizontal pass is always the first stage (isFirst=true); select isLast at compile time
+  {
+    filter<N, false, true, false>(bitDepth, src, srcStride, dst, dstStride, width, height, coeff);
+  }
+  else
+  {
+    filter<N, false, true, true>(bitDepth, src, srcStride, dst, dstStride, width, height, coeff);
+  }
+}
+
+/**
+ * \brief Filter a block of samples (vertical)
+ *
+ * \tparam N          Number of taps
+ * \param  src        Pointer to source samples
+ * \param  srcStride  Stride of source samples
+ * \param  dst        Pointer to destination samples
+ * \param  dstStride  Stride of destination samples
+ * \param  width      Width of block
+ * \param  height     Height of block
+ * \param  isFirst    Flag indicating whether it is the first filtering operation
+ * \param  isLast     Flag indicating whether it is the last filtering operation
+ * \param  coeff      Pointer to filter taps
+ */
+template<Int N>
+Void TComInterpolationFilter::filterVer(Int bitDepth, Pel *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, Bool isFirst, Bool isLast, TFilterCoeff const *coeff)
+{
+  // Map the runtime <isFirst, isLast> pair onto the matching compile-time specialisation.
+  if ( isFirst )
+  {
+    if ( isLast )
+    {
+      filter<N, true, true, true>(bitDepth, src, srcStride, dst, dstStride, width, height, coeff);
+    }
+    else
+    {
+      filter<N, true, true, false>(bitDepth, src, srcStride, dst, dstStride, width, height, coeff);
+    }
+  }
+  else if ( isLast )
+  {
+    filter<N, true, false, true>(bitDepth, src, srcStride, dst, dstStride, width, height, coeff);
+  }
+  else
+  {
+    filter<N, true, false, false>(bitDepth, src, srcStride, dst, dstStride, width, height, coeff);
+  }
+}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+/**
+ * \brief Filter a block of Luma/Chroma samples (horizontal)
+ *
+ * \param  src        Pointer to source samples
+ * \param  srcStride  Stride of source samples
+ * \param  dst        Pointer to destination samples
+ * \param  dstStride  Stride of destination samples
+ * \param  width      Width of block
+ * \param  height     Height of block
+ * \param  frac       Fractional sample offset
+ * \param  isLast     Flag indicating whether it is the last filtering operation
+ */
+Void TComInterpolationFilter::filterHor(const ComponentID compID, Pel *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, Int frac, Bool isLast, const ChromaFormat fmt )
+{
+  if ( frac == 0 ) // integer-pel position: no interpolation needed, only a (possibly scaling) copy
+  {
+    filterCopy(g_bitDepth[toChannelType(compID)], src, srcStride, dst, dstStride, width, height, true, isLast );
+  }
+  else if (isLuma(compID))
+  {
+    assert(frac >= 0 && frac < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS);
+    filterHor<NTAPS_LUMA>(g_bitDepth[toChannelType(compID)], src, srcStride, dst, dstStride, width, height, isLast, m_lumaFilter[frac]);
+  }
+  else
+  {
+    const UInt csx = getComponentScaleX(compID, fmt); // horizontal chroma subsampling shift (0 or 1)
+    assert(frac >=0 && csx<2 && (frac<<(1-csx)) < CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS);
+    filterHor<NTAPS_CHROMA>(g_bitDepth[toChannelType(compID)], src, srcStride, dst, dstStride, width, height, isLast, m_chromaFilter[frac<<(1-csx)]); // rescale frac to the 1/8-pel units of the chroma table
+  }
+}
+
+
+/**
+ * \brief Filter a block of Luma/Chroma samples (vertical)
+ *
+ * \param  src        Pointer to source samples
+ * \param  srcStride  Stride of source samples
+ * \param  dst        Pointer to destination samples
+ * \param  dstStride  Stride of destination samples
+ * \param  width      Width of block
+ * \param  height     Height of block
+ * \param  frac       Fractional sample offset
+ * \param  isFirst    Flag indicating whether it is the first filtering operation
+ * \param  isLast     Flag indicating whether it is the last filtering operation
+ */
+Void TComInterpolationFilter::filterVer(const ComponentID compID, Pel *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, Int frac, Bool isFirst, Bool isLast, const ChromaFormat fmt )
+{
+  if ( frac == 0 ) // integer-pel position: no interpolation needed, only a (possibly scaling) copy
+  {
+    filterCopy(g_bitDepth[toChannelType(compID)], src, srcStride, dst, dstStride, width, height, isFirst, isLast );
+  }
+  else if (isLuma(compID))
+  {
+    assert(frac >= 0 && frac < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS);
+    filterVer<NTAPS_LUMA>(g_bitDepth[toChannelType(compID)], src, srcStride, dst, dstStride, width, height, isFirst, isLast, m_lumaFilter[frac]);
+  }
+  else
+  {
+    const UInt csy = getComponentScaleY(compID, fmt); // vertical chroma subsampling shift (0 or 1)
+    assert(frac >=0 && csy<2 && (frac<<(1-csy)) < CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS);
+    filterVer<NTAPS_CHROMA>(g_bitDepth[toChannelType(compID)], src, srcStride, dst, dstStride, width, height, isFirst, isLast, m_chromaFilter[frac<<(1-csy)]); // rescale frac to the 1/8-pel units of the chroma table
+  }
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComInterpolationFilter.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,81 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ * \brief Declaration of TComInterpolationFilter class
+ */
+
+#ifndef __TCOMINTERPOLATIONFILTER__
+#define __TCOMINTERPOLATIONFILTER__
+
+#include "TypeDef.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+#define NTAPS_LUMA        8 ///< Number of taps for luma
+#define NTAPS_CHROMA      4 ///< Number of taps for chroma
+#define IF_INTERNAL_PREC 14 ///< Number of bits for internal precision
+#define IF_FILTER_PREC    6 ///< Log2 of sum of filter taps
+#define IF_INTERNAL_OFFS (1<<(IF_INTERNAL_PREC-1)) ///< Offset used internally
+
+/**
+ * \brief Interpolation filter class
+ */
+class TComInterpolationFilter
+{
+  static const TFilterCoeff m_lumaFilter[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][NTAPS_LUMA];     ///< Luma filter taps
+  static const TFilterCoeff m_chromaFilter[CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][NTAPS_CHROMA]; ///< Chroma filter taps
+
+  static Void filterCopy(Int bitDepth, const Pel *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, Bool isFirst, Bool isLast);
+
+  template<Int N, Bool isVertical, Bool isFirst, Bool isLast>
+  static Void filter(Int bitDepth, Pel const *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, TFilterCoeff const *coeff);
+
+  template<Int N>
+  static Void filterHor(Int bitDepth, Pel *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height,               Bool isLast, TFilterCoeff const *coeff);
+  template<Int N>
+  static Void filterVer(Int bitDepth, Pel *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, Bool isFirst, Bool isLast, TFilterCoeff const *coeff);
+
+public:
+  TComInterpolationFilter() {}
+  ~TComInterpolationFilter() {}
+
+  Void filterHor(const ComponentID compID, Pel *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, Int frac,               Bool isLast, const ChromaFormat fmt );
+  Void filterVer(const ComponentID compID, Pel *src, Int srcStride, Pel *dst, Int dstStride, Int width, Int height, Int frac, Bool isFirst, Bool isLast, const ChromaFormat fmt );
+};
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComList.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,115 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComList.h
+    \brief    general list class (header)
+*/
+
+#ifndef __TCOMLIST__
+#define __TCOMLIST__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include <list>
+#include <assert.h>
+#include "CommonDef.h"
+
+#include <cstdlib>
+using namespace std;
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// list template
+template< class C >
+class TComList : public std::list< C > // NOTE: should not inherit from STL classes
+{
+public:
+  typedef typename std::list<C>::iterator TComIterator;
+
+  /// append the contents of another list
+  TComList& operator += ( const TComList& rcTComList)
+  {
+    if( ! rcTComList.empty() )
+    {
+      insert( this->end(), rcTComList.begin(), rcTComList.end());
+    }
+    return *this;
+  } // leszek
+
+  /// remove and return the last element
+  C popBack()
+  {
+    C cT = this->back();
+    this->pop_back();
+    return cT;
+  }
+
+  /// remove and return the first element
+  C popFront()
+  {
+    C cT = this->front();
+    this->pop_front();
+    return cT;
+  }
+
+  /// append rcT, silently ignoring NULL (the list is used with pointer element types)
+  Void pushBack( const C& rcT )
+  {
+    if( rcT != NULL )
+    {
+      this->push_back( rcT);
+    }
+  }
+
+  /// prepend rcT, silently ignoring NULL
+  Void pushFront( const C& rcT )
+  {
+    if( rcT != NULL )
+    {
+      this->push_front( rcT);
+    }
+  }
+
+  /// find first element equal to rcT. NOTE: std::list has no member find(); the original
+  /// "std::list<C>::find(begin, end, rcT)" only compiled because this template member was
+  /// never instantiated. Use an explicit linear search instead.
+  TComIterator find( const C& rcT ) // leszek
+  {
+    TComIterator it = this->begin();
+    while( it != this->end() && !( *it == rcT ) )
+    {
+      ++it;
+    }
+    return it;
+  }
+};
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComLoopFilter.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,923 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComLoopFilter.cpp
+    \brief    deblocking filter
+*/
+
+#include "TComLoopFilter.h"
+#include "TComSlice.h"
+#include "TComMv.h"
+#include "TComTU.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Constants
+// ====================================================================================================================
+
+//#define   EDGE_VER    0
+//#define   EDGE_HOR    1
+
+#define DEFAULT_INTRA_TC_OFFSET 2 ///< Default intra TC offset
+
+// ====================================================================================================================
+// Tables
+// ====================================================================================================================
+
+const UChar TComLoopFilter::sm_tcTable[MAX_QP + 1 + DEFAULT_INTRA_TC_OFFSET] =
+{
+  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,5,5,6,6,7,8,9,10,11,13,14,16,18,20,22,24
+};
+
+const UChar TComLoopFilter::sm_betaTable[MAX_QP + 1] =
+{
+  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,7,8,9,10,11,12,13,14,15,16,17,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64
+};
+
+// ====================================================================================================================
+// Constructor / destructor / create / destroy
+// ====================================================================================================================
+
+TComLoopFilter::TComLoopFilter()
+: m_uiNumPartitions(0)
+, m_bLFCrossTileBoundary(true)
+{
+  for( Int edgeDir = 0; edgeDir < NUM_EDGE_DIR; edgeDir++ )
+  {
+    m_aapucBS       [edgeDir] = NULL; // per-partition boundary-strength buffer, allocated in create()
+    m_aapbEdgeFilter[edgeDir] = NULL; // per-partition edge-filter flags, allocated in create()
+  }
+}
+
+TComLoopFilter::~TComLoopFilter()
+{
+} // buffers are released explicitly via destroy(), not here
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+Void TComLoopFilter::setCfg( Bool bLFCrossTileBoundary )
+{
+  m_bLFCrossTileBoundary = bLFCrossTileBoundary; // whether the deblocking filter may cross tile boundaries
+}
+
+Void TComLoopFilter::create( UInt uiMaxCUDepth )
+{
+  destroy(); // release any previously allocated buffers first, so create() may be called repeatedly
+  m_uiNumPartitions = 1 << ( uiMaxCUDepth<<1 ); // 4^uiMaxCUDepth smallest partitions per CTU
+  for( Int edgeDir = 0; edgeDir < NUM_EDGE_DIR; edgeDir++ )
+  {
+    m_aapucBS       [edgeDir] = new UChar[m_uiNumPartitions];
+    m_aapbEdgeFilter[edgeDir] = new Bool [m_uiNumPartitions];
+  }
+}
+
+Void TComLoopFilter::destroy()
+{
+  // delete[] on a NULL pointer is a no-op, so no guards are needed (the original mixed
+  // "!= NULL" and truthiness checks inconsistently). Pointers are reset to NULL so that
+  // destroy()/create() can safely be called repeatedly.
+  for( Int edgeDir = 0; edgeDir < NUM_EDGE_DIR; edgeDir++ )
+  {
+    delete [] m_aapucBS[edgeDir];
+    m_aapucBS[edgeDir] = NULL;
+
+    delete [] m_aapbEdgeFilter[edgeDir];
+    m_aapbEdgeFilter[edgeDir] = NULL;
+  }
+}
+
+/**
+ - call deblocking function for every CU
+ .
+ \param  pcPic   picture class (TComPic) pointer
+ */
+Void TComLoopFilter::loopFilterPic( TComPic* pcPic )
+{
+  // Horizontal filtering (vertical edges are filtered first, per the HEVC deblocking order)
+  for ( UInt ctuRsAddr = 0; ctuRsAddr < pcPic->getNumberOfCtusInFrame(); ctuRsAddr++ )
+  {
+    TComDataCU* pCtu = pcPic->getCtu( ctuRsAddr );
+
+    ::memset( m_aapucBS       [EDGE_VER], 0, sizeof( UChar ) * m_uiNumPartitions ); // clear boundary strengths for this CTU
+    ::memset( m_aapbEdgeFilter[EDGE_VER], 0, sizeof( Bool  ) * m_uiNumPartitions ); // clear per-partition edge flags
+
+    // CU-based deblocking
+    xDeblockCU( pCtu, 0, 0, EDGE_VER );
+  }
+
+  // Vertical filtering (horizontal edges)
+  for ( UInt ctuRsAddr = 0; ctuRsAddr < pcPic->getNumberOfCtusInFrame(); ctuRsAddr++ )
+  {
+    TComDataCU* pCtu = pcPic->getCtu( ctuRsAddr );
+
+    ::memset( m_aapucBS       [EDGE_HOR], 0, sizeof( UChar ) * m_uiNumPartitions );
+    ::memset( m_aapbEdgeFilter[EDGE_HOR], 0, sizeof( Bool  ) * m_uiNumPartitions );
+
+    // CU-based deblocking
+    xDeblockCU( pCtu, 0, 0, EDGE_HOR );
+  }
+}
+
+
+// ====================================================================================================================
+// Protected member functions
+// ====================================================================================================================
+
+/**
+ - Deblocking filter process in CU-based (the same function as conventional's)
+ .
+ \param Edge          the direction of the edge in block boundary (horizonta/vertical), which is added newly
+*/
+Void TComLoopFilter::xDeblockCU( TComDataCU* pcCU, UInt uiAbsZorderIdx, UInt uiDepth, DeblockEdgeDir edgeDir )
+{
+  if(pcCU->getPic()==0||pcCU->getPartitionSize(uiAbsZorderIdx)==NUMBER_OF_PART_SIZES) // NUMBER_OF_PART_SIZES apparently marks an invalid/uncoded CU -- TODO confirm
+  {
+    return;
+  }
+  TComPic* pcPic     = pcCU->getPic();
+  UInt uiCurNumParts = pcPic->getNumPartitionsInCtu() >> (uiDepth<<1); // partitions covered by a CU at this depth
+  UInt uiQNumParts   = uiCurNumParts>>2;
+
+  if( pcCU->getDepth(uiAbsZorderIdx) > uiDepth ) // CU is split further: recurse into the four sub-CUs
+  {
+    for ( UInt uiPartIdx = 0; uiPartIdx < 4; uiPartIdx++, uiAbsZorderIdx+=uiQNumParts )
+    {
+      UInt uiLPelX   = pcCU->getCUPelX() + g_auiRasterToPelX[ g_auiZscanToRaster[uiAbsZorderIdx] ];
+      UInt uiTPelY   = pcCU->getCUPelY() + g_auiRasterToPelY[ g_auiZscanToRaster[uiAbsZorderIdx] ];
+      if( ( uiLPelX < pcCU->getSlice()->getSPS()->getPicWidthInLumaSamples() ) && ( uiTPelY < pcCU->getSlice()->getSPS()->getPicHeightInLumaSamples() ) ) // skip sub-CUs outside the picture
+      {
+        xDeblockCU( pcCU, uiAbsZorderIdx, uiDepth+1, edgeDir );
+      }
+    }
+    return;
+  }
+
+  xSetLoopfilterParam( pcCU, uiAbsZorderIdx );
+  TComTURecurse tuRecurse(pcCU, uiAbsZorderIdx);
+  xSetEdgefilterTU   ( tuRecurse );
+  xSetEdgefilterPU   ( pcCU, uiAbsZorderIdx );
+
+  for( UInt uiPartIdx = uiAbsZorderIdx; uiPartIdx < uiAbsZorderIdx + uiCurNumParts; uiPartIdx++ )
+  {
+    UInt uiBSCheck;
+    if( (g_uiMaxCUWidth >> g_uiMaxCUDepth) == 4 ) // 4x4 smallest partitions: BS is only evaluated on every other partition
+    {
+      uiBSCheck = (edgeDir == EDGE_VER && uiPartIdx%2 == 0) || (edgeDir == EDGE_HOR && (uiPartIdx-((uiPartIdx>>2)<<2))/2 == 0);
+    }
+    else
+    {
+      uiBSCheck = 1;
+    }
+
+    if ( m_aapbEdgeFilter[edgeDir][uiPartIdx] && uiBSCheck )
+    {
+      xGetBoundaryStrengthSingle ( pcCU, edgeDir, uiPartIdx );
+    }
+  }
+
+  UInt uiPelsInPart = g_uiMaxCUWidth >> g_uiMaxCUDepth;
+  UInt PartIdxIncr = DEBLOCK_SMALLEST_BLOCK / uiPelsInPart ? DEBLOCK_SMALLEST_BLOCK / uiPelsInPart : 1 ; // step so each iteration covers DEBLOCK_SMALLEST_BLOCK pels (min 1)
+
+  UInt uiSizeInPU = pcPic->getNumPartInCtuWidth()>>(uiDepth);
+  const ChromaFormat chFmt=pcPic->getChromaFormat();
+  const UInt shiftFactor  = edgeDir == EDGE_VER ? pcPic->getComponentScaleX(COMPONENT_Cb) : pcPic->getComponentScaleY(COMPONENT_Cb); // chroma subsampling along the edge direction
+  const Bool bAlwaysDoChroma=chFmt==CHROMA_444;
+
+  for ( Int iEdge = 0; iEdge < uiSizeInPU ; iEdge+=PartIdxIncr) // NOTE(review): signed iEdge vs unsigned uiSizeInPU comparison -- well-defined here (both non-negative) but triggers -Wsign-compare
+  {
+    xEdgeFilterLuma     ( pcCU, uiAbsZorderIdx, uiDepth, edgeDir, iEdge );
+    if ( chFmt!=CHROMA_400 && (bAlwaysDoChroma ||
+                               (uiPelsInPart>DEBLOCK_SMALLEST_BLOCK) ||
+                               (iEdge % ( (DEBLOCK_SMALLEST_BLOCK<<shiftFactor)/uiPelsInPart ) ) == 0
+                              )
+       )
+    {
+      xEdgeFilterChroma   ( pcCU, uiAbsZorderIdx, uiDepth, edgeDir, iEdge );
+    }
+  }
+}
+
+Void TComLoopFilter::xSetEdgefilterMultiple( TComDataCU*    pcCU,
+                                             UInt           uiAbsZorderIdx,
+                                             UInt           uiDepth,
+                                             DeblockEdgeDir edgeDir,
+                                             Int            iEdgeIdx,
+                                             Bool           bValue,
+                                             UInt           uiWidthInBaseUnits,
+                                             UInt           uiHeightInBaseUnits,
+                                             const TComRectangle *rect)
+{
+  if ( uiWidthInBaseUnits == 0 ) // 0 means "use the full CU width at this depth"
+  {
+    uiWidthInBaseUnits  = pcCU->getPic()->getNumPartInCtuWidth () >> uiDepth;
+  }
+  if ( uiHeightInBaseUnits == 0 ) // 0 means "use the full CU height at this depth"
+  {
+    uiHeightInBaseUnits = pcCU->getPic()->getNumPartInCtuHeight() >> uiDepth;
+  }
+  const UInt uiNumElem = edgeDir == EDGE_VER ? uiHeightInBaseUnits : uiWidthInBaseUnits; // number of base units along the edge
+  assert( uiNumElem > 0 );
+  assert( uiWidthInBaseUnits > 0 );
+  assert( uiHeightInBaseUnits > 0 );
+  for( UInt ui = 0; ui < uiNumElem; ui++ )
+  {
+    const UInt uiBsIdx = xCalcBsIdx( pcCU, uiAbsZorderIdx, edgeDir, iEdgeIdx, ui, rect );
+    m_aapbEdgeFilter[edgeDir][uiBsIdx] = bValue;
+    if (iEdgeIdx == 0) // boundary strength is only seeded on the block's own (first) edge
+    {
+      m_aapucBS[edgeDir][uiBsIdx] = bValue;
+    }
+  }
+}
+
+Void TComLoopFilter::xSetEdgefilterTU(  TComTU &rTu )
+{
+  TComDataCU* pcCU  = rTu.getCU();
+  UInt uiTransDepthTotal = rTu.GetTransformDepthTotal();
+
+  if( pcCU->getTransformIdx( rTu.GetAbsPartIdxTU() ) + pcCU->getDepth( rTu.GetAbsPartIdxTU()) > uiTransDepthTotal ) // TU tree is split below this level: recurse into the child TUs
+  {
+    TComTURecurse tuChild(rTu, false);
+    do
+    {
+      xSetEdgefilterTU( tuChild );
+    } while (tuChild.nextSection(rTu));
+    return;
+  }
+
+  const TComRectangle &rect = rTu.getRect(COMPONENT_Y);
+
+  // TU size expressed in smallest-partition base units
+  const UInt uiWidthInBaseUnits  = rect.width / (g_uiMaxCUWidth >> g_uiMaxCUDepth);
+  const UInt uiHeightInBaseUnits = rect.height / (g_uiMaxCUWidth >> g_uiMaxCUDepth);
+
+  xSetEdgefilterMultiple( pcCU, rTu.GetAbsPartIdxCU(), uiTransDepthTotal, EDGE_VER, 0, m_stLFCUParam.bInternalEdge, uiWidthInBaseUnits, uiHeightInBaseUnits, &rect );
+  xSetEdgefilterMultiple( pcCU, rTu.GetAbsPartIdxCU(), uiTransDepthTotal, EDGE_HOR, 0, m_stLFCUParam.bInternalEdge, uiWidthInBaseUnits, uiHeightInBaseUnits, &rect );
+}
+
+Void TComLoopFilter::xSetEdgefilterPU( TComDataCU* pcCU, UInt uiAbsZorderIdx )
+{
+  const UInt uiDepth = pcCU->getDepth( uiAbsZorderIdx );
+  const UInt uiWidthInBaseUnits  = pcCU->getPic()->getNumPartInCtuWidth () >> uiDepth;
+  const UInt uiHeightInBaseUnits = pcCU->getPic()->getNumPartInCtuHeight() >> uiDepth;
+  const UInt uiHWidthInBaseUnits  = uiWidthInBaseUnits  >> 1;
+  const UInt uiHHeightInBaseUnits = uiHeightInBaseUnits >> 1;
+  const UInt uiQWidthInBaseUnits  = uiWidthInBaseUnits  >> 2;
+  const UInt uiQHeightInBaseUnits = uiHeightInBaseUnits >> 2;
+
+  xSetEdgefilterMultiple( pcCU, uiAbsZorderIdx, uiDepth, EDGE_VER, 0, m_stLFCUParam.bLeftEdge );
+  xSetEdgefilterMultiple( pcCU, uiAbsZorderIdx, uiDepth, EDGE_HOR, 0, m_stLFCUParam.bTopEdge );
+
+  switch ( pcCU->getPartitionSize( uiAbsZorderIdx ) )
+  {
+    case SIZE_2Nx2N:
+    {
+      break;
+    }
+    case SIZE_2NxN:
+    {
+      xSetEdgefilterMultiple( pcCU, uiAbsZorderIdx, uiDepth, EDGE_HOR, uiHHeightInBaseUnits, m_stLFCUParam.bInternalEdge );
+      break;
+    }
+    case SIZE_Nx2N:
+    {
+      xSetEdgefilterMultiple( pcCU, uiAbsZorderIdx, uiDepth, EDGE_VER, uiHWidthInBaseUnits, m_stLFCUParam.bInternalEdge );
+      break;
+    }
+    case SIZE_NxN:
+    {
+      xSetEdgefilterMultiple( pcCU, uiAbsZorderIdx, uiDepth, EDGE_VER, uiHWidthInBaseUnits, m_stLFCUParam.bInternalEdge );
+      xSetEdgefilterMultiple( pcCU, uiAbsZorderIdx, uiDepth, EDGE_HOR, uiHHeightInBaseUnits, m_stLFCUParam.bInternalEdge );
+      break;
+    }
+    case SIZE_2NxnU:
+    {
+      xSetEdgefilterMultiple( pcCU, uiAbsZorderIdx, uiDepth, EDGE_HOR, uiQHeightInBaseUnits, m_stLFCUParam.bInternalEdge );
+      break;
+    }
+    case SIZE_2NxnD:
+    {
+      xSetEdgefilterMultiple( pcCU, uiAbsZorderIdx, uiDepth, EDGE_HOR, uiHeightInBaseUnits - uiQHeightInBaseUnits, m_stLFCUParam.bInternalEdge );
+      break;
+    }
+    case SIZE_nLx2N:
+    {
+      xSetEdgefilterMultiple( pcCU, uiAbsZorderIdx, uiDepth, EDGE_VER, uiQWidthInBaseUnits, m_stLFCUParam.bInternalEdge );
+      break;
+    }
+    case SIZE_nRx2N:
+    {
+      xSetEdgefilterMultiple( pcCU, uiAbsZorderIdx, uiDepth, EDGE_VER, uiWidthInBaseUnits - uiQWidthInBaseUnits, m_stLFCUParam.bInternalEdge );
+      break;
+    }
+    default:
+    {
+      break;
+    }
+  }
+}
+
+
+Void TComLoopFilter::xSetLoopfilterParam( TComDataCU* pcCU, UInt uiAbsZorderIdx )
+{
+  UInt uiX           = pcCU->getCUPelX() + g_auiRasterToPelX[ g_auiZscanToRaster[ uiAbsZorderIdx ] ];
+  UInt uiY           = pcCU->getCUPelY() + g_auiRasterToPelY[ g_auiZscanToRaster[ uiAbsZorderIdx ] ];
+
+  TComDataCU* pcTempCU;
+  UInt        uiTempPartIdx;
+
+  m_stLFCUParam.bInternalEdge = ! pcCU->getSlice()->getDeblockingFilterDisable();
+
+  if ( (uiX == 0) || pcCU->getSlice()->getDeblockingFilterDisable() )
+  {
+    m_stLFCUParam.bLeftEdge = false;
+  }
+  else
+  {
+    m_stLFCUParam.bLeftEdge = true;
+  }
+  if ( m_stLFCUParam.bLeftEdge )
+  {
+    pcTempCU = pcCU->getPULeft( uiTempPartIdx, uiAbsZorderIdx, !pcCU->getSlice()->getLFCrossSliceBoundaryFlag(), !m_bLFCrossTileBoundary);
+
+    if ( pcTempCU != NULL )
+    {
+      m_stLFCUParam.bLeftEdge = true;
+    }
+    else
+    {
+      m_stLFCUParam.bLeftEdge = false;
+    }
+  }
+
+  if ( (uiY == 0 ) || pcCU->getSlice()->getDeblockingFilterDisable() )
+  {
+    m_stLFCUParam.bTopEdge = false;
+  }
+  else
+  {
+    m_stLFCUParam.bTopEdge = true;
+  }
+  if ( m_stLFCUParam.bTopEdge )
+  {
+    pcTempCU = pcCU->getPUAbove( uiTempPartIdx, uiAbsZorderIdx, !pcCU->getSlice()->getLFCrossSliceBoundaryFlag(), false, !m_bLFCrossTileBoundary);
+
+    if ( pcTempCU != NULL )
+    {
+      m_stLFCUParam.bTopEdge = true;
+    }
+    else
+    {
+      m_stLFCUParam.bTopEdge = false;
+    }
+  }
+}
+
+Void TComLoopFilter::xGetBoundaryStrengthSingle ( TComDataCU* pCtu, DeblockEdgeDir edgeDir, UInt uiAbsPartIdx4x4BlockWithinCtu )
+{
+  TComSlice * const pcSlice = pCtu->getSlice();
+
+  const Bool lfCrossSliceBoundaryFlag=pCtu->getSlice()->getLFCrossSliceBoundaryFlag();
+
+  const UInt uiPartQ = uiAbsPartIdx4x4BlockWithinCtu;
+  TComDataCU* const pcCUQ = pCtu;
+
+  UInt uiPartP;
+  TComDataCU* pcCUP;
+  UInt uiBs = 0;
+
+  //-- Calculate Block Index
+  if (edgeDir == EDGE_VER)
+  {
+    pcCUP = pcCUQ->getPULeft(uiPartP, uiPartQ, !lfCrossSliceBoundaryFlag, !m_bLFCrossTileBoundary);
+  }
+  else  // (edgeDir == EDGE_HOR)
+  {
+    pcCUP = pcCUQ->getPUAbove(uiPartP, uiPartQ, !pCtu->getSlice()->getLFCrossSliceBoundaryFlag(), false, !m_bLFCrossTileBoundary);
+  }
+
+  //-- Set BS for Intra MB : BS = 4 or 3
+  if ( pcCUP->isIntra(uiPartP) || pcCUQ->isIntra(uiPartQ) )
+  {
+    uiBs = 2;
+  }
+
+  //-- Set BS for not Intra MB : BS = 2 or 1 or 0
+  if ( !pcCUP->isIntra(uiPartP) && !pcCUQ->isIntra(uiPartQ) )
+  {
+    UInt nsPartQ = uiPartQ;
+    UInt nsPartP = uiPartP;
+
+    if ( m_aapucBS[edgeDir][uiAbsPartIdx4x4BlockWithinCtu] && (pcCUQ->getCbf( nsPartQ, COMPONENT_Y, pcCUQ->getTransformIdx(nsPartQ)) != 0 || pcCUP->getCbf( nsPartP, COMPONENT_Y, pcCUP->getTransformIdx(nsPartP) ) != 0) )
+    {
+      uiBs = 1;
+    }
+    else
+    {
+      if (edgeDir == EDGE_HOR)
+      {
+        pcCUP = pcCUQ->getPUAbove(uiPartP, uiPartQ, !pCtu->getSlice()->getLFCrossSliceBoundaryFlag(), false, !m_bLFCrossTileBoundary);
+      }
+      if (pcSlice->isInterB() || pcCUP->getSlice()->isInterB())
+      {
+        Int iRefIdx;
+        TComPic *piRefP0, *piRefP1, *piRefQ0, *piRefQ1;
+        iRefIdx = pcCUP->getCUMvField(REF_PIC_LIST_0)->getRefIdx(uiPartP);
+        piRefP0 = (iRefIdx < 0) ? NULL : pcCUP->getSlice()->getRefPic(REF_PIC_LIST_0, iRefIdx);
+        iRefIdx = pcCUP->getCUMvField(REF_PIC_LIST_1)->getRefIdx(uiPartP);
+        piRefP1 = (iRefIdx < 0) ? NULL : pcCUP->getSlice()->getRefPic(REF_PIC_LIST_1, iRefIdx);
+        iRefIdx = pcCUQ->getCUMvField(REF_PIC_LIST_0)->getRefIdx(uiPartQ);
+        piRefQ0 = (iRefIdx < 0) ? NULL : pcSlice->getRefPic(REF_PIC_LIST_0, iRefIdx);
+        iRefIdx = pcCUQ->getCUMvField(REF_PIC_LIST_1)->getRefIdx(uiPartQ);
+        piRefQ1 = (iRefIdx < 0) ? NULL : pcSlice->getRefPic(REF_PIC_LIST_1, iRefIdx);
+
+        TComMv pcMvP0 = pcCUP->getCUMvField(REF_PIC_LIST_0)->getMv(uiPartP);
+        TComMv pcMvP1 = pcCUP->getCUMvField(REF_PIC_LIST_1)->getMv(uiPartP);
+        TComMv pcMvQ0 = pcCUQ->getCUMvField(REF_PIC_LIST_0)->getMv(uiPartQ);
+        TComMv pcMvQ1 = pcCUQ->getCUMvField(REF_PIC_LIST_1)->getMv(uiPartQ);
+
+        if (piRefP0 == NULL) pcMvP0.setZero();
+        if (piRefP1 == NULL) pcMvP1.setZero();
+        if (piRefQ0 == NULL) pcMvQ0.setZero();
+        if (piRefQ1 == NULL) pcMvQ1.setZero();
+
+        if ( ((piRefP0==piRefQ0)&&(piRefP1==piRefQ1)) || ((piRefP0==piRefQ1)&&(piRefP1==piRefQ0)) )
+        {
+          if ( piRefP0 != piRefP1 )   // Different L0 & L1
+          {
+            if ( piRefP0 == piRefQ0 )
+            {
+              uiBs  = ((abs(pcMvQ0.getHor() - pcMvP0.getHor()) >= 4) ||
+                       (abs(pcMvQ0.getVer() - pcMvP0.getVer()) >= 4) ||
+                       (abs(pcMvQ1.getHor() - pcMvP1.getHor()) >= 4) ||
+                       (abs(pcMvQ1.getVer() - pcMvP1.getVer()) >= 4)) ? 1 : 0;
+            }
+            else
+            {
+              uiBs  = ((abs(pcMvQ1.getHor() - pcMvP0.getHor()) >= 4) ||
+                       (abs(pcMvQ1.getVer() - pcMvP0.getVer()) >= 4) ||
+                       (abs(pcMvQ0.getHor() - pcMvP1.getHor()) >= 4) ||
+                       (abs(pcMvQ0.getVer() - pcMvP1.getVer()) >= 4)) ? 1 : 0;
+            }
+          }
+          else    // Same L0 & L1
+          {
+            uiBs  = ((abs(pcMvQ0.getHor() - pcMvP0.getHor()) >= 4) ||
+                     (abs(pcMvQ0.getVer() - pcMvP0.getVer()) >= 4) ||
+                     (abs(pcMvQ1.getHor() - pcMvP1.getHor()) >= 4) ||
+                     (abs(pcMvQ1.getVer() - pcMvP1.getVer()) >= 4)) &&
+                    ((abs(pcMvQ1.getHor() - pcMvP0.getHor()) >= 4) ||
+                     (abs(pcMvQ1.getVer() - pcMvP0.getVer()) >= 4) ||
+                     (abs(pcMvQ0.getHor() - pcMvP1.getHor()) >= 4) ||
+                     (abs(pcMvQ0.getVer() - pcMvP1.getVer()) >= 4)) ? 1 : 0;
+          }
+        }
+        else // for all different Ref_Idx
+        {
+          uiBs = 1;
+        }
+      }
+      else  // pcSlice->isInterP()
+      {
+        Int iRefIdx;
+        TComPic *piRefP0, *piRefQ0;
+        iRefIdx = pcCUP->getCUMvField(REF_PIC_LIST_0)->getRefIdx(uiPartP);
+        piRefP0 = (iRefIdx < 0) ? NULL : pcCUP->getSlice()->getRefPic(REF_PIC_LIST_0, iRefIdx);
+        iRefIdx = pcCUQ->getCUMvField(REF_PIC_LIST_0)->getRefIdx(uiPartQ);
+        piRefQ0 = (iRefIdx < 0) ? NULL : pcSlice->getRefPic(REF_PIC_LIST_0, iRefIdx);
+        TComMv pcMvP0 = pcCUP->getCUMvField(REF_PIC_LIST_0)->getMv(uiPartP);
+        TComMv pcMvQ0 = pcCUQ->getCUMvField(REF_PIC_LIST_0)->getMv(uiPartQ);
+
+        if (piRefP0 == NULL) pcMvP0.setZero();
+        if (piRefQ0 == NULL) pcMvQ0.setZero();
+
+        uiBs  = ((piRefP0 != piRefQ0) ||
+                 (abs(pcMvQ0.getHor() - pcMvP0.getHor()) >= 4) ||
+                 (abs(pcMvQ0.getVer() - pcMvP0.getVer()) >= 4)) ? 1 : 0;
+      }
+    }   // enf of "if( one of BCBP == 0 )"
+  }   // enf of "if( not Intra )"
+
+  m_aapucBS[edgeDir][uiAbsPartIdx4x4BlockWithinCtu] = uiBs;
+}
+
+
+Void TComLoopFilter::xEdgeFilterLuma( TComDataCU* pcCU, UInt uiAbsZorderIdx, UInt uiDepth, DeblockEdgeDir edgeDir, Int iEdge  )
+{
+  TComPicYuv* pcPicYuvRec = pcCU->getPic()->getPicYuvRec();
+  Pel* piSrc    = pcPicYuvRec->getAddr(COMPONENT_Y, pcCU->getCtuRsAddr(), uiAbsZorderIdx );
+  Pel* piTmpSrc = piSrc;
+
+  const Bool lfCrossSliceBoundaryFlag=pcCU->getSlice()->getLFCrossSliceBoundaryFlag();
+
+  Int  iStride = pcPicYuvRec->getStride(COMPONENT_Y);
+  Int iQP = 0;
+  Int iQP_P = 0;
+  Int iQP_Q = 0;
+  UInt uiNumParts = pcCU->getPic()->getNumPartInCtuWidth()>>uiDepth;
+
+  UInt  uiPelsInPart = g_uiMaxCUWidth >> g_uiMaxCUDepth;
+  UInt  uiBsAbsIdx = 0, uiBs = 0;
+  Int   iOffset, iSrcStep;
+
+  Bool  bPCMFilter = (pcCU->getSlice()->getSPS()->getUsePCM() && pcCU->getSlice()->getSPS()->getPCMFilterDisableFlag())? true : false;
+  Bool  bPartPNoFilter = false;
+  Bool  bPartQNoFilter = false;
+  UInt  uiPartPIdx = 0;
+  UInt  uiPartQIdx = 0;
+  TComDataCU* pcCUP = pcCU;
+  TComDataCU* pcCUQ = pcCU;
+  Int  betaOffsetDiv2 = pcCUQ->getSlice()->getDeblockingFilterBetaOffsetDiv2();
+  Int  tcOffsetDiv2 = pcCUQ->getSlice()->getDeblockingFilterTcOffsetDiv2();
+
+  if (edgeDir == EDGE_VER)
+  {
+    iOffset = 1;
+    iSrcStep = iStride;
+    piTmpSrc += iEdge*uiPelsInPart;
+  }
+  else  // (edgeDir == EDGE_HOR)
+  {
+    iOffset = iStride;
+    iSrcStep = 1;
+    piTmpSrc += iEdge*uiPelsInPart*iStride;
+  }
+
+  for ( UInt iIdx = 0; iIdx < uiNumParts; iIdx++ )
+  {
+    uiBsAbsIdx = xCalcBsIdx( pcCU, uiAbsZorderIdx, edgeDir, iEdge, iIdx);
+    uiBs = m_aapucBS[edgeDir][uiBsAbsIdx];
+    if ( uiBs )
+    {
+      iQP_Q = pcCU->getQP( uiBsAbsIdx );
+      uiPartQIdx = uiBsAbsIdx;
+      // Derive neighboring PU index
+      if (edgeDir == EDGE_VER)
+      {
+        pcCUP = pcCUQ->getPULeft (uiPartPIdx, uiPartQIdx,!lfCrossSliceBoundaryFlag, !m_bLFCrossTileBoundary);
+      }
+      else  // (iDir == EDGE_HOR)
+      {
+        pcCUP = pcCUQ->getPUAbove(uiPartPIdx, uiPartQIdx,!pcCU->getSlice()->getLFCrossSliceBoundaryFlag(), false, !m_bLFCrossTileBoundary);
+      }
+
+      iQP_P = pcCUP->getQP(uiPartPIdx);
+      iQP = (iQP_P + iQP_Q + 1) >> 1;
+      Int iBitdepthScale = 1 << (g_bitDepth[CHANNEL_TYPE_LUMA]-8);
+
+      Int iIndexTC = Clip3(0, MAX_QP+DEFAULT_INTRA_TC_OFFSET, Int(iQP + DEFAULT_INTRA_TC_OFFSET*(uiBs-1) + (tcOffsetDiv2 << 1)));
+      Int iIndexB = Clip3(0, MAX_QP, iQP + (betaOffsetDiv2 << 1));
+
+      Int iTc =  sm_tcTable[iIndexTC]*iBitdepthScale;
+      Int iBeta = sm_betaTable[iIndexB]*iBitdepthScale;
+      Int iSideThreshold = (iBeta+(iBeta>>1))>>3;
+      Int iThrCut = iTc*10;
+
+
+      UInt  uiBlocksInPart = uiPelsInPart / 4 ? uiPelsInPart / 4 : 1;
+      for (UInt iBlkIdx = 0; iBlkIdx<uiBlocksInPart; iBlkIdx ++)
+      {
+        Int dp0 = xCalcDP( piTmpSrc+iSrcStep*(iIdx*uiPelsInPart+iBlkIdx*4+0), iOffset);
+        Int dq0 = xCalcDQ( piTmpSrc+iSrcStep*(iIdx*uiPelsInPart+iBlkIdx*4+0), iOffset);
+        Int dp3 = xCalcDP( piTmpSrc+iSrcStep*(iIdx*uiPelsInPart+iBlkIdx*4+3), iOffset);
+        Int dq3 = xCalcDQ( piTmpSrc+iSrcStep*(iIdx*uiPelsInPart+iBlkIdx*4+3), iOffset);
+        Int d0 = dp0 + dq0;
+        Int d3 = dp3 + dq3;
+
+        Int dp = dp0 + dp3;
+        Int dq = dq0 + dq3;
+        Int d =  d0 + d3;
+
+        if (bPCMFilter || pcCU->getSlice()->getPPS()->getTransquantBypassEnableFlag())
+        {
+          // Check if each of PUs is I_PCM with LF disabling
+          bPartPNoFilter = (bPCMFilter && pcCUP->getIPCMFlag(uiPartPIdx));
+          bPartQNoFilter = (bPCMFilter && pcCUQ->getIPCMFlag(uiPartQIdx));
+
+          // check if each of PUs is lossless coded
+          bPartPNoFilter = bPartPNoFilter || (pcCUP->isLosslessCoded(uiPartPIdx) );
+          bPartQNoFilter = bPartQNoFilter || (pcCUQ->isLosslessCoded(uiPartQIdx) );
+        }
+
+        if (d < iBeta)
+        {
+          Bool bFilterP = (dp < iSideThreshold);
+          Bool bFilterQ = (dq < iSideThreshold);
+
+          Bool sw =  xUseStrongFiltering( iOffset, 2*d0, iBeta, iTc, piTmpSrc+iSrcStep*(iIdx*uiPelsInPart+iBlkIdx*4+0))
+          && xUseStrongFiltering( iOffset, 2*d3, iBeta, iTc, piTmpSrc+iSrcStep*(iIdx*uiPelsInPart+iBlkIdx*4+3));
+
+          for ( Int i = 0; i < DEBLOCK_SMALLEST_BLOCK/2; i++)
+          {
+            xPelFilterLuma( piTmpSrc+iSrcStep*(iIdx*uiPelsInPart+iBlkIdx*4+i), iOffset, iTc, sw, bPartPNoFilter, bPartQNoFilter, iThrCut, bFilterP, bFilterQ);
+          }
+        }
+      }
+    }
+  }
+}
+
+
+Void TComLoopFilter::xEdgeFilterChroma( TComDataCU* pcCU, UInt uiAbsZorderIdx, UInt uiDepth, DeblockEdgeDir edgeDir, Int iEdge )
+{
+  TComPicYuv* pcPicYuvRec = pcCU->getPic()->getPicYuvRec();
+  Int         iStride     = pcPicYuvRec->getStride(COMPONENT_Cb);
+  Pel*        piSrcCb     = pcPicYuvRec->getAddr( COMPONENT_Cb, pcCU->getCtuRsAddr(), uiAbsZorderIdx );
+  Pel*        piSrcCr     = pcPicYuvRec->getAddr( COMPONENT_Cr, pcCU->getCtuRsAddr(), uiAbsZorderIdx );
+  Int iQP = 0;
+  Int iQP_P = 0;
+  Int iQP_Q = 0;
+
+  UInt  uiPelsInPartChromaH = g_uiMaxCUWidth >> (g_uiMaxCUDepth+pcPicYuvRec->getComponentScaleX(COMPONENT_Cb));
+  UInt  uiPelsInPartChromaV = g_uiMaxCUWidth >> (g_uiMaxCUDepth+pcPicYuvRec->getComponentScaleY(COMPONENT_Cb));
+
+  Int   iOffset, iSrcStep;
+  UInt  uiLoopLength;
+
+  const UInt uiCtuWidthInBaseUnits = pcCU->getPic()->getNumPartInCtuWidth();
+
+  Bool  bPCMFilter = (pcCU->getSlice()->getSPS()->getUsePCM() && pcCU->getSlice()->getSPS()->getPCMFilterDisableFlag())? true : false;
+  Bool  bPartPNoFilter = false;
+  Bool  bPartQNoFilter = false;
+  TComDataCU* pcCUQ = pcCU;
+  Int tcOffsetDiv2 = pcCU->getSlice()->getDeblockingFilterTcOffsetDiv2();
+
+  // Vertical Position
+  UInt uiEdgeNumInCtuVert = g_auiZscanToRaster[uiAbsZorderIdx]%uiCtuWidthInBaseUnits + iEdge;
+  UInt uiEdgeNumInCtuHor = g_auiZscanToRaster[uiAbsZorderIdx]/uiCtuWidthInBaseUnits + iEdge;
+
+  if ( (uiPelsInPartChromaH < DEBLOCK_SMALLEST_BLOCK) && (uiPelsInPartChromaV < DEBLOCK_SMALLEST_BLOCK) &&
+       (
+         ( (uiEdgeNumInCtuVert%(DEBLOCK_SMALLEST_BLOCK/uiPelsInPartChromaH)) && (edgeDir==EDGE_VER) ) ||
+         ( (uiEdgeNumInCtuHor %(DEBLOCK_SMALLEST_BLOCK/uiPelsInPartChromaV)) && (edgeDir==EDGE_HOR) )
+       )
+     )
+  {
+    return;
+  }
+
+
+  const Bool lfCrossSliceBoundaryFlag=pcCU->getSlice()->getLFCrossSliceBoundaryFlag();
+
+  UInt  uiNumParts = pcCU->getPic()->getNumPartInCtuWidth()>>uiDepth;
+
+  UInt  uiBsAbsIdx;
+  UChar ucBs;
+
+  Pel* piTmpSrcCb = piSrcCb;
+  Pel* piTmpSrcCr = piSrcCr;
+
+  if (edgeDir == EDGE_VER)
+  {
+    iOffset   = 1;
+    iSrcStep  = iStride;
+    piTmpSrcCb += iEdge*uiPelsInPartChromaH;
+    piTmpSrcCr += iEdge*uiPelsInPartChromaH;
+    uiLoopLength=uiPelsInPartChromaV;
+  }
+  else  // (edgeDir == EDGE_HOR)
+  {
+    iOffset   = iStride;
+    iSrcStep  = 1;
+    piTmpSrcCb += iEdge*iStride*uiPelsInPartChromaV;
+    piTmpSrcCr += iEdge*iStride*uiPelsInPartChromaV;
+    uiLoopLength=uiPelsInPartChromaH;
+  }
+
+  for ( UInt iIdx = 0; iIdx < uiNumParts; iIdx++ )
+  {
+    uiBsAbsIdx = xCalcBsIdx( pcCU, uiAbsZorderIdx, edgeDir, iEdge, iIdx);
+    ucBs = m_aapucBS[edgeDir][uiBsAbsIdx];
+
+    if ( ucBs > 1)
+    {
+      iQP_Q = pcCU->getQP( uiBsAbsIdx );
+      UInt  uiPartQIdx = uiBsAbsIdx;
+      // Derive neighboring PU index
+      TComDataCU* pcCUP;
+      UInt  uiPartPIdx;
+
+      if (edgeDir == EDGE_VER)
+      {
+        pcCUP = pcCUQ->getPULeft (uiPartPIdx, uiPartQIdx,!lfCrossSliceBoundaryFlag, !m_bLFCrossTileBoundary);
+      }
+      else  // (edgeDir == EDGE_HOR)
+      {
+        pcCUP = pcCUQ->getPUAbove(uiPartPIdx, uiPartQIdx,!pcCU->getSlice()->getLFCrossSliceBoundaryFlag(), false, !m_bLFCrossTileBoundary);
+      }
+
+      iQP_P = pcCUP->getQP(uiPartPIdx);
+
+      if (bPCMFilter || pcCU->getSlice()->getPPS()->getTransquantBypassEnableFlag())
+      {
+        // Check if each of PUs is I_PCM with LF disabling
+        bPartPNoFilter = (bPCMFilter && pcCUP->getIPCMFlag(uiPartPIdx));
+        bPartQNoFilter = (bPCMFilter && pcCUQ->getIPCMFlag(uiPartQIdx));
+
+        // check if each of PUs is lossless coded
+        bPartPNoFilter = bPartPNoFilter || (pcCUP->isLosslessCoded(uiPartPIdx));
+        bPartQNoFilter = bPartQNoFilter || (pcCUQ->isLosslessCoded(uiPartQIdx));
+      }
+
+      for ( UInt chromaIdx = 0; chromaIdx < 2; chromaIdx++ )
+      {
+        Int chromaQPOffset  = pcCU->getSlice()->getPPS()->getQpOffset(ComponentID(chromaIdx + 1));
+        Pel* piTmpSrcChroma = (chromaIdx == 0) ? piTmpSrcCb : piTmpSrcCr;
+
+        iQP = ((iQP_P + iQP_Q + 1) >> 1) + chromaQPOffset;
+        if (iQP >= chromaQPMappingTableSize)
+        {
+          if (pcPicYuvRec->getChromaFormat()==CHROMA_420) iQP -=6;
+          else if (iQP>51) iQP=51;
+        }
+        else if (iQP >= 0 )
+        {
+          iQP = getScaledChromaQP(iQP, pcPicYuvRec->getChromaFormat());
+        }
+
+        Int iBitdepthScale = 1 << (g_bitDepth[CHANNEL_TYPE_CHROMA]-8);
+
+        Int iIndexTC = Clip3(0, MAX_QP+DEFAULT_INTRA_TC_OFFSET, iQP + DEFAULT_INTRA_TC_OFFSET*(ucBs - 1) + (tcOffsetDiv2 << 1));
+        Int iTc =  sm_tcTable[iIndexTC]*iBitdepthScale;
+
+        for ( UInt uiStep = 0; uiStep < uiLoopLength; uiStep++ )
+        {
+          xPelFilterChroma( piTmpSrcChroma + iSrcStep*(uiStep+iIdx*uiLoopLength), iOffset, iTc , bPartPNoFilter, bPartQNoFilter);
+        }
+      }
+    }
+  }
+}
+
+/**
+ - Deblocking for the luminance component with strong or weak filter
+ .
+ \param piSrc           pointer to picture data
+ \param iOffset         offset value for picture data
+ \param tc              tc value
+ \param sw              decision strong/weak filter
+ \param bPartPNoFilter  indicator to disable filtering on partP
+ \param bPartQNoFilter  indicator to disable filtering on partQ
+ \param iThrCut         threshold value for weak filter decision
+ \param bFilterSecondP  decision weak filter/no filter for partP
+ \param bFilterSecondQ  decision weak filter/no filter for partQ
+*/
+__inline Void TComLoopFilter::xPelFilterLuma( Pel* piSrc, Int iOffset, Int tc, Bool sw, Bool bPartPNoFilter, Bool bPartQNoFilter, Int iThrCut, Bool bFilterSecondP, Bool bFilterSecondQ)
+{
+  Int delta;
+
+  Pel m4  = piSrc[0];
+  Pel m3  = piSrc[-iOffset];
+  Pel m5  = piSrc[ iOffset];
+  Pel m2  = piSrc[-iOffset*2];
+  Pel m6  = piSrc[ iOffset*2];
+  Pel m1  = piSrc[-iOffset*3];
+  Pel m7  = piSrc[ iOffset*3];
+  Pel m0  = piSrc[-iOffset*4];
+
+  if (sw)
+  {
+    piSrc[-iOffset]   = Clip3(m3-2*tc, m3+2*tc, ((m1 + 2*m2 + 2*m3 + 2*m4 + m5 + 4) >> 3));
+    piSrc[0]          = Clip3(m4-2*tc, m4+2*tc, ((m2 + 2*m3 + 2*m4 + 2*m5 + m6 + 4) >> 3));
+    piSrc[-iOffset*2] = Clip3(m2-2*tc, m2+2*tc, ((m1 + m2 + m3 + m4 + 2)>>2));
+    piSrc[ iOffset]   = Clip3(m5-2*tc, m5+2*tc, ((m3 + m4 + m5 + m6 + 2)>>2));
+    piSrc[-iOffset*3] = Clip3(m1-2*tc, m1+2*tc, ((2*m0 + 3*m1 + m2 + m3 + m4 + 4 )>>3));
+    piSrc[ iOffset*2] = Clip3(m6-2*tc, m6+2*tc, ((m3 + m4 + m5 + 3*m6 + 2*m7 +4 )>>3));
+  }
+  else
+  {
+    /* Weak filter */
+    delta = (9*(m4-m3) -3*(m5-m2) + 8)>>4 ;
+
+    if ( abs(delta) < iThrCut )
+    {
+      delta = Clip3(-tc, tc, delta);
+      piSrc[-iOffset] = Clip((m3+delta), CHANNEL_TYPE_LUMA);
+      piSrc[0] = Clip((m4-delta), CHANNEL_TYPE_LUMA);
+
+      Int tc2 = tc>>1;
+      if(bFilterSecondP)
+      {
+        Int delta1 = Clip3(-tc2, tc2, (( ((m1+m3+1)>>1)- m2+delta)>>1));
+        piSrc[-iOffset*2] = Clip((m2+delta1), CHANNEL_TYPE_LUMA);
+      }
+      if(bFilterSecondQ)
+      {
+        Int delta2 = Clip3(-tc2, tc2, (( ((m6+m4+1)>>1)- m5-delta)>>1));
+        piSrc[ iOffset] = Clip((m5+delta2), CHANNEL_TYPE_LUMA);
+      }
+    }
+  }
+
+  if(bPartPNoFilter)
+  {
+    piSrc[-iOffset] = m3;
+    piSrc[-iOffset*2] = m2;
+    piSrc[-iOffset*3] = m1;
+  }
+  if(bPartQNoFilter)
+  {
+    piSrc[0] = m4;
+    piSrc[ iOffset] = m5;
+    piSrc[ iOffset*2] = m6;
+  }
+}
+
+/**
+ - Deblocking of one line/column for the chrominance component
+ .
+ \param piSrc           pointer to picture data
+ \param iOffset         offset value for picture data
+ \param tc              tc value
+ \param bPartPNoFilter  indicator to disable filtering on partP
+ \param bPartQNoFilter  indicator to disable filtering on partQ
+ */
+__inline Void TComLoopFilter::xPelFilterChroma( Pel* piSrc, Int iOffset, Int tc, Bool bPartPNoFilter, Bool bPartQNoFilter)
+{
+  Int delta;
+
+  Pel m4  = piSrc[0];
+  Pel m3  = piSrc[-iOffset];
+  Pel m5  = piSrc[ iOffset];
+  Pel m2  = piSrc[-iOffset*2];
+
+  delta = Clip3(-tc,tc, (((( m4 - m3 ) << 2 ) + m2 - m5 + 4 ) >> 3) );
+  piSrc[-iOffset] = Clip((m3+delta), CHANNEL_TYPE_CHROMA);
+  piSrc[0] = Clip((m4-delta), CHANNEL_TYPE_CHROMA);
+
+  if(bPartPNoFilter)
+  {
+    piSrc[-iOffset] = m3;
+  }
+  if(bPartQNoFilter)
+  {
+    piSrc[0] = m4;
+  }
+}
+
+/**
+ - Decision between strong and weak filter
+ .
+ \param offset         offset value for picture data
+ \param d               d value
+ \param beta            beta value
+ \param tc              tc value
+ \param piSrc           pointer to picture data
+ */
+__inline Bool TComLoopFilter::xUseStrongFiltering( Int offset, Int d, Int beta, Int tc, Pel* piSrc)
+{
+  Pel m4  = piSrc[0];
+  Pel m3  = piSrc[-offset];
+  Pel m7  = piSrc[ offset*3];
+  Pel m0  = piSrc[-offset*4];
+
+  Int d_strong = abs(m0-m3) + abs(m7-m4);
+
+  return ( (d_strong < (beta>>3)) && (d<(beta>>2)) && ( abs(m3-m4) < ((tc*5+1)>>1)) );
+}
+
+__inline Int TComLoopFilter::xCalcDP( Pel* piSrc, Int iOffset)
+{
+  return abs( piSrc[-iOffset*3] - 2*piSrc[-iOffset*2] + piSrc[-iOffset] ) ;
+}
+
+__inline Int TComLoopFilter::xCalcDQ( Pel* piSrc, Int iOffset)
+{
+  return abs( piSrc[0] - 2*piSrc[iOffset] + piSrc[iOffset*2] );
+}
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComLoopFilter.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,143 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComLoopFilter.h
+    \brief    deblocking filter (header)
+*/
+
+#ifndef __TCOMLOOPFILTER__
+#define __TCOMLOOPFILTER__
+
+#include "CommonDef.h"
+#include "TComPic.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+#define DEBLOCK_SMALLEST_BLOCK  8
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// deblocking filter class
+class TComLoopFilter
+{
+private:
+
+  UInt      m_uiNumPartitions;
+  UChar*    m_aapucBS[NUM_EDGE_DIR];         ///< Bs for [Ver/Hor][Y/U/V][Blk_Idx]
+  Bool*     m_aapbEdgeFilter[NUM_EDGE_DIR];
+  LFCUParam m_stLFCUParam;                   ///< status structure
+
+  Bool      m_bLFCrossTileBoundary;
+
+protected:
+  /// CU-level deblocking function
+  Void xDeblockCU                 ( TComDataCU* pcCU, UInt uiAbsZorderIdx, UInt uiDepth, DeblockEdgeDir edgeDir );
+
+  // set / get functions
+  Void xSetLoopfilterParam        ( TComDataCU* pcCU, UInt uiAbsZorderIdx );
+  // filtering functions
+  Void xSetEdgefilterTU           ( TComTU &rTu );
+  Void xSetEdgefilterPU           ( TComDataCU* pcCU, UInt uiAbsZorderIdx );
+  Void xGetBoundaryStrengthSingle ( TComDataCU* pCtu, DeblockEdgeDir edgeDir, UInt uiPartIdx );
+  UInt xCalcBsIdx                 ( TComDataCU* pcCU, UInt absZIdxInCtu, DeblockEdgeDir edgeDir, Int iEdgeIdx, Int iBaseUnitIdx, const struct TComRectangle *rect=NULL )
+  {
+    TComPic* const pcPic = pcCU->getPic();
+    const UInt ctuWidthInBaseUnits = pcPic->getNumPartInCtuWidth();
+    Int rasterOffsetTU=0;
+    if (rect != NULL)
+    {
+      const UInt minCuWidth =pcPic->getMinCUWidth();
+      const UInt minCuHeight=pcPic->getMinCUHeight();
+      rasterOffsetTU = rect->x0/minCuWidth + (rect->y0/minCuHeight)*ctuWidthInBaseUnits;
+    }
+    if( edgeDir == EDGE_VER )
+    {
+      return g_auiRasterToZscan[g_auiZscanToRaster[absZIdxInCtu] + iBaseUnitIdx * ctuWidthInBaseUnits + iEdgeIdx + rasterOffsetTU ];
+    }
+    else
+    {
+      return g_auiRasterToZscan[g_auiZscanToRaster[absZIdxInCtu] + iEdgeIdx * ctuWidthInBaseUnits + iBaseUnitIdx + rasterOffsetTU ];
+    }
+  }
+
+  Void xSetEdgefilterMultiple( TComDataCU* pcCU,
+                               UInt uiAbsZorderIdx,
+                               UInt uiDepth,
+                               DeblockEdgeDir edgeDir,
+                               Int iEdgeIdx,
+                               Bool bValue,
+                               UInt uiWidthInBaseUnits = 0,
+                               UInt uiHeightInBaseUnits = 0,
+                               const TComRectangle *rect = 0
+                               );
+
+  Void xEdgeFilterLuma            ( TComDataCU* pcCU, UInt uiAbsZorderIdx, UInt uiDepth, DeblockEdgeDir edgeDir, Int iEdge );
+  Void xEdgeFilterChroma          ( TComDataCU* pcCU, UInt uiAbsZorderIdx, UInt uiDepth, DeblockEdgeDir edgeDir, Int iEdge );
+
+  __inline Void xPelFilterLuma( Pel* piSrc, Int iOffset, Int tc, Bool sw, Bool bPartPNoFilter, Bool bPartQNoFilter, Int iThrCut, Bool bFilterSecondP, Bool bFilterSecondQ);
+  __inline Void xPelFilterChroma( Pel* piSrc, Int iOffset, Int tc, Bool bPartPNoFilter, Bool bPartQNoFilter);
+
+
+  __inline Bool xUseStrongFiltering( Int offset, Int d, Int beta, Int tc, Pel* piSrc);
+  __inline Int xCalcDP( Pel* piSrc, Int iOffset);
+  __inline Int xCalcDQ( Pel* piSrc, Int iOffset);
+
+  static const UChar sm_tcTable[54];
+  static const UChar sm_betaTable[52];
+
+public:
+  TComLoopFilter();
+  virtual ~TComLoopFilter();
+
+  Void  create                    ( UInt uiMaxCUDepth );
+  Void  destroy                   ();
+
+  /// set configuration
+  Void setCfg( Bool bLFCrossTileBoundary );
+
+  /// picture-level deblocking filter
+  Void loopFilterPic( TComPic* pcPic );
+
+  static Int getBeta( Int qp )
+  {
+    Int indexB = Clip3( 0, MAX_QP, qp );
+    return sm_betaTable[ indexB ];
+  }
+};
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComMotionInfo.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,351 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComMotionInfo.cpp
+    \brief    motion information handling classes
+*/
+
+#include <memory.h>
+#include "TComMotionInfo.h"
+#include "assert.h"
+#include <stdlib.h>
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+// --------------------------------------------------------------------------------------------------------------------
+// Create / destroy
+// --------------------------------------------------------------------------------------------------------------------
+
+Void TComCUMvField::create( UInt uiNumPartition )
+{
+  // Allocate per-partition MV, MVd and reference-index arrays.
+  // The field must currently be empty (see destroy()).
+  assert(m_pcMv     == NULL);
+  assert(m_pcMvd    == NULL);
+  assert(m_piRefIdx == NULL);
+
+  m_uiNumPartition = uiNumPartition;
+
+  m_pcMv     = new TComMv[ uiNumPartition ];
+  m_pcMvd    = new TComMv[ uiNumPartition ];
+  m_piRefIdx = new Char  [ uiNumPartition ];
+}
+
+Void TComCUMvField::destroy()
+{
+  // Release the arrays allocated by create(); destroying an empty field
+  // is a programming error.
+  assert(m_pcMv     != NULL);
+  assert(m_pcMvd    != NULL);
+  assert(m_piRefIdx != NULL);
+
+  delete[] m_pcMv;
+  m_pcMv     = NULL;
+
+  delete[] m_pcMvd;
+  m_pcMvd    = NULL;
+
+  delete[] m_piRefIdx;
+  m_piRefIdx = NULL;
+
+  m_uiNumPartition = 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Clear / copy
+// --------------------------------------------------------------------------------------------------------------------
+
+Void TComCUMvField::clearMvField()
+{
+  // Reset every partition: zero the MVs and MVds and mark all reference
+  // indices invalid.  Use an unsigned counter — m_uiNumPartition is UInt,
+  // so the original signed `Int i` triggered a signed/unsigned comparison.
+  for ( UInt i = 0; i < m_uiNumPartition; i++ )
+  {
+    m_pcMv [ i ].setZero();
+    m_pcMvd[ i ].setZero();
+  }
+  // memset with NOT_VALID is only valid because each refIdx entry is one byte
+  assert( sizeof( *m_piRefIdx ) == 1 );
+  memset( m_piRefIdx, NOT_VALID, m_uiNumPartition * sizeof( *m_piRefIdx ) );
+}
+
+Void TComCUMvField::copyFrom( TComCUMvField const * pcCUMvFieldSrc, Int iNumPartSrc, Int iPartAddrDst )
+{
+  // Copy iNumPartSrc partitions of motion data from the start of the source
+  // field into this field, beginning at partition iPartAddrDst.
+  const Int iNumMvBytes  = sizeof( TComMv )      * iNumPartSrc;
+  const Int iNumIdxBytes = sizeof( *m_piRefIdx ) * iNumPartSrc;
+
+  memcpy( m_pcMv     + iPartAddrDst, pcCUMvFieldSrc->m_pcMv,     iNumMvBytes  );
+  memcpy( m_pcMvd    + iPartAddrDst, pcCUMvFieldSrc->m_pcMvd,    iNumMvBytes  );
+  memcpy( m_piRefIdx + iPartAddrDst, pcCUMvFieldSrc->m_piRefIdx, iNumIdxBytes );
+}
+
+// Convenience overload: copy every partition of this field into the
+// destination field, starting at partition iPartAddrDst there.
+Void TComCUMvField::copyTo( TComCUMvField* pcCUMvFieldDst, Int iPartAddrDst ) const
+{
+  copyTo( pcCUMvFieldDst, iPartAddrDst, 0, m_uiNumPartition );
+}
+
+Void TComCUMvField::copyTo( TComCUMvField* pcCUMvFieldDst, Int iPartAddrDst, UInt uiOffset, UInt uiNumPart ) const
+{
+  // Copy uiNumPart partitions starting at uiOffset; the destination start is
+  // additionally shifted by iPartAddrDst.
+  const Int iDstOffset = uiOffset + iPartAddrDst;
+  const Int iMvBytes   = sizeof( TComMv ) * uiNumPart;
+  const Int iIdxBytes  = sizeof( *m_piRefIdx ) * uiNumPart;
+
+  memcpy( pcCUMvFieldDst->m_pcMv     + iDstOffset, m_pcMv     + uiOffset, iMvBytes  );
+  memcpy( pcCUMvFieldDst->m_pcMvd    + iDstOffset, m_pcMvd    + uiOffset, iMvBytes  );
+  memcpy( pcCUMvFieldDst->m_piRefIdx + iDstOffset, m_piRefIdx + uiOffset, iIdxBytes );
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Set
+// --------------------------------------------------------------------------------------------------------------------
+
+/** Assign \c val to every 4x4 partition entry covered by one prediction unit.
+ *  \param p         base array (MV, MVd or reference index), one entry per
+ *                   4x4 partition in z-scan order
+ *  \param val       value to assign
+ *  \param eCUMode   partition (PU split) mode of the CU
+ *  \param iPartAddr z-scan address of the CU; p is advanced by this amount
+ *  \param uiDepth   CU depth; the CU covers m_uiNumPartition >> (2*uiDepth) entries
+ *  \param iPartIdx  PU index within the CU (used by the half/asymmetric modes)
+ */
+template <typename T>
+Void TComCUMvField::setAll( T *p, T const & val, PartSize eCUMode, Int iPartAddr, UInt uiDepth, Int iPartIdx  )
+{
+  Int i;
+  p += iPartAddr;
+  // number of 4x4 partition entries covered by the whole CU at this depth
+  Int numElements = m_uiNumPartition >> ( 2 * uiDepth );
+
+  switch( eCUMode )
+  {
+    case SIZE_2Nx2N:
+      // single PU: fill the whole CU
+      for ( i = 0; i < numElements; i++ )
+      {
+        p[ i ] = val;
+      }
+      break;
+
+    case SIZE_2NxN:
+      // horizontal halves: each PU is one contiguous half of the z-scan range
+      numElements >>= 1;
+      for ( i = 0; i < numElements; i++ )
+      {
+        p[ i ] = val;
+      }
+      break;
+
+    case SIZE_Nx2N:
+      // vertical halves: relative to its base address each PU covers z-scan
+      // quadrants 0 and 2, hence the two strided writes
+      numElements >>= 2;
+      for ( i = 0; i < numElements; i++ )
+      {
+        p[ i                   ] = val;
+        p[ i + 2 * numElements ] = val;
+      }
+      break;
+
+    case SIZE_NxN:
+      // quarters: each PU is exactly one z-scan quadrant
+      numElements >>= 2;
+      for ( i = 0; i < numElements; i++)
+      {
+        p[ i ] = val;
+      }
+      break;
+    case SIZE_2NxnU:
+    {
+      // asymmetric horizontal split; upper PU is a quarter of the CU height
+      Int iCurrPartNumQ = numElements>>2;
+      if( iPartIdx == 0 )
+      {
+        // upper PU: first half of z-scan quadrants 0 and 1
+        T *pT  = p;
+        T *pT2 = p + iCurrPartNumQ;
+        for (i = 0; i < (iCurrPartNumQ>>1); i++)
+        {
+          pT [i] = val;
+          pT2[i] = val;
+        }
+      }
+      else
+      {
+        // lower PU: second half of quadrant 1 plus quadrants 2 and 3
+        T *pT  = p;
+        for (i = 0; i < (iCurrPartNumQ>>1); i++)
+        {
+          pT[i] = val;
+        }
+
+        pT = p + iCurrPartNumQ;
+        for (i = 0; i < ( (iCurrPartNumQ>>1) + (iCurrPartNumQ<<1) ); i++)
+        {
+          pT[i] = val;
+        }
+      }
+      break;
+    }
+  case SIZE_2NxnD:
+    {
+      // asymmetric horizontal split; lower PU is a quarter of the CU height
+      Int iCurrPartNumQ = numElements>>2;
+      if( iPartIdx == 0 )
+      {
+        // upper PU: quadrants 0 and 1 plus the first half of quadrant 2
+        T *pT  = p;
+        for (i = 0; i < ( (iCurrPartNumQ>>1) + (iCurrPartNumQ<<1) ); i++)
+        {
+          pT[i] = val;
+        }
+        pT = p + ( numElements - iCurrPartNumQ );
+        for (i = 0; i < (iCurrPartNumQ>>1); i++)
+        {
+          pT[i] = val;
+        }
+      }
+      else
+      {
+        // lower PU: first half of quadrants 2 and 3 (relative to its base)
+        T *pT  = p;
+        T *pT2 = p + iCurrPartNumQ;
+        for (i = 0; i < (iCurrPartNumQ>>1); i++)
+        {
+          pT [i] = val;
+          pT2[i] = val;
+        }
+      }
+      break;
+    }
+  case SIZE_nLx2N:
+    {
+      // asymmetric vertical split; left PU is a quarter of the CU width
+      Int iCurrPartNumQ = numElements>>2;
+      if( iPartIdx == 0 )
+      {
+        // left PU: the leading quarter of each of the four z-scan quadrants
+        T *pT  = p;
+        T *pT2 = p + (iCurrPartNumQ<<1);
+        T *pT3 = p + (iCurrPartNumQ>>1);
+        T *pT4 = p + (iCurrPartNumQ<<1) + (iCurrPartNumQ>>1);
+
+        for (i = 0; i < (iCurrPartNumQ>>2); i++)
+        {
+          pT [i] = val;
+          pT2[i] = val;
+          pT3[i] = val;
+          pT4[i] = val;
+        }
+      }
+      else
+      {
+        // right PU: the remaining three quarters of each quadrant
+        T *pT  = p;
+        T *pT2 = p + (iCurrPartNumQ<<1);
+        for (i = 0; i < (iCurrPartNumQ>>2); i++)
+        {
+          pT [i] = val;
+          pT2[i] = val;
+        }
+
+        pT  = p + (iCurrPartNumQ>>1);
+        pT2 = p + (iCurrPartNumQ<<1) + (iCurrPartNumQ>>1);
+        for (i = 0; i < ( (iCurrPartNumQ>>2) + iCurrPartNumQ ); i++)
+        {
+          pT [i] = val;
+          pT2[i] = val;
+        }
+      }
+      break;
+    }
+  case SIZE_nRx2N:
+    {
+      // asymmetric vertical split; right PU is a quarter of the CU width
+      Int iCurrPartNumQ = numElements>>2;
+      if( iPartIdx == 0 )
+      {
+        // left PU: the leading three quarters of each z-scan quadrant
+        T *pT  = p;
+        T *pT2 = p + (iCurrPartNumQ<<1);
+        for (i = 0; i < ( (iCurrPartNumQ>>2) + iCurrPartNumQ ); i++)
+        {
+          pT [i] = val;
+          pT2[i] = val;
+        }
+
+        pT  = p + iCurrPartNumQ + (iCurrPartNumQ>>1);
+        pT2 = p + numElements - iCurrPartNumQ + (iCurrPartNumQ>>1);
+        for (i = 0; i < (iCurrPartNumQ>>2); i++)
+        {
+          pT [i] = val;
+          pT2[i] = val;
+        }
+      }
+      else
+      {
+        // right PU: the trailing quarter of each quadrant (relative to base)
+        T *pT  = p;
+        T *pT2 = p + (iCurrPartNumQ>>1);
+        T *pT3 = p + (iCurrPartNumQ<<1);
+        T *pT4 = p + (iCurrPartNumQ<<1) + (iCurrPartNumQ>>1);
+        for (i = 0; i < (iCurrPartNumQ>>2); i++)
+        {
+          pT [i] = val;
+          pT2[i] = val;
+          pT3[i] = val;
+          pT4[i] = val;
+        }
+      }
+      break;
+    }
+    default:
+      assert(0);
+      break;
+  }
+}
+
+// Set the motion vector of every partition covered by the given PU.
+Void TComCUMvField::setAllMv( TComMv const & mv, PartSize eCUMode, Int iPartAddr, UInt uiDepth, Int iPartIdx )
+{
+  setAll(m_pcMv, mv, eCUMode, iPartAddr, uiDepth, iPartIdx);
+}
+
+// Set the motion vector difference of every partition covered by the given PU.
+Void TComCUMvField::setAllMvd( TComMv const & mvd, PartSize eCUMode, Int iPartAddr, UInt uiDepth, Int iPartIdx )
+{
+  setAll(m_pcMvd, mvd, eCUMode, iPartAddr, uiDepth, iPartIdx);
+}
+
+// Set the reference index of every partition covered by the given PU.
+// The explicit cast narrows the Int to the Char storage type of m_piRefIdx.
+Void TComCUMvField::setAllRefIdx ( Int iRefIdx, PartSize eCUMode, Int iPartAddr, UInt uiDepth, Int iPartIdx )
+{
+  setAll(m_piRefIdx, static_cast<Char>(iRefIdx), eCUMode, iPartAddr, uiDepth, iPartIdx);
+}
+
+// Set both the motion vector and the reference index of every partition
+// covered by the given PU from one TComMvField.
+Void TComCUMvField::setAllMvField( TComMvField const & mvField, PartSize eCUMode, Int iPartAddr, UInt uiDepth, Int iPartIdx )
+{
+  setAllMv    ( mvField.getMv(),     eCUMode, iPartAddr, uiDepth, iPartIdx );
+  setAllRefIdx( mvField.getRefIdx(), eCUMode, iPartAddr, uiDepth, iPartIdx );
+}
+
+/** Subsampling of the stored prediction mode, reference index and motion vector.
+ *  Each scale x scale group of partitions is collapsed to the values of its
+ *  first (z-scan top-left) partition.
+ * \param pePredMode Pointer to per-partition prediction modes (Char storage)
+ * \param scale      Factor by which to subsample motion information
+ */
+Void TComCUMvField::compress(Char* pePredMode, Int scale)
+{
+  // Unsigned group size: avoids the signed/unsigned comparisons against
+  // m_uiNumPartition (UInt) present in the original loop bounds.
+  const UInt N = scale * scale;
+  assert( N > 0 && N <= m_uiNumPartition );
+
+  for ( UInt uiPartIdx = 0; uiPartIdx < m_uiNumPartition; uiPartIdx += N )
+  {
+    // Representative values taken from the first partition of the group.
+    // Keep predMode/refIdx in their Char storage type so the writes below
+    // perform no implicit Int->Char narrowing (matches the explicit
+    // static_cast<Char> convention used by setAllRefIdx).
+    const TComMv cMv     = m_pcMv     [ uiPartIdx ];
+    const Char   predMode = pePredMode [ uiPartIdx ];
+    const Char   cRefIdx  = m_piRefIdx [ uiPartIdx ];
+
+    for ( UInt i = 0; i < N; i++ )
+    {
+      m_pcMv    [ uiPartIdx + i ] = cMv;
+      pePredMode[ uiPartIdx + i ] = predMode;
+      m_piRefIdx[ uiPartIdx + i ] = cRefIdx;
+    }
+  }
+}
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComMotionInfo.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,160 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComMotionInfo.h
+    \brief    motion information handling classes (header)
+    \todo     TComMvField seems to be better to be inherited from TComMv
+*/
+
+#ifndef __TCOMMOTIONINFO__
+#define __TCOMMOTIONINFO__
+
+#include <memory.h>
+#include "CommonDef.h"
+#include "TComMv.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Type definition
+// ====================================================================================================================
+
+/// parameters for AMVP
+typedef struct _AMVPInfo
+{
+  TComMv m_acMvCand[ AMVP_MAX_NUM_CANDS_MEM ];  ///< array of motion vector predictor candidates
+  Int    iN;                                ///< number of motion vector predictor candidates
+} AMVPInfo;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// Motion vector paired with its reference picture index.
+class TComMvField
+{
+private:
+  TComMv    m_acMv;     ///< the motion vector
+  Int       m_iRefIdx;  ///< reference picture index; NOT_VALID when unset
+
+public:
+  /// default-constructed field carries no valid reference
+  TComMvField() : m_iRefIdx( NOT_VALID ) {}
+
+  /// store motion vector and reference index together
+  Void setMvField( TComMv const & cMv, Int iRefIdx )
+  {
+    m_acMv    = cMv;
+    m_iRefIdx = iRefIdx;
+  }
+
+  /// update only the reference index
+  Void setRefIdx( Int refIdx )
+  {
+    m_iRefIdx = refIdx;
+  }
+
+  TComMv const & getMv() const
+  {
+    return m_acMv;
+  }
+
+  TComMv & getMv()
+  {
+    return m_acMv;
+  }
+
+  Int getRefIdx() const
+  {
+    return m_iRefIdx;
+  }
+
+  Int getHor() const
+  {
+    return m_acMv.getHor();
+  }
+
+  Int getVer() const
+  {
+    return m_acMv.getVer();
+  }
+};
+
+/// class for motion information in one CU
+class TComCUMvField
+{
+private:
+  TComMv*   m_pcMv;            // per-partition motion vectors
+  TComMv*   m_pcMvd;           // per-partition motion vector differences
+  Char*     m_piRefIdx;        // per-partition reference picture indices
+  UInt      m_uiNumPartition;  // number of 4x4 partitions covered
+  AMVPInfo  m_cAMVPInfo;       // scratch storage for AMVP candidate list
+
+  // fill the entries covered by one PU with val (defined in TComMotionInfo.cpp)
+  template <typename T>
+  Void setAll( T *p, T const & val, PartSize eCUMode, Int iPartAddr, UInt uiDepth, Int iPartIdx );
+
+public:
+  TComCUMvField() : m_pcMv(NULL), m_pcMvd(NULL), m_piRefIdx(NULL), m_uiNumPartition(0) {}
+  ~TComCUMvField() {}
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // create / destroy
+  // ------------------------------------------------------------------------------------------------------------------
+
+  Void    create( UInt uiNumPartition );
+  Void    destroy();
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // clear / copy
+  // ------------------------------------------------------------------------------------------------------------------
+
+  Void    clearMvField();
+
+  Void    copyFrom( TComCUMvField const * pcCUMvFieldSrc, Int iNumPartSrc, Int iPartAddrDst );
+  Void    copyTo  ( TComCUMvField* pcCUMvFieldDst, Int iPartAddrDst ) const;
+  Void    copyTo  ( TComCUMvField* pcCUMvFieldDst, Int iPartAddrDst, UInt uiOffset, UInt uiNumPart ) const;
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // get
+  // ------------------------------------------------------------------------------------------------------------------
+
+  TComMv const & getMv    ( Int iIdx ) const { return  m_pcMv    [iIdx]; }
+  TComMv const & getMvd   ( Int iIdx ) const { return  m_pcMvd   [iIdx]; }
+  Int            getRefIdx( Int iIdx ) const { return  m_piRefIdx[iIdx]; }
+
+  AMVPInfo* getAMVPInfo () { return &m_cAMVPInfo; }
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // set
+  // ------------------------------------------------------------------------------------------------------------------
+
+  Void    setAllMv     ( TComMv const & rcMv,         PartSize eCUMode, Int iPartAddr, UInt uiDepth, Int iPartIdx=0 );
+  Void    setAllMvd    ( TComMv const & rcMvd,        PartSize eCUMode, Int iPartAddr, UInt uiDepth, Int iPartIdx=0 );
+  Void    setAllRefIdx ( Int iRefIdx,                 PartSize eMbMode, Int iPartAddr, UInt uiDepth, Int iPartIdx=0 );
+  Void    setAllMvField( TComMvField const & mvField, PartSize eMbMode, Int iPartAddr, UInt uiDepth, Int iPartIdx=0 );
+
+  // override the partition count without reallocating
+  Void setNumPartition( Int iNumPart )
+  {
+    m_uiNumPartition = iNumPart;
+  }
+
+  // Alias this field into src's arrays at the given partition offset.
+  // NOTE(review): after linking, this object does not own the arrays —
+  // calling destroy() on a linked field would delete src's storage; confirm
+  // callers never do that.
+  Void linkToWithOffset( TComCUMvField const * src, Int offset )
+  {
+    m_pcMv     = src->m_pcMv     + offset;
+    m_pcMvd    = src->m_pcMvd    + offset;
+    m_piRefIdx = src->m_piRefIdx + offset;
+  }
+
+  // subsample stored motion data in scale x scale groups (see .cpp)
+  Void compress(Char* pePredMode, Int scale);
+};
+
+//! \}
+
+#endif // __TCOMMOTIONINFO__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComMv.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,156 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComMv.h
+    \brief    motion vector class (header)
+*/
+
+#ifndef __TCOMMV__
+#define __TCOMMV__
+
+#include "CommonDef.h"
+#include <cstdlib>
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// basic motion vector class
+class TComMv
+{
+private:
+  Short m_iHor;     ///< horizontal component of motion vector
+  Short m_iVer;     ///< vertical component of motion vector
+
+public:
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // constructors
+  // ------------------------------------------------------------------------------------------------------------------
+
+  // zero motion vector
+  TComMv() :
+  m_iHor(0),
+  m_iVer(0)
+  {
+  }
+
+  TComMv( Short iHor, Short iVer ) :
+  m_iHor(iHor),
+  m_iVer(iVer)
+  {
+  }
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // set
+  // ------------------------------------------------------------------------------------------------------------------
+
+  Void  set       ( Short iHor, Short iVer)     { m_iHor = iHor;  m_iVer = iVer;            }
+  Void  setHor    ( Short i )                   { m_iHor = i;                               }
+  Void  setVer    ( Short i )                   { m_iVer = i;                               }
+  Void  setZero   ()                            { m_iHor = m_iVer = 0;  }
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // get
+  // ------------------------------------------------------------------------------------------------------------------
+
+  Int   getHor    () const { return m_iHor;          }
+  Int   getVer    () const { return m_iVer;          }
+  Int   getAbsHor () const { return abs( m_iHor );   }
+  Int   getAbsVer () const { return abs( m_iVer );   }
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // operations
+  // ------------------------------------------------------------------------------------------------------------------
+
+  const TComMv& operator += (const TComMv& rcMv)
+  {
+    m_iHor += rcMv.m_iHor;
+    m_iVer += rcMv.m_iVer;
+    return  *this;
+  }
+
+  const TComMv& operator-= (const TComMv& rcMv)
+  {
+    m_iHor -= rcMv.m_iHor;
+    m_iVer -= rcMv.m_iVer;
+    return  *this;
+  }
+
+  // component-wise right shift (halves MV precision per step)
+  const TComMv& operator>>= (const Int i)
+  {
+    m_iHor >>= i;
+    m_iVer >>= i;
+    return  *this;
+  }
+
+  // component-wise left shift.
+  // NOTE(review): left-shifting a negative Short is formally undefined
+  // behavior in C++ before C++20; this code relies on the usual
+  // two's-complement result — confirm compiler guarantees.
+  const TComMv& operator<<= (const Int i)
+  {
+    m_iHor <<= i;
+    m_iVer <<= i;
+    return  *this;
+  }
+
+  const TComMv operator - ( const TComMv& rcMv ) const
+  {
+    return TComMv( m_iHor - rcMv.m_iHor, m_iVer - rcMv.m_iVer );
+  }
+
+  const TComMv operator + ( const TComMv& rcMv ) const
+  {
+    return TComMv( m_iHor + rcMv.m_iHor, m_iVer + rcMv.m_iVer );
+  }
+
+  Bool operator== ( const TComMv& rcMv ) const
+  {
+    return (m_iHor==rcMv.m_iHor && m_iVer==rcMv.m_iVer);
+  }
+
+  Bool operator!= ( const TComMv& rcMv ) const
+  {
+    return (m_iHor!=rcMv.m_iHor || m_iVer!=rcMv.m_iVer);
+  }
+
+  // Scale this MV by iScale/256 with rounding (the +127 plus the extra +1
+  // for negative products forms the rounding offset before the >>8), then
+  // clip each component to the 16-bit Short range.
+  const TComMv scaleMv( Int iScale ) const
+  {
+    Int mvx = Clip3( -32768, 32767, (iScale * getHor() + 127 + (iScale * getHor() < 0)) >> 8 );
+    Int mvy = Clip3( -32768, 32767, (iScale * getVer() + 127 + (iScale * getVer() < 0)) >> 8 );
+    return TComMv( mvx, mvy );
+  }
+};// END CLASS DEFINITION TComMV
+
+//! \}
+
+#endif // __TCOMMV__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPattern.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,734 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComPattern.cpp
+    \brief    neighbouring pixel access classes
+*/
+
+#include "TComPic.h"
+#include "TComPattern.h"
+#include "TComDataCU.h"
+#include "TComTU.h"
+#include "Debug.h"
+#include "TComPrediction.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// Forward declarations
+
+/// padding of unavailable reference samples for intra prediction
+#if O0043_BEST_EFFORT_DECODING
+Void fillReferenceSamples( const Int bitDepth, const Int bitDepthDelta, TComDataCU* pcCU, const Pel* piRoiOrigin, Pel* piAdiTemp, const Bool* bNeighborFlags,
+#else
+Void fillReferenceSamples( const Int bitDepth, TComDataCU* pcCU, const Pel* piRoiOrigin, Pel* piAdiTemp, const Bool* bNeighborFlags,
+#endif
+                           const Int iNumIntraNeighbor, const Int unitWidth, const Int unitHeight, const Int iAboveUnits, const Int iLeftUnits,
+                           const UInt uiCuWidth, const UInt uiCuHeight, const UInt uiWidth, const UInt uiHeight, const Int iPicStride,
+                           const ChannelType chType, const ChromaFormat chFmt );
+
+/// constrained intra prediction
+Bool  isAboveLeftAvailable  ( TComDataCU* pcCU, UInt uiPartIdxLT );
+Int   isAboveAvailable      ( TComDataCU* pcCU, UInt uiPartIdxLT, UInt uiPartIdxRT, Bool* bValidFlags );
+Int   isLeftAvailable       ( TComDataCU* pcCU, UInt uiPartIdxLT, UInt uiPartIdxLB, Bool* bValidFlags );
+Int   isAboveRightAvailable ( TComDataCU* pcCU, UInt uiPartIdxLT, UInt uiPartIdxRT, Bool* bValidFlags );
+Int   isBelowLeftAvailable  ( TComDataCU* pcCU, UInt uiPartIdxLT, UInt uiPartIdxLB, Bool* bValidFlags );
+
+
+// ====================================================================================================================
+// Public member functions (TComPatternParam)
+// ====================================================================================================================
+
+/** Set ROI access parameters from a Pel buffer.
+ * (The neighbour-offset parameters documented in earlier HM versions
+ * no longer exist in this signature.)
+ \param  piTexture     pixel data (origin of the ROI)
+ \param  iRoiWidth     pattern width
+ \param  iRoiHeight    pattern height
+ \param  iStride       buffer stride
+ * All four values are stored verbatim in the corresponding members.
+ */
+Void TComPatternParam::setPatternParamPel ( Pel* piTexture,
+                                           Int iRoiWidth,
+                                           Int iRoiHeight,
+                                           Int iStride
+                                           )
+{
+  m_piROIOrigin    = piTexture;   // top-left sample of the region of interest
+  m_iROIWidth       = iRoiWidth;  // ROI width in samples
+  m_iROIHeight      = iRoiHeight; // ROI height in samples
+  m_iPatternStride  = iStride;    // stride of the underlying picture buffer
+}
+
+// ====================================================================================================================
+// Public member functions (TComPattern)
+// ====================================================================================================================
+
+Void TComPattern::initPattern (Pel* piY, // luma-only: the chroma pattern members are commented out in TComPattern.h
+                               Int iRoiWidth,
+                               Int iRoiHeight,
+                               Int iStride)
+{
+  m_cPatternY. setPatternParamPel( piY,  iRoiWidth, iRoiHeight, iStride); // forward all parameters to the luma pattern
+}
+
+
+// TODO: move this function to TComPrediction.cpp.
+Void TComPrediction::initAdiPatternChType( TComTU &rTu, Bool& bAbove, Bool& bLeft, const ComponentID compID, const Bool bFilterRefSamples DEBUG_STRING_FN_DECLARE(sDebug)) // build unfiltered (and optionally filtered) intra reference sample buffers for one TU
+{
+  const ChannelType chType    = toChannelType(compID);
+
+  TComDataCU *pcCU=rTu.getCU();
+  const UInt uiZorderIdxInPart=rTu.GetAbsPartIdxTU();
+  const UInt uiTuWidth        = rTu.getRect(compID).width;
+  const UInt uiTuHeight       = rTu.getRect(compID).height;
+  const UInt uiTuWidth2       = uiTuWidth  << 1; // twice the TU width: extent of the above + above-right reference row
+  const UInt uiTuHeight2      = uiTuHeight << 1; // twice the TU height: extent of the left + below-left reference column
+
+  const Int  iBaseUnitSize    = g_uiMaxCUWidth >> g_uiMaxCUDepth; // minimum partition size: max CU width >> max depth
+  const Int  iUnitWidth       = iBaseUnitSize  >> pcCU->getPic()->getPicYuvRec()->getComponentScaleX(compID); // unit size scaled for chroma subsampling
+  const Int  iUnitHeight      = iBaseUnitSize  >> pcCU->getPic()->getPicYuvRec()->getComponentScaleY(compID);
+  const Int  iTUWidthInUnits  = uiTuWidth  / iUnitWidth;
+  const Int  iTUHeightInUnits = uiTuHeight / iUnitHeight;
+  const Int  iAboveUnits      = iTUWidthInUnits  << 1; // above + above-right neighbouring units
+  const Int  iLeftUnits       = iTUHeightInUnits << 1; // left + below-left neighbouring units
+
+  assert(iTUHeightInUnits > 0 && iTUWidthInUnits > 0);
+
+  const Int  iPartIdxStride   = pcCU->getPic()->getNumPartInCtuWidth();
+  const UInt uiPartIdxLT      = pcCU->getZorderIdxInCtu() + uiZorderIdxInPart; // top-left partition of this TU in z-scan order
+  const UInt uiPartIdxRT      = g_auiRasterToZscan[ g_auiZscanToRaster[ uiPartIdxLT ] +   iTUWidthInUnits  - 1                   ]; // top-right partition
+  const UInt uiPartIdxLB      = g_auiRasterToZscan[ g_auiZscanToRaster[ uiPartIdxLT ] + ((iTUHeightInUnits - 1) * iPartIdxStride)]; // bottom-left partition
+
+  Int   iPicStride = pcCU->getPic()->getStride(compID);
+  Bool  bNeighborFlags[4 * MAX_NUM_SPU_W + 1]; // per-unit availability: [0..iLeftUnits-1] below-left/left (bottom-up), [iLeftUnits] above-left, rest above/above-right (left-to-right)
+  Int   iNumIntraNeighbor = 0;
+
+  bNeighborFlags[iLeftUnits] = isAboveLeftAvailable( pcCU, uiPartIdxLT );
+  iNumIntraNeighbor += bNeighborFlags[iLeftUnits] ? 1 : 0;
+  iNumIntraNeighbor  += isAboveAvailable     ( pcCU, uiPartIdxLT, uiPartIdxRT, (bNeighborFlags + iLeftUnits + 1)                    );
+  iNumIntraNeighbor  += isAboveRightAvailable( pcCU, uiPartIdxLT, uiPartIdxRT, (bNeighborFlags + iLeftUnits + 1 + iTUWidthInUnits ) );
+  iNumIntraNeighbor  += isLeftAvailable      ( pcCU, uiPartIdxLT, uiPartIdxLB, (bNeighborFlags + iLeftUnits - 1)                    );
+  iNumIntraNeighbor  += isBelowLeftAvailable ( pcCU, uiPartIdxLT, uiPartIdxLB, (bNeighborFlags + iLeftUnits - 1 - iTUHeightInUnits) );
+
+  bAbove = true; // output flags are unconditionally true here: unavailable samples are padded by fillReferenceSamples
+  bLeft  = true;
+
+  const ChromaFormat chFmt       = rTu.GetChromaFormat();
+  const UInt         uiROIWidth  = uiTuWidth2+1;  // 2*W+1 samples per reference row (includes top-left corner)
+  const UInt         uiROIHeight = uiTuHeight2+1; // 2*H+1 samples per reference column
+
+  assert(uiROIWidth*uiROIHeight <= m_iYuvExtSize); // reference array must fit in the preallocated extension buffer
+
+#ifdef DEBUG_STRING
+  std::stringstream ss(stringstream::out);
+#endif
+
+  {
+    Pel *piAdiTemp   = m_piYuvExt[compID][PRED_BUF_UNFILTERED]; // destination: unfiltered reference buffer
+    Pel *piRoiOrigin = pcCU->getPic()->getPicYuvRec()->getAddr(compID, pcCU->getCtuRsAddr(), pcCU->getZorderIdxInCtu()+uiZorderIdxInPart); // reconstructed picture, top-left of this TU
+#if O0043_BEST_EFFORT_DECODING
+    fillReferenceSamples (g_bitDepthInStream[chType], g_bitDepthInStream[chType] - g_bitDepth[chType], pcCU, piRoiOrigin, piAdiTemp, bNeighborFlags, iNumIntraNeighbor,  iUnitWidth, iUnitHeight, iAboveUnits, iLeftUnits,
+#else
+    fillReferenceSamples (g_bitDepth[chType], pcCU, piRoiOrigin, piAdiTemp, bNeighborFlags, iNumIntraNeighbor,  iUnitWidth, iUnitHeight, iAboveUnits, iLeftUnits,
+#endif
+                          uiTuWidth, uiTuHeight, uiROIWidth, uiROIHeight, iPicStride, toChannelType(compID), chFmt);
+
+
+#ifdef DEBUG_STRING
+    if (DebugOptionList::DebugString_Pred.getInt()&DebugStringGetPredModeMask(MODE_INTRA))
+    {
+      ss << "###: generating Ref Samples for channel " << compID << " and " << rTu.getRect(compID).width << " x " << rTu.getRect(compID).height << "\n";
+      for (UInt y=0; y<uiROIHeight; y++)
+      {
+        ss << "###: - ";
+        for (UInt x=0; x<uiROIWidth; x++)
+        {
+          if (x==0 || y==0) // only the first row and first column hold reference samples
+            ss << piAdiTemp[y*uiROIWidth + x] << ", ";
+//          if (x%16==15) ss << "\nPart size: ~ ";
+        }
+        ss << "\n";
+      }
+    }
+#endif
+
+    if (bFilterRefSamples)
+    {
+      // generate filtered intra prediction samples
+
+            Int          stride    = uiROIWidth;
+      const Pel         *piSrcPtr  = piAdiTemp                             + (stride * uiTuHeight2); // bottom left
+            Pel         *piDestPtr = m_piYuvExt[compID][PRED_BUF_FILTERED] + (stride * uiTuHeight2); // bottom left
+
+      //------------------------------------------------
+
+      Bool useStrongIntraSmoothing = isLuma(chType) && pcCU->getSlice()->getSPS()->getUseStrongIntraSmoothing(); // strong (bilinear) smoothing is luma-only and SPS-gated
+
+      const Pel bottomLeft = piAdiTemp[stride * uiTuHeight2];
+      const Pel topLeft    = piAdiTemp[0];
+      const Pel topRight   = piAdiTemp[uiTuWidth2];
+
+      if (useStrongIntraSmoothing)
+      {
+#if O0043_BEST_EFFORT_DECODING
+        const Int  threshold     = 1 << (g_bitDepthInStream[chType] - 5);
+#else
+        const Int  threshold     = 1 << (g_bitDepth[chType] - 5);
+#endif
+        const Bool bilinearLeft  = abs((bottomLeft + topLeft ) - (2 * piAdiTemp[stride * uiTuHeight])) < threshold; //difference between the
+        const Bool bilinearAbove = abs((topLeft    + topRight) - (2 * piAdiTemp[         uiTuWidth ])) < threshold; //ends and the middle
+        if ((uiTuWidth < 32) || (!bilinearLeft) || (!bilinearAbove)) // strong smoothing only for 32x32 TUs with near-linear edges
+          useStrongIntraSmoothing = false;
+      }
+
+      *piDestPtr = *piSrcPtr; // bottom left is not filtered
+      piDestPtr -= stride;
+      piSrcPtr  -= stride;
+
+      //------------------------------------------------
+
+      //left column (bottom to top)
+
+      if (useStrongIntraSmoothing)
+      {
+        const Int shift = g_aucConvertToBit[uiTuHeight] + 3; //log2(uiTuHeight2)
+
+        for(UInt i=1; i<uiTuHeight2; i++, piDestPtr-=stride)
+        {
+          *piDestPtr = (((uiTuHeight2 - i) * bottomLeft) + (i * topLeft) + uiTuHeight) >> shift; // bilinear interpolation between corner samples
+        }
+
+        piSrcPtr -= stride * (uiTuHeight2 - 1); // skip the source column: strong smoothing only used the corners
+      }
+      else
+      {
+        for(UInt i=1; i<uiTuHeight2; i++, piDestPtr-=stride, piSrcPtr-=stride)
+        {
+          *piDestPtr = ( piSrcPtr[stride] + 2*piSrcPtr[0] + piSrcPtr[-stride] + 2 ) >> 2; // [1 2 1]/4 smoothing filter
+        }
+      }
+
+      //------------------------------------------------
+
+      //top-left
+
+      if (useStrongIntraSmoothing)
+      {
+        *piDestPtr = piSrcPtr[0]; // corner is kept unfiltered in strong-smoothing mode
+      }
+      else
+      {
+        *piDestPtr = ( piSrcPtr[stride] + 2*piSrcPtr[0] + piSrcPtr[1] + 2 ) >> 2; // corner filtered from left and above neighbours
+      }
+      piDestPtr += 1;
+      piSrcPtr  += 1;
+
+      //------------------------------------------------
+
+      //top row (left-to-right)
+
+      if (useStrongIntraSmoothing)
+      {
+        const Int shift = g_aucConvertToBit[uiTuWidth] + 3; //log2(uiTuWidth2)
+
+        for(UInt i=1; i<uiTuWidth2; i++, piDestPtr++)
+        {
+          *piDestPtr = (((uiTuWidth2 - i) * topLeft) + (i * topRight) + uiTuWidth) >> shift; // bilinear interpolation between corner samples
+        }
+
+        piSrcPtr += uiTuWidth2 - 1; // skip the source row: strong smoothing only used the corners
+      }
+      else
+      {
+        for(UInt i=1; i<uiTuWidth2; i++, piDestPtr++, piSrcPtr++)
+        {
+          *piDestPtr = ( piSrcPtr[1] + 2*piSrcPtr[0] + piSrcPtr[-1] + 2 ) >> 2; // [1 2 1]/4 smoothing filter
+        }
+      }
+
+      //------------------------------------------------
+
+      *piDestPtr=*piSrcPtr; // far right is not filtered
+
+#ifdef DEBUG_STRING
+    if (DebugOptionList::DebugString_Pred.getInt()&DebugStringGetPredModeMask(MODE_INTRA))
+    {
+      ss << "###: filtered result for channel " << compID <<"\n";
+      for (UInt y=0; y<uiROIHeight; y++)
+      {
+        ss << "###: - ";
+        for (UInt x=0; x<uiROIWidth; x++)
+        {
+          if (x==0 || y==0) // only the first row and first column hold reference samples
+            ss << m_piYuvExt[compID][PRED_BUF_FILTERED][y*uiROIWidth + x] << ", ";
+//          if (x%16==15) ss << "\nPart size: ~ ";
+        }
+        ss << "\n";
+      }
+    }
+#endif
+
+
+    }
+  }
+  DEBUG_STRING_APPEND(sDebug, ss.str())
+}
+
+#if O0043_BEST_EFFORT_DECODING
+Void fillReferenceSamples( const Int bitDepth, const Int bitDepthDelta, TComDataCU* pcCU, const Pel* piRoiOrigin, Pel* piAdiTemp, const Bool* bNeighborFlags,
+#else
+Void fillReferenceSamples( const Int bitDepth, TComDataCU* pcCU, const Pel* piRoiOrigin, Pel* piAdiTemp, const Bool* bNeighborFlags, // fills piAdiTemp's first row/column with reference samples, padding the unavailable ones
+#endif
+                           const Int iNumIntraNeighbor, const Int unitWidth, const Int unitHeight, const Int iAboveUnits, const Int iLeftUnits,
+                           const UInt uiCuWidth, const UInt uiCuHeight, const UInt uiWidth, const UInt uiHeight, const Int iPicStride,
+                           const ChannelType chType, const ChromaFormat chFmt )
+{
+  const Pel* piRoiTemp;
+  Int  i, j; // NOTE(review): signed loop counters compared against UInt bounds below
+  Int  iDCValue = 1 << (bitDepth - 1); // mid-range value, used when a sample has no available neighbour
+  const Int iTotalUnits = iAboveUnits + iLeftUnits + 1; //+1 for top-left
+
+  if (iNumIntraNeighbor == 0) // no neighbour available at all
+  {
+    // Fill border with DC value
+    for (i=0; i<uiWidth; i++)
+    {
+      piAdiTemp[i] = iDCValue; // top row (incl. corner)
+    }
+    for (i=1; i<uiHeight; i++)
+    {
+      piAdiTemp[i*uiWidth] = iDCValue; // left column
+    }
+  }
+  else if (iNumIntraNeighbor == iTotalUnits) // all neighbours available: copy directly
+  {
+    // Fill top-left border and top and top right with rec. samples
+    piRoiTemp = piRoiOrigin - iPicStride - 1; // one row up, one sample left of the TU origin
+
+    for (i=0; i<uiWidth; i++)
+    {
+#if O0043_BEST_EFFORT_DECODING
+      piAdiTemp[i] = piRoiTemp[i] << bitDepthDelta; // rescale to stream bit depth
+#else
+      piAdiTemp[i] = piRoiTemp[i];
+#endif
+    }
+
+    // Fill left and below left border with rec. samples
+    piRoiTemp = piRoiOrigin - 1;
+
+    for (i=1; i<uiHeight; i++)
+    {
+#if O0043_BEST_EFFORT_DECODING
+      piAdiTemp[i*uiWidth] = (*(piRoiTemp)) << bitDepthDelta;
+#else
+      piAdiTemp[i*uiWidth] = *(piRoiTemp);
+#endif
+      piRoiTemp += iPicStride;
+    }
+  }
+  else // reference samples are partially available
+  {
+    // all above units have "unitWidth" samples each, all left/below-left units have "unitHeight" samples each
+    const Int  iTotalSamples = (iLeftUnits * unitHeight) + ((iAboveUnits + 1) * unitWidth);
+    Pel  piAdiLine[5 * MAX_CU_SIZE]; // linear working buffer: left/below-left (bottom-up), then top-left/above/above-right (left-to-right)
+    Pel  *piAdiLineTemp;
+    const Bool *pbNeighborFlags;
+
+
+    // Initialize
+    for (i=0; i<iTotalSamples; i++)
+    {
+      piAdiLine[i] = iDCValue; // default; overwritten below where neighbours exist
+    }
+
+    // Fill top-left sample
+    piRoiTemp = piRoiOrigin - iPicStride - 1;
+    piAdiLineTemp = piAdiLine + (iLeftUnits * unitHeight); // top-left unit starts right after the left-column samples
+    pbNeighborFlags = bNeighborFlags + iLeftUnits;         // flag index iLeftUnits is the above-left unit
+    if (*pbNeighborFlags)
+    {
+#if O0043_BEST_EFFORT_DECODING
+      Pel topLeftVal=piRoiTemp[0] << bitDepthDelta;
+#else
+      Pel topLeftVal=piRoiTemp[0];
+#endif
+      for (i=0; i<unitWidth; i++)
+      {
+        piAdiLineTemp[i] = topLeftVal; // replicate the single corner sample across the whole unit
+      }
+    }
+
+    // Fill left & below-left samples (downwards)
+    piRoiTemp += iPicStride;
+    piAdiLineTemp--;     // left column is stored bottom-up in the line buffer
+    pbNeighborFlags--;
+
+    for (j=0; j<iLeftUnits; j++)
+    {
+      if (*pbNeighborFlags)
+      {
+        for (i=0; i<unitHeight; i++)
+        {
+#if O0043_BEST_EFFORT_DECODING
+          piAdiLineTemp[-i] = piRoiTemp[i*iPicStride] << bitDepthDelta;
+#else
+          piAdiLineTemp[-i] = piRoiTemp[i*iPicStride]; // negative index: buffer runs opposite to picture scan order
+#endif
+        }
+      }
+      piRoiTemp += unitHeight*iPicStride;
+      piAdiLineTemp -= unitHeight;
+      pbNeighborFlags--;
+    }
+
+    // Fill above & above-right samples (left-to-right) (each unit has "unitWidth" samples)
+    piRoiTemp = piRoiOrigin - iPicStride;
+    // offset line buffer by iNumUints2*unitHeight (for left/below-left) + unitWidth (for above-left)
+    piAdiLineTemp = piAdiLine + (iLeftUnits * unitHeight) + unitWidth;
+    pbNeighborFlags = bNeighborFlags + iLeftUnits + 1;
+    for (j=0; j<iAboveUnits; j++)
+    {
+      if (*pbNeighborFlags)
+      {
+        for (i=0; i<unitWidth; i++)
+        {
+#if O0043_BEST_EFFORT_DECODING
+          piAdiLineTemp[i] = piRoiTemp[i] << bitDepthDelta;
+#else
+          piAdiLineTemp[i] = piRoiTemp[i];
+#endif
+        }
+      }
+      piRoiTemp += unitWidth;
+      piAdiLineTemp += unitWidth;
+      pbNeighborFlags++;
+    }
+
+    // Pad reference samples when necessary
+    Int iCurrJnit = 0; // (sic: presumably "iCurrUnit") index of the unit currently being padded
+    Pel  *piAdiLineCur   = piAdiLine;
+    const UInt piAdiLineTopRowOffset = iLeftUnits * (unitHeight - unitWidth); // corrects unit indexing when left units and above units differ in size
+
+    if (!bNeighborFlags[0])
+    {
+      // very bottom unit of bottom-left; at least one unit will be valid.
+      {
+        Int   iNext = 1;
+        while (iNext < iTotalUnits && !bNeighborFlags[iNext]) // find the first available unit (guaranteed: iNumIntraNeighbor > 0 here)
+        {
+          iNext++;
+        }
+        Pel *piAdiLineNext = piAdiLine + ((iNext < iLeftUnits) ? (iNext * unitHeight) : (piAdiLineTopRowOffset + (iNext * unitWidth)));
+        const Pel refSample = *piAdiLineNext;
+        // Pad unavailable samples with new value
+        Int iNextOrTop = std::min<Int>(iNext, iLeftUnits);
+        // fill left column
+        while (iCurrJnit < iNextOrTop)
+        {
+          for (i=0; i<unitHeight; i++)
+          {
+            piAdiLineCur[i] = refSample;
+          }
+          piAdiLineCur += unitHeight;
+          iCurrJnit++;
+        }
+        // fill top row
+        while (iCurrJnit < iNext)
+        {
+          for (i=0; i<unitWidth; i++)
+          {
+            piAdiLineCur[i] = refSample;
+          }
+          piAdiLineCur += unitWidth;
+          iCurrJnit++;
+        }
+      }
+    }
+
+    // pad all other reference samples.
+    while (iCurrJnit < iTotalUnits)
+    {
+      if (!bNeighborFlags[iCurrJnit]) // samples not available
+      {
+        {
+          const Int numSamplesInCurrUnit = (iCurrJnit >= iLeftUnits) ? unitWidth : unitHeight;
+          const Pel refSample = *(piAdiLineCur-1); // replicate last sample of the previous (already valid/padded) unit
+          for (i=0; i<numSamplesInCurrUnit; i++)
+          {
+            piAdiLineCur[i] = refSample;
+          }
+          piAdiLineCur += numSamplesInCurrUnit;
+          iCurrJnit++;
+        }
+      }
+      else
+      {
+        piAdiLineCur += (iCurrJnit >= iLeftUnits) ? unitWidth : unitHeight;
+        iCurrJnit++;
+      }
+    }
+
+    // Copy processed samples
+
+    piAdiLineTemp = piAdiLine + uiHeight + unitWidth - 2; // last sample of the top-left unit: becomes the corner reference sample
+    // top left, top and top right samples
+    for (i=0; i<uiWidth; i++)
+    {
+      piAdiTemp[i] = piAdiLineTemp[i];
+    }
+
+    piAdiLineTemp = piAdiLine + uiHeight - 1;
+    for (i=1; i<uiHeight; i++)
+    {
+      piAdiTemp[i*uiWidth] = piAdiLineTemp[-i]; // left column: read the line buffer in reverse (it was filled bottom-up)
+    }
+  }
+}
+
+/** Decide whether the intra reference samples should be smoothed
+ * \param compID      colour component being predicted
+ * \param uiDirMode   intra prediction mode index
+ * \param uiTuChWidth,uiTuChHeight  transform-unit size for this channel
+ * \param chFmt       chroma format of the picture
+ * \param intraReferenceSmoothingDisabled  flag globally disabling smoothing
+ * \return            true if the filtered reference buffer should be used
+ */
+
+Bool TComPrediction::filteringIntraReferenceSamples(const ComponentID compID, UInt uiDirMode, UInt uiTuChWidth, UInt uiTuChHeight, const ChromaFormat chFmt, const Bool intraReferenceSmoothingDisabled)
+{
+  Bool bFilter;
+
+  if (!filterIntraReferenceSamples(toChannelType(compID), chFmt, intraReferenceSmoothingDisabled)) // smoothing disallowed for this channel/format combination
+  {
+    bFilter=false;
+  }
+  else
+  {
+    assert(uiTuChWidth>=4 && uiTuChHeight>=4 && uiTuChWidth<128 && uiTuChHeight<128); // legal TU sizes only
+
+    if (uiDirMode == DC_IDX)
+    {
+      bFilter=false; //no smoothing for DC or LM chroma
+    }
+    else
+    {
+      Int diff = min<Int>(abs((Int) uiDirMode - HOR_IDX), abs((Int)uiDirMode - VER_IDX)); // angular distance from the nearest pure horizontal/vertical mode
+      UInt sizeIndex=g_aucConvertToBit[uiTuChWidth];
+      assert(sizeIndex < MAX_INTRA_FILTER_DEPTHS);
+      bFilter = diff > m_aucIntraFilter[toChannelType(compID)][sizeIndex]; // filter only sufficiently diagonal modes; threshold depends on TU size
+    }
+  }
+  return bFilter;
+}
+
+Bool isAboveLeftAvailable( TComDataCU* pcCU, UInt uiPartIdxLT ) // availability of the single above-left neighbouring unit
+{
+  Bool bAboveLeftFlag;
+  UInt uiPartAboveLeft;
+  TComDataCU* pcCUAboveLeft = pcCU->getPUAboveLeft( uiPartAboveLeft, uiPartIdxLT );
+  if(pcCU->getSlice()->getPPS()->getConstrainedIntraPred())
+  {
+    bAboveLeftFlag = ( pcCUAboveLeft && pcCUAboveLeft->isIntra( uiPartAboveLeft ) ); // constrained: neighbour must exist AND be intra-coded
+  }
+  else
+  {
+    bAboveLeftFlag = (pcCUAboveLeft ? true : false); // unconstrained: existence alone suffices
+  }
+  return bAboveLeftFlag;
+}
+
+Int isAboveAvailable( TComDataCU* pcCU, UInt uiPartIdxLT, UInt uiPartIdxRT, Bool *bValidFlags ) // per-unit availability of the above row; returns count of available units
+{
+  const UInt uiRasterPartBegin = g_auiZscanToRaster[uiPartIdxLT];
+  const UInt uiRasterPartEnd = g_auiZscanToRaster[uiPartIdxRT]+1;
+  const UInt uiIdxStep = 1; // step one unit right per iteration
+  Bool *pbValidFlags = bValidFlags;
+  Int iNumIntra = 0;
+
+  for ( UInt uiRasterPart = uiRasterPartBegin; uiRasterPart < uiRasterPartEnd; uiRasterPart += uiIdxStep )
+  {
+    UInt uiPartAbove;
+    TComDataCU* pcCUAbove = pcCU->getPUAbove( uiPartAbove, g_auiRasterToZscan[uiRasterPart] );
+    if(pcCU->getSlice()->getPPS()->getConstrainedIntraPred())
+    {
+      if ( pcCUAbove && pcCUAbove->isIntra( uiPartAbove ) ) // constrained: neighbour must exist AND be intra-coded
+      {
+        iNumIntra++;
+        *pbValidFlags = true;
+      }
+      else
+      {
+        *pbValidFlags = false;
+      }
+    }
+    else
+    {
+      if (pcCUAbove) // unconstrained: existence alone suffices
+      {
+        iNumIntra++;
+        *pbValidFlags = true;
+      }
+      else
+      {
+        *pbValidFlags = false;
+      }
+    }
+    pbValidFlags++; // flags written left-to-right
+  }
+  return iNumIntra;
+}
+
+Int isLeftAvailable( TComDataCU* pcCU, UInt uiPartIdxLT, UInt uiPartIdxLB, Bool *bValidFlags ) // per-unit availability of the left column; returns count of available units
+{
+  const UInt uiRasterPartBegin = g_auiZscanToRaster[uiPartIdxLT];
+  const UInt uiRasterPartEnd = g_auiZscanToRaster[uiPartIdxLB]+1;
+  const UInt uiIdxStep = pcCU->getPic()->getNumPartInCtuWidth(); // step one unit row down per iteration
+  Bool *pbValidFlags = bValidFlags;
+  Int iNumIntra = 0;
+
+  for ( UInt uiRasterPart = uiRasterPartBegin; uiRasterPart < uiRasterPartEnd; uiRasterPart += uiIdxStep )
+  {
+    UInt uiPartLeft;
+    TComDataCU* pcCULeft = pcCU->getPULeft( uiPartLeft, g_auiRasterToZscan[uiRasterPart] );
+    if(pcCU->getSlice()->getPPS()->getConstrainedIntraPred())
+    {
+      if ( pcCULeft && pcCULeft->isIntra( uiPartLeft ) ) // constrained: neighbour must exist AND be intra-coded
+      {
+        iNumIntra++;
+        *pbValidFlags = true;
+      }
+      else
+      {
+        *pbValidFlags = false;
+      }
+    }
+    else
+    {
+      if ( pcCULeft ) // unconstrained: existence alone suffices
+      {
+        iNumIntra++;
+        *pbValidFlags = true;
+      }
+      else
+      {
+        *pbValidFlags = false;
+      }
+    }
+    pbValidFlags--; // opposite direction: the left-flag array is filled towards index 0 (bottom-up)
+  }
+
+  return iNumIntra;
+}
+
+Int isAboveRightAvailable( TComDataCU* pcCU, UInt uiPartIdxLT, UInt uiPartIdxRT, Bool *bValidFlags ) // per-unit availability above-right of the block; returns count of available units
+{
+  const UInt uiNumUnitsInPU = g_auiZscanToRaster[uiPartIdxRT] - g_auiZscanToRaster[uiPartIdxLT] + 1; // block width in units = number of above-right units checked
+  Bool *pbValidFlags = bValidFlags;
+  Int iNumIntra = 0;
+
+  for ( UInt uiOffset = 1; uiOffset <= uiNumUnitsInPU; uiOffset++ )
+  {
+    UInt uiPartAboveRight;
+    TComDataCU* pcCUAboveRight = pcCU->getPUAboveRightAdi( uiPartAboveRight, uiPartIdxRT, uiOffset );
+    if(pcCU->getSlice()->getPPS()->getConstrainedIntraPred())
+    {
+      if ( pcCUAboveRight && pcCUAboveRight->isIntra( uiPartAboveRight ) ) // constrained: neighbour must exist AND be intra-coded
+      {
+        iNumIntra++;
+        *pbValidFlags = true;
+      }
+      else
+      {
+        *pbValidFlags = false;
+      }
+    }
+    else
+    {
+      if ( pcCUAboveRight ) // unconstrained: existence alone suffices
+      {
+        iNumIntra++;
+        *pbValidFlags = true;
+      }
+      else
+      {
+        *pbValidFlags = false;
+      }
+    }
+    pbValidFlags++; // flags written left-to-right
+  }
+
+  return iNumIntra;
+}
+
+Int isBelowLeftAvailable( TComDataCU* pcCU, UInt uiPartIdxLT, UInt uiPartIdxLB, Bool *bValidFlags ) // per-unit availability below-left of the block; returns count of available units
+{
+  const UInt uiNumUnitsInPU = (g_auiZscanToRaster[uiPartIdxLB] - g_auiZscanToRaster[uiPartIdxLT]) / pcCU->getPic()->getNumPartInCtuWidth() + 1; // block height in units = number of below-left units checked
+  Bool *pbValidFlags = bValidFlags;
+  Int iNumIntra = 0;
+
+  for ( UInt uiOffset = 1; uiOffset <= uiNumUnitsInPU; uiOffset++ )
+  {
+    UInt uiPartBelowLeft;
+    TComDataCU* pcCUBelowLeft = pcCU->getPUBelowLeftAdi( uiPartBelowLeft, uiPartIdxLB, uiOffset );
+    if(pcCU->getSlice()->getPPS()->getConstrainedIntraPred())
+    {
+      if ( pcCUBelowLeft && pcCUBelowLeft->isIntra( uiPartBelowLeft ) ) // constrained: neighbour must exist AND be intra-coded
+      {
+        iNumIntra++;
+        *pbValidFlags = true;
+      }
+      else
+      {
+        *pbValidFlags = false;
+      }
+    }
+    else
+    {
+      if ( pcCUBelowLeft ) // unconstrained: existence alone suffices
+      {
+        iNumIntra++;
+        *pbValidFlags = true;
+      }
+      else
+      {
+        *pbValidFlags = false;
+      }
+    }
+    pbValidFlags--; // opposite direction: the left-side flag array is filled towards lower indices (bottom-up)
+  }
+
+  return iNumIntra;
+}
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPattern.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,114 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComPattern.h
+    \brief    neighbouring pixel access classes (header)
+*/
+
+#ifndef __TCOMPATTERN__
+#define __TCOMPATTERN__
+
+// Include files
+#include <stdio.h>
+#include "CommonDef.h"
+#include <string>
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+class TComDataCU;
+class TComTU;
+
+/// neighbouring pixel access class for one component
+class TComPatternParam
+{
+private:
+  Pel*  m_piROIOrigin;    // top-left sample of the region of interest
+
+public:
+  Int   m_iROIWidth;      // ROI width in samples
+  Int   m_iROIHeight;     // ROI height in samples
+  Int   m_iPatternStride; // stride of the underlying picture buffer
+
+  /// return starting position of ROI (ROI = &pattern[AboveOffset][LeftOffset])
+  __inline Pel*  getROIOrigin()
+  {
+    return  m_piROIOrigin;
+  }
+
+  /// set parameters from Pel buffer for accessing neighbouring pixels
+  Void setPatternParamPel (Pel*        piTexture,
+                           Int         iRoiWidth,
+                           Int         iRoiHeight,
+                           Int         iStride
+                           );
+};
+
+/// neighbouring pixel access class for all components
+class TComPattern
+{
+private:
+  TComPatternParam  m_cPatternY;  // luma pattern; chroma counterparts below are disabled in this build
+//  TComPatternParam  m_cPatternCb;
+  //TComPatternParam  m_cPatternCr;
+
+public:
+
+  // ROI & pattern information, (ROI = &pattern[AboveOffset][LeftOffset])
+  Pel*  getROIY()                 { return m_cPatternY.getROIOrigin();    } // origin of the luma ROI
+  Int   getROIYWidth()            { return m_cPatternY.m_iROIWidth;       } // luma ROI width in samples
+  Int   getROIYHeight()           { return m_cPatternY.m_iROIHeight;      } // luma ROI height in samples
+  Int   getPatternLStride()       { return m_cPatternY.m_iPatternStride;  } // stride of the luma buffer
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // initialization functions
+  // -------------------------------------------------------------------------------------------------------------------
+
+  /// set parameters from Pel buffers for accessing neighbouring pixels
+  Void initPattern            (Pel*        piY,
+                               Int         iRoiWidth,
+                               Int         iRoiHeight,
+                               Int         iStride );
+
+
+
+
+};
+
+//! \}
+
+#endif // __TCOMPATTERN__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPic.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,177 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComPic.cpp
+    \brief    picture class
+*/
+
+#include "TComPic.h"
+#include "SEI.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / create / destroy
+// ====================================================================================================================
+
+// Default constructor: zero/NULL-initialize all state so that destroy()
+// and create() can safely distinguish allocated from unallocated members.
+TComPic::TComPic()
+: m_uiTLayer                              (0)
+, m_bUsedByCurr                           (false)
+, m_bIsLongTerm                           (false)
+, m_apcPicSym                             (NULL)
+, m_pcPicYuvPred                          (NULL)
+, m_pcPicYuvResi                          (NULL)
+, m_bReconstructed                        (false)
+, m_bNeededForOutput                      (false)
+, m_uiCurrSliceIdx                        (0)
+, m_bCheckLTMSB                           (false)
+{
+  // All three YUV planes (ORG / REC / TRUE_ORG) start unallocated.
+  for(UInt i=0; i<NUM_PIC_YUV; i++)
+  {
+    m_apcPicYuv[i]      = NULL;
+  }
+}
+
+// Destructor is intentionally empty: all heap members are released by the
+// explicit (virtual) destroy() call, not by the destructor.
+TComPic::~TComPic()
+{
+}
+
+// Allocate the picture: symbol structures always, and the ORG/TRUE_ORG YUV
+// buffers only for non-virtual pictures (virtual pictures carry just the
+// reconstruction buffer).  Also stores the conformance/display windows and
+// the per-temporal-layer reorder counts with the picture.
+Void TComPic::create( Int iWidth, Int iHeight, ChromaFormat chromaFormatIDC, UInt uiMaxWidth, UInt uiMaxHeight, UInt uiMaxDepth, Window &conformanceWindow, Window &defaultDisplayWindow,
+                      Int *numReorderPics, Bool bIsVirtual)
+{
+  m_apcPicSym     = new TComPicSym;  m_apcPicSym   ->create( chromaFormatIDC, iWidth, iHeight, uiMaxWidth, uiMaxHeight, uiMaxDepth );
+  if (!bIsVirtual)
+  {
+    m_apcPicYuv[PIC_YUV_ORG]  = new TComPicYuv;  m_apcPicYuv[PIC_YUV_ORG]->create( iWidth, iHeight, chromaFormatIDC, uiMaxWidth, uiMaxHeight, uiMaxDepth );
+    m_apcPicYuv[PIC_YUV_TRUE_ORG]  = new TComPicYuv;  m_apcPicYuv[PIC_YUV_TRUE_ORG]->create( iWidth, iHeight, chromaFormatIDC, uiMaxWidth, uiMaxHeight, uiMaxDepth );
+  }
+  m_apcPicYuv[PIC_YUV_REC]  = new TComPicYuv;  m_apcPicYuv[PIC_YUV_REC]->create( iWidth, iHeight, chromaFormatIDC, uiMaxWidth, uiMaxHeight, uiMaxDepth );
+
+  // there are no SEI messages associated with this picture initially
+  if (m_SEIs.size() > 0)
+  {
+    deleteSEIs (m_SEIs);
+  }
+  m_bUsedByCurr = false;
+
+  /* store conformance window parameters with picture */
+  m_conformanceWindow = conformanceWindow;
+
+  /* store display window parameters with picture */
+  m_defaultDisplayWindow = defaultDisplayWindow;
+
+  /* store number of reorder pics with picture */
+  // numReorderPics must point to at least MAX_TLAYER entries (one per temporal layer)
+  memcpy(m_numReorderPics, numReorderPics, MAX_TLAYER*sizeof(Int));
+
+  return;
+}
+
+// Release everything create() allocated.  Each pointer is NULL-checked and
+// reset to NULL afterwards, so destroy() is safe to call more than once and
+// on a picture that was never (fully) created.
+Void TComPic::destroy()
+{
+  if (m_apcPicSym)
+  {
+    m_apcPicSym->destroy();
+    delete m_apcPicSym;
+    m_apcPicSym = NULL;
+  }
+
+  for(UInt i=0; i<NUM_PIC_YUV; i++)
+  {
+    if (m_apcPicYuv[i])
+    {
+      m_apcPicYuv[i]->destroy();
+      delete m_apcPicYuv[i];
+      m_apcPicYuv[i]  = NULL;
+    }
+  }
+
+  // SEI messages are owned by the picture; free them here.
+  deleteSEIs(m_SEIs);
+}
+
+// Compress the motion field of every CTU in the frame (delegates to
+// TComDataCU::compressMV for each CTU in raster order).
+Void TComPic::compressMotion()
+{
+  TComPicSym* pPicSym = getPicSym();
+  for ( UInt uiCUAddr = 0; uiCUAddr < pPicSym->getNumberOfCtusInFrame(); uiCUAddr++ )
+  {
+    TComDataCU* pCtu = pPicSym->getCtu(uiCUAddr);
+    pCtu->compressMV();
+  }
+}
+
+// SAO may merge parameters from a neighbouring CTU only when that CTU lies
+// in the same slice segment AND the same tile as the current CTU.
+// currAddr/mergeAddr are CTU raster-scan addresses.
+Bool  TComPic::getSAOMergeAvailability(Int currAddr, Int mergeAddr)
+{
+  Bool mergeCtbInSliceSeg = (mergeAddr >= getPicSym()->getCtuTsToRsAddrMap(getCtu(currAddr)->getSlice()->getSliceCurStartCtuTsAddr()));
+  Bool mergeCtbInTile     = (getPicSym()->getTileIdxMap(mergeAddr) == getPicSym()->getTileIdxMap(currAddr));
+  return (mergeCtbInSliceSeg && mergeCtbInTile);
+}
+
+// Map a CTU address to the index of the entropy-coding substream it belongs
+// to.  Three cases:
+//  - wavefronts (entropy coding sync): one substream per CTU row per tile;
+//  - tiles without wavefronts: one substream per tile;
+//  - single substream: always 0.
+// ctuAddr is interpreted as raster-scan when bAddressInRaster, else tile-scan.
+UInt TComPic::getSubstreamForCtuAddr(const UInt ctuAddr, const Bool bAddressInRaster, TComSlice *pcSlice)
+{
+  UInt subStrm;
+
+  if (pcSlice->getPPS()->getNumSubstreams() > 1) // wavefronts, and possibly tiles being used.
+  {
+    if (pcSlice->getPPS()->getEntropyCodingSyncEnabledFlag())
+    {
+      const TComPicSym &picSym            = *(getPicSym());
+      const UInt ctuRsAddr                = bAddressInRaster?ctuAddr : picSym.getCtuTsToRsAddrMap(ctuAddr);
+      const UInt frameWidthInCtus         = picSym.getFrameWidthInCtus();
+      const UInt tileIndex                = picSym.getTileIdxMap(ctuRsAddr);
+      const UInt numTileColumns           = (picSym.getNumTileColumnsMinus1()+1);
+      const TComTile *pTile               = picSym.getTComTile(tileIndex);
+      const UInt firstCtuRsAddrOfTile     = pTile->getFirstCtuRsAddr();
+      const UInt tileYInCtus              = firstCtuRsAddrOfTile / frameWidthInCtus;
+      // independent tiles => substreams are "per tile"
+      const UInt ctuLine                  = ctuRsAddr / frameWidthInCtus;
+      const UInt startingSubstreamForTile =(tileYInCtus*numTileColumns) + (pTile->getTileHeightInCtus()*(tileIndex%numTileColumns));
+      // substream index = first substream of this tile + row offset within the tile
+      subStrm = startingSubstreamForTile + (ctuLine - tileYInCtus);
+    }
+    else
+    {
+      const TComPicSym &picSym            = *(getPicSym());
+      const UInt ctuRsAddr                = bAddressInRaster?ctuAddr : picSym.getCtuTsToRsAddrMap(ctuAddr);
+      const UInt tileIndex                = picSym.getTileIdxMap(ctuRsAddr);
+      subStrm=tileIndex;
+    }
+  }
+  else
+  {
+    // dependent tiles => substreams are "per frame".
+    subStrm = 0;
+  }
+  return subStrm;
+}
+
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPic.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,181 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComPic.h
+    \brief    picture class (header)
+*/
+
+#ifndef __TCOMPIC__
+#define __TCOMPIC__
+
+// Include files
+#include "CommonDef.h"
+#include "TComPicSym.h"
+#include "TComPicYuv.h"
+#include "TComBitStream.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// picture class (symbol + YUV buffers)
+
+class TComPic
+{
+public:
+  // Indices into m_apcPicYuv: original input, reconstruction, and the input
+  // before any pre-encoder colour-space conversion.
+  typedef enum { PIC_YUV_ORG=0, PIC_YUV_REC=1, PIC_YUV_TRUE_ORG=2, NUM_PIC_YUV=3 } PIC_YUV_T;
+     // TRUE_ORG is the input file without any pre-encoder colour space conversion (but with possible bit depth increment)
+  TComPicYuv*   getPicYuvTrueOrg()        { return  m_apcPicYuv[PIC_YUV_TRUE_ORG]; }
+
+private:
+  UInt                  m_uiTLayer;               //  Temporal layer
+  Bool                  m_bUsedByCurr;            //  Used by current picture
+  Bool                  m_bIsLongTerm;            //  IS long term picture
+  TComPicSym*           m_apcPicSym;              //  Symbol
+  TComPicYuv*           m_apcPicYuv[NUM_PIC_YUV];
+
+  TComPicYuv*           m_pcPicYuvPred;           //  Prediction
+  TComPicYuv*           m_pcPicYuvResi;           //  Residual
+  Bool                  m_bReconstructed;
+  Bool                  m_bNeededForOutput;
+  UInt                  m_uiCurrSliceIdx;         // Index of current slice
+  Bool                  m_bCheckLTMSB;
+
+  Int                   m_numReorderPics[MAX_TLAYER];  // reorder count per temporal layer
+  Window                m_conformanceWindow;
+  Window                m_defaultDisplayWindow;
+
+  Bool                  m_isTop;                  // field coding: top/bottom field flag
+  Bool                  m_isField;                // field coding: picture is a field, not a frame
+
+  std::vector<std::vector<TComDataCU*> > m_vSliceCUDataLink;
+
+  SEIMessages  m_SEIs; ///< Any SEI messages that have been received.  If !NULL we own the object.
+
+public:
+  TComPic();
+  virtual ~TComPic();
+
+  Void          create( Int iWidth, Int iHeight, ChromaFormat chromaFormatIDC, UInt uiMaxWidth, UInt uiMaxHeight, UInt uiMaxDepth, Window &conformanceWindow, Window &defaultDisplayWindow,
+                        Int *numReorderPics,Bool bIsVirtual /*= false*/ );
+
+  virtual Void  destroy();
+
+  UInt          getTLayer() const               { return m_uiTLayer;   }
+  Void          setTLayer( UInt uiTLayer ) { m_uiTLayer = uiTLayer; }
+
+  Bool          getUsedByCurr() const            { return m_bUsedByCurr; }
+  Void          setUsedByCurr( Bool bUsed ) { m_bUsedByCurr = bUsed; }
+  Bool          getIsLongTerm() const            { return m_bIsLongTerm; }
+  Void          setIsLongTerm( Bool lt ) { m_bIsLongTerm = lt; }
+  Void          setCheckLTMSBPresent     (Bool b ) {m_bCheckLTMSB=b;}
+  Bool          getCheckLTMSBPresent     () { return m_bCheckLTMSB;}
+
+  TComPicSym*   getPicSym()           { return  m_apcPicSym;    }
+  TComSlice*    getSlice(Int i)       { return  m_apcPicSym->getSlice(i);  }
+  // POC is taken from the slice at the current slice index
+  Int           getPOC() const        { return  m_apcPicSym->getSlice(m_uiCurrSliceIdx)->getPOC();  }
+  TComDataCU*   getCtu( UInt ctuRsAddr )           { return  m_apcPicSym->getCtu( ctuRsAddr ); }
+  const TComDataCU* getCtu( UInt ctuRsAddr ) const { return  m_apcPicSym->getCtu( ctuRsAddr ); }
+
+  TComPicYuv*   getPicYuvOrg()        { return  m_apcPicYuv[PIC_YUV_ORG]; }
+  TComPicYuv*   getPicYuvRec()        { return  m_apcPicYuv[PIC_YUV_REC]; }
+
+  TComPicYuv*   getPicYuvPred()       { return  m_pcPicYuvPred; }
+  TComPicYuv*   getPicYuvResi()       { return  m_pcPicYuvResi; }
+  Void          setPicYuvPred( TComPicYuv* pcPicYuv )       { m_pcPicYuvPred = pcPicYuv; }
+  Void          setPicYuvResi( TComPicYuv* pcPicYuv )       { m_pcPicYuvResi = pcPicYuv; }
+
+  // Geometry accessors: all forwarded to the picture symbol map.
+  UInt          getNumberOfCtusInFrame() const     { return m_apcPicSym->getNumberOfCtusInFrame(); }
+  UInt          getNumPartInCtuWidth() const       { return m_apcPicSym->getNumPartInCtuWidth();   }
+  UInt          getNumPartInCtuHeight() const      { return m_apcPicSym->getNumPartInCtuHeight();  }
+  UInt          getNumPartitionsInCtu() const      { return m_apcPicSym->getNumPartitionsInCtu();  }
+  UInt          getFrameWidthInCtus() const        { return m_apcPicSym->getFrameWidthInCtus();    }
+  UInt          getFrameHeightInCtus() const       { return m_apcPicSym->getFrameHeightInCtus();   }
+  UInt          getMinCUWidth() const              { return m_apcPicSym->getMinCUWidth();          }
+  UInt          getMinCUHeight() const             { return m_apcPicSym->getMinCUHeight();         }
+
+  // Buffer-format accessors: forwarded to the reconstruction buffer, which
+  // always exists (allocated for both real and virtual pictures).
+  Int           getStride(const ComponentID id) const          { return m_apcPicYuv[PIC_YUV_REC]->getStride(id); }
+  Int           getComponentScaleX(const ComponentID id) const    { return m_apcPicYuv[PIC_YUV_REC]->getComponentScaleX(id); }
+  Int           getComponentScaleY(const ComponentID id) const    { return m_apcPicYuv[PIC_YUV_REC]->getComponentScaleY(id); }
+  ChromaFormat  getChromaFormat() const                           { return m_apcPicYuv[PIC_YUV_REC]->getChromaFormat(); }
+  Int           getNumberValidComponents() const                  { return m_apcPicYuv[PIC_YUV_REC]->getNumberValidComponents(); }
+
+  Void          setReconMark (Bool b) { m_bReconstructed = b;     }
+  Bool          getReconMark () const      { return m_bReconstructed;  }
+  Void          setOutputMark (Bool b) { m_bNeededForOutput = b;     }
+  Bool          getOutputMark () const      { return m_bNeededForOutput;  }
+
+  Void          setNumReorderPics(Int i, UInt tlayer) { m_numReorderPics[tlayer] = i;    }
+  Int           getNumReorderPics(UInt tlayer)        { return m_numReorderPics[tlayer]; }
+
+  Void          compressMotion();
+  UInt          getCurrSliceIdx() const           { return m_uiCurrSliceIdx;                }
+  Void          setCurrSliceIdx(UInt i)      { m_uiCurrSliceIdx = i;                   }
+  UInt          getNumAllocatedSlice() const      {return m_apcPicSym->getNumAllocatedSlice();}
+  Void          allocateNewSlice()           {m_apcPicSym->allocateNewSlice();         }
+  Void          clearSliceBuffer()           {m_apcPicSym->clearSliceBuffer();         }
+
+  Window&       getConformanceWindow()  { return m_conformanceWindow; }
+  Window&       getDefDisplayWindow()   { return m_defaultDisplayWindow; }
+
+  Bool          getSAOMergeAvailability(Int currAddr, Int mergeAddr);
+
+  UInt          getSubstreamForCtuAddr(const UInt ctuAddr, const Bool bAddressInRaster, TComSlice *pcSlice);
+
+  /* field coding parameters*/
+
+   Void              setTopField(Bool b)                  {m_isTop = b;}
+   Bool              isTopField()                         {return m_isTop;}
+   Void              setField(Bool b)                     {m_isField = b;}
+   Bool              isField()                            {return m_isField;}
+
+  /** transfer ownership of seis to this picture */
+  Void setSEIs(SEIMessages& seis) { m_SEIs = seis; }
+
+  /**
+   * return the current list of SEI messages associated with this picture.
+   * Pointer is valid until this->destroy() is called */
+  SEIMessages& getSEIs() { return m_SEIs; }
+
+  /**
+   * return the current list of SEI messages associated with this picture.
+   * Pointer is valid until this->destroy() is called */
+  const SEIMessages& getSEIs() const { return m_SEIs; }
+};// END CLASS DEFINITION TComPic
+
+//! \}
+
+#endif // __TCOMPIC__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPicSym.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,483 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComPicSym.cpp
+    \brief    picture symbol class
+*/
+
+#include "TComPicSym.h"
+#include "TComSampleAdaptiveOffset.h"
+#include "TComSlice.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / create / destroy
+// ====================================================================================================================
+
+// Default constructor: zero/NULL every member so create()/destroy() can tell
+// what has been allocated.
+TComPicSym::TComPicSym()
+:m_frameWidthInCtus(0)
+,m_frameHeightInCtus(0)
+,m_uiMaxCUWidth(0)
+,m_uiMaxCUHeight(0)
+,m_uiMinCUWidth(0)
+,m_uiMinCUHeight(0)
+,m_uhTotalDepth(0)
+,m_numPartitionsInCtu(0)
+,m_numPartInCtuWidth(0)
+,m_numPartInCtuHeight(0)
+,m_numCtusInFrame(0)
+,m_apcTComSlice(NULL)
+,m_uiNumAllocatedSlice(0)
+,m_pictureCtuArray(NULL)
+,m_numTileColumnsMinus1(0)
+,m_numTileRowsMinus1(0)
+,m_ctuTsToRsAddrMap(NULL)
+,m_puiTileIdxMap(NULL)
+,m_ctuRsToTsAddrMap(NULL)
+,m_saoBlkParams(NULL)
+{}
+
+
+// Allocate all per-picture symbol storage: CTU array, slice array (one
+// initial slice), tile-scan <-> raster-scan address maps (initialized to
+// identity) and per-CTU SAO parameters.  Frame dimensions are rounded up to
+// whole CTUs.
+Void TComPicSym::create  ( ChromaFormat chromaFormatIDC, Int iPicWidth, Int iPicHeight, UInt uiMaxWidth, UInt uiMaxHeight, UInt uiMaxDepth )
+{
+  UInt i;
+
+  m_uhTotalDepth       = uiMaxDepth;
+  m_numPartitionsInCtu = 1<<(m_uhTotalDepth<<1);  // 4^depth minimum partitions per CTU
+
+  m_uiMaxCUWidth       = uiMaxWidth;
+  m_uiMaxCUHeight      = uiMaxHeight;
+
+  m_uiMinCUWidth       = uiMaxWidth  >> m_uhTotalDepth;
+  m_uiMinCUHeight      = uiMaxHeight >> m_uhTotalDepth;
+
+  m_numPartInCtuWidth  = m_uiMaxCUWidth  / m_uiMinCUWidth;  // equivalent to 1<<m_uhTotalDepth
+  m_numPartInCtuHeight = m_uiMaxCUHeight / m_uiMinCUHeight; // equivalent to 1<<m_uhTotalDepth
+
+  // ceil-divide picture dimensions by the CTU size
+  m_frameWidthInCtus   = ( iPicWidth %m_uiMaxCUWidth  ) ? iPicWidth /m_uiMaxCUWidth  + 1 : iPicWidth /m_uiMaxCUWidth;
+  m_frameHeightInCtus  = ( iPicHeight%m_uiMaxCUHeight ) ? iPicHeight/m_uiMaxCUHeight + 1 : iPicHeight/m_uiMaxCUHeight;
+
+  m_numCtusInFrame     = m_frameWidthInCtus * m_frameHeightInCtus;
+  m_pictureCtuArray    = new TComDataCU*[m_numCtusInFrame];
+
+  // Re-creating an already-created object: free the old slice array first.
+  if (m_uiNumAllocatedSlice>0)
+  {
+    for ( i=0; i<m_uiNumAllocatedSlice ; i++ )
+    {
+      delete m_apcTComSlice[i];
+    }
+    delete [] m_apcTComSlice;
+  }
+  // Slice array is sized for the worst case of one slice per CTU.
+  m_apcTComSlice      = new TComSlice*[m_numCtusInFrame];
+  m_apcTComSlice[0]   = new TComSlice;
+  m_uiNumAllocatedSlice = 1;
+  for ( i=0; i<m_numCtusInFrame ; i++ )
+  {
+    m_pictureCtuArray[i] = new TComDataCU;
+    m_pictureCtuArray[i]->create( chromaFormatIDC, m_numPartitionsInCtu, m_uiMaxCUWidth, m_uiMaxCUHeight, false, m_uiMaxCUWidth >> m_uhTotalDepth
+#if ADAPTIVE_QP_SELECTION
+      , true
+#endif
+      );
+  }
+
+  // +1: the maps also carry a sentinel entry for the one-past-the-end address
+  // (see initCtuTsRsAddrMaps).
+  m_ctuTsToRsAddrMap = new UInt[m_numCtusInFrame+1];
+  m_puiTileIdxMap    = new UInt[m_numCtusInFrame];
+  m_ctuRsToTsAddrMap = new UInt[m_numCtusInFrame+1];
+
+  // Identity mapping until tiles are configured via initCtuTsRsAddrMaps().
+  for( i=0; i<m_numCtusInFrame; i++ )
+  {
+    m_ctuTsToRsAddrMap[i] = i;
+    m_ctuRsToTsAddrMap[i] = i;
+  }
+
+  m_saoBlkParams = new SAOBlkParam[m_numCtusInFrame];
+}
+
+// Release all storage allocated by create().  Safe on a never-created object:
+// counters are zero and pointers NULL, and delete[] NULL is a no-op.
+// Fix: loop indices are UInt, matching the unsigned counters
+// m_uiNumAllocatedSlice and m_numCtusInFrame (previously Int, which triggered
+// signed/unsigned comparison and was inconsistent with clearSliceBuffer()).
+Void TComPicSym::destroy()
+{
+  if (m_uiNumAllocatedSlice>0)
+  {
+    for (UInt i = 0; i<m_uiNumAllocatedSlice ; i++ )
+    {
+      delete m_apcTComSlice[i];
+    }
+    delete [] m_apcTComSlice;
+  }
+  m_apcTComSlice = NULL;
+
+  for (UInt i = 0; i < m_numCtusInFrame; i++)
+  {
+    m_pictureCtuArray[i]->destroy();
+    delete m_pictureCtuArray[i];
+    m_pictureCtuArray[i] = NULL;
+  }
+  delete [] m_pictureCtuArray;
+  m_pictureCtuArray = NULL;
+
+  delete [] m_ctuTsToRsAddrMap;
+  m_ctuTsToRsAddrMap = NULL;
+
+  delete [] m_puiTileIdxMap;
+  m_puiTileIdxMap = NULL;
+
+  delete [] m_ctuRsToTsAddrMap;
+  m_ctuRsToTsAddrMap = NULL;
+
+  if(m_saoBlkParams)
+  {
+    delete[] m_saoBlkParams; m_saoBlkParams = NULL;
+  }
+}
+
+// Append a new slice to the slice array (capacity is one slice per CTU).
+// From the second slice on, the new slice inherits the previous slice's
+// parameters before being (re)initialized.
+Void TComPicSym::allocateNewSlice()
+{
+  assert ((m_uiNumAllocatedSlice + 1) <= m_numCtusInFrame);
+  m_apcTComSlice[m_uiNumAllocatedSlice ++] = new TComSlice;
+  if (m_uiNumAllocatedSlice>=2)
+  {
+    m_apcTComSlice[m_uiNumAllocatedSlice-1]->copySliceInfo( m_apcTComSlice[m_uiNumAllocatedSlice-2] );
+    m_apcTComSlice[m_uiNumAllocatedSlice-1]->initSlice();
+  }
+}
+
+// Delete all slices except the first one and reset the count to 1; the
+// surviving slice 0 is reused for the next picture.
+Void TComPicSym::clearSliceBuffer()
+{
+  UInt i;
+  for (i = 1; i < m_uiNumAllocatedSlice; i++)
+  {
+    delete m_apcTComSlice[i];
+  }
+  m_uiNumAllocatedSlice = 1;
+}
+
+// Build the tile-scan <-> raster-scan CTU address maps by walking the frame
+// in tile-scan order (xCalculateNextCtuRSAddr supplies the successor of each
+// raster address).  A sentinel one-past-the-end entry is appended so lookups
+// at getNumberOfCtusInFrame() are valid.
+Void TComPicSym::initCtuTsRsAddrMaps()
+{
+  //generate the Coding Order Map and Inverse Coding Order Map
+  for(Int ctuTsAddr=0, ctuRsAddr=0; ctuTsAddr<getNumberOfCtusInFrame(); ctuTsAddr++, ctuRsAddr = xCalculateNextCtuRSAddr(ctuRsAddr))
+  {
+    setCtuTsToRsAddrMap(ctuTsAddr, ctuRsAddr);
+    setCtuRsToTsAddrMap(ctuRsAddr, ctuTsAddr);
+  }
+  setCtuTsToRsAddrMap(getNumberOfCtusInFrame(), getNumberOfCtusInFrame());
+  setCtuRsToTsAddrMap(getNumberOfCtusInFrame(), getNumberOfCtusInFrame());
+}
+
+// Configure the tile grid from the PPS: per-tile width/height (uniform or
+// explicit spacing), right/bottom edge positions, first-CTU address of each
+// tile, and the per-CTU tile-index map.
+Void TComPicSym::initTiles(TComPPS *pps)
+{
+  //set NumColumnsMinus1 and NumRowsMinus1
+  setNumTileColumnsMinus1( pps->getNumTileColumnsMinus1() );
+  setNumTileRowsMinus1(    pps->getNumTileRowsMinus1()    );
+
+  const Int numCols = pps->getNumTileColumnsMinus1() + 1;
+  const Int numRows = pps->getNumTileRowsMinus1() + 1;
+  const Int numTiles = numRows * numCols;
+
+  // allocate memory for tile parameters
+  m_tileParameters.resize(numTiles);
+
+  if( pps->getTileUniformSpacingFlag() )
+  {
+    //set width and height for each (uniform) tile
+    // Integer arithmetic distributes remainder CTUs evenly across tiles.
+    for(Int row=0; row < numRows; row++)
+    {
+      for(Int col=0; col < numCols; col++)
+      {
+        const Int tileIdx = row * numCols + col;
+        m_tileParameters[tileIdx].setTileWidthInCtus(  (col+1)*getFrameWidthInCtus( )/numCols - (col*getFrameWidthInCtus( ))/numCols );
+        m_tileParameters[tileIdx].setTileHeightInCtus( (row+1)*getFrameHeightInCtus()/numRows - (row*getFrameHeightInCtus())/numRows );
+      }
+    }
+  }
+  else
+  {
+    //set the width for each tile
+    // Explicit spacing: widths/heights come from the PPS; the last column/row
+    // takes whatever remains of the frame.
+    for(Int row=0; row < numRows; row++)
+    {
+      Int cumulativeTileWidth = 0;
+      for(Int col=0; col < getNumTileColumnsMinus1(); col++)
+      {
+        m_tileParameters[row * numCols + col].setTileWidthInCtus( pps->getTileColumnWidth(col) );
+        cumulativeTileWidth += pps->getTileColumnWidth(col);
+      }
+      m_tileParameters[row * numCols + getNumTileColumnsMinus1()].setTileWidthInCtus( getFrameWidthInCtus()-cumulativeTileWidth );
+    }
+
+    //set the height for each tile
+    for(Int col=0; col < numCols; col++)
+    {
+      Int cumulativeTileHeight = 0;
+      for(Int row=0; row < getNumTileRowsMinus1(); row++)
+      {
+        m_tileParameters[row * numCols + col].setTileHeightInCtus( pps->getTileRowHeight(row) );
+        cumulativeTileHeight += pps->getTileRowHeight(row);
+      }
+      m_tileParameters[getNumTileRowsMinus1() * numCols + col].setTileHeightInCtus( getFrameHeightInCtus()-cumulativeTileHeight );
+    }
+  }
+
+#if TILE_SIZE_CHECK
+  // Main/Main10 profiles impose minimum tile dimensions (in CTUs).
+  Int minWidth  = 1;
+  Int minHeight = 1;
+  const Int profileIdc = pps->getSPS()->getPTL()->getGeneralPTL()->getProfileIdc();
+  if (  profileIdc == Profile::MAIN || profileIdc == Profile::MAIN10) //TODO: add more profiles to the tile-size check...
+  {
+    if (pps->getTilesEnabledFlag())
+    {
+      minHeight = 64  / g_uiMaxCUHeight;
+      minWidth  = 256 / g_uiMaxCUWidth;
+    }
+  }
+  for(Int row=0; row < numRows; row++)
+  {
+    for(Int col=0; col < numCols; col++)
+    {
+      const Int tileIdx = row * numCols + col;
+      assert (m_tileParameters[tileIdx].getTileWidthInCtus() >= minWidth);
+      assert (m_tileParameters[tileIdx].getTileHeightInCtus() >= minHeight);
+    }
+  }
+#endif
+
+  //initialize each tile of the current picture
+  for( Int row=0; row < numRows; row++ )
+  {
+    for( Int col=0; col < numCols; col++ )
+    {
+      const Int tileIdx = row * numCols + col;
+
+      //initialize the RightEdgePosInCU for each tile
+      // Right edge = sum of widths of this column and all columns to its left, minus 1.
+      Int rightEdgePosInCTU = 0;
+      for( Int i=0; i <= col; i++ )
+      {
+        rightEdgePosInCTU += m_tileParameters[row * numCols + i].getTileWidthInCtus();
+      }
+      m_tileParameters[tileIdx].setRightEdgePosInCtus(rightEdgePosInCTU-1);
+
+      //initialize the BottomEdgePosInCU for each tile
+      Int bottomEdgePosInCTU = 0;
+      for( Int i=0; i <= row; i++ )
+      {
+        bottomEdgePosInCTU += m_tileParameters[i * numCols + col].getTileHeightInCtus();
+      }
+      m_tileParameters[tileIdx].setBottomEdgePosInCtus(bottomEdgePosInCTU-1);
+
+      //initialize the FirstCUAddr for each tile
+      m_tileParameters[tileIdx].setFirstCtuRsAddr( (m_tileParameters[tileIdx].getBottomEdgePosInCtus() - m_tileParameters[tileIdx].getTileHeightInCtus() + 1) * getFrameWidthInCtus() +
+                                                    m_tileParameters[tileIdx].getRightEdgePosInCtus()  - m_tileParameters[tileIdx].getTileWidthInCtus()  + 1);
+    }
+  }
+
+  Int  columnIdx = 0;
+  Int  rowIdx = 0;
+
+  //initialize the TileIdxMap
+  // Column widths are identical across rows, so row 0 (index "col") is used
+  // to find the column; likewise row*numCols (column 0) to find the row.
+  for( Int i=0; i<m_numCtusInFrame; i++)
+  {
+    for( Int col=0; col < numCols; col++)
+    {
+      if(i % getFrameWidthInCtus() <= m_tileParameters[col].getRightEdgePosInCtus())
+      {
+        columnIdx = col;
+        break;
+      }
+    }
+    for(Int row=0; row < numRows; row++)
+    {
+      if(i / getFrameWidthInCtus() <= m_tileParameters[row*numCols].getBottomEdgePosInCtus())
+      {
+        rowIdx = row;
+        break;
+      }
+    }
+    m_puiTileIdxMap[i] = rowIdx * numCols + columnIdx;
+  }
+}
+// Given a CTU raster-scan address, return the raster-scan address of the
+// next CTU in tile-scan (coding) order: advance within the tile row, wrap to
+// the tile's next row, or jump to the next tile's first CTU; returns
+// m_numCtusInFrame past the last CTU of the last tile.
+UInt TComPicSym::xCalculateNextCtuRSAddr( UInt currCtuRsAddr )
+{
+  UInt  nextCtuRsAddr;
+
+  //get the tile index for the current CTU
+  const UInt uiTileIdx = getTileIdxMap(currCtuRsAddr);
+
+  //get the raster scan address for the next CTU
+  if( currCtuRsAddr % m_frameWidthInCtus == getTComTile(uiTileIdx)->getRightEdgePosInCtus() && currCtuRsAddr / m_frameWidthInCtus == getTComTile(uiTileIdx)->getBottomEdgePosInCtus() )
+  //the current CTU is the last CTU of the tile
+  {
+    if(uiTileIdx+1 == getNumTiles())
+    {
+      nextCtuRsAddr = m_numCtusInFrame;
+    }
+    else
+    {
+      nextCtuRsAddr = getTComTile(uiTileIdx+1)->getFirstCtuRsAddr();
+    }
+  }
+  else //the current CTU is not the last CTU of the tile
+  {
+    if( currCtuRsAddr % m_frameWidthInCtus == getTComTile(uiTileIdx)->getRightEdgePosInCtus() )  //the current CTU is on the rightmost edge of the tile
+    {
+      // wrap to the first CTU of the next row within the same tile
+      nextCtuRsAddr = currCtuRsAddr + m_frameWidthInCtus - getTComTile(uiTileIdx)->getTileWidthInCtus() + 1;
+    }
+    else
+    {
+      nextCtuRsAddr = currCtuRsAddr + 1;
+    }
+  }
+
+  return nextCtuRsAddr;
+}
+
+// Determine, for the 8 neighbours of a CTU, whether in-loop filtering may
+// cross the boundary towards that neighbour.  Three constraints are applied
+// in order: (1) picture bounds, (2) slice boundaries and each slice's
+// loop-filter-across-slices flag, (3) tile boundaries when
+// loop_filter_across_tiles is disabled in the PPS.
+Void TComPicSym::deriveLoopFilterBoundaryAvailibility(Int ctuRsAddr,
+                                                      Bool& isLeftAvail,
+                                                      Bool& isRightAvail,
+                                                      Bool& isAboveAvail,
+                                                      Bool& isBelowAvail,
+                                                      Bool& isAboveLeftAvail,
+                                                      Bool& isAboveRightAvail,
+                                                      Bool& isBelowLeftAvail,
+                                                      Bool& isBelowRightAvail
+                                                      )
+{
+
+  // (1) picture-boundary availability
+  isLeftAvail      = (ctuRsAddr % m_frameWidthInCtus != 0);
+  isRightAvail     = (ctuRsAddr % m_frameWidthInCtus != m_frameWidthInCtus-1);
+  isAboveAvail     = (ctuRsAddr >= m_frameWidthInCtus );
+  isBelowAvail     = (ctuRsAddr <  m_numCtusInFrame - m_frameWidthInCtus);
+  isAboveLeftAvail = (isAboveAvail && isLeftAvail);
+  isAboveRightAvail= (isAboveAvail && isRightAvail);
+  isBelowLeftAvail = (isBelowAvail && isLeftAvail);
+  isBelowRightAvail= (isBelowAvail && isRightAvail);
+
+  Bool isLoopFiltAcrossTilePPS = getCtu(ctuRsAddr)->getSlice()->getPPS()->getLoopFilterAcrossTilesEnabledFlag();
+
+  {
+    TComDataCU* ctuCurr  = getCtu(ctuRsAddr);
+    TComDataCU* ctuLeft  = isLeftAvail ?getCtu(ctuRsAddr-1):NULL;
+    TComDataCU* ctuRight = isRightAvail?getCtu(ctuRsAddr+1):NULL;
+    TComDataCU* ctuAbove = isAboveAvail?getCtu(ctuRsAddr-m_frameWidthInCtus):NULL;
+    TComDataCU* ctuBelow = isBelowAvail?getCtu(ctuRsAddr+m_frameWidthInCtus):NULL;
+    TComDataCU* ctuAboveLeft  = isAboveLeftAvail ? getCtu(ctuRsAddr-m_frameWidthInCtus-1):NULL;
+    TComDataCU* ctuAboveRight = isAboveRightAvail? getCtu(ctuRsAddr-m_frameWidthInCtus+1):NULL;
+    TComDataCU* ctuBelowLeft  = isBelowLeftAvail ? getCtu(ctuRsAddr+m_frameWidthInCtus-1):NULL;
+    TComDataCU* ctuBelowRight = isBelowRightAvail? getCtu(ctuRsAddr+m_frameWidthInCtus+1):NULL;
+
+    // (2) slice-boundary availability: when the neighbour belongs to a
+    // different slice, availability depends on the relevant slice's
+    // loop-filter-across-slice-boundaries flag.  For left/above/above-left
+    // the current slice's flag applies; for right/below/below-right the
+    // neighbour's flag applies; the two diagonals use whichever slice
+    // starts later in coding order.
+    {
+      //left
+      if(ctuLeft != NULL)
+      {
+        isLeftAvail = (ctuCurr->getSlice()->getSliceCurStartCtuTsAddr() != ctuLeft->getSlice()->getSliceCurStartCtuTsAddr())?ctuCurr->getSlice()->getLFCrossSliceBoundaryFlag():true;
+      }
+      //above
+      if(ctuAbove != NULL)
+      {
+        isAboveAvail = (ctuCurr->getSlice()->getSliceCurStartCtuTsAddr() != ctuAbove->getSlice()->getSliceCurStartCtuTsAddr())?ctuCurr->getSlice()->getLFCrossSliceBoundaryFlag():true;
+      }
+      //right
+      if(ctuRight != NULL)
+      {
+        isRightAvail = (ctuCurr->getSlice()->getSliceCurStartCtuTsAddr() != ctuRight->getSlice()->getSliceCurStartCtuTsAddr())?ctuRight->getSlice()->getLFCrossSliceBoundaryFlag():true;
+      }
+      //below
+      if(ctuBelow != NULL)
+      {
+        isBelowAvail = (ctuCurr->getSlice()->getSliceCurStartCtuTsAddr() != ctuBelow->getSlice()->getSliceCurStartCtuTsAddr())?ctuBelow->getSlice()->getLFCrossSliceBoundaryFlag():true;
+      }
+      //above-left
+      if(ctuAboveLeft != NULL)
+      {
+        isAboveLeftAvail = (ctuCurr->getSlice()->getSliceCurStartCtuTsAddr() != ctuAboveLeft->getSlice()->getSliceCurStartCtuTsAddr())?ctuCurr->getSlice()->getLFCrossSliceBoundaryFlag():true;
+      }
+      //below-right
+      if(ctuBelowRight != NULL)
+      {
+        isBelowRightAvail = (ctuCurr->getSlice()->getSliceCurStartCtuTsAddr() != ctuBelowRight->getSlice()->getSliceCurStartCtuTsAddr())?ctuBelowRight->getSlice()->getLFCrossSliceBoundaryFlag():true;
+      }
+
+      //above-right
+      if(ctuAboveRight != NULL)
+      {
+        Int curSliceStartTsAddr  = ctuCurr->getSlice()->getSliceCurStartCtuTsAddr();
+        Int aboveRightSliceStartTsAddr = ctuAboveRight->getSlice()->getSliceCurStartCtuTsAddr();
+
+        isAboveRightAvail = (curSliceStartTsAddr == aboveRightSliceStartTsAddr)?(true):
+          (
+          (curSliceStartTsAddr > aboveRightSliceStartTsAddr)?(ctuCurr->getSlice()->getLFCrossSliceBoundaryFlag())
+          :(ctuAboveRight->getSlice()->getLFCrossSliceBoundaryFlag())
+          );
+      }
+      //below-left
+      if(ctuBelowLeft != NULL)
+      {
+        Int curSliceStartTsAddr       = ctuCurr->getSlice()->getSliceCurStartCtuTsAddr();
+        Int belowLeftSliceStartTsAddr = ctuBelowLeft->getSlice()->getSliceCurStartCtuTsAddr();
+
+        isBelowLeftAvail = (curSliceStartTsAddr == belowLeftSliceStartTsAddr)?(true):
+          (
+          (curSliceStartTsAddr > belowLeftSliceStartTsAddr)?(ctuCurr->getSlice()->getLFCrossSliceBoundaryFlag())
+          :(ctuBelowLeft->getSlice()->getLFCrossSliceBoundaryFlag())
+          );
+      }
+    }
+
+    // (3) tile-boundary availability: if filtering across tiles is disabled,
+    // a neighbour is only available when it lies in the same tile.
+    if(!isLoopFiltAcrossTilePPS)
+    {
+      isLeftAvail      = (!isLeftAvail      ) ?false:(getTileIdxMap( ctuLeft->getCtuRsAddr()         ) == getTileIdxMap( ctuRsAddr ));
+      isAboveAvail     = (!isAboveAvail     ) ?false:(getTileIdxMap( ctuAbove->getCtuRsAddr()        ) == getTileIdxMap( ctuRsAddr ));
+      isRightAvail     = (!isRightAvail     ) ?false:(getTileIdxMap( ctuRight->getCtuRsAddr()        ) == getTileIdxMap( ctuRsAddr ));
+      isBelowAvail     = (!isBelowAvail     ) ?false:(getTileIdxMap( ctuBelow->getCtuRsAddr()        ) == getTileIdxMap( ctuRsAddr ));
+      isAboveLeftAvail = (!isAboveLeftAvail ) ?false:(getTileIdxMap( ctuAboveLeft->getCtuRsAddr()    ) == getTileIdxMap( ctuRsAddr ));
+      isAboveRightAvail= (!isAboveRightAvail) ?false:(getTileIdxMap( ctuAboveRight->getCtuRsAddr()   ) == getTileIdxMap( ctuRsAddr ));
+      isBelowLeftAvail = (!isBelowLeftAvail ) ?false:(getTileIdxMap( ctuBelowLeft->getCtuRsAddr()    ) == getTileIdxMap( ctuRsAddr ));
+      isBelowRightAvail= (!isBelowRightAvail) ?false:(getTileIdxMap( ctuBelowRight->getCtuRsAddr()   ) == getTileIdxMap( ctuRsAddr ));
+    }
+  }
+
+}
+
+
+// Tile descriptor: all geometry (in CTU units) starts at zero and is filled
+// in by TComPicSym::initTiles().
+TComTile::TComTile()
+: m_tileWidthInCtus     (0)
+, m_tileHeightInCtus    (0)
+, m_rightEdgePosInCtus  (0)
+, m_bottomEdgePosInCtus (0)
+, m_firstCtuRsAddr      (0)
+{
+}
+
+// TComTile owns no heap resources; destructor is intentionally empty.
+TComTile::~TComTile()
+{
+}
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPicSym.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,162 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComPicSym.h
+    \brief    picture symbol class (header)
+*/
+
+#ifndef __TCOMPICSYM__
+#define __TCOMPICSYM__
+
+
+// Include files
+#include "CommonDef.h"
+#include "TComSlice.h"
+#include "TComDataCU.h"
+class TComSampleAdaptiveOffset;
+class TComPPS;
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// Describes one tile of a picture's tile grid: its size in CTUs, the CTU
+/// coordinates of its right/bottom edges, and the raster-scan address of its
+/// first CTU. Presumably populated by TComPicSym::initTiles() -- confirm.
+class TComTile
+{
+private:
+  UInt      m_tileWidthInCtus;        ///< tile width, in CTU columns
+  UInt      m_tileHeightInCtus;       ///< tile height, in CTU rows
+  UInt      m_rightEdgePosInCtus;     ///< CTU column of the tile's right edge
+  UInt      m_bottomEdgePosInCtus;    ///< CTU row of the tile's bottom edge
+  UInt      m_firstCtuRsAddr;         ///< raster-scan address of the tile's first CTU
+
+public:
+  TComTile();
+  virtual ~TComTile();
+
+  // Trivial accessors; all quantities are expressed in CTU units.
+  Void      setTileWidthInCtus     ( UInt i )            { m_tileWidthInCtus = i; }
+  UInt      getTileWidthInCtus     () const              { return m_tileWidthInCtus; }
+  Void      setTileHeightInCtus    ( UInt i )            { m_tileHeightInCtus = i; }
+  UInt      getTileHeightInCtus    () const              { return m_tileHeightInCtus; }
+  Void      setRightEdgePosInCtus  ( UInt i )            { m_rightEdgePosInCtus = i; }
+  UInt      getRightEdgePosInCtus  () const              { return m_rightEdgePosInCtus; }
+  Void      setBottomEdgePosInCtus ( UInt i )            { m_bottomEdgePosInCtus = i; }
+  UInt      getBottomEdgePosInCtus () const              { return m_bottomEdgePosInCtus; }
+  Void      setFirstCtuRsAddr      ( UInt i )            { m_firstCtuRsAddr = i; }
+  UInt      getFirstCtuRsAddr      () const              { return m_firstCtuRsAddr; }
+};
+
+/// picture symbol class
+/// Holds per-picture coding structures: frame geometry in CTUs, the slice
+/// list, the per-CTU data array, the tile grid, the tile-scan <-> raster-scan
+/// CTU address maps, and per-CTU SAO parameters.
+class TComPicSym
+{
+private:
+  UInt          m_frameWidthInCtus;
+  UInt          m_frameHeightInCtus;
+
+  UInt          m_uiMaxCUWidth;
+  UInt          m_uiMaxCUHeight;
+  UInt          m_uiMinCUWidth;
+  UInt          m_uiMinCUHeight;
+
+  UChar         m_uhTotalDepth;       ///< max. depth
+  UInt          m_numPartitionsInCtu;
+  UInt          m_numPartInCtuWidth;
+  UInt          m_numPartInCtuHeight;
+  UInt          m_numCtusInFrame;
+
+  TComSlice**   m_apcTComSlice;       ///< slice pointer array, grown by allocateNewSlice()
+  UInt          m_uiNumAllocatedSlice;
+  TComDataCU**  m_pictureCtuArray;        ///< array of CU data.
+
+  Int           m_numTileColumnsMinus1;
+  Int           m_numTileRowsMinus1;
+  std::vector<TComTile> m_tileParameters;  ///< one entry per tile, indexed by tile index
+  UInt*         m_ctuTsToRsAddrMap;    ///< for a given TS (Tile-Scan; coding order) address, returns the RS (Raster-Scan) address. cf CtbAddrTsToRs in specification.
+  UInt*         m_puiTileIdxMap;       ///< the map of the tile index relative to CTU raster scan address
+  UInt*         m_ctuRsToTsAddrMap;    ///< for a given RS (Raster-Scan) address, returns the TS (Tile-Scan; coding order) address. cf CtbAddrRsToTs in specification.
+
+  SAOBlkParam *m_saoBlkParams;         ///< per-CTU SAO parameters
+
+public:
+  Void               create  ( ChromaFormat chromaFormatIDC, Int iPicWidth, Int iPicHeight, UInt uiMaxWidth, UInt uiMaxHeight, UInt uiMaxDepth );
+  Void               destroy ();
+
+  TComPicSym  ();
+  TComSlice*         getSlice(UInt i)                                      { return m_apcTComSlice[i];             }
+  const TComSlice*   getSlice(UInt i) const                                { return m_apcTComSlice[i];             }
+  UInt               getFrameWidthInCtus() const                           { return m_frameWidthInCtus;            }
+  UInt               getFrameHeightInCtus() const                          { return m_frameHeightInCtus;           }
+  UInt               getMinCUWidth() const                                 { return m_uiMinCUWidth;                }
+  UInt               getMinCUHeight() const                                { return m_uiMinCUHeight;               }
+  UInt               getNumberOfCtusInFrame() const                        { return m_numCtusInFrame;              }
+  TComDataCU*        getCtu( UInt ctuRsAddr )                              { return m_pictureCtuArray[ctuRsAddr];  }
+  const TComDataCU*  getCtu( UInt ctuRsAddr ) const                        { return m_pictureCtuArray[ctuRsAddr];  }
+
+  Void               setSlice(TComSlice* p, UInt i)                        { m_apcTComSlice[i] = p;           }
+  UInt               getNumAllocatedSlice() const                          { return m_uiNumAllocatedSlice;         }
+  Void               allocateNewSlice();
+  Void               clearSliceBuffer();
+  UInt               getNumPartitionsInCtu() const                         { return m_numPartitionsInCtu;   }
+  UInt               getNumPartInCtuWidth() const                          { return m_numPartInCtuWidth;    }
+  UInt               getNumPartInCtuHeight() const                         { return m_numPartInCtuHeight;   }
+  Void               setNumTileColumnsMinus1( Int i )                      { m_numTileColumnsMinus1 = i;    }
+  Int                getNumTileColumnsMinus1() const                       { return m_numTileColumnsMinus1; }
+  Void               setNumTileRowsMinus1( Int i )                         { m_numTileRowsMinus1 = i;       }
+  Int                getNumTileRowsMinus1() const                          { return m_numTileRowsMinus1;    }
+  Int                getNumTiles() const                                   { return (m_numTileRowsMinus1+1)*(m_numTileColumnsMinus1+1); }
+  TComTile*          getTComTile  ( UInt tileIdx )                         { return &(m_tileParameters[tileIdx]); }
+  const TComTile*    getTComTile  ( UInt tileIdx ) const                   { return &(m_tileParameters[tileIdx]); }
+  Void               setCtuTsToRsAddrMap( Int ctuTsAddr, Int ctuRsAddr )   { *(m_ctuTsToRsAddrMap + ctuTsAddr) = ctuRsAddr; }
+  // NOTE(review): the two address-map getters below clamp out-of-range
+  // addresses to index m_numCtusInFrame, implying each map presumably holds
+  // numCtusInFrame+1 entries with a sentinel in the last slot -- confirm in
+  // the allocation code. The Int/UInt comparison also promotes the signed
+  // address to unsigned, so negative addresses hit the sentinel as well.
+  UInt               getCtuTsToRsAddrMap( Int ctuTsAddr ) const            { return *(m_ctuTsToRsAddrMap + (ctuTsAddr>=m_numCtusInFrame ? m_numCtusInFrame : ctuTsAddr)); }
+  UInt               getTileIdxMap( Int ctuRsAddr ) const                  { return *(m_puiTileIdxMap + ctuRsAddr); }
+  Void               setCtuRsToTsAddrMap( Int ctuRsAddr, Int ctuTsOrder )  { *(m_ctuRsToTsAddrMap + ctuRsAddr) = ctuTsOrder; }
+  UInt               getCtuRsToTsAddrMap( Int ctuRsAddr ) const            { return *(m_ctuRsToTsAddrMap + (ctuRsAddr>=m_numCtusInFrame ? m_numCtusInFrame : ctuRsAddr)); }
+  Void               initTiles(TComPPS *pps);
+
+  Void               initCtuTsRsAddrMaps();
+  SAOBlkParam*       getSAOBlkParam()                                      { return m_saoBlkParams;}
+  const SAOBlkParam* getSAOBlkParam() const                                { return m_saoBlkParams;}
+  Void               deriveLoopFilterBoundaryAvailibility(Int ctuRsAddr,
+                                                          Bool& isLeftAvail, Bool& isRightAvail, Bool& isAboveAvail, Bool& isBelowAvail,
+                                                          Bool& isAboveLeftAvail, Bool& isAboveRightAvail, Bool& isBelowLeftAvail, Bool& isBelowRightAvail);
+protected:
+  UInt               xCalculateNextCtuRSAddr( UInt uiCurrCtuRSAddr );
+
+};// END CLASS DEFINITION TComPicSym
+
+//! \}
+
+#endif // __TCOMPICSYM__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPicYuv.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,257 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComPicYuv.cpp
+    \brief    picture YUV buffer class
+*/
+
+#include <cstdlib>
+#include <assert.h>
+#include <memory.h>
+
+#ifdef __APPLE__
+#include <malloc/malloc.h>
+#else
+#include <malloc.h>
+#endif
+
+#include "TComPicYuv.h"
+#include "TLibVideoIO/TVideoIOYuv.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// Construct an empty picture buffer: every plane pointer and per-channel
+// offset table is NULL, and the border has not been extended yet. Actual
+// allocation happens in create().
+TComPicYuv::TComPicYuv()
+{
+  for (UInt comp = 0; comp < MAX_NUM_COMPONENT; comp++)
+  {
+    m_apiPicBuf[comp] = NULL; // full buffer, including margins
+    m_piPicOrg[comp]  = NULL; // top-left sample of the visible picture area
+  }
+
+  for (UInt chType = 0; chType < MAX_NUM_CHANNEL_TYPE; chType++)
+  {
+    m_ctuOffsetInBuffer[chType]   = 0;
+    m_subCuOffsetInBuffer[chType] = 0;
+  }
+
+  m_bIsBorderExtended = false;
+}
+
+
+
+
+TComPicYuv::~TComPicYuv()
+{
+  // Release any buffers still held, so a picture that was never explicitly
+  // destroy()ed does not leak. destroy() is idempotent (it NULLs every
+  // pointer it frees), so this is safe even after an explicit destroy().
+  destroy();
+}
+
+
+
+
+/** Allocate the picture planes (with margins) and the per-channel-type CTU
+ *  and sub-CU offset lookup tables.
+ *  \param iPicWidth       picture width in luma samples
+ *  \param iPicHeight      picture height in luma samples
+ *  \param chromaFormatIDC chroma subsampling format
+ *  \param uiMaxCUWidth    CTU width in luma samples
+ *  \param uiMaxCUHeight   CTU height in luma samples
+ *  \param uiMaxCUDepth    maximum CU partitioning depth
+ */
+Void TComPicYuv::create( const Int  iPicWidth,    const  Int iPicHeight,    const ChromaFormat chromaFormatIDC,
+                         const UInt uiMaxCUWidth, const UInt uiMaxCUHeight, const UInt uiMaxCUDepth )
+{
+  m_iPicWidth         = iPicWidth;
+  m_iPicHeight        = iPicHeight;
+  m_chromaFormatIDC   = chromaFormatIDC;
+  // NOTE(review): the margins are derived from the globals g_uiMaxCUWidth /
+  // g_uiMaxCUHeight rather than the uiMaxCUWidth / uiMaxCUHeight parameters
+  // -- confirm the globals always agree with the arguments.
+  m_iMarginX          = g_uiMaxCUWidth  + 16; // for 16-byte alignment
+  m_iMarginY          = g_uiMaxCUHeight + 16;  // margin for 8-tap filter and infinite padding
+  m_bIsBorderExtended = false;
+
+  // assign the picture arrays and set up the ptr to the top left of the original picture
+  {
+    Int chan=0;
+    for(; chan<getNumberValidComponents(); chan++)
+    {
+      const ComponentID ch=ComponentID(chan);
+      // NOTE(review): xMalloc result is not checked for NULL here.
+      m_apiPicBuf[chan] = (Pel*)xMalloc( Pel, getStride(ch)       * getTotalHeight(ch));
+      m_piPicOrg[chan]  = m_apiPicBuf[chan] + (m_iMarginY >> getComponentScaleY(ch))   * getStride(ch)       + (m_iMarginX >> getComponentScaleX(ch));
+    }
+    // Components not present in this chroma format stay NULL.
+    for(;chan<MAX_NUM_COMPONENT; chan++)
+    {
+      m_apiPicBuf[chan] = NULL;
+      m_piPicOrg[chan]  = NULL;
+    }
+  }
+
+
+  // Number of CTUs in each direction, rounding up at the picture edge.
+  const Int numCuInWidth  = m_iPicWidth  / uiMaxCUWidth  + (m_iPicWidth  % uiMaxCUWidth  != 0);
+  const Int numCuInHeight = m_iPicHeight / uiMaxCUHeight + (m_iPicHeight % uiMaxCUHeight != 0);
+  // Build the offset tables for the two channel types (0 = luma, 1 = chroma);
+  // the literal 2 presumably equals MAX_NUM_CHANNEL_TYPE, the size of the
+  // member arrays being filled -- confirm.
+  for(Int chan=0; chan<2; chan++)
+  {
+    const ComponentID ch=ComponentID(chan);
+    const Int ctuHeight=uiMaxCUHeight>>getComponentScaleY(ch);
+    const Int ctuWidth=uiMaxCUWidth>>getComponentScaleX(ch);
+    const Int stride = getStride(ch);
+
+    // Sample offset of each CTU's top-left corner within the plane.
+    m_ctuOffsetInBuffer[chan] = new Int[numCuInWidth * numCuInHeight];
+
+    for (Int cuRow = 0; cuRow < numCuInHeight; cuRow++)
+      for (Int cuCol = 0; cuCol < numCuInWidth; cuCol++)
+        m_ctuOffsetInBuffer[chan][cuRow * numCuInWidth + cuCol] = stride * cuRow * ctuHeight + cuCol * ctuWidth;
+
+    // Sample offset of each minimal sub-block relative to its CTU's corner.
+    m_subCuOffsetInBuffer[chan] = new Int[(size_t)1 << (2 * uiMaxCUDepth)];
+
+    const Int numSubBlockPartitions=(1<<uiMaxCUDepth);
+    const Int minSubBlockHeight    =(ctuHeight >> uiMaxCUDepth);
+    const Int minSubBlockWidth     =(ctuWidth  >> uiMaxCUDepth);
+
+    for (Int buRow = 0; buRow < numSubBlockPartitions; buRow++)
+      for (Int buCol = 0; buCol < numSubBlockPartitions; buCol++)
+        m_subCuOffsetInBuffer[chan][(buRow << uiMaxCUDepth) + buCol] = stride  * buRow * minSubBlockHeight + buCol * minSubBlockWidth;
+  }
+  return;
+}
+
+
+
+/** Release all buffers allocated by create(). Idempotent: every freed
+ *  pointer is reset to NULL, so calling destroy() twice is harmless.
+ */
+Void TComPicYuv::destroy()
+{
+  // m_piPicOrg points into m_apiPicBuf, so only the buffer pointer is freed;
+  // the origin pointer is simply cleared.
+  for(Int chan=0; chan<MAX_NUM_COMPONENT; chan++)
+  {
+    m_piPicOrg[chan] = NULL;
+
+    if( m_apiPicBuf[chan] )
+    {
+      xFree( m_apiPicBuf[chan] );
+      m_apiPicBuf[chan] = NULL;
+    }
+  }
+
+  // delete[] of NULL is a no-op, so no guard is needed here. (The original
+  // single-line "if (p) delete[] p; p = NULL;" form was misleading: the
+  // NULL assignment was never part of the if.)
+  for(UInt chan=0; chan<MAX_NUM_CHANNEL_TYPE; chan++)
+  {
+    delete[] m_ctuOffsetInBuffer[chan];
+    m_ctuOffsetInBuffer[chan] = NULL;
+    delete[] m_subCuOffsetInBuffer[chan];
+    m_subCuOffsetInBuffer[chan] = NULL;
+  }
+}
+
+
+
+/** Copy the full plane buffers (including margins) of this picture into
+ *  pcPicYuvDst.
+ *  NOTE(review): the memcpy uses this picture's stride and total height for
+ *  the destination as well, so the destination must have been create()d with
+ *  identical dimensions, chroma format AND margins; the asserts only check
+ *  width/height/chroma format -- confirm the margins always match.
+ *  \param pcPicYuvDst  destination picture (must be pre-allocated)
+ */
+Void  TComPicYuv::copyToPic (TComPicYuv*  pcPicYuvDst) const
+{
+  assert( m_iPicWidth  == pcPicYuvDst->getWidth(COMPONENT_Y)  );
+  assert( m_iPicHeight == pcPicYuvDst->getHeight(COMPONENT_Y) );
+  assert( m_chromaFormatIDC == pcPicYuvDst->getChromaFormat() );
+
+  for(Int chan=0; chan<getNumberValidComponents(); chan++)
+  {
+    const ComponentID ch=ComponentID(chan);
+    ::memcpy ( pcPicYuvDst->getBuf(ch), m_apiPicBuf[ch], sizeof (Pel) * getStride(ch) * getTotalHeight(ch));
+  }
+  return;
+}
+
+
+/** Pad each plane into its margins: first the left/right margins are filled
+ *  by replicating the edge columns, then the top/bottom margins by copying
+ *  the (already horizontally padded) first and last rows. Runs at most once
+ *  per picture, guarded by m_bIsBorderExtended.
+ */
+Void TComPicYuv::extendPicBorder ()
+{
+  if ( m_bIsBorderExtended ) return;
+
+  for(Int chan=0; chan<getNumberValidComponents(); chan++)
+  {
+    const ComponentID ch=ComponentID(chan);
+    Pel *piTxt=getAddr(ch); // piTxt = point to (0,0) of image within bigger picture.
+    const Int iStride=getStride(ch);
+    const Int iWidth=getWidth(ch);
+    const Int iHeight=getHeight(ch);
+    const Int iMarginX=getMarginX(ch);
+    const Int iMarginY=getMarginY(ch);
+
+    Pel*  pi = piTxt;
+    // do left and right margins: replicate the first/last sample of each row
+    for (Int y = 0; y < iHeight; y++)
+    {
+      for (Int x = 0; x < iMarginX; x++ )
+      {
+        pi[ -iMarginX + x ] = pi[0];
+        pi[    iWidth + x ] = pi[iWidth-1];
+      }
+      pi += iStride;
+    }
+
+    // pi is now the (0,height) (bottom left of image within bigger picture
+    pi -= (iStride + iMarginX);
+    // pi is now the (-marginX, height-1)
+    // Bottom margin: replicate the padded last row downwards.
+    for (Int y = 0; y < iMarginY; y++ )
+    {
+      ::memcpy( pi + (y+1)*iStride, pi, sizeof(Pel)*(iWidth + (iMarginX<<1)) );
+    }
+
+    // pi is still (-marginX, height-1)
+    pi -= ((iHeight-1) * iStride);
+    // pi is now (-marginX, 0)
+    // Top margin: replicate the padded first row upwards.
+    for (Int y = 0; y < iMarginY; y++ )
+    {
+      ::memcpy( pi - (y+1)*iStride, pi, sizeof(Pel)*(iWidth + (iMarginX<<1)) );
+    }
+  }
+
+  m_bIsBorderExtended = true;
+}
+
+
+
+// NOTE: This function is never called, but may be useful for developers.
+/** Dump the picture's valid planes to a raw 8-bit file for debugging.
+ *  Higher-bit-depth samples are rounded and clipped down to 8 bits.
+ *  \param pFileName  output file name
+ *  \param bAdd       append to the file instead of overwriting it
+ */
+Void TComPicYuv::dump (const Char* pFileName, Bool bAdd) const
+{
+  FILE* pFile = fopen( pFileName, bAdd ? "ab" : "wb" );
+  if (pFile == NULL)
+  {
+    // Debug-only helper: give up quietly instead of dereferencing NULL
+    // below when the path is unwritable.
+    return;
+  }
+
+  for(Int chan = 0; chan < getNumberValidComponents(); chan++)
+  {
+    const ComponentID  ch     = ComponentID(chan);
+    const Int          shift  = g_bitDepth[toChannelType(ch)] - 8;
+    const Int          offset = (shift>0)?(1<<(shift-1)):0;  // rounding offset for the down-shift
+    const Pel         *pi     = getAddr(ch);
+    const Int          stride = getStride(ch);
+    const Int          height = getHeight(ch);
+    const Int          width  = getWidth(ch);
+
+    for (Int y = 0; y < height; y++ )
+    {
+      for (Int x = 0; x < width; x++ )
+      {
+        UChar uc = (UChar)Clip3<Pel>(0, 255, (pi[x]+offset)>>shift);
+        fwrite( &uc, sizeof(UChar), 1, pFile );
+      }
+      pi += stride;
+    }
+  }
+
+  fclose(pFile);
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPicYuv.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,166 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComPicYuv.h
+    \brief    picture YUV buffer class (header)
+*/
+
+#ifndef __TCOMPICYUV__
+#define __TCOMPICYUV__
+
+#include <stdio.h>
+#include "CommonDef.h"
+#include "TComRom.h"
+#include "TComChromaFormat.h"
+#include "SEI.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// picture YUV buffer class
+class TComPicYuv
+{
+private:
+
+  // ------------------------------------------------------------------------------------------------
+  //  YUV buffer
+  // ------------------------------------------------------------------------------------------------
+
+  Pel*  m_apiPicBuf[MAX_NUM_COMPONENT];             ///< Buffer (including margin)
+
+  Pel*  m_piPicOrg[MAX_NUM_COMPONENT];              ///< m_apiPicBufY + m_iMarginLuma*getStride() + m_iMarginLuma
+
+  // ------------------------------------------------------------------------------------------------
+  //  Parameter for general YUV buffer usage
+  // ------------------------------------------------------------------------------------------------
+
+  Int   m_iPicWidth;                                ///< Width of picture in pixels
+  Int   m_iPicHeight;                               ///< Height of picture in pixels
+  ChromaFormat m_chromaFormatIDC;                   ///< Chroma Format
+
+  Int*  m_ctuOffsetInBuffer[MAX_NUM_CHANNEL_TYPE];  ///< Gives an offset in the buffer for a given CTU (and channel)
+  Int*  m_subCuOffsetInBuffer[MAX_NUM_CHANNEL_TYPE];///< Gives an offset in the buffer for a given sub-CU (and channel), relative to start of CTU
+
+  Int   m_iMarginX;                                 ///< margin of Luma channel (chroma's may be smaller, depending on ratio)
+  Int   m_iMarginY;                                 ///< margin of Luma channel (chroma's may be smaller, depending on ratio)
+
+  Bool  m_bIsBorderExtended;
+
+public:
+               TComPicYuv         ();
+  virtual     ~TComPicYuv         ();
+
+  // ------------------------------------------------------------------------------------------------
+  //  Memory management
+  // ------------------------------------------------------------------------------------------------
+
+  Void          create            (const Int iPicWidth,
+                                   const Int iPicHeight,
+                                   const ChromaFormat chromaFormatIDC,
+                                   const UInt uiMaxCUWidth,
+                                   const UInt uiMaxCUHeight,
+                                   const UInt uiMaxCUDepth );
+
+  Void          destroy           ();
+
+  // The following have been removed - Use CHROMA_400 in the above function call.
+  //Void  createLuma  ( Int iPicWidth, Int iPicHeight, UInt uiMaxCUWidth, UInt uiMaxCUHeight, UInt uhMaxCUDepth );
+  //Void  destroyLuma ();
+
+  // ------------------------------------------------------------------------------------------------
+  //  Get information of picture
+  // ------------------------------------------------------------------------------------------------
+
+  Int           getWidth          (const ComponentID id) const { return  m_iPicWidth >> getComponentScaleX(id);   }
+  Int           getHeight         (const ComponentID id) const { return  m_iPicHeight >> getComponentScaleY(id);  }
+  ChromaFormat  getChromaFormat   ()                     const { return m_chromaFormatIDC; }
+  UInt          getNumberValidComponents() const { return ::getNumberValidComponents(m_chromaFormatIDC); }
+
+  Int           getStride         (const ComponentID id) const { return ((m_iPicWidth     ) + (m_iMarginX  <<1)) >> getComponentScaleX(id); }
+  Int           getTotalHeight    (const ComponentID id) const { return ((m_iPicHeight    ) + (m_iMarginY  <<1)) >> getComponentScaleY(id); }
+
+  Int           getMarginX        (const ComponentID id) const { return m_iMarginX >> getComponentScaleX(id);  }
+  Int           getMarginY        (const ComponentID id) const { return m_iMarginY >> getComponentScaleY(id);  }
+
+  // ------------------------------------------------------------------------------------------------
+  //  Access function for picture buffer
+  // ------------------------------------------------------------------------------------------------
+
+  //  Access starting position of picture buffer with margin
+  Pel*          getBuf            (const ComponentID ch)       { return  m_apiPicBuf[ch];   }
+
+  //  Access starting position of original picture
+  Pel*          getAddr           (const ComponentID ch)       { return  m_piPicOrg[ch];   }
+  const Pel*    getAddr           (const ComponentID ch) const { return  m_piPicOrg[ch];   }
+
+  //  Access starting position of original picture for specific coding unit (CU) or partition unit (PU)
+  Pel*          getAddr           (const ComponentID ch, const Int ctuRSAddr )       { return m_piPicOrg[ch] + m_ctuOffsetInBuffer[ch==0?0:1][ ctuRSAddr ]; }
+  const Pel*    getAddr           (const ComponentID ch, const Int ctuRSAddr ) const { return m_piPicOrg[ch] + m_ctuOffsetInBuffer[ch==0?0:1][ ctuRSAddr ]; }
+  Pel*          getAddr           (const ComponentID ch, const Int ctuRSAddr, const Int uiAbsZorderIdx )
+                                     { return m_piPicOrg[ch] + m_ctuOffsetInBuffer[ch==0?0:1][ctuRSAddr] + m_subCuOffsetInBuffer[ch==0?0:1][g_auiZscanToRaster[uiAbsZorderIdx]]; }
+  const Pel*    getAddr           (const ComponentID ch, const Int ctuRSAddr, const Int uiAbsZorderIdx ) const
+                                     { return m_piPicOrg[ch] + m_ctuOffsetInBuffer[ch==0?0:1][ctuRSAddr] + m_subCuOffsetInBuffer[ch==0?0:1][g_auiZscanToRaster[uiAbsZorderIdx]]; }
+
+  UInt          getComponentScaleX(const ComponentID id) const { return ::getComponentScaleX(id, m_chromaFormatIDC); }
+  UInt          getComponentScaleY(const ComponentID id) const { return ::getComponentScaleY(id, m_chromaFormatIDC); }
+
+  // ------------------------------------------------------------------------------------------------
+  //  Miscellaneous
+  // ------------------------------------------------------------------------------------------------
+
+  //  Copy function to picture
+  Void          copyToPic         ( TComPicYuv*  pcPicYuvDst ) const ;
+
+  //  Extend function of picture buffer
+  Void          extendPicBorder   ();
+
+  //  Dump picture
+  Void          dump              (const Char* pFileName, Bool bAdd = false) const ;
+
+  // Set border extension flag
+  Void          setBorderExtension(Bool b) { m_bIsBorderExtended = b; }
+};// END CLASS DEFINITION TComPicYuv
+
+
+// These functions now return the length of the digest strings.
+UInt calcChecksum(const TComPicYuv& pic, TComDigest &digest);
+UInt calcCRC     (const TComPicYuv& pic, TComDigest &digest);
+UInt calcMD5     (const TComPicYuv& pic, TComDigest &digest);
+std::string digestToString(const TComDigest &digest, Int numChar);
+//! \}
+
+#endif // __TCOMPICYUV__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPicYuvMD5.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,222 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "TComPicYuv.h"
+#include "libmd5/MD5.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+/**
+ * Update md5 using n samples from plane, each sample is adjusted to
+ * OUTBIT_BITDEPTH_DIV8.
+ * Each Pel is serialized little-endian using OUTPUT_BITDEPTH_DIV8 bytes.
+ * The caller must guarantee n <= 64 / OUTPUT_BITDEPTH_DIV8, otherwise the
+ * local packing buffer below overflows.
+ */
+template<UInt OUTPUT_BITDEPTH_DIV8>
+static Void md5_block(MD5& md5, const Pel* plane, UInt n)
+{
+  /* create a 64 byte buffer for packing Pel's into */
+  UChar buf[64/OUTPUT_BITDEPTH_DIV8][OUTPUT_BITDEPTH_DIV8];
+  for (UInt i = 0; i < n; i++)
+  {
+    Pel pel = plane[i];
+    /* perform bitdepth and endian conversion */
+    for (UInt d = 0; d < OUTPUT_BITDEPTH_DIV8; d++)
+    {
+      buf[i][d] = pel >> (d*8);
+    }
+  }
+  md5.update((UChar*)buf, n * OUTPUT_BITDEPTH_DIV8);
+}
+
+/**
+ * Update md5 with all samples in plane in raster order, each sample
+ * is adjusted to OUTBIT_BITDEPTH_DIV8.
+ * The plane is fed to md5_block in chunks of N samples plus one final
+ * partial chunk per row.
+ */
+template<UInt OUTPUT_BITDEPTH_DIV8>
+static Void md5_plane(MD5& md5, const Pel* plane, UInt width, UInt height, UInt stride)
+{
+  /* N is the number of samples to process per md5 update.
+   * All N samples must fit in buf */
+  // N = 32 satisfies md5_block's bound for both instantiations used here
+  // (64/1 = 64 and 64/2 = 32).
+  UInt N = 32;
+  UInt width_modN = width % N;
+  UInt width_less_modN = width - width_modN;
+
+  for (UInt y = 0; y < height; y++)
+  {
+    /* convert pels into unsigned chars in little endian byte order.
+     * NB, for 8bit data, data is truncated to 8bits. */
+    for (UInt x = 0; x < width_less_modN; x += N)
+      md5_block<OUTPUT_BITDEPTH_DIV8>(md5, &plane[y*stride + x], N);
+
+    /* mop up any of the remaining line */
+    md5_block<OUTPUT_BITDEPTH_DIV8>(md5, &plane[y*stride + width_less_modN], width_modN);
+  }
+}
+
+
+/** Compute a 16-bit CRC over one plane and append the two CRC bytes
+ *  (high byte first) to digest.
+ *  Bit-exact algorithm visible below: polynomial 0x1021, initial value
+ *  0xffff, MSB-first per sample byte, with 16 zero bits appended at the end.
+ *  For bit depths > 8 the second (high) byte of each sample is included too.
+ *  \return number of bytes appended to the digest (always 2)
+ */
+UInt compCRC(Int bitdepth, const Pel* plane, UInt width, UInt height, UInt stride, TComDigest &digest)
+{
+  UInt crcMsb;
+  UInt bitVal;
+  UInt crcVal = 0xffff;
+  UInt bitIdx;
+  for (UInt y = 0; y < height; y++)
+  {
+    for (UInt x = 0; x < width; x++)
+    {
+      // take CRC of first pictureData byte
+      for(bitIdx=0; bitIdx<8; bitIdx++)
+      {
+        crcMsb = (crcVal >> 15) & 1;
+        bitVal = (plane[y*stride+x] >> (7 - bitIdx)) & 1;
+        crcVal = (((crcVal << 1) + bitVal) & 0xffff) ^ (crcMsb * 0x1021);
+      }
+      // take CRC of second pictureData byte if bit depth is greater than 8-bits
+      if(bitdepth > 8)
+      {
+        for(bitIdx=0; bitIdx<8; bitIdx++)
+        {
+          crcMsb = (crcVal >> 15) & 1;
+          bitVal = (plane[y*stride+x] >> (15 - bitIdx)) & 1;
+          crcVal = (((crcVal << 1) + bitVal) & 0xffff) ^ (crcMsb * 0x1021);
+        }
+      }
+    }
+  }
+  // Flush the register with 16 zero bits (CRC augmentation).
+  for(bitIdx=0; bitIdx<16; bitIdx++)
+  {
+    crcMsb = (crcVal >> 15) & 1;
+    crcVal = ((crcVal << 1) & 0xffff) ^ (crcMsb * 0x1021);
+  }
+
+  digest.hash.push_back((crcVal>>8)  & 0xff);
+  digest.hash.push_back( crcVal      & 0xff);
+  return 2;
+}
+
+/** Append a 2-byte CRC for every valid component of pic to digest.
+ *  \return length in bytes of a single component's CRC (2)
+ */
+UInt calcCRC(const TComPicYuv& pic, TComDigest &digest)
+{
+  digest.hash.clear();
+  UInt digestLen = 0;
+
+  for (Int comp = 0; comp < pic.getNumberValidComponents(); comp++)
+  {
+    const ComponentID compID = ComponentID(comp);
+    digestLen = compCRC(g_bitDepth[toChannelType(compID)],
+                        pic.getAddr(compID),
+                        pic.getWidth(compID), pic.getHeight(compID),
+                        pic.getStride(compID), digest);
+  }
+
+  return digestLen;
+}
+
+/** Accumulate a 32-bit position-dependent checksum over one plane and append
+ *  its four bytes (most significant first) to digest.
+ *  \return number of bytes appended to the digest (always 4)
+ */
+UInt compChecksum(Int bitdepth, const Pel* plane, UInt width, UInt height, UInt stride, TComDigest &digest)
+{
+  UInt checksum = 0;
+
+  for (UInt row = 0; row < height; row++)
+  {
+    const Pel* line = plane + row * stride;
+    for (UInt col = 0; col < width; col++)
+    {
+      // Position-dependent mask mixes the low and high bytes of (x, y).
+      UChar xorMask = (col & 0xff) ^ (row & 0xff) ^ (col >> 8) ^ (row >> 8);
+      checksum = (checksum + ((line[col] & 0xff) ^ xorMask)) & 0xffffffff;
+
+      // Include the high sample byte for bit depths above 8.
+      if (bitdepth > 8)
+      {
+        checksum = (checksum + ((line[col] >> 8) ^ xorMask)) & 0xffffffff;
+      }
+    }
+  }
+
+  digest.hash.push_back((checksum >> 24) & 0xff);
+  digest.hash.push_back((checksum >> 16) & 0xff);
+  digest.hash.push_back((checksum >>  8) & 0xff);
+  digest.hash.push_back( checksum        & 0xff);
+  return 4;
+}
+
+/** Append a 4-byte checksum for every valid component of pic to digest.
+ *  \return length in bytes of a single component's checksum (4)
+ */
+UInt calcChecksum(const TComPicYuv& pic, TComDigest &digest)
+{
+  digest.hash.clear();
+  UInt digestLen = 0;
+
+  for (Int comp = 0; comp < pic.getNumberValidComponents(); comp++)
+  {
+    const ComponentID compID = ComponentID(comp);
+    digestLen = compChecksum(g_bitDepth[toChannelType(compID)],
+                             pic.getAddr(compID),
+                             pic.getWidth(compID), pic.getHeight(compID),
+                             pic.getStride(compID), digest);
+  }
+
+  return digestLen;
+}
+/**
+ * Calculate the MD5sum of pic, storing the result in digest.
+ * MD5 calculation is performed on Y' then Cb, then Cr; each in raster order.
+ * Pel data is inserted into the MD5 function in little-endian byte order,
+ * using sufficient bytes to represent the picture bitdepth.  Eg, 10bit data
+ * uses little-endian two byte words; 8bit data uses single byte words.
+ */
+UInt calcMD5(const TComPicYuv& pic, TComDigest &digest)
+{
+  /* choose an md5_plane packing function based on the system bitdepth */
+  // NOTE(review): casting distinct template instantiations through a common
+  // function-pointer type; the signatures match, but confirm this is
+  // portable on the supported toolchains.
+  typedef Void (*MD5PlaneFunc)(MD5&, const Pel*, UInt, UInt, UInt);
+  MD5PlaneFunc md5_plane_func;
+
+  MD5 md5[MAX_NUM_COMPONENT];
+
+  digest.hash.clear();
+  for(Int chan=0; chan<pic.getNumberValidComponents(); chan++)
+  {
+    const ComponentID compID=ComponentID(chan);
+    // 1 byte per sample for <=8-bit data, 2 bytes (little-endian) otherwise.
+    md5_plane_func = g_bitDepth[toChannelType(compID)] <= 8 ? (MD5PlaneFunc)md5_plane<1> : (MD5PlaneFunc)md5_plane<2>;
+    // NOTE(review): MD5_DIGEST_STRING_LENGTH is used here as the raw digest
+    // byte count -- confirm it equals 16 despite the "STRING" in the name.
+    UChar tmp_digest[MD5_DIGEST_STRING_LENGTH];
+    md5_plane_func(md5[compID], pic.getAddr(compID), pic.getWidth(compID), pic.getHeight(compID), pic.getStride(compID));
+    md5[compID].finalize(tmp_digest);
+    for(UInt i=0; i<MD5_DIGEST_STRING_LENGTH; i++)
+    {
+      digest.hash.push_back(tmp_digest[i]);
+    }
+  }
+  // 16 = length in bytes of one component's MD5 digest.
+  return 16;
+}
+
+/** Render a digest as lowercase hex, inserting a ',' between every group of
+ *  numChar bytes.
+ *  \param digest  raw digest bytes
+ *  \param numChar group size in bytes; non-positive values disable grouping
+ *  \return hex string, e.g. "deadbeef,0102..."
+ */
+std::string digestToString(const TComDigest &digest, Int numChar)
+{
+  static const Char* hex = "0123456789abcdef";
+  std::string result;
+
+  for(Int pos=0; pos<Int(digest.hash.size()); pos++)
+  {
+    // Guard numChar > 0: the original modulo would divide by zero for a
+    // non-positive group size.
+    if (numChar > 0 && (pos % numChar) == 0 && pos != 0)
+    {
+      result += ',';
+    }
+    result += hex[digest.hash[pos] >> 4];
+    result += hex[digest.hash[pos] & 0xf];
+  }
+
+  return result;
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPrediction.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,841 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComPrediction.cpp
+    \brief    prediction class
+*/
+
+#include <memory.h>
+#include "TComPrediction.h"
+#include "TComTU.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Tables
+// ====================================================================================================================
+
+// Per-channel-type table indexed by intra filter depth (entries for 4x4 .. 64x64).
+// NOTE(review): values appear to be thresholds controlling intra reference-sample
+// smoothing per block size -- confirm against the code that reads this table.
+const UChar TComPrediction::m_aucIntraFilter[MAX_NUM_CHANNEL_TYPE][MAX_INTRA_FILTER_DEPTHS] =
+{
+  { // Luma
+    10, //4x4
+    7, //8x8
+    1, //16x16
+    0, //32x32
+    10, //64x64
+  },
+  { // Chroma
+    10, //4xn
+    7, //8xn
+    1, //16xn
+    0, //32xn
+    10, //64xn
+  }
+
+};
+
+// ====================================================================================================================
+// Constructor / destructor / initialize
+// ====================================================================================================================
+
+// Constructor: clear all buffer pointers; actual allocation is deferred to
+// initTempBuff().  Uses NUM_PRED_BUF (not the literal 2) so the loop stays
+// consistent with destroy() and initTempBuff(), which iterate the same array
+// with NUM_PRED_BUF -- otherwise a future extra prediction buffer would be
+// left uninitialised here and freed as garbage in destroy().
+TComPrediction::TComPrediction()
+: m_pLumaRecBuffer(0)
+, m_iLumaRecStride(0)
+{
+  for(UInt ch=0; ch<MAX_NUM_COMPONENT; ch++)
+  {
+    for(UInt buf=0; buf<NUM_PRED_BUF; buf++)
+    {
+      m_piYuvExt[ch][buf] = NULL;
+    }
+  }
+}
+
+// Destructor: releases every buffer allocated by initTempBuff() via destroy().
+TComPrediction::~TComPrediction()
+{
+  destroy();
+}
+
+// Free all prediction working buffers.  Safe to call more than once: every
+// pointer is nulled after deletion and the stride is reset to 0.
+Void TComPrediction::destroy()
+{
+  // Per-component filtered/unfiltered intra reference sample buffers.
+  for(UInt ch=0; ch<MAX_NUM_COMPONENT; ch++)
+  {
+    for(UInt buf=0; buf<NUM_PRED_BUF; buf++)
+    {
+      delete [] m_piYuvExt[ch][buf];
+      m_piYuvExt[ch][buf] = NULL;
+    }
+  }
+
+  // Per-reference-list uni-prediction buffers.
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_acYuvPred[i].destroy();
+  }
+
+  m_cYuvPredTemp.destroy();
+
+  if( m_pLumaRecBuffer )
+  {
+    delete [] m_pLumaRecBuffer;
+    m_pLumaRecBuffer = 0;
+  }
+  m_iLumaRecStride = 0;
+
+  // Sub-pel interpolation scratch blocks.
+  for (UInt i = 0; i < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS; i++)
+  {
+    for (UInt j = 0; j < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS; j++)
+    {
+      m_filteredBlock[i][j].destroy();
+    }
+    m_filteredBlockTmp[i].destroy();
+  }
+}
+
+// Allocate (or re-allocate after a chroma-format change) all temporary
+// prediction buffers for the given chroma format.
+Void TComPrediction::initTempBuff(ChromaFormat chromaFormatIDC)
+{
+  // if it has been initialised before, but the chroma format has changed, release the memory and start again.
+  if( m_piYuvExt[COMPONENT_Y][PRED_BUF_UNFILTERED] != NULL && m_cYuvPredTemp.getChromaFormat()!=chromaFormatIDC)
+  {
+    destroy();
+  }
+
+  if( m_piYuvExt[COMPONENT_Y][PRED_BUF_UNFILTERED] == NULL ) // check if first is null (in which case, nothing initialised yet)
+  {
+    Int extWidth  = MAX_CU_SIZE + 16;
+    Int extHeight = MAX_CU_SIZE + 1;
+
+    // Scratch blocks for sub-pel interpolation; the +7 rows on the temp block
+    // leave room for the vertical filter taps.
+    for (UInt i = 0; i < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS; i++)
+    {
+      m_filteredBlockTmp[i].create(extWidth, extHeight + 7, chromaFormatIDC);
+      for (UInt j = 0; j < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS; j++)
+      {
+        m_filteredBlock[i][j].create(extWidth, extHeight, chromaFormatIDC);
+      }
+    }
+
+    // Intra reference sample buffers: one (2*CU+1)^2 plane per component and
+    // per filtered/unfiltered variant.
+    m_iYuvExtSize = (MAX_CU_SIZE*2+1) * (MAX_CU_SIZE*2+1);
+    for(UInt ch=0; ch<MAX_NUM_COMPONENT; ch++)
+    {
+      for(UInt buf=0; buf<NUM_PRED_BUF; buf++)
+      {
+        m_piYuvExt[ch][buf] = new Pel[ m_iYuvExtSize ];
+      }
+    }
+
+    // new structure
+    for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+    {
+      m_acYuvPred[i] .create( MAX_CU_SIZE, MAX_CU_SIZE, chromaFormatIDC );
+    }
+
+    m_cYuvPredTemp.create( MAX_CU_SIZE, MAX_CU_SIZE, chromaFormatIDC );
+  }
+
+
+  // Luma reconstruction buffer (half-CU stride plus one reference column/row).
+  if (m_iLumaRecStride != (MAX_CU_SIZE>>1) + 1)
+  {
+    m_iLumaRecStride =  (MAX_CU_SIZE>>1) + 1;
+    if (!m_pLumaRecBuffer)
+    {
+      m_pLumaRecBuffer = new Pel[ m_iLumaRecStride * m_iLumaRecStride ];
+    }
+  }
+}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+// Function for calculating DC value of the reference samples used in Intra prediction
+//NOTE: Bit-Limit - 25-bit source
+// Averages the above row and/or left column of reference samples (pSrc points
+// at the top-left sample of the block inside the reference array), with
+// round-to-nearest division.  Returns the fallback value stored at pSrc[-1]
+// when neither neighbour is available.
+Pel TComPrediction::predIntraGetPredValDC( const Pel* pSrc, Int iSrcStride, UInt iWidth, UInt iHeight, ChannelType channelType, ChromaFormat format, Bool bAbove, Bool bLeft )
+{
+  assert(iWidth > 0 && iHeight > 0);
+  Int iInd, iSum = 0;
+  Pel pDcVal;
+
+  if (bAbove)
+  {
+    // Sum the row of reference samples directly above the block.
+    for (iInd = 0;iInd < iWidth;iInd++)
+    {
+      iSum += pSrc[iInd-iSrcStride];
+    }
+  }
+  if (bLeft)
+  {
+    // Sum the column of reference samples directly left of the block.
+    for (iInd = 0;iInd < iHeight;iInd++)
+    {
+      iSum += pSrc[iInd*iSrcStride-1];
+    }
+  }
+
+  if (bAbove && bLeft)
+  {
+    // Rounding term iWidth == (iWidth+iHeight)/2 for the square blocks used here.
+    pDcVal = (iSum + iWidth) / (iWidth + iHeight);
+  }
+  else if (bAbove)
+  {
+    pDcVal = (iSum + iWidth/2) / iWidth;
+  }
+  else if (bLeft)
+  {
+    pDcVal = (iSum + iHeight/2) / iHeight;
+  }
+  else
+  {
+    pDcVal = pSrc[-1]; // Default DC value already calculated and placed in the prediction array if no neighbors are available
+  }
+
+  return pDcVal;
+}
+
+// Function for deriving the angular Intra predictions
+
+/** Function for deriving the simplified angular intra predictions.
+ * \param pSrc pointer to reconstructed sample array
+ * \param srcStride the stride of the reconstructed sample array
+ * \param rpDst reference to pointer for the prediction sample array
+ * \param dstStride the stride of the prediction sample array
+ * \param width the width of the block
+ * \param height the height of the block
+ * \param dirMode the intra prediction mode index
+ * \param blkAboveAvailable boolean indication if the block above is available
+ * \param blkLeftAvailable boolean indication if the block to the left is available
+ *
+ * This function derives the prediction samples for the angular mode based on the prediction direction indicated by
+ * the prediction mode index. The prediction direction is given by the displacement of the bottom row of the block and
+ * the reference row above the block in the case of vertical prediction or displacement of the rightmost column
+ * of the block and reference column left from the block in the case of the horizontal prediction. The displacement
+ * is signalled at 1/32 pixel accuracy. When projection of the predicted pixel falls inbetween reference samples,
+ * the predicted value for the pixel is linearly interpolated from the reference samples. All reference samples are taken
+ * from the extended main reference.
+ */
+//NOTE: Bit-Limit - 25-bit source
+Void TComPrediction::xPredIntraAng(       Int bitDepth,
+                                    const Pel* pSrc,     Int srcStride,
+                                          Pel* pTrueDst, Int dstStrideTrue,
+                                          UInt uiWidth, UInt uiHeight, ChannelType channelType, ChromaFormat format,
+                                          UInt dirMode, Bool blkAboveAvailable, Bool blkLeftAvailable
+                                  , const Bool bEnableEdgeFilters
+                                  )
+{
+  Int width=Int(uiWidth);
+  Int height=Int(uiHeight);
+
+  // Map the mode index to main prediction direction and angle
+  assert( dirMode != PLANAR_IDX ); //no planar
+  const Bool modeDC        = dirMode==DC_IDX;
+
+  // Do the DC prediction
+  if (modeDC)
+  {
+    const Pel dcval = predIntraGetPredValDC(pSrc, srcStride, width, height, channelType, format, blkAboveAvailable, blkLeftAvailable);
+
+    // Fill the whole block with the single DC value.
+    for (Int y=height;y>0;y--, pTrueDst+=dstStrideTrue)
+    {
+      for (Int x=0; x<width;) // width is always a multiple of 4.
+      {
+        pTrueDst[x++] = dcval;
+      }
+    }
+  }
+  else // Do angular predictions
+  {
+    const Bool       bIsModeVer         = (dirMode >= 18);
+    const Int        intraPredAngleMode = (bIsModeVer) ? (Int)dirMode - VER_IDX :  -((Int)dirMode - HOR_IDX);
+    const Int        absAngMode         = abs(intraPredAngleMode);
+    const Int        signAng            = intraPredAngleMode < 0 ? -1 : 1;
+    const Bool       edgeFilter         = bEnableEdgeFilters && isLuma(channelType) && (width <= MAXIMUM_INTRA_FILTERED_WIDTH) && (height <= MAXIMUM_INTRA_FILTERED_HEIGHT);
+
+    // Set bitshifts and scale the angle parameter to block size
+    static const Int angTable[9]    = {0,    2,    5,   9,  13,  17,  21,  26,  32};
+    static const Int invAngTable[9] = {0, 4096, 1638, 910, 630, 482, 390, 315, 256}; // (256 * 32) / Angle
+    Int invAngle                    = invAngTable[absAngMode];
+    Int absAng                      = angTable[absAngMode];
+    Int intraPredAngle              = signAng * absAng;
+
+    Pel* refMain;
+    Pel* refSide;
+
+    Pel  refAbove[2*MAX_CU_SIZE+1];
+    Pel  refLeft[2*MAX_CU_SIZE+1];
+
+    // Initialise the Main and Left reference array.
+    if (intraPredAngle < 0)
+    {
+      // Negative angle: the main reference may be indexed below 0, so place
+      // the references at an offset and extend the main reference leftwards
+      // by projecting side-reference samples with the inverse angle.
+      const Int refMainOffsetPreScale = (bIsModeVer ? height : width ) - 1;
+      const Int refMainOffset         = height - 1;
+      for (Int x=0;x<width+1;x++)
+      {
+        refAbove[x+refMainOffset] = pSrc[x-srcStride-1];
+      }
+      for (Int y=0;y<height+1;y++)
+      {
+        refLeft[y+refMainOffset] = pSrc[(y-1)*srcStride-1];
+      }
+      refMain = (bIsModeVer ? refAbove : refLeft)  + refMainOffset;
+      refSide = (bIsModeVer ? refLeft  : refAbove) + refMainOffset;
+
+      // Extend the Main reference to the left.
+      Int invAngleSum    = 128;       // rounding for (shift by 8)
+      for (Int k=-1; k>(refMainOffsetPreScale+1)*intraPredAngle>>5; k--)
+      {
+        invAngleSum += invAngle;
+        refMain[k] = refSide[invAngleSum>>8];
+      }
+    }
+    else
+    {
+      // Non-negative angle: copy the full double-length reference arrays.
+      for (Int x=0;x<2*width+1;x++)
+      {
+        refAbove[x] = pSrc[x-srcStride-1];
+      }
+      for (Int y=0;y<2*height+1;y++)
+      {
+        refLeft[y] = pSrc[(y-1)*srcStride-1];
+      }
+      refMain = bIsModeVer ? refAbove : refLeft ;
+      refSide = bIsModeVer ? refLeft  : refAbove;
+    }
+
+    // swap width/height if we are doing a horizontal mode:
+    // horizontal modes are predicted into a temporary buffer as if vertical,
+    // then transposed into the true destination at the end.
+    Pel tempArray[MAX_CU_SIZE*MAX_CU_SIZE];
+    const Int dstStride = bIsModeVer ? dstStrideTrue : MAX_CU_SIZE;
+    Pel *pDst = bIsModeVer ? pTrueDst : tempArray;
+    if (!bIsModeVer)
+    {
+      std::swap(width, height);
+    }
+
+    if (intraPredAngle == 0)  // pure vertical or pure horizontal
+    {
+      for (Int y=0;y<height;y++)
+      {
+        for (Int x=0;x<width;x++)
+        {
+          pDst[y*dstStride+x] = refMain[x+1];
+        }
+      }
+
+      if (edgeFilter)
+      {
+        // Filter the first column against the side reference to smooth the edge.
+        for (Int y=0;y<height;y++)
+        {
+          pDst[y*dstStride] = Clip3 (0, ((1 << bitDepth) - 1), pDst[y*dstStride] + (( refSide[y+1] - refSide[0] ) >> 1) );
+        }
+      }
+    }
+    else
+    {
+      Pel *pDsty=pDst;
+
+      // deltaPos accumulates the 1/32-pel displacement per row; its integer
+      // part selects the reference sample, the fractional part the blend weight.
+      for (Int y=0, deltaPos=intraPredAngle; y<height; y++, deltaPos+=intraPredAngle, pDsty+=dstStride)
+      {
+        const Int deltaInt   = deltaPos >> 5;
+        const Int deltaFract = deltaPos & (32 - 1);
+
+        if (deltaFract)
+        {
+          // Do linear filtering
+          const Pel *pRM=refMain+deltaInt+1;
+          Int lastRefMainPel=*pRM++;
+          for (Int x=0;x<width;pRM++,x++)
+          {
+            Int thisRefMainPel=*pRM;
+            pDsty[x+0] = (Pel) ( ((32-deltaFract)*lastRefMainPel + deltaFract*thisRefMainPel +16) >> 5 );
+            lastRefMainPel=thisRefMainPel;
+          }
+        }
+        else
+        {
+          // Just copy the integer samples
+          for (Int x=0;x<width; x++)
+          {
+            pDsty[x] = refMain[x+deltaInt+1];
+          }
+        }
+      }
+    }
+
+    // Flip the block if this is the horizontal mode
+    if (!bIsModeVer)
+    {
+      for (Int y=0; y<height; y++)
+      {
+        for (Int x=0; x<width; x++)
+        {
+          pTrueDst[x*dstStrideTrue] = pDst[x];
+        }
+        pTrueDst++;
+        pDst+=dstStride;
+      }
+    }
+  }
+}
+
+// Top-level intra prediction entry point.  Dispatches to lossless DPCM
+// (sample-adaptive prediction), planar, or angular/DC prediction, and applies
+// DC-edge filtering where required.  piOrg is only non-null on the encoder side.
+Void TComPrediction::predIntraAng( const ComponentID compID, UInt uiDirMode, Pel* piOrg /* Will be null for decoding */, UInt uiOrgStride, Pel* piPred, UInt uiStride, TComTU &rTu, Bool bAbove, Bool bLeft, const Bool bUseFilteredPredSamples, const Bool bUseLosslessDPCM )
+{
+  const ChromaFormat   format      = rTu.GetChromaFormat();
+  const ChannelType    channelType = toChannelType(compID);
+  const TComRectangle &rect        = rTu.getRect(isLuma(compID) ? COMPONENT_Y : COMPONENT_Cb);
+  const Int            iWidth      = rect.width;
+  const Int            iHeight     = rect.height;
+
+  assert( g_aucConvertToBit[ iWidth ] >= 0 ); //   4x  4
+  assert( g_aucConvertToBit[ iWidth ] <= 5 ); // 128x128
+  //assert( iWidth == iHeight  );
+
+        Pel *pDst = piPred;
+
+  // get starting pixel in block
+  const Int sw = (2 * iWidth + 1);
+
+  if ( bUseLosslessDPCM )
+  {
+    const Pel *ptrSrc = getPredictorPtr( compID, false );
+    // Sample Adaptive intra-Prediction (SAP)
+    if (uiDirMode==HOR_IDX)
+    {
+      // left column filled with reference samples
+      // remaining columns filled with piOrg data (if available).
+      for(Int y=0; y<iHeight; y++)
+      {
+        piPred[y*uiStride+0] = ptrSrc[(y+1)*sw];
+      }
+      if (piOrg!=0)
+      {
+        piPred+=1; // miss off first column
+        for(Int y=0; y<iHeight; y++, piPred+=uiStride, piOrg+=uiOrgStride)
+        {
+          memcpy(piPred, piOrg, (iWidth-1)*sizeof(Pel));
+        }
+      }
+    }
+    else // VER_IDX
+    {
+      // top row filled with reference samples
+      // remaining rows filled with piOrg data (if available)
+      for(Int x=0; x<iWidth; x++)
+      {
+        piPred[x] = ptrSrc[x+1];
+      }
+      if (piOrg!=0)
+      {
+        piPred+=uiStride; // miss off the first row
+        for(Int y=1; y<iHeight; y++, piPred+=uiStride, piOrg+=uiOrgStride)
+        {
+          memcpy(piPred, piOrg, iWidth*sizeof(Pel));
+        }
+      }
+    }
+  }
+  else
+  {
+    const Pel *ptrSrc = getPredictorPtr( compID, bUseFilteredPredSamples );
+
+    if ( uiDirMode == PLANAR_IDX )
+    {
+      // ptrSrc+sw+1 skips the reference row/column to the block's top-left sample.
+      xPredIntraPlanar( ptrSrc+sw+1, sw, pDst, uiStride, iWidth, iHeight, channelType, format );
+    }
+    else
+    {
+      // Create the prediction
+            TComDataCU *const pcCU              = rTu.getCU();
+      const UInt              uiAbsPartIdx      = rTu.GetAbsPartIdxTU();
+      const Bool              enableEdgeFilters = !(pcCU->isRDPCMEnabled(uiAbsPartIdx) && pcCU->getCUTransquantBypass(uiAbsPartIdx));
+
+#if O0043_BEST_EFFORT_DECODING
+      xPredIntraAng( g_bitDepthInStream[channelType], ptrSrc+sw+1, sw, pDst, uiStride, iWidth, iHeight, channelType, format, uiDirMode, bAbove, bLeft, enableEdgeFilters );
+#else
+      xPredIntraAng( g_bitDepth[channelType], ptrSrc+sw+1, sw, pDst, uiStride, iWidth, iHeight, channelType, format, uiDirMode, bAbove, bLeft, enableEdgeFilters );
+#endif
+
+      // DC mode with both neighbours present additionally smooths the block edges.
+      if(( uiDirMode == DC_IDX ) && bAbove && bLeft )
+      {
+        xDCPredFiltering( ptrSrc+sw+1, sw, pDst, uiStride, iWidth, iHeight, channelType );
+      }
+    }
+  }
+
+}
+
+/** Function for checking identical motion.
+ * \param TComDataCU* pcCU
+ * \param UInt PartAddr
+ * Returns true when both lists of a bi-predicted partition point at the same
+ * reference picture (by POC) with the same motion vector, so a single
+ * uni-prediction suffices.  Never true when weighted bi-prediction is on.
+ */
+Bool TComPrediction::xCheckIdenticalMotion ( TComDataCU* pcCU, UInt PartAddr )
+{
+  // Only relevant for B slices without weighted bi-prediction.
+  if( !pcCU->getSlice()->isInterB() || pcCU->getSlice()->getPPS()->getWPBiPred() )
+  {
+    return false;
+  }
+
+  const Int iRefIdxL0 = pcCU->getCUMvField(REF_PIC_LIST_0)->getRefIdx(PartAddr);
+  const Int iRefIdxL1 = pcCU->getCUMvField(REF_PIC_LIST_1)->getRefIdx(PartAddr);
+  if( iRefIdxL0 < 0 || iRefIdxL1 < 0 )
+  {
+    return false;
+  }
+
+  // Identical iff both lists reference the same POC with the same MV.
+  const Int RefPOCL0 = pcCU->getSlice()->getRefPic(REF_PIC_LIST_0, iRefIdxL0)->getPOC();
+  const Int RefPOCL1 = pcCU->getSlice()->getRefPic(REF_PIC_LIST_1, iRefIdxL1)->getPOC();
+  return ( RefPOCL0 == RefPOCL1 &&
+           pcCU->getCUMvField(REF_PIC_LIST_0)->getMv(PartAddr) == pcCU->getCUMvField(REF_PIC_LIST_1)->getMv(PartAddr) );
+}
+
+// Perform motion compensation for one partition (iPartIdx >= 0) or for every
+// partition of the CU (iPartIdx < 0).  The original implementation duplicated
+// the per-partition body verbatim in both cases; here the two cases are
+// unified by normalising the loop bounds, with identical behaviour.
+Void TComPrediction::motionCompensation ( TComDataCU* pcCU, TComYuv* pcYuvPred, RefPicList eRefPicList, Int iPartIdx )
+{
+  Int         iWidth;
+  Int         iHeight;
+  UInt        uiPartAddr;
+
+  // A non-negative iPartIdx selects exactly that partition; otherwise all.
+  const Int iFirstPart = ( iPartIdx >= 0 ) ? iPartIdx     : 0;
+  const Int iLastPart  = ( iPartIdx >= 0 ) ? iPartIdx + 1 : pcCU->getNumPartitions();
+
+  for ( Int iPart = iFirstPart; iPart < iLastPart; iPart++ )
+  {
+    pcCU->getPartIndexAndSize( iPart, uiPartAddr, iWidth, iHeight );
+
+    if ( eRefPicList != REF_PIC_LIST_X )
+    {
+      // Explicit list requested: uni-prediction, with weighted prediction
+      // applied afterwards when the PPS enables it.
+      if( pcCU->getSlice()->getPPS()->getUseWP())
+      {
+        xPredInterUni (pcCU, uiPartAddr, iWidth, iHeight, eRefPicList, pcYuvPred, true );
+      }
+      else
+      {
+        xPredInterUni (pcCU, uiPartAddr, iWidth, iHeight, eRefPicList, pcYuvPred );
+      }
+      if ( pcCU->getSlice()->getPPS()->getUseWP() )
+      {
+        xWeightedPredictionUni( pcCU, pcYuvPred, uiPartAddr, iWidth, iHeight, eRefPicList, pcYuvPred );
+      }
+    }
+    else
+    {
+      // List unspecified: collapse identical bi-motion to a single
+      // uni-prediction, otherwise do full bi-prediction.
+      if ( xCheckIdenticalMotion( pcCU, uiPartAddr ) )
+      {
+        xPredInterUni (pcCU, uiPartAddr, iWidth, iHeight, REF_PIC_LIST_0, pcYuvPred );
+      }
+      else
+      {
+        xPredInterBi  (pcCU, uiPartAddr, iWidth, iHeight, pcYuvPred );
+      }
+    }
+  }
+  return;
+}
+
+// Uni-directional inter prediction: motion-compensate each valid component of
+// the partition from the reference picture selected on eRefPicList.
+// 'bi' indicates this hypothesis is half of a bi-prediction.
+Void TComPrediction::xPredInterUni ( TComDataCU* pcCU, UInt uiPartAddr, Int iWidth, Int iHeight, RefPicList eRefPicList, TComYuv* pcYuvPred, Bool bi )
+{
+  // Fetch the reference index and motion vector for this partition.
+  Int    iRefIdx = pcCU->getCUMvField( eRefPicList )->getRefIdx( uiPartAddr );
+  assert (iRefIdx >= 0);
+  TComMv cMv     = pcCU->getCUMvField( eRefPicList )->getMv( uiPartAddr );
+  pcCU->clipMv(cMv);
+
+  // Predict every valid component (luma plus chroma where present).
+  const UInt numValidComp = pcYuvPred->getNumberValidComponents();
+  for (UInt ch=COMPONENT_Y; ch<numValidComp; ch++)
+  {
+    xPredInterBlk( ComponentID(ch), pcCU, pcCU->getSlice()->getRefPic( eRefPicList, iRefIdx )->getPicYuvRec(), uiPartAddr, &cMv, iWidth, iHeight, pcYuvPred, bi );
+  }
+}
+
+// Bi-directional inter prediction: produce one hypothesis per reference list
+// into m_acYuvPred[], then combine them by weighted prediction (B slice),
+// weighted uni-prediction (P slice) or a plain average.
+Void TComPrediction::xPredInterBi ( TComDataCU* pcCU, UInt uiPartAddr, Int iWidth, Int iHeight, TComYuv* pcYuvPred )
+{
+  TComYuv* pcMbYuv;
+  Int      iRefIdx[NUM_REF_PIC_LIST_01] = {-1, -1};
+
+  for ( UInt refList = 0; refList < NUM_REF_PIC_LIST_01; refList++ )
+  {
+    RefPicList eRefPicList = (refList ? REF_PIC_LIST_1 : REF_PIC_LIST_0);
+    iRefIdx[refList] = pcCU->getCUMvField( eRefPicList )->getRefIdx( uiPartAddr );
+
+    // Skip lists with no valid reference for this partition.
+    if ( iRefIdx[refList] < 0 )
+    {
+      continue;
+    }
+
+    assert( iRefIdx[refList] < pcCU->getSlice()->getNumRefIdx(eRefPicList) );
+
+    pcMbYuv = &m_acYuvPred[refList];
+    if( pcCU->getCUMvField( REF_PIC_LIST_0 )->getRefIdx( uiPartAddr ) >= 0 && pcCU->getCUMvField( REF_PIC_LIST_1 )->getRefIdx( uiPartAddr ) >= 0 )
+    {
+      // Both lists valid: each hypothesis is produced with bi=true.
+      xPredInterUni ( pcCU, uiPartAddr, iWidth, iHeight, eRefPicList, pcMbYuv, true );
+    }
+    else
+    {
+      // Only this list valid: bi=true is still used when weighted prediction
+      // applies, so rounding is deferred to the weighting stage.
+      if ( ( pcCU->getSlice()->getPPS()->getUseWP()       && pcCU->getSlice()->getSliceType() == P_SLICE ) ||
+           ( pcCU->getSlice()->getPPS()->getWPBiPred()    && pcCU->getSlice()->getSliceType() == B_SLICE ) )
+      {
+        xPredInterUni ( pcCU, uiPartAddr, iWidth, iHeight, eRefPicList, pcMbYuv, true );
+      }
+      else
+      {
+        xPredInterUni ( pcCU, uiPartAddr, iWidth, iHeight, eRefPicList, pcMbYuv );
+      }
+    }
+  }
+
+  // Combine the hypotheses into the final prediction.
+  if ( pcCU->getSlice()->getPPS()->getWPBiPred()    && pcCU->getSlice()->getSliceType() == B_SLICE  )
+  {
+    xWeightedPredictionBi( pcCU, &m_acYuvPred[REF_PIC_LIST_0], &m_acYuvPred[REF_PIC_LIST_1], iRefIdx[REF_PIC_LIST_0], iRefIdx[REF_PIC_LIST_1], uiPartAddr, iWidth, iHeight, pcYuvPred );
+  }
+  else if ( pcCU->getSlice()->getPPS()->getUseWP() && pcCU->getSlice()->getSliceType() == P_SLICE )
+  {
+    xWeightedPredictionUni( pcCU, &m_acYuvPred[REF_PIC_LIST_0], uiPartAddr, iWidth, iHeight, REF_PIC_LIST_0, pcYuvPred );
+  }
+  else
+  {
+    xWeightedAverage( &m_acYuvPred[REF_PIC_LIST_0], &m_acYuvPred[REF_PIC_LIST_1], iRefIdx[REF_PIC_LIST_0], iRefIdx[REF_PIC_LIST_1], uiPartAddr, iWidth, iHeight, pcYuvPred );
+  }
+}
+
+/**
+ * \brief Generate motion-compensated block
+ *
+ * \param cu       Pointer to current CU
+ * \param refPic   Pointer to reference picture
+ * \param partAddr Address of block within CU
+ * \param mv       Motion vector
+ * \param width    Width of block
+ * \param height   Height of block
+ * \param dstPic   Pointer to destination picture
+ * \param bi       Flag indicating whether bipred is used
+ */
+
+
+Void TComPrediction::xPredInterBlk(const ComponentID compID, TComDataCU *cu, TComPicYuv *refPic, UInt partAddr, TComMv *mv, Int width, Int height, TComYuv *dstPic, Bool bi )
+{
+  Int     refStride  = refPic->getStride(compID);
+  Int     dstStride  = dstPic->getStride(compID);
+  // MVs are in 1/4-pel units for luma; chroma subsampling adds to the shift.
+  Int shiftHor=(2+refPic->getComponentScaleX(compID));
+  Int shiftVer=(2+refPic->getComponentScaleY(compID));
+
+  // Integer-pel part of the MV, as a sample offset into the reference plane.
+  Int     refOffset  = (mv->getHor() >> shiftHor) + (mv->getVer() >> shiftVer) * refStride;
+
+  Pel*    ref     = refPic->getAddr(compID, cu->getCtuRsAddr(), cu->getZorderIdxInCtu() + partAddr ) + refOffset;
+
+  Pel*    dst = dstPic->getAddr( compID, partAddr );
+
+  // Fractional-pel part of the MV, selecting the interpolation filter phase.
+  Int     xFrac  = mv->getHor() & ((1<<shiftHor)-1);
+  Int     yFrac  = mv->getVer() & ((1<<shiftVer)-1);
+  UInt    cxWidth  = width  >> refPic->getComponentScaleX(compID);
+  UInt    cxHeight = height >> refPic->getComponentScaleY(compID);
+
+  const ChromaFormat chFmt = cu->getPic()->getChromaFormat();
+
+  if ( yFrac == 0 )
+  {
+    // Horizontal-only interpolation (includes the integer-pel copy case).
+    m_if.filterHor(compID, ref, refStride, dst,  dstStride, cxWidth, cxHeight, xFrac, !bi, chFmt);
+  }
+  else if ( xFrac == 0 )
+  {
+    // Vertical-only interpolation.
+    m_if.filterVer(compID, ref, refStride, dst, dstStride, cxWidth, cxHeight, yFrac, true, !bi, chFmt);
+  }
+  else
+  {
+    // Separable 2-D interpolation: horizontal pass into a temp block (with
+    // extra rows for the vertical taps), then the vertical pass.
+    Int   tmpStride = m_filteredBlockTmp[0].getStride(compID);
+    Pel*  tmp       = m_filteredBlockTmp[0].getAddr(compID);
+
+    const Int vFilterSize = isLuma(compID) ? NTAPS_LUMA : NTAPS_CHROMA;
+
+    m_if.filterHor(compID, ref - ((vFilterSize>>1) -1)*refStride, refStride, tmp, tmpStride, cxWidth, cxHeight+vFilterSize-1, xFrac, false,      chFmt);
+    m_if.filterVer(compID, tmp + ((vFilterSize>>1) -1)*tmpStride, tmpStride, dst, dstStride, cxWidth, cxHeight,               yFrac, false, !bi, chFmt);
+  }
+}
+
+// Combine the two uni-prediction hypotheses: average when both reference
+// indices are valid, otherwise copy the single valid hypothesis.
+Void TComPrediction::xWeightedAverage( TComYuv* pcYuvSrc0, TComYuv* pcYuvSrc1, Int iRefIdx0, Int iRefIdx1, UInt uiPartIdx, Int iWidth, Int iHeight, TComYuv* pcYuvDst )
+{
+  const Bool bValid0 = ( iRefIdx0 >= 0 );
+  const Bool bValid1 = ( iRefIdx1 >= 0 );
+
+  if ( bValid0 && bValid1 )
+  {
+    // True bi-prediction: average the two hypotheses.
+    pcYuvDst->addAvg( pcYuvSrc0, pcYuvSrc1, uiPartIdx, iWidth, iHeight );
+  }
+  else if ( bValid0 )
+  {
+    pcYuvSrc0->copyPartToPartYuv( pcYuvDst, uiPartIdx, iWidth, iHeight );
+  }
+  else if ( bValid1 )
+  {
+    pcYuvSrc1->copyPartToPartYuv( pcYuvDst, uiPartIdx, iWidth, iHeight );
+  }
+  // Neither valid: destination is left untouched, matching the original code.
+}
+
+// AMVP
+// Resolve the motion-vector predictor for a partition.  With more than one
+// AMVP candidate the coded MVP index selects it; with zero or one candidate
+// the choice is implicit, so index 0 and the candidate count are recorded.
+Void TComPrediction::getMvPredAMVP( TComDataCU* pcCU, UInt uiPartIdx, UInt uiPartAddr, RefPicList eRefPicList, TComMv& rcMvPred )
+{
+  AMVPInfo* pcAMVPInfo = pcCU->getCUMvField(eRefPicList)->getAMVPInfo();
+
+  if( pcAMVPInfo->iN > 1 )
+  {
+    // Several candidates: look up the signalled MVP index.
+    assert(pcCU->getMVPIdx(eRefPicList,uiPartAddr) >= 0);
+    rcMvPred = pcAMVPInfo->m_acMvCand[pcCU->getMVPIdx(eRefPicList,uiPartAddr)];
+    return;
+  }
+
+  // At most one candidate: it is implicit; store index 0 and the count.
+  rcMvPred = pcAMVPInfo->m_acMvCand[0];
+  pcCU->setMVPIdxSubParts( 0, eRefPicList, uiPartAddr, uiPartIdx, pcCU->getDepth(uiPartAddr));
+  pcCU->setMVPNumSubParts( pcAMVPInfo->iN, eRefPicList, uiPartAddr, uiPartIdx, pcCU->getDepth(uiPartAddr));
+}
+
+/** Function for deriving planar intra prediction.
+ * \param pSrc pointer to reconstructed sample array
+ * \param srcStride the stride of the reconstructed sample array
+ * \param rpDst reference to pointer for the prediction sample array
+ * \param dstStride the stride of the prediction sample array
+ * \param width the width of the block
+ * \param height the height of the block
+ *
+ * This function derives the prediction samples for planar mode (intra coding).
+ */
+//NOTE: Bit-Limit - 24-bit source
+Void TComPrediction::xPredIntraPlanar( const Pel* pSrc, Int srcStride, Pel* rpDst, Int dstStride, UInt width, UInt height, ChannelType channelType, ChromaFormat format )
+{
+  assert(width <= height);
+
+  Int leftColumn[MAX_CU_SIZE+1], topRow[MAX_CU_SIZE+1], bottomRow[MAX_CU_SIZE], rightColumn[MAX_CU_SIZE];
+  UInt shift1Dhor = g_aucConvertToBit[ width ] + 2;
+  UInt shift1Dver = g_aucConvertToBit[ height ] + 2;
+
+  // Get left and above reference column and row
+  for(Int k=0;k<width+1;k++)
+  {
+    topRow[k] = pSrc[k-srcStride];
+  }
+
+  for (Int k=0; k < height+1; k++)
+  {
+    leftColumn[k] = pSrc[k*srcStride-1];
+  }
+
+  // Prepare intermediate variables used in interpolation
+  Int bottomLeft = leftColumn[height];
+  Int topRight   = topRow[width];
+
+  // Precompute per-column vertical gradients; topRow is pre-scaled and will be
+  // accumulated in place row by row in the generation loop below.
+  for(Int k=0;k<width;k++)
+  {
+    bottomRow[k]  = bottomLeft - topRow[k];
+    topRow[k]     <<= shift1Dver;
+  }
+
+  // Precompute per-row horizontal gradients; leftColumn is pre-scaled likewise.
+  for(Int k=0;k<height;k++)
+  {
+    rightColumn[k]  = topRight - leftColumn[k];
+    leftColumn[k]   <<= shift1Dhor;
+  }
+
+  const UInt topRowShift = 0;
+
+  // Generate prediction signal
+  for (Int y=0;y<height;y++)
+  {
+    // '+ width' provides the rounding offset for the final >> (shift1Dhor+1).
+    Int horPred = leftColumn[y] + width;
+    for (Int x=0;x<width;x++)
+    {
+      horPred += rightColumn[y];
+      topRow[x] += bottomRow[x];
+
+      Int vertPred = ((topRow[x] + topRowShift)>>topRowShift);
+      rpDst[y*dstStride+x] = ( horPred + vertPred ) >> (shift1Dhor+1);
+    }
+  }
+}
+
+/** Function for filtering intra DC predictor.
+ * \param pSrc pointer to reconstructed sample array
+ * \param iSrcStride the stride of the reconstructed sample array
+ * \param rpDst reference to pointer for the prediction sample array
+ * \param iDstStride the stride of the prediction sample array
+ * \param iWidth the width of the block
+ * \param iHeight the height of the block
+ *
+ * This function performs filtering left and top edges of the prediction samples for DC mode (intra coding).
+ */
+Void TComPrediction::xDCPredFiltering( const Pel* pSrc, Int iSrcStride, Pel* pDst, Int iDstStride, Int iWidth, Int iHeight, ChannelType channelType )
+{
+  Int x, y, iDstStride2, iSrcStride2;
+
+  // Filtering applies only to luma and only up to the configured size limits.
+  if (isLuma(channelType) && (iWidth <= MAXIMUM_INTRA_FILTERED_WIDTH) && (iHeight <= MAXIMUM_INTRA_FILTERED_HEIGHT))
+  {
+    //top-left (blends the two adjacent reference samples, weight 1:1:2)
+    pDst[0] = (Pel)((pSrc[-iSrcStride] + pSrc[-1] + 2 * pDst[0] + 2) >> 2);
+
+    //top row (vertical filter, weight 1:3 with rounding)
+    for ( x = 1; x < iWidth; x++ )
+    {
+      pDst[x] = (Pel)((pSrc[x - iSrcStride] +  3 * pDst[x] + 2) >> 2);
+    }
+
+    //left column (horizontal filter)
+    for ( y = 1, iDstStride2 = iDstStride, iSrcStride2 = iSrcStride-1; y < iHeight; y++, iDstStride2+=iDstStride, iSrcStride2+=iSrcStride )
+    {
+      pDst[iDstStride2] = (Pel)((pSrc[iSrcStride2] + 3 * pDst[iDstStride2] + 2) >> 2);
+    }
+  }
+
+  return;
+}
+
+/* Static member function */
+// DPCM is used in the first intra estimation pass only when the CU has RDPCM
+// enabled AND transquant bypass (lossless), and the mode is purely
+// horizontal or vertical.
+Bool TComPrediction::UseDPCMForFirstPassIntraEstimation(TComTU &rTu, const UInt uiDirMode)
+{
+  if ( !rTu.getCU()->isRDPCMEnabled(rTu.GetAbsPartIdxTU()) )
+  {
+    return false;
+  }
+  if ( !rTu.getCU()->getCUTransquantBypass(rTu.GetAbsPartIdxTU()) )
+  {
+    return false;
+  }
+  return (uiDirMode == HOR_IDX) || (uiDirMode == VER_IDX);
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComPrediction.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,143 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComPrediction.h
+    \brief    prediction class (header)
+*/
+
+#ifndef __TCOMPREDICTION__
+#define __TCOMPREDICTION__
+
+
+// Include files
+#include "TComPic.h"
+#include "TComMotionInfo.h"
+#include "TComPattern.h"
+#include "TComTrQuant.h"
+#include "TComInterpolationFilter.h"
+#include "TComWeightPrediction.h"
+
+class TComTU; // forward declaration
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// prediction class
+/// Selector for the two intra reference-sample buffers held per component:
+/// the raw (unfiltered) samples and the smoothed (filtered) samples.
+typedef enum PRED_BUF_E
+{
+  PRED_BUF_UNFILTERED=0,
+  PRED_BUF_FILTERED=1,
+  NUM_PRED_BUF=2
+} PRED_BUF;
+
+// Number of block-size levels indexed by m_aucIntraFilter below.
+static const UInt MAX_INTRA_FILTER_DEPTHS=5;
+
+/// Intra/inter prediction engine: owns the extended reference-sample buffers,
+/// the motion-compensation temporaries and the interpolation filter, and
+/// exposes angular-intra prediction, motion compensation and AMVP helpers.
+class TComPrediction : public TComWeightPrediction
+{
+private:
+  // Per channel-type, per block-size-level flag controlling intra reference smoothing.
+  static const UChar m_aucIntraFilter[MAX_NUM_CHANNEL_TYPE][MAX_INTRA_FILTER_DEPTHS];
+
+protected:
+  // Extended intra reference samples, one buffer per component and per PRED_BUF state.
+  Pel*      m_piYuvExt[MAX_NUM_COMPONENT][NUM_PRED_BUF];
+  Int       m_iYuvExtSize;
+
+  TComYuv   m_acYuvPred[NUM_REF_PIC_LIST_01];
+  TComYuv   m_cYuvPredTemp;
+  // Sub-pel interpolated blocks indexed by fractional (y, x) position.
+  TComYuv m_filteredBlock[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS];
+  TComYuv m_filteredBlockTmp[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS];
+
+  TComInterpolationFilter m_if;
+
+  Pel*   m_pLumaRecBuffer;       ///< array for downsampled reconstructed luma sample
+  Int    m_iLumaRecStride;       ///< stride of #m_pLumaRecBuffer array
+
+  Void xPredIntraAng            ( Int bitDepth, const Pel* pSrc, Int srcStride, Pel* pDst, Int dstStride, UInt width, UInt height, ChannelType channelType, ChromaFormat format, UInt dirMode, Bool blkAboveAvailable, Bool blkLeftAvailable, const Bool bEnableEdgeFilters );
+  Void xPredIntraPlanar         ( const Pel* pSrc, Int srcStride, Pel* rpDst, Int dstStride, UInt width, UInt height, ChannelType channelType, ChromaFormat format );
+
+  // motion compensation functions
+  Void xPredInterUni            ( TComDataCU* pcCU,                          UInt uiPartAddr,               Int iWidth, Int iHeight, RefPicList eRefPicList, TComYuv* pcYuvPred, Bool bi=false          );
+  Void xPredInterBi             ( TComDataCU* pcCU,                          UInt uiPartAddr,               Int iWidth, Int iHeight,                         TComYuv* pcYuvPred          );
+  Void xPredInterBlk(const ComponentID compID, TComDataCU *cu, TComPicYuv *refPic, UInt partAddr, TComMv *mv, Int width, Int height, TComYuv *dstPic, Bool bi );
+  Void xWeightedAverage         ( TComYuv* pcYuvSrc0, TComYuv* pcYuvSrc1, Int iRefIdx0, Int iRefIdx1, UInt uiPartAddr, Int iWidth, Int iHeight, TComYuv* pcYuvDst );
+
+  Void xGetLLSPrediction ( const Pel* pSrc0, Int iSrcStride, Pel* pDst0, Int iDstStride, UInt uiWidth, UInt uiHeight, UInt uiExt0, const ChromaFormat chFmt  DEBUG_STRING_FN_DECLARE(sDebug) );
+
+  Void xDCPredFiltering( const Pel* pSrc, Int iSrcStride, Pel* pDst, Int iDstStride, Int iWidth, Int iHeight, ChannelType channelType );
+  Bool xCheckIdenticalMotion    ( TComDataCU* pcCU, UInt PartAddr);
+  Void destroy();
+
+public:
+  TComPrediction();
+  virtual ~TComPrediction();
+
+  Void    initTempBuff(ChromaFormat chromaFormatIDC);
+
+  ChromaFormat getChromaFormat() const { return m_cYuvPredTemp.getChromaFormat(); }
+
+  // inter
+  Void motionCompensation         ( TComDataCU*  pcCU, TComYuv* pcYuvPred, RefPicList eRefPicList = REF_PIC_LIST_X, Int iPartIdx = -1 );
+
+  // motion vector prediction
+  Void getMvPredAMVP              ( TComDataCU* pcCU, UInt uiPartIdx, UInt uiPartAddr, RefPicList eRefPicList, TComMv& rcMvPred );
+
+  // Angular Intra
+  Void predIntraAng               ( const ComponentID compID, UInt uiDirMode, Pel *piOrg /* Will be null for decoding */, UInt uiOrgStride, Pel* piPred, UInt uiStride, TComTU &rTu, Bool bAbove, Bool bLeft, const Bool bUseFilteredPredSamples, const Bool bUseLosslessDPCM = false );
+
+  Pel  predIntraGetPredValDC      ( const Pel* pSrc, Int iSrcStride, UInt iWidth, UInt iHeight, ChannelType channelType, ChromaFormat format, Bool bAbove, Bool bLeft );
+
+  Pel*  getPredictorPtr           ( const ComponentID compID, const Bool bUseFilteredPredictions )
+  {
+    return m_piYuvExt[compID][bUseFilteredPredictions?PRED_BUF_FILTERED:PRED_BUF_UNFILTERED];
+  }
+
+  // This function is actually still in TComPattern.cpp
+  /// set parameters from CU data for accessing ADI data
+  Void initAdiPatternChType ( TComTU &rTu,
+                              Bool&       bAbove,
+                              Bool&       bLeft,
+                              const ComponentID compID, const Bool bFilterRefSamples
+                              DEBUG_STRING_FN_DECLARE(sDebug)
+                              );
+
+  static Bool filteringIntraReferenceSamples(const ComponentID compID, UInt uiDirMode, UInt uiTuChWidth, UInt uiTuChHeight, const ChromaFormat chFmt, const Bool intraReferenceSmoothingDisabled);
+
+  static Bool UseDPCMForFirstPassIntraEstimation(TComTU &rTu, const UInt uiDirMode);
+};
+
+//! \}
+
+#endif // __TCOMPREDICTION__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComRdCost.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1606 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComRdCost.cpp
+    \brief    RD cost computation class
+*/
+
+#include <math.h>
+#include <assert.h>
+#include "TComRom.h"
+#include "TComRdCost.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// Constructor: installs the distortion function-pointer table and resets state.
+TComRdCost::TComRdCost()
+{
+  init();
+}
+
+// Destructor: nothing to release; all members are value types or static tables.
+TComRdCost::~TComRdCost()
+{
+}
+
+// Calculate RD functions
+/** Combine a bit count and a distortion into a scalar rate-distortion cost.
+ * \param uiBits        number of bits for the candidate
+ * \param uiDistortion  distortion of the candidate (SAD- or SSE-domain per eDFunc)
+ * \param bFlag         legacy flag (never true at call sites, per the NOTE below)
+ * \param eDFunc        selects which lambda to apply (SAD lambda, default, or frame)
+ * \return the weighted cost D + lambda*R (or R + D/lambda in lossless-capable modes)
+ */
+Double TComRdCost::calcRdCost( UInt uiBits, Distortion uiDistortion, Bool bFlag, DFunc eDFunc )
+{
+  Double dRdCost = 0.0;
+  Double dLambda = 0.0;
+
+  // Pick the lambda matching the distortion domain of eDFunc.
+  switch ( eDFunc )
+  {
+    case DF_SSE:
+      assert(0);
+      break;
+    case DF_SAD:
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+      dLambda = m_dLambdaMotionSAD[0]; // 0 is valid, because for lossless blocks, the cost equation is modified to compensate.
+#else
+      dLambda = (Double)m_uiLambdaMotionSAD[0]; // 0 is valid, because for lossless blocks, the cost equation is modified to compensate.
+#endif
+      break;
+    case DF_DEFAULT:
+      dLambda =         m_dLambda;
+      break;
+    case DF_SSE_FRAME:
+      dLambda =         m_dFrameLambda;
+      break;
+    default:
+      assert (0);
+      break;
+  }
+
+  if (bFlag) //NOTE: this "bFlag" is never true
+  {
+    // Intra8x8, Intra4x4 Block only...
+    if (m_costMode != COST_STANDARD_LOSSY)
+    {
+      dRdCost = (Double(uiDistortion) / dLambda) + Double(uiBits); // all lossless costs would have uiDistortion=0, and therefore this cost function can be used.
+    }
+    else
+    {
+      dRdCost = (((Double)uiDistortion) + ((Double)uiBits * dLambda));
+    }
+  }
+  else
+  {
+    if (eDFunc == DF_SAD)
+    {
+      // SAD lambdas are stored in 16.16 fixed point (see setLambda), hence the
+      // 65536 scaling in both branches below.
+      if (m_costMode != COST_STANDARD_LOSSY)
+      {
+        dRdCost = ((Double(uiDistortion) * 65536) / dLambda) + Double(uiBits); // all lossless costs would have uiDistortion=0, and therefore this cost function can be used.
+      }
+      else
+      {
+        dRdCost = floor(Double(uiDistortion) + (floor((Double(uiBits) * dLambda) + 0.5) / 65536.0));
+      }
+    }
+    else
+    {
+      if (m_costMode != COST_STANDARD_LOSSY)
+      {
+        dRdCost = (Double(uiDistortion) / dLambda) + Double(uiBits); // all lossless costs would have uiDistortion=0, and therefore this cost function can be used.
+      }
+      else
+      {
+        dRdCost = floor(Double(uiDistortion) + (Double(uiBits) * dLambda) + 0.5);
+      }
+    }
+  }
+
+  return dRdCost;
+}
+
+/** 64-bit variant of calcRdCost for accumulated (frame-level) bits/distortion.
+ * Same cost formulas as calcRdCost; UInt64 arguments are converted through
+ * Int64 before Double to avoid unsigned-to-double conversion quirks.
+ */
+Double TComRdCost::calcRdCost64( UInt64 uiBits, UInt64 uiDistortion, Bool bFlag, DFunc eDFunc )
+{
+  Double dRdCost = 0.0;
+  Double dLambda = 0.0;
+
+  // Pick the lambda matching the distortion domain of eDFunc.
+  switch ( eDFunc )
+  {
+    case DF_SSE:
+      assert(0);
+      break;
+    case DF_SAD:
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+      dLambda = m_dLambdaMotionSAD[0]; // 0 is valid, because for lossless blocks, the cost equation is modified to compensate.
+#else
+      dLambda = (Double)m_uiLambdaMotionSAD[0]; // 0 is valid, because for lossless blocks, the cost equation is modified to compensate.
+#endif
+      break;
+    case DF_DEFAULT:
+      dLambda =         m_dLambda;
+      break;
+    case DF_SSE_FRAME:
+      dLambda =         m_dFrameLambda;
+      break;
+    default:
+      assert (0);
+      break;
+  }
+
+  if (bFlag) //NOTE: this "bFlag" is never true
+  {
+    // Intra8x8, Intra4x4 Block only...
+    if (m_costMode != COST_STANDARD_LOSSY)
+    {
+      dRdCost = (Double(uiDistortion) / dLambda) + Double(uiBits); // all lossless costs would have uiDistortion=0, and therefore this cost function can be used.
+    }
+    else
+    {
+      dRdCost = (((Double)(Int64)uiDistortion) + ((Double)(Int64)uiBits * dLambda));
+    }
+  }
+  else
+  {
+    if (eDFunc == DF_SAD)
+    {
+      // SAD lambdas are stored in 16.16 fixed point (see setLambda).
+      if (m_costMode != COST_STANDARD_LOSSY)
+      {
+        dRdCost = ((Double(uiDistortion) * 65536) / dLambda) + Double(uiBits); // all lossless costs would have uiDistortion=0, and therefore this cost function can be used.
+      }
+      else
+      {
+        dRdCost = floor(Double(uiDistortion) + (floor((Double(uiBits) * dLambda) + 0.5) / 65536.0));
+      }
+    }
+    else
+    {
+      if (m_costMode != COST_STANDARD_LOSSY)
+      {
+        dRdCost = (Double(uiDistortion) / dLambda) + Double(uiBits); // all lossless costs would have uiDistortion=0, and therefore this cost function can be used.
+      }
+      else
+      {
+        dRdCost = floor(Double(uiDistortion) + (Double(uiBits) * dLambda) + 0.5);
+      }
+    }
+  }
+
+  return dRdCost;
+}
+
+/** Set the RD lambda and derive the motion-search lambdas.
+ * Index [0] holds the regular lambda; index [1] holds a lambda recomputed from
+ * the fixed lossless/mixed-lossless test QP. Motion lambdas are stored scaled
+ * by 65536 (16.16 fixed point); SAD lambdas use sqrt(lambda), SSE use lambda.
+ * Note: the parameter is reused as a local for the index-[1] recomputation.
+ */
+Void TComRdCost::setLambda( Double dLambda )
+{
+  m_dLambda           = dLambda;
+  m_sqrtLambda        = sqrt(m_dLambda);
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+  m_dLambdaMotionSAD[0] = 65536.0 * m_sqrtLambda;
+  m_dLambdaMotionSSE[0] = 65536.0 * m_dLambda;
+#if FULL_NBIT
+  dLambda = 0.57 * pow(2.0, ((LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP_PRIME - 12) / 3.0));
+#else
+  dLambda = 0.57 * pow(2.0, ((LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP_PRIME - 12 - 6 * (g_bitDepth[CHANNEL_TYPE_LUMA] - 8)) / 3.0));
+#endif
+  m_dLambdaMotionSAD[1] = 65536.0 * sqrt(dLambda);
+  m_dLambdaMotionSSE[1] = 65536.0 * dLambda;
+#else
+  m_uiLambdaMotionSAD[0] = (UInt)floor(65536.0 * m_sqrtLambda);
+  m_uiLambdaMotionSSE[0] = (UInt)floor(65536.0 * m_dLambda   );
+#if FULL_NBIT
+  dLambda = 0.57 * pow(2.0, ((LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP_PRIME - 12) / 3.0));
+#else
+  dLambda = 0.57 * pow(2.0, ((LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP_PRIME - 12 - 6 * (g_bitDepth[CHANNEL_TYPE_LUMA] - 8)) / 3.0));
+#endif
+  m_uiLambdaMotionSAD[1] = (UInt)floor(65536.0 * sqrt(dLambda));
+  m_uiLambdaMotionSSE[1] = (UInt)floor(65536.0 * dLambda   );
+#endif
+}
+
+
+// Initialize function pointer table indexed by [eDFunc]
+// Populates m_afpDistortFunc with the SSE/SAD/Hadamard kernels for each block
+// width, then resets the cost mode and motion-cost state.
+Void TComRdCost::init()
+{
+  m_afpDistortFunc[DF_DEFAULT] = NULL;                  // for DF_DEFAULT
+
+  m_afpDistortFunc[DF_SSE    ] = TComRdCost::xGetSSE;
+  m_afpDistortFunc[DF_SSE4   ] = TComRdCost::xGetSSE4;
+  m_afpDistortFunc[DF_SSE8   ] = TComRdCost::xGetSSE8;
+  m_afpDistortFunc[DF_SSE16  ] = TComRdCost::xGetSSE16;
+  m_afpDistortFunc[DF_SSE32  ] = TComRdCost::xGetSSE32;
+  m_afpDistortFunc[DF_SSE64  ] = TComRdCost::xGetSSE64;
+  m_afpDistortFunc[DF_SSE16N ] = TComRdCost::xGetSSE16N;
+
+  m_afpDistortFunc[DF_SAD    ] = TComRdCost::xGetSAD;
+  m_afpDistortFunc[DF_SAD4   ] = TComRdCost::xGetSAD4;
+  m_afpDistortFunc[DF_SAD8   ] = TComRdCost::xGetSAD8;
+  m_afpDistortFunc[DF_SAD16  ] = TComRdCost::xGetSAD16;
+  m_afpDistortFunc[DF_SAD32  ] = TComRdCost::xGetSAD32;
+  m_afpDistortFunc[DF_SAD64  ] = TComRdCost::xGetSAD64;
+  m_afpDistortFunc[DF_SAD16N ] = TComRdCost::xGetSAD16N;
+
+  // The DF_SADS (subsampled/step) entries reuse the same SAD kernels.
+  m_afpDistortFunc[DF_SADS   ] = TComRdCost::xGetSAD;
+  m_afpDistortFunc[DF_SADS4  ] = TComRdCost::xGetSAD4;
+  m_afpDistortFunc[DF_SADS8  ] = TComRdCost::xGetSAD8;
+  m_afpDistortFunc[DF_SADS16 ] = TComRdCost::xGetSAD16;
+  m_afpDistortFunc[DF_SADS32 ] = TComRdCost::xGetSAD32;
+  m_afpDistortFunc[DF_SADS64 ] = TComRdCost::xGetSAD64;
+  m_afpDistortFunc[DF_SADS16N] = TComRdCost::xGetSAD16N;
+
+#if AMP_SAD
+  // Asymmetric-motion-partition widths (12/24/48).
+  m_afpDistortFunc[DF_SAD12  ] = TComRdCost::xGetSAD12;
+  m_afpDistortFunc[DF_SAD24  ] = TComRdCost::xGetSAD24;
+  m_afpDistortFunc[DF_SAD48  ] = TComRdCost::xGetSAD48;
+
+  m_afpDistortFunc[DF_SADS12 ] = TComRdCost::xGetSAD12;
+  m_afpDistortFunc[DF_SADS24 ] = TComRdCost::xGetSAD24;
+  m_afpDistortFunc[DF_SADS48 ] = TComRdCost::xGetSAD48;
+#endif
+  m_afpDistortFunc[DF_HADS   ] = TComRdCost::xGetHADs;
+  m_afpDistortFunc[DF_HADS4  ] = TComRdCost::xGetHADs;
+  m_afpDistortFunc[DF_HADS8  ] = TComRdCost::xGetHADs;
+  m_afpDistortFunc[DF_HADS16 ] = TComRdCost::xGetHADs;
+  m_afpDistortFunc[DF_HADS32 ] = TComRdCost::xGetHADs;
+  m_afpDistortFunc[DF_HADS64 ] = TComRdCost::xGetHADs;
+  m_afpDistortFunc[DF_HADS16N] = TComRdCost::xGetHADs;
+
+  m_costMode                   = COST_STANDARD_LOSSY;
+
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+  m_dCost                      = 0;
+#else
+  m_uiCost                     = 0;
+#endif
+  m_iCostScale                 = 0;
+}
+
+/** Number of bits to code a signed value as a signed Exp-Golomb code.
+ * Maps iVal to the positive code number (2|v| for v<=0, 2v-1 mirrored here as
+ * (-v<<1)+1 / (v<<1)) and counts its bit length: 1 + 2*floor(log2(code)).
+ */
+UInt TComRdCost::xGetComponentBits( Int iVal )
+{
+  UInt uiLength = 1;
+  UInt uiTemp   = ( iVal <= 0) ? (-iVal<<1)+1: (iVal<<1);
+
+  assert ( uiTemp );
+
+  while ( 1 != uiTemp )
+  {
+    uiTemp >>= 1;
+    uiLength += 2;
+  }
+
+  return uiLength;
+}
+
+/** Configure a DistParam with block size and the width-matched distortion
+ * kernel for eDFunc; pointers/strides are left for the caller to fill in. */
+Void TComRdCost::setDistParam( UInt uiBlkWidth, UInt uiBlkHeight, DFunc eDFunc, DistParam& rcDistParam )
+{
+  // set Block Width / Height
+  rcDistParam.iCols    = uiBlkWidth;
+  rcDistParam.iRows    = uiBlkHeight;
+  // Select the size-specialised kernel: base function + log2(width) offset.
+  rcDistParam.DistFunc = m_afpDistortFunc[eDFunc + g_aucConvertToBit[ rcDistParam.iCols ] + 1 ];
+
+  // initialize
+  rcDistParam.iSubShift  = 0;
+}
+
+// Setting the Distortion Parameter for Inter (ME)
+// Fills a DistParam from the search pattern (original block) and a reference
+// position, selecting the width-specialised SAD kernel (with AMP overrides).
+Void TComRdCost::setDistParam( TComPattern* pcPatternKey, Pel* piRefY, Int iRefStride, DistParam& rcDistParam )
+{
+  // set Original & Curr Pointer / Stride
+  rcDistParam.pOrg = pcPatternKey->getROIY();
+  rcDistParam.pCur = piRefY;
+
+  rcDistParam.iStrideOrg = pcPatternKey->getPatternLStride();
+  rcDistParam.iStrideCur = iRefStride;
+
+  // set Block Width / Height
+  rcDistParam.iCols    = pcPatternKey->getROIYWidth();
+  rcDistParam.iRows    = pcPatternKey->getROIYHeight();
+  rcDistParam.DistFunc = m_afpDistortFunc[DF_SAD + g_aucConvertToBit[ rcDistParam.iCols ] + 1 ];
+
+#if AMP_SAD
+  // AMP widths are not powers of two, so override the log2-based selection.
+  if (rcDistParam.iCols == 12)
+  {
+    rcDistParam.DistFunc = m_afpDistortFunc[DF_SAD12];
+  }
+  else if (rcDistParam.iCols == 24)
+  {
+    rcDistParam.DistFunc = m_afpDistortFunc[DF_SAD24];
+  }
+  else if (rcDistParam.iCols == 48)
+  {
+    rcDistParam.DistFunc = m_afpDistortFunc[DF_SAD48];
+  }
+#endif
+
+  // initialize
+  rcDistParam.iSubShift  = 0;
+}
+
+// Setting the Distortion Parameter for Inter (subpel ME with step)
+// As the ME variant above, but walks the interpolated reference with the given
+// step and chooses SAD (bHADME false) or Hadamard (bHADME true) kernels.
+Void TComRdCost::setDistParam( TComPattern* pcPatternKey, Pel* piRefY, Int iRefStride, Int iStep, DistParam& rcDistParam, Bool bHADME )
+{
+  // set Original & Curr Pointer / Stride
+  rcDistParam.pOrg = pcPatternKey->getROIY();
+  rcDistParam.pCur = piRefY;
+
+  rcDistParam.iStrideOrg = pcPatternKey->getPatternLStride();
+  rcDistParam.iStrideCur = iRefStride * iStep;
+
+  // set Step for interpolated buffer
+  rcDistParam.iStep = iStep;
+
+  // set Block Width / Height
+  rcDistParam.iCols    = pcPatternKey->getROIYWidth();
+  rcDistParam.iRows    = pcPatternKey->getROIYHeight();
+
+  // set distortion function
+  if ( !bHADME )
+  {
+    rcDistParam.DistFunc = m_afpDistortFunc[DF_SADS + g_aucConvertToBit[ rcDistParam.iCols ] + 1 ];
+#if AMP_SAD
+    // AMP widths are not powers of two, so override the log2-based selection.
+    if (rcDistParam.iCols == 12)
+    {
+      rcDistParam.DistFunc = m_afpDistortFunc[DF_SADS12];
+    }
+    else if (rcDistParam.iCols == 24)
+    {
+      rcDistParam.DistFunc = m_afpDistortFunc[DF_SADS24];
+    }
+    else if (rcDistParam.iCols == 48)
+    {
+      rcDistParam.DistFunc = m_afpDistortFunc[DF_SADS48];
+    }
+#endif
+  }
+  else
+  {
+    rcDistParam.DistFunc = m_afpDistortFunc[DF_HADS + g_aucConvertToBit[ rcDistParam.iCols ] + 1 ];
+  }
+
+  // initialize
+  rcDistParam.iSubShift  = 0;
+}
+
+/** Configure a DistParam directly from two raw sample buffers.
+ * Selects a Hadamard or SAD kernel by width; step fixed to 1, no subsampling. */
+Void TComRdCost::setDistParam( DistParam& rcDP, Int bitDepth, Pel* p1, Int iStride1, Pel* p2, Int iStride2, Int iWidth, Int iHeight, Bool bHadamard )
+{
+  rcDP.pOrg       = p1;
+  rcDP.pCur       = p2;
+  rcDP.iStrideOrg = iStride1;
+  rcDP.iStrideCur = iStride2;
+  rcDP.iCols      = iWidth;
+  rcDP.iRows      = iHeight;
+  rcDP.iStep      = 1;
+  rcDP.iSubShift  = 0;
+  rcDP.bitDepth   = bitDepth;
+  rcDP.DistFunc   = m_afpDistortFunc[ ( bHadamard ? DF_HADS : DF_SADS ) + g_aucConvertToBit[ iWidth ] + 1 ];
+}
+
+/** Hadamard-transform distortion between two blocks.
+ * Tiles the block with 8x8 transforms when both dimensions are multiples of 8,
+ * otherwise with 4x4 (both dimensions must then be multiples of 4).
+ * The result is normalised to an 8-bit-equivalent scale for high bit depths.
+ */
+Distortion TComRdCost::calcHAD( Int bitDepth, Pel* pi0, Int iStride0, Pel* pi1, Int iStride1, Int iWidth, Int iHeight )
+{
+  Distortion uiSum = 0;
+  Int x, y;
+
+  if ( ( (iWidth % 8) == 0 ) && ( (iHeight % 8) == 0 ) )
+  {
+    for ( y=0; y<iHeight; y+= 8 )
+    {
+      for ( x=0; x<iWidth; x+= 8 )
+      {
+        uiSum += xCalcHADs8x8( &pi0[x], &pi1[x], iStride0, iStride1, 1 );
+      }
+      pi0 += iStride0*8;
+      pi1 += iStride1*8;
+    }
+  }
+  else
+  {
+    assert ( ( (iWidth % 4) == 0 ) && ( (iHeight % 4) == 0 ) );
+
+    for ( y=0; y<iHeight; y+= 4 )
+    {
+      for ( x=0; x<iWidth; x+= 4 )
+      {
+        uiSum += xCalcHADs4x4( &pi0[x], &pi1[x], iStride0, iStride1, 1 );
+      }
+      pi0 += iStride0*4;
+      pi1 += iStride1*4;
+    }
+  }
+
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(bitDepth-8) );
+}
+
+/** Distortion between a partition of the current and original pictures.
+ * Builds a DistParam for the requested metric and block size, then runs the
+ * selected kernel; chroma results are scaled by the per-component weight.
+ */
+Distortion TComRdCost::getDistPart( Int bitDepth, Pel* piCur, Int iCurStride,  Pel* piOrg, Int iOrgStride, UInt uiBlkWidth, UInt uiBlkHeight, const ComponentID compID, DFunc eDFunc )
+{
+  DistParam cDtParam;
+  setDistParam( uiBlkWidth, uiBlkHeight, eDFunc, cDtParam );
+  cDtParam.pOrg       = piOrg;
+  cDtParam.pCur       = piCur;
+  cDtParam.iStrideOrg = iOrgStride;
+  cDtParam.iStrideCur = iCurStride;
+  cDtParam.iStep      = 1;
+
+  cDtParam.bApplyWeight = false;
+  cDtParam.compIdx      = MAX_NUM_COMPONENT; // just for assert: to be sure it was set before use
+  cDtParam.bitDepth     = bitDepth;
+
+  if (isChroma(compID))
+  {
+    return ((Distortion) (m_distortionWeight[compID] * cDtParam.DistFunc( &cDtParam )));
+  }
+  else
+  {
+    return cDtParam.DistFunc( &cDtParam );
+  }
+}
+
+// ====================================================================================================================
+// Distortion functions
+// ====================================================================================================================
+
+// --------------------------------------------------------------------------------------------------------------------
+// SAD
+// --------------------------------------------------------------------------------------------------------------------
+
+// Generic SAD for arbitrary widths: plain double loop over iRows x iCols.
+Distortion TComRdCost::xGetSAD( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    // Weighted prediction active: delegate to the weighted SAD variant.
+    return TComRdCostWeightPrediction::xGetSADw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iCols   = pcDtParam->iCols;
+  Int  iStrideCur = pcDtParam->iStrideCur;
+  Int  iStrideOrg = pcDtParam->iStrideOrg;
+
+  Distortion uiSum = 0;
+
+  for( ; iRows != 0; iRows-- )
+  {
+    for (Int n = 0; n < iCols; n++ )
+    {
+      uiSum += abs( piOrg[n] - piCur[n] );
+    }
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  // Normalise to an 8-bit-equivalent distortion scale for high bit depths.
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+
+// SAD for width-4 blocks, unrolled; iSubShift skips rows (subsampled SAD) and
+// the final left-shift compensates for the skipped rows.
+Distortion TComRdCost::xGetSAD4( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetSADw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iSubShift  = pcDtParam->iSubShift;
+  Int  iSubStep   = ( 1 << iSubShift );
+  Int  iStrideCur = pcDtParam->iStrideCur*iSubStep;
+  Int  iStrideOrg = pcDtParam->iStrideOrg*iSubStep;
+
+  Distortion uiSum = 0;
+
+  for( ; iRows != 0; iRows-=iSubStep )
+  {
+    uiSum += abs( piOrg[0] - piCur[0] );
+    uiSum += abs( piOrg[1] - piCur[1] );
+    uiSum += abs( piOrg[2] - piCur[2] );
+    uiSum += abs( piOrg[3] - piCur[3] );
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  uiSum <<= iSubShift;
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+
+// SAD for width-8 blocks, unrolled; see xGetSAD4 for the iSubShift convention.
+Distortion TComRdCost::xGetSAD8( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetSADw( pcDtParam );
+  }
+  const Pel* piOrg      = pcDtParam->pOrg;
+  const Pel* piCur      = pcDtParam->pCur;
+  Int  iRows      = pcDtParam->iRows;
+  Int  iSubShift  = pcDtParam->iSubShift;
+  Int  iSubStep   = ( 1 << iSubShift );
+  Int  iStrideCur = pcDtParam->iStrideCur*iSubStep;
+  Int  iStrideOrg = pcDtParam->iStrideOrg*iSubStep;
+
+  Distortion uiSum = 0;
+
+  for( ; iRows != 0; iRows-=iSubStep )
+  {
+    uiSum += abs( piOrg[0] - piCur[0] );
+    uiSum += abs( piOrg[1] - piCur[1] );
+    uiSum += abs( piOrg[2] - piCur[2] );
+    uiSum += abs( piOrg[3] - piCur[3] );
+    uiSum += abs( piOrg[4] - piCur[4] );
+    uiSum += abs( piOrg[5] - piCur[5] );
+    uiSum += abs( piOrg[6] - piCur[6] );
+    uiSum += abs( piOrg[7] - piCur[7] );
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  uiSum <<= iSubShift;
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+
+// SAD for width-16 blocks, unrolled; see xGetSAD4 for the iSubShift convention.
+Distortion TComRdCost::xGetSAD16( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetSADw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iSubShift  = pcDtParam->iSubShift;
+  Int  iSubStep   = ( 1 << iSubShift );
+  Int  iStrideCur = pcDtParam->iStrideCur*iSubStep;
+  Int  iStrideOrg = pcDtParam->iStrideOrg*iSubStep;
+
+  Distortion uiSum = 0;
+
+  for( ; iRows != 0; iRows-=iSubStep )
+  {
+    uiSum += abs( piOrg[0] - piCur[0] );
+    uiSum += abs( piOrg[1] - piCur[1] );
+    uiSum += abs( piOrg[2] - piCur[2] );
+    uiSum += abs( piOrg[3] - piCur[3] );
+    uiSum += abs( piOrg[4] - piCur[4] );
+    uiSum += abs( piOrg[5] - piCur[5] );
+    uiSum += abs( piOrg[6] - piCur[6] );
+    uiSum += abs( piOrg[7] - piCur[7] );
+    uiSum += abs( piOrg[8] - piCur[8] );
+    uiSum += abs( piOrg[9] - piCur[9] );
+    uiSum += abs( piOrg[10] - piCur[10] );
+    uiSum += abs( piOrg[11] - piCur[11] );
+    uiSum += abs( piOrg[12] - piCur[12] );
+    uiSum += abs( piOrg[13] - piCur[13] );
+    uiSum += abs( piOrg[14] - piCur[14] );
+    uiSum += abs( piOrg[15] - piCur[15] );
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  uiSum <<= iSubShift;
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+
+#if AMP_SAD
+// SAD for width-12 blocks (asymmetric motion partitions), unrolled;
+// see xGetSAD4 for the iSubShift convention.
+Distortion TComRdCost::xGetSAD12( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetSADw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iSubShift  = pcDtParam->iSubShift;
+  Int  iSubStep   = ( 1 << iSubShift );
+  Int  iStrideCur = pcDtParam->iStrideCur*iSubStep;
+  Int  iStrideOrg = pcDtParam->iStrideOrg*iSubStep;
+
+  Distortion uiSum = 0;
+
+  for( ; iRows != 0; iRows-=iSubStep )
+  {
+    uiSum += abs( piOrg[0] - piCur[0] );
+    uiSum += abs( piOrg[1] - piCur[1] );
+    uiSum += abs( piOrg[2] - piCur[2] );
+    uiSum += abs( piOrg[3] - piCur[3] );
+    uiSum += abs( piOrg[4] - piCur[4] );
+    uiSum += abs( piOrg[5] - piCur[5] );
+    uiSum += abs( piOrg[6] - piCur[6] );
+    uiSum += abs( piOrg[7] - piCur[7] );
+    uiSum += abs( piOrg[8] - piCur[8] );
+    uiSum += abs( piOrg[9] - piCur[9] );
+    uiSum += abs( piOrg[10] - piCur[10] );
+    uiSum += abs( piOrg[11] - piCur[11] );
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  uiSum <<= iSubShift;
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+#endif
+
+// SAD for widths that are multiples of 16, unrolled in 16-column strips.
+// NOTE(review): unlike the other kernels this one has no bApplyWeight
+// delegation — presumably never reached with weighted prediction; confirm at call sites.
+Distortion TComRdCost::xGetSAD16N( DistParam* pcDtParam )
+{
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iCols   = pcDtParam->iCols;
+  Int  iSubShift  = pcDtParam->iSubShift;
+  Int  iSubStep   = ( 1 << iSubShift );
+  Int  iStrideCur = pcDtParam->iStrideCur*iSubStep;
+  Int  iStrideOrg = pcDtParam->iStrideOrg*iSubStep;
+
+  Distortion uiSum = 0;
+
+  for( ; iRows != 0; iRows-=iSubStep )
+  {
+    for (Int n = 0; n < iCols; n+=16 )
+    {
+      uiSum += abs( piOrg[n+ 0] - piCur[n+ 0] );
+      uiSum += abs( piOrg[n+ 1] - piCur[n+ 1] );
+      uiSum += abs( piOrg[n+ 2] - piCur[n+ 2] );
+      uiSum += abs( piOrg[n+ 3] - piCur[n+ 3] );
+      uiSum += abs( piOrg[n+ 4] - piCur[n+ 4] );
+      uiSum += abs( piOrg[n+ 5] - piCur[n+ 5] );
+      uiSum += abs( piOrg[n+ 6] - piCur[n+ 6] );
+      uiSum += abs( piOrg[n+ 7] - piCur[n+ 7] );
+      uiSum += abs( piOrg[n+ 8] - piCur[n+ 8] );
+      uiSum += abs( piOrg[n+ 9] - piCur[n+ 9] );
+      uiSum += abs( piOrg[n+10] - piCur[n+10] );
+      uiSum += abs( piOrg[n+11] - piCur[n+11] );
+      uiSum += abs( piOrg[n+12] - piCur[n+12] );
+      uiSum += abs( piOrg[n+13] - piCur[n+13] );
+      uiSum += abs( piOrg[n+14] - piCur[n+14] );
+      uiSum += abs( piOrg[n+15] - piCur[n+15] );
+    }
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  uiSum <<= iSubShift;
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+
+// SAD for width-32 blocks, unrolled; see xGetSAD4 for the iSubShift convention.
+Distortion TComRdCost::xGetSAD32( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetSADw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iSubShift  = pcDtParam->iSubShift;
+  Int  iSubStep   = ( 1 << iSubShift );
+  Int  iStrideCur = pcDtParam->iStrideCur*iSubStep;
+  Int  iStrideOrg = pcDtParam->iStrideOrg*iSubStep;
+
+  Distortion uiSum = 0;
+
+  for( ; iRows != 0; iRows-=iSubStep )
+  {
+    uiSum += abs( piOrg[0] - piCur[0] );
+    uiSum += abs( piOrg[1] - piCur[1] );
+    uiSum += abs( piOrg[2] - piCur[2] );
+    uiSum += abs( piOrg[3] - piCur[3] );
+    uiSum += abs( piOrg[4] - piCur[4] );
+    uiSum += abs( piOrg[5] - piCur[5] );
+    uiSum += abs( piOrg[6] - piCur[6] );
+    uiSum += abs( piOrg[7] - piCur[7] );
+    uiSum += abs( piOrg[8] - piCur[8] );
+    uiSum += abs( piOrg[9] - piCur[9] );
+    uiSum += abs( piOrg[10] - piCur[10] );
+    uiSum += abs( piOrg[11] - piCur[11] );
+    uiSum += abs( piOrg[12] - piCur[12] );
+    uiSum += abs( piOrg[13] - piCur[13] );
+    uiSum += abs( piOrg[14] - piCur[14] );
+    uiSum += abs( piOrg[15] - piCur[15] );
+    uiSum += abs( piOrg[16] - piCur[16] );
+    uiSum += abs( piOrg[17] - piCur[17] );
+    uiSum += abs( piOrg[18] - piCur[18] );
+    uiSum += abs( piOrg[19] - piCur[19] );
+    uiSum += abs( piOrg[20] - piCur[20] );
+    uiSum += abs( piOrg[21] - piCur[21] );
+    uiSum += abs( piOrg[22] - piCur[22] );
+    uiSum += abs( piOrg[23] - piCur[23] );
+    uiSum += abs( piOrg[24] - piCur[24] );
+    uiSum += abs( piOrg[25] - piCur[25] );
+    uiSum += abs( piOrg[26] - piCur[26] );
+    uiSum += abs( piOrg[27] - piCur[27] );
+    uiSum += abs( piOrg[28] - piCur[28] );
+    uiSum += abs( piOrg[29] - piCur[29] );
+    uiSum += abs( piOrg[30] - piCur[30] );
+    uiSum += abs( piOrg[31] - piCur[31] );
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  uiSum <<= iSubShift;
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+
+#if AMP_SAD
+// SAD for width-24 blocks (asymmetric motion partitions), unrolled;
+// see xGetSAD4 for the iSubShift convention.
+Distortion TComRdCost::xGetSAD24( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetSADw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iSubShift  = pcDtParam->iSubShift;
+  Int  iSubStep   = ( 1 << iSubShift );
+  Int  iStrideCur = pcDtParam->iStrideCur*iSubStep;
+  Int  iStrideOrg = pcDtParam->iStrideOrg*iSubStep;
+
+  Distortion uiSum = 0;
+
+  for( ; iRows != 0; iRows-=iSubStep )
+  {
+    uiSum += abs( piOrg[0] - piCur[0] );
+    uiSum += abs( piOrg[1] - piCur[1] );
+    uiSum += abs( piOrg[2] - piCur[2] );
+    uiSum += abs( piOrg[3] - piCur[3] );
+    uiSum += abs( piOrg[4] - piCur[4] );
+    uiSum += abs( piOrg[5] - piCur[5] );
+    uiSum += abs( piOrg[6] - piCur[6] );
+    uiSum += abs( piOrg[7] - piCur[7] );
+    uiSum += abs( piOrg[8] - piCur[8] );
+    uiSum += abs( piOrg[9] - piCur[9] );
+    uiSum += abs( piOrg[10] - piCur[10] );
+    uiSum += abs( piOrg[11] - piCur[11] );
+    uiSum += abs( piOrg[12] - piCur[12] );
+    uiSum += abs( piOrg[13] - piCur[13] );
+    uiSum += abs( piOrg[14] - piCur[14] );
+    uiSum += abs( piOrg[15] - piCur[15] );
+    uiSum += abs( piOrg[16] - piCur[16] );
+    uiSum += abs( piOrg[17] - piCur[17] );
+    uiSum += abs( piOrg[18] - piCur[18] );
+    uiSum += abs( piOrg[19] - piCur[19] );
+    uiSum += abs( piOrg[20] - piCur[20] );
+    uiSum += abs( piOrg[21] - piCur[21] );
+    uiSum += abs( piOrg[22] - piCur[22] );
+    uiSum += abs( piOrg[23] - piCur[23] );
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  uiSum <<= iSubShift;
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+
+#endif
+
+// Sum of absolute differences for blocks 64 pixels wide, with the per-row
+// loop fully unrolled.  Rows may be vertically subsampled by iSubShift;
+// the partial sum is rescaled afterwards.  Delegates to the
+// weighted-prediction variant when bApplyWeight is set.
+Distortion TComRdCost::xGetSAD64( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetSADw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iSubShift  = pcDtParam->iSubShift;
+  Int  iSubStep   = ( 1 << iSubShift );
+  // Strides scaled by the subsampling step so each iteration skips
+  // (iSubStep - 1) rows.
+  Int  iStrideCur = pcDtParam->iStrideCur*iSubStep;
+  Int  iStrideOrg = pcDtParam->iStrideOrg*iSubStep;
+
+  Distortion uiSum = 0;
+
+  for( ; iRows != 0; iRows-=iSubStep )
+  {
+    uiSum += abs( piOrg[0] - piCur[0] );
+    uiSum += abs( piOrg[1] - piCur[1] );
+    uiSum += abs( piOrg[2] - piCur[2] );
+    uiSum += abs( piOrg[3] - piCur[3] );
+    uiSum += abs( piOrg[4] - piCur[4] );
+    uiSum += abs( piOrg[5] - piCur[5] );
+    uiSum += abs( piOrg[6] - piCur[6] );
+    uiSum += abs( piOrg[7] - piCur[7] );
+    uiSum += abs( piOrg[8] - piCur[8] );
+    uiSum += abs( piOrg[9] - piCur[9] );
+    uiSum += abs( piOrg[10] - piCur[10] );
+    uiSum += abs( piOrg[11] - piCur[11] );
+    uiSum += abs( piOrg[12] - piCur[12] );
+    uiSum += abs( piOrg[13] - piCur[13] );
+    uiSum += abs( piOrg[14] - piCur[14] );
+    uiSum += abs( piOrg[15] - piCur[15] );
+    uiSum += abs( piOrg[16] - piCur[16] );
+    uiSum += abs( piOrg[17] - piCur[17] );
+    uiSum += abs( piOrg[18] - piCur[18] );
+    uiSum += abs( piOrg[19] - piCur[19] );
+    uiSum += abs( piOrg[20] - piCur[20] );
+    uiSum += abs( piOrg[21] - piCur[21] );
+    uiSum += abs( piOrg[22] - piCur[22] );
+    uiSum += abs( piOrg[23] - piCur[23] );
+    uiSum += abs( piOrg[24] - piCur[24] );
+    uiSum += abs( piOrg[25] - piCur[25] );
+    uiSum += abs( piOrg[26] - piCur[26] );
+    uiSum += abs( piOrg[27] - piCur[27] );
+    uiSum += abs( piOrg[28] - piCur[28] );
+    uiSum += abs( piOrg[29] - piCur[29] );
+    uiSum += abs( piOrg[30] - piCur[30] );
+    uiSum += abs( piOrg[31] - piCur[31] );
+    uiSum += abs( piOrg[32] - piCur[32] );
+    uiSum += abs( piOrg[33] - piCur[33] );
+    uiSum += abs( piOrg[34] - piCur[34] );
+    uiSum += abs( piOrg[35] - piCur[35] );
+    uiSum += abs( piOrg[36] - piCur[36] );
+    uiSum += abs( piOrg[37] - piCur[37] );
+    uiSum += abs( piOrg[38] - piCur[38] );
+    uiSum += abs( piOrg[39] - piCur[39] );
+    uiSum += abs( piOrg[40] - piCur[40] );
+    uiSum += abs( piOrg[41] - piCur[41] );
+    uiSum += abs( piOrg[42] - piCur[42] );
+    uiSum += abs( piOrg[43] - piCur[43] );
+    uiSum += abs( piOrg[44] - piCur[44] );
+    uiSum += abs( piOrg[45] - piCur[45] );
+    uiSum += abs( piOrg[46] - piCur[46] );
+    uiSum += abs( piOrg[47] - piCur[47] );
+    uiSum += abs( piOrg[48] - piCur[48] );
+    uiSum += abs( piOrg[49] - piCur[49] );
+    uiSum += abs( piOrg[50] - piCur[50] );
+    uiSum += abs( piOrg[51] - piCur[51] );
+    uiSum += abs( piOrg[52] - piCur[52] );
+    uiSum += abs( piOrg[53] - piCur[53] );
+    uiSum += abs( piOrg[54] - piCur[54] );
+    uiSum += abs( piOrg[55] - piCur[55] );
+    uiSum += abs( piOrg[56] - piCur[56] );
+    uiSum += abs( piOrg[57] - piCur[57] );
+    uiSum += abs( piOrg[58] - piCur[58] );
+    uiSum += abs( piOrg[59] - piCur[59] );
+    uiSum += abs( piOrg[60] - piCur[60] );
+    uiSum += abs( piOrg[61] - piCur[61] );
+    uiSum += abs( piOrg[62] - piCur[62] );
+    uiSum += abs( piOrg[63] - piCur[63] );
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  // Compensate for the rows skipped by subsampling.
+  uiSum <<= iSubShift;
+  // Normalise the result to 8-bit-equivalent distortion precision.
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+
+#if AMP_SAD
+// Sum of absolute differences for blocks 48 pixels wide (AMP partition
+// width), with the per-row loop fully unrolled.  Rows may be vertically
+// subsampled by iSubShift; the partial sum is rescaled afterwards.
+// Delegates to the weighted-prediction variant when bApplyWeight is set.
+Distortion TComRdCost::xGetSAD48( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetSADw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iSubShift  = pcDtParam->iSubShift;
+  Int  iSubStep   = ( 1 << iSubShift );
+  // Strides scaled by the subsampling step so each iteration skips
+  // (iSubStep - 1) rows.
+  Int  iStrideCur = pcDtParam->iStrideCur*iSubStep;
+  Int  iStrideOrg = pcDtParam->iStrideOrg*iSubStep;
+
+  Distortion uiSum = 0;
+
+  for( ; iRows != 0; iRows-=iSubStep )
+  {
+    uiSum += abs( piOrg[0] - piCur[0] );
+    uiSum += abs( piOrg[1] - piCur[1] );
+    uiSum += abs( piOrg[2] - piCur[2] );
+    uiSum += abs( piOrg[3] - piCur[3] );
+    uiSum += abs( piOrg[4] - piCur[4] );
+    uiSum += abs( piOrg[5] - piCur[5] );
+    uiSum += abs( piOrg[6] - piCur[6] );
+    uiSum += abs( piOrg[7] - piCur[7] );
+    uiSum += abs( piOrg[8] - piCur[8] );
+    uiSum += abs( piOrg[9] - piCur[9] );
+    uiSum += abs( piOrg[10] - piCur[10] );
+    uiSum += abs( piOrg[11] - piCur[11] );
+    uiSum += abs( piOrg[12] - piCur[12] );
+    uiSum += abs( piOrg[13] - piCur[13] );
+    uiSum += abs( piOrg[14] - piCur[14] );
+    uiSum += abs( piOrg[15] - piCur[15] );
+    uiSum += abs( piOrg[16] - piCur[16] );
+    uiSum += abs( piOrg[17] - piCur[17] );
+    uiSum += abs( piOrg[18] - piCur[18] );
+    uiSum += abs( piOrg[19] - piCur[19] );
+    uiSum += abs( piOrg[20] - piCur[20] );
+    uiSum += abs( piOrg[21] - piCur[21] );
+    uiSum += abs( piOrg[22] - piCur[22] );
+    uiSum += abs( piOrg[23] - piCur[23] );
+    uiSum += abs( piOrg[24] - piCur[24] );
+    uiSum += abs( piOrg[25] - piCur[25] );
+    uiSum += abs( piOrg[26] - piCur[26] );
+    uiSum += abs( piOrg[27] - piCur[27] );
+    uiSum += abs( piOrg[28] - piCur[28] );
+    uiSum += abs( piOrg[29] - piCur[29] );
+    uiSum += abs( piOrg[30] - piCur[30] );
+    uiSum += abs( piOrg[31] - piCur[31] );
+    uiSum += abs( piOrg[32] - piCur[32] );
+    uiSum += abs( piOrg[33] - piCur[33] );
+    uiSum += abs( piOrg[34] - piCur[34] );
+    uiSum += abs( piOrg[35] - piCur[35] );
+    uiSum += abs( piOrg[36] - piCur[36] );
+    uiSum += abs( piOrg[37] - piCur[37] );
+    uiSum += abs( piOrg[38] - piCur[38] );
+    uiSum += abs( piOrg[39] - piCur[39] );
+    uiSum += abs( piOrg[40] - piCur[40] );
+    uiSum += abs( piOrg[41] - piCur[41] );
+    uiSum += abs( piOrg[42] - piCur[42] );
+    uiSum += abs( piOrg[43] - piCur[43] );
+    uiSum += abs( piOrg[44] - piCur[44] );
+    uiSum += abs( piOrg[45] - piCur[45] );
+    uiSum += abs( piOrg[46] - piCur[46] );
+    uiSum += abs( piOrg[47] - piCur[47] );
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  // Compensate for the rows skipped by subsampling.
+  uiSum <<= iSubShift;
+  // Normalise the result to 8-bit-equivalent distortion precision.
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------
+// SSE
+// --------------------------------------------------------------------------------------------------------------------
+
+// Generic sum of squared errors over an iRows x iCols block (no
+// unrolling).  Each squared difference is pre-shifted by uiShift so high
+// bit depths are normalised to 8-bit-equivalent precision before
+// accumulation.  Delegates to the weighted-prediction variant when
+// bApplyWeight is set.
+Distortion TComRdCost::xGetSSE( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetSSEw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iCols   = pcDtParam->iCols;
+  Int  iStrideOrg = pcDtParam->iStrideOrg;
+  Int  iStrideCur = pcDtParam->iStrideCur;
+
+  Distortion uiSum   = 0;
+  // Shift is doubled (<< 1) because the difference is squared.
+  UInt       uiShift = DISTORTION_PRECISION_ADJUSTMENT((pcDtParam->bitDepth-8) << 1);
+
+  Intermediate_Int iTemp;
+
+  for( ; iRows != 0; iRows-- )
+  {
+    for (Int n = 0; n < iCols; n++ )
+    {
+      iTemp = piOrg[n  ] - piCur[n  ];
+      uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    }
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  return ( uiSum );
+}
+
+// Sum of squared errors for blocks 4 pixels wide, row loop unrolled.
+// Each squared difference is pre-shifted by uiShift to normalise high
+// bit depths.  Delegates to the weighted-prediction variant when
+// bApplyWeight is set.
+Distortion TComRdCost::xGetSSE4( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    assert( pcDtParam->iCols == 4 );
+    return TComRdCostWeightPrediction::xGetSSEw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iStrideOrg = pcDtParam->iStrideOrg;
+  Int  iStrideCur = pcDtParam->iStrideCur;
+
+  Distortion uiSum   = 0;
+  // Shift is doubled (<< 1) because the difference is squared.
+  UInt       uiShift = DISTORTION_PRECISION_ADJUSTMENT((pcDtParam->bitDepth-8) << 1);
+
+  Intermediate_Int  iTemp;
+
+  for( ; iRows != 0; iRows-- )
+  {
+
+    iTemp = piOrg[0] - piCur[0]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[1] - piCur[1]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[2] - piCur[2]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[3] - piCur[3]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  return ( uiSum );
+}
+
+// Sum of squared errors for blocks 8 pixels wide, row loop unrolled.
+// Each squared difference is pre-shifted by uiShift to normalise high
+// bit depths.  Delegates to the weighted-prediction variant when
+// bApplyWeight is set.
+Distortion TComRdCost::xGetSSE8( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    assert( pcDtParam->iCols == 8 );
+    return TComRdCostWeightPrediction::xGetSSEw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iStrideOrg = pcDtParam->iStrideOrg;
+  Int  iStrideCur = pcDtParam->iStrideCur;
+
+  Distortion uiSum   = 0;
+  // Shift is doubled (<< 1) because the difference is squared.
+  UInt       uiShift = DISTORTION_PRECISION_ADJUSTMENT((pcDtParam->bitDepth-8) << 1);
+
+  Intermediate_Int  iTemp;
+
+  for( ; iRows != 0; iRows-- )
+  {
+    iTemp = piOrg[0] - piCur[0]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[1] - piCur[1]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[2] - piCur[2]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[3] - piCur[3]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[4] - piCur[4]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[5] - piCur[5]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[6] - piCur[6]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[7] - piCur[7]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  return ( uiSum );
+}
+
+// Sum of squared errors for blocks 16 pixels wide, row loop unrolled.
+// Each squared difference is pre-shifted by uiShift to normalise high
+// bit depths.  Delegates to the weighted-prediction variant when
+// bApplyWeight is set.
+Distortion TComRdCost::xGetSSE16( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    assert( pcDtParam->iCols == 16 );
+    return TComRdCostWeightPrediction::xGetSSEw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iStrideOrg = pcDtParam->iStrideOrg;
+  Int  iStrideCur = pcDtParam->iStrideCur;
+
+  Distortion uiSum   = 0;
+  // Shift is doubled (<< 1) because the difference is squared.
+  UInt       uiShift = DISTORTION_PRECISION_ADJUSTMENT((pcDtParam->bitDepth-8) << 1);
+
+  Intermediate_Int  iTemp;
+
+  for( ; iRows != 0; iRows-- )
+  {
+
+    iTemp = piOrg[ 0] - piCur[ 0]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 1] - piCur[ 1]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 2] - piCur[ 2]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 3] - piCur[ 3]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 4] - piCur[ 4]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 5] - piCur[ 5]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 6] - piCur[ 6]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 7] - piCur[ 7]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 8] - piCur[ 8]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 9] - piCur[ 9]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[10] - piCur[10]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[11] - piCur[11]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[12] - piCur[12]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[13] - piCur[13]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[14] - piCur[14]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[15] - piCur[15]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  return ( uiSum );
+}
+
+// Sum of squared errors for blocks whose width is a multiple of 16: the
+// inner column loop advances 16 pixels at a time with an unrolled body.
+// Each squared difference is pre-shifted by uiShift to normalise high
+// bit depths.  Delegates to the weighted-prediction variant when
+// bApplyWeight is set.
+Distortion TComRdCost::xGetSSE16N( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetSSEw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iCols   = pcDtParam->iCols;
+  Int  iStrideOrg = pcDtParam->iStrideOrg;
+  Int  iStrideCur = pcDtParam->iStrideCur;
+
+  Distortion uiSum   = 0;
+  // Shift is doubled (<< 1) because the difference is squared.
+  UInt       uiShift = DISTORTION_PRECISION_ADJUSTMENT((pcDtParam->bitDepth-8) << 1);
+
+  Intermediate_Int  iTemp;
+
+  for( ; iRows != 0; iRows-- )
+  {
+    for (Int n = 0; n < iCols; n+=16 )
+    {
+
+      iTemp = piOrg[n+ 0] - piCur[n+ 0]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+ 1] - piCur[n+ 1]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+ 2] - piCur[n+ 2]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+ 3] - piCur[n+ 3]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+ 4] - piCur[n+ 4]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+ 5] - piCur[n+ 5]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+ 6] - piCur[n+ 6]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+ 7] - piCur[n+ 7]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+ 8] - piCur[n+ 8]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+ 9] - piCur[n+ 9]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+10] - piCur[n+10]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+11] - piCur[n+11]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+12] - piCur[n+12]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+13] - piCur[n+13]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+14] - piCur[n+14]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+      iTemp = piOrg[n+15] - piCur[n+15]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+
+    }
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  return ( uiSum );
+}
+
+// Sum of squared errors for blocks 32 pixels wide, row loop unrolled.
+// Each squared difference is pre-shifted by uiShift to normalise high
+// bit depths.  Delegates to the weighted-prediction variant when
+// bApplyWeight is set.
+Distortion TComRdCost::xGetSSE32( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    assert( pcDtParam->iCols == 32 );
+    return TComRdCostWeightPrediction::xGetSSEw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iStrideOrg = pcDtParam->iStrideOrg;
+  Int  iStrideCur = pcDtParam->iStrideCur;
+
+  Distortion uiSum   = 0;
+  // Shift is doubled (<< 1) because the difference is squared.
+  UInt       uiShift = DISTORTION_PRECISION_ADJUSTMENT((pcDtParam->bitDepth-8) << 1);
+
+  Intermediate_Int  iTemp;
+
+  for( ; iRows != 0; iRows-- )
+  {
+
+    iTemp = piOrg[ 0] - piCur[ 0]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 1] - piCur[ 1]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 2] - piCur[ 2]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 3] - piCur[ 3]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 4] - piCur[ 4]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 5] - piCur[ 5]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 6] - piCur[ 6]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 7] - piCur[ 7]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 8] - piCur[ 8]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 9] - piCur[ 9]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[10] - piCur[10]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[11] - piCur[11]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[12] - piCur[12]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[13] - piCur[13]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[14] - piCur[14]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[15] - piCur[15]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[16] - piCur[16]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[17] - piCur[17]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[18] - piCur[18]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[19] - piCur[19]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[20] - piCur[20]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[21] - piCur[21]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[22] - piCur[22]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[23] - piCur[23]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[24] - piCur[24]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[25] - piCur[25]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[26] - piCur[26]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[27] - piCur[27]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[28] - piCur[28]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[29] - piCur[29]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[30] - piCur[30]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[31] - piCur[31]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  return ( uiSum );
+}
+
+// Sum of squared errors for blocks 64 pixels wide, row loop unrolled.
+// Each squared difference is pre-shifted by uiShift to normalise high
+// bit depths.  Delegates to the weighted-prediction variant when
+// bApplyWeight is set.
+Distortion TComRdCost::xGetSSE64( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    assert( pcDtParam->iCols == 64 );
+    return TComRdCostWeightPrediction::xGetSSEw( pcDtParam );
+  }
+  const Pel* piOrg   = pcDtParam->pOrg;
+  const Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iStrideOrg = pcDtParam->iStrideOrg;
+  Int  iStrideCur = pcDtParam->iStrideCur;
+
+  Distortion uiSum   = 0;
+  // Shift is doubled (<< 1) because the difference is squared.
+  UInt       uiShift = DISTORTION_PRECISION_ADJUSTMENT((pcDtParam->bitDepth-8) << 1);
+
+  Intermediate_Int  iTemp;
+
+  for( ; iRows != 0; iRows-- )
+  {
+    iTemp = piOrg[ 0] - piCur[ 0]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 1] - piCur[ 1]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 2] - piCur[ 2]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 3] - piCur[ 3]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 4] - piCur[ 4]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 5] - piCur[ 5]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 6] - piCur[ 6]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 7] - piCur[ 7]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 8] - piCur[ 8]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[ 9] - piCur[ 9]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[10] - piCur[10]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[11] - piCur[11]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[12] - piCur[12]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[13] - piCur[13]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[14] - piCur[14]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[15] - piCur[15]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[16] - piCur[16]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[17] - piCur[17]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[18] - piCur[18]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[19] - piCur[19]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[20] - piCur[20]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[21] - piCur[21]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[22] - piCur[22]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[23] - piCur[23]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[24] - piCur[24]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[25] - piCur[25]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[26] - piCur[26]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[27] - piCur[27]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[28] - piCur[28]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[29] - piCur[29]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[30] - piCur[30]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[31] - piCur[31]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[32] - piCur[32]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[33] - piCur[33]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[34] - piCur[34]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[35] - piCur[35]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[36] - piCur[36]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[37] - piCur[37]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[38] - piCur[38]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[39] - piCur[39]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[40] - piCur[40]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[41] - piCur[41]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[42] - piCur[42]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[43] - piCur[43]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[44] - piCur[44]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[45] - piCur[45]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[46] - piCur[46]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[47] - piCur[47]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[48] - piCur[48]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[49] - piCur[49]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[50] - piCur[50]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[51] - piCur[51]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[52] - piCur[52]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[53] - piCur[53]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[54] - piCur[54]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[55] - piCur[55]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[56] - piCur[56]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[57] - piCur[57]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[58] - piCur[58]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[59] - piCur[59]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[60] - piCur[60]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[61] - piCur[61]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[62] - piCur[62]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+    iTemp = piOrg[63] - piCur[63]; uiSum += Distortion(( iTemp * iTemp ) >> uiShift);
+
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  return ( uiSum );
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// HADAMARD with step (used in fractional search)
+// --------------------------------------------------------------------------------------------------------------------
+
+// SATD of a single 2x2 block: 2x2 Hadamard butterfly on the pixel
+// differences, then sum of absolute transform coefficients.  Only
+// iStep == 1 is supported (asserted).
+Distortion TComRdCost::xCalcHADs2x2( Pel *piOrg, Pel *piCur, Int iStrideOrg, Int iStrideCur, Int iStep )
+{
+  Distortion satd = 0;
+  TCoeff diff[4], m[4];
+  assert( iStep == 1 );
+  // Pixel differences of the 2x2 block (row-major).
+  diff[0] = piOrg[0             ] - piCur[0];
+  diff[1] = piOrg[1             ] - piCur[1];
+  diff[2] = piOrg[iStrideOrg    ] - piCur[0 + iStrideCur];
+  diff[3] = piOrg[iStrideOrg + 1] - piCur[1 + iStrideCur];
+  // Vertical butterfly.
+  m[0] = diff[0] + diff[2];
+  m[1] = diff[1] + diff[3];
+  m[2] = diff[0] - diff[2];
+  m[3] = diff[1] - diff[3];
+
+  // Horizontal butterfly folded into the absolute-value accumulation.
+  satd += abs(m[0] + m[1]);
+  satd += abs(m[0] - m[1]);
+  satd += abs(m[2] + m[3]);
+  satd += abs(m[2] - m[3]);
+
+  return satd;
+}
+
+// SATD of a single 4x4 block: 4x4 Hadamard transform of the pixel
+// differences (two butterfly stages per direction), then the sum of
+// absolute coefficients, rounded and halved for normalisation.  Only
+// iStep == 1 is supported (asserted).
+Distortion TComRdCost::xCalcHADs4x4( Pel *piOrg, Pel *piCur, Int iStrideOrg, Int iStrideCur, Int iStep )
+{
+  Int k;
+  Distortion satd = 0;
+  TCoeff diff[16], m[16], d[16];
+
+  assert( iStep == 1 );
+  // Gather the 4x4 difference block, one row per iteration.
+  for( k = 0; k < 16; k+=4 )
+  {
+    diff[k+0] = piOrg[0] - piCur[0];
+    diff[k+1] = piOrg[1] - piCur[1];
+    diff[k+2] = piOrg[2] - piCur[2];
+    diff[k+3] = piOrg[3] - piCur[3];
+
+    piCur += iStrideCur;
+    piOrg += iStrideOrg;
+  }
+
+  /*===== hadamard transform =====*/
+  // Stage 1: vertical butterflies across rows.
+  m[ 0] = diff[ 0] + diff[12];
+  m[ 1] = diff[ 1] + diff[13];
+  m[ 2] = diff[ 2] + diff[14];
+  m[ 3] = diff[ 3] + diff[15];
+  m[ 4] = diff[ 4] + diff[ 8];
+  m[ 5] = diff[ 5] + diff[ 9];
+  m[ 6] = diff[ 6] + diff[10];
+  m[ 7] = diff[ 7] + diff[11];
+  m[ 8] = diff[ 4] - diff[ 8];
+  m[ 9] = diff[ 5] - diff[ 9];
+  m[10] = diff[ 6] - diff[10];
+  m[11] = diff[ 7] - diff[11];
+  m[12] = diff[ 0] - diff[12];
+  m[13] = diff[ 1] - diff[13];
+  m[14] = diff[ 2] - diff[14];
+  m[15] = diff[ 3] - diff[15];
+
+  // Stage 2: complete the vertical transform.
+  d[ 0] = m[ 0] + m[ 4];
+  d[ 1] = m[ 1] + m[ 5];
+  d[ 2] = m[ 2] + m[ 6];
+  d[ 3] = m[ 3] + m[ 7];
+  d[ 4] = m[ 8] + m[12];
+  d[ 5] = m[ 9] + m[13];
+  d[ 6] = m[10] + m[14];
+  d[ 7] = m[11] + m[15];
+  d[ 8] = m[ 0] - m[ 4];
+  d[ 9] = m[ 1] - m[ 5];
+  d[10] = m[ 2] - m[ 6];
+  d[11] = m[ 3] - m[ 7];
+  d[12] = m[12] - m[ 8];
+  d[13] = m[13] - m[ 9];
+  d[14] = m[14] - m[10];
+  d[15] = m[15] - m[11];
+
+  // Stage 3: horizontal butterflies within rows.
+  m[ 0] = d[ 0] + d[ 3];
+  m[ 1] = d[ 1] + d[ 2];
+  m[ 2] = d[ 1] - d[ 2];
+  m[ 3] = d[ 0] - d[ 3];
+  m[ 4] = d[ 4] + d[ 7];
+  m[ 5] = d[ 5] + d[ 6];
+  m[ 6] = d[ 5] - d[ 6];
+  m[ 7] = d[ 4] - d[ 7];
+  m[ 8] = d[ 8] + d[11];
+  m[ 9] = d[ 9] + d[10];
+  m[10] = d[ 9] - d[10];
+  m[11] = d[ 8] - d[11];
+  m[12] = d[12] + d[15];
+  m[13] = d[13] + d[14];
+  m[14] = d[13] - d[14];
+  m[15] = d[12] - d[15];
+
+  // Stage 4: complete the horizontal transform.
+  d[ 0] = m[ 0] + m[ 1];
+  d[ 1] = m[ 0] - m[ 1];
+  d[ 2] = m[ 2] + m[ 3];
+  d[ 3] = m[ 3] - m[ 2];
+  d[ 4] = m[ 4] + m[ 5];
+  d[ 5] = m[ 4] - m[ 5];
+  d[ 6] = m[ 6] + m[ 7];
+  d[ 7] = m[ 7] - m[ 6];
+  d[ 8] = m[ 8] + m[ 9];
+  d[ 9] = m[ 8] - m[ 9];
+  d[10] = m[10] + m[11];
+  d[11] = m[11] - m[10];
+  d[12] = m[12] + m[13];
+  d[13] = m[12] - m[13];
+  d[14] = m[14] + m[15];
+  d[15] = m[15] - m[14];
+
+  // Sum of absolute transform coefficients.
+  for (k=0; k<16; ++k)
+  {
+    satd += abs(d[k]);
+  }
+  // Rounded normalisation by 2.
+  satd = ((satd+1)>>1);
+
+  return satd;
+}
+
+// SATD of a single 8x8 block: 8x8 Hadamard transform of the pixel
+// differences (three butterfly stages per direction, horizontal then
+// vertical), then the sum of absolute coefficients, rounded and divided
+// by 4 for normalisation.  Only iStep == 1 is supported (asserted).
+Distortion TComRdCost::xCalcHADs8x8( Pel *piOrg, Pel *piCur, Int iStrideOrg, Int iStrideCur, Int iStep )
+{
+  Int k, i, j, jj;
+  Distortion sad = 0;
+  TCoeff diff[64], m1[8][8], m2[8][8], m3[8][8];
+  assert( iStep == 1 );
+  // Gather the 8x8 difference block, one row per iteration.
+  for( k = 0; k < 64; k += 8 )
+  {
+    diff[k+0] = piOrg[0] - piCur[0];
+    diff[k+1] = piOrg[1] - piCur[1];
+    diff[k+2] = piOrg[2] - piCur[2];
+    diff[k+3] = piOrg[3] - piCur[3];
+    diff[k+4] = piOrg[4] - piCur[4];
+    diff[k+5] = piOrg[5] - piCur[5];
+    diff[k+6] = piOrg[6] - piCur[6];
+    diff[k+7] = piOrg[7] - piCur[7];
+
+    piCur += iStrideCur;
+    piOrg += iStrideOrg;
+  }
+
+  //horizontal
+  for (j=0; j < 8; j++)
+  {
+    jj = j << 3;  // row offset into the flat diff[] array
+    m2[j][0] = diff[jj  ] + diff[jj+4];
+    m2[j][1] = diff[jj+1] + diff[jj+5];
+    m2[j][2] = diff[jj+2] + diff[jj+6];
+    m2[j][3] = diff[jj+3] + diff[jj+7];
+    m2[j][4] = diff[jj  ] - diff[jj+4];
+    m2[j][5] = diff[jj+1] - diff[jj+5];
+    m2[j][6] = diff[jj+2] - diff[jj+6];
+    m2[j][7] = diff[jj+3] - diff[jj+7];
+
+    m1[j][0] = m2[j][0] + m2[j][2];
+    m1[j][1] = m2[j][1] + m2[j][3];
+    m1[j][2] = m2[j][0] - m2[j][2];
+    m1[j][3] = m2[j][1] - m2[j][3];
+    m1[j][4] = m2[j][4] + m2[j][6];
+    m1[j][5] = m2[j][5] + m2[j][7];
+    m1[j][6] = m2[j][4] - m2[j][6];
+    m1[j][7] = m2[j][5] - m2[j][7];
+
+    m2[j][0] = m1[j][0] + m1[j][1];
+    m2[j][1] = m1[j][0] - m1[j][1];
+    m2[j][2] = m1[j][2] + m1[j][3];
+    m2[j][3] = m1[j][2] - m1[j][3];
+    m2[j][4] = m1[j][4] + m1[j][5];
+    m2[j][5] = m1[j][4] - m1[j][5];
+    m2[j][6] = m1[j][6] + m1[j][7];
+    m2[j][7] = m1[j][6] - m1[j][7];
+  }
+
+  //vertical
+  for (i=0; i < 8; i++)
+  {
+    m3[0][i] = m2[0][i] + m2[4][i];
+    m3[1][i] = m2[1][i] + m2[5][i];
+    m3[2][i] = m2[2][i] + m2[6][i];
+    m3[3][i] = m2[3][i] + m2[7][i];
+    m3[4][i] = m2[0][i] - m2[4][i];
+    m3[5][i] = m2[1][i] - m2[5][i];
+    m3[6][i] = m2[2][i] - m2[6][i];
+    m3[7][i] = m2[3][i] - m2[7][i];
+
+    m1[0][i] = m3[0][i] + m3[2][i];
+    m1[1][i] = m3[1][i] + m3[3][i];
+    m1[2][i] = m3[0][i] - m3[2][i];
+    m1[3][i] = m3[1][i] - m3[3][i];
+    m1[4][i] = m3[4][i] + m3[6][i];
+    m1[5][i] = m3[5][i] + m3[7][i];
+    m1[6][i] = m3[4][i] - m3[6][i];
+    m1[7][i] = m3[5][i] - m3[7][i];
+
+    m2[0][i] = m1[0][i] + m1[1][i];
+    m2[1][i] = m1[0][i] - m1[1][i];
+    m2[2][i] = m1[2][i] + m1[3][i];
+    m2[3][i] = m1[2][i] - m1[3][i];
+    m2[4][i] = m1[4][i] + m1[5][i];
+    m2[5][i] = m1[4][i] - m1[5][i];
+    m2[6][i] = m1[6][i] + m1[7][i];
+    m2[7][i] = m1[6][i] - m1[7][i];
+  }
+
+  // Sum of absolute transform coefficients.
+  for (i = 0; i < 8; i++)
+  {
+    for (j = 0; j < 8; j++)
+    {
+      sad += abs(m2[i][j]);
+    }
+  }
+
+  // Rounded normalisation by 4.
+  sad=((sad+2)>>2);
+
+  return sad;
+}
+
+
+// Hadamard (SATD) distortion over an iRows x iCols block: tiles the block
+// with the largest fitting transform size (8x8, else 4x4, else 2x2) and
+// sums the per-tile SATDs.  Dimensions must be divisible by at least 2 in
+// both directions (asserted otherwise).  Delegates to the
+// weighted-prediction variant when bApplyWeight is set.  The final sum is
+// shifted down to 8-bit-equivalent precision.
+Distortion TComRdCost::xGetHADs( DistParam* pcDtParam )
+{
+  if ( pcDtParam->bApplyWeight )
+  {
+    return TComRdCostWeightPrediction::xGetHADsw( pcDtParam );
+  }
+  Pel* piOrg   = pcDtParam->pOrg;
+  Pel* piCur   = pcDtParam->pCur;
+  Int  iRows   = pcDtParam->iRows;
+  Int  iCols   = pcDtParam->iCols;
+  Int  iStrideCur = pcDtParam->iStrideCur;
+  Int  iStrideOrg = pcDtParam->iStrideOrg;
+  // iStep scales the horizontal step into piCur for each tile.
+  Int  iStep  = pcDtParam->iStep;
+
+  Int  x, y;
+
+  Distortion uiSum = 0;
+
+  if( ( iRows % 8 == 0) && (iCols % 8 == 0) )
+  {
+    // 8x8 tiling: advance 8 rows per outer iteration.
+    Int  iOffsetOrg = iStrideOrg<<3;
+    Int  iOffsetCur = iStrideCur<<3;
+    for ( y=0; y<iRows; y+= 8 )
+    {
+      for ( x=0; x<iCols; x+= 8 )
+      {
+        uiSum += xCalcHADs8x8( &piOrg[x], &piCur[x*iStep], iStrideOrg, iStrideCur, iStep );
+      }
+      piOrg += iOffsetOrg;
+      piCur += iOffsetCur;
+    }
+  }
+  else if( ( iRows % 4 == 0) && (iCols % 4 == 0) )
+  {
+    // 4x4 tiling: advance 4 rows per outer iteration.
+    Int  iOffsetOrg = iStrideOrg<<2;
+    Int  iOffsetCur = iStrideCur<<2;
+
+    for ( y=0; y<iRows; y+= 4 )
+    {
+      for ( x=0; x<iCols; x+= 4 )
+      {
+        uiSum += xCalcHADs4x4( &piOrg[x], &piCur[x*iStep], iStrideOrg, iStrideCur, iStep );
+      }
+      piOrg += iOffsetOrg;
+      piCur += iOffsetCur;
+    }
+  }
+  else if( ( iRows % 2 == 0) && (iCols % 2 == 0) )
+  {
+    // 2x2 tiling: advance 2 rows per outer iteration.
+    Int  iOffsetOrg = iStrideOrg<<1;
+    Int  iOffsetCur = iStrideCur<<1;
+    for ( y=0; y<iRows; y+=2 )
+    {
+      for ( x=0; x<iCols; x+=2 )
+      {
+        uiSum += xCalcHADs2x2( &piOrg[x], &piCur[x*iStep], iStrideOrg, iStrideCur, iStep );
+      }
+      piOrg += iOffsetOrg;
+      piCur += iOffsetCur;
+    }
+  }
+  else
+  {
+    // Unsupported block dimensions.
+    assert(false);
+  }
+
+  // Normalise the result to 8-bit-equivalent distortion precision.
+  return ( uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8) );
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComRdCost.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,230 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComRdCost.h
+    \brief    RD cost computation classes (header)
+*/
+
+#ifndef __TCOMRDCOST__
+#define __TCOMRDCOST__
+
+
+#include "CommonDef.h"
+#include "TComPattern.h"
+#include "TComMv.h"
+
+#include "TComSlice.h"
+#include "TComRdCostWeightPrediction.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+class DistParam;
+class TComPattern;
+
+// ====================================================================================================================
+// Type definition
+// ====================================================================================================================
+
+// for function pointer
+typedef Distortion (*FpDistFunc) (DistParam*); // TODO: can this pointer be replaced with a reference? - there are no NULL checks on pointer.
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// distortion parameter class
+/// Bundles everything a distortion function (FpDistFunc) needs: the two
+/// sample buffers, their strides, the block geometry and weighting state.
+class DistParam
+{
+public:
+  Pel*  pOrg;              // original (source) samples
+  Pel*  pCur;              // current (reference/predicted) samples
+  Int   iStrideOrg;        // stride of pOrg, in samples
+  Int   iStrideCur;        // stride of pCur, in samples
+  Int   iRows;             // block height
+  Int   iCols;             // block width
+  Int   iStep;             // horizontal step applied to pCur indices
+  FpDistFunc DistFunc;     // distortion function to invoke for this block
+  Int   bitDepth;          // sample bit depth (used to scale the distortion)
+
+  Bool            bApplyWeight;     // whether weighted prediction is used or not
+  WPScalingParam  *wpCur;           // weighted prediction scaling parameters for current ref
+  ComponentID     compIdx;          // component the weighting parameters refer to
+
+  // (vertical) subsampling shift (for reducing complexity)
+  // - 0 = no subsampling, 1 = even rows, 2 = every 4th, etc.
+  Int   iSubShift;
+
+  // Default-construct with null buffers, zero geometry and unit step.
+  // NOTE(review): bApplyWeight, wpCur and compIdx are left uninitialized here;
+  // callers appear expected to set them before use -- confirm.
+  DistParam()
+  {
+    pOrg = NULL;
+    pCur = NULL;
+    iStrideOrg = 0;
+    iStrideCur = 0;
+    iRows = 0;
+    iCols = 0;
+    iStep = 1;
+    DistFunc = NULL;
+    iSubShift = 0;
+    bitDepth = 0;
+  }
+};
+
+/// RD cost computation class
+/// Holds the distortion function table, lambda values and motion-vector
+/// predictor state used to evaluate rate-distortion costs.
+class TComRdCost
+{
+private:
+  // for distortion
+
+  FpDistFunc              m_afpDistortFunc[DF_TOTAL_FUNCTIONS]; // [eDFunc]
+  CostMode                m_costMode;
+  Double                  m_distortionWeight[MAX_NUM_COMPONENT]; // only chroma values are used.
+  Double                  m_dLambda;
+  Double                  m_sqrtLambda;
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+  Double                  m_dLambdaMotionSAD[2 /* 0=standard, 1=for transquant bypass when mixed-lossless cost evaluation enabled*/];
+  Double                  m_dLambdaMotionSSE[2 /* 0=standard, 1=for transquant bypass when mixed-lossless cost evaluation enabled*/];
+#else
+  UInt                    m_uiLambdaMotionSAD[2 /* 0=standard, 1=for transquant bypass when mixed-lossless cost evaluation enabled*/];
+  UInt                    m_uiLambdaMotionSSE[2 /* 0=standard, 1=for transquant bypass when mixed-lossless cost evaluation enabled*/];
+#endif
+  Double                  m_dFrameLambda;
+
+  // for motion cost
+  TComMv                  m_mvPredictor;
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+  Double                  m_dCost;
+#else
+  UInt                    m_uiCost;
+#endif
+  Int                     m_iCostScale;
+
+public:
+  TComRdCost();
+  virtual ~TComRdCost();
+
+  Double  calcRdCost  ( UInt   uiBits, Distortion uiDistortion, Bool bFlag = false, DFunc eDFunc = DF_DEFAULT );
+  Double  calcRdCost64( UInt64 uiBits, UInt64 uiDistortion, Bool bFlag = false, DFunc eDFunc = DF_DEFAULT );
+
+  Void    setDistortionWeight  ( const ComponentID compID, const Double distortionWeight ) { m_distortionWeight[compID] = distortionWeight; }
+  Void    setLambda      ( Double dLambda );
+  Void    setFrameLambda ( Double dLambda ) { m_dFrameLambda = dLambda; }
+
+  Double  getSqrtLambda ()   { return m_sqrtLambda; }
+
+  Double  getLambda() { return m_dLambda; }
+  Double  getChromaWeight () { return ((m_distortionWeight[COMPONENT_Cb] + m_distortionWeight[COMPONENT_Cr]) / 2.0); }
+
+  Void    setCostMode(CostMode   m )    { m_costMode = m; }
+
+  // Distortion Functions
+  Void    init();
+
+  Void    setDistParam( UInt uiBlkWidth, UInt uiBlkHeight, DFunc eDFunc, DistParam& rcDistParam );
+  Void    setDistParam( TComPattern* pcPatternKey, Pel* piRefY, Int iRefStride,            DistParam& rcDistParam );
+  Void    setDistParam( TComPattern* pcPatternKey, Pel* piRefY, Int iRefStride, Int iStep, DistParam& rcDistParam, Bool bHADME=false );
+  Void    setDistParam( DistParam& rcDP, Int bitDepth, Pel* p1, Int iStride1, Pel* p2, Int iStride2, Int iWidth, Int iHeight, Bool bHadamard = false );
+
+  Distortion calcHAD(Int bitDepth, Pel* pi0, Int iStride0, Pel* pi1, Int iStride1, Int iWidth, Int iHeight );
+
+  // for motion cost
+  UInt    xGetComponentBits( Int iVal );
+  // Select the lambda (SAD or SSE flavor) used for subsequent getCost() calls.
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+  Void    getMotionCost( Bool bSad, Int iAdd, Bool bIsTransquantBypass ) { m_dCost = (bSad ? m_dLambdaMotionSAD[(bIsTransquantBypass && m_costMode==COST_MIXED_LOSSLESS_LOSSY_CODING) ?1:0] + iAdd : m_dLambdaMotionSSE[(bIsTransquantBypass && m_costMode==COST_MIXED_LOSSLESS_LOSSY_CODING)?1:0] + iAdd); }
+#else
+  Void    getMotionCost( Bool bSad, Int iAdd, Bool bIsTransquantBypass ) { m_uiCost = (bSad ? m_uiLambdaMotionSAD[(bIsTransquantBypass && m_costMode==COST_MIXED_LOSSLESS_LOSSY_CODING) ?1:0] + iAdd : m_uiLambdaMotionSSE[(bIsTransquantBypass && m_costMode==COST_MIXED_LOSSLESS_LOSSY_CODING)?1:0] + iAdd); }
+#endif
+  Void    setPredictor( TComMv& rcMv )
+  {
+    m_mvPredictor = rcMv;
+  }
+  Void    setCostScale( Int iCostScale )    { m_iCostScale = iCostScale; }
+  // Motion cost of MV (x,y): lambda * bits, with the lambda stored in 16.16
+  // fixed point (hence the >>16 / division by 65536).
+  __inline Distortion getCost( Int x, Int y )
+  {
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+    return Distortion((m_dCost * getBits(x, y)) / 65536.0);
+#else
+    return m_uiCost * getBits(x, y) >> 16;
+#endif
+  }
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+  Distortion getCost( UInt b )                 { return Distortion(( m_dCost * b ) / 65536.0); }
+#else
+  Distortion getCost( UInt b )                 { return ( m_uiCost * b ) >> 16; }
+#endif
+  // Bits needed to code MV (x,y) relative to the stored predictor.
+  UInt    getBits( Int x, Int y )
+  {
+    return xGetComponentBits((x << m_iCostScale) - m_mvPredictor.getHor())
+    +      xGetComponentBits((y << m_iCostScale) - m_mvPredictor.getVer());
+  }
+
+private:
+
+  static Distortion xGetSSE           ( DistParam* pcDtParam );
+  static Distortion xGetSSE4          ( DistParam* pcDtParam );
+  static Distortion xGetSSE8          ( DistParam* pcDtParam );
+  static Distortion xGetSSE16         ( DistParam* pcDtParam );
+  static Distortion xGetSSE32         ( DistParam* pcDtParam );
+  static Distortion xGetSSE64         ( DistParam* pcDtParam );
+  static Distortion xGetSSE16N        ( DistParam* pcDtParam );
+
+  static Distortion xGetSAD           ( DistParam* pcDtParam );
+  static Distortion xGetSAD4          ( DistParam* pcDtParam );
+  static Distortion xGetSAD8          ( DistParam* pcDtParam );
+  static Distortion xGetSAD16         ( DistParam* pcDtParam );
+  static Distortion xGetSAD32         ( DistParam* pcDtParam );
+  static Distortion xGetSAD64         ( DistParam* pcDtParam );
+  static Distortion xGetSAD16N        ( DistParam* pcDtParam );
+
+#if AMP_SAD
+  static Distortion xGetSAD12         ( DistParam* pcDtParam );
+  static Distortion xGetSAD24         ( DistParam* pcDtParam );
+  static Distortion xGetSAD48         ( DistParam* pcDtParam );
+
+#endif
+
+  static Distortion xGetHADs          ( DistParam* pcDtParam );
+  static Distortion xCalcHADs2x2      ( Pel *piOrg, Pel *piCurr, Int iStrideOrg, Int iStrideCur, Int iStep );
+  static Distortion xCalcHADs4x4      ( Pel *piOrg, Pel *piCurr, Int iStrideOrg, Int iStrideCur, Int iStep );
+  static Distortion xCalcHADs8x8      ( Pel *piOrg, Pel *piCurr, Int iStrideOrg, Int iStrideCur, Int iStep );
+
+
+public:
+
+  Distortion   getDistPart(Int bitDepth, Pel* piCur, Int iCurStride,  Pel* piOrg, Int iOrgStride, UInt uiBlkWidth, UInt uiBlkHeight, const ComponentID compID, DFunc eDFunc = DF_SSE );
+
+};// END CLASS DEFINITION TComRdCost
+
+//! \}
+
+#endif // __TCOMRDCOST__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComRdCostWeightPrediction.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,489 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComRdCostWeightPrediction.cpp
+    \brief    RD cost computation class with Weighted-Prediction
+*/
+
+#include <math.h>
+#include <assert.h>
+#include "TComRdCost.h"
+#include "TComRdCostWeightPrediction.h"
+
+static Distortion xCalcHADs2x2w( const WPScalingParam &wpCur, const Pel *piOrg, const Pel *piCurr, Int iStrideOrg, Int iStrideCur, Int iStep );
+static Distortion xCalcHADs4x4w( const WPScalingParam &wpCur, const Pel *piOrg, const Pel *piCurr, Int iStrideOrg, Int iStrideCur, Int iStep );
+static Distortion xCalcHADs8x8w( const WPScalingParam &wpCur, const Pel *piOrg, const Pel *piCurr, Int iStrideOrg, Int iStrideCur, Int iStep );
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// SAD
+// --------------------------------------------------------------------------------------------------------------------
+/** get weighted SAD cost
+ *  Applies the weighted-prediction mapping
+ *    pred = ((w0 * cur + round) >> shift) + offset
+ *  to each reference sample, then accumulates |org - pred|.
+ * \param pcDtParam distortion parameters; wpCur[compIdx] supplies w0/offset/shift/round
+ * \returns Distortion (weighted SAD, right-shifted according to bit depth)
+ */
+Distortion TComRdCostWeightPrediction::xGetSADw( DistParam* pcDtParam )
+{
+  const Pel            *piOrg      = pcDtParam->pOrg;
+  const Pel            *piCur      = pcDtParam->pCur;
+  const Int             iCols      = pcDtParam->iCols;
+  const Int             iStrideCur = pcDtParam->iStrideCur;
+  const Int             iStrideOrg = pcDtParam->iStrideOrg;
+  const ComponentID     compID     = pcDtParam->compIdx;
+
+  assert(compID<MAX_NUM_COMPONENT);
+
+  const WPScalingParam &wpCur      = pcDtParam->wpCur[compID];
+
+  const Int             w0         = wpCur.w;
+  const Int             offset     = wpCur.offset;
+  const Int             shift      = wpCur.shift;
+  const Int             round      = wpCur.round;
+
+  Distortion uiSum = 0;
+
+  for(Int iRows = pcDtParam->iRows; iRows != 0; iRows-- )
+  {
+    for (Int n = 0; n < iCols; n++ )
+    {
+      const Pel pred = ( (w0*piCur[n] + round) >> shift ) + offset ;
+
+      uiSum += abs( piOrg[n] - pred );
+    }
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  pcDtParam->compIdx = MAX_NUM_COMPONENT;  // reset for DEBUG (assert test)
+
+  return uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8);
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// SSE
+// --------------------------------------------------------------------------------------------------------------------
+/** get weighted SSD cost
+ *  Applies the weighted-prediction mapping
+ *    pred = ((w0 * cur + round) >> shift) + offset
+ *  to each reference sample, then accumulates squared residuals, each
+ *  pre-scaled by the bit-depth-dependent distortion shift.
+ * \param pcDtParam distortion parameters; wpCur[compIdx] supplies the weights
+ * \returns Distortion (weighted SSE)
+ */
+Distortion TComRdCostWeightPrediction::xGetSSEw( DistParam* pcDtParam )
+{
+  const Pel            *piOrg           = pcDtParam->pOrg;
+  const Pel            *piCur           = pcDtParam->pCur;
+  const Int             iCols           = pcDtParam->iCols;
+  const Int             iStrideOrg      = pcDtParam->iStrideOrg;
+  const Int             iStrideCur      = pcDtParam->iStrideCur;
+  const ComponentID     compIdx         = pcDtParam->compIdx;
+
+  assert( pcDtParam->iSubShift == 0 ); // NOTE: what is this protecting?
+
+  assert(compIdx<MAX_NUM_COMPONENT);
+  const WPScalingParam &wpCur           = pcDtParam->wpCur[compIdx];
+  const Int             w0              = wpCur.w;
+  const Int             offset          = wpCur.offset;
+  const Int             shift           = wpCur.shift;
+  const Int             round           = wpCur.round;
+  // Squared residuals double the precision, hence the (bitDepth-8)<<1 shift.
+  const UInt            distortionShift = DISTORTION_PRECISION_ADJUSTMENT((pcDtParam->bitDepth-8) << 1);
+
+  Distortion sum = 0;
+
+  for(Int iRows = pcDtParam->iRows ; iRows != 0; iRows-- )
+  {
+    for (Int n = 0; n < iCols; n++ )
+    {
+      const Pel pred     = ( (w0*piCur[n] + round) >> shift ) + offset ;
+      const Pel residual = piOrg[n] - pred;
+      sum += ( Distortion(residual) * Distortion(residual) ) >> distortionShift;
+    }
+    piOrg += iStrideOrg;
+    piCur += iStrideCur;
+  }
+
+  pcDtParam->compIdx = MAX_NUM_COMPONENT; // reset for DEBUG (assert test)
+
+  return sum;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// HADAMARD with step (used in fractional search)
+// --------------------------------------------------------------------------------------------------------------------
+/** get weighted Hadamard cost for 2x2 block
+ *  Weighted residuals are formed first (pred = ((w0*cur + round) >> shift) + offset),
+ *  then a 2x2 Hadamard butterfly is applied and the absolute coefficients summed.
+ * \param wpCur weighted-prediction parameters (w/offset/shift/round)
+ * \param *piOrg original samples
+ * \param *piCur reference samples (indexed with iStep)
+ * \param iStrideOrg stride of piOrg
+ * \param iStrideCur stride of piCur
+ * \param iStep horizontal step in piCur
+ * \returns Distortion (2x2 SATD, unnormalized)
+ */
+Distortion xCalcHADs2x2w( const WPScalingParam &wpCur, const Pel *piOrg, const Pel *piCur, Int iStrideOrg, Int iStrideCur, Int iStep )
+{
+  const Int round  = wpCur.round;
+  const Int shift  = wpCur.shift;
+  const Int offset = wpCur.offset;
+  const Int w0     = wpCur.w;
+
+  Distortion satd  = 0;
+  TCoeff     diff[4];
+  TCoeff     m[4];
+
+  Pel   pred;
+
+  // Weighted residuals for the four samples (row-major).
+  pred    = ( (w0*piCur[0*iStep             ] + round) >> shift ) + offset ;
+  diff[0] = piOrg[0             ] - pred;
+  pred    = ( (w0*piCur[1*iStep             ] + round) >> shift ) + offset ;
+  diff[1] = piOrg[1             ] - pred;
+  pred    = ( (w0*piCur[0*iStep + iStrideCur] + round) >> shift ) + offset ;
+  diff[2] = piOrg[iStrideOrg    ] - pred;
+  pred    = ( (w0*piCur[1*iStep + iStrideCur] + round) >> shift ) + offset ;
+  diff[3] = piOrg[iStrideOrg + 1] - pred;
+
+  // Vertical butterfly.
+  m[0] = diff[0] + diff[2];
+  m[1] = diff[1] + diff[3];
+  m[2] = diff[0] - diff[2];
+  m[3] = diff[1] - diff[3];
+
+  // Horizontal butterfly + accumulation of absolute coefficients.
+  satd += abs(m[0] + m[1]);
+  satd += abs(m[0] - m[1]);
+  satd += abs(m[2] + m[3]);
+  satd += abs(m[2] - m[3]);
+
+  return satd;
+}
+
+
+/** get weighted Hadamard cost for 4x4 block
+ *  Forms 16 weighted residuals (pred = ((w0*cur + round) >> shift) + offset),
+ *  applies a 4x4 Hadamard transform via butterfly stages, then sums the
+ *  absolute coefficients with a final (x+1)>>1 normalization.
+ * \param wpCur weighted-prediction parameters (w/offset/shift/round)
+ * \param *piOrg original samples
+ * \param *piCur reference samples (indexed with iStep)
+ * \param iStrideOrg stride of piOrg
+ * \param iStrideCur stride of piCur
+ * \param iStep horizontal step in piCur
+ * \returns Distortion (normalized 4x4 SATD)
+ */
+Distortion xCalcHADs4x4w( const WPScalingParam &wpCur, const Pel *piOrg, const Pel *piCur, Int iStrideOrg, Int iStrideCur, Int iStep )
+{
+  const Int round  = wpCur.round;
+  const Int shift  = wpCur.shift;
+  const Int offset = wpCur.offset;
+  const Int w0     = wpCur.w;
+
+  Distortion satd = 0;
+  TCoeff     diff[16];
+  TCoeff     m[16];
+  TCoeff     d[16];
+
+
+  // Weighted residuals, row by row (4 samples per iteration).
+  for(Int k = 0; k < 16; k+=4 )
+  {
+    Pel pred;
+    pred      = ( (w0*piCur[0*iStep] + round) >> shift ) + offset ;
+    diff[k+0] = piOrg[0] - pred;
+    pred      = ( (w0*piCur[1*iStep] + round) >> shift ) + offset ;
+    diff[k+1] = piOrg[1] - pred;
+    pred      = ( (w0*piCur[2*iStep] + round) >> shift ) + offset ;
+    diff[k+2] = piOrg[2] - pred;
+    pred      = ( (w0*piCur[3*iStep] + round) >> shift ) + offset ;
+    diff[k+3] = piOrg[3] - pred;
+
+    piCur += iStrideCur;
+    piOrg += iStrideOrg;
+  }
+
+  /*===== hadamard transform =====*/
+  m[ 0] = diff[ 0] + diff[12];
+  m[ 1] = diff[ 1] + diff[13];
+  m[ 2] = diff[ 2] + diff[14];
+  m[ 3] = diff[ 3] + diff[15];
+  m[ 4] = diff[ 4] + diff[ 8];
+  m[ 5] = diff[ 5] + diff[ 9];
+  m[ 6] = diff[ 6] + diff[10];
+  m[ 7] = diff[ 7] + diff[11];
+  m[ 8] = diff[ 4] - diff[ 8];
+  m[ 9] = diff[ 5] - diff[ 9];
+  m[10] = diff[ 6] - diff[10];
+  m[11] = diff[ 7] - diff[11];
+  m[12] = diff[ 0] - diff[12];
+  m[13] = diff[ 1] - diff[13];
+  m[14] = diff[ 2] - diff[14];
+  m[15] = diff[ 3] - diff[15];
+
+  d[ 0] = m[ 0] + m[ 4];
+  d[ 1] = m[ 1] + m[ 5];
+  d[ 2] = m[ 2] + m[ 6];
+  d[ 3] = m[ 3] + m[ 7];
+  d[ 4] = m[ 8] + m[12];
+  d[ 5] = m[ 9] + m[13];
+  d[ 6] = m[10] + m[14];
+  d[ 7] = m[11] + m[15];
+  d[ 8] = m[ 0] - m[ 4];
+  d[ 9] = m[ 1] - m[ 5];
+  d[10] = m[ 2] - m[ 6];
+  d[11] = m[ 3] - m[ 7];
+  d[12] = m[12] - m[ 8];
+  d[13] = m[13] - m[ 9];
+  d[14] = m[14] - m[10];
+  d[15] = m[15] - m[11];
+
+  m[ 0] = d[ 0] + d[ 3];
+  m[ 1] = d[ 1] + d[ 2];
+  m[ 2] = d[ 1] - d[ 2];
+  m[ 3] = d[ 0] - d[ 3];
+  m[ 4] = d[ 4] + d[ 7];
+  m[ 5] = d[ 5] + d[ 6];
+  m[ 6] = d[ 5] - d[ 6];
+  m[ 7] = d[ 4] - d[ 7];
+  m[ 8] = d[ 8] + d[11];
+  m[ 9] = d[ 9] + d[10];
+  m[10] = d[ 9] - d[10];
+  m[11] = d[ 8] - d[11];
+  m[12] = d[12] + d[15];
+  m[13] = d[13] + d[14];
+  m[14] = d[13] - d[14];
+  m[15] = d[12] - d[15];
+
+  d[ 0] = m[ 0] + m[ 1];
+  d[ 1] = m[ 0] - m[ 1];
+  d[ 2] = m[ 2] + m[ 3];
+  d[ 3] = m[ 3] - m[ 2];
+  d[ 4] = m[ 4] + m[ 5];
+  d[ 5] = m[ 4] - m[ 5];
+  d[ 6] = m[ 6] + m[ 7];
+  d[ 7] = m[ 7] - m[ 6];
+  d[ 8] = m[ 8] + m[ 9];
+  d[ 9] = m[ 8] - m[ 9];
+  d[10] = m[10] + m[11];
+  d[11] = m[11] - m[10];
+  d[12] = m[12] + m[13];
+  d[13] = m[12] - m[13];
+  d[14] = m[14] + m[15];
+  d[15] = m[15] - m[14];
+
+  // Sum of absolute transformed coefficients.
+  for (Int k=0; k<16; ++k)
+  {
+    satd += abs(d[k]);
+  }
+  satd = ((satd+1)>>1);  // normalise (divide by 2 with rounding)
+
+  return satd;
+}
+
+
+/** get weighted Hadamard cost for 8x8 block
+ *  Forms 64 weighted residuals (pred = ((w0*cur + round) >> shift) + offset),
+ *  applies an 8x8 Hadamard transform (horizontal then vertical butterfly
+ *  stages), then sums the absolute coefficients with a final (x+2)>>2
+ *  normalization.
+ * \param wpCur weighted-prediction parameters (w/offset/shift/round)
+ * \param *piOrg original samples
+ * \param *piCur reference samples (indexed with iStep)
+ * \param iStrideOrg stride of piOrg
+ * \param iStrideCur stride of piCur
+ * \param iStep horizontal step in piCur
+ * \returns Distortion (normalized 8x8 SATD)
+ */
+Distortion xCalcHADs8x8w( const WPScalingParam &wpCur, const Pel *piOrg, const Pel *piCur, Int iStrideOrg, Int iStrideCur, Int iStep )
+{
+  Distortion sad=0;
+  TCoeff diff[64], m1[8][8], m2[8][8], m3[8][8];
+  // Precomputed horizontal offsets: iStepN == N*iStep.
+  Int iStep2 = iStep<<1;
+  Int iStep3 = iStep2 + iStep;
+  Int iStep4 = iStep3 + iStep;
+  Int iStep5 = iStep4 + iStep;
+  Int iStep6 = iStep5 + iStep;
+  Int iStep7 = iStep6 + iStep;
+  const Int round  = wpCur.round;
+  const Int shift  = wpCur.shift;
+  const Int offset = wpCur.offset;
+  const Int w0     = wpCur.w;
+
+  Pel   pred;
+
+  // Weighted residuals, row by row (8 samples per iteration).
+  for(Int k = 0; k < 64; k+=8 )
+  {
+    pred      = ( (w0*piCur[     0] + round) >> shift ) + offset ;
+    diff[k+0] = piOrg[0] - pred;
+    pred      = ( (w0*piCur[iStep ] + round) >> shift ) + offset ;
+    diff[k+1] = piOrg[1] - pred;
+    pred      = ( (w0*piCur[iStep2] + round) >> shift ) + offset ;
+    diff[k+2] = piOrg[2] - pred;
+    pred      = ( (w0*piCur[iStep3] + round) >> shift ) + offset ;
+    diff[k+3] = piOrg[3] - pred;
+    pred      = ( (w0*piCur[iStep4] + round) >> shift ) + offset ;
+    diff[k+4] = piOrg[4] - pred;
+    pred      = ( (w0*piCur[iStep5] + round) >> shift ) + offset ;
+    diff[k+5] = piOrg[5] - pred;
+    pred      = ( (w0*piCur[iStep6] + round) >> shift ) + offset ;
+    diff[k+6] = piOrg[6] - pred;
+    pred      = ( (w0*piCur[iStep7] + round) >> shift ) + offset ;
+    diff[k+7] = piOrg[7] - pred;
+
+    piCur += iStrideCur;
+    piOrg += iStrideOrg;
+  }
+
+  //horizontal
+  for (Int j=0; j < 8; j++)
+  {
+    const Int jj = j << 3;
+    m2[j][0] = diff[jj  ] + diff[jj+4];
+    m2[j][1] = diff[jj+1] + diff[jj+5];
+    m2[j][2] = diff[jj+2] + diff[jj+6];
+    m2[j][3] = diff[jj+3] + diff[jj+7];
+    m2[j][4] = diff[jj  ] - diff[jj+4];
+    m2[j][5] = diff[jj+1] - diff[jj+5];
+    m2[j][6] = diff[jj+2] - diff[jj+6];
+    m2[j][7] = diff[jj+3] - diff[jj+7];
+
+    m1[j][0] = m2[j][0] + m2[j][2];
+    m1[j][1] = m2[j][1] + m2[j][3];
+    m1[j][2] = m2[j][0] - m2[j][2];
+    m1[j][3] = m2[j][1] - m2[j][3];
+    m1[j][4] = m2[j][4] + m2[j][6];
+    m1[j][5] = m2[j][5] + m2[j][7];
+    m1[j][6] = m2[j][4] - m2[j][6];
+    m1[j][7] = m2[j][5] - m2[j][7];
+
+    m2[j][0] = m1[j][0] + m1[j][1];
+    m2[j][1] = m1[j][0] - m1[j][1];
+    m2[j][2] = m1[j][2] + m1[j][3];
+    m2[j][3] = m1[j][2] - m1[j][3];
+    m2[j][4] = m1[j][4] + m1[j][5];
+    m2[j][5] = m1[j][4] - m1[j][5];
+    m2[j][6] = m1[j][6] + m1[j][7];
+    m2[j][7] = m1[j][6] - m1[j][7];
+  }
+
+  //vertical
+  for (Int i=0; i < 8; i++)
+  {
+    m3[0][i] = m2[0][i] + m2[4][i];
+    m3[1][i] = m2[1][i] + m2[5][i];
+    m3[2][i] = m2[2][i] + m2[6][i];
+    m3[3][i] = m2[3][i] + m2[7][i];
+    m3[4][i] = m2[0][i] - m2[4][i];
+    m3[5][i] = m2[1][i] - m2[5][i];
+    m3[6][i] = m2[2][i] - m2[6][i];
+    m3[7][i] = m2[3][i] - m2[7][i];
+
+    m1[0][i] = m3[0][i] + m3[2][i];
+    m1[1][i] = m3[1][i] + m3[3][i];
+    m1[2][i] = m3[0][i] - m3[2][i];
+    m1[3][i] = m3[1][i] - m3[3][i];
+    m1[4][i] = m3[4][i] + m3[6][i];
+    m1[5][i] = m3[5][i] + m3[7][i];
+    m1[6][i] = m3[4][i] - m3[6][i];
+    m1[7][i] = m3[5][i] - m3[7][i];
+
+    m2[0][i] = m1[0][i] + m1[1][i];
+    m2[1][i] = m1[0][i] - m1[1][i];
+    m2[2][i] = m1[2][i] + m1[3][i];
+    m2[3][i] = m1[2][i] - m1[3][i];
+    m2[4][i] = m1[4][i] + m1[5][i];
+    m2[5][i] = m1[4][i] - m1[5][i];
+    m2[6][i] = m1[6][i] + m1[7][i];
+    m2[7][i] = m1[6][i] - m1[7][i];
+  }
+
+  // Sum of absolute transformed coefficients.
+  for (Int j=0; j < 8; j++)
+  {
+    for (Int i=0; i < 8; i++)
+    {
+      sad += (abs(m2[j][i]));
+    }
+  }
+
+  sad=((sad+2)>>2);  // normalise (divide by 4 with rounding)
+
+  return sad;
+}
+
+
+/** get weighted Hadamard cost
+ *  Weighted-prediction counterpart of TComRdCost::xGetHADs: tiles the block
+ *  with 8x8, 4x4 or 2x2 weighted Hadamard kernels.
+ * \param *pcDtParam distortion parameters; wpCur[compIdx] supplies the weights
+ * \returns Distortion (weighted SATD, right-shifted according to bit depth)
+ */
+Distortion TComRdCostWeightPrediction::xGetHADsw( DistParam* pcDtParam )
+{
+  const Pel        *piOrg      = pcDtParam->pOrg;
+  const Pel        *piCur      = pcDtParam->pCur;
+  const Int         iRows      = pcDtParam->iRows;
+  const Int         iCols      = pcDtParam->iCols;
+  const Int         iStrideCur = pcDtParam->iStrideCur;
+  const Int         iStrideOrg = pcDtParam->iStrideOrg;
+  const Int         iStep      = pcDtParam->iStep;
+  const ComponentID compIdx    = pcDtParam->compIdx;
+  assert(compIdx<MAX_NUM_COMPONENT);
+  const WPScalingParam  wpCur    = pcDtParam->wpCur[compIdx];
+
+  Distortion uiSum = 0;
+
+  if( ( iRows % 8 == 0) && (iCols % 8 == 0) )
+  {
+    const Int iOffsetOrg = iStrideOrg<<3;   // advance 8 rows per tile row
+    const Int iOffsetCur = iStrideCur<<3;
+    for (Int y=0; y<iRows; y+= 8 )
+    {
+      for (Int x=0; x<iCols; x+= 8 )
+      {
+        uiSum += xCalcHADs8x8w( wpCur, &piOrg[x], &piCur[x*iStep], iStrideOrg, iStrideCur, iStep );
+      }
+      piOrg += iOffsetOrg;
+      piCur += iOffsetCur;
+    }
+  }
+  else if( ( iRows % 4 == 0) && (iCols % 4 == 0) )
+  {
+    const Int iOffsetOrg = iStrideOrg<<2;   // advance 4 rows per tile row
+    const Int iOffsetCur = iStrideCur<<2;
+
+    for (Int y=0; y<iRows; y+= 4 )
+    {
+      for (Int x=0; x<iCols; x+= 4 )
+      {
+        uiSum += xCalcHADs4x4w( wpCur, &piOrg[x], &piCur[x*iStep], iStrideOrg, iStrideCur, iStep );
+      }
+      piOrg += iOffsetOrg;
+      piCur += iOffsetCur;
+    }
+  }
+  else
+  {
+    // NOTE(review): this 2x2 fallback advances piOrg/piCur by only ONE stride
+    // per two processed rows, unlike the unweighted xGetHADs (which steps by
+    // stride<<1), and has no assert for odd dimensions. Looks suspicious but
+    // matches the imported code as-is -- confirm against upstream HM before
+    // changing.
+    for (Int y=0; y<iRows; y+=2 )
+    {
+      for (Int x=0; x<iCols; x+=2 )
+      {
+        uiSum += xCalcHADs2x2w( wpCur, &piOrg[x], &piCur[x*iStep], iStrideOrg, iStrideCur, iStep );
+      }
+      piOrg += iStrideOrg;
+      piCur += iStrideCur;
+    }
+  }
+
+  return uiSum >> DISTORTION_PRECISION_ADJUSTMENT(pcDtParam->bitDepth-8);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComRdCostWeightPrediction.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,64 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComRdCostWeightPrediction.h
+    \brief    RD cost computation namespace (header)
+*/
+
+#ifndef __TCOMRDCOSTWEIGHTPREDICTION__
+#define __TCOMRDCOSTWEIGHTPREDICTION__
+
+
+#include "CommonDef.h"
+#include "TComPattern.h"
+#include "TComMv.h"
+#include "TComRdCost.h"
+#include "TComSlice.h"
+
+class DistParam;
+class TComPattern;
+
+// ====================================================================================================================
+// Namespace definition
+// ====================================================================================================================
+
+/// RD cost computation namespace, with Weighted Prediction
+// Free functions (namespace, not a class): weighted-prediction distortion
+// metrics used by TComRdCost when DistParam::bApplyWeight is set.
+namespace TComRdCostWeightPrediction
+{
+  Distortion xGetSSEw ( DistParam* pcDtParam );  // weighted SSE
+  Distortion xGetSADw ( DistParam* pcDtParam );  // weighted SAD
+  Distortion xGetHADsw( DistParam* pcDtParam );  // weighted SATD (Hadamard)
+}// END NAMESPACE DEFINITION TComRdCostWeightPrediction
+
+#endif // __TCOMRDCOSTWEIGHTPREDICTION__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComRectangle.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,50 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef __TCOMRECTANGLE__
+#define __TCOMRECTANGLE__
+
+
+/// Axis-aligned rectangle: top-left corner (x0, y0) plus width/height,
+/// all in unsigned units.
+struct TComRectangle
+{
+  UInt width;   // rectangle width
+  UInt height;  // rectangle height
+  UInt x0;      // left (x) coordinate of top-left corner
+  UInt y0;      // top (y) coordinate of top-left corner
+};
+
+
+
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComRom.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,675 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComRom.cpp
+    \brief    global variables & functions
+*/
+
+#include "TComRom.h"
+#include <memory.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <iomanip>
+#include <assert.h>
+#include "TComDataCU.h"
+#include "Debug.h"
+// ====================================================================================================================
+// Initialize / destroy functions
+// ====================================================================================================================
+
+//! \ingroup TLibCommon
+//! \{
+
+class ScanGenerator  // stateful generator that walks a block's positions in a given coefficient scan order
+{
+private:
+  UInt m_line, m_column;                   // current (y,x) position within the block
+  const UInt m_blockWidth, m_blockHeight;  // dimensions of the scanned block
+  const UInt m_stride;                     // stride used to linearise (x,y) into an index
+  const COEFF_SCAN_TYPE m_scanType;        // SCAN_DIAG, SCAN_HOR or SCAN_VER
+
+public:
+  ScanGenerator(UInt blockWidth, UInt blockHeight, UInt stride, COEFF_SCAN_TYPE scanType)
+    : m_line(0), m_column(0), m_blockWidth(blockWidth), m_blockHeight(blockHeight), m_stride(stride), m_scanType(scanType)
+  { }
+
+  UInt GetCurrentX() const { return m_column; }
+  UInt GetCurrentY() const { return m_line; }
+
+  UInt GetNextIndex(UInt blockOffsetX, UInt blockOffsetY)  // returns the linear index of the current position (plus offsets), then advances the scan
+  {
+    UInt rtn=((m_line + blockOffsetY) * m_stride) + m_column + blockOffsetX;  // UInt (was Int): value is unsigned arithmetic and the function returns UInt
+
+    //advance line and column to the next position
+    switch (m_scanType)
+    {
+      //------------------------------------------------
+
+      case SCAN_DIAG:
+        {
+          if ((m_column == (m_blockWidth - 1)) || (m_line == 0)) //if we reach the end of a rank, go diagonally down to the next one
+          {
+            m_line   += m_column + 1;
+            m_column  = 0;
+
+            if (m_line >= m_blockHeight) //if that takes us outside the block, adjust so that we are back on the bottom row
+            {
+              m_column += m_line - (m_blockHeight - 1);
+              m_line    = m_blockHeight - 1;
+            }
+          }
+          else
+          {
+            m_column++;
+            m_line--;
+          }
+        }
+        break;
+
+      //------------------------------------------------
+
+      case SCAN_HOR:
+        {
+          if (m_column == (m_blockWidth - 1))  // row exhausted: wrap to start of next row
+          {
+            m_line++;
+            m_column = 0;
+          }
+          else m_column++;
+        }
+        break;
+
+      //------------------------------------------------
+
+      case SCAN_VER:
+        {
+          if (m_line == (m_blockHeight - 1))  // column exhausted: wrap to top of next column
+          {
+            m_column++;
+            m_line = 0;
+          }
+          else m_line++;
+        }
+        break;
+
+      //------------------------------------------------
+
+      default:
+        {
+          std::cerr << "ERROR: Unknown scan type \"" << m_scanType << "\" in ScanGenerator::GetNextIndex" << std::endl;
+          exit(1);
+        }
+        break;
+    }
+
+    return rtn;
+  }
+};
+
+// initialize ROM variables
+Void initROM()  // builds g_aucConvertToBit and allocates/fills the g_scanOrder tables; pair with destroyROM()
+{
+  Int i, c;
+
+  // g_aucConvertToBit[ x ]: log2(x/4), if x=4 -> 0, x=8 -> 1, x=16 -> 2, ...
+  ::memset( g_aucConvertToBit,   -1, sizeof( g_aucConvertToBit ) );  // -1 marks sizes that are not a power of two >= 4
+  c=0;
+  for ( i=4; i<=MAX_CU_SIZE; i*=2 )
+  {
+    g_aucConvertToBit[ i ] = c;
+    c++;
+  }
+
+  // initialise scan orders
+  for(UInt log2BlockHeight = 0; log2BlockHeight < MAX_CU_DEPTH; log2BlockHeight++)
+  {
+    for(UInt log2BlockWidth = 0; log2BlockWidth < MAX_CU_DEPTH; log2BlockWidth++)
+    {
+      const UInt blockWidth  = 1 << log2BlockWidth;
+      const UInt blockHeight = 1 << log2BlockHeight;
+      const UInt totalValues = blockWidth * blockHeight;
+
+      //--------------------------------------------------------------------------------------------------
+
+      //non-grouped scan orders
+
+      for (UInt scanTypeIndex = 0; scanTypeIndex < SCAN_NUMBER_OF_TYPES; scanTypeIndex++)
+      {
+        const COEFF_SCAN_TYPE scanType = COEFF_SCAN_TYPE(scanTypeIndex);
+
+        g_scanOrder[SCAN_UNGROUPED][scanType][log2BlockWidth][log2BlockHeight] = new UInt[totalValues];  // freed in destroyROM()
+
+        ScanGenerator fullBlockScan(blockWidth, blockHeight, blockWidth, scanType);
+
+        for (UInt scanPosition = 0; scanPosition < totalValues; scanPosition++)
+        {
+          g_scanOrder[SCAN_UNGROUPED][scanType][log2BlockWidth][log2BlockHeight][scanPosition] = fullBlockScan.GetNextIndex(0, 0);
+        }
+      }
+
+      //--------------------------------------------------------------------------------------------------
+
+      //grouped scan orders
+
+      const UInt  groupWidth           = 1           << MLS_CG_LOG2_WIDTH;
+      const UInt  groupHeight          = 1           << MLS_CG_LOG2_HEIGHT;
+      const UInt  widthInGroups        = blockWidth  >> MLS_CG_LOG2_WIDTH;
+      const UInt  heightInGroups       = blockHeight >> MLS_CG_LOG2_HEIGHT;
+
+      const UInt  groupSize            = groupWidth    * groupHeight;
+      const UInt  totalGroups          = widthInGroups * heightInGroups;
+
+      for (UInt scanTypeIndex = 0; scanTypeIndex < SCAN_NUMBER_OF_TYPES; scanTypeIndex++)
+      {
+        const COEFF_SCAN_TYPE scanType = COEFF_SCAN_TYPE(scanTypeIndex);
+
+        g_scanOrder[SCAN_GROUPED_4x4][scanType][log2BlockWidth][log2BlockHeight] = new UInt[totalValues];  // freed in destroyROM()
+
+        ScanGenerator fullBlockScan(widthInGroups, heightInGroups, groupWidth, scanType);  // outer scan walks the grid of coefficient groups
+
+        for (UInt groupIndex = 0; groupIndex < totalGroups; groupIndex++)
+        {
+          const UInt groupPositionY  = fullBlockScan.GetCurrentY();
+          const UInt groupPositionX  = fullBlockScan.GetCurrentX();
+          const UInt groupOffsetX    = groupPositionX * groupWidth;
+          const UInt groupOffsetY    = groupPositionY * groupHeight;
+          const UInt groupOffsetScan = groupIndex     * groupSize;
+
+          ScanGenerator groupScan(groupWidth, groupHeight, blockWidth, scanType);  // inner scan walks positions inside one group; stride is the full block width
+
+          for (UInt scanPosition = 0; scanPosition < groupSize; scanPosition++)
+          {
+            g_scanOrder[SCAN_GROUPED_4x4][scanType][log2BlockWidth][log2BlockHeight][groupOffsetScan + scanPosition] = groupScan.GetNextIndex(groupOffsetX, groupOffsetY);
+          }
+
+          fullBlockScan.GetNextIndex(0,0);  // advance to the next group; the returned index is intentionally unused
+        }
+      }
+
+      //--------------------------------------------------------------------------------------------------
+    }
+  }
+}
+
+Void destroyROM()  // releases every g_scanOrder table allocated in initROM()
+{
+  for(UInt groupTypeIndex = 0; groupTypeIndex < SCAN_NUMBER_OF_GROUP_TYPES; groupTypeIndex++)
+  {
+    for (UInt scanOrderIndex = 0; scanOrderIndex < SCAN_NUMBER_OF_TYPES; scanOrderIndex++)
+    {
+      for (UInt log2BlockWidth = 0; log2BlockWidth < MAX_CU_DEPTH; log2BlockWidth++)
+      {
+        for (UInt log2BlockHeight = 0; log2BlockHeight < MAX_CU_DEPTH; log2BlockHeight++)
+        {
+          delete [] g_scanOrder[groupTypeIndex][scanOrderIndex][log2BlockWidth][log2BlockHeight];  // delete[] of NULL is a safe no-op
+        }
+      }
+    }
+  }
+}
+
+// ====================================================================================================================
+// Data structure related table & variable
+// ====================================================================================================================
+
+UInt g_uiMaxCUWidth  = MAX_CU_SIZE;   // largest CU width in use
+UInt g_uiMaxCUHeight = MAX_CU_SIZE;   // largest CU height in use
+UInt g_uiMaxCUDepth  = MAX_CU_DEPTH;  // maximum CU partitioning depth
+UInt g_uiAddCUDepth  = 0;
+UInt g_auiZscanToRaster [ MAX_NUM_SPU_W*MAX_NUM_SPU_W ] = { 0, };  // z-scan part index -> raster part index (filled by initZscanToRaster)
+UInt g_auiRasterToZscan [ MAX_NUM_SPU_W*MAX_NUM_SPU_W ] = { 0, };  // raster part index -> z-scan part index (filled by initRasterToZscan)
+UInt g_auiRasterToPelX  [ MAX_NUM_SPU_W*MAX_NUM_SPU_W ] = { 0, };  // raster part index -> horizontal pel offset (filled by initRasterToPelXY)
+UInt g_auiRasterToPelY  [ MAX_NUM_SPU_W*MAX_NUM_SPU_W ] = { 0, };  // raster part index -> vertical pel offset (filled by initRasterToPelXY)
+
+UInt g_auiPUOffset[NUMBER_OF_PART_SIZES] = { 0, 8, 4, 4, 2, 10, 1, 5};  // per-partition-size PU offset table -- NOTE(review): confirm unit/meaning against callers
+
+Void initZscanToRaster ( Int iMaxDepth, Int iDepth, UInt uiStartVal, UInt*& rpuiCurrIdx )  // recursively fills the z-scan -> raster table via the caller-supplied write cursor
+{
+  Int iStride = 1 << ( iMaxDepth - 1 );  // parts per row at the maximum depth
+
+  if ( iDepth == iMaxDepth )
+  {
+    rpuiCurrIdx[0] = uiStartVal;  // leaf: record the raster index and advance the cursor (reference parameter, so the caller's pointer moves too)
+    rpuiCurrIdx++;
+  }
+  else
+  {
+    Int iStep = iStride >> iDepth;  // offset between quadrants at this depth
+    initZscanToRaster( iMaxDepth, iDepth+1, uiStartVal,                     rpuiCurrIdx );  // top-left quadrant
+    initZscanToRaster( iMaxDepth, iDepth+1, uiStartVal+iStep,               rpuiCurrIdx );  // top-right quadrant
+    initZscanToRaster( iMaxDepth, iDepth+1, uiStartVal+iStep*iStride,       rpuiCurrIdx );  // bottom-left quadrant
+    initZscanToRaster( iMaxDepth, iDepth+1, uiStartVal+iStep*iStride+iStep, rpuiCurrIdx );  // bottom-right quadrant
+  }
+}
+
+Void initRasterToZscan ( UInt uiMaxCUWidth, UInt uiMaxCUHeight, UInt uiMaxDepth )  // builds the inverse of g_auiZscanToRaster, which must already be filled
+{
+  UInt  uiMinCUWidth  = uiMaxCUWidth  >> ( uiMaxDepth - 1 );  // smallest partition width
+  UInt  uiMinCUHeight = uiMaxCUHeight >> ( uiMaxDepth - 1 );  // smallest partition height
+
+  UInt  uiNumPartInWidth  = (UInt)uiMaxCUWidth  / uiMinCUWidth;
+  UInt  uiNumPartInHeight = (UInt)uiMaxCUHeight / uiMinCUHeight;
+
+  for ( UInt i = 0; i < uiNumPartInWidth*uiNumPartInHeight; i++ )
+  {
+    g_auiRasterToZscan[ g_auiZscanToRaster[i] ] = i;  // invert the z-scan -> raster mapping
+  }
+}
+
+Void initRasterToPelXY ( UInt uiMaxCUWidth, UInt uiMaxCUHeight, UInt uiMaxDepth )  // fills g_auiRasterToPelX/Y: raster part index -> pel offset inside the CU
+{
+  UInt    i;
+
+  UInt* uiTempX = &g_auiRasterToPelX[0];
+  UInt* uiTempY = &g_auiRasterToPelY[0];
+
+  UInt  uiMinCUWidth  = uiMaxCUWidth  >> ( uiMaxDepth - 1 );  // smallest partition width
+  UInt  uiMinCUHeight = uiMaxCUHeight >> ( uiMaxDepth - 1 );  // smallest partition height
+
+  UInt  uiNumPartInWidth  = uiMaxCUWidth  / uiMinCUWidth;
+  UInt  uiNumPartInHeight = uiMaxCUHeight / uiMinCUHeight;
+
+  uiTempX[0] = 0; uiTempX++;  // first row: running sum of partition widths
+  for ( i = 1; i < uiNumPartInWidth; i++ )
+  {
+    uiTempX[0] = uiTempX[-1] + uiMinCUWidth; uiTempX++;
+  }
+  for ( i = 1; i < uiNumPartInHeight; i++ )
+  {
+    memcpy(uiTempX, uiTempX-uiNumPartInWidth, sizeof(UInt)*uiNumPartInWidth);  // remaining rows repeat the first row's X offsets
+    uiTempX += uiNumPartInWidth;
+  }
+
+  for ( i = 1; i < uiNumPartInWidth*uiNumPartInHeight; i++ )  // index 0 stays 0 from the table's static zero-initialisation
+  {
+    uiTempY[i] = ( i / uiNumPartInWidth ) * uiMinCUWidth;  // NOTE(review): vertical step uses uiMinCUWidth -- correct only for square partitions; presumably uiMinCUHeight intended, confirm
+  }
+}
+
+Int g_maxTrDynamicRange[MAX_NUM_CHANNEL_TYPE];  // per channel type; filled in elsewhere at runtime, deliberately not initialised here
+
+Int g_quantScales[SCALING_LIST_REM_NUM] =  // forward quantisation scales, indexed by QP remainder (presumably QP % 6 -- confirm against callers)
+{
+  26214,23302,20560,18396,16384,14564
+};
+
+Int g_invQuantScales[SCALING_LIST_REM_NUM] =  // inverse quantisation scales, same indexing as g_quantScales
+{
+  40,45,51,57,64,72
+};
+
+//--------------------------------------------------------------------------------------------------
+
+//structures
+
+#define DEFINE_DST4x4_MATRIX(a,b,c,d) \
+{ \
+  {  a,  b,  c,  d }, \
+  {  c,  c,  0, -c }, \
+  {  d, -a, -c,  b }, \
+  {  b, -d,  c, -a }, \
+}
+
+#define DEFINE_DCT4x4_MATRIX(a,b,c) \
+{ \
+  { a,  a,  a,  a}, \
+  { b,  c, -c, -b}, \
+  { a, -a, -a,  a}, \
+  { c, -b,  b, -c}  \
+}
+
+#define DEFINE_DCT8x8_MATRIX(a,b,c,d,e,f,g) \
+{ \
+  { a,  a,  a,  a,  a,  a,  a,  a}, \
+  { d,  e,  f,  g, -g, -f, -e, -d}, \
+  { b,  c, -c, -b, -b, -c,  c,  b}, \
+  { e, -g, -d, -f,  f,  d,  g, -e}, \
+  { a, -a, -a,  a,  a, -a, -a,  a}, \
+  { f, -d,  g,  e, -e, -g,  d, -f}, \
+  { c, -b,  b, -c, -c,  b, -b,  c}, \
+  { g, -f,  e, -d,  d, -e,  f, -g}  \
+}
+
+#define DEFINE_DCT16x16_MATRIX(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o) \
+{ \
+  { a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a}, \
+  { h,  i,  j,  k,  l,  m,  n,  o, -o, -n, -m, -l, -k, -j, -i, -h}, \
+  { d,  e,  f,  g, -g, -f, -e, -d, -d, -e, -f, -g,  g,  f,  e,  d}, \
+  { i,  l,  o, -m, -j, -h, -k, -n,  n,  k,  h,  j,  m, -o, -l, -i}, \
+  { b,  c, -c, -b, -b, -c,  c,  b,  b,  c, -c, -b, -b, -c,  c,  b}, \
+  { j,  o, -k, -i, -n,  l,  h,  m, -m, -h, -l,  n,  i,  k, -o, -j}, \
+  { e, -g, -d, -f,  f,  d,  g, -e, -e,  g,  d,  f, -f, -d, -g,  e}, \
+  { k, -m, -i,  o,  h,  n, -j, -l,  l,  j, -n, -h, -o,  i,  m, -k}, \
+  { a, -a, -a,  a,  a, -a, -a,  a,  a, -a, -a,  a,  a, -a, -a,  a}, \
+  { l, -j, -n,  h, -o, -i,  m,  k, -k, -m,  i,  o, -h,  n,  j, -l}, \
+  { f, -d,  g,  e, -e, -g,  d, -f, -f,  d, -g, -e,  e,  g, -d,  f}, \
+  { m, -h,  l,  n, -i,  k,  o, -j,  j, -o, -k,  i, -n, -l,  h, -m}, \
+  { c, -b,  b, -c, -c,  b, -b,  c,  c, -b,  b, -c, -c,  b, -b,  c}, \
+  { n, -k,  h, -j,  m,  o, -l,  i, -i,  l, -o, -m,  j, -h,  k, -n}, \
+  { g, -f,  e, -d,  d, -e,  f, -g, -g,  f, -e,  d, -d,  e, -f,  g}, \
+  { o, -n,  m, -l,  k, -j,  i, -h,  h, -i,  j, -k,  l, -m,  n, -o}  \
+}
+
+#define DEFINE_DCT32x32_MATRIX(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C,D,E) \
+{ \
+  { a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a,  a}, \
+  { p,  q,  r,  s,  t,  u,  v,  w,  x,  y,  z,  A,  B,  C,  D,  E, -E, -D, -C, -B, -A, -z, -y, -x, -w, -v, -u, -t, -s, -r, -q, -p}, \
+  { h,  i,  j,  k,  l,  m,  n,  o, -o, -n, -m, -l, -k, -j, -i, -h, -h, -i, -j, -k, -l, -m, -n, -o,  o,  n,  m,  l,  k,  j,  i,  h}, \
+  { q,  t,  w,  z,  C, -E, -B, -y, -v, -s, -p, -r, -u, -x, -A, -D,  D,  A,  x,  u,  r,  p,  s,  v,  y,  B,  E, -C, -z, -w, -t, -q}, \
+  { d,  e,  f,  g, -g, -f, -e, -d, -d, -e, -f, -g,  g,  f,  e,  d,  d,  e,  f,  g, -g, -f, -e, -d, -d, -e, -f, -g,  g,  f,  e,  d}, \
+  { r,  w,  B, -D, -y, -t, -p, -u, -z, -E,  A,  v,  q,  s,  x,  C, -C, -x, -s, -q, -v, -A,  E,  z,  u,  p,  t,  y,  D, -B, -w, -r}, \
+  { i,  l,  o, -m, -j, -h, -k, -n,  n,  k,  h,  j,  m, -o, -l, -i, -i, -l, -o,  m,  j,  h,  k,  n, -n, -k, -h, -j, -m,  o,  l,  i}, \
+  { s,  z, -D, -w, -p, -v, -C,  A,  t,  r,  y, -E, -x, -q, -u, -B,  B,  u,  q,  x,  E, -y, -r, -t, -A,  C,  v,  p,  w,  D, -z, -s}, \
+  { b,  c, -c, -b, -b, -c,  c,  b,  b,  c, -c, -b, -b, -c,  c,  b,  b,  c, -c, -b, -b, -c,  c,  b,  b,  c, -c, -b, -b, -c,  c,  b}, \
+  { t,  C, -y, -p, -x,  D,  u,  s,  B, -z, -q, -w,  E,  v,  r,  A, -A, -r, -v, -E,  w,  q,  z, -B, -s, -u, -D,  x,  p,  y, -C, -t}, \
+  { j,  o, -k, -i, -n,  l,  h,  m, -m, -h, -l,  n,  i,  k, -o, -j, -j, -o,  k,  i,  n, -l, -h, -m,  m,  h,  l, -n, -i, -k,  o,  j}, \
+  { u, -E, -t, -v,  D,  s,  w, -C, -r, -x,  B,  q,  y, -A, -p, -z,  z,  p,  A, -y, -q, -B,  x,  r,  C, -w, -s, -D,  v,  t,  E, -u}, \
+  { e, -g, -d, -f,  f,  d,  g, -e, -e,  g,  d,  f, -f, -d, -g,  e,  e, -g, -d, -f,  f,  d,  g, -e, -e,  g,  d,  f, -f, -d, -g,  e}, \
+  { v, -B, -p, -C,  u,  w, -A, -q, -D,  t,  x, -z, -r, -E,  s,  y, -y, -s,  E,  r,  z, -x, -t,  D,  q,  A, -w, -u,  C,  p,  B, -v}, \
+  { k, -m, -i,  o,  h,  n, -j, -l,  l,  j, -n, -h, -o,  i,  m, -k, -k,  m,  i, -o, -h, -n,  j,  l, -l, -j,  n,  h,  o, -i, -m,  k}, \
+  { w, -y, -u,  A,  s, -C, -q,  E,  p,  D, -r, -B,  t,  z, -v, -x,  x,  v, -z, -t,  B,  r, -D, -p, -E,  q,  C, -s, -A,  u,  y, -w}, \
+  { a, -a, -a,  a,  a, -a, -a,  a,  a, -a, -a,  a,  a, -a, -a,  a,  a, -a, -a,  a,  a, -a, -a,  a,  a, -a, -a,  a,  a, -a, -a,  a}, \
+  { x, -v, -z,  t,  B, -r, -D,  p, -E, -q,  C,  s, -A, -u,  y,  w, -w, -y,  u,  A, -s, -C,  q,  E, -p,  D,  r, -B, -t,  z,  v, -x}, \
+  { l, -j, -n,  h, -o, -i,  m,  k, -k, -m,  i,  o, -h,  n,  j, -l, -l,  j,  n, -h,  o,  i, -m, -k,  k,  m, -i, -o,  h, -n, -j,  l}, \
+  { y, -s, -E,  r, -z, -x,  t,  D, -q,  A,  w, -u, -C,  p, -B, -v,  v,  B, -p,  C,  u, -w, -A,  q, -D, -t,  x,  z, -r,  E,  s, -y}, \
+  { f, -d,  g,  e, -e, -g,  d, -f, -f,  d, -g, -e,  e,  g, -d,  f,  f, -d,  g,  e, -e, -g,  d, -f, -f,  d, -g, -e,  e,  g, -d,  f}, \
+  { z, -p,  A,  y, -q,  B,  x, -r,  C,  w, -s,  D,  v, -t,  E,  u, -u, -E,  t, -v, -D,  s, -w, -C,  r, -x, -B,  q, -y, -A,  p, -z}, \
+  { m, -h,  l,  n, -i,  k,  o, -j,  j, -o, -k,  i, -n, -l,  h, -m, -m,  h, -l, -n,  i, -k, -o,  j, -j,  o,  k, -i,  n,  l, -h,  m}, \
+  { A, -r,  v, -E, -w,  q, -z, -B,  s, -u,  D,  x, -p,  y,  C, -t,  t, -C, -y,  p, -x, -D,  u, -s,  B,  z, -q,  w,  E, -v,  r, -A}, \
+  { c, -b,  b, -c, -c,  b, -b,  c,  c, -b,  b, -c, -c,  b, -b,  c,  c, -b,  b, -c, -c,  b, -b,  c,  c, -b,  b, -c, -c,  b, -b,  c}, \
+  { B, -u,  q, -x,  E,  y, -r,  t, -A, -C,  v, -p,  w, -D, -z,  s, -s,  z,  D, -w,  p, -v,  C,  A, -t,  r, -y, -E,  x, -q,  u, -B}, \
+  { n, -k,  h, -j,  m,  o, -l,  i, -i,  l, -o, -m,  j, -h,  k, -n, -n,  k, -h,  j, -m, -o,  l, -i,  i, -l,  o,  m, -j,  h, -k,  n}, \
+  { C, -x,  s, -q,  v, -A, -E,  z, -u,  p, -t,  y, -D, -B,  w, -r,  r, -w,  B,  D, -y,  t, -p,  u, -z,  E,  A, -v,  q, -s,  x, -C}, \
+  { g, -f,  e, -d,  d, -e,  f, -g, -g,  f, -e,  d, -d,  e, -f,  g,  g, -f,  e, -d,  d, -e,  f, -g, -g,  f, -e,  d, -d,  e, -f,  g}, \
+  { D, -A,  x, -u,  r, -p,  s, -v,  y, -B,  E,  C, -z,  w, -t,  q, -q,  t, -w,  z, -C, -E,  B, -y,  v, -s,  p, -r,  u, -x,  A, -D}, \
+  { o, -n,  m, -l,  k, -j,  i, -h,  h, -i,  j, -k,  l, -m,  n, -o, -o,  n, -m,  l, -k,  j, -i,  h, -h,  i, -j,  k, -l,  m, -n,  o}, \
+  { E, -D,  C, -B,  A, -z,  y, -x,  w, -v,  u, -t,  s, -r,  q, -p,  p, -q,  r, -s,  t, -u,  v, -w,  x, -y,  z, -A,  B, -C,  D, -E}  \
+}
+
+//--------------------------------------------------------------------------------------------------
+
+//coefficients
+
+#if RExt__HIGH_PRECISION_FORWARD_TRANSFORM
+const TMatrixCoeff g_aiT4 [TRANSFORM_NUMBER_OF_DIRECTIONS][4][4]   =  // 4x4 DCT; one matrix per transform direction (first entry high precision in this branch)
+{
+  DEFINE_DCT4x4_MATRIX  (16384, 21266,  9224),
+  DEFINE_DCT4x4_MATRIX  (   64,    83,    36)
+};
+
+const TMatrixCoeff g_aiT8 [TRANSFORM_NUMBER_OF_DIRECTIONS][8][8]   =  // 8x8 DCT
+{
+  DEFINE_DCT8x8_MATRIX  (16384, 21266,  9224, 22813, 19244, 12769,  4563),
+  DEFINE_DCT8x8_MATRIX  (   64,    83,    36,    89,    75,    50,    18)
+};
+
+const TMatrixCoeff g_aiT16[TRANSFORM_NUMBER_OF_DIRECTIONS][16][16] =  // 16x16 DCT
+{
+  DEFINE_DCT16x16_MATRIX(16384, 21266,  9224, 22813, 19244, 12769,  4563, 23120, 22063, 20450, 17972, 14642, 11109,  6446,  2316),
+  DEFINE_DCT16x16_MATRIX(   64,    83,    36,    89,    75,    50,    18,    90,    87,    80,    70,    57,    43,    25,     9)
+};
+
+const TMatrixCoeff g_aiT32[TRANSFORM_NUMBER_OF_DIRECTIONS][32][32] =  // 32x32 DCT
+{
+  DEFINE_DCT32x32_MATRIX(16384, 21266,  9224, 22813, 19244, 12769,  4563, 23120, 22063, 20450, 17972, 14642, 11109,  6446,  2316, 23106, 22852, 22445, 21848, 20995, 19810, 18601, 17143, 15718, 13853, 11749,  9846,  7908,  5573,  3281,   946),
+  DEFINE_DCT32x32_MATRIX(   64,    83,    36,    89,    75,    50,    18,    90,    87,    80,    70,    57,    43,    25,     9,    90,    90,    88,    85,    82,    78,    73,    67,    61,    54,    46,    38,    31,    22,    13,     4)
+};
+
+const TMatrixCoeff g_as_DST_MAT_4[TRANSFORM_NUMBER_OF_DIRECTIONS][4][4] =  // 4x4 DST
+{
+  DEFINE_DST4x4_MATRIX( 7424, 14081, 18893, 21505),
+  DEFINE_DST4x4_MATRIX(   29,    55,    74,    84)
+};
+
+#else
+
+const TMatrixCoeff g_aiT4 [TRANSFORM_NUMBER_OF_DIRECTIONS][4][4]   =  // standard-precision branch: both directions use identical coefficients
+{
+  DEFINE_DCT4x4_MATRIX  (   64,    83,    36),
+  DEFINE_DCT4x4_MATRIX  (   64,    83,    36)
+};
+
+const TMatrixCoeff g_aiT8 [TRANSFORM_NUMBER_OF_DIRECTIONS][8][8]   =
+{
+  DEFINE_DCT8x8_MATRIX  (   64,    83,    36,    89,    75,    50,    18),
+  DEFINE_DCT8x8_MATRIX  (   64,    83,    36,    89,    75,    50,    18)
+};
+
+const TMatrixCoeff g_aiT16[TRANSFORM_NUMBER_OF_DIRECTIONS][16][16] =
+{
+  DEFINE_DCT16x16_MATRIX(   64,    83,    36,    89,    75,    50,    18,    90,    87,    80,    70,    57,    43,    25,     9),
+  DEFINE_DCT16x16_MATRIX(   64,    83,    36,    89,    75,    50,    18,    90,    87,    80,    70,    57,    43,    25,     9)
+};
+
+const TMatrixCoeff g_aiT32[TRANSFORM_NUMBER_OF_DIRECTIONS][32][32] =
+{
+  DEFINE_DCT32x32_MATRIX(   64,    83,    36,    89,    75,    50,    18,    90,    87,    80,    70,    57,    43,    25,     9,    90,    90,    88,    85,    82,    78,    73,    67,    61,    54,    46,    38,    31,    22,    13,     4),
+  DEFINE_DCT32x32_MATRIX(   64,    83,    36,    89,    75,    50,    18,    90,    87,    80,    70,    57,    43,    25,     9,    90,    90,    88,    85,    82,    78,    73,    67,    61,    54,    46,    38,    31,    22,    13,     4)
+};
+
+const TMatrixCoeff g_as_DST_MAT_4[TRANSFORM_NUMBER_OF_DIRECTIONS][4][4] =
+{
+  DEFINE_DST4x4_MATRIX(   29,    55,    74,    84),
+  DEFINE_DST4x4_MATRIX(   29,    55,    74,    84)
+};
+#endif
+
+
+//--------------------------------------------------------------------------------------------------
+
+#undef DEFINE_DST4x4_MATRIX
+#undef DEFINE_DCT4x4_MATRIX
+#undef DEFINE_DCT8x8_MATRIX
+#undef DEFINE_DCT16x16_MATRIX
+#undef DEFINE_DCT32x32_MATRIX
+
+//--------------------------------------------------------------------------------------------------
+
+
+const UChar g_aucChromaScale[NUM_CHROMA_FORMAT][chromaQPMappingTableSize]=  // luma QP -> chroma QP, one row per chroma format -- NOTE(review): row order presumably matches the chroma format enum, confirm
+{
+  //0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,29,30,31,32,33,33,34,34,35,35,36,36,37,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51 },
+  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,51,51,51,51,51,51 },
+  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,51,51,51,51,51,51 }
+};
+
+// ====================================================================================================================
+// ADI
+// ====================================================================================================================
+
+#if FAST_UDI_USE_MPM
+const UChar g_aucIntraModeNumFast[MAX_CU_DEPTH] =
+{
+  3,  //   2x2
+  8,  //   4x4
+  8,  //   8x8
+  3,  //  16x16
+  3,  //  32x32
+  3   //  64x64
+};
+#else // FAST_UDI_USE_MPM
+const UChar g_aucIntraModeNumFast[MAX_CU_DEPTH] =
+{
+  3,  //   2x2
+  9,  //   4x4
+  9,  //   8x8
+  4,  //  16x16   33
+  4,  //  32x32   33
+  5   //  64x64   33
+};
+#endif // FAST_UDI_USE_MPM
+
+const UChar g_chroma422IntraAngleMappingTable[NUM_INTRA_MODE] =
+  //0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, DM
+  { 0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31, DM_CHROMA_IDX};
+
+// ====================================================================================================================
+// Bit-depth
+// ====================================================================================================================
+
+Int g_bitDepth   [MAX_NUM_CHANNEL_TYPE] = {8, 8};
+#if O0043_BEST_EFFORT_DECODING
+Int g_bitDepthInStream   [MAX_NUM_CHANNEL_TYPE] = {8, 8}; // In the encoder, this is the same as g_bitDepth. In the decoder, this can vary from g_bitDepth if the decoder is forced to use 'best-effort decoding' at a particular bit-depth.
+#endif
+Int g_PCMBitDepth[MAX_NUM_CHANNEL_TYPE] = {8, 8};    // PCM bit-depth
+
+// ====================================================================================================================
+// Misc.
+// ====================================================================================================================
+
+Char  g_aucConvertToBit  [ MAX_CU_SIZE+1 ];
+
+#if ENC_DEC_TRACE
+FILE*  g_hTrace = NULL; // Set to NULL to open up a file. Set to stdout to use the current output
+const Bool g_bEncDecTraceEnable  = true;
+const Bool g_bEncDecTraceDisable = false;
+Bool   g_HLSTraceEnable = true;
+Bool   g_bJustDoIt = false;
+UInt64 g_nSymbolCounter = 0;
+#endif
+// ====================================================================================================================
+// Scanning order & context model mapping
+// ====================================================================================================================
+
+// scanning order table
+UInt* g_scanOrder[SCAN_NUMBER_OF_GROUP_TYPES][SCAN_NUMBER_OF_TYPES][ MAX_CU_DEPTH ][ MAX_CU_DEPTH ];  // allocated in initROM(), released in destroyROM()
+
+const UInt ctxIndMap4x4[4*4] =  // per-position context index map for 4x4 blocks -- NOTE(review): exact semantics defined by the entropy coder that consumes it, confirm
+{
+  0, 1, 4, 5,
+  2, 3, 4, 5,
+  6, 6, 8, 8,
+  7, 7, 8, 8
+};
+
+const UInt g_uiMinInGroup[ LAST_SIGNIFICANT_GROUPS ] = {0,1,2,3,4,6,8,12,16,24};  // smallest coordinate belonging to each last-significant group
+const UInt g_uiGroupIdx[ MAX_TU_SIZE ]   = {0,1,2,3,4,4,5,5,6,6,6,6,7,7,7,7,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9};  // coordinate -> last-significant group index (inverse-ish of g_uiMinInGroup)
+
+const Char *MatrixType[SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM] =
+{
+  {
+    "INTRA4X4_LUMA",
+    "INTRA4X4_CHROMAU",
+    "INTRA4X4_CHROMAV",
+    "INTER4X4_LUMA",
+    "INTER4X4_CHROMAU",
+    "INTER4X4_CHROMAV"
+  },
+  {
+    "INTRA8X8_LUMA",
+    "INTRA8X8_CHROMAU",
+    "INTRA8X8_CHROMAV",
+    "INTER8X8_LUMA",
+    "INTER8X8_CHROMAU",
+    "INTER8X8_CHROMAV"
+  },
+  {
+    "INTRA16X16_LUMA",
+    "INTRA16X16_CHROMAU",
+    "INTRA16X16_CHROMAV",
+    "INTER16X16_LUMA",
+    "INTER16X16_CHROMAU",
+    "INTER16X16_CHROMAV"
+  },
+  {
+   "INTRA32X32_LUMA",
+   "INTRA32X32_CHROMAU_FROM16x16_CHROMAU",
+   "INTRA32X32_CHROMAV_FROM16x16_CHROMAV",
+   "INTER32X32_LUMA",
+   "INTER32X32_CHROMAU_FROM16x16_CHROMAU",
+   "INTER32X32_CHROMAV_FROM16x16_CHROMAV"
+  },
+};
+
+const Char *MatrixType_DC[SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM] =
+{
+  {
+  },
+  {
+  },
+  {
+    "INTRA16X16_LUMA_DC",
+    "INTRA16X16_CHROMAU_DC",
+    "INTRA16X16_CHROMAV_DC",
+    "INTER16X16_LUMA_DC",
+    "INTER16X16_CHROMAU_DC",
+    "INTER16X16_CHROMAV_DC"
+  },
+  {
+    "INTRA32X32_LUMA_DC",
+    "INTRA32X32_CHROMAU_DC_FROM16x16_CHROMAU",
+    "INTRA32X32_CHROMAV_DC_FROM16x16_CHROMAV",
+    "INTER32X32_LUMA_DC",
+    "INTER32X32_CHROMAU_DC_FROM16x16_CHROMAU",
+    "INTER32X32_CHROMAV_DC_FROM16x16_CHROMAV"
+  },
+};
+
+Int g_quantTSDefault4x4[4*4] =  // flat default scaling list for 4x4 (all 16)
+{
+  16,16,16,16,
+  16,16,16,16,
+  16,16,16,16,
+  16,16,16,16
+};
+
+Int g_quantIntraDefault8x8[8*8] =  // default intra scaling list for 8x8 (weights grow toward high frequencies)
+{
+  16,16,16,16,17,18,21,24,
+  16,16,16,16,17,19,22,25,
+  16,16,17,18,20,22,25,29,
+  16,16,18,21,24,27,31,36,
+  17,17,20,24,30,35,41,47,
+  18,19,22,27,35,44,54,65,
+  21,22,25,31,41,54,70,88,
+  24,25,29,36,47,65,88,115
+};
+
+Int g_quantInterDefault8x8[8*8] =  // default inter scaling list for 8x8
+{
+  16,16,16,16,17,18,20,24,
+  16,16,16,17,18,20,24,25,
+  16,16,17,18,20,24,25,28,
+  16,17,18,20,24,25,28,33,
+  17,18,20,24,25,28,33,41,
+  18,20,24,25,28,33,41,54,
+  20,24,25,28,33,41,54,71,
+  24,25,28,33,41,54,71,91
+};
+
+UInt g_scalingListSize   [SCALING_LIST_SIZE_NUM] = {16,64,256,1024};  // entries per scaling-list size id (4x4 .. 32x32)
+UInt g_scalingListSizeX  [SCALING_LIST_SIZE_NUM] = { 4, 8, 16,  32};  // side length per scaling-list size id
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComRom.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,237 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComRom.h
+    \brief    global variables & functions (header)
+*/
+
+#ifndef __TCOMROM__
+#define __TCOMROM__
+
+#include "CommonDef.h"
+
+#include<stdio.h>
+#include<iostream>
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Macros
+// ====================================================================================================================
+
+#define     MAX_CU_DEPTH             6                          // log2(CTUSize)
+#define     MAX_CU_SIZE             (1<<(MAX_CU_DEPTH))         // maximum allowable size of CU, surely 64? (not 1<<7 = 128)
+#define     MIN_PU_SIZE              4
+#define     MIN_TU_SIZE              4
+#define     MAX_TU_SIZE             32
+#define     MAX_NUM_SPU_W           (MAX_CU_SIZE/MIN_PU_SIZE)   // maximum number of SPU in horizontal line
+
+#define     SCALING_LIST_REM_NUM     6
+
+// ====================================================================================================================
+// Initialize / destroy functions
+// ====================================================================================================================
+
+Void         initROM();
+Void         destroyROM();
+
+// ====================================================================================================================
+// Data structure related table & variable
+// ====================================================================================================================
+
+// flexible conversion from relative to absolute index
+extern       UInt   g_auiZscanToRaster[ MAX_NUM_SPU_W*MAX_NUM_SPU_W ];
+extern       UInt   g_auiRasterToZscan[ MAX_NUM_SPU_W*MAX_NUM_SPU_W ];
+extern       UInt*  g_scanOrder[SCAN_NUMBER_OF_GROUP_TYPES][SCAN_NUMBER_OF_TYPES][ MAX_CU_DEPTH ][ MAX_CU_DEPTH ];
+
+Void         initZscanToRaster ( Int iMaxDepth, Int iDepth, UInt uiStartVal, UInt*& rpuiCurrIdx );
+Void         initRasterToZscan ( UInt uiMaxCUWidth, UInt uiMaxCUHeight, UInt uiMaxDepth         );
+
+// conversion of partition index to picture pel position
+extern       UInt   g_auiRasterToPelX[ MAX_NUM_SPU_W*MAX_NUM_SPU_W ];
+extern       UInt   g_auiRasterToPelY[ MAX_NUM_SPU_W*MAX_NUM_SPU_W ];
+
+Void         initRasterToPelXY ( UInt uiMaxCUWidth, UInt uiMaxCUHeight, UInt uiMaxDepth );
+
+// global variable (CTU width/height, max. CU depth)
+extern       UInt g_uiMaxCUWidth;
+extern       UInt g_uiMaxCUHeight;
+extern       UInt g_uiMaxCUDepth;
+extern       UInt g_uiAddCUDepth;
+
+extern       UInt g_auiPUOffset[NUMBER_OF_PART_SIZES];
+
+#define QUANT_SHIFT                14 // Q(4) = 2^14
+#define IQUANT_SHIFT                6
+#define SCALE_BITS                 15 // Inherited from TMuC, pressumably for fractional bit estimates in RDOQ
+
+extern Int g_maxTrDynamicRange[MAX_NUM_CHANNEL_TYPE];
+
+#define SQRT2                      11585
+#define SQRT2_SHIFT                13
+#define INVSQRT2                   11585
+#define INVSQRT2_SHIFT             14
+#define ADDITIONAL_MULTIPLIER_BITS 14
+
+#define SHIFT_INV_1ST               7 // Shift after first inverse transform stage
+#define SHIFT_INV_2ND              12 // Shift after second inverse transform stage
+
+extern Int g_quantScales[SCALING_LIST_REM_NUM];             // Q(QP%6)
+extern Int g_invQuantScales[SCALING_LIST_REM_NUM];          // IQ(QP%6)
+
+#if RExt__HIGH_PRECISION_FORWARD_TRANSFORM
+static const Int g_transformMatrixShift[TRANSFORM_NUMBER_OF_DIRECTIONS] = { 14, 6 };
+#else
+static const Int g_transformMatrixShift[TRANSFORM_NUMBER_OF_DIRECTIONS] = {  6, 6 };
+#endif
+
+extern const TMatrixCoeff g_aiT4 [TRANSFORM_NUMBER_OF_DIRECTIONS][4][4];
+extern const TMatrixCoeff g_aiT8 [TRANSFORM_NUMBER_OF_DIRECTIONS][8][8];
+extern const TMatrixCoeff g_aiT16[TRANSFORM_NUMBER_OF_DIRECTIONS][16][16];
+extern const TMatrixCoeff g_aiT32[TRANSFORM_NUMBER_OF_DIRECTIONS][32][32];
+
+// ====================================================================================================================
+// Luma QP to Chroma QP mapping
+// ====================================================================================================================
+
+static const Int chromaQPMappingTableSize = 58;
+
+extern const UChar  g_aucChromaScale[NUM_CHROMA_FORMAT][chromaQPMappingTableSize];
+
+// ====================================================================================================================
+// Entropy Coding
+// ====================================================================================================================
+
+#define CONTEXT_STATE_BITS             6
+#define LAST_SIGNIFICANT_GROUPS       10
+
+// ====================================================================================================================
+// Scanning order & context mapping table
+// ====================================================================================================================
+
+extern const UInt   ctxIndMap4x4[4*4];
+
+extern const UInt   g_uiGroupIdx[ MAX_TU_SIZE ];
+extern const UInt   g_uiMinInGroup[ LAST_SIGNIFICANT_GROUPS ];
+
+// ====================================================================================================================
+// ADI table
+// ====================================================================================================================
+
+extern const UChar  g_aucIntraModeNumFast[MAX_CU_DEPTH];
+
+extern const UChar  g_chroma422IntraAngleMappingTable[NUM_INTRA_MODE];
+
+// ====================================================================================================================
+// Bit-depth
+// ====================================================================================================================
+
+extern        Int g_bitDepth   [MAX_NUM_CHANNEL_TYPE];
+extern        Int g_PCMBitDepth[MAX_NUM_CHANNEL_TYPE];
+#if O0043_BEST_EFFORT_DECODING
+extern        Int g_bitDepthInStream   [MAX_NUM_CHANNEL_TYPE]; // In the encoder, this is the same as g_bitDepth. In the decoder, this can vary from g_bitDepth if the decoder is forced to use 'best-effort decoding' at a particular bit-depth.
+#endif
+
+// ====================================================================================================================
+// Mode-Dependent DST Matrices
+// ====================================================================================================================
+
+extern const TMatrixCoeff g_as_DST_MAT_4 [TRANSFORM_NUMBER_OF_DIRECTIONS][4][4];
+
+// ====================================================================================================================
+// Misc.
+// ====================================================================================================================
+
+extern       Char   g_aucConvertToBit  [ MAX_CU_SIZE+1 ];   // from width to log2(width)-2
+
+#ifndef ENC_DEC_TRACE
+#define ENC_DEC_TRACE 0
+#endif
+
+
+#if ENC_DEC_TRACE
+extern FILE*  g_hTrace;
+extern Bool   g_bJustDoIt;
+extern const Bool g_bEncDecTraceEnable;
+extern const Bool g_bEncDecTraceDisable;
+extern Bool   g_HLSTraceEnable;
+extern UInt64 g_nSymbolCounter;
+
+#define COUNTER_START    1
+#define COUNTER_END      0 //( UInt64(1) << 63 )
+
+#define DTRACE_CABAC_F(x)     if ( ( g_nSymbolCounter >= COUNTER_START && g_nSymbolCounter <= COUNTER_END )|| g_bJustDoIt ) fprintf( g_hTrace, "%f", x );
+#define DTRACE_CABAC_V(x)     if ( ( g_nSymbolCounter >= COUNTER_START && g_nSymbolCounter <= COUNTER_END )|| g_bJustDoIt ) fprintf( g_hTrace, "%d", x );
+#define DTRACE_CABAC_VL(x)    if ( ( g_nSymbolCounter >= COUNTER_START && g_nSymbolCounter <= COUNTER_END )|| g_bJustDoIt ) fprintf( g_hTrace, "%lld", x );
+#define DTRACE_CABAC_T(x)     if ( ( g_nSymbolCounter >= COUNTER_START && g_nSymbolCounter <= COUNTER_END )|| g_bJustDoIt ) fprintf( g_hTrace, "%s", x );
+#define DTRACE_CABAC_X(x)     if ( ( g_nSymbolCounter >= COUNTER_START && g_nSymbolCounter <= COUNTER_END )|| g_bJustDoIt ) fprintf( g_hTrace, "%x", x );
+#define DTRACE_CABAC_R( x,y ) if ( ( g_nSymbolCounter >= COUNTER_START && g_nSymbolCounter <= COUNTER_END )|| g_bJustDoIt ) fprintf( g_hTrace, x,    y );
+#define DTRACE_CABAC_N        if ( ( g_nSymbolCounter >= COUNTER_START && g_nSymbolCounter <= COUNTER_END )|| g_bJustDoIt ) fprintf( g_hTrace, "\n"    );
+
+#else
+
+#define DTRACE_CABAC_F(x)
+#define DTRACE_CABAC_V(x)
+#define DTRACE_CABAC_VL(x)
+#define DTRACE_CABAC_T(x)
+#define DTRACE_CABAC_X(x)
+#define DTRACE_CABAC_R( x,y )
+#define DTRACE_CABAC_N
+
+#endif
+
+
+#define SCALING_LIST_NUM (MAX_NUM_COMPONENT * NUMBER_OF_PREDICTION_MODES) ///< list number for quantization matrix
+
+#define SCALING_LIST_START_VALUE 8                                        ///< start value for dpcm mode
+#define MAX_MATRIX_COEF_NUM 64                                            ///< max coefficient number for quantization matrix
+#define MAX_MATRIX_SIZE_NUM 8                                             ///< max size number for quantization matrix
+#define SCALING_LIST_BITS 8                                               ///< bit depth of scaling list entries
+#define LOG2_SCALING_LIST_NEUTRAL_VALUE 4                                 ///< log2 of the value that, when used in a scaling list, has no effect on quantisation
+#define SCALING_LIST_DC 16                                                ///< default DC value
+
+extern const Char *MatrixType[SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM];
+extern const Char *MatrixType_DC[SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM];
+
+extern Int g_quantTSDefault4x4[4*4];
+extern Int g_quantIntraDefault8x8[8*8];
+extern Int g_quantInterDefault8x8[8*8];
+
+extern UInt g_scalingListSize [SCALING_LIST_SIZE_NUM];
+extern UInt g_scalingListSizeX[SCALING_LIST_SIZE_NUM];
+extern UInt g_scalingListNum  [SCALING_LIST_SIZE_NUM];
+//! \}
+
+#endif  //__TCOMROM__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComSampleAdaptiveOffset.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,724 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComSampleAdaptiveOffset.cpp
+    \brief    sample adaptive offset class
+*/
+
+#include "TComSampleAdaptiveOffset.h"
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+
+//! \ingroup TLibCommon
+//! \{
+
+UInt g_saoMaxOffsetQVal[MAX_NUM_COMPONENT]; // per-component maximum quantised SAO offset magnitude; filled in by TComSampleAdaptiveOffset::create()
+
+SAOOffset::SAOOffset() // start each component parameter set in the "SAO off" state
+{
+  reset();
+}
+
+SAOOffset::~SAOOffset()
+{
+
+}
+
+Void SAOOffset::reset() // disable SAO for this component and clear all per-class offsets
+{
+  modeIdc = SAO_MODE_OFF;
+  typeIdc = -1;
+  typeAuxInfo = -1;
+  ::memset(offset, 0, sizeof(Int)* MAX_NUM_SAO_CLASSES);
+}
+
+const SAOOffset& SAOOffset::operator= (const SAOOffset& src) // member-wise copy, including the per-class offset table
+{
+  modeIdc = src.modeIdc;
+  typeIdc = src.typeIdc;
+  typeAuxInfo = src.typeAuxInfo;
+  ::memcpy(offset, src.offset, sizeof(Int)* MAX_NUM_SAO_CLASSES);
+
+  return *this;
+}
+
+
+SAOBlkParam::SAOBlkParam() // one SAOOffset per component, all initialised to "off"
+{
+  reset();
+}
+
+SAOBlkParam::~SAOBlkParam()
+{
+
+}
+
+Void SAOBlkParam::reset() // reset every component's parameters to the "SAO off" state
+{
+  for(Int compIdx = 0; compIdx < MAX_NUM_COMPONENT; compIdx++)
+  {
+    offsetParam[compIdx].reset();
+  }
+}
+
+const SAOBlkParam& SAOBlkParam::operator= (const SAOBlkParam& src) // component-wise copy of all SAO parameters
+{
+  for(Int compIdx = 0; compIdx < MAX_NUM_COMPONENT; compIdx++)
+  {
+    offsetParam[compIdx] = src.offsetParam[compIdx];
+  }
+  return *this;
+
+}
+
+TComSampleAdaptiveOffset::TComSampleAdaptiveOffset() // buffers start unallocated; create() sizes them for the picture
+{
+  m_tempPicYuv = NULL;
+  m_lineBufWidth = 0;
+  m_signLineBuf1 = NULL;
+  m_signLineBuf2 = NULL;
+}
+
+
+TComSampleAdaptiveOffset::~TComSampleAdaptiveOffset() // release the temp picture (via destroy()) and the two sign line buffers
+{
+  destroy();
+
+  if (m_signLineBuf1) delete[] m_signLineBuf1; m_signLineBuf1 = NULL;
+  if (m_signLineBuf2) delete[] m_signLineBuf2; m_signLineBuf2 = NULL;
+}
+
+Void TComSampleAdaptiveOffset::create( Int picWidth, Int picHeight, ChromaFormat format, UInt maxCUWidth, UInt maxCUHeight, UInt maxCUDepth, UInt lumaBitShift, UInt chromaBitShift )
+{
+  destroy(); // safe to call create() repeatedly: drop any previous temp picture first
+
+  m_picWidth        = picWidth;
+  m_picHeight       = picHeight;
+  m_chromaFormatIDC = format;
+  m_maxCUWidth      = maxCUWidth;
+  m_maxCUHeight     = maxCUHeight;
+
+  m_numCTUInWidth   = (m_picWidth/m_maxCUWidth) + ((m_picWidth % m_maxCUWidth)?1:0); // ceil(picWidth / ctuWidth)
+  m_numCTUInHeight  = (m_picHeight/m_maxCUHeight) + ((m_picHeight % m_maxCUHeight)?1:0); // ceil(picHeight / ctuHeight)
+  m_numCTUsPic      = m_numCTUInHeight*m_numCTUInWidth;
+
+  //temporary picture buffer
+  if ( !m_tempPicYuv ) // lazily allocated; used by SAOProcess() as the unfiltered source
+  {
+    m_tempPicYuv = new TComPicYuv;
+    m_tempPicYuv->create( m_picWidth, m_picHeight, m_chromaFormatIDC, m_maxCUWidth, m_maxCUHeight, maxCUDepth );
+  }
+
+  //bit-depth related
+  for(Int compIdx = 0; compIdx < MAX_NUM_COMPONENT; compIdx++)
+  {
+    Int bitDepthSample = g_bitDepth[toChannelType(ComponentID(compIdx))];
+    m_offsetStepLog2  [compIdx] = isLuma(ComponentID(compIdx))? lumaBitShift : chromaBitShift; // offset de-quantisation step per component
+    g_saoMaxOffsetQVal[compIdx] = (1<<(min(bitDepthSample,MAX_SAO_TRUNCATED_BITDEPTH)-5))-1; //Table 9-32, inclusive
+  }
+}
+
+Void TComSampleAdaptiveOffset::destroy() // free the temporary picture buffer (sign line buffers are freed by the destructor)
+{
+  if ( m_tempPicYuv )
+  {
+    m_tempPicYuv->destroy();
+    delete m_tempPicYuv;
+    m_tempPicYuv = NULL;
+  }
+}
+
+Void TComSampleAdaptiveOffset::invertQuantOffsets(ComponentID compIdx, Int typeIdc, Int typeAuxInfo, Int* dstOffsets, Int* srcOffsets)
+{
+  Int codedOffset[MAX_NUM_SAO_CLASSES]; // local copy so dstOffsets and srcOffsets may alias (reconstructBlkSAOParam passes the same array for both)
+
+  ::memcpy(codedOffset, srcOffsets, sizeof(Int)*MAX_NUM_SAO_CLASSES);
+  ::memset(dstOffsets, 0, sizeof(Int)*MAX_NUM_SAO_CLASSES);
+
+  if(typeIdc == SAO_TYPE_START_BO)
+  {
+    for(Int i=0; i< 4; i++) // band offset: 4 consecutive bands starting at typeAuxInfo, wrapping mod NUM_SAO_BO_CLASSES
+    {
+      dstOffsets[(typeAuxInfo+ i)%NUM_SAO_BO_CLASSES] = codedOffset[(typeAuxInfo+ i)%NUM_SAO_BO_CLASSES]*(1<<m_offsetStepLog2[compIdx]);
+    }
+  }
+  else //EO
+  {
+    for(Int i=0; i< NUM_SAO_EO_CLASSES; i++) // scale every edge-offset class by the component's offset step
+    {
+      dstOffsets[i] = codedOffset[i] *(1<<m_offsetStepLog2[compIdx]);
+    }
+    assert(dstOffsets[SAO_CLASS_EO_PLAIN] == 0); //keep EO plain offset as zero
+  }
+
+}
+
+Int TComSampleAdaptiveOffset::getMergeList(TComPic* pic, Int ctuRsAddr, SAOBlkParam* blkParams, SAOBlkParam* mergeList[NUM_SAO_MERGE_TYPES])
+{ // collect usable above/left SAO merge candidates for this CTU; fills mergeList (NULL = unavailable) and returns the candidate count
+  Int ctuX = ctuRsAddr % m_numCTUInWidth;
+  Int ctuY = ctuRsAddr / m_numCTUInWidth;
+  Int mergedCTUPos;
+  Int numValidMergeCandidates = 0;
+
+  for(Int mergeType=0; mergeType< NUM_SAO_MERGE_TYPES; mergeType++)
+  {
+    SAOBlkParam* mergeCandidate = NULL;
+
+    switch(mergeType)
+    {
+    case SAO_MERGE_ABOVE: // candidate is the CTU one row up, if it exists and merging across its boundary is allowed
+      {
+        if(ctuY > 0)
+        {
+          mergedCTUPos = ctuRsAddr- m_numCTUInWidth;
+          if( pic->getSAOMergeAvailability(ctuRsAddr, mergedCTUPos) )
+          {
+            mergeCandidate = &(blkParams[mergedCTUPos]);
+          }
+        }
+      }
+      break;
+    case SAO_MERGE_LEFT: // candidate is the CTU immediately to the left, under the same availability rule
+      {
+        if(ctuX > 0)
+        {
+          mergedCTUPos = ctuRsAddr- 1;
+          if( pic->getSAOMergeAvailability(ctuRsAddr, mergedCTUPos) )
+          {
+            mergeCandidate = &(blkParams[mergedCTUPos]);
+          }
+        }
+      }
+      break;
+    default:
+      {
+        printf("not a supported merge type");
+        assert(0);
+        exit(-1);
+      }
+    }
+
+    mergeList[mergeType]=mergeCandidate;
+    if (mergeCandidate != NULL)
+    {
+      numValidMergeCandidates++;
+    }
+  }
+
+  return numValidMergeCandidates;
+}
+
+
+Void TComSampleAdaptiveOffset::reconstructBlkSAOParam(SAOBlkParam& recParam, SAOBlkParam* mergeList[NUM_SAO_MERGE_TYPES])
+{ // turn parsed SAO parameters into usable ones: de-quantise new offsets, or copy from the merge source
+  const Int numberOfComponents = getNumberValidComponents(m_chromaFormatIDC);
+  for(Int compIdx = 0; compIdx < numberOfComponents; compIdx++)
+  {
+    const ComponentID component = ComponentID(compIdx);
+    SAOOffset& offsetParam = recParam[component];
+
+    if(offsetParam.modeIdc == SAO_MODE_OFF)
+    {
+      continue;
+    }
+
+    switch(offsetParam.modeIdc)
+    {
+    case SAO_MODE_NEW: // de-quantise the coded offsets in place (src and dst are the same array; invertQuantOffsets copies internally)
+      {
+        invertQuantOffsets(component, offsetParam.typeIdc, offsetParam.typeAuxInfo, offsetParam.offset, offsetParam.offset);
+      }
+      break;
+    case SAO_MODE_MERGE: // in merge mode typeIdc holds the merge type (above/left), indexing mergeList
+      {
+        SAOBlkParam* mergeTarget = mergeList[offsetParam.typeIdc];
+        assert(mergeTarget != NULL);
+
+        offsetParam = (*mergeTarget)[component];
+      }
+      break;
+    default:
+      {
+        printf("Not a supported mode");
+        assert(0);
+        exit(-1);
+      }
+    }
+  }
+}
+
+Void TComSampleAdaptiveOffset::reconstructBlkSAOParams(TComPic* pic, SAOBlkParam* saoBlkParams)
+{ // reconstruct SAO parameters for every CTU of the picture and record which components use SAO at all
+  for(Int compIdx = 0; compIdx < MAX_NUM_COMPONENT; compIdx++)
+  {
+    m_picSAOEnabled[compIdx] = false;
+  }
+
+  const Int numberOfComponents = getNumberValidComponents(m_chromaFormatIDC);
+
+  for(Int ctuRsAddr=0; ctuRsAddr< m_numCTUsPic; ctuRsAddr++) // raster-scan order so above/left merge sources are already reconstructed
+  {
+    SAOBlkParam* mergeList[NUM_SAO_MERGE_TYPES] = { NULL };
+    getMergeList(pic, ctuRsAddr, saoBlkParams, mergeList);
+
+    reconstructBlkSAOParam(saoBlkParams[ctuRsAddr], mergeList);
+
+    for(Int compIdx = 0; compIdx < numberOfComponents; compIdx++)
+    {
+      if(saoBlkParams[ctuRsAddr][compIdx].modeIdc != SAO_MODE_OFF)
+      {
+        m_picSAOEnabled[compIdx] = true; // lets SAOProcess() skip the picture when nothing is enabled
+      }
+    }
+  }
+}
+
+
+Void TComSampleAdaptiveOffset::offsetBlock(ComponentID compIdx, Int typeIdx, Int* offset
+                                          , Pel* srcBlk, Pel* resBlk, Int srcStride, Int resStride,  Int width, Int height
+                                          , Bool isLeftAvail,  Bool isRightAvail, Bool isAboveAvail, Bool isBelowAvail, Bool isAboveLeftAvail, Bool isAboveRightAvail, Bool isBelowLeftAvail, Bool isBelowRightAvail)
+{ // apply one SAO type to a single component block: read srcBlk, write clipped results to resBlk; availability flags bound the filtered region at CTU edges
+  if(m_lineBufWidth != m_maxCUWidth) // (re)allocate the two sign line buffers when the CTU width changes
+  {
+    m_lineBufWidth = m_maxCUWidth;
+
+    if (m_signLineBuf1) delete[] m_signLineBuf1; m_signLineBuf1 = NULL;
+    m_signLineBuf1 = new Char[m_lineBufWidth+1];
+
+    if (m_signLineBuf2) delete[] m_signLineBuf2; m_signLineBuf2 = NULL;
+    m_signLineBuf2 = new Char[m_lineBufWidth+1];
+  }
+
+  const Int maxSampleValueIncl = (1<< g_bitDepth[toChannelType(compIdx)] )-1; // clipping ceiling for this component's bit depth
+
+  Int x,y, startX, startY, endX, endY, edgeType;
+  Int firstLineStartX, firstLineEndX, lastLineStartX, lastLineEndX;
+  Char signLeft, signRight, signDown;
+
+  Pel* srcLine = srcBlk;
+  Pel* resLine = resBlk;
+
+  switch(typeIdx)
+  {
+  case SAO_TYPE_EO_0: // horizontal edge offset: classify each sample against its left/right neighbours
+    {
+      offset += 2; // centre the table so edgeType in [-2,2] indexes it directly
+      startX = isLeftAvail ? 0 : 1;
+      endX   = isRightAvail ? width : (width -1);
+      for (y=0; y< height; y++)
+      {
+        signLeft = (Char)sgn(srcLine[startX] - srcLine[startX-1]);
+        for (x=startX; x< endX; x++)
+        {
+          signRight = (Char)sgn(srcLine[x] - srcLine[x+1]); 
+          edgeType =  signRight + signLeft;
+          signLeft  = -signRight; // sign against x+1 becomes (negated) left sign for the next sample
+
+          resLine[x] = Clip3<Int>(0, maxSampleValueIncl, srcLine[x] + offset[edgeType]);
+        }
+        srcLine  += srcStride;
+        resLine += resStride;
+      }
+
+    }
+    break;
+  case SAO_TYPE_EO_90: // vertical edge offset: classify against above/below neighbours, carrying signs in a line buffer
+    {
+      offset += 2;
+      Char *signUpLine = m_signLineBuf1;
+
+      startY = isAboveAvail ? 0 : 1;
+      endY   = isBelowAvail ? height : height-1;
+      if (!isAboveAvail)
+      {
+        srcLine += srcStride;
+        resLine += resStride;
+      }
+
+      Pel* srcLineAbove= srcLine- srcStride;
+      for (x=0; x< width; x++) // seed the "sign vs. above" buffer from the first processed row
+      {
+        signUpLine[x] = (Char)sgn(srcLine[x] - srcLineAbove[x]);
+      }
+
+      Pel* srcLineBelow;
+      for (y=startY; y<endY; y++)
+      {
+        srcLineBelow= srcLine+ srcStride;
+
+        for (x=0; x< width; x++)
+        {
+          signDown  = (Char)sgn(srcLine[x] - srcLineBelow[x]);
+          edgeType = signDown + signUpLine[x];
+          signUpLine[x]= -signDown; // reuse for the next row: its "up" sign is this row's negated "down" sign
+
+          resLine[x] = Clip3<Int>(0, maxSampleValueIncl, srcLine[x] + offset[edgeType]);
+        }
+        srcLine += srcStride;
+        resLine += resStride;
+      }
+
+    }
+    break;
+  case SAO_TYPE_EO_135: // 135-degree diagonal edge offset (neighbours up-left / down-right), double-buffered sign lines
+    {
+      offset += 2;
+      Char *signUpLine, *signDownLine, *signTmpLine;
+
+      signUpLine  = m_signLineBuf1;
+      signDownLine= m_signLineBuf2;
+
+      startX = isLeftAvail ? 0 : 1 ;
+      endX   = isRightAvail ? width : (width-1);
+
+      //prepare 2nd line's upper sign
+      Pel* srcLineBelow= srcLine+ srcStride;
+      for (x=startX; x< endX+1; x++)
+      {
+        signUpLine[x] = (Char)sgn(srcLineBelow[x] - srcLine[x- 1]);
+      }
+
+      //1st line
+      Pel* srcLineAbove= srcLine- srcStride;
+      firstLineStartX = isAboveLeftAvail ? 0 : 1;
+      firstLineEndX   = isAboveAvail? endX: 1;
+      for(x= firstLineStartX; x< firstLineEndX; x++)
+      {
+        edgeType  =  sgn(srcLine[x] - srcLineAbove[x- 1]) - signUpLine[x+1];
+
+        resLine[x] = Clip3<Int>(0, maxSampleValueIncl, srcLine[x] + offset[edgeType]);
+      }
+      srcLine  += srcStride;
+      resLine  += resStride;
+
+
+      //middle lines
+      for (y= 1; y< height-1; y++)
+      {
+        srcLineBelow= srcLine+ srcStride;
+
+        for (x=startX; x<endX; x++)
+        {
+          signDown =  (Char)sgn(srcLine[x] - srcLineBelow[x+ 1]);
+          edgeType =  signDown + signUpLine[x];
+          resLine[x] = Clip3<Int>(0, maxSampleValueIncl, srcLine[x] + offset[edgeType]);
+
+          signDownLine[x+1] = -signDown; // becomes next row's "up" sign, shifted one sample right (diagonal)
+        }
+        signDownLine[startX] = (Char)sgn(srcLineBelow[startX] - srcLine[startX-1]);
+
+        signTmpLine  = signUpLine; // swap the two sign buffers for the next row
+        signUpLine   = signDownLine;
+        signDownLine = signTmpLine;
+
+        srcLine += srcStride;
+        resLine += resStride;
+      }
+
+      //last line
+      srcLineBelow= srcLine+ srcStride;
+      lastLineStartX = isBelowAvail ? startX : (width -1);
+      lastLineEndX   = isBelowRightAvail ? width : (width -1);
+      for(x= lastLineStartX; x< lastLineEndX; x++)
+      {
+        edgeType =  sgn(srcLine[x] - srcLineBelow[x+ 1]) + signUpLine[x];
+        resLine[x] = Clip3<Int>(0, maxSampleValueIncl, srcLine[x] + offset[edgeType]);
+
+      }
+    }
+    break;
+  case SAO_TYPE_EO_45: // 45-degree diagonal edge offset (neighbours up-right / down-left), single shifted sign line
+    {
+      offset += 2;
+      Char *signUpLine = m_signLineBuf1+1; // +1 allows the x-1 index below without underflowing the buffer
+
+      startX = isLeftAvail ? 0 : 1;
+      endX   = isRightAvail ? width : (width -1);
+
+      //prepare 2nd line upper sign
+      Pel* srcLineBelow= srcLine+ srcStride;
+      for (x=startX-1; x< endX; x++)
+      {
+        signUpLine[x] = (Char)sgn(srcLineBelow[x] - srcLine[x+1]);
+      }
+
+
+      //first line
+      Pel* srcLineAbove= srcLine- srcStride;
+      firstLineStartX = isAboveAvail ? startX : (width -1 );
+      firstLineEndX   = isAboveRightAvail ? width : (width-1);
+      for(x= firstLineStartX; x< firstLineEndX; x++)
+      {
+        edgeType = sgn(srcLine[x] - srcLineAbove[x+1]) -signUpLine[x-1];
+        resLine[x] = Clip3<Int>(0, maxSampleValueIncl, srcLine[x] + offset[edgeType]);
+      }
+      srcLine += srcStride;
+      resLine += resStride;
+
+      //middle lines
+      for (y= 1; y< height-1; y++)
+      {
+        srcLineBelow= srcLine+ srcStride;
+
+        for(x= startX; x< endX; x++)
+        {
+          signDown =  (Char)sgn(srcLine[x] - srcLineBelow[x-1]);
+          edgeType =  signDown + signUpLine[x];
+          resLine[x] = Clip3<Int>(0, maxSampleValueIncl, srcLine[x] + offset[edgeType]);
+          signUpLine[x-1] = -signDown; // next row's "up" sign, shifted one sample left (diagonal)
+        }
+        signUpLine[endX-1] = (Char)sgn(srcLineBelow[endX-1] - srcLine[endX]);
+        srcLine  += srcStride;
+        resLine += resStride;
+      }
+
+      //last line
+      srcLineBelow= srcLine+ srcStride;
+      lastLineStartX = isBelowLeftAvail ? 0 : 1;
+      lastLineEndX   = isBelowAvail ? endX : 1;
+      for(x= lastLineStartX; x< lastLineEndX; x++)
+      {
+        edgeType = sgn(srcLine[x] - srcLineBelow[x-1]) + signUpLine[x];
+        resLine[x] = Clip3<Int>(0, maxSampleValueIncl, srcLine[x] + offset[edgeType]);
+
+      }
+    }
+    break;
+  case SAO_TYPE_BO: // band offset: the offset is chosen by the sample's band (top bits of the sample value)
+    {
+      const Int shiftBits = g_bitDepth[toChannelType(compIdx)] - NUM_SAO_BO_CLASSES_LOG2;
+      for (y=0; y< height; y++)
+      {
+        for (x=0; x< width; x++)
+        {
+          resLine[x] = Clip3<Int>(0, maxSampleValueIncl, srcLine[x] + offset[srcLine[x] >> shiftBits] );
+        }
+        srcLine += srcStride;
+        resLine += resStride;
+      }
+    }
+    break;
+  default:
+    {
+      printf("Not a supported SAO types\n");
+      assert(0);
+      exit(-1);
+    }
+  }
+}
+
+Void TComSampleAdaptiveOffset::offsetCTU(Int ctuRsAddr, TComPicYuv* srcYuv, TComPicYuv* resYuv, SAOBlkParam& saoblkParam, TComPic* pPic)
+{ // apply SAO to one CTU: reads srcYuv, writes resYuv, for every component whose mode is not OFF
+  Bool isLeftAvail,isRightAvail,isAboveAvail,isBelowAvail,isAboveLeftAvail,isAboveRightAvail,isBelowLeftAvail,isBelowRightAvail;
+
+  const Int numberOfComponents = getNumberValidComponents(m_chromaFormatIDC);
+  Bool bAllOff=true;
+  for(Int compIdx = 0; compIdx < numberOfComponents; compIdx++) // early out when every component is off for this CTU
+  {
+    if (saoblkParam[compIdx].modeIdc != SAO_MODE_OFF) bAllOff=false;
+  }
+  if (bAllOff) return;
+
+  //block boundary availability
+  pPic->getPicSym()->deriveLoopFilterBoundaryAvailibility(ctuRsAddr, isLeftAvail,isRightAvail,isAboveAvail,isBelowAvail,isAboveLeftAvail,isAboveRightAvail,isBelowLeftAvail,isBelowRightAvail);
+
+  Int yPos   = (ctuRsAddr / m_numCTUInWidth)*m_maxCUHeight;
+  Int xPos   = (ctuRsAddr % m_numCTUInWidth)*m_maxCUWidth;
+  Int height = (yPos + m_maxCUHeight > m_picHeight)?(m_picHeight- yPos):m_maxCUHeight; // clip the CTU rectangle to the picture
+  Int width  = (xPos + m_maxCUWidth  > m_picWidth )?(m_picWidth - xPos):m_maxCUWidth;
+
+  for(Int compIdx = 0; compIdx < numberOfComponents; compIdx++)
+  {
+    const ComponentID component = ComponentID(compIdx);
+    SAOOffset& ctbOffset = saoblkParam[compIdx];
+
+    if(ctbOffset.modeIdc != SAO_MODE_OFF)
+    {
+      const UInt componentScaleX = getComponentScaleX(component, pPic->getChromaFormat()); // chroma subsampling shifts
+      const UInt componentScaleY = getComponentScaleY(component, pPic->getChromaFormat());
+
+      Int  blkWidth   = (width  >> componentScaleX);
+      Int  blkHeight  = (height >> componentScaleY);
+      Int  blkXPos    = (xPos   >> componentScaleX);
+      Int  blkYPos    = (yPos   >> componentScaleY);
+
+      Int  srcStride  = srcYuv->getStride(component);
+      Pel* srcBlk     = srcYuv->getAddr(component) + blkYPos*srcStride + blkXPos;
+
+      Int  resStride  = resYuv->getStride(component);
+      Pel* resBlk     = resYuv->getAddr(component) + blkYPos*resStride + blkXPos;
+
+      offsetBlock( component, ctbOffset.typeIdc, ctbOffset.offset
+                  , srcBlk, resBlk, srcStride, resStride, blkWidth, blkHeight
+                  , isLeftAvail, isRightAvail
+                  , isAboveAvail, isBelowAvail
+                  , isAboveLeftAvail, isAboveRightAvail
+                  , isBelowLeftAvail, isBelowRightAvail
+                  );
+    }
+  } //compIdx
+
+}
+
+
+Void TComSampleAdaptiveOffset::SAOProcess(TComPic* pDecPic) // picture-level SAO: filter every CTU of the reconstructed picture in place
+{
+  const Int numberOfComponents = getNumberValidComponents(m_chromaFormatIDC);
+  Bool bAllDisabled=true;
+  for(Int compIdx = 0; compIdx < numberOfComponents; compIdx++) // flags set by reconstructBlkSAOParams()
+  {
+    if (m_picSAOEnabled[compIdx]) bAllDisabled=false;
+  }
+  if (bAllDisabled) return;
+
+  TComPicYuv* resYuv = pDecPic->getPicYuvRec();
+  TComPicYuv* srcYuv = m_tempPicYuv;
+  resYuv->copyToPic(srcYuv); // snapshot the unfiltered picture so filtering reads pre-SAO samples throughout
+  for(Int ctuRsAddr= 0; ctuRsAddr < m_numCTUsPic; ctuRsAddr++)
+  {
+    offsetCTU(ctuRsAddr, srcYuv, resYuv, (pDecPic->getPicSym()->getSAOBlkParam())[ctuRsAddr], pDecPic);
+  } //ctu
+}
+
+
+/** PCM LF disable process.
+ * \param pcPic picture (TComPic) pointer
+ * \returns Void
+ *
+ * \note Replace filtered sample values of PCM mode blocks with the transmitted and reconstructed ones.
+ */
+Void TComSampleAdaptiveOffset::PCMLFDisableProcess (TComPic* pcPic)
+{
+  xPCMRestoration(pcPic);
+}
+
+/** Picture-level PCM restoration.
+ * \param pcPic picture (TComPic) pointer
+ * \returns Void
+ */
+Void TComSampleAdaptiveOffset::xPCMRestoration(TComPic* pcPic)
+{
+  Bool  bPCMFilter = (pcPic->getSlice(0)->getSPS()->getUsePCM() && pcPic->getSlice(0)->getSPS()->getPCMFilterDisableFlag())? true : false;
+
+  if(bPCMFilter || pcPic->getSlice(0)->getPPS()->getTransquantBypassEnableFlag()) // also needed when lossless (transquant-bypass) CUs may exist
+  {
+    for( UInt ctuRsAddr = 0; ctuRsAddr < pcPic->getNumberOfCtusInFrame() ; ctuRsAddr++ ) // walk every CTU and restore recursively
+    {
+      TComDataCU* pcCU = pcPic->getCtu(ctuRsAddr);
+
+      xPCMCURestoration(pcCU, 0, 0);
+    }
+  }
+}
+
+/** PCM CU restoration.
+ * \param pcCU pointer to current CU
+ * \param uiAbsZorderIdx part index (z-scan order)
+ * \param uiDepth CU depth
+ * \returns Void
+ */
+Void TComSampleAdaptiveOffset::xPCMCURestoration ( TComDataCU* pcCU, UInt uiAbsZorderIdx, UInt uiDepth )
+{
+  TComPic* pcPic     = pcCU->getPic();
+  UInt uiCurNumParts = pcPic->getNumPartitionsInCtu() >> (uiDepth<<1); // partitions covered by a CU at this depth
+  UInt uiQNumParts   = uiCurNumParts>>2; // partitions per quadrant when recursing
+
+  // go to sub-CU
+  if( pcCU->getDepth(uiAbsZorderIdx) > uiDepth ) // this CU is split further: recurse into the four quadrants
+  {
+    for ( UInt uiPartIdx = 0; uiPartIdx < 4; uiPartIdx++, uiAbsZorderIdx+=uiQNumParts )
+    {
+      UInt uiLPelX   = pcCU->getCUPelX() + g_auiRasterToPelX[ g_auiZscanToRaster[uiAbsZorderIdx] ];
+      UInt uiTPelY   = pcCU->getCUPelY() + g_auiRasterToPelY[ g_auiZscanToRaster[uiAbsZorderIdx] ];
+      if( ( uiLPelX < pcCU->getSlice()->getSPS()->getPicWidthInLumaSamples() ) && ( uiTPelY < pcCU->getSlice()->getSPS()->getPicHeightInLumaSamples() ) ) // skip quadrants outside the picture
+        xPCMCURestoration( pcCU, uiAbsZorderIdx, uiDepth+1 );
+    }
+    return;
+  }
+
+  // restore PCM samples
+  if ((pcCU->getIPCMFlag(uiAbsZorderIdx)&& pcPic->getSlice(0)->getSPS()->getPCMFilterDisableFlag()) || pcCU->isLosslessCoded( uiAbsZorderIdx)) // restore IPCM (filter disabled) and lossless CUs
+  {
+    const UInt numComponents=pcPic->getNumberValidComponents();
+    for(UInt comp=0; comp<numComponents; comp++)
+    {
+      xPCMSampleRestoration (pcCU, uiAbsZorderIdx, uiDepth, ComponentID(comp));
+    }
+  }
+}
+
+/** PCM sample restoration.
+ * \param pcCU pointer to current CU
+ * \param uiAbsZorderIdx part index (z-scan order)
+ * \param uiDepth CU depth
+ * \param compID colour component to restore
+ * \returns Void
+ */
+Void TComSampleAdaptiveOffset::xPCMSampleRestoration (TComDataCU* pcCU, UInt uiAbsZorderIdx, UInt uiDepth, const ComponentID compID)
+{
+        TComPicYuv* pcPicYuvRec = pcCU->getPic()->getPicYuvRec();
+        UInt uiPcmLeftShiftBit;
+  const UInt uiMinCoeffSize = pcCU->getPic()->getMinCUWidth()*pcCU->getPic()->getMinCUHeight();
+  const UInt csx=pcPicYuvRec->getComponentScaleX(compID); // chroma subsampling shifts for this component
+  const UInt csy=pcPicYuvRec->getComponentScaleY(compID);
+  const UInt uiOffset   = (uiMinCoeffSize*uiAbsZorderIdx)>>(csx+csy); // offset into the CU's stored PCM sample buffer
+
+        Pel *piSrc = pcPicYuvRec->getAddr(compID, pcCU->getCtuRsAddr(), uiAbsZorderIdx);
+  const Pel *piPcm = pcCU->getPCMSample(compID) + uiOffset;
+  const UInt uiStride  = pcPicYuvRec->getStride(compID);
+  const UInt uiWidth  = ((g_uiMaxCUWidth >> uiDepth) >> csx);
+  const UInt uiHeight = ((g_uiMaxCUWidth >> uiDepth) >> csy); // NOTE(review): height derived from g_uiMaxCUWidth — only valid if CTUs are square; confirm
+
+  if ( pcCU->isLosslessCoded(uiAbsZorderIdx) && !pcCU->getIPCMFlag(uiAbsZorderIdx) ) // lossless non-IPCM samples are stored at full bit depth
+  {
+    uiPcmLeftShiftBit = 0;
+  }
+  else
+  {
+    uiPcmLeftShiftBit = g_bitDepth[toChannelType(compID)] - pcCU->getSlice()->getSPS()->getPCMBitDepth(toChannelType(compID)); // scale PCM samples up to the internal bit depth
+  }
+
+  for(UInt uiY = 0; uiY < uiHeight; uiY++ ) // overwrite the reconstructed block with the stored PCM samples
+  {
+    for(UInt uiX = 0; uiX < uiWidth; uiX++ )
+    {
+      piSrc[uiX] = (piPcm[uiX] << uiPcmLeftShiftBit);
+    }
+    piPcm += uiWidth;
+    piSrc += uiStride;
+  }
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComSampleAdaptiveOffset.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,106 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComSampleAdaptiveOffset.h
+    \brief    sample adaptive offset class (header)
+*/
+
+#ifndef __TCOMSAMPLEADAPTIVEOFFSET__
+#define __TCOMSAMPLEADAPTIVEOFFSET__
+
+#include "CommonDef.h"
+#include "TComPic.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+
+// ====================================================================================================================
+// Constants
+// ====================================================================================================================
+
+#define MAX_SAO_TRUNCATED_BITDEPTH     10
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+extern UInt g_saoMaxOffsetQVal[MAX_NUM_COMPONENT];
+
+// Signum: +1 for positive values, -1 for negative values, 0 for zero.
+template <typename T> Int sgn(T val)
+{
+  const Bool isPositive = (val > T(0));
+  const Bool isNegative = (val < T(0));
+  return (isPositive ? 1 : 0) - (isNegative ? 1 : 0);
+}
+
+/// Sample Adaptive Offset (SAO) in-loop filter.
+/// Reconstructs per-CTU SAO parameters, applies the resulting offsets to a
+/// decoded picture, and restores PCM / lossless samples where the in-loop
+/// filters are disabled.
+class TComSampleAdaptiveOffset
+{
+public:
+  TComSampleAdaptiveOffset();
+  virtual ~TComSampleAdaptiveOffset();
+  Void SAOProcess(TComPic* pDecPic);  ///< apply SAO to an entire decoded picture
+  Void create( Int picWidth, Int picHeight, ChromaFormat format, UInt maxCUWidth, UInt maxCUHeight, UInt maxCUDepth, UInt lumaBitShift, UInt chromaBitShift );
+  Void destroy();
+  Void reconstructBlkSAOParams(TComPic* pic, SAOBlkParam* saoBlkParams);  ///< derive final per-CTU SAO parameters (resolving merge)
+  Void PCMLFDisableProcess (TComPic* pcPic);  ///< restore PCM/lossless samples when loop filtering is disabled
+protected:
+  Void offsetBlock(ComponentID compIdx, Int typeIdx, Int* offset, Pel* srcBlk, Pel* resBlk, Int srcStride, Int resStride,  Int width, Int height
+                  , Bool isLeftAvail, Bool isRightAvail, Bool isAboveAvail, Bool isBelowAvail, Bool isAboveLeftAvail, Bool isAboveRightAvail, Bool isBelowLeftAvail, Bool isBelowRightAvail);
+  Void invertQuantOffsets(ComponentID compIdx, Int typeIdc, Int typeAuxInfo, Int* dstOffsets, Int* srcOffsets);
+  Void reconstructBlkSAOParam(SAOBlkParam& recParam, SAOBlkParam* mergeList[NUM_SAO_MERGE_TYPES]);
+  Int  getMergeList(TComPic* pic, Int ctuRsAddr, SAOBlkParam* blkParams, SAOBlkParam* mergeList[NUM_SAO_MERGE_TYPES]);
+  Void offsetCTU(Int ctuRsAddr, TComPicYuv* srcYuv, TComPicYuv* resYuv, SAOBlkParam& saoblkParam, TComPic* pPic);
+  Void xPCMRestoration(TComPic* pcPic);
+  Void xPCMCURestoration ( TComDataCU* pcCU, UInt uiAbsZorderIdx, UInt uiDepth );
+  Void xPCMSampleRestoration (TComDataCU* pcCU, UInt uiAbsZorderIdx, UInt uiDepth, ComponentID component);
+protected:
+  UInt m_offsetStepLog2[MAX_NUM_COMPONENT]; //offset step
+  TComPicYuv*   m_tempPicYuv; //temporary buffer
+  Int m_picWidth;
+  Int m_picHeight;
+  Int m_maxCUWidth;
+  Int m_maxCUHeight;
+  Int m_numCTUInWidth;
+  Int m_numCTUInHeight;
+  Int m_numCTUsPic;
+
+
+  Int m_lineBufWidth;   // current width the sign-line buffers were sized for
+  Char* m_signLineBuf1; // scratch buffer (see offsetBlock) — allocated elsewhere
+  Char* m_signLineBuf2; // scratch buffer (see offsetBlock) — allocated elsewhere
+  ChromaFormat m_chromaFormatIDC;
+private:
+  Bool m_picSAOEnabled[MAX_NUM_COMPONENT];
+};
+
+//! \}
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComSlice.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2417 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComSlice.cpp
+    \brief    slice header and SPS class
+*/
+
+#include "CommonDef.h"
+#include "TComSlice.h"
+#include "TComPic.h"
+#include "TLibEncoder/TEncSbac.h"
+//#include "TLibDecoder/TDecSbac.h"
+
+
+//! \ingroup TLibCommon
+//! \{
+
+/** Default constructor.
+ *  Initialises every slice-header field to its default and clears all
+ *  reference-picture bookkeeping (lists, POC caches, equal-ref matrix,
+ *  weighted-prediction tables, SAO flags). */
+TComSlice::TComSlice()
+: m_iPPSId                        ( -1 )
+, m_PicOutputFlag                 ( true )
+, m_iPOC                          ( 0 )
+, m_iLastIDR                      ( 0 )
+, m_iAssociatedIRAP               ( 0 )
+, m_iAssociatedIRAPType           ( NAL_UNIT_INVALID )
+, m_pcRPS                         ( 0 )
+, m_LocalRPS                      ( )
+, m_iBDidx                        ( 0 )
+, m_RefPicListModification        ( )
+, m_eNalUnitType                  ( NAL_UNIT_CODED_SLICE_IDR_W_RADL )
+, m_eSliceType                    ( I_SLICE )
+, m_iSliceQp                      ( 0 )
+, m_dependentSliceSegmentFlag     ( false )
+#if ADAPTIVE_QP_SELECTION
+, m_iSliceQpBase                  ( 0 )
+#endif
+, m_ChromaQpAdjEnabled            ( false )
+, m_deblockingFilterDisable       ( false )
+, m_deblockingFilterOverrideFlag  ( false )
+, m_deblockingFilterBetaOffsetDiv2( 0 )
+, m_deblockingFilterTcOffsetDiv2  ( 0 )
+, m_bCheckLDC                     ( false )
+, m_iSliceQpDelta                 ( 0 )
+, m_iDepth                        ( 0 )
+, m_bRefenced                     ( false )
+, m_pcVPS                         ( NULL )
+, m_pcSPS                         ( NULL )
+, m_pcPPS                         ( NULL )
+, m_pcPic                         ( NULL )
+#if ADAPTIVE_QP_SELECTION
+, m_pcTrQuant                     ( NULL )
+#endif
+, m_colFromL0Flag                 ( 1 )
+, m_noOutputPriorPicsFlag         ( false )
+, m_noRaslOutputFlag              ( false )
+, m_handleCraAsBlaFlag            ( false )
+, m_colRefIdx                     ( 0 )
+, m_maxNumMergeCand               ( 0 )
+, m_uiTLayer                      ( 0 )
+, m_bTLayerSwitchingFlag          ( false )
+, m_sliceMode                     ( NO_SLICES )
+, m_sliceArgument                 ( 0 )
+, m_sliceCurStartCtuTsAddr        ( 0 )
+, m_sliceCurEndCtuTsAddr          ( 0 )
+, m_sliceIdx                      ( 0 )
+, m_sliceSegmentMode              ( NO_SLICES )
+, m_sliceSegmentArgument          ( 0 )
+, m_sliceSegmentCurStartCtuTsAddr ( 0 )
+, m_sliceSegmentCurEndCtuTsAddr   ( 0 )
+, m_nextSlice                     ( false )
+, m_nextSliceSegment              ( false )
+, m_sliceBits                     ( 0 )
+, m_sliceSegmentBits              ( 0 )
+, m_bFinalized                    ( false )
+, m_substreamSizes                ( )
+, m_scalingList                   ( NULL )
+, m_cabacInitFlag                 ( false )
+, m_bLMvdL1Zero                   ( false )
+, m_temporalLayerNonReferenceFlag ( false )
+, m_LFCrossSliceBoundaryFlag      ( false )
+, m_enableTMVPFlag                ( true )
+{
+  // No active references in either list yet.
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_aiNumRefIdx[i] = 0;
+  }
+
+  for (UInt component = 0; component < MAX_NUM_COMPONENT; component++)
+  {
+    m_lambdas            [component] = 0.0;
+    m_iSliceChromaQpDelta[component] = 0;
+  }
+
+  initEqualRef();
+
+  // -1 marks "no matching list-0 index" for every list-1 entry.
+  for ( Int idx = 0; idx < MAX_NUM_REF; idx++ )
+  {
+    m_list1IdxToList0Idx[idx] = -1;
+  }
+
+  for(Int iNumCount = 0; iNumCount < MAX_NUM_REF; iNumCount++)
+  {
+    for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+    {
+      m_apcRefPicList [i][iNumCount] = NULL;
+      m_aiRefPOCList  [i][iNumCount] = 0;
+    }
+  }
+
+  resetWpScaling();
+  initWpAcDcParam();
+
+  for(Int ch=0; ch < MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    m_saoEnabledFlag[ch] = false;
+  }
+}
+
+/** Destructor — trivial: no resources are released here. */
+TComSlice::~TComSlice()
+{
+}
+
+
+/** Re-initialise per-slice state before (re)use of this TComSlice object.
+ *  Resets reference counts, collocated-picture selection, chroma QP deltas,
+ *  merge-candidate limit, CABAC/TMVP flags and the substream size list. */
+Void TComSlice::initSlice()
+{
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_aiNumRefIdx[i]      = 0;
+  }
+  m_colFromL0Flag = 1;  // default: collocated picture taken from list 0
+
+  m_colRefIdx = 0;
+  initEqualRef();
+
+  m_bCheckLDC = false;
+
+  for (UInt component = 0; component < MAX_NUM_COMPONENT; component++) m_iSliceChromaQpDelta[component] = 0;
+
+  m_maxNumMergeCand = MRG_MAX_NUM_CANDS;
+
+  m_bFinalized=false;
+
+  m_substreamSizes.clear();
+  m_cabacInitFlag        = false;
+  m_enableTMVPFlag = true;
+}
+
+/** Return true when this slice's NAL unit type is a random access point
+ *  (IDR, BLA or CRA slice). */
+Bool TComSlice::getRapPicFlag()
+{
+  switch (getNalUnitType())
+  {
+  case NAL_UNIT_CODED_SLICE_IDR_W_RADL:
+  case NAL_UNIT_CODED_SLICE_IDR_N_LP:
+  case NAL_UNIT_CODED_SLICE_BLA_N_LP:
+  case NAL_UNIT_CODED_SLICE_BLA_W_RADL:
+  case NAL_UNIT_CODED_SLICE_BLA_W_LP:
+  case NAL_UNIT_CODED_SLICE_CRA:
+    return true;
+  default:
+    return false;
+  }
+}
+
+
+/** Insertion-sort the picture buffer into ascending POC order.
+ *  Uses list splicing (insert + erase of a one-element range) so the
+ *  TComPic pointers themselves are moved, not copied.  Also resets each
+ *  visited picture's current slice index to 0. */
+Void  TComSlice::sortPicList        (TComList<TComPic*>& rcListPic)
+{
+  TComPic*    pcPicExtract;
+  TComPic*    pcPicInsert;
+
+  TComList<TComPic*>::iterator    iterPicExtract;
+  TComList<TComPic*>::iterator    iterPicExtract_1;
+  TComList<TComPic*>::iterator    iterPicInsert;
+
+  for (Int i = 1; i < (Int)(rcListPic.size()); i++)
+  {
+    // Element i is the next one to place into the already-sorted prefix [0, i).
+    iterPicExtract = rcListPic.begin();
+    for (Int j = 0; j < i; j++) iterPicExtract++;
+    pcPicExtract = *(iterPicExtract);
+    pcPicExtract->setCurrSliceIdx(0);
+
+    // Find the first position in the sorted prefix with POC >= the extracted POC.
+    iterPicInsert = rcListPic.begin();
+    while (iterPicInsert != iterPicExtract)
+    {
+      pcPicInsert = *(iterPicInsert);
+      pcPicInsert->setCurrSliceIdx(0);
+      if (pcPicInsert->getPOC() >= pcPicExtract->getPOC())
+      {
+        break;
+      }
+
+      iterPicInsert++;
+    }
+
+    iterPicExtract_1 = iterPicExtract;    iterPicExtract_1++;
+
+    //  swap iterPicExtract and iterPicInsert, iterPicExtract = curr. / iterPicInsert = insertion position
+    rcListPic.insert (iterPicInsert, iterPicExtract, iterPicExtract_1);
+    rcListPic.erase  (iterPicExtract);
+  }
+}
+
+/** Find the picture with the given POC in the picture buffer.
+ *  \param rcListPic picture buffer
+ *  \param poc       picture order count to look for
+ *  \return the matching picture; if none matches, the last picture
+ *          inspected (NULL when the list is empty).
+ *
+ *  The original implementation advanced the iterator and dereferenced it
+ *  before re-testing against end(), which dereferenced the end iterator
+ *  (undefined behaviour) whenever no picture matched — and also
+ *  dereferenced begin() on an empty list.  This version never
+ *  dereferences an invalid iterator. */
+TComPic* TComSlice::xGetRefPic (TComList<TComPic*>& rcListPic, Int poc)
+{
+  TComPic* pcPic = NULL;
+  for (TComList<TComPic*>::iterator iterPic = rcListPic.begin(); iterPic != rcListPic.end(); ++iterPic)
+  {
+    pcPic = *(iterPic);
+    if (pcPic->getPOC() == poc)
+    {
+      break;  // found it
+    }
+  }
+  // Conformant bitstreams always contain the requested picture; callers
+  // do not expect a miss here.
+  return  pcPic;
+}
+
+
+/** Find the long-term reference picture with the given POC.
+ *  \param rcListPic  picture buffer
+ *  \param poc        POC to match (LSBs only when pocHasMsb is false)
+ *  \param pocHasMsb  whether the POC MSBs are available for the match
+ *  \return the matching long-term picture; otherwise the matching
+ *          short-term picture if one exists; otherwise the first picture
+ *          in the buffer (NULL when the buffer is empty).
+ *
+ *  The original code dereferenced begin() unconditionally, which is
+ *  undefined behaviour for an empty list; this version guards it. */
+TComPic* TComSlice::xGetLongTermRefPic(TComList<TComPic*>& rcListPic, Int poc, Bool pocHasMsb)
+{
+  // Fallback result: the first picture in the buffer (original behaviour).
+  TComPic* pcStPic = rcListPic.empty() ? NULL : *(rcListPic.begin());
+
+  Int pocCycle = 1 << getSPS()->getBitsForPOC();
+  if (!pocHasMsb)
+  {
+    poc = poc & (pocCycle - 1);  // compare POC LSBs only
+  }
+
+  for (TComList<TComPic*>::iterator iterPic = rcListPic.begin(); iterPic != rcListPic.end(); ++iterPic)
+  {
+    TComPic* pcPic = *(iterPic);
+    // Skip the current picture and anything not marked as referenced.
+    if (pcPic && pcPic->getPOC()!=this->getPOC() && pcPic->getSlice( 0 )->isReferenced())
+    {
+      Int picPoc = pcPic->getPOC();
+      if (!pocHasMsb)
+      {
+        picPoc = picPoc & (pocCycle - 1);
+      }
+
+      if (poc == picPoc)
+      {
+        if(pcPic->getIsLongTerm())
+        {
+          return pcPic;  // exact long-term match
+        }
+        else
+        {
+          pcStPic = pcPic;  // POC matches but the picture is short-term
+        }
+        break;
+      }
+    }
+  }
+
+  return  pcStPic;
+}
+
+/** Cache the POC of every active reference picture in both lists. */
+Void TComSlice::setRefPOCList       ()
+{
+  for (Int listIdx = 0; listIdx < NUM_REF_PIC_LIST_01; listIdx++)
+  {
+    const Int numActiveRefs = m_aiNumRefIdx[listIdx];
+    for (Int refIdx = 0; refIdx < numActiveRefs; refIdx++)
+    {
+      m_aiRefPOCList[listIdx][refIdx] = m_apcRefPicList[listIdx][refIdx]->getPOC();
+    }
+  }
+}
+
+/** For each list-1 reference, record the index of the first list-0
+ *  reference with the same POC, or -1 when no such reference exists. */
+Void TComSlice::setList1IdxToList0Idx()
+{
+  const Int numL1 = getNumRefIdx( REF_PIC_LIST_1 );
+  const Int numL0 = getNumRefIdx( REF_PIC_LIST_0 );
+
+  for ( Int idxL1 = 0; idxL1 < numL1; idxL1++ )
+  {
+    const Int pocL1 = m_apcRefPicList[REF_PIC_LIST_1][idxL1]->getPOC();
+    Int match = -1;
+    for ( Int idxL0 = 0; idxL0 < numL0 && match < 0; idxL0++ )
+    {
+      if ( m_apcRefPicList[REF_PIC_LIST_0][idxL0]->getPOC() == pocL1 )
+      {
+        match = idxL0;
+      }
+    }
+    m_list1IdxToList0Idx[idxL1] = match;
+  }
+}
+
+/** Build the final reference picture lists L0/L1 from the active RPS.
+ *  Gathers the "used by current picture" RPS entries into the three
+ *  subsets StCurrBefore, StCurrAfter and LtCurr, forms the canonical
+ *  temporary lists, then applies the optional ref_pic_list_modification
+ *  mapping (HEVC spec 8.3.4).
+ *  \param rcListPic             picture buffer
+ *  \param checkNumPocTotalCurr  when true, additionally enforce the
+ *         bitstream-conformance constraints on NumPocTotalCurr. */
+Void TComSlice::setRefPicList( TComList<TComPic*>& rcListPic, Bool checkNumPocTotalCurr )
+{
+  if (!checkNumPocTotalCurr)
+  {
+    if (m_eSliceType == I_SLICE)
+    {
+      // I slices carry no reference lists.
+      ::memset( m_apcRefPicList, 0, sizeof (m_apcRefPicList));
+      ::memset( m_aiNumRefIdx,   0, sizeof ( m_aiNumRefIdx ));
+
+      return;
+    }
+
+    m_aiNumRefIdx[REF_PIC_LIST_0] = getNumRefIdx(REF_PIC_LIST_0);
+    m_aiNumRefIdx[REF_PIC_LIST_1] = getNumRefIdx(REF_PIC_LIST_1);
+  }
+
+  TComPic*  pcRefPic= NULL;
+  static const UInt MAX_NUM_NEGATIVE_PICTURES=16;
+  TComPic*  RefPicSetStCurr0[MAX_NUM_NEGATIVE_PICTURES];
+  TComPic*  RefPicSetStCurr1[MAX_NUM_NEGATIVE_PICTURES];
+  TComPic*  RefPicSetLtCurr[MAX_NUM_NEGATIVE_PICTURES];
+  UInt NumPocStCurr0 = 0;
+  UInt NumPocStCurr1 = 0;
+  UInt NumPocLtCurr = 0;
+  Int i;
+
+  // Short-term pictures with negative delta POC (before current picture).
+  for(i=0; i < m_pcRPS->getNumberOfNegativePictures(); i++)
+  {
+    if(m_pcRPS->getUsed(i))
+    {
+      pcRefPic = xGetRefPic(rcListPic, getPOC()+m_pcRPS->getDeltaPOC(i));
+      pcRefPic->setIsLongTerm(0);
+      pcRefPic->getPicYuvRec()->extendPicBorder();
+      RefPicSetStCurr0[NumPocStCurr0] = pcRefPic;
+      NumPocStCurr0++;
+      pcRefPic->setCheckLTMSBPresent(false);
+    }
+  }
+
+  // Short-term pictures with positive delta POC (after current picture).
+  for(; i < m_pcRPS->getNumberOfNegativePictures()+m_pcRPS->getNumberOfPositivePictures(); i++)
+  {
+    if(m_pcRPS->getUsed(i))
+    {
+      pcRefPic = xGetRefPic(rcListPic, getPOC()+m_pcRPS->getDeltaPOC(i));
+      pcRefPic->setIsLongTerm(0);
+      pcRefPic->getPicYuvRec()->extendPicBorder();
+      RefPicSetStCurr1[NumPocStCurr1] = pcRefPic;
+      NumPocStCurr1++;
+      pcRefPic->setCheckLTMSBPresent(false);
+    }
+  }
+
+  // Long-term pictures, iterated in reverse RPS order.
+  for(i = m_pcRPS->getNumberOfNegativePictures()+m_pcRPS->getNumberOfPositivePictures()+m_pcRPS->getNumberOfLongtermPictures()-1; i > m_pcRPS->getNumberOfNegativePictures()+m_pcRPS->getNumberOfPositivePictures()-1 ; i--)
+  {
+    if(m_pcRPS->getUsed(i))
+    {
+      pcRefPic = xGetLongTermRefPic(rcListPic, m_pcRPS->getPOC(i), m_pcRPS->getCheckLTMSBPresent(i));
+      pcRefPic->setIsLongTerm(1);
+      pcRefPic->getPicYuvRec()->extendPicBorder();
+      RefPicSetLtCurr[NumPocLtCurr] = pcRefPic;
+      NumPocLtCurr++;
+    }
+    // NOTE(review): pcRefPic is NULL here only when no entry so far was
+    // "used"; the re-fetch below keeps setCheckLTMSBPresent well-defined
+    // for unused entries — confirm this matches upstream intent.
+    if(pcRefPic==NULL)
+    {
+      pcRefPic = xGetLongTermRefPic(rcListPic, m_pcRPS->getPOC(i), m_pcRPS->getCheckLTMSBPresent(i));
+    }
+    pcRefPic->setCheckLTMSBPresent(m_pcRPS->getCheckLTMSBPresent(i));
+  }
+
+  // ref_pic_list_init
+  TComPic*  rpsCurrList0[MAX_NUM_REF+1];
+  TComPic*  rpsCurrList1[MAX_NUM_REF+1];
+  Int numPocTotalCurr = NumPocStCurr0 + NumPocStCurr1 + NumPocLtCurr;
+
+  if (checkNumPocTotalCurr)
+  {
+    // The variable NumPocTotalCurr is derived as specified in subclause 7.4.7.2. It is a requirement of bitstream conformance that the following applies to the value of NumPocTotalCurr:
+    // - If the current picture is a BLA or CRA picture, the value of NumPocTotalCurr shall be equal to 0.
+    // - Otherwise, when the current picture contains a P or B slice, the value of NumPocTotalCurr shall not be equal to 0.
+    if (getRapPicFlag())
+    {
+      assert(numPocTotalCurr == 0);
+    }
+
+    if (m_eSliceType == I_SLICE)
+    {
+      ::memset( m_apcRefPicList, 0, sizeof (m_apcRefPicList));
+      ::memset( m_aiNumRefIdx,   0, sizeof ( m_aiNumRefIdx ));
+
+      return;
+    }
+
+    assert(numPocTotalCurr > 0);
+
+    m_aiNumRefIdx[0] = getNumRefIdx(REF_PIC_LIST_0);
+    m_aiNumRefIdx[1] = getNumRefIdx(REF_PIC_LIST_1);
+  }
+
+  // Canonical L0 order: StCurrBefore, StCurrAfter, LtCurr.
+  Int cIdx = 0;
+  for ( i=0; i<NumPocStCurr0; i++, cIdx++)
+  {
+    rpsCurrList0[cIdx] = RefPicSetStCurr0[i];
+  }
+  for ( i=0; i<NumPocStCurr1; i++, cIdx++)
+  {
+    rpsCurrList0[cIdx] = RefPicSetStCurr1[i];
+  }
+  for ( i=0; i<NumPocLtCurr;  i++, cIdx++)
+  {
+    rpsCurrList0[cIdx] = RefPicSetLtCurr[i];
+  }
+  assert(cIdx == numPocTotalCurr);
+
+  if (m_eSliceType==B_SLICE)
+  {
+    // Canonical L1 order: StCurrAfter, StCurrBefore, LtCurr.
+    cIdx = 0;
+    for ( i=0; i<NumPocStCurr1; i++, cIdx++)
+    {
+      rpsCurrList1[cIdx] = RefPicSetStCurr1[i];
+    }
+    for ( i=0; i<NumPocStCurr0; i++, cIdx++)
+    {
+      rpsCurrList1[cIdx] = RefPicSetStCurr0[i];
+    }
+    for ( i=0; i<NumPocLtCurr;  i++, cIdx++)
+    {
+      rpsCurrList1[cIdx] = RefPicSetLtCurr[i];
+    }
+    assert(cIdx == numPocTotalCurr);
+  }
+
+  ::memset(m_bIsUsedAsLongTerm, 0, sizeof(m_bIsUsedAsLongTerm));
+
+  // NOTE(review): if numPocTotalCurr were 0 here (non-conformant P/B
+  // slice without list modification), "rIdx % numPocTotalCurr" divides
+  // by zero; conformant streams guarantee numPocTotalCurr > 0.
+  for (Int rIdx = 0; rIdx < m_aiNumRefIdx[REF_PIC_LIST_0]; rIdx ++)
+  {
+    cIdx = m_RefPicListModification.getRefPicListModificationFlagL0() ? m_RefPicListModification.getRefPicSetIdxL0(rIdx) : rIdx % numPocTotalCurr;
+    assert(cIdx >= 0 && cIdx < numPocTotalCurr);
+    m_apcRefPicList[REF_PIC_LIST_0][rIdx] = rpsCurrList0[ cIdx ];
+    m_bIsUsedAsLongTerm[REF_PIC_LIST_0][rIdx] = ( cIdx >= NumPocStCurr0 + NumPocStCurr1 );
+  }
+  if ( m_eSliceType != B_SLICE )
+  {
+    m_aiNumRefIdx[REF_PIC_LIST_1] = 0;
+    ::memset( m_apcRefPicList[REF_PIC_LIST_1], 0, sizeof(m_apcRefPicList[REF_PIC_LIST_1]));
+  }
+  else
+  {
+    for (Int rIdx = 0; rIdx < m_aiNumRefIdx[REF_PIC_LIST_1]; rIdx ++)
+    {
+      cIdx = m_RefPicListModification.getRefPicListModificationFlagL1() ? m_RefPicListModification.getRefPicSetIdxL1(rIdx) : rIdx % numPocTotalCurr;
+      assert(cIdx >= 0 && cIdx < numPocTotalCurr);
+      m_apcRefPicList[REF_PIC_LIST_1][rIdx] = rpsCurrList1[ cIdx ];
+      m_bIsUsedAsLongTerm[REF_PIC_LIST_1][rIdx] = ( cIdx >= NumPocStCurr0 + NumPocStCurr1 );
+    }
+  }
+}
+
+/** Count the RPS entries (negative, positive and long-term) that are
+ *  marked as used by the current picture; 0 for I slices. */
+Int TComSlice::getNumRpsCurrTempList()
+{
+  if (m_eSliceType == I_SLICE)
+  {
+    return 0;  // I slices reference no pictures
+  }
+
+  const Int totalEntries = m_pcRPS->getNumberOfNegativePictures()
+                         + m_pcRPS->getNumberOfPositivePictures()
+                         + m_pcRPS->getNumberOfLongtermPictures();
+  Int numRpsCurrTempList = 0;
+  for (Int entry = 0; entry < totalEntries; entry++)
+  {
+    if (m_pcRPS->getUsed(entry))
+    {
+      numRpsCurrTempList++;
+    }
+  }
+  return numRpsCurrTempList;
+}
+
+/** Reset the equal-reference matrix: each reference index is equal only
+ *  to itself.  The matrix is kept symmetric. */
+Void TComSlice::initEqualRef()
+{
+  for (Int iDir = 0; iDir < NUM_REF_PIC_LIST_01; iDir++)
+  {
+    for (Int iRefIdx1 = 0; iRefIdx1 < MAX_NUM_REF; iRefIdx1++)
+    {
+      for (Int iRefIdx2 = iRefIdx1; iRefIdx2 < MAX_NUM_REF; iRefIdx2++)
+      {
+        const Bool bEqual = (iRefIdx1 == iRefIdx2);
+        m_abEqualRef[iDir][iRefIdx1][iRefIdx2] = bEqual;
+        m_abEqualRef[iDir][iRefIdx2][iRefIdx1] = bEqual;
+      }
+    }
+  }
+}
+
+/** Verify that collocated_ref_idx selects the same reference picture
+ *  (same POC) in every preceding non-I slice of this picture, as required
+ *  for TMVP; terminates the process on a violation.
+ *  \param curSliceIdx index of the current slice within pic
+ *  \param pic         picture containing the slices to check */
+Void TComSlice::checkColRefIdx(UInt curSliceIdx, TComPic* pic)
+{
+  Int i;
+  TComSlice* curSlice = pic->getSlice(curSliceIdx);
+  Int currColRefPOC =  curSlice->getRefPOC( RefPicList(1 - curSlice->getColFromL0Flag()), curSlice->getColRefIdx());
+  TComSlice* preSlice;
+  Int preColRefPOC;
+  // Walk backwards; the first non-I slice found is sufficient to check,
+  // since earlier slices were validated when they were the current slice.
+  for(i=curSliceIdx-1; i>=0; i--)
+  {
+    preSlice = pic->getSlice(i);
+    if(preSlice->getSliceType() != I_SLICE)
+    {
+      preColRefPOC  = preSlice->getRefPOC( RefPicList(1 - preSlice->getColFromL0Flag()), preSlice->getColRefIdx());
+      if(currColRefPOC != preColRefPOC)
+      {
+        printf("Collocated_ref_idx shall always be the same for all slices of a coded picture!\n");
+        exit(EXIT_FAILURE);
+      }
+      else
+      {
+        break;
+      }
+    }
+  }
+}
+
+/** Check CRA constraints and track the most recent IRAP.
+ *  Asserts that no RPS entry of a picture following the last IRAP
+ *  (pocCRA) precedes that IRAP in output order, then updates
+ *  pocCRA / associatedIRAPType when this slice is itself IDR, CRA or BLA.
+ *  NOTE(review): pocCRA is an Int compared against MAX_UINT — the
+ *  comparison promotes to unsigned; confirm the intended sentinel value. */
+Void TComSlice::checkCRA(TComReferencePictureSet *pReferencePictureSet, Int& pocCRA, NalUnitType& associatedIRAPType, TComList<TComPic *>& rcListPic)
+{
+  // Short-term entries: delta-POC relative to the current picture.
+  for(Int i = 0; i < pReferencePictureSet->getNumberOfNegativePictures()+pReferencePictureSet->getNumberOfPositivePictures(); i++)
+  {
+    if(pocCRA < MAX_UINT && getPOC() > pocCRA)
+    {
+      assert(getPOC()+pReferencePictureSet->getDeltaPOC(i) >= pocCRA);
+    }
+  }
+  // Long-term entries: absolute POC (possibly LSBs only).
+  for(Int i = pReferencePictureSet->getNumberOfNegativePictures()+pReferencePictureSet->getNumberOfPositivePictures(); i < pReferencePictureSet->getNumberOfPictures(); i++)
+  {
+    if(pocCRA < MAX_UINT && getPOC() > pocCRA)
+    {
+      if (!pReferencePictureSet->getCheckLTMSBPresent(i))
+      {
+        assert(xGetLongTermRefPic(rcListPic, pReferencePictureSet->getPOC(i), false)->getPOC() >= pocCRA);
+      }
+      else
+      {
+        assert(pReferencePictureSet->getPOC(i) >= pocCRA);
+      }
+    }
+  }
+  if ( getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_W_RADL || getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_N_LP ) // IDR picture found
+  {
+    pocCRA = getPOC();
+    associatedIRAPType = getNalUnitType();
+  }
+  else if ( getNalUnitType() == NAL_UNIT_CODED_SLICE_CRA ) // CRA picture found
+  {
+    pocCRA = getPOC();
+    associatedIRAPType = getNalUnitType();
+  }
+  else if ( getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_LP
+         || getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_RADL
+         || getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_N_LP ) // BLA picture found
+  {
+    pocCRA = getPOC();
+    associatedIRAPType = getNalUnitType();
+  }
+}
+
+/** Function for marking the reference pictures when an IDR/CRA/CRANT/BLA/BLANT is encountered.
+ * \param pocCRA POC of the CRA/CRANT/BLA/BLANT picture
+ * \param bRefreshPending flag indicating if a deferred decoding refresh is pending
+ * \param rcListPic reference to the reference picture list
+ * This function marks the reference pictures as "unused for reference" in the following conditions.
+ * If the nal_unit_type is IDR/BLA/BLANT, all pictures in the reference picture list
+ * are marked as "unused for reference"
+ *    If the nal_unit_type is BLA/BLANT, set the pocCRA to the temporal reference of the current picture.
+ * Otherwise
+ *    If the bRefreshPending flag is true (a deferred decoding refresh is pending) and the current
+ *    temporal reference is greater than the temporal reference of the latest CRA/CRANT/BLA/BLANT picture (pocCRA),
+ *    mark all reference pictures except the latest CRA/CRANT/BLA/BLANT picture as "unused for reference" and set
+ *    the bRefreshPending flag to false.
+ *    If the nal_unit_type is CRA/CRANT, set the bRefreshPending flag to true and pocCRA to the temporal
+ *    reference of the current picture.
+ * Note that the current picture is already placed in the reference list and its marking is not changed.
+ * If the current picture has a nal_ref_idc that is not 0, it will remain marked as "used for reference".
+ */
+Void TComSlice::decodingRefreshMarking(Int& pocCRA, Bool& bRefreshPending, TComList<TComPic*>& rcListPic)
+{
+  TComPic* rpcPic;
+  Int      pocCurr = getPOC();
+
+  if ( getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_LP
+    || getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_RADL
+    || getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_N_LP
+    || getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_W_RADL
+    || getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_N_LP )  // IDR or BLA picture
+  {
+    // mark all pictures as not used for reference
+    TComList<TComPic*>::iterator        iterPic       = rcListPic.begin();
+    while (iterPic != rcListPic.end())
+    {
+      rpcPic = *(iterPic);
+      rpcPic->setCurrSliceIdx(0);
+      // The current picture itself keeps its reference marking.
+      if (rpcPic->getPOC() != pocCurr) rpcPic->getSlice(0)->setReferenced(false);
+      iterPic++;
+    }
+    if ( getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_LP
+      || getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_RADL
+      || getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_N_LP )
+    {
+      // For BLA, pocCRA becomes the current picture's POC immediately.
+      pocCRA = pocCurr;
+    }
+#if EFFICIENT_FIELD_IRAP
+    bRefreshPending = true;
+#endif
+  }
+  else // CRA or No DR
+  {
+#if EFFICIENT_FIELD_IRAP
+    if(getAssociatedIRAPType() == NAL_UNIT_CODED_SLICE_IDR_N_LP || getAssociatedIRAPType() == NAL_UNIT_CODED_SLICE_IDR_W_RADL)
+    {
+      if (bRefreshPending==true && pocCurr > m_iLastIDR) // IDR reference marking pending 
+      {
+        // Deferred refresh: unmark everything except the current picture
+        // and the last IDR picture.
+        TComList<TComPic*>::iterator        iterPic       = rcListPic.begin();
+        while (iterPic != rcListPic.end())
+        {
+          rpcPic = *(iterPic);
+          if (rpcPic->getPOC() != pocCurr && rpcPic->getPOC() != m_iLastIDR)
+          {
+            rpcPic->getSlice(0)->setReferenced(false);
+          }
+          iterPic++;
+        }
+        bRefreshPending = false; 
+      }
+    }
+    else
+    {
+#endif
+      if (bRefreshPending==true && pocCurr > pocCRA) // CRA reference marking pending
+      {
+        // Deferred refresh: unmark everything except the current picture
+        // and the CRA picture itself.
+        TComList<TComPic*>::iterator iterPic = rcListPic.begin();
+        while (iterPic != rcListPic.end())
+        {
+          rpcPic = *(iterPic);
+          if (rpcPic->getPOC() != pocCurr && rpcPic->getPOC() != pocCRA)
+          {
+            rpcPic->getSlice(0)->setReferenced(false);
+          }
+          iterPic++;
+        }
+        bRefreshPending = false;
+      }
+#if EFFICIENT_FIELD_IRAP
+    }
+#endif
+    if ( getNalUnitType() == NAL_UNIT_CODED_SLICE_CRA ) // CRA picture found
+    {
+      // CRA defers the refresh until a later picture in output order.
+      bRefreshPending = true;
+      pocCRA = pocCurr;
+    }
+  }
+}
+
+/** Copy all slice-header state from pSrc into this slice.
+ *  Shallow copy: SPS/PPS/RPS/picture pointers are shared with pSrc, not
+ *  duplicated; array contents (ref lists, WP tables, equal-ref matrix)
+ *  are copied element by element.
+ *  \param pSrc source slice (must be non-NULL) */
+Void TComSlice::copySliceInfo(TComSlice *pSrc)
+{
+  assert( pSrc != NULL );
+
+  Int i, j, k;
+
+  m_iPOC                 = pSrc->m_iPOC;
+  m_eNalUnitType         = pSrc->m_eNalUnitType;
+  m_eSliceType           = pSrc->m_eSliceType;
+  m_iSliceQp             = pSrc->m_iSliceQp;
+#if ADAPTIVE_QP_SELECTION
+  m_iSliceQpBase         = pSrc->m_iSliceQpBase;
+#endif
+  m_ChromaQpAdjEnabled = pSrc->m_ChromaQpAdjEnabled;
+  m_deblockingFilterDisable   = pSrc->m_deblockingFilterDisable;
+  m_deblockingFilterOverrideFlag = pSrc->m_deblockingFilterOverrideFlag;
+  m_deblockingFilterBetaOffsetDiv2 = pSrc->m_deblockingFilterBetaOffsetDiv2;
+  m_deblockingFilterTcOffsetDiv2 = pSrc->m_deblockingFilterTcOffsetDiv2;
+
+  for (i = 0; i < NUM_REF_PIC_LIST_01; i++)
+  {
+    m_aiNumRefIdx[i]     = pSrc->m_aiNumRefIdx[i];
+  }
+
+  for (i = 0; i < MAX_NUM_REF; i++)
+  {
+    m_list1IdxToList0Idx[i] = pSrc->m_list1IdxToList0Idx[i];
+  }
+
+  m_bCheckLDC             = pSrc->m_bCheckLDC;
+  m_iSliceQpDelta        = pSrc->m_iSliceQpDelta;
+  for (UInt component = 0; component < MAX_NUM_COMPONENT; component++) m_iSliceChromaQpDelta[component] = pSrc->m_iSliceChromaQpDelta[component];
+  // Reference lists: note the extra [MAX_NUM_REF] long-term entry.
+  for (i = 0; i < NUM_REF_PIC_LIST_01; i++)
+  {
+    for (j = 0; j < MAX_NUM_REF; j++)
+    {
+      m_apcRefPicList[i][j]  = pSrc->m_apcRefPicList[i][j];
+      m_aiRefPOCList[i][j]   = pSrc->m_aiRefPOCList[i][j];
+      m_bIsUsedAsLongTerm[i][j] = pSrc->m_bIsUsedAsLongTerm[i][j];
+    }
+    m_bIsUsedAsLongTerm[i][MAX_NUM_REF] = pSrc->m_bIsUsedAsLongTerm[i][MAX_NUM_REF];
+  }
+  m_iDepth               = pSrc->m_iDepth;
+
+  // referenced slice
+  m_bRefenced            = pSrc->m_bRefenced;
+
+  // access channel
+  m_pcSPS                = pSrc->m_pcSPS;
+  m_pcPPS                = pSrc->m_pcPPS;
+  m_pcRPS                = pSrc->m_pcRPS;
+  m_iLastIDR             = pSrc->m_iLastIDR;
+
+  m_pcPic                = pSrc->m_pcPic;
+
+  m_colFromL0Flag        = pSrc->m_colFromL0Flag;
+  m_colRefIdx            = pSrc->m_colRefIdx;
+
+  setLambdas(pSrc->getLambdas());
+
+  for (i = 0; i < NUM_REF_PIC_LIST_01; i++)
+  {
+    for (j = 0; j < MAX_NUM_REF; j++)
+    {
+      for (k =0; k < MAX_NUM_REF; k++)
+      {
+        m_abEqualRef[i][j][k] = pSrc->m_abEqualRef[i][j][k];
+      }
+    }
+  }
+
+  m_uiTLayer                      = pSrc->m_uiTLayer;
+  m_bTLayerSwitchingFlag          = pSrc->m_bTLayerSwitchingFlag;
+
+  m_sliceMode                     = pSrc->m_sliceMode;
+  m_sliceArgument                 = pSrc->m_sliceArgument;
+  m_sliceCurStartCtuTsAddr        = pSrc->m_sliceCurStartCtuTsAddr;
+  m_sliceCurEndCtuTsAddr          = pSrc->m_sliceCurEndCtuTsAddr;
+  m_sliceIdx                      = pSrc->m_sliceIdx;
+  m_sliceSegmentMode              = pSrc->m_sliceSegmentMode;
+  m_sliceSegmentArgument          = pSrc->m_sliceSegmentArgument;
+  m_sliceSegmentCurStartCtuTsAddr = pSrc->m_sliceSegmentCurStartCtuTsAddr;
+  m_sliceSegmentCurEndCtuTsAddr   = pSrc->m_sliceSegmentCurEndCtuTsAddr;
+  m_nextSlice                     = pSrc->m_nextSlice;
+  m_nextSliceSegment              = pSrc->m_nextSliceSegment;
+
+  // Weighted-prediction parameter tables, per list / ref / component.
+  for ( UInt e=0 ; e<NUM_REF_PIC_LIST_01 ; e++ )
+  {
+    for ( UInt n=0 ; n<MAX_NUM_REF ; n++ )
+    {
+      memcpy(m_weightPredTable[e][n], pSrc->m_weightPredTable[e][n], sizeof(WPScalingParam)*MAX_NUM_COMPONENT );
+    }
+  }
+
+  for( UInt ch = 0 ; ch < MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    m_saoEnabledFlag[ch] = pSrc->m_saoEnabledFlag[ch];
+  }
+
+  m_cabacInitFlag                = pSrc->m_cabacInitFlag;
+
+  m_bLMvdL1Zero = pSrc->m_bLMvdL1Zero;
+  m_LFCrossSliceBoundaryFlag = pSrc->m_LFCrossSliceBoundaryFlag;
+  m_enableTMVPFlag                = pSrc->m_enableTMVPFlag;
+  m_maxNumMergeCand               = pSrc->m_maxNumMergeCand;
+}
+
+
+Int TComSlice::m_prevTid0POC = 0;
+
+/** Function for setting the slice's temporal layer ID and corresponding temporal_layer_switching_point_flag.
+ * \param uiTLayer Temporal layer ID of the current slice
+ * The decoder calls this function to set temporal_layer_switching_point_flag for each temporal layer based on
+ * the SPS's temporal_id_nesting_flag and the parsed PPS.  Then, current slice's temporal layer ID and
+ * temporal_layer_switching_point_flag is set accordingly.
+ */
+Void TComSlice::setTLayerInfo( UInt uiTLayer )
+{
+  m_uiTLayer = uiTLayer;  // store the temporal layer ID for this slice
+}
+
+/** Function for checking if this is a switching-point
+*/
+/** Return true when this picture is a temporal-layer switching point:
+ *  no referenced picture in the buffer (other than the current one) is at
+ *  the same or a higher temporal layer. */
+Bool TComSlice::isTemporalLayerSwitchingPoint(TComList<TComPic*>& rcListPic)
+{
+  for (TComList<TComPic*>::iterator iterPic = rcListPic.begin(); iterPic != rcListPic.end(); ++iterPic)
+  {
+    TComPic* pcPic = *(iterPic);
+    if (pcPic->getSlice(0)->isReferenced()
+        && pcPic->getPOC() != getPOC()
+        && pcPic->getTLayer() >= getTLayer())
+    {
+      return false;
+    }
+  }
+  return true;
+}
+
+/** Check whether this slice is a stepwise temporal-layer switching (STSA)
+ *  candidate: true iff every referenced DPB picture that is used by the
+ *  current picture (other than the current picture itself) lives in a
+ *  strictly lower temporal layer.
+ */
+Bool TComSlice::isStepwiseTemporalLayerSwitchingPointCandidate(TComList<TComPic*>& rcListPic)
+{
+  for (TComList<TComPic*>::iterator it = rcListPic.begin(); it != rcListPic.end(); ++it)
+  {
+    TComPic* pcPic = *it;
+    // Only referenced pictures marked as used by the current picture matter.
+    if (!pcPic->getSlice(0)->isReferenced() || !pcPic->getUsedByCurr() || pcPic->getPOC() == getPOC())
+    {
+      continue;
+    }
+    if (pcPic->getTLayer() >= getTLayer())
+    {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+/** Check bitstream conformance constraints relating leading pictures
+ *  (RADL/RASL) to their associated IRAP picture.  Each constraint is
+ *  enforced with an assert; on a conforming stream the function simply
+ *  returns.
+ * \param rcListPic the decoded picture buffer (DPB)
+ */
+Void TComSlice::checkLeadingPictureRestrictions(TComList<TComPic*>& rcListPic)
+{
+  TComPic* rpcPic;
+
+  Int nalUnitType = this->getNalUnitType();
+
+  // When a picture is a leading picture, it shall be a RADL or RASL picture.
+  if(this->getAssociatedIRAPPOC() > this->getPOC())
+  {
+    // Do not check IRAP pictures since they may get a POC lower than their associated IRAP
+    if(nalUnitType < NAL_UNIT_CODED_SLICE_BLA_W_LP ||
+       nalUnitType > NAL_UNIT_RESERVED_IRAP_VCL23)
+    {
+      assert(nalUnitType == NAL_UNIT_CODED_SLICE_RASL_N ||
+             nalUnitType == NAL_UNIT_CODED_SLICE_RASL_R ||
+             nalUnitType == NAL_UNIT_CODED_SLICE_RADL_N ||
+             nalUnitType == NAL_UNIT_CODED_SLICE_RADL_R);
+    }
+  }
+
+  // When a picture is a trailing picture, it shall not be a RADL or RASL picture.
+  if(this->getAssociatedIRAPPOC() < this->getPOC())
+  {
+    assert(nalUnitType != NAL_UNIT_CODED_SLICE_RASL_N &&
+           nalUnitType != NAL_UNIT_CODED_SLICE_RASL_R &&
+           nalUnitType != NAL_UNIT_CODED_SLICE_RADL_N &&
+           nalUnitType != NAL_UNIT_CODED_SLICE_RADL_R);
+  }
+
+  // No RASL pictures shall be present in the bitstream that are associated
+  // with a BLA picture having nal_unit_type equal to BLA_W_RADL or BLA_N_LP.
+  if(nalUnitType == NAL_UNIT_CODED_SLICE_RASL_N ||
+     nalUnitType == NAL_UNIT_CODED_SLICE_RASL_R)
+  {
+    assert(this->getAssociatedIRAPType() != NAL_UNIT_CODED_SLICE_BLA_W_RADL &&
+           this->getAssociatedIRAPType() != NAL_UNIT_CODED_SLICE_BLA_N_LP);
+  }
+
+  // No RASL pictures shall be present in the bitstream that are associated with
+  // an IDR picture.
+  if(nalUnitType == NAL_UNIT_CODED_SLICE_RASL_N ||
+     nalUnitType == NAL_UNIT_CODED_SLICE_RASL_R)
+  {
+    assert(this->getAssociatedIRAPType() != NAL_UNIT_CODED_SLICE_IDR_N_LP   &&
+           this->getAssociatedIRAPType() != NAL_UNIT_CODED_SLICE_IDR_W_RADL);
+  }
+
+  // No RADL pictures shall be present in the bitstream that are associated with
+  // a BLA picture having nal_unit_type equal to BLA_N_LP or that are associated
+  // with an IDR picture having nal_unit_type equal to IDR_N_LP.
+  if(nalUnitType == NAL_UNIT_CODED_SLICE_RADL_N ||
+     nalUnitType == NAL_UNIT_CODED_SLICE_RADL_R)
+  {
+    assert(this->getAssociatedIRAPType() != NAL_UNIT_CODED_SLICE_BLA_N_LP   &&
+           this->getAssociatedIRAPType() != NAL_UNIT_CODED_SLICE_IDR_N_LP);
+  }
+
+  // loop through all pictures in the reference picture buffer
+  TComList<TComPic*>::iterator iterPic = rcListPic.begin();
+  while ( iterPic != rcListPic.end())
+  {
+    rpcPic = *(iterPic++);
+#if BUGFIX_INTRAPERIOD
+    // skip pictures that have not been reconstructed yet
+    if(!rpcPic->getReconMark())
+    {
+      continue;
+    }
+#endif
+    // the current picture itself is never checked against these constraints
+    if (rpcPic->getPOC() == this->getPOC())
+    {
+      continue;
+    }
+
+    // Any picture that has PicOutputFlag equal to 1 that precedes an IRAP picture
+    // in decoding order shall precede the IRAP picture in output order.
+    // (Note that any picture following in output order would be present in the DPB)
+    if(rpcPic->getSlice(0)->getPicOutputFlag() == 1 && !this->getNoOutputPriorPicsFlag())
+    {
+      if(nalUnitType == NAL_UNIT_CODED_SLICE_BLA_N_LP    ||
+         nalUnitType == NAL_UNIT_CODED_SLICE_BLA_W_LP    ||
+         nalUnitType == NAL_UNIT_CODED_SLICE_BLA_W_RADL  ||
+         nalUnitType == NAL_UNIT_CODED_SLICE_CRA         ||
+         nalUnitType == NAL_UNIT_CODED_SLICE_IDR_N_LP    ||
+         nalUnitType == NAL_UNIT_CODED_SLICE_IDR_W_RADL)
+      {
+        assert(rpcPic->getPOC() < this->getPOC());
+      }
+    }
+
+    // Any picture that has PicOutputFlag equal to 1 that precedes an IRAP picture
+    // in decoding order shall precede any RADL picture associated with the IRAP
+    // picture in output order.
+    if(rpcPic->getSlice(0)->getPicOutputFlag() == 1)
+    {
+      if((nalUnitType == NAL_UNIT_CODED_SLICE_RADL_N ||
+          nalUnitType == NAL_UNIT_CODED_SLICE_RADL_R))
+      {
+        // rpcPic precedes the IRAP in decoding order
+        if(this->getAssociatedIRAPPOC() > rpcPic->getSlice(0)->getAssociatedIRAPPOC())
+        {
+          // rpcPic must not be the IRAP picture
+          if(this->getAssociatedIRAPPOC() != rpcPic->getPOC())
+          {
+            assert(rpcPic->getPOC() < this->getPOC());
+          }
+        }
+      }
+    }
+
+    // When a picture is a leading picture, it shall precede, in decoding order,
+    // all trailing pictures that are associated with the same IRAP picture.
+      if(nalUnitType == NAL_UNIT_CODED_SLICE_RASL_N ||
+         nalUnitType == NAL_UNIT_CODED_SLICE_RASL_R ||
+         nalUnitType == NAL_UNIT_CODED_SLICE_RADL_N ||
+         nalUnitType == NAL_UNIT_CODED_SLICE_RADL_R)
+      {
+        if(rpcPic->getSlice(0)->getAssociatedIRAPPOC() == this->getAssociatedIRAPPOC())
+        {
+          // rpcPic is a picture that preceded the leading in decoding order since it exist in the DPB
+          // rpcPic would violate the constraint if it was a trailing picture
+          assert(rpcPic->getPOC() <= this->getAssociatedIRAPPOC());
+        }
+      }
+
+    // Any RASL picture associated with a CRA or BLA picture shall precede any
+    // RADL picture associated with the CRA or BLA picture in output order
+    if(nalUnitType == NAL_UNIT_CODED_SLICE_RASL_N ||
+       nalUnitType == NAL_UNIT_CODED_SLICE_RASL_R)
+    {
+      if((this->getAssociatedIRAPType() == NAL_UNIT_CODED_SLICE_BLA_N_LP   ||
+          this->getAssociatedIRAPType() == NAL_UNIT_CODED_SLICE_BLA_W_LP   ||
+          this->getAssociatedIRAPType() == NAL_UNIT_CODED_SLICE_BLA_W_RADL ||
+          this->getAssociatedIRAPType() == NAL_UNIT_CODED_SLICE_CRA)       &&
+          this->getAssociatedIRAPPOC() == rpcPic->getSlice(0)->getAssociatedIRAPPOC())
+      {
+        if(rpcPic->getSlice(0)->getNalUnitType() == NAL_UNIT_CODED_SLICE_RADL_N ||
+           rpcPic->getSlice(0)->getNalUnitType() == NAL_UNIT_CODED_SLICE_RADL_R)
+        {
+          assert(rpcPic->getPOC() > this->getPOC());
+        }
+      }
+    }
+
+    // Any RASL picture associated with a CRA picture shall follow, in output
+    // order, any IRAP picture that precedes the CRA picture in decoding order.
+    if(nalUnitType == NAL_UNIT_CODED_SLICE_RASL_N ||
+       nalUnitType == NAL_UNIT_CODED_SLICE_RASL_R)
+    {
+      if(this->getAssociatedIRAPType() == NAL_UNIT_CODED_SLICE_CRA)
+      {
+        if(rpcPic->getSlice(0)->getPOC() < this->getAssociatedIRAPPOC() &&
+           (rpcPic->getSlice(0)->getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_N_LP   ||
+            rpcPic->getSlice(0)->getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_LP   ||
+            rpcPic->getSlice(0)->getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_RADL ||
+            rpcPic->getSlice(0)->getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_N_LP   ||
+            rpcPic->getSlice(0)->getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_W_RADL ||
+            rpcPic->getSlice(0)->getNalUnitType() == NAL_UNIT_CODED_SLICE_CRA))
+        {
+          assert(this->getPOC() > rpcPic->getSlice(0)->getPOC());
+        }
+      }
+    }
+  }
+}
+
+
+
+/** Apply picture marking based on the Reference Picture Set in
+ *  pReferencePictureSet: every referenced DPB picture not contained in the
+ *  RPS is marked "unused for reference"; contained pictures get their
+ *  used-by-current and long-term flags updated.  Also runs the leading
+ *  picture conformance checks and asserts temporal-layer constraints.
+ * \param rcListPic              the decoded picture buffer
+ * \param pReferencePictureSet   the RPS of the current slice
+ */
+Void TComSlice::applyReferencePictureSet( TComList<TComPic*>& rcListPic, TComReferencePictureSet *pReferencePictureSet)
+{
+  TComPic* rpcPic;
+  Int i, isReference;
+
+  checkLeadingPictureRestrictions(rcListPic);
+
+  // loop through all pictures in the reference picture buffer
+  TComList<TComPic*>::iterator iterPic = rcListPic.begin();
+  while ( iterPic != rcListPic.end())
+  {
+    rpcPic = *(iterPic++);
+
+    // pictures already unmarked need no further processing
+    if(!rpcPic->getSlice( 0 )->isReferenced())
+    {
+      continue;
+    }
+
+    isReference = 0;
+    // loop through all pictures in the Reference Picture Set
+    // to see if the picture should be kept as reference picture
+    // (first the short-term entries, matched by delta POC)
+    for(i=0;i<pReferencePictureSet->getNumberOfPositivePictures()+pReferencePictureSet->getNumberOfNegativePictures();i++)
+    {
+      if(!rpcPic->getIsLongTerm() && rpcPic->getPicSym()->getSlice(0)->getPOC() == this->getPOC() + pReferencePictureSet->getDeltaPOC(i))
+      {
+        isReference = 1;
+        rpcPic->setUsedByCurr(pReferencePictureSet->getUsed(i));
+        rpcPic->setIsLongTerm(0);
+      }
+    }
+    // then the long-term entries, matched on full or LSB-only POC
+    for(;i<pReferencePictureSet->getNumberOfPictures();i++)
+    {
+      if(pReferencePictureSet->getCheckLTMSBPresent(i)==true)
+      {
+        if(rpcPic->getIsLongTerm() && (rpcPic->getPicSym()->getSlice(0)->getPOC()) == pReferencePictureSet->getPOC(i))
+        {
+          isReference = 1;
+          rpcPic->setUsedByCurr(pReferencePictureSet->getUsed(i));
+        }
+      }
+      else
+      {
+        // MSBs not signalled: compare POC LSBs only (modulo the POC cycle)
+        Int pocCycle = 1<<rpcPic->getPicSym()->getSlice(0)->getSPS()->getBitsForPOC();
+        Int curPoc = rpcPic->getPicSym()->getSlice(0)->getPOC() & (pocCycle-1);
+        Int refPoc = pReferencePictureSet->getPOC(i) & (pocCycle-1);
+        if(rpcPic->getIsLongTerm() && curPoc == refPoc)
+        {
+          isReference = 1;
+          rpcPic->setUsedByCurr(pReferencePictureSet->getUsed(i));
+        }
+      }
+
+    }
+    // mark the picture as "unused for reference" if it is not in
+    // the Reference Picture Set
+    if(rpcPic->getPicSym()->getSlice(0)->getPOC() != this->getPOC() && isReference == 0)
+    {
+      rpcPic->getSlice( 0 )->setReferenced( false );
+      rpcPic->setUsedByCurr(0);
+      rpcPic->setIsLongTerm(0);
+    }
+    //check that pictures of higher temporal layers are not used
+    assert(rpcPic->getSlice( 0 )->isReferenced()==0||rpcPic->getUsedByCurr()==0||rpcPic->getTLayer()<=this->getTLayer());
+    //check that pictures of higher or equal temporal layer are not in the RPS if the current picture is a TSA picture
+    if(this->getNalUnitType() == NAL_UNIT_CODED_SLICE_TSA_R || this->getNalUnitType() == NAL_UNIT_CODED_SLICE_TSA_N)
+    {
+      assert(rpcPic->getSlice( 0 )->isReferenced()==0||rpcPic->getTLayer()<this->getTLayer());
+    }
+    //check that pictures marked as temporal layer non-reference pictures are not used for reference
+    if(rpcPic->getPicSym()->getSlice(0)->getPOC() != this->getPOC() && rpcPic->getTLayer()==this->getTLayer())
+    {
+      assert(rpcPic->getSlice( 0 )->isReferenced()==0||rpcPic->getUsedByCurr()==0||rpcPic->getSlice( 0 )->getTemporalLayerNonReferenceFlag()==false);
+    }
+  }
+}
+
+/** Check that all pictures in the Reference Picture Set are present in the
+ *  DPB and still marked as reference.
+ *  (NOTE: the previous header comment was a copy-paste from
+ *  applyReferencePictureSet; this function checks availability.)
+ * \return 0 when every RPS picture is available;
+ *         -1 when unavailability is explained by the recovery point or a
+ *            preceding IDR (only with ALLOW_RECOVERY_POINT_AS_RAP);
+ *         -2 when at least one picture was removed but marked unused;
+ *         otherwise (POC of a lost picture) + 1.
+ */
+#if ALLOW_RECOVERY_POINT_AS_RAP
+Int TComSlice::checkThatAllRefPicsAreAvailable( TComList<TComPic*>& rcListPic, TComReferencePictureSet *pReferencePictureSet, Bool printErrors, Int pocRandomAccess, Bool bUseRecoveryPoint)
+#else
+Int TComSlice::checkThatAllRefPicsAreAvailable( TComList<TComPic*>& rcListPic, TComReferencePictureSet *pReferencePictureSet, Bool printErrors, Int pocRandomAccess)
+#endif
+{
+#if ALLOW_RECOVERY_POINT_AS_RAP
+  Int atLeastOneUnabledByRecoveryPoint = 0;
+  Int atLeastOneFlushedByPreviousIDR = 0;
+#endif
+  TComPic* rpcPic;
+  Int i, isAvailable;
+  Int atLeastOneLost = 0;
+  Int atLeastOneRemoved = 0;
+  Int iPocLost = 0;
+
+  // loop through all long-term pictures in the Reference Picture Set
+  // to see if the picture should be kept as reference picture
+  for(i=pReferencePictureSet->getNumberOfNegativePictures()+pReferencePictureSet->getNumberOfPositivePictures();i<pReferencePictureSet->getNumberOfPictures();i++)
+  {
+    isAvailable = 0;
+    // loop through all pictures in the reference picture buffer
+    TComList<TComPic*>::iterator iterPic = rcListPic.begin();
+    while ( iterPic != rcListPic.end())
+    {
+      rpcPic = *(iterPic++);
+      if(pReferencePictureSet->getCheckLTMSBPresent(i)==true)
+      {
+        // MSBs signalled: match on the full POC value
+        if(rpcPic->getIsLongTerm() && (rpcPic->getPicSym()->getSlice(0)->getPOC()) == pReferencePictureSet->getPOC(i) && rpcPic->getSlice(0)->isReferenced())
+        {
+#if ALLOW_RECOVERY_POINT_AS_RAP
+          if(bUseRecoveryPoint && this->getPOC() > pocRandomAccess && this->getPOC() + pReferencePictureSet->getDeltaPOC(i) < pocRandomAccess)
+          {
+            isAvailable = 0;
+          }
+          else
+          {
+            isAvailable = 1;
+          }
+#else
+          isAvailable = 1;
+#endif
+        }
+      }
+      else
+      {
+        // MSBs not signalled: compare POC LSBs only (modulo the POC cycle)
+        Int pocCycle = 1<<rpcPic->getPicSym()->getSlice(0)->getSPS()->getBitsForPOC();
+        Int curPoc = rpcPic->getPicSym()->getSlice(0)->getPOC() & (pocCycle-1);
+        Int refPoc = pReferencePictureSet->getPOC(i) & (pocCycle-1);
+        if(rpcPic->getIsLongTerm() && curPoc == refPoc && rpcPic->getSlice(0)->isReferenced())
+        {
+#if ALLOW_RECOVERY_POINT_AS_RAP
+          if(bUseRecoveryPoint && this->getPOC() > pocRandomAccess && this->getPOC() + pReferencePictureSet->getDeltaPOC(i) < pocRandomAccess)
+          {
+            isAvailable = 0;
+          }
+          else
+          {
+            isAvailable = 1;
+          }
+#else
+          isAvailable = 1;
+#endif
+        }
+      }
+    }
+    // if there was no such long-term check the short terms
+    if(!isAvailable)
+    {
+      iterPic = rcListPic.begin();
+      while ( iterPic != rcListPic.end())
+      {
+        rpcPic = *(iterPic++);
+
+        Int pocCycle = 1 << rpcPic->getPicSym()->getSlice(0)->getSPS()->getBitsForPOC();
+        Int curPoc = rpcPic->getPicSym()->getSlice(0)->getPOC();
+        Int refPoc = pReferencePictureSet->getPOC(i);
+        if (!pReferencePictureSet->getCheckLTMSBPresent(i))
+        {
+          curPoc = curPoc & (pocCycle - 1);
+          refPoc = refPoc & (pocCycle - 1);
+        }
+
+        if (rpcPic->getSlice(0)->isReferenced() && curPoc == refPoc)
+        {
+#if ALLOW_RECOVERY_POINT_AS_RAP
+          if(bUseRecoveryPoint && this->getPOC() > pocRandomAccess && this->getPOC() + pReferencePictureSet->getDeltaPOC(i) < pocRandomAccess)
+          {
+            isAvailable = 0;
+          }
+          else
+          {
+            // a short-term picture satisfies the long-term entry: promote it
+            isAvailable = 1;
+            rpcPic->setIsLongTerm(1);
+            break;
+          }
+#else
+          isAvailable = 1;
+          rpcPic->setIsLongTerm(1);
+          break;
+#endif
+        }
+      }
+    }
+    // report that a picture is lost if it is in the Reference Picture Set
+    // but not available as reference picture
+    if(isAvailable == 0)
+    {
+      if (this->getPOC() + pReferencePictureSet->getDeltaPOC(i) >= pocRandomAccess)
+      {
+        if(!pReferencePictureSet->getUsed(i) )
+        {
+          if(printErrors)
+          {
+            printf("\nLong-term reference picture with POC = %3d seems to have been removed or not correctly decoded.", this->getPOC() + pReferencePictureSet->getDeltaPOC(i));
+          }
+          atLeastOneRemoved = 1;
+        }
+        else
+        {
+          if(printErrors)
+          {
+            printf("\nLong-term reference picture with POC = %3d is lost or not correctly decoded!", this->getPOC() + pReferencePictureSet->getDeltaPOC(i));
+          }
+          atLeastOneLost = 1;
+          iPocLost=this->getPOC() + pReferencePictureSet->getDeltaPOC(i);
+        }
+      }
+#if ALLOW_RECOVERY_POINT_AS_RAP
+      else if(bUseRecoveryPoint && this->getPOC() > pocRandomAccess)
+      {
+        atLeastOneUnabledByRecoveryPoint = 1;
+      }
+      else if(bUseRecoveryPoint && (this->getAssociatedIRAPType()==NAL_UNIT_CODED_SLICE_IDR_N_LP || this->getAssociatedIRAPType()==NAL_UNIT_CODED_SLICE_IDR_W_RADL))
+      {
+        atLeastOneFlushedByPreviousIDR = 1;
+      }
+#endif
+    }
+  }
+  // loop through all short-term pictures in the Reference Picture Set
+  // to see if the picture should be kept as reference picture
+  for(i=0;i<pReferencePictureSet->getNumberOfNegativePictures()+pReferencePictureSet->getNumberOfPositivePictures();i++)
+  {
+    isAvailable = 0;
+    // loop through all pictures in the reference picture buffer
+    TComList<TComPic*>::iterator iterPic = rcListPic.begin();
+    while ( iterPic != rcListPic.end())
+    {
+      rpcPic = *(iterPic++);
+
+      if(!rpcPic->getIsLongTerm() && rpcPic->getPicSym()->getSlice(0)->getPOC() == this->getPOC() + pReferencePictureSet->getDeltaPOC(i) && rpcPic->getSlice(0)->isReferenced())
+      {
+#if ALLOW_RECOVERY_POINT_AS_RAP
+        if(bUseRecoveryPoint && this->getPOC() > pocRandomAccess && this->getPOC() + pReferencePictureSet->getDeltaPOC(i) < pocRandomAccess)
+        {
+          isAvailable = 0;
+        }
+        else
+        {
+          isAvailable = 1;
+        }
+#else
+        isAvailable = 1;
+#endif
+      }
+    }
+    // report that a picture is lost if it is in the Reference Picture Set
+    // but not available as reference picture
+    if(isAvailable == 0)
+    {
+      if (this->getPOC() + pReferencePictureSet->getDeltaPOC(i) >= pocRandomAccess)
+      {
+        if(!pReferencePictureSet->getUsed(i) )
+        {
+          if(printErrors)
+          {
+            printf("\nShort-term reference picture with POC = %3d seems to have been removed or not correctly decoded.", this->getPOC() + pReferencePictureSet->getDeltaPOC(i));
+          }
+          atLeastOneRemoved = 1;
+        }
+        else
+        {
+          if(printErrors)
+          {
+            printf("\nShort-term reference picture with POC = %3d is lost or not correctly decoded!", this->getPOC() + pReferencePictureSet->getDeltaPOC(i));
+          }
+          atLeastOneLost = 1;
+          iPocLost=this->getPOC() + pReferencePictureSet->getDeltaPOC(i);
+        }
+      }
+#if ALLOW_RECOVERY_POINT_AS_RAP
+      else if(bUseRecoveryPoint && this->getPOC() > pocRandomAccess)
+      {
+        atLeastOneUnabledByRecoveryPoint = 1;
+      }
+      else if(bUseRecoveryPoint && (this->getAssociatedIRAPType()==NAL_UNIT_CODED_SLICE_IDR_N_LP || this->getAssociatedIRAPType()==NAL_UNIT_CODED_SLICE_IDR_W_RADL))
+      {
+        atLeastOneFlushedByPreviousIDR = 1;
+      }
+#endif
+    }
+  }
+
+  // priority of return codes: recovery-point/IDR (-1), lost (POC+1), removed (-2)
+#if ALLOW_RECOVERY_POINT_AS_RAP
+  if(atLeastOneUnabledByRecoveryPoint || atLeastOneFlushedByPreviousIDR)
+  {
+    return -1;
+  }    
+#endif
+  if(atLeastOneLost)
+  {
+    return iPocLost+1;
+  }
+  if(atLeastOneRemoved)
+  {
+    return -2;
+  }
+  else
+  {
+    return 0;
+  }
+}
+
+/** Construct an explicit Reference Picture Set from the entries of a
+ *  referenced RPS that are actually available in the DPB, then install it
+ *  as this slice's (local) RPS.  Inter-RPS prediction data is carried over
+ *  from pReferencePictureSet when possible.
+ * \param rcListPic              the decoded picture buffer
+ * \param pReferencePictureSet   the RPS to filter
+ * \param isRAP                  if true, all entries are marked unused
+ */
+#if ALLOW_RECOVERY_POINT_AS_RAP
+Void TComSlice::createExplicitReferencePictureSetFromReference( TComList<TComPic*>& rcListPic, TComReferencePictureSet *pReferencePictureSet, Bool isRAP, Int pocRandomAccess, Bool bUseRecoveryPoint)
+#else
+Void TComSlice::createExplicitReferencePictureSetFromReference( TComList<TComPic*>& rcListPic, TComReferencePictureSet *pReferencePictureSet, Bool isRAP)
+#endif
+{
+  TComPic* rpcPic;
+  Int i, j;
+  Int k = 0;
+  Int nrOfNegativePictures = 0;
+  Int nrOfPositivePictures = 0;
+  TComReferencePictureSet* pcRPS = this->getLocalRPS();
+#if EFFICIENT_FIELD_IRAP
+  Bool irapIsInRPS = false;
+#endif
+
+  // loop through all pictures in the Reference Picture Set
+  for(i=0;i<pReferencePictureSet->getNumberOfPictures();i++)
+  {
+    j = 0;
+    // loop through all pictures in the reference picture buffer
+    TComList<TComPic*>::iterator iterPic = rcListPic.begin();
+    while ( iterPic != rcListPic.end())
+    {
+      j++;
+      rpcPic = *(iterPic++);
+
+      if(rpcPic->getPicSym()->getSlice(0)->getPOC() == this->getPOC() + pReferencePictureSet->getDeltaPOC(i) && rpcPic->getSlice(0)->isReferenced())
+      {
+        // This picture exists as a reference picture
+        // and should be added to the explicit Reference Picture Set
+        pcRPS->setDeltaPOC(k, pReferencePictureSet->getDeltaPOC(i));
+        pcRPS->setUsed(k, pReferencePictureSet->getUsed(i) && (!isRAP));
+#if ALLOW_RECOVERY_POINT_AS_RAP
+        // pictures hidden behind the recovery point are forced unused
+        pcRPS->setUsed(k, pcRPS->getUsed(k) && !(bUseRecoveryPoint && this->getPOC() > pocRandomAccess && this->getPOC() + pReferencePictureSet->getDeltaPOC(i) < pocRandomAccess) ); 
+#endif
+
+        if(pcRPS->getDeltaPOC(k) < 0)
+        {
+          nrOfNegativePictures++;
+        }
+        else
+        {
+#if EFFICIENT_FIELD_IRAP
+          if(rpcPic->getPicSym()->getSlice(0)->getPOC() == this->getAssociatedIRAPPOC() && this->getAssociatedIRAPPOC() == this->getPOC()+1)
+          {
+            irapIsInRPS = true;
+          }
+#endif
+          nrOfPositivePictures++;
+        }
+        k++;
+      }
+    }
+  }
+
+#if EFFICIENT_FIELD_IRAP
+  Bool useNewRPS = false;
+  // if current picture is complimentary field associated to IRAP, add the IRAP to its RPS.
+  if(m_pcPic->isField() && !irapIsInRPS)
+  {
+    TComList<TComPic*>::iterator iterPic = rcListPic.begin();
+    while ( iterPic != rcListPic.end())
+    {
+      rpcPic = *(iterPic++);
+      if(rpcPic->getPicSym()->getSlice(0)->getPOC() == this->getAssociatedIRAPPOC() && this->getAssociatedIRAPPOC() == this->getPOC()+1)
+      {
+        pcRPS->setDeltaPOC(k, 1);
+        pcRPS->setUsed(k, true);
+        nrOfPositivePictures++;
+        k ++;
+        useNewRPS = true;
+      }
+    }
+  }
+#endif
+  pcRPS->setNumberOfNegativePictures(nrOfNegativePictures);
+  pcRPS->setNumberOfPositivePictures(nrOfPositivePictures);
+  pcRPS->setNumberOfPictures(nrOfNegativePictures+nrOfPositivePictures);
+  // This is a simplistic inter rps example. A smarter encoder will look for a better reference RPS to do the
+  // inter RPS prediction with.  Here we just use the reference used by pReferencePictureSet.
+  // If pReferencePictureSet is not inter_RPS_predicted, then inter_RPS_prediction is for the current RPS also disabled.
+  if (!pReferencePictureSet->getInterRPSPrediction()
+#if EFFICIENT_FIELD_IRAP
+    || useNewRPS
+#endif
+    )
+  {
+    pcRPS->setInterRPSPrediction(false);
+    pcRPS->setNumRefIdc(0);
+  }
+  else
+  {
+    Int rIdx =  this->getRPSidx() - pReferencePictureSet->getDeltaRIdxMinus1() - 1;
+    Int deltaRPS = pReferencePictureSet->getDeltaRPS();
+    TComReferencePictureSet* pcRefRPS = this->getSPS()->getRPSList()->getReferencePictureSet(rIdx);
+    Int iRefPics = pcRefRPS->getNumberOfPictures();
+    Int iNewIdc=0;
+    // derive the ref_idc for each entry of the reference RPS (incl. the RPS itself at i==iRefPics)
+    for(i=0; i<= iRefPics; i++)
+    {
+      Int deltaPOC = ((i != iRefPics)? pcRefRPS->getDeltaPOC(i) : 0);  // check if the reference abs POC is >= 0
+      Int iRefIdc = 0;
+      for (j=0; j < pcRPS->getNumberOfPictures(); j++) // loop through the  pictures in the new RPS
+      {
+        if ( (deltaPOC + deltaRPS) == pcRPS->getDeltaPOC(j))
+        {
+          if (pcRPS->getUsed(j))
+          {
+            iRefIdc = 1;
+          }
+          else
+          {
+            iRefIdc = 2;
+          }
+        }
+      }
+      pcRPS->setRefIdc(i, iRefIdc);
+      iNewIdc++;
+    }
+    pcRPS->setInterRPSPrediction(true);
+    pcRPS->setNumRefIdc(iNewIdc);
+    pcRPS->setDeltaRPS(deltaRPS);
+    pcRPS->setDeltaRIdxMinus1(pReferencePictureSet->getDeltaRIdxMinus1() + this->getSPS()->getRPSList()->getNumberOfReferencePictureSets() - this->getRPSidx());
+  }
+
+  this->setRPS(pcRPS);
+  this->setRPSidx(-1);
+}
+
+/** Retrieve the per-component AC/DC statistics used for weighted prediction.
+ * \param wp (output) set to point at the slice's internal WPACDCParam array;
+ *           the slice retains ownership.
+ */
+Void  TComSlice::getWpAcDcParam(WPACDCParam *&wp)
+{
+  wp = m_weightACDCParam; // expose the internal table directly
+}
+
+/** Reset the weighted-prediction AC/DC statistics to zero for every
+ *  colour component.
+ */
+Void  TComSlice::initWpAcDcParam()
+{
+  for (Int comp = 0; comp < MAX_NUM_COMPONENT; comp++)
+  {
+    m_weightACDCParam[comp].iAC = 0;
+    m_weightACDCParam[comp].iDC = 0;
+  }
+}
+
+/** Fetch the weighted-prediction parameter table for one reference picture.
+ * \param e        reference picture list (L0 or L1)
+ * \param iRefIdx  reference index within that list
+ * \param wp       (output) pointer to the per-component WP parameters
+ */
+Void  TComSlice::getWpScaling( RefPicList e, Int iRefIdx, WPScalingParam *&wp )
+{
+  assert (e<NUM_REF_PIC_LIST_01); // only L0/L1 carry WP tables
+  wp = m_weightPredTable[e][iRefIdx];
+}
+
+/** Reset every weighted-prediction table entry to the "no weighting"
+ *  defaults: nothing signalled, denominator 0, weight 1, offset 0.
+ * \returns Void
+ */
+Void  TComSlice::resetWpScaling()
+{
+  for ( Int e=0 ; e<NUM_REF_PIC_LIST_01 ; e++ )
+  {
+    for ( Int i=0 ; i<MAX_NUM_REF ; i++ )
+    {
+      for ( Int yuv=0 ; yuv<MAX_NUM_COMPONENT ; yuv++ )
+      {
+        WPScalingParam  *pwp = &(m_weightPredTable[e][i][yuv]);
+        pwp->bPresentFlag      = false;
+        pwp->uiLog2WeightDenom = 0; // original assigned this twice; once suffices
+        pwp->iWeight           = 1;
+        pwp->iOffset           = 0;
+      }
+    }
+  }
+}
+
+/** Finalise the weighted-prediction tables before use: infer weight/offset
+ *  for entries that were not explicitly signalled, then derive the runtime
+ *  fields (w, o, shift, round) from the stored parameters.
+ */
+Void  TComSlice::initWpScaling()
+{
+  const Bool bHighPrecWp = getSPS()->getUseHighPrecisionPredictionWeighting();
+  for ( Int list=0 ; list<NUM_REF_PIC_LIST_01 ; list++ )
+  {
+    for ( Int refIdx=0 ; refIdx<MAX_NUM_REF ; refIdx++ )
+    {
+      for ( Int comp=0 ; comp<MAX_NUM_COMPONENT ; comp++ )
+      {
+        WPScalingParam  *param = &(m_weightPredTable[list][refIdx][comp]);
+        if ( !param->bPresentFlag )
+        {
+          // Parameters absent from the bitstream: infer the default values.
+          param->iWeight = (1 << param->uiLog2WeightDenom);
+          param->iOffset = 0;
+        }
+
+        // Offsets scale with bit depth above 8 unless high-precision
+        // weighting is enabled.
+        const Int offsetScale = bHighPrecWp ? 1 : (1 << (g_bitDepth[toChannelType(ComponentID(comp))]-8));
+
+        param->w      = param->iWeight;
+        param->o      = param->iOffset * offsetScale; //NOTE: This value of the ".o" variable is never used - .o is set immediately before it gets used
+        param->shift  = param->uiLog2WeightDenom;
+        param->round  = (param->uiLog2WeightDenom>=1) ? (1 << (param->uiLog2WeightDenom-1)) : (0);
+      }
+    }
+  }
+}
+
+// ------------------------------------------------------------------------------------------------
+// Video parameter set (VPS)
+// ------------------------------------------------------------------------------------------------
+TComVPS::TComVPS()
+: m_VPSId                     (  0)
+, m_uiMaxTLayers              (  1)
+, m_uiMaxLayers               (  1)
+, m_bTemporalIdNestingFlag    (false)
+, m_numHrdParameters          (  0)
+, m_maxNuhReservedZeroLayerId (  0)
+, m_hrdParameters             (NULL)
+, m_hrdOpSetIdx               (NULL)
+, m_cprmsPresentFlag          (NULL)
+{
+  // Per-temporal-layer defaults: no reordering, single-picture DPB,
+  // no latency increase.
+  for (Int layer = 0; layer < MAX_TLAYER; layer++)
+  {
+    m_numReorderPics[layer]       = 0;
+    m_uiMaxDecPicBuffering[layer] = 1;
+    m_uiMaxLatencyIncrease[layer] = 0;
+  }
+}
+
+TComVPS::~TComVPS()
+{
+  // delete[] on a null pointer is a well-defined no-op, so the original
+  // `if (ptr != NULL)` guards were redundant.
+  delete[] m_hrdParameters;
+  delete[] m_hrdOpSetIdx;
+  delete[] m_cprmsPresentFlag;
+}
+
+// ------------------------------------------------------------------------------------------------
+// Sequence parameter set (SPS)
+// ------------------------------------------------------------------------------------------------
+
+// Default-construct an SPS with the reference-software defaults
+// (CIF 352x288, 4:2:0, 8-bit, 32x32 CTU, all optional tools off).
+TComSPS::TComSPS()
+: m_SPSId                     (  0)
+, m_VPSId                     (  0)
+, m_chromaFormatIdc           (CHROMA_420)
+, m_uiMaxTLayers              (  1)
+// Structure
+, m_picWidthInLumaSamples     (352)
+, m_picHeightInLumaSamples    (288)
+, m_log2MinCodingBlockSize    (  0)
+, m_log2DiffMaxMinCodingBlockSize(0)
+, m_uiMaxCUWidth              ( 32)
+, m_uiMaxCUHeight             ( 32)
+, m_uiMaxCUDepth              (  3)
+, m_bLongTermRefsPresent      (false)
+, m_uiQuadtreeTULog2MaxSize   (  0)
+, m_uiQuadtreeTULog2MinSize   (  0)
+, m_uiQuadtreeTUMaxDepthInter (  0)
+, m_uiQuadtreeTUMaxDepthIntra (  0)
+// Tool list
+, m_usePCM                    (false)
+, m_pcmLog2MaxSize            (  5)
+, m_uiPCMLog2MinSize          (  7)
+, m_useExtendedPrecision      (false)
+, m_useHighPrecisionPredictionWeighting(false)
+, m_useResidualRotation       (false)
+, m_useSingleSignificanceMapContext(false)
+, m_useGolombRiceParameterAdaptation(false)
+, m_alignCABACBeforeBypass    (false)
+, m_bPCMFilterDisableFlag     (false)
+, m_disableIntraReferenceSmoothing(false)
+, m_uiBitsForPOC              (  8)
+, m_numLongTermRefPicSPS      (  0)
+, m_uiMaxTrSize               ( 32)
+, m_bUseSAO                   (false)
+, m_bTemporalIdNestingFlag    (false)
+, m_scalingListEnabledFlag    (false)
+, m_useStrongIntraSmoothing   (false)
+, m_vuiParametersPresentFlag  (false)
+, m_vuiParameters             ()
+{
+  // 8-bit coding and PCM depth, no QP bit-depth offset, for luma and chroma.
+  for(Int ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    m_uiBitDepth   [ch] = 8;
+    m_uiPCMBitDepth[ch] = 8;
+    m_qpBDOffset   [ch] = 0;
+  }
+
+  // Per-temporal-layer DPB defaults (single picture, no reordering/latency).
+  for ( Int i = 0; i < MAX_TLAYER; i++ )
+  {
+    m_uiMaxLatencyIncrease[i] = 0;
+    m_uiMaxDecPicBuffering[i] = 1;
+    m_numReorderPics[i]       = 0;
+  }
+
+  // Residual DPCM disabled for all signalling modes.
+  for (UInt signallingModeIndex = 0; signallingModeIndex < NUMBER_OF_RDPCM_SIGNALLING_MODES; signallingModeIndex++)
+  {
+    m_useResidualDPCM[signallingModeIndex] = false;
+  }
+
+  // Owned scaling list (released in the destructor); long-term ref tables zeroed.
+  m_scalingList = new TComScalingList;
+  ::memset(m_ltRefPicPocLsbSps, 0, sizeof(m_ltRefPicPocLsbSps));
+  ::memset(m_usedByCurrPicLtSPSFlag, 0, sizeof(m_usedByCurrPicLtSPSFlag));
+}
+
+TComSPS::~TComSPS()
+{
+  // Release the owned scaling list, then tear down the RPS list storage.
+  delete m_scalingList;
+  m_RPSList.destroy();
+}
+
+/** (Re)allocate the SPS reference-picture-set list with numRPS entries,
+ *  releasing any previously created storage first.
+ */
+Void  TComSPS::createRPSList( Int numRPS )
+{
+  m_RPSList.destroy(); // safe even when nothing was created yet
+  m_RPSList.create(numRPS);
+}
+
+/** Populate the VUI timing and HRD parameters from encoder settings.
+ *  No-op when VUI parameters are not signalled.
+ * \param frameRate    target frame rate; 24/25/30/50/60 get exact 27 MHz
+ *                     clock values, anything else falls back to 60000/1001
+ * \param numDU        decoding units per AU; >1 enables sub-picture CPB
+ * \param bitRate      target bit rate (0 disables NAL/VCL HRD); also used
+ *                     as the 1-second CPB size
+ * \param randomAccess selects removal/output delay field lengths
+ * NOTE: only the case of "vps_max_temporal_layers_minus1 = 0" is supported.
+ */
+Void TComSPS::setHrdParameters( UInt frameRate, UInt numDU, UInt bitRate, Bool randomAccess )
+{
+  if( !getVuiParametersPresentFlag() )
+  {
+    return;
+  }
+
+  TComVUI *vui = getVuiParameters();
+  TComHRD *hrd = vui->getHrdParameters();
+
+  TimingInfo *timingInfo = vui->getTimingInfo();
+  timingInfo->setTimingInfoPresentFlag( true );
+  switch( frameRate )
+  {
+  case 24:
+    timingInfo->setNumUnitsInTick( 1125000 );    timingInfo->setTimeScale    ( 27000000 );
+    break;
+  case 25:
+    timingInfo->setNumUnitsInTick( 1080000 );    timingInfo->setTimeScale    ( 27000000 );
+    break;
+  case 30:
+    timingInfo->setNumUnitsInTick( 900900 );     timingInfo->setTimeScale    ( 27000000 );
+    break;
+  case 50:
+    timingInfo->setNumUnitsInTick( 540000 );     timingInfo->setTimeScale    ( 27000000 );
+    break;
+  case 60:
+    timingInfo->setNumUnitsInTick( 450450 );     timingInfo->setTimeScale    ( 27000000 );
+    break;
+  default:
+    timingInfo->setNumUnitsInTick( 1001 );       timingInfo->setTimeScale    ( 60000 );
+    break;
+  }
+
+  Bool rateCnt = ( bitRate > 0 );
+  hrd->setNalHrdParametersPresentFlag( rateCnt );
+  hrd->setVclHrdParametersPresentFlag( rateCnt );
+
+  hrd->setSubPicCpbParamsPresentFlag( ( numDU > 1 ) );
+
+  if( hrd->getSubPicCpbParamsPresentFlag() )
+  {
+    hrd->setTickDivisorMinus2( 100 - 2 );                          //
+    hrd->setDuCpbRemovalDelayLengthMinus1( 7 );                    // 8-bit precision ( plus 1 for last DU in AU )
+    hrd->setSubPicCpbParamsInPicTimingSEIFlag( true );
+    hrd->setDpbOutputDelayDuLengthMinus1( 5 + 7 );                 // With sub-clock tick factor of 100, at least 7 bits to have the same value as AU dpb delay
+  }
+  else
+  {
+    hrd->setSubPicCpbParamsInPicTimingSEIFlag( false );
+  }
+
+  hrd->setBitRateScale( 4 );                                       // in units of 2~( 6 + 4 ) = 1,024 bps
+  hrd->setCpbSizeScale( 6 );                                       // in units of 2~( 4 + 4 ) = 1,024 bit
+  hrd->setDuCpbSizeScale( 6 );                                       // in units of 2~( 4 + 4 ) = 1,024 bit
+
+  hrd->setInitialCpbRemovalDelayLengthMinus1(15);                  // assuming 0.5 sec, log2( 90,000 * 0.5 ) = 16-bit
+  if( randomAccess )
+  {
+    hrd->setCpbRemovalDelayLengthMinus1(5);                        // 32 = 2^5 (plus 1)
+    hrd->setDpbOutputDelayLengthMinus1 (5);                        // 32 + 3 = 2^6
+  }
+  else
+  {
+    hrd->setCpbRemovalDelayLengthMinus1(9);                        // max. 2^10
+    hrd->setDpbOutputDelayLengthMinus1 (9);                        // max. 2^10
+  }
+
+/*
+   Note: only the case of "vps_max_temporal_layers_minus1 = 0" is supported.
+*/
+  Int i, j;
+  UInt bitrateValue, cpbSizeValue;
+  UInt duCpbSizeValue;
+  UInt duBitRateValue = 0;
+
+  // Fill the per-layer CPB tables; both NAL (idx 0) and VCL (idx 1) branches
+  // get the same rates; only the first CPB is flagged CBR.
+  for( i = 0; i < MAX_TLAYER; i ++ )
+  {
+    hrd->setFixedPicRateFlag( i, 1 );
+    hrd->setPicDurationInTcMinus1( i, 0 );
+    hrd->setLowDelayHrdFlag( i, 0 );
+    hrd->setCpbCntMinus1( i, 0 );
+
+    bitrateValue = bitRate;
+    cpbSizeValue = bitRate;                                     // 1 second
+    duCpbSizeValue = bitRate/numDU;
+    duBitRateValue = bitRate;
+
+    for( j = 0; j < ( hrd->getCpbCntMinus1( i ) + 1 ); j ++ )
+    {
+      hrd->setBitRateValueMinus1( i, j, 0, ( bitrateValue - 1 ) );
+      hrd->setCpbSizeValueMinus1( i, j, 0, ( cpbSizeValue - 1 ) );
+      hrd->setDuCpbSizeValueMinus1( i, j, 0, ( duCpbSizeValue - 1 ) );
+      hrd->setCbrFlag( i, j, 0, ( j == 0 ) );
+
+      hrd->setBitRateValueMinus1( i, j, 1, ( bitrateValue - 1) );
+      hrd->setCpbSizeValueMinus1( i, j, 1, ( cpbSizeValue - 1 ) );
+      hrd->setDuCpbSizeValueMinus1( i, j, 1, ( duCpbSizeValue - 1 ) );
+      hrd->setDuBitRateValueMinus1( i, j, 1, ( duBitRateValue - 1 ) );
+      hrd->setCbrFlag( i, j, 1, ( j == 0 ) );
+    }
+  }
+}
+
+// Conformance-window offset units; indexed by chroma format
+// (presumably 400/420/422/444 order -- TODO confirm against ChromaFormat enum).
+const Int TComSPS::m_winUnitX[]={1,2,2,1};
+const Int TComSPS::m_winUnitY[]={1,2,1,1};
+
+/** TComPPS constructor: initialises every PPS syntax element to its
+ *  default value and allocates the scaling list owned by this PPS
+ *  (released in ~TComPPS).
+ */
+TComPPS::TComPPS()
+: m_PPSId                            (0)
+, m_SPSId                            (0)
+, m_picInitQPMinus26                 (0)
+, m_useDQP                           (false)
+, m_bConstrainedIntraPred            (false)
+, m_bSliceChromaQpFlag               (false)
+, m_pcSPS                            (NULL)
+, m_uiMaxCuDQPDepth                  (0)
+, m_uiMinCuDQPSize                   (0)
+, m_MaxCuChromaQpAdjDepth            (0)
+, m_MinCuChromaQpAdjSize             (0)
+, m_ChromaQpAdjTableSize             (0)
+, m_chromaCbQpOffset                 (0)
+, m_chromaCrQpOffset                 (0)
+, m_numRefIdxL0DefaultActive         (1)
+, m_numRefIdxL1DefaultActive         (1)
+, m_useCrossComponentPrediction      (false)
+, m_TransquantBypassEnableFlag       (false)
+, m_useTransformSkip                 (false)
+, m_transformSkipLog2MaxSize         (2)
+, m_dependentSliceSegmentsEnabledFlag(false)
+, m_tilesEnabledFlag                 (false)
+, m_entropyCodingSyncEnabledFlag     (false)
+, m_loopFilterAcrossTilesEnabledFlag (true)
+, m_uniformSpacingFlag               (false)
+, m_numTileColumnsMinus1             (0)
+, m_numTileRowsMinus1                (0)
+, m_numSubstreams                    (1)
+, m_signHideFlag                     (0)
+, m_cabacInitPresentFlag             (false)
+, m_encCABACTableIdx                 (I_SLICE)
+, m_sliceHeaderExtensionPresentFlag  (false)
+, m_loopFilterAcrossSlicesEnabledFlag(false)
+, m_listsModificationPresentFlag     (0)
+, m_numExtraSliceHeaderBits          (0)
+{
+  // the PPS owns its scaling list; freed in the destructor
+  m_scalingList = new TComScalingList;
+  for(Int ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    m_saoOffsetBitShift[ch] = 0;
+  }
+  // table entry 0 carries zero Cb/Cr offsets
+  m_ChromaQpAdjTable[0].u.comp.CbOffset = 0;
+  m_ChromaQpAdjTable[0].u.comp.CrOffset = 0;
+}
+
+/** Releases the scaling list allocated in the constructor. */
+TComPPS::~TComPPS()
+{
+  delete m_scalingList;
+}
+
+/** Zero-initialise all scalar fields and the per-picture arrays of the RPS. */
+TComReferencePictureSet::TComReferencePictureSet()
+: m_numberOfPictures (0)
+, m_numberOfNegativePictures (0)
+, m_numberOfPositivePictures (0)
+, m_numberOfLongtermPictures (0)
+, m_interRPSPrediction (0)
+, m_deltaRIdxMinus1 (0)
+, m_deltaRPS (0)
+, m_numRefIdc (0)
+{
+  ::memset( m_deltaPOC, 0, sizeof(m_deltaPOC) );
+  ::memset( m_POC, 0, sizeof(m_POC) );
+  ::memset( m_used, 0, sizeof(m_used) );
+  ::memset( m_refIdc, 0, sizeof(m_refIdc) );
+}
+
+TComReferencePictureSet::~TComReferencePictureSet()
+{
+}
+
+// --- simple per-entry accessors; bufferNum is not range-checked ---
+
+Void TComReferencePictureSet::setUsed(Int bufferNum, Bool used)
+{
+  m_used[bufferNum] = used;
+}
+
+Void TComReferencePictureSet::setDeltaPOC(Int bufferNum, Int deltaPOC)
+{
+  m_deltaPOC[bufferNum] = deltaPOC;
+}
+
+Void TComReferencePictureSet::setNumberOfPictures(Int numberOfPictures)
+{
+  m_numberOfPictures = numberOfPictures;
+}
+
+Int TComReferencePictureSet::getUsed(Int bufferNum)
+{
+  return m_used[bufferNum];
+}
+
+Int TComReferencePictureSet::getDeltaPOC(Int bufferNum)
+{
+  return m_deltaPOC[bufferNum];
+}
+
+Int TComReferencePictureSet::getNumberOfPictures()
+{
+  return m_numberOfPictures;
+}
+
+Int TComReferencePictureSet::getPOC(Int bufferNum)
+{
+  return m_POC[bufferNum];
+}
+
+Void TComReferencePictureSet::setPOC(Int bufferNum, Int POC)
+{
+  m_POC[bufferNum] = POC;
+}
+
+Bool TComReferencePictureSet::getCheckLTMSBPresent(Int bufferNum)
+{
+  return m_bCheckLTMSB[bufferNum];
+}
+
+Void TComReferencePictureSet::setCheckLTMSBPresent(Int bufferNum, Bool b)
+{
+  m_bCheckLTMSB[bufferNum] = b;
+}
+
+/** set the reference idc value at bufferNum entry to the value of refIdc
+ * \param bufferNum entry index
+ * \param refIdc value to store
+ * \returns Void
+ */
+Void TComReferencePictureSet::setRefIdc(Int bufferNum, Int refIdc)
+{
+  m_refIdc[bufferNum] = refIdc;
+}
+
+/** get the reference idc value at bufferNum
+ * \param bufferNum entry index
+ * \returns Int
+ */
+Int  TComReferencePictureSet::getRefIdc(Int bufferNum)
+{
+  return m_refIdc[bufferNum];
+}
+
+/** Sorts the deltaPOC and Used by current values in the RPS based on the deltaPOC values.
+ *  deltaPOC values are sorted with -ve values before the +ve values.  -ve values are in decreasing order.
+ *  +ve values are in increasing order.
+ * \returns Void
+ */
+Void TComReferencePictureSet::sortDeltaPOC()
+{
+  // sort in increasing order (smallest first) -- insertion sort; RPS lists are tiny
+  for(Int j=1; j < getNumberOfPictures(); j++)
+  {
+    Int deltaPOC = getDeltaPOC(j);
+    Bool used = getUsed(j);
+    for (Int k=j-1; k >= 0; k--)
+    {
+      Int temp = getDeltaPOC(k);
+      if (deltaPOC < temp)
+      {
+        // shift the larger entry up; keep its Used flag paired with it
+        setDeltaPOC(k+1, temp);
+        setUsed(k+1, getUsed(k));
+        setDeltaPOC(k, deltaPOC);
+        setUsed(k, used);
+      }
+    }
+  }
+  // flip the negative values to largest first (reverse the first numNegPics entries)
+  Int numNegPics = getNumberOfNegativePictures();
+  for(Int j=0, k=numNegPics-1; j < numNegPics>>1; j++, k--)
+  {
+    Int deltaPOC = getDeltaPOC(j);
+    Bool used = getUsed(j);
+    setDeltaPOC(j, getDeltaPOC(k));
+    setUsed(j, getUsed(k));
+    setDeltaPOC(k, deltaPOC);
+    setUsed(k, used);
+  }
+}
+
+/** Prints the deltaPOC and RefIdc (if available) values in the RPS.
+ *  A "*" is added to the deltaPOC value if it is Used by current.
+ * \returns Void
+ */
+Void TComReferencePictureSet::printDeltaPOC()
+{
+  printf("DeltaPOC = { ");
+  for(Int j=0; j < getNumberOfPictures(); j++)
+  {
+    printf("%d%s ", getDeltaPOC(j), (getUsed(j)==1)?"*":"");
+  }
+  // RefIdc values only exist when this RPS was inter-RPS predicted
+  if (getInterRPSPrediction())
+  {
+    printf("}, RefIdc = { ");
+    for(Int j=0; j < getNumRefIdc(); j++)
+    {
+      printf("%d ", getRefIdc(j));
+    }
+  }
+  printf("}\n");
+}
+
+TComRPSList::TComRPSList()
+:m_referencePictureSets (NULL)
+{
+}
+
+// NOTE(review): destructor does not call destroy(); owner must call it explicitly.
+TComRPSList::~TComRPSList()
+{
+}
+
+/** Allocate the RPS array.
+ *  NOTE(review): does not free a previously allocated array -- call destroy() first.
+ */
+Void TComRPSList::create( Int numberOfReferencePictureSets)
+{
+  m_numberOfReferencePictureSets = numberOfReferencePictureSets;
+  m_referencePictureSets = new TComReferencePictureSet[numberOfReferencePictureSets];
+}
+
+/** Release the RPS array and reset the list to its empty state. */
+Void TComRPSList::destroy()
+{
+  delete [] m_referencePictureSets;   // delete[] of NULL is a harmless no-op
+  m_referencePictureSets = NULL;
+  m_numberOfReferencePictureSets = 0;
+}
+
+
+
+// Returns a pointer into the internal array; index is not range-checked.
+TComReferencePictureSet* TComRPSList::getReferencePictureSet(Int referencePictureSetNum)
+{
+  return &m_referencePictureSets[referencePictureSetNum];
+}
+
+Int TComRPSList::getNumberOfReferencePictureSets()
+{
+  return m_numberOfReferencePictureSets;
+}
+
+// NOTE(review): only updates the count; does not resize the array.
+Void TComRPSList::setNumberOfReferencePictureSets(Int numberOfReferencePictureSets)
+{
+  m_numberOfReferencePictureSets = numberOfReferencePictureSets;
+}
+
+/** Clear both L0/L1 modification flags and zero the index tables. */
+TComRefPicListModification::TComRefPicListModification()
+: m_bRefPicListModificationFlagL0 (false)
+, m_bRefPicListModificationFlagL1 (false)
+{
+  ::memset( m_RefPicSetIdxL0, 0, sizeof(m_RefPicSetIdxL0) );
+  ::memset( m_RefPicSetIdxL1, 0, sizeof(m_RefPicSetIdxL1) );
+}
+
+TComRefPicListModification::~TComRefPicListModification()
+{
+}
+
+// Allocates the per-size/per-list coefficient arrays (see init()).
+TComScalingList::TComScalingList()
+{
+  init();
+}
+
+// Frees the coefficient arrays allocated by init().
+TComScalingList::~TComScalingList()
+{
+  destroy();
+}
+
+/** set default quantization matrix to array
+ *  Copies the specification-default matrix into every (sizeId, listId) slot.
+*/
+Void TComSlice::setDefaultScalingList()
+{
+  for(UInt sizeId = 0; sizeId < SCALING_LIST_SIZE_NUM; sizeId++)
+  {
+    for(UInt listId=0;listId<SCALING_LIST_NUM;listId++)
+    {
+      getScalingList()->processDefaultMatrix(sizeId, listId);
+    }
+  }
+}
+/** Check whether any quantization matrix differs from the specification default.
+ *  A matrix counts as "default" when its coefficients match the default table
+ *  and, for sizes >= 16x16, its DC value equals 16.
+ * \returns false when every matrix is the default; true when at least one
+ *          non-default matrix is present
+ */
+Bool TComSlice::checkDefaultScalingList()
+{
+  const UInt totalMatrices = SCALING_LIST_NUM * SCALING_LIST_SIZE_NUM;
+  UInt matchingDefault = 0;
+
+  for(UInt s = 0; s < SCALING_LIST_SIZE_NUM; s++)
+  {
+    for(UInt l = 0; l < SCALING_LIST_NUM; l++)
+    {
+      const Bool coefsMatch = ( memcmp( getScalingList()->getScalingListAddress(s,l),
+                                        getScalingList()->getScalingListDefaultAddress(s,l),
+                                        sizeof(Int)*min(MAX_MATRIX_COEF_NUM,(Int)g_scalingListSize[s]) ) == 0 );
+      const Bool dcMatches  = ( (s < SCALING_LIST_16x16) ||
+                                (getScalingList()->getScalingListDC(s,l) == 16) );
+      if (coefsMatch && dcMatches)
+      {
+        matchingDefault++;
+      }
+    }
+  }
+
+  return matchingDefault != totalMatrices;
+}
+
+/** get scaling matrix from RefMatrixID
+ * \param sizeId    size index
+ * \param listId    index of the destination (input) matrix
+ * \param refListId index of the reference (source) matrix; if it equals
+ *                  listId the default matrix is copied instead
+ */
+Void TComScalingList::processRefMatrix( UInt sizeId, UInt listId , UInt refListId )
+{
+  ::memcpy(getScalingListAddress(sizeId, listId),((listId == refListId)? getScalingListDefaultAddress(sizeId, refListId): getScalingListAddress(sizeId, refListId)),sizeof(Int)*min(MAX_MATRIX_COEF_NUM,(Int)g_scalingListSize[sizeId]));
+}
+
+/** Print help text describing the scaling list file format to a stream.
+ *  \param os destination stream
+ *  (The comment formerly here documented xParseScalingList, defined below.)
+ */
+
+static Void outputScalingListHelp(std::ostream &os)
+{
+  os << "The scaling list file specifies all matrices and their DC values; none can be missing,\n"
+         "but their order is arbitrary.\n\n"
+         "The matrices are specified by:\n"
+         "<matrix name><unchecked data>\n"
+         "  <value>,<value>,<value>,....\n\n"
+         "  Line-feeds can be added arbitrarily between values, and the number of values needs to be\n"
+         "  at least the number of entries for the matrix (superfluous entries are ignored).\n"
+         "  The <unchecked data> is text on the same line as the matrix that is not checked\n"
+         "  except to ensure that the matrix name token is unique. It is recommended that it is ' ='\n"
+         "  The values in the matrices are the absolute values (0-255), not the delta values as\n"
+         "  exchanged between the encoder and decoder\n\n"
+         "The DC values (for matrix sizes larger than 8x8) are specified by:\n"
+         "<matrix name>_DC<unchecked data>\n"
+         "  <value>\n";
+
+  os << "The permitted matrix names are:\n";
+  for(UInt sizeIdc = 0; sizeIdc < SCALING_LIST_SIZE_NUM; sizeIdc++)
+  {
+    for(UInt listIdc = 0; listIdc < SCALING_LIST_NUM; listIdc++)
+    {
+      // 32x32 chroma lists are derived from 16x16, so they have no name of their own
+      if ((sizeIdc!=SCALING_LIST_32x32) || (listIdc%(SCALING_LIST_NUM/NUMBER_OF_PREDICTION_MODES) == 0))
+      {
+        os << "  " << MatrixType[sizeIdc][listIdc] << '\n';
+      }
+    }
+  }
+}
+
+/** Write every scaling list (and DC values for sizes > 8x8) to a stream
+ *  in the same textual format that xParseScalingList() reads.
+ *  \param os destination stream
+ */
+Void TComScalingList::outputScalingLists(std::ostream &os) const
+{
+  for(UInt sizeIdc = 0; sizeIdc < SCALING_LIST_SIZE_NUM; sizeIdc++)
+  {
+    const UInt size = min(8,4<<(sizeIdc));  // matrices larger than 8x8 are stored as 8x8
+    for(UInt listIdc = 0; listIdc < SCALING_LIST_NUM; listIdc++)
+    {
+      if ((sizeIdc!=SCALING_LIST_32x32) || (listIdc%(SCALING_LIST_NUM/NUMBER_OF_PREDICTION_MODES) == 0))
+      {
+        const Int *src = getScalingListAddress(sizeIdc, listIdc);
+        os << (MatrixType[sizeIdc][listIdc]) << " =\n  ";
+        for(UInt y=0; y<size; y++)
+        {
+          for(UInt x=0; x<size; x++, src++)
+          {
+            os << std::setw(3) << (*src) << ", ";
+          }
+          os << (y+1<size?"\n  ":"\n");
+        }
+        if(sizeIdc > SCALING_LIST_8x8)
+        {
+          os << MatrixType_DC[sizeIdc][listIdc] << " = \n  " << std::setw(3) << getScalingListDC(sizeIdc, listIdc) << "\n";
+        }
+        os << "\n";
+      }
+    }
+  }
+}
+
+/** Parse quantization matrices from a scaling list text file.
+ *  See outputScalingListHelp() for the expected file format.
+ * \param pchFile path of the scaling list file; if NULL, help text and an
+ *                example file are printed and the process exits
+ * \returns false if successful, true on any error
+ */
+Bool TComScalingList::xParseScalingList(Char* pchFile)
+{
+  static const Int LINE_SIZE=1024;
+  FILE *fp = NULL;
+  Char line[LINE_SIZE];
+
+  if (pchFile == NULL)
+  {
+    fprintf(stderr, "Error: no scaling list file specified. Help on scaling lists being output\n");
+    outputScalingListHelp(std::cout);
+    std::cout << "\n\nExample scaling list file using default values:\n\n";
+    outputScalingLists(std::cout);
+    exit (1);
+    return true;
+  }
+  else if ((fp = fopen(pchFile,"r")) == (FILE*)NULL)
+  {
+    fprintf(stderr, "Error: cannot open scaling list file %s for reading\n",pchFile);
+    return true;
+  }
+
+  for(UInt sizeIdc = 0; sizeIdc < SCALING_LIST_SIZE_NUM; sizeIdc++)
+  {
+    const UInt size = min(MAX_MATRIX_COEF_NUM,(Int)g_scalingListSize[sizeIdc]);
+
+    for(UInt listIdc = 0; listIdc < SCALING_LIST_NUM; listIdc++)
+    {
+      Int * const src = getScalingListAddress(sizeIdc, listIdc);
+
+      if ((sizeIdc==SCALING_LIST_32x32) && (listIdc%(SCALING_LIST_NUM/NUMBER_OF_PREDICTION_MODES) != 0)) // derive chroma32x32 from chroma16x16
+      {
+        const Int *srcNextSmallerSize = getScalingListAddress(sizeIdc-1, listIdc);
+        for(UInt i=0; i<size; i++)
+        {
+          src[i] = srcNextSmallerSize[i];
+        }
+        setScalingListDC(sizeIdc,listIdc,(sizeIdc > SCALING_LIST_8x8) ? getScalingListDC(sizeIdc-1, listIdc) : src[0]);
+      }
+      else
+      {
+        {
+          // locate the line naming this matrix (scan from the start of the file)
+          fseek(fp, 0, SEEK_SET);
+          Bool bFound=false;
+          while ((!feof(fp)) && (!bFound))
+          {
+            Char *ret = fgets(line, LINE_SIZE, fp);
+            Char *findNamePosition= ret==NULL ? NULL : strstr(line, MatrixType[sizeIdc][listIdc]);
+            // This could be a match against the DC string as well, so verify it isn't
+            if (findNamePosition!= NULL && (MatrixType_DC[sizeIdc][listIdc]==NULL || strstr(line, MatrixType_DC[sizeIdc][listIdc])==NULL))
+            {
+              bFound=true;
+            }
+          }
+          if (!bFound)
+          {
+            fprintf(stderr, "Error: cannot find Matrix %s from scaling list file %s\n", MatrixType[sizeIdc][listIdc], pchFile);
+            fclose(fp); // BUG FIX: the file handle was leaked on this error path
+            return true;
+          }
+        }
+        for (UInt i=0; i<size; i++)
+        {
+          Int data;
+          if (fscanf(fp, "%d,", &data)!=1)
+          {
+            fprintf(stderr, "Error: cannot read value #%d for Matrix %s from scaling list file %s at file position %ld\n", i, MatrixType[sizeIdc][listIdc], pchFile, ftell(fp));
+            fclose(fp); // BUG FIX: close before the error return
+            return true;
+          }
+          if (data<0 || data>255)
+          {
+            fprintf(stderr, "Error: QMatrix entry #%d of value %d for Matrix %s from scaling list file %s at file position %ld is out of range (0 to 255)\n", i, data, MatrixType[sizeIdc][listIdc], pchFile, ftell(fp));
+            fclose(fp); // BUG FIX: close before the error return
+            return true;
+          }
+          src[i] = data;
+        }
+
+        //set DC value for default matrix check
+        setScalingListDC(sizeIdc,listIdc,src[0]);
+
+        if(sizeIdc > SCALING_LIST_8x8)
+        {
+          {
+            // locate the separate DC entry for this matrix
+            fseek(fp, 0, SEEK_SET);
+            Bool bFound=false;
+            while ((!feof(fp)) && (!bFound))
+            {
+              Char *ret = fgets(line, LINE_SIZE, fp);
+              Char *findNamePosition= ret==NULL ? NULL : strstr(line, MatrixType_DC[sizeIdc][listIdc]);
+              if (findNamePosition!= NULL)
+              {
+                // This won't be a match against the non-DC string.
+                bFound=true;
+              }
+            }
+            if (!bFound)
+            {
+              fprintf(stderr, "Error: cannot find DC Matrix %s from scaling list file %s\n", MatrixType_DC[sizeIdc][listIdc], pchFile);
+              fclose(fp); // BUG FIX: close before the error return
+              return true;
+            }
+          }
+          Int data;
+          if (fscanf(fp, "%d,", &data)!=1)
+          {
+            fprintf(stderr, "Error: cannot read DC %s from scaling list file %s at file position %ld\n", MatrixType_DC[sizeIdc][listIdc], pchFile, ftell(fp));
+            fclose(fp); // BUG FIX: close before the error return
+            return true;
+          }
+          if (data<0 || data>255)
+          {
+            fprintf(stderr, "Error: DC value %d for Matrix %s from scaling list file %s at file position %ld is out of range (0 to 255)\n", data, MatrixType[sizeIdc][listIdc], pchFile, ftell(fp));
+            fclose(fp); // BUG FIX: close before the error return
+            return true;
+          }
+          //overwrite DC value when size of matrix is larger than 16x16
+          setScalingListDC(sizeIdc,listIdc,data);
+        }
+      }
+    }
+  }
+//  std::cout << "\n\nRead scaling lists of:\n\n";
+//  outputScalingLists(std::cout);
+
+  fclose(fp);
+  return false;
+}
+
+/** initialization process of quantization matrix array
+ *  Allocates one coefficient array per (sizeId, listId); freed by destroy().
+ */
+Void TComScalingList::init()
+{
+  for(UInt sizeId = 0; sizeId < SCALING_LIST_SIZE_NUM; sizeId++)
+  {
+    for(UInt listId = 0; listId < SCALING_LIST_NUM; listId++)
+    {
+      m_scalingListCoef[sizeId][listId] = new Int [min(MAX_MATRIX_COEF_NUM,(Int)g_scalingListSize[sizeId])];
+    }
+  }
+}
+
+/** destroy quantization matrix array
+ *  Frees every coefficient array allocated by init() and nulls the pointers,
+ *  making a second destroy() call safe (previously it would double-delete).
+ */
+Void TComScalingList::destroy()
+{
+  for(UInt sizeId = 0; sizeId < SCALING_LIST_SIZE_NUM; sizeId++)
+  {
+    for(UInt listId = 0; listId < SCALING_LIST_NUM; listId++)
+    {
+      delete [] m_scalingListCoef[sizeId][listId];   // delete[] of NULL is a no-op
+      m_scalingListCoef[sizeId][listId] = NULL;      // BUG FIX: guard against double free
+    }
+  }
+}
+
+/** get default address of quantization matrix
+ * \param sizeId size index
+ * \param listId list index
+ * \returns pointer of quantization matrix
+ *  For sizes >= 8x8 the intra default is used for the first half of the
+ *  list indices and the inter default for the rest.
+ */
+Int* TComScalingList::getScalingListDefaultAddress(UInt sizeId, UInt listId)
+{
+  Int *src = 0;
+  switch(sizeId)
+  {
+    case SCALING_LIST_4x4:
+      src = g_quantTSDefault4x4;
+      break;
+    case SCALING_LIST_8x8:
+    case SCALING_LIST_16x16:
+    case SCALING_LIST_32x32:
+      src = (listId < (SCALING_LIST_NUM/NUMBER_OF_PREDICTION_MODES) ) ? g_quantIntraDefault8x8 : g_quantInterDefault8x8;
+      break;
+    default:
+      assert(0);
+      src = NULL;
+      break;
+  }
+  return src;
+}
+
+/** process of default matrix
+ * \param sizeId size index
+ * \param listId index of input matrix
+ *  Copies the default coefficients into the slot and sets the default DC value.
+ */
+Void TComScalingList::processDefaultMatrix(UInt sizeId, UInt listId)
+{
+  ::memcpy(getScalingListAddress(sizeId, listId),getScalingListDefaultAddress(sizeId,listId),sizeof(Int)*min(MAX_MATRIX_COEF_NUM,(Int)g_scalingListSize[sizeId]));
+  setScalingListDC(sizeId,listId,SCALING_LIST_DC);
+}
+
+/** check DC value of matrix for default matrix signaling
+ *  A DC value of 0 marks "use the default matrix" for that slot.
+ */
+Void TComScalingList::checkDcOfMatrix()
+{
+  for(UInt sizeId = 0; sizeId < SCALING_LIST_SIZE_NUM; sizeId++)
+  {
+    for(UInt listId = 0; listId < SCALING_LIST_NUM; listId++)
+    {
+      //check default matrix?
+      if(getScalingListDC(sizeId,listId) == 0)
+      {
+        processDefaultMatrix(sizeId, listId);
+      }
+    }
+  }
+}
+
+// Parameter-set maps are sized by their MAX_NUM_* limits; -1 marks "nothing active yet".
+ParameterSetManager::ParameterSetManager()
+: m_vpsMap(MAX_NUM_VPS)
+, m_spsMap(MAX_NUM_SPS)
+, m_ppsMap(MAX_NUM_PPS)
+, m_activeVPSId(-1)
+, m_activeSPSId(-1)
+, m_activePPSId(-1)
+{
+}
+
+
+ParameterSetManager::~ParameterSetManager()
+{
+}
+
+//! activate a SPS from a active parameter sets SEI message
+//! \returns true, if activation is successful
+// NOTE(review): the warning messages below lack a trailing newline.
+Bool ParameterSetManager::activateSPSWithSEI(Int spsId)
+{
+  TComSPS *sps = m_spsMap.getPS(spsId);
+  if (sps)
+  {
+    // the SPS is only activated if the VPS it references also exists
+    Int vpsId = sps->getVPSId();
+    if (m_vpsMap.getPS(vpsId))
+    {
+      m_activeVPSId = vpsId;
+      m_activeSPSId = spsId;
+      return true;
+    }
+    else
+    {
+      printf("Warning: tried to activate SPS using an Active parameter sets SEI message. Referenced VPS does not exist.");
+    }
+  }
+  else
+  {
+    printf("Warning: tried to activate non-existing SPS using an Active parameter sets SEI message.");
+  }
+  return false;
+}
+
+//! activate a PPS and depending on isIDR parameter also SPS and VPS
+//! \returns true, if activation is successful
+// At a non-IRAP picture the referenced SPS/VPS must already be the active ones;
+// at an IRAP the whole VPS/SPS/PPS chain may switch.
+Bool ParameterSetManager::activatePPS(Int ppsId, Bool isIRAP)
+{
+  TComPPS *pps = m_ppsMap.getPS(ppsId);
+  if (pps)
+  {
+    Int spsId = pps->getSPSId();
+    if (!isIRAP && (spsId != m_activeSPSId))
+    {
+      printf("Warning: tried to activate PPS referring to a inactive SPS at non-IDR.");
+      return false;
+    }
+    TComSPS *sps = m_spsMap.getPS(spsId);
+    if (sps)
+    {
+      Int vpsId = sps->getVPSId();
+      if (!isIRAP && (vpsId != m_activeVPSId))
+      {
+        printf("Warning: tried to activate PPS referring to a inactive VPS at non-IDR.");
+        return false;
+      }
+      if (m_vpsMap.getPS(vpsId))
+      {
+        // all three parameter sets exist and are consistent: activate the chain
+        m_activePPSId = ppsId;
+        m_activeVPSId = vpsId;
+        m_activeSPSId = spsId;
+        return true;
+      }
+      else
+      {
+        printf("Warning: tried to activate PPS that refers to a non-existing VPS.");
+      }
+    }
+    else
+    {
+      printf("Warning: tried to activate a PPS that refers to a non-existing SPS.");
+    }
+  }
+  else
+  {
+    printf("Warning: tried to activate non-existing PPS.");
+  }
+  return false;
+}
+
+// profile_tier_level() defaults: no profile/level signalled, all constraint
+// and compatibility flags cleared.
+ProfileTierLevel::ProfileTierLevel()
+  : m_profileSpace    (0)
+  , m_tierFlag        (Level::MAIN)
+  , m_profileIdc      (Profile::NONE)
+  , m_levelIdc        (Level::NONE)
+  , m_progressiveSourceFlag  (false)
+  , m_interlacedSourceFlag   (false)
+  , m_nonPackedConstraintFlag(false)
+  , m_frameOnlyConstraintFlag(false)
+{
+  ::memset(m_profileCompatibilityFlag, 0, sizeof(m_profileCompatibilityFlag));
+}
+
+// Default TComPTL: no sub-layer profile/level information present.
+TComPTL::TComPTL()
+{
+  ::memset(m_subLayerProfilePresentFlag, 0, sizeof(m_subLayerProfilePresentFlag));
+  ::memset(m_subLayerLevelPresentFlag,   0, sizeof(m_subLayerLevelPresentFlag  ));
+}
+
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComSlice.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1595 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComSlice.h
+    \brief    slice header and SPS class (header)
+*/
+
+#ifndef __TCOMSLICE__
+#define __TCOMSLICE__
+
+#include <cstring>
+#include <map>
+#include <vector>
+#include "CommonDef.h"
+#include "TComRom.h"
+#include "TComList.h"
+#include "TComChromaFormat.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+class TComPic;
+class TComTrQuant;
+// ====================================================================================================================
+// Constants
+// ====================================================================================================================
+
+// TODO confirm: entry count used by the reference picture list modification tables.
+static const UInt REF_PIC_LIST_NUM_IDX=32;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// Reference Picture Set class
+/// Holds one RPS: delta-POC list, per-picture usage flags, and the
+/// inter-RPS-prediction and long-term reference syntax elements.
+class TComReferencePictureSet
+{
+private:
+  // core RPS description
+  Int  m_numberOfPictures;
+  Int  m_numberOfNegativePictures;
+  Int  m_numberOfPositivePictures;
+  Int  m_numberOfLongtermPictures;
+  Int  m_deltaPOC[MAX_NUM_REF_PICS];
+  Int  m_POC[MAX_NUM_REF_PICS];
+  Bool m_used[MAX_NUM_REF_PICS];
+  // inter-RPS prediction syntax
+  Bool m_interRPSPrediction;
+  Int  m_deltaRIdxMinus1;
+  Int  m_deltaRPS;
+  Int  m_numRefIdc;
+  Int  m_refIdc[MAX_NUM_REF_PICS+1];
+  // long-term reference picture bookkeeping
+  Bool m_bCheckLTMSB[MAX_NUM_REF_PICS];
+  Int  m_pocLSBLT[MAX_NUM_REF_PICS];
+  Int  m_deltaPOCMSBCycleLT[MAX_NUM_REF_PICS];
+  Bool m_deltaPocMSBPresentFlag[MAX_NUM_REF_PICS];
+
+public:
+  TComReferencePictureSet();
+  virtual ~TComReferencePictureSet();
+  Int   getPocLSBLT(Int i)                       { return m_pocLSBLT[i]; }
+  Void  setPocLSBLT(Int i, Int x)                { m_pocLSBLT[i] = x; }
+  Int   getDeltaPocMSBCycleLT(Int i)             { return m_deltaPOCMSBCycleLT[i]; }
+  Void  setDeltaPocMSBCycleLT(Int i, Int x)      { m_deltaPOCMSBCycleLT[i] = x; }
+  Bool  getDeltaPocMSBPresentFlag(Int i)         { return m_deltaPocMSBPresentFlag[i]; }
+  Void  setDeltaPocMSBPresentFlag(Int i, Bool x) { m_deltaPocMSBPresentFlag[i] = x;    }
+  Void setUsed(Int bufferNum, Bool used);
+  Void setDeltaPOC(Int bufferNum, Int deltaPOC);
+  Void setPOC(Int bufferNum, Int deltaPOC);
+  Void setNumberOfPictures(Int numberOfPictures);
+  Void setCheckLTMSBPresent(Int bufferNum, Bool b );
+  Bool getCheckLTMSBPresent(Int bufferNum);
+
+  Int  getUsed(Int bufferNum);
+  Int  getDeltaPOC(Int bufferNum);
+  Int  getPOC(Int bufferNum);
+  Int  getNumberOfPictures();
+
+  Void setNumberOfNegativePictures(Int number)  { m_numberOfNegativePictures = number; }
+  Int  getNumberOfNegativePictures()            { return m_numberOfNegativePictures; }
+  Void setNumberOfPositivePictures(Int number)  { m_numberOfPositivePictures = number; }
+  Int  getNumberOfPositivePictures()            { return m_numberOfPositivePictures; }
+  Void setNumberOfLongtermPictures(Int number)  { m_numberOfLongtermPictures = number; }
+  Int  getNumberOfLongtermPictures()            { return m_numberOfLongtermPictures; }
+
+  Void setInterRPSPrediction(Bool flag)         { m_interRPSPrediction = flag; }
+  Bool getInterRPSPrediction()                  { return m_interRPSPrediction; }
+  Void setDeltaRIdxMinus1(Int x)                { m_deltaRIdxMinus1 = x; }
+  Int  getDeltaRIdxMinus1()                     { return m_deltaRIdxMinus1; }
+  Void setDeltaRPS(Int x)                       { m_deltaRPS = x; }
+  Int  getDeltaRPS()                            { return m_deltaRPS; }
+  Void setNumRefIdc(Int x)                      { m_numRefIdc = x; }
+  Int  getNumRefIdc()                           { return m_numRefIdc; }
+
+  Void setRefIdc(Int bufferNum, Int refIdc);
+  Int  getRefIdc(Int bufferNum);
+
+  Void sortDeltaPOC();
+  Void printDeltaPOC();
+};
+
+/// Reference Picture Set set class
+/// Owns a dynamically allocated array of TComReferencePictureSet;
+/// lifetime is managed via create()/destroy().
+class TComRPSList
+{
+private:
+  Int  m_numberOfReferencePictureSets;
+  TComReferencePictureSet* m_referencePictureSets;
+
+public:
+  TComRPSList();
+  virtual ~TComRPSList();
+
+  Void  create  (Int numberOfEntries);
+  Void  destroy ();
+
+
+  TComReferencePictureSet* getReferencePictureSet(Int referencePictureSetNum);
+  Int getNumberOfReferencePictureSets();
+  Void setNumberOfReferencePictureSets(Int numberOfReferencePictureSets);
+};
+
+/// SCALING_LIST class
+/// Quantization matrices (scaling lists) per size/list index, plus the
+/// DC values used for sizes of 16x16 and above.
+class TComScalingList
+{
+public:
+  TComScalingList();
+  virtual ~TComScalingList();
+  Void     setScalingListPresentFlag      (Bool b)                             { m_scalingListPresentFlag = b;    }
+  Bool     getScalingListPresentFlag      ()                                   { return m_scalingListPresentFlag; }
+  Int*           getScalingListAddress    (UInt sizeId, UInt listId)           { return m_scalingListCoef[sizeId][listId]; } //!< get matrix coefficient
+  const Int*     getScalingListAddress    (UInt sizeId, UInt listId) const     { return m_scalingListCoef[sizeId][listId]; } //!< get matrix coefficient
+  Bool     checkPredMode                  (UInt sizeId, UInt listId);
+  Void     setRefMatrixId                 (UInt sizeId, UInt listId, UInt u)   { m_refMatrixId[sizeId][listId] = u;    }     //!< set reference matrix ID
+  UInt     getRefMatrixId                 (UInt sizeId, UInt listId)           { return m_refMatrixId[sizeId][listId]; }     //!< get reference matrix ID
+  Int*     getScalingListDefaultAddress   (UInt sizeId, UInt listId);                                                        //!< get default matrix coefficient
+  Void     processDefaultMatrix           (UInt sizeId, UInt listId);
+  Void     setScalingListDC               (UInt sizeId, UInt listId, UInt u)   { m_scalingListDC[sizeId][listId] = u; }      //!< set DC value
+
+  Int      getScalingListDC               (UInt sizeId, UInt listId) const     { return m_scalingListDC[sizeId][listId]; }   //!< get DC value
+  Void     checkDcOfMatrix                ();
+  Void     processRefMatrix               (UInt sizeId, UInt listId , UInt refListId );
+  Bool     xParseScalingList              (Char* pchFile);
+
+private:
+  Void     init                    ();
+  Void     destroy                 ();
+  Void     outputScalingLists(std::ostream &os) const;
+  Int      m_scalingListDC               [SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM]; //!< the DC value of the matrix coefficient for 16x16
+  Bool     m_useDefaultScalingMatrixFlag [SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM]; //!< UseDefaultScalingMatrixFlag
+  UInt     m_refMatrixId                 [SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM]; //!< RefMatrixID
+  Bool     m_scalingListPresentFlag;                                                //!< scaling list present flag -- TODO confirm (original comment said "flag for using default matrix")
+  UInt     m_predMatrixId                [SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM]; //!< reference list index
+  Int      *m_scalingListCoef            [SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM]; //!< quantization matrix
+};
+
+/// Holder for the profile_tier_level() syntax: profile space/idc, tier,
+/// level, compatibility flags and the constraint flags.
+class ProfileTierLevel
+{
+  Int               m_profileSpace;
+  Level::Tier       m_tierFlag;
+  Profile::Name     m_profileIdc;
+  Bool              m_profileCompatibilityFlag[32];
+  Level::Name       m_levelIdc;
+
+  Bool              m_progressiveSourceFlag;
+  Bool              m_interlacedSourceFlag;
+  Bool              m_nonPackedConstraintFlag;
+  Bool              m_frameOnlyConstraintFlag;
+  UInt              m_bitDepthConstraintValue;
+  ChromaFormat      m_chromaFormatConstraintValue;
+  Bool              m_intraConstraintFlag;
+  Bool              m_lowerBitRateConstraintFlag;
+
+public:
+  ProfileTierLevel();
+
+  Int  getProfileSpace() const                     { return m_profileSpace; }
+  Void setProfileSpace(Int x)                      { m_profileSpace = x; }
+
+  Level::Tier  getTierFlag() const                 { return m_tierFlag; }
+  Void setTierFlag(Level::Tier x)                  { m_tierFlag = x; }
+
+  Profile::Name   getProfileIdc() const            { return m_profileIdc; }
+  Void setProfileIdc(Profile::Name x)              { m_profileIdc = x; }
+
+  Bool getProfileCompatibilityFlag(Int i) const    { return m_profileCompatibilityFlag[i]; }
+  Void setProfileCompatibilityFlag(Int i, Bool x)  { m_profileCompatibilityFlag[i] = x; }
+
+  Level::Name   getLevelIdc() const                { return m_levelIdc; }
+  Void setLevelIdc(Level::Name x)                  { m_levelIdc = x; }
+
+  Bool getProgressiveSourceFlag() const            { return m_progressiveSourceFlag; }
+  Void setProgressiveSourceFlag(Bool b)            { m_progressiveSourceFlag = b; }
+
+  Bool getInterlacedSourceFlag() const             { return m_interlacedSourceFlag; }
+  Void setInterlacedSourceFlag(Bool b)             { m_interlacedSourceFlag = b; }
+
+  Bool getNonPackedConstraintFlag() const          { return m_nonPackedConstraintFlag; }
+  Void setNonPackedConstraintFlag(Bool b)          { m_nonPackedConstraintFlag = b; }
+
+  Bool getFrameOnlyConstraintFlag() const          { return m_frameOnlyConstraintFlag; }
+  Void setFrameOnlyConstraintFlag(Bool b)          { m_frameOnlyConstraintFlag = b; }
+
+  UInt getBitDepthConstraint() const               { return m_bitDepthConstraintValue; }
+  Void setBitDepthConstraint(UInt bitDepth)        { m_bitDepthConstraintValue=bitDepth; }
+
+  ChromaFormat getChromaFormatConstraint() const   { return m_chromaFormatConstraintValue; }
+  Void setChromaFormatConstraint(ChromaFormat fmt) { m_chromaFormatConstraintValue=fmt; }
+
+  Bool getIntraConstraintFlag() const              { return m_intraConstraintFlag; }
+  Void setIntraConstraintFlag(Bool b)              { m_intraConstraintFlag = b; }
+
+  Bool getLowerBitRateConstraintFlag() const       { return m_lowerBitRateConstraintFlag; }
+  Void setLowerBitRateConstraintFlag(Bool b)       { m_lowerBitRateConstraintFlag = b; }
+};
+
+
+/// Container for the general-layer ProfileTierLevel plus one per sub-layer.
+class TComPTL
+{
+  ProfileTierLevel m_generalPTL;
+  ProfileTierLevel m_subLayerPTL    [MAX_TLAYER-1];      // max. value of max_sub_layers_minus1 is MAX_TLAYER-1 (= 6)
+  Bool m_subLayerProfilePresentFlag [MAX_TLAYER-1];
+  Bool m_subLayerLevelPresentFlag   [MAX_TLAYER-1];
+
+public:
+  TComPTL();
+
+  // Sub-layer accessors: i must be in [0, MAX_TLAYER-2] (array size MAX_TLAYER-1).
+  Bool getSubLayerProfilePresentFlag(Int i) const { assert(i >= 0 && i < MAX_TLAYER-1); return m_subLayerProfilePresentFlag[i]; }
+  Void setSubLayerProfilePresentFlag(Int i, Bool x) { assert(i >= 0 && i < MAX_TLAYER-1); m_subLayerProfilePresentFlag[i] = x; }
+
+  Bool getSubLayerLevelPresentFlag(Int i) const { assert(i >= 0 && i < MAX_TLAYER-1); return m_subLayerLevelPresentFlag[i]; }
+  Void setSubLayerLevelPresentFlag(Int i, Bool x) { assert(i >= 0 && i < MAX_TLAYER-1); m_subLayerLevelPresentFlag[i] = x; }
+
+  ProfileTierLevel* getGeneralPTL()  { return &m_generalPTL; }
+  ProfileTierLevel* getSubLayerPTL(Int i)  { assert(i >= 0 && i < MAX_TLAYER-1); return &m_subLayerPTL[i]; }
+};
+
+/// VPS class
+
+/// Per-sub-layer HRD parameters. The 2-D arrays are indexed
+/// [cpb index][0 = NAL HRD, 1 = VCL HRD] by the TComHRD accessors below.
+struct HrdSubLayerInfo
+{
+  Bool fixedPicRateFlag;
+  Bool fixedPicRateWithinCvsFlag;
+  UInt picDurationInTcMinus1;
+  Bool lowDelayHrdFlag;
+  UInt cpbCntMinus1;
+  UInt bitRateValueMinus1[MAX_CPB_CNT][2];
+  UInt cpbSizeValue      [MAX_CPB_CNT][2];
+  UInt ducpbSizeValue    [MAX_CPB_CNT][2];  // decoding-unit CPB sizes
+  UInt cbrFlag           [MAX_CPB_CNT][2];
+  UInt duBitRateValue    [MAX_CPB_CNT][2];  // decoding-unit bit rates
+};
+
+/// Hypothetical reference decoder (HRD) parameters.
+class TComHRD
+{
+private:
+  Bool m_nalHrdParametersPresentFlag;
+  Bool m_vclHrdParametersPresentFlag;
+  Bool m_subPicCpbParamsPresentFlag;
+  UInt m_tickDivisorMinus2;
+  UInt m_duCpbRemovalDelayLengthMinus1;
+  Bool m_subPicCpbParamsInPicTimingSEIFlag;
+  UInt m_dpbOutputDelayDuLengthMinus1;
+  UInt m_bitRateScale;
+  UInt m_cpbSizeScale;
+  UInt m_ducpbSizeScale;
+  UInt m_initialCpbRemovalDelayLengthMinus1;
+  UInt m_cpbRemovalDelayLengthMinus1;
+  UInt m_dpbOutputDelayLengthMinus1;
+  UInt m_numDU;
+  HrdSubLayerInfo m_HRD[MAX_TLAYER]; // per-sub-layer entries; filled in by the setters below before use
+
+public:
+  // Initialiser list follows declaration order. m_ducpbSizeScale and m_numDU
+  // were previously left uninitialised, so their getters could read
+  // indeterminate values; they now default to 0.
+  TComHRD()
+  :m_nalHrdParametersPresentFlag(false)
+  ,m_vclHrdParametersPresentFlag(false)
+  ,m_subPicCpbParamsPresentFlag(false)
+  ,m_tickDivisorMinus2(0)
+  ,m_duCpbRemovalDelayLengthMinus1(0)
+  ,m_subPicCpbParamsInPicTimingSEIFlag(false)
+  ,m_dpbOutputDelayDuLengthMinus1(0)
+  ,m_bitRateScale(0)
+  ,m_cpbSizeScale(0)
+  ,m_ducpbSizeScale(0)
+  ,m_initialCpbRemovalDelayLengthMinus1(23)
+  ,m_cpbRemovalDelayLengthMinus1(23)
+  ,m_dpbOutputDelayLengthMinus1(23)
+  ,m_numDU(0)
+  {}
+
+  virtual ~TComHRD() {}
+
+  Void setNalHrdParametersPresentFlag       ( Bool flag )  { m_nalHrdParametersPresentFlag = flag;         }
+  Bool getNalHrdParametersPresentFlag       ( )            { return m_nalHrdParametersPresentFlag;         }
+
+  Void setVclHrdParametersPresentFlag       ( Bool flag )  { m_vclHrdParametersPresentFlag = flag;         }
+  Bool getVclHrdParametersPresentFlag       ( )            { return m_vclHrdParametersPresentFlag;         }
+
+  Void setSubPicCpbParamsPresentFlag        ( Bool flag )  { m_subPicCpbParamsPresentFlag = flag;          }
+  Bool getSubPicCpbParamsPresentFlag        ( )            { return m_subPicCpbParamsPresentFlag;          }
+
+  Void setTickDivisorMinus2                 ( UInt value ) { m_tickDivisorMinus2 = value;                  }
+  UInt getTickDivisorMinus2                 ( )            { return m_tickDivisorMinus2;                   }
+
+  Void setDuCpbRemovalDelayLengthMinus1     ( UInt value ) { m_duCpbRemovalDelayLengthMinus1 = value;      }
+  UInt getDuCpbRemovalDelayLengthMinus1     ( )            { return m_duCpbRemovalDelayLengthMinus1;       }
+
+  Void setSubPicCpbParamsInPicTimingSEIFlag ( Bool flag)   { m_subPicCpbParamsInPicTimingSEIFlag = flag;   }
+  Bool getSubPicCpbParamsInPicTimingSEIFlag ()             { return m_subPicCpbParamsInPicTimingSEIFlag;   }
+
+  Void setDpbOutputDelayDuLengthMinus1      (UInt value )  { m_dpbOutputDelayDuLengthMinus1 = value;       }
+  UInt getDpbOutputDelayDuLengthMinus1      ()             { return m_dpbOutputDelayDuLengthMinus1;        }
+
+  Void setBitRateScale                      ( UInt value ) { m_bitRateScale = value;                       }
+  UInt getBitRateScale                      ( )            { return m_bitRateScale;                        }
+
+  Void setCpbSizeScale                      ( UInt value ) { m_cpbSizeScale = value;                       }
+  UInt getCpbSizeScale                      ( )            { return m_cpbSizeScale;                        }
+  Void setDuCpbSizeScale                    ( UInt value ) { m_ducpbSizeScale = value;                     }
+  UInt getDuCpbSizeScale                    ( )            { return m_ducpbSizeScale;                      }
+
+  Void setInitialCpbRemovalDelayLengthMinus1( UInt value ) { m_initialCpbRemovalDelayLengthMinus1 = value; }
+  UInt getInitialCpbRemovalDelayLengthMinus1( )            { return m_initialCpbRemovalDelayLengthMinus1;  }
+
+  Void setCpbRemovalDelayLengthMinus1       ( UInt value ) { m_cpbRemovalDelayLengthMinus1 = value;        }
+  UInt getCpbRemovalDelayLengthMinus1       ( )            { return m_cpbRemovalDelayLengthMinus1;         }
+
+  Void setDpbOutputDelayLengthMinus1        ( UInt value ) { m_dpbOutputDelayLengthMinus1 = value;         }
+  UInt getDpbOutputDelayLengthMinus1        ( )            { return m_dpbOutputDelayLengthMinus1;          }
+
+  // Per-sub-layer accessors: 'layer' indexes m_HRD and is assumed < MAX_TLAYER.
+  Void setFixedPicRateFlag       ( Int layer, Bool flag )  { m_HRD[layer].fixedPicRateFlag = flag;         }
+  Bool getFixedPicRateFlag       ( Int layer            )  { return m_HRD[layer].fixedPicRateFlag;         }
+
+  Void setFixedPicRateWithinCvsFlag       ( Int layer, Bool flag )  { m_HRD[layer].fixedPicRateWithinCvsFlag = flag;         }
+  Bool getFixedPicRateWithinCvsFlag       ( Int layer            )  { return m_HRD[layer].fixedPicRateWithinCvsFlag;         }
+
+  Void setPicDurationInTcMinus1  ( Int layer, UInt value ) { m_HRD[layer].picDurationInTcMinus1 = value;   }
+  UInt getPicDurationInTcMinus1  ( Int layer             ) { return m_HRD[layer].picDurationInTcMinus1;    }
+
+  Void setLowDelayHrdFlag        ( Int layer, Bool flag )  { m_HRD[layer].lowDelayHrdFlag = flag;          }
+  Bool getLowDelayHrdFlag        ( Int layer            )  { return m_HRD[layer].lowDelayHrdFlag;          }
+
+  Void setCpbCntMinus1           ( Int layer, UInt value ) { m_HRD[layer].cpbCntMinus1 = value; }
+  UInt getCpbCntMinus1           ( Int layer            )  { return m_HRD[layer].cpbCntMinus1; }
+
+  // nalOrVcl selects between NAL HRD (0) and VCL HRD (1) parameter sets.
+  Void setBitRateValueMinus1     ( Int layer, Int cpbcnt, Int nalOrVcl, UInt value ) { m_HRD[layer].bitRateValueMinus1[cpbcnt][nalOrVcl] = value; }
+  UInt getBitRateValueMinus1     ( Int layer, Int cpbcnt, Int nalOrVcl             ) { return m_HRD[layer].bitRateValueMinus1[cpbcnt][nalOrVcl];  }
+
+  Void setCpbSizeValueMinus1     ( Int layer, Int cpbcnt, Int nalOrVcl, UInt value ) { m_HRD[layer].cpbSizeValue[cpbcnt][nalOrVcl] = value;       }
+  UInt getCpbSizeValueMinus1     ( Int layer, Int cpbcnt, Int nalOrVcl            )  { return m_HRD[layer].cpbSizeValue[cpbcnt][nalOrVcl];        }
+  Void setDuCpbSizeValueMinus1     ( Int layer, Int cpbcnt, Int nalOrVcl, UInt value ) { m_HRD[layer].ducpbSizeValue[cpbcnt][nalOrVcl] = value;       }
+  UInt getDuCpbSizeValueMinus1     ( Int layer, Int cpbcnt, Int nalOrVcl            )  { return m_HRD[layer].ducpbSizeValue[cpbcnt][nalOrVcl];        }
+  Void setDuBitRateValueMinus1     ( Int layer, Int cpbcnt, Int nalOrVcl, UInt value ) { m_HRD[layer].duBitRateValue[cpbcnt][nalOrVcl] = value;       }
+  UInt getDuBitRateValueMinus1     (Int layer, Int cpbcnt, Int nalOrVcl )              { return m_HRD[layer].duBitRateValue[cpbcnt][nalOrVcl];        }
+  Void setCbrFlag                ( Int layer, Int cpbcnt, Int nalOrVcl, UInt value ) { m_HRD[layer].cbrFlag[cpbcnt][nalOrVcl] = value;            }
+  Bool getCbrFlag                ( Int layer, Int cpbcnt, Int nalOrVcl             ) { return m_HRD[layer].cbrFlag[cpbcnt][nalOrVcl];             }
+
+  Void setNumDU                              ( UInt value ) { m_numDU = value;                            }
+  UInt getNumDU                              ( )            { return m_numDU;          }
+  Bool getCpbDpbDelaysPresentFlag() { return getNalHrdParametersPresentFlag() || getVclHrdParametersPresentFlag(); }
+};
+
+/// Timing information (num_units_in_tick / time_scale) carried in the VPS/VUI.
+class TimingInfo
+{
+  Bool m_timingInfoPresentFlag;
+  UInt m_numUnitsInTick;
+  UInt m_timeScale;
+  Bool m_pocProportionalToTimingFlag;
+  Int  m_numTicksPocDiffOneMinus1;
+public:
+  // Defaults correspond to a 60000/1001 (~59.94 Hz) clock with timing absent.
+  TimingInfo()
+  : m_timingInfoPresentFlag(false)
+  , m_numUnitsInTick(1001)
+  , m_timeScale(60000)
+  , m_pocProportionalToTimingFlag(false)
+  , m_numTicksPocDiffOneMinus1(0)
+  {
+  }
+
+  Bool getTimingInfoPresentFlag()             { return m_timingInfoPresentFlag; }
+  Void setTimingInfoPresentFlag(Bool flag)    { m_timingInfoPresentFlag = flag; }
+
+  UInt getNumUnitsInTick()                    { return m_numUnitsInTick; }
+  Void setNumUnitsInTick(UInt value)          { m_numUnitsInTick = value; }
+
+  UInt getTimeScale()                         { return m_timeScale; }
+  Void setTimeScale(UInt value)               { m_timeScale = value; }
+
+  Bool getPocProportionalToTimingFlag()       { return m_pocProportionalToTimingFlag; }
+  Void setPocProportionalToTimingFlag(Bool x) { m_pocProportionalToTimingFlag = x; }
+
+  Int  getNumTicksPocDiffOneMinus1()          { return m_numTicksPocDiffOneMinus1; }
+  Void setNumTicksPocDiffOneMinus1(Int x)     { m_numTicksPocDiffOneMinus1 = x; }
+};
+
+/// Chroma QP adjustment pair. The union lets the same storage be read either
+/// by component name (u.comp.CbOffset / u.comp.CrOffset) or by index
+/// (u.offset[0] / u.offset[1]).
+struct ChromaQpAdj
+{
+  union
+  {
+    struct {
+      Int CbOffset;
+      Int CrOffset;
+    } comp;
+    Int offset[2]; /* two chroma components */
+  } u;
+};
+
+/// Video parameter set (VPS).
+class TComVPS
+{
+private:
+  Int         m_VPSId;
+  UInt        m_uiMaxTLayers;
+  UInt        m_uiMaxLayers;
+  Bool        m_bTemporalIdNestingFlag;
+
+  UInt        m_numReorderPics[MAX_TLAYER];
+  UInt        m_uiMaxDecPicBuffering[MAX_TLAYER];
+  UInt        m_uiMaxLatencyIncrease[MAX_TLAYER]; // Really max latency increase plus 1 (value 0 expresses no limit)
+
+  UInt        m_numHrdParameters;
+  UInt        m_maxNuhReservedZeroLayerId;
+  // Heap arrays allocated by createHrdParamBuffer(); presumably released in
+  // ~TComVPS() (defined out of view) -- NOTE(review): confirm.
+  TComHRD*    m_hrdParameters;
+  UInt*       m_hrdOpSetIdx;
+  Bool*       m_cprmsPresentFlag;
+  UInt        m_numOpSets;
+  Bool        m_layerIdIncludedFlag[MAX_VPS_OP_SETS_PLUS1][MAX_VPS_NUH_RESERVED_ZERO_LAYER_ID_PLUS1];
+
+  TComPTL     m_pcPTL;
+  TimingInfo  m_timingInfo;
+
+public:
+  TComVPS();
+  virtual ~TComVPS();
+
+  // Allocates the per-HRD-parameter arrays, sized by the current value of
+  // getNumHrdParameters() -- call setNumHrdParameters() first.
+  // NOTE(review): overwrites any previous buffers without freeing them, so
+  // calling this twice leaks; verify callers invoke it at most once.
+  Void    createHrdParamBuffer()
+  {
+    m_hrdParameters    = new TComHRD[ getNumHrdParameters() ];
+    m_hrdOpSetIdx      = new UInt   [ getNumHrdParameters() ];
+    m_cprmsPresentFlag = new Bool   [ getNumHrdParameters() ];
+  }
+
+  // i must be < getNumHrdParameters(); no bounds check is performed.
+  TComHRD* getHrdParameters   ( UInt i )             { return &m_hrdParameters[ i ]; }
+  UInt    getHrdOpSetIdx      ( UInt i )             { return m_hrdOpSetIdx[ i ]; }
+  Void    setHrdOpSetIdx      ( UInt val, UInt i )   { m_hrdOpSetIdx[ i ] = val;  }
+  Bool    getCprmsPresentFlag ( UInt i )             { return m_cprmsPresentFlag[ i ]; }
+  Void    setCprmsPresentFlag ( Bool val, UInt i )   { m_cprmsPresentFlag[ i ] = val;  }
+
+  Int     getVPSId       ()                   { return m_VPSId;          }
+  Void    setVPSId       (Int i)              { m_VPSId = i;             }
+
+  UInt    getMaxTLayers  ()                   { return m_uiMaxTLayers;   }
+  Void    setMaxTLayers  (UInt t)             { m_uiMaxTLayers = t; }
+
+  UInt    getMaxLayers   ()                   { return m_uiMaxLayers;   }
+  Void    setMaxLayers   (UInt l)             { m_uiMaxLayers = l; }
+
+  Bool    getTemporalNestingFlag   ()         { return m_bTemporalIdNestingFlag;   }
+  Void    setTemporalNestingFlag   (Bool t)   { m_bTemporalIdNestingFlag = t; }
+
+  // Per-temporal-layer DPB constraints; only setMaxDecPicBuffering checks tLayer.
+  Void    setNumReorderPics(UInt v, UInt tLayer)                { m_numReorderPics[tLayer] = v;    }
+  UInt    getNumReorderPics(UInt tLayer)                        { return m_numReorderPics[tLayer]; }
+
+  Void    setMaxDecPicBuffering(UInt v, UInt tLayer)            { assert(tLayer < MAX_TLAYER); m_uiMaxDecPicBuffering[tLayer] = v;    }
+  UInt    getMaxDecPicBuffering(UInt tLayer)                    { return m_uiMaxDecPicBuffering[tLayer]; }
+
+  Void    setMaxLatencyIncrease(UInt v, UInt tLayer)            { m_uiMaxLatencyIncrease[tLayer] = v;    }
+  UInt    getMaxLatencyIncrease(UInt tLayer)                    { return m_uiMaxLatencyIncrease[tLayer]; }
+
+  UInt    getNumHrdParameters()                                 { return m_numHrdParameters; }
+  Void    setNumHrdParameters(UInt v)                           { m_numHrdParameters = v;    }
+
+  UInt    getMaxNuhReservedZeroLayerId()                        { return m_maxNuhReservedZeroLayerId; }
+  Void    setMaxNuhReservedZeroLayerId(UInt v)                  { m_maxNuhReservedZeroLayerId = v;    }
+
+  UInt    getMaxOpSets()                                        { return m_numOpSets; }
+  Void    setMaxOpSets(UInt v)                                  { m_numOpSets = v;    }
+  Bool    getLayerIdIncludedFlag(UInt opsIdx, UInt id)          { return m_layerIdIncludedFlag[opsIdx][id]; }
+  Void    setLayerIdIncludedFlag(Bool v, UInt opsIdx, UInt id)  { m_layerIdIncludedFlag[opsIdx][id] = v;    }
+
+  TComPTL* getPTL() { return &m_pcPTL; }
+  TimingInfo* getTimingInfo() { return &m_timingInfo; }
+};
+
+/// Rectangular offset window (used for conformance / default display windows).
+/// Getters return 0 for every offset while the window is disabled; any setter
+/// enables it.
+class Window
+{
+private:
+  Bool          m_enabledFlag;
+  Int           m_winLeftOffset;
+  Int           m_winRightOffset;
+  Int           m_winTopOffset;
+  Int           m_winBottomOffset;
+public:
+  Window() : m_enabledFlag(false), m_winLeftOffset(0), m_winRightOffset(0), m_winTopOffset(0), m_winBottomOffset(0) { }
+
+  Bool          getWindowEnabledFlag() const      { return m_enabledFlag; }
+
+  // Disable the window and clear all four offsets.
+  Void          resetWindow()
+  {
+    m_enabledFlag     = false;
+    m_winLeftOffset   = 0;
+    m_winRightOffset  = 0;
+    m_winTopOffset    = 0;
+    m_winBottomOffset = 0;
+  }
+
+  Int           getWindowLeftOffset() const       { return m_enabledFlag ? m_winLeftOffset   : 0; }
+  Int           getWindowRightOffset() const      { return m_enabledFlag ? m_winRightOffset  : 0; }
+  Int           getWindowTopOffset() const        { return m_enabledFlag ? m_winTopOffset    : 0; }
+  Int           getWindowBottomOffset() const     { return m_enabledFlag ? m_winBottomOffset : 0; }
+
+  Void          setWindowLeftOffset(Int val)      { m_winLeftOffset   = val; m_enabledFlag = true; }
+  Void          setWindowRightOffset(Int val)     { m_winRightOffset  = val; m_enabledFlag = true; }
+  Void          setWindowTopOffset(Int val)       { m_winTopOffset    = val; m_enabledFlag = true; }
+  Void          setWindowBottomOffset(Int val)    { m_winBottomOffset = val; m_enabledFlag = true; }
+
+  // Set all four offsets at once and enable the window.
+  Void          setWindow(Int offsetLeft, Int offsetRight, Int offsetTop, Int offsetBottom)
+  {
+    m_enabledFlag       = true;
+    m_winLeftOffset     = offsetLeft;
+    m_winRightOffset    = offsetRight;
+    m_winTopOffset      = offsetTop;
+    m_winBottomOffset   = offsetBottom;
+  }
+};
+
+
+/// Video usability information (VUI) parameters -- see HEVC Annex E.
+class TComVUI
+{
+private:
+  Bool m_aspectRatioInfoPresentFlag;
+  Int  m_aspectRatioIdc;
+  Int  m_sarWidth;
+  Int  m_sarHeight;
+  Bool m_overscanInfoPresentFlag;
+  Bool m_overscanAppropriateFlag;
+  Bool m_videoSignalTypePresentFlag;
+  Int  m_videoFormat;
+  Bool m_videoFullRangeFlag;
+  Bool m_colourDescriptionPresentFlag;
+  Int  m_colourPrimaries;
+  Int  m_transferCharacteristics;
+  Int  m_matrixCoefficients;
+  Bool m_chromaLocInfoPresentFlag;
+  Int  m_chromaSampleLocTypeTopField;
+  Int  m_chromaSampleLocTypeBottomField;
+  Bool m_neutralChromaIndicationFlag;
+  Bool m_fieldSeqFlag;
+  Window m_defaultDisplayWindow;
+  Bool m_frameFieldInfoPresentFlag;
+  Bool m_hrdParametersPresentFlag;
+  Bool m_bitstreamRestrictionFlag;
+  Bool m_tilesFixedStructureFlag;
+  Bool m_motionVectorsOverPicBoundariesFlag;
+  Bool m_restrictedRefPicListsFlag;
+  Int  m_minSpatialSegmentationIdc;
+  Int  m_maxBytesPerPicDenom;
+  Int  m_maxBitsPerMinCuDenom;
+  Int  m_log2MaxMvLengthHorizontal;
+  Int  m_log2MaxMvLengthVertical;
+  TComHRD m_hrdParameters;
+  TimingInfo m_timingInfo;
+
+public:
+  // Defaults follow the HEVC Annex E semantics when the corresponding syntax
+  // element is absent from the bitstream.
+  TComVUI()
+    :m_aspectRatioInfoPresentFlag(false)
+    ,m_aspectRatioIdc(0)
+    ,m_sarWidth(0)
+    ,m_sarHeight(0)
+    ,m_overscanInfoPresentFlag(false)
+    ,m_overscanAppropriateFlag(false)
+    ,m_videoSignalTypePresentFlag(false)
+    ,m_videoFormat(5)                    // 5 == "unspecified" video format
+    ,m_videoFullRangeFlag(false)
+    ,m_colourDescriptionPresentFlag(false)
+    ,m_colourPrimaries(2)                // 2 == "unspecified"
+    ,m_transferCharacteristics(2)        // 2 == "unspecified"
+    ,m_matrixCoefficients(2)             // 2 == "unspecified"
+    ,m_chromaLocInfoPresentFlag(false)
+    ,m_chromaSampleLocTypeTopField(0)
+    ,m_chromaSampleLocTypeBottomField(0)
+    ,m_neutralChromaIndicationFlag(false)
+    ,m_fieldSeqFlag(false)
+    ,m_frameFieldInfoPresentFlag(false)
+    ,m_hrdParametersPresentFlag(false)
+    ,m_bitstreamRestrictionFlag(false)
+    ,m_tilesFixedStructureFlag(false)
+    ,m_motionVectorsOverPicBoundariesFlag(true)
+    ,m_restrictedRefPicListsFlag(true)   // was the integer literal 1; member is Bool
+    ,m_minSpatialSegmentationIdc(0)
+    ,m_maxBytesPerPicDenom(2)
+    ,m_maxBitsPerMinCuDenom(1)
+    ,m_log2MaxMvLengthHorizontal(15)
+    ,m_log2MaxMvLengthVertical(15)
+  {}
+
+  virtual ~TComVUI() {}
+
+  Bool getAspectRatioInfoPresentFlag() { return m_aspectRatioInfoPresentFlag; }
+  Void setAspectRatioInfoPresentFlag(Bool i) { m_aspectRatioInfoPresentFlag = i; }
+
+  Int getAspectRatioIdc() { return m_aspectRatioIdc; }
+  Void setAspectRatioIdc(Int i) { m_aspectRatioIdc = i; }
+
+  Int getSarWidth() { return m_sarWidth; }
+  Void setSarWidth(Int i) { m_sarWidth = i; }
+
+  Int getSarHeight() { return m_sarHeight; }
+  Void setSarHeight(Int i) { m_sarHeight = i; }
+
+  Bool getOverscanInfoPresentFlag() { return m_overscanInfoPresentFlag; }
+  Void setOverscanInfoPresentFlag(Bool i) { m_overscanInfoPresentFlag = i; }
+
+  Bool getOverscanAppropriateFlag() { return m_overscanAppropriateFlag; }
+  Void setOverscanAppropriateFlag(Bool i) { m_overscanAppropriateFlag = i; }
+
+  Bool getVideoSignalTypePresentFlag() { return m_videoSignalTypePresentFlag; }
+  Void setVideoSignalTypePresentFlag(Bool i) { m_videoSignalTypePresentFlag = i; }
+
+  Int getVideoFormat() { return m_videoFormat; }
+  Void setVideoFormat(Int i) { m_videoFormat = i; }
+
+  Bool getVideoFullRangeFlag() { return m_videoFullRangeFlag; }
+  Void setVideoFullRangeFlag(Bool i) { m_videoFullRangeFlag = i; }
+
+  Bool getColourDescriptionPresentFlag() { return m_colourDescriptionPresentFlag; }
+  Void setColourDescriptionPresentFlag(Bool i) { m_colourDescriptionPresentFlag = i; }
+
+  Int getColourPrimaries() { return m_colourPrimaries; }
+  Void setColourPrimaries(Int i) { m_colourPrimaries = i; }
+
+  Int getTransferCharacteristics() { return m_transferCharacteristics; }
+  Void setTransferCharacteristics(Int i) { m_transferCharacteristics = i; }
+
+  Int getMatrixCoefficients() { return m_matrixCoefficients; }
+  Void setMatrixCoefficients(Int i) { m_matrixCoefficients = i; }
+
+  Bool getChromaLocInfoPresentFlag() { return m_chromaLocInfoPresentFlag; }
+  Void setChromaLocInfoPresentFlag(Bool i) { m_chromaLocInfoPresentFlag = i; }
+
+  Int getChromaSampleLocTypeTopField() { return m_chromaSampleLocTypeTopField; }
+  Void setChromaSampleLocTypeTopField(Int i) { m_chromaSampleLocTypeTopField = i; }
+
+  Int getChromaSampleLocTypeBottomField() { return m_chromaSampleLocTypeBottomField; }
+  Void setChromaSampleLocTypeBottomField(Int i) { m_chromaSampleLocTypeBottomField = i; }
+
+  Bool getNeutralChromaIndicationFlag() { return m_neutralChromaIndicationFlag; }
+  Void setNeutralChromaIndicationFlag(Bool i) { m_neutralChromaIndicationFlag = i; }
+
+  Bool getFieldSeqFlag() { return m_fieldSeqFlag; }
+  Void setFieldSeqFlag(Bool i) { m_fieldSeqFlag = i; }
+
+  Bool getFrameFieldInfoPresentFlag() { return m_frameFieldInfoPresentFlag; }
+  Void setFrameFieldInfoPresentFlag(Bool i) { m_frameFieldInfoPresentFlag = i; }
+
+  Window& getDefaultDisplayWindow()                              { return m_defaultDisplayWindow;                }
+  Void    setDefaultDisplayWindow(Window& defaultDisplayWindow ) { m_defaultDisplayWindow = defaultDisplayWindow; }
+
+  Bool getHrdParametersPresentFlag() { return m_hrdParametersPresentFlag; }
+  Void setHrdParametersPresentFlag(Bool i) { m_hrdParametersPresentFlag = i; }
+
+  Bool getBitstreamRestrictionFlag() { return m_bitstreamRestrictionFlag; }
+  Void setBitstreamRestrictionFlag(Bool i) { m_bitstreamRestrictionFlag = i; }
+
+  Bool getTilesFixedStructureFlag() { return m_tilesFixedStructureFlag; }
+  Void setTilesFixedStructureFlag(Bool i) { m_tilesFixedStructureFlag = i; }
+
+  Bool getMotionVectorsOverPicBoundariesFlag() { return m_motionVectorsOverPicBoundariesFlag; }
+  Void setMotionVectorsOverPicBoundariesFlag(Bool i) { m_motionVectorsOverPicBoundariesFlag = i; }
+
+  Bool getRestrictedRefPicListsFlag() { return m_restrictedRefPicListsFlag; }
+  Void setRestrictedRefPicListsFlag(Bool b) { m_restrictedRefPicListsFlag = b; }
+
+  Int getMinSpatialSegmentationIdc() { return m_minSpatialSegmentationIdc; }
+  Void setMinSpatialSegmentationIdc(Int i) { m_minSpatialSegmentationIdc = i; }
+
+  Int getMaxBytesPerPicDenom() { return m_maxBytesPerPicDenom; }
+  Void setMaxBytesPerPicDenom(Int i) { m_maxBytesPerPicDenom = i; }
+
+  Int getMaxBitsPerMinCuDenom() { return m_maxBitsPerMinCuDenom; }
+  Void setMaxBitsPerMinCuDenom(Int i) { m_maxBitsPerMinCuDenom = i; }
+
+  Int getLog2MaxMvLengthHorizontal() { return m_log2MaxMvLengthHorizontal; }
+  Void setLog2MaxMvLengthHorizontal(Int i) { m_log2MaxMvLengthHorizontal = i; }
+
+  Int getLog2MaxMvLengthVertical() { return m_log2MaxMvLengthVertical; }
+  Void setLog2MaxMvLengthVertical(Int i) { m_log2MaxMvLengthVertical = i; }
+
+  TComHRD* getHrdParameters                 ()             { return &m_hrdParameters; }
+
+  TimingInfo* getTimingInfo() { return &m_timingInfo; }
+};
+
+/// SPS class
+class TComSPS
+{
+private:
+  Int          m_SPSId;
+  Int          m_VPSId;
+  ChromaFormat m_chromaFormatIdc;
+
+  UInt        m_uiMaxTLayers;           // maximum number of temporal layers
+
+  // Structure
+  UInt        m_picWidthInLumaSamples;
+  UInt        m_picHeightInLumaSamples;
+
+  Int         m_log2MinCodingBlockSize;
+  Int         m_log2DiffMaxMinCodingBlockSize;
+  UInt        m_uiMaxCUWidth;
+  UInt        m_uiMaxCUHeight;
+  UInt        m_uiMaxCUDepth;
+
+  Window      m_conformanceWindow;
+
+  TComRPSList m_RPSList;
+  Bool        m_bLongTermRefsPresent;
+  Bool        m_TMVPFlagsPresent;
+  Int         m_numReorderPics[MAX_TLAYER];
+
+  // Tool list
+  UInt        m_uiQuadtreeTULog2MaxSize;
+  UInt        m_uiQuadtreeTULog2MinSize;
+  UInt        m_uiQuadtreeTUMaxDepthInter;
+  UInt        m_uiQuadtreeTUMaxDepthIntra;
+  Bool        m_usePCM;
+  UInt        m_pcmLog2MaxSize;
+  UInt        m_uiPCMLog2MinSize;
+  Bool        m_useAMP;
+
+  // Parameter
+  Int         m_uiBitDepth[MAX_NUM_CHANNEL_TYPE];
+  Int         m_qpBDOffset[MAX_NUM_CHANNEL_TYPE];
+  Bool        m_useExtendedPrecision;
+  Bool        m_useHighPrecisionPredictionWeighting;
+  Bool        m_useResidualRotation;
+  Bool        m_useSingleSignificanceMapContext;
+  Bool        m_useGolombRiceParameterAdaptation;
+  Bool        m_alignCABACBeforeBypass;
+  Bool        m_useResidualDPCM[NUMBER_OF_RDPCM_SIGNALLING_MODES];
+  UInt        m_uiPCMBitDepth[MAX_NUM_CHANNEL_TYPE];
+  Bool        m_bPCMFilterDisableFlag;
+  Bool        m_disableIntraReferenceSmoothing;
+
+  UInt        m_uiBitsForPOC;
+  UInt        m_numLongTermRefPicSPS;
+  UInt        m_ltRefPicPocLsbSps[MAX_NUM_LONG_TERM_REF_PICS];
+  Bool        m_usedByCurrPicLtSPSFlag[MAX_NUM_LONG_TERM_REF_PICS];
+  // Max physical transform size
+  UInt        m_uiMaxTrSize;
+
+  Int m_iAMPAcc[MAX_CU_DEPTH];
+  Bool        m_bUseSAO;
+
+  Bool        m_bTemporalIdNestingFlag; // temporal_id_nesting_flag
+
+  Bool        m_scalingListEnabledFlag;
+  Bool        m_scalingListPresentFlag;
+  TComScalingList*     m_scalingList;   //!< ScalingList class pointer
+  UInt        m_uiMaxDecPicBuffering[MAX_TLAYER];
+  UInt        m_uiMaxLatencyIncrease[MAX_TLAYER];  // Really max latency increase plus 1 (value 0 expresses no limit)
+
+  Bool        m_useDF;
+  Bool        m_useStrongIntraSmoothing;
+
+  Bool        m_vuiParametersPresentFlag;
+  TComVUI     m_vuiParameters;
+
+  static const Int   m_winUnitX[MAX_CHROMA_FORMAT_IDC+1];
+  static const Int   m_winUnitY[MAX_CHROMA_FORMAT_IDC+1];
+  TComPTL     m_pcPTL;
+
+#if O0043_BEST_EFFORT_DECODING
+  UInt        m_forceDecodeBitDepth; // 0 = do not force the decoder's bit depth, other = force the decoder's bit depth to this value (best effort decoding)
+#endif
+
+public:
+  TComSPS();
+  virtual ~TComSPS();
+#if O0043_BEST_EFFORT_DECODING
+  Void setForceDecodeBitDepth(UInt bitDepth) { m_forceDecodeBitDepth = bitDepth; }
+  UInt getForceDecodeBitDepth()        const { return m_forceDecodeBitDepth;     }
+#endif
+
+  Int  getVPSId       ()         { return m_VPSId;          }
+  Void setVPSId       (Int i)    { m_VPSId = i;             }
+  Int  getSPSId       ()         { return m_SPSId;          }
+  Void setSPSId       (Int i)    { m_SPSId = i;             }
+  ChromaFormat  getChromaFormatIdc ()         { return m_chromaFormatIdc;       }
+  Void setChromaFormatIdc (ChromaFormat i)    { m_chromaFormatIdc = i;          }
+
+  static Int getWinUnitX (Int chromaFormatIdc) { assert (chromaFormatIdc >= 0 && chromaFormatIdc <= MAX_CHROMA_FORMAT_IDC); return m_winUnitX[chromaFormatIdc];      }
+  static Int getWinUnitY (Int chromaFormatIdc) { assert (chromaFormatIdc >= 0 && chromaFormatIdc <= MAX_CHROMA_FORMAT_IDC); return m_winUnitY[chromaFormatIdc];      }
+
+  // structure
+  Void setPicWidthInLumaSamples       ( UInt u ) { m_picWidthInLumaSamples = u;        }
+  UInt getPicWidthInLumaSamples       ()         { return  m_picWidthInLumaSamples;    }
+  Void setPicHeightInLumaSamples      ( UInt u ) { m_picHeightInLumaSamples = u;       }
+  UInt getPicHeightInLumaSamples      ()         { return  m_picHeightInLumaSamples;   }
+
+  Window& getConformanceWindow()                           { return  m_conformanceWindow;             }
+  Void    setConformanceWindow(Window& conformanceWindow ) { m_conformanceWindow = conformanceWindow; }
+
+  UInt  getNumLongTermRefPicSPS()             { return m_numLongTermRefPicSPS; }
+  Void  setNumLongTermRefPicSPS(UInt val)     { m_numLongTermRefPicSPS = val; }
+
+  UInt  getLtRefPicPocLsbSps(UInt index)             { assert( index < MAX_NUM_LONG_TERM_REF_PICS ); return m_ltRefPicPocLsbSps[index]; }
+  Void  setLtRefPicPocLsbSps(UInt index, UInt val)   { assert( index < MAX_NUM_LONG_TERM_REF_PICS ); m_ltRefPicPocLsbSps[index] = val; }
+
+  Bool getUsedByCurrPicLtSPSFlag(Int i)              { assert( i < MAX_NUM_LONG_TERM_REF_PICS ); return m_usedByCurrPicLtSPSFlag[i];}
+  Void setUsedByCurrPicLtSPSFlag(Int i, Bool x)      { assert( i < MAX_NUM_LONG_TERM_REF_PICS ); m_usedByCurrPicLtSPSFlag[i] = x;}
+
+  Int  getLog2MinCodingBlockSize() const           { return m_log2MinCodingBlockSize; }
+  Void setLog2MinCodingBlockSize(Int val)          { m_log2MinCodingBlockSize = val; }
+  Int  getLog2DiffMaxMinCodingBlockSize() const    { return m_log2DiffMaxMinCodingBlockSize; }
+  Void setLog2DiffMaxMinCodingBlockSize(Int val)   { m_log2DiffMaxMinCodingBlockSize = val; }
+
+  Void setMaxCUWidth  ( UInt u ) { m_uiMaxCUWidth = u;      }
+  UInt getMaxCUWidth  ()         { return  m_uiMaxCUWidth;  }
+  Void setMaxCUHeight ( UInt u ) { m_uiMaxCUHeight = u;     }
+  UInt getMaxCUHeight ()         { return  m_uiMaxCUHeight; }
+  Void setMaxCUDepth  ( UInt u ) { m_uiMaxCUDepth = u;      }
+  UInt getMaxCUDepth  ()         { return  m_uiMaxCUDepth;  }
+  Void setUsePCM      ( Bool b ) { m_usePCM = b;           }
+  Bool getUsePCM      ()         { return m_usePCM;        }
+  Void setPCMLog2MaxSize  ( UInt u ) { m_pcmLog2MaxSize = u;      }
+  UInt getPCMLog2MaxSize  ()         { return  m_pcmLog2MaxSize;  }
+  Void setPCMLog2MinSize  ( UInt u ) { m_uiPCMLog2MinSize = u;      }
+  UInt getPCMLog2MinSize  ()         { return  m_uiPCMLog2MinSize;  }
+  Void setBitsForPOC  ( UInt u ) { m_uiBitsForPOC = u;      }
+  UInt getBitsForPOC  ()         { return m_uiBitsForPOC;   }
+  Bool getUseAMP() { return m_useAMP; }
+  Void setUseAMP( Bool b ) { m_useAMP = b; }
+  Void setQuadtreeTULog2MaxSize( UInt u ) { m_uiQuadtreeTULog2MaxSize = u;    }
+  UInt getQuadtreeTULog2MaxSize()         { return m_uiQuadtreeTULog2MaxSize; }
+  Void setQuadtreeTULog2MinSize( UInt u ) { m_uiQuadtreeTULog2MinSize = u;    }
+  UInt getQuadtreeTULog2MinSize()         { return m_uiQuadtreeTULog2MinSize; }
+  Void setQuadtreeTUMaxDepthInter( UInt u ) { m_uiQuadtreeTUMaxDepthInter = u;    }
+  Void setQuadtreeTUMaxDepthIntra( UInt u ) { m_uiQuadtreeTUMaxDepthIntra = u;    }
+  UInt getQuadtreeTUMaxDepthInter()         { return m_uiQuadtreeTUMaxDepthInter; }
+  UInt getQuadtreeTUMaxDepthIntra()         { return m_uiQuadtreeTUMaxDepthIntra; }
+  Void setNumReorderPics(Int i, UInt tlayer)              { m_numReorderPics[tlayer] = i;    }
+  Int  getNumReorderPics(UInt tlayer)                     { return m_numReorderPics[tlayer]; }
+  Void         createRPSList( Int numRPS );
+  TComRPSList* getRPSList()                      { return &m_RPSList;          }
+  Bool      getLongTermRefsPresent()         { return m_bLongTermRefsPresent; }
+  Void      setLongTermRefsPresent(Bool b)   { m_bLongTermRefsPresent=b;      }
+  Bool      getTMVPFlagsPresent()         { return m_TMVPFlagsPresent; }
+  Void      setTMVPFlagsPresent(Bool b)   { m_TMVPFlagsPresent=b;      }
+  // physical transform
+  Void setMaxTrSize   ( UInt u ) { m_uiMaxTrSize = u;       }
+  UInt getMaxTrSize   ()         { return  m_uiMaxTrSize;   }
+
+  // AMP accuracy
+  Int       getAMPAcc   ( UInt uiDepth ) { return m_iAMPAcc[uiDepth]; }
+  Void      setAMPAcc   ( UInt uiDepth, Int iAccu ) { assert( uiDepth < g_uiMaxCUDepth);  m_iAMPAcc[uiDepth] = iAccu; }
+
+  // Bit-depth
+  Int       getBitDepth     (ChannelType type)         { return m_uiBitDepth[type];   }
+  Void      setBitDepth     (ChannelType type, Int u ) { m_uiBitDepth[type] = u;      }
+  Int       getDifferentialLumaChromaBitDepth() const  { return Int(m_uiBitDepth[CHANNEL_TYPE_LUMA]) - Int(m_uiBitDepth[CHANNEL_TYPE_CHROMA]); }
+  Int       getQpBDOffset   (ChannelType type) const   { return m_qpBDOffset[type];   }
+  Void      setQpBDOffset   (ChannelType type, Int i)  { m_qpBDOffset[type] = i;      }
+  Bool      getUseExtendedPrecision()          const   { return m_useExtendedPrecision;  }
+  Void      setUseExtendedPrecision(Bool value)        { m_useExtendedPrecision = value; }
+  Bool      getUseHighPrecisionPredictionWeighting() const { return m_useHighPrecisionPredictionWeighting; }
+  Void      setUseHighPrecisionPredictionWeighting(Bool value) { m_useHighPrecisionPredictionWeighting = value; }
+
+  Void      setUseSAO                  (Bool bVal)  {m_bUseSAO = bVal;}
+  Bool      getUseSAO                  ()           {return m_bUseSAO;}
+
+  Bool      getUseResidualRotation            ()                 const { return m_useResidualRotation;  }
+  Void      setUseResidualRotation            (const Bool value)       { m_useResidualRotation = value; }
+
+  Bool      getUseSingleSignificanceMapContext()                 const { return m_useSingleSignificanceMapContext;  }
+  Void      setUseSingleSignificanceMapContext(const Bool value)       { m_useSingleSignificanceMapContext = value; }
+
+  Bool      getUseGolombRiceParameterAdaptation()                 const { return m_useGolombRiceParameterAdaptation;  }
+  Void      setUseGolombRiceParameterAdaptation(const Bool value)       { m_useGolombRiceParameterAdaptation = value; }
+
+  Bool      getAlignCABACBeforeBypass         ()                 const { return m_alignCABACBeforeBypass;  }
+  Void      setAlignCABACBeforeBypass         (const Bool value)       { m_alignCABACBeforeBypass = value; }
+
+  Bool      getUseResidualDPCM (const RDPCMSignallingMode signallingMode)        const      { return m_useResidualDPCM[signallingMode];  }
+  Void      setUseResidualDPCM (const RDPCMSignallingMode signallingMode, const Bool value) { m_useResidualDPCM[signallingMode] = value; }
+
+  UInt      getMaxTLayers()                           { return m_uiMaxTLayers; }
+  Void      setMaxTLayers( UInt uiMaxTLayers )        { assert( uiMaxTLayers <= MAX_TLAYER ); m_uiMaxTLayers = uiMaxTLayers; }
+
+  Bool      getTemporalIdNestingFlag()                { return m_bTemporalIdNestingFlag; }
+  Void      setTemporalIdNestingFlag( Bool bValue )   { m_bTemporalIdNestingFlag = bValue; }
+  UInt      getPCMBitDepth         (ChannelType type) const   { return m_uiPCMBitDepth[type]; }
+  Void      setPCMBitDepth         (ChannelType type, UInt u) { m_uiPCMBitDepth[type] = u;    }
+  Void      setPCMFilterDisableFlag     ( Bool   bValue  )    { m_bPCMFilterDisableFlag = bValue; }
+  Bool      getPCMFilterDisableFlag     ()                    { return m_bPCMFilterDisableFlag;   }
+  Void      setDisableIntraReferenceSmoothing (Bool bValue)   { m_disableIntraReferenceSmoothing=bValue; }
+  Bool      getDisableIntraReferenceSmoothing ()        const { return m_disableIntraReferenceSmoothing; }
+
+  Bool getScalingListFlag       ()         { return m_scalingListEnabledFlag;     }
+  Void setScalingListFlag       ( Bool b ) { m_scalingListEnabledFlag  = b;       }
+  Bool getScalingListPresentFlag()         { return m_scalingListPresentFlag;     }
+  Void setScalingListPresentFlag( Bool b ) { m_scalingListPresentFlag  = b;       }
+  Void setScalingList      ( TComScalingList *scalingList);
+  TComScalingList* getScalingList ()       { return m_scalingList; }               //!< get ScalingList class pointer in SPS
+  UInt getMaxDecPicBuffering  (UInt tlayer)            { return m_uiMaxDecPicBuffering[tlayer]; }
+  Void setMaxDecPicBuffering  ( UInt ui, UInt tlayer ) { assert(tlayer < MAX_TLAYER);  m_uiMaxDecPicBuffering[tlayer] = ui;   }
+  UInt getMaxLatencyIncrease  (UInt tlayer)            { return m_uiMaxLatencyIncrease[tlayer];   }
+  Void setMaxLatencyIncrease  ( UInt ui , UInt tlayer) { m_uiMaxLatencyIncrease[tlayer] = ui;      }
+
+  Void setUseStrongIntraSmoothing (Bool bVal)  {m_useStrongIntraSmoothing = bVal;}
+  Bool getUseStrongIntraSmoothing ()           {return m_useStrongIntraSmoothing;}
+
+  Bool getVuiParametersPresentFlag() { return m_vuiParametersPresentFlag; }
+  Void setVuiParametersPresentFlag(Bool b) { m_vuiParametersPresentFlag = b; }
+  TComVUI* getVuiParameters() { return &m_vuiParameters; }
+  Void setHrdParameters( UInt frameRate, UInt numDU, UInt bitRate, Bool randomAccess );
+
+  TComPTL* getPTL()     { return &m_pcPTL; }
+};
+
+/// Reference picture list modification data (modification flags plus the
+/// per-entry reference picture set indices for lists L0 and L1).
+class TComRefPicListModification
+{
+private:
+  Bool      m_bRefPicListModificationFlagL0;         //!< was UInt; declared Bool to match the Bool accessors below
+  Bool      m_bRefPicListModificationFlagL1;         //!< was UInt; declared Bool to match the Bool accessors below
+  UInt      m_RefPicSetIdxL0[REF_PIC_LIST_NUM_IDX];  //!< per-entry RPS indices for list 0
+  UInt      m_RefPicSetIdxL1[REF_PIC_LIST_NUM_IDX];  //!< per-entry RPS indices for list 1
+
+public:
+  TComRefPicListModification();
+  virtual ~TComRefPicListModification();
+
+  Void  create                    ();
+  Void  destroy                   ();
+
+  Bool       getRefPicListModificationFlagL0() { return m_bRefPicListModificationFlagL0; }
+  Void       setRefPicListModificationFlagL0(Bool flag) { m_bRefPicListModificationFlagL0 = flag; }
+  Bool       getRefPicListModificationFlagL1() { return m_bRefPicListModificationFlagL1; }
+  Void       setRefPicListModificationFlagL1(Bool flag) { m_bRefPicListModificationFlagL1 = flag; }
+  // Entry accessors are bounds-checked via assert() (debug builds only).
+  Void       setRefPicSetIdxL0(UInt idx, UInt refPicSetIdx) { assert(idx<REF_PIC_LIST_NUM_IDX); m_RefPicSetIdxL0[idx] = refPicSetIdx; }
+  UInt       getRefPicSetIdxL0(UInt idx) { assert(idx<REF_PIC_LIST_NUM_IDX); return m_RefPicSetIdxL0[idx]; }
+  Void       setRefPicSetIdxL1(UInt idx, UInt refPicSetIdx) { assert(idx<REF_PIC_LIST_NUM_IDX); m_RefPicSetIdxL1[idx] = refPicSetIdx; }
+  UInt       getRefPicSetIdxL1(UInt idx) { assert(idx<REF_PIC_LIST_NUM_IDX); return m_RefPicSetIdxL1[idx]; }
+};
+
+/// PPS class: picture parameter set data (pps/sps ids, QP init and chroma QP
+/// adjustment tables, tiles/wavefronts/slices configuration, deblocking and
+/// scaling-list controls) plus encoder-side bookkeeping such as the CABAC
+/// table index transmitted across slices.
+class TComPPS
+{
+private:
+  Int         m_PPSId;                    // pic_parameter_set_id
+  Int         m_SPSId;                    // seq_parameter_set_id
+  Int         m_picInitQPMinus26;
+  Bool        m_useDQP;
+  Bool        m_bConstrainedIntraPred;    // constrained_intra_pred_flag
+  Bool        m_bSliceChromaQpFlag;       // slicelevel_chroma_qp_flag
+
+  // access channel
+  TComSPS*    m_pcSPS;                    // back-pointer to the active SPS (not owned)
+  UInt        m_uiMaxCuDQPDepth;
+  UInt        m_uiMinCuDQPSize;
+
+  /* Chroma QP Adjustments*/
+  Int         m_MaxCuChromaQpAdjDepth;
+  Int         m_MinCuChromaQpAdjSize;
+  Int         m_ChromaQpAdjTableSize;     // number of valid entries in m_ChromaQpAdjTable (see setChromaQpAdjTableAt)
+  ChromaQpAdj m_ChromaQpAdjTable[7];
+
+  Int         m_chromaCbQpOffset;
+  Int         m_chromaCrQpOffset;
+
+  UInt        m_numRefIdxL0DefaultActive;
+  UInt        m_numRefIdxL1DefaultActive;
+
+  Bool        m_bUseWeightPred;           // Use of Weighting Prediction (P_SLICE)
+  Bool        m_useWeightedBiPred;        // Use of Weighting Bi-Prediction (B_SLICE)
+  Bool        m_OutputFlagPresentFlag;   // Indicates the presence of output_flag in slice header
+  Bool        m_useCrossComponentPrediction;
+  UInt        m_saoOffsetBitShift[MAX_NUM_CHANNEL_TYPE];
+  Bool        m_TransquantBypassEnableFlag; // Indicates presence of cu_transquant_bypass_flag in CUs.
+  Bool        m_useTransformSkip;
+  Int         m_transformSkipLog2MaxSize;
+  Bool        m_dependentSliceSegmentsEnabledFlag;     //!< Indicates the presence of dependent slices
+  Bool        m_tilesEnabledFlag;              //!< Indicates the presence of tiles
+  Bool        m_entropyCodingSyncEnabledFlag;  //!< Indicates the presence of wavefronts
+
+  Bool             m_loopFilterAcrossTilesEnabledFlag;
+  Bool             m_uniformSpacingFlag;
+  Int              m_numTileColumnsMinus1;
+  Int              m_numTileRowsMinus1;
+  std::vector<Int> m_tileColumnWidth;
+  std::vector<Int> m_tileRowHeight;
+
+  Int      m_numSubstreams;
+
+  Int      m_signHideFlag;
+
+  Bool     m_cabacInitPresentFlag;
+  UInt     m_encCABACTableIdx;           // Used to transmit table selection across slices
+
+  Bool     m_sliceHeaderExtensionPresentFlag;
+  Bool     m_loopFilterAcrossSlicesEnabledFlag;
+  Bool     m_deblockingFilterControlPresentFlag;
+  Bool     m_deblockingFilterOverrideEnabledFlag;
+  Bool     m_picDisableDeblockingFilterFlag;
+  Int      m_deblockingFilterBetaOffsetDiv2;    //< beta offset for deblocking filter
+  Int      m_deblockingFilterTcOffsetDiv2;      //< tc offset for deblocking filter
+  Bool     m_scalingListPresentFlag;
+  TComScalingList*     m_scalingList;   //!< ScalingList class pointer
+  Bool     m_listsModificationPresentFlag;
+  UInt     m_log2ParallelMergeLevelMinus2;
+  Int      m_numExtraSliceHeaderBits;
+
+public:
+  TComPPS();
+  virtual ~TComPPS();
+
+  Int       getPPSId ()      { return m_PPSId; }
+  Void      setPPSId (Int i) { m_PPSId = i; }
+  Int       getSPSId ()      { return m_SPSId; }
+  Void      setSPSId (Int i) { m_SPSId = i; }
+
+  Int       getPicInitQPMinus26 ()         { return  m_picInitQPMinus26; }
+  Void      setPicInitQPMinus26 ( Int i )  { m_picInitQPMinus26 = i;     }
+  Bool      getUseDQP ()                   { return m_useDQP;        }
+  Void      setUseDQP ( Bool b )           { m_useDQP   = b;         }
+  Bool      getConstrainedIntraPred ()         { return  m_bConstrainedIntraPred; }
+  Void      setConstrainedIntraPred ( Bool b ) { m_bConstrainedIntraPred = b;     }
+  Bool      getSliceChromaQpFlag ()         { return  m_bSliceChromaQpFlag; }
+  Void      setSliceChromaQpFlag ( Bool b ) { m_bSliceChromaQpFlag = b;     }
+
+  Void      setSPS              ( TComSPS* pcSPS ) { m_pcSPS = pcSPS; }
+  TComSPS*  getSPS              ()         { return m_pcSPS;          }
+  Void      setMaxCuDQPDepth    ( UInt u ) { m_uiMaxCuDQPDepth = u;   }
+  UInt      getMaxCuDQPDepth    ()         { return m_uiMaxCuDQPDepth;}
+  Void      setMinCuDQPSize     ( UInt u ) { m_uiMinCuDQPSize = u;    }
+  UInt      getMinCuDQPSize     ()         { return m_uiMinCuDQPSize; }
+
+  // Per-component chroma QP offset; luma always maps to 0.
+  Void      setQpOffset(ComponentID compID, Int i ) { if (compID==COMPONENT_Cb) m_chromaCbQpOffset = i; else if (compID==COMPONENT_Cr) m_chromaCrQpOffset = i; else assert(0); }
+  Int       getQpOffset(ComponentID compID) const { return (compID==COMPONENT_Y) ? 0 : (compID==COMPONENT_Cb ? m_chromaCbQpOffset : m_chromaCrQpOffset ); }
+
+  Void      setMaxCuChromaQpAdjDepth ( UInt u ) { m_MaxCuChromaQpAdjDepth = u; }
+  UInt      getMaxCuChromaQpAdjDepth ()         { return m_MaxCuChromaQpAdjDepth; }
+  Void      setMinCuChromaQpAdjSize ( UInt u ) { m_MinCuChromaQpAdjSize = u; }
+  UInt      getMinCuChromaQpAdjSize ()         { return m_MinCuChromaQpAdjSize; }
+  Void      clearChromaQpAdjTable() { m_ChromaQpAdjTableSize = 0; }
+  Int       getChromaQpAdjTableSize() { return m_ChromaQpAdjTableSize; }
+  const ChromaQpAdj& getChromaQpAdjTableAt( Int idx ) const { return m_ChromaQpAdjTable[idx]; }
+  // Records a (cb, cr) offset pair at a given slot; table size tracks the
+  // highest index written so far.
+  // NOTE(review): size == max(idx) implies idx is treated as 1-based here;
+  // confirm callers never write to idx==0, and that idx < 7 is guaranteed
+  // (no bounds check on m_ChromaQpAdjTable).
+  Void      setChromaQpAdjTableAt( Int idx, Int cbOffset, Int crOffset )
+  {
+    m_ChromaQpAdjTable[idx].u.comp.CbOffset = cbOffset;
+    m_ChromaQpAdjTable[idx].u.comp.CrOffset = crOffset;
+    m_ChromaQpAdjTableSize = max(m_ChromaQpAdjTableSize, idx);
+  }
+
+  Void      setNumRefIdxL0DefaultActive(UInt ui)    { m_numRefIdxL0DefaultActive=ui;     }
+  UInt      getNumRefIdxL0DefaultActive()           { return m_numRefIdxL0DefaultActive; }
+  Void      setNumRefIdxL1DefaultActive(UInt ui)    { m_numRefIdxL1DefaultActive=ui;     }
+  UInt      getNumRefIdxL1DefaultActive()           { return m_numRefIdxL1DefaultActive; }
+
+  Bool getUseWP                     ()          { return m_bUseWeightPred;  }
+  Bool getWPBiPred                  ()          { return m_useWeightedBiPred;     }
+  Void setUseWP                     ( Bool b )  { m_bUseWeightPred = b;     }
+  Void setWPBiPred                  ( Bool b )  { m_useWeightedBiPred = b;  }
+
+  Bool      getUseCrossComponentPrediction()         const { return m_useCrossComponentPrediction;  }
+  Void      setUseCrossComponentPrediction(Bool value)     { m_useCrossComponentPrediction = value; }
+
+  UInt      getSaoOffsetBitShift(ChannelType type) const            { return m_saoOffsetBitShift[type];       }
+  Void      setSaoOffsetBitShift(ChannelType type, UInt uiBitShift) { m_saoOffsetBitShift[type] = uiBitShift; }
+
+  Void      setOutputFlagPresentFlag( Bool b )  { m_OutputFlagPresentFlag = b;    }
+  Bool      getOutputFlagPresentFlag()          { return m_OutputFlagPresentFlag; }
+  Void      setTransquantBypassEnableFlag( Bool b ) { m_TransquantBypassEnableFlag = b; }
+  Bool      getTransquantBypassEnableFlag()         { return m_TransquantBypassEnableFlag; }
+
+  Bool      getUseTransformSkip       ()         { return m_useTransformSkip;     }
+  Void      setUseTransformSkip       ( Bool b ) { m_useTransformSkip  = b;       }
+  UInt      getTransformSkipLog2MaxSize ()         { return m_transformSkipLog2MaxSize;     }
+  Void      setTransformSkipLog2MaxSize ( UInt u ) { m_transformSkipLog2MaxSize  = u;       }
+
+  Void    setLoopFilterAcrossTilesEnabledFlag  (Bool b)    { m_loopFilterAcrossTilesEnabledFlag = b; }
+  Bool    getLoopFilterAcrossTilesEnabledFlag  ()          { return m_loopFilterAcrossTilesEnabledFlag;   }
+  Bool    getDependentSliceSegmentsEnabledFlag() const     { return m_dependentSliceSegmentsEnabledFlag; }
+  Void    setDependentSliceSegmentsEnabledFlag(Bool val)   { m_dependentSliceSegmentsEnabledFlag = val; }
+  Bool    getEntropyCodingSyncEnabledFlag() const          { return m_entropyCodingSyncEnabledFlag; }
+  Void    setEntropyCodingSyncEnabledFlag(Bool val)        { m_entropyCodingSyncEnabledFlag = val; }
+
+  // Tile geometry: column widths / row heights are only meaningful when
+  // uniform spacing is disabled (the vectors are copied in wholesale).
+  Void     setTilesEnabledFlag       (Bool val)                             { m_tilesEnabledFlag = val; }
+  Bool     getTilesEnabledFlag       () const                               { return m_tilesEnabledFlag; }
+  Void     setTileUniformSpacingFlag (Bool b)                               { m_uniformSpacingFlag = b; }
+  Bool     getTileUniformSpacingFlag () const                               { return m_uniformSpacingFlag; }
+  Void     setNumTileColumnsMinus1   (Int i)                                { m_numTileColumnsMinus1 = i; }
+  Int      getNumTileColumnsMinus1   () const                               { return m_numTileColumnsMinus1; }
+  Void     setTileColumnWidth        (const std::vector<Int>& columnWidth ) { m_tileColumnWidth = columnWidth; }
+  UInt     getTileColumnWidth        (UInt columnIdx) const                 { return  m_tileColumnWidth[columnIdx]; }
+  Void     setNumTileRowsMinus1      (Int i)                                { m_numTileRowsMinus1 = i; }
+  Int      getNumTileRowsMinus1      () const                               { return m_numTileRowsMinus1; }
+  Void     setTileRowHeight          (const std::vector<Int>& rowHeight)    { m_tileRowHeight = rowHeight;  }
+  UInt     getTileRowHeight          (UInt rowIdx) const                    { return m_tileRowHeight[rowIdx]; }
+
+  Void     setNumSubstreams    (Int numSubstreams)                     { m_numSubstreams = numSubstreams; }
+  Int      getNumSubstreams    ()                                      { return m_numSubstreams; }
+
+  Void      setSignHideFlag( Int signHideFlag ) { m_signHideFlag = signHideFlag; }
+  Int       getSignHideFlag()                    { return m_signHideFlag; }
+
+  // NOTE(review): setEncCABACTableIdx takes Int while the member and
+  // getEncCABACTableIdx use UInt -- harmless implicit conversion, but
+  // inconsistent.
+  Void     setCabacInitPresentFlag( Bool flag )     { m_cabacInitPresentFlag = flag;    }
+  Void     setEncCABACTableIdx( Int idx )           { m_encCABACTableIdx = idx;         }
+  Bool     getCabacInitPresentFlag()                { return m_cabacInitPresentFlag;    }
+  UInt     getEncCABACTableIdx()                    { return m_encCABACTableIdx;        }
+  Void     setDeblockingFilterControlPresentFlag( Bool val )  { m_deblockingFilterControlPresentFlag = val; }
+  Bool     getDeblockingFilterControlPresentFlag()            { return m_deblockingFilterControlPresentFlag; }
+  Void     setDeblockingFilterOverrideEnabledFlag( Bool val ) { m_deblockingFilterOverrideEnabledFlag = val; }
+  Bool     getDeblockingFilterOverrideEnabledFlag()           { return m_deblockingFilterOverrideEnabledFlag; }
+  Void     setPicDisableDeblockingFilterFlag(Bool val)        { m_picDisableDeblockingFilterFlag = val; }       //!< set offset for deblocking filter disabled
+  Bool     getPicDisableDeblockingFilterFlag()                { return m_picDisableDeblockingFilterFlag; }      //!< get offset for deblocking filter disabled
+  Void     setDeblockingFilterBetaOffsetDiv2(Int val)         { m_deblockingFilterBetaOffsetDiv2 = val; }       //!< set beta offset for deblocking filter
+  Int      getDeblockingFilterBetaOffsetDiv2()                { return m_deblockingFilterBetaOffsetDiv2; }      //!< get beta offset for deblocking filter
+  Void     setDeblockingFilterTcOffsetDiv2(Int val)           { m_deblockingFilterTcOffsetDiv2 = val; }               //!< set tc offset for deblocking filter
+  Int      getDeblockingFilterTcOffsetDiv2()                  { return m_deblockingFilterTcOffsetDiv2; }              //!< get tc offset for deblocking filter
+  Bool     getScalingListPresentFlag()         { return m_scalingListPresentFlag;     }
+  Void     setScalingListPresentFlag( Bool b ) { m_scalingListPresentFlag  = b;       }
+  Void     setScalingList      ( TComScalingList *scalingList);
+  TComScalingList* getScalingList ()          { return m_scalingList; }         //!< get ScalingList class pointer in PPS
+  Bool getListsModificationPresentFlag ()          { return m_listsModificationPresentFlag; }
+  Void setListsModificationPresentFlag ( Bool b )  { m_listsModificationPresentFlag = b;    }
+  UInt getLog2ParallelMergeLevelMinus2      ()                    { return m_log2ParallelMergeLevelMinus2; }
+  Void setLog2ParallelMergeLevelMinus2      (UInt mrgLevel)       { m_log2ParallelMergeLevelMinus2 = mrgLevel; }
+  Int getNumExtraSliceHeaderBits() { return m_numExtraSliceHeaderBits; }
+  Void setNumExtraSliceHeaderBits(Int i) { m_numExtraSliceHeaderBits = i; }
+  Void      setLoopFilterAcrossSlicesEnabledFlag ( Bool   bValue  )    { m_loopFilterAcrossSlicesEnabledFlag = bValue; }
+  Bool      getLoopFilterAcrossSlicesEnabledFlag ()                    { return m_loopFilterAcrossSlicesEnabledFlag;   }
+    Bool getSliceHeaderExtensionPresentFlag   ()                    { return m_sliceHeaderExtensionPresentFlag; }
+  Void setSliceHeaderExtensionPresentFlag   (Bool val)            { m_sliceHeaderExtensionPresentFlag = val; }
+};
+
+/// Weighted-prediction scaling parameters for a single reference/component
+/// entry (signalled values plus the derived, bit-depth-scaled working values).
+struct WPScalingParam
+{
+  // Explicit weighted prediction parameters parsed in slice header,
+  // or Implicit weighted prediction parameters (8 bits depth values).
+  Bool bPresentFlag;        // whether this entry carries signalled parameters (see note above)
+  UInt uiLog2WeightDenom;
+  Int  iWeight;
+  Int  iOffset;
+
+  // Weighted prediction scaling values built from above parameters (bitdepth scaled):
+  Int  w;
+  Int  o;
+  Int  offset;
+  Int  shift;
+  Int  round;
+};
+
+/// Per-component AC/DC accumulator pair for weighted prediction.
+/// NOTE(review): purpose inferred from the name and 64-bit accumulator types;
+/// confirm against the encoder's WP parameter estimation code.
+struct WPACDCParam
+{
+  Int64 iAC;
+  Int64 iDC;
+};
+
+/// slice header class
+class TComSlice
+{
+
+private:
+  //  Bitstream writing
+  Bool        m_saoEnabledFlag[MAX_NUM_CHANNEL_TYPE];
+  Int         m_iPPSId;               ///< picture parameter set ID
+  Bool        m_PicOutputFlag;        ///< pic_output_flag
+  Int         m_iPOC;
+  Int         m_iLastIDR;
+  Int         m_iAssociatedIRAP;
+  NalUnitType m_iAssociatedIRAPType;
+  static Int  m_prevTid0POC;
+  TComReferencePictureSet *m_pcRPS;
+  TComReferencePictureSet m_LocalRPS;
+  Int         m_iBDidx;
+  TComRefPicListModification m_RefPicListModification;
+  NalUnitType m_eNalUnitType;         ///< Nal unit type for the slice
+  SliceType   m_eSliceType;
+  Int         m_iSliceQp;
+  Bool        m_dependentSliceSegmentFlag;
+#if ADAPTIVE_QP_SELECTION
+  Int         m_iSliceQpBase;
+#endif
+  Bool        m_ChromaQpAdjEnabled;
+  Bool        m_deblockingFilterDisable;
+  Bool        m_deblockingFilterOverrideFlag;      //< offsets for deblocking filter inherit from PPS
+  Int         m_deblockingFilterBetaOffsetDiv2;    //< beta offset for deblocking filter
+  Int         m_deblockingFilterTcOffsetDiv2;      //< tc offset for deblocking filter
+  Int         m_list1IdxToList0Idx[MAX_NUM_REF];
+  Int         m_aiNumRefIdx   [NUM_REF_PIC_LIST_01];    //  for multiple reference of current slice
+
+  Bool        m_bCheckLDC;
+
+  //  Data
+  Int         m_iSliceQpDelta;
+  Int         m_iSliceChromaQpDelta[MAX_NUM_COMPONENT];
+  TComPic*    m_apcRefPicList [NUM_REF_PIC_LIST_01][MAX_NUM_REF+1];
+  Int         m_aiRefPOCList  [NUM_REF_PIC_LIST_01][MAX_NUM_REF+1];
+  Bool        m_bIsUsedAsLongTerm[NUM_REF_PIC_LIST_01][MAX_NUM_REF+1];
+  Int         m_iDepth;
+
+  // referenced slice?
+  Bool        m_bRefenced;
+
+  // access channel
+  TComVPS*    m_pcVPS;
+  TComSPS*    m_pcSPS;
+  TComPPS*    m_pcPPS;
+  TComPic*    m_pcPic;
+#if ADAPTIVE_QP_SELECTION
+  TComTrQuant* m_pcTrQuant;
+#endif
+  UInt        m_colFromL0Flag;  // collocated picture from List0 flag
+
+  Bool        m_noOutputPriorPicsFlag;
+  Bool        m_noRaslOutputFlag;
+  Bool        m_handleCraAsBlaFlag;
+
+  UInt        m_colRefIdx;
+  UInt        m_maxNumMergeCand;
+
+  Double      m_lambdas[MAX_NUM_COMPONENT];
+
+  Bool        m_abEqualRef  [NUM_REF_PIC_LIST_01][MAX_NUM_REF][MAX_NUM_REF];
+  UInt        m_uiTLayer;
+  Bool        m_bTLayerSwitchingFlag;
+
+  SliceConstraint m_sliceMode;
+  UInt        m_sliceArgument;
+  UInt        m_sliceCurStartCtuTsAddr;
+  UInt        m_sliceCurEndCtuTsAddr;
+  UInt        m_sliceIdx;
+  SliceConstraint m_sliceSegmentMode;
+  UInt        m_sliceSegmentArgument;
+  UInt        m_sliceSegmentCurStartCtuTsAddr;
+  UInt        m_sliceSegmentCurEndCtuTsAddr;
+  Bool        m_nextSlice;
+  Bool        m_nextSliceSegment;
+  UInt        m_sliceBits;
+  UInt        m_sliceSegmentBits;
+  Bool        m_bFinalized;
+
+  WPScalingParam  m_weightPredTable[NUM_REF_PIC_LIST_01][MAX_NUM_REF][MAX_NUM_COMPONENT]; // [REF_PIC_LIST_0 or REF_PIC_LIST_1][refIdx][0:Y, 1:U, 2:V]
+  WPACDCParam    m_weightACDCParam[MAX_NUM_COMPONENT];
+
+  std::vector<UInt> m_substreamSizes;
+
+  TComScalingList*     m_scalingList;                 //!< pointer of quantization matrix
+  Bool        m_cabacInitFlag;
+
+  Bool       m_bLMvdL1Zero;
+  Bool       m_temporalLayerNonReferenceFlag;
+  Bool       m_LFCrossSliceBoundaryFlag;
+
+  Bool       m_enableTMVPFlag;
+public:
+  TComSlice();
+  virtual ~TComSlice();
+  Void      initSlice       ();
+  Void      initTiles();
+
+  Void      setVPS          ( TComVPS* pcVPS ) { m_pcVPS = pcVPS; }
+  TComVPS*  getVPS          () { return m_pcVPS; }
+  Void      setSPS          ( TComSPS* pcSPS ) { m_pcSPS = pcSPS; }
+  TComSPS*  getSPS          () { return m_pcSPS; }
+  const TComSPS*  getSPS() const  { return m_pcSPS; }
+
+  Void      setPPS          ( TComPPS* pcPPS )         { assert(pcPPS!=NULL); m_pcPPS = pcPPS; m_iPPSId = pcPPS->getPPSId(); }
+  TComPPS*  getPPS          () { return m_pcPPS; }
+  const TComPPS*  getPPS() const { return m_pcPPS; }
+
+#if ADAPTIVE_QP_SELECTION
+  Void          setTrQuant          ( TComTrQuant* pcTrQuant ) { m_pcTrQuant = pcTrQuant; }
+  TComTrQuant*  getTrQuant          () { return m_pcTrQuant; }
+#endif
+
+  Void      setPPSId        ( Int PPSId )         { m_iPPSId = PPSId; }
+  Int       getPPSId        () { return m_iPPSId; }
+  Void      setPicOutputFlag( Bool b )         { m_PicOutputFlag = b;    }
+  Bool      getPicOutputFlag()                 { return m_PicOutputFlag; }
+  Void      setSaoEnabledFlag(ChannelType chType, Bool s) {m_saoEnabledFlag[chType] =s; }
+  Bool      getSaoEnabledFlag(ChannelType chType) { return m_saoEnabledFlag[chType]; }
+  Void      setRPS          ( TComReferencePictureSet *pcRPS ) { m_pcRPS = pcRPS; }
+  TComReferencePictureSet*  getRPS          () { return m_pcRPS; }
+  TComReferencePictureSet*  getLocalRPS     () { return &m_LocalRPS; }
+
+  Void      setRPSidx          ( Int iBDidx ) { m_iBDidx = iBDidx; }
+  Int       getRPSidx          () { return m_iBDidx; }
+  Int       getPrevTid0POC      ()                        { return  m_prevTid0POC;       }
+  TComRefPicListModification* getRefPicListModification() { return &m_RefPicListModification; }
+  Void      setLastIDR(Int iIDRPOC)                       { m_iLastIDR = iIDRPOC; }
+  Int       getLastIDR()                                  { return m_iLastIDR; }
+  Void      setAssociatedIRAPPOC(Int iAssociatedIRAPPOC)             { m_iAssociatedIRAP = iAssociatedIRAPPOC; }
+  Int       getAssociatedIRAPPOC()                        { return m_iAssociatedIRAP; }
+  Void      setAssociatedIRAPType(NalUnitType associatedIRAPType)    { m_iAssociatedIRAPType = associatedIRAPType; }
+  NalUnitType getAssociatedIRAPType()                        { return m_iAssociatedIRAPType; }
+  SliceType getSliceType    ()                          { return  m_eSliceType;         }
+  Int       getPOC          ()                          { return  m_iPOC;           }
+  Int       getSliceQp      ()                          { return  m_iSliceQp;           }
+  Bool      getDependentSliceSegmentFlag()             const { return m_dependentSliceSegmentFlag; }
+  Void      setDependentSliceSegmentFlag(Bool val)           { m_dependentSliceSegmentFlag = val; }
+#if ADAPTIVE_QP_SELECTION
+  Int       getSliceQpBase  ()                         const { return  m_iSliceQpBase;       }
+#endif
+  Int       getSliceQpDelta ()                         const { return  m_iSliceQpDelta;      }
+  Int       getSliceChromaQpDelta (ComponentID compID) const { return  isLuma(compID) ? 0 : m_iSliceChromaQpDelta[compID]; }
+  Bool      getUseChromaQpAdj()                        const { return m_ChromaQpAdjEnabled; }
+  Bool      getDeblockingFilterDisable()               const { return  m_deblockingFilterDisable; }
+  Bool      getDeblockingFilterOverrideFlag()          const { return  m_deblockingFilterOverrideFlag; }
+  Int       getDeblockingFilterBetaOffsetDiv2()        const { return  m_deblockingFilterBetaOffsetDiv2; }
+  Int       getDeblockingFilterTcOffsetDiv2()          const { return  m_deblockingFilterTcOffsetDiv2; }
+
+  Int       getNumRefIdx        ( RefPicList e )       const    { return  m_aiNumRefIdx[e];             }
+  TComPic*  getPic              ()                              { return  m_pcPic;                      }
+  TComPic*  getRefPic           ( RefPicList e, Int iRefIdx)    { return  m_apcRefPicList[e][iRefIdx];  }
+  Int       getRefPOC           ( RefPicList e, Int iRefIdx)    { return  m_aiRefPOCList[e][iRefIdx];   }
+  Int       getDepth            ()                              { return  m_iDepth;                     }
+  UInt      getColFromL0Flag    ()                              { return  m_colFromL0Flag;              }
+  UInt      getColRefIdx        ()                              { return  m_colRefIdx;                  }
+  Void      checkColRefIdx      (UInt curSliceIdx, TComPic* pic);
+  Bool      getIsUsedAsLongTerm (Int i, Int j)                  { return m_bIsUsedAsLongTerm[i][j]; }
+  Void      setIsUsedAsLongTerm (Int i, Int j, Bool value)      { m_bIsUsedAsLongTerm[i][j] = value; }
+  Bool      getCheckLDC     ()                                  { return m_bCheckLDC; }
+  Bool      getMvdL1ZeroFlag ()                                 { return m_bLMvdL1Zero;    }
+  Int       getNumRpsCurrTempList();
+  Int       getList1IdxToList0Idx ( Int list1Idx )              { return m_list1IdxToList0Idx[list1Idx]; }
+  Void      setReferenced(Bool b)                               { m_bRefenced = b; }
+  Bool      isReferenced()                                      { return m_bRefenced; }
+  Bool      isReferenceNalu()                                   { return ((getNalUnitType() <= NAL_UNIT_RESERVED_VCL_R15) && (getNalUnitType()%2 != 0)) || ((getNalUnitType() >= NAL_UNIT_CODED_SLICE_BLA_W_LP) && (getNalUnitType() <= NAL_UNIT_RESERVED_IRAP_VCL23) ); }
+  Void      setPOC              ( Int i )                       { m_iPOC              = i; if ((getTLayer()==0) && (isReferenceNalu() && (getNalUnitType()!=NAL_UNIT_CODED_SLICE_RASL_R)&& (getNalUnitType()!=NAL_UNIT_CODED_SLICE_RADL_R))) {m_prevTid0POC=i;} }
+  Void      setNalUnitType      ( NalUnitType e )               { m_eNalUnitType      = e;      }
+  NalUnitType getNalUnitType    () const                        { return m_eNalUnitType;        }
+  Bool      getRapPicFlag       ();
+  Bool      getIdrPicFlag       ()                              { return getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_W_RADL || getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_N_LP; }
+  Bool      isIRAP              () const                        { return (getNalUnitType() >= 16) && (getNalUnitType() <= 23); }
+  Void      checkCRA(TComReferencePictureSet *pReferencePictureSet, Int& pocCRA, NalUnitType& associatedIRAPType, TComList<TComPic *>& rcListPic);
+  Void      decodingRefreshMarking(Int& pocCRA, Bool& bRefreshPending, TComList<TComPic*>& rcListPic);
+  Void      setSliceType        ( SliceType e )                 { m_eSliceType        = e;      }
+  Void      setSliceQp          ( Int i )                       { m_iSliceQp          = i;      }
+#if ADAPTIVE_QP_SELECTION
+  Void      setSliceQpBase      ( Int i )                       { m_iSliceQpBase      = i;      }
+#endif
+  Void      setSliceQpDelta     ( Int i )                       { m_iSliceQpDelta     = i;      }
+  Void      setSliceChromaQpDelta( ComponentID compID, Int i )  { m_iSliceChromaQpDelta[compID] = isLuma(compID) ? 0 : i;  }
+  Void      setUseChromaQpAdj   ( Bool b )                      { m_ChromaQpAdjEnabled = b;     }
+  Void      setDeblockingFilterDisable( Bool b )                { m_deblockingFilterDisable= b;      }
+  Void      setDeblockingFilterOverrideFlag( Bool b )           { m_deblockingFilterOverrideFlag = b; }
+  Void      setDeblockingFilterBetaOffsetDiv2( Int i )          { m_deblockingFilterBetaOffsetDiv2 = i; }
+  Void      setDeblockingFilterTcOffsetDiv2( Int i )            { m_deblockingFilterTcOffsetDiv2 = i; }
+
+  Void      setRefPic           ( TComPic* p, RefPicList e, Int iRefIdx ) { m_apcRefPicList[e][iRefIdx] = p; }
+  Void      setRefPOC           ( Int i, RefPicList e, Int iRefIdx ) { m_aiRefPOCList[e][iRefIdx] = i; }
+  Void      setNumRefIdx        ( RefPicList e, Int i )         { m_aiNumRefIdx[e]    = i;      }
+  Void      setPic              ( TComPic* p )                  { m_pcPic             = p;      }
+  Void      setDepth            ( Int iDepth )                  { m_iDepth            = iDepth; }
+
+  Void      setRefPicList       ( TComList<TComPic*>& rcListPic, Bool checkNumPocTotalCurr = false );
+  Void      setRefPOCList       ();
+  Void      setColFromL0Flag    ( UInt colFromL0 ) { m_colFromL0Flag = colFromL0; }
+  Void      setColRefIdx        ( UInt refIdx) { m_colRefIdx = refIdx; }
+  Void      setCheckLDC         ( Bool b )                      { m_bCheckLDC = b; }
+  Void      setMvdL1ZeroFlag     ( Bool b)                       { m_bLMvdL1Zero = b; }
+
+  Bool      isIntra         ()                          { return  m_eSliceType == I_SLICE;  }
+  Bool      isInterB        ()                          { return  m_eSliceType == B_SLICE;  }
+  Bool      isInterP        ()                          { return  m_eSliceType == P_SLICE;  }
+
+  Void      setLambdas ( const Double lambdas[MAX_NUM_COMPONENT] ) { for (Int component = 0; component < MAX_NUM_COMPONENT; component++) m_lambdas[component] = lambdas[component]; }
+  const Double* getLambdas() const { return m_lambdas; }
+
+  Void      initEqualRef();
+  Bool      isEqualRef  ( RefPicList e, Int iRefIdx1, Int iRefIdx2 )  // true iff both indices of list e refer to the same picture
+  {
+    assert(e<NUM_REF_PIC_LIST_01);
+    if (iRefIdx1 < 0 || iRefIdx2 < 0) return false;  // invalid (negative) reference indices are never equal
+    return m_abEqualRef[e][iRefIdx1][iRefIdx2];
+  }
+
+  Void setEqualRef( RefPicList e, Int iRefIdx1, Int iRefIdx2, Bool b)  // records equality symmetrically; indices must be valid here (no negative guard)
+  {
+    assert(e<NUM_REF_PIC_LIST_01);
+    m_abEqualRef[e][iRefIdx1][iRefIdx2] = m_abEqualRef[e][iRefIdx2][iRefIdx1] = b;
+  }
+
+  static Void      sortPicList         ( TComList<TComPic*>& rcListPic );  // sorts the DPB list (defined in .cpp)
+  Void setList1IdxToList0Idx();
+
+  UInt getTLayer             ()                            { return m_uiTLayer;                      }  // temporal sub-layer id
+  Void setTLayer             ( UInt uiTLayer )             { m_uiTLayer = uiTLayer;                  }
+
+  Void setTLayerInfo( UInt uiTLayer );
+  Void decodingMarking( TComList<TComPic*>& rcListPic, Int iGOPSIze, Int& iMaxRefPicNum );  // NOTE(review): parameter name 'iGOPSIze' is a typo inherited from upstream HM
+  Void checkLeadingPictureRestrictions( TComList<TComPic*>& rcListPic );
+  Void applyReferencePictureSet( TComList<TComPic*>& rcListPic, TComReferencePictureSet *RPSList);
+  Bool isTemporalLayerSwitchingPoint( TComList<TComPic*>& rcListPic );
+  Bool isStepwiseTemporalLayerSwitchingPointCandidate( TComList<TComPic*>& rcListPic );
+#if ALLOW_RECOVERY_POINT_AS_RAP
+  Int  checkThatAllRefPicsAreAvailable( TComList<TComPic*>& rcListPic, TComReferencePictureSet *pReferencePictureSet, Bool printErrors, Int pocRandomAccess = 0, Bool bUseRecoveryPoint = false);  // NOTE(review): Int return semantics defined in the .cpp -- presumably a missing-reference indicator; confirm there
+  Void createExplicitReferencePictureSetFromReference( TComList<TComPic*>& rcListPic, TComReferencePictureSet *pReferencePictureSet, Bool isRAP, Int pocRandomAccess = 0, Bool bUseRecoveryPoint = false);
+#else
+  Int  checkThatAllRefPicsAreAvailable( TComList<TComPic*>& rcListPic, TComReferencePictureSet *pReferencePictureSet, Bool printErrors, Int pocRandomAccess = 0);
+  Void createExplicitReferencePictureSetFromReference( TComList<TComPic*>& rcListPic, TComReferencePictureSet *pReferencePictureSet, Bool isRAP);
+#endif
+  Void setMaxNumMergeCand               (UInt val )          { m_maxNumMergeCand = val;                 }  // five_minus_max_num_merge_cand derived value
+  UInt getMaxNumMergeCand               ()                   { return m_maxNumMergeCand;                }
+
+  Void setNoOutputPriorPicsFlag         ( Bool val )         { m_noOutputPriorPicsFlag = val;           }
+  Bool getNoOutputPriorPicsFlag         ()                   { return m_noOutputPriorPicsFlag;          }
+
+  Void setNoRaslOutputFlag              ( Bool val )         { m_noRaslOutputFlag = val;                }
+  Bool getNoRaslOutputFlag              ()                   { return m_noRaslOutputFlag;               }
+
+  Void setHandleCraAsBlaFlag            ( Bool val )         { m_handleCraAsBlaFlag = val;              }
+  Bool getHandleCraAsBlaFlag            ()                   { return m_handleCraAsBlaFlag;             }
+
+  Void setSliceMode                     ( SliceConstraint mode ) { m_sliceMode = mode;                  }  // how slices are constrained (e.g. by CTUs or bytes)
+  SliceConstraint getSliceMode          () const            { return m_sliceMode;                       }
+  Void setSliceArgument                 ( UInt uiArgument ) { m_sliceArgument = uiArgument;             }  // argument for the chosen slice constraint
+  UInt getSliceArgument                 ()                  { return m_sliceArgument;                   }
+  Void setSliceCurStartCtuTsAddr        ( UInt ctuTsAddr )  { m_sliceCurStartCtuTsAddr = ctuTsAddr;     } // CTU Tile-scan address (as opposed to raster-scan)
+  UInt getSliceCurStartCtuTsAddr        () const            { return m_sliceCurStartCtuTsAddr;          } // CTU Tile-scan address (as opposed to raster-scan)
+  Void setSliceCurEndCtuTsAddr          ( UInt ctuTsAddr )  { m_sliceCurEndCtuTsAddr = ctuTsAddr;       } // CTU Tile-scan address (as opposed to raster-scan)
+  UInt getSliceCurEndCtuTsAddr          () const            { return m_sliceCurEndCtuTsAddr;            } // CTU Tile-scan address (as opposed to raster-scan)
+  Void setSliceIdx                      ( UInt i)           { m_sliceIdx = i;                           }
+  UInt getSliceIdx                      ()                  { return  m_sliceIdx;                       }
+  Void copySliceInfo                    (TComSlice *pcSliceSrc);
+  Void setSliceSegmentMode              ( SliceConstraint mode ) { m_sliceSegmentMode = mode;           }  // dependent-slice-segment counterparts of the slice fields above
+  SliceConstraint getSliceSegmentMode   () const            { return m_sliceSegmentMode;                }
+  Void setSliceSegmentArgument          ( UInt uiArgument ) { m_sliceSegmentArgument = uiArgument;      }
+  UInt getSliceSegmentArgument          ()                  { return m_sliceSegmentArgument;            }
+  Void setSliceSegmentCurStartCtuTsAddr ( UInt ctuTsAddr )  { m_sliceSegmentCurStartCtuTsAddr = ctuTsAddr; } // CTU Tile-scan address (as opposed to raster-scan)
+  UInt getSliceSegmentCurStartCtuTsAddr () const            { return m_sliceSegmentCurStartCtuTsAddr;      } // CTU Tile-scan address (as opposed to raster-scan)
+  Void setSliceSegmentCurEndCtuTsAddr   ( UInt ctuTsAddr )  { m_sliceSegmentCurEndCtuTsAddr = ctuTsAddr;   } // CTU Tile-scan address (as opposed to raster-scan)
+  UInt getSliceSegmentCurEndCtuTsAddr   () const            { return m_sliceSegmentCurEndCtuTsAddr;        } // CTU Tile-scan address (as opposed to raster-scan)
+  Void setSliceBits                     ( UInt uiVal )      { m_sliceBits = uiVal;                      }
+  UInt getSliceBits                     ()                  { return m_sliceBits;                       }
+  Void setSliceSegmentBits              ( UInt uiVal )      { m_sliceSegmentBits = uiVal;            }
+  UInt getSliceSegmentBits              ()                  { return m_sliceSegmentBits;             }
+  Void setFinalized                     ( Bool uiVal )      { m_bFinalized = uiVal;                       }
+  Bool getFinalized                     ()                  { return m_bFinalized;                        }
+  Void  setWpScaling    ( WPScalingParam  wp[NUM_REF_PIC_LIST_01][MAX_NUM_REF][MAX_NUM_COMPONENT] ) { memcpy(m_weightPredTable, wp, sizeof(WPScalingParam)*NUM_REF_PIC_LIST_01*MAX_NUM_REF*MAX_NUM_COMPONENT); }  // raw memcpy: WPScalingParam must remain trivially copyable
+  Void  getWpScaling    ( RefPicList e, Int iRefIdx, WPScalingParam *&wp);
+
+  Void  resetWpScaling  ();
+  Void  initWpScaling   ();
+  inline Bool applyWP   () { return( (m_eSliceType==P_SLICE && m_pcPPS->getUseWP()) || (m_eSliceType==B_SLICE && m_pcPPS->getWPBiPred()) ); }  // weighted prediction: uni-pred WP for P, bi-pred WP for B
+
+  Void  setWpAcDcParam  ( WPACDCParam wp[MAX_NUM_COMPONENT] ) { memcpy(m_weightACDCParam, wp, sizeof(WPACDCParam)*MAX_NUM_COMPONENT); }
+  Void  getWpAcDcParam  ( WPACDCParam *&wp );
+  Void  initWpAcDcParam ();
+
+  Void clearSubstreamSizes       ( )                        { return m_substreamSizes.clear();        }  // 'return' of a void expression: legal, but unconventional
+  UInt getNumberOfSubstreamSizes ( )                        { return (UInt) m_substreamSizes.size();  }
+  Void addSubstreamSize          ( UInt size )              { m_substreamSizes.push_back(size);       }
+  UInt getSubstreamSize          ( Int idx )                { assert(idx<getNumberOfSubstreamSizes()); return m_substreamSizes[idx]; }  // negative idx converts to a large UInt and trips the assert
+
+  Void  setScalingList              ( TComScalingList* scalingList ) { m_scalingList = scalingList; }  // pointer stored, not owned
+  TComScalingList*   getScalingList ()                               { return m_scalingList; }
+  Void  setDefaultScalingList       ();
+  Bool  checkDefaultScalingList     ();
+  Void      setCabacInitFlag  ( Bool val ) { m_cabacInitFlag = val;      }  //!< set CABAC initial flag
+  Bool      getCabacInitFlag  ()           { return m_cabacInitFlag;     }  //!< get CABAC initial flag
+  Bool      getTemporalLayerNonReferenceFlag()       { return m_temporalLayerNonReferenceFlag;}
+  Void      setTemporalLayerNonReferenceFlag(Bool x) { m_temporalLayerNonReferenceFlag = x;}
+  Void      setLFCrossSliceBoundaryFlag     ( Bool   val )    { m_LFCrossSliceBoundaryFlag = val; }  // loop filter may cross slice boundaries when set
+  Bool      getLFCrossSliceBoundaryFlag     ()                { return m_LFCrossSliceBoundaryFlag;}
+
+  Void      setEnableTMVPFlag     ( Bool   b )    { m_enableTMVPFlag = b; }  // temporal motion vector prediction enable
+  Bool      getEnableTMVPFlag     ()              { return m_enableTMVPFlag;}
+
+protected:
+  TComPic*  xGetRefPic        (TComList<TComPic*>& rcListPic, Int poc);  // look up a short-term reference by POC
+  TComPic*  xGetLongTermRefPic(TComList<TComPic*>& rcListPic, Int poc, Bool pocHasMsb);  // look up a long-term reference, optionally matching POC MSBs
+};// END CLASS DEFINITION TComSlice
+
+
+template <class T> class ParameterSetMap  // id -> parameter-set map that OWNS its stored pointers. NOTE(review): implicit copy would double-free -- confirm instances are never copied
+{
+public:
+  ParameterSetMap(Int maxId)
+  :m_maxId (maxId)
+  {}
+
+  ~ParameterSetMap()
+  {
+    for (typename std::map<Int,T *>::iterator i = m_paramsetMap.begin(); i!= m_paramsetMap.end(); i++)
+    {
+      delete (*i).second;  // the map owns every stored parameter set
+    }
+  }
+
+  Void storePS(Int psId, T *ps)  // takes ownership of ps; frees any previous entry stored under the same id
+  {
+    assert ( psId < m_maxId );
+    if ( m_paramsetMap.find(psId) != m_paramsetMap.end() )
+    {
+      delete m_paramsetMap[psId];
+    }
+    m_paramsetMap[psId] = ps;
+  }
+
+  Void mergePSList(ParameterSetMap<T> &rPsList)  // moves all entries (and their ownership) from rPsList into this map
+  {
+    for (typename std::map<Int,T *>::iterator i = rPsList.m_paramsetMap.begin(); i!= rPsList.m_paramsetMap.end(); i++)
+    {
+      storePS(i->first, i->second);
+    }
+    rPsList.m_paramsetMap.clear();  // source map must not keep (and later delete) the transferred pointers
+  }
+
+
+  T* getPS(Int psId)  // NULL when no set with this id has been stored; ownership stays with the map
+  {
+    return ( m_paramsetMap.find(psId) == m_paramsetMap.end() ) ? NULL : m_paramsetMap[psId];
+  }
+
+  T* getFirstPS()  // lowest-id entry, or NULL when the map is empty
+  {
+    return (m_paramsetMap.begin() == m_paramsetMap.end() ) ? NULL : m_paramsetMap.begin()->second;
+  }
+
+private:
+  std::map<Int,T *> m_paramsetMap;
+  Int               m_maxId;  // exclusive upper bound on valid ids (enforced only via assert in storePS)
+};
+
+class ParameterSetManager  // central store for VPS/SPS/PPS plus the ids of the currently active sets
+{
+public:
+  ParameterSetManager();
+  virtual ~ParameterSetManager();
+
+  //! store video parameter set and take ownership of it
+  Void storeVPS(TComVPS *vps) { m_vpsMap.storePS( vps->getVPSId(), vps); };
+  //! get pointer to existing video parameter set
+  TComVPS* getVPS(Int vpsId)  { return m_vpsMap.getPS(vpsId); };
+  TComVPS* getFirstVPS()      { return m_vpsMap.getFirstPS(); };
+
+  //! store sequence parameter set and take ownership of it
+  Void storeSPS(TComSPS *sps) { m_spsMap.storePS( sps->getSPSId(), sps); };
+  //! get pointer to existing sequence parameter set
+  TComSPS* getSPS(Int spsId)  { return m_spsMap.getPS(spsId); };
+  TComSPS* getFirstSPS()      { return m_spsMap.getFirstPS(); };
+
+  //! store picture parameter set and take ownership of it
+  Void storePPS(TComPPS *pps) { m_ppsMap.storePS( pps->getPPSId(), pps); };
+  //! get pointer to existing picture parameter set
+  TComPPS* getPPS(Int ppsId)  { return m_ppsMap.getPS(ppsId); };
+  TComPPS* getFirstPPS()      { return m_ppsMap.getFirstPS(); };
+
+  //! activate a SPS from a active parameter sets SEI message
+  //! \returns true, if activation is successful
+  Bool activateSPSWithSEI(Int SPSId);
+
+  //! activate a PPS and depending on isIDR parameter also SPS and VPS
+  //! \returns true, if activation is successful
+  Bool activatePPS(Int ppsId, Bool isIRAP);
+
+  TComVPS* getActiveVPS(){ return m_vpsMap.getPS(m_activeVPSId); };  // NULL until a set has been activated
+  TComSPS* getActiveSPS(){ return m_spsMap.getPS(m_activeSPSId); };
+  TComPPS* getActivePPS(){ return m_ppsMap.getPS(m_activePPSId); };
+
+protected:
+
+  ParameterSetMap<TComVPS> m_vpsMap;
+  ParameterSetMap<TComSPS> m_spsMap;
+  ParameterSetMap<TComPPS> m_ppsMap;
+
+  Int m_activeVPSId;  // ids of the currently active sets (set by the activate* methods)
+  Int m_activeSPSId;
+  Int m_activePPSId;
+};
+
+//! \}
+
+#endif // __TCOMSLICE__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComTU.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,254 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "TComTU.h"
+#include "TComRom.h"
+#include "TComDataCU.h"
+#include "TComPic.h"
+
+//----------------------------------------------------------------------------------------------------------------------
+
+/*static*/ const UInt TComTU::NUMBER_OF_SECTIONS[TComTU::NUMBER_OF_SPLIT_MODES] = { 1, 2, 4 };  // sections produced by DONT_SPLIT / VERTICAL_SPLIT / QUAD_SPLIT
+
+static     const UInt         partIdxStepShift  [TComTU::NUMBER_OF_SPLIT_MODES] = { 0, 1, 2 };  // right-shift applied to the parent's part-index step for each split mode
+
+//----------------------------------------------------------------------------------------------------------------------
+
+TComTU::TComTU(TComDataCU *pcCU, const UInt absPartIdxCU, const UInt cuDepth, const UInt initTrDepthRelCU)  // top-level TU spanning the whole CU (no parent)
+  : mChromaFormat(pcCU->getSlice()->getSPS()->getChromaFormatIdc()),
+    mbProcessLastOfLevel(true), // does not matter. the top level is not 4 quadrants.
+    mCuDepth(cuDepth),
+    mSection(0),
+    mSplitMode(DONT_SPLIT),
+    mAbsPartIdxCU(absPartIdxCU),
+    mAbsPartIdxTURelCU(0),
+    mAbsPartIdxStep(pcCU->getPic()->getNumPartitionsInCtu() >> (pcCU->getDepth(absPartIdxCU)<<1)),  // minimal partitions covered by a CU at this depth (>> 2*depth)
+    mpcCU(pcCU),
+    mLog2TrLumaSize(0),
+    mpParent(NULL)
+{
+  TComSPS *pSPS=pcCU->getSlice()->getSPS();
+  mLog2TrLumaSize = g_aucConvertToBit[pSPS->getMaxCUWidth() >> (mCuDepth+initTrDepthRelCU)]+2;  // log2 of the luma TU width at the initial transform depth
+
+  const UInt baseOffset444=pcCU->getPic()->getMinCUWidth()*pcCU->getPic()->getMinCUHeight()*absPartIdxCU;  // coefficient offset of this CU measured in 4:4:4 luma samples
+
+  for(UInt i=0; i<MAX_NUM_COMPONENT; i++)
+  {
+    mTrDepthRelCU[i] = initTrDepthRelCU;
+    const UInt csx=getComponentScaleX(ComponentID(i), mChromaFormat);
+    const UInt csy=getComponentScaleY(ComponentID(i), mChromaFormat);
+    mOrigWidth[i]=mRect[i].width = (i < getNumberValidComponents(mChromaFormat)) ? (pcCU->getWidth( absPartIdxCU) >> csx) : 0;  // zero-sized rect for components absent in this chroma format (e.g. 4:0:0)
+    mRect[i].height              = (i < getNumberValidComponents(mChromaFormat)) ? (pcCU->getHeight(absPartIdxCU) >> csy) : 0;
+    mRect[i].x0=0;
+    mRect[i].y0=0;
+    mCodeAll[i]=true;
+    mOffsets[i]=baseOffset444>>(csx+csy);  // scale the 4:4:4 offset by the component's chroma subsampling
+  }
+}
+
+
+
+TComTURecurse::TComTURecurse(      TComDataCU *pcCU,
+                             const UInt        absPartIdxCU)
+  : TComTU(pcCU, absPartIdxCU, pcCU->getDepth(absPartIdxCU), 0)  // CU depth read from the CU itself; initial transform depth 0
+{ }
+
+
+
+TComTU::TComTU(TComTU &parent, const Bool bProcessLastOfLevel, const TU_SPLIT_MODE splitMode, const Bool splitAtCurrentDepth, const ComponentID absPartIdxSourceComponent)  // child TU produced by splitting 'parent' per splitMode
+  : mChromaFormat(parent.mChromaFormat),
+    mbProcessLastOfLevel(bProcessLastOfLevel),
+    mCuDepth(parent.mCuDepth),
+    mSection(0),
+    mSplitMode(splitMode),
+    mAbsPartIdxCU(parent.mAbsPartIdxCU),
+    mAbsPartIdxTURelCU(parent.GetRelPartIdxTU(absPartIdxSourceComponent)),
+    mAbsPartIdxStep(std::max<UInt>(1, (parent.GetAbsPartIdxNumParts(absPartIdxSourceComponent) >> partIdxStepShift[splitMode]))),  // clamped so the step never drops below one minimal partition
+    mpcCU(parent.mpcCU),
+    mLog2TrLumaSize(parent.mLog2TrLumaSize - ((splitMode != QUAD_SPLIT) ? 0 : 1)), //no change in width for vertical split
+    mpParent(&parent)
+{
+  for(UInt i=0; i<MAX_NUM_COMPONENT; i++)
+  {
+    mTrDepthRelCU[i] = parent.mTrDepthRelCU[i] + ((splitAtCurrentDepth || (splitMode == DONT_SPLIT)) ? 0 : 1);  // transform depth grows only for a genuine split
+  }
+
+  if (mSplitMode==DONT_SPLIT)
+  {
+    for(UInt i=0; i<MAX_NUM_COMPONENT; i++)
+    {
+      mRect[i] = (parent.mRect[i]);  // child is an exact copy of the parent's geometry
+      mOffsets[i]=parent.mOffsets[i];
+      mCodeAll[i]=true; // The 1 TU at this level is coded.
+      mOrigWidth[i]=mRect[i].width;
+    }
+    return;
+  }
+  else if (mSplitMode==VERTICAL_SPLIT)
+  {
+    for(UInt i=0; i<MAX_NUM_COMPONENT; i++)
+    {
+      mRect[i].x0 = (parent.mRect[i].x0);
+      mRect[i].y0 = (parent.mRect[i].y0);
+      mRect[i].width  = (parent.mRect[i].width);
+      mRect[i].height = (parent.mRect[i].height)>>1;  // top/bottom halves: full width, half height
+      mOffsets[i]=parent.mOffsets[i];
+      mCodeAll[i]=true; // The 2 TUs at this level is coded.
+      mOrigWidth[i]=mRect[i].width;
+    }
+    return;
+  }
+
+  for(UInt i=0; i<MAX_NUM_COMPONENT; i++)  // QUAD_SPLIT: four equal quadrants, with chroma "step-up" handling below
+  {
+    mRect[i].width = (parent.mRect[i].width >> 1);
+    mRect[i].height= (parent.mRect[i].height>> 1);
+    mRect[i].x0=parent.mRect[i].x0;
+    mRect[i].y0=parent.mRect[i].y0;
+    mOffsets[i]=parent.mOffsets[i];
+
+    if ((mRect[i].width < MIN_TU_SIZE || mRect[i].height < MIN_TU_SIZE) && mRect[i].width!=0)  // halving pushed this (chroma) rect below the minimum TU size
+    {
+      const UInt numPels=mRect[i].width * mRect[i].height;
+      if (numPels < (MIN_TU_SIZE*MIN_TU_SIZE))
+      {
+        // this level doesn't have enough pixels to have 4 blocks of any relative dimension
+        mRect[i].width = parent.mRect[i].width;  // keep the parent-sized block ("stepped-up" chroma TU)
+        mRect[i].height= parent.mRect[i].height;
+        mCodeAll[i]=false; // go up a level, so only process one entry of a quadrant
+        mTrDepthRelCU[i]--;
+      }
+      else if (mRect[i].width < mRect[i].height)
+      {
+        mRect[i].width=MIN_TU_SIZE;  // keep the area, clamp the narrow dimension (non-square TU)
+        mRect[i].height=numPels/MIN_TU_SIZE;
+        mCodeAll[i]=true;
+      }
+      else
+      {
+        mRect[i].height=MIN_TU_SIZE;
+        mRect[i].width=numPels/MIN_TU_SIZE;
+        mCodeAll[i]=true;
+      }
+    }
+    else
+    {
+      mCodeAll[i]=true;
+    }
+
+    mOrigWidth[i]=mRect[i].width;
+    if (!mCodeAll[i] && mbProcessLastOfLevel) mRect[i].width=0;  // width 0 marks "skip this section"; nextSection re-enables it for the last quadrant
+  }
+}
+
+Bool TComTURecurse::nextSection(const TComTU &parent)  // advance to the next section of this split level; returns false when iteration is done
+{
+  if (mSplitMode==DONT_SPLIT)
+  {
+    mSection++;
+    return false;  // an unsplit TU has exactly one section
+  }
+  else
+  {
+    for(UInt i=0; i<MAX_NUM_COMPONENT; i++)
+    {
+      mOffsets[i]+=mRect[i].width*mRect[i].height;  // coefficients for the sections are laid out consecutively
+      if (mbProcessLastOfLevel) mRect[i].width=mOrigWidth[i];  // restore a width the constructor may have zeroed
+      mRect[i].x0+=mRect[i].width;  // raster order: advance right first...
+      const TComRectangle &parentRect=parent.getRect(ComponentID(i));
+      if (mRect[i].x0 >= parentRect.x0+parentRect.width)
+      {
+        mRect[i].x0=parentRect.x0;  // ...then wrap to the start of the next row
+        mRect[i].y0+=mRect[i].height;
+      }
+      if (!mCodeAll[i])
+      {
+        if (!mbProcessLastOfLevel || mSection!=2) mRect[i].width=0;  // stepped-up chroma: only one section keeps a non-zero rect
+      }
+    }
+    assert(mRect[COMPONENT_Cb].x0==mRect[COMPONENT_Cr].x0);  // Cb and Cr geometry must stay in lock-step
+    assert(mRect[COMPONENT_Cb].y0==mRect[COMPONENT_Cr].y0);
+    assert(mRect[COMPONENT_Cb].width==mRect[COMPONENT_Cr].width);
+    assert(mRect[COMPONENT_Cb].height==mRect[COMPONENT_Cr].height);
+
+    mAbsPartIdxTURelCU+=mAbsPartIdxStep;
+    mSection++;
+    return mSection< (1<<mSplitMode);  // 1, 2 or 4 sections depending on the split mode
+  }
+}
+
+
+UInt TComTU::GetEquivalentLog2TrSize(const ComponentID compID)     const
+{
+  return g_aucConvertToBit[ getRect(compID).height ] + 2;  // uses height -- presumably the equivalent square size for non-square chroma rects; confirm
+}
+
+
+Bool TComTU::useDST(const ComponentID compID)  // whether this TU uses the DST instead of the DCT
+{
+        TComDataCU *const pcCU       = getCU();
+  const UInt              absPartIdx = GetAbsPartIdxTU(compID);
+
+  return isLuma(compID) && pcCU->isIntra(absPartIdx);  // DST only for intra luma; any size restriction is presumably enforced by the caller
+}
+
+
+Bool TComTU::isNonTransformedResidualRotated(const ComponentID compID)
+{
+  // rotation only for 4x4 intra, and is only used for non-transformed blocks (the latter is not checked here)
+  return    getCU()->getSlice()->getSPS()->getUseResidualRotation()  // RExt SPS flag gates the feature
+         && mRect[compID].width == 4
+         && getCU()->isIntra(GetAbsPartIdxTU());
+}
+
+
+UInt TComTU::getGolombRiceStatisticsIndex(const ComponentID compID)  // selects one of the Golomb-Rice adaptation statistics sets (RExt)
+{
+        TComDataCU *const pcCU             = getCU();
+  const UInt              absPartIdx       = GetAbsPartIdxTU(compID);
+  const Bool              transformSkip    = pcCU->getTransformSkip(absPartIdx, compID);
+  const Bool              transquantBypass = pcCU->getCUTransquantBypass(absPartIdx);
+
+  //--------
+
+  const UInt channelTypeOffset    =  isChroma(compID)                   ? 2 : 0;  // luma -> sets 0/1, chroma -> sets 2/3
+  const UInt nonTransformedOffset = (transformSkip || transquantBypass) ? 1 : 0;  // separate statistics for non-transformed residuals
+
+  //--------
+
+  const UInt selectedIndex = channelTypeOffset + nonTransformedOffset;
+  assert(selectedIndex < RExt__GOLOMB_RICE_ADAPTATION_STATISTICS_SETS);
+
+  return selectedIndex;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComTU.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,167 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TCOMTU__
+#define __TCOMTU__
+
+class TComTU; // forward declaration
+
+#include "CommonDef.h"
+#include "TComRectangle.h"
+#include "TComChromaFormat.h"
+
+class TComDataCU; // forward declaration
+
+//----------------------------------------------------------------------------------------------------------------------
+
+
+class TComTU  // describes one transform-unit level of a CU: per-component rectangles, offsets and depths; see TComTU.cpp
+{
+  public:
+    typedef enum TU_SPLIT_MODE { DONT_SPLIT=0, VERTICAL_SPLIT=1, QUAD_SPLIT=2, NUMBER_OF_SPLIT_MODES=3 } SPLIT_MODE;  // enum values chosen so that 1<<mode == number of sections
+
+    static const UInt NUMBER_OF_SECTIONS[NUMBER_OF_SPLIT_MODES];  // { 1, 2, 4 } -- defined in TComTU.cpp
+
+  protected:
+    ChromaFormat  mChromaFormat;
+    Bool          mbProcessLastOfLevel; // if true, then if size n/2 x n/2 is invalid, the nxn block for a channel is processed only for the last block, not the first.
+    UInt          mCuDepth;
+    UInt          mTrDepthRelCU[MAX_NUM_COMPONENT];  // transform depth relative to the CU, per component (chroma may lag after a "step-up")
+    UInt          mSection;  // index of the current section within this split level
+    TU_SPLIT_MODE mSplitMode;
+    TComRectangle mRect[MAX_NUM_COMPONENT];  // width 0 means "no section to process for this component"
+    Bool          mCodeAll[MAX_NUM_COMPONENT];  // false when a stepped-up chroma block covers the whole quadrant group
+    UInt          mOrigWidth[MAX_NUM_COMPONENT];  // width before any zeroing, restored by nextSection
+    UInt          mOffsets[MAX_NUM_COMPONENT];  // coefficient-buffer offsets, per component
+    UInt          mAbsPartIdxCU;
+    UInt          mAbsPartIdxTURelCU;  // TU part index relative to the CU origin
+    UInt          mAbsPartIdxStep;  // minimal partitions per section
+    TComDataCU   *mpcCU;  // not owned
+    UInt          mLog2TrLumaSize;
+    TComTU       *mpParent;  // not owned; NULL for the top level
+
+    TComTU(const TComTU &);           // not defined - do not use
+    TComTU&operator=(const TComTU &); // not defined - do not use
+
+  public:
+    TComTU(      TComDataCU *pcCU,
+           const UInt        absPartIdxCU,
+           const UInt        cuDepth,
+           const UInt        initTrDepthRelCU);
+
+  protected:
+    TComTU(      TComTU        &parentLevel,
+           const Bool           bProcessLastOfLevel,
+           const TU_SPLIT_MODE  splitMode                 = QUAD_SPLIT,
+           const Bool           splitAtCurrentDepth       = false,
+           const ComponentID    absPartIdxSourceComponent = COMPONENT_Y
+           );
+
+  public:
+          TComTU *Parent()       { return mpParent; }
+    const TComTU *Parent() const { return mpParent; }
+
+    UInt getCoefficientOffset(const ComponentID compID)        const { return mOffsets[compID]; }
+
+    const TComRectangle &getRect(const ComponentID compID)     const { return mRect[compID];    }
+
+    Bool ProcessingAllQuadrants(const ComponentID compID)      const { return mCodeAll[compID]; }
+    Bool ProcessComponentSection(const ComponentID compID)     const { return mRect[compID].width != 0; }  // width 0 encodes "nothing to do here"
+    Bool ProcessChannelSection(const ChannelType chType)       const { return mRect[chType].width != 0; }  // NOTE(review): indexes mRect by ChannelType -- relies on luma/chroma channel values aliasing component indices 0/1; confirm
+    UInt GetSectionNumber()                                    const { return mSection; }
+
+    UInt getCUDepth()                                          const { return mCuDepth; }
+
+    UInt GetTransformDepthTotal()                              const { return mCuDepth+GetTransformDepthRel(); }
+    UInt GetTransformDepthTotalAdj(const ComponentID compID)   const { return mCuDepth+GetTransformDepthRelAdj(compID); }
+
+    UInt GetTransformDepthRel()                                const { return mTrDepthRelCU[COMPONENT_Y]; }  // luma transform depth relative to the CU
+    UInt GetTransformDepthRelAdj(const ComponentID compID)     const { return mTrDepthRelCU[compID]; }
+    UInt GetTransformDepthRelAdj(const ChannelType chType)     const
+    {
+      assert(isLuma(chType) || (mTrDepthRelCU[COMPONENT_Cb] == mTrDepthRelCU[COMPONENT_Cr]));  // Cb/Cr depths must agree when queried per channel
+      return mTrDepthRelCU[isLuma(chType) ? COMPONENT_Y : COMPONENT_Cb];
+    }
+
+    UInt GetAbsPartIdxCU()                                     const { return mAbsPartIdxCU; }
+    UInt GetRelPartIdxTU()                                     const { return mAbsPartIdxTURelCU; }
+    UInt GetRelPartIdxTU(const ComponentID compID)             const { return ProcessingAllQuadrants(compID) ? mAbsPartIdxTURelCU : (mAbsPartIdxTURelCU & (~0x3)); }  // stepped-up chroma: round down to the start of the quadrant group
+    UInt GetAbsPartIdxTU()                                     const { return GetAbsPartIdxCU() + GetRelPartIdxTU(); }
+    UInt GetAbsPartIdxTU(const ComponentID compID)             const { return GetAbsPartIdxCU() + GetRelPartIdxTU(compID); }
+    UInt GetAbsPartIdxNumParts()                               const { return mAbsPartIdxStep; }
+    UInt GetAbsPartIdxNumParts(const ComponentID compID)       const { return ProcessingAllQuadrants(compID) ? mAbsPartIdxStep : (mAbsPartIdxStep * NUMBER_OF_SECTIONS[mSplitMode]); }  // stepped-up chroma spans all sections of the level
+
+    ChromaFormat GetChromaFormat()                             const { return mChromaFormat; }
+
+    TComDataCU *getCU()                                              { return mpcCU; }
+    const TComDataCU *getCU()                                  const { return mpcCU; }
+    Bool IsLastSection() const { return mSection+1>=((1<<mSplitMode)); }  // 1<<mode equals the section count (1, 2 or 4)
+
+    UInt GetLog2LumaTrSize()                                   const { return mLog2TrLumaSize; }
+    UInt GetEquivalentLog2TrSize(const ComponentID compID)     const;
+    TU_SPLIT_MODE GetSplitMode()                               const { return mSplitMode; }
+
+    Bool useDST(const ComponentID compID);
+    Bool isNonTransformedResidualRotated(const ComponentID compID);
+
+    UInt getGolombRiceStatisticsIndex(const ComponentID compID);
+};
+
+
+
+class TComTURecurse : public TComTU  // iterable TU: construct, then loop with do { ... } while (nextSection(parent))
+{
+  public:
+
+    TComTURecurse(      TComDataCU *pcCU,
+                  const UInt        absPartIdxCU,
+                  const UInt        forcedDepthOfCU)
+      : TComTU(pcCU, absPartIdxCU, forcedDepthOfCU, 0) { }  // caller supplies the CU depth explicitly
+
+    TComTURecurse(      TComDataCU *pcCU,
+                  const UInt        absPartIdxCU); // CU's depth is taken from CU->getDepth(idx)
+
+    TComTURecurse(      TComTU        &parentLevel,                            //Parent TU from which recursion children are derived
+                  const Bool           bProcessLastOfLevel,                    //If true (and the split results in a "step-up" for chroma), the chroma TU is colocated with the last luma TU instead of the first
+                  const TU_SPLIT_MODE  splitMode                 = QUAD_SPLIT, //DONT_SPLIT = create one new TU as a copy of its parent, VERTICAL_SPLIT = split the TU into top and bottom halves, QUAD_SPLIT = split the TU into four equal quadrants
+                  const Bool           splitAtCurrentDepth       = false,      //Set true to keep the current depth when applying a vertical or quad split
+                  const ComponentID    absPartIdxSourceComponent = COMPONENT_Y //Specifies which component of the parent TU should be used to initialise the absPartIdx of the first child and the absPartIdx step (this is needed when splitting a "stepped-up" chroma TU)
+                  )
+                  : TComTU(parentLevel, bProcessLastOfLevel, splitMode, splitAtCurrentDepth, absPartIdxSourceComponent) { }  // exposes the protected TComTU split constructor
+
+    Bool nextSection(const TComTU &parent); // returns true if there is another section to process, and prepares internal structures, else returns false
+};
+
+//----------------------------------------------------------------------------------------------------------------------
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComTrQuant.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,3333 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComTrQuant.cpp
+    \brief    transform and quantization class
+*/
+
+#include <stdlib.h>
+#include <math.h>
+#include <limits>
+#include <memory.h>
+#include "TComTrQuant.h"
+#include "TComPic.h"
+#include "ContextTables.h"
+#include "TComTU.h"
+#include "Debug.h"
+
+// Per-coefficient-group statistics accumulated during RD-optimised
+// quantisation (RDOQ) to decide whether a whole 4x4 group should be
+// zeroed out.
+typedef struct
+{
+  Int    iNNZbeforePos0;       // number of non-zero levels found before position 0 in scan order — TODO confirm against usage
+  Double d64CodedLevelandDist; // distortion and level cost only
+  Double d64UncodedDist;    // all zero coded block distortion
+  Double d64SigCost;           // accumulated significance-flag cost for the group
+  Double d64SigCost_0;         // significance cost contribution of position 0 only — presumably; verify against caller
+} coeffGroupRDStats;
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Constants
+// ====================================================================================================================
+
+#define RDOQ_CHROMA                 1           ///< use of RDOQ in chroma
+
+
+// ====================================================================================================================
+// QpParam constructor
+// ====================================================================================================================
+
+// Derive the effective QP for one channel and decompose it into the
+// period (QP/6) and remainder (QP%6) used for quantiser-table lookup.
+// Luma adds only the bit-depth offset; chroma first applies the chroma QP
+// offset, clips into the chroma mapping-table range, and (for non-negative
+// values) maps through the chroma QP scaling table.
+QpParam::QpParam(const Int           qpy,
+                 const ChannelType   chType,
+                 const Int           qpBdOffset,
+                 const Int           chromaQPOffset,
+                 const ChromaFormat  chFmt )
+{
+  Int derivedQp;
+
+  if (isLuma(chType))
+  {
+    derivedQp = qpy + qpBdOffset;
+  }
+  else
+  {
+    derivedQp = Clip3( -qpBdOffset, (chromaQPMappingTableSize - 1), qpy + chromaQPOffset );
+
+    // Negative QPs bypass the chroma mapping table.
+    derivedQp = (derivedQp < 0) ? (derivedQp + qpBdOffset)
+                                : (getScaledChromaQP(derivedQp, chFmt) + qpBdOffset);
+  }
+
+  Qp  = derivedQp;
+  per = derivedQp / 6;
+  rem = derivedQp % 6;
+}
+
+// Convenience constructor: gathers the QP inputs for a CU/component pair
+// and delegates to the five-argument constructor.  For chroma the total
+// offset is the sum of the PPS offset, the slice-level delta and the
+// per-CU chroma QP adjustment table entry.
+QpParam::QpParam(const TComDataCU &cu, const ComponentID compID)
+{
+  Int chromaQpOffset = 0;
+
+  if (isChroma(compID))
+  {
+    chromaQpOffset = cu.getSlice()->getPPS()->getQpOffset(compID)
+                   + cu.getSlice()->getSliceChromaQpDelta(compID)
+                   + cu.getSlice()->getPPS()->getChromaQpAdjTableAt(cu.getChromaQpAdj(0)).u.offset[Int(compID)-1];
+  }
+
+  *this = QpParam(cu.getQP( 0 ),
+                  toChannelType(compID),
+                  cu.getSlice()->getSPS()->getQpBDOffset(toChannelType(compID)),
+                  chromaQpOffset,
+                  cu.getPic()->getChromaFormat());
+}
+
+
+// ====================================================================================================================
+// TComTrQuant class member functions
+// ====================================================================================================================
+
+// Constructor: allocates the working buffers owned by this object
+// (released in ~TComTrQuant) and initialises the scaling lists.
+TComTrQuant::TComTrQuant()
+{
+  // allocate temporary buffers (one coefficient per sample of the largest CU)
+  m_plTempCoeff  = new TCoeff[ MAX_CU_SIZE*MAX_CU_SIZE ];
+
+  // allocate bit estimation class  (for RDOQ)
+  m_pcEstBitsSbac = new estBitsSbacStruct;
+  initScalingList();
+}
+
+// Destructor: releases everything allocated in the constructor.
+// Note: `delete`/`delete[]` are no-ops on NULL pointers, so the previous
+// `if (ptr)` guards were redundant; both members are also nulled out so a
+// stale pointer can never be dereferenced (the original cleared only
+// m_plTempCoeff, leaving m_pcEstBitsSbac dangling).
+TComTrQuant::~TComTrQuant()
+{
+  // delete temporary coefficient buffer
+  delete [] m_plTempCoeff;
+  m_plTempCoeff = NULL;
+
+  // delete bit estimation class (for RDOQ)
+  delete m_pcEstBitsSbac;
+  m_pcEstBitsSbac = NULL;
+
+  destroyScalingList();
+}
+
+#if ADAPTIVE_QP_SELECTION
+// Adaptive QP selection: from the per-level statistics gathered over the
+// slice (m_sliceSumC / m_sliceNsamples), estimate the quantisation step the
+// residual actually used and store the resulting QP delta for the base QP,
+// to be applied to the next slice.  The estimate is only trusted when more
+// than 120 samples were collected; otherwise the used QP is kept.
+Void TComTrQuant::storeSliceQpNext(TComSlice* pcSlice)
+{
+  // NOTE: does this work with negative QPs or when some blocks are transquant-bypass enabled?
+
+  Int qpBase = pcSlice->getSliceQpBase();
+  Int sliceQpused = pcSlice->getSliceQp();
+  Int sliceQpnext;
+  Double alpha = qpBase < 17 ? 0.5 : 1;
+
+  Int cnt=0;
+  for(Int u=1; u<=LEVEL_RANGE; u++)
+  {
+    cnt += m_sliceNsamples[u] ;
+  }
+
+  // Without RDOQ the collected statistics are less reliable: fall back to
+  // the base QP and the more conservative interpolation weight.
+  if( !m_useRDOQ )
+  {
+    sliceQpused = qpBase;
+    alpha = 0.5;
+  }
+
+  if( cnt > 120 )
+  {
+    Double sum = 0;
+    Int k = 0;
+    // NOTE(review): this loop stops at u<LEVEL_RANGE while the count loop
+    // above uses u<=LEVEL_RANGE — the last bin is counted but not summed.
+    // Inherited from the reference software; confirm before changing.
+    for(Int u=1; u<LEVEL_RANGE; u++)
+    {
+      sum += u*m_sliceSumC[u];
+      k += u*u*m_sliceNsamples[u];
+    }
+
+    // q[v]: dequantisation step size for QP v (g_invQuantScales is indexed
+    // by QP%6 and scaled by 2^(QP/6), normalised by 64).
+    Int v;
+    Double q[MAX_QP+1] ;
+    for(v=0; v<=MAX_QP; v++)
+    {
+      q[v] = (Double)(g_invQuantScales[v%6] * (1<<(v/6)))/64 ;
+    }
+
+    // Estimated effective step size of the current slice.
+    Double qnext = sum/k * q[sliceQpused] / (1<<ARL_C_PRECISION);
+
+    // Find the smallest QP whose interpolated step size exceeds qnext.
+    for(v=0; v<MAX_QP; v++)
+    {
+      if(qnext < alpha * q[v] + (1 - alpha) * q[v+1] )
+      {
+        break;
+      }
+    }
+    // Never move more than +/-3 QP from the QP actually used.
+    sliceQpnext = Clip3(sliceQpused - 3, sliceQpused + 3, v);
+  }
+  else
+  {
+    sliceQpnext = sliceQpused;
+  }
+
+  m_qpDelta[qpBase] = sliceQpnext - qpBase;
+}
+
+// Initialise the adaptive-QP delta table to its defaults: no adjustment
+// for QPs below 17, +1 from 17 upwards.
+Void TComTrQuant::initSliceQpDelta()
+{
+  for (Int qp = 0; qp <= MAX_QP; qp++)
+  {
+    if (qp < 17)
+    {
+      m_qpDelta[qp] = 0;
+    }
+    else
+    {
+      m_qpDelta[qp] = 1;
+    }
+  }
+}
+
+// Reset the per-slice adaptive-reconstruction-level accumulators
+// (entries 0..LEVEL_RANGE inclusive) before gathering a new slice.
+Void TComTrQuant::clearSliceARLCnt()
+{
+  for (Int u = 0; u <= LEVEL_RANGE; u++)
+  {
+    m_sliceSumC[u]     = 0;
+    m_sliceNsamples[u] = 0;
+  }
+}
+#endif
+
+
+
+#if MATRIX_MULT
+/** NxN forward transform (2D) using brute force matrix multiplication (3 nested loops)
+ *  \param bitDepth          bit depth of the input residual samples
+ *  \param block             pointer to input data (residual)
+ *  \param coeff             pointer to output data (transform coefficients)
+ *  \param uiStride          stride of input data
+ *  \param uiTrSize          transform size (uiTrSize x uiTrSize); must be 4, 8, 16 or 32
+ *  \param useDST            if true, use the 4x4 DST matrix instead of the DCT (4x4 only)
+ *  \param maxTrDynamicRange dynamic range used to derive the per-pass shifts
+ */
+Void xTr(Int bitDepth, Pel *block, TCoeff *coeff, UInt uiStride, UInt uiTrSize, Bool useDST, const Int maxTrDynamicRange)
+{
+  UInt i,j,k;
+  TCoeff iSum;
+  TCoeff tmp[MAX_TU_SIZE * MAX_TU_SIZE];
+  const TMatrixCoeff *iT;
+  UInt uiLog2TrSize = g_aucConvertToBit[ uiTrSize ] + 2;
+
+  // Select the forward transform matrix for the requested size.
+  if (uiTrSize==4)
+  {
+    iT  = (useDST ? g_as_DST_MAT_4[TRANSFORM_FORWARD][0] : g_aiT4[TRANSFORM_FORWARD][0]);
+  }
+  else if (uiTrSize==8)
+  {
+    iT = g_aiT8[TRANSFORM_FORWARD][0];
+  }
+  else if (uiTrSize==16)
+  {
+    iT = g_aiT16[TRANSFORM_FORWARD][0];
+  }
+  else if (uiTrSize==32)
+  {
+    iT = g_aiT32[TRANSFORM_FORWARD][0];
+  }
+  else
+  {
+    assert(0);
+  }
+
+  static const Int TRANSFORM_MATRIX_SHIFT = g_transformMatrixShift[TRANSFORM_FORWARD];
+
+  // Per-pass shifts chosen so intermediate values fit the transform dynamic range.
+  const Int shift_1st = (uiLog2TrSize +  bitDepth + TRANSFORM_MATRIX_SHIFT) - maxTrDynamicRange;
+  const Int shift_2nd = uiLog2TrSize + TRANSFORM_MATRIX_SHIFT;
+  const Int add_1st = (shift_1st>0) ? (1<<(shift_1st-1)) : 0;
+  const Int add_2nd = 1<<(shift_2nd-1);
+
+  /* Horizontal transform */
+
+  for (i=0; i<uiTrSize; i++)
+  {
+    for (j=0; j<uiTrSize; j++)
+    {
+      iSum = 0;
+      for (k=0; k<uiTrSize; k++)
+      {
+        iSum += iT[i*uiTrSize+k]*block[j*uiStride+k];
+      }
+      tmp[i*uiTrSize+j] = (iSum + add_1st)>>shift_1st;
+    }
+  }
+
+  /* Vertical transform */
+  for (i=0; i<uiTrSize; i++)
+  {
+    for (j=0; j<uiTrSize; j++)
+    {
+      iSum = 0;
+      for (k=0; k<uiTrSize; k++)
+      {
+        iSum += iT[i*uiTrSize+k]*tmp[j*uiTrSize+k];
+      }
+      coeff[i*uiTrSize+j] = (iSum + add_2nd)>>shift_2nd;
+    }
+  }
+}
+
+/** NxN inverse transform (2D) using brute force matrix multiplication (3 nested loops)
+ *  \param bitDepth          bit depth of the output residual samples
+ *  \param coeff             pointer to input data (transform coefficients)
+ *  \param block             pointer to output data (residual)
+ *  \param uiStride          stride of output data
+ *  \param uiTrSize          transform size (uiTrSize x uiTrSize); must be 4, 8, 16 or 32
+ *  \param useDST            if true, use the 4x4 DST matrix instead of the DCT (4x4 only)
+ *  \param maxTrDynamicRange dynamic range used to derive the per-pass shifts and clipping bounds
+ */
+Void xITr(Int bitDepth, TCoeff *coeff, Pel *block, UInt uiStride, UInt uiTrSize, Bool useDST, const Int maxTrDynamicRange)
+{
+  UInt i,j,k;
+  TCoeff iSum;
+  TCoeff tmp[MAX_TU_SIZE * MAX_TU_SIZE];
+  const TMatrixCoeff *iT;
+
+  // Select the inverse transform matrix for the requested size.
+  if (uiTrSize==4)
+  {
+    iT  = (useDST ? g_as_DST_MAT_4[TRANSFORM_INVERSE][0] : g_aiT4[TRANSFORM_INVERSE][0]);
+  }
+  else if (uiTrSize==8)
+  {
+    iT = g_aiT8[TRANSFORM_INVERSE][0];
+  }
+  else if (uiTrSize==16)
+  {
+    iT = g_aiT16[TRANSFORM_INVERSE][0];
+  }
+  else if (uiTrSize==32)
+  {
+    iT = g_aiT32[TRANSFORM_INVERSE][0];
+  }
+  else
+  {
+    assert(0);
+  }
+
+  static const Int TRANSFORM_MATRIX_SHIFT = g_transformMatrixShift[TRANSFORM_INVERSE];
+
+  const Int shift_1st = TRANSFORM_MATRIX_SHIFT + 1; //1 has been added to shift_1st at the expense of shift_2nd
+  const Int shift_2nd = (TRANSFORM_MATRIX_SHIFT + maxTrDynamicRange - 1) - bitDepth;
+  const TCoeff clipMinimum = -(1 << maxTrDynamicRange);
+  const TCoeff clipMaximum =  (1 << maxTrDynamicRange) - 1;
+  assert(shift_2nd>=0);
+  const Int add_1st = 1<<(shift_1st-1);
+  const Int add_2nd = (shift_2nd>0) ? (1<<(shift_2nd-1)) : 0;
+
+  /* Horizontal transform */
+  for (i=0; i<uiTrSize; i++)
+  {
+    for (j=0; j<uiTrSize; j++)
+    {
+      iSum = 0;
+      for (k=0; k<uiTrSize; k++)
+      {
+        iSum += iT[k*uiTrSize+i]*coeff[k*uiTrSize+j];
+      }
+
+      // Clipping here is not in the standard, but is used to protect the "Pel" data type into which the inverse-transformed samples will be copied
+      tmp[i*uiTrSize+j] = Clip3<TCoeff>(clipMinimum, clipMaximum, (iSum + add_1st)>>shift_1st);
+    }
+  }
+
+  /* Vertical transform */
+  for (i=0; i<uiTrSize; i++)
+  {
+    for (j=0; j<uiTrSize; j++)
+    {
+      iSum = 0;
+      for (k=0; k<uiTrSize; k++)
+      {
+        iSum += iT[k*uiTrSize+j]*tmp[i*uiTrSize+k];
+      }
+
+      // Final clip to the representable range of Pel before the copy-out.
+      block[i*uiStride+j] = Clip3<TCoeff>(std::numeric_limits<Pel>::min(), std::numeric_limits<Pel>::max(), (iSum + add_2nd)>>shift_2nd);
+    }
+  }
+}
+
+#endif //MATRIX_MULT
+
+
+/** 4x4 forward transform implemented using partial butterfly structure (1D)
+ *  \param src   input data (residual)
+ *  \param dst   output data (transform coefficients)
+ *  \param shift specifies right shift after 1D transform
+ *  \param line  number of 4-sample lines to transform
+ */
+Void partialButterfly4(TCoeff *src, TCoeff *dst, Int shift, Int line)
+{
+  const TCoeff rnd = (shift > 0) ? (1 << (shift - 1)) : 0;
+
+  for (Int row = 0; row < line; row++, src += 4, dst++)
+  {
+    // Even/odd decomposition of the 4 input samples.
+    const TCoeff even0 = src[0] + src[3];
+    const TCoeff odd0  = src[0] - src[3];
+    const TCoeff even1 = src[1] + src[2];
+    const TCoeff odd1  = src[1] - src[2];
+
+    // Even terms feed output rows 0 and 2; odd terms feed rows 1 and 3.
+    dst[0]        = (g_aiT4[TRANSFORM_FORWARD][0][0]*even0 + g_aiT4[TRANSFORM_FORWARD][0][1]*even1 + rnd) >> shift;
+    dst[2 * line] = (g_aiT4[TRANSFORM_FORWARD][2][0]*even0 + g_aiT4[TRANSFORM_FORWARD][2][1]*even1 + rnd) >> shift;
+    dst[line]     = (g_aiT4[TRANSFORM_FORWARD][1][0]*odd0  + g_aiT4[TRANSFORM_FORWARD][1][1]*odd1  + rnd) >> shift;
+    dst[3 * line] = (g_aiT4[TRANSFORM_FORWARD][3][0]*odd0  + g_aiT4[TRANSFORM_FORWARD][3][1]*odd1  + rnd) >> shift;
+  }
+}
+
+// Fast DST Algorithm. Full matrix multiplication for DST and Fast DST algorithm
+// give identical results (input block, output coeff).
+Void fastForwardDst(TCoeff *block, TCoeff *coeff, Int shift)
+{
+  const TCoeff rnd = (shift > 0) ? (1 << (shift - 1)) : 0;
+
+  for (Int col = 0; col < 4; col++)
+  {
+    // Fetch one input row of 4 residual samples.
+    const TCoeff s0 = block[4 * col + 0];
+    const TCoeff s1 = block[4 * col + 1];
+    const TCoeff s2 = block[4 * col + 2];
+    const TCoeff s3 = block[4 * col + 3];
+
+    // Multiply by each row of the forward DST matrix (table-driven rather
+    // than hard-wired numbers) and round-shift into the output.
+    for (Int row = 0; row < 4; row++)
+    {
+      const TMatrixCoeff *m = g_as_DST_MAT_4[TRANSFORM_FORWARD][row];
+      const TCoeff acc = s0 * m[0] + s1 * m[1] + s2 * m[2] + s3 * m[3];
+
+      coeff[(row * 4) + col] = rightShift(acc + rnd, shift);
+    }
+  }
+}
+
+// Fast inverse DST (input tmp, output block); identical results to a full
+// matrix multiplication with g_as_DST_MAT_4.  Each output sample is clipped
+// to [outputMinimum, outputMaximum].
+Void fastInverseDst(TCoeff *tmp, TCoeff *block, Int shift, const TCoeff outputMinimum, const TCoeff outputMaximum)
+{
+  const TCoeff rnd = (shift > 0) ? (1 << (shift - 1)) : 0;
+
+  for (Int i = 0; i < 4; i++)
+  {
+    // Gather one column of coefficients.
+    const TCoeff c0 = tmp[     i];
+    const TCoeff c1 = tmp[ 4 + i];
+    const TCoeff c2 = tmp[ 8 + i];
+    const TCoeff c3 = tmp[12 + i];
+
+    for (Int col = 0; col < 4; col++)
+    {
+      // Dot product with one column of the inverse DST matrix.
+      const TCoeff acc = c0 * g_as_DST_MAT_4[TRANSFORM_INVERSE][0][col]
+                       + c1 * g_as_DST_MAT_4[TRANSFORM_INVERSE][1][col]
+                       + c2 * g_as_DST_MAT_4[TRANSFORM_INVERSE][2][col]
+                       + c3 * g_as_DST_MAT_4[TRANSFORM_INVERSE][3][col];
+
+      block[(i * 4) + col] = Clip3( outputMinimum, outputMaximum, rightShift(acc + rnd, shift));
+    }
+  }
+}
+
+/** 4x4 inverse transform implemented using partial butterfly structure (1D)
+ *  \param src   input data (transform coefficients)
+ *  \param dst   output data (residual)
+ *  \param shift specifies right shift after 1D transform
+ *  \param line  number of columns to transform
+ *  \param outputMinimum / outputMaximum clipping bounds for each output sample
+ */
+Void partialButterflyInverse4(TCoeff *src, TCoeff *dst, Int shift, Int line, const TCoeff outputMinimum, const TCoeff outputMaximum)
+{
+  const TCoeff rnd = (shift > 0) ? (1 << (shift - 1)) : 0;
+
+  for (Int col = 0; col < line; col++, src++, dst += 4)
+  {
+    // Odd part from coefficients 1 and 3; even part from 0 and 2
+    // (symmetry keeps the multiplication count minimal).
+    const TCoeff odd0  = g_aiT4[TRANSFORM_INVERSE][1][0]*src[line] + g_aiT4[TRANSFORM_INVERSE][3][0]*src[3*line];
+    const TCoeff odd1  = g_aiT4[TRANSFORM_INVERSE][1][1]*src[line] + g_aiT4[TRANSFORM_INVERSE][3][1]*src[3*line];
+    const TCoeff even0 = g_aiT4[TRANSFORM_INVERSE][0][0]*src[0]    + g_aiT4[TRANSFORM_INVERSE][2][0]*src[2*line];
+    const TCoeff even1 = g_aiT4[TRANSFORM_INVERSE][0][1]*src[0]    + g_aiT4[TRANSFORM_INVERSE][2][1]*src[2*line];
+
+    // Recombine even and odd halves into the 4 spatial-domain samples.
+    dst[0] = Clip3( outputMinimum, outputMaximum, (even0 + odd0 + rnd) >> shift );
+    dst[1] = Clip3( outputMinimum, outputMaximum, (even1 + odd1 + rnd) >> shift );
+    dst[2] = Clip3( outputMinimum, outputMaximum, (even1 - odd1 + rnd) >> shift );
+    dst[3] = Clip3( outputMinimum, outputMaximum, (even0 - odd0 + rnd) >> shift );
+  }
+}
+
+/** 8x8 forward transform implemented using partial butterfly structure (1D)
+ *  \param src   input data (residual)
+ *  \param dst   output data (transform coefficients)
+ *  \param shift specifies right shift after 1D transform
+ *  \param line  number of 8-sample lines to transform
+ */
+Void partialButterfly8(TCoeff *src, TCoeff *dst, Int shift, Int line)
+{
+  const TCoeff rnd = (shift > 0) ? (1 << (shift - 1)) : 0;
+
+  for (Int row = 0; row < line; row++, src += 8, dst++)
+  {
+    TCoeff E[4], O[4];
+
+    // First butterfly stage: even/odd decomposition of the 8 inputs.
+    for (Int k = 0; k < 4; k++)
+    {
+      E[k] = src[k] + src[7 - k];
+      O[k] = src[k] - src[7 - k];
+    }
+
+    // Second stage on the even half.
+    const TCoeff EE0 = E[0] + E[3];
+    const TCoeff EO0 = E[0] - E[3];
+    const TCoeff EE1 = E[1] + E[2];
+    const TCoeff EO1 = E[1] - E[2];
+
+    // Even outputs (rows 0/4 from EE, rows 2/6 from EO).
+    dst[0]      = (g_aiT8[TRANSFORM_FORWARD][0][0]*EE0 + g_aiT8[TRANSFORM_FORWARD][0][1]*EE1 + rnd) >> shift;
+    dst[4*line] = (g_aiT8[TRANSFORM_FORWARD][4][0]*EE0 + g_aiT8[TRANSFORM_FORWARD][4][1]*EE1 + rnd) >> shift;
+    dst[2*line] = (g_aiT8[TRANSFORM_FORWARD][2][0]*EO0 + g_aiT8[TRANSFORM_FORWARD][2][1]*EO1 + rnd) >> shift;
+    dst[6*line] = (g_aiT8[TRANSFORM_FORWARD][6][0]*EO0 + g_aiT8[TRANSFORM_FORWARD][6][1]*EO1 + rnd) >> shift;
+
+    // Odd outputs (rows 1, 3, 5, 7) use all four odd-part terms.
+    for (Int k = 1; k < 8; k += 2)
+    {
+      const TMatrixCoeff *m = g_aiT8[TRANSFORM_FORWARD][k];
+      dst[k*line] = (m[0]*O[0] + m[1]*O[1] + m[2]*O[2] + m[3]*O[3] + rnd) >> shift;
+    }
+  }
+}
+
+/** 8x8 inverse transform implemented using partial butterfly structure (1D)
+ *  \param src   input data (transform coefficients)
+ *  \param dst   output data (residual)
+ *  \param shift specifies right shift after 1D transform
+ *  \param line          number of columns to transform
+ *  \param outputMinimum lower clipping bound applied to each output sample
+ *  \param outputMaximum upper clipping bound applied to each output sample
+ */
+Void partialButterflyInverse8(TCoeff *src, TCoeff *dst, Int shift, Int line, const TCoeff outputMinimum, const TCoeff outputMaximum)
+{
+  Int j,k;
+  TCoeff E[4],O[4];
+  TCoeff EE[2],EO[2];
+  TCoeff add = (shift > 0) ? (1<<(shift-1)) : 0;
+
+  for (j=0; j<line; j++)
+  {
+    /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+    // Odd part: coefficients at odd row indices 1, 3, 5, 7.
+    for (k=0;k<4;k++)
+    {
+      O[k] = g_aiT8[TRANSFORM_INVERSE][ 1][k]*src[line]   + g_aiT8[TRANSFORM_INVERSE][ 3][k]*src[3*line] +
+             g_aiT8[TRANSFORM_INVERSE][ 5][k]*src[5*line] + g_aiT8[TRANSFORM_INVERSE][ 7][k]*src[7*line];
+    }
+
+    // Even part: rows 2/6 feed EO, rows 0/4 feed EE.
+    EO[0] = g_aiT8[TRANSFORM_INVERSE][2][0]*src[ 2*line ] + g_aiT8[TRANSFORM_INVERSE][6][0]*src[ 6*line ];
+    EO[1] = g_aiT8[TRANSFORM_INVERSE][2][1]*src[ 2*line ] + g_aiT8[TRANSFORM_INVERSE][6][1]*src[ 6*line ];
+    EE[0] = g_aiT8[TRANSFORM_INVERSE][0][0]*src[ 0      ] + g_aiT8[TRANSFORM_INVERSE][4][0]*src[ 4*line ];
+    EE[1] = g_aiT8[TRANSFORM_INVERSE][0][1]*src[ 0      ] + g_aiT8[TRANSFORM_INVERSE][4][1]*src[ 4*line ];
+
+    /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+    E[0] = EE[0] + EO[0];
+    E[3] = EE[0] - EO[0];
+    E[1] = EE[1] + EO[1];
+    E[2] = EE[1] - EO[1];
+    for (k=0;k<4;k++)
+    {
+      dst[ k   ] = Clip3( outputMinimum, outputMaximum, (E[k] + O[k] + add)>>shift );
+      dst[ k+4 ] = Clip3( outputMinimum, outputMaximum, (E[3-k] - O[3-k] + add)>>shift );
+    }
+    src ++;
+    dst += 8;
+  }
+}
+
+/** 16x16 forward transform implemented using partial butterfly structure (1D)
+ *  \param src   input data (residual)
+ *  \param dst   output data (transform coefficients)
+ *  \param shift specifies right shift after 1D transform
+ *  \param line  number of 16-sample lines to transform
+ */
+Void partialButterfly16(TCoeff *src, TCoeff *dst, Int shift, Int line)
+{
+  Int j,k;
+  TCoeff E[8],O[8];
+  TCoeff EE[4],EO[4];
+  TCoeff EEE[2],EEO[2];
+  TCoeff add = (shift > 0) ? (1<<(shift-1)) : 0;
+
+  for (j=0; j<line; j++)
+  {
+    /* E and O*/
+    for (k=0;k<8;k++)
+    {
+      E[k] = src[k] + src[15-k];
+      O[k] = src[k] - src[15-k];
+    }
+    /* EE and EO */
+    for (k=0;k<4;k++)
+    {
+      EE[k] = E[k] + E[7-k];
+      EO[k] = E[k] - E[7-k];
+    }
+    /* EEE and EEO */
+    EEE[0] = EE[0] + EE[3];
+    EEO[0] = EE[0] - EE[3];
+    EEE[1] = EE[1] + EE[2];
+    EEO[1] = EE[1] - EE[2];
+
+    // Innermost even terms produce rows 0, 8, 4 and 12.
+    dst[ 0      ] = (g_aiT16[TRANSFORM_FORWARD][ 0][0]*EEE[0] + g_aiT16[TRANSFORM_FORWARD][ 0][1]*EEE[1] + add)>>shift;
+    dst[ 8*line ] = (g_aiT16[TRANSFORM_FORWARD][ 8][0]*EEE[0] + g_aiT16[TRANSFORM_FORWARD][ 8][1]*EEE[1] + add)>>shift;
+    dst[ 4*line ] = (g_aiT16[TRANSFORM_FORWARD][ 4][0]*EEO[0] + g_aiT16[TRANSFORM_FORWARD][ 4][1]*EEO[1] + add)>>shift;
+    dst[ 12*line] = (g_aiT16[TRANSFORM_FORWARD][12][0]*EEO[0] + g_aiT16[TRANSFORM_FORWARD][12][1]*EEO[1] + add)>>shift;
+
+    // Rows 2, 6, 10, 14 come from the second-stage odd terms.
+    for (k=2;k<16;k+=4)
+    {
+      dst[ k*line ] = (g_aiT16[TRANSFORM_FORWARD][k][0]*EO[0] + g_aiT16[TRANSFORM_FORWARD][k][1]*EO[1] +
+                       g_aiT16[TRANSFORM_FORWARD][k][2]*EO[2] + g_aiT16[TRANSFORM_FORWARD][k][3]*EO[3] + add)>>shift;
+    }
+
+    // Odd rows use all eight first-stage odd terms.
+    for (k=1;k<16;k+=2)
+    {
+      dst[ k*line ] = (g_aiT16[TRANSFORM_FORWARD][k][0]*O[0] + g_aiT16[TRANSFORM_FORWARD][k][1]*O[1] +
+                       g_aiT16[TRANSFORM_FORWARD][k][2]*O[2] + g_aiT16[TRANSFORM_FORWARD][k][3]*O[3] +
+                       g_aiT16[TRANSFORM_FORWARD][k][4]*O[4] + g_aiT16[TRANSFORM_FORWARD][k][5]*O[5] +
+                       g_aiT16[TRANSFORM_FORWARD][k][6]*O[6] + g_aiT16[TRANSFORM_FORWARD][k][7]*O[7] + add)>>shift;
+    }
+
+    src += 16;
+    dst ++;
+
+  }
+}
+
+/** 16x16 inverse transform implemented using partial butterfly structure (1D)
+ *  \param src   input data (transform coefficients)
+ *  \param dst   output data (residual)
+ *  \param shift specifies right shift after 1D transform
+ *  \param line          number of columns to transform
+ *  \param outputMinimum lower clipping bound applied to each output sample
+ *  \param outputMaximum upper clipping bound applied to each output sample
+ */
+Void partialButterflyInverse16(TCoeff *src, TCoeff *dst, Int shift, Int line, const TCoeff outputMinimum, const TCoeff outputMaximum)
+{
+  Int j,k;
+  TCoeff E[8],O[8];
+  TCoeff EE[4],EO[4];
+  TCoeff EEE[2],EEO[2];
+  TCoeff add = (shift > 0) ? (1<<(shift-1)) : 0;
+
+  for (j=0; j<line; j++)
+  {
+    /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+    // Odd part from coefficients at odd row indices.
+    for (k=0;k<8;k++)
+    {
+      O[k] = g_aiT16[TRANSFORM_INVERSE][ 1][k]*src[ line]   + g_aiT16[TRANSFORM_INVERSE][ 3][k]*src[ 3*line] +
+             g_aiT16[TRANSFORM_INVERSE][ 5][k]*src[ 5*line] + g_aiT16[TRANSFORM_INVERSE][ 7][k]*src[ 7*line] +
+             g_aiT16[TRANSFORM_INVERSE][ 9][k]*src[ 9*line] + g_aiT16[TRANSFORM_INVERSE][11][k]*src[11*line] +
+             g_aiT16[TRANSFORM_INVERSE][13][k]*src[13*line] + g_aiT16[TRANSFORM_INVERSE][15][k]*src[15*line];
+    }
+    // Second-stage odd part from rows 2, 6, 10, 14.
+    for (k=0;k<4;k++)
+    {
+      EO[k] = g_aiT16[TRANSFORM_INVERSE][ 2][k]*src[ 2*line] + g_aiT16[TRANSFORM_INVERSE][ 6][k]*src[ 6*line] +
+              g_aiT16[TRANSFORM_INVERSE][10][k]*src[10*line] + g_aiT16[TRANSFORM_INVERSE][14][k]*src[14*line];
+    }
+    // Innermost even/odd parts from rows 0, 4, 8, 12.
+    EEO[0] = g_aiT16[TRANSFORM_INVERSE][4][0]*src[ 4*line ] + g_aiT16[TRANSFORM_INVERSE][12][0]*src[ 12*line ];
+    EEE[0] = g_aiT16[TRANSFORM_INVERSE][0][0]*src[ 0      ] + g_aiT16[TRANSFORM_INVERSE][ 8][0]*src[ 8*line  ];
+    EEO[1] = g_aiT16[TRANSFORM_INVERSE][4][1]*src[ 4*line ] + g_aiT16[TRANSFORM_INVERSE][12][1]*src[ 12*line ];
+    EEE[1] = g_aiT16[TRANSFORM_INVERSE][0][1]*src[ 0      ] + g_aiT16[TRANSFORM_INVERSE][ 8][1]*src[ 8*line  ];
+
+    /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+    for (k=0;k<2;k++)
+    {
+      EE[k] = EEE[k] + EEO[k];
+      EE[k+2] = EEE[1-k] - EEO[1-k];
+    }
+    for (k=0;k<4;k++)
+    {
+      E[k] = EE[k] + EO[k];
+      E[k+4] = EE[3-k] - EO[3-k];
+    }
+    for (k=0;k<8;k++)
+    {
+      dst[k]   = Clip3( outputMinimum, outputMaximum, (E[k] + O[k] + add)>>shift );
+      dst[k+8] = Clip3( outputMinimum, outputMaximum, (E[7-k] - O[7-k] + add)>>shift );
+    }
+    src ++;
+    dst += 16;
+  }
+}
+
+/** 32x32 forward transform implemented using partial butterfly structure (1D)
+ *  \param src   input data (residual)
+ *  \param dst   output data (transform coefficients)
+ *  \param shift specifies right shift after 1D transform
+ *  \param line  number of 32-sample lines to transform
+ */
+Void partialButterfly32(TCoeff *src, TCoeff *dst, Int shift, Int line)
+{
+  Int j,k;
+  TCoeff E[16],O[16];
+  TCoeff EE[8],EO[8];
+  TCoeff EEE[4],EEO[4];
+  TCoeff EEEE[2],EEEO[2];
+  TCoeff add = (shift > 0) ? (1<<(shift-1)) : 0;
+
+  for (j=0; j<line; j++)
+  {
+    /* E and O*/
+    for (k=0;k<16;k++)
+    {
+      E[k] = src[k] + src[31-k];
+      O[k] = src[k] - src[31-k];
+    }
+    /* EE and EO */
+    for (k=0;k<8;k++)
+    {
+      EE[k] = E[k] + E[15-k];
+      EO[k] = E[k] - E[15-k];
+    }
+    /* EEE and EEO */
+    for (k=0;k<4;k++)
+    {
+      EEE[k] = EE[k] + EE[7-k];
+      EEO[k] = EE[k] - EE[7-k];
+    }
+    /* EEEE and EEEO */
+    EEEE[0] = EEE[0] + EEE[3];
+    EEEO[0] = EEE[0] - EEE[3];
+    EEEE[1] = EEE[1] + EEE[2];
+    EEEO[1] = EEE[1] - EEE[2];
+
+    // Innermost even terms produce rows 0, 16, 8 and 24.
+    dst[ 0       ] = (g_aiT32[TRANSFORM_FORWARD][ 0][0]*EEEE[0] + g_aiT32[TRANSFORM_FORWARD][ 0][1]*EEEE[1] + add)>>shift;
+    dst[ 16*line ] = (g_aiT32[TRANSFORM_FORWARD][16][0]*EEEE[0] + g_aiT32[TRANSFORM_FORWARD][16][1]*EEEE[1] + add)>>shift;
+    dst[ 8*line  ] = (g_aiT32[TRANSFORM_FORWARD][ 8][0]*EEEO[0] + g_aiT32[TRANSFORM_FORWARD][ 8][1]*EEEO[1] + add)>>shift;
+    dst[ 24*line ] = (g_aiT32[TRANSFORM_FORWARD][24][0]*EEEO[0] + g_aiT32[TRANSFORM_FORWARD][24][1]*EEEO[1] + add)>>shift;
+    // Rows 4, 12, 20, 28 from the third-stage odd terms.
+    for (k=4;k<32;k+=8)
+    {
+      dst[ k*line ] = (g_aiT32[TRANSFORM_FORWARD][k][0]*EEO[0] + g_aiT32[TRANSFORM_FORWARD][k][1]*EEO[1] +
+                       g_aiT32[TRANSFORM_FORWARD][k][2]*EEO[2] + g_aiT32[TRANSFORM_FORWARD][k][3]*EEO[3] + add)>>shift;
+    }
+    // Rows 2, 6, 10, ... from the second-stage odd terms.
+    for (k=2;k<32;k+=4)
+    {
+      dst[ k*line ] = (g_aiT32[TRANSFORM_FORWARD][k][0]*EO[0] + g_aiT32[TRANSFORM_FORWARD][k][1]*EO[1] +
+                       g_aiT32[TRANSFORM_FORWARD][k][2]*EO[2] + g_aiT32[TRANSFORM_FORWARD][k][3]*EO[3] +
+                       g_aiT32[TRANSFORM_FORWARD][k][4]*EO[4] + g_aiT32[TRANSFORM_FORWARD][k][5]*EO[5] +
+                       g_aiT32[TRANSFORM_FORWARD][k][6]*EO[6] + g_aiT32[TRANSFORM_FORWARD][k][7]*EO[7] + add)>>shift;
+    }
+    // Odd rows use all sixteen first-stage odd terms.
+    for (k=1;k<32;k+=2)
+    {
+      dst[ k*line ] = (g_aiT32[TRANSFORM_FORWARD][k][ 0]*O[ 0] + g_aiT32[TRANSFORM_FORWARD][k][ 1]*O[ 1] +
+                       g_aiT32[TRANSFORM_FORWARD][k][ 2]*O[ 2] + g_aiT32[TRANSFORM_FORWARD][k][ 3]*O[ 3] +
+                       g_aiT32[TRANSFORM_FORWARD][k][ 4]*O[ 4] + g_aiT32[TRANSFORM_FORWARD][k][ 5]*O[ 5] +
+                       g_aiT32[TRANSFORM_FORWARD][k][ 6]*O[ 6] + g_aiT32[TRANSFORM_FORWARD][k][ 7]*O[ 7] +
+                       g_aiT32[TRANSFORM_FORWARD][k][ 8]*O[ 8] + g_aiT32[TRANSFORM_FORWARD][k][ 9]*O[ 9] +
+                       g_aiT32[TRANSFORM_FORWARD][k][10]*O[10] + g_aiT32[TRANSFORM_FORWARD][k][11]*O[11] +
+                       g_aiT32[TRANSFORM_FORWARD][k][12]*O[12] + g_aiT32[TRANSFORM_FORWARD][k][13]*O[13] +
+                       g_aiT32[TRANSFORM_FORWARD][k][14]*O[14] + g_aiT32[TRANSFORM_FORWARD][k][15]*O[15] + add)>>shift;
+    }
+
+    src += 32;
+    dst ++;
+  }
+}
+
+/** 32x32 inverse transform implemented using partial butterfly structure (1D)
+ *  \param src   input data (transform coefficients)
+ *  \param dst   output data (residual)
+ *  \param shift specifies right shift after 1D transform
+ *  \param line          number of columns to transform
+ *  \param outputMinimum lower clipping bound applied to each output sample
+ *  \param outputMaximum upper clipping bound applied to each output sample
+ */
+Void partialButterflyInverse32(TCoeff *src, TCoeff *dst, Int shift, Int line, const TCoeff outputMinimum, const TCoeff outputMaximum)
+{
+  Int j,k;
+  TCoeff E[16],O[16];
+  TCoeff EE[8],EO[8];
+  TCoeff EEE[4],EEO[4];
+  TCoeff EEEE[2],EEEO[2];
+  TCoeff add = (shift > 0) ? (1<<(shift-1)) : 0;
+
+  for (j=0; j<line; j++)
+  {
+    /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+    // Odd part from coefficients at odd row indices.
+    for (k=0;k<16;k++)
+    {
+      O[k] = g_aiT32[TRANSFORM_INVERSE][ 1][k]*src[ line    ] + g_aiT32[TRANSFORM_INVERSE][ 3][k]*src[ 3*line  ] +
+             g_aiT32[TRANSFORM_INVERSE][ 5][k]*src[ 5*line  ] + g_aiT32[TRANSFORM_INVERSE][ 7][k]*src[ 7*line  ] +
+             g_aiT32[TRANSFORM_INVERSE][ 9][k]*src[ 9*line  ] + g_aiT32[TRANSFORM_INVERSE][11][k]*src[ 11*line ] +
+             g_aiT32[TRANSFORM_INVERSE][13][k]*src[ 13*line ] + g_aiT32[TRANSFORM_INVERSE][15][k]*src[ 15*line ] +
+             g_aiT32[TRANSFORM_INVERSE][17][k]*src[ 17*line ] + g_aiT32[TRANSFORM_INVERSE][19][k]*src[ 19*line ] +
+             g_aiT32[TRANSFORM_INVERSE][21][k]*src[ 21*line ] + g_aiT32[TRANSFORM_INVERSE][23][k]*src[ 23*line ] +
+             g_aiT32[TRANSFORM_INVERSE][25][k]*src[ 25*line ] + g_aiT32[TRANSFORM_INVERSE][27][k]*src[ 27*line ] +
+             g_aiT32[TRANSFORM_INVERSE][29][k]*src[ 29*line ] + g_aiT32[TRANSFORM_INVERSE][31][k]*src[ 31*line ];
+    }
+    // Second-stage odd part from rows 2, 6, 10, ..., 30.
+    for (k=0;k<8;k++)
+    {
+      EO[k] = g_aiT32[TRANSFORM_INVERSE][ 2][k]*src[ 2*line  ] + g_aiT32[TRANSFORM_INVERSE][ 6][k]*src[ 6*line  ] +
+              g_aiT32[TRANSFORM_INVERSE][10][k]*src[ 10*line ] + g_aiT32[TRANSFORM_INVERSE][14][k]*src[ 14*line ] +
+              g_aiT32[TRANSFORM_INVERSE][18][k]*src[ 18*line ] + g_aiT32[TRANSFORM_INVERSE][22][k]*src[ 22*line ] +
+              g_aiT32[TRANSFORM_INVERSE][26][k]*src[ 26*line ] + g_aiT32[TRANSFORM_INVERSE][30][k]*src[ 30*line ];
+    }
+    // Third-stage odd part from rows 4, 12, 20, 28.
+    for (k=0;k<4;k++)
+    {
+      EEO[k] = g_aiT32[TRANSFORM_INVERSE][ 4][k]*src[  4*line ] + g_aiT32[TRANSFORM_INVERSE][12][k]*src[ 12*line ] +
+               g_aiT32[TRANSFORM_INVERSE][20][k]*src[ 20*line ] + g_aiT32[TRANSFORM_INVERSE][28][k]*src[ 28*line ];
+    }
+    // Innermost even/odd parts from rows 0, 8, 16, 24.
+    EEEO[0] = g_aiT32[TRANSFORM_INVERSE][8][0]*src[ 8*line ] + g_aiT32[TRANSFORM_INVERSE][24][0]*src[ 24*line ];
+    EEEO[1] = g_aiT32[TRANSFORM_INVERSE][8][1]*src[ 8*line ] + g_aiT32[TRANSFORM_INVERSE][24][1]*src[ 24*line ];
+    EEEE[0] = g_aiT32[TRANSFORM_INVERSE][0][0]*src[ 0      ] + g_aiT32[TRANSFORM_INVERSE][16][0]*src[ 16*line ];
+    EEEE[1] = g_aiT32[TRANSFORM_INVERSE][0][1]*src[ 0      ] + g_aiT32[TRANSFORM_INVERSE][16][1]*src[ 16*line ];
+
+    /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+    EEE[0] = EEEE[0] + EEEO[0];
+    EEE[3] = EEEE[0] - EEEO[0];
+    EEE[1] = EEEE[1] + EEEO[1];
+    EEE[2] = EEEE[1] - EEEO[1];
+    for (k=0;k<4;k++)
+    {
+      EE[k] = EEE[k] + EEO[k];
+      EE[k+4] = EEE[3-k] - EEO[3-k];
+    }
+    for (k=0;k<8;k++)
+    {
+      E[k] = EE[k] + EO[k];
+      E[k+8] = EE[7-k] - EO[7-k];
+    }
+    for (k=0;k<16;k++)
+    {
+      dst[k]    = Clip3( outputMinimum, outputMaximum, (E[k] + O[k] + add)>>shift );
+      dst[k+16] = Clip3( outputMinimum, outputMaximum, (E[15-k] - O[15-k] + add)>>shift );
+    }
+    src ++;
+    dst += 32;
+  }
+}
+
+/** MxN forward transform (2D): horizontal pass then vertical pass, each
+*   dispatched to the size-specific partial-butterfly routine (or the fast
+*   DST for 4x4 when useDST is set).
+*  \param bitDepth          bit depth of the residual samples
+*  \param block             input data (residual)
+*  \param coeff             output data (transform coefficients)
+*  \param iWidth            width of transform (4, 8, 16 or 32)
+*  \param iHeight           height of transform (4, 8, 16 or 32)
+*  \param useDST            use the DST instead of the DCT (applies to 4x4 only)
+*  \param maxTrDynamicRange dynamic range used to derive the per-pass shifts
+*/
+Void xTrMxN(Int bitDepth, TCoeff *block, TCoeff *coeff, Int iWidth, Int iHeight, Bool useDST, const Int maxTrDynamicRange)
+{
+  static const Int TRANSFORM_MATRIX_SHIFT = g_transformMatrixShift[TRANSFORM_FORWARD];
+
+  // Shifts keep each pass's intermediate values within maxTrDynamicRange.
+  const Int shift_1st = ((g_aucConvertToBit[iWidth] + 2) +  bitDepth + TRANSFORM_MATRIX_SHIFT) - maxTrDynamicRange;
+  const Int shift_2nd = (g_aucConvertToBit[iHeight] + 2) + TRANSFORM_MATRIX_SHIFT;
+
+  assert(shift_1st >= 0);
+  assert(shift_2nd >= 0);
+
+  TCoeff tmp[ MAX_TU_SIZE * MAX_TU_SIZE ];
+
+  switch (iWidth)
+  {
+    case 4:
+      {
+        if ((iHeight == 4) && useDST)    // Check for DCT or DST
+        {
+           fastForwardDst( block, tmp, shift_1st );
+        }
+        else partialButterfly4 ( block, tmp, shift_1st, iHeight );
+      }
+      break;
+
+    case 8:     partialButterfly8 ( block, tmp, shift_1st, iHeight );  break;
+    case 16:    partialButterfly16( block, tmp, shift_1st, iHeight );  break;
+    case 32:    partialButterfly32( block, tmp, shift_1st, iHeight );  break;
+    default:
+      assert(0); exit (1); break;   // unsupported transform width
+  }
+
+  switch (iHeight)
+  {
+    case 4:
+      {
+        if ((iWidth == 4) && useDST)    // Check for DCT or DST
+        {
+          fastForwardDst( tmp, coeff, shift_2nd );
+        }
+        else partialButterfly4 ( tmp, coeff, shift_2nd, iWidth );
+      }
+      break;
+
+    case 8:     partialButterfly8 ( tmp, coeff, shift_2nd, iWidth );    break;
+    case 16:    partialButterfly16( tmp, coeff, shift_2nd, iWidth );    break;
+    case 32:    partialButterfly32( tmp, coeff, shift_2nd, iWidth );    break;
+    default:
+      assert(0); exit (1); break;   // unsupported transform height
+  }
+}
+
+
+/** MxN inverse transform (2D), implemented as two separable 1-D stages
+*  \param bitDepth input data (bit depth of the output residual; feeds into the second-stage shift)
+*  \param coeff input data (transform coefficients)
+*  \param block output data (residual)
+*  \param iWidth input data (width of transform)
+*  \param iHeight input data (height of transform)
+*  \param useDST input data (use the inverse DST in place of the inverse DCT; only honoured for 4x4 blocks)
+*  \param maxTrDynamicRange input data (maximum dynamic range allowed for intermediate coefficients)
+*/
+Void xITrMxN(Int bitDepth, TCoeff *coeff, TCoeff *block, Int iWidth, Int iHeight, Bool useDST, const Int maxTrDynamicRange)
+{
+  static const Int TRANSFORM_MATRIX_SHIFT = g_transformMatrixShift[TRANSFORM_INVERSE];
+
+  Int shift_1st = TRANSFORM_MATRIX_SHIFT + 1; //1 has been added to shift_1st at the expense of shift_2nd
+  Int shift_2nd = (TRANSFORM_MATRIX_SHIFT + maxTrDynamicRange - 1) - bitDepth;
+  // Intermediate (stage-1) results are clamped to the transform dynamic range.
+  const TCoeff clipMinimum = -(1 << maxTrDynamicRange);
+  const TCoeff clipMaximum =  (1 << maxTrDynamicRange) - 1;
+
+  assert(shift_1st >= 0);
+  assert(shift_2nd >= 0);
+
+  // Intermediate buffer between the two separable 1-D inverse-transform stages.
+  TCoeff tmp[MAX_TU_SIZE * MAX_TU_SIZE];
+
+  // Stage 1: iHeight-point inverse transforms applied to iWidth lines.
+  switch (iHeight)
+  {
+    case 4:
+      {
+        if ((iWidth == 4) && useDST)    // Check for DCT or DST
+        {
+          fastInverseDst( coeff, tmp, shift_1st, clipMinimum, clipMaximum);
+        }
+        else partialButterflyInverse4 ( coeff, tmp, shift_1st, iWidth, clipMinimum, clipMaximum);
+      }
+      break;
+
+    case  8: partialButterflyInverse8 ( coeff, tmp, shift_1st, iWidth, clipMinimum, clipMaximum); break;
+    case 16: partialButterflyInverse16( coeff, tmp, shift_1st, iWidth, clipMinimum, clipMaximum); break;
+    case 32: partialButterflyInverse32( coeff, tmp, shift_1st, iWidth, clipMinimum, clipMaximum); break;
+
+    default:
+      assert(0); exit (1); break;
+  }
+
+  // Stage 2: iWidth-point inverse transforms applied to iHeight lines of the intermediate result.
+  switch (iWidth)
+  {
+    // Clipping here is not in the standard, but is used to protect the "Pel" data type into which the inverse-transformed samples will be copied
+    case 4:
+      {
+        if ((iHeight == 4) && useDST)    // Check for DCT or DST
+        {
+          fastInverseDst( tmp, block, shift_2nd, std::numeric_limits<Pel>::min(), std::numeric_limits<Pel>::max() );
+        }
+        else partialButterflyInverse4 ( tmp, block, shift_2nd, iHeight, std::numeric_limits<Pel>::min(), std::numeric_limits<Pel>::max());
+      }
+      break;
+
+    case  8: partialButterflyInverse8 ( tmp, block, shift_2nd, iHeight, std::numeric_limits<Pel>::min(), std::numeric_limits<Pel>::max()); break;
+    case 16: partialButterflyInverse16( tmp, block, shift_2nd, iHeight, std::numeric_limits<Pel>::min(), std::numeric_limits<Pel>::max()); break;
+    case 32: partialButterflyInverse32( tmp, block, shift_2nd, iHeight, std::numeric_limits<Pel>::min(), std::numeric_limits<Pel>::max()); break;
+
+    default:
+      assert(0); exit (1); break;
+  }
+}
+
+
+// To minimize the distortion only. No rate is considered.
+//
+// Sign-bit hiding for hard-decision quantisation: within each coefficient
+// group, the sign of the first significant coefficient is inferred from the
+// parity of the group's absolute coefficient sum.  When that parity does not
+// match the sign, one coefficient in the group is nudged by +/-1; the
+// position is chosen to minimise the distortion increase, estimated from the
+// scaled quantisation-error terms in deltaU.
+//   pQCoef - quantised coefficients (modified in place)
+//   pCoef  - original (pre-quantisation) transform coefficients
+//   deltaU - per-coefficient quantisation error, as computed in xQuant
+Void TComTrQuant::signBitHidingHDQ( const ComponentID compID, TCoeff* pQCoef, TCoeff* pCoef, TCoeff* deltaU, const TUEntropyCodingParameters &codingParameters )
+{
+  const UInt width     = codingParameters.widthInGroups  << MLS_CG_LOG2_WIDTH;
+  const UInt height    = codingParameters.heightInGroups << MLS_CG_LOG2_HEIGHT;
+  const UInt groupSize = 1 << MLS_CG_SIZE;
+
+  const TCoeff entropyCodingMinimum = -(1 << g_maxTrDynamicRange[toChannelType(compID)]);
+  const TCoeff entropyCodingMaximum =  (1 << g_maxTrDynamicRange[toChannelType(compID)]) - 1;
+
+  Int lastCG = -1;
+  Int absSum = 0 ;
+  Int n ;
+
+  // Iterate over coefficient groups from the last one (in scan order) to the first.
+  for( Int subSet = (width*height-1) >> MLS_CG_SIZE; subSet >= 0; subSet-- )
+  {
+    Int  subPos = subSet << MLS_CG_SIZE;
+    Int  firstNZPosInCG=groupSize , lastNZPosInCG=-1 ;
+    absSum = 0 ;
+
+    // Find the last non-zero coefficient of this group in scan order.
+    for(n = groupSize-1; n >= 0; --n )
+    {
+      if( pQCoef[ codingParameters.scan[ n + subPos ]] )
+      {
+        lastNZPosInCG = n;
+        break;
+      }
+    }
+
+    // Find the first non-zero coefficient of this group in scan order.
+    for(n = 0; n <groupSize; n++ )
+    {
+      if( pQCoef[ codingParameters.scan[ n + subPos ]] )
+      {
+        firstNZPosInCG = n;
+        break;
+      }
+    }
+
+    // Parity source: sum of levels between the first and last significant positions.
+    for(n = firstNZPosInCG; n <=lastNZPosInCG; n++ )
+    {
+      absSum += Int(pQCoef[ codingParameters.scan[ n + subPos ]]);
+    }
+
+    // Remember whether this is the last group containing a significant coefficient.
+    if(lastNZPosInCG>=0 && lastCG==-1)
+    {
+      lastCG = 1 ;
+    }
+
+    // Sign hiding is only applied when the significant span is long enough.
+    if( lastNZPosInCG-firstNZPosInCG>=SBH_THRESHOLD )
+    {
+      UInt signbit = (pQCoef[codingParameters.scan[subPos+firstNZPosInCG]]>0?0:1) ;
+      if( signbit!=(absSum&0x1) )  //compare signbit with sum_parity
+      {
+        // Parity mismatch: search for the cheapest single +/-1 adjustment.
+        TCoeff curCost    = std::numeric_limits<TCoeff>::max();
+        TCoeff minCostInc = std::numeric_limits<TCoeff>::max();
+        Int minPos =-1, finalChange=0, curChange=0;
+
+        for( n = (lastCG==1?lastNZPosInCG:groupSize-1) ; n >= 0; --n )
+        {
+          UInt blkPos   = codingParameters.scan[ n+subPos ];
+          if(pQCoef[ blkPos ] != 0 )
+          {
+            if(deltaU[blkPos]>0)
+            {
+              curCost = - deltaU[blkPos];
+              curChange=1 ;
+            }
+            else
+            {
+              //curChange =-1;
+              // Never remove the first significant coefficient of the group
+              // (that would move the hidden-sign position).
+              if(n==firstNZPosInCG && abs(pQCoef[blkPos])==1)
+              {
+                curCost = std::numeric_limits<TCoeff>::max();
+              }
+              else
+              {
+                curCost = deltaU[blkPos];
+                curChange =-1;
+              }
+            }
+          }
+          else
+          {
+            if(n<firstNZPosInCG)
+            {
+              // Creating a new first significant coefficient: its sign must
+              // already agree with the hidden sign bit.
+              UInt thisSignBit = (pCoef[blkPos]>=0?0:1);
+              if(thisSignBit != signbit )
+              {
+                curCost = std::numeric_limits<TCoeff>::max();
+              }
+              else
+              {
+                curCost = - (deltaU[blkPos])  ;
+                curChange = 1 ;
+              }
+            }
+            else
+            {
+              curCost = - (deltaU[blkPos])  ;
+              curChange = 1 ;
+            }
+          }
+
+          if( curCost<minCostInc)
+          {
+            minCostInc = curCost ;
+            finalChange = curChange ;
+            minPos = blkPos ;
+          }
+        } //CG loop
+
+        // Avoid pushing the coefficient outside the codable level range.
+        if(pQCoef[minPos] == entropyCodingMaximum || pQCoef[minPos] == entropyCodingMinimum)
+        {
+          finalChange = -1;
+        }
+
+        // Apply the change in the direction of the original coefficient's sign.
+        if(pCoef[minPos]>=0)
+        {
+          pQCoef[minPos] += finalChange ;
+        }
+        else
+        {
+          pQCoef[minPos] -= finalChange ;
+        }
+      } // Hide
+    }
+    if(lastCG==1)
+    {
+      lastCG=0 ;
+    }
+  } // TU loop
+
+  return;
+}
+
+
+// Quantise one TU.  When RDOQ is enabled for this block type, the
+// rate-distortion-optimised quantiser is used; otherwise a plain dead-zone
+// quantiser runs, optionally followed by sign-bit hiding.
+//   pSrc     - input transform coefficients
+//   pDes     - output quantised coefficients
+//   uiAbsSum - output sum of absolute quantised levels (used for the CBF)
+Void TComTrQuant::xQuant(       TComTU       &rTu,
+                                TCoeff      * pSrc,
+                                TCoeff      * pDes,
+#if ADAPTIVE_QP_SELECTION
+                                TCoeff      *pArlDes,
+#endif
+                                TCoeff       &uiAbsSum,
+                          const ComponentID   compID,
+                          const QpParam      &cQP )
+{
+  const TComRectangle &rect = rTu.getRect(compID);
+  const UInt uiWidth        = rect.width;
+  const UInt uiHeight       = rect.height;
+  TComDataCU* pcCU          = rTu.getCU();
+  const UInt uiAbsPartIdx   = rTu.GetAbsPartIdxTU();
+
+  TCoeff* piCoef    = pSrc;
+  TCoeff* piQCoef   = pDes;
+#if ADAPTIVE_QP_SELECTION
+  TCoeff* piArlCCoef = pArlDes;
+#endif
+
+  const Bool useTransformSkip = pcCU->getTransformSkip(uiAbsPartIdx, compID);
+
+  // Transform-skip blocks have their own RDOQ enable flag.
+  Bool useRDOQ = useTransformSkip ? m_useRDOQTS : m_useRDOQ;
+  if ( useRDOQ && (isLuma(compID) || RDOQ_CHROMA) )
+  {
+#if ADAPTIVE_QP_SELECTION
+    xRateDistOptQuant( rTu, piCoef, pDes, pArlDes, uiAbsSum, compID, cQP );
+#else
+    xRateDistOptQuant( rTu, piCoef, pDes, uiAbsSum, compID, cQP );
+#endif
+  }
+  else
+  {
+    // Non-RDOQ path: plain dead-zone quantisation.
+    TUEntropyCodingParameters codingParameters;
+    getTUEntropyCodingParameters(codingParameters, rTu, compID);
+
+    const TCoeff entropyCodingMinimum = -(1 << g_maxTrDynamicRange[toChannelType(compID)]);
+    const TCoeff entropyCodingMaximum =  (1 << g_maxTrDynamicRange[toChannelType(compID)]) - 1;
+
+    // Per-coefficient quantisation error, consumed by signBitHidingHDQ below.
+    TCoeff deltaU[MAX_TU_SIZE * MAX_TU_SIZE];
+
+    const UInt uiLog2TrSize = rTu.GetEquivalentLog2TrSize(compID);
+
+    Int scalingListType = getScalingListType(pcCU->getPredictionMode(uiAbsPartIdx), compID);
+    assert(scalingListType < SCALING_LIST_NUM);
+    Int *piQuantCoeff = getQuantCoeff(scalingListType, cQP.rem, uiLog2TrSize-2);
+
+    const Bool enableScalingLists             = getUseScalingList(uiWidth, uiHeight, (pcCU->getTransformSkip(uiAbsPartIdx, compID) != 0));
+    const Int  defaultQuantisationCoefficient = g_quantScales[cQP.rem];
+
+    /* for 422 chroma blocks, the effective scaling applied during transformation is not a power of 2, hence it cannot be
+     * implemented as a bit-shift (the quantised result will be sqrt(2) * larger than required). Alternatively, adjust the
+     * uiLog2TrSize applied in iTransformShift, such that the result is 1/sqrt(2) the required result (i.e. smaller)
+     * Then a QP+3 (sqrt(2)) or QP-3 (1/sqrt(2)) method could be used to get the required result
+     */
+
+    // Represents scaling through forward transform
+    Int iTransformShift = getTransformShift(toChannelType(compID), uiLog2TrSize);
+    if (useTransformSkip && pcCU->getSlice()->getSPS()->getUseExtendedPrecision())
+    {
+      iTransformShift = std::max<Int>(0, iTransformShift);
+    }
+
+    const Int iQBits = QUANT_SHIFT + cQP.per + iTransformShift;
+    // QBits will be OK for any internal bit depth as the reduction in transform shift is balanced by an increase in Qp_per due to QpBDOffset
+
+#if ADAPTIVE_QP_SELECTION
+    Int iQBitsC = MAX_INT;
+    Int iAddC   = MAX_INT;
+
+    if (m_bUseAdaptQpSelect)
+    {
+      iQBitsC = iQBits - ARL_C_PRECISION;
+      iAddC   = 1 << (iQBitsC-1);
+    }
+#endif
+
+    // Dead-zone rounding offset: 171/512 (~1/3) for I slices, 85/512 (~1/6) otherwise.
+    const Int iAdd   = (pcCU->getSlice()->getSliceType()==I_SLICE ? 171 : 85) << (iQBits-9);
+    const Int qBits8 = iQBits - 8;
+
+    for( Int uiBlockPos = 0; uiBlockPos < uiWidth*uiHeight; uiBlockPos++ )
+    {
+      const TCoeff iLevel   = piCoef[uiBlockPos];
+      const TCoeff iSign    = (iLevel < 0 ? -1: 1);
+
+      const Int64  tmpLevel = (Int64)abs(iLevel) * (enableScalingLists ? piQuantCoeff[uiBlockPos] : defaultQuantisationCoefficient);
+
+#if ADAPTIVE_QP_SELECTION
+      if( m_bUseAdaptQpSelect )
+      {
+        piArlCCoef[uiBlockPos] = (TCoeff)((tmpLevel + iAddC ) >> iQBitsC);
+      }
+#endif
+
+      const TCoeff quantisedMagnitude = TCoeff((tmpLevel + iAdd ) >> iQBits);
+      // Scaled residual quantisation error, used as the cost metric for sign-bit hiding.
+      deltaU[uiBlockPos] = (TCoeff)((tmpLevel - (quantisedMagnitude<<iQBits) )>> qBits8);
+
+      uiAbsSum += quantisedMagnitude;
+      const TCoeff quantisedCoefficient = quantisedMagnitude * iSign;
+
+      piQCoef[uiBlockPos] = Clip3<TCoeff>( entropyCodingMinimum, entropyCodingMaximum, quantisedCoefficient );
+    } // for n
+
+    if( pcCU->getSlice()->getPPS()->getSignHideFlag() )
+    {
+      if(uiAbsSum >= 2) //this prevents TUs with only one coefficient of value 1 from being tested
+      {
+        signBitHidingHDQ( compID, piQCoef, piCoef, deltaU, codingParameters ) ;
+      }
+    }
+  } //if RDOQ
+  //return;
+}
+
+// Dequantise one TU (inverse of xQuant).  Two paths: one using a scaling
+// list (per-coefficient dequantisation factors) and one using the flat
+// default scale g_invQuantScales[QP%6].  In both, inputs are first clamped
+// to a bit depth that makes the intermediate product safe, and results are
+// clamped to the transform dynamic range.
+//   pSrc - input quantised coefficients
+//   pDes - output dequantised transform coefficients
+Void TComTrQuant::xDeQuant(       TComTU        &rTu,
+                            const TCoeff       * pSrc,
+                                  TCoeff       * pDes,
+                            const ComponentID    compID,
+                            const QpParam       &cQP )
+{
+  assert(compID<MAX_NUM_COMPONENT);
+
+        TComDataCU          *pcCU               = rTu.getCU();
+  const UInt                 uiAbsPartIdx       = rTu.GetAbsPartIdxTU();
+  const TComRectangle       &rect               = rTu.getRect(compID);
+  const UInt                 uiWidth            = rect.width;
+  const UInt                 uiHeight           = rect.height;
+  const TCoeff        *const piQCoef            = pSrc;
+        TCoeff        *const piCoef             = pDes;
+  const UInt                 uiLog2TrSize       = rTu.GetEquivalentLog2TrSize(compID);
+  const UInt                 numSamplesInBlock  = uiWidth*uiHeight;
+  const TCoeff               transformMinimum   = -(1 << g_maxTrDynamicRange[toChannelType(compID)]);
+  const TCoeff               transformMaximum   =  (1 << g_maxTrDynamicRange[toChannelType(compID)]) - 1;
+  const Bool                 enableScalingLists = getUseScalingList(uiWidth, uiHeight, (pcCU->getTransformSkip(uiAbsPartIdx, compID) != 0));
+  const Int                  scalingListType    = getScalingListType(pcCU->getPredictionMode(uiAbsPartIdx), compID);
+
+  assert (scalingListType < SCALING_LIST_NUM);
+  assert ( uiWidth <= m_uiMaxTrSize );
+
+  // Represents scaling through forward transform
+  const Bool bClipTransformShiftTo0 = (pcCU->getTransformSkip(uiAbsPartIdx, compID) != 0) && pcCU->getSlice()->getSPS()->getUseExtendedPrecision();
+  const Int  originalTransformShift = getTransformShift(toChannelType(compID), uiLog2TrSize);
+  const Int  iTransformShift        = bClipTransformShiftTo0 ? std::max<Int>(0, originalTransformShift) : originalTransformShift;
+
+  const Int QP_per = cQP.per;
+  const Int QP_rem = cQP.rem;
+
+  // May be negative for high bit depths, in which case a left shift is used instead.
+  const Int rightShift = (IQUANT_SHIFT - (iTransformShift + QP_per)) + (enableScalingLists ? LOG2_SCALING_LIST_NEUTRAL_VALUE : 0);
+
+  if(enableScalingLists)
+  {
+    //from the dequantisation equation:
+    //iCoeffQ                         = ((Intermediate_Int(clipQCoef) * piDequantCoef[deQuantIdx]) + iAdd ) >> rightShift
+    //(sizeof(Intermediate_Int) * 8)  =              inputBitDepth    +    dequantCoefBits                   - rightShift
+    const UInt             dequantCoefBits     = 1 + IQUANT_SHIFT + SCALING_LIST_BITS;
+    const UInt             targetInputBitDepth = std::min<UInt>((g_maxTrDynamicRange[toChannelType(compID)] + 1), (((sizeof(Intermediate_Int) * 8) + rightShift) - dequantCoefBits));
+
+    const Intermediate_Int inputMinimum        = -(1 << (targetInputBitDepth - 1));
+    const Intermediate_Int inputMaximum        =  (1 << (targetInputBitDepth - 1)) - 1;
+
+    Int *piDequantCoef = getDequantCoeff(scalingListType,QP_rem,uiLog2TrSize-2);
+
+    if(rightShift > 0)
+    {
+      const Intermediate_Int iAdd = 1 << (rightShift - 1);
+
+      for( Int n = 0; n < numSamplesInBlock; n++ )
+      {
+        const TCoeff           clipQCoef = TCoeff(Clip3<Intermediate_Int>(inputMinimum, inputMaximum, piQCoef[n]));
+        const Intermediate_Int iCoeffQ   = ((Intermediate_Int(clipQCoef) * piDequantCoef[n]) + iAdd ) >> rightShift;
+
+        piCoef[n] = TCoeff(Clip3<Intermediate_Int>(transformMinimum,transformMaximum,iCoeffQ));
+      }
+    }
+    else
+    {
+      const Int leftShift = -rightShift;
+
+      for( Int n = 0; n < numSamplesInBlock; n++ )
+      {
+        const TCoeff           clipQCoef = TCoeff(Clip3<Intermediate_Int>(inputMinimum, inputMaximum, piQCoef[n]));
+        const Intermediate_Int iCoeffQ   = (Intermediate_Int(clipQCoef) * piDequantCoef[n]) << leftShift;
+
+        piCoef[n] = TCoeff(Clip3<Intermediate_Int>(transformMinimum,transformMaximum,iCoeffQ));
+      }
+    }
+  }
+  else
+  {
+    // Flat dequantisation: a single scale for the whole block.
+    const Int scale     =  g_invQuantScales[QP_rem];
+    const Int scaleBits =     (IQUANT_SHIFT + 1)   ;
+
+    //from the dequantisation equation:
+    //iCoeffQ                         = Intermediate_Int((Int64(clipQCoef) * scale + iAdd) >> rightShift);
+    //(sizeof(Intermediate_Int) * 8)  =                    inputBitDepth   + scaleBits      - rightShift
+    const UInt             targetInputBitDepth = std::min<UInt>((g_maxTrDynamicRange[toChannelType(compID)] + 1), (((sizeof(Intermediate_Int) * 8) + rightShift) - scaleBits));
+    const Intermediate_Int inputMinimum        = -(1 << (targetInputBitDepth - 1));
+    const Intermediate_Int inputMaximum        =  (1 << (targetInputBitDepth - 1)) - 1;
+
+    if (rightShift > 0)
+    {
+      const Intermediate_Int iAdd = 1 << (rightShift - 1);
+
+      for( Int n = 0; n < numSamplesInBlock; n++ )
+      {
+        const TCoeff           clipQCoef = TCoeff(Clip3<Intermediate_Int>(inputMinimum, inputMaximum, piQCoef[n]));
+        const Intermediate_Int iCoeffQ   = (Intermediate_Int(clipQCoef) * scale + iAdd) >> rightShift;
+
+        piCoef[n] = TCoeff(Clip3<Intermediate_Int>(transformMinimum,transformMaximum,iCoeffQ));
+      }
+    }
+    else
+    {
+      const Int leftShift = -rightShift;
+
+      for( Int n = 0; n < numSamplesInBlock; n++ )
+      {
+        const TCoeff           clipQCoef = TCoeff(Clip3<Intermediate_Int>(inputMinimum, inputMaximum, piQCoef[n]));
+        const Intermediate_Int iCoeffQ   = (Intermediate_Int(clipQCoef) * scale) << leftShift;
+
+        piCoef[n] = TCoeff(Clip3<Intermediate_Int>(transformMinimum,transformMaximum,iCoeffQ));
+      }
+    }
+  }
+}
+
+
+// Store the encoder/decoder configuration for this transform-quantiser
+// instance; plain member initialisation, no allocation.
+Void TComTrQuant::init(   UInt  uiMaxTrSize,
+                          Bool  bUseRDOQ,
+                          Bool  bUseRDOQTS,
+                          Bool  bEnc,
+                          Bool  useTransformSkipFast
+#if ADAPTIVE_QP_SELECTION
+                        , Bool bUseAdaptQpSelect
+#endif
+                       )
+{
+  m_bEnc                 = bEnc;
+  m_uiMaxTrSize          = uiMaxTrSize;
+  m_useTransformSkipFast = useTransformSkipFast;
+  m_useRDOQ              = bUseRDOQ;
+  m_useRDOQTS            = bUseRDOQTS;
+#if ADAPTIVE_QP_SELECTION
+  m_bUseAdaptQpSelect    = bUseAdaptQpSelect;
+#endif
+}
+
+
+// Forward transform + quantisation of one TU, including RDPCM mode
+// selection.  For transquant-bypass CUs the residual is copied (optionally
+// rotated) directly into the coefficient buffer.  Finally the TU's CBF is
+// derived from the resulting absolute coefficient sum.
+//   pcResidual - input residual samples (stride uiStride)
+//   rpcCoeff   - output coefficient buffer
+//   uiAbsSum   - output sum of absolute coefficient levels
+Void TComTrQuant::transformNxN(       TComTU        & rTu,
+                                const ComponentID     compID,
+                                      Pel          *  pcResidual,
+                                const UInt            uiStride,
+                                      TCoeff       *  rpcCoeff,
+#if ADAPTIVE_QP_SELECTION
+                                      TCoeff       *  pcArlCoeff,
+#endif
+                                      TCoeff        & uiAbsSum,
+                                const QpParam       & cQP
+                              )
+{
+  const TComRectangle &rect = rTu.getRect(compID);
+  const UInt uiWidth        = rect.width;
+  const UInt uiHeight       = rect.height;
+  TComDataCU* pcCU          = rTu.getCU();
+  const UInt uiAbsPartIdx   = rTu.GetAbsPartIdxTU();
+  const UInt uiOrgTrDepth   = rTu.GetTransformDepthRel();
+
+  uiAbsSum=0;
+
+  // rdpcmNxN may fully code the TU itself; if so, rdpcmMode != RDPCM_OFF on return.
+  RDPCMMode rdpcmMode = RDPCM_OFF;
+  rdpcmNxN( rTu, compID, pcResidual, uiStride, cQP, rpcCoeff, uiAbsSum, rdpcmMode );
+
+  if (rdpcmMode == RDPCM_OFF)
+  {
+    uiAbsSum = 0;
+    //transform and quantise
+    if(pcCU->getCUTransquantBypass(uiAbsPartIdx))
+    {
+      // Lossless path: coefficients are the raw residual samples (optionally rotated).
+      const Bool rotateResidual = rTu.isNonTransformedResidualRotated(compID);
+      const UInt uiSizeMinus1   = (uiWidth * uiHeight) - 1;
+
+      for (UInt y = 0, coefficientIndex = 0; y<uiHeight; y++)
+      {
+        for (UInt x = 0; x<uiWidth; x++, coefficientIndex++)
+        {
+          const Pel currentSample = pcResidual[(y * uiStride) + x];
+
+          rpcCoeff[rotateResidual ? (uiSizeMinus1 - coefficientIndex) : coefficientIndex] = currentSample;
+          uiAbsSum += TCoeff(abs(currentSample));
+        }
+      }
+    }
+    else
+    {
+#ifdef DEBUG_TRANSFORM_AND_QUANTISE
+      std::cout << g_debugCounter << ": " << uiWidth << "x" << uiHeight << " channel " << compID << " TU at input to transform\n";
+      printBlock(pcResidual, uiWidth, uiHeight, uiStride);
+#endif
+
+      assert( (pcCU->getSlice()->getSPS()->getMaxTrSize() >= uiWidth) );
+
+      if(pcCU->getTransformSkip(uiAbsPartIdx, compID) != 0)
+      {
+        xTransformSkip( pcResidual, uiStride, m_plTempCoeff, rTu, compID );
+      }
+      else
+      {
+        xT( compID, rTu.useDST(compID), pcResidual, uiStride, m_plTempCoeff, uiWidth, uiHeight );
+      }
+
+#ifdef DEBUG_TRANSFORM_AND_QUANTISE
+      std::cout << g_debugCounter << ": " << uiWidth << "x" << uiHeight << " channel " << compID << " TU between transform and quantiser\n";
+      printBlock(m_plTempCoeff, uiWidth, uiHeight, uiWidth);
+#endif
+
+      xQuant( rTu, m_plTempCoeff, rpcCoeff,
+
+#if ADAPTIVE_QP_SELECTION
+              pcArlCoeff,
+#endif
+              uiAbsSum, compID, cQP );
+
+#ifdef DEBUG_TRANSFORM_AND_QUANTISE
+      std::cout << g_debugCounter << ": " << uiWidth << "x" << uiHeight << " channel " << compID << " TU at output of quantiser\n";
+      printBlock(rpcCoeff, uiWidth, uiHeight, uiWidth);
+#endif
+    }
+  }
+
+    //set the CBF
+  pcCU->setCbfPartRange((((uiAbsSum > 0) ? 1 : 0) << uiOrgTrDepth), compID, uiAbsPartIdx, rTu.GetAbsPartIdxNumParts(compID));
+}
+
+
+// Dequantisation + inverse transform of one TU, writing the reconstructed
+// residual into pcResidual.  Non-square TUs (inter only; see comment below)
+// are split vertically and each section is processed recursively.
+// Transquant-bypass TUs copy coefficients straight back to residual.
+// Finishes by inverting any RDPCM prediction via invRdpcmNxN.
+Void TComTrQuant::invTransformNxN(      TComTU        &rTu,
+                                  const ComponentID    compID,
+                                        Pel          *pcResidual,
+                                  const UInt           uiStride,
+                                        TCoeff       * pcCoeff,
+                                  const QpParam       &cQP
+                                        DEBUG_STRING_FN_DECLAREP(psDebug))
+{
+  TComDataCU* pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
+  const TComRectangle &rect = rTu.getRect(compID);
+  const UInt uiWidth = rect.width;
+  const UInt uiHeight = rect.height;
+
+  if (uiWidth != uiHeight) //for intra, the TU will have been split above this level, so this condition won't be true, hence this only affects inter
+  {
+    //------------------------------------------------
+
+    //recurse deeper
+
+    TComTURecurse subTURecurse(rTu, false, TComTU::VERTICAL_SPLIT, true, compID);
+
+    do
+    {
+      //------------------
+
+      // Offset sample and coefficient pointers to this section of the split TU.
+      const UInt lineOffset = subTURecurse.GetSectionNumber() * subTURecurse.getRect(compID).height;
+
+      Pel    *subTUResidual     = pcResidual + (lineOffset * uiStride);
+      TCoeff *subTUCoefficients = pcCoeff     + (lineOffset * subTURecurse.getRect(compID).width);
+
+      invTransformNxN(subTURecurse, compID, subTUResidual, uiStride, subTUCoefficients, cQP DEBUG_STRING_PASS_INTO(psDebug));
+
+      //------------------
+
+    }
+    while (subTURecurse.nextSection(rTu));
+
+    //------------------------------------------------
+
+    return;
+  }
+
+#if defined DEBUG_STRING
+  if (psDebug)
+  {
+    std::stringstream ss(stringstream::out);
+    printBlockToStream(ss, (compID==0)?"###InvTran ip Ch0: " : ((compID==1)?"###InvTran ip Ch1: ":"###InvTran ip Ch2: "), pcCoeff, uiWidth, uiHeight, uiWidth);
+    DEBUG_STRING_APPEND((*psDebug), ss.str())
+  }
+#endif
+
+  if(pcCU->getCUTransquantBypass(uiAbsPartIdx))
+  {
+    // Lossless path: residual is the coefficient data itself (optionally rotated).
+    const Bool rotateResidual = rTu.isNonTransformedResidualRotated(compID);
+    const UInt uiSizeMinus1   = (uiWidth * uiHeight) - 1;
+
+    for (UInt y = 0, coefficientIndex = 0; y<uiHeight; y++)
+    {
+      for (UInt x = 0; x<uiWidth; x++, coefficientIndex++)
+      {
+        pcResidual[(y * uiStride) + x] = Pel(pcCoeff[rotateResidual ? (uiSizeMinus1 - coefficientIndex) : coefficientIndex]);
+      }
+    }
+  }
+  else
+  {
+#ifdef DEBUG_TRANSFORM_AND_QUANTISE
+    std::cout << g_debugCounter << ": " << uiWidth << "x" << uiHeight << " channel " << compID << " TU at input to dequantiser\n";
+    printBlock(pcCoeff, uiWidth, uiHeight, uiWidth);
+#endif
+
+    xDeQuant(rTu, pcCoeff, m_plTempCoeff, compID, cQP);
+
+#ifdef DEBUG_TRANSFORM_AND_QUANTISE
+    std::cout << g_debugCounter << ": " << uiWidth << "x" << uiHeight << " channel " << compID << " TU between dequantiser and inverse-transform\n";
+    printBlock(m_plTempCoeff, uiWidth, uiHeight, uiWidth);
+#endif
+
+#if defined DEBUG_STRING
+    if (psDebug)
+    {
+      std::stringstream ss(stringstream::out);
+      printBlockToStream(ss, "###InvTran deq: ", m_plTempCoeff, uiWidth, uiHeight, uiWidth);
+      (*psDebug)+=ss.str();
+    }
+#endif
+
+    if(pcCU->getTransformSkip(uiAbsPartIdx, compID))
+    {
+      xITransformSkip( m_plTempCoeff, pcResidual, uiStride, rTu, compID );
+
+#if defined DEBUG_STRING
+      if (psDebug)
+      {
+        std::stringstream ss(stringstream::out);
+        printBlockToStream(ss, "###InvTran resi: ", pcResidual, uiWidth, uiHeight, uiStride);
+        (*psDebug)+=ss.str();
+        (*psDebug)+="(<- was a Transform-skipped block)\n";
+      }
+#endif
+    }
+    else
+    {
+      xIT( compID, rTu.useDST(compID), m_plTempCoeff, pcResidual, uiStride, uiWidth, uiHeight );
+
+#if defined DEBUG_STRING
+      if (psDebug)
+      {
+        std::stringstream ss(stringstream::out);
+        printBlockToStream(ss, "###InvTran resi: ", pcResidual, uiWidth, uiHeight, uiStride);
+        (*psDebug)+=ss.str();
+        (*psDebug)+="(<- was a Transformed block)\n";
+      }
+#endif
+    }
+
+#ifdef DEBUG_TRANSFORM_AND_QUANTISE
+    std::cout << g_debugCounter << ": " << uiWidth << "x" << uiHeight << " channel " << compID << " TU at output of inverse-transform\n";
+    printBlock(pcResidual, uiWidth, uiHeight, uiStride);
+    g_debugCounter++;
+#endif
+  }
+
+  invRdpcmNxN( rTu, compID, pcResidual, uiStride );
+}
+
+// Recursively walk the TU tree and inverse-transform every coded leaf TU of
+// the given component, accumulating the residual into pResidual.
+Void TComTrQuant::invRecurTransformNxN( const ComponentID compID,
+                                        TComYuv *pResidual,
+                                        TComTU &rTu)
+{
+  // Nothing to do if this TU has no section for the requested component.
+  if (!rTu.ProcessComponentSection(compID))
+  {
+    return;
+  }
+
+  TComDataCU *const pcCU         = rTu.getCU();
+  const UInt        absPartIdxTU = rTu.GetAbsPartIdxTU();
+  const UInt        relTrDepth   = rTu.GetTransformDepthRel();
+
+  // Skip zero-CBF TUs, except chroma when cross-component prediction may
+  // still contribute a residual derived from luma.
+  if ((pcCU->getCbf(absPartIdxTU, compID, relTrDepth) == 0) &&
+      (isLuma(compID) || !pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction()))
+  {
+    return;
+  }
+
+  if (relTrDepth != pcCU->getTransformIdx(absPartIdxTU))
+  {
+    // Not yet at the coded transform depth: descend into the child TUs.
+    TComTURecurse tuRecurseChild(rTu, false);
+    do
+    {
+      invRecurTransformNxN(compID, pResidual, tuRecurseChild);
+    } while (tuRecurseChild.nextSection(rTu));
+    return;
+  }
+
+  // Leaf TU: locate its samples and coefficients within the picture buffers.
+  const TComRectangle &rect          = rTu.getRect(compID);
+  const Int            uiStride      = pResidual->getStride(compID);
+        Pel           *planeBase     = pResidual->getAddr(compID);
+  const UInt           offsetInPlane = (rect.x0 + uiStride * rect.y0);
+        Pel           *pResiPtr      = planeBase + offsetInPlane;
+        TCoeff        *pcCoeff       = pcCU->getCoeff(compID) + rTu.getCoefficientOffset(compID);
+
+  const QpParam cQP(*pcCU, compID);
+
+  if (pcCU->getCbf(absPartIdxTU, compID, relTrDepth) != 0)
+  {
+    DEBUG_STRING_NEW(sTemp)
+#ifdef DEBUG_STRING
+    std::string *psDebug=((DebugOptionList::DebugString_InvTran.getInt()&(pcCU->isIntra(absPartIdxTU)?1:(pcCU->isInter(absPartIdxTU)?2:4)))!=0) ? &sTemp : 0;
+#endif
+
+    invTransformNxN( rTu, compID, pResiPtr, uiStride, pcCoeff, cQP DEBUG_STRING_PASS_INTO(psDebug) );
+
+#ifdef DEBUG_STRING
+    if (psDebug != 0)
+    {
+      std::cout << (*psDebug);
+    }
+#endif
+  }
+
+  // Cross-component prediction: add the scaled luma residual to chroma.
+  if (isChroma(compID) && (pcCU->getCrossComponentPredictionAlpha(absPartIdxTU, compID) != 0))
+  {
+    const Pel *lumaBase   = pResidual->getAddr( COMPONENT_Y );
+    const Int  strideLuma = pResidual->getStride( COMPONENT_Y );
+    const Int  tuWidth    = rTu.getRect( compID ).width;
+    const Int  tuHeight   = rTu.getRect( compID ).height;
+
+    if (pcCU->getCbf(absPartIdxTU, COMPONENT_Y, relTrDepth) != 0)
+    {
+      pResiPtr = planeBase + offsetInPlane;
+      const Pel *pResiLuma = lumaBase + offsetInPlane;
+
+      crossComponentPrediction( rTu, compID, pResiLuma, pResiPtr, pResiPtr, tuWidth, tuHeight, strideLuma, uiStride, uiStride, true );
+    }
+  }
+}
+
+// Forward residual DPCM (RDPCM): each sample is predicted from the previous
+// *reconstructed* sample along the chosen axis (RDPCM_HOR: left neighbour,
+// RDPCM_VER: sample above), so encoder and decoder stay in sync.  With
+// RDPCM_OFF the reference is always zero, i.e. every sample is coded
+// independently.  In lossless (transquant-bypass) mode the delta passes
+// through unchanged; otherwise each delta is quantised and dequantised one
+// sample at a time.  uiAbsSum returns the sum of absolute coded levels.
+Void TComTrQuant::applyForwardRDPCM( TComTU& rTu, const ComponentID compID, Pel* pcResidual, const UInt uiStride, const QpParam& cQP, TCoeff* pcCoeff, TCoeff &uiAbsSum, const RDPCMMode mode )
+{
+  TComDataCU *pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx=rTu.GetAbsPartIdxTU();
+
+  const Bool bLossless      = pcCU->getCUTransquantBypass( uiAbsPartIdx );
+  const UInt uiWidth        = rTu.getRect(compID).width;
+  const UInt uiHeight       = rTu.getRect(compID).height;
+  const Bool rotateResidual = rTu.isNonTransformedResidualRotated(compID);
+  const UInt uiSizeMinus1   = (uiWidth * uiHeight) - 1;
+
+  // Reconstructed residual (post quant/dequant) used as the DPCM reference.
+  Pel reconstructedResi[MAX_TU_SIZE * MAX_TU_SIZE];
+
+  UInt uiX = 0;
+  UInt uiY = 0;
+
+  // uiX/uiY are aliased through references so a single loop nest serves both
+  // the horizontal and the vertical scan direction.
+        UInt &majorAxis             = (mode == RDPCM_HOR) ? uiX      : uiY;
+        UInt &minorAxis             = (mode == RDPCM_HOR) ? uiY      : uiX;
+  const UInt  majorAxisLimit        = (mode == RDPCM_HOR) ? uiWidth  : uiHeight;
+  const UInt  minorAxisLimit        = (mode == RDPCM_HOR) ? uiHeight : uiWidth;
+  const UInt  referenceSampleOffset = (mode == RDPCM_HOR) ? 1        : uiWidth;
+
+  const Bool bUseHalfRoundingPoint = (mode != RDPCM_OFF);
+
+  uiAbsSum = 0;
+
+  for ( majorAxis = 0; majorAxis < majorAxisLimit; majorAxis++ )
+  {
+    for ( minorAxis = 0; minorAxis < minorAxisLimit; minorAxis++ )
+    {
+      const UInt sampleIndex      = (uiY * uiWidth) + uiX;
+      const UInt coefficientIndex = (rotateResidual ? (uiSizeMinus1-sampleIndex) : sampleIndex);
+      const Pel  currentSample    = pcResidual[(uiY * uiStride) + uiX];
+      // First line of the scan has no neighbour: reference is zero.
+      const Pel  referenceSample  = ((mode != RDPCM_OFF) && (majorAxis > 0)) ? reconstructedResi[sampleIndex - referenceSampleOffset] : 0;
+
+      const Pel  encoderSideDelta = currentSample - referenceSample;
+
+      Pel reconstructedDelta;
+      if ( bLossless )
+      {
+        pcCoeff[coefficientIndex] = encoderSideDelta;
+        reconstructedDelta        = encoderSideDelta;
+      }
+      else
+      {
+        // Quantise the delta, then immediately dequantise so the next sample
+        // predicts from the same value the decoder will reconstruct.
+        transformSkipQuantOneSample(rTu, compID, encoderSideDelta, pcCoeff, coefficientIndex, cQP, bUseHalfRoundingPoint);
+        invTrSkipDeQuantOneSample  (rTu, compID, pcCoeff[coefficientIndex], reconstructedDelta, cQP, coefficientIndex);
+      }
+
+      uiAbsSum += abs(pcCoeff[coefficientIndex]);
+
+      reconstructedResi[sampleIndex] = reconstructedDelta + referenceSample;
+    }
+  }
+}
+
+// Select and apply the RDPCM mode for one TU.  RDPCM is only available for
+// transform-skip or transquant-bypass blocks.  For intra blocks the mode is
+// implied by a purely horizontal/vertical prediction direction; for inter
+// (explicit) blocks every mode is tried and the one giving the smallest
+// absolute coefficient sum is kept.  On return, rdpcmMode tells the caller
+// whether the TU was already coded here (anything but RDPCM_OFF).
+Void TComTrQuant::rdpcmNxN   ( TComTU& rTu, const ComponentID compID, Pel* pcResidual, const UInt uiStride, const QpParam& cQP, TCoeff* pcCoeff, TCoeff &uiAbsSum, RDPCMMode& rdpcmMode )
+{
+  TComDataCU *pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx=rTu.GetAbsPartIdxTU();
+
+  if (!pcCU->isRDPCMEnabled(uiAbsPartIdx) || ((pcCU->getTransformSkip(uiAbsPartIdx, compID) == 0) && !pcCU->getCUTransquantBypass(uiAbsPartIdx)))
+  {
+    rdpcmMode = RDPCM_OFF;
+  }
+  else if ( pcCU->isIntra( uiAbsPartIdx ) )
+  {
+    // Implicit RDPCM: derive the final intra direction (resolving DM chroma
+    // and the 4:2:2 angle remapping) and use RDPCM only for pure H/V modes.
+    const ChromaFormat chFmt = pcCU->getPic()->getPicYuvOrg()->getChromaFormat();
+    const ChannelType chType = toChannelType(compID);
+    const UInt uiChPredMode  = pcCU->getIntraDir( chType, uiAbsPartIdx );
+    const UInt uiChCodedMode = (uiChPredMode==DM_CHROMA_IDX && isChroma(compID)) ? pcCU->getIntraDir(CHANNEL_TYPE_LUMA, getChromasCorrespondingPULumaIdx(uiAbsPartIdx, chFmt)) : uiChPredMode;
+    const UInt uiChFinalMode = ((chFmt == CHROMA_422)       && isChroma(compID)) ? g_chroma422IntraAngleMappingTable[uiChCodedMode] : uiChCodedMode;
+
+    if (uiChFinalMode == VER_IDX || uiChFinalMode == HOR_IDX)
+    {
+      rdpcmMode = (uiChFinalMode == VER_IDX) ? RDPCM_VER : RDPCM_HOR;
+      applyForwardRDPCM( rTu, compID, pcResidual, uiStride, cQP, pcCoeff, uiAbsSum, rdpcmMode );
+    }
+    else rdpcmMode = RDPCM_OFF;
+  }
+  else // not intra, need to select the best mode
+  {
+    const UInt uiWidth  = rTu.getRect(compID).width;
+    const UInt uiHeight = rTu.getRect(compID).height;
+
+    RDPCMMode bestMode   = NUMBER_OF_RDPCM_MODES;
+    TCoeff    bestAbsSum = std::numeric_limits<TCoeff>::max();
+    TCoeff    bestCoefficients[MAX_TU_SIZE * MAX_TU_SIZE];
+
+    // Try every RDPCM mode and keep the one with the smallest absolute sum.
+    for (UInt modeIndex = 0; modeIndex < NUMBER_OF_RDPCM_MODES; modeIndex++)
+    {
+      const RDPCMMode mode = RDPCMMode(modeIndex);
+
+      TCoeff currAbsSum = 0;
+
+      applyForwardRDPCM( rTu, compID, pcResidual, uiStride, cQP, pcCoeff, currAbsSum, mode );
+
+      if (currAbsSum < bestAbsSum)
+      {
+        bestMode   = mode;
+        bestAbsSum = currAbsSum;
+        if (mode != RDPCM_OFF)
+        {
+          memcpy(bestCoefficients, pcCoeff, (uiWidth * uiHeight * sizeof(TCoeff)));
+        }
+      }
+    }
+
+    rdpcmMode = bestMode;
+    uiAbsSum  = bestAbsSum;
+
+    if (rdpcmMode != RDPCM_OFF) //the TU is re-transformed and quantised if DPCM_OFF is returned, so there is no need to preserve it here
+    {
+      memcpy(pcCoeff, bestCoefficients, (uiWidth * uiHeight * sizeof(TCoeff)));
+    }
+  }
+
+  pcCU->setExplicitRdpcmModePartRange(rdpcmMode, compID, uiAbsPartIdx, rTu.GetAbsPartIdxNumParts(compID));
+}
+
+/** Inverse RDPCM (residual DPCM): undoes the sample-wise vertical or
+ *  horizontal residual prediction in place. Only applied when RDPCM is
+ *  enabled for the CU and the TU is transform-skipped or transquant-bypassed.
+ *  \param rTu        TU being processed
+ *  \param compID     colour component to process
+ *  \param pcResidual residual samples, modified in place
+ *  \param uiStride   stride of pcResidual
+ */
+Void TComTrQuant::invRdpcmNxN( TComTU& rTu, const ComponentID compID, Pel* pcResidual, const UInt uiStride )
+{
+  TComDataCU *pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx=rTu.GetAbsPartIdxTU();
+
+  if (pcCU->isRDPCMEnabled( uiAbsPartIdx ) && ((pcCU->getTransformSkip(uiAbsPartIdx, compID ) != 0) || pcCU->getCUTransquantBypass(uiAbsPartIdx)))
+  {
+    const UInt uiWidth  = rTu.getRect(compID).width;
+    const UInt uiHeight = rTu.getRect(compID).height;
+
+    RDPCMMode rdpcmMode = RDPCM_OFF;
+
+    if ( pcCU->isIntra( uiAbsPartIdx ) )
+    {
+      // Intra: the RDPCM direction is implied by the final intra prediction
+      // mode (vertical or horizontal) rather than explicitly signalled.
+      const ChromaFormat chFmt = pcCU->getPic()->getPicYuvRec()->getChromaFormat();
+      const ChannelType chType = toChannelType(compID);
+      const UInt uiChPredMode  = pcCU->getIntraDir( chType, uiAbsPartIdx );
+      // DM chroma inherits the co-located luma intra direction.
+      const UInt uiChCodedMode = (uiChPredMode==DM_CHROMA_IDX && isChroma(compID)) ? pcCU->getIntraDir(CHANNEL_TYPE_LUMA, getChromasCorrespondingPULumaIdx(uiAbsPartIdx, chFmt)) : uiChPredMode;
+      // 4:2:2 chroma remaps the angle through the dedicated mapping table.
+      const UInt uiChFinalMode = ((chFmt == CHROMA_422)       && isChroma(compID)) ? g_chroma422IntraAngleMappingTable[uiChCodedMode] : uiChCodedMode;
+
+      if (uiChFinalMode == VER_IDX || uiChFinalMode == HOR_IDX)
+      {
+        rdpcmMode = (uiChFinalMode == VER_IDX) ? RDPCM_VER : RDPCM_HOR;
+      }
+    }
+    else  // not intra case
+    {
+      // Inter: the mode was selected at the encoder and explicitly signalled.
+      rdpcmMode = RDPCMMode(pcCU->getExplicitRdpcmMode( compID, uiAbsPartIdx ));
+    }
+
+    if (rdpcmMode == RDPCM_VER)
+    {
+      pcResidual += uiStride; //start from row 1
+
+      // Each row accumulates the already-reconstructed row above it; the
+      // top-to-bottom order is essential for the in-place accumulation.
+      for( UInt uiY = 1; uiY < uiHeight; uiY++ )
+      {
+        for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+        {
+          pcResidual[ uiX ] = pcResidual[ uiX ] + pcResidual [ (Int)uiX - (Int)uiStride ];
+        }
+        pcResidual += uiStride;
+      }
+    }
+    else if (rdpcmMode == RDPCM_HOR)
+    {
+      // Each sample accumulates the already-reconstructed sample to its left.
+      for( UInt uiY = 0; uiY < uiHeight; uiY++ )
+      {
+        for( UInt uiX = 1; uiX < uiWidth; uiX++ )
+        {
+          pcResidual[ uiX ] = pcResidual[ uiX ] + pcResidual [ (Int)uiX-1 ];
+        }
+        pcResidual += uiStride;
+      }
+    }
+  }
+}
+
+// ------------------------------------------------------------------------------------------------
+// Logical transform
+// ------------------------------------------------------------------------------------------------
+
+/** Wrapper function between HM interface and core NxN forward transform (2D)
+ *  \param compID colour component being transformed
+ *  \param useDST true selects the DST, false the DCT
+ *  \param piBlkResi input data (residual)
+ *  \param uiStride stride of input residual data
+ *  \param psCoeff output data (transform coefficients)
+ *  \param iWidth transform width
+ *  \param iHeight transform height
+ */
+Void TComTrQuant::xT( const ComponentID compID, Bool useDST, Pel* piBlkResi, UInt uiStride, TCoeff* psCoeff, Int iWidth, Int iHeight )
+{
+#if MATRIX_MULT
+  // Square blocks may use the reference matrix-multiplication transform.
+  if( iWidth == iHeight)
+  {
+    xTr(g_bitDepth[toChannelType(compID)], piBlkResi, psCoeff, uiStride, (UInt)iWidth, useDST, g_maxTrDynamicRange[toChannelType(compID)]);
+    return;
+  }
+#endif
+
+  TCoeff block[ MAX_TU_SIZE * MAX_TU_SIZE ];
+  TCoeff coeff[ MAX_TU_SIZE * MAX_TU_SIZE ];
+
+  // Gather the strided residual into a dense temporary buffer
+  // (assumes iWidth/iHeight <= MAX_TU_SIZE).
+  for (Int y = 0; y < iHeight; y++)
+    for (Int x = 0; x < iWidth; x++)
+    {
+      block[(y * iWidth) + x] = piBlkResi[(y * uiStride) + x];
+    }
+
+  // Core 2D forward transform (DST when useDST, otherwise DCT).
+  xTrMxN( g_bitDepth[toChannelType(compID)], block, coeff, iWidth, iHeight, useDST, g_maxTrDynamicRange[toChannelType(compID)] );
+
+  memcpy(psCoeff, coeff, (iWidth * iHeight * sizeof(TCoeff)));
+}
+
+/** Wrapper function between HM interface and core NxN inverse transform (2D)
+ *  \param compID colour component being transformed
+ *  \param useDST true selects the DST, false the DCT
+ *  \param plCoef input data (transform coefficients)
+ *  \param pResidual output data (residual)
+ *  \param uiStride stride of output residual data
+ *  \param iWidth transform width
+ *  \param iHeight transform height
+ */
+Void TComTrQuant::xIT( const ComponentID compID, Bool useDST, TCoeff* plCoef, Pel* pResidual, UInt uiStride, Int iWidth, Int iHeight )
+{
+#if MATRIX_MULT
+  // Square blocks may use the reference matrix-multiplication transform.
+  if( iWidth == iHeight )
+  {
+#if O0043_BEST_EFFORT_DECODING
+    // Best-effort decoding uses the bit depth signalled in the stream.
+    xITr(g_bitDepthInStream[toChannelType(compID)], plCoef, pResidual, uiStride, (UInt)iWidth, useDST, g_maxTrDynamicRange[toChannelType(compID)]);
+#else
+    xITr(g_bitDepth[toChannelType(compID)], plCoef, pResidual, uiStride, (UInt)iWidth, useDST, g_maxTrDynamicRange[toChannelType(compID)]);
+#endif
+    return;
+  }
+#endif
+
+  TCoeff block[ MAX_TU_SIZE * MAX_TU_SIZE ];
+  TCoeff coeff[ MAX_TU_SIZE * MAX_TU_SIZE ];
+
+  // Work on a dense local copy of the coefficients.
+  memcpy(coeff, plCoef, (iWidth * iHeight * sizeof(TCoeff)));
+
+#if O0043_BEST_EFFORT_DECODING
+  xITrMxN( g_bitDepthInStream[toChannelType(compID)], coeff, block, iWidth, iHeight, useDST, g_maxTrDynamicRange[toChannelType(compID)] );
+#else
+  xITrMxN( g_bitDepth[toChannelType(compID)], coeff, block, iWidth, iHeight, useDST, g_maxTrDynamicRange[toChannelType(compID)] );
+#endif
+
+  // Scatter the dense residual back into the strided destination.
+  for (Int y = 0; y < iHeight; y++)
+    for (Int x = 0; x < iWidth; x++)
+    {
+      pResidual[(y * uiStride) + x] = Pel(block[(y * iWidth) + x]);
+    }
+}
+
+/** Wrapper function between HM interface and forward transform skipping
+ *  (handles NxN rectangular blocks, not only 4x4)
+ *  \param piBlkResi input data (residual)
+ *  \param uiStride stride of input residual data
+ *  \param psCoeff output data (transform coefficients)
+ *  \param rTu TU being processed
+ *  \param component colour component being processed
+ */
+Void TComTrQuant::xTransformSkip( Pel* piBlkResi, UInt uiStride, TCoeff* psCoeff, TComTU &rTu, const ComponentID component )
+{
+  const TComRectangle &rect = rTu.getRect(component);
+  const Int width           = rect.width;
+  const Int height          = rect.height;
+
+  // Scale residuals so they occupy the same dynamic range a real
+  // transform would have produced.
+  Int iTransformShift = getTransformShift(toChannelType(component), rTu.GetEquivalentLog2TrSize(component));
+  if (rTu.getCU()->getSlice()->getSPS()->getUseExtendedPrecision())
+  {
+    iTransformShift = std::max<Int>(0, iTransformShift);
+  }
+
+  // Range extensions may rotate the residual block by 180 degrees.
+  const Bool rotateResidual = rTu.isNonTransformedResidualRotated(component);
+  const UInt uiSizeMinus1   = (width * height) - 1;
+
+  if (iTransformShift >= 0)
+  {
+    for (UInt y = 0, coefficientIndex = 0; y < height; y++)
+    {
+      for (UInt x = 0; x < width; x++, coefficientIndex++)
+      {
+        // NOTE(review): left shift of a negative residual is technically UB
+        // before C++20 — kept as in the reference implementation.
+        psCoeff[rotateResidual ? (uiSizeMinus1 - coefficientIndex) : coefficientIndex] = TCoeff(piBlkResi[(y * uiStride) + x]) << iTransformShift;
+      }
+    }
+  }
+  else //for very high bit depths
+  {
+    iTransformShift = -iTransformShift;
+    const TCoeff offset = 1 << (iTransformShift - 1);
+
+    // Negative shift: scale down with rounding instead.
+    for (UInt y = 0, coefficientIndex = 0; y < height; y++)
+    {
+      for (UInt x = 0; x < width; x++, coefficientIndex++)
+      {
+        psCoeff[rotateResidual ? (uiSizeMinus1 - coefficientIndex) : coefficientIndex] = (TCoeff(piBlkResi[(y * uiStride) + x]) + offset) >> iTransformShift;
+      }
+    }
+  }
+}
+
+/** Wrapper function between HM interface and inverse transform skipping
+ *  (handles NxN rectangular blocks)
+ *  \param plCoef input data (coefficients)
+ *  \param pResidual output data (residual)
+ *  \param uiStride stride of output residual data
+ *  \param rTu TU being processed
+ *  \param component colour component being processed
+ */
+Void TComTrQuant::xITransformSkip( TCoeff* plCoef, Pel* pResidual, UInt uiStride, TComTU &rTu, const ComponentID component )
+{
+  const TComRectangle &rect = rTu.getRect(component);
+  const Int width           = rect.width;
+  const Int height          = rect.height;
+
+  // Undo the dynamic-range scaling applied by the forward transform skip.
+  Int iTransformShift = getTransformShift(toChannelType(component), rTu.GetEquivalentLog2TrSize(component));
+  if (rTu.getCU()->getSlice()->getSPS()->getUseExtendedPrecision())
+  {
+    iTransformShift = std::max<Int>(0, iTransformShift);
+  }
+
+  // Range extensions may rotate the residual block by 180 degrees.
+  const Bool rotateResidual = rTu.isNonTransformedResidualRotated(component);
+  const UInt uiSizeMinus1   = (width * height) - 1;
+
+  if (iTransformShift >= 0)
+  {
+    // Scale down with rounding; no rounding term when the shift is zero.
+    const TCoeff offset = iTransformShift==0 ? 0 : (1 << (iTransformShift - 1));
+
+    for (UInt y = 0, coefficientIndex = 0; y < height; y++)
+    {
+      for (UInt x = 0; x < width; x++, coefficientIndex++)
+      {
+        pResidual[(y * uiStride) + x] =  Pel((plCoef[rotateResidual ? (uiSizeMinus1 - coefficientIndex) : coefficientIndex] + offset) >> iTransformShift);
+      }
+    }
+  }
+  else //for very high bit depths
+  {
+    iTransformShift = -iTransformShift;
+
+    for (UInt y = 0, coefficientIndex = 0; y < height; y++)
+    {
+      for (UInt x = 0; x < width; x++, coefficientIndex++)
+      {
+        // NOTE(review): left shift of a negative coefficient is technically UB
+        // before C++20 — kept as in the reference implementation.
+        pResidual[(y * uiStride) + x] = Pel(plCoef[rotateResidual ? (uiSizeMinus1 - coefficientIndex) : coefficientIndex] << iTransformShift);
+      }
+    }
+  }
+}
+
+/** RDOQ (rate-distortion optimized quantization) for entropy coding
+ * engines using probability models like CABAC.
+ * \param rTu TU being quantized
+ * \param plSrcCoeff pointer to input (unquantized) transform coefficients
+ * \param piDstCoeff pointer to output (quantized) coefficients
+ * \param piArlDstCoeff output coefficients for adaptive QP selection (ADAPTIVE_QP_SELECTION builds only)
+ * \param uiAbsSum reference to absolute sum of quantized transform coefficients
+ * \param compID colour component being processed
+ * \param cQP quantization parameters
+ * \returns Void
+ */
+Void TComTrQuant::xRateDistOptQuant                 (       TComTU       &rTu,
+                                                            TCoeff      * plSrcCoeff,
+                                                            TCoeff      * piDstCoeff,
+#if ADAPTIVE_QP_SELECTION
+                                                            TCoeff      * piArlDstCoeff,
+#endif
+                                                            TCoeff       &uiAbsSum,
+                                                      const ComponentID   compID,
+                                                      const QpParam      &cQP  )
+{
+  const TComRectangle  & rect             = rTu.getRect(compID);
+  const UInt             uiWidth          = rect.width;
+  const UInt             uiHeight         = rect.height;
+        TComDataCU    *  pcCU             = rTu.getCU();
+  const UInt             uiAbsPartIdx     = rTu.GetAbsPartIdxTU();
+  const ChannelType      channelType      = toChannelType(compID);
+  const UInt             uiLog2TrSize     = rTu.GetEquivalentLog2TrSize(compID);
+
+  const Bool             extendedPrecision = pcCU->getSlice()->getSPS()->getUseExtendedPrecision();
+
+  /* for 422 chroma blocks, the effective scaling applied during transformation is not a power of 2, hence it cannot be
+   * implemented as a bit-shift (the quantised result will be sqrt(2) * larger than required). Alternatively, adjust the
+   * uiLog2TrSize applied in iTransformShift, such that the result is 1/sqrt(2) the required result (i.e. smaller)
+   * Then a QP+3 (sqrt(2)) or QP-3 (1/sqrt(2)) method could be used to get the required result
+   */
+
+  // Represents scaling through forward transform
+  Int iTransformShift            = getTransformShift(channelType, uiLog2TrSize);
+  if ((pcCU->getTransformSkip(uiAbsPartIdx, compID) != 0) && pcCU->getSlice()->getSPS()->getUseExtendedPrecision())
+  {
+    iTransformShift = std::max<Int>(0, iTransformShift);
+  }
+
+  const Bool bUseGolombRiceParameterAdaptation = pcCU->getSlice()->getSPS()->getUseGolombRiceParameterAdaptation();
+  const UInt initialGolombRiceParameter        = m_pcEstBitsSbac->golombRiceAdaptationStatistics[rTu.getGolombRiceStatisticsIndex(compID)] / RExt__GOLOMB_RICE_INCREMENT_DIVISOR;
+        UInt uiGoRiceParam                     = initialGolombRiceParameter;
+  Double     d64BlockUncodedCost               = 0;
+  const UInt uiLog2BlockWidth                  = g_aucConvertToBit[ uiWidth  ] + 2;
+  const UInt uiLog2BlockHeight                 = g_aucConvertToBit[ uiHeight ] + 2;
+  const UInt uiMaxNumCoeff                     = uiWidth * uiHeight;
+  assert(compID<MAX_NUM_COMPONENT);
+
+  Int scalingListType = getScalingListType(pcCU->getPredictionMode(uiAbsPartIdx), compID);
+  assert(scalingListType < SCALING_LIST_NUM);
+
+#if ADAPTIVE_QP_SELECTION
+  memset(piArlDstCoeff, 0, sizeof(TCoeff) *  uiMaxNumCoeff);
+#endif
+
+  // Per-coefficient cost bookkeeping (indexed by scan position):
+  // pdCostCoeff  - cost of coding the chosen level,
+  // pdCostSig    - cost of the significance flag alone,
+  // pdCostCoeff0 - cost of quantising the coefficient to zero.
+  Double pdCostCoeff [ MAX_TU_SIZE * MAX_TU_SIZE ];
+  Double pdCostSig   [ MAX_TU_SIZE * MAX_TU_SIZE ];
+  Double pdCostCoeff0[ MAX_TU_SIZE * MAX_TU_SIZE ];
+  memset( pdCostCoeff, 0, sizeof(Double) *  uiMaxNumCoeff );
+  memset( pdCostSig,   0, sizeof(Double) *  uiMaxNumCoeff );
+  // Rate deltas used later by sign-bit hiding (indexed by block position).
+  Int rateIncUp   [ MAX_TU_SIZE * MAX_TU_SIZE ];
+  Int rateIncDown [ MAX_TU_SIZE * MAX_TU_SIZE ];
+  Int sigRateDelta[ MAX_TU_SIZE * MAX_TU_SIZE ];
+  TCoeff deltaU   [ MAX_TU_SIZE * MAX_TU_SIZE ];
+  memset( rateIncUp,    0, sizeof(Int   ) *  uiMaxNumCoeff );
+  memset( rateIncDown,  0, sizeof(Int   ) *  uiMaxNumCoeff );
+  memset( sigRateDelta, 0, sizeof(Int   ) *  uiMaxNumCoeff );
+  memset( deltaU,       0, sizeof(TCoeff) *  uiMaxNumCoeff );
+
+  const Int iQBits = QUANT_SHIFT + cQP.per + iTransformShift;                   // Right shift of non-RDOQ quantizer;  level = (coeff*uiQ + offset)>>q_bits
+  const Double *const pdErrScale = getErrScaleCoeff(scalingListType, (uiLog2TrSize-2), cQP.rem);
+  const Int    *const piQCoef    = getQuantCoeff(scalingListType, cQP.rem, (uiLog2TrSize-2));
+
+  const Bool   enableScalingLists             = getUseScalingList(uiWidth, uiHeight, (pcCU->getTransformSkip(uiAbsPartIdx, compID) != 0));
+  const Int    defaultQuantisationCoefficient = g_quantScales[cQP.rem];
+  const Double defaultErrorScale              = getErrScaleCoeffNoScalingList(scalingListType, (uiLog2TrSize-2), cQP.rem);
+
+  // Clipping bounds for coded coefficient levels.
+  const TCoeff entropyCodingMinimum = -(1 << g_maxTrDynamicRange[toChannelType(compID)]);
+  const TCoeff entropyCodingMaximum =  (1 << g_maxTrDynamicRange[toChannelType(compID)]) - 1;
+
+#if ADAPTIVE_QP_SELECTION
+  Int iQBitsC = iQBits - ARL_C_PRECISION;
+  Int iAddC =  1 << (iQBitsC-1);
+#endif
+
+  TUEntropyCodingParameters codingParameters;
+  getTUEntropyCodingParameters(codingParameters, rTu, compID);
+  const UInt uiCGSize = (1 << MLS_CG_SIZE);
+
+  Double pdCostCoeffGroupSig[ MLS_GRP_NUM ];
+  UInt uiSigCoeffGroupFlag[ MLS_GRP_NUM ];
+  Int iCGLastScanPos = -1;
+
+  // CABAC context-model state mirrored from the coefficient coder.
+  UInt    uiCtxSet            = 0;
+  Int     c1                  = 1;
+  Int     c2                  = 0;
+  Double  d64BaseCost         = 0;
+  Int     iLastScanPos        = -1;
+
+  UInt    c1Idx     = 0;
+  UInt    c2Idx     = 0;
+  Int     baseLevel;
+
+  memset( pdCostCoeffGroupSig,   0, sizeof(Double) * MLS_GRP_NUM );
+  memset( uiSigCoeffGroupFlag,   0, sizeof(UInt) * MLS_GRP_NUM );
+
+  UInt uiCGNum = uiWidth * uiHeight >> MLS_CG_SIZE;
+  Int iScanPos;
+  coeffGroupRDStats rdStats;
+
+  const UInt significanceMapContextOffset = getSignificanceMapContextOffset(compID);
+
+  // ===== pass 1: per-coefficient level decision, reverse scan order =====
+  for (Int iCGScanPos = uiCGNum-1; iCGScanPos >= 0; iCGScanPos--)
+  {
+    UInt uiCGBlkPos = codingParameters.scanCG[ iCGScanPos ];
+    UInt uiCGPosY   = uiCGBlkPos / codingParameters.widthInGroups;
+    UInt uiCGPosX   = uiCGBlkPos - (uiCGPosY * codingParameters.widthInGroups);
+
+    memset( &rdStats, 0, sizeof (coeffGroupRDStats));
+
+    const Int patternSigCtx = TComTrQuant::calcPatternSigCtx(uiSigCoeffGroupFlag, uiCGPosX, uiCGPosY, codingParameters.widthInGroups, codingParameters.heightInGroups);
+
+    for (Int iScanPosinCG = uiCGSize-1; iScanPosinCG >= 0; iScanPosinCG--)
+    {
+      iScanPos = iCGScanPos*uiCGSize + iScanPosinCG;
+      //===== quantization =====
+      UInt    uiBlkPos          = codingParameters.scan[iScanPos];
+      // set coeff
+
+      const Int    quantisationCoefficient = (enableScalingLists) ? piQCoef   [uiBlkPos] : defaultQuantisationCoefficient;
+      const Double errorScale              = (enableScalingLists) ? pdErrScale[uiBlkPos] : defaultErrorScale;
+
+      const Int64  tmpLevel                = Int64(abs(plSrcCoeff[ uiBlkPos ])) * quantisationCoefficient;
+
+      // Clamp so the later "+ (1 << (iQBits-1))" rounding cannot overflow.
+      const Intermediate_Int lLevelDouble  = (Intermediate_Int)min<Int64>(tmpLevel, MAX_INTERMEDIATE_INT - (Intermediate_Int(1) << (iQBits - 1)));
+
+#if ADAPTIVE_QP_SELECTION
+      if( m_bUseAdaptQpSelect )
+      {
+        piArlDstCoeff[uiBlkPos]   = (TCoeff)(( lLevelDouble + iAddC) >> iQBitsC );
+      }
+#endif
+      // Conventional (non-RDOQ) quantised magnitude: upper bound for the search.
+      const UInt uiMaxAbsLevel  = std::min<UInt>(UInt(entropyCodingMaximum), UInt((lLevelDouble + (Intermediate_Int(1) << (iQBits - 1))) >> iQBits));
+
+      const Double dErr         = Double( lLevelDouble );
+      pdCostCoeff0[ iScanPos ]  = dErr * dErr * errorScale;
+      d64BlockUncodedCost      += pdCostCoeff0[ iScanPos ];
+      piDstCoeff[ uiBlkPos ]    = uiMaxAbsLevel;
+
+      if ( uiMaxAbsLevel > 0 && iLastScanPos < 0 )
+      {
+        // First non-zero coefficient in reverse scan = last significant one.
+        iLastScanPos            = iScanPos;
+        uiCtxSet                = getContextSetIndex(compID, (iScanPos >> MLS_CG_SIZE), 0);
+        iCGLastScanPos          = iCGScanPos;
+      }
+
+      if ( iLastScanPos >= 0 )
+      {
+        //===== coefficient level estimation =====
+        UInt  uiLevel;
+        UInt  uiOneCtx         = (NUM_ONE_FLAG_CTX_PER_SET * uiCtxSet) + c1;
+        UInt  uiAbsCtx         = (NUM_ABS_FLAG_CTX_PER_SET * uiCtxSet) + c2;
+
+        if( iScanPos == iLastScanPos )
+        {
+          // Last significant position: significance flag is implicit.
+          uiLevel              = xGetCodedLevel( pdCostCoeff[ iScanPos ], pdCostCoeff0[ iScanPos ], pdCostSig[ iScanPos ],
+                                                  lLevelDouble, uiMaxAbsLevel, significanceMapContextOffset, uiOneCtx, uiAbsCtx, uiGoRiceParam,
+                                                  c1Idx, c2Idx, iQBits, errorScale, 1, extendedPrecision, channelType
+                                                  );
+        }
+        else
+        {
+          UShort uiCtxSig      = significanceMapContextOffset + getSigCtxInc( patternSigCtx, codingParameters, iScanPos, uiLog2BlockWidth, uiLog2BlockHeight, channelType );
+
+          uiLevel              = xGetCodedLevel( pdCostCoeff[ iScanPos ], pdCostCoeff0[ iScanPos ], pdCostSig[ iScanPos ],
+                                                  lLevelDouble, uiMaxAbsLevel, uiCtxSig, uiOneCtx, uiAbsCtx, uiGoRiceParam,
+                                                  c1Idx, c2Idx, iQBits, errorScale, 0, extendedPrecision, channelType
+                                                  );
+
+          sigRateDelta[ uiBlkPos ] = m_pcEstBitsSbac->significantBits[ uiCtxSig ][ 1 ] - m_pcEstBitsSbac->significantBits[ uiCtxSig ][ 0 ];
+        }
+
+        // Quantisation error with 8 fractional bits; consumed by sign hiding.
+        deltaU[ uiBlkPos ]        = TCoeff((lLevelDouble - (Intermediate_Int(uiLevel) << iQBits)) >> (iQBits-8));
+
+        if( uiLevel > 0 )
+        {
+          Int rateNow = xGetICRate( uiLevel, uiOneCtx, uiAbsCtx, uiGoRiceParam, c1Idx, c2Idx, extendedPrecision, channelType );
+          rateIncUp   [ uiBlkPos ] = xGetICRate( uiLevel+1, uiOneCtx, uiAbsCtx, uiGoRiceParam, c1Idx, c2Idx, extendedPrecision, channelType ) - rateNow;
+          rateIncDown [ uiBlkPos ] = xGetICRate( uiLevel-1, uiOneCtx, uiAbsCtx, uiGoRiceParam, c1Idx, c2Idx, extendedPrecision, channelType ) - rateNow;
+        }
+        else // uiLevel == 0
+        {
+          rateIncUp   [ uiBlkPos ] = m_pcEstBitsSbac->m_greaterOneBits[ uiOneCtx ][ 0 ];
+        }
+        piDstCoeff[ uiBlkPos ] = uiLevel;
+        d64BaseCost           += pdCostCoeff [ iScanPos ];
+
+        baseLevel = (c1Idx < C1FLAG_NUMBER) ? (2 + (c2Idx < C2FLAG_NUMBER)) : 1;
+        if( uiLevel >= baseLevel )
+        {
+          if (uiLevel > 3*(1<<uiGoRiceParam))
+          {
+            uiGoRiceParam = bUseGolombRiceParameterAdaptation ? (uiGoRiceParam + 1) : (std::min<UInt>((uiGoRiceParam + 1), 4));
+          }
+        }
+        if ( uiLevel >= 1)
+        {
+          c1Idx ++;
+        }
+
+        //===== update bin model =====
+        if( uiLevel > 1 )
+        {
+          c1 = 0;
+          c2 += (c2 < 2);
+          c2Idx ++;
+        }
+        else if( (c1 < 3) && (c1 > 0) && uiLevel)
+        {
+          c1++;
+        }
+
+        //===== context set update =====
+        if( ( iScanPos % uiCGSize == 0 ) && ( iScanPos > 0 ) )
+        {
+          uiCtxSet          = getContextSetIndex(compID, ((iScanPos - 1) >> MLS_CG_SIZE), (c1 == 0)); //(iScanPos - 1) because we do this **before** entering the final group
+          c1                = 1;
+          c2                = 0;
+          c1Idx             = 0;
+          c2Idx             = 0;
+          uiGoRiceParam     = initialGolombRiceParameter;
+        }
+      }
+      else
+      {
+        d64BaseCost    += pdCostCoeff0[ iScanPos ];
+      }
+      rdStats.d64SigCost += pdCostSig[ iScanPos ];
+      if (iScanPosinCG == 0 )
+      {
+        rdStats.d64SigCost_0 = pdCostSig[ iScanPos ];
+      }
+      if (piDstCoeff[ uiBlkPos ] )
+      {
+        uiSigCoeffGroupFlag[ uiCGBlkPos ] = 1;
+        rdStats.d64CodedLevelandDist += pdCostCoeff[ iScanPos ] - pdCostSig[ iScanPos ];
+        rdStats.d64UncodedDist += pdCostCoeff0[ iScanPos ];
+        if ( iScanPosinCG != 0 )
+        {
+          rdStats.iNNZbeforePos0++;
+        }
+      }
+    } //end for (iScanPosinCG)
+
+    // ===== per-coefficient-group decision: keep or zero the whole CG =====
+    if (iCGLastScanPos >= 0)
+    {
+      if( iCGScanPos )
+      {
+        if (uiSigCoeffGroupFlag[ uiCGBlkPos ] == 0)
+        {
+          UInt  uiCtxSig = getSigCoeffGroupCtxInc( uiSigCoeffGroupFlag, uiCGPosX, uiCGPosY, codingParameters.widthInGroups, codingParameters.heightInGroups );
+          d64BaseCost += xGetRateSigCoeffGroup(0, uiCtxSig) - rdStats.d64SigCost;;
+          pdCostCoeffGroupSig[ iCGScanPos ] = xGetRateSigCoeffGroup(0, uiCtxSig);
+        }
+        else
+        {
+          if (iCGScanPos < iCGLastScanPos) //skip the last coefficient group, which will be handled together with last position below.
+          {
+            if ( rdStats.iNNZbeforePos0 == 0 )
+            {
+              d64BaseCost -= rdStats.d64SigCost_0;
+              rdStats.d64SigCost -= rdStats.d64SigCost_0;
+            }
+            // rd-cost if SigCoeffGroupFlag = 0, initialization
+            Double d64CostZeroCG = d64BaseCost;
+
+            // add SigCoeffGroupFlag cost to total cost
+            UInt  uiCtxSig = getSigCoeffGroupCtxInc( uiSigCoeffGroupFlag, uiCGPosX, uiCGPosY, codingParameters.widthInGroups, codingParameters.heightInGroups );
+
+            if (iCGScanPos < iCGLastScanPos)
+            {
+              d64BaseCost  += xGetRateSigCoeffGroup(1, uiCtxSig);
+              d64CostZeroCG += xGetRateSigCoeffGroup(0, uiCtxSig);
+              pdCostCoeffGroupSig[ iCGScanPos ] = xGetRateSigCoeffGroup(1, uiCtxSig);
+            }
+
+            // try to convert the current coeff group from non-zero to all-zero
+            d64CostZeroCG += rdStats.d64UncodedDist;  // distortion for resetting non-zero levels to zero levels
+            d64CostZeroCG -= rdStats.d64CodedLevelandDist;   // distortion and level cost for keeping all non-zero levels
+            d64CostZeroCG -= rdStats.d64SigCost;     // sig cost for all coeffs, including zero levels and non-zerl levels
+
+            // if we can save cost, change this block to all-zero block
+            if ( d64CostZeroCG < d64BaseCost )
+            {
+              uiSigCoeffGroupFlag[ uiCGBlkPos ] = 0;
+              d64BaseCost = d64CostZeroCG;
+              if (iCGScanPos < iCGLastScanPos)
+              {
+                pdCostCoeffGroupSig[ iCGScanPos ] = xGetRateSigCoeffGroup(0, uiCtxSig);
+              }
+              // reset coeffs to 0 in this block
+              for (Int iScanPosinCG = uiCGSize-1; iScanPosinCG >= 0; iScanPosinCG--)
+              {
+                iScanPos      = iCGScanPos*uiCGSize + iScanPosinCG;
+                UInt uiBlkPos = codingParameters.scan[ iScanPos ];
+
+                if (piDstCoeff[ uiBlkPos ])
+                {
+                  piDstCoeff [ uiBlkPos ] = 0;
+                  pdCostCoeff[ iScanPos ] = pdCostCoeff0[ iScanPos ];
+                  pdCostSig  [ iScanPos ] = 0;
+                }
+              }
+            } // end if ( d64CostAllZeros < d64BaseCost )
+          }
+        } // end if if (uiSigCoeffGroupFlag[ uiCGBlkPos ] == 0)
+      }
+      else
+      {
+        // The DC coefficient group is always coded.
+        uiSigCoeffGroupFlag[ uiCGBlkPos ] = 1;
+      }
+    }
+  } //end for (iCGScanPos)
+
+  //===== estimate last position =====
+  if ( iLastScanPos < 0 )
+  {
+    // Entire block quantised to zero: nothing to code.
+    return;
+  }
+
+  Double  d64BestCost         = 0;
+  Int     ui16CtxCbf          = 0;
+  Int     iBestLastIdxP1      = 0;
+  if( !pcCU->isIntra( uiAbsPartIdx ) && isLuma(compID) && pcCU->getTransformIdx( uiAbsPartIdx ) == 0 )
+  {
+    // Inter luma at root transform depth uses the root CBF context.
+    ui16CtxCbf   = 0;
+    d64BestCost  = d64BlockUncodedCost + xGetICost( m_pcEstBitsSbac->blockRootCbpBits[ ui16CtxCbf ][ 0 ] );
+    d64BaseCost += xGetICost( m_pcEstBitsSbac->blockRootCbpBits[ ui16CtxCbf ][ 1 ] );
+  }
+  else
+  {
+    ui16CtxCbf   = pcCU->getCtxQtCbf( rTu, channelType );
+    ui16CtxCbf  += getCBFContextOffset(compID);
+    d64BestCost  = d64BlockUncodedCost + xGetICost( m_pcEstBitsSbac->blockCbpBits[ ui16CtxCbf ][ 0 ] );
+    d64BaseCost += xGetICost( m_pcEstBitsSbac->blockCbpBits[ ui16CtxCbf ][ 1 ] );
+  }
+
+
+  // ===== pass 2: search best "last significant coefficient" position =====
+  Bool bFoundLast = false;
+  for (Int iCGScanPos = iCGLastScanPos; iCGScanPos >= 0; iCGScanPos--)
+  {
+    UInt uiCGBlkPos = codingParameters.scanCG[ iCGScanPos ];
+
+    d64BaseCost -= pdCostCoeffGroupSig [ iCGScanPos ];
+    if (uiSigCoeffGroupFlag[ uiCGBlkPos ])
+    {
+      for (Int iScanPosinCG = uiCGSize-1; iScanPosinCG >= 0; iScanPosinCG--)
+      {
+        iScanPos = iCGScanPos*uiCGSize + iScanPosinCG;
+
+        if (iScanPos > iLastScanPos) continue;
+        UInt   uiBlkPos     = codingParameters.scan[iScanPos];
+
+        if( piDstCoeff[ uiBlkPos ] )
+        {
+          UInt   uiPosY       = uiBlkPos >> uiLog2BlockWidth;
+          UInt   uiPosX       = uiBlkPos - ( uiPosY << uiLog2BlockWidth );
+
+          Double d64CostLast= codingParameters.scanType == SCAN_VER ? xGetRateLast( uiPosY, uiPosX, compID ) : xGetRateLast( uiPosX, uiPosY, compID );
+          Double totalCost = d64BaseCost + d64CostLast - pdCostSig[ iScanPos ];
+
+          if( totalCost < d64BestCost )
+          {
+            iBestLastIdxP1  = iScanPos + 1;
+            d64BestCost     = totalCost;
+          }
+          if( piDstCoeff[ uiBlkPos ] > 1 )
+          {
+            // A level > 1 cannot be truncated away; stop the search here.
+            bFoundLast = true;
+            break;
+          }
+          d64BaseCost      -= pdCostCoeff[ iScanPos ];
+          d64BaseCost      += pdCostCoeff0[ iScanPos ];
+        }
+        else
+        {
+          d64BaseCost      -= pdCostSig[ iScanPos ];
+        }
+      } //end for
+      if (bFoundLast)
+      {
+        break;
+      }
+    } // end if (uiSigCoeffGroupFlag[ uiCGBlkPos ])
+  } // end for
+
+
+  // Apply signs and accumulate the absolute sum for the kept coefficients.
+  for ( Int scanPos = 0; scanPos < iBestLastIdxP1; scanPos++ )
+  {
+    Int blkPos = codingParameters.scan[ scanPos ];
+    TCoeff level = piDstCoeff[ blkPos ];
+    uiAbsSum += level;
+    piDstCoeff[ blkPos ] = ( plSrcCoeff[ blkPos ] < 0 ) ? -level : level;
+  }
+
+  //===== clean uncoded coefficients =====
+  for ( Int scanPos = iBestLastIdxP1; scanPos <= iLastScanPos; scanPos++ )
+  {
+    piDstCoeff[ codingParameters.scan[ scanPos ] ] = 0;
+  }
+
+
+  // ===== pass 3: sign data hiding (adjust one level per CG if cheaper) =====
+  if( pcCU->getSlice()->getPPS()->getSignHideFlag() && uiAbsSum>=2)
+  {
+    const Double inverseQuantScale = Double(g_invQuantScales[cQP.rem]);
+    Int64 rdFactor = (Int64)(inverseQuantScale * inverseQuantScale * (1 << (2 * cQP.per))
+                             / m_dLambda / 16 / (1 << (2 * DISTORTION_PRECISION_ADJUSTMENT(g_bitDepth[channelType] - 8)))
+                             + 0.5);
+
+    Int lastCG = -1;
+    Int absSum = 0 ;
+    Int n ;
+
+    for( Int subSet = (uiWidth*uiHeight-1) >> MLS_CG_SIZE; subSet >= 0; subSet-- )
+    {
+      Int  subPos     = subSet << MLS_CG_SIZE;
+      Int  firstNZPosInCG=uiCGSize , lastNZPosInCG=-1 ;
+      absSum = 0 ;
+
+      // Locate last non-zero coefficient in this coefficient group.
+      for(n = uiCGSize-1; n >= 0; --n )
+      {
+        if( piDstCoeff[ codingParameters.scan[ n + subPos ]] )
+        {
+          lastNZPosInCG = n;
+          break;
+        }
+      }
+
+      // Locate first non-zero coefficient in this coefficient group.
+      for(n = 0; n <uiCGSize; n++ )
+      {
+        if( piDstCoeff[ codingParameters.scan[ n + subPos ]] )
+        {
+          firstNZPosInCG = n;
+          break;
+        }
+      }
+
+      for(n = firstNZPosInCG; n <=lastNZPosInCG; n++ )
+      {
+        absSum += Int(piDstCoeff[ codingParameters.scan[ n + subPos ]]);
+      }
+
+      if(lastNZPosInCG>=0 && lastCG==-1)
+      {
+        lastCG = 1;
+      }
+
+      if( lastNZPosInCG-firstNZPosInCG>=SBH_THRESHOLD )
+      {
+        // Sign of the first non-zero must equal the parity of absSum;
+        // if not, pay to adjust the cheapest coefficient by +/-1.
+        UInt signbit = (piDstCoeff[codingParameters.scan[subPos+firstNZPosInCG]]>0?0:1);
+        if( signbit!=(absSum&0x1) )  // hide but need tune
+        {
+          // calculate the cost
+          Int64 minCostInc = MAX_INT64, curCost = MAX_INT64;
+          Int minPos = -1, finalChange = 0, curChange = 0;
+
+          for( n = (lastCG==1?lastNZPosInCG:uiCGSize-1) ; n >= 0; --n )
+          {
+            UInt uiBlkPos   = codingParameters.scan[ n + subPos ];
+            if(piDstCoeff[ uiBlkPos ] != 0 )
+            {
+              Int64 costUp   = rdFactor * ( - deltaU[uiBlkPos] ) + rateIncUp[uiBlkPos];
+              Int64 costDown = rdFactor * (   deltaU[uiBlkPos] ) + rateIncDown[uiBlkPos]
+                               -   ((abs(piDstCoeff[uiBlkPos]) == 1) ? sigRateDelta[uiBlkPos] : 0);
+
+              if(lastCG==1 && lastNZPosInCG==n && abs(piDstCoeff[uiBlkPos])==1)
+              {
+                // Avoid removing the last significant coefficient.
+                costDown -= (4<<15);
+              }
+
+              if(costUp<costDown)
+              {
+                curCost = costUp;
+                curChange =  1;
+              }
+              else
+              {
+                curChange = -1;
+                if(n==firstNZPosInCG && abs(piDstCoeff[uiBlkPos])==1)
+                {
+                  // Removing the first non-zero would change the hidden sign.
+                  curCost = MAX_INT64;
+                }
+                else
+                {
+                  curCost = costDown;
+                }
+              }
+            }
+            else
+            {
+              curCost = rdFactor * ( - (abs(deltaU[uiBlkPos])) ) + (1<<15) + rateIncUp[uiBlkPos] + sigRateDelta[uiBlkPos] ;
+              curChange = 1 ;
+
+              if(n<firstNZPosInCG)
+              {
+                UInt thissignbit = (plSrcCoeff[uiBlkPos]>=0?0:1);
+                if(thissignbit != signbit )
+                {
+                  curCost = MAX_INT64;
+                }
+              }
+            }
+
+            if( curCost<minCostInc)
+            {
+              minCostInc = curCost;
+              finalChange = curChange;
+              minPos = uiBlkPos;
+            }
+          }
+
+          if(piDstCoeff[minPos] == entropyCodingMaximum || piDstCoeff[minPos] == entropyCodingMinimum)
+          {
+            // Would overflow the codable range; force a decrement instead.
+            finalChange = -1;
+          }
+
+          if(plSrcCoeff[minPos]>=0)
+          {
+            piDstCoeff[minPos] += finalChange ;
+          }
+          else
+          {
+            piDstCoeff[minPos] -= finalChange ;
+          }
+        }
+      }
+
+      if(lastCG==1)
+      {
+        lastCG=0 ;
+      }
+    }
+  }
+}
+
+
+/** Pattern decision for context derivation process of significant_coeff_flag
+ * \param sigCoeffGroupFlag pointer to prior coded significant coeff group flags
+ * \param uiCGPosX column of current coefficient group
+ * \param uiCGPosY row of current coefficient group
+ * \param widthInGroups width of the block in coefficient groups
+ * \param heightInGroups height of the block in coefficient groups
+ * \returns pattern for current coefficient group
+ */
+Int  TComTrQuant::calcPatternSigCtx( const UInt* sigCoeffGroupFlag, UInt uiCGPosX, UInt uiCGPosY, UInt widthInGroups, UInt heightInGroups )
+{
+  // A single-group block has no neighbouring groups to inspect.
+  if ((widthInGroups <= 1) && (heightInGroups <= 1))
+  {
+    return 0;
+  }
+
+  UInt sigRight = 0;
+  UInt sigLower = 0;
+
+  // Significance of the coefficient group to the right, when inside the block.
+  if (uiCGPosX < (widthInGroups - 1))
+  {
+    sigRight = (sigCoeffGroupFlag[(uiCGPosY * widthInGroups) + uiCGPosX + 1] != 0) ? 1 : 0;
+  }
+
+  // Significance of the coefficient group below, when inside the block.
+  if (uiCGPosY < (heightInGroups - 1))
+  {
+    sigLower = (sigCoeffGroupFlag[((uiCGPosY + 1) * widthInGroups) + uiCGPosX] != 0) ? 1 : 0;
+  }
+
+  // Bit 0 encodes the right neighbour, bit 1 the lower neighbour.
+  return sigRight + (sigLower << 1);
+}
+
+
+/** Context derivation process of coeff_abs_significant_flag.
+ * Maps a scan position inside a TU to the context increment used when coding
+ * its significance flag, based on the neighbouring-group pattern
+ * (from calcPatternSigCtx) and the position within the coefficient group.
+ * \param patternSigCtx pattern for current coefficient group (0..3)
+ * \param codingParameters coding parameters for the TU (includes the scan)
+ * \param scanPosition current position in scan order
+ * \param log2BlockWidth log2 width of the block
+ * \param log2BlockHeight log2 height of the block
+ * \param chanType channel type (CHANNEL_TYPE_LUMA/CHROMA)
+ * \returns ctxInc for current scan position
+ */
+Int TComTrQuant::getSigCtxInc    (       Int                        patternSigCtx,
+                                   const TUEntropyCodingParameters &codingParameters,
+                                   const Int                        scanPosition,
+                                   const Int                        log2BlockWidth,
+                                   const Int                        log2BlockHeight,
+                                   const ChannelType                chanType)
+{
+  if (codingParameters.firstSignificanceMapContext == significanceMapContextSetStart[chanType][CONTEXT_TYPE_SINGLE])
+  {
+    //single context mode
+    return significanceMapContextSetStart[chanType][CONTEXT_TYPE_SINGLE];
+  }
+
+  // Translate the scan-order position back to raster (x, y) coordinates.
+  const UInt rasterPosition = codingParameters.scan[scanPosition];
+  const UInt posY           = rasterPosition >> log2BlockWidth;
+  const UInt posX           = rasterPosition - (posY << log2BlockWidth);
+
+  if ((posX + posY) == 0) return 0; //special case for the DC context variable
+
+  Int offset = MAX_INT;
+
+  if ((log2BlockWidth == 2) && (log2BlockHeight == 2)) //4x4
+  {
+    // 4x4 TUs use a fixed position-based context lookup table.
+    offset = ctxIndMap4x4[ (4 * posY) + posX ];
+  }
+  else
+  {
+    Int cnt = 0;
+
+    switch (patternSigCtx)
+    {
+      //------------------
+
+      case 0: //neither neighbouring group is significant
+        {
+          const Int posXinSubset     = posX & ((1 << MLS_CG_LOG2_WIDTH)  - 1);
+          const Int posYinSubset     = posY & ((1 << MLS_CG_LOG2_HEIGHT) - 1);
+          const Int posTotalInSubset = posXinSubset + posYinSubset;
+
+          //first N coefficients in scan order use 2; the next few use 1; the rest use 0.
+          const UInt context1Threshold = NEIGHBOURHOOD_00_CONTEXT_1_THRESHOLD_4x4;
+          const UInt context2Threshold = NEIGHBOURHOOD_00_CONTEXT_2_THRESHOLD_4x4;
+
+          cnt = (posTotalInSubset >= context1Threshold) ? 0 : ((posTotalInSubset >= context2Threshold) ? 1 : 2);
+        }
+        break;
+
+      //------------------
+
+      case 1: //right group is significant, below is not
+        {
+          const Int posYinSubset = posY & ((1 << MLS_CG_LOG2_HEIGHT) - 1);
+          const Int groupHeight  = 1 << MLS_CG_LOG2_HEIGHT;
+
+          cnt = (posYinSubset >= (groupHeight >> 1)) ? 0 : ((posYinSubset >= (groupHeight >> 2)) ? 1 : 2); //top quarter uses 2; second-from-top quarter uses 1; bottom half uses 0
+        }
+        break;
+
+      //------------------
+
+      case 2: //below group is significant, right is not
+        {
+          const Int posXinSubset = posX & ((1 << MLS_CG_LOG2_WIDTH)  - 1);
+          const Int groupWidth   = 1 << MLS_CG_LOG2_WIDTH;
+
+          cnt = (posXinSubset >= (groupWidth >> 1)) ? 0 : ((posXinSubset >= (groupWidth >> 2)) ? 1 : 2); //left quarter uses 2; second-from-left quarter uses 1; right half uses 0
+        }
+        break;
+
+      //------------------
+
+      case 3: //both neighbouring groups are significant
+        {
+          cnt = 2;
+        }
+        break;
+
+      //------------------
+
+      default:
+        // patternSigCtx is produced by calcPatternSigCtx and must be 0..3.
+        std::cerr << "ERROR: Invalid patternSigCtx \"" << Int(patternSigCtx) << "\" in getSigCtxInc" << std::endl;
+        exit(1);
+        break;
+    }
+
+    //------------------------------------------------
+
+    // Groups other than the top-left one use a separate context offset.
+    const Bool notFirstGroup = ((posX >> MLS_CG_LOG2_WIDTH) + (posY >> MLS_CG_LOG2_HEIGHT)) > 0;
+
+    offset = (notFirstGroup ? notFirstGroupNeighbourhoodContextOffset[chanType] : 0) + cnt;
+  }
+
+  return codingParameters.firstSignificanceMapContext + offset;
+}
+
+
+/** Get the best level in RD sense
+ * \param rd64CodedCost    reference to coded cost; updated with the best total cost
+ * \param rd64CodedCost0   reference to cost when coefficient is 0
+ * \param rd64CodedCostSig reference to cost of the significance flag; updated
+ * \param lLevelDouble     unscaled quantized level
+ * \param uiMaxAbsLevel    scaled quantized level (largest candidate tested)
+ * \param ui16CtxNumSig current ctxInc for coeff_abs_significant_flag
+ * \param ui16CtxNumOne current ctxInc for coeff_abs_level_greater1 (1st bin of coeff_abs_level_minus1 in AVC)
+ * \param ui16CtxNumAbs current ctxInc for coeff_abs_level_greater2 (remaining bins of coeff_abs_level_minus1 in AVC)
+ * \param ui16AbsGoRice current Rice parameter for coeff_abs_level_minus3
+ * \param c1Idx counter deciding whether a greater1 flag is still coded (compared against C1FLAG_NUMBER in xGetICRate)
+ * \param c2Idx counter deciding whether a greater2 flag is still coded (compared against C2FLAG_NUMBER in xGetICRate)
+ * \param iQBits quantization step size (shift)
+ * \param errorScale distortion scaling (correction) factor
+ * \param bLast indicates if the coefficient is the last significant
+ * \param useLimitedPrefixLength whether escape codes use the limited-prefix-length variant
+ * \param channelType channel type (CHANNEL_TYPE_LUMA/CHROMA)
+ * \returns best quantized transform level for given scan position
+ * This method calculates the best quantized transform level for a given scan position.
+ */
+__inline UInt TComTrQuant::xGetCodedLevel ( Double&          rd64CodedCost,
+                                            Double&          rd64CodedCost0,
+                                            Double&          rd64CodedCostSig,
+                                            Intermediate_Int lLevelDouble,
+                                            UInt             uiMaxAbsLevel,
+                                            UShort           ui16CtxNumSig,
+                                            UShort           ui16CtxNumOne,
+                                            UShort           ui16CtxNumAbs,
+                                            UShort           ui16AbsGoRice,
+                                            UInt             c1Idx,
+                                            UInt             c2Idx,
+                                            Int              iQBits,
+                                            Double           errorScale,
+                                            Bool             bLast,
+                                            Bool             useLimitedPrefixLength,
+                                            ChannelType      channelType
+                                            ) const
+{
+  Double dCurrCostSig   = 0;
+  UInt   uiBestAbsLevel = 0;
+
+  if( !bLast && uiMaxAbsLevel < 3 )
+  {
+    // Coding a zero is a candidate: seed the best cost with the cost of
+    // signalling "not significant" plus the cost of a zero coefficient.
+    rd64CodedCostSig    = xGetRateSigCoef( 0, ui16CtxNumSig );
+    rd64CodedCost       = rd64CodedCost0 + rd64CodedCostSig;
+    if( uiMaxAbsLevel == 0 )
+    {
+      return uiBestAbsLevel;
+    }
+  }
+  else
+  {
+    // The last coefficient (or a large level) cannot be zero: force a
+    // non-zero candidate to win the comparison below.
+    rd64CodedCost       = MAX_DOUBLE;
+  }
+
+  if( !bLast )
+  {
+    dCurrCostSig        = xGetRateSigCoef( 1, ui16CtxNumSig );
+  }
+
+  // Try uiMaxAbsLevel and uiMaxAbsLevel-1 (never below 1) and keep the
+  // candidate minimising distortion + lambda-weighted rate.
+  UInt uiMinAbsLevel    = ( uiMaxAbsLevel > 1 ? uiMaxAbsLevel - 1 : 1 );
+  for( Int uiAbsLevel  = uiMaxAbsLevel; uiAbsLevel >= uiMinAbsLevel ; uiAbsLevel-- )
+  {
+    Double dErr         = Double( lLevelDouble  - ( Intermediate_Int(uiAbsLevel) << iQBits ) );
+    Double dCurrCost    = dErr * dErr * errorScale + xGetICost( xGetICRate( uiAbsLevel, ui16CtxNumOne, ui16CtxNumAbs, ui16AbsGoRice, c1Idx, c2Idx, useLimitedPrefixLength, channelType ) );
+    dCurrCost          += dCurrCostSig;
+
+    if( dCurrCost < rd64CodedCost )
+    {
+      uiBestAbsLevel    = uiAbsLevel;
+      rd64CodedCost     = dCurrCost;
+      rd64CodedCostSig  = dCurrCostSig;
+    }
+  }
+
+  return uiBestAbsLevel;
+}
+
+/** Calculates the rate for a specific absolute transform level
+ * \param uiAbsLevel scaled quantized level
+ * \param ui16CtxNumOne current ctxInc for coeff_abs_level_greater1 (1st bin of coeff_abs_level_minus1 in AVC)
+ * \param ui16CtxNumAbs current ctxInc for coeff_abs_level_greater2 (remaining bins of coeff_abs_level_minus1 in AVC)
+ * \param ui16AbsGoRice Rice parameter for coeff_abs_level_minus3
+ * \param c1Idx counter deciding whether a greater1 flag is still coded (compared against C1FLAG_NUMBER)
+ * \param c2Idx counter deciding whether a greater2 flag is still coded (compared against C2FLAG_NUMBER)
+ * \param useLimitedPrefixLength whether escape codes use the limited-prefix-length variant
+ * \param channelType channel type (CHANNEL_TYPE_LUMA/CHROMA)
+ * \returns rate of the given absolute transform level, in Q15 fixed point (1 bit == 1<<15)
+ */
+__inline Int TComTrQuant::xGetICRate         ( UInt                            uiAbsLevel,
+                                               UShort                          ui16CtxNumOne,
+                                               UShort                          ui16CtxNumAbs,
+                                               UShort                          ui16AbsGoRice,
+                                               UInt                            c1Idx,
+                                               UInt                            c2Idx,
+                                               Bool                            useLimitedPrefixLength,
+                                               ChannelType                     channelType
+                                               ) const
+{
+  Int  iRate      = Int(xGetIEPRate()); // cost of sign bit
+  // Levels below baseLevel are fully described by the greater1/greater2
+  // flags; only the part above baseLevel is coded as a remainder.
+  UInt baseLevel  = (c1Idx < C1FLAG_NUMBER) ? (2 + (c2Idx < C2FLAG_NUMBER)) : 1;
+
+  if ( uiAbsLevel >= baseLevel )
+  {
+    UInt symbol     = uiAbsLevel - baseLevel;
+    UInt length;
+    if (symbol < (COEF_REMAIN_BIN_REDUCTION << ui16AbsGoRice))
+    {
+      // Golomb-Rice part: unary prefix (length+1 bins) plus Rice suffix.
+      length = symbol>>ui16AbsGoRice;
+      iRate += (length+1+ui16AbsGoRice)<< 15;
+    }
+    else if (useLimitedPrefixLength)
+    {
+      // Escape code with a bounded prefix so the codeword length cannot
+      // exceed the transform dynamic range.
+      const UInt maximumPrefixLength = (32 - (COEF_REMAIN_BIN_REDUCTION + g_maxTrDynamicRange[channelType]));
+
+      UInt prefixLength = 0;
+      UInt suffix       = (symbol >> ui16AbsGoRice) - COEF_REMAIN_BIN_REDUCTION;
+
+      while ((prefixLength < maximumPrefixLength) && (suffix > ((2 << prefixLength) - 2)))
+      {
+        prefixLength++;
+      }
+
+      const UInt suffixLength = (prefixLength == maximumPrefixLength) ? (g_maxTrDynamicRange[channelType] - ui16AbsGoRice) : (prefixLength + 1/*separator*/);
+
+      iRate += (COEF_REMAIN_BIN_REDUCTION + prefixLength + suffixLength + ui16AbsGoRice) << 15;
+    }
+    else
+    {
+      // Unlimited exp-Golomb escape code.
+      length = ui16AbsGoRice;
+      symbol  = symbol - ( COEF_REMAIN_BIN_REDUCTION << ui16AbsGoRice);
+      while (symbol >= (1<<length))
+      {
+        symbol -=  (1<<(length++));
+      }
+      iRate += (COEF_REMAIN_BIN_REDUCTION+length+1-ui16AbsGoRice+length)<< 15;
+    }
+
+    // Add the context-coded greater1/greater2 flag costs still being coded.
+    if (c1Idx < C1FLAG_NUMBER)
+    {
+      iRate += m_pcEstBitsSbac->m_greaterOneBits[ ui16CtxNumOne ][ 1 ];
+
+      if (c2Idx < C2FLAG_NUMBER)
+      {
+        iRate += m_pcEstBitsSbac->m_levelAbsBits[ ui16CtxNumAbs ][ 1 ];
+      }
+    }
+  }
+  else if( uiAbsLevel == 1 )
+  {
+    iRate += m_pcEstBitsSbac->m_greaterOneBits[ ui16CtxNumOne ][ 0 ];
+  }
+  else if( uiAbsLevel == 2 )
+  {
+    iRate += m_pcEstBitsSbac->m_greaterOneBits[ ui16CtxNumOne ][ 1 ];
+    iRate += m_pcEstBitsSbac->m_levelAbsBits[ ui16CtxNumAbs ][ 0 ];
+  }
+  else
+  {
+    // uiAbsLevel == 0: nothing is coded, so even the sign-bit cost is dropped.
+    iRate = 0;
+  }
+
+  return  iRate;
+}
+
+/** Cost of coding the significant-coefficient-group flag.
+ * \param uiSignificanceCoeffGroup flag value (0 or 1)
+ * \param ui16CtxNumSig context index for the flag
+ * \returns lambda-weighted rate of the flag
+ */
+__inline Double TComTrQuant::xGetRateSigCoeffGroup  ( UShort                    uiSignificanceCoeffGroup,
+                                                UShort                          ui16CtxNumSig ) const
+{
+  const Int rate = m_pcEstBitsSbac->significantCoeffGroupBits[ ui16CtxNumSig ][ uiSignificanceCoeffGroup ];
+  return xGetICost( rate );
+}
+
+/** Calculates the cost of signalling the position of the last significant
+ *  coefficient in the block.
+ * \param uiPosX    X coordinate of the last significant coefficient
+ * \param uiPosY    Y coordinate of the last significant coefficient
+ * \param component colour component the TU belongs to
+ * \returns lambda-weighted rate of coding the last position
+ */
+__inline Double TComTrQuant::xGetRateLast   ( const UInt                      uiPosX,
+                                              const UInt                      uiPosY,
+                                              const ComponentID               component  ) const
+{
+  const ChannelType chType = toChannelType(component);
+  const UInt        uiCtxX = g_uiGroupIdx[uiPosX];
+  const UInt        uiCtxY = g_uiGroupIdx[uiPosY];
+
+  Double uiCost = m_pcEstBitsSbac->lastXBits[chType][ uiCtxX ] + m_pcEstBitsSbac->lastYBits[chType][ uiCtxY ];
+
+  // Group indices above 3 carry ((ctx-2)>>1) equi-probable suffix bins each.
+  if( uiCtxX > 3 )
+  {
+    uiCost += ((uiCtxX-2)>>1) * xGetIEPRate();
+  }
+  if( uiCtxY > 3 )
+  {
+    uiCost += ((uiCtxY-2)>>1) * xGetIEPRate();
+  }
+  return xGetICost( uiCost );
+}
+
+/** Cost of coding the significance flag of a single coefficient.
+ * \param uiSignificance flag value (0 or 1)
+ * \param ui16CtxNumSig  context index for coeff_abs_significant_flag
+ * \returns lambda-weighted rate of the flag
+ */
+__inline Double TComTrQuant::xGetRateSigCoef  ( UShort                          uiSignificance,
+                                                UShort                          ui16CtxNumSig ) const
+{
+  const Int rate = m_pcEstBitsSbac->significantBits[ ui16CtxNumSig ][ uiSignificance ];
+  return xGetICost( rate );
+}
+
+/** Convert a rate into an RD cost by weighting it with lambda.
+ * \param dRate rate (in scaled bits)
+ * \returns cost contribution of the given rate
+ */
+__inline Double TComTrQuant::xGetICost        ( Double                          dRate         ) const
+{
+  return dRate * m_dLambda;
+}
+
+/** Rate of one equi-probable (bypass) bin.
+ * \returns the fixed-point rate of a single equi-probable bit (Q15: 1<<15)
+ */
+__inline Double TComTrQuant::xGetIEPRate      (                                               ) const
+{
+  return Double(1 << 15); // one bit in Q15 fixed point
+}
+
+/** Context derivation for the coded-sub-block flag of a coefficient group.
+ * The context increment is 1 when the right or the lower neighbouring
+ * coefficient group is significant, otherwise 0.
+ * \param uiSigCoeffGroupFlag significance map of the coefficient groups
+ * \param uiCGPosX       column of the current coefficient group
+ * \param uiCGPosY       row of the current coefficient group
+ * \param widthInGroups  block width in coefficient groups
+ * \param heightInGroups block height in coefficient groups
+ * \returns ctxInc (0 or 1) for the current coefficient group
+ */
+UInt TComTrQuant::getSigCoeffGroupCtxInc  (const UInt*  uiSigCoeffGroupFlag,
+                                           const UInt   uiCGPosX,
+                                           const UInt   uiCGPosY,
+                                           const UInt   widthInGroups,
+                                           const UInt   heightInGroups)
+{
+  Bool neighbourSignificant = false;
+
+  if (uiCGPosX + 1 < widthInGroups)
+  {
+    neighbourSignificant = neighbourSignificant || (uiSigCoeffGroupFlag[ (uiCGPosY * widthInGroups) + uiCGPosX + 1 ] != 0);
+  }
+  if (uiCGPosY + 1 < heightInGroups)
+  {
+    neighbourSignificant = neighbourSignificant || (uiSigCoeffGroupFlag[ ((uiCGPosY + 1) * widthInGroups) + uiCGPosX ] != 0);
+  }
+
+  return neighbourSignificant ? 1 : 0;
+}
+
+
+/** Build the encoder-side quantisation matrices, dequantisation matrices and
+ *  error-scale tables for every size / list / QP-remainder combination.
+ * \param scalingList quantisation matrix source
+ * \param format      chroma format of the sequence
+ */
+Void TComTrQuant::setScalingList(TComScalingList *scalingList, const ChromaFormat format)
+{
+  for(UInt sizeId = 0; sizeId < SCALING_LIST_SIZE_NUM; sizeId++)
+  {
+    for(UInt listId = 0; listId < SCALING_LIST_NUM; listId++)
+    {
+      for(Int qp = 0; qp < SCALING_LIST_REM_NUM; qp++)
+      {
+        xSetScalingListEnc(scalingList, listId, sizeId, qp, format);
+        xSetScalingListDec(scalingList, listId, sizeId, qp, format);
+        setErrScaleCoeff(listId, sizeId, qp);
+      }
+    }
+  }
+}
+/** Build the decoder-side dequantisation matrices for every size / list /
+ *  QP-remainder combination.
+ * \param scalingList quantisation matrix source
+ * \param format      chroma format of the sequence
+ */
+Void TComTrQuant::setScalingListDec(TComScalingList *scalingList, const ChromaFormat format)
+{
+  for(UInt sizeId = 0; sizeId < SCALING_LIST_SIZE_NUM; sizeId++)
+  {
+    for(UInt listId = 0; listId < SCALING_LIST_NUM; listId++)
+    {
+      for(Int qp = 0; qp < SCALING_LIST_REM_NUM; qp++)
+      {
+        xSetScalingListDec(scalingList, listId, sizeId, qp, format);
+      }
+    }
+  }
+}
+/** set error scale coefficients
+ * Precomputes, for one scaling-list / size / QP-remainder combination, the
+ * factor that converts a squared quantisation error into the distortion
+ * units used by the Lagrangian RD cost.
+ * \param list List ID
+ * \param size Size index
+ * \param qp   Quantization parameter remainder (QP%6)
+ */
+Void TComTrQuant::setErrScaleCoeff(UInt list, UInt size, Int qp)
+{
+  const UInt uiLog2TrSize = g_aucConvertToBit[ g_scalingListSizeX[size] ] + 2;
+  // List 0 and list MAX_NUM_COMPONENT map to the luma channel type.
+  const ChannelType channelType = ((list == 0) || (list == MAX_NUM_COMPONENT)) ? CHANNEL_TYPE_LUMA : CHANNEL_TYPE_CHROMA;
+
+  const Int iTransformShift = getTransformShift(channelType, uiLog2TrSize);  // Represents scaling through forward transform
+
+  UInt i,uiMaxNumCoeff = g_scalingListSize[size];
+  Int *piQuantcoeff;
+  Double *pdErrScale;
+  piQuantcoeff   = getQuantCoeff(list, qp,size);
+  pdErrScale     = getErrScaleCoeff(list, size, qp);
+
+  Double dErrScale = (Double)(1<<SCALE_BITS);                                // Compensate for scaling of bitcount in Lagrange cost function
+  dErrScale = dErrScale*pow(2.0,(-2.0*iTransformShift));                     // Compensate for scaling through forward transform
+
+  // Per-coefficient scale: divide twice by the quantisation coefficient
+  // (squared error) and adjust for the internal bit-depth increase.
+  for(i=0;i<uiMaxNumCoeff;i++)
+  {
+    pdErrScale[i] =  dErrScale / piQuantcoeff[i] / piQuantcoeff[i] / (1 << DISTORTION_PRECISION_ADJUSTMENT(2 * (g_bitDepth[channelType] - 8)));
+  }
+
+  // Variant used when no scaling list is applied: based on the nominal scale.
+  getErrScaleCoeffNoScalingList(list, size, qp) = dErrScale / g_quantScales[qp] / g_quantScales[qp] / (1 << DISTORTION_PRECISION_ADJUSTMENT(2 * (g_bitDepth[channelType] - 8)));
+}
+
+/** Derive the encoder-side quantisation matrix for one size/list/QP.
+ * \param scalingList quantisation matrix source
+ * \param listId list index
+ * \param sizeId size index
+ * \param qp     QP remainder (QP%6)
+ * \param format chroma format (unused here; kept for interface symmetry)
+ */
+Void TComTrQuant::xSetScalingListEnc(TComScalingList *scalingList, UInt listId, UInt sizeId, Int qp, const ChromaFormat format)
+{
+  const UInt sideLength = g_scalingListSizeX[sizeId];
+  const Int  baseSize   = min(MAX_MATRIX_SIZE_NUM, (Int)sideLength);
+  const UInt ratio      = sideLength / baseSize;
+
+  Int *coeff      = scalingList->getScalingListAddress(sizeId, listId);
+  Int *quantcoeff = getQuantCoeff(listId, qp, sizeId);
+
+  processScalingListEnc(coeff,
+                        quantcoeff,
+                        (g_quantScales[qp] << LOG2_SCALING_LIST_NEUTRAL_VALUE),
+                        sideLength, sideLength, ratio,
+                        baseSize,
+                        scalingList->getScalingListDC(sizeId, listId));
+}
+
+/** Derive the decoder-side dequantisation matrix for one size/list/QP.
+ * \param scalingList quantisation matrix source
+ * \param listId list index
+ * \param sizeId size index
+ * \param qp     QP remainder (QP%6)
+ * \param format chroma format (unused here; kept for interface symmetry)
+ */
+Void TComTrQuant::xSetScalingListDec(TComScalingList *scalingList, UInt listId, UInt sizeId, Int qp, const ChromaFormat format)
+{
+  const UInt sideLength = g_scalingListSizeX[sizeId];
+  const Int  baseSize   = min(MAX_MATRIX_SIZE_NUM, (Int)sideLength);
+  const UInt ratio      = sideLength / baseSize;
+
+  Int *coeff        = scalingList->getScalingListAddress(sizeId, listId);
+  Int *dequantcoeff = getDequantCoeff(listId, qp, sizeId);
+
+  processScalingListDec(coeff,
+                        dequantcoeff,
+                        g_invQuantScales[qp],
+                        sideLength, sideLength, ratio,
+                        baseSize,
+                        scalingList->getScalingListDC(sizeId, listId));
+}
+
+/** set flat matrix value to quantized coefficient
+ */
+Void TComTrQuant::setFlatScalingList(const ChromaFormat format)
+{
+  const Int minimumQp = 0;
+  const Int maximumQp = SCALING_LIST_REM_NUM;
+
+  for(UInt size = 0; size < SCALING_LIST_SIZE_NUM; size++)
+  {
+    for(UInt list = 0; list < SCALING_LIST_NUM; list++)
+    {
+      for(Int qp = minimumQp; qp < maximumQp; qp++)
+      {
+        xsetFlatScalingList(list,size,qp,format);
+        setErrScaleCoeff(list,size,qp);
+      }
+    }
+  }
+}
+
+/** Fill one quantisation / dequantisation matrix pair with flat values.
+ * \param list List ID
+ * \param size Size index
+ * \param qp   QP remainder (QP%6)
+ * \param format chroma format (unused here; kept for interface symmetry)
+ */
+Void TComTrQuant::xsetFlatScalingList(UInt list, UInt size, Int qp, const ChromaFormat format)
+{
+  const UInt numCoeff       = g_scalingListSize[size];
+  const Int  quantScales    = g_quantScales   [qp];
+  const Int  invQuantScales = g_invQuantScales[qp] << 4;
+
+  Int *quantcoeff   = getQuantCoeff  (list, qp, size);
+  Int *dequantcoeff = getDequantCoeff(list, qp, size);
+
+  for(UInt i = 0; i < numCoeff; i++)
+  {
+    quantcoeff[i]   = quantScales;
+    dequantcoeff[i] = invQuantScales;
+  }
+}
+
+/** Upsample a scaling list into an encoder quantisation matrix.
+ * Each destination entry is quantScales divided by the matching (possibly
+ * repeated) scaling-list entry.
+ * \param coeff       scaling-list source matrix (sizuNum x sizuNum)
+ * \param quantcoeff  destination quantisation matrix (width x height)
+ * \param quantScales Q(QP%6), pre-shifted by LOG2_SCALING_LIST_NEUTRAL_VALUE
+ * \param height  destination height
+ * \param width   destination width
+ * \param ratio   upscaling ratio from source to destination
+ * \param sizuNum source matrix side length
+ * \param dc      separately signalled DC value
+ */
+Void TComTrQuant::processScalingListEnc( Int *coeff, Int *quantcoeff, Int quantScales, UInt height, UInt width, UInt ratio, Int sizuNum, UInt dc)
+{
+  for(UInt y = 0; y < height; y++)
+  {
+    Int *srcRow = coeff + sizuNum * (y / ratio);
+    Int *dstRow = quantcoeff + y * width;
+
+    for(UInt x = 0; x < width; x++)
+    {
+      dstRow[x] = quantScales / srcRow[x / ratio];
+    }
+  }
+
+  // An upsampled matrix carries its DC coefficient separately.
+  if(ratio > 1)
+  {
+    quantcoeff[0] = quantScales / dc;
+  }
+}
+
+/** Upsample a scaling list into a decoder dequantisation matrix.
+ * Each destination entry is invQuantScales multiplied by the matching
+ * (possibly repeated) scaling-list entry.
+ * \param coeff          scaling-list source matrix (sizuNum x sizuNum)
+ * \param dequantcoeff   destination dequantisation matrix (width x height)
+ * \param invQuantScales IQ(QP%6)
+ * \param height  destination height
+ * \param width   destination width
+ * \param ratio   upscaling ratio from source to destination
+ * \param sizuNum source matrix side length
+ * \param dc      separately signalled DC value
+ */
+Void TComTrQuant::processScalingListDec( Int *coeff, Int *dequantcoeff, Int invQuantScales, UInt height, UInt width, UInt ratio, Int sizuNum, UInt dc)
+{
+  for(UInt y = 0; y < height; y++)
+  {
+    Int *srcRow = coeff + sizuNum * (y / ratio);
+    Int *dstRow = dequantcoeff + y * width;
+
+    for(UInt x = 0; x < width; x++)
+    {
+      dstRow[x] = invQuantScales * srcRow[x / ratio];
+    }
+  }
+
+  // An upsampled matrix carries its DC coefficient separately.
+  if(ratio > 1)
+  {
+    dequantcoeff[0] = invQuantScales * dc;
+  }
+}
+
+/** Allocate the per-size / per-list / per-QP quantisation, dequantisation
+ *  and error-scale coefficient arrays.
+ */
+Void TComTrQuant::initScalingList()
+{
+  for(UInt sizeId = 0; sizeId < SCALING_LIST_SIZE_NUM; sizeId++)
+  {
+    const UInt numCoeff = g_scalingListSize[sizeId];
+
+    for(UInt listId = 0; listId < SCALING_LIST_NUM; listId++)
+    {
+      for(UInt qp = 0; qp < SCALING_LIST_REM_NUM; qp++)
+      {
+        m_quantCoef  [sizeId][listId][qp] = new Int   [numCoeff];
+        m_dequantCoef[sizeId][listId][qp] = new Int   [numCoeff];
+        m_errScale   [sizeId][listId][qp] = new Double[numCoeff];
+      }
+    }
+  }
+}
+
+/** Destroy the quantisation matrix arrays allocated by initScalingList().
+ *  Pointers are reset to NULL afterwards so a repeated call (or a call after
+ *  a partial initialisation) cannot double-free the same storage.
+ */
+Void TComTrQuant::destroyScalingList()
+{
+  for(UInt sizeId = 0; sizeId < SCALING_LIST_SIZE_NUM; sizeId++)
+  {
+    for(UInt listId = 0; listId < SCALING_LIST_NUM; listId++)
+    {
+      for(UInt qp = 0; qp < SCALING_LIST_REM_NUM; qp++)
+      {
+        // delete[] on a NULL pointer is a no-op, so no guard is required.
+        delete [] m_quantCoef   [sizeId][listId][qp];
+        delete [] m_dequantCoef [sizeId][listId][qp];
+        delete [] m_errScale    [sizeId][listId][qp];
+
+        m_quantCoef   [sizeId][listId][qp] = NULL;
+        m_dequantCoef [sizeId][listId][qp] = NULL;
+        m_errScale    [sizeId][listId][qp] = NULL;
+      }
+    }
+  }
+}
+
+/** Forward transform-skip and quantisation of a single residual sample.
+ *  Applies the transform-skip scaling (a plain shift) to resiDiff, quantises
+ *  the result and stores the clipped level at position uiPos of pcCoeff.
+ * \param rTu      transform unit being processed
+ * \param compID   colour component
+ * \param resiDiff residual sample to quantise
+ * \param pcCoeff  destination coefficient buffer
+ * \param uiPos    position of the sample inside the TU
+ * \param cQP      quantisation parameter (per/rem)
+ * \param bUseHalfRoundingPoint use a 1/2 rounding offset instead of the
+ *        slice-type dependent offset (171/512 for I slices, 85/512 otherwise)
+ */
+Void TComTrQuant::transformSkipQuantOneSample(TComTU &rTu, const ComponentID compID, const Pel resiDiff, TCoeff* pcCoeff, const UInt uiPos, const QpParam &cQP, const Bool bUseHalfRoundingPoint)
+{
+        TComDataCU    *pcCU                           = rTu.getCU();
+  const UInt           uiAbsPartIdx                   = rTu.GetAbsPartIdxTU();
+  const TComRectangle &rect                           = rTu.getRect(compID);
+  const UInt           uiWidth                        = rect.width;
+  const UInt           uiHeight                       = rect.height;
+  const Int            iTransformShift                = getTransformShift(toChannelType(compID), rTu.GetEquivalentLog2TrSize(compID));
+  const Int            scalingListType                = getScalingListType(pcCU->getPredictionMode(uiAbsPartIdx), compID);
+  const Bool           enableScalingLists             = getUseScalingList(uiWidth, uiHeight, true);
+  const Int            defaultQuantisationCoefficient = g_quantScales[cQP.rem];
+
+  assert( scalingListType < SCALING_LIST_NUM );
+  const Int *const piQuantCoeff = getQuantCoeff( scalingListType, cQP.rem, (rTu.GetEquivalentLog2TrSize(compID)-2) );
+
+
+  /* for 422 chroma blocks, the effective scaling applied during transformation is not a power of 2, hence it cannot be
+  * implemented as a bit-shift (the quantised result will be sqrt(2) * larger than required). Alternatively, adjust the
+  * uiLog2TrSize applied in iTransformShift, such that the result is 1/sqrt(2) the required result (i.e. smaller)
+  * Then a QP+3 (sqrt(2)) or QP-3 (1/sqrt(2)) method could be used to get the required result
+  */
+
+  const Int iQBits = QUANT_SHIFT + cQP.per + iTransformShift;
+  // QBits will be OK for any internal bit depth as the reduction in transform shift is balanced by an increase in Qp_per due to QpBDOffset
+
+  // Rounding offset: 256/512, 171/512 or 85/512 of a quantisation step.
+  const Int iAdd = ( bUseHalfRoundingPoint ? 256 : (pcCU->getSlice()->getSliceType() == I_SLICE ? 171 : 85) ) << (iQBits - 9);
+
+  TCoeff transformedCoefficient;
+
+  // transform-skip
+  // NOTE(review): left-shifting a negative resiDiff is technically undefined
+  // in C++; this relies on arithmetic-shift behaviour of target compilers.
+  if (iTransformShift >= 0)
+  {
+    transformedCoefficient = resiDiff << iTransformShift;
+  }
+  else // for very high bit depths
+  {
+    const Int iTrShiftNeg  = -iTransformShift;
+    const Int offset       = 1 << (iTrShiftNeg - 1);
+    transformedCoefficient = ( resiDiff + offset ) >> iTrShiftNeg;
+  }
+
+  // quantization
+  const TCoeff iSign = (transformedCoefficient < 0 ? -1: 1);
+
+  const Int quantisationCoefficient = enableScalingLists ? piQuantCoeff[uiPos] : defaultQuantisationCoefficient;
+
+  // 64-bit intermediate prevents overflow of |coefficient| * quant scale.
+  const Int64 tmpLevel = (Int64)abs(transformedCoefficient) * quantisationCoefficient;
+
+  const TCoeff quantisedCoefficient = (TCoeff((tmpLevel + iAdd ) >> iQBits)) * iSign;
+
+  // Clip to the entropy-coding dynamic range of the channel.
+  const TCoeff entropyCodingMinimum = -(1 << g_maxTrDynamicRange[toChannelType(compID)]);
+  const TCoeff entropyCodingMaximum =  (1 << g_maxTrDynamicRange[toChannelType(compID)]) - 1;
+  pcCoeff[ uiPos ] = Clip3<TCoeff>( entropyCodingMinimum, entropyCodingMaximum, quantisedCoefficient );
+}
+
+
+/** Dequantisation and inverse transform-skip of a single coefficient.
+ *  Dequantises inSample (with or without a scaling list), clips to the
+ *  transform dynamic range and applies the inverse transform-skip shift.
+ * \param rTu         transform unit being processed
+ * \param compID      colour component
+ * \param inSample    quantised coefficient to reconstruct
+ * \param reconSample output reconstructed residual sample
+ * \param cQP         quantisation parameter (per/rem)
+ * \param uiPos       position of the sample inside the TU
+ */
+Void TComTrQuant::invTrSkipDeQuantOneSample( TComTU &rTu, ComponentID compID, TCoeff inSample, Pel &reconSample, const QpParam &cQP, UInt uiPos )
+{
+        TComDataCU    *pcCU               = rTu.getCU();
+  const UInt           uiAbsPartIdx       = rTu.GetAbsPartIdxTU();
+  const TComRectangle &rect               = rTu.getRect(compID);
+  const UInt           uiWidth            = rect.width;
+  const UInt           uiHeight           = rect.height;
+  const Int            QP_per             = cQP.per;
+  const Int            QP_rem             = cQP.rem;
+  const Int            iTransformShift    = getTransformShift(toChannelType(compID), rTu.GetEquivalentLog2TrSize(compID));
+  const Int            scalingListType    = getScalingListType(pcCU->getPredictionMode(uiAbsPartIdx), compID);
+  const Bool           enableScalingLists = getUseScalingList(uiWidth, uiHeight, true);
+  const UInt           uiLog2TrSize       = rTu.GetEquivalentLog2TrSize(compID);
+
+  assert( scalingListType < SCALING_LIST_NUM );
+
+  // Net dequantisation shift; may be negative for very high bit depths.
+  const Int rightShift = (IQUANT_SHIFT - (iTransformShift + QP_per)) + (enableScalingLists ? LOG2_SCALING_LIST_NEUTRAL_VALUE : 0);
+
+  const TCoeff transformMinimum = -(1 << g_maxTrDynamicRange[toChannelType(compID)]);
+  const TCoeff transformMaximum =  (1 << g_maxTrDynamicRange[toChannelType(compID)]) - 1;
+
+  // Dequantisation
+
+  TCoeff dequantisedSample;
+
+  if(enableScalingLists)
+  {
+    // Clip the input so that (input * dequant coefficient) cannot overflow
+    // the Intermediate_Int accumulator.
+    const UInt             dequantCoefBits     = 1 + IQUANT_SHIFT + SCALING_LIST_BITS;
+    const UInt             targetInputBitDepth = std::min<UInt>((g_maxTrDynamicRange[toChannelType(compID)] + 1), (((sizeof(Intermediate_Int) * 8) + rightShift) - dequantCoefBits));
+
+    const Intermediate_Int inputMinimum        = -(1 << (targetInputBitDepth - 1));
+    const Intermediate_Int inputMaximum        =  (1 << (targetInputBitDepth - 1)) - 1;
+
+    Int *piDequantCoef = getDequantCoeff(scalingListType,QP_rem,uiLog2TrSize-2);
+
+    if(rightShift > 0)
+    {
+      const Intermediate_Int iAdd      = 1 << (rightShift - 1);
+      const TCoeff           clipQCoef = TCoeff(Clip3<Intermediate_Int>(inputMinimum, inputMaximum, inSample));
+      const Intermediate_Int iCoeffQ   = ((Intermediate_Int(clipQCoef) * piDequantCoef[uiPos]) + iAdd ) >> rightShift;
+
+      dequantisedSample = TCoeff(Clip3<Intermediate_Int>(transformMinimum,transformMaximum,iCoeffQ));
+    }
+    else
+    {
+      const Int              leftShift = -rightShift;
+      const TCoeff           clipQCoef = TCoeff(Clip3<Intermediate_Int>(inputMinimum, inputMaximum, inSample));
+      const Intermediate_Int iCoeffQ   = (Intermediate_Int(clipQCoef) * piDequantCoef[uiPos]) << leftShift;
+
+      dequantisedSample = TCoeff(Clip3<Intermediate_Int>(transformMinimum,transformMaximum,iCoeffQ));
+    }
+  }
+  else
+  {
+    // Flat dequantisation using the nominal inverse quantisation scale.
+    const Int scale     =  g_invQuantScales[QP_rem];
+    const Int scaleBits =     (IQUANT_SHIFT + 1)   ;
+
+    const UInt             targetInputBitDepth = std::min<UInt>((g_maxTrDynamicRange[toChannelType(compID)] + 1), (((sizeof(Intermediate_Int) * 8) + rightShift) - scaleBits));
+    const Intermediate_Int inputMinimum        = -(1 << (targetInputBitDepth - 1));
+    const Intermediate_Int inputMaximum        =  (1 << (targetInputBitDepth - 1)) - 1;
+
+    if (rightShift > 0)
+    {
+      const Intermediate_Int iAdd      = 1 << (rightShift - 1);
+      const TCoeff           clipQCoef = TCoeff(Clip3<Intermediate_Int>(inputMinimum, inputMaximum, inSample));
+      const Intermediate_Int iCoeffQ   = (Intermediate_Int(clipQCoef) * scale + iAdd) >> rightShift;
+
+      dequantisedSample = TCoeff(Clip3<Intermediate_Int>(transformMinimum,transformMaximum,iCoeffQ));
+    }
+    else
+    {
+      const Int              leftShift = -rightShift;
+      const TCoeff           clipQCoef = TCoeff(Clip3<Intermediate_Int>(inputMinimum, inputMaximum, inSample));
+      const Intermediate_Int iCoeffQ   = (Intermediate_Int(clipQCoef) * scale) << leftShift;
+
+      dequantisedSample = TCoeff(Clip3<Intermediate_Int>(transformMinimum,transformMaximum,iCoeffQ));
+    }
+  }
+
+  // Inverse transform-skip
+
+  if (iTransformShift >= 0)
+  {
+    // Round-to-nearest down-shift (no offset when the shift is zero).
+    const TCoeff offset = iTransformShift==0 ? 0 : (1 << (iTransformShift - 1));
+    reconSample =  Pel(( dequantisedSample + offset ) >> iTransformShift);
+  }
+  else //for very high bit depths
+  {
+    const Int iTrShiftNeg = -iTransformShift;
+    reconSample = Pel(dequantisedSample << iTrShiftNeg);
+  }
+}
+
+
+/** Apply or remove cross-component prediction on a chroma residual block.
+ * forward (reverse == false): output = chroma - ((alpha * luma') >> 3)
+ * reverse (reverse == true):  output = chroma + ((alpha * luma') >> 3)
+ * where luma' is the luma residual right-shifted by the luma/chroma
+ * bit-depth difference.
+ * \param rTu     transform unit
+ * \param compID  chroma component being processed
+ * \param piResiL luma residual buffer
+ * \param piResiC chroma residual input buffer
+ * \param piResiT output residual buffer
+ * \param width   block width
+ * \param height  block height
+ * \param strideL luma buffer stride
+ * \param strideC chroma buffer stride
+ * \param strideT output buffer stride
+ * \param reverse true to add the prediction back, false to subtract it
+ */
+Void TComTrQuant::crossComponentPrediction(       TComTU      & rTu,
+                                            const ComponentID   compID,
+                                            const Pel         * piResiL,
+                                            const Pel         * piResiC,
+                                                  Pel         * piResiT,
+                                            const Int           width,
+                                            const Int           height,
+                                            const Int           strideL,
+                                            const Int           strideC,
+                                            const Int           strideT,
+                                            const Bool          reverse )
+{
+  TComDataCU *pCU          = rTu.getCU();
+  const Char  alpha        = pCU->getCrossComponentPredictionAlpha( rTu.GetAbsPartIdxTU( compID ), compID );
+  const Int   diffBitDepth = pCU->getSlice()->getSPS()->getDifferentialLumaChromaBitDepth();
+
+  const Pel *pLuma   = piResiL;
+  const Pel *pChroma = piResiC;
+        Pel *pOut    = piResiT;
+
+  for( Int y = 0; y < height; y++ )
+  {
+    for( Int x = 0; x < width; x++ )
+    {
+      const Int prediction = ( alpha * rightShift( pLuma[x], diffBitDepth ) ) >> 3;
+      pOut[x] = reverse ? Pel(pChroma[x] + prediction) : Pel(pChroma[x] - prediction);
+    }
+
+    pLuma   += strideL;
+    pChroma += strideC;
+    pOut    += strideT;
+  }
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComTrQuant.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,328 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComTrQuant.h
+    \brief    transform and quantization class (header)
+*/
+
+#ifndef __TCOMTRQUANT__
+#define __TCOMTRQUANT__
+
+#include "CommonDef.h"
+#include "TComYuv.h"
+#include "TComDataCU.h"
+#include "TComChromaFormat.h"
+#include "ContextTables.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Constants
+// ====================================================================================================================
+
+#define QP_BITS                 15
+
+// ====================================================================================================================
+// Type definition
+// ====================================================================================================================
+
+typedef struct
+{
+  Int significantCoeffGroupBits[NUM_SIG_CG_FLAG_CTX][2 /*Flag = [0|1]*/];
+  Int significantBits[NUM_SIG_FLAG_CTX][2 /*Flag = [0|1]*/];
+  Int lastXBits[MAX_NUM_CHANNEL_TYPE][LAST_SIGNIFICANT_GROUPS];
+  Int lastYBits[MAX_NUM_CHANNEL_TYPE][LAST_SIGNIFICANT_GROUPS];
+  Int m_greaterOneBits[NUM_ONE_FLAG_CTX][2 /*Flag = [0|1]*/];
+  Int m_levelAbsBits[NUM_ABS_FLAG_CTX][2 /*Flag = [0|1]*/];
+
+  Int blockCbpBits[NUM_QT_CBF_CTX_SETS * NUM_QT_CBF_CTX_PER_SET][2 /*Flag = [0|1]*/];
+  Int blockRootCbpBits[4][2 /*Flag = [0|1]*/];
+
+  Int golombRiceAdaptationStatistics[RExt__GOLOMB_RICE_ADAPTATION_STATISTICS_SETS];
+} estBitsSbacStruct;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// QP struct
+struct QpParam
+{
+  Int Qp;
+  Int per;
+  Int rem;
+
+  QpParam(const Int           qpy,
+          const ChannelType   chType,
+          const Int           qpBdOffset,
+          const Int           chromaQPOffset,
+          const ChromaFormat  chFmt );
+
+  QpParam(const TComDataCU   &cu, const ComponentID compID);
+
+}; // END STRUCT DEFINITION QpParam
+
+
+/// transform and quantization class
+class TComTrQuant
+{
+public:
+  TComTrQuant();
+  ~TComTrQuant();
+
+  // initialize class
+  Void init                 ( UInt  uiMaxTrSize,
+                              Bool useRDOQ                = false,
+                              Bool useRDOQTS              = false,
+                              Bool bEnc                   = false,
+                              Bool useTransformSkipFast   = false
+#if ADAPTIVE_QP_SELECTION
+                            , Bool bUseAdaptQpSelect      = false
+#endif
+                              );
+
+  // transform & inverse transform functions
+  Void transformNxN(       TComTU         & rTu,
+                     const ComponentID      compID,
+                           Pel           *  pcResidual,
+                     const UInt             uiStride,
+                           TCoeff        *  rpcCoeff,
+#if ADAPTIVE_QP_SELECTION
+                           TCoeff        * rpcArlCoeff,
+#endif
+                           TCoeff         & uiAbsSum,
+                     const QpParam        & cQP
+                    );
+
+
+  Void invTransformNxN(      TComTU       & rTu,
+                       const ComponentID    compID,
+                             Pel         *pcResidual,
+                       const UInt           uiStride,
+                             TCoeff      *  pcCoeff,
+                       const QpParam      & cQP
+                             DEBUG_STRING_FN_DECLAREP(psDebug));
+
+  Void invRecurTransformNxN ( const ComponentID compID, TComYuv *pResidual, TComTU &rTu );
+
+  Void rdpcmNxN   ( TComTU& rTu, const ComponentID compID, Pel* pcResidual, const UInt uiStride, const QpParam& cQP, TCoeff* pcCoeff, TCoeff &uiAbsSum, RDPCMMode& rdpcmMode );
+  Void invRdpcmNxN( TComTU& rTu, const ComponentID compID, Pel* pcResidual, const UInt uiStride );
+
+  Void applyForwardRDPCM( TComTU& rTu, const ComponentID compID, Pel* pcResidual, const UInt uiStride, const QpParam& cQP, TCoeff* pcCoeff, TCoeff &uiAbsSum, const RDPCMMode mode );
+
+  // Misc functions
+
+#if RDOQ_CHROMA_LAMBDA
+  Void setLambdas(const Double lambdas[MAX_NUM_COMPONENT]) { for (UInt component = 0; component < MAX_NUM_COMPONENT; component++) m_lambdas[component] = lambdas[component]; }
+  Void selectLambda(const ComponentID compIdx) { m_dLambda = m_lambdas[compIdx]; }
+#else
+  Void setLambda(Double dLambda) { m_dLambda = dLambda;}
+#endif
+  Void setRDOQOffset( UInt uiRDOQOffset ) { m_uiRDOQOffset = uiRDOQOffset; }
+
+  estBitsSbacStruct* m_pcEstBitsSbac;
+
+  static Int      calcPatternSigCtx( const UInt* sigCoeffGroupFlag, UInt uiCGPosX, UInt uiCGPosY, UInt widthInGroups, UInt heightInGroups );
+
+  static Int      getSigCtxInc     ( Int                              patternSigCtx,
+                                     const TUEntropyCodingParameters &codingParameters,
+                                     const Int                        scanPosition,
+                                     const Int                        log2BlockWidth,
+                                     const Int                        log2BlockHeight,
+                                     const ChannelType                chanType
+                                    );
+
+  static UInt getSigCoeffGroupCtxInc  (const UInt*  uiSigCoeffGroupFlag,
+                                       const UInt   uiCGPosX,
+                                       const UInt   uiCGPosY,
+                                       const UInt   widthInGroups,
+                                       const UInt   heightInGroups);
+
+  Void initScalingList                      ();
+  Void destroyScalingList                   ();
+  Void setErrScaleCoeff    ( UInt list, UInt size, Int qp );
+  Double* getErrScaleCoeff              ( UInt list, UInt size, Int qp ) { return m_errScale             [size][list][qp]; };  //!< get Error Scale Coefficent
+  Double& getErrScaleCoeffNoScalingList ( UInt list, UInt size, Int qp ) { return m_errScaleNoScalingList[size][list][qp]; };  //!< get Error Scale Coefficent
+  Int* getQuantCoeff                    ( UInt list, Int qp, UInt size ) { return m_quantCoef            [size][list][qp]; };  //!< get Quant Coefficent
+  Int* getDequantCoeff                  ( UInt list, Int qp, UInt size ) { return m_dequantCoef          [size][list][qp]; };  //!< get DeQuant Coefficent
+  Void setUseScalingList   ( Bool bUseScalingList){ m_scalingListEnabledFlag = bUseScalingList; };
+  Bool getUseScalingList   (const UInt width, const UInt height, const Bool isTransformSkip){ return m_scalingListEnabledFlag && (!isTransformSkip || ((width == 4) && (height == 4))); };
+  Void setFlatScalingList  (const ChromaFormat format);
+  Void xsetFlatScalingList ( UInt list, UInt size, Int qp, const ChromaFormat format);
+  Void xSetScalingListEnc  ( TComScalingList *scalingList, UInt list, UInt size, Int qp, const ChromaFormat format);
+  Void xSetScalingListDec  ( TComScalingList *scalingList, UInt list, UInt size, Int qp, const ChromaFormat format);
+  Void setScalingList      ( TComScalingList *scalingList, const ChromaFormat format);
+  Void setScalingListDec   ( TComScalingList *scalingList, const ChromaFormat format);
+  Void processScalingListEnc( Int *coeff, Int *quantcoeff, Int quantScales, UInt height, UInt width, UInt ratio, Int sizuNum, UInt dc);
+  Void processScalingListDec( Int *coeff, Int *dequantcoeff, Int invQuantScales, UInt height, UInt width, UInt ratio, Int sizuNum, UInt dc);
+#if ADAPTIVE_QP_SELECTION
+  Void    initSliceQpDelta() ;
+  Void    storeSliceQpNext(TComSlice* pcSlice);
+  Void    clearSliceARLCnt();
+  Int     getQpDelta(Int qp) { return m_qpDelta[qp]; }
+  Int*    getSliceNSamples(){ return m_sliceNsamples ;}
+  Double* getSliceSumC()    { return m_sliceSumC; }
+#endif
+  Void transformSkipQuantOneSample(TComTU &rTu, const ComponentID compID, const Pel resiDiff, TCoeff* pcCoeff, const UInt uiPos, const QpParam &cQP, const Bool bUseHalfRoundingPoint);
+  Void invTrSkipDeQuantOneSample(TComTU &rTu, ComponentID compID, TCoeff pcCoeff, Pel &reconSample, const QpParam &cQP, UInt uiPos );
+
+protected:
+#if ADAPTIVE_QP_SELECTION
+  Int     m_qpDelta[MAX_QP+1];
+  Int     m_sliceNsamples[LEVEL_RANGE+1];
+  Double  m_sliceSumC[LEVEL_RANGE+1] ;
+#endif
+  TCoeff* m_plTempCoeff;
+
+//  QpParam  m_cQP; - removed - placed on the stack.
+#if RDOQ_CHROMA_LAMBDA
+  Double   m_lambdas[MAX_NUM_COMPONENT];
+#endif
+  Double   m_dLambda;
+  UInt     m_uiRDOQOffset;
+  UInt     m_uiMaxTrSize;
+  Bool     m_bEnc;
+  Bool     m_useRDOQ;
+  Bool     m_useRDOQTS;
+#if ADAPTIVE_QP_SELECTION
+  Bool     m_bUseAdaptQpSelect;
+#endif
+  Bool     m_useTransformSkipFast;
+
+  Bool     m_scalingListEnabledFlag;
+
+  Int      *m_quantCoef            [SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM][SCALING_LIST_REM_NUM]; ///< array of quantization matrix coefficient 4x4
+  Int      *m_dequantCoef          [SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM][SCALING_LIST_REM_NUM]; ///< array of dequantization matrix coefficient 4x4
+  Double   *m_errScale             [SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM][SCALING_LIST_REM_NUM]; ///< array of quantization matrix coefficient 4x4
+  Double    m_errScaleNoScalingList[SCALING_LIST_SIZE_NUM][SCALING_LIST_NUM][SCALING_LIST_REM_NUM]; ///< array of quantization matrix coefficient 4x4
+
+private:
+  // forward Transform
+  Void xT   ( const ComponentID compID, Bool useDST, Pel* piBlkResi, UInt uiStride, TCoeff* psCoeff, Int iWidth, Int iHeight );
+
+  // skipping Transform
+  Void xTransformSkip ( Pel* piBlkResi, UInt uiStride, TCoeff* psCoeff, TComTU &rTu, const ComponentID component );
+
+  Void signBitHidingHDQ( const ComponentID compID, TCoeff* pQCoef, TCoeff* pCoef, TCoeff* deltaU, const TUEntropyCodingParameters &codingParameters );
+
+  // quantization
+  Void xQuant(       TComTU       &rTu,
+                     TCoeff      * pSrc,
+                     TCoeff      * pDes,
+#if ADAPTIVE_QP_SELECTION
+                     TCoeff      *pArlDes,
+#endif
+                     TCoeff       &uiAbsSum,
+               const ComponentID   compID,
+               const QpParam      &cQP );
+
+  // RDOQ functions
+
+  Void           xRateDistOptQuant (       TComTU       &rTu,
+                                           TCoeff      * plSrcCoeff,
+                                           TCoeff      * piDstCoeff,
+#if ADAPTIVE_QP_SELECTION
+                                           TCoeff      *piArlDstCoeff,
+#endif
+                                           TCoeff       &uiAbsSum,
+                                     const ComponentID   compID,
+                                     const QpParam      &cQP );
+
+__inline UInt              xGetCodedLevel  ( Double&          rd64CodedCost,
+                                             Double&          rd64CodedCost0,
+                                             Double&          rd64CodedCostSig,
+                                             Intermediate_Int lLevelDouble,
+                                             UInt             uiMaxAbsLevel,
+                                             UShort           ui16CtxNumSig,
+                                             UShort           ui16CtxNumOne,
+                                             UShort           ui16CtxNumAbs,
+                                             UShort           ui16AbsGoRice,
+                                             UInt             c1Idx,
+                                             UInt             c2Idx,
+                                             Int              iQBits,
+                                             Double           errorScale,
+                                             Bool             bLast,
+                                             Bool             useLimitedPrefixLength,
+                                             ChannelType      channelType
+                                             ) const;
+
+
+  __inline Int xGetICRate  ( UInt   uiAbsLevel,
+                             UShort ui16CtxNumOne,
+                             UShort ui16CtxNumAbs,
+                             UShort ui16AbsGoRice,
+                             UInt   c1Idx,
+                             UInt   c2Idx,
+                             Bool   useLimitedPrefixLength,
+                             ChannelType channelType
+                           ) const;
+
+  __inline Double xGetRateLast         ( const UInt uiPosX, const UInt uiPosY, const ComponentID component ) const;
+  __inline Double xGetRateSigCoeffGroup( UShort uiSignificanceCoeffGroup, UShort ui16CtxNumSig             ) const;
+  __inline Double xGetRateSigCoef      ( UShort uiSignificance,           UShort ui16CtxNumSig             ) const;
+  __inline Double xGetICost            ( Double dRate                                                      ) const;
+  __inline Double xGetIEPRate          (                                                                   ) const;
+
+
+  // dequantization
+  Void xDeQuant(       TComTU       &rTu,
+                 const TCoeff      * pSrc,
+                       TCoeff      * pDes,
+                 const ComponentID   compID,
+                 const QpParam      &cQP );
+
+  // inverse transform
+  Void xIT    ( const ComponentID compID, Bool useDST, TCoeff* plCoef, Pel* pResidual, UInt uiStride, Int iWidth, Int iHeight );
+
+  // inverse skipping transform
+  Void xITransformSkip ( TCoeff* plCoef, Pel* pResidual, UInt uiStride, TComTU &rTu, const ComponentID component );
+
+public:
+  static Void crossComponentPrediction(      TComTU      &rTu,
+                                       const ComponentID  compID,
+                                       const Pel         *piResiL,
+                                       const Pel         *piResiC,
+                                             Pel         *piResiT,
+                                       const Int          width,
+                                       const Int          height,
+                                       const Int          strideL,
+                                       const Int          strideC,
+                                       const Int          strideT,
+                                       const Bool         reverse);
+
+};// END CLASS DEFINITION TComTrQuant
+
+//! \}
+
+#endif // __TCOMTRQUANT__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComWeightPrediction.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,376 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComWeightPrediction.cpp
+    \brief    weighting prediction class
+*/
+
+// Include files
+#include "TComSlice.h"
+#include "TComWeightPrediction.h"
+#include "TComInterpolationFilter.h"
+
+
+static inline Pel weightBidir( Int w0, Pel P0, Int w1, Pel P1, Int round, Int shift, Int offset, Int clipBD)
+{
+  return ClipBD( ( (w0*(P0 + IF_INTERNAL_OFFS) + w1*(P1 + IF_INTERNAL_OFFS) + round + (offset << (shift-1))) >> shift ), clipBD );
+}
+
+
+static inline Pel weightUnidir( Int w0, Pel P0, Int round, Int shift, Int offset, Int clipBD)
+{
+  return ClipBD( ( (w0*(P0 + IF_INTERNAL_OFFS) + round) >> shift ) + offset, clipBD );
+}
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+TComWeightPrediction::TComWeightPrediction()
+{
+}
+
+
+/** weighted averaging for bi-pred
+ * \param TComYuv* pcYuvSrc0
+ * \param TComYuv* pcYuvSrc1
+ * \param iPartUnitIdx
+ * \param uiWidth
+ * \param uiHeight
+ * \param WPScalingParam *wp0
+ * \param WPScalingParam *wp1
+ * \param TComYuv* rpcYuvDst
+ * \returns Void
+ */
+Void TComWeightPrediction::addWeightBi( const TComYuv              *pcYuvSrc0,
+                                        const TComYuv              *pcYuvSrc1,
+                                        const UInt                  iPartUnitIdx,
+                                        const UInt                  uiWidth,
+                                        const UInt                  uiHeight,
+                                        const WPScalingParam *const wp0,
+                                        const WPScalingParam *const wp1,
+                                              TComYuv        *const rpcYuvDst,
+                                        const Bool                  bRoundLuma )
+{
+
+  const Bool enableRounding[MAX_NUM_COMPONENT]={ bRoundLuma, true, true };
+
+  const UInt numValidComponent = pcYuvSrc0->getNumberValidComponents();
+
+  for(Int componentIndex=0; componentIndex<numValidComponent; componentIndex++)
+  {
+    const ComponentID compID=ComponentID(componentIndex);
+
+    const Pel* pSrc0       = pcYuvSrc0->getAddr( compID,  iPartUnitIdx );
+    const Pel* pSrc1       = pcYuvSrc1->getAddr( compID,  iPartUnitIdx );
+          Pel* pDst        = rpcYuvDst->getAddr( compID,  iPartUnitIdx );
+
+    // Per-component weighting parameters (same code path for luma and chroma): ----
+    const Int  w0          = wp0[compID].w;
+    const Int  offset      = wp0[compID].offset;
+    const Int  clipBD      = g_bitDepth[toChannelType(compID)];
+    const Int  shiftNum    = std::max<Int>(2, (IF_INTERNAL_PREC - clipBD));
+    const Int  shift       = wp0[compID].shift + shiftNum;
+    const Int  round       = (enableRounding[compID] && (shift > 0)) ? (1<<(shift-1)) : 0;
+    const Int  w1          = wp1[compID].w;
+    const UInt csx         = pcYuvSrc0->getComponentScaleX(compID);
+    const UInt csy         = pcYuvSrc0->getComponentScaleY(compID);
+    const Int  iHeight     = uiHeight>>csy;
+    const Int  iWidth      = uiWidth>>csx;
+
+    const UInt iSrc0Stride = pcYuvSrc0->getStride(compID);
+    const UInt iSrc1Stride = pcYuvSrc1->getStride(compID);
+    const UInt iDstStride  = rpcYuvDst->getStride(compID);
+
+    for ( Int y = iHeight-1; y >= 0; y-- )
+    {
+      // do it in batches of 4 (partial unroll)
+      Int x = iWidth-1;
+      for ( ; x >= 3; )
+      {
+        pDst[x] = weightBidir(w0,pSrc0[x], w1,pSrc1[x], round, shift, offset, clipBD); x--;
+        pDst[x] = weightBidir(w0,pSrc0[x], w1,pSrc1[x], round, shift, offset, clipBD); x--;
+        pDst[x] = weightBidir(w0,pSrc0[x], w1,pSrc1[x], round, shift, offset, clipBD); x--;
+        pDst[x] = weightBidir(w0,pSrc0[x], w1,pSrc1[x], round, shift, offset, clipBD); x--;
+      }
+      for( ; x >= 0; x-- )
+      {
+        pDst[x] = weightBidir(w0,pSrc0[x], w1,pSrc1[x], round, shift, offset, clipBD);
+      }
+
+      pSrc0 += iSrc0Stride;
+      pSrc1 += iSrc1Stride;
+      pDst  += iDstStride;
+    } // y loop
+  } // compID loop
+}
+
+
+/** weighted averaging for uni-pred
+ * \param TComYuv* pcYuvSrc0
+ * \param iPartUnitIdx
+ * \param uiWidth
+ * \param uiHeight
+ * \param WPScalingParam *wp0
+ * \param TComYuv* pcYuvDst
+ * \returns Void
+ */
+Void TComWeightPrediction::addWeightUni( const TComYuv        *const pcYuvSrc0,
+                                         const UInt                  iPartUnitIdx,
+                                         const UInt                  uiWidth,
+                                         const UInt                  uiHeight,
+                                         const WPScalingParam *const wp0,
+                                               TComYuv        *const pcYuvDst )
+{
+  const UInt numValidComponent = pcYuvSrc0->getNumberValidComponents();
+
+  for(Int componentIndex=0; componentIndex<numValidComponent; componentIndex++)
+  {
+    const ComponentID compID=ComponentID(componentIndex);
+
+    const Pel* pSrc0       = pcYuvSrc0->getAddr( compID,  iPartUnitIdx );
+          Pel* pDst        = pcYuvDst->getAddr( compID,  iPartUnitIdx );
+
+    // Per-component weighting parameters (same code path for luma and chroma): ----
+    const Int  w0          = wp0[compID].w;
+    const Int  offset      = wp0[compID].offset;
+    const Int  clipBD      = g_bitDepth[toChannelType(compID)];
+    const Int  shiftNum    = std::max<Int>(2, (IF_INTERNAL_PREC - clipBD));
+    const Int  shift       = wp0[compID].shift + shiftNum;
+    const Int  round       = (shift > 0) ? (1<<(shift-1)) : 0;
+    const UInt iSrc0Stride = pcYuvSrc0->getStride(compID);
+    const UInt iDstStride  = pcYuvDst->getStride(compID);
+    const UInt csx         = pcYuvSrc0->getComponentScaleX(compID);
+    const UInt csy         = pcYuvSrc0->getComponentScaleY(compID);
+    const Int  iHeight     = uiHeight>>csy;
+    const Int  iWidth      = uiWidth>>csx;
+
+    for (Int y = iHeight-1; y >= 0; y-- )
+    {
+      Int x = iWidth-1;
+      for ( ; x >= 3; )
+      {
+        pDst[x] = weightUnidir(w0, pSrc0[x], round, shift, offset, clipBD); x--;
+        pDst[x] = weightUnidir(w0, pSrc0[x], round, shift, offset, clipBD); x--;
+        pDst[x] = weightUnidir(w0, pSrc0[x], round, shift, offset, clipBD); x--;
+        pDst[x] = weightUnidir(w0, pSrc0[x], round, shift, offset, clipBD); x--;
+      }
+      for( ; x >= 0; x--)
+      {
+        pDst[x] = weightUnidir(w0, pSrc0[x], round, shift, offset, clipBD);
+      }
+      pSrc0 += iSrc0Stride;
+      pDst  += iDstStride;
+    }
+  }
+}
+
+
+//=======================================================
+//  getWpScaling()
+//=======================================================
+/** derivation of wp tables
+ * \param TComDataCU* pcCU
+ * \param iRefIdx0
+ * \param iRefIdx1
+ * \param WPScalingParam *&wp0
+ * \param WPScalingParam *&wp1
+ * (wp0/wp1 are set to NULL when the corresponding reference index is negative)
+ * \returns Void
+ */
+Void TComWeightPrediction::getWpScaling(       TComDataCU *const pcCU,
+                                         const Int               iRefIdx0,
+                                         const Int               iRefIdx1,
+                                               WPScalingParam  *&wp0,
+                                               WPScalingParam  *&wp1)
+{
+  assert(iRefIdx0 >= 0 || iRefIdx1 >= 0);
+
+        TComSlice *const pcSlice  = pcCU->getSlice();
+  const Bool             wpBiPred = pcCU->getSlice()->getPPS()->getWPBiPred();
+  const Bool             bBiDir   = (iRefIdx0>=0 && iRefIdx1>=0);
+  const Bool             bUniDir  = !bBiDir;
+
+  if ( bUniDir || wpBiPred )
+  { // explicit --------------------
+    if ( iRefIdx0 >= 0 )
+    {
+      pcSlice->getWpScaling(REF_PIC_LIST_0, iRefIdx0, wp0);
+    }
+    if ( iRefIdx1 >= 0 )
+    {
+      pcSlice->getWpScaling(REF_PIC_LIST_1, iRefIdx1, wp1);
+    }
+  }
+  else
+  {
+    assert(0);
+  }
+
+  if ( iRefIdx0 < 0 )
+  {
+    wp0 = NULL;
+  }
+  if ( iRefIdx1 < 0 )
+  {
+    wp1 = NULL;
+  }
+
+  const UInt numValidComponent                    = pcCU->getPic()->getNumberValidComponents();
+  const Bool bUseHighPrecisionPredictionWeighting = pcSlice->getSPS()->getUseHighPrecisionPredictionWeighting();
+
+  if ( bBiDir )
+  { // Bi-Dir case
+    for ( Int yuv=0 ; yuv<numValidComponent ; yuv++ )
+    {
+      const Int bitDepth            = g_bitDepth[toChannelType(ComponentID(yuv))];
+      const Int offsetScalingFactor = bUseHighPrecisionPredictionWeighting ? 1 : (1 << (bitDepth-8));
+
+      wp0[yuv].w      = wp0[yuv].iWeight;
+      wp1[yuv].w      = wp1[yuv].iWeight;
+      wp0[yuv].o      = wp0[yuv].iOffset * offsetScalingFactor;
+      wp1[yuv].o      = wp1[yuv].iOffset * offsetScalingFactor;
+      wp0[yuv].offset = wp0[yuv].o + wp1[yuv].o;
+      wp0[yuv].shift  = wp0[yuv].uiLog2WeightDenom + 1;
+      wp0[yuv].round  = (1 << wp0[yuv].uiLog2WeightDenom);
+      wp1[yuv].offset = wp0[yuv].offset;
+      wp1[yuv].shift  = wp0[yuv].shift;
+      wp1[yuv].round  = wp0[yuv].round;
+    }
+  }
+  else
+  {  // Unidir
+    WPScalingParam *const pwp = (iRefIdx0>=0) ? wp0 : wp1 ;
+
+    for ( Int yuv=0 ; yuv<numValidComponent ; yuv++ )
+    {
+      const Int bitDepth            = g_bitDepth[toChannelType(ComponentID(yuv))];
+      const Int offsetScalingFactor = bUseHighPrecisionPredictionWeighting ? 1 : (1 << (bitDepth-8));
+
+      pwp[yuv].w      = pwp[yuv].iWeight;
+      pwp[yuv].offset = pwp[yuv].iOffset * offsetScalingFactor;
+      pwp[yuv].shift  = pwp[yuv].uiLog2WeightDenom;
+      pwp[yuv].round  = (pwp[yuv].uiLog2WeightDenom>=1) ? (1 << (pwp[yuv].uiLog2WeightDenom-1)) : (0);
+    }
+  }
+}
+
+
+/** weighted prediction for bi-pred
+ * \param TComDataCU* pcCU
+ * \param TComYuv* pcYuvSrc0
+ * \param TComYuv* pcYuvSrc1
+ * \param iRefIdx0
+ * \param iRefIdx1
+ * \param uiPartIdx
+ * \param iWidth
+ * \param iHeight
+ * \param TComYuv* rpcYuvDst
+ * \returns Void
+ */
+Void TComWeightPrediction::xWeightedPredictionBi(       TComDataCU *const pcCU,
+                                                  const TComYuv    *const pcYuvSrc0,
+                                                  const TComYuv    *const pcYuvSrc1,
+                                                  const Int               iRefIdx0,
+                                                  const Int               iRefIdx1,
+                                                  const UInt              uiPartIdx,
+                                                  const Int               iWidth,
+                                                  const Int               iHeight,
+                                                        TComYuv          *rpcYuvDst )
+{
+  WPScalingParam  *pwp0;
+  WPScalingParam  *pwp1;
+
+  assert(pcCU->getSlice()->getPPS()->getWPBiPred());
+
+  getWpScaling(pcCU, iRefIdx0, iRefIdx1, pwp0, pwp1);
+
+  if( iRefIdx0 >= 0 && iRefIdx1 >= 0 )
+  {
+    addWeightBi(pcYuvSrc0, pcYuvSrc1, uiPartIdx, iWidth, iHeight, pwp0, pwp1, rpcYuvDst );
+  }
+  else if ( iRefIdx0 >= 0 && iRefIdx1 <  0 )
+  {
+    addWeightUni( pcYuvSrc0, uiPartIdx, iWidth, iHeight, pwp0, rpcYuvDst );
+  }
+  else if ( iRefIdx0 <  0 && iRefIdx1 >= 0 )
+  {
+    addWeightUni( pcYuvSrc1, uiPartIdx, iWidth, iHeight, pwp1, rpcYuvDst );
+  }
+  else
+  {
+    assert (0);
+  }
+}
+
+
+/** weighted prediction for uni-pred
+ * \param TComDataCU* pcCU
+ * \param TComYuv* pcYuvSrc
+ * \param uiPartAddr
+ * \param iWidth
+ * \param iHeight
+ * \param eRefPicList
+ * \param TComYuv* pcYuvPred
+ * \param iRefIdx_input (negative: reference index is read from the CU motion field)
+ *
+ * \returns Void
+ */
+Void TComWeightPrediction::xWeightedPredictionUni(       TComDataCU *const pcCU,
+                                                   const TComYuv    *const pcYuvSrc,
+                                                   const UInt              uiPartAddr,
+                                                   const Int               iWidth,
+                                                   const Int               iHeight,
+                                                   const RefPicList        eRefPicList,
+                                                         TComYuv          *pcYuvPred,
+                                                   const Int               iRefIdx_input)
+{
+  WPScalingParam  *pwp, *pwpTmp;
+
+  Int iRefIdx=iRefIdx_input;
+  if ( iRefIdx < 0 )
+  {
+    iRefIdx   = pcCU->getCUMvField( eRefPicList )->getRefIdx( uiPartAddr );
+  }
+  assert (iRefIdx >= 0);
+
+  if ( eRefPicList == REF_PIC_LIST_0 )
+  {
+    getWpScaling(pcCU, iRefIdx, -1, pwp, pwpTmp);
+  }
+  else
+  {
+    getWpScaling(pcCU, -1, iRefIdx, pwpTmp, pwp);
+  }
+  addWeightUni( pcYuvSrc, uiPartAddr, iWidth, iHeight, pwp, pcYuvPred );
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComWeightPrediction.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,101 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComWeightPrediction.h
+    \brief    weighting prediction class (header)
+*/
+
+#ifndef __TCOMWEIGHTPREDICTION__
+#define __TCOMWEIGHTPREDICTION__
+
+
+// Include files
+#include "TComPic.h"
+#include "TComMotionInfo.h"
+#include "TComPattern.h"
+#include "TComTrQuant.h"
+#include "TComInterpolationFilter.h"
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+/// weighting prediction class
+class TComWeightPrediction
+{
+public:
+  TComWeightPrediction();
+
+  Void  getWpScaling(                 TComDataCU     *const pcCU,
+                                const Int                   iRefIdx0,
+                                const Int                   iRefIdx1,
+                                      WPScalingParam      *&wp0,
+                                      WPScalingParam      *&wp1);
+
+  Void addWeightBi(             const TComYuv              *pcYuvSrc0,
+                                const TComYuv              *pcYuvSrc1,
+                                const UInt                  iPartUnitIdx,
+                                const UInt                  uiWidth,
+                                const UInt                  uiHeight,
+                                const WPScalingParam *const wp0,
+                                const WPScalingParam *const wp1,
+                                      TComYuv        *const rpcYuvDst,
+                                const Bool                  bRoundLuma=true );
+
+  Void  addWeightUni(           const TComYuv        *const pcYuvSrc0,
+                                const UInt                  iPartUnitIdx,
+                                const UInt                  uiWidth,
+                                const UInt                  uiHeight,
+                                const WPScalingParam *const wp0,
+                                      TComYuv        *const rpcYuvDst );
+
+  Void  xWeightedPredictionUni(       TComDataCU     *const pcCU,
+                                const TComYuv        *const pcYuvSrc,
+                                const UInt                  uiPartAddr,
+                                const Int                   iWidth,
+                                const Int                   iHeight,
+                                const RefPicList            eRefPicList,
+                                      TComYuv              *pcYuvPred,
+                                const Int                   iRefIdx=-1 );
+
+  Void  xWeightedPredictionBi(        TComDataCU     *const pcCU,
+                                const TComYuv        *const pcYuvSrc0,
+                                const TComYuv        *const pcYuvSrc1,
+                                const Int                   iRefIdx0,
+                                const Int                   iRefIdx1,
+                                const UInt                  uiPartIdx,
+                                const Int                   iWidth,
+                                const Int                   iHeight,
+                                      TComYuv              *pcYuvDst );
+};
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComYuv.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,426 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComYuv.cpp
+    \brief    general YUV buffer class
+    \todo     this should be merged with TComPicYuv
+*/
+
+#include <stdlib.h>
+#include <memory.h>
+#include <assert.h>
+#include <math.h>
+
+#include "CommonDef.h"
+#include "TComYuv.h"
+#include "TComInterpolationFilter.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+TComYuv::TComYuv()
+{
+  for(Int comp=0; comp<MAX_NUM_COMPONENT; comp++)
+  {
+    m_apiBuf[comp] = NULL;
+  }
+}
+
+TComYuv::~TComYuv()
+{
+}
+
+Void TComYuv::create( UInt iWidth, UInt iHeight, ChromaFormat chromaFormatIDC )
+{
+  // set width and height
+  m_iWidth   = iWidth;
+  m_iHeight  = iHeight;
+  m_chromaFormatIDC = chromaFormatIDC;
+
+  for(Int ch=0; ch<MAX_NUM_COMPONENT; ch++)
+  {
+    // memory allocation
+    m_apiBuf[ch]  = (Pel*)xMalloc( Pel, getWidth(ComponentID(ch))*getHeight(ComponentID(ch)) );
+  }
+}
+
+Void TComYuv::destroy()
+{
+  // memory free
+  for(Int ch=0; ch<MAX_NUM_COMPONENT; ch++)
+  {
+    if (m_apiBuf[ch]!=NULL) { xFree( m_apiBuf[ch] ); m_apiBuf[ch] = NULL; }
+  }
+}
+
+Void TComYuv::clear()
+{
+  for(Int ch=0; ch<MAX_NUM_COMPONENT; ch++)
+  {
+    if (m_apiBuf[ch]!=NULL)
+      ::memset( m_apiBuf[ch], 0, ( getWidth(ComponentID(ch)) * getHeight(ComponentID(ch))  )*sizeof(Pel) );
+  }
+}
+
+
+
+
+Void TComYuv::copyToPicYuv   ( TComPicYuv* pcPicYuvDst, const UInt ctuRsAddr, const UInt uiAbsZorderIdx, const UInt uiPartDepth, const UInt uiPartIdx ) const
+{
+  for(Int ch=0; ch<getNumberValidComponents(); ch++)
+    copyToPicComponent  ( ComponentID(ch), pcPicYuvDst, ctuRsAddr, uiAbsZorderIdx, uiPartDepth, uiPartIdx );
+}
+
+Void TComYuv::copyToPicComponent  ( const ComponentID ch, TComPicYuv* pcPicYuvDst, const UInt ctuRsAddr, const UInt uiAbsZorderIdx, const UInt uiPartDepth, const UInt uiPartIdx ) const
+{
+  const Int iWidth  = getWidth(ch) >>uiPartDepth;
+  const Int iHeight = getHeight(ch)>>uiPartDepth;
+
+  const Pel* pSrc     = getAddr(ch, uiPartIdx, iWidth);
+        Pel* pDst     = pcPicYuvDst->getAddr ( ch, ctuRsAddr, uiAbsZorderIdx );
+
+  const UInt  iSrcStride  = getStride(ch);
+  const UInt  iDstStride  = pcPicYuvDst->getStride(ch);
+
+  for ( Int y = iHeight; y != 0; y-- )
+  {
+    ::memcpy( pDst, pSrc, sizeof(Pel)*iWidth);
+    pDst += iDstStride;
+    pSrc += iSrcStride;
+  }
+}
+
+
+
+
+Void TComYuv::copyFromPicYuv   ( const TComPicYuv* pcPicYuvSrc, const UInt ctuRsAddr, const UInt uiAbsZorderIdx )
+{
+  for(Int ch=0; ch<getNumberValidComponents(); ch++)
+    copyFromPicComponent  ( ComponentID(ch), pcPicYuvSrc, ctuRsAddr, uiAbsZorderIdx );
+}
+
+Void TComYuv::copyFromPicComponent  ( const ComponentID ch, const TComPicYuv* pcPicYuvSrc, const UInt ctuRsAddr, const UInt uiAbsZorderIdx )
+{
+        Pel* pDst     = getAddr(ch);
+  const Pel* pSrc     = pcPicYuvSrc->getAddr ( ch, ctuRsAddr, uiAbsZorderIdx );
+
+  const UInt iDstStride  = getStride(ch);
+  const UInt iSrcStride  = pcPicYuvSrc->getStride(ch);
+  const Int  iWidth=getWidth(ch);
+  const Int  iHeight=getHeight(ch);
+
+  for (Int y = iHeight; y != 0; y-- )
+  {
+    ::memcpy( pDst, pSrc, sizeof(Pel)*iWidth);
+    pDst += iDstStride;
+    pSrc += iSrcStride;
+  }
+}
+
+
+
+
+Void TComYuv::copyToPartYuv( TComYuv* pcYuvDst, const UInt uiDstPartIdx ) const
+{
+  for(Int ch=0; ch<getNumberValidComponents(); ch++)
+    copyToPartComponent  ( ComponentID(ch), pcYuvDst, uiDstPartIdx );
+}
+
+Void TComYuv::copyToPartComponent( const ComponentID ch, TComYuv* pcYuvDst, const UInt uiDstPartIdx ) const
+{
+  const Pel* pSrc     = getAddr(ch);
+        Pel* pDst     = pcYuvDst->getAddr( ch, uiDstPartIdx );
+
+  const UInt iSrcStride  = getStride(ch);
+  const UInt iDstStride  = pcYuvDst->getStride(ch);
+  const Int  iWidth=getWidth(ch);
+  const Int  iHeight=getHeight(ch);
+
+  for (Int y = iHeight; y != 0; y-- )
+  {
+    ::memcpy( pDst, pSrc, sizeof(Pel)*iWidth);
+    pDst += iDstStride;
+    pSrc += iSrcStride;
+  }
+}
+
+
+
+
+Void TComYuv::copyPartToYuv( TComYuv* pcYuvDst, const UInt uiSrcPartIdx ) const
+{
+  for(Int ch=0; ch<getNumberValidComponents(); ch++)
+    copyPartToComponent  ( ComponentID(ch), pcYuvDst, uiSrcPartIdx );
+}
+
+Void TComYuv::copyPartToComponent( const ComponentID ch, TComYuv* pcYuvDst, const UInt uiSrcPartIdx ) const
+{
+  const Pel* pSrc     = getAddr(ch, uiSrcPartIdx);
+        Pel* pDst     = pcYuvDst->getAddr(ch, 0 );
+
+  const UInt  iSrcStride  = getStride(ch);
+  const UInt  iDstStride  = pcYuvDst->getStride(ch);
+
+  const UInt uiHeight = pcYuvDst->getHeight(ch);
+  const UInt uiWidth = pcYuvDst->getWidth(ch);
+
+  for ( UInt y = uiHeight; y != 0; y-- )
+  {
+    ::memcpy( pDst, pSrc, sizeof(Pel)*uiWidth);
+    pDst += iDstStride;
+    pSrc += iSrcStride;
+  }
+}
+
+
+
+
+Void TComYuv::copyPartToPartYuv   ( TComYuv* pcYuvDst, const UInt uiPartIdx, const UInt iWidth, const UInt iHeight ) const
+{
+  for(Int ch=0; ch<getNumberValidComponents(); ch++)
+    copyPartToPartComponent   (ComponentID(ch), pcYuvDst, uiPartIdx, iWidth>>getComponentScaleX(ComponentID(ch)), iHeight>>getComponentScaleY(ComponentID(ch)) );
+}
+
+Void TComYuv::copyPartToPartComponent  ( const ComponentID ch, TComYuv* pcYuvDst, const UInt uiPartIdx, const UInt iWidthComponent, const UInt iHeightComponent ) const
+{
+  const Pel* pSrc =           getAddr(ch, uiPartIdx);
+        Pel* pDst = pcYuvDst->getAddr(ch, uiPartIdx);
+  if( pSrc == pDst )
+  {
+    //th not a good idea
+    //th best would be to fix the caller
+    return ;
+  }
+
+  const UInt  iSrcStride = getStride(ch);
+  const UInt  iDstStride = pcYuvDst->getStride(ch);
+  for ( UInt y = iHeightComponent; y != 0; y-- )
+  {
+    ::memcpy( pDst, pSrc, iWidthComponent * sizeof(Pel) );
+    pSrc += iSrcStride;
+    pDst += iDstStride;
+  }
+}
+
+
+
+
+Void TComYuv::copyPartToPartComponentMxN  ( const ComponentID ch, TComYuv* pcYuvDst, const TComRectangle &rect) const
+{
+  const Pel* pSrc =           getAddrPix( ch, rect.x0, rect.y0 );
+        Pel* pDst = pcYuvDst->getAddrPix( ch, rect.x0, rect.y0 );
+  if( pSrc == pDst )
+  {
+    //th not a good idea
+    //th best would be to fix the caller
+    return ;
+  }
+
+  const UInt  iSrcStride = getStride(ch);
+  const UInt  iDstStride = pcYuvDst->getStride(ch);
+  const UInt uiHeightComponent=rect.height;
+  const UInt uiWidthComponent=rect.width;
+  for ( UInt y = uiHeightComponent; y != 0; y-- )
+  {
+    ::memcpy( pDst, pSrc, uiWidthComponent * sizeof( Pel ) );
+    pSrc += iSrcStride;
+    pDst += iDstStride;
+  }
+}
+
+
+
+
+Void TComYuv::addClip( const TComYuv* pcYuvSrc0, const TComYuv* pcYuvSrc1, const UInt uiTrUnitIdx, const UInt uiPartSize )
+{
+  for(Int chan=0; chan<getNumberValidComponents(); chan++)
+  {
+    const ComponentID ch=ComponentID(chan);
+    const Int uiPartWidth =uiPartSize>>getComponentScaleX(ch);
+    const Int uiPartHeight=uiPartSize>>getComponentScaleY(ch);
+
+    const Pel* pSrc0 = pcYuvSrc0->getAddr(ch, uiTrUnitIdx, uiPartWidth );
+    const Pel* pSrc1 = pcYuvSrc1->getAddr(ch, uiTrUnitIdx, uiPartWidth );
+          Pel* pDst  = getAddr(ch, uiTrUnitIdx, uiPartWidth );
+
+    const UInt iSrc0Stride = pcYuvSrc0->getStride(ch);
+    const UInt iSrc1Stride = pcYuvSrc1->getStride(ch);
+    const UInt iDstStride  = getStride(ch);
+    const Int clipbd = g_bitDepth[toChannelType(ch)];
+#if O0043_BEST_EFFORT_DECODING
+    const Int bitDepthDelta = g_bitDepthInStream[toChannelType(ch)] - g_bitDepth[toChannelType(ch)];
+#endif
+
+    for ( Int y = uiPartHeight-1; y >= 0; y-- )
+    {
+      for ( Int x = uiPartWidth-1; x >= 0; x-- )
+      {
+#if O0043_BEST_EFFORT_DECODING
+        pDst[x] = Pel(ClipBD<Int>( Int(pSrc0[x]) + rightShiftEvenRounding<Pel>(pSrc1[x], bitDepthDelta), clipbd));
+#else
+        pDst[x] = Pel(ClipBD<Int>( Int(pSrc0[x]) + Int(pSrc1[x]), clipbd));
+#endif
+      }
+      pSrc0 += iSrc0Stride;
+      pSrc1 += iSrc1Stride;
+      pDst  += iDstStride;
+    }
+  }
+}
+
+
+
+
+Void TComYuv::subtract( const TComYuv* pcYuvSrc0, const TComYuv* pcYuvSrc1, const UInt uiTrUnitIdx, const UInt uiPartSize )
+{
+  for(Int chan=0; chan<getNumberValidComponents(); chan++)
+  {
+    const ComponentID ch=ComponentID(chan);
+    const Int uiPartWidth =uiPartSize>>getComponentScaleX(ch);
+    const Int uiPartHeight=uiPartSize>>getComponentScaleY(ch);
+
+    const Pel* pSrc0 = pcYuvSrc0->getAddr( ch, uiTrUnitIdx, uiPartWidth );
+    const Pel* pSrc1 = pcYuvSrc1->getAddr( ch, uiTrUnitIdx, uiPartWidth );
+          Pel* pDst  = getAddr( ch, uiTrUnitIdx, uiPartWidth );
+
+    const Int  iSrc0Stride = pcYuvSrc0->getStride(ch);
+    const Int  iSrc1Stride = pcYuvSrc1->getStride(ch);
+    const Int  iDstStride  = getStride(ch);
+
+    for (Int y = uiPartHeight-1; y >= 0; y-- )
+    {
+      for (Int x = uiPartWidth-1; x >= 0; x-- )
+      {
+        pDst[x] = pSrc0[x] - pSrc1[x];
+      }
+      pSrc0 += iSrc0Stride;
+      pSrc1 += iSrc1Stride;
+      pDst  += iDstStride;
+    }
+  }
+}
+
+
+
+
+Void TComYuv::addAvg( const TComYuv* pcYuvSrc0, const TComYuv* pcYuvSrc1, const UInt iPartUnitIdx, const UInt uiWidth, const UInt uiHeight )
+{
+  for(Int chan=0; chan<getNumberValidComponents(); chan++)
+  {
+    const ComponentID ch=ComponentID(chan);
+    const Pel* pSrc0  = pcYuvSrc0->getAddr( ch, iPartUnitIdx );
+    const Pel* pSrc1  = pcYuvSrc1->getAddr( ch, iPartUnitIdx );
+    Pel* pDst   = getAddr( ch, iPartUnitIdx );
+
+    const UInt  iSrc0Stride = pcYuvSrc0->getStride(ch);
+    const UInt  iSrc1Stride = pcYuvSrc1->getStride(ch);
+    const UInt  iDstStride  = getStride(ch);
+    const Int   clipbd      = g_bitDepth[toChannelType(ch)];
+    const Int   shiftNum    = std::max<Int>(2, (IF_INTERNAL_PREC - clipbd)) + 1;
+    const Int   offset      = ( 1 << ( shiftNum - 1 ) ) + 2 * IF_INTERNAL_OFFS;
+
+    const Int   iWidth      = uiWidth  >> getComponentScaleX(ch);
+    const Int   iHeight     = uiHeight >> getComponentScaleY(ch);
+
+    if (iWidth&1)
+    {
+      assert(0);
+      exit(-1);
+    }
+    else if (iWidth&2)
+    {
+      for ( Int y = 0; y < iHeight; y++ )
+      {
+        for (Int x=0 ; x < iWidth; x+=2 )
+        {
+          pDst[ x + 0 ] = ClipBD( rightShift(( pSrc0[ x + 0 ] + pSrc1[ x + 0 ] + offset ), shiftNum), clipbd );
+          pDst[ x + 1 ] = ClipBD( rightShift(( pSrc0[ x + 1 ] + pSrc1[ x + 1 ] + offset ), shiftNum), clipbd );
+        }
+        pSrc0 += iSrc0Stride;
+        pSrc1 += iSrc1Stride;
+        pDst  += iDstStride;
+      }
+    }
+    else
+    {
+      for ( Int y = 0; y < iHeight; y++ )
+      {
+        for (Int x=0 ; x < iWidth; x+=4 )
+        {
+          pDst[ x + 0 ] = ClipBD( rightShift(( pSrc0[ x + 0 ] + pSrc1[ x + 0 ] + offset ), shiftNum), clipbd );
+          pDst[ x + 1 ] = ClipBD( rightShift(( pSrc0[ x + 1 ] + pSrc1[ x + 1 ] + offset ), shiftNum), clipbd );
+          pDst[ x + 2 ] = ClipBD( rightShift(( pSrc0[ x + 2 ] + pSrc1[ x + 2 ] + offset ), shiftNum), clipbd );
+          pDst[ x + 3 ] = ClipBD( rightShift(( pSrc0[ x + 3 ] + pSrc1[ x + 3 ] + offset ), shiftNum), clipbd );
+        }
+        pSrc0 += iSrc0Stride;
+        pSrc1 += iSrc1Stride;
+        pDst  += iDstStride;
+      }
+    }
+  }
+}
+
+Void TComYuv::removeHighFreq( const TComYuv* pcYuvSrc, const UInt uiPartIdx, const UInt uiWidth, UInt const uiHeight )
+{
+  for(Int chan=0; chan<getNumberValidComponents(); chan++)
+  {
+    const ComponentID ch=ComponentID(chan);
+#if !DISABLING_CLIP_FOR_BIPREDME
+    const ChannelType chType=toChannelType(ch);
+#endif
+
+    const Pel* pSrc  = pcYuvSrc->getAddr(ch, uiPartIdx);
+    Pel* pDst  = getAddr(ch, uiPartIdx);
+
+    const Int iSrcStride = pcYuvSrc->getStride(ch);
+    const Int iDstStride = getStride(ch);
+    const Int iWidth  = uiWidth >>getComponentScaleX(ch);
+    const Int iHeight = uiHeight>>getComponentScaleY(ch);
+
+    for ( Int y = iHeight-1; y >= 0; y-- )
+    {
+      for ( Int x = iWidth-1; x >= 0; x-- )
+      {
+#if DISABLING_CLIP_FOR_BIPREDME
+        pDst[x ] = (2 * pDst[x]) - pSrc[x];
+#else
+        pDst[x ] = Clip((2 * pDst[x]) - pSrc[x], chType);
+#endif
+      }
+      pSrc += iSrcStride;
+      pDst += iDstStride;
+    }
+  }
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TComYuv.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,207 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TComYuv.h
+    \brief    general YUV buffer class (header)
+    \todo     this should be merged with TComPicYuv \n
+              check usage of removeHighFreq function
+*/
+
+#ifndef __TCOMYUV__
+#define __TCOMYUV__
+#include <assert.h>
+#include "CommonDef.h"
+#include "TComPicYuv.h"
+#include "TComRectangle.h"
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// general YUV buffer class
+class TComYuv
+{
+private:
+
+  // ------------------------------------------------------------------------------------------------------------------
+  //  YUV buffer
+  // ------------------------------------------------------------------------------------------------------------------
+
+  Pel*    m_apiBuf[MAX_NUM_COMPONENT];
+
+  // ------------------------------------------------------------------------------------------------------------------
+  //  Parameter for general YUV buffer usage
+  // ------------------------------------------------------------------------------------------------------------------
+
+  UInt     m_iWidth;
+  UInt     m_iHeight;
+  ChromaFormat m_chromaFormatIDC; ///< Chroma Format
+
+  // dims 16x16
+  // blkSize=4x4
+
+  // these functions assume a square CU, of size width*width, split into square TUs each of size blkSize*blkSize.
+  // iTransUnitIdx is the raster-scanned index of the sub-block (TU) in question.
+  // eg for a 16x16 CU, with 4x4 TUs:
+  //  0  1  2  3
+  //  4  5  6  7
+  //  8  9 10 11
+  // 12 13 14 15
+
+  // So, for iTransUnitIdx=14, 14*4 & 15 =8=X offset.
+  //                           14*4 / 16 =3=Y block offset
+  //                                      3*4*16 = Y offset within buffer
+
+
+public:
+
+               TComYuv                    ();
+  virtual     ~TComYuv                    ();
+
+  // ------------------------------------------------------------------------------------------------------------------
+  //  Memory management
+  // ------------------------------------------------------------------------------------------------------------------
+
+  Void         create                     ( const UInt iWidth, const UInt iHeight, const ChromaFormat chromaFormatIDC );  ///< Create  YUV buffer
+  Void         destroy                    ();                             ///< Destroy YUV buffer
+  Void         clear                      ();                             ///< clear   YUV buffer
+
+  // ------------------------------------------------------------------------------------------------------------------
+  //  Copy, load, store YUV buffer
+  // ------------------------------------------------------------------------------------------------------------------
+
+  //  Copy YUV buffer to picture buffer
+  Void         copyToPicYuv               ( TComPicYuv* pcPicYuvDst, const UInt ctuRsAddr, const UInt uiAbsZorderIdx, const UInt uiPartDepth = 0, const UInt uiPartIdx = 0 ) const ;
+  Void         copyToPicComponent         ( const ComponentID id, TComPicYuv* pcPicYuvDst, const UInt iCtuRsAddr, const UInt uiAbsZorderIdx, const UInt uiPartDepth = 0, const UInt uiPartIdx = 0 ) const ;
+
+  //  Copy YUV buffer from picture buffer
+  Void         copyFromPicYuv             ( const TComPicYuv* pcPicYuvSrc, const  UInt ctuRsAddr, const UInt uiAbsZorderIdx );
+  Void         copyFromPicComponent       ( const ComponentID id, const TComPicYuv* pcPicYuvSrc, const UInt iCtuRsAddr, const UInt uiAbsZorderIdx );
+
+  //  Copy Small YUV buffer to the part of other Big YUV buffer
+  Void         copyToPartYuv              ( TComYuv*    pcYuvDst,    const UInt uiDstPartIdx ) const ;
+  Void         copyToPartComponent        ( const ComponentID id, TComYuv*    pcYuvDst,    const UInt uiDstPartIdx ) const ;
+
+  //  Copy the part of Big YUV buffer to other Small YUV buffer
+  Void         copyPartToYuv              ( TComYuv*    pcYuvDst,   const UInt uiSrcPartIdx ) const;
+  Void         copyPartToComponent        ( const ComponentID id, TComYuv*    pcYuvDst,    const UInt uiSrcPartIdx ) const;
+
+  //  Copy YUV partition buffer to other YUV partition buffer
+  Void         copyPartToPartYuv          ( TComYuv*    pcYuvDst, const UInt uiPartIdx, const UInt uiWidth, const UInt uiHeight ) const;
+  Void         copyPartToPartComponent    ( const ComponentID id, TComYuv*    pcYuvDst, const UInt uiPartIdx, const UInt uiWidthComponent, const UInt uiHeightComponent ) const;
+
+  // Copy YUV partition buffer to other YUV partition buffer for non-square blocks
+  Void         copyPartToPartComponentMxN ( const ComponentID id, TComYuv*    pcYuvDst, const TComRectangle &rect ) const;
+
+  // ------------------------------------------------------------------------------------------------------------------
+  //  Algebraic operation for YUV buffer
+  // ------------------------------------------------------------------------------------------------------------------
+
+  //  Clip(pcYuvSrc0 + pcYuvSrc1) -> m_apiBuf
+  Void         addClip                    ( const TComYuv* pcYuvSrc0, const TComYuv* pcYuvSrc1, const UInt uiTrUnitIdx, const UInt uiPartSize );
+
+  //  pcYuvSrc0 - pcYuvSrc1 -> m_apiBuf
+  Void         subtract                   ( const TComYuv* pcYuvSrc0, const TComYuv* pcYuvSrc1, const UInt uiTrUnitIdx, const UInt uiPartSize );
+
+  //  (pcYuvSrc0 + pcYuvSrc1)/2 for YUV partition
+  Void         addAvg                     ( const TComYuv* pcYuvSrc0, const TComYuv* pcYuvSrc1, const UInt iPartUnitIdx, const UInt iWidth, const UInt iHeight );
+
+  Void         removeHighFreq             ( const TComYuv* pcYuvSrc, const UInt uiPartIdx, const UInt uiWidth, const UInt uiHeight );
+
+  // ------------------------------------------------------------------------------------------------------------------
+  //  Access function for YUV buffer
+  // ------------------------------------------------------------------------------------------------------------------
+
+  //  Access starting position of YUV buffer
+  Pel*         getAddr                    (const ComponentID id)                    { return m_apiBuf[id]; }
+  const Pel*   getAddr                    (const ComponentID id) const              { return m_apiBuf[id]; }
+
+  //  Access starting position of YUV partition unit buffer
+  Pel*         getAddr                    (const ComponentID id, const UInt uiPartUnitIdx)
+                                              {
+                                                  Int blkX = g_auiRasterToPelX[ g_auiZscanToRaster[ uiPartUnitIdx ] ] >> getComponentScaleX(id);
+                                                  Int blkY = g_auiRasterToPelY[ g_auiZscanToRaster[ uiPartUnitIdx ] ] >> getComponentScaleY(id);
+                                                  assert((blkX<getWidth(id) && blkY<getHeight(id)));
+                                                  return m_apiBuf[id] + blkX + blkY * getStride(id);
+                                              }
+  const Pel*   getAddr                    (const ComponentID id, const UInt uiPartUnitIdx) const
+                                              {
+                                                  Int blkX = g_auiRasterToPelX[ g_auiZscanToRaster[ uiPartUnitIdx ] ] >> getComponentScaleX(id);
+                                                  Int blkY = g_auiRasterToPelY[ g_auiZscanToRaster[ uiPartUnitIdx ] ] >> getComponentScaleY(id);
+                                                  assert((blkX<getWidth(id) && blkY<getHeight(id)));
+                                                  return m_apiBuf[id] + blkX + blkY * getStride(id);
+                                              }
+
+  //  Access starting position of YUV transform unit buffer
+  Pel*         getAddr                    (const ComponentID id, const UInt iTransUnitIdx, const UInt iBlkSizeForComponent)
+                                              {
+                                                UInt width=getWidth(id);
+                                                Int blkX = ( iTransUnitIdx * iBlkSizeForComponent ) &  ( width - 1 );
+                                                Int blkY = ( iTransUnitIdx * iBlkSizeForComponent ) &~ ( width - 1 );
+                                                if (m_chromaFormatIDC==CHROMA_422 && id!=COMPONENT_Y) blkY<<=1;
+//                                                assert((blkX<getWidth(id) && blkY<getHeight(id)));
+                                                return m_apiBuf[id] + blkX + blkY * iBlkSizeForComponent;
+                                              }
+
+  const Pel*   getAddr                    (const ComponentID id, const UInt iTransUnitIdx, const UInt iBlkSizeForComponent) const
+                                              {
+                                                UInt width=getWidth(id);
+                                                Int blkX = ( iTransUnitIdx * iBlkSizeForComponent ) &  ( width - 1 );
+                                                Int blkY = ( iTransUnitIdx * iBlkSizeForComponent ) &~ ( width - 1 );
+                                                if (m_chromaFormatIDC==CHROMA_422 && id!=COMPONENT_Y) blkY<<=1;
+//                                                UInt w=getWidth(id), h=getHeight(id);
+//                                                assert((blkX<w && blkY<h));
+                                                return m_apiBuf[id] + blkX + blkY * iBlkSizeForComponent;
+                                              }
+
+  // Access starting position of YUV transform unit buffer by pix offset for square & non-square blocks
+  Pel*         getAddrPix                 (const ComponentID id, const UInt iPixX, const UInt iPixY )       { return m_apiBuf[id] + iPixY * getStride(id) + iPixX; }
+  const Pel*   getAddrPix                 (const ComponentID id, const UInt iPixX, const UInt iPixY ) const { return m_apiBuf[id] + iPixY * getStride(id) + iPixX; }
+
+  //  Get stride value of YUV buffer
+  UInt         getStride                  (const ComponentID id) const { return m_iWidth >> getComponentScaleX(id);   }
+  UInt         getHeight                  (const ComponentID id) const { return m_iHeight >> getComponentScaleY(id);  }
+  UInt         getWidth                   (const ComponentID id) const { return m_iWidth >> getComponentScaleX(id);   }
+  ChromaFormat getChromaFormat            ()                     const { return m_chromaFormatIDC; }
+  UInt         getNumberValidComponents   ()                     const { return ::getNumberValidComponents(m_chromaFormatIDC); }
+  UInt         getComponentScaleX         (const ComponentID id) const { return ::getComponentScaleX(id, m_chromaFormatIDC); }
+  UInt         getComponentScaleY         (const ComponentID id) const { return ::getComponentScaleY(id, m_chromaFormatIDC); }
+
+};// END CLASS DEFINITION TComYuv
+
+//! \}
+
+#endif // __TCOMYUV__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibCommon/TypeDef.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,826 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TypeDef.h
+    \brief    Define basic types, new types and enumerations
+*/
+
+#ifndef __TYPEDEF__
+#define __TYPEDEF__
+
+#include <vector>
+
+//! \ingroup TLibCommon
+//! \{
+
+// ====================================================================================================================
+// Debugging
+// ====================================================================================================================
+
+// #define DEBUG_STRING                 // enable to print out final decision debug info at encoder and decoder
+// #define DEBUG_ENCODER_SEARCH_BINS    // enable to print out each bin as it is coded during encoder search
+// #define DEBUG_CABAC_BINS             // enable to print out each bin as it is coded during final encode and decode
+// #define DEBUG_INTRA_SEARCH_COSTS     // enable to print out the cost for each mode during encoder search
+// #define DEBUG_TRANSFORM_AND_QUANTISE // enable to print out each TU as it passes through the transform-quantise-dequantise-inverseTransform process
+
+#ifdef DEBUG_STRING
+  #define DEBUG_STRING_PASS_INTO(name) , name
+  #define DEBUG_STRING_PASS_INTO_OPTIONAL(name, exp) , (exp==0)?0:name
+  #define DEBUG_STRING_FN_DECLARE(name) , std::string &name
+  #define DEBUG_STRING_FN_DECLAREP(name) , std::string *name
+  #define DEBUG_STRING_NEW(name) std::string name;
+  #define DEBUG_STRING_OUTPUT(os, name) os << name;
+  #define DEBUG_STRING_APPEND(str1, str2) str1+=str2;
+  #define DEBUG_STRING_SWAP(str1, str2) str1.swap(str2);
+  #define DEBUG_STRING_CHANNEL_CONDITION(compID) (true)
+  #include <sstream>
+  #include <iomanip>
+#else
+  #define DEBUG_STRING_PASS_INTO(name)
+  #define DEBUG_STRING_PASS_INTO_OPTIONAL(name, exp)
+  #define DEBUG_STRING_FN_DECLARE(name)
+  #define DEBUG_STRING_FN_DECLAREP(name)
+  #define DEBUG_STRING_NEW(name)
+  #define DEBUG_STRING_OUTPUT(os, name)
+  #define DEBUG_STRING_APPEND(str1, str2)
+  #define DEBUG_STRING_SWAP(str1, str2)
+  #define DEBUG_STRING_CHANNEL_CONDITION(compID)
+#endif
+
+
+// ====================================================================================================================
+// Tool Switches
+// ====================================================================================================================
+
+#define HARMONIZE_GOP_FIRST_FIELD_COUPLE                  1
+#define EFFICIENT_FIELD_IRAP                              1
+#define ALLOW_RECOVERY_POINT_AS_RAP                       1
+#define BUGFIX_INTRAPERIOD                                1
+
+#define SAO_ENCODE_ALLOW_USE_PREDEBLOCK                   1
+
+#define TILE_SIZE_CHECK                                   1
+
+#define MAX_NUM_PICS_IN_SOP                            1024
+
+#define MAX_NESTING_NUM_OPS                            1024
+#define MAX_NESTING_NUM_LAYER                            64
+
+#define MAX_VPS_NUM_HRD_PARAMETERS                        1
+#define MAX_VPS_OP_SETS_PLUS1                          1024
+#define MAX_VPS_NUH_RESERVED_ZERO_LAYER_ID_PLUS1          1
+
+#define MAXIMUM_INTRA_FILTERED_WIDTH                     16
+#define MAXIMUM_INTRA_FILTERED_HEIGHT                    16
+
+#define MAX_CPB_CNT                                      32 ///< Upper bound of (cpb_cnt_minus1 + 1)
+#define MAX_NUM_LAYER_IDS                                64
+
+#define COEF_REMAIN_BIN_REDUCTION                         3 ///< indicates the level at which the VLC
+                                                            ///< transitions from Golomb-Rice to TU+EG(k)
+
+#define CU_DQP_TU_CMAX                                    5 ///< max number bins for truncated unary
+#define CU_DQP_EG_k                                       0 ///< expgolomb order
+
+#define SBH_THRESHOLD                                     4  ///< I0156: value of the fixed SBH controlling threshold
+
+#define DISABLING_CLIP_FOR_BIPREDME                       1  ///< Ticket #175
+
+#define C1FLAG_NUMBER                                     8 // maximum number of largerThan1 flag coded in one chunk :  16 in HM5
+#define C2FLAG_NUMBER                                     1 // maximum number of largerThan2 flag coded in one chunk:  16 in HM5
+
+#define SAO_ENCODING_CHOICE                               1  ///< I0184: picture early termination
+#if SAO_ENCODING_CHOICE
+#define SAO_ENCODING_RATE                                 0.75
+#define SAO_ENCODING_CHOICE_CHROMA                        1 ///< J0044: picture early termination Luma and Chroma are handled separately
+#if SAO_ENCODING_CHOICE_CHROMA
+#define SAO_ENCODING_RATE_CHROMA                          0.5
+#endif
+#endif
+
+#define MAX_NUM_SAO_OFFSETS                               4
+
+#define MAX_NUM_VPS                                      16
+#define MAX_NUM_SPS                                      16
+#define MAX_NUM_PPS                                      64
+
+#define RDOQ_CHROMA_LAMBDA                                1   ///< F386: weighting of chroma for RDOQ
+
+#define MIN_SCAN_POS_CROSS                                4
+
+#define FAST_BIT_EST                                      1   ///< G763: Table-based bit estimation for CABAC
+
+#define MLS_GRP_NUM                                      64     ///< G644 : Max number of coefficient groups, max(16, 64)
+#define MLS_CG_LOG2_WIDTH                                 2
+#define MLS_CG_LOG2_HEIGHT                                2
+#define MLS_CG_SIZE                                     (MLS_CG_LOG2_WIDTH + MLS_CG_LOG2_HEIGHT)  ///< G644 : Coefficient group size of 4x4
+
+#define ADAPTIVE_QP_SELECTION                             1      ///< G382: Adaptive reconstruction levels, non-normative part for adaptive QP selection
+#if ADAPTIVE_QP_SELECTION
+#define ARL_C_PRECISION                                   7      ///< G382: 7-bit arithmetic precision
+#define LEVEL_RANGE                                      30     ///< G382: max coefficient level in statistics collection
+#endif
+
+#define HHI_RQT_INTRA_SPEEDUP                             1           ///< tests one best mode with full rqt
+#define HHI_RQT_INTRA_SPEEDUP_MOD                         0           ///< tests two best modes with full rqt
+
+#if HHI_RQT_INTRA_SPEEDUP_MOD && !HHI_RQT_INTRA_SPEEDUP
+#error "HHI_RQT_INTRA_SPEEDUP_MOD requires HHI_RQT_INTRA_SPEEDUP to be enabled"
+#endif
+
+#define VERBOSE_RATE 0 ///< Print additional rate information in encoder
+
+#define AMVP_DECIMATION_FACTOR                            4
+
+#define SCAN_SET_SIZE                                    16
+#define LOG2_SCAN_SET_SIZE                                4
+
+#define FAST_UDI_MAX_RDMODE_NUM                          35          ///< maximum number of RD comparison in fast-UDI estimation loop
+
+#define NUM_INTRA_MODE                                   36
+
+#define WRITE_BACK                                        1           ///< Enable/disable the encoder to replace the deltaPOC and Used by current from the config file with the values derived by the refIdc parameter.
+#define AUTO_INTER_RPS                                    1           ///< Enable/disable the automatic generation of refIdc from the deltaPOC and Used by current from the config file.
+#define PRINT_RPS_INFO                                    0           ///< Enable/disable the printing of bits used to send the RPS.
+                                                                        // using the single nearest frame as a reference frame, while the other reference frames are high-quality (POC%4==0) frames (1+X)
+                                                                        // this should be an encoder-only decision,
+                                                                        // but in the absence of reference frame management, the related code is currently hard-coded
+
+#define RVM_VCEGAM10_M 4
+
+#define PLANAR_IDX                                        0
+#define VER_IDX                                          26                    // index for intra VERTICAL   mode
+#define HOR_IDX                                          10                    // index for intra HORIZONTAL mode
+#define DC_IDX                                            1                    // index for intra DC mode
+#define NUM_CHROMA_MODE                                   5                    // total number of chroma modes
+#define DM_CHROMA_IDX                                    36                    // chroma mode index for derived from luma intra mode
+#define INVALID_MODE_IDX                                 (NUM_INTRA_MODE+1)    // value used to indicate an invalid intra mode
+#define STOPCHROMASEARCH_MODE_IDX                        (INVALID_MODE_IDX+1)  // value used to signal the end of a chroma mode search
+
+#define MDCS_ANGLE_LIMIT                                  4         ///< (default 4) 0 = Horizontal/vertical only, 1 = Horizontal/vertical +/- 1, 2 = Horizontal/vertical +/- 2 etc...
+#define MDCS_MAXIMUM_WIDTH                                8         ///< (default 8) (measured in pixels) TUs with width greater than this can only use diagonal scan
+#define MDCS_MAXIMUM_HEIGHT                               8         ///< (default 8) (measured in pixels) TUs with height greater than this can only use diagonal scan
+
+#define FAST_UDI_USE_MPM 1
+
+#define RDO_WITHOUT_DQP_BITS                              0           ///< Disable counting dQP bits in RDO-based mode decision
+
+#define LOG2_MAX_NUM_COLUMNS_MINUS1                       7
+#define LOG2_MAX_NUM_ROWS_MINUS1                          7
+#define LOG2_MAX_COLUMN_WIDTH                            13
+#define LOG2_MAX_ROW_HEIGHT                              13
+
+#define MATRIX_MULT                                       0 // Brute force matrix multiplication instead of partial butterfly
+
+#define AMP_SAD                                           1 ///< dedicated SAD functions for AMP
+#define AMP_ENC_SPEEDUP                                   1 ///< encoder only speed-up by AMP mode skipping
+#if AMP_ENC_SPEEDUP
+#define AMP_MRG                                           1 ///< encoder only force merge for AMP partition (no motion search for AMP)
+#endif
+
+#define CABAC_INIT_PRESENT_FLAG                           1
+
+#define LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS    4
+#define CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS  8
+
+#define MAX_NUM_LONG_TERM_REF_PICS                       33
+
+#define DECODER_CHECK_SUBSTREAM_AND_SLICE_TRAILING_BYTES  1
+
+#define RD_TEST_SAO_DISABLE_AT_PICTURE_LEVEL              0 ///< 1 = tests whether SAO should be disabled at the picture level,  0 (default) = does not apply this additional test
+
+#define O0043_BEST_EFFORT_DECODING                        0 ///< 0 (default) = disable code related to best effort decoding, 1 = enable code relating to best effort decoding [ decode-side only ].
+
+// Cost mode support
+
+#define LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP       0 ///< QP to use for lossless coding.
+#define LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP_PRIME 4 ///< QP' to use for mixed_lossy_lossless coding.
+
+// Debug support
+
+#define ENVIRONMENT_VARIABLE_DEBUG_AND_TEST               0 ///< When enabled, allows control of debug modifications via environment variables
+
+#define PRINT_MACRO_VALUES                                1 ///< When enabled, the encoder prints out a list of the non-environment-variable controlled macros and their values on startup
+
+// TODO: rename this macro to DECODER_DEBUG_BIT_STATISTICS (may currently cause merge issues with other branches)
+// This can be enabled by the makefile
+#ifndef RExt__DECODER_DEBUG_BIT_STATISTICS
+#define RExt__DECODER_DEBUG_BIT_STATISTICS                                     0 ///< 0 (default) = decoder reports as normal, 1 = decoder produces bit usage statistics (will impact decoder run time by up to ~10%)
+#endif
+
+// This can be enabled by the makefile
+#ifndef RExt__HIGH_BIT_DEPTH_SUPPORT
+#define RExt__HIGH_BIT_DEPTH_SUPPORT                                           0 ///< 0 (default) use data type definitions for 8-10 bit video, 1 = use larger data types to allow for up to 16-bit video (originally developed as part of N0188)
+#endif
+
+#define RExt__GOLOMB_RICE_ADAPTATION_STATISTICS_SETS                           4
+#define RExt__GOLOMB_RICE_INCREMENT_DIVISOR                                    4
+
+#define RExt__PREDICTION_WEIGHTING_ANALYSIS_DC_PRECISION                       0 ///< Additional fixed bit precision used during encoder-side weighting prediction analysis. Currently only used when high_precision_prediction_weighting_flag is set, for backwards compatibility reasons.
+
+#define MAX_TIMECODE_SEI_SETS                                                  3 ///< Maximum number of time sets
+
+
+//------------------------------------------------
+// Derived macros
+//------------------------------------------------
+
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+#define FULL_NBIT                                                              1 ///< When enabled, use distortion measure derived from all bits of source data, otherwise discard (bitDepth - 8) least-significant bits of distortion
+#define RExt__HIGH_PRECISION_FORWARD_TRANSFORM                                 1 ///< 0 use original 6-bit transform matrices for both forward and inverse transform, 1 (default) = use original matrices for inverse transform and high precision matrices for forward transform
+#else
+#define FULL_NBIT                                                              0 ///< When enabled, use distortion measure derived from all bits of source data, otherwise discard (bitDepth - 8) least-significant bits of distortion
+#define RExt__HIGH_PRECISION_FORWARD_TRANSFORM                                 0 ///< 0 (default) use original 6-bit transform matrices for both forward and inverse transform, 1 = use original matrices for inverse transform and high precision matrices for forward transform
+#endif
+
+#if FULL_NBIT
+# define DISTORTION_PRECISION_ADJUSTMENT(x)  0
+#else
+# define DISTORTION_PRECISION_ADJUSTMENT(x) (x)
+#endif
+
+
+//------------------------------------------------
+// Error checks
+//------------------------------------------------
+
+#if ((RExt__HIGH_PRECISION_FORWARD_TRANSFORM != 0) && (RExt__HIGH_BIT_DEPTH_SUPPORT == 0))
+#error ERROR: cannot enable RExt__HIGH_PRECISION_FORWARD_TRANSFORM without RExt__HIGH_BIT_DEPTH_SUPPORT
+#endif
+
+// ====================================================================================================================
+// Basic type redefinition
+// ====================================================================================================================
+
+typedef       void                Void;
+typedef       bool                Bool;
+
+#ifdef __arm__
+typedef       signed char         Char;
+#else
+typedef       char                Char;
+#endif
+typedef       unsigned char       UChar;
+typedef       short               Short;
+typedef       unsigned short      UShort;
+typedef       int                 Int;
+typedef       unsigned int        UInt;
+typedef       double              Double;
+typedef       float               Float;
+
+
+// ====================================================================================================================
+// 64-bit integer type
+// ====================================================================================================================
+
+#ifdef _MSC_VER
+typedef       __int64             Int64;
+
+#if _MSC_VER <= 1200 // MS VC6
+typedef       __int64             UInt64;   // MS VC6 does not support unsigned __int64 to double conversion
+#else
+typedef       unsigned __int64    UInt64;
+#endif
+
+#else
+
+typedef       long long           Int64;
+typedef       unsigned long long  UInt64;
+
+#endif
+
+
+// ====================================================================================================================
+// Enumeration
+// ====================================================================================================================
+
+enum RDPCMMode
+{
+  RDPCM_OFF             = 0,
+  RDPCM_HOR             = 1,
+  RDPCM_VER             = 2,
+  NUMBER_OF_RDPCM_MODES = 3
+};
+
+enum RDPCMSignallingMode
+{
+  RDPCM_SIGNAL_IMPLICIT            = 0,
+  RDPCM_SIGNAL_EXPLICIT            = 1,
+  NUMBER_OF_RDPCM_SIGNALLING_MODES = 2
+};
+
+/// supported slice type
+enum SliceType
+{
+  B_SLICE               = 0,
+  P_SLICE               = 1,
+  I_SLICE               = 2,
+  NUMBER_OF_SLICE_TYPES = 3
+};
+
+/// chroma formats (according to semantics of chroma_format_idc)
+enum ChromaFormat
+{
+  CHROMA_400        = 0,
+  CHROMA_420        = 1,
+  CHROMA_422        = 2,
+  CHROMA_444        = 3,
+  NUM_CHROMA_FORMAT = 4
+};
+
+enum ChannelType
+{
+  CHANNEL_TYPE_LUMA    = 0,
+  CHANNEL_TYPE_CHROMA  = 1,
+  MAX_NUM_CHANNEL_TYPE = 2
+};
+
+enum ComponentID
+{
+  COMPONENT_Y       = 0,
+  COMPONENT_Cb      = 1,
+  COMPONENT_Cr      = 2,
+  MAX_NUM_COMPONENT = 3
+};
+
+enum InputColourSpaceConversion // defined in terms of conversion prior to input of encoder.
+{
+  IPCOLOURSPACE_UNCHANGED               = 0,
+  IPCOLOURSPACE_YCbCrtoYCrCb            = 1, // Mainly used for debug!
+  IPCOLOURSPACE_YCbCrtoYYY              = 2, // Mainly used for debug!
+  IPCOLOURSPACE_RGBtoGBR                = 3,
+  NUMBER_INPUT_COLOUR_SPACE_CONVERSIONS = 4
+};
+
+enum DeblockEdgeDir
+{
+  EDGE_VER     = 0,
+  EDGE_HOR     = 1,
+  NUM_EDGE_DIR = 2
+};
+
+/// supported partition shape
+enum PartSize
+{
+  SIZE_2Nx2N           = 0,           ///< symmetric motion partition,  2Nx2N
+  SIZE_2NxN            = 1,           ///< symmetric motion partition,  2Nx N
+  SIZE_Nx2N            = 2,           ///< symmetric motion partition,   Nx2N
+  SIZE_NxN             = 3,           ///< symmetric motion partition,   Nx N
+  SIZE_2NxnU           = 4,           ///< asymmetric motion partition, 2Nx( N/2) + 2Nx(3N/2)
+  SIZE_2NxnD           = 5,           ///< asymmetric motion partition, 2Nx(3N/2) + 2Nx( N/2)
+  SIZE_nLx2N           = 6,           ///< asymmetric motion partition, ( N/2)x2N + (3N/2)x2N
+  SIZE_nRx2N           = 7,           ///< asymmetric motion partition, (3N/2)x2N + ( N/2)x2N
+  NUMBER_OF_PART_SIZES = 8
+};
+
+/// supported prediction type
+enum PredMode
+{
+  MODE_INTER                 = 0,     ///< inter-prediction mode
+  MODE_INTRA                 = 1,     ///< intra-prediction mode
+  NUMBER_OF_PREDICTION_MODES = 2,
+};
+
+/// reference list index
+enum RefPicList
+{
+  REF_PIC_LIST_0               = 0,   ///< reference list 0
+  REF_PIC_LIST_1               = 1,   ///< reference list 1
+  NUM_REF_PIC_LIST_01          = 2,
+  REF_PIC_LIST_X               = 100  ///< special mark
+};
+
+/// distortion function index
+enum DFunc
+{
+  DF_DEFAULT         = 0,
+  DF_SSE             = 1,      ///< general size SSE
+  DF_SSE4            = 2,      ///<   4xM SSE
+  DF_SSE8            = 3,      ///<   8xM SSE
+  DF_SSE16           = 4,      ///<  16xM SSE
+  DF_SSE32           = 5,      ///<  32xM SSE
+  DF_SSE64           = 6,      ///<  64xM SSE
+  DF_SSE16N          = 7,      ///< 16NxM SSE
+
+  DF_SAD             = 8,      ///< general size SAD
+  DF_SAD4            = 9,      ///<   4xM SAD
+  DF_SAD8            = 10,     ///<   8xM SAD
+  DF_SAD16           = 11,     ///<  16xM SAD
+  DF_SAD32           = 12,     ///<  32xM SAD
+  DF_SAD64           = 13,     ///<  64xM SAD
+  DF_SAD16N          = 14,     ///< 16NxM SAD
+
+  DF_SADS            = 15,     ///< general size SAD with step
+  DF_SADS4           = 16,     ///<   4xM SAD with step
+  DF_SADS8           = 17,     ///<   8xM SAD with step
+  DF_SADS16          = 18,     ///<  16xM SAD with step
+  DF_SADS32          = 19,     ///<  32xM SAD with step
+  DF_SADS64          = 20,     ///<  64xM SAD with step
+  DF_SADS16N         = 21,     ///< 16NxM SAD with step
+
+  DF_HADS            = 22,     ///< general size Hadamard with step
+  DF_HADS4           = 23,     ///<   4xM HAD with step
+  DF_HADS8           = 24,     ///<   8xM HAD with step
+  DF_HADS16          = 25,     ///<  16xM HAD with step
+  DF_HADS32          = 26,     ///<  32xM HAD with step
+  DF_HADS64          = 27,     ///<  64xM HAD with step
+  DF_HADS16N         = 28,     ///< 16NxM HAD with step
+
+#if AMP_SAD
+  DF_SAD12           = 43,
+  DF_SAD24           = 44,
+  DF_SAD48           = 45,
+
+  DF_SADS12          = 46,
+  DF_SADS24          = 47,
+  DF_SADS48          = 48,
+
+  DF_SSE_FRAME       = 50,     ///< Frame-based SSE
+  DF_TOTAL_FUNCTIONS = 64
+#else
+  DF_SSE_FRAME       = 32,     ///< Frame-based SSE
+  DF_TOTAL_FUNCTIONS = 33
+#endif
+};
+
+/// index for SBAC based RD optimization
+enum CI_IDX
+{
+  CI_CURR_BEST = 0,     ///< best mode index
+  CI_NEXT_BEST,         ///< next best index
+  CI_TEMP_BEST,         ///< temporal index
+  CI_CHROMA_INTRA,      ///< chroma intra index
+  CI_QT_TRAFO_TEST,
+  CI_QT_TRAFO_ROOT,
+  CI_NUM,               ///< total number
+};
+
+/// motion vector predictor direction used in AMVP
+enum MVP_DIR
+{
+  MD_LEFT = 0,          ///< MVP of left block
+  MD_ABOVE,             ///< MVP of above block
+  MD_ABOVE_RIGHT,       ///< MVP of above right block
+  MD_BELOW_LEFT,        ///< MVP of below left block
+  MD_ABOVE_LEFT         ///< MVP of above left block
+};
+
+enum StoredResidualType
+{
+  RESIDUAL_RECONSTRUCTED          = 0,
+  RESIDUAL_ENCODER_SIDE           = 1,
+  NUMBER_OF_STORED_RESIDUAL_TYPES = 2
+};
+
+enum TransformDirection
+{
+  TRANSFORM_FORWARD              = 0,
+  TRANSFORM_INVERSE              = 1,
+  TRANSFORM_NUMBER_OF_DIRECTIONS = 2
+};
+
+/// supported ME search methods
+enum MESearchMethod
+{
+  FULL_SEARCH                = 0,     ///< Full search
+  DIAMOND                    = 1,     ///< Fast search
+  SELECTIVE                  = 2      ///< Selective search
+};
+
+/// coefficient scanning type used in ACS
+enum COEFF_SCAN_TYPE
+{
+  SCAN_DIAG = 0,        ///< up-right diagonal scan
+  SCAN_HOR  = 1,        ///< horizontal first scan
+  SCAN_VER  = 2,        ///< vertical first scan
+  SCAN_NUMBER_OF_TYPES = 3
+};
+
+enum COEFF_SCAN_GROUP_TYPE
+{
+  SCAN_UNGROUPED   = 0,
+  SCAN_GROUPED_4x4 = 1,
+  SCAN_NUMBER_OF_GROUP_TYPES = 2
+};
+
+enum SignificanceMapContextType
+{
+  CONTEXT_TYPE_4x4    = 0,
+  CONTEXT_TYPE_8x8    = 1,
+  CONTEXT_TYPE_NxN    = 2,
+  CONTEXT_TYPE_SINGLE = 3,
+  CONTEXT_NUMBER_OF_TYPES = 4
+};
+
+enum ScalingListMode
+{
+  SCALING_LIST_OFF,
+  SCALING_LIST_DEFAULT,
+  SCALING_LIST_FILE_READ
+};
+
+enum ScalingListSize
+{
+  SCALING_LIST_4x4 = 0,
+  SCALING_LIST_8x8,
+  SCALING_LIST_16x16,
+  SCALING_LIST_32x32,
+  SCALING_LIST_SIZE_NUM
+};
+
+// Slice / Slice segment encoding modes
+enum SliceConstraint
+{
+  NO_SLICES              = 0,          ///< don't use slices / slice segments
+  FIXED_NUMBER_OF_CTU    = 1,          ///< Limit maximum number of largest coding tree units in a slice / slice segments
+  FIXED_NUMBER_OF_BYTES  = 2,          ///< Limit maximum number of bytes in a slice / slice segment
+  FIXED_NUMBER_OF_TILES  = 3,          ///< slices / slice segments span an integer number of tiles
+};
+
+enum SAOMode //mode
+{
+  SAO_MODE_OFF = 0,
+  SAO_MODE_NEW,
+  SAO_MODE_MERGE,
+  NUM_SAO_MODES
+};
+
+enum SAOModeMergeTypes
+{
+  SAO_MERGE_LEFT =0,
+  SAO_MERGE_ABOVE,
+  NUM_SAO_MERGE_TYPES
+};
+
+
+enum SAOModeNewTypes
+{
+  SAO_TYPE_START_EO =0,
+  SAO_TYPE_EO_0 = SAO_TYPE_START_EO,
+  SAO_TYPE_EO_90,
+  SAO_TYPE_EO_135,
+  SAO_TYPE_EO_45,
+
+  SAO_TYPE_START_BO,
+  SAO_TYPE_BO = SAO_TYPE_START_BO,
+
+  NUM_SAO_NEW_TYPES
+};
+#define NUM_SAO_EO_TYPES_LOG2 2
+
+enum SAOEOClasses
+{
+  SAO_CLASS_EO_FULL_VALLEY = 0,
+  SAO_CLASS_EO_HALF_VALLEY = 1,
+  SAO_CLASS_EO_PLAIN       = 2,
+  SAO_CLASS_EO_HALF_PEAK   = 3,
+  SAO_CLASS_EO_FULL_PEAK   = 4,
+  NUM_SAO_EO_CLASSES,
+};
+
+#define NUM_SAO_BO_CLASSES_LOG2  5
+#define NUM_SAO_BO_CLASSES       (1<<NUM_SAO_BO_CLASSES_LOG2)
+
+namespace Profile
+{
+  enum Name
+  {
+    NONE = 0,
+    MAIN = 1,
+    MAIN10 = 2,
+    MAINSTILLPICTURE = 3,
+    MAINREXT = 4,
+    HIGHTHROUGHPUTREXT = 5
+  };
+}
+
+namespace Level
+{
+  enum Tier
+  {
+    MAIN = 0,
+    HIGH = 1,
+  };
+
+  enum Name
+  {
+    // code = (level * 30)
+    NONE     = 0,
+    LEVEL1   = 30,
+    LEVEL2   = 60,
+    LEVEL2_1 = 63,
+    LEVEL3   = 90,
+    LEVEL3_1 = 93,
+    LEVEL4   = 120,
+    LEVEL4_1 = 123,
+    LEVEL5   = 150,
+    LEVEL5_1 = 153,
+    LEVEL5_2 = 156,
+    LEVEL6   = 180,
+    LEVEL6_1 = 183,
+    LEVEL6_2 = 186,
+    LEVEL8_5 = 255,
+  };
+}
+
+enum CostMode
+{
+  COST_STANDARD_LOSSY              = 0,
+  COST_SEQUENCE_LEVEL_LOSSLESS     = 1,
+  COST_LOSSLESS_CODING             = 2,
+  COST_MIXED_LOSSLESS_LOSSY_CODING = 3
+};
+
+enum SPSExtensionFlagIndex
+{
+  SPS_EXT__REXT           = 0,
+//SPS_EXT__MVHEVC         = 1, //for use in future versions
+//SPS_EXT__SHVC           = 2, //for use in future versions
+  NUM_SPS_EXTENSION_FLAGS = 8
+};
+
+enum PPSExtensionFlagIndex
+{
+  PPS_EXT__REXT           = 0,
+//PPS_EXT__MVHEVC         = 1, //for use in future versions
+//PPS_EXT__SHVC           = 2, //for use in future versions
+  NUM_PPS_EXTENSION_FLAGS = 8
+};
+
+// ====================================================================================================================
+// Type definition
+// ====================================================================================================================
+
+#if RExt__HIGH_BIT_DEPTH_SUPPORT
+typedef       Int             Pel;               ///< pixel type
+typedef       Int64           TCoeff;            ///< transform coefficient
+typedef       Int             TMatrixCoeff;      ///< transform matrix coefficient
+typedef       Short           TFilterCoeff;      ///< filter coefficient
+typedef       Int64           Intermediate_Int;  ///< used as intermediate value in calculations
+typedef       UInt64          Intermediate_UInt; ///< used as intermediate value in calculations
+#else
+typedef       Short           Pel;               ///< pixel type
+typedef       Int             TCoeff;            ///< transform coefficient
+typedef       Short           TMatrixCoeff;      ///< transform matrix coefficient
+typedef       Short           TFilterCoeff;      ///< filter coefficient
+typedef       Int             Intermediate_Int;  ///< used as intermediate value in calculations
+typedef       UInt            Intermediate_UInt; ///< used as intermediate value in calculations
+#endif
+
+#if FULL_NBIT
+typedef       UInt64          Distortion;        ///< distortion measurement
+#else
+typedef       UInt            Distortion;        ///< distortion measurement
+#endif
+
+/// parameters for adaptive loop filter
+class TComPicSym;
+
+#define MAX_NUM_SAO_CLASSES  32  // max(NUM_SAO_EO_CLASSES, NUM_SAO_BO_CLASSES)
+
+struct SAOOffset
+{
+  SAOMode modeIdc; // NEW, MERGE, OFF
+  Int typeIdc;     // union of SAOModeMergeTypes and SAOModeNewTypes, depending on modeIdc.
+  Int typeAuxInfo; // BO: starting band index
+  Int offset[MAX_NUM_SAO_CLASSES];
+
+  SAOOffset();
+  ~SAOOffset();
+  Void reset();
+
+  const SAOOffset& operator= (const SAOOffset& src);
+};
+
+struct SAOBlkParam
+{
+
+  SAOBlkParam();
+  ~SAOBlkParam();
+  Void reset();
+  const SAOBlkParam& operator= (const SAOBlkParam& src);
+  SAOOffset& operator[](Int compIdx){ return offsetParam[compIdx];}
+private:
+  SAOOffset offsetParam[MAX_NUM_COMPONENT];
+
+};
+
+
+/// parameters for deblocking filter
+typedef struct _LFCUParam
+{
+  Bool bInternalEdge;                     ///< indicates internal edge
+  Bool bLeftEdge;                         ///< indicates left edge
+  Bool bTopEdge;                          ///< indicates top edge
+} LFCUParam;
+
+
+
+//TU settings for entropy encoding
+struct TUEntropyCodingParameters
+{
+  const UInt            *scan;
+  const UInt            *scanCG;
+        COEFF_SCAN_TYPE  scanType;
+        UInt             widthInGroups;
+        UInt             heightInGroups;
+        UInt             firstSignificanceMapContext;
+};
+
+
+struct TComDigest
+{
+  std::vector<UChar> hash;
+
+  Bool operator==(const TComDigest &other) const
+  {
+    if (other.hash.size() != hash.size()) return false;
+    for(UInt i=0; i<UInt(hash.size()); i++)
+      if (other.hash[i] != hash[i]) return false;
+    return true;
+  }
+
+  Bool operator!=(const TComDigest &other) const
+  {
+    return !(*this == other);
+  }
+};
+
+struct TComSEITimeSet
+{
+  TComSEITimeSet() : clockTimeStampFlag(false),
+                     numUnitFieldBasedFlag(false),
+                     countingType(0),
+                     fullTimeStampFlag(false),
+                     discontinuityFlag(false),
+                     cntDroppedFlag(false),
+                     numberOfFrames(0),
+                     secondsValue(0),
+                     minutesValue(0),
+                     hoursValue(0),
+                     secondsFlag(false),
+                     minutesFlag(false),
+                     hoursFlag(false),
+                     timeOffsetLength(0),
+                     timeOffsetValue(0)
+  { }
+  Bool clockTimeStampFlag;
+  Bool numUnitFieldBasedFlag;
+  Int  countingType;
+  Bool fullTimeStampFlag;
+  Bool discontinuityFlag;
+  Bool cntDroppedFlag;
+  Int  numberOfFrames;
+  Int  secondsValue;
+  Int  minutesValue;
+  Int  hoursValue;
+  Bool secondsFlag;
+  Bool minutesFlag;
+  Bool hoursFlag;
+  Int  timeOffsetLength;
+  Int  timeOffsetValue;
+};
+
+struct TComSEIMasteringDisplay
+{
+  Bool      colourVolumeSEIEnabled;
+  UInt      maxLuminance;
+  UInt      minLuminance;
+  UShort    primaries[3][2];
+  UShort    whitePoint[2];
+};
+//! \}
+
+#endif
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/AnnexBwrite.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,91 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#ifndef __ANNEXBWRITE__
+#define __ANNEXBWRITE__
+
+#include <ostream>
+#include "TLibCommon/AccessUnit.h"
+#include "NALwrite.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+/**
+ * write all NALunits in au to bytestream out in Annex B (byte stream)
+ * format.  NALunits are written in the order they are found in au.
+ * the zero_byte word (i.e. a 4-byte 0x00000001 start code instead of the
+ * 3-byte 0x000001) is prepended to:
+ *  - the initial startcode in the access unit,
+ *  - any VPS/SPS/PPS nal units
+ * Returns the byte size of each unit as written (start code included),
+ * in stream order.
+ */
+static std::vector<UInt> writeAnnexB(std::ostream& out, const AccessUnit& au)
+{
+  std::vector<UInt> annexBsizes;
+
+  for (AccessUnit::const_iterator it = au.begin(); it != au.end(); it++)
+  {
+    const NALUnitEBSP& nalu = **it;
+    UInt size = 0; /* size of annexB unit in bytes */
+
+    static const Char start_code_prefix[] = {0,0,0,1};
+    if (it == au.begin() || nalu.m_nalUnitType == NAL_UNIT_VPS || nalu.m_nalUnitType == NAL_UNIT_SPS || nalu.m_nalUnitType == NAL_UNIT_PPS)
+    {
+      /* From AVC, When any of the following conditions are fulfilled, the
+       * zero_byte syntax element shall be present:
+       *  - the nal_unit_type within the nal_unit() is equal to 7 (sequence
+       *    parameter set) or 8 (picture parameter set),
+       *  - the byte stream NAL unit syntax structure contains the first NAL
+       *    unit of an access unit in decoding order, as specified by subclause
+       *    7.4.1.2.3.
+       */
+      out.write(start_code_prefix, 4);   // zero_byte + 3-byte start code
+      size += 4;
+    }
+    else
+    {
+      out.write(start_code_prefix+1, 3); // plain 3-byte start code
+      size += 3;
+    }
+    out << nalu.m_nalUnitData.str();     // already EBSP (emulation-prevented) data
+    size += UInt(nalu.m_nalUnitData.str().size());
+
+    annexBsizes.push_back(size);
+  }
+
+  return annexBsizes;
+}
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/NALwrite.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,123 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <vector>
+#include <algorithm>
+#include <ostream>
+
+#include "TLibCommon/NAL.h"
+#include "TLibCommon/TComBitStream.h"
+#include "NALwrite.h"
+
+using namespace std;
+
+//! \ingroup TLibEncoder
+//! \{
+
+static const Char emulation_prevention_three_byte[] = {3};
+
+// Serialise the two-byte HEVC nal_unit_header() of nalu and write it to out.
+Void writeNalUnitHeader(ostream& out, OutputNALUnit& nalu)       // nal_unit_header()
+{
+TComOutputBitstream bsNALUHeader;
+
+  bsNALUHeader.write(0,1);                    // forbidden_zero_bit
+  bsNALUHeader.write(nalu.m_nalUnitType, 6);  // nal_unit_type
+  bsNALUHeader.write(nalu.m_reservedZero6Bits, 6);                   // nuh_reserved_zero_6bits
+  bsNALUHeader.write(nalu.m_temporalId+1, 3); // nuh_temporal_id_plus1
+
+  out.write(bsNALUHeader.getByteStream(), bsNALUHeader.getByteStreamLength());
+}
+/**
+ * write nalu to bytestream out, performing RBSP anti startcode
+ * emulation as required.  nalu.m_RBSPayload must be byte aligned.
+ * Output is the NAL unit header followed by the EBSP (emulation-prevented)
+ * payload bytes.
+ */
+Void write(ostream& out, OutputNALUnit& nalu)
+{
+  writeNalUnitHeader(out, nalu);
+  /* write out rsbp_byte's, inserting any required
+   * emulation_prevention_three_byte's */
+  /* 7.4.1 ...
+   * emulation_prevention_three_byte is a byte equal to 0x03. When an
+   * emulation_prevention_three_byte is present in the NAL unit, it shall be
+   * discarded by the decoding process.
+   * The last byte of the NAL unit shall not be equal to 0x00.
+   * Within the NAL unit, the following three-byte sequences shall not occur at
+   * any byte-aligned position:
+   *  - 0x000000
+   *  - 0x000001
+   *  - 0x000002
+   * Within the NAL unit, any four-byte sequence that starts with 0x000003
+   * other than the following sequences shall not occur at any byte-aligned
+   * position:
+   *  - 0x00000300
+   *  - 0x00000301
+   *  - 0x00000302
+   *  - 0x00000303
+   */
+  vector<uint8_t>& rbsp   = nalu.m_Bitstream.getFIFO();
+
+  vector<uint8_t> outputBuffer;
+  outputBuffer.resize(rbsp.size()*2+1); //there can never be enough emulation_prevention_three_bytes to require this much space
+  std::size_t outputAmount = 0;
+  Int         zeroCount    = 0;   // run length of consecutive 0x00 bytes just emitted
+  for (vector<uint8_t>::iterator it = rbsp.begin(); it != rbsp.end(); it++)
+  {
+    const uint8_t v=(*it);
+    // two zeros followed by a byte <= 0x03 would form a forbidden
+    // byte-aligned pattern: emit an emulation_prevention_three_byte first
+    if (zeroCount==2 && v<=3)
+    {
+      outputBuffer[outputAmount++]=emulation_prevention_three_byte[0];
+      zeroCount=0;
+    }
+    if (v==0) zeroCount++; else zeroCount=0;
+    outputBuffer[outputAmount++]=v;
+  }
+
+  /* 7.4.1.1
+   * ... when the last byte of the RBSP data is equal to 0x00 (which can
+   * only occur when the RBSP ends in a cabac_zero_word), a final byte equal
+   * to 0x03 is appended to the end of the data.
+   */
+  if (zeroCount>0) outputBuffer[outputAmount++]=emulation_prevention_three_byte[0];
+  out.write((Char*)&(*outputBuffer.begin()), outputAmount);
+}
+
+/**
+ * Write rbsp_trailing_bits to bs causing it to become byte-aligned
+ */
+Void writeRBSPTrailingBits(TComOutputBitstream& bs)
+{
+  bs.write( 1, 1 );     // rbsp_stop_one_bit
+  bs.writeAlignZero();  // rbsp_alignment_zero_bit(s) until byte aligned
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/NALwrite.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,88 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#ifndef __NALWRITE__
+#define __NALWRITE__
+
+#include <ostream>
+
+#include "TLibCommon/TypeDef.h"
+#include "TLibCommon/TComBitStream.h"
+#include "TLibCommon/NAL.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+/**
+ * A convenience wrapper to NALUnit that also provides a
+ * bitstream object for accumulating the unit's RBSP payload.
+ */
+struct OutputNALUnit : public NALUnit
+{
+  /**
+   * construct an OutputNALunit structure with given header values and
+   * storage for a bitstream.  Upon construction the NALunit header is
+   * written to the bitstream.
+   */
+  OutputNALUnit(
+    NalUnitType nalUnitType,
+    UInt temporalID = 0,
+    UInt reserved_zero_6bits = 0)
+  : NALUnit(nalUnitType, temporalID, reserved_zero_6bits)
+  , m_Bitstream()
+  {}
+
+  // Copy the header fields from src, discarding any payload bits
+  // previously buffered in m_Bitstream.
+  OutputNALUnit& operator=(const NALUnit& src)
+  {
+    m_Bitstream.clear();
+    static_cast<NALUnit*>(this)->operator=(src);
+    return *this;
+  }
+
+  TComOutputBitstream m_Bitstream;  // RBSP payload being assembled
+};
+
+Void write(std::ostream& out, OutputNALUnit& nalu);
+Void writeRBSPTrailingBits(TComOutputBitstream& bs);
+
+// Construct an EBSP NAL unit from nalu: copies the header fields via the
+// NALUnit base, then serialises header + emulation-prevented payload into
+// m_nalUnitData using write() above.
+inline NALUnitEBSP::NALUnitEBSP(OutputNALUnit& nalu)
+  : NALUnit(nalu)
+{
+  write(m_nalUnitData, nalu);
+}
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/SEIwrite.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,791 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "TLibCommon/TComBitCounter.h"
+#include "TLibCommon/TComBitStream.h"
+#include "TLibCommon/SEI.h"
+#include "TLibCommon/TComSlice.h"
+#include "TLibCommon/TComPicYuv.h"
+#include "SEIwrite.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+#if ENC_DEC_TRACE
+// Debug-trace helpers (compiled only when ENC_DEC_TRACE is set):
+// print SEI message banners to the global trace file g_hTrace.
+Void  xTraceSEIHeader()
+{
+  fprintf( g_hTrace, "=========== SEI message ===========\n");
+}
+
+// Print a banner naming the specific SEI payload type being traced.
+Void  xTraceSEIMessageType(SEI::PayloadType payloadType)
+{
+  fprintf( g_hTrace, "=========== %s SEI message ===========\n", SEI::getSEIMessageString(payloadType));
+}
+#endif
+
+// Dispatch sei to the writer for its payload type, then byte-align the
+// bitstream (SEI payloads end byte-aligned).  An unhandled type asserts.
+// bs/sps are forwarded only to the writers that need them.
+Void SEIWriter::xWriteSEIpayloadData(TComBitIf& bs, const SEI& sei, TComSPS *sps)
+{
+  switch (sei.payloadType())
+  {
+  case SEI::USER_DATA_UNREGISTERED:
+    xWriteSEIuserDataUnregistered(*static_cast<const SEIuserDataUnregistered*>(&sei));
+    break;
+  case SEI::ACTIVE_PARAMETER_SETS:
+    xWriteSEIActiveParameterSets(*static_cast<const SEIActiveParameterSets*>(& sei));
+    break;
+  case SEI::DECODING_UNIT_INFO:
+    xWriteSEIDecodingUnitInfo(*static_cast<const SEIDecodingUnitInfo*>(& sei), sps);
+    break;
+  case SEI::DECODED_PICTURE_HASH:
+    xWriteSEIDecodedPictureHash(*static_cast<const SEIDecodedPictureHash*>(&sei));
+    break;
+  case SEI::BUFFERING_PERIOD:
+    xWriteSEIBufferingPeriod(*static_cast<const SEIBufferingPeriod*>(&sei), sps);
+    break;
+  case SEI::PICTURE_TIMING:
+    xWriteSEIPictureTiming(*static_cast<const SEIPictureTiming*>(&sei), sps);
+    break;
+  case SEI::RECOVERY_POINT:
+    xWriteSEIRecoveryPoint(*static_cast<const SEIRecoveryPoint*>(&sei));
+    break;
+  case SEI::FRAME_PACKING:
+    xWriteSEIFramePacking(*static_cast<const SEIFramePacking*>(&sei));
+    break;
+  case SEI::SEGM_RECT_FRAME_PACKING:
+    xWriteSEISegmentedRectFramePacking(*static_cast<const SEISegmentedRectFramePacking*>(&sei));
+    break;
+  case SEI::DISPLAY_ORIENTATION:
+    xWriteSEIDisplayOrientation(*static_cast<const SEIDisplayOrientation*>(&sei));
+    break;
+  case SEI::TEMPORAL_LEVEL0_INDEX:
+    xWriteSEITemporalLevel0Index(*static_cast<const SEITemporalLevel0Index*>(&sei));
+    break;
+  case SEI::REGION_REFRESH_INFO:
+    xWriteSEIGradualDecodingRefreshInfo(*static_cast<const SEIGradualDecodingRefreshInfo*>(&sei));
+    break;
+  case SEI::NO_DISPLAY:
+    xWriteSEINoDisplay(*static_cast<const SEINoDisplay*>(&sei));
+    break;
+  case SEI::TONE_MAPPING_INFO:
+    xWriteSEIToneMappingInfo(*static_cast<const SEIToneMappingInfo*>(&sei));
+    break;
+  case SEI::SOP_DESCRIPTION:
+    xWriteSEISOPDescription(*static_cast<const SEISOPDescription*>(&sei));
+    break;
+  case SEI::SCALABLE_NESTING:
+    xWriteSEIScalableNesting(bs, *static_cast<const SEIScalableNesting*>(&sei), sps);
+    break;
+  case SEI::CHROMA_SAMPLING_FILTER_HINT:
+    xWriteSEIChromaSamplingFilterHint(*static_cast<const SEIChromaSamplingFilterHint*>(&sei)/*, sps*/);
+    break;
+  case SEI::TEMP_MOTION_CONSTRAINED_TILE_SETS:
+    xWriteSEITempMotionConstrainedTileSets(bs, *static_cast<const SEITempMotionConstrainedTileSets*>(&sei));
+    break;
+  case SEI::TIME_CODE:
+    xWriteSEITimeCode(*static_cast<const SEITimeCode*>(&sei));
+    break;
+  case SEI::KNEE_FUNCTION_INFO:
+    xWriteSEIKneeFunctionInfo(*static_cast<const SEIKneeFunctionInfo*>(&sei));
+    break;
+  case SEI::MASTERING_DISPLAY_COLOUR_VOLUME:
+    xWriteSEIMasteringDisplayColourVolume(*static_cast<const SEIMasteringDisplayColourVolume*>(&sei));
+    break;
+  default:
+    assert(!"Unhandled SEI message");
+    break;
+  }
+  xWriteByteAlign();
+}
+
+/**
+ * marshal a single SEI message sei, storing the marshalled representation
+ * in bitstream bs.  Two passes: the payload is first written to a bit
+ * counter to learn its byte size, then the payload_type / payload_size
+ * headers and the payload itself are written to bs.
+ */
+Void SEIWriter::writeSEImessage(TComBitIf& bs, const SEI& sei, TComSPS *sps)
+{
+  /* calculate how large the payload data is */
+  /* TODO: this would be far nicer if it used vectored buffers */
+  TComBitCounter bs_count;
+  bs_count.resetBits();
+  setBitstream(&bs_count);
+
+
+#if ENC_DEC_TRACE
+  Bool traceEnable = g_HLSTraceEnable;
+  g_HLSTraceEnable = false;   // suppress tracing during the counting pass
+#endif
+  xWriteSEIpayloadData(bs_count, sei, sps);
+#if ENC_DEC_TRACE
+  g_HLSTraceEnable = traceEnable;
+#endif
+
+  UInt payload_data_num_bits = bs_count.getNumberOfWrittenBits();
+  assert(0 == payload_data_num_bits % 8);   // payload must be byte aligned
+
+  setBitstream(&bs);
+
+#if ENC_DEC_TRACE
+  if (g_HLSTraceEnable)
+  xTraceSEIHeader();
+#endif
+
+  // payload_type is coded as a run of 0xff bytes plus a final byte < 0xff
+  UInt payloadType = sei.payloadType();
+  for (; payloadType >= 0xff; payloadType -= 0xff)
+  {
+    WRITE_CODE(0xff, 8, "payload_type");
+  }
+  WRITE_CODE(payloadType, 8, "payload_type");
+
+  // payload_size uses the same 0xff-chunked coding, in bytes
+  UInt payloadSize = payload_data_num_bits/8;
+  for (; payloadSize >= 0xff; payloadSize -= 0xff)
+  {
+    WRITE_CODE(0xff, 8, "payload_size");
+  }
+  WRITE_CODE(payloadSize, 8, "payload_size");
+
+  /* payloadData */
+#if ENC_DEC_TRACE
+  if (g_HLSTraceEnable)
+  xTraceSEIMessageType(sei.payloadType());
+#endif
+
+  xWriteSEIpayloadData(bs, sei, sps);
+}
+
+/**
+ * marshal a user_data_unregistered SEI message sei, storing the marshalled
+ * representation in bitstream bs: the 16-byte ISO/IEC 11578 UUID followed
+ * by the raw user data bytes.
+ */
+Void SEIWriter::xWriteSEIuserDataUnregistered(const SEIuserDataUnregistered &sei)
+{
+  for (UInt i = 0; i < ISO_IEC_11578_LEN; i++)
+  {
+    WRITE_CODE(sei.uuid_iso_iec_11578[i], 8 , "sei.uuid_iso_iec_11578[i]");
+  }
+
+  for (UInt i = 0; i < sei.userDataLength; i++)
+  {
+    WRITE_CODE(sei.userData[i], 8 , "user_data");
+  }
+}
+
+/**
+ * marshal a decoded picture hash SEI message, storing the marshalled
+ * representation in bitstream bs: the hash_type byte followed by the
+ * digest bytes.  traceString only selects the trace label per method.
+ */
+Void SEIWriter::xWriteSEIDecodedPictureHash(const SEIDecodedPictureHash& sei)
+{
+  const Char *traceString="\0";
+  switch (sei.method)
+  {
+    case SEIDecodedPictureHash::MD5: traceString="picture_md5"; break;
+    case SEIDecodedPictureHash::CRC: traceString="picture_crc"; break;
+    case SEIDecodedPictureHash::CHECKSUM: traceString="picture_checksum"; break;
+    default: assert(false); break;
+  }
+
+  if (traceString != 0) //use of this variable is needed to avoid a compiler error with G++ 4.6.1
+  {
+    WRITE_CODE(sei.method, 8, "hash_type");
+    for(UInt i=0; i<UInt(sei.m_digest.hash.size()); i++)
+    {
+      WRITE_CODE(sei.m_digest.hash[i], 8, traceString);
+    }
+  }
+}
+
+// Write the active parameter sets SEI: active VPS id, flags, and the list
+// of active SPS ids (list length must match numSpsIdsMinus1 + 1).
+Void SEIWriter::xWriteSEIActiveParameterSets(const SEIActiveParameterSets& sei)
+{
+  WRITE_CODE(sei.activeVPSId,     4,         "active_video_parameter_set_id");
+  WRITE_FLAG(sei.m_selfContainedCvsFlag,     "self_contained_cvs_flag");
+  WRITE_FLAG(sei.m_noParameterSetUpdateFlag, "no_parameter_set_update_flag");
+  WRITE_UVLC(sei.numSpsIdsMinus1,            "num_sps_ids_minus1");
+
+  assert (sei.activeSeqParameterSetId.size() == (sei.numSpsIdsMinus1 + 1));
+
+  for (Int i = 0; i < sei.activeSeqParameterSetId.size(); i++)
+  {
+    WRITE_UVLC(sei.activeSeqParameterSetId[i], "active_seq_parameter_set_id"); 
+  }
+}
+
+// Write the decoding unit info SEI; field lengths come from the HRD
+// parameters inside the SPS VUI.
+Void SEIWriter::xWriteSEIDecodingUnitInfo(const SEIDecodingUnitInfo& sei, TComSPS *sps)
+{
+  TComVUI *vui = sps->getVuiParameters();
+  WRITE_UVLC(sei.m_decodingUnitIdx, "decoding_unit_idx");
+  if(vui->getHrdParameters()->getSubPicCpbParamsInPicTimingSEIFlag())
+  {
+    WRITE_CODE( sei.m_duSptCpbRemovalDelay, (vui->getHrdParameters()->getDuCpbRemovalDelayLengthMinus1() + 1), "du_spt_cpb_removal_delay");
+  }
+  WRITE_FLAG( sei.m_dpbOutputDuDelayPresentFlag, "dpb_output_du_delay_present_flag");
+  if(sei.m_dpbOutputDuDelayPresentFlag)
+  {
+    WRITE_CODE(sei.m_picSptDpbOutputDuDelay, vui->getHrdParameters()->getDpbOutputDelayDuLengthMinus1() + 1, "pic_spt_dpb_output_du_delay");
+  }
+}
+
+// Write the buffering period SEI: CPB/DPB delay offsets plus initial CPB
+// removal delays for each CPB of the NAL and/or VCL HRD, as signalled by
+// the HRD parameters in the SPS VUI.  Field bit lengths come from the HRD.
+Void SEIWriter::xWriteSEIBufferingPeriod(const SEIBufferingPeriod& sei, TComSPS *sps)
+{
+  Int i, nalOrVcl;
+  TComVUI *vui = sps->getVuiParameters();
+  TComHRD *hrd = vui->getHrdParameters();
+
+  WRITE_UVLC( sei.m_bpSeqParameterSetId, "bp_seq_parameter_set_id" );
+  if( !hrd->getSubPicCpbParamsPresentFlag() )
+  {
+    WRITE_FLAG( sei.m_rapCpbParamsPresentFlag, "irap_cpb_params_present_flag" );
+  }
+  if( sei.m_rapCpbParamsPresentFlag )
+  {
+    WRITE_CODE( sei.m_cpbDelayOffset, hrd->getCpbRemovalDelayLengthMinus1() + 1, "cpb_delay_offset" );
+    WRITE_CODE( sei.m_dpbDelayOffset, hrd->getDpbOutputDelayLengthMinus1()  + 1, "dpb_delay_offset" );
+  }
+  WRITE_FLAG( sei.m_concatenationFlag, "concatenation_flag");
+  WRITE_CODE( sei.m_auCpbRemovalDelayDelta - 1, ( hrd->getCpbRemovalDelayLengthMinus1() + 1 ), "au_cpb_removal_delay_delta_minus1" );
+  // nalOrVcl == 0 -> NAL HRD, 1 -> VCL HRD; each writes one entry per CPB
+  for( nalOrVcl = 0; nalOrVcl < 2; nalOrVcl ++ )
+  {
+    if( ( ( nalOrVcl == 0 ) && ( hrd->getNalHrdParametersPresentFlag() ) ) ||
+        ( ( nalOrVcl == 1 ) && ( hrd->getVclHrdParametersPresentFlag() ) ) )
+    {
+      for( i = 0; i < ( hrd->getCpbCntMinus1( 0 ) + 1 ); i ++ )
+      {
+        WRITE_CODE( sei.m_initialCpbRemovalDelay[i][nalOrVcl],( hrd->getInitialCpbRemovalDelayLengthMinus1() + 1 ) ,           "initial_cpb_removal_delay" );
+        WRITE_CODE( sei.m_initialCpbRemovalDelayOffset[i][nalOrVcl],( hrd->getInitialCpbRemovalDelayLengthMinus1() + 1 ),      "initial_cpb_removal_delay_offset" );
+        if( hrd->getSubPicCpbParamsPresentFlag() || sei.m_rapCpbParamsPresentFlag )
+        {
+          WRITE_CODE( sei.m_initialAltCpbRemovalDelay[i][nalOrVcl], ( hrd->getInitialCpbRemovalDelayLengthMinus1() + 1 ) ,     "initial_alt_cpb_removal_delay" );
+          WRITE_CODE( sei.m_initialAltCpbRemovalDelayOffset[i][nalOrVcl], ( hrd->getInitialCpbRemovalDelayLengthMinus1() + 1 ),"initial_alt_cpb_removal_delay_offset" );
+        }
+      }
+    }
+  }
+}
+// Write the picture timing SEI: pic_struct/scan info when the VUI signals
+// frame-field info, then CPB removal / DPB output delays and (optionally)
+// per-decoding-unit delays when the HRD enables them.
+Void SEIWriter::xWriteSEIPictureTiming(const SEIPictureTiming& sei,  TComSPS *sps)
+{
+  Int i;
+  TComVUI *vui = sps->getVuiParameters();
+  TComHRD *hrd = vui->getHrdParameters();
+
+  if( vui->getFrameFieldInfoPresentFlag() )
+  {
+    WRITE_CODE( sei.m_picStruct, 4,              "pic_struct" );
+    WRITE_CODE( sei.m_sourceScanType, 2,         "source_scan_type" );
+    WRITE_FLAG( sei.m_duplicateFlag ? 1 : 0,     "duplicate_flag" );
+  }
+
+  if( hrd->getCpbDpbDelaysPresentFlag() )
+  {
+    WRITE_CODE( sei.m_auCpbRemovalDelay - 1, ( hrd->getCpbRemovalDelayLengthMinus1() + 1 ),                                         "au_cpb_removal_delay_minus1" );
+    WRITE_CODE( sei.m_picDpbOutputDelay, ( hrd->getDpbOutputDelayLengthMinus1() + 1 ),                                          "pic_dpb_output_delay" );
+    if(hrd->getSubPicCpbParamsPresentFlag())
+    {
+      WRITE_CODE(sei.m_picDpbOutputDuDelay, hrd->getDpbOutputDelayDuLengthMinus1()+1, "pic_dpb_output_du_delay" );
+    }
+    if( hrd->getSubPicCpbParamsPresentFlag() && hrd->getSubPicCpbParamsInPicTimingSEIFlag() )
+    {
+      WRITE_UVLC( sei.m_numDecodingUnitsMinus1,     "num_decoding_units_minus1" );
+      WRITE_FLAG( sei.m_duCommonCpbRemovalDelayFlag, "du_common_cpb_removal_delay_flag" );
+      if( sei.m_duCommonCpbRemovalDelayFlag )
+      {
+        WRITE_CODE( sei.m_duCommonCpbRemovalDelayMinus1, ( hrd->getDuCpbRemovalDelayLengthMinus1() + 1 ),                       "du_common_cpb_removal_delay_minus1" );
+      }
+      for( i = 0; i <= sei.m_numDecodingUnitsMinus1; i ++ )
+      {
+        WRITE_UVLC( sei.m_numNalusInDuMinus1[ i ],  "num_nalus_in_du_minus1");
+        // per-DU delay is omitted for the last DU and when a common delay is used
+        if( ( !sei.m_duCommonCpbRemovalDelayFlag ) && ( i < sei.m_numDecodingUnitsMinus1 ) )
+        {
+          WRITE_CODE( sei.m_duCpbRemovalDelayMinus1[ i ], ( hrd->getDuCpbRemovalDelayLengthMinus1() + 1 ),                        "du_cpb_removal_delay_minus1" );
+        }
+      }
+    }
+  }
+}
+// Write the recovery point SEI: POC count to recovery plus match/link flags.
+Void SEIWriter::xWriteSEIRecoveryPoint(const SEIRecoveryPoint& sei)
+{
+  WRITE_SVLC( sei.m_recoveryPocCnt,    "recovery_poc_cnt"    );
+  WRITE_FLAG( sei.m_exactMatchingFlag, "exact_matching_flag" );
+  WRITE_FLAG( sei.m_brokenLinkFlag,    "broken_link_flag"    );
+}
+// Write the frame packing arrangement SEI.  Most fields are only coded
+// when the arrangement is not being cancelled; grid positions are only
+// coded for non-quincunx samplings with arrangement type != 5.
+Void SEIWriter::xWriteSEIFramePacking(const SEIFramePacking& sei)
+{
+  WRITE_UVLC( sei.m_arrangementId,                  "frame_packing_arrangement_id" );
+  WRITE_FLAG( sei.m_arrangementCancelFlag,          "frame_packing_arrangement_cancel_flag" );
+
+  if( sei.m_arrangementCancelFlag == 0 ) {
+    WRITE_CODE( sei.m_arrangementType, 7,           "frame_packing_arrangement_type" );
+
+    WRITE_FLAG( sei.m_quincunxSamplingFlag,         "quincunx_sampling_flag" );
+    WRITE_CODE( sei.m_contentInterpretationType, 6, "content_interpretation_type" );
+    WRITE_FLAG( sei.m_spatialFlippingFlag,          "spatial_flipping_flag" );
+    WRITE_FLAG( sei.m_frame0FlippedFlag,            "frame0_flipped_flag" );
+    WRITE_FLAG( sei.m_fieldViewsFlag,               "field_views_flag" );
+    WRITE_FLAG( sei.m_currentFrameIsFrame0Flag,     "current_frame_is_frame0_flag" );
+
+    WRITE_FLAG( sei.m_frame0SelfContainedFlag,      "frame0_self_contained_flag" );
+    WRITE_FLAG( sei.m_frame1SelfContainedFlag,      "frame1_self_contained_flag" );
+
+    if(sei.m_quincunxSamplingFlag == 0 && sei.m_arrangementType != 5)
+    {
+      WRITE_CODE( sei.m_frame0GridPositionX, 4,     "frame0_grid_position_x" );
+      WRITE_CODE( sei.m_frame0GridPositionY, 4,     "frame0_grid_position_y" );
+      WRITE_CODE( sei.m_frame1GridPositionX, 4,     "frame1_grid_position_x" );
+      WRITE_CODE( sei.m_frame1GridPositionY, 4,     "frame1_grid_position_y" );
+    }
+
+    WRITE_CODE( sei.m_arrangementReservedByte, 8,   "frame_packing_arrangement_reserved_byte" );
+    WRITE_FLAG( sei.m_arrangementPersistenceFlag,   "frame_packing_arrangement_persistence_flag" );
+  }
+
+  WRITE_FLAG( sei.m_upsampledAspectRatio,           "upsampled_aspect_ratio" );
+}
+
+// Write the segmented rectangular frame packing SEI; interpretation type
+// and persistence are only coded when the arrangement is not cancelled.
+Void SEIWriter::xWriteSEISegmentedRectFramePacking(const SEISegmentedRectFramePacking& sei)
+{
+  WRITE_FLAG( sei.m_arrangementCancelFlag,          "segmented_rect_frame_packing_arrangement_cancel_flag" );
+  if( sei.m_arrangementCancelFlag == 0 ) 
+  {
+    WRITE_CODE( sei.m_contentInterpretationType, 2, "segmented_rect_content_interpretation_type" );
+    WRITE_FLAG( sei.m_arrangementPersistenceFlag,   "segmented_rect_frame_packing_arrangement_persistence" );
+  }
+}
+
+// Write the tone mapping info SEI.  When not cancelled, the model_id
+// selects the payload shape: 0 = min/max, 1 = sigmoid, 2 = per-code-value
+// interval table (2^target_bit_depth entries), 3 = pivot table,
+// 4 = camera/exposure parameters.  Unknown model ids assert.
+Void SEIWriter::xWriteSEIToneMappingInfo(const SEIToneMappingInfo& sei)
+{
+  Int i;
+  WRITE_UVLC( sei.m_toneMapId,                    "tone_map_id" );
+  WRITE_FLAG( sei.m_toneMapCancelFlag,            "tone_map_cancel_flag" );
+  if( !sei.m_toneMapCancelFlag )
+  {
+    WRITE_FLAG( sei.m_toneMapPersistenceFlag,     "tone_map_persistence_flag" );
+    WRITE_CODE( sei.m_codedDataBitDepth,    8,    "coded_data_bit_depth" );
+    WRITE_CODE( sei.m_targetBitDepth,       8,    "target_bit_depth" );
+    WRITE_UVLC( sei.m_modelId,                    "model_id" );
+    switch(sei.m_modelId)
+    {
+    case 0:
+      {
+        WRITE_CODE( sei.m_minValue,  32,        "min_value" );
+        WRITE_CODE( sei.m_maxValue, 32,         "max_value" );
+        break;
+      }
+    case 1:
+      {
+        WRITE_CODE( sei.m_sigmoidMidpoint, 32,  "sigmoid_midpoint" );
+        WRITE_CODE( sei.m_sigmoidWidth,    32,  "sigmoid_width"    );
+        break;
+      }
+    case 2:
+      {
+        UInt num = 1u << sei.m_targetBitDepth;
+        for(i = 0; i < num; i++)
+        {
+          // interval values are coded in whole bytes rounded up from the coded bit depth
+          WRITE_CODE( sei.m_startOfCodedInterval[i], (( sei.m_codedDataBitDepth + 7 ) >> 3 ) << 3,  "start_of_coded_interval" );
+        }
+        break;
+      }
+    case 3:
+      {
+        WRITE_CODE( sei.m_numPivots, 16,          "num_pivots" );
+        for(i = 0; i < sei.m_numPivots; i++ )
+        {
+          WRITE_CODE( sei.m_codedPivotValue[i], (( sei.m_codedDataBitDepth + 7 ) >> 3 ) << 3,       "coded_pivot_value" );
+          WRITE_CODE( sei.m_targetPivotValue[i], (( sei.m_targetBitDepth + 7 ) >> 3 ) << 3,         "target_pivot_value");
+        }
+        break;
+      }
+    case 4:
+      {
+        WRITE_CODE( sei.m_cameraIsoSpeedIdc,    8,    "camera_iso_speed_idc" );
+        if( sei.m_cameraIsoSpeedIdc == 255) //Extended_ISO
+        {
+          WRITE_CODE( sei.m_cameraIsoSpeedValue,    32,    "camera_iso_speed_value" );
+        }
+        WRITE_CODE( sei.m_exposureIndexIdc,     8,    "exposure_index_idc" );
+        if( sei.m_exposureIndexIdc == 255) //Extended_ISO
+        {
+          WRITE_CODE( sei.m_exposureIndexValue,     32,    "exposure_index_value" );
+        }
+        WRITE_FLAG( sei.m_exposureCompensationValueSignFlag,           "exposure_compensation_value_sign_flag" );
+        WRITE_CODE( sei.m_exposureCompensationValueNumerator,     16,  "exposure_compensation_value_numerator" );
+        WRITE_CODE( sei.m_exposureCompensationValueDenomIdc,      16,  "exposure_compensation_value_denom_idc" );
+        WRITE_CODE( sei.m_refScreenLuminanceWhite,                32,  "ref_screen_luminance_white" );
+        WRITE_CODE( sei.m_extendedRangeWhiteLevel,                32,  "extended_range_white_level" );
+        WRITE_CODE( sei.m_nominalBlackLevelLumaCodeValue,         16,  "nominal_black_level_luma_code_value" );
+        WRITE_CODE( sei.m_nominalWhiteLevelLumaCodeValue,         16,  "nominal_white_level_luma_code_value" );
+        WRITE_CODE( sei.m_extendedWhiteLevelLumaCodeValue,        16,  "extended_white_level_luma_code_value" );
+        break;
+      }
+    default:
+      {
+        assert(!"Undefined SEIToneMapModelId");
+        break;
+      }
+    }//switch m_modelId
+  }//if(!sei.m_toneMapCancelFlag)
+}
+
+// Write the display orientation SEI: flip flags and anticlockwise rotation,
+// coded only when the orientation is not being cancelled.
+Void SEIWriter::xWriteSEIDisplayOrientation(const SEIDisplayOrientation &sei)
+{
+  WRITE_FLAG( sei.cancelFlag,           "display_orientation_cancel_flag" );
+  if( !sei.cancelFlag )
+  {
+    WRITE_FLAG( sei.horFlip,                   "hor_flip" );
+    WRITE_FLAG( sei.verFlip,                   "ver_flip" );
+    WRITE_CODE( sei.anticlockwiseRotation, 16, "anticlockwise_rotation" );
+    WRITE_FLAG( sei.persistenceFlag,          "display_orientation_persistence_flag" );
+  }
+}
+
+// Write the temporal level zero index SEI: two fixed 8-bit indices.
+Void SEIWriter::xWriteSEITemporalLevel0Index(const SEITemporalLevel0Index &sei)
+{
+  WRITE_CODE( sei.tl0Idx, 8 , "tl0_idx" );
+  WRITE_CODE( sei.rapIdx, 8 , "rap_idx" );
+}
+
+// Write the region refresh (gradual decoding refresh) SEI: a single flag.
+Void SEIWriter::xWriteSEIGradualDecodingRefreshInfo(const SEIGradualDecodingRefreshInfo &sei)
+{
+  WRITE_FLAG( sei.m_gdrForegroundFlag, "gdr_foreground_flag");
+}
+
+// The no-display SEI has no payload syntax elements; nothing to write.
+Void SEIWriter::xWriteSEINoDisplay(const SEINoDisplay &sei)
+{
+}
+
+// Write the structure-of-pictures description SEI: per picture, its VCL
+// NAL type and temporal id; an RPS index except for IDR pictures; and a
+// POC delta for every picture after the first.
+Void SEIWriter::xWriteSEISOPDescription(const SEISOPDescription& sei)
+{
+  WRITE_UVLC( sei.m_sopSeqParameterSetId,           "sop_seq_parameter_set_id"               );
+  WRITE_UVLC( sei.m_numPicsInSopMinus1,             "num_pics_in_sop_minus1"               );
+  for (UInt i = 0; i <= sei.m_numPicsInSopMinus1; i++)
+  {
+    WRITE_CODE( sei.m_sopDescVclNaluType[i], 6, "sop_desc_vcl_nalu_type" );
+    WRITE_CODE( sei.m_sopDescTemporalId[i],  3, "sop_desc_temporal_id" );
+    if (sei.m_sopDescVclNaluType[i] != NAL_UNIT_CODED_SLICE_IDR_W_RADL && sei.m_sopDescVclNaluType[i] != NAL_UNIT_CODED_SLICE_IDR_N_LP)
+    {
+      WRITE_UVLC( sei.m_sopDescStRpsIdx[i],           "sop_desc_st_rps_idx"               );
+    }
+    if (i > 0)
+    {
+      WRITE_SVLC( sei.m_sopDescPocDelta[i],           "sop_desc_poc_delta"               );
+    }
+  }
+}
+
+// Writes the scalable nesting SEI: the applicability scope (operation points,
+// or a layer/temporal-id set), byte-aligns, then recursively writes each
+// nested SEI message into the same bitstream.
+Void SEIWriter::xWriteSEIScalableNesting(TComBitIf& bs, const SEIScalableNesting& sei, TComSPS *sps)
+{
+  WRITE_FLAG( sei.m_bitStreamSubsetFlag,             "bitstream_subset_flag"         );
+  WRITE_FLAG( sei.m_nestingOpFlag,                   "nesting_op_flag"               );
+  if (sei.m_nestingOpFlag)
+  {
+    WRITE_FLAG( sei.m_defaultOpFlag,                 "default_op_flag"               );
+    WRITE_UVLC( sei.m_nestingNumOpsMinus1,           "nesting_num_ops_minus1"        );
+    // When the default operation point is used, entry 0 is implicit and the
+    // explicit list starts at index 1.
+    for (UInt i = (sei.m_defaultOpFlag ? 1 : 0); i <= sei.m_nestingNumOpsMinus1; i++)
+    {
+      WRITE_CODE( sei.m_nestingMaxTemporalIdPlus1[i], 3,  "nesting_max_temporal_id_plus1" );
+      WRITE_UVLC( sei.m_nestingOpIdx[i],                  "nesting_op_idx"                );
+    }
+  }
+  else
+  {
+    WRITE_FLAG( sei.m_allLayersFlag,                      "all_layers_flag"               );
+    if (!sei.m_allLayersFlag)
+    {
+      WRITE_CODE( sei.m_nestingNoOpMaxTemporalIdPlus1, 3, "nesting_no_op_max_temporal_id_plus1" );
+      // Trace label corrected: the element written is nesting_num_layers_minus1.
+      WRITE_UVLC( sei.m_nestingNumLayersMinus1,           "nesting_num_layers_minus1"           );
+      for (UInt i = 0; i <= sei.m_nestingNumLayersMinus1; i++)
+      {
+        WRITE_CODE( sei.m_nestingLayerId[i], 6,           "nesting_layer_id"              );
+      }
+    }
+  }
+
+  // byte alignment before the nested SEI payloads
+  while ( m_pcBitIf->getNumberOfWrittenBits() % 8 != 0 )
+  {
+    WRITE_FLAG( 0, "nesting_zero_bit" );
+  }
+
+  // write nested SEI messages
+  for (SEIMessages::const_iterator it = sei.m_nestedSEIs.begin(); it != sei.m_nestedSEIs.end(); it++)
+  {
+    writeSEImessage(bs, *(*it), sps);
+  }
+}
+
+// Writes the temporal motion-constrained tile sets SEI. Two layouts exist:
+// an explicit list of tile sets (each with tile rectangles and optional
+// per-set match/tier/level info), or the compact one-tile-set-per-tile form.
+Void SEIWriter::xWriteSEITempMotionConstrainedTileSets(TComBitIf& bs, const SEITempMotionConstrainedTileSets& sei)
+{
+  //UInt code;
+  WRITE_FLAG((sei.m_mc_all_tiles_exact_sample_value_match_flag ? 1 : 0), "mc_all_tiles_exact_sample_value_match_flag"); 
+  WRITE_FLAG((sei.m_each_tile_one_tile_set_flag                ? 1 : 0), "each_tile_one_tile_set_flag"               );
+
+  if(!sei.m_each_tile_one_tile_set_flag)
+  {
+    WRITE_FLAG((sei.m_limited_tile_set_display_flag ? 1 : 0), "limited_tile_set_display_flag");
+    WRITE_UVLC((sei.getNumberOfTileSets() - 1),               "num_sets_in_message_minus1"   );
+
+    if(sei.getNumberOfTileSets() > 0)
+    {
+      for(Int i = 0; i < sei.getNumberOfTileSets(); i++)
+      {
+        WRITE_UVLC(sei.tileSetData(i).m_mcts_id, "mcts_id");
+
+        // display_tile_set_flag is only present when display is limited.
+        if(sei.m_limited_tile_set_display_flag)
+        { 
+          WRITE_FLAG((sei.tileSetData(i).m_display_tile_set_flag ? 1 : 0), "display_tile_set_flag");  
+        }
+
+        WRITE_UVLC((sei.tileSetData(i).getNumberOfTileRects() - 1), "num_tile_rects_in_set_minus1"); 
+        
+        // Each rectangle is given by its top-left and bottom-right tile index.
+        for(Int j = 0; j < sei.tileSetData(i).getNumberOfTileRects(); j++)
+        {
+          WRITE_UVLC(sei.tileSetData(i).topLeftTileIndex    (j), "top_left_tile_index");  
+          WRITE_UVLC(sei.tileSetData(i).bottomRightTileIndex(j), "bottom_right_tile_index");  
+        }
+
+        // Per-set match flag is redundant (and thus omitted) when the global
+        // all-tiles match flag is already set.
+        if(!sei.m_mc_all_tiles_exact_sample_value_match_flag)
+        {
+          WRITE_FLAG((sei.tileSetData(i).m_exact_sample_value_match_flag ? 1 : 0), "exact_sample_value_match_flag");  
+        }
+
+        WRITE_FLAG((sei.tileSetData(i).m_mcts_tier_level_idc_present_flag ? 1 : 0), "mcts_tier_level_idc_present_flag");
+
+        if(sei.tileSetData(i).m_mcts_tier_level_idc_present_flag)
+        {
+          WRITE_FLAG((sei.tileSetData(i).m_mcts_tier_flag ? 1 : 0), "mcts_tier_flag");
+          WRITE_CODE( sei.tileSetData(i).m_mcts_level_idc, 8,       "mcts_level_idc"); 
+        }
+      }
+    }
+  }
+  else
+  {
+    // Compact form: optionally a single maximum tier/level for all tile sets.
+    WRITE_FLAG((sei.m_max_mcs_tier_level_idc_present_flag ? 1 : 0), "max_mcs_tier_level_idc_present_flag");
+
+    if(sei.m_max_mcs_tier_level_idc_present_flag)
+    {
+      WRITE_FLAG((sei.m_max_mcts_tier_flag ? 1 : 0), "max_mcts_tier_flag");  
+      WRITE_CODE( sei.m_max_mcts_level_idc, 8,       "max_mcts_level_idc"); 
+    }
+  }
+}
+
+// Writes the time code SEI: up to four clock timestamp sets, each with either
+// a full hh:mm:ss or a nested optional seconds/minutes/hours chain, plus an
+// optional variable-length time offset.
+Void SEIWriter::xWriteSEITimeCode(const SEITimeCode& sei)
+{
+  WRITE_CODE(sei.numClockTs, 2, "num_clock_ts");
+  for(Int i = 0; i < sei.numClockTs; i++)
+  {
+    const TComSEITimeSet &currentTimeSet = sei.timeSetArray[i];
+    WRITE_FLAG(currentTimeSet.clockTimeStampFlag, "clock_time_stamp_flag");
+    if(currentTimeSet.clockTimeStampFlag)
+    {
+      WRITE_FLAG(currentTimeSet.numUnitFieldBasedFlag, "units_field_based_flag");
+      WRITE_CODE(currentTimeSet.countingType, 5, "counting_type");
+      WRITE_FLAG(currentTimeSet.fullTimeStampFlag, "full_timestamp_flag");
+      WRITE_FLAG(currentTimeSet.discontinuityFlag, "discontinuity_flag");
+      WRITE_FLAG(currentTimeSet.cntDroppedFlag, "cnt_dropped_flag");
+      WRITE_CODE(currentTimeSet.numberOfFrames, 9, "n_frames");
+      if(currentTimeSet.fullTimeStampFlag)
+      {
+        // Full timestamp: all three fields are always present.
+        WRITE_CODE(currentTimeSet.secondsValue, 6, "seconds_value");
+        WRITE_CODE(currentTimeSet.minutesValue, 6, "minutes_value");
+        WRITE_CODE(currentTimeSet.hoursValue, 5, "hours_value");
+      }
+      else
+      {
+        // Partial timestamp: each field is gated by its own presence flag,
+        // and a later field may only appear if the earlier one is present.
+        WRITE_FLAG(currentTimeSet.secondsFlag, "seconds_flag");
+        if(currentTimeSet.secondsFlag)
+        {
+          WRITE_CODE(currentTimeSet.secondsValue, 6, "seconds_value");
+          WRITE_FLAG(currentTimeSet.minutesFlag, "minutes_flag");
+          if(currentTimeSet.minutesFlag)
+          {
+            WRITE_CODE(currentTimeSet.minutesValue, 6, "minutes_value");
+            WRITE_FLAG(currentTimeSet.hoursFlag, "hours_flag");
+            if(currentTimeSet.hoursFlag)
+              WRITE_CODE(currentTimeSet.hoursValue, 5, "hours_value");
+          }
+        }
+      }
+      WRITE_CODE(currentTimeSet.timeOffsetLength, 5, "time_offset_length");
+      if(currentTimeSet.timeOffsetLength > 0)
+      {
+        if(currentTimeSet.timeOffsetValue >= 0)
+        {
+          WRITE_CODE((UInt)currentTimeSet.timeOffsetValue, currentTimeSet.timeOffsetLength, "time_offset_value");
+        }
+        else
+        {
+          //  Two's complement conversion
+          // NOTE(review): the negated value is not masked to timeOffsetLength
+          // bits; presumably the caller guarantees |timeOffsetValue| fits in
+          // timeOffsetLength-1 bits -- confirm at the call sites.
+          UInt offsetValue = ~(currentTimeSet.timeOffsetValue) + 1;
+          offsetValue |= (1 << (currentTimeSet.timeOffsetLength-1));
+          WRITE_CODE(offsetValue, currentTimeSet.timeOffsetLength, "time_offset_value");
+        }
+      }
+    }
+  }
+}
+
+// Writes the chroma sampling filter hint SEI: the two filter idcs and the
+// vertical filtering process flag; when either idc selects user-defined
+// filters (value 1), the hardcoded coefficient tables follow.
+Void SEIWriter::xWriteSEIChromaSamplingFilterHint(const SEIChromaSamplingFilterHint &sei/*, TComSPS* sps*/)
+{
+  WRITE_CODE(sei.m_verChromaFilterIdc, 8, "ver_chroma_filter_idc");
+  WRITE_CODE(sei.m_horChromaFilterIdc, 8, "hor_chroma_filter_idc");
+  WRITE_FLAG(sei.m_verFilteringProcessFlag, "ver_filtering_process_flag");
+  if(sei.m_verChromaFilterIdc == 1 || sei.m_horChromaFilterIdc == 1)
+  {
+    writeUserDefinedCoefficients(sei);
+  }
+}
+
+// write hardcoded chroma filter coefficients in the SEI messages
+//
+// Emits the user-defined vertical/horizontal filter tables for the chroma
+// sampling filter hint SEI. The original implementation malloc()'d the
+// coefficient arrays on every call and never freed them (a per-call leak,
+// with the malloc results also unchecked); the tables are constant, so they
+// are now static const with identical values -- the written bits are
+// unchanged.
+Void SEIWriter::writeUserDefinedCoefficients(const SEIChromaSamplingFilterHint &sei)
+{
+  static const Int iNumVerticalFilters = 3;
+  static const Int verticalTapLength_minus1[iNumVerticalFilters] = {5,3,3};
+  // Row i holds verticalTapLength_minus1[i]+1 meaningful taps; shorter rows
+  // are zero-padded to the widest filter and the padding is never read.
+  static const Int userVerticalCoefficients[iNumVerticalFilters][6] =
+  {
+    {  -3,  13,  31,  23,  3, -3 },
+    {  -1,  25, 247, -15,  0,  0 },
+    { -20, 186, 100, -10,  0,  0 }
+  };
+
+  static const Int iNumHorizontalFilters = 1;
+  static const Int horizontalTapLength_minus1[iNumHorizontalFilters] = {3};
+  static const Int userHorizontalCoefficients[iNumHorizontalFilters][4] =
+  {
+    { 1, 6, 1, 0 }
+  };
+
+  WRITE_UVLC(3, "target_format_idc");
+  if(sei.m_verChromaFilterIdc == 1)
+  {
+    WRITE_UVLC(iNumVerticalFilters, "num_vertical_filters");
+    for(Int i = 0; i < iNumVerticalFilters; i ++)
+    {
+      WRITE_UVLC(verticalTapLength_minus1[i], "ver_tap_length_minus_1");
+      // NOTE(review): only tap_length_minus1 coefficients are written, one
+      // fewer than the filter holds. This matches the original behaviour
+      // (kept to preserve the bitstream) -- confirm against the SEI spec.
+      for(Int j = 0; j < verticalTapLength_minus1[i]; j ++)
+      {
+        WRITE_SVLC(userVerticalCoefficients[i][j], "ver_filter_coeff");
+      }
+    }
+  }
+  if(sei.m_horChromaFilterIdc == 1)
+  {
+    WRITE_UVLC(iNumHorizontalFilters, "num_horizontal_filters");
+    for(Int i = 0; i < iNumHorizontalFilters; i ++)
+    {
+      WRITE_UVLC(horizontalTapLength_minus1[i], "hor_tap_length_minus_1");
+      for(Int j = 0; j < horizontalTapLength_minus1[i]; j ++)
+      {
+        WRITE_SVLC(userHorizontalCoefficients[i][j], "hor_filter_coeff");
+      }
+    }
+  }
+}
+
+// Writes the knee function info SEI: unless cancelled, the persistence flag,
+// the 32-bit input/output dynamic ranges and display luminances, and the
+// list of 10-bit input/output knee points.
+Void SEIWriter::xWriteSEIKneeFunctionInfo(const SEIKneeFunctionInfo &sei)
+{
+  WRITE_UVLC( sei.m_kneeId, "knee_function_id" );
+  WRITE_FLAG( sei.m_kneeCancelFlag, "knee_function_cancel_flag" ); 
+  if ( !sei.m_kneeCancelFlag )
+  {
+    WRITE_FLAG( sei.m_kneePersistenceFlag, "knee_function_persistence_flag" );
+    WRITE_CODE( (UInt)sei.m_kneeInputDrange , 32,  "input_d_range" );
+    WRITE_CODE( (UInt)sei.m_kneeInputDispLuminance, 32,  "input_disp_luminance" );
+    WRITE_CODE( (UInt)sei.m_kneeOutputDrange, 32,  "output_d_range" );
+    WRITE_CODE( (UInt)sei.m_kneeOutputDispLuminance, 32,  "output_disp_luminance" );
+    WRITE_UVLC( sei.m_kneeNumKneePointsMinus1, "num_knee_points_minus1" );
+    for(Int i = 0; i <= sei.m_kneeNumKneePointsMinus1; i++ )
+    {
+      WRITE_CODE( (UInt)sei.m_kneeInputKneePoint[i], 10,"input_knee_point" );
+      WRITE_CODE( (UInt)sei.m_kneeOutputKneePoint[i], 10, "output_knee_point" );
+    }
+  }
+}
+
+
+// Writes the mastering display colour volume SEI: three 16-bit x/y display
+// primaries, the 16-bit white point, and 32-bit max/min mastering luminance.
+Void SEIWriter::xWriteSEIMasteringDisplayColourVolume(const SEIMasteringDisplayColourVolume& sei)
+{
+  WRITE_CODE( sei.values.primaries[0][0],  16,  "display_primaries_x[0]" );
+  WRITE_CODE( sei.values.primaries[0][1],  16,  "display_primaries_y[0]" );
+
+  WRITE_CODE( sei.values.primaries[1][0],  16,  "display_primaries_x[1]" );
+  WRITE_CODE( sei.values.primaries[1][1],  16,  "display_primaries_y[1]" );
+
+  WRITE_CODE( sei.values.primaries[2][0],  16,  "display_primaries_x[2]" );
+  WRITE_CODE( sei.values.primaries[2][1],  16,  "display_primaries_y[2]" );
+
+  WRITE_CODE( sei.values.whitePoint[0],    16,  "white_point_x" );
+  WRITE_CODE( sei.values.whitePoint[1],    16,  "white_point_y" );
+    
+  WRITE_CODE( sei.values.maxLuminance,     32,  "max_display_mastering_luminance" );
+  WRITE_CODE( sei.values.minLuminance,     32,  "min_display_mastering_luminance" );
+}
+
+
+// Pads the current SEI payload to a byte boundary: one '1' stop bit followed
+// by '0' bits until the written bit count is a multiple of 8. Writes nothing
+// if the payload is already byte-aligned.
+Void SEIWriter::xWriteByteAlign()
+{
+  if( m_pcBitIf->getNumberOfWrittenBits() % 8 != 0)
+  {
+    WRITE_FLAG( 1, "payload_bit_equal_to_one" );
+    while( m_pcBitIf->getNumberOfWrittenBits() % 8 != 0 )
+    {
+      WRITE_FLAG( 0, "payload_bit_equal_to_zero" );
+    }
+  }
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/SEIwrite.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,84 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#ifndef __SEIWRITE__
+#define __SEIWRITE__
+
+#include "SyntaxElementWriter.h"
+#include "TLibCommon/SEI.h"
+
+class TComBitIf;
+
+//! \ingroup TLibEncoder
+//! \{
+// Serialises SEI messages into a bitstream. writeSEImessage() is the public
+// entry point; the xWrite* helpers each encode one SEI payload type using the
+// WRITE_* macros inherited from SyntaxElementWriter.
+class SEIWriter:public SyntaxElementWriter
+{
+public:
+  SEIWriter() {};
+  virtual ~SEIWriter() {};
+
+  // Writes one SEI message (header + payload + alignment) to bs.
+  Void writeSEImessage(TComBitIf& bs, const SEI& sei, TComSPS *sps);
+
+protected:
+  // Dispatches on the SEI payload type to the appropriate xWrite* helper.
+  Void xWriteSEIpayloadData(TComBitIf& bs, const SEI& sei, TComSPS *sps);
+  Void xWriteSEIuserDataUnregistered(const SEIuserDataUnregistered &sei);
+  Void xWriteSEIActiveParameterSets(const SEIActiveParameterSets& sei);
+  Void xWriteSEIDecodingUnitInfo(const SEIDecodingUnitInfo& sei, TComSPS *sps);
+  Void xWriteSEIDecodedPictureHash(const SEIDecodedPictureHash& sei);
+  Void xWriteSEIBufferingPeriod(const SEIBufferingPeriod& sei, TComSPS *sps);
+  Void xWriteSEIPictureTiming(const SEIPictureTiming& sei, TComSPS *sps);
+  // NOTE(review): data member declared between method declarations --
+  // presumably the active SPS cached for nested writes; confirm its setter.
+  TComSPS *m_pSPS;
+  Void xWriteSEIRecoveryPoint(const SEIRecoveryPoint& sei);
+  Void xWriteSEIFramePacking(const SEIFramePacking& sei);
+  Void xWriteSEISegmentedRectFramePacking(const SEISegmentedRectFramePacking& sei);
+  Void xWriteSEIDisplayOrientation(const SEIDisplayOrientation &sei);
+  Void xWriteSEITemporalLevel0Index(const SEITemporalLevel0Index &sei);
+  Void xWriteSEIGradualDecodingRefreshInfo(const SEIGradualDecodingRefreshInfo &sei);
+  Void xWriteSEINoDisplay(const SEINoDisplay &sei);
+  Void xWriteSEIToneMappingInfo(const SEIToneMappingInfo& sei);
+  Void xWriteSEISOPDescription(const SEISOPDescription& sei);
+  Void xWriteSEIScalableNesting(TComBitIf& bs, const SEIScalableNesting& sei, TComSPS *sps);
+  Void xWriteSEITempMotionConstrainedTileSets(TComBitIf& bs, const SEITempMotionConstrainedTileSets& sei);
+  Void xWriteSEITimeCode(const SEITimeCode& sei);
+  Void xWriteSEIChromaSamplingFilterHint(const SEIChromaSamplingFilterHint& sei/*, TComSPS *sps*/);
+  Void writeUserDefinedCoefficients(const SEIChromaSamplingFilterHint& sei);
+  Void xWriteSEIKneeFunctionInfo(const SEIKneeFunctionInfo &sei);
+  Void xWriteSEIMasteringDisplayColourVolume( const SEIMasteringDisplayColourVolume& sei);
+  // Pads the payload to a byte boundary (1 stop bit, then 0 bits).
+  Void xWriteByteAlign();
+};
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/SyntaxElementWriter.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,132 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     SyntaxElementWriter.cpp
+    \brief    CAVLC encoder class
+*/
+
+#include "TLibCommon/CommonDef.h"
+#include "SyntaxElementWriter.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+#if ENC_DEC_TRACE
+
+// Traced variant of xWriteCode: writes the fixed-length u(length) code, then,
+// if HLS tracing is on, logs the symbol counter, name, length and value.
+// NOTE(review): value is UInt but printed with %d (and length<10 only adjusts
+// column padding) -- trace-only output, but %u would be the matching format.
+Void  SyntaxElementWriter::xWriteCodeTr (UInt value, UInt  length, const Char *pSymbolName)
+{
+  xWriteCode (value,length);
+  if( g_HLSTraceEnable )
+  {
+    fprintf( g_hTrace, "%8lld  ", g_nSymbolCounter++ );
+    if( length<10 )
+    {
+      fprintf( g_hTrace, "%-50s u(%d)  : %d\n", pSymbolName, length, value );
+    }
+    else
+    {
+      fprintf( g_hTrace, "%-50s u(%d) : %d\n", pSymbolName, length, value );
+    }
+  }
+}
+
+// Traced variant of xWriteUvlc: writes the ue(v) code and logs it when
+// HLS tracing is enabled.
+Void  SyntaxElementWriter::xWriteUvlcTr (UInt value, const Char *pSymbolName)
+{
+  xWriteUvlc (value);
+  if( g_HLSTraceEnable )
+  {
+    fprintf( g_hTrace, "%8lld  ", g_nSymbolCounter++ );
+    fprintf( g_hTrace, "%-50s ue(v) : %d\n", pSymbolName, value );
+  }
+}
+
+// Traced variant of xWriteSvlc: writes the se(v) code and logs it when
+// HLS tracing is enabled.
+Void  SyntaxElementWriter::xWriteSvlcTr (Int value, const Char *pSymbolName)
+{
+  xWriteSvlc(value);
+  if( g_HLSTraceEnable )
+  {
+    fprintf( g_hTrace, "%8lld  ", g_nSymbolCounter++ );
+    fprintf( g_hTrace, "%-50s se(v) : %d\n", pSymbolName, value );
+  }
+}
+
+// Traced variant of xWriteFlag: writes the single u(1) bit and logs it when
+// HLS tracing is enabled.
+Void  SyntaxElementWriter::xWriteFlagTr(UInt value, const Char *pSymbolName)
+{
+  xWriteFlag(value);
+  if( g_HLSTraceEnable )
+  {
+    fprintf( g_hTrace, "%8lld  ", g_nSymbolCounter++ );
+    fprintf( g_hTrace, "%-50s u(1)  : %d\n", pSymbolName, value );
+  }
+}
+
+#endif
+
+
+// Writes uiCode as a fixed-length u(uiLength) code. Zero-length codes are
+// not allowed (the bit interface cannot write 0 bits), hence the assert.
+Void SyntaxElementWriter::xWriteCode     ( UInt uiCode, UInt uiLength )
+{
+  assert ( uiLength > 0 );
+  m_pcBitIf->write( uiCode, uiLength );
+}
+
+// Writes uiCode as an unsigned Exp-Golomb ue(v) code: for codeNum v, that is
+// floor(log2(v+1)) zero bits, then the (v+1) value in the same number of
+// bits plus one. uiLength accumulates the total code length (always odd).
+Void SyntaxElementWriter::xWriteUvlc     ( UInt uiCode )
+{
+  UInt uiLength = 1;
+  UInt uiTemp = ++uiCode;
+
+  assert ( uiTemp );
+
+  while( 1 != uiTemp )
+  {
+    uiTemp >>= 1;
+    uiLength += 2;
+  }
+  // Take care of cases where uiLength > 32
+  // (split into prefix zeros and the suffix so no single write exceeds 32 bits)
+  m_pcBitIf->write( 0, uiLength >> 1);
+  m_pcBitIf->write( uiCode, (uiLength+1) >> 1);
+}
+
+// Writes iCode as a signed Exp-Golomb se(v) code by mapping the signed value
+// to an unsigned codeNum (xConvertToUInt) and emitting it as ue(v).
+Void SyntaxElementWriter::xWriteSvlc     ( Int iCode )
+{
+  UInt uiCode;
+
+  uiCode = xConvertToUInt( iCode );
+  xWriteUvlc( uiCode );
+}
+
+// Writes a single u(1) flag bit.
+Void SyntaxElementWriter::xWriteFlag( UInt uiCode )
+{
+  m_pcBitIf->write( uiCode, 1 );
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/SyntaxElementWriter.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,97 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     SyntaxElementWriter.h
+    \brief    CAVLC encoder class (header)
+*/
+
+#ifndef __SYNTAXELEMENTWRITER__
+#define __SYNTAXELEMENTWRITER__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include "TLibCommon/CommonDef.h"
+#include "TLibCommon/TComBitStream.h"
+#include "TLibCommon/TComRom.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+#if ENC_DEC_TRACE
+
+#define WRITE_CODE( value, length, name)    xWriteCodeTr ( value, length, name )
+#define WRITE_UVLC( value,         name)    xWriteUvlcTr ( value,         name )
+#define WRITE_SVLC( value,         name)    xWriteSvlcTr ( value,         name )
+#define WRITE_FLAG( value,         name)    xWriteFlagTr ( value,         name )
+
+#else
+
+#define WRITE_CODE( value, length, name)     xWriteCode ( value, length )
+#define WRITE_UVLC( value,         name)     xWriteUvlc ( value )
+#define WRITE_SVLC( value,         name)     xWriteSvlc ( value )
+#define WRITE_FLAG( value,         name)     xWriteFlag ( value )
+
+#endif
+
+// Base class for high-level-syntax writers: wraps a TComBitIf with the
+// fixed-length (u(n)), Exp-Golomb (ue(v)/se(v)) and flag (u(1)) primitives
+// used by the WRITE_* macros, plus traced variants under ENC_DEC_TRACE.
+class SyntaxElementWriter
+{
+protected:
+  TComBitIf*    m_pcBitIf;    // destination bitstream; not owned
+
+  SyntaxElementWriter()
+  :m_pcBitIf(NULL)
+  {};
+  virtual ~SyntaxElementWriter() {};
+
+  // Sets the output bitstream; must be called before any xWrite*.
+  Void  setBitstream          ( TComBitIf* p )  { m_pcBitIf = p;  }
+
+  Void  xWriteCode            ( UInt uiCode, UInt uiLength );
+  Void  xWriteUvlc            ( UInt uiCode );
+  Void  xWriteSvlc            ( Int  iCode   );
+  Void  xWriteFlag            ( UInt uiCode );
+#if ENC_DEC_TRACE
+  Void  xWriteCodeTr          ( UInt value, UInt  length, const Char *pSymbolName);
+  Void  xWriteUvlcTr          ( UInt value,               const Char *pSymbolName);
+  Void  xWriteSvlcTr          ( Int  value,               const Char *pSymbolName);
+  Void  xWriteFlagTr          ( UInt value,               const Char *pSymbolName);
+#endif
+
+  // Maps a signed value to the se(v) codeNum: v<=0 -> -2v, v>0 -> 2v-1.
+  // NOTE(review): relies on signed shifts; overflows for iValue == INT_MIN
+  // (presumably never produced by the encoder) -- confirm callers' ranges.
+  UInt  xConvertToUInt        ( Int iValue ) {  return ( iValue <= 0) ? -iValue<<1 : (iValue<<1)-1; }
+};
+
+//! \}
+
+#endif // !defined(__SYNTAXELEMENTWRITER__)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncAnalyze.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,54 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncAnalyze.cpp
+    \brief    encoder analyzer class
+*/
+
+#include "TEncAnalyze.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+//////////////////////////////////////////////////////////////////////
+// Construction/Destruction
+//////////////////////////////////////////////////////////////////////
+
+// Global analyzer instances: overall, per-slice-type (I/P/B), and a separate
+// accumulator (m_gcAnalyzeAll_in). NOTE(review): the m_ prefix conventionally
+// marks members, not globals -- kept for compatibility with existing callers.
+TEncAnalyze             m_gcAnalyzeAll;
+TEncAnalyze             m_gcAnalyzeI;
+TEncAnalyze             m_gcAnalyzeP;
+TEncAnalyze             m_gcAnalyzeB;
+
+TEncAnalyze             m_gcAnalyzeAll_in;
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncAnalyze.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,366 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncAnalyze.h
+    \brief    encoder analyzer class (header)
+*/
+
+#ifndef __TENCANALYZE__
+#define __TENCANALYZE__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include <stdio.h>
+#include <memory.h>
+#include <assert.h>
+#include "TLibCommon/CommonDef.h"
+#include "TLibCommon/TComChromaFormat.h"
+#include "math.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// encoder analyzer class
+class TEncAnalyze      // accumulates per-frame PSNR/MSE/bit statistics and prints averages
+{
+private:
+  Double    m_dPSNRSum[MAX_NUM_COMPONENT];   // per-component PSNR summed over all added frames
+  Double    m_dAddBits;                      // total bits summed over all added frames
+  UInt      m_uiNumPic;                      // number of frames added so far
+  Double    m_dFrmRate; //--CFG_KDY          // frame rate, used only to scale bits into kbit/s
+  Double    m_MSEyuvframe[MAX_NUM_COMPONENT]; // sum of MSEs
+
+public:
+  virtual ~TEncAnalyze()  {}
+  TEncAnalyze() { clear(); }                 // start from zeroed statistics
+
+  Void  addResult( Double psnr[MAX_NUM_COMPONENT], Double bits, const Double MSEyuvframe[MAX_NUM_COMPONENT])  // accumulate one frame's PSNR, bits and per-component MSE
+  {
+    m_dAddBits  += bits;
+    for(UInt i=0; i<MAX_NUM_COMPONENT; i++)
+    {
+      m_dPSNRSum[i] += psnr[i];
+      m_MSEyuvframe[i] += MSEyuvframe[i];
+    }
+
+    m_uiNumPic++;
+  }
+
+  Double  getPsnr(ComponentID compID) const { return  m_dPSNRSum[compID];  }  // PSNR *sum*; divide by getNumPic() for the average
+  Double  getBits()                   const { return  m_dAddBits;   }         // total bits accumulated
+  Void    setBits(Double numBits)     { m_dAddBits=numBits; }
+  UInt    getNumPic()                 const { return  m_uiNumPic;   }
+
+  Void    setFrmRate  (Double dFrameRate) { m_dFrmRate = dFrameRate; } //--CFG_KDY
+  Void    clear()  // reset all accumulated statistics to zero
+  {
+    m_dAddBits = 0;
+    for(UInt i=0; i<MAX_NUM_COMPONENT; i++)
+    {
+      m_dPSNRSum[i] = 0;
+      m_MSEyuvframe[i] = 0;
+    }
+    m_uiNumPic = 0;
+  }
+
+
+  Void calculateCombinedValues(const ChromaFormat chFmt, Double &PSNRyuv, Double &MSEyuv)  // combined YUV MSE/PSNR, weighted by chroma subsampling
+  {
+    MSEyuv    = 0;
+    Int scale = 0;
+
+    Int maximumBitDepth = g_bitDepth[0];  // maximum bit depth over the luma/chroma channel types
+    for (UInt channelTypeIndex = 1; channelTypeIndex < MAX_NUM_CHANNEL_TYPE; channelTypeIndex++)
+      if (g_bitDepth[channelTypeIndex] > maximumBitDepth)
+        maximumBitDepth = g_bitDepth[channelTypeIndex];
+
+    const UInt maxval                = 255 << (maximumBitDepth - 8);  // same 255-based convention as the NOTE in printOut() below
+    const UInt numberValidComponents = getNumberValidComponents(chFmt);
+
+    for (UInt comp=0; comp<numberValidComponents; comp++)
+    {
+      const ComponentID compID        = ComponentID(comp);
+      const UInt        csx           = getComponentScaleX(compID, chFmt);
+      const UInt        csy           = getComponentScaleY(compID, chFmt);
+      const Int         scaleChan     = (4>>(csx+csy));  // component weight: 4 for luma, 1 for 4:2:0 chroma, 2 for 4:2:2 chroma
+      const UInt        bitDepthShift = 2 * (maximumBitDepth - g_bitDepth[toChannelType(compID)]); //*2 because this is a squared number
+
+      const Double      channelMSE    = (m_MSEyuvframe[compID] * Double(1 << bitDepthShift)) / Double(getNumPic());
+
+      scale  += scaleChan;
+      MSEyuv += scaleChan * channelMSE;
+    }
+
+    MSEyuv /= Double(scale);  // i.e. divide by 6 for 4:2:0, 8 for 4:2:2 etc.
+    PSNRyuv = (MSEyuv==0 ? 999.99 : 10*log10((maxval*maxval)/MSEyuv));  // 999.99 is the lossless sentinel
+  }
+
+
+  Void    printOut ( Char cDelim, const ChromaFormat chFmt, const Bool printMSEBasedSNR, const Bool printSequenceMSE )  // print average statistics to stdout; cDelim tags the slice type column
+  {
+    Double dFps     =   m_dFrmRate; //--CFG_KDY
+    Double dScale   = dFps / 1000 / (Double)m_uiNumPic;  // converts summed bits to kbit/s (NaN when no frames, by design — see below)
+
+    Double MSEBasedSNR[MAX_NUM_COMPONENT];
+    if (printMSEBasedSNR)
+    {
+      for (UInt componentIndex = 0; componentIndex < MAX_NUM_COMPONENT; componentIndex++)
+      {
+        const ComponentID compID = ComponentID(componentIndex);
+
+        if (getNumPic() == 0) MSEBasedSNR[compID] = 0 * dScale; // this is the same calculation that will be evaluated for any other statistic when there are no frames (it should result in NaN). We use it here so all the output is consistent.
+        else
+        {
+          //NOTE: this is not the true maximum value for any bitDepth other than 8. It comes from the original HM PSNR calculation
+          const UInt maxval = 255 << (g_bitDepth[toChannelType(compID)] - 8);
+          const Double MSE = m_MSEyuvframe[compID];
+
+          MSEBasedSNR[compID] = (MSE == 0) ? 999.99 : (10 * log10((maxval * maxval) / (MSE / (Double)getNumPic())));
+        }
+      }
+    }
+
+    switch (chFmt)
+    {
+      case CHROMA_400:  // monochrome: luma columns only
+        if (printMSEBasedSNR)
+        {
+          printf( "         \tTotal Frames |   "   "Bitrate     "  "Y-PSNR" );
+
+          if (printSequenceMSE) printf( "    Y-MSE\n" );
+          else printf("\n");
+
+          //printf( "\t------------ "  " ----------"   " -------- "  " -------- "  " --------\n" );
+          printf( "Average: \t %8d    %c "          "%12.4lf  "    "%8.4lf",
+                 getNumPic(), cDelim,
+                 getBits() * dScale,
+                 getPsnr(COMPONENT_Y) / (Double)getNumPic() );
+
+          if (printSequenceMSE)
+          {
+            printf( "  %8.4lf\n", m_MSEyuvframe[COMPONENT_Y ] / (Double)getNumPic() );
+          }
+          else printf("\n");
+
+          printf( "From MSE:\t %8d    %c "          "%12.4lf  "    "%8.4lf\n",
+                 getNumPic(), cDelim,
+                 getBits() * dScale,
+                 MSEBasedSNR[COMPONENT_Y] );
+        }
+        else
+        {
+          printf( "\tTotal Frames |   "   "Bitrate     "  "Y-PSNR" );
+
+          if (printSequenceMSE) printf( "    Y-MSE\n" );
+          else printf("\n");
+
+          //printf( "\t------------ "  " ----------"   " -------- "  " -------- "  " --------\n" );
+          printf( "\t %8d    %c "          "%12.4lf  "    "%8.4lf",
+                 getNumPic(), cDelim,
+                 getBits() * dScale,
+                 getPsnr(COMPONENT_Y) / (Double)getNumPic() );
+
+          if (printSequenceMSE)
+          {
+            printf( "  %8.4lf\n", m_MSEyuvframe[COMPONENT_Y ] / (Double)getNumPic() );
+          }
+          else printf("\n");
+        }
+        break;
+      case CHROMA_420:
+      case CHROMA_422:
+      case CHROMA_444:  // colour formats: luma, chroma and combined YUV columns
+        {
+          Double PSNRyuv = MAX_DOUBLE;
+          Double MSEyuv  = MAX_DOUBLE;
+          
+          calculateCombinedValues(chFmt, PSNRyuv, MSEyuv);
+
+          if (printMSEBasedSNR)
+          {
+            printf( "         \tTotal Frames |   "   "Bitrate     "  "Y-PSNR    "  "U-PSNR    "  "V-PSNR    "  "YUV-PSNR " );
+
+            if (printSequenceMSE) printf( " Y-MSE     "  "U-MSE     "  "V-MSE    "  "YUV-MSE \n" );
+            else printf("\n");
+
+            //printf( "\t------------ "  " ----------"   " -------- "  " -------- "  " --------\n" );
+            printf( "Average: \t %8d    %c "          "%12.4lf  "    "%8.4lf  "   "%8.4lf  "    "%8.4lf  "   "%8.4lf",
+                   getNumPic(), cDelim,
+                   getBits() * dScale,
+                   getPsnr(COMPONENT_Y) / (Double)getNumPic(),
+                   getPsnr(COMPONENT_Cb) / (Double)getNumPic(),
+                   getPsnr(COMPONENT_Cr) / (Double)getNumPic(),
+                   PSNRyuv );
+
+            if (printSequenceMSE)
+            {
+              printf( "  %8.4lf  "   "%8.4lf  "    "%8.4lf  "   "%8.4lf\n",
+                     m_MSEyuvframe[COMPONENT_Y ] / (Double)getNumPic(),
+                     m_MSEyuvframe[COMPONENT_Cb] / (Double)getNumPic(),
+                     m_MSEyuvframe[COMPONENT_Cr] / (Double)getNumPic(),
+                     MSEyuv );
+            }
+            else printf("\n");
+
+            printf( "From MSE:\t %8d    %c "          "%12.4lf  "    "%8.4lf  "   "%8.4lf  "    "%8.4lf  "   "%8.4lf\n",
+                   getNumPic(), cDelim,
+                   getBits() * dScale,
+                   MSEBasedSNR[COMPONENT_Y],
+                   MSEBasedSNR[COMPONENT_Cb],
+                   MSEBasedSNR[COMPONENT_Cr],
+                   PSNRyuv );
+          }
+          else
+          {
+            printf( "\tTotal Frames |   "   "Bitrate     "  "Y-PSNR    "  "U-PSNR    "  "V-PSNR    "  "YUV-PSNR " );
+            
+            if (printSequenceMSE) printf( " Y-MSE     "  "U-MSE     "  "V-MSE    "  "YUV-MSE \n" );
+            else printf("\n");
+
+            //printf( "\t------------ "  " ----------"   " -------- "  " -------- "  " --------\n" );
+            printf( "\t %8d    %c "          "%12.4lf  "    "%8.4lf  "   "%8.4lf  "    "%8.4lf  "   "%8.4lf",
+                   getNumPic(), cDelim,
+                   getBits() * dScale,
+                   getPsnr(COMPONENT_Y) / (Double)getNumPic(),
+                   getPsnr(COMPONENT_Cb) / (Double)getNumPic(),
+                   getPsnr(COMPONENT_Cr) / (Double)getNumPic(),
+                   PSNRyuv );
+
+            if (printSequenceMSE)
+            {
+              printf( "  %8.4lf  "   "%8.4lf  "    "%8.4lf  "   "%8.4lf\n",
+                     m_MSEyuvframe[COMPONENT_Y ] / (Double)getNumPic(),
+                     m_MSEyuvframe[COMPONENT_Cb] / (Double)getNumPic(),
+                     m_MSEyuvframe[COMPONENT_Cr] / (Double)getNumPic(),
+                     MSEyuv );
+            }
+            else printf("\n");
+          }
+        }
+        break;
+      default:
+        fprintf(stderr, "Unknown format during print out\n");
+        exit(1);
+        break;
+    }
+  }
+
+
+  Void    printSummary(const ChromaFormat chFmt, const Bool printSequenceMSE, Char ch='T')  // append one summary line to summaryTotal.txt / summary_{I,P,B}.txt
+  {
+    FILE* pFile = NULL;  // NOTE(review): the fopen() results below are never checked — fprintf(NULL, ...) is undefined behaviour if the file cannot be opened
+
+    switch( ch )
+    {
+      case 'T':
+        pFile = fopen ("summaryTotal.txt", "at");
+        break;
+      case 'I':
+        pFile = fopen ("summary_I.txt", "at");
+        break;
+      case 'P':
+        pFile = fopen ("summary_P.txt", "at");
+        break;
+      case 'B':
+        pFile = fopen ("summary_B.txt", "at");
+        break;
+      default:
+        assert(0);
+        return;
+        break;
+    }
+
+    Double dFps     =   m_dFrmRate; //--CFG_KDY
+    Double dScale   = dFps / 1000 / (Double)m_uiNumPic;  // converts summed bits to kbit/s
+    switch (chFmt)
+    {
+      case CHROMA_400:
+        fprintf(pFile, "%f\t %f\n",
+            getBits() * dScale,
+            getPsnr(COMPONENT_Y) / (Double)getNumPic() );
+        break;
+      case CHROMA_420:
+      case CHROMA_422:
+      case CHROMA_444:
+        {
+          Double PSNRyuv = MAX_DOUBLE;
+          Double MSEyuv  = MAX_DOUBLE;
+          
+          calculateCombinedValues(chFmt, PSNRyuv, MSEyuv);
+
+          fprintf(pFile, "%f\t %f\t %f\t %f\t %f",
+              getBits() * dScale,
+              getPsnr(COMPONENT_Y) / (Double)getNumPic(),
+              getPsnr(COMPONENT_Cb) / (Double)getNumPic(),
+              getPsnr(COMPONENT_Cr) / (Double)getNumPic(),
+              PSNRyuv );
+
+          if (printSequenceMSE)
+          {
+            fprintf(pFile, "\t %f\t %f\t %f\t %f\n",
+                m_MSEyuvframe[COMPONENT_Y ] / (Double)getNumPic(),
+                m_MSEyuvframe[COMPONENT_Cb] / (Double)getNumPic(),
+                m_MSEyuvframe[COMPONENT_Cr] / (Double)getNumPic(),
+                MSEyuv );
+          }
+          else fprintf(pFile, "\n");
+
+          break;
+        }
+
+      default:
+          fprintf(stderr, "Unknown format during print out\n");
+          exit(1);
+          break;
+    }
+
+    fclose(pFile);
+  }
+};
+
+extern TEncAnalyze             m_gcAnalyzeAll;  // global accumulators defined in TEncAnalyze.cpp (globals despite the m_ prefix)
+extern TEncAnalyze             m_gcAnalyzeI;
+extern TEncAnalyze             m_gcAnalyzeP;
+extern TEncAnalyze             m_gcAnalyzeB;
+
+extern TEncAnalyze             m_gcAnalyzeAll_in;
+
+//! \}
+
+#endif // __TENCANALYZE__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncBinCoder.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,83 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncBinCoder.h
+    \brief    binary entropy encoder interface
+*/
+
+#ifndef __TENCBINCODER__
+#define __TENCBINCODER__
+
+#include "TLibCommon/ContextModel.h"
+#include "TLibCommon/TComBitStream.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+class TEncBinCABAC;
+
+class TEncBinIf  // abstract binary-coder interface, implemented by TEncBinCABAC (and its counting variant)
+{
+public:
+  virtual Void  init              ( TComBitIf* pcTComBitIf )                  = 0;  // attach output bitstream (not owned)
+  virtual Void  uninit            ()                                          = 0;  // detach output bitstream
+
+  virtual Void  start             ()                                          = 0;  // reset coder state
+  virtual Void  finish            ()                                          = 0;  // flush pending bits to the bitstream
+  virtual Void  copyState         ( const TEncBinIf* pcTEncBinIf )            = 0;  // clone another coder's state
+  virtual Void  flush            ()                                           = 0;  // terminate, byte-align and restart
+
+  virtual Void  resetBac          ()                                          = 0;
+  virtual Void  encodePCMAlignBits()                                          = 0;  // stop bit + zero padding before PCM samples
+  virtual Void  xWritePCMCode     ( UInt uiCode, UInt uiLength )              = 0;  // raw fixed-length write, bypassing the arithmetic coder
+
+  virtual Void  resetBits         ()                                          = 0;  // discard pending output bits
+  virtual UInt  getNumWrittenBits ()                                          = 0;
+
+  virtual Void  encodeBin         ( UInt  uiBin,  ContextModel& rcCtxModel )  = 0;  // context-coded bin
+  virtual Void  encodeBinEP       ( UInt  uiBin                            )  = 0;  // equiprobable (bypass) bin
+  virtual Void  encodeBinsEP      ( UInt  uiBins, Int numBins              )  = 0;  // multiple bypass bins, most-significant first
+  virtual Void  encodeBinTrm      ( UInt  uiBin                            )  = 0;  // terminating bin
+
+  virtual Void  align             ()                                          = 0;
+
+  virtual TEncBinCABAC*   getTEncBinCABAC   ()  { return 0; }                       // overridden by TEncBinCABAC to return itself
+  virtual const TEncBinCABAC*   getTEncBinCABAC   () const { return 0; }
+
+  virtual ~TEncBinIf() {}
+};
+
+//! \}
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncBinCoderCABAC.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,440 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncBinCoderCABAC.cpp
+    \brief    binary entropy encoder of CABAC
+*/
+
+#include "TEncBinCoderCABAC.h"
+#include "TLibCommon/TComRom.h"
+#include "TLibCommon/Debug.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+
+TEncBinCABAC::TEncBinCABAC()   // construct with no output bitstream attached and bin counting disabled
+: m_pcTComBitIf( 0 )
+, m_binCountIncrement( 0 )
+#if FAST_BIT_EST
+, m_fracBits( 0 )
+#endif
+{
+}
+
+TEncBinCABAC::~TEncBinCABAC()  // nothing owned; the bitstream is not ours to free
+{
+}
+
+Void TEncBinCABAC::init( TComBitIf* pcTComBitIf )  // attach the output bitstream (pointer is borrowed, not owned)
+{
+  m_pcTComBitIf = pcTComBitIf;
+}
+
+Void TEncBinCABAC::uninit()  // detach the output bitstream
+{
+  m_pcTComBitIf = 0;
+}
+
+Void TEncBinCABAC::start()     // reset the arithmetic-coder registers to their initial values
+{
+  m_uiLow            = 0;
+  m_uiRange          = 510;    // initial interval range
+  m_bitsLeft         = 23;     // spare bit capacity in m_uiLow before a byte must be emitted
+  m_numBufferedBytes = 0;
+  m_bufferedByte     = 0xff;   // sentinel: no byte buffered yet
+#if FAST_BIT_EST
+  m_fracBits         = 0;
+#endif
+}
+
+Void TEncBinCABAC::finish()    // flush buffered bytes and remaining low-register bits, resolving any pending carry
+{
+  if ( m_uiLow >> ( 32 - m_bitsLeft ) )  // carry bit pending above the active low-register bits?
+  {
+    //assert( m_numBufferedBytes > 0 );
+    //assert( m_bufferedByte != 0xff );
+    m_pcTComBitIf->write( m_bufferedByte + 1, 8 );  // propagate carry into the held-back byte
+    while ( m_numBufferedBytes > 1 )
+    {
+      m_pcTComBitIf->write( 0x00, 8 );  // buffered 0xff bytes become 0x00 after the carry
+      m_numBufferedBytes--;
+    }
+    m_uiLow -= 1 << ( 32 - m_bitsLeft );  // remove the carried-out bit
+  }
+  else
+  {
+    if ( m_numBufferedBytes > 0 )
+    {
+      m_pcTComBitIf->write( m_bufferedByte, 8 );  // no carry: emit the held-back byte as-is
+    }
+    while ( m_numBufferedBytes > 1 )
+    {
+      m_pcTComBitIf->write( 0xff, 8 );  // buffered 0xff bytes stay 0xff
+      m_numBufferedBytes--;
+    }
+  }
+  m_pcTComBitIf->write( m_uiLow >> 8, 24 - m_bitsLeft );  // emit the final partial bits of the low register
+}
+
+Void TEncBinCABAC::flush()     // terminate the current stream, byte-align it, then restart the coder
+{
+  encodeBinTrm(1);             // terminating bin = 1 marks the end
+  finish();
+  m_pcTComBitIf->write(1, 1);  // stop bit
+  m_pcTComBitIf->writeAlignZero();
+
+  start();
+}
+
+/** Reset BAC register and counter values.
+ * \returns Void
+ */
+Void TEncBinCABAC::resetBac()
+{
+  start();                     // identical to starting a fresh stream
+}
+
+/** Encode PCM alignment zero bits: flush the coder, write a stop bit, then pad to a byte boundary.
+ * \returns Void
+ */
+Void TEncBinCABAC::encodePCMAlignBits()
+{
+  finish();
+  m_pcTComBitIf->write(1, 1);
+  m_pcTComBitIf->writeAlignZero(); // pcm align zero
+}
+
+/** Write a PCM code directly to the bitstream, bypassing the arithmetic coder.
+ * \param uiCode code value
+ * \param uiLength code bit-depth
+ * \returns Void
+ */
+Void TEncBinCABAC::xWritePCMCode(UInt uiCode, UInt uiLength)
+{
+  m_pcTComBitIf->write(uiCode, uiLength);
+}
+
+Void TEncBinCABAC::copyState( const TEncBinIf* pcTEncBinIf )  // clone the full arithmetic-coder state from another coder
+{
+  const TEncBinCABAC* pcTEncBinCABAC = pcTEncBinIf->getTEncBinCABAC();  // NOTE(review): returns 0 for a non-CABAC source — assumed never to happen here
+  m_uiLow           = pcTEncBinCABAC->m_uiLow;
+  m_uiRange         = pcTEncBinCABAC->m_uiRange;
+  m_bitsLeft        = pcTEncBinCABAC->m_bitsLeft;
+  m_bufferedByte    = pcTEncBinCABAC->m_bufferedByte;
+  m_numBufferedBytes = pcTEncBinCABAC->m_numBufferedBytes;
+#if FAST_BIT_EST
+  m_fracBits = pcTEncBinCABAC->m_fracBits;
+#endif
+}
+
+Void TEncBinCABAC::resetBits()  // discard pending output bits; m_uiRange is deliberately left untouched
+{
+  m_uiLow            = 0;
+  m_bitsLeft         = 23;
+  m_numBufferedBytes = 0;
+  m_bufferedByte     = 0xff;   // sentinel: no byte buffered
+  if ( m_binCountIncrement )
+  {
+    m_uiBinsCoded = 0;
+  }
+#if FAST_BIT_EST
+  m_fracBits &= 32767;         // keep only the fractional (sub-bit) part of the estimate
+#endif
+}
+
+UInt TEncBinCABAC::getNumWrittenBits()  // bits already written + buffered bytes + bits pending in the low register
+{
+  return m_pcTComBitIf->getNumberOfWrittenBits() + 8 * m_numBufferedBytes + 23 - m_bitsLeft;
+}
+
+/**
+ * \brief Encode a context-coded bin
+ *
+ * \param binValue   bin value
+ * \param rcCtxModel context model; its state and MPS are updated in place
+ */
+Void TEncBinCABAC::encodeBin( UInt binValue, ContextModel &rcCtxModel )
+{
+  //{
+  //  DTRACE_CABAC_VL( g_nSymbolCounter++ )
+  //  DTRACE_CABAC_T( "\tstate=" )
+  //  DTRACE_CABAC_V( ( rcCtxModel.getState() << 1 ) + rcCtxModel.getMps() )
+  //  DTRACE_CABAC_T( "\tsymbol=" )
+  //  DTRACE_CABAC_V( binValue )
+  //  DTRACE_CABAC_T( "\n" )
+  //}
+
+#ifdef DEBUG_CABAC_BINS
+  const UInt startingRange = m_uiRange;
+#endif
+
+  m_uiBinsCoded += m_binCountIncrement;
+  rcCtxModel.setBinsCoded( 1 );
+
+  UInt  uiLPS   = TComCABACTables::sm_aucLPSTable[ rcCtxModel.getState() ][ ( m_uiRange >> 6 ) & 3 ];  // LPS sub-range from state and quantised range
+  m_uiRange    -= uiLPS;                                                                               // MPS keeps the remaining range
+
+  if( binValue != rcCtxModel.getMps() )  // LPS path: table-driven renormalisation
+  {
+    Int numBits = TComCABACTables::sm_aucRenormTable[ uiLPS >> 3 ];
+    m_uiLow     = ( m_uiLow + m_uiRange ) << numBits;
+    m_uiRange   = uiLPS << numBits;
+    rcCtxModel.updateLPS();
+    m_bitsLeft -= numBits;
+    testAndWriteOut();
+  }
+  else  // MPS path: renormalises by at most one bit
+  {
+    rcCtxModel.updateMPS();
+
+    if ( m_uiRange < 256 )
+    {
+      m_uiLow <<= 1;
+      m_uiRange <<= 1;
+      m_bitsLeft--;
+      testAndWriteOut();
+    }
+  }
+
+#ifdef DEBUG_CABAC_BINS
+  if ((g_debugCounter + debugCabacBinWindow) >= debugCabacBinTargetLine)
+    std::cout << g_debugCounter << ": coding bin value " << binValue << ", range = [" << startingRange << "->" << m_uiRange << "]\n";
+
+  if (g_debugCounter >= debugCabacBinTargetLine)
+  {
+    Char breakPointThis;
+    breakPointThis = 7;
+  }
+  if (g_debugCounter >= (debugCabacBinTargetLine + debugCabacBinWindow)) exit(0);
+  g_debugCounter++;
+#endif
+}
+
+/**
+ * \brief Encode an equiprobable (bypass) bin
+ *
+ * \param binValue bin value
+ */
+Void TEncBinCABAC::encodeBinEP( UInt binValue )
+{
+  if (false)  // disabled debug tracing
+  {
+    DTRACE_CABAC_VL( g_nSymbolCounter++ )
+    DTRACE_CABAC_T( "\tEPsymbol=" )
+    DTRACE_CABAC_V( binValue )
+    DTRACE_CABAC_T( "\n" )
+  }
+
+  m_uiBinsCoded += m_binCountIncrement;
+
+  if (m_uiRange == 256)  // aligned mode: one bypass bin maps to exactly one output bit
+  {
+    encodeAlignedBinsEP(binValue, 1);
+    return;
+  }
+
+  m_uiLow <<= 1;
+  if( binValue )
+  {
+    m_uiLow += m_uiRange;  // bypass bin: symbol boundary at range/2 before the shift
+  }
+  m_bitsLeft--;
+
+  testAndWriteOut();
+}
+
+/**
+ * \brief Encode equiprobable (bypass) bins, most-significant bin first
+ *
+ * \param binValues bin values
+ * \param numBins number of bins
+ */
+Void TEncBinCABAC::encodeBinsEP( UInt binValues, Int numBins )
+{
+  m_uiBinsCoded += numBins & -m_binCountIncrement;  // adds numBins when counting is enabled, 0 otherwise
+
+  if (false)  // disabled debug tracing
+  {
+    for ( Int i = 0; i < numBins; i++ )
+    {
+      DTRACE_CABAC_VL( g_nSymbolCounter++ )
+      DTRACE_CABAC_T( "\tEPsymbol=" )
+      DTRACE_CABAC_V( ( binValues >> ( numBins - 1 - i ) ) & 1 )
+      DTRACE_CABAC_T( "\n" )
+    }
+  }
+
+  if (m_uiRange == 256)  // aligned mode: take the byte-oriented fast path
+  {
+    encodeAlignedBinsEP(binValues, numBins);
+    return;
+  }
+
+  while ( numBins > 8 )   // consume 8 most-significant bins per iteration
+  {
+    numBins -= 8;
+    UInt pattern = binValues >> numBins;  // next 8 bins
+    m_uiLow <<= 8;
+    m_uiLow += m_uiRange * pattern;
+    binValues -= pattern << numBins;      // strip the bins just coded
+    m_bitsLeft -= 8;
+
+    testAndWriteOut();
+  }
+
+  m_uiLow <<= numBins;   // remaining (<= 8) bins in one step
+  m_uiLow += m_uiRange * binValues;
+  m_bitsLeft -= numBins;
+
+  testAndWriteOut();
+}
+
+Void TEncBinCABAC::align()     // force range to 256 so subsequent bypass bins map to whole output bits
+{
+  m_uiRange = 256;
+}
+
+Void TEncBinCABAC::encodeAlignedBinsEP( UInt binValues, Int numBins )  // bypass bins in the aligned (range == 256) mode, up to 8 at a time
+{
+  Int binsRemaining = numBins;
+
+  assert(m_uiRange == 256); //aligned encode only works when range = 256
+
+  while (binsRemaining > 0)
+  {
+    const UInt binsToCode = std::min<UInt>(binsRemaining, 8); //code bytes if able to take advantage of the system's byte-write function
+    const UInt binMask    = (1 << binsToCode) - 1;
+
+    const UInt newBins = (binValues >> (binsRemaining - binsToCode)) & binMask;  // next most-significant group of bins
+
+    //The process of encoding an EP bin is the same as that of coding a normal
+    //bin where the symbol ranges for 1 and 0 are both half the range:
+    //
+    //  low = (low + range/2) << 1       (to encode a 1)
+    //  low =  low            << 1       (to encode a 0)
+    //
+    //  i.e.
+    //  low = (low + (bin * range/2)) << 1
+    //
+    //  which is equivalent to:
+    //
+    //  low = (low << 1) + (bin * range)
+    //
+    //  this can be generalised for multiple bins, producing the following expression:
+    //
+    m_uiLow = (m_uiLow << binsToCode) + (newBins << 8); //range is known to be 256
+
+    binsRemaining -= binsToCode;
+    m_bitsLeft    -= binsToCode;
+
+    testAndWriteOut();
+  }
+}
+
+/**
+ * \brief Encode terminating bin
+ *
+ * \param binValue bin value (1 terminates the slice/stream)
+ */
+Void TEncBinCABAC::encodeBinTrm( UInt binValue )
+{
+  m_uiBinsCoded += m_binCountIncrement;
+  m_uiRange -= 2;              // terminating symbol occupies a fixed sub-range of 2
+  if( binValue )
+  {
+    m_uiLow  += m_uiRange;
+    m_uiLow <<= 7;             // renormalise the 2-wide range in a single 7-bit step
+    m_uiRange = 2 << 7;
+    m_bitsLeft -= 7;
+  }
+  else if ( m_uiRange >= 256 )
+  {
+    return;                    // no renormalisation needed
+  }
+  else
+  {
+    m_uiLow   <<= 1;
+    m_uiRange <<= 1;
+    m_bitsLeft--;
+  }
+
+  testAndWriteOut();
+}
+
+Void TEncBinCABAC::testAndWriteOut()  // emit a byte once fewer than 12 spare bits remain in the low register
+{
+  if ( m_bitsLeft < 12 )
+  {
+    writeOut();
+  }
+}
+
+/**
+ * \brief Move bits from register into bitstream, holding bytes back until any carry is resolved
+ */
+Void TEncBinCABAC::writeOut()
+{
+  UInt leadByte = m_uiLow >> (24 - m_bitsLeft);  // completed byte, possibly with a 9th carry bit
+  m_bitsLeft += 8;
+  m_uiLow &= 0xffffffffu >> m_bitsLeft;          // drop the bits just extracted
+
+  if ( leadByte == 0xff )
+  {
+    m_numBufferedBytes++;                        // 0xff may still absorb a later carry, so defer output
+  }
+  else
+  {
+    if ( m_numBufferedBytes > 0 )
+    {
+      UInt carry = leadByte >> 8;                // carry bit propagating into the buffered bytes
+      UInt byte = m_bufferedByte + carry;
+      m_bufferedByte = leadByte & 0xff;
+      m_pcTComBitIf->write( byte, 8 );
+
+      byte = ( 0xff + carry ) & 0xff;            // buffered 0xff bytes become 0x00 on carry, stay 0xff otherwise
+      while ( m_numBufferedBytes > 1 )
+      {
+        m_pcTComBitIf->write( byte, 8 );
+        m_numBufferedBytes--;
+      }
+    }
+    else
+    {
+      m_numBufferedBytes = 1;                    // first byte: just hold it back
+      m_bufferedByte = leadByte;
+    }
+  }
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncBinCoderCABAC.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,108 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncBinCoderCABAC.h
+    \brief    binary entropy encoder of CABAC
+*/
+
+#ifndef __TENCBINCODERCABAC__
+#define __TENCBINCODERCABAC__
+
+#include "TLibCommon/TComCABACTables.h"
+#include "TEncBinCoder.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+class TEncBinCABAC : public TEncBinIf  // CABAC binary arithmetic encoder (see TEncBinCoderCABAC.cpp)
+{
+public:
+  TEncBinCABAC ();
+  virtual ~TEncBinCABAC();
+
+  Void  init              ( TComBitIf* pcTComBitIf );  // attach output bitstream (not owned)
+  Void  uninit            ();                          // detach output bitstream
+
+  Void  start             ();                          // reset coder state
+  Void  finish            ();                          // flush pending bits
+  Void  copyState         ( const TEncBinIf* pcTEncBinIf );
+  Void  flush            ();                           // terminate, byte-align and restart
+
+  Void  resetBac          ();
+  Void  encodePCMAlignBits();
+  Void  xWritePCMCode     ( UInt uiCode, UInt uiLength );
+
+  Void  resetBits         ();
+  UInt  getNumWrittenBits ();
+
+  Void  encodeBin         ( UInt  binValue,  ContextModel& rcCtxModel );  // context-coded bin
+  Void  encodeBinEP       ( UInt  binValue                            );  // equiprobable (bypass) bin
+  Void  encodeBinsEP      ( UInt  binValues, Int numBins              );  // bypass bins, most-significant first
+  Void  encodeBinTrm      ( UInt  binValue                            );  // terminating bin
+
+  Void  align             ();                                             // force range to 256
+  Void  encodeAlignedBinsEP( UInt  binValues, Int numBins             );  // bypass bins while range == 256
+
+  TEncBinCABAC* getTEncBinCABAC()  { return this; }
+  const TEncBinCABAC* getTEncBinCABAC() const { return this; }
+
+  Void  setBinsCoded              ( UInt uiVal )  { m_uiBinsCoded = uiVal;               }
+  UInt  getBinsCoded              ()              { return m_uiBinsCoded;                }
+  Void  setBinCountingEnableFlag  ( Bool bFlag )  { m_binCountIncrement = bFlag ? 1 : 0; }
+  Bool  getBinCountingEnableFlag  ()              { return m_binCountIncrement != 0;     }
+
+#if FAST_BIT_EST
+protected:
+#else
+private:
+#endif
+  Void testAndWriteOut();   // emit a byte when fewer than 12 spare bits remain
+  Void writeOut();          // move one completed byte into the bitstream
+
+  TComBitIf*          m_pcTComBitIf;        // output bitstream (borrowed)
+  UInt                m_uiLow;              // arithmetic-coder low register
+  UInt                m_uiRange;            // arithmetic-coder range register
+  UInt                m_bufferedByte;       // last byte held back for carry resolution
+  Int                 m_numBufferedBytes;   // bytes awaiting carry resolution
+  Int                 m_bitsLeft;           // spare bit capacity in m_uiLow
+  UInt                m_uiBinsCoded;        // bin counter (when enabled)
+  Int                 m_binCountIncrement;  // 1 when bin counting is enabled, else 0
+#if FAST_BIT_EST
+  UInt64 m_fracBits;                        // fractional-bit accumulator for fast bit estimation
+#endif
+};
+
+//! \}
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncBinCoderCABACCounter.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,139 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncBinCoderCABACCounter.cpp
+    \brief    bit-counting (fast estimation) variant of the CABAC binary entropy encoder
+*/
+
+#include "TEncBinCoderCABACCounter.h"
+#include "TLibCommon/TComRom.h"
+#include "TLibCommon/Debug.h"
+
+
+#if FAST_BIT_EST
+
+//! \ingroup TLibEncoder
+//! \{
+
+
+TEncBinCABACCounter::TEncBinCABACCounter()
+{
+}
+
+TEncBinCABACCounter::~TEncBinCABACCounter()
+{
+}
+
+Void TEncBinCABACCounter::finish()
+{
+  m_pcTComBitIf->write(0, UInt(m_fracBits >> 15) );
+  m_fracBits &= 32767;
+}
+
+UInt TEncBinCABACCounter::getNumWrittenBits()
+{
+  return m_pcTComBitIf->getNumberOfWrittenBits() + UInt( m_fracBits >> 15 );
+}
+
+/**
+ * \brief Encode bin
+ *
+ * \param binValue   bin value
+ * \param rcCtxModel context model
+ */
+Void TEncBinCABACCounter::encodeBin( UInt binValue, ContextModel &rcCtxModel )
+{
+#ifdef DEBUG_ENCODER_SEARCH_BINS
+  const UInt64 startingFracBits = m_fracBits;
+#endif
+
+  m_uiBinsCoded += m_binCountIncrement;
+  m_fracBits += rcCtxModel.getEntropyBits( binValue );
+  rcCtxModel.update( binValue );
+
+#ifdef DEBUG_ENCODER_SEARCH_BINS
+  if ((g_debugCounter + debugEncoderSearchBinWindow) >= debugEncoderSearchBinTargetLine)
+    std::cout << g_debugCounter << ": coding bin value " << binValue << ", fracBits = [" << startingFracBits << "->" << m_fracBits << "]\n";
+
+  if (g_debugCounter >= debugEncoderSearchBinTargetLine)
+  {
+    Char breakPointThis;
+    breakPointThis = 7;
+  }
+  if (g_debugCounter >= (debugEncoderSearchBinTargetLine + debugEncoderSearchBinWindow)) exit(0);
+  g_debugCounter++;
+#endif
+}
+
+/**
+ * \brief Encode equiprobable bin
+ *
+ * \param binValue bin value
+ */
+Void TEncBinCABACCounter::encodeBinEP( UInt binValue )
+{
+  m_uiBinsCoded += m_binCountIncrement;
+  m_fracBits += 32768;
+}
+
+/**
+ * \brief Encode equiprobable bins
+ *
+ * \param binValues bin values
+ * \param numBins number of bins
+ */
+Void TEncBinCABACCounter::encodeBinsEP( UInt binValues, Int numBins )
+{
+  m_uiBinsCoded += numBins & -m_binCountIncrement;
+  m_fracBits += 32768 * numBins;
+}
+
+/**
+ * \brief Encode terminating bin
+ *
+ * \param binValue bin value
+ */
+Void TEncBinCABACCounter::encodeBinTrm( UInt binValue )
+{
+  m_uiBinsCoded += m_binCountIncrement;
+  m_fracBits += ContextModel::getEntropyBitsTrm( binValue );
+}
+
+Void TEncBinCABACCounter::align()
+{
+  m_fracBits = (m_fracBits + 32767) & (~32767);
+}
+
+//! \}
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncBinCoderCABACCounter.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,74 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncBinCoderCABACCounter.h
+    \brief    bit-counting (fast estimation) variant of the CABAC binary entropy encoder
+*/
+
+#ifndef __TENCBINCODERCABACCOUNTER__
+#define __TENCBINCODERCABACCOUNTER__
+
+
+#include "TEncBinCoderCABAC.h"
+
+#if FAST_BIT_EST
+
+//! \ingroup TLibEncoder
+//! \{
+
+
+class TEncBinCABACCounter : public TEncBinCABAC
+{
+public:
+  TEncBinCABACCounter ();
+  virtual ~TEncBinCABACCounter();
+
+  Void  finish            ();
+  UInt  getNumWrittenBits ();
+
+  Void  encodeBin         ( UInt  binValue,  ContextModel& rcCtxModel );
+  Void  encodeBinEP       ( UInt  binValue                            );
+  Void  encodeBinsEP      ( UInt  binValues, Int numBins              );
+  Void  encodeBinTrm      ( UInt  binValue                            );
+
+  Void  align             ();
+
+private:
+};
+
+//! \}
+
+#endif
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncCavlc.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1517 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncCavlc.cpp
+    \brief    CAVLC encoder class
+*/
+
+#include "../TLibCommon/CommonDef.h"
+#include "TEncCavlc.h"
+#include "SEIwrite.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+#define SRPS_IN_SLICE 1
+
+#if ENC_DEC_TRACE
+
+Void  xTraceSPSHeader (TComSPS *pSPS)
+{
+  fprintf( g_hTrace, "=========== Sequence Parameter Set ID: %d ===========\n", pSPS->getSPSId() );
+}
+
+Void  xTracePPSHeader (TComPPS *pPPS)
+{
+  fprintf( g_hTrace, "=========== Picture Parameter Set ID: %d ===========\n", pPPS->getPPSId() );
+}
+
+Void  xTraceSliceHeader (TComSlice *pSlice)
+{
+  fprintf( g_hTrace, "=========== Slice ===========\n");
+}
+
+#endif
+
+
+
+// ====================================================================================================================
+// Constructor / destructor / create / destroy
+// ====================================================================================================================
+
+TEncCavlc::TEncCavlc()
+{
+  m_pcBitIf           = NULL;
+}
+
+TEncCavlc::~TEncCavlc()
+{
+}
+
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+Void TEncCavlc::resetEntropy()
+{
+}
+
+
+Void TEncCavlc::codeDFFlag(UInt uiCode, const Char *pSymbolName)
+{
+  WRITE_FLAG(uiCode, pSymbolName);
+}
+Void TEncCavlc::codeDFSvlc(Int iCode, const Char *pSymbolName)
+{
+  WRITE_SVLC(iCode, pSymbolName);
+}
+
+Void TEncCavlc::codeShortTermRefPicSet( TComSPS* pcSPS, TComReferencePictureSet* rps, Bool calledFromSliceHeader, Int idx)
+{
+#if PRINT_RPS_INFO
+  Int lastBits = getNumberOfWrittenBits();
+#endif
+  if (idx > 0)
+  {
+  WRITE_FLAG( rps->getInterRPSPrediction(), "inter_ref_pic_set_prediction_flag" ); // inter_RPS_prediction_flag
+  }
+  if (rps->getInterRPSPrediction())
+  {
+    Int deltaRPS = rps->getDeltaRPS();
+    if(calledFromSliceHeader)
+    {
+      WRITE_UVLC( rps->getDeltaRIdxMinus1(), "delta_idx_minus1" ); // delta index of the Reference Picture Set used for prediction minus 1
+    }
+
+    WRITE_CODE( (deltaRPS >=0 ? 0: 1), 1, "delta_rps_sign" ); //delta_rps_sign
+    WRITE_UVLC( abs(deltaRPS) - 1, "abs_delta_rps_minus1"); // absolute delta RPS minus 1
+
+    for(Int j=0; j < rps->getNumRefIdc(); j++)
+    {
+      Int refIdc = rps->getRefIdc(j);
+      WRITE_CODE( (refIdc==1? 1: 0), 1, "used_by_curr_pic_flag" ); //first bit is "1" if Idc is 1
+      if (refIdc != 1)
+      {
+        WRITE_CODE( refIdc>>1, 1, "use_delta_flag" ); //second bit is "1" if Idc is 2, "0" otherwise.
+      }
+    }
+  }
+  else
+  {
+    WRITE_UVLC( rps->getNumberOfNegativePictures(), "num_negative_pics" );
+    WRITE_UVLC( rps->getNumberOfPositivePictures(), "num_positive_pics" );
+    Int prev = 0;
+    for(Int j=0 ; j < rps->getNumberOfNegativePictures(); j++)
+    {
+      WRITE_UVLC( prev-rps->getDeltaPOC(j)-1, "delta_poc_s0_minus1" );
+      prev = rps->getDeltaPOC(j);
+      WRITE_FLAG( rps->getUsed(j), "used_by_curr_pic_s0_flag");
+    }
+    prev = 0;
+    for(Int j=rps->getNumberOfNegativePictures(); j < rps->getNumberOfNegativePictures()+rps->getNumberOfPositivePictures(); j++)
+    {
+      WRITE_UVLC( rps->getDeltaPOC(j)-prev-1, "delta_poc_s1_minus1" );
+      prev = rps->getDeltaPOC(j);
+      WRITE_FLAG( rps->getUsed(j), "used_by_curr_pic_s1_flag" );
+    }
+  }
+
+#if PRINT_RPS_INFO
+  printf("irps=%d (%2d bits) ", rps->getInterRPSPrediction(), getNumberOfWrittenBits() - lastBits);
+  rps->printDeltaPOC();
+#endif
+}
+
+
+Void TEncCavlc::codePPS( TComPPS* pcPPS )
+{
+#if ENC_DEC_TRACE
+  xTracePPSHeader (pcPPS);
+#endif
+
+  const UInt numberValidComponents = getNumberValidComponents(pcPPS->getSPS()->getChromaFormatIdc());
+
+  WRITE_UVLC( pcPPS->getPPSId(),                             "pps_pic_parameter_set_id" );
+  WRITE_UVLC( pcPPS->getSPSId(),                             "pps_seq_parameter_set_id" );
+  WRITE_FLAG( pcPPS->getDependentSliceSegmentsEnabledFlag()    ? 1 : 0, "dependent_slice_segments_enabled_flag" );
+  WRITE_FLAG( pcPPS->getOutputFlagPresentFlag() ? 1 : 0,     "output_flag_present_flag" );
+  WRITE_CODE( pcPPS->getNumExtraSliceHeaderBits(), 3,        "num_extra_slice_header_bits");
+  WRITE_FLAG( pcPPS->getSignHideFlag(), "sign_data_hiding_flag" );
+  WRITE_FLAG( pcPPS->getCabacInitPresentFlag() ? 1 : 0,   "cabac_init_present_flag" );
+  WRITE_UVLC( pcPPS->getNumRefIdxL0DefaultActive()-1,     "num_ref_idx_l0_default_active_minus1");
+  WRITE_UVLC( pcPPS->getNumRefIdxL1DefaultActive()-1,     "num_ref_idx_l1_default_active_minus1");
+
+  WRITE_SVLC( pcPPS->getPicInitQPMinus26(),                  "init_qp_minus26");
+  WRITE_FLAG( pcPPS->getConstrainedIntraPred() ? 1 : 0,      "constrained_intra_pred_flag" );
+  WRITE_FLAG( pcPPS->getUseTransformSkip() ? 1 : 0,  "transform_skip_enabled_flag" );
+  WRITE_FLAG( pcPPS->getUseDQP() ? 1 : 0, "cu_qp_delta_enabled_flag" );
+  if ( pcPPS->getUseDQP() )
+  {
+    WRITE_UVLC( pcPPS->getMaxCuDQPDepth(), "diff_cu_qp_delta_depth" );
+  }
+
+  WRITE_SVLC( COMPONENT_Cb<numberValidComponents ?  (pcPPS->getQpOffset(COMPONENT_Cb)) : 0, "pps_cb_qp_offset" );
+  WRITE_SVLC( COMPONENT_Cr<numberValidComponents ?  (pcPPS->getQpOffset(COMPONENT_Cr)) : 0, "pps_cr_qp_offset" );
+
+  assert(numberValidComponents <= 3); // if more than 3 components (eg 4:4:4:4), then additional offsets will have to go in extension area...
+
+  WRITE_FLAG( pcPPS->getSliceChromaQpFlag() ? 1 : 0,          "pps_slice_chroma_qp_offsets_present_flag" );
+
+  WRITE_FLAG( pcPPS->getUseWP() ? 1 : 0,  "weighted_pred_flag" );   // Use of Weighting Prediction (P_SLICE)
+  WRITE_FLAG( pcPPS->getWPBiPred() ? 1 : 0, "weighted_bipred_flag" );  // Use of Weighting Bi-Prediction (B_SLICE)
+  WRITE_FLAG( pcPPS->getTransquantBypassEnableFlag() ? 1 : 0, "transquant_bypass_enable_flag" );
+  WRITE_FLAG( pcPPS->getTilesEnabledFlag()             ? 1 : 0, "tiles_enabled_flag" );
+  WRITE_FLAG( pcPPS->getEntropyCodingSyncEnabledFlag() ? 1 : 0, "entropy_coding_sync_enabled_flag" );
+  if( pcPPS->getTilesEnabledFlag() )
+  {
+    WRITE_UVLC( pcPPS->getNumTileColumnsMinus1(),                                    "num_tile_columns_minus1" );
+    WRITE_UVLC( pcPPS->getNumTileRowsMinus1(),                                       "num_tile_rows_minus1" );
+    WRITE_FLAG( pcPPS->getTileUniformSpacingFlag(),                                  "uniform_spacing_flag" );
+    if( !pcPPS->getTileUniformSpacingFlag() )
+    {
+      for(UInt i=0; i<pcPPS->getNumTileColumnsMinus1(); i++)
+      {
+        WRITE_UVLC( pcPPS->getTileColumnWidth(i)-1,                                  "column_width_minus1" );
+      }
+      for(UInt i=0; i<pcPPS->getNumTileRowsMinus1(); i++)
+      {
+        WRITE_UVLC( pcPPS->getTileRowHeight(i)-1,                                    "row_height_minus1" );
+      }
+    }
+    if(pcPPS->getNumTileColumnsMinus1() !=0 || pcPPS->getNumTileRowsMinus1() !=0)
+    {
+      WRITE_FLAG( pcPPS->getLoopFilterAcrossTilesEnabledFlag()?1 : 0,          "loop_filter_across_tiles_enabled_flag");
+    }
+  }
+  WRITE_FLAG( pcPPS->getLoopFilterAcrossSlicesEnabledFlag()?1 : 0,        "loop_filter_across_slices_enabled_flag");
+  WRITE_FLAG( pcPPS->getDeblockingFilterControlPresentFlag()?1 : 0,       "deblocking_filter_control_present_flag");
+  if(pcPPS->getDeblockingFilterControlPresentFlag())
+  {
+    WRITE_FLAG( pcPPS->getDeblockingFilterOverrideEnabledFlag() ? 1 : 0,  "deblocking_filter_override_enabled_flag" );
+    WRITE_FLAG( pcPPS->getPicDisableDeblockingFilterFlag() ? 1 : 0,       "pps_disable_deblocking_filter_flag" );
+    if(!pcPPS->getPicDisableDeblockingFilterFlag())
+    {
+      WRITE_SVLC( pcPPS->getDeblockingFilterBetaOffsetDiv2(),             "pps_beta_offset_div2" );
+      WRITE_SVLC( pcPPS->getDeblockingFilterTcOffsetDiv2(),               "pps_tc_offset_div2" );
+    }
+  }
+  WRITE_FLAG( pcPPS->getScalingListPresentFlag() ? 1 : 0,                          "pps_scaling_list_data_present_flag" );
+  if( pcPPS->getScalingListPresentFlag() )
+  {
+    codeScalingList( m_pcSlice->getScalingList() );
+  }
+  WRITE_FLAG( pcPPS->getListsModificationPresentFlag(), "lists_modification_present_flag");
+  WRITE_UVLC( pcPPS->getLog2ParallelMergeLevelMinus2(), "log2_parallel_merge_level_minus2");
+  WRITE_FLAG( pcPPS->getSliceHeaderExtensionPresentFlag() ? 1 : 0, "slice_segment_header_extension_present_flag");
+
+  Bool pps_extension_present_flag=false;
+  Bool pps_extension_flags[NUM_PPS_EXTENSION_FLAGS]={false};
+
+  pps_extension_flags[PPS_EXT__REXT] = (
+             ( pcPPS->getUseTransformSkip() && (pcPPS->getTransformSkipLog2MaxSize() != 2))
+          || pcPPS->getUseCrossComponentPrediction()
+          || ( pcPPS->getChromaQpAdjTableSize() > 0 )
+          || ( pcPPS->getSaoOffsetBitShift(CHANNEL_TYPE_LUMA) !=0 ) || ( pcPPS->getSaoOffsetBitShift(CHANNEL_TYPE_CHROMA) !=0 )
+     )
+    ;
+
+  // Other PPS extension flags checked here.
+
+  for(Int i=0; i<NUM_PPS_EXTENSION_FLAGS; i++)
+  {
+    pps_extension_present_flag|=pps_extension_flags[i];
+  }
+
+  WRITE_FLAG( (pps_extension_present_flag?1:0), "pps_extension_present_flag" );
+
+  if (pps_extension_present_flag)
+  {
+    for(Int i=0; i<NUM_PPS_EXTENSION_FLAGS; i++)
+    {
+      WRITE_FLAG( pps_extension_flags[i]?1:0, "pps_extension_flag[]" );
+    }
+
+    for(Int i=0; i<NUM_PPS_EXTENSION_FLAGS; i++) // loop used so that the order is determined by the enum.
+    {
+      if (pps_extension_flags[i])
+      {
+        switch (PPSExtensionFlagIndex(i))
+        {
+          case PPS_EXT__REXT:
+
+            if (pcPPS->getUseTransformSkip())
+            {
+              WRITE_UVLC( pcPPS->getTransformSkipLog2MaxSize()-2,                 "log2_transform_skip_max_size_minus2");
+            }
+
+            WRITE_FLAG((pcPPS->getUseCrossComponentPrediction() ? 1 : 0),         "cross_component_prediction_flag" );
+
+            WRITE_FLAG(UInt(pcPPS->getChromaQpAdjTableSize() > 0),                "chroma_qp_adjustment_enabled_flag" );
+            if (pcPPS->getChromaQpAdjTableSize() > 0)
+            {
+              WRITE_UVLC(pcPPS->getMaxCuChromaQpAdjDepth(),                       "diff_cu_chroma_qp_adjustment_depth");
+              WRITE_UVLC(pcPPS->getChromaQpAdjTableSize() - 1,                    "chroma_qp_adjustment_table_size_minus1");
+              /* skip zero index */
+              for (Int chromaQpAdjustmentIndex = 1; chromaQpAdjustmentIndex <= pcPPS->getChromaQpAdjTableSize(); chromaQpAdjustmentIndex++)
+              {
+                WRITE_SVLC(pcPPS->getChromaQpAdjTableAt(chromaQpAdjustmentIndex).u.comp.CbOffset,     "cb_qp_adjustment[i]");
+                WRITE_SVLC(pcPPS->getChromaQpAdjTableAt(chromaQpAdjustmentIndex).u.comp.CrOffset,     "cr_qp_adjustment[i]");
+              }
+            }
+
+            WRITE_UVLC( pcPPS->getSaoOffsetBitShift(CHANNEL_TYPE_LUMA),           "sao_luma_bit_shift"   );
+            WRITE_UVLC( pcPPS->getSaoOffsetBitShift(CHANNEL_TYPE_CHROMA),         "sao_chroma_bit_shift" );
+            break;
+          default:
+            assert(pps_extension_flags[i]==false); // Should never get here with an active PPS extension flag.
+            break;
+        } // switch
+      } // if flag present
+    } // loop over PPS flags
+  } // pps_extension_present_flag is non-zero
+}
+
+Void TEncCavlc::codeVUI( TComVUI *pcVUI, TComSPS* pcSPS )
+{
+#if ENC_DEC_TRACE
+  fprintf( g_hTrace, "----------- vui_parameters -----------\n");
+#endif
+  WRITE_FLAG(pcVUI->getAspectRatioInfoPresentFlag(),            "aspect_ratio_info_present_flag");
+  if (pcVUI->getAspectRatioInfoPresentFlag())
+  {
+    WRITE_CODE(pcVUI->getAspectRatioIdc(), 8,                   "aspect_ratio_idc" );
+    if (pcVUI->getAspectRatioIdc() == 255)
+    {
+      WRITE_CODE(pcVUI->getSarWidth(), 16,                      "sar_width");
+      WRITE_CODE(pcVUI->getSarHeight(), 16,                     "sar_height");
+    }
+  }
+  WRITE_FLAG(pcVUI->getOverscanInfoPresentFlag(),               "overscan_info_present_flag");
+  if (pcVUI->getOverscanInfoPresentFlag())
+  {
+    WRITE_FLAG(pcVUI->getOverscanAppropriateFlag(),             "overscan_appropriate_flag");
+  }
+  WRITE_FLAG(pcVUI->getVideoSignalTypePresentFlag(),            "video_signal_type_present_flag");
+  if (pcVUI->getVideoSignalTypePresentFlag())
+  {
+    WRITE_CODE(pcVUI->getVideoFormat(), 3,                      "video_format");
+    WRITE_FLAG(pcVUI->getVideoFullRangeFlag(),                  "video_full_range_flag");
+    WRITE_FLAG(pcVUI->getColourDescriptionPresentFlag(),        "colour_description_present_flag");
+    if (pcVUI->getColourDescriptionPresentFlag())
+    {
+      WRITE_CODE(pcVUI->getColourPrimaries(), 8,                "colour_primaries");
+      WRITE_CODE(pcVUI->getTransferCharacteristics(), 8,        "transfer_characteristics");
+      WRITE_CODE(pcVUI->getMatrixCoefficients(), 8,             "matrix_coefficients");
+    }
+  }
+
+  WRITE_FLAG(pcVUI->getChromaLocInfoPresentFlag(),              "chroma_loc_info_present_flag");
+  if (pcVUI->getChromaLocInfoPresentFlag())
+  {
+    WRITE_UVLC(pcVUI->getChromaSampleLocTypeTopField(),         "chroma_sample_loc_type_top_field");
+    WRITE_UVLC(pcVUI->getChromaSampleLocTypeBottomField(),      "chroma_sample_loc_type_bottom_field");
+  }
+
+  WRITE_FLAG(pcVUI->getNeutralChromaIndicationFlag(),           "neutral_chroma_indication_flag");
+  WRITE_FLAG(pcVUI->getFieldSeqFlag(),                          "field_seq_flag");
+  WRITE_FLAG(pcVUI->getFrameFieldInfoPresentFlag(),             "frame_field_info_present_flag");
+
+  Window defaultDisplayWindow = pcVUI->getDefaultDisplayWindow();
+  WRITE_FLAG(defaultDisplayWindow.getWindowEnabledFlag(),       "default_display_window_flag");
+  if( defaultDisplayWindow.getWindowEnabledFlag() )
+  {
+    WRITE_UVLC(defaultDisplayWindow.getWindowLeftOffset()  / TComSPS::getWinUnitX(pcSPS->getChromaFormatIdc()), "def_disp_win_left_offset");
+    WRITE_UVLC(defaultDisplayWindow.getWindowRightOffset() / TComSPS::getWinUnitX(pcSPS->getChromaFormatIdc()), "def_disp_win_right_offset");
+    WRITE_UVLC(defaultDisplayWindow.getWindowTopOffset()   / TComSPS::getWinUnitY(pcSPS->getChromaFormatIdc()), "def_disp_win_top_offset");
+    WRITE_UVLC(defaultDisplayWindow.getWindowBottomOffset()/ TComSPS::getWinUnitY(pcSPS->getChromaFormatIdc()), "def_disp_win_bottom_offset");
+  }
+  TimingInfo *timingInfo = pcVUI->getTimingInfo();
+  WRITE_FLAG(timingInfo->getTimingInfoPresentFlag(),          "vui_timing_info_present_flag");
+  if(timingInfo->getTimingInfoPresentFlag())
+  {
+    WRITE_CODE(timingInfo->getNumUnitsInTick(), 32,           "vui_num_units_in_tick");
+    WRITE_CODE(timingInfo->getTimeScale(),      32,           "vui_time_scale");
+    WRITE_FLAG(timingInfo->getPocProportionalToTimingFlag(),  "vui_poc_proportional_to_timing_flag");
+    if(timingInfo->getPocProportionalToTimingFlag())
+    {
+      WRITE_UVLC(timingInfo->getNumTicksPocDiffOneMinus1(),   "vui_num_ticks_poc_diff_one_minus1");
+    }
+    WRITE_FLAG(pcVUI->getHrdParametersPresentFlag(),              "hrd_parameters_present_flag");
+    if( pcVUI->getHrdParametersPresentFlag() )
+    {
+      codeHrdParameters(pcVUI->getHrdParameters(), 1, pcSPS->getMaxTLayers() - 1 );
+    }
+  }
+
+  WRITE_FLAG(pcVUI->getBitstreamRestrictionFlag(),              "bitstream_restriction_flag");
+  if (pcVUI->getBitstreamRestrictionFlag())
+  {
+    WRITE_FLAG(pcVUI->getTilesFixedStructureFlag(),             "tiles_fixed_structure_flag");
+    WRITE_FLAG(pcVUI->getMotionVectorsOverPicBoundariesFlag(),  "motion_vectors_over_pic_boundaries_flag");
+    WRITE_FLAG(pcVUI->getRestrictedRefPicListsFlag(),           "restricted_ref_pic_lists_flag");
+    WRITE_UVLC(pcVUI->getMinSpatialSegmentationIdc(),           "min_spatial_segmentation_idc");
+    WRITE_UVLC(pcVUI->getMaxBytesPerPicDenom(),                 "max_bytes_per_pic_denom");
+    WRITE_UVLC(pcVUI->getMaxBitsPerMinCuDenom(),                "max_bits_per_mincu_denom");
+    WRITE_UVLC(pcVUI->getLog2MaxMvLengthHorizontal(),           "log2_max_mv_length_horizontal");
+    WRITE_UVLC(pcVUI->getLog2MaxMvLengthVertical(),             "log2_max_mv_length_vertical");
+  }
+}
+
+Void TEncCavlc::codeHrdParameters( TComHRD *hrd, Bool commonInfPresentFlag, UInt maxNumSubLayersMinus1 )
+{
+  if( commonInfPresentFlag )
+  {
+    WRITE_FLAG( hrd->getNalHrdParametersPresentFlag() ? 1 : 0 ,  "nal_hrd_parameters_present_flag" );
+    WRITE_FLAG( hrd->getVclHrdParametersPresentFlag() ? 1 : 0 ,  "vcl_hrd_parameters_present_flag" );
+    if( hrd->getNalHrdParametersPresentFlag() || hrd->getVclHrdParametersPresentFlag() )
+    {
+      WRITE_FLAG( hrd->getSubPicCpbParamsPresentFlag() ? 1 : 0,  "sub_pic_cpb_params_present_flag" );
+      if( hrd->getSubPicCpbParamsPresentFlag() )
+      {
+        WRITE_CODE( hrd->getTickDivisorMinus2(), 8,              "tick_divisor_minus2" );
+        WRITE_CODE( hrd->getDuCpbRemovalDelayLengthMinus1(), 5,  "du_cpb_removal_delay_length_minus1" );
+        WRITE_FLAG( hrd->getSubPicCpbParamsInPicTimingSEIFlag() ? 1 : 0, "sub_pic_cpb_params_in_pic_timing_sei_flag" );
+        WRITE_CODE( hrd->getDpbOutputDelayDuLengthMinus1(), 5,   "dpb_output_delay_du_length_minus1"  );
+      }
+      WRITE_CODE( hrd->getBitRateScale(), 4,                     "bit_rate_scale" );
+      WRITE_CODE( hrd->getCpbSizeScale(), 4,                     "cpb_size_scale" );
+      if( hrd->getSubPicCpbParamsPresentFlag() )
+      {
+        WRITE_CODE( hrd->getDuCpbSizeScale(), 4,                "du_cpb_size_scale" );
+      }
+      WRITE_CODE( hrd->getInitialCpbRemovalDelayLengthMinus1(), 5, "initial_cpb_removal_delay_length_minus1" );
+      WRITE_CODE( hrd->getCpbRemovalDelayLengthMinus1(),        5, "au_cpb_removal_delay_length_minus1" );
+      WRITE_CODE( hrd->getDpbOutputDelayLengthMinus1(),         5, "dpb_output_delay_length_minus1" );
+    }
+  }
+  Int i, j, nalOrVcl;
+  for( i = 0; i <= maxNumSubLayersMinus1; i ++ )
+  {
+    WRITE_FLAG( hrd->getFixedPicRateFlag( i ) ? 1 : 0,          "fixed_pic_rate_general_flag");
+    if( !hrd->getFixedPicRateFlag( i ) )
+    {
+      WRITE_FLAG( hrd->getFixedPicRateWithinCvsFlag( i ) ? 1 : 0, "fixed_pic_rate_within_cvs_flag");
+    }
+    else
+    {
+      hrd->setFixedPicRateWithinCvsFlag( i, true );
+    }
+    if( hrd->getFixedPicRateWithinCvsFlag( i ) )
+    {
+      WRITE_UVLC( hrd->getPicDurationInTcMinus1( i ),           "elemental_duration_in_tc_minus1");
+    }
+    else
+    {
+      WRITE_FLAG( hrd->getLowDelayHrdFlag( i ) ? 1 : 0,           "low_delay_hrd_flag");
+    }
+    if (!hrd->getLowDelayHrdFlag( i ))
+    {
+      WRITE_UVLC( hrd->getCpbCntMinus1( i ),                      "cpb_cnt_minus1");
+    }
+
+    for( nalOrVcl = 0; nalOrVcl < 2; nalOrVcl ++ )
+    {
+      if( ( ( nalOrVcl == 0 ) && ( hrd->getNalHrdParametersPresentFlag() ) ) ||
+          ( ( nalOrVcl == 1 ) && ( hrd->getVclHrdParametersPresentFlag() ) ) )
+      {
+        for( j = 0; j <= ( hrd->getCpbCntMinus1( i ) ); j ++ )
+        {
+          WRITE_UVLC( hrd->getBitRateValueMinus1( i, j, nalOrVcl ), "bit_rate_value_minus1");
+          WRITE_UVLC( hrd->getCpbSizeValueMinus1( i, j, nalOrVcl ), "cpb_size_value_minus1");
+          if( hrd->getSubPicCpbParamsPresentFlag() )
+          {
+            WRITE_UVLC( hrd->getDuCpbSizeValueMinus1( i, j, nalOrVcl ), "cpb_size_du_value_minus1");
+            WRITE_UVLC( hrd->getDuBitRateValueMinus1( i, j, nalOrVcl ), "bit_rate_du_value_minus1");
+          }
+          WRITE_FLAG( hrd->getCbrFlag( i, j, nalOrVcl ) ? 1 : 0, "cbr_flag");
+        }
+      }
+    }
+  }
+}
+
+Void TEncCavlc::codeSPS( TComSPS* pcSPS )
+{
+
+  const ChromaFormat format                = pcSPS->getChromaFormatIdc();
+  const Bool         chromaEnabled         = isChromaEnabled(format);
+
+#if ENC_DEC_TRACE
+  xTraceSPSHeader (pcSPS);
+#endif
+  WRITE_CODE( pcSPS->getVPSId (),          4,       "sps_video_parameter_set_id" );
+  WRITE_CODE( pcSPS->getMaxTLayers() - 1,  3,       "sps_max_sub_layers_minus1" );
+  WRITE_FLAG( pcSPS->getTemporalIdNestingFlag() ? 1 : 0,                             "sps_temporal_id_nesting_flag" );
+  codePTL(pcSPS->getPTL(), 1, pcSPS->getMaxTLayers() - 1);
+  WRITE_UVLC( pcSPS->getSPSId (),                   "sps_seq_parameter_set_id" );
+  WRITE_UVLC( Int(pcSPS->getChromaFormatIdc ()),    "chroma_format_idc" );
+  if( format == CHROMA_444 )
+  {
+    WRITE_FLAG( 0,                                  "separate_colour_plane_flag");
+  }
+
+  WRITE_UVLC( pcSPS->getPicWidthInLumaSamples (),   "pic_width_in_luma_samples" );
+  WRITE_UVLC( pcSPS->getPicHeightInLumaSamples(),   "pic_height_in_luma_samples" );
+  Window conf = pcSPS->getConformanceWindow();
+
+  WRITE_FLAG( conf.getWindowEnabledFlag(),          "conformance_window_flag" );
+  if (conf.getWindowEnabledFlag())
+  {
+    WRITE_UVLC( conf.getWindowLeftOffset()   / TComSPS::getWinUnitX(pcSPS->getChromaFormatIdc() ), "conf_win_left_offset" );
+    WRITE_UVLC( conf.getWindowRightOffset()  / TComSPS::getWinUnitX(pcSPS->getChromaFormatIdc() ), "conf_win_right_offset" );
+    WRITE_UVLC( conf.getWindowTopOffset()    / TComSPS::getWinUnitY(pcSPS->getChromaFormatIdc() ), "conf_win_top_offset" );
+    WRITE_UVLC( conf.getWindowBottomOffset() / TComSPS::getWinUnitY(pcSPS->getChromaFormatIdc() ), "conf_win_bottom_offset" );
+  }
+
+  WRITE_UVLC( pcSPS->getBitDepth(CHANNEL_TYPE_LUMA) - 8,                      "bit_depth_luma_minus8" );
+
+  WRITE_UVLC( chromaEnabled ? (pcSPS->getBitDepth(CHANNEL_TYPE_CHROMA) - 8):0,  "bit_depth_chroma_minus8" );
+
+  WRITE_UVLC( pcSPS->getBitsForPOC()-4,                 "log2_max_pic_order_cnt_lsb_minus4" );
+
+  const Bool subLayerOrderingInfoPresentFlag = 1;
+  WRITE_FLAG(subLayerOrderingInfoPresentFlag,       "sps_sub_layer_ordering_info_present_flag");
+  for(UInt i=0; i <= pcSPS->getMaxTLayers()-1; i++)
+  {
+    WRITE_UVLC( pcSPS->getMaxDecPicBuffering(i) - 1,       "sps_max_dec_pic_buffering_minus1[i]" );
+    WRITE_UVLC( pcSPS->getNumReorderPics(i),               "sps_num_reorder_pics[i]" );
+    WRITE_UVLC( pcSPS->getMaxLatencyIncrease(i),           "sps_max_latency_increase_plus1[i]" );
+    if (!subLayerOrderingInfoPresentFlag)
+    {
+      break;
+    }
+  }
+  assert( pcSPS->getMaxCUWidth() == pcSPS->getMaxCUHeight() );
+
+  WRITE_UVLC( pcSPS->getLog2MinCodingBlockSize() - 3,                                "log2_min_coding_block_size_minus3" );
+  WRITE_UVLC( pcSPS->getLog2DiffMaxMinCodingBlockSize(),                             "log2_diff_max_min_coding_block_size" );
+  WRITE_UVLC( pcSPS->getQuadtreeTULog2MinSize() - 2,                                 "log2_min_transform_block_size_minus2" );
+  WRITE_UVLC( pcSPS->getQuadtreeTULog2MaxSize() - pcSPS->getQuadtreeTULog2MinSize(), "log2_diff_max_min_transform_block_size" );
+  WRITE_UVLC( pcSPS->getQuadtreeTUMaxDepthInter() - 1,                               "max_transform_hierarchy_depth_inter" );
+  WRITE_UVLC( pcSPS->getQuadtreeTUMaxDepthIntra() - 1,                               "max_transform_hierarchy_depth_intra" );
+  WRITE_FLAG( pcSPS->getScalingListFlag() ? 1 : 0,                                   "scaling_list_enabled_flag" );
+  if(pcSPS->getScalingListFlag())
+  {
+    WRITE_FLAG( pcSPS->getScalingListPresentFlag() ? 1 : 0,                          "sps_scaling_list_data_present_flag" );
+    if(pcSPS->getScalingListPresentFlag())
+    {
+      codeScalingList( m_pcSlice->getScalingList() );
+    }
+  }
+  WRITE_FLAG( pcSPS->getUseAMP() ? 1 : 0,                                            "amp_enabled_flag" );
+  WRITE_FLAG( pcSPS->getUseSAO() ? 1 : 0,                                            "sample_adaptive_offset_enabled_flag");
+
+  WRITE_FLAG( pcSPS->getUsePCM() ? 1 : 0,                                            "pcm_enabled_flag");
+  if( pcSPS->getUsePCM() )
+  {
+    WRITE_CODE( pcSPS->getPCMBitDepth(CHANNEL_TYPE_LUMA) - 1, 4,                            "pcm_sample_bit_depth_luma_minus1" );
+    WRITE_CODE( chromaEnabled ? (pcSPS->getPCMBitDepth(CHANNEL_TYPE_CHROMA) - 1) : 0, 4,    "pcm_sample_bit_depth_chroma_minus1" );
+    WRITE_UVLC( pcSPS->getPCMLog2MinSize() - 3,                                      "log2_min_pcm_luma_coding_block_size_minus3" );
+    WRITE_UVLC( pcSPS->getPCMLog2MaxSize() - pcSPS->getPCMLog2MinSize(),             "log2_diff_max_min_pcm_luma_coding_block_size" );
+    WRITE_FLAG( pcSPS->getPCMFilterDisableFlag()?1 : 0,                              "pcm_loop_filter_disable_flag");
+  }
+
+  assert( pcSPS->getMaxTLayers() > 0 );
+
+  TComRPSList* rpsList = pcSPS->getRPSList();
+  TComReferencePictureSet*      rps;
+
+#if SRPS_IN_SLICE
+    WRITE_UVLC(0, "num_short_term_ref_pic_sets" );
+#else
+    WRITE_UVLC(rpsList->getNumberOfReferencePictureSets(), "num_short_term_ref_pic_sets" );
+    for(Int i=0; i < rpsList->getNumberOfReferencePictureSets(); i++)
+      {
+        rps = rpsList->getReferencePictureSet(i);
+        codeShortTermRefPicSet(pcSPS,rps,false, i);
+      }
+#endif
+  WRITE_FLAG( pcSPS->getLongTermRefsPresent() ? 1 : 0,         "long_term_ref_pics_present_flag" );
+  if (pcSPS->getLongTermRefsPresent())
+  {
+    WRITE_UVLC(pcSPS->getNumLongTermRefPicSPS(), "num_long_term_ref_pic_sps" );
+    for (UInt k = 0; k < pcSPS->getNumLongTermRefPicSPS(); k++)
+    {
+      WRITE_CODE( pcSPS->getLtRefPicPocLsbSps(k), pcSPS->getBitsForPOC(), "lt_ref_pic_poc_lsb_sps");
+      WRITE_FLAG( pcSPS->getUsedByCurrPicLtSPSFlag(k), "used_by_curr_pic_lt_sps_flag");
+    }
+  }
+  WRITE_FLAG( pcSPS->getTMVPFlagsPresent()  ? 1 : 0,           "sps_temporal_mvp_enable_flag" );
+
+  WRITE_FLAG( pcSPS->getUseStrongIntraSmoothing(),             "sps_strong_intra_smoothing_enable_flag" );
+
+  WRITE_FLAG( pcSPS->getVuiParametersPresentFlag(),             "vui_parameters_present_flag" );
+  if (pcSPS->getVuiParametersPresentFlag())
+  {
+      codeVUI(pcSPS->getVuiParameters(), pcSPS);
+  }
+
+  Bool sps_extension_present_flag=false;
+  Bool sps_extension_flags[NUM_SPS_EXTENSION_FLAGS]={false};
+
+  sps_extension_flags[SPS_EXT__REXT] = (
+          pcSPS->getUseResidualRotation()
+       || pcSPS->getUseSingleSignificanceMapContext()
+       || pcSPS->getUseResidualDPCM(RDPCM_SIGNAL_IMPLICIT)
+       || pcSPS->getUseResidualDPCM(RDPCM_SIGNAL_EXPLICIT)
+       || pcSPS->getUseExtendedPrecision()
+       || pcSPS->getDisableIntraReferenceSmoothing()
+       || pcSPS->getUseHighPrecisionPredictionWeighting()
+       || pcSPS->getUseGolombRiceParameterAdaptation()
+       || pcSPS->getAlignCABACBeforeBypass()
+    );
+
+  // Other SPS extension flags checked here.
+
+  for(Int i=0; i<NUM_SPS_EXTENSION_FLAGS; i++)
+  {
+    sps_extension_present_flag|=sps_extension_flags[i];
+  }
+
+  WRITE_FLAG( (sps_extension_present_flag?1:0), "sps_extension_present_flag" );
+
+  if (sps_extension_present_flag)
+  {
+    for(Int i=0; i<NUM_SPS_EXTENSION_FLAGS; i++)
+    {
+      WRITE_FLAG( sps_extension_flags[i]?1:0, "sps_extension_flag[]" );
+    }
+
+    for(Int i=0; i<NUM_SPS_EXTENSION_FLAGS; i++) // loop used so that the order is determined by the enum.
+    {
+      if (sps_extension_flags[i])
+      {
+        switch (SPSExtensionFlagIndex(i))
+        {
+          case SPS_EXT__REXT:
+
+            WRITE_FLAG( (pcSPS->getUseResidualRotation() ? 1 : 0),                  "transform_skip_rotation_enabled_flag");
+            WRITE_FLAG( (pcSPS->getUseSingleSignificanceMapContext() ? 1 : 0),      "transform_skip_context_enabled_flag");
+            WRITE_FLAG( (pcSPS->getUseResidualDPCM(RDPCM_SIGNAL_IMPLICIT) ? 1 : 0), "residual_dpcm_implicit_enabled_flag" );
+            WRITE_FLAG( (pcSPS->getUseResidualDPCM(RDPCM_SIGNAL_EXPLICIT) ? 1 : 0), "residual_dpcm_explicit_enabled_flag" );
+            WRITE_FLAG( (pcSPS->getUseExtendedPrecision() ? 1 : 0),                 "extended_precision_processing_flag" );
+            WRITE_FLAG( (pcSPS->getDisableIntraReferenceSmoothing() ? 1 : 0),       "intra_smoothing_disabled_flag" );
+            WRITE_FLAG( (pcSPS->getUseHighPrecisionPredictionWeighting() ? 1 : 0),  "high_precision_prediction_weighting_flag" );
+            WRITE_FLAG( (pcSPS->getUseGolombRiceParameterAdaptation() ? 1 : 0),     "golomb_rice_parameter_adaptation_flag" );
+            WRITE_FLAG( (pcSPS->getAlignCABACBeforeBypass() ? 1 : 0),               "cabac_bypass_alignment_enabled_flag" );
+            break;
+          default:
+            assert(sps_extension_flags[i]==false); // Should never get here with an active SPS extension flag.
+            break;
+        }
+      }
+    }
+  }
+}
+
+Void TEncCavlc::codeVPS( TComVPS* pcVPS ) // Writes the video_parameter_set_rbsp() syntax structure to the bitstream
+{
+  WRITE_CODE( pcVPS->getVPSId(),                    4,        "vps_video_parameter_set_id" );
+  WRITE_CODE( 3,                                    2,        "vps_reserved_three_2bits" );
+  WRITE_CODE( 0,                                    6,        "vps_reserved_zero_6bits" );
+  WRITE_CODE( pcVPS->getMaxTLayers() - 1,           3,        "vps_max_sub_layers_minus1" );
+  WRITE_FLAG( pcVPS->getTemporalNestingFlag(),                "vps_temporal_id_nesting_flag" );
+  assert (pcVPS->getMaxTLayers()>1||pcVPS->getTemporalNestingFlag()); // with a single temporal layer the nesting flag must be set
+  WRITE_CODE( 0xffff,                              16,        "vps_reserved_ffff_16bits" );
+  codePTL( pcVPS->getPTL(), true, pcVPS->getMaxTLayers() - 1 );
+  const Bool subLayerOrderingInfoPresentFlag = 1; // always signal per-sub-layer ordering info (constant true)
+  WRITE_FLAG(subLayerOrderingInfoPresentFlag,              "vps_sub_layer_ordering_info_present_flag");
+  for(UInt i=0; i <= pcVPS->getMaxTLayers()-1; i++)
+  {
+    WRITE_UVLC( pcVPS->getMaxDecPicBuffering(i) - 1,       "vps_max_dec_pic_buffering_minus1[i]" );
+    WRITE_UVLC( pcVPS->getNumReorderPics(i),               "vps_num_reorder_pics[i]" );
+    WRITE_UVLC( pcVPS->getMaxLatencyIncrease(i),           "vps_max_latency_increase_plus1[i]" );
+    if (!subLayerOrderingInfoPresentFlag)
+    {
+      break; // unreachable: the flag is a constant 1 above; kept to mirror the decoder-side syntax
+    }
+  }
+
+  assert( pcVPS->getNumHrdParameters() <= MAX_VPS_NUM_HRD_PARAMETERS );
+  assert( pcVPS->getMaxNuhReservedZeroLayerId() < MAX_VPS_NUH_RESERVED_ZERO_LAYER_ID_PLUS1 );
+  WRITE_CODE( pcVPS->getMaxNuhReservedZeroLayerId(), 6,     "vps_max_nuh_reserved_zero_layer_id" );
+  pcVPS->setMaxOpSets(1); // NOTE: side effect on the VPS object - encoder forces exactly one operation point set
+  WRITE_UVLC( pcVPS->getMaxOpSets() - 1,                    "vps_max_op_sets_minus1" );
+  for( UInt opsIdx = 1; opsIdx <= ( pcVPS->getMaxOpSets() - 1 ); opsIdx ++ ) // body never executes while MaxOpSets is forced to 1
+  {
+    // Operation point set
+    for( UInt i = 0; i <= pcVPS->getMaxNuhReservedZeroLayerId(); i ++ )
+    {
+      // Only applicable for version 1
+      pcVPS->setLayerIdIncludedFlag( true, opsIdx, i );
+      WRITE_FLAG( pcVPS->getLayerIdIncludedFlag( opsIdx, i ) ? 1 : 0, "layer_id_included_flag[opsIdx][i]" );
+    }
+  }
+  TimingInfo *timingInfo = pcVPS->getTimingInfo();
+  WRITE_FLAG(timingInfo->getTimingInfoPresentFlag(),          "vps_timing_info_present_flag");
+  if(timingInfo->getTimingInfoPresentFlag())
+  {
+    WRITE_CODE(timingInfo->getNumUnitsInTick(), 32,           "vps_num_units_in_tick");
+    WRITE_CODE(timingInfo->getTimeScale(),      32,           "vps_time_scale");
+    WRITE_FLAG(timingInfo->getPocProportionalToTimingFlag(),  "vps_poc_proportional_to_timing_flag");
+    if(timingInfo->getPocProportionalToTimingFlag())
+    {
+      WRITE_UVLC(timingInfo->getNumTicksPocDiffOneMinus1(),   "vps_num_ticks_poc_diff_one_minus1");
+    }
+    pcVPS->setNumHrdParameters( 0 ); // NOTE: side effect - HRD parameter signalling is suppressed in this encoder
+    WRITE_UVLC( pcVPS->getNumHrdParameters(),                 "vps_num_hrd_parameters" );
+
+    if( pcVPS->getNumHrdParameters() > 0 )
+    {
+      pcVPS->createHrdParamBuffer();
+    }
+    for( UInt i = 0; i < pcVPS->getNumHrdParameters(); i ++ ) // dead while NumHrdParameters is forced to 0 above
+    {
+      // Only applicable for version 1
+      pcVPS->setHrdOpSetIdx( 0, i );
+      WRITE_UVLC( pcVPS->getHrdOpSetIdx( i ),                "hrd_op_set_idx" );
+      if( i > 0 )
+      {
+        WRITE_FLAG( pcVPS->getCprmsPresentFlag( i ) ? 1 : 0, "cprms_present_flag[i]" );
+      }
+      codeHrdParameters(pcVPS->getHrdParameters(i), pcVPS->getCprmsPresentFlag( i ), pcVPS->getMaxTLayers() - 1);
+    }
+  }
+  WRITE_FLAG( 0,                     "vps_extension_flag" );
+
+  //future extensions here..
+
+  return;
+}
+
+Void TEncCavlc::codeSliceHeader         ( TComSlice* pcSlice ) // Writes slice_segment_header() syntax (entry points are written separately)
+{
+#if ENC_DEC_TRACE
+  xTraceSliceHeader (pcSlice);
+#endif
+
+  const ChromaFormat format                = pcSlice->getSPS()->getChromaFormatIdc();
+  const UInt         numberValidComponents = getNumberValidComponents(format);
+  const Bool         chromaEnabled         = isChromaEnabled(format);
+
+  //calculate number of bits required for slice address
+  Int maxSliceSegmentAddress = pcSlice->getPic()->getNumberOfCtusInFrame();
+  Int bitsSliceSegmentAddress = 0;
+  while(maxSliceSegmentAddress>(1<<bitsSliceSegmentAddress)) // bitsSliceSegmentAddress = ceil(log2(#CTUs in frame))
+  {
+    bitsSliceSegmentAddress++;
+  }
+  const Int ctuTsAddress = pcSlice->getSliceSegmentCurStartCtuTsAddr();
+
+  //write slice address
+  const Int sliceSegmentRsAddress = pcSlice->getPic()->getPicSym()->getCtuTsToRsAddrMap(ctuTsAddress); // tile-scan -> raster-scan CTU address
+
+  WRITE_FLAG( sliceSegmentRsAddress==0, "first_slice_segment_in_pic_flag" );
+  if ( pcSlice->getRapPicFlag() )
+  {
+    WRITE_FLAG( pcSlice->getNoOutputPriorPicsFlag() ? 1 : 0, "no_output_of_prior_pics_flag" );
+  }
+  WRITE_UVLC( pcSlice->getPPS()->getPPSId(), "slice_pic_parameter_set_id" );
+  if ( pcSlice->getPPS()->getDependentSliceSegmentsEnabledFlag() && (sliceSegmentRsAddress!=0) )
+  {
+    WRITE_FLAG( pcSlice->getDependentSliceSegmentFlag() ? 1 : 0, "dependent_slice_segment_flag" );
+  }
+  if(sliceSegmentRsAddress>0)
+  {
+    WRITE_CODE( sliceSegmentRsAddress, bitsSliceSegmentAddress, "slice_segment_address" );
+  }
+  if ( !pcSlice->getDependentSliceSegmentFlag() ) // dependent slice segments inherit the remaining header fields
+  {
+    for (Int i = 0; i < pcSlice->getPPS()->getNumExtraSliceHeaderBits(); i++)
+    {
+      assert(!!"slice_reserved_undetermined_flag[]");
+      WRITE_FLAG(0, "slice_reserved_undetermined_flag[]");
+    }
+
+    WRITE_UVLC( pcSlice->getSliceType(),       "slice_type" );
+
+    if( pcSlice->getPPS()->getOutputFlagPresentFlag() )
+    {
+      WRITE_FLAG( pcSlice->getPicOutputFlag() ? 1 : 0, "pic_output_flag" );
+    }
+
+    if( !pcSlice->getIdrPicFlag() )
+    {
+      Int picOrderCntLSB = (pcSlice->getPOC()-pcSlice->getLastIDR()+(1<<pcSlice->getSPS()->getBitsForPOC())) & ((1<<pcSlice->getSPS()->getBitsForPOC())-1); // POC relative to last IDR, modulo 2^BitsForPOC
+      WRITE_CODE( picOrderCntLSB, pcSlice->getSPS()->getBitsForPOC(), "pic_order_cnt_lsb");
+      TComReferencePictureSet* rps = pcSlice->getRPS();
+
+      // check for bitstream restriction stating that:
+      // If the current picture is a BLA or CRA picture, the value of NumPocTotalCurr shall be equal to 0.
+      // Ideally this process should not be repeated for each slice in a picture
+      if (pcSlice->isIRAP())
+      {
+        for (Int picIdx = 0; picIdx < rps->getNumberOfPictures(); picIdx++)
+        {
+          assert (!rps->getUsed(picIdx));
+        }
+      }
+
+#if SRPS_IN_SLICE
+      {
+        WRITE_FLAG( 0, "short_term_ref_pic_set_sps_flag");
+        codeShortTermRefPicSet(pcSlice->getSPS(), rps, true, 0);
+      }
+#else
+      if(pcSlice->getRPSidx() < 0) // RPS not found in SPS list: code it explicitly in the slice header
+      {
+        WRITE_FLAG( 0, "short_term_ref_pic_set_sps_flag");
+        codeShortTermRefPicSet(pcSlice->getSPS(), rps, true, pcSlice->getSPS()->getRPSList()->getNumberOfReferencePictureSets());
+      }
+      else
+      {
+        WRITE_FLAG( 1, "short_term_ref_pic_set_sps_flag");
+        Int numBits = 0;
+        while ((1 << numBits) < pcSlice->getSPS()->getRPSList()->getNumberOfReferencePictureSets()) // numBits = ceil(log2(num RPS))
+        {
+          numBits++;
+        }
+        if (numBits > 0)
+        {
+          WRITE_CODE( pcSlice->getRPSidx(), numBits, "short_term_ref_pic_set_idx" );
+        }
+      }
+#endif
+      if(pcSlice->getSPS()->getLongTermRefsPresent())
+      {
+        Int numLtrpInSH = rps->getNumberOfLongtermPictures();
+        Int ltrpInSPS[MAX_NUM_REF_PICS];
+        Int numLtrpInSPS = 0;
+        UInt ltrpIndex;
+        Int counter = 0;
+        for(Int k = rps->getNumberOfPictures()-1; k > rps->getNumberOfPictures()-rps->getNumberOfLongtermPictures()-1; k--) // split LT refs into SPS-indexed vs slice-header-coded
+        {
+          if (findMatchingLTRP(pcSlice, &ltrpIndex, rps->getPOC(k), rps->getUsed(k)))
+          {
+            ltrpInSPS[numLtrpInSPS] = ltrpIndex;
+            numLtrpInSPS++;
+          }
+          else
+          {
+            counter++;
+          }
+        }
+        numLtrpInSH -= numLtrpInSPS;
+
+        Int bitsForLtrpInSPS = 0;
+        while (pcSlice->getSPS()->getNumLongTermRefPicSPS() > (1 << bitsForLtrpInSPS)) // bitsForLtrpInSPS = ceil(log2(num LT refs in SPS))
+        {
+          bitsForLtrpInSPS++;
+        }
+        if (pcSlice->getSPS()->getNumLongTermRefPicSPS() > 0)
+        {
+          WRITE_UVLC( numLtrpInSPS, "num_long_term_sps");
+        }
+        WRITE_UVLC( numLtrpInSH, "num_long_term_pics");
+        // Note that the LSBs of the LT ref. pic. POCs must be sorted before.
+        // Not sorted here because LT ref indices will be used in setRefPicList()
+        Int prevDeltaMSB = 0, prevLSB = 0;
+        Int offset = rps->getNumberOfNegativePictures() + rps->getNumberOfPositivePictures();
+        for(Int i=rps->getNumberOfPictures()-1 ; i > offset-1; i--)
+        {
+          if (counter < numLtrpInSPS)
+          {
+            if (bitsForLtrpInSPS > 0)
+            {
+              WRITE_CODE( ltrpInSPS[counter], bitsForLtrpInSPS, "lt_idx_sps[i]");
+            }
+          }
+          else
+          {
+            WRITE_CODE( rps->getPocLSBLT(i), pcSlice->getSPS()->getBitsForPOC(), "poc_lsb_lt");
+            WRITE_FLAG( rps->getUsed(i), "used_by_curr_pic_lt_flag");
+          }
+          WRITE_FLAG( rps->getDeltaPocMSBPresentFlag(i), "delta_poc_msb_present_flag");
+
+          if(rps->getDeltaPocMSBPresentFlag(i))
+          {
+            Bool deltaFlag = false;
+            //  First LTRP from SPS                 ||  First LTRP from SH                              || curr LSB            != prev LSB
+            if( (i == rps->getNumberOfPictures()-1) || (i == rps->getNumberOfPictures()-1-numLtrpInSPS) || (rps->getPocLSBLT(i) != prevLSB) )
+            {
+              deltaFlag = true;
+            }
+            if(deltaFlag)
+            {
+              WRITE_UVLC( rps->getDeltaPocMSBCycleLT(i), "delta_poc_msb_cycle_lt[i]" );
+            }
+            else
+            {
+              Int differenceInDeltaMSB = rps->getDeltaPocMSBCycleLT(i) - prevDeltaMSB; // code MSB cycle differentially against previous LT ref
+              assert(differenceInDeltaMSB >= 0);
+              WRITE_UVLC( differenceInDeltaMSB, "delta_poc_msb_cycle_lt[i]" );
+            }
+            prevLSB = rps->getPocLSBLT(i);
+            prevDeltaMSB = rps->getDeltaPocMSBCycleLT(i);
+          }
+        }
+      }
+      if (pcSlice->getSPS()->getTMVPFlagsPresent())
+      {
+        WRITE_FLAG( pcSlice->getEnableTMVPFlag() ? 1 : 0, "slice_temporal_mvp_enable_flag" );
+      }
+    }
+    if(pcSlice->getSPS()->getUseSAO())
+    {
+       WRITE_FLAG( pcSlice->getSaoEnabledFlag(CHANNEL_TYPE_LUMA), "slice_sao_luma_flag" );
+       if (chromaEnabled) WRITE_FLAG( pcSlice->getSaoEnabledFlag(CHANNEL_TYPE_CHROMA), "slice_sao_chroma_flag" );
+    }
+
+    //check if numrefidxes match the defaults. If not, override
+
+    if (!pcSlice->isIntra())
+    {
+      Bool overrideFlag = (pcSlice->getNumRefIdx( REF_PIC_LIST_0 )!=pcSlice->getPPS()->getNumRefIdxL0DefaultActive()||(pcSlice->isInterB()&&pcSlice->getNumRefIdx( REF_PIC_LIST_1 )!=pcSlice->getPPS()->getNumRefIdxL1DefaultActive()));
+      WRITE_FLAG( overrideFlag ? 1 : 0,                               "num_ref_idx_active_override_flag");
+      if (overrideFlag)
+      {
+        WRITE_UVLC( pcSlice->getNumRefIdx( REF_PIC_LIST_0 ) - 1,      "num_ref_idx_l0_active_minus1" );
+        if (pcSlice->isInterB())
+        {
+          WRITE_UVLC( pcSlice->getNumRefIdx( REF_PIC_LIST_1 ) - 1,    "num_ref_idx_l1_active_minus1" );
+        }
+        else
+        {
+          pcSlice->setNumRefIdx(REF_PIC_LIST_1, 0); // side effect: P slices carry no list-1 references
+        }
+      }
+    }
+    else
+    {
+      pcSlice->setNumRefIdx(REF_PIC_LIST_0, 0);
+      pcSlice->setNumRefIdx(REF_PIC_LIST_1, 0);
+    }
+
+    if( pcSlice->getPPS()->getListsModificationPresentFlag() && pcSlice->getNumRpsCurrTempList() > 1)
+    {
+      TComRefPicListModification* refPicListModification = pcSlice->getRefPicListModification();
+      if(!pcSlice->isIntra())
+      {
+        WRITE_FLAG(pcSlice->getRefPicListModification()->getRefPicListModificationFlagL0() ? 1 : 0,       "ref_pic_list_modification_flag_l0" );
+        if (pcSlice->getRefPicListModification()->getRefPicListModificationFlagL0())
+        {
+          Int numRpsCurrTempList0 = pcSlice->getNumRpsCurrTempList();
+          if (numRpsCurrTempList0 > 1)
+          {
+            Int length = 1;
+            numRpsCurrTempList0 --;
+            while ( numRpsCurrTempList0 >>= 1) // length = ceil(log2(NumPocTotalCurr)) bits per list entry
+            {
+              length ++;
+            }
+            for(Int i = 0; i < pcSlice->getNumRefIdx( REF_PIC_LIST_0 ); i++)
+            {
+              WRITE_CODE( refPicListModification->getRefPicSetIdxL0(i), length, "list_entry_l0");
+            }
+          }
+        }
+      }
+      if(pcSlice->isInterB())
+      {
+        WRITE_FLAG(pcSlice->getRefPicListModification()->getRefPicListModificationFlagL1() ? 1 : 0,       "ref_pic_list_modification_flag_l1" );
+        if (pcSlice->getRefPicListModification()->getRefPicListModificationFlagL1())
+        {
+          Int numRpsCurrTempList1 = pcSlice->getNumRpsCurrTempList();
+          if ( numRpsCurrTempList1 > 1 )
+          {
+            Int length = 1;
+            numRpsCurrTempList1 --;
+            while ( numRpsCurrTempList1 >>= 1)
+            {
+              length ++;
+            }
+            for(Int i = 0; i < pcSlice->getNumRefIdx( REF_PIC_LIST_1 ); i++)
+            {
+              WRITE_CODE( refPicListModification->getRefPicSetIdxL1(i), length, "list_entry_l1");
+            }
+          }
+        }
+      }
+    }
+
+    if (pcSlice->isInterB())
+    {
+      WRITE_FLAG( pcSlice->getMvdL1ZeroFlag() ? 1 : 0,   "mvd_l1_zero_flag");
+    }
+
+    if(!pcSlice->isIntra())
+    {
+      if (!pcSlice->isIntra() && pcSlice->getPPS()->getCabacInitPresentFlag())
+      {
+        SliceType sliceType   = pcSlice->getSliceType();
+        Int  encCABACTableIdx = pcSlice->getPPS()->getEncCABACTableIdx();
+        Bool encCabacInitFlag = (sliceType!=encCABACTableIdx && encCABACTableIdx!=I_SLICE) ? true : false; // use the other slice type's CABAC init table when it differs
+        pcSlice->setCabacInitFlag( encCabacInitFlag );
+        WRITE_FLAG( encCabacInitFlag?1:0, "cabac_init_flag" );
+      }
+    }
+
+    if ( pcSlice->getEnableTMVPFlag() )
+    {
+      if ( pcSlice->getSliceType() == B_SLICE )
+      {
+        WRITE_FLAG( pcSlice->getColFromL0Flag(), "collocated_from_l0_flag" );
+      }
+
+      if ( pcSlice->getSliceType() != I_SLICE &&
+        ((pcSlice->getColFromL0Flag()==1 && pcSlice->getNumRefIdx(REF_PIC_LIST_0)>1)||
+        (pcSlice->getColFromL0Flag()==0  && pcSlice->getNumRefIdx(REF_PIC_LIST_1)>1)))
+      {
+        WRITE_UVLC( pcSlice->getColRefIdx(), "collocated_ref_idx" );
+      }
+    }
+    if ( (pcSlice->getPPS()->getUseWP() && pcSlice->getSliceType()==P_SLICE) || (pcSlice->getPPS()->getWPBiPred() && pcSlice->getSliceType()==B_SLICE) )
+    {
+      xCodePredWeightTable( pcSlice );
+    }
+    assert(pcSlice->getMaxNumMergeCand()<=MRG_MAX_NUM_CANDS);
+    if (!pcSlice->isIntra())
+    {
+      WRITE_UVLC(MRG_MAX_NUM_CANDS - pcSlice->getMaxNumMergeCand(), "five_minus_max_num_merge_cand");
+    }
+    Int iCode = pcSlice->getSliceQp() - ( pcSlice->getPPS()->getPicInitQPMinus26() + 26 ); // slice QP delta relative to PPS initial QP
+    WRITE_SVLC( iCode, "slice_qp_delta" );
+    if (pcSlice->getPPS()->getSliceChromaQpFlag())
+    {
+      if (numberValidComponents > COMPONENT_Cb) { WRITE_SVLC( pcSlice->getSliceChromaQpDelta(COMPONENT_Cb), "slice_qp_delta_cb" ); }
+      if (numberValidComponents > COMPONENT_Cr) { WRITE_SVLC( pcSlice->getSliceChromaQpDelta(COMPONENT_Cr), "slice_qp_delta_cr" ); }
+      assert(numberValidComponents <= COMPONENT_Cr+1);
+    }
+
+    if (pcSlice->getPPS()->getChromaQpAdjTableSize() > 0)
+    {
+      WRITE_FLAG(pcSlice->getUseChromaQpAdj(), "slice_chroma_qp_adjustment_enabled_flag");
+    }
+
+    if (pcSlice->getPPS()->getDeblockingFilterControlPresentFlag())
+    {
+      if (pcSlice->getPPS()->getDeblockingFilterOverrideEnabledFlag() )
+      {
+        WRITE_FLAG(pcSlice->getDeblockingFilterOverrideFlag(), "deblocking_filter_override_flag");
+      }
+      if (pcSlice->getDeblockingFilterOverrideFlag())
+      {
+        WRITE_FLAG(pcSlice->getDeblockingFilterDisable(), "slice_disable_deblocking_filter_flag");
+        if(!pcSlice->getDeblockingFilterDisable())
+        {
+          WRITE_SVLC (pcSlice->getDeblockingFilterBetaOffsetDiv2(), "slice_beta_offset_div2");
+          WRITE_SVLC (pcSlice->getDeblockingFilterTcOffsetDiv2(),   "slice_tc_offset_div2");
+        }
+      }
+    }
+
+    Bool isSAOEnabled = pcSlice->getSPS()->getUseSAO() && (pcSlice->getSaoEnabledFlag(CHANNEL_TYPE_LUMA) || (chromaEnabled && pcSlice->getSaoEnabledFlag(CHANNEL_TYPE_CHROMA)));
+    Bool isDBFEnabled = (!pcSlice->getDeblockingFilterDisable());
+
+    if(pcSlice->getPPS()->getLoopFilterAcrossSlicesEnabledFlag() && ( isSAOEnabled || isDBFEnabled ))
+    {
+      WRITE_FLAG(pcSlice->getLFCrossSliceBoundaryFlag()?1:0, "slice_loop_filter_across_slices_enabled_flag");
+    }
+  }
+  if(pcSlice->getPPS()->getSliceHeaderExtensionPresentFlag())
+  {
+    WRITE_UVLC(0,"slice_header_extension_length"); // no slice header extension data emitted by this encoder
+  }
+}
+
+Void TEncCavlc::codePTL( TComPTL* pcPTL, Bool profilePresentFlag, Int maxNumSubLayersMinus1) // Writes the profile_tier_level() syntax structure
+{
+  if(profilePresentFlag)
+  {
+    codeProfileTier(pcPTL->getGeneralPTL());    // general_...
+  }
+  WRITE_CODE( Int(pcPTL->getGeneralPTL()->getLevelIdc()), 8, "general_level_idc" );
+
+  for (Int i = 0; i < maxNumSubLayersMinus1; i++) // presence flags for each sub-layer's profile/level info
+  {
+    if(profilePresentFlag)
+    {
+      WRITE_FLAG( pcPTL->getSubLayerProfilePresentFlag(i), "sub_layer_profile_present_flag[i]" );
+    }
+
+    WRITE_FLAG( pcPTL->getSubLayerLevelPresentFlag(i),   "sub_layer_level_present_flag[i]" );
+  }
+
+  if (maxNumSubLayersMinus1 > 0)
+  {
+    for (Int i = maxNumSubLayersMinus1; i < 8; i++) // pad reserved bits for the unused sub-layer slots (8 total)
+    {
+      WRITE_CODE(0, 2, "reserved_zero_2bits");
+    }
+  }
+
+  for(Int i = 0; i < maxNumSubLayersMinus1; i++) // second pass: the actual per-sub-layer profile/level payloads
+  {
+    if( profilePresentFlag && pcPTL->getSubLayerProfilePresentFlag(i) )
+    {
+      codeProfileTier(pcPTL->getSubLayerPTL(i));  // sub_layer_...
+    }
+    if( pcPTL->getSubLayerLevelPresentFlag(i) )
+    {
+      WRITE_CODE( Int(pcPTL->getSubLayerPTL(i)->getLevelIdc()), 8, "sub_layer_level_idc[i]" );
+    }
+  }
+}
+Void TEncCavlc::codeProfileTier( ProfileTierLevel* ptl ) // Writes one profile/tier entry (general_* or sub_layer_*) of profile_tier_level()
+{
+  WRITE_CODE( ptl->getProfileSpace(), 2 ,     "XXX_profile_space[]");
+  WRITE_FLAG( ptl->getTierFlag()==Level::HIGH, "XXX_tier_flag[]"    );
+  WRITE_CODE( Int(ptl->getProfileIdc()), 5 ,  "XXX_profile_idc[]"  );
+  for(Int j = 0; j < 32; j++)
+  {
+    WRITE_FLAG( ptl->getProfileCompatibilityFlag(j), "XXX_profile_compatibility_flag[][j]");
+  }
+
+  WRITE_FLAG(ptl->getProgressiveSourceFlag(),   "general_progressive_source_flag");
+  WRITE_FLAG(ptl->getInterlacedSourceFlag(),    "general_interlaced_source_flag");
+  WRITE_FLAG(ptl->getNonPackedConstraintFlag(), "general_non_packed_constraint_flag");
+  WRITE_FLAG(ptl->getFrameOnlyConstraintFlag(), "general_frame_only_constraint_flag");
+
+  if (ptl->getProfileIdc() == Profile::MAINREXT || ptl->getProfileIdc() == Profile::HIGHTHROUGHPUTREXT ) // RExt profiles carry extra constraint flags
+  {
+    const UInt         bitDepthConstraint=ptl->getBitDepthConstraint();
+    WRITE_FLAG(bitDepthConstraint<=12, "general_max_12bit_constraint_flag");
+    WRITE_FLAG(bitDepthConstraint<=10, "general_max_10bit_constraint_flag");
+    WRITE_FLAG(bitDepthConstraint<= 8, "general_max_8bit_constraint_flag");
+    const ChromaFormat chromaFmtConstraint=ptl->getChromaFormatConstraint();
+    WRITE_FLAG(chromaFmtConstraint==CHROMA_422||chromaFmtConstraint==CHROMA_420||chromaFmtConstraint==CHROMA_400, "general_max_422chroma_constraint_flag");
+    WRITE_FLAG(chromaFmtConstraint==CHROMA_420||chromaFmtConstraint==CHROMA_400,                                  "general_max_420chroma_constraint_flag");
+    WRITE_FLAG(chromaFmtConstraint==CHROMA_400,                                                                   "general_max_monochrome_constraint_flag");
+    WRITE_FLAG(ptl->getIntraConstraintFlag(),        "general_intra_constraint_flag");
+    WRITE_FLAG(0,                                    "general_one_picture_only_constraint_flag");
+    WRITE_FLAG(ptl->getLowerBitRateConstraintFlag(), "general_lower_bit_rate_constraint_flag");
+    WRITE_CODE(0 , 16, "XXX_reserved_zero_35bits[0..15]"); // 35 reserved bits split across three writes (max 32 bits per call)
+    WRITE_CODE(0 , 16, "XXX_reserved_zero_35bits[16..31]");
+    WRITE_CODE(0 ,  3, "XXX_reserved_zero_35bits[32..34]");
+  }
+  else
+  {
+    WRITE_CODE(0x0000 , 16, "XXX_reserved_zero_44bits[0..15]"); // non-RExt profiles: 44 reserved bits instead
+    WRITE_CODE(0x0000 , 16, "XXX_reserved_zero_44bits[16..31]");
+    WRITE_CODE(0x000  , 12, "XXX_reserved_zero_44bits[32..43]");
+  }
+}
+
+/**
+ - write tiles and wavefront substreams sizes for the slice header.
+ .
+ \param pcSlice Where we find the substream size information.
+ */
+Void  TEncCavlc::codeTilesWPPEntryPoint( TComSlice* pSlice )
+{
+  if (!pSlice->getPPS()->getTilesEnabledFlag() && !pSlice->getPPS()->getEntropyCodingSyncEnabledFlag())
+  {
+    return; // no tiles and no WPP: the slice header carries no entry points
+  }
+  UInt maxOffset = 0;
+  for(Int idx=0; idx<pSlice->getNumberOfSubstreamSizes(); idx++) // find the largest substream size to size the offset field
+  {
+    UInt offset=pSlice->getSubstreamSize(idx);
+    if ( offset > maxOffset )
+    {
+      maxOffset = offset;
+    }
+  }
+
+  // Determine number of bits "offsetLenMinus1+1" required for entry point information
+  UInt offsetLenMinus1 = 0;
+  while (maxOffset >= (1u << (offsetLenMinus1 + 1)))
+  {
+    offsetLenMinus1++;
+    assert(offsetLenMinus1 + 1 < 32); // guard against shifting past the width of the field
+  }
+
+  WRITE_UVLC(pSlice->getNumberOfSubstreamSizes(), "num_entry_point_offsets");
+  if (pSlice->getNumberOfSubstreamSizes()>0)
+  {
+    WRITE_UVLC(offsetLenMinus1, "offset_len_minus1");
+
+    for (UInt idx=0; idx<pSlice->getNumberOfSubstreamSizes(); idx++)
+    {
+      WRITE_CODE(pSlice->getSubstreamSize(idx)-1, offsetLenMinus1+1, "entry_point_offset_minus1");
+    }
+  }
+}
+
+Void TEncCavlc::codeTerminatingBit      ( UInt uilsLast )  // intentionally empty: nothing to emit in the CAVLC writer
+{
+}
+
+Void TEncCavlc::codeSliceFinish ()  // intentionally empty: nothing to emit in the CAVLC writer
+{
+}
+
+Void TEncCavlc::codeMVPIdx ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )  // CU-level stub: aborts if reached (not supported by this writer)
+{
+  assert(0);
+}
+
+Void TEncCavlc::codePartSize( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codePredMode( TComDataCU* pcCU, UInt uiAbsPartIdx )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeMergeFlag    ( TComDataCU* pcCU, UInt uiAbsPartIdx )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeMergeIndex    ( TComDataCU* pcCU, UInt uiAbsPartIdx )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeInterModeFlag( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth, UInt uiEncMode )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeCUTransquantBypassFlag( TComDataCU* pcCU, UInt uiAbsPartIdx )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeSkipFlag( TComDataCU* pcCU, UInt uiAbsPartIdx )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeSplitFlag   ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeTransformSubdivFlag( UInt uiSymbol, UInt uiCtx )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeQtCbf( TComTU &rTu, const ComponentID compID, const Bool lowestLevel )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeQtRootCbf( TComDataCU* pcCU, UInt uiAbsPartIdx )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeQtCbfZero( TComTU &rTu, const ChannelType chType )  // stub: aborts if reached
+{
+  assert(0);
+}
+Void TEncCavlc::codeQtRootCbfZero( TComDataCU* pcCU )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeTransformSkipFlags (TComTU &rTu, ComponentID component )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+/** Code I_PCM information.
+ * \param pcCU pointer to CU
+ * \param uiAbsPartIdx CU index
+ * \returns Void
+ */
+Void TEncCavlc::codeIPCMInfo( TComDataCU* pcCU, UInt uiAbsPartIdx )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeIntraDirLumaAng( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool isMultiple)  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeIntraDirChroma( TComDataCU* pcCU, UInt uiAbsPartIdx )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeInterDir( TComDataCU* pcCU, UInt uiAbsPartIdx )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeRefFrmIdx( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeMvd( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeCrossComponentPrediction( TComTU& /*rTu*/, ComponentID /*compID*/ )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeDeltaQP( TComDataCU* pcCU, UInt uiAbsPartIdx )  // Writes cu_qp_delta as a signed Exp-Golomb code
+{
+  Int iDQp  = pcCU->getQP( uiAbsPartIdx ) - pcCU->getRefQP( uiAbsPartIdx );  // delta against the predicted (reference) QP
+
+  Int qpBdOffsetY =  pcCU->getSlice()->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA);
+  iDQp = (iDQp + 78 + qpBdOffsetY + (qpBdOffsetY/2)) % (52 + qpBdOffsetY) - 26 - (qpBdOffsetY/2);  // wrap delta into [-(26+off/2), +(25+off/2)] modulo the QP range
+
+  xWriteSvlc( iDQp );
+
+  return;
+}
+
+Void TEncCavlc::codeChromaQpAdjustment( TComDataCU* pcCU, UInt uiAbsPartIdx )  // stub: aborts if reached (not supported by this writer)
+{
+  assert(0);
+}
+
+Void TEncCavlc::codeCoeffNxN    ( TComTU &rTu, TCoeff* pcCoef, const ComponentID compID )  // stub: aborts if reached
+{
+  assert(0);
+}
+
+Void TEncCavlc::estBit( estBitsSbacStruct* pcEstBitsCabac, Int width, Int height, ChannelType chType )  // no-op: bit estimation not provided here
+{
+  // printf("error : no VLC mode support in this version\n");
+  return;
+}
+
+// ====================================================================================================================
+// Protected member functions
+// ====================================================================================================================
+
+/** code explicit wp tables
+ * \param TComSlice* pcSlice
+ * \returns Void
+ */
+Void TEncCavlc::xCodePredWeightTable( TComSlice* pcSlice )
+{
+  WPScalingParam  *wp;
+  const ChromaFormat    format                = pcSlice->getPic()->getChromaFormat();
+  const UInt            numberValidComponents = getNumberValidComponents(format);
+  const Bool            bChroma               = isChromaEnabled(format);
+  const Int             iNbRef                = (pcSlice->getSliceType() == B_SLICE ) ? (2) : (1);
+        Bool            bDenomCoded           = false;
+        UInt            uiMode                = 0;
+        UInt            uiTotalSignalledWeightFlags = 0;
+
+  if ( (pcSlice->getSliceType()==P_SLICE && pcSlice->getPPS()->getUseWP()) || (pcSlice->getSliceType()==B_SLICE && pcSlice->getPPS()->getWPBiPred()) )
+  {
+    uiMode = 1; // explicit
+  }
+  if(uiMode == 1)
+  {
+    for ( Int iNumRef=0 ; iNumRef<iNbRef ; iNumRef++ )
+    {
+      RefPicList  eRefPicList = ( iNumRef ? REF_PIC_LIST_1 : REF_PIC_LIST_0 );
+
+      // NOTE: wp[].uiLog2WeightDenom and wp[].bPresentFlag are actually per-channel-type settings.
+
+      for ( Int iRefIdx=0 ; iRefIdx<pcSlice->getNumRefIdx(eRefPicList) ; iRefIdx++ )
+      {
+        pcSlice->getWpScaling(eRefPicList, iRefIdx, wp);
+        if ( !bDenomCoded )
+        {
+          Int iDeltaDenom;
+          WRITE_UVLC( wp[COMPONENT_Y].uiLog2WeightDenom, "luma_log2_weight_denom" );     // ue(v): luma_log2_weight_denom
+
+          if( bChroma )
+          {
+            assert(wp[COMPONENT_Cb].uiLog2WeightDenom == wp[COMPONENT_Cr].uiLog2WeightDenom); // check the channel-type settings are consistent across components.
+            iDeltaDenom = (wp[COMPONENT_Cb].uiLog2WeightDenom - wp[COMPONENT_Y].uiLog2WeightDenom);
+            WRITE_SVLC( iDeltaDenom, "delta_chroma_log2_weight_denom" );       // se(v): delta_chroma_log2_weight_denom
+          }
+          bDenomCoded = true;
+        }
+        WRITE_FLAG( wp[COMPONENT_Y].bPresentFlag, "luma_weight_lX_flag" );               // u(1): luma_weight_lX_flag
+        uiTotalSignalledWeightFlags += wp[COMPONENT_Y].bPresentFlag;
+      }
+      if (bChroma)
+      {
+        for ( Int iRefIdx=0 ; iRefIdx<pcSlice->getNumRefIdx(eRefPicList) ; iRefIdx++ )
+        {
+          pcSlice->getWpScaling(eRefPicList, iRefIdx, wp);
+          assert(wp[COMPONENT_Cb].bPresentFlag == wp[COMPONENT_Cr].bPresentFlag); // check the channel-type settings are consistent across components.
+          WRITE_FLAG( wp[COMPONENT_Cb].bPresentFlag, "chroma_weight_lX_flag" );           // u(1): chroma_weight_lX_flag
+          uiTotalSignalledWeightFlags += 2*wp[COMPONENT_Cb].bPresentFlag;
+        }
+      }
+
+      for ( Int iRefIdx=0 ; iRefIdx<pcSlice->getNumRefIdx(eRefPicList) ; iRefIdx++ )
+      {
+        pcSlice->getWpScaling(eRefPicList, iRefIdx, wp);
+        if ( wp[COMPONENT_Y].bPresentFlag )
+        {
+          Int iDeltaWeight = (wp[COMPONENT_Y].iWeight - (1<<wp[COMPONENT_Y].uiLog2WeightDenom));
+          WRITE_SVLC( iDeltaWeight, "delta_luma_weight_lX" );                            // se(v): delta_luma_weight_lX
+          WRITE_SVLC( wp[COMPONENT_Y].iOffset, "luma_offset_lX" );                       // se(v): luma_offset_lX
+        }
+
+        if ( bChroma )
+        {
+          if ( wp[COMPONENT_Cb].bPresentFlag )
+          {
+            for ( Int j = COMPONENT_Cb ; j < numberValidComponents ; j++ )
+            {
+              assert(wp[COMPONENT_Cb].uiLog2WeightDenom == wp[COMPONENT_Cr].uiLog2WeightDenom);
+              Int iDeltaWeight = (wp[j].iWeight - (1<<wp[COMPONENT_Cb].uiLog2WeightDenom));
+              WRITE_SVLC( iDeltaWeight, "delta_chroma_weight_lX" );            // se(v): delta_chroma_weight_lX
+
+              Int range=pcSlice->getSPS()->getUseHighPrecisionPredictionWeighting() ? (1<<g_bitDepth[CHANNEL_TYPE_CHROMA])/2 : 128;
+              Int pred = ( range - ( ( range*wp[j].iWeight)>>(wp[j].uiLog2WeightDenom) ) );
+              Int iDeltaChroma = (wp[j].iOffset - pred);
+              WRITE_SVLC( iDeltaChroma, "delta_chroma_offset_lX" );            // se(v): delta_chroma_offset_lX
+            }
+          }
+        }
+      }
+    }
+    assert(uiTotalSignalledWeightFlags<=24);
+  }
+}
+
+/** code quantization matrix
+ *  \param scalingList quantization matrix information
+ */
+Void TEncCavlc::codeScalingList( TComScalingList* scalingList )
+{
+  UInt listId,sizeId;
+  Bool scalingListPredModeFlag;
+
+  //for each size
+  for(sizeId = 0; sizeId < SCALING_LIST_SIZE_NUM; sizeId++)
+  {
+    Int predListStep = (sizeId == SCALING_LIST_32x32? (SCALING_LIST_NUM/NUMBER_OF_PREDICTION_MODES) : 1); // if 32x32, skip over chroma entries.
+
+    for(listId = 0; listId < SCALING_LIST_NUM; listId+=predListStep)
+    {
+      scalingListPredModeFlag = scalingList->checkPredMode( sizeId, listId );
+      WRITE_FLAG( scalingListPredModeFlag, "scaling_list_pred_mode_flag" );
+      if(!scalingListPredModeFlag)// Copy Mode
+      {
+        if (sizeId == SCALING_LIST_32x32)
+        {
+          // adjust the code, to cope with the missing chroma entries
+          WRITE_UVLC( ((Int)listId - (Int)scalingList->getRefMatrixId (sizeId,listId)) / (SCALING_LIST_NUM/NUMBER_OF_PREDICTION_MODES), "scaling_list_pred_matrix_id_delta");
+        }
+        else
+        {
+          WRITE_UVLC( (Int)listId - (Int)scalingList->getRefMatrixId (sizeId,listId), "scaling_list_pred_matrix_id_delta");
+        }
+      }
+      else// DPCM Mode
+      {
+        xCodeScalingList(scalingList, sizeId, listId);
+      }
+    }
+  }
+  return;
+}
+/** code DPCM
+ * \param scalingList quantization matrix information
+ * \param sizeId size index
+ * \param listId list index
+ */
+Void TEncCavlc::xCodeScalingList(TComScalingList* scalingList, UInt sizeId, UInt listId)
+{
+  Int coefNum = min(MAX_MATRIX_COEF_NUM,(Int)g_scalingListSize[sizeId]);
+  UInt* scan  = g_scanOrder[SCAN_UNGROUPED][SCAN_DIAG][sizeId==0 ? 2 : 3][sizeId==0 ? 2 : 3];
+  Int nextCoef = SCALING_LIST_START_VALUE;
+  Int data;
+  Int *src = scalingList->getScalingListAddress(sizeId, listId);
+    if( sizeId > SCALING_LIST_8x8 )
+    {
+      WRITE_SVLC( scalingList->getScalingListDC(sizeId,listId) - 8, "scaling_list_dc_coef_minus8");
+      nextCoef = scalingList->getScalingListDC(sizeId,listId);
+    }
+    for(Int i=0;i<coefNum;i++)
+    {
+      data = src[scan[i]] - nextCoef;
+      nextCoef = src[scan[i]];
+      if(data > 127)
+      {
+        data = data - 256;
+      }
+      if(data < -128)
+      {
+        data = data + 256;
+      }
+
+      WRITE_SVLC( data,  "scaling_list_delta_coef");
+    }
+}
+Bool TEncCavlc::findMatchingLTRP ( TComSlice* pcSlice, UInt *ltrpsIndex, Int ltrpPOC, Bool usedFlag )
+{
+  // Bool state = true, state2 = false;
+  Int lsb = ltrpPOC & ((1<<pcSlice->getSPS()->getBitsForPOC())-1);
+  for (Int k = 0; k < pcSlice->getSPS()->getNumLongTermRefPicSPS(); k++)
+  {
+    if ( (lsb == pcSlice->getSPS()->getLtRefPicPocLsbSps(k)) && (usedFlag == pcSlice->getSPS()->getUsedByCurrPicLtSPSFlag(k)) )
+    {
+      *ltrpsIndex = k;
+      return true;
+    }
+  }
+  return false;
+}
+Bool TComScalingList::checkPredMode(UInt sizeId, UInt listId)
+{
+  Int predListStep = (sizeId == SCALING_LIST_32x32? (SCALING_LIST_NUM/NUMBER_OF_PREDICTION_MODES) : 1); // if 32x32, skip over chroma entries.
+
+  for(Int predListIdx = (Int)listId ; predListIdx >= 0; predListIdx-=predListStep)
+  {
+    if( !memcmp(getScalingListAddress(sizeId,listId),((listId == predListIdx) ?
+      getScalingListDefaultAddress(sizeId, predListIdx): getScalingListAddress(sizeId, predListIdx)),sizeof(Int)*min(MAX_MATRIX_COEF_NUM,(Int)g_scalingListSize[sizeId])) // check value of matrix
+     && ((sizeId < SCALING_LIST_16x16) || (getScalingListDC(sizeId,listId) == getScalingListDC(sizeId,predListIdx)))) // check DC value
+    {
+      setRefMatrixId(sizeId, listId, predListIdx);
+      return false;
+    }
+  }
+  return true;
+}
+
+Void TEncCavlc::codeExplicitRdpcmMode( TComTU &rTu, const ComponentID compID )
+ {
+   assert(0);
+ }
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncCavlc.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,144 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncCavlc.h
+    \brief    CAVLC encoder class (header)
+*/
+
+#ifndef __TENCCAVLC__
+#define __TENCCAVLC__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include "TLibCommon/CommonDef.h"
+#include "TLibCommon/TComBitStream.h"
+#include "TLibCommon/TComRom.h"
+#include "TEncEntropy.h"
+#include "SyntaxElementWriter.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+class TEncTop;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// CAVLC encoder class
+class TEncCavlc : public SyntaxElementWriter, public TEncEntropyIf
+{
+public:
+  TEncCavlc();
+  virtual ~TEncCavlc();
+
+protected:
+  TComSlice*    m_pcSlice;
+
+  Void codeShortTermRefPicSet              ( TComSPS* pcSPS, TComReferencePictureSet* pcRPS, Bool calledFromSliceHeader, Int idx );
+  Bool findMatchingLTRP ( TComSlice* pcSlice, UInt *ltrpsIndex, Int ltrpPOC, Bool usedFlag );
+
+public:
+
+  Void  resetEntropy          ();
+  Void  determineCabacInitIdx  () {};
+
+  Void  setBitstream          ( TComBitIf* p )  { m_pcBitIf = p;  }
+  Void  setSlice              ( TComSlice* p )  { m_pcSlice = p;  }
+  Void  resetBits             ()                { m_pcBitIf->resetBits(); }
+  UInt  getNumberOfWrittenBits()                { return  m_pcBitIf->getNumberOfWrittenBits();  }
+  Void  codeVPS                 ( TComVPS* pcVPS );
+  Void  codeVUI                 ( TComVUI *pcVUI, TComSPS* pcSPS );
+  Void  codeSPS                 ( TComSPS* pcSPS );
+  Void  codePPS                 ( TComPPS* pcPPS );
+  Void  codeSliceHeader         ( TComSlice* pcSlice );
+  Void  codePTL                 ( TComPTL* pcPTL, Bool profilePresentFlag, Int maxNumSubLayersMinus1);
+  Void  codeProfileTier         ( ProfileTierLevel* ptl );
+  Void  codeHrdParameters       ( TComHRD *hrd, Bool commonInfPresentFlag, UInt maxNumSubLayersMinus1 );
+  Void  codeTilesWPPEntryPoint( TComSlice* pSlice );
+  Void  codeTerminatingBit      ( UInt uilsLast );
+  Void  codeSliceFinish         ();
+
+  Void codeMVPIdx ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList );
+  Void codeSAOBlkParam(SAOBlkParam& saoBlkParam, Bool* sliceEnabled, Bool leftMergeAvail, Bool aboveMergeAvail, Bool onlyEstMergeInfo = false){printf("only supported in CABAC"); assert(0); exit(-1);}
+  Void codeCUTransquantBypassFlag( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeSkipFlag      ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeMergeFlag     ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeMergeIndex    ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+
+  Void codeAlfCtrlFlag   ( ComponentID component, UInt code ) {printf("Not supported\n"); assert(0);}
+  Void codeInterModeFlag( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth, UInt uiEncMode );
+  Void codeSplitFlag     ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );
+
+  Void codePartSize      ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );
+  Void codePredMode      ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+
+  Void codeIPCMInfo      ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+
+  Void codeTransformSubdivFlag( UInt uiSymbol, UInt uiCtx );
+  Void codeQtCbf         ( TComTU &rTu, const ComponentID compID, const Bool lowestLevel );
+  Void codeQtRootCbf     ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeQtCbfZero     ( TComTU &rTu, const ChannelType chType );
+  Void codeQtRootCbfZero ( TComDataCU* pcCU );
+  Void codeIntraDirLumaAng( TComDataCU* pcCU, UInt absPartIdx, Bool isMultiple);
+  Void codeIntraDirChroma( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeInterDir      ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeRefFrmIdx     ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList );
+  Void codeMvd           ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList );
+
+  Void codeCrossComponentPrediction( TComTU &rTu, ComponentID compID );
+
+  Void codeDeltaQP       ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeChromaQpAdjustment( TComDataCU* pcCU, UInt uiAbsPartIdx );
+
+  Void codeCoeffNxN      ( TComTU &rTu, TCoeff* pcCoef, const ComponentID compID );
+  Void codeTransformSkipFlags ( TComTU &rTu, ComponentID component );
+
+  Void estBit            ( estBitsSbacStruct* pcEstBitsSbac, Int width, Int height, ChannelType chType );
+
+  Void xCodePredWeightTable          ( TComSlice* pcSlice );
+
+  Void codeScalingList  ( TComScalingList* scalingList );
+  Void xCodeScalingList ( TComScalingList* scalingList, UInt sizeId, UInt listId);
+  Void codeDFFlag       ( UInt uiCode, const Char *pSymbolName );
+  Void codeDFSvlc       ( Int   iCode, const Char *pSymbolName );
+
+  Void codeExplicitRdpcmMode( TComTU &rTu, const ComponentID compID );
+};
+
+//! \}
+
+#endif // __TENCCAVLC__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncCfg.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,879 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncCfg.h
+    \brief    encoder configuration class (header)
+*/
+
+#ifndef __TENCCFG__
+#define __TENCCFG__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include "TLibCommon/CommonDef.h"
+#include "TLibCommon/TComSlice.h"
+#include <assert.h>
+
+struct GOPEntry
+{
+  Int m_POC;
+  Int m_QPOffset;
+  Double m_QPFactor;
+  Int m_tcOffsetDiv2;
+  Int m_betaOffsetDiv2;
+  Int m_temporalId;
+  Bool m_refPic;
+  Int m_numRefPicsActive;
+  Char m_sliceType;
+  Int m_numRefPics;
+  Int m_referencePics[MAX_NUM_REF_PICS];
+  Int m_usedByCurrPic[MAX_NUM_REF_PICS];
+#if AUTO_INTER_RPS
+  Int m_interRPSPrediction;
+#else
+  Bool m_interRPSPrediction;
+#endif
+  Int m_deltaRPS;
+  Int m_numRefIdc;
+  Int m_refIdc[MAX_NUM_REF_PICS+1];
+  Bool m_isEncoded;
+  GOPEntry()
+  : m_POC(-1)
+  , m_QPOffset(0)
+  , m_QPFactor(0)
+  , m_tcOffsetDiv2(0)
+  , m_betaOffsetDiv2(0)
+  , m_temporalId(0)
+  , m_refPic(false)
+  , m_numRefPicsActive(0)
+  , m_sliceType('P')
+  , m_numRefPics(0)
+  , m_interRPSPrediction(false)
+  , m_deltaRPS(0)
+  , m_numRefIdc(0)
+  , m_isEncoded(false)
+  {
+    ::memset( m_referencePics, 0, sizeof(m_referencePics) );
+    ::memset( m_usedByCurrPic, 0, sizeof(m_usedByCurrPic) );
+    ::memset( m_refIdc,        0, sizeof(m_refIdc) );
+  }
+};
+
+std::istringstream &operator>>(std::istringstream &in, GOPEntry &entry);     //input
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// encoder configuration class
+class TEncCfg
+{
+protected:
+  //==== File I/O ========
+  Int       m_iFrameRate;
+  Int       m_FrameSkip;
+  Int       m_iSourceWidth;
+  Int       m_iSourceHeight;
+  Window    m_conformanceWindow;
+  Int       m_framesToBeEncoded;
+  Double    m_adLambdaModifier[ MAX_TLAYER ];
+
+  Bool      m_printMSEBasedSequencePSNR;
+  Bool      m_printFrameMSE;
+  Bool      m_printSequenceMSE;
+  Bool      m_cabacZeroWordPaddingEnabled;
+
+  /* profile & level */
+  Profile::Name m_profile;
+  Level::Tier   m_levelTier;
+  Level::Name   m_level;
+  Bool m_progressiveSourceFlag;
+  Bool m_interlacedSourceFlag;
+  Bool m_nonPackedConstraintFlag;
+  Bool m_frameOnlyConstraintFlag;
+  UInt              m_bitDepthConstraintValue;
+  ChromaFormat      m_chromaFormatConstraintValue;
+  Bool              m_intraConstraintFlag;
+  Bool              m_lowerBitRateConstraintFlag;
+
+  //====== Coding Structure ========
+  UInt      m_uiIntraPeriod;
+  UInt      m_uiDecodingRefreshType;            ///< the type of decoding refresh employed for the random access.
+  Int       m_iGOPSize;
+  GOPEntry  m_GOPList[MAX_GOP];
+  Int       m_extraRPSs;
+  Int       m_maxDecPicBuffering[MAX_TLAYER];
+  Int       m_numReorderPics[MAX_TLAYER];
+
+  Int       m_iQP;                              //  if (AdaptiveQP == OFF)
+
+  Int       m_aiPad[2];
+
+
+  Int       m_iMaxRefPicNum;                     ///< this is used to mimic the sliding mechanism used by the decoder
+                                                 // TODO: We need to have a common sliding mechanism used by both the encoder and decoder
+
+  Int       m_maxTempLayer;                      ///< Max temporal layer
+  Bool m_useAMP;
+  //======= Transform =============
+  UInt      m_uiQuadtreeTULog2MaxSize;
+  UInt      m_uiQuadtreeTULog2MinSize;
+  UInt      m_uiQuadtreeTUMaxDepthInter;
+  UInt      m_uiQuadtreeTUMaxDepthIntra;
+
+  //====== Loop/Deblock Filter ========
+  Bool      m_bLoopFilterDisable;
+  Bool      m_loopFilterOffsetInPPS;
+  Int       m_loopFilterBetaOffsetDiv2;
+  Int       m_loopFilterTcOffsetDiv2;
+  Bool      m_DeblockingFilterControlPresent;
+  Bool      m_DeblockingFilterMetric;
+  Bool      m_bUseSAO;
+  Int       m_maxNumOffsetsPerPic;
+  Bool      m_saoCtuBoundary;
+
+  //====== Motion search ========
+  Int       m_iFastSearch;                      //  0:Full search  1:Diamond  2:PMVFAST
+  Int       m_iSearchRange;                     //  0:Full frame
+  Int       m_bipredSearchRange;
+
+  //====== Quality control ========
+  Int       m_iMaxDeltaQP;                      //  Max. absolute delta QP (1:default)
+  Int       m_iMaxCuDQPDepth;                   //  Max. depth for a minimum CuDQP (0:default)
+  Int       m_maxCUChromaQpAdjustmentDepth;
+
+  Int       m_chromaCbQpOffset;                 //  Chroma Cb QP Offset (0:default)
+  Int       m_chromaCrQpOffset;                 //  Chroma Cr Qp Offset (0:default)
+  ChromaFormat m_chromaFormatIDC;
+
+#if ADAPTIVE_QP_SELECTION
+  Bool      m_bUseAdaptQpSelect;
+#endif
+  Bool      m_useExtendedPrecision;
+  Bool      m_useHighPrecisionPredictionWeighting;
+  Bool      m_bUseAdaptiveQP;
+  Int       m_iQPAdaptationRange;
+
+  //====== Tool list ========
+  Bool      m_bUseASR;
+  Bool      m_bUseHADME;
+  Bool      m_useRDOQ;
+  Bool      m_useRDOQTS;
+  UInt      m_rdPenalty;
+  Bool      m_bUseFastEnc;
+  Bool      m_bUseEarlyCU;
+  Bool      m_useFastDecisionForMerge;
+  Bool      m_bUseCbfFastMode;
+  Bool      m_useEarlySkipDetection;
+  Bool      m_useCrossComponentPrediction;
+  Bool      m_reconBasedCrossCPredictionEstimate;
+  UInt      m_saoOffsetBitShift[MAX_NUM_CHANNEL_TYPE];
+  Bool      m_useTransformSkip;
+  Bool      m_useTransformSkipFast;
+  UInt      m_transformSkipLog2MaxSize;
+  Bool      m_useResidualRotation;
+  Bool      m_useSingleSignificanceMapContext;
+  Bool      m_useGolombRiceParameterAdaptation;
+  Bool      m_alignCABACBeforeBypass;
+  Bool      m_useResidualDPCM[NUMBER_OF_RDPCM_SIGNALLING_MODES];
+  Int*      m_aidQP;
+  UInt      m_uiDeltaQpRD;
+
+  Bool      m_bUseConstrainedIntraPred;
+  Bool      m_usePCM;
+  UInt      m_pcmLog2MaxSize;
+  UInt      m_uiPCMLog2MinSize;
+  //====== Slice ========
+  SliceConstraint m_sliceMode;
+  Int       m_sliceArgument;
+  //====== Dependent Slice ========
+  SliceConstraint m_sliceSegmentMode;
+  Int       m_sliceSegmentArgument;
+  Bool      m_bLFCrossSliceBoundaryFlag;
+
+  Bool      m_bPCMInputBitDepthFlag;
+  UInt      m_uiPCMBitDepthLuma;
+  UInt      m_uiPCMBitDepthChroma;
+  Bool      m_bPCMFilterDisableFlag;
+  Bool      m_disableIntraReferenceSmoothing;
+  Bool      m_loopFilterAcrossTilesEnabledFlag;
+  Bool      m_tileUniformSpacingFlag;
+  Int       m_iNumColumnsMinus1;
+  Int       m_iNumRowsMinus1;
+  std::vector<Int> m_tileColumnWidth;
+  std::vector<Int> m_tileRowHeight;
+
+  Int       m_iWaveFrontSynchro;
+  Int       m_iWaveFrontSubstreams;
+
+  Int       m_decodedPictureHashSEIEnabled;              ///< Checksum(3)/CRC(2)/MD5(1)/disable(0) acting on decoded picture hash SEI message
+  Int       m_bufferingPeriodSEIEnabled;
+  Int       m_pictureTimingSEIEnabled;
+  Int       m_recoveryPointSEIEnabled;
+  Bool      m_toneMappingInfoSEIEnabled;
+  Int       m_toneMapId;
+  Bool      m_toneMapCancelFlag;
+  Bool      m_toneMapPersistenceFlag;
+  Int       m_codedDataBitDepth;
+  Int       m_targetBitDepth;
+  Int       m_modelId;
+  Int       m_minValue;
+  Int       m_maxValue;
+  Int       m_sigmoidMidpoint;
+  Int       m_sigmoidWidth;
+  Int       m_numPivots;
+  Int       m_cameraIsoSpeedIdc;
+  Int       m_cameraIsoSpeedValue;
+  Int       m_exposureIndexIdc;
+  Int       m_exposureIndexValue;
+  Int       m_exposureCompensationValueSignFlag;
+  Int       m_exposureCompensationValueNumerator;
+  Int       m_exposureCompensationValueDenomIdc;
+  Int       m_refScreenLuminanceWhite;
+  Int       m_extendedRangeWhiteLevel;
+  Int       m_nominalBlackLevelLumaCodeValue;
+  Int       m_nominalWhiteLevelLumaCodeValue;
+  Int       m_extendedWhiteLevelLumaCodeValue;
+  Int*      m_startOfCodedInterval;
+  Int*      m_codedPivotValue;
+  Int*      m_targetPivotValue;
+  Int       m_framePackingSEIEnabled;
+  Int       m_framePackingSEIType;
+  Int       m_framePackingSEIId;
+  Int       m_framePackingSEIQuincunx;
+  Int       m_framePackingSEIInterpretation;
+  Int       m_segmentedRectFramePackingSEIEnabled;
+  Bool      m_segmentedRectFramePackingSEICancel;
+  Int       m_segmentedRectFramePackingSEIType;
+  Bool      m_segmentedRectFramePackingSEIPersistence;
+  Int       m_displayOrientationSEIAngle;
+  Int       m_temporalLevel0IndexSEIEnabled;
+  Int       m_gradualDecodingRefreshInfoEnabled;
+  Int       m_noDisplaySEITLayer;
+  Int       m_decodingUnitInfoSEIEnabled;
+  Int       m_SOPDescriptionSEIEnabled;
+  Int       m_scalableNestingSEIEnabled;
+  Bool      m_tmctsSEIEnabled;
+  Bool      m_timeCodeSEIEnabled;
+  Int       m_timeCodeSEINumTs;
+  TComSEITimeSet   m_timeSetArray[MAX_TIMECODE_SEI_SETS];
+  Bool      m_kneeSEIEnabled;
+  Int       m_kneeSEIId;
+  Bool      m_kneeSEICancelFlag;
+  Bool      m_kneeSEIPersistenceFlag;
+  Int       m_kneeSEIInputDrange;
+  Int       m_kneeSEIInputDispLuminance;
+  Int       m_kneeSEIOutputDrange;
+  Int       m_kneeSEIOutputDispLuminance;
+  Int       m_kneeSEINumKneePointsMinus1;
+  Int*      m_kneeSEIInputKneePoint;
+  Int*      m_kneeSEIOutputKneePoint;
+  TComSEIMasteringDisplay m_masteringDisplay;
+  //====== Weighted Prediction ========
+  Bool      m_useWeightedPred;       ///< Use of Weighting Prediction (P_SLICE)
+  Bool      m_useWeightedBiPred;    ///< Use of Bi-directional Weighting Prediction (B_SLICE)
+  UInt      m_log2ParallelMergeLevelMinus2;       ///< Parallel merge estimation region
+  UInt      m_maxNumMergeCand;                    ///< Maximum number of merge candidates
+  ScalingListMode m_useScalingListId;            ///< Using quantization matrix i.e. 0=off, 1=default, 2=file.
+  Char*     m_scalingListFile;          ///< quantization matrix file name
+  Int       m_TMVPModeId;
+  Int       m_signHideFlag;
+  Bool      m_RCEnableRateControl;
+  Int       m_RCTargetBitrate;
+  Int       m_RCKeepHierarchicalBit;
+  Bool      m_RCLCULevelRC;
+  Bool      m_RCUseLCUSeparateModel;
+  Int       m_RCInitialQP;
+  Bool      m_RCForceIntraQP;
+  Bool      m_TransquantBypassEnableFlag;                     ///< transquant_bypass_enable_flag setting in PPS.
+  Bool      m_CUTransquantBypassFlagForce;                    ///< if transquant_bypass_enable_flag, then, if true, all CU transquant bypass flags will be set to true.
+
+  CostMode  m_costMode;                                       ///< The cost function to use, primarily when considering lossless coding.
+
+  TComVPS   m_cVPS;
+  Bool      m_recalculateQPAccordingToLambda;                 ///< recalculate QP value according to the lambda value
+  Int       m_activeParameterSetsSEIEnabled;                  ///< enable active parameter set SEI message
+  Bool      m_vuiParametersPresentFlag;                       ///< enable generation of VUI parameters
+  Bool      m_aspectRatioInfoPresentFlag;                     ///< Signals whether aspect_ratio_idc is present
+  Bool      m_chromaSamplingFilterHintEnabled;                ///< Signals whether chroma sampling filter hint data is present
+  Int       m_chromaSamplingHorFilterIdc;                     ///< Specifies the Index of filter to use
+  Int       m_chromaSamplingVerFilterIdc;                     ///< Specifies the Index of filter to use
+  Int       m_aspectRatioIdc;                                 ///< aspect_ratio_idc
+  Int       m_sarWidth;                                       ///< horizontal size of the sample aspect ratio
+  Int       m_sarHeight;                                      ///< vertical size of the sample aspect ratio
+  Bool      m_overscanInfoPresentFlag;                        ///< Signals whether overscan_appropriate_flag is present
+  Bool      m_overscanAppropriateFlag;                        ///< Indicates whether conformant decoded pictures are suitable for display using overscan
+  Bool      m_videoSignalTypePresentFlag;                     ///< Signals whether video_format, video_full_range_flag, and colour_description_present_flag are present
+  Int       m_videoFormat;                                    ///< Indicates representation of pictures
+  Bool      m_videoFullRangeFlag;                             ///< Indicates the black level and range of luma and chroma signals
+  Bool      m_colourDescriptionPresentFlag;                   ///< Signals whether colour_primaries, transfer_characteristics and matrix_coefficients are present
+  Int       m_colourPrimaries;                                ///< Indicates chromaticity coordinates of the source primaries
+  Int       m_transferCharacteristics;                        ///< Indicates the opto-electronic transfer characteristics of the source
+  Int       m_matrixCoefficients;                             ///< Describes the matrix coefficients used in deriving luma and chroma from RGB primaries
+  Bool      m_chromaLocInfoPresentFlag;                       ///< Signals whether chroma_sample_loc_type_top_field and chroma_sample_loc_type_bottom_field are present
+  Int       m_chromaSampleLocTypeTopField;                    ///< Specifies the location of chroma samples for top field
+  Int       m_chromaSampleLocTypeBottomField;                 ///< Specifies the location of chroma samples for bottom field
+  Bool      m_neutralChromaIndicationFlag;                    ///< Indicates that the value of all decoded chroma samples is equal to 1<<(BitDepthCr-1)
+  Window    m_defaultDisplayWindow;                           ///< Represents the default display window parameters
+  Bool      m_frameFieldInfoPresentFlag;                      ///< Indicates that pic_struct and other field coding related values are present in picture timing SEI messages
+  Bool      m_pocProportionalToTimingFlag;                    ///< Indicates that the POC value is proportional to the output time w.r.t. first picture in CVS
+  Int       m_numTicksPocDiffOneMinus1;                       ///< Number of ticks minus 1 that for a POC difference of one
+  Bool      m_bitstreamRestrictionFlag;                       ///< Signals whether bitstream restriction parameters are present
+  Bool      m_tilesFixedStructureFlag;                        ///< Indicates that each active picture parameter set has the same values of the syntax elements related to tiles
+  Bool      m_motionVectorsOverPicBoundariesFlag;             ///< Indicates that no samples outside the picture boundaries are used for inter prediction
+  Int       m_minSpatialSegmentationIdc;                      ///< Indicates the maximum size of the spatial segments in the pictures in the coded video sequence
+  Int       m_maxBytesPerPicDenom;                            ///< Indicates a number of bytes not exceeded by the sum of the sizes of the VCL NAL units associated with any coded picture
+  Int       m_maxBitsPerMinCuDenom;                           ///< Indicates an upper bound for the number of bits of coding_unit() data
+  Int       m_log2MaxMvLengthHorizontal;                      ///< Indicate the maximum absolute value of a decoded horizontal MV component in quarter-pel luma units
+  Int       m_log2MaxMvLengthVertical;                        ///< Indicate the maximum absolute value of a decoded vertical MV component in quarter-pel luma units
+
+  Bool      m_useStrongIntraSmoothing;                        ///< enable the use of strong intra smoothing (bi_linear interpolation) for 32x32 blocks when reference samples are flat.
+
+public:
+  TEncCfg()
+  : m_tileColumnWidth()
+  , m_tileRowHeight()
+  {}
+
+  virtual ~TEncCfg()
+  {}
+
+  Void setProfile(Profile::Name profile) { m_profile = profile; }
+  Void setLevel(Level::Tier tier, Level::Name level) { m_levelTier = tier; m_level = level; }
+
+  Void      setFrameRate                    ( Int   i )      { m_iFrameRate = i; }
+  Void      setFrameSkip                    ( UInt i ) { m_FrameSkip = i; }
+  Void      setSourceWidth                  ( Int   i )      { m_iSourceWidth = i; }
+  Void      setSourceHeight                 ( Int   i )      { m_iSourceHeight = i; }
+
+  Window   &getConformanceWindow()                           { return m_conformanceWindow; }  ///< non-const reference: allows in-place modification of the window
+  Void      setConformanceWindow (Int confLeft, Int confRight, Int confTop, Int confBottom ) { m_conformanceWindow.setWindow (confLeft, confRight, confTop, confBottom); }  ///< forwards to Window::setWindow
+
+  Void      setFramesToBeEncoded            ( Int   i )      { m_framesToBeEncoded = i; }
+
+  Bool      getPrintMSEBasedSequencePSNR    ()         const { return m_printMSEBasedSequencePSNR;  }
+  Void      setPrintMSEBasedSequencePSNR    (Bool value)     { m_printMSEBasedSequencePSNR = value; }
+
+  Bool      getPrintFrameMSE                ()         const { return m_printFrameMSE;              }
+  Void      setPrintFrameMSE                (Bool value)     { m_printFrameMSE = value;             }
+
+  Bool      getPrintSequenceMSE             ()         const { return m_printSequenceMSE;           }
+  Void      setPrintSequenceMSE             (Bool value)     { m_printSequenceMSE = value;          }
+
+  Bool      getCabacZeroWordPaddingEnabled()           const { return m_cabacZeroWordPaddingEnabled;  }
+  Void      setCabacZeroWordPaddingEnabled(Bool value)       { m_cabacZeroWordPaddingEnabled = value; }
+
+  //====== Coding Structure ========
+  Void      setIntraPeriod                  ( Int   i )      { m_uiIntraPeriod = (UInt)i; }  ///< NOTE(review): negative i would wrap in the UInt cast -- assumed non-negative
+  Void      setDecodingRefreshType          ( Int   i )      { m_uiDecodingRefreshType = (UInt)i; }
+  Void      setGOPSize                      ( Int   i )      { m_iGOPSize = i; }
+  Void      setGopList                      ( GOPEntry*  GOPList ) {  for ( Int i = 0; i < MAX_GOP; i++ ) m_GOPList[i] = GOPList[i]; }  ///< copies all MAX_GOP entries from the supplied array
+  Void      setExtraRPSs                    ( Int   i )      { m_extraRPSs = i; }
+  GOPEntry  getGOPEntry                     ( Int   i )      { return m_GOPList[i]; }  ///< returns a copy, not a reference; i is not range-checked
+  Void      setEncodedFlag                  ( Int  i, Bool value )  { m_GOPList[i].m_isEncoded = value; }  ///< no range check: caller must keep i < MAX_GOP
+  Void      setMaxDecPicBuffering           ( UInt u, UInt tlayer ) { m_maxDecPicBuffering[tlayer] = u;    }  ///< tlayer is not range-checked
+  Void      setNumReorderPics               ( Int  i, UInt tlayer ) { m_numReorderPics[tlayer] = i;    }
+
+  Void      setQP                           ( Int   i )      { m_iQP = i; }
+
+  Void      setPad                          ( Int*  iPad                   )      { for ( Int i = 0; i < 2; i++ ) m_aiPad[i] = iPad[i]; }  ///< copies exactly two padding values from the supplied array
+
+  Int       getMaxRefPicNum                 ()                              { return m_iMaxRefPicNum;           }
+  Void      setMaxRefPicNum                 ( Int iMaxRefPicNum )           { m_iMaxRefPicNum = iMaxRefPicNum;  }
+
+  Int       getMaxTempLayer                 ()                              { return m_maxTempLayer;              } 
+  Void      setMaxTempLayer                 ( Int maxTempLayer )            { m_maxTempLayer = maxTempLayer;      }
+  //======== Transform =============
+  Void      setQuadtreeTULog2MaxSize        ( UInt  u )      { m_uiQuadtreeTULog2MaxSize = u; }
+  Void      setQuadtreeTULog2MinSize        ( UInt  u )      { m_uiQuadtreeTULog2MinSize = u; }
+  Void      setQuadtreeTUMaxDepthInter      ( UInt  u )      { m_uiQuadtreeTUMaxDepthInter = u; }
+  Void      setQuadtreeTUMaxDepthIntra      ( UInt  u )      { m_uiQuadtreeTUMaxDepthIntra = u; }
+
+  Void setUseAMP( Bool b ) { m_useAMP = b; }  ///< AMP = asymmetric motion partitioning
+
+  //====== Loop/Deblock Filter ========
+  Void      setLoopFilterDisable            ( Bool  b )      { m_bLoopFilterDisable       = b; }
+  Void      setLoopFilterOffsetInPPS        ( Bool  b )      { m_loopFilterOffsetInPPS      = b; }
+  Void      setLoopFilterBetaOffset         ( Int   i )      { m_loopFilterBetaOffsetDiv2  = i; }  ///< NOTE(review): stored as-is into the *Div2 member -- caller appears to pass the already-halved offset; confirm
+  Void      setLoopFilterTcOffset           ( Int   i )      { m_loopFilterTcOffsetDiv2    = i; }  ///< NOTE(review): same *Div2 convention as the beta offset above
+  Void      setDeblockingFilterControlPresent ( Bool b ) { m_DeblockingFilterControlPresent = b; }
+  Void      setDeblockingFilterMetric       ( Bool  b )      { m_DeblockingFilterMetric = b; }
+
+  //====== Motion search ========
+  Void      setFastSearch                   ( Int   i )      { m_iFastSearch = i; }
+  Void      setSearchRange                  ( Int   i )      { m_iSearchRange = i; }
+  Void      setBipredSearchRange            ( Int   i )      { m_bipredSearchRange = i; }
+
+  //====== Quality control ========
+  Void      setMaxDeltaQP                   ( Int   i )      { m_iMaxDeltaQP = i; }
+  Void      setMaxCuDQPDepth                ( Int   i )      { m_iMaxCuDQPDepth = i; }
+
+  Int       getMaxCUChromaQpAdjustmentDepth ()         const { return m_maxCUChromaQpAdjustmentDepth;  }
+  Void      setMaxCUChromaQpAdjustmentDepth (Int value)      { m_maxCUChromaQpAdjustmentDepth = value; }
+
+  Void      setChromaCbQpOffset             ( Int   i )      { m_chromaCbQpOffset = i; }
+  Void      setChromaCrQpOffset             ( Int   i )      { m_chromaCrQpOffset = i; }
+
+  Void      setChromaFormatIdc              ( ChromaFormat cf ) { m_chromaFormatIDC = cf; }
+  ChromaFormat  getChromaFormatIdc          ( )              { return m_chromaFormatIDC; }
+
+#if ADAPTIVE_QP_SELECTION
+  Void      setUseAdaptQpSelect             ( Bool   i ) { m_bUseAdaptQpSelect    = i; }  ///< parameter is a Bool flag despite being named 'i'
+  Bool      getUseAdaptQpSelect             ()           { return   m_bUseAdaptQpSelect; }
+#endif
+
+  Bool      getUseExtendedPrecision         ()         const { return m_useExtendedPrecision;  }
+  Void      setUseExtendedPrecision         (Bool value)     { m_useExtendedPrecision = value; }
+
+  Bool      getUseHighPrecisionPredictionWeighting() const { return m_useHighPrecisionPredictionWeighting; }
+  Void      setUseHighPrecisionPredictionWeighting(Bool value) { m_useHighPrecisionPredictionWeighting = value; }
+
+  Void      setUseAdaptiveQP                ( Bool  b )      { m_bUseAdaptiveQP = b; }
+  Void      setQPAdaptationRange            ( Int   i )      { m_iQPAdaptationRange = i; }
+
+  //====== Sequence ========
+  Int       getFrameRate                    ()      { return  m_iFrameRate; }
+  UInt      getFrameSkip                    ()      { return  m_FrameSkip; }
+  Int       getSourceWidth                  ()      { return  m_iSourceWidth; }
+  Int       getSourceHeight                 ()      { return  m_iSourceHeight; }
+  Int       getFramesToBeEncoded            ()      { return  m_framesToBeEncoded; }
+  Void setLambdaModifier                    ( UInt uiIndex, Double dValue ) { m_adLambdaModifier[ uiIndex ] = dValue; }  ///< uiIndex is not range-checked
+  Double getLambdaModifier                  ( UInt uiIndex ) const { return m_adLambdaModifier[ uiIndex ]; }
+
+  //==== Coding Structure ========
+  UInt      getIntraPeriod                  ()      { return  m_uiIntraPeriod; }
+  UInt      getDecodingRefreshType          ()      { return  m_uiDecodingRefreshType; }
+  Int       getGOPSize                      ()      { return  m_iGOPSize; }
+  Int       getMaxDecPicBuffering           (UInt tlayer) { return m_maxDecPicBuffering[tlayer]; }  ///< tlayer is not range-checked
+  Int       getNumReorderPics               (UInt tlayer) { return m_numReorderPics[tlayer]; }
+  Int       getQP                           ()      { return  m_iQP; }
+
+  Int       getPad                          ( Int i )      { assert (i < 2 );                      return  m_aiPad[i]; }  ///< NOTE(review): the assert does not reject negative i
+
+  //======== Transform =============
+  UInt      getQuadtreeTULog2MaxSize        ()      const { return m_uiQuadtreeTULog2MaxSize; }
+  UInt      getQuadtreeTULog2MinSize        ()      const { return m_uiQuadtreeTULog2MinSize; }
+  UInt      getQuadtreeTUMaxDepthInter      ()      const { return m_uiQuadtreeTUMaxDepthInter; }
+  UInt      getQuadtreeTUMaxDepthIntra      ()      const { return m_uiQuadtreeTUMaxDepthIntra; }
+
+  //==== Loop/Deblock Filter ========
+  Bool      getLoopFilterDisable            ()      { return  m_bLoopFilterDisable;       }
+  Bool      getLoopFilterOffsetInPPS        ()      { return m_loopFilterOffsetInPPS; }
+  Int       getLoopFilterBetaOffset         ()      { return m_loopFilterBetaOffsetDiv2; }
+  Int       getLoopFilterTcOffset           ()      { return m_loopFilterTcOffsetDiv2; }
+  Bool      getDeblockingFilterControlPresent()  { return  m_DeblockingFilterControlPresent; }
+  Bool      getDeblockingFilterMetric       ()      { return m_DeblockingFilterMetric; }
+
+  //==== Motion search ========
+  Int       getFastSearch                   ()      { return  m_iFastSearch; }
+  Int       getSearchRange                  ()      { return  m_iSearchRange; }
+
+  //==== Quality control ========
+  Int       getMaxDeltaQP                   ()      { return  m_iMaxDeltaQP; }
+  Int       getMaxCuDQPDepth                ()      { return  m_iMaxCuDQPDepth; }
+  Bool      getUseAdaptiveQP                ()      { return  m_bUseAdaptiveQP; }
+  Int       getQPAdaptationRange            ()      { return  m_iQPAdaptationRange; }
+
+  //==== Tool list ========
+  Void      setUseASR                       ( Bool  b )     { m_bUseASR     = b; }
+  Void      setUseHADME                     ( Bool  b )     { m_bUseHADME   = b; }
+  Void      setUseRDOQ                      ( Bool  b )     { m_useRDOQ    = b; }
+  Void      setUseRDOQTS                    ( Bool  b )     { m_useRDOQTS  = b; }
+  Void      setRDpenalty                 ( UInt  b )     { m_rdPenalty  = b; }
+  Void      setUseFastEnc                   ( Bool  b )     { m_bUseFastEnc = b; }
+  Void      setUseEarlyCU                   ( Bool  b )     { m_bUseEarlyCU = b; }
+  Void      setUseFastDecisionForMerge      ( Bool  b )     { m_useFastDecisionForMerge = b; }
+  Void      setUseCbfFastMode            ( Bool  b )     { m_bUseCbfFastMode = b; }
+  Void      setUseEarlySkipDetection        ( Bool  b )     { m_useEarlySkipDetection = b; }
+  Void      setUseConstrainedIntraPred      ( Bool  b )     { m_bUseConstrainedIntraPred = b; }
+  Void      setPCMInputBitDepthFlag         ( Bool  b )     { m_bPCMInputBitDepthFlag = b; }
+  Void      setPCMFilterDisableFlag         ( Bool  b )     {  m_bPCMFilterDisableFlag = b; }
+  Void      setUsePCM                       ( Bool  b )     {  m_usePCM = b;               }
+  Void      setPCMLog2MaxSize               ( UInt u )      { m_pcmLog2MaxSize = u;      }
+  Void      setPCMLog2MinSize               ( UInt u )     { m_uiPCMLog2MinSize = u;      }
+  Void      setdQPs                         ( Int*  p )     { m_aidQP       = p; }  ///< stores the pointer itself; the caller retains ownership of the array
+  Void      setDeltaQpRD                    ( UInt  u )     {m_uiDeltaQpRD  = u; }
+  Bool      getUseASR                       ()      { return m_bUseASR;     }
+  Bool      getUseHADME                     ()      { return m_bUseHADME;   }
+  Bool      getUseRDOQ                      ()      { return m_useRDOQ;    }
+  Bool      getUseRDOQTS                    ()      { return m_useRDOQTS;  }
+  Int       getRDpenalty                    ()      { return m_rdPenalty;  }  ///< NOTE(review): set as UInt but returned as Int
+  Bool      getUseFastEnc                   ()      { return m_bUseFastEnc; }
+  Bool      getUseEarlyCU                   ()      { return m_bUseEarlyCU; }
+  Bool      getUseFastDecisionForMerge      ()      { return m_useFastDecisionForMerge; }
+  Bool      getUseCbfFastMode               ()      { return m_bUseCbfFastMode; }
+  Bool      getUseEarlySkipDetection        ()      { return m_useEarlySkipDetection; }
+  Bool      getUseConstrainedIntraPred      ()      { return m_bUseConstrainedIntraPred; }
+  Bool      getPCMInputBitDepthFlag         ()      { return m_bPCMInputBitDepthFlag;   }
+  Bool      getPCMFilterDisableFlag         ()      { return m_bPCMFilterDisableFlag;   }
+  Bool      getUsePCM                       ()      { return m_usePCM;                 }
+  UInt      getPCMLog2MaxSize               ()      { return m_pcmLog2MaxSize;  }
+  UInt      getPCMLog2MinSize               ()      { return  m_uiPCMLog2MinSize;  }
+
+  Bool      getUseCrossComponentPrediction        ()                const { return m_useCrossComponentPrediction;   }
+  Void      setUseCrossComponentPrediction        (const Bool value)      { m_useCrossComponentPrediction = value;  }
+  Bool      getUseReconBasedCrossCPredictionEstimate ()                const { return m_reconBasedCrossCPredictionEstimate;  }
+  Void      setUseReconBasedCrossCPredictionEstimate (const Bool value)      { m_reconBasedCrossCPredictionEstimate = value; }
+  Void      setSaoOffsetBitShift(ChannelType type, UInt uiBitShift)          { m_saoOffsetBitShift[type] = uiBitShift; }
+
+  Bool getUseTransformSkip                             ()      { return m_useTransformSkip;        }
+  Void setUseTransformSkip                             ( Bool b ) { m_useTransformSkip  = b;       }
+  Bool getUseResidualRotation                          ()            const { return m_useResidualRotation;  }
+  Void setUseResidualRotation                          (const Bool value)  { m_useResidualRotation = value; }
+  Bool getUseSingleSignificanceMapContext              ()            const { return m_useSingleSignificanceMapContext;  }
+  Void setUseSingleSignificanceMapContext              (const Bool value)  { m_useSingleSignificanceMapContext = value; }
+  Bool getUseGolombRiceParameterAdaptation             ()                 const { return m_useGolombRiceParameterAdaptation;  }
+  Void setUseGolombRiceParameterAdaptation             (const Bool value)       { m_useGolombRiceParameterAdaptation = value; }
+  Bool getAlignCABACBeforeBypass                       ()       const      { return m_alignCABACBeforeBypass;  }
+  Void setAlignCABACBeforeBypass                       (const Bool value)  { m_alignCABACBeforeBypass = value; }
+  Bool getUseResidualDPCM                              (const RDPCMSignallingMode signallingMode)        const      { return m_useResidualDPCM[signallingMode];  }
+  Void setUseResidualDPCM                              (const RDPCMSignallingMode signallingMode, const Bool value) { m_useResidualDPCM[signallingMode] = value; }
+  Bool getUseTransformSkipFast                         ()      { return m_useTransformSkipFast;    }
+  Void setUseTransformSkipFast                         ( Bool b ) { m_useTransformSkipFast  = b;   }
+  UInt getTransformSkipLog2MaxSize                     () const      { return m_transformSkipLog2MaxSize;     }
+  Void setTransformSkipLog2MaxSize                     ( UInt u )    { m_transformSkipLog2MaxSize  = u;       }
+  Void setDisableIntraReferenceSmoothing               (Bool bValue) { m_disableIntraReferenceSmoothing=bValue; }
+  Bool getDisableIntraReferenceSmoothing               ()      const { return m_disableIntraReferenceSmoothing; }
+
+  Int*      getdQPs                         ()      { return m_aidQP;       }  ///< NOTE(review): not initialised in the constructor -- verify setdQPs is called before use
+  UInt      getDeltaQpRD                    ()      { return m_uiDeltaQpRD; }
+
+  //====== Slice ========
+  Void  setSliceMode                   ( SliceConstraint  i )        { m_sliceMode = i;              }
+  Void  setSliceArgument               ( Int  i )                    { m_sliceArgument = i;          }
+  SliceConstraint getSliceMode         () const                      { return m_sliceMode;           }
+  Int   getSliceArgument               ()                            { return m_sliceArgument;       }
+  //====== Dependent Slice ========
+  Void  setSliceSegmentMode            ( SliceConstraint  i )        { m_sliceSegmentMode = i;       }
+  Void  setSliceSegmentArgument        ( Int  i )                    { m_sliceSegmentArgument = i;   }
+  SliceConstraint getSliceSegmentMode  () const                      { return m_sliceSegmentMode;    }
+  Int   getSliceSegmentArgument        ()                            { return m_sliceSegmentArgument;}
+  Void      setLFCrossSliceBoundaryFlag     ( Bool   bValue  )       { m_bLFCrossSliceBoundaryFlag = bValue; }
+  Bool      getLFCrossSliceBoundaryFlag     ()                       { return m_bLFCrossSliceBoundaryFlag;   }
+
+  Void      setUseSAO                  (Bool bVal)                   { m_bUseSAO = bVal; }
+  Bool      getUseSAO                  ()                            { return m_bUseSAO; }
+  Void  setMaxNumOffsetsPerPic                   (Int iVal)          { m_maxNumOffsetsPerPic = iVal; }
+  Int   getMaxNumOffsetsPerPic                   ()                  { return m_maxNumOffsetsPerPic; }
+  Void  setSaoCtuBoundary              (Bool val)                    { m_saoCtuBoundary = val; }
+  Bool  getSaoCtuBoundary              ()                            { return m_saoCtuBoundary; }
+  Void  setLFCrossTileBoundaryFlag               ( Bool   val  )     { m_loopFilterAcrossTilesEnabledFlag = val; }
+  Bool  getLFCrossTileBoundaryFlag               ()                  { return m_loopFilterAcrossTilesEnabledFlag;   }
+  Void  setTileUniformSpacingFlag      ( Bool b )                    { m_tileUniformSpacingFlag = b; }
+  Bool  getTileUniformSpacingFlag      ()                            { return m_tileUniformSpacingFlag; }
+  Void  setNumColumnsMinus1            ( Int i )                     { m_iNumColumnsMinus1 = i; }
+  Int   getNumColumnsMinus1            ()                            { return m_iNumColumnsMinus1; }
+  Void  setColumnWidth ( const std::vector<Int>& columnWidth )       { m_tileColumnWidth = columnWidth; }  ///< copies the whole vector
+  UInt  getColumnWidth                 ( UInt columnIdx )            { return m_tileColumnWidth[columnIdx]; }  ///< implicit Int -> UInt conversion; columnIdx is not bounds-checked
+  Void  setNumRowsMinus1               ( Int i )                     { m_iNumRowsMinus1 = i; }
+  Int   getNumRowsMinus1               ()                            { return m_iNumRowsMinus1; }
+  Void  setRowHeight ( const std::vector<Int>& rowHeight)            { m_tileRowHeight = rowHeight; }
+  UInt  getRowHeight                   ( UInt rowIdx )               { return m_tileRowHeight[rowIdx]; }  ///< implicit Int -> UInt conversion; rowIdx is not bounds-checked
+  Void  xCheckGSParameters();
+  Void  setWaveFrontSynchro(Int iWaveFrontSynchro)                   { m_iWaveFrontSynchro = iWaveFrontSynchro; }
+  Int   getWaveFrontsynchro()                                        { return m_iWaveFrontSynchro; }  ///< NOTE(review): lower-case 's' in "synchro" is inconsistent with the setter; part of the public name
+  Void  setWaveFrontSubstreams(Int iWaveFrontSubstreams)             { m_iWaveFrontSubstreams = iWaveFrontSubstreams; }
+  Int   getWaveFrontSubstreams()                                     { return m_iWaveFrontSubstreams; }
+  Void  setDecodedPictureHashSEIEnabled(Int b)                       { m_decodedPictureHashSEIEnabled = b; }  ///< NOTE(review): Int is used as an on/off flag here and in most SEI accessors below
+  Int   getDecodedPictureHashSEIEnabled()                            { return m_decodedPictureHashSEIEnabled; }
+  Void  setBufferingPeriodSEIEnabled(Int b)                          { m_bufferingPeriodSEIEnabled = b; }
+  Int   getBufferingPeriodSEIEnabled()                               { return m_bufferingPeriodSEIEnabled; }
+  Void  setPictureTimingSEIEnabled(Int b)                            { m_pictureTimingSEIEnabled = b; }
+  Int   getPictureTimingSEIEnabled()                                 { return m_pictureTimingSEIEnabled; }
+  Void  setRecoveryPointSEIEnabled(Int b)                            { m_recoveryPointSEIEnabled = b; }
+  Int   getRecoveryPointSEIEnabled()                                 { return m_recoveryPointSEIEnabled; }
+  Void  setToneMappingInfoSEIEnabled(Bool b)                         { m_toneMappingInfoSEIEnabled = b;  }
+  Bool  getToneMappingInfoSEIEnabled()                               { return m_toneMappingInfoSEIEnabled;  }
+  Void  setTMISEIToneMapId(Int b)                                    { m_toneMapId = b;  }
+  Int   getTMISEIToneMapId()                                         { return m_toneMapId;  }
+  Void  setTMISEIToneMapCancelFlag(Bool b)                           { m_toneMapCancelFlag=b;  }
+  Bool  getTMISEIToneMapCancelFlag()                                 { return m_toneMapCancelFlag;  }
+  Void  setTMISEIToneMapPersistenceFlag(Bool b)                      { m_toneMapPersistenceFlag = b;  }
+  Bool   getTMISEIToneMapPersistenceFlag()                           { return m_toneMapPersistenceFlag;  }
+  Void  setTMISEICodedDataBitDepth(Int b)                            { m_codedDataBitDepth = b;  }
+  Int   getTMISEICodedDataBitDepth()                                 { return m_codedDataBitDepth;  }
+  Void  setTMISEITargetBitDepth(Int b)                               { m_targetBitDepth = b;  }
+  Int   getTMISEITargetBitDepth()                                    { return m_targetBitDepth;  }
+  Void  setTMISEIModelID(Int b)                                      { m_modelId = b;  }
+  Int   getTMISEIModelID()                                           { return m_modelId;  }
+  Void  setTMISEIMinValue(Int b)                                     { m_minValue = b;  }
+  Int   getTMISEIMinValue()                                          { return m_minValue;  }
+  Void  setTMISEIMaxValue(Int b)                                     { m_maxValue = b;  }
+  Int   getTMISEIMaxValue()                                          { return m_maxValue;  }
+  Void  setTMISEISigmoidMidpoint(Int b)                              { m_sigmoidMidpoint = b;  }
+  Int   getTMISEISigmoidMidpoint()                                   { return m_sigmoidMidpoint;  }
+  Void  setTMISEISigmoidWidth(Int b)                                 { m_sigmoidWidth = b;  }
+  Int   getTMISEISigmoidWidth()                                      { return m_sigmoidWidth;  }
+  Void  setTMISEIStartOfCodedInterva( Int*  p )                      { m_startOfCodedInterval = p;  }  ///< NOTE(review): "Interva" (missing 'l') is part of the public name; renaming would break callers. Stores the pointer itself; caller retains ownership
+  Int*  getTMISEIStartOfCodedInterva()                               { return m_startOfCodedInterval;  }
+  Void  setTMISEINumPivots(Int b)                                    { m_numPivots = b;  }
+  Int   getTMISEINumPivots()                                         { return m_numPivots;  }
+  Void  setTMISEICodedPivotValue( Int*  p )                          { m_codedPivotValue = p;  }  ///< stores the pointer itself; caller retains ownership of the array
+  Int*  getTMISEICodedPivotValue()                                   { return m_codedPivotValue;  }
+  Void  setTMISEITargetPivotValue( Int*  p )                         { m_targetPivotValue = p;  }
+  Int*  getTMISEITargetPivotValue()                                  { return m_targetPivotValue;  }
+  Void  setTMISEICameraIsoSpeedIdc(Int b)                            { m_cameraIsoSpeedIdc = b;  }
+  Int   getTMISEICameraIsoSpeedIdc()                                 { return m_cameraIsoSpeedIdc;  }
+  Void  setTMISEICameraIsoSpeedValue(Int b)                          { m_cameraIsoSpeedValue = b;  }
+  Int   getTMISEICameraIsoSpeedValue()                               { return m_cameraIsoSpeedValue;  }
+  Void  setTMISEIExposureIndexIdc(Int b)                             { m_exposureIndexIdc = b;  }
+  Int   getTMISEIExposurIndexIdc()                                   { return m_exposureIndexIdc;  }  ///< NOTE(review): "Exposur" typo is part of the public name; renaming would break callers
+  Void  setTMISEIExposureIndexValue(Int b)                           { m_exposureIndexValue = b;  }
+  Int   getTMISEIExposurIndexValue()                                 { return m_exposureIndexValue;  }  ///< NOTE(review): same "Exposur" typo as the getter above
+  Void  setTMISEIExposureCompensationValueSignFlag(Int b)            { m_exposureCompensationValueSignFlag = b;  }
+  Int   getTMISEIExposureCompensationValueSignFlag()                 { return m_exposureCompensationValueSignFlag;  }
+  Void  setTMISEIExposureCompensationValueNumerator(Int b)           { m_exposureCompensationValueNumerator = b;  }
+  Int   getTMISEIExposureCompensationValueNumerator()                { return m_exposureCompensationValueNumerator;  }
+  Void  setTMISEIExposureCompensationValueDenomIdc(Int b)            { m_exposureCompensationValueDenomIdc =b;  }
+  Int   getTMISEIExposureCompensationValueDenomIdc()                 { return m_exposureCompensationValueDenomIdc;  }
+  Void  setTMISEIRefScreenLuminanceWhite(Int b)                      { m_refScreenLuminanceWhite = b;  }
+  Int   getTMISEIRefScreenLuminanceWhite()                           { return m_refScreenLuminanceWhite;  }
+  Void  setTMISEIExtendedRangeWhiteLevel(Int b)                      { m_extendedRangeWhiteLevel = b;  }
+  Int   getTMISEIExtendedRangeWhiteLevel()                           { return m_extendedRangeWhiteLevel;  }
+  Void  setTMISEINominalBlackLevelLumaCodeValue(Int b)               { m_nominalBlackLevelLumaCodeValue = b;  }
+  Int   getTMISEINominalBlackLevelLumaCodeValue()                    { return m_nominalBlackLevelLumaCodeValue;  }
+  Void  setTMISEINominalWhiteLevelLumaCodeValue(Int b)               { m_nominalWhiteLevelLumaCodeValue = b;  }
+  Int   getTMISEINominalWhiteLevelLumaCodeValue()                    { return m_nominalWhiteLevelLumaCodeValue;  }
+  Void  setTMISEIExtendedWhiteLevelLumaCodeValue(Int b)              { m_extendedWhiteLevelLumaCodeValue =b;  }
+  Int   getTMISEIExtendedWhiteLevelLumaCodeValue()                   { return m_extendedWhiteLevelLumaCodeValue;  }
+  Void  setFramePackingArrangementSEIEnabled(Int b)                  { m_framePackingSEIEnabled = b; }
+  Int   getFramePackingArrangementSEIEnabled()                       { return m_framePackingSEIEnabled; }
+  Void  setFramePackingArrangementSEIType(Int b)                     { m_framePackingSEIType = b; }
+  Int   getFramePackingArrangementSEIType()                          { return m_framePackingSEIType; }
+  Void  setFramePackingArrangementSEIId(Int b)                       { m_framePackingSEIId = b; }
+  Int   getFramePackingArrangementSEIId()                            { return m_framePackingSEIId; }
+  Void  setFramePackingArrangementSEIQuincunx(Int b)                 { m_framePackingSEIQuincunx = b; }
+  Int   getFramePackingArrangementSEIQuincunx()                      { return m_framePackingSEIQuincunx; }
+  Void  setFramePackingArrangementSEIInterpretation(Int b)           { m_framePackingSEIInterpretation = b; }
+  Int   getFramePackingArrangementSEIInterpretation()                { return m_framePackingSEIInterpretation; }
+  Void  setSegmentedRectFramePackingArrangementSEIEnabled(Int b)     { m_segmentedRectFramePackingSEIEnabled = b; }
+  Int   getSegmentedRectFramePackingArrangementSEIEnabled()          { return m_segmentedRectFramePackingSEIEnabled; }
+  Void  setSegmentedRectFramePackingArrangementSEICancel(Int b)      { m_segmentedRectFramePackingSEICancel = b; }
+  Int   getSegmentedRectFramePackingArrangementSEICancel()           { return m_segmentedRectFramePackingSEICancel; }
+  Void  setSegmentedRectFramePackingArrangementSEIType(Int b)        { m_segmentedRectFramePackingSEIType = b; }
+  Int   getSegmentedRectFramePackingArrangementSEIType()             { return m_segmentedRectFramePackingSEIType; }
+  Void  setSegmentedRectFramePackingArrangementSEIPersistence(Int b) { m_segmentedRectFramePackingSEIPersistence = b; }
+  Int   getSegmentedRectFramePackingArrangementSEIPersistence()      { return m_segmentedRectFramePackingSEIPersistence; }
+  Void  setDisplayOrientationSEIAngle(Int b)                         { m_displayOrientationSEIAngle = b; }
+  Int   getDisplayOrientationSEIAngle()                              { return m_displayOrientationSEIAngle; }
+  Void  setTemporalLevel0IndexSEIEnabled(Int b)                      { m_temporalLevel0IndexSEIEnabled = b; }
+  Int   getTemporalLevel0IndexSEIEnabled()                           { return m_temporalLevel0IndexSEIEnabled; }
+  Void  setGradualDecodingRefreshInfoEnabled(Int b)                  { m_gradualDecodingRefreshInfoEnabled = b;    }
+  Int   getGradualDecodingRefreshInfoEnabled()                       { return m_gradualDecodingRefreshInfoEnabled; }
+  Void  setNoDisplaySEITLayer(Int b)                                 { m_noDisplaySEITLayer = b;    }
+  Int   getNoDisplaySEITLayer()                                      { return m_noDisplaySEITLayer; }
+  Void  setDecodingUnitInfoSEIEnabled(Int b)                         { m_decodingUnitInfoSEIEnabled = b;    }
+  Int   getDecodingUnitInfoSEIEnabled()                              { return m_decodingUnitInfoSEIEnabled; }
+  Void  setSOPDescriptionSEIEnabled(Int b)                           { m_SOPDescriptionSEIEnabled = b; }
+  Int   getSOPDescriptionSEIEnabled()                                { return m_SOPDescriptionSEIEnabled; }
+  Void  setScalableNestingSEIEnabled(Int b)                          { m_scalableNestingSEIEnabled = b; }
+  Int   getScalableNestingSEIEnabled()                               { return m_scalableNestingSEIEnabled; }
+  Void  setTMCTSSEIEnabled(Bool b)                                   { m_tmctsSEIEnabled = b; }
+  Bool  getTMCTSSEIEnabled()                                         { return m_tmctsSEIEnabled; }
+  Void  setTimeCodeSEIEnabled(Bool b)                                { m_timeCodeSEIEnabled = b; }
+  Bool  getTimeCodeSEIEnabled()                                      { return m_timeCodeSEIEnabled; }
+  Void  setNumberOfTimeSets(Int value)                               { m_timeCodeSEINumTs = value; }
+  Int   getNumberOfTimesets()                                        { return m_timeCodeSEINumTs; }  ///< NOTE(review): "Timesets" casing differs from the setter's "TimeSets"; part of the public name
+  Void  setTimeSet(TComSEITimeSet element, Int index)                { m_timeSetArray[index] = element; }  ///< index is not range-checked
+  TComSEITimeSet &getTimeSet(Int index)                              { return m_timeSetArray[index]; }
+  const TComSEITimeSet &getTimeSet(Int index) const                  { return m_timeSetArray[index]; }
+  Void  setKneeSEIEnabled(Int b)                                     { m_kneeSEIEnabled = b; }  ///< NOTE(review): takes Int, yet the getter returns Bool
+  Bool  getKneeSEIEnabled()                                          { return m_kneeSEIEnabled; }
+  Void  setKneeSEIId(Int b)                                          { m_kneeSEIId = b; }
+  Int   getKneeSEIId()                                               { return m_kneeSEIId; }
+  Void  setKneeSEICancelFlag(Bool b)                                 { m_kneeSEICancelFlag=b; }
+  Bool  getKneeSEICancelFlag()                                       { return m_kneeSEICancelFlag; }
+  Void  setKneeSEIPersistenceFlag(Bool b)                            { m_kneeSEIPersistenceFlag = b; }
+  Bool  getKneeSEIPersistenceFlag()                                  { return m_kneeSEIPersistenceFlag; }
+  Void  setKneeSEIInputDrange(Int b)                                 { m_kneeSEIInputDrange = b; }
+  Int   getKneeSEIInputDrange()                                      { return m_kneeSEIInputDrange; }
+  Void  setKneeSEIInputDispLuminance(Int b)                          { m_kneeSEIInputDispLuminance = b; }
+  Int   getKneeSEIInputDispLuminance()                               { return m_kneeSEIInputDispLuminance; }
+  Void  setKneeSEIOutputDrange(Int b)                                { m_kneeSEIOutputDrange = b; }
+  Int   getKneeSEIOutputDrange()                                     { return m_kneeSEIOutputDrange; }
+  Void  setKneeSEIOutputDispLuminance(Int b)                         { m_kneeSEIOutputDispLuminance = b; }
+  Int   getKneeSEIOutputDispLuminance()                              { return m_kneeSEIOutputDispLuminance; }
+  Void  setKneeSEINumKneePointsMinus1(Int b)                         { m_kneeSEINumKneePointsMinus1 = b; }
+  Int   getKneeSEINumKneePointsMinus1()                              { return m_kneeSEINumKneePointsMinus1; }
+  Void  setKneeSEIInputKneePoint(Int *p)                             { m_kneeSEIInputKneePoint = p; }  ///< stores the pointer itself; caller retains ownership of the array
+  Int*  getKneeSEIInputKneePoint()                                   { return m_kneeSEIInputKneePoint; }
+  Void  setKneeSEIOutputKneePoint(Int *p)                            { m_kneeSEIOutputKneePoint = p; }
+  Int*  getKneeSEIOutputKneePoint()                                  { return m_kneeSEIOutputKneePoint; }
+  Void  setMasteringDisplaySEI(const TComSEIMasteringDisplay &src)   { m_masteringDisplay = src; }  ///< copies the whole struct
+  const TComSEIMasteringDisplay &getMasteringDisplaySEI() const      { return m_masteringDisplay; }
+  Void         setUseWP               ( Bool b )                     { m_useWeightedPred   = b;    }
+  Void         setWPBiPred            ( Bool b )                     { m_useWeightedBiPred = b;    }
+  Bool         getUseWP               ()                             { return m_useWeightedPred;   }
+  Bool         getWPBiPred            ()                             { return m_useWeightedBiPred; }
+  Void         setLog2ParallelMergeLevelMinus2   ( UInt u )          { m_log2ParallelMergeLevelMinus2       = u;    }
+  UInt         getLog2ParallelMergeLevelMinus2   ()                  { return m_log2ParallelMergeLevelMinus2;       }
+  Void         setMaxNumMergeCand                ( UInt u )          { m_maxNumMergeCand = u;      }
+  UInt         getMaxNumMergeCand                ()                  { return m_maxNumMergeCand;   }
+  Void         setUseScalingListId    ( ScalingListMode u )          { m_useScalingListId       = u;   }
+  ScalingListMode getUseScalingListId    ()                          { return m_useScalingListId;      }
+  Void         setScalingListFile     ( Char*  pch )                 { m_scalingListFile     = pch; }  ///< stores the raw pointer, not a copy; the string must outlive this object
+  Char*        getScalingListFile     ()                             { return m_scalingListFile;    }
+  Void         setTMVPModeId ( Int  u )                              { m_TMVPModeId = u;    }
+  Int          getTMVPModeId ()                                      { return m_TMVPModeId; }
+  Void         setSignHideFlag( Int signHideFlag )                   { m_signHideFlag = signHideFlag; }
+  Int          getSignHideFlag()                                     { return m_signHideFlag; }
+  Bool         getUseRateCtrl         ()                             { return m_RCEnableRateControl;   }
+  Void         setUseRateCtrl         ( Bool b )                     { m_RCEnableRateControl = b;      }
+  Int          getTargetBitrate       ()                             { return m_RCTargetBitrate;       }
+  Void         setTargetBitrate       ( Int bitrate )                { m_RCTargetBitrate  = bitrate;   }
+  Int          getKeepHierBit         ()                             { return m_RCKeepHierarchicalBit; }
+  Void         setKeepHierBit         ( Int i )                      { m_RCKeepHierarchicalBit = i;    }
+  Bool         getLCULevelRC          ()                             { return m_RCLCULevelRC; }
+  Void         setLCULevelRC          ( Bool b )                     { m_RCLCULevelRC = b; }
+  Bool         getUseLCUSeparateModel ()                             { return m_RCUseLCUSeparateModel; }
+  Void         setUseLCUSeparateModel ( Bool b )                     { m_RCUseLCUSeparateModel = b;    }
+  Int          getInitialQP           ()                             { return m_RCInitialQP;           }
+  Void         setInitialQP           ( Int QP )                     { m_RCInitialQP = QP;             }
+  Bool         getForceIntraQP        ()                             { return m_RCForceIntraQP;        }
+  Void         setForceIntraQP        ( Bool b )                     { m_RCForceIntraQP = b;           }
+  Bool         getTransquantBypassEnableFlag()                       { return m_TransquantBypassEnableFlag; }
+  Void         setTransquantBypassEnableFlag(Bool flag)              { m_TransquantBypassEnableFlag = flag; }
+  Bool         getCUTransquantBypassFlagForceValue()                 { return m_CUTransquantBypassFlagForce; }
+  Void         setCUTransquantBypassFlagForceValue(Bool flag)        { m_CUTransquantBypassFlagForce = flag; }
+  CostMode     getCostMode( )                                        { return m_costMode; }
+  Void         setCostMode(CostMode m )                              { m_costMode = m; }
+
+  Void         setVPS(TComVPS *p)                                    { m_cVPS = *p; }
+  TComVPS *    getVPS()                                              { return &m_cVPS; }
+  Void         setUseRecalculateQPAccordingToLambda (Bool b)         { m_recalculateQPAccordingToLambda = b;    }
+  Bool         getUseRecalculateQPAccordingToLambda ()               { return m_recalculateQPAccordingToLambda; }
+
+  Void         setUseStrongIntraSmoothing ( Bool b )                 { m_useStrongIntraSmoothing = b;    }
+  Bool         getUseStrongIntraSmoothing ()                         { return m_useStrongIntraSmoothing; }
+
+  Void         setActiveParameterSetsSEIEnabled ( Int b )            { m_activeParameterSetsSEIEnabled = b; }
+  Int          getActiveParameterSetsSEIEnabled ()                   { return m_activeParameterSetsSEIEnabled; }
+  Bool         getVuiParametersPresentFlag()                         { return m_vuiParametersPresentFlag; }
+  Void         setVuiParametersPresentFlag(Bool i)                   { m_vuiParametersPresentFlag = i; }
+  Bool         getAspectRatioInfoPresentFlag()                       { return m_aspectRatioInfoPresentFlag; }
+  Void         setAspectRatioInfoPresentFlag(Bool i)                 { m_aspectRatioInfoPresentFlag = i; }
+  Int          getAspectRatioIdc()                                   { return m_aspectRatioIdc; }
+  Void         setAspectRatioIdc(Int i)                              { m_aspectRatioIdc = i; }
+  Int          getSarWidth()                                         { return m_sarWidth; }
+  Void         setSarWidth(Int i)                                    { m_sarWidth = i; }
+  Int          getSarHeight()                                        { return m_sarHeight; }
+  Void         setSarHeight(Int i)                                   { m_sarHeight = i; }
+  Bool         getOverscanInfoPresentFlag()                          { return m_overscanInfoPresentFlag; }
+  Void         setOverscanInfoPresentFlag(Bool i)                    { m_overscanInfoPresentFlag = i; }
+  Bool         getOverscanAppropriateFlag()                          { return m_overscanAppropriateFlag; }
+  Void         setOverscanAppropriateFlag(Bool i)                    { m_overscanAppropriateFlag = i; }
+  Bool         getVideoSignalTypePresentFlag()                       { return m_videoSignalTypePresentFlag; }
+  Void         setVideoSignalTypePresentFlag(Bool i)                 { m_videoSignalTypePresentFlag = i; }
+  Int          getVideoFormat()                                      { return m_videoFormat; }
+  Void         setVideoFormat(Int i)                                 { m_videoFormat = i; }
+  Bool         getVideoFullRangeFlag()                               { return m_videoFullRangeFlag; }
+  Void         setVideoFullRangeFlag(Bool i)                         { m_videoFullRangeFlag = i; }
+  Bool         getColourDescriptionPresentFlag()                     { return m_colourDescriptionPresentFlag; }
+  Void         setColourDescriptionPresentFlag(Bool i)               { m_colourDescriptionPresentFlag = i; }
+  Int          getColourPrimaries()                                  { return m_colourPrimaries; }
+  Void         setColourPrimaries(Int i)                             { m_colourPrimaries = i; }
+  Int          getTransferCharacteristics()                          { return m_transferCharacteristics; }
+  Void         setTransferCharacteristics(Int i)                     { m_transferCharacteristics = i; }
+  Int          getMatrixCoefficients()                               { return m_matrixCoefficients; }
+  Void         setMatrixCoefficients(Int i)                          { m_matrixCoefficients = i; }
+  Bool         getChromaLocInfoPresentFlag()                         { return m_chromaLocInfoPresentFlag; }
+  Void         setChromaLocInfoPresentFlag(Bool i)                   { m_chromaLocInfoPresentFlag = i; }
+  Int          getChromaSampleLocTypeTopField()                      { return m_chromaSampleLocTypeTopField; }
+  Void         setChromaSampleLocTypeTopField(Int i)                 { m_chromaSampleLocTypeTopField = i; }
+  Int          getChromaSampleLocTypeBottomField()                   { return m_chromaSampleLocTypeBottomField; }
+  Void         setChromaSampleLocTypeBottomField(Int i)              { m_chromaSampleLocTypeBottomField = i; }
+  Bool         getNeutralChromaIndicationFlag()                      { return m_neutralChromaIndicationFlag; }
+  Void         setNeutralChromaIndicationFlag(Bool i)                { m_neutralChromaIndicationFlag = i; }
+  Window      &getDefaultDisplayWindow()                             { return m_defaultDisplayWindow; }
+  Void         setDefaultDisplayWindow (Int offsetLeft, Int offsetRight, Int offsetTop, Int offsetBottom ) { m_defaultDisplayWindow.setWindow (offsetLeft, offsetRight, offsetTop, offsetBottom); }
+  Bool         getFrameFieldInfoPresentFlag()                        { return m_frameFieldInfoPresentFlag; }
+  Void         setFrameFieldInfoPresentFlag(Bool i)                  { m_frameFieldInfoPresentFlag = i; }
+  Bool         getPocProportionalToTimingFlag()                      { return m_pocProportionalToTimingFlag; }
+  Void         setPocProportionalToTimingFlag(Bool x)                { m_pocProportionalToTimingFlag = x;    }
+  Int          getNumTicksPocDiffOneMinus1()                         { return m_numTicksPocDiffOneMinus1;    }
+  Void         setNumTicksPocDiffOneMinus1(Int x)                    { m_numTicksPocDiffOneMinus1 = x;       }
+  Bool         getBitstreamRestrictionFlag()                         { return m_bitstreamRestrictionFlag; }
+  Void         setBitstreamRestrictionFlag(Bool i)                   { m_bitstreamRestrictionFlag = i; }
+  Bool         getTilesFixedStructureFlag()                          { return m_tilesFixedStructureFlag; }
+  Void         setTilesFixedStructureFlag(Bool i)                    { m_tilesFixedStructureFlag = i; }
+  Bool         getMotionVectorsOverPicBoundariesFlag()               { return m_motionVectorsOverPicBoundariesFlag; }
+  Void         setMotionVectorsOverPicBoundariesFlag(Bool i)         { m_motionVectorsOverPicBoundariesFlag = i; }
+  Int          getMinSpatialSegmentationIdc()                        { return m_minSpatialSegmentationIdc; }
+  Void         setMinSpatialSegmentationIdc(Int i)                   { m_minSpatialSegmentationIdc = i; }
+  Int          getMaxBytesPerPicDenom()                              { return m_maxBytesPerPicDenom; }
+  Void         setMaxBytesPerPicDenom(Int i)                         { m_maxBytesPerPicDenom = i; }
+  Int          getMaxBitsPerMinCuDenom()                             { return m_maxBitsPerMinCuDenom; }
+  Void         setMaxBitsPerMinCuDenom(Int i)                        { m_maxBitsPerMinCuDenom = i; }
+  Int          getLog2MaxMvLengthHorizontal()                        { return m_log2MaxMvLengthHorizontal; }
+  Void         setLog2MaxMvLengthHorizontal(Int i)                   { m_log2MaxMvLengthHorizontal = i; }
+  Int          getLog2MaxMvLengthVertical()                          { return m_log2MaxMvLengthVertical; }
+  Void         setLog2MaxMvLengthVertical(Int i)                     { m_log2MaxMvLengthVertical = i; }
+
+  Bool         getProgressiveSourceFlag() const                      { return m_progressiveSourceFlag; }
+  Void         setProgressiveSourceFlag(Bool b)                      { m_progressiveSourceFlag = b; }
+
+  Bool         getInterlacedSourceFlag() const                       { return m_interlacedSourceFlag; }
+  Void         setInterlacedSourceFlag(Bool b)                       { m_interlacedSourceFlag = b; }
+
+  Bool         getNonPackedConstraintFlag() const                    { return m_nonPackedConstraintFlag; }
+  Void         setNonPackedConstraintFlag(Bool b)                    { m_nonPackedConstraintFlag = b; }
+
+  Bool         getFrameOnlyConstraintFlag() const                    { return m_frameOnlyConstraintFlag; }
+  Void         setFrameOnlyConstraintFlag(Bool b)                    { m_frameOnlyConstraintFlag = b; }
+
+  UInt         getBitDepthConstraintValue() const                    { return m_bitDepthConstraintValue; }
+  Void         setBitDepthConstraintValue(UInt v)                    { m_bitDepthConstraintValue=v; }
+
+  ChromaFormat getChromaFormatConstraintValue() const                { return m_chromaFormatConstraintValue; }
+  Void         setChromaFormatConstraintValue(ChromaFormat v)        { m_chromaFormatConstraintValue=v; }
+
+  Bool         getIntraConstraintFlag() const                        { return m_intraConstraintFlag; }
+  Void         setIntraConstraintFlag(Bool b)                        { m_intraConstraintFlag=b; }
+
+  Bool         getLowerBitRateConstraintFlag() const                 { return m_lowerBitRateConstraintFlag; }
+  Void         setLowerBitRateConstraintFlag(Bool b)                 { m_lowerBitRateConstraintFlag=b; }
+  Bool      getChromaSamplingFilterHintEnabled()                     { return m_chromaSamplingFilterHintEnabled;}
+  Void      setChromaSamplingFilterHintEnabled(Bool i)               { m_chromaSamplingFilterHintEnabled = i;}
+  Int       getChromaSamplingHorFilterIdc()                          { return m_chromaSamplingHorFilterIdc;}
+  Void      setChromaSamplingHorFilterIdc(Int i)                     { m_chromaSamplingHorFilterIdc = i;}
+  Int       getChromaSamplingVerFilterIdc()                          { return m_chromaSamplingVerFilterIdc;}
+  Void      setChromaSamplingVerFilterIdc(Int i)                     { m_chromaSamplingVerFilterIdc = i;}
+};
+
+//! \}
+
+#endif // !defined(AFX_TENCCFG_H__6B99B797_F4DA_4E46_8E78_7656339A6C41__INCLUDED_)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncCu.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1649 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncCu.cpp
+    \brief    Coding Unit (CU) encoder class
+*/
+
+#include <stdio.h>
+#include "TEncTop.h"
+#include "TEncCu.h"
+#include "TEncAnalyze.h"
+#include "TLibCommon/Debug.h"
+
+#include <cmath>
+#include <algorithm>
+using namespace std;
+
+
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / create / destroy
+// ====================================================================================================================
+
+/**
+ \param    uhTotalDepth  total number of allowable depths
+ \param    uiMaxWidth    largest CU width
+ \param    uiMaxHeight   largest CU height
+ */
+Void TEncCu::create(UChar uhTotalDepth, UInt uiMaxWidth, UInt uiMaxHeight, ChromaFormat chromaFormat)
+{
+  Int i;
+
+  m_uhTotalDepth   = uhTotalDepth + 1;
+  m_ppcBestCU      = new TComDataCU*[m_uhTotalDepth-1];
+  m_ppcTempCU      = new TComDataCU*[m_uhTotalDepth-1];
+
+  m_ppcPredYuvBest = new TComYuv*[m_uhTotalDepth-1];
+  m_ppcResiYuvBest = new TComYuv*[m_uhTotalDepth-1];
+  m_ppcRecoYuvBest = new TComYuv*[m_uhTotalDepth-1];
+  m_ppcPredYuvTemp = new TComYuv*[m_uhTotalDepth-1];
+  m_ppcResiYuvTemp = new TComYuv*[m_uhTotalDepth-1];
+  m_ppcRecoYuvTemp = new TComYuv*[m_uhTotalDepth-1];
+  m_ppcOrigYuv     = new TComYuv*[m_uhTotalDepth-1];
+
+  UInt uiNumPartitions;
+  for( i=0 ; i<m_uhTotalDepth-1 ; i++)
+  {
+    uiNumPartitions = 1<<( ( m_uhTotalDepth - i - 1 )<<1 );
+    UInt uiWidth  = uiMaxWidth  >> i;
+    UInt uiHeight = uiMaxHeight >> i;
+
+    m_ppcBestCU[i] = new TComDataCU; m_ppcBestCU[i]->create( chromaFormat, uiNumPartitions, uiWidth, uiHeight, false, uiMaxWidth >> (m_uhTotalDepth - 1) );
+    m_ppcTempCU[i] = new TComDataCU; m_ppcTempCU[i]->create( chromaFormat, uiNumPartitions, uiWidth, uiHeight, false, uiMaxWidth >> (m_uhTotalDepth - 1) );
+
+    m_ppcPredYuvBest[i] = new TComYuv; m_ppcPredYuvBest[i]->create(uiWidth, uiHeight, chromaFormat);
+    m_ppcResiYuvBest[i] = new TComYuv; m_ppcResiYuvBest[i]->create(uiWidth, uiHeight, chromaFormat);
+    m_ppcRecoYuvBest[i] = new TComYuv; m_ppcRecoYuvBest[i]->create(uiWidth, uiHeight, chromaFormat);
+
+    m_ppcPredYuvTemp[i] = new TComYuv; m_ppcPredYuvTemp[i]->create(uiWidth, uiHeight, chromaFormat);
+    m_ppcResiYuvTemp[i] = new TComYuv; m_ppcResiYuvTemp[i]->create(uiWidth, uiHeight, chromaFormat);
+    m_ppcRecoYuvTemp[i] = new TComYuv; m_ppcRecoYuvTemp[i]->create(uiWidth, uiHeight, chromaFormat);
+
+    m_ppcOrigYuv    [i] = new TComYuv; m_ppcOrigYuv    [i]->create(uiWidth, uiHeight, chromaFormat);
+  }
+
+  m_bEncodeDQP          = false;
+  m_CodeChromaQpAdjFlag = false;
+  m_ChromaQpAdjIdc      = 0;
+
+  // initialize partition order.
+  UInt* piTmp = &g_auiZscanToRaster[0];
+  initZscanToRaster( m_uhTotalDepth, 1, 0, piTmp);
+  initRasterToZscan( uiMaxWidth, uiMaxHeight, m_uhTotalDepth );
+
+  // initialize conversion matrix from partition index to pel
+  initRasterToPelXY( uiMaxWidth, uiMaxHeight, m_uhTotalDepth );
+}
+
+Void TEncCu::destroy()
+{
+  Int i;
+
+  for( i=0 ; i<m_uhTotalDepth-1 ; i++)
+  {
+    if(m_ppcBestCU[i])
+    {
+      m_ppcBestCU[i]->destroy();      delete m_ppcBestCU[i];      m_ppcBestCU[i] = NULL;
+    }
+    if(m_ppcTempCU[i])
+    {
+      m_ppcTempCU[i]->destroy();      delete m_ppcTempCU[i];      m_ppcTempCU[i] = NULL;
+    }
+    if(m_ppcPredYuvBest[i])
+    {
+      m_ppcPredYuvBest[i]->destroy(); delete m_ppcPredYuvBest[i]; m_ppcPredYuvBest[i] = NULL;
+    }
+    if(m_ppcResiYuvBest[i])
+    {
+      m_ppcResiYuvBest[i]->destroy(); delete m_ppcResiYuvBest[i]; m_ppcResiYuvBest[i] = NULL;
+    }
+    if(m_ppcRecoYuvBest[i])
+    {
+      m_ppcRecoYuvBest[i]->destroy(); delete m_ppcRecoYuvBest[i]; m_ppcRecoYuvBest[i] = NULL;
+    }
+    if(m_ppcPredYuvTemp[i])
+    {
+      m_ppcPredYuvTemp[i]->destroy(); delete m_ppcPredYuvTemp[i]; m_ppcPredYuvTemp[i] = NULL;
+    }
+    if(m_ppcResiYuvTemp[i])
+    {
+      m_ppcResiYuvTemp[i]->destroy(); delete m_ppcResiYuvTemp[i]; m_ppcResiYuvTemp[i] = NULL;
+    }
+    if(m_ppcRecoYuvTemp[i])
+    {
+      m_ppcRecoYuvTemp[i]->destroy(); delete m_ppcRecoYuvTemp[i]; m_ppcRecoYuvTemp[i] = NULL;
+    }
+    if(m_ppcOrigYuv[i])
+    {
+      m_ppcOrigYuv[i]->destroy();     delete m_ppcOrigYuv[i];     m_ppcOrigYuv[i] = NULL;
+    }
+  }
+  if(m_ppcBestCU)
+  {
+    delete [] m_ppcBestCU;
+    m_ppcBestCU = NULL;
+  }
+  if(m_ppcTempCU)
+  {
+    delete [] m_ppcTempCU;
+    m_ppcTempCU = NULL;
+  }
+
+  if(m_ppcPredYuvBest)
+  {
+    delete [] m_ppcPredYuvBest;
+    m_ppcPredYuvBest = NULL;
+  }
+  if(m_ppcResiYuvBest)
+  {
+    delete [] m_ppcResiYuvBest;
+    m_ppcResiYuvBest = NULL;
+  }
+  if(m_ppcRecoYuvBest)
+  {
+    delete [] m_ppcRecoYuvBest;
+    m_ppcRecoYuvBest = NULL;
+  }
+  if(m_ppcPredYuvTemp)
+  {
+    delete [] m_ppcPredYuvTemp;
+    m_ppcPredYuvTemp = NULL;
+  }
+  if(m_ppcResiYuvTemp)
+  {
+    delete [] m_ppcResiYuvTemp;
+    m_ppcResiYuvTemp = NULL;
+  }
+  if(m_ppcRecoYuvTemp)
+  {
+    delete [] m_ppcRecoYuvTemp;
+    m_ppcRecoYuvTemp = NULL;
+  }
+  if(m_ppcOrigYuv)
+  {
+    delete [] m_ppcOrigYuv;
+    m_ppcOrigYuv = NULL;
+  }
+}
+
+/** \param    pcEncTop      pointer of encoder class
+ */
+Void TEncCu::init( TEncTop* pcEncTop )
+{
+  m_pcEncCfg           = pcEncTop;
+  m_pcPredSearch       = pcEncTop->getPredSearch();
+  m_pcTrQuant          = pcEncTop->getTrQuant();
+  m_pcRdCost           = pcEncTop->getRdCost();
+
+  m_pcEntropyCoder     = pcEncTop->getEntropyCoder();
+  m_pcBinCABAC         = pcEncTop->getBinCABAC();
+
+  m_pppcRDSbacCoder    = pcEncTop->getRDSbacCoder();
+  m_pcRDGoOnSbacCoder  = pcEncTop->getRDGoOnSbacCoder();
+
+  m_pcRateCtrl         = pcEncTop->getRateCtrl();
+}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+/** \param  pCtu  pointer of CU data class
+ */
+Void TEncCu::compressCtu( TComDataCU* pCtu )
+{
+  // initialize CU data
+  m_ppcBestCU[0]->initCtu( pCtu->getPic(), pCtu->getCtuRsAddr() );
+  m_ppcTempCU[0]->initCtu( pCtu->getPic(), pCtu->getCtuRsAddr() );
+
+  // analysis of CU
+  DEBUG_STRING_NEW(sDebug)
+
+  xCompressCU( m_ppcBestCU[0], m_ppcTempCU[0], 0 DEBUG_STRING_PASS_INTO(sDebug) );
+  DEBUG_STRING_OUTPUT(std::cout, sDebug)
+
+#if ADAPTIVE_QP_SELECTION
+  if( m_pcEncCfg->getUseAdaptQpSelect() )
+  {
+    if(pCtu->getSlice()->getSliceType()!=I_SLICE) //IIII
+    {
+      xCtuCollectARLStats( pCtu );
+    }
+  }
+#endif
+}
+/** \param  pCtu  pointer of CU data class
+ */
+Void TEncCu::encodeCtu ( TComDataCU* pCtu )
+{
+  if ( pCtu->getSlice()->getPPS()->getUseDQP() )
+  {
+    setdQPFlag(true);
+  }
+
+  if ( pCtu->getSlice()->getUseChromaQpAdj() )
+  {
+    setCodeChromaQpAdjFlag(true);
+  }
+
+  // Encode CU data
+  xEncodeCU( pCtu, 0, 0 );
+}
+
+// ====================================================================================================================
+// Protected member functions
+// ====================================================================================================================
+/** Derive small set of test modes for AMP encoder speed-up
+ *\param   rpcBestCU
+ *\param   eParentPartSize
+ *\param   bTestAMP_Hor
+ *\param   bTestAMP_Ver
+ *\param   bTestMergeAMP_Hor
+ *\param   bTestMergeAMP_Ver
+ *\returns Void
+*/
+#if AMP_ENC_SPEEDUP
+#if AMP_MRG
+Void TEncCu::deriveTestModeAMP (TComDataCU *pcBestCU, PartSize eParentPartSize, Bool &bTestAMP_Hor, Bool &bTestAMP_Ver, Bool &bTestMergeAMP_Hor, Bool &bTestMergeAMP_Ver)
+#else
+Void TEncCu::deriveTestModeAMP (TComDataCU *pcBestCU, PartSize eParentPartSize, Bool &bTestAMP_Hor, Bool &bTestAMP_Ver)
+#endif
+{
+  if ( pcBestCU->getPartitionSize(0) == SIZE_2NxN )
+  {
+    bTestAMP_Hor = true;
+  }
+  else if ( pcBestCU->getPartitionSize(0) == SIZE_Nx2N )
+  {
+    bTestAMP_Ver = true;
+  }
+  else if ( pcBestCU->getPartitionSize(0) == SIZE_2Nx2N && pcBestCU->getMergeFlag(0) == false && pcBestCU->isSkipped(0) == false )
+  {
+    bTestAMP_Hor = true;
+    bTestAMP_Ver = true;
+  }
+
+#if AMP_MRG
+  //! Utilizing the partition size of parent PU
+  if ( eParentPartSize >= SIZE_2NxnU && eParentPartSize <= SIZE_nRx2N )
+  {
+    bTestMergeAMP_Hor = true;
+    bTestMergeAMP_Ver = true;
+  }
+
+  if ( eParentPartSize == NUMBER_OF_PART_SIZES ) //! if parent is intra
+  {
+    if ( pcBestCU->getPartitionSize(0) == SIZE_2NxN )
+    {
+      bTestMergeAMP_Hor = true;
+    }
+    else if ( pcBestCU->getPartitionSize(0) == SIZE_Nx2N )
+    {
+      bTestMergeAMP_Ver = true;
+    }
+  }
+
+  if ( pcBestCU->getPartitionSize(0) == SIZE_2Nx2N && pcBestCU->isSkipped(0) == false )
+  {
+    bTestMergeAMP_Hor = true;
+    bTestMergeAMP_Ver = true;
+  }
+
+  if ( pcBestCU->getWidth(0) == 64 )
+  {
+    bTestAMP_Hor = false;
+    bTestAMP_Ver = false;
+  }
+#else
+  //! Utilizing the partition size of parent PU
+  if ( eParentPartSize >= SIZE_2NxnU && eParentPartSize <= SIZE_nRx2N )
+  {
+    bTestAMP_Hor = true;
+    bTestAMP_Ver = true;
+  }
+
+  if ( eParentPartSize == SIZE_2Nx2N )
+  {
+    bTestAMP_Hor = false;
+    bTestAMP_Ver = false;
+  }
+#endif
+}
+#endif
+
+
+// ====================================================================================================================
+// Protected member functions
+// ====================================================================================================================
+/** Compress a CU block recursively with enabling sub-CTU-level delta QP
+ *\param   rpcBestCU
+ *\param   rpcTempCU
+ *\param   uiDepth
+ *\returns Void
+ *
+ *- for loop of QP value to compress the current CU with all possible QP
+*/
+#if AMP_ENC_SPEEDUP
+Void TEncCu::xCompressCU( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU, UInt uiDepth DEBUG_STRING_FN_DECLARE(sDebug_), PartSize eParentPartSize )
+#else
+Void TEncCu::xCompressCU( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU, UInt uiDepth )
+#endif
+{
+  TComPic* pcPic = rpcBestCU->getPic();
+  DEBUG_STRING_NEW(sDebug)
+
+  // get Original YUV data from picture
+  m_ppcOrigYuv[uiDepth]->copyFromPicYuv( pcPic->getPicYuvOrg(), rpcBestCU->getCtuRsAddr(), rpcBestCU->getZorderIdxInCtu() );
+
+    // variable for Early CU determination
+  Bool    bSubBranch = true;
+
+  // variable for Cbf fast mode PU decision
+  Bool    doNotBlockPu = true;
+  Bool    earlyDetectionSkipMode = false;
+
+  Bool bBoundary = false;
+  UInt uiLPelX   = rpcBestCU->getCUPelX();
+  UInt uiRPelX   = uiLPelX + rpcBestCU->getWidth(0)  - 1;
+  UInt uiTPelY   = rpcBestCU->getCUPelY();
+  UInt uiBPelY   = uiTPelY + rpcBestCU->getHeight(0) - 1;
+
+  Int iBaseQP = xComputeQP( rpcBestCU, uiDepth );
+  Int iMinQP;
+  Int iMaxQP;
+  Bool isAddLowestQP = false;
+
+  const UInt numberValidComponents = rpcBestCU->getPic()->getNumberValidComponents();
+
+  if( (g_uiMaxCUWidth>>uiDepth) >= rpcTempCU->getSlice()->getPPS()->getMinCuDQPSize() )
+  {
+    Int idQP = m_pcEncCfg->getMaxDeltaQP();
+    iMinQP = Clip3( -rpcTempCU->getSlice()->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA), MAX_QP, iBaseQP-idQP );
+    iMaxQP = Clip3( -rpcTempCU->getSlice()->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA), MAX_QP, iBaseQP+idQP );
+  }
+  else
+  {
+    iMinQP = rpcTempCU->getQP(0);
+    iMaxQP = rpcTempCU->getQP(0);
+  }
+
+  if ( m_pcEncCfg->getUseRateCtrl() )
+  {
+    iMinQP = m_pcRateCtrl->getRCQP();
+    iMaxQP = m_pcRateCtrl->getRCQP();
+  }
+
+  // transquant-bypass (TQB) processing loop variable initialisation ---
+
+  const Int lowestQP = iMinQP; // For TQB, use this QP which is the lowest non TQB QP tested (rather than QP'=0) - that way delta QPs are smaller, and TQB can be tested at all CU levels.
+
+  if ( (rpcTempCU->getSlice()->getPPS()->getTransquantBypassEnableFlag()) )
+  {
+    isAddLowestQP = true; // mark that the first iteration is to cost TQB mode.
+    iMinQP = iMinQP - 1;  // increase loop variable range by 1, to allow testing of TQB mode along with other QPs
+    if ( m_pcEncCfg->getCUTransquantBypassFlagForceValue() )
+    {
+      iMaxQP = iMinQP;
+    }
+  }
+
+  TComSlice * pcSlice = rpcTempCU->getPic()->getSlice(rpcTempCU->getPic()->getCurrSliceIdx());
+  // We need to split, so don't try these modes.
+  if ( ( uiRPelX < rpcBestCU->getSlice()->getSPS()->getPicWidthInLumaSamples() ) &&
+       ( uiBPelY < rpcBestCU->getSlice()->getSPS()->getPicHeightInLumaSamples() ) )
+  {
+    for (Int iQP=iMinQP; iQP<=iMaxQP; iQP++)
+    {
+      const Bool bIsLosslessMode = isAddLowestQP && (iQP == iMinQP);
+
+      if (bIsLosslessMode)
+      {
+        iQP = lowestQP;
+      }
+
+      m_ChromaQpAdjIdc = 0;
+      if (pcSlice->getUseChromaQpAdj())
+      {
+        /* Pre-estimation of chroma QP based on input block activity may be performed
+         * here, using for example m_ppcOrigYuv[uiDepth] */
+        /* To exercise the current code, the index used for adjustment is based on
+         * block position
+         */
+        Int lgMinCuSize = pcSlice->getSPS()->getLog2MinCodingBlockSize();
+        m_ChromaQpAdjIdc = ((uiLPelX >> lgMinCuSize) + (uiTPelY >> lgMinCuSize)) % (pcSlice->getPPS()->getChromaQpAdjTableSize() + 1);
+      }
+
+      rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+
+      // do inter modes, SKIP and 2Nx2N
+      if( rpcBestCU->getSlice()->getSliceType() != I_SLICE )
+      {
+        // 2Nx2N
+        if(m_pcEncCfg->getUseEarlySkipDetection())
+        {
+          xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_2Nx2N DEBUG_STRING_PASS_INTO(sDebug) );
+          rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );//by Competition for inter_2Nx2N
+        }
+        // SKIP
+        xCheckRDCostMerge2Nx2N( rpcBestCU, rpcTempCU DEBUG_STRING_PASS_INTO(sDebug), &earlyDetectionSkipMode );//by Merge for inter_2Nx2N
+        rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+
+        if(!m_pcEncCfg->getUseEarlySkipDetection())
+        {
+          // 2Nx2N, NxN
+          xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_2Nx2N DEBUG_STRING_PASS_INTO(sDebug) );
+          rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+          if(m_pcEncCfg->getUseCbfFastMode())
+          {
+            doNotBlockPu = rpcBestCU->getQtRootCbf( 0 ) != 0;
+          }
+        }
+      }
+
+      if (bIsLosslessMode) // Restore loop variable if lossless mode was searched.
+      {
+        iQP = iMinQP;
+      }
+    }
+
+    if(!earlyDetectionSkipMode)
+    {
+      for (Int iQP=iMinQP; iQP<=iMaxQP; iQP++)
+      {
+        const Bool bIsLosslessMode = isAddLowestQP && (iQP == iMinQP); // If lossless, then iQP is irrelevant for subsequent modules.
+
+        if (bIsLosslessMode)
+        {
+          iQP = lowestQP;
+        }
+
+        rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+
+        // do inter modes, NxN, 2NxN, and Nx2N
+        if( rpcBestCU->getSlice()->getSliceType() != I_SLICE )
+        {
+          // 2Nx2N, NxN
+          if(!( (rpcBestCU->getWidth(0)==8) && (rpcBestCU->getHeight(0)==8) ))
+          {
+            if( uiDepth == g_uiMaxCUDepth - g_uiAddCUDepth && doNotBlockPu)
+            {
+              xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_NxN DEBUG_STRING_PASS_INTO(sDebug)   );
+              rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+            }
+          }
+
+          if(doNotBlockPu)
+          {
+            xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_Nx2N DEBUG_STRING_PASS_INTO(sDebug)  );
+            rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+            if(m_pcEncCfg->getUseCbfFastMode() && rpcBestCU->getPartitionSize(0) == SIZE_Nx2N )
+            {
+              doNotBlockPu = rpcBestCU->getQtRootCbf( 0 ) != 0;
+            }
+          }
+          if(doNotBlockPu)
+          {
+            xCheckRDCostInter      ( rpcBestCU, rpcTempCU, SIZE_2NxN DEBUG_STRING_PASS_INTO(sDebug)  );
+            rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+            if(m_pcEncCfg->getUseCbfFastMode() && rpcBestCU->getPartitionSize(0) == SIZE_2NxN)
+            {
+              doNotBlockPu = rpcBestCU->getQtRootCbf( 0 ) != 0;
+            }
+          }
+
+          //! Try AMP (SIZE_2NxnU, SIZE_2NxnD, SIZE_nLx2N, SIZE_nRx2N)
+          if( pcPic->getSlice(0)->getSPS()->getAMPAcc(uiDepth) )
+          {
+#if AMP_ENC_SPEEDUP
+            Bool bTestAMP_Hor = false, bTestAMP_Ver = false;
+
+#if AMP_MRG
+            Bool bTestMergeAMP_Hor = false, bTestMergeAMP_Ver = false;
+
+            deriveTestModeAMP (rpcBestCU, eParentPartSize, bTestAMP_Hor, bTestAMP_Ver, bTestMergeAMP_Hor, bTestMergeAMP_Ver);
+#else
+            deriveTestModeAMP (rpcBestCU, eParentPartSize, bTestAMP_Hor, bTestAMP_Ver);
+#endif
+
+            //! Do horizontal AMP
+            if ( bTestAMP_Hor )
+            {
+              if(doNotBlockPu)
+              {
+                xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_2NxnU DEBUG_STRING_PASS_INTO(sDebug) );
+                rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+                if(m_pcEncCfg->getUseCbfFastMode() && rpcBestCU->getPartitionSize(0) == SIZE_2NxnU )
+                {
+                  doNotBlockPu = rpcBestCU->getQtRootCbf( 0 ) != 0;
+                }
+              }
+              if(doNotBlockPu)
+              {
+                xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_2NxnD DEBUG_STRING_PASS_INTO(sDebug) );
+                rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+                if(m_pcEncCfg->getUseCbfFastMode() && rpcBestCU->getPartitionSize(0) == SIZE_2NxnD )
+                {
+                  doNotBlockPu = rpcBestCU->getQtRootCbf( 0 ) != 0;
+                }
+              }
+            }
+#if AMP_MRG
+            else if ( bTestMergeAMP_Hor )
+            {
+              if(doNotBlockPu)
+              {
+                xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_2NxnU DEBUG_STRING_PASS_INTO(sDebug), true );
+                rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+                if(m_pcEncCfg->getUseCbfFastMode() && rpcBestCU->getPartitionSize(0) == SIZE_2NxnU )
+                {
+                  doNotBlockPu = rpcBestCU->getQtRootCbf( 0 ) != 0;
+                }
+              }
+              if(doNotBlockPu)
+              {
+                xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_2NxnD DEBUG_STRING_PASS_INTO(sDebug), true );
+                rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+                if(m_pcEncCfg->getUseCbfFastMode() && rpcBestCU->getPartitionSize(0) == SIZE_2NxnD )
+                {
+                  doNotBlockPu = rpcBestCU->getQtRootCbf( 0 ) != 0;
+                }
+              }
+            }
+#endif
+
+            //! Do vertical AMP
+            if ( bTestAMP_Ver )
+            {
+              if(doNotBlockPu)
+              {
+                xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_nLx2N DEBUG_STRING_PASS_INTO(sDebug) );
+                rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+                if(m_pcEncCfg->getUseCbfFastMode() && rpcBestCU->getPartitionSize(0) == SIZE_nLx2N )
+                {
+                  doNotBlockPu = rpcBestCU->getQtRootCbf( 0 ) != 0;
+                }
+              }
+              if(doNotBlockPu)
+              {
+                xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_nRx2N DEBUG_STRING_PASS_INTO(sDebug) );
+                rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+              }
+            }
+#if AMP_MRG
+            else if ( bTestMergeAMP_Ver )
+            {
+              if(doNotBlockPu)
+              {
+                xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_nLx2N DEBUG_STRING_PASS_INTO(sDebug), true );
+                rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+                if(m_pcEncCfg->getUseCbfFastMode() && rpcBestCU->getPartitionSize(0) == SIZE_nLx2N )
+                {
+                  doNotBlockPu = rpcBestCU->getQtRootCbf( 0 ) != 0;
+                }
+              }
+              if(doNotBlockPu)
+              {
+                xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_nRx2N DEBUG_STRING_PASS_INTO(sDebug), true );
+                rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+              }
+            }
+#endif
+
+#else
+            xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_2NxnU );
+            rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+            xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_2NxnD );
+            rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+            xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_nLx2N );
+            rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+
+            xCheckRDCostInter( rpcBestCU, rpcTempCU, SIZE_nRx2N );
+            rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+
+#endif
+          }
+        }
+
+        // do normal intra modes
+        // speedup for inter frames
+        Double intraCost = 0.0;
+
+        if((rpcBestCU->getSlice()->getSliceType() == I_SLICE)                                     ||
+           (rpcBestCU->getCbf( 0, COMPONENT_Y  ) != 0)                                            ||
+          ((rpcBestCU->getCbf( 0, COMPONENT_Cb ) != 0) && (numberValidComponents > COMPONENT_Cb)) ||
+          ((rpcBestCU->getCbf( 0, COMPONENT_Cr ) != 0) && (numberValidComponents > COMPONENT_Cr))  ) // avoid very complex intra if it is unlikely
+        {
+          xCheckRDCostIntra( rpcBestCU, rpcTempCU, intraCost, SIZE_2Nx2N DEBUG_STRING_PASS_INTO(sDebug) );
+          rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+          if( uiDepth == g_uiMaxCUDepth - g_uiAddCUDepth )
+          {
+            if( rpcTempCU->getWidth(0) > ( 1 << rpcTempCU->getSlice()->getSPS()->getQuadtreeTULog2MinSize() ) )
+            {
+              Double tmpIntraCost;
+              xCheckRDCostIntra( rpcBestCU, rpcTempCU, tmpIntraCost, SIZE_NxN DEBUG_STRING_PASS_INTO(sDebug)   );
+              intraCost = std::min(intraCost, tmpIntraCost);
+              rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+            }
+          }
+        }
+
+        // test PCM
+        if(pcPic->getSlice(0)->getSPS()->getUsePCM()
+          && rpcTempCU->getWidth(0) <= (1<<pcPic->getSlice(0)->getSPS()->getPCMLog2MaxSize())
+          && rpcTempCU->getWidth(0) >= (1<<pcPic->getSlice(0)->getSPS()->getPCMLog2MinSize()) )
+        {
+          UInt uiRawBits = getTotalBits(rpcBestCU->getWidth(0), rpcBestCU->getHeight(0), rpcBestCU->getPic()->getChromaFormat(), g_bitDepth);
+          UInt uiBestBits = rpcBestCU->getTotalBits();
+          if((uiBestBits > uiRawBits) || (rpcBestCU->getTotalCost() > m_pcRdCost->calcRdCost(uiRawBits, 0)))
+          {
+            xCheckIntraPCM (rpcBestCU, rpcTempCU);
+            rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+          }
+        }
+
+        if (bIsLosslessMode) // Restore loop variable if lossless mode was searched.
+        {
+          iQP = iMinQP;
+        }
+      }
+    }
+
+    m_pcEntropyCoder->resetBits();
+    m_pcEntropyCoder->encodeSplitFlag( rpcBestCU, 0, uiDepth, true );
+    rpcBestCU->getTotalBits() += m_pcEntropyCoder->getNumberOfWrittenBits(); // split bits
+    rpcBestCU->getTotalBins() += ((TEncBinCABAC *)((TEncSbac*)m_pcEntropyCoder->m_pcEntropyCoderIf)->getEncBinIf())->getBinsCoded();
+    rpcBestCU->getTotalCost()  = m_pcRdCost->calcRdCost( rpcBestCU->getTotalBits(), rpcBestCU->getTotalDistortion() );
+
+    // Early CU determination
+    if( m_pcEncCfg->getUseEarlyCU() && rpcBestCU->isSkipped(0) )
+    {
+      bSubBranch = false;
+    }
+    else
+    {
+      bSubBranch = true;
+    }
+  }
+  else
+  {
+    bBoundary = true;
+  }
+
+  // copy original YUV samples to PCM buffer
+  if( rpcBestCU->isLosslessCoded(0) && (rpcBestCU->getIPCMFlag(0) == false))
+  {
+    xFillPCMBuffer(rpcBestCU, m_ppcOrigYuv[uiDepth]);
+  }
+
+  if( (g_uiMaxCUWidth>>uiDepth) == rpcTempCU->getSlice()->getPPS()->getMinCuDQPSize() )
+  {
+    Int idQP = m_pcEncCfg->getMaxDeltaQP();
+    iMinQP = Clip3( -rpcTempCU->getSlice()->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA), MAX_QP, iBaseQP-idQP );
+    iMaxQP = Clip3( -rpcTempCU->getSlice()->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA), MAX_QP, iBaseQP+idQP );
+  }
+  else if( (g_uiMaxCUWidth>>uiDepth) > rpcTempCU->getSlice()->getPPS()->getMinCuDQPSize() )
+  {
+    iMinQP = iBaseQP;
+    iMaxQP = iBaseQP;
+  }
+  else
+  {
+    const Int iStartQP = rpcTempCU->getQP(0);
+    iMinQP = iStartQP;
+    iMaxQP = iStartQP;
+  }
+
+  if ( m_pcEncCfg->getUseRateCtrl() )
+  {
+    iMinQP = m_pcRateCtrl->getRCQP();
+    iMaxQP = m_pcRateCtrl->getRCQP();
+  }
+
+  if ( m_pcEncCfg->getCUTransquantBypassFlagForceValue() )
+  {
+    iMaxQP = iMinQP; // If all TUs are forced into using transquant bypass, do not loop here.
+  }
+
+  for (Int iQP=iMinQP; iQP<=iMaxQP; iQP++)
+  {
+    const Bool bIsLosslessMode = false; // False at this level. Next level down may set it to true.
+
+    rpcTempCU->initEstData( uiDepth, iQP, bIsLosslessMode );
+
+    // further split
+    if( bSubBranch && uiDepth < g_uiMaxCUDepth - g_uiAddCUDepth )
+    {
+      UChar       uhNextDepth         = uiDepth+1;
+      TComDataCU* pcSubBestPartCU     = m_ppcBestCU[uhNextDepth];
+      TComDataCU* pcSubTempPartCU     = m_ppcTempCU[uhNextDepth];
+      DEBUG_STRING_NEW(sTempDebug)
+
+      for ( UInt uiPartUnitIdx = 0; uiPartUnitIdx < 4; uiPartUnitIdx++ )
+      {
+        pcSubBestPartCU->initSubCU( rpcTempCU, uiPartUnitIdx, uhNextDepth, iQP );           // clear sub partition datas or init.
+        pcSubTempPartCU->initSubCU( rpcTempCU, uiPartUnitIdx, uhNextDepth, iQP );           // clear sub partition datas or init.
+
+        if( ( pcSubBestPartCU->getCUPelX() < pcSlice->getSPS()->getPicWidthInLumaSamples() ) && ( pcSubBestPartCU->getCUPelY() < pcSlice->getSPS()->getPicHeightInLumaSamples() ) )
+        {
+          if ( 0 == uiPartUnitIdx) //initialize RD with previous depth buffer
+          {
+            m_pppcRDSbacCoder[uhNextDepth][CI_CURR_BEST]->load(m_pppcRDSbacCoder[uiDepth][CI_CURR_BEST]);
+          }
+          else
+          {
+            m_pppcRDSbacCoder[uhNextDepth][CI_CURR_BEST]->load(m_pppcRDSbacCoder[uhNextDepth][CI_NEXT_BEST]);
+          }
+
+#if AMP_ENC_SPEEDUP
+          DEBUG_STRING_NEW(sChild)
+          if ( !rpcBestCU->isInter(0) )
+          {
+            xCompressCU( pcSubBestPartCU, pcSubTempPartCU, uhNextDepth DEBUG_STRING_PASS_INTO(sChild), NUMBER_OF_PART_SIZES );
+          }
+          else
+          {
+
+            xCompressCU( pcSubBestPartCU, pcSubTempPartCU, uhNextDepth DEBUG_STRING_PASS_INTO(sChild), rpcBestCU->getPartitionSize(0) );
+          }
+          DEBUG_STRING_APPEND(sTempDebug, sChild)
+#else
+          xCompressCU( pcSubBestPartCU, pcSubTempPartCU, uhNextDepth );
+#endif
+
+          rpcTempCU->copyPartFrom( pcSubBestPartCU, uiPartUnitIdx, uhNextDepth );         // Keep best part data to current temporary data.
+          xCopyYuv2Tmp( pcSubBestPartCU->getTotalNumPart()*uiPartUnitIdx, uhNextDepth );
+        }
+        else
+        {
+          pcSubBestPartCU->copyToPic( uhNextDepth );
+          rpcTempCU->copyPartFrom( pcSubBestPartCU, uiPartUnitIdx, uhNextDepth );
+        }
+      }
+
+      if( !bBoundary )
+      {
+        m_pcEntropyCoder->resetBits();
+        m_pcEntropyCoder->encodeSplitFlag( rpcTempCU, 0, uiDepth, true );
+
+        rpcTempCU->getTotalBits() += m_pcEntropyCoder->getNumberOfWrittenBits(); // split bits
+        rpcTempCU->getTotalBins() += ((TEncBinCABAC *)((TEncSbac*)m_pcEntropyCoder->m_pcEntropyCoderIf)->getEncBinIf())->getBinsCoded();
+      }
+      rpcTempCU->getTotalCost()  = m_pcRdCost->calcRdCost( rpcTempCU->getTotalBits(), rpcTempCU->getTotalDistortion() );
+
+      if( (g_uiMaxCUWidth>>uiDepth) == rpcTempCU->getSlice()->getPPS()->getMinCuDQPSize() && rpcTempCU->getSlice()->getPPS()->getUseDQP())
+      {
+        Bool hasResidual = false;
+        for( UInt uiBlkIdx = 0; uiBlkIdx < rpcTempCU->getTotalNumPart(); uiBlkIdx ++)
+        {
+          if( (     rpcTempCU->getCbf(uiBlkIdx, COMPONENT_Y)
+                || (rpcTempCU->getCbf(uiBlkIdx, COMPONENT_Cb) && (numberValidComponents > COMPONENT_Cb))
+                || (rpcTempCU->getCbf(uiBlkIdx, COMPONENT_Cr) && (numberValidComponents > COMPONENT_Cr)) ) )
+          {
+            hasResidual = true;
+            break;
+          }
+        }
+
+        UInt uiTargetPartIdx = 0;
+        if ( hasResidual )
+        {
+#if !RDO_WITHOUT_DQP_BITS
+          m_pcEntropyCoder->resetBits();
+          m_pcEntropyCoder->encodeQP( rpcTempCU, uiTargetPartIdx, false );
+          rpcTempCU->getTotalBits() += m_pcEntropyCoder->getNumberOfWrittenBits(); // dQP bits
+          rpcTempCU->getTotalBins() += ((TEncBinCABAC *)((TEncSbac*)m_pcEntropyCoder->m_pcEntropyCoderIf)->getEncBinIf())->getBinsCoded();
+          rpcTempCU->getTotalCost()  = m_pcRdCost->calcRdCost( rpcTempCU->getTotalBits(), rpcTempCU->getTotalDistortion() );
+#endif
+
+          Bool foundNonZeroCbf = false;
+          rpcTempCU->setQPSubCUs( rpcTempCU->getRefQP( uiTargetPartIdx ), 0, uiDepth, foundNonZeroCbf );
+          assert( foundNonZeroCbf );
+        }
+        else
+        {
+          rpcTempCU->setQPSubParts( rpcTempCU->getRefQP( uiTargetPartIdx ), 0, uiDepth ); // set QP to default QP
+        }
+      }
+
+      m_pppcRDSbacCoder[uhNextDepth][CI_NEXT_BEST]->store(m_pppcRDSbacCoder[uiDepth][CI_TEMP_BEST]);
+
+      // TODO: this does not account for the slice bytes already written. See other instances of FIXED_NUMBER_OF_BYTES
+      Bool isEndOfSlice        = rpcBestCU->getSlice()->getSliceMode()==FIXED_NUMBER_OF_BYTES
+                                 && (rpcBestCU->getTotalBits()>rpcBestCU->getSlice()->getSliceArgument()<<3);
+      Bool isEndOfSliceSegment = rpcBestCU->getSlice()->getSliceSegmentMode()==FIXED_NUMBER_OF_BYTES
+                                 && (rpcBestCU->getTotalBits()>rpcBestCU->getSlice()->getSliceSegmentArgument()<<3);
+      if(isEndOfSlice||isEndOfSliceSegment)
+      {
+        if (m_pcEncCfg->getCostMode()==COST_MIXED_LOSSLESS_LOSSY_CODING)
+          rpcBestCU->getTotalCost()=rpcTempCU->getTotalCost() + (1.0 / m_pcRdCost->getLambda());
+        else
+          rpcBestCU->getTotalCost()=rpcTempCU->getTotalCost()+1;
+      }
+
+      xCheckBestMode( rpcBestCU, rpcTempCU, uiDepth DEBUG_STRING_PASS_INTO(sDebug) DEBUG_STRING_PASS_INTO(sTempDebug) DEBUG_STRING_PASS_INTO(false) ); // RD compare current larger prediction
+                                                                                       // with sub partitioned prediction.
+    }
+  }
+
+  DEBUG_STRING_APPEND(sDebug_, sDebug);
+
+  rpcBestCU->copyToPic(uiDepth);                                                     // Copy Best data to Picture for next partition prediction.
+
+  xCopyYuv2Pic( rpcBestCU->getPic(), rpcBestCU->getCtuRsAddr(), rpcBestCU->getZorderIdxInCtu(), uiDepth, uiDepth, rpcBestCU, uiLPelX, uiTPelY );   // Copy Yuv data to picture Yuv
+  if (bBoundary)
+  {
+    return;
+  }
+
+  // Assert if Best prediction mode is NONE
+  // Selected mode's RD-cost must be not MAX_DOUBLE.
+  assert( rpcBestCU->getPartitionSize ( 0 ) != NUMBER_OF_PART_SIZES       );
+  assert( rpcBestCU->getPredictionMode( 0 ) != NUMBER_OF_PREDICTION_MODES );
+  assert( rpcBestCU->getTotalCost     (   ) != MAX_DOUBLE                 );
+}
+
+/** Finish encoding a CU: when this is the last sub-CU of its CTU, emit the
+ *  slice-terminating bit (0 = more CTUs follow) unless the slice segment
+ *  ends exactly after this CTU, in which case the 1-bit is added elsewhere.
+ * \param pcCU          CU whose syntax has just been written
+ * \param uiAbsPartIdx  Z-scan part index of the CU inside its CTU
+ * \param uiDepth       CU quadtree depth (not referenced in this body)
+ * \returns Void
+ */
+Void TEncCu::finishCU( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  TComPic* pcPic = pcCU->getPic();
+  TComSlice * pcSlice = pcCU->getPic()->getSlice(pcCU->getPic()->getCurrSliceIdx());
+
+  // Calculate end address: map the CTU's raster address to tile-scan order
+  // so it can be compared against the slice segment's end address below.
+  const Int  currentCTUTsAddr = pcPic->getPicSym()->getCtuRsToTsAddrMap(pcCU->getCtuRsAddr());
+  const Bool isLastSubCUOfCtu = pcCU->isLastSubCUOfCtu(uiAbsPartIdx);
+  if ( isLastSubCUOfCtu )
+  {
+    // The 1-terminating bit is added to all streams, so don't add it here when it's 1.
+    // i.e. when the slice segment CurEnd CTU address is the current CTU address+1.
+    if (pcSlice->getSliceSegmentCurEndCtuTsAddr() != currentCTUTsAddr+1)
+    {
+      m_pcEntropyCoder->encodeTerminatingBit( 0 );
+    }
+  }
+}
+
+/** Compute the QP for a CU, optionally adding an adaptive-QP offset derived
+ *  from the spatial activity of the co-located adaptation unit.
+ * \param pcCU    target CU (supplies the slice QP and the CU pixel position)
+ * \param uiDepth CU depth, used to pick the QP-adaptation layer
+ * \returns quantization parameter, clipped to the legal luma QP range
+ */
+Int TEncCu::xComputeQP( TComDataCU* pcCU, UInt uiDepth )
+{
+  Int iBaseQp = pcCU->getSlice()->getSliceQp();
+  Int iQpOffset = 0;
+  if ( m_pcEncCfg->getUseAdaptiveQP() )
+  {
+    // NOTE(review): the dynamic_cast result is dereferenced without a NULL
+    // check -- presumably pictures are always TEncPic when adaptive QP is
+    // enabled; confirm against the picture-allocation path.
+    TEncPic* pcEPic = dynamic_cast<TEncPic*>( pcCU->getPic() );
+    UInt uiAQDepth = min( uiDepth, pcEPic->getMaxAQDepth()-1 );
+    TEncPicQPAdaptationLayer* pcAQLayer = pcEPic->getAQLayer( uiAQDepth );
+    UInt uiAQUPosX = pcCU->getCUPelX() / pcAQLayer->getAQPartWidth();
+    UInt uiAQUPosY = pcCU->getCUPelY() / pcAQLayer->getAQPartHeight();
+    UInt uiAQUStride = pcAQLayer->getAQPartStride();
+    TEncQPAdaptationUnit* acAQU = pcAQLayer->getQPAdaptationUnit();
+
+    // Map this unit's activity, normalized against the layer average, to a
+    // QP delta; the pow(2, range/6) / *6.0 pairing reflects that 6 QP steps
+    // double the quantizer scale. Rounded to nearest via floor(x + 0.49999).
+    Double dMaxQScale = pow(2.0, m_pcEncCfg->getQPAdaptationRange()/6.0);
+    Double dAvgAct = pcAQLayer->getAvgActivity();
+    Double dCUAct = acAQU[uiAQUPosY * uiAQUStride + uiAQUPosX].getActivity();
+    Double dNormAct = (dMaxQScale*dCUAct + dAvgAct) / (dCUAct + dMaxQScale*dAvgAct);
+    Double dQpOffset = log(dNormAct) / log(2.0) * 6.0;
+    iQpOffset = Int(floor( dQpOffset + 0.49999 ));
+  }
+
+  // Clip to [-QpBDOffset(luma), MAX_QP].
+  return Clip3(-pcCU->getSlice()->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA), MAX_QP, iBaseQp+iQpOffset );
+}
+
+/** Encode a CU recursively: writes the split flag and, for leaf CUs, the
+ *  full CU syntax (transquant-bypass flag, skip flag, prediction mode,
+ *  partition size, IPCM info, prediction info, coefficients) through the
+ *  entropy coder, then the CTU-terminating bit via finishCU().
+ * \param pcCU          CTU containing the CU to encode
+ * \param uiAbsPartIdx  Z-scan part index of the CU inside the CTU
+ * \param uiDepth       current depth in the CU quadtree
+ * \returns Void
+ */
+Void TEncCu::xEncodeCU( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  TComPic* pcPic = pcCU->getPic();
+
+  // Pixel coordinates of this CU's corners, used to detect CUs that cross
+  // the picture boundary (for which the split is implicit, not signalled).
+  Bool bBoundary = false;
+  UInt uiLPelX   = pcCU->getCUPelX() + g_auiRasterToPelX[ g_auiZscanToRaster[uiAbsPartIdx] ];
+  UInt uiRPelX   = uiLPelX + (g_uiMaxCUWidth>>uiDepth)  - 1;
+  UInt uiTPelY   = pcCU->getCUPelY() + g_auiRasterToPelY[ g_auiZscanToRaster[uiAbsPartIdx] ];
+  UInt uiBPelY   = uiTPelY + (g_uiMaxCUHeight>>uiDepth) - 1;
+
+  TComSlice * pcSlice = pcCU->getPic()->getSlice(pcCU->getPic()->getCurrSliceIdx());
+  if( ( uiRPelX < pcSlice->getSPS()->getPicWidthInLumaSamples() ) && ( uiBPelY < pcSlice->getSPS()->getPicHeightInLumaSamples() ) )
+  {
+    // CU lies fully inside the picture: the split decision is coded explicitly.
+    m_pcEntropyCoder->encodeSplitFlag( pcCU, uiAbsPartIdx, uiDepth );
+  }
+  else
+  {
+    bBoundary = true;
+  }
+
+  // Recurse into the four sub-CUs when the CU is split (or forced split at a
+  // picture boundary); sub-CUs entirely outside the picture are skipped.
+  if( ( ( uiDepth < pcCU->getDepth( uiAbsPartIdx ) ) && ( uiDepth < (g_uiMaxCUDepth-g_uiAddCUDepth) ) ) || bBoundary )
+  {
+    UInt uiQNumParts = ( pcPic->getNumPartitionsInCtu() >> (uiDepth<<1) )>>2;
+    // Entering a new quantization group: re-arm the dQP / chroma-QP-adjust
+    // coding flags so the first coded TU in the group signals them.
+    if( (g_uiMaxCUWidth>>uiDepth) == pcCU->getSlice()->getPPS()->getMinCuDQPSize() && pcCU->getSlice()->getPPS()->getUseDQP())
+    {
+      setdQPFlag(true);
+    }
+
+    if( (g_uiMaxCUWidth>>uiDepth) == pcCU->getSlice()->getPPS()->getMinCuChromaQpAdjSize() && pcCU->getSlice()->getUseChromaQpAdj())
+    {
+      setCodeChromaQpAdjFlag(true);
+    }
+
+    for ( UInt uiPartUnitIdx = 0; uiPartUnitIdx < 4; uiPartUnitIdx++, uiAbsPartIdx+=uiQNumParts )
+    {
+      uiLPelX   = pcCU->getCUPelX() + g_auiRasterToPelX[ g_auiZscanToRaster[uiAbsPartIdx] ];
+      uiTPelY   = pcCU->getCUPelY() + g_auiRasterToPelY[ g_auiZscanToRaster[uiAbsPartIdx] ];
+      if( ( uiLPelX < pcSlice->getSPS()->getPicWidthInLumaSamples() ) && ( uiTPelY < pcSlice->getSPS()->getPicHeightInLumaSamples() ) )
+      {
+        xEncodeCU( pcCU, uiAbsPartIdx, uiDepth+1 );
+      }
+    }
+    return;
+  }
+
+  // Leaf CU at or above the quantization-group size: re-arm the flags here too.
+  if( (g_uiMaxCUWidth>>uiDepth) >= pcCU->getSlice()->getPPS()->getMinCuDQPSize() && pcCU->getSlice()->getPPS()->getUseDQP())
+  {
+    setdQPFlag(true);
+  }
+
+  if( (g_uiMaxCUWidth>>uiDepth) >= pcCU->getSlice()->getPPS()->getMinCuChromaQpAdjSize() && pcCU->getSlice()->getUseChromaQpAdj())
+  {
+    setCodeChromaQpAdjFlag(true);
+  }
+
+  if (pcCU->getSlice()->getPPS()->getTransquantBypassEnableFlag())
+  {
+    m_pcEntropyCoder->encodeCUTransquantBypassFlag( pcCU, uiAbsPartIdx );
+  }
+
+  if( !pcCU->getSlice()->isIntra() )
+  {
+    m_pcEntropyCoder->encodeSkipFlag( pcCU, uiAbsPartIdx );
+  }
+
+  // Skipped CU: only the merge index is coded, then the CU is done.
+  if( pcCU->isSkipped( uiAbsPartIdx ) )
+  {
+    m_pcEntropyCoder->encodeMergeIndex( pcCU, uiAbsPartIdx );
+    finishCU(pcCU,uiAbsPartIdx,uiDepth);
+    return;
+  }
+
+  m_pcEntropyCoder->encodePredMode( pcCU, uiAbsPartIdx );
+  m_pcEntropyCoder->encodePartSize( pcCU, uiAbsPartIdx, uiDepth );
+
+  // IPCM is only possible for intra 2Nx2N; if taken, raw samples replace
+  // all further prediction/coefficient syntax.
+  if (pcCU->isIntra( uiAbsPartIdx ) && pcCU->getPartitionSize( uiAbsPartIdx ) == SIZE_2Nx2N )
+  {
+    m_pcEntropyCoder->encodeIPCMInfo( pcCU, uiAbsPartIdx );
+
+    if(pcCU->getIPCMFlag(uiAbsPartIdx))
+    {
+      // Encode slice finish
+      finishCU(pcCU,uiAbsPartIdx,uiDepth);
+      return;
+    }
+  }
+
+  // prediction Info ( Intra : direction mode, Inter : Mv, reference idx )
+  m_pcEntropyCoder->encodePredInfo( pcCU, uiAbsPartIdx );
+
+  // Encode Coefficients; encodeCoeff may consume the dQP / chroma-QP-adjust
+  // flags, so the (possibly cleared) values are written back afterwards.
+  Bool bCodeDQP = getdQPFlag();
+  Bool codeChromaQpAdj = getCodeChromaQpAdjFlag();
+  m_pcEntropyCoder->encodeCoeff( pcCU, uiAbsPartIdx, uiDepth, bCodeDQP, codeChromaQpAdj );
+  setCodeChromaQpAdjFlag( codeChromaQpAdj );
+  setdQPFlag( bCodeDQP );
+
+  // --- write terminating bit ---
+  finishCU(pcCU,uiAbsPartIdx,uiDepth);
+}
+
+/** 8x8 Hadamard (SATD-style) activity measure over original samples.
+ *  Used for I-slice complexity estimation: no prediction is subtracted,
+ *  the transform is applied to the source block itself. The DC term is
+ *  excluded and the result is scaled by 1/4 with rounding.
+ */
+Int xCalcHADs8x8_ISlice(Pel *piOrg, Int iStrideOrg)
+{
+  Int samples[64];
+  Int a[8][8], b[8][8], c[8][8];
+
+  // Gather the 8x8 source block, one row per stride step.
+  for (Int row = 0; row < 8; row++)
+  {
+    for (Int col = 0; col < 8; col++)
+    {
+      samples[(row << 3) + col] = piOrg[col];
+    }
+    piOrg += iStrideOrg;
+  }
+
+  // Horizontal 8-point Hadamard butterfly, row by row (b -> a -> b).
+  for (Int row = 0; row < 8; row++)
+  {
+    const Int base = row << 3;
+
+    b[row][0] = samples[base  ] + samples[base+4];
+    b[row][1] = samples[base+1] + samples[base+5];
+    b[row][2] = samples[base+2] + samples[base+6];
+    b[row][3] = samples[base+3] + samples[base+7];
+    b[row][4] = samples[base  ] - samples[base+4];
+    b[row][5] = samples[base+1] - samples[base+5];
+    b[row][6] = samples[base+2] - samples[base+6];
+    b[row][7] = samples[base+3] - samples[base+7];
+
+    a[row][0] = b[row][0] + b[row][2];
+    a[row][1] = b[row][1] + b[row][3];
+    a[row][2] = b[row][0] - b[row][2];
+    a[row][3] = b[row][1] - b[row][3];
+    a[row][4] = b[row][4] + b[row][6];
+    a[row][5] = b[row][5] + b[row][7];
+    a[row][6] = b[row][4] - b[row][6];
+    a[row][7] = b[row][5] - b[row][7];
+
+    b[row][0] = a[row][0] + a[row][1];
+    b[row][1] = a[row][0] - a[row][1];
+    b[row][2] = a[row][2] + a[row][3];
+    b[row][3] = a[row][2] - a[row][3];
+    b[row][4] = a[row][4] + a[row][5];
+    b[row][5] = a[row][4] - a[row][5];
+    b[row][6] = a[row][6] + a[row][7];
+    b[row][7] = a[row][6] - a[row][7];
+  }
+
+  // Vertical 8-point Hadamard butterfly, column by column (c -> a -> b).
+  for (Int col = 0; col < 8; col++)
+  {
+    c[0][col] = b[0][col] + b[4][col];
+    c[1][col] = b[1][col] + b[5][col];
+    c[2][col] = b[2][col] + b[6][col];
+    c[3][col] = b[3][col] + b[7][col];
+    c[4][col] = b[0][col] - b[4][col];
+    c[5][col] = b[1][col] - b[5][col];
+    c[6][col] = b[2][col] - b[6][col];
+    c[7][col] = b[3][col] - b[7][col];
+
+    a[0][col] = c[0][col] + c[2][col];
+    a[1][col] = c[1][col] + c[3][col];
+    a[2][col] = c[0][col] - c[2][col];
+    a[3][col] = c[1][col] - c[3][col];
+    a[4][col] = c[4][col] + c[6][col];
+    a[5][col] = c[5][col] + c[7][col];
+    a[6][col] = c[4][col] - c[6][col];
+    a[7][col] = c[5][col] - c[7][col];
+
+    b[0][col] = a[0][col] + a[1][col];
+    b[1][col] = a[0][col] - a[1][col];
+    b[2][col] = a[2][col] + a[3][col];
+    b[3][col] = a[2][col] - a[3][col];
+    b[4][col] = a[4][col] + a[5][col];
+    b[5][col] = a[4][col] - a[5][col];
+    b[6][col] = a[6][col] + a[7][col];
+    b[7][col] = a[6][col] - a[7][col];
+  }
+
+  // Sum of absolute transform coefficients, minus the DC term, scaled by 1/4.
+  Int sum = 0;
+  for (Int row = 0; row < 8; row++)
+  {
+    for (Int col = 0; col < 8; col++)
+    {
+      sum += abs(b[row][col]);
+    }
+  }
+  sum -= abs(b[0][0]);
+  return (sum + 2) >> 2;
+}
+
+/** Accumulate the 8x8 Hadamard activity measure over one CTU.
+ *  Walks the CTU's original luma samples in 8x8 steps and sums the
+ *  per-block estimates; partial blocks at the right/bottom edges of the
+ *  CTU are not included.
+ */
+Int  TEncCu::updateCtuDataISlice(TComDataCU* pCtu, Int width, Int height)
+{
+  const Int blkSize = 8;
+
+  Pel*      pOrgBase = pCtu->getPic()->getPicYuvOrg()->getAddr(COMPONENT_Y, pCtu->getCtuRsAddr(), 0);
+  const Int stride   = pCtu->getPic()->getPicYuvOrg()->getStride(COMPONENT_Y);
+
+  Int sumHad = 0;
+  for (Int y = 0; y + blkSize <= height; y += blkSize)
+  {
+    for (Int x = 0; x + blkSize <= width; x += blkSize)
+    {
+      sumHad += xCalcHADs8x8_ISlice(pOrgBase + stride * y + x, stride);
+    }
+  }
+  return sumHad;
+}
+
+/** RD-check the 2Nx2N merge candidates (and the forced-zero-residual /
+ *  skip variants) for a CU, updating rpcBestCU when a candidate wins.
+ * \param rpcBestCU               best CU found so far (updated in place)
+ * \param rpcTempCU               working CU used for each trial encode
+ * \param earlyDetectionSkipMode  set to true when early-skip detection
+ *                                decides further mode search can be skipped
+ * \returns Void
+ */
+Void TEncCu::xCheckRDCostMerge2Nx2N( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU DEBUG_STRING_FN_DECLARE(sDebug), Bool *earlyDetectionSkipMode )
+{
+  assert( rpcTempCU->getSlice()->getSliceType() != I_SLICE );
+  TComMvField  cMvFieldNeighbours[2 * MRG_MAX_NUM_CANDS]; // double length for mv of both lists
+  UChar uhInterDirNeighbours[MRG_MAX_NUM_CANDS];
+  Int numValidMergeCand = 0;
+  const Bool bTransquantBypassFlag = rpcTempCU->getCUTransquantBypass(0);
+
+  for( UInt ui = 0; ui < rpcTempCU->getSlice()->getMaxNumMergeCand(); ++ui )
+  {
+    uhInterDirNeighbours[ui] = 0;
+  }
+  UChar uhDepth = rpcTempCU->getDepth( 0 );
+  rpcTempCU->setPartSizeSubParts( SIZE_2Nx2N, 0, uhDepth ); // interprets depth relative to CTU level
+  rpcTempCU->getInterMergeCandidates( 0, 0, cMvFieldNeighbours,uhInterDirNeighbours, numValidMergeCand );
+
+  // mergeCandBuffer[i] == 1 marks a candidate that already produced zero
+  // residual on the first pass, so the forced-zero-residual pass skips it.
+  // NOTE(review): ui (UInt) is compared against numValidMergeCand (Int);
+  // benign since the count is non-negative, but mixed-signedness.
+  Int mergeCandBuffer[MRG_MAX_NUM_CANDS];
+  for( UInt ui = 0; ui < numValidMergeCand; ++ui )
+  {
+    mergeCandBuffer[ui] = 0;
+  }
+
+  Bool bestIsSkip = false;
+
+  // Pass 0 codes the residual; pass 1 forces it to zero (skip-like). For
+  // lossless CUs only one pass runs, as the residual cannot be discarded.
+  UInt iteration;
+  if ( rpcTempCU->isLosslessCoded(0))
+  {
+    iteration = 1;
+  }
+  else
+  {
+    iteration = 2;
+  }
+  DEBUG_STRING_NEW(bestStr)
+
+  for( UInt uiNoResidual = 0; uiNoResidual < iteration; ++uiNoResidual )
+  {
+    for( UInt uiMergeCand = 0; uiMergeCand < numValidMergeCand; ++uiMergeCand )
+    {
+      if(!(uiNoResidual==1 && mergeCandBuffer[uiMergeCand]==1))
+      {
+        if( !(bestIsSkip && uiNoResidual == 0) )
+        {
+          DEBUG_STRING_NEW(tmpStr)
+          // set MC parameters
+          rpcTempCU->setPredModeSubParts( MODE_INTER, 0, uhDepth ); // interprets depth relative to CTU level
+          rpcTempCU->setCUTransquantBypassSubParts( bTransquantBypassFlag, 0, uhDepth );
+          rpcTempCU->setChromaQpAdjSubParts( bTransquantBypassFlag ? 0 : m_ChromaQpAdjIdc, 0, uhDepth );
+          rpcTempCU->setPartSizeSubParts( SIZE_2Nx2N, 0, uhDepth ); // interprets depth relative to CTU level
+          rpcTempCU->setMergeFlagSubParts( true, 0, 0, uhDepth ); // interprets depth relative to CTU level
+          rpcTempCU->setMergeIndexSubParts( uiMergeCand, 0, 0, uhDepth ); // interprets depth relative to CTU level
+          rpcTempCU->setInterDirSubParts( uhInterDirNeighbours[uiMergeCand], 0, 0, uhDepth ); // interprets depth relative to CTU level
+          rpcTempCU->getCUMvField( REF_PIC_LIST_0 )->setAllMvField( cMvFieldNeighbours[0 + 2*uiMergeCand], SIZE_2Nx2N, 0, 0 ); // interprets depth relative to rpcTempCU level
+          rpcTempCU->getCUMvField( REF_PIC_LIST_1 )->setAllMvField( cMvFieldNeighbours[1 + 2*uiMergeCand], SIZE_2Nx2N, 0, 0 ); // interprets depth relative to rpcTempCU level
+
+          // do MC
+          m_pcPredSearch->motionCompensation ( rpcTempCU, m_ppcPredYuvTemp[uhDepth] );
+          // estimate residual and encode everything
+          m_pcPredSearch->encodeResAndCalcRdInterCU( rpcTempCU,
+                                                     m_ppcOrigYuv    [uhDepth],
+                                                     m_ppcPredYuvTemp[uhDepth],
+                                                     m_ppcResiYuvTemp[uhDepth],
+                                                     m_ppcResiYuvBest[uhDepth],
+                                                     m_ppcRecoYuvTemp[uhDepth],
+                                                     (uiNoResidual != 0) DEBUG_STRING_PASS_INTO(tmpStr) );
+
+#ifdef DEBUG_STRING
+          DebugInterPredResiReco(tmpStr, *(m_ppcPredYuvTemp[uhDepth]), *(m_ppcResiYuvBest[uhDepth]), *(m_ppcRecoYuvTemp[uhDepth]), DebugStringGetPredModeMask(rpcTempCU->getPredictionMode(0)));
+#endif
+
+          if ((uiNoResidual == 0) && (rpcTempCU->getQtRootCbf(0) == 0))
+          {
+            // If no residual when allowing for one, then set mark to not try case where residual is forced to 0
+            mergeCandBuffer[uiMergeCand] = 1;
+          }
+
+          // Zero root CBF means the CU is coded as skip.
+          rpcTempCU->setSkipFlagSubParts( rpcTempCU->getQtRootCbf(0) == 0, 0, uhDepth );
+          Int orgQP = rpcTempCU->getQP( 0 );
+          xCheckDQP( rpcTempCU );
+          xCheckBestMode(rpcBestCU, rpcTempCU, uhDepth DEBUG_STRING_PASS_INTO(bestStr) DEBUG_STRING_PASS_INTO(tmpStr));
+
+          // Reset the working CU for the next candidate trial.
+          rpcTempCU->initEstData( uhDepth, orgQP, bTransquantBypassFlag );
+
+          if( m_pcEncCfg->getUseFastDecisionForMerge() && !bestIsSkip )
+          {
+            bestIsSkip = rpcBestCU->getQtRootCbf(0) == 0;
+          }
+        }
+      }
+    }
+
+    // Early-skip detection: after the residual pass, a best mode with no
+    // residual and either a merge flag or all-zero MVDs ends the search.
+    if(uiNoResidual == 0 && m_pcEncCfg->getUseEarlySkipDetection())
+    {
+      if(rpcBestCU->getQtRootCbf( 0 ) == 0)
+      {
+        if( rpcBestCU->getMergeFlag( 0 ))
+        {
+          *earlyDetectionSkipMode = true;
+        }
+        else if(m_pcEncCfg->getFastSearch() != SELECTIVE)
+        {
+          Int absoulte_MV=0;
+          for ( UInt uiRefListIdx = 0; uiRefListIdx < 2; uiRefListIdx++ )
+          {
+            if ( rpcBestCU->getSlice()->getNumRefIdx( RefPicList( uiRefListIdx ) ) > 0 )
+            {
+              TComCUMvField* pcCUMvField = rpcBestCU->getCUMvField(RefPicList( uiRefListIdx ));
+              Int iHor = pcCUMvField->getMvd( 0 ).getAbsHor();
+              Int iVer = pcCUMvField->getMvd( 0 ).getAbsVer();
+              absoulte_MV+=iHor+iVer;
+            }
+          }
+
+          if(absoulte_MV == 0)
+          {
+            *earlyDetectionSkipMode = true;
+          }
+        }
+      }
+    }
+  }
+  DEBUG_STRING_APPEND(sDebug, bestStr)
+}
+
+
+/** RD-check one inter partitioning of the CU: motion search, residual
+ *  coding, RD cost, then comparison against the current best mode.
+ * \param rpcBestCU  best CU so far (updated in place by xCheckBestMode)
+ * \param rpcTempCU  working CU configured here for the trial encode
+ * \param ePartSize  inter partition to test (2Nx2N, Nx2N, AMP shapes, ...)
+ * \param bUseMRG    (AMP_MRG builds only) restrict the search to merge mode
+ * \returns Void
+ */
+#if AMP_MRG
+Void TEncCu::xCheckRDCostInter( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU, PartSize ePartSize DEBUG_STRING_FN_DECLARE(sDebug), Bool bUseMRG)
+#else
+Void TEncCu::xCheckRDCostInter( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU, PartSize ePartSize )
+#endif
+{
+  DEBUG_STRING_NEW(sTest)
+
+  UChar uhDepth = rpcTempCU->getDepth( 0 );
+
+  // Configure the working CU as a non-skip inter CU with the trial partition.
+  rpcTempCU->setDepthSubParts( uhDepth, 0 );
+
+  rpcTempCU->setSkipFlagSubParts( false, 0, uhDepth );
+
+  rpcTempCU->setPartSizeSubParts  ( ePartSize,  0, uhDepth );
+  rpcTempCU->setPredModeSubParts  ( MODE_INTER, 0, uhDepth );
+  rpcTempCU->setChromaQpAdjSubParts( rpcTempCU->getCUTransquantBypass(0) ? 0 : m_ChromaQpAdjIdc, 0, uhDepth );
+
+#if AMP_MRG
+  rpcTempCU->setMergeAMP (true);
+  m_pcPredSearch->predInterSearch ( rpcTempCU, m_ppcOrigYuv[uhDepth], m_ppcPredYuvTemp[uhDepth], m_ppcResiYuvTemp[uhDepth], m_ppcRecoYuvTemp[uhDepth] DEBUG_STRING_PASS_INTO(sTest), false, bUseMRG );
+#else
+  m_pcPredSearch->predInterSearch ( rpcTempCU, m_ppcOrigYuv[uhDepth], m_ppcPredYuvTemp[uhDepth], m_ppcResiYuvTemp[uhDepth], m_ppcRecoYuvTemp[uhDepth] );
+#endif
+
+#if AMP_MRG
+  // The AMP-merge search may clear the flag to signal "nothing to test".
+  if ( !rpcTempCU->getMergeAMP() )
+  {
+    return;
+  }
+#endif
+
+  // Code the residual, compute the RD cost, and compare with the best mode.
+  m_pcPredSearch->encodeResAndCalcRdInterCU( rpcTempCU, m_ppcOrigYuv[uhDepth], m_ppcPredYuvTemp[uhDepth], m_ppcResiYuvTemp[uhDepth], m_ppcResiYuvBest[uhDepth], m_ppcRecoYuvTemp[uhDepth], false DEBUG_STRING_PASS_INTO(sTest) );
+  rpcTempCU->getTotalCost()  = m_pcRdCost->calcRdCost( rpcTempCU->getTotalBits(), rpcTempCU->getTotalDistortion() );
+
+#ifdef DEBUG_STRING
+  DebugInterPredResiReco(sTest, *(m_ppcPredYuvTemp[uhDepth]), *(m_ppcResiYuvBest[uhDepth]), *(m_ppcRecoYuvTemp[uhDepth]), DebugStringGetPredModeMask(rpcTempCU->getPredictionMode(0)));
+#endif
+
+  xCheckDQP( rpcTempCU );
+  xCheckBestMode(rpcBestCU, rpcTempCU, uhDepth DEBUG_STRING_PASS_INTO(sDebug) DEBUG_STRING_PASS_INTO(sTest));
+}
+
+/** Test the RD cost of an intra-coded CU with the given partition size and
+ *  keep it if it beats the current best mode.
+ * \param rpcBestCU best CU so far at this depth (swapped in by xCheckBestMode on a win)
+ * \param rpcTempCU candidate CU to evaluate
+ * \param cost      [out] total RD cost of this intra candidate
+ * \param eSize     partition size under test (e.g. SIZE_2Nx2N, SIZE_NxN)
+ */
+Void TEncCu::xCheckRDCostIntra( TComDataCU *&rpcBestCU,
+                                TComDataCU *&rpcTempCU,
+                                Double      &cost,
+                                PartSize     eSize
+                                DEBUG_STRING_FN_DECLARE(sDebug) )
+{
+  DEBUG_STRING_NEW(sTest)
+
+  UInt uiDepth = rpcTempCU->getDepth( 0 );
+
+  // Mark the whole CU as a non-skip intra CU with the requested partitioning.
+  rpcTempCU->setSkipFlagSubParts( false, 0, uiDepth );
+
+  rpcTempCU->setPartSizeSubParts( eSize, 0, uiDepth );
+  rpcTempCU->setPredModeSubParts( MODE_INTRA, 0, uiDepth );
+  rpcTempCU->setChromaQpAdjSubParts( rpcTempCU->getCUTransquantBypass(0) ? 0 : m_ChromaQpAdjIdc, 0, uiDepth );
+
+  Bool bSeparateLumaChroma = true; // choose estimation mode
+
+  Distortion uiPreCalcDistC = 0;
+  if (rpcBestCU->getPic()->getChromaFormat()==CHROMA_400)
+  {
+    // Monochrome content: keep the separate (luma-only) estimation path.
+    bSeparateLumaChroma=true;
+  }
+
+  Pel resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE];
+
+  if( !bSeparateLumaChroma )
+  {
+    // after this function, the direction will be PLANAR, DC, HOR or VER
+    // however, if Luma ends up being one of those, the chroma dir must be later changed to DM_CHROMA.
+    m_pcPredSearch->preestChromaPredMode( rpcTempCU, m_ppcOrigYuv[uiDepth], m_ppcPredYuvTemp[uiDepth] );
+  }
+  // Luma intra mode search; fills the temp prediction/residual/reconstruction buffers.
+  m_pcPredSearch->estIntraPredQT( rpcTempCU, m_ppcOrigYuv[uiDepth], m_ppcPredYuvTemp[uiDepth], m_ppcResiYuvTemp[uiDepth], m_ppcRecoYuvTemp[uiDepth], resiLuma, uiPreCalcDistC, bSeparateLumaChroma DEBUG_STRING_PASS_INTO(sTest) );
+
+  // Publish the luma reconstruction to the picture buffer before the chroma search.
+  m_ppcRecoYuvTemp[uiDepth]->copyToPicComponent(COMPONENT_Y, rpcTempCU->getPic()->getPicYuvRec(), rpcTempCU->getCtuRsAddr(), rpcTempCU->getZorderIdxInCtu() );
+
+  if (rpcBestCU->getPic()->getChromaFormat()!=CHROMA_400)
+  {
+    m_pcPredSearch->estIntraPredChromaQT( rpcTempCU, m_ppcOrigYuv[uiDepth], m_ppcPredYuvTemp[uiDepth], m_ppcResiYuvTemp[uiDepth], m_ppcRecoYuvTemp[uiDepth], resiLuma, uiPreCalcDistC DEBUG_STRING_PASS_INTO(sTest) );
+  }
+
+  // Count the side-information bits of this candidate with the entropy coder.
+  m_pcEntropyCoder->resetBits();
+
+  if ( rpcTempCU->getSlice()->getPPS()->getTransquantBypassEnableFlag())
+  {
+    m_pcEntropyCoder->encodeCUTransquantBypassFlag( rpcTempCU, 0,          true );
+  }
+
+  m_pcEntropyCoder->encodeSkipFlag ( rpcTempCU, 0,          true );
+  m_pcEntropyCoder->encodePredMode( rpcTempCU, 0,          true );
+  m_pcEntropyCoder->encodePartSize( rpcTempCU, 0, uiDepth, true );
+  m_pcEntropyCoder->encodePredInfo( rpcTempCU, 0 );
+  m_pcEntropyCoder->encodeIPCMInfo(rpcTempCU, 0, true );
+
+  // Encode Coefficients
+  Bool bCodeDQP = getdQPFlag();
+  Bool codeChromaQpAdjFlag = getCodeChromaQpAdjFlag();
+  m_pcEntropyCoder->encodeCoeff( rpcTempCU, 0, uiDepth, bCodeDQP, codeChromaQpAdjFlag );
+  setCodeChromaQpAdjFlag( codeChromaQpAdjFlag );
+  setdQPFlag( bCodeDQP );
+
+  // Snapshot the CABAC state so it can be compared/restored for the next candidate.
+  m_pcRDGoOnSbacCoder->store(m_pppcRDSbacCoder[uiDepth][CI_TEMP_BEST]);
+
+  rpcTempCU->getTotalBits() = m_pcEntropyCoder->getNumberOfWrittenBits();
+  rpcTempCU->getTotalBins() = ((TEncBinCABAC *)((TEncSbac*)m_pcEntropyCoder->m_pcEntropyCoderIf)->getEncBinIf())->getBinsCoded();
+  rpcTempCU->getTotalCost() = m_pcRdCost->calcRdCost( rpcTempCU->getTotalBits(), rpcTempCU->getTotalDistortion() );
+
+  xCheckDQP( rpcTempCU );
+
+  cost = rpcTempCU->getTotalCost();
+
+  xCheckBestMode(rpcBestCU, rpcTempCU, uiDepth DEBUG_STRING_PASS_INTO(sDebug) DEBUG_STRING_PASS_INTO(sTest));
+}
+
+
+/** Check R-D costs for a CU with PCM mode.
+ * \param rpcBestCU pointer to best mode CU data structure
+ * \param rpcTempCU pointer to testing mode CU data structure
+ * \returns Void
+ *
+ * \note Current PCM implementation encodes sample values in a lossless way. The distortion of PCM mode CUs are zero. PCM mode is selected if the best mode yields bits greater than that of PCM mode.
+ */
+Void TEncCu::xCheckIntraPCM( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU )
+{
+  UInt uiDepth = rpcTempCU->getDepth( 0 );
+
+  // Configure the CU as a non-skip, 2Nx2N intra CU with the IPCM flag set.
+  rpcTempCU->setSkipFlagSubParts( false, 0, uiDepth );
+
+  rpcTempCU->setIPCMFlag(0, true);
+  rpcTempCU->setIPCMFlagSubParts (true, 0, rpcTempCU->getDepth(0));
+  rpcTempCU->setPartSizeSubParts( SIZE_2Nx2N, 0, uiDepth );
+  rpcTempCU->setPredModeSubParts( MODE_INTRA, 0, uiDepth );
+  rpcTempCU->setTrIdxSubParts ( 0, 0, uiDepth );
+  rpcTempCU->setChromaQpAdjSubParts( rpcTempCU->getCUTransquantBypass(0) ? 0 : m_ChromaQpAdjIdc, 0, uiDepth );
+
+  m_pcPredSearch->IPCMSearch( rpcTempCU, m_ppcOrigYuv[uiDepth], m_ppcPredYuvTemp[uiDepth], m_ppcResiYuvTemp[uiDepth], m_ppcRecoYuvTemp[uiDepth]);
+
+  // Restart bit counting from the current-best CABAC state.
+  m_pcRDGoOnSbacCoder->load(m_pppcRDSbacCoder[uiDepth][CI_CURR_BEST]);
+
+  m_pcEntropyCoder->resetBits();
+
+  if ( rpcTempCU->getSlice()->getPPS()->getTransquantBypassEnableFlag())
+  {
+    m_pcEntropyCoder->encodeCUTransquantBypassFlag( rpcTempCU, 0,          true );
+  }
+
+  m_pcEntropyCoder->encodeSkipFlag ( rpcTempCU, 0,          true );
+  m_pcEntropyCoder->encodePredMode ( rpcTempCU, 0,          true );
+  m_pcEntropyCoder->encodePartSize ( rpcTempCU, 0, uiDepth, true );
+  m_pcEntropyCoder->encodeIPCMInfo ( rpcTempCU, 0, true );
+
+  // Save the CABAC state reached after coding this candidate.
+  m_pcRDGoOnSbacCoder->store(m_pppcRDSbacCoder[uiDepth][CI_TEMP_BEST]);
+
+  rpcTempCU->getTotalBits() = m_pcEntropyCoder->getNumberOfWrittenBits();
+  rpcTempCU->getTotalBins() = ((TEncBinCABAC *)((TEncSbac*)m_pcEntropyCoder->m_pcEntropyCoderIf)->getEncBinIf())->getBinsCoded();
+  rpcTempCU->getTotalCost() = m_pcRdCost->calcRdCost( rpcTempCU->getTotalBits(), rpcTempCU->getTotalDistortion() );
+
+  xCheckDQP( rpcTempCU );
+  // Dummy debug strings: PCM has no prediction trace to pass along.
+  DEBUG_STRING_NEW(a)
+  DEBUG_STRING_NEW(b)
+  xCheckBestMode(rpcBestCU, rpcTempCU, uiDepth DEBUG_STRING_PASS_INTO(a) DEBUG_STRING_PASS_INTO(b));
+}
+
+/** Keep the lower-RD-cost CU of (best, temp) as the new best at this depth.
+ *  On a win, the best/temp CU pointers and the per-depth prediction and
+ *  reconstruction buffers are exchanged (cheap pointer swap, no data copy),
+ *  and the winning CABAC snapshot is stored as CI_NEXT_BEST.
+ * \param rpcBestCU best CU so far (may be replaced)
+ * \param rpcTempCU candidate CU (receives the old best on a win)
+ * \param uiDepth   current CU depth
+ */
+Void TEncCu::xCheckBestMode( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU, UInt uiDepth DEBUG_STRING_FN_DECLARE(sParent) DEBUG_STRING_FN_DECLARE(sTest) DEBUG_STRING_PASS_INTO(Bool bAddSizeInfo) )
+{
+  if( rpcTempCU->getTotalCost() < rpcBestCU->getTotalCost() )
+  {
+    TComYuv* pcYuv;
+    // Change Information data
+    TComDataCU* pcCU = rpcBestCU;
+    rpcBestCU = rpcTempCU;
+    rpcTempCU = pcCU;
+
+    // Change Prediction data
+    pcYuv = m_ppcPredYuvBest[uiDepth];
+    m_ppcPredYuvBest[uiDepth] = m_ppcPredYuvTemp[uiDepth];
+    m_ppcPredYuvTemp[uiDepth] = pcYuv;
+
+    // Change Reconstruction data
+    pcYuv = m_ppcRecoYuvBest[uiDepth];
+    m_ppcRecoYuvBest[uiDepth] = m_ppcRecoYuvTemp[uiDepth];
+    m_ppcRecoYuvTemp[uiDepth] = pcYuv;
+
+    pcYuv = NULL;
+    pcCU  = NULL;
+
+    // store temp best CI for next CU coding
+    m_pppcRDSbacCoder[uiDepth][CI_TEMP_BEST]->store(m_pppcRDSbacCoder[uiDepth][CI_NEXT_BEST]);
+
+
+#ifdef DEBUG_STRING
+    DEBUG_STRING_SWAP(sParent, sTest)
+    const PredMode predMode=rpcBestCU->getPredictionMode(0);
+    if ((DebugOptionList::DebugString_Structure.getInt()&DebugStringGetPredModeMask(predMode)) && bAddSizeInfo)
+    {
+      std::stringstream ss(stringstream::out);
+      ss <<"###: " << (predMode==MODE_INTRA?"Intra   ":"Inter   ") << partSizeToString[rpcBestCU->getPartitionSize(0)] << " CU at " << rpcBestCU->getCUPelX() << ", " << rpcBestCU->getCUPelY() << " width=" << UInt(rpcBestCU->getWidth(0)) << std::endl;
+      sParent+=ss.str();
+    }
+#endif
+  }
+}
+
+/** Handle dQP bookkeeping for a CU after RD estimation.
+ *  Active only when the PPS enables dQP and the CU is not smaller than the
+ *  minimum dQP signalling size. If the CU codes coefficients (root CBF set),
+ *  the dQP bits are folded into its RD totals; otherwise no dQP is sent and
+ *  the CU QP is reset to the reference QP.
+ */
+Void TEncCu::xCheckDQP( TComDataCU* pcCU )
+{
+  UInt uiDepth = pcCU->getDepth( 0 );
+
+  if( pcCU->getSlice()->getPPS()->getUseDQP() && (g_uiMaxCUWidth>>uiDepth) >= pcCU->getSlice()->getPPS()->getMinCuDQPSize() )
+  {
+    if ( pcCU->getQtRootCbf( 0) )
+    {
+#if !RDO_WITHOUT_DQP_BITS
+      // Count the coded dQP bits and update the RD cost accordingly.
+      m_pcEntropyCoder->resetBits();
+      m_pcEntropyCoder->encodeQP( pcCU, 0, false );
+      pcCU->getTotalBits() += m_pcEntropyCoder->getNumberOfWrittenBits(); // dQP bits
+      pcCU->getTotalBins() += ((TEncBinCABAC *)((TEncSbac*)m_pcEntropyCoder->m_pcEntropyCoderIf)->getEncBinIf())->getBinsCoded();
+      pcCU->getTotalCost() = m_pcRdCost->calcRdCost( pcCU->getTotalBits(), pcCU->getTotalDistortion() );
+#endif
+    }
+    else
+    {
+      pcCU->setQPSubParts( pcCU->getRefQP( 0 ), 0, uiDepth ); // set QP to default QP
+    }
+  }
+}
+
+/// Copy an AMVP candidate list: candidate count plus the MV candidates themselves.
+Void TEncCu::xCopyAMVPInfo (AMVPInfo* pSrc, AMVPInfo* pDst)
+{
+  const Int numCand = pSrc->iN;
+  pDst->iN = numCand;
+  for (Int idx = numCand - 1; idx >= 0; idx--)
+  {
+    pDst->m_acMvCand[idx] = pSrc->m_acMvCand[idx];
+  }
+}
+/** Copy the best reconstruction and prediction at uiSrcDepth into the picture
+ *  buffers. pcCU, uiLPelX and uiTPelY are unused here (kept for interface
+ *  compatibility with existing callers).
+ */
+Void TEncCu::xCopyYuv2Pic(TComPic* rpcPic, UInt uiCUAddr, UInt uiAbsPartIdx, UInt uiDepth, UInt uiSrcDepth, TComDataCU* pcCU, UInt uiLPelX, UInt uiTPelY )
+{
+  const UInt numPartInCtuWidth = rpcPic->getNumPartInCtuWidth();
+  const UInt rasterIdx = g_auiZscanToRaster[uiAbsPartIdx];
+  const UInt srcWidth  = numPartInCtuWidth >> uiSrcDepth; // source block width, in partition units
+  const UInt dstWidth  = numPartInCtuWidth >> uiDepth;    // destination block width, in partition units
+  const UInt partX     = ( ( rasterIdx % numPartInCtuWidth ) % srcWidth ) / dstWidth;
+  const UInt partY     = ( ( rasterIdx / numPartInCtuWidth ) % srcWidth ) / dstWidth;
+  const UInt partIdx   = partY * ( srcWidth / dstWidth ) + partX;
+
+  m_ppcRecoYuvBest[uiSrcDepth]->copyToPicYuv( rpcPic->getPicYuvRec (), uiCUAddr, uiAbsPartIdx, uiDepth - uiSrcDepth, partIdx );
+  m_ppcPredYuvBest[uiSrcDepth]->copyToPicYuv( rpcPic->getPicYuvPred (), uiCUAddr, uiAbsPartIdx, uiDepth - uiSrcDepth, partIdx );
+}
+
+/// Propagate the best reco/pred buffers of the child depth up to the parent depth.
+Void TEncCu::xCopyYuv2Tmp( UInt uiPartUnitIdx, UInt uiNextDepth )
+{
+  const UInt uiParentDepth = uiNextDepth - 1;
+  m_ppcRecoYuvBest[uiNextDepth]->copyToPartYuv( m_ppcRecoYuvTemp[uiParentDepth], uiPartUnitIdx );
+  // NOTE(review): reconstruction goes to the parent's *temp* buffer while
+  // prediction goes to the parent's *best* buffer. This matches upstream HM,
+  // but confirm the asymmetry is intentional.
+  m_ppcPredYuvBest[uiNextDepth]->copyToPartYuv( m_ppcPredYuvBest[uiParentDepth], uiPartUnitIdx );
+}
+
+/** Fill the PCM sample buffer of a CU from the original picture samples.
+ * \param pCU     CU whose PCM buffer is written
+ * \param pOrgYuv original samples covering the CU
+ */
+Void TEncCu::xFillPCMBuffer     ( TComDataCU* pCU, TComYuv* pOrgYuv )
+{
+  const ChromaFormat chFmt = pCU->getPic()->getChromaFormat();
+  const UInt numComponents = getNumberValidComponents(chFmt);
+
+  for (UInt ch = 0; ch < numComponents; ch++)
+  {
+    const ComponentID compID    = ComponentID(ch);
+    const UInt        width     = pCU->getWidth(0)  >> getComponentScaleX(compID, chFmt);
+    const UInt        height    = pCU->getHeight(0) >> getComponentScaleY(compID, chFmt);
+    const UInt        srcStride = pOrgYuv->getStride(compID);
+
+    const Pel *src = pOrgYuv->getAddr(compID, 0, width);
+    Pel       *dst = pCU->getPCMSample(compID);
+
+    // Copy row by row; the destination PCM buffer is packed (stride == width).
+    for (UInt y = 0; y < height; y++, src += srcStride, dst += width)
+    {
+      for (UInt x = 0; x < width; x++)
+      {
+        dst[x] = src[x];
+      }
+    }
+  }
+}
+
+#if ADAPTIVE_QP_SELECTION
+/** Accumulate ARL (adaptive reconstruction level) statistics for one block.
+ *  For every non-zero quantized level, the matching ARL coefficient is added
+ *  to cSum[level] and numSamples[level] is incremented; levels at or above
+ *  LEVEL_RANGE are folded into the LEVEL_RANGE bucket with the scaled level
+ *  value removed from the sum.
+ */
+Int TEncCu::xTuCollectARLStats(TCoeff* rpcCoeff, TCoeff* rpcArlCoeff, Int NumCoeffInCU, Double* cSum, UInt* numSamples )
+{
+  for( Int n = 0; n < NumCoeffInCU; n++ )
+  {
+    const TCoeff level = abs( rpcCoeff[ n ] );
+
+    if( level == 0 )
+    {
+      continue; // zero coefficients contribute no statistics
+    }
+
+    const TCoeff arlCoeff = rpcArlCoeff[ n ];
+
+    if( level < LEVEL_RANGE )
+    {
+      cSum[ level ] += ( Double )arlCoeff;
+      numSamples[ level ]++;
+    }
+    else
+    {
+      cSum[ LEVEL_RANGE ] += ( Double )arlCoeff - ( Double )( level << ARL_C_PRECISION );
+      numSamples[ LEVEL_RANGE ]++;
+    }
+  }
+
+  return 0; // return value is ignored by the caller
+}
+
+/** Collect ARL statistics from one CTU and add them to the slice-level sums
+ *  held by m_pcTrQuant. Only inter-coded parts with a coded luma CBF
+ *  contribute (QP rounding is based on inter luma data only).
+ * \param pCtu the CTU to scan
+ */
+Void TEncCu::xCtuCollectARLStats(TComDataCU* pCtu )
+{
+  Double cSum[ LEVEL_RANGE + 1 ];     //: the sum of DCT coefficients corresponding to datatype and quantization output
+  UInt numSamples[ LEVEL_RANGE + 1 ]; //: the number of coefficients corresponding to datatype and quantization output
+
+  TCoeff* pCoeffY = pCtu->getCoeff(COMPONENT_Y);
+  TCoeff* pArlCoeffY = pCtu->getArlCoeff(COMPONENT_Y);
+
+  UInt uiMinCUWidth = g_uiMaxCUWidth >> g_uiMaxCUDepth;
+  // NOTE(review): 1 << uiMinCUWidth equals width*width only when the minimum
+  // CU width is 4 (1<<4 == 16 == 4*4); confirm no other configurations occur.
+  UInt uiMinNumCoeffInCU = 1 << uiMinCUWidth;
+
+  memset( cSum, 0, sizeof( Double )*(LEVEL_RANGE+1) );
+  memset( numSamples, 0, sizeof( UInt )*(LEVEL_RANGE+1) );
+
+  // Collect stats to cSum[][] and numSamples[][]
+  for(Int i = 0; i < pCtu->getTotalNumPart(); i ++ )
+  {
+    UInt uiTrIdx = pCtu->getTransformIdx(i);
+
+    if(pCtu->isInter(i) && pCtu->getCbf( i, COMPONENT_Y, uiTrIdx ) )
+    {
+      xTuCollectARLStats(pCoeffY, pArlCoeffY, uiMinNumCoeffInCU, cSum, numSamples);
+    }//Note that only InterY is processed. QP rounding is based on InterY data only.
+
+    pCoeffY  += uiMinNumCoeffInCU;
+    pArlCoeffY  += uiMinNumCoeffInCU;
+  }
+
+  // Fold levels 1..LEVEL_RANGE-1 and then the saturation bucket into the slice totals.
+  for(Int u=1; u<LEVEL_RANGE;u++)
+  {
+    m_pcTrQuant->getSliceSumC()[u] += cSum[ u ] ;
+    m_pcTrQuant->getSliceNSamples()[u] += numSamples[ u ] ;
+  }
+  m_pcTrQuant->getSliceSumC()[LEVEL_RANGE] += cSum[ LEVEL_RANGE ] ;
+  m_pcTrQuant->getSliceNSamples()[LEVEL_RANGE] += numSamples[ LEVEL_RANGE ] ;
+}
+#endif
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncCu.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,177 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncCu.h
+    \brief    Coding Unit (CU) encoder class (header)
+*/
+
+#ifndef __TENCCU__
+#define __TENCCU__
+
+// Include files
+#include "TLibCommon/CommonDef.h"
+#include "TLibCommon/TComYuv.h"
+#include "TLibCommon/TComPrediction.h"
+#include "TLibCommon/TComTrQuant.h"
+#include "TLibCommon/TComBitCounter.h"
+#include "TLibCommon/TComDataCU.h"
+
+#include "TEncEntropy.h"
+#include "TEncSearch.h"
+#include "TEncRateCtrl.h"
+//! \ingroup TLibEncoder
+//! \{
+
+class TEncTop;
+class TEncSbac;
+class TEncCavlc;
+class TEncSlice;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// CU encoder class: performs the per-CTU mode decision (RD search over CU
+/// depths, partitionings and prediction modes) and the final CU encoding.
+class TEncCu
+{
+private:
+
+  TComDataCU**            m_ppcBestCU;      ///< Best CUs in each depth
+  TComDataCU**            m_ppcTempCU;      ///< Temporary CUs in each depth
+  UChar                   m_uhTotalDepth;   ///< total CU depth, as passed to create()
+
+  TComYuv**               m_ppcPredYuvBest; ///< Best Prediction Yuv for each depth
+  TComYuv**               m_ppcResiYuvBest; ///< Best Residual Yuv for each depth
+  TComYuv**               m_ppcRecoYuvBest; ///< Best Reconstruction Yuv for each depth
+  TComYuv**               m_ppcPredYuvTemp; ///< Temporary Prediction Yuv for each depth
+  TComYuv**               m_ppcResiYuvTemp; ///< Temporary Residual Yuv for each depth
+  TComYuv**               m_ppcRecoYuvTemp; ///< Temporary Reconstruction Yuv for each depth
+  TComYuv**               m_ppcOrigYuv;     ///< Original Yuv for each depth
+
+  //  Data : encoder control
+  Bool                    m_bEncodeDQP;          ///< true while a dQP remains to be coded for the current CU
+  Bool                    m_CodeChromaQpAdjFlag; ///< true while a chroma QP adjustment remains to be coded
+  Int                     m_ChromaQpAdjIdc;      ///< chroma QP adjustment index applied to non-bypass CUs
+
+  //  Access channel
+  TEncCfg*                m_pcEncCfg;       ///< encoder configuration
+  TEncSearch*             m_pcPredSearch;   ///< prediction search (intra/inter/PCM)
+  TComTrQuant*            m_pcTrQuant;      ///< transform and quantization
+  TComRdCost*             m_pcRdCost;       ///< RD cost computation
+
+  TEncEntropy*            m_pcEntropyCoder; ///< entropy coder used for bit counting during RD
+  TEncBinCABAC*           m_pcBinCABAC;     ///< CABAC binarization engine
+
+  // SBAC RD
+  TEncSbac***             m_pppcRDSbacCoder;   ///< CABAC state snapshots per [depth][CI index]
+  TEncSbac*               m_pcRDGoOnSbacCoder; ///< "go-on" coder driven during RD estimation
+  TEncRateCtrl*           m_pcRateCtrl;        ///< rate control
+
+public:
+  /// copy parameters from encoder class
+  Void  init                ( TEncTop* pcEncTop );
+
+  /// create internal buffers
+  Void  create              ( UChar uhTotalDepth, UInt iMaxWidth, UInt iMaxHeight, ChromaFormat chromaFormat );
+
+  /// destroy internal buffers
+  Void  destroy             ();
+
+  /// CTU analysis function
+  Void  compressCtu         ( TComDataCU*  pCtu );
+
+  /// CTU encoding function
+  Void  encodeCtu           ( TComDataCU*  pCtu );
+
+  Int   updateCtuDataISlice ( TComDataCU* pCtu, Int width, Int height );
+
+protected:
+  Void  finishCU            ( TComDataCU*  pcCU, UInt uiAbsPartIdx,           UInt uiDepth        );
+#if AMP_ENC_SPEEDUP
+  Void  xCompressCU         ( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU, UInt uiDepth DEBUG_STRING_FN_DECLARE(sDebug), PartSize eParentPartSize = NUMBER_OF_PART_SIZES );
+#else
+  Void  xCompressCU         ( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU, UInt uiDepth        );
+#endif
+  Void  xEncodeCU           ( TComDataCU*  pcCU, UInt uiAbsPartIdx,           UInt uiDepth        );
+
+  Int   xComputeQP          ( TComDataCU* pcCU, UInt uiDepth );
+  /// keep the lower-cost CU of (best, temp); swaps pointers and buffers on a win
+  Void  xCheckBestMode      ( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU, UInt uiDepth DEBUG_STRING_FN_DECLARE(sParent) DEBUG_STRING_FN_DECLARE(sTest) DEBUG_STRING_PASS_INTO(Bool bAddSizeInfo=true));
+
+  Void  xCheckRDCostMerge2Nx2N( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU DEBUG_STRING_FN_DECLARE(sDebug), Bool *earlyDetectionSkipMode );
+
+#if AMP_MRG
+  Void  xCheckRDCostInter   ( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU, PartSize ePartSize DEBUG_STRING_FN_DECLARE(sDebug), Bool bUseMRG = false  );
+#else
+  Void  xCheckRDCostInter   ( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU, PartSize ePartSize  );
+#endif
+
+  Void  xCheckRDCostIntra   ( TComDataCU *&rpcBestCU,
+                              TComDataCU *&rpcTempCU,
+                              Double      &cost,
+                              PartSize     ePartSize
+                              DEBUG_STRING_FN_DECLARE(sDebug)
+                            );
+
+  Void  xCheckDQP           ( TComDataCU*  pcCU );
+
+  Void  xCheckIntraPCM      ( TComDataCU*& rpcBestCU, TComDataCU*& rpcTempCU                      );
+  Void  xCopyAMVPInfo       ( AMVPInfo* pSrc, AMVPInfo* pDst );
+  Void  xCopyYuv2Pic        (TComPic* rpcPic, UInt uiCUAddr, UInt uiAbsPartIdx, UInt uiDepth, UInt uiSrcDepth, TComDataCU* pcCU, UInt uiLPelX, UInt uiTPelY );
+  Void  xCopyYuv2Tmp        ( UInt uhPartUnitIdx, UInt uiDepth ); // note: second argument is the child depth (uiNextDepth in the .cpp)
+
+  Bool getdQPFlag           ()                        { return m_bEncodeDQP;        }
+  Void setdQPFlag           ( Bool b )                { m_bEncodeDQP = b;           }
+
+  Bool getCodeChromaQpAdjFlag() { return m_CodeChromaQpAdjFlag; }
+  Void setCodeChromaQpAdjFlag( Bool b ) { m_CodeChromaQpAdjFlag = b; }
+
+#if ADAPTIVE_QP_SELECTION
+  // Adaptive reconstruction level (ARL) statistics collection functions
+  Void xCtuCollectARLStats(TComDataCU* pCtu);
+  Int  xTuCollectARLStats(TCoeff* rpcCoeff, TCoeff* rpcArlCoeff, Int NumCoeffInCU, Double* cSum, UInt* numSamples );
+#endif
+
+#if AMP_ENC_SPEEDUP
+#if AMP_MRG
+  Void deriveTestModeAMP (TComDataCU *pcBestCU, PartSize eParentPartSize, Bool &bTestAMP_Hor, Bool &bTestAMP_Ver, Bool &bTestMergeAMP_Hor, Bool &bTestMergeAMP_Ver);
+#else
+  Void deriveTestModeAMP (TComDataCU *pcBestCU, PartSize eParentPartSize, Bool &bTestAMP_Hor, Bool &bTestAMP_Ver);
+#endif
+#endif
+
+  Void  xFillPCMBuffer     ( TComDataCU* pCU, TComYuv* pOrgYuv );
+};
+
+//! \}
+
+#endif // __TENCCU__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncEntropy.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,741 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncEntropy.cpp
+    \brief    entropy encoder class
+*/
+
+#include "TEncEntropy.h"
+#include "TLibCommon/TypeDef.h"
+#include "TLibCommon/TComSampleAdaptiveOffset.h"
+#include "TLibCommon/TComTU.h"
+
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+#include "../TLibCommon/Debug.h"
+static const Bool bDebugPredEnabled = DebugOptionList::DebugPred.getInt()!=0;
+#endif
+
+//! \ingroup TLibEncoder
+//! \{
+
+/// Select the entropy coder implementation and bind it to the given slice.
+Void TEncEntropy::setEntropyCoder ( TEncEntropyIf* e, TComSlice* pcSlice )
+{
+  m_pcEntropyCoderIf = e;
+  m_pcEntropyCoderIf->setSlice ( pcSlice );
+}
+
+/// Write the slice header via the attached entropy coder.
+Void TEncEntropy::encodeSliceHeader ( TComSlice* pcSlice )
+{
+  m_pcEntropyCoderIf->codeSliceHeader( pcSlice );
+}
+
+/// Write the tiles/WPP entry-point information for the slice.
+Void  TEncEntropy::encodeTilesWPPEntryPoint( TComSlice* pSlice )
+{
+  m_pcEntropyCoderIf->codeTilesWPPEntryPoint( pSlice );
+}
+
+/// Forward the terminating bit to the entropy coder.
+Void TEncEntropy::encodeTerminatingBit      ( UInt uiIsLast )
+{
+  m_pcEntropyCoderIf->codeTerminatingBit( uiIsLast );
+}
+
+/// Delegate slice finishing to the attached entropy coder.
+Void TEncEntropy::encodeSliceFinish()
+{
+  m_pcEntropyCoderIf->codeSliceFinish();
+}
+
+/// Code the picture parameter set.
+Void TEncEntropy::encodePPS( TComPPS* pcPPS )
+{
+  m_pcEntropyCoderIf->codePPS( pcPPS );
+}
+
+/// Code the sequence parameter set.
+Void TEncEntropy::encodeSPS( TComSPS* pcSPS )
+{
+  m_pcEntropyCoderIf->codeSPS( pcSPS );
+}
+
+/// Code the cu_transquant_bypass_flag; RD estimation (bRD) forces part index 0.
+Void TEncEntropy::encodeCUTransquantBypassFlag( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD )
+{
+  m_pcEntropyCoderIf->codeCUTransquantBypassFlag( pcCU, bRD ? 0 : uiAbsPartIdx );
+}
+
+/// Code the video parameter set.
+Void TEncEntropy::encodeVPS( TComVPS* pcVPS )
+{
+  m_pcEntropyCoderIf->codeVPS( pcVPS );
+}
+
+/// Code the CU skip flag; intra slices signal nothing. bRD forces part index 0.
+Void TEncEntropy::encodeSkipFlag( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD )
+{
+  if ( pcCU->getSlice()->isIntra() )
+  {
+    return;
+  }
+  m_pcEntropyCoderIf->codeSkipFlag( pcCU, bRD ? 0 : uiAbsPartIdx );
+}
+
+/** Code the merge flag of a PU.
+ * \param pcCU         CU containing the PU
+ * \param uiAbsPartIdx part index of the PU within the CTU
+ */
+Void TEncEntropy::encodeMergeFlag( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  // at least one merge candidate exists
+  m_pcEntropyCoderIf->codeMergeFlag( pcCU, uiAbsPartIdx );
+}
+
+/** Code the merge candidate index of a PU.
+ * \param pcCU         CU containing the PU
+ * \param uiAbsPartIdx part index of the PU
+ * \param bRD          RD-estimation mode: forces part index 0 (only valid for 2Nx2N)
+ */
+Void TEncEntropy::encodeMergeIndex( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD )
+{
+  if( bRD )
+  {
+    uiAbsPartIdx = 0;
+    assert( pcCU->getPartitionSize(uiAbsPartIdx) == SIZE_2Nx2N );
+  }
+  m_pcEntropyCoderIf->codeMergeIndex( pcCU, uiAbsPartIdx );
+}
+
+
+/** Code the prediction mode (intra/inter) of a CU.
+ *  Intra slices signal nothing (the mode is implied); bRD forces part index 0.
+ */
+Void TEncEntropy::encodePredMode( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD )
+{
+  if ( pcCU->getSlice()->isIntra() )
+  {
+    return;
+  }
+  m_pcEntropyCoderIf->codePredMode( pcCU, bRD ? 0 : uiAbsPartIdx );
+}
+
+// Split mode
+/// Code the CU split flag at the given depth; bRD forces part index 0.
+Void TEncEntropy::encodeSplitFlag( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth, Bool bRD )
+{
+  m_pcEntropyCoderIf->codeSplitFlag( pcCU, bRD ? 0 : uiAbsPartIdx, uiDepth );
+}
+
+/** Code the partition size of a CU.
+ * \param pcCU         CU to code
+ * \param uiAbsPartIdx part index (forced to 0 when bRD is set)
+ * \param uiDepth      CU depth
+ * \param bRD          RD-estimation mode
+ */
+Void TEncEntropy::encodePartSize( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth, Bool bRD )
+{
+  m_pcEntropyCoderIf->codePartSize( pcCU, bRD ? 0 : uiAbsPartIdx, uiDepth );
+}
+
+
+/** Encode I_PCM information.
+ *  Nothing is signalled unless PCM is enabled in the SPS and the CU size lies
+ *  within the SPS-configured [min,max] PCM size range.
+ * \param pcCU         pointer to CU
+ * \param uiAbsPartIdx CU index (forced to 0 when bRD is set)
+ * \param bRD          flag indicating estimation or encoding
+ */
+Void TEncEntropy::encodeIPCMInfo( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD )
+{
+  if( !pcCU->getSlice()->getSPS()->getUsePCM() )
+  {
+    return;
+  }
+
+  const UInt uiWidth = pcCU->getWidth(uiAbsPartIdx);
+  if( uiWidth > (1<<pcCU->getSlice()->getSPS()->getPCMLog2MaxSize())
+   || uiWidth < (1<<pcCU->getSlice()->getSPS()->getPCMLog2MinSize()) )
+  {
+    return;
+  }
+
+  m_pcEntropyCoderIf->codeIPCMInfo ( pcCU, bRD ? 0 : uiAbsPartIdx );
+}
+
+/** Recursively encode the residual quadtree (transform tree) rooted at rTu:
+ *  subdivision flags, chroma/luma CBFs, dQP and chroma-QP-adjustment (each at
+ *  most once per CTU), and the coefficients of every coded block.
+ * \param bCodeDQP        in/out: true while the CU-level dQP still has to be coded
+ * \param codeChromaQpAdj in/out: true while the chroma QP adjustment still has to be coded
+ * \param rTu             current transform unit of the recursion
+ */
+Void TEncEntropy::xEncodeTransform( Bool& bCodeDQP, Bool& codeChromaQpAdj, TComTU &rTu )
+{
+//pcCU, absPartIdxCU, uiAbsPartIdx, uiDepth+1, uiTrIdx+1, quadrant,
+  TComDataCU *pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx=rTu.GetAbsPartIdxTU();
+  const UInt numValidComponent = pcCU->getPic()->getNumberValidComponents();
+  const Bool bChroma = isChromaEnabled(pcCU->getPic()->getChromaFormat());
+  const UInt uiTrIdx = rTu.GetTransformDepthRel();
+  const UInt uiDepth = rTu.GetTransformDepthTotal();
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+  const Bool bDebugRQT=g_bFinalEncode && DebugOptionList::DebugRQT.getInt()!=0;
+  if (bDebugRQT)
+    printf("x..codeTransform: offsetLuma=%d offsetChroma=%d absPartIdx=%d, uiDepth=%d\n width=%d, height=%d, uiTrIdx=%d, uiInnerQuadIdx=%d\n",
+           rTu.getCoefficientOffset(COMPONENT_Y), rTu.getCoefficientOffset(COMPONENT_Cb), uiAbsPartIdx, uiDepth, rTu.getRect(COMPONENT_Y).width, rTu.getRect(COMPONENT_Y).height, rTu.GetTransformDepthRel(), rTu.GetSectionNumber());
+#endif
+  const UInt uiSubdiv = pcCU->getTransformIdx( uiAbsPartIdx ) > uiTrIdx;// + pcCU->getDepth( uiAbsPartIdx ) > uiDepth;
+  const UInt uiLog2TrafoSize = rTu.GetLog2LumaTrSize();
+
+
+  // Gather the CBFs of every component at this TU level.
+  UInt cbf[MAX_NUM_COMPONENT] = {0,0,0};
+  Bool bHaveACodedBlock       = false;
+  Bool bHaveACodedChromaBlock = false;
+
+  for(UInt ch=0; ch<numValidComponent; ch++)
+  {
+    const ComponentID compID = ComponentID(ch);
+
+    cbf[compID] = pcCU->getCbf( uiAbsPartIdx, compID , uiTrIdx );
+    
+    if (cbf[ch] != 0)
+    {
+      bHaveACodedBlock = true;
+      if (isChroma(compID)) bHaveACodedChromaBlock = true;
+    }
+  }
+
+  // The split flag is either fully inferred (assert-checked here) or explicitly coded below.
+  if( pcCU->isIntra(uiAbsPartIdx) && pcCU->getPartitionSize(uiAbsPartIdx) == SIZE_NxN && uiDepth == pcCU->getDepth(uiAbsPartIdx) )
+  {
+    assert( uiSubdiv );
+  }
+  else if( pcCU->isInter(uiAbsPartIdx) && (pcCU->getPartitionSize(uiAbsPartIdx) != SIZE_2Nx2N) && uiDepth == pcCU->getDepth(uiAbsPartIdx) &&  (pcCU->getSlice()->getSPS()->getQuadtreeTUMaxDepthInter() == 1) )
+  {
+    if ( uiLog2TrafoSize > pcCU->getQuadtreeTULog2MinSizeInCU(uiAbsPartIdx) )
+    {
+      assert( uiSubdiv );
+    }
+    else
+    {
+      assert(!uiSubdiv );
+    }
+  }
+  else if( uiLog2TrafoSize > pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() )
+  {
+    assert( uiSubdiv );
+  }
+  else if( uiLog2TrafoSize == pcCU->getSlice()->getSPS()->getQuadtreeTULog2MinSize() )
+  {
+    assert( !uiSubdiv );
+  }
+  else if( uiLog2TrafoSize == pcCU->getQuadtreeTULog2MinSizeInCU(uiAbsPartIdx) )
+  {
+    assert( !uiSubdiv );
+  }
+  else
+  {
+    assert( uiLog2TrafoSize > pcCU->getQuadtreeTULog2MinSizeInCU(uiAbsPartIdx) );
+    m_pcEntropyCoderIf->codeTransformSubdivFlag( uiSubdiv, 5 - uiLog2TrafoSize );
+  }
+
+  const UInt uiTrDepthCurr = uiDepth - pcCU->getDepth( uiAbsPartIdx );
+  const Bool bFirstCbfOfCU = uiTrDepthCurr == 0;
+
+  // Code chroma CBFs where they are signallable at this level.
+  for(UInt ch=COMPONENT_Cb; ch<numValidComponent; ch++)
+  {
+    const ComponentID compID=ComponentID(ch);
+    if( bFirstCbfOfCU || rTu.ProcessingAllQuadrants(compID) )
+    {
+      if( bFirstCbfOfCU || pcCU->getCbf( uiAbsPartIdx, compID, uiTrDepthCurr - 1 ) )
+      {
+        m_pcEntropyCoderIf->codeQtCbf( rTu, compID, (uiSubdiv == 0) );
+      }
+    }
+    else
+    {
+      assert( pcCU->getCbf( uiAbsPartIdx, compID, uiTrDepthCurr ) == pcCU->getCbf( uiAbsPartIdx, compID, uiTrDepthCurr - 1 ) );
+    }
+  }
+
+  // Either recurse into the four child TUs, or code this leaf TU.
+  if( uiSubdiv )
+  {
+    TComTURecurse tuRecurseChild(rTu, true);
+    do
+    {
+      xEncodeTransform( bCodeDQP, codeChromaQpAdj, tuRecurseChild );
+    }
+    while (tuRecurseChild.nextSection(rTu));
+  }
+  else
+  {
+    {
+      DTRACE_CABAC_VL( g_nSymbolCounter++ );
+      DTRACE_CABAC_T( "\tTrIdx: abspart=" );
+      DTRACE_CABAC_V( uiAbsPartIdx );
+      DTRACE_CABAC_T( "\tdepth=" );
+      DTRACE_CABAC_V( uiDepth );
+      DTRACE_CABAC_T( "\ttrdepth=" );
+      DTRACE_CABAC_V( pcCU->getTransformIdx( uiAbsPartIdx ) );
+      DTRACE_CABAC_T( "\n" );
+    }
+
+    if( !pcCU->isIntra(uiAbsPartIdx) && uiDepth == pcCU->getDepth( uiAbsPartIdx ) && (!bChroma || (!pcCU->getCbf( uiAbsPartIdx, COMPONENT_Cb, 0 ) && !pcCU->getCbf( uiAbsPartIdx, COMPONENT_Cr, 0 ) ) ) )
+    {
+      assert( pcCU->getCbf( uiAbsPartIdx, COMPONENT_Y, 0 ) );
+      //      printf( "saved one bin! " );
+    }
+    else
+    {
+      m_pcEntropyCoderIf->codeQtCbf( rTu, COMPONENT_Y, true ); //luma CBF is always at the lowest level
+    }
+
+    if ( bHaveACodedBlock )
+    {
+      // dQP: only for CTU once
+      if ( pcCU->getSlice()->getPPS()->getUseDQP() )
+      {
+        if ( bCodeDQP )
+        {
+          encodeQP( pcCU, rTu.GetAbsPartIdxCU() );
+          bCodeDQP = false;
+        }
+      }
+
+      if ( pcCU->getSlice()->getUseChromaQpAdj() )
+      {
+        if ( bHaveACodedChromaBlock && codeChromaQpAdj && !pcCU->getCUTransquantBypass(rTu.GetAbsPartIdxCU()) )
+        {
+          encodeChromaQpAdjustment( pcCU, rTu.GetAbsPartIdxCU() );
+          codeChromaQpAdj = false;
+        }
+      }
+
+      const UInt numValidComp=pcCU->getPic()->getNumberValidComponents();
+
+      for(UInt ch=COMPONENT_Y; ch<numValidComp; ch++)
+      {
+        const ComponentID compID=ComponentID(ch);
+
+        if (rTu.ProcessComponentSection(compID))
+        {
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+          if (bDebugRQT) printf("Call NxN for chan %d width=%d height=%d cbf=%d\n", compID, rTu.getRect(compID).width, rTu.getRect(compID).height, 1);
+#endif
+
+          if (rTu.getRect(compID).width != rTu.getRect(compID).height)
+          {
+            //code two sub-TUs
+            TComTURecurse subTUIterator(rTu, false, TComTU::VERTICAL_SPLIT, true, compID);
+
+            do
+            {
+              const UChar subTUCBF = pcCU->getCbf(subTUIterator.GetAbsPartIdxTU(compID), compID, (uiTrIdx + 1));
+
+              if (subTUCBF != 0)
+              {
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+                if (bDebugRQT) printf("Call NxN for chan %d width=%d height=%d cbf=%d\n", compID, subTUIterator.getRect(compID).width, subTUIterator.getRect(compID).height, 1);
+#endif
+                m_pcEntropyCoderIf->codeCoeffNxN( subTUIterator, (pcCU->getCoeff(compID) + subTUIterator.getCoefficientOffset(compID)), compID );
+              }
+            }
+            while (subTUIterator.nextSection(rTu));
+          }
+          else
+          {
+            if (isChroma(compID) && (cbf[COMPONENT_Y] != 0))
+            {
+              m_pcEntropyCoderIf->codeCrossComponentPrediction( rTu, compID );
+            }
+
+            if (cbf[compID] != 0)
+            {
+              m_pcEntropyCoderIf->codeCoeffNxN( rTu, (pcCU->getCoeff(compID) + rTu.getCoefficientOffset(compID)), compID );
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+
+// Intra direction for Luma
+/** encode the luma intra prediction mode(s) of a CU by delegating to the
+ *  attached entropy coder implementation
+ * \param pcCU         pointer to the CU
+ * \param absPartIdx   part index of the CU
+ * \param isMultiplePU presumably true when all PUs of the CU are coded in one
+ *                     call (encodePredInfo passes true) -- confirm
+ */
+Void TEncEntropy::encodeIntraDirModeLuma  ( TComDataCU* pcCU, UInt absPartIdx, Bool isMultiplePU )
+{
+  m_pcEntropyCoderIf->codeIntraDirLumaAng( pcCU, absPartIdx , isMultiplePU);
+}
+
+
+// Intra direction for Chroma
+/** encode the chroma intra prediction mode of a CU/PU by delegating to the
+ *  attached entropy coder implementation
+ * \param pcCU         pointer to the CU
+ * \param uiAbsPartIdx part index
+ */
+Void TEncEntropy::encodeIntraDirModeChroma( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  m_pcEntropyCoderIf->codeIntraDirChroma( pcCU, uiAbsPartIdx );
+
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+  if (bDebugPredEnabled && g_bFinalEncode)
+  {
+    UInt cdir=pcCU->getIntraDir(CHANNEL_TYPE_CHROMA, uiAbsPartIdx);
+    // NOTE(review): 36 appears to be the "derive from luma" marker -- confirm
+    if (cdir==36) cdir=pcCU->getIntraDir(CHANNEL_TYPE_LUMA, uiAbsPartIdx);
+    printf("coding chroma Intra dir: %d, uiAbsPartIdx: %d, luma dir: %d\n", cdir, uiAbsPartIdx, pcCU->getIntraDir(CHANNEL_TYPE_LUMA, uiAbsPartIdx));
+  }
+#endif
+}
+
+
+/** encode the prediction information of a CU: intra prediction directions
+ *  for intra CUs, per-PU motion data for inter CUs
+ * \param pcCU         pointer to the CU
+ * \param uiAbsPartIdx part index of the CU
+ */
+Void TEncEntropy::encodePredInfo( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  if( !pcCU->isIntra( uiAbsPartIdx ) )
+  {
+    // Inter CU: signal motion vectors and reference indices per PU.
+    encodePUWise( pcCU, uiAbsPartIdx );
+    return;
+  }
+
+  // Intra CU: luma direction(s) first.
+  encodeIntraDirModeLuma  ( pcCU, uiAbsPartIdx,true );
+
+  if (pcCU->getPic()->getChromaFormat()==CHROMA_400)
+  {
+    return; // monochrome: no chroma direction to signal
+  }
+
+  encodeIntraDirModeChroma( pcCU, uiAbsPartIdx );
+
+  if (enable4ChromaPUsInIntraNxNCU(pcCU->getPic()->getChromaFormat()) && pcCU->getPartitionSize( uiAbsPartIdx )==SIZE_NxN)
+  {
+    // NxN split with four chroma PUs: one chroma direction per remaining PU.
+    const UInt uiPartOffset = ( pcCU->getPic()->getNumPartitionsInCtu() >> ( pcCU->getDepth(uiAbsPartIdx) << 1 ) ) >> 2;
+    for (UInt puIdx = 1; puIdx < 4; puIdx++)
+    {
+      encodeIntraDirModeChroma( pcCU, uiAbsPartIdx + puIdx*uiPartOffset );
+    }
+  }
+}
+
+/** encode the cross-component prediction parameters of one TU component
+ *  (thin delegation to the attached entropy coder implementation) */
+Void TEncEntropy::encodeCrossComponentPrediction( TComTU &rTu, ComponentID compID )
+{
+  m_pcEntropyCoderIf->codeCrossComponentPrediction( rTu, compID );
+}
+
+/** encode motion information for every PU block
+ * \param pcCU         pointer to the CU
+ * \param uiAbsPartIdx part index of the CU
+ * \returns Void
+ *
+ * Iterates over the 1, 2 or 4 PUs of the CU (depending on the partition
+ * size) and codes, per PU, either a merge index or the explicit motion
+ * data (inter direction, reference indices, MVDs and MVP indices).
+ */
+Void TEncEntropy::encodePUWise( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+  const Bool bDebugPred = bDebugPredEnabled && g_bFinalEncode;
+#endif
+
+  PartSize ePartSize = pcCU->getPartitionSize( uiAbsPartIdx );
+  UInt uiNumPU = ( ePartSize == SIZE_2Nx2N ? 1 : ( ePartSize == SIZE_NxN ? 4 : 2 ) );
+  UInt uiDepth = pcCU->getDepth( uiAbsPartIdx );
+  UInt uiPUOffset = ( g_auiPUOffset[UInt( ePartSize )] << ( ( pcCU->getSlice()->getSPS()->getMaxCUDepth() - uiDepth ) << 1 ) ) >> 4;
+
+  for ( UInt uiPartIdx = 0, uiSubPartIdx = uiAbsPartIdx; uiPartIdx < uiNumPU; uiPartIdx++, uiSubPartIdx += uiPUOffset )
+  {
+    encodeMergeFlag( pcCU, uiSubPartIdx );
+    if ( pcCU->getMergeFlag( uiSubPartIdx ) )
+    {
+      encodeMergeIndex( pcCU, uiSubPartIdx );
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+      if (bDebugPred)
+      {
+        std::cout << "Coded merge flag, CU absPartIdx: " << uiAbsPartIdx << " PU(" << uiPartIdx << ") absPartIdx: " << uiSubPartIdx;
+        std::cout << " merge index: " << (UInt)pcCU->getMergeIndex(uiSubPartIdx) << std::endl;
+      }
+#endif
+    }
+    else
+    {
+      encodeInterDirPU( pcCU, uiSubPartIdx );
+      for ( UInt uiRefListIdx = 0; uiRefListIdx < 2; uiRefListIdx++ )
+      {
+        if ( pcCU->getSlice()->getNumRefIdx( RefPicList( uiRefListIdx ) ) > 0 )
+        {
+          encodeRefFrmIdxPU ( pcCU, uiSubPartIdx, RefPicList( uiRefListIdx ) );
+          encodeMvdPU       ( pcCU, uiSubPartIdx, RefPicList( uiRefListIdx ) );
+          encodeMVPIdxPU    ( pcCU, uiSubPartIdx, RefPicList( uiRefListIdx ) );
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+          if (bDebugPred)
+          {
+            std::cout << "refListIdx: " << uiRefListIdx << std::endl;
+            // bugfix: trace the MVD of the current PU (uiSubPartIdx), not the
+            // CU's first part (uiAbsPartIdx) -- every other access in this
+            // loop already uses uiSubPartIdx
+            std::cout << "MVD horizontal: " << pcCU->getCUMvField(RefPicList(uiRefListIdx))->getMvd( uiSubPartIdx ).getHor() << std::endl;
+            std::cout << "MVD vertical:   " << pcCU->getCUMvField(RefPicList(uiRefListIdx))->getMvd( uiSubPartIdx ).getVer() << std::endl;
+            std::cout << "MVPIdxPU: " << pcCU->getMVPIdx(RefPicList( uiRefListIdx ), uiSubPartIdx) << std::endl;
+            std::cout << "InterDir: " << (UInt)pcCU->getInterDir(uiSubPartIdx) << std::endl;
+          }
+#endif
+        }
+      }
+    }
+  }
+
+  return;
+}
+
+/** encode the inter prediction direction of a PU; inter_pred_idc is only
+ *  signalled in B slices, so nothing is written otherwise */
+Void TEncEntropy::encodeInterDirPU( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  if ( pcCU->getSlice()->isInterB() )
+  {
+    m_pcEntropyCoderIf->codeInterDir( pcCU, uiAbsPartIdx );
+  }
+}
+
+/** encode reference frame index for a PU block
+ * \param pcCU
+ * \param uiAbsPartIdx
+ * \param eRefList
+ * \returns Void
+ *
+ * Nothing is written when the list holds a single reference picture
+ * (the index is then unambiguous) or when the PU does not use the list.
+ */
+Void TEncEntropy::encodeRefFrmIdxPU( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )
+{
+  assert( pcCU->isInter( uiAbsPartIdx ) );
+
+  const Bool bListUsed = ( pcCU->getInterDir( uiAbsPartIdx ) & ( 1 << eRefList ) ) != 0;
+
+  if ( pcCU->getSlice()->getNumRefIdx( eRefList ) != 1 && bListUsed )
+  {
+    m_pcEntropyCoderIf->codeRefFrmIdx( pcCU, uiAbsPartIdx, eRefList );
+  }
+}
+
+/** encode motion vector difference for a PU block
+ * \param pcCU
+ * \param uiAbsPartIdx
+ * \param eRefList
+ * \returns Void
+ */
+Void TEncEntropy::encodeMvdPU( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )
+{
+  assert( pcCU->isInter( uiAbsPartIdx ) );
+
+  // an MVD is only present for reference lists the PU actually uses
+  const UInt uiListMask = 1u << eRefList;
+  if ( ( pcCU->getInterDir( uiAbsPartIdx ) & uiListMask ) == 0 )
+  {
+    return;
+  }
+  m_pcEntropyCoderIf->codeMvd( pcCU, uiAbsPartIdx, eRefList );
+}
+
+/** encode the motion vector predictor index of a PU for one reference list;
+ *  written only when the PU uses that list */
+Void TEncEntropy::encodeMVPIdxPU( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )
+{
+  const UInt uiListMask = 1u << eRefList;
+  if ( ( pcCU->getInterDir( uiAbsPartIdx ) & uiListMask ) == 0 )
+  {
+    return;
+  }
+  m_pcEntropyCoderIf->codeMVPIdx( pcCU, uiAbsPartIdx, eRefList );
+}
+
+/** encode the coded-block-flag of one TU component (delegation) */
+Void TEncEntropy::encodeQtCbf( TComTU &rTu, const ComponentID compID, const Bool lowestLevel )
+{
+  m_pcEntropyCoderIf->codeQtCbf( rTu, compID, lowestLevel );
+}
+
+/** encode a transform subdivision flag with the given context (delegation) */
+Void TEncEntropy::encodeTransformSubdivFlag( UInt uiSymbol, UInt uiCtx )
+{
+  m_pcEntropyCoderIf->codeTransformSubdivFlag( uiSymbol, uiCtx );
+}
+
+/** encode the root coded-block-flag of a CU (delegation) */
+Void TEncEntropy::encodeQtRootCbf( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  m_pcEntropyCoderIf->codeQtRootCbf( pcCU, uiAbsPartIdx );
+}
+
+/** encode a zero coded-block-flag for a channel (delegation) */
+Void TEncEntropy::encodeQtCbfZero( TComTU &rTu, const ChannelType chType )
+{
+  m_pcEntropyCoderIf->codeQtCbfZero( rTu, chType );
+}
+
+/** encode a zero root coded-block-flag (delegation) */
+Void TEncEntropy::encodeQtRootCbfZero( TComDataCU* pcCU )
+{
+  m_pcEntropyCoderIf->codeQtRootCbfZero( pcCU );
+}
+
+// dQP
+/** encode the delta QP of a CU when the PPS enables dQP signalling
+ * \param pcCU         pointer to the CU
+ * \param uiAbsPartIdx part index of the CU
+ * \param bRD          when true (RD estimation) the QP is coded at part 0
+ */
+Void TEncEntropy::encodeQP( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD )
+{
+  const UInt uiPartIdx = bRD ? 0 : uiAbsPartIdx;
+
+  if ( pcCU->getSlice()->getPPS()->getUseDQP() )
+  {
+    m_pcEntropyCoderIf->codeDeltaQP( pcCU, uiPartIdx );
+  }
+}
+
+/** encode chroma qp adjustment
+ * \param cu         pointer to the CU
+ * \param absPartIdx part index of the CU
+ * \param inRd       when true (RD estimation) the adjustment is coded at part 0
+ * \returns Void
+ */
+Void TEncEntropy::encodeChromaQpAdjustment( TComDataCU* cu, UInt absPartIdx, Bool inRd )
+{
+  m_pcEntropyCoderIf->codeChromaQpAdjustment( cu, inRd ? 0 : absPartIdx );
+}
+
+// texture
+
+/** encode the coefficients (transform tree) of a CU
+ * \param pcCU            pointer to the CU
+ * \param uiAbsPartIdx    part index of the CU
+ * \param uiDepth         depth of the CU
+ * \param bCodeDQP        in/out: whether a delta QP still has to be coded for this CU
+ * \param codeChromaQpAdj in/out: whether a chroma QP adjustment still has to be coded
+ */
+Void TEncEntropy::encodeCoeff( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth, Bool& bCodeDQP, Bool& codeChromaQpAdj )
+{
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+  const Bool bDebugRQT=g_bFinalEncode && DebugOptionList::DebugRQT.getInt()!=0;
+#endif
+
+  if( pcCU->isIntra(uiAbsPartIdx) )
+  {
+    // intentionally disabled CABAC trace block (dead code kept from the
+    // reference software)
+    if (false)
+    {
+      DTRACE_CABAC_VL( g_nSymbolCounter++ )
+      DTRACE_CABAC_T( "\tdecodeTransformIdx()\tCUDepth=" )
+      DTRACE_CABAC_V( uiDepth )
+      DTRACE_CABAC_T( "\n" )
+    }
+  }
+  else
+  {
+    // the root CBF is not signalled for merged 2Nx2N CUs
+    if( !(pcCU->getMergeFlag( uiAbsPartIdx ) && pcCU->getPartitionSize(uiAbsPartIdx) == SIZE_2Nx2N ) )
+    {
+      m_pcEntropyCoderIf->codeQtRootCbf( pcCU, uiAbsPartIdx );
+    }
+    // a zero root CBF means there are no coefficients to code for this CU
+    if ( !pcCU->getQtRootCbf( uiAbsPartIdx ) )
+    {
+      return;
+    }
+  }
+
+  TComTURecurse tuRecurse(pcCU, uiAbsPartIdx, uiDepth);
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+  if (bDebugRQT) printf("..codeCoeff: uiAbsPartIdx=%d, PU format=%d, 2Nx2N=%d, NxN=%d\n", uiAbsPartIdx, pcCU->getPartitionSize(uiAbsPartIdx), SIZE_2Nx2N, SIZE_NxN);
+#endif
+
+  // recursively code the transform tree
+  xEncodeTransform( bCodeDQP, codeChromaQpAdj, tuRecurse );
+}
+
+/** encode the coefficients of one TU for one component
+ * \param rTu    TU iterator positioned on the TU
+ * \param pcCoef coefficient buffer of this TU/component
+ * \param compID component to code
+ *
+ * Nothing is written when the component's CBF is zero.  A non-square TU
+ * (rect width != height) is coded as two vertically split sub-TUs, each
+ * one only if its own sub-CBF is set.
+ */
+Void TEncEntropy::encodeCoeffNxN( TComTU &rTu, TCoeff* pcCoef, const ComponentID compID)
+{
+  TComDataCU *pcCU = rTu.getCU();
+
+  if (pcCU->getCbf(rTu.GetAbsPartIdxTU(), compID, rTu.GetTransformDepthRel()) != 0)
+  {
+    if (rTu.getRect(compID).width != rTu.getRect(compID).height)
+    {
+      //code two sub-TUs
+      TComTURecurse subTUIterator(rTu, false, TComTU::VERTICAL_SPLIT, true, compID);
+
+      // coefficients of section k start k*subTUSize entries into pcCoef
+      const UInt subTUSize = subTUIterator.getRect(compID).width * subTUIterator.getRect(compID).height;
+
+      do
+      {
+        // sub-TU CBFs live one transform-depth level below the parent TU
+        const UChar subTUCBF = pcCU->getCbf(subTUIterator.GetAbsPartIdxTU(compID), compID, (subTUIterator.GetTransformDepthRel() + 1));
+
+        if (subTUCBF != 0)
+        {
+          m_pcEntropyCoderIf->codeCoeffNxN( subTUIterator, (pcCoef + (subTUIterator.GetSectionNumber() * subTUSize)), compID);
+        }
+      }
+      while (subTUIterator.nextSection(rTu));
+    }
+    else
+    {
+      m_pcEntropyCoderIf->codeCoeffNxN(rTu, pcCoef, compID);
+    }
+  }
+}
+
+/** pass coefficient bit-cost estimation on to the entropy coder
+ *  NOTE(review): for non-square blocks the height is halved before the
+ *  estimate -- presumably to match the vertical sub-TU split used when
+ *  coding non-square TUs (see encodeCoeffNxN); confirm
+ */
+Void TEncEntropy::estimateBit (estBitsSbacStruct* pcEstBitsSbac, Int width, Int height, const ChannelType chType)
+{
+  const UInt heightAtEntropyCoding = (width != height) ? (height >> 1) : height;
+
+  m_pcEntropyCoderIf->estBit ( pcEstBitsSbac, width, heightAtEntropyCoding, chType );
+}
+
+/** count the non-zero entries in a coefficient array
+ * \param pcCoef pointer to the coefficient array
+ * \param uiSize number of entries in the array
+ * \returns number of entries that are non-zero
+ */
+Int TEncEntropy::countNonZeroCoeffs( TCoeff* pcCoef, UInt uiSize )
+{
+  Int count = 0;
+
+  // unsigned index to match uiSize and avoid a signed/unsigned comparison
+  for ( UInt i = 0; i < uiSize; i++ )
+  {
+    count += pcCoef[i] != 0;
+  }
+
+  return count;
+}
+
+/** encode quantization matrix
+ * \param scalingList quantization matrix information
+ *
+ * Thin delegation to the attached entropy coder implementation.
+ */
+Void TEncEntropy::encodeScalingList( TComScalingList* scalingList )
+{
+  m_pcEntropyCoderIf->codeScalingList( scalingList );
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncEntropy.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,193 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncEntropy.h
+    \brief    entropy encoder class (header)
+*/
+
+#ifndef __TENCENTROPY__
+#define __TENCENTROPY__
+
+#include "TLibCommon/TComSlice.h"
+#include "TLibCommon/TComDataCU.h"
+#include "TLibCommon/TComBitStream.h"
+#include "TLibCommon/ContextModel.h"
+#include "TLibCommon/TComPic.h"
+#include "TLibCommon/TComTrQuant.h"
+#include "TLibCommon/TComSampleAdaptiveOffset.h"
+#include "TLibCommon/TComChromaFormat.h"
+
+class TEncSbac;
+class TEncCavlc;
+class SEI;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// entropy encoder pure class: abstract interface that concrete entropy
+/// coder back-ends implement (presumably TEncSbac / TEncCavlc, which are
+/// forward-declared above -- confirm); TEncEntropy forwards all of its
+/// encode* calls to an instance of this interface
+class TEncEntropyIf
+{
+public:
+  // coder state and bitstream management
+  virtual Void  resetEntropy          ()                = 0;
+  virtual Void  determineCabacInitIdx ()                = 0;
+  virtual Void  setBitstream          ( TComBitIf* p )  = 0;
+  virtual Void  setSlice              ( TComSlice* p )  = 0;
+  virtual Void  resetBits             ()                = 0;
+  virtual UInt  getNumberOfWrittenBits()                = 0;
+
+  // high-level syntax: parameter sets and slice header
+  virtual Void  codeVPS                 ( TComVPS* pcVPS )                                      = 0;
+  virtual Void  codeSPS                 ( TComSPS* pcSPS )                                      = 0;
+  virtual Void  codePPS                 ( TComPPS* pcPPS )                                      = 0;
+  virtual Void  codeSliceHeader         ( TComSlice* pcSlice )                                  = 0;
+
+  virtual Void  codeTilesWPPEntryPoint  ( TComSlice* pSlice )     = 0;
+  virtual Void  codeTerminatingBit      ( UInt uilsLast )                                       = 0;
+  virtual Void  codeSliceFinish         ()                                                      = 0;
+  virtual Void codeMVPIdx ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList ) = 0;
+  virtual Void codeScalingList   ( TComScalingList* scalingList )      = 0;
+
+public:
+  // CU-level syntax
+  virtual Void codeCUTransquantBypassFlag( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+  virtual Void codeSkipFlag      ( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+  virtual Void codeMergeFlag     ( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+  virtual Void codeMergeIndex    ( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+  virtual Void codeSplitFlag     ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth ) = 0;
+
+  virtual Void codePartSize      ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth ) = 0;
+  virtual Void codePredMode      ( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+
+  virtual Void codeIPCMInfo      ( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+
+  // TU-level / residual syntax
+  virtual Void codeTransformSubdivFlag( UInt uiSymbol, UInt uiCtx ) = 0;
+  virtual Void codeQtCbf         ( TComTU &rTu, const ComponentID compID, const Bool lowestLevel ) = 0;
+  virtual Void codeQtRootCbf     ( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+  virtual Void codeQtCbfZero     ( TComTU &rTu, const ChannelType chType ) = 0;
+  virtual Void codeQtRootCbfZero ( TComDataCU* pcCU ) = 0;
+  virtual Void codeIntraDirLumaAng( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool isMultiplePU ) = 0;
+
+  // prediction syntax
+  virtual Void codeIntraDirChroma( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+  virtual Void codeInterDir      ( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+  virtual Void codeRefFrmIdx     ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )      = 0;
+  virtual Void codeMvd           ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )      = 0;
+
+  virtual Void codeCrossComponentPrediction( TComTU &rTu, ComponentID compID ) = 0;
+
+  // QP, coefficients and in-loop filter parameters
+  virtual Void codeDeltaQP       ( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+  virtual Void codeChromaQpAdjustment( TComDataCU* pcCU, UInt uiAbsPartIdx ) = 0;
+  virtual Void codeCoeffNxN      ( TComTU &rTu, TCoeff* pcCoef, const ComponentID compID ) = 0;
+  virtual Void codeTransformSkipFlags ( TComTU &rTu, ComponentID component ) = 0;
+  virtual Void codeSAOBlkParam   (SAOBlkParam& saoBlkParam, Bool* sliceEnabled, Bool leftMergeAvail, Bool aboveMergeAvail, Bool onlyEstMergeInfo = false)    =0;
+  virtual Void estBit               (estBitsSbacStruct* pcEstBitsSbac, Int width, Int height, ChannelType chType) = 0;
+
+  virtual Void codeDFFlag (UInt uiCode, const Char *pSymbolName) = 0;
+  virtual Void codeDFSvlc (Int iCode, const Char *pSymbolName)   = 0;
+
+  virtual Void codeExplicitRdpcmMode ( TComTU &rTu, const ComponentID compID ) = 0;
+
+  virtual ~TEncEntropyIf() {}
+};
+
+/// entropy encoder class: facade that forwards the encode* calls made by
+/// the CU/slice encoders to the attached TEncEntropyIf back-end
+/// (m_pcEntropyCoderIf); also hosts a few small helpers of its own
+class TEncEntropy
+{
+public:
+  // coder state / bitstream management (all simple delegations)
+  Void    setEntropyCoder           ( TEncEntropyIf* e, TComSlice* pcSlice );
+  Void    setBitstream              ( TComBitIf* p )          { m_pcEntropyCoderIf->setBitstream(p);  }
+  Void    resetBits                 ()                        { m_pcEntropyCoderIf->resetBits();      }
+  UInt    getNumberOfWrittenBits    ()                        { return m_pcEntropyCoderIf->getNumberOfWrittenBits(); }
+  Void    resetEntropy              ()                        { m_pcEntropyCoderIf->resetEntropy();  }
+  Void    determineCabacInitIdx     ()                        { m_pcEntropyCoderIf->determineCabacInitIdx(); }
+
+  Void    encodeSliceHeader         ( TComSlice* pcSlice );
+  Void    encodeTilesWPPEntryPoint( TComSlice* pSlice );
+  Void    encodeTerminatingBit      ( UInt uiIsLast );
+  Void    encodeSliceFinish         ();
+  // NOTE(review): the back-end pointer is public and accessed directly by
+  // TEncEntropy's own methods; consider making it private with an accessor
+  TEncEntropyIf*      m_pcEntropyCoderIf;
+
+public:
+  Void encodeVPS               ( TComVPS* pcVPS);
+  // SPS
+  Void encodeSPS               ( TComSPS* pcSPS );
+  Void encodePPS               ( TComPPS* pcPPS );
+  Void encodeSplitFlag         ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth, Bool bRD = false );
+  Void encodeCUTransquantBypassFlag( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD = false );
+  Void encodeSkipFlag          ( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD = false );
+  // per-PU motion information (see TEncEntropy.cpp for details)
+  Void encodePUWise       ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void encodeInterDirPU   ( TComDataCU* pcSubCU, UInt uiAbsPartIdx  );
+  Void encodeRefFrmIdxPU  ( TComDataCU* pcSubCU, UInt uiAbsPartIdx, RefPicList eRefList );
+  Void encodeMvdPU        ( TComDataCU* pcSubCU, UInt uiAbsPartIdx, RefPicList eRefList );
+  Void encodeMVPIdxPU     ( TComDataCU* pcSubCU, UInt uiAbsPartIdx, RefPicList eRefList );
+  Void encodeMergeFlag    ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void encodeMergeIndex   ( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD = false );
+  Void encodePredMode          ( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD = false );
+  Void encodePartSize          ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth, Bool bRD = false );
+  Void encodeIPCMInfo          ( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD = false );
+  Void encodePredInfo          ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void encodeIntraDirModeLuma  ( TComDataCU* pcCU, UInt absPartIdx, Bool isMultiplePU = false );
+
+  Void encodeIntraDirModeChroma( TComDataCU* pcCU, UInt uiAbsPartIdx );
+
+  // TU / residual syntax
+  Void encodeTransformSubdivFlag( UInt uiSymbol, UInt uiCtx );
+  Void encodeQtCbf             ( TComTU &rTu, const ComponentID compID, const Bool lowestLevel );
+
+  Void encodeQtCbfZero         ( TComTU &rTu, const ChannelType chType );
+  Void encodeQtRootCbfZero     ( TComDataCU* pcCU );
+  Void encodeQtRootCbf         ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void encodeQP                ( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD = false );
+  Void encodeChromaQpAdjustment ( TComDataCU* pcCU, UInt uiAbsPartIdx, Bool bRD = false );
+
+  Void encodeScalingList       ( TComScalingList* scalingList );
+
+  Void encodeCrossComponentPrediction( TComTU &rTu, ComponentID compID );
+
+private:
+  // recursive transform-tree coding helper used by encodeCoeff
+  Void xEncodeTransform        ( Bool& bCodeDQP, Bool& codeChromaQpAdj, TComTU &rTu );
+
+public:
+  Void encodeCoeff             ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth, Bool& bCodeDQP, Bool& codeChromaQpAdj );
+
+  Void encodeCoeffNxN         ( TComTU &rTu, TCoeff* pcCoef, const ComponentID compID );
+
+  Void estimateBit             ( estBitsSbacStruct* pcEstBitsSbac, Int width, Int height, ChannelType chType );
+
+  Void encodeSAOBlkParam(SAOBlkParam& saoBlkParam, Bool* sliceEnabled, Bool leftMergeAvail, Bool aboveMergeAvail){m_pcEntropyCoderIf->codeSAOBlkParam(saoBlkParam, sliceEnabled, leftMergeAvail, aboveMergeAvail, false);}
+
+  static Int countNonZeroCoeffs( TCoeff* pcCoef, UInt uiSize );
+
+};// END CLASS DEFINITION TEncEntropy
+
+//! \}
+
+#endif // __TENCENTROPY__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncGOP.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2837 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncGOP.cpp
+    \brief    GOP encoder class
+*/
+
+#include <list>
+#include <algorithm>
+#include <functional>
+
+#include "TEncTop.h"
+#include "TEncGOP.h"
+#include "TEncAnalyze.h"
+#include "libmd5/MD5.h"
+#include "TLibCommon/SEI.h"
+#include "TLibCommon/NAL.h"
+#include "NALwrite.h"
+#include <time.h>
+#include <math.h>
+
+#define VERBOSE_FRAME 0
+
+using namespace std;
+
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+Bool g_bFinalEncode = false;
+#endif
+
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / initialization / destroy
+// ====================================================================================================================
+/** return the POC least-significant bits as a non-negative remainder
+ *  in [0, maxLSB), also for negative poc values */
+Int getLSB(Int poc, Int maxLSB)
+{
+  return ( poc >= 0 ) ? ( poc % maxLSB )
+                      : ( maxLSB - ( (-poc) % maxLSB ) ) % maxLSB;
+}
+
+/** default constructor: puts the GOP encoder into a defined initial state;
+ *  pointers to collaborating encoder objects are wired up later in init() */
+TEncGOP::TEncGOP()
+{
+  // GOP bookkeeping
+  m_iLastIDR            = 0;
+  m_iGopSize            = 0;
+  m_iNumPicCoded        = 0; //Niko
+  m_bFirst              = true;
+#if ALLOW_RECOVERY_POINT_AS_RAP
+  m_iLastRecoveryPicPOC = 0;
+#endif
+
+  // collaborator pointers, set in init()
+  m_pcCfg               = NULL;
+  m_pcSliceEncoder      = NULL;
+  m_pcListPic           = NULL;
+
+  m_pcEntropyCoder      = NULL;
+  m_pcCavlcCoder        = NULL;
+  m_pcSbacCoder         = NULL;
+  m_pcBinCABAC          = NULL;
+
+  m_bSeqFirst           = true;
+
+  // random-access / reference picture state
+  // NOTE(review): m_bRefreshPending is initialised with 0 although the name
+  // suggests a Bool -- check the declared type
+  m_bRefreshPending     = 0;
+  m_pocCRA            = 0;
+  m_numLongTermRefPicSPS = 0;
+  ::memset(m_ltRefPicPocLsbSps, 0, sizeof(m_ltRefPicPocLsbSps));
+  ::memset(m_ltRefPicUsedByCurrPicFlag, 0, sizeof(m_ltRefPicUsedByCurrPicFlag));
+  // SEI / HRD related state
+  m_cpbRemovalDelay   = 0;
+  m_lastBPSEI         = 0;
+  xResetNonNestedSEIPresentFlags();
+  xResetNestedSEIPresentFlags();
+  m_associatedIRAPType = NAL_UNIT_CODED_SLICE_IDR_N_LP;
+  m_associatedIRAPPOC  = 0;
+  return;
+}
+
+TEncGOP::~TEncGOP()
+{
+  // nothing to release here; see create()/destroy() for explicit lifecycle
+}
+
+/** Create list to contain pointers to CTU start addresses of slice.
+ */
+Void  TEncGOP::create()
+{
+  // NOTE(review): despite the comment above, no list is allocated in this
+  // version; only the two long-term reference test flags are cleared
+  m_bLongtermTestPictureHasBeenCoded = 0;
+  m_bLongtermTestPictureHasBeenCoded2 = 0;
+}
+
+/** release resources acquired in create() (currently nothing to do) */
+Void  TEncGOP::destroy()
+{
+}
+
+/** wire the GOP encoder to the top-level encoder and cache pointers to the
+ *  sub-encoders and tools it uses (slice encoder, entropy coders, loop
+ *  filter, SAO, rate control)
+ * \param pcTEncTop top-level encoder; also serves as the configuration source
+ */
+Void TEncGOP::init ( TEncTop* pcTEncTop )
+{
+  m_pcEncTop     = pcTEncTop;
+  m_pcCfg                = pcTEncTop;
+  m_pcSliceEncoder       = pcTEncTop->getSliceEncoder();
+  m_pcListPic            = pcTEncTop->getListPic();
+
+  m_pcEntropyCoder       = pcTEncTop->getEntropyCoder();
+  m_pcCavlcCoder         = pcTEncTop->getCavlcCoder();
+  m_pcSbacCoder          = pcTEncTop->getSbacCoder();
+  m_pcBinCABAC           = pcTEncTop->getBinCABAC();
+  m_pcLoopFilter         = pcTEncTop->getLoopFilter();
+
+  m_pcSAO                = pcTEncTop->getSAO();
+  m_pcRateCtrl           = pcTEncTop->getRateCtrl();
+  // counters reset at the start of every encode run
+  m_lastBPSEI          = 0;
+  m_totalCoded         = 0;
+
+}
+
+/** build an Active Parameter Sets SEI message referencing the active VPS
+ *  and the given SPS; the caller takes ownership of the returned object
+ *  (allocated with new) */
+SEIActiveParameterSets* TEncGOP::xCreateSEIActiveParameterSets (TComSPS *sps)
+{
+  SEIActiveParameterSets *seiActiveParameterSets = new SEIActiveParameterSets();
+  seiActiveParameterSets->activeVPSId = m_pcCfg->getVPS()->getVPSId();
+  seiActiveParameterSets->m_selfContainedCvsFlag = false;
+  seiActiveParameterSets->m_noParameterSetUpdateFlag = false;
+  // exactly one active SPS is signalled
+  seiActiveParameterSets->numSpsIdsMinus1 = 0;
+  seiActiveParameterSets->activeSeqParameterSetId.resize(seiActiveParameterSets->numSpsIdsMinus1 + 1);
+  seiActiveParameterSets->activeSeqParameterSetId[0] = sps->getSPSId();
+  return seiActiveParameterSets;
+}
+
+/** build a Frame Packing Arrangement SEI message from the encoder
+ *  configuration; the caller takes ownership of the returned object
+ *  (allocated with new) */
+SEIFramePacking* TEncGOP::xCreateSEIFramePacking()
+{
+  SEIFramePacking *seiFramePacking = new SEIFramePacking();
+  seiFramePacking->m_arrangementId = m_pcCfg->getFramePackingArrangementSEIId();
+  seiFramePacking->m_arrangementCancelFlag = 0;
+  seiFramePacking->m_arrangementType = m_pcCfg->getFramePackingArrangementSEIType();
+  // only arrangement types 3..5 are supported here
+  assert((seiFramePacking->m_arrangementType > 2) && (seiFramePacking->m_arrangementType < 6) );
+  seiFramePacking->m_quincunxSamplingFlag = m_pcCfg->getFramePackingArrangementSEIQuincunx();
+  seiFramePacking->m_contentInterpretationType = m_pcCfg->getFramePackingArrangementSEIInterpretation();
+  seiFramePacking->m_spatialFlippingFlag = 0;
+  seiFramePacking->m_frame0FlippedFlag = 0;
+  seiFramePacking->m_fieldViewsFlag = (seiFramePacking->m_arrangementType == 2);
+  // for temporal interleaving (type 5) frame parity alternates with the
+  // number of pictures coded so far
+  seiFramePacking->m_currentFrameIsFrame0Flag = ((seiFramePacking->m_arrangementType == 5) && (m_iNumPicCoded&1));
+  seiFramePacking->m_frame0SelfContainedFlag = 0;
+  seiFramePacking->m_frame1SelfContainedFlag = 0;
+  seiFramePacking->m_frame0GridPositionX = 0;
+  seiFramePacking->m_frame0GridPositionY = 0;
+  seiFramePacking->m_frame1GridPositionX = 0;
+  seiFramePacking->m_frame1GridPositionY = 0;
+  seiFramePacking->m_arrangementReservedByte = 0;
+  seiFramePacking->m_arrangementPersistenceFlag = true;
+  seiFramePacking->m_upsampledAspectRatio = 0;
+  return seiFramePacking;
+}
+
+/** build a Segmented Rectangular Frame Packing SEI message from the encoder
+ *  configuration; the caller takes ownership of the returned object
+ *  (allocated with new) */
+SEISegmentedRectFramePacking* TEncGOP::xCreateSEISegmentedRectFramePacking()
+{
+  SEISegmentedRectFramePacking *seiSegmentedRectFramePacking = new SEISegmentedRectFramePacking();
+  seiSegmentedRectFramePacking->m_arrangementCancelFlag = m_pcCfg->getSegmentedRectFramePackingArrangementSEICancel();
+  seiSegmentedRectFramePacking->m_contentInterpretationType = m_pcCfg->getSegmentedRectFramePackingArrangementSEIType();
+  seiSegmentedRectFramePacking->m_arrangementPersistenceFlag = m_pcCfg->getSegmentedRectFramePackingArrangementSEIPersistence();
+  return seiSegmentedRectFramePacking;
+}
+
+/** build a Display Orientation SEI message carrying the configured rotation
+ *  angle (no flips); the caller takes ownership of the returned object
+ *  (allocated with new) */
+SEIDisplayOrientation* TEncGOP::xCreateSEIDisplayOrientation()
+{
+  SEIDisplayOrientation *seiDisplayOrientation = new SEIDisplayOrientation();
+  seiDisplayOrientation->cancelFlag = false;
+  seiDisplayOrientation->horFlip = false;
+  seiDisplayOrientation->verFlip = false;
+  seiDisplayOrientation->anticlockwiseRotation = m_pcCfg->getDisplayOrientationSEIAngle();
+  return seiDisplayOrientation;
+}
+/** build a Tone Mapping Information SEI message from the encoder
+ *  configuration; the fields filled in depend on the configured model id
+ *  (0..4, validated below); the caller takes ownership of the returned
+ *  object (allocated with new) */
+SEIToneMappingInfo*  TEncGOP::xCreateSEIToneMappingInfo()
+{
+  SEIToneMappingInfo *seiToneMappingInfo = new SEIToneMappingInfo();
+  seiToneMappingInfo->m_toneMapId = m_pcCfg->getTMISEIToneMapId();
+  seiToneMappingInfo->m_toneMapCancelFlag = m_pcCfg->getTMISEIToneMapCancelFlag();
+  seiToneMappingInfo->m_toneMapPersistenceFlag = m_pcCfg->getTMISEIToneMapPersistenceFlag();
+
+  seiToneMappingInfo->m_codedDataBitDepth = m_pcCfg->getTMISEICodedDataBitDepth();
+  assert(seiToneMappingInfo->m_codedDataBitDepth >= 8 && seiToneMappingInfo->m_codedDataBitDepth <= 14);
+  seiToneMappingInfo->m_targetBitDepth = m_pcCfg->getTMISEITargetBitDepth();
+  assert(seiToneMappingInfo->m_targetBitDepth >= 1 && seiToneMappingInfo->m_targetBitDepth <= 17);
+  seiToneMappingInfo->m_modelId = m_pcCfg->getTMISEIModelID();
+  assert(seiToneMappingInfo->m_modelId >=0 &&seiToneMappingInfo->m_modelId<=4);
+
+  switch( seiToneMappingInfo->m_modelId)
+  {
+  case 0:
+    {
+      // model 0: simple min/max clipping range
+      seiToneMappingInfo->m_minValue = m_pcCfg->getTMISEIMinValue();
+      seiToneMappingInfo->m_maxValue = m_pcCfg->getTMISEIMaxValue();
+      break;
+    }
+  case 1:
+    {
+      // model 1: sigmoid mapping described by midpoint and width
+      seiToneMappingInfo->m_sigmoidMidpoint = m_pcCfg->getTMISEISigmoidMidpoint();
+      seiToneMappingInfo->m_sigmoidWidth = m_pcCfg->getTMISEISigmoidWidth();
+      break;
+    }
+  case 2:
+    {
+      // model 2: explicit table of coded-interval start values, one entry
+      // per target code value
+      UInt num = 1u<<(seiToneMappingInfo->m_targetBitDepth);
+      seiToneMappingInfo->m_startOfCodedInterval.resize(num);
+      Int* ptmp = m_pcCfg->getTMISEIStartOfCodedInterva();
+      if(ptmp)
+      {
+        // NOTE(review): loop index is Int while num is UInt -- mixed
+        // signedness comparison; harmless for valid bit depths
+        for(Int i=0; i<num;i++)
+        {
+          seiToneMappingInfo->m_startOfCodedInterval[i] = ptmp[i];
+        }
+      }
+      break;
+    }
+  case 3:
+    {
+      // model 3: piecewise-linear mapping given by coded/target pivot pairs
+      seiToneMappingInfo->m_numPivots = m_pcCfg->getTMISEINumPivots();
+      seiToneMappingInfo->m_codedPivotValue.resize(seiToneMappingInfo->m_numPivots);
+      seiToneMappingInfo->m_targetPivotValue.resize(seiToneMappingInfo->m_numPivots);
+      Int* ptmpcoded = m_pcCfg->getTMISEICodedPivotValue();
+      Int* ptmptarget = m_pcCfg->getTMISEITargetPivotValue();
+      if(ptmpcoded&&ptmptarget)
+      {
+        for(Int i=0; i<(seiToneMappingInfo->m_numPivots);i++)
+        {
+          seiToneMappingInfo->m_codedPivotValue[i]=ptmpcoded[i];
+          seiToneMappingInfo->m_targetPivotValue[i]=ptmptarget[i];
+         }
+       }
+       break;
+     }
+  case 4:
+     {
+       // model 4: camera/exposure parameters, range-checked as configured
+       seiToneMappingInfo->m_cameraIsoSpeedIdc = m_pcCfg->getTMISEICameraIsoSpeedIdc();
+       seiToneMappingInfo->m_cameraIsoSpeedValue = m_pcCfg->getTMISEICameraIsoSpeedValue();
+       assert( seiToneMappingInfo->m_cameraIsoSpeedValue !=0 );
+       seiToneMappingInfo->m_exposureIndexIdc = m_pcCfg->getTMISEIExposurIndexIdc();
+       seiToneMappingInfo->m_exposureIndexValue = m_pcCfg->getTMISEIExposurIndexValue();
+       assert( seiToneMappingInfo->m_exposureIndexValue !=0 );
+       seiToneMappingInfo->m_exposureCompensationValueSignFlag = m_pcCfg->getTMISEIExposureCompensationValueSignFlag();
+       seiToneMappingInfo->m_exposureCompensationValueNumerator = m_pcCfg->getTMISEIExposureCompensationValueNumerator();
+       seiToneMappingInfo->m_exposureCompensationValueDenomIdc = m_pcCfg->getTMISEIExposureCompensationValueDenomIdc();
+       seiToneMappingInfo->m_refScreenLuminanceWhite = m_pcCfg->getTMISEIRefScreenLuminanceWhite();
+       seiToneMappingInfo->m_extendedRangeWhiteLevel = m_pcCfg->getTMISEIExtendedRangeWhiteLevel();
+       assert( seiToneMappingInfo->m_extendedRangeWhiteLevel >= 100 );
+       seiToneMappingInfo->m_nominalBlackLevelLumaCodeValue = m_pcCfg->getTMISEINominalBlackLevelLumaCodeValue();
+       seiToneMappingInfo->m_nominalWhiteLevelLumaCodeValue = m_pcCfg->getTMISEINominalWhiteLevelLumaCodeValue();
+       assert( seiToneMappingInfo->m_nominalWhiteLevelLumaCodeValue > seiToneMappingInfo->m_nominalBlackLevelLumaCodeValue );
+       seiToneMappingInfo->m_extendedWhiteLevelLumaCodeValue = m_pcCfg->getTMISEIExtendedWhiteLevelLumaCodeValue();
+       assert( seiToneMappingInfo->m_extendedWhiteLevelLumaCodeValue >= seiToneMappingInfo->m_nominalWhiteLevelLumaCodeValue );
+       break;
+    }
+  default:
+    {
+      assert(!"Undefined SEIToneMapModelId");
+      break;
+    }
+  }
+  return seiToneMappingInfo;
+}
+
+// Builds a temporal motion-constrained tile sets (TMCTS) SEI message from the
+// active PPS tile grid: one tile set per tile, each set covering exactly one
+// tile rectangle. Requires tiles to be enabled in the PPS (asserts otherwise).
+// Caller takes ownership of the returned object and must delete it.
+SEITempMotionConstrainedTileSets* TEncGOP::xCreateSEITempMotionConstrainedTileSets ()
+{
+  TComPPS *pps = m_pcEncTop->getPPS();
+  SEITempMotionConstrainedTileSets *sei = new SEITempMotionConstrainedTileSets();
+  if(pps->getTilesEnabledFlag())
+  {
+    sei->m_mc_all_tiles_exact_sample_value_match_flag = false;
+    sei->m_each_tile_one_tile_set_flag                = false;
+    sei->m_limited_tile_set_display_flag              = false;
+    // Total tile count = columns * rows (PPS stores both as minus1 values).
+    sei->setNumberOfTileSets((pps->getNumTileColumnsMinus1() + 1) * (pps->getNumTileRowsMinus1() + 1));
+
+    for(Int i=0; i < sei->getNumberOfTileSets(); i++)
+    {
+      sei->tileSetData(i).m_mcts_id = i;  //depends the application;
+      sei->tileSetData(i).setNumberOfTileRects(1);
+
+      // Single rectangle per set: top-left and bottom-right both point at tile i.
+      for(Int j=0; j<sei->tileSetData(i).getNumberOfTileRects(); j++)
+      {
+        sei->tileSetData(i).topLeftTileIndex(j)     = i+j;
+        sei->tileSetData(i).bottomRightTileIndex(j) = i+j;
+      }
+
+      sei->tileSetData(i).m_exact_sample_value_match_flag    = false;
+      sei->tileSetData(i).m_mcts_tier_level_idc_present_flag = false;
+    }
+  }
+  else
+  {
+    // Misconfiguration: TMCTS SEI was requested without tiles enabled.
+    assert(!"Tile is not enabled");
+  }
+  return sei;
+}
+
+// Builds a knee function info SEI message from the encoder configuration.
+// When the cancel flag is set, only the knee id and cancel flag are populated;
+// otherwise the dynamic-range/luminance parameters and the input/output knee
+// point tables are copied from the config (tables only when both config arrays
+// are non-NULL). Caller owns the returned object and must delete it.
+SEIKneeFunctionInfo* TEncGOP::xCreateSEIKneeFunctionInfo()
+{
+  SEIKneeFunctionInfo *seiKneeFunctionInfo = new SEIKneeFunctionInfo();
+  seiKneeFunctionInfo->m_kneeId = m_pcCfg->getKneeSEIId();
+  seiKneeFunctionInfo->m_kneeCancelFlag = m_pcCfg->getKneeSEICancelFlag();
+  if ( !seiKneeFunctionInfo->m_kneeCancelFlag )
+  {
+    seiKneeFunctionInfo->m_kneePersistenceFlag = m_pcCfg->getKneeSEIPersistenceFlag();
+    seiKneeFunctionInfo->m_kneeInputDrange = m_pcCfg->getKneeSEIInputDrange();
+    seiKneeFunctionInfo->m_kneeInputDispLuminance = m_pcCfg->getKneeSEIInputDispLuminance();
+    seiKneeFunctionInfo->m_kneeOutputDrange = m_pcCfg->getKneeSEIOutputDrange();
+    seiKneeFunctionInfo->m_kneeOutputDispLuminance = m_pcCfg->getKneeSEIOutputDispLuminance();
+
+    seiKneeFunctionInfo->m_kneeNumKneePointsMinus1 = m_pcCfg->getKneeSEINumKneePointsMinus1();
+    Int* piInputKneePoint  = m_pcCfg->getKneeSEIInputKneePoint();
+    Int* piOutputKneePoint = m_pcCfg->getKneeSEIOutputKneePoint();
+    // NOTE(review): if either config array is NULL the knee point vectors are
+    // left empty while m_kneeNumKneePointsMinus1 is still set — presumably the
+    // writer relies on the config always providing both; verify against caller.
+    if(piInputKneePoint&&piOutputKneePoint)
+    {
+      // numKneePointsMinus1 semantics: actual point count is minus1 + 1.
+      seiKneeFunctionInfo->m_kneeInputKneePoint.resize(seiKneeFunctionInfo->m_kneeNumKneePointsMinus1+1);
+      seiKneeFunctionInfo->m_kneeOutputKneePoint.resize(seiKneeFunctionInfo->m_kneeNumKneePointsMinus1+1);
+      for(Int i=0; i<=seiKneeFunctionInfo->m_kneeNumKneePointsMinus1; i++)
+      {
+        seiKneeFunctionInfo->m_kneeInputKneePoint[i] = piInputKneePoint[i];
+        seiKneeFunctionInfo->m_kneeOutputKneePoint[i] = piOutputKneePoint[i];
+       }
+    }
+  }
+  return seiKneeFunctionInfo;
+}
+
+// Builds a chroma sampling filter hint SEI message. For filter idc == 1 a
+// single placeholder filter with tap-length-minus1 of 0 is allocated per
+// direction; for any other idc the filter arrays are left NULL with count 0.
+// bChromaLocInfoPresent is currently unused in this body. Caller owns the
+// returned object (including its malloc'd filter arrays) and must free/delete.
+SEIChromaSamplingFilterHint* TEncGOP::xCreateSEIChromaSamplingFilterHint(Bool bChromaLocInfoPresent, Int iHorFilterIndex, Int iVerFilterIndex)
+{
+  SEIChromaSamplingFilterHint *seiChromaSamplingFilterHint = new SEIChromaSamplingFilterHint();
+  seiChromaSamplingFilterHint->m_verChromaFilterIdc = iVerFilterIndex;
+  seiChromaSamplingFilterHint->m_horChromaFilterIdc = iHorFilterIndex;
+  seiChromaSamplingFilterHint->m_verFilteringProcessFlag = 1;
+  seiChromaSamplingFilterHint->m_targetFormatIdc = 3;
+  seiChromaSamplingFilterHint->m_perfectReconstructionFlag = false;
+  if(seiChromaSamplingFilterHint->m_verChromaFilterIdc == 1)
+  {
+    seiChromaSamplingFilterHint->m_numVerticalFilters = 1;
+    // NOTE(review): malloc results are not checked, and with tapLengthMinus1[i]
+    // set to 0 the coefficient array is a zero-byte allocation over which the
+    // inner loop never iterates — looks intentional as a stub, but confirm the
+    // SEI writer tolerates an empty coefficient list for a 1-tap filter.
+    seiChromaSamplingFilterHint->m_verTapLengthMinus1 = (Int*)malloc(seiChromaSamplingFilterHint->m_numVerticalFilters * sizeof(Int));
+    seiChromaSamplingFilterHint->m_verFilterCoeff =    (Int**)malloc(seiChromaSamplingFilterHint->m_numVerticalFilters * sizeof(Int*));
+    for(Int i = 0; i < seiChromaSamplingFilterHint->m_numVerticalFilters; i ++)
+    {
+      seiChromaSamplingFilterHint->m_verTapLengthMinus1[i] = 0;
+      seiChromaSamplingFilterHint->m_verFilterCoeff[i] = (Int*)malloc(seiChromaSamplingFilterHint->m_verTapLengthMinus1[i] * sizeof(Int));
+      for(Int j = 0; j < seiChromaSamplingFilterHint->m_verTapLengthMinus1[i]; j ++)
+      {
+        seiChromaSamplingFilterHint->m_verFilterCoeff[i][j] = 0;
+      }
+    }
+  }
+  else
+  {
+    seiChromaSamplingFilterHint->m_numVerticalFilters = 0;
+    seiChromaSamplingFilterHint->m_verTapLengthMinus1 = NULL;
+    seiChromaSamplingFilterHint->m_verFilterCoeff = NULL;
+  }
+  // Horizontal direction mirrors the vertical handling above.
+  if(seiChromaSamplingFilterHint->m_horChromaFilterIdc == 1)
+  {
+    seiChromaSamplingFilterHint->m_numHorizontalFilters = 1;
+    seiChromaSamplingFilterHint->m_horTapLengthMinus1 = (Int*)malloc(seiChromaSamplingFilterHint->m_numHorizontalFilters * sizeof(Int));
+    seiChromaSamplingFilterHint->m_horFilterCoeff = (Int**)malloc(seiChromaSamplingFilterHint->m_numHorizontalFilters * sizeof(Int*));
+    for(Int i = 0; i < seiChromaSamplingFilterHint->m_numHorizontalFilters; i ++)
+    {
+      seiChromaSamplingFilterHint->m_horTapLengthMinus1[i] = 0;
+      seiChromaSamplingFilterHint->m_horFilterCoeff[i] = (Int*)malloc(seiChromaSamplingFilterHint->m_horTapLengthMinus1[i] * sizeof(Int));
+      for(Int j = 0; j < seiChromaSamplingFilterHint->m_horTapLengthMinus1[i]; j ++)
+      {
+        seiChromaSamplingFilterHint->m_horFilterCoeff[i][j] = 0;
+      }
+    }
+  }
+  else
+  {
+    seiChromaSamplingFilterHint->m_numHorizontalFilters = 0;
+    seiChromaSamplingFilterHint->m_horTapLengthMinus1 = NULL;
+    seiChromaSamplingFilterHint->m_horFilterCoeff = NULL;
+  }
+  return seiChromaSamplingFilterHint;
+}
+
+// Emits the leading (prefix) SEI messages enabled in the encoder config as
+// NAL units appended to the given access unit. For each enabled SEI type the
+// pattern is: reset the NALU, attach its bitstream to the entropy coder,
+// serialize the SEI, add RBSP trailing bits, push an EBSP-wrapped NALU, and
+// delete any heap-allocated SEI object.
+Void TEncGOP::xCreateLeadingSEIMessages (/*SEIMessages seiMessages,*/ AccessUnit &accessUnit, TComSPS *sps)
+{
+  // One NALU object reused for every SEI message below.
+  OutputNALUnit nalu(NAL_UNIT_PREFIX_SEI);
+
+  if(m_pcCfg->getActiveParameterSetsSEIEnabled())
+  {
+    SEIActiveParameterSets *sei = xCreateSEIActiveParameterSets (sps);
+
+    //nalu = NALUnit(NAL_UNIT_PREFIX_SEI);
+    m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+    m_seiWriter.writeSEImessage(nalu.m_Bitstream, *sei, sps);
+    writeRBSPTrailingBits(nalu.m_Bitstream);
+    accessUnit.push_back(new NALUnitEBSP(nalu));
+    delete sei;
+    // Remember the active-parameter-sets SEI is present in this AU.
+    m_activeParameterSetSEIPresentInAU = true;
+  }
+
+  if(m_pcCfg->getFramePackingArrangementSEIEnabled())
+  {
+    SEIFramePacking *sei = xCreateSEIFramePacking ();
+
+    nalu = NALUnit(NAL_UNIT_PREFIX_SEI);
+    m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+    m_seiWriter.writeSEImessage(nalu.m_Bitstream, *sei, sps);
+    writeRBSPTrailingBits(nalu.m_Bitstream);
+    accessUnit.push_back(new NALUnitEBSP(nalu));
+    delete sei;
+  }
+  if(m_pcCfg->getSegmentedRectFramePackingArrangementSEIEnabled())
+  {
+    SEISegmentedRectFramePacking *sei = xCreateSEISegmentedRectFramePacking ();
+
+    nalu = NALUnit(NAL_UNIT_PREFIX_SEI);
+    m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+    m_seiWriter.writeSEImessage(nalu.m_Bitstream, *sei, sps);
+    writeRBSPTrailingBits(nalu.m_Bitstream);
+    accessUnit.push_back(new NALUnitEBSP(nalu));
+    delete sei;
+  }
+  if (m_pcCfg->getDisplayOrientationSEIAngle())
+  {
+    SEIDisplayOrientation *sei = xCreateSEIDisplayOrientation();
+
+    nalu = NALUnit(NAL_UNIT_PREFIX_SEI);
+    m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+    m_seiWriter.writeSEImessage(nalu.m_Bitstream, *sei, sps);
+    writeRBSPTrailingBits(nalu.m_Bitstream);
+    accessUnit.push_back(new NALUnitEBSP(nalu));
+    delete sei;
+  }
+
+  if(m_pcCfg->getToneMappingInfoSEIEnabled())
+  {
+    SEIToneMappingInfo *sei = xCreateSEIToneMappingInfo ();
+
+    nalu = NALUnit(NAL_UNIT_PREFIX_SEI);
+    m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+    m_seiWriter.writeSEImessage(nalu.m_Bitstream, *sei, sps);
+    writeRBSPTrailingBits(nalu.m_Bitstream);
+    accessUnit.push_back(new NALUnitEBSP(nalu));
+    delete sei;
+  }
+
+  if(m_pcCfg->getTMCTSSEIEnabled())
+  {
+    SEITempMotionConstrainedTileSets *sei_tmcts = xCreateSEITempMotionConstrainedTileSets ();
+
+    nalu = NALUnit(NAL_UNIT_PREFIX_SEI);
+    m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+    m_seiWriter.writeSEImessage(nalu.m_Bitstream, *sei_tmcts, sps);
+    writeRBSPTrailingBits(nalu.m_Bitstream);
+    accessUnit.push_back(new NALUnitEBSP(nalu));
+    delete sei_tmcts;
+  }
+
+  if(m_pcCfg->getTimeCodeSEIEnabled())
+  {
+    // Time code SEI is stack-allocated (no delete needed), unlike the others.
+    SEITimeCode sei_time_code;
+    //  Set data as per command line options
+    sei_time_code.numClockTs = m_pcCfg->getNumberOfTimesets();
+    for(Int i = 0; i < sei_time_code.numClockTs; i++)
+      sei_time_code.timeSetArray[i] = m_pcCfg->getTimeSet(i);
+
+    nalu = NALUnit(NAL_UNIT_PREFIX_SEI);
+    m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+    m_seiWriter.writeSEImessage(nalu.m_Bitstream, sei_time_code, sps);
+    writeRBSPTrailingBits(nalu.m_Bitstream);
+    accessUnit.push_back(new NALUnitEBSP(nalu));
+  }
+
+  if(m_pcCfg->getKneeSEIEnabled())
+  {
+    SEIKneeFunctionInfo *sei = xCreateSEIKneeFunctionInfo();
+
+    nalu = NALUnit(NAL_UNIT_PREFIX_SEI);
+    m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+    m_seiWriter.writeSEImessage(nalu.m_Bitstream, *sei, sps);
+    writeRBSPTrailingBits(nalu.m_Bitstream);
+    accessUnit.push_back(new NALUnitEBSP(nalu));
+    delete sei;
+  }
+    
+  if(m_pcCfg->getMasteringDisplaySEI().colourVolumeSEIEnabled)
+  {
+    // Mastering display colour volume values come straight from the config.
+    const TComSEIMasteringDisplay &seiCfg=m_pcCfg->getMasteringDisplaySEI();
+    SEIMasteringDisplayColourVolume mdcv;
+    mdcv.values = seiCfg;
+
+    nalu = NALUnit(NAL_UNIT_PREFIX_SEI);
+    m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+    m_seiWriter.writeSEImessage(nalu.m_Bitstream, mdcv, sps);
+    writeRBSPTrailingBits(nalu.m_Bitstream);
+    accessUnit.push_back(new NALUnitEBSP(nalu));
+      
+  }
+}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+Void TEncGOP::compressGOP( Int iPOCLast, Int iNumPicRcvd, TComList<TComPic*>& rcListPic,
+                           TComList<TComPicYuv*>& rcListPicYuvRecOut, std::list<AccessUnit>& accessUnitsInGOP,
+                           Bool isField, Bool isTff, const InputColourSpaceConversion snr_conversion, const Bool printFrameMSE )
+{
+  // TODO: Split this function up.
+
+  TComPic*        pcPic = NULL;
+  TComPicYuv*     pcPicYuvRecOut;
+  TComSlice*      pcSlice;
+  TComOutputBitstream  *pcBitstreamRedirect;
+  pcBitstreamRedirect = new TComOutputBitstream;
+  AccessUnit::iterator  itLocationToPushSliceHeaderNALU; // used to store location where NALU containing slice header is to be inserted
+
+  xInitGOP( iPOCLast, iNumPicRcvd, rcListPic, rcListPicYuvRecOut, isField );
+
+  m_iNumPicCoded = 0;
+  SEIPictureTiming pictureTimingSEI;
+  Bool writeSOP = m_pcCfg->getSOPDescriptionSEIEnabled();
+
+  // Initialize Scalable Nesting SEI with single layer values
+  SEIScalableNesting scalableNestingSEI;
+  scalableNestingSEI.m_bitStreamSubsetFlag           = 1;      // If the nested SEI messages are picture buffereing SEI mesages, picure timing SEI messages or sub-picture timing SEI messages, bitstream_subset_flag shall be equal to 1
+  scalableNestingSEI.m_nestingOpFlag                 = 0;
+  scalableNestingSEI.m_nestingNumOpsMinus1           = 0;      //nesting_num_ops_minus1
+  scalableNestingSEI.m_allLayersFlag                 = 0;
+  scalableNestingSEI.m_nestingNoOpMaxTemporalIdPlus1 = 6 + 1;  //nesting_no_op_max_temporal_id_plus1
+  scalableNestingSEI.m_nestingNumLayersMinus1        = 1 - 1;  //nesting_num_layers_minus1
+  scalableNestingSEI.m_nestingLayerId[0]             = 0;
+  scalableNestingSEI.m_callerOwnsSEIs                = true;
+
+  Int picSptDpbOutputDuDelay = 0;
+  UInt *accumBitsDU = NULL;
+  UInt *accumNalsDU = NULL;
+  SEIDecodingUnitInfo decodingUnitInfoSEI;
+
+#if EFFICIENT_FIELD_IRAP
+  Int IRAPGOPid = -1;
+  Bool IRAPtoReorder = false;
+  Bool swapIRAPForward = false;
+  if(isField)
+  {
+    Int pocCurr;
+    for ( Int iGOPid=0; iGOPid < m_iGopSize; iGOPid++ )
+    {
+      // determine actual POC
+      if(iPOCLast == 0) //case first frame or first top field
+      {
+        pocCurr=0;
+      }
+      else if(iPOCLast == 1 && isField) //case first bottom field, just like the first frame, the poc computation is not right anymore, we set the right value
+      {
+        pocCurr = 1;
+      }
+      else
+      {
+        pocCurr = iPOCLast - iNumPicRcvd + m_pcCfg->getGOPEntry(iGOPid).m_POC - isField;
+      }
+
+      // check if POC corresponds to IRAP
+      NalUnitType tmpUnitType = getNalUnitType(pocCurr, m_iLastIDR, isField);
+      if(tmpUnitType >= NAL_UNIT_CODED_SLICE_BLA_W_LP && tmpUnitType <= NAL_UNIT_CODED_SLICE_CRA) // if picture is an IRAP
+      {
+        if(pocCurr%2 == 0 && iGOPid < m_iGopSize-1 && m_pcCfg->getGOPEntry(iGOPid).m_POC == m_pcCfg->getGOPEntry(iGOPid+1).m_POC-1)
+        { // if top field and following picture in enc order is associated bottom field
+          IRAPGOPid = iGOPid;
+          IRAPtoReorder = true;
+          swapIRAPForward = true; 
+          break;
+        }
+        if(pocCurr%2 != 0 && iGOPid > 0 && m_pcCfg->getGOPEntry(iGOPid).m_POC == m_pcCfg->getGOPEntry(iGOPid-1).m_POC+1)
+        {
+          // if picture is an IRAP remember to process it first
+          IRAPGOPid = iGOPid;
+          IRAPtoReorder = true;
+          swapIRAPForward = false; 
+          break;
+        }
+      }
+    }
+  }
+#endif
+  // reset flag indicating whether pictures have been encoded
+  for ( Int iGOPid=0; iGOPid < m_iGopSize; iGOPid++ )
+  {
+    m_pcCfg->setEncodedFlag(iGOPid, false);
+  }
+
+  for ( Int iGOPid=0; iGOPid < m_iGopSize; iGOPid++ )
+  {
+#if EFFICIENT_FIELD_IRAP
+    if(IRAPtoReorder)
+    {
+      if(swapIRAPForward)
+      {
+        if(iGOPid == IRAPGOPid)
+        {
+          iGOPid = IRAPGOPid +1;
+        }
+        else if(iGOPid == IRAPGOPid +1)
+        {
+          iGOPid = IRAPGOPid;
+        }
+      }
+      else
+      {
+        if(iGOPid == IRAPGOPid -1)
+        {
+          iGOPid = IRAPGOPid;
+        }
+        else if(iGOPid == IRAPGOPid)
+        {
+          iGOPid = IRAPGOPid -1;
+        }
+      }
+    }
+#endif
+
+    UInt uiColDir = 1;
+    //-- For time output for each slice
+    clock_t iBeforeTime = clock();
+
+    //select uiColDir
+    Int iCloseLeft=1, iCloseRight=-1;
+    for(Int i = 0; i<m_pcCfg->getGOPEntry(iGOPid).m_numRefPics; i++)
+    {
+      Int iRef = m_pcCfg->getGOPEntry(iGOPid).m_referencePics[i];
+      if(iRef>0&&(iRef<iCloseRight||iCloseRight==-1))
+      {
+        iCloseRight=iRef;
+      }
+      else if(iRef<0&&(iRef>iCloseLeft||iCloseLeft==1))
+      {
+        iCloseLeft=iRef;
+      }
+    }
+    if(iCloseRight>-1)
+    {
+      iCloseRight=iCloseRight+m_pcCfg->getGOPEntry(iGOPid).m_POC-1;
+    }
+    if(iCloseLeft<1)
+    {
+      iCloseLeft=iCloseLeft+m_pcCfg->getGOPEntry(iGOPid).m_POC-1;
+      while(iCloseLeft<0)
+      {
+        iCloseLeft+=m_iGopSize;
+      }
+    }
+    Int iLeftQP=0, iRightQP=0;
+    for(Int i=0; i<m_iGopSize; i++)
+    {
+      if(m_pcCfg->getGOPEntry(i).m_POC==(iCloseLeft%m_iGopSize)+1)
+      {
+        iLeftQP= m_pcCfg->getGOPEntry(i).m_QPOffset;
+      }
+      if (m_pcCfg->getGOPEntry(i).m_POC==(iCloseRight%m_iGopSize)+1)
+      {
+        iRightQP=m_pcCfg->getGOPEntry(i).m_QPOffset;
+      }
+    }
+    if(iCloseRight>-1&&iRightQP<iLeftQP)
+    {
+      uiColDir=0;
+    }
+
+    /////////////////////////////////////////////////////////////////////////////////////////////////// Initial to start encoding
+    Int iTimeOffset;
+    Int pocCurr;
+
+    if(iPOCLast == 0) //case first frame or first top field
+    {
+      pocCurr=0;
+      iTimeOffset = 1;
+    }
+    else if(iPOCLast == 1 && isField) //case first bottom field, just like the first frame, the poc computation is not right anymore, we set the right value
+    {
+      pocCurr = 1;
+      iTimeOffset = 1;
+    }
+    else
+    {
+      pocCurr = iPOCLast - iNumPicRcvd + m_pcCfg->getGOPEntry(iGOPid).m_POC - ((isField && m_iGopSize>1) ? 1:0);
+      iTimeOffset = m_pcCfg->getGOPEntry(iGOPid).m_POC;
+    }
+
+    if(pocCurr>=m_pcCfg->getFramesToBeEncoded())
+    {
+#if EFFICIENT_FIELD_IRAP
+      if(IRAPtoReorder)
+      {
+        if(swapIRAPForward)
+        {
+          if(iGOPid == IRAPGOPid)
+          {
+            iGOPid = IRAPGOPid +1;
+            IRAPtoReorder = false;
+          }
+          else if(iGOPid == IRAPGOPid +1)
+          {
+            iGOPid --;
+          }
+        }
+        else
+        {
+          if(iGOPid == IRAPGOPid)
+          {
+            iGOPid = IRAPGOPid -1;
+          }
+          else if(iGOPid == IRAPGOPid -1)
+          {
+            iGOPid = IRAPGOPid;
+            IRAPtoReorder = false;
+          }
+        }
+      }
+#endif
+      continue;
+    }
+
+    if( getNalUnitType(pocCurr, m_iLastIDR, isField) == NAL_UNIT_CODED_SLICE_IDR_W_RADL || getNalUnitType(pocCurr, m_iLastIDR, isField) == NAL_UNIT_CODED_SLICE_IDR_N_LP )
+    {
+      m_iLastIDR = pocCurr;
+    }
+    // start a new access unit: create an entry in the list of output access units
+    accessUnitsInGOP.push_back(AccessUnit());
+    AccessUnit& accessUnit = accessUnitsInGOP.back();
+    xGetBuffer( rcListPic, rcListPicYuvRecOut, iNumPicRcvd, iTimeOffset, pcPic, pcPicYuvRecOut, pocCurr, isField );
+
+    //  Slice data initialization
+    pcPic->clearSliceBuffer();
+    assert(pcPic->getNumAllocatedSlice() == 1);
+    m_pcSliceEncoder->setSliceIdx(0);
+    pcPic->setCurrSliceIdx(0);
+
+    m_pcSliceEncoder->initEncSlice ( pcPic, iPOCLast, pocCurr, iNumPicRcvd, iGOPid, pcSlice, m_pcEncTop->getSPS(), m_pcEncTop->getPPS(), isField );
+
+    //Set Frame/Field coding
+    pcSlice->getPic()->setField(isField);
+
+    pcSlice->setLastIDR(m_iLastIDR);
+    pcSlice->setSliceIdx(0);
+    //set default slice level flag to the same as SPS level flag
+    pcSlice->setLFCrossSliceBoundaryFlag(  pcSlice->getPPS()->getLoopFilterAcrossSlicesEnabledFlag()  );
+    pcSlice->setScalingList ( m_pcEncTop->getScalingList()  );
+    if(m_pcEncTop->getUseScalingListId() == SCALING_LIST_OFF)
+    {
+      m_pcEncTop->getTrQuant()->setFlatScalingList(pcSlice->getSPS()->getChromaFormatIdc());
+      m_pcEncTop->getTrQuant()->setUseScalingList(false);
+      m_pcEncTop->getSPS()->setScalingListPresentFlag(false);
+      m_pcEncTop->getPPS()->setScalingListPresentFlag(false);
+    }
+    else if(m_pcEncTop->getUseScalingListId() == SCALING_LIST_DEFAULT)
+    {
+      pcSlice->setDefaultScalingList ();
+      m_pcEncTop->getSPS()->setScalingListPresentFlag(false);
+      m_pcEncTop->getPPS()->setScalingListPresentFlag(false);
+      m_pcEncTop->getTrQuant()->setScalingList(pcSlice->getScalingList(), pcSlice->getSPS()->getChromaFormatIdc());
+      m_pcEncTop->getTrQuant()->setUseScalingList(true);
+    }
+    else if(m_pcEncTop->getUseScalingListId() == SCALING_LIST_FILE_READ)
+    {
+      pcSlice->setDefaultScalingList ();
+      if(pcSlice->getScalingList()->xParseScalingList(m_pcCfg->getScalingListFile()))
+      {
+        Bool bParsedScalingList=false; // Use of boolean so that assertion outputs useful string
+        assert(bParsedScalingList);
+        exit(1);
+      }
+      pcSlice->getScalingList()->checkDcOfMatrix();
+      m_pcEncTop->getSPS()->setScalingListPresentFlag(pcSlice->checkDefaultScalingList());
+      m_pcEncTop->getPPS()->setScalingListPresentFlag(false);
+      m_pcEncTop->getTrQuant()->setScalingList(pcSlice->getScalingList(), pcSlice->getSPS()->getChromaFormatIdc());
+      m_pcEncTop->getTrQuant()->setUseScalingList(true);
+    }
+    else
+    {
+      printf("error : ScalingList == %d no support\n",m_pcEncTop->getUseScalingListId());
+      assert(0);
+    }
+
+    if(pcSlice->getSliceType()==B_SLICE&&m_pcCfg->getGOPEntry(iGOPid).m_sliceType=='P')
+    {
+      pcSlice->setSliceType(P_SLICE);
+    }
+    if(pcSlice->getSliceType()==B_SLICE&&m_pcCfg->getGOPEntry(iGOPid).m_sliceType=='I')
+    {
+      pcSlice->setSliceType(I_SLICE);
+    }
+    
+    // Set the nal unit type
+    pcSlice->setNalUnitType(getNalUnitType(pocCurr, m_iLastIDR, isField));
+    if(pcSlice->getTemporalLayerNonReferenceFlag())
+    {
+      if (pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_TRAIL_R &&
+          !(m_iGopSize == 1 && pcSlice->getSliceType() == I_SLICE))
+        // Add this condition to avoid POC issues with encoder_intra_main.cfg configuration (see #1127 in bug tracker)
+      {
+        pcSlice->setNalUnitType(NAL_UNIT_CODED_SLICE_TRAIL_N);
+      }
+      if(pcSlice->getNalUnitType()==NAL_UNIT_CODED_SLICE_RADL_R)
+      {
+        pcSlice->setNalUnitType(NAL_UNIT_CODED_SLICE_RADL_N);
+      }
+      if(pcSlice->getNalUnitType()==NAL_UNIT_CODED_SLICE_RASL_R)
+      {
+        pcSlice->setNalUnitType(NAL_UNIT_CODED_SLICE_RASL_N);
+      }
+    }
+
+#if EFFICIENT_FIELD_IRAP
+    if ( pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_LP
+      || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_RADL
+      || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_N_LP
+      || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_W_RADL
+      || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_N_LP
+      || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_CRA )  // IRAP picture
+    {
+      m_associatedIRAPType = pcSlice->getNalUnitType();
+      m_associatedIRAPPOC = pocCurr;
+    }
+    pcSlice->setAssociatedIRAPType(m_associatedIRAPType);
+    pcSlice->setAssociatedIRAPPOC(m_associatedIRAPPOC);
+#endif
+    // Do decoding refresh marking if any
+    pcSlice->decodingRefreshMarking(m_pocCRA, m_bRefreshPending, rcListPic);
+    m_pcEncTop->selectReferencePictureSet(pcSlice, pocCurr, iGOPid);
+    pcSlice->getRPS()->setNumberOfLongtermPictures(0);
+#if !EFFICIENT_FIELD_IRAP
+    if ( pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_LP
+      || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_W_RADL
+      || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_BLA_N_LP
+      || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_W_RADL
+      || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_IDR_N_LP
+      || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_CRA )  // IRAP picture
+    {
+      m_associatedIRAPType = pcSlice->getNalUnitType();
+      m_associatedIRAPPOC = pocCurr;
+    }
+    pcSlice->setAssociatedIRAPType(m_associatedIRAPType);
+    pcSlice->setAssociatedIRAPPOC(m_associatedIRAPPOC);
+#endif
+
+#if ALLOW_RECOVERY_POINT_AS_RAP
+    if ((pcSlice->checkThatAllRefPicsAreAvailable(rcListPic, pcSlice->getRPS(), false, m_iLastRecoveryPicPOC, m_pcCfg->getDecodingRefreshType() == 3) != 0) || (pcSlice->isIRAP()) 
+#if EFFICIENT_FIELD_IRAP
+      || (isField && pcSlice->getAssociatedIRAPType() >= NAL_UNIT_CODED_SLICE_BLA_W_LP && pcSlice->getAssociatedIRAPType() <= NAL_UNIT_CODED_SLICE_CRA && pcSlice->getAssociatedIRAPPOC() == pcSlice->getPOC()+1)
+#endif
+      )
+    {
+      pcSlice->createExplicitReferencePictureSetFromReference(rcListPic, pcSlice->getRPS(), pcSlice->isIRAP(), m_iLastRecoveryPicPOC, m_pcCfg->getDecodingRefreshType() == 3);
+    }
+#else
+    if ((pcSlice->checkThatAllRefPicsAreAvailable(rcListPic, pcSlice->getRPS(), false) != 0) || (pcSlice->isIRAP()))
+    {
+      pcSlice->createExplicitReferencePictureSetFromReference(rcListPic, pcSlice->getRPS(), pcSlice->isIRAP());
+    }
+#endif
+
+    pcSlice->applyReferencePictureSet(rcListPic, pcSlice->getRPS());
+
+    if(pcSlice->getTLayer() > 0 
+      &&  !( pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_RADL_N     // Check if not a leading picture
+          || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_RADL_R
+          || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_RASL_N
+          || pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_RASL_R )
+        )
+    {
+      if(pcSlice->isTemporalLayerSwitchingPoint(rcListPic) || pcSlice->getSPS()->getTemporalIdNestingFlag())
+      {
+        if(pcSlice->getTemporalLayerNonReferenceFlag())
+        {
+          pcSlice->setNalUnitType(NAL_UNIT_CODED_SLICE_TSA_N);
+        }
+        else
+        {
+          pcSlice->setNalUnitType(NAL_UNIT_CODED_SLICE_TSA_R);
+        }
+      }
+      else if(pcSlice->isStepwiseTemporalLayerSwitchingPointCandidate(rcListPic))
+      {
+        Bool isSTSA=true;
+        for(Int ii=iGOPid+1;(ii<m_pcCfg->getGOPSize() && isSTSA==true);ii++)
+        {
+          Int lTid= m_pcCfg->getGOPEntry(ii).m_temporalId;
+          if(lTid==pcSlice->getTLayer())
+          {
+            TComReferencePictureSet* nRPS = pcSlice->getSPS()->getRPSList()->getReferencePictureSet(ii);
+            for(Int jj=0;jj<nRPS->getNumberOfPictures();jj++)
+            {
+              if(nRPS->getUsed(jj))
+              {
+                Int tPoc=m_pcCfg->getGOPEntry(ii).m_POC+nRPS->getDeltaPOC(jj);
+                Int kk=0;
+                for(kk=0;kk<m_pcCfg->getGOPSize();kk++)
+                {
+                  if(m_pcCfg->getGOPEntry(kk).m_POC==tPoc)
+                    break;
+                }
+                Int tTid=m_pcCfg->getGOPEntry(kk).m_temporalId;
+                if(tTid >= pcSlice->getTLayer())
+                {
+                  isSTSA=false;
+                  break;
+                }
+              }
+            }
+          }
+        }
+        if(isSTSA==true)
+        {
+          if(pcSlice->getTemporalLayerNonReferenceFlag())
+          {
+            pcSlice->setNalUnitType(NAL_UNIT_CODED_SLICE_STSA_N);
+          }
+          else
+          {
+            pcSlice->setNalUnitType(NAL_UNIT_CODED_SLICE_STSA_R);
+          }
+        }
+      }
+    }
+    arrangeLongtermPicturesInRPS(pcSlice, rcListPic);
+    TComRefPicListModification* refPicListModification = pcSlice->getRefPicListModification();
+    refPicListModification->setRefPicListModificationFlagL0(0);
+    refPicListModification->setRefPicListModificationFlagL1(0);
+    pcSlice->setNumRefIdx(REF_PIC_LIST_0,min(m_pcCfg->getGOPEntry(iGOPid).m_numRefPicsActive,pcSlice->getRPS()->getNumberOfPictures()));
+    pcSlice->setNumRefIdx(REF_PIC_LIST_1,min(m_pcCfg->getGOPEntry(iGOPid).m_numRefPicsActive,pcSlice->getRPS()->getNumberOfPictures()));
+
+#if ADAPTIVE_QP_SELECTION
+    pcSlice->setTrQuant( m_pcEncTop->getTrQuant() );
+#endif
+
+    //  Set reference list
+    pcSlice->setRefPicList ( rcListPic );
+
+    //  Slice info. refinement
+    if ( (pcSlice->getSliceType() == B_SLICE) && (pcSlice->getNumRefIdx(REF_PIC_LIST_1) == 0) )
+    {
+      pcSlice->setSliceType ( P_SLICE );
+    }
+
+    if (pcSlice->getSliceType() == B_SLICE)
+    {
+      pcSlice->setColFromL0Flag(1-uiColDir);
+      Bool bLowDelay = true;
+      Int  iCurrPOC  = pcSlice->getPOC();
+      Int iRefIdx = 0;
+
+      for (iRefIdx = 0; iRefIdx < pcSlice->getNumRefIdx(REF_PIC_LIST_0) && bLowDelay; iRefIdx++)
+      {
+        if ( pcSlice->getRefPic(REF_PIC_LIST_0, iRefIdx)->getPOC() > iCurrPOC )
+        {
+          bLowDelay = false;
+        }
+      }
+      for (iRefIdx = 0; iRefIdx < pcSlice->getNumRefIdx(REF_PIC_LIST_1) && bLowDelay; iRefIdx++)
+      {
+        if ( pcSlice->getRefPic(REF_PIC_LIST_1, iRefIdx)->getPOC() > iCurrPOC )
+        {
+          bLowDelay = false;
+        }
+      }
+
+      pcSlice->setCheckLDC(bLowDelay);
+    }
+    else
+    {
+      pcSlice->setCheckLDC(true);
+    }
+
+    uiColDir = 1-uiColDir;
+
+    //-------------------------------------------------------------
+    pcSlice->setRefPOCList();
+
+    pcSlice->setList1IdxToList0Idx();
+
+    if (m_pcEncTop->getTMVPModeId() == 2)
+    {
+      if (iGOPid == 0) // first picture in SOP (i.e. forward B)
+      {
+        pcSlice->setEnableTMVPFlag(0);
+      }
+      else
+      {
+        // Note: pcSlice->getColFromL0Flag() is assumed to be always 0 and getcolRefIdx() is always 0.
+        pcSlice->setEnableTMVPFlag(1);
+      }
+      pcSlice->getSPS()->setTMVPFlagsPresent(1);
+    }
+    else if (m_pcEncTop->getTMVPModeId() == 1)
+    {
+      pcSlice->getSPS()->setTMVPFlagsPresent(1);
+      pcSlice->setEnableTMVPFlag(1);
+    }
+    else
+    {
+      pcSlice->getSPS()->setTMVPFlagsPresent(0);
+      pcSlice->setEnableTMVPFlag(0);
+    }
+    /////////////////////////////////////////////////////////////////////////////////////////////////// Compress a slice
+    //  Slice compression
+    if (m_pcCfg->getUseASR())
+    {
+      m_pcSliceEncoder->setSearchRange(pcSlice);
+    }
+
+    Bool bGPBcheck=false;
+    if ( pcSlice->getSliceType() == B_SLICE)
+    {
+      if ( pcSlice->getNumRefIdx(RefPicList( 0 ) ) == pcSlice->getNumRefIdx(RefPicList( 1 ) ) )
+      {
+        bGPBcheck=true;
+        Int i;
+        for ( i=0; i < pcSlice->getNumRefIdx(RefPicList( 1 ) ); i++ )
+        {
+          if ( pcSlice->getRefPOC(RefPicList(1), i) != pcSlice->getRefPOC(RefPicList(0), i) )
+          {
+            bGPBcheck=false;
+            break;
+          }
+        }
+      }
+    }
+    if(bGPBcheck)
+    {
+      pcSlice->setMvdL1ZeroFlag(true);
+    }
+    else
+    {
+      pcSlice->setMvdL1ZeroFlag(false);
+    }
+    pcPic->getSlice(pcSlice->getSliceIdx())->setMvdL1ZeroFlag(pcSlice->getMvdL1ZeroFlag());
+
+    pcPic->getPicSym()->initTiles(pcSlice->getPPS());
+    pcPic->getPicSym()->initCtuTsRsAddrMaps();
+
+    Double lambda            = 0.0;
+    Int actualHeadBits       = 0;
+    Int actualTotalBits      = 0;
+    Int estimatedBits        = 0;
+    Int tmpBitsBeforeWriting = 0;
+    if ( m_pcCfg->getUseRateCtrl() )
+    {
+      Int frameLevel = m_pcRateCtrl->getRCSeq()->getGOPID2Level( iGOPid );
+      if ( pcPic->getSlice(0)->getSliceType() == I_SLICE )
+      {
+        frameLevel = 0;
+      }
+      m_pcRateCtrl->initRCPic( frameLevel );
+      estimatedBits = m_pcRateCtrl->getRCPic()->getTargetBits();
+
+      Int sliceQP = m_pcCfg->getInitialQP();
+      if ( ( pcSlice->getPOC() == 0 && m_pcCfg->getInitialQP() > 0 ) || ( frameLevel == 0 && m_pcCfg->getForceIntraQP() ) ) // QP is specified
+      {
+        Int    NumberBFrames = ( m_pcCfg->getGOPSize() - 1 );
+        Double dLambda_scale = 1.0 - Clip3( 0.0, 0.5, 0.05*(Double)NumberBFrames );
+        Double dQPFactor     = 0.57*dLambda_scale;
+        Int    SHIFT_QP      = 12;
+        Int    bitdepth_luma_qp_scale = 0;
+        Double qp_temp = (Double) sliceQP + bitdepth_luma_qp_scale - SHIFT_QP;
+        lambda = dQPFactor*pow( 2.0, qp_temp/3.0 );
+      }
+      else if ( frameLevel == 0 )   // intra case, but use the model
+      {
+        m_pcSliceEncoder->calCostSliceI(pcPic);
+
+        if ( m_pcCfg->getIntraPeriod() != 1 )   // do not refine allocated bits for all intra case
+        {
+          Int bits = m_pcRateCtrl->getRCSeq()->getLeftAverageBits();
+          bits = m_pcRateCtrl->getRCPic()->getRefineBitsForIntra( bits );
+          if ( bits < 200 )
+          {
+            bits = 200;
+          }
+          m_pcRateCtrl->getRCPic()->setTargetBits( bits );
+        }
+
+        list<TEncRCPic*> listPreviousPicture = m_pcRateCtrl->getPicList();
+        m_pcRateCtrl->getRCPic()->getLCUInitTargetBits();
+        lambda  = m_pcRateCtrl->getRCPic()->estimatePicLambda( listPreviousPicture, pcSlice->getSliceType());
+        sliceQP = m_pcRateCtrl->getRCPic()->estimatePicQP( lambda, listPreviousPicture );
+      }
+      else    // normal case
+      {
+        list<TEncRCPic*> listPreviousPicture = m_pcRateCtrl->getPicList();
+        lambda  = m_pcRateCtrl->getRCPic()->estimatePicLambda( listPreviousPicture, pcSlice->getSliceType());
+        sliceQP = m_pcRateCtrl->getRCPic()->estimatePicQP( lambda, listPreviousPicture );
+      }
+
+      sliceQP = Clip3( -pcSlice->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA), MAX_QP, sliceQP );
+      m_pcRateCtrl->getRCPic()->setPicEstQP( sliceQP );
+
+      m_pcSliceEncoder->resetQP( pcPic, sliceQP, lambda );
+    }
+
+    UInt uiNumSliceSegments = 1;
+
+
+    // Allocate some coders, now the number of tiles are known.
+    const Int numSubstreams = pcSlice->getPPS()->getNumSubstreams();
+    std::vector<TComOutputBitstream> substreamsOut(numSubstreams);
+
+    // now compress (trial encode) the various slice segments (slices, and dependent slices)
+    {
+      const UInt numberOfCtusInFrame=pcPic->getPicSym()->getNumberOfCtusInFrame();
+      pcSlice->setSliceCurStartCtuTsAddr( 0 );
+      pcSlice->setSliceSegmentCurStartCtuTsAddr( 0 );
+
+      for(UInt nextCtuTsAddr = 0; nextCtuTsAddr < numberOfCtusInFrame; )
+      {
+        m_pcSliceEncoder->precompressSlice( pcPic );
+        m_pcSliceEncoder->compressSlice   ( pcPic );
+
+        const UInt curSliceSegmentEnd = pcSlice->getSliceSegmentCurEndCtuTsAddr();
+        if (curSliceSegmentEnd < numberOfCtusInFrame)
+        {
+          const Bool bNextSegmentIsDependentSlice=curSliceSegmentEnd<pcSlice->getSliceCurEndCtuTsAddr();
+          const UInt sliceBits=pcSlice->getSliceBits();
+          pcPic->allocateNewSlice();
+          // prepare for next slice
+          pcPic->setCurrSliceIdx                    ( uiNumSliceSegments );
+          m_pcSliceEncoder->setSliceIdx             ( uiNumSliceSegments   );
+          pcSlice = pcPic->getSlice                 ( uiNumSliceSegments   );
+          pcSlice->copySliceInfo                    ( pcPic->getSlice(uiNumSliceSegments-1)  );
+          pcSlice->setSliceIdx                      ( uiNumSliceSegments   );
+          if (bNextSegmentIsDependentSlice)
+          {
+            pcSlice->setSliceBits(sliceBits);
+          }
+          else
+          {
+            pcSlice->setSliceCurStartCtuTsAddr      ( curSliceSegmentEnd );
+            pcSlice->setSliceBits(0);
+          }
+          pcSlice->setDependentSliceSegmentFlag(bNextSegmentIsDependentSlice);
+          pcSlice->setSliceSegmentCurStartCtuTsAddr ( curSliceSegmentEnd );
+          uiNumSliceSegments ++;
+        }
+        nextCtuTsAddr = curSliceSegmentEnd;
+      }
+    }
+
+    pcSlice = pcPic->getSlice(0);
+
+    // SAO parameter estimation using non-deblocked pixels for CTU bottom and right boundary areas
+    if( pcSlice->getSPS()->getUseSAO() && m_pcCfg->getSaoCtuBoundary() )
+    {
+      m_pcSAO->getPreDBFStatistics(pcPic);
+    }
+
+    //-- Loop filter
+    Bool bLFCrossTileBoundary = pcSlice->getPPS()->getLoopFilterAcrossTilesEnabledFlag();
+    m_pcLoopFilter->setCfg(bLFCrossTileBoundary);
+    if ( m_pcCfg->getDeblockingFilterMetric() )
+    {
+      dblMetric(pcPic, uiNumSliceSegments);
+    }
+    m_pcLoopFilter->loopFilterPic( pcPic );
+
+    /////////////////////////////////////////////////////////////////////////////////////////////////// File writing
+    // Set entropy coder
+    m_pcEntropyCoder->setEntropyCoder   ( m_pcCavlcCoder, pcSlice );
+
+    /* write various header sets. */
+    if ( m_bSeqFirst )
+    {
+      OutputNALUnit nalu(NAL_UNIT_VPS);
+      m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+      m_pcEntropyCoder->encodeVPS(m_pcEncTop->getVPS());
+      writeRBSPTrailingBits(nalu.m_Bitstream);
+      accessUnit.push_back(new NALUnitEBSP(nalu));
+      actualTotalBits += UInt(accessUnit.back()->m_nalUnitData.str().size()) * 8;
+
+      nalu = NALUnit(NAL_UNIT_SPS);
+      m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+      if (m_bSeqFirst)
+      {
+        pcSlice->getSPS()->setNumLongTermRefPicSPS(m_numLongTermRefPicSPS);
+        assert (m_numLongTermRefPicSPS <= MAX_NUM_LONG_TERM_REF_PICS);
+        for (Int k = 0; k < m_numLongTermRefPicSPS; k++)
+        {
+          pcSlice->getSPS()->setLtRefPicPocLsbSps(k, m_ltRefPicPocLsbSps[k]);
+          pcSlice->getSPS()->setUsedByCurrPicLtSPSFlag(k, m_ltRefPicUsedByCurrPicFlag[k]);
+        }
+      }
+      if( m_pcCfg->getPictureTimingSEIEnabled() || m_pcCfg->getDecodingUnitInfoSEIEnabled() )
+      {
+        UInt maxCU = m_pcCfg->getSliceArgument() >> ( pcSlice->getSPS()->getMaxCUDepth() << 1);
+        UInt numDU = ( m_pcCfg->getSliceMode() == FIXED_NUMBER_OF_CTU ) ? ( pcPic->getNumberOfCtusInFrame() / maxCU ) : ( 0 );
+        if( pcPic->getNumberOfCtusInFrame() % maxCU != 0 || numDU == 0 )
+        {
+          numDU ++;
+        }
+        pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->setNumDU( numDU );
+        pcSlice->getSPS()->setHrdParameters( m_pcCfg->getFrameRate(), numDU, m_pcCfg->getTargetBitrate(), ( m_pcCfg->getIntraPeriod() > 0 ) );
+      }
+      if( m_pcCfg->getBufferingPeriodSEIEnabled() || m_pcCfg->getPictureTimingSEIEnabled() || m_pcCfg->getDecodingUnitInfoSEIEnabled() )
+      {
+        pcSlice->getSPS()->getVuiParameters()->setHrdParametersPresentFlag( true );
+      }
+      m_pcEntropyCoder->encodeSPS(pcSlice->getSPS());
+      writeRBSPTrailingBits(nalu.m_Bitstream);
+      accessUnit.push_back(new NALUnitEBSP(nalu));
+      actualTotalBits += UInt(accessUnit.back()->m_nalUnitData.str().size()) * 8;
+
+      nalu = NALUnit(NAL_UNIT_PPS);
+      m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+      m_pcEntropyCoder->encodePPS(pcSlice->getPPS());
+      writeRBSPTrailingBits(nalu.m_Bitstream);
+      accessUnit.push_back(new NALUnitEBSP(nalu));
+      actualTotalBits += UInt(accessUnit.back()->m_nalUnitData.str().size()) * 8;
+
+      xCreateLeadingSEIMessages(accessUnit, pcSlice->getSPS());
+
+      m_bSeqFirst = false;
+    }
+
+    if (writeSOP) // write SOP description SEI (if enabled) at the beginning of GOP
+    {
+      Int SOPcurrPOC = pocCurr;
+
+      OutputNALUnit nalu(NAL_UNIT_PREFIX_SEI);
+      m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+      m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+
+      SEISOPDescription SOPDescriptionSEI;
+      SOPDescriptionSEI.m_sopSeqParameterSetId = pcSlice->getSPS()->getSPSId();
+
+      UInt i = 0;
+      UInt prevEntryId = iGOPid;
+      for (Int j = iGOPid; j < m_iGopSize; j++)
+      {
+        Int deltaPOC = m_pcCfg->getGOPEntry(j).m_POC - m_pcCfg->getGOPEntry(prevEntryId).m_POC;
+        if ((SOPcurrPOC + deltaPOC) < m_pcCfg->getFramesToBeEncoded())
+        {
+          SOPcurrPOC += deltaPOC;
+          SOPDescriptionSEI.m_sopDescVclNaluType[i] = getNalUnitType(SOPcurrPOC, m_iLastIDR, isField);
+          SOPDescriptionSEI.m_sopDescTemporalId[i] = m_pcCfg->getGOPEntry(j).m_temporalId;
+          SOPDescriptionSEI.m_sopDescStRpsIdx[i] = m_pcEncTop->getReferencePictureSetIdxForSOP(pcSlice, SOPcurrPOC, j);
+          SOPDescriptionSEI.m_sopDescPocDelta[i] = deltaPOC;
+
+          prevEntryId = j;
+          i++;
+        }
+      }
+
+      SOPDescriptionSEI.m_numPicsInSopMinus1 = i - 1;
+
+      m_seiWriter.writeSEImessage( nalu.m_Bitstream, SOPDescriptionSEI, pcSlice->getSPS());
+      writeRBSPTrailingBits(nalu.m_Bitstream);
+      accessUnit.push_back(new NALUnitEBSP(nalu));
+
+      writeSOP = false;
+    }
+
+    if( ( m_pcCfg->getPictureTimingSEIEnabled() || m_pcCfg->getDecodingUnitInfoSEIEnabled() ) &&
+        ( pcSlice->getSPS()->getVuiParametersPresentFlag() ) &&
+        ( ( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getNalHrdParametersPresentFlag() )
+       || ( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getVclHrdParametersPresentFlag() ) ) )
+    {
+      if( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getSubPicCpbParamsPresentFlag() )
+      {
+        UInt numDU = pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getNumDU();
+        pictureTimingSEI.m_numDecodingUnitsMinus1     = ( numDU - 1 );
+        pictureTimingSEI.m_duCommonCpbRemovalDelayFlag = false;
+
+        if( pictureTimingSEI.m_numNalusInDuMinus1 == NULL )
+        {
+          pictureTimingSEI.m_numNalusInDuMinus1       = new UInt[ numDU ];
+        }
+        if( pictureTimingSEI.m_duCpbRemovalDelayMinus1  == NULL )
+        {
+          pictureTimingSEI.m_duCpbRemovalDelayMinus1  = new UInt[ numDU ];
+        }
+        if( accumBitsDU == NULL )
+        {
+          accumBitsDU                                  = new UInt[ numDU ];
+        }
+        if( accumNalsDU == NULL )
+        {
+          accumNalsDU                                  = new UInt[ numDU ];
+        }
+      }
+      pictureTimingSEI.m_auCpbRemovalDelay = std::min<Int>(std::max<Int>(1, m_totalCoded - m_lastBPSEI), static_cast<Int>(pow(2, static_cast<Double>(pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getCpbRemovalDelayLengthMinus1()+1)))); // Syntax element signalled as minus, hence the +1.
+      pictureTimingSEI.m_picDpbOutputDelay = pcSlice->getSPS()->getNumReorderPics(pcSlice->getSPS()->getMaxTLayers()-1) + pcSlice->getPOC() - m_totalCoded;
+#if EFFICIENT_FIELD_IRAP
+      if(IRAPGOPid > 0 && IRAPGOPid < m_iGopSize)
+      {
+        // if pictures have been swapped there is likely one more picture delay on their tid. Very rough approximation
+        pictureTimingSEI.m_picDpbOutputDelay ++;
+      }
+#endif
+      Int factor = pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getTickDivisorMinus2() + 2;
+      pictureTimingSEI.m_picDpbOutputDuDelay = factor * pictureTimingSEI.m_picDpbOutputDelay;
+      if( m_pcCfg->getDecodingUnitInfoSEIEnabled() )
+      {
+        picSptDpbOutputDuDelay = factor * pictureTimingSEI.m_picDpbOutputDelay;
+      }
+    }
+
+    if( ( m_pcCfg->getBufferingPeriodSEIEnabled() ) && ( pcSlice->getSliceType() == I_SLICE ) &&
+        ( pcSlice->getSPS()->getVuiParametersPresentFlag() ) &&
+        ( ( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getNalHrdParametersPresentFlag() )
+       || ( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getVclHrdParametersPresentFlag() ) ) )
+    {
+      OutputNALUnit nalu(NAL_UNIT_PREFIX_SEI);
+      m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+      m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+
+      SEIBufferingPeriod sei_buffering_period;
+
+      UInt uiInitialCpbRemovalDelay = (90000/2);                      // 0.5 sec
+      sei_buffering_period.m_initialCpbRemovalDelay      [0][0]     = uiInitialCpbRemovalDelay;
+      sei_buffering_period.m_initialCpbRemovalDelayOffset[0][0]     = uiInitialCpbRemovalDelay;
+      sei_buffering_period.m_initialCpbRemovalDelay      [0][1]     = uiInitialCpbRemovalDelay;
+      sei_buffering_period.m_initialCpbRemovalDelayOffset[0][1]     = uiInitialCpbRemovalDelay;
+
+      Double dTmp = (Double)pcSlice->getSPS()->getVuiParameters()->getTimingInfo()->getNumUnitsInTick() / (Double)pcSlice->getSPS()->getVuiParameters()->getTimingInfo()->getTimeScale();
+
+      UInt uiTmp = (UInt)( dTmp * 90000.0 );
+      uiInitialCpbRemovalDelay -= uiTmp;
+      uiInitialCpbRemovalDelay -= uiTmp / ( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getTickDivisorMinus2() + 2 );
+      sei_buffering_period.m_initialAltCpbRemovalDelay      [0][0]  = uiInitialCpbRemovalDelay;
+      sei_buffering_period.m_initialAltCpbRemovalDelayOffset[0][0]  = uiInitialCpbRemovalDelay;
+      sei_buffering_period.m_initialAltCpbRemovalDelay      [0][1]  = uiInitialCpbRemovalDelay;
+      sei_buffering_period.m_initialAltCpbRemovalDelayOffset[0][1]  = uiInitialCpbRemovalDelay;
+
+      sei_buffering_period.m_rapCpbParamsPresentFlag              = 0;
+      //for the concatenation, it can be set to one during splicing.
+      sei_buffering_period.m_concatenationFlag = 0;
+      //since the temporal layer HRD is not ready, we assumed it is fixed
+      sei_buffering_period.m_auCpbRemovalDelayDelta = 1;
+
+      sei_buffering_period.m_cpbDelayOffset = 0;
+      sei_buffering_period.m_dpbDelayOffset = 0;
+
+      m_seiWriter.writeSEImessage( nalu.m_Bitstream, sei_buffering_period, pcSlice->getSPS());
+      writeRBSPTrailingBits(nalu.m_Bitstream);
+
+      {
+        UInt seiPositionInAu = xGetFirstSeiLocation(accessUnit);
+        UInt offsetPosition = m_activeParameterSetSEIPresentInAU;   // Insert BP SEI after APS SEI
+        AccessUnit::iterator it = accessUnit.begin();
+        for(Int j = 0; j < seiPositionInAu + offsetPosition; j++)
+        {
+          it++;
+        }
+        accessUnit.insert(it, new NALUnitEBSP(nalu));
+        m_bufferingPeriodSEIPresentInAU = true;
+      }
+
+      if (m_pcCfg->getScalableNestingSEIEnabled())
+      {
+        OutputNALUnit naluTmp(NAL_UNIT_PREFIX_SEI);
+        m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+        m_pcEntropyCoder->setBitstream(&naluTmp.m_Bitstream);
+        scalableNestingSEI.m_nestedSEIs.clear();
+        scalableNestingSEI.m_nestedSEIs.push_back(&sei_buffering_period);
+        m_seiWriter.writeSEImessage( naluTmp.m_Bitstream, scalableNestingSEI, pcSlice->getSPS());
+        writeRBSPTrailingBits(naluTmp.m_Bitstream);
+        UInt seiPositionInAu = xGetFirstSeiLocation(accessUnit);
+        UInt offsetPosition = m_activeParameterSetSEIPresentInAU + m_bufferingPeriodSEIPresentInAU + m_pictureTimingSEIPresentInAU;   // Insert BP SEI after non-nested APS, BP and PT SEIs
+        AccessUnit::iterator it = accessUnit.begin();
+        for(Int j = 0; j < seiPositionInAu + offsetPosition; j++)
+        {
+          it++;
+        }
+        accessUnit.insert(it, new NALUnitEBSP(naluTmp));
+        m_nestedBufferingPeriodSEIPresentInAU = true;
+      }
+
+      m_lastBPSEI = m_totalCoded;
+      m_cpbRemovalDelay = 0;
+    }
+    m_cpbRemovalDelay ++;
+
+    if(pcSlice->getSPS()->getVuiParametersPresentFlag() && m_pcCfg->getChromaSamplingFilterHintEnabled() && ( pcSlice->getSliceType() == I_SLICE ))
+    {
+      SEIChromaSamplingFilterHint *seiChromaSamplingFilterHint = xCreateSEIChromaSamplingFilterHint(m_pcCfg->getChromaLocInfoPresentFlag(), m_pcCfg->getChromaSamplingHorFilterIdc(), m_pcCfg->getChromaSamplingVerFilterIdc());
+
+      OutputNALUnit naluTmp(NAL_UNIT_PREFIX_SEI); 
+      m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+      m_pcEntropyCoder->setBitstream(&naluTmp.m_Bitstream);
+      m_seiWriter.writeSEImessage(naluTmp.m_Bitstream, *seiChromaSamplingFilterHint, pcSlice->getSPS()); 
+      writeRBSPTrailingBits(naluTmp.m_Bitstream);
+      accessUnit.push_back(new NALUnitEBSP(naluTmp));
+      delete seiChromaSamplingFilterHint;
+    }
+
+    if( ( m_pcEncTop->getRecoveryPointSEIEnabled() ) && ( pcSlice->getSliceType() == I_SLICE ) )
+    {
+      if( m_pcEncTop->getGradualDecodingRefreshInfoEnabled() && !pcSlice->getRapPicFlag() )
+      {
+        // Gradual decoding refresh SEI
+        OutputNALUnit nalu(NAL_UNIT_PREFIX_SEI);
+        m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+        m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+
+        SEIGradualDecodingRefreshInfo seiGradualDecodingRefreshInfo;
+        seiGradualDecodingRefreshInfo.m_gdrForegroundFlag = true; // Indicating all "foreground"
+
+        m_seiWriter.writeSEImessage( nalu.m_Bitstream, seiGradualDecodingRefreshInfo, pcSlice->getSPS() );
+        writeRBSPTrailingBits(nalu.m_Bitstream);
+        accessUnit.push_back(new NALUnitEBSP(nalu));
+      }
+    // Recovery point SEI
+      OutputNALUnit nalu(NAL_UNIT_PREFIX_SEI);
+      m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+      m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+
+      SEIRecoveryPoint sei_recovery_point;
+      sei_recovery_point.m_recoveryPocCnt    = 0;
+      sei_recovery_point.m_exactMatchingFlag = ( pcSlice->getPOC() == 0 ) ? (true) : (false);
+      sei_recovery_point.m_brokenLinkFlag    = false;
+#if ALLOW_RECOVERY_POINT_AS_RAP
+      if(m_pcCfg->getDecodingRefreshType() == 3)
+      {
+        m_iLastRecoveryPicPOC = pocCurr;
+      }
+#endif
+
+      m_seiWriter.writeSEImessage( nalu.m_Bitstream, sei_recovery_point, pcSlice->getSPS() );
+      writeRBSPTrailingBits(nalu.m_Bitstream);
+      accessUnit.push_back(new NALUnitEBSP(nalu));
+    }
+
+    if( m_pcEncTop->getNoDisplaySEITLayer() )
+    {
+      if( pcSlice->getTLayer() >= m_pcEncTop->getNoDisplaySEITLayer() )
+      {
+        // No display SEI
+        OutputNALUnit nalu(NAL_UNIT_PREFIX_SEI);
+        m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+        m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+
+        SEINoDisplay seiNoDisplay;
+        seiNoDisplay.m_noDisplay = true;
+
+        m_seiWriter.writeSEImessage( nalu.m_Bitstream, seiNoDisplay, pcSlice->getSPS() );
+        writeRBSPTrailingBits(nalu.m_Bitstream);
+        accessUnit.push_back(new NALUnitEBSP(nalu));
+      }
+    }
+
+    /* use the main bitstream buffer for storing the marshalled picture */
+    m_pcEntropyCoder->setBitstream(NULL);
+
+    pcSlice = pcPic->getSlice(0);
+
+    if (pcSlice->getSPS()->getUseSAO())
+    {
+      Bool sliceEnabled[MAX_NUM_COMPONENT];
+      TComBitCounter tempBitCounter;
+      tempBitCounter.resetBits();
+      m_pcEncTop->getRDGoOnSbacCoder()->setBitstream(&tempBitCounter);
+      m_pcSAO->initRDOCabacCoder(m_pcEncTop->getRDGoOnSbacCoder(), pcSlice);
+      m_pcSAO->SAOProcess(pcPic, sliceEnabled, pcPic->getSlice(0)->getLambdas()
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+                          , m_pcCfg->getSaoCtuBoundary()
+#endif
+                         );
+      m_pcSAO->PCMLFDisableProcess(pcPic);
+      m_pcEncTop->getRDGoOnSbacCoder()->setBitstream(NULL);
+
+      //assign SAO slice header
+      for(Int s=0; s< uiNumSliceSegments; s++)
+      {
+        pcPic->getSlice(s)->setSaoEnabledFlag(CHANNEL_TYPE_LUMA, sliceEnabled[COMPONENT_Y]);
+        assert(sliceEnabled[COMPONENT_Cb] == sliceEnabled[COMPONENT_Cr]);
+        pcPic->getSlice(s)->setSaoEnabledFlag(CHANNEL_TYPE_CHROMA, sliceEnabled[COMPONENT_Cb]);
+      }
+    }
+
+    // pcSlice is currently slice 0.
+    Int64 binCountsInNalUnits   = 0; // For implementation of cabac_zero_word stuffing (section 7.4.3.10)
+    Int64 numBytesInVclNalUnits = 0; // For implementation of cabac_zero_word stuffing (section 7.4.3.10)
+
+    for( UInt sliceSegmentStartCtuTsAddr = 0, sliceIdxCount=0; sliceSegmentStartCtuTsAddr < pcPic->getPicSym()->getNumberOfCtusInFrame(); sliceIdxCount++, sliceSegmentStartCtuTsAddr=pcSlice->getSliceSegmentCurEndCtuTsAddr() )
+    {
+      pcSlice = pcPic->getSlice(sliceIdxCount);
+      if(sliceIdxCount > 0 && pcSlice->getSliceType()!= I_SLICE)
+      {
+        pcSlice->checkColRefIdx(sliceIdxCount, pcPic);
+      }
+      pcPic->setCurrSliceIdx(sliceIdxCount);
+      m_pcSliceEncoder->setSliceIdx(sliceIdxCount);
+
+      pcSlice->setRPS(pcPic->getSlice(0)->getRPS());
+      pcSlice->setRPSidx(pcPic->getSlice(0)->getRPSidx());
+
+      for ( UInt ui = 0 ; ui < numSubstreams; ui++ )
+      {
+        substreamsOut[ui].clear();
+      }
+
+      m_pcEntropyCoder->setEntropyCoder   ( m_pcCavlcCoder, pcSlice );
+      m_pcEntropyCoder->resetEntropy      ();
+      /* start slice NALunit */
+      OutputNALUnit nalu( pcSlice->getNalUnitType(), pcSlice->getTLayer() );
+      m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+
+      pcSlice->setNoRaslOutputFlag(false);
+      if (pcSlice->isIRAP())
+      {
+        if (pcSlice->getNalUnitType() >= NAL_UNIT_CODED_SLICE_BLA_W_LP && pcSlice->getNalUnitType() <= NAL_UNIT_CODED_SLICE_IDR_N_LP)
+        {
+          pcSlice->setNoRaslOutputFlag(true);
+        }
+        //the inference for NoOutputPriorPicsFlag
+        // KJS: This cannot happen at the encoder
+        if (!m_bFirst && pcSlice->isIRAP() && pcSlice->getNoRaslOutputFlag())
+        {
+          if (pcSlice->getNalUnitType() == NAL_UNIT_CODED_SLICE_CRA)
+          {
+            pcSlice->setNoOutputPriorPicsFlag(true);
+          }
+        }
+      }
+
+      tmpBitsBeforeWriting = m_pcEntropyCoder->getNumberOfWrittenBits();
+      m_pcEntropyCoder->encodeSliceHeader(pcSlice);
+      actualHeadBits += ( m_pcEntropyCoder->getNumberOfWrittenBits() - tmpBitsBeforeWriting );
+
+      pcSlice->setFinalized(true);
+
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+      g_bFinalEncode = true;
+#endif
+
+      pcSlice->clearSubstreamSizes(  );
+      {
+        UInt numBinsCoded = 0;
+        m_pcSliceEncoder->encodeSlice(pcPic, &(substreamsOut[0]), numBinsCoded);
+        binCountsInNalUnits+=numBinsCoded;
+      }
+
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+      g_bFinalEncode = false;
+#endif
+
+      {
+        // Construct the final bitstream by concatenating substreams.
+        // The final bitstream is either nalu.m_Bitstream or pcBitstreamRedirect;
+        // Complete the slice header info.
+        m_pcEntropyCoder->setEntropyCoder   ( m_pcCavlcCoder, pcSlice );
+        m_pcEntropyCoder->setBitstream(&nalu.m_Bitstream);
+        m_pcEntropyCoder->encodeTilesWPPEntryPoint( pcSlice );
+
+        // Append substreams...
+        TComOutputBitstream *pcOut = pcBitstreamRedirect;
+        const Int numZeroSubstreamsAtStartOfSlice  = pcPic->getSubstreamForCtuAddr(pcSlice->getSliceSegmentCurStartCtuTsAddr(), false, pcSlice);
+        const Int numSubstreamsToCode  = pcSlice->getNumberOfSubstreamSizes()+1;
+        for ( UInt ui = 0 ; ui < numSubstreamsToCode; ui++ )
+        {
+          pcOut->addSubstream(&(substreamsOut[ui+numZeroSubstreamsAtStartOfSlice]));
+        }
+      }
+
+      // If current NALU is the first NALU of slice (containing slice header) and more NALUs exist (due to multiple dependent slices) then buffer it.
+      // If current NALU is the last NALU of slice and a NALU was buffered, then (a) write current NALU (b) update and write buffered NALU at appropriate location in NALU list.
+      Bool bNALUAlignedWrittenToList    = false; // used to ensure current NALU is not written more than once to the NALU list.
+      xAttachSliceDataToNalUnit(nalu, pcBitstreamRedirect);
+      accessUnit.push_back(new NALUnitEBSP(nalu));
+      actualTotalBits += UInt(accessUnit.back()->m_nalUnitData.str().size()) * 8;
+      numBytesInVclNalUnits += Int64(accessUnit.back()->m_nalUnitData.str().size());
+      bNALUAlignedWrittenToList = true;
+
+      if (!bNALUAlignedWrittenToList)
+      {
+        nalu.m_Bitstream.writeAlignZero();
+        accessUnit.push_back(new NALUnitEBSP(nalu));
+      }
+
+      if( ( m_pcCfg->getPictureTimingSEIEnabled() || m_pcCfg->getDecodingUnitInfoSEIEnabled() ) &&
+          ( pcSlice->getSPS()->getVuiParametersPresentFlag() ) &&
+          ( ( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getNalHrdParametersPresentFlag() )
+         || ( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getVclHrdParametersPresentFlag() ) ) &&
+          ( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getSubPicCpbParamsPresentFlag() ) )
+      {
+          UInt numNalus = 0;
+        UInt numRBSPBytes = 0;
+        for (AccessUnit::const_iterator it = accessUnit.begin(); it != accessUnit.end(); it++)
+        {
+          UInt numRBSPBytes_nal = UInt((*it)->m_nalUnitData.str().size());
+          if ((*it)->m_nalUnitType != NAL_UNIT_PREFIX_SEI && (*it)->m_nalUnitType != NAL_UNIT_SUFFIX_SEI)
+          {
+            numRBSPBytes += numRBSPBytes_nal;
+            numNalus ++;
+          }
+        }
+        accumBitsDU[ pcSlice->getSliceIdx() ] = ( numRBSPBytes << 3 );
+        accumNalsDU[ pcSlice->getSliceIdx() ] = numNalus;   // SEI not counted for bit count; hence shouldn't be counted for # of NALUs - only for consistency
+      }
+    } // end iteration over slices
+
+    // cabac_zero_words processing
+    {
+      const Int log2subWidthCxsubHeightC = (pcPic->getComponentScaleX(COMPONENT_Cb)+pcPic->getComponentScaleY(COMPONENT_Cb));
+      const Int minCuWidth  = pcPic->getMinCUWidth();
+      const Int minCuHeight = pcPic->getMinCUHeight();
+      const Int paddedWidth = ((pcSlice->getSPS()->getPicWidthInLumaSamples()  + minCuWidth  - 1) / minCuWidth) * minCuWidth;
+      const Int paddedHeight= ((pcSlice->getSPS()->getPicHeightInLumaSamples() + minCuHeight - 1) / minCuHeight) * minCuHeight;
+      const Int rawBits = paddedWidth * paddedHeight *
+                             (g_bitDepth[CHANNEL_TYPE_LUMA] + 2*(g_bitDepth[CHANNEL_TYPE_CHROMA]>>log2subWidthCxsubHeightC));
+      const Int64 threshold = (32LL/3)*numBytesInVclNalUnits + (rawBits/32);
+      if (binCountsInNalUnits >= threshold)
+      {
+        // need to add additional cabac zero words (each one accounts for 3 bytes (=00 00 03)) to increase numBytesInVclNalUnits
+        const Int64 targetNumBytesInVclNalUnits = ((binCountsInNalUnits - (rawBits/32))*3+31)/32;
+        const Int64 numberOfAdditionalBytesNeeded=targetNumBytesInVclNalUnits - numBytesInVclNalUnits;
+
+        if (numberOfAdditionalBytesNeeded>0) // It should be!
+        {
+          const Int64 numberOfAdditionalCabacZeroWords=(numberOfAdditionalBytesNeeded+2)/3;
+          const Int64 numberOfAdditionalCabacZeroBytes=numberOfAdditionalCabacZeroWords*3;
+          if (m_pcCfg->getCabacZeroWordPaddingEnabled())
+          {
+            std::vector<Char> zeroBytesPadding(numberOfAdditionalCabacZeroBytes, Char(0));
+            for(Int64 i=0; i<numberOfAdditionalCabacZeroWords; i++)
+            {
+              zeroBytesPadding[i*3+2]=3;  // 00 00 03
+            }
+            accessUnit.back()->m_nalUnitData.write(&(zeroBytesPadding[0]), numberOfAdditionalCabacZeroBytes);
+            printf("Adding %lld bytes of padding\n", numberOfAdditionalCabacZeroWords*3);
+          }
+          else
+          {
+            printf("Standard would normally require adding %lld bytes of padding\n", numberOfAdditionalCabacZeroWords*3);
+          }
+        }
+      }
+    }
+
+    pcPic->compressMotion();
+
+    //-- For time output for each slice
+    Double dEncTime = (Double)(clock()-iBeforeTime) / CLOCKS_PER_SEC;
+
+    std::string digestStr;
+    if (m_pcCfg->getDecodedPictureHashSEIEnabled())
+    {
+      /* calculate MD5sum for entire reconstructed picture */
+      SEIDecodedPictureHash sei_recon_picture_digest;
+      if(m_pcCfg->getDecodedPictureHashSEIEnabled() == 1)
+      {
+        sei_recon_picture_digest.method = SEIDecodedPictureHash::MD5;
+        UInt numChar=calcMD5(*pcPic->getPicYuvRec(), sei_recon_picture_digest.m_digest);
+        digestStr = digestToString(sei_recon_picture_digest.m_digest, numChar);
+      }
+      else if(m_pcCfg->getDecodedPictureHashSEIEnabled() == 2)
+      {
+        sei_recon_picture_digest.method = SEIDecodedPictureHash::CRC;
+        UInt numChar=calcCRC(*pcPic->getPicYuvRec(), sei_recon_picture_digest.m_digest);
+        digestStr = digestToString(sei_recon_picture_digest.m_digest, numChar);
+      }
+      else if(m_pcCfg->getDecodedPictureHashSEIEnabled() == 3)
+      {
+        sei_recon_picture_digest.method = SEIDecodedPictureHash::CHECKSUM;
+        UInt numChar=calcChecksum(*pcPic->getPicYuvRec(), sei_recon_picture_digest.m_digest);
+        digestStr = digestToString(sei_recon_picture_digest.m_digest, numChar);
+      }
+      OutputNALUnit nalu(NAL_UNIT_SUFFIX_SEI, pcSlice->getTLayer());
+
+      /* write the SEI messages */
+      m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+      m_seiWriter.writeSEImessage(nalu.m_Bitstream, sei_recon_picture_digest, pcSlice->getSPS());
+      writeRBSPTrailingBits(nalu.m_Bitstream);
+
+      accessUnit.insert(accessUnit.end(), new NALUnitEBSP(nalu));
+    }
+    if (m_pcCfg->getTemporalLevel0IndexSEIEnabled())
+    {
+      SEITemporalLevel0Index sei_temporal_level0_index;
+      if (pcSlice->getRapPicFlag())
+      {
+        m_tl0Idx = 0;
+        m_rapIdx = (m_rapIdx + 1) & 0xFF;
+      }
+      else
+      {
+        m_tl0Idx = (m_tl0Idx + (pcSlice->getTLayer() ? 0 : 1)) & 0xFF;
+      }
+      sei_temporal_level0_index.tl0Idx = m_tl0Idx;
+      sei_temporal_level0_index.rapIdx = m_rapIdx;
+
+      OutputNALUnit nalu(NAL_UNIT_PREFIX_SEI);
+
+      /* write the SEI messages */
+      m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+      m_seiWriter.writeSEImessage(nalu.m_Bitstream, sei_temporal_level0_index, pcSlice->getSPS());
+      writeRBSPTrailingBits(nalu.m_Bitstream);
+
+      /* insert the SEI message NALUnit before any Slice NALUnits */
+      AccessUnit::iterator it = find_if(accessUnit.begin(), accessUnit.end(), mem_fun(&NALUnit::isSlice));
+      accessUnit.insert(it, new NALUnitEBSP(nalu));
+    }
+
+    m_pcCfg->setEncodedFlag(iGOPid, true);
+    xCalculateAddPSNR( pcPic, pcPic->getPicYuvRec(), accessUnit, dEncTime, snr_conversion, printFrameMSE );
+
+    //In case of field coding, compute the interlaced PSNR for both fields
+    if(isField)
+    {
+      Bool bothFieldsAreEncoded = false;
+      Int correspondingFieldPOC = pcPic->getPOC();
+      Int currentPicGOPPoc = m_pcCfg->getGOPEntry(iGOPid).m_POC;
+      if(pcPic->getPOC() == 0)
+      {
+        // particular case for POC 0 and 1. 
+        // If they are not encoded first and separately from other pictures, we need to change this 
+        // POC 0 is always encoded first then POC 1 is encoded
+        bothFieldsAreEncoded = false;
+      }
+      else if(pcPic->getPOC() == 1)
+      {
+        // if we are at POC 1, POC 0 has been encoded for sure
+        correspondingFieldPOC = 0;
+        bothFieldsAreEncoded = true;
+      }
+      else 
+      {
+        if(pcPic->getPOC()%2 == 1)
+        {
+          correspondingFieldPOC -= 1; // all odd POC are associated with the preceding even POC (e.g poc 1 is associated to poc 0)
+          currentPicGOPPoc      -= 1;
+        }
+        else
+        {
+          correspondingFieldPOC += 1; // all even POC are associated with the following odd POC (e.g poc 0 is associated to poc 1)
+          currentPicGOPPoc      += 1;
+        }
+        for(Int i = 0; i < m_iGopSize; i ++)
+        {
+          if(m_pcCfg->getGOPEntry(i).m_POC == currentPicGOPPoc)
+          {
+            bothFieldsAreEncoded = m_pcCfg->getGOPEntry(i).m_isEncoded;
+            break;
+          }
+        }
+      }
+
+      if(bothFieldsAreEncoded)
+      {        
+        //get complementary top field
+        TComList<TComPic*>::iterator   iterPic = rcListPic.begin();
+        while ((*iterPic)->getPOC() != correspondingFieldPOC)
+        {
+          iterPic ++;
+        }
+        TComPic* correspondingFieldPic = *(iterPic);
+
+        if( (pcPic->isTopField() && isTff) || (!pcPic->isTopField() && !isTff))
+        {
+          xCalculateInterlacedAddPSNR(pcPic, correspondingFieldPic, pcPic->getPicYuvRec(), correspondingFieldPic->getPicYuvRec(), accessUnit, dEncTime, snr_conversion, printFrameMSE );
+        }
+        else
+        {
+          xCalculateInterlacedAddPSNR(correspondingFieldPic, pcPic, correspondingFieldPic->getPicYuvRec(), pcPic->getPicYuvRec(), accessUnit, dEncTime, snr_conversion, printFrameMSE );
+        }
+      }
+    }
+
+#if VERBOSE_FRAME
+    if (!digestStr.empty())
+    {
+      if(m_pcCfg->getDecodedPictureHashSEIEnabled() == 1)
+      {
+        printf(" [MD5:%s]", digestStr.c_str());
+      }
+      else if(m_pcCfg->getDecodedPictureHashSEIEnabled() == 2)
+      {
+        printf(" [CRC:%s]", digestStr.c_str());
+      }
+      else if(m_pcCfg->getDecodedPictureHashSEIEnabled() == 3)
+      {
+        printf(" [Checksum:%s]", digestStr.c_str());
+      }
+    }
+#endif
+
+    if ( m_pcCfg->getUseRateCtrl() )
+    {
+      Double avgQP     = m_pcRateCtrl->getRCPic()->calAverageQP();
+      Double avgLambda = m_pcRateCtrl->getRCPic()->calAverageLambda();
+      if ( avgLambda < 0.0 )
+      {
+        avgLambda = lambda;
+      }
+
+      m_pcRateCtrl->getRCPic()->updateAfterPicture( actualHeadBits, actualTotalBits, avgQP, avgLambda, pcSlice->getSliceType());
+      m_pcRateCtrl->getRCPic()->addToPictureLsit( m_pcRateCtrl->getPicList() );
+
+      m_pcRateCtrl->getRCSeq()->updateAfterPic( actualTotalBits );
+      if ( pcSlice->getSliceType() != I_SLICE )
+      {
+        m_pcRateCtrl->getRCGOP()->updateAfterPicture( actualTotalBits );
+      }
+      else    // for intra picture, the estimated bits are used to update the current status in the GOP
+      {
+        m_pcRateCtrl->getRCGOP()->updateAfterPicture( estimatedBits );
+      }
+    }
+
+    if( ( m_pcCfg->getPictureTimingSEIEnabled() || m_pcCfg->getDecodingUnitInfoSEIEnabled() ) &&
+        ( pcSlice->getSPS()->getVuiParametersPresentFlag() ) &&
+        ( ( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getNalHrdParametersPresentFlag() )
+        || ( pcSlice->getSPS()->getVuiParameters()->getHrdParameters()->getVclHrdParametersPresentFlag() ) ) )
+    {
+      TComVUI *vui = pcSlice->getSPS()->getVuiParameters();
+      TComHRD *hrd = vui->getHrdParameters();
+
+      if( hrd->getSubPicCpbParamsPresentFlag() )
+      {
+        Int i;
+        UInt64 ui64Tmp;
+        UInt uiPrev = 0;
+        UInt numDU = ( pictureTimingSEI.m_numDecodingUnitsMinus1 + 1 );
+        UInt *pCRD = &pictureTimingSEI.m_duCpbRemovalDelayMinus1[0];
+        UInt maxDiff = ( hrd->getTickDivisorMinus2() + 2 ) - 1;
+
+        for( i = 0; i < numDU; i ++ )
+        {
+          pictureTimingSEI.m_numNalusInDuMinus1[ i ]       = ( i == 0 ) ? ( accumNalsDU[ i ] - 1 ) : ( accumNalsDU[ i ] - accumNalsDU[ i - 1] - 1 );
+        }
+
+        if( numDU == 1 )
+        {
+          pCRD[ 0 ] = 0; /* don't care */
+        }
+        else
+        {
+          pCRD[ numDU - 1 ] = 0;/* by definition */
+          UInt tmp = 0;
+          UInt accum = 0;
+
+          for( i = ( numDU - 2 ); i >= 0; i -- )
+          {
+            ui64Tmp = ( ( ( accumBitsDU[ numDU - 1 ]  - accumBitsDU[ i ] ) * ( vui->getTimingInfo()->getTimeScale() / vui->getTimingInfo()->getNumUnitsInTick() ) * ( hrd->getTickDivisorMinus2() + 2 ) ) / ( m_pcCfg->getTargetBitrate() ) );
+            if( (UInt)ui64Tmp > maxDiff )
+            {
+              tmp ++;
+            }
+          }
+          uiPrev = 0;
+
+          UInt flag = 0;
+          for( i = ( numDU - 2 ); i >= 0; i -- )
+          {
+            flag = 0;
+            ui64Tmp = ( ( ( accumBitsDU[ numDU - 1 ]  - accumBitsDU[ i ] ) * ( vui->getTimingInfo()->getTimeScale() / vui->getTimingInfo()->getNumUnitsInTick() ) * ( hrd->getTickDivisorMinus2() + 2 ) ) / ( m_pcCfg->getTargetBitrate() ) );
+
+            if( (UInt)ui64Tmp > maxDiff )
+            {
+              if(uiPrev >= maxDiff - tmp)
+              {
+                ui64Tmp = uiPrev + 1;
+                flag = 1;
+              }
+              else                            ui64Tmp = maxDiff - tmp + 1;
+            }
+            pCRD[ i ] = (UInt)ui64Tmp - uiPrev - 1;
+            if( (Int)pCRD[ i ] < 0 )
+            {
+              pCRD[ i ] = 0;
+            }
+            else if (tmp > 0 && flag == 1)
+            {
+              tmp --;
+            }
+            accum += pCRD[ i ] + 1;
+            uiPrev = accum;
+          }
+        }
+      }
+
+      if( m_pcCfg->getPictureTimingSEIEnabled() )
+      {
+        {
+          OutputNALUnit nalu(NAL_UNIT_PREFIX_SEI, pcSlice->getTLayer());
+          m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+          pictureTimingSEI.m_picStruct = (isField && pcSlice->getPic()->isTopField())? 1 : isField? 2 : 0;
+          m_seiWriter.writeSEImessage(nalu.m_Bitstream, pictureTimingSEI, pcSlice->getSPS());
+          writeRBSPTrailingBits(nalu.m_Bitstream);
+          UInt seiPositionInAu = xGetFirstSeiLocation(accessUnit);
+          UInt offsetPosition = m_activeParameterSetSEIPresentInAU
+                                    + m_bufferingPeriodSEIPresentInAU;    // Insert PT SEI after APS and BP SEI
+          AccessUnit::iterator it = accessUnit.begin();
+          for(Int j = 0; j < seiPositionInAu + offsetPosition; j++)
+          {
+            it++;
+          }
+          accessUnit.insert(it, new NALUnitEBSP(nalu));
+          m_pictureTimingSEIPresentInAU = true;
+        }
+
+        if ( m_pcCfg->getScalableNestingSEIEnabled() ) // put picture timing SEI into scalable nesting SEI
+        {
+          OutputNALUnit nalu(NAL_UNIT_PREFIX_SEI, pcSlice->getTLayer());
+          m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+          scalableNestingSEI.m_nestedSEIs.clear();
+          scalableNestingSEI.m_nestedSEIs.push_back(&pictureTimingSEI);
+          m_seiWriter.writeSEImessage(nalu.m_Bitstream, scalableNestingSEI, pcSlice->getSPS());
+          writeRBSPTrailingBits(nalu.m_Bitstream);
+          UInt seiPositionInAu = xGetFirstSeiLocation(accessUnit);
+          UInt offsetPosition = m_activeParameterSetSEIPresentInAU
+            + m_bufferingPeriodSEIPresentInAU + m_pictureTimingSEIPresentInAU + m_nestedBufferingPeriodSEIPresentInAU;    // Insert PT SEI after APS and BP SEI
+          AccessUnit::iterator it = accessUnit.begin();
+          for(Int j = 0; j < seiPositionInAu + offsetPosition; j++)
+          {
+            it++;
+          }
+          accessUnit.insert(it, new NALUnitEBSP(nalu));
+          m_nestedPictureTimingSEIPresentInAU = true;
+        }
+      }
+
+      if( m_pcCfg->getDecodingUnitInfoSEIEnabled() && hrd->getSubPicCpbParamsPresentFlag() )
+      {
+        m_pcEntropyCoder->setEntropyCoder(m_pcCavlcCoder, pcSlice);
+        for( Int i = 0; i < ( pictureTimingSEI.m_numDecodingUnitsMinus1 + 1 ); i ++ )
+        {
+          OutputNALUnit nalu(NAL_UNIT_PREFIX_SEI, pcSlice->getTLayer());
+
+          SEIDecodingUnitInfo tempSEI;
+          tempSEI.m_decodingUnitIdx = i;
+          tempSEI.m_duSptCpbRemovalDelay = pictureTimingSEI.m_duCpbRemovalDelayMinus1[i] + 1;
+          tempSEI.m_dpbOutputDuDelayPresentFlag = false;
+          tempSEI.m_picSptDpbOutputDuDelay = picSptDpbOutputDuDelay;
+
+          // Insert the first one in the right location, before the first slice
+          if(i == 0)
+          {
+            // Insert before the first slice.
+            m_seiWriter.writeSEImessage(nalu.m_Bitstream, tempSEI, pcSlice->getSPS());
+            writeRBSPTrailingBits(nalu.m_Bitstream);
+
+            UInt seiPositionInAu = xGetFirstSeiLocation(accessUnit);
+            UInt offsetPosition = m_activeParameterSetSEIPresentInAU
+                                  + m_bufferingPeriodSEIPresentInAU
+                                  + m_pictureTimingSEIPresentInAU;  // Insert DU info SEI after APS, BP and PT SEI
+            AccessUnit::iterator it = accessUnit.begin();
+            for(Int j = 0; j < seiPositionInAu + offsetPosition; j++)
+            {
+              it++;
+            }
+            accessUnit.insert(it, new NALUnitEBSP(nalu));
+          }
+          else
+          {
+            // For the second decoding unit onwards we know how many NALUs are present
+            AccessUnit::iterator it = accessUnit.begin();
+            for (Int ctr = 0; it != accessUnit.end(); it++)
+            {
+              if(ctr == accumNalsDU[ i - 1 ])
+              {
+                // Insert before the first slice.
+                m_seiWriter.writeSEImessage(nalu.m_Bitstream, tempSEI, pcSlice->getSPS());
+                writeRBSPTrailingBits(nalu.m_Bitstream);
+
+                accessUnit.insert(it, new NALUnitEBSP(nalu));
+                break;
+              }
+              if ((*it)->m_nalUnitType != NAL_UNIT_PREFIX_SEI && (*it)->m_nalUnitType != NAL_UNIT_SUFFIX_SEI)
+              {
+                ctr++;
+              }
+            }
+          }
+        }
+      }
+    }
+
+    xResetNonNestedSEIPresentFlags();
+    xResetNestedSEIPresentFlags();
+
+    pcPic->getPicYuvRec()->copyToPic(pcPicYuvRecOut);
+
+    pcPic->setReconMark   ( true );
+    m_bFirst = false;
+    m_iNumPicCoded++;
+    m_totalCoded ++;
+    /* logging: insert a newline at end of picture period */
+#if VERBOSE_FRAME
+    printf("\n");
+    fflush(stdout);
+#endif
+
+#if EFFICIENT_FIELD_IRAP
+    if(IRAPtoReorder)
+    {
+      if(swapIRAPForward)
+      {
+        if(iGOPid == IRAPGOPid)
+        {
+          iGOPid = IRAPGOPid +1;
+          IRAPtoReorder = false;
+        }
+        else if(iGOPid == IRAPGOPid +1)
+        {
+          iGOPid --;
+        }
+      }
+      else
+      {
+        if(iGOPid == IRAPGOPid)
+        {
+          iGOPid = IRAPGOPid -1;
+        }
+        else if(iGOPid == IRAPGOPid -1)
+        {
+          iGOPid = IRAPGOPid;
+          IRAPtoReorder = false;
+        }
+      }
+    }
+#endif
+  } // iGOPid-loop
+
+  delete pcBitstreamRedirect;
+
+  if( accumBitsDU != NULL) delete accumBitsDU;
+  if( accumNalsDU != NULL) delete accumNalsDU;
+
+  assert ( (m_iNumPicCoded == iNumPicRcvd) );
+}
+
+Void TEncGOP::printOutSummary(UInt uiNumAllPicCoded, Bool isField, const Bool printMSEBasedSNR, const Bool printSequenceMSE)
+{
+  assert (uiNumAllPicCoded == m_gcAnalyzeAll.getNumPic());
+
+
+  //--CFG_KDY
+  const Int rateMultiplier=(isField?2:1);
+  m_gcAnalyzeAll.setFrmRate( m_pcCfg->getFrameRate()*rateMultiplier );
+  m_gcAnalyzeI.setFrmRate( m_pcCfg->getFrameRate()*rateMultiplier );
+  m_gcAnalyzeP.setFrmRate( m_pcCfg->getFrameRate()*rateMultiplier );
+  m_gcAnalyzeB.setFrmRate( m_pcCfg->getFrameRate()*rateMultiplier );
+  const ChromaFormat chFmt = m_pcCfg->getChromaFormatIdc();
+
+  //-- all
+  printf( "\n\nSUMMARY --------------------------------------------------------\n" );
+  m_gcAnalyzeAll.printOut('a', chFmt, printMSEBasedSNR, printSequenceMSE);
+
+  printf( "\n\nI Slices--------------------------------------------------------\n" );
+  m_gcAnalyzeI.printOut('i', chFmt, printMSEBasedSNR, printSequenceMSE);
+
+  printf( "\n\nP Slices--------------------------------------------------------\n" );
+  m_gcAnalyzeP.printOut('p', chFmt, printMSEBasedSNR, printSequenceMSE);
+
+  printf( "\n\nB Slices--------------------------------------------------------\n" );
+  m_gcAnalyzeB.printOut('b', chFmt, printMSEBasedSNR, printSequenceMSE);
+
+#if _SUMMARY_OUT_
+  m_gcAnalyzeAll.printSummary(chFmt, printSequenceMSE);
+#endif
+#if _SUMMARY_PIC_
+  m_gcAnalyzeI.printSummary(chFmt, printSequenceMSE,'I');
+  m_gcAnalyzeP.printSummary(chFmt, printSequenceMSE,'P');
+  m_gcAnalyzeB.printSummary(chFmt, printSequenceMSE,'B');
+#endif
+
+  if(isField)
+  {
+    //-- interlaced summary
+    m_gcAnalyzeAll_in.setFrmRate( m_pcCfg->getFrameRate());
+    m_gcAnalyzeAll_in.setBits(m_gcAnalyzeAll.getBits());
+    // prior to the above statement, the interlace analyser does not contain the correct total number of bits.
+
+    printf( "\n\nSUMMARY INTERLACED ---------------------------------------------\n" );
+    m_gcAnalyzeAll_in.printOut('a', chFmt, printMSEBasedSNR, printSequenceMSE);
+
+#if _SUMMARY_OUT_
+    m_gcAnalyzeAll_in.printSummary(chFmt, printSequenceMSE);
+#endif
+  }
+
+  printf("\nRVM: %.3lf\n" , xCalculateRVM());
+}
+
+Void TEncGOP::preLoopFilterPicAll( TComPic* pcPic, UInt64& ruiDist )
+{
+  Bool bCalcDist = false;
+  m_pcLoopFilter->setCfg(m_pcCfg->getLFCrossTileBoundaryFlag());
+  m_pcLoopFilter->loopFilterPic( pcPic );
+
+  if (!bCalcDist)
+    ruiDist = xFindDistortionFrame(pcPic->getPicYuvOrg(), pcPic->getPicYuvRec());
+}
+
+// ====================================================================================================================
+// Protected member functions
+// ====================================================================================================================
+
+
+Void TEncGOP::xInitGOP( Int iPOCLast, Int iNumPicRcvd, TComList<TComPic*>& rcListPic, TComList<TComPicYuv*>& rcListPicYuvRecOut, Bool isField )
+{
+  assert( iNumPicRcvd > 0 );
+  //  Exception for the first frames
+  if ( ( isField && (iPOCLast == 0 || iPOCLast == 1) ) || (!isField  && (iPOCLast == 0))  )
+  {
+    m_iGopSize    = 1;
+  }
+  else
+  {
+    m_iGopSize    = m_pcCfg->getGOPSize();
+  }
+  assert (m_iGopSize > 0);
+
+  return;
+}
+
+
+Void TEncGOP::xGetBuffer( TComList<TComPic*>&      rcListPic,
+                         TComList<TComPicYuv*>&    rcListPicYuvRecOut,
+                         Int                       iNumPicRcvd,
+                         Int                       iTimeOffset,
+                         TComPic*&                 rpcPic,
+                         TComPicYuv*&              rpcPicYuvRecOut,
+                         Int                       pocCurr,
+                         Bool                      isField)
+{
+  Int i;
+  //  Rec. output
+  TComList<TComPicYuv*>::iterator     iterPicYuvRec = rcListPicYuvRecOut.end();
+
+  if (isField && pocCurr > 1 && m_iGopSize!=1)
+  {
+    iTimeOffset--;
+  }
+
+  for ( i = 0; i < (iNumPicRcvd - iTimeOffset + 1); i++ )
+  {
+    iterPicYuvRec--;
+  }
+
+  rpcPicYuvRecOut = *(iterPicYuvRec);
+
+  //  Current pic.
+  TComList<TComPic*>::iterator        iterPic       = rcListPic.begin();
+  while (iterPic != rcListPic.end())
+  {
+    rpcPic = *(iterPic);
+    rpcPic->setCurrSliceIdx(0);
+    if (rpcPic->getPOC() == pocCurr)
+    {
+      break;
+    }
+    iterPic++;
+  }
+
+  assert (rpcPic != NULL);
+  assert (rpcPic->getPOC() == pocCurr);
+
+  return;
+}
+
+UInt64 TEncGOP::xFindDistortionFrame (TComPicYuv* pcPic0, TComPicYuv* pcPic1)
+{
+  UInt64  uiTotalDiff = 0;
+
+  for(Int chan=0; chan<pcPic0 ->getNumberValidComponents(); chan++)
+  {
+    const ComponentID ch=ComponentID(chan);
+    Pel*  pSrc0   = pcPic0 ->getAddr(ch);
+    Pel*  pSrc1   = pcPic1 ->getAddr(ch);
+    UInt  uiShift     = 2 * DISTORTION_PRECISION_ADJUSTMENT(g_bitDepth[toChannelType(ch)]-8);
+
+    const Int   iStride = pcPic0->getStride(ch);
+    const Int   iWidth  = pcPic0->getWidth(ch);
+    const Int   iHeight = pcPic0->getHeight(ch);
+
+    for(Int y = 0; y < iHeight; y++ )
+    {
+      for(Int x = 0; x < iWidth; x++ )
+      {
+        Intermediate_Int iTemp = pSrc0[x] - pSrc1[x];
+        uiTotalDiff += UInt64((iTemp*iTemp) >> uiShift);
+      }
+      pSrc0 += iStride;
+      pSrc1 += iStride;
+    }
+  }
+
+  return uiTotalDiff;
+}
+
#if VERBOSE_RATE
/* Human-readable name for a NAL unit type, used only by the per-NALU rate
 * logging below. Note that both prefix and suffix SEI map to the same "SEI"
 * string, and any type not listed prints as "UNK". */
static const Char* nalUnitTypeToString(NalUnitType type)
{
  switch (type)
  {
    case NAL_UNIT_CODED_SLICE_TRAIL_R:    return "TRAIL_R";
    case NAL_UNIT_CODED_SLICE_TRAIL_N:    return "TRAIL_N";
    case NAL_UNIT_CODED_SLICE_TSA_R:      return "TSA_R";
    case NAL_UNIT_CODED_SLICE_TSA_N:      return "TSA_N";
    case NAL_UNIT_CODED_SLICE_STSA_R:     return "STSA_R";
    case NAL_UNIT_CODED_SLICE_STSA_N:     return "STSA_N";
    case NAL_UNIT_CODED_SLICE_BLA_W_LP:   return "BLA_W_LP";
    case NAL_UNIT_CODED_SLICE_BLA_W_RADL: return "BLA_W_RADL";
    case NAL_UNIT_CODED_SLICE_BLA_N_LP:   return "BLA_N_LP";
    case NAL_UNIT_CODED_SLICE_IDR_W_RADL: return "IDR_W_RADL";
    case NAL_UNIT_CODED_SLICE_IDR_N_LP:   return "IDR_N_LP";
    case NAL_UNIT_CODED_SLICE_CRA:        return "CRA";
    case NAL_UNIT_CODED_SLICE_RADL_R:     return "RADL_R";
    case NAL_UNIT_CODED_SLICE_RADL_N:     return "RADL_N";
    case NAL_UNIT_CODED_SLICE_RASL_R:     return "RASL_R";
    case NAL_UNIT_CODED_SLICE_RASL_N:     return "RASL_N";
    case NAL_UNIT_VPS:                    return "VPS";
    case NAL_UNIT_SPS:                    return "SPS";
    case NAL_UNIT_PPS:                    return "PPS";
    case NAL_UNIT_ACCESS_UNIT_DELIMITER:  return "AUD";
    case NAL_UNIT_EOS:                    return "EOS";
    case NAL_UNIT_EOB:                    return "EOB";
    case NAL_UNIT_FILLER_DATA:            return "FILLER";
    case NAL_UNIT_PREFIX_SEI:             return "SEI";
    case NAL_UNIT_SUFFIX_SEI:             return "SEI";
    default:                              return "UNK";
  }
}
#endif
+
/** Compute per-component PSNR/MSE for one coded picture, accumulate the
 *  results in the per-slice-type analysers, and (under VERBOSE_FRAME) print
 *  the per-picture log line.
 *  \param pcPic       coded picture; provides original samples and slice info
 *  \param pcPicD      reconstructed picture the PSNR is measured on
 *  \param accessUnit  coded NAL units of this picture, used to count its bits
 *  \param dEncTime    elapsed encoding time for the log line (printed as %5.0f)
 *  \param conversion  optional colour-space conversion applied to the
 *                     reconstruction before measuring
 *  \param printFrameMSE also print per-frame MSE values in the log line
 */
Void TEncGOP::xCalculateAddPSNR( TComPic* pcPic, TComPicYuv* pcPicD, const AccessUnit& accessUnit, Double dEncTime, const InputColourSpaceConversion conversion, const Bool printFrameMSE )
{
  Double  dPSNR[MAX_NUM_COMPONENT];

  for(Int i=0; i<MAX_NUM_COMPONENT; i++)
  {
    dPSNR[i]=0.0;
  }

  // When a conversion is requested, measure in the converted colour space.
  TComPicYuv cscd;
  if (conversion!=IPCOLOURSPACE_UNCHANGED)
  {
    cscd.create(pcPicD->getWidth(COMPONENT_Y), pcPicD->getHeight(COMPONENT_Y), pcPicD->getChromaFormat(), g_uiMaxCUWidth, g_uiMaxCUHeight, g_uiMaxCUDepth);
    TVideoIOYuv::ColourSpaceConvert(*pcPicD, cscd, conversion, g_bitDepth, false);
  }
  TComPicYuv &picd=(conversion==IPCOLOURSPACE_UNCHANGED)?*pcPicD : cscd;

  //===== calculate PSNR =====
  Double MSEyuvframe[MAX_NUM_COMPONENT] = {0, 0, 0};

  for(Int chan=0; chan<pcPicD->getNumberValidComponents(); chan++)
  {
    const ComponentID ch=ComponentID(chan);
    // In converted mode compare against the "true" original, since the plain
    // original buffer may itself have been converted for coding.
    const Pel*  pOrg    = (conversion!=IPCOLOURSPACE_UNCHANGED) ? pcPic ->getPicYuvTrueOrg()->getAddr(ch) : pcPic ->getPicYuvOrg()->getAddr(ch);
    Pel*  pRec    = picd.getAddr(ch);
    const Int   iStride = pcPicD->getStride(ch);

    // Exclude the configured padding from the measured area; field coding
    // halves the vertical pad before the component scaling.
    const Int   iWidth  = pcPicD->getWidth (ch) - (m_pcEncTop->getPad(0) >> pcPic->getComponentScaleX(ch));
    const Int   iHeight = pcPicD->getHeight(ch) - ((m_pcEncTop->getPad(1) >> (pcPic->isField()?1:0)) >> pcPic->getComponentScaleY(ch));

    Int   iSize   = iWidth*iHeight;

    // Sum of squared differences for this component.
    UInt64 uiSSDtemp=0;
    for(Int y = 0; y < iHeight; y++ )
    {
      for(Int x = 0; x < iWidth; x++ )
      {
        Intermediate_Int iDiff = (Intermediate_Int)( pOrg[x] - pRec[x] );
        uiSSDtemp   += iDiff * iDiff;
      }
      pOrg += iStride;
      pRec += iStride;
    }
    // Peak value scaled to the coding bit depth; 999.99 marks a zero-SSD
    // (lossless) component.
    const Int maxval = 255 << (g_bitDepth[toChannelType(ch)] - 8);
    const Double fRefValue = (Double) maxval * maxval * iSize;
    dPSNR[ch]         = ( uiSSDtemp ? 10.0 * log10( fRefValue / (Double)uiSSDtemp ) : 999.99 );
    MSEyuvframe[ch]   = (Double)uiSSDtemp/(iSize);
  }


  /* calculate the size of the access unit, excluding:
   *  - any AnnexB contributions (start_code_prefix, zero_byte, etc.,)
   *  - SEI NAL units
   */
  UInt numRBSPBytes = 0;
  for (AccessUnit::const_iterator it = accessUnit.begin(); it != accessUnit.end(); it++)
  {
    UInt numRBSPBytes_nal = UInt((*it)->m_nalUnitData.str().size());
#if VERBOSE_RATE
    printf("*** %6s numBytesInNALunit: %u\n", nalUnitTypeToString((*it)->m_nalUnitType), numRBSPBytes_nal);
#endif
    if ((*it)->m_nalUnitType != NAL_UNIT_PREFIX_SEI && (*it)->m_nalUnitType != NAL_UNIT_SUFFIX_SEI)
    {
      numRBSPBytes += numRBSPBytes_nal;
    }
  }

  UInt uibits = numRBSPBytes * 8;
  // Remember the per-picture rate for the RVM computation at sequence end.
  m_vRVM_RP.push_back( uibits );

  //===== add PSNR =====
  m_gcAnalyzeAll.addResult (dPSNR, (Double)uibits, MSEyuvframe);
  TComSlice*  pcSlice = pcPic->getSlice(0);
  if (pcSlice->isIntra())
  {
    m_gcAnalyzeI.addResult (dPSNR, (Double)uibits, MSEyuvframe);
  }
  if (pcSlice->isInterP())
  {
    m_gcAnalyzeP.addResult (dPSNR, (Double)uibits, MSEyuvframe);
  }
  if (pcSlice->isInterB())
  {
    m_gcAnalyzeB.addResult (dPSNR, (Double)uibits, MSEyuvframe);
  }

  // Slice-type letter for the log; +32 lower-cases the ASCII letter when the
  // picture is not used as a reference.
  Char c = (pcSlice->isIntra() ? 'I' : pcSlice->isInterP() ? 'P' : 'B');
  if (!pcSlice->isReferenced()) c += 32;

#if VERBOSE_FRAME

#if ADAPTIVE_QP_SELECTION
  printf("POC %4d TId: %1d ( %c-SLICE, nQP %d QP %d ) %10d bits",
         pcSlice->getPOC(),
         pcSlice->getTLayer(),
         c,
         pcSlice->getSliceQpBase(),
         pcSlice->getSliceQp(),
         uibits );
#else
  printf("POC %4d TId: %1d ( %c-SLICE, QP %d ) %10d bits",
         pcSlice->getPOC()-pcSlice->getLastIDR(),
         pcSlice->getTLayer(),
         c,
         pcSlice->getSliceQp(),
         uibits );
#endif

  printf(" [Y %6.4lf dB    U %6.4lf dB    V %6.4lf dB]", dPSNR[COMPONENT_Y], dPSNR[COMPONENT_Cb], dPSNR[COMPONENT_Cr] );
  if (printFrameMSE)
  {
    printf(" [Y MSE %6.4lf  U MSE %6.4lf  V MSE %6.4lf]", MSEyuvframe[COMPONENT_Y], MSEyuvframe[COMPONENT_Cb], MSEyuvframe[COMPONENT_Cr] );
  }
  printf(" [ET %5.0f ]", dEncTime );

  // Reference picture lists, POCs printed relative to the last IDR.
  for (Int iRefList = 0; iRefList < 2; iRefList++)
  {
    printf(" [L%d ", iRefList);
    for (Int iRefIndex = 0; iRefIndex < pcSlice->getNumRefIdx(RefPicList(iRefList)); iRefIndex++)
    {
      printf ("%d ", pcSlice->getRefPOC(RefPicList(iRefList), iRefIndex)-pcSlice->getLastIDR());
    }
    printf("]");
  }
#endif /* VERBOSE_FRAME */

  cscd.destroy();
}
+
/** Compute the combined PSNR/MSE over a complementary field pair and add the
 *  result to the interlaced analyser. SSD is accumulated over BOTH fields,
 *  hence the iSize*2 normalisation below. The bit count is intentionally
 *  reported as 0 here (see comment near uibits).
 *  \param pcPicOrgFirstField / pcPicOrgSecondField   original field pair
 *  \param pcPicRecFirstField / pcPicRecSecondField   reconstructed field pair
 *  \param accessUnit   unused for the bit count here (kept for symmetry)
 *  \param dEncTime     unused in the printout of this function
 *  \param conversion   optional colour-space conversion applied per field
 *  \param printFrameMSE also print per-frame MSE values
 */
Void TEncGOP::xCalculateInterlacedAddPSNR( TComPic* pcPicOrgFirstField, TComPic* pcPicOrgSecondField,
                                           TComPicYuv* pcPicRecFirstField, TComPicYuv* pcPicRecSecondField,
                                           const AccessUnit& accessUnit, Double dEncTime, const InputColourSpaceConversion conversion, const Bool printFrameMSE )
{
  Double  dPSNR[MAX_NUM_COMPONENT];
  TComPic    *apcPicOrgFields[2]={pcPicOrgFirstField, pcPicOrgSecondField};
  TComPicYuv *apcPicRecFields[2]={pcPicRecFirstField, pcPicRecSecondField};

  for(Int i=0; i<MAX_NUM_COMPONENT; i++)
  {
    dPSNR[i]=0.0;
  }

  // When a conversion is requested, convert each reconstructed field and
  // point apcPicRecFields at the converted copies.
  TComPicYuv cscd[2 /* first/second field */];
  if (conversion!=IPCOLOURSPACE_UNCHANGED)
  {
    for(UInt fieldNum=0; fieldNum<2; fieldNum++)
    {
      TComPicYuv &reconField=*(apcPicRecFields[fieldNum]);
      cscd[fieldNum].create(reconField.getWidth(COMPONENT_Y), reconField.getHeight(COMPONENT_Y), reconField.getChromaFormat(), g_uiMaxCUWidth, g_uiMaxCUHeight, g_uiMaxCUDepth);
      TVideoIOYuv::ColourSpaceConvert(reconField, cscd[fieldNum], conversion, g_bitDepth, false);
      apcPicRecFields[fieldNum]=cscd+fieldNum;
    }
  }

  //===== calculate PSNR =====
  Double MSEyuvframe[MAX_NUM_COMPONENT] = {0, 0, 0};

  // Both fields must share format and geometry for a combined measurement.
  assert(apcPicRecFields[0]->getChromaFormat()==apcPicRecFields[1]->getChromaFormat());
  const UInt numValidComponents=apcPicRecFields[0]->getNumberValidComponents();

  for(Int chan=0; chan<numValidComponents; chan++)
  {
    const ComponentID ch=ComponentID(chan);
    assert(apcPicRecFields[0]->getWidth(ch)==apcPicRecFields[1]->getWidth(ch));
    assert(apcPicRecFields[0]->getHeight(ch)==apcPicRecFields[1]->getHeight(ch));

    // SSD accumulated over both fields of the pair.
    UInt64 uiSSDtemp=0;
    // Exclude the configured padding from the measured area (vertical pad is
    // halved because each field is half the frame height).
    const Int   iWidth  = apcPicRecFields[0]->getWidth (ch) - (m_pcEncTop->getPad(0) >> apcPicRecFields[0]->getComponentScaleX(ch));
    const Int   iHeight = apcPicRecFields[0]->getHeight(ch) - ((m_pcEncTop->getPad(1) >> 1) >> apcPicRecFields[0]->getComponentScaleY(ch));

    Int   iSize   = iWidth*iHeight;

    for(UInt fieldNum=0; fieldNum<2; fieldNum++)
    {
      TComPic *pcPic=apcPicOrgFields[fieldNum];
      TComPicYuv *pcPicD=apcPicRecFields[fieldNum];

      // In converted mode compare against the "true" original samples.
      const Pel*  pOrg    = (conversion!=IPCOLOURSPACE_UNCHANGED) ? pcPic ->getPicYuvTrueOrg()->getAddr(ch) : pcPic ->getPicYuvOrg()->getAddr(ch);
      Pel*  pRec    = pcPicD->getAddr(ch);
      const Int   iStride = pcPicD->getStride(ch);


      for(Int y = 0; y < iHeight; y++ )
      {
        for(Int x = 0; x < iWidth; x++ )
        {
          Intermediate_Int iDiff = (Intermediate_Int)( pOrg[x] - pRec[x] );
          uiSSDtemp   += iDiff * iDiff;
        }
        pOrg += iStride;
        pRec += iStride;
      }
    }
    // Reference energy covers both fields (iSize*2); 999.99 marks zero SSD.
    const Int maxval = 255 << (g_bitDepth[toChannelType(ch)] - 8);
    const Double fRefValue = (Double) maxval * maxval * iSize*2;
    dPSNR[ch]         = ( uiSSDtemp ? 10.0 * log10( fRefValue / (Double)uiSSDtemp ) : 999.99 );
    MSEyuvframe[ch]   = (Double)uiSSDtemp/(iSize*2);
  }

  UInt uibits = 0; // the number of bits for the pair is not calculated here - instead the overall total is used elsewhere.

  //===== add PSNR =====
  m_gcAnalyzeAll_in.addResult (dPSNR, (Double)uibits, MSEyuvframe);

  printf("\n                                      Interlaced frame %d: [Y %6.4lf dB    U %6.4lf dB    V %6.4lf dB]", pcPicOrgSecondField->getPOC()/2 , dPSNR[COMPONENT_Y], dPSNR[COMPONENT_Cb], dPSNR[COMPONENT_Cr] );
  if (printFrameMSE)
  {
    printf(" [Y MSE %6.4lf  U MSE %6.4lf  V MSE %6.4lf]", MSEyuvframe[COMPONENT_Y], MSEyuvframe[COMPONENT_Cb], MSEyuvframe[COMPONENT_Cr] );
  }

  // Release any colour-space-converted copies.
  for(UInt fieldNum=0; fieldNum<2; fieldNum++)
  {
    cscd[fieldNum].destroy();
  }
}
+
+/** Function for deciding the nal_unit_type.
+ * \param pocCurr POC of the current picture
+ * \returns the nal unit type of the picture
+ * This function checks the configuration and returns the appropriate nal_unit_type for the picture.
+ */
+NalUnitType TEncGOP::getNalUnitType(Int pocCurr, Int lastIDR, Bool isField)
+{
+  if (pocCurr == 0)
+  {
+    return NAL_UNIT_CODED_SLICE_IDR_W_RADL;
+  }
+
+#if EFFICIENT_FIELD_IRAP
+  if(isField && pocCurr == 1)
+  {
+    // to avoid the picture becoming an IRAP
+    return NAL_UNIT_CODED_SLICE_TRAIL_R;
+  }
+#endif
+
+#if ALLOW_RECOVERY_POINT_AS_RAP
+  if(m_pcCfg->getDecodingRefreshType() != 3 && (pocCurr - isField) % m_pcCfg->getIntraPeriod() == 0)
+#else
+  if ((pocCurr - isField) % m_pcCfg->getIntraPeriod() == 0)
+#endif
+  {
+    if (m_pcCfg->getDecodingRefreshType() == 1)
+    {
+      return NAL_UNIT_CODED_SLICE_CRA;
+    }
+    else if (m_pcCfg->getDecodingRefreshType() == 2)
+    {
+      return NAL_UNIT_CODED_SLICE_IDR_W_RADL;
+    }
+  }
+  if(m_pocCRA>0)
+  {
+    if(pocCurr<m_pocCRA)
+    {
+      // All leading pictures are being marked as TFD pictures here since current encoder uses all
+      // reference pictures while encoding leading pictures. An encoder can ensure that a leading
+      // picture can be still decodable when random accessing to a CRA/CRANT/BLA/BLANT picture by
+      // controlling the reference pictures used for encoding that leading picture. Such a leading
+      // picture need not be marked as a TFD picture.
+      return NAL_UNIT_CODED_SLICE_RASL_R;
+    }
+  }
+  if (lastIDR>0)
+  {
+    if (pocCurr < lastIDR)
+    {
+      return NAL_UNIT_CODED_SLICE_RADL_R;
+    }
+  }
+  return NAL_UNIT_CODED_SLICE_TRAIL_R;
+}
+
/** Compute the rate variation measure (RVM, cf. RVM_VCEGAM10_M) over the
 *  per-picture bit counts collected in m_vRVM_RP. Only evaluated for
 *  low-delay configurations (GOP size 1, not all-intra) with enough coded
 *  pictures; otherwise returns 0.
 */
Double TEncGOP::xCalculateRVM()
{
  Double dRVM = 0;

  if( m_pcCfg->getGOPSize() == 1 && m_pcCfg->getIntraPeriod() != 1 && m_pcCfg->getFramesToBeEncoded() > RVM_VCEGAM10_M * 2 )
  {
    // calculate RVM only for lowdelay configurations
    std::vector<Double> vRL , vB;
    size_t N = m_vRVM_RP.size();
    vRL.resize( N );
    vB.resize( N );

    Int i;
    Double dRavg = 0 , dBavg = 0;
    vB[RVM_VCEGAM10_M] = 0;
    // vRL[i]: sliding average rate over a 2*M-picture window around i;
    // vB[i]:  running accumulation of the deviation of the actual rate from
    //         that windowed average (a virtual buffer occupancy).
    for( i = RVM_VCEGAM10_M + 1 ; i < N - RVM_VCEGAM10_M + 1 ; i++ )
    {
      vRL[i] = 0;
      for( Int j = i - RVM_VCEGAM10_M ; j <= i + RVM_VCEGAM10_M - 1 ; j++ )
        vRL[i] += m_vRVM_RP[j];
      vRL[i] /= ( 2 * RVM_VCEGAM10_M );
      vB[i] = vB[i-1] + m_vRVM_RP[i] - vRL[i];
      dRavg += m_vRVM_RP[i];
      dBavg += vB[i];
    }

    dRavg /= ( N - 2 * RVM_VCEGAM10_M );
    dBavg /= ( N - 2 * RVM_VCEGAM10_M );

    // Standard deviation of the buffer occupancy around its mean.
    Double dSigamB = 0;
    for( i = RVM_VCEGAM10_M + 1 ; i < N - RVM_VCEGAM10_M + 1 ; i++ )
    {
      Double tmp = vB[i] - dBavg;
      dSigamB += tmp * tmp;
    }
    dSigamB = sqrt( dSigamB / ( N - 2 * RVM_VCEGAM10_M ) );

    // Window-size-dependent normalisation factor.
    Double f = sqrt( 12.0 * ( RVM_VCEGAM10_M - 1 ) / ( RVM_VCEGAM10_M + 1 ) );

    dRVM = dSigamB / dRavg * f;
  }

  return( dRVM );
}
+
+/** Attaches the input bitstream to the stream in the output NAL unit
+    Updates rNalu to contain concatenated bitstream. rpcBitstreamRedirect is cleared at the end of this function call.
+ *  \param codedSliceData contains the coded slice data (bitstream) to be concatenated to rNalu
+ *  \param rNalu          target NAL unit
+ */
+Void TEncGOP::xAttachSliceDataToNalUnit (OutputNALUnit& rNalu, TComOutputBitstream* codedSliceData)
+{
+  // Byte-align
+  rNalu.m_Bitstream.writeByteAlignment();   // Slice header byte-alignment
+
+  // Perform bitstream concatenation
+  if (codedSliceData->getNumberOfWrittenBits() > 0)
+  {
+    rNalu.m_Bitstream.addSubstream(codedSliceData);
+  }
+
+  m_pcEntropyCoder->setBitstream(&rNalu.m_Bitstream);
+
+  codedSliceData->clear();
+}
+
// Function will arrange the long-term pictures in the decreasing order of poc_lsb_lt,
// and among the pictures with the same lsb, it arranges them in increasing delta_poc_msb_cycle_lt value.
// It also decides, per LTRP, whether the MSB cycle must be signalled
// (delta_poc_msb_present_flag) by checking for LSB collisions in the DPB.
Void TEncGOP::arrangeLongtermPicturesInRPS(TComSlice *pcSlice, TComList<TComPic*>& rcListPic)
{
  TComReferencePictureSet *rps = pcSlice->getRPS();
  // Nothing to do when the RPS carries no long-term pictures.
  if(!rps->getNumberOfLongtermPictures())
  {
    return;
  }

  // Arrange long-term reference pictures in the correct order of LSB and MSB,
  // and assign values for pocLSBLT and MSB present flag
  Int longtermPicsPoc[MAX_NUM_REF_PICS], longtermPicsLSB[MAX_NUM_REF_PICS], indices[MAX_NUM_REF_PICS];
  Int longtermPicsMSB[MAX_NUM_REF_PICS];
  Bool mSBPresentFlag[MAX_NUM_REF_PICS];
  ::memset(longtermPicsPoc, 0, sizeof(longtermPicsPoc));    // Store POC values of LTRP
  ::memset(longtermPicsLSB, 0, sizeof(longtermPicsLSB));    // Store POC LSB values of LTRP
  ::memset(longtermPicsMSB, 0, sizeof(longtermPicsMSB));    // Store POC MSB values of LTRP
  ::memset(indices        , 0, sizeof(indices));            // Indices to aid in tracking sorted LTRPs
  ::memset(mSBPresentFlag , 0, sizeof(mSBPresentFlag));     // Indicate if MSB needs to be present

  // Get the long-term reference pictures
  // LTRPs occupy the tail of the RPS, after the negative and positive
  // (short-term) pictures; walk them from the end.
  Int offset = rps->getNumberOfNegativePictures() + rps->getNumberOfPositivePictures();
  Int i, ctr = 0;
  Int maxPicOrderCntLSB = 1 << pcSlice->getSPS()->getBitsForPOC();
  for(i = rps->getNumberOfPictures() - 1; i >= offset; i--, ctr++)
  {
    longtermPicsPoc[ctr] = rps->getPOC(i);                                  // LTRP POC
    longtermPicsLSB[ctr] = getLSB(longtermPicsPoc[ctr], maxPicOrderCntLSB); // LTRP POC LSB
    indices[ctr]      = i;
    longtermPicsMSB[ctr] = longtermPicsPoc[ctr] - longtermPicsLSB[ctr];
  }
  Int numLongPics = rps->getNumberOfLongtermPictures();
  assert(ctr == numLongPics);

  // Arrange pictures in decreasing order of MSB;
  // (simple bubble sort over the small fixed-size arrays, keeping all four
  // arrays in step so 'indices' still maps back to RPS slots)
  for(i = 0; i < numLongPics; i++)
  {
    for(Int j = 0; j < numLongPics - 1; j++)
    {
      if(longtermPicsMSB[j] < longtermPicsMSB[j+1])
      {
        std::swap(longtermPicsPoc[j], longtermPicsPoc[j+1]);
        std::swap(longtermPicsLSB[j], longtermPicsLSB[j+1]);
        std::swap(longtermPicsMSB[j], longtermPicsMSB[j+1]);
        std::swap(indices[j]        , indices[j+1]        );
      }
    }
  }

  for(i = 0; i < numLongPics; i++)
  {
    // Check if MSB present flag should be enabled.
    // Check if the buffer contains any pictures that have the same LSB.
    TComList<TComPic*>::iterator  iterPic = rcListPic.begin();
    TComPic*                      pcPic;
    while ( iterPic != rcListPic.end() )
    {
      pcPic = *iterPic;
      if( (getLSB(pcPic->getPOC(), maxPicOrderCntLSB) == longtermPicsLSB[i])   &&     // Same LSB
                                      (pcPic->getSlice(0)->isReferenced())     &&    // Reference picture
                                        (pcPic->getPOC() != longtermPicsPoc[i])    )  // Not the LTRP itself
      {
        mSBPresentFlag[i] = true;
        break;
      }
      iterPic++;
    }
  }

  // tempArray for usedByCurr flag
  // (captured before rewriting the RPS entries below)
  Bool tempArray[MAX_NUM_REF_PICS]; ::memset(tempArray, 0, sizeof(tempArray));
  for(i = 0; i < numLongPics; i++)
  {
    tempArray[i] = rps->getUsed(indices[i]);
  }
  // Now write the final values;
  ctr = 0;
  Int currMSB = 0, currLSB = 0;
  // currPicPoc = currMSB + currLSB
  currLSB = getLSB(pcSlice->getPOC(), maxPicOrderCntLSB);
  currMSB = pcSlice->getPOC() - currLSB;

  // Rewrite the LTRP tail of the RPS in the sorted order computed above.
  for(i = rps->getNumberOfPictures() - 1; i >= offset; i--, ctr++)
  {
    rps->setPOC                   (i, longtermPicsPoc[ctr]);
    rps->setDeltaPOC              (i, - pcSlice->getPOC() + longtermPicsPoc[ctr]);
    rps->setUsed                  (i, tempArray[ctr]);
    rps->setPocLSBLT              (i, longtermPicsLSB[ctr]);
    rps->setDeltaPocMSBCycleLT    (i, (currMSB - (longtermPicsPoc[ctr] - longtermPicsLSB[ctr])) / maxPicOrderCntLSB);
    rps->setDeltaPocMSBPresentFlag(i, mSBPresentFlag[ctr]);

    assert(rps->getDeltaPocMSBCycleLT(i) >= 0);   // Non-negative value
  }
  // Sanity: no LTRP POC may appear twice in the RPS.
  for(i = rps->getNumberOfPictures() - 1, ctr = 1; i >= offset; i--, ctr++)
  {
    for(Int j = rps->getNumberOfPictures() - 1 - ctr; j >= offset; j--)
    {
      // Here at the encoder we know that we have set the full POC value for the LTRPs, hence we
      // don't have to check the MSB present flag values for this constraint.
      assert( rps->getPOC(i) != rps->getPOC(j) ); // If assert fails, LTRP entry repeated in RPS!!!
    }
  }
}
+
+/** Function for finding the position to insert the first of APS and non-nested BP, PT, DU info SEI messages.
+ * \param accessUnit Access Unit of the current picture
+ * This function finds the position to insert the first of APS and non-nested BP, PT, DU info SEI messages.
+ */
+Int TEncGOP::xGetFirstSeiLocation(AccessUnit &accessUnit)
+{
+  // Find the location of the first SEI message
+  // Walk the NAL units of the access unit in order, counting units until the
+  // first SEI or VCL NAL unit is reached; that index is the insertion point
+  // for leading SEI messages (APS and non-nested BP/PT/DU info).
+  Int seiStartPos = 0;
+  for(AccessUnit::iterator it = accessUnit.begin(); it != accessUnit.end(); it++, seiStartPos++)
+  {
+     if ((*it)->isSei() || (*it)->isVcl())
+     {
+       break;
+     }
+  }
+  //  assert(it != accessUnit.end());  // Triggers with some legit configurations
+  // If the AU contains no SEI/VCL unit, seiStartPos equals accessUnit.size().
+  return seiStartPos;
+}
+
+// Compute a heuristic blockiness metric on the reconstructed luma plane along
+// the max-TU grid and, when the average artifact SAD is high, override the
+// per-slice deblocking-filter offsets to filter more aggressively; otherwise
+// restore the PPS-signalled deblocking settings.
+// \param pcPic        picture whose reconstruction is analyzed and whose slice
+//                     deblocking parameters are updated in place
+// \param uiNumSlices  number of slices in pcPic to update
+Void TEncGOP::dblMetric( TComPic* pcPic, UInt uiNumSlices )
+{
+  TComPicYuv* pcPicYuvRec = pcPic->getPicYuvRec();
+  Pel* Rec    = pcPicYuvRec->getAddr(COMPONENT_Y);
+  Pel* tempRec = Rec;   // remember the plane origin; Rec is advanced below
+  Int  stride = pcPicYuvRec->getStride(COMPONENT_Y);
+  UInt log2maxTB = pcPic->getSlice(0)->getSPS()->getQuadtreeTULog2MaxSize();
+  UInt maxTBsize = (1<<log2maxTB);
+  const UInt minBlockArtSize = 8;
+  const UInt picWidth = pcPicYuvRec->getWidth(COMPONENT_Y);
+  const UInt picHeight = pcPicYuvRec->getHeight(COMPONENT_Y);
+  const UInt noCol = (picWidth>>log2maxTB);    // number of max-TU columns
+  const UInt noRows = (picHeight>>log2maxTB);  // number of max-TU rows
+  assert(noCol > 1);
+  assert(noRows > 1);
+  // NOTE(review): malloc results are used unchecked; an allocation failure
+  // would dereference NULL in the memset/accumulation below.
+  UInt64 *colSAD = (UInt64*)malloc(noCol*sizeof(UInt64));
+  UInt64 *rowSAD = (UInt64*)malloc(noRows*sizeof(UInt64));
+  UInt colIdx = 0;
+  UInt rowIdx = 0;
+  Pel p0, p1, p2, q0, q1, q2;
+
+  // Activity window derived from the deblocking-filter beta table at the
+  // slice QP: only boundary samples whose second-difference activity lies in
+  // (thr1, thr2) are counted as potential blocking artifacts.
+  Int qp = pcPic->getSlice(0)->getSliceQp();
+  Int bitdepthScale = 1 << (g_bitDepth[CHANNEL_TYPE_LUMA]-8);
+  Int beta = TComLoopFilter::getBeta( qp ) * bitdepthScale;
+  const Int thr2 = (beta>>2);
+  const Int thr1 = 2*bitdepthScale;
+  UInt a = 0;
+
+  memset(colSAD, 0, noCol*sizeof(UInt64));
+  memset(rowSAD, 0, noRows*sizeof(UInt64));
+
+  if (maxTBsize > minBlockArtSize)
+  {
+    // Analyze vertical artifact edges
+    // (Int c vs. const UInt picWidth promotes c to unsigned; both values are
+    // non-negative here, so the comparisons are well defined.)
+    for(Int c = maxTBsize; c < picWidth; c += maxTBsize)
+    {
+      for(Int r = 0; r < picHeight; r++)
+      {
+        // Three samples on each side of the vertical TU boundary at column c.
+        // NOTE(review): the Rec[c+1]/Rec[c+2] reads rely on TComPicYuv's
+        // right margin when picWidth is not a multiple of maxTBsize --
+        // confirm the padding is always >= 2 samples.
+        p2 = Rec[c-3];
+        p1 = Rec[c-2];
+        p0 = Rec[c-1];
+        q0 = Rec[c];
+        q1 = Rec[c+1];
+        q2 = Rec[c+2];
+        a = ((abs(p2-(p1<<1)+p0)+abs(q0-(q1<<1)+q2))<<1);  // second-difference activity
+        if ( thr1 < a && a < thr2)
+        {
+          colSAD[colIdx] += abs(p0 - q0);  // step across the boundary
+        }
+        Rec += stride;                     // next row
+      }
+      colIdx++;
+      Rec = tempRec;                       // back to the plane origin
+    }
+
+    // Analyze horizontal artifact edges
+    // (Rec == tempRec, the plane origin, at this point; rows are addressed
+    // explicitly via r*stride.)
+    for(Int r = maxTBsize; r < picHeight; r += maxTBsize)
+    {
+      for(Int c = 0; c < picWidth; c++)
+      {
+        p2 = Rec[c + (r-3)*stride];
+        p1 = Rec[c + (r-2)*stride];
+        p0 = Rec[c + (r-1)*stride];
+        q0 = Rec[c + r*stride];
+        q1 = Rec[c + (r+1)*stride];
+        q2 = Rec[c + (r+2)*stride];
+        a = ((abs(p2-(p1<<1)+p0)+abs(q0-(q1<<1)+q2))<<1);
+        if (thr1 < a && a < thr2)
+        {
+          rowSAD[rowIdx] += abs(p0 - q0);
+        }
+      }
+      rowIdx++;
+    }
+  }
+
+  // Average the per-edge SADs; the <<10 preserves precision through the
+  // integer divisions that follow.
+  UInt64 colSADsum = 0;
+  UInt64 rowSADsum = 0;
+  for(Int c = 0; c < noCol-1; c++)
+  {
+    colSADsum += colSAD[c];
+  }
+  for(Int r = 0; r < noRows-1; r++)
+  {
+    rowSADsum += rowSAD[r];
+  }
+
+  colSADsum <<= 10;
+  rowSADsum <<= 10;
+  colSADsum /= (noCol-1);
+  colSADsum /= picHeight;
+  rowSADsum /= (noRows-1);
+  rowSADsum /= picWidth;
+
+  UInt64 avgSAD = ((colSADsum + rowSADsum)>>1);
+  // Normalize back to 8-bit range before comparing with the fixed threshold.
+  avgSAD >>= (g_bitDepth[CHANNEL_TYPE_LUMA]-8);
+
+  if ( avgSAD > 2048 )
+  {
+    // High blockiness: force the deblocking filter on with stronger offsets,
+    // clipped to [2, 6].
+    avgSAD >>= 9;
+    Int offset = Clip3(2,6,(Int)avgSAD);
+    for (Int i=0; i<uiNumSlices; i++)
+    {
+      pcPic->getSlice(i)->setDeblockingFilterOverrideFlag(true);
+      pcPic->getSlice(i)->setDeblockingFilterDisable(false);
+      pcPic->getSlice(i)->setDeblockingFilterBetaOffsetDiv2( offset );
+      pcPic->getSlice(i)->setDeblockingFilterTcOffsetDiv2( offset );
+    }
+  }
+  else
+  {
+    // Low blockiness: fall back to the PPS-signalled deblocking settings.
+    for (Int i=0; i<uiNumSlices; i++)
+    {
+      pcPic->getSlice(i)->setDeblockingFilterOverrideFlag(false);
+      pcPic->getSlice(i)->setDeblockingFilterDisable(        pcPic->getSlice(i)->getPPS()->getPicDisableDeblockingFilterFlag() );
+      pcPic->getSlice(i)->setDeblockingFilterBetaOffsetDiv2( pcPic->getSlice(i)->getPPS()->getDeblockingFilterBetaOffsetDiv2() );
+      pcPic->getSlice(i)->setDeblockingFilterTcOffsetDiv2(   pcPic->getSlice(i)->getPPS()->getDeblockingFilterTcOffsetDiv2()   );
+    }
+  }
+
+  free(colSAD);
+  free(rowSAD);
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncGOP.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,193 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncGOP.h
+    \brief    GOP encoder class (header)
+*/
+
+#ifndef __TENCGOP__
+#define __TENCGOP__
+
+#include <list>
+
+#include <stdlib.h>
+
+#include "TLibCommon/TComList.h"
+#include "TLibCommon/TComPic.h"
+#include "TLibCommon/TComBitCounter.h"
+#include "TLibCommon/TComLoopFilter.h"
+#include "TLibCommon/AccessUnit.h"
+#include "TEncSampleAdaptiveOffset.h"
+#include "TEncSlice.h"
+#include "TEncEntropy.h"
+#include "TEncCavlc.h"
+#include "TEncSbac.h"
+#include "SEIwrite.h"
+
+#include "TEncAnalyze.h"
+#include "TEncRateCtrl.h"
+#include <vector>
+
+//! \ingroup TLibEncoder
+//! \{
+
+class TEncTop;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// GOP-level encoder: drives per-GOP compression (compressGOP), slice NAL
+/// assembly, SEI message creation and placement, PSNR/RVM reporting, and the
+/// deblocking-filter metric (dblMetric).
+class TEncGOP
+{
+private:
+  //  Data
+  Bool                    m_bLongtermTestPictureHasBeenCoded;
+  Bool                    m_bLongtermTestPictureHasBeenCoded2;
+  UInt                    m_numLongTermRefPicSPS;
+  UInt                    m_ltRefPicPocLsbSps[MAX_NUM_LONG_TERM_REF_PICS];
+  Bool                    m_ltRefPicUsedByCurrPicFlag[MAX_NUM_LONG_TERM_REF_PICS];
+  Int                     m_iLastIDR;
+  Int                     m_iGopSize;
+  Int                     m_iNumPicCoded;
+  Bool                    m_bFirst;
+#if ALLOW_RECOVERY_POINT_AS_RAP
+  Int                     m_iLastRecoveryPicPOC;
+#endif
+
+  //  Access channel
+  // Non-owning pointers into the top-level encoder's components.
+  TEncTop*                m_pcEncTop;
+  TEncCfg*                m_pcCfg;
+  TEncSlice*              m_pcSliceEncoder;
+  TComList<TComPic*>*     m_pcListPic;
+
+  TEncEntropy*            m_pcEntropyCoder;
+  TEncCavlc*              m_pcCavlcCoder;
+  TEncSbac*               m_pcSbacCoder;
+  TEncBinCABAC*           m_pcBinCABAC;
+  TComLoopFilter*         m_pcLoopFilter;
+
+  SEIWriter               m_seiWriter;
+
+  //--Adaptive Loop filter
+  TEncSampleAdaptiveOffset*  m_pcSAO;
+  TEncRateCtrl*           m_pcRateCtrl;
+  // indicate sequence first
+  Bool                    m_bSeqFirst;
+
+  // clean decoding refresh
+  Bool                    m_bRefreshPending;
+  Int                     m_pocCRA;
+  NalUnitType             m_associatedIRAPType;
+  Int                     m_associatedIRAPPOC;
+
+  std::vector<Int> m_vRVM_RP;
+  UInt                    m_lastBPSEI;
+  UInt                    m_totalCoded;
+  UInt                    m_cpbRemovalDelay;
+  UInt                    m_tl0Idx;
+  UInt                    m_rapIdx;
+  // Per-access-unit bookkeeping: which SEI messages have already been placed
+  // in the current AU (cleared by xResetNonNestedSEIPresentFlags /
+  // xResetNestedSEIPresentFlags below).
+  Bool                    m_activeParameterSetSEIPresentInAU;
+  Bool                    m_bufferingPeriodSEIPresentInAU;
+  Bool                    m_pictureTimingSEIPresentInAU;
+  Bool                    m_nestedBufferingPeriodSEIPresentInAU;
+  Bool                    m_nestedPictureTimingSEIPresentInAU;
+public:
+  TEncGOP();
+  virtual ~TEncGOP();
+
+  Void  create      ();
+  Void  destroy     ();
+
+  Void  init        ( TEncTop* pcTEncTop );
+  Void  compressGOP ( Int iPOCLast, Int iNumPicRcvd, TComList<TComPic*>& rcListPic, TComList<TComPicYuv*>& rcListPicYuvRec,
+                      std::list<AccessUnit>& accessUnitsInGOP, Bool isField, Bool isTff, const InputColourSpaceConversion snr_conversion, const Bool printFrameMSE );
+  Void  xAttachSliceDataToNalUnit (OutputNALUnit& rNalu, TComOutputBitstream* pcBitstreamRedirect);
+
+
+  Int   getGOPSize()          { return  m_iGopSize;  }
+
+  TComList<TComPic*>*   getListPic()      { return m_pcListPic; }
+
+  Void  printOutSummary      ( UInt uiNumAllPicCoded, Bool isField, const Bool printMSEBasedSNR, const Bool printSequenceMSE );
+  Void  preLoopFilterPicAll  ( TComPic* pcPic, UInt64& ruiDist );
+
+  TEncSlice*  getSliceEncoder()   { return m_pcSliceEncoder; }
+  NalUnitType getNalUnitType( Int pocCurr, Int lastIdr, Bool isField );
+  Void arrangeLongtermPicturesInRPS(TComSlice *, TComList<TComPic*>& );
+
+protected:
+  TEncRateCtrl* getRateCtrl()       { return m_pcRateCtrl;  }
+
+protected:
+
+  Void  xInitGOP          ( Int iPOCLast, Int iNumPicRcvd, TComList<TComPic*>& rcListPic, TComList<TComPicYuv*>& rcListPicYuvRecOut, Bool isField );
+  Void  xGetBuffer        ( TComList<TComPic*>& rcListPic, TComList<TComPicYuv*>& rcListPicYuvRecOut, Int iNumPicRcvd, Int iTimeOffset, TComPic*& rpcPic, TComPicYuv*& rpcPicYuvRecOut, Int pocCurr, Bool isField );
+
+  Void  xCalculateAddPSNR          ( TComPic* pcPic, TComPicYuv* pcPicD, const AccessUnit&, Double dEncTime, const InputColourSpaceConversion snr_conversion, const Bool printFrameMSE );
+  Void  xCalculateInterlacedAddPSNR( TComPic* pcPicOrgFirstField, TComPic* pcPicOrgSecondField,
+                                     TComPicYuv* pcPicRecFirstField, TComPicYuv* pcPicRecSecondField,
+                                     const AccessUnit& accessUnit, Double dEncTime, const InputColourSpaceConversion snr_conversion, const Bool printFrameMSE );
+
+  UInt64 xFindDistortionFrame (TComPicYuv* pcPic0, TComPicYuv* pcPic1);
+
+  Double xCalculateRVM();
+
+  // Factory helpers: each allocates an SEI message object; the caller takes
+  // ownership of the returned pointer.
+  SEIActiveParameterSets*           xCreateSEIActiveParameterSets (TComSPS *sps);
+  SEIFramePacking*                  xCreateSEIFramePacking();
+  SEISegmentedRectFramePacking*     xCreateSEISegmentedRectFramePacking();
+  SEIDisplayOrientation*            xCreateSEIDisplayOrientation();
+  SEIToneMappingInfo*               xCreateSEIToneMappingInfo();
+  SEITempMotionConstrainedTileSets* xCreateSEITempMotionConstrainedTileSets ();
+  SEIKneeFunctionInfo*              xCreateSEIKneeFunctionInfo();
+  SEIChromaSamplingFilterHint*      xCreateSEIChromaSamplingFilterHint(Bool bChromaLocInfoPresent, Int iHorFilterIndex, Int iVerFilterIdc);
+
+  Void xCreateLeadingSEIMessages (/*SEIMessages seiMessages,*/ AccessUnit &accessUnit, TComSPS *sps);
+  Int xGetFirstSeiLocation (AccessUnit &accessUnit);
+  Void xResetNonNestedSEIPresentFlags()
+  {
+    m_activeParameterSetSEIPresentInAU = false;
+    m_bufferingPeriodSEIPresentInAU    = false;
+    m_pictureTimingSEIPresentInAU      = false;
+  }
+  Void xResetNestedSEIPresentFlags()
+  {
+    m_nestedBufferingPeriodSEIPresentInAU    = false;
+    m_nestedPictureTimingSEIPresentInAU      = false;
+  }
+  Void dblMetric( TComPic* pcPic, UInt uiNumSlices );
+};// END CLASS DEFINITION TEncGOP
+
+//! \}
+
+#endif // __TENCGOP__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncPic.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,156 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncPic.cpp
+    \brief    class of picture which includes side information for encoder
+*/
+
+#include "TEncPic.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+/** Constructor
+ */
+TEncQPAdaptationUnit::TEncQPAdaptationUnit()
+: m_dActivity(0.0)  // activity is assigned later via setActivity() by TEncPreanalyzer::xPreanalyze()
+{
+}
+
+/** Destructor
+ */
+TEncQPAdaptationUnit::~TEncQPAdaptationUnit()
+{
+}
+
+/** Constructor
+ *  All dimensions start at zero and the AQ-unit array at NULL until create()
+ *  is called.
+ */
+TEncPicQPAdaptationLayer::TEncPicQPAdaptationLayer()
+: m_uiAQPartWidth(0)
+, m_uiAQPartHeight(0)
+, m_uiNumAQPartInWidth(0)
+, m_uiNumAQPartInHeight(0)
+, m_acTEncAQU(NULL)
+, m_dAvgActivity(0.0)
+{
+}
+
+/** Destructor
+ *  Releases the AQ-unit array via destroy().
+ */
+TEncPicQPAdaptationLayer::~TEncPicQPAdaptationLayer()
+{
+  destroy();
+}
+
+/** Initialize member variables
+ * \param iWidth Picture width
+ * \param iHeight Picture height
+ * \param uiAQPartWidth Width of unit block for analyzing local image characteristics
+ * \param uiAQPartHeight Height of unit block for analyzing local image characteristics
+ * \return Void
+ */
+Void TEncPicQPAdaptationLayer::create( Int iWidth, Int iHeight, UInt uiAQPartWidth, UInt uiAQPartHeight )
+{
+  m_uiAQPartWidth = uiAQPartWidth;
+  m_uiAQPartHeight = uiAQPartHeight;
+  // Ceiling division: partial partitions at the right/bottom edge count too.
+  m_uiNumAQPartInWidth = (iWidth + m_uiAQPartWidth-1) / m_uiAQPartWidth;
+  m_uiNumAQPartInHeight = (iHeight + m_uiAQPartHeight-1) / m_uiAQPartHeight;
+  // One TEncQPAdaptationUnit per partition, stored row-major.
+  m_acTEncAQU = new TEncQPAdaptationUnit[ m_uiNumAQPartInWidth * m_uiNumAQPartInHeight ];
+}
+
+/** Clean up
+ * \return Void
+ */
+Void TEncPicQPAdaptationLayer::destroy()
+{
+  // The null guard is redundant (delete[] NULL is a no-op) but harmless;
+  // kept as-is.  Resetting to NULL makes destroy() safe to call twice.
+  if (m_acTEncAQU)
+  {
+    delete[] m_acTEncAQU;
+    m_acTEncAQU = NULL;
+  }
+}
+
+/** Constructor
+ *  AQ layers remain unallocated (NULL, depth 0) until create() is called.
+ */
+TEncPic::TEncPic()
+: m_acAQLayer(NULL)
+, m_uiMaxAQDepth(0)
+{
+}
+
+/** Destructor
+ *  Releases the AQ layers and the base-class picture buffers via destroy().
+ */
+TEncPic::~TEncPic()
+{
+  destroy();
+}
+
+/** Initialize member variables
+ * \param iWidth Picture width
+ * \param iHeight Picture height
+ * \param uiMaxWidth Maximum CU width
+ * \param uiMaxHeight Maximum CU height
+ * \param uiMaxDepth Maximum CU depth
+ * \param uiMaxAQDepth Maximum depth of unit block for assigning QP adaptive to local image characteristics
+ * \param bIsVirtual
+ * \return Void
+ */
+Void TEncPic::create( Int iWidth, Int iHeight, ChromaFormat chromaFormat, UInt uiMaxWidth, UInt uiMaxHeight, UInt uiMaxDepth, UInt uiMaxAQDepth,
+                      Window &conformanceWindow, Window &defaultDisplayWindow, Int *numReorderPics, Bool bIsVirtual )
+{
+  // Base picture buffers first, then the encoder-side AQ layers on top.
+  TComPic::create( iWidth, iHeight, chromaFormat, uiMaxWidth, uiMaxHeight, uiMaxDepth, conformanceWindow, defaultDisplayWindow, numReorderPics, bIsVirtual );
+  m_uiMaxAQDepth = uiMaxAQDepth;
+  if ( uiMaxAQDepth > 0 )
+  {
+    // One layer per depth; the partition size halves with each depth level
+    // (uiMaxWidth>>d x uiMaxHeight>>d).
+    m_acAQLayer = new TEncPicQPAdaptationLayer[ m_uiMaxAQDepth ];
+    for (UInt d = 0; d < m_uiMaxAQDepth; d++)
+    {
+      m_acAQLayer[d].create( iWidth, iHeight, uiMaxWidth>>d, uiMaxHeight>>d );
+    }
+  }
+}
+
+/** Clean up
+ * \return Void
+ */
+Void TEncPic::destroy()
+{
+  // Release AQ layers before the base-class buffers.  The null guard is
+  // redundant (delete[] NULL is a no-op) but kept as-is.
+  if (m_acAQLayer)
+  {
+    delete[] m_acAQLayer;
+    m_acAQLayer = NULL;
+  }
+  TComPic::destroy();
+}
+//! \}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncPic.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,115 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncPic.h
+    \brief    class of picture which includes side information for encoder (header)
+*/
+
+#ifndef __TENCPIC__
+#define __TENCPIC__
+
+#include "TLibCommon/CommonDef.h"
+#include "TLibCommon/TComPic.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// Unit block for storing image characteristics
+/// Holds a single activity value (set by TEncPreanalyzer::xPreanalyze) for
+/// one AQ partition of the picture.
+class TEncQPAdaptationUnit
+{
+private:
+  Double m_dActivity;  // local activity measure of this partition
+
+public:
+  TEncQPAdaptationUnit();
+  ~TEncQPAdaptationUnit();
+
+  Void   setActivity( Double d ) { m_dActivity = d; }
+  Double getActivity()           { return m_dActivity; }
+};
+
+/// Local image characteristics for CUs on a specific depth
+/// Owns a row-major array of TEncQPAdaptationUnit, one per AQ partition of
+/// the picture at this depth (allocated in create(), freed in destroy()).
+class TEncPicQPAdaptationLayer
+{
+private:
+  UInt                  m_uiAQPartWidth;        // partition width in samples
+  UInt                  m_uiAQPartHeight;       // partition height in samples
+  UInt                  m_uiNumAQPartInWidth;   // partitions per row (ceil(width/partWidth))
+  UInt                  m_uiNumAQPartInHeight;  // partitions per column (ceil(height/partHeight))
+  TEncQPAdaptationUnit* m_acTEncAQU;            // owned array, row-major
+  Double                m_dAvgActivity;         // mean activity over all partitions
+
+public:
+  TEncPicQPAdaptationLayer();
+  virtual ~TEncPicQPAdaptationLayer();
+
+  Void  create( Int iWidth, Int iHeight, UInt uiAQPartWidth, UInt uiAQPartHeight );
+  Void  destroy();
+
+  UInt                   getAQPartWidth()        { return m_uiAQPartWidth;       }
+  UInt                   getAQPartHeight()       { return m_uiAQPartHeight;      }
+  UInt                   getNumAQPartInWidth()   { return m_uiNumAQPartInWidth;  }
+  UInt                   getNumAQPartInHeight()  { return m_uiNumAQPartInHeight; }
+  UInt                   getAQPartStride()       { return m_uiNumAQPartInWidth;  }
+  TEncQPAdaptationUnit*  getQPAdaptationUnit()   { return m_acTEncAQU;           }
+  Double                 getAvgActivity()        { return m_dAvgActivity;        }
+
+  Void                   setAvgActivity( Double d )  { m_dAvgActivity = d; }
+};
+
+/// Picture class including local image characteristics information for QP adaptation
+/// Extends TComPic with one TEncPicQPAdaptationLayer per AQ depth level.
+class TEncPic : public TComPic
+{
+private:
+  TEncPicQPAdaptationLayer* m_acAQLayer;    // owned array of m_uiMaxAQDepth layers (NULL if depth 0)
+  UInt                      m_uiMaxAQDepth; // number of AQ layers
+
+public:
+  TEncPic();
+  virtual ~TEncPic();
+
+  Void          create( Int iWidth, Int iHeight, ChromaFormat chromaFormat, UInt uiMaxWidth, UInt uiMaxHeight, UInt uiMaxDepth, UInt uiMaxAQDepth,
+                          Window &conformanceWindow, Window &defaultDisplayWindow, Int *numReorderPics, Bool bIsVirtual = false );
+  virtual Void  destroy();
+
+  // No bounds check: the caller must pass uiDepth < getMaxAQDepth().
+  TEncPicQPAdaptationLayer* getAQLayer( UInt uiDepth )  { return &m_acAQLayer[uiDepth]; }
+  UInt                      getMaxAQDepth()             { return m_uiMaxAQDepth;        }
+};
+
+//! \}
+
+#endif // __TENCPIC__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncPreanalyzer.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,141 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncPreanalyzer.cpp
+    \brief    source picture analyzer class
+*/
+
+#include <cfloat>
+#include <algorithm>
+
+#include "TEncPreanalyzer.h"
+
+using namespace std;
+
+//! \ingroup TLibEncoder
+//! \{
+
+/** Constructor
+ *  Stateless analyzer; nothing to initialize.
+ */
+TEncPreanalyzer::TEncPreanalyzer()
+{
+}
+
+/** Destructor
+ */
+TEncPreanalyzer::~TEncPreanalyzer()
+{
+}
+
+/** Analyze source picture and compute local image characteristics used for QP adaptation
+ * \param pcEPic Picture object to be analyzed
+ * \return Void
+ *
+ * For every AQ layer of the picture, walks the original luma plane in
+ * AQ-partition steps, splits each partition into four quadrants (2x2 split of
+ * width/height), accumulates sum and sum-of-squares per quadrant, and stores
+ * activity = 1.0 + min(quadrant variance) into the partition's
+ * TEncQPAdaptationUnit.  Finally stores the layer-wide average activity.
+ */
+Void TEncPreanalyzer::xPreanalyze( TEncPic* pcEPic )
+{
+  TComPicYuv* pcPicYuv = pcEPic->getPicYuvOrg();
+  const Int iWidth = pcPicYuv->getWidth(COMPONENT_Y);
+  const Int iHeight = pcPicYuv->getHeight(COMPONENT_Y);
+  const Int iStride = pcPicYuv->getStride(COMPONENT_Y);
+
+  for ( UInt d = 0; d < pcEPic->getMaxAQDepth(); d++ )
+  {
+    const Pel* pLineY = pcPicYuv->getAddr(COMPONENT_Y);
+    TEncPicQPAdaptationLayer* pcAQLayer = pcEPic->getAQLayer(d);
+    const UInt uiAQPartWidth = pcAQLayer->getAQPartWidth();
+    const UInt uiAQPartHeight = pcAQLayer->getAQPartHeight();
+    // pcAQU advances one unit per partition, row-major, matching the layer's
+    // AQ-unit array layout.
+    TEncQPAdaptationUnit* pcAQU = pcAQLayer->getQPAdaptationUnit();
+
+    Double dSumAct = 0.0;
+    for ( UInt y = 0; y < iHeight; y += uiAQPartHeight )
+    {
+      // Edge partitions may be smaller than the nominal AQ-partition size.
+      const UInt uiCurrAQPartHeight = min(uiAQPartHeight, iHeight-y);
+      for ( UInt x = 0; x < iWidth; x += uiAQPartWidth, pcAQU++ )
+      {
+        const UInt uiCurrAQPartWidth = min(uiAQPartWidth, iWidth-x);
+        const Pel* pBlkY = &pLineY[x];
+        // Indices 0..3 = top-left, top-right, bottom-left, bottom-right
+        // quadrant of the partition.
+        UInt64 uiSum[4] = {0, 0, 0, 0};
+        UInt64 uiSumSq[4] = {0, 0, 0, 0};
+        UInt uiNumPixInAQPart = 0;
+        UInt by = 0;
+        for ( ; by < uiCurrAQPartHeight>>1; by++ )
+        {
+          UInt bx = 0;
+          for ( ; bx < uiCurrAQPartWidth>>1; bx++, uiNumPixInAQPart++ )
+          {
+            uiSum  [0] += pBlkY[bx];
+            uiSumSq[0] += pBlkY[bx] * pBlkY[bx];
+          }
+          for ( ; bx < uiCurrAQPartWidth; bx++, uiNumPixInAQPart++ )
+          {
+            uiSum  [1] += pBlkY[bx];
+            uiSumSq[1] += pBlkY[bx] * pBlkY[bx];
+          }
+          pBlkY += iStride;
+        }
+        for ( ; by < uiCurrAQPartHeight; by++ )
+        {
+          UInt bx = 0;
+          for ( ; bx < uiCurrAQPartWidth>>1; bx++, uiNumPixInAQPart++ )
+          {
+            uiSum  [2] += pBlkY[bx];
+            uiSumSq[2] += pBlkY[bx] * pBlkY[bx];
+          }
+          for ( ; bx < uiCurrAQPartWidth; bx++, uiNumPixInAQPart++ )
+          {
+            uiSum  [3] += pBlkY[bx];
+            uiSumSq[3] += pBlkY[bx] * pBlkY[bx];
+          }
+          pBlkY += iStride;
+        }
+
+        // NOTE(review): uiNumPixInAQPart is the pixel count of the WHOLE
+        // partition, not of the individual quadrant, so dAverage/dVariance
+        // are uniformly scaled rather than true per-quadrant statistics.
+        // This matches the upstream HM reference code; verify intent before
+        // changing.
+        Double dMinVar = DBL_MAX;
+        for ( Int i=0; i<4; i++)
+        {
+          const Double dAverage = Double(uiSum[i]) / uiNumPixInAQPart;
+          const Double dVariance = Double(uiSumSq[i]) / uiNumPixInAQPart - dAverage * dAverage;
+          dMinVar = min(dMinVar, dVariance);
+        }
+        // +1.0 keeps activity strictly positive (safe as a divisor later).
+        const Double dActivity = 1.0 + dMinVar;
+        pcAQU->setActivity( dActivity );
+        dSumAct += dActivity;
+      }
+      pLineY += iStride * uiCurrAQPartHeight;
+    }
+
+    const Double dAvgAct = dSumAct / (pcAQLayer->getNumAQPartInWidth() * pcAQLayer->getNumAQPartInHeight());
+    pcAQLayer->setAvgActivity( dAvgAct );
+  }
+}
+//! \}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncPreanalyzer.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,62 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncPreanalyzer.h
+    \brief    source picture analyzer class (header)
+*/
+
+#ifndef __TENCPREANALYZER__
+#define __TENCPREANALYZER__
+
+#include "TEncPic.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// Source picture analyzer class
+/// Computes per-partition activity statistics on a TEncPic's original luma
+/// plane for adaptive-QP (see xPreanalyze in TEncPreanalyzer.cpp).
+class TEncPreanalyzer
+{
+public:
+  TEncPreanalyzer();
+  virtual ~TEncPreanalyzer();
+
+  Void xPreanalyze( TEncPic* pcPic );
+};
+
+//! \}
+
+#endif // __TENCPREANALYZER__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncRateCtrl.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1402 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncRateCtrl.cpp
+    \brief    Rate control manager class
+*/
+#include "TEncRateCtrl.h"
+#include "../TLibCommon/TComPic.h"
+#include "../TLibCommon/TComChromaFormat.h"
+
+#include <cmath>
+
+using namespace std;
+
+//sequence level
+// Sequence-level rate-control state: zero all counters and null all owned arrays.
+// Real initialization happens in create().
+TEncRCSeq::TEncRCSeq()
+{
+  m_totalFrames         = 0;
+  m_targetRate          = 0;
+  m_frameRate           = 0;
+  m_targetBits          = 0;
+  m_GOPSize             = 0;
+  m_picWidth            = 0;
+  m_picHeight           = 0;
+  m_LCUWidth            = 0;
+  m_LCUHeight           = 0;
+  m_numberOfLevel       = 0;
+  m_numberOfLCU         = 0;
+  m_averageBits         = 0;
+  m_bitsRatio           = NULL;
+  m_GOPID2Level         = NULL;
+  m_picPara             = NULL;
+  m_LCUPara             = NULL;
+  m_numberOfPixel       = 0;
+  m_framesLeft          = 0;
+  m_bitsLeft            = 0;
+  m_useLCUSeparateModel = false;
+  m_adaptiveBit         = 0;
+  m_lastLambda          = 0.0;
+}
+
+// Destructor frees the arrays allocated in create() via destroy().
+TEncRCSeq::~TEncRCSeq()
+{
+  destroy();
+}
+
+// Allocate and initialize all sequence-level rate-control state.
+// Computes the sequence target bits/bpp, picks R-lambda model update step
+// sizes from the target bpp, and allocates per-GOP-position and per-level
+// (and optionally per-LCU) model parameter arrays. Safe to call repeatedly:
+// destroy() is invoked first.
+Void TEncRCSeq::create( Int totalFrames, Int targetBitrate, Int frameRate, Int GOPSize, Int picWidth, Int picHeight, Int LCUWidth, Int LCUHeight, Int numberOfLevel, Bool useLCUSeparateModel, Int adaptiveBit )
+{
+  destroy();
+  m_totalFrames         = totalFrames;
+  m_targetRate          = targetBitrate;
+  m_frameRate           = frameRate;
+  m_GOPSize             = GOPSize;
+  m_picWidth            = picWidth;
+  m_picHeight           = picHeight;
+  m_LCUWidth            = LCUWidth;
+  m_LCUHeight           = LCUHeight;
+  m_numberOfLevel       = numberOfLevel;
+  m_useLCUSeparateModel = useLCUSeparateModel;
+
+  m_numberOfPixel   = m_picWidth * m_picHeight;
+  m_targetBits      = (Int64)m_totalFrames * (Int64)m_targetRate / (Int64)m_frameRate;
+  m_seqTargetBpp = (Double)m_targetRate / (Double)m_frameRate / (Double)m_numberOfPixel;
+  // Lower target bpp -> smaller alpha/beta adaptation steps (more cautious
+  // model updates at low rates); thresholds are empirical constants from the
+  // R-lambda rate-control scheme.
+  if ( m_seqTargetBpp < 0.03 )
+  {
+    m_alphaUpdate = 0.01;
+    m_betaUpdate  = 0.005;
+  }
+  else if ( m_seqTargetBpp < 0.08 )
+  {
+    m_alphaUpdate = 0.05;
+    m_betaUpdate  = 0.025;
+  }
+  else if ( m_seqTargetBpp < 0.2 )
+  {
+    m_alphaUpdate = 0.1;
+    m_betaUpdate  = 0.05;
+  }
+  else if ( m_seqTargetBpp < 0.5 )
+  {
+    m_alphaUpdate = 0.2;
+    m_betaUpdate  = 0.1;
+  }
+  else
+  {
+    m_alphaUpdate = 0.4;
+    m_betaUpdate  = 0.2;
+  }
+
+  m_averageBits     = (Int)(m_targetBits / totalFrames);
+  // Picture size in LCU units, rounding up for partial LCUs at the borders.
+  Int picWidthInBU  = ( m_picWidth  % m_LCUWidth  ) == 0 ? m_picWidth  / m_LCUWidth  : m_picWidth  / m_LCUWidth  + 1;
+  Int picHeightInBU = ( m_picHeight % m_LCUHeight ) == 0 ? m_picHeight / m_LCUHeight : m_picHeight / m_LCUHeight + 1;
+  m_numberOfLCU     = picWidthInBU * picHeightInBU;
+
+  // Per-GOP-position bit ratios; real values come later via initBitsRatio().
+  m_bitsRatio   = new Int[m_GOPSize];
+  for ( Int i=0; i<m_GOPSize; i++ )
+  {
+    m_bitsRatio[i] = 1;
+  }
+
+  // GOP position -> hierarchy level mapping; filled by initGOPID2Level().
+  m_GOPID2Level = new Int[m_GOPSize];
+  for ( Int i=0; i<m_GOPSize; i++ )
+  {
+    m_GOPID2Level[i] = 1;
+  }
+
+  // Per-level R-lambda model parameters; real values come via initPicPara().
+  m_picPara = new TRCParameter[m_numberOfLevel];
+  for ( Int i=0; i<m_numberOfLevel; i++ )
+  {
+    m_picPara[i].m_alpha = 0.0;
+    m_picPara[i].m_beta  = 0.0;
+  }
+
+  if ( m_useLCUSeparateModel )
+  {
+    // Optional per-LCU model parameters, one table per level.
+    m_LCUPara = new TRCParameter*[m_numberOfLevel];
+    for ( Int i=0; i<m_numberOfLevel; i++ )
+    {
+      m_LCUPara[i] = new TRCParameter[m_numberOfLCU];
+      for ( Int j=0; j<m_numberOfLCU; j++)
+      {
+        m_LCUPara[i][j].m_alpha = 0.0;
+        m_LCUPara[i][j].m_beta  = 0.0;
+      }
+    }
+  }
+
+  m_framesLeft = m_totalFrames;
+  m_bitsLeft   = m_targetBits;
+  m_adaptiveBit = adaptiveBit;
+  m_lastLambda = 0.0;
+}
+
+// Free all arrays owned by this object and reset the pointers so that
+// destroy() is idempotent and create() can be called again.
+Void TEncRCSeq::destroy()
+{
+  if (m_bitsRatio != NULL)
+  {
+    delete[] m_bitsRatio;
+    m_bitsRatio = NULL;
+  }
+
+  if ( m_GOPID2Level != NULL )
+  {
+    delete[] m_GOPID2Level;
+    m_GOPID2Level = NULL;
+  }
+
+  if ( m_picPara != NULL )
+  {
+    delete[] m_picPara;
+    m_picPara = NULL;
+  }
+
+  if ( m_LCUPara != NULL )
+  {
+    // m_LCUPara is a per-level array of per-LCU tables; free inner rows first.
+    for ( Int i=0; i<m_numberOfLevel; i++ )
+    {
+      delete[] m_LCUPara[i];
+    }
+    delete[] m_LCUPara;
+    m_LCUPara = NULL;
+  }
+}
+
+// Copy the caller-supplied per-GOP-position bit ratios into the sequence
+// state. The array must hold at least m_GOPSize entries.
+Void TEncRCSeq::initBitsRatio( Int bitsRatio[])
+{
+  for (Int i=0; i<m_GOPSize; i++)
+  {
+    m_bitsRatio[i] = bitsRatio[i];
+  }
+}
+
+// Copy the caller-supplied GOP-position -> hierarchy-level mapping.
+// The array must hold at least m_GOPSize entries.
+Void TEncRCSeq::initGOPID2Level( Int GOPID2Level[] )
+{
+  for ( Int i=0; i<m_GOPSize; i++ )
+  {
+    m_GOPID2Level[i] = GOPID2Level[i];
+  }
+}
+
+// Initialize per-level picture model parameters. When picPara is NULL,
+// built-in defaults are used: level 0 gets the ALPHA/BETA2 constants
+// (declared elsewhere), higher levels get empirical inter values.
+Void TEncRCSeq::initPicPara( TRCParameter* picPara )
+{
+  assert( m_picPara != NULL );
+
+  if ( picPara == NULL )
+  {
+    for ( Int i=0; i<m_numberOfLevel; i++ )
+    {
+      if (i>0)
+      {
+        m_picPara[i].m_alpha = 3.2003;
+        m_picPara[i].m_beta  = -1.367;
+      }
+      else
+      {
+        m_picPara[i].m_alpha = ALPHA;
+        m_picPara[i].m_beta  = BETA2;
+      }
+    }
+  }
+  else
+  {
+    for ( Int i=0; i<m_numberOfLevel; i++ )
+    {
+      m_picPara[i] = picPara[i];
+    }
+  }
+}
+
+// Initialize per-LCU model parameters. No-op when the LCU-separate model is
+// disabled (m_LCUPara == NULL). When LCUPara is NULL, each LCU inherits its
+// level's picture parameters; otherwise values are copied element-wise.
+Void TEncRCSeq::initLCUPara( TRCParameter** LCUPara )
+{
+  if ( m_LCUPara == NULL )
+  {
+    return;
+  }
+  if ( LCUPara == NULL )
+  {
+    for ( Int i=0; i<m_numberOfLevel; i++ )
+    {
+      for ( Int j=0; j<m_numberOfLCU; j++)
+      {
+        m_LCUPara[i][j].m_alpha = m_picPara[i].m_alpha;
+        m_LCUPara[i][j].m_beta  = m_picPara[i].m_beta;
+      }
+    }
+  }
+  else
+  {
+    for ( Int i=0; i<m_numberOfLevel; i++ )
+    {
+      for ( Int j=0; j<m_numberOfLCU; j++)
+      {
+        m_LCUPara[i][j] = LCUPara[i][j];
+      }
+    }
+  }
+}
+
+// Book-keeping after a picture is coded: consume its bits from the sequence
+// budget and decrement the remaining-frame counter.
+Void TEncRCSeq::updateAfterPic ( Int bits )
+{
+  m_bitsLeft -= bits;
+  m_framesLeft--;
+}
+
+// Recompute all per-GOP-position bit ratios from the solved basic lambda and
+// the per-position R-lambda equation coefficients (see TEncRCGOP::xSolveEqua).
+Void TEncRCSeq::setAllBitRatio( Double basicLambda, Double* equaCoeffA, Double* equaCoeffB )
+{
+  Int* bitsRatio = new Int[m_GOPSize];
+  for ( Int i=0; i<m_GOPSize; i++ )
+  {
+    bitsRatio[i] = (Int)( equaCoeffA[i] * pow( basicLambda, equaCoeffB[i] ) * m_numberOfPixel );
+  }
+  initBitsRatio( bitsRatio );
+  delete[] bitsRatio;
+}
+
+//GOP level
+// GOP-level rate-control state: zeroed here, populated by create().
+TEncRCGOP::TEncRCGOP()
+{
+  m_encRCSeq  = NULL;
+  m_picTargetBitInGOP = NULL;
+  m_numPic     = 0;
+  m_targetBits = 0;
+  m_picLeft    = 0;
+  m_bitsLeft   = 0;
+}
+
+// Destructor frees the per-picture target-bit array via destroy().
+TEncRCGOP::~TEncRCGOP()
+{
+  destroy();
+}
+
+// Set up rate control for one GOP: estimate the GOP bit budget, optionally
+// re-derive the per-position bit ratios from an adaptive lambda model, then
+// distribute the budget across pictures proportionally to those ratios.
+// Does not take ownership of encRCSeq.
+Void TEncRCGOP::create( TEncRCSeq* encRCSeq, Int numPic )
+{
+  destroy();
+  Int targetBits = xEstGOPTargetBits( encRCSeq, numPic );
+
+  if ( encRCSeq->getAdaptiveBits() > 0 && encRCSeq->getLastLambda() > 0.1 )
+  {
+    Double targetBpp = (Double)targetBits / encRCSeq->getNumPixel();
+    Double basicLambda = 0.0;
+    Double* lambdaRatio = new Double[encRCSeq->getGOPSize()];
+    Double* equaCoeffA = new Double[encRCSeq->getGOPSize()];
+    Double* equaCoeffB = new Double[encRCSeq->getGOPSize()];
+
+    // NOTE(review): lambdaRatio is only filled for adaptiveBits == 1 or 2;
+    // any other positive value would leave it uninitialized below — confirm
+    // upstream that adaptiveBits is restricted to {0,1,2}.
+    if ( encRCSeq->getAdaptiveBits() == 1 )   // for GOP size =4, low delay case
+    {
+      if ( encRCSeq->getLastLambda() < 120.0 )
+      {
+        lambdaRatio[1] = 0.725 * log( encRCSeq->getLastLambda() ) + 0.5793;
+        lambdaRatio[0] = 1.3 * lambdaRatio[1];
+        lambdaRatio[2] = 1.3 * lambdaRatio[1];
+        lambdaRatio[3] = 1.0;
+      }
+      else
+      {
+        lambdaRatio[0] = 5.0;
+        lambdaRatio[1] = 4.0;
+        lambdaRatio[2] = 5.0;
+        lambdaRatio[3] = 1.0;
+      }
+    }
+    else if ( encRCSeq->getAdaptiveBits() == 2 )  // for GOP size = 8, random access case
+    {
+      if ( encRCSeq->getLastLambda() < 90.0 )
+      {
+        lambdaRatio[0] = 1.0;
+        lambdaRatio[1] = 0.725 * log( encRCSeq->getLastLambda() ) + 0.7963;
+        lambdaRatio[2] = 1.3 * lambdaRatio[1];
+        lambdaRatio[3] = 3.25 * lambdaRatio[1];
+        lambdaRatio[4] = 3.25 * lambdaRatio[1];
+        lambdaRatio[5] = 1.3  * lambdaRatio[1];
+        lambdaRatio[6] = 3.25 * lambdaRatio[1];
+        lambdaRatio[7] = 3.25 * lambdaRatio[1];
+      }
+      else
+      {
+        lambdaRatio[0] = 1.0;
+        lambdaRatio[1] = 4.0;
+        lambdaRatio[2] = 5.0;
+        lambdaRatio[3] = 12.3;
+        lambdaRatio[4] = 12.3;
+        lambdaRatio[5] = 5.0;
+        lambdaRatio[6] = 12.3;
+        lambdaRatio[7] = 12.3;
+      }
+    }
+
+    // Solve for the basic lambda that makes the summed per-position bpp hit
+    // the GOP target, then refresh the sequence-level bit ratios from it.
+    xCalEquaCoeff( encRCSeq, lambdaRatio, equaCoeffA, equaCoeffB, encRCSeq->getGOPSize() );
+    basicLambda = xSolveEqua( targetBpp, equaCoeffA, equaCoeffB, encRCSeq->getGOPSize() );
+    encRCSeq->setAllBitRatio( basicLambda, equaCoeffA, equaCoeffB );
+
+    delete []lambdaRatio;
+    delete []equaCoeffA;
+    delete []equaCoeffB;
+  }
+
+  // Allocate each picture's share of the GOP budget, proportional to its ratio.
+  m_picTargetBitInGOP = new Int[numPic];
+  Int i;
+  Int totalPicRatio = 0;
+  Int currPicRatio = 0;
+  for ( i=0; i<numPic; i++ )
+  {
+    totalPicRatio += encRCSeq->getBitRatio( i );
+  }
+  for ( i=0; i<numPic; i++ )
+  {
+    currPicRatio = encRCSeq->getBitRatio( i );
+    m_picTargetBitInGOP[i] = (Int)( ((Double)targetBits) * currPicRatio / totalPicRatio );
+  }
+
+  m_encRCSeq    = encRCSeq;
+  m_numPic       = numPic;
+  m_targetBits   = targetBits;
+  m_picLeft      = m_numPic;
+  m_bitsLeft     = m_targetBits;
+}
+
+// Derive per-position coefficients (A, B) so that the bpp of GOP position i
+// can be written as A[i] * lambda^B[i], using each position's level alpha/beta
+// from the R-lambda model and its lambda ratio relative to the basic lambda.
+Void TEncRCGOP::xCalEquaCoeff( TEncRCSeq* encRCSeq, Double* lambdaRatio, Double* equaCoeffA, Double* equaCoeffB, Int GOPSize )
+{
+  for ( Int i=0; i<GOPSize; i++ )
+  {
+    Int frameLevel = encRCSeq->getGOPID2Level(i);
+    Double alpha   = encRCSeq->getPicPara(frameLevel).m_alpha;
+    Double beta    = encRCSeq->getPicPara(frameLevel).m_beta;
+    equaCoeffA[i] = pow( 1.0/alpha, 1.0/beta ) * pow( lambdaRatio[i], 1.0/beta );
+    equaCoeffB[i] = 1.0/beta;
+  }
+}
+
+// Bisection search for the basic lambda such that
+// sum_j A[j] * lambda^B[j] == targetBpp, within [0.1, 10000].
+// g_RCIterationNum bounds the iteration count; the result is clipped to the
+// search interval regardless of convergence.
+Double TEncRCGOP::xSolveEqua( Double targetBpp, Double* equaCoeffA, Double* equaCoeffB, Int GOPSize )
+{
+  Double solution = 100.0;
+  Double minNumber = 0.1;
+  Double maxNumber = 10000.0;
+  for ( Int i=0; i<g_RCIterationNum; i++ )
+  {
+    Double fx = 0.0;
+    for ( Int j=0; j<GOPSize; j++ )
+    {
+      fx += equaCoeffA[j] * pow( solution, equaCoeffB[j] );
+    }
+
+    if ( fabs( fx - targetBpp ) < 0.000001 )
+    {
+      break;
+    }
+
+    // fx decreases with lambda (B[j] = 1/beta is negative for typical beta),
+    // so too many bits means lambda must grow: move the lower bound up.
+    if ( fx > targetBpp )
+    {
+      minNumber = solution;
+      solution = ( solution + maxNumber ) / 2.0;
+    }
+    else
+    {
+      maxNumber = solution;
+      solution = ( solution + minNumber ) / 2.0;
+    }
+  }
+
+  solution = Clip3( 0.1, 10000.0, solution );
+  return solution;
+}
+
+// Release the per-picture target-bit array; the sequence pointer is not owned.
+Void TEncRCGOP::destroy()
+{
+  m_encRCSeq = NULL;
+  if ( m_picTargetBitInGOP != NULL )
+  {
+    delete[] m_picTargetBitInGOP;
+    m_picTargetBitInGOP = NULL;
+  }
+}
+
+// Book-keeping after one picture of the GOP is coded.
+Void TEncRCGOP::updateAfterPicture( Int bitsCost )
+{
+  m_bitsLeft -= bitsCost;
+  m_picLeft--;
+}
+
+// Estimate this GOP's bit budget: smooth the sequence's remaining-bit
+// surplus/deficit over a sliding window (g_RCSmoothWindowSize frames, capped
+// by frames actually left) and scale the per-picture target by the GOP size.
+Int TEncRCGOP::xEstGOPTargetBits( TEncRCSeq* encRCSeq, Int GOPSize )
+{
+  Int realInfluencePicture = min( g_RCSmoothWindowSize, encRCSeq->getFramesLeft() );
+  Int averageTargetBitsPerPic = (Int)( encRCSeq->getTargetBits() / encRCSeq->getTotalFrames() );
+  Int currentTargetBitsPerPic = (Int)( ( encRCSeq->getBitsLeft() - averageTargetBitsPerPic * (encRCSeq->getFramesLeft() - realInfluencePicture) ) / realInfluencePicture );
+  Int targetBits = currentTargetBitsPerPic * GOPSize;
+
+  if ( targetBits < 200 )
+  {
+    targetBits = 200;   // at least allocate 200 bits for one GOP
+  }
+
+  return targetBits;
+}
+
+//picture level
+//picture level
+// Picture-level rate-control state: zeroed here, populated by create().
+TEncRCPic::TEncRCPic()
+{
+  m_encRCSeq = NULL;
+  m_encRCGOP = NULL;
+
+  m_frameLevel    = 0;
+  m_numberOfPixel = 0;
+  m_numberOfLCU   = 0;
+  m_targetBits    = 0;
+  m_estHeaderBits = 0;
+  m_estPicQP      = 0;
+  m_estPicLambda  = 0.0;
+
+  m_LCULeft       = 0;
+  m_bitsLeft      = 0;
+  m_pixelsLeft    = 0;
+
+  m_LCUs         = NULL;
+  m_picActualHeaderBits = 0;
+  m_picActualBits       = 0;
+  m_picQP               = 0;
+  m_picLambda           = 0.0;
+}
+
+// Destructor frees the per-LCU array via destroy().
+TEncRCPic::~TEncRCPic()
+{
+  destroy();
+}
+
+// Estimate this picture's bit budget: its share of the GOP's remaining bits,
+// weighted by the bit ratio of its position among the positions still to be
+// coded. Optionally blended with the static in-GOP allocation when enough
+// frames remain. Note: relies on m_encRCSeq/m_encRCGOP already being set by
+// the caller (create() assigns them before invoking this).
+Int TEncRCPic::xEstPicTargetBits( TEncRCSeq* encRCSeq, TEncRCGOP* encRCGOP )
+{
+  Int targetBits        = 0;
+  Int GOPbitsLeft       = encRCGOP->getBitsLeft();
+
+  Int i;
+  Int currPicPosition = encRCGOP->getNumPic()-encRCGOP->getPicLeft();
+  Int currPicRatio    = encRCSeq->getBitRatio( currPicPosition );
+  Int totalPicRatio   = 0;
+  for ( i=currPicPosition; i<encRCGOP->getNumPic(); i++ )
+  {
+    totalPicRatio += encRCSeq->getBitRatio( i );
+  }
+
+  targetBits  = Int( ((Double)GOPbitsLeft) * currPicRatio / totalPicRatio );
+
+  if ( targetBits < 100 )
+  {
+    targetBits = 100;   // at least allocate 100 bits for one picture
+  }
+
+  if ( m_encRCSeq->getFramesLeft() > 16 )
+  {
+    // Blend the buffer-driven estimate with the pre-computed in-GOP target.
+    // ("Rarget" in the weight name is an upstream typo, kept for linkage.)
+    targetBits = Int( g_RCWeightPicRargetBitInBuffer * targetBits + g_RCWeightPicTargetBitInGOP * m_encRCGOP->getTargetBitInGOP( currPicPosition ) );
+  }
+
+  return targetBits;
+}
+
+// Estimate the header bits of this picture as the average of the actual
+// header bits of previously coded pictures at the same hierarchy level.
+// Returns 0 when no such picture exists yet.
+Int TEncRCPic::xEstPicHeaderBits( list<TEncRCPic*>& listPreviousPictures, Int frameLevel )
+{
+  Int numPreviousPics   = 0;
+  Int totalPreviousBits = 0;
+
+  list<TEncRCPic*>::iterator it;
+  for ( it = listPreviousPictures.begin(); it != listPreviousPictures.end(); it++ )
+  {
+    if ( (*it)->getFrameLevel() == frameLevel )
+    {
+      totalPreviousBits += (*it)->getPicActualHeaderBits();
+      numPreviousPics++;
+    }
+  }
+
+  Int estHeaderBits = 0;
+  if ( numPreviousPics > 0 )
+  {
+    estHeaderBits = totalPreviousBits / numPreviousPics;
+  }
+
+  return estHeaderBits;
+}
+
+// Append this picture to the history list, evicting (and deleting) the oldest
+// entry once the list exceeds g_RCMaxPicListSize. The list takes ownership of
+// `this`. ("Lsit" is an upstream typo in the declared API name, kept so the
+// definition matches its header declaration.)
+Void TEncRCPic::addToPictureLsit( list<TEncRCPic*>& listPreviousPictures )
+{
+  if ( listPreviousPictures.size() > g_RCMaxPicListSize )
+  {
+    TEncRCPic* p = listPreviousPictures.front();
+    listPreviousPictures.pop_front();
+    p->destroy();
+    delete p;
+  }
+
+  listPreviousPictures.push_back( this );
+}
+
+// Set up rate control for one picture: estimate its target and header bits,
+// cache geometry from the sequence, and allocate/initialize the per-LCU
+// records (including each border LCU's true pixel count). Does not take
+// ownership of encRCSeq/encRCGOP.
+Void TEncRCPic::create( TEncRCSeq* encRCSeq, TEncRCGOP* encRCGOP, Int frameLevel, list<TEncRCPic*>& listPreviousPictures )
+{
+  destroy();
+  m_encRCSeq = encRCSeq;
+  m_encRCGOP = encRCGOP;
+
+  Int targetBits    = xEstPicTargetBits( encRCSeq, encRCGOP );
+  Int estHeaderBits = xEstPicHeaderBits( listPreviousPictures, frameLevel );
+
+  if ( targetBits < estHeaderBits + 100 )
+  {
+    targetBits = estHeaderBits + 100;   // at least allocate 100 bits for picture data
+  }
+
+  m_frameLevel       = frameLevel;
+  m_numberOfPixel    = encRCSeq->getNumPixel();
+  m_numberOfLCU      = encRCSeq->getNumberOfLCU();
+  m_estPicLambda     = 100.0;
+  m_targetBits       = targetBits;
+  m_estHeaderBits    = estHeaderBits;
+  m_bitsLeft         = m_targetBits;
+  Int picWidth       = encRCSeq->getPicWidth();
+  Int picHeight      = encRCSeq->getPicHeight();
+  Int LCUWidth       = encRCSeq->getLCUWidth();
+  Int LCUHeight      = encRCSeq->getLCUHeight();
+  // Picture size in LCU units, rounding up for partial border LCUs.
+  Int picWidthInLCU  = ( picWidth  % LCUWidth  ) == 0 ? picWidth  / LCUWidth  : picWidth  / LCUWidth  + 1;
+  Int picHeightInLCU = ( picHeight % LCUHeight ) == 0 ? picHeight / LCUHeight : picHeight / LCUHeight + 1;
+
+  m_LCULeft         = m_numberOfLCU;
+  m_bitsLeft       -= m_estHeaderBits;   // data budget excludes header bits
+  m_pixelsLeft      = m_numberOfPixel;
+
+  m_LCUs           = new TRCLCU[m_numberOfLCU];
+  Int i, j;
+  Int LCUIdx;
+  for ( i=0; i<picWidthInLCU; i++ )
+  {
+    for ( j=0; j<picHeightInLCU; j++ )
+    {
+      LCUIdx = j*picWidthInLCU + i;
+      m_LCUs[LCUIdx].m_actualBits = 0;
+      m_LCUs[LCUIdx].m_QP         = 0;
+      m_LCUs[LCUIdx].m_lambda     = 0.0;
+      m_LCUs[LCUIdx].m_targetBits = 0;
+      m_LCUs[LCUIdx].m_bitWeight  = 1.0;
+      // Border LCUs may be smaller than LCUWidth x LCUHeight.
+      Int currWidth  = ( (i == picWidthInLCU -1) ? picWidth  - LCUWidth *(picWidthInLCU -1) : LCUWidth  );
+      Int currHeight = ( (j == picHeightInLCU-1) ? picHeight - LCUHeight*(picHeightInLCU-1) : LCUHeight );
+      m_LCUs[LCUIdx].m_numberOfPixel = currWidth * currHeight;
+    }
+  }
+  m_picActualHeaderBits = 0;
+  m_picActualBits       = 0;
+  m_picQP               = 0;
+  m_picLambda           = 0.0;
+}
+
+// Free the per-LCU array and detach the (non-owned) sequence/GOP pointers.
+Void TEncRCPic::destroy()
+{
+  if( m_LCUs != NULL )
+  {
+    delete[] m_LCUs;
+    m_LCUs = NULL;
+  }
+  m_encRCSeq = NULL;
+  m_encRCGOP = NULL;
+}
+
+
+// Estimate the picture-level lambda from the R-lambda model (intra pictures
+// use the intra cost / bpp variant), clip it against the lambdas of recently
+// coded pictures to limit quality fluctuation, then distribute the picture's
+// target bits across LCUs as per-LCU bit weights. Returns the clipped lambda
+// and stores it in m_estPicLambda.
+Double TEncRCPic::estimatePicLambda( list<TEncRCPic*>& listPreviousPictures, SliceType eSliceType)
+{
+  Double alpha         = m_encRCSeq->getPicPara( m_frameLevel ).m_alpha;
+  Double beta          = m_encRCSeq->getPicPara( m_frameLevel ).m_beta;
+  Double bpp       = (Double)m_targetBits/(Double)m_numberOfPixel;
+  Double estLambda;
+  if (eSliceType == I_SLICE)
+  {
+    estLambda = calculateLambdaIntra(alpha, beta, pow(m_totalCostIntra/(Double)m_numberOfPixel, BETA1), bpp);
+  }
+  else
+  {
+    estLambda = alpha * pow( bpp, beta );
+  }
+
+  // Scan the history: the last picture at this level, the very last picture,
+  // and the last picture with a valid (positive) lambda.
+  Double lastLevelLambda = -1.0;
+  Double lastPicLambda   = -1.0;
+  Double lastValidLambda = -1.0;
+  list<TEncRCPic*>::iterator it;
+  for ( it = listPreviousPictures.begin(); it != listPreviousPictures.end(); it++ )
+  {
+    if ( (*it)->getFrameLevel() == m_frameLevel )
+    {
+      lastLevelLambda = (*it)->getPicActualLambda();
+    }
+    lastPicLambda     = (*it)->getPicActualLambda();
+
+    if ( lastPicLambda > 0.0 )
+    {
+      lastValidLambda = lastPicLambda;
+    }
+  }
+
+  // Limit lambda drift: +/-1 QP-equivalent vs. same level, +/-10/3 vs. the
+  // previous picture (2^(x/3) corresponds to x QP steps).
+  if ( lastLevelLambda > 0.0 )
+  {
+    lastLevelLambda = Clip3( 0.1, 10000.0, lastLevelLambda );
+    estLambda = Clip3( lastLevelLambda * pow( 2.0, -3.0/3.0 ), lastLevelLambda * pow( 2.0, 3.0/3.0 ), estLambda );
+  }
+
+  if ( lastPicLambda > 0.0 )
+  {
+    lastPicLambda = Clip3( 0.1, 2000.0, lastPicLambda );
+    estLambda = Clip3( lastPicLambda * pow( 2.0, -10.0/3.0 ), lastPicLambda * pow( 2.0, 10.0/3.0 ), estLambda );
+  }
+  else if ( lastValidLambda > 0.0 )
+  {
+    lastValidLambda = Clip3( 0.1, 2000.0, lastValidLambda );
+    estLambda = Clip3( lastValidLambda * pow(2.0, -10.0/3.0), lastValidLambda * pow(2.0, 10.0/3.0), estLambda );
+  }
+  else
+  {
+    estLambda = Clip3( 0.1, 10000.0, estLambda );
+  }
+
+  if ( estLambda < 0.1 )
+  {
+    estLambda = 0.1;
+  }
+
+  m_estPicLambda = estLambda;
+
+  Double totalWeight = 0.0;
+  // initial BU bit allocation weight
+  for ( Int i=0; i<m_numberOfLCU; i++ )
+  {
+    Double alphaLCU, betaLCU;
+    if ( m_encRCSeq->getUseLCUSeparateModel() )
+    {
+      alphaLCU = m_encRCSeq->getLCUPara( m_frameLevel, i ).m_alpha;
+      betaLCU  = m_encRCSeq->getLCUPara( m_frameLevel, i ).m_beta;
+    }
+    else
+    {
+      alphaLCU = m_encRCSeq->getPicPara( m_frameLevel ).m_alpha;
+      betaLCU  = m_encRCSeq->getPicPara( m_frameLevel ).m_beta;
+    }
+
+    m_LCUs[i].m_bitWeight =  m_LCUs[i].m_numberOfPixel * pow( estLambda/alphaLCU, 1.0/betaLCU );
+
+    if ( m_LCUs[i].m_bitWeight < 0.01 )
+    {
+      m_LCUs[i].m_bitWeight = 0.01;
+    }
+    totalWeight += m_LCUs[i].m_bitWeight;
+  }
+  // Normalize weights into absolute per-LCU bit targets.
+  for ( Int i=0; i<m_numberOfLCU; i++ )
+  {
+    Double BUTargetBits = m_targetBits * m_LCUs[i].m_bitWeight / totalWeight;
+    m_LCUs[i].m_bitWeight = BUTargetBits;
+  }
+
+  return estLambda;
+}
+
+// Map the estimated picture lambda to a QP via the standard
+// QP = 4.2005*ln(lambda) + 13.7122 relation (+0.5 for rounding), then clip it
+// against the QPs of previously coded pictures: +/-3 vs. the same level,
+// +/-10 vs. the last (valid) picture.
+Int TEncRCPic::estimatePicQP( Double lambda, list<TEncRCPic*>& listPreviousPictures )
+{
+  Int QP = Int( 4.2005 * log( lambda ) + 13.7122 + 0.5 );
+
+  Int lastLevelQP = g_RCInvalidQPValue;
+  Int lastPicQP   = g_RCInvalidQPValue;
+  Int lastValidQP = g_RCInvalidQPValue;
+  list<TEncRCPic*>::iterator it;
+  for ( it = listPreviousPictures.begin(); it != listPreviousPictures.end(); it++ )
+  {
+    if ( (*it)->getFrameLevel() == m_frameLevel )
+    {
+      lastLevelQP = (*it)->getPicActualQP();
+    }
+    lastPicQP = (*it)->getPicActualQP();
+    if ( lastPicQP > g_RCInvalidQPValue )
+    {
+      lastValidQP = lastPicQP;
+    }
+  }
+
+  if ( lastLevelQP > g_RCInvalidQPValue )
+  {
+    QP = Clip3( lastLevelQP - 3, lastLevelQP + 3, QP );
+  }
+
+  if( lastPicQP > g_RCInvalidQPValue )
+  {
+    QP = Clip3( lastPicQP - 10, lastPicQP + 10, QP );
+  }
+  else if( lastValidQP > g_RCInvalidQPValue )
+  {
+    QP = Clip3( lastValidQP - 10, lastValidQP + 10, QP );
+  }
+
+  return QP;
+}
+
+// Compute the target bits-per-pixel for the next LCU to be coded.
+// Intra: allocate proportionally to the LCU's intra cost (MAD) over the
+// remaining intra cost, smoothed over a small bitrate window. Inter: use the
+// pre-computed per-LCU weight, corrected by the current bit surplus/deficit
+// over a smoothing window. Side effects: records the LCU's target bits and,
+// for intra, consumes its cost from m_remainingCostIntra.
+Double TEncRCPic::getLCUTargetBpp(SliceType eSliceType)
+{
+  Int   LCUIdx    = getLCUCoded();
+  Double bpp      = -1.0;
+  Int avgBits     = 0;
+
+  if (eSliceType == I_SLICE)
+  {
+    Int noOfLCUsLeft = m_numberOfLCU - LCUIdx + 1;
+    Int bitrateWindow = min(4,noOfLCUsLeft);
+    Double MAD      = getLCU(LCUIdx).m_costIntra;
+
+    if (m_remainingCostIntra > 0.1 )
+    {
+      Double weightedBitsLeft = (m_bitsLeft*bitrateWindow+(m_bitsLeft-getLCU(LCUIdx).m_targetBitsLeft)*noOfLCUsLeft)/(Double)bitrateWindow;
+      avgBits = Int( MAD*weightedBitsLeft/m_remainingCostIntra );
+    }
+    else
+    {
+      // No intra cost left to distribute: fall back to an even split.
+      avgBits = Int( m_bitsLeft / m_LCULeft );
+    }
+    m_remainingCostIntra -= MAD;
+  }
+  else
+  {
+    Double totalWeight = 0;
+    for ( Int i=LCUIdx; i<m_numberOfLCU; i++ )
+    {
+      totalWeight += m_LCUs[i].m_bitWeight;
+    }
+    Int realInfluenceLCU = min( g_RCLCUSmoothWindowSize, getLCULeft() );
+    avgBits = (Int)( m_LCUs[LCUIdx].m_bitWeight - ( totalWeight - m_bitsLeft ) / realInfluenceLCU + 0.5 );
+  }
+
+  if ( avgBits < 1 )
+  {
+    avgBits = 1;
+  }
+
+  bpp = ( Double )avgBits/( Double )m_LCUs[ LCUIdx ].m_numberOfPixel;
+  m_LCUs[ LCUIdx ].m_targetBits = avgBits;
+
+  return bpp;
+}
+
+// Estimate lambda for the next LCU from its target bpp via the R-lambda
+// model (per-LCU parameters if the separate model is enabled, otherwise the
+// picture-level parameters), then clip it against the most recent coded
+// LCU's lambda (+/-1/3 QP-equivalent) and the picture lambda (+/-2/3).
+Double TEncRCPic::getLCUEstLambda( Double bpp )
+{
+  Int   LCUIdx = getLCUCoded();
+  Double alpha;
+  Double beta;
+  if ( m_encRCSeq->getUseLCUSeparateModel() )
+  {
+    alpha = m_encRCSeq->getLCUPara( m_frameLevel, LCUIdx ).m_alpha;
+    beta  = m_encRCSeq->getLCUPara( m_frameLevel, LCUIdx ).m_beta;
+  }
+  else
+  {
+    alpha = m_encRCSeq->getPicPara( m_frameLevel ).m_alpha;
+    beta  = m_encRCSeq->getPicPara( m_frameLevel ).m_beta;
+  }
+
+  Double estLambda = alpha * pow( bpp, beta );
+  //for Lambda clip, picture level clip
+  Double clipPicLambda = m_estPicLambda;
+
+  //for Lambda clip, LCU level clip
+  // Walk backwards to the nearest already-coded LCU with a valid lambda.
+  Double clipNeighbourLambda = -1.0;
+  for ( Int i=LCUIdx - 1; i>=0; i-- )
+  {
+    if ( m_LCUs[i].m_lambda > 0 )
+    {
+      clipNeighbourLambda = m_LCUs[i].m_lambda;
+      break;
+    }
+  }
+
+  if ( clipNeighbourLambda > 0.0 )
+  {
+    estLambda = Clip3( clipNeighbourLambda * pow( 2.0, -1.0/3.0 ), clipNeighbourLambda * pow( 2.0, 1.0/3.0 ), estLambda );
+  }
+
+  if ( clipPicLambda > 0.0 )
+  {
+    estLambda = Clip3( clipPicLambda * pow( 2.0, -2.0/3.0 ), clipPicLambda * pow( 2.0, 2.0/3.0 ), estLambda );
+  }
+  else
+  {
+    estLambda = Clip3( 10.0, 1000.0, estLambda );
+  }
+
+  if ( estLambda < 0.1 )
+  {
+    estLambda = 0.1;
+  }
+
+  return estLambda;
+}
+
+// Map the LCU lambda to a QP (same lambda->QP relation as estimatePicQP),
+// clipped to +/-1 of the nearest previously coded LCU's QP and +/-2 of the
+// picture QP.
+Int TEncRCPic::getLCUEstQP( Double lambda, Int clipPicQP )
+{
+  Int LCUIdx = getLCUCoded();
+  Int estQP = Int( 4.2005 * log( lambda ) + 13.7122 + 0.5 );
+
+  //for Lambda clip, LCU level clip
+  Int clipNeighbourQP = g_RCInvalidQPValue;
+  for ( Int i=LCUIdx - 1; i>=0; i-- )
+  {
+    if ( (getLCU(i)).m_QP > g_RCInvalidQPValue )
+    {
+      clipNeighbourQP = getLCU(i).m_QP;
+      break;
+    }
+  }
+
+  if ( clipNeighbourQP > g_RCInvalidQPValue )
+  {
+    estQP = Clip3( clipNeighbourQP - 1, clipNeighbourQP + 1, estQP );
+  }
+
+  estQP = Clip3( clipPicQP - 2, clipPicQP + 2, estQP );
+
+  return estQP;
+}
+
+// Record the actual bits/QP/lambda of a coded CTU, update the picture's
+// remaining budgets, and (when requested and the LCU-separate model is on)
+// adapt that LCU's alpha/beta model parameters toward the observed
+// rate-lambda point. Degenerate observations (tiny lambda or bpp) instead
+// shrink the parameters toward zero by half the usual update step.
+Void TEncRCPic::updateAfterCTU( Int LCUIdx, Int bits, Int QP, Double lambda, Bool updateLCUParameter )
+{
+  m_LCUs[LCUIdx].m_actualBits = bits;
+  m_LCUs[LCUIdx].m_QP         = QP;
+  m_LCUs[LCUIdx].m_lambda     = lambda;
+
+  m_LCULeft--;
+  m_bitsLeft   -= bits;
+  m_pixelsLeft -= m_LCUs[LCUIdx].m_numberOfPixel;
+
+  if ( !updateLCUParameter )
+  {
+    return;
+  }
+
+  if ( !m_encRCSeq->getUseLCUSeparateModel() )
+  {
+    return;
+  }
+
+  Double alpha = m_encRCSeq->getLCUPara( m_frameLevel, LCUIdx ).m_alpha;
+  Double beta  = m_encRCSeq->getLCUPara( m_frameLevel, LCUIdx ).m_beta;
+
+  Int LCUActualBits   = m_LCUs[LCUIdx].m_actualBits;
+  Int LCUTotalPixels  = m_LCUs[LCUIdx].m_numberOfPixel;
+  Double bpp         = ( Double )LCUActualBits/( Double )LCUTotalPixels;
+  Double calLambda   = alpha * pow( bpp, beta );
+  Double inputLambda = m_LCUs[LCUIdx].m_lambda;
+
+  if( inputLambda < 0.01 || calLambda < 0.01 || bpp < 0.0001 )
+  {
+    // Unreliable data point: decay the parameters instead of fitting to it.
+    alpha *= ( 1.0 - m_encRCSeq->getAlphaUpdate() / 2.0 );
+    beta  *= ( 1.0 - m_encRCSeq->getBetaUpdate() / 2.0 );
+
+    alpha = Clip3( g_RCAlphaMinValue, g_RCAlphaMaxValue, alpha );
+    beta  = Clip3( g_RCBetaMinValue,  g_RCBetaMaxValue,  beta  );
+
+    TRCParameter rcPara;
+    rcPara.m_alpha = alpha;
+    rcPara.m_beta  = beta;
+    m_encRCSeq->setLCUPara( m_frameLevel, LCUIdx, rcPara );
+
+    return;
+  }
+
+  // Gradient-style update in log domain, with the model lambda clipped to
+  // within a factor of 10 of the lambda actually used.
+  calLambda = Clip3( inputLambda / 10.0, inputLambda * 10.0, calLambda );
+  alpha += m_encRCSeq->getAlphaUpdate() * ( log( inputLambda ) - log( calLambda ) ) * alpha;
+  Double lnbpp = log( bpp );
+  lnbpp = Clip3( -5.0, -0.1, lnbpp );
+  beta  += m_encRCSeq->getBetaUpdate() * ( log( inputLambda ) - log( calLambda ) ) * lnbpp;
+
+  alpha = Clip3( g_RCAlphaMinValue, g_RCAlphaMaxValue, alpha );
+  beta  = Clip3( g_RCBetaMinValue,  g_RCBetaMaxValue,  beta  );
+
+  TRCParameter rcPara;
+  rcPara.m_alpha = alpha;
+  rcPara.m_beta  = beta;
+  m_encRCSeq->setLCUPara( m_frameLevel, LCUIdx, rcPara );
+
+}
+
+// Arithmetic mean of the QPs of all LCUs coded with a positive QP;
+// returns g_RCInvalidQPValue when none qualify.
+Double TEncRCPic::calAverageQP()
+{
+  Int totalQPs = 0;
+  Int numTotalLCUs = 0;
+
+  Int i;
+  for ( i=0; i<m_numberOfLCU; i++ )
+  {
+    if ( m_LCUs[i].m_QP > 0 )
+    {
+      totalQPs += m_LCUs[i].m_QP;
+      numTotalLCUs++;
+    }
+  }
+
+  Double avgQP = 0.0;
+  if ( numTotalLCUs == 0 )
+  {
+    avgQP = g_RCInvalidQPValue;
+  }
+  else
+  {
+    avgQP = ((Double)totalQPs) / ((Double)numTotalLCUs);
+  }
+  return avgQP;
+}
+
+// Geometric mean of the lambdas of all LCUs with lambda > 0.01, computed as
+// exp(mean(log lambda)) with e approximated by the literal 2.7183;
+// returns -1.0 when none qualify.
+Double TEncRCPic::calAverageLambda()
+{
+  Double totalLambdas = 0.0;
+  Int numTotalLCUs = 0;
+
+  Int i;
+  for ( i=0; i<m_numberOfLCU; i++ )
+  {
+    if ( m_LCUs[i].m_lambda > 0.01 )
+    {
+      totalLambdas += log( m_LCUs[i].m_lambda );
+      numTotalLCUs++;
+    }
+  }
+
+  Double avgLambda;
+  if( numTotalLCUs == 0 )
+  {
+    avgLambda = -1.0;
+  }
+  else
+  {
+    avgLambda = pow( 2.7183, totalLambdas / numTotalLCUs );
+  }
+  return avgLambda;
+}
+
+
+// Record the picture's actual coding results and adapt the picture-level
+// alpha/beta model parameters for this frame level (intra pictures use the
+// dedicated intra update). For level-1 pictures, also blend the coded lambda
+// into the sequence's "last lambda" used by adaptive GOP bit allocation.
+Void TEncRCPic::updateAfterPicture( Int actualHeaderBits, Int actualTotalBits, Double averageQP, Double averageLambda, SliceType eSliceType)
+{
+  m_picActualHeaderBits = actualHeaderBits;
+  m_picActualBits       = actualTotalBits;
+  if ( averageQP > 0.0 )
+  {
+    m_picQP             = Int( averageQP + 0.5 );
+  }
+  else
+  {
+    m_picQP             = g_RCInvalidQPValue;
+  }
+  m_picLambda           = averageLambda;
+
+  Double alpha = m_encRCSeq->getPicPara( m_frameLevel ).m_alpha;
+  Double beta  = m_encRCSeq->getPicPara( m_frameLevel ).m_beta;
+
+  if (eSliceType == I_SLICE)
+  {
+    updateAlphaBetaIntra(&alpha, &beta);
+  }
+  else
+  {
+    // update parameters
+    Double picActualBits = ( Double )m_picActualBits;
+    Double picActualBpp  = picActualBits/(Double)m_numberOfPixel;
+    Double calLambda     = alpha * pow( picActualBpp, beta );
+    Double inputLambda   = m_picLambda;
+
+    if ( inputLambda < 0.01 || calLambda < 0.01 || picActualBpp < 0.0001 )
+    {
+      // Unreliable data point: decay the parameters instead of fitting to it.
+      alpha *= ( 1.0 - m_encRCSeq->getAlphaUpdate() / 2.0 );
+      beta  *= ( 1.0 - m_encRCSeq->getBetaUpdate() / 2.0 );
+
+      alpha = Clip3( g_RCAlphaMinValue, g_RCAlphaMaxValue, alpha );
+      beta  = Clip3( g_RCBetaMinValue,  g_RCBetaMaxValue,  beta  );
+
+      TRCParameter rcPara;
+      rcPara.m_alpha = alpha;
+      rcPara.m_beta  = beta;
+      m_encRCSeq->setPicPara( m_frameLevel, rcPara );
+
+      return;
+    }
+
+    // Gradient-style update in log domain (same scheme as updateAfterCTU).
+    calLambda = Clip3( inputLambda / 10.0, inputLambda * 10.0, calLambda );
+    alpha += m_encRCSeq->getAlphaUpdate() * ( log( inputLambda ) - log( calLambda ) ) * alpha;
+    Double lnbpp = log( picActualBpp );
+    lnbpp = Clip3( -5.0, -0.1, lnbpp );
+
+    beta  += m_encRCSeq->getBetaUpdate() * ( log( inputLambda ) - log( calLambda ) ) * lnbpp;
+
+    alpha = Clip3( g_RCAlphaMinValue, g_RCAlphaMaxValue, alpha );
+    beta  = Clip3( g_RCBetaMinValue,  g_RCBetaMaxValue,  beta  );
+  }
+
+  TRCParameter rcPara;
+  rcPara.m_alpha = alpha;
+  rcPara.m_beta  = beta;
+
+  m_encRCSeq->setPicPara( m_frameLevel, rcPara );
+
+  if ( m_frameLevel == 1 )
+  {
+    Double currLambda = Clip3( 0.1, 10000.0, m_picLambda );
+    Double updateLastLambda = g_RCWeightHistoryLambda * m_encRCSeq->getLastLambda() + g_RCWeightCurrentLambda * currLambda;
+    m_encRCSeq->setLastLambda( updateLastLambda );
+  }
+}
+
+// Refine an intra picture's bit budget using the picture's total intra cost:
+// scaled-power model with a slightly larger alpha when the original budget
+// exceeds 1/40 bit per pixel (empirical constants).
+Int TEncRCPic::getRefineBitsForIntra( Int orgBits )
+{
+  Double alpha=0.25, beta=0.5582;
+  Int iIntraBits;
+
+  if (orgBits*40 < m_numberOfPixel)
+  {
+    alpha=0.25;
+  }
+  else
+  {
+    alpha=0.30;
+  }
+
+  iIntraBits = (Int)(alpha* pow(m_totalCostIntra*4.0/(Double)orgBits, beta)*(Double)orgBits+0.5);
+
+  return iIntraBits;
+}
+
+// Intra R-lambda model: lambda = (alpha/256) * (MAD_per_pixel / bpp)^beta.
+Double TEncRCPic::calculateLambdaIntra(Double alpha, Double beta, Double MADPerPixel, Double bitsPerPixel)
+{
+  return ( (alpha/256.0) * pow( MADPerPixel/bitsPerPixel, beta ) );
+}
+
+// Adapt the intra model parameters (in place) from the gap between actual
+// and target picture bits, with the correction clipped to +/-0.125.
+Void TEncRCPic::updateAlphaBetaIntra(Double *alpha, Double *beta)
+{
+  Double lnbpp = log(pow(m_totalCostIntra / (Double)m_numberOfPixel, BETA1));
+  Double diffLambda = (*beta)*(log((Double)m_picActualBits)-log((Double)m_targetBits));
+
+  diffLambda = Clip3(-0.125, 0.125, 0.25*diffLambda);
+  *alpha    =  (*alpha) * exp(diffLambda);
+  *beta     =  (*beta) + diffLambda / lnbpp;
+}
+
+
+// Initialize each LCU's cumulative target-bits-left for intra coding:
+// iterating from the last LCU backwards, m_targetBitsLeft accumulates the
+// cost-proportional share of the picture budget for that LCU and all after it.
+Void TEncRCPic::getLCUInitTargetBits()
+{
+  Int iAvgBits     = 0;
+
+  m_remainingCostIntra = m_totalCostIntra;
+  for (Int i=m_numberOfLCU-1; i>=0; i--)
+  {
+    iAvgBits += Int(m_targetBits * getLCU(i).m_costIntra/m_totalCostIntra);
+    getLCU(i).m_targetBitsLeft = iAvgBits;
+  }
+}
+
+
+// Intra-slice LCU estimation: derive lambda from the LCU's intra cost per
+// pixel and target bpp, constrain it so the implied QP stays within +/-2 of
+// the picture QP and +/-1 of the nearest coded neighbour's QP, and output the
+// matching clipped QP through *estQP. Returns the clipped lambda.
+Double TEncRCPic::getLCUEstLambdaAndQP(Double bpp, Int clipPicQP, Int *estQP)
+{
+  Int   LCUIdx = getLCUCoded();
+
+  Double   alpha = m_encRCSeq->getPicPara( m_frameLevel ).m_alpha;
+  Double   beta  = m_encRCSeq->getPicPara( m_frameLevel ).m_beta;
+
+  Double costPerPixel = getLCU(LCUIdx).m_costIntra/(Double)getLCU(LCUIdx).m_numberOfPixel;
+  costPerPixel = pow(costPerPixel, BETA1);
+  Double estLambda = calculateLambdaIntra(alpha, beta, costPerPixel, bpp);
+
+  // Nearest already-coded LCU with a valid QP, scanning backwards.
+  Int clipNeighbourQP = g_RCInvalidQPValue;
+  for (Int i=LCUIdx-1; i>=0; i--)
+  {
+    if ((getLCU(i)).m_QP > g_RCInvalidQPValue)
+    {
+      clipNeighbourQP = getLCU(i).m_QP;
+      break;
+    }
+  }
+
+  Int minQP = clipPicQP - 2;
+  Int maxQP = clipPicQP + 2;
+
+  if ( clipNeighbourQP > g_RCInvalidQPValue )
+  {
+    maxQP = min(clipNeighbourQP + 1, maxQP);
+    minQP = max(clipNeighbourQP - 1, minQP);
+  }
+
+  // Convert the QP window back to a lambda window (inverse of the
+  // QP = 4.2005*ln(lambda) + 13.7122 relation, widened by ~half a QP step).
+  Double maxLambda=exp(((Double)(maxQP+0.49)-13.7122)/4.2005);
+  Double minLambda=exp(((Double)(minQP-0.49)-13.7122)/4.2005);
+
+  estLambda = Clip3(minLambda, maxLambda, estLambda);
+
+  *estQP = Int( 4.2005 * log(estLambda) + 13.7122 + 0.5 );
+  *estQP = Clip3(minQP, maxQP, *estQP);
+
+  return estLambda;
+}
+
+/** Constructor: start with no sequence-, GOP- or picture-level RC state. */
+TEncRateCtrl::TEncRateCtrl()
+{
+  m_encRCSeq = NULL;
+  m_encRCGOP = NULL;
+  m_encRCPic = NULL;
+}
+
+/** Destructor: releases all owned rate-control objects via destroy(). */
+TEncRateCtrl::~TEncRateCtrl()
+{
+  destroy();
+}
+
+/** Release the sequence and GOP rate-control objects and every picture kept
+ *  in the history list. Pointers are reset so the call is safe to repeat.
+ */
+Void TEncRateCtrl::destroy()
+{
+  // deleting a NULL pointer is a no-op, so no guards are needed
+  delete m_encRCSeq;
+  m_encRCSeq = NULL;
+
+  delete m_encRCGOP;
+  m_encRCGOP = NULL;
+
+  while ( !m_listRCPictures.empty() )
+  {
+    TEncRCPic* pic = m_listRCPictures.front();
+    m_listRCPictures.pop_front();
+    delete pic;
+  }
+}
+
+/** Initialise sequence-level rate control.
+ *  Detects the coding structure (low-delay vs. hierarchical), derives the
+ *  number of picture levels, fills per-GOP-position bit ratios and levels
+ *  (with special tables for LDB GOP4 and RA GOP8 when hierarchical bit
+ *  allocation is requested), then creates and initialises m_encRCSeq.
+ * \param keepHierBits 0: flat ratios; >0: hierarchical ratios; 2 also enables
+ *                     adaptive bit allocation
+ * \param useLCUSeparateModel use separate R-lambda model per LCU
+ */
+Void TEncRateCtrl::init( Int totalFrames, Int targetBitrate, Int frameRate, Int GOPSize, Int picWidth, Int picHeight, Int LCUWidth, Int LCUHeight, Int keepHierBits, Bool useLCUSeparateModel, GOPEntry  GOPList[MAX_GOP] )
+{
+  destroy();
+
+  // low-delay <=> POCs inside the GOP are monotonically increasing
+  Bool isLowdelay = true;
+  for ( Int i=0; i<GOPSize-1; i++ )
+  {
+    if ( GOPList[i].m_POC > GOPList[i+1].m_POC )
+    {
+      isLowdelay = false;
+      break;
+    }
+  }
+
+  Int numberOfLevel = 1;
+  Int adaptiveBit = 0;
+  if ( keepHierBits > 0 )
+  {
+    numberOfLevel = Int( log((Double)GOPSize)/log(2.0) + 0.5 ) + 1;
+  }
+  if ( !isLowdelay && GOPSize == 8 )
+  {
+    numberOfLevel = Int( log((Double)GOPSize)/log(2.0) + 0.5 ) + 1;
+  }
+  numberOfLevel++;    // intra picture
+  numberOfLevel++;    // non-reference picture
+
+
+  // default flat bit ratios: 10 for reference pictures, 2 for non-reference
+  Int* bitsRatio;
+  bitsRatio = new Int[ GOPSize ];
+  for ( Int i=0; i<GOPSize; i++ )
+  {
+    bitsRatio[i] = 10;
+    if ( !GOPList[i].m_refPic )
+    {
+      bitsRatio[i] = 2;
+    }
+  }
+
+  // hierarchical bit ratios, selected by target bpp range
+  if ( keepHierBits > 0 )
+  {
+    Double bpp = (Double)( targetBitrate / (Double)( frameRate*picWidth*picHeight ) );
+    if ( GOPSize == 4 && isLowdelay )
+    {
+      if ( bpp > 0.2 )
+      {
+        bitsRatio[0] = 2;
+        bitsRatio[1] = 3;
+        bitsRatio[2] = 2;
+        bitsRatio[3] = 6;
+      }
+      else if( bpp > 0.1 )
+      {
+        bitsRatio[0] = 2;
+        bitsRatio[1] = 3;
+        bitsRatio[2] = 2;
+        bitsRatio[3] = 10;
+      }
+      else if ( bpp > 0.05 )
+      {
+        bitsRatio[0] = 2;
+        bitsRatio[1] = 3;
+        bitsRatio[2] = 2;
+        bitsRatio[3] = 12;
+      }
+      else
+      {
+        bitsRatio[0] = 2;
+        bitsRatio[1] = 3;
+        bitsRatio[2] = 2;
+        bitsRatio[3] = 14;
+      }
+
+      if ( keepHierBits == 2 )
+      {
+        adaptiveBit = 1;
+      }
+    }
+    else if ( GOPSize == 8 && !isLowdelay )
+    {
+      if ( bpp > 0.2 )
+      {
+        bitsRatio[0] = 15;
+        bitsRatio[1] = 5;
+        bitsRatio[2] = 4;
+        bitsRatio[3] = 1;
+        bitsRatio[4] = 1;
+        bitsRatio[5] = 4;
+        bitsRatio[6] = 1;
+        bitsRatio[7] = 1;
+      }
+      else if ( bpp > 0.1 )
+      {
+        bitsRatio[0] = 20;
+        bitsRatio[1] = 6;
+        bitsRatio[2] = 4;
+        bitsRatio[3] = 1;
+        bitsRatio[4] = 1;
+        bitsRatio[5] = 4;
+        bitsRatio[6] = 1;
+        bitsRatio[7] = 1;
+      }
+      else if ( bpp > 0.05 )
+      {
+        bitsRatio[0] = 25;
+        bitsRatio[1] = 7;
+        bitsRatio[2] = 4;
+        bitsRatio[3] = 1;
+        bitsRatio[4] = 1;
+        bitsRatio[5] = 4;
+        bitsRatio[6] = 1;
+        bitsRatio[7] = 1;
+      }
+      else
+      {
+        bitsRatio[0] = 30;
+        bitsRatio[1] = 8;
+        bitsRatio[2] = 4;
+        bitsRatio[3] = 1;
+        bitsRatio[4] = 1;
+        bitsRatio[5] = 4;
+        bitsRatio[6] = 1;
+        bitsRatio[7] = 1;
+      }
+
+      if ( keepHierBits == 2 )
+      {
+        adaptiveBit = 2;
+      }
+    }
+    else
+    {
+      printf( "\n hierarchical bit allocation is not support for the specified coding structure currently.\n" );
+    }
+  }
+
+  // map each GOP position to a picture level (1 = most important)
+  Int* GOPID2Level = new Int[ GOPSize ];
+  for ( Int i=0; i<GOPSize; i++ )
+  {
+    GOPID2Level[i] = 1;
+    if ( !GOPList[i].m_refPic )
+    {
+      GOPID2Level[i] = 2;
+    }
+  }
+
+  if ( keepHierBits > 0 )
+  {
+    if ( GOPSize == 4 && isLowdelay )
+    {
+      GOPID2Level[0] = 3;
+      GOPID2Level[1] = 2;
+      GOPID2Level[2] = 3;
+      GOPID2Level[3] = 1;
+    }
+    else if ( GOPSize == 8 && !isLowdelay )
+    {
+      GOPID2Level[0] = 1;
+      GOPID2Level[1] = 2;
+      GOPID2Level[2] = 3;
+      GOPID2Level[3] = 4;
+      GOPID2Level[4] = 4;
+      GOPID2Level[5] = 3;
+      GOPID2Level[6] = 4;
+      GOPID2Level[7] = 4;
+    }
+  }
+
+  // for random-access GOP8, force the hierarchical level map regardless of
+  // keepHierBits (same table as above)
+  if ( !isLowdelay && GOPSize == 8 )
+  {
+    GOPID2Level[0] = 1;
+    GOPID2Level[1] = 2;
+    GOPID2Level[2] = 3;
+    GOPID2Level[3] = 4;
+    GOPID2Level[4] = 4;
+    GOPID2Level[5] = 3;
+    GOPID2Level[6] = 4;
+    GOPID2Level[7] = 4;
+  }
+
+  m_encRCSeq = new TEncRCSeq;
+  m_encRCSeq->create( totalFrames, targetBitrate, frameRate, GOPSize, picWidth, picHeight, LCUWidth, LCUHeight, numberOfLevel, useLCUSeparateModel, adaptiveBit );
+  m_encRCSeq->initBitsRatio( bitsRatio );
+  m_encRCSeq->initGOPID2Level( GOPID2Level );
+  m_encRCSeq->initPicPara();
+  if ( useLCUSeparateModel )
+  {
+    m_encRCSeq->initLCUPara();
+  }
+
+  // the sequence object copies these tables, so the temporaries can go
+  delete[] bitsRatio;
+  delete[] GOPID2Level;
+}
+
+/** Create the rate-control object for the next picture at the given frame
+ *  level; ownership stays with this class (freed via the picture list).
+ */
+Void TEncRateCtrl::initRCPic( Int frameLevel )
+{
+  m_encRCPic = new TEncRCPic;
+  m_encRCPic->create( m_encRCSeq, m_encRCGOP, frameLevel, m_listRCPictures );
+}
+
+/** Create the rate-control object for the next GOP of the given size. */
+Void TEncRateCtrl::initRCGOP( Int numberOfPictures )
+{
+  m_encRCGOP = new TEncRCGOP;
+  m_encRCGOP->create( m_encRCSeq, numberOfPictures );
+}
+
+/** Delete the current GOP rate-control object (called at GOP end). */
+Void TEncRateCtrl::destroyRCGOP()
+{
+  delete m_encRCGOP;
+  m_encRCGOP = NULL;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncRateCtrl.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,335 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncRateCtrl.h
+    \brief    Rate control manager class
+*/
+
+#ifndef __TENCRATECTRL__
+#define __TENCRATECTRL__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+
+#include "../TLibCommon/CommonDef.h"
+#include "../TLibCommon/TComDataCU.h"
+
+#include <vector>
+#include <algorithm>
+
+using namespace std;
+
+//! \ingroup TLibEncoder
+//! \{
+
+#include "../TLibEncoder/TEncCfg.h"
+#include <list>
+#include <cassert>
+
+// Shared rate-control constants.
+const Int g_RCInvalidQPValue = -999;          // marks an LCU with no valid QP (skip mode)
+const Int g_RCSmoothWindowSize = 40;
+const Int g_RCMaxPicListSize = 32;
+const Double g_RCWeightPicTargetBitInGOP    = 0.9;
+const Double g_RCWeightPicRargetBitInBuffer = 1.0 - g_RCWeightPicTargetBitInGOP;  // NOTE: "Rarget" typo kept -- the name is referenced elsewhere
+const Int g_RCIterationNum = 20;
+const Double g_RCWeightHistoryLambda = 0.5;
+const Double g_RCWeightCurrentLambda = 1.0 - g_RCWeightHistoryLambda;
+const Int g_RCLCUSmoothWindowSize = 4;
+const Double g_RCAlphaMinValue = 0.05;        // clamp range for model parameter alpha
+const Double g_RCAlphaMaxValue = 500.0;
+const Double g_RCBetaMinValue  = -3.0;        // clamp range for model parameter beta
+const Double g_RCBetaMaxValue  = -0.1;
+
+// R-lambda model constants. The stray trailing ';' has been removed from
+// ALPHA: with it, any use of ALPHA inside an expression (e.g. "f(ALPHA)" or
+// "ALPHA * x") would expand to "6.7542;" and fail to compile, and even
+// "x = ALPHA;" silently expanded to two statements.
+#define ALPHA     6.7542
+#define BETA1     1.2517
+#define BETA2     1.7860
+
+// Per-LCU (CTU) rate-control bookkeeping.
+struct TRCLCU
+{
+  Int m_actualBits;        // bits actually produced for this LCU
+  Int m_QP;     // QP of skip mode is set to g_RCInvalidQPValue
+  Int m_targetBits;        // bit budget assigned to this LCU
+  Double m_lambda;         // lambda used when coding this LCU
+  Double m_bitWeight;      // relative bit-allocation weight
+  Int m_numberOfPixel;     // number of pixels covered by this LCU
+  Double m_costIntra;      // intra cost estimate used for intra bit allocation
+  Int m_targetBitsLeft;    // cumulative budget of this and all following LCUs
+};
+
+// Alpha/beta parameter pair of the R-lambda rate-control model
+// (see TEncRCPic::calculateLambdaIntra / updateAlphaBetaIntra).
+struct TRCParameter
+{
+  Double m_alpha;
+  Double m_beta;
+};
+
+// Sequence-level rate control: owns the per-level (and optionally per-LCU)
+// R-lambda model parameters, the per-GOP-position bit ratios/levels, and the
+// running bit budget for the whole sequence.
+class TEncRCSeq
+{
+public:
+  TEncRCSeq();
+  ~TEncRCSeq();
+
+public:
+  Void create( Int totalFrames, Int targetBitrate, Int frameRate, Int GOPSize, Int picWidth, Int picHeight, Int LCUWidth, Int LCUHeight, Int numberOfLevel, Bool useLCUSeparateModel, Int adaptiveBit );
+  Void destroy();
+  Void initBitsRatio( Int bitsRatio[] );
+  Void initGOPID2Level( Int GOPID2Level[] );
+  Void initPicPara( TRCParameter* picPara  = NULL );    // NULL to initial with default value
+  Void initLCUPara( TRCParameter** LCUPara = NULL );    // NULL to initial with default value
+  Void updateAfterPic ( Int bits );
+  Void setAllBitRatio( Double basicLambda, Double* equaCoeffA, Double* equaCoeffB );
+
+public:
+  Int  getTotalFrames()                 { return m_totalFrames; }
+  Int  getTargetRate()                  { return m_targetRate; }
+  Int  getFrameRate()                   { return m_frameRate; }
+  Int  getGOPSize()                     { return m_GOPSize; }
+  Int  getPicWidth()                    { return m_picWidth; }
+  Int  getPicHeight()                   { return m_picHeight; }
+  Int  getLCUWidth()                    { return m_LCUWidth; }
+  Int  getLCUHeight()                   { return m_LCUHeight; }
+  Int  getNumberOfLevel()               { return m_numberOfLevel; }
+  Int  getAverageBits()                 { return m_averageBits; }
+  // asserts that frames remain; average of the remaining budget per frame
+  Int  getLeftAverageBits()             { assert( m_framesLeft > 0 ); return (Int)(m_bitsLeft / m_framesLeft); }
+  Bool getUseLCUSeparateModel()         { return m_useLCUSeparateModel; }
+
+  Int  getNumPixel()                    { return m_numberOfPixel; }
+  Int64  getTargetBits()                { return m_targetBits; }
+  Int  getNumberOfLCU()                 { return m_numberOfLCU; }
+  Int* getBitRatio()                    { return m_bitsRatio; }
+  Int  getBitRatio( Int idx )           { assert( idx<m_GOPSize); return m_bitsRatio[idx]; }
+  Int* getGOPID2Level()                 { return m_GOPID2Level; }
+  Int  getGOPID2Level( Int ID )         { assert( ID < m_GOPSize ); return m_GOPID2Level[ID]; }
+  TRCParameter*  getPicPara()                                   { return m_picPara; }
+  TRCParameter   getPicPara( Int level )                        { assert( level < m_numberOfLevel ); return m_picPara[level]; }
+  Void           setPicPara( Int level, TRCParameter para )     { assert( level < m_numberOfLevel ); m_picPara[level] = para; }
+  TRCParameter** getLCUPara()                                   { return m_LCUPara; }
+  TRCParameter*  getLCUPara( Int level )                        { assert( level < m_numberOfLevel ); return m_LCUPara[level]; }
+  TRCParameter   getLCUPara( Int level, Int LCUIdx )            { assert( LCUIdx  < m_numberOfLCU ); return getLCUPara(level)[LCUIdx]; }
+  Void           setLCUPara( Int level, Int LCUIdx, TRCParameter para ) { assert( level < m_numberOfLevel ); assert( LCUIdx  < m_numberOfLCU ); m_LCUPara[level][LCUIdx] = para; }
+
+  Int  getFramesLeft()                  { return m_framesLeft; }
+  Int64  getBitsLeft()                  { return m_bitsLeft; }
+
+  Double getSeqBpp()                    { return m_seqTargetBpp; }
+  Double getAlphaUpdate()               { return m_alphaUpdate; }
+  Double getBetaUpdate()                { return m_betaUpdate; }
+
+  Int    getAdaptiveBits()              { return m_adaptiveBit;  }
+  Double getLastLambda()                { return m_lastLambda;   }
+  Void   setLastLambda( Double lamdba ) { m_lastLambda = lamdba; }
+
+private:
+  Int m_totalFrames;            // total frames to encode
+  Int m_targetRate;             // target bitrate
+  Int m_frameRate;
+  Int m_GOPSize;
+  Int m_picWidth;
+  Int m_picHeight;
+  Int m_LCUWidth;
+  Int m_LCUHeight;
+  Int m_numberOfLevel;          // number of picture importance levels
+  Int m_averageBits;
+
+  Int m_numberOfPixel;
+  Int64 m_targetBits;           // total bit budget for the sequence
+  Int m_numberOfLCU;
+  Int* m_bitsRatio;             // per-GOP-position bit ratio table (owned)
+  Int* m_GOPID2Level;           // per-GOP-position level table (owned)
+  TRCParameter*  m_picPara;     // per-level model parameters (owned)
+  TRCParameter** m_LCUPara;     // per-level, per-LCU model parameters (owned)
+
+  Int m_framesLeft;
+  Int64 m_bitsLeft;
+  Double m_seqTargetBpp;
+  Double m_alphaUpdate;         // learning rate for alpha updates
+  Double m_betaUpdate;          // learning rate for beta updates
+  Bool m_useLCUSeparateModel;
+
+  Int m_adaptiveBit;
+  Double m_lastLambda;
+};
+
+// GOP-level rate control: splits the sequence budget into a per-GOP target
+// and distributes it over the GOP's pictures.
+class TEncRCGOP
+{
+public:
+  TEncRCGOP();
+  ~TEncRCGOP();
+
+public:
+  Void create( TEncRCSeq* encRCSeq, Int numPic );
+  Void destroy();
+  Void updateAfterPicture( Int bitsCost );
+
+private:
+  Int  xEstGOPTargetBits( TEncRCSeq* encRCSeq, Int GOPSize );
+  Void   xCalEquaCoeff( TEncRCSeq* encRCSeq, Double* lambdaRatio, Double* equaCoeffA, Double* equaCoeffB, Int GOPSize );
+  Double xSolveEqua( Double targetBpp, Double* equaCoeffA, Double* equaCoeffB, Int GOPSize );
+
+public:
+  TEncRCSeq* getEncRCSeq()        { return m_encRCSeq; }
+  Int  getNumPic()                { return m_numPic;}
+  Int  getTargetBits()            { return m_targetBits; }
+  Int  getPicLeft()               { return m_picLeft; }
+  Int  getBitsLeft()              { return m_bitsLeft; }
+  Int  getTargetBitInGOP( Int i ) { return m_picTargetBitInGOP[i]; }
+
+private:
+  TEncRCSeq* m_encRCSeq;          // not owned
+  Int* m_picTargetBitInGOP;       // per-picture target bits within this GOP
+  Int m_numPic;
+  Int m_targetBits;
+  Int m_picLeft;
+  Int m_bitsLeft;
+};
+
+// Picture-level rate control: estimates lambda/QP for a picture and its LCUs,
+// tracks the remaining bit/pixel budget while coding, and updates the
+// R-lambda model afterwards.
+class TEncRCPic
+{
+public:
+  TEncRCPic();
+  ~TEncRCPic();
+
+public:
+  Void create( TEncRCSeq* encRCSeq, TEncRCGOP* encRCGOP, Int frameLevel, list<TEncRCPic*>& listPreviousPictures );
+  Void destroy();
+
+  Int    estimatePicQP    ( Double lambda, list<TEncRCPic*>& listPreviousPictures );
+  Int    getRefineBitsForIntra(Int orgBits);
+  Double calculateLambdaIntra(Double alpha, Double beta, Double MADPerPixel, Double bitsPerPixel);
+  Double estimatePicLambda( list<TEncRCPic*>& listPreviousPictures, SliceType eSliceType);
+
+  Void   updateAlphaBetaIntra(Double *alpha, Double *beta);
+
+  Double getLCUTargetBpp(SliceType eSliceType);
+  Double getLCUEstLambdaAndQP(Double bpp, Int clipPicQP, Int *estQP);
+  Double getLCUEstLambda( Double bpp );
+  Int    getLCUEstQP( Double lambda, Int clipPicQP );
+
+  Void updateAfterCTU( Int LCUIdx, Int bits, Int QP, Double lambda, Bool updateLCUParameter = true );
+  Void updateAfterPicture( Int actualHeaderBits, Int actualTotalBits, Double averageQP, Double averageLambda, SliceType eSliceType);
+
+  // (sic: "Lsit" typo is part of the public interface; renaming would break callers)
+  Void addToPictureLsit( list<TEncRCPic*>& listPreviousPictures );
+  Double calAverageQP();
+  Double calAverageLambda();
+
+private:
+  Int xEstPicTargetBits( TEncRCSeq* encRCSeq, TEncRCGOP* encRCGOP );
+  Int xEstPicHeaderBits( list<TEncRCPic*>& listPreviousPictures, Int frameLevel );
+
+public:
+  TEncRCSeq*      getRCSequence()                         { return m_encRCSeq; }
+  TEncRCGOP*      getRCGOP()                              { return m_encRCGOP; }
+
+  Int  getFrameLevel()                                    { return m_frameLevel; }
+  Int  getNumberOfPixel()                                 { return m_numberOfPixel; }
+  Int  getNumberOfLCU()                                   { return m_numberOfLCU; }
+  Int  getTargetBits()                                    { return m_targetBits; }
+  Int  getEstHeaderBits()                                 { return m_estHeaderBits; }
+  Int  getLCULeft()                                       { return m_LCULeft; }
+  Int  getBitsLeft()                                      { return m_bitsLeft; }
+  Int  getPixelsLeft()                                    { return m_pixelsLeft; }
+  Int  getBitsCoded()                                     { return m_targetBits - m_estHeaderBits - m_bitsLeft; }
+  Int  getLCUCoded()                                      { return m_numberOfLCU - m_LCULeft; }
+  TRCLCU* getLCU()                                        { return m_LCUs; }
+  TRCLCU& getLCU( Int LCUIdx )                            { return m_LCUs[LCUIdx]; }
+  Int  getPicActualHeaderBits()                           { return m_picActualHeaderBits; }
+  Void setTargetBits( Int bits )                          { m_targetBits = bits; m_bitsLeft = bits;}
+  Void setTotalIntraCost(Double cost)                     { m_totalCostIntra = cost; }
+  Void getLCUInitTargetBits();
+
+  Int  getPicActualBits()                                 { return m_picActualBits; }
+  Int  getPicActualQP()                                   { return m_picQP; }
+  Double getPicActualLambda()                             { return m_picLambda; }
+  Int  getPicEstQP()                                      { return m_estPicQP; }
+  Void setPicEstQP( Int QP )                              { m_estPicQP = QP; }
+  Double getPicEstLambda()                                { return m_estPicLambda; }
+  // NOTE(review): writes m_picLambda, not m_estPicLambda, so this setter is
+  // asymmetric with getPicEstLambda() above -- confirm this is intended
+  Void setPicEstLambda( Double lambda )                   { m_picLambda = lambda; }
+
+private:
+  TEncRCSeq* m_encRCSeq;        // not owned
+  TEncRCGOP* m_encRCGOP;        // not owned
+
+  Int m_frameLevel;
+  Int m_numberOfPixel;
+  Int m_numberOfLCU;
+  Int m_targetBits;
+  Int m_estHeaderBits;
+  Int m_estPicQP;
+  Double m_estPicLambda;
+
+  Int m_LCULeft;                // LCUs still to be coded in this picture
+  Int m_bitsLeft;
+  Int m_pixelsLeft;
+
+  TRCLCU* m_LCUs;               // per-LCU bookkeeping array (owned)
+  Int m_picActualHeaderBits;    // only SH and potential APS
+  Double m_totalCostIntra;
+  Double m_remainingCostIntra;
+  Int m_picActualBits;          // the whole picture, including header
+  Int m_picQP;                  // in integer form
+  Double m_picLambda;
+};
+
+// Top-level rate-control manager: owns the sequence, current-GOP and
+// current-picture RC objects plus the history list of coded pictures.
+class TEncRateCtrl
+{
+public:
+  TEncRateCtrl();
+  ~TEncRateCtrl();
+
+public:
+  Void init( Int totalFrames, Int targetBitrate, Int frameRate, Int GOPSize, Int picWidth, Int picHeight, Int LCUWidth, Int LCUHeight, Int keepHierBits, Bool useLCUSeparateModel, GOPEntry GOPList[MAX_GOP] );
+  Void destroy();
+  Void initRCPic( Int frameLevel );
+  Void initRCGOP( Int numberOfPictures );
+  Void destroyRCGOP();
+
+public:
+  Void       setRCQP ( Int QP ) { m_RCQP = QP;   }
+  Int        getRCQP ()         { return m_RCQP; }
+  TEncRCSeq* getRCSeq()          { assert ( m_encRCSeq != NULL ); return m_encRCSeq; }
+  TEncRCGOP* getRCGOP()          { assert ( m_encRCGOP != NULL ); return m_encRCGOP; }
+  TEncRCPic* getRCPic()          { assert ( m_encRCPic != NULL ); return m_encRCPic; }
+  list<TEncRCPic*>& getPicList() { return m_listRCPictures; }
+
+private:
+  TEncRCSeq* m_encRCSeq;               // owned, deleted in destroy()
+  TEncRCGOP* m_encRCGOP;               // owned, deleted in destroy()/destroyRCGOP()
+  TEncRCPic* m_encRCPic;               // owned via m_listRCPictures
+  list<TEncRCPic*> m_listRCPictures;   // history of coded pictures (owned)
+  Int        m_RCQP;
+};
+
+#endif
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncSampleAdaptiveOffset.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1362 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ \file     TEncSampleAdaptiveOffset.cpp
+ \brief       estimation part of sample adaptive offset class
+ */
+#include "TEncSampleAdaptiveOffset.h"
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+
+//! \ingroup TLibEncoder
+//! \{
+
+
+/** rounding with IBDI
+ * \param  x
+ */
+/** rounding with IBDI
+ *  Truncates x to Int, then rounds it to units of 1<<(bitDepth-8) with
+ *  half-away-from-zero behaviour (the +/- half-step before the division).
+ * \param  bitDepth  internal bit depth; shifts use bitDepth-8
+ * \param  x         value to round (returned as an integral-valued Double)
+ */
+inline Double xRoundIbdi2(Int bitDepth, Double x)
+{
+  return ((x)>0) ? (Int)(((Int)(x)+(1<<(bitDepth-8-1)))/(1<<(bitDepth-8))) : ((Int)(((Int)(x)-(1<<(bitDepth-8-1)))/(1<<(bitDepth-8))));
+}
+
+/** Round x to the nearest integer offset unit: plain half-away-from-zero
+ *  rounding at 8-bit depth, IBDI-scaled rounding (xRoundIbdi2) above 8 bits.
+ */
+inline Double xRoundIbdi(Int bitDepth, Double x)
+{
+  return (bitDepth > 8 ? xRoundIbdi2(bitDepth, (x)) : ((x)>=0 ? ((Int)((x)+0.5)) : ((Int)((x)-0.5)))) ;
+}
+
+
+/** Constructor: all encoder-side SAO buffers start unallocated; real
+ *  allocation happens in createEncData().
+ */
+TEncSampleAdaptiveOffset::TEncSampleAdaptiveOffset()
+{
+  m_pppcRDSbacCoder = NULL;
+  m_pcRDGoOnSbacCoder = NULL;
+  m_pppcBinCoderCABAC = NULL;
+  m_statData = NULL;
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+  m_preDBFstatData = NULL;
+#endif
+}
+
+/** Destructor: frees all encoder-side SAO buffers via destroyEncData(). */
+TEncSampleAdaptiveOffset::~TEncSampleAdaptiveOffset()
+{
+  destroyEncData();
+}
+
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+/** Allocate encoder-side SAO data: the RDO CABAC coder states, the per-CTU /
+ *  per-component / per-SAO-type statistics arrays, and the per-type skip-line
+ *  counts used when gathering statistics near CTU borders.
+ * \param isPreDBFSamplesUsed also allocate the pre-deblocking statistics and
+ *                            use tighter skip-line counts per SAO type
+ */
+Void TEncSampleAdaptiveOffset::createEncData(Bool isPreDBFSamplesUsed)
+#else
+Void TEncSampleAdaptiveOffset::createEncData()
+#endif
+{
+
+  //cabac coder for RDO
+  m_pppcRDSbacCoder = new TEncSbac* [NUM_SAO_CABACSTATE_LABELS];
+#if FAST_BIT_EST
+  m_pppcBinCoderCABAC = new TEncBinCABACCounter* [NUM_SAO_CABACSTATE_LABELS];
+#else
+  m_pppcBinCoderCABAC = new TEncBinCABAC* [NUM_SAO_CABACSTATE_LABELS];
+#endif
+
+  for(Int cs=0; cs < NUM_SAO_CABACSTATE_LABELS; cs++)
+  {
+    m_pppcRDSbacCoder[cs] = new TEncSbac;
+#if FAST_BIT_EST
+    m_pppcBinCoderCABAC[cs] = new TEncBinCABACCounter;
+#else
+    m_pppcBinCoderCABAC[cs] = new TEncBinCABAC;
+#endif
+    m_pppcRDSbacCoder   [cs]->init( m_pppcBinCoderCABAC [cs] );
+  }
+
+
+  //statistics: [CTU][component][SAO type]
+  m_statData = new SAOStatData**[m_numCTUsPic];
+  for(Int i=0; i< m_numCTUsPic; i++)
+  {
+    m_statData[i] = new SAOStatData*[MAX_NUM_COMPONENT];
+    for(Int compIdx=0; compIdx < MAX_NUM_COMPONENT; compIdx++)
+    {
+      m_statData[i][compIdx] = new SAOStatData[NUM_SAO_NEW_TYPES];
+    }
+  }
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+  if(isPreDBFSamplesUsed)
+  {
+    // same layout, gathered on pre-deblocking samples
+    m_preDBFstatData = new SAOStatData**[m_numCTUsPic];
+    for(Int i=0; i< m_numCTUsPic; i++)
+    {
+      m_preDBFstatData[i] = new SAOStatData*[MAX_NUM_COMPONENT];
+      for(Int compIdx=0; compIdx < MAX_NUM_COMPONENT; compIdx++)
+      {
+        m_preDBFstatData[i][compIdx] = new SAOStatData[NUM_SAO_NEW_TYPES];
+      }
+    }
+
+  }
+#endif
+
+#if SAO_ENCODING_CHOICE
+  ::memset(m_saoDisabledRate, 0, sizeof(m_saoDisabledRate));
+#endif
+
+  // default right/bottom skip-line counts per SAO type, then per-type
+  // overrides when pre-deblocking samples are used
+  for(Int typeIdc=0; typeIdc < NUM_SAO_NEW_TYPES; typeIdc++)
+  {
+    m_skipLinesR[COMPONENT_Y ][typeIdc]= 5;
+    m_skipLinesR[COMPONENT_Cb][typeIdc]= m_skipLinesR[COMPONENT_Cr][typeIdc]= 3;
+
+    m_skipLinesB[COMPONENT_Y ][typeIdc]= 4;
+    m_skipLinesB[COMPONENT_Cb][typeIdc]= m_skipLinesB[COMPONENT_Cr][typeIdc]= 2;
+
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+    if(isPreDBFSamplesUsed)
+    {
+      switch(typeIdc)
+      {
+      case SAO_TYPE_EO_0:
+        {
+          m_skipLinesR[COMPONENT_Y ][typeIdc]= 5;
+          m_skipLinesR[COMPONENT_Cb][typeIdc]= m_skipLinesR[COMPONENT_Cr][typeIdc]= 3;
+
+          m_skipLinesB[COMPONENT_Y ][typeIdc]= 3;
+          m_skipLinesB[COMPONENT_Cb][typeIdc]= m_skipLinesB[COMPONENT_Cr][typeIdc]= 1;
+        }
+        break;
+      case SAO_TYPE_EO_90:
+        {
+          m_skipLinesR[COMPONENT_Y ][typeIdc]= 4;
+          m_skipLinesR[COMPONENT_Cb][typeIdc]= m_skipLinesR[COMPONENT_Cr][typeIdc]= 2;
+
+          m_skipLinesB[COMPONENT_Y ][typeIdc]= 4;
+          m_skipLinesB[COMPONENT_Cb][typeIdc]= m_skipLinesB[COMPONENT_Cr][typeIdc]= 2;
+        }
+        break;
+      case SAO_TYPE_EO_135:
+      case SAO_TYPE_EO_45:
+        {
+          m_skipLinesR[COMPONENT_Y ][typeIdc]= 5;
+          m_skipLinesR[COMPONENT_Cb][typeIdc]= m_skipLinesR[COMPONENT_Cr][typeIdc]= 3;
+
+          m_skipLinesB[COMPONENT_Y ][typeIdc]= 4;
+          m_skipLinesB[COMPONENT_Cb][typeIdc]= m_skipLinesB[COMPONENT_Cr][typeIdc]= 2;
+        }
+        break;
+      case SAO_TYPE_BO:
+        {
+          m_skipLinesR[COMPONENT_Y ][typeIdc]= 4;
+          m_skipLinesR[COMPONENT_Cb][typeIdc]= m_skipLinesR[COMPONENT_Cr][typeIdc]= 2;
+
+          m_skipLinesB[COMPONENT_Y ][typeIdc]= 3;
+          m_skipLinesB[COMPONENT_Cb][typeIdc]= m_skipLinesB[COMPONENT_Cr][typeIdc]= 1;
+        }
+        break;
+      default:
+        {
+          printf("Not a supported type");
+          assert(0);
+          exit(-1);
+        }
+      }
+    }
+#endif
+  }
+
+}
+
+/** Free everything allocated by createEncData(); pointers are NULLed so a
+ *  repeated call (e.g. from the destructor) is safe.
+ */
+Void TEncSampleAdaptiveOffset::destroyEncData()
+{
+  if(m_pppcRDSbacCoder != NULL)
+  {
+    for (Int cs = 0; cs < NUM_SAO_CABACSTATE_LABELS; cs ++ )
+    {
+      delete m_pppcRDSbacCoder[cs];
+    }
+    delete[] m_pppcRDSbacCoder; m_pppcRDSbacCoder = NULL;
+  }
+
+  if(m_pppcBinCoderCABAC != NULL)
+  {
+    for (Int cs = 0; cs < NUM_SAO_CABACSTATE_LABELS; cs ++ )
+    {
+      delete m_pppcBinCoderCABAC[cs];
+    }
+    delete[] m_pppcBinCoderCABAC; m_pppcBinCoderCABAC = NULL;
+  }
+
+  if(m_statData != NULL)
+  {
+    for(Int i=0; i< m_numCTUsPic; i++)
+    {
+      for(Int compIdx=0; compIdx< MAX_NUM_COMPONENT; compIdx++)
+      {
+        delete[] m_statData[i][compIdx];
+      }
+      delete[] m_statData[i];
+    }
+    delete[] m_statData; m_statData = NULL;
+  }
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+  if(m_preDBFstatData != NULL)
+  {
+    for(Int i=0; i< m_numCTUsPic; i++)
+    {
+      for(Int compIdx=0; compIdx< MAX_NUM_COMPONENT; compIdx++)
+      {
+        delete[] m_preDBFstatData[i][compIdx];
+      }
+      delete[] m_preDBFstatData[i];
+    }
+    delete[] m_preDBFstatData; m_preDBFstatData = NULL;
+  }
+
+#endif
+}
+
+/** Attach the slice's go-on CABAC coder, reset its entropy state and bit
+ *  counter, and snapshot it as the picture-initial SAO RDO state.
+ * \param pcRDGoOnSbacCoder shared RDO CABAC coder (not owned)
+ * \param pcSlice           current slice (provides context init state)
+ */
+Void TEncSampleAdaptiveOffset::initRDOCabacCoder(TEncSbac* pcRDGoOnSbacCoder, TComSlice* pcSlice)
+{
+  m_pcRDGoOnSbacCoder = pcRDGoOnSbacCoder;
+  m_pcRDGoOnSbacCoder->setSlice(pcSlice);
+  m_pcRDGoOnSbacCoder->resetEntropy();
+  m_pcRDGoOnSbacCoder->resetBits();
+
+  m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[SAO_CABACSTATE_PIC_INIT]);
+}
+
+
+
+/** Encoder-side SAO for one picture: gather statistics on a border-extended
+ *  copy of the reconstruction, decide per-slice on/off flags, then decide and
+ *  apply per-CTU SAO parameters.
+ * \param pPic         current picture (original + reconstruction)
+ * \param sliceEnabled [out] per-component SAO enable flags
+ * \param lambdas      per-component RD lambdas, copied into m_lambda
+ */
+Void TEncSampleAdaptiveOffset::SAOProcess(TComPic* pPic, Bool* sliceEnabled, const Double *lambdas
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+                                         , Bool isPreDBFSamplesUsed
+#endif
+                                          )
+{
+  TComPicYuv* orgYuv= pPic->getPicYuvOrg();
+  TComPicYuv* resYuv= pPic->getPicYuvRec();
+  memcpy(m_lambda, lambdas, sizeof(m_lambda));
+  // work on a copy of the reconstruction with extended borders
+  TComPicYuv* srcYuv = m_tempPicYuv;
+  resYuv->copyToPic(srcYuv);
+  srcYuv->setBorderExtension(false);
+  srcYuv->extendPicBorder();
+
+  //collect statistics
+  getStatistics(m_statData, orgYuv, srcYuv, pPic);
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+  if(isPreDBFSamplesUsed)
+  {
+    addPreDBFStatistics(m_statData);
+  }
+#endif
+  //slice on/off
+  decidePicParams(sliceEnabled, pPic->getSlice(0)->getDepth());
+
+  //block on/off
+  SAOBlkParam* reconParams = new SAOBlkParam[m_numCTUsPic]; //temporary parameter buffer for storing reconstructed SAO parameters
+  decideBlkParams(pPic, sliceEnabled, m_statData, srcYuv, resYuv, reconParams, pPic->getPicSym()->getSAOBlkParam());
+  delete[] reconParams;
+}
+
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+/** Gather SAO statistics on the pre-deblocking reconstruction into
+ *  m_preDBFstatData (last argument enables the pre-deblock sample paths).
+ */
+Void TEncSampleAdaptiveOffset::getPreDBFStatistics(TComPic* pPic)
+{
+  getStatistics(m_preDBFstatData, pPic->getPicYuvOrg(), pPic->getPicYuvRec(), pPic, true);
+}
+
+/** Accumulate the stored pre-deblocking statistics into the given per-CTU /
+ *  per-component / per-type statistics array (element-wise operator+=).
+ */
+Void TEncSampleAdaptiveOffset::addPreDBFStatistics(SAOStatData*** blkStats)
+{
+  for(Int n=0; n< m_numCTUsPic; n++)
+  {
+    for(Int compIdx=0; compIdx < MAX_NUM_COMPONENT; compIdx++)
+    {
+      for(Int typeIdc=0; typeIdc < NUM_SAO_NEW_TYPES; typeIdc++)
+      {
+        blkStats[n][compIdx][typeIdc] += m_preDBFstatData[n][compIdx][typeIdc];
+      }
+    }
+  }
+}
+
+#endif
+
+/** Collect SAO statistics for every CTU and every valid component:
+ *  computes the CTU's position/size, derives neighbour availability (picture
+ *  boundaries only, see note below), and delegates per-block work to
+ *  getBlkStats().
+ * \param blkStats [out] statistics array indexed [CTU][component][type]
+ * \param orgYuv   original picture
+ * \param srcYuv   (border-extended) reconstruction to compare against
+ */
+Void TEncSampleAdaptiveOffset::getStatistics(SAOStatData*** blkStats, TComPicYuv* orgYuv, TComPicYuv* srcYuv, TComPic* pPic
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+                          , Bool isCalculatePreDeblockSamples
+#endif
+                          )
+{
+  Bool isLeftAvail,isRightAvail,isAboveAvail,isBelowAvail,isAboveLeftAvail,isAboveRightAvail,isBelowLeftAvail,isBelowRightAvail;
+
+  const Int numberOfComponents = getNumberValidComponents(m_chromaFormatIDC);
+
+  for(Int ctuRsAddr= 0; ctuRsAddr < m_numCTUsPic; ctuRsAddr++)
+  {
+    Int yPos   = (ctuRsAddr / m_numCTUInWidth)*m_maxCUHeight;
+    Int xPos   = (ctuRsAddr % m_numCTUInWidth)*m_maxCUWidth;
+    // clip the CTU size at the right/bottom picture borders
+    Int height = (yPos + m_maxCUHeight > m_picHeight)?(m_picHeight- yPos):m_maxCUHeight;
+    Int width  = (xPos + m_maxCUWidth  > m_picWidth )?(m_picWidth - xPos):m_maxCUWidth;
+
+    pPic->getPicSym()->deriveLoopFilterBoundaryAvailibility(ctuRsAddr, isLeftAvail,isRightAvail,isAboveAvail,isBelowAvail,isAboveLeftAvail,isAboveRightAvail,isBelowLeftAvail,isBelowRightAvail);
+
+    //NOTE: The number of skipped lines during gathering CTU statistics depends on the slice boundary availabilities.
+    //For simplicity, here only picture boundaries are considered.
+
+    isRightAvail      = (xPos + m_maxCUWidth  < m_picWidth );
+    isBelowAvail      = (yPos + m_maxCUHeight < m_picHeight);
+    isBelowRightAvail = (isRightAvail && isBelowAvail);
+    isBelowLeftAvail  = ((xPos > 0) && (isBelowAvail));
+    isAboveRightAvail = ((yPos > 0) && (isRightAvail));
+
+    for(Int compIdx = 0; compIdx < numberOfComponents; compIdx++)
+    {
+      const ComponentID component = ComponentID(compIdx);
+
+      const UInt componentScaleX = getComponentScaleX(component, pPic->getChromaFormat());
+      const UInt componentScaleY = getComponentScaleY(component, pPic->getChromaFormat());
+
+      Int  srcStride  = srcYuv->getStride(component);
+      Pel* srcBlk     = srcYuv->getAddr(component) + ((yPos >> componentScaleY) * srcStride) + (xPos >> componentScaleX);
+
+      Int  orgStride  = orgYuv->getStride(component);
+      Pel* orgBlk     = orgYuv->getAddr(component) + ((yPos >> componentScaleY) * orgStride) + (xPos >> componentScaleX);
+
+      getBlkStats(component, blkStats[ctuRsAddr][component]
+                , srcBlk, orgBlk, srcStride, orgStride, (width  >> componentScaleX), (height >> componentScaleY)
+                , isLeftAvail,  isRightAvail, isAboveAvail, isBelowAvail, isAboveLeftAvail, isAboveRightAvail, isBelowLeftAvail, isBelowRightAvail
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+                , isCalculatePreDeblockSamples
+#endif
+                );
+
+    }
+  }
+}
+
+/** Decide the per-component slice-level SAO enable flags. Components are
+ *  enabled by default; with SAO_ENCODING_CHOICE, a component is disabled on
+ *  non-base temporal layers when its recorded disabled-rate exceeded the
+ *  per-channel threshold.
+ * \param sliceEnabled [out] per-component enable flags (all MAX_NUM_COMPONENT
+ *                     entries are cleared first, even unused ones)
+ * \param picTempLayer temporal layer of the current picture
+ */
+Void TEncSampleAdaptiveOffset::decidePicParams(Bool* sliceEnabled, Int picTempLayer)
+{
+  //decide sliceEnabled[compIdx]
+  const Int numberOfComponents = getNumberValidComponents(m_chromaFormatIDC);
+  for (Int compIdx = 0; compIdx < MAX_NUM_COMPONENT; compIdx++)
+  {
+    sliceEnabled[compIdx] = false;
+  }
+
+  for (Int compIdx = 0; compIdx < numberOfComponents; compIdx++)
+  {
+    // reset flags & counters
+    sliceEnabled[compIdx] = true;
+
+#if SAO_ENCODING_CHOICE
+#if SAO_ENCODING_CHOICE_CHROMA
+    // decide slice-level on/off based on previous results
+    if( (picTempLayer > 0)
+      && (m_saoDisabledRate[compIdx][picTempLayer-1] > ((compIdx==COMPONENT_Y) ? SAO_ENCODING_RATE : SAO_ENCODING_RATE_CHROMA)) )
+    {
+      sliceEnabled[compIdx] = false;
+    }
+#else
+    // decide slice-level on/off based on previous results
+    if( (picTempLayer > 0)
+      && (m_saoDisabledRate[COMPONENT_Y][0] > SAO_ENCODING_RATE) )
+    {
+      sliceEnabled[compIdx] = false;
+    }
+#endif
+#endif
+  }
+}
+
+/** Estimate the distortion change of applying a candidate SAO parameter set,
+ *  using the per-class statistics gathered by getBlkStats.
+ \param compIdx        component — selects the bit-depth-dependent precision shift
+ \param typeIdc        SAO type (one of the EO classes or BO)
+ \param typeAuxInfo    for BO only: starting band of the 4 signalled bands
+ \param invQuantOffset inverse-quantized offsets indexed by EO class / BO band
+ \param statData       per-class sample counts and (org - rec) difference sums
+ \return accumulated estimated SSD delta (negative means SAO improves quality)
+ */
+Int64 TEncSampleAdaptiveOffset::getDistortion(ComponentID compIdx, Int typeIdc, Int typeAuxInfo, Int* invQuantOffset, SAOStatData& statData)
+{
+  Int64 dist        = 0;
+  Int shift         = 2 * DISTORTION_PRECISION_ADJUSTMENT(g_bitDepth[toChannelType(compIdx)] - 8);
+
+  switch(typeIdc)
+  {
+    case SAO_TYPE_EO_0:
+    case SAO_TYPE_EO_90:
+    case SAO_TYPE_EO_135:
+    case SAO_TYPE_EO_45:
+      {
+        // Edge offset: sum over all EO classes.
+        for (Int offsetIdx=0; offsetIdx<NUM_SAO_EO_CLASSES; offsetIdx++)
+        {
+          dist += estSaoDist( statData.count[offsetIdx], invQuantOffset[offsetIdx], statData.diff[offsetIdx], shift);
+        }
+      }
+      break;
+    case SAO_TYPE_BO:
+      {
+        // Band offset: only the 4 consecutive bands starting at typeAuxInfo
+        // are signalled; band indices wrap modulo NUM_SAO_BO_CLASSES.
+        for (Int offsetIdx=typeAuxInfo; offsetIdx<typeAuxInfo+4; offsetIdx++)
+        {
+          Int bandIdx = offsetIdx % NUM_SAO_BO_CLASSES ;
+          dist += estSaoDist( statData.count[bandIdx], invQuantOffset[bandIdx], statData.diff[bandIdx], shift);
+        }
+      }
+      break;
+    default:
+      {
+        printf("Not a supported type");
+        assert(0);
+        exit(-1);
+      }
+  }
+
+  return dist;
+}
+
+/** Estimated SSD change from adding \c offset to \c count samples whose
+ *  accumulated (org - rec) difference is \c diffSum: count*offset^2 - 2*offset*diffSum,
+ *  scaled down by \c shift for bit depths above 8.
+ */
+inline Int64 TEncSampleAdaptiveOffset::estSaoDist(Int64 count, Int64 offset, Int64 diffSum, Int shift)
+{
+  const Int64 squareTerm = count * offset * offset;
+  const Int64 crossTerm  = diffSum * offset * 2;
+  return ((squareTerm - crossTerm) >> shift);
+}
+
+
+/** Iteratively shrink a quantized offset toward zero and keep the rate-distortion best value.
+ \param typeIdx      SAO type; BO offsets cost one extra bit (sign) versus EO
+ \param classIdx     unused here (kept for interface symmetry with callers)
+ \param lambda       Lagrangian multiplier; also the baseline cost of signalling offset 0
+ \param offsetInput  initial quantized offset to start the search from
+ \param count,diffSum  statistics for this class/band
+ \param shift        distortion precision shift
+ \param bitIncrease  dequantization shift (offset step log2)
+ \param bestDist,bestCost  outputs; NOTE: written only when some non-zero offset
+                     beats the zero-offset baseline cost (lambda) — callers must
+                     pre-initialize them if they read them unconditionally
+ \param offsetTh     inclusive magnitude bound; hitting it saves one rate bit
+ \return the RD-optimal quantized offset (0 if no candidate beats the baseline)
+ */
+inline Int TEncSampleAdaptiveOffset::estIterOffset(Int typeIdx, Int classIdx, Double lambda, Int offsetInput, Int64 count, Int64 diffSum, Int shift, Int bitIncrease, Int64& bestDist, Double& bestCost, Int offsetTh )
+{
+  Int iterOffset, tempOffset;
+  Int64 tempDist, tempRate;
+  Double tempCost, tempMinCost;
+  Int offsetOutput = 0;
+  iterOffset = offsetInput;
+  // Assuming sending quantized value 0 results in zero offset and sending the value zero needs 1 bit. entropy coder can be used to measure the exact rate here.
+  tempMinCost = lambda;
+  while (iterOffset != 0)
+  {
+    // Calculate the bits required for signaling the offset
+    tempRate = (typeIdx == SAO_TYPE_BO) ? (abs((Int)iterOffset)+2) : (abs((Int)iterOffset)+1);
+    if (abs((Int)iterOffset)==offsetTh) //inclusive
+    {
+      tempRate --;
+    }
+    // Do the dequantization before distortion calculation
+    tempOffset  = iterOffset << bitIncrease;
+    tempDist    = estSaoDist( count, tempOffset, diffSum, shift);
+    tempCost    = ((Double)tempDist + lambda * (Double) tempRate);
+    if(tempCost < tempMinCost)
+    {
+      tempMinCost = tempCost;
+      offsetOutput = iterOffset;
+      bestDist = tempDist;
+      bestCost = tempCost;
+    }
+    // Step the candidate one unit toward zero and retry.
+    iterOffset = (iterOffset > 0) ? (iterOffset-1):(iterOffset+1);
+  }
+  return offsetOutput;
+}
+
+/** Derive the quantized SAO offsets (and, for BO, the starting band) for one component/type.
+ \param compIdx       component being processed
+ \param typeIdc       SAO type (EO_0/90/135/45 or BO)
+ \param statData      per-class statistics for this CTU/component/type
+ \param quantOffsets  output array of MAX_NUM_SAO_CLASSES quantized offsets (zero-filled first)
+ \param typeAuxInfo   output: BO starting band index; 0 for EO types
+ */
+Void TEncSampleAdaptiveOffset::deriveOffsets(ComponentID compIdx, Int typeIdc, SAOStatData& statData, Int* quantOffsets, Int& typeAuxInfo)
+{
+  Int bitDepth = g_bitDepth[toChannelType(compIdx)];
+  Int shift    = 2 * DISTORTION_PRECISION_ADJUSTMENT(bitDepth-8);
+  Int offsetTh = g_saoMaxOffsetQVal[compIdx];  //inclusive
+
+  ::memset(quantOffsets, 0, sizeof(Int)*MAX_NUM_SAO_CLASSES);
+
+  //derive initial offsets
+  Int numClasses = (typeIdc == SAO_TYPE_BO)?((Int)NUM_SAO_BO_CLASSES):((Int)NUM_SAO_EO_CLASSES);
+  for(Int classIdx=0; classIdx< numClasses; classIdx++)
+  {
+    if( (typeIdc != SAO_TYPE_BO) && (classIdx==SAO_CLASS_EO_PLAIN)  )
+    {
+      continue; //offset will be zero
+    }
+
+    if(statData.count[classIdx] == 0)
+    {
+      continue; //offset will be zero
+    }
+
+    // Initial estimate: average difference, quantized by the offset step, then clipped.
+    quantOffsets[classIdx] = (Int) xRoundIbdi(bitDepth, (Double)( statData.diff[classIdx]<<(bitDepth-8))
+                                                                  /
+                                                          (Double)( statData.count[classIdx]<< m_offsetStepLog2[compIdx])
+                                               );
+    quantOffsets[classIdx] = Clip3(-offsetTh, offsetTh, quantOffsets[classIdx]);
+  }
+
+  // adjust offsets
+  switch(typeIdc)
+  {
+    case SAO_TYPE_EO_0:
+    case SAO_TYPE_EO_90:
+    case SAO_TYPE_EO_135:
+    case SAO_TYPE_EO_45:
+      {
+        Int64 classDist;
+        Double classCost;
+        for(Int classIdx=0; classIdx<NUM_SAO_EO_CLASSES; classIdx++)
+        {
+          // Enforce the EO sign constraints: valleys get non-negative offsets,
+          // peaks get non-positive offsets.
+          if(classIdx==SAO_CLASS_EO_FULL_VALLEY && quantOffsets[classIdx] < 0) quantOffsets[classIdx] =0;
+          if(classIdx==SAO_CLASS_EO_HALF_VALLEY && quantOffsets[classIdx] < 0) quantOffsets[classIdx] =0;
+          if(classIdx==SAO_CLASS_EO_HALF_PEAK   && quantOffsets[classIdx] > 0) quantOffsets[classIdx] =0;
+          if(classIdx==SAO_CLASS_EO_FULL_PEAK   && quantOffsets[classIdx] > 0) quantOffsets[classIdx] =0;
+
+          if( quantOffsets[classIdx] != 0 ) //iterative adjustment only when derived offset is not zero
+          {
+            // classDist/classCost are not read afterwards for EO types.
+            quantOffsets[classIdx] = estIterOffset( typeIdc, classIdx, m_lambda[compIdx], quantOffsets[classIdx], statData.count[classIdx], statData.diff[classIdx], shift, m_offsetStepLog2[compIdx], classDist , classCost , offsetTh );
+          }
+        }
+
+        typeAuxInfo =0;
+      }
+      break;
+    case SAO_TYPE_BO:
+      {
+        Int64  distBOClasses[NUM_SAO_BO_CLASSES];
+        Double costBOClasses[NUM_SAO_BO_CLASSES];
+        ::memset(distBOClasses, 0, sizeof(Int64)*NUM_SAO_BO_CLASSES);
+        for(Int classIdx=0; classIdx< NUM_SAO_BO_CLASSES; classIdx++)
+        {
+          // Pre-initialize cost to the zero-offset baseline (lambda); estIterOffset
+          // overwrites it only when a non-zero offset is cheaper.
+          costBOClasses[classIdx]= m_lambda[compIdx];
+          if( quantOffsets[classIdx] != 0 ) //iterative adjustment only when derived offset is not zero
+          {
+            quantOffsets[classIdx] = estIterOffset( typeIdc, classIdx, m_lambda[compIdx], quantOffsets[classIdx], statData.count[classIdx], statData.diff[classIdx], shift, m_offsetStepLog2[compIdx], distBOClasses[classIdx], costBOClasses[classIdx], offsetTh );
+          }
+        }
+
+        //decide the starting band index
+        // Pick the 4-band window with the lowest summed cost.
+        Double minCost = MAX_DOUBLE, cost;
+        for(Int band=0; band< NUM_SAO_BO_CLASSES- 4+ 1; band++)
+        {
+          cost  = costBOClasses[band  ];
+          cost += costBOClasses[band+1];
+          cost += costBOClasses[band+2];
+          cost += costBOClasses[band+3];
+
+          if(cost < minCost)
+          {
+            minCost = cost;
+            typeAuxInfo = band;
+          }
+        }
+        //clear those unused classes
+        Int clearQuantOffset[NUM_SAO_BO_CLASSES];
+        ::memset(clearQuantOffset, 0, sizeof(Int)*NUM_SAO_BO_CLASSES);
+        for(Int i=0; i< 4; i++)
+        {
+          Int band = (typeAuxInfo+i)%NUM_SAO_BO_CLASSES;
+          clearQuantOffset[band] = quantOffsets[band];
+        }
+        ::memcpy(quantOffsets, clearQuantOffset, sizeof(Int)*NUM_SAO_BO_CLASSES);
+      }
+      break;
+    default:
+      {
+        printf("Not a supported type");
+        assert(0);
+        exit(-1);
+      }
+
+  }
+
+
+}
+
+/** RD-test the SAO_MODE_NEW candidates (all new types plus "off") for one CTU.
+ \param ctuRsAddr     CTU address in raster-scan order
+ \param mergeList     merge candidates (used only to pre-encode the merge flags)
+ \param sliceEnabled  per-component SAO enable flags
+ \param blkStats      statistics [ctu][component][type]
+ \param modeParam     output: best found SAO parameters for this CTU
+ \param modeNormCost  output: lambda-normalized distortion plus rate of the best mode
+ \param cabacCoderRDO CABAC coder state array used for rate estimation
+ \param inCabacLabel  index of the CABAC state to start from
+ Luma is decided first; chroma Cb/Cr are then decided jointly (shared typeIdc).
+ */
+Void TEncSampleAdaptiveOffset::deriveModeNewRDO(Int ctuRsAddr, SAOBlkParam* mergeList[NUM_SAO_MERGE_TYPES], Bool* sliceEnabled, SAOStatData*** blkStats, SAOBlkParam& modeParam, Double& modeNormCost, TEncSbac** cabacCoderRDO, Int inCabacLabel)
+{
+  Double minCost, cost;
+  UInt previousWrittenBits;
+  const Int numberOfComponents = getNumberValidComponents(m_chromaFormatIDC);
+
+  Int64 dist[MAX_NUM_COMPONENT], modeDist[MAX_NUM_COMPONENT];
+  SAOOffset testOffset[MAX_NUM_COMPONENT];
+  Int invQuantOffset[MAX_NUM_SAO_CLASSES];
+  for(Int comp=0; comp < MAX_NUM_COMPONENT; comp++)
+  {
+    modeDist[comp] = 0;
+  }
+
+  //pre-encode merge flags
+  modeParam[COMPONENT_Y].modeIdc = SAO_MODE_OFF;
+  m_pcRDGoOnSbacCoder->load(cabacCoderRDO[inCabacLabel]);
+  m_pcRDGoOnSbacCoder->codeSAOBlkParam(modeParam, sliceEnabled, (mergeList[SAO_MERGE_LEFT]!= NULL), (mergeList[SAO_MERGE_ABOVE]!= NULL), true);
+  m_pcRDGoOnSbacCoder->store(cabacCoderRDO[SAO_CABACSTATE_BLK_MID]);
+
+    //------ luma --------//
+  {
+    ComponentID compIdx = COMPONENT_Y;
+    //"off" case as initial cost
+    modeParam[compIdx].modeIdc = SAO_MODE_OFF;
+    m_pcRDGoOnSbacCoder->resetBits();
+    m_pcRDGoOnSbacCoder->codeSAOOffsetParam(compIdx, modeParam[compIdx], sliceEnabled[compIdx]);
+    modeDist[compIdx] = 0;
+    minCost= m_lambda[compIdx]*((Double)m_pcRDGoOnSbacCoder->getNumberOfWrittenBits());
+    m_pcRDGoOnSbacCoder->store(cabacCoderRDO[SAO_CABACSTATE_BLK_TEMP]);
+    if(sliceEnabled[compIdx])
+    {
+      // Try every new SAO type; keep the cheapest in modeParam/modeDist and
+      // its CABAC state in BLK_TEMP.
+      for(Int typeIdc=0; typeIdc< NUM_SAO_NEW_TYPES; typeIdc++)
+      {
+        testOffset[compIdx].modeIdc = SAO_MODE_NEW;
+        testOffset[compIdx].typeIdc = typeIdc;
+
+        //derive coded offset
+        deriveOffsets(compIdx, typeIdc, blkStats[ctuRsAddr][compIdx][typeIdc], testOffset[compIdx].offset, testOffset[compIdx].typeAuxInfo);
+
+        //inversed quantized offsets
+        invertQuantOffsets(compIdx, typeIdc, testOffset[compIdx].typeAuxInfo, invQuantOffset, testOffset[compIdx].offset);
+
+        //get distortion
+        dist[compIdx] = getDistortion(compIdx, testOffset[compIdx].typeIdc, testOffset[compIdx].typeAuxInfo, invQuantOffset, blkStats[ctuRsAddr][compIdx][typeIdc]);
+
+        //get rate
+        m_pcRDGoOnSbacCoder->load(cabacCoderRDO[SAO_CABACSTATE_BLK_MID]);
+        m_pcRDGoOnSbacCoder->resetBits();
+        m_pcRDGoOnSbacCoder->codeSAOOffsetParam(compIdx, testOffset[compIdx], sliceEnabled[compIdx]);
+        Int rate = m_pcRDGoOnSbacCoder->getNumberOfWrittenBits();
+        cost = (Double)dist[compIdx] + m_lambda[compIdx]*((Double)rate);
+        if(cost < minCost)
+        {
+          minCost = cost;
+          modeDist[compIdx] = dist[compIdx];
+          modeParam[compIdx]= testOffset[compIdx];
+          m_pcRDGoOnSbacCoder->store(cabacCoderRDO[SAO_CABACSTATE_BLK_TEMP]);
+        }
+      }
+    }
+    // Continue chroma estimation from the state after the best luma decision.
+    m_pcRDGoOnSbacCoder->load(cabacCoderRDO[SAO_CABACSTATE_BLK_TEMP]);
+    m_pcRDGoOnSbacCoder->store(cabacCoderRDO[SAO_CABACSTATE_BLK_MID]);
+  }
+
+  //------ chroma --------//
+//"off" case as initial cost
+  cost = 0;
+  previousWrittenBits = 0;
+  m_pcRDGoOnSbacCoder->resetBits();
+  for(UInt componentIndex = COMPONENT_Cb; componentIndex < numberOfComponents; componentIndex++)
+  {
+    const ComponentID component = ComponentID(componentIndex);
+
+    modeParam[component].modeIdc = SAO_MODE_OFF;
+    modeDist [component]         = 0;
+    m_pcRDGoOnSbacCoder->codeSAOOffsetParam(component, modeParam[component], sliceEnabled[component]);
+    
+    const UInt currentWrittenBits = m_pcRDGoOnSbacCoder->getNumberOfWrittenBits();
+    cost += m_lambda[component] * (currentWrittenBits - previousWrittenBits);
+    previousWrittenBits = currentWrittenBits;
+  }
+
+  minCost = cost;
+
+  //doesn't need to store cabac status here since the whole CTU parameters will be re-encoded at the end of this function
+
+  // Cb and Cr share the same typeIdc; test each type jointly for both components.
+  for(Int typeIdc=0; typeIdc< NUM_SAO_NEW_TYPES; typeIdc++)
+  {
+    m_pcRDGoOnSbacCoder->load(cabacCoderRDO[SAO_CABACSTATE_BLK_MID]);
+    m_pcRDGoOnSbacCoder->resetBits();
+    previousWrittenBits = 0;
+    cost = 0;
+
+    for(UInt componentIndex = COMPONENT_Cb; componentIndex < numberOfComponents; componentIndex++)
+    {
+      const ComponentID component = ComponentID(componentIndex);
+      if(!sliceEnabled[component])
+      {
+        testOffset[component].modeIdc = SAO_MODE_OFF;
+        dist[component]= 0;
+        continue;
+      }
+      testOffset[component].modeIdc = SAO_MODE_NEW;
+      testOffset[component].typeIdc = typeIdc;
+
+      //derive offset & get distortion
+      deriveOffsets(component, typeIdc, blkStats[ctuRsAddr][component][typeIdc], testOffset[component].offset, testOffset[component].typeAuxInfo);
+      invertQuantOffsets(component, typeIdc, testOffset[component].typeAuxInfo, invQuantOffset, testOffset[component].offset);
+      dist[component] = getDistortion(component, typeIdc, testOffset[component].typeAuxInfo, invQuantOffset, blkStats[ctuRsAddr][component][typeIdc]);
+
+      m_pcRDGoOnSbacCoder->codeSAOOffsetParam(component, testOffset[component], sliceEnabled[component]);
+
+      const UInt currentWrittenBits = m_pcRDGoOnSbacCoder->getNumberOfWrittenBits();
+      cost += dist[component] + (m_lambda[component] * (currentWrittenBits - previousWrittenBits));
+      previousWrittenBits = currentWrittenBits;
+    }
+
+    if(cost < minCost)
+    {
+      minCost = cost;
+      for(UInt componentIndex = COMPONENT_Cb; componentIndex < numberOfComponents; componentIndex++)
+      {
+        modeDist[componentIndex]  = dist[componentIndex];
+        modeParam[componentIndex] = testOffset[componentIndex];
+      }
+    }
+
+  } // SAO_TYPE loop
+
+  //----- re-gen rate & normalized cost----//
+  modeNormCost = 0;
+  for(UInt componentIndex = COMPONENT_Y; componentIndex < numberOfComponents; componentIndex++)
+  {
+    modeNormCost += (Double)modeDist[componentIndex] / m_lambda[componentIndex];
+  }
+
+  // Re-encode the full CTU parameter set once, from the original state, so the
+  // caller gets a consistent CABAC state and total rate.
+  m_pcRDGoOnSbacCoder->load(cabacCoderRDO[inCabacLabel]);
+  m_pcRDGoOnSbacCoder->resetBits();
+  m_pcRDGoOnSbacCoder->codeSAOBlkParam(modeParam, sliceEnabled, (mergeList[SAO_MERGE_LEFT]!= NULL), (mergeList[SAO_MERGE_ABOVE]!= NULL), false);
+  modeNormCost += (Double)m_pcRDGoOnSbacCoder->getNumberOfWrittenBits();
+}
+
+/** RD-test the SAO merge candidates (merge-left / merge-above) for one CTU.
+ \param ctuRsAddr     CTU address in raster-scan order
+ \param mergeList     merge candidates; NULL entries are unavailable and skipped
+ \param sliceEnabled  per-component SAO enable flags
+ \param blkStats      statistics [ctu][component][type]
+ \param modeParam     output: parameters of the cheapest merge candidate
+ \param modeNormCost  output: normalized distortion + rate (MAX_DOUBLE if no candidate)
+ \param cabacCoderRDO CABAC coder state array used for rate estimation
+ \param inCabacLabel  index of the CABAC state to start from
+ On return the coder holds the state stored for the best candidate (BLK_TEMP).
+ */
+Void TEncSampleAdaptiveOffset::deriveModeMergeRDO(Int ctuRsAddr, SAOBlkParam* mergeList[NUM_SAO_MERGE_TYPES], Bool* sliceEnabled, SAOStatData*** blkStats, SAOBlkParam& modeParam, Double& modeNormCost, TEncSbac** cabacCoderRDO, Int inCabacLabel)
+{
+  modeNormCost = MAX_DOUBLE;
+
+  Double cost;
+  SAOBlkParam testBlkParam;
+  const Int numberOfComponents = getNumberValidComponents(m_chromaFormatIDC);
+
+  for(Int mergeType=0; mergeType< NUM_SAO_MERGE_TYPES; mergeType++)
+  {
+    if(mergeList[mergeType] == NULL)
+    {
+      continue;
+    }
+
+    testBlkParam = *(mergeList[mergeType]);
+    //normalized distortion
+    Double normDist=0;
+    for(Int compIdx = 0; compIdx < numberOfComponents; compIdx++)
+    {
+      testBlkParam[compIdx].modeIdc = SAO_MODE_MERGE;
+      testBlkParam[compIdx].typeIdc = mergeType;
+
+      SAOOffset& mergedOffsetParam = (*(mergeList[mergeType]))[compIdx];
+
+      if( mergedOffsetParam.modeIdc != SAO_MODE_OFF)
+      {
+        //offsets have been reconstructed. Don't call inversed quantization function.
+        normDist += (((Double)getDistortion(ComponentID(compIdx), mergedOffsetParam.typeIdc, mergedOffsetParam.typeAuxInfo, mergedOffsetParam.offset, blkStats[ctuRsAddr][compIdx][mergedOffsetParam.typeIdc]))
+                       /m_lambda[compIdx]
+                    );
+      }
+
+    }
+
+    //rate
+    m_pcRDGoOnSbacCoder->load(cabacCoderRDO[inCabacLabel]);
+    m_pcRDGoOnSbacCoder->resetBits();
+    m_pcRDGoOnSbacCoder->codeSAOBlkParam(testBlkParam, sliceEnabled, (mergeList[SAO_MERGE_LEFT]!= NULL), (mergeList[SAO_MERGE_ABOVE]!= NULL), false);
+    Int rate = m_pcRDGoOnSbacCoder->getNumberOfWrittenBits();
+
+    cost = normDist+(Double)rate;
+
+    if(cost < modeNormCost)
+    {
+      modeNormCost = cost;
+      modeParam    = testBlkParam;
+      m_pcRDGoOnSbacCoder->store(cabacCoderRDO[SAO_CABACSTATE_BLK_TEMP]);
+    }
+  }
+
+  // Restore the coder to the state of the winning candidate.
+  m_pcRDGoOnSbacCoder->load(cabacCoderRDO[SAO_CABACSTATE_BLK_TEMP]);
+}
+
+/** Decide the final SAO parameters for every CTU of the picture and apply them.
+ \param pic           current picture
+ \param sliceEnabled  per-component enable flags (may be cleared here when SAO is not beneficial)
+ \param blkStats      statistics [ctu][component][type]
+ \param srcYuv        deblocked picture used as SAO input
+ \param resYuv        output picture receiving SAO-filtered samples
+ \param reconParams   per-CTU reconstructed (merge-resolved) parameters, filled here
+ \param codedParams   per-CTU parameters as they will be signalled, filled here
+ For each CTU the NEW and MERGE modes are RD-compared; the best is applied via
+ offsetCTU. Afterwards the per-layer SAO-off rates are updated for decidePicParams.
+ */
+Void TEncSampleAdaptiveOffset::decideBlkParams(TComPic* pic, Bool* sliceEnabled, SAOStatData*** blkStats, TComPicYuv* srcYuv, TComPicYuv* resYuv, SAOBlkParam* reconParams, SAOBlkParam* codedParams)
+{
+  Bool allBlksDisabled = true;
+  const Int numberOfComponents = getNumberValidComponents(m_chromaFormatIDC);
+  for(Int compId = COMPONENT_Y; compId < numberOfComponents; compId++)
+  {
+    if (sliceEnabled[compId])
+      allBlksDisabled = false;
+  }
+
+  m_pcRDGoOnSbacCoder->load(m_pppcRDSbacCoder[ SAO_CABACSTATE_PIC_INIT ]);
+
+  SAOBlkParam modeParam;
+  Double minCost, modeCost;
+
+
+#if RD_TEST_SAO_DISABLE_AT_PICTURE_LEVEL
+  Double totalCost = 0;
+#endif
+
+  for(Int ctuRsAddr=0; ctuRsAddr< m_numCTUsPic; ctuRsAddr++)
+  {
+    if(allBlksDisabled)
+    {
+      codedParams[ctuRsAddr].reset();
+      continue;
+    }
+
+    m_pcRDGoOnSbacCoder->store(m_pppcRDSbacCoder[ SAO_CABACSTATE_BLK_CUR ]);
+
+    //get merge list
+    SAOBlkParam* mergeList[NUM_SAO_MERGE_TYPES] = { NULL };
+    getMergeList(pic, ctuRsAddr, reconParams, mergeList);
+
+    minCost = MAX_DOUBLE;
+    for(Int mode=0; mode < NUM_SAO_MODES; mode++)
+    {
+      switch(mode)
+      {
+      case SAO_MODE_OFF:
+        {
+          continue; //not necessary, since all-off case will be tested in SAO_MODE_NEW case.
+        }
+        break;
+      case SAO_MODE_NEW:
+        {
+          deriveModeNewRDO(ctuRsAddr, mergeList, sliceEnabled, blkStats, modeParam, modeCost, m_pppcRDSbacCoder, SAO_CABACSTATE_BLK_CUR);
+
+        }
+        break;
+      case SAO_MODE_MERGE:
+        {
+          deriveModeMergeRDO(ctuRsAddr, mergeList, sliceEnabled, blkStats , modeParam, modeCost, m_pppcRDSbacCoder, SAO_CABACSTATE_BLK_CUR);
+        }
+        break;
+      default:
+        {
+          printf("Not a supported SAO mode\n");
+          assert(0);
+          exit(-1);
+        }
+      }
+
+      if(modeCost < minCost)
+      {
+        minCost = modeCost;
+        codedParams[ctuRsAddr] = modeParam;
+        m_pcRDGoOnSbacCoder->store(m_pppcRDSbacCoder[ SAO_CABACSTATE_BLK_NEXT ]);
+      }
+    } //mode
+
+#if RD_TEST_SAO_DISABLE_AT_PICTURE_LEVEL
+    totalCost += minCost;
+#endif
+
+    m_pcRDGoOnSbacCoder->load(m_pppcRDSbacCoder[ SAO_CABACSTATE_BLK_NEXT ]);
+
+    //apply reconstructed offsets
+    reconParams[ctuRsAddr] = codedParams[ctuRsAddr];
+    reconstructBlkSAOParam(reconParams[ctuRsAddr], mergeList);
+    offsetCTU(ctuRsAddr, srcYuv, resYuv, reconParams[ctuRsAddr], pic);
+  } //ctuRsAddr
+
+#if RD_TEST_SAO_DISABLE_AT_PICTURE_LEVEL
+  // Normalized costs are (dist/lambda + rate); a non-negative picture total
+  // means SAO brought no net RD gain, so disable it for the whole picture.
+  if (!allBlksDisabled && (totalCost >= 0)) //SAO is not beneficial - disable it
+  {
+    for(Int ctuRsAddr = 0; ctuRsAddr < m_numCTUsPic; ctuRsAddr++)
+    {
+      codedParams[ctuRsAddr].reset();
+    }
+
+    for (UInt componentIndex = 0; componentIndex < MAX_NUM_COMPONENT; componentIndex++)
+    {
+      sliceEnabled[componentIndex] = false;
+    }
+
+    m_pcRDGoOnSbacCoder->load(m_pppcRDSbacCoder[ SAO_CABACSTATE_PIC_INIT ]);
+  }
+#endif
+
+#if SAO_ENCODING_CHOICE
+  // Record the SAO-off CTU rate per component/temporal layer; consumed by
+  // decidePicParams for early slice-level disabling of later pictures.
+  Int picTempLayer = pic->getSlice(0)->getDepth();
+  Int numCtusForSAOOff[MAX_NUM_COMPONENT];
+
+  for (Int compIdx = 0; compIdx < numberOfComponents; compIdx++)
+  {
+    numCtusForSAOOff[compIdx] = 0;
+    for(Int ctuRsAddr=0; ctuRsAddr< m_numCTUsPic; ctuRsAddr++)
+    {
+      if( reconParams[ctuRsAddr][compIdx].modeIdc == SAO_MODE_OFF)
+      {
+        numCtusForSAOOff[compIdx]++;
+      }
+    }
+  }
+#if SAO_ENCODING_CHOICE_CHROMA
+  for (Int compIdx = 0; compIdx < numberOfComponents; compIdx++)
+  {
+    m_saoDisabledRate[compIdx][picTempLayer] = (Double)numCtusForSAOOff[compIdx]/(Double)m_numCTUsPic;
+  }
+#else
+  if (picTempLayer == 0)
+  {
+    m_saoDisabledRate[COMPONENT_Y][0] = (Double)(numCtusForSAOOff[COMPONENT_Y]+numCtusForSAOOff[COMPONENT_Cb]+numCtusForSAOOff[COMPONENT_Cr])/(Double)(m_numCTUsPic*3);
+  }
+#endif
+#endif
+}
+
+
+/** Gather SAO statistics (per-class sample counts and org-rec difference sums)
+ *  for one component of one CTU, for every new SAO type.
+ \param compIdx        component being processed (selects the skip-line tables)
+ \param statsDataTypes output array indexed by typeIdx; reset and filled here
+ \param srcBlk/orgBlk  reconstructed and original sample blocks (top-left of CTU)
+ \param srcStride/orgStride  line strides of the two blocks
+ \param width/height   block dimensions in samples (already chroma-scaled)
+ \param is*Avail       neighbour availability flags; unavailable edges shrink the
+                       processed region so no out-of-block samples are read
+ \param isCalculatePreDeblockSamples  (SAO_ENCODE_ALLOW_USE_PREDEBLOCK) second pass
+                       that covers only the right/bottom stripes skipped by the
+                       first pass, using pre-deblock samples
+ Sign-line buffers m_signLineBuf1/2 carry vertical/diagonal sign decisions from
+ one row to the next so each sample comparison is computed only once.
+ */
+Void TEncSampleAdaptiveOffset::getBlkStats(ComponentID compIdx, SAOStatData* statsDataTypes
+                        , Pel* srcBlk, Pel* orgBlk, Int srcStride, Int orgStride, Int width, Int height
+                        , Bool isLeftAvail,  Bool isRightAvail, Bool isAboveAvail, Bool isBelowAvail, Bool isAboveLeftAvail, Bool isAboveRightAvail, Bool isBelowLeftAvail, Bool isBelowRightAvail
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+                        , Bool isCalculatePreDeblockSamples
+#endif
+                        )
+{
+  // (Re)allocate the sign-line buffers when the CTU width changes.
+  if(m_lineBufWidth != m_maxCUWidth)
+  {
+    m_lineBufWidth = m_maxCUWidth;
+
+    // NOTE(review): only the delete[] is guarded by the if; the NULL assignment
+    // on the same line executes unconditionally (harmless, but easy to misread).
+    if (m_signLineBuf1) delete[] m_signLineBuf1; m_signLineBuf1 = NULL;
+    m_signLineBuf1 = new Char[m_lineBufWidth+1];
+
+    if (m_signLineBuf2) delete[] m_signLineBuf2; m_signLineBuf2 = NULL;
+    m_signLineBuf2 = new Char[m_lineBufWidth+1];
+  }
+
+  Int x,y, startX, startY, endX, endY, edgeType, firstLineStartX, firstLineEndX;
+  Char signLeft, signRight, signDown;
+  Int64 *diff, *count;
+  Pel *srcLine, *orgLine;
+  Int* skipLinesR = m_skipLinesR[compIdx];
+  Int* skipLinesB = m_skipLinesB[compIdx];
+
+  for(Int typeIdx=0; typeIdx< NUM_SAO_NEW_TYPES; typeIdx++)
+  {
+    SAOStatData& statsData= statsDataTypes[typeIdx];
+    statsData.reset();
+
+    srcLine = srcBlk;
+    orgLine = orgBlk;
+    diff    = statsData.diff;
+    count   = statsData.count;
+    switch(typeIdx)
+    {
+    case SAO_TYPE_EO_0:   // horizontal edge offset (left/right neighbours)
+      {
+        // Bias the pointers so edgeType in [-2,2] indexes classes [0,4].
+        diff +=2;
+        count+=2;
+        endY   = (isBelowAvail) ? (height - skipLinesB[typeIdx]) : height;
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        startX = (!isCalculatePreDeblockSamples) ? (isLeftAvail  ? 0 : 1)
+                                                 : (isRightAvail ? (width - skipLinesR[typeIdx]) : (width - 1))
+                                                 ;
+#else
+        startX = isLeftAvail ? 0 : 1;
+#endif
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        endX   = (!isCalculatePreDeblockSamples) ? (isRightAvail ? (width - skipLinesR[typeIdx]) : (width - 1))
+                                                 : (isRightAvail ? width : (width - 1))
+                                                 ;
+#else
+        endX   = isRightAvail ? (width - skipLinesR[typeIdx]): (width - 1);
+#endif
+        for (y=0; y<endY; y++)
+        {
+          signLeft = (Char)sgn(srcLine[startX] - srcLine[startX-1]);
+          for (x=startX; x<endX; x++)
+          {
+            signRight =  (Char)sgn(srcLine[x] - srcLine[x+1]);
+            edgeType  =  signRight + signLeft;
+            // Reuse the right comparison as next sample's left sign (negated).
+            signLeft  = -signRight;
+
+            diff [edgeType] += (orgLine[x] - srcLine[x]);
+            count[edgeType] ++;
+          }
+          srcLine  += srcStride;
+          orgLine  += orgStride;
+        }
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        if(isCalculatePreDeblockSamples)
+        {
+          if(isBelowAvail)
+          {
+            // Bottom stripe skipped by the first pass.
+            startX = isLeftAvail  ? 0 : 1;
+            endX   = isRightAvail ? width : (width -1);
+
+            for(y=0; y<skipLinesB[typeIdx]; y++)
+            {
+              signLeft = (Char)sgn(srcLine[startX] - srcLine[startX-1]);
+              for (x=startX; x<endX; x++)
+              {
+                signRight =  (Char)sgn(srcLine[x] - srcLine[x+1]);
+                edgeType  =  signRight + signLeft;
+                signLeft  = -signRight;
+
+                diff [edgeType] += (orgLine[x] - srcLine[x]);
+                count[edgeType] ++;
+              }
+              srcLine  += srcStride;
+              orgLine  += orgStride;
+            }
+          }
+        }
+#endif
+      }
+      break;
+    case SAO_TYPE_EO_90:   // vertical edge offset (above/below neighbours)
+      {
+        diff +=2;
+        count+=2;
+        Char *signUpLine = m_signLineBuf1;
+
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        startX = (!isCalculatePreDeblockSamples) ? 0
+                                                 : (isRightAvail ? (width - skipLinesR[typeIdx]) : width)
+                                                 ;
+#endif
+        startY = isAboveAvail ? 0 : 1;
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        endX   = (!isCalculatePreDeblockSamples) ? (isRightAvail ? (width - skipLinesR[typeIdx]) : width)
+                                                 : width
+                                                 ;
+#else
+        endX   = isRightAvail ? (width - skipLinesR[typeIdx]) : width ;
+#endif
+        endY   = isBelowAvail ? (height - skipLinesB[typeIdx]) : (height - 1);
+        if (!isAboveAvail)
+        {
+          srcLine += srcStride;
+          orgLine += orgStride;
+        }
+
+        // Seed the sign buffer with the first row's "above" comparisons.
+        Pel* srcLineAbove = srcLine - srcStride;
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        for (x=startX; x<endX; x++)
+#else
+        for (x=0; x< endX; x++)
+#endif
+        {
+          signUpLine[x] = (Char)sgn(srcLine[x] - srcLineAbove[x]);
+        }
+
+        Pel* srcLineBelow;
+        for (y=startY; y<endY; y++)
+        {
+          srcLineBelow = srcLine + srcStride;
+
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+          for (x=startX; x<endX; x++)
+#else
+          for (x=0; x<endX; x++)
+#endif
+          {
+            signDown  = (Char)sgn(srcLine[x] - srcLineBelow[x]); 
+            edgeType  = signDown + signUpLine[x];
+            signUpLine[x]= -signDown;
+
+            diff [edgeType] += (orgLine[x] - srcLine[x]);
+            count[edgeType] ++;
+          }
+          srcLine += srcStride;
+          orgLine += orgStride;
+        }
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        if(isCalculatePreDeblockSamples)
+        {
+          if(isBelowAvail)
+          {
+            startX = 0;
+            endX   = width;
+
+            for(y=0; y<skipLinesB[typeIdx]; y++)
+            {
+              srcLineBelow = srcLine + srcStride;
+              srcLineAbove = srcLine - srcStride;
+
+              for (x=startX; x<endX; x++)
+              {
+                edgeType = sgn(srcLine[x] - srcLineBelow[x]) + sgn(srcLine[x] - srcLineAbove[x]);
+                diff [edgeType] += (orgLine[x] - srcLine[x]);
+                count[edgeType] ++;
+              }
+              srcLine  += srcStride;
+              orgLine  += orgStride;
+            }
+          }
+        }
+#endif
+
+      }
+      break;
+    case SAO_TYPE_EO_135:   // 135-degree diagonal (above-left / below-right)
+      {
+        diff +=2;
+        count+=2;
+        Char *signUpLine, *signDownLine, *signTmpLine;
+
+        signUpLine  = m_signLineBuf1;
+        signDownLine= m_signLineBuf2;
+
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        startX = (!isCalculatePreDeblockSamples) ? (isLeftAvail  ? 0 : 1)
+                                                 : (isRightAvail ? (width - skipLinesR[typeIdx]) : (width - 1))
+                                                 ;
+#else
+        startX = isLeftAvail ? 0 : 1 ;
+#endif
+
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        endX   = (!isCalculatePreDeblockSamples) ? (isRightAvail ? (width - skipLinesR[typeIdx]): (width - 1))
+                                                 : (isRightAvail ? width : (width - 1))
+                                                 ;
+#else
+        endX   = isRightAvail ? (width - skipLinesR[typeIdx]): (width - 1);
+#endif
+        endY   = isBelowAvail ? (height - skipLinesB[typeIdx]) : (height - 1);
+
+        //prepare 2nd line's upper sign
+        Pel* srcLineBelow = srcLine + srcStride;
+        for (x=startX; x<endX+1; x++)
+        {
+          signUpLine[x] = (Char)sgn(srcLineBelow[x] - srcLine[x-1]);
+        }
+
+        //1st line
+        Pel* srcLineAbove = srcLine - srcStride;
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        firstLineStartX = (!isCalculatePreDeblockSamples) ? (isAboveLeftAvail ? 0    : 1) : startX;
+        firstLineEndX   = (!isCalculatePreDeblockSamples) ? (isAboveAvail     ? endX : 1) : endX;
+#else
+        firstLineStartX = isAboveLeftAvail ? 0    : 1;
+        firstLineEndX   = isAboveAvail     ? endX : 1;
+#endif
+        for(x=firstLineStartX; x<firstLineEndX; x++)
+        {
+          edgeType = sgn(srcLine[x] - srcLineAbove[x-1]) - signUpLine[x+1];
+          diff [edgeType] += (orgLine[x] - srcLine[x]);
+          count[edgeType] ++;
+        }
+        srcLine  += srcStride;
+        orgLine  += orgStride;
+
+
+        //middle lines
+        for (y=1; y<endY; y++)
+        {
+          srcLineBelow = srcLine + srcStride;
+
+          for (x=startX; x<endX; x++)
+          {
+            signDown = (Char)sgn(srcLine[x] - srcLineBelow[x+1]);
+            edgeType = signDown + signUpLine[x];
+            diff [edgeType] += (orgLine[x] - srcLine[x]);
+            count[edgeType] ++;
+
+            signDownLine[x+1] = -signDown;
+          }
+          signDownLine[startX] = (Char)sgn(srcLineBelow[startX] - srcLine[startX-1]);
+
+          // Swap buffers: this row's "down" signs become next row's "up" signs.
+          signTmpLine  = signUpLine;
+          signUpLine   = signDownLine;
+          signDownLine = signTmpLine;
+
+          srcLine += srcStride;
+          orgLine += orgStride;
+        }
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        if(isCalculatePreDeblockSamples)
+        {
+          if(isBelowAvail)
+          {
+            startX = isLeftAvail  ? 0     : 1 ;
+            endX   = isRightAvail ? width : (width -1);
+
+            for(y=0; y<skipLinesB[typeIdx]; y++)
+            {
+              srcLineBelow = srcLine + srcStride;
+              srcLineAbove = srcLine - srcStride;
+
+              for (x=startX; x< endX; x++)
+              {
+                edgeType = sgn(srcLine[x] - srcLineBelow[x+1]) + sgn(srcLine[x] - srcLineAbove[x-1]);
+                diff [edgeType] += (orgLine[x] - srcLine[x]);
+                count[edgeType] ++;
+              }
+              srcLine  += srcStride;
+              orgLine  += orgStride;
+            }
+          }
+        }
+#endif
+      }
+      break;
+    case SAO_TYPE_EO_45:   // 45-degree diagonal (above-right / below-left)
+      {
+        diff +=2;
+        count+=2;
+        Char *signUpLine = m_signLineBuf1+1;
+
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        startX = (!isCalculatePreDeblockSamples) ? (isLeftAvail  ? 0 : 1)
+                                                 : (isRightAvail ? (width - skipLinesR[typeIdx]) : (width - 1))
+                                                 ;
+#else
+        startX = isLeftAvail ? 0 : 1;
+#endif
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        endX   = (!isCalculatePreDeblockSamples) ? (isRightAvail ? (width - skipLinesR[typeIdx]) : (width - 1))
+                                                 : (isRightAvail ? width : (width - 1))
+                                                 ;
+#else
+        endX   = isRightAvail ? (width - skipLinesR[typeIdx]) : (width - 1);
+#endif
+        endY   = isBelowAvail ? (height - skipLinesB[typeIdx]) : (height - 1);
+
+        //prepare 2nd line upper sign
+        Pel* srcLineBelow = srcLine + srcStride;
+        for (x=startX-1; x<endX; x++)
+        {
+          signUpLine[x] = (Char)sgn(srcLineBelow[x] - srcLine[x+1]);
+        }
+
+
+        //first line
+        Pel* srcLineAbove = srcLine - srcStride;
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        firstLineStartX = (!isCalculatePreDeblockSamples) ? (isAboveAvail ? startX : endX)
+                                                          : startX
+                                                          ;
+        firstLineEndX   = (!isCalculatePreDeblockSamples) ? ((!isRightAvail && isAboveRightAvail) ? width : endX)
+                                                          : endX
+                                                          ;
+#else
+        firstLineStartX = isAboveAvail ? startX : endX;
+        firstLineEndX   = (!isRightAvail && isAboveRightAvail) ? width : endX;
+#endif
+        for(x=firstLineStartX; x<firstLineEndX; x++)
+        {
+          edgeType = sgn(srcLine[x] - srcLineAbove[x+1]) - signUpLine[x-1];
+          diff [edgeType] += (orgLine[x] - srcLine[x]);
+          count[edgeType] ++;
+        }
+
+        srcLine += srcStride;
+        orgLine += orgStride;
+
+        //middle lines
+        for (y=1; y<endY; y++)
+        {
+          srcLineBelow = srcLine + srcStride;
+
+          for(x=startX; x<endX; x++)
+          {
+            signDown = (Char)sgn(srcLine[x] - srcLineBelow[x-1]);
+            edgeType = signDown + signUpLine[x];
+
+            diff [edgeType] += (orgLine[x] - srcLine[x]);
+            count[edgeType] ++;
+
+            signUpLine[x-1] = -signDown;
+          }
+          signUpLine[endX-1] = (Char)sgn(srcLineBelow[endX-1] - srcLine[endX]);
+          srcLine  += srcStride;
+          orgLine  += orgStride;
+        }
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        if(isCalculatePreDeblockSamples)
+        {
+          if(isBelowAvail)
+          {
+            startX = isLeftAvail  ? 0     : 1 ;
+            endX   = isRightAvail ? width : (width -1);
+
+            for(y=0; y<skipLinesB[typeIdx]; y++)
+            {
+              srcLineBelow = srcLine + srcStride;
+              srcLineAbove = srcLine - srcStride;
+
+              for (x=startX; x<endX; x++)
+              {
+                edgeType = sgn(srcLine[x] - srcLineBelow[x-1]) + sgn(srcLine[x] - srcLineAbove[x+1]);
+                diff [edgeType] += (orgLine[x] - srcLine[x]);
+                count[edgeType] ++;
+              }
+              srcLine  += srcStride;
+              orgLine  += orgStride;
+            }
+          }
+        }
+#endif
+      }
+      break;
+    case SAO_TYPE_BO:   // band offset: classify by sample intensity band
+      {
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        startX = (!isCalculatePreDeblockSamples)?0
+                                                :( isRightAvail?(width- skipLinesR[typeIdx]):width)
+                                                ;
+        endX   = (!isCalculatePreDeblockSamples)?(isRightAvail ? (width - skipLinesR[typeIdx]) : width )
+                                                :width
+                                                ;
+#else
+        endX = isRightAvail ? (width- skipLinesR[typeIdx]) : width;
+#endif
+        endY = isBelowAvail ? (height- skipLinesB[typeIdx]) : height;
+        // Top bits of the sample value select one of NUM_SAO_BO_CLASSES bands.
+        Int shiftBits = g_bitDepth[toChannelType(compIdx)] - NUM_SAO_BO_CLASSES_LOG2;
+        for (y=0; y< endY; y++)
+        {
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+          for (x=startX; x< endX; x++)
+#else
+          for (x=0; x< endX; x++)
+#endif
+          {
+
+            Int bandIdx= srcLine[x] >> shiftBits;
+            diff [bandIdx] += (orgLine[x] - srcLine[x]);
+            count[bandIdx] ++;
+          }
+          srcLine += srcStride;
+          orgLine += orgStride;
+        }
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+        if(isCalculatePreDeblockSamples)
+        {
+          if(isBelowAvail)
+          {
+            startX = 0;
+            endX   = width;
+
+            for(y= 0; y< skipLinesB[typeIdx]; y++)
+            {
+              for (x=startX; x< endX; x++)
+              {
+                Int bandIdx= srcLine[x] >> shiftBits;
+                diff [bandIdx] += (orgLine[x] - srcLine[x]);
+                count[bandIdx] ++;
+              }
+              srcLine  += srcStride;
+              orgLine  += orgStride;
+
+            }
+
+          }
+        }
+#endif
+      }
+      break;
+    default:
+      {
+        printf("Not a supported SAO types\n");
+        assert(0);
+        exit(-1);
+      }
+    }
+  }
+}
+
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncSampleAdaptiveOffset.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,168 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ \file     TEncSampleAdaptiveOffset.h
+ \brief    estimation part of sample adaptive offset class (header)
+ */
+
+#ifndef __TENCSAMPLEADAPTIVEOFFSET__
+#define __TENCSAMPLEADAPTIVEOFFSET__
+
+#include "TLibCommon/TComSampleAdaptiveOffset.h"
+#include "TLibCommon/TComPic.h"
+
+#include "TEncEntropy.h"
+#include "TEncSbac.h"
+#include "TLibCommon/TComBitCounter.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
// Labels for the CABAC coder state snapshots kept while RD-searching SAO
// parameters.  (Note: the "Lables" typo is kept — the name is part of the
// public interface.)
enum SAOCabacStateLablesRDO //CABAC state labels
{
  SAO_CABACSTATE_PIC_INIT =0,  // state at picture initialisation
  SAO_CABACSTATE_BLK_CUR,      // state before coding the current CTU
  SAO_CABACSTATE_BLK_NEXT,     // state carried to the next CTU
  SAO_CABACSTATE_BLK_MID,      // intermediate state during a CTU decision
  SAO_CABACSTATE_BLK_TEMP,     // scratch state for trying a candidate mode
  NUM_SAO_CABACSTATE_LABELS    // count — used to size state arrays
};
+
+struct SAOStatData //data structure for SAO statistics
+{
+  Int64 diff[MAX_NUM_SAO_CLASSES];
+  Int64 count[MAX_NUM_SAO_CLASSES];
+
+  SAOStatData(){}
+  ~SAOStatData(){}
+  Void reset()
+  {
+    ::memset(diff, 0, sizeof(Int64)*MAX_NUM_SAO_CLASSES);
+    ::memset(count, 0, sizeof(Int64)*MAX_NUM_SAO_CLASSES);
+  }
+  const SAOStatData& operator=(const SAOStatData& src)
+  {
+    ::memcpy(diff, src.diff, sizeof(Int64)*MAX_NUM_SAO_CLASSES);
+    ::memcpy(count, src.count, sizeof(Int64)*MAX_NUM_SAO_CLASSES);
+    return *this;
+  }
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+  const SAOStatData& operator+= (const SAOStatData& src)
+  {
+    for(Int i=0; i< MAX_NUM_SAO_CLASSES; i++)
+    {
+      diff[i] += src.diff[i];
+      count[i] += src.count[i];
+    }
+    return *this;
+  }
+#endif
+};
+
// Encoder-side SAO: gathers per-CTU statistics, then RD-decides picture- and
// block-level SAO parameters on top of the core filter in
// TComSampleAdaptiveOffset.
class TEncSampleAdaptiveOffset : public TComSampleAdaptiveOffset
{
public:
  TEncSampleAdaptiveOffset();
  virtual ~TEncSampleAdaptiveOffset();

  //interface
#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
  Void createEncData(Bool isPreDBFSamplesUsed);
#else
  Void createEncData();
#endif
  Void destroyEncData();
  Void initRDOCabacCoder(TEncSbac* pcRDGoOnSbacCoder, TComSlice* pcSlice) ;
  // Main entry point: estimates statistics, decides parameters and applies SAO.
  Void SAOProcess(TComPic* pPic, Bool* sliceEnabled, const Double *lambdas
#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
                , Bool isPreDBFSamplesUsed
#endif
                );
public: //methods
#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
  Void getPreDBFStatistics(TComPic* pPic);
#endif
private: //methods
  Void getStatistics(SAOStatData*** blkStats, TComPicYuv* orgYuv, TComPicYuv* srcYuv,TComPic* pPic
#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
                   , Bool isCalculatePreDeblockSamples = false
#endif
                   );
  Void decidePicParams(Bool* sliceEnabled, Int picTempLayer);
  Void decideBlkParams(TComPic* pic, Bool* sliceEnabled, SAOStatData*** blkStats, TComPicYuv* srcYuv, TComPicYuv* resYuv, SAOBlkParam* reconParams, SAOBlkParam* codedParams);
  // Accumulates diff/count statistics for one block; availability flags tell
  // which neighbouring samples may be read.
  Void getBlkStats(ComponentID compIdx, SAOStatData* statsDataTypes, Pel* srcBlk, Pel* orgBlk, Int srcStride, Int orgStride, Int width, Int height, Bool isLeftAvail,  Bool isRightAvail, Bool isAboveAvail, Bool isBelowAvail, Bool isAboveLeftAvail, Bool isAboveRightAvail, Bool isBelowLeftAvail, Bool isBelowRightAvail
#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
                  , Bool isCalculatePreDeblockSamples
#endif
                  );
  Void deriveModeNewRDO(Int ctuRsAddr, SAOBlkParam* mergeList[NUM_SAO_MERGE_TYPES], Bool* sliceEnabled, SAOStatData*** blkStats, SAOBlkParam& modeParam, Double& modeNormCost, TEncSbac** cabacCoderRDO, Int inCabacLabel);
  Void deriveModeMergeRDO(Int ctuRsAddr, SAOBlkParam* mergeList[NUM_SAO_MERGE_TYPES], Bool* sliceEnabled, SAOStatData*** blkStats, SAOBlkParam& modeParam, Double& modeNormCost, TEncSbac** cabacCoderRDO, Int inCabacLabel);
  Int64 getDistortion(ComponentID compIdx, Int typeIdc, Int typeAuxInfo, Int* offsetVal, SAOStatData& statData);
  Void deriveOffsets(ComponentID compIdx, Int typeIdc, SAOStatData& statData, Int* quantOffsets, Int& typeAuxInfo);
  inline Int64 estSaoDist(Int64 count, Int64 offset, Int64 diffSum, Int shift);
  inline Int estIterOffset(Int typeIdx, Int classIdx, Double lambda, Int offsetInput, Int64 count, Int64 diffSum, Int shift, Int bitIncrease, Int64& bestDist, Double& bestCost, Int offsetTh );
#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
  Void addPreDBFStatistics(SAOStatData*** blkStats);
#endif
private: //members
  //for RDO
  TEncSbac**             m_pppcRDSbacCoder;     // RD-search CABAC coders, one per state label
  TEncSbac*              m_pcRDGoOnSbacCoder;
#if FAST_BIT_EST
  TEncBinCABACCounter**  m_pppcBinCoderCABAC;   // counter-based (fast) bit estimation
#else
  TEncBinCABAC**         m_pppcBinCoderCABAC;
#endif
  Double                 m_lambda[MAX_NUM_COMPONENT];  // RD lambda per component

  //statistics
  SAOStatData***         m_statData; //[ctu][comp][classes]
#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
  SAOStatData***         m_preDBFstatData;      // statistics gathered before deblocking
#endif
#if SAO_ENCODING_CHOICE
  Double                 m_saoDisabledRate[MAX_NUM_COMPONENT][MAX_TLAYER];
#endif
  // Lines skipped on the right/bottom of each CTU (not yet deblocked there).
  Int                    m_skipLinesR[MAX_NUM_COMPONENT][NUM_SAO_NEW_TYPES];
  Int                    m_skipLinesB[MAX_NUM_COMPONENT][NUM_SAO_NEW_TYPES];
};
+
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncSbac.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2008 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncSbac.cpp
+    \brief    SBAC encoder class
+*/
+
+#include "TEncTop.h"
+#include "TEncSbac.h"
+#include "TLibCommon/TComTU.h"
+
+#include <map>
+#include <algorithm>
+
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+#include "../TLibCommon/Debug.h"
+#endif
+
+
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / create / destroy
+// ====================================================================================================================
+
// Constructor: carves up the shared m_contextModels array among all context
// model buffers.  Each ContextModel3DBuffer initializer takes the current
// m_numContextModels as its offset and increments it, so the ORDER of this
// initializer list determines the memory layout — do not reorder.
TEncSbac::TEncSbac()
// new structure here
: m_pcBitIf                            ( NULL )
, m_pcSlice                            ( NULL )
, m_pcBinIf                            ( NULL )
, m_numContextModels                   ( 0 )
, m_cCUSplitFlagSCModel                ( 1,             1,                      NUM_SPLIT_FLAG_CTX                   , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUSkipFlagSCModel                 ( 1,             1,                      NUM_SKIP_FLAG_CTX                    , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUMergeFlagExtSCModel             ( 1,             1,                      NUM_MERGE_FLAG_EXT_CTX               , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUMergeIdxExtSCModel              ( 1,             1,                      NUM_MERGE_IDX_EXT_CTX                , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUPartSizeSCModel                 ( 1,             1,                      NUM_PART_SIZE_CTX                    , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUPredModeSCModel                 ( 1,             1,                      NUM_PRED_MODE_CTX                    , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUIntraPredSCModel                ( 1,             1,                      NUM_ADI_CTX                          , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUChromaPredSCModel               ( 1,             1,                      NUM_CHROMA_PRED_CTX                  , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUDeltaQpSCModel                  ( 1,             1,                      NUM_DELTA_QP_CTX                     , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUInterDirSCModel                 ( 1,             1,                      NUM_INTER_DIR_CTX                    , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCURefPicSCModel                   ( 1,             1,                      NUM_REF_NO_CTX                       , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUMvdSCModel                      ( 1,             1,                      NUM_MV_RES_CTX                       , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUQtCbfSCModel                    ( 1,             NUM_QT_CBF_CTX_SETS,    NUM_QT_CBF_CTX_PER_SET               , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUTransSubdivFlagSCModel          ( 1,             1,                      NUM_TRANS_SUBDIV_FLAG_CTX            , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUQtRootCbfSCModel                ( 1,             1,                      NUM_QT_ROOT_CBF_CTX                  , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUSigCoeffGroupSCModel            ( 1,             2,                      NUM_SIG_CG_FLAG_CTX                  , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUSigSCModel                      ( 1,             1,                      NUM_SIG_FLAG_CTX                     , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCuCtxLastX                        ( 1,             NUM_CTX_LAST_FLAG_SETS, NUM_CTX_LAST_FLAG_XY                 , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCuCtxLastY                        ( 1,             NUM_CTX_LAST_FLAG_SETS, NUM_CTX_LAST_FLAG_XY                 , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUOneSCModel                      ( 1,             1,                      NUM_ONE_FLAG_CTX                     , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCUAbsSCModel                      ( 1,             1,                      NUM_ABS_FLAG_CTX                     , m_contextModels + m_numContextModels, m_numContextModels)
, m_cMVPIdxSCModel                     ( 1,             1,                      NUM_MVP_IDX_CTX                      , m_contextModels + m_numContextModels, m_numContextModels)
, m_cSaoMergeSCModel                   ( 1,             1,                      NUM_SAO_MERGE_FLAG_CTX               , m_contextModels + m_numContextModels, m_numContextModels)
, m_cSaoTypeIdxSCModel                 ( 1,             1,                      NUM_SAO_TYPE_IDX_CTX                 , m_contextModels + m_numContextModels, m_numContextModels)
, m_cTransformSkipSCModel              ( 1,             MAX_NUM_CHANNEL_TYPE,   NUM_TRANSFORMSKIP_FLAG_CTX           , m_contextModels + m_numContextModels, m_numContextModels)
, m_CUTransquantBypassFlagSCModel      ( 1,             1,                      NUM_CU_TRANSQUANT_BYPASS_FLAG_CTX    , m_contextModels + m_numContextModels, m_numContextModels)
, m_explicitRdpcmFlagSCModel           ( 1,             MAX_NUM_CHANNEL_TYPE,   NUM_EXPLICIT_RDPCM_FLAG_CTX          , m_contextModels + m_numContextModels, m_numContextModels)
, m_explicitRdpcmDirSCModel            ( 1,             MAX_NUM_CHANNEL_TYPE,   NUM_EXPLICIT_RDPCM_DIR_CTX           , m_contextModels + m_numContextModels, m_numContextModels)
, m_cCrossComponentPredictionSCModel   ( 1,             1,                      NUM_CROSS_COMPONENT_PREDICTION_CTX   , m_contextModels + m_numContextModels, m_numContextModels)
, m_ChromaQpAdjFlagSCModel             ( 1,             1,                      NUM_CHROMA_QP_ADJ_FLAG_CTX           , m_contextModels + m_numContextModels, m_numContextModels)
, m_ChromaQpAdjIdcSCModel              ( 1,             1,                      NUM_CHROMA_QP_ADJ_IDC_CTX            , m_contextModels + m_numContextModels, m_numContextModels)
{
  // The shared pool must be large enough for the total claimed above.
  assert( m_numContextModels <= MAX_NUM_CTX_MOD );
}
+
// Destructor: nothing to release — all context models live in the
// fixed-size m_contextModels member array.
TEncSbac::~TEncSbac()
{
}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
// Re-initialises every context model from its slice-type/QP init table and
// restarts the arithmetic coder.  When cabac_init_present_flag is set, a
// P/B slice may borrow the other inter slice type's tables, as recorded by
// a previous determineCabacInitIdx() in the PPS.
Void TEncSbac::resetEntropy           ()
{
  Int  iQp              = m_pcSlice->getSliceQp();
  SliceType eSliceType  = m_pcSlice->getSliceType();

  Int  encCABACTableIdx = m_pcSlice->getPPS()->getEncCABACTableIdx();
  if (!m_pcSlice->isIntra() && (encCABACTableIdx==B_SLICE || encCABACTableIdx==P_SLICE) && m_pcSlice->getPPS()->getCabacInitPresentFlag())
  {
    // Use the init table chosen for the previous inter slice.
    eSliceType = (SliceType) encCABACTableIdx;
  }

  m_cCUSplitFlagSCModel.initBuffer                ( eSliceType, iQp, (UChar*)INIT_SPLIT_FLAG );
  m_cCUSkipFlagSCModel.initBuffer                 ( eSliceType, iQp, (UChar*)INIT_SKIP_FLAG );
  m_cCUMergeFlagExtSCModel.initBuffer             ( eSliceType, iQp, (UChar*)INIT_MERGE_FLAG_EXT);
  m_cCUMergeIdxExtSCModel.initBuffer              ( eSliceType, iQp, (UChar*)INIT_MERGE_IDX_EXT);
  m_cCUPartSizeSCModel.initBuffer                 ( eSliceType, iQp, (UChar*)INIT_PART_SIZE );
  m_cCUPredModeSCModel.initBuffer                 ( eSliceType, iQp, (UChar*)INIT_PRED_MODE );
  m_cCUIntraPredSCModel.initBuffer                ( eSliceType, iQp, (UChar*)INIT_INTRA_PRED_MODE );
  m_cCUChromaPredSCModel.initBuffer               ( eSliceType, iQp, (UChar*)INIT_CHROMA_PRED_MODE );
  m_cCUInterDirSCModel.initBuffer                 ( eSliceType, iQp, (UChar*)INIT_INTER_DIR );
  m_cCUMvdSCModel.initBuffer                      ( eSliceType, iQp, (UChar*)INIT_MVD );
  m_cCURefPicSCModel.initBuffer                   ( eSliceType, iQp, (UChar*)INIT_REF_PIC );
  m_cCUDeltaQpSCModel.initBuffer                  ( eSliceType, iQp, (UChar*)INIT_DQP );
  m_cCUQtCbfSCModel.initBuffer                    ( eSliceType, iQp, (UChar*)INIT_QT_CBF );
  m_cCUQtRootCbfSCModel.initBuffer                ( eSliceType, iQp, (UChar*)INIT_QT_ROOT_CBF );
  m_cCUSigCoeffGroupSCModel.initBuffer            ( eSliceType, iQp, (UChar*)INIT_SIG_CG_FLAG );
  m_cCUSigSCModel.initBuffer                      ( eSliceType, iQp, (UChar*)INIT_SIG_FLAG );
  m_cCuCtxLastX.initBuffer                        ( eSliceType, iQp, (UChar*)INIT_LAST );
  m_cCuCtxLastY.initBuffer                        ( eSliceType, iQp, (UChar*)INIT_LAST );
  m_cCUOneSCModel.initBuffer                      ( eSliceType, iQp, (UChar*)INIT_ONE_FLAG );
  m_cCUAbsSCModel.initBuffer                      ( eSliceType, iQp, (UChar*)INIT_ABS_FLAG );
  m_cMVPIdxSCModel.initBuffer                     ( eSliceType, iQp, (UChar*)INIT_MVP_IDX );
  m_cCUTransSubdivFlagSCModel.initBuffer          ( eSliceType, iQp, (UChar*)INIT_TRANS_SUBDIV_FLAG );
  m_cSaoMergeSCModel.initBuffer                   ( eSliceType, iQp, (UChar*)INIT_SAO_MERGE_FLAG );
  m_cSaoTypeIdxSCModel.initBuffer                 ( eSliceType, iQp, (UChar*)INIT_SAO_TYPE_IDX );
  m_cTransformSkipSCModel.initBuffer              ( eSliceType, iQp, (UChar*)INIT_TRANSFORMSKIP_FLAG );
  m_CUTransquantBypassFlagSCModel.initBuffer      ( eSliceType, iQp, (UChar*)INIT_CU_TRANSQUANT_BYPASS_FLAG );
  m_explicitRdpcmFlagSCModel.initBuffer           ( eSliceType, iQp, (UChar*)INIT_EXPLICIT_RDPCM_FLAG);
  m_explicitRdpcmDirSCModel.initBuffer            ( eSliceType, iQp, (UChar*)INIT_EXPLICIT_RDPCM_DIR);
  m_cCrossComponentPredictionSCModel.initBuffer   ( eSliceType, iQp, (UChar*)INIT_CROSS_COMPONENT_PREDICTION  );
  m_ChromaQpAdjFlagSCModel.initBuffer             ( eSliceType, iQp, (UChar*)INIT_CHROMA_QP_ADJ_FLAG );
  m_ChromaQpAdjIdcSCModel.initBuffer              ( eSliceType, iQp, (UChar*)INIT_CHROMA_QP_ADJ_IDC );

  // Reset the Golomb-Rice parameter adaptation statistics (RExt).
  for (UInt statisticIndex = 0; statisticIndex < RExt__GOLOMB_RICE_ADAPTATION_STATISTICS_SETS ; statisticIndex++)
  {
    m_golombRiceAdaptationStatistics[statisticIndex] = 0;
  }

  m_pcBinIf->start();

  return;
}
+
/** The function does the following:
 * If current slice type is P/B then it determines the distance of initialisation type 1 and 2 from the current CABAC states and
 * stores the index of the closest table.  This index is used for the next P/B slice when cabac_init_present_flag is true.
 * For intra slices the table index is simply reset to I_SLICE.
 */
Void TEncSbac::determineCabacInitIdx()
{
  Int  qp              = m_pcSlice->getSliceQp();

  if (!m_pcSlice->isIntra())
  {
    // Candidate init tables: B-slice set and P-slice set.
    SliceType aSliceTypeChoices[] = {B_SLICE, P_SLICE};

    UInt bestCost             = MAX_UINT;
    SliceType bestSliceType   = aSliceTypeChoices[0];
    for (UInt idx=0; idx<2; idx++)
    {
      // Sum, over every context model, the "distance" between the candidate
      // init table and the current (adapted) CABAC state.
      UInt curCost          = 0;
      SliceType curSliceType  = aSliceTypeChoices[idx];

      curCost  = m_cCUSplitFlagSCModel.calcCost                ( curSliceType, qp, (UChar*)INIT_SPLIT_FLAG );
      curCost += m_cCUSkipFlagSCModel.calcCost                 ( curSliceType, qp, (UChar*)INIT_SKIP_FLAG );
      curCost += m_cCUMergeFlagExtSCModel.calcCost             ( curSliceType, qp, (UChar*)INIT_MERGE_FLAG_EXT);
      curCost += m_cCUMergeIdxExtSCModel.calcCost              ( curSliceType, qp, (UChar*)INIT_MERGE_IDX_EXT);
      curCost += m_cCUPartSizeSCModel.calcCost                 ( curSliceType, qp, (UChar*)INIT_PART_SIZE );
      curCost += m_cCUPredModeSCModel.calcCost                 ( curSliceType, qp, (UChar*)INIT_PRED_MODE );
      curCost += m_cCUIntraPredSCModel.calcCost                ( curSliceType, qp, (UChar*)INIT_INTRA_PRED_MODE );
      curCost += m_cCUChromaPredSCModel.calcCost               ( curSliceType, qp, (UChar*)INIT_CHROMA_PRED_MODE );
      curCost += m_cCUInterDirSCModel.calcCost                 ( curSliceType, qp, (UChar*)INIT_INTER_DIR );
      curCost += m_cCUMvdSCModel.calcCost                      ( curSliceType, qp, (UChar*)INIT_MVD );
      curCost += m_cCURefPicSCModel.calcCost                   ( curSliceType, qp, (UChar*)INIT_REF_PIC );
      curCost += m_cCUDeltaQpSCModel.calcCost                  ( curSliceType, qp, (UChar*)INIT_DQP );
      curCost += m_cCUQtCbfSCModel.calcCost                    ( curSliceType, qp, (UChar*)INIT_QT_CBF );
      curCost += m_cCUQtRootCbfSCModel.calcCost                ( curSliceType, qp, (UChar*)INIT_QT_ROOT_CBF );
      curCost += m_cCUSigCoeffGroupSCModel.calcCost            ( curSliceType, qp, (UChar*)INIT_SIG_CG_FLAG );
      curCost += m_cCUSigSCModel.calcCost                      ( curSliceType, qp, (UChar*)INIT_SIG_FLAG );
      curCost += m_cCuCtxLastX.calcCost                        ( curSliceType, qp, (UChar*)INIT_LAST );
      curCost += m_cCuCtxLastY.calcCost                        ( curSliceType, qp, (UChar*)INIT_LAST );
      curCost += m_cCUOneSCModel.calcCost                      ( curSliceType, qp, (UChar*)INIT_ONE_FLAG );
      curCost += m_cCUAbsSCModel.calcCost                      ( curSliceType, qp, (UChar*)INIT_ABS_FLAG );
      curCost += m_cMVPIdxSCModel.calcCost                     ( curSliceType, qp, (UChar*)INIT_MVP_IDX );
      curCost += m_cCUTransSubdivFlagSCModel.calcCost          ( curSliceType, qp, (UChar*)INIT_TRANS_SUBDIV_FLAG );
      curCost += m_cSaoMergeSCModel.calcCost                   ( curSliceType, qp, (UChar*)INIT_SAO_MERGE_FLAG );
      curCost += m_cSaoTypeIdxSCModel.calcCost                 ( curSliceType, qp, (UChar*)INIT_SAO_TYPE_IDX );
      curCost += m_cTransformSkipSCModel.calcCost              ( curSliceType, qp, (UChar*)INIT_TRANSFORMSKIP_FLAG );
      curCost += m_CUTransquantBypassFlagSCModel.calcCost      ( curSliceType, qp, (UChar*)INIT_CU_TRANSQUANT_BYPASS_FLAG );
      curCost += m_explicitRdpcmFlagSCModel.calcCost           ( curSliceType, qp, (UChar*)INIT_EXPLICIT_RDPCM_FLAG);
      curCost += m_explicitRdpcmDirSCModel.calcCost            ( curSliceType, qp, (UChar*)INIT_EXPLICIT_RDPCM_DIR);
      curCost += m_cCrossComponentPredictionSCModel.calcCost   ( curSliceType, qp, (UChar*)INIT_CROSS_COMPONENT_PREDICTION );
      curCost += m_ChromaQpAdjFlagSCModel.calcCost             ( curSliceType, qp, (UChar*)INIT_CHROMA_QP_ADJ_FLAG );
      curCost += m_ChromaQpAdjIdcSCModel.calcCost              ( curSliceType, qp, (UChar*)INIT_CHROMA_QP_ADJ_IDC );

      if (curCost < bestCost)
      {
        bestSliceType = curSliceType;
        bestCost      = curCost;
      }
    }
    // Remember the winner for the next P/B slice.
    m_pcSlice->getPPS()->setEncCABACTableIdx( bestSliceType );
  }
  else
  {
    m_pcSlice->getPPS()->setEncCABACTableIdx( I_SLICE );
  }
}
+
// The following five entry points exist to satisfy the entropy-coder
// interface, but parameter sets, slice headers and WPP entry points are
// always written by the CAVLC writer — reaching any of them through the
// SBAC coder is a programming error, hence assert(0).

Void TEncSbac::codeVPS( TComVPS* pcVPS )
{
  assert (0);
  return;
}

Void TEncSbac::codeSPS( TComSPS* pcSPS )
{
  assert (0);
  return;
}

Void TEncSbac::codePPS( TComPPS* pcPPS )
{
  assert (0);
  return;
}

Void TEncSbac::codeSliceHeader( TComSlice* pcSlice )
{
  assert (0);
  return;
}

Void TEncSbac::codeTilesWPPEntryPoint( TComSlice* pSlice )
{
  assert (0);
  return;
}
+
// Writes the end-of-slice-segment terminating bin (1 = last CTU).
Void TEncSbac::codeTerminatingBit( UInt uilsLast )
{
  m_pcBinIf->encodeBinTrm( uilsLast );
}

// Flushes the arithmetic coder at the end of the slice.
Void TEncSbac::codeSliceFinish()
{
  m_pcBinIf->finish();
}
+
+Void TEncSbac::xWriteUnarySymbol( UInt uiSymbol, ContextModel* pcSCModel, Int iOffset )
+{
+  m_pcBinIf->encodeBin( uiSymbol ? 1 : 0, pcSCModel[0] );
+
+  if( 0 == uiSymbol)
+  {
+    return;
+  }
+
+  while( uiSymbol-- )
+  {
+    m_pcBinIf->encodeBin( uiSymbol ? 1 : 0, pcSCModel[ iOffset ] );
+  }
+
+  return;
+}
+
/** Writes uiSymbol as a truncated unary code with maximum value uiMaxSymbol.
 *  The first bin uses pcSCModel[0], later bins pcSCModel[iOffset]; the
 *  terminating '0' bin is omitted when uiSymbol == uiMaxSymbol (decoder can
 *  infer it).
 */
Void TEncSbac::xWriteUnaryMaxSymbol( UInt uiSymbol, ContextModel* pcSCModel, Int iOffset, UInt uiMaxSymbol )
{
  if (uiMaxSymbol == 0)
  {
    return; // nothing to signal
  }

  m_pcBinIf->encodeBin( uiSymbol ? 1 : 0, pcSCModel[ 0 ] );

  if ( uiSymbol == 0 )
  {
    return;
  }

  // Terminating bin is only needed when uiSymbol is below the maximum.
  Bool bCodeLast = ( uiMaxSymbol > uiSymbol );

  while( --uiSymbol )
  {
    m_pcBinIf->encodeBin( 1, pcSCModel[ iOffset ] );
  }
  if( bCodeLast )
  {
    m_pcBinIf->encodeBin( 0, pcSCModel[ iOffset ] );
  }

  return;
}
+
/** Writes uiSymbol as a uiCount-th order Exp-Golomb code using equal-
 *  probability (bypass) bins.  The prefix ('1' per escape step, then '0')
 *  and the uiCount-bit suffix are packed into a single word and emitted
 *  with one encodeBinsEP call.
 */
Void TEncSbac::xWriteEpExGolomb( UInt uiSymbol, UInt uiCount )
{
  UInt bins = 0;    // accumulated prefix bins
  Int numBins = 0;  // number of bins accumulated so far

  // Unary prefix: one '1' per order step while the symbol still exceeds
  // the current group; the order grows by one each step.
  while( uiSymbol >= (UInt)(1<<uiCount) )
  {
    bins = 2 * bins + 1;
    numBins++;
    uiSymbol -= 1 << uiCount;
    uiCount  ++;
  }
  bins = 2 * bins + 0; // prefix terminator
  numBins++;

  // Fixed-length suffix: the remaining uiCount low-order bits.
  bins = (bins << uiCount) | uiSymbol;
  numBins += uiCount;

  assert( numBins <= 32 ); // must fit the single-word bypass write
  m_pcBinIf->encodeBinsEP( bins, numBins );
}
+
+
/** Coding of coeff_abs_level_remaining with Golomb-Rice / Exp-Golomb bypass bins.
 * \param symbol                 value of coeff_abs_level_remaining to code
 * \param rParam                 reference to the Rice parameter
 * \param useLimitedPrefixLength true to bound the prefix length (RExt high-bit-depth operation)
 * \param channelType            luma/chroma channel, selects the transform dynamic range bound
 * \returns Void
 */
Void TEncSbac::xWriteCoefRemainExGolomb ( UInt symbol, UInt &rParam, const Bool useLimitedPrefixLength, const ChannelType channelType )
{
  Int codeNumber  = (Int)symbol;
  UInt length;

  if (codeNumber < (COEF_REMAIN_BIN_REDUCTION << rParam))
  {
    // Small values: plain Golomb-Rice — unary prefix plus rParam suffix bits.
    length = codeNumber>>rParam;
    m_pcBinIf->encodeBinsEP( (1<<(length+1))-2 , length+1);
    m_pcBinIf->encodeBinsEP((codeNumber%(1<<rParam)),rParam);
  }
  else if (useLimitedPrefixLength)
  {
    // RExt: cap the escape prefix so the whole codeword fits 32 bits even
    // at extended bit depths.
    const UInt maximumPrefixLength = (32 - (COEF_REMAIN_BIN_REDUCTION + g_maxTrDynamicRange[channelType]));

    UInt prefixLength = 0;
    UInt suffixLength = MAX_UINT;
    UInt codeValue    = (symbol >> rParam) - COEF_REMAIN_BIN_REDUCTION;

    if (codeValue >= ((1 << maximumPrefixLength) - 1))
    {
      // Saturated prefix: suffix spans the full remaining dynamic range.
      prefixLength = maximumPrefixLength;
      suffixLength = g_maxTrDynamicRange[channelType] - rParam;
    }
    else
    {
      while (codeValue > ((2 << prefixLength) - 2))
      {
        prefixLength++;
      }

      suffixLength = prefixLength + 1; //+1 for the separator bit
    }

    const UInt suffix = codeValue - ((1 << prefixLength) - 1);

    const UInt totalPrefixLength = prefixLength + COEF_REMAIN_BIN_REDUCTION;
    const UInt prefix            = (1 << totalPrefixLength) - 1;
    const UInt rParamBitMask     = (1 << rParam) - 1;

    m_pcBinIf->encodeBinsEP(  prefix,                                        totalPrefixLength      ); //prefix
    m_pcBinIf->encodeBinsEP(((suffix << rParam) | (symbol & rParamBitMask)), (suffixLength + rParam)); //separator, suffix, and rParam bits
  }
  else
  {
    // Escape to Exp-Golomb for the part above the Rice-coded range.
    length = rParam;
    codeNumber  = codeNumber - ( COEF_REMAIN_BIN_REDUCTION << rParam);

    while (codeNumber >= (1<<length))
    {
      codeNumber -=  (1<<(length++));
    }

    m_pcBinIf->encodeBinsEP((1<<(COEF_REMAIN_BIN_REDUCTION+length+1-rParam))-2,COEF_REMAIN_BIN_REDUCTION+length+1-rParam);
    m_pcBinIf->encodeBinsEP(codeNumber,length);
  }
}
+
// SBAC RD — state save/restore helpers used by the rate-distortion search,
// which repeatedly snapshots and rewinds the CABAC coder state.

// Copy the full coder state (bin engine + all context models) from pSrc.
Void  TEncSbac::load ( const TEncSbac* pSrc)
{
  this->xCopyFrom(pSrc);
}

// Copy only the bin-engine state plus the intra-prediction context of the
// given channel — cheaper than a full load during intra mode search.
Void  TEncSbac::loadIntraDirMode( const TEncSbac* pSrc, const ChannelType chType )
{
  m_pcBinIf->copyState( pSrc->m_pcBinIf );
  if (isLuma(chType))
    this->m_cCUIntraPredSCModel      .copyFrom( &pSrc->m_cCUIntraPredSCModel       );
  else
    this->m_cCUChromaPredSCModel     .copyFrom( &pSrc->m_cCUChromaPredSCModel      );
}


// Copy this coder's full state into pDest (inverse of load()).
Void  TEncSbac::store( TEncSbac* pDest) const
{
  pDest->xCopyFrom( this );
}


// Shared implementation: bin-engine state, then every context model.
Void TEncSbac::xCopyFrom( const TEncSbac* pSrc )
{
  m_pcBinIf->copyState( pSrc->m_pcBinIf );
  xCopyContextsFrom(pSrc);
}
+
// Codes the motion-vector-predictor index as a truncated unary code with
// maximum AMVP_MAX_NUM_CANDS - 1.
Void TEncSbac::codeMVPIdx ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )
{
  Int iSymbol = pcCU->getMVPIdx(eRefList, uiAbsPartIdx);
  Int iNum = AMVP_MAX_NUM_CANDS;

  xWriteUnaryMaxSymbol(iSymbol, m_cMVPIdxSCModel.get(0), 1, iNum-1);
}
+
// Codes the part_mode syntax element.  The bin pattern depends on
// intra/inter, CU depth, minimum CU size (8x8) and whether asymmetric
// motion partitions (AMP) are enabled at this depth — the exact bin order
// is normative, so no branch may be reordered.
Void TEncSbac::codePartSize( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth )
{
  PartSize eSize         = pcCU->getPartitionSize( uiAbsPartIdx );

  if ( pcCU->isIntra( uiAbsPartIdx ) )
  {
    // Intra: only 2Nx2N vs NxN, and only at the maximum depth.
    if( uiDepth == g_uiMaxCUDepth - g_uiAddCUDepth )
    {
      m_pcBinIf->encodeBin( eSize == SIZE_2Nx2N? 1 : 0, m_cCUPartSizeSCModel.get( 0, 0, 0 ) );
    }
    return;
  }

  switch(eSize)
  {
    case SIZE_2Nx2N:
    {
      m_pcBinIf->encodeBin( 1, m_cCUPartSizeSCModel.get( 0, 0, 0) );
      break;
    }
    case SIZE_2NxN:
    case SIZE_2NxnU:
    case SIZE_2NxnD:
    {
      // Horizontal split family: 0, then 1.
      m_pcBinIf->encodeBin( 0, m_cCUPartSizeSCModel.get( 0, 0, 0) );
      m_pcBinIf->encodeBin( 1, m_cCUPartSizeSCModel.get( 0, 0, 1) );
      if ( pcCU->getSlice()->getSPS()->getAMPAcc( uiDepth ) )
      {
        // Distinguish symmetric 2NxN from the two asymmetric modes.
        if (eSize == SIZE_2NxN)
        {
          m_pcBinIf->encodeBin(1, m_cCUPartSizeSCModel.get( 0, 0, 3 ));
        }
        else
        {
          m_pcBinIf->encodeBin(0, m_cCUPartSizeSCModel.get( 0, 0, 3 ));
          m_pcBinIf->encodeBinEP((eSize == SIZE_2NxnU? 0: 1));
        }
      }
      break;
    }
    case SIZE_Nx2N:
    case SIZE_nLx2N:
    case SIZE_nRx2N:
    {
      // Vertical split family: 0, then 0.
      m_pcBinIf->encodeBin( 0, m_cCUPartSizeSCModel.get( 0, 0, 0) );
      m_pcBinIf->encodeBin( 0, m_cCUPartSizeSCModel.get( 0, 0, 1) );

      // Extra bin only needed where NxN would also be a legal choice.
      if( uiDepth == g_uiMaxCUDepth - g_uiAddCUDepth && !( pcCU->getWidth(uiAbsPartIdx) == 8 && pcCU->getHeight(uiAbsPartIdx) == 8 ) )
      {
        m_pcBinIf->encodeBin( 1, m_cCUPartSizeSCModel.get( 0, 0, 2) );
      }

      if ( pcCU->getSlice()->getSPS()->getAMPAcc( uiDepth ) )
      {
        if (eSize == SIZE_Nx2N)
        {
          m_pcBinIf->encodeBin(1, m_cCUPartSizeSCModel.get( 0, 0, 3 ));
        }
        else
        {
          m_pcBinIf->encodeBin(0, m_cCUPartSizeSCModel.get( 0, 0, 3 ));
          m_pcBinIf->encodeBinEP((eSize == SIZE_nLx2N? 0: 1));
        }
      }
      break;
    }
    case SIZE_NxN:
    {
      if( uiDepth == g_uiMaxCUDepth - g_uiAddCUDepth && !( pcCU->getWidth(uiAbsPartIdx) == 8 && pcCU->getHeight(uiAbsPartIdx) == 8 ) )
      {
        m_pcBinIf->encodeBin( 0, m_cCUPartSizeSCModel.get( 0, 0, 0) );
        m_pcBinIf->encodeBin( 0, m_cCUPartSizeSCModel.get( 0, 0, 1) );
        m_pcBinIf->encodeBin( 0, m_cCUPartSizeSCModel.get( 0, 0, 2) );
      }
      break;
    }
    default:
    {
      assert(0);
      break;
    }
  }
}
+
+
/** code prediction mode (pred_mode_flag: 1 = intra, 0 = inter)
 * \param pcCU
 * \param uiAbsPartIdx
 * \returns Void
 */
Void TEncSbac::codePredMode( TComDataCU* pcCU, UInt uiAbsPartIdx )
{
  // get context function is here
  m_pcBinIf->encodeBin( pcCU->isIntra( uiAbsPartIdx ) ? 1 : 0, m_cCUPredModeSCModel.get( 0, 0, 0 ) );
}

// Codes cu_transquant_bypass_flag (lossless coding of this CU).
Void TEncSbac::codeCUTransquantBypassFlag( TComDataCU* pcCU, UInt uiAbsPartIdx )
{
  UInt uiSymbol = pcCU->getCUTransquantBypass(uiAbsPartIdx);
  m_pcBinIf->encodeBin( uiSymbol, m_CUTransquantBypassFlagSCModel.get( 0, 0, 0 ) );
}
+
/** code skip flag (cu_skip_flag); the context index is derived from the
 *  skip flags of the neighbouring CUs.
 * \param pcCU
 * \param uiAbsPartIdx
 * \returns Void
 */
Void TEncSbac::codeSkipFlag( TComDataCU* pcCU, UInt uiAbsPartIdx )
{
  // get context function is here
  UInt uiSymbol = pcCU->isSkipped( uiAbsPartIdx ) ? 1 : 0;
  UInt uiCtxSkip = pcCU->getCtxSkipFlag( uiAbsPartIdx ) ;
  m_pcBinIf->encodeBin( uiSymbol, m_cCUSkipFlagSCModel.get( 0, 0, uiCtxSkip ) );
  // Trace output (active only in debug-trace builds).
  DTRACE_CABAC_VL( g_nSymbolCounter++ );
  DTRACE_CABAC_T( "\tSkipFlag" );
  DTRACE_CABAC_T( "\tuiCtxSkip: ");
  DTRACE_CABAC_V( uiCtxSkip );
  DTRACE_CABAC_T( "\tuiSymbol: ");
  DTRACE_CABAC_V( uiSymbol );
  DTRACE_CABAC_T( "\n");
}
+
+/** Code the merge_flag of a PU as one context-coded bin (context 0).
+ * \param pcCU          pointer to the CU containing the PU
+ * \param uiAbsPartIdx  absolute part index of the PU within the CTU
+ * \returns Void
+ */
+Void TEncSbac::codeMergeFlag( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  const UInt uiSymbol = pcCU->getMergeFlag( uiAbsPartIdx ) ? 1 : 0;
+  m_pcBinIf->encodeBin( uiSymbol, *m_cCUMergeFlagExtSCModel.get( 0 ) );
+
+  DTRACE_CABAC_VL( g_nSymbolCounter++ );
+  DTRACE_CABAC_T( "\tMergeFlag: " );
+  DTRACE_CABAC_V( uiSymbol );
+  DTRACE_CABAC_T( "\tAddress: " );
+  DTRACE_CABAC_V( pcCU->getCtuRsAddr() );
+  DTRACE_CABAC_T( "\tuiAbsPartIdx: " );
+  DTRACE_CABAC_V( uiAbsPartIdx );
+  DTRACE_CABAC_T( "\n" );
+}
+
+/** Code the merge candidate index (merge_idx) as a truncated unary code:
+ *  the first bin is context coded, all further bins are bypass coded.
+ *  Nothing is written when only one merge candidate is available.
+ * \param pcCU          pointer to the CU
+ * \param uiAbsPartIdx  absolute part index of the PU within the CTU
+ * \returns Void
+ */
+Void TEncSbac::codeMergeIndex( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  UInt uiUnaryIdx = pcCU->getMergeIndex( uiAbsPartIdx );
+  UInt uiNumCand = pcCU->getSlice()->getMaxNumMergeCand();
+  if ( uiNumCand > 1 )
+  {
+    for( UInt ui = 0; ui < uiNumCand - 1; ++ui )
+    {
+      // 1-bins up to the index, then a terminating 0-bin (omitted at the max).
+      const UInt uiSymbol = ui == uiUnaryIdx ? 0 : 1;
+      if ( ui==0 )
+      {
+        m_pcBinIf->encodeBin( uiSymbol, m_cCUMergeIdxExtSCModel.get( 0, 0, 0 ) );
+      }
+      else
+      {
+        m_pcBinIf->encodeBinEP( uiSymbol );
+      }
+      if( uiSymbol == 0 )
+      {
+        break;
+      }
+    }
+  }
+  DTRACE_CABAC_VL( g_nSymbolCounter++ );
+  DTRACE_CABAC_T( "\tparseMergeIndex()" );
+  DTRACE_CABAC_T( "\tuiMRGIdx= " );
+  DTRACE_CABAC_V( pcCU->getMergeIndex( uiAbsPartIdx ) );
+  DTRACE_CABAC_T( "\n" );
+}
+
+/** Code the split_cu_flag as one context-coded bin; nothing is coded at the
+ *  maximum CU depth (the decoder infers no further split there).
+ * \param pcCU          pointer to the CU
+ * \param uiAbsPartIdx  absolute part index of the CU within the CTU
+ * \param uiDepth       current CU depth
+ */
+Void TEncSbac::codeSplitFlag   ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth )
+{
+  if( uiDepth == g_uiMaxCUDepth - g_uiAddCUDepth )
+    return;
+
+  // Context derived from neighbouring CUs' depths; split iff actual depth exceeds uiDepth.
+  UInt uiCtx           = pcCU->getCtxSplitFlag( uiAbsPartIdx, uiDepth );
+  UInt uiCurrSplitFlag = ( pcCU->getDepth( uiAbsPartIdx ) > uiDepth ) ? 1 : 0;
+
+  assert( uiCtx < 3 );
+  m_pcBinIf->encodeBin( uiCurrSplitFlag, m_cCUSplitFlagSCModel.get( 0, 0, uiCtx ) );
+  DTRACE_CABAC_VL( g_nSymbolCounter++ )
+  DTRACE_CABAC_T( "\tSplitFlag\n" )
+  return;
+}
+
+/** Code the split_transform_flag (RQT subdivision) as one context-coded bin.
+ * \param uiSymbol  flag value (0/1)
+ * \param uiCtx     context index within the subdivision-flag model
+ */
+Void TEncSbac::codeTransformSubdivFlag( UInt uiSymbol, UInt uiCtx )
+{
+  m_pcBinIf->encodeBin( uiSymbol, m_cCUTransSubdivFlagSCModel.get( 0, 0, uiCtx ) );
+  DTRACE_CABAC_VL( g_nSymbolCounter++ )
+  DTRACE_CABAC_T( "\tparseTransformSubdivFlag()" )
+  DTRACE_CABAC_T( "\tsymbol=" )
+  DTRACE_CABAC_V( uiSymbol )
+  DTRACE_CABAC_T( "\tctx=" )
+  DTRACE_CABAC_V( uiCtx )
+  DTRACE_CABAC_T( "\n" )
+}
+
+
+/** Code the luma intra prediction direction(s) of a CU.
+ *  For each PU (4 for an NxN CU when isMultiple, otherwise 1): first a
+ *  context-coded flag telling whether the mode equals one of the predictor
+ *  (MPM) modes; then either the predictor index (up to two bypass bins) or
+ *  the remaining mode as a 5-bit fixed-length bypass code after the
+ *  predictor modes are removed from the numbering space.
+ * \param pcCU        pointer to the CU
+ * \param absPartIdx  absolute part index of the first PU
+ * \param isMultiple  true to code all PUs of the CU in one call
+ */
+Void TEncSbac::codeIntraDirLumaAng( TComDataCU* pcCU, UInt absPartIdx, Bool isMultiple)
+{
+  UInt dir[4],j;
+  Int preds[4][NUM_MOST_PROBABLE_MODES] = {{-1, -1, -1},{-1, -1, -1},{-1, -1, -1},{-1, -1, -1}};
+  Int predNum[4], predIdx[4] ={ -1,-1,-1,-1};
+  PartSize mode = pcCU->getPartitionSize( absPartIdx );
+  UInt partNum = isMultiple?(mode==SIZE_NxN?4:1):1;
+  UInt partOffset = ( pcCU->getPic()->getNumPartitionsInCtu() >> ( pcCU->getDepth(absPartIdx) << 1 ) ) >> 2;
+  // First pass: code the "is MPM" flag for every PU.
+  for (j=0;j<partNum;j++)
+  {
+    dir[j] = pcCU->getIntraDir( CHANNEL_TYPE_LUMA, absPartIdx+partOffset*j );
+    predNum[j] = pcCU->getIntraDirPredictor(absPartIdx+partOffset*j, preds[j], COMPONENT_Y);
+    for(UInt i = 0; i < predNum[j]; i++)
+    {
+      if(dir[j] == preds[j][i])
+      {
+        predIdx[j] = i;
+      }
+    }
+    m_pcBinIf->encodeBin((predIdx[j] != -1)? 1 : 0, m_cCUIntraPredSCModel.get( 0, 0, 0 ) );
+  }
+  // Second pass: code the MPM index or the remaining mode for every PU.
+  for (j=0;j<partNum;j++)
+  {
+    if(predIdx[j] != -1)
+    {
+      m_pcBinIf->encodeBinEP( predIdx[j] ? 1 : 0 );
+      if (predIdx[j])
+      {
+        m_pcBinIf->encodeBinEP( predIdx[j]-1 );
+      }
+    }
+    else
+    {
+      assert(predNum[j]>=3); // It is currently always 3!
+      // Sort the three predictors ascending so the renumbering below is correct.
+      if (preds[j][0] > preds[j][1])
+      {
+        std::swap(preds[j][0], preds[j][1]);
+      }
+      if (preds[j][0] > preds[j][2])
+      {
+        std::swap(preds[j][0], preds[j][2]);
+      }
+      if (preds[j][1] > preds[j][2])
+      {
+        std::swap(preds[j][1], preds[j][2]);
+      }
+      // Remove the predictor modes from the numbering space (largest first).
+      for(Int i = (predNum[j] - 1); i >= 0; i--)
+      {
+        dir[j] = dir[j] > preds[j][i] ? dir[j] - 1 : dir[j];
+      }
+      m_pcBinIf->encodeBinsEP( dir[j], 5 );
+    }
+  }
+  return;
+}
+
+/** Code the chroma intra prediction mode.
+ *  A context-coded flag selects DM_CHROMA (mode derived from luma);
+ *  otherwise the index into the allowed-chroma-mode list is coded as two
+ *  bypass bins.
+ * \param pcCU          pointer to the CU
+ * \param uiAbsPartIdx  absolute part index within the CTU
+ */
+Void TEncSbac::codeIntraDirChroma( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  UInt uiIntraDirChroma = pcCU->getIntraDir( CHANNEL_TYPE_CHROMA, uiAbsPartIdx );
+
+  if( uiIntraDirChroma == DM_CHROMA_IDX )
+  {
+    m_pcBinIf->encodeBin( 0, m_cCUChromaPredSCModel.get( 0, 0, 0 ) );
+  }
+  else
+  {
+    m_pcBinIf->encodeBin( 1, m_cCUChromaPredSCModel.get( 0, 0, 0 ) );
+
+    UInt uiAllowedChromaDir[ NUM_CHROMA_MODE ];
+    pcCU->getAllowedChromaDir( uiAbsPartIdx, uiAllowedChromaDir );
+
+    // Map the mode to its position in the allowed-mode list.
+    for( Int i = 0; i < NUM_CHROMA_MODE - 1; i++ )
+    {
+      if( uiIntraDirChroma == uiAllowedChromaDir[i] )
+      {
+        uiIntraDirChroma = i;
+        break;
+      }
+    }
+
+    m_pcBinIf->encodeBinsEP( uiIntraDirChroma, 2 );
+  }
+
+  return;
+}
+
+
+/** Code the inter prediction direction (inter_pred_idc) of a PU.
+ *  uiInterDir = getInterDir() - 1: 0 = list 0, 1 = list 1, 2 = bi-prediction.
+ *  The bi-prediction bin is only coded for 2Nx2N PUs or PUs whose height is
+ *  not 8 (elsewhere bi-prediction is not signalled); a second bin with a
+ *  fixed context then distinguishes list 0 from list 1.
+ * \param pcCU          pointer to the CU
+ * \param uiAbsPartIdx  absolute part index of the PU within the CTU
+ */
+Void TEncSbac::codeInterDir( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  const UInt uiInterDir = pcCU->getInterDir( uiAbsPartIdx ) - 1;
+  const UInt uiCtx      = pcCU->getCtxInterDir( uiAbsPartIdx );
+  ContextModel *pCtx    = m_cCUInterDirSCModel.get( 0 );
+
+  if (pcCU->getPartitionSize(uiAbsPartIdx) == SIZE_2Nx2N || pcCU->getHeight(uiAbsPartIdx) != 8 )
+  {
+    m_pcBinIf->encodeBin( uiInterDir == 2 ? 1 : 0, *( pCtx + uiCtx ) );
+  }
+
+  if (uiInterDir < 2)
+  {
+    m_pcBinIf->encodeBin( uiInterDir, *( pCtx + 4 ) );
+  }
+
+  return;
+}
+
+/** Code the reference picture index (ref_idx) as a truncated unary code.
+ *  The first two bins use context models; any further bins are bypass coded.
+ * \param pcCU          pointer to the CU
+ * \param uiAbsPartIdx  absolute part index of the PU within the CTU
+ * \param eRefList      reference picture list (L0 or L1)
+ */
+Void TEncSbac::codeRefFrmIdx( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )
+{
+  Int iRefFrame = pcCU->getCUMvField( eRefList )->getRefIdx( uiAbsPartIdx );
+  ContextModel *pCtx = m_cCURefPicSCModel.get( 0 );
+  m_pcBinIf->encodeBin( ( iRefFrame == 0 ? 0 : 1 ), *pCtx );
+
+  if( iRefFrame > 0 )
+  {
+    UInt uiRefNum = pcCU->getSlice()->getNumRefIdx( eRefList ) - 2;
+    pCtx++;
+    iRefFrame--;
+    for( UInt ui = 0; ui < uiRefNum; ++ui )
+    {
+      const UInt uiSymbol = ui == iRefFrame ? 0 : 1;
+      if( ui == 0 )
+      {
+        m_pcBinIf->encodeBin( uiSymbol, *pCtx );
+      }
+      else
+      {
+        m_pcBinIf->encodeBinEP( uiSymbol );
+      }
+      if( uiSymbol == 0 )
+      {
+        break;
+      }
+    }
+  }
+  return;
+}
+
+/** Code the motion vector difference of a PU for one reference list.
+ *  Nothing is coded for list 1 when mvd_l1_zero_flag is set and the PU is
+ *  bi-predicted (interDir == 3).  For each component: an abs>0 flag and an
+ *  abs>1 flag (context coded; both components share the same context per
+ *  flag), then for abs>1 an EG1 code of abs-2, and a bypass-coded sign bit.
+ * \param pcCU          pointer to the CU
+ * \param uiAbsPartIdx  absolute part index of the PU within the CTU
+ * \param eRefList      reference picture list the MVD belongs to
+ */
+Void TEncSbac::codeMvd( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList )
+{
+  if(pcCU->getSlice()->getMvdL1ZeroFlag() && eRefList == REF_PIC_LIST_1 && pcCU->getInterDir(uiAbsPartIdx)==3)
+  {
+    return;
+  }
+
+  const TComCUMvField* pcCUMvField = pcCU->getCUMvField( eRefList );
+  const Int iHor = pcCUMvField->getMvd( uiAbsPartIdx ).getHor();
+  const Int iVer = pcCUMvField->getMvd( uiAbsPartIdx ).getVer();
+  ContextModel* pCtx = m_cCUMvdSCModel.get( 0 );
+
+  // abs>0 flags, both components with the first context.
+  m_pcBinIf->encodeBin( iHor != 0 ? 1 : 0, *pCtx );
+  m_pcBinIf->encodeBin( iVer != 0 ? 1 : 0, *pCtx );
+
+  const Bool bHorAbsGr0 = iHor != 0;
+  const Bool bVerAbsGr0 = iVer != 0;
+  const UInt uiHorAbs   = 0 > iHor ? -iHor : iHor;
+  const UInt uiVerAbs   = 0 > iVer ? -iVer : iVer;
+  pCtx++;
+
+  // abs>1 flags, both components with the second context.
+  if( bHorAbsGr0 )
+  {
+    m_pcBinIf->encodeBin( uiHorAbs > 1 ? 1 : 0, *pCtx );
+  }
+
+  if( bVerAbsGr0 )
+  {
+    m_pcBinIf->encodeBin( uiVerAbs > 1 ? 1 : 0, *pCtx );
+  }
+
+  // Remainders (EG1 of abs-2) and bypass sign bits.
+  if( bHorAbsGr0 )
+  {
+    if( uiHorAbs > 1 )
+    {
+      xWriteEpExGolomb( uiHorAbs-2, 1 );
+    }
+
+    m_pcBinIf->encodeBinEP( 0 > iHor ? 1 : 0 );
+  }
+
+  if( bVerAbsGr0 )
+  {
+    if( uiVerAbs > 1 )
+    {
+      xWriteEpExGolomb( uiVerAbs-2, 1 );
+    }
+
+    m_pcBinIf->encodeBinEP( 0 > iVer ? 1 : 0 );
+  }
+
+  return;
+}
+
+/** Code the cross-component prediction alpha (Range Extensions) for a
+ *  chroma TU.  Coded only when the PPS enables the tool, and only for TUs
+ *  that are inter coded or intra with DM chroma mode.  Syntax: a non-zero
+ *  flag, a |alpha|>1 flag, a unary-max code of log2(|alpha|)-1, and the
+ *  sign; Cr uses the second half of the context set.
+ * \param rTu     current TU
+ * \param compID  chroma component being coded (luma returns immediately)
+ */
+Void TEncSbac::codeCrossComponentPrediction( TComTU &rTu, ComponentID compID )
+{
+  TComDataCU *pcCU = rTu.getCU();
+
+  if( isLuma(compID) || !pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction() ) return;
+
+  const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
+
+  if (!pcCU->isIntra(uiAbsPartIdx) || (pcCU->getIntraDir( CHANNEL_TYPE_CHROMA, uiAbsPartIdx ) == DM_CHROMA_IDX))
+  {
+    DTRACE_CABAC_VL( g_nSymbolCounter++ )
+    DTRACE_CABAC_T("\tparseCrossComponentPrediction()")
+    DTRACE_CABAC_T( "\tAddr=" )
+    DTRACE_CABAC_V( compID )
+    DTRACE_CABAC_T( "\tuiAbsPartIdx=" )
+    DTRACE_CABAC_V( uiAbsPartIdx )
+
+    Int alpha = pcCU->getCrossComponentPredictionAlpha( uiAbsPartIdx, compID );
+    ContextModel *pCtx = m_cCrossComponentPredictionSCModel.get(0, 0) + ((compID == COMPONENT_Cr) ? (NUM_CROSS_COMPONENT_PREDICTION_CTX >> 1) : 0);
+    m_pcBinIf->encodeBin(((alpha != 0) ? 1 : 0), pCtx[0]);
+
+    if (alpha != 0)
+    {
+      // Maps |alpha| in 1..8 to log2(|alpha|) rounded up; alpha is a power-of-two scale.
+      static const Int log2AbsAlphaMinus1Table[8] = { 0, 1, 1, 2, 2, 2, 3, 3 };
+      assert(abs(alpha) <= 8);
+
+      if (abs(alpha)>1)
+      {
+        m_pcBinIf->encodeBin(1, pCtx[1]);
+        xWriteUnaryMaxSymbol( log2AbsAlphaMinus1Table[abs(alpha) - 1] - 1, (pCtx + 2), 1, 2 );
+      }
+      else
+      {
+        m_pcBinIf->encodeBin(0, pCtx[1]);
+      }
+      m_pcBinIf->encodeBin( ((alpha < 0) ? 1 : 0), pCtx[4] );
+    }
+    DTRACE_CABAC_T( "\tAlpha=" )
+    DTRACE_CABAC_V( pcCU->getCrossComponentPredictionAlpha( uiAbsPartIdx, compID ) )
+    DTRACE_CABAC_T( "\n" )
+  }
+}
+
+/** Code the CU-level delta QP (cu_qp_delta).
+ *  The difference to the reference QP is wrapped into the legal signed
+ *  range, then coded as a truncated-unary prefix (max CU_DQP_TU_CMAX), an
+ *  optional EG(CU_DQP_EG_k) suffix, and a bypass sign bit for non-zero values.
+ * \param pcCU          pointer to the CU
+ * \param uiAbsPartIdx  absolute part index of the CU within the CTU
+ */
+Void TEncSbac::codeDeltaQP( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  Int iDQp  = pcCU->getQP( uiAbsPartIdx ) - pcCU->getRefQP( uiAbsPartIdx );
+
+  // Wrap iDQp into [-26 - qpBdOffsetY/2, 25 + qpBdOffsetY/2].
+  Int qpBdOffsetY =  pcCU->getSlice()->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA);
+  iDQp = (iDQp + 78 + qpBdOffsetY + (qpBdOffsetY/2)) % (52 + qpBdOffsetY) - 26 - (qpBdOffsetY/2);
+
+  UInt uiAbsDQp = (UInt)((iDQp > 0)? iDQp  : (-iDQp));
+  UInt TUValue = min((Int)uiAbsDQp, CU_DQP_TU_CMAX);
+  xWriteUnaryMaxSymbol( TUValue, &m_cCUDeltaQpSCModel.get( 0, 0, 0 ), 1, CU_DQP_TU_CMAX);
+  if( uiAbsDQp >= CU_DQP_TU_CMAX )
+  {
+    xWriteEpExGolomb( uiAbsDQp - CU_DQP_TU_CMAX, CU_DQP_EG_k );
+  }
+
+  if ( uiAbsDQp > 0)
+  {
+    UInt uiSign = (iDQp > 0 ? 0 : 1);
+    m_pcBinIf->encodeBinEP(uiSign);
+  }
+
+  return;
+}
+
+/** code chroma qp adjustment, converting from the internal table representation
+ * \returns Void
+ */
+Void TEncSbac::codeChromaQpAdjustment( TComDataCU* cu, UInt absPartIdx )
+{
+  Int internalIdc = cu->getChromaQpAdj( absPartIdx );
+  Int tableSize = cu->getSlice()->getPPS()->getChromaQpAdjTableSize();
+  /* internal_idc == 0 => flag = 0
+   * internal_idc > 0 => flag = 1, then code idc-1 (if table size warrants) */
+  m_pcBinIf->encodeBin( internalIdc > 0, m_ChromaQpAdjFlagSCModel.get( 0, 0, 0 ) );
+
+  if (internalIdc > 0 && tableSize > 1)
+  {
+    xWriteUnaryMaxSymbol( internalIdc - 1, &m_ChromaQpAdjIdcSCModel.get( 0, 0, 0 ), 0, tableSize - 1 );
+  }
+}
+
+/** Code the coded-block flag (cbf) of one component of a TU.
+ *  For a non-square (4:2:2 chroma) TU with sub-TUs present, one CBF is
+ *  coded per sub-TU at this level; otherwise a single CBF is coded.
+ *  Unsplittable TUs inherit their parent's CBF via lowestTUDepth.
+ * \param rTu          current TU
+ * \param compID       colour component whose CBF is coded
+ * \param lowestLevel  true when this is the lowest level of the TU tree
+ */
+Void TEncSbac::codeQtCbf( TComTU &rTu, const ComponentID compID, const Bool lowestLevel )
+{
+  TComDataCU* pcCU = rTu.getCU();
+
+  const UInt absPartIdx   = rTu.GetAbsPartIdxTU(compID);
+  const UInt TUDepth      = rTu.GetTransformDepthRel();
+        UInt uiCtx        = pcCU->getCtxQtCbf( rTu, toChannelType(compID) );
+  const UInt contextSet   = toChannelType(compID);
+
+  const UInt width        = rTu.getRect(compID).width;
+  const UInt height       = rTu.getRect(compID).height;
+  const Bool canQuadSplit = (width >= (MIN_TU_SIZE * 2)) && (height >= (MIN_TU_SIZE * 2));
+
+  //             Since the CBF for chroma is coded at the highest level possible, if sub-TUs are
+  //             to be coded for a 4x8 chroma TU, their CBFs must be coded at the highest 4x8 level
+  //             (i.e. where luma TUs are 8x8 rather than 4x4)
+  //    ___ ___
+  //   |   |   | <- 4 x (8x8 luma + 4x8 4:2:2 chroma)
+  //   |___|___|    each quadrant has its own chroma CBF
+  //   |   |   | _ _ _ _
+  //   |___|___|        |
+  //   <--16--->        V
+  //                   _ _
+  //                  |_|_| <- 4 x 4x4 luma + 1 x 4x8 4:2:2 chroma
+  //                  |_|_|    no chroma CBF is coded - instead the parent CBF is inherited
+  //                  <-8->    if sub-TUs are present, their CBFs had to be coded at the parent level
+
+  const UInt lowestTUDepth = TUDepth + ((!lowestLevel && !canQuadSplit) ? 1 : 0); //unsplittable TUs inherit their parent's CBF
+
+  if ((width != height) && (lowestLevel || !canQuadSplit)) //if sub-TUs are present
+  {
+    const UInt subTUDepth        = lowestTUDepth + 1;                      //if this is the lowest level of the TU-tree, the sub-TUs are directly below. Otherwise, this must be the level above the lowest level (as specified above)
+    const UInt partIdxesPerSubTU = rTu.GetAbsPartIdxNumParts(compID) >> 1;
+
+    for (UInt subTU = 0; subTU < 2; subTU++)
+    {
+      const UInt subTUAbsPartIdx = absPartIdx + (subTU * partIdxesPerSubTU);
+      const UInt uiCbf           = pcCU->getCbf(subTUAbsPartIdx, compID, subTUDepth);
+
+      m_pcBinIf->encodeBin(uiCbf, m_cCUQtCbfSCModel.get(0, contextSet, uiCtx));
+
+      DTRACE_CABAC_VL( g_nSymbolCounter++ )
+      DTRACE_CABAC_T( "\tparseQtCbf()" )
+      DTRACE_CABAC_T( "\tsub-TU=" )
+      DTRACE_CABAC_V( subTU )
+      DTRACE_CABAC_T( "\tsymbol=" )
+      DTRACE_CABAC_V( uiCbf )
+      DTRACE_CABAC_T( "\tctx=" )
+      DTRACE_CABAC_V( uiCtx )
+      DTRACE_CABAC_T( "\tetype=" )
+      DTRACE_CABAC_V( compID )
+      DTRACE_CABAC_T( "\tuiAbsPartIdx=" )
+      DTRACE_CABAC_V( subTUAbsPartIdx )
+      DTRACE_CABAC_T( "\n" )
+    }
+  }
+  else
+  {
+    const UInt uiCbf = pcCU->getCbf( absPartIdx, compID, lowestTUDepth );
+    m_pcBinIf->encodeBin( uiCbf , m_cCUQtCbfSCModel.get( 0, contextSet, uiCtx ) );
+
+
+    DTRACE_CABAC_VL( g_nSymbolCounter++ )
+    DTRACE_CABAC_T( "\tparseQtCbf()" )
+    DTRACE_CABAC_T( "\tsymbol=" )
+    DTRACE_CABAC_V( uiCbf )
+    DTRACE_CABAC_T( "\tctx=" )
+    DTRACE_CABAC_V( uiCtx )
+    DTRACE_CABAC_T( "\tetype=" )
+    DTRACE_CABAC_V( compID )
+    DTRACE_CABAC_T( "\tuiAbsPartIdx=" )
+    DTRACE_CABAC_V( rTu.GetAbsPartIdxTU(compID) )
+    DTRACE_CABAC_T( "\n" )
+  }
+}
+
+
+/** Code the transform_skip_flag for one component of a TU.
+ *  Nothing is coded for transquant-bypass CUs, or for TUs larger than the
+ *  PPS transform-skip maximum size.
+ * \param rTu        current TU
+ * \param component  colour component whose flag is coded
+ */
+Void TEncSbac::codeTransformSkipFlags (TComTU &rTu, ComponentID component )
+{
+  TComDataCU* pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx=rTu.GetAbsPartIdxTU();
+
+  if (pcCU->getCUTransquantBypass(uiAbsPartIdx))
+  {
+    return;
+  }
+
+  if (!TUCompRectHasAssociatedTransformSkipFlag(rTu.getRect(component), pcCU->getSlice()->getPPS()->getTransformSkipLog2MaxSize()))
+  {
+    return;
+  }
+
+  UInt useTransformSkip = pcCU->getTransformSkip( uiAbsPartIdx,component);
+  m_pcBinIf->encodeBin( useTransformSkip, m_cTransformSkipSCModel.get( 0, toChannelType(component), 0 ) );
+
+  DTRACE_CABAC_VL( g_nSymbolCounter++ )
+  DTRACE_CABAC_T("\tparseTransformSkip()");
+  DTRACE_CABAC_T( "\tsymbol=" )
+  DTRACE_CABAC_V( useTransformSkip )
+  DTRACE_CABAC_T( "\tAddr=" )
+  DTRACE_CABAC_V( pcCU->getCtuRsAddr() )
+  DTRACE_CABAC_T( "\tetype=" )
+  DTRACE_CABAC_V( component )
+  DTRACE_CABAC_T( "\tuiAbsPartIdx=" )
+  DTRACE_CABAC_V( rTu.GetAbsPartIdxTU() )
+  DTRACE_CABAC_T( "\n" )
+}
+
+
+/** Code I_PCM information: the pcm_flag as a terminating bin and, when set,
+ * the raw PCM samples of all valid components after byte alignment; the
+ * arithmetic coder is reset afterwards.
+ * \param pcCU pointer to CU
+ * \param uiAbsPartIdx CU index
+ * \returns Void
+ */
+Void TEncSbac::codeIPCMInfo( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  UInt uiIPCM = (pcCU->getIPCMFlag(uiAbsPartIdx) == true)? 1 : 0;
+
+  Bool writePCMSampleFlag = pcCU->getIPCMFlag(uiAbsPartIdx);
+
+  // pcm_flag is coded with the terminating-bin mechanism, not a context model.
+  m_pcBinIf->encodeBinTrm (uiIPCM);
+
+  if (writePCMSampleFlag)
+  {
+    m_pcBinIf->encodePCMAlignBits();
+
+    const UInt minCoeffSizeY = pcCU->getPic()->getMinCUWidth() * pcCU->getPic()->getMinCUHeight();
+    const UInt offsetY       = minCoeffSizeY * uiAbsPartIdx;
+    for (UInt ch=0; ch < pcCU->getPic()->getNumberValidComponents(); ch++)
+    {
+      const ComponentID compID = ComponentID(ch);
+      // Scale the luma offset and block size to this component's sampling.
+      const UInt offset = offsetY >> (pcCU->getPic()->getComponentScaleX(compID) + pcCU->getPic()->getComponentScaleY(compID));
+      Pel * pPCMSample  = pcCU->getPCMSample(compID) + offset;
+      const UInt width  = pcCU->getWidth (uiAbsPartIdx) >> pcCU->getPic()->getComponentScaleX(compID);
+      const UInt height = pcCU->getHeight(uiAbsPartIdx) >> pcCU->getPic()->getComponentScaleY(compID);
+      const UInt sampleBits = pcCU->getSlice()->getSPS()->getPCMBitDepth(toChannelType(compID));
+      for (UInt y=0; y<height; y++)
+      {
+        for (UInt x=0; x<width; x++)
+        {
+          UInt sample = pPCMSample[x];
+          m_pcBinIf->xWritePCMCode(sample, sampleBits);
+        }
+        pPCMSample += width;
+      }
+    }
+
+    // PCM samples were written raw; restart the arithmetic coder.
+    m_pcBinIf->resetBac();
+  }
+}
+
+/** Code the root coded-block flag (rqt_root_cbf) of a CU as one
+ *  context-coded bin (context 0).
+ * \param pcCU          pointer to the CU
+ * \param uiAbsPartIdx  absolute part index of the CU within the CTU
+ */
+Void TEncSbac::codeQtRootCbf( TComDataCU* pcCU, UInt uiAbsPartIdx )
+{
+  UInt uiCbf = pcCU->getQtRootCbf( uiAbsPartIdx );
+  UInt uiCtx = 0;
+  m_pcBinIf->encodeBin( uiCbf , m_cCUQtRootCbfSCModel.get( 0, 0, uiCtx ) );
+  DTRACE_CABAC_VL( g_nSymbolCounter++ )
+  DTRACE_CABAC_T( "\tparseQtRootCbf()" )
+  DTRACE_CABAC_T( "\tsymbol=" )
+  DTRACE_CABAC_V( uiCbf )
+  DTRACE_CABAC_T( "\tctx=" )
+  DTRACE_CABAC_V( uiCtx )
+  DTRACE_CABAC_T( "\tuiAbsPartIdx=" )
+  DTRACE_CABAC_V( uiAbsPartIdx )
+  DTRACE_CABAC_T( "\n" )
+}
+
+/** Estimate the bits of a zero CBF for RD decisions (no tracing needed).
+ * \param rTu     current TU
+ * \param chType  channel type (luma/chroma) selecting the context set
+ */
+Void TEncSbac::codeQtCbfZero( TComTU & rTu, const ChannelType chType )
+{
+  // this function is only used to estimate the bits when cbf is 0
+  // and will never be called when writing the bitstream. do not need to write log
+  UInt uiCbf = 0;
+  UInt uiCtx = rTu.getCU()->getCtxQtCbf( rTu, chType );
+
+  m_pcBinIf->encodeBin( uiCbf , m_cCUQtCbfSCModel.get( 0, chType, uiCtx ) );
+}
+
+/** Estimate the bits of a zero root CBF for RD decisions (no tracing needed).
+ * \param pcCU  pointer to the CU (unused beyond the shared context model)
+ */
+Void TEncSbac::codeQtRootCbfZero( TComDataCU* pcCU )
+{
+  // this function is only used to estimate the bits when cbf is 0
+  // and will never be called when writing the bitstream. do not need to write log
+  UInt uiCbf = 0;
+  UInt uiCtx = 0;
+  m_pcBinIf->encodeBin( uiCbf , m_cCUQtRootCbfSCModel.get( 0, 0, uiCtx ) );
+}
+
+/** Encode (X,Y) position of the last significant coefficient
+ * \param uiPosX    X component of last coefficient
+ * \param uiPosY    Y component of last coefficient
+ * \param width     Block width
+ * \param height    Block height
+ * \param component colour component (selects luma/chroma context sets)
+ * \param uiScanIdx scan type (zig-zag, hor, ver)
+ * This method encodes the X and Y component within a block of the last significant coefficient.
+ * Each coordinate is a context-coded unary prefix over group indices followed
+ * by a bypass-coded fixed-length suffix within the group.
+ */
+Void TEncSbac::codeLastSignificantXY( UInt uiPosX, UInt uiPosY, Int width, Int height, ComponentID component, UInt uiScanIdx )
+{
+  // swap coordinates for vertical scan so X always follows the scan direction
+  if( uiScanIdx == SCAN_VER )
+  {
+    swap( uiPosX, uiPosY );
+    swap( width,  height );
+  }
+
+  UInt uiCtxLast;
+  UInt uiGroupIdxX    = g_uiGroupIdx[ uiPosX ];
+  UInt uiGroupIdxY    = g_uiGroupIdx[ uiPosY ];
+
+  ContextModel *pCtxX = m_cCuCtxLastX.get( 0, toChannelType(component) );
+  ContextModel *pCtxY = m_cCuCtxLastY.get( 0, toChannelType(component) );
+
+  Int blkSizeOffsetX, blkSizeOffsetY, shiftX, shiftY;
+  getLastSignificantContextParameters(component, width, height, blkSizeOffsetX, blkSizeOffsetY, shiftX, shiftY);
+
+  //------------------
+
+  // posX prefix: unary over group index, terminated unless at the maximum group
+
+  for( uiCtxLast = 0; uiCtxLast < uiGroupIdxX; uiCtxLast++ )
+  {
+    m_pcBinIf->encodeBin( 1, *( pCtxX + blkSizeOffsetX + (uiCtxLast >>shiftX) ) );
+  }
+  if( uiGroupIdxX < g_uiGroupIdx[ width - 1 ])
+  {
+    m_pcBinIf->encodeBin( 0, *( pCtxX + blkSizeOffsetX + (uiCtxLast >>shiftX) ) );
+  }
+
+  // posY prefix
+
+  for( uiCtxLast = 0; uiCtxLast < uiGroupIdxY; uiCtxLast++ )
+  {
+    m_pcBinIf->encodeBin( 1, *( pCtxY + blkSizeOffsetY + (uiCtxLast >>shiftY) ) );
+  }
+  if( uiGroupIdxY < g_uiGroupIdx[ height - 1 ])
+  {
+    m_pcBinIf->encodeBin( 0, *( pCtxY + blkSizeOffsetY + (uiCtxLast >>shiftY) ) );
+  }
+
+  // EP-coded part: fixed-length offset within the group, MSB first
+
+  if ( uiGroupIdxX > 3 )
+  {
+    UInt uiCount = ( uiGroupIdxX - 2 ) >> 1;
+    uiPosX       = uiPosX - g_uiMinInGroup[ uiGroupIdxX ];
+    for (Int i = uiCount - 1 ; i >= 0; i-- )
+    {
+      m_pcBinIf->encodeBinEP( ( uiPosX >> i ) & 1 );
+    }
+  }
+  if ( uiGroupIdxY > 3 )
+  {
+    UInt uiCount = ( uiGroupIdxY - 2 ) >> 1;
+    uiPosY       = uiPosY - g_uiMinInGroup[ uiGroupIdxY ];
+    for ( Int i = uiCount - 1 ; i >= 0; i-- )
+    {
+      m_pcBinIf->encodeBinEP( ( uiPosY >> i ) & 1 );
+    }
+  }
+}
+
+
+/** Code the quantised transform coefficients of one TU component
+ *  (residual_coding syntax).  Writes transform-skip / explicit-RDPCM info
+ *  as needed, the last-significant-coefficient position, then per 4x4
+ *  coefficient group: the coded-group flag, significance flags,
+ *  greater-than-1 / greater-than-2 flags, sign bits (with optional
+ *  sign-data hiding), and Golomb-Rice / exp-Golomb escape codes for the
+ *  remaining levels.
+ * \param rTu     current TU
+ * \param pcCoef  coefficient array for this component (must contain at
+ *                least one non-zero coefficient)
+ * \param compID  colour component being coded
+ */
+Void TEncSbac::codeCoeffNxN( TComTU &rTu, TCoeff* pcCoef, const ComponentID compID )
+{
+  TComDataCU* pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx=rTu.GetAbsPartIdxTU(compID);
+  const TComRectangle &tuRect=rTu.getRect(compID);
+  const UInt uiWidth=tuRect.width;
+  const UInt uiHeight=tuRect.height;
+
+  DTRACE_CABAC_VL( g_nSymbolCounter++ )
+  DTRACE_CABAC_T( "\tparseCoeffNxN()\teType=" )
+  DTRACE_CABAC_V( compID )
+  DTRACE_CABAC_T( "\twidth=" )
+  DTRACE_CABAC_V( uiWidth )
+  DTRACE_CABAC_T( "\theight=" )
+  DTRACE_CABAC_V( uiHeight )
+  DTRACE_CABAC_T( "\tdepth=" )
+//  DTRACE_CABAC_V( rTu.GetTransformDepthTotalAdj(compID) )
+  DTRACE_CABAC_V( rTu.GetTransformDepthTotal() )
+  DTRACE_CABAC_T( "\tabspartidx=" )
+  DTRACE_CABAC_V( uiAbsPartIdx )
+  DTRACE_CABAC_T( "\ttoCU-X=" )
+  DTRACE_CABAC_V( pcCU->getCUPelX() )
+  DTRACE_CABAC_T( "\ttoCU-Y=" )
+  DTRACE_CABAC_V( pcCU->getCUPelY() )
+  DTRACE_CABAC_T( "\tCU-addr=" )
+  DTRACE_CABAC_V(  pcCU->getCtuRsAddr() )
+  DTRACE_CABAC_T( "\tinCU-X=" )
+//  DTRACE_CABAC_V( g_auiRasterToPelX[ g_auiZscanToRaster[uiAbsPartIdx] ] )
+  DTRACE_CABAC_V( g_auiRasterToPelX[ g_auiZscanToRaster[rTu.GetAbsPartIdxTU(compID)] ] )
+  DTRACE_CABAC_T( "\tinCU-Y=" )
+// DTRACE_CABAC_V( g_auiRasterToPelY[ g_auiZscanToRaster[uiAbsPartIdx] ] )
+  DTRACE_CABAC_V( g_auiRasterToPelY[ g_auiZscanToRaster[rTu.GetAbsPartIdxTU(compID)] ] )
+  DTRACE_CABAC_T( "\tpredmode=" )
+  DTRACE_CABAC_V(  pcCU->getPredictionMode( uiAbsPartIdx ) )
+  DTRACE_CABAC_T( "\n" )
+
+  //--------------------------------------------------------------------------------------------------
+
+  if( uiWidth > m_pcSlice->getSPS()->getMaxTrSize() )
+  {
+    std::cerr << "ERROR: codeCoeffNxN was passed a TU with dimensions larger than the maximum allowed size" << std::endl;
+    assert(false);
+    exit(1);
+  }
+
+  // compute number of significant coefficients
+  UInt uiNumSig = TEncEntropy::countNonZeroCoeffs(pcCoef, uiWidth * uiHeight);
+
+  if ( uiNumSig == 0 )
+  {
+    std::cerr << "ERROR: codeCoeffNxN called for empty TU!" << std::endl;
+    assert(false);
+    exit(1);
+  }
+
+  //--------------------------------------------------------------------------------------------------
+
+  //set parameters
+
+  const ChannelType  chType            = toChannelType(compID);
+  const UInt         uiLog2BlockWidth  = g_aucConvertToBit[ uiWidth  ] + 2;
+  const UInt         uiLog2BlockHeight = g_aucConvertToBit[ uiHeight ] + 2;
+
+  const ChannelType  channelType       = toChannelType(compID);
+  const Bool         extendedPrecision = pcCU->getSlice()->getSPS()->getUseExtendedPrecision();
+
+  const Bool         alignCABACBeforeBypass = pcCU->getSlice()->getSPS()->getAlignCABACBeforeBypass();
+
+  Bool beValid;  // true => sign-data hiding may be applied below
+
+  {
+    Int uiIntraMode = -1;
+    const Bool       bIsLuma = isLuma(compID);
+    Int isIntra = pcCU->isIntra(uiAbsPartIdx) ? 1 : 0;
+    if ( isIntra )
+    {
+      uiIntraMode = pcCU->getIntraDir( toChannelType(compID), uiAbsPartIdx );
+
+      uiIntraMode = (uiIntraMode==DM_CHROMA_IDX && !bIsLuma) ? pcCU->getIntraDir(CHANNEL_TYPE_LUMA, getChromasCorrespondingPULumaIdx(uiAbsPartIdx, rTu.GetChromaFormat())) : uiIntraMode;
+      uiIntraMode = ((rTu.GetChromaFormat() == CHROMA_422) && !bIsLuma) ? g_chroma422IntraAngleMappingTable[uiIntraMode] : uiIntraMode;
+    }
+
+    Int transformSkip = pcCU->getTransformSkip( uiAbsPartIdx,compID) ? 1 : 0;
+    Bool rdpcm_lossy = ( transformSkip && isIntra && ( (uiIntraMode == HOR_IDX) || (uiIntraMode == VER_IDX) ) ) && pcCU->isRDPCMEnabled(uiAbsPartIdx);
+
+    // Sign hiding is disabled for lossless coding and implicit RDPCM.
+    if ( (pcCU->getCUTransquantBypass(uiAbsPartIdx)) || rdpcm_lossy )
+    {
+      beValid = false;
+      if ( (!pcCU->isIntra(uiAbsPartIdx)) && pcCU->isRDPCMEnabled(uiAbsPartIdx))
+        codeExplicitRdpcmMode( rTu, compID);
+    }
+    else
+    {
+      beValid = pcCU->getSlice()->getPPS()->getSignHideFlag() > 0;
+    }
+  }
+
+  //--------------------------------------------------------------------------------------------------
+
+  if(pcCU->getSlice()->getPPS()->getUseTransformSkip())
+  {
+    codeTransformSkipFlags(rTu, compID);
+    if(pcCU->getTransformSkip(uiAbsPartIdx, compID) && !pcCU->isIntra(uiAbsPartIdx) && pcCU->isRDPCMEnabled(uiAbsPartIdx))
+    {
+      //  This TU has coefficients and is transform skipped. Check whether is inter coded and if yes encode the explicit RDPCM mode
+      codeExplicitRdpcmMode( rTu, compID);
+
+      if(pcCU->getExplicitRdpcmMode(compID, uiAbsPartIdx) != RDPCM_OFF)
+      {
+        //  Sign data hiding is avoided for horizontal and vertical explicit RDPCM modes
+        beValid = false;
+      }
+    }
+  }
+
+  //--------------------------------------------------------------------------------------------------
+
+  const Bool  bUseGolombRiceParameterAdaptation = pcCU->getSlice()->getSPS()->getUseGolombRiceParameterAdaptation();
+        UInt &currentGolombRiceStatistic        = m_golombRiceAdaptationStatistics[rTu.getGolombRiceStatisticsIndex(compID)];
+
+  //select scans
+  TUEntropyCodingParameters codingParameters;
+  getTUEntropyCodingParameters(codingParameters, rTu, compID);
+
+  //----- encode significance map -----
+
+  // Find position of last coefficient
+  Int scanPosLast = -1;
+  Int posLast;
+
+
+  UInt uiSigCoeffGroupFlag[ MLS_GRP_NUM ];
+
+  memset( uiSigCoeffGroupFlag, 0, sizeof(UInt) * MLS_GRP_NUM );
+  do
+  {
+    posLast = codingParameters.scan[ ++scanPosLast ];
+
+    if( pcCoef[ posLast ] != 0 )
+    {
+      // get L1 sig map
+      UInt uiPosY   = posLast >> uiLog2BlockWidth;
+      UInt uiPosX   = posLast - ( uiPosY << uiLog2BlockWidth );
+
+      UInt uiBlkIdx = (codingParameters.widthInGroups * (uiPosY >> MLS_CG_LOG2_HEIGHT)) + (uiPosX >> MLS_CG_LOG2_WIDTH);
+      uiSigCoeffGroupFlag[ uiBlkIdx ] = 1;
+
+      uiNumSig--;
+    }
+  }
+  while ( uiNumSig > 0 );
+
+  // Code position of last coefficient
+  Int posLastY = posLast >> uiLog2BlockWidth;
+  Int posLastX = posLast - ( posLastY << uiLog2BlockWidth );
+  codeLastSignificantXY(posLastX, posLastY, uiWidth, uiHeight, compID, codingParameters.scanType);
+
+  //===== code significance flag =====
+  ContextModel * const baseCoeffGroupCtx = m_cCUSigCoeffGroupSCModel.get( 0, chType );
+  ContextModel * const baseCtx = m_cCUSigSCModel.get( 0, 0 ) + getSignificanceMapContextOffset(compID);
+
+  const Int  iLastScanSet  = scanPosLast >> MLS_CG_SIZE;
+
+  UInt c1                  = 1;
+  UInt uiGoRiceParam       = 0;
+  Int  iScanPosSig         = scanPosLast;
+
+  // Iterate coefficient groups from the last significant one back to DC.
+  for( Int iSubSet = iLastScanSet; iSubSet >= 0; iSubSet-- )
+  {
+    Int numNonZero = 0;
+    Int  iSubPos   = iSubSet << MLS_CG_SIZE;
+    uiGoRiceParam  = currentGolombRiceStatistic / RExt__GOLOMB_RICE_INCREMENT_DIVISOR;
+    Bool updateGolombRiceStatistics = bUseGolombRiceParameterAdaptation; //leave the statistics at 0 when not using the adaptation system
+    UInt coeffSigns = 0;
+
+    Int absCoeff[1 << MLS_CG_SIZE];
+
+    Int lastNZPosInCG  = -1;
+    Int firstNZPosInCG = 1 << MLS_CG_SIZE;
+
+    Bool escapeDataPresentInGroup = false;
+
+    if( iScanPosSig == scanPosLast )
+    {
+      absCoeff[ 0 ] = Int(abs( pcCoef[ posLast ] ));
+      coeffSigns    = ( pcCoef[ posLast ] < 0 );
+      numNonZero    = 1;
+      lastNZPosInCG  = iScanPosSig;
+      firstNZPosInCG = iScanPosSig;
+      iScanPosSig--;
+    }
+
+    // encode significant_coeffgroup_flag
+    Int iCGBlkPos = codingParameters.scanCG[ iSubSet ];
+    Int iCGPosY   = iCGBlkPos / codingParameters.widthInGroups;
+    Int iCGPosX   = iCGBlkPos - (iCGPosY * codingParameters.widthInGroups);
+
+    // The group containing the last coefficient and the DC group are always coded.
+    if( iSubSet == iLastScanSet || iSubSet == 0)
+    {
+      uiSigCoeffGroupFlag[ iCGBlkPos ] = 1;
+    }
+    else
+    {
+      UInt uiSigCoeffGroup   = (uiSigCoeffGroupFlag[ iCGBlkPos ] != 0);
+      UInt uiCtxSig  = TComTrQuant::getSigCoeffGroupCtxInc( uiSigCoeffGroupFlag, iCGPosX, iCGPosY, codingParameters.widthInGroups, codingParameters.heightInGroups );
+      m_pcBinIf->encodeBin( uiSigCoeffGroup, baseCoeffGroupCtx[ uiCtxSig ] );
+    }
+
+    // encode significant_coeff_flag
+    if( uiSigCoeffGroupFlag[ iCGBlkPos ] )
+    {
+      const Int patternSigCtx = TComTrQuant::calcPatternSigCtx(uiSigCoeffGroupFlag, iCGPosX, iCGPosY, codingParameters.widthInGroups, codingParameters.heightInGroups);
+
+      UInt uiBlkPos, uiSig, uiCtxSig;
+      for( ; iScanPosSig >= iSubPos; iScanPosSig-- )
+      {
+        uiBlkPos  = codingParameters.scan[ iScanPosSig ];
+        uiSig     = (pcCoef[ uiBlkPos ] != 0);
+        if( iScanPosSig > iSubPos || iSubSet == 0 || numNonZero )
+        {
+          uiCtxSig  = TComTrQuant::getSigCtxInc( patternSigCtx, codingParameters, iScanPosSig, uiLog2BlockWidth, uiLog2BlockHeight, chType );
+          m_pcBinIf->encodeBin( uiSig, baseCtx[ uiCtxSig ] );
+        }
+        if( uiSig )
+        {
+          absCoeff[ numNonZero ] = Int(abs( pcCoef[ uiBlkPos ] ));
+          coeffSigns = 2 * coeffSigns + ( pcCoef[ uiBlkPos ] < 0 );
+          numNonZero++;
+          if( lastNZPosInCG == -1 )
+          {
+            lastNZPosInCG = iScanPosSig;
+          }
+          firstNZPosInCG = iScanPosSig;
+        }
+      }
+    }
+    else
+    {
+      iScanPosSig = iSubPos - 1;
+    }
+
+    if( numNonZero > 0 )
+    {
+      // Sign hiding: the first sign is inferred when the group spans enough positions.
+      Bool signHidden = ( lastNZPosInCG - firstNZPosInCG >= SBH_THRESHOLD );
+
+      const UInt uiCtxSet = getContextSetIndex(compID, iSubSet, (c1 == 0));
+      c1 = 1;
+
+      ContextModel *baseCtxMod = m_cCUOneSCModel.get( 0, 0 ) + (NUM_ONE_FLAG_CTX_PER_SET * uiCtxSet);
+
+      // coeff_abs_level_greater1 flags for up to C1FLAG_NUMBER coefficients.
+      Int numC1Flag = min(numNonZero, C1FLAG_NUMBER);
+      Int firstC2FlagIdx = -1;
+      for( Int idx = 0; idx < numC1Flag; idx++ )
+      {
+        UInt uiSymbol = absCoeff[ idx ] > 1;
+        m_pcBinIf->encodeBin( uiSymbol, baseCtxMod[c1] );
+        if( uiSymbol )
+        {
+          c1 = 0;
+
+          if (firstC2FlagIdx == -1)
+          {
+            firstC2FlagIdx = idx;
+          }
+          else //if a greater-than-one has been encountered already this group
+          {
+            escapeDataPresentInGroup = true;
+          }
+        }
+        else if( (c1 < 3) && (c1 > 0) )
+        {
+          c1++;
+        }
+      }
+
+      // One coeff_abs_level_greater2 flag per group, for the first >1 coefficient.
+      if (c1 == 0)
+      {
+        baseCtxMod = m_cCUAbsSCModel.get( 0, 0 ) + (NUM_ABS_FLAG_CTX_PER_SET * uiCtxSet);
+        if ( firstC2FlagIdx != -1)
+        {
+          UInt symbol = absCoeff[ firstC2FlagIdx ] > 2;
+          m_pcBinIf->encodeBin( symbol, baseCtxMod[0] );
+          if (symbol != 0)
+          {
+            escapeDataPresentInGroup = true;
+          }
+        }
+      }
+
+      escapeDataPresentInGroup = escapeDataPresentInGroup || (numNonZero > C1FLAG_NUMBER);
+
+      if (escapeDataPresentInGroup && alignCABACBeforeBypass)
+      {
+        m_pcBinIf->align();
+      }
+
+      // Sign bits, bypass coded; the first sign is dropped when hidden.
+      if( beValid && signHidden )
+      {
+        m_pcBinIf->encodeBinsEP( (coeffSigns >> 1), numNonZero-1 );
+      }
+      else
+      {
+        m_pcBinIf->encodeBinsEP( coeffSigns, numNonZero );
+      }
+
+      // Remaining level magnitudes as Golomb-Rice / exp-Golomb escape codes.
+      Int iFirstCoeff2 = 1;
+      if (escapeDataPresentInGroup)
+      {
+        for ( Int idx = 0; idx < numNonZero; idx++ )
+        {
+          UInt baseLevel  = (idx < C1FLAG_NUMBER)? (2 + iFirstCoeff2 ) : 1;
+
+          if( absCoeff[ idx ] >= baseLevel)
+          {
+            const UInt escapeCodeValue = absCoeff[idx] - baseLevel;
+
+            xWriteCoefRemainExGolomb( escapeCodeValue, uiGoRiceParam, extendedPrecision, channelType );
+
+            if (absCoeff[idx] > (3 << uiGoRiceParam))
+            {
+              uiGoRiceParam = bUseGolombRiceParameterAdaptation ? (uiGoRiceParam + 1) : (std::min<UInt>((uiGoRiceParam + 1), 4));
+            }
+
+            if (updateGolombRiceStatistics)
+            {
+              const UInt initialGolombRiceParameter = currentGolombRiceStatistic / RExt__GOLOMB_RICE_INCREMENT_DIVISOR;
+
+              if (escapeCodeValue >= (3 << initialGolombRiceParameter))
+              {
+                currentGolombRiceStatistic++;
+              }
+              else if (((escapeCodeValue * 2) < (1 << initialGolombRiceParameter)) && (currentGolombRiceStatistic > 0))
+              {
+                currentGolombRiceStatistic--;
+              }
+
+              updateGolombRiceStatistics = false;
+            }
+          }
+
+          if(absCoeff[ idx ] >= 2)
+          {
+            iFirstCoeff2 = 0;
+          }
+        }
+      }
+    }
+  }
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+  printSBACCoeffData(posLastX, posLastY, uiWidth, uiHeight, compID, uiAbsPartIdx, codingParameters.scanType, pcCoef, g_bFinalEncode);
+#endif
+
+  return;
+}
+
+/** Code an SAO offset sign as a single equiprobable (bypass) bin
+ * \param code sign value (0 = positive, 1 = negative)
+ */
+Void TEncSbac::codeSAOSign( UInt code )
+{
+  m_pcBinIf->encodeBinEP( code );
+}
+
+/** Code a truncated-unary value (sao_offset_abs)
+ * \param code      value to encode (0..maxSymbol)
+ * \param maxSymbol largest codable value; when code equals it, the terminating 0-bin is omitted
+ */
+Void TEncSbac::codeSaoMaxUvlc    ( UInt code, UInt maxSymbol )
+{
+  if (maxSymbol == 0)
+  {
+    return;
+  }
+
+  // Truncated unary binarization: 'code' one-bins, all coded in bypass mode.
+  for ( UInt binIdx = 0; binIdx < code; binIdx++ )
+  {
+    m_pcBinIf->encodeBinEP( 1 );
+  }
+
+  // Terminating zero-bin, present only when the maximum was not reached.
+  if ( code < maxSymbol )
+  {
+    m_pcBinIf->encodeBinEP( 0 );
+  }
+}
+
+/** Code SAO EO class or BO band position as fixed-length bypass bins
+ * \param uiLength number of bins to write
+ * \param uiCode   value to encode
+ */
+Void TEncSbac::codeSaoUflc       ( UInt uiLength, UInt uiCode )
+{
+  m_pcBinIf->encodeBinsEP ( uiCode, uiLength );
+}
+
+/** Code an SAO merge flag (sao_merge_left_flag / sao_merge_above_flag)
+ * \param uiCode flag value; any non-zero value is coded as 1
+ */
+Void TEncSbac::codeSaoMerge       ( UInt uiCode )
+{
+  const UInt binValue = ( uiCode != 0 ) ? 1 : 0;
+  m_pcBinIf->encodeBin( binValue,  m_cSaoMergeSCModel.get( 0, 0, 0 ));
+}
+
+/** Code SAO type index (sao_type_idx_luma / sao_type_idx_chroma)
+ * \param uiCode 0 = SAO off, 1 = band offset (BO), 2 = edge offset (EO)
+ */
+Void TEncSbac::codeSaoTypeIdx       ( UInt uiCode)
+{
+  // First bin (context coded): is SAO enabled for this CTB?
+  const UInt enabledFlag = (uiCode == 0) ? 0 : 1;
+  m_pcBinIf->encodeBin( enabledFlag, m_cSaoTypeIdxSCModel.get( 0, 0, 0 ) );
+
+  // Second bin (bypass): distinguishes BO (0) from EO (1); absent when off.
+  if (uiCode != 0)
+  {
+    m_pcBinIf->encodeBinEP( (uiCode == 1) ? 0 : 1 );
+  }
+}
+
+/** Code the SAO parameters of one CTB for a single component.
+ * \param compIdx      component being coded
+ * \param ctbParam     SAO offset parameters of this CTB/component
+ * \param sliceEnabled true if SAO is enabled for this component in the slice
+ */
+Void TEncSbac::codeSAOOffsetParam(ComponentID compIdx, SAOOffset& ctbParam, Bool sliceEnabled)
+{
+  UInt uiSymbol;
+  if(!sliceEnabled)
+  {
+    // Nothing is signalled when SAO is disabled for this component.
+    assert(ctbParam.modeIdc == SAO_MODE_OFF);
+    return;
+  }
+  // The type index is shared per channel: only the first component of the
+  // channel (Y for luma, Cb for chroma) signals it.
+  const Bool bIsFirstCompOfChType = (getFirstComponentOfChannel(toChannelType(compIdx)) == compIdx);
+
+  //type
+  if(bIsFirstCompOfChType)
+  {
+    //sao_type_idx_luma or sao_type_idx_chroma
+    if(ctbParam.modeIdc == SAO_MODE_OFF)
+    {
+      uiSymbol =0;
+    }
+    else if(ctbParam.typeIdc == SAO_TYPE_BO) //BO
+    {
+      uiSymbol = 1;
+    }
+    else
+    {
+      assert(ctbParam.typeIdc < SAO_TYPE_START_BO); //EO
+      uiSymbol = 2;
+    }
+    codeSaoTypeIdx(uiSymbol);
+  }
+
+  // New (non-merge) parameters: code the four offsets plus auxiliary info.
+  if(ctbParam.modeIdc == SAO_MODE_NEW)
+  {
+    Int numClasses = (ctbParam.typeIdc == SAO_TYPE_BO)?4:NUM_SAO_EO_CLASSES;
+    Int offset[4];
+    Int k=0;
+    // Collect the four signalled offsets: for BO, four consecutive bands
+    // starting at typeAuxInfo (wrapping); for EO, all classes except the
+    // plain (no-edge) class, which carries no offset.
+    for(Int i=0; i< numClasses; i++)
+    {
+      if(ctbParam.typeIdc != SAO_TYPE_BO && i == SAO_CLASS_EO_PLAIN)
+      {
+        continue;
+      }
+      Int classIdx = (ctbParam.typeIdc == SAO_TYPE_BO)?(  (ctbParam.typeAuxInfo+i)% NUM_SAO_BO_CLASSES   ):i;
+      offset[k] = ctbParam.offset[classIdx];
+      k++;
+    }
+
+    for(Int i=0; i< 4; i++)
+    {
+      codeSaoMaxUvlc((offset[i]<0)?(-offset[i]):(offset[i]),  g_saoMaxOffsetQVal[compIdx] ); //sao_offset_abs
+    }
+
+
+    // Only BO codes explicit offset signs (for non-zero offsets) and the band position.
+    if(ctbParam.typeIdc == SAO_TYPE_BO)
+    {
+      for(Int i=0; i< 4; i++)
+      {
+        if(offset[i] != 0)
+        {
+          codeSAOSign((offset[i]< 0)?1:0);
+        }
+      }
+
+      codeSaoUflc(NUM_SAO_BO_CLASSES_LOG2, ctbParam.typeAuxInfo ); //sao_band_position
+    }
+    else //EO
+    {
+      // The EO class is shared per channel, like the type index above.
+      if(bIsFirstCompOfChType)
+      {
+        assert(ctbParam.typeIdc - SAO_TYPE_START_EO >=0);
+        codeSaoUflc(NUM_SAO_EO_TYPES_LOG2, ctbParam.typeIdc - SAO_TYPE_START_EO ); //sao_eo_class_luma or sao_eo_class_chroma
+      }
+    }
+
+  }
+}
+
+
+/** Code the SAO parameters of one CTB: merge flags first, then (if not
+ *  merged) per-component offset parameters.
+ * \param saoBlkParam      SAO parameters of the current CTB, indexed by component
+ * \param sliceEnabled     per-component SAO slice-enable flags
+ * \param leftMergeAvail   true if merging with the left CTB is possible
+ * \param aboveMergeAvail  true if merging with the above CTB is possible
+ * \param onlyEstMergeInfo when true, only the merge flags are coded (RDO estimation)
+ */
+Void TEncSbac::codeSAOBlkParam(SAOBlkParam& saoBlkParam
+                              , Bool* sliceEnabled
+                              , Bool leftMergeAvail
+                              , Bool aboveMergeAvail
+                              , Bool onlyEstMergeInfo // = false
+                              )
+{
+
+  Bool isLeftMerge = false;
+  Bool isAboveMerge= false;
+
+  if(leftMergeAvail)
+  {
+    isLeftMerge = ((saoBlkParam[COMPONENT_Y].modeIdc == SAO_MODE_MERGE) && (saoBlkParam[COMPONENT_Y].typeIdc == SAO_MERGE_LEFT));
+    codeSaoMerge( isLeftMerge?1:0  ); //sao_merge_left_flag
+  }
+
+  // The above-merge flag is only coded when not merging left.
+  if( aboveMergeAvail && !isLeftMerge)
+  {
+    isAboveMerge = ((saoBlkParam[COMPONENT_Y].modeIdc == SAO_MODE_MERGE) && (saoBlkParam[COMPONENT_Y].typeIdc == SAO_MERGE_ABOVE));
+    codeSaoMerge( isAboveMerge?1:0  ); //sao_merge_above_flag
+  }
+
+  if(onlyEstMergeInfo)
+  {
+    return; //only for RDO
+  }
+
+  if(!isLeftMerge && !isAboveMerge) //not merge mode
+  {
+    for(Int compIdx=0; compIdx < MAX_NUM_COMPONENT; compIdx++)
+    {
+      codeSAOOffsetParam(ComponentID(compIdx), saoBlkParam[compIdx], sliceEnabled[compIdx]);
+    }
+  }
+}
+
+/*!
+ ****************************************************************************
+ * \brief
+ *   estimate bit cost for CBP, significant map and significant coefficients
+ *   (fills all entropy-bit lookup tables used by RD-optimized quantization)
+ ****************************************************************************
+ */
+Void TEncSbac::estBit( estBitsSbacStruct* pcEstBitsSbac, Int width, Int height, ChannelType chType )
+{
+  estCBFBit( pcEstBitsSbac );
+
+  estSignificantCoeffGroupMapBit( pcEstBitsSbac, chType );
+
+  // encode significance map
+  estSignificantMapBit( pcEstBitsSbac, width, height, chType );
+
+  // encode last significant position
+  estLastSignificantPositionBit( pcEstBitsSbac, width, height, chType );
+
+  // encode significant coefficients
+  estSignificantCoefficientsBit( pcEstBitsSbac, chType );
+
+  // Snapshot the current Golomb-Rice adaptation state so estimation matches real coding.
+  memcpy(pcEstBitsSbac->golombRiceAdaptationStatistics, m_golombRiceAdaptationStatistics, (sizeof(UInt) * RExt__GOLOMB_RICE_ADAPTATION_STATISTICS_SETS));
+}
+
+/*!
+ ****************************************************************************
+ * \brief
+ *    estimate bit cost for each CBP bit
+ ****************************************************************************
+ */
+Void TEncSbac::estCBFBit( estBitsSbacStruct* pcEstBitsSbac )
+{
+  // Entropy bits for the QT cbf flag contexts.
+  ContextModel *pCtx = m_cCUQtCbfSCModel.get( 0 );
+
+  for( UInt ctxIdx = 0; ctxIdx < (NUM_QT_CBF_CTX_SETS * NUM_QT_CBF_CTX_PER_SET); ctxIdx++ )
+  {
+    for( UInt uiBin = 0; uiBin < 2; uiBin++ )
+    {
+      pcEstBitsSbac->blockCbpBits[ ctxIdx ][ uiBin ] = pCtx[ ctxIdx ].getEntropyBits( uiBin );
+    }
+  }
+
+  // Entropy bits for the root cbf flag contexts.
+  pCtx = m_cCUQtRootCbfSCModel.get( 0 );
+
+  for( UInt ctxIdx = 0; ctxIdx < 4; ctxIdx++ )
+  {
+    for( UInt uiBin = 0; uiBin < 2; uiBin++ )
+    {
+      pcEstBitsSbac->blockRootCbpBits[ ctxIdx ][ uiBin ] = pCtx[ ctxIdx ].getEntropyBits( uiBin );
+    }
+  }
+}
+
+
+/*!
+ ****************************************************************************
+ * \brief
+ *    estimate SAMBAC bit cost for significant coefficient group map
+ ****************************************************************************
+ */
+Void TEncSbac::estSignificantCoeffGroupMapBit( estBitsSbacStruct* pcEstBitsSbac, ChannelType chType )
+{
+  // Fill the entropy-bit table for every coded-sub-block-flag context of this channel.
+  for ( Int ctxIdx = 0; ctxIdx < NUM_SIG_CG_FLAG_CTX; ctxIdx++ )
+  {
+    ContextModel &rcCtx = m_cCUSigCoeffGroupSCModel.get( 0, chType, ctxIdx );
+
+    for( UInt uiBin = 0; uiBin < 2; uiBin++ )
+    {
+      pcEstBitsSbac->significantCoeffGroupBits[ ctxIdx ][ uiBin ] = rcCtx.getEntropyBits( uiBin );
+    }
+  }
+}
+
+
+/*!
+ ****************************************************************************
+ * \brief
+ *    estimate SAMBAC bit cost for significant coefficient map
+ ****************************************************************************
+ */
+Void TEncSbac::estSignificantMapBit( estBitsSbacStruct* pcEstBitsSbac, Int width, Int height, ChannelType chType )
+{
+  //--------------------------------------------------------------------------------------------------
+
+  //set up the number of channels and context variables
+
+  // NOTE: first == last, so exactly one representative component is processed
+  // per channel (Y for luma, Cb for chroma).
+  const UInt firstComponent = ((isLuma(chType)) ? (COMPONENT_Y) : (COMPONENT_Cb));
+  const UInt lastComponent  = ((isLuma(chType)) ? (COMPONENT_Y) : (COMPONENT_Cb));
+
+  //----------------------------------------------------------
+
+  // Select the significance-map context range for this TU size (4x4, 8x8 or NxN).
+  Int firstCtx = MAX_INT;
+  Int numCtx   = MAX_INT;
+
+  if      ((width == 4) && (height == 4))
+  {
+    firstCtx = significanceMapContextSetStart[chType][CONTEXT_TYPE_4x4];
+    numCtx   = significanceMapContextSetSize [chType][CONTEXT_TYPE_4x4];
+  }
+  else if ((width == 8) && (height == 8))
+  {
+    firstCtx = significanceMapContextSetStart[chType][CONTEXT_TYPE_8x8];
+    numCtx   = significanceMapContextSetSize [chType][CONTEXT_TYPE_8x8];
+  }
+  else
+  {
+    firstCtx = significanceMapContextSetStart[chType][CONTEXT_TYPE_NxN];
+    numCtx   = significanceMapContextSetSize [chType][CONTEXT_TYPE_NxN];
+  }
+
+  //--------------------------------------------------------------------------------------------------
+
+  //fill the data for the significance map
+
+  for (UInt component = firstComponent; component <= lastComponent; component++)
+  {
+    const UInt contextOffset = getSignificanceMapContextOffset(ComponentID(component));
+
+    if (firstCtx > 0)
+    {
+      for( UInt bin = 0; bin < 2; bin++ ) //always get the DC
+      {
+        pcEstBitsSbac->significantBits[ contextOffset ][ bin ] = m_cCUSigSCModel.get( 0, 0, contextOffset ).getEntropyBits( bin );
+      }
+    }
+
+    // This could be made optional, but would require this function to have knowledge of whether the
+    // TU is transform-skipped or transquant-bypassed and whether the SPS flag is set
+    for( UInt bin = 0; bin < 2; bin++ )
+    {
+      const Int ctxIdx = significanceMapContextSetStart[chType][CONTEXT_TYPE_SINGLE];
+      pcEstBitsSbac->significantBits[ contextOffset + ctxIdx ][ bin ] = m_cCUSigSCModel.get( 0, 0, (contextOffset + ctxIdx) ).getEntropyBits( bin );
+    }
+
+    // Entropy bits for the size-dependent context range selected above.
+    for ( Int ctxIdx = firstCtx; ctxIdx < firstCtx + numCtx; ctxIdx++ )
+    {
+      for( UInt uiBin = 0; uiBin < 2; uiBin++ )
+      {
+        pcEstBitsSbac->significantBits[ contextOffset + ctxIdx ][ uiBin ] = m_cCUSigSCModel.get(  0, 0, (contextOffset + ctxIdx) ).getEntropyBits( uiBin );
+      }
+    }
+  }
+
+  //--------------------------------------------------------------------------------------------------
+}
+
+
+/*!
+ ****************************************************************************
+ * \brief
+ *    estimate bit cost of the last significant coefficient position
+ ****************************************************************************
+ */
+
+Void TEncSbac::estLastSignificantPositionBit( estBitsSbacStruct* pcEstBitsSbac, Int width, Int height, ChannelType chType )
+{
+  //--------------------------------------------------------------------------------------------------.
+
+  //set up the number of channels
+
+  // NOTE: first == last, so exactly one representative component is processed
+  // per channel (Y for luma, Cb for chroma).
+  const UInt firstComponent = ((isLuma(chType)) ? (COMPONENT_Y) : (COMPONENT_Cb));
+  const UInt lastComponent  = ((isLuma(chType)) ? (COMPONENT_Y) : (COMPONENT_Cb));
+
+  //--------------------------------------------------------------------------------------------------
+
+  //fill the data for the last-significant-coefficient position
+
+  for (UInt componentIndex = firstComponent; componentIndex <= lastComponent; componentIndex++)
+  {
+    const ComponentID component = ComponentID(componentIndex);
+
+    Int iBitsX = 0, iBitsY = 0;
+
+    // Size-dependent context offset/shift for the last-position prefix bins.
+    Int blkSizeOffsetX, blkSizeOffsetY, shiftX, shiftY;
+    getLastSignificantContextParameters(ComponentID(component), width, height, blkSizeOffsetX, blkSizeOffsetY, shiftX, shiftY);
+
+    Int ctx;
+
+    const ChannelType channelType = toChannelType(ComponentID(component));
+
+    ContextModel *const pCtxX = m_cCuCtxLastX.get( 0, channelType );
+    ContextModel *const pCtxY = m_cCuCtxLastY.get( 0, channelType );
+    Int          *const lastXBitsArray = pcEstBitsSbac->lastXBits[channelType];
+    Int          *const lastYBitsArray = pcEstBitsSbac->lastYBits[channelType];
+
+    //------------------------------------------------
+
+    //X-coordinate
+
+    // lastXBitsArray[ctx] = cost of a unary prefix of length ctx: ctx one-bins
+    // (accumulated in iBitsX) plus one terminating zero-bin. The final entry
+    // (the largest group) omits the terminator.
+    for (ctx = 0; ctx < g_uiGroupIdx[ width - 1 ]; ctx++)
+    {
+      Int ctxOffset = blkSizeOffsetX + (ctx >>shiftX);
+      lastXBitsArray[ ctx ] = iBitsX + pCtxX[ ctxOffset ].getEntropyBits( 0 );
+      iBitsX += pCtxX[ ctxOffset ].getEntropyBits( 1 );
+    }
+
+    lastXBitsArray[ctx] = iBitsX;
+
+    //------------------------------------------------
+
+    //Y-coordinate
+
+    for (ctx = 0; ctx < g_uiGroupIdx[ height - 1 ]; ctx++)
+    {
+      Int ctxOffset = blkSizeOffsetY + (ctx >>shiftY);
+      lastYBitsArray[ ctx ] = iBitsY + pCtxY[ ctxOffset ].getEntropyBits( 0 );
+      iBitsY += pCtxY[ ctxOffset ].getEntropyBits( 1 );
+    }
+
+    lastYBitsArray[ctx] = iBitsY;
+
+  } //end of component loop
+
+  //--------------------------------------------------------------------------------------------------
+}
+
+
+/*!
+ ****************************************************************************
+ * \brief
+ *    estimate bit cost of significant coefficient
+ ****************************************************************************
+ */
+Void TEncSbac::estSignificantCoefficientsBit( estBitsSbacStruct* pcEstBitsSbac, ChannelType chType )
+{
+  // Context tables for the greater-than-1 and greater-than-2 flags.
+  ContextModel *const pcCtxOne = m_cCUOneSCModel.get(0, 0);
+  ContextModel *const pcCtxAbs = m_cCUAbsSCModel.get(0, 0);
+
+  const Bool bLuma = isLuma(chType);
+
+  // Luma contexts occupy the first part of each table; chroma the remainder.
+  const UInt oneStartIndex = (bLuma ? 0                     : NUM_ONE_FLAG_CTX_LUMA);
+  const UInt oneStopIndex  = (bLuma ? NUM_ONE_FLAG_CTX_LUMA : NUM_ONE_FLAG_CTX);
+  const UInt absStartIndex = (bLuma ? 0                     : NUM_ABS_FLAG_CTX_LUMA);
+  const UInt absStopIndex  = (bLuma ? NUM_ABS_FLAG_CTX_LUMA : NUM_ABS_FLAG_CTX);
+
+  for (UInt ctxIdx = oneStartIndex; ctxIdx < oneStopIndex; ctxIdx++)
+  {
+    for (UInt uiBin = 0; uiBin < 2; uiBin++)
+    {
+      pcEstBitsSbac->m_greaterOneBits[ ctxIdx ][ uiBin ] = pcCtxOne[ ctxIdx ].getEntropyBits( uiBin );
+    }
+  }
+
+  for (UInt ctxIdx = absStartIndex; ctxIdx < absStopIndex; ctxIdx++)
+  {
+    for (UInt uiBin = 0; uiBin < 2; uiBin++)
+    {
+      pcEstBitsSbac->m_levelAbsBits[ ctxIdx ][ uiBin ] = pcCtxAbs[ ctxIdx ].getEntropyBits( uiBin );
+    }
+  }
+}
+
+/**
+ - Initialize our context information from the nominated source.
+ .
+ \param pSrc From where to copy context information.
+ */
+Void TEncSbac::xCopyContextsFrom( const TEncSbac* pSrc )
+{
+  // Copy all CABAC context models plus the Golomb-Rice adaptation statistics.
+  memcpy(m_contextModels, pSrc->m_contextModels, m_numContextModels*sizeof(m_contextModels[0]));
+  memcpy(m_golombRiceAdaptationStatistics, pSrc->m_golombRiceAdaptationStatistics, (sizeof(UInt) * RExt__GOLOMB_RICE_ADAPTATION_STATISTICS_SETS));
+}
+
+/** Load (copy) CABAC context state from another TEncSbac; public wrapper around xCopyContextsFrom(). */
+Void  TEncSbac::loadContexts ( const TEncSbac* pSrc)
+{
+  xCopyContextsFrom(pSrc);
+}
+
+/** Performs CABAC encoding of the explicit RDPCM mode
+ * \param rTu    current TU data structure
+ * \param compID component identifier
+ */
+Void TEncSbac::codeExplicitRdpcmMode( TComTU &rTu, const ComponentID compID )
+{
+  TComDataCU *cu = rTu.getCU();
+  const TComRectangle &rect = rTu.getRect(compID);
+  const UInt absPartIdx   = rTu.GetAbsPartIdxTU(compID);
+  const UInt tuHeight = g_aucConvertToBit[rect.height];
+  const UInt tuWidth  = g_aucConvertToBit[rect.width];
+
+  // Explicit RDPCM applies to square TUs only, with a bounded size index
+  // (g_aucConvertToBit mapping — see TComRom).
+  assert(tuHeight == tuWidth);
+  assert(tuHeight < 4);
+
+  UInt explicitRdpcmMode = cu->getExplicitRdpcmMode(compID, absPartIdx);
+
+  if( explicitRdpcmMode == RDPCM_OFF )
+  {
+    // First bin: RDPCM off.
+    m_pcBinIf->encodeBin (0, m_explicitRdpcmFlagSCModel.get (0, toChannelType(compID), 0));
+  }
+  else if( explicitRdpcmMode == RDPCM_HOR || explicitRdpcmMode == RDPCM_VER )
+  {
+    // First bin: RDPCM on; second bin: direction (0 = horizontal, 1 = vertical).
+    m_pcBinIf->encodeBin (1, m_explicitRdpcmFlagSCModel.get (0, toChannelType(compID), 0));
+    if(explicitRdpcmMode == RDPCM_HOR)
+    {
+      m_pcBinIf->encodeBin ( 0, m_explicitRdpcmDirSCModel.get(0, toChannelType(compID), 0));
+    }
+    else
+    {
+      m_pcBinIf->encodeBin ( 1, m_explicitRdpcmDirSCModel.get(0, toChannelType(compID), 0));
+    }
+  }
+  else
+  {
+    // Any other mode value is invalid at this point.
+    assert(0);
+  }
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncSbac.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,221 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncSbac.h
+    \brief    Context-adaptive entropy encoder class (header)
+*/
+
+#ifndef __TENCSBAC__
+#define __TENCSBAC__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include "TLibCommon/TComBitStream.h"
+#include "TLibCommon/ContextTables.h"
+#include "TLibCommon/ContextModel.h"
+#include "TLibCommon/ContextModel3DBuffer.h"
+#include "TEncEntropy.h"
+#include "TEncBinCoder.h"
+#include "TEncBinCoderCABAC.h"
+#if FAST_BIT_EST
+#include "TEncBinCoderCABACCounter.h"
+#endif
+
+class TEncTop;
+
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// SBAC (CABAC) entropy encoder class; implements the TEncEntropyIf interface
+class TEncSbac : public TEncEntropyIf
+{
+public:
+  TEncSbac();
+  virtual ~TEncSbac();
+
+  // Attach/detach the binary arithmetic coder backend (real CABAC or bit counter).
+  Void  init                   ( TEncBinIf* p )  { m_pcBinIf = p; }
+  Void  uninit                 ()                { m_pcBinIf = 0; }
+
+  //  Virtual list
+  Void  resetEntropy           ();
+  Void  determineCabacInitIdx  ();
+  Void  setBitstream           ( TComBitIf* p )  { m_pcBitIf = p; m_pcBinIf->init( p ); }
+  Void  setSlice               ( TComSlice* p )  { m_pcSlice = p;                       }
+
+  // State save/restore used for RD search (snapshot and roll back coder state).
+  Void  load                   ( const TEncSbac* pSrc  );
+  Void  loadIntraDirMode       ( const TEncSbac* pScr, const ChannelType chType  );
+  Void  store                  ( TEncSbac* pDest ) const;
+  Void  loadContexts           ( const TEncSbac* pSrc  );
+  Void  resetBits              ()                { m_pcBinIf->resetBits(); m_pcBitIf->resetBits(); }
+  UInt  getNumberOfWrittenBits ()                { return m_pcBinIf->getNumWrittenBits(); }
+  //--SBAC RD
+
+  // Parameter-set, slice-header and slice-termination coding.
+  Void  codeVPS                ( TComVPS* pcVPS );
+  Void  codeSPS                ( TComSPS* pcSPS     );
+  Void  codePPS                ( TComPPS* pcPPS     );
+  Void  codeSliceHeader        ( TComSlice* pcSlice );
+  Void  codeTilesWPPEntryPoint ( TComSlice* pSlice );
+  Void  codeTerminatingBit     ( UInt uilsLast      );
+  Void  codeSliceFinish        ();
+  // SAO syntax coding.
+  Void  codeSaoMaxUvlc       ( UInt code, UInt maxSymbol );
+  Void  codeSaoMerge         ( UInt  uiCode );
+  Void  codeSaoTypeIdx       ( UInt  uiCode);
+  Void  codeSaoUflc          ( UInt uiLength, UInt  uiCode );
+  Void  codeSAOSign          ( UInt  uiCode);  //<! code SAO offset sign
+  Void  codeScalingList      ( TComScalingList* /*scalingList*/     ){ assert (0);  return;};
+
+  Void codeSAOOffsetParam(ComponentID compIdx, SAOOffset& ctbParam, Bool sliceEnabled);
+  Void codeSAOBlkParam(SAOBlkParam& saoBlkParam
+                    , Bool* sliceEnabled
+                    , Bool leftMergeAvail
+                    , Bool aboveMergeAvail
+                    , Bool onlyEstMergeInfo = false
+                    );
+
+private:
+  // Binarization helper routines.
+  Void  xWriteUnarySymbol    ( UInt uiSymbol, ContextModel* pcSCModel, Int iOffset );
+  Void  xWriteUnaryMaxSymbol ( UInt uiSymbol, ContextModel* pcSCModel, Int iOffset, UInt uiMaxSymbol );
+  Void  xWriteEpExGolomb     ( UInt uiSymbol, UInt uiCount );
+  Void  xWriteCoefRemainExGolomb ( UInt symbol, UInt &rParam, const Bool useLimitedPrefixLength, const ChannelType channelType );
+
+  Void  xCopyFrom            ( const TEncSbac* pSrc );
+  Void  xCopyContextsFrom    ( const TEncSbac* pSrc );
+
+  // Deblocking-filter debug hooks; not supported in the SBAC coder.
+  Void codeDFFlag( UInt /*uiCode*/, const Char* /*pSymbolName*/ )       {printf("Not supported in codeDFFlag()\n"); assert(0); exit(1);};
+  Void codeDFSvlc( Int /*iCode*/, const Char* /*pSymbolName*/ )         {printf("Not supported in codeDFSvlc()\n"); assert(0); exit(1);};
+
+protected:
+  TComBitIf*    m_pcBitIf;
+  TComSlice*    m_pcSlice;
+  TEncBinIf*    m_pcBinIf;
+
+  //--Adaptive loop filter
+
+public:
+  // CU-level syntax coding.
+  Void codeCUTransquantBypassFlag( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeSkipFlag      ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeMergeFlag     ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeMergeIndex    ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeSplitFlag     ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );
+  Void codeMVPIdx        ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList );
+
+  Void codePartSize      ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );
+  Void codePredMode      ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeIPCMInfo      ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeTransformSubdivFlag ( UInt uiSymbol, UInt uiCtx );
+  Void codeQtCbf               ( TComTU & rTu, const ComponentID compID, const Bool lowestLevel );
+  Void codeQtRootCbf           ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeQtCbfZero           ( TComTU &rTu, const ChannelType chType );
+  Void codeQtRootCbfZero       ( TComDataCU* pcCU );
+  Void codeIntraDirLumaAng     ( TComDataCU* pcCU, UInt absPartIdx, Bool isMultiple);
+
+  Void codeIntraDirChroma      ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeInterDir            ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeRefFrmIdx           ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList );
+  Void codeMvd                 ( TComDataCU* pcCU, UInt uiAbsPartIdx, RefPicList eRefList );
+
+  Void codeCrossComponentPrediction( TComTU &rTu, ComponentID compID );
+
+  Void codeDeltaQP             ( TComDataCU* pcCU, UInt uiAbsPartIdx );
+  Void codeChromaQpAdjustment  ( TComDataCU* cu, UInt absPartIdx );
+
+  // Residual coefficient coding.
+  Void codeLastSignificantXY ( UInt uiPosX, UInt uiPosY, Int width, Int height, ComponentID component, UInt uiScanIdx );
+  Void codeCoeffNxN            ( TComTU &rTu, TCoeff* pcCoef, const ComponentID compID );
+  Void codeTransformSkipFlags ( TComTU &rTu, ComponentID component );
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // for RD-optimizatioon
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Void estBit               (estBitsSbacStruct* pcEstBitsSbac, Int width, Int height, ChannelType chType);
+  Void estCBFBit                     ( estBitsSbacStruct* pcEstBitsSbac );
+  Void estSignificantCoeffGroupMapBit( estBitsSbacStruct* pcEstBitsSbac, ChannelType chType );
+  Void estSignificantMapBit          ( estBitsSbacStruct* pcEstBitsSbac, Int width, Int height, ChannelType chType );
+  Void estLastSignificantPositionBit ( estBitsSbacStruct* pcEstBitsSbac, Int width, Int height, ChannelType chType );
+  Void estSignificantCoefficientsBit ( estBitsSbacStruct* pcEstBitsSbac, ChannelType chType );
+
+  Void codeExplicitRdpcmMode            ( TComTU &rTu, const ComponentID compID );
+
+
+  TEncBinIf* getEncBinIf()  { return m_pcBinIf; }
+private:
+  // Flat storage for all context models; NOTE(review): the 3D buffers below
+  // presumably index into this array — confirm against TEncSbac.cpp.
+  ContextModel         m_contextModels[MAX_NUM_CTX_MOD];
+  Int                  m_numContextModels;
+  ContextModel3DBuffer m_cCUSplitFlagSCModel;
+  ContextModel3DBuffer m_cCUSkipFlagSCModel;
+  ContextModel3DBuffer m_cCUMergeFlagExtSCModel;
+  ContextModel3DBuffer m_cCUMergeIdxExtSCModel;
+  ContextModel3DBuffer m_cCUPartSizeSCModel;
+  ContextModel3DBuffer m_cCUPredModeSCModel;
+  ContextModel3DBuffer m_cCUIntraPredSCModel;
+  ContextModel3DBuffer m_cCUChromaPredSCModel;
+  ContextModel3DBuffer m_cCUDeltaQpSCModel;
+  ContextModel3DBuffer m_cCUInterDirSCModel;
+  ContextModel3DBuffer m_cCURefPicSCModel;
+  ContextModel3DBuffer m_cCUMvdSCModel;
+  ContextModel3DBuffer m_cCUQtCbfSCModel;
+  ContextModel3DBuffer m_cCUTransSubdivFlagSCModel;
+  ContextModel3DBuffer m_cCUQtRootCbfSCModel;
+
+  ContextModel3DBuffer m_cCUSigCoeffGroupSCModel;
+  ContextModel3DBuffer m_cCUSigSCModel;
+  ContextModel3DBuffer m_cCuCtxLastX;
+  ContextModel3DBuffer m_cCuCtxLastY;
+  ContextModel3DBuffer m_cCUOneSCModel;
+  ContextModel3DBuffer m_cCUAbsSCModel;
+
+  ContextModel3DBuffer m_cMVPIdxSCModel;
+
+  ContextModel3DBuffer m_cSaoMergeSCModel;
+  ContextModel3DBuffer m_cSaoTypeIdxSCModel;
+  ContextModel3DBuffer m_cTransformSkipSCModel;
+  ContextModel3DBuffer m_CUTransquantBypassFlagSCModel;
+  ContextModel3DBuffer m_explicitRdpcmFlagSCModel;
+  ContextModel3DBuffer m_explicitRdpcmDirSCModel;
+  ContextModel3DBuffer m_cCrossComponentPredictionSCModel;
+
+  ContextModel3DBuffer m_ChromaQpAdjFlagSCModel;
+  ContextModel3DBuffer m_ChromaQpAdjIdcSCModel;
+
+  // Golomb-Rice parameter adaptation statistics (RExt).
+  UInt m_golombRiceAdaptationStatistics[RExt__GOLOMB_RICE_ADAPTATION_STATISTICS_SETS];
+};
+
+//! \}
+
+#endif // __TENCSBAC__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncSearch.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,5814 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncSearch.cpp
+ \brief    encoder search class
+ */
+
+#include "TLibCommon/TypeDef.h"
+#include "TLibCommon/TComRom.h"
+#include "TLibCommon/TComMotionInfo.h"
+#include "TEncSearch.h"
+#include "TLibCommon/TComTU.h"
+#include "TLibCommon/Debug.h"
+#include <math.h>
+#include <limits>
+
+
+//! \ingroup TLibEncoder
+//! \{
+
+// Candidate MV offsets tested during half-pel refinement: the centre, the four
+// edge neighbours, then the four corner neighbours of the current best position.
+static const TComMv s_acMvRefineH[9] =
+{
+  TComMv(  0,  0 ), // 0
+  TComMv(  0, -1 ), // 1
+  TComMv(  0,  1 ), // 2
+  TComMv( -1,  0 ), // 3
+  TComMv(  1,  0 ), // 4
+  TComMv( -1, -1 ), // 5
+  TComMv(  1, -1 ), // 6
+  TComMv( -1,  1 ), // 7
+  TComMv(  1,  1 )  // 8
+};
+
+// Candidate MV offsets tested during quarter-pel refinement. Same nine points
+// as s_acMvRefineH but visited in a different order (the trailing comments keep
+// each entry's index in the half-pel table) — the ordering is deliberate.
+static const TComMv s_acMvRefineQ[9] =
+{
+  TComMv(  0,  0 ), // 0
+  TComMv(  0, -1 ), // 1
+  TComMv(  0,  1 ), // 2
+  TComMv( -1, -1 ), // 5
+  TComMv(  1, -1 ), // 6
+  TComMv( -1,  0 ), // 3
+  TComMv(  1,  0 ), // 4
+  TComMv( -1,  1 ), // 7
+  TComMv(  1,  1 )  // 8
+};
+
+// 3x3 filter-index lookup table; init() keeps a pointer to its centre entry
+// (s_auiDFilter + 4) so it can be indexed by signed offsets.
+static const UInt s_auiDFilter[9] =
+{
+  0, 1, 0,
+  2, 3, 2,
+  0, 1, 0
+};
+
+/** Propagate the CBFs of the two halves of a split transform unit down one
+ *  level and derive the parent CBF as the bitwise OR of both halves.
+ *  \param rTu     current transform-unit recursion state
+ *  \param compID  colour component whose CBFs are adjusted
+ */
+static Void offsetSubTUCBFs(TComTU &rTu, const ComponentID compID)
+{
+        TComDataCU *pcCU              = rTu.getCU();
+  const UInt        uiTrDepth         = rTu.GetTransformDepthRel();
+  const UInt        uiAbsPartIdx      = rTu.GetAbsPartIdxTU(compID);
+  const UInt        partIdxesPerSubTU = rTu.GetAbsPartIdxNumParts(compID) >> 1;
+
+  //move the CBFs down a level and set the parent CBF
+
+  UChar subTUCBF[2];
+  UChar combinedSubTUCBF = 0;
+
+  // First pass: read each sub-TU's CBF and OR them into the combined flag.
+  for (UInt subTU = 0; subTU < 2; subTU++)
+  {
+    const UInt subTUAbsPartIdx = uiAbsPartIdx + (subTU * partIdxesPerSubTU);
+
+    subTUCBF[subTU]   = pcCU->getCbf(subTUAbsPartIdx, compID, uiTrDepth);
+    combinedSubTUCBF |= subTUCBF[subTU];
+  }
+
+  // Second pass: write back a composite value — bit 1 carries the sub-TU's own
+  // CBF, bit 0 the combined (parent) CBF — shifted to the current depth.
+  for (UInt subTU = 0; subTU < 2; subTU++)
+  {
+    const UInt subTUAbsPartIdx = uiAbsPartIdx + (subTU * partIdxesPerSubTU);
+    const UChar compositeCBF = (subTUCBF[subTU] << 1) | combinedSubTUCBF;
+
+    pcCU->setCbfPartRange((compositeCBF << uiTrDepth), compID, subTUAbsPartIdx, partIdxesPerSubTU);
+  }
+}
+
+
+/** Default constructor: null out every lazily-allocated buffer pointer so the
+ *  destructor's delete[] calls are safe even if init() was never executed.
+ */
+TEncSearch::TEncSearch()
+{
+  for (UInt ch=0; ch<MAX_NUM_COMPONENT; ch++)
+  {
+    m_ppcQTTempCoeff[ch]                           = NULL;
+    m_pcQTTempCoeff[ch]                            = NULL;
+#if ADAPTIVE_QP_SELECTION
+    m_ppcQTTempArlCoeff[ch]                        = NULL;
+    m_pcQTTempArlCoeff[ch]                         = NULL;
+#endif
+    m_puhQTTempCbf[ch]                             = NULL;
+    m_phQTTempCrossComponentPredictionAlpha[ch]    = NULL;
+    m_pSharedPredTransformSkip[ch]                 = NULL;
+    m_pcQTTempTUCoeff[ch]                          = NULL;
+#if ADAPTIVE_QP_SELECTION
+    m_ppcQTTempTUArlCoeff[ch]                      = NULL;
+#endif
+    m_puhQTTempTransformSkipFlag[ch]               = NULL;
+  }
+  m_puhQTTempTrIdx                                 = NULL;
+  m_pcQTTempTComYuv                                = NULL;
+  m_pcEncCfg                                       = NULL;
+  m_pcEntropyCoder                                 = NULL;
+  m_pTempPel                                       = NULL;
+  // Reset weighted-prediction distortion parameters to an inactive state.
+  setWpScalingDistParam( NULL, -1, REF_PIC_LIST_X );
+}
+
+
+
+
+/** Destructor: release every buffer allocated in init(). The per-layer
+ *  coefficient buffers and YUV buffers are only freed when m_pcEncCfg is set,
+ *  because the number of allocated layers is derived from its TU-size range.
+ */
+TEncSearch::~TEncSearch()
+{
+  if ( m_pTempPel )
+  {
+    delete [] m_pTempPel;
+    m_pTempPel = NULL;
+  }
+
+  if ( m_pcEncCfg )
+  {
+    // Must match uiNumLayersToAllocate computed in init().
+    const UInt uiNumLayersAllocated = m_pcEncCfg->getQuadtreeTULog2MaxSize()-m_pcEncCfg->getQuadtreeTULog2MinSize()+1;
+
+    for (UInt ch=0; ch<MAX_NUM_COMPONENT; ch++)
+    {
+      for (UInt layer = 0; layer < uiNumLayersAllocated; layer++)
+      {
+        delete[] m_ppcQTTempCoeff[ch][layer];
+#if ADAPTIVE_QP_SELECTION
+        delete[] m_ppcQTTempArlCoeff[ch][layer];
+#endif
+      }
+      delete[] m_ppcQTTempCoeff[ch];
+      delete[] m_pcQTTempCoeff[ch];
+      delete[] m_puhQTTempCbf[ch];
+#if ADAPTIVE_QP_SELECTION
+      delete[] m_ppcQTTempArlCoeff[ch];
+      delete[] m_pcQTTempArlCoeff[ch];
+#endif
+    }
+
+    for( UInt layer = 0; layer < uiNumLayersAllocated; layer++ )
+    {
+      m_pcQTTempTComYuv[layer].destroy();
+    }
+  }
+
+  // These were nulled in the constructor, so delete[] is safe even when
+  // init() never ran.
+  delete[] m_puhQTTempTrIdx;
+  delete[] m_pcQTTempTComYuv;
+
+  for (UInt ch=0; ch<MAX_NUM_COMPONENT; ch++)
+  {
+    delete[] m_pSharedPredTransformSkip[ch];
+    delete[] m_pcQTTempTUCoeff[ch];
+#if ADAPTIVE_QP_SELECTION
+    delete[] m_ppcQTTempTUArlCoeff[ch];
+#endif
+    delete[] m_phQTTempCrossComponentPredictionAlpha[ch];
+    delete[] m_puhQTTempTransformSkipFlag[ch];
+  }
+  m_pcQTTempTransformSkipTComYuv.destroy();
+
+  m_tmpYuvPred.destroy();
+}
+
+
+
+
+/** Initialise the search module: store configuration and tool pointers, reset
+ *  the per-list/per-reference adaptive search ranges, precompute the MVP-index
+ *  bit-cost table, and allocate all per-layer coefficient, CBF and
+ *  reconstruction buffers used during RD search.
+ *  \param pcEncCfg           encoder configuration (also determines layer count)
+ *  \param pcTrQuant          transform/quantisation module
+ *  \param iSearchRange       integer motion search range
+ *  \param bipredSearchRange  search range for bi-prediction refinement
+ *  \param iFastSearch        fast-search mode selector
+ *  \param iMaxDeltaQP        maximum delta-QP tested
+ *  \param pcEntropyCoder     entropy coder used for rate estimation
+ *  \param pcRdCost           RD cost computation module
+ *  \param pppcRDSbacCoder    per-depth SBAC coder array for RD search
+ *  \param pcRDGoOnSbacCoder  "go-on" SBAC coder used during search
+ */
+Void TEncSearch::init(TEncCfg*      pcEncCfg,
+                      TComTrQuant*  pcTrQuant,
+                      Int           iSearchRange,
+                      Int           bipredSearchRange,
+                      Int           iFastSearch,
+                      Int           iMaxDeltaQP,
+                      TEncEntropy*  pcEntropyCoder,
+                      TComRdCost*   pcRdCost,
+                      TEncSbac*** pppcRDSbacCoder,
+                      TEncSbac*   pcRDGoOnSbacCoder
+                      )
+{
+  m_pcEncCfg             = pcEncCfg;
+  m_pcTrQuant            = pcTrQuant;
+  m_iSearchRange         = iSearchRange;
+  m_bipredSearchRange    = bipredSearchRange;
+  m_iFastSearch          = iFastSearch;
+  m_iMaxDeltaQP          = iMaxDeltaQP;
+  m_pcEntropyCoder       = pcEntropyCoder;
+  m_pcRdCost             = pcRdCost;
+
+  m_pppcRDSbacCoder     = pppcRDSbacCoder;
+  m_pcRDGoOnSbacCoder   = pcRDGoOnSbacCoder;
+
+  // Start every adaptive search range at the configured full range.
+  for (UInt iDir = 0; iDir < MAX_NUM_REF_LIST_ADAPT_SR; iDir++)
+  {
+    for (UInt iRefIdx = 0; iRefIdx < MAX_IDX_ADAPT_SR; iRefIdx++)
+    {
+      m_aaiAdaptSR[iDir][iRefIdx] = iSearchRange;
+    }
+  }
+
+  // Point at the centre of the 3x3 filter table so it can be indexed by offset.
+  m_puiDFilter = s_auiDFilter + 4;
+
+  // initialize motion cost
+  // MAX_INT marks the invalid entries where iIdx >= number of candidates.
+  for( Int iNum = 0; iNum < AMVP_MAX_NUM_CANDS+1; iNum++)
+  {
+    for( Int iIdx = 0; iIdx < AMVP_MAX_NUM_CANDS; iIdx++)
+    {
+      if (iIdx < iNum)
+        m_auiMVPIdxCost[iIdx][iNum] = xGetMvpIdxBits(iIdx, iNum);
+      else
+        m_auiMVPIdxCost[iIdx][iNum] = MAX_INT;
+    }
+  }
+
+  const ChromaFormat cform=pcEncCfg->getChromaFormatIdc();
+  initTempBuff(cform);
+
+  m_pTempPel = new Pel[g_uiMaxCUWidth*g_uiMaxCUHeight];
+
+  // One buffer layer per TU size between the configured min and max log2 sizes;
+  // chroma buffers are scaled down by the component subsampling factors.
+  const UInt uiNumLayersToAllocate = pcEncCfg->getQuadtreeTULog2MaxSize()-pcEncCfg->getQuadtreeTULog2MinSize()+1;
+  const UInt uiNumPartitions = 1<<(g_uiMaxCUDepth<<1);
+  for (UInt ch=0; ch<MAX_NUM_COMPONENT; ch++)
+  {
+    const UInt csx=::getComponentScaleX(ComponentID(ch), cform);
+    const UInt csy=::getComponentScaleY(ComponentID(ch), cform);
+    m_ppcQTTempCoeff[ch] = new TCoeff* [uiNumLayersToAllocate];
+    m_pcQTTempCoeff[ch]   = new TCoeff [(g_uiMaxCUWidth*g_uiMaxCUHeight)>>(csx+csy)   ];
+#if ADAPTIVE_QP_SELECTION
+    m_ppcQTTempArlCoeff[ch]  = new TCoeff*[uiNumLayersToAllocate];
+    m_pcQTTempArlCoeff[ch]   = new TCoeff [(g_uiMaxCUWidth*g_uiMaxCUHeight)>>(csx+csy)   ];
+#endif
+    m_puhQTTempCbf[ch] = new UChar  [uiNumPartitions];
+
+    for (UInt layer = 0; layer < uiNumLayersToAllocate; layer++)
+    {
+      m_ppcQTTempCoeff[ch][layer] = new TCoeff[(g_uiMaxCUWidth*g_uiMaxCUHeight)>>(csx+csy)];
+#if ADAPTIVE_QP_SELECTION
+      m_ppcQTTempArlCoeff[ch][layer]  = new TCoeff[(g_uiMaxCUWidth*g_uiMaxCUHeight)>>(csx+csy) ];
+#endif
+    }
+
+    m_phQTTempCrossComponentPredictionAlpha[ch]    = new Char  [uiNumPartitions];
+    m_pSharedPredTransformSkip[ch]                 = new Pel   [MAX_CU_SIZE*MAX_CU_SIZE];
+    m_pcQTTempTUCoeff[ch]                          = new TCoeff[MAX_CU_SIZE*MAX_CU_SIZE];
+#if ADAPTIVE_QP_SELECTION
+    m_ppcQTTempTUArlCoeff[ch]                      = new TCoeff[MAX_CU_SIZE*MAX_CU_SIZE];
+#endif
+    m_puhQTTempTransformSkipFlag[ch]               = new UChar [uiNumPartitions];
+  }
+  m_puhQTTempTrIdx   = new UChar  [uiNumPartitions];
+  m_pcQTTempTComYuv  = new TComYuv[uiNumLayersToAllocate];
+  for( UInt ui = 0; ui < uiNumLayersToAllocate; ++ui )
+  {
+    m_pcQTTempTComYuv[ui].create( g_uiMaxCUWidth, g_uiMaxCUHeight, pcEncCfg->getChromaFormatIdc() );
+  }
+  m_pcQTTempTransformSkipTComYuv.create( g_uiMaxCUWidth, g_uiMaxCUHeight, pcEncCfg->getChromaFormatIdc() );
+  m_tmpYuvPred.create(MAX_CU_SIZE, MAX_CU_SIZE, pcEncCfg->getChromaFormatIdc());
+}
+
+#if FASTME_SMOOTHER_MV
+#define FIRSTSEARCHSTOP     1
+#else
+#define FIRSTSEARCHSTOP     0
+#endif
+
+// Tuning constants for the TZ (test zone) integer motion search. Expanded as
+// local constants at the top of the search routine so the compiler can fold
+// the disabled branches away.
+#define TZ_SEARCH_CONFIGURATION                                                                                 \
+const Int  iRaster                  = 5;  /* TZ: should be passed in from outside */                          \
+const Bool bTestOtherPredictedMV    = 0;                                                                      \
+const Bool bTestZeroVector          = 1;                                                                      \
+const Bool bTestZeroVectorStart     = 0;                                                                      \
+const Bool bTestZeroVectorStop      = 0;                                                                      \
+const Bool bFirstSearchDiamond      = 1;  /* 1 = xTZ8PointDiamondSearch   0 = xTZ8PointSquareSearch */        \
+const Bool bFirstSearchStop         = FIRSTSEARCHSTOP;                                                        \
+const UInt uiFirstSearchRounds      = 3;  /* first search stop X rounds after best match (must be >=1) */     \
+const Bool bEnableRasterSearch      = 1;                                                                      \
+const Bool bAlwaysRasterSearch      = 0;  /* ===== 1: BETTER but factor 2 slower ===== */                     \
+const Bool bRasterRefinementEnable  = 0;  /* enable either raster refinement or star refinement */            \
+const Bool bRasterRefinementDiamond = 0;  /* 1 = xTZ8PointDiamondSearch   0 = xTZ8PointSquareSearch */        \
+const Bool bStarRefinementEnable    = 1;  /* enable either star refinement or raster refinement */            \
+const Bool bStarRefinementDiamond   = 1;  /* 1 = xTZ8PointDiamondSearch   0 = xTZ8PointSquareSearch */        \
+const Bool bStarRefinementStop      = 0;                                                                      \
+const UInt uiStarRefinementRounds   = 2;  /* star refinement stop X rounds after best match (must be >=1) */  \
+
+
+// Tuning constants for the SELECTIVE fast-search variant; also derives its
+// ranges and step sizes from the configured member m_iSearchRange.
+#define SEL_SEARCH_CONFIGURATION                                                                                 \
+  const Bool bTestOtherPredictedMV    = 1;                                                                       \
+  const Bool bTestZeroVector          = 1;                                                                       \
+  const Bool bEnableRasterSearch      = 1;                                                                       \
+  const Bool bAlwaysRasterSearch      = 0;  /* ===== 1: BETTER but factor 15x slower ===== */                    \
+  const Bool bStarRefinementEnable    = 1;  /* enable either star refinement or raster refinement */             \
+  const Bool bStarRefinementDiamond   = 1;  /* 1 = xTZ8PointDiamondSearch   0 = xTZ8PointSquareSearch */         \
+  const Bool bStarRefinementStop      = 0;                                                                       \
+  const UInt uiStarRefinementRounds   = 2;  /* star refinement stop X rounds after best match (must be >=1) */   \
+  const UInt uiSearchRange            = m_iSearchRange;                                                          \
+  const Int  uiSearchRangeInitial     = m_iSearchRange >> 2;                                                     \
+  const Int  uiSearchStep             = 4;                                                                       \
+  const Int  iMVDistThresh            = 8;                                                                       \
+
+
+
+/** Evaluate one integer-pel candidate position: compute the (possibly
+ *  row-subsampled) distortion plus the MV-rate cost and, when cheaper than the
+ *  current best, record it in rcStruct.
+ *  \param pcPatternKey  pattern (original block) to match against
+ *  \param rcStruct      running best-match state, updated in place
+ *  \param iSearchX      horizontal component of the candidate MV
+ *  \param iSearchY      vertical component of the candidate MV
+ *  \param ucPointNr     index of this point inside the search pattern (0 = none)
+ *  \param uiDistance    pattern distance of this candidate from the start point
+ */
+__inline Void TEncSearch::xTZSearchHelp( TComPattern* pcPatternKey, IntTZSearchStruct& rcStruct, const Int iSearchX, const Int iSearchY, const UChar ucPointNr, const UInt uiDistance )
+{
+  Distortion  uiSad = 0;
+
+  Pel*  piRefSrch;
+
+  piRefSrch = rcStruct.piRefY + iSearchY * rcStruct.iYStride + iSearchX;
+
+  //-- jclee for using the SAD function pointer
+  m_pcRdCost->setDistParam( pcPatternKey, piRefSrch, rcStruct.iYStride,  m_cDistParam );
+
+  if(m_pcEncCfg->getFastSearch() != SELECTIVE)
+  {
+    // fast encoder decision: use subsampled SAD when rows > 8 for integer ME
+    if ( m_pcEncCfg->getUseFastEnc() )
+    {
+      if ( m_cDistParam.iRows > 8 )
+      {
+        m_cDistParam.iSubShift = 1;
+      }
+    }
+  }
+
+  setDistParamComp(COMPONENT_Y);
+
+  // distortion
+  m_cDistParam.bitDepth = g_bitDepth[CHANNEL_TYPE_LUMA];
+  if(m_pcEncCfg->getFastSearch() == SELECTIVE)
+  {
+    // SELECTIVE mode: start with heavy row subsampling (shift chosen by block
+    // height), then successively refine the SAD one subsampling level at a
+    // time, aborting as soon as the partial cost already exceeds the best.
+    Int isubShift = 0;
+    // motion cost
+    UInt uiBitCost = m_pcRdCost->getCost( iSearchX, iSearchY );
+
+    if ( m_cDistParam.iRows > 32 )
+      m_cDistParam.iSubShift = 4;
+    else if ( m_cDistParam.iRows > 16 )
+      m_cDistParam.iSubShift = 3;
+    else if ( m_cDistParam.iRows > 8 )
+      m_cDistParam.iSubShift = 2;
+    else
+      m_cDistParam.iSubShift = 1;
+
+    Distortion uiTempSad = m_cDistParam.DistFunc( &m_cDistParam );
+    if((uiTempSad + uiBitCost) < rcStruct.uiBestSad)
+    {
+      uiSad += uiTempSad >>  m_cDistParam.iSubShift;
+      while(m_cDistParam.iSubShift > 0)
+      {
+        // Advance the source/reference pointers to the rows skipped by the
+        // coarser pass, so each iteration adds only the newly covered rows.
+        isubShift         = m_cDistParam.iSubShift -1;
+        m_cDistParam.pOrg = pcPatternKey->getROIY() + (pcPatternKey->getPatternLStride() << isubShift);
+        m_cDistParam.pCur = piRefSrch + (rcStruct.iYStride << isubShift);
+        uiTempSad = m_cDistParam.DistFunc( &m_cDistParam );
+        uiSad += uiTempSad >>  m_cDistParam.iSubShift;
+        if(((uiSad << isubShift) + uiBitCost) > rcStruct.uiBestSad)
+          break;
+
+        m_cDistParam.iSubShift--;
+      }
+
+      // Only a fully refined (iSubShift == 0) SAD may replace the best match.
+      if(m_cDistParam.iSubShift == 0)
+      {
+        uiSad += uiBitCost;
+        if( uiSad < rcStruct.uiBestSad )
+        {
+          rcStruct.uiBestSad      = uiSad;
+          rcStruct.iBestX         = iSearchX;
+          rcStruct.iBestY         = iSearchY;
+          rcStruct.uiBestDistance = uiDistance;
+          rcStruct.uiBestRound    = 0;
+          rcStruct.ucPointNr      = ucPointNr;
+        }
+      }
+    }
+  }
+  else
+  {
+    uiSad = m_cDistParam.DistFunc( &m_cDistParam );
+
+    // motion cost
+    uiSad += m_pcRdCost->getCost( iSearchX, iSearchY );
+
+    if( uiSad < rcStruct.uiBestSad )
+    {
+      rcStruct.uiBestSad      = uiSad;
+      rcStruct.iBestX         = iSearchX;
+      rcStruct.iBestY         = iSearchY;
+      rcStruct.uiBestDistance = uiDistance;
+      rcStruct.uiBestRound    = 0;
+      rcStruct.ucPointNr      = ucPointNr;
+    }
+  }
+}
+
+
+
+
+/** Test the (up to) two positions adjacent to the current best point that the
+ *  preceding 8-point pattern did not cover. Which two depends on where the
+ *  best point sat in the pattern (rcStruct.ucPointNr, see diagram below); all
+ *  candidates are clipped to the search window before being evaluated.
+ *  \param pcPatternKey   pattern (original block) to match against
+ *  \param rcStruct       running best-match state, updated in place
+ *  \param pcMvSrchRngLT  top-left corner of the MV search window
+ *  \param pcMvSrchRngRB  bottom-right corner of the MV search window
+ */
+__inline Void TEncSearch::xTZ2PointSearch( TComPattern* pcPatternKey, IntTZSearchStruct& rcStruct, TComMv* pcMvSrchRngLT, TComMv* pcMvSrchRngRB )
+{
+  Int   iSrchRngHorLeft   = pcMvSrchRngLT->getHor();
+  Int   iSrchRngHorRight  = pcMvSrchRngRB->getHor();
+  Int   iSrchRngVerTop    = pcMvSrchRngLT->getVer();
+  Int   iSrchRngVerBottom = pcMvSrchRngRB->getVer();
+
+  // 2 point search,                   //   1 2 3
+  // check only the 2 untested points  //   4 0 5
+  // around the start point            //   6 7 8
+  Int iStartX = rcStruct.iBestX;
+  Int iStartY = rcStruct.iBestY;
+  switch( rcStruct.ucPointNr )
+  {
+    case 1:
+    {
+      if ( (iStartX - 1) >= iSrchRngHorLeft )
+      {
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX - 1, iStartY, 0, 2 );
+      }
+      if ( (iStartY - 1) >= iSrchRngVerTop )
+      {
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iStartY - 1, 0, 2 );
+      }
+    }
+      break;
+    case 2:
+    {
+      if ( (iStartY - 1) >= iSrchRngVerTop )
+      {
+        if ( (iStartX - 1) >= iSrchRngHorLeft )
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX - 1, iStartY - 1, 0, 2 );
+        }
+        if ( (iStartX + 1) <= iSrchRngHorRight )
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX + 1, iStartY - 1, 0, 2 );
+        }
+      }
+    }
+      break;
+    case 3:
+    {
+      if ( (iStartY - 1) >= iSrchRngVerTop )
+      {
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iStartY - 1, 0, 2 );
+      }
+      if ( (iStartX + 1) <= iSrchRngHorRight )
+      {
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX + 1, iStartY, 0, 2 );
+      }
+    }
+      break;
+    case 4:
+    {
+      if ( (iStartX - 1) >= iSrchRngHorLeft )
+      {
+        if ( (iStartY + 1) <= iSrchRngVerBottom )
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX - 1, iStartY + 1, 0, 2 );
+        }
+        if ( (iStartY - 1) >= iSrchRngVerTop )
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX - 1, iStartY - 1, 0, 2 );
+        }
+      }
+    }
+      break;
+    case 5:
+    {
+      if ( (iStartX + 1) <= iSrchRngHorRight )
+      {
+        if ( (iStartY - 1) >= iSrchRngVerTop )
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX + 1, iStartY - 1, 0, 2 );
+        }
+        if ( (iStartY + 1) <= iSrchRngVerBottom )
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX + 1, iStartY + 1, 0, 2 );
+        }
+      }
+    }
+      break;
+    case 6:
+    {
+      if ( (iStartX - 1) >= iSrchRngHorLeft )
+      {
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX - 1, iStartY , 0, 2 );
+      }
+      if ( (iStartY + 1) <= iSrchRngVerBottom )
+      {
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iStartY + 1, 0, 2 );
+      }
+    }
+      break;
+    case 7:
+    {
+      if ( (iStartY + 1) <= iSrchRngVerBottom )
+      {
+        if ( (iStartX - 1) >= iSrchRngHorLeft )
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX - 1, iStartY + 1, 0, 2 );
+        }
+        if ( (iStartX + 1) <= iSrchRngHorRight )
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX + 1, iStartY + 1, 0, 2 );
+        }
+      }
+    }
+      break;
+    case 8:
+    {
+      if ( (iStartX + 1) <= iSrchRngHorRight )
+      {
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX + 1, iStartY, 0, 2 );
+      }
+      if ( (iStartY + 1) <= iSrchRngVerBottom )
+      {
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iStartY + 1, 0, 2 );
+      }
+    }
+      break;
+    default:
+    {
+      // ucPointNr 0 means "no pattern point recorded"; callers are expected to
+      // invoke this only after a pattern search set it to 1..8.
+      assert( false );
+    }
+      break;
+  } // switch( rcStruct.ucPointNr )
+}
+
+
+
+
+/** Square search pattern: evaluate the eight neighbours at distance iDist
+ *  around (iStartX, iStartY), skipping any point outside the search window.
+ *  Increments rcStruct.uiBestRound; xTZSearchHelp resets it on improvement.
+ *  \param pcPatternKey   pattern (original block) to match against
+ *  \param rcStruct       running best-match state, updated in place
+ *  \param pcMvSrchRngLT  top-left corner of the MV search window
+ *  \param pcMvSrchRngRB  bottom-right corner of the MV search window
+ *  \param iStartX        pattern centre, horizontal
+ *  \param iStartY        pattern centre, vertical
+ *  \param iDist          pattern distance (must be non-zero)
+ */
+__inline Void TEncSearch::xTZ8PointSquareSearch( TComPattern* pcPatternKey, IntTZSearchStruct& rcStruct, TComMv* pcMvSrchRngLT, TComMv* pcMvSrchRngRB, const Int iStartX, const Int iStartY, const Int iDist )
+{
+  Int   iSrchRngHorLeft   = pcMvSrchRngLT->getHor();
+  Int   iSrchRngHorRight  = pcMvSrchRngRB->getHor();
+  Int   iSrchRngVerTop    = pcMvSrchRngLT->getVer();
+  Int   iSrchRngVerBottom = pcMvSrchRngRB->getVer();
+
+  // 8 point search,                   //   1 2 3
+  // search around the start point     //   4 0 5
+  // with the required  distance       //   6 7 8
+  assert( iDist != 0 );
+  const Int iTop        = iStartY - iDist;
+  const Int iBottom     = iStartY + iDist;
+  const Int iLeft       = iStartX - iDist;
+  const Int iRight      = iStartX + iDist;
+  rcStruct.uiBestRound += 1;
+
+  if ( iTop >= iSrchRngVerTop ) // check top
+  {
+    if ( iLeft >= iSrchRngHorLeft ) // check top left
+    {
+      xTZSearchHelp( pcPatternKey, rcStruct, iLeft, iTop, 1, iDist );
+    }
+    // top middle
+    xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iTop, 2, iDist );
+
+    if ( iRight <= iSrchRngHorRight ) // check top right
+    {
+      xTZSearchHelp( pcPatternKey, rcStruct, iRight, iTop, 3, iDist );
+    }
+  } // check top
+  if ( iLeft >= iSrchRngHorLeft ) // check middle left
+  {
+    xTZSearchHelp( pcPatternKey, rcStruct, iLeft, iStartY, 4, iDist );
+  }
+  if ( iRight <= iSrchRngHorRight ) // check middle right
+  {
+    xTZSearchHelp( pcPatternKey, rcStruct, iRight, iStartY, 5, iDist );
+  }
+  if ( iBottom <= iSrchRngVerBottom ) // check bottom
+  {
+    if ( iLeft >= iSrchRngHorLeft ) // check bottom left
+    {
+      xTZSearchHelp( pcPatternKey, rcStruct, iLeft, iBottom, 6, iDist );
+    }
+    // check bottom middle
+    xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iBottom, 7, iDist );
+
+    if ( iRight <= iSrchRngHorRight ) // check bottom right
+    {
+      xTZSearchHelp( pcPatternKey, rcStruct, iRight, iBottom, 8, iDist );
+    }
+  } // check bottom
+}
+
+
+
+
+/** Diamond search pattern around (iStartX, iStartY) at distance iDist.
+ *  iDist == 1 tests the 4-point cross; 2 <= iDist <= 8 an 8-point diamond
+ *  whose corners sit at half distance; iDist > 8 a 16-point spread along the
+ *  diamond edges. All candidates are clipped to the search window; when the
+ *  whole pattern fits, the unclipped fast path is taken. Increments
+ *  rcStruct.uiBestRound; xTZSearchHelp resets it on improvement.
+ *  \param pcPatternKey   pattern (original block) to match against
+ *  \param rcStruct       running best-match state, updated in place
+ *  \param pcMvSrchRngLT  top-left corner of the MV search window
+ *  \param pcMvSrchRngRB  bottom-right corner of the MV search window
+ *  \param iStartX        pattern centre, horizontal
+ *  \param iStartY        pattern centre, vertical
+ *  \param iDist          pattern distance (must be non-zero)
+ */
+__inline Void TEncSearch::xTZ8PointDiamondSearch( TComPattern* pcPatternKey, IntTZSearchStruct& rcStruct, TComMv* pcMvSrchRngLT, TComMv* pcMvSrchRngRB, const Int iStartX, const Int iStartY, const Int iDist )
+{
+  Int   iSrchRngHorLeft   = pcMvSrchRngLT->getHor();
+  Int   iSrchRngHorRight  = pcMvSrchRngRB->getHor();
+  Int   iSrchRngVerTop    = pcMvSrchRngLT->getVer();
+  Int   iSrchRngVerBottom = pcMvSrchRngRB->getVer();
+
+  // 8 point search,                   //   1 2 3
+  // search around the start point     //   4 0 5
+  // with the required  distance       //   6 7 8
+  assert ( iDist != 0 );
+  const Int iTop        = iStartY - iDist;
+  const Int iBottom     = iStartY + iDist;
+  const Int iLeft       = iStartX - iDist;
+  const Int iRight      = iStartX + iDist;
+  rcStruct.uiBestRound += 1;
+
+  if ( iDist == 1 ) // iDist == 1
+  {
+    if ( iTop >= iSrchRngVerTop ) // check top
+    {
+      xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iTop, 2, iDist );
+    }
+    if ( iLeft >= iSrchRngHorLeft ) // check middle left
+    {
+      xTZSearchHelp( pcPatternKey, rcStruct, iLeft, iStartY, 4, iDist );
+    }
+    if ( iRight <= iSrchRngHorRight ) // check middle right
+    {
+      xTZSearchHelp( pcPatternKey, rcStruct, iRight, iStartY, 5, iDist );
+    }
+    if ( iBottom <= iSrchRngVerBottom ) // check bottom
+    {
+      xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iBottom, 7, iDist );
+    }
+  }
+  else // if (iDist != 1)
+  {
+    if ( iDist <= 8 )
+    {
+      // Diamond corners are placed at half the distance from the centre.
+      const Int iTop_2      = iStartY - (iDist>>1);
+      const Int iBottom_2   = iStartY + (iDist>>1);
+      const Int iLeft_2     = iStartX - (iDist>>1);
+      const Int iRight_2    = iStartX + (iDist>>1);
+
+      if (  iTop >= iSrchRngVerTop && iLeft >= iSrchRngHorLeft &&
+          iRight <= iSrchRngHorRight && iBottom <= iSrchRngVerBottom ) // check border
+      {
+        // Whole pattern inside the window: no per-point clipping needed.
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX,  iTop,      2, iDist    );
+        xTZSearchHelp( pcPatternKey, rcStruct, iLeft_2,  iTop_2,    1, iDist>>1 );
+        xTZSearchHelp( pcPatternKey, rcStruct, iRight_2, iTop_2,    3, iDist>>1 );
+        xTZSearchHelp( pcPatternKey, rcStruct, iLeft,    iStartY,   4, iDist    );
+        xTZSearchHelp( pcPatternKey, rcStruct, iRight,   iStartY,   5, iDist    );
+        xTZSearchHelp( pcPatternKey, rcStruct, iLeft_2,  iBottom_2, 6, iDist>>1 );
+        xTZSearchHelp( pcPatternKey, rcStruct, iRight_2, iBottom_2, 8, iDist>>1 );
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX,  iBottom,   7, iDist    );
+      }
+      else // check border
+      {
+        if ( iTop >= iSrchRngVerTop ) // check top
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iTop, 2, iDist );
+        }
+        if ( iTop_2 >= iSrchRngVerTop ) // check half top
+        {
+          if ( iLeft_2 >= iSrchRngHorLeft ) // check half left
+          {
+            xTZSearchHelp( pcPatternKey, rcStruct, iLeft_2, iTop_2, 1, (iDist>>1) );
+          }
+          if ( iRight_2 <= iSrchRngHorRight ) // check half right
+          {
+            xTZSearchHelp( pcPatternKey, rcStruct, iRight_2, iTop_2, 3, (iDist>>1) );
+          }
+        } // check half top
+        if ( iLeft >= iSrchRngHorLeft ) // check left
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iLeft, iStartY, 4, iDist );
+        }
+        if ( iRight <= iSrchRngHorRight ) // check right
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iRight, iStartY, 5, iDist );
+        }
+        if ( iBottom_2 <= iSrchRngVerBottom ) // check half bottom
+        {
+          if ( iLeft_2 >= iSrchRngHorLeft ) // check half left
+          {
+            xTZSearchHelp( pcPatternKey, rcStruct, iLeft_2, iBottom_2, 6, (iDist>>1) );
+          }
+          if ( iRight_2 <= iSrchRngHorRight ) // check half right
+          {
+            xTZSearchHelp( pcPatternKey, rcStruct, iRight_2, iBottom_2, 8, (iDist>>1) );
+          }
+        } // check half bottom
+        if ( iBottom <= iSrchRngVerBottom ) // check bottom
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iBottom, 7, iDist );
+        }
+      } // check border
+    }
+    else // iDist > 8
+    {
+      if ( iTop >= iSrchRngVerTop && iLeft >= iSrchRngHorLeft &&
+          iRight <= iSrchRngHorRight && iBottom <= iSrchRngVerBottom ) // check border
+      {
+        // Whole pattern inside the window: 4 diamond tips plus 12 points
+        // spaced at quarter-distance steps along the diamond edges.
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iTop,    0, iDist );
+        xTZSearchHelp( pcPatternKey, rcStruct, iLeft,   iStartY, 0, iDist );
+        xTZSearchHelp( pcPatternKey, rcStruct, iRight,  iStartY, 0, iDist );
+        xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iBottom, 0, iDist );
+        for ( Int index = 1; index < 4; index++ )
+        {
+          Int iPosYT = iTop    + ((iDist>>2) * index);
+          Int iPosYB = iBottom - ((iDist>>2) * index);
+          Int iPosXL = iStartX - ((iDist>>2) * index);
+          Int iPosXR = iStartX + ((iDist>>2) * index);
+          xTZSearchHelp( pcPatternKey, rcStruct, iPosXL, iPosYT, 0, iDist );
+          xTZSearchHelp( pcPatternKey, rcStruct, iPosXR, iPosYT, 0, iDist );
+          xTZSearchHelp( pcPatternKey, rcStruct, iPosXL, iPosYB, 0, iDist );
+          xTZSearchHelp( pcPatternKey, rcStruct, iPosXR, iPosYB, 0, iDist );
+        }
+      }
+      else // check border
+      {
+        if ( iTop >= iSrchRngVerTop ) // check top
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iTop, 0, iDist );
+        }
+        if ( iLeft >= iSrchRngHorLeft ) // check left
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iLeft, iStartY, 0, iDist );
+        }
+        if ( iRight <= iSrchRngHorRight ) // check right
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iRight, iStartY, 0, iDist );
+        }
+        if ( iBottom <= iSrchRngVerBottom ) // check bottom
+        {
+          xTZSearchHelp( pcPatternKey, rcStruct, iStartX, iBottom, 0, iDist );
+        }
+        for ( Int index = 1; index < 4; index++ )
+        {
+          Int iPosYT = iTop    + ((iDist>>2) * index);
+          Int iPosYB = iBottom - ((iDist>>2) * index);
+          Int iPosXL = iStartX - ((iDist>>2) * index);
+          Int iPosXR = iStartX + ((iDist>>2) * index);
+
+          if ( iPosYT >= iSrchRngVerTop ) // check top
+          {
+            if ( iPosXL >= iSrchRngHorLeft ) // check left
+            {
+              xTZSearchHelp( pcPatternKey, rcStruct, iPosXL, iPosYT, 0, iDist );
+            }
+            if ( iPosXR <= iSrchRngHorRight ) // check right
+            {
+              xTZSearchHelp( pcPatternKey, rcStruct, iPosXR, iPosYT, 0, iDist );
+            }
+          } // check top
+          if ( iPosYB <= iSrchRngVerBottom ) // check bottom
+          {
+            if ( iPosXL >= iSrchRngHorLeft ) // check left
+            {
+              xTZSearchHelp( pcPatternKey, rcStruct, iPosXL, iPosYB, 0, iDist );
+            }
+            if ( iPosXR <= iSrchRngHorRight ) // check right
+            {
+              xTZSearchHelp( pcPatternKey, rcStruct, iPosXR, iPosYB, 0, iDist );
+            }
+          } // check bottom
+        } // for ...
+      } // check border
+    } // iDist <= 8
+  } // iDist == 1
+}
+
+
+
+
+
+//<--
+
+/** Sub-pel motion refinement: evaluate the nine candidate offsets around
+ *  baseRefMv using the pre-interpolated reference planes in m_filteredBlock,
+ *  and select the one with the lowest distortion + MV-rate cost.
+ *  \param pcPatternKey        pattern (original block) to match against
+ *  \param baseRefMv           base MV the refinement offsets are added to
+ *  \param iFrac               2 = half-pel step, otherwise quarter-pel step
+ *  \param rcMvFrac            in/out: on return, the winning refinement offset
+ *  \param bAllowUseOfHadamard allow Hadamard-based distortion if enabled in cfg
+ *  \return                    distortion of the best candidate
+ */
+Distortion TEncSearch::xPatternRefinement( TComPattern* pcPatternKey,
+                                           TComMv baseRefMv,
+                                           Int iFrac, TComMv& rcMvFrac,
+                                           Bool bAllowUseOfHadamard
+                                         )
+{
+  Distortion  uiDist;
+  Distortion  uiDistBest  = std::numeric_limits<Distortion>::max();
+  UInt        uiDirecBest = 0;
+
+  Pel*  piRefPos;
+  Int iRefStride = m_filteredBlock[0][0].getStride(COMPONENT_Y);
+
+  m_pcRdCost->setDistParam( pcPatternKey, m_filteredBlock[0][0].getAddr(COMPONENT_Y), iRefStride, 1, m_cDistParam, m_pcEncCfg->getUseHADME() && bAllowUseOfHadamard );
+
+  // Half-pel and quarter-pel passes use differently ordered offset tables.
+  const TComMv* pcMvRefine = (iFrac == 2 ? s_acMvRefineH : s_acMvRefineQ);
+
+  for (UInt i = 0; i < 9; i++)
+  {
+    TComMv cMvTest = pcMvRefine[i];
+    cMvTest += baseRefMv;
+
+    // Select the interpolated plane by the fractional part of the scaled MV;
+    // the pointer nudges below compensate for how the planes are laid out.
+    Int horVal = cMvTest.getHor() * iFrac;
+    Int verVal = cMvTest.getVer() * iFrac;
+    piRefPos = m_filteredBlock[ verVal & 3 ][ horVal & 3 ].getAddr(COMPONENT_Y);
+    if ( horVal == 2 && ( verVal & 1 ) == 0 )
+    {
+      piRefPos += 1;
+    }
+    if ( ( horVal & 1 ) == 0 && verVal == 2 )
+    {
+      piRefPos += iRefStride;
+    }
+    cMvTest = pcMvRefine[i];
+    cMvTest += rcMvFrac;
+
+    setDistParamComp(COMPONENT_Y);
+
+    m_cDistParam.pCur = piRefPos;
+    m_cDistParam.bitDepth = g_bitDepth[CHANNEL_TYPE_LUMA];
+    uiDist = m_cDistParam.DistFunc( &m_cDistParam );
+    uiDist += m_pcRdCost->getCost( cMvTest.getHor(), cMvTest.getVer() );
+
+    if ( uiDist < uiDistBest )
+    {
+      uiDistBest  = uiDist;
+      uiDirecBest = i;
+    }
+  }
+
+  rcMvFrac = pcMvRefine[uiDirecBest];
+
+  return uiDistBest;
+}
+
+
+
+/** Recursively encode (for rate estimation) the transform-subdivision flags
+ *  and coded-block flags of the residual quadtree rooted at rTu. The leading
+ *  asserts mirror the bitstream constraints under which the subdivision flag
+ *  is inferred rather than signalled.
+ *  \param rTu     current transform-unit recursion state
+ *  \param bLuma   encode the luma subdivision flag and CBF
+ *  \param bChroma encode the chroma CBFs
+ */
+Void
+TEncSearch::xEncSubdivCbfQT(TComTU      &rTu,
+                            Bool         bLuma,
+                            Bool         bChroma )
+{
+  TComDataCU* pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx         = rTu.GetAbsPartIdxTU();
+  const UInt uiTrDepth            = rTu.GetTransformDepthRel();
+  const UInt uiTrMode             = pcCU->getTransformIdx( uiAbsPartIdx );
+  const UInt uiSubdiv             = ( uiTrMode > uiTrDepth ? 1 : 0 );
+  const UInt uiLog2LumaTrafoSize  = rTu.GetLog2LumaTrSize();
+
+  // Cases where subdivision (or its absence) is implied and nothing is coded:
+  if( pcCU->isIntra(0) && pcCU->getPartitionSize(0) == SIZE_NxN && uiTrDepth == 0 )
+  {
+    assert( uiSubdiv );
+  }
+  else if( uiLog2LumaTrafoSize > pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() )
+  {
+    assert( uiSubdiv );
+  }
+  else if( uiLog2LumaTrafoSize == pcCU->getSlice()->getSPS()->getQuadtreeTULog2MinSize() )
+  {
+    assert( !uiSubdiv );
+  }
+  else if( uiLog2LumaTrafoSize == pcCU->getQuadtreeTULog2MinSizeInCU(uiAbsPartIdx) )
+  {
+    assert( !uiSubdiv );
+  }
+  else
+  {
+    // Only here is the subdivision flag actually signalled.
+    assert( uiLog2LumaTrafoSize > pcCU->getQuadtreeTULog2MinSizeInCU(uiAbsPartIdx) );
+    if( bLuma )
+    {
+      m_pcEntropyCoder->encodeTransformSubdivFlag( uiSubdiv, 5 - uiLog2LumaTrafoSize );
+    }
+  }
+
+  if ( bChroma )
+  {
+    // Chroma CBFs are coded at this level when the parent level had a set CBF
+    // (or at depth 0), per component.
+    const UInt numberValidComponents = getNumberValidComponents(rTu.GetChromaFormat());
+    for (UInt ch=COMPONENT_Cb; ch<numberValidComponents; ch++)
+    {
+      const ComponentID compID=ComponentID(ch);
+      if( rTu.ProcessingAllQuadrants(compID) && (uiTrDepth==0 || pcCU->getCbf( uiAbsPartIdx, compID, uiTrDepth-1 ) ))
+        m_pcEntropyCoder->encodeQtCbf(rTu, compID, (uiSubdiv == 0));
+    }
+  }
+
+  if( uiSubdiv )
+  {
+    // Recurse into the four child transform units.
+    TComTURecurse tuRecurse(rTu, false);
+    do
+    {
+      xEncSubdivCbfQT( tuRecurse, bLuma, bChroma );
+    } while (tuRecurse.nextSection(rTu));
+  }
+  else
+  {
+    //===== Cbfs =====
+    if( bLuma )
+    {
+      m_pcEntropyCoder->encodeQtCbf( rTu, COMPONENT_Y, true );
+    }
+  }
+}
+
+
+
+
+// Recursively encode the quantised coefficients of one component for the
+// TU quadtree rooted at rTu (used for bit counting during RD search).
+// bRealCoeff selects the CU's committed coefficient buffer versus the
+// temporary per-QT-layer RD buffers (m_ppcQTTempCoeff).
+Void
+TEncSearch::xEncCoeffQT(TComTU &rTu,
+                        const ComponentID  component,
+                        Bool         bRealCoeff )
+{
+  TComDataCU* pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
+  const UInt uiTrDepth=rTu.GetTransformDepthRel();
+
+  const UInt  uiTrMode        = pcCU->getTransformIdx( uiAbsPartIdx );
+  const UInt  uiSubdiv        = ( uiTrMode > uiTrDepth ? 1 : 0 );
+
+  if( uiSubdiv )
+  {
+    TComTURecurse tuRecurseChild(rTu, false);
+    do
+    {
+      xEncCoeffQT( tuRecurseChild, component, bRealCoeff );
+    } while (tuRecurseChild.nextSection(rTu) );
+  }
+  else if (rTu.ProcessComponentSection(component))
+  {
+    //===== coefficients =====
+    const UInt  uiLog2TrafoSize = rTu.GetLog2LumaTrSize();
+    UInt    uiCoeffOffset   = rTu.getCoefficientOffset(component);
+    UInt    uiQTLayer       = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() - uiLog2TrafoSize;
+    TCoeff* pcCoeff         = bRealCoeff ? pcCU->getCoeff(component) : m_ppcQTTempCoeff[component][uiQTLayer];
+
+    // Cross-component prediction parameters are only signalled for chroma
+    // when the co-located luma block has a non-zero Cbf.
+    if (isChroma(component) && (pcCU->getCbf( rTu.GetAbsPartIdxTU(), COMPONENT_Y, uiTrMode ) != 0) && pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction() )
+    {
+      m_pcEntropyCoder->encodeCrossComponentPrediction( rTu, component );
+    }
+
+    m_pcEntropyCoder->encodeCoeffNxN( rTu, pcCoeff+uiCoeffOffset, component );
+  }
+}
+
+
+
+
+// Encode the intra CU header for bit counting: CU-level flags (transquant
+// bypass, skip, pred mode, part size, IPCM) at the first partition, then
+// the luma and/or chroma intra prediction modes for the current partition.
+// Returns early after IPCM info when the CU is coded as IPCM.
+Void
+TEncSearch::xEncIntraHeader( TComDataCU*  pcCU,
+                            UInt         uiTrDepth,
+                            UInt         uiAbsPartIdx,
+                            Bool         bLuma,
+                            Bool         bChroma )
+{
+  if( bLuma )
+  {
+    // CU header
+    if( uiAbsPartIdx == 0 )
+    {
+      if( !pcCU->getSlice()->isIntra() )
+      {
+        if (pcCU->getSlice()->getPPS()->getTransquantBypassEnableFlag())
+        {
+          m_pcEntropyCoder->encodeCUTransquantBypassFlag( pcCU, 0, true );
+        }
+        m_pcEntropyCoder->encodeSkipFlag( pcCU, 0, true );
+        m_pcEntropyCoder->encodePredMode( pcCU, 0, true );
+      }
+      m_pcEntropyCoder  ->encodePartSize( pcCU, 0, pcCU->getDepth(0), true );
+
+      if (pcCU->isIntra(0) && pcCU->getPartitionSize(0) == SIZE_2Nx2N )
+      {
+        m_pcEntropyCoder->encodeIPCMInfo( pcCU, 0, true );
+
+        if ( pcCU->getIPCMFlag (0))
+        {
+          // IPCM CU: no prediction modes are signalled.
+          return;
+        }
+      }
+    }
+    // luma prediction mode
+    if( pcCU->getPartitionSize(0) == SIZE_2Nx2N )
+    {
+      if (uiAbsPartIdx==0)
+      {
+        m_pcEntropyCoder->encodeIntraDirModeLuma ( pcCU, 0 );
+      }
+    }
+    else
+    {
+      // NxN: one luma mode per quarter of the CU, coded at each quarter start.
+      UInt uiQNumParts = pcCU->getTotalNumPart() >> 2;
+      if (uiTrDepth>0 && (uiAbsPartIdx%uiQNumParts)==0) m_pcEntropyCoder->encodeIntraDirModeLuma ( pcCU, uiAbsPartIdx );
+    }
+  }
+
+  if( bChroma )
+  {
+    if( pcCU->getPartitionSize(0) == SIZE_2Nx2N || !enable4ChromaPUsInIntraNxNCU(pcCU->getPic()->getChromaFormat()))
+    {
+      if(uiAbsPartIdx==0)
+      {
+         m_pcEntropyCoder->encodeIntraDirModeChroma ( pcCU, uiAbsPartIdx );
+      }
+    }
+    else
+    {
+      // Chroma format allows four chroma PUs in an NxN intra CU.
+      UInt uiQNumParts = pcCU->getTotalNumPart() >> 2;
+      assert(uiTrDepth>0);
+      if ((uiAbsPartIdx%uiQNumParts)==0)
+      {
+        m_pcEntropyCoder->encodeIntraDirModeChroma ( pcCU, uiAbsPartIdx );
+      }
+    }
+  }
+}
+
+
+
+
+// Estimate the bit cost of the intra TU quadtree rooted at rTu: resets the
+// entropy coder's bit counter, codes the intra header, the subdivision/Cbf
+// flags and the selected components' coefficients, and returns the total
+// number of written bits. bRealCoeff selects committed vs. RD-temp buffers.
+UInt
+TEncSearch::xGetIntraBitsQT(TComTU &rTu,
+                            Bool         bLuma,
+                            Bool         bChroma,
+                            Bool         bRealCoeff /* just for test */ )
+{
+  TComDataCU* pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
+  const UInt uiTrDepth=rTu.GetTransformDepthRel();
+  m_pcEntropyCoder->resetBits();
+  xEncIntraHeader ( pcCU, uiTrDepth, uiAbsPartIdx, bLuma, bChroma );
+  xEncSubdivCbfQT ( rTu, bLuma, bChroma );
+
+  if( bLuma )
+  {
+    xEncCoeffQT   ( rTu, COMPONENT_Y,      bRealCoeff );
+  }
+  if( bChroma )
+  {
+    xEncCoeffQT   ( rTu, COMPONENT_Cb,  bRealCoeff );
+    xEncCoeffQT   ( rTu, COMPONENT_Cr,  bRealCoeff );
+  }
+  UInt   uiBits = m_pcEntropyCoder->getNumberOfWrittenBits();
+
+  return uiBits;
+}
+
+// Chroma-only variant of xGetIntraBitsQT: returns the bit cost of coding
+// just the given chroma component's coefficients for the TU tree at rTu.
+UInt TEncSearch::xGetIntraBitsQTChroma(TComTU &rTu,
+                                       ComponentID compID,
+                                       Bool         bRealCoeff /* just for test */ )
+{
+  m_pcEntropyCoder->resetBits();
+  xEncCoeffQT   ( rTu, compID,  bRealCoeff );
+  UInt   uiBits = m_pcEntropyCoder->getNumberOfWrittenBits();
+  return uiBits;
+}
+
+// Intra-code one TU for a single component: build the intra prediction,
+// form the residual, apply (optional) cross-component prediction, forward
+// transform + quantisation, inverse transform, reconstruct into the QT-temp
+// and picture reconstruction buffers, and accumulate SSE distortion into
+// ruiDist.
+// default0Save1Load2 controls prediction caching when the same block is
+// tested repeatedly (transform-skip on/off): 0 = predict normally,
+// 1 = predict and save into m_pSharedPredTransformSkip, 2 = reload the
+// saved prediction instead of re-predicting (disabled under DEBUG_STRING).
+Void TEncSearch::xIntraCodingTUBlock(       TComYuv*    pcOrgYuv,
+                                            TComYuv*    pcPredYuv,
+                                            TComYuv*    pcResiYuv,
+                                            Pel         resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE],
+                                      const Bool        checkCrossCPrediction,
+                                            Distortion& ruiDist,
+                                      const ComponentID compID,
+                                            TComTU&     rTu
+                                      DEBUG_STRING_FN_DECLARE(sDebug)
+                                           ,Int         default0Save1Load2
+                                     )
+{
+  if (!rTu.ProcessComponentSection(compID)) return;
+  const Bool       bIsLuma = isLuma(compID);
+  const TComRectangle &rect= rTu.getRect(compID);
+  TComDataCU *pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx=rTu.GetAbsPartIdxTU();
+
+  const UInt uiTrDepth=rTu.GetTransformDepthRelAdj(compID);
+  const UInt uiFullDepth   = rTu.GetTransformDepthTotal();
+  const UInt uiLog2TrSize  = rTu.GetLog2LumaTrSize();
+  const ChromaFormat chFmt = pcOrgYuv->getChromaFormat();
+  const ChannelType chType = toChannelType(compID);
+
+  const UInt    uiWidth           = rect.width;
+  const UInt    uiHeight          = rect.height;
+  const UInt    uiStride          = pcOrgYuv ->getStride (compID);
+        Pel*    piOrg             = pcOrgYuv ->getAddr( compID, uiAbsPartIdx );
+        Pel*    piPred            = pcPredYuv->getAddr( compID, uiAbsPartIdx );
+        Pel*    piResi            = pcResiYuv->getAddr( compID, uiAbsPartIdx );
+        Pel*    piReco            = pcPredYuv->getAddr( compID, uiAbsPartIdx );
+  const UInt    uiQTLayer           = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() - uiLog2TrSize;
+        Pel*    piRecQt           = m_pcQTTempTComYuv[ uiQTLayer ].getAddr( compID, uiAbsPartIdx );
+  const UInt    uiRecQtStride     = m_pcQTTempTComYuv[ uiQTLayer ].getStride(compID);
+  const UInt    uiZOrder            = pcCU->getZorderIdxInCtu() + uiAbsPartIdx;
+        Pel*    piRecIPred        = pcCU->getPic()->getPicYuvRec()->getAddr( compID, pcCU->getCtuRsAddr(), uiZOrder );
+        UInt    uiRecIPredStride  = pcCU->getPic()->getPicYuvRec()->getStride  ( compID );
+        TCoeff* pcCoeff           = m_ppcQTTempCoeff[compID][uiQTLayer] + rTu.getCoefficientOffset(compID);
+        Bool    useTransformSkip  = pcCU->getTransformSkip(uiAbsPartIdx, compID);
+
+#if ADAPTIVE_QP_SELECTION
+        TCoeff*    pcArlCoeff     = m_ppcQTTempArlCoeff[compID][ uiQTLayer ] + rTu.getCoefficientOffset(compID);
+#endif
+
+  // Resolve the actual prediction mode: chroma DM inherits the co-located
+  // luma mode, and 4:2:2 chroma remaps angles through a lookup table.
+  const UInt uiChPredMode         = pcCU->getIntraDir( chType, uiAbsPartIdx );
+  const UInt uiChCodedMode        = (uiChPredMode==DM_CHROMA_IDX && !bIsLuma) ? pcCU->getIntraDir(CHANNEL_TYPE_LUMA, getChromasCorrespondingPULumaIdx(uiAbsPartIdx, chFmt)) : uiChPredMode;
+  const UInt uiChFinalMode        = ((chFmt == CHROMA_422)       && !bIsLuma) ? g_chroma422IntraAngleMappingTable[uiChCodedMode] : uiChCodedMode;
+
+  const Int         blkX                                 = g_auiRasterToPelX[ g_auiZscanToRaster[ uiAbsPartIdx ] ];
+  const Int         blkY                                 = g_auiRasterToPelY[ g_auiZscanToRaster[ uiAbsPartIdx ] ];
+  const Int         bufferOffset                         = blkX + (blkY * MAX_CU_SIZE);
+        Pel  *const encoderLumaResidual                  = resiLuma[RESIDUAL_ENCODER_SIDE ] + bufferOffset;
+        Pel  *const reconstructedLumaResidual            = resiLuma[RESIDUAL_RECONSTRUCTED] + bufferOffset;
+  const Bool        bUseCrossCPrediction                 = isChroma(compID) && (uiChPredMode == DM_CHROMA_IDX) && checkCrossCPrediction;
+  const Bool        bUseReconstructedResidualForEstimate = m_pcEncCfg->getUseReconBasedCrossCPredictionEstimate();
+        Pel *const  lumaResidualForEstimate              = bUseReconstructedResidualForEstimate ? reconstructedLumaResidual : encoderLumaResidual;
+
+#ifdef DEBUG_STRING
+  const Int debugPredModeMask=DebugStringGetPredModeMask(MODE_INTRA);
+#endif
+
+  //===== init availability pattern =====
+  Bool  bAboveAvail = false;
+  Bool  bLeftAvail  = false;
+
+  DEBUG_STRING_NEW(sTemp)
+
+#ifndef DEBUG_STRING
+  if( default0Save1Load2 != 2 )
+#endif
+  {
+    const Bool bUseFilteredPredictions=TComPrediction::filteringIntraReferenceSamples(compID, uiChFinalMode, uiWidth, uiHeight, chFmt, pcCU->getSlice()->getSPS()->getDisableIntraReferenceSmoothing());
+
+    initAdiPatternChType( rTu, bAboveAvail, bLeftAvail, compID, bUseFilteredPredictions DEBUG_STRING_PASS_INTO(sDebug) );
+
+    //===== get prediction signal =====
+    predIntraAng( compID, uiChFinalMode, piOrg, uiStride, piPred, uiStride, rTu, bAboveAvail, bLeftAvail, bUseFilteredPredictions );
+
+    // save prediction
+    if( default0Save1Load2 == 1 )
+    {
+      Pel*  pPred   = piPred;
+      Pel*  pPredBuf = m_pSharedPredTransformSkip[compID];
+      Int k = 0;
+      for( UInt uiY = 0; uiY < uiHeight; uiY++ )
+      {
+        for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+        {
+          pPredBuf[ k ++ ] = pPred[ uiX ];
+        }
+        pPred += uiStride;
+      }
+    }
+  }
+#ifndef DEBUG_STRING
+  else
+  {
+    // load prediction
+    Pel*  pPred   = piPred;
+    Pel*  pPredBuf = m_pSharedPredTransformSkip[compID];
+    Int k = 0;
+    for( UInt uiY = 0; uiY < uiHeight; uiY++ )
+    {
+      for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+      {
+        pPred[ uiX ] = pPredBuf[ k ++ ];
+      }
+      pPred += uiStride;
+    }
+  }
+#endif
+
+  //===== get residual signal =====
+  {
+    // get residual
+    Pel*  pOrg    = piOrg;
+    Pel*  pPred   = piPred;
+    Pel*  pResi   = piResi;
+
+    for( UInt uiY = 0; uiY < uiHeight; uiY++ )
+    {
+      for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+      {
+        pResi[ uiX ] = pOrg[ uiX ] - pPred[ uiX ];
+      }
+
+      pOrg  += uiStride;
+      pResi += uiStride;
+      pPred += uiStride;
+    }
+  }
+
+  if (pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction())
+  {
+    if (bUseCrossCPrediction)
+    {
+      // Alpha of zero means cross-component prediction brings no gain here.
+      if (xCalcCrossComponentPredictionAlpha( rTu, compID, lumaResidualForEstimate, piResi, uiWidth, uiHeight, MAX_CU_SIZE, uiStride ) == 0) return;
+      TComTrQuant::crossComponentPrediction ( rTu, compID, reconstructedLumaResidual, piResi, piResi, uiWidth, uiHeight, MAX_CU_SIZE, uiStride, uiStride, false );
+    }
+    else if (isLuma(compID) && !bUseReconstructedResidualForEstimate)
+    {
+      xStoreCrossComponentPredictionResult( encoderLumaResidual, piResi, rTu, 0, 0, MAX_CU_SIZE, uiStride );
+    }
+  }
+
+  //===== transform and quantization =====
+  //--- init rate estimation arrays for RDOQ ---
+  if( useTransformSkip ? m_pcEncCfg->getUseRDOQTS() : m_pcEncCfg->getUseRDOQ() )
+  {
+    m_pcEntropyCoder->estimateBit( m_pcTrQuant->m_pcEstBitsSbac, uiWidth, uiHeight, chType );
+  }
+
+  //--- transform and quantization ---
+  TCoeff uiAbsSum = 0;
+  if (bIsLuma)
+  {
+    pcCU       ->setTrIdxSubParts ( uiTrDepth, uiAbsPartIdx, uiFullDepth );
+  }
+
+  const QpParam cQP(*pcCU, compID);
+
+#if RDOQ_CHROMA_LAMBDA
+  m_pcTrQuant->selectLambda     (compID);
+#endif
+
+  m_pcTrQuant->transformNxN     ( rTu, compID, piResi, uiStride, pcCoeff,
+#if ADAPTIVE_QP_SELECTION
+    pcArlCoeff,
+#endif
+    uiAbsSum, cQP
+    );
+
+  //--- inverse transform ---
+
+#ifdef DEBUG_STRING
+  if ( (uiAbsSum > 0) || (DebugOptionList::DebugString_InvTran.getInt()&debugPredModeMask) )
+#else
+  if ( uiAbsSum > 0 )
+#endif
+  {
+    m_pcTrQuant->invTransformNxN ( rTu, compID, piResi, uiStride, pcCoeff, cQP DEBUG_STRING_PASS_INTO_OPTIONAL(&sDebug, (DebugOptionList::DebugString_InvTran.getInt()&debugPredModeMask)) );
+  }
+  else
+  {
+    // All coefficients quantised to zero: clear both the coefficient
+    // buffer and the residual so reconstruction equals the prediction.
+    Pel* pResi = piResi;
+    memset( pcCoeff, 0, sizeof( TCoeff ) * uiWidth * uiHeight );
+    for( UInt uiY = 0; uiY < uiHeight; uiY++ )
+    {
+      memset( pResi, 0, sizeof( Pel ) * uiWidth );
+      pResi += uiStride;
+    }
+  }
+
+
+  //===== reconstruction =====
+  {
+    Pel* pPred      = piPred;
+    Pel* pResi      = piResi;
+    Pel* pReco      = piReco;
+    Pel* pRecQt     = piRecQt;
+    Pel* pRecIPred  = piRecIPred;
+    const UInt clipbd=g_bitDepth[chType];
+
+    if (pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction())
+    {
+      if (bUseCrossCPrediction)
+      {
+        TComTrQuant::crossComponentPrediction( rTu, compID, reconstructedLumaResidual, piResi, piResi, uiWidth, uiHeight, MAX_CU_SIZE, uiStride, uiStride, true );
+      }
+      else if (isLuma(compID))
+      {
+        xStoreCrossComponentPredictionResult( reconstructedLumaResidual, piResi, rTu, 0, 0, MAX_CU_SIZE, uiStride );
+      }
+    }
+
+ #ifdef DEBUG_STRING
+    std::stringstream ss(stringstream::out);
+    const Bool bDebugPred=((DebugOptionList::DebugString_Pred.getInt()&debugPredModeMask) && DEBUG_STRING_CHANNEL_CONDITION(compID));
+    const Bool bDebugResi=((DebugOptionList::DebugString_Resi.getInt()&debugPredModeMask) && DEBUG_STRING_CHANNEL_CONDITION(compID));
+    const Bool bDebugReco=((DebugOptionList::DebugString_Reco.getInt()&debugPredModeMask) && DEBUG_STRING_CHANNEL_CONDITION(compID));
+
+    if (bDebugPred || bDebugResi || bDebugReco)
+    {
+      ss << "###: " << "CompID: " << compID << " pred mode (ch/fin): " << uiChPredMode << "/" << uiChFinalMode << " absPartIdx: " << rTu.GetAbsPartIdxTU() << "\n";
+      for( UInt uiY = 0; uiY < uiHeight; uiY++ )
+      {
+        ss << "###: ";
+        if (bDebugPred)
+        {
+          ss << " - pred: ";
+          for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+          {
+            ss << pPred[ uiX ] << ", ";
+          }
+        }
+        if (bDebugResi) ss << " - resi: ";
+        for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+        {
+          if (bDebugResi) ss << pResi[ uiX ] << ", ";
+          pReco    [ uiX ] = Pel(ClipBD<Int>( Int(pPred[uiX]) + Int(pResi[uiX]), clipbd ));
+          pRecQt   [ uiX ] = pReco[ uiX ];
+          pRecIPred[ uiX ] = pReco[ uiX ];
+        }
+        if (bDebugReco)
+        {
+          ss << " - reco: ";
+          for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+          {
+            ss << pReco[ uiX ] << ", ";
+          }
+        }
+        pPred     += uiStride;
+        pResi     += uiStride;
+        pReco     += uiStride;
+        pRecQt    += uiRecQtStride;
+        pRecIPred += uiRecIPredStride;
+        ss << "\n";
+      }
+      DEBUG_STRING_APPEND(sDebug, ss.str())
+    }
+    else
+#endif
+    {
+
+      // Reconstruct: clip(pred + resi) into the prediction buffer, the
+      // QT-temp buffer and the picture's reconstruction (for later intra
+      // reference) simultaneously.
+      for( UInt uiY = 0; uiY < uiHeight; uiY++ )
+      {
+        for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+        {
+          pReco    [ uiX ] = Pel(ClipBD<Int>( Int(pPred[uiX]) + Int(pResi[uiX]), clipbd ));
+          pRecQt   [ uiX ] = pReco[ uiX ];
+          pRecIPred[ uiX ] = pReco[ uiX ];
+        }
+        pPred     += uiStride;
+        pResi     += uiStride;
+        pReco     += uiStride;
+        pRecQt    += uiRecQtStride;
+        pRecIPred += uiRecIPredStride;
+      }
+    }
+  }
+
+  //===== update distortion =====
+  ruiDist += m_pcRdCost->getDistPart( g_bitDepth[chType], piReco, uiStride, piOrg, uiStride, uiWidth, uiHeight, compID );
+}
+
+
+
+
+
+// Recursive RD search over the intra transform-unit quadtree. For the TU
+// at rTu it evaluates coding the block whole ("full", optionally testing
+// transform skip on/off) against splitting into four sub-TUs, compares the
+// two RD costs, restores the better entropy-coder state/Cbf/reconstruction,
+// and accumulates luma/chroma distortion and cost into ruiDistY/ruiDistC/
+// dRDCost. SBAC contexts are checkpointed in CI_QT_TRAFO_ROOT (pre-coding)
+// and CI_QT_TRAFO_TEST / CI_TEMP_BEST (candidate results).
+Void
+TEncSearch::xRecurIntraCodingQT(Bool        bLumaOnly,
+                                TComYuv*    pcOrgYuv,
+                                TComYuv*    pcPredYuv,
+                                TComYuv*    pcResiYuv,
+                                Pel         resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE],
+                                Distortion& ruiDistY,
+                                Distortion& ruiDistC,
+#if HHI_RQT_INTRA_SPEEDUP
+                                Bool        bCheckFirst,
+#endif
+                                Double&     dRDCost,
+                                TComTU&     rTu
+                                DEBUG_STRING_FN_DECLARE(sDebug))
+{
+  TComDataCU   *pcCU          = rTu.getCU();
+  const UInt    uiAbsPartIdx  = rTu.GetAbsPartIdxTU();
+  const UInt    uiFullDepth   = rTu.GetTransformDepthTotal();
+  const UInt    uiTrDepth     = rTu.GetTransformDepthRel();
+  const UInt    uiLog2TrSize  = rTu.GetLog2LumaTrSize();
+        Bool    bCheckFull    = ( uiLog2TrSize  <= pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() );
+        Bool    bCheckSplit   = ( uiLog2TrSize  >  pcCU->getQuadtreeTULog2MinSizeInCU(uiAbsPartIdx) );
+  const UInt    numValidComp  = (bLumaOnly) ? 1 : pcOrgYuv->getNumberValidComponents();
+
+        Pel     resiLumaSplit [NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE];
+        Pel     resiLumaSingle[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE];
+
+        Bool    bMaintainResidual[NUMBER_OF_STORED_RESIDUAL_TYPES];
+        for (UInt residualTypeIndex = 0; residualTypeIndex < NUMBER_OF_STORED_RESIDUAL_TYPES; residualTypeIndex++)
+        {
+          bMaintainResidual[residualTypeIndex] = true; //assume true unless specified otherwise
+        }
+
+        bMaintainResidual[RESIDUAL_ENCODER_SIDE] = !(m_pcEncCfg->getUseReconBasedCrossCPredictionEstimate());
+
+#if HHI_RQT_INTRA_SPEEDUP
+  Int maxTuSize = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize();
+  Int isIntraSlice = (pcCU->getSlice()->getSliceType() == I_SLICE);
+  // don't check split if TU size is less or equal to max TU size
+  Bool noSplitIntraMaxTuSize = bCheckFull;
+  if(m_pcEncCfg->getRDpenalty() && ! isIntraSlice)
+  {
+    // in addition don't check split if TU size is less or equal to 16x16 TU size for non-intra slice
+    noSplitIntraMaxTuSize = ( uiLog2TrSize  <= min(maxTuSize,4) );
+
+    // if maximum RD-penalty don't check TU size 32x32
+    if(m_pcEncCfg->getRDpenalty()==2)
+    {
+      bCheckFull    = ( uiLog2TrSize  <= min(maxTuSize,4));
+    }
+  }
+  if( bCheckFirst && noSplitIntraMaxTuSize )
+
+  {
+    bCheckSplit = false;
+  }
+#else
+  Int maxTuSize = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize();
+  Int isIntraSlice = (pcCU->getSlice()->getSliceType() == I_SLICE);
+  // if maximum RD-penalty don't check TU size 32x32
+  if((m_pcEncCfg->getRDpenalty()==2)  && !isIntraSlice)
+  {
+    bCheckFull    = ( uiLog2TrSize  <= min(maxTuSize,4));
+  }
+#endif
+  Double     dSingleCost                        = MAX_DOUBLE;
+  Distortion uiSingleDist[MAX_NUM_CHANNEL_TYPE] = {0,0};
+  UInt       uiSingleCbf[MAX_NUM_COMPONENT]     = {0,0,0};
+  Bool       checkTransformSkip  = pcCU->getSlice()->getPPS()->getUseTransformSkip();
+  Int        bestModeId[MAX_NUM_COMPONENT] = { 0, 0, 0};
+  checkTransformSkip           &= TUCompRectHasAssociatedTransformSkipFlag(rTu.getRect(COMPONENT_Y), pcCU->getSlice()->getPPS()->getTransformSkipLog2MaxSize());
+  checkTransformSkip           &= (!pcCU->getCUTransquantBypass(0));
+
+  if ( m_pcEncCfg->getUseTransformSkipFast() )
+  {
+    checkTransformSkip       &= (pcCU->getPartitionSize(uiAbsPartIdx)==SIZE_NxN);
+  }
+
+  if( bCheckFull )
+  {
+    if(checkTransformSkip == true)
+    {
+      //----- store original entropy coding status -----
+      m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[ uiFullDepth ][ CI_QT_TRAFO_ROOT ] );
+
+      Distortion singleDistTmp[MAX_NUM_CHANNEL_TYPE]  = { 0, 0 };
+      UInt       singleCbfTmp[MAX_NUM_COMPONENT]      = { 0, 0, 0 };
+      Double     singleCostTmp                        = 0;
+      Int        firstCheckId                         = 0;
+
+      // modeId 0 = regular transform, 1 = transform skip; the prediction is
+      // computed once (save) and reused (load) for the second pass.
+      for(Int modeId = firstCheckId; modeId < 2; modeId ++)
+      {
+        DEBUG_STRING_NEW(sModeString)
+        Int  default0Save1Load2 = 0;
+        singleDistTmp[0]=singleDistTmp[1]=0;
+        if(modeId == firstCheckId)
+        {
+          default0Save1Load2 = 1;
+        }
+        else
+        {
+          default0Save1Load2 = 2;
+        }
+
+        for(UInt ch=COMPONENT_Y; ch<numValidComp; ch++)
+        {
+          const ComponentID compID = ComponentID(ch);
+          if (rTu.ProcessComponentSection(compID))
+          {
+            const UInt totalAdjustedDepthChan = rTu.GetTransformDepthTotalAdj(compID);
+            pcCU->setTransformSkipSubParts ( modeId, compID, uiAbsPartIdx, totalAdjustedDepthChan );
+
+            xIntraCodingTUBlock( pcOrgYuv, pcPredYuv, pcResiYuv, resiLumaSingle, false, singleDistTmp[toChannelType(compID)], compID, rTu DEBUG_STRING_PASS_INTO(sModeString), default0Save1Load2 );
+          }
+          singleCbfTmp[compID] = pcCU->getCbf( uiAbsPartIdx, compID, uiTrDepth );
+        }
+        //----- determine rate and r-d cost -----
+        if(modeId == 1 && singleCbfTmp[COMPONENT_Y] == 0)
+        {
+          //In order not to code TS flag when cbf is zero, the case for TS with cbf being zero is forbidden.
+          singleCostTmp = MAX_DOUBLE;
+        }
+        else
+        {
+          UInt uiSingleBits = xGetIntraBitsQT( rTu, true, !bLumaOnly, false );
+          singleCostTmp     = m_pcRdCost->calcRdCost( uiSingleBits, singleDistTmp[CHANNEL_TYPE_LUMA] + singleDistTmp[CHANNEL_TYPE_CHROMA] );
+        }
+        if(singleCostTmp < dSingleCost)
+        {
+          DEBUG_STRING_SWAP(sDebug, sModeString)
+          dSingleCost   = singleCostTmp;
+          uiSingleDist[CHANNEL_TYPE_LUMA] = singleDistTmp[CHANNEL_TYPE_LUMA];
+          uiSingleDist[CHANNEL_TYPE_CHROMA] = singleDistTmp[CHANNEL_TYPE_CHROMA];
+          for (UInt ch=0; ch<MAX_NUM_COMPONENT; ch++)
+            uiSingleCbf[ch] = singleCbfTmp[ch];
+
+          bestModeId[COMPONENT_Y] = modeId;
+          if(bestModeId[COMPONENT_Y] == firstCheckId)
+          {
+            // Only the first candidate needs its result cached; if the
+            // second wins, its result is already in the buffers.
+            xStoreIntraResultQT(COMPONENT_Y, bLumaOnly?COMPONENT_Y:COMPONENT_Cr, rTu );
+            m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[ uiFullDepth ][ CI_TEMP_BEST ] );
+          }
+
+          if (pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction())
+          {
+            const Int xOffset = rTu.getRect( COMPONENT_Y ).x0;
+            const Int yOffset = rTu.getRect( COMPONENT_Y ).y0;
+            for (UInt storedResidualIndex = 0; storedResidualIndex < NUMBER_OF_STORED_RESIDUAL_TYPES; storedResidualIndex++)
+            {
+              if (bMaintainResidual[storedResidualIndex])
+              {
+                xStoreCrossComponentPredictionResult(resiLuma[storedResidualIndex], resiLumaSingle[storedResidualIndex], rTu, xOffset, yOffset, MAX_CU_SIZE, MAX_CU_SIZE);
+              }
+            }
+          }
+        }
+        if (modeId == firstCheckId)
+        {
+          m_pcRDGoOnSbacCoder->load ( m_pppcRDSbacCoder[ uiFullDepth ][ CI_QT_TRAFO_ROOT ] );
+        }
+      }
+
+      for(UInt ch=COMPONENT_Y; ch<numValidComp; ch++)
+      {
+        const ComponentID compID=ComponentID(ch);
+        if (rTu.ProcessComponentSection(compID))
+        {
+          const UInt totalAdjustedDepthChan   = rTu.GetTransformDepthTotalAdj(compID);
+          pcCU ->setTransformSkipSubParts ( bestModeId[COMPONENT_Y], compID, uiAbsPartIdx, totalAdjustedDepthChan );
+        }
+      }
+
+      if(bestModeId[COMPONENT_Y] == firstCheckId)
+      {
+        // Restore the cached first-candidate reconstruction, Cbfs and
+        // entropy-coder state.
+        xLoadIntraResultQT(COMPONENT_Y, bLumaOnly?COMPONENT_Y:COMPONENT_Cr, rTu );
+        for(UInt ch=COMPONENT_Y; ch< numValidComp; ch++)
+        {
+          const ComponentID compID=ComponentID(ch);
+          if (rTu.ProcessComponentSection(compID))
+            pcCU->setCbfSubParts  ( uiSingleCbf[compID] << uiTrDepth, compID, uiAbsPartIdx, rTu.GetTransformDepthTotalAdj(compID) );
+        }
+
+        m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[ uiFullDepth ][ CI_TEMP_BEST ] );
+      }
+
+      if( !bLumaOnly )
+      {
+        bestModeId[COMPONENT_Cb] = bestModeId[COMPONENT_Cr] = bestModeId[COMPONENT_Y];
+        if (rTu.ProcessComponentSection(COMPONENT_Cb) && bestModeId[COMPONENT_Y] == 1)
+        {
+          //In order not to code TS flag when cbf is zero, the case for TS with cbf being zero is forbidden.
+          for (UInt ch=COMPONENT_Cb; ch<numValidComp; ch++)
+          {
+            if (uiSingleCbf[ch] == 0)
+            {
+              const ComponentID compID=ComponentID(ch);
+              const UInt totalAdjustedDepthChan = rTu.GetTransformDepthTotalAdj(compID);
+              pcCU ->setTransformSkipSubParts ( 0, compID, uiAbsPartIdx, totalAdjustedDepthChan);
+              bestModeId[ch] = 0;
+            }
+          }
+        }
+      }
+    }
+    else
+    {
+      //----- store original entropy coding status -----
+      if( bCheckSplit )
+      {
+        m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[ uiFullDepth ][ CI_QT_TRAFO_ROOT ] );
+      }
+      //----- code luma/chroma block with given intra prediction mode and store Cbf-----
+      dSingleCost   = 0.0;
+      for (UInt ch=COMPONENT_Y; ch<numValidComp; ch++)
+      {
+        const ComponentID compID = ComponentID(ch);
+
+        if (rTu.ProcessComponentSection(compID))
+        {
+          const UInt totalAdjustedDepthChan   = rTu.GetTransformDepthTotalAdj(compID);
+          pcCU ->setTransformSkipSubParts ( 0, compID, uiAbsPartIdx, totalAdjustedDepthChan );
+        }
+
+        xIntraCodingTUBlock( pcOrgYuv, pcPredYuv, pcResiYuv, resiLumaSingle, false, uiSingleDist[toChannelType(compID)], compID, rTu DEBUG_STRING_PASS_INTO(sDebug));
+
+        if( bCheckSplit )
+        {
+          uiSingleCbf[compID] = pcCU->getCbf( uiAbsPartIdx, compID, uiTrDepth );
+        }
+      }
+      //----- determine rate and r-d cost -----
+      UInt uiSingleBits = xGetIntraBitsQT( rTu, true, !bLumaOnly, false );
+
+      if(m_pcEncCfg->getRDpenalty() && (uiLog2TrSize==5) && !isIntraSlice)
+      {
+        // RD penalty: weight 32x32 TU bits by 4 to discourage that size.
+        uiSingleBits=uiSingleBits*4;
+      }
+
+      dSingleCost       = m_pcRdCost->calcRdCost( uiSingleBits, uiSingleDist[CHANNEL_TYPE_LUMA] + uiSingleDist[CHANNEL_TYPE_CHROMA] );
+
+      if (pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction())
+      {
+        const Int xOffset = rTu.getRect( COMPONENT_Y ).x0;
+        const Int yOffset = rTu.getRect( COMPONENT_Y ).y0;
+        for (UInt storedResidualIndex = 0; storedResidualIndex < NUMBER_OF_STORED_RESIDUAL_TYPES; storedResidualIndex++)
+        {
+          if (bMaintainResidual[storedResidualIndex])
+          {
+            xStoreCrossComponentPredictionResult(resiLuma[storedResidualIndex], resiLumaSingle[storedResidualIndex], rTu, xOffset, yOffset, MAX_CU_SIZE, MAX_CU_SIZE);
+          }
+        }
+      }
+    }
+  }
+
+  if( bCheckSplit )
+  {
+    //----- store full entropy coding status, load original entropy coding status -----
+    if( bCheckFull )
+    {
+      m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[ uiFullDepth ][ CI_QT_TRAFO_TEST ] );
+      m_pcRDGoOnSbacCoder->load ( m_pppcRDSbacCoder[ uiFullDepth ][ CI_QT_TRAFO_ROOT ] );
+    }
+    else
+    {
+      m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[ uiFullDepth ][ CI_QT_TRAFO_ROOT ] );
+    }
+    //----- code splitted block -----
+    Double     dSplitCost                         = 0.0;
+    Distortion uiSplitDist[MAX_NUM_CHANNEL_TYPE]  = {0,0};
+    UInt       uiSplitCbf[MAX_NUM_COMPONENT]      = {0,0,0};
+
+    TComTURecurse tuRecurseChild(rTu, false);
+    DEBUG_STRING_NEW(sSplit)
+    do
+    {
+      DEBUG_STRING_NEW(sChild)
+#if HHI_RQT_INTRA_SPEEDUP
+      xRecurIntraCodingQT( bLumaOnly, pcOrgYuv, pcPredYuv, pcResiYuv, resiLumaSplit, uiSplitDist[0], uiSplitDist[1], bCheckFirst, dSplitCost, tuRecurseChild DEBUG_STRING_PASS_INTO(sChild) );
+#else
+      xRecurIntraCodingQT( bLumaOnly, pcOrgYuv, pcPredYuv, pcResiYuv, resiLumaSplit, uiSplitDist[0], uiSplitDist[1], dSplitCost, tuRecurseChild DEBUG_STRING_PASS_INTO(sChild) );
+#endif
+      DEBUG_STRING_APPEND(sSplit, sChild)
+      for(UInt ch=0; ch<numValidComp; ch++)
+      {
+        uiSplitCbf[ch] |= pcCU->getCbf( tuRecurseChild.GetAbsPartIdxTU(), ComponentID(ch), tuRecurseChild.GetTransformDepthRel() );
+      }
+    } while (tuRecurseChild.nextSection(rTu) );
+
+    // Propagate child Cbfs up: set this depth's Cbf bit across all
+    // partitions covered by this TU.
+    UInt    uiPartsDiv     = rTu.GetAbsPartIdxNumParts();
+    for(UInt ch=COMPONENT_Y; ch<numValidComp; ch++)
+    {
+      if (uiSplitCbf[ch])
+      {
+        const UInt flag=1<<uiTrDepth;
+        const ComponentID compID=ComponentID(ch);
+        UChar *pBase=pcCU->getCbf( compID );
+        for( UInt uiOffs = 0; uiOffs < uiPartsDiv; uiOffs++ )
+        {
+          pBase[ uiAbsPartIdx + uiOffs ] |= flag;
+        }
+      }
+    }
+    //----- restore context states -----
+    m_pcRDGoOnSbacCoder->load ( m_pppcRDSbacCoder[ uiFullDepth ][ CI_QT_TRAFO_ROOT ] );
+    
+    //----- determine rate and r-d cost -----
+    UInt uiSplitBits = xGetIntraBitsQT( rTu, true, !bLumaOnly, false );
+    dSplitCost       = m_pcRdCost->calcRdCost( uiSplitBits, uiSplitDist[CHANNEL_TYPE_LUMA] + uiSplitDist[CHANNEL_TYPE_CHROMA] );
+
+    //===== compare and set best =====
+    if( dSplitCost < dSingleCost )
+    {
+      //--- update cost ---
+      DEBUG_STRING_SWAP(sSplit, sDebug)
+      ruiDistY += uiSplitDist[CHANNEL_TYPE_LUMA];
+      ruiDistC += uiSplitDist[CHANNEL_TYPE_CHROMA];
+      dRDCost  += dSplitCost;
+
+      if (pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction())
+      {
+        const Int xOffset = rTu.getRect( COMPONENT_Y ).x0;
+        const Int yOffset = rTu.getRect( COMPONENT_Y ).y0;
+        for (UInt storedResidualIndex = 0; storedResidualIndex < NUMBER_OF_STORED_RESIDUAL_TYPES; storedResidualIndex++)
+        {
+          if (bMaintainResidual[storedResidualIndex])
+          {
+            xStoreCrossComponentPredictionResult(resiLuma[storedResidualIndex], resiLumaSplit[storedResidualIndex], rTu, xOffset, yOffset, MAX_CU_SIZE, MAX_CU_SIZE);
+          }
+        }
+      }
+
+      return;
+    }
+
+    // Split lost: re-install the "full" coding result.
+    //----- set entropy coding status -----
+    m_pcRDGoOnSbacCoder->load ( m_pppcRDSbacCoder[ uiFullDepth ][ CI_QT_TRAFO_TEST ] );
+
+    //--- set transform index and Cbf values ---
+    pcCU->setTrIdxSubParts( uiTrDepth, uiAbsPartIdx, uiFullDepth );
+    for(UInt ch=0; ch<numValidComp; ch++)
+    {
+      const ComponentID compID=ComponentID(ch);
+      const TComRectangle &tuRect=rTu.getRect(compID);
+      const UInt totalAdjustedDepthChan   = rTu.GetTransformDepthTotalAdj(compID);
+      pcCU->setCbfSubParts  ( uiSingleCbf[compID] << uiTrDepth, compID, uiAbsPartIdx, totalAdjustedDepthChan );
+      pcCU ->setTransformSkipSubParts  ( bestModeId[compID], compID, uiAbsPartIdx, totalAdjustedDepthChan );
+
+      //--- set reconstruction for next intra prediction blocks ---
+      const UInt  uiQTLayer   = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() - uiLog2TrSize;
+      const UInt  uiZOrder    = pcCU->getZorderIdxInCtu() + uiAbsPartIdx;
+      const UInt  uiWidth     = tuRect.width;
+      const UInt  uiHeight    = tuRect.height;
+      Pel*  piSrc       = m_pcQTTempTComYuv[ uiQTLayer ].getAddr( compID, uiAbsPartIdx );
+      UInt  uiSrcStride = m_pcQTTempTComYuv[ uiQTLayer ].getStride  ( compID );
+      Pel*  piDes       = pcCU->getPic()->getPicYuvRec()->getAddr( compID, pcCU->getCtuRsAddr(), uiZOrder );
+      UInt  uiDesStride = pcCU->getPic()->getPicYuvRec()->getStride  ( compID );
+
+      for( UInt uiY = 0; uiY < uiHeight; uiY++, piSrc += uiSrcStride, piDes += uiDesStride )
+      {
+        for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+        {
+          piDes[ uiX ] = piSrc[ uiX ];
+        }
+      }
+    }
+  }
+  ruiDistY += uiSingleDist[CHANNEL_TYPE_LUMA];
+  ruiDistC += uiSingleDist[CHANNEL_TYPE_CHROMA];
+  dRDCost  += dSingleCost;
+}
+
+
+// Commits the winning intra result for this TU into the CU / reconstruction:
+// copies the transform coefficients (and, under ADAPTIVE_QP_SELECTION, the ARL
+// coefficients) from the per-QT-layer temp buffers into the CU's coefficient
+// arrays, and copies the temp reconstruction into pcRecoYuv. Recurses into the
+// four child TUs when the stored transform index indicates a deeper split.
+// bLumaOnly restricts the copy to the luma component.
+Void
+TEncSearch::xSetIntraResultQT(Bool        bLumaOnly,
+                              TComYuv*    pcRecoYuv,
+                              TComTU     &rTu)
+{
+  TComDataCU *pcCU        = rTu.getCU();
+  const UInt uiTrDepth    = rTu.GetTransformDepthRel();
+  const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
+  UInt uiTrMode     = pcCU->getTransformIdx( uiAbsPartIdx );
+  // Leaf TU of the chosen transform tree: copy results at this level.
+  if(  uiTrMode == uiTrDepth )
+  {
+    UInt uiLog2TrSize = rTu.GetLog2LumaTrSize();
+    UInt uiQTLayer    = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() - uiLog2TrSize;
+
+    // Chroma may not be coded at this TU depth (e.g. 4x4 luma TUs share one
+    // chroma TU); in that case only luma is processed here.
+    Bool bSkipChroma = !rTu.ProcessChannelSection(CHANNEL_TYPE_CHROMA);
+
+    //===== copy transform coefficients =====
+
+    const UInt numChannelsToProcess = (bLumaOnly || bSkipChroma) ? 1 : ::getNumberValidComponents(pcCU->getPic()->getChromaFormat());
+    for (UInt ch=0; ch<numChannelsToProcess; ch++)
+    {
+      const ComponentID compID = ComponentID(ch);
+      const TComRectangle &tuRect=rTu.getRect(compID);
+      const UInt coeffOffset = rTu.getCoefficientOffset(compID);
+      const UInt numCoeffInBlock = tuRect.width * tuRect.height;
+
+      if (numCoeffInBlock!=0)
+      {
+        const TCoeff* srcCoeff = m_ppcQTTempCoeff[compID][uiQTLayer] + coeffOffset;
+        TCoeff* destCoeff      = pcCU->getCoeff(compID) + coeffOffset;
+        ::memcpy( destCoeff, srcCoeff, sizeof(TCoeff)*numCoeffInBlock );
+#if ADAPTIVE_QP_SELECTION
+        const TCoeff* srcArlCoeff = m_ppcQTTempArlCoeff[compID][ uiQTLayer ] + coeffOffset;
+        TCoeff* destArlCoeff      = pcCU->getArlCoeff (compID)               + coeffOffset;
+        ::memcpy( destArlCoeff, srcArlCoeff, sizeof( TCoeff ) * numCoeffInBlock );
+#endif
+        // Reconstruction for this component accompanies the coefficients.
+        m_pcQTTempTComYuv[ uiQTLayer ].copyPartToPartComponent( compID, pcRecoYuv, uiAbsPartIdx, tuRect.width, tuRect.height );
+      }
+    } // End of channel loop
+
+  }
+  else
+  {
+    // Transform tree is split below this level: recurse into the 4 children.
+    TComTURecurse tuRecurseChild(rTu, false);
+    do
+    {
+      xSetIntraResultQT( bLumaOnly, pcRecoYuv, tuRecurseChild );
+    } while (tuRecurseChild.nextSection(rTu));
+  }
+}
+
+
+// Snapshots the current TU's result into the single-TU scratch buffers so a
+// later candidate mode can be tried and the best one restored afterwards
+// (see xLoadIntraResultQT). For each component in [first, lastIncl] it copies
+// the coefficients (and ARL coefficients under ADAPTIVE_QP_SELECTION) out of
+// the QT-layer temp buffers and the reconstruction into
+// m_pcQTTempTransformSkipTComYuv.
+Void
+TEncSearch::xStoreIntraResultQT(const ComponentID first,
+                                const ComponentID lastIncl,
+                                      TComTU &rTu )
+{
+  TComDataCU *pcCU=rTu.getCU();
+  const UInt uiTrDepth = rTu.GetTransformDepthRel();
+  const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
+  const UInt uiTrMode     = pcCU->getTransformIdx( uiAbsPartIdx );
+  // Callers only invoke this on leaf TUs; the assert documents that the
+  // luma shortcut (first==COMPONENT_Y) must coincide with uiTrMode==uiTrDepth.
+  if (  first==COMPONENT_Y || uiTrMode == uiTrDepth )
+  {
+    assert(uiTrMode == uiTrDepth);
+    const UInt uiLog2TrSize = rTu.GetLog2LumaTrSize();
+    const UInt uiQTLayer    = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() - uiLog2TrSize;
+
+
+    for(UInt compID_=first; compID_<=lastIncl; compID_++)
+    {
+      ComponentID compID=ComponentID(compID_);
+      // Skip components not coded in this TU section (e.g. chroma of small TUs).
+      if (rTu.ProcessComponentSection(compID))
+      {
+        const TComRectangle &tuRect=rTu.getRect(compID);
+
+        //===== copy transform coefficients =====
+        const UInt uiNumCoeff    = tuRect.width * tuRect.height;
+        TCoeff* pcCoeffSrc = m_ppcQTTempCoeff[compID] [ uiQTLayer ] + rTu.getCoefficientOffset(compID);
+        TCoeff* pcCoeffDst = m_pcQTTempTUCoeff[compID];
+
+        ::memcpy( pcCoeffDst, pcCoeffSrc, sizeof( TCoeff ) * uiNumCoeff );
+#if ADAPTIVE_QP_SELECTION
+        TCoeff* pcArlCoeffSrc = m_ppcQTTempArlCoeff[compID] [ uiQTLayer ] + rTu.getCoefficientOffset(compID);
+        TCoeff* pcArlCoeffDst = m_ppcQTTempTUArlCoeff[compID];
+        ::memcpy( pcArlCoeffDst, pcArlCoeffSrc, sizeof( TCoeff ) * uiNumCoeff );
+#endif
+        //===== copy reconstruction =====
+        m_pcQTTempTComYuv[ uiQTLayer ].copyPartToPartComponent( compID, &m_pcQTTempTransformSkipTComYuv, uiAbsPartIdx, tuRect.width, tuRect.height );
+      }
+    }
+  }
+}
+
+
+// Inverse of xStoreIntraResultQT: restores the snapshotted best-mode result
+// from the single-TU scratch buffers back into the QT-layer temp buffers,
+// and additionally writes the restored reconstruction into the picture's
+// reconstruction buffer so that subsequent intra prediction of neighbouring
+// blocks reads the correct samples.
+Void
+TEncSearch::xLoadIntraResultQT(const ComponentID first,
+                               const ComponentID lastIncl,
+                                     TComTU &rTu)
+{
+  TComDataCU *pcCU=rTu.getCU();
+  const UInt uiTrDepth = rTu.GetTransformDepthRel();
+  const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
+  const UInt uiTrMode     = pcCU->getTransformIdx( uiAbsPartIdx );
+  // Same leaf-TU guard as xStoreIntraResultQT.
+  if (  first==COMPONENT_Y || uiTrMode == uiTrDepth )
+  {
+    assert(uiTrMode == uiTrDepth);
+    const UInt uiLog2TrSize = rTu.GetLog2LumaTrSize();
+    const UInt uiQTLayer    = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() - uiLog2TrSize;
+    const UInt uiZOrder     = pcCU->getZorderIdxInCtu() + uiAbsPartIdx;
+
+    for(UInt compID_=first; compID_<=lastIncl; compID_++)
+    {
+      ComponentID compID=ComponentID(compID_);
+      if (rTu.ProcessComponentSection(compID))
+      {
+        const TComRectangle &tuRect=rTu.getRect(compID);
+
+        //===== copy transform coefficients =====
+        const UInt uiNumCoeff = tuRect.width * tuRect.height;
+        TCoeff* pcCoeffDst = m_ppcQTTempCoeff[compID] [ uiQTLayer ] + rTu.getCoefficientOffset(compID);
+        TCoeff* pcCoeffSrc = m_pcQTTempTUCoeff[compID];
+
+        ::memcpy( pcCoeffDst, pcCoeffSrc, sizeof( TCoeff ) * uiNumCoeff );
+#if ADAPTIVE_QP_SELECTION
+        TCoeff* pcArlCoeffDst = m_ppcQTTempArlCoeff[compID] [ uiQTLayer ] + rTu.getCoefficientOffset(compID);
+        TCoeff* pcArlCoeffSrc = m_ppcQTTempTUArlCoeff[compID];
+        ::memcpy( pcArlCoeffDst, pcArlCoeffSrc, sizeof( TCoeff ) * uiNumCoeff );
+#endif
+        //===== copy reconstruction =====
+        m_pcQTTempTransformSkipTComYuv.copyPartToPartComponent( compID, &m_pcQTTempTComYuv[ uiQTLayer ], uiAbsPartIdx, tuRect.width, tuRect.height );
+
+        // Also mirror the restored reconstruction into the picture buffer so
+        // neighbouring intra predictions use the winning mode's samples.
+        Pel*    piRecIPred        = pcCU->getPic()->getPicYuvRec()->getAddr( compID, pcCU->getCtuRsAddr(), uiZOrder );
+        UInt    uiRecIPredStride  = pcCU->getPic()->getPicYuvRec()->getStride (compID);
+        Pel*    piRecQt           = m_pcQTTempTComYuv[ uiQTLayer ].getAddr( compID, uiAbsPartIdx );
+        UInt    uiRecQtStride     = m_pcQTTempTComYuv[ uiQTLayer ].getStride  (compID);
+        UInt    uiWidth           = tuRect.width;
+        UInt    uiHeight          = tuRect.height;
+        Pel* pRecQt               = piRecQt;
+        Pel* pRecIPred            = piRecIPred;
+        for( UInt uiY = 0; uiY < uiHeight; uiY++ )
+        {
+          for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+          {
+            pRecIPred[ uiX ] = pRecQt   [ uiX ];
+          }
+          pRecQt    += uiRecQtStride;
+          pRecIPred += uiRecIPredStride;
+        }
+      }
+    }
+  }
+}
+
+// Copies one luma-TU-sized window of residual samples from pResiSrc to
+// pResiDst. The window is the luma TU rectangle of rTu, placed at
+// (xOffset, yOffset) within both buffers; strideDst/strideSrc are the
+// row strides of the destination and source buffers respectively.
+// Used to keep the stored luma residuals (for cross-component prediction
+// estimation) in sync with the currently chosen coding decision.
+Void
+TEncSearch::xStoreCrossComponentPredictionResult(       Pel    *pResiDst,
+                                                  const Pel    *pResiSrc,
+                                                        TComTU &rTu,
+                                                  const Int     xOffset,
+                                                  const Int     yOffset,
+                                                  const Int     strideDst,
+                                                  const Int     strideSrc )
+{
+  const Pel *pSrc = pResiSrc + yOffset * strideSrc + xOffset;
+        Pel *pDst = pResiDst + yOffset * strideDst + xOffset;
+
+  // Row-wise copy of the luma TU rectangle.
+  for( Int y = 0; y < rTu.getRect( COMPONENT_Y ).height; y++ )
+  {
+    ::memcpy( pDst, pSrc, sizeof(Pel) * rTu.getRect( COMPONENT_Y ).width );
+    pDst += strideDst;
+    pSrc += strideSrc;
+  }
+}
+
+// Estimates the cross-component prediction scaling factor (alpha) for one
+// chroma TU from the co-located luma residual (piResiL) and chroma residual
+// (piResiC): a least-squares fit alpha = sum(L*C)/sum(L*L), scaled by 16,
+// clipped to [-16,16] and then quantised to the nearest allowed magnitude
+// {0,1,2,4,8} via alphaQuant. The result is written into the CU's alpha
+// array for the TU's partition range and also returned.
+Char
+TEncSearch::xCalcCrossComponentPredictionAlpha(       TComTU &rTu,
+                                                const ComponentID compID,
+                                                const Pel*        piResiL,
+                                                const Pel*        piResiC,
+                                                const Int         width,
+                                                const Int         height,
+                                                const Int         strideL,
+                                                const Int         strideC )
+{
+  const Pel *pResiL = piResiL;
+  const Pel *pResiC = piResiC;
+
+        TComDataCU *pCU = rTu.getCU();
+  const Int  absPartIdx = rTu.GetAbsPartIdxTU( compID );
+  // Luma may have a higher bit depth than chroma; scale luma down to match.
+  const Int diffBitDepth = pCU->getSlice()->getSPS()->getDifferentialLumaChromaBitDepth();
+
+  Char alpha = 0;
+  Int SSxy  = 0;  // sum of luma*chroma residual products
+  Int SSxx  = 0;  // sum of squared luma residuals
+
+  for( UInt uiY = 0; uiY < height; uiY++ )
+  {
+    for( UInt uiX = 0; uiX < width; uiX++ )
+    {
+      const Pel scaledResiL = rightShift( pResiL[ uiX ], diffBitDepth );
+      SSxy += ( scaledResiL * pResiC[ uiX ] );
+      SSxx += ( scaledResiL * scaledResiL   );
+    }
+
+    pResiL += strideL;
+    pResiC += strideC;
+  }
+
+  // SSxx == 0 means no luma energy: alpha stays 0 (prediction disabled).
+  if( SSxx != 0 )
+  {
+    Double dAlpha = SSxy / Double( SSxx );
+    alpha = Char(Clip3<Int>(-16, 16, (Int)(dAlpha * 16)));
+
+    // Quantise |alpha| to the signallable magnitudes {0,1,2,4,8}.
+    static const Char alphaQuant[17] = {0, 1, 1, 2, 2, 2, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8};
+
+    alpha = (alpha < 0) ? -alphaQuant[Int(-alpha)] : alphaQuant[Int(alpha)];
+  }
+  pCU->setCrossComponentPredictionAlphaPartRange( alpha, compID, absPartIdx, rTu.GetAbsPartIdxNumParts( compID ) );
+
+  return alpha;
+}
+
+// Chroma counterpart of xRecurIntraCodingQT: RD-searches the chroma coding
+// options for each chroma TU of the transform tree. At a leaf TU it tries,
+// per chroma component (and per vertical sub-TU when the chroma rectangle is
+// non-square), every enabled combination of transform-skip and
+// cross-component prediction, keeps the lowest-RD-cost combination, and
+// accumulates its distortion into ruiDist. On the split branch it recurses
+// into the four children and ORs the child CBFs into this depth's flags.
+Void
+TEncSearch::xRecurIntraChromaCodingQT(TComYuv*    pcOrgYuv,
+                                      TComYuv*    pcPredYuv,
+                                      TComYuv*    pcResiYuv,
+                                      Pel         resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE],
+                                      Distortion& ruiDist,
+                                      TComTU&     rTu
+                                      DEBUG_STRING_FN_DECLARE(sDebug))
+{
+  TComDataCU         *pcCU                  = rTu.getCU();
+  const UInt          uiTrDepth             = rTu.GetTransformDepthRel();
+  const UInt          uiAbsPartIdx          = rTu.GetAbsPartIdxTU();
+  const ChromaFormat  format                = rTu.GetChromaFormat();
+  UInt                uiTrMode              = pcCU->getTransformIdx( uiAbsPartIdx );
+  const UInt          numberValidComponents = getNumberValidComponents(format);
+
+  if(  uiTrMode == uiTrDepth )
+  {
+    // Chroma is not coded at this depth for some TU configurations.
+    if (!rTu.ProcessChannelSection(CHANNEL_TYPE_CHROMA)) return;
+
+    const UInt uiFullDepth = rTu.GetTransformDepthTotal();
+
+    // Transform skip is only a candidate when enabled in the PPS and the
+    // chroma TU size is within the signalled transform-skip maximum.
+    Bool checkTransformSkip = pcCU->getSlice()->getPPS()->getUseTransformSkip();
+    checkTransformSkip &= TUCompRectHasAssociatedTransformSkipFlag(rTu.getRect(COMPONENT_Cb), pcCU->getSlice()->getPPS()->getTransformSkipLog2MaxSize());
+
+    if ( m_pcEncCfg->getUseTransformSkipFast() )
+    {
+      // Fast mode: only try chroma TS when the co-located luma TU(s) used TS.
+      checkTransformSkip &= TUCompRectHasAssociatedTransformSkipFlag(rTu.getRect(COMPONENT_Y), pcCU->getSlice()->getPPS()->getTransformSkipLog2MaxSize());
+
+      if (checkTransformSkip)
+      {
+        Int nbLumaSkip = 0;
+        const UInt maxAbsPartIdxSub=uiAbsPartIdx + (rTu.ProcessingAllQuadrants(COMPONENT_Cb)?1:4);
+        for(UInt absPartIdxSub = uiAbsPartIdx; absPartIdxSub < maxAbsPartIdxSub; absPartIdxSub ++)
+        {
+          nbLumaSkip += pcCU->getTransformSkip(absPartIdxSub, COMPONENT_Y);
+        }
+        checkTransformSkip &= (nbLumaSkip > 0);
+      }
+    }
+
+
+    for (UInt ch=COMPONENT_Cb; ch<numberValidComponents; ch++)
+    {
+      const ComponentID compID = ComponentID(ch);
+      DEBUG_STRING_NEW(sDebugBestMode)
+
+      //use RDO to decide whether Cr/Cb takes TS
+      m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[uiFullDepth][CI_QT_TRAFO_ROOT] );
+
+      // A non-square chroma rectangle is processed as two vertically split
+      // square sub-TUs; otherwise the TU is handled as a single section.
+      const Bool splitIntoSubTUs = rTu.getRect(compID).width != rTu.getRect(compID).height;
+
+      TComTURecurse TUIterator(rTu, false, (splitIntoSubTUs ? TComTU::VERTICAL_SPLIT : TComTU::DONT_SPLIT), true, compID);
+
+      const UInt partIdxesPerSubTU = TUIterator.GetAbsPartIdxNumParts(compID);
+
+      do
+      {
+        const UInt subTUAbsPartIdx   = TUIterator.GetAbsPartIdxTU(compID);
+
+        Double     dSingleCost               = MAX_DOUBLE;
+        Int        bestModeId                = 0;
+        Distortion singleDistC               = 0;
+        UInt       singleCbfC                = 0;
+        Distortion singleDistCTmp            = 0;
+        Double     singleCostTmp             = 0;
+        UInt       singleCbfCTmp             = 0;
+        Char       bestCrossCPredictionAlpha = 0;
+        Int        bestTransformSkipMode     = 0;
+
+        // Cross-component prediction is only tried for the DM chroma mode,
+        // when enabled in the PPS, and when the luma CBF is non-zero (there
+        // must be a luma residual to predict from).
+        const Bool checkCrossComponentPrediction =    (pcCU->getIntraDir(CHANNEL_TYPE_CHROMA, subTUAbsPartIdx) == DM_CHROMA_IDX)
+                                                   &&  pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction()
+                                                   && (pcCU->getCbf(subTUAbsPartIdx,  COMPONENT_Y, uiTrDepth) != 0);
+
+        const Int  crossCPredictionModesToTest = checkCrossComponentPrediction ? 2 : 1;
+        const Int  transformSkipModesToTest    = checkTransformSkip            ? 2 : 1;
+        const Int  totalModesToTest            = crossCPredictionModesToTest * transformSkipModesToTest;
+              Int  currModeId                  = 0;
+              Int  default0Save1Load2          = 0;
+
+        for(Int transformSkipModeId = 0; transformSkipModeId < transformSkipModesToTest; transformSkipModeId++)
+        {
+          for(Int crossCPredictionModeId = 0; crossCPredictionModeId < crossCPredictionModesToTest; crossCPredictionModeId++)
+          {
+            pcCU->setCrossComponentPredictionAlphaPartRange(0, compID, subTUAbsPartIdx, partIdxesPerSubTU);
+            DEBUG_STRING_NEW(sDebugMode)
+            pcCU->setTransformSkipPartRange( transformSkipModeId, compID, subTUAbsPartIdx, partIdxesPerSubTU );
+            currModeId++;
+
+            const Bool isOneMode  = (totalModesToTest == 1);
+            const Bool isLastMode = (currModeId == totalModesToTest); // currModeId is indexed from 1
+
+            // The intra prediction is identical for every tested mode, so it
+            // is saved on the first mode and reloaded for subsequent ones.
+            if (isOneMode)
+            {
+              default0Save1Load2 = 0;
+            }
+            else if (!isOneMode && (transformSkipModeId == 0) && (crossCPredictionModeId == 0))
+            {
+              default0Save1Load2 = 1; //save prediction on first mode
+            }
+            else
+            {
+              default0Save1Load2 = 2; //load it on subsequent modes
+            }
+
+            singleDistCTmp = 0;
+
+            xIntraCodingTUBlock( pcOrgYuv, pcPredYuv, pcResiYuv, resiLuma, (crossCPredictionModeId != 0), singleDistCTmp, compID, TUIterator DEBUG_STRING_PASS_INTO(sDebugMode), default0Save1Load2);
+            singleCbfCTmp = pcCU->getCbf( subTUAbsPartIdx, compID, uiTrDepth);
+
+            if (  ((crossCPredictionModeId == 1) && (pcCU->getCrossComponentPredictionAlpha(subTUAbsPartIdx, compID) == 0))
+               || ((transformSkipModeId    == 1) && (singleCbfCTmp == 0))) //In order not to code TS flag when cbf is zero, the case for TS with cbf being zero is forbidden.
+            {
+              singleCostTmp = MAX_DOUBLE;
+            }
+            else if (!isOneMode)
+            {
+              UInt bitsTmp = xGetIntraBitsQTChroma( TUIterator, compID, false );
+              singleCostTmp  = m_pcRdCost->calcRdCost( bitsTmp, singleDistCTmp);
+            }
+
+            if(singleCostTmp < dSingleCost)
+            {
+              DEBUG_STRING_SWAP(sDebugBestMode, sDebugMode)
+              dSingleCost               = singleCostTmp;
+              singleDistC               = singleDistCTmp;
+              bestCrossCPredictionAlpha = (crossCPredictionModeId != 0) ? pcCU->getCrossComponentPredictionAlpha(subTUAbsPartIdx, compID) : 0;
+              bestTransformSkipMode     = transformSkipModeId;
+              bestModeId                = currModeId;
+              singleCbfC                = singleCbfCTmp;
+
+              // Snapshot the new best unless it is the last mode (in which
+              // case its results are already in place).
+              if (!isOneMode && !isLastMode)
+              {
+                xStoreIntraResultQT(compID, compID, TUIterator);
+                m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[ uiFullDepth ][ CI_TEMP_BEST ] );
+              }
+            }
+
+            // Reset the entropy coder state before trying the next mode.
+            if (!isOneMode && !isLastMode)
+            {
+              m_pcRDGoOnSbacCoder->load ( m_pppcRDSbacCoder[ uiFullDepth ][ CI_QT_TRAFO_ROOT ] );
+            }
+          }
+        }
+
+        // If the last-tested mode did not win, restore the snapshotted best.
+        if(bestModeId < totalModesToTest)
+        {
+          xLoadIntraResultQT(compID, compID, TUIterator);
+          pcCU->setCbfPartRange( singleCbfC << uiTrDepth, compID, subTUAbsPartIdx, partIdxesPerSubTU );
+
+          m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[ uiFullDepth ][ CI_TEMP_BEST ] );
+        }
+
+        DEBUG_STRING_APPEND(sDebug, sDebugBestMode)
+        pcCU ->setTransformSkipPartRange                ( bestTransformSkipMode,     compID, subTUAbsPartIdx, partIdxesPerSubTU );
+        pcCU ->setCrossComponentPredictionAlphaPartRange( bestCrossCPredictionAlpha, compID, subTUAbsPartIdx, partIdxesPerSubTU );
+        ruiDist += singleDistC;
+      }
+      while (TUIterator.nextSection(rTu));
+
+      // Sub-TU CBFs need adjusting so the parent flag reflects both halves.
+      if (splitIntoSubTUs) offsetSubTUCBFs(rTu, compID);
+    }
+  }
+  else
+  {
+    // Split branch: recurse into children, then propagate their CBFs up.
+    UInt    uiSplitCbf[MAX_NUM_COMPONENT] = {0,0,0};
+
+    TComTURecurse tuRecurseChild(rTu, false);
+    const UInt uiTrDepthChild   = tuRecurseChild.GetTransformDepthRel();
+    do
+    {
+      DEBUG_STRING_NEW(sChild)
+
+      xRecurIntraChromaCodingQT( pcOrgYuv, pcPredYuv, pcResiYuv, resiLuma, ruiDist, tuRecurseChild DEBUG_STRING_PASS_INTO(sChild) );
+
+      DEBUG_STRING_APPEND(sDebug, sChild)
+      const UInt uiAbsPartIdxSub=tuRecurseChild.GetAbsPartIdxTU();
+
+      for(UInt ch=COMPONENT_Cb; ch<numberValidComponents; ch++)
+      {
+        uiSplitCbf[ch] |= pcCU->getCbf( uiAbsPartIdxSub, ComponentID(ch), uiTrDepthChild );
+      }
+    } while ( tuRecurseChild.nextSection(rTu) );
+
+
+    // OR the aggregated child CBFs into this depth's bit of every partition.
+    UInt uiPartsDiv = rTu.GetAbsPartIdxNumParts();
+    for(UInt ch=COMPONENT_Cb; ch<numberValidComponents; ch++)
+    {
+      if (uiSplitCbf[ch])
+      {
+        const UInt flag=1<<uiTrDepth;
+        ComponentID compID=ComponentID(ch);
+        UChar *pBase=pcCU->getCbf( compID );
+        for( UInt uiOffs = 0; uiOffs < uiPartsDiv; uiOffs++ )
+        {
+          pBase[ uiAbsPartIdx + uiOffs ] |= flag;
+        }
+      }
+    }
+  }
+}
+
+
+
+
+// Chroma-only variant of xSetIntraResultQT: commits the chroma coefficients
+// (and ARL coefficients under ADAPTIVE_QP_SELECTION) from the QT temp buffers
+// into the CU, and the Cb/Cr reconstruction into pcRecoYuv, recursing into
+// child TUs where the transform tree is split.
+Void
+TEncSearch::xSetIntraResultChromaQT(TComYuv*    pcRecoYuv, TComTU &rTu)
+{
+  if (!rTu.ProcessChannelSection(CHANNEL_TYPE_CHROMA)) return;
+  TComDataCU *pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
+  const UInt uiTrDepth   = rTu.GetTransformDepthRel();
+  UInt uiTrMode     = pcCU->getTransformIdx( uiAbsPartIdx );
+  if(  uiTrMode == uiTrDepth )
+  {
+    UInt uiLog2TrSize = rTu.GetLog2LumaTrSize();
+    UInt uiQTLayer    = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() - uiLog2TrSize;
+
+    //===== copy transform coefficients =====
+    // Cb and Cr share the same rectangle/offset, so the Cb values are used
+    // for both components below.
+    const TComRectangle &tuRectCb=rTu.getRect(COMPONENT_Cb);
+    UInt uiNumCoeffC    = tuRectCb.width*tuRectCb.height;//( pcCU->getSlice()->getSPS()->getMaxCUWidth() * pcCU->getSlice()->getSPS()->getMaxCUHeight() ) >> ( uiFullDepth << 1 );
+    const UInt offset = rTu.getCoefficientOffset(COMPONENT_Cb);
+
+    const UInt numberValidComponents = getNumberValidComponents(rTu.GetChromaFormat());
+    for (UInt ch=COMPONENT_Cb; ch<numberValidComponents; ch++)
+    {
+      const ComponentID component = ComponentID(ch);
+      const TCoeff* src           = m_ppcQTTempCoeff[component][uiQTLayer] + offset;//(uiNumCoeffIncC*uiAbsPartIdx);
+      TCoeff* dest                = pcCU->getCoeff(component) + offset;//(uiNumCoeffIncC*uiAbsPartIdx);
+      ::memcpy( dest, src, sizeof(TCoeff)*uiNumCoeffC );
+#if ADAPTIVE_QP_SELECTION
+      TCoeff* pcArlCoeffSrc = m_ppcQTTempArlCoeff[component][ uiQTLayer ] + offset;//( uiNumCoeffIncC * uiAbsPartIdx );
+      TCoeff* pcArlCoeffDst = pcCU->getArlCoeff(component)                + offset;//( uiNumCoeffIncC * uiAbsPartIdx );
+      ::memcpy( pcArlCoeffDst, pcArlCoeffSrc, sizeof( TCoeff ) * uiNumCoeffC );
+#endif
+    }
+
+    //===== copy reconstruction =====
+
+    m_pcQTTempTComYuv[ uiQTLayer ].copyPartToPartComponent( COMPONENT_Cb, pcRecoYuv, uiAbsPartIdx, tuRectCb.width, tuRectCb.height );
+    m_pcQTTempTComYuv[ uiQTLayer ].copyPartToPartComponent( COMPONENT_Cr, pcRecoYuv, uiAbsPartIdx, tuRectCb.width, tuRectCb.height );
+  }
+  else
+  {
+    TComTURecurse tuRecurseChild(rTu, false);
+    do
+    {
+      xSetIntraResultChromaQT( pcRecoYuv, tuRecurseChild );
+    } while (tuRecurseChild.nextSection(rTu));
+  }
+}
+
+
+
+// Fast pre-estimation of the chroma intra prediction mode: for each chroma PU
+// section it evaluates only Planar/DC/Horizontal/Vertical, scoring each mode
+// by the summed Cb+Cr prediction distortion (Hadamard SATD unless transquant
+// bypass is active), and writes the cheapest mode into the CU's chroma intra
+// direction.
+Void
+TEncSearch::preestChromaPredMode( TComDataCU* pcCU,
+                                 TComYuv*    pcOrgYuv,
+                                 TComYuv*    pcPredYuv )
+{
+
+  //===== loop over partitions =====
+  const UInt    uiInitTrDepth  = pcCU->getPartitionSize(0) != SIZE_2Nx2N && enable4ChromaPUsInIntraNxNCU(pcOrgYuv->getChromaFormat()) ? 1 : 0;
+  TComTURecurse tuRecurseCU(pcCU, 0);
+  TComTURecurse tuRecurseWithPU(tuRecurseCU, false, (uiInitTrDepth==0)?TComTU::DONT_SPLIT : TComTU::QUAD_SPLIT);
+  const ChromaFormat chFmt = tuRecurseWithPU.GetChromaFormat();
+  Bool bFilterEnabled=filterIntraReferenceSamples(CHANNEL_TYPE_CHROMA, chFmt, pcCU->getSlice()->getSPS()->getDisableIntraReferenceSmoothing());
+
+  do
+  {
+    if (tuRecurseWithPU.ProcessChannelSection(CHANNEL_TYPE_CHROMA))
+    {
+      const TComRectangle &rect=tuRecurseWithPU.getRect(COMPONENT_Cb);
+      const UInt  uiWidth     = rect.width;
+      const UInt  uiHeight    = rect.height;
+      const UInt  partIdx     = tuRecurseWithPU.GetAbsPartIdxCU();
+      const UInt  uiStride    = pcOrgYuv ->getStride(COMPONENT_Cb);
+      Pel*  piOrgU      = pcOrgYuv ->getAddr ( COMPONENT_Cb, partIdx ); //TODO: Change this into an array and loop over chroma components below
+      Pel*  piOrgV      = pcOrgYuv ->getAddr ( COMPONENT_Cr, partIdx );
+      Pel*  piPredU     = pcPredYuv->getAddr ( COMPONENT_Cb, partIdx );
+      Pel*  piPredV     = pcPredYuv->getAddr ( COMPONENT_Cr, partIdx );
+
+      //===== init pattern =====
+      Bool  bAboveAvail = false;
+      Bool  bLeftAvail  = false;
+      DEBUG_STRING_NEW(sTemp)
+      initAdiPatternChType( tuRecurseWithPU, bAboveAvail, bLeftAvail, COMPONENT_Cb, bFilterEnabled DEBUG_STRING_PASS_INTO(sTemp) );
+      initAdiPatternChType( tuRecurseWithPU, bAboveAvail, bLeftAvail, COMPONENT_Cr, bFilterEnabled DEBUG_STRING_PASS_INTO(sTemp) );
+
+      //===== get best prediction modes (using SAD) =====
+            UInt        uiMinMode          = 0;
+            UInt        uiMaxMode          = 4;
+            UInt        uiBestMode         = MAX_UINT;
+            Distortion  uiMinSAD           = std::numeric_limits<Distortion>::max();
+      // Only the four cheapest-to-test directions are pre-estimated here.
+      const UInt        mappedModeTable[4] = {PLANAR_IDX,DC_IDX,HOR_IDX,VER_IDX};
+
+      DistParam distParamU, distParamV;
+      // Hadamard distortion unless the CU uses transquant bypass (lossless).
+      const Bool bUseHadamard=pcCU->getCUTransquantBypass(0) == 0;
+      m_pcRdCost->setDistParam(distParamU, g_bitDepth[CHANNEL_TYPE_CHROMA], piOrgU, uiStride, piPredU, uiStride, uiWidth, uiHeight, bUseHadamard);
+      m_pcRdCost->setDistParam(distParamV, g_bitDepth[CHANNEL_TYPE_CHROMA], piOrgV, uiStride, piPredV, uiStride, uiWidth, uiHeight, bUseHadamard);
+      distParamU.bApplyWeight = false;
+      distParamV.bApplyWeight = false;
+
+      for( UInt uiMode_  = uiMinMode; uiMode_ < uiMaxMode; uiMode_++ )
+      {
+        UInt uiMode=mappedModeTable[uiMode_];
+        //--- get prediction ---
+        const Bool bUseFilter=TComPrediction::filteringIntraReferenceSamples(COMPONENT_Cb, uiMode, uiWidth, uiHeight, chFmt, pcCU->getSlice()->getSPS()->getDisableIntraReferenceSmoothing());
+
+        // NOTE(review): tuRecurseCU (CU-level TU) is passed here rather than
+        // tuRecurseWithPU (the PU section being scored) — confirm against the
+        // upstream HM reference that this is intentional.
+        predIntraAng( COMPONENT_Cb, uiMode, piOrgU, uiStride, piPredU, uiStride, tuRecurseCU, bAboveAvail, bLeftAvail, bUseFilter );
+        predIntraAng( COMPONENT_Cr, uiMode, piOrgV, uiStride, piPredV, uiStride, tuRecurseCU, bAboveAvail, bLeftAvail, bUseFilter );
+
+        //--- get SAD ---
+        Distortion uiSAD  = distParamU.DistFunc(&distParamU);
+        uiSAD            += distParamV.DistFunc(&distParamV);
+        //--- check ---
+        if( uiSAD < uiMinSAD )
+        {
+          uiMinSAD   = uiSAD;
+          uiBestMode = uiMode;
+        }
+      }
+
+      //===== set chroma pred mode =====
+      pcCU->setIntraDirSubParts( CHANNEL_TYPE_CHROMA, uiBestMode, partIdx, tuRecurseWithPU.getCUDepth() + uiInitTrDepth );
+    }
+  } while (tuRecurseWithPU.nextSection(tuRecurseCU));
+}
+
+
+
+
+Void
+TEncSearch::estIntraPredQT(TComDataCU* pcCU,
+                           TComYuv*    pcOrgYuv,
+                           TComYuv*    pcPredYuv,
+                           TComYuv*    pcResiYuv,
+                           TComYuv*    pcRecoYuv,
+                           Pel         resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE],
+                           Distortion& ruiDistC,
+                           Bool        bLumaOnly
+                           DEBUG_STRING_FN_DECLARE(sDebug))
+{
+  const UInt         uiDepth               = pcCU->getDepth(0);
+  const UInt         uiInitTrDepth         = pcCU->getPartitionSize(0) == SIZE_2Nx2N ? 0 : 1;
+  const UInt         uiInitTrDepthC        = pcCU->getPartitionSize(0) != SIZE_2Nx2N && enable4ChromaPUsInIntraNxNCU(pcOrgYuv->getChromaFormat()) ? 1 : 0;
+  const UInt         uiNumPU               = 1<<(2*uiInitTrDepth);
+  const UInt         uiQNumParts           = pcCU->getTotalNumPart() >> 2;
+  const UInt         uiWidthBit            = pcCU->getIntraSizeIdx(0);
+  const ChromaFormat chFmt                 = pcCU->getPic()->getChromaFormat();
+  const UInt         numberValidComponents = getNumberValidComponents(chFmt);
+        Distortion   uiOverallDistY        = 0;
+        Distortion   uiOverallDistC        = 0;
+        UInt         CandNum;
+        Double       CandCostList[ FAST_UDI_MAX_RDMODE_NUM ];
+        Pel          resiLumaPU[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE];
+
+        Bool    bMaintainResidual[NUMBER_OF_STORED_RESIDUAL_TYPES];
+        for (UInt residualTypeIndex = 0; residualTypeIndex < NUMBER_OF_STORED_RESIDUAL_TYPES; residualTypeIndex++)
+        {
+          bMaintainResidual[residualTypeIndex] = true; //assume true unless specified otherwise
+        }
+
+        bMaintainResidual[RESIDUAL_ENCODER_SIDE] = !(m_pcEncCfg->getUseReconBasedCrossCPredictionEstimate());
+
+  // Lambda calculation at equivalent Qp of 4 is recommended because at that Qp, the quantisation divisor is 1.
+#if FULL_NBIT
+  const Double sqrtLambdaForFirstPass= (m_pcEncCfg->getCostMode()==COST_MIXED_LOSSLESS_LOSSY_CODING && pcCU->getCUTransquantBypass(0)) ?
+                sqrt(0.57 * pow(2.0, ((LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP_PRIME - 12) / 3.0)))
+              : m_pcRdCost->getSqrtLambda();
+#else
+  const Double sqrtLambdaForFirstPass= (m_pcEncCfg->getCostMode()==COST_MIXED_LOSSLESS_LOSSY_CODING && pcCU->getCUTransquantBypass(0)) ?
+                sqrt(0.57 * pow(2.0, ((LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP_PRIME - 12 - 6 * (g_bitDepth[CHANNEL_TYPE_LUMA] - 8)) / 3.0)))
+              : m_pcRdCost->getSqrtLambda();
+#endif
+
+  //===== set QP and clear Cbf =====
+  if ( pcCU->getSlice()->getPPS()->getUseDQP() == true)
+  {
+    pcCU->setQPSubParts( pcCU->getQP(0), 0, uiDepth );
+  }
+  else
+  {
+    pcCU->setQPSubParts( pcCU->getSlice()->getSliceQp(), 0, uiDepth );
+  }
+
+  //===== loop over partitions =====
+  TComTURecurse tuRecurseCU(pcCU, 0);
+  TComTURecurse tuRecurseWithPU(tuRecurseCU, false, (uiInitTrDepth==0)?TComTU::DONT_SPLIT : TComTU::QUAD_SPLIT);
+
+  do
+  {
+    const UInt uiPartOffset=tuRecurseWithPU.GetAbsPartIdxTU();
+//  for( UInt uiPU = 0, uiPartOffset=0; uiPU < uiNumPU; uiPU++, uiPartOffset += uiQNumParts )
+  //{
+    //===== init pattern for luma prediction =====
+    Bool bAboveAvail = false;
+    Bool bLeftAvail  = false;
+    DEBUG_STRING_NEW(sTemp2)
+
+    //===== determine set of modes to be tested (using prediction signal only) =====
+    Int numModesAvailable     = 35; //total number of Intra modes
+    UInt uiRdModeList[FAST_UDI_MAX_RDMODE_NUM];
+    Int numModesForFullRD = g_aucIntraModeNumFast[ uiWidthBit ];
+
+    if (tuRecurseWithPU.ProcessComponentSection(COMPONENT_Y))
+      initAdiPatternChType( tuRecurseWithPU, bAboveAvail, bLeftAvail, COMPONENT_Y, true DEBUG_STRING_PASS_INTO(sTemp2) );
+
+    Bool doFastSearch = (numModesForFullRD != numModesAvailable);
+    if (doFastSearch)
+    {
+      assert(numModesForFullRD < numModesAvailable);
+
+      for( Int i=0; i < numModesForFullRD; i++ )
+      {
+        CandCostList[ i ] = MAX_DOUBLE;
+      }
+      CandNum = 0;
+
+      const TComRectangle &puRect=tuRecurseWithPU.getRect(COMPONENT_Y);
+      const UInt uiAbsPartIdx=tuRecurseWithPU.GetAbsPartIdxTU();
+
+      Pel* piOrg         = pcOrgYuv ->getAddr( COMPONENT_Y, uiAbsPartIdx );
+      Pel* piPred        = pcPredYuv->getAddr( COMPONENT_Y, uiAbsPartIdx );
+      UInt uiStride      = pcPredYuv->getStride( COMPONENT_Y );
+      DistParam distParam;
+      const Bool bUseHadamard=pcCU->getCUTransquantBypass(0) == 0;
+      m_pcRdCost->setDistParam(distParam, g_bitDepth[CHANNEL_TYPE_LUMA], piOrg, uiStride, piPred, uiStride, puRect.width, puRect.height, bUseHadamard);
+      distParam.bApplyWeight = false;
+      for( Int modeIdx = 0; modeIdx < numModesAvailable; modeIdx++ )
+      {
+        UInt       uiMode = modeIdx;
+        Distortion uiSad  = 0;
+
+        const Bool bUseFilter=TComPrediction::filteringIntraReferenceSamples(COMPONENT_Y, uiMode, puRect.width, puRect.height, chFmt, pcCU->getSlice()->getSPS()->getDisableIntraReferenceSmoothing());
+
+        predIntraAng( COMPONENT_Y, uiMode, piOrg, uiStride, piPred, uiStride, tuRecurseWithPU, bAboveAvail, bLeftAvail, bUseFilter, TComPrediction::UseDPCMForFirstPassIntraEstimation(tuRecurseWithPU, uiMode) );
+
+        // use hadamard transform here
+        uiSad+=distParam.DistFunc(&distParam);
+
+        UInt   iModeBits = 0;
+
+        // NB xModeBitsIntra will not affect the mode for chroma that may have already been pre-estimated.
+        iModeBits+=xModeBitsIntra( pcCU, uiMode, uiPartOffset, uiDepth, uiInitTrDepth, CHANNEL_TYPE_LUMA );
+
+        Double cost      = (Double)uiSad + (Double)iModeBits * sqrtLambdaForFirstPass;
+
+#ifdef DEBUG_INTRA_SEARCH_COSTS
+        std::cout << "1st pass mode " << uiMode << " SAD = " << uiSad << ", mode bits = " << iModeBits << ", cost = " << cost << "\n";
+#endif
+
+        CandNum += xUpdateCandList( uiMode, cost, numModesForFullRD, uiRdModeList, CandCostList );
+      }
+
+#if FAST_UDI_USE_MPM
+      Int uiPreds[NUM_MOST_PROBABLE_MODES] = {-1, -1, -1};
+
+      Int iMode = -1;
+      Int numCand = pcCU->getIntraDirPredictor( uiPartOffset, uiPreds, COMPONENT_Y, &iMode );
+
+      if( iMode >= 0 )
+      {
+        numCand = iMode;
+      }
+
+      for( Int j=0; j < numCand; j++)
+      {
+        Bool mostProbableModeIncluded = false;
+        Int mostProbableMode = uiPreds[j];
+
+        for( Int i=0; i < numModesForFullRD; i++)
+        {
+          mostProbableModeIncluded |= (mostProbableMode == uiRdModeList[i]);
+        }
+        if (!mostProbableModeIncluded)
+        {
+          uiRdModeList[numModesForFullRD++] = mostProbableMode;
+        }
+      }
+#endif // FAST_UDI_USE_MPM
+    }
+    else
+    {
+      for( Int i=0; i < numModesForFullRD; i++)
+      {
+        uiRdModeList[i] = i;
+      }
+    }
+
+    //===== check modes (using r-d costs) =====
+#if HHI_RQT_INTRA_SPEEDUP_MOD
+    UInt   uiSecondBestMode  = MAX_UINT;
+    Double dSecondBestPUCost = MAX_DOUBLE;
+#endif
+    DEBUG_STRING_NEW(sPU)
+    UInt       uiBestPUMode  = 0;
+    Distortion uiBestPUDistY = 0;
+    Distortion uiBestPUDistC = 0;
+    Double     dBestPUCost   = MAX_DOUBLE;
+
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+    UInt max=numModesForFullRD;
+
+    if (DebugOptionList::ForceLumaMode.isSet()) max=0;  // we are forcing a direction, so don't bother with mode check
+    for ( UInt uiMode = 0; uiMode < max; uiMode++)
+#else
+    for( UInt uiMode = 0; uiMode < numModesForFullRD; uiMode++ )
+#endif
+    {
+      // set luma prediction mode
+      UInt uiOrgMode = uiRdModeList[uiMode];
+
+      pcCU->setIntraDirSubParts ( CHANNEL_TYPE_LUMA, uiOrgMode, uiPartOffset, uiDepth + uiInitTrDepth );
+
+      DEBUG_STRING_NEW(sMode)
+      // set context models
+      m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[uiDepth][CI_CURR_BEST] );
+
+      // determine residual for partition
+      Distortion uiPUDistY = 0;
+      Distortion uiPUDistC = 0;
+      Double     dPUCost   = 0.0;
+#if HHI_RQT_INTRA_SPEEDUP
+      xRecurIntraCodingQT( bLumaOnly, pcOrgYuv, pcPredYuv, pcResiYuv, resiLumaPU, uiPUDistY, uiPUDistC, true, dPUCost, tuRecurseWithPU DEBUG_STRING_PASS_INTO(sMode) );
+#else
+      xRecurIntraCodingQT( bLumaOnly, pcOrgYuv, pcPredYuv, pcResiYuv, resiLumaPU, uiPUDistY, uiPUDistC, dPUCost, tuRecurseWithPU DEBUG_STRING_PASS_INTO(sMode) );
+#endif
+
+#ifdef DEBUG_INTRA_SEARCH_COSTS
+      std::cout << "2nd pass [luma,chroma] mode [" << Int(pcCU->getIntraDir(CHANNEL_TYPE_LUMA, uiPartOffset)) << "," << Int(pcCU->getIntraDir(CHANNEL_TYPE_CHROMA, uiPartOffset)) << "] cost = " << dPUCost << "\n";
+#endif
+
+      // check r-d cost
+      if( dPUCost < dBestPUCost )
+      {
+        DEBUG_STRING_SWAP(sPU, sMode)
+#if HHI_RQT_INTRA_SPEEDUP_MOD
+        uiSecondBestMode  = uiBestPUMode;
+        dSecondBestPUCost = dBestPUCost;
+#endif
+        uiBestPUMode  = uiOrgMode;
+        uiBestPUDistY = uiPUDistY;
+        uiBestPUDistC = uiPUDistC;
+        dBestPUCost   = dPUCost;
+
+        xSetIntraResultQT( bLumaOnly, pcRecoYuv, tuRecurseWithPU );
+
+        if (pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction())
+        {
+          const Int xOffset = tuRecurseWithPU.getRect( COMPONENT_Y ).x0;
+          const Int yOffset = tuRecurseWithPU.getRect( COMPONENT_Y ).y0;
+          for (UInt storedResidualIndex = 0; storedResidualIndex < NUMBER_OF_STORED_RESIDUAL_TYPES; storedResidualIndex++)
+          {
+            if (bMaintainResidual[storedResidualIndex])
+            {
+              xStoreCrossComponentPredictionResult(resiLuma[storedResidualIndex], resiLumaPU[storedResidualIndex], tuRecurseWithPU, xOffset, yOffset, MAX_CU_SIZE, MAX_CU_SIZE );
+            }
+          }
+        }
+
+        UInt uiQPartNum = tuRecurseWithPU.GetAbsPartIdxNumParts();
+
+        ::memcpy( m_puhQTTempTrIdx,  pcCU->getTransformIdx()       + uiPartOffset, uiQPartNum * sizeof( UChar ) );
+        for (UInt component = 0; component < numberValidComponents; component++)
+        {
+          const ComponentID compID = ComponentID(component);
+          ::memcpy( m_puhQTTempCbf[compID], pcCU->getCbf( compID  ) + uiPartOffset, uiQPartNum * sizeof( UChar ) );
+          ::memcpy( m_puhQTTempTransformSkipFlag[compID],  pcCU->getTransformSkip(compID)  + uiPartOffset, uiQPartNum * sizeof( UChar ) );
+        }
+      }
+#if HHI_RQT_INTRA_SPEEDUP_MOD
+      else if( dPUCost < dSecondBestPUCost )
+      {
+        uiSecondBestMode  = uiOrgMode;
+        dSecondBestPUCost = dPUCost;
+      }
+#endif
+    } // Mode loop
+
+#if HHI_RQT_INTRA_SPEEDUP
+#if HHI_RQT_INTRA_SPEEDUP_MOD
+    for( UInt ui =0; ui < 2; ++ui )
+#endif
+    {
+#if HHI_RQT_INTRA_SPEEDUP_MOD
+      UInt uiOrgMode   = ui ? uiSecondBestMode  : uiBestPUMode;
+      if( uiOrgMode == MAX_UINT )
+      {
+        break;
+      }
+#else
+      UInt uiOrgMode = uiBestPUMode;
+#endif
+
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+      if (DebugOptionList::ForceLumaMode.isSet())
+        uiOrgMode = DebugOptionList::ForceLumaMode.getInt();
+#endif
+
+      pcCU->setIntraDirSubParts ( CHANNEL_TYPE_LUMA, uiOrgMode, uiPartOffset, uiDepth + uiInitTrDepth );
+      DEBUG_STRING_NEW(sModeTree)
+
+      // set context models
+      m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[uiDepth][CI_CURR_BEST] );
+
+      // determine residual for partition
+      Distortion uiPUDistY = 0;
+      Distortion uiPUDistC = 0;
+      Double     dPUCost   = 0.0;
+
+      xRecurIntraCodingQT( bLumaOnly, pcOrgYuv, pcPredYuv, pcResiYuv, resiLumaPU, uiPUDistY, uiPUDistC, false, dPUCost, tuRecurseWithPU DEBUG_STRING_PASS_INTO(sModeTree));
+
+      // check r-d cost
+      if( dPUCost < dBestPUCost )
+      {
+        DEBUG_STRING_SWAP(sPU, sModeTree)
+        uiBestPUMode  = uiOrgMode;
+        uiBestPUDistY = uiPUDistY;
+        uiBestPUDistC = uiPUDistC;
+        dBestPUCost   = dPUCost;
+
+        xSetIntraResultQT( bLumaOnly, pcRecoYuv, tuRecurseWithPU );
+
+        if (pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction())
+        {
+          const Int xOffset = tuRecurseWithPU.getRect( COMPONENT_Y ).x0;
+          const Int yOffset = tuRecurseWithPU.getRect( COMPONENT_Y ).y0;
+          for (UInt storedResidualIndex = 0; storedResidualIndex < NUMBER_OF_STORED_RESIDUAL_TYPES; storedResidualIndex++)
+          {
+            if (bMaintainResidual[storedResidualIndex])
+            {
+              xStoreCrossComponentPredictionResult(resiLuma[storedResidualIndex], resiLumaPU[storedResidualIndex], tuRecurseWithPU, xOffset, yOffset, MAX_CU_SIZE, MAX_CU_SIZE );
+            }
+          }
+        }
+
+        const UInt uiQPartNum = tuRecurseWithPU.GetAbsPartIdxNumParts();
+        ::memcpy( m_puhQTTempTrIdx,  pcCU->getTransformIdx()       + uiPartOffset, uiQPartNum * sizeof( UChar ) );
+
+        for (UInt component = 0; component < numberValidComponents; component++)
+        {
+          const ComponentID compID = ComponentID(component);
+          ::memcpy( m_puhQTTempCbf[compID], pcCU->getCbf( compID  ) + uiPartOffset, uiQPartNum * sizeof( UChar ) );
+          ::memcpy( m_puhQTTempTransformSkipFlag[compID],  pcCU->getTransformSkip(compID)  + uiPartOffset, uiQPartNum * sizeof( UChar ) );
+        }
+      }
+    } // Mode loop
+#endif
+
+    DEBUG_STRING_APPEND(sDebug, sPU)
+
+    //--- update overall distortion ---
+    uiOverallDistY += uiBestPUDistY;
+    uiOverallDistC += uiBestPUDistC;
+
+    //--- update transform index and cbf ---
+    const UInt uiQPartNum = tuRecurseWithPU.GetAbsPartIdxNumParts();
+    ::memcpy( pcCU->getTransformIdx()       + uiPartOffset, m_puhQTTempTrIdx,  uiQPartNum * sizeof( UChar ) );
+    for (UInt component = 0; component < numberValidComponents; component++)
+    {
+      const ComponentID compID = ComponentID(component);
+      ::memcpy( pcCU->getCbf( compID  ) + uiPartOffset, m_puhQTTempCbf[compID], uiQPartNum * sizeof( UChar ) );
+      ::memcpy( pcCU->getTransformSkip( compID  ) + uiPartOffset, m_puhQTTempTransformSkipFlag[compID ], uiQPartNum * sizeof( UChar ) );
+    }
+
+    //--- set reconstruction for next intra prediction blocks ---
+    if( !tuRecurseWithPU.IsLastSection() )
+    {
+      const Bool bSkipChroma  = tuRecurseWithPU.ProcessChannelSection(CHANNEL_TYPE_CHROMA);
+
+      const UInt numChannelToProcess = (bLumaOnly || bSkipChroma) ? 1 : getNumberValidComponents(pcCU->getPic()->getChromaFormat());
+
+      for (UInt ch=0; ch<numChannelToProcess; ch++)
+      {
+        const ComponentID compID = ComponentID(ch);
+        const TComRectangle &puRect=tuRecurseWithPU.getRect(compID);
+        const UInt  uiCompWidth   = puRect.width;
+        const UInt  uiCompHeight  = puRect.height;
+
+        const UInt  uiZOrder      = pcCU->getZorderIdxInCtu() + uiPartOffset;
+              Pel*  piDes         = pcCU->getPic()->getPicYuvRec()->getAddr( compID, pcCU->getCtuRsAddr(), uiZOrder );
+        const UInt  uiDesStride   = pcCU->getPic()->getPicYuvRec()->getStride( compID);
+        const Pel*  piSrc         = pcRecoYuv->getAddr( compID, uiPartOffset );
+        const UInt  uiSrcStride   = pcRecoYuv->getStride( compID);
+
+        for( UInt uiY = 0; uiY < uiCompHeight; uiY++, piSrc += uiSrcStride, piDes += uiDesStride )
+        {
+          for( UInt uiX = 0; uiX < uiCompWidth; uiX++ )
+          {
+            piDes[ uiX ] = piSrc[ uiX ];
+          }
+        }
+      }
+    }
+
+    //=== update PU data ====
+    pcCU->setIntraDirSubParts     ( CHANNEL_TYPE_LUMA, uiBestPUMode, uiPartOffset, uiDepth + uiInitTrDepth );
+    if (!bLumaOnly && getChromasCorrespondingPULumaIdx(uiPartOffset, chFmt)==uiPartOffset)
+    {
+      UInt chromaDir=pcCU->getIntraDir(CHANNEL_TYPE_CHROMA, getChromasCorrespondingPULumaIdx(uiPartOffset, chFmt));
+      if (chromaDir == uiBestPUMode && tuRecurseWithPU.ProcessChannelSection(CHANNEL_TYPE_CHROMA))
+      {
+        pcCU->setIntraDirSubParts     ( CHANNEL_TYPE_CHROMA, DM_CHROMA_IDX, getChromasCorrespondingPULumaIdx(uiPartOffset, chFmt), uiDepth + uiInitTrDepthC );
+      }
+    }
+    //pcCU->copyToPic                   ( uiDepth, uiPU, uiInitTrDepth ); // Unnecessary copy?
+  } while (tuRecurseWithPU.nextSection(tuRecurseCU));
+
+
+  if( uiNumPU > 1 )
+  { // set Cbf for all blocks
+    UInt uiCombCbfY = 0;
+    UInt uiCombCbfU = 0;
+    UInt uiCombCbfV = 0;
+    UInt uiPartIdx  = 0;
+    for( UInt uiPart = 0; uiPart < 4; uiPart++, uiPartIdx += uiQNumParts )
+    {
+      uiCombCbfY |= pcCU->getCbf( uiPartIdx, COMPONENT_Y,  1 );
+      uiCombCbfU |= pcCU->getCbf( uiPartIdx, COMPONENT_Cb, 1 );
+      uiCombCbfV |= pcCU->getCbf( uiPartIdx, COMPONENT_Cr, 1 );
+    }
+    for( UInt uiOffs = 0; uiOffs < 4 * uiQNumParts; uiOffs++ )
+    {
+      pcCU->getCbf( COMPONENT_Y  )[ uiOffs ] |= uiCombCbfY;
+      pcCU->getCbf( COMPONENT_Cb )[ uiOffs ] |= uiCombCbfU;
+      pcCU->getCbf( COMPONENT_Cr )[ uiOffs ] |= uiCombCbfV;
+    }
+  }
+
+  //===== reset context models =====
+  m_pcRDGoOnSbacCoder->load(m_pppcRDSbacCoder[uiDepth][CI_CURR_BEST]);
+
+  //===== set distortion (rate and r-d costs are determined later) =====
+  ruiDistC                   = uiOverallDistC;
+  pcCU->getTotalDistortion() = uiOverallDistY + uiOverallDistC;
+}
+
+
+
+
+/** Chroma intra prediction mode estimation.
+ *  For each chroma PU section, every allowed chroma intra direction is coded
+ *  and the one with the lowest rate-distortion cost is selected and stored
+ *  into the CU. The CU's total distortion is updated accordingly.
+ * \param pcCU            CU whose chroma intra modes are being decided
+ * \param pcOrgYuv        original picture samples
+ * \param pcPredYuv       prediction sample buffer
+ * \param pcResiYuv       residual sample buffer
+ * \param pcRecoYuv       reconstructed sample buffer
+ * \param resiLuma        stored luma residuals used by cross-component prediction
+ * \param uiPreCalcDistC  chroma distortion already included in the CU total;
+ *                        removed first and replaced by the value found here
+ */
+Void
+TEncSearch::estIntraPredChromaQT(TComDataCU* pcCU,
+                                 TComYuv*    pcOrgYuv,
+                                 TComYuv*    pcPredYuv,
+                                 TComYuv*    pcResiYuv,
+                                 TComYuv*    pcRecoYuv,
+                                 Pel         resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE],
+                                 Distortion  uiPreCalcDistC
+                                 DEBUG_STRING_FN_DECLARE(sDebug))
+{
+  // Remove the pre-estimated chroma distortion; the actual value is added back below.
+  pcCU->getTotalDistortion      () -= uiPreCalcDistC;
+
+  //const UInt    uiDepthCU     = pcCU->getDepth(0);
+  // NxN partitions get a chroma split only for formats that allow 4 chroma PUs.
+  const UInt    uiInitTrDepth  = pcCU->getPartitionSize(0) != SIZE_2Nx2N && enable4ChromaPUsInIntraNxNCU(pcOrgYuv->getChromaFormat()) ? 1 : 0;
+//  const UInt    uiNumPU        = 1<<(2*uiInitTrDepth);
+
+  TComTURecurse tuRecurseCU(pcCU, 0);
+  TComTURecurse tuRecurseWithPU(tuRecurseCU, false, (uiInitTrDepth==0)?TComTU::DONT_SPLIT : TComTU::QUAD_SPLIT);
+  const UInt    uiQNumParts    = tuRecurseWithPU.GetAbsPartIdxNumParts();
+  const UInt    uiDepthCU=tuRecurseWithPU.getCUDepth();
+  const UInt    numberValidComponents = pcCU->getPic()->getNumberValidComponents();
+
+  do
+  {
+    UInt       uiBestMode  = 0;
+    Distortion uiBestDist  = 0;
+    Double     dBestCost   = MAX_DOUBLE;
+
+    //----- init mode list -----
+    if (tuRecurseWithPU.ProcessChannelSection(CHANNEL_TYPE_CHROMA))
+    {
+      UInt uiModeList[FAST_UDI_MAX_RDMODE_NUM];
+      const UInt  uiQPartNum     = uiQNumParts;
+      const UInt  uiPartOffset   = tuRecurseWithPU.GetAbsPartIdxTU();
+      {
+        UInt  uiMinMode = 0;
+        UInt  uiMaxMode = NUM_CHROMA_MODE;
+
+        //----- check chroma modes -----
+        pcCU->getAllowedChromaDir( uiPartOffset, uiModeList );
+
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+        if (DebugOptionList::ForceChromaMode.isSet())
+        {
+          uiMinMode=DebugOptionList::ForceChromaMode.getInt();
+          if (uiModeList[uiMinMode]==34) uiMinMode=4; // if the fixed mode has been renumbered because DM_CHROMA covers it, use DM_CHROMA.
+          uiMaxMode=uiMinMode+1;
+        }
+#endif
+
+        DEBUG_STRING_NEW(sPU)
+
+        for( UInt uiMode = uiMinMode; uiMode < uiMaxMode; uiMode++ )
+        {
+          //----- restore context models -----
+          m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[uiDepthCU][CI_CURR_BEST] );
+          
+          DEBUG_STRING_NEW(sMode)
+          //----- chroma coding -----
+          Distortion uiDist = 0;
+          pcCU->setIntraDirSubParts  ( CHANNEL_TYPE_CHROMA, uiModeList[uiMode], uiPartOffset, uiDepthCU+uiInitTrDepth );
+          xRecurIntraChromaCodingQT       ( pcOrgYuv, pcPredYuv, pcResiYuv, resiLuma, uiDist, tuRecurseWithPU DEBUG_STRING_PASS_INTO(sMode) );
+
+          // The recursion may have advanced the coder state; reload before counting
+          // bits when transform skip is enabled (matches the bit-counting path).
+          if( pcCU->getSlice()->getPPS()->getUseTransformSkip() )
+          {
+            m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[uiDepthCU][CI_CURR_BEST] );
+          }
+
+          UInt    uiBits = xGetIntraBitsQT( tuRecurseWithPU, false, true, false );
+          Double  dCost  = m_pcRdCost->calcRdCost( uiBits, uiDist );
+
+          //----- compare -----
+          if( dCost < dBestCost )
+          {
+            DEBUG_STRING_SWAP(sPU, sMode);
+            dBestCost   = dCost;
+            uiBestDist  = uiDist;
+            uiBestMode  = uiModeList[uiMode];
+
+            // Snapshot reconstruction and per-part coding data of the new best mode.
+            xSetIntraResultChromaQT( pcRecoYuv, tuRecurseWithPU );
+            for (UInt componentIndex = COMPONENT_Cb; componentIndex < numberValidComponents; componentIndex++)
+            {
+              const ComponentID compID = ComponentID(componentIndex);
+              ::memcpy( m_puhQTTempCbf[compID], pcCU->getCbf( compID )+uiPartOffset, uiQPartNum * sizeof( UChar ) );
+              ::memcpy( m_puhQTTempTransformSkipFlag[compID], pcCU->getTransformSkip( compID )+uiPartOffset, uiQPartNum * sizeof( UChar ) );
+              ::memcpy( m_phQTTempCrossComponentPredictionAlpha[compID], pcCU->getCrossComponentPredictionAlpha(compID)+uiPartOffset, uiQPartNum * sizeof( Char ) );
+            }
+          }
+        }
+
+        DEBUG_STRING_APPEND(sDebug, sPU)
+
+        //----- set data -----
+        // Restore the per-part data of the winning mode back into the CU.
+        for (UInt componentIndex = COMPONENT_Cb; componentIndex < numberValidComponents; componentIndex++)
+        {
+          const ComponentID compID = ComponentID(componentIndex);
+          ::memcpy( pcCU->getCbf( compID )+uiPartOffset, m_puhQTTempCbf[compID], uiQPartNum * sizeof( UChar ) );
+          ::memcpy( pcCU->getTransformSkip( compID )+uiPartOffset, m_puhQTTempTransformSkipFlag[compID], uiQPartNum * sizeof( UChar ) );
+          ::memcpy( pcCU->getCrossComponentPredictionAlpha(compID)+uiPartOffset, m_phQTTempCrossComponentPredictionAlpha[compID], uiQPartNum * sizeof( Char ) );
+        }
+      }
+
+      // Copy the winning reconstruction into the picture buffer so following
+      // PU sections can use these samples as intra prediction references.
+      if( ! tuRecurseWithPU.IsLastSection() )
+      {
+        for (UInt ch=COMPONENT_Cb; ch<numberValidComponents; ch++)
+        {
+          const ComponentID compID    = ComponentID(ch);
+          const TComRectangle &tuRect = tuRecurseWithPU.getRect(compID);
+          const UInt  uiCompWidth     = tuRect.width;
+          const UInt  uiCompHeight    = tuRect.height;
+          const UInt  uiZOrder        = pcCU->getZorderIdxInCtu() + tuRecurseWithPU.GetAbsPartIdxTU();
+                Pel*  piDes           = pcCU->getPic()->getPicYuvRec()->getAddr( compID, pcCU->getCtuRsAddr(), uiZOrder );
+          const UInt  uiDesStride     = pcCU->getPic()->getPicYuvRec()->getStride( compID);
+          const Pel*  piSrc           = pcRecoYuv->getAddr( compID, uiPartOffset );
+          const UInt  uiSrcStride     = pcRecoYuv->getStride( compID);
+
+          for( UInt uiY = 0; uiY < uiCompHeight; uiY++, piSrc += uiSrcStride, piDes += uiDesStride )
+          {
+            for( UInt uiX = 0; uiX < uiCompWidth; uiX++ )
+            {
+              piDes[ uiX ] = piSrc[ uiX ];
+            }
+          }
+        }
+      }
+
+      pcCU->setIntraDirSubParts( CHANNEL_TYPE_CHROMA, uiBestMode, uiPartOffset, uiDepthCU+uiInitTrDepth );
+      pcCU->getTotalDistortion      () += uiBestDist;
+    }
+
+  } while (tuRecurseWithPU.nextSection(tuRecurseCU));
+
+  //----- restore context models -----
+
+  if( uiInitTrDepth != 0 )
+  { // set Cbf for all blocks
+    UInt uiCombCbfU = 0;
+    UInt uiCombCbfV = 0;
+    UInt uiPartIdx  = 0;
+    for( UInt uiPart = 0; uiPart < 4; uiPart++, uiPartIdx += uiQNumParts )
+    {
+      uiCombCbfU |= pcCU->getCbf( uiPartIdx, COMPONENT_Cb, 1 );
+      uiCombCbfV |= pcCU->getCbf( uiPartIdx, COMPONENT_Cr, 1 );
+    }
+    // Propagate the combined Cbf flags to every part of the CU.
+    for( UInt uiOffs = 0; uiOffs < 4 * uiQNumParts; uiOffs++ )
+    {
+      pcCU->getCbf( COMPONENT_Cb )[ uiOffs ] |= uiCombCbfU;
+      pcCU->getCbf( COMPONENT_Cr )[ uiOffs ] |= uiCombCbfV;
+    }
+  }
+
+  m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[uiDepthCU][CI_CURR_BEST] );
+}
+
+
+
+
+/** Function for encoding and reconstructing luma/chroma samples of a PCM mode CU.
+ * \param pcCU pointer to current CU
+ * \param uiAbsPartIdx part index
+ * \param pOrg pointer to the original sample array
+ * \param pPCM pointer to the PCM code array (output; samples shifted down to PCM bit depth)
+ * \param pPred pointer to the prediction signal array (zeroed here: PCM has no prediction)
+ * \param pResi pointer to the residual signal array (zeroed here: PCM has no residual)
+ * \param pReco pointer to the reconstructed sample array
+ * \param uiStride stride of the original/prediction/residual/reconstruction sample arrays
+ * \param uiWidth block width
+ * \param uiHeight block height
+ * \param compID colour component identifier
+ * \returns Void
+ */
+Void TEncSearch::xEncPCM (TComDataCU* pcCU, UInt uiAbsPartIdx, Pel* pOrg, Pel* pPCM, Pel* pPred, Pel* pResi, Pel* pReco, UInt uiStride, UInt uiWidth, UInt uiHeight, const ComponentID compID )
+{
+  const UInt uiReconStride = pcCU->getPic()->getPicYuvRec()->getStride(compID);
+  const UInt uiPCMBitDepth = pcCU->getSlice()->getSPS()->getPCMBitDepth(toChannelType(compID));
+  // Destination of this block inside the reconstructed picture buffer.
+  Pel* pRecoPic = pcCU->getPic()->getPicYuvRec()->getAddr(compID, pcCU->getCtuRsAddr(), pcCU->getZorderIdxInCtu()+uiAbsPartIdx);
+
+  // Number of bits dropped when quantising samples to the (lower) PCM bit depth.
+  const Int pcmShiftRight=(g_bitDepth[toChannelType(compID)] - Int(uiPCMBitDepth));
+
+  assert(pcmShiftRight >= 0);
+
+  for( UInt uiY = 0; uiY < uiHeight; uiY++ )
+  {
+    for( UInt uiX = 0; uiX < uiWidth; uiX++ )
+    {
+      // Reset pred and residual
+      pPred[uiX] = 0;
+      pResi[uiX] = 0;
+      // Encode
+      pPCM[uiX] = (pOrg[uiX]>>pcmShiftRight);
+      // Reconstruction
+      pReco   [uiX] = (pPCM[uiX]<<(pcmShiftRight));
+      pRecoPic[uiX] = pReco[uiX];
+    }
+    pPred += uiStride;
+    pResi += uiStride;
+    pPCM += uiWidth;
+    pOrg += uiStride;
+    pReco += uiStride;
+    pRecoPic += uiReconStride;
+  }
+}
+
+
+/**  Function for PCM mode estimation.
+ *   Encodes every valid component as raw PCM samples and sets the CU cost
+ *   from the header bits alone (PCM reconstruction distortion is zero).
+ * \param pcCU      current CU
+ * \param pcOrgYuv  original picture samples
+ * \param pcPredYuv prediction buffer (zeroed per component by xEncPCM)
+ * \param pcResiYuv residual buffer (zeroed per component by xEncPCM)
+ * \param pcRecoYuv reconstruction buffer
+ * \returns Void
+ */
+Void TEncSearch::IPCMSearch( TComDataCU* pcCU, TComYuv* pcOrgYuv, TComYuv* pcPredYuv, TComYuv* pcResiYuv, TComYuv* pcRecoYuv )
+{
+  UInt        uiDepth      = pcCU->getDepth(0);
+  // PCM transmits the samples directly, so reconstruction distortion is zero.
+  const UInt  uiDistortion = 0;
+  UInt        uiBits;
+
+  Double dCost;
+
+  for (UInt ch=0; ch < pcCU->getPic()->getNumberValidComponents(); ch++)
+  {
+    const ComponentID compID  = ComponentID(ch);
+    // Component dimensions follow the chroma subsampling of the picture.
+    const UInt width  = pcCU->getWidth(0)  >> pcCU->getPic()->getComponentScaleX(compID);
+    const UInt height = pcCU->getHeight(0) >> pcCU->getPic()->getComponentScaleY(compID);
+    const UInt stride = pcPredYuv->getStride(compID);
+
+    Pel * pOrig    = pcOrgYuv->getAddr  (compID, 0, width);
+    Pel * pResi    = pcResiYuv->getAddr(compID, 0, width);
+    Pel * pPred    = pcPredYuv->getAddr(compID, 0, width);
+    Pel * pReco    = pcRecoYuv->getAddr(compID, 0, width);
+    Pel * pPCM     = pcCU->getPCMSample (compID);
+
+    xEncPCM ( pcCU, 0, pOrig, pPCM, pPred, pResi, pReco, stride, width, height, compID );
+
+  }
+
+  // Only the intra header contributes bits; the PCM samples are counted elsewhere.
+  m_pcEntropyCoder->resetBits();
+  xEncIntraHeader ( pcCU, uiDepth, 0, true, false);
+  uiBits = m_pcEntropyCoder->getNumberOfWrittenBits();
+
+  dCost = m_pcRdCost->calcRdCost( uiBits, uiDistortion );
+
+  m_pcRDGoOnSbacCoder->load(m_pppcRDSbacCoder[uiDepth][CI_CURR_BEST]);
+
+  pcCU->getTotalBits()       = uiBits;
+  pcCU->getTotalCost()       = dCost;
+  pcCU->getTotalDistortion() = uiDistortion;
+
+  pcCU->copyToPic(uiDepth, 0, 0);
+}
+
+
+
+
+/** Compute the luma prediction error of one inter PU.
+ *  Performs motion compensation into the temporary prediction buffer and
+ *  measures the distortion against the original samples (Hadamard-based
+ *  measure is selected unless transquant bypass is active or HAD-ME is
+ *  disabled in the encoder configuration).
+ * \param pcCU     current CU
+ * \param pcYuvOrg original picture samples
+ * \param iPartIdx PU index within the CU
+ * \param ruiErr   returns the measured luma distortion
+ */
+Void TEncSearch::xGetInterPredictionError( TComDataCU* pcCU, TComYuv* pcYuvOrg, Int iPartIdx, Distortion& ruiErr, Bool /*bHadamard*/ )
+{
+  motionCompensation( pcCU, &m_tmpYuvPred, REF_PIC_LIST_X, iPartIdx );
+
+  UInt uiAbsPartIdx = 0;
+  Int iWidth = 0;
+  Int iHeight = 0;
+  pcCU->getPartIndexAndSize( iPartIdx, uiAbsPartIdx, iWidth, iHeight );
+
+  DistParam cDistParam;
+
+  cDistParam.bApplyWeight = false;
+
+
+  m_pcRdCost->setDistParam( cDistParam, g_bitDepth[CHANNEL_TYPE_LUMA],
+                            pcYuvOrg->getAddr( COMPONENT_Y, uiAbsPartIdx ), pcYuvOrg->getStride(COMPONENT_Y),
+                            m_tmpYuvPred .getAddr( COMPONENT_Y, uiAbsPartIdx ), m_tmpYuvPred.getStride(COMPONENT_Y),
+                            iWidth, iHeight, m_pcEncCfg->getUseHADME() && (pcCU->getCUTransquantBypass(iPartIdx) == 0) );
+
+  ruiErr = cDistParam.DistFunc( &cDistParam );
+}
+
+/** Estimation of the best merge candidate for one PU.
+ *  Builds the merge candidate list, evaluates the prediction error of every
+ *  candidate and keeps the one with the lowest cost (distortion + index bits).
+ * \param pcCU                 current CU
+ * \param pcYuvOrg             original picture samples
+ * \param iPUIdx               PU index within the CU
+ * \param uiInterDir           returns inter direction of the best candidate
+ * \param pacMvField           returns both motion vector fields of the best candidate
+ * \param uiMergeIndex         returns list index of the best candidate
+ * \param ruiCost              returns cost of the best candidate
+ * \param cMvFieldNeighbours   candidate motion vector fields (two entries per candidate)
+ * \param uhInterDirNeighbours candidate inter directions
+ * \param numValidMergeCand    returns the number of valid merge candidates
+ * \returns Void
+ */
+Void TEncSearch::xMergeEstimation( TComDataCU* pcCU, TComYuv* pcYuvOrg, Int iPUIdx, UInt& uiInterDir, TComMvField* pacMvField, UInt& uiMergeIndex, Distortion& ruiCost, TComMvField* cMvFieldNeighbours, UChar* uhInterDirNeighbours, Int& numValidMergeCand )
+{
+  UInt uiAbsPartIdx = 0;
+  Int iWidth = 0;
+  Int iHeight = 0;
+
+  pcCU->getPartIndexAndSize( iPUIdx, uiAbsPartIdx, iWidth, iHeight );
+  UInt uiDepth = pcCU->getDepth( uiAbsPartIdx );
+
+  // Partition size is constant during this call; fetch it once and reuse it
+  // both for the parallel-merge handling and for setAllMvField below.
+  const PartSize partSize = pcCU->getPartitionSize( 0 );
+  if ( pcCU->getSlice()->getPPS()->getLog2ParallelMergeLevelMinus2() && partSize != SIZE_2Nx2N && pcCU->getWidth( 0 ) <= 8 )
+  {
+    // Parallel merge level: for small CUs the candidate list is derived once,
+    // as if the CU were a single 2Nx2N partition, and shared by all PUs.
+    pcCU->setPartSizeSubParts( SIZE_2Nx2N, 0, uiDepth );
+    if ( iPUIdx == 0 )
+    {
+      pcCU->getInterMergeCandidates( 0, 0, cMvFieldNeighbours, uhInterDirNeighbours, numValidMergeCand );
+    }
+    pcCU->setPartSizeSubParts( partSize, 0, uiDepth );
+  }
+  else
+  {
+    pcCU->getInterMergeCandidates( uiAbsPartIdx, iPUIdx, cMvFieldNeighbours, uhInterDirNeighbours, numValidMergeCand );
+  }
+
+  xRestrictBipredMergeCand( pcCU, iPUIdx, cMvFieldNeighbours, uhInterDirNeighbours, numValidMergeCand );
+
+  ruiCost = std::numeric_limits<Distortion>::max();
+  // Loop counter is Int to match numValidMergeCand (avoids signed/unsigned comparison).
+  for( Int iMergeCand = 0; iMergeCand < numValidMergeCand; ++iMergeCand )
+  {
+    Distortion uiCostCand = std::numeric_limits<Distortion>::max();
+    UInt       uiBitsCand = 0;
+
+    pcCU->getCUMvField(REF_PIC_LIST_0)->setAllMvField( cMvFieldNeighbours[0 + 2*iMergeCand], partSize, uiAbsPartIdx, 0, iPUIdx );
+    pcCU->getCUMvField(REF_PIC_LIST_1)->setAllMvField( cMvFieldNeighbours[1 + 2*iMergeCand], partSize, uiAbsPartIdx, 0, iPUIdx );
+
+    xGetInterPredictionError( pcCU, pcYuvOrg, iPUIdx, uiCostCand, m_pcEncCfg->getUseHADME() );
+    // Merge index is unary coded; the last valid index needs one bit fewer.
+    uiBitsCand = iMergeCand + 1;
+    if (iMergeCand == Int(m_pcEncCfg->getMaxNumMergeCand()) - 1)
+    {
+      uiBitsCand--;
+    }
+    uiCostCand = uiCostCand + m_pcRdCost->getCost( uiBitsCand );
+    if ( uiCostCand < ruiCost )
+    {
+      ruiCost = uiCostCand;
+      pacMvField[0] = cMvFieldNeighbours[0 + 2*iMergeCand];
+      pacMvField[1] = cMvFieldNeighbours[1 + 2*iMergeCand];
+      uiInterDir = uhInterDirNeighbours[iMergeCand];
+      uiMergeIndex = iMergeCand;
+    }
+  }
+}
+
+/** Convert bi-predicted merge candidates to uni-prediction when the PU size
+ *  forbids bi-prediction.
+ * \param pcCU                current CU
+ * \param puIdx               PU index within the CU
+ * \param mvFieldNeighbours   candidate motion vector fields (two per candidate);
+ *                            list-1 fields of converted candidates are invalidated
+ * \param interDirNeighbours  candidate inter directions (modified in place)
+ * \param numValidMergeCand   number of valid merge candidates
+ * \returns Void
+ */
+Void TEncSearch::xRestrictBipredMergeCand( TComDataCU* pcCU, UInt puIdx, TComMvField* mvFieldNeighbours, UChar* interDirNeighbours, Int numValidMergeCand )
+{
+  if ( pcCU->isBipredRestriction(puIdx) )
+  {
+    // Loop counter is Int to match numValidMergeCand (avoids signed/unsigned comparison).
+    for( Int mergeCand = 0; mergeCand < numValidMergeCand; ++mergeCand )
+    {
+      if ( interDirNeighbours[mergeCand] == 3 )
+      {
+        // Keep only list 0; invalidate the list-1 motion field (refIdx -1).
+        interDirNeighbours[mergeCand] = 1;
+        mvFieldNeighbours[(mergeCand << 1) + 1].setMvField(TComMv(0,0), -1);
+      }
+    }
+  }
+}
+
+/** search of the best candidate for inter prediction
+ * \param pcCU      current CU
+ * \param pcOrgYuv  original picture samples
+ * \param pcPredYuv prediction buffer
+ * \param pcResiYuv residual buffer
+ * \param pcRecoYuv reconstruction buffer
+ * \param bUseRes   when true, keep the existing residual buffer contents
+ * \returns Void
+ */
+#if AMP_MRG
+Void TEncSearch::predInterSearch( TComDataCU* pcCU, TComYuv* pcOrgYuv, TComYuv* pcPredYuv, TComYuv* pcResiYuv, TComYuv* pcRecoYuv DEBUG_STRING_FN_DECLARE(sDebug), Bool bUseRes, Bool bUseMRG )
+#else
+Void TEncSearch::predInterSearch( TComDataCU* pcCU, TComYuv* pcOrgYuv, TComYuv* pcPredYuv, TComYuv* pcResiYuv, TComYuv* pcRecoYuv, Bool bUseRes )
+#endif
+{
+  for(UInt i=0; i<NUM_REF_PIC_LIST_01; i++)
+  {
+    m_acYuvPred[i].clear();
+  }
+  m_cYuvPredTemp.clear();
+  pcPredYuv->clear();
+
+  if ( !bUseRes )
+  {
+    pcResiYuv->clear();
+  }
+
+  pcRecoYuv->clear();
+
+  TComMv       cMvSrchRngLT;
+  TComMv       cMvSrchRngRB;
+
+  TComMv       cMvZero;
+  TComMv       TempMv; //kolya
+
+  TComMv       cMv[2];
+  TComMv       cMvBi[2];
+  TComMv       cMvTemp[2][33];
+
+  Int          iNumPart    = pcCU->getNumPartitions();
+  Int          iNumPredDir = pcCU->getSlice()->isInterP() ? 1 : 2;
+
+  TComMv       cMvPred[2][33];
+
+  TComMv       cMvPredBi[2][33];
+  Int          aaiMvpIdxBi[2][33];
+
+  Int          aaiMvpIdx[2][33];
+  Int          aaiMvpNum[2][33];
+
+  AMVPInfo     aacAMVPInfo[2][33];
+
+  Int          iRefIdx[2]={0,0}; //If un-initialized, may cause SEGV in bi-directional prediction iterative stage.
+  Int          iRefIdxBi[2];
+
+  UInt         uiPartAddr;
+  Int          iRoiWidth, iRoiHeight;
+
+  UInt         uiMbBits[3] = {1, 1, 0};
+
+  UInt         uiLastMode = 0;
+  Int          iRefStart, iRefEnd;
+
+  PartSize     ePartSize = pcCU->getPartitionSize( 0 );
+
+  Int          bestBiPRefIdxL1 = 0;
+  Int          bestBiPMvpL1 = 0;
+  Distortion   biPDistTemp = std::numeric_limits<Distortion>::max();
+
+  TComMvField cMvFieldNeighbours[MRG_MAX_NUM_CANDS << 1]; // double length for mv of both lists
+  UChar uhInterDirNeighbours[MRG_MAX_NUM_CANDS];
+  Int numValidMergeCand = 0 ;
+
+  for ( Int iPartIdx = 0; iPartIdx < iNumPart; iPartIdx++ )
+  {
+    Distortion   uiCost[2] = { std::numeric_limits<Distortion>::max(), std::numeric_limits<Distortion>::max() };
+    Distortion   uiCostBi  =   std::numeric_limits<Distortion>::max();
+    Distortion   uiCostTemp;
+
+    UInt         uiBits[3];
+    UInt         uiBitsTemp;
+    Distortion   bestBiPDist = std::numeric_limits<Distortion>::max();
+
+    Distortion   uiCostTempL0[MAX_NUM_REF];
+    for (Int iNumRef=0; iNumRef < MAX_NUM_REF; iNumRef++)
+    {
+      uiCostTempL0[iNumRef] = std::numeric_limits<Distortion>::max();
+    }
+    UInt         uiBitsTempL0[MAX_NUM_REF];
+
+    TComMv       mvValidList1;
+    Int          refIdxValidList1 = 0;
+    UInt         bitsValidList1 = MAX_UINT;
+    Distortion   costValidList1 = std::numeric_limits<Distortion>::max();
+
+    xGetBlkBits( ePartSize, pcCU->getSlice()->isInterP(), iPartIdx, uiLastMode, uiMbBits);
+
+    pcCU->getPartIndexAndSize( iPartIdx, uiPartAddr, iRoiWidth, iRoiHeight );
+
+#if AMP_MRG
+    Bool bTestNormalMC = true;
+
+    if ( bUseMRG && pcCU->getWidth( 0 ) > 8 && iNumPart == 2 )
+    {
+      bTestNormalMC = false;
+    }
+
+    if (bTestNormalMC)
+    {
+#endif
+
+    //  Uni-directional prediction
+    for ( Int iRefList = 0; iRefList < iNumPredDir; iRefList++ )
+    {
+      RefPicList  eRefPicList = ( iRefList ? REF_PIC_LIST_1 : REF_PIC_LIST_0 );
+
+      for ( Int iRefIdxTemp = 0; iRefIdxTemp < pcCU->getSlice()->getNumRefIdx(eRefPicList); iRefIdxTemp++ )
+      {
+        uiBitsTemp = uiMbBits[iRefList];
+        if ( pcCU->getSlice()->getNumRefIdx(eRefPicList) > 1 )
+        {
+          uiBitsTemp += iRefIdxTemp+1;
+          if ( iRefIdxTemp == pcCU->getSlice()->getNumRefIdx(eRefPicList)-1 ) uiBitsTemp--;
+        }
+        xEstimateMvPredAMVP( pcCU, pcOrgYuv, iPartIdx, eRefPicList, iRefIdxTemp, cMvPred[iRefList][iRefIdxTemp], false, &biPDistTemp);
+        aaiMvpIdx[iRefList][iRefIdxTemp] = pcCU->getMVPIdx(eRefPicList, uiPartAddr);
+        aaiMvpNum[iRefList][iRefIdxTemp] = pcCU->getMVPNum(eRefPicList, uiPartAddr);
+
+        if(pcCU->getSlice()->getMvdL1ZeroFlag() && iRefList==1 && biPDistTemp < bestBiPDist)
+        {
+          bestBiPDist = biPDistTemp;
+          bestBiPMvpL1 = aaiMvpIdx[iRefList][iRefIdxTemp];
+          bestBiPRefIdxL1 = iRefIdxTemp;
+        }
+
+        uiBitsTemp += m_auiMVPIdxCost[aaiMvpIdx[iRefList][iRefIdxTemp]][AMVP_MAX_NUM_CANDS];
+
+#if GPB_SIMPLE_UNI
+        if ( iRefList == 1 )    // list 1
+        {
+          if ( pcCU->getSlice()->getList1IdxToList0Idx( iRefIdxTemp ) >= 0 )
+          {
+            cMvTemp[1][iRefIdxTemp] = cMvTemp[0][pcCU->getSlice()->getList1IdxToList0Idx( iRefIdxTemp )];
+            uiCostTemp = uiCostTempL0[pcCU->getSlice()->getList1IdxToList0Idx( iRefIdxTemp )];
+            /*first subtract the bit-rate part of the cost of the other list*/
+            uiCostTemp -= m_pcRdCost->getCost( uiBitsTempL0[pcCU->getSlice()->getList1IdxToList0Idx( iRefIdxTemp )] );
+            /*correct the bit-rate part of the current ref*/
+            m_pcRdCost->setPredictor  ( cMvPred[iRefList][iRefIdxTemp] );
+            uiBitsTemp += m_pcRdCost->getBits( cMvTemp[1][iRefIdxTemp].getHor(), cMvTemp[1][iRefIdxTemp].getVer() );
+            /*calculate the correct cost*/
+            uiCostTemp += m_pcRdCost->getCost( uiBitsTemp );
+          }
+          else
+          {
+            xMotionEstimation ( pcCU, pcOrgYuv, iPartIdx, eRefPicList, &cMvPred[iRefList][iRefIdxTemp], iRefIdxTemp, cMvTemp[iRefList][iRefIdxTemp], uiBitsTemp, uiCostTemp );
+          }
+        }
+        else
+        {
+          xMotionEstimation ( pcCU, pcOrgYuv, iPartIdx, eRefPicList, &cMvPred[iRefList][iRefIdxTemp], iRefIdxTemp, cMvTemp[iRefList][iRefIdxTemp], uiBitsTemp, uiCostTemp );
+        }
+#else
+        xMotionEstimation ( pcCU, pcOrgYuv, iPartIdx, eRefPicList, &cMvPred[iRefList][iRefIdxTemp], iRefIdxTemp, cMvTemp[iRefList][iRefIdxTemp], uiBitsTemp, uiCostTemp );
+#endif
+        xCopyAMVPInfo(pcCU->getCUMvField(eRefPicList)->getAMVPInfo(), &aacAMVPInfo[iRefList][iRefIdxTemp]); // must always be done ( also when AMVP_MODE = AM_NONE )
+        xCheckBestMVP(pcCU, eRefPicList, cMvTemp[iRefList][iRefIdxTemp], cMvPred[iRefList][iRefIdxTemp], aaiMvpIdx[iRefList][iRefIdxTemp], uiBitsTemp, uiCostTemp);
+
+        if ( iRefList == 0 )
+        {
+          uiCostTempL0[iRefIdxTemp] = uiCostTemp;
+          uiBitsTempL0[iRefIdxTemp] = uiBitsTemp;
+        }
+        if ( uiCostTemp < uiCost[iRefList] )
+        {
+          uiCost[iRefList] = uiCostTemp;
+          uiBits[iRefList] = uiBitsTemp; // storing for bi-prediction
+
+          // set motion
+          cMv[iRefList]     = cMvTemp[iRefList][iRefIdxTemp];
+          iRefIdx[iRefList] = iRefIdxTemp;
+        }
+
+        if ( iRefList == 1 && uiCostTemp < costValidList1 && pcCU->getSlice()->getList1IdxToList0Idx( iRefIdxTemp ) < 0 )
+        {
+          costValidList1 = uiCostTemp;
+          bitsValidList1 = uiBitsTemp;
+
+          // set motion
+          mvValidList1     = cMvTemp[iRefList][iRefIdxTemp];
+          refIdxValidList1 = iRefIdxTemp;
+        }
+      }
+    }
+
+    //  Bi-directional prediction
+    if ( (pcCU->getSlice()->isInterB()) && (pcCU->isBipredRestriction(iPartIdx) == false) )
+    {
+
+      cMvBi[0] = cMv[0];            cMvBi[1] = cMv[1];
+      iRefIdxBi[0] = iRefIdx[0];    iRefIdxBi[1] = iRefIdx[1];
+
+      ::memcpy(cMvPredBi, cMvPred, sizeof(cMvPred));
+      ::memcpy(aaiMvpIdxBi, aaiMvpIdx, sizeof(aaiMvpIdx));
+
+      UInt uiMotBits[2];
+
+      if(pcCU->getSlice()->getMvdL1ZeroFlag())
+      {
+        xCopyAMVPInfo(&aacAMVPInfo[1][bestBiPRefIdxL1], pcCU->getCUMvField(REF_PIC_LIST_1)->getAMVPInfo());
+        pcCU->setMVPIdxSubParts( bestBiPMvpL1, REF_PIC_LIST_1, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+        aaiMvpIdxBi[1][bestBiPRefIdxL1] = bestBiPMvpL1;
+        cMvPredBi[1][bestBiPRefIdxL1]   = pcCU->getCUMvField(REF_PIC_LIST_1)->getAMVPInfo()->m_acMvCand[bestBiPMvpL1];
+
+        cMvBi[1] = cMvPredBi[1][bestBiPRefIdxL1];
+        iRefIdxBi[1] = bestBiPRefIdxL1;
+        pcCU->getCUMvField( REF_PIC_LIST_1 )->setAllMv( cMvBi[1], ePartSize, uiPartAddr, 0, iPartIdx );
+        pcCU->getCUMvField( REF_PIC_LIST_1 )->setAllRefIdx( iRefIdxBi[1], ePartSize, uiPartAddr, 0, iPartIdx );
+        TComYuv* pcYuvPred = &m_acYuvPred[REF_PIC_LIST_1];
+        motionCompensation( pcCU, pcYuvPred, REF_PIC_LIST_1, iPartIdx );
+
+        uiMotBits[0] = uiBits[0] - uiMbBits[0];
+        uiMotBits[1] = uiMbBits[1];
+
+        if ( pcCU->getSlice()->getNumRefIdx(REF_PIC_LIST_1) > 1 )
+        {
+          uiMotBits[1] += bestBiPRefIdxL1+1;
+          if ( bestBiPRefIdxL1 == pcCU->getSlice()->getNumRefIdx(REF_PIC_LIST_1)-1 ) uiMotBits[1]--;
+        }
+
+        uiMotBits[1] += m_auiMVPIdxCost[aaiMvpIdxBi[1][bestBiPRefIdxL1]][AMVP_MAX_NUM_CANDS];
+
+        uiBits[2] = uiMbBits[2] + uiMotBits[0] + uiMotBits[1];
+
+        cMvTemp[1][bestBiPRefIdxL1] = cMvBi[1];
+      }
+      else
+      {
+        uiMotBits[0] = uiBits[0] - uiMbBits[0];
+        uiMotBits[1] = uiBits[1] - uiMbBits[1];
+        uiBits[2] = uiMbBits[2] + uiMotBits[0] + uiMotBits[1];
+      }
+
+      // 4-times iteration (default)
+      Int iNumIter = 4;
+
+      // fast encoder setting: only one iteration
+      if ( m_pcEncCfg->getUseFastEnc() || pcCU->getSlice()->getMvdL1ZeroFlag())
+      {
+        iNumIter = 1;
+      }
+
+      for ( Int iIter = 0; iIter < iNumIter; iIter++ )
+      {
+        Int         iRefList    = iIter % 2;
+
+        if ( m_pcEncCfg->getUseFastEnc() )
+        {
+          if( uiCost[0] <= uiCost[1] )
+          {
+            iRefList = 1;
+          }
+          else
+          {
+            iRefList = 0;
+          }
+        }
+        else if ( iIter == 0 )
+        {
+          iRefList = 0;
+        }
+        if ( iIter == 0 && !pcCU->getSlice()->getMvdL1ZeroFlag())
+        {
+          pcCU->getCUMvField(RefPicList(1-iRefList))->setAllMv( cMv[1-iRefList], ePartSize, uiPartAddr, 0, iPartIdx );
+          pcCU->getCUMvField(RefPicList(1-iRefList))->setAllRefIdx( iRefIdx[1-iRefList], ePartSize, uiPartAddr, 0, iPartIdx );
+          TComYuv*  pcYuvPred = &m_acYuvPred[1-iRefList];
+          motionCompensation ( pcCU, pcYuvPred, RefPicList(1-iRefList), iPartIdx );
+        }
+
+        RefPicList  eRefPicList = ( iRefList ? REF_PIC_LIST_1 : REF_PIC_LIST_0 );
+
+        if(pcCU->getSlice()->getMvdL1ZeroFlag())
+        {
+          iRefList = 0;
+          eRefPicList = REF_PIC_LIST_0;
+        }
+
+        Bool bChanged = false;
+
+        iRefStart = 0;
+        iRefEnd   = pcCU->getSlice()->getNumRefIdx(eRefPicList)-1;
+
+        for ( Int iRefIdxTemp = iRefStart; iRefIdxTemp <= iRefEnd; iRefIdxTemp++ )
+        {
+          uiBitsTemp = uiMbBits[2] + uiMotBits[1-iRefList];
+          if ( pcCU->getSlice()->getNumRefIdx(eRefPicList) > 1 )
+          {
+            uiBitsTemp += iRefIdxTemp+1;
+            if ( iRefIdxTemp == pcCU->getSlice()->getNumRefIdx(eRefPicList)-1 ) uiBitsTemp--;
+          }
+          uiBitsTemp += m_auiMVPIdxCost[aaiMvpIdxBi[iRefList][iRefIdxTemp]][AMVP_MAX_NUM_CANDS];
+          // call ME
+          xMotionEstimation ( pcCU, pcOrgYuv, iPartIdx, eRefPicList, &cMvPredBi[iRefList][iRefIdxTemp], iRefIdxTemp, cMvTemp[iRefList][iRefIdxTemp], uiBitsTemp, uiCostTemp, true );
+
+          xCopyAMVPInfo(&aacAMVPInfo[iRefList][iRefIdxTemp], pcCU->getCUMvField(eRefPicList)->getAMVPInfo());
+          xCheckBestMVP(pcCU, eRefPicList, cMvTemp[iRefList][iRefIdxTemp], cMvPredBi[iRefList][iRefIdxTemp], aaiMvpIdxBi[iRefList][iRefIdxTemp], uiBitsTemp, uiCostTemp);
+
+          if ( uiCostTemp < uiCostBi )
+          {
+            bChanged = true;
+
+            cMvBi[iRefList]     = cMvTemp[iRefList][iRefIdxTemp];
+            iRefIdxBi[iRefList] = iRefIdxTemp;
+
+            uiCostBi            = uiCostTemp;
+            uiMotBits[iRefList] = uiBitsTemp - uiMbBits[2] - uiMotBits[1-iRefList];
+            uiBits[2]           = uiBitsTemp;
+
+            if(iNumIter!=1)
+            {
+              //  Set motion
+              pcCU->getCUMvField( eRefPicList )->setAllMv( cMvBi[iRefList], ePartSize, uiPartAddr, 0, iPartIdx );
+              pcCU->getCUMvField( eRefPicList )->setAllRefIdx( iRefIdxBi[iRefList], ePartSize, uiPartAddr, 0, iPartIdx );
+
+              TComYuv* pcYuvPred = &m_acYuvPred[iRefList];
+              motionCompensation( pcCU, pcYuvPred, eRefPicList, iPartIdx );
+            }
+          }
+        } // for loop-iRefIdxTemp
+
+        if ( !bChanged )
+        {
+          if ( uiCostBi <= uiCost[0] && uiCostBi <= uiCost[1] )
+          {
+            xCopyAMVPInfo(&aacAMVPInfo[0][iRefIdxBi[0]], pcCU->getCUMvField(REF_PIC_LIST_0)->getAMVPInfo());
+            xCheckBestMVP(pcCU, REF_PIC_LIST_0, cMvBi[0], cMvPredBi[0][iRefIdxBi[0]], aaiMvpIdxBi[0][iRefIdxBi[0]], uiBits[2], uiCostBi);
+            if(!pcCU->getSlice()->getMvdL1ZeroFlag())
+            {
+              xCopyAMVPInfo(&aacAMVPInfo[1][iRefIdxBi[1]], pcCU->getCUMvField(REF_PIC_LIST_1)->getAMVPInfo());
+              xCheckBestMVP(pcCU, REF_PIC_LIST_1, cMvBi[1], cMvPredBi[1][iRefIdxBi[1]], aaiMvpIdxBi[1][iRefIdxBi[1]], uiBits[2], uiCostBi);
+            }
+          }
+          break;
+        }
+      } // for loop-iter
+    } // if (B_SLICE)
+
+#if AMP_MRG
+    } //end if bTestNormalMC
+#endif
+    //  Clear Motion Field
+    pcCU->getCUMvField(REF_PIC_LIST_0)->setAllMvField( TComMvField(), ePartSize, uiPartAddr, 0, iPartIdx );
+    pcCU->getCUMvField(REF_PIC_LIST_1)->setAllMvField( TComMvField(), ePartSize, uiPartAddr, 0, iPartIdx );
+    pcCU->getCUMvField(REF_PIC_LIST_0)->setAllMvd    ( cMvZero,       ePartSize, uiPartAddr, 0, iPartIdx );
+    pcCU->getCUMvField(REF_PIC_LIST_1)->setAllMvd    ( cMvZero,       ePartSize, uiPartAddr, 0, iPartIdx );
+
+    pcCU->setMVPIdxSubParts( -1, REF_PIC_LIST_0, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+    pcCU->setMVPNumSubParts( -1, REF_PIC_LIST_0, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+    pcCU->setMVPIdxSubParts( -1, REF_PIC_LIST_1, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+    pcCU->setMVPNumSubParts( -1, REF_PIC_LIST_1, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+
+    UInt uiMEBits = 0;
+    // Set Motion Field_
+    cMv[1] = mvValidList1;
+    iRefIdx[1] = refIdxValidList1;
+    uiBits[1] = bitsValidList1;
+    uiCost[1] = costValidList1;
+
+#if AMP_MRG
+    if (bTestNormalMC)
+    {
+#endif
+    if ( uiCostBi <= uiCost[0] && uiCostBi <= uiCost[1])
+    {
+      uiLastMode = 2;
+      pcCU->getCUMvField(REF_PIC_LIST_0)->setAllMv( cMvBi[0], ePartSize, uiPartAddr, 0, iPartIdx );
+      pcCU->getCUMvField(REF_PIC_LIST_0)->setAllRefIdx( iRefIdxBi[0], ePartSize, uiPartAddr, 0, iPartIdx );
+      pcCU->getCUMvField(REF_PIC_LIST_1)->setAllMv( cMvBi[1], ePartSize, uiPartAddr, 0, iPartIdx );
+      pcCU->getCUMvField(REF_PIC_LIST_1)->setAllRefIdx( iRefIdxBi[1], ePartSize, uiPartAddr, 0, iPartIdx );
+
+      TempMv = cMvBi[0] - cMvPredBi[0][iRefIdxBi[0]];
+      pcCU->getCUMvField(REF_PIC_LIST_0)->setAllMvd    ( TempMv,                 ePartSize, uiPartAddr, 0, iPartIdx );
+
+      TempMv = cMvBi[1] - cMvPredBi[1][iRefIdxBi[1]];
+      pcCU->getCUMvField(REF_PIC_LIST_1)->setAllMvd    ( TempMv,                 ePartSize, uiPartAddr, 0, iPartIdx );
+
+      pcCU->setInterDirSubParts( 3, uiPartAddr, iPartIdx, pcCU->getDepth(0) );
+
+      pcCU->setMVPIdxSubParts( aaiMvpIdxBi[0][iRefIdxBi[0]], REF_PIC_LIST_0, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+      pcCU->setMVPNumSubParts( aaiMvpNum[0][iRefIdxBi[0]], REF_PIC_LIST_0, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+      pcCU->setMVPIdxSubParts( aaiMvpIdxBi[1][iRefIdxBi[1]], REF_PIC_LIST_1, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+      pcCU->setMVPNumSubParts( aaiMvpNum[1][iRefIdxBi[1]], REF_PIC_LIST_1, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+
+      uiMEBits = uiBits[2];
+    }
+    else if ( uiCost[0] <= uiCost[1] )
+    {
+      uiLastMode = 0;
+      pcCU->getCUMvField(REF_PIC_LIST_0)->setAllMv( cMv[0], ePartSize, uiPartAddr, 0, iPartIdx );
+      pcCU->getCUMvField(REF_PIC_LIST_0)->setAllRefIdx( iRefIdx[0], ePartSize, uiPartAddr, 0, iPartIdx );
+
+      TempMv = cMv[0] - cMvPred[0][iRefIdx[0]];
+      pcCU->getCUMvField(REF_PIC_LIST_0)->setAllMvd    ( TempMv,                 ePartSize, uiPartAddr, 0, iPartIdx );
+
+      pcCU->setInterDirSubParts( 1, uiPartAddr, iPartIdx, pcCU->getDepth(0) );
+
+      pcCU->setMVPIdxSubParts( aaiMvpIdx[0][iRefIdx[0]], REF_PIC_LIST_0, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+      pcCU->setMVPNumSubParts( aaiMvpNum[0][iRefIdx[0]], REF_PIC_LIST_0, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+
+      uiMEBits = uiBits[0];
+    }
+    else
+    {
+      uiLastMode = 1;
+      pcCU->getCUMvField(REF_PIC_LIST_1)->setAllMv( cMv[1], ePartSize, uiPartAddr, 0, iPartIdx );
+      pcCU->getCUMvField(REF_PIC_LIST_1)->setAllRefIdx( iRefIdx[1], ePartSize, uiPartAddr, 0, iPartIdx );
+
+      TempMv = cMv[1] - cMvPred[1][iRefIdx[1]];
+      pcCU->getCUMvField(REF_PIC_LIST_1)->setAllMvd    ( TempMv,                 ePartSize, uiPartAddr, 0, iPartIdx );
+
+      pcCU->setInterDirSubParts( 2, uiPartAddr, iPartIdx, pcCU->getDepth(0) );
+
+      pcCU->setMVPIdxSubParts( aaiMvpIdx[1][iRefIdx[1]], REF_PIC_LIST_1, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+      pcCU->setMVPNumSubParts( aaiMvpNum[1][iRefIdx[1]], REF_PIC_LIST_1, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+
+      uiMEBits = uiBits[1];
+    }
+#if AMP_MRG
+    } // end if bTestNormalMC
+#endif
+
+    if ( pcCU->getPartitionSize( uiPartAddr ) != SIZE_2Nx2N )
+    {
+      UInt uiMRGInterDir = 0;
+      TComMvField cMRGMvField[2];
+      UInt uiMRGIndex = 0;
+
+      UInt uiMEInterDir = 0;
+      TComMvField cMEMvField[2];
+
+      m_pcRdCost->getMotionCost( true, 0, pcCU->getCUTransquantBypass(uiPartAddr) );
+
+#if AMP_MRG
+      // calculate ME cost
+      Distortion uiMEError = std::numeric_limits<Distortion>::max();
+      Distortion uiMECost  = std::numeric_limits<Distortion>::max();
+
+      if (bTestNormalMC)
+      {
+        xGetInterPredictionError( pcCU, pcOrgYuv, iPartIdx, uiMEError, m_pcEncCfg->getUseHADME() );
+        uiMECost = uiMEError + m_pcRdCost->getCost( uiMEBits );
+      }
+#else
+      // calculate ME cost
+      Distortion uiMEError = std::numeric_limits<Distortion>::max();
+      xGetInterPredictionError( pcCU, pcOrgYuv, iPartIdx, uiMEError, m_pcEncCfg->getUseHADME() );
+      Distortion uiMECost = uiMEError + m_pcRdCost->getCost( uiMEBits );
+#endif
+      // save ME result.
+      uiMEInterDir = pcCU->getInterDir( uiPartAddr );
+      pcCU->getMvField( pcCU, uiPartAddr, REF_PIC_LIST_0, cMEMvField[0] );
+      pcCU->getMvField( pcCU, uiPartAddr, REF_PIC_LIST_1, cMEMvField[1] );
+
+      // find Merge result
+      Distortion uiMRGCost = std::numeric_limits<Distortion>::max();
+
+      xMergeEstimation( pcCU, pcOrgYuv, iPartIdx, uiMRGInterDir, cMRGMvField, uiMRGIndex, uiMRGCost, cMvFieldNeighbours, uhInterDirNeighbours, numValidMergeCand);
+
+      if ( uiMRGCost < uiMECost )
+      {
+        // set Merge result
+        pcCU->setMergeFlagSubParts ( true,          uiPartAddr, iPartIdx, pcCU->getDepth( uiPartAddr ) );
+        pcCU->setMergeIndexSubParts( uiMRGIndex,    uiPartAddr, iPartIdx, pcCU->getDepth( uiPartAddr ) );
+        pcCU->setInterDirSubParts  ( uiMRGInterDir, uiPartAddr, iPartIdx, pcCU->getDepth( uiPartAddr ) );
+        pcCU->getCUMvField( REF_PIC_LIST_0 )->setAllMvField( cMRGMvField[0], ePartSize, uiPartAddr, 0, iPartIdx );
+        pcCU->getCUMvField( REF_PIC_LIST_1 )->setAllMvField( cMRGMvField[1], ePartSize, uiPartAddr, 0, iPartIdx );
+
+        pcCU->getCUMvField(REF_PIC_LIST_0)->setAllMvd    ( cMvZero,            ePartSize, uiPartAddr, 0, iPartIdx );
+        pcCU->getCUMvField(REF_PIC_LIST_1)->setAllMvd    ( cMvZero,            ePartSize, uiPartAddr, 0, iPartIdx );
+
+        pcCU->setMVPIdxSubParts( -1, REF_PIC_LIST_0, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+        pcCU->setMVPNumSubParts( -1, REF_PIC_LIST_0, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+        pcCU->setMVPIdxSubParts( -1, REF_PIC_LIST_1, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+        pcCU->setMVPNumSubParts( -1, REF_PIC_LIST_1, uiPartAddr, iPartIdx, pcCU->getDepth(uiPartAddr));
+      }
+      else
+      {
+        // set ME result
+        pcCU->setMergeFlagSubParts( false,        uiPartAddr, iPartIdx, pcCU->getDepth( uiPartAddr ) );
+        pcCU->setInterDirSubParts ( uiMEInterDir, uiPartAddr, iPartIdx, pcCU->getDepth( uiPartAddr ) );
+        pcCU->getCUMvField( REF_PIC_LIST_0 )->setAllMvField( cMEMvField[0], ePartSize, uiPartAddr, 0, iPartIdx );
+        pcCU->getCUMvField( REF_PIC_LIST_1 )->setAllMvField( cMEMvField[1], ePartSize, uiPartAddr, 0, iPartIdx );
+      }
+    }
+
+    //  MC
+    motionCompensation ( pcCU, pcPredYuv, REF_PIC_LIST_X, iPartIdx );
+
+  } //  end of for ( Int iPartIdx = 0; iPartIdx < iNumPart; iPartIdx++ )
+
+  setWpScalingDistParam( pcCU, -1, REF_PIC_LIST_X );
+
+  return;
+}
+
+
+// AMVP
+// Select the best AMVP (advanced MV predictor) candidate for one PU.
+// Fills the candidate list for (eRefPicList, iRefIdx) unless bFilled is set,
+// scores every candidate via xGetTemplateCost (SAD + MVP-index bits), and
+// writes the winner to rcMvPred plus its index/count into the CU sub-parts.
+// puiDistBiP (out): best candidate cost, reused later by the bi-pred search.
+Void TEncSearch::xEstimateMvPredAMVP( TComDataCU* pcCU, TComYuv* pcOrgYuv, UInt uiPartIdx, RefPicList eRefPicList, Int iRefIdx, TComMv& rcMvPred, Bool bFilled, Distortion* puiDistBiP )
+{
+  AMVPInfo*  pcAMVPInfo = pcCU->getCUMvField(eRefPicList)->getAMVPInfo();
+
+  TComMv     cBestMv;
+  Int        iBestIdx   = 0;
+  TComMv     cZeroMv;
+  TComMv     cMvPred;
+  Distortion uiBestCost = std::numeric_limits<Distortion>::max();
+  UInt       uiPartAddr = 0;
+  Int        iRoiWidth, iRoiHeight;
+  Int        i;
+
+  pcCU->getPartIndexAndSize( uiPartIdx, uiPartAddr, iRoiWidth, iRoiHeight );
+  // Fill the MV Candidates
+  if (!bFilled)
+  {
+    pcCU->fillMvpCand( uiPartIdx, uiPartAddr, eRefPicList, iRefIdx, pcAMVPInfo );
+  }
+
+  // initialize Mvp index & Mvp
+  iBestIdx = 0;
+  cBestMv  = pcAMVPInfo->m_acMvCand[0];
+  if (pcAMVPInfo->iN <= 1)
+  {
+    // Zero or one candidate: nothing to compare, just record candidate 0.
+    rcMvPred = cBestMv;
+
+    pcCU->setMVPIdxSubParts( iBestIdx, eRefPicList, uiPartAddr, uiPartIdx, pcCU->getDepth(uiPartAddr));
+    pcCU->setMVPNumSubParts( pcAMVPInfo->iN, eRefPicList, uiPartAddr, uiPartIdx, pcCU->getDepth(uiPartAddr));
+
+    if(pcCU->getSlice()->getMvdL1ZeroFlag() && eRefPicList==REF_PIC_LIST_1)
+    {
+      // L1 MVD is forced to zero: still need the lone candidate's cost for bi-pred.
+      (*puiDistBiP) = xGetTemplateCost( pcCU, uiPartIdx, uiPartAddr, pcOrgYuv, &m_cYuvPredTemp, rcMvPred, 0, AMVP_MAX_NUM_CANDS, eRefPicList, iRefIdx, iRoiWidth, iRoiHeight);
+    }
+    return;
+  }
+
+  if (bFilled)
+  {
+    // Candidates were computed earlier; reuse the MVP index already stored in the CU.
+    assert(pcCU->getMVPIdx(eRefPicList,uiPartAddr) >= 0);
+    rcMvPred = pcAMVPInfo->m_acMvCand[pcCU->getMVPIdx(eRefPicList,uiPartAddr)];
+    return;
+  }
+
+  m_cYuvPredTemp.clear();
+  //-- Check Minimum Cost.
+  for ( i = 0 ; i < pcAMVPInfo->iN; i++)
+  {
+    Distortion uiTmpCost;
+    uiTmpCost = xGetTemplateCost( pcCU, uiPartIdx, uiPartAddr, pcOrgYuv, &m_cYuvPredTemp, pcAMVPInfo->m_acMvCand[i], i, AMVP_MAX_NUM_CANDS, eRefPicList, iRefIdx, iRoiWidth, iRoiHeight);
+    if ( uiBestCost > uiTmpCost )
+    {
+      uiBestCost = uiTmpCost;
+      cBestMv   = pcAMVPInfo->m_acMvCand[i];
+      iBestIdx  = i;
+      (*puiDistBiP) = uiTmpCost;
+    }
+  }
+
+  m_cYuvPredTemp.clear();
+
+  // Setting Best MVP
+  rcMvPred = cBestMv;
+  pcCU->setMVPIdxSubParts( iBestIdx, eRefPicList, uiPartAddr, uiPartIdx, pcCU->getDepth(uiPartAddr));
+  pcCU->setMVPNumSubParts( pcAMVPInfo->iN, eRefPicList, uiPartAddr, uiPartIdx, pcCU->getDepth(uiPartAddr));
+  return;
+}
+
+// Return the number of bits needed to signal MVP candidate index iIdx when
+// iNum candidates exist (truncated-unary style: index 0 costs 1 bit, and the
+// last index omits the terminating bit).
+UInt TEncSearch::xGetMvpIdxBits(Int iIdx, Int iNum)
+{
+  assert(iIdx >= 0 && iNum >= 0 && iIdx < iNum);
+
+  if (iNum == 1)
+  {
+    // A single candidate needs no signalling at all.
+    return 0;
+  }
+
+  UInt uiLength = 1;
+  Int iTemp = iIdx;
+  if ( iTemp == 0 )
+  {
+    return uiLength;
+  }
+
+  // Only a non-final index needs the extra terminating bit.
+  Bool bCodeLast = ( iNum-1 > iTemp );
+
+  uiLength += (iTemp-1);
+
+  if( bCodeLast )
+  {
+    uiLength++;
+  }
+
+  return uiLength;
+}
+
+// Fill uiBlkBit[3] with the estimated signalling bits for the three inter
+// directions (0: uni-L0, 1: uni-L1, 2: bi) of partition iPartIdx, given the
+// CU partition mode, slice type, and the previously chosen mode uiLastMode.
+// For the rectangular/asymmetric modes the values come from fixed lookup
+// tables indexed by [iPartIdx][uiLastMode].
+Void TEncSearch::xGetBlkBits( PartSize eCUMode, Bool bPSlice, Int iPartIdx, UInt uiLastMode, UInt uiBlkBit[3])
+{
+  if ( eCUMode == SIZE_2Nx2N )
+  {
+    uiBlkBit[0] = (! bPSlice) ? 3 : 1;
+    uiBlkBit[1] = 3;
+    uiBlkBit[2] = 5;
+  }
+  else if ( (eCUMode == SIZE_2NxN || eCUMode == SIZE_2NxnU) || eCUMode == SIZE_2NxnD )
+  {
+    // Horizontal splits: per-partition, per-last-mode bit table.
+    UInt aauiMbBits[2][3][3] = { { {0,0,3}, {0,0,0}, {0,0,0} } , { {5,7,7}, {7,5,7}, {9-3,9-3,9-3} } };
+    if ( bPSlice )
+    {
+      uiBlkBit[0] = 3;
+      uiBlkBit[1] = 0;
+      uiBlkBit[2] = 0;
+    }
+    else
+    {
+      ::memcpy( uiBlkBit, aauiMbBits[iPartIdx][uiLastMode], 3*sizeof(UInt) );
+    }
+  }
+  else if ( (eCUMode == SIZE_Nx2N || eCUMode == SIZE_nLx2N) || eCUMode == SIZE_nRx2N )
+  {
+    // Vertical splits: same scheme with a different table.
+    UInt aauiMbBits[2][3][3] = { { {0,2,3}, {0,0,0}, {0,0,0} } , { {5,7,7}, {7-2,7-2,9-2}, {9-3,9-3,9-3} } };
+    if ( bPSlice )
+    {
+      uiBlkBit[0] = 3;
+      uiBlkBit[1] = 0;
+      uiBlkBit[2] = 0;
+    }
+    else
+    {
+      ::memcpy( uiBlkBit, aauiMbBits[iPartIdx][uiLastMode], 3*sizeof(UInt) );
+    }
+  }
+  else if ( eCUMode == SIZE_NxN )
+  {
+    uiBlkBit[0] = (! bPSlice) ? 3 : 1;
+    uiBlkBit[1] = 3;
+    uiBlkBit[2] = 5;
+  }
+  else
+  {
+    // Unreachable for any valid PartSize.
+    printf("Wrong!\n");
+    assert( 0 );
+  }
+}
+
+// Copy the candidate count and the first iN MV candidates from pSrc to pDst.
+Void TEncSearch::xCopyAMVPInfo (AMVPInfo* pSrc, AMVPInfo* pDst)
+{
+  pDst->iN = pSrc->iN;
+  for (Int i = 0; i < pSrc->iN; i++)
+  {
+    pDst->m_acMvCand[i] = pSrc->m_acMvCand[i];
+  }
+}
+
+// After motion estimation, re-check whether a different AMVP candidate would
+// encode the final MV cMv with fewer bits. If so, update the predictor
+// (rcMvPred), the MVP index (riMVPIdx), and adjust ruiBits/ruiCost by the
+// bit-count delta. No-op when fewer than two candidates exist.
+Void TEncSearch::xCheckBestMVP ( TComDataCU* pcCU, RefPicList eRefPicList, TComMv cMv, TComMv& rcMvPred, Int& riMVPIdx, UInt& ruiBits, Distortion& ruiCost )
+{
+  AMVPInfo* pcAMVPInfo = pcCU->getCUMvField(eRefPicList)->getAMVPInfo();
+
+  assert(pcAMVPInfo->m_acMvCand[riMVPIdx] == rcMvPred);
+
+  if (pcAMVPInfo->iN < 2) return;
+
+  m_pcRdCost->getMotionCost( true, 0, pcCU->getCUTransquantBypass(0) );
+  m_pcRdCost->setCostScale ( 0    );
+
+  Int iBestMVPIdx = riMVPIdx;
+
+  // Bits for the current predictor: MVD bits plus MVP-index bits.
+  m_pcRdCost->setPredictor( rcMvPred );
+  Int iOrgMvBits  = m_pcRdCost->getBits(cMv.getHor(), cMv.getVer());
+  iOrgMvBits += m_auiMVPIdxCost[riMVPIdx][AMVP_MAX_NUM_CANDS];
+  Int iBestMvBits = iOrgMvBits;
+
+  for (Int iMVPIdx = 0; iMVPIdx < pcAMVPInfo->iN; iMVPIdx++)
+  {
+    if (iMVPIdx == riMVPIdx) continue;
+
+    m_pcRdCost->setPredictor( pcAMVPInfo->m_acMvCand[iMVPIdx] );
+
+    Int iMvBits = m_pcRdCost->getBits(cMv.getHor(), cMv.getVer());
+    iMvBits += m_auiMVPIdxCost[iMVPIdx][AMVP_MAX_NUM_CANDS];
+
+    if (iMvBits < iBestMvBits)
+    {
+      iBestMvBits = iMvBits;
+      iBestMVPIdx = iMVPIdx;
+    }
+  }
+
+  if (iBestMVPIdx != riMVPIdx)  //if changed
+  {
+    rcMvPred = pcAMVPInfo->m_acMvCand[iBestMVPIdx];
+
+    riMVPIdx = iBestMVPIdx;
+    UInt uiOrgBits = ruiBits;
+    // Replace the old predictor's bit contribution and re-derive the RD cost.
+    ruiBits = uiOrgBits - iOrgMvBits + iBestMvBits;
+    ruiCost = (ruiCost - m_pcRdCost->getCost( uiOrgBits ))  + m_pcRdCost->getCost( ruiBits );
+  }
+}
+
+
+// Cost of one AMVP candidate: generate the luma prediction for cMvCand
+// (with weighted prediction applied on weighted P-slices), then return
+// SAD against the original block combined with the MVP-index bit cost
+// via calcRdCost.
+Distortion TEncSearch::xGetTemplateCost( TComDataCU* pcCU,
+                                         UInt        uiPartIdx,
+                                         UInt        uiPartAddr,
+                                         TComYuv*    pcOrgYuv,
+                                         TComYuv*    pcTemplateCand,
+                                         TComMv      cMvCand,
+                                         Int         iMVPIdx,
+                                         Int         iMVPNum,
+                                         RefPicList  eRefPicList,
+                                         Int         iRefIdx,
+                                         Int         iSizeX,
+                                         Int         iSizeY
+                                         )
+{
+  Distortion uiCost = std::numeric_limits<Distortion>::max();
+
+  TComPicYuv* pcPicYuvRef = pcCU->getSlice()->getRefPic( eRefPicList, iRefIdx )->getPicYuvRec();
+
+  // Restrict the candidate MV to the valid picture area.
+  pcCU->clipMv( cMvCand );
+
+  // prediction pattern
+  if ( pcCU->getSlice()->getPPS()->getUseWP() && pcCU->getSlice()->getSliceType()==P_SLICE )
+  {
+    // bi(=true) here requests the intermediate (unweighted) prediction buffer.
+    xPredInterBlk( COMPONENT_Y, pcCU, pcPicYuvRef, uiPartAddr, &cMvCand, iSizeX, iSizeY, pcTemplateCand, true );
+  }
+  else
+  {
+    xPredInterBlk( COMPONENT_Y, pcCU, pcPicYuvRef, uiPartAddr, &cMvCand, iSizeX, iSizeY, pcTemplateCand, false );
+  }
+
+  if ( pcCU->getSlice()->getPPS()->getUseWP() && pcCU->getSlice()->getSliceType()==P_SLICE )
+  {
+    // Apply uni-directional weighted prediction in place.
+    xWeightedPredictionUni( pcCU, pcTemplateCand, uiPartAddr, iSizeX, iSizeY, eRefPicList, pcTemplateCand, iRefIdx );
+  }
+
+  // calc distortion
+
+  uiCost = m_pcRdCost->getDistPart( g_bitDepth[CHANNEL_TYPE_LUMA], pcTemplateCand->getAddr(COMPONENT_Y, uiPartAddr), pcTemplateCand->getStride(COMPONENT_Y), pcOrgYuv->getAddr(COMPONENT_Y, uiPartAddr), pcOrgYuv->getStride(COMPONENT_Y), iSizeX, iSizeY, COMPONENT_Y, DF_SAD );
+  uiCost = (UInt) m_pcRdCost->calcRdCost( m_auiMVPIdxCost[iMVPIdx][iMVPNum], uiCost, false, DF_SAD );
+  return uiCost;
+}
+
+
+
+
+// Motion estimation for one PU against reference iRefIdxPred of eRefPicList.
+// Performs integer-pel search (full or fast, depending on m_iFastSearch and
+// bBi) followed by half/quarter-pel refinement; returns the final quarter-pel
+// MV in rcMv and accumulates MV bits/RD cost into ruiBits/ruiCost.
+// bBi: search for the second hypothesis of bi-prediction against a
+// high-frequency-removed residual target, weighting distortion by 0.5.
+Void TEncSearch::xMotionEstimation( TComDataCU* pcCU, TComYuv* pcYuvOrg, Int iPartIdx, RefPicList eRefPicList, TComMv* pcMvPred, Int iRefIdxPred, TComMv& rcMv, UInt& ruiBits, Distortion& ruiCost, Bool bBi  )
+{
+  UInt          uiPartAddr;
+  Int           iRoiWidth;
+  Int           iRoiHeight;
+
+  TComMv        cMvHalf, cMvQter;
+  TComMv        cMvSrchRngLT;
+  TComMv        cMvSrchRngRB;
+
+  TComYuv*      pcYuv = pcYuvOrg;
+
+  assert(eRefPicList < MAX_NUM_REF_LIST_ADAPT_SR && iRefIdxPred<Int(MAX_IDX_ADAPT_SR));
+  // Per-list/per-reference adaptive search range.
+  m_iSearchRange = m_aaiAdaptSR[eRefPicList][iRefIdxPred];
+
+  Int           iSrchRng      = ( bBi ? m_bipredSearchRange : m_iSearchRange );
+  TComPattern   tmpPattern;
+  TComPattern*  pcPatternKey  = &tmpPattern;
+
+  Double        fWeight       = 1.0;
+
+  pcCU->getPartIndexAndSize( iPartIdx, uiPartAddr, iRoiWidth, iRoiHeight );
+
+  if ( bBi )
+  {
+    // Bi-pred: search against (2*org - otherListPrediction) so the pair of
+    // hypotheses averages back toward the original; distortion counts half.
+    TComYuv*  pcYuvOther = &m_acYuvPred[1-(Int)eRefPicList];
+    pcYuv                = &m_cYuvPredTemp;
+
+    pcYuvOrg->copyPartToPartYuv( pcYuv, uiPartAddr, iRoiWidth, iRoiHeight );
+
+    pcYuv->removeHighFreq( pcYuvOther, uiPartAddr, iRoiWidth, iRoiHeight );
+
+    fWeight = 0.5;
+  }
+
+  //  Search key pattern initialization
+  pcPatternKey->initPattern( pcYuv->getAddr  ( COMPONENT_Y, uiPartAddr ),
+                             iRoiWidth,
+                             iRoiHeight,
+                             pcYuv->getStride(COMPONENT_Y) );
+
+  Pel*        piRefY      = pcCU->getSlice()->getRefPic( eRefPicList, iRefIdxPred )->getPicYuvRec()->getAddr( COMPONENT_Y, pcCU->getCtuRsAddr(), pcCU->getZorderIdxInCtu() + uiPartAddr );
+  Int         iRefStride  = pcCU->getSlice()->getRefPic( eRefPicList, iRefIdxPred )->getPicYuvRec()->getStride(COMPONENT_Y);
+
+  TComMv      cMvPred = *pcMvPred;
+
+  // Center the search window on the current MV for bi-pred refinement,
+  // otherwise on the predictor.
+  if ( bBi )  xSetSearchRange   ( pcCU, rcMv   , iSrchRng, cMvSrchRngLT, cMvSrchRngRB );
+  else        xSetSearchRange   ( pcCU, cMvPred, iSrchRng, cMvSrchRngLT, cMvSrchRngRB );
+
+  m_pcRdCost->getMotionCost( true, 0, pcCU->getCUTransquantBypass(uiPartAddr) );
+
+  m_pcRdCost->setPredictor  ( *pcMvPred );
+  m_pcRdCost->setCostScale  ( 2 );
+
+  setWpScalingDistParam( pcCU, iRefIdxPred, eRefPicList );
+  //  Do integer search
+  if ( !m_iFastSearch || bBi )
+  {
+    xPatternSearch      ( pcPatternKey, piRefY, iRefStride, &cMvSrchRngLT, &cMvSrchRngRB, rcMv, ruiCost );
+  }
+  else
+  {
+    rcMv = *pcMvPred;
+    const TComMv *pIntegerMv2Nx2NPred=0;
+    // Reuse the 2Nx2N integer MV as an extra predictor for sub-partitions.
+    if (pcCU->getPartitionSize(0) != SIZE_2Nx2N || pcCU->getDepth(0) != 0)
+    {
+      pIntegerMv2Nx2NPred = &(m_integerMv2Nx2N[eRefPicList][iRefIdxPred]);
+    }
+    xPatternSearchFast  ( pcCU, pcPatternKey, piRefY, iRefStride, &cMvSrchRngLT, &cMvSrchRngRB, rcMv, ruiCost, pIntegerMv2Nx2NPred );
+    if (pcCU->getPartitionSize(0) == SIZE_2Nx2N)
+    {
+      m_integerMv2Nx2N[eRefPicList][iRefIdxPred] = rcMv;
+    }
+  }
+
+  m_pcRdCost->getMotionCost( true, 0, pcCU->getCUTransquantBypass(uiPartAddr) );
+  m_pcRdCost->setCostScale ( 1 );
+
+  const Bool bIsLosslessCoded = pcCU->getCUTransquantBypass(uiPartAddr) != 0;
+  xPatternSearchFracDIF( bIsLosslessCoded, pcPatternKey, piRefY, iRefStride, &rcMv, cMvHalf, cMvQter, ruiCost ,bBi );
+
+  m_pcRdCost->setCostScale( 0 );
+  // Combine integer, half- and quarter-pel parts into one quarter-pel MV.
+  rcMv <<= 2;
+  rcMv += (cMvHalf <<= 1);
+  rcMv +=  cMvQter;
+
+  UInt uiMvBits = m_pcRdCost->getBits( rcMv.getHor(), rcMv.getVer() );
+
+  ruiBits      += uiMvBits;
+  // Re-weight distortion (0.5 for bi-pred) and add the full MV-bit cost.
+  ruiCost       = (Distortion)( floor( fWeight * ( (Double)ruiCost - (Double)m_pcRdCost->getCost( uiMvBits ) ) ) + (Double)m_pcRdCost->getCost( ruiBits ) );
+}
+
+
+
+
+// Derive the integer-pel search window (LT/RB corners) of +-iSrchRng around
+// the clipped predictor cMvPred. Work is done in quarter-pel units (shift 2)
+// so the corners can be clipped to the picture, then converted back.
+Void TEncSearch::xSetSearchRange ( TComDataCU* pcCU, TComMv& cMvPred, Int iSrchRng, TComMv& rcMvSrchRngLT, TComMv& rcMvSrchRngRB )
+{
+  Int  iMvShift = 2;
+  TComMv cTmpMvPred = cMvPred;
+  pcCU->clipMv( cTmpMvPred );
+
+  rcMvSrchRngLT.setHor( cTmpMvPred.getHor() - (iSrchRng << iMvShift) );
+  rcMvSrchRngLT.setVer( cTmpMvPred.getVer() - (iSrchRng << iMvShift) );
+
+  rcMvSrchRngRB.setHor( cTmpMvPred.getHor() + (iSrchRng << iMvShift) );
+  rcMvSrchRngRB.setVer( cTmpMvPred.getVer() + (iSrchRng << iMvShift) );
+  // Keep both corners inside the valid MV area before the unit conversion.
+  pcCU->clipMv        ( rcMvSrchRngLT );
+  pcCU->clipMv        ( rcMvSrchRngRB );
+
+  rcMvSrchRngLT >>= iMvShift;
+  rcMvSrchRngRB >>= iMvShift;
+}
+
+
+
+
+// Exhaustive integer-pel search: evaluate SAD + motion-bit cost at every
+// position inside the [LT, RB] window and return the best MV in rcMv.
+// ruiSAD gets the pure distortion (motion cost subtracted back out).
+Void TEncSearch::xPatternSearch( TComPattern* pcPatternKey, Pel* piRefY, Int iRefStride, TComMv* pcMvSrchRngLT, TComMv* pcMvSrchRngRB, TComMv& rcMv, Distortion& ruiSAD )
+{
+  Int   iSrchRngHorLeft   = pcMvSrchRngLT->getHor();
+  Int   iSrchRngHorRight  = pcMvSrchRngRB->getHor();
+  Int   iSrchRngVerTop    = pcMvSrchRngLT->getVer();
+  Int   iSrchRngVerBottom = pcMvSrchRngRB->getVer();
+
+  Distortion  uiSad;
+  Distortion  uiSadBest = std::numeric_limits<Distortion>::max();
+  Int         iBestX = 0;
+  Int         iBestY = 0;
+
+  Pel*  piRefSrch;
+
+  //-- jclee for using the SAD function pointer
+  m_pcRdCost->setDistParam( pcPatternKey, piRefY, iRefStride,  m_cDistParam );
+
+  // fast encoder decision: use subsampled SAD for integer ME
+  if ( m_pcEncCfg->getUseFastEnc() )
+  {
+    if ( m_cDistParam.iRows > 8 )
+    {
+      // Evaluate every other row only (iSubShift = 1).
+      m_cDistParam.iSubShift = 1;
+    }
+  }
+
+  // Move the reference pointer to the first row of the search window.
+  piRefY += (iSrchRngVerTop * iRefStride);
+  for ( Int y = iSrchRngVerTop; y <= iSrchRngVerBottom; y++ )
+  {
+    for ( Int x = iSrchRngHorLeft; x <= iSrchRngHorRight; x++ )
+    {
+      //  find min. distortion position
+      piRefSrch = piRefY + x;
+      m_cDistParam.pCur = piRefSrch;
+
+      setDistParamComp(COMPONENT_Y);
+
+      m_cDistParam.bitDepth = g_bitDepth[CHANNEL_TYPE_LUMA];
+      uiSad = m_cDistParam.DistFunc( &m_cDistParam );
+
+      // motion cost
+      uiSad += m_pcRdCost->getCost( x, y );
+
+      if ( uiSad < uiSadBest )
+      {
+        uiSadBest = uiSad;
+        iBestX    = x;
+        iBestY    = y;
+      }
+    }
+    piRefY += iRefStride;
+  }
+
+  rcMv.set( iBestX, iBestY );
+
+  // Report distortion only; the MV-rate part is re-added by the caller.
+  ruiSAD = uiSadBest - m_pcRdCost->getCost( iBestX, iBestY );
+  return;
+}
+
+
+
+// Fast integer-pel search dispatcher: gather the spatial MV predictors
+// (left / above / above-right) and run the search selected by m_iFastSearch
+// (1 = TZ search, 2 = selective TZ search; anything else is a no-op).
+Void TEncSearch::xPatternSearchFast( TComDataCU*   pcCU,
+                                     TComPattern*  pcPatternKey,
+                                     Pel*          piRefY,
+                                     Int           iRefStride,
+                                     TComMv*       pcMvSrchRngLT,
+                                     TComMv*       pcMvSrchRngRB,
+                                     TComMv       &rcMv,
+                                     Distortion   &ruiSAD,
+                                     const TComMv* pIntegerMv2Nx2NPred )
+{
+  assert (MD_LEFT < NUM_MV_PREDICTORS);
+  pcCU->getMvPredLeft       ( m_acMvPredictors[MD_LEFT] );
+  assert (MD_ABOVE < NUM_MV_PREDICTORS);
+  pcCU->getMvPredAbove      ( m_acMvPredictors[MD_ABOVE] );
+  assert (MD_ABOVE_RIGHT < NUM_MV_PREDICTORS);
+  pcCU->getMvPredAboveRight ( m_acMvPredictors[MD_ABOVE_RIGHT] );
+
+  switch ( m_iFastSearch )
+  {
+    case 1:
+      xTZSearch( pcCU, pcPatternKey, piRefY, iRefStride, pcMvSrchRngLT, pcMvSrchRngRB, rcMv, ruiSAD, pIntegerMv2Nx2NPred );
+      break;
+
+    case 2:
+      xTZSearchSelective( pcCU, pcPatternKey, piRefY, iRefStride, pcMvSrchRngLT, pcMvSrchRngRB, rcMv, ruiSAD, pIntegerMv2Nx2NPred );
+      break;
+    default:
+      break;
+  }
+}
+
+
+
+
+Void TEncSearch::xTZSearch( TComDataCU*  pcCU,
+                            TComPattern* pcPatternKey,
+                            Pel*         piRefY,
+                            Int          iRefStride,
+                            TComMv*      pcMvSrchRngLT,
+                            TComMv*      pcMvSrchRngRB,
+                            TComMv      &rcMv,
+                            Distortion  &ruiSAD,
+                            const TComMv* pIntegerMv2Nx2NPred )
+{
+  Int   iSrchRngHorLeft   = pcMvSrchRngLT->getHor();
+  Int   iSrchRngHorRight  = pcMvSrchRngRB->getHor();
+  Int   iSrchRngVerTop    = pcMvSrchRngLT->getVer();
+  Int   iSrchRngVerBottom = pcMvSrchRngRB->getVer();
+
+  TZ_SEARCH_CONFIGURATION
+
+  UInt uiSearchRange = m_iSearchRange;
+  pcCU->clipMv( rcMv );
+  rcMv >>= 2;
+  // init TZSearchStruct
+  IntTZSearchStruct cStruct;
+  cStruct.iYStride    = iRefStride;
+  cStruct.piRefY      = piRefY;
+  cStruct.uiBestSad   = MAX_UINT;
+
+  // set rcMv (Median predictor) as start point and as best point
+  xTZSearchHelp( pcPatternKey, cStruct, rcMv.getHor(), rcMv.getVer(), 0, 0 );
+
+  // test whether one of PRED_A, PRED_B, PRED_C MV is better start point than Median predictor
+  if ( bTestOtherPredictedMV )
+  {
+    for ( UInt index = 0; index < NUM_MV_PREDICTORS; index++ )
+    {
+      TComMv cMv = m_acMvPredictors[index];
+      pcCU->clipMv( cMv );
+      cMv >>= 2;
+      xTZSearchHelp( pcPatternKey, cStruct, cMv.getHor(), cMv.getVer(), 0, 0 );
+    }
+  }
+
+  // test whether zero Mv is better start point than Median predictor
+  if ( bTestZeroVector )
+  {
+    xTZSearchHelp( pcPatternKey, cStruct, 0, 0, 0, 0 );
+  }
+
+  if (pIntegerMv2Nx2NPred != 0)
+  {
+    TComMv integerMv2Nx2NPred = *pIntegerMv2Nx2NPred;
+    integerMv2Nx2NPred <<= 2;
+    pcCU->clipMv( integerMv2Nx2NPred );
+    integerMv2Nx2NPred >>= 2;
+    xTZSearchHelp(pcPatternKey, cStruct, integerMv2Nx2NPred.getHor(), integerMv2Nx2NPred.getVer(), 0, 0);
+
+    // reset search range
+    TComMv cMvSrchRngLT;
+    TComMv cMvSrchRngRB;
+    Int iSrchRng = m_iSearchRange;
+    TComMv currBestMv(cStruct.iBestX, cStruct.iBestY );
+    currBestMv <<= 2;
+    xSetSearchRange( pcCU, currBestMv, iSrchRng, cMvSrchRngLT, cMvSrchRngRB );
+    iSrchRngHorLeft   = cMvSrchRngLT.getHor();
+    iSrchRngHorRight  = cMvSrchRngRB.getHor();
+    iSrchRngVerTop    = cMvSrchRngLT.getVer();
+    iSrchRngVerBottom = cMvSrchRngRB.getVer();
+  }
+
+  // start search
+  Int  iDist = 0;
+  Int  iStartX = cStruct.iBestX;
+  Int  iStartY = cStruct.iBestY;
+
+  // first search
+  for ( iDist = 1; iDist <= (Int)uiSearchRange; iDist*=2 )
+  {
+    if ( bFirstSearchDiamond == 1 )
+    {
+      xTZ8PointDiamondSearch ( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, iStartX, iStartY, iDist );
+    }
+    else
+    {
+      xTZ8PointSquareSearch  ( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, iStartX, iStartY, iDist );
+    }
+
+    if ( bFirstSearchStop && ( cStruct.uiBestRound >= uiFirstSearchRounds ) ) // stop criterion
+    {
+      break;
+    }
+  }
+
+  // test whether zero Mv is a better start point than Median predictor
+  if ( bTestZeroVectorStart && ((cStruct.iBestX != 0) || (cStruct.iBestY != 0)) )
+  {
+    xTZSearchHelp( pcPatternKey, cStruct, 0, 0, 0, 0 );
+    if ( (cStruct.iBestX == 0) && (cStruct.iBestY == 0) )
+    {
+      // test its neighborhood
+      for ( iDist = 1; iDist <= (Int)uiSearchRange; iDist*=2 )
+      {
+        xTZ8PointDiamondSearch( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, 0, 0, iDist );
+        if ( bTestZeroVectorStop && (cStruct.uiBestRound > 0) ) // stop criterion
+        {
+          break;
+        }
+      }
+    }
+  }
+
+  // calculate only 2 missing points instead 8 points if cStruct.uiBestDistance == 1
+  if ( cStruct.uiBestDistance == 1 )
+  {
+    cStruct.uiBestDistance = 0;
+    xTZ2PointSearch( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB );
+  }
+
+  // raster search if distance is too big
+  if ( bEnableRasterSearch && ( ((Int)(cStruct.uiBestDistance) > iRaster) || bAlwaysRasterSearch ) )
+  {
+    cStruct.uiBestDistance = iRaster;
+    for ( iStartY = iSrchRngVerTop; iStartY <= iSrchRngVerBottom; iStartY += iRaster )
+    {
+      for ( iStartX = iSrchRngHorLeft; iStartX <= iSrchRngHorRight; iStartX += iRaster )
+      {
+        xTZSearchHelp( pcPatternKey, cStruct, iStartX, iStartY, 0, iRaster );
+      }
+    }
+  }
+
+  // raster refinement
+  if ( bRasterRefinementEnable && cStruct.uiBestDistance > 0 )
+  {
+    while ( cStruct.uiBestDistance > 0 )
+    {
+      iStartX = cStruct.iBestX;
+      iStartY = cStruct.iBestY;
+      if ( cStruct.uiBestDistance > 1 )
+      {
+        iDist = cStruct.uiBestDistance >>= 1;
+        if ( bRasterRefinementDiamond == 1 )
+        {
+          xTZ8PointDiamondSearch ( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, iStartX, iStartY, iDist );
+        }
+        else
+        {
+          xTZ8PointSquareSearch  ( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, iStartX, iStartY, iDist );
+        }
+      }
+
+      // calculate only 2 missing points instead 8 points if cStruct.uiBestDistance == 1
+      if ( cStruct.uiBestDistance == 1 )
+      {
+        cStruct.uiBestDistance = 0;
+        if ( cStruct.ucPointNr != 0 )
+        {
+          xTZ2PointSearch( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB );
+        }
+      }
+    }
+  }
+
+  // start refinement
+  if ( bStarRefinementEnable && cStruct.uiBestDistance > 0 )
+  {
+    while ( cStruct.uiBestDistance > 0 )
+    {
+      iStartX = cStruct.iBestX;
+      iStartY = cStruct.iBestY;
+      cStruct.uiBestDistance = 0;
+      cStruct.ucPointNr = 0;
+      for ( iDist = 1; iDist < (Int)uiSearchRange + 1; iDist*=2 )
+      {
+        if ( bStarRefinementDiamond == 1 )
+        {
+          xTZ8PointDiamondSearch ( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, iStartX, iStartY, iDist );
+        }
+        else
+        {
+          xTZ8PointSquareSearch  ( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, iStartX, iStartY, iDist );
+        }
+        if ( bStarRefinementStop && (cStruct.uiBestRound >= uiStarRefinementRounds) ) // stop criterion
+        {
+          break;
+        }
+      }
+
+      // calculate only 2 missing points instead 8 points if cStrukt.uiBestDistance == 1
+      if ( cStruct.uiBestDistance == 1 )
+      {
+        cStruct.uiBestDistance = 0;
+        if ( cStruct.ucPointNr != 0 )
+        {
+          xTZ2PointSearch( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB );
+        }
+      }
+    }
+  }
+
+  // write out best match
+  rcMv.set( cStruct.iBestX, cStruct.iBestY );
+  ruiSAD = cStruct.uiBestSad - m_pcRdCost->getCost( cStruct.iBestX, cStruct.iBestY );
+}
+
+
+/** Selective TZ motion search.
+ *  Seeds the search with the median predictor, the other MV predictors and the
+ *  zero vector, scans a small window around the best seed with a coarse step,
+ *  then either falls back to a full raster scan (when the best MV moved far
+ *  from the seeds) or performs star refinement around the best point.
+ * \param pcCU                CU being searched
+ * \param pcPatternKey        pattern of the original block (search target)
+ * \param piRefY              reference picture luma plane
+ * \param iRefStride          stride of piRefY
+ * \param pcMvSrchRngLT       top-left corner of the allowed search range
+ * \param pcMvSrchRngRB       bottom-right corner of the allowed search range
+ * \param rcMv                in: MV predictor (quarter-pel); out: best integer-pel MV
+ * \param ruiSAD              out: SAD of the best match with the MV cost removed
+ * \param pIntegerMv2Nx2NPred optional integer MV predictor from the 2Nx2N search
+ */
+Void TEncSearch::xTZSearchSelective( TComDataCU*   pcCU,
+                                     TComPattern*  pcPatternKey,
+                                     Pel*          piRefY,
+                                     Int           iRefStride,
+                                     TComMv*       pcMvSrchRngLT,
+                                     TComMv*       pcMvSrchRngRB,
+                                     TComMv       &rcMv,
+                                     Distortion   &ruiSAD,
+                                     const TComMv* pIntegerMv2Nx2NPred )
+{
+  // Macro declaring the selective-search tuning parameters used below
+  // (bTestOtherPredictedMV, bTestZeroVector, uiSearchRange,
+  //  uiSearchRangeInitial, uiSearchStep, iMVDistThresh, star-refinement flags).
+  SEL_SEARCH_CONFIGURATION
+
+  Int   iSrchRngHorLeft         = pcMvSrchRngLT->getHor();
+  Int   iSrchRngHorRight        = pcMvSrchRngRB->getHor();
+  Int   iSrchRngVerTop          = pcMvSrchRngLT->getVer();
+  Int   iSrchRngVerBottom       = pcMvSrchRngRB->getVer();
+  Int   iFirstSrchRngHorLeft    = 0;
+  Int   iFirstSrchRngHorRight   = 0;
+  Int   iFirstSrchRngVerTop     = 0;
+  Int   iFirstSrchRngVerBottom  = 0;
+  Int   iStartX                 = 0;
+  Int   iStartY                 = 0;
+  Int   iBestX                  = 0;
+  Int   iBestY                  = 0;
+  Int   iDist                   = 0;
+
+  pcCU->clipMv( rcMv );
+  rcMv >>= 2;  // quarter-pel predictor -> integer-pel resolution
+  // init TZSearchStruct
+  IntTZSearchStruct cStruct;
+  cStruct.iYStride    = iRefStride;
+  cStruct.piRefY      = piRefY;
+  cStruct.uiBestSad   = MAX_UINT;
+  cStruct.iBestX = 0;
+  cStruct.iBestY = 0;
+
+
+  // set rcMv (Median predictor) as start point and as best point
+  xTZSearchHelp( pcPatternKey, cStruct, rcMv.getHor(), rcMv.getVer(), 0, 0 );
+
+  // test whether one of PRED_A, PRED_B, PRED_C MV is better start point than Median predictor
+  if ( bTestOtherPredictedMV )
+  {
+    for ( UInt index = 0; index < NUM_MV_PREDICTORS; index++ )
+    {
+      TComMv cMv = m_acMvPredictors[index];
+      pcCU->clipMv( cMv );
+      cMv >>= 2;
+      xTZSearchHelp( pcPatternKey, cStruct, cMv.getHor(), cMv.getVer(), 0, 0 );
+    }
+  }
+
+  // test whether zero Mv is better start point than Median predictor
+  if ( bTestZeroVector )
+  {
+    xTZSearchHelp( pcPatternKey, cStruct, 0, 0, 0, 0 );
+  }
+
+  if ( pIntegerMv2Nx2NPred != 0 )
+  {
+    TComMv integerMv2Nx2NPred = *pIntegerMv2Nx2NPred;
+    // scale up so clipping happens at quarter-pel precision, then back down
+    integerMv2Nx2NPred <<= 2;
+    pcCU->clipMv( integerMv2Nx2NPred );
+    integerMv2Nx2NPred >>= 2;
+    xTZSearchHelp(pcPatternKey, cStruct, integerMv2Nx2NPred.getHor(), integerMv2Nx2NPred.getVer(), 0, 0);
+
+    // reset search range
+    TComMv cMvSrchRngLT;
+    TComMv cMvSrchRngRB;
+    Int iSrchRng = m_iSearchRange;
+    TComMv currBestMv(cStruct.iBestX, cStruct.iBestY );
+    currBestMv <<= 2;
+    xSetSearchRange( pcCU, currBestMv, iSrchRng, cMvSrchRngLT, cMvSrchRngRB );
+    iSrchRngHorLeft   = cMvSrchRngLT.getHor();
+    iSrchRngHorRight  = cMvSrchRngRB.getHor();
+    iSrchRngVerTop    = cMvSrchRngLT.getVer();
+    iSrchRngVerBottom = cMvSrchRngRB.getVer();
+  }
+
+  // Initial search
+  iBestX = cStruct.iBestX;
+  iBestY = cStruct.iBestY; 
+  // window of +/- uiSearchRangeInitial around the best seed, clamped to the search range
+  iFirstSrchRngHorLeft    = ((iBestX - uiSearchRangeInitial) > iSrchRngHorLeft)   ? (iBestX - uiSearchRangeInitial) : iSrchRngHorLeft;
+  iFirstSrchRngVerTop     = ((iBestY - uiSearchRangeInitial) > iSrchRngVerTop)    ? (iBestY - uiSearchRangeInitial) : iSrchRngVerTop;
+  iFirstSrchRngHorRight   = ((iBestX + uiSearchRangeInitial) < iSrchRngHorRight)  ? (iBestX + uiSearchRangeInitial) : iSrchRngHorRight;  
+  iFirstSrchRngVerBottom  = ((iBestY + uiSearchRangeInitial) < iSrchRngVerBottom) ? (iBestY + uiSearchRangeInitial) : iSrchRngVerBottom;    
+
+  // coarse grid scan; each visited point also gets two small diamond probes
+  for ( iStartY = iFirstSrchRngVerTop; iStartY <= iFirstSrchRngVerBottom; iStartY += uiSearchStep )
+  {
+    for ( iStartX = iFirstSrchRngHorLeft; iStartX <= iFirstSrchRngHorRight; iStartX += uiSearchStep )
+    {
+      xTZSearchHelp( pcPatternKey, cStruct, iStartX, iStartY, 0, 0 );
+      xTZ8PointDiamondSearch ( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, iStartX, iStartY, 1 );
+      xTZ8PointDiamondSearch ( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, iStartX, iStartY, 2 );
+    }
+  }
+
+  // flag (stored in an Int): best MV moved further than iMVDistThresh from the seed
+  Int iMaxMVDistToPred = (abs(cStruct.iBestX - iBestX) > iMVDistThresh || abs(cStruct.iBestY - iBestY) > iMVDistThresh);
+
+  //full search with early exit if MV is distant from predictors
+  if ( bEnableRasterSearch && (iMaxMVDistToPred || bAlwaysRasterSearch) )
+  {
+    for ( iStartY = iSrchRngVerTop; iStartY <= iSrchRngVerBottom; iStartY += 1 )
+    {
+      for ( iStartX = iSrchRngHorLeft; iStartX <= iSrchRngHorRight; iStartX += 1 )
+      {
+        xTZSearchHelp( pcPatternKey, cStruct, iStartX, iStartY, 0, 1 );
+      }
+    }
+  }
+  //Smaller MV, refine around predictor
+  else if ( bStarRefinementEnable && cStruct.uiBestDistance > 0 )
+  {
+    // start refinement
+    while ( cStruct.uiBestDistance > 0 )
+    {
+      iStartX = cStruct.iBestX;
+      iStartY = cStruct.iBestY;
+      cStruct.uiBestDistance = 0;
+      cStruct.ucPointNr = 0;
+      for ( iDist = 1; iDist < (Int)uiSearchRange + 1; iDist*=2 )
+      {
+        if ( bStarRefinementDiamond == 1 )
+        {
+          xTZ8PointDiamondSearch ( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, iStartX, iStartY, iDist );
+        }
+        else
+        {
+          xTZ8PointSquareSearch  ( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB, iStartX, iStartY, iDist );
+        }
+        if ( bStarRefinementStop && (cStruct.uiBestRound >= uiStarRefinementRounds) ) // stop criterion
+        {
+          break;
+        }
+      }
+
+      // calculate only 2 missing points instead 8 points if cStruct.uiBestDistance == 1
+      if ( cStruct.uiBestDistance == 1 )
+      {
+        cStruct.uiBestDistance = 0;
+        if ( cStruct.ucPointNr != 0 )
+        {
+          xTZ2PointSearch( pcPatternKey, cStruct, pcMvSrchRngLT, pcMvSrchRngRB );
+        }
+      }
+    }
+  }
+
+  // write out best match
+  rcMv.set( cStruct.iBestX, cStruct.iBestY );
+  ruiSAD = cStruct.uiBestSad - m_pcRdCost->getCost( cStruct.iBestX, cStruct.iBestY );
+
+}
+
+
+/** Fractional-pel motion refinement around the best integer-pel MV:
+ *  first a half-pel stage, then a quarter-pel stage, each over interpolated
+ *  reference samples produced by the DIF upsampling helpers.
+ * \param bIsLosslessCoded lossless-coding flag; its negation is passed through
+ *                         to xPatternRefinement (presumably toggling fractional
+ *                         bit estimation — confirm against that function)
+ * \param pcPatternKey  pattern of the original block (search target)
+ * \param piRefY        reference picture luma plane
+ * \param iRefStride    stride of piRefY
+ * \param pcMvInt       best integer-pel MV from the preceding integer search
+ * \param rcMvHalf      out: best half-pel offset (in half-pel units)
+ * \param rcMvQter      out: best quarter-pel offset (in quarter-pel units)
+ * \param ruiCost       out: cost of the final quarter-pel refinement
+ * \param biPred        true when refining for bi-prediction
+ */
+Void TEncSearch::xPatternSearchFracDIF(
+                                       Bool         bIsLosslessCoded,
+                                       TComPattern* pcPatternKey,
+                                       Pel*         piRefY,
+                                       Int          iRefStride,
+                                       TComMv*      pcMvInt,
+                                       TComMv&      rcMvHalf,
+                                       TComMv&      rcMvQter,
+                                       Distortion&  ruiCost,
+                                       Bool         biPred
+                                      )
+{
+  //  Reference pattern initialization (integer scale)
+  TComPattern cPatternRoi;
+  // offset of the integer-MV position within the reference plane
+  Int         iOffset    = pcMvInt->getHor() + pcMvInt->getVer() * iRefStride;
+  cPatternRoi.initPattern(piRefY + iOffset,
+                          pcPatternKey->getROIYWidth(),
+                          pcPatternKey->getROIYHeight(),
+                          iRefStride );
+
+  //  Half-pel refinement
+  xExtDIFUpSamplingH ( &cPatternRoi, biPred );
+
+  rcMvHalf = *pcMvInt;   rcMvHalf <<= 1;    // for mv-cost
+  TComMv baseRefMv(0, 0);
+  ruiCost = xPatternRefinement( pcPatternKey, baseRefMv, 2, rcMvHalf, !bIsLosslessCoded );
+
+  // quarter-pel stage uses MV-cost scale 0
+  m_pcRdCost->setCostScale( 0 );
+
+  // upsample around the chosen half-pel position for quarter-pel search
+  xExtDIFUpSamplingQ ( &cPatternRoi, rcMvHalf, biPred );
+  baseRefMv = rcMvHalf;
+  baseRefMv <<= 1;
+
+  rcMvQter = *pcMvInt;   rcMvQter <<= 1;    // for mv-cost
+  rcMvQter += rcMvHalf;  rcMvQter <<= 1;
+  ruiCost = xPatternRefinement( pcPatternKey, baseRefMv, 1, rcMvQter, !bIsLosslessCoded );
+}
+
+
+/** encode residual and calculate rate-distortion cost for an inter CU
+ *  Evaluates SKIP mode (no residual) when requested; otherwise loops over the
+ *  candidate QP range, RD-optimizing the residual quadtree at each QP and
+ *  keeping the cheapest coding, then reconstructs the CU from prediction plus
+ *  the best residual.
+ * \param pcCU          CU being coded (totals and flags updated in place)
+ * \param pcYuvOrg      original picture block
+ * \param pcYuvPred     inter prediction block
+ * \param pcYuvResi     working residual buffer (original minus prediction)
+ * \param pcYuvResiBest best residual found over the tested QPs
+ * \param pcYuvRec      reconstructed block (prediction + best residual, clipped)
+ * \param bSkipRes      true to evaluate SKIP mode: no residual is coded
+ * \returns Void
+ */
+Void TEncSearch::encodeResAndCalcRdInterCU( TComDataCU* pcCU, TComYuv* pcYuvOrg, TComYuv* pcYuvPred,
+                                            TComYuv* pcYuvResi, TComYuv* pcYuvResiBest, TComYuv* pcYuvRec,
+                                            Bool bSkipRes DEBUG_STRING_FN_DECLARE(sDebug) )
+{
+  if ( pcCU->isIntra(0) )
+  {
+    return;
+  }
+
+  // delta-QP search is only enabled on non-zero slice depth
+  Bool       bHighPass    = ( pcCU->getSlice()->getDepth() != 0 );
+  UInt       uiBits       = 0, uiBitsBest       = 0;
+  Distortion uiDistortion = 0, uiDistortionBest = 0;
+
+  UInt        uiWidth      = pcCU->getWidth ( 0 );
+  UInt        uiHeight     = pcCU->getHeight( 0 );
+
+  //  No residual coding : SKIP mode
+  if ( bSkipRes )
+  {
+    pcCU->setSkipFlagSubParts( true, 0, pcCU->getDepth(0) );
+
+    pcYuvResi->clear();
+
+    // reconstruction is the prediction itself
+    pcYuvPred->copyToPartYuv( pcYuvRec, 0 );
+
+    for (UInt ch=0; ch < pcCU->getPic()->getNumberValidComponents(); ch++)
+    {
+      const ComponentID compID=ComponentID(ch);
+      const UInt csx=pcYuvOrg->getComponentScaleX(compID);
+      const UInt csy=pcYuvOrg->getComponentScaleY(compID);
+      uiDistortion += m_pcRdCost->getDistPart( g_bitDepth[toChannelType(compID)], pcYuvRec->getAddr(compID), pcYuvRec->getStride(compID), pcYuvOrg->getAddr(compID),
+                                               pcYuvOrg->getStride(compID), uiWidth >> csx, uiHeight >> csy, compID);
+    }
+
+    // rate for SKIP mode: only the skip flag and merge index (plus optional bypass flag)
+    m_pcRDGoOnSbacCoder->load(m_pppcRDSbacCoder[pcCU->getDepth(0)][CI_CURR_BEST]);
+    m_pcEntropyCoder->resetBits();
+
+    if (pcCU->getSlice()->getPPS()->getTransquantBypassEnableFlag())
+    {
+      m_pcEntropyCoder->encodeCUTransquantBypassFlag(pcCU, 0, true);
+    }
+
+    m_pcEntropyCoder->encodeSkipFlag(pcCU, 0, true);
+    m_pcEntropyCoder->encodeMergeIndex( pcCU, 0, true );
+
+    uiBits = m_pcEntropyCoder->getNumberOfWrittenBits();
+    pcCU->getTotalBits()       = uiBits;
+    pcCU->getTotalDistortion() = uiDistortion;
+    pcCU->getTotalCost()       = m_pcRdCost->calcRdCost( uiBits, uiDistortion );
+
+    m_pcRDGoOnSbacCoder->store(m_pppcRDSbacCoder[pcCU->getDepth(0)][CI_TEMP_BEST]);
+
+    static const UInt cbfZero[MAX_NUM_COMPONENT]={0,0,0};
+    pcCU->setCbfSubParts( cbfZero, 0, pcCU->getDepth( 0 ) );
+    pcCU->setTrIdxSubParts( 0, 0, pcCU->getDepth(0) );
+
+#ifdef DEBUG_STRING
+    pcYuvResiBest->clear(); // Clear the residual image, if we didn't code it.
+    for(UInt i=0; i<MAX_NUM_COMPONENT+1; i++)
+    {
+      sDebug+=debug_reorder_data_inter_token[i];
+    }
+#endif
+
+    return;
+  }
+
+  //  Residual coding.
+  Int qp;
+  Int qpBest = 0;
+  Int qpMin;
+  Int qpMax;
+  Double  dCost, dCostBest = MAX_DOUBLE;
+
+  UInt uiTrLevel = 0;
+  if( (pcCU->getWidth(0) > pcCU->getSlice()->getSPS()->getMaxTrSize()) )
+  {
+    while( pcCU->getWidth(0) > (pcCU->getSlice()->getSPS()->getMaxTrSize()<<uiTrLevel) ) uiTrLevel++;
+  }
+
+  // QP range: +/- m_iMaxDeltaQP around the CU QP when delta-QP search is on,
+  // otherwise the single CU QP
+  qpMin =  bHighPass ? Clip3( -pcCU->getSlice()->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA), MAX_QP, pcCU->getQP(0) - m_iMaxDeltaQP ) : pcCU->getQP( 0 );
+  qpMax =  bHighPass ? Clip3( -pcCU->getSlice()->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA), MAX_QP, pcCU->getQP(0) + m_iMaxDeltaQP ) : pcCU->getQP( 0 );
+
+  pcYuvResi->subtract( pcYuvOrg, pcYuvPred, 0, uiWidth );
+
+  TComTURecurse tuLevel0(pcCU, 0);
+
+  for ( qp = qpMin; qp <= qpMax; qp++ )
+  {
+    dCost = 0.;
+    uiBits = 0;
+    uiDistortion = 0;
+
+    m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[ pcCU->getDepth( 0 ) ][ CI_CURR_BEST ] );
+
+    Distortion uiZeroDistortion = 0;
+
+    xEstimateResidualQT( pcYuvResi,  dCost, uiBits, uiDistortion, &uiZeroDistortion, tuLevel0 DEBUG_STRING_PASS_INTO(sDebug) );
+
+    // -------------------------------------------------------
+    // set the coefficients in the pcCU, and also calculates the residual data.
+    // If a block full of 0's is efficient, then just use 0's.
+    // The costs at this point do not include header bits.
+
+    m_pcEntropyCoder->resetBits();
+    m_pcEntropyCoder->encodeQtRootCbfZero( pcCU );
+    UInt zeroResiBits = m_pcEntropyCoder->getNumberOfWrittenBits();
+    Double dZeroCost = m_pcRdCost->calcRdCost( zeroResiBits, uiZeroDistortion );
+
+    // lossless CUs must code the residual: force the zero-residual cost above dCost
+    if(pcCU->isLosslessCoded( 0 ))
+    {
+      dZeroCost = dCost + 1;
+    }
+
+    if ( dZeroCost < dCost )
+    {
+      // coding an all-zero residual is cheaper: adopt it and wipe the
+      // transform/coefficient state accordingly
+      dCost        = dZeroCost;
+      uiDistortion = uiZeroDistortion;
+
+      const UInt uiQPartNum = tuLevel0.GetAbsPartIdxNumParts();
+      ::memset( pcCU->getTransformIdx()     , 0, uiQPartNum * sizeof(UChar) );
+      for (UInt ch=0; ch < pcCU->getPic()->getNumberValidComponents(); ch++)
+      {
+        const ComponentID component = ComponentID(ch);
+        const UInt componentShift   = pcCU->getPic()->getComponentScaleX(component) + pcCU->getPic()->getComponentScaleY(component);
+        ::memset( pcCU->getCbf( component ) , 0, uiQPartNum * sizeof(UChar) );
+        ::memset( pcCU->getCoeff(component), 0, (uiWidth*uiHeight*sizeof(TCoeff))>>componentShift );
+        ::memset( pcCU->getCrossComponentPredictionAlpha(component), 0, ( uiQPartNum * sizeof(Char) ) );
+      }
+      static const UInt useTS[MAX_NUM_COMPONENT]={0,0,0};
+      pcCU->setTransformSkipSubParts ( useTS, 0, pcCU->getDepth(0) );
+#ifdef DEBUG_STRING
+      sDebug.clear();
+      for(UInt i=0; i<MAX_NUM_COMPONENT+1; i++)
+      {
+        sDebug+=debug_reorder_data_inter_token[i];
+      }
+#endif
+    }
+    else
+    {
+      xSetResidualQTData( NULL, false, tuLevel0); // Call first time to set coefficients.
+    }
+
+    m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[pcCU->getDepth(0)][CI_CURR_BEST] );
+
+    // re-derive the exact bit count including header bits
+    uiBits = 0;
+    xAddSymbolBitsInter( pcCU, 0, 0, uiBits );
+    // we've now encoded the pcCU, and so have a valid bit cost
+
+
+    Double dExactCost = m_pcRdCost->calcRdCost( uiBits, uiDistortion );
+    dCost = dExactCost;
+
+    // Is our new cost better?
+    if ( dCost < dCostBest )
+    {
+      if ( !pcCU->getQtRootCbf( 0 ) )
+      {
+        pcYuvResiBest->clear(); // Clear the residual image, if we didn't code it.
+      }
+      else
+      {
+        xSetResidualQTData( pcYuvResiBest, true, tuLevel0 ); // else set the residual image data pcYUVResiBest from the various temp images.
+      }
+
+      // snapshot CU coding state only when a later QP iteration could overwrite it
+      if( qpMin != qpMax && qp != qpMax )
+      {
+        const UInt uiQPartNum = tuLevel0.GetAbsPartIdxNumParts();
+        ::memcpy( m_puhQTTempTrIdx, pcCU->getTransformIdx(),        uiQPartNum * sizeof(UChar) );
+        for(UInt i=0; i<pcCU->getPic()->getNumberValidComponents(); i++)
+        {
+          const ComponentID compID=ComponentID(i);
+          const UInt csr = pcCU->getPic()->getComponentScaleX(compID) + pcCU->getPic()->getComponentScaleY(compID);
+          ::memcpy( m_puhQTTempCbf[compID],      pcCU->getCbf( compID ),     uiQPartNum * sizeof(UChar) );
+          ::memcpy( m_pcQTTempCoeff[compID],     pcCU->getCoeff(compID),     uiWidth * uiHeight * sizeof( TCoeff ) >> csr     );
+#if ADAPTIVE_QP_SELECTION
+          ::memcpy( m_pcQTTempArlCoeff[compID],  pcCU->getArlCoeff(compID),  uiWidth * uiHeight * sizeof( TCoeff )>> csr     );
+#endif
+          ::memcpy( m_puhQTTempTransformSkipFlag[compID], pcCU->getTransformSkip(compID),     uiQPartNum * sizeof( UChar ) );
+          ::memcpy( m_phQTTempCrossComponentPredictionAlpha[compID], pcCU->getCrossComponentPredictionAlpha(compID), uiQPartNum * sizeof(Char) );
+        }
+      }
+      uiBitsBest       = uiBits;
+      uiDistortionBest = uiDistortion;
+      dCostBest        = dCost;
+      qpBest           = qp;
+
+      m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[ pcCU->getDepth( 0 ) ][ CI_TEMP_BEST ] );
+    }
+  }
+
+  assert ( dCostBest != MAX_DOUBLE );
+
+  // restore the snapshot when the best QP was not the last one tested
+  // (believed unreachable in the current configuration, hence the assert)
+  if( qpMin != qpMax && qpBest != qpMax )
+  {
+    assert( 0 ); // check
+    m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[ pcCU->getDepth( 0 ) ][ CI_TEMP_BEST ] );
+
+      // copy best cbf and trIdx to pcCU
+    const UInt uiQPartNum = tuLevel0.GetAbsPartIdxNumParts();
+    ::memcpy( pcCU->getTransformIdx(),       m_puhQTTempTrIdx,  uiQPartNum * sizeof(UChar) );
+    for(UInt i=0; i<pcCU->getPic()->getNumberValidComponents(); i++)
+    {
+      const ComponentID compID=ComponentID(i);
+      const UInt csr = pcCU->getPic()->getComponentScaleX(compID) + pcCU->getPic()->getComponentScaleY(compID);
+      ::memcpy( pcCU->getCbf( compID ),     m_puhQTTempCbf[compID],     uiQPartNum * sizeof(UChar) );
+      ::memcpy( pcCU->getCoeff(compID),     m_pcQTTempCoeff[compID],    uiWidth * uiHeight * sizeof( TCoeff ) >> csr     );
+#if ADAPTIVE_QP_SELECTION
+      ::memcpy( pcCU->getArlCoeff(compID),  m_pcQTTempArlCoeff[compID], uiWidth * uiHeight * sizeof( TCoeff    ) >> csr );
+#endif
+      ::memcpy( pcCU->getTransformSkip(compID),     m_puhQTTempTransformSkipFlag[compID], uiQPartNum * sizeof( UChar ) );
+      ::memcpy( pcCU->getCrossComponentPredictionAlpha(compID),  m_phQTTempCrossComponentPredictionAlpha[compID], uiQPartNum * sizeof( Char ) );
+    }
+  }
+  pcYuvRec->addClip ( pcYuvPred, pcYuvResiBest, 0, uiWidth );
+
+  // update with clipped distortion and cost (qp estimation loop uses unclipped values)
+
+  uiDistortionBest = 0;
+  for(UInt ch=0; ch<pcYuvRec->getNumberValidComponents(); ch++)
+  {
+    const ComponentID compID=ComponentID(ch);
+    uiDistortionBest += m_pcRdCost->getDistPart( g_bitDepth[toChannelType(compID)], pcYuvRec->getAddr(compID ), pcYuvRec->getStride(compID ), pcYuvOrg->getAddr(compID ), pcYuvOrg->getStride(compID), uiWidth >> pcYuvOrg->getComponentScaleX(compID), uiHeight >> pcYuvOrg->getComponentScaleY(compID), compID);
+  }
+  dCostBest = m_pcRdCost->calcRdCost( uiBitsBest, uiDistortionBest );
+
+  pcCU->getTotalBits()       = uiBitsBest;
+  pcCU->getTotalDistortion() = uiDistortionBest;
+  pcCU->getTotalCost()       = dCostBest;
+
+  if ( pcCU->isSkipped(0) )
+  {
+    static const UInt cbfZero[MAX_NUM_COMPONENT]={0,0,0};
+    pcCU->setCbfSubParts( cbfZero, 0, pcCU->getDepth( 0 ) );
+  }
+
+  pcCU->setQPSubParts( qpBest, 0, pcCU->getDepth(0) );
+}
+
+
+
+Void TEncSearch::xEstimateResidualQT( TComYuv    *pcResi,
+                                      Double     &rdCost,
+                                      UInt       &ruiBits,
+                                      Distortion &ruiDist,
+                                      Distortion *puiZeroDist,
+                                      TComTU     &rTu
+                                      DEBUG_STRING_FN_DECLARE(sDebug) )
+{
+  TComDataCU *pcCU        = rTu.getCU();
+  const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
+  const UInt uiDepth      = rTu.GetTransformDepthTotal();
+  const UInt uiTrMode     = rTu.GetTransformDepthRel();
+  const UInt subTUDepth   = uiTrMode + 1;
+  const UInt numValidComp = pcCU->getPic()->getNumberValidComponents();
+  DEBUG_STRING_NEW(sSingleStringComp[MAX_NUM_COMPONENT])
+
+  assert( pcCU->getDepth( 0 ) == pcCU->getDepth( uiAbsPartIdx ) );
+  const UInt uiLog2TrSize = rTu.GetLog2LumaTrSize();
+
+  UInt SplitFlag = ((pcCU->getSlice()->getSPS()->getQuadtreeTUMaxDepthInter() == 1) && pcCU->isInter(uiAbsPartIdx) && ( pcCU->getPartitionSize(uiAbsPartIdx) != SIZE_2Nx2N ));
+#ifdef DEBUG_STRING
+  const Int debugPredModeMask = DebugStringGetPredModeMask(pcCU->getPredictionMode(uiAbsPartIdx));
+#endif
+
+  Bool bCheckFull;
+
+  if ( SplitFlag && uiDepth == pcCU->getDepth(uiAbsPartIdx) && ( uiLog2TrSize >  pcCU->getQuadtreeTULog2MinSizeInCU(uiAbsPartIdx) ) )
+  {
+    bCheckFull = false;
+  }
+  else
+  {
+    bCheckFull =  ( uiLog2TrSize <= pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() );
+  }
+
+  const Bool bCheckSplit  = ( uiLog2TrSize >  pcCU->getQuadtreeTULog2MinSizeInCU(uiAbsPartIdx) );
+
+  assert( bCheckFull || bCheckSplit );
+
+  // code full block
+  Double     dSingleCost = MAX_DOUBLE;
+  UInt       uiSingleBits                                                                                                        = 0;
+  Distortion uiSingleDistComp            [MAX_NUM_COMPONENT][2/*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = {{0,0},{0,0},{0,0}};
+  Distortion uiSingleDist                                                                                                        = 0;
+  TCoeff     uiAbsSum                    [MAX_NUM_COMPONENT][2/*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = {{0,0},{0,0},{0,0}};
+  UInt       uiBestTransformMode         [MAX_NUM_COMPONENT][2/*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = {{0,0},{0,0},{0,0}};
+  //  Stores the best explicit RDPCM mode for a TU encoded without split
+  UInt       bestExplicitRdpcmModeUnSplit[MAX_NUM_COMPONENT][2/*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = {{3,3}, {3,3}, {3,3}};
+  Char       bestCrossCPredictionAlpha   [MAX_NUM_COMPONENT][2/*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = {{0,0},{0,0},{0,0}};
+
+  m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[ uiDepth ][ CI_QT_TRAFO_ROOT ] );
+
+  if( bCheckFull )
+  {
+    Double minCost[MAX_NUM_COMPONENT][2/*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/];
+    Bool checkTransformSkip[MAX_NUM_COMPONENT];
+    pcCU->setTrIdxSubParts( uiTrMode, uiAbsPartIdx, uiDepth );
+
+    m_pcEntropyCoder->resetBits();
+
+    memset( m_pTempPel, 0, sizeof( Pel ) * rTu.getRect(COMPONENT_Y).width * rTu.getRect(COMPONENT_Y).height ); // not necessary needed for inside of recursion (only at the beginning)
+
+    const UInt uiQTTempAccessLayer = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() - uiLog2TrSize;
+    TCoeff *pcCoeffCurr[MAX_NUM_COMPONENT];
+#if ADAPTIVE_QP_SELECTION
+    TCoeff *pcArlCoeffCurr[MAX_NUM_COMPONENT];
+#endif
+
+    for(UInt i=0; i<numValidComp; i++)
+    {
+      minCost[i][0] = MAX_DOUBLE;
+      minCost[i][1] = MAX_DOUBLE;
+    }
+
+    Pel crossCPredictedResidualBuffer[ MAX_TU_SIZE * MAX_TU_SIZE ];
+
+    for(UInt i=0; i<numValidComp; i++)
+    {
+      checkTransformSkip[i]=false;
+      const ComponentID compID=ComponentID(i);
+      pcCoeffCurr[compID]    = m_ppcQTTempCoeff[compID][uiQTTempAccessLayer] + rTu.getCoefficientOffset(compID);
+#if ADAPTIVE_QP_SELECTION
+      pcArlCoeffCurr[compID] = m_ppcQTTempArlCoeff[compID ][uiQTTempAccessLayer] +  rTu.getCoefficientOffset(compID);
+#endif
+
+      if(rTu.ProcessComponentSection(compID))
+      {
+        const QpParam cQP(*pcCU, compID);
+
+        checkTransformSkip[compID] = pcCU->getSlice()->getPPS()->getUseTransformSkip() &&
+                                     TUCompRectHasAssociatedTransformSkipFlag(rTu.getRect(compID), pcCU->getSlice()->getPPS()->getTransformSkipLog2MaxSize()) &&
+                                     (!pcCU->isLosslessCoded(0));
+
+        const Bool splitIntoSubTUs = rTu.getRect(compID).width != rTu.getRect(compID).height;
+
+        TComTURecurse TUIterator(rTu, false, (splitIntoSubTUs ? TComTU::VERTICAL_SPLIT : TComTU::DONT_SPLIT), true, compID);
+
+        const UInt partIdxesPerSubTU = TUIterator.GetAbsPartIdxNumParts(compID);
+
+        do
+        {
+          const UInt           subTUIndex             = TUIterator.GetSectionNumber();
+          const UInt           subTUAbsPartIdx        = TUIterator.GetAbsPartIdxTU(compID);
+          const TComRectangle &tuCompRect             = TUIterator.getRect(compID);
+          const UInt           subTUBufferOffset      = tuCompRect.width * tuCompRect.height * subTUIndex;
+
+                TCoeff        *currentCoefficients    = pcCoeffCurr[compID] + subTUBufferOffset;
+#if ADAPTIVE_QP_SELECTION
+                TCoeff        *currentARLCoefficients = pcArlCoeffCurr[compID] + subTUBufferOffset;
+#endif
+          const Bool isCrossCPredictionAvailable      =    isChroma(compID)
+                                                         && pcCU->getSlice()->getPPS()->getUseCrossComponentPrediction()
+                                                         && (pcCU->getCbf(subTUAbsPartIdx, COMPONENT_Y, uiTrMode) != 0);
+
+          Char preCalcAlpha = 0;
+          const Pel *pLumaResi = m_pcQTTempTComYuv[uiQTTempAccessLayer].getAddrPix( COMPONENT_Y, rTu.getRect( COMPONENT_Y ).x0, rTu.getRect( COMPONENT_Y ).y0 );
+
+          if (isCrossCPredictionAvailable)
+          {
+            const Bool bUseReconstructedResidualForEstimate = m_pcEncCfg->getUseReconBasedCrossCPredictionEstimate();
+            const Pel  *const lumaResidualForEstimate       = bUseReconstructedResidualForEstimate ? pLumaResi                                                     : pcResi->getAddrPix(COMPONENT_Y, tuCompRect.x0, tuCompRect.y0);
+            const UInt        lumaResidualStrideForEstimate = bUseReconstructedResidualForEstimate ? m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(COMPONENT_Y) : pcResi->getStride(COMPONENT_Y);
+
+            preCalcAlpha = xCalcCrossComponentPredictionAlpha(TUIterator,
+                                                              compID,
+                                                              lumaResidualForEstimate,
+                                                              pcResi->getAddrPix(compID, tuCompRect.x0, tuCompRect.y0),
+                                                              tuCompRect.width,
+                                                              tuCompRect.height,
+                                                              lumaResidualStrideForEstimate,
+                                                              pcResi->getStride(compID));
+          }
+
+          const Int transformSkipModesToTest    = checkTransformSkip[compID] ? 2 : 1;
+          const Int crossCPredictionModesToTest = (preCalcAlpha != 0)        ? 2 : 1; // preCalcAlpha cannot be anything other than 0 if isCrossCPredictionAvailable is false
+
+          const Bool isOneMode                  = (crossCPredictionModesToTest == 1) && (transformSkipModesToTest == 1);
+
+          for (Int transformSkipModeId = 0; transformSkipModeId < transformSkipModesToTest; transformSkipModeId++)
+          {
+            pcCU->setTransformSkipPartRange(transformSkipModeId, compID, subTUAbsPartIdx, partIdxesPerSubTU);
+
+            for (Int crossCPredictionModeId = 0; crossCPredictionModeId < crossCPredictionModesToTest; crossCPredictionModeId++)
+            {
+              const Bool isFirstMode          = (transformSkipModeId == 0) && (crossCPredictionModeId == 0);
+              const Bool bUseCrossCPrediction = crossCPredictionModeId != 0;
+
+              m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[ uiDepth ][ CI_QT_TRAFO_ROOT ] );
+              m_pcEntropyCoder->resetBits();
+
+              pcCU->setTransformSkipPartRange(transformSkipModeId, compID, subTUAbsPartIdx, partIdxesPerSubTU);
+              pcCU->setCrossComponentPredictionAlphaPartRange((bUseCrossCPrediction ? preCalcAlpha : 0), compID, subTUAbsPartIdx, partIdxesPerSubTU );
+
+              if ((compID != COMPONENT_Cr) && ((transformSkipModeId == 1) ? m_pcEncCfg->getUseRDOQTS() : m_pcEncCfg->getUseRDOQ()))
+              {
+                m_pcEntropyCoder->estimateBit(m_pcTrQuant->m_pcEstBitsSbac, tuCompRect.width, tuCompRect.height, toChannelType(compID));
+              }
+
+#if RDOQ_CHROMA_LAMBDA
+              m_pcTrQuant->selectLambda(compID);
+#endif
+
+              Pel *pcResiCurrComp = m_pcQTTempTComYuv[uiQTTempAccessLayer].getAddrPix(compID, tuCompRect.x0, tuCompRect.y0);
+              UInt resiStride     = m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(compID);
+
+              TCoeff bestCoeffComp   [MAX_TU_SIZE*MAX_TU_SIZE];
+              Pel    bestResiComp    [MAX_TU_SIZE*MAX_TU_SIZE];
+
+#if ADAPTIVE_QP_SELECTION
+              TCoeff bestArlCoeffComp[MAX_TU_SIZE*MAX_TU_SIZE];
+#endif
+              TCoeff     currAbsSum   = 0;
+              UInt       currCompBits = 0;
+              Distortion currCompDist = 0;
+              Double     currCompCost = 0;
+              UInt       nonCoeffBits = 0;
+              Distortion nonCoeffDist = 0;
+              Double     nonCoeffCost = 0;
+
+              if(!isOneMode && !isFirstMode)
+              {
+                memcpy(bestCoeffComp,    currentCoefficients,    (sizeof(TCoeff) * tuCompRect.width * tuCompRect.height));
+#if ADAPTIVE_QP_SELECTION
+                memcpy(bestArlCoeffComp, currentARLCoefficients, (sizeof(TCoeff) * tuCompRect.width * tuCompRect.height));
+#endif
+                for(Int y = 0; y < tuCompRect.height; y++)
+                {
+                  memcpy(&bestResiComp[y * tuCompRect.width], (pcResiCurrComp + (y * resiStride)), (sizeof(Pel) * tuCompRect.width));
+                }
+              }
+
+              if (bUseCrossCPrediction)
+              {
+                TComTrQuant::crossComponentPrediction(TUIterator,
+                                                      compID,
+                                                      pLumaResi,
+                                                      pcResi->getAddrPix(compID, tuCompRect.x0, tuCompRect.y0),
+                                                      crossCPredictedResidualBuffer,
+                                                      tuCompRect.width,
+                                                      tuCompRect.height,
+                                                      m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(COMPONENT_Y),
+                                                      pcResi->getStride(compID),
+                                                      tuCompRect.width,
+                                                      false);
+
+                m_pcTrQuant->transformNxN(TUIterator, compID, crossCPredictedResidualBuffer, tuCompRect.width, currentCoefficients,
+#if ADAPTIVE_QP_SELECTION
+                                          currentARLCoefficients,
+#endif
+                                          currAbsSum, cQP);
+              }
+              else
+              {
+                m_pcTrQuant->transformNxN(TUIterator, compID, pcResi->getAddrPix( compID, tuCompRect.x0, tuCompRect.y0 ), pcResi->getStride(compID), currentCoefficients,
+#if ADAPTIVE_QP_SELECTION
+                                          currentARLCoefficients,
+#endif
+                                          currAbsSum, cQP);
+              }
+
+              if(isFirstMode || (currAbsSum == 0))
+              {
+                if (bUseCrossCPrediction)
+                {
+                  TComTrQuant::crossComponentPrediction(TUIterator,
+                                                        compID,
+                                                        pLumaResi,
+                                                        m_pTempPel,
+                                                        m_pcQTTempTComYuv[uiQTTempAccessLayer].getAddrPix(compID, tuCompRect.x0, tuCompRect.y0),
+                                                        tuCompRect.width,
+                                                        tuCompRect.height,
+                                                        m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(COMPONENT_Y),
+                                                        tuCompRect.width,
+                                                        m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(compID),
+                                                        true);
+
+                  nonCoeffDist = m_pcRdCost->getDistPart( g_bitDepth[toChannelType(compID)], m_pcQTTempTComYuv[uiQTTempAccessLayer].getAddrPix( compID, tuCompRect.x0, tuCompRect.y0 ),
+                                                                                              m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride( compID ), pcResi->getAddrPix( compID, tuCompRect.x0, tuCompRect.y0 ),
+                                                                                              pcResi->getStride(compID), tuCompRect.width, tuCompRect.height, compID); // initialized with zero residual destortion
+                }
+                else
+                {
+                  nonCoeffDist = m_pcRdCost->getDistPart( g_bitDepth[toChannelType(compID)], m_pTempPel, tuCompRect.width, pcResi->getAddrPix( compID, tuCompRect.x0, tuCompRect.y0 ),
+                                                                                              pcResi->getStride(compID), tuCompRect.width, tuCompRect.height, compID); // initialized with zero residual destortion
+                }
+
+                m_pcEntropyCoder->encodeQtCbfZero( TUIterator, toChannelType(compID) );
+
+                if ( isCrossCPredictionAvailable )
+                {
+                  m_pcEntropyCoder->encodeCrossComponentPrediction( TUIterator, compID );
+                }
+
+                nonCoeffBits = m_pcEntropyCoder->getNumberOfWrittenBits();
+                nonCoeffCost = m_pcRdCost->calcRdCost( nonCoeffBits, nonCoeffDist );
+              }
+
+              if((puiZeroDist != NULL) && isFirstMode)
+              {
+                *puiZeroDist += nonCoeffDist; // initialized with zero residual destortion
+              }
+
+              DEBUG_STRING_NEW(sSingleStringTest)
+
+              if( currAbsSum > 0 ) //if non-zero coefficients are present, a residual needs to be derived for further prediction
+              {
+                if (isFirstMode)
+                {
+                  m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[ uiDepth ][ CI_QT_TRAFO_ROOT ] );
+                  m_pcEntropyCoder->resetBits();
+                }
+
+                m_pcEntropyCoder->encodeQtCbf( TUIterator, compID, true );
+
+                if (isCrossCPredictionAvailable)
+                {
+                  m_pcEntropyCoder->encodeCrossComponentPrediction( TUIterator, compID );
+                }
+
+                m_pcEntropyCoder->encodeCoeffNxN( TUIterator, currentCoefficients, compID );
+                currCompBits = m_pcEntropyCoder->getNumberOfWrittenBits();
+
+                pcResiCurrComp = m_pcQTTempTComYuv[uiQTTempAccessLayer].getAddrPix( compID, tuCompRect.x0, tuCompRect.y0 );
+
+                m_pcTrQuant->invTransformNxN( TUIterator, compID, pcResiCurrComp, m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(compID), currentCoefficients, cQP DEBUG_STRING_PASS_INTO_OPTIONAL(&sSingleStringTest, (DebugOptionList::DebugString_InvTran.getInt()&debugPredModeMask)) );
+
+                if (bUseCrossCPrediction)
+                {
+                  TComTrQuant::crossComponentPrediction(TUIterator,
+                                                        compID,
+                                                        pLumaResi,
+                                                        m_pcQTTempTComYuv[uiQTTempAccessLayer].getAddrPix(compID, tuCompRect.x0, tuCompRect.y0),
+                                                        m_pcQTTempTComYuv[uiQTTempAccessLayer].getAddrPix(compID, tuCompRect.x0, tuCompRect.y0),
+                                                        tuCompRect.width,
+                                                        tuCompRect.height,
+                                                        m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(COMPONENT_Y),
+                                                        m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(compID     ),
+                                                        m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(compID     ),
+                                                        true);
+                }
+
+                currCompDist = m_pcRdCost->getDistPart( g_bitDepth[toChannelType(compID)], m_pcQTTempTComYuv[uiQTTempAccessLayer].getAddrPix( compID, tuCompRect.x0, tuCompRect.y0 ),
+                                                        m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(compID),
+                                                        pcResi->getAddrPix( compID, tuCompRect.x0, tuCompRect.y0 ),
+                                                        pcResi->getStride(compID),
+                                                        tuCompRect.width, tuCompRect.height, compID);
+
+                currCompCost = m_pcRdCost->calcRdCost(currCompBits, currCompDist);
+                  
+                if (pcCU->isLosslessCoded(0)) nonCoeffCost = MAX_DOUBLE;
+              }
+              else if ((transformSkipModeId == 1) && !bUseCrossCPrediction)
+              {
+                currCompCost = MAX_DOUBLE;
+              }
+              else
+              {
+                currCompBits = nonCoeffBits;
+                currCompDist = nonCoeffDist;
+                currCompCost = nonCoeffCost;
+              }
+
+              // evaluate
+              if ((currCompCost < minCost[compID][subTUIndex]) || ((transformSkipModeId == 1) && (currCompCost == minCost[compID][subTUIndex])))
+              {
+                bestExplicitRdpcmModeUnSplit[compID][subTUIndex] = pcCU->getExplicitRdpcmMode(compID, subTUAbsPartIdx);
+
+                if(isFirstMode) //check for forced null
+                {
+                  if((nonCoeffCost < currCompCost) || (currAbsSum == 0))
+                  {
+                    memset(currentCoefficients, 0, (sizeof(TCoeff) * tuCompRect.width * tuCompRect.height));
+
+                    currAbsSum   = 0;
+                    currCompBits = nonCoeffBits;
+                    currCompDist = nonCoeffDist;
+                    currCompCost = nonCoeffCost;
+                  }
+                }
+
+#ifdef DEBUG_STRING
+                if (currAbsSum > 0)
+                {
+                  DEBUG_STRING_SWAP(sSingleStringComp[compID], sSingleStringTest)
+                }
+                else
+                {
+                  sSingleStringComp[compID].clear();
+                }
+#endif
+
+                uiAbsSum                 [compID][subTUIndex] = currAbsSum;
+                uiSingleDistComp         [compID][subTUIndex] = currCompDist;
+                minCost                  [compID][subTUIndex] = currCompCost;
+                uiBestTransformMode      [compID][subTUIndex] = transformSkipModeId;
+                bestCrossCPredictionAlpha[compID][subTUIndex] = (crossCPredictionModeId == 1) ? pcCU->getCrossComponentPredictionAlpha(subTUAbsPartIdx, compID) : 0;
+
+                if (uiAbsSum[compID][subTUIndex] == 0)
+                {
+                  if (bUseCrossCPrediction)
+                  {
+                    TComTrQuant::crossComponentPrediction(TUIterator,
+                                                          compID,
+                                                          pLumaResi,
+                                                          m_pTempPel,
+                                                          m_pcQTTempTComYuv[uiQTTempAccessLayer].getAddrPix(compID, tuCompRect.x0, tuCompRect.y0),
+                                                          tuCompRect.width,
+                                                          tuCompRect.height,
+                                                          m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(COMPONENT_Y),
+                                                          tuCompRect.width,
+                                                          m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(compID),
+                                                          true);
+                  }
+                  else
+                  {
+                    pcResiCurrComp = m_pcQTTempTComYuv[uiQTTempAccessLayer].getAddrPix(compID, tuCompRect.x0, tuCompRect.y0);
+                    const UInt uiStride = m_pcQTTempTComYuv[uiQTTempAccessLayer].getStride(compID);
+                    for(UInt uiY = 0; uiY < tuCompRect.height; uiY++)
+                    {
+                      memset(pcResiCurrComp, 0, (sizeof(Pel) * tuCompRect.width));
+                      pcResiCurrComp += uiStride;
+                    }
+                  }
+                }
+              }
+              else
+              {
+                // reset
+                memcpy(currentCoefficients,    bestCoeffComp,    (sizeof(TCoeff) * tuCompRect.width * tuCompRect.height));
+#if ADAPTIVE_QP_SELECTION
+                memcpy(currentARLCoefficients, bestArlCoeffComp, (sizeof(TCoeff) * tuCompRect.width * tuCompRect.height));
+#endif
+                for (Int y = 0; y < tuCompRect.height; y++)
+                {
+                  memcpy((pcResiCurrComp + (y * resiStride)), &bestResiComp[y * tuCompRect.width], (sizeof(Pel) * tuCompRect.width));
+                }
+              }
+            }
+          }
+
+          pcCU->setExplicitRdpcmModePartRange            (   bestExplicitRdpcmModeUnSplit[compID][subTUIndex],                            compID, subTUAbsPartIdx, partIdxesPerSubTU);
+          pcCU->setTransformSkipPartRange                (   uiBestTransformMode         [compID][subTUIndex],                            compID, subTUAbsPartIdx, partIdxesPerSubTU );
+          pcCU->setCbfPartRange                          ((((uiAbsSum                    [compID][subTUIndex] > 0) ? 1 : 0) << uiTrMode), compID, subTUAbsPartIdx, partIdxesPerSubTU );
+          pcCU->setCrossComponentPredictionAlphaPartRange(   bestCrossCPredictionAlpha   [compID][subTUIndex],                            compID, subTUAbsPartIdx, partIdxesPerSubTU );
+        } //end of sub-TU loop
+        while (TUIterator.nextSection(rTu));
+      } // processing section
+    } // component loop
+
+    for(UInt ch = 0; ch < numValidComp; ch++)
+    {
+      const ComponentID compID = ComponentID(ch);
+      if (rTu.ProcessComponentSection(compID) && (rTu.getRect(compID).width != rTu.getRect(compID).height))
+      {
+        offsetSubTUCBFs(rTu, compID); //the CBFs up to now have been defined for two sub-TUs - shift them down a level and replace with the parent level CBF
+      }
+    }
+
+    m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[ uiDepth ][ CI_QT_TRAFO_ROOT ] );
+    m_pcEntropyCoder->resetBits();
+
+    if( uiLog2TrSize > pcCU->getQuadtreeTULog2MinSizeInCU(uiAbsPartIdx) )
+    {
+      m_pcEntropyCoder->encodeTransformSubdivFlag( 0, 5 - uiLog2TrSize );
+    }
+
+    for(UInt ch = 0; ch < numValidComp; ch++)
+    {
+      const UInt chOrderChange = ((ch + 1) == numValidComp) ? 0 : (ch + 1);
+      const ComponentID compID=ComponentID(chOrderChange);
+      if( rTu.ProcessComponentSection(compID) )
+      {
+        m_pcEntropyCoder->encodeQtCbf( rTu, compID, true );
+      }
+    }
+
+    for(UInt ch = 0; ch < numValidComp; ch++)
+    {
+      const ComponentID compID=ComponentID(ch);
+      if (rTu.ProcessComponentSection(compID))
+      {
+        if(isChroma(compID) && (uiAbsSum[COMPONENT_Y][0] != 0))
+        {
+          m_pcEntropyCoder->encodeCrossComponentPrediction( rTu, compID );
+        }
+
+        m_pcEntropyCoder->encodeCoeffNxN( rTu, pcCoeffCurr[compID], compID );
+        for (UInt subTUIndex = 0; subTUIndex < 2; subTUIndex++) uiSingleDist += uiSingleDistComp[compID][subTUIndex];
+      }
+    }
+
+    uiSingleBits = m_pcEntropyCoder->getNumberOfWrittenBits();
+
+    dSingleCost = m_pcRdCost->calcRdCost( uiSingleBits, uiSingleDist );
+  } // check full
+
+  // code sub-blocks
+  if( bCheckSplit )
+  {
+    if( bCheckFull )
+    {
+      m_pcRDGoOnSbacCoder->store( m_pppcRDSbacCoder[ uiDepth ][ CI_QT_TRAFO_TEST ] );
+      m_pcRDGoOnSbacCoder->load ( m_pppcRDSbacCoder[ uiDepth ][ CI_QT_TRAFO_ROOT ] );
+    }
+    Distortion uiSubdivDist = 0;
+    UInt       uiSubdivBits = 0;
+    Double     dSubdivCost = 0.0;
+
+    //save the non-split CBFs in case we need to restore them later
+
+    UInt bestCBF     [MAX_NUM_COMPONENT];
+    UInt bestsubTUCBF[MAX_NUM_COMPONENT][2];
+    for(UInt ch = 0; ch < numValidComp; ch++)
+    {
+      const ComponentID compID=ComponentID(ch);
+
+      if (rTu.ProcessComponentSection(compID))
+      {
+        bestCBF[compID] = pcCU->getCbf(uiAbsPartIdx, compID, uiTrMode);
+
+        const TComRectangle &tuCompRect = rTu.getRect(compID);
+        if (tuCompRect.width != tuCompRect.height)
+        {
+          const UInt partIdxesPerSubTU = rTu.GetAbsPartIdxNumParts(compID) >> 1;
+
+          for (UInt subTU = 0; subTU < 2; subTU++)
+            bestsubTUCBF[compID][subTU] = pcCU->getCbf ((uiAbsPartIdx + (subTU * partIdxesPerSubTU)), compID, subTUDepth);
+        }
+      }
+    }
+
+
+    TComTURecurse tuRecurseChild(rTu, false);
+    const UInt uiQPartNumSubdiv = tuRecurseChild.GetAbsPartIdxNumParts();
+
+    DEBUG_STRING_NEW(sSplitString[MAX_NUM_COMPONENT])
+
+    do
+    {
+      DEBUG_STRING_NEW(childString)
+      xEstimateResidualQT( pcResi, dSubdivCost, uiSubdivBits, uiSubdivDist, bCheckFull ? NULL : puiZeroDist,  tuRecurseChild DEBUG_STRING_PASS_INTO(childString));
+#ifdef DEBUG_STRING
+      // split the string by component and append to the relevant output (because decoder decodes in channel order, whereas this search searches by TU-order)
+      std::size_t lastPos=0;
+      const std::size_t endStrng=childString.find(debug_reorder_data_inter_token[MAX_NUM_COMPONENT], lastPos);
+      for(UInt ch = 0; ch < numValidComp; ch++)
+      {
+        if (lastPos!=std::string::npos && childString.find(debug_reorder_data_inter_token[ch], lastPos)==lastPos) lastPos+=strlen(debug_reorder_data_inter_token[ch]); // skip leading string
+        std::size_t pos=childString.find(debug_reorder_data_inter_token[ch+1], lastPos);
+        if (pos!=std::string::npos && pos>endStrng) lastPos=endStrng;
+        sSplitString[ch]+=childString.substr(lastPos, (pos==std::string::npos)? std::string::npos : (pos-lastPos) );
+        lastPos=pos;
+      }
+#endif
+    }
+    while ( tuRecurseChild.nextSection(rTu) ) ;
+
+    UInt uiCbfAny=0;
+    for(UInt ch = 0; ch < numValidComp; ch++)
+    {
+      UInt uiYUVCbf = 0;
+      for( UInt ui = 0; ui < 4; ++ui )
+      {
+        uiYUVCbf |= pcCU->getCbf( uiAbsPartIdx + ui * uiQPartNumSubdiv, ComponentID(ch),  uiTrMode + 1 );
+      }
+      UChar *pBase=pcCU->getCbf( ComponentID(ch) );
+      const UInt flags=uiYUVCbf << uiTrMode;
+      for( UInt ui = 0; ui < 4 * uiQPartNumSubdiv; ++ui )
+      {
+        pBase[uiAbsPartIdx + ui] |= flags;
+      }
+      uiCbfAny|=uiYUVCbf;
+    }
+
+    m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[ uiDepth ][ CI_QT_TRAFO_ROOT ] );
+    m_pcEntropyCoder->resetBits();
+
+    // when compID isn't a channel, code Cbfs:
+    xEncodeResidualQT( MAX_NUM_COMPONENT, rTu );
+    for(UInt ch = 0; ch < numValidComp; ch++)
+    {
+      xEncodeResidualQT( ComponentID(ch), rTu );
+    }
+
+    uiSubdivBits = m_pcEntropyCoder->getNumberOfWrittenBits();
+    dSubdivCost  = m_pcRdCost->calcRdCost( uiSubdivBits, uiSubdivDist );
+
+    if (!bCheckFull || (uiCbfAny && (dSubdivCost < dSingleCost)))
+    {
+      rdCost += dSubdivCost;
+      ruiBits += uiSubdivBits;
+      ruiDist += uiSubdivDist;
+#ifdef DEBUG_STRING
+      for(UInt ch = 0; ch < numValidComp; ch++)
+      {
+        DEBUG_STRING_APPEND(sDebug, debug_reorder_data_inter_token[ch])
+        DEBUG_STRING_APPEND(sDebug, sSplitString[ch])
+      }
+#endif
+    }
+    else
+    {
+      rdCost  += dSingleCost;
+      ruiBits += uiSingleBits;
+      ruiDist += uiSingleDist;
+
+      //restore state to unsplit
+
+      pcCU->setTrIdxSubParts( uiTrMode, uiAbsPartIdx, uiDepth );
+
+      for(UInt ch = 0; ch < numValidComp; ch++)
+      {
+        const ComponentID compID=ComponentID(ch);
+
+        DEBUG_STRING_APPEND(sDebug, debug_reorder_data_inter_token[ch])
+        if (rTu.ProcessComponentSection(compID))
+        {
+          DEBUG_STRING_APPEND(sDebug, sSingleStringComp[compID])
+
+          const Bool splitIntoSubTUs   = rTu.getRect(compID).width != rTu.getRect(compID).height;
+          const UInt numberOfSections  = splitIntoSubTUs ? 2 : 1;
+          const UInt partIdxesPerSubTU = rTu.GetAbsPartIdxNumParts(compID) >> (splitIntoSubTUs ? 1 : 0);
+
+          for (UInt subTUIndex = 0; subTUIndex < numberOfSections; subTUIndex++)
+          {
+            const UInt  uisubTUPartIdx = uiAbsPartIdx + (subTUIndex * partIdxesPerSubTU);
+
+            if (splitIntoSubTUs)
+            {
+              const UChar combinedCBF = (bestsubTUCBF[compID][subTUIndex] << subTUDepth) | (bestCBF[compID] << uiTrMode);
+              pcCU->setCbfPartRange(combinedCBF, compID, uisubTUPartIdx, partIdxesPerSubTU);
+            }
+            else
+            {
+              pcCU->setCbfPartRange((bestCBF[compID] << uiTrMode), compID, uisubTUPartIdx, partIdxesPerSubTU);
+            }
+
+            pcCU->setCrossComponentPredictionAlphaPartRange(bestCrossCPredictionAlpha[compID][subTUIndex], compID, uisubTUPartIdx, partIdxesPerSubTU);
+            pcCU->setTransformSkipPartRange(uiBestTransformMode[compID][subTUIndex], compID, uisubTUPartIdx, partIdxesPerSubTU);
+            pcCU->setExplicitRdpcmModePartRange(bestExplicitRdpcmModeUnSplit[compID][subTUIndex], compID, uisubTUPartIdx, partIdxesPerSubTU);
+          }
+        }
+      }
+
+      m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[ uiDepth ][ CI_QT_TRAFO_TEST ] );
+    }
+  }
+  else
+  {
+    rdCost  += dSingleCost;
+    ruiBits += uiSingleBits;
+    ruiDist += uiSingleDist;
+#ifdef DEBUG_STRING
+    for(UInt ch = 0; ch < numValidComp; ch++)
+    {
+      const ComponentID compID=ComponentID(ch);
+      DEBUG_STRING_APPEND(sDebug, debug_reorder_data_inter_token[compID])
+
+      if (rTu.ProcessComponentSection(compID))
+      {
+        DEBUG_STRING_APPEND(sDebug, sSingleStringComp[compID])
+      }
+    }
+#endif
+  }
+  DEBUG_STRING_APPEND(sDebug, debug_reorder_data_inter_token[MAX_NUM_COMPONENT])
+}
+
+
+
+// Entropy-code the residual quad-tree rooted at rTu, for RD bit counting.
+// Two modes of operation, selected by compID:
+//  - compID == MAX_NUM_COMPONENT: code the subdivision flag and the CBFs,
+//    recursing into all child TUs (channel-independent syntax).
+//  - compID == a real component: code that component's coefficients only,
+//    assuming the CBFs have already been coded by a prior call.
+// The split is needed because the search visits TUs in TU-order while the
+// decoder parses in channel order.
+Void TEncSearch::xEncodeResidualQT( const ComponentID compID, TComTU &rTu )
+{
+  TComDataCU* pcCU=rTu.getCU();
+  const UInt uiAbsPartIdx=rTu.GetAbsPartIdxTU();
+  const UInt uiCurrTrMode = rTu.GetTransformDepthRel();
+  assert( pcCU->getDepth( 0 ) == pcCU->getDepth( uiAbsPartIdx ) );
+  const UInt uiTrMode = pcCU->getTransformIdx( uiAbsPartIdx );
+
+  // This TU is subdivided if the depth chosen for the CU exceeds the depth
+  // we are currently visiting.
+  const Bool bSubdiv = uiCurrTrMode != uiTrMode;
+
+  const UInt uiLog2TrSize = rTu.GetLog2LumaTrSize();
+
+  if (compID==MAX_NUM_COMPONENT)  // we are not processing a channel, instead we always recurse and code the CBFs
+  {
+    // The subdivision flag is only signalled when both split and no-split
+    // are legal at this size; otherwise the decoder infers it.
+    if( uiLog2TrSize <= pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() && uiLog2TrSize > pcCU->getQuadtreeTULog2MinSizeInCU(uiAbsPartIdx) )
+    {
+      m_pcEntropyCoder->encodeTransformSubdivFlag( bSubdiv, 5 - uiLog2TrSize );
+    }
+
+    // This path is used only for inter residual search.
+    assert( !pcCU->isIntra(uiAbsPartIdx) );
+
+    const Bool bFirstCbfOfCU = uiCurrTrMode == 0;
+
+    // Chroma CBFs are coded before the luma CBF, matching decoder parse order.
+    for (UInt ch=COMPONENT_Cb; ch<pcCU->getPic()->getNumberValidComponents(); ch++)
+    {
+      const ComponentID compIdInner=ComponentID(ch);
+      if( bFirstCbfOfCU || rTu.ProcessingAllQuadrants(compIdInner) )
+      {
+        // A chroma CBF is only signalled if the parent level had a non-zero
+        // CBF (or we are at the CU root, where there is no parent).
+        if( bFirstCbfOfCU || pcCU->getCbf( uiAbsPartIdx, compIdInner, uiCurrTrMode - 1 ) )
+        {
+          m_pcEntropyCoder->encodeQtCbf( rTu, compIdInner, !bSubdiv );
+        }
+      }
+      else
+      {
+        // Not signalled: the CBF must have been inherited unchanged.
+        assert( pcCU->getCbf( uiAbsPartIdx, compIdInner, uiCurrTrMode ) == pcCU->getCbf( uiAbsPartIdx, compIdInner, uiCurrTrMode - 1 ) );
+      }
+    }
+
+    // Luma CBF is only coded at leaf TUs.
+    if (!bSubdiv)
+    {
+      m_pcEntropyCoder->encodeQtCbf( rTu, COMPONENT_Y, true );
+    }
+  }
+
+  if( !bSubdiv )
+  {
+    if (compID != MAX_NUM_COMPONENT) // we have already coded the CBFs, so now we code coefficients
+    {
+      if (rTu.ProcessComponentSection(compID))
+      {
+        // Cross-component prediction parameters are coded for chroma when the
+        // co-located luma block has non-zero coefficients.
+        if (isChroma(compID) && (pcCU->getCbf(uiAbsPartIdx, COMPONENT_Y, uiTrMode) != 0))
+        {
+          m_pcEntropyCoder->encodeCrossComponentPrediction(rTu, compID);
+        }
+
+        if (pcCU->getCbf(uiAbsPartIdx, compID, uiTrMode) != 0)
+        {
+          // Coefficients live in the depth-indexed RD scratch buffers, not yet
+          // in the CU (they are copied back later by xSetResidualQTData).
+          const UInt uiQTTempAccessLayer = pcCU->getSlice()->getSPS()->getQuadtreeTULog2MaxSize() - uiLog2TrSize;
+          TCoeff *pcCoeffCurr = m_ppcQTTempCoeff[compID][uiQTTempAccessLayer] + rTu.getCoefficientOffset(compID);
+          m_pcEntropyCoder->encodeCoeffNxN( rTu, pcCoeffCurr, compID );
+        }
+      }
+    }
+  }
+  else
+  {
+    // Recurse into the four children; for a real component we can prune the
+    // recursion when its CBF at this depth is already zero.
+    if( compID==MAX_NUM_COMPONENT || pcCU->getCbf( uiAbsPartIdx, compID, uiCurrTrMode ) )
+    {
+      TComTURecurse tuRecurseChild(rTu, false);
+      do
+      {
+        xEncodeResidualQT( compID, tuRecurseChild );
+      } while (tuRecurseChild.nextSection(rTu));
+    }
+  }
+}
+
+
+
+
+// Copy the winning residual data out of the RD-search scratch buffers,
+// recursing down to the transform depth selected for each region of the CU.
+//  - bSpatial == true : copy the inverse-transformed (spatial) residual into
+//    pcResi, component rectangle by component rectangle.
+//  - bSpatial == false: copy the quantised coefficients (and, with
+//    ADAPTIVE_QP_SELECTION, the ARL coefficients) into the CU's own buffers;
+//    pcResi is unused on this path.
+Void TEncSearch::xSetResidualQTData( TComYuv* pcResi, Bool bSpatial, TComTU &rTu )
+{
+  TComDataCU* pcCU=rTu.getCU();
+  const UInt uiCurrTrMode=rTu.GetTransformDepthRel();
+  const UInt uiAbsPartIdx=rTu.GetAbsPartIdxTU();
+  assert( pcCU->getDepth( 0 ) == pcCU->getDepth( uiAbsPartIdx ) );
+  const UInt uiTrMode = pcCU->getTransformIdx( uiAbsPartIdx );
+  TComSPS *sps=pcCU->getSlice()->getSPS();
+
+  if( uiCurrTrMode == uiTrMode )
+  {
+    // Leaf TU: this is the depth whose data was kept by the search, so the
+    // matching scratch layer holds the chosen residual.
+    const UInt uiLog2TrSize = rTu.GetLog2LumaTrSize();
+    const UInt uiQTTempAccessLayer = sps->getQuadtreeTULog2MaxSize() - uiLog2TrSize;
+
+    if( bSpatial )
+    {
+      // Data to be copied is in the spatial domain, i.e., inverse-transformed.
+
+      for(UInt i=0; i<pcResi->getNumberValidComponents(); i++)
+      {
+        const ComponentID compID=ComponentID(i);
+        if (rTu.ProcessComponentSection(compID))
+        {
+          const TComRectangle &rectCompTU(rTu.getRect(compID));
+          m_pcQTTempTComYuv[uiQTTempAccessLayer].copyPartToPartComponentMxN    ( compID, pcResi, rectCompTU );
+        }
+      }
+    }
+    else
+    {
+      // Coefficient-domain copy: move quantised levels from the scratch
+      // buffers into the CU so later entropy coding reads them from there.
+      for (UInt ch=0; ch < getNumberValidComponents(sps->getChromaFormatIdc()); ch++)
+      {
+        const ComponentID compID   = ComponentID(ch);
+        if (rTu.ProcessComponentSection(compID))
+        {
+          const TComRectangle &rectCompTU(rTu.getRect(compID));
+          const UInt numCoeffInBlock    = rectCompTU.width * rectCompTU.height;
+          const UInt offset             = rTu.getCoefficientOffset(compID);
+          TCoeff* dest                  = pcCU->getCoeff(compID)                        + offset;
+          const TCoeff* src             = m_ppcQTTempCoeff[compID][uiQTTempAccessLayer] + offset;
+          ::memcpy( dest, src, sizeof(TCoeff)*numCoeffInBlock );
+
+#if ADAPTIVE_QP_SELECTION
+          TCoeff* pcArlCoeffSrc            = m_ppcQTTempArlCoeff[compID][uiQTTempAccessLayer] + offset;
+          TCoeff* pcArlCoeffDst            = pcCU->getArlCoeff(compID)                        + offset;
+          ::memcpy( pcArlCoeffDst, pcArlCoeffSrc, sizeof( TCoeff ) * numCoeffInBlock );
+#endif
+        }
+      }
+    }
+  }
+  else
+  {
+    // Not yet at the selected depth: descend into the four child TUs.
+
+    TComTURecurse tuRecurseChild(rTu, false);
+    do
+    {
+      xSetResidualQTData( pcResi, bSpatial, tuRecurseChild );
+    } while (tuRecurseChild.nextSection(rTu));
+  }
+}
+
+
+
+
+// Count the bits needed to signal intra direction uiMode for the partition at
+// uiPartOffset, without permanently modifying the CU. The intra-direction
+// entry is poked in directly by reference, the bits are counted, and the
+// original value is restored before returning. Returns the bit count.
+UInt TEncSearch::xModeBitsIntra( TComDataCU* pcCU, UInt uiMode, UInt uiPartOffset, UInt uiDepth, UInt uiInitTrDepth, const ChannelType chType )
+{
+  // Reload only contexts required for coding intra mode information
+  m_pcRDGoOnSbacCoder->loadIntraDirMode( m_pppcRDSbacCoder[uiDepth][CI_CURR_BEST], chType );
+
+  // Temporarily set the intra dir being tested, and only
+  // for absPartIdx, since encodeIntraDirModeLuma/Chroma only use
+  // the entry at absPartIdx.
+
+  UChar &rIntraDirVal=pcCU->getIntraDir( chType )[uiPartOffset];
+  UChar origVal=rIntraDirVal;
+  rIntraDirVal = uiMode;
+  //pcCU->setIntraDirSubParts ( chType, uiMode, uiPartOffset, uiDepth + uiInitTrDepth );
+
+  m_pcEntropyCoder->resetBits();
+  if (isLuma(chType))
+    m_pcEntropyCoder->encodeIntraDirModeLuma ( pcCU, uiPartOffset);
+  else
+    m_pcEntropyCoder->encodeIntraDirModeChroma ( pcCU, uiPartOffset);
+
+  rIntraDirVal = origVal; // restore
+
+  return m_pcEntropyCoder->getNumberOfWrittenBits();
+}
+
+
+
+
+// Try to insert candidate uiMode with cost uiCost into the cost-sorted lists
+// CandModeList/CandCostList (ascending cost, length uiFastCandNum), keeping
+// only the uiFastCandNum cheapest entries. Returns 1 if the candidate was
+// inserted, 0 if it was more expensive than everything already stored.
+UInt TEncSearch::xUpdateCandList( UInt uiMode, Double uiCost, UInt uiFastCandNum, UInt * CandModeList, Double * CandCostList )
+{
+  // Count how many entries, scanning from the most expensive end, the new
+  // candidate undercuts.
+  UInt numBeaten = 0;
+  while( numBeaten < uiFastCandNum && uiCost < CandCostList[ uiFastCandNum - 1 - numBeaten ] )
+  {
+    numBeaten++;
+  }
+
+  if( numBeaten == 0 )
+  {
+    return 0; // too expensive: list unchanged
+  }
+
+  // Shift the beaten entries one slot towards the expensive end (the worst
+  // one falls off), then drop the new candidate into the freed slot.
+  for( UInt k = 1; k < numBeaten; k++ )
+  {
+    CandModeList[ uiFastCandNum - k ] = CandModeList[ uiFastCandNum - 1 - k ];
+    CandCostList[ uiFastCandNum - k ] = CandCostList[ uiFastCandNum - 1 - k ];
+  }
+  CandModeList[ uiFastCandNum - numBeaten ] = uiMode;
+  CandCostList[ uiFastCandNum - numBeaten ] = uiCost;
+
+  return 1;
+}
+
+
+
+
+
+/** add inter-prediction syntax elements for a CU block and accumulate their bit cost
+ * \param pcCU     CU whose mode/partition/coefficient syntax is counted
+ * \param uiQp     CU QP (NOTE(review): not referenced in this body — confirm whether the parameter is vestigial)
+ * \param uiTrMode transform depth (NOTE(review): not referenced in this body — confirm whether the parameter is vestigial)
+ * \param ruiBits  in/out: incremented by the number of bits written
+ * \returns Void
+ */
+Void  TEncSearch::xAddSymbolBitsInter( TComDataCU* pcCU, UInt uiQp, UInt uiTrMode, UInt& ruiBits )
+{
+  // 2Nx2N merge with an all-zero residual is signalled as skip: only the
+  // (optional) transquant-bypass flag, skip flag and merge index are coded.
+  if(pcCU->getMergeFlag( 0 ) && pcCU->getPartitionSize( 0 ) == SIZE_2Nx2N && !pcCU->getQtRootCbf( 0 ))
+  {
+    pcCU->setSkipFlagSubParts( true, 0, pcCU->getDepth(0) );
+
+    m_pcEntropyCoder->resetBits();
+    if(pcCU->getSlice()->getPPS()->getTransquantBypassEnableFlag())
+    {
+      m_pcEntropyCoder->encodeCUTransquantBypassFlag(pcCU, 0, true);
+    }
+    m_pcEntropyCoder->encodeSkipFlag(pcCU, 0, true);
+    m_pcEntropyCoder->encodeMergeIndex(pcCU, 0, true);
+
+    ruiBits += m_pcEntropyCoder->getNumberOfWrittenBits();
+  }
+  else
+  {
+    // Full inter CU: code the mode/partition/prediction syntax followed by
+    // the coefficients, in decoder parse order.
+    m_pcEntropyCoder->resetBits();
+
+    if(pcCU->getSlice()->getPPS()->getTransquantBypassEnableFlag())
+    {
+      m_pcEntropyCoder->encodeCUTransquantBypassFlag(pcCU, 0, true);
+    }
+
+    m_pcEntropyCoder->encodeSkipFlag ( pcCU, 0, true );
+    m_pcEntropyCoder->encodePredMode( pcCU, 0, true );
+    m_pcEntropyCoder->encodePartSize( pcCU, 0, pcCU->getDepth(0), true );
+    m_pcEntropyCoder->encodePredInfo( pcCU, 0 );
+
+    // Delta-QP and chroma-QP-adjustment signalling is suppressed during this
+    // RD estimate.
+    Bool codeDeltaQp = false;
+    Bool codeChromaQpAdj = false;
+    m_pcEntropyCoder->encodeCoeff   ( pcCU, 0, pcCU->getDepth(0), codeDeltaQp, codeChromaQpAdj );
+
+    ruiBits += m_pcEntropyCoder->getNumberOfWrittenBits();
+  }
+}
+
+
+
+
+
+/**
+ * \brief Generate half-sample interpolated block
+ *
+ * Produces the four half-pel planes of the luma ROI into m_filteredBlock:
+ * [0][0] integer position, [2][0] vertical-half, [0][2] horizontal-half,
+ * [2][2] diagonal-half. Horizontal filtering is done once into
+ * m_filteredBlockTmp[0] (phase 0) and [2] (phase 2), then each plane is
+ * obtained by a vertical pass over the appropriate intermediate.
+ *
+ * \param pattern Reference picture ROI
+ * \param biPred    Flag indicating whether block is for biprediction
+ *                  (NOTE(review): not referenced in this body — confirm
+ *                  whether it is needed by the interface only)
+ */
+Void TEncSearch::xExtDIFUpSamplingH( TComPattern* pattern, Bool biPred )
+{
+  Int width      = pattern->getROIYWidth();
+  Int height     = pattern->getROIYHeight();
+  Int srcStride  = pattern->getPatternLStride();
+
+  Int intStride = m_filteredBlockTmp[0].getStride(COMPONENT_Y);
+  Int dstStride = m_filteredBlock[0][0].getStride(COMPONENT_Y);
+  Pel *intPtr;
+  Pel *dstPtr;
+  Int filterSize = NTAPS_LUMA;
+  Int halfFilterSize = (filterSize>>1);
+  // Start half a filter length above and one column left of the ROI so the
+  // intermediate buffer covers all rows/columns the vertical pass will read.
+  Pel *srcPtr = pattern->getROIY() - halfFilterSize*srcStride - 1;
+
+  const ChromaFormat chFmt = m_filteredBlock[0][0].getChromaFormat();
+
+  // Horizontal pass: phase 0 (integer) and phase 2 (half-pel), height padded
+  // by the filter size for the subsequent vertical filtering.
+  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, m_filteredBlockTmp[0].getAddr(COMPONENT_Y), intStride, width+1, height+filterSize, 0, false, chFmt);
+  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, m_filteredBlockTmp[2].getAddr(COMPONENT_Y), intStride, width+1, height+filterSize, 2, false, chFmt);
+
+  // Plane [0][0]: integer position (vertical phase 0 over the phase-0 rows).
+  intPtr = m_filteredBlockTmp[0].getAddr(COMPONENT_Y) + halfFilterSize * intStride + 1;
+  dstPtr = m_filteredBlock[0][0].getAddr(COMPONENT_Y);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width+0, height+0, 0, false, true, chFmt);
+
+  // Plane [2][0]: vertical half-pel (phase 2), one extra output row.
+  intPtr = m_filteredBlockTmp[0].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride + 1;
+  dstPtr = m_filteredBlock[2][0].getAddr(COMPONENT_Y);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width+0, height+1, 2, false, true, chFmt);
+
+  // Plane [0][2]: horizontal half-pel, one extra output column.
+  intPtr = m_filteredBlockTmp[2].getAddr(COMPONENT_Y) + halfFilterSize * intStride;
+  dstPtr = m_filteredBlock[0][2].getAddr(COMPONENT_Y);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width+1, height+0, 0, false, true, chFmt);
+
+  // Plane [2][2]: diagonal half-pel, one extra row and column.
+  intPtr = m_filteredBlockTmp[2].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride;
+  dstPtr = m_filteredBlock[2][2].getAddr(COMPONENT_Y);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width+1, height+1, 2, false, true, chFmt);
+}
+
+
+
+
+
+/**
+ * \brief Generate quarter-sample interpolated blocks
+ *
+ * \param pattern    Reference picture ROI
+ * \param halfPelRef Half-pel mv
+ * \param biPred     Flag indicating whether block is for biprediction
+ */
+Void TEncSearch::xExtDIFUpSamplingQ( TComPattern* pattern, TComMv halfPelRef, Bool biPred ) // NOTE(review): biPred is not referenced in this body
+{
+  Int width      = pattern->getROIYWidth();
+  Int height     = pattern->getROIYHeight();
+  Int srcStride  = pattern->getPatternLStride();
+
+  Pel *srcPtr;
+  Int intStride = m_filteredBlockTmp[0].getStride(COMPONENT_Y);
+  Int dstStride = m_filteredBlock[0][0].getStride(COMPONENT_Y);
+  Pel *intPtr;
+  Pel *dstPtr;
+  Int filterSize = NTAPS_LUMA;
+
+  Int halfFilterSize = (filterSize>>1);
+
+  Int extHeight = (halfPelRef.getVer() == 0) ? height + filterSize : height + filterSize-1; // one extra row is filtered when the half-pel MV has no vertical offset
+
+  const ChromaFormat chFmt = m_filteredBlock[0][0].getChromaFormat();
+
+  // Horizontal filter 1/4
+  srcPtr = pattern->getROIY() - halfFilterSize * srcStride - 1;
+  intPtr = m_filteredBlockTmp[1].getAddr(COMPONENT_Y);
+  if (halfPelRef.getVer() > 0)
+  {
+    srcPtr += srcStride;
+  }
+  if (halfPelRef.getHor() >= 0)
+  {
+    srcPtr += 1;
+  }
+  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, intPtr, intStride, width, extHeight, 1, false, chFmt); // quarter-pel phase -> tmp[1]
+
+  // Horizontal filter 3/4
+  srcPtr = pattern->getROIY() - halfFilterSize*srcStride - 1;
+  intPtr = m_filteredBlockTmp[3].getAddr(COMPONENT_Y);
+  if (halfPelRef.getVer() > 0)
+  {
+    srcPtr += srcStride;
+  }
+  if (halfPelRef.getHor() > 0)
+  {
+    srcPtr += 1;
+  }
+  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, intPtr, intStride, width, extHeight, 3, false, chFmt); // three-quarter-pel phase -> tmp[3]
+
+  // Generate @ 1,1
+  intPtr = m_filteredBlockTmp[1].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride;
+  dstPtr = m_filteredBlock[1][1].getAddr(COMPONENT_Y);
+  if (halfPelRef.getVer() == 0)
+  {
+    intPtr += intStride;
+  }
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1, false, true, chFmt);
+
+  // Generate @ 3,1
+  intPtr = m_filteredBlockTmp[1].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride;
+  dstPtr = m_filteredBlock[3][1].getAddr(COMPONENT_Y);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3, false, true, chFmt);
+
+  if (halfPelRef.getVer() != 0)
+  {
+    // Generate @ 2,1
+    intPtr = m_filteredBlockTmp[1].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride;
+    dstPtr = m_filteredBlock[2][1].getAddr(COMPONENT_Y);
+    if (halfPelRef.getVer() == 0) // NOTE(review): can never be true inside this getVer() != 0 branch — appears to be dead code (as in upstream HM)
+    {
+      intPtr += intStride;
+    }
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 2, false, true, chFmt);
+
+    // Generate @ 2,3
+    intPtr = m_filteredBlockTmp[3].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride;
+    dstPtr = m_filteredBlock[2][3].getAddr(COMPONENT_Y);
+    if (halfPelRef.getVer() == 0) // NOTE(review): dead for the same reason as the check above
+    {
+      intPtr += intStride;
+    }
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 2, false, true, chFmt);
+  }
+  else
+  {
+    // Generate @ 0,1
+    intPtr = m_filteredBlockTmp[1].getAddr(COMPONENT_Y) + halfFilterSize * intStride;
+    dstPtr = m_filteredBlock[0][1].getAddr(COMPONENT_Y);
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 0, false, true, chFmt);
+
+    // Generate @ 0,3
+    intPtr = m_filteredBlockTmp[3].getAddr(COMPONENT_Y) + halfFilterSize * intStride;
+    dstPtr = m_filteredBlock[0][3].getAddr(COMPONENT_Y);
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 0, false, true, chFmt);
+  }
+
+  if (halfPelRef.getHor() != 0)
+  {
+    // Generate @ 1,2
+    intPtr = m_filteredBlockTmp[2].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride;
+    dstPtr = m_filteredBlock[1][2].getAddr(COMPONENT_Y);
+    if (halfPelRef.getHor() > 0)
+    {
+      intPtr += 1;
+    }
+    if (halfPelRef.getVer() >= 0)
+    {
+      intPtr += intStride;
+    }
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1, false, true, chFmt);
+
+    // Generate @ 3,2
+    intPtr = m_filteredBlockTmp[2].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride;
+    dstPtr = m_filteredBlock[3][2].getAddr(COMPONENT_Y);
+    if (halfPelRef.getHor() > 0)
+    {
+      intPtr += 1;
+    }
+    if (halfPelRef.getVer() > 0)
+    {
+      intPtr += intStride;
+    }
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3, false, true, chFmt);
+  }
+  else
+  {
+    // Generate @ 1,0
+    intPtr = m_filteredBlockTmp[0].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride + 1;
+    dstPtr = m_filteredBlock[1][0].getAddr(COMPONENT_Y);
+    if (halfPelRef.getVer() >= 0)
+    {
+      intPtr += intStride;
+    }
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1, false, true, chFmt);
+
+    // Generate @ 3,0
+    intPtr = m_filteredBlockTmp[0].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride + 1;
+    dstPtr = m_filteredBlock[3][0].getAddr(COMPONENT_Y);
+    if (halfPelRef.getVer() > 0)
+    {
+      intPtr += intStride;
+    }
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3, false, true, chFmt);
+  }
+
+  // Generate @ 1,3
+  intPtr = m_filteredBlockTmp[3].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride;
+  dstPtr = m_filteredBlock[1][3].getAddr(COMPONENT_Y);
+  if (halfPelRef.getVer() == 0)
+  {
+    intPtr += intStride;
+  }
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1, false, true, chFmt);
+
+  // Generate @ 3,3
+  intPtr = m_filteredBlockTmp[3].getAddr(COMPONENT_Y) + (halfFilterSize-1) * intStride;
+  dstPtr = m_filteredBlock[3][3].getAddr(COMPONENT_Y);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3, false, true, chFmt);
+}
+
+
+
+
+
+/** Set up weighted-prediction distortion parameters (m_cDistParam) for motion estimation.
+ * \param pcCU            CU whose slice/PPS settings determine whether weighting applies
+ * \param iRefIdx         reference index; a negative value disables weighting
+ * \param eRefPicListCur  reference picture list that iRefIdx refers to
+ * \returns Void
+ */
+Void  TEncSearch::setWpScalingDistParam( TComDataCU* pcCU, Int iRefIdx, RefPicList eRefPicListCur )
+{
+  if ( iRefIdx<0 )
+  {
+    m_cDistParam.bApplyWeight = false; // no valid reference: disable weighted distortion
+    return;
+  }
+
+  TComSlice       *pcSlice  = pcCU->getSlice();
+  TComPPS         *pps      = pcCU->getSlice()->getPPS();
+  WPScalingParam  *wp0 , *wp1;
+
+  m_cDistParam.bApplyWeight = ( pcSlice->getSliceType()==P_SLICE && pps->getUseWP() ) || ( pcSlice->getSliceType()==B_SLICE && pps->getWPBiPred() ) ; // WP for P-slices, bi-pred WP for B-slices
+
+  if ( !m_cDistParam.bApplyWeight ) return;
+
+  Int iRefIdx0 = ( eRefPicListCur == REF_PIC_LIST_0 ) ? iRefIdx : (-1);
+  Int iRefIdx1 = ( eRefPicListCur == REF_PIC_LIST_1 ) ? iRefIdx : (-1);
+
+  getWpScaling( pcCU, iRefIdx0, iRefIdx1, wp0 , wp1 );
+
+  if ( iRefIdx0 < 0 ) wp0 = NULL; // null out the table for whichever list is not in use
+  if ( iRefIdx1 < 0 ) wp1 = NULL;
+
+  m_cDistParam.wpCur  = NULL;
+
+  if ( eRefPicListCur == REF_PIC_LIST_0 )
+  {
+    m_cDistParam.wpCur = wp0;
+  }
+  else
+  {
+    m_cDistParam.wpCur = wp1;
+  }
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncSearch.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,474 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncSearch.h
+    \brief    encoder search class (header)
+*/
+
+#ifndef __TENCSEARCH__
+#define __TENCSEARCH__
+
+// Include files
+#include "TLibCommon/TComYuv.h"
+#include "TLibCommon/TComMotionInfo.h"
+#include "TLibCommon/TComPattern.h"
+#include "TLibCommon/TComPrediction.h"
+#include "TLibCommon/TComTrQuant.h"
+#include "TLibCommon/TComPic.h"
+#include "TLibCommon/TComRectangle.h"
+#include "TEncEntropy.h"
+#include "TEncSbac.h"
+#include "TEncCfg.h"
+
+
+//! \ingroup TLibEncoder
+//! \{
+
+class TEncCu;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+static const UInt MAX_NUM_REF_LIST_ADAPT_SR=2;
+static const UInt MAX_IDX_ADAPT_SR=33;
+static const UInt NUM_MV_PREDICTORS=3;
+
+/// encoder search class
+class TEncSearch : public TComPrediction
+{
+private: // residual-quadtree (RQT) scratch buffers — presumably allocated in init(); confirm in TEncSearch.cpp
+  TCoeff**        m_ppcQTTempCoeff[MAX_NUM_COMPONENT /* 0->Y, 1->Cb, 2->Cr*/];
+  TCoeff*         m_pcQTTempCoeff[MAX_NUM_COMPONENT];
+#if ADAPTIVE_QP_SELECTION
+  TCoeff**        m_ppcQTTempArlCoeff[MAX_NUM_COMPONENT];
+  TCoeff*         m_pcQTTempArlCoeff[MAX_NUM_COMPONENT];
+#endif
+  UChar*          m_puhQTTempTrIdx;
+  UChar*          m_puhQTTempCbf[MAX_NUM_COMPONENT];
+
+  TComYuv*        m_pcQTTempTComYuv;
+  TComYuv         m_tmpYuvPred; // To be used in xGetInterPredictionError() to avoid constant memory allocation/deallocation
+
+  Char*           m_phQTTempCrossComponentPredictionAlpha[MAX_NUM_COMPONENT];
+  Pel*            m_pSharedPredTransformSkip[MAX_NUM_COMPONENT];
+  TCoeff*         m_pcQTTempTUCoeff[MAX_NUM_COMPONENT];
+  UChar*          m_puhQTTempTransformSkipFlag[MAX_NUM_COMPONENT];
+  TComYuv         m_pcQTTempTransformSkipTComYuv;
+#if ADAPTIVE_QP_SELECTION
+  TCoeff*         m_ppcQTTempTUArlCoeff[MAX_NUM_COMPONENT];
+#endif
+
+protected:
+  // interface to option
+  TEncCfg*        m_pcEncCfg;
+
+  // interface to classes
+  TComTrQuant*    m_pcTrQuant;
+  TComRdCost*     m_pcRdCost;
+  TEncEntropy*    m_pcEntropyCoder;
+
+  // ME parameters
+  Int             m_iSearchRange;
+  Int             m_bipredSearchRange; // Search range for bi-prediction
+  Int             m_iFastSearch;
+  Int             m_aaiAdaptSR[MAX_NUM_REF_LIST_ADAPT_SR][MAX_IDX_ADAPT_SR]; // per-list, per-ref adaptive search range (see setAdaptiveSearchRange)
+  TComMv          m_cSrchRngLT;
+  TComMv          m_cSrchRngRB;
+  TComMv          m_acMvPredictors[NUM_MV_PREDICTORS]; // Left, Above, AboveRight. enum MVP_DIR first NUM_MV_PREDICTORS entries are suitable for accessing.
+
+  // RD computation
+  TEncSbac***     m_pppcRDSbacCoder;
+  TEncSbac*       m_pcRDGoOnSbacCoder;
+  DistParam       m_cDistParam;
+
+  // Misc.
+  Pel*            m_pTempPel;
+  const UInt*     m_puiDFilter;
+  Int             m_iMaxDeltaQP;
+
+  // AMVP cost computation
+  // UInt            m_auiMVPIdxCost[AMVP_MAX_NUM_CANDS+1][AMVP_MAX_NUM_CANDS];
+  UInt            m_auiMVPIdxCost[AMVP_MAX_NUM_CANDS+1][AMVP_MAX_NUM_CANDS+1]; //th array bounds
+
+  TComMv          m_integerMv2Nx2N[NUM_REF_PIC_LIST_01][MAX_NUM_REF]; // per-list, per-ref integer MV of the 2Nx2N partition
+
+public:
+  TEncSearch();
+  virtual ~TEncSearch();
+
+  Void init(  TEncCfg*      pcEncCfg,
+            TComTrQuant*  pcTrQuant,
+            Int           iSearchRange,
+            Int           bipredSearchRange,
+            Int           iFastSearch,
+            Int           iMaxDeltaQP,
+            TEncEntropy*  pcEntropyCoder,
+            TComRdCost*   pcRdCost,
+            TEncSbac***   pppcRDSbacCoder,
+            TEncSbac*     pcRDGoOnSbacCoder );
+
+protected:
+
+  /// sub-function for motion vector refinement used in fractional-pel accuracy
+  Distortion  xPatternRefinement( TComPattern* pcPatternKey,
+                                  TComMv baseRefMv,
+                                  Int iFrac, TComMv& rcMvFrac, Bool bAllowUseOfHadamard
+                                 );
+
+  typedef struct
+  {
+    Pel*        piRefY;
+    Int         iYStride;
+    Int         iBestX;
+    Int         iBestY;
+    UInt        uiBestRound;
+    UInt        uiBestDistance;
+    Distortion  uiBestSad;
+    UChar       ucPointNr;
+  } IntTZSearchStruct; // running best-candidate state shared by the xTZ* search helpers below
+
+  // sub-functions for ME
+  __inline Void xTZSearchHelp         ( TComPattern* pcPatternKey, IntTZSearchStruct& rcStruct, const Int iSearchX, const Int iSearchY, const UChar ucPointNr, const UInt uiDistance );
+  __inline Void xTZ2PointSearch       ( TComPattern* pcPatternKey, IntTZSearchStruct& rcStrukt, TComMv* pcMvSrchRngLT, TComMv* pcMvSrchRngRB ); // NOTE(review): "rcStrukt" spelling is as-committed upstream
+  __inline Void xTZ8PointSquareSearch ( TComPattern* pcPatternKey, IntTZSearchStruct& rcStrukt, TComMv* pcMvSrchRngLT, TComMv* pcMvSrchRngRB, const Int iStartX, const Int iStartY, const Int iDist );
+  __inline Void xTZ8PointDiamondSearch( TComPattern* pcPatternKey, IntTZSearchStruct& rcStrukt, TComMv* pcMvSrchRngLT, TComMv* pcMvSrchRngRB, const Int iStartX, const Int iStartY, const Int iDist );
+
+  Void xGetInterPredictionError( TComDataCU* pcCU, TComYuv* pcYuvOrg, Int iPartIdx, Distortion& ruiSAD, Bool Hadamard );
+
+public:
+  Void  preestChromaPredMode    ( TComDataCU* pcCU,
+                                  TComYuv*    pcOrgYuv,
+                                  TComYuv*    pcPredYuv );
+
+  Void  estIntraPredQT          ( TComDataCU* pcCU,
+                                  TComYuv*    pcOrgYuv,
+                                  TComYuv*    pcPredYuv,
+                                  TComYuv*    pcResiYuv,
+                                  TComYuv*    pcRecoYuv,
+                                  Pel         resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE],
+                                  Distortion& ruiDistC,
+                                  Bool        bLumaOnly
+                                  DEBUG_STRING_FN_DECLARE(sDebug));
+
+  Void  estIntraPredChromaQT    ( TComDataCU* pcCU,
+                                  TComYuv*    pcOrgYuv,
+                                  TComYuv*    pcPredYuv,
+                                  TComYuv*    pcResiYuv,
+                                  TComYuv*    pcRecoYuv,
+                                  Pel         resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE],
+                                  Distortion  uiPreCalcDistC
+                                  DEBUG_STRING_FN_DECLARE(sDebug));
+
+  /// encoder estimation - inter prediction (non-skip)
+  Void predInterSearch          ( TComDataCU* pcCU,
+                                  TComYuv*    pcOrgYuv,
+                                  TComYuv*    pcPredYuv,
+                                  TComYuv*    pcResiYuv,
+                                  TComYuv*    pcRecoYuv
+                                  DEBUG_STRING_FN_DECLARE(sDebug),
+                                  Bool        bUseRes = false
+#if AMP_MRG
+                                 ,Bool        bUseMRG = false
+#endif
+                                );
+
+  /// encode residual and compute rd-cost for inter mode
+  Void encodeResAndCalcRdInterCU( TComDataCU* pcCU,
+                                  TComYuv*    pcYuvOrg,
+                                  TComYuv*    pcYuvPred,
+                                  TComYuv*    pcYuvResi,
+                                  TComYuv*    pcYuvResiBest,
+                                  TComYuv*    pcYuvRec,
+                                  Bool        bSkipRes
+                                  DEBUG_STRING_FN_DECLARE(sDebug) );
+
+  /// set ME search range
+  Void setAdaptiveSearchRange   ( Int iDir, Int iRefIdx, Int iSearchRange) { assert(iDir < MAX_NUM_REF_LIST_ADAPT_SR && iRefIdx<Int(MAX_IDX_ADAPT_SR)); m_aaiAdaptSR[iDir][iRefIdx] = iSearchRange; }
+
+  Void xEncPCM    (TComDataCU* pcCU, UInt uiAbsPartIdx, Pel* piOrg, Pel* piPCM, Pel* piPred, Pel* piResi, Pel* piReco, UInt uiStride, UInt uiWidth, UInt uiHeight, const ComponentID compID );
+  Void IPCMSearch (TComDataCU* pcCU, TComYuv* pcOrgYuv, TComYuv* rpcPredYuv, TComYuv* rpcResiYuv, TComYuv* rpcRecoYuv );
+protected:
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // Intra search
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Void  xEncSubdivCbfQT           ( TComTU      &rTu,
+                                    Bool         bLuma,
+                                    Bool         bChroma );
+
+  Void  xEncCoeffQT               ( TComTU &rTu,
+                                    ComponentID  component,
+                                    Bool         bRealCoeff );
+  Void  xEncIntraHeader           ( TComDataCU*  pcCU,
+                                    UInt         uiTrDepth,
+                                    UInt         uiAbsPartIdx,
+                                    Bool         bLuma,
+                                    Bool         bChroma );
+  UInt  xGetIntraBitsQT           ( TComTU &rTu,
+                                    Bool         bLuma,
+                                    Bool         bChroma,
+                                    Bool         bRealCoeff );
+
+  UInt  xGetIntraBitsQTChroma    ( TComTU &rTu,
+                                   ComponentID compID,
+                                   Bool          bRealCoeff );
+
+  Void  xIntraCodingTUBlock       (       TComYuv*      pcOrgYuv,
+                                          TComYuv*      pcPredYuv,
+                                          TComYuv*      pcResiYuv,
+                                          Pel           resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE],
+                                    const Bool          checkCrossCPrediction,
+                                          Distortion&   ruiDist,
+                                    const ComponentID   compID,
+                                          TComTU        &rTu
+                                    DEBUG_STRING_FN_DECLARE(sTest)
+                                         ,Int           default0Save1Load2 = 0
+                                   );
+
+  Void  xRecurIntraCodingQT       ( Bool        bLumaOnly,
+                                    TComYuv*    pcOrgYuv,
+                                    TComYuv*    pcPredYuv,
+                                    TComYuv*    pcResiYuv,
+                                    Pel         resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE],
+                                    Distortion& ruiDistY,
+                                    Distortion& ruiDistC,
+#if HHI_RQT_INTRA_SPEEDUP
+                                   Bool         bCheckFirst,
+#endif
+                                   Double&      dRDCost,
+                                   TComTU      &rTu
+                                   DEBUG_STRING_FN_DECLARE(sDebug));
+
+  Void  xSetIntraResultQT         ( Bool         bLumaOnly,
+                                    TComYuv*     pcRecoYuv,
+                                    TComTU &rTu);
+
+  Void xStoreCrossComponentPredictionResult  (       Pel    *pResiLuma,
+                                               const Pel    *pBestLuma,
+                                                     TComTU &rTu,
+                                               const Int     xOffset,
+                                               const Int     yOffset,
+                                               const Int     strideResi,
+                                               const Int     strideBest );
+
+  Char xCalcCrossComponentPredictionAlpha    (       TComTU &rTu,
+                                               const ComponentID compID,
+                                               const Pel*        piResiL,
+                                               const Pel*        piResiC,
+                                               const Int         width,
+                                               const Int         height,
+                                               const Int         strideL,
+                                               const Int         strideC );
+
+  Void  xRecurIntraChromaCodingQT ( TComYuv*    pcOrgYuv,
+                                    TComYuv*    pcPredYuv,
+                                    TComYuv*    pcResiYuv,
+                                    Pel         resiLuma[NUMBER_OF_STORED_RESIDUAL_TYPES][MAX_CU_SIZE * MAX_CU_SIZE],
+                                    Distortion& ruiDist,
+                                    TComTU      &rTu
+                                    DEBUG_STRING_FN_DECLARE(sDebug));
+
+  Void  xSetIntraResultChromaQT   ( TComYuv*    pcRecoYuv, TComTU &rTu);
+
+  Void  xStoreIntraResultQT       ( const ComponentID first, const ComponentID lastIncl, TComTU &rTu);
+  Void  xLoadIntraResultQT        ( const ComponentID first, const ComponentID lastIncl, TComTU &rTu);
+
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // Inter search (AMP)
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Void xEstimateMvPredAMVP        ( TComDataCU* pcCU,
+                                    TComYuv*    pcOrgYuv,
+                                    UInt        uiPartIdx,
+                                    RefPicList  eRefPicList,
+                                    Int         iRefIdx,
+                                    TComMv&     rcMvPred,
+                                    Bool        bFilled = false
+                                  , Distortion* puiDistBiP = NULL
+                                     );
+
+  Void xCheckBestMVP              ( TComDataCU* pcCU,
+                                    RefPicList  eRefPicList,
+                                    TComMv      cMv,
+                                    TComMv&     rcMvPred,
+                                    Int&        riMVPIdx,
+                                    UInt&       ruiBits,
+                                    Distortion& ruiCost );
+
+  Distortion xGetTemplateCost    ( TComDataCU*  pcCU,
+                                    UInt        uiPartIdx,
+                                    UInt        uiPartAddr,
+                                    TComYuv*    pcOrgYuv,
+                                    TComYuv*    pcTemplateCand,
+                                    TComMv      cMvCand,
+                                    Int         iMVPIdx,
+                                    Int         iMVPNum,
+                                    RefPicList  eRefPicList,
+                                    Int         iRefIdx,
+                                    Int         iSizeX,
+                                    Int         iSizeY
+                                   );
+
+
+  Void xCopyAMVPInfo              ( AMVPInfo*   pSrc, AMVPInfo* pDst );
+  UInt xGetMvpIdxBits             ( Int iIdx, Int iNum );
+  Void xGetBlkBits                ( PartSize  eCUMode, Bool bPSlice, Int iPartIdx,  UInt uiLastMode, UInt uiBlkBit[3]);
+
+  Void xMergeEstimation           ( TComDataCU*  pcCU,
+                                    TComYuv*     pcYuvOrg,
+                                    Int          iPartIdx,
+                                    UInt&        uiInterDir,
+                                    TComMvField* pacMvField,
+                                    UInt&        uiMergeIndex,
+                                    Distortion&  ruiCost,
+                                    TComMvField* cMvFieldNeighbours,
+                                    UChar*       uhInterDirNeighbours,
+                                    Int&         numValidMergeCand
+                                   );
+
+  Void xRestrictBipredMergeCand   ( TComDataCU*     pcCU,
+                                    UInt            puIdx,
+                                    TComMvField*    mvFieldNeighbours,
+                                    UChar*          interDirNeighbours,
+                                    Int             numValidMergeCand );
+
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // motion estimation
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Void xMotionEstimation          ( TComDataCU*  pcCU,
+                                    TComYuv*     pcYuvOrg,
+                                    Int          iPartIdx,
+                                    RefPicList   eRefPicList,
+                                    TComMv*      pcMvPred,
+                                    Int          iRefIdxPred,
+                                    TComMv&      rcMv,
+                                    UInt&        ruiBits,
+                                    Distortion&  ruiCost,
+                                    Bool         bBi = false  );
+
+  Void xTZSearch                  ( TComDataCU*  pcCU,
+                                    TComPattern* pcPatternKey,
+                                    Pel*         piRefY,
+                                    Int          iRefStride,
+                                    TComMv*      pcMvSrchRngLT,
+                                    TComMv*      pcMvSrchRngRB,
+                                    TComMv&      rcMv,
+                                    Distortion&  ruiSAD,
+                                    const TComMv *pIntegerMv2Nx2NPred
+                                    );
+
+  Void xTZSearchSelective         ( TComDataCU*  pcCU,
+                                    TComPattern* pcPatternKey,
+                                    Pel*         piRefY,
+                                    Int          iRefStride,
+                                    TComMv*      pcMvSrchRngLT,
+                                    TComMv*      pcMvSrchRngRB,
+                                    TComMv&      rcMv,
+                                    Distortion&  ruiSAD,
+                                    const TComMv *pIntegerMv2Nx2NPred
+                                    );
+
+  Void xSetSearchRange            ( TComDataCU*  pcCU,
+                                    TComMv&      cMvPred,
+                                    Int          iSrchRng,
+                                    TComMv&      rcMvSrchRngLT,
+                                    TComMv&      rcMvSrchRngRB );
+
+  Void xPatternSearchFast         ( TComDataCU*  pcCU,
+                                    TComPattern* pcPatternKey,
+                                    Pel*         piRefY,
+                                    Int          iRefStride,
+                                    TComMv*      pcMvSrchRngLT,
+                                    TComMv*      pcMvSrchRngRB,
+                                    TComMv&      rcMv,
+                                    Distortion&  ruiSAD,
+                                    const TComMv* pIntegerMv2Nx2NPred
+                                  );
+
+  Void xPatternSearch             ( TComPattern* pcPatternKey,
+                                    Pel*         piRefY,
+                                    Int          iRefStride,
+                                    TComMv*      pcMvSrchRngLT,
+                                    TComMv*      pcMvSrchRngRB,
+                                    TComMv&      rcMv,
+                                    Distortion&  ruiSAD );
+
+  Void xPatternSearchFracDIF      (
+                                    Bool         bIsLosslessCoded,
+                                    TComPattern* pcPatternKey,
+                                    Pel*         piRefY,
+                                    Int          iRefStride,
+                                    TComMv*      pcMvInt,
+                                    TComMv&      rcMvHalf,
+                                    TComMv&      rcMvQter,
+                                    Distortion&  ruiCost,
+                                    Bool         biPred
+                                   );
+
+  Void xExtDIFUpSamplingH( TComPattern* pcPattern, Bool biPred  ); // half-pel interpolation of the search window
+  Void xExtDIFUpSamplingQ( TComPattern* pcPatternKey, TComMv halfPelRef, Bool biPred ); // quarter-pel interpolation around the best half-pel MV
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // T & Q & Q-1 & T-1
+  // -------------------------------------------------------------------------------------------------------------------
+
+
+  Void xEncodeResidualQT( const ComponentID compID, TComTU &rTu );
+  Void xEstimateResidualQT( TComYuv* pcResi, Double &rdCost, UInt &ruiBits, Distortion &ruiDist, Distortion *puiZeroDist, TComTU &rTu DEBUG_STRING_FN_DECLARE(sDebug) );
+  Void xSetResidualQTData( TComYuv* pcResi, Bool bSpatial, TComTU &rTu  );
+
+  UInt  xModeBitsIntra ( TComDataCU* pcCU, UInt uiMode, UInt uiPartOffset, UInt uiDepth, UInt uiInitTrDepth, const ChannelType compID );
+  UInt  xUpdateCandList( UInt uiMode, Double uiCost, UInt uiFastCandNum, UInt * CandModeList, Double * CandCostList );
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // compute symbol bits
+  // -------------------------------------------------------------------------------------------------------------------
+
+  Void xAddSymbolBitsInter       ( TComDataCU*   pcCU,
+                                   UInt          uiQp,
+                                   UInt          uiTrMode,
+                                   UInt&         ruiBits);
+
+  Void  setWpScalingDistParam( TComDataCU* pcCU, Int iRefIdx, RefPicList eRefPicListCur );
+  inline  Void  setDistParamComp( ComponentID compIdx )  { m_cDistParam.compIdx = compIdx; }
+
+};// END CLASS DEFINITION TEncSearch
+
+//! \}
+
+#endif // __TENCSEARCH__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncSlice.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1224 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncSlice.cpp
+    \brief    slice encoder class
+*/
+
+#include "TEncTop.h"
+#include "TEncSlice.h"
+#include <math.h>
+
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / create / destroy
+// ====================================================================================================================
+
+TEncSlice::TEncSlice()
+{
+  // Picture buffers are allocated lazily in create().
+  m_apcPicYuvPred = NULL;
+  m_apcPicYuvResi = NULL;
+
+  // Per-QP-candidate RD arrays are allocated in init().
+  m_pdRdPicLambda = NULL;
+  m_pdRdPicQp     = NULL;
+  m_piRdPicQp     = NULL;
+}
+
+TEncSlice::~TEncSlice()
+{
+  // Intentionally empty: owned resources are released in destroy(), not here.
+}
+
+/** Allocate the prediction and residual picture buffers (idempotent:
+ *  buffers already created are kept as-is).
+ * \param iWidth        picture width in luma samples
+ * \param iHeight       picture height in luma samples
+ * \param chromaFormat  chroma subsampling format
+ * \param iMaxCUWidth   maximum CU width (determines border padding)
+ * \param iMaxCUHeight  maximum CU height (determines border padding)
+ * \param uhTotalDepth  maximum CU partitioning depth
+ */
+Void TEncSlice::create( Int iWidth, Int iHeight, ChromaFormat chromaFormat, UInt iMaxCUWidth, UInt iMaxCUHeight, UChar uhTotalDepth )
+{
+  // create prediction picture
+  if ( m_apcPicYuvPred == NULL )
+  {
+    m_apcPicYuvPred  = new TComPicYuv;
+    m_apcPicYuvPred->create( iWidth, iHeight, chromaFormat, iMaxCUWidth, iMaxCUHeight, uhTotalDepth );
+  }
+
+  // create residual picture
+  if( m_apcPicYuvResi == NULL )
+  {
+    m_apcPicYuvResi  = new TComPicYuv;
+    m_apcPicYuvResi->create( iWidth, iHeight, chromaFormat, iMaxCUWidth, iMaxCUHeight, uhTotalDepth );
+  }
+}
+
+/** Release the picture buffers and the per-QP-candidate RD arrays.
+ *  Safe to call multiple times: every freed pointer is reset to NULL.
+ */
+Void TEncSlice::destroy()
+{
+  // destroy prediction picture
+  if ( m_apcPicYuvPred )
+  {
+    m_apcPicYuvPred->destroy();
+    delete m_apcPicYuvPred;
+    m_apcPicYuvPred  = NULL;
+  }
+
+  // destroy residual picture
+  if ( m_apcPicYuvResi )
+  {
+    m_apcPicYuvResi->destroy();
+    delete m_apcPicYuvResi;
+    m_apcPicYuvResi  = NULL;
+  }
+
+  // free lambda and QP arrays
+  if ( m_pdRdPicLambda ) { xFree( m_pdRdPicLambda ); m_pdRdPicLambda = NULL; }
+  if ( m_pdRdPicQp     ) { xFree( m_pdRdPicQp     ); m_pdRdPicQp     = NULL; }
+  if ( m_piRdPicQp     ) { xFree( m_piRdPicQp     ); m_piRdPicQp     = NULL; }
+}
+
+/** Cache pointers to the shared encoder sub-components owned by the top-level
+ *  encoder, and allocate the per-QP-candidate RD arrays.
+ *  The arrays hold 2*DeltaQpRD+1 entries: the base QP plus DeltaQpRD
+ *  candidates on either side (see initEncSlice / precompressSlice).
+ * \param pcEncTop top-level encoder supplying all sub-components
+ */
+Void TEncSlice::init( TEncTop* pcEncTop )
+{
+  m_pcCfg             = pcEncTop;
+  m_pcListPic         = pcEncTop->getListPic();
+
+  m_pcGOPEncoder      = pcEncTop->getGOPEncoder();
+  m_pcCuEncoder       = pcEncTop->getCuEncoder();
+  m_pcPredSearch      = pcEncTop->getPredSearch();
+
+  m_pcEntropyCoder    = pcEncTop->getEntropyCoder();
+  m_pcSbacCoder       = pcEncTop->getSbacCoder();
+  m_pcBinCABAC        = pcEncTop->getBinCABAC();
+  m_pcTrQuant         = pcEncTop->getTrQuant();
+
+  m_pcRdCost          = pcEncTop->getRdCost();
+  m_pppcRDSbacCoder   = pcEncTop->getRDSbacCoder();
+  m_pcRDGoOnSbacCoder = pcEncTop->getRDGoOnSbacCoder();
+
+  // create lambda and QP arrays
+  // NOTE(review): xMalloc results are not checked here; presumably xMalloc
+  // aborts on failure — confirm against its definition.
+  m_pdRdPicLambda     = (Double*)xMalloc( Double, m_pcCfg->getDeltaQpRD() * 2 + 1 );
+  m_pdRdPicQp         = (Double*)xMalloc( Double, m_pcCfg->getDeltaQpRD() * 2 + 1 );
+  m_piRdPicQp         = (Int*   )xMalloc( Int,    m_pcCfg->getDeltaQpRD() * 2 + 1 );
+  m_pcRateCtrl        = pcEncTop->getRateCtrl();
+}
+
+
+
+/** Propagate a new RD lambda to all consumers: the RD cost object,
+ *  the per-component distortion weights (derived from the chroma QP
+ *  mapping/offsets), the transform-quantizer (for RDOQ) and the slice
+ *  (for SAO decisions).
+ * \param slice   slice whose chroma QP offsets are read and whose lambdas are set
+ * \param dLambda luma lambda
+ * \param iQP     luma QP the lambda was derived from
+ */
+Void
+TEncSlice::setUpLambda(TComSlice* slice, const Double dLambda, Int iQP)
+{
+  // store lambda
+  m_pcRdCost ->setLambda( dLambda );
+
+  // for RDO
+  // in RdCost there is only one lambda because the luma and chroma bits are not separated, instead we weight the distortion of chroma.
+  // Element 0 (luma) is dLambda; the chroma entries are filled in below.
+  Double dLambdas[MAX_NUM_COMPONENT] = { dLambda };
+  for(UInt compIdx=1; compIdx<MAX_NUM_COMPONENT; compIdx++)
+  {
+    const ComponentID compID=ComponentID(compIdx);
+    Int chromaQPOffset = slice->getPPS()->getQpOffset(compID) + slice->getSliceChromaQpDelta(compID);
+    Int qpc=(iQP + chromaQPOffset < 0) ? iQP : getScaledChromaQP(iQP + chromaQPOffset, m_pcCfg->getChromaFormatIdc());
+    Double tmpWeight = pow( 2.0, (iQP-qpc)/3.0 );  // takes into account of the chroma qp mapping and chroma qp Offset
+    m_pcRdCost->setDistortionWeight(compID, tmpWeight);
+    dLambdas[compIdx]=dLambda/tmpWeight;
+  }
+
+#if RDOQ_CHROMA_LAMBDA
+// for RDOQ
+  m_pcTrQuant->setLambdas( dLambdas );
+#else
+  m_pcTrQuant->setLambda( dLambda );
+#endif
+
+// For SAO
+  slice   ->setLambdas( dLambdas );
+}
+
+
+
+/**
+ - non-referenced frame marking
+ - QP computation based on temporal structure
+ - lambda computation based on QP
+ - set temporal layer ID and the parameter sets
+ .
+ \param pcPic        picture class
+ \param pocLast     POC of last picture
+ \param pocCurr    current POC
+ \param iNumPicRcvd  number of received pictures
+ \param iGOPid       index of the current entry in the configured GOP structure
+ \param rpcSlice     slice header class (output: set to pcPic's slice 0)
+ \param pSPS         SPS associated with the slice
+ \param pPPS         PPS associated with the slice
+ \param isField      true when coding interlaced field pictures
+ */
+
+Void TEncSlice::initEncSlice( TComPic* pcPic, Int pocLast, Int pocCurr, Int iNumPicRcvd, Int iGOPid, TComSlice*& rpcSlice, TComSPS* pSPS, TComPPS *pPPS, Bool isField )
+{
+  Double dQP;
+  Double dLambda;
+
+  rpcSlice = pcPic->getSlice(0);
+  rpcSlice->setSPS( pSPS );
+  rpcSlice->setPPS( pPPS );
+  rpcSlice->setSliceBits(0);
+  rpcSlice->setPic( pcPic );
+  rpcSlice->initSlice();
+  rpcSlice->setPicOutputFlag( true );
+  rpcSlice->setPOC( pocCurr );
+
+  // depth computation based on GOP size
+  Int depth;
+  {
+    Int poc = rpcSlice->getPOC();
+    if(isField)
+    {
+      poc = (poc/2) % (m_pcCfg->getGOPSize()/2);
+    }
+    else
+    {
+      poc = poc % m_pcCfg->getGOPSize();
+    }
+
+    if ( poc == 0 )
+    {
+      depth = 0;
+    }
+    else
+    {
+      // Determine the hierarchical-B level of poc: halve the step width
+      // until poc falls on the grid of that level; depth counts the halvings.
+      Int step = m_pcCfg->getGOPSize();
+      depth    = 0;
+      for( Int i=step>>1; i>=1; i>>=1 )
+      {
+        for ( Int j=i; j<m_pcCfg->getGOPSize(); j+=step )
+        {
+          if ( j == poc )
+          {
+            i=0;
+            break;
+          }
+        }
+        step >>= 1;
+        depth++;
+      }
+    }
+
+#if HARMONIZE_GOP_FIRST_FIELD_COUPLE
+    if(poc != 0)
+    {
+#endif
+      // A second (odd-POC) field sits one temporal level below its pair.
+      if (isField && ((rpcSlice->getPOC() % 2) == 1))
+      {
+        depth ++;
+      }
+#if HARMONIZE_GOP_FIRST_FIELD_COUPLE
+    }
+#endif
+  }
+
+  // slice type: default B, promoted to I at intra-period boundaries
+  SliceType eSliceType;
+
+  eSliceType=B_SLICE;
+#if EFFICIENT_FIELD_IRAP
+  if(!(isField && pocLast == 1))
+  {
+#endif // EFFICIENT_FIELD_IRAP
+#if ALLOW_RECOVERY_POINT_AS_RAP
+    if(m_pcCfg->getDecodingRefreshType() == 3)
+    {
+      eSliceType = (pocLast == 0 || pocCurr % m_pcCfg->getIntraPeriod() == 0             || m_pcGOPEncoder->getGOPSize() == 0) ? I_SLICE : eSliceType;
+    }
+    else
+    {
+#endif
+      eSliceType = (pocLast == 0 || (pocCurr - (isField ? 1 : 0)) % m_pcCfg->getIntraPeriod() == 0 || m_pcGOPEncoder->getGOPSize() == 0) ? I_SLICE : eSliceType;
+#if ALLOW_RECOVERY_POINT_AS_RAP
+    }
+#endif
+#if EFFICIENT_FIELD_IRAP
+  }
+#endif
+
+  rpcSlice->setSliceType    ( eSliceType );
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // Non-referenced frame marking
+  // ------------------------------------------------------------------------------------------------------------------
+
+  if(pocLast == 0)
+  {
+    rpcSlice->setTemporalLayerNonReferenceFlag(false);
+  }
+  else
+  {
+    rpcSlice->setTemporalLayerNonReferenceFlag(!m_pcCfg->getGOPEntry(iGOPid).m_refPic);
+  }
+  rpcSlice->setReferenced(true);
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // QP setting
+  // ------------------------------------------------------------------------------------------------------------------
+
+  dQP = m_pcCfg->getQP();
+  if(eSliceType!=I_SLICE)
+  {
+    // Apply the per-GOP-entry QP offset, except in the lossless corner case.
+    if (!(( m_pcCfg->getMaxDeltaQP() == 0 ) && (dQP == -rpcSlice->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA) ) && (rpcSlice->getPPS()->getTransquantBypassEnableFlag())))
+    {
+      dQP += m_pcCfg->getGOPEntry(iGOPid).m_QPOffset;
+    }
+  }
+
+  // modify QP by the optional per-picture dQP table
+  Int* pdQPs = m_pcCfg->getdQPs();
+  if ( pdQPs )
+  {
+    dQP += pdQPs[ rpcSlice->getPOC() ];
+  }
+
+  if (m_pcCfg->getCostMode()==COST_LOSSLESS_CODING)
+  {
+    dQP=LOSSLESS_AND_MIXED_LOSSLESS_RD_COST_TEST_QP;
+    m_pcCfg->setDeltaQpRD(0);
+  }
+
+  // ------------------------------------------------------------------------------------------------------------------
+  // Lambda computation
+  // ------------------------------------------------------------------------------------------------------------------
+
+  Int iQP;
+  Double dOrigQP = dQP;
+
+  // pre-compute lambda and QP values for all possible QP candidates
+  for ( Int iDQpIdx = 0; iDQpIdx < 2 * m_pcCfg->getDeltaQpRD() + 1; iDQpIdx++ )
+  {
+    // compute QP value: candidates alternate around dOrigQP (0, +1, -1, +2, -2, ...)
+    dQP = dOrigQP + ((iDQpIdx+1)>>1)*(iDQpIdx%2 ? -1 : 1);
+
+    // compute lambda value
+    Int    NumberBFrames = ( m_pcCfg->getGOPSize() - 1 );
+    Int    SHIFT_QP = 12;
+
+    Double dLambda_scale = 1.0 - Clip3( 0.0, 0.5, 0.05*(Double)(isField ? NumberBFrames/2 : NumberBFrames) );
+
+#if FULL_NBIT
+    Int    bitdepth_luma_qp_scale = 6 * (g_bitDepth[CHANNEL_TYPE_LUMA] - 8);
+#else
+    Int    bitdepth_luma_qp_scale = 0;
+#endif
+    Double qp_temp = (Double) dQP + bitdepth_luma_qp_scale - SHIFT_QP;
+#if FULL_NBIT
+    Double qp_temp_orig = (Double) dQP - SHIFT_QP;
+#endif
+    // Case #1: I or P-slices (key-frame)
+    Double dQPFactor = m_pcCfg->getGOPEntry(iGOPid).m_QPFactor;
+    if ( eSliceType==I_SLICE )
+    {
+      dQPFactor=0.57*dLambda_scale;
+    }
+    // Standard HM lambda model: lambda = QPfactor * 2^((QP-12)/3)
+    dLambda = dQPFactor*pow( 2.0, qp_temp/3.0 );
+
+    if ( depth>0 )
+    {
+#if FULL_NBIT
+        dLambda *= Clip3( 2.00, 4.00, (qp_temp_orig / 6.0) ); // (j == B_SLICE && p_cur_frm->layer != 0 )
+#else
+        dLambda *= Clip3( 2.00, 4.00, (qp_temp / 6.0) ); // (j == B_SLICE && p_cur_frm->layer != 0 )
+#endif
+    }
+
+    // if hadamard is used in ME process
+    if ( !m_pcCfg->getUseHADME() && rpcSlice->getSliceType( ) != I_SLICE )
+    {
+      dLambda *= 0.95;
+    }
+
+    iQP = max( -pSPS->getQpBDOffset(CHANNEL_TYPE_LUMA), min( MAX_QP, (Int) floor( dQP + 0.5 ) ) );
+
+    m_pdRdPicLambda[iDQpIdx] = dLambda;
+    m_pdRdPicQp    [iDQpIdx] = dQP;
+    m_piRdPicQp    [iDQpIdx] = iQP;
+  }
+
+  // obtain dQP = 0 case
+  dLambda = m_pdRdPicLambda[0];
+  dQP     = m_pdRdPicQp    [0];
+  iQP     = m_piRdPicQp    [0];
+
+  if( rpcSlice->getSliceType( ) != I_SLICE )
+  {
+    dLambda *= m_pcCfg->getLambdaModifier( m_pcCfg->getGOPEntry(iGOPid).m_temporalId );
+  }
+
+  setUpLambda(rpcSlice, dLambda, iQP);
+
+#if HB_LAMBDA_FOR_LDC
+  // restore original slice type (re-derives the same decision as above)
+
+#if EFFICIENT_FIELD_IRAP
+  if(!(isField && pocLast == 1))
+  {
+#endif // EFFICIENT_FIELD_IRAP
+#if ALLOW_RECOVERY_POINT_AS_RAP
+    if(m_pcCfg->getDecodingRefreshType() == 3)
+    {
+      eSliceType = (pocLast == 0 || (pocCurr)                     % m_pcCfg->getIntraPeriod() == 0 || m_pcGOPEncoder->getGOPSize() == 0) ? I_SLICE : eSliceType;
+    }
+    else
+    {
+#endif
+      eSliceType = (pocLast == 0 || (pocCurr - (isField ? 1 : 0)) % m_pcCfg->getIntraPeriod() == 0 || m_pcGOPEncoder->getGOPSize() == 0) ? I_SLICE : eSliceType;
+#if ALLOW_RECOVERY_POINT_AS_RAP
+    }
+#endif
+#if EFFICIENT_FIELD_IRAP
+  }
+#endif // EFFICIENT_FIELD_IRAP
+
+  rpcSlice->setSliceType        ( eSliceType );
+#endif
+
+  if (m_pcCfg->getUseRecalculateQPAccordingToLambda())
+  {
+    dQP = xGetQPValueAccordingToLambda( dLambda );
+    iQP = max( -pSPS->getQpBDOffset(CHANNEL_TYPE_LUMA), min( MAX_QP, (Int) floor( dQP + 0.5 ) ) );
+  }
+
+  rpcSlice->setSliceQp           ( iQP );
+#if ADAPTIVE_QP_SELECTION
+  rpcSlice->setSliceQpBase       ( iQP );
+#endif
+  rpcSlice->setSliceQpDelta      ( 0 );
+  rpcSlice->setSliceChromaQpDelta( COMPONENT_Cb, 0 );
+  rpcSlice->setSliceChromaQpDelta( COMPONENT_Cr, 0 );
+  rpcSlice->setUseChromaQpAdj( pPPS->getChromaQpAdjTableSize() > 0 );
+  rpcSlice->setNumRefIdx(REF_PIC_LIST_0,m_pcCfg->getGOPEntry(iGOPid).m_numRefPicsActive);
+  rpcSlice->setNumRefIdx(REF_PIC_LIST_1,m_pcCfg->getGOPEntry(iGOPid).m_numRefPicsActive);
+
+  // Deblocking-filter parameters: metric mode forces the filter on with zero
+  // offsets; otherwise offsets come from the config / per-GOP-entry settings.
+  if ( m_pcCfg->getDeblockingFilterMetric() )
+  {
+    rpcSlice->setDeblockingFilterOverrideFlag(true);
+    rpcSlice->setDeblockingFilterDisable(false);
+    rpcSlice->setDeblockingFilterBetaOffsetDiv2( 0 );
+    rpcSlice->setDeblockingFilterTcOffsetDiv2( 0 );
+  }
+  else if (rpcSlice->getPPS()->getDeblockingFilterControlPresentFlag())
+  {
+    rpcSlice->getPPS()->setDeblockingFilterOverrideEnabledFlag( !m_pcCfg->getLoopFilterOffsetInPPS() );
+    rpcSlice->setDeblockingFilterOverrideFlag( !m_pcCfg->getLoopFilterOffsetInPPS() );
+    rpcSlice->getPPS()->setPicDisableDeblockingFilterFlag( m_pcCfg->getLoopFilterDisable() );
+    rpcSlice->setDeblockingFilterDisable( m_pcCfg->getLoopFilterDisable() );
+    if ( !rpcSlice->getDeblockingFilterDisable())
+    {
+      if ( !m_pcCfg->getLoopFilterOffsetInPPS() && eSliceType!=I_SLICE)
+      {
+        rpcSlice->getPPS()->setDeblockingFilterBetaOffsetDiv2( m_pcCfg->getGOPEntry(iGOPid).m_betaOffsetDiv2 + m_pcCfg->getLoopFilterBetaOffset() );
+        rpcSlice->getPPS()->setDeblockingFilterTcOffsetDiv2( m_pcCfg->getGOPEntry(iGOPid).m_tcOffsetDiv2 + m_pcCfg->getLoopFilterTcOffset() );
+        rpcSlice->setDeblockingFilterBetaOffsetDiv2( m_pcCfg->getGOPEntry(iGOPid).m_betaOffsetDiv2 + m_pcCfg->getLoopFilterBetaOffset()  );
+        rpcSlice->setDeblockingFilterTcOffsetDiv2( m_pcCfg->getGOPEntry(iGOPid).m_tcOffsetDiv2 + m_pcCfg->getLoopFilterTcOffset() );
+      }
+      else
+      {
+        rpcSlice->getPPS()->setDeblockingFilterBetaOffsetDiv2( m_pcCfg->getLoopFilterBetaOffset() );
+        rpcSlice->getPPS()->setDeblockingFilterTcOffsetDiv2( m_pcCfg->getLoopFilterTcOffset() );
+        rpcSlice->setDeblockingFilterBetaOffsetDiv2( m_pcCfg->getLoopFilterBetaOffset() );
+        rpcSlice->setDeblockingFilterTcOffsetDiv2( m_pcCfg->getLoopFilterTcOffset() );
+      }
+    }
+  }
+  else
+  {
+    rpcSlice->setDeblockingFilterOverrideFlag( false );
+    rpcSlice->setDeblockingFilterDisable( false );
+    rpcSlice->setDeblockingFilterBetaOffsetDiv2( 0 );
+    rpcSlice->setDeblockingFilterTcOffsetDiv2( 0 );
+  }
+
+  rpcSlice->setDepth            ( depth );
+
+  // I-slices always sit on the base temporal layer.
+  pcPic->setTLayer( m_pcCfg->getGOPEntry(iGOPid).m_temporalId );
+  if(eSliceType==I_SLICE)
+  {
+    pcPic->setTLayer(0);
+  }
+  rpcSlice->setTLayer( pcPic->getTLayer() );
+
+  assert( m_apcPicYuvPred );
+  assert( m_apcPicYuvResi );
+
+  pcPic->setPicYuvPred( m_apcPicYuvPred );
+  pcPic->setPicYuvResi( m_apcPicYuvResi );
+  rpcSlice->setSliceMode            ( m_pcCfg->getSliceMode()            );
+  rpcSlice->setSliceArgument        ( m_pcCfg->getSliceArgument()        );
+  rpcSlice->setSliceSegmentMode     ( m_pcCfg->getSliceSegmentMode()     );
+  rpcSlice->setSliceSegmentArgument ( m_pcCfg->getSliceSegmentArgument() );
+  rpcSlice->setMaxNumMergeCand        ( m_pcCfg->getMaxNumMergeCand()        );
+  xStoreWPparam( pPPS->getUseWP(), pPPS->getWPBiPred() );
+}
+
+/** Overwrite the slice QP and re-derive the matching lambdas
+ *  (used when rate control re-decides the picture QP).
+ * \param pic     picture whose first slice is updated
+ * \param sliceQP new slice QP
+ * \param lambda  new RD lambda corresponding to sliceQP
+ */
+Void TEncSlice::resetQP( TComPic* pic, Int sliceQP, Double lambda )
+{
+  TComSlice* slice = pic->getSlice(0);
+
+  // store the new QP, then propagate the lambda to all consumers
+  slice->setSliceQp( sliceQP );
+#if ADAPTIVE_QP_SELECTION
+  slice->setSliceQpBase ( sliceQP );
+#endif
+  setUpLambda(slice, lambda, sliceQP);
+}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+/** Set the adaptive motion-estimation search range for every active
+ *  reference picture, scaled by its POC distance from the current picture
+ *  and clipped to [8, configured maximum].
+ * \param pcSlice slice whose reference picture lists are examined
+ */
+Void TEncSlice::setSearchRange( TComSlice* pcSlice )
+{
+  Int iCurrPOC = pcSlice->getPOC();
+  Int iRefPOC;
+  Int iGOPSize = m_pcCfg->getGOPSize();
+  Int iOffset = (iGOPSize >> 1);  // rounding offset for the division below
+  Int iMaxSR = m_pcCfg->getSearchRange();
+  Int iNumPredDir = pcSlice->isInterP() ? 1 : 2;
+
+  // Iterate over exactly the used prediction directions. The previous
+  // "iDir <= iNumPredDir" bound made a redundant extra pass that re-applied
+  // the REF_PIC_LIST_1 settings a second time for B slices.
+  for (Int iDir = 0; iDir < iNumPredDir; iDir++)
+  {
+    RefPicList  e = ( iDir ? REF_PIC_LIST_1 : REF_PIC_LIST_0 );
+    for (Int iRefIdx = 0; iRefIdx < pcSlice->getNumRefIdx(e); iRefIdx++)
+    {
+      iRefPOC = pcSlice->getRefPic(e, iRefIdx)->getPOC();
+      // Scale the maximum range by the relative POC distance, with rounding.
+      Int iNewSR = Clip3(8, iMaxSR, (iMaxSR*ADAPT_SR_SCALE*abs(iCurrPOC - iRefPOC)+iOffset)/iGOPSize);
+      m_pcPredSearch->setAdaptiveSearchRange(iDir, iRefIdx, iNewSR);
+    }
+  }
+}
+
+/**
+ - multi-loop slice encoding for different slice QP
+ .
+ \param pcPic    picture class
+ */
+Void TEncSlice::precompressSlice( TComPic* pcPic )
+{
+  // if deltaQP RD is not used, simply return
+  if ( m_pcCfg->getDeltaQpRD() == 0 )
+  {
+    return;
+  }
+
+  if ( m_pcCfg->getUseRateCtrl() )
+  {
+    printf( "\nMultiple QP optimization is not allowed when rate control is enabled." );
+    assert(0);
+  }
+
+  TComSlice* pcSlice        = pcPic->getSlice(getSliceIdx());
+  Double     dPicRdCostBest = MAX_DOUBLE;
+  UInt       uiQpIdxBest = 0;
+
+  Double dFrameLambda;
+#if FULL_NBIT
+  Int    SHIFT_QP = 12 + 6 * (g_bitDepth[CHANNEL_TYPE_LUMA] - 8);
+#else
+  Int    SHIFT_QP = 12;
+#endif
+
+  // set frame lambda (B slices in hierarchical GOPs get double weight)
+  if (m_pcCfg->getGOPSize() > 1)
+  {
+    dFrameLambda = 0.68 * pow (2, (m_piRdPicQp[0]  - SHIFT_QP) / 3.0) * (pcSlice->isInterB()? 2 : 1);
+  }
+  else
+  {
+    dFrameLambda = 0.68 * pow (2, (m_piRdPicQp[0] - SHIFT_QP) / 3.0);
+  }
+  m_pcRdCost      ->setFrameLambda(dFrameLambda);
+
+  const UInt initialSliceQp=pcSlice->getSliceQp();
+  // for each QP candidate: trial-encode the whole slice and keep the
+  // candidate with the smallest picture-level RD cost.
+  for ( UInt uiQpIdx = 0; uiQpIdx < 2 * m_pcCfg->getDeltaQpRD() + 1; uiQpIdx++ )
+  {
+    pcSlice       ->setSliceQp             ( m_piRdPicQp    [uiQpIdx] );
+#if ADAPTIVE_QP_SELECTION
+    pcSlice       ->setSliceQpBase         ( m_piRdPicQp    [uiQpIdx] );
+#endif
+    setUpLambda(pcSlice, m_pdRdPicLambda[uiQpIdx], m_piRdPicQp    [uiQpIdx]);
+
+    // try compress
+    compressSlice   ( pcPic );
+
+    Double dPicRdCost;
+    UInt64 uiPicDist        = m_uiPicDist;
+    // TODO: will this work if multiple slices are being used? There may not be any reconstruction data yet.
+    //       Will this also be ideal if a byte-restriction is placed on the slice?
+    //         - what if the last CTU was sometimes included, sometimes not, and that had all the distortion?
+    m_pcGOPEncoder->preLoopFilterPicAll( pcPic, uiPicDist );
+
+    // compute RD cost and choose the best
+    dPicRdCost = m_pcRdCost->calcRdCost64( m_uiPicTotalBits, uiPicDist, true, DF_SSE_FRAME);
+
+    if ( dPicRdCost < dPicRdCostBest )
+    {
+      uiQpIdxBest    = uiQpIdx;
+      dPicRdCostBest = dPicRdCost;
+    }
+  }
+
+  if (pcSlice->getDependentSliceSegmentFlag() && initialSliceQp!=m_piRdPicQp[uiQpIdxBest] )
+  {
+    // TODO: this won't work with dependent slices: they do not have their own QP.
+    fprintf(stderr,"ERROR - attempt to change QP for a dependent slice-segment, having already coded the slice\n");
+    assert(pcSlice->getDependentSliceSegmentFlag()==false || initialSliceQp==m_piRdPicQp[uiQpIdxBest]);
+  }
+  // set best values
+  pcSlice       ->setSliceQp             ( m_piRdPicQp    [uiQpIdxBest] );
+#if ADAPTIVE_QP_SELECTION
+  pcSlice       ->setSliceQpBase         ( m_piRdPicQp    [uiQpIdxBest] );
+#endif
+  setUpLambda(pcSlice, m_pdRdPicLambda[uiQpIdxBest], m_piRdPicQp    [uiQpIdxBest]);
+}
+
+/** Compute the per-CTU Hadamard (SATD-based) intra cost of an I-slice for
+ *  rate control, storing each CTU's cost in the rate-control model and
+ *  accumulating the picture's total intra cost.
+ * \param pcPic picture to analyse
+ */
+Void TEncSlice::calCostSliceI(TComPic* pcPic)
+{
+  UInt    ctuRsAddr;
+  UInt    startCtuTsAddr;
+  UInt    boundingCtuTsAddr;
+  // shift/offset rescale the HAD sum from the coding bit depth down to
+  // 8-bit precision with rounding. (Stray duplicate ';' removed.)
+  Int     iSumHad, shift = g_bitDepth[CHANNEL_TYPE_LUMA]-8, offset = (shift>0)?(1<<(shift-1)):0;
+  Double  iSumHadSlice = 0;
+
+  pcPic->getSlice(getSliceIdx())->setSliceSegmentBits(0);
+  TComSlice* pcSlice            = pcPic->getSlice(getSliceIdx());
+  xDetermineStartAndBoundingCtuTsAddr ( startCtuTsAddr, boundingCtuTsAddr, pcPic, false );
+
+  UInt ctuTsAddr;
+  ctuRsAddr = pcPic->getPicSym()->getCtuTsToRsAddrMap( startCtuTsAddr);
+  for( ctuTsAddr = startCtuTsAddr; ctuTsAddr < boundingCtuTsAddr; ctuRsAddr = pcPic->getPicSym()->getCtuTsToRsAddrMap(++ctuTsAddr) )
+  {
+    // initialize CU encoder
+    TComDataCU* pCtu = pcPic->getCtu( ctuRsAddr );
+    pCtu->initCtu( pcPic, ctuRsAddr );
+
+    // Clip the CTU dimensions at the right/bottom picture borders.
+    Int height  = min( pcSlice->getSPS()->getMaxCUHeight(),pcSlice->getSPS()->getPicHeightInLumaSamples() - ctuRsAddr / pcPic->getFrameWidthInCtus() * pcSlice->getSPS()->getMaxCUHeight() );
+    Int width   = min( pcSlice->getSPS()->getMaxCUWidth(),pcSlice->getSPS()->getPicWidthInLumaSamples() - ctuRsAddr % pcPic->getFrameWidthInCtus() * pcSlice->getSPS()->getMaxCUWidth() );
+
+    iSumHad = m_pcCuEncoder->updateCtuDataISlice(pCtu, width, height);
+
+    (m_pcRateCtrl->getRCPic()->getLCU(ctuRsAddr)).m_costIntra=(iSumHad+offset)>>shift;
+    iSumHadSlice += (m_pcRateCtrl->getRCPic()->getLCU(ctuRsAddr)).m_costIntra;
+  }
+  m_pcRateCtrl->getRCPic()->setTotalIntraCost(iSumHadSlice);
+}
+
+/** Trial-encode all CTUs of the current slice segment: make all mode
+ *  decisions, maintain the CABAC context state (including WPP/tile resets
+ *  and dependent-slice carry-over), apply CTU-level rate control, and
+ *  accumulate the bit/distortion totals used by precompressSlice.
+ * \param pcPic picture to compress
+ */
+Void TEncSlice::compressSlice( TComPic* pcPic )
+{
+  UInt   startCtuTsAddr;
+  UInt   boundingCtuTsAddr;
+  TComSlice* pcSlice            = pcPic->getSlice(getSliceIdx());
+  pcSlice->setSliceSegmentBits(0);
+  xDetermineStartAndBoundingCtuTsAddr ( startCtuTsAddr, boundingCtuTsAddr, pcPic, false );
+
+  // initialize cost values - these are used by precompressSlice (they should be parameters).
+  m_uiPicTotalBits  = 0;
+  m_dPicRdCost      = 0; // NOTE: This is a write-only variable!
+  m_uiPicDist       = 0;
+
+  m_pcEntropyCoder->setEntropyCoder   ( m_pppcRDSbacCoder[0][CI_CURR_BEST], pcSlice );
+  m_pcEntropyCoder->resetEntropy      ();
+
+  TEncBinCABAC* pRDSbacCoder = (TEncBinCABAC *) m_pppcRDSbacCoder[0][CI_CURR_BEST]->getEncBinIf();
+  pRDSbacCoder->setBinCountingEnableFlag( false );
+  pRDSbacCoder->setBinsCoded( 0 );
+
+  TComBitCounter  tempBitCounter;
+  const UInt      frameWidthInCtus = pcPic->getPicSym()->getFrameWidthInCtus();
+
+  //------------------------------------------------------------------------------
+  //  Weighted Prediction parameters estimation.
+  //------------------------------------------------------------------------------
+  // calculate AC/DC values for current picture
+  if( pcSlice->getPPS()->getUseWP() || pcSlice->getPPS()->getWPBiPred() )
+  {
+    xCalcACDCParamSlice(pcSlice);
+  }
+
+  const Bool bWp_explicit = (pcSlice->getSliceType()==P_SLICE && pcSlice->getPPS()->getUseWP()) || (pcSlice->getSliceType()==B_SLICE && pcSlice->getPPS()->getWPBiPred());
+
+  if ( bWp_explicit )
+  {
+    //------------------------------------------------------------------------------
+    //  Weighted Prediction implemented at Slice level. SliceMode=2 is not supported yet.
+    //------------------------------------------------------------------------------
+    if ( pcSlice->getSliceMode()==FIXED_NUMBER_OF_BYTES || pcSlice->getSliceSegmentMode()==FIXED_NUMBER_OF_BYTES )
+    {
+      printf("Weighted Prediction is not supported with slice mode determined by max number of bins.\n"); exit(0);
+    }
+
+    xEstimateWPParamSlice( pcSlice );
+    pcSlice->initWpScaling();
+
+    // check WP on/off
+    xCheckWPEnable( pcSlice );
+  }
+
+#if ADAPTIVE_QP_SELECTION
+  if( m_pcCfg->getUseAdaptQpSelect() && !(pcSlice->getDependentSliceSegmentFlag()))
+  {
+    // TODO: this won't work with dependent slices: they do not have their own QP. Check fix to mask clause execution with && !(pcSlice->getDependentSliceSegmentFlag())
+    m_pcTrQuant->clearSliceARLCnt();
+    if(pcSlice->getSliceType()!=I_SLICE)
+    {
+      Int qpBase = pcSlice->getSliceQpBase();
+      pcSlice->setSliceQp(qpBase + m_pcTrQuant->getQpDelta(qpBase));
+    }
+  }
+#endif
+
+
+
+  // Adjust initial state if this is the start of a dependent slice.
+  {
+    const UInt      ctuRsAddr               = pcPic->getPicSym()->getCtuTsToRsAddrMap( startCtuTsAddr);
+    const UInt      currentTileIdx          = pcPic->getPicSym()->getTileIdxMap(ctuRsAddr);
+    const TComTile *pCurrentTile            = pcPic->getPicSym()->getTComTile(currentTileIdx);
+    const UInt      firstCtuRsAddrOfTile    = pCurrentTile->getFirstCtuRsAddr();
+    if( pcSlice->getDependentSliceSegmentFlag() && ctuRsAddr != firstCtuRsAddrOfTile )
+    {
+      // This will only occur if dependent slice-segments (m_entropyCodingSyncContextState=true) are being used.
+      if( pCurrentTile->getTileWidthInCtus() >= 2 || !m_pcCfg->getWaveFrontsynchro() )
+      {
+        m_pppcRDSbacCoder[0][CI_CURR_BEST]->loadContexts( &m_lastSliceSegmentEndContextState );
+      }
+    }
+  }
+
+  // for every CTU in the slice segment (may terminate sooner if there is a byte limit on the slice-segment)
+
+  for( UInt ctuTsAddr = startCtuTsAddr; ctuTsAddr < boundingCtuTsAddr; ++ctuTsAddr )
+  {
+    const UInt ctuRsAddr = pcPic->getPicSym()->getCtuTsToRsAddrMap(ctuTsAddr);
+    // initialize CTU encoder
+    TComDataCU* pCtu = pcPic->getCtu( ctuRsAddr );
+    pCtu->initCtu( pcPic, ctuRsAddr );
+
+    // update CABAC state: reset at tile starts, or sync from the top-right
+    // CTU at the start of each CTU row when wavefront parallelism is on.
+    const UInt firstCtuRsAddrOfTile = pcPic->getPicSym()->getTComTile(pcPic->getPicSym()->getTileIdxMap(ctuRsAddr))->getFirstCtuRsAddr();
+    const UInt tileXPosInCtus = firstCtuRsAddrOfTile % frameWidthInCtus;
+    const UInt ctuXPosInCtus  = ctuRsAddr % frameWidthInCtus;
+
+    if (ctuRsAddr == firstCtuRsAddrOfTile)
+    {
+      m_pppcRDSbacCoder[0][CI_CURR_BEST]->resetEntropy();
+    }
+    else if ( ctuXPosInCtus == tileXPosInCtus && m_pcCfg->getWaveFrontsynchro())
+    {
+      // reset and then update contexts to the state at the end of the top-right CTU (if within current slice and tile).
+      m_pppcRDSbacCoder[0][CI_CURR_BEST]->resetEntropy();
+      // Sync if the Top-Right is available.
+      TComDataCU *pCtuUp = pCtu->getCtuAbove();
+      if ( pCtuUp && ((ctuRsAddr%frameWidthInCtus+1) < frameWidthInCtus)  )
+      {
+        TComDataCU *pCtuTR = pcPic->getCtu( ctuRsAddr - frameWidthInCtus + 1 );
+        if ( pCtu->CUIsFromSameSliceAndTile(pCtuTR) )
+        {
+          // Top-Right is available, we use it.
+          m_pppcRDSbacCoder[0][CI_CURR_BEST]->loadContexts( &m_entropyCodingSyncContextState );
+        }
+      }
+    }
+
+    // set go-on entropy coder (used for all trial encodings - the cu encoder and encoder search also have a copy of the same pointer)
+    m_pcEntropyCoder->setEntropyCoder ( m_pcRDGoOnSbacCoder, pcSlice );
+    m_pcEntropyCoder->setBitstream( &tempBitCounter );
+    tempBitCounter.resetBits();
+    m_pcRDGoOnSbacCoder->load( m_pppcRDSbacCoder[0][CI_CURR_BEST] ); // this copy is not strictly necessary here, but indicates that the GoOnSbacCoder
+                                                                     // is reset to a known state before every decision process.
+
+    ((TEncBinCABAC*)m_pcRDGoOnSbacCoder->getEncBinIf())->setBinCountingEnableFlag(true);
+
+    // Preserve the slice lambda so rate control can restore it after this CTU.
+    Double oldLambda = m_pcRdCost->getLambda();
+    if ( m_pcCfg->getUseRateCtrl() )
+    {
+      // Rate control: derive a CTU-level QP/lambda estimate before trial encoding.
+      Int estQP        = pcSlice->getSliceQp();
+      Double estLambda = -1.0;
+      Double bpp       = -1.0;
+
+      if ( ( pcPic->getSlice( 0 )->getSliceType() == I_SLICE && m_pcCfg->getForceIntraQP() ) || !m_pcCfg->getLCULevelRC() )
+      {
+        estQP = pcSlice->getSliceQp();
+      }
+      else
+      {
+        bpp = m_pcRateCtrl->getRCPic()->getLCUTargetBpp(pcSlice->getSliceType());
+        if ( pcPic->getSlice( 0 )->getSliceType() == I_SLICE)
+        {
+          estLambda = m_pcRateCtrl->getRCPic()->getLCUEstLambdaAndQP(bpp, pcSlice->getSliceQp(), &estQP);
+        }
+        else
+        {
+          estLambda = m_pcRateCtrl->getRCPic()->getLCUEstLambda( bpp );
+          estQP     = m_pcRateCtrl->getRCPic()->getLCUEstQP    ( estLambda, pcSlice->getSliceQp() );
+        }
+
+        estQP     = Clip3( -pcSlice->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA), MAX_QP, estQP );
+
+        m_pcRdCost->setLambda(estLambda);
+
+#if RDOQ_CHROMA_LAMBDA
+        // set lambda for RDOQ
+        const Double chromaLambda = estLambda / m_pcRdCost->getChromaWeight();
+        const Double lambdaArray[MAX_NUM_COMPONENT] = { estLambda, chromaLambda, chromaLambda };
+        m_pcTrQuant->setLambdas( lambdaArray );
+#else
+        m_pcTrQuant->setLambda( estLambda );
+#endif
+      }
+
+      m_pcRateCtrl->setRCQP( estQP );
+#if ADAPTIVE_QP_SELECTION
+      pCtu->getSlice()->setSliceQpBase( estQP );
+#endif
+    }
+
+    // run CTU trial encoder
+    m_pcCuEncoder->compressCtu( pCtu );
+
+
+    // All CTU decisions have now been made. Restore entropy coder to an initial stage, ready to make a true encode,
+    // which will result in the state of the contexts being correct. It will also count up the number of bits coded,
+    // which is used if there is a limit of the number of bytes per slice-segment.
+
+    m_pcEntropyCoder->setEntropyCoder ( m_pppcRDSbacCoder[0][CI_CURR_BEST], pcSlice );
+    m_pcEntropyCoder->setBitstream( &tempBitCounter );
+    pRDSbacCoder->setBinCountingEnableFlag( true );
+    m_pppcRDSbacCoder[0][CI_CURR_BEST]->resetBits();
+    pRDSbacCoder->setBinsCoded( 0 );
+
+    // encode CTU and calculate the true bit counters.
+    m_pcCuEncoder->encodeCtu( pCtu );
+
+
+    pRDSbacCoder->setBinCountingEnableFlag( false );
+
+    const Int numberOfWrittenBits = m_pcEntropyCoder->getNumberOfWrittenBits();
+
+    // Calculate if this CTU puts us over slice bit size.
+    // cannot terminate if current slice/slice-segment would be 0 Ctu in size,
+    const UInt validEndOfSliceCtuTsAddr = ctuTsAddr + (ctuTsAddr == startCtuTsAddr ? 1 : 0);
+    // Set slice end parameter
+    if(pcSlice->getSliceMode()==FIXED_NUMBER_OF_BYTES && pcSlice->getSliceBits()+numberOfWrittenBits > (pcSlice->getSliceArgument()<<3))
+    {
+      pcSlice->setSliceSegmentCurEndCtuTsAddr(validEndOfSliceCtuTsAddr);
+      pcSlice->setSliceCurEndCtuTsAddr(validEndOfSliceCtuTsAddr);
+      boundingCtuTsAddr=validEndOfSliceCtuTsAddr;
+    }
+    else if(pcSlice->getSliceSegmentMode()==FIXED_NUMBER_OF_BYTES && pcSlice->getSliceSegmentBits()+numberOfWrittenBits > (pcSlice->getSliceSegmentArgument()<<3))
+    {
+      pcSlice->setSliceSegmentCurEndCtuTsAddr(validEndOfSliceCtuTsAddr);
+      boundingCtuTsAddr=validEndOfSliceCtuTsAddr;
+    }
+
+    // Byte limit reached at this CTU: stop without counting its bits.
+    if (boundingCtuTsAddr <= ctuTsAddr)
+      break;
+
+    pcSlice->setSliceBits( (UInt)(pcSlice->getSliceBits() + numberOfWrittenBits) );
+    pcSlice->setSliceSegmentBits(pcSlice->getSliceSegmentBits()+numberOfWrittenBits);
+
+    // Store probabilities of second CTU in line into buffer - used only if wavefront-parallel-processing is enabled.
+    if ( ctuXPosInCtus == tileXPosInCtus+1 && m_pcCfg->getWaveFrontsynchro())
+    {
+      m_entropyCodingSyncContextState.loadContexts(m_pppcRDSbacCoder[0][CI_CURR_BEST]);
+    }
+
+
+    if ( m_pcCfg->getUseRateCtrl() )
+    {
+      // Feed the actual bits/QP/lambda of this CTU back into the RC model.
+      Int actualQP        = g_RCInvalidQPValue;
+      Double actualLambda = m_pcRdCost->getLambda();
+      Int actualBits      = pCtu->getTotalBits();
+      Int numberOfEffectivePixels    = 0;
+      for ( Int idx = 0; idx < pcPic->getNumPartitionsInCtu(); idx++ )
+      {
+        if ( pCtu->getPredictionMode( idx ) != NUMBER_OF_PREDICTION_MODES && ( !pCtu->isSkipped( idx ) ) )
+        {
+          numberOfEffectivePixels = numberOfEffectivePixels + 16;
+          break;
+        }
+      }
+
+      if ( numberOfEffectivePixels == 0 )
+      {
+        actualQP = g_RCInvalidQPValue;
+      }
+      else
+      {
+        actualQP = pCtu->getQP( 0 );
+      }
+      m_pcRdCost->setLambda(oldLambda);
+      m_pcRateCtrl->getRCPic()->updateAfterCTU( m_pcRateCtrl->getRCPic()->getLCUCoded(), actualBits, actualQP, actualLambda,
+                                                pCtu->getSlice()->getSliceType() == I_SLICE ? 0 : m_pcCfg->getLCULevelRC() );
+    }
+
+    m_uiPicTotalBits += pCtu->getTotalBits();
+    m_dPicRdCost     += pCtu->getTotalCost();
+    m_uiPicDist      += pCtu->getTotalDistortion();
+  }
+
+  // store context state at the end of this slice-segment, in case the next slice is a dependent slice and continues using the CABAC contexts.
+  if( pcSlice->getPPS()->getDependentSliceSegmentsEnabledFlag() )
+  {
+    m_lastSliceSegmentEndContextState.loadContexts( m_pppcRDSbacCoder[0][CI_CURR_BEST] );//ctx end of dep.slice
+  }
+  xRestoreWPparam( pcSlice );
+
+  // stop use of temporary bit counter object.
+  m_pppcRDSbacCoder[0][CI_CURR_BEST]->setBitstream(NULL);
+  m_pcRDGoOnSbacCoder->setBitstream(NULL); // stop use of tempBitCounter.
+}
+
+/** Perform the final (true) CABAC encode of one slice segment.
+ *  Mode decisions were already made during the analysis stage (compressSlice);
+ *  this pass writes the actual bitstream, one substream per tile / wavefront row.
+ \param  pcPic         picture containing the slice to encode
+ \param  pcSubstreams  array of output bitstreams, indexed by substream
+ \retval numBinsCoded  number of CABAC bins coded for this slice segment
+ */
+Void TEncSlice::encodeSlice   ( TComPic* pcPic, TComOutputBitstream* pcSubstreams, UInt &numBinsCoded )
+{
+  TComSlice* pcSlice                 = pcPic->getSlice(getSliceIdx());
+
+  const UInt startCtuTsAddr          = pcSlice->getSliceSegmentCurStartCtuTsAddr();
+  const UInt boundingCtuTsAddr       = pcSlice->getSliceSegmentCurEndCtuTsAddr();
+
+  const UInt frameWidthInCtus        = pcPic->getPicSym()->getFrameWidthInCtus();
+  const Bool depSliceSegmentsEnabled = pcSlice->getPPS()->getDependentSliceSegmentsEnabledFlag();
+  const Bool wavefrontsEnabled       = pcSlice->getPPS()->getEntropyCodingSyncEnabledFlag();
+
+  // initialise entropy coder for the slice
+  m_pcSbacCoder->init( (TEncBinIf*)m_pcBinCABAC );
+  m_pcEntropyCoder->setEntropyCoder ( m_pcSbacCoder, pcSlice );
+  m_pcEntropyCoder->resetEntropy      ();
+
+  // enable bin counting so the total number of coded bins can be reported to the caller
+  numBinsCoded = 0;
+  m_pcBinCABAC->setBinCountingEnableFlag( true );
+  m_pcBinCABAC->setBinsCoded(0);
+
+#if ENC_DEC_TRACE
+  g_bJustDoIt = g_bEncDecTraceEnable;
+#endif
+  DTRACE_CABAC_VL( g_nSymbolCounter++ );
+  DTRACE_CABAC_T( "\tPOC: " );
+  DTRACE_CABAC_V( pcPic->getPOC() );
+  DTRACE_CABAC_T( "\n" );
+#if ENC_DEC_TRACE
+  g_bJustDoIt = g_bEncDecTraceDisable;
+#endif
+
+
+  if (depSliceSegmentsEnabled)
+  {
+    // modify initial contexts with previous slice segment if this is a dependent slice.
+    const UInt ctuRsAddr        = pcPic->getPicSym()->getCtuTsToRsAddrMap( startCtuTsAddr );
+    const UInt currentTileIdx=pcPic->getPicSym()->getTileIdxMap(ctuRsAddr);
+    const TComTile *pCurrentTile=pcPic->getPicSym()->getTComTile(currentTileIdx);
+    const UInt firstCtuRsAddrOfTile = pCurrentTile->getFirstCtuRsAddr();
+
+    if( pcSlice->getDependentSliceSegmentFlag() && ctuRsAddr != firstCtuRsAddrOfTile )
+    {
+      // contexts are inherited only when not at a tile start; with wavefronts a
+      // one-CTU-wide tile cannot inherit (its first CTU would sync from above instead)
+      if( pCurrentTile->getTileWidthInCtus() >= 2 || !wavefrontsEnabled )
+      {
+        m_pcSbacCoder->loadContexts(&m_lastSliceSegmentEndContextState);
+      }
+    }
+  }
+
+  // for every CTU in the slice segment...
+
+  for( UInt ctuTsAddr = startCtuTsAddr; ctuTsAddr < boundingCtuTsAddr; ++ctuTsAddr )
+  {
+    const UInt ctuRsAddr = pcPic->getPicSym()->getCtuTsToRsAddrMap(ctuTsAddr);
+    const TComTile &currentTile = *(pcPic->getPicSym()->getTComTile(pcPic->getPicSym()->getTileIdxMap(ctuRsAddr)));
+    const UInt firstCtuRsAddrOfTile = currentTile.getFirstCtuRsAddr();
+    const UInt tileXPosInCtus       = firstCtuRsAddrOfTile % frameWidthInCtus;
+    const UInt tileYPosInCtus       = firstCtuRsAddrOfTile / frameWidthInCtus;
+    const UInt ctuXPosInCtus        = ctuRsAddr % frameWidthInCtus;
+    const UInt ctuYPosInCtus        = ctuRsAddr / frameWidthInCtus;
+    const UInt uiSubStrm=pcPic->getSubstreamForCtuAddr(ctuRsAddr, true, pcSlice);
+    TComDataCU* pCtu = pcPic->getCtu( ctuRsAddr );
+
+    // route this CTU's bits into the substream owned by its tile / wavefront row
+    m_pcEntropyCoder->setBitstream( &pcSubstreams[uiSubStrm] );
+
+    // set up CABAC contexts' state for this CTU
+    if (ctuRsAddr == firstCtuRsAddrOfTile)
+    {
+      if (ctuTsAddr != startCtuTsAddr) // if it is the first CTU, then the entropy coder has already been reset
+      {
+        m_pcEntropyCoder->resetEntropy();
+      }
+    }
+    else if (ctuXPosInCtus == tileXPosInCtus && wavefrontsEnabled)
+    {
+      // Synchronize cabac probabilities with upper-right CTU if it's available and at the start of a line.
+      if (ctuTsAddr != startCtuTsAddr) // if it is the first CTU, then the entropy coder has already been reset
+      {
+        m_pcEntropyCoder->resetEntropy();
+      }
+      TComDataCU *pCtuUp = pCtu->getCtuAbove();
+      if ( pCtuUp && ((ctuRsAddr%frameWidthInCtus+1) < frameWidthInCtus)  )
+      {
+        TComDataCU *pCtuTR = pcPic->getCtu( ctuRsAddr - frameWidthInCtus + 1 );
+        if ( pCtu->CUIsFromSameSliceAndTile(pCtuTR) )
+        {
+          // Top-right is available, so use it.
+          m_pcSbacCoder->loadContexts( &m_entropyCodingSyncContextState );
+        }
+      }
+    }
+
+
+    // signal per-CTU SAO parameters ahead of the CTU data, if SAO is in use
+    if ( pcSlice->getSPS()->getUseSAO() )
+    {
+      Bool bIsSAOSliceEnabled = false;
+      Bool sliceEnabled[MAX_NUM_COMPONENT];
+      for(Int comp=0; comp < MAX_NUM_COMPONENT; comp++)
+      {
+        ComponentID compId=ComponentID(comp);
+        sliceEnabled[compId] = pcSlice->getSaoEnabledFlag(toChannelType(compId)) && (comp < pcPic->getNumberValidComponents());
+        if (sliceEnabled[compId]) bIsSAOSliceEnabled=true;
+      }
+      if (bIsSAOSliceEnabled)
+      {
+        SAOBlkParam& saoblkParam = (pcPic->getPicSym()->getSAOBlkParam())[ctuRsAddr];
+
+        Bool leftMergeAvail = false;
+        Bool aboveMergeAvail= false;
+        //merge left condition
+        Int rx = (ctuRsAddr % frameWidthInCtus);
+        if(rx > 0)
+        {
+          leftMergeAvail = pcPic->getSAOMergeAvailability(ctuRsAddr, ctuRsAddr-1);
+        }
+
+        //merge up condition
+        Int ry = (ctuRsAddr / frameWidthInCtus);
+        if(ry > 0)
+        {
+          aboveMergeAvail = pcPic->getSAOMergeAvailability(ctuRsAddr, ctuRsAddr-frameWidthInCtus);
+        }
+
+        m_pcEntropyCoder->encodeSAOBlkParam(saoblkParam, sliceEnabled, leftMergeAvail, aboveMergeAvail);
+      }
+    }
+
+#if ENC_DEC_TRACE
+    g_bJustDoIt = g_bEncDecTraceEnable;
+#endif
+      m_pcCuEncoder->encodeCtu( pCtu );
+#if ENC_DEC_TRACE
+    g_bJustDoIt = g_bEncDecTraceDisable;
+#endif
+
+    //Store probabilities of second CTU in line into buffer
+    if ( ctuXPosInCtus == tileXPosInCtus+1 && wavefrontsEnabled)
+    {
+      m_entropyCodingSyncContextState.loadContexts( m_pcSbacCoder );
+    }
+
+    // terminate the sub-stream, if required (end of slice-segment, end of tile, end of wavefront-CTU-row):
+    if (ctuTsAddr+1 == boundingCtuTsAddr ||
+         (  ctuXPosInCtus + 1 == tileXPosInCtus + currentTile.getTileWidthInCtus() &&
+          ( ctuYPosInCtus + 1 == tileYPosInCtus + currentTile.getTileHeightInCtus() || wavefrontsEnabled)
+         )
+       )
+    {
+      m_pcEntropyCoder->encodeTerminatingBit(1);
+      m_pcEntropyCoder->encodeSliceFinish();
+      // Byte-alignment in slice_data() when new tile
+      pcSubstreams[uiSubStrm].writeByteAlignment();
+
+      // write sub-stream size (omitted for the final substream, whose size is implicit)
+      if (ctuTsAddr+1 != boundingCtuTsAddr)
+      {
+        pcSlice->addSubstreamSize( (pcSubstreams[uiSubStrm].getNumberOfWrittenBits() >> 3) + pcSubstreams[uiSubStrm].countStartCodeEmulations() );
+      }
+    }
+  } // CTU-loop
+
+  // preserve the final context state so a following dependent slice segment can continue from it
+  if( depSliceSegmentsEnabled )
+  {
+    m_lastSliceSegmentEndContextState.loadContexts( m_pcSbacCoder );//ctx end of dep.slice
+  }
+
+#if ADAPTIVE_QP_SELECTION
+  if( m_pcCfg->getUseAdaptQpSelect() )
+  {
+    m_pcTrQuant->storeSliceQpNext(pcSlice);
+  }
+#endif
+
+  // choose the CABAC init table index to signal for subsequent slices
+  if (pcSlice->getPPS()->getCabacInitPresentFlag())
+  {
+    if (pcSlice->getPPS()->getDependentSliceSegmentsEnabledFlag())
+    {
+      pcSlice->getPPS()->setEncCABACTableIdx( pcSlice->getSliceType() );
+    }
+    else
+    {
+      m_pcEntropyCoder->determineCabacInitIdx();
+    }
+  }
+  numBinsCoded = m_pcBinCABAC->getBinsCoded();
+}
+
+/** Compute the (exclusive) bounding CTU address, in tile-scan order, for a slice
+ *  or slice segment starting at startCtuTSAddrSlice, according to the configured
+ *  slice mode, then clamp it so the slice does not cross a tile boundary and, with
+ *  wavefronts, does not cross the end of the CTB row it started in.
+ \param  startCtuTSAddrSlice     [in]     first CTU (TS order) of the slice
+ \param  boundingCtuTSAddrSlice  [out]    exclusive end CTU (TS order)
+ \param  haveReachedTileBoundary [out]    true if the bound was clipped at a tile edge
+ \param  pcPic                   picture being processed
+ \param  encodingSlice           true when called from encodeSlice() (bound already decided)
+ \param  sliceMode               slice/segment splitting mode (CTUs / bytes / tiles)
+ \param  sliceArgument           mode-dependent argument (count of CTUs, bytes or tiles)
+ \param  sliceCurEndCtuTSAddr    previously-determined end address, used in byte mode
+ */
+Void TEncSlice::calculateBoundingCtuTsAddrForSlice(UInt &startCtuTSAddrSlice, UInt &boundingCtuTSAddrSlice, Bool &haveReachedTileBoundary,
+                                                   TComPic* pcPic, const Bool encodingSlice, const Int sliceMode, const Int sliceArgument, const UInt sliceCurEndCtuTSAddr)
+{
+  TComSlice* pcSlice = pcPic->getSlice(getSliceIdx());
+  const UInt numberOfCtusInFrame = pcPic->getNumberOfCtusInFrame();
+  boundingCtuTSAddrSlice=0;
+  haveReachedTileBoundary=false;
+
+  switch (sliceMode)
+  {
+    case FIXED_NUMBER_OF_CTU:
+      {
+        UInt ctuAddrIncrement    = sliceArgument;
+        boundingCtuTSAddrSlice  = ((startCtuTSAddrSlice + ctuAddrIncrement) < numberOfCtusInFrame) ? (startCtuTSAddrSlice + ctuAddrIncrement) : numberOfCtusInFrame;
+      }
+      break;
+    case FIXED_NUMBER_OF_BYTES:
+      // in byte-limited mode the true bound is only known while encoding (bit
+      // counting happens in compressSlice); during analysis assume whole frame
+      if (encodingSlice)
+        boundingCtuTSAddrSlice  = sliceCurEndCtuTSAddr;
+      else
+        boundingCtuTSAddrSlice  = numberOfCtusInFrame;
+      break;
+    case FIXED_NUMBER_OF_TILES:
+      {
+        const UInt tileIdx        = pcPic->getPicSym()->getTileIdxMap( pcPic->getPicSym()->getCtuTsToRsAddrMap(startCtuTSAddrSlice) );
+        const UInt tileTotalCount = (pcPic->getPicSym()->getNumTileColumnsMinus1()+1) * (pcPic->getPicSym()->getNumTileRowsMinus1()+1);
+        UInt ctuAddrIncrement   = 0;
+
+        // accumulate the CTU count of the next sliceArgument tiles (clipped at the last tile)
+        for(UInt tileIdxIncrement = 0; tileIdxIncrement < sliceArgument; tileIdxIncrement++)
+        {
+          if((tileIdx + tileIdxIncrement) < tileTotalCount)
+          {
+            UInt tileWidthInCtus   = pcPic->getPicSym()->getTComTile(tileIdx + tileIdxIncrement)->getTileWidthInCtus();
+            UInt tileHeightInCtus  = pcPic->getPicSym()->getTComTile(tileIdx + tileIdxIncrement)->getTileHeightInCtus();
+            ctuAddrIncrement    += (tileWidthInCtus * tileHeightInCtus);
+          }
+        }
+
+        boundingCtuTSAddrSlice  = ((startCtuTSAddrSlice + ctuAddrIncrement) < numberOfCtusInFrame) ? (startCtuTSAddrSlice + ctuAddrIncrement) : numberOfCtusInFrame;
+      }
+      break;
+    default:
+      boundingCtuTSAddrSlice    = numberOfCtusInFrame;
+      break;
+  }
+
+  // Adjust for tiles and wavefronts.
+  if ((sliceMode == FIXED_NUMBER_OF_CTU || sliceMode == FIXED_NUMBER_OF_BYTES) &&
+      (m_pcCfg->getNumRowsMinus1() > 0 || m_pcCfg->getNumColumnsMinus1() > 0))
+  {
+    const UInt ctuRSAddr                  = pcPic->getPicSym()->getCtuTsToRsAddrMap(startCtuTSAddrSlice);
+    const UInt startTileIdx               = pcPic->getPicSym()->getTileIdxMap(ctuRSAddr);
+    const Bool wavefrontsAreEnabled       = m_pcCfg->getWaveFrontsynchro();
+
+    const TComTile *pStartingTile         = pcPic->getPicSym()->getTComTile(startTileIdx);
+    const UInt tileStartTsAddr            = pcPic->getPicSym()->getCtuRsToTsAddrMap(pStartingTile->getFirstCtuRsAddr());
+    const UInt tileStartWidth             = pStartingTile->getTileWidthInCtus();
+    const UInt tileStartHeight            = pStartingTile->getTileHeightInCtus();
+    const UInt tileLastTsAddr_excl        = tileStartTsAddr + tileStartWidth*tileStartHeight;
+    const UInt tileBoundingCtuTsAddrSlice = tileLastTsAddr_excl;
+
+    const UInt ctuColumnOfStartingTile    = ((startCtuTSAddrSlice-tileStartTsAddr)%tileStartWidth);
+    if (wavefrontsAreEnabled && ctuColumnOfStartingTile!=0)
+    {
+      // WPP: if a slice does not start at the beginning of a CTB row, it must end within the same CTB row
+      const UInt numberOfCTUsToEndOfRow            = tileStartWidth - ctuColumnOfStartingTile;
+      const UInt wavefrontTileBoundingCtuAddrSlice = startCtuTSAddrSlice + numberOfCTUsToEndOfRow;
+      if (wavefrontTileBoundingCtuAddrSlice < boundingCtuTSAddrSlice)
+      {
+        boundingCtuTSAddrSlice = wavefrontTileBoundingCtuAddrSlice;
+      }
+    }
+
+    // never let a slice straddle the end of the tile it starts in
+    if (tileBoundingCtuTsAddrSlice < boundingCtuTSAddrSlice)
+    {
+      boundingCtuTSAddrSlice = tileBoundingCtuTsAddrSlice;
+      haveReachedTileBoundary = true;
+    }
+  }
+  else if ((sliceMode == FIXED_NUMBER_OF_CTU || sliceMode == FIXED_NUMBER_OF_BYTES) && pcSlice->getPPS()->getEntropyCodingSyncEnabledFlag() && ((startCtuTSAddrSlice % pcPic->getFrameWidthInCtus()) != 0))
+  {
+    // Adjust for wavefronts (no tiles).
+    // WPP: if a slice does not start at the beginning of a CTB row, it must end within the same CTB row
+    boundingCtuTSAddrSlice = min(boundingCtuTSAddrSlice, startCtuTSAddrSlice - (startCtuTSAddrSlice % pcPic->getFrameWidthInCtus()) + (pcPic->getFrameWidthInCtus()));
+  }
+}
+
+/** Determines the starting and bounding CTU address of current slice / dependent slice
+ * Computes the bounds independently for the slice and for the slice segment,
+ * clamps the segment bound to the slice bound, and returns the intersection.
+ * \param encodingSlice Identifies if the calling function is compressSlice() [false] or encodeSlice() [true]
+ * \returns Updates startCtuTsAddr, boundingCtuTsAddr with appropriate CTU address
+ */
+Void TEncSlice::xDetermineStartAndBoundingCtuTsAddr  ( UInt& startCtuTsAddr, UInt& boundingCtuTsAddr, TComPic* pcPic, const Bool encodingSlice )
+{
+  TComSlice* pcSlice                 = pcPic->getSlice(getSliceIdx());
+
+  // Non-dependent slice
+  UInt startCtuTsAddrSlice           = pcSlice->getSliceCurStartCtuTsAddr();
+  Bool haveReachedTileBoundarySlice  = false;
+  UInt boundingCtuTsAddrSlice;
+  calculateBoundingCtuTsAddrForSlice(startCtuTsAddrSlice, boundingCtuTsAddrSlice, haveReachedTileBoundarySlice, pcPic,
+                                     encodingSlice, m_pcCfg->getSliceMode(), m_pcCfg->getSliceArgument(), pcSlice->getSliceCurEndCtuTsAddr());
+  pcSlice->setSliceCurEndCtuTsAddr(   boundingCtuTsAddrSlice );
+  pcSlice->setSliceCurStartCtuTsAddr( startCtuTsAddrSlice    );
+
+  // Dependent slice
+  UInt startCtuTsAddrSliceSegment          = pcSlice->getSliceSegmentCurStartCtuTsAddr();
+  Bool haveReachedTileBoundarySliceSegment = false;
+  UInt boundingCtuTsAddrSliceSegment;
+  calculateBoundingCtuTsAddrForSlice(startCtuTsAddrSliceSegment, boundingCtuTsAddrSliceSegment, haveReachedTileBoundarySliceSegment, pcPic,
+                                     encodingSlice, m_pcCfg->getSliceSegmentMode(), m_pcCfg->getSliceSegmentArgument(), pcSlice->getSliceSegmentCurEndCtuTsAddr());
+  // a slice segment may never extend beyond the end of its containing slice
+  if (boundingCtuTsAddrSliceSegment>boundingCtuTsAddrSlice)
+  {
+    boundingCtuTsAddrSliceSegment = boundingCtuTsAddrSlice;
+  }
+  pcSlice->setSliceSegmentCurEndCtuTsAddr( boundingCtuTsAddrSliceSegment );
+  pcSlice->setSliceSegmentCurStartCtuTsAddr(startCtuTsAddrSliceSegment);
+
+  // Make a joint decision based on reconstruction and dependent slice bounds
+  startCtuTsAddr    = max(startCtuTsAddrSlice   , startCtuTsAddrSliceSegment   );
+  boundingCtuTsAddr = boundingCtuTsAddrSliceSegment;
+}
+
+/** Map a Lagrange multiplier (lambda) back to an (unclipped, fractional) QP value.
+ *  Presumably the inverse of the encoder's empirical lambda-from-QP model — the
+ *  constants 4.2005 / 13.7122 are model parameters, not derived here. */
+Double TEncSlice::xGetQPValueAccordingToLambda ( Double lambda )
+{
+  return 4.2005*log(lambda) + 13.7122;
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncSlice.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,136 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncSlice.h
+    \brief    slice encoder class (header)
+*/
+
+#ifndef __TENCSLICE__
+#define __TENCSLICE__
+
+// Include files
+#include "TLibCommon/CommonDef.h"
+#include "TLibCommon/TComList.h"
+#include "TLibCommon/TComPic.h"
+#include "TLibCommon/TComPicYuv.h"
+#include "TEncCu.h"
+#include "WeightPredAnalysis.h"
+#include "TEncRateCtrl.h"
+
+//! \ingroup TLibEncoder
+//! \{
+
+class TEncTop;
+class TEncGOP;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// slice encoder class
+class TEncSlice
+  : public WeightPredAnalysis
+{
+private:
+  // encoder configuration
+  TEncCfg*                m_pcCfg;                              ///< encoder configuration class
+
+  // pictures
+  TComList<TComPic*>*     m_pcListPic;                          ///< list of pictures
+  TComPicYuv*             m_apcPicYuvPred;                      ///< prediction picture buffer
+  TComPicYuv*             m_apcPicYuvResi;                      ///< residual picture buffer
+
+  // processing units
+  TEncGOP*                m_pcGOPEncoder;                       ///< GOP encoder
+  TEncCu*                 m_pcCuEncoder;                        ///< CU encoder
+
+  // encoder search
+  TEncSearch*             m_pcPredSearch;                       ///< encoder search class
+
+  // coding tools
+  TEncEntropy*            m_pcEntropyCoder;                     ///< entropy encoder
+  TEncSbac*               m_pcSbacCoder;                        ///< SBAC encoder
+  TEncBinCABAC*           m_pcBinCABAC;                         ///< Bin encoder CABAC
+  TComTrQuant*            m_pcTrQuant;                          ///< transform & quantization
+
+  // RD optimization
+  TComRdCost*             m_pcRdCost;                           ///< RD cost computation
+  TEncSbac***             m_pppcRDSbacCoder;                    ///< storage for SBAC-based RD optimization
+  TEncSbac*               m_pcRDGoOnSbacCoder;                  ///< go-on SBAC encoder
+  UInt64                  m_uiPicTotalBits;                     ///< total bits for the picture
+  UInt64                  m_uiPicDist;                          ///< total distortion for the picture
+  Double                  m_dPicRdCost;                         ///< picture-level RD cost
+  Double*                 m_pdRdPicLambda;                      ///< array of lambda candidates
+  Double*                 m_pdRdPicQp;                          ///< array of picture QP candidates (double-type for lambda)
+  Int*                    m_piRdPicQp;                          ///< array of picture QP candidates (Int-type)
+  TEncRateCtrl*           m_pcRateCtrl;                         ///< Rate control manager
+  UInt                    m_uiSliceIdx;
+  TEncSbac                m_lastSliceSegmentEndContextState;    ///< context storage for state at the end of the previous slice-segment (used for dependent slices only).
+  TEncSbac                m_entropyCodingSyncContextState;      ///< context storate for state of contexts at the wavefront/WPP/entropy-coding-sync second CTU of tile-row
+
+  Void     setUpLambda(TComSlice* slice, const Double dLambda, Int iQP);
+  Void     calculateBoundingCtuTsAddrForSlice(UInt &startCtuTSAddrSlice, UInt &boundingCtuTSAddrSlice, Bool &haveReachedTileBoundary, TComPic* pcPic, const Bool encodingSlice, const Int sliceMode, const Int sliceArgument, const UInt uiSliceCurEndCtuTSAddr);
+
+public:
+  TEncSlice();
+  virtual ~TEncSlice();
+
+  Void    create              ( Int iWidth, Int iHeight, ChromaFormat chromaFormat, UInt iMaxCUWidth, UInt iMaxCUHeight, UChar uhTotalDepth );
+  Void    destroy             ();
+  Void    init                ( TEncTop* pcEncTop );
+
+  /// preparation of slice encoding (reference marking, QP and lambda)
+  Void    initEncSlice        ( TComPic*  pcPic, Int pocLast, Int pocCurr, Int iNumPicRcvd,
+                                Int iGOPid,   TComSlice*& rpcSlice, TComSPS* pSPS, TComPPS *pPPS, Bool isField );
+  Void    resetQP             ( TComPic* pic, Int sliceQP, Double lambda );
+  // compress and encode slice
+  Void    precompressSlice    ( TComPic* pcPic                                     );      ///< precompress slice for multi-loop opt.
+  Void    compressSlice       ( TComPic* pcPic                                     );      ///< analysis stage of slice
+  Void    calCostSliceI       ( TComPic* pcPic );
+  Void    encodeSlice         ( TComPic* pcPic, TComOutputBitstream* pcSubstreams, UInt &numBinsCoded );
+
+  // misc. functions
+  Void    setSearchRange      ( TComSlice* pcSlice  );                                  ///< set ME range adaptively
+
+  TEncCu*        getCUEncoder() { return m_pcCuEncoder; }                        ///< CU encoder
+  Void    xDetermineStartAndBoundingCtuTsAddr  ( UInt& startCtuTsAddr, UInt& boundingCtuTsAddr, TComPic* pcPic, const Bool encodingSlice );
+  UInt    getSliceIdx()         { return m_uiSliceIdx;                    }
+  Void    setSliceIdx(UInt i)   { m_uiSliceIdx = i;                       }
+
+private:
+  Double  xGetQPValueAccordingToLambda ( Double lambda );
+};
+
+//! \}
+
+#endif // __TENCSLICE__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncTop.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1056 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncTop.cpp
+    \brief    encoder class
+*/
+
+#include "TLibCommon/CommonDef.h"
+#include "TEncTop.h"
+#include "TEncPic.h"
+#include "TLibCommon/TComChromaFormat.h"
+#if FAST_BIT_EST
+#include "TLibCommon/ContextModel.h"
+#endif
+
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Constructor / destructor / create / destroy
+// ====================================================================================================================
+
+/// Constructor: zero/initialise counters and coder pointers; heavy allocation
+/// is deferred to create(). Also opens the encoder trace file and resets
+/// trace globals when ENC_DEC_TRACE is enabled.
+TEncTop::TEncTop()
+{
+  m_iPOCLast          = -1;
+  m_iNumPicRcvd       =  0;
+  m_uiNumAllPicCoded  =  0;
+  m_pppcRDSbacCoder   =  NULL;
+  m_pppcBinCoderCABAC =  NULL;
+  // bind the go-on SBAC coder to its CABAC bin coder up front
+  m_cRDGoOnSbacCoder.init( &m_cRDGoOnBinCoderCABAC );
+#if ENC_DEC_TRACE
+  if (g_hTrace == NULL)
+  {
+    g_hTrace = fopen( "TraceEnc.txt", "wb" );
+  }
+  g_bJustDoIt = g_bEncDecTraceDisable;
+  g_nSymbolCounter = 0;
+#endif
+
+  m_iMaxRefPicNum     = 0;
+
+#if FAST_BIT_EST
+  // precompute the CABAC context next-state transition table (shared, global)
+  ContextModel::buildNextStateTable();
+#endif
+}
+
+/// Destructor: closes the trace file if one was opened.
+/// NOTE(review): if the fopen() in the constructor failed, g_hTrace could be
+/// NULL here and fclose(NULL) is undefined behaviour — confirm g_hTrace's
+/// default value elsewhere before relying on this.
+TEncTop::~TEncTop()
+{
+#if ENC_DEC_TRACE
+  if (g_hTrace != stdout)
+  {
+    fclose( g_hTrace );
+  }
+#endif
+}
+
+/// Allocate all processing-unit classes and the per-depth RD SBAC coder
+/// array. Must be called once before init(); destroy() releases everything
+/// allocated here.
+Void TEncTop::create ()
+{
+  // initialize global variables
+  initROM();
+
+  // create processing unit classes
+  m_cGOPEncoder.        create( );
+  m_cSliceEncoder.      create( getSourceWidth(), getSourceHeight(), m_chromaFormatIDC, g_uiMaxCUWidth, g_uiMaxCUHeight, g_uiMaxCUDepth );
+  m_cCuEncoder.         create( g_uiMaxCUDepth, g_uiMaxCUWidth, g_uiMaxCUHeight, m_chromaFormatIDC );
+  if (m_bUseSAO)
+  {
+    m_cEncSAO.create( getSourceWidth(), getSourceHeight(), m_chromaFormatIDC, g_uiMaxCUWidth, g_uiMaxCUHeight, g_uiMaxCUDepth, m_saoOffsetBitShift[CHANNEL_TYPE_LUMA], m_saoOffsetBitShift[CHANNEL_TYPE_CHROMA] );
+#if SAO_ENCODE_ALLOW_USE_PREDEBLOCK
+    m_cEncSAO.createEncData(getSaoCtuBoundary());
+#else
+    m_cEncSAO.createEncData();
+#endif
+  }
+#if ADAPTIVE_QP_SELECTION
+  if (m_bUseAdaptQpSelect)
+  {
+    m_cTrQuant.initSliceQpDelta();
+  }
+#endif
+
+  m_cLoopFilter.create( g_uiMaxCUDepth );
+
+  if ( m_RCEnableRateControl )
+  {
+    m_cRateCtrl.init( m_framesToBeEncoded, m_RCTargetBitrate, m_iFrameRate, m_iGOPSize, m_iSourceWidth, m_iSourceHeight,
+                      g_uiMaxCUWidth, g_uiMaxCUHeight, m_RCKeepHierarchicalBit, m_RCUseLCUSeparateModel, m_GOPList );
+  }
+
+  // allocate the RD SBAC coder matrix: one coder per CU depth (0..max) and
+  // per context-index slot (CI_NUM); FAST_BIT_EST swaps in a counting-only
+  // CABAC bin coder for cheaper bit estimation
+  m_pppcRDSbacCoder = new TEncSbac** [g_uiMaxCUDepth+1];
+#if FAST_BIT_EST
+  m_pppcBinCoderCABAC = new TEncBinCABACCounter** [g_uiMaxCUDepth+1];
+#else
+  m_pppcBinCoderCABAC = new TEncBinCABAC** [g_uiMaxCUDepth+1];
+#endif
+
+  for ( Int iDepth = 0; iDepth < g_uiMaxCUDepth+1; iDepth++ )
+  {
+    m_pppcRDSbacCoder[iDepth] = new TEncSbac* [CI_NUM];
+#if FAST_BIT_EST
+    m_pppcBinCoderCABAC[iDepth] = new TEncBinCABACCounter* [CI_NUM];
+#else
+    m_pppcBinCoderCABAC[iDepth] = new TEncBinCABAC* [CI_NUM];
+#endif
+
+    for (Int iCIIdx = 0; iCIIdx < CI_NUM; iCIIdx ++ )
+    {
+      m_pppcRDSbacCoder[iDepth][iCIIdx] = new TEncSbac;
+#if FAST_BIT_EST
+      m_pppcBinCoderCABAC [iDepth][iCIIdx] = new TEncBinCABACCounter;
+#else
+      m_pppcBinCoderCABAC [iDepth][iCIIdx] = new TEncBinCABAC;
+#endif
+      // pair each SBAC coder with its dedicated bin coder
+      m_pppcRDSbacCoder   [iDepth][iCIIdx]->init( m_pppcBinCoderCABAC [iDepth][iCIIdx] );
+    }
+  }
+}
+
+/// Release everything allocated by create(), in mirror order, then tear
+/// down the ROM tables.
+/// NOTE(review): create() gates SAO allocation on m_bUseSAO while this gates
+/// deallocation on m_cSPS.getUseSAO() — presumably kept consistent by
+/// xInitSPS(); confirm the two flags cannot diverge.
+Void TEncTop::destroy ()
+{
+  // destroy processing unit classes
+  m_cGOPEncoder.        destroy();
+  m_cSliceEncoder.      destroy();
+  m_cCuEncoder.         destroy();
+  if (m_cSPS.getUseSAO())
+  {
+    m_cEncSAO.destroyEncData();
+    m_cEncSAO.destroy();
+  }
+  m_cLoopFilter.        destroy();
+  m_cRateCtrl.          destroy();
+  // free the RD SBAC coder matrix: elements first, then the row arrays,
+  // then the top-level arrays
+  Int iDepth;
+  for ( iDepth = 0; iDepth < g_uiMaxCUDepth+1; iDepth++ )
+  {
+    for (Int iCIIdx = 0; iCIIdx < CI_NUM; iCIIdx ++ )
+    {
+      delete m_pppcRDSbacCoder[iDepth][iCIIdx];
+      delete m_pppcBinCoderCABAC[iDepth][iCIIdx];
+    }
+  }
+
+  for ( iDepth = 0; iDepth < g_uiMaxCUDepth+1; iDepth++ )
+  {
+    delete [] m_pppcRDSbacCoder[iDepth];
+    delete [] m_pppcBinCoderCABAC[iDepth];
+  }
+
+  delete [] m_pppcRDSbacCoder;
+  delete [] m_pppcBinCoderCABAC;
+
+  // destroy ROM
+  destroyROM();
+
+  return;
+}
+
+/// Initialise parameter sets (SPS/VPS/PPS/RPS) and wire the processing-unit
+/// classes together. Must run after create().
+/// \param isFieldCoding  true for field (interlaced) coding; affects RPS setup
+Void TEncTop::init(Bool isFieldCoding)
+{
+  // initialize SPS
+  xInitSPS();
+
+  // set the VPS profile information
+  *m_cVPS.getPTL() = *m_cSPS.getPTL();
+  m_cVPS.getTimingInfo()->setTimingInfoPresentFlag       ( false );
+
+  m_cRdCost.setCostMode(m_costMode);
+
+  // initialize PPS
+  m_cPPS.setSPS(&m_cSPS);
+  xInitPPS();
+  xInitRPS(isFieldCoding);
+
+  xInitPPSforTiles();
+
+  // initialize processing unit classes
+  m_cGOPEncoder.  init( this );
+  m_cSliceEncoder.init( this );
+  m_cCuEncoder.   init( this );
+
+  // initialize transform & quantization class
+  m_pcCavlcCoder = getCavlcCoder();
+
+  m_cTrQuant.init( 1 << m_uiQuadtreeTULog2MaxSize,
+                   m_useRDOQ,
+                   m_useRDOQTS,
+                   true
+                  ,m_useTransformSkipFast
+#if ADAPTIVE_QP_SELECTION
+                  ,m_bUseAdaptQpSelect
+#endif
+                  );
+
+  // initialize encoder search class
+  m_cSearch.init( this, &m_cTrQuant, m_iSearchRange, m_bipredSearchRange, m_iFastSearch, 0, &m_cEntropyCoder, &m_cRdCost, getRDSbacCoder(), getRDGoOnSbacCoder() );
+
+  m_iMaxRefPicNum = 0;
+}
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+/// Destroy and free every picture in the encoder's picture buffer list.
+/// Note: the list itself is not cleared here; it still holds the (now
+/// dangling) pointers after this call.
+Void TEncTop::deletePicBuffer()
+{
+  TComList<TComPic*>::iterator iterPic = m_cListPic.begin();
+  Int iSize = Int( m_cListPic.size() );
+
+  for ( Int i = 0; i < iSize; i++ )
+  {
+    TComPic* pcPic = *(iterPic++);
+
+    pcPic->destroy();
+    delete pcPic;
+    pcPic = NULL;
+  }
+}
+
+/**
+ - Application has picture buffer list with size of GOP + 1
+ - Picture buffer list acts like as ring buffer
+ - End of the list has the latest picture
+ .
+ \param   flush               cause encoder to encode a partial GOP
+ \param   pcPicYuvOrg         original YUV picture (may be NULL when only flushing)
+ \param   pcPicYuvTrueOrg     original picture before any pre-processing
+ \param   snrCSC              colour-space conversion to apply for SNR reporting
+ \retval  rcListPicYuvRecOut  list of reconstruction YUV pictures
+ \retval  accessUnitsOut      list of output access units (bitstreams)
+ \retval  iNumEncoded         number of encoded pictures
+ */
+Void TEncTop::encode( Bool flush, TComPicYuv* pcPicYuvOrg, TComPicYuv* pcPicYuvTrueOrg, const InputColourSpaceConversion snrCSC, TComList<TComPicYuv*>& rcListPicYuvRecOut, std::list<AccessUnit>& accessUnitsOut, Int& iNumEncoded )
+{
+  if (pcPicYuvOrg != NULL)
+  {
+    // get original YUV
+    TComPic* pcPicCurr = NULL;
+
+    xGetNewPicBuffer( pcPicCurr );
+    pcPicYuvOrg->copyToPic( pcPicCurr->getPicYuvOrg() );
+    pcPicYuvTrueOrg->copyToPic( pcPicCurr->getPicYuvTrueOrg() );
+
+    // compute image characteristics
+    if ( getUseAdaptiveQP() )
+    {
+      m_cPreanalyzer.xPreanalyze( dynamic_cast<TEncPic*>( pcPicCurr ) );
+    }
+  }
+
+  // only start compressing once a full GOP has been buffered (or on flush)
+  if ((m_iNumPicRcvd == 0) || (!flush && (m_iPOCLast != 0) && (m_iNumPicRcvd != m_iGOPSize) && (m_iGOPSize != 0)))
+  {
+    iNumEncoded = 0;
+    return;
+  }
+
+  if ( m_RCEnableRateControl )
+  {
+    m_cRateCtrl.initRCGOP( m_iNumPicRcvd );
+  }
+
+  // compress GOP
+  m_cGOPEncoder.compressGOP(m_iPOCLast, m_iNumPicRcvd, m_cListPic, rcListPicYuvRecOut, accessUnitsOut, false, false, snrCSC, m_printFrameMSE);
+
+  if ( m_RCEnableRateControl )
+  {
+    m_cRateCtrl.destroyRCGOP();
+  }
+
+  iNumEncoded         = m_iNumPicRcvd;
+  m_iNumPicRcvd       = 0;
+  m_uiNumAllPicCoded += iNumEncoded;
+}
+
+/**------------------------------------------------
+ Separate interlaced frame into two fields
+ -------------------------------------------------**/
+Void separateFields(Pel* org, Pel* dstField, UInt stride, UInt width, UInt height, Bool isTop)
+{
+  if (!isTop)
+  {
+    org += stride;
+  }
+  for (Int y = 0; y < height>>1; y++)
+  {
+    for (Int x = 0; x < width; x++)
+    {
+      dstField[x] = org[x];
+    }
+
+    dstField += stride;
+    org += stride*2;
+  }
+
+}
+
+Void TEncTop::encode(Bool flush, TComPicYuv* pcPicYuvOrg, TComPicYuv* pcPicYuvTrueOrg, const InputColourSpaceConversion snrCSC, TComList<TComPicYuv*>& rcListPicYuvRecOut, std::list<AccessUnit>& accessUnitsOut, Int& iNumEncoded, Bool isTff)
+{
+  iNumEncoded = 0;
+
+  for (Int fieldNum=0; fieldNum<2; fieldNum++)
+  {
+    if (pcPicYuvOrg)
+    {
+
+      /* -- field initialization -- */
+      const Bool isTopField=isTff==(fieldNum==0);
+
+      TComPic *pcField;
+      xGetNewPicBuffer( pcField );
+      pcField->setReconMark (false);                     // where is this normally?
+
+      if (fieldNum==1)                                   // where is this normally?
+      {
+        TComPicYuv* rpcPicYuvRec;
+
+        // org. buffer
+        if ( rcListPicYuvRecOut.size() >= (UInt)m_iGOPSize+1 ) // need to maintain field 0 in list of RecOuts while processing field 1. Hence +1 on m_iGOPSize.
+        {
+          rpcPicYuvRec = rcListPicYuvRecOut.popFront();
+        }
+        else
+        {
+          rpcPicYuvRec = new TComPicYuv;
+          rpcPicYuvRec->create( m_iSourceWidth, m_iSourceHeight, m_chromaFormatIDC, g_uiMaxCUWidth, g_uiMaxCUHeight, g_uiMaxCUDepth);
+        }
+        rcListPicYuvRecOut.pushBack( rpcPicYuvRec );
+      }
+
+      pcField->getSlice(0)->setPOC( m_iPOCLast );        // superfluous?
+      pcField->getPicYuvRec()->setBorderExtension(false);// where is this normally?
+
+      pcField->setTopField(isTopField);                  // interlaced requirement
+
+      for (UInt componentIndex = 0; componentIndex < pcPicYuvOrg->getNumberValidComponents(); componentIndex++)
+      {
+        const ComponentID component = ComponentID(componentIndex);
+        const UInt stride = pcPicYuvOrg->getStride(component);
+
+        separateFields((pcPicYuvOrg->getBuf(component) + pcPicYuvOrg->getMarginX(component) + (pcPicYuvOrg->getMarginY(component) * stride)),
+                       pcField->getPicYuvOrg()->getAddr(component),
+                       pcPicYuvOrg->getStride(component),
+                       pcPicYuvOrg->getWidth(component),
+                       pcPicYuvOrg->getHeight(component),
+                       isTopField);
+
+        separateFields((pcPicYuvTrueOrg->getBuf(component) + pcPicYuvTrueOrg->getMarginX(component) + (pcPicYuvTrueOrg->getMarginY(component) * stride)),
+                       pcField->getPicYuvTrueOrg()->getAddr(component),
+                       pcPicYuvTrueOrg->getStride(component),
+                       pcPicYuvTrueOrg->getWidth(component),
+                       pcPicYuvTrueOrg->getHeight(component),
+                       isTopField);
+      }
+
+      // compute image characteristics
+      if ( getUseAdaptiveQP() )
+      {
+        m_cPreanalyzer.xPreanalyze( dynamic_cast<TEncPic*>( pcField ) );
+      }
+    }
+
+    if ( m_iNumPicRcvd && ((flush&&fieldNum==1) || (m_iPOCLast/2)==0 || m_iNumPicRcvd==m_iGOPSize ) )
+    {
+      // compress GOP
+      m_cGOPEncoder.compressGOP(m_iPOCLast, m_iNumPicRcvd, m_cListPic, rcListPicYuvRecOut, accessUnitsOut, true, isTff, snrCSC, m_printFrameMSE);
+
+      iNumEncoded += m_iNumPicRcvd;
+      m_uiNumAllPicCoded += m_iNumPicRcvd;
+      m_iNumPicRcvd = 0;
+    }
+  }
+}
+
+// ====================================================================================================================
+// Protected member functions
+// ====================================================================================================================
+
+/**
+ - Application has picture buffer list with size of GOP + 1
+ - Picture buffer list acts like a ring buffer
+ - End of the list has the latest picture
+ .
+ \retval rpcPic obtained picture buffer
+ */
+Void TEncTop::xGetNewPicBuffer ( TComPic*& rpcPic )
+{
+  TComSlice::sortPicList(m_cListPic);
+
+  if (m_cListPic.size() >= (UInt)(m_iGOPSize + getMaxDecPicBuffering(MAX_TLAYER-1) + 2) )
+  {
+    TComList<TComPic*>::iterator iterPic  = m_cListPic.begin();
+    Int iSize = Int( m_cListPic.size() );
+    for ( Int i = 0; i < iSize; i++ )
+    {
+      rpcPic = *(iterPic++);
+      if(rpcPic->getSlice(0)->isReferenced() == false)
+      {
+        break;
+      }
+    }
+  }
+  else
+  {
+    if ( getUseAdaptiveQP() )
+    {
+      TEncPic* pcEPic = new TEncPic;
+      pcEPic->create( m_iSourceWidth, m_iSourceHeight, m_chromaFormatIDC, g_uiMaxCUWidth, g_uiMaxCUHeight, g_uiMaxCUDepth, m_cPPS.getMaxCuDQPDepth()+1, m_conformanceWindow, m_defaultDisplayWindow, m_numReorderPics);
+      rpcPic = pcEPic;
+    }
+    else
+    {
+      rpcPic = new TComPic;
+      rpcPic->create( m_iSourceWidth, m_iSourceHeight, m_chromaFormatIDC, g_uiMaxCUWidth, g_uiMaxCUHeight, g_uiMaxCUDepth, m_conformanceWindow, m_defaultDisplayWindow, m_numReorderPics, false );
+    }
+
+    m_cListPic.pushBack( rpcPic );
+  }
+  rpcPic->setReconMark (false);
+
+  m_iPOCLast++;
+  m_iNumPicRcvd++;
+
+  rpcPic->getSlice(0)->setPOC( m_iPOCLast );
+  // mark it should be extended
+  rpcPic->getPicYuvRec()->setBorderExtension(false);
+}
+
+Void TEncTop::xInitSPS()
+{
+  ProfileTierLevel& profileTierLevel = *m_cSPS.getPTL()->getGeneralPTL();
+  profileTierLevel.setLevelIdc(m_level);
+  profileTierLevel.setTierFlag(m_levelTier);
+  profileTierLevel.setProfileIdc(m_profile);
+  profileTierLevel.setProfileCompatibilityFlag(m_profile, 1);
+  profileTierLevel.setProgressiveSourceFlag(m_progressiveSourceFlag);
+  profileTierLevel.setInterlacedSourceFlag(m_interlacedSourceFlag);
+  profileTierLevel.setNonPackedConstraintFlag(m_nonPackedConstraintFlag);
+  profileTierLevel.setFrameOnlyConstraintFlag(m_frameOnlyConstraintFlag);
+  profileTierLevel.setBitDepthConstraint(m_bitDepthConstraintValue);
+  profileTierLevel.setChromaFormatConstraint(m_chromaFormatConstraintValue);
+  profileTierLevel.setIntraConstraintFlag(m_intraConstraintFlag);
+  profileTierLevel.setLowerBitRateConstraintFlag(m_lowerBitRateConstraintFlag);
+
+  if ((m_profile == Profile::MAIN10) && (g_bitDepth[CHANNEL_TYPE_LUMA] == 8) && (g_bitDepth[CHANNEL_TYPE_CHROMA] == 8))
+  {
+    /* The above constraint is equal to Profile::MAIN */
+    profileTierLevel.setProfileCompatibilityFlag(Profile::MAIN, 1);
+  }
+  if (m_profile == Profile::MAIN)
+  {
+    /* A Profile::MAIN10 decoder can always decode Profile::MAIN */
+    profileTierLevel.setProfileCompatibilityFlag(Profile::MAIN10, 1);
+  }
+  /* XXX: should Main be marked as compatible with still picture? */
+  /* XXX: may be a good idea to refactor the above into a function
+   * that chooses the actual compatibility based upon options */
+
+  m_cSPS.setPicWidthInLumaSamples         ( m_iSourceWidth      );
+  m_cSPS.setPicHeightInLumaSamples        ( m_iSourceHeight     );
+  m_cSPS.setConformanceWindow             ( m_conformanceWindow );
+  m_cSPS.setMaxCUWidth    ( g_uiMaxCUWidth      );
+  m_cSPS.setMaxCUHeight   ( g_uiMaxCUHeight     );
+  m_cSPS.setMaxCUDepth    ( g_uiMaxCUDepth      );
+  m_cSPS.setChromaFormatIdc( m_chromaFormatIDC);
+
+  Int minCUSize = m_cSPS.getMaxCUWidth() >> ( m_cSPS.getMaxCUDepth()-g_uiAddCUDepth );
+  Int log2MinCUSize = 0;
+  while(minCUSize > 1)
+  {
+    minCUSize >>= 1;
+    log2MinCUSize++;
+  }
+
+  m_cSPS.setLog2MinCodingBlockSize(log2MinCUSize);
+  m_cSPS.setLog2DiffMaxMinCodingBlockSize(m_cSPS.getMaxCUDepth()-g_uiAddCUDepth-getMaxCUDepthOffset(m_cSPS.getChromaFormatIdc(), m_cSPS.getQuadtreeTULog2MinSize()));
+
+  m_cSPS.setPCMLog2MinSize (m_uiPCMLog2MinSize);
+  m_cSPS.setUsePCM        ( m_usePCM           );
+  m_cSPS.setPCMLog2MaxSize( m_pcmLog2MaxSize  );
+
+  m_cSPS.setQuadtreeTULog2MaxSize( m_uiQuadtreeTULog2MaxSize );
+  m_cSPS.setQuadtreeTULog2MinSize( m_uiQuadtreeTULog2MinSize );
+  m_cSPS.setQuadtreeTUMaxDepthInter( m_uiQuadtreeTUMaxDepthInter    );
+  m_cSPS.setQuadtreeTUMaxDepthIntra( m_uiQuadtreeTUMaxDepthIntra    );
+
+  m_cSPS.setTMVPFlagsPresent(false);
+
+  m_cSPS.setMaxTrSize   ( 1 << m_uiQuadtreeTULog2MaxSize );
+
+  Int i;
+
+  for (i = 0; i < g_uiMaxCUDepth-g_uiAddCUDepth; i++ )
+  {
+    m_cSPS.setAMPAcc( i, m_useAMP );
+    //m_cSPS.setAMPAcc( i, 1 );
+  }
+
+  m_cSPS.setUseAMP ( m_useAMP );
+
+  for (i = g_uiMaxCUDepth-g_uiAddCUDepth; i < g_uiMaxCUDepth; i++ )
+  {
+    m_cSPS.setAMPAcc(i, 0);
+  }
+
+
+  for (UInt channelType = 0; channelType < MAX_NUM_CHANNEL_TYPE; channelType++)
+  {
+    m_cSPS.setBitDepth    (ChannelType(channelType), g_bitDepth[channelType]            );
+    m_cSPS.setQpBDOffset  (ChannelType(channelType), (6 * (g_bitDepth[channelType] - 8)));
+    m_cSPS.setPCMBitDepth (ChannelType(channelType), g_PCMBitDepth[channelType]         );
+  }
+
+  m_cSPS.setUseExtendedPrecision(m_useExtendedPrecision);
+  m_cSPS.setUseHighPrecisionPredictionWeighting(m_useHighPrecisionPredictionWeighting);
+
+  m_cSPS.setUseSAO( m_bUseSAO );
+  m_cSPS.setUseResidualRotation(m_useResidualRotation);
+  m_cSPS.setUseSingleSignificanceMapContext(m_useSingleSignificanceMapContext);
+  m_cSPS.setUseGolombRiceParameterAdaptation(m_useGolombRiceParameterAdaptation);
+  m_cSPS.setAlignCABACBeforeBypass(m_alignCABACBeforeBypass);
+
+  for (UInt signallingModeIndex = 0; signallingModeIndex < NUMBER_OF_RDPCM_SIGNALLING_MODES; signallingModeIndex++)
+  {
+    m_cSPS.setUseResidualDPCM(RDPCMSignallingMode(signallingModeIndex), m_useResidualDPCM[signallingModeIndex]);
+  }
+
+  m_cSPS.setMaxTLayers( m_maxTempLayer );
+  m_cSPS.setTemporalIdNestingFlag( ( m_maxTempLayer == 1 ) ? true : false );
+
+  for ( i = 0; i < min(m_cSPS.getMaxTLayers(),(UInt) MAX_TLAYER); i++ )
+  {
+    m_cSPS.setMaxDecPicBuffering(m_maxDecPicBuffering[i], i);
+    m_cSPS.setNumReorderPics(m_numReorderPics[i], i);
+  }
+
+  m_cSPS.setPCMFilterDisableFlag  ( m_bPCMFilterDisableFlag );
+  m_cSPS.setDisableIntraReferenceSmoothing( m_disableIntraReferenceSmoothing );
+  m_cSPS.setScalingListFlag ( (m_useScalingListId == 0) ? 0 : 1 );
+  m_cSPS.setUseStrongIntraSmoothing( m_useStrongIntraSmoothing );
+  m_cSPS.setVuiParametersPresentFlag(getVuiParametersPresentFlag());
+
+  if (m_cSPS.getVuiParametersPresentFlag())
+  {
+    TComVUI* pcVUI = m_cSPS.getVuiParameters();
+    pcVUI->setAspectRatioInfoPresentFlag(getAspectRatioInfoPresentFlag());
+    pcVUI->setAspectRatioIdc(getAspectRatioIdc());
+    pcVUI->setSarWidth(getSarWidth());
+    pcVUI->setSarHeight(getSarHeight());
+    pcVUI->setOverscanInfoPresentFlag(getOverscanInfoPresentFlag());
+    pcVUI->setOverscanAppropriateFlag(getOverscanAppropriateFlag());
+    pcVUI->setVideoSignalTypePresentFlag(getVideoSignalTypePresentFlag());
+    pcVUI->setVideoFormat(getVideoFormat());
+    pcVUI->setVideoFullRangeFlag(getVideoFullRangeFlag());
+    pcVUI->setColourDescriptionPresentFlag(getColourDescriptionPresentFlag());
+    pcVUI->setColourPrimaries(getColourPrimaries());
+    pcVUI->setTransferCharacteristics(getTransferCharacteristics());
+    pcVUI->setMatrixCoefficients(getMatrixCoefficients());
+    pcVUI->setChromaLocInfoPresentFlag(getChromaLocInfoPresentFlag());
+    pcVUI->setChromaSampleLocTypeTopField(getChromaSampleLocTypeTopField());
+    pcVUI->setChromaSampleLocTypeBottomField(getChromaSampleLocTypeBottomField());
+    pcVUI->setNeutralChromaIndicationFlag(getNeutralChromaIndicationFlag());
+    pcVUI->setDefaultDisplayWindow(getDefaultDisplayWindow());
+    pcVUI->setFrameFieldInfoPresentFlag(getFrameFieldInfoPresentFlag());
+    pcVUI->setFieldSeqFlag(false);
+    pcVUI->setHrdParametersPresentFlag(false);
+    pcVUI->getTimingInfo()->setPocProportionalToTimingFlag(getPocProportionalToTimingFlag());
+    pcVUI->getTimingInfo()->setNumTicksPocDiffOneMinus1   (getNumTicksPocDiffOneMinus1()   );
+    pcVUI->setBitstreamRestrictionFlag(getBitstreamRestrictionFlag());
+    pcVUI->setTilesFixedStructureFlag(getTilesFixedStructureFlag());
+    pcVUI->setMotionVectorsOverPicBoundariesFlag(getMotionVectorsOverPicBoundariesFlag());
+    pcVUI->setMinSpatialSegmentationIdc(getMinSpatialSegmentationIdc());
+    pcVUI->setMaxBytesPerPicDenom(getMaxBytesPerPicDenom());
+    pcVUI->setMaxBitsPerMinCuDenom(getMaxBitsPerMinCuDenom());
+    pcVUI->setLog2MaxMvLengthHorizontal(getLog2MaxMvLengthHorizontal());
+    pcVUI->setLog2MaxMvLengthVertical(getLog2MaxMvLengthVertical());
+  }
+}
+
+Void TEncTop::xInitPPS()
+{
+  m_cPPS.setConstrainedIntraPred( m_bUseConstrainedIntraPred );
+  Bool bUseDQP = (getMaxCuDQPDepth() > 0)? true : false;
+
+  if((getMaxDeltaQP() != 0 )|| getUseAdaptiveQP())
+  {
+    bUseDQP = true;
+  }
+
+  if (m_costMode==COST_SEQUENCE_LEVEL_LOSSLESS || m_costMode==COST_LOSSLESS_CODING) bUseDQP=false;
+
+  if(bUseDQP)
+  {
+    m_cPPS.setUseDQP(true);
+    m_cPPS.setMaxCuDQPDepth( m_iMaxCuDQPDepth );
+    m_cPPS.setMinCuDQPSize( m_cPPS.getSPS()->getMaxCUWidth() >> ( m_cPPS.getMaxCuDQPDepth()) );
+  }
+  else
+  {
+    m_cPPS.setUseDQP(false);
+    m_cPPS.setMaxCuDQPDepth( 0 );
+    m_cPPS.setMinCuDQPSize( m_cPPS.getSPS()->getMaxCUWidth() >> ( m_cPPS.getMaxCuDQPDepth()) );
+  }
+
+  if ( m_maxCUChromaQpAdjustmentDepth >= 0 )
+  {
+    m_cPPS.setMaxCuChromaQpAdjDepth(m_maxCUChromaQpAdjustmentDepth);
+    m_cPPS.setChromaQpAdjTableAt(1, 6, 6);
+    /* todo, insert table entries from command line (NB, 0 should not be touched) */
+  }
+  else
+  {
+    m_cPPS.setMaxCuChromaQpAdjDepth(0);
+    m_cPPS.clearChromaQpAdjTable();
+  }
+
+  if ( m_RCEnableRateControl )
+  {
+    m_cPPS.setUseDQP(true);
+    m_cPPS.setMaxCuDQPDepth( 0 );
+    m_cPPS.setMinCuDQPSize( m_cPPS.getSPS()->getMaxCUWidth() >> ( m_cPPS.getMaxCuDQPDepth()) );
+  }
+
+  m_cPPS.setMinCuChromaQpAdjSize( m_cPPS.getSPS()->getMaxCUWidth() >> ( m_cPPS.getMaxCuChromaQpAdjDepth()) );
+
+  m_cPPS.setQpOffset(COMPONENT_Cb, m_chromaCbQpOffset );
+  m_cPPS.setQpOffset(COMPONENT_Cr, m_chromaCrQpOffset );
+
+  m_cPPS.setNumSubstreams(m_iWaveFrontSubstreams);
+  m_cPPS.setEntropyCodingSyncEnabledFlag( m_iWaveFrontSynchro > 0 );
+  m_cPPS.setTilesEnabledFlag( (m_iNumColumnsMinus1 > 0 || m_iNumRowsMinus1 > 0) );
+  m_cPPS.setUseWP( m_useWeightedPred );
+  m_cPPS.setWPBiPred( m_useWeightedBiPred );
+  m_cPPS.setUseCrossComponentPrediction(m_useCrossComponentPrediction);
+  m_cPPS.setSaoOffsetBitShift(CHANNEL_TYPE_LUMA,   m_saoOffsetBitShift[CHANNEL_TYPE_LUMA  ]);
+  m_cPPS.setSaoOffsetBitShift(CHANNEL_TYPE_CHROMA, m_saoOffsetBitShift[CHANNEL_TYPE_CHROMA]);
+  m_cPPS.setOutputFlagPresentFlag( false );
+  m_cPPS.setSignHideFlag(getSignHideFlag());
+  if ( getDeblockingFilterMetric() )
+  {
+    m_cPPS.setDeblockingFilterControlPresentFlag (true);
+    m_cPPS.setDeblockingFilterOverrideEnabledFlag(true);
+    m_cPPS.setPicDisableDeblockingFilterFlag(false);
+    m_cPPS.setDeblockingFilterBetaOffsetDiv2(0);
+    m_cPPS.setDeblockingFilterTcOffsetDiv2(0);
+  }
+  else
+  {
+    m_cPPS.setDeblockingFilterControlPresentFlag (m_DeblockingFilterControlPresent );
+  }
+  m_cPPS.setLog2ParallelMergeLevelMinus2   (m_log2ParallelMergeLevelMinus2 );
+  m_cPPS.setCabacInitPresentFlag(CABAC_INIT_PRESENT_FLAG);
+  m_cPPS.setLoopFilterAcrossSlicesEnabledFlag( m_bLFCrossSliceBoundaryFlag );
+
+  Int histogram[MAX_NUM_REF + 1];
+  for( Int i = 0; i <= MAX_NUM_REF; i++ )
+  {
+    histogram[i]=0;
+  }
+  for( Int i = 0; i < getGOPSize(); i++)
+  {
+    assert(getGOPEntry(i).m_numRefPicsActive >= 0 && getGOPEntry(i).m_numRefPicsActive <= MAX_NUM_REF);
+    histogram[getGOPEntry(i).m_numRefPicsActive]++;
+  }
+
+  Int maxHist=-1;
+  Int bestPos=0;
+  for( Int i = 0; i <= MAX_NUM_REF; i++ )
+  {
+    if(histogram[i]>maxHist)
+    {
+      maxHist=histogram[i];
+      bestPos=i;
+    }
+  }
+  assert(bestPos <= 15);
+  m_cPPS.setNumRefIdxL0DefaultActive(bestPos);
+  m_cPPS.setNumRefIdxL1DefaultActive(bestPos);
+  m_cPPS.setTransquantBypassEnableFlag(getTransquantBypassEnableFlag());
+  m_cPPS.setUseTransformSkip( m_useTransformSkip );
+  m_cPPS.setTransformSkipLog2MaxSize( m_transformSkipLog2MaxSize  );
+
+  if (m_sliceSegmentMode != NO_SLICES)
+  {
+    m_cPPS.setDependentSliceSegmentsEnabledFlag( true );
+  }
+}
+
+//Function for initializing m_RPSList, a list of TComReferencePictureSet, based on the GOPEntry objects read from the config file.
+Void TEncTop::xInitRPS(Bool isFieldCoding)
+{
+  TComReferencePictureSet*      rps;
+
+  m_cSPS.createRPSList(getGOPSize() + m_extraRPSs + 1);
+  TComRPSList* rpsList = m_cSPS.getRPSList();
+
+  for( Int i = 0; i < getGOPSize()+m_extraRPSs; i++)
+  {
+    GOPEntry ge = getGOPEntry(i);
+    rps = rpsList->getReferencePictureSet(i);
+    rps->setNumberOfPictures(ge.m_numRefPics);
+    rps->setNumRefIdc(ge.m_numRefIdc);
+    Int numNeg = 0;
+    Int numPos = 0;
+    for( Int j = 0; j < ge.m_numRefPics; j++)
+    {
+      rps->setDeltaPOC(j,ge.m_referencePics[j]);
+      rps->setUsed(j,ge.m_usedByCurrPic[j]);
+      if(ge.m_referencePics[j]>0)
+      {
+        numPos++;
+      }
+      else
+      {
+        numNeg++;
+      }
+    }
+    rps->setNumberOfNegativePictures(numNeg);
+    rps->setNumberOfPositivePictures(numPos);
+
+    // handle inter RPS initialization from the config file.
+#if AUTO_INTER_RPS
+    rps->setInterRPSPrediction(ge.m_interRPSPrediction > 0);  // not very clean, converting anything > 0 to true.
+    rps->setDeltaRIdxMinus1(0);                               // index to the Reference RPS is always the previous one.
+    TComReferencePictureSet*     RPSRef = rpsList->getReferencePictureSet(i-1);  // get the reference RPS
+
+    if (ge.m_interRPSPrediction == 2)  // Automatic generation of the inter RPS idc based on the RIdx provided.
+    {
+      Int deltaRPS = getGOPEntry(i-1).m_POC - ge.m_POC;  // the ref POC - current POC
+      Int numRefDeltaPOC = RPSRef->getNumberOfPictures();
+
+      rps->setDeltaRPS(deltaRPS);           // set delta RPS
+      rps->setNumRefIdc(numRefDeltaPOC+1);  // set the numRefIdc to the number of pictures in the reference RPS + 1.
+      Int count=0;
+      for (Int j = 0; j <= numRefDeltaPOC; j++ ) // cycle through pics in reference RPS.
+      {
+        Int RefDeltaPOC = (j<numRefDeltaPOC)? RPSRef->getDeltaPOC(j): 0;  // if it is the last decoded picture, set RefDeltaPOC = 0
+        rps->setRefIdc(j, 0);
+        for (Int k = 0; k < rps->getNumberOfPictures(); k++ )  // cycle through pics in current RPS.
+        {
+          if (rps->getDeltaPOC(k) == ( RefDeltaPOC + deltaRPS))  // if the current RPS has a same picture as the reference RPS.
+          {
+              rps->setRefIdc(j, (rps->getUsed(k)?1:2));
+              count++;
+              break;
+          }
+        }
+      }
+      if (count != rps->getNumberOfPictures())
+      {
+        printf("Warning: Unable fully predict all delta POCs using the reference RPS index given in the config file.  Setting Inter RPS to false for this RPS.\n");
+        rps->setInterRPSPrediction(0);
+      }
+    }
+    else if (ge.m_interRPSPrediction == 1)  // inter RPS idc based on the RefIdc values provided in config file.
+    {
+      rps->setDeltaRPS(ge.m_deltaRPS);
+      rps->setNumRefIdc(ge.m_numRefIdc);
+      for (Int j = 0; j < ge.m_numRefIdc; j++ )
+      {
+        rps->setRefIdc(j, ge.m_refIdc[j]);
+      }
+#if WRITE_BACK
+      // the following code overwrites the deltaPOC and Used-by-current values read from the config file with the ones
+      // computed from the RefIdc.  A warning is printed if they are not identical.
+      numNeg = 0;
+      numPos = 0;
+      TComReferencePictureSet      RPSTemp;  // temporary variable
+
+      for (Int j = 0; j < ge.m_numRefIdc; j++ )
+      {
+        if (ge.m_refIdc[j])
+        {
+          Int deltaPOC = ge.m_deltaRPS + ((j < RPSRef->getNumberOfPictures())? RPSRef->getDeltaPOC(j) : 0);
+          RPSTemp.setDeltaPOC((numNeg+numPos),deltaPOC);
+          RPSTemp.setUsed((numNeg+numPos),ge.m_refIdc[j]==1?1:0);
+          if (deltaPOC<0)
+          {
+            numNeg++;
+          }
+          else
+          {
+            numPos++;
+          }
+        }
+      }
+      if (numNeg != rps->getNumberOfNegativePictures())
+      {
+        printf("Warning: number of negative pictures in RPS is different between intra and inter RPS specified in the config file.\n");
+        rps->setNumberOfNegativePictures(numNeg);
+        rps->setNumberOfPictures(numNeg+numPos);
+      }
+      if (numPos != rps->getNumberOfPositivePictures())
+      {
+        printf("Warning: number of positive pictures in RPS is different between intra and inter RPS specified in the config file.\n");
+        rps->setNumberOfPositivePictures(numPos);
+        rps->setNumberOfPictures(numNeg+numPos);
+      }
+      RPSTemp.setNumberOfPictures(numNeg+numPos);
+      RPSTemp.setNumberOfNegativePictures(numNeg);
+      RPSTemp.sortDeltaPOC();     // sort the created delta POC before comparing
+      // check if Delta POC and Used are the same
+      // print warning if they are not.
+      for (Int j = 0; j < ge.m_numRefIdc; j++ )
+      {
+        if (RPSTemp.getDeltaPOC(j) != rps->getDeltaPOC(j))
+        {
+          printf("Warning: delta POC is different between intra RPS and inter RPS specified in the config file.\n");
+          rps->setDeltaPOC(j,RPSTemp.getDeltaPOC(j));
+        }
+        if (RPSTemp.getUsed(j) != rps->getUsed(j))
+        {
+          printf("Warning: Used by Current in RPS is different between intra and inter RPS specified in the config file.\n");
+          rps->setUsed(j,RPSTemp.getUsed(j));
+        }
+      }
+#endif
+    }
+#else
+    rps->setInterRPSPrediction(ge.m_interRPSPrediction);
+    if (ge.m_interRPSPrediction)
+    {
+      rps->setDeltaRIdxMinus1(0);
+      rps->setDeltaRPS(ge.m_deltaRPS);
+      rps->setNumRefIdc(ge.m_numRefIdc);
+      for (Int j = 0; j < ge.m_numRefIdc; j++ )
+      {
+        rps->setRefIdc(j, ge.m_refIdc[j]);
+      }
+#if WRITE_BACK
+      // the following code overwrites the deltaPOC and Used-by-current values read from the config file with the ones
+      // computed from the RefIdc.  This is not necessary if both are identical. Currently there is no check to see if they are identical.
+      numNeg = 0;
+      numPos = 0;
+      TComReferencePictureSet*     RPSRef = m_RPSList.getReferencePictureSet(i-1);
+
+      for (Int j = 0; j < ge.m_numRefIdc; j++ )
+      {
+        if (ge.m_refIdc[j])
+        {
+          Int deltaPOC = ge.m_deltaRPS + ((j < RPSRef->getNumberOfPictures())? RPSRef->getDeltaPOC(j) : 0);
+          rps->setDeltaPOC((numNeg+numPos),deltaPOC);
+          rps->setUsed((numNeg+numPos),ge.m_refIdc[j]==1?1:0);
+          if (deltaPOC<0)
+          {
+            numNeg++;
+          }
+          else
+          {
+            numPos++;
+          }
+        }
+      }
+      rps->setNumberOfNegativePictures(numNeg);
+      rps->setNumberOfPositivePictures(numPos);
+      rps->sortDeltaPOC();
+#endif
+    }
+#endif //AUTO_INTER_RPS
+  }
+  //In case of field coding, we need to set special parameters for the first bottom field of the sequence, since it is not specified in the cfg file.
+  //The position = GOPSize + extraRPSs which is (a priori) unused is reserved for this field in the RPS.
+  if (isFieldCoding)
+  {
+    rps = rpsList->getReferencePictureSet(getGOPSize()+m_extraRPSs);
+    rps->setNumberOfPictures(1);
+    rps->setNumberOfNegativePictures(1);
+    rps->setNumberOfPositivePictures(0);
+    rps->setNumberOfLongtermPictures(0);
+    rps->setDeltaPOC(0,-1);
+    rps->setPOC(0,0);
+    rps->setUsed(0,true);
+    rps->setInterRPSPrediction(false);
+    rps->setDeltaRIdxMinus1(0);
+    rps->setDeltaRPS(0);
+    rps->setNumRefIdc(0);
+  }
+}
+
+   // This is a function that
+   // determines what Reference Picture Set to use
+   // for a specific slice (with POC = POCCurr)
+Void TEncTop::selectReferencePictureSet(TComSlice* slice, Int POCCurr, Int GOPid )
+{
+  slice->setRPSidx(GOPid);
+
+  for(Int extraNum=m_iGOPSize; extraNum<m_extraRPSs+m_iGOPSize; extraNum++)
+  {
+    if(m_uiIntraPeriod > 0 && getDecodingRefreshType() > 0)
+    {
+      Int POCIndex = POCCurr%m_uiIntraPeriod;
+      if(POCIndex == 0)
+      {
+        POCIndex = m_uiIntraPeriod;
+      }
+      if(POCIndex == m_GOPList[extraNum].m_POC)
+      {
+        slice->setRPSidx(extraNum);
+      }
+    }
+    else
+    {
+      if(POCCurr==m_GOPList[extraNum].m_POC)
+      {
+        slice->setRPSidx(extraNum);
+      }
+    }
+  }
+
+  if(POCCurr == 1 && slice->getPic()->isField())
+  {
+    slice->setRPSidx(m_iGOPSize+m_extraRPSs);
+  }
+
+  slice->setRPS(getSPS()->getRPSList()->getReferencePictureSet(slice->getRPSidx()));
+  slice->getRPS()->setNumberOfPictures(slice->getRPS()->getNumberOfNegativePictures()+slice->getRPS()->getNumberOfPositivePictures());
+}
+
+Int TEncTop::getReferencePictureSetIdxForSOP(TComSlice* slice, Int POCCurr, Int GOPid )
+{
+  Int rpsIdx = GOPid;
+
+  for(Int extraNum=m_iGOPSize; extraNum<m_extraRPSs+m_iGOPSize; extraNum++)
+  {
+    if(m_uiIntraPeriod > 0 && getDecodingRefreshType() > 0)
+    {
+      Int POCIndex = POCCurr%m_uiIntraPeriod;
+      if(POCIndex == 0)
+      {
+        POCIndex = m_uiIntraPeriod;
+      }
+      if(POCIndex == m_GOPList[extraNum].m_POC)
+      {
+        rpsIdx = extraNum;
+      }
+    }
+    else
+    {
+      if(POCCurr==m_GOPList[extraNum].m_POC)
+      {
+        rpsIdx = extraNum;
+      }
+    }
+  }
+
+  return rpsIdx;
+}
+
+Void  TEncTop::xInitPPSforTiles()
+{
+  m_cPPS.setTileUniformSpacingFlag( m_tileUniformSpacingFlag );
+  m_cPPS.setNumTileColumnsMinus1( m_iNumColumnsMinus1 );
+  m_cPPS.setNumTileRowsMinus1( m_iNumRowsMinus1 );
+  if( !m_tileUniformSpacingFlag )
+  {
+    m_cPPS.setTileColumnWidth( m_tileColumnWidth );
+    m_cPPS.setTileRowHeight( m_tileRowHeight );
+  }
+  m_cPPS.setLoopFilterAcrossTilesEnabledFlag( m_loopFilterAcrossTilesEnabledFlag );
+
+  // # substreams is "per tile" when tiles are independent.
+  if (m_iWaveFrontSynchro )
+  {
+    m_cPPS.setNumSubstreams(m_iWaveFrontSubstreams * (m_iNumColumnsMinus1+1));
+  }
+  else
+  {
+    m_cPPS.setNumSubstreams((m_iNumRowsMinus1+1) * (m_iNumColumnsMinus1+1));
+  }
+}
+
+Void  TEncCfg::xCheckGSParameters()
+{
+  Int   iWidthInCU = ( m_iSourceWidth%g_uiMaxCUWidth ) ? m_iSourceWidth/g_uiMaxCUWidth + 1 : m_iSourceWidth/g_uiMaxCUWidth;
+  Int   iHeightInCU = ( m_iSourceHeight%g_uiMaxCUHeight ) ? m_iSourceHeight/g_uiMaxCUHeight + 1 : m_iSourceHeight/g_uiMaxCUHeight;
+  UInt  uiCummulativeColumnWidth = 0;
+  UInt  uiCummulativeRowHeight = 0;
+
+  //check the column relative parameters
+  if( m_iNumColumnsMinus1 >= (1<<(LOG2_MAX_NUM_COLUMNS_MINUS1+1)) )
+  {
+    printf( "The number of columns is larger than the maximum allowed number of columns.\n" );
+    exit( EXIT_FAILURE );
+  }
+
+  if( m_iNumColumnsMinus1 >= iWidthInCU )
+  {
+    printf( "The current picture can not have so many columns.\n" );
+    exit( EXIT_FAILURE );
+  }
+
+  if( m_iNumColumnsMinus1 && !m_tileUniformSpacingFlag )
+  {
+    for(Int i=0; i<m_iNumColumnsMinus1; i++)
+    {
+      uiCummulativeColumnWidth += m_tileColumnWidth[i];
+    }
+
+    if( uiCummulativeColumnWidth >= iWidthInCU )
+    {
+      printf( "The width of the column is too large.\n" );
+      exit( EXIT_FAILURE );
+    }
+  }
+
+  //check the row relative parameters
+  if( m_iNumRowsMinus1 >= (1<<(LOG2_MAX_NUM_ROWS_MINUS1+1)) )
+  {
+    printf( "The number of rows is larger than the maximum allowed number of rows.\n" );
+    exit( EXIT_FAILURE );
+  }
+
+  if( m_iNumRowsMinus1 >= iHeightInCU )
+  {
+    printf( "The current picture can not have so many rows.\n" );
+    exit( EXIT_FAILURE );
+  }
+
+  if( m_iNumRowsMinus1 && !m_tileUniformSpacingFlag )
+  {
+    for(Int i=0; i<m_iNumRowsMinus1; i++)
+      uiCummulativeRowHeight += m_tileRowHeight[i];
+
+    if( uiCummulativeRowHeight >= iHeightInCU )
+    {
+      printf( "The height of the row is too large.\n" );
+      exit( EXIT_FAILURE );
+    }
+  }
+}
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/TEncTop.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,183 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TEncTop.h
+    \brief    encoder class (header)
+*/
+
+#ifndef __TENCTOP__
+#define __TENCTOP__
+
+// Include files
+#include "TLibCommon/TComList.h"
+#include "TLibCommon/TComPrediction.h"
+#include "TLibCommon/TComTrQuant.h"
+#include "TLibCommon/TComLoopFilter.h"
+#include "TLibCommon/AccessUnit.h"
+
+#include "TLibVideoIO/TVideoIOYuv.h"
+
+#include "TEncCfg.h"
+#include "TEncGOP.h"
+#include "TEncSlice.h"
+#include "TEncEntropy.h"
+#include "TEncCavlc.h"
+#include "TEncSbac.h"
+#include "TEncSearch.h"
+#include "TEncSampleAdaptiveOffset.h"
+#include "TEncPreanalyzer.h"
+#include "TEncRateCtrl.h"
+//! \ingroup TLibEncoder
+//! \{
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// encoder class
+class TEncTop : public TEncCfg
+{
+private:
+  // picture
+  Int                     m_iPOCLast;                     ///< time index (POC)
+  Int                     m_iNumPicRcvd;                  ///< number of received pictures
+  UInt                    m_uiNumAllPicCoded;             ///< number of coded pictures
+  TComList<TComPic*>      m_cListPic;                     ///< dynamic list of pictures
+
+  // encoder search
+  TEncSearch              m_cSearch;                      ///< encoder search class
+  //TEncEntropy*            m_pcEntropyCoder;                     ///< entropy encoder
+  TEncCavlc*              m_pcCavlcCoder;                       ///< CAVLC encoder
+  // coding tool
+  TComTrQuant             m_cTrQuant;                     ///< transform & quantization class
+  TComLoopFilter          m_cLoopFilter;                  ///< deblocking filter class
+  TEncSampleAdaptiveOffset m_cEncSAO;                     ///< sample adaptive offset class
+  TEncEntropy             m_cEntropyCoder;                ///< entropy encoder
+  TEncCavlc               m_cCavlcCoder;                  ///< CAVLC encoder
+  TEncSbac                m_cSbacCoder;                   ///< SBAC encoder
+  TEncBinCABAC            m_cBinCoderCABAC;               ///< bin coder CABAC
+
+  // processing unit
+  TEncGOP                 m_cGOPEncoder;                  ///< GOP encoder
+  TEncSlice               m_cSliceEncoder;                ///< slice encoder
+  TEncCu                  m_cCuEncoder;                   ///< CU encoder
+  // SPS
+  TComSPS                 m_cSPS;                         ///< SPS
+  TComPPS                 m_cPPS;                         ///< PPS
+  // RD cost computation
+  TComRdCost              m_cRdCost;                      ///< RD cost computation class
+  TEncSbac***             m_pppcRDSbacCoder;              ///< temporal storage for RD computation
+  TEncSbac                m_cRDGoOnSbacCoder;             ///< going on SBAC model for RD stage
+#if FAST_BIT_EST
+  TEncBinCABACCounter***  m_pppcBinCoderCABAC;            ///< temporal CABAC state storage for RD computation
+  TEncBinCABACCounter     m_cRDGoOnBinCoderCABAC;         ///< going on bin coder CABAC for RD stage
+#else
+  TEncBinCABAC***         m_pppcBinCoderCABAC;            ///< temporal CABAC state storage for RD computation
+  TEncBinCABAC            m_cRDGoOnBinCoderCABAC;         ///< going on bin coder CABAC for RD stage
+#endif
+
+  // quality control
+  TEncPreanalyzer         m_cPreanalyzer;                 ///< image characteristics analyzer for TM5-step3-like adaptive QP
+
+  TComScalingList         m_scalingList;                 ///< quantization matrix information
+  TEncRateCtrl            m_cRateCtrl;                    ///< Rate control class
+
+protected:
+  Void  xGetNewPicBuffer  ( TComPic*& rpcPic );           ///< get picture buffer which will be processed
+  Void  xInitSPS          ();                             ///< initialize SPS from encoder options
+  Void  xInitPPS          ();                             ///< initialize PPS from encoder options
+
+  Void  xInitPPSforTiles  ();
+  Void  xInitRPS          (Bool isFieldCoding);           ///< initialize RPS from encoder options
+
+public:
+  TEncTop();
+  virtual ~TEncTop();
+
+  Void      create          ();
+  Void      destroy         ();
+  Void      init            (Bool isFieldCoding);
+  Void      deletePicBuffer ();
+
+  // -------------------------------------------------------------------------------------------------------------------
+  // member access functions
+  // -------------------------------------------------------------------------------------------------------------------
+
+  TComList<TComPic*>*     getListPic            () { return  &m_cListPic;             }
+  TEncSearch*             getPredSearch         () { return  &m_cSearch;              }
+
+  TComTrQuant*            getTrQuant            () { return  &m_cTrQuant;             }
+  TComLoopFilter*         getLoopFilter         () { return  &m_cLoopFilter;          }
+  TEncSampleAdaptiveOffset* getSAO              () { return  &m_cEncSAO;              }
+  TEncGOP*                getGOPEncoder         () { return  &m_cGOPEncoder;          }
+  TEncSlice*              getSliceEncoder       () { return  &m_cSliceEncoder;        }
+  TEncCu*                 getCuEncoder          () { return  &m_cCuEncoder;           }
+  TEncEntropy*            getEntropyCoder       () { return  &m_cEntropyCoder;        }
+  TEncCavlc*              getCavlcCoder         () { return  &m_cCavlcCoder;          }
+  TEncSbac*               getSbacCoder          () { return  &m_cSbacCoder;           }
+  TEncBinCABAC*           getBinCABAC           () { return  &m_cBinCoderCABAC;       }
+
+  TComRdCost*             getRdCost             () { return  &m_cRdCost;              }
+  TEncSbac***             getRDSbacCoder        () { return  m_pppcRDSbacCoder;       }
+  TEncSbac*               getRDGoOnSbacCoder    () { return  &m_cRDGoOnSbacCoder;     }
+  TEncRateCtrl*           getRateCtrl           () { return &m_cRateCtrl;             }
+  TComSPS*                getSPS                () { return  &m_cSPS;                 }
+  TComPPS*                getPPS                () { return  &m_cPPS;                 }
+  Void selectReferencePictureSet(TComSlice* slice, Int POCCurr, Int GOPid );
+  Int getReferencePictureSetIdxForSOP(TComSlice* slice, Int POCCurr, Int GOPid );
+  TComScalingList*        getScalingList        () { return  &m_scalingList;         }
+  // -------------------------------------------------------------------------------------------------------------------
+  // encoder function
+  // -------------------------------------------------------------------------------------------------------------------
+
+  /// encode a number of pictures until end-of-sequence
+  Void encode( Bool bEos,
+               TComPicYuv* pcPicYuvOrg,
+               TComPicYuv* pcPicYuvTrueOrg, const InputColourSpaceConversion snrCSC, // used for SNR calculations. Picture in original colour space.
+               TComList<TComPicYuv*>& rcListPicYuvRecOut,
+               std::list<AccessUnit>& accessUnitsOut, Int& iNumEncoded );
+
+  /// encode a number of pictures until end-of-sequence
+  Void encode( Bool bEos, TComPicYuv* pcPicYuvOrg,
+               TComPicYuv* pcPicYuvTrueOrg, const InputColourSpaceConversion snrCSC, // used for SNR calculations. Picture in original colour space.
+               TComList<TComPicYuv*>& rcListPicYuvRecOut,
+               std::list<AccessUnit>& accessUnitsOut, Int& iNumEncoded, Bool isTff);
+
+  Void printSummary(Bool isField) { m_cGOPEncoder.printOutSummary (m_uiNumAllPicCoded, isField, m_printMSEBasedSequencePSNR, m_printSequenceMSE); }
+
+};
+
+//! \}
+
+#endif // __TENCTOP__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/WeightPredAnalysis.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,386 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     WeightPredAnalysis.cpp
+    \brief    weighted prediction encoder class
+*/
+
+#include "../TLibCommon/TypeDef.h"
+#include "../TLibCommon/TComSlice.h"
+#include "../TLibCommon/TComPic.h"
+#include "../TLibCommon/TComPicYuv.h"
+#include "WeightPredAnalysis.h"
+
+#define ABS(a)    ((a) < 0 ? - (a) : (a))
+#define DTHRESH (0.99)
+
+WeightPredAnalysis::WeightPredAnalysis()
+{
+  m_weighted_pred_flag = false;
+  m_weighted_bipred_flag = false;
+
+  for ( UInt lst =0 ; lst<NUM_REF_PIC_LIST_01 ; lst++ )
+  {
+    for ( Int iRefIdx=0 ; iRefIdx<MAX_NUM_REF ; iRefIdx++ )
+    {
+      for ( Int comp=0 ; comp<MAX_NUM_COMPONENT ;comp++ )
+      {
+        WPScalingParam  *pwp   = &(m_wp[lst][iRefIdx][comp]);
+        pwp->bPresentFlag      = false;
+        pwp->uiLog2WeightDenom = 0;
+        pwp->iWeight           = 1;
+        pwp->iOffset           = 0;
+      }
+    }
+  }
+}
+
+
+/** calculate AC and DC values for current original image
+ * \param TComSlice *slice
+ * \returns Void
+ */
+Void WeightPredAnalysis::xCalcACDCParamSlice(TComSlice *const slice)
+{
+  //===== calculate AC/DC value =====
+  TComPicYuv*   pPic = slice->getPic()->getPicYuvOrg();
+
+  WPACDCParam weightACDCParam[MAX_NUM_COMPONENT];
+
+  for(Int componentIndex = 0; componentIndex < pPic->getNumberValidComponents(); componentIndex++)
+  {
+    const ComponentID compID = ComponentID(componentIndex);
+
+    // calculate DC/AC value for channel
+
+    const Int iStride = pPic->getStride(compID);
+    const Int iWidth  = pPic->getWidth(compID);
+    const Int iHeight = pPic->getHeight(compID);
+
+    const Int iSample = iWidth*iHeight;
+
+    Int64 iOrgDC = 0;
+    {
+      const Pel *pPel = pPic->getAddr(compID);
+
+      for(Int y = 0; y < iHeight; y++, pPel+=iStride )
+        for(Int x = 0; x < iWidth; x++ )
+          iOrgDC += (Int)( pPel[x] );
+    }
+
+    const Int64 iOrgNormDC = ((iOrgDC+(iSample>>1)) / iSample);
+
+    Int64 iOrgAC = 0;
+    {
+      const Pel *pPel = pPic->getAddr(compID);
+
+      for(Int y = 0; y < iHeight; y++, pPel += iStride )
+        for(Int x = 0; x < iWidth; x++ )
+          iOrgAC += abs( (Int)pPel[x] - (Int)iOrgNormDC );
+    }
+
+    const Int fixedBitShift = (slice->getSPS()->getUseHighPrecisionPredictionWeighting())?RExt__PREDICTION_WEIGHTING_ANALYSIS_DC_PRECISION:0;
+    weightACDCParam[compID].iDC = (((iOrgDC<<fixedBitShift)+(iSample>>1)) / iSample);
+    weightACDCParam[compID].iAC = iOrgAC;
+  }
+
+  slice->setWpAcDcParam(weightACDCParam);
+}
+
+
+/** store weighted_pred_flag and weighted_bipred_idc values
+ * \param weighted_pred_flag
+ * \param weighted_bipred_idc
+ * \returns Void
+ */
+Void  WeightPredAnalysis::xStoreWPparam(const Bool weighted_pred_flag, const Bool weighted_bipred_flag)
+{
+  m_weighted_pred_flag   = weighted_pred_flag;
+  m_weighted_bipred_flag = weighted_bipred_flag;
+}
+
+
+/** restore weighted_pred_flag and weighted_bipred_idc values
+ * \param TComSlice *slice
+ * \returns Void
+ */
+Void  WeightPredAnalysis::xRestoreWPparam(TComSlice *const slice)
+{
+  slice->getPPS()->setUseWP   (m_weighted_pred_flag);
+  slice->getPPS()->setWPBiPred(m_weighted_bipred_flag);
+}
+
+
+/** check weighted pred or non-weighted pred
+ * \param TComSlice *slice
+ * \returns Void
+ */
+Void  WeightPredAnalysis::xCheckWPEnable(TComSlice *const slice)
+{
+  const TComPicYuv *pPic = slice->getPic()->getPicYuvOrg();
+
+  Int iPresentCnt = 0;
+  for ( UInt lst=0 ; lst<NUM_REF_PIC_LIST_01 ; lst++ )
+  {
+    for ( Int iRefIdx=0 ; iRefIdx<MAX_NUM_REF ; iRefIdx++ )
+    {
+      for(Int componentIndex = 0; componentIndex < pPic->getNumberValidComponents(); componentIndex++)
+      {
+        WPScalingParam  *pwp = &(m_wp[lst][iRefIdx][componentIndex]);
+        iPresentCnt += (Int)pwp->bPresentFlag;
+      }
+    }
+  }
+
+  if(iPresentCnt==0)
+  {
+    slice->getPPS()->setUseWP(false);
+    slice->getPPS()->setWPBiPred(false);
+
+    for ( UInt lst=0 ; lst<NUM_REF_PIC_LIST_01 ; lst++ )
+    {
+      for ( Int iRefIdx=0 ; iRefIdx<MAX_NUM_REF ; iRefIdx++ )
+      {
+        for(Int componentIndex = 0; componentIndex < pPic->getNumberValidComponents(); componentIndex++)
+        {
+          WPScalingParam  *pwp = &(m_wp[lst][iRefIdx][componentIndex]);
+
+          pwp->bPresentFlag      = false;
+          pwp->uiLog2WeightDenom = 0;
+          pwp->iWeight           = 1;
+          pwp->iOffset           = 0;
+        }
+      }
+    }
+    slice->setWpScaling( m_wp );
+  }
+}
+
+
+/** estimate wp tables for explicit wp
+ * \param TComSlice *slice
+ */
+Void WeightPredAnalysis::xEstimateWPParamSlice(TComSlice *const slice)
+{
+  Int  iDenom         = 6;
+  Bool validRangeFlag = false;
+
+  if(slice->getNumRefIdx(REF_PIC_LIST_0)>3)
+  {
+    iDenom = 7;
+  }
+
+  do
+  {
+    validRangeFlag = xUpdatingWPParameters(slice, iDenom);
+    if (!validRangeFlag)
+    {
+      iDenom--; // decrement to satisfy the range limitation
+    }
+  } while (validRangeFlag == false);
+
+  // selecting whether WP is used, or not
+  xSelectWP(slice, iDenom);
+
+  slice->setWpScaling( m_wp );
+}
+
+
+/** update wp tables for explicit wp w.r.t range limitation
+ * \param TComSlice *slice
+ * \returns Bool
+ */
+Bool WeightPredAnalysis::xUpdatingWPParameters(TComSlice *const slice, const Int log2Denom)
+{
+  const Int  numComp                    = slice->getPic()->getPicYuvOrg()->getNumberValidComponents();
+  const Bool bUseHighPrecisionWeighting = slice->getSPS()->getUseHighPrecisionPredictionWeighting();
+  const Int numPredDir                  = slice->isInterP() ? 1 : 2;
+
+  assert (numPredDir <= Int(NUM_REF_PIC_LIST_01));
+
+  for ( Int refList = 0; refList < numPredDir; refList++ )
+  {
+    const RefPicList eRefPicList = ( refList ? REF_PIC_LIST_1 : REF_PIC_LIST_0 );
+
+    for ( Int refIdxTemp = 0; refIdxTemp < slice->getNumRefIdx(eRefPicList); refIdxTemp++ )
+    {
+      WPACDCParam *currWeightACDCParam, *refWeightACDCParam;
+      slice->getWpAcDcParam(currWeightACDCParam);
+      slice->getRefPic(eRefPicList, refIdxTemp)->getSlice(0)->getWpAcDcParam(refWeightACDCParam);
+
+      for ( Int comp = 0; comp < numComp; comp++ )
+      {
+        const ComponentID compID        = ComponentID(comp);
+        const Int         range         = bUseHighPrecisionWeighting ? (1<<g_bitDepth[toChannelType(compID)])/2 : 128;
+        const Int         realLog2Denom = log2Denom + (bUseHighPrecisionWeighting ? RExt__PREDICTION_WEIGHTING_ANALYSIS_DC_PRECISION : (g_bitDepth[toChannelType(compID)] - 8));
+        const Int         realOffset    = ((Int)1<<(realLog2Denom-1));
+
+        // current frame
+        const Int64 currDC = currWeightACDCParam[comp].iDC;
+        const Int64 currAC = currWeightACDCParam[comp].iAC;
+        // reference frame
+        const Int64 refDC  = refWeightACDCParam[comp].iDC;
+        const Int64 refAC  = refWeightACDCParam[comp].iAC;
+
+        // calculating iWeight and iOffset params
+        const Double dWeight = (refAC==0) ? (Double)1.0 : Clip3( -16.0, 15.0, ((Double)currAC / (Double)refAC) );
+        const Int weight     = (Int)( 0.5 + dWeight * (Double)(1<<log2Denom) );
+        const Int offset     = (Int)( ((currDC<<log2Denom) - ((Int64)weight * refDC) + (Int64)realOffset) >> realLog2Denom );
+
+        Int clippedOffset;
+        if(isChroma(compID)) // Chroma offset range limitation
+        {
+          const Int pred        = ( range - ( ( range*weight)>>(log2Denom) ) );
+          const Int deltaOffset = Clip3( -4*range, 4*range-1, (offset - pred) ); // signed 10bit
+
+          clippedOffset = Clip3( -range, range-1, (deltaOffset + pred) );  // signed 8bit
+        }
+        else // Luma offset range limitation
+        {
+          clippedOffset = Clip3( -range, range-1, offset);
+        }
+
+        // Weighting factor limitation
+        const Int defaultWeight = (1<<log2Denom);
+        const Int deltaWeight   = (defaultWeight - weight);
+
+        if(deltaWeight >= range || deltaWeight < -range)
+          return false;
+
+        m_wp[refList][refIdxTemp][comp].bPresentFlag      = true;
+        m_wp[refList][refIdxTemp][comp].iWeight           = weight;
+        m_wp[refList][refIdxTemp][comp].iOffset           = clippedOffset;
+        m_wp[refList][refIdxTemp][comp].uiLog2WeightDenom = log2Denom;
+      }
+    }
+  }
+  return true;
+}
+
+
+/** select whether weighted pred enables or not.
+ * \param TComSlice *slice
+ * \param log2Denom
+ * \returns Bool
+ */
+Bool WeightPredAnalysis::xSelectWP(TComSlice *const slice, const Int log2Denom)
+{
+        TComPicYuv *const pPic                                = slice->getPic()->getPicYuvOrg();
+  const Int               iDefaultWeight                      = ((Int)1<<log2Denom);
+  const Int               iNumPredDir                         = slice->isInterP() ? 1 : 2;
+  const Bool              useHighPrecisionPredictionWeighting = slice->getSPS()->getUseHighPrecisionPredictionWeighting();
+
+  assert (iNumPredDir <= Int(NUM_REF_PIC_LIST_01));
+
+  for ( Int iRefList = 0; iRefList < iNumPredDir; iRefList++ )
+  {
+    const RefPicList eRefPicList = ( iRefList ? REF_PIC_LIST_1 : REF_PIC_LIST_0 );
+
+    for ( Int iRefIdxTemp = 0; iRefIdxTemp < slice->getNumRefIdx(eRefPicList); iRefIdxTemp++ )
+    {
+      Int64 iSADWP = 0, iSADnoWP = 0;
+
+      for(Int comp=0; comp<pPic->getNumberValidComponents(); comp++)
+      {
+        const ComponentID  compID     = ComponentID(comp);
+              Pel         *pOrg       = pPic->getAddr(compID);
+              Pel         *pRef       = slice->getRefPic(eRefPicList, iRefIdxTemp)->getPicYuvRec()->getAddr(compID);
+        const Int          iOrgStride = pPic->getStride(compID);
+        const Int          iRefStride = slice->getRefPic(eRefPicList, iRefIdxTemp)->getPicYuvRec()->getStride(compID);
+        const Int          iWidth     = pPic->getWidth(compID);
+        const Int          iHeight    = pPic->getHeight(compID);
+        const Int          bitDepth   = g_bitDepth[toChannelType(compID)];
+
+        // calculate SAD costs with/without wp for luma
+        iSADWP   += xCalcSADvalueWP(bitDepth, pOrg, pRef, iWidth, iHeight, iOrgStride, iRefStride, log2Denom, m_wp[iRefList][iRefIdxTemp][compID].iWeight, m_wp[iRefList][iRefIdxTemp][compID].iOffset, useHighPrecisionPredictionWeighting);
+        iSADnoWP += xCalcSADvalueWP(bitDepth, pOrg, pRef, iWidth, iHeight, iOrgStride, iRefStride, log2Denom, iDefaultWeight, 0, useHighPrecisionPredictionWeighting);
+      }
+
+      const Double dRatio = ((Double)iSADWP / (Double)iSADnoWP);
+      if(dRatio >= (Double)DTHRESH)
+      {
+        for(Int comp=0; comp<pPic->getNumberValidComponents(); comp++)
+        {
+          m_wp[iRefList][iRefIdxTemp][comp].bPresentFlag      = false;
+          m_wp[iRefList][iRefIdxTemp][comp].iOffset           = 0;
+          m_wp[iRefList][iRefIdxTemp][comp].iWeight           = iDefaultWeight;
+          m_wp[iRefList][iRefIdxTemp][comp].uiLog2WeightDenom = log2Denom;
+        }
+      }
+    }
+  }
+
+  return true;
+}
+
+
+/** calculate SAD values for both WP version and non-WP version.
+ * \param Pel *pOrgPel
+ * \param Pel *pRefPel
+ * \param Int iWidth
+ * \param Int iHeight
+ * \param Int iOrgStride
+ * \param Int iRefStride
+ * \param Int iLog2Denom
+ * \param Int iWeight
+ * \param Int iOffset
+ * \returns Int64
+ */
+Int64 WeightPredAnalysis::xCalcSADvalueWP(const Int   bitDepth,
+                                          const Pel  *pOrgPel,
+                                          const Pel  *pRefPel,
+                                          const Int   iWidth,
+                                          const Int   iHeight,
+                                          const Int   iOrgStride,
+                                          const Int   iRefStride,
+                                          const Int   iLog2Denom,
+                                          const Int   iWeight,
+                                          const Int   iOffset,
+                                          const Bool  useHighPrecisionPredictionWeighting)
+{
+  const Int64 iSize          = iWidth*iHeight;
+  const Int64 iRealLog2Denom = useHighPrecisionPredictionWeighting ? iLog2Denom : (iLog2Denom + (bitDepth - 8));
+
+  Int64 iSAD = 0;
+  for( Int y = 0; y < iHeight; y++ )
+  {
+    for( Int x = 0; x < iWidth; x++ )
+    {
+      iSAD += ABS(( ((Int64)pOrgPel[x]<<(Int64)iLog2Denom) - ( (Int64)pRefPel[x] * (Int64)iWeight + ((Int64)iOffset<<iRealLog2Denom) ) ) );
+    }
+    pOrgPel += iOrgStride;
+    pRefPel += iRefStride;
+  }
+
+  return (iSAD/iSize);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibEncoder/WeightPredAnalysis.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,82 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     WeightPredAnalysis.h
+    \brief    weighted prediction encoder class
+*/
+#ifndef __WEIGHTPREDANALYSIS__
+#define __WEIGHTPREDANALYSIS__
+
+#include "../TLibCommon/TypeDef.h"
+#include "../TLibCommon/TComSlice.h"
+#include "TEncCavlc.h"
+
+class  WeightPredAnalysis
+{
+private:
+
+  // member variables
+  Bool            m_weighted_pred_flag;
+  Bool            m_weighted_bipred_flag;
+  WPScalingParam  m_wp[NUM_REF_PIC_LIST_01][MAX_NUM_REF][MAX_NUM_COMPONENT];
+
+  // member functions
+
+  Bool  xSelectWP            (TComSlice *const slice, const Int log2Denom);
+  Bool  xUpdatingWPParameters(TComSlice *const slice, const Int log2Denom);
+
+  Int64 xCalcSADvalueWP      (const Int   bitDepth,
+                              const Pel  *pOrgPel,
+                              const Pel  *pRefPel,
+                              const Int   iWidth,
+                              const Int   iHeight,
+                              const Int   iOrgStride,
+                              const Int   iRefStride,
+                              const Int   iLog2Denom,
+                              const Int   iWeight,
+                              const Int   iOffset,
+                              const Bool  useHighPrecisionPredictionWeighting);
+
+public:
+
+  WeightPredAnalysis();
+
+  // WP analysis :
+  Void  xCalcACDCParamSlice  (TComSlice *const slice);
+  Void  xEstimateWPParamSlice(TComSlice *const slice);
+  Void  xStoreWPparam        (const Bool weighted_pred_flag, const Bool weighted_bipred_flag);
+  Void  xRestoreWPparam      (TComSlice *const slice);
+  Void  xCheckWPEnable       (TComSlice *const slice);
+};
+
+#endif // __WEIGHTPREDANALYSIS__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibVideoIO/TVideoIOYuv.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,988 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TVideoIOYuv.cpp
+    \brief    YUV file I/O class
+*/
+
+#include <cstdlib>
+#include <fcntl.h>
+#include <assert.h>
+#include <sys/stat.h>
+#include <fstream>
+#include <iostream>
+#include <memory.h>
+
+#include "TLibCommon/TComRom.h"
+#include "TVideoIOYuv.h"
+
+using namespace std;
+
+// ====================================================================================================================
+// Local Functions
+// ====================================================================================================================
+
+/**
+ * Scale all pixels in img depending upon sign of shiftbits by a factor of
+ * 2<sup>shiftbits</sup>.
+ *
+ * @param img        pointer to image to be transformed
+ * @param stride  distance between vertically adjacent pixels of img.
+ * @param width   width of active area in img.
+ * @param height  height of active area in img.
+ * @param shiftbits if zero, no operation performed
+ *                  if > 0, multiply by 2<sup>shiftbits</sup>, see scalePlane()
+ *                  if < 0, divide and round by 2<sup>shiftbits</sup> and clip,
+ *                          see invScalePlane().
+ * @param minval  minimum clipping value when dividing.
+ * @param maxval  maximum clipping value when dividing.
+ */
+static Void scalePlane(Pel* img, const UInt stride, const UInt width, const UInt height, Int shiftbits, Pel minval, Pel maxval)
+{
+  if (shiftbits > 0)
+  {
+    // Up-scaling: multiply in place by 2^shiftbits (no clipping on the way up).
+    for (UInt y = 0; y < height; y++, img+=stride)
+      for (UInt x = 0; x < width; x++)
+        img[x] <<= shiftbits;
+  }
+  else if (shiftbits < 0)
+  {
+    shiftbits=-shiftbits;
+
+    // Down-scaling: divide by 2^shiftbits with round-to-nearest, then clip
+    // to [minval, maxval]. shiftbits == 0 falls through as a no-op.
+    Pel rounding = 1 << (shiftbits-1);
+    for (UInt y = 0; y < height; y++, img+=stride)
+      for (UInt x = 0; x < width; x++)
+        img[x] = Clip3(minval, maxval, Pel((img[x] + rounding) >> shiftbits));
+  }
+}
+
+
+// ====================================================================================================================
+// Public member functions
+// ====================================================================================================================
+
+/**
+ * Open file for reading/writing Y'CbCr frames.
+ *
+ * Frames read/written have bitdepth fileBitDepth, and are automatically
+ * formatted as 8 or 16 bit word values (see TVideoIOYuv::write()).
+ *
+ * Image data read or written is converted to/from internalBitDepth
+ * (See scalePlane(), TVideoIOYuv::read() and TVideoIOYuv::write() for
+ * further details).
+ *
+ * \param pchFile          file name string
+ * \param bWriteMode       file open mode: true=read, false=write
+ * \param fileBitDepth     bit-depth array of input/output file data.
+ * \param internalBitDepth bit-depth array to scale image data to/from when reading/writing.
+ */
+Void TVideoIOYuv::open( Char* pchFile, Bool bWriteMode, const Int fileBitDepth[MAX_NUM_CHANNEL_TYPE], const Int MSBExtendedBitDepth[MAX_NUM_CHANNEL_TYPE], const Int internalBitDepth[MAX_NUM_CHANNEL_TYPE] )
+{
+  //NOTE: files cannot have bit depth greater than 16
+  for(UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    m_fileBitdepth       [ch] = std::min<UInt>(fileBitDepth[ch], 16);
+    m_MSBExtendedBitDepth[ch] = MSBExtendedBitDepth[ch];
+    m_bitdepthShift      [ch] = internalBitDepth[ch] - m_MSBExtendedBitDepth[ch];
+
+    // BUGFIX: test the *requested* depth, not m_fileBitdepth — the latter was
+    // clamped to 16 two lines above, which made this diagnostic unreachable.
+    if (fileBitDepth[ch] > 16)
+    {
+      if (bWriteMode)
+      {
+        // Writing: recoverable — samples are clamped to 16-bit precision.
+        std::cerr << "\nWARNING: Cannot write a yuv file of bit depth greater than 16 - output will be right-shifted down to 16-bit precision\n" << std::endl;
+      }
+      else
+      {
+        // Reading: fatal — >16-bit input cannot be represented.
+        // NOTE(review): exit(0) signals success to the shell; a nonzero status
+        // would be more conventional, but is kept to match the codebase style.
+        std::cerr << "\nERROR: Cannot read a yuv file of bit depth greater than 16\n" << std::endl;
+        exit(0);
+      }
+    }
+  }
+
+  if ( bWriteMode )
+  {
+    // Open for writing the reconstructed output.
+    m_cHandle.open( pchFile, ios::binary | ios::out );
+
+    if( m_cHandle.fail() )
+    {
+      printf("\nfailed to write reconstructed YUV file\n");
+      exit(0);
+    }
+  }
+  else
+  {
+    // Open for reading the source sequence.
+    m_cHandle.open( pchFile, ios::binary | ios::in );
+
+    if( m_cHandle.fail() )
+    {
+      printf("\nfailed to open Input YUV file\n");
+      exit(0);
+    }
+  }
+
+  return;
+}
+
+// Close the underlying file stream.
+Void TVideoIOYuv::close()
+{
+  m_cHandle.close();
+}
+
+// True once a read has hit end-of-file on the stream.
+Bool TVideoIOYuv::isEof()
+{
+  return m_cHandle.eof();
+}
+
+// True if the stream is in a failed state (failbit or badbit set).
+Bool TVideoIOYuv::isFail()
+{
+  return m_cHandle.fail();
+}
+
+/**
+ * Skip numFrames in input.
+ *
+ * This function correctly handles cases where the input file is not
+ * seekable, by consuming bytes.
+ */
+Void TVideoIOYuv::skipFrames(UInt numFrames, UInt width, UInt height, ChromaFormat format)
+{
+  if (!numFrames)
+    return;
+
+  //------------------
+  //set the frame size according to the chroma format
+  streamoff frameSize = 0;
+  UInt wordsize=1; // default to 8-bit, unless a channel with more than 8-bits is detected.
+  for (UInt component = 0; component < getNumberValidComponents(format); component++)
+  {
+    ComponentID compID=ComponentID(component);
+    // Sum the sample counts of all planes, each downscaled by its chroma subsampling.
+    frameSize += (width >> getComponentScaleX(compID, format)) * (height >> getComponentScaleY(compID, format));
+    if (m_fileBitdepth[toChannelType(compID)] > 8) wordsize=2;
+  }
+  frameSize *= wordsize;
+  //------------------
+
+  const streamoff offset = frameSize * numFrames;
+
+  /* attempt to seek */
+  if (!!m_cHandle.seekg(offset, ios::cur))
+    return; /* success */
+  m_cHandle.clear();
+
+  /* fall back to consuming the input */
+  // Read full 512-byte chunks, then the remainder, to advance a non-seekable
+  // stream (e.g. a pipe) by exactly `offset` bytes.
+  Char buf[512];
+  const UInt offset_mod_bufsize = offset % sizeof(buf);
+  for (streamoff i = 0; i < offset - offset_mod_bufsize; i += sizeof(buf))
+  {
+    m_cHandle.read(buf, sizeof(buf));
+  }
+  m_cHandle.read(buf, offset_mod_bufsize);
+}
+
+/**
+ * Read width*height pixels from fd into dst, optionally
+ * padding the left and right edges by edge-extension.  Input may be
+ * either 8bit or 16bit little-endian lsb-aligned words.
+ *
+ * @param dst     destination image
+ * @param fd      input file stream
+ * @param is16bit true if input file carries > 8bit data, false otherwise.
+ * @param stride  distance between vertically adjacent pixels of dst.
+ * @param width   width of active area in dst.
+ * @param height  height of active area in dst.
+ * @param pad_x   length of horizontal padding.
+ * @param pad_y   length of vertical padding.
+ * @return true for success, false in case of error
+ */
+static Bool readPlane(Pel* dst,
+                      istream& fd,
+                      Bool is16bit,
+                      UInt stride444,
+                      UInt width444,
+                      UInt height444,
+                      UInt pad_x444,
+                      UInt pad_y444,
+                      const ComponentID compID,
+                      const ChromaFormat destFormat,
+                      const ChromaFormat fileFormat,
+                      const UInt fileBitDepth)
+{
+  // All *444 arguments are luma-resolution values; they are scaled down here
+  // by the per-component chroma subsampling of the file and destination formats.
+  const UInt csx_file =getComponentScaleX(compID, fileFormat);
+  const UInt csy_file =getComponentScaleY(compID, fileFormat);
+  const UInt csx_dest =getComponentScaleX(compID, destFormat);
+  const UInt csy_dest =getComponentScaleY(compID, destFormat);
+
+  const UInt width_dest       = width444 >>csx_dest;
+  const UInt height_dest      = height444>>csy_dest;
+  const UInt pad_x_dest       = pad_x444>>csx_dest;
+  const UInt pad_y_dest       = pad_y444>>csy_dest;
+  const UInt stride_dest      = stride444>>csx_dest;
+
+  const UInt full_width_dest  = width_dest+pad_x_dest;
+  const UInt full_height_dest = height_dest+pad_y_dest;
+
+  // Bytes per file line for this component (16-bit samples occupy 2 bytes).
+  const UInt stride_file      = (width444 * (is16bit ? 2 : 1)) >> csx_file;
+
+  UChar  *buf   = new UChar[stride_file];
+
+  if (compID!=COMPONENT_Y && (fileFormat==CHROMA_400 || destFormat==CHROMA_400))
+  {
+    // Monochrome on either side: synthesize and/or skip chroma.
+    if (destFormat!=CHROMA_400)
+    {
+      // set chrominance data to mid-range: (1<<(fileBitDepth-1))
+      const Pel value=Pel(1<<(fileBitDepth-1));
+      for (UInt y = 0; y < full_height_dest; y++, dst+=stride_dest)
+        for (UInt x = 0; x < full_width_dest; x++)
+          dst[x] = value;
+    }
+
+    if (fileFormat!=CHROMA_400)
+    {
+      // File carries chroma the destination does not want: skip past it.
+      const UInt height_file      = height444>>csy_file;
+      fd.seekg(height_file*stride_file, ios::cur);
+      if (fd.eof() || fd.fail() )
+      {
+        delete[] buf;
+        return false;
+      }
+    }
+  }
+  else
+  {
+    // General path: iterate in 4:4:4 line space; the masks select which
+    // iterations correspond to a real file line / destination line.
+    const UInt mask_y_file=(1<<csy_file)-1;
+    const UInt mask_y_dest=(1<<csy_dest)-1;
+    for(UInt y444=0; y444<height444; y444++)
+    {
+      if ((y444&mask_y_file)==0)
+      {
+        // read a new line
+        fd.read(reinterpret_cast<Char*>(buf), stride_file);
+        if (fd.eof() || fd.fail() )
+        {
+          delete[] buf;
+          return false;
+        }
+      }
+
+      if ((y444&mask_y_dest)==0)
+      {
+        // process current destination line
+        if (csx_file < csx_dest)
+        {
+          // eg file is 444, dest is 422.
+          const UInt sx=csx_dest-csx_file;
+          if (!is16bit)
+          {
+            for (UInt x = 0; x < width_dest; x++)
+              dst[x] = buf[x<<sx];
+          }
+          else
+          {
+            // 16-bit samples are little-endian, lsb-aligned in the file.
+            for (UInt x = 0; x < width_dest; x++)
+            {
+              dst[x] = Pel(buf[(x<<sx)*2+0]) | (Pel(buf[(x<<sx)*2+1])<<8);
+            }
+          }
+        }
+        else
+        {
+          // eg file is 422, dest is 444.
+          const UInt sx=csx_file-csx_dest;
+          if (!is16bit)
+          {
+            for (UInt x = 0; x < width_dest; x++)
+              dst[x] = buf[x>>sx];
+          }
+          else
+          {
+            for (UInt x = 0; x < width_dest; x++)
+              dst[x] = Pel(buf[(x>>sx)*2+0]) | (Pel(buf[(x>>sx)*2+1])<<8);
+          }
+        }
+
+        // process right hand side padding
+        // Edge-extend: replicate the last active sample across the pad area.
+        const Pel val=dst[width_dest-1];
+        for (UInt x = width_dest; x < full_width_dest; x++)
+          dst[x] = val;
+
+        dst += stride_dest;
+      }
+    }
+
+    // process lower padding
+    // Edge-extend vertically by copying the row immediately above.
+    for (UInt y = height_dest; y < full_height_dest; y++, dst+=stride_dest)
+      for (UInt x = 0; x < full_width_dest; x++)
+        dst[x] = (dst - stride_dest)[x];
+  }
+  delete[] buf;
+  return true;
+}
+
+/**
+ * Write width*height pixels info fd from src.
+ *
+ * @param fd      output file stream
+ * @param src     source image
+ * @param is16bit true if input file carries > 8bit data, false otherwise.
+ * @param stride  distance between vertically adjacent pixels of src.
+ * @param width   width of active area in src.
+ * @param height  height of active area in src.
+ * @return true for success, false in case of error
+ */
+static Bool writePlane(ostream& fd, Pel* src, Bool is16bit,
+                       UInt stride444,
+                       UInt width444, UInt height444,
+                       const ComponentID compID,
+                       const ChromaFormat srcFormat,
+                       const ChromaFormat fileFormat,
+                       const UInt fileBitDepth)
+{
+  // Mirror of readPlane: converts one source plane to the file's chroma
+  // format and sample width, writing line by line. Returns false on I/O error.
+  const UInt csx_file =getComponentScaleX(compID, fileFormat);
+  const UInt csy_file =getComponentScaleY(compID, fileFormat);
+  const UInt csx_src  =getComponentScaleX(compID, srcFormat);
+  const UInt csy_src  =getComponentScaleY(compID, srcFormat);
+
+  const UInt stride_src      = stride444>>csx_src;
+
+  const UInt stride_file      = (width444 * (is16bit ? 2 : 1)) >> csx_file;
+  const UInt width_file       = width444 >>csx_file;
+  const UInt height_file      = height444>>csy_file;
+
+  UChar  *buf   = new UChar[stride_file];
+
+  if (compID!=COMPONENT_Y && (fileFormat==CHROMA_400 || srcFormat==CHROMA_400))
+  {
+    if (fileFormat!=CHROMA_400)
+    {
+      // Source has no chroma but the file format requires it: emit mid-range
+      // (grey) chroma samples.
+      const UInt value=1<<(fileBitDepth-1);
+
+      for(UInt y=0; y< height_file; y++)
+      {
+        if (!is16bit)
+        {
+          UChar val(value);
+          for (UInt x = 0; x < width_file; x++)
+            buf[x]=val;
+        }
+        else
+        {
+          // Little-endian 16-bit samples.
+          UShort val(value);
+          for (UInt x = 0; x < width_file; x++)
+          {
+            buf[2*x+0]= (val>>0) & 0xff;
+            buf[2*x+1]= (val>>8) & 0xff;
+          }
+        }
+
+        fd.write(reinterpret_cast<Char*>(buf), stride_file);
+        if (fd.eof() || fd.fail() )
+        {
+          delete[] buf;
+          return false;
+        }
+      }
+    }
+  }
+  else
+  {
+    // Iterate in 4:4:4 line space; masks pick the iterations that map to a
+    // real file line / a new source line.
+    const UInt mask_y_file=(1<<csy_file)-1;
+    const UInt mask_y_src =(1<<csy_src )-1;
+    for(UInt y444=0; y444<height444; y444++)
+    {
+      if ((y444&mask_y_file)==0)
+      {
+        // write a new line
+        if (csx_file < csx_src)
+        {
+          // eg file is 444, source is 422.
+          const UInt sx=csx_src-csx_file;
+          if (!is16bit)
+          {
+            for (UInt x = 0; x < width_file; x++)
+            {
+              buf[x] = (UChar)(src[x>>sx]);
+            }
+          }
+          else
+          {
+            for (UInt x = 0; x < width_file; x++)
+            {
+              buf[2*x  ] = (src[x>>sx]>>0) & 0xff;
+              buf[2*x+1] = (src[x>>sx]>>8) & 0xff;
+            }
+          }
+        }
+        else
+        {
+          // eg file is 422, src is 444.
+          const UInt sx=csx_file-csx_src;
+          if (!is16bit)
+          {
+            for (UInt x = 0; x < width_file; x++)
+            {
+              buf[x] = (UChar)(src[x<<sx]);
+            }
+          }
+          else
+          {
+            for (UInt x = 0; x < width_file; x++)
+            {
+              buf[2*x  ] = (src[x<<sx]>>0) & 0xff;
+              buf[2*x+1] = (src[x<<sx]>>8) & 0xff;
+            }
+          }
+        }
+
+        fd.write(reinterpret_cast<Char*>(buf), stride_file);
+        if (fd.eof() || fd.fail() )
+        {
+          delete[] buf;
+          return false;
+        }
+      }
+
+      if ((y444&mask_y_src)==0)
+      {
+        src += stride_src;
+      }
+
+    }
+  }
+  delete[] buf;
+  return true;
+}
+
+// Interlaced variant of writePlane: interleaves one line from the top field
+// and one from the bottom field per output step. isTff selects which field
+// comes first (top-field-first). Returns false on I/O error.
+static Bool writeField(ostream& fd, Pel* top, Pel* bottom, Bool is16bit,
+                       UInt stride444,
+                       UInt width444, UInt height444,
+                       const ComponentID compID,
+                       const ChromaFormat srcFormat,
+                       const ChromaFormat fileFormat,
+                       const UInt fileBitDepth, const Bool isTff)
+{
+  const UInt csx_file =getComponentScaleX(compID, fileFormat);
+  const UInt csy_file =getComponentScaleY(compID, fileFormat);
+  const UInt csx_src  =getComponentScaleX(compID, srcFormat);
+  const UInt csy_src  =getComponentScaleY(compID, srcFormat);
+
+  const UInt stride_src      = stride444>>csx_src;
+
+  const UInt stride_file      = (width444 * (is16bit ? 2 : 1)) >> csx_file;
+  const UInt width_file       = width444 >>csx_file;
+  const UInt height_file      = height444>>csy_file;
+
+  // Double-size buffer: holds one line of each field before a single write.
+  UChar  *buf   = new UChar[stride_file * 2];
+
+  if (compID!=COMPONENT_Y && (fileFormat==CHROMA_400 || srcFormat==CHROMA_400))
+  {
+    if (fileFormat!=CHROMA_400)
+    {
+      // Source has no chroma: emit mid-range (grey) chroma for both fields.
+      const UInt value=1<<(fileBitDepth-1);
+
+      for(UInt y=0; y< height_file; y++)
+      {
+        for (UInt field = 0; field < 2; field++)
+        {
+          UChar *fieldBuffer = buf + (field * stride_file);
+
+          if (!is16bit)
+          {
+            UChar val(value);
+            for (UInt x = 0; x < width_file; x++)
+              fieldBuffer[x]=val;
+          }
+          else
+          {
+            UShort val(value);
+            for (UInt x = 0; x < width_file; x++)
+            {
+              fieldBuffer[2*x+0]= (val>>0) & 0xff;
+              fieldBuffer[2*x+1]= (val>>8) & 0xff;
+            }
+          }
+        }
+
+        fd.write(reinterpret_cast<Char*>(buf), (stride_file * 2));
+        if (fd.eof() || fd.fail() )
+        {
+          delete[] buf;
+          return false;
+        }
+      }
+    }
+  }
+  else
+  {
+    const UInt mask_y_file=(1<<csy_file)-1;
+    const UInt mask_y_src =(1<<csy_src )-1;
+    for(UInt y444=0; y444<height444; y444++)
+    {
+      if ((y444&mask_y_file)==0)
+      {
+        for (UInt field = 0; field < 2; field++)
+        {
+          UChar *fieldBuffer = buf + (field * stride_file);
+          // Field order: TFF writes top then bottom; BFF the reverse.
+          Pel   *src         = (((field == 0) && isTff) || ((field == 1) && (!isTff))) ? top : bottom;
+
+          // write a new line
+          if (csx_file < csx_src)
+          {
+            // eg file is 444, source is 422.
+            const UInt sx=csx_src-csx_file;
+            if (!is16bit)
+            {
+              for (UInt x = 0; x < width_file; x++)
+              {
+                fieldBuffer[x] = (UChar)(src[x>>sx]);
+              }
+            }
+            else
+            {
+              for (UInt x = 0; x < width_file; x++)
+              {
+                fieldBuffer[2*x  ] = (src[x>>sx]>>0) & 0xff;
+                fieldBuffer[2*x+1] = (src[x>>sx]>>8) & 0xff;
+              }
+            }
+          }
+          else
+          {
+            // eg file is 422, src is 444.
+            const UInt sx=csx_file-csx_src;
+            if (!is16bit)
+            {
+              for (UInt x = 0; x < width_file; x++)
+              {
+                fieldBuffer[x] = (UChar)(src[x<<sx]);
+              }
+            }
+            else
+            {
+              for (UInt x = 0; x < width_file; x++)
+              {
+                fieldBuffer[2*x  ] = (src[x<<sx]>>0) & 0xff;
+                fieldBuffer[2*x+1] = (src[x<<sx]>>8) & 0xff;
+              }
+            }
+          }
+        }
+
+        fd.write(reinterpret_cast<Char*>(buf), (stride_file * 2));
+        if (fd.eof() || fd.fail() )
+        {
+          delete[] buf;
+          return false;
+        }
+      }
+
+      if ((y444&mask_y_src)==0)
+      {
+        top    += stride_src;
+        bottom += stride_src;
+      }
+
+    }
+  }
+  delete[] buf;
+  return true;
+}
+
+/**
+ * Read one Y'CbCr frame, performing any required input scaling to change
+ * from the bitdepth of the input file to the internal bit-depth.
+ *
+ * If a bit-depth reduction is required, and internalBitdepth >= 8, then
+ * the input file is assumed to be ITU-R BT.601/709 compliant, and the
+ * resulting data is clipped to the appropriate legal range, as if the
+ * file had been provided at the lower-bitdepth compliant to Rec601/709.
+ *
+ * @param pPicYuv      input picture YUV buffer class pointer
+ * @param aiPad        source padding size, aiPad[0] = horizontal, aiPad[1] = vertical
+ * @return true for success, false in case of error
+ */
+Bool TVideoIOYuv::read ( TComPicYuv*  pPicYuvUser, TComPicYuv* pPicYuvTrueOrg, const InputColourSpaceConversion ipcsc, Int aiPad[2], ChromaFormat format )
+{
+  // check end-of-file
+  if ( isEof() ) return false;
+  TComPicYuv *pPicYuv=pPicYuvTrueOrg;
+  // An out-of-range format means "same as the picture buffer's format".
+  if (format>=NUM_CHROMA_FORMAT) format=pPicYuv->getChromaFormat();
+
+  Bool is16bit = false;
+
+  for(UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    if (m_fileBitdepth[ch] > 8) is16bit=true;
+  }
+
+  const UInt stride444      = pPicYuv->getStride(COMPONENT_Y);
+
+  // compute actual YUV width & height excluding padding size
+  const UInt pad_h444       = aiPad[0];
+  const UInt pad_v444       = aiPad[1];
+
+  const UInt width_full444  = pPicYuv->getWidth(COMPONENT_Y);
+  const UInt height_full444 = pPicYuv->getHeight(COMPONENT_Y);
+
+  const UInt width444       = width_full444 - pad_h444;
+  const UInt height444      = height_full444 - pad_v444;
+
+  // NOTE(review): the loop runs over all MAX_NUM_COMPONENT slots (not just the
+  // picture's valid components), presumably so readPlane consumes/skips file
+  // chroma even when the destination lacks it — verify against readPlane's
+  // CHROMA_400 handling.
+  for(UInt comp=0; comp<MAX_NUM_COMPONENT; comp++)
+  {
+    const ComponentID compID = ComponentID(comp);
+    const ChannelType chType=toChannelType(compID);
+
+    const Int desired_bitdepth = m_MSBExtendedBitDepth[chType] + m_bitdepthShift[chType];
+
+#if !CLIP_TO_709_RANGE
+    const Pel minval = 0;
+    const Pel maxval = (1 << desired_bitdepth) - 1;
+#else
+    const Bool b709Compliance=(m_bitdepthShift[chType] < 0 && desired_bitdepth >= 8);     /* ITU-R BT.709 compliant clipping for converting say 10b to 8b */
+    const Pel minval = b709Compliance? ((   1 << (desired_bitdepth - 8))   ) : 0;
+    const Pel maxval = b709Compliance? ((0xff << (desired_bitdepth - 8)) -1) : (1 << desired_bitdepth) - 1;
+#endif
+
+    if (! readPlane(pPicYuv->getAddr(compID), m_cHandle, is16bit, stride444, width444, height444, pad_h444, pad_v444, compID, pPicYuv->getChromaFormat(), format, m_fileBitdepth[chType]))
+    {
+      return false;
+    }
+
+    if (compID < pPicYuv->getNumberValidComponents() )
+    {
+      // Scale the plane from the MSB-extended file depth to the internal depth.
+      const UInt csx=getComponentScaleX(compID, pPicYuv->getChromaFormat());
+      const UInt csy=getComponentScaleY(compID, pPicYuv->getChromaFormat());
+      scalePlane(pPicYuv->getAddr(compID), stride444>>csx, width_full444>>csx, height_full444>>csy, m_bitdepthShift[chType], minval, maxval);
+    }
+  }
+
+  // Apply the optional input colour-space conversion into the user buffer.
+  Int internalBitDepth[MAX_NUM_CHANNEL_TYPE];
+  for(UInt chType=0; chType<MAX_NUM_CHANNEL_TYPE; chType++)
+  {
+    internalBitDepth[chType] = m_bitdepthShift[chType] + m_MSBExtendedBitDepth[chType];
+  }
+  ColourSpaceConvert(*pPicYuvTrueOrg, *pPicYuvUser, ipcsc, internalBitDepth, true);
+
+  return true;
+}
+
+/**
+ * Write one Y'CbCr frame. No bit-depth conversion is performed, pcPicYuv is
+ * assumed to be at TVideoIO::m_fileBitdepth depth.
+ *
+ * @param pPicYuv     input picture YUV buffer class pointer
+ * @param aiPad       source padding size, aiPad[0] = horizontal, aiPad[1] = vertical
+ * @return true for success, false in case of error
+ */
+Bool TVideoIOYuv::write( TComPicYuv* pPicYuvUser, const InputColourSpaceConversion ipCSC, Int confLeft, Int confRight, Int confTop, Int confBottom, ChromaFormat format )
+{
+  // Optionally undo the input colour-space conversion before writing.
+  TComPicYuv cPicYuvCSCd;
+  if (ipCSC!=IPCOLOURSPACE_UNCHANGED)
+  {
+    cPicYuvCSCd.create(pPicYuvUser->getWidth(COMPONENT_Y), pPicYuvUser->getHeight(COMPONENT_Y), pPicYuvUser->getChromaFormat(), g_uiMaxCUWidth, g_uiMaxCUHeight, g_uiMaxCUDepth);
+    Int internalBitDepth[MAX_NUM_CHANNEL_TYPE];
+    for(UInt chType=0; chType<MAX_NUM_CHANNEL_TYPE; chType++)
+    {
+      internalBitDepth[chType] = m_bitdepthShift[chType] + m_MSBExtendedBitDepth[chType];
+    }
+    ColourSpaceConvert(*pPicYuvUser, cPicYuvCSCd, ipCSC, internalBitDepth, false);
+  }
+  TComPicYuv *pPicYuv=(ipCSC==IPCOLOURSPACE_UNCHANGED) ? pPicYuvUser : &cPicYuvCSCd;
+
+  // compute actual YUV frame size excluding padding size
+  const Int   iStride444 = pPicYuv->getStride(COMPONENT_Y);
+  const UInt width444  = pPicYuv->getWidth(COMPONENT_Y) - confLeft - confRight;
+  const UInt height444 = pPicYuv->getHeight(COMPONENT_Y) -  confTop  - confBottom;
+  Bool is16bit = false;
+  Bool nonZeroBitDepthShift=false;
+
+  if ((width444 == 0) || (height444 == 0))
+  {
+    printf ("\nWarning: writing %d x %d luma sample output picture!", width444, height444);
+  }
+
+  for(UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    if (m_fileBitdepth[ch] > 8) is16bit=true;
+    if (m_bitdepthShift[ch] != 0) nonZeroBitDepthShift=true;
+  }
+
+  TComPicYuv *dstPicYuv = NULL;
+  Bool retval = true;
+  if (format>=NUM_CHROMA_FORMAT) format=pPicYuv->getChromaFormat();
+
+  if (nonZeroBitDepthShift)
+  {
+    // Internal depth differs from file depth: copy and rescale into a
+    // temporary picture before writing.
+    dstPicYuv = new TComPicYuv;
+    dstPicYuv->create( pPicYuv->getWidth(COMPONENT_Y), pPicYuv->getHeight(COMPONENT_Y), pPicYuv->getChromaFormat(), 1, 1, 0 );
+    pPicYuv->copyToPic(dstPicYuv);
+
+    for(UInt comp=0; comp<dstPicYuv->getNumberValidComponents(); comp++)
+    {
+      const ComponentID compID=ComponentID(comp);
+      const ChannelType ch=toChannelType(compID);
+#if !CLIP_TO_709_RANGE
+      const Pel minval = 0;
+      const Pel maxval = (1 << m_MSBExtendedBitDepth[ch]) - 1;
+#else
+      const Bool b709Compliance=(-m_bitdepthShift[ch] < 0 && m_MSBExtendedBitDepth[ch] >= 8);     /* ITU-R BT.709 compliant clipping for converting say 10b to 8b */
+      const Pel minval = b709Compliance? ((   1 << (m_MSBExtendedBitDepth[ch] - 8))   ) : 0;
+      const Pel maxval = b709Compliance? ((0xff << (m_MSBExtendedBitDepth[ch] - 8)) -1) : (1 << m_MSBExtendedBitDepth[ch]) - 1;
+#endif
+
+      scalePlane(dstPicYuv->getAddr(compID), dstPicYuv->getStride(compID), dstPicYuv->getWidth(compID), dstPicYuv->getHeight(compID), -m_bitdepthShift[ch], minval, maxval);
+    }
+  }
+  else
+  {
+    dstPicYuv = pPicYuv;
+  }
+
+  for(UInt comp=0; retval && comp<dstPicYuv->getNumberValidComponents(); comp++)
+  {
+    const ComponentID compID = ComponentID(comp);
+    const ChannelType ch=toChannelType(compID);
+    // NOTE(review): planeOffset and iStride444 come from pPicYuv but are
+    // applied to dstPicYuv's buffer; when nonZeroBitDepthShift the temporary
+    // is created with different padding, so the strides could differ — verify.
+    const UInt csx = pPicYuv->getComponentScaleX(compID);
+    const UInt csy = pPicYuv->getComponentScaleY(compID);
+    const Int planeOffset =  (confLeft>>csx) + (confTop>>csy) * pPicYuv->getStride(compID);
+    if (! writePlane(m_cHandle, dstPicYuv->getAddr(compID) + planeOffset, is16bit, iStride444, width444, height444, compID, dstPicYuv->getChromaFormat(), format, m_fileBitdepth[ch]))
+    {
+      retval=false;
+    }
+  }
+
+  if (nonZeroBitDepthShift)
+  {
+    dstPicYuv->destroy();
+    delete dstPicYuv;
+  }
+
+  cPicYuvCSCd.destroy();
+
+  return retval;
+}
+
+// Interlaced overload: writes one frame composed of a top and a bottom field
+// picture, interleaving lines via writeField (isTff selects field order).
+Bool TVideoIOYuv::write( TComPicYuv* pPicYuvUserTop, TComPicYuv* pPicYuvUserBottom, const InputColourSpaceConversion ipCSC, Int confLeft, Int confRight, Int confTop, Int confBottom, ChromaFormat format, const Bool isTff )
+{
+
+  TComPicYuv cPicYuvTopCSCd;
+  TComPicYuv cPicYuvBottomCSCd;
+  if (ipCSC!=IPCOLOURSPACE_UNCHANGED)
+  {
+    cPicYuvTopCSCd   .create(pPicYuvUserTop   ->getWidth(COMPONENT_Y), pPicYuvUserTop   ->getHeight(COMPONENT_Y), pPicYuvUserTop   ->getChromaFormat(), g_uiMaxCUWidth, g_uiMaxCUHeight, g_uiMaxCUDepth);
+    cPicYuvBottomCSCd.create(pPicYuvUserBottom->getWidth(COMPONENT_Y), pPicYuvUserBottom->getHeight(COMPONENT_Y), pPicYuvUserBottom->getChromaFormat(), g_uiMaxCUWidth, g_uiMaxCUHeight, g_uiMaxCUDepth);
+    Int internalBitDepth[MAX_NUM_CHANNEL_TYPE];
+    for(UInt chType=0; chType<MAX_NUM_CHANNEL_TYPE; chType++)
+    {
+      internalBitDepth[chType] = m_bitdepthShift[chType] + m_MSBExtendedBitDepth[chType];
+    }
+    ColourSpaceConvert(*pPicYuvUserTop,    cPicYuvTopCSCd,    ipCSC, internalBitDepth, false);
+    ColourSpaceConvert(*pPicYuvUserBottom, cPicYuvBottomCSCd, ipCSC, internalBitDepth, false);
+  }
+  TComPicYuv *pPicYuvTop    = (ipCSC==IPCOLOURSPACE_UNCHANGED) ? pPicYuvUserTop    : &cPicYuvTopCSCd;
+  TComPicYuv *pPicYuvBottom = (ipCSC==IPCOLOURSPACE_UNCHANGED) ? pPicYuvUserBottom : &cPicYuvBottomCSCd;
+
+  Bool is16bit = false;
+  Bool nonZeroBitDepthShift=false;
+
+  for(UInt ch=0; ch<MAX_NUM_CHANNEL_TYPE; ch++)
+  {
+    if (m_fileBitdepth[ch] > 8) is16bit=true;
+    if (m_bitdepthShift[ch] != 0) nonZeroBitDepthShift=true;
+  }
+
+  TComPicYuv *dstPicYuvTop    = NULL;
+  TComPicYuv *dstPicYuvBottom = NULL;
+
+  // Prepare (and, if needed, depth-rescale) each field into its dst picture.
+  for (UInt field = 0; field < 2; field++)
+  {
+    TComPicYuv *pPicYuv = (field == 0) ? pPicYuvTop : pPicYuvBottom;
+
+    if (format>=NUM_CHROMA_FORMAT) format=pPicYuv->getChromaFormat();
+
+    // Reference: selects which dst pointer this iteration assigns.
+    TComPicYuv* &dstPicYuv = (field == 0) ? dstPicYuvTop : dstPicYuvBottom;
+
+    if (nonZeroBitDepthShift)
+    {
+      dstPicYuv = new TComPicYuv;
+      dstPicYuv->create( pPicYuv->getWidth(COMPONENT_Y), pPicYuv->getHeight(COMPONENT_Y), pPicYuv->getChromaFormat(), 1, 1, 0 );
+      pPicYuv->copyToPic(dstPicYuv);
+
+      for(UInt comp=0; comp<dstPicYuv->getNumberValidComponents(); comp++)
+      {
+        const ComponentID compID=ComponentID(comp);
+        const ChannelType ch=toChannelType(compID);
+#if !CLIP_TO_709_RANGE
+        const Pel minval = 0;
+        const Pel maxval = (1 << m_MSBExtendedBitDepth[ch]) - 1;
+#else
+        const Bool b709Compliance=(-m_bitdepthShift[ch] < 0 && m_MSBExtendedBitDepth[ch] >= 8);     /* ITU-R BT.709 compliant clipping for converting say 10b to 8b */
+        const Pel minval = b709Compliance? ((   1 << (m_MSBExtendedBitDepth[ch] - 8))   ) : 0;
+        const Pel maxval = b709Compliance? ((0xff << (m_MSBExtendedBitDepth[ch] - 8)) -1) : (1 << m_MSBExtendedBitDepth[ch]) - 1;
+#endif
+
+        scalePlane(dstPicYuv->getAddr(compID), dstPicYuv->getStride(compID), dstPicYuv->getWidth(compID), dstPicYuv->getHeight(compID), -m_bitdepthShift[ch], minval, maxval);
+      }
+    }
+    else
+    {
+      dstPicYuv = pPicYuv;
+    }
+  }
+
+  Bool retval = true;
+
+  // Both fields must share geometry and layout for interleaved writing.
+  assert(dstPicYuvTop->getNumberValidComponents() == dstPicYuvBottom->getNumberValidComponents());
+  assert(dstPicYuvTop->getChromaFormat()          == dstPicYuvBottom->getChromaFormat()         );
+  assert(dstPicYuvTop->getWidth(COMPONENT_Y)      == dstPicYuvBottom->getWidth(COMPONENT_Y)    );
+  assert(dstPicYuvTop->getHeight(COMPONENT_Y)     == dstPicYuvBottom->getHeight(COMPONENT_Y)    );
+  assert(dstPicYuvTop->getStride(COMPONENT_Y)     == dstPicYuvBottom->getStride(COMPONENT_Y)    );
+
+  for(UInt comp=0; retval && comp<dstPicYuvTop->getNumberValidComponents(); comp++)
+  {
+    const ComponentID compID = ComponentID(comp);
+    const ChannelType ch=toChannelType(compID);
+
+    assert(dstPicYuvTop->getComponentScaleX(compID) == dstPicYuvBottom->getComponentScaleX(compID));
+    assert(dstPicYuvTop->getComponentScaleY(compID) == dstPicYuvBottom->getComponentScaleY(compID));
+    assert(dstPicYuvTop->getStride         (compID) == dstPicYuvBottom->getStride         (compID));
+
+    const UInt width444   = dstPicYuvTop->getWidth(COMPONENT_Y)  - (confLeft + confRight);
+    const UInt height444  = dstPicYuvTop->getHeight(COMPONENT_Y) - (confTop + confBottom);
+
+    if ((width444 == 0) || (height444 == 0))
+    {
+      printf ("\nWarning: writing %d x %d luma sample output picture!", width444, height444);
+    }
+
+    const UInt csx = dstPicYuvTop->getComponentScaleX(compID);
+    const UInt csy = dstPicYuvTop->getComponentScaleY(compID);
+    const Int planeOffset  = (confLeft>>csx) + ( confTop>>csy) * dstPicYuvTop->getStride(compID); //offset is for entire frame - round up for top field and down for bottom field
+
+    if (! writeField(m_cHandle,
+                     (dstPicYuvTop   ->getAddr(compID) + planeOffset),
+                     (dstPicYuvBottom->getAddr(compID) + planeOffset),
+                     is16bit,
+                     dstPicYuvTop->getStride(COMPONENT_Y),
+                     width444, height444, compID, dstPicYuvTop->getChromaFormat(), format, m_fileBitdepth[ch], isTff))
+    {
+      retval=false;
+    }
+  }
+
+  if (nonZeroBitDepthShift)
+  {
+    dstPicYuvTop->destroy();
+    dstPicYuvBottom->destroy();
+    delete dstPicYuvTop;
+    delete dstPicYuvBottom;
+  }
+
+  cPicYuvTopCSCd.destroy();
+  cPicYuvBottomCSCd.destroy();
+
+  return retval;
+}
+
+// Copy one plane of src into one plane of dest row by row; the planes must
+// have identical dimensions (asserted), but may have different strides.
+static Void
+copyPlane(const TComPicYuv &src, const ComponentID srcPlane, TComPicYuv &dest, const ComponentID destPlane)
+{
+  const UInt width=src.getWidth(srcPlane);
+  const UInt height=src.getHeight(srcPlane);
+  assert(dest.getWidth(destPlane) == width);
+  assert(dest.getHeight(destPlane) == height);
+  const Pel *pSrc=src.getAddr(srcPlane);
+  Pel *pDest=dest.getAddr(destPlane);
+  const UInt strideSrc=src.getStride(srcPlane);
+  const UInt strideDest=dest.getStride(destPlane);
+  for(UInt y=0; y<height; y++, pSrc+=strideSrc, pDest+=strideDest)
+  {
+    memcpy(pDest, pSrc, width*sizeof(Pel));
+  }
+}
+
+// static member
+// static member
+// Remap planes between src and dest according to `conversion`. This is a pure
+// plane-permutation/copy (no arithmetic colour transform); bIsForwards selects
+// the direction of the mapping. bitDepths is currently unused here.
+Void TVideoIOYuv::ColourSpaceConvert(const TComPicYuv &src, TComPicYuv &dest, const InputColourSpaceConversion conversion, const Int bitDepths[MAX_NUM_CHANNEL_TYPE], Bool bIsForwards)
+{
+  const ChromaFormat  format=src.getChromaFormat();
+  const UInt          numValidComp=src.getNumberValidComponents();
+
+  switch (conversion)
+  {
+    case IPCOLOURSPACE_YCbCrtoYYY:
+      if (format!=CHROMA_444)
+      {
+        // only 444 is handled.
+        assert(format==CHROMA_444);
+        exit(1);
+      }
+
+      {
+        // Forwards: replicate the luma plane into all components.
+        // Backwards: straight per-plane copy.
+        for(UInt comp=0; comp<numValidComp; comp++)
+          copyPlane(src, ComponentID(bIsForwards?0:comp), dest, ComponentID(comp));
+      }
+      break;
+    case IPCOLOURSPACE_YCbCrtoYCrCb:
+      {
+        // Swap Cb and Cr (mapping is its own inverse).
+        for(UInt comp=0; comp<numValidComp; comp++)
+          copyPlane(src, ComponentID(comp), dest, ComponentID((numValidComp-comp)%numValidComp));
+      }
+      break;
+
+    case IPCOLOURSPACE_RGBtoGBR:
+      {
+        if (format!=CHROMA_444)
+        {
+          // only 444 is handled.
+          assert(format==CHROMA_444);
+          exit(1);
+        }
+
+        // channel re-mapping
+        for(UInt comp=0; comp<numValidComp; comp++)
+        {
+          const ComponentID compIDsrc=ComponentID((comp+1)%numValidComp);
+          const ComponentID compIDdst=ComponentID(comp);
+          copyPlane(src, bIsForwards?compIDsrc:compIDdst, dest, bIsForwards?compIDdst:compIDsrc);
+        }
+      }
+      break;
+
+    case IPCOLOURSPACE_UNCHANGED:
+    default:
+      {
+        for(UInt comp=0; comp<numValidComp; comp++)
+          copyPlane(src, ComponentID(comp), dest, ComponentID(comp));
+      }
+      break;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/TLibVideoIO/TVideoIOYuv.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,86 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     TVideoIOYuv.h
+    \brief    YUV file I/O class (header)
+*/
+
+#ifndef __TVIDEOIOYUV__
+#define __TVIDEOIOYUV__
+
+#include <stdio.h>
+#include <fstream>
+#include <iostream>
+#include "TLibCommon/CommonDef.h"
+#include "TLibCommon/TComPicYuv.h"
+
+using namespace std;
+
+// ====================================================================================================================
+// Class definition
+// ====================================================================================================================
+
+/// YUV file I/O class
+class TVideoIOYuv
+{
+private:
+  fstream   m_cHandle;                                      ///< file handle
+  Int       m_fileBitdepth[MAX_NUM_CHANNEL_TYPE]; ///< bitdepth of input/output video file
+  Int       m_MSBExtendedBitDepth[MAX_NUM_CHANNEL_TYPE];  ///< bitdepth after addition of MSBs (with value 0)
+  Int       m_bitdepthShift[MAX_NUM_CHANNEL_TYPE];  ///< number of bits to increase or decrease image by before/after write/read
+
+public:
+  TVideoIOYuv()           {}
+  virtual ~TVideoIOYuv()  {}
+
+  Void  open  ( Char* pchFile, Bool bWriteMode, const Int fileBitDepth[MAX_NUM_CHANNEL_TYPE], const Int MSBExtendedBitDepth[MAX_NUM_CHANNEL_TYPE], const Int internalBitDepth[MAX_NUM_CHANNEL_TYPE] ); ///< open or create file
+  Void  close ();                                           ///< close file
+
+  Void skipFrames(UInt numFrames, UInt width, UInt height, ChromaFormat format); ///< advance past numFrames frames of the given dimensions/format without decoding them
+
+  // If fileFormat<NUM_CHROMA_FORMAT, the file is read/written in that chroma format;
+  // otherwise the chroma format of the supplied TComPicYuv is used.
+
+
+  Bool  read  ( TComPicYuv* pPicYuv, TComPicYuv* pPicYuvTrueOrg, const InputColourSpaceConversion ipcsc, Int aiPad[2], ChromaFormat fileFormat=NUM_CHROMA_FORMAT );     ///< read one frame with padding parameter
+  Bool  write ( TComPicYuv* pPicYuv, const InputColourSpaceConversion ipCSC, Int confLeft=0, Int confRight=0, Int confTop=0, Int confBottom=0, ChromaFormat fileFormat=NUM_CHROMA_FORMAT );     ///< write one YUV frame with padding parameter
+  Bool  write ( TComPicYuv* pPicYuvTop, TComPicYuv* pPicYuvBottom, const InputColourSpaceConversion ipCSC, Int confLeft=0, Int confRight=0, Int confTop=0, Int confBottom=0, ChromaFormat fileFormat=NUM_CHROMA_FORMAT, Bool isTff=false); ///< write one interlaced frame as a top/bottom field pair (isTff: top field first)
+  static Void ColourSpaceConvert(const TComPicYuv &src, TComPicYuv &dest, const InputColourSpaceConversion conversion, const Int bitDepths[MAX_NUM_CHANNEL_TYPE], Bool bIsForwards); ///< remap colour planes between colour spaces (plane copy/permutation only)
+
+  Bool  isEof ();                                           ///< check for end-of-file
+  Bool  isFail();                                           ///< check for failure
+
+
+};
+
+#endif // __TVIDEOIOYUV__
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/encmain.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,110 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file     encmain.cpp
+    \brief    Encoder application main
+*/
+
+#include <time.h>
+#include <iostream>
+#include "TAppEncTop.h"
+#include "program_options_lite.h"
+
+//! \ingroup TAppEncoder
+//! \{
+
+#include "TLibCommon/Debug.h"
+
+// ====================================================================================================================
+// Main function
+// ====================================================================================================================
+
+// Encoder application entry point: prints the version banner, parses the
+// configuration (command line and/or config file), runs the encode loop,
+// and reports total wall-clock-independent CPU time via clock().
+// Returns 0 on success, 1 on configuration-parse failure.
+int main(int argc, char* argv[])
+{
+  TAppEncTop  cTAppEncTop;
+
+  // print information
+  fprintf( stdout, "\n" );
+  fprintf( stdout, "HM software: Encoder Version [%s] (including RExt)", NV_VERSION );
+  fprintf( stdout, NVM_ONOS );
+  fprintf( stdout, NVM_COMPILEDBY );
+  fprintf( stdout, NVM_BITS );
+  fprintf( stdout, "\n\n" );
+
+  // create application encoder class
+  cTAppEncTop.create();
+
+  // parse configuration
+  try
+  {
+    if(!cTAppEncTop.parseCfg( argc, argv ))
+    {
+      // parseCfg returned false (e.g. help requested or invalid options):
+      // tear down and exit with an error status.
+      cTAppEncTop.destroy();
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+      EnvVar::printEnvVar();
+#endif
+      return 1;
+    }
+  }
+  catch (df::program_options_lite::ParseFailure &e)
+  {
+    std::cerr << "Error parsing option \""<< e.arg <<"\" with argument \""<< e.val <<"\"." << std::endl;
+    return 1;
+  }
+
+#if PRINT_MACRO_VALUES
+  printMacroSettings();
+#endif
+
+#if ENVIRONMENT_VARIABLE_DEBUG_AND_TEST
+  EnvVar::printEnvVarInUse();
+#endif
+
+  // starting time
+  Double dResult;
+  clock_t lBefore = clock();
+
+  // call encoding function
+  cTAppEncTop.encode();
+
+  // ending time
+  // NOTE: clock() measures CPU time, not wall time, on POSIX systems.
+  dResult = (Double)(clock()-lBefore) / CLOCKS_PER_SEC;
+  printf("\n Total Time: %12.3f sec.\n", dResult);
+
+  // destroy application encoder class
+  cTAppEncTop.destroy();
+
+  return 0;
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/encoder_intra_main.cfg	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,103 @@
+#======== File I/O =====================
+BitstreamFile                 : str.bin
+ReconFile                     : rec.yuv
+
+#======== Profile ================
+Profile                       : main-RExt
+#Profile                       : main10
+
+#======== Unit definition ================
+MaxCUWidth                    : 64          # Maximum coding unit width in pixel
+MaxCUHeight                   : 64          # Maximum coding unit height in pixel
+MaxPartitionDepth             : 4           # Maximum coding unit depth
+QuadtreeTULog2MaxSize         : 5           # Log2 of maximum transform size for
+                                            # quadtree-based TU coding (2...6)
+QuadtreeTULog2MinSize         : 2           # Log2 of minimum transform size for
+                                            # quadtree-based TU coding (2...6)
+QuadtreeTUMaxDepthInter       : 3
+QuadtreeTUMaxDepthIntra       : 3
+
+#======== Coding Structure =============
+IntraPeriod                   : 1           # Period of I-Frame ( -1 = only first)
+DecodingRefreshType           : 0           # Random Access 0:none, 1:CRA, 2:IDR, 3:Recovery Point SEI
+GOPSize                       : 1           # GOP Size (number of B slice = GOPSize-1)
+#        Type POC QPoffset QPfactor tcOffsetDiv2 betaOffsetDiv2  temporal_id #ref_pics_active #ref_pics reference pictures 
+
+#=========== Motion Search =============
+FastSearch                    : 1           # 0:Full search  1:TZ search
+SearchRange                   : 64          # (0: Search range is a Full frame)
+HadamardME                    : 1           # Use of hadamard measure for fractional ME
+FEN                           : 1           # Fast encoder decision
+FDM                           : 1           # Fast Decision for Merge RD cost
+
+#======== Quantization =============
+QP                            : 32          # Quantization parameter(0-51)
+MaxDeltaQP                    : 0           # CU-based multi-QP optimization
+MaxCuDQPDepth                 : 0           # Max depth of a minimum CuDQP for sub-LCU-level delta QP
+DeltaQpRD                     : 0           # Slice-based multi-QP optimization
+RDOQ                          : 1           # RDOQ
+RDOQTS                        : 1           # RDOQ for transform skip
+
+#=========== Deblock Filter ============
+DeblockingFilterControlPresent: 0           # Dbl control params present (0=not present, 1=present)
+LoopFilterOffsetInPPS         : 0           # Dbl params: 0=varying params in SliceHeader, param = base_param + GOP_offset_param; 1=constant params in PPS, param = base_param)
+LoopFilterDisable             : 0           # Disable deblocking filter (0=Filter, 1=No Filter)
+LoopFilterBetaOffset_div2     : 0           # base_param: -6 ~ 6
+LoopFilterTcOffset_div2       : 0           # base_param: -6 ~ 6
+DeblockingFilterMetric        : 0           # blockiness metric (automatically configures deblocking parameters in bitstream)
+
+#=========== Misc. ============
+InternalBitDepth              : 8           # codec operating bit-depth
+
+#=========== Coding Tools =================
+SAO                           : 1           # Sample adaptive offset  (0: OFF, 1: ON)
+AMP                           : 1           # Asymmetric motion partitions (0: OFF, 1: ON)
+TransformSkip                 : 1           # Transform skipping (0: OFF, 1: ON)
+TransformSkipFast             : 1           # Fast Transform skipping (0: OFF, 1: ON)
+SAOLcuBoundary                : 0           # SAOLcuBoundary using non-deblocked pixels (0: OFF, 1: ON)
+
+#============ Slices ================
+SliceMode                : 0                # 0: Disable all slice options.
+                                            # 1: Enforce maximum number of LCU in an slice,
+                                            # 2: Enforce maximum number of bytes in an 'slice'
+                                            # 3: Enforce maximum number of tiles in a slice
+SliceArgument            : 1500             # Argument for 'SliceMode'.
+                                            # If SliceMode==1 it represents max. SliceGranularity-sized blocks per slice.
+                                            # If SliceMode==2 it represents max. bytes per slice.
+                                            # If SliceMode==3 it represents max. tiles per slice.
+
+LFCrossSliceBoundaryFlag : 1                # In-loop filtering, including ALF and DB, is across or not across slice boundary.
+                                            # 0:not across, 1: across
+
+#============ PCM ================
+PCMEnabledFlag                      : 0                # 0: No PCM mode
+PCMLog2MaxSize                      : 5                # Log2 of maximum PCM block size.
+PCMLog2MinSize                      : 3                # Log2 of minimum PCM block size.
+PCMInputBitDepthFlag                : 1                # 0: PCM bit-depth is internal bit-depth. 1: PCM bit-depth is input bit-depth.
+PCMFilterDisableFlag                : 0                # 0: Enable loop filtering on I_PCM samples. 1: Disable loop filtering on I_PCM samples.
+
+#============ Tiles ================
+TileUniformSpacing                  : 0                # 0: the column boundaries are indicated by TileColumnWidth array, the row boundaries are indicated by TileRowHeight array
+                                                       # 1: the column and row boundaries are distributed uniformly
+NumTileColumnsMinus1                : 0                # Number of tile columns in a picture minus 1
+TileColumnWidthArray                : 2 3              # Array containing tile column width values in units of CTU (from left to right in picture)   
+NumTileRowsMinus1                   : 0                # Number of tile rows in a picture minus 1
+TileRowHeightArray                  : 2                # Array containing tile row height values in units of CTU (from top to bottom in picture)
+
+LFCrossTileBoundaryFlag             : 1                # In-loop filtering is across or not across tile boundary.
+                                                       # 0:not across, 1: across 
+
+#============ WaveFront ================
+WaveFrontSynchro                    : 0                # 0:  No WaveFront synchronisation (WaveFrontSubstreams must be 1 in this case).
+                                                       # >0: WaveFront synchronises with the LCU above and to the right by this many LCUs.
+
+#=========== Quantization Matrix =================
+ScalingList                   : 0                      # ScalingList 0 : off, 1 : default, 2 : file read
+ScalingListFile               : scaling_list.txt       # Scaling List file name. If the file does not exist, the default matrix is used.
+
+#============ Lossless ================
+TransquantBypassEnableFlag : 0                         # Value of PPS flag.
+CUTransquantBypassFlagForce: 0                         # Force transquant bypass mode, when transquant_bypass_enable_flag is enabled
+
+### DO NOT ADD ANYTHING BELOW THIS LINE ###
+### DO NOT DELETE THE EMPTY LINE BELOW ###
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/libmd5/MD5.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,75 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#pragma once
+#include "libmd5.h"
+#include <string>
+
+//! \ingroup libMD5
+//! \{
+
+static const UInt MD5_DIGEST_STRING_LENGTH=16;
+
+// Thin C++ wrapper around the C libmd5 routines (MD5Init/Update/Final).
+// NOTE(review): MD5_DIGEST_STRING_LENGTH (16) is the raw digest size in
+// bytes, not the length of a hex string representation — the name is
+// misleading; confirm before reusing it for string buffers.
+class MD5
+{
+public:
+  /**
+   * initialize digest state
+   */
+  MD5()
+  {
+    MD5Init(&m_state);
+  }
+
+  /**
+   * compute digest over buf of length len.
+   * multiple calls may extend the digest over more data.
+   */
+  void update(unsigned char *buf, unsigned len)
+  {
+    MD5Update(&m_state, buf, len);
+  }
+
+  /**
+   * flush any outstanding MD5 data, write the digest into digest.
+   * MD5Final zeroizes m_state afterwards, so the object must not be
+   * updated again without re-initialization.
+   */
+  void finalize(unsigned char digest[MD5_DIGEST_STRING_LENGTH])
+  {
+    MD5Final(digest, &m_state);
+  }
+
+private:
+  context_md5_t m_state;  ///< accumulated MD5 state (see libmd5.h)
+};
+
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/libmd5/libmd5.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,256 @@
+/*
+ * This code implements the MD5 message-digest algorithm.  The algorithm was
+ * written by Ron Rivest.  This code was written by Colin Plumb in 1993, our
+ * understanding is that no copyright is claimed and that this code is in the
+ * public domain.
+ *
+ * Equivalent code is available from RSA Data Security, Inc.
+ * This code has been tested against that, and is functionally equivalent,
+ *
+ * To compute the message digest of a chunk of bytes, declare an MD5Context
+ * structure, pass it to MD5Init, call MD5Update as needed on buffers full of
+ * bytes, and then call MD5Final, which will fill a supplied 16-byte array with
+ * the digest.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include "libmd5.h"
+
+//! \ingroup libMD5
+//! \{
+
+static void MD5Transform(uint32_t buf[4], uint32_t const in[16]);
+
+#ifndef __BIG_ENDIAN__
+# define byteReverse(buf, len)    /* Nothing */
+#else
+void byteReverse(uint32_t *buf, unsigned len);
+/*
+ * Note: this code is harmless on little-endian machines.
+ */
+// Convert 'len' 32-bit words in place from little-endian byte order to host
+// order; compiled only when __BIG_ENDIAN__ is defined (no-op macro otherwise).
+// NOTE(review): 'bytes' is plain char, whose signedness is implementation-
+// defined; byte values >= 0x80 would sign-extend in these shifts. Upstream
+// variants of this code read through unsigned char — verify on big-endian
+// targets before relying on this path.
+void byteReverse(uint32_t *buf, unsigned len)
+{
+  uint32_t t;
+  do {
+    char* bytes = (char *) buf;
+    t = ((unsigned) bytes[3] << 8 | bytes[2]) << 16 |
+        ((unsigned) bytes[1] << 8 | bytes[0]);
+    *buf = t;
+    buf++;
+  } while (--len);
+}
+#endif
+
+/*
+ * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
+ * initialization constants.
+ */
+void MD5Init(context_md5_t *ctx)
+{
+  // Standard MD5 initial state words A, B, C, D (RFC 1321, section 3.3).
+  ctx->buf[0] = 0x67452301;
+  ctx->buf[1] = 0xefcdab89;
+  ctx->buf[2] = 0x98badcfe;
+  ctx->buf[3] = 0x10325476;
+
+  // 64-bit message bit counter, split into low/high 32-bit halves.
+  ctx->bits[0] = 0;
+  ctx->bits[1] = 0;
+}
+
+/*
+ * Update context to reflect the concatenation of another buffer full
+ * of bytes.
+ */
+// Absorb 'len' bytes from 'buf' into the running digest. May be called any
+// number of times between MD5Init and MD5Final; partial blocks are buffered
+// in ctx->in until 64 bytes accumulate.
+void MD5Update(context_md5_t *ctx, unsigned char *buf, unsigned len)
+{
+  uint32_t t;
+
+  /* Update bitcount */
+
+  t = ctx->bits[0];
+  if ((ctx->bits[0] = t + ((uint32_t) len << 3)) < t)
+    ctx->bits[1]++;        /* Carry from low to high */
+  ctx->bits[1] += len >> 29;
+
+  t = (t >> 3) & 0x3f;    /* Bytes already in shsInfo->data */
+
+  /* Handle any leading odd-sized chunks */
+
+  if (t) {
+    unsigned char *p = ctx->in.b8 + t;
+
+    t = 64 - t;
+    if (len < t) {
+      /* Still less than a full block buffered: just append and return. */
+      memcpy(p, buf, len);
+      return;
+    }
+    /* Complete the partially-filled block and hash it. */
+    memcpy(p, buf, t);
+    byteReverse(ctx->in.b32, 16);
+    MD5Transform(ctx->buf, ctx->in.b32);
+    buf += t;
+    len -= t;
+  }
+  /* Process data in 64-byte chunks */
+
+  while (len >= 64) {
+    memcpy(ctx->in.b8, buf, 64);
+    byteReverse(ctx->in.b32, 16);
+    MD5Transform(ctx->buf, ctx->in.b32);
+    buf += 64;
+    len -= 64;
+  }
+
+    /* Handle any remaining bytes of data. */
+
+  memcpy(ctx->in.b8, buf, len);
+}
+
+/*
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
+ * 1 0* (64-bit count of bits processed, MSB-first)
+ */
+// Finish the digest: apply the 1-0* padding, append the 64-bit message
+// length, run the last transform(s), and write the 16-byte digest. The
+// context is zeroized on exit and must be re-initialized before reuse.
+void MD5Final(unsigned char digest[16], context_md5_t *ctx)
+{
+  unsigned count;
+  unsigned char *p;
+
+  /* Compute number of bytes mod 64 */
+  count = (ctx->bits[0] >> 3) & 0x3F;
+
+  /* Set the first char of padding to 0x80.  This is safe since there is
+     always at least one byte free */
+  p = ctx->in.b8 + count;
+  *p++ = 0x80;
+
+  /* Bytes of padding needed to make 64 bytes */
+  count = 64 - 1 - count;
+
+  /* Pad out to 56 mod 64 */
+  if (count < 8) {
+    /* Two lots of padding:  Pad the first block to 64 bytes */
+    memset(p, 0, count);
+    byteReverse(ctx->in.b32, 16);
+    MD5Transform(ctx->buf, ctx->in.b32);
+
+    /* Now fill the next block with 56 bytes */
+    memset(ctx->in.b8, 0, 56);
+  } else {
+    /* Pad block to 56 bytes */
+    memset(p, 0, count - 8);
+  }
+  /* Only the first 14 words are byte-swapped; the length words below are
+     stored directly in host order. */
+  byteReverse(ctx->in.b32, 14);
+
+  /* Append length in bits and transform */
+  ctx->in.b32[14] = ctx->bits[0];
+  ctx->in.b32[15] = ctx->bits[1];
+
+  MD5Transform(ctx->buf, ctx->in.b32);
+  byteReverse((uint32_t *) ctx->buf, 4);
+  memcpy(digest, ctx->buf, 16);
+
+  memset(ctx, 0, sizeof(* ctx));    /* In case it's sensitive */
+  /* The original version of this code omitted the asterisk. In
+     effect, only the first part of ctx was wiped with zeros, not
+     the whole thing. Bug found by Derek Jones. Original line: */
+  // memset(ctx, 0, sizeof(ctx));    /* In case it's sensitive */
+}
+
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f, w, x, y, z, data, s) \
+    ( w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x )
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data.  MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
+// One MD5 compression step: mixes a 64-byte (16-word) message block 'in'
+// into the 4-word state 'buf'. Four rounds of 16 steps each, using the
+// round functions F1..F4 and the per-step constants/shifts of RFC 1321.
+static void MD5Transform(uint32_t buf[4], uint32_t const in[16])
+{
+  register uint32_t a, b, c, d;
+
+  a = buf[0];
+  b = buf[1];
+  c = buf[2];
+  d = buf[3];
+
+  /* Round 1 (F1), shifts 7/12/17/22, message words in order */
+  MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+  MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+  MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+  MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+  MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+  MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+  MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+  MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+  MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+  MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+  MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+  MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+  MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+  MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+  MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+  MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+  /* Round 2 (F2), shifts 5/9/14/20, word index stride 5 mod 16 */
+  MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+  MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+  MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+  MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+  MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+  MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+  MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+  MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+  MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+  MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+  MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+  MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+  MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+  MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+  MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+  MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+  /* Round 3 (F3), shifts 4/11/16/23, word index stride 3 mod 16 */
+  MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+  MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+  MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+  MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+  MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+  MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+  MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+  MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+  MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+  MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+  MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+  MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+  MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+  MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+  MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+  MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+  /* Round 4 (F4), shifts 6/10/15/21, word index stride 7 mod 16 */
+  MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+  MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+  MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+  MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+  MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+  MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+  MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+  MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+  MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+  MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+  MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+  MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+  MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+  MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+  MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+  MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+  /* Add this block's result back into the chaining state. */
+  buf[0] += a;
+  buf[1] += b;
+  buf[2] += c;
+  buf[3] += d;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/libmd5/libmd5.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,58 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#pragma once
+#include <stdint.h>
+
+//! \ingroup libMD5
+//! \{
+
+// Running MD5 state, as consumed by MD5Init/MD5Update/MD5Final.
+typedef struct _context_md5_t {
+  uint32_t buf[4];   /* chaining state words A, B, C, D */
+  uint32_t bits[2];  /* 64-bit count of message bits: [0]=low, [1]=high */
+  union {
+    unsigned char b8[64];  /* pending input block, byte view */
+    uint32_t b32[16];      /* same block, 32-bit word view for MD5Transform */
+  } in;
+} context_md5_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void MD5Init(context_md5_t *ctx);
+void MD5Update(context_md5_t *ctx, unsigned char *buf, unsigned len);
+void MD5Final(unsigned char digest[16], context_md5_t *ctx);
+#ifdef __cplusplus
+}
+#endif
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/program_options_lite.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,498 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdlib.h>
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <string>
+#include <list>
+#include <map>
+#include <algorithm>
+#include "program_options_lite.h"
+
+using namespace std;
+
+//! \ingroup TAppCommon
+//! \{
+
+namespace df
+{
+  namespace program_options_lite
+  {
+
+    Options::~Options()
+    {
+      for(Options::NamesPtrList::iterator it = opt_list.begin(); it != opt_list.end(); it++)
+      {
+        delete *it;
+      }
+    }
+
+    void Options::addOption(OptionBase *opt)
+    {
+      Names* names = new Names();
+      names->opt = opt;
+      string& opt_string = opt->opt_string;
+
+      size_t opt_start = 0;
+      for (size_t opt_end = 0; opt_end != string::npos;)
+      {
+        opt_end = opt_string.find_first_of(',', opt_start);
+        bool force_short = 0;
+        if (opt_string[opt_start] == '-')
+        {
+          opt_start++;
+          force_short = 1;
+        }
+        string opt_name = opt_string.substr(opt_start, opt_end - opt_start);
+        if (force_short || opt_name.size() == 1)
+        {
+          names->opt_short.push_back(opt_name);
+          opt_short_map[opt_name].push_back(names);
+        }
+        else
+        {
+          names->opt_long.push_back(opt_name);
+          opt_long_map[opt_name].push_back(names);
+        }
+        opt_start += opt_end + 1;
+      }
+      opt_list.push_back(names);
+    }
+
+    /* Helper method to initiate adding options to Options */
+    OptionSpecific Options::addOptions()
+    {
+      return OptionSpecific(*this);
+    }
+
+    static void setOptions(Options::NamesPtrList& opt_list, const string& value)
+    {
+      /* multiple options may be registered for the same name:
+       *   allow each to parse value */
+      for (Options::NamesPtrList::iterator it = opt_list.begin(); it != opt_list.end(); ++it)
+      {
+        (*it)->opt->parse(value);
+      }
+    }
+
+    static const char spaces[41] = "                                        ";
+
+    /* format help text for a single option:
+     * using the formatting: "-x, --long",
+     * if a short/long option isn't specified, it is not printed
+     */
+    static void doHelpOpt(ostream& out, const Options::Names& entry, unsigned pad_short = 0)
+    {
+      pad_short = min(pad_short, 8u);
+
+      if (!entry.opt_short.empty())
+      {
+        unsigned pad = max((int)pad_short - (int)entry.opt_short.front().size(), 0);
+        out << "-" << entry.opt_short.front();
+        if (!entry.opt_long.empty())
+        {
+          out << ", ";
+        }
+        out << &(spaces[40 - pad]);
+      }
+      else
+      {
+        out << "   ";
+        out << &(spaces[40 - pad_short]);
+      }
+
+      if (!entry.opt_long.empty())
+      {
+        out << "--" << entry.opt_long.front();
+      }
+    }
+
+    /* format the help text */
+    void doHelp(ostream& out, Options& opts, unsigned columns)
+    {
+      const unsigned pad_short = 3;
+      /* first pass: work out the longest option name */
+      unsigned max_width = 0;
+      for(Options::NamesPtrList::iterator it = opts.opt_list.begin(); it != opts.opt_list.end(); it++)
+      {
+        ostringstream line(ios_base::out);
+        doHelpOpt(line, **it, pad_short);
+        max_width = max(max_width, (unsigned) line.tellp());
+      }
+
+      unsigned opt_width = min(max_width+2, 28u + pad_short) + 2;
+      unsigned desc_width = columns - opt_width;
+
+      /* second pass: write out formatted option and help text.
+       *  - align start of help text to start at opt_width
+       *  - if the option text is longer than opt_width, place the help
+       *    text at opt_width on the next line.
+       */
+      for(Options::NamesPtrList::iterator it = opts.opt_list.begin(); it != opts.opt_list.end(); it++)
+      {
+        ostringstream line(ios_base::out);
+        line << "  ";
+        doHelpOpt(line, **it, pad_short);
+
+        const string& opt_desc = (*it)->opt->opt_desc;
+        if (opt_desc.empty())
+        {
+          /* no help text: output option, skip further processing */
+          cout << line.str() << endl;
+          continue;
+        }
+        size_t currlength = size_t(line.tellp());
+        if (currlength > opt_width)
+        {
+          /* if option text is too long (and would collide with the
+           * help text, split onto next line */
+          line << endl;
+          currlength = 0;
+        }
+        /* split up the help text, taking into account new lines,
+         *   (add opt_width of padding to each new line) */
+        for (size_t newline_pos = 0, cur_pos = 0; cur_pos != string::npos; currlength = 0)
+        {
+          /* print any required padding space for vertical alignment */
+          line << &(spaces[40 - opt_width + currlength]);
+          newline_pos = opt_desc.find_first_of('\n', newline_pos);
+          if (newline_pos != string::npos)
+          {
+            /* newline found, print substring (newline needn't be stripped) */
+            newline_pos++;
+            line << opt_desc.substr(cur_pos, newline_pos - cur_pos);
+            cur_pos = newline_pos;
+            continue;
+          }
+          if (cur_pos + desc_width > opt_desc.size())
+          {
+            /* no need to wrap text, remainder is less than available width */
+            line << opt_desc.substr(cur_pos);
+            break;
+          }
+          /* find a suitable point to split text (avoid splitting in the middle of a word) */
+          size_t split_pos = opt_desc.find_last_of(' ', cur_pos + desc_width);
+          if (split_pos != string::npos)
+          {
+            /* eat up multiple space characters */
+            split_pos = opt_desc.find_last_not_of(' ', split_pos) + 1;
+          }
+
+          /* bad split if no suitable space to split at.  fall back to width */
+          bool bad_split = split_pos == string::npos || split_pos <= cur_pos;
+          if (bad_split)
+          {
+            split_pos = cur_pos + desc_width;
+          }
+          line << opt_desc.substr(cur_pos, split_pos - cur_pos);
+
+          /* eat up any space for the start of the next line */
+          if (!bad_split)
+          {
+            split_pos = opt_desc.find_first_not_of(' ', split_pos);
+          }
+          cur_pos = newline_pos = split_pos;
+
+          if (cur_pos >= opt_desc.size())
+          {
+            break;
+          }
+          line << endl;
+        }
+
+        cout << line.str() << endl;
+      }
+    }
+
+    bool storePair(Options& opts, bool allow_long, bool allow_short, const string& name, const string& value)
+    {
+      bool found = false;
+      Options::NamesMap::iterator opt_it;
+      if (allow_long)
+      {
+        opt_it = opts.opt_long_map.find(name);
+        if (opt_it != opts.opt_long_map.end())
+        {
+          found = true;
+        }
+      }
+
+      /* check for the short list */
+      if (allow_short && !(found && allow_long))
+      {
+        opt_it = opts.opt_short_map.find(name);
+        if (opt_it != opts.opt_short_map.end())
+        {
+          found = true;
+        }
+      }
+
+      if (!found)
+      {
+        /* not found */
+        cerr << "Unknown option: `" << name << "' (value:`" << value << "')" << endl;
+        return false;
+      }
+
+      setOptions((*opt_it).second, value);
+      return true;
+    }
+
+    bool storePair(Options& opts, const string& name, const string& value)
+    {
+      return storePair(opts, true, true, name, value);
+    }
+
+    /**
+     * returns number of extra arguments consumed
+     */
+    unsigned parseGNU(Options& opts, unsigned argc, const char* argv[])
+    {
+      /* gnu style long options can take the forms:
+       *  --option=arg
+       *  --option arg
+       */
+      string arg(argv[0]);
+      size_t arg_opt_start = arg.find_first_not_of('-');
+      size_t arg_opt_sep = arg.find_first_of('=');
+      string option = arg.substr(arg_opt_start, arg_opt_sep - arg_opt_start);
+
+      unsigned extra_argc_consumed = 0;
+      if (arg_opt_sep == string::npos)
+      {
+        /* no argument found => argument in argv[1] (maybe) */
+        /* xxx, need to handle case where option isn't required */
+#if 0
+        /* commented out, to return to true GNU style processing
+        * where longopts have to include an =, otherwise they are
+        * booleans */
+        if (argc == 1)
+          return 0; /* run out of argv for argument */
+        extra_argc_consumed = 1;
+#endif
+        if(!storePair(opts, true, false, option, "1"))
+        {
+          return 0;
+        }
+      }
+      else
+      {
+        /* argument occurs after option_sep */
+        string val = arg.substr(arg_opt_sep + 1);
+        storePair(opts, true, false, option, val);
+      }
+
+      return extra_argc_consumed;
+    }
+
+    unsigned parseSHORT(Options& opts, unsigned argc, const char* argv[])
+    {
+      /* short options can take the forms:
+       *  --option arg
+       *  -option arg
+       */
+      string arg(argv[0]);
+      size_t arg_opt_start = arg.find_first_not_of('-');
+      string option = arg.substr(arg_opt_start);
+      /* lookup option */
+
+      /* argument in argv[1] */
+      /* xxx, need to handle case where option isn't required */
+      if (argc == 1)
+      {
+        cerr << "Not processing option without argument `" << option << "'" << endl;
+        return 0; /* run out of argv for argument */
+      }
+      storePair(opts, false, true, option, string(argv[1]));
+
+      return 1;
+    }
+
+    list<const char*>
+    scanArgv(Options& opts, unsigned argc, const char* argv[])
+    {
+      /* a list for anything that didn't get handled as an option */
+      list<const char*> non_option_arguments;
+
+      for(unsigned i = 1; i < argc; i++)
+      {
+        if (argv[i][0] != '-')
+        {
+          non_option_arguments.push_back(argv[i]);
+          continue;
+        }
+
+        if (argv[i][1] == 0)
+        {
+          /* a lone single dash is an argument (usually signifying stdin) */
+          non_option_arguments.push_back(argv[i]);
+          continue;
+        }
+
+        if (argv[i][1] != '-')
+        {
+          /* handle short (single dash) options */
+#if 0
+          i += parsePOSIX(opts, argc - i, &argv[i]);
+#else
+          i += parseSHORT(opts, argc - i, &argv[i]);
+#endif
+          continue;
+        }
+
+        if (argv[i][2] == 0)
+        {
+          /* a lone double dash ends option processing */
+          while (++i < argc)
+            non_option_arguments.push_back(argv[i]);
+          break;
+        }
+
+        /* handle long (double dash) options */
+        i += parseGNU(opts, argc - i, &argv[i]);
+      }
+
+      return non_option_arguments;
+    }
+
+    void scanLine(Options& opts, string& line)
+    {
+      /* strip any leading whitespace */
+      size_t start = line.find_first_not_of(" \t\n\r");
+      if (start == string::npos)
+      {
+        /* blank line */
+        return;
+      }
+      if (line[start] == '#')
+      {
+        /* comment line */
+        return;
+      }
+      /* look for first whitespace or ':' after the option end */
+      size_t option_end = line.find_first_of(": \t\n\r",start);
+      string option = line.substr(start, option_end - start);
+
+      /* look for ':', eat up any whitespace first */
+      start = line.find_first_not_of(" \t\n\r", option_end);
+      if (start == string::npos)
+      {
+        /* error: badly formatted line */
+        return;
+      }
+      if (line[start] != ':')
+      {
+        /* error: badly formatted line */
+        return;
+      }
+
+      /* look for start of value string -- eat up any leading whitespace */
+      start = line.find_first_not_of(" \t\n\r", ++start);
+      if (start == string::npos)
+      {
+        /* error: badly formatted line */
+        return;
+      }
+
+      /* extract the value part, which may contain embedded spaces
+       * by searching for a word at a time, until we hit a comment or end of line */
+      size_t value_end = start;
+      do
+      {
+        if (line[value_end] == '#')
+        {
+          /* rest of line is a comment */
+          value_end--;
+          break;
+        }
+        value_end = line.find_first_of(" \t\n\r", value_end);
+        /* consume any white space, in case there is another word.
+         * any trailing whitespace will be removed shortly */
+        value_end = line.find_first_not_of(" \t\n\r", value_end);
+      }
+      while (value_end != string::npos);
+      /* strip any trailing space from value */
+      value_end = line.find_last_not_of(" \t\n\r", value_end);
+
+      string value;
+      if (value_end >= start)
+      {
+        value = line.substr(start, value_end +1 - start);
+      }
+      else
+      {
+        /* error: no value */
+        return;
+      }
+
+      /* store the value in option */
+      storePair(opts, true, false, option, value);
+    }
+
+    void scanFile(Options& opts, istream& in)
+    {
+      do
+      {
+        string line;
+        getline(in, line);
+        scanLine(opts, line);
+      }
+      while(!!in);
+    }
+
+    /* for all options in opts, set their storage to their specified
+     * default value */
+    void setDefaults(Options& opts)
+    {
+      for(Options::NamesPtrList::iterator it = opts.opt_list.begin(); it != opts.opt_list.end(); it++)
+      {
+        (*it)->opt->setDefault();
+      }
+    }
+
+    void parseConfigFile(Options& opts, const string& filename)
+    {
+      ifstream cfgstream(filename.c_str(), ifstream::in);
+      if (!cfgstream)
+      {
+        cerr << "Failed to open config file: `" << filename << "'" << endl;
+        exit(EXIT_FAILURE);
+      }
+      scanFile(opts, cfgstream);
+    }
+
+  }
+}
+
+//! \}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc/program_options_lite.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,237 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2014, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <list>
+#include <map>
+
+#ifndef __PROGRAM_OPTIONS_LITE__
+#define __PROGRAM_OPTIONS_LITE__
+
+//! \ingroup TAppCommon
+//! \{
+
+
+namespace df
+{
+  namespace program_options_lite
+  {
+    struct Options;
+
+    struct ParseFailure : public std::exception
+    {
+      ParseFailure(std::string arg0, std::string val0) throw()
+      : arg(arg0), val(val0)
+      {}
+
+      ~ParseFailure() throw() {};
+
+      std::string arg;
+      std::string val;
+
+      const char* what() const throw() { return "Option Parse Failure"; }
+    };
+
+    void doHelp(std::ostream& out, Options& opts, unsigned columns = 80);
+    unsigned parseGNU(Options& opts, unsigned argc, const char* argv[]);
+    unsigned parseSHORT(Options& opts, unsigned argc, const char* argv[]);
+    std::list<const char*> scanArgv(Options& opts, unsigned argc, const char* argv[]);
+    void scanLine(Options& opts, std::string& line);
+    void scanFile(Options& opts, std::istream& in);
+    void setDefaults(Options& opts);
+    void parseConfigFile(Options& opts, const std::string& filename);
+    bool storePair(Options& opts, const std::string& name, const std::string& value);
+
+    /** OptionBase: Virtual base class for storing information relating to a
+     * specific option This base class describes common elements.  Type specific
+     * information should be stored in a derived class. */
+    struct OptionBase
+    {
+      OptionBase(const std::string& name, const std::string& desc)
+      : opt_string(name), opt_desc(desc)
+      {};
+
+      virtual ~OptionBase() {}
+
+      /* parse argument arg, to obtain a value for the option */
+      virtual void parse(const std::string& arg) = 0;
+      /* set the argument to the default value */
+      virtual void setDefault() = 0;
+
+      std::string opt_string;
+      std::string opt_desc;
+    };
+
+    /** Type specific option storage */
+    template<typename T>
+    struct Option : public OptionBase
+    {
+      Option(const std::string& name, T& storage, T default_val, const std::string& desc)
+      : OptionBase(name, desc), opt_storage(storage), opt_default_val(default_val)
+      {}
+
+      void parse(const std::string& arg);
+
+      void setDefault()
+      {
+        opt_storage = opt_default_val;
+      }
+
+      T& opt_storage;
+      T opt_default_val;
+    };
+
+    /* Generic parsing */
+    template<typename T>
+    inline void
+    Option<T>::parse(const std::string& arg)
+    {
+      std::istringstream arg_ss (arg,std::istringstream::in);
+      arg_ss.exceptions(std::ios::failbit);
+      try
+      {
+        arg_ss >> opt_storage;
+      }
+      catch (...)
+      {
+        throw ParseFailure(opt_string, arg);
+      }
+    }
+
+    /* string parsing is specialized -- copy the whole string, not just the
+     * first word */
+    template<>
+    inline void
+    Option<std::string>::parse(const std::string& arg)
+    {
+      opt_storage = arg;
+    }
+
+    /** Option class for argument handling using a user provided function */
+    struct OptionFunc : public OptionBase
+    {
+      typedef void (Func)(Options&, const std::string&);
+
+      OptionFunc(const std::string& name, Options& parent_, Func *func_, const std::string& desc)
+      : OptionBase(name, desc), parent(parent_), func(func_)
+      {}
+
+      void parse(const std::string& arg)
+      {
+        func(parent, arg);
+      }
+
+      void setDefault()
+      {
+        return;
+      }
+
+    private:
+      Options& parent;
+      void (*func)(Options&, const std::string&);
+    };
+
+    class OptionSpecific;
+    struct Options
+    {
+      ~Options();
+
+      OptionSpecific addOptions();
+
+      struct Names
+      {
+        Names() : opt(0) {};
+        ~Names()
+        {
+          if (opt)
+            delete opt;
+        }
+        std::list<std::string> opt_long;
+        std::list<std::string> opt_short;
+        OptionBase* opt;
+      };
+
+      void addOption(OptionBase *opt);
+
+      typedef std::list<Names*> NamesPtrList;
+      NamesPtrList opt_list;
+
+      typedef std::map<std::string, NamesPtrList> NamesMap;
+      NamesMap opt_long_map;
+      NamesMap opt_short_map;
+    };
+
+    /* Class with templated overloaded operator(), for use by Options::addOptions() */
+    class OptionSpecific
+    {
+    public:
+      OptionSpecific(Options& parent_) : parent(parent_) {}
+
+      /**
+       * Add option described by name to the parent Options list,
+       *   with storage for the option's value
+       *   with default_val as the default value
+       *   with desc as an optional help description
+       */
+      template<typename T>
+      OptionSpecific&
+      operator()(const std::string& name, T& storage, T default_val, const std::string& desc = "")
+      {
+        parent.addOption(new Option<T>(name, storage, default_val, desc));
+        return *this;
+      }
+
+      /**
+       * Add option described by name to the parent Options list,
+       *   with desc as an optional help description
+       * instead of storing the value somewhere, a function of type
+       * OptionFunc::Func is called.  It is up to this function to correctly
+       * handle evaluating the option's value.
+       */
+      OptionSpecific&
+      operator()(const std::string& name, OptionFunc::Func *func, const std::string& desc = "")
+      {
+        parent.addOption(new OptionFunc(name, parent, func, desc));
+        return *this;
+      }
+    private:
+      Options& parent;
+    };
+
+  } /* namespace: program_options_lite */
+} /* namespace: df */
+
+//! \}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jctvc_glue.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,257 @@
+#ifdef WIN32
+#include <windows.h>
+#endif
+#include <unistd.h>
+#include <iostream>
+#include "TAppEncTop.h"
+#include "TLibCommon/Debug.h"
+#include "TLibEncoder/TEncAnalyze.h"
+
+#include "bpgenc.h"
+
+struct HEVCEncoderContext {
+    HEVCEncodeParams params;
+    char infilename[1024];
+    char outfilename[1024];
+    FILE *yuv_file;
+    int frame_count;
+};
+
+#define ARGV_MAX 256
+
+static void add_opt(int *pargc, char **argv,
+                    const char *str)
+{
+    int argc;
+    argc = *pargc;
+    if (argc >= ARGV_MAX)
+        abort();
+    argv[argc++] = strdup(str);
+    *pargc = argc;
+}
+
+static HEVCEncoderContext *jctvc_open(const HEVCEncodeParams *params)
+{
+    HEVCEncoderContext *s;
+    char buf[1024];
+    static int tmp_idx = 1;
+
+    s = (HEVCEncoderContext *)malloc(sizeof(HEVCEncoderContext));
+    memset(s, 0, sizeof(*s));
+
+    s->params = *params;
+#ifdef WIN32
+    if (GetTempPath(sizeof(buf), buf) > sizeof(buf) - 1) {
+        fprintf(stderr, "Temporary path too long\n");
+        free(s);
+        return NULL;
+    }
+#else
+    strcpy(buf, "/tmp/");
+#endif
+    snprintf(s->infilename, sizeof(s->infilename), "%sout%d-%d.yuv", buf, getpid(), tmp_idx);
+    snprintf(s->outfilename, sizeof(s->outfilename), "%sout%d-%d.bin", buf, getpid(), tmp_idx);
+    tmp_idx++;
+
+    s->yuv_file = fopen(s->infilename, "wb");
+    if (!s->yuv_file) {
+        fprintf(stderr, "Could not open '%s'\n", s->infilename);
+        free(s);
+        return NULL;
+    }
+    return s;
+}
+
+static int jctvc_encode(HEVCEncoderContext *s, Image *img)
+{
+    save_yuv1(img, s->yuv_file);
+    s->frame_count++;
+    return 0;
+}
+
+/* return the encoded data in *pbuf and the size. Return < 0 if error */
+static int jctvc_close(HEVCEncoderContext *s, uint8_t **pbuf)
+{
+    TAppEncTop cTAppEncTop;
+    int argc;
+    char *argv[ARGV_MAX + 1];
+    char buf[1024];
+    const char *str;
+    FILE *f;
+    uint8_t *out_buf;
+    int out_buf_len, i;
+    
+    fclose(s->yuv_file);
+    s->yuv_file = NULL;
+
+    m_gcAnalyzeAll.clear();
+    m_gcAnalyzeI.clear();
+    m_gcAnalyzeP.clear();
+    m_gcAnalyzeB.clear();
+    m_gcAnalyzeAll_in.clear();
+    
+    cTAppEncTop.create();
+
+    argc = 0;
+    add_opt(&argc, argv, "jctvc"); /* dummy executable name */
+
+    snprintf(buf, sizeof(buf),"--InputFile=%s", s->infilename);
+    add_opt(&argc, argv, buf);
+    snprintf(buf, sizeof(buf),"--BitstreamFile=%s", s->outfilename);
+    add_opt(&argc, argv, buf);
+
+    snprintf(buf, sizeof(buf),"--SourceWidth=%d", s->params.width);
+    add_opt(&argc, argv, buf);
+    snprintf(buf, sizeof(buf),"--SourceHeight=%d", s->params.height);
+    add_opt(&argc, argv, buf);
+    snprintf(buf, sizeof(buf),"--InputBitDepth=%d", s->params.bit_depth);
+    add_opt(&argc, argv, buf);
+
+    switch(s->params.chroma_format) {
+    case BPG_FORMAT_GRAY:
+        str = "400";
+        break;
+    case BPG_FORMAT_420:
+        str = "420";
+        break;
+    case BPG_FORMAT_422:
+        str = "422";
+        break;
+    case BPG_FORMAT_444:
+        str = "444";
+        break;
+    default:
+        abort();
+    }
+    snprintf(buf, sizeof(buf),"--InputChromaFormat=%s", str);
+    add_opt(&argc, argv, buf);
+
+    snprintf(buf, sizeof(buf),"--QP=%d", s->params.qp);
+    add_opt(&argc, argv, buf);
+    
+    snprintf(buf, sizeof(buf),"--SEIDecodedPictureHash=%d", 
+             s->params.sei_decoded_picture_hash);
+    add_opt(&argc, argv, buf);
+    
+    if (!s->params.verbose)
+      add_opt(&argc, argv, "--Verbose=0");
+      
+    /* single frame */
+    snprintf(buf, sizeof(buf),"--FramesToBeEncoded=%d", s->frame_count);
+    add_opt(&argc, argv, buf);
+    
+    /* no padding necessary (it is done before) */
+    add_opt(&argc, argv, "--ConformanceWindowMode=0");
+    
+    /* dummy frame rate */
+    add_opt(&argc, argv, "--FrameRate=25");
+
+    /* general config */
+    add_opt(&argc, argv, "--QuadtreeTULog2MaxSize=5");
+    if (s->params.compress_level == 9) {
+        add_opt(&argc, argv, "--QuadtreeTUMaxDepthIntra=4");
+        add_opt(&argc, argv, "--QuadtreeTUMaxDepthInter=4");
+    } else {
+        add_opt(&argc, argv, "--QuadtreeTUMaxDepthIntra=3");
+        add_opt(&argc, argv, "--QuadtreeTUMaxDepthInter=3");
+    }
+
+    if (s->params.intra_only) {
+        add_opt(&argc, argv, "--Profile=main_444_16_intra");
+
+        add_opt(&argc, argv, "--IntraPeriod=1");
+        add_opt(&argc, argv, "--GOPSize=1");
+    } else {
+        int gop_size;
+
+        add_opt(&argc, argv, "--Profile=main_444_16");
+        add_opt(&argc, argv, "--IntraPeriod=250");
+        gop_size = 1;
+        snprintf(buf, sizeof(buf), "--GOPSize=%d", gop_size);
+        add_opt(&argc, argv, buf);
+
+        for(i = 0; i < gop_size; i++) {
+            snprintf(buf, sizeof(buf), "--Frame%d=P 1 3 0.4624 0 0 0 1 1 -1 0", i + 1);
+            add_opt(&argc, argv, buf);
+        }
+    }
+    add_opt(&argc, argv, "--TransformSkip=1");
+    add_opt(&argc, argv, "--TransformSkipFast=1");
+
+    /* Note: Format Range extension */
+    if (s->params.chroma_format == BPG_FORMAT_444) {
+        add_opt(&argc, argv, "--CrossComponentPrediction=1");
+    }
+
+    if (s->params.lossless) {
+        add_opt(&argc, argv, "--CostMode=lossless");
+        add_opt(&argc, argv, "--SAO=0");
+        add_opt(&argc, argv, "--LoopFilterDisable");
+        add_opt(&argc, argv, "--TransquantBypassEnableFlag");
+        add_opt(&argc, argv, "--CUTransquantBypassFlagForce");
+        add_opt(&argc, argv, "--ImplicitResidualDPCM");
+        add_opt(&argc, argv, "--GolombRiceParameterAdaptation");
+        add_opt(&argc, argv, "--HadamardME=0");
+    }
+
+#if 0
+    /* TEST with several slices */
+    add_opt(&argc, argv, "--SliceMode=2");
+    add_opt(&argc, argv, "--SliceArgument=5");
+#endif
+
+    /* trailing NULL */
+    argv[argc] = NULL;
+
+    if (s->params.verbose >= 2) {
+        int i;
+        printf("Encode options:");
+        for(i = 0; i < argc; i++) {
+            printf(" %s", argv[i]);
+        }
+        printf("\n");
+    }
+    
+    if(!cTAppEncTop.parseCfg( argc, argv )) {
+        fprintf(stderr, "Error while parsing options\n");
+        cTAppEncTop.destroy();
+        return -1;
+    }
+    
+    cTAppEncTop.encode();
+    
+    cTAppEncTop.destroy();
+    
+    for(i = 0; i < argc; i++)
+        free(argv[i]);
+    unlink(s->infilename);
+
+    /* read output bitstream */
+    f = fopen(s->outfilename, "rb");
+    if (!f) {
+        fprintf(stderr, "Could not open '%s'\n", s->outfilename);
+        return -1;
+    }
+    
+    fseek(f, 0, SEEK_END);
+    out_buf_len = ftell(f);
+    fseek(f, 0, SEEK_SET);
+    out_buf = (uint8_t *)malloc(out_buf_len);
+    if (fread(out_buf, 1, out_buf_len, f) != out_buf_len) {
+        fprintf(stderr, "read error\n");
+        fclose(f);
+        free(out_buf);
+        return -1;
+    }
+    fclose(f);
+    unlink(s->outfilename);
+    *pbuf = out_buf;
+    free(s);
+    return out_buf_len;
+}
+
+HEVCEncoder jctvc_encoder = {
+  .open = jctvc_open,
+  .encode = jctvc_encode,
+  .close = jctvc_close,
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/avcodec.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,5246 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVCODEC_H
+#define AVCODEC_AVCODEC_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec external API header
+ */
+
+#include <errno.h>
+#include "libavutil/samplefmt.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
+#include "libavutil/cpu.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/dict.h"
+#include "libavutil/frame.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+
+#include "version.h"
+
+/**
+ * @defgroup libavc Encoding/Decoding Library
+ * @{
+ *
+ * @defgroup lavc_decoding Decoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_encoding Encoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_codec Codecs
+ * @{
+ * @defgroup lavc_codec_native Native Codecs
+ * @{
+ * @}
+ * @defgroup lavc_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
+ * @{
+ * @}
+ * @}
+ * @defgroup lavc_internal Internal
+ * @{
+ * @}
+ * @}
+ *
+ */
+
+/**
+ * @defgroup lavc_core Core functions/structures.
+ * @ingroup libavc
+ *
+ * Basic definitions, functions for querying libavcodec capabilities,
+ * allocating core structures, etc.
+ * @{
+ */
+
+
+/**
+ * Identify the syntax and semantics of the bitstream.
+ * The principle is roughly:
+ * Two decoders with the same ID can decode the same streams.
+ * Two encoders with the same ID can encode compatible streams.
+ * There may be slight deviations from the principle due to implementation
+ * details.
+ *
+ * If you add a codec ID to this list, add it so that
+ * 1. no value of a existing codec ID changes (that would break ABI),
+ * 2. Give it a value which when taken as ASCII is recognized uniquely by a human as this specific codec.
+ *    This ensures that 2 forks can independently add AVCodecIDs without producing conflicts.
+ *
+ * After adding new codec IDs, do not forget to add an entry to the codec
+ * descriptor list and bump libavcodec minor version.
+ */
+enum AVCodecID {
+    AV_CODEC_ID_NONE,
+
+    /* video codecs */
+    AV_CODEC_ID_MPEG1VIDEO,
+    AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+#if FF_API_XVMC
+    AV_CODEC_ID_MPEG2VIDEO_XVMC,
+#endif /* FF_API_XVMC */
+    AV_CODEC_ID_H261,
+    AV_CODEC_ID_H263,
+    AV_CODEC_ID_RV10,
+    AV_CODEC_ID_RV20,
+    AV_CODEC_ID_MJPEG,
+    AV_CODEC_ID_MJPEGB,
+    AV_CODEC_ID_LJPEG,
+    AV_CODEC_ID_SP5X,
+    AV_CODEC_ID_JPEGLS,
+    AV_CODEC_ID_MPEG4,
+    AV_CODEC_ID_RAWVIDEO,
+    AV_CODEC_ID_MSMPEG4V1,
+    AV_CODEC_ID_MSMPEG4V2,
+    AV_CODEC_ID_MSMPEG4V3,
+    AV_CODEC_ID_WMV1,
+    AV_CODEC_ID_WMV2,
+    AV_CODEC_ID_H263P,
+    AV_CODEC_ID_H263I,
+    AV_CODEC_ID_FLV1,
+    AV_CODEC_ID_SVQ1,
+    AV_CODEC_ID_SVQ3,
+    AV_CODEC_ID_DVVIDEO,
+    AV_CODEC_ID_HUFFYUV,
+    AV_CODEC_ID_CYUV,
+    AV_CODEC_ID_H264,
+    AV_CODEC_ID_INDEO3,
+    AV_CODEC_ID_VP3,
+    AV_CODEC_ID_THEORA,
+    AV_CODEC_ID_ASV1,
+    AV_CODEC_ID_ASV2,
+    AV_CODEC_ID_FFV1,
+    AV_CODEC_ID_4XM,
+    AV_CODEC_ID_VCR1,
+    AV_CODEC_ID_CLJR,
+    AV_CODEC_ID_MDEC,
+    AV_CODEC_ID_ROQ,
+    AV_CODEC_ID_INTERPLAY_VIDEO,
+    AV_CODEC_ID_XAN_WC3,
+    AV_CODEC_ID_XAN_WC4,
+    AV_CODEC_ID_RPZA,
+    AV_CODEC_ID_CINEPAK,
+    AV_CODEC_ID_WS_VQA,
+    AV_CODEC_ID_MSRLE,
+    AV_CODEC_ID_MSVIDEO1,
+    AV_CODEC_ID_IDCIN,
+    AV_CODEC_ID_8BPS,
+    AV_CODEC_ID_SMC,
+    AV_CODEC_ID_FLIC,
+    AV_CODEC_ID_TRUEMOTION1,
+    AV_CODEC_ID_VMDVIDEO,
+    AV_CODEC_ID_MSZH,
+    AV_CODEC_ID_ZLIB,
+    AV_CODEC_ID_QTRLE,
+    AV_CODEC_ID_TSCC,
+    AV_CODEC_ID_ULTI,
+    AV_CODEC_ID_QDRAW,
+    AV_CODEC_ID_VIXL,
+    AV_CODEC_ID_QPEG,
+    AV_CODEC_ID_PNG,
+    AV_CODEC_ID_PPM,
+    AV_CODEC_ID_PBM,
+    AV_CODEC_ID_PGM,
+    AV_CODEC_ID_PGMYUV,
+    AV_CODEC_ID_PAM,
+    AV_CODEC_ID_FFVHUFF,
+    AV_CODEC_ID_RV30,
+    AV_CODEC_ID_RV40,
+    AV_CODEC_ID_VC1,
+    AV_CODEC_ID_WMV3,
+    AV_CODEC_ID_LOCO,
+    AV_CODEC_ID_WNV1,
+    AV_CODEC_ID_AASC,
+    AV_CODEC_ID_INDEO2,
+    AV_CODEC_ID_FRAPS,
+    AV_CODEC_ID_TRUEMOTION2,
+    AV_CODEC_ID_BMP,
+    AV_CODEC_ID_CSCD,
+    AV_CODEC_ID_MMVIDEO,
+    AV_CODEC_ID_ZMBV,
+    AV_CODEC_ID_AVS,
+    AV_CODEC_ID_SMACKVIDEO,
+    AV_CODEC_ID_NUV,
+    AV_CODEC_ID_KMVC,
+    AV_CODEC_ID_FLASHSV,
+    AV_CODEC_ID_CAVS,
+    AV_CODEC_ID_JPEG2000,
+    AV_CODEC_ID_VMNC,
+    AV_CODEC_ID_VP5,
+    AV_CODEC_ID_VP6,
+    AV_CODEC_ID_VP6F,
+    AV_CODEC_ID_TARGA,
+    AV_CODEC_ID_DSICINVIDEO,
+    AV_CODEC_ID_TIERTEXSEQVIDEO,
+    AV_CODEC_ID_TIFF,
+    AV_CODEC_ID_GIF,
+    AV_CODEC_ID_DXA,
+    AV_CODEC_ID_DNXHD,
+    AV_CODEC_ID_THP,
+    AV_CODEC_ID_SGI,
+    AV_CODEC_ID_C93,
+    AV_CODEC_ID_BETHSOFTVID,
+    AV_CODEC_ID_PTX,
+    AV_CODEC_ID_TXD,
+    AV_CODEC_ID_VP6A,
+    AV_CODEC_ID_AMV,
+    AV_CODEC_ID_VB,
+    AV_CODEC_ID_PCX,
+    AV_CODEC_ID_SUNRAST,
+    AV_CODEC_ID_INDEO4,
+    AV_CODEC_ID_INDEO5,
+    AV_CODEC_ID_MIMIC,
+    AV_CODEC_ID_RL2,
+    AV_CODEC_ID_ESCAPE124,
+    AV_CODEC_ID_DIRAC,
+    AV_CODEC_ID_BFI,
+    AV_CODEC_ID_CMV,
+    AV_CODEC_ID_MOTIONPIXELS,
+    AV_CODEC_ID_TGV,
+    AV_CODEC_ID_TGQ,
+    AV_CODEC_ID_TQI,
+    AV_CODEC_ID_AURA,
+    AV_CODEC_ID_AURA2,
+    AV_CODEC_ID_V210X,
+    AV_CODEC_ID_TMV,
+    AV_CODEC_ID_V210,
+    AV_CODEC_ID_DPX,
+    AV_CODEC_ID_MAD,
+    AV_CODEC_ID_FRWU,
+    AV_CODEC_ID_FLASHSV2,
+    AV_CODEC_ID_CDGRAPHICS,
+    AV_CODEC_ID_R210,
+    AV_CODEC_ID_ANM,
+    AV_CODEC_ID_BINKVIDEO,
+    AV_CODEC_ID_IFF_ILBM,
+    AV_CODEC_ID_IFF_BYTERUN1,
+    AV_CODEC_ID_KGV1,
+    AV_CODEC_ID_YOP,
+    AV_CODEC_ID_VP8,
+    AV_CODEC_ID_PICTOR,
+    AV_CODEC_ID_ANSI,
+    AV_CODEC_ID_A64_MULTI,
+    AV_CODEC_ID_A64_MULTI5,
+    AV_CODEC_ID_R10K,
+    AV_CODEC_ID_MXPEG,
+    AV_CODEC_ID_LAGARITH,
+    AV_CODEC_ID_PRORES,
+    AV_CODEC_ID_JV,
+    AV_CODEC_ID_DFA,
+    AV_CODEC_ID_WMV3IMAGE,
+    AV_CODEC_ID_VC1IMAGE,
+    AV_CODEC_ID_UTVIDEO,
+    AV_CODEC_ID_BMV_VIDEO,
+    AV_CODEC_ID_VBLE,
+    AV_CODEC_ID_DXTORY,
+    AV_CODEC_ID_V410,
+    AV_CODEC_ID_XWD,
+    AV_CODEC_ID_CDXL,
+    AV_CODEC_ID_XBM,
+    AV_CODEC_ID_ZEROCODEC,
+    AV_CODEC_ID_MSS1,
+    AV_CODEC_ID_MSA1,
+    AV_CODEC_ID_TSCC2,
+    AV_CODEC_ID_MTS2,
+    AV_CODEC_ID_CLLC,
+    AV_CODEC_ID_MSS2,
+    AV_CODEC_ID_VP9,
+    AV_CODEC_ID_AIC,
+    AV_CODEC_ID_ESCAPE130_DEPRECATED,
+    AV_CODEC_ID_G2M_DEPRECATED,
+    AV_CODEC_ID_WEBP_DEPRECATED,
+    AV_CODEC_ID_HNM4_VIDEO,
+    AV_CODEC_ID_HEVC_DEPRECATED,
+    AV_CODEC_ID_FIC,
+    AV_CODEC_ID_ALIAS_PIX,
+    AV_CODEC_ID_BRENDER_PIX_DEPRECATED,
+    AV_CODEC_ID_PAF_VIDEO_DEPRECATED,
+    AV_CODEC_ID_EXR_DEPRECATED,
+    AV_CODEC_ID_VP7_DEPRECATED,
+    AV_CODEC_ID_SANM_DEPRECATED,
+    AV_CODEC_ID_SGIRLE_DEPRECATED,
+    AV_CODEC_ID_MVC1_DEPRECATED,
+    AV_CODEC_ID_MVC2_DEPRECATED,
+
+    AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'),
+    AV_CODEC_ID_Y41P       = MKBETAG('Y','4','1','P'),
+    AV_CODEC_ID_ESCAPE130  = MKBETAG('E','1','3','0'),
+    AV_CODEC_ID_EXR        = MKBETAG('0','E','X','R'),
+    AV_CODEC_ID_AVRP       = MKBETAG('A','V','R','P'),
+
+    AV_CODEC_ID_012V       = MKBETAG('0','1','2','V'),
+    AV_CODEC_ID_G2M        = MKBETAG( 0 ,'G','2','M'),
+    AV_CODEC_ID_AVUI       = MKBETAG('A','V','U','I'),
+    AV_CODEC_ID_AYUV       = MKBETAG('A','Y','U','V'),
+    AV_CODEC_ID_TARGA_Y216 = MKBETAG('T','2','1','6'),
+    AV_CODEC_ID_V308       = MKBETAG('V','3','0','8'),
+    AV_CODEC_ID_V408       = MKBETAG('V','4','0','8'),
+    AV_CODEC_ID_YUV4       = MKBETAG('Y','U','V','4'),
+    AV_CODEC_ID_SANM       = MKBETAG('S','A','N','M'),
+    AV_CODEC_ID_PAF_VIDEO  = MKBETAG('P','A','F','V'),
+    AV_CODEC_ID_AVRN       = MKBETAG('A','V','R','n'),
+    AV_CODEC_ID_CPIA       = MKBETAG('C','P','I','A'),
+    AV_CODEC_ID_XFACE      = MKBETAG('X','F','A','C'),
+    AV_CODEC_ID_SGIRLE     = MKBETAG('S','G','I','R'),
+    AV_CODEC_ID_MVC1       = MKBETAG('M','V','C','1'),
+    AV_CODEC_ID_MVC2       = MKBETAG('M','V','C','2'),
+    AV_CODEC_ID_SNOW       = MKBETAG('S','N','O','W'),
+    AV_CODEC_ID_WEBP       = MKBETAG('W','E','B','P'),
+    AV_CODEC_ID_SMVJPEG    = MKBETAG('S','M','V','J'),
+    AV_CODEC_ID_HEVC       = MKBETAG('H','2','6','5'),
+#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC
+    AV_CODEC_ID_VP7        = MKBETAG('V','P','7','0'),
+
+    /* various PCM "codecs" */
+    AV_CODEC_ID_FIRST_AUDIO = 0x10000,     ///< A dummy id pointing at the start of audio codecs
+    AV_CODEC_ID_PCM_S16LE = 0x10000,
+    AV_CODEC_ID_PCM_S16BE,
+    AV_CODEC_ID_PCM_U16LE,
+    AV_CODEC_ID_PCM_U16BE,
+    AV_CODEC_ID_PCM_S8,
+    AV_CODEC_ID_PCM_U8,
+    AV_CODEC_ID_PCM_MULAW,
+    AV_CODEC_ID_PCM_ALAW,
+    AV_CODEC_ID_PCM_S32LE,
+    AV_CODEC_ID_PCM_S32BE,
+    AV_CODEC_ID_PCM_U32LE,
+    AV_CODEC_ID_PCM_U32BE,
+    AV_CODEC_ID_PCM_S24LE,
+    AV_CODEC_ID_PCM_S24BE,
+    AV_CODEC_ID_PCM_U24LE,
+    AV_CODEC_ID_PCM_U24BE,
+    AV_CODEC_ID_PCM_S24DAUD,
+    AV_CODEC_ID_PCM_ZORK,
+    AV_CODEC_ID_PCM_S16LE_PLANAR,
+    AV_CODEC_ID_PCM_DVD,
+    AV_CODEC_ID_PCM_F32BE,
+    AV_CODEC_ID_PCM_F32LE,
+    AV_CODEC_ID_PCM_F64BE,
+    AV_CODEC_ID_PCM_F64LE,
+    AV_CODEC_ID_PCM_BLURAY,
+    AV_CODEC_ID_PCM_LXF,
+    AV_CODEC_ID_S302M,
+    AV_CODEC_ID_PCM_S8_PLANAR,
+    AV_CODEC_ID_PCM_S24LE_PLANAR_DEPRECATED,
+    AV_CODEC_ID_PCM_S32LE_PLANAR_DEPRECATED,
+    AV_CODEC_ID_PCM_S24LE_PLANAR = MKBETAG(24,'P','S','P'),
+    AV_CODEC_ID_PCM_S32LE_PLANAR = MKBETAG(32,'P','S','P'),
+    AV_CODEC_ID_PCM_S16BE_PLANAR = MKBETAG('P','S','P',16),
+
+    /* various ADPCM codecs */
+    AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
+    AV_CODEC_ID_ADPCM_IMA_WAV,
+    AV_CODEC_ID_ADPCM_IMA_DK3,
+    AV_CODEC_ID_ADPCM_IMA_DK4,
+    AV_CODEC_ID_ADPCM_IMA_WS,
+    AV_CODEC_ID_ADPCM_IMA_SMJPEG,
+    AV_CODEC_ID_ADPCM_MS,
+    AV_CODEC_ID_ADPCM_4XM,
+    AV_CODEC_ID_ADPCM_XA,
+    AV_CODEC_ID_ADPCM_ADX,
+    AV_CODEC_ID_ADPCM_EA,
+    AV_CODEC_ID_ADPCM_G726,
+    AV_CODEC_ID_ADPCM_CT,
+    AV_CODEC_ID_ADPCM_SWF,
+    AV_CODEC_ID_ADPCM_YAMAHA,
+    AV_CODEC_ID_ADPCM_SBPRO_4,
+    AV_CODEC_ID_ADPCM_SBPRO_3,
+    AV_CODEC_ID_ADPCM_SBPRO_2,
+    AV_CODEC_ID_ADPCM_THP,
+    AV_CODEC_ID_ADPCM_IMA_AMV,
+    AV_CODEC_ID_ADPCM_EA_R1,
+    AV_CODEC_ID_ADPCM_EA_R3,
+    AV_CODEC_ID_ADPCM_EA_R2,
+    AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
+    AV_CODEC_ID_ADPCM_IMA_EA_EACS,
+    AV_CODEC_ID_ADPCM_EA_XAS,
+    AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
+    AV_CODEC_ID_ADPCM_IMA_ISS,
+    AV_CODEC_ID_ADPCM_G722,
+    AV_CODEC_ID_ADPCM_IMA_APC,
+    AV_CODEC_ID_ADPCM_VIMA_DEPRECATED,
+    AV_CODEC_ID_ADPCM_VIMA = MKBETAG('V','I','M','A'),
+    AV_CODEC_ID_VIMA       = MKBETAG('V','I','M','A'),
+    AV_CODEC_ID_ADPCM_AFC  = MKBETAG('A','F','C',' '),
+    AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '),
+    AV_CODEC_ID_ADPCM_DTK  = MKBETAG('D','T','K',' '),
+    AV_CODEC_ID_ADPCM_IMA_RAD = MKBETAG('R','A','D',' '),
+    AV_CODEC_ID_ADPCM_G726LE = MKBETAG('6','2','7','G'),
+
+    /* AMR */
+    AV_CODEC_ID_AMR_NB = 0x12000,
+    AV_CODEC_ID_AMR_WB,
+
+    /* RealAudio codecs*/
+    AV_CODEC_ID_RA_144 = 0x13000,
+    AV_CODEC_ID_RA_288,
+
+    /* various DPCM codecs */
+    AV_CODEC_ID_ROQ_DPCM = 0x14000,
+    AV_CODEC_ID_INTERPLAY_DPCM,
+    AV_CODEC_ID_XAN_DPCM,
+    AV_CODEC_ID_SOL_DPCM,
+
+    /* audio codecs */
+    AV_CODEC_ID_MP2 = 0x15000,
+    AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+    AV_CODEC_ID_AAC,
+    AV_CODEC_ID_AC3,
+    AV_CODEC_ID_DTS,
+    AV_CODEC_ID_VORBIS,
+    AV_CODEC_ID_DVAUDIO,
+    AV_CODEC_ID_WMAV1,
+    AV_CODEC_ID_WMAV2,
+    AV_CODEC_ID_MACE3,
+    AV_CODEC_ID_MACE6,
+    AV_CODEC_ID_VMDAUDIO,
+    AV_CODEC_ID_FLAC,
+    AV_CODEC_ID_MP3ADU,
+    AV_CODEC_ID_MP3ON4,
+    AV_CODEC_ID_SHORTEN,
+    AV_CODEC_ID_ALAC,
+    AV_CODEC_ID_WESTWOOD_SND1,
+    AV_CODEC_ID_GSM, ///< as in Berlin toast format
+    AV_CODEC_ID_QDM2,
+    AV_CODEC_ID_COOK,
+    AV_CODEC_ID_TRUESPEECH,
+    AV_CODEC_ID_TTA,
+    AV_CODEC_ID_SMACKAUDIO,
+    AV_CODEC_ID_QCELP,
+    AV_CODEC_ID_WAVPACK,
+    AV_CODEC_ID_DSICINAUDIO,
+    AV_CODEC_ID_IMC,
+    AV_CODEC_ID_MUSEPACK7,
+    AV_CODEC_ID_MLP,
+    AV_CODEC_ID_GSM_MS, /* as found in WAV */
+    AV_CODEC_ID_ATRAC3,
+#if FF_API_VOXWARE
+    AV_CODEC_ID_VOXWARE,
+#endif
+    AV_CODEC_ID_APE,
+    AV_CODEC_ID_NELLYMOSER,
+    AV_CODEC_ID_MUSEPACK8,
+    AV_CODEC_ID_SPEEX,
+    AV_CODEC_ID_WMAVOICE,
+    AV_CODEC_ID_WMAPRO,
+    AV_CODEC_ID_WMALOSSLESS,
+    AV_CODEC_ID_ATRAC3P,
+    AV_CODEC_ID_EAC3,
+    AV_CODEC_ID_SIPR,
+    AV_CODEC_ID_MP1,
+    AV_CODEC_ID_TWINVQ,
+    AV_CODEC_ID_TRUEHD,
+    AV_CODEC_ID_MP4ALS,
+    AV_CODEC_ID_ATRAC1,
+    AV_CODEC_ID_BINKAUDIO_RDFT,
+    AV_CODEC_ID_BINKAUDIO_DCT,
+    AV_CODEC_ID_AAC_LATM,
+    AV_CODEC_ID_QDMC,
+    AV_CODEC_ID_CELT,
+    AV_CODEC_ID_G723_1,
+    AV_CODEC_ID_G729,
+    AV_CODEC_ID_8SVX_EXP,
+    AV_CODEC_ID_8SVX_FIB,
+    AV_CODEC_ID_BMV_AUDIO,
+    AV_CODEC_ID_RALF,
+    AV_CODEC_ID_IAC,
+    AV_CODEC_ID_ILBC,
+    AV_CODEC_ID_OPUS_DEPRECATED,
+    AV_CODEC_ID_COMFORT_NOISE,
+    AV_CODEC_ID_TAK_DEPRECATED,
+    AV_CODEC_ID_METASOUND,
+    AV_CODEC_ID_PAF_AUDIO_DEPRECATED,
+    AV_CODEC_ID_ON2AVC,
+    AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
+    AV_CODEC_ID_SONIC       = MKBETAG('S','O','N','C'),
+    AV_CODEC_ID_SONIC_LS    = MKBETAG('S','O','N','L'),
+    AV_CODEC_ID_PAF_AUDIO   = MKBETAG('P','A','F','A'),
+    AV_CODEC_ID_OPUS        = MKBETAG('O','P','U','S'),
+    AV_CODEC_ID_TAK         = MKBETAG('t','B','a','K'),
+    AV_CODEC_ID_EVRC        = MKBETAG('s','e','v','c'),
+    AV_CODEC_ID_SMV         = MKBETAG('s','s','m','v'),
+    AV_CODEC_ID_DSD_LSBF    = MKBETAG('D','S','D','L'),
+    AV_CODEC_ID_DSD_MSBF    = MKBETAG('D','S','D','M'),
+    AV_CODEC_ID_DSD_LSBF_PLANAR = MKBETAG('D','S','D','1'),
+    AV_CODEC_ID_DSD_MSBF_PLANAR = MKBETAG('D','S','D','8'),
+
+    /* subtitle codecs */
+    AV_CODEC_ID_FIRST_SUBTITLE = 0x17000,          ///< A dummy ID pointing at the start of subtitle codecs.
+    AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
+    AV_CODEC_ID_DVB_SUBTITLE,
+    AV_CODEC_ID_TEXT,  ///< raw UTF-8 text
+    AV_CODEC_ID_XSUB,
+    AV_CODEC_ID_SSA,
+    AV_CODEC_ID_MOV_TEXT,
+    AV_CODEC_ID_HDMV_PGS_SUBTITLE,
+    AV_CODEC_ID_DVB_TELETEXT,
+    AV_CODEC_ID_SRT,
+    AV_CODEC_ID_MICRODVD   = MKBETAG('m','D','V','D'),
+    AV_CODEC_ID_EIA_608    = MKBETAG('c','6','0','8'),
+    AV_CODEC_ID_JACOSUB    = MKBETAG('J','S','U','B'),
+    AV_CODEC_ID_SAMI       = MKBETAG('S','A','M','I'),
+    AV_CODEC_ID_REALTEXT   = MKBETAG('R','T','X','T'),
+    AV_CODEC_ID_STL        = MKBETAG('S','p','T','L'),
+    AV_CODEC_ID_SUBVIEWER1 = MKBETAG('S','b','V','1'),
+    AV_CODEC_ID_SUBVIEWER  = MKBETAG('S','u','b','V'),
+    AV_CODEC_ID_SUBRIP     = MKBETAG('S','R','i','p'),
+    AV_CODEC_ID_WEBVTT     = MKBETAG('W','V','T','T'),
+    AV_CODEC_ID_MPL2       = MKBETAG('M','P','L','2'),
+    AV_CODEC_ID_VPLAYER    = MKBETAG('V','P','l','r'),
+    AV_CODEC_ID_PJS        = MKBETAG('P','h','J','S'),
+    AV_CODEC_ID_ASS        = MKBETAG('A','S','S',' '),  ///< ASS as defined in Matroska
+
+    /* other specific kind of codecs (generally used for attachments) */
+    AV_CODEC_ID_FIRST_UNKNOWN = 0x18000,           ///< A dummy ID pointing at the start of various fake codecs.
+    AV_CODEC_ID_TTF = 0x18000,
+    AV_CODEC_ID_BINTEXT    = MKBETAG('B','T','X','T'),
+    AV_CODEC_ID_XBIN       = MKBETAG('X','B','I','N'),
+    AV_CODEC_ID_IDF        = MKBETAG( 0 ,'I','D','F'),
+    AV_CODEC_ID_OTF        = MKBETAG( 0 ,'O','T','F'),
+    AV_CODEC_ID_SMPTE_KLV  = MKBETAG('K','L','V','A'),
+    AV_CODEC_ID_DVD_NAV    = MKBETAG('D','N','A','V'),
+    AV_CODEC_ID_TIMED_ID3  = MKBETAG('T','I','D','3'),
+    AV_CODEC_ID_BIN_DATA   = MKBETAG('D','A','T','A'),
+
+
+    AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it
+
+    AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+                                * stream (only used by libavformat) */
+    AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+                                * stream (only used by libavformat) */
+    AV_CODEC_ID_FFMETADATA = 0x21000,   ///< Dummy codec for streams containing only metadata information.
+
+#if FF_API_CODEC_ID
+#include "old_codec_ids.h"
+#endif
+};
+
+/**
+ * This struct describes the properties of a single codec described by an
+ * AVCodecID.
+ * @see avcodec_descriptor_get()
+ */
+typedef struct AVCodecDescriptor {
+    enum AVCodecID     id;
+    enum AVMediaType type;
+    /**
+     * Name of the codec described by this descriptor. It is non-empty and
+     * unique for each codec descriptor. It should contain alphanumeric
+     * characters and '_' only.
+     */
+    const char      *name;
+    /**
+     * A more descriptive name for this codec. May be NULL.
+     */
+    const char *long_name;
+    /**
+     * Codec properties, a combination of AV_CODEC_PROP_* flags.
+     */
+    int             props;
+
+    /**
+     * MIME type(s) associated with the codec.
+     * May be NULL; if not, a NULL-terminated array of MIME types.
+     * The first item is always non-NULL and is the preferred MIME type.
+     */
+    const char *const *mime_types;
+} AVCodecDescriptor;
+
+/**
+ * Codec uses only intra compression.
+ * Video codecs only.
+ */
+#define AV_CODEC_PROP_INTRA_ONLY    (1 << 0)
+/**
+ * Codec supports lossy compression. Audio and video codecs only.
+ * @note a codec may support both lossy and lossless
+ * compression modes
+ */
+#define AV_CODEC_PROP_LOSSY         (1 << 1)
+/**
+ * Codec supports lossless compression. Audio and video codecs only.
+ */
+#define AV_CODEC_PROP_LOSSLESS      (1 << 2)
+/**
+ * Codec supports frame reordering. That is, the coded order (the order in which
+ * the encoded packets are output by the encoders / stored / input to the
+ * decoders) may be different from the presentation order of the corresponding
+ * frames.
+ *
+ * For codecs that do not have this property set, PTS and DTS should always be
+ * equal.
+ */
+#define AV_CODEC_PROP_REORDER       (1 << 3)
+/**
+ * Subtitle codec is bitmap based
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.
+ */
+#define AV_CODEC_PROP_BITMAP_SUB    (1 << 16)
+/**
+ * Subtitle codec is text based.
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field.
+ */
+#define AV_CODEC_PROP_TEXT_SUB      (1 << 17)
+
+/**
+ * @ingroup lavc_decoding
+ * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
+ * This is mainly needed because some optimized bitstream readers read
+ * 32 or 64 bit at once and could read over the end.<br>
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define FF_INPUT_BUFFER_PADDING_SIZE 32
+
+/**
+ * @ingroup lavc_encoding
+ * minimum encoding buffer size
+ * Used to avoid some checks during header writing.
+ */
+#define FF_MIN_BUFFER_SIZE 16384
+
+
+/**
+ * @ingroup lavc_encoding
+ * motion estimation type.
+ */
+enum Motion_Est_ID {
+    ME_ZERO = 1,    ///< no search, that is use 0,0 vector whenever one is needed
+    ME_FULL,
+    ME_LOG,
+    ME_PHODS,
+    ME_EPZS,        ///< enhanced predictive zonal search
+    ME_X1,          ///< reserved for experiments
+    ME_HEX,         ///< hexagon based search
+    ME_UMH,         ///< uneven multi-hexagon search
+    ME_TESA,        ///< transformed exhaustive search algorithm
+    ME_ITER=50,     ///< iterative search
+};
+
+/**
+ * @ingroup lavc_decoding
+ */
+enum AVDiscard{
+    /* We leave some space between them for extensions (drop some
+     * keyframes for intra-only or drop just some bidir frames). */
+    AVDISCARD_NONE    =-16, ///< discard nothing
+    AVDISCARD_DEFAULT =  0, ///< discard useless packets like 0 size packets in avi
+    AVDISCARD_NONREF  =  8, ///< discard all non reference
+    AVDISCARD_BIDIR   = 16, ///< discard all bidirectional frames
+    AVDISCARD_NONINTRA= 24, ///< discard all non intra frames
+    AVDISCARD_NONKEY  = 32, ///< discard all frames except keyframes
+    AVDISCARD_ALL     = 48, ///< discard all
+};
+
+enum AVAudioServiceType {
+    AV_AUDIO_SERVICE_TYPE_MAIN              = 0,
+    AV_AUDIO_SERVICE_TYPE_EFFECTS           = 1,
+    AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
+    AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED  = 3,
+    AV_AUDIO_SERVICE_TYPE_DIALOGUE          = 4,
+    AV_AUDIO_SERVICE_TYPE_COMMENTARY        = 5,
+    AV_AUDIO_SERVICE_TYPE_EMERGENCY         = 6,
+    AV_AUDIO_SERVICE_TYPE_VOICE_OVER        = 7,
+    AV_AUDIO_SERVICE_TYPE_KARAOKE           = 8,
+    AV_AUDIO_SERVICE_TYPE_NB                   , ///< Not part of ABI
+};
+
+/**
+ * @ingroup lavc_encoding
+ */
+typedef struct RcOverride{
+    int start_frame;
+    int end_frame;
+    int qscale; // If this is 0 then quality_factor will be used instead.
+    float quality_factor;
+} RcOverride;
+
+#if FF_API_MAX_BFRAMES
+/**
+ * @deprecated there is no libavcodec-wide limit on the number of B-frames
+ */
+#define FF_MAX_B_FRAMES 16
+#endif
+
+/* encoding support
+   These flags can be passed in AVCodecContext.flags before initialization.
+   Note: Not everything is supported yet.
+*/
+
+/**
+ * Allow decoders to produce frames with data planes that are not aligned
+ * to CPU requirements (e.g. due to cropping).
+ */
+#define CODEC_FLAG_UNALIGNED 0x0001
+#define CODEC_FLAG_QSCALE 0x0002  ///< Use fixed qscale.
+#define CODEC_FLAG_4MV    0x0004  ///< 4 MV per MB allowed / advanced prediction for H.263.
+#define CODEC_FLAG_OUTPUT_CORRUPT 0x0008 ///< Output even those frames that might be corrupted
+#define CODEC_FLAG_QPEL   0x0010  ///< Use qpel MC.
+#if FF_API_GMC
+/**
+ * @deprecated use the "gmc" private option of the libxvid encoder
+ */
+#define CODEC_FLAG_GMC    0x0020  ///< Use GMC.
+#endif
+#if FF_API_MV0
+/**
+ * @deprecated use the flag "mv0" in the "mpv_flags" private option of the
+ * mpegvideo encoders
+ */
+#define CODEC_FLAG_MV0    0x0040
+#endif
+#if FF_API_INPUT_PRESERVED
+/**
+ * @deprecated passing reference-counted frames to the encoders replaces this
+ * flag
+ */
+#define CODEC_FLAG_INPUT_PRESERVED 0x0100
+#endif
+#define CODEC_FLAG_PASS1           0x0200   ///< Use internal 2pass ratecontrol in first pass mode.
+#define CODEC_FLAG_PASS2           0x0400   ///< Use internal 2pass ratecontrol in second pass mode.
+#define CODEC_FLAG_GRAY            0x2000   ///< Only decode/encode grayscale.
+#if FF_API_EMU_EDGE
+/**
+ * @deprecated edges are not used/required anymore. I.e. this flag is now always
+ * set.
+ */
+#define CODEC_FLAG_EMU_EDGE        0x4000
+#endif
+#define CODEC_FLAG_PSNR            0x8000   ///< error[?] variables will be set during encoding.
+#define CODEC_FLAG_TRUNCATED       0x00010000 /** Input bitstream might be truncated at a random
+                                                  location instead of only at frame boundaries. */
+#if FF_API_NORMALIZE_AQP
+/**
+ * @deprecated use the flag "naq" in the "mpv_flags" private option of the
+ * mpegvideo encoders
+ */
+#define CODEC_FLAG_NORMALIZE_AQP  0x00020000
+#endif
+#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT.
+#define CODEC_FLAG_LOW_DELAY      0x00080000 ///< Force low delay.
+#define CODEC_FLAG_GLOBAL_HEADER  0x00400000 ///< Place global headers in extradata instead of every keyframe.
+#define CODEC_FLAG_BITEXACT       0x00800000 ///< Use only bitexact stuff (except (I)DCT).
+/* Fx : Flag for h263+ extra options */
+#define CODEC_FLAG_AC_PRED        0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction
+#define CODEC_FLAG_LOOP_FILTER    0x00000800 ///< loop filter
+#define CODEC_FLAG_INTERLACED_ME  0x20000000 ///< interlaced motion estimation
+#define CODEC_FLAG_CLOSED_GOP     0x80000000
+#define CODEC_FLAG2_FAST          0x00000001 ///< Allow non spec compliant speedup tricks.
+#define CODEC_FLAG2_NO_OUTPUT     0x00000004 ///< Skip bitstream encoding.
+#define CODEC_FLAG2_LOCAL_HEADER  0x00000008 ///< Place global headers at every keyframe instead of in extradata.
+#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. DEPRECATED!!!!
+#define CODEC_FLAG2_IGNORE_CROP   0x00010000 ///< Discard cropping information from SPS.
+
+#define CODEC_FLAG2_CHUNKS        0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
+#define CODEC_FLAG2_SHOW_ALL      0x00400000 ///< Show all frames before the first keyframe
+#define CODEC_FLAG2_EXPORT_MVS    0x10000000 ///< Export motion vectors through frame side data
+#define CODEC_FLAG2_SKIP_MANUAL   0x20000000 ///< Do not skip samples and export skip information as frame side data
+
+/* Unsupported options :
+ *              Syntax Arithmetic coding (SAC)
+ *              Reference Picture Selection
+ *              Independent Segment Decoding */
+/* /Fx */
+/* codec capabilities */
+
+#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback.
+/**
+ * Codec uses get_buffer() for allocating buffers and supports custom allocators.
+ * If not set, it might not use get_buffer() at all or use operations that
+ * assume the buffer was allocated by avcodec_default_get_buffer.
+ */
+#define CODEC_CAP_DR1             0x0002
+#define CODEC_CAP_TRUNCATED       0x0008
+#if FF_API_XVMC
+/* Codec can export data for HW decoding. This flag indicates that
+ * the codec would call get_format() with list that might contain HW accelerated
+ * pixel formats (XvMC, VDPAU, VAAPI, etc). The application can pick any of them
+ * including raw image format.
+ * The application can use the passed context to determine bitstream version,
+ * chroma format, resolution etc.
+ */
+#define CODEC_CAP_HWACCEL         0x0010
+#endif /* FF_API_XVMC */
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed with
+ *       with NULL data. The user can still send NULL data to the public encode
+ *       or decode function, but libavcodec will not pass it along to the codec
+ *       unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ *       flag also means that the encoder must set the pts and duration for
+ *       each output packet. If this flag is not set, the pts and duration will
+ *       be determined by libavcodec from the input frame.
+ */
+#define CODEC_CAP_DELAY           0x0020
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define CODEC_CAP_SMALL_LAST_FRAME 0x0040
+#if FF_API_CAP_VDPAU
+/**
+ * Codec can export data for HW decoding (VDPAU).
+ */
+#define CODEC_CAP_HWACCEL_VDPAU    0x0080
+#endif
+/**
+ * Codec can output multiple frames per AVPacket
+ * Normally demuxers return one frame at a time, demuxers which do not do
+ * are connected to a parser to split what they return into proper frames.
+ * This flag is reserved to the very rare category of codecs which have a
+ * bitstream that cannot be split into frames without timeconsuming
+ * operations like full decoding. Demuxers carring such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages like
+ * prohibiting stream copy in many cases thus it should only be considered
+ * as a last resort.
+ */
+#define CODEC_CAP_SUBFRAMES        0x0100
+/**
+ * Codec is experimental and is thus avoided in favor of non experimental
+ * encoders
+ */
+#define CODEC_CAP_EXPERIMENTAL     0x0200
+/**
+ * Codec should fill in channel configuration and samplerate instead of container
+ */
+#define CODEC_CAP_CHANNEL_CONF     0x0400
+#if FF_API_NEG_LINESIZES
+/**
+ * @deprecated no codecs use this capability
+ */
+#define CODEC_CAP_NEG_LINESIZES    0x0800
+#endif
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define CODEC_CAP_FRAME_THREADS    0x1000
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define CODEC_CAP_SLICE_THREADS    0x2000
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define CODEC_CAP_PARAM_CHANGE     0x4000
+/**
+ * Codec supports avctx->thread_count == 0 (auto).
+ */
+#define CODEC_CAP_AUTO_THREADS     0x8000
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000
+/**
+ * Codec is intra only.
+ */
+#define CODEC_CAP_INTRA_ONLY       0x40000000
+/**
+ * Codec is lossless.
+ */
+#define CODEC_CAP_LOSSLESS         0x80000000
+
+#if FF_API_MB_TYPE
+//The following defines may change, don't expect compatibility if you use them.
+#define MB_TYPE_INTRA4x4   0x0001
+#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific
+#define MB_TYPE_INTRA_PCM  0x0004 //FIXME H.264-specific
+#define MB_TYPE_16x16      0x0008
+#define MB_TYPE_16x8       0x0010
+#define MB_TYPE_8x16       0x0020
+#define MB_TYPE_8x8        0x0040
+#define MB_TYPE_INTERLACED 0x0080
+#define MB_TYPE_DIRECT2    0x0100 //FIXME
+#define MB_TYPE_ACPRED     0x0200
+#define MB_TYPE_GMC        0x0400
+#define MB_TYPE_SKIP       0x0800
+#define MB_TYPE_P0L0       0x1000
+#define MB_TYPE_P1L0       0x2000
+#define MB_TYPE_P0L1       0x4000
+#define MB_TYPE_P1L1       0x8000
+#define MB_TYPE_L0         (MB_TYPE_P0L0 | MB_TYPE_P1L0)
+#define MB_TYPE_L1         (MB_TYPE_P0L1 | MB_TYPE_P1L1)
+#define MB_TYPE_L0L1       (MB_TYPE_L0   | MB_TYPE_L1)
+#define MB_TYPE_QUANT      0x00010000
+#define MB_TYPE_CBP        0x00020000
+//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
+#endif
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan{
+    /**
+     * id
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    int id;
+
+    /**
+     * width and height in 1/16 pel
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    int width;
+    int height;
+
+    /**
+     * position of the top left corner in 1/16 pel for up to 3 fields/frames
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    int16_t position[3][2];
+}AVPanScan;
+
+#if FF_API_QSCALE_TYPE
+#define FF_QSCALE_TYPE_MPEG1 0
+#define FF_QSCALE_TYPE_MPEG2 1
+#define FF_QSCALE_TYPE_H264  2
+#define FF_QSCALE_TYPE_VP56  3
+#endif
+
+#if FF_API_GET_BUFFER
+#define FF_BUFFER_TYPE_INTERNAL 1
+#define FF_BUFFER_TYPE_USER     2 ///< direct rendering buffers (image is (de)allocated by user)
+#define FF_BUFFER_TYPE_SHARED   4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
+#define FF_BUFFER_TYPE_COPY     8 ///< Just a (modified) copy of some other buffer, don't deallocate anything.
+
+#define FF_BUFFER_HINTS_VALID    0x01 // Buffer hints value is meaningful (if 0 ignore).
+#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
+#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
+#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+#endif
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
+
+/**
+ * @defgroup lavc_packet AVPacket
+ *
+ * Types and functions for working with AVPacket.
+ * @{
+ */
+enum AVPacketSideDataType {
+    AV_PKT_DATA_PALETTE,
+    AV_PKT_DATA_NEW_EXTRADATA,
+
+    /**
+     * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+     * @code
+     * u32le param_flags
+     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+     *     s32le channel_count
+     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+     *     u64le channel_layout
+     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+     *     s32le sample_rate
+     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+     *     s32le width
+     *     s32le height
+     * @endcode
+     */
+    AV_PKT_DATA_PARAM_CHANGE,
+
+    /**
+     * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
+     * structures with info about macroblocks relevant to splitting the
+     * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
+     * That is, it does not necessarily contain info about all macroblocks,
+     * as long as the distance between macroblocks in the info is smaller
+     * than the target payload size.
+     * Each MB info structure is 12 bytes, and is laid out as follows:
+     * @code
+     * u32le bit offset from the start of the packet
+     * u8    current quantizer at the start of the macroblock
+     * u8    GOB number
+     * u16le macroblock address within the GOB
+     * u8    horizontal MV predictor
+     * u8    vertical MV predictor
+     * u8    horizontal MV predictor for block number 3
+     * u8    vertical MV predictor for block number 3
+     * @endcode
+     */
+    AV_PKT_DATA_H263_MB_INFO,
+
+    /**
+     * This side data should be associated with an audio stream and contains
+     * ReplayGain information in form of the AVReplayGain struct.
+     */
+    AV_PKT_DATA_REPLAYGAIN,
+
+    /**
+     * This side data contains a 3x3 transformation matrix describing an affine
+     * transformation that needs to be applied to the decoded video frames for
+     * correct presentation.
+     *
+     * See libavutil/display.h for a detailed description of the data.
+     */
+    AV_PKT_DATA_DISPLAYMATRIX,
+
+    /**
+     * This side data should be associated with a video stream and contains
+     * Stereoscopic 3D information in form of the AVStereo3D struct.
+     */
+    AV_PKT_DATA_STEREO3D,
+
+    /**
+     * Recommends skipping the specified number of samples
+     * @code
+     * u32le number of samples to skip from start of this packet
+     * u32le number of samples to skip from end of this packet
+     * u8    reason for start skip
+     * u8    reason for end   skip (0=padding silence, 1=convergence)
+     * @endcode
+     */
+    AV_PKT_DATA_SKIP_SAMPLES=70,
+
+    /**
+     * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that
+     * the packet may contain "dual mono" audio specific to Japanese DTV
+     * and if it is true, recommends only the selected channel to be used.
+     * @code
+     * u8    selected channels (0=main/left, 1=sub/right, 2=both)
+     * @endcode
+     */
+    AV_PKT_DATA_JP_DUALMONO,
+
+    /**
+     * A list of zero terminated key/value strings. There is no end marker for
+     * the list, so it is required to rely on the side data size to stop.
+     */
+    AV_PKT_DATA_STRINGS_METADATA,
+
+    /**
+     * Subtitle event position
+     * @code
+     * u32le x1
+     * u32le y1
+     * u32le x2
+     * u32le y2
+     * @endcode
+     */
+    AV_PKT_DATA_SUBTITLE_POSITION,
+
+    /**
+     * Data found in BlockAdditional element of matroska container. There is
+     * no end marker for the data, so it is required to rely on the side data
+     * size to recognize the end. 8 byte id (as found in BlockAddId) followed
+     * by data.
+     */
+    AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
+
+    /**
+     * The optional first identifier line of a WebVTT cue.
+     */
+    AV_PKT_DATA_WEBVTT_IDENTIFIER,
+
+    /**
+     * The optional settings (rendering instructions) that immediately
+     * follow the timestamp specifier of a WebVTT cue.
+     */
+    AV_PKT_DATA_WEBVTT_SETTINGS,
+
+    /**
+     * A list of zero terminated key/value strings. There is no end marker for
+     * the list, so it is required to rely on the side data size to stop. This
+     * side data includes updated metadata which appeared in the stream.
+     */
+    AV_PKT_DATA_METADATA_UPDATE,
+};
+
+typedef struct AVPacketSideData {
+    uint8_t *data;
+    int      size;
+    enum AVPacketSideDataType type;
+} AVPacketSideData;
+
+/**
+ * This structure stores compressed data. It is typically exported by demuxers
+ * and then passed as input to decoders, or received as output from encoders and
+ * then passed to muxers.
+ *
+ * For video, it should typically contain one compressed frame. For audio it may
+ * contain several compressed frames.
+ *
+ * AVPacket is one of the few structs in FFmpeg, whose size is a part of public
+ * ABI. Thus it may be allocated on stack and no new fields can be added to it
+ * without libavcodec and libavformat major bump.
+ *
+ * The semantics of data ownership depends on the buf or destruct (deprecated)
+ * fields. If either is set, the packet data is dynamically allocated and is
+ * valid indefinitely until av_free_packet() is called (which in turn calls
+ * av_buffer_unref()/the destruct callback to free the data). If neither is set,
+ * the packet data is typically backed by some static buffer somewhere and is
+ * only valid for a limited time (e.g. until the next read call when demuxing).
+ *
+ * The side data is always allocated with av_malloc() and is freed in
+ * av_free_packet().
+ */
+typedef struct AVPacket {
+    /**
+     * A reference to the reference-counted buffer where the packet data is
+     * stored.
+     * May be NULL, then the packet data is not reference-counted.
+     */
+    AVBufferRef *buf;
+    /**
+     * Presentation timestamp in AVStream->time_base units; the time at which
+     * the decompressed packet will be presented to the user.
+     * Can be AV_NOPTS_VALUE if it is not stored in the file.
+     * pts MUST be larger or equal to dts as presentation cannot happen before
+     * decompression, unless one wants to view hex dumps. Some formats misuse
+     * the terms dts and pts/cts to mean something different. Such timestamps
+     * must be converted to true pts/dts before they are stored in AVPacket.
+     */
+    int64_t pts;
+    /**
+     * Decompression timestamp in AVStream->time_base units; the time at which
+     * the packet is decompressed.
+     * Can be AV_NOPTS_VALUE if it is not stored in the file.
+     */
+    int64_t dts;
+    uint8_t *data;
+    int   size;
+    int   stream_index;
+    /**
+     * A combination of AV_PKT_FLAG values
+     */
+    int   flags;
+    /**
+     * Additional packet data that can be provided by the container.
+     * Packet can contain several types of side information.
+     */
+    AVPacketSideData *side_data;
+    int side_data_elems;
+
+    /**
+     * Duration of this packet in AVStream->time_base units, 0 if unknown.
+     * Equals next_pts - this_pts in presentation order.
+     */
+    int   duration;
+#if FF_API_DESTRUCT_PACKET
+    attribute_deprecated
+    void  (*destruct)(struct AVPacket *);
+    attribute_deprecated
+    void  *priv;
+#endif
+    int64_t pos;                            ///< byte position in stream, -1 if unknown
+
+    /**
+     * Time difference in AVStream->time_base units from the pts of this
+     * packet to the point at which the output from the decoder has converged
+     * independent from the availability of previous frames. That is, the
+     * frames are virtually identical no matter if decoding started from
+     * the very first frame or from this keyframe.
+     * Is AV_NOPTS_VALUE if unknown.
+     * This field is not the display duration of the current packet.
+     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
+     * set.
+     *
+     * The purpose of this field is to allow seeking in streams that have no
+     * keyframes in the conventional sense. It corresponds to the
+     * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+     * essential for some types of subtitle streams to ensure that all
+     * subtitles are correctly displayed after seeking.
+     */
+    int64_t convergence_duration;
+} AVPacket;
+#define AV_PKT_FLAG_KEY     0x0001 ///< The packet contains a keyframe
+#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
+
+enum AVSideDataParamChangeFlags {
+    AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT  = 0x0001,
+    AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
+    AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE    = 0x0004,
+    AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS     = 0x0008,
+};
+/**
+ * @}
+ */
+
+struct AVCodecInternal;
+
+enum AVFieldOrder {
+    AV_FIELD_UNKNOWN,
+    AV_FIELD_PROGRESSIVE,
+    AV_FIELD_TT,          ///< Top coded first, top displayed first
+    AV_FIELD_BB,          ///< Bottom coded first, bottom displayed first
+    AV_FIELD_TB,          ///< Top coded first, bottom displayed first
+    AV_FIELD_BT,          ///< Bottom coded first, top displayed first
+};
+
+/**
+ * main external API structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user
+ * applications.
+ * sizeof(AVCodecContext) must not be used outside libav*.
+ */
+typedef struct AVCodecContext {
+    /**
+     * information on struct for av_log
+     * - set by avcodec_alloc_context3
+     */
+    const AVClass *av_class;
+    int log_level_offset;
+
+    enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
+    const struct AVCodec  *codec;
+#if FF_API_CODEC_NAME
+    /**
+     * @deprecated this field is not used for anything in libavcodec
+     */
+    attribute_deprecated
+    char             codec_name[32];
+#endif
+    enum AVCodecID     codec_id; /* see AV_CODEC_ID_xxx */
+
+    /**
+     * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+     * This is used to work around some encoder bugs.
+     * A demuxer should set this to what is stored in the field used to identify the codec.
+     * If there are multiple such fields in a container then the demuxer should choose the one
+     * which maximizes the information about the used codec.
+     * If the codec tag field in a container is larger than 32 bits then the demuxer should
+     * remap the longer ID to 32 bits with a table or other structure. Alternatively a new
+     * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
+     * first.
+     * - encoding: Set by user, if not then the default based on codec_id will be used.
+     * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+     */
+    unsigned int codec_tag;
+
+    /**
+     * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+     * This is used to work around some encoder bugs.
+     * - encoding: unused
+     * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+     */
+    unsigned int stream_codec_tag;
+
+    void *priv_data;
+
+    /**
+     * Private context used for internal data.
+     *
+     * Unlike priv_data, this is not codec-specific. It is used in general
+     * libavcodec functions.
+     */
+    //    struct AVCodecInternal *internal;
+
+    /**
+     * Private data of the user, can be used to carry app specific stuff.
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     */
+    void *opaque;
+
+    /**
+     * the average bitrate
+     * - encoding: Set by user; unused for constant quantizer encoding.
+     * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream.
+     */
+    int bit_rate;
+
+    /**
+     * number of bits the bitstream is allowed to diverge from the reference.
+     *           the reference can be CBR (for CBR pass1) or VBR (for pass2)
+     * - encoding: Set by user; unused for constant quantizer encoding.
+     * - decoding: unused
+     */
+    int bit_rate_tolerance;
+
+    /**
+     * Global quality for codecs which cannot change it per frame.
+     * This should be proportional to MPEG-1/2/4 qscale.
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int global_quality;
+
+    /**
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int compression_level;
+#define FF_COMPRESSION_DEFAULT -1
+
+    /**
+     * CODEC_FLAG_*.
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     */
+    int flags;
+
+    /**
+     * CODEC_FLAG2_*
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     */
+    int flags2;
+
+    /**
+     * some codecs need / can use extradata like Huffman tables.
+     * mjpeg: Huffman tables
+     * rv10: additional flags
+     * mpeg4: global headers (they can be in the bitstream or here)
+     * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger
+     * than extradata_size to avoid problems if it is read with the bitstream reader.
+     * The bytewise contents of extradata must not depend on the architecture or CPU endianness.
+     * - encoding: Set/allocated/freed by libavcodec.
+     * - decoding: Set/allocated/freed by user.
+     */
+    uint8_t *extradata;
+    int extradata_size;
+
+    /**
+     * This is the fundamental unit of time (in seconds) in terms
+     * of which frame timestamps are represented. For fixed-fps content,
+     * timebase should be 1/framerate and timestamp increments should be
+     * identically 1.
+     * This often, but not always is the inverse of the frame rate or field rate
+     * for video.
+     * - encoding: MUST be set by user.
+     * - decoding: the use of this field for decoding is deprecated.
+     *             Use framerate instead.
+     */
+    AVRational time_base;
+
+    /**
+     * For some codecs, the time base is closer to the field rate than the frame rate.
+     * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
+     * if no telecine is used ...
+     *
+     * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
+     */
+    int ticks_per_frame;
+
+    /**
+     * Codec delay.
+     *
+     * Encoding: Number of frames delay there will be from the encoder input to
+     *           the decoder output. (we assume the decoder matches the spec)
+     * Decoding: Number of frames delay in addition to what a standard decoder
+     *           as specified in the spec would produce.
+     *
+     * Video:
+     *   Number of frames the decoded output will be delayed relative to the
+     *   encoded input.
+     *
+     * Audio:
+     *   For encoding, this field is unused (see initial_padding).
+     *
+     *   For decoding, this is the number of samples the decoder needs to
+     *   output before the decoder's output is valid. When seeking, you should
+     *   start decoding this many samples prior to your desired seek point.
+     *
+     * - encoding: Set by libavcodec.
+     * - decoding: Set by libavcodec.
+     */
+    int delay;
+
+
+    /* video only */
+    /**
+     * picture width / height.
+     * - encoding: MUST be set by user.
+     * - decoding: May be set by the user before opening the decoder if known e.g.
+     *             from the container. Some decoders will require the dimensions
+     *             to be set by the caller. During decoding, the decoder may
+     *             overwrite those values as required.
+     */
+    int width, height;
+
+    /**
+     * Bitstream width / height, may be different from width/height e.g. when
+     * the decoded frame is cropped before being output or lowres is enabled.
+     * - encoding: unused
+     * - decoding: May be set by the user before opening the decoder if known
+     *             e.g. from the container. During decoding, the decoder may
+     *             overwrite those values as required.
+     */
+    int coded_width, coded_height;
+
+#if FF_API_ASPECT_EXTENDED
+#define FF_ASPECT_EXTENDED 15
+#endif
+
+    /**
+     * the number of pictures in a group of pictures, or 0 for intra_only
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int gop_size;
+
+    /**
+     * Pixel format, see AV_PIX_FMT_xxx.
+     * May be set by the demuxer if known from headers.
+     * May be overridden by the decoder if it knows better.
+     * - encoding: Set by user.
+     * - decoding: Set by user if known, overridden by libavcodec if known
+     */
+    enum AVPixelFormat pix_fmt;
+
+    /**
+     * Motion estimation algorithm used for video coding.
+     * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex),
+     * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific]
+     * - encoding: MUST be set by user.
+     * - decoding: unused
+     */
+    int me_method;
+
+    /**
+     * If non NULL, 'draw_horiz_band' is called by the libavcodec
+     * decoder to draw a horizontal band. It improves cache usage. Not
+     * all codecs can do that. You must check the codec capabilities
+     * beforehand.
+     * When multithreading is used, it may be called from multiple threads
+     * at the same time; threads might draw different parts of the same AVFrame,
+     * or multiple AVFrames, and there is no guarantee that slices will be drawn
+     * in order.
+     * The function is also used by hardware acceleration APIs.
+     * It is called at least once during frame decoding to pass
+     * the data needed for hardware render.
+     * In that mode instead of pixel data, AVFrame points to
+     * a structure specific to the acceleration API. The application
+     * reads the structure and can change some fields to indicate progress
+     * or mark state.
+     * - encoding: unused
+     * - decoding: Set by user.
+     * @param height the height of the slice
+     * @param y the y position of the slice
+     * @param type 1->top field, 2->bottom field, 3->frame
+     * @param offset offset into the AVFrame.data from which the slice should be read
+     */
+    void (*draw_horiz_band)(struct AVCodecContext *s,
+                            const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
+                            int y, int type, int height);
+
+    /**
+     * callback to negotiate the pixelFormat
+     * @param fmt is the list of formats which are supported by the codec,
+     * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality.
+     * The first is always the native one.
+     * @note The callback may be called again immediately if initialization for
+     * the selected (hardware-accelerated) pixel format failed.
+     * @warning Behavior is undefined if the callback returns a value not
+     * in the fmt list of formats.
+     * @return the chosen format
+     * - encoding: unused
+     * - decoding: Set by user, if not set the native format will be chosen.
+     */
+    enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+
+    /**
+     * maximum number of B-frames between non-B-frames
+     * Note: The output will be delayed by max_b_frames+1 relative to the input.
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int max_b_frames;
+
+    /**
+     * qscale factor between IP and B-frames
+     * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
+     * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    float b_quant_factor;
+
+    /** obsolete FIXME remove */
+    int rc_strategy;
+#define FF_RC_STRATEGY_XVID 1
+
+    int b_frame_strategy;
+
+    /**
+     * qscale offset between IP and B-frames
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    float b_quant_offset;
+
+    /**
+     * Size of the frame reordering buffer in the decoder.
+     * For MPEG-2 it is 1 IPB or 0 low delay IP.
+     * - encoding: Set by libavcodec.
+     * - decoding: Set by libavcodec.
+     */
+    int has_b_frames;
+
+    /**
+     * 0-> h263 quant 1-> mpeg quant
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int mpeg_quant;
+
+    /**
+     * qscale factor between P and I-frames
+     * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).
+     * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    float i_quant_factor;
+
+    /**
+     * qscale offset between P and I-frames
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    float i_quant_offset;
+
+    /**
+     * luminance masking (0-> disabled)
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    float lumi_masking;
+
+    /**
+     * temporal complexity masking (0-> disabled)
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    float temporal_cplx_masking;
+
+    /**
+     * spatial complexity masking (0-> disabled)
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    float spatial_cplx_masking;
+
+    /**
+     * p block masking (0-> disabled)
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    float p_masking;
+
+    /**
+     * darkness masking (0-> disabled)
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    float dark_masking;
+
+    /**
+     * slice count
+     * - encoding: Set by libavcodec.
+     * - decoding: Set by user (or 0).
+     */
+    int slice_count;
+    /**
+     * prediction method (needed for huffyuv)
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+     int prediction_method;
+#define FF_PRED_LEFT   0
+#define FF_PRED_PLANE  1
+#define FF_PRED_MEDIAN 2
+
+    /**
+     * slice offsets in the frame in bytes
+     * - encoding: Set/allocated by libavcodec.
+     * - decoding: Set/allocated by user (or NULL).
+     */
+    int *slice_offset;
+
+    /**
+     * sample aspect ratio (0 if unknown)
+     * That is the width of a pixel divided by the height of the pixel.
+     * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    AVRational sample_aspect_ratio;
+
+    /**
+     * motion estimation comparison function
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int me_cmp;
+    /**
+     * subpixel motion estimation comparison function
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int me_sub_cmp;
+    /**
+     * macroblock comparison function (not supported yet)
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int mb_cmp;
+    /**
+     * interlaced DCT comparison function
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int ildct_cmp;
+#define FF_CMP_SAD    0
+#define FF_CMP_SSE    1
+#define FF_CMP_SATD   2
+#define FF_CMP_DCT    3
+#define FF_CMP_PSNR   4
+#define FF_CMP_BIT    5
+#define FF_CMP_RD     6
+#define FF_CMP_ZERO   7
+#define FF_CMP_VSAD   8
+#define FF_CMP_VSSE   9
+#define FF_CMP_NSSE   10
+#define FF_CMP_W53    11
+#define FF_CMP_W97    12
+#define FF_CMP_DCTMAX 13
+#define FF_CMP_DCT264 14
+#define FF_CMP_CHROMA 256
+
+    /**
+     * ME diamond size & shape
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int dia_size;
+
+    /**
+     * amount of previous MV predictors (2a+1 x 2a+1 square)
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int last_predictor_count;
+
+    /**
+     * prepass for motion estimation
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int pre_me;
+
+    /**
+     * motion estimation prepass comparison function
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int me_pre_cmp;
+
+    /**
+     * ME prepass diamond size & shape
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int pre_dia_size;
+
+    /**
+     * subpel ME quality
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int me_subpel_quality;
+
+#if FF_API_AFD
+    /**
+     * DTG active format information (additional aspect ratio
+     * information only used in DVB MPEG-2 transport streams)
+     * 0 if not set.
+     *
+     * - encoding: unused
+     * - decoding: Set by decoder.
+     * @deprecated Deprecated in favor of AVSideData
+     */
+    attribute_deprecated int dtg_active_format;
+#define FF_DTG_AFD_SAME         8
+#define FF_DTG_AFD_4_3          9
+#define FF_DTG_AFD_16_9         10
+#define FF_DTG_AFD_14_9         11
+#define FF_DTG_AFD_4_3_SP_14_9  13
+#define FF_DTG_AFD_16_9_SP_14_9 14
+#define FF_DTG_AFD_SP_4_3       15
+#endif /* FF_API_AFD */
+
+    /**
+     * maximum motion estimation search range in subpel units
+     * If 0 then no limit.
+     *
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int me_range;
+
+    /**
+     * intra quantizer bias
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int intra_quant_bias;
+#define FF_DEFAULT_QUANT_BIAS 999999
+
+    /**
+     * inter quantizer bias
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int inter_quant_bias;
+
+    /**
+     * slice flags
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    int slice_flags;
+#define SLICE_FLAG_CODED_ORDER    0x0001 ///< draw_horiz_band() is called in coded order instead of display
+#define SLICE_FLAG_ALLOW_FIELD    0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics)
+#define SLICE_FLAG_ALLOW_PLANE    0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
+
+#if FF_API_XVMC
+    /**
+     * XVideo Motion Acceleration
+     * - encoding: forbidden
+     * - decoding: set by decoder
+     * @deprecated XvMC doesn't need it anymore.
+     */
+    attribute_deprecated int xvmc_acceleration;
+#endif /* FF_API_XVMC */
+
+    /**
+     * macroblock decision mode
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int mb_decision;
+#define FF_MB_DECISION_SIMPLE 0        ///< uses mb_cmp
+#define FF_MB_DECISION_BITS   1        ///< chooses the one which needs the fewest bits
+#define FF_MB_DECISION_RD     2        ///< rate distortion
+
+    /**
+     * custom intra quantization matrix
+     * - encoding: Set by user, can be NULL.
+     * - decoding: Set by libavcodec.
+     */
+    uint16_t *intra_matrix;
+
+    /**
+     * custom inter quantization matrix
+     * - encoding: Set by user, can be NULL.
+     * - decoding: Set by libavcodec.
+     */
+    uint16_t *inter_matrix;
+
+    /**
+     * scene change detection threshold
+     * 0 is default, larger means fewer detected scene changes.
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int scenechange_threshold;
+
+    /**
+     * noise reduction strength
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int noise_reduction;
+
+#if FF_API_MPV_OPT
+    /**
+     * @deprecated this field is unused
+     */
+    attribute_deprecated
+    int me_threshold;
+
+    /**
+     * @deprecated this field is unused
+     */
+    attribute_deprecated
+    int mb_threshold;
+#endif
+
+    /**
+     * precision of the intra DC coefficient - 8
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int intra_dc_precision;
+
+    /**
+     * Number of macroblock rows at the top which are skipped.
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    int skip_top;
+
+    /**
+     * Number of macroblock rows at the bottom which are skipped.
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    int skip_bottom;
+
+#if FF_API_MPV_OPT
+    /**
+     * @deprecated use encoder private options instead
+     */
+    attribute_deprecated
+    float border_masking;
+#endif
+
+    /**
+     * minimum MB Lagrange multiplier
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int mb_lmin;
+
+    /**
+     * maximum MB Lagrange multiplier
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int mb_lmax;
+
+    /**
+     *
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int me_penalty_compensation;
+
+    /**
+     *
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int bidir_refine;
+
+    /**
+     *
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int brd_scale;
+
+    /**
+     * minimum GOP size
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int keyint_min;
+
+    /**
+     * number of reference frames
+     * - encoding: Set by user.
+     * - decoding: Set by lavc.
+     */
+    int refs;
+
+    /**
+     * chroma qp offset from luma
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int chromaoffset;
+
+#if FF_API_UNUSED_MEMBERS
+    /**
+     * Multiplied by qscale for each frame and added to scene_change_score.
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    attribute_deprecated int scenechange_factor;
+#endif
+
+    /**
+     *
+     * Note: Value depends upon the compare function used for fullpel ME.
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int mv0_threshold;
+
+    /**
+     * Adjust sensitivity of b_frame_strategy 1.
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int b_sensitivity;
+
+    /**
+     * Chromaticity coordinates of the source primaries.
+     * - encoding: Set by user
+     * - decoding: Set by libavcodec
+     */
+    enum AVColorPrimaries color_primaries;
+
+    /**
+     * Color Transfer Characteristic.
+     * - encoding: Set by user
+     * - decoding: Set by libavcodec
+     */
+    enum AVColorTransferCharacteristic color_trc;
+
+    /**
+     * YUV colorspace type.
+     * - encoding: Set by user
+     * - decoding: Set by libavcodec
+     */
+    enum AVColorSpace colorspace;
+
+    /**
+     * MPEG vs JPEG YUV range.
+     * - encoding: Set by user
+     * - decoding: Set by libavcodec
+     */
+    enum AVColorRange color_range;
+
+    /**
+     * This defines the location of chroma samples.
+     * - encoding: Set by user
+     * - decoding: Set by libavcodec
+     */
+    enum AVChromaLocation chroma_sample_location;
+
+    /**
+     * Number of slices.
+     * Indicates number of picture subdivisions. Used for parallelized
+     * decoding.
+     * - encoding: Set by user
+     * - decoding: unused
+     */
+    int slices;
+
+    /** Field order
+     * - encoding: set by libavcodec
+     * - decoding: Set by user.
+     */
+    enum AVFieldOrder field_order;
+
+    /* audio only */
+    int sample_rate; ///< samples per second
+    int channels;    ///< number of audio channels
+
+    /**
+     * audio sample format
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    enum AVSampleFormat sample_fmt;  ///< sample format
+
+    /* The following data should not be initialized. */
+    /**
+     * Number of samples per channel in an audio frame.
+     *
+     * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame
+     *   except the last must contain exactly frame_size samples per channel.
+     *   May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the
+     *   frame size is not restricted.
+     * - decoding: may be set by some decoders to indicate constant frame size
+     */
+    int frame_size;
+
+    /**
+     * Frame counter, set by libavcodec.
+     *
+     * - decoding: total number of frames returned from the decoder so far.
+     * - encoding: total number of frames passed to the encoder so far.
+     *
+     *   @note the counter is not incremented if encoding/decoding resulted in
+     *   an error.
+     */
+    int frame_number;
+
+    /**
+     * number of bytes per packet if constant and known or 0
+     * Used by some WAV based audio codecs.
+     */
+    int block_align;
+
+    /**
+     * Audio cutoff bandwidth (0 means "automatic")
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int cutoff;
+
+#if FF_API_REQUEST_CHANNELS
+    /**
+     * Decoder should decode to this many channels if it can (0 for default)
+     * - encoding: unused
+     * - decoding: Set by user.
+     * @deprecated Deprecated in favor of request_channel_layout.
+     */
+    attribute_deprecated int request_channels;
+#endif
+
+    /**
+     * Audio channel layout.
+     * - encoding: set by user.
+     * - decoding: set by user, may be overwritten by libavcodec.
+     */
+    uint64_t channel_layout;
+
+    /**
+     * Request decoder to use this channel layout if it can (0 for default)
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    uint64_t request_channel_layout;
+
+    /**
+     * Type of service that the audio stream conveys.
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    enum AVAudioServiceType audio_service_type;
+
+    /**
+     * desired sample format
+     * - encoding: Not used.
+     * - decoding: Set by user.
+     * Decoder will decode to this format if it can.
+     */
+    enum AVSampleFormat request_sample_fmt;
+
+#if FF_API_GET_BUFFER
+    /**
+     * Called at the beginning of each frame to get a buffer for it.
+     *
+     * The function will set AVFrame.data[], AVFrame.linesize[].
+     * AVFrame.extended_data[] must also be set, but it should be the same as
+     * AVFrame.data[] except for planar audio with more channels than can fit
+     * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as
+     * many data pointers as it can hold.
+     *
+     * if CODEC_CAP_DR1 is not set then get_buffer() must call
+     * avcodec_default_get_buffer() instead of providing buffers allocated by
+     * some other means.
+     *
+     * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't
+     * need it. avcodec_default_get_buffer() aligns the output buffer properly,
+     * but if get_buffer() is overridden then alignment considerations should
+     * be taken into account.
+     *
+     * @see avcodec_default_get_buffer()
+     *
+     * Video:
+     *
+     * If pic.reference is set then the frame will be read later by libavcodec.
+     * avcodec_align_dimensions2() should be used to find the required width and
+     * height, as they normally need to be rounded up to the next multiple of 16.
+     *
+     * If frame multithreading is used and thread_safe_callbacks is set,
+     * it may be called from a different thread, but not from more than one at
+     * once. Does not need to be reentrant.
+     *
+     * @see release_buffer(), reget_buffer()
+     * @see avcodec_align_dimensions2()
+     *
+     * Audio:
+     *
+     * Decoders request a buffer of a particular size by setting
+     * AVFrame.nb_samples prior to calling get_buffer(). The decoder may,
+     * however, utilize only part of the buffer by setting AVFrame.nb_samples
+     * to a smaller value in the output frame.
+     *
+     * Decoders cannot use the buffer after returning from
+     * avcodec_decode_audio4(), so they will not call release_buffer(), as it
+     * is assumed to be released immediately upon return. In some rare cases,
+     * a decoder may need to call get_buffer() more than once in a single
+     * call to avcodec_decode_audio4(). In that case, when get_buffer() is
+     * called again after it has already been called once, the previously
+     * acquired buffer is assumed to be released at that time and may not be
+     * reused by the decoder.
+     *
+     * As a convenience, av_samples_get_buffer_size() and
+     * av_samples_fill_arrays() in libavutil may be used by custom get_buffer()
+     * functions to find the required data size and to fill data pointers and
+     * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+     * since all planes must be the same size.
+     *
+     * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+     *
+     * - encoding: unused
+     * - decoding: Set by libavcodec, user can override.
+     *
+     * @deprecated use get_buffer2()
+     */
+    attribute_deprecated
+    int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+    /**
+     * Called to release buffers which were allocated with get_buffer.
+     * A released buffer can be reused in get_buffer().
+     * pic.data[*] must be set to NULL.
+     * May be called from a different thread if frame multithreading is used,
+     * but not by more than one thread at once, so does not need to be reentrant.
+     * - encoding: unused
+     * - decoding: Set by libavcodec, user can override.
+     *
+     * @deprecated custom freeing callbacks should be set from get_buffer2()
+     */
+    attribute_deprecated
+    void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+    /**
+     * Called at the beginning of a frame to get cr buffer for it.
+     * Buffer type (size, hints) must be the same. libavcodec won't check it.
+     * libavcodec will pass previous buffer in pic, function should return
+     * same buffer or new buffer with old frame "painted" into it.
+     * If pic.data[0] == NULL must behave like get_buffer().
+     * if CODEC_CAP_DR1 is not set then reget_buffer() must call
+     * avcodec_default_reget_buffer() instead of providing buffers allocated by
+     * some other means.
+     * - encoding: unused
+     * - decoding: Set by libavcodec, user can override.
+     */
+    attribute_deprecated
+    int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+#endif
+
+    /**
+     * This callback is called at the beginning of each frame to get data
+     * buffer(s) for it. There may be one contiguous buffer for all the data or
+     * there may be a buffer per each data plane or anything in between. What
+     * this means is, you may set however many entries in buf[] you feel necessary.
+     * Each buffer must be reference-counted using the AVBuffer API (see description
+     * of buf[] below).
+     *
+     * The following fields will be set in the frame before this callback is
+     * called:
+     * - format
+     * - width, height (video only)
+     * - sample_rate, channel_layout, nb_samples (audio only)
+     * Their values may differ from the corresponding values in
+     * AVCodecContext. This callback must use the frame values, not the codec
+     * context values, to calculate the required buffer size.
+     *
+     * This callback must fill the following fields in the frame:
+     * - data[]
+     * - linesize[]
+     * - extended_data:
+     *   * if the data is planar audio with more than 8 channels, then this
+     *     callback must allocate and fill extended_data to contain all pointers
+     *     to all data planes. data[] must hold as many pointers as it can.
+     *     extended_data must be allocated with av_malloc() and will be freed in
+     *     av_frame_unref().
+     *   * otherwise extended_data must point to data
+     * - buf[] must contain one or more pointers to AVBufferRef structures. Each of
+     *   the frame's data and extended_data pointers must be contained in these. That
+     *   is, one AVBufferRef for each allocated chunk of memory, not necessarily one
+     *   AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(),
+     *   and av_buffer_ref().
+     * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+     *   this callback and filled with the extra buffers if there are more
+     *   buffers than buf[] can hold. extended_buf will be freed in
+     *   av_frame_unref().
+     *
+     * If CODEC_CAP_DR1 is not set then get_buffer2() must call
+     * avcodec_default_get_buffer2() instead of providing buffers allocated by
+     * some other means.
+     *
+     * Each data plane must be aligned to the maximum required by the target
+     * CPU.
+     *
+     * @see avcodec_default_get_buffer2()
+     *
+     * Video:
+     *
+     * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+     * (read and/or written to if it is writable) later by libavcodec.
+     *
+     * avcodec_align_dimensions2() should be used to find the required width and
+     * height, as they normally need to be rounded up to the next multiple of 16.
+     *
+     * Some decoders do not support linesizes changing between frames.
+     *
+     * If frame multithreading is used and thread_safe_callbacks is set,
+     * this callback may be called from a different thread, but not from more
+     * than one at once. Does not need to be reentrant.
+     *
+     * @see avcodec_align_dimensions2()
+     *
+     * Audio:
+     *
+     * Decoders request a buffer of a particular size by setting
+     * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+     * however, utilize only part of the buffer by setting AVFrame.nb_samples
+     * to a smaller value in the output frame.
+     *
+     * As a convenience, av_samples_get_buffer_size() and
+     * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+     * functions to find the required data size and to fill data pointers and
+     * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+     * since all planes must be the same size.
+     *
+     * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+     *
+     * - encoding: unused
+     * - decoding: Set by libavcodec, user can override.
+     */
+    int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
+
+    /**
+     * If non-zero, the decoded audio and video frames returned from
+     * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted
+     * and are valid indefinitely. The caller must free them with
+     * av_frame_unref() when they are not needed anymore.
+     * Otherwise, the decoded frames must not be freed by the caller and are
+     * only valid until the next decode call.
+     *
+     * - encoding: unused
+     * - decoding: set by the caller before avcodec_open2().
+     */
+    int refcounted_frames;
+
+    /* - encoding parameters */
+    float qcompress;  ///< amount of qscale change between easy & hard scenes (0.0-1.0)
+    float qblur;      ///< amount of qscale smoothing over time (0.0-1.0)
+
+    /**
+     * minimum quantizer
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int qmin;
+
+    /**
+     * maximum quantizer
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int qmax;
+
+    /**
+     * maximum quantizer difference between frames
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int max_qdiff;
+
+#if FF_API_MPV_OPT
+    /**
+     * @deprecated use encoder private options instead
+     */
+    attribute_deprecated
+    float rc_qsquish;
+
+    attribute_deprecated
+    float rc_qmod_amp;
+    attribute_deprecated
+    int rc_qmod_freq;
+#endif
+
+    /**
+     * decoder bitstream buffer size
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int rc_buffer_size;
+
+    /**
+     * ratecontrol override, see RcOverride
+     * - encoding: Allocated/set/freed by user.
+     * - decoding: unused
+     */
+    int rc_override_count;
+    RcOverride *rc_override;
+
+#if FF_API_MPV_OPT
+    /**
+     * @deprecated use encoder private options instead
+     */
+    attribute_deprecated
+    const char *rc_eq;
+#endif
+
+    /**
+     * maximum bitrate
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    int rc_max_rate;
+
+    /**
+     * minimum bitrate
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int rc_min_rate;
+
+#if FF_API_MPV_OPT
+    /**
+     * @deprecated use encoder private options instead
+     */
+    attribute_deprecated
+    float rc_buffer_aggressivity;
+
+    attribute_deprecated
+    float rc_initial_cplx;
+#endif
+
+    /**
+     * Ratecontrol will attempt to use, at maximum, <value> of what can be used without an underflow.
+     * - encoding: Set by user.
+     * - decoding: unused.
+     */
+    float rc_max_available_vbv_use;
+
+    /**
+     * Ratecontrol will attempt to use, at least, <value> times the amount needed to prevent a vbv overflow.
+     * - encoding: Set by user.
+     * - decoding: unused.
+     */
+    float rc_min_vbv_overflow_use;
+
+    /**
+     * Number of bits which should be loaded into the rc buffer before decoding starts.
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int rc_initial_buffer_occupancy;
+
+#define FF_CODER_TYPE_VLC       0
+#define FF_CODER_TYPE_AC        1
+#define FF_CODER_TYPE_RAW       2
+#define FF_CODER_TYPE_RLE       3
+#if FF_API_UNUSED_MEMBERS
+#define FF_CODER_TYPE_DEFLATE   4
+#endif /* FF_API_UNUSED_MEMBERS */
+    /**
+     * coder type
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int coder_type;
+
+    /**
+     * context model
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int context_model;
+
+#if FF_API_MPV_OPT
+    /**
+     * @deprecated use encoder private options instead
+     */
+    attribute_deprecated
+    int lmin;
+
+    /**
+     * @deprecated use encoder private options instead
+     */
+    attribute_deprecated
+    int lmax;
+#endif
+
+    /**
+     * frame skip threshold
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int frame_skip_threshold;
+
+    /**
+     * frame skip factor
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int frame_skip_factor;
+
+    /**
+     * frame skip exponent
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int frame_skip_exp;
+
+    /**
+     * frame skip comparison function
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int frame_skip_cmp;
+
+    /**
+     * trellis RD quantization
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int trellis;
+
+    /**
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int min_prediction_order;
+
+    /**
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int max_prediction_order;
+
+    /**
+     * GOP timecode frame start number
+     * - encoding: Set by user, in non drop frame format
+     * - decoding: Set by libavcodec (timecode in the 25 bits format, -1 if unset)
+     */
+    int64_t timecode_frame_start;
+
+    /* The RTP callback: This function is called    */
+    /* every time the encoder has a packet to send. */
+    /* It depends on the encoder if the data starts */
+    /* with a Start Code (it should). H.263 does.   */
+    /* mb_nb contains the number of macroblocks     */
+    /* encoded in the RTP payload.                  */
+    void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);
+
+    int rtp_payload_size;   /* The size of the RTP payload: the coder will  */
+                            /* do its best to deliver a chunk with size     */
+                            /* below rtp_payload_size, the chunk will start */
+                            /* with a start code on some codecs like H.263. */
+                            /* This doesn't take account of any particular  */
+                            /* headers inside the transmitted RTP payload.  */
+
+    /* statistics, used for 2-pass encoding */
+    int mv_bits;
+    int header_bits;
+    int i_tex_bits;
+    int p_tex_bits;
+    int i_count;
+    int p_count;
+    int skip_count;
+    int misc_bits;
+
+    /**
+     * number of bits used for the previously encoded frame
+     * - encoding: Set by libavcodec.
+     * - decoding: unused
+     */
+    int frame_bits;
+
+    /**
+     * pass1 encoding statistics output buffer
+     * - encoding: Set by libavcodec.
+     * - decoding: unused
+     */
+    char *stats_out;
+
+    /**
+     * pass2 encoding statistics input buffer
+     * Concatenated stuff from stats_out of pass1 should be placed here.
+     * - encoding: Allocated/set/freed by user.
+     * - decoding: unused
+     */
+    char *stats_in;
+
+    /**
+     * Work around bugs in encoders which sometimes cannot be detected automatically.
+     * - encoding: Set by user
+     * - decoding: Set by user
+     */
+    int workaround_bugs;
+#define FF_BUG_AUTODETECT       1  ///< autodetection
+#if FF_API_OLD_MSMPEG4
+#define FF_BUG_OLD_MSMPEG4      2
+#endif
+#define FF_BUG_XVID_ILACE       4
+#define FF_BUG_UMP4             8
+#define FF_BUG_NO_PADDING       16
+#define FF_BUG_AMV              32
+#if FF_API_AC_VLC
+#define FF_BUG_AC_VLC           0  ///< Will be removed, libavcodec can now handle these non-compliant files by default.
+#endif
+#define FF_BUG_QPEL_CHROMA      64
+#define FF_BUG_STD_QPEL         128
+#define FF_BUG_QPEL_CHROMA2     256
+#define FF_BUG_DIRECT_BLOCKSIZE 512
+#define FF_BUG_EDGE             1024
+#define FF_BUG_HPEL_CHROMA      2048
+#define FF_BUG_DC_CLIP          4096
+#define FF_BUG_MS               8192 ///< Work around various bugs in Microsoft's broken decoders.
+#define FF_BUG_TRUNCATED       16384
+
+    /**
+     * strictly follow the standard (MPEG4, ...).
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     * Setting this to STRICT or higher means the encoder and decoder will
+     * generally do stupid things, whereas setting it to unofficial or lower
+     * will mean the encoder might produce output that is not supported by all
+     * spec-compliant decoders. Decoders don't differentiate between normal,
+     * unofficial and experimental (that is, they always try to decode things
+     * when they can) unless they are explicitly asked to behave stupidly
+     * (=strictly conform to the specs)
+     */
+    int strict_std_compliance;
+#define FF_COMPLIANCE_VERY_STRICT   2 ///< Strictly conform to an older more strict version of the spec or reference software.
+#define FF_COMPLIANCE_STRICT        1 ///< Strictly conform to all the things in the spec no matter what consequences.
+#define FF_COMPLIANCE_NORMAL        0
+#define FF_COMPLIANCE_UNOFFICIAL   -1 ///< Allow unofficial extensions
+#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
+
+    /**
+     * error concealment flags
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    int error_concealment;
+#define FF_EC_GUESS_MVS   1
+#define FF_EC_DEBLOCK     2
+#define FF_EC_FAVOR_INTER 256
+
+    /**
+     * debug
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     */
+    int debug;
+#define FF_DEBUG_PICT_INFO   1
+#define FF_DEBUG_RC          2
+#define FF_DEBUG_BITSTREAM   4
+#define FF_DEBUG_MB_TYPE     8
+#define FF_DEBUG_QP          16
+#if FF_API_DEBUG_MV
+/**
+ * @deprecated this option does nothing
+ */
+#define FF_DEBUG_MV          32
+#endif
+#define FF_DEBUG_DCT_COEFF   0x00000040
+#define FF_DEBUG_SKIP        0x00000080
+#define FF_DEBUG_STARTCODE   0x00000100
+#if FF_API_UNUSED_MEMBERS
+#define FF_DEBUG_PTS         0x00000200
+#endif /* FF_API_UNUSED_MEMBERS */
+#define FF_DEBUG_ER          0x00000400
+#define FF_DEBUG_MMCO        0x00000800
+#define FF_DEBUG_BUGS        0x00001000
+#if FF_API_DEBUG_MV
+#define FF_DEBUG_VIS_QP      0x00002000 ///< only access through AVOptions from outside libavcodec
+#define FF_DEBUG_VIS_MB_TYPE 0x00004000 ///< only access through AVOptions from outside libavcodec
+#endif
+#define FF_DEBUG_BUFFERS     0x00008000
+#define FF_DEBUG_THREADS     0x00010000
+#define FF_DEBUG_NOMC        0x01000000
+
+#if FF_API_DEBUG_MV
+    /**
+     * debug
+     * Code outside libavcodec should access this field using AVOptions
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     */
+    int debug_mv;
+#define FF_DEBUG_VIS_MV_P_FOR  0x00000001 //visualize forward predicted MVs of P frames
+#define FF_DEBUG_VIS_MV_B_FOR  0x00000002 //visualize forward predicted MVs of B frames
+#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
+#endif
+
+    /**
+     * Error recognition; may misdetect some more or less valid parts as errors.
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    int err_recognition;
+
+/**
+ * Verify checksums embedded in the bitstream (could be of either encoded or
+ * decoded data, depending on the codec) and print an error message on mismatch.
+ * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the
+ * decoder returning an error.
+ */
+#define AV_EF_CRCCHECK  (1<<0)
+#define AV_EF_BITSTREAM (1<<1)          ///< detect bitstream specification deviations
+#define AV_EF_BUFFER    (1<<2)          ///< detect improper bitstream length
+#define AV_EF_EXPLODE   (1<<3)          ///< abort decoding on minor error detection
+
+#define AV_EF_IGNORE_ERR (1<<15)        ///< ignore errors and continue
+#define AV_EF_CAREFUL    (1<<16)        ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors
+#define AV_EF_COMPLIANT  (1<<17)        ///< consider all spec non compliances as errors
+#define AV_EF_AGGRESSIVE (1<<18)        ///< consider things that a sane encoder should not do as an error
+
+
+    /**
+     * opaque 64bit number (generally a PTS) that will be reordered and
+     * output in AVFrame.reordered_opaque
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    int64_t reordered_opaque;
+
+    /**
+     * Hardware accelerator in use
+     * - encoding: unused.
+     * - decoding: Set by libavcodec
+     */
+    struct AVHWAccel *hwaccel;
+
+    /**
+     * Hardware accelerator context.
+     * For some hardware accelerators, a global context needs to be
+     * provided by the user. In that case, this holds display-dependent
+     * data FFmpeg cannot instantiate itself. Please refer to the
+     * FFmpeg HW accelerator documentation to know how to fill this
+     * in. e.g. for VA API, this is a struct vaapi_context.
+     * - encoding: unused
+     * - decoding: Set by user
+     */
+    void *hwaccel_context;
+
+    /**
+     * error
+     * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
+     * - decoding: unused
+     */
+    uint64_t error[AV_NUM_DATA_POINTERS];
+
+    /**
+     * DCT algorithm, see FF_DCT_* below
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+    int dct_algo;
+#define FF_DCT_AUTO    0
+#define FF_DCT_FASTINT 1
+#if FF_API_UNUSED_MEMBERS
+#define FF_DCT_INT     2
+#endif /* FF_API_UNUSED_MEMBERS */
+#define FF_DCT_MMX     3
+#define FF_DCT_ALTIVEC 5
+#define FF_DCT_FAAN    6
+
+    /**
+     * IDCT algorithm, see FF_IDCT_* below.
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     */
+    int idct_algo;
+#define FF_IDCT_AUTO          0
+#define FF_IDCT_INT           1
+#define FF_IDCT_SIMPLE        2
+#define FF_IDCT_SIMPLEMMX     3
+#define FF_IDCT_ARM           7
+#define FF_IDCT_ALTIVEC       8
+#if FF_API_ARCH_SH4
+#define FF_IDCT_SH4           9
+#endif
+#define FF_IDCT_SIMPLEARM     10
+#if FF_API_UNUSED_MEMBERS
+#define FF_IDCT_IPP           13
+#endif /* FF_API_UNUSED_MEMBERS */
+#define FF_IDCT_XVID          14
+#if FF_API_IDCT_XVIDMMX
+#define FF_IDCT_XVIDMMX       14
+#endif /* FF_API_IDCT_XVIDMMX */
+#define FF_IDCT_SIMPLEARMV5TE 16
+#define FF_IDCT_SIMPLEARMV6   17
+#if FF_API_ARCH_SPARC
+#define FF_IDCT_SIMPLEVIS     18
+#endif
+#define FF_IDCT_FAAN          20
+#define FF_IDCT_SIMPLENEON    22
+#if FF_API_ARCH_ALPHA
+#define FF_IDCT_SIMPLEALPHA   23
+#endif
+#define FF_IDCT_SIMPLEAUTO    128
+
+    /**
+     * bits per sample/pixel from the demuxer (needed for huffyuv).
+     * - encoding: Set by libavcodec.
+     * - decoding: Set by user.
+     */
+     int bits_per_coded_sample;
+
+    /**
+     * Bits per sample/pixel of internal libavcodec pixel/sample format.
+     * - encoding: set by user.
+     * - decoding: set by libavcodec.
+     */
+    int bits_per_raw_sample;
+
+#if FF_API_LOWRES
+    /**
+     * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+     * - encoding: unused
+     * - decoding: Set by user.
+     * Code outside libavcodec should access this field using:
+     * av_codec_{get,set}_lowres(avctx)
+     */
+     int lowres;
+#endif
+
+    /**
+     * the picture in the bitstream
+     * - encoding: Set by libavcodec.
+     * - decoding: unused
+     */
+    AVFrame *coded_frame;
+
+    /**
+     * thread count
+     * is used to decide how many independent tasks should be passed to execute()
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     */
+    int thread_count;
+
+    /**
+     * Which multithreading methods to use.
+     * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,
+     * so clients which cannot provide future frames should not use it.
+     *
+     * - encoding: Set by user, otherwise the default is used.
+     * - decoding: Set by user, otherwise the default is used.
+     */
+    int thread_type;
+#define FF_THREAD_FRAME   1 ///< Decode more than one frame at once
+#define FF_THREAD_SLICE   2 ///< Decode more than one part of a single frame at once
+
+    /**
+     * Which multithreading methods are in use by the codec.
+     * - encoding: Set by libavcodec.
+     * - decoding: Set by libavcodec.
+     */
+    int active_thread_type;
+
+    /**
+     * Set by the client if its custom get_buffer() callback can be called
+     * synchronously from another thread, which allows faster multithreaded decoding.
+     * draw_horiz_band() will be called from other threads regardless of this setting.
+     * Ignored if the default get_buffer() is used.
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     */
+    int thread_safe_callbacks;
+
+    /**
+     * The codec may call this to execute several independent things.
+     * It will return only after finishing all tasks.
+     * The user may replace this with some multithreaded implementation,
+     * the default implementation will execute the parts serially.
+     * @param count the number of things to execute
+     * - encoding: Set by libavcodec, user can override.
+     * - decoding: Set by libavcodec, user can override.
+     */
+    int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);
+
+    /**
+     * The codec may call this to execute several independent things.
+     * It will return only after finishing all tasks.
+     * The user may replace this with some multithreaded implementation,
+     * the default implementation will execute the parts serially.
+     * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
+     * @param c context passed also to func
+     * @param count the number of things to execute
+     * @param arg2 argument passed unchanged to func
+     * @param ret return values of executed functions, must have space for "count" values. May be NULL.
+     * @param func function that will be called count times, with jobnr from 0 to count-1.
+     *             threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no
+     *             two instances of func executing at the same time will have the same threadnr.
+     * @return always 0 currently, but code should handle a future improvement where when any call to func
+     *         returns < 0 no further calls to func may be done and < 0 is returned.
+     * - encoding: Set by libavcodec, user can override.
+     * - decoding: Set by libavcodec, user can override.
+     */
+    int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);
+
+#if FF_API_THREAD_OPAQUE
+    /**
+     * @deprecated this field should not be used from outside of lavc
+     */
+    attribute_deprecated
+    void *thread_opaque;
+#endif
+
+    /**
+     * noise vs. sse weight for the nsse comparison function
+     * - encoding: Set by user.
+     * - decoding: unused
+     */
+     int nsse_weight;
+
+    /**
+     * profile
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+     int profile;
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_RESERVED -100
+
+#define FF_PROFILE_AAC_MAIN 0
+#define FF_PROFILE_AAC_LOW  1
+#define FF_PROFILE_AAC_SSR  2
+#define FF_PROFILE_AAC_LTP  3
+#define FF_PROFILE_AAC_HE   4
+#define FF_PROFILE_AAC_HE_V2 28
+#define FF_PROFILE_AAC_LD   22
+#define FF_PROFILE_AAC_ELD  38
+#define FF_PROFILE_MPEG2_AAC_LOW 128
+#define FF_PROFILE_MPEG2_AAC_HE  131
+
+#define FF_PROFILE_DTS         20
+#define FF_PROFILE_DTS_ES      30
+#define FF_PROFILE_DTS_96_24   40
+#define FF_PROFILE_DTS_HD_HRA  50
+#define FF_PROFILE_DTS_HD_MA   60
+
+#define FF_PROFILE_MPEG2_422    0
+#define FF_PROFILE_MPEG2_HIGH   1
+#define FF_PROFILE_MPEG2_SS     2
+#define FF_PROFILE_MPEG2_SNR_SCALABLE  3
+#define FF_PROFILE_MPEG2_MAIN   4
+#define FF_PROFILE_MPEG2_SIMPLE 5
+
+#define FF_PROFILE_H264_CONSTRAINED  (1<<9)  // 8+1; constraint_set1_flag
+#define FF_PROFILE_H264_INTRA        (1<<11) // 8+3; constraint_set3_flag
+
+#define FF_PROFILE_H264_BASELINE             66
+#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED)
+#define FF_PROFILE_H264_MAIN                 77
+#define FF_PROFILE_H264_EXTENDED             88
+#define FF_PROFILE_H264_HIGH                 100
+#define FF_PROFILE_H264_HIGH_10              110
+#define FF_PROFILE_H264_HIGH_10_INTRA        (110|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_422             122
+#define FF_PROFILE_H264_HIGH_422_INTRA       (122|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_444             144
+#define FF_PROFILE_H264_HIGH_444_PREDICTIVE  244
+#define FF_PROFILE_H264_HIGH_444_INTRA       (244|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_CAVLC_444            44
+
+#define FF_PROFILE_VC1_SIMPLE   0
+#define FF_PROFILE_VC1_MAIN     1
+#define FF_PROFILE_VC1_COMPLEX  2
+#define FF_PROFILE_VC1_ADVANCED 3
+
+#define FF_PROFILE_MPEG4_SIMPLE                     0
+#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE            1
+#define FF_PROFILE_MPEG4_CORE                       2
+#define FF_PROFILE_MPEG4_MAIN                       3
+#define FF_PROFILE_MPEG4_N_BIT                      4
+#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE           5
+#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION      6
+#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE     7
+#define FF_PROFILE_MPEG4_HYBRID                     8
+#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME         9
+#define FF_PROFILE_MPEG4_CORE_SCALABLE             10
+#define FF_PROFILE_MPEG4_ADVANCED_CODING           11
+#define FF_PROFILE_MPEG4_ADVANCED_CORE             12
+#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define FF_PROFILE_MPEG4_SIMPLE_STUDIO             14
+#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE           15
+
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0   0
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1   1
+#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION  2
+#define FF_PROFILE_JPEG2000_DCINEMA_2K              3
+#define FF_PROFILE_JPEG2000_DCINEMA_4K              4
+
+
+#define FF_PROFILE_HEVC_MAIN                        1
+#define FF_PROFILE_HEVC_MAIN_10                     2
+#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE          3
+#define FF_PROFILE_HEVC_REXT                        4
+
+    /**
+     * level
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+     int level;
+#define FF_LEVEL_UNKNOWN -99
+
+    /**
+     * Skip loop filtering for selected frames.
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    enum AVDiscard skip_loop_filter;
+
+    /**
+     * Skip IDCT/dequantization for selected frames.
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    enum AVDiscard skip_idct;
+
+    /**
+     * Skip decoding for selected frames.
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    enum AVDiscard skip_frame;
+
+    /**
+     * Header containing style information for text subtitles.
+     * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
+     * [Script Info] and [V4+ Styles] section, plus the [Events] line and
+     * the Format line following. It shouldn't include any Dialogue line.
+     * - encoding: Set/allocated/freed by user (before avcodec_open2())
+     * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
+     */
+    uint8_t *subtitle_header;
+    int subtitle_header_size;
+
+#if FF_API_ERROR_RATE
+    /**
+     * @deprecated use the 'error_rate' private AVOption of the mpegvideo
+     * encoders
+     */
+    attribute_deprecated
+    int error_rate;
+#endif
+
+#if FF_API_CODEC_PKT
+    /**
+     * @deprecated this field is not supposed to be accessed from outside lavc
+     */
+    attribute_deprecated
+    AVPacket *pkt;
+#endif
+
+    /**
+     * VBV delay coded in the last frame (in periods of a 27 MHz clock).
+     * Used for compliant TS muxing.
+     * - encoding: Set by libavcodec.
+     * - decoding: unused.
+     */
+    uint64_t vbv_delay;
+
+    /**
+     * Encoding only. Allow encoders to output packets that do not contain any
+     * encoded data, only side data.
+     *
+     * Some encoders need to output such packets, e.g. to update some stream
+     * parameters at the end of encoding.
+     *
+     * All callers are strongly recommended to set this option to 1 and update
+     * their code to deal with such packets, since this behaviour may become
+     * always enabled in the future (then this option will be deprecated and
+     * later removed). To avoid ABI issues when this happens, the callers should
+     * use AVOptions to set this field.
+     */
+    int side_data_only_packets;
+
+    /**
+     * Audio only. The number of "priming" samples (padding) inserted by the
+     * encoder at the beginning of the audio. I.e. this number of leading
+     * decoded samples must be discarded by the caller to get the original audio
+     * without leading padding.
+     *
+     * - decoding: unused
+     * - encoding: Set by libavcodec. The timestamps on the output packets are
+     *             adjusted by the encoder so that they always refer to the
+     *             first sample of the data actually contained in the packet,
+     *             including any added padding.  E.g. if the timebase is
+     *             1/samplerate and the timestamp of the first input sample is
+     *             0, the timestamp of the first output packet will be
+     *             -initial_padding.
+     */
+    int initial_padding;
+
+    /**
+     * - decoding: For codecs that store a framerate value in the compressed
+     *             bitstream, the decoder may export it here. {0, 1} when
+     *             unknown.
+     * - encoding: unused
+     */
+    AVRational framerate;
+
+    /**
+     * Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
+     * Code outside libavcodec should access this field using:
+     * av_codec_{get,set}_pkt_timebase(avctx)
+     * - encoding unused.
+     * - decoding set by user.
+     */
+    AVRational pkt_timebase;
+
+    /**
+     * AVCodecDescriptor
+     * Code outside libavcodec should access this field using:
+     * av_codec_{get,set}_codec_descriptor(avctx)
+     * - encoding: unused.
+     * - decoding: set by libavcodec.
+     */
+    const AVCodecDescriptor *codec_descriptor;
+
+#if !FF_API_LOWRES
+    /**
+     * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+     * - encoding: unused
+     * - decoding: Set by user.
+     * Code outside libavcodec should access this field using:
+     * av_codec_{get,set}_lowres(avctx)
+     */
+     int lowres;
+#endif
+
+    /**
+     * Current statistics for PTS correction.
+     * - decoding: maintained and used by libavcodec, not intended to be used by user apps
+     * - encoding: unused
+     */
+    int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far
+    int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far
+    int64_t pts_correction_last_pts;       /// PTS of the last frame
+    int64_t pts_correction_last_dts;       /// DTS of the last frame
+
+    /**
+     * Character encoding of the input subtitles file.
+     * - decoding: set by user
+     * - encoding: unused
+     */
+    char *sub_charenc;
+
+    /**
+     * Subtitles character encoding mode. Formats or codecs might be adjusting
+     * this setting (if they are doing the conversion themselves for instance).
+     * - decoding: set by libavcodec
+     * - encoding: unused
+     */
+    int sub_charenc_mode;
+#define FF_SUB_CHARENC_MODE_DO_NOTHING  -1  ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance)
+#define FF_SUB_CHARENC_MODE_AUTOMATIC    0  ///< libavcodec will select the mode itself
+#define FF_SUB_CHARENC_MODE_PRE_DECODER  1  ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv
+
+    /**
+     * Skip processing alpha if supported by codec.
+     * Note that if the format uses pre-multiplied alpha (common with VP6,
+     * and recommended due to better video quality/compression)
+     * the image will look as if alpha-blended onto a black background.
+     * However for formats that do not use pre-multiplied alpha
+     * there might be serious artefacts (though e.g. libswscale currently
+     * assumes pre-multiplied alpha anyway).
+     * Code outside libavcodec should access this field using AVOptions
+     *
+     * - decoding: set by user
+     * - encoding: unused
+     */
+    int skip_alpha;
+
+    /**
+     * Number of samples to skip after a discontinuity
+     * - decoding: unused
+     * - encoding: set by libavcodec
+     */
+    int seek_preroll;
+
+#if !FF_API_DEBUG_MV
+    /**
+     * debug motion vectors
+     * Code outside libavcodec should access this field using AVOptions
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     */
+    int debug_mv;
+#define FF_DEBUG_VIS_MV_P_FOR  0x00000001 //visualize forward predicted MVs of P frames
+#define FF_DEBUG_VIS_MV_B_FOR  0x00000002 //visualize forward predicted MVs of B frames
+#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
+#endif
+
+    /**
+     * custom intra quantization matrix
+     * Code outside libavcodec should access this field using av_codec_g/set_chroma_intra_matrix()
+     * - encoding: Set by user, can be NULL.
+     * - decoding: unused.
+     */
+    uint16_t *chroma_intra_matrix;
+
+    /**
+     * dump format separator.
+     * can be ", " or "\n      " or anything else
+     * Code outside libavcodec should access this field using AVOptions
+     * (NO direct access).
+     * - encoding: Set by user.
+     * - decoding: Set by user.
+     */
+    uint8_t *dump_separator;
+
+    /**
+     * ',' separated list of allowed decoders.
+     * If NULL then all are allowed
+     * - encoding: unused
+     * - decoding: set by user through AVOptions (NO direct access)
+     */
+    char *codec_whitelist;
+} AVCodecContext;
+
+/*
+ * Accessor helpers for AVCodecContext fields that code outside libavcodec
+ * must not read or write directly; the per-field documentation in
+ * AVCodecContext explicitly requires going through these functions.
+ */
+AVRational av_codec_get_pkt_timebase         (const AVCodecContext *avctx);
+void       av_codec_set_pkt_timebase         (AVCodecContext *avctx, AVRational val);
+
+const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx);
+void                     av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc);
+
+int  av_codec_get_lowres(const AVCodecContext *avctx);
+void av_codec_set_lowres(AVCodecContext *avctx, int val);
+
+int  av_codec_get_seek_preroll(const AVCodecContext *avctx);
+void av_codec_set_seek_preroll(AVCodecContext *avctx, int val);
+
+uint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx);
+void av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val);
+
+/**
+ * AVProfile.
+ */
+typedef struct AVProfile {
+    int profile;      ///< FF_PROFILE_* value identifying the profile
+    const char *name; ///< short name for the profile
+} AVProfile;
+
+/* Opaque; holds codec-private option defaults (see AVCodec.defaults),
+ * defined inside libavcodec. */
+typedef struct AVCodecDefault AVCodecDefault;
+
+struct AVSubtitle;
+
+/**
+ * AVCodec.
+ */
+typedef struct AVCodec {
+    /**
+     * Name of the codec implementation.
+     * The name is globally unique among encoders and among decoders (but an
+     * encoder and a decoder can share the same name).
+     * This is the primary way to find a codec from the user perspective.
+     */
+    const char *name;
+    /**
+     * Descriptive name for the codec, meant to be more human readable than name.
+     * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+     */
+    const char *long_name;
+    enum AVMediaType type;                  ///< type of media (AVMEDIA_TYPE_*) this codec handles
+    enum AVCodecID id;                      ///< ID (AV_CODEC_ID_*) of the codec implemented
+    /**
+     * Codec capabilities.
+     * see CODEC_CAP_*
+     */
+    int capabilities;
+    const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
+    const enum AVPixelFormat *pix_fmts;     ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
+    const int *supported_samplerates;       ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
+    const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+    const uint64_t *channel_layouts;         ///< array of supported channel layouts, or NULL if unknown. array is terminated by 0
+#if FF_API_LOWRES
+    uint8_t max_lowres;                     ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres()
+#endif
+    const AVClass *priv_class;              ///< AVClass for the private context
+    const AVProfile *profiles;              ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
+
+    /*****************************************************************
+     * No fields below this line are part of the public API. They
+     * may not be used outside of libavcodec and can be changed and
+     * removed at will.
+     * New public fields should be added right above.
+     *****************************************************************
+     */
+    int priv_data_size;                     ///< size of the codec-private context (internal)
+    struct AVCodec *next;                   ///< next codec in the registration list (internal)
+    /**
+     * @name Frame-level threading support functions
+     * @{
+     */
+    /**
+     * If defined, called on thread contexts when they are created.
+     * If the codec allocates writable tables in init(), re-allocate them here.
+     * priv_data will be set to a copy of the original.
+     */
+    int (*init_thread_copy)(AVCodecContext *);
+    /**
+     * Copy necessary context variables from a previous thread context to the current one.
+     * If not defined, the next thread will start automatically; otherwise, the codec
+     * must call ff_thread_finish_setup().
+     *
+     * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
+     */
+    int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src);
+    /** @} */
+
+    /**
+     * Private codec-specific defaults.
+     */
+    const AVCodecDefault *defaults;
+
+    /**
+     * Initialize codec static data, called from avcodec_register().
+     */
+    void (*init_static_data)(struct AVCodec *codec);
+
+    /** Open/initialize the codec instance. */
+    int (*init)(AVCodecContext *);
+    /** Encode an AVSubtitle into buf (subtitle encoders). */
+    int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size,
+                      const struct AVSubtitle *sub);
+    /**
+     * Encode data to an AVPacket.
+     *
+     * @param      avctx          codec context
+     * @param      avpkt          output AVPacket (may contain a user-provided buffer)
+     * @param[in]  frame          AVFrame containing the raw data to be encoded
+     * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+     *                            non-empty packet was returned in avpkt.
+     * @return 0 on success, negative error code on failure
+     */
+    int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
+                   int *got_packet_ptr);
+    /** Decode avpkt into outdata; *outdata_size signals whether output was
+     *  produced. NOTE(review): the exact outdata type depends on the media
+     *  type — confirm against libavcodec internals. */
+    int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);
+    /** Close the codec instance. */
+    int (*close)(AVCodecContext *);
+    /**
+     * Flush buffers.
+     * Will be called when seeking
+     */
+    void (*flush)(AVCodecContext *);
+} AVCodec;
+
+int av_codec_get_max_lowres(const AVCodec *codec);
+
+struct MpegEncContext;
+
+/**
+ * @defgroup lavc_hwaccel AVHWAccel
+ * @{
+ */
+typedef struct AVHWAccel {
+    /**
+     * Name of the hardware accelerated codec.
+     * The name is globally unique among encoders and among decoders (but an
+     * encoder and a decoder can share the same name).
+     */
+    const char *name;
+
+    /**
+     * Type of codec implemented by the hardware accelerator.
+     *
+     * See AVMEDIA_TYPE_xxx
+     */
+    enum AVMediaType type;
+
+    /**
+     * Codec implemented by the hardware accelerator.
+     *
+     * See AV_CODEC_ID_xxx
+     */
+    enum AVCodecID id;
+
+    /**
+     * Supported pixel format.
+     *
+     * Only hardware accelerated formats are supported here.
+     */
+    enum AVPixelFormat pix_fmt;
+
+    /**
+     * Hardware accelerated codec capabilities.
+     * see FF_HWACCEL_CODEC_CAP_*
+     */
+    int capabilities;
+
+    /*****************************************************************
+     * No fields below this line are part of the public API. They
+     * may not be used outside of libavcodec and can be changed and
+     * removed at will.
+     * New public fields should be added right above.
+     *****************************************************************
+     */
+    struct AVHWAccel *next;  ///< next registered hwaccel (internal list)
+
+    /**
+     * Allocate a custom buffer
+     */
+    int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame);
+
+    /**
+     * Called at the beginning of each frame or field picture.
+     *
+     * Meaningful frame information (codec specific) is guaranteed to
+     * be parsed at this point. This function is mandatory.
+     *
+     * Note that buf can be NULL along with buf_size set to 0.
+     * Otherwise, this means the whole frame is available at this point.
+     *
+     * @param avctx the codec context
+     * @param buf the frame data buffer base
+     * @param buf_size the size of the frame in bytes
+     * @return zero if successful, a negative value otherwise
+     */
+    int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+    /**
+     * Callback for each slice.
+     *
+     * Meaningful slice information (codec specific) is guaranteed to
+     * be parsed at this point. This function is mandatory.
+     * The only exception is XvMC, that works on MB level.
+     *
+     * @param avctx the codec context
+     * @param buf the slice data buffer base
+     * @param buf_size the size of the slice in bytes
+     * @return zero if successful, a negative value otherwise
+     */
+    int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+    /**
+     * Called at the end of each frame or field picture.
+     *
+     * The whole picture is parsed at this point and can now be sent
+     * to the hardware accelerator. This function is mandatory.
+     *
+     * @param avctx the codec context
+     * @return zero if successful, a negative value otherwise
+     */
+    int (*end_frame)(AVCodecContext *avctx);
+
+    /**
+     * Size of per-frame hardware accelerator private data.
+     *
+     * Private data is allocated with av_mallocz() before
+     * AVCodecContext.get_buffer() and deallocated after
+     * AVCodecContext.release_buffer().
+     */
+    int frame_priv_data_size;
+
+    /**
+     * Called for every Macroblock in a slice.
+     *
+     * XvMC uses it to replace the ff_mpv_decode_mb().
+     * Instead of decoding to raw picture, MB parameters are
+     * stored in an array provided by the video driver.
+     *
+     * @param s the mpeg context
+     */
+    void (*decode_mb)(struct MpegEncContext *s);
+
+    /**
+     * Initialize the hwaccel private data.
+     *
+     * This will be called from ff_get_format(), after hwaccel and
+     * hwaccel_context are set and the hwaccel private data in AVCodecInternal
+     * is allocated.
+     */
+    int (*init)(AVCodecContext *avctx);
+
+    /**
+     * Uninitialize the hwaccel private data.
+     *
+     * This will be called from get_format() or avcodec_close(), after hwaccel
+     * and hwaccel_context are already uninitialized.
+     */
+    int (*uninit)(AVCodecContext *avctx);
+
+    /**
+     * Size of the private data to allocate in
+     * AVCodecInternal.hwaccel_priv_data.
+     */
+    int priv_data_size;
+} AVHWAccel;
+
+/**
+ * Hardware acceleration should be used for decoding even if the codec level
+ * used is unknown or higher than the maximum supported level reported by the
+ * hardware driver.
+ */
+#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0)
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_picture AVPicture
+ *
+ * Functions for working with AVPicture
+ * @{
+ */
+
+/**
+ * Picture data structure.
+ *
+ * Up to four components can be stored into it, the last component is
+ * alpha.
+ */
+typedef struct AVPicture {
+    uint8_t *data[AV_NUM_DATA_POINTERS];    ///< pointers to the image data planes
+    int linesize[AV_NUM_DATA_POINTERS];     ///< number of bytes per line (stride) of each plane
+} AVPicture;
+
+/**
+ * @}
+ */
+
+enum AVSubtitleType {
+    SUBTITLE_NONE,                  ///< no subtitle data is set
+
+    SUBTITLE_BITMAP,                ///< A bitmap, pict will be set
+
+    /**
+     * Plain text, the text field must be set by the decoder and is
+     * authoritative. ass and pict fields may contain approximations.
+     */
+    SUBTITLE_TEXT,
+
+    /**
+     * Formatted text, the ass field must be set by the decoder and is
+     * authoritative. pict and text fields may contain approximations.
+     */
+    SUBTITLE_ASS,
+};
+
+#define AV_SUBTITLE_FLAG_FORCED 0x00000001
+
+typedef struct AVSubtitleRect {
+    int x;         ///< top left corner  of pict, undefined when pict is not set
+    int y;         ///< top left corner  of pict, undefined when pict is not set
+    int w;         ///< width            of pict, undefined when pict is not set
+    int h;         ///< height           of pict, undefined when pict is not set
+    int nb_colors; ///< number of colors in pict, undefined when pict is not set
+
+    /**
+     * data+linesize for the bitmap of this subtitle.
+     * can be set for text/ass as well once they were rendered
+     */
+    AVPicture pict;
+    enum AVSubtitleType type;
+
+    char *text;                     ///< 0 terminated plain UTF-8 text
+
+    /**
+     * 0 terminated ASS/SSA compatible event line.
+     * The presentation of this is unaffected by the other values in this
+     * struct.
+     */
+    char *ass;
+
+    int flags;                      ///< AV_SUBTITLE_FLAG_* values
+} AVSubtitleRect;
+
+typedef struct AVSubtitle {
+    uint16_t format; /* 0 = graphics */
+    uint32_t start_display_time; /* relative to packet pts, in ms */
+    uint32_t end_display_time; /* relative to packet pts, in ms */
+    unsigned num_rects; /* number of AVSubtitleRect pointers in rects */
+    AVSubtitleRect **rects;
+    int64_t pts;    ///< Same as packet pts, in AV_TIME_BASE
+} AVSubtitle;
+
+/**
+ * If c is NULL, returns the first registered codec.
+ * If c is non-NULL, returns the next registered codec after c,
+ * or NULL if c is the last one.
+ */
+AVCodec *av_codec_next(const AVCodec *c);
+
+/**
+ * Return the LIBAVCODEC_VERSION_INT constant.
+ */
+unsigned avcodec_version(void);
+
+/**
+ * Return the libavcodec build-time configuration.
+ */
+const char *avcodec_configuration(void);
+
+/**
+ * Return the libavcodec license.
+ */
+const char *avcodec_license(void);
+
+/**
+ * Register the codec codec and initialize libavcodec.
+ *
+ * @warning either this function or avcodec_register_all() must be called
+ * before any other libavcodec functions.
+ *
+ * @see avcodec_register_all()
+ */
+void avcodec_register(AVCodec *codec);
+
+/**
+ * Register all the codecs, parsers and bitstream filters which were enabled at
+ * configuration time. If you do not call this function you can select exactly
+ * which formats you want to support, by using the individual registration
+ * functions.
+ *
+ * @see avcodec_register
+ * @see av_register_codec_parser
+ * @see av_register_bitstream_filter
+ */
+void avcodec_register_all(void);
+
+/**
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct should be freed with avcodec_free_context().
+ *
+ * @param codec if non-NULL, allocate private data and initialize defaults
+ *              for the given codec. It is illegal to then call avcodec_open2()
+ *              with a different codec.
+ *              If NULL, then the codec-specific defaults won't be initialized,
+ *              which may result in suboptimal default settings (this is
+ *              important mainly for encoders, e.g. libx264).
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ * @see avcodec_get_context_defaults
+ */
+AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);
+
+/**
+ * Free the codec context and everything associated with it and write NULL to
+ * the provided pointer.
+ */
+void avcodec_free_context(AVCodecContext **avctx);
+
+/**
+ * Set the fields of the given AVCodecContext to default values corresponding
+ * to the given codec (defaults may be codec-dependent).
+ *
+ * Do not call this function if a non-NULL codec has been passed
+ * to avcodec_alloc_context3() that allocated this AVCodecContext.
+ * If codec is non-NULL, it is illegal to call avcodec_open2() with a
+ * different codec on this AVCodecContext.
+ */
+int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec);
+
+/**
+ * Get the AVClass for AVCodecContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_class(void);
+
+/**
+ * Get the AVClass for AVFrame. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_frame_class(void);
+
+/**
+ * Get the AVClass for AVSubtitleRect. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_subtitle_rect_class(void);
+
+/**
+ * Copy the settings of the source AVCodecContext into the destination
+ * AVCodecContext. The resulting destination codec context will be
+ * unopened, i.e. you are required to call avcodec_open2() before you
+ * can use this AVCodecContext to decode/encode video/audio data.
+ *
+ * @param dest target codec context, should be initialized with
+ *             avcodec_alloc_context3(NULL), but otherwise uninitialized
+ * @param src source codec context
+ * @return AVERROR() on error (e.g. memory allocation error), 0 on success
+ */
+int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);
+
+#if FF_API_AVFRAME_LAVC
+/**
+ * @deprecated use av_frame_alloc()
+ */
+attribute_deprecated
+AVFrame *avcodec_alloc_frame(void);
+
+/**
+ * Set the fields of the given AVFrame to default values.
+ *
+ * @param frame The AVFrame of which the fields should be set to default values.
+ *
+ * @deprecated use av_frame_unref()
+ */
+attribute_deprecated
+void avcodec_get_frame_defaults(AVFrame *frame);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ *
+ * @warning this function does NOT free the data buffers themselves
+ * (it does not know how, since they might have been allocated with
+ *  a custom get_buffer()).
+ *
+ * @deprecated use av_frame_free()
+ */
+attribute_deprecated
+void avcodec_free_frame(AVFrame **frame);
+#endif
+
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
+ *
+ * @code
+ * avcodec_register_all();
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * if (!codec)
+ *     exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, opts) < 0)
+ *     exit(1);
+ * @endcode
+ *
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has been
+ *              previously passed to avcodec_alloc_context3() or
+ *              avcodec_get_context_defaults3() for this context, then this
+ *              parameter MUST be either NULL or equal to the previously passed
+ *              codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private options.
+ *                On return this object will be filled with options that were not found.
+ *
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
+ *      av_dict_set(), av_opt_find().
+ */
+int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
+
+/**
+ * Close a given AVCodecContext and free all the data associated with it
+ * (but not the AVCodecContext itself).
+ *
+ * Calling this function on an AVCodecContext that hasn't been opened will free
+ * the codec-specific data allocated in avcodec_alloc_context3() /
+ * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
+ * do nothing.
+ */
+int avcodec_close(AVCodecContext *avctx);
+
+/**
+ * Free all allocated data in the given subtitle struct.
+ *
+ * @param sub AVSubtitle to free.
+ */
+void avsubtitle_free(AVSubtitle *sub);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_packet
+ * @{
+ */
+
+#if FF_API_DESTRUCT_PACKET
+/**
+ * Default packet destructor.
+ * @deprecated use the AVBuffer API instead
+ */
+attribute_deprecated
+void av_destruct_packet(AVPacket *pkt);
+#endif
+
+/**
+ * Initialize optional fields of a packet with default values.
+ *
+ * Note, this does not touch the data and size members, which have to be
+ * initialized separately.
+ *
+ * @param pkt packet
+ */
+void av_init_packet(AVPacket *pkt);
+
+/**
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
+ */
+int av_new_packet(AVPacket *pkt, int size);
+
+/**
+ * Reduce packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param size new size
+ */
+void av_shrink_packet(AVPacket *pkt, int size);
+
+/**
+ * Increase packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
+ */
+int av_grow_packet(AVPacket *pkt, int grow_by);
+
+/**
+ * Initialize a reference-counted packet from av_malloc()ed data.
+ *
+ * @param pkt packet to be initialized. This function will set the data, size,
+ *        buf and destruct fields, all others are left untouched.
+ * @param data Data allocated by av_malloc() to be used as packet data. If this
+ *        function returns successfully, the data is owned by the underlying AVBuffer.
+ *        The caller may not access the data through other means.
+ * @param size size of data in bytes, without the padding. I.e. the full buffer
+ *        size is assumed to be size + FF_INPUT_BUFFER_PADDING_SIZE.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
+
+/**
+ * @warning This is a hack - the packet memory allocation stuff is broken. The
+ * packet is allocated if it was not really allocated.
+ */
+int av_dup_packet(AVPacket *pkt);
+
+/**
+ * Copy packet, including contents
+ *
+ * @return 0 on success, negative AVERROR on fail
+ */
+int av_copy_packet(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Copy packet side data
+ *
+ * @return 0 on success, negative AVERROR on fail
+ */
+int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Free a packet.
+ *
+ * @param pkt packet to free
+ */
+void av_free_packet(AVPacket *pkt);
+
+/**
+ * Allocate new information of a packet.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to fresh allocated data or NULL otherwise
+ */
+uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+                                 int size);
+
+/**
+ * Shrink the already allocated side data buffer
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size new side information size
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+                               int size);
+
+/**
+ * Get side information from packet.
+ *
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size pointer for side information size to store (optional)
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+                                 int *size);
+
+int av_packet_merge_side_data(AVPacket *pkt);
+
+int av_packet_split_side_data(AVPacket *pkt);
+
+/**
+ * Pack a dictionary for use in side_data.
+ *
+ * @param dict The dictionary to pack.
+ * @param size pointer to store the size of the returned data
+ * @return pointer to data if successful, NULL otherwise
+ */
+uint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size);
+/**
+ * Unpack a dictionary from side_data.
+ *
+ * @param data data from side_data
+ * @param size size of the data
+ * @param dict the metadata storage dictionary
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict);
+
+
+/**
+ * Convenience function to free all the side data stored.
+ * All the other fields stay untouched.
+ *
+ * @param pkt packet
+ */
+void av_packet_free_side_data(AVPacket *pkt);
+
+/**
+ * Setup a new reference to the data described by a given packet
+ *
+ * If src is reference-counted, setup dst as a new reference to the
+ * buffer in src. Otherwise allocate a new buffer in dst and copy the
+ * data from src into it.
+ *
+ * All the other fields are copied from src.
+ *
+ * @see av_packet_unref
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_packet_ref(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Wipe the packet.
+ *
+ * Unreference the buffer referenced by the packet and reset the
+ * remaining packet fields to their default values.
+ *
+ * @param pkt The packet to be unreferenced.
+ */
+void av_packet_unref(AVPacket *pkt);
+
+/**
+ * Move every field in src to dst and reset src.
+ *
+ * @see av_packet_unref
+ *
+ * @param src Source packet, will be reset
+ * @param dst Destination packet
+ */
+void av_packet_move_ref(AVPacket *dst, AVPacket *src);
+
+/**
+ * Copy only "properties" fields from src to dst.
+ *
+ * Properties for the purpose of this function are all the fields
+ * beside those related to the packet data (buf, data, size)
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success AVERROR on failure.
+ *
+ */
+int av_packet_copy_props(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Convert valid timing fields (timestamps / durations) in a packet from one
+ * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
+ * ignored.
+ *
+ * @param pkt packet on which the conversion will be performed
+ * @param tb_src source timebase, in which the timing fields in pkt are
+ *               expressed
+ * @param tb_dst destination timebase, to which the timing fields will be
+ *               converted
+ */
+void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_decoding
+ * @{
+ */
+
+/**
+ * Find a registered decoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder(enum AVCodecID id);
+
+/**
+ * Find a registered decoder with the specified name.
+ *
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder_by_name(const char *name);
+
+#if FF_API_GET_BUFFER
+attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#endif
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
+
+#if FF_API_EMU_EDGE
+/**
+ * Return the amount of padding in pixels which the get_buffer callback must
+ * provide around the edge of the image for codecs which do not have the
+ * CODEC_FLAG_EMU_EDGE flag.
+ *
+ * @return Required padding in pixels.
+ *
+ * @deprecated CODEC_FLAG_EMU_EDGE is deprecated, so this function is no longer
+ * needed
+ */
+attribute_deprecated
+unsigned avcodec_get_edge_width(void);
+#endif
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you do not use any horizontal
+ * padding.
+ *
+ * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you also ensure that all
+ * line sizes are a multiple of the respective linesize_align[i].
+ *
+ * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
+                               int linesize_align[AV_NUM_DATA_POINTERS]);
+
+/**
+ * Converts AVChromaLocation to swscale x/y chroma position.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) representing the origin and luma(1,1) representing 256,256
+ *
+ * @param xpos  horizontal chroma sample position
+ * @param ypos  vertical   chroma sample position
+ */
+int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos);
+
+/**
+ * Converts swscale x/y chroma position to AVChromaLocation.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) representing the origin and luma(1,1) representing 256,256
+ *
+ * @param xpos  horizontal chroma sample position
+ * @param ypos  vertical   chroma sample position
+ */
+enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos);
+
+#if FF_API_OLD_DECODE_AUDIO
+/**
+ * Wrapper function which calls avcodec_decode_audio4.
+ *
+ * @deprecated Use avcodec_decode_audio4 instead.
+ *
+ * Decode the audio frame of size avpkt->size from avpkt->data into samples.
+ * Some decoders may support multiple frames in a single AVPacket, such
+ * decoders would then just decode the first frame. In this case,
+ * avcodec_decode_audio3 has to be called again with an AVPacket that contains
+ * the remaining data in order to decode the second frame etc.
+ * If no frame
+ * could be output, frame_size_ptr is zero. Otherwise, it is the
+ * decompressed frame size in bytes.
+ *
+ * @warning You must set frame_size_ptr to the allocated size of the
+ * output buffer before calling avcodec_decode_audio3().
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @warning You must not provide a custom get_buffer() when using
+ * avcodec_decode_audio3().  Doing so will override it with
+ * avcodec_default_get_buffer.  Use avcodec_decode_audio4() instead,
+ * which does allow the application to provide a custom get_buffer().
+ *
+ * @note You might have to align the input buffer avpkt->data and output buffer
+ * samples. The alignment requirements depend on the CPU: On some CPUs it isn't
+ * necessary at all, on others it won't work at all if not aligned and on others
+ * it will work but it will have an impact on performance.
+ *
+ * In practice, avpkt->data should have 4 byte alignment at minimum and
+ * samples should be 16 byte aligned unless the CPU doesn't need it
+ * (AltiVec and SSE do).
+ *
+ * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
+ * between input and output, these need to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to return the remaining frames.
+ *
+ * @param avctx the codec context
+ * @param[out] samples the output buffer, sample type in avctx->sample_fmt
+ *                     If the sample format is planar, each channel plane will
+ *                     be the same size, with no padding between channels.
+ * @param[in,out] frame_size_ptr the output buffer size in bytes
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ *            You can create such packet with av_init_packet() and by then setting
+ *            data and size, some decoders might in addition need other fields.
+ *            All decoders are designed to use the least fields possible though.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame data was decompressed (used) from the input AVPacket.
+ */
+attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
+                         int *frame_size_ptr,
+                         AVPacket *avpkt);
+#endif
+
+/**
+ * Decode the audio frame of size avpkt->size from avpkt->data into frame.
+ *
+ * Some decoders may support multiple frames in a single AVPacket. Such
+ * decoders would then just decode the first frame and the return value would be
+ * less than the packet size. In this case, avcodec_decode_audio4 has to be
+ * called again with an AVPacket containing the remaining data in order to
+ * decode the second frame, etc...  Even if no frames are returned, the packet
+ * needs to be fed to the decoder with remaining data until it is completely
+ * consumed or an error occurs.
+ *
+ * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input
+ * and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning samples. It is safe to flush even those decoders that are not
+ * marked with CODEC_CAP_DELAY, then no samples will be returned.
+ *
+ * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
+ *          larger than the actual read bytes because some optimized bitstream
+ *          readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @param      avctx the codec context
+ * @param[out] frame The AVFrame in which to store decoded audio samples.
+ *                   The decoder will allocate a buffer for the decoded frame by
+ *                   calling the AVCodecContext.get_buffer2() callback.
+ *                   When AVCodecContext.refcounted_frames is set to 1, the frame is
+ *                   reference counted and the returned reference belongs to the
+ *                   caller. The caller must release the frame using av_frame_unref()
+ *                   when the frame is no longer needed. The caller may safely write
+ *                   to the frame if av_frame_is_writable() returns 1.
+ *                   When AVCodecContext.refcounted_frames is set to 0, the returned
+ *                   reference belongs to the decoder and is valid only until the
+ *                   next call to this function or until closing or flushing the
+ *                   decoder. The caller may not write to it.
+ * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
+ *                           non-zero. Note that this field being set to zero
+ *                           does not mean that an error has occurred. For
+ *                           decoders with CODEC_CAP_DELAY set, no given decode
+ *                           call is guaranteed to produce a frame.
+ * @param[in]  avpkt The input AVPacket containing the input buffer.
+ *                   At least avpkt->data and avpkt->size should be set. Some
+ *                   decoders might also require additional fields to be set.
+ * @return A negative error code is returned if an error occurred during
+ *         decoding, otherwise the number of bytes consumed from the input
+ *         AVPacket is returned.
+ */
+int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
+                          int *got_frame_ptr, const AVPacket *avpkt);
+
+/**
+ * Decode the video frame of size avpkt->size from avpkt->data into picture.
+ * Some decoders may support multiple frames in a single AVPacket, such
+ * decoders would then just decode the first frame.
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer buf should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
+ * between input and output, these need to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to return the remaining frames.
+ *
+ * @param avctx the codec context
+ * @param[out] picture The AVFrame in which the decoded video frame will be stored.
+ *             Use av_frame_alloc() to get an AVFrame. The codec will
+ *             allocate memory for the actual bitmap by calling the
+ *             AVCodecContext.get_buffer2() callback.
+ *             When AVCodecContext.refcounted_frames is set to 1, the frame is
+ *             reference counted and the returned reference belongs to the
+ *             caller. The caller must release the frame using av_frame_unref()
+ *             when the frame is no longer needed. The caller may safely write
+ *             to the frame if av_frame_is_writable() returns 1.
+ *             When AVCodecContext.refcounted_frames is set to 0, the returned
+ *             reference belongs to the decoder and is valid only until the
+ *             next call to this function or until closing or flushing the
+ *             decoder. The caller may not write to it.
+ *
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ *            You can create such packet with av_init_packet() and by then setting
+ *            data and size, some decoders might in addition need other fields like
+ *            flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least
+ *            fields possible.
+ * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame could be decompressed.
+ */
+int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
+                         int *got_picture_ptr,
+                         const AVPacket *avpkt);
+
+/**
+ * Decode a subtitle message.
+ * Return a negative value on error, otherwise return the number of bytes used.
+ * If no subtitle could be decompressed, got_sub_ptr is zero.
+ * Otherwise, the subtitle is stored in *sub.
+ * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for
+ * simplicity, because the performance difference is expected to be negligible
+ * and reusing a get_buffer written for video codecs would probably perform badly
+ * due to a potentially very different allocation pattern.
+ *
+ * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input
+ * and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning subtitles. It is safe to flush even those decoders that are not
+ * marked with CODEC_CAP_DELAY, then no subtitles will be returned.
+ *
+ * @param avctx the codec context
+ * @param[out] sub The Preallocated AVSubtitle in which the decoded subtitle will be stored,
+ *                 must be freed with avsubtitle_free if *got_sub_ptr is set.
+ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ */
+int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
+                            int *got_sub_ptr,
+                            AVPacket *avpkt);
+
+/**
+ * @defgroup lavc_parsing Frame parsing
+ * @{
+ */
+
+enum AVPictureStructure {
+    AV_PICTURE_STRUCTURE_UNKNOWN,      ///< unknown
+    AV_PICTURE_STRUCTURE_TOP_FIELD,    ///< coded as top field
+    AV_PICTURE_STRUCTURE_BOTTOM_FIELD, ///< coded as bottom field
+    AV_PICTURE_STRUCTURE_FRAME,        ///< coded as frame
+};
+
+typedef struct AVCodecParserContext {
+    void *priv_data;
+    struct AVCodecParser *parser;
+    int64_t frame_offset; ///< offset of the current frame
+    int64_t cur_offset; /* current offset
+                           (incremented by each av_parser_parse()) */
+    int64_t next_frame_offset; ///< offset of the next frame
+    /* video info */
+    int pict_type; /* XXX: Put it back in AVCodecContext. */
+    /**
+     * This field is used for proper frame duration computation in lavf.
+     * It signals how much longer the frame duration of the current frame
+     * is compared to normal frame duration.
+     *
+     * frame_duration = (1 + repeat_pict) * time_base
+     *
+     * It is used by codecs like H.264 to display telecined material.
+     */
+    int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+    int64_t pts;     ///< pts of the current frame
+    int64_t dts;     ///< dts of the current frame
+
+    /* private data */
+    int64_t last_pts;
+    int64_t last_dts;
+    int fetch_timestamp;
+
+#define AV_PARSER_PTS_NB 4
+    int cur_frame_start_index;
+    int64_t cur_frame_offset[AV_PARSER_PTS_NB];
+    int64_t cur_frame_pts[AV_PARSER_PTS_NB];
+    int64_t cur_frame_dts[AV_PARSER_PTS_NB];
+
+    int flags;
+#define PARSER_FLAG_COMPLETE_FRAMES           0x0001
+#define PARSER_FLAG_ONCE                      0x0002
+/// Set if the parser has a valid file offset
+#define PARSER_FLAG_FETCHED_OFFSET            0x0004
+#define PARSER_FLAG_USE_CODEC_TS              0x1000
+
+    int64_t offset;      ///< byte offset from starting packet start
+    int64_t cur_frame_end[AV_PARSER_PTS_NB];
+
+    /**
+     * Set by parser to 1 for key frames and 0 for non-key frames.
+     * It is initialized to -1, so if the parser doesn't set this flag,
+     * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
+     * will be used.
+     */
+    int key_frame;
+
+    /**
+     * Time difference in stream time base units from the pts of this
+     * packet to the point at which the output from the decoder has converged
+     * independent from the availability of previous frames. That is, the
+     * frames are virtually identical no matter if decoding started from
+     * the very first frame or from this keyframe.
+     * Is AV_NOPTS_VALUE if unknown.
+     * This field is not the display duration of the current frame.
+     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
+     * set.
+     *
+     * The purpose of this field is to allow seeking in streams that have no
+     * keyframes in the conventional sense. It corresponds to the
+     * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+     * essential for some types of subtitle streams to ensure that all
+     * subtitles are correctly displayed after seeking.
+     */
+    int64_t convergence_duration;
+
+    // Timestamp generation support:
+    /**
+     * Synchronization point for start of timestamp generation.
+     *
+     * Set to >0 for sync point, 0 for no sync point and <0 for undefined
+     * (default).
+     *
+     * For example, this corresponds to presence of H.264 buffering period
+     * SEI message.
+     */
+    int dts_sync_point;
+
+    /**
+     * Offset of the current timestamp against last timestamp sync point in
+     * units of AVCodecContext.time_base.
+     *
+     * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
+     * contain a valid timestamp offset.
+     *
+     * Note that the timestamp of sync point has usually a nonzero
+     * dts_ref_dts_delta, which refers to the previous sync point. Offset of
+     * the next frame after timestamp sync point will be usually 1.
+     *
+     * For example, this corresponds to H.264 cpb_removal_delay.
+     */
+    int dts_ref_dts_delta;
+
+    /**
+     * Presentation delay of current frame in units of AVCodecContext.time_base.
+     *
+     * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
+     * contain valid non-negative timestamp delta (presentation time of a frame
+     * must not lie in the past).
+     *
+     * This delay represents the difference between decoding and presentation
+     * time of the frame.
+     *
+     * For example, this corresponds to H.264 dpb_output_delay.
+     */
+    int pts_dts_delta;
+
+    /**
+     * Position of the packet in file.
+     *
+     * Analogous to cur_frame_pts/dts
+     */
+    int64_t cur_frame_pos[AV_PARSER_PTS_NB];
+
+    /**
+     * Byte position of currently parsed frame in stream.
+     */
+    int64_t pos;
+
+    /**
+     * Previous frame byte position.
+     */
+    int64_t last_pos;
+
+    /**
+     * Duration of the current frame.
+     * For audio, this is in units of 1 / AVCodecContext.sample_rate.
+     * For all other types, this is in units of AVCodecContext.time_base.
+     */
+    int duration;
+
+    enum AVFieldOrder field_order;
+
+    /**
+     * Indicate whether a picture is coded as a frame, top field or bottom field.
+     *
+     * For example, H.264 field_pic_flag equal to 0 corresponds to
+     * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag
+     * equal to 1 and bottom_field_flag equal to 0 corresponds to
+     * AV_PICTURE_STRUCTURE_TOP_FIELD.
+     */
+    enum AVPictureStructure picture_structure;
+
+    /**
+     * Picture number incremented in presentation or output order.
+     * This field may be reinitialized at the first picture of a new sequence.
+     *
+     * For example, this corresponds to H.264 PicOrderCnt.
+     */
+    int output_picture_number;
+} AVCodecParserContext;
+
+typedef struct AVCodecParser {
+    int codec_ids[5]; ///< several codec IDs are permitted
+    int priv_data_size;
+    int (*parser_init)(AVCodecParserContext *s);
+    int (*parser_parse)(AVCodecParserContext *s,
+                        AVCodecContext *avctx,
+                        const uint8_t **poutbuf, int *poutbuf_size,
+                        const uint8_t *buf, int buf_size);
+    void (*parser_close)(AVCodecParserContext *s);
+    int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
+    struct AVCodecParser *next;
+} AVCodecParser;
+
+AVCodecParser *av_parser_next(const AVCodecParser *c);
+
+void av_register_codec_parser(AVCodecParser *parser);
+AVCodecParserContext *av_parser_init(int codec_id);
+
+/**
+ * Parse a packet.
+ *
+ * @param s             parser context.
+ * @param avctx         codec context.
+ * @param poutbuf       set to pointer to parsed buffer or NULL if not yet finished.
+ * @param poutbuf_size  set to size of parsed buffer or zero if not yet finished.
+ * @param buf           input buffer.
+ * @param buf_size      input length, to signal EOF, this should be 0 (so that the last frame can be output).
+ * @param pts           input presentation timestamp.
+ * @param dts           input decoding timestamp.
+ * @param pos           input byte position in stream.
+ * @return the number of bytes of the input bitstream used.
+ *
+ * Example:
+ * @code
+ *   while(in_len){
+ *       len = av_parser_parse2(myparser, AVCodecContext, &data, &size,
+ *                                        in_data, in_len,
+ *                                        pts, dts, pos);
+ *       in_data += len;
+ *       in_len  -= len;
+ *
+ *       if(size)
+ *          decode_frame(data, size);
+ *   }
+ * @endcode
+ */
+int av_parser_parse2(AVCodecParserContext *s,
+                     AVCodecContext *avctx,
+                     uint8_t **poutbuf, int *poutbuf_size,
+                     const uint8_t *buf, int buf_size,
+                     int64_t pts, int64_t dts,
+                     int64_t pos);
+
+/**
+ * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed
+ * @deprecated use AVBitStreamFilter
+ */
+int av_parser_change(AVCodecParserContext *s,
+                     AVCodecContext *avctx,
+                     uint8_t **poutbuf, int *poutbuf_size,
+                     const uint8_t *buf, int buf_size, int keyframe);
+void av_parser_close(AVCodecParserContext *s);
+
+/**
+ * @}
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_encoding
+ * @{
+ */
+
+/**
+ * Find a registered encoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder(enum AVCodecID id);
+
+/**
+ * Find a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder_by_name(const char *name);
+
+#if FF_API_OLD_ENCODE_AUDIO
+/**
+ * Encode an audio frame from samples into buf.
+ *
+ * @deprecated Use avcodec_encode_audio2 instead.
+ *
+ * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
+ * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user
+ * will know how much space is needed because it depends on the value passed
+ * in buf_size as described below. In that case a lower value can be used.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer
+ * @param[in] buf_size the output buffer size
+ * @param[in] samples the input buffer containing the samples
+ * The number of samples read from this buffer is frame_size*channels,
+ * both of which are defined in avctx.
+ * For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of
+ * samples read from samples is equal to:
+ * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id))
+ * This also implies that av_get_bits_per_sample() must not return 0 for these
+ * codecs.
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used to encode the data read from the input buffer.
+ */
+int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
+                                              uint8_t *buf, int buf_size,
+                                              const short *samples);
+#endif
+
+/**
+ * Encode a frame of audio.
+ *
+ * Takes input samples from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay, split, and combine input frames
+ * internally as needed.
+ *
+ * @param avctx     codec context
+ * @param avpkt     output AVPacket.
+ *                  The user can supply an output buffer by setting
+ *                  avpkt->data and avpkt->size prior to calling the
+ *                  function, but if the size of the user-provided data is not
+ *                  large enough, encoding will fail. If avpkt->data and
+ *                  avpkt->size are set, avpkt->destruct must also be set. All
+ *                  other AVPacket fields will be reset by the encoder using
+ *                  av_init_packet(). If avpkt->data is NULL, the encoder will
+ *                  allocate it. The encoder will set avpkt->size to the size
+ *                  of the output packet.
+ *
+ *                  If this function fails or produces no output, avpkt will be
+ *                  freed using av_free_packet() (i.e. avpkt->destruct will be
+ *                  called to free the user supplied buffer).
+ * @param[in] frame AVFrame containing the raw audio data to be encoded.
+ *                  May be NULL when flushing an encoder that has the
+ *                  CODEC_CAP_DELAY capability set.
+ *                  If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ *                  can have any number of samples.
+ *                  If it is not set, frame->nb_samples must be equal to
+ *                  avctx->frame_size for all frames except the last.
+ *                  The final frame may be smaller than avctx->frame_size.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ *                            output packet is non-empty, and to 0 if it is
+ *                            empty. If the function returns an error, the
+ *                            packet can be assumed to be invalid, and the
+ *                            value of got_packet_ptr is undefined and should
+ *                            not be used.
+ * @return          0 on success, negative error code on failure
+ */
+int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
+                          const AVFrame *frame, int *got_packet_ptr);
+
+#if FF_API_OLD_ENCODE_VIDEO
+/**
+ * @deprecated use avcodec_encode_video2() instead.
+ *
+ * Encode a video frame from pict into buf.
+ * The input picture should be
+ * stored using a specific format, namely avctx.pix_fmt.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer for the bitstream of encoded frame
+ * @param[in] buf_size the size of the output buffer in bytes
+ * @param[in] pict the input picture to encode
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used from the output buffer.
+ */
+attribute_deprecated
+int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+                         const AVFrame *pict);
+#endif
+
+/**
+ * Encode a frame of video.
+ *
+ * Takes input raw video data from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay and reorder input frames
+ * internally as needed.
+ *
+ * @param avctx     codec context
+ * @param avpkt     output AVPacket.
+ *                  The user can supply an output buffer by setting
+ *                  avpkt->data and avpkt->size prior to calling the
+ *                  function, but if the size of the user-provided data is not
+ *                  large enough, encoding will fail. All other AVPacket fields
+ *                  will be reset by the encoder using av_init_packet(). If
+ *                  avpkt->data is NULL, the encoder will allocate it.
+ *                  The encoder will set avpkt->size to the size of the
+ *                  output packet. The returned data (if any) belongs to the
+ *                  caller, who is responsible for freeing it.
+ *
+ *                  If this function fails or produces no output, avpkt will be
+ *                  freed using av_free_packet() (i.e. avpkt->destruct will be
+ *                  called to free the user supplied buffer).
+ * @param[in] frame AVFrame containing the raw video data to be encoded.
+ *                  May be NULL when flushing an encoder that has the
+ *                  CODEC_CAP_DELAY capability set.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ *                            output packet is non-empty, and to 0 if it is
+ *                            empty. If the function returns an error, the
+ *                            packet can be assumed to be invalid, and the
+ *                            value of got_packet_ptr is undefined and should
+ *                            not be used.
+ * @return          0 on success, negative error code on failure
+ */
+int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
+                          const AVFrame *frame, int *got_packet_ptr);
+
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+                            const AVSubtitle *sub);
+
+
+/**
+ * @}
+ */
+
+#if FF_API_AVCODEC_RESAMPLE
+/**
+ * @defgroup lavc_resample Audio resampling
+ * @ingroup libavc
+ * @deprecated use libswresample instead
+ *
+ * @{
+ */
+struct ReSampleContext;
+struct AVResampleContext;
+
+typedef struct ReSampleContext ReSampleContext;
+
+/**
+ *  Initialize audio resampling context.
+ *
+ * @param output_channels  number of output channels
+ * @param input_channels   number of input channels
+ * @param output_rate      output sample rate
+ * @param input_rate       input sample rate
+ * @param sample_fmt_out   requested output sample format
+ * @param sample_fmt_in    input sample format
+ * @param filter_length    length of each FIR filter in the filterbank relative to the cutoff frequency
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear           if 1 then the used FIR filter will be linearly interpolated
+                           between the 2 closest, if 0 the closest will be used
+ * @param cutoff           cutoff frequency, 1.0 corresponds to half the output sampling rate
+ * @return allocated ReSampleContext, NULL if error occurred
+ */
+attribute_deprecated
+ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
+                                        int output_rate, int input_rate,
+                                        enum AVSampleFormat sample_fmt_out,
+                                        enum AVSampleFormat sample_fmt_in,
+                                        int filter_length, int log2_phase_count,
+                                        int linear, double cutoff);
+
+attribute_deprecated
+int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples);
+
+/**
+ * Free resample context.
+ *
+ * @param s a non-NULL pointer to a resample context previously
+ *          created with av_audio_resample_init()
+ */
+attribute_deprecated
+void audio_resample_close(ReSampleContext *s);
+
+
+/**
+ * Initialize an audio resampler.
+ * Note, if either rate is not an integer then simply scale both rates up so they are.
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear If 1 then the used FIR filter will be linearly interpolated
+                 between the 2 closest, if 0 the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ */
+attribute_deprecated
+struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff);
+
+/**
+ * Resample an array of samples using a previously configured context.
+ * @param src an array of unconsumed samples
+ * @param consumed the number of samples of src which have been consumed are returned here
+ * @param src_size the number of unconsumed samples available
+ * @param dst_size the amount of space in samples available in dst
+ * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.
+ * @return the number of samples written in dst or -1 if an error occurred
+ */
+attribute_deprecated
+int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx);
+
+
+/**
+ * Compensate samplerate/timestamp drift. The compensation is done by changing
+ * the resampler parameters, so no audible clicks or similar distortions occur
+ * @param compensation_distance distance in output samples over which the compensation should be performed
+ * @param sample_delta number of output samples which should be output less
+ *
+ * example: av_resample_compensate(c, 10, 500)
+ * here instead of 510 samples only 500 samples would be output
+ *
+ * note, due to rounding the actual compensation might be slightly different,
+ * especially if the compensation_distance is large and the in_rate used during init is small
+ */
+attribute_deprecated
+void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance);
+attribute_deprecated
+void av_resample_close(struct AVResampleContext *c);
+
+/**
+ * @}
+ */
+#endif
+
+/**
+ * @addtogroup lavc_picture
+ * @{
+ */
+
+/**
+ * Allocate memory for the pixels of a picture and setup the AVPicture
+ * fields for it.
+ *
+ * Call avpicture_free() to free it.
+ *
+ * @param picture            the picture structure to be filled in
+ * @param pix_fmt            the pixel format of the picture
+ * @param width              the width of the picture
+ * @param height             the height of the picture
+ * @return zero if successful, a negative error code otherwise
+ *
+ * @see av_image_alloc(), avpicture_fill()
+ */
+int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Free a picture previously allocated by avpicture_alloc().
+ * The data buffer used by the AVPicture is freed, but the AVPicture structure
+ * itself is not.
+ *
+ * @param picture the AVPicture to be freed
+ */
+void avpicture_free(AVPicture *picture);
+
+/**
+ * Setup the picture fields based on the specified image parameters
+ * and the provided image data buffer.
+ *
+ * The picture fields are filled in by using the image data buffer
+ * pointed to by ptr.
+ *
+ * If ptr is NULL, the function will fill only the picture linesize
+ * array and return the required size for the image buffer.
+ *
+ * To allocate an image buffer and fill the picture data in one call,
+ * use avpicture_alloc().
+ *
+ * @param picture       the picture to be filled in
+ * @param ptr           buffer where the image data is stored, or NULL
+ * @param pix_fmt       the pixel format of the image
+ * @param width         the width of the image in pixels
+ * @param height        the height of the image in pixels
+ * @return the size in bytes required for src, a negative error code
+ * in case of failure
+ *
+ * @see av_image_fill_arrays()
+ */
+int avpicture_fill(AVPicture *picture, const uint8_t *ptr,
+                   enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Copy pixel data from an AVPicture into a buffer.
+ *
+ * avpicture_get_size() can be used to compute the required size for
+ * the buffer to fill.
+ *
+ * @param src        source picture with filled data
+ * @param pix_fmt    picture pixel format
+ * @param width      picture width
+ * @param height     picture height
+ * @param dest       destination buffer
+ * @param dest_size  destination buffer size in bytes
+ * @return the number of bytes written to dest, or a negative value
+ * (error code) on error, for example if the destination buffer is not
+ * big enough
+ *
+ * @see av_image_copy_to_buffer()
+ */
+int avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt,
+                     int width, int height,
+                     unsigned char *dest, int dest_size);
+
+/**
+ * Calculate the size in bytes that a picture of the given width and height
+ * would occupy if stored in the given picture format.
+ *
+ * @param pix_fmt    picture pixel format
+ * @param width      picture width
+ * @param height     picture height
+ * @return the computed picture buffer size or a negative error code
+ * in case of error
+ *
+ * @see av_image_get_buffer_size().
+ */
+int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);
+
+#if FF_API_DEINTERLACE
+/**
+ *  deinterlace - if not supported return -1
+ *
+ * @deprecated - use yadif (in libavfilter) instead
+ */
+attribute_deprecated
+int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
+                          enum AVPixelFormat pix_fmt, int width, int height);
+#endif
+/**
+ * Copy image src to dst. Wraps av_image_copy().
+ */
+void av_picture_copy(AVPicture *dst, const AVPicture *src,
+                     enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Crop image top and left side.
+ */
+int av_picture_crop(AVPicture *dst, const AVPicture *src,
+                    enum AVPixelFormat pix_fmt, int top_band, int left_band);
+
+/**
+ * Pad image.
+ */
+int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt,
+            int padtop, int padbottom, int padleft, int padright, int *color);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_misc Utility functions
+ * @ingroup libavc
+ *
+ * Miscellaneous utility functions related to both encoding and decoding
+ * (or neither).
+ * @{
+ */
+
+/**
+ * @defgroup lavc_misc_pixfmt Pixel formats
+ *
+ * Functions for working with pixel formats.
+ * @{
+ */
+
+/**
+ * Utility function to access log2_chroma_w log2_chroma_h from
+ * the pixel format AVPixFmtDescriptor.
+ *
+ * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample
+ * for one that returns a failure code and continues in case of invalid
+ * pix_fmts.
+ *
+ * @param[in]  pix_fmt the pixel format
+ * @param[out] h_shift store log2_chroma_w
+ * @param[out] v_shift store log2_chroma_h
+ *
+ * @see av_pix_fmt_get_chroma_sub_sample
+ */
+
+void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift);
+
+/**
+ * Return a value representing the fourCC code associated to the
+ * pixel format pix_fmt, or 0 if no associated fourCC code can be
+ * found.
+ */
+unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
+
+/**
+ * @deprecated see av_get_pix_fmt_loss()
+ */
+int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt,
+                             int has_alpha);
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format.  When converting from one pixel format to another, information loss
+ * may occur.  For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt_of_2() searches which of
+ * the given pixel formats should be used to suffer the least amount of loss.
+ * The pixel formats from which it chooses one, are determined by the
+ * pix_fmt_list parameter.
+ *
+ *
+ * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list,
+                                            enum AVPixelFormat src_pix_fmt,
+                                            int has_alpha, int *loss_ptr);
+
+/**
+ * @deprecated see av_find_best_pix_fmt_of_2()
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
+                                            enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
+
+attribute_deprecated
+#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI
+enum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list,
+                                              enum AVPixelFormat src_pix_fmt,
+                                              int has_alpha, int *loss_ptr);
+#else
+enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
+                                            enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
+#endif
+
+
+enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+
+/**
+ * @}
+ */
+
+#if FF_API_SET_DIMENSIONS
+/**
+ * @deprecated this function is not supposed to be used from outside of lavc
+ */
+attribute_deprecated
+void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
+#endif
+
+/**
+ * Put a string representing the codec tag codec_tag in buf.
+ *
+ * @param buf       buffer to place codec tag in
+ * @param buf_size size in bytes of buf
+ * @param codec_tag codec tag to assign
+ * @return the length of the string that would have been generated if
+ * enough space had been available, excluding the trailing null
+ */
+size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);
+
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec the codec that is searched for the given profile
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ */
+const char *av_get_profile_name(const AVCodec *codec, int profile);
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
+//FIXME func typedef
+
+/**
+ * Fill AVFrame audio data and linesize pointers.
+ *
+ * The buffer buf must be a preallocated buffer with a size big enough
+ * to contain the specified samples amount. The filled AVFrame data
+ * pointers will point to this buffer.
+ *
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame       the AVFrame
+ *                    frame->nb_samples must be set prior to calling the
+ *                    function. This function fills in frame->data,
+ *                    frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt  sample format
+ * @param buf         buffer to use for frame data
+ * @param buf_size    size of buffer
+ * @param align       plane size sample alignment (0 = default)
+ * @return            >=0 on success, negative error code on failure
+ * @todo return the size in bytes required to store the samples in
+ * case of success, at the next libavutil bump
+ */
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+                             enum AVSampleFormat sample_fmt, const uint8_t *buf,
+                             int buf_size, int align);
+
+/**
+ * Reset the internal decoder state / flush internal buffers. Should be called
+ * e.g. when seeking or when switching to a different stream.
+ *
+ * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0),
+ * this invalidates the frames previously returned from the decoder. When
+ * refcounted frames are used, the decoder just releases any references it might
+ * keep internally, but the caller's reference remains valid.
+ */
+void avcodec_flush_buffers(AVCodecContext *avctx);
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return the PCM codec associated with a sample format.
+ * @param be  endianness, 0 for little, 1 for big,
+ *            -1 (or anything else) for native
+ * @return  AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
+ */
+enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
+
+/**
+ * Return codec bits per sample.
+ * Only return non-zero if the bits per sample is exactly correct, not an
+ * approximation.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return audio frame duration.
+ *
+ * @param avctx        codec context
+ * @param frame_bytes  size of the frame, or 0 if unknown
+ * @return             frame duration, in samples, if known. 0 if not able to
+ *                     determine.
+ */
+int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
+
+
+typedef struct AVBitStreamFilterContext {
+    void *priv_data;                       /* filter-private state (presumably priv_data_size bytes -- see AVBitStreamFilter) */
+    struct AVBitStreamFilter *filter;      /* the registered filter this context instantiates */
+    AVCodecParserContext *parser;          /* parser some filters use internally; NOTE(review): may be unused -- confirm */
+    struct AVBitStreamFilterContext *next; /* linked-list pointer for chaining contexts */
+} AVBitStreamFilterContext;
+
+
+typedef struct AVBitStreamFilter {
+    const char *name;       /* name used for lookup by av_bitstream_filter_init() */
+    int priv_data_size;     /* size of the per-context priv_data to allocate */
+    int (*filter)(AVBitStreamFilterContext *bsfc,
+                  AVCodecContext *avctx, const char *args,
+                  uint8_t **poutbuf, int *poutbuf_size,
+                  const uint8_t *buf, int buf_size, int keyframe);   /* see av_bitstream_filter_filter() for semantics */
+    void (*close)(AVBitStreamFilterContext *bsfc);   /* release filter-private resources */
+    struct AVBitStreamFilter *next;   /* next filter in the global registration list (av_bitstream_filter_next()) */
+} AVBitStreamFilter;
+
+/**
+ * Register a bitstream filter.
+ *
+ * The filter will be accessible to the application code through
+ * av_bitstream_filter_next() or can be directly initialized with
+ * av_bitstream_filter_init().
+ *
+ * @see avcodec_register_all()
+ */
+void av_register_bitstream_filter(AVBitStreamFilter *bsf);
+
+/**
+ * Create and initialize a bitstream filter context given a bitstream
+ * filter name.
+ *
+ * The returned context must be freed with av_bitstream_filter_close().
+ *
+ * @param name    the name of the bitstream filter
+ * @return a bitstream filter context if a matching filter was found
+ * and successfully initialized, NULL otherwise
+ */
+AVBitStreamFilterContext *av_bitstream_filter_init(const char *name);
+
+/**
+ * Filter bitstream.
+ *
+ * This function filters the buffer buf with size buf_size, and places the
+ * filtered buffer in the buffer pointed to by poutbuf.
+ *
+ * The output buffer must be freed by the caller.
+ *
+ * @param bsfc            bitstream filter context created by av_bitstream_filter_init()
+ * @param avctx           AVCodecContext accessed by the filter, may be NULL.
+ *                        If specified, this must point to the encoder context of the
+ *                        output stream the packet is sent to.
+ * @param args            arguments which specify the filter configuration, may be NULL
+ * @param poutbuf         pointer which is updated to point to the filtered buffer
+ * @param poutbuf_size    pointer which is updated to the filtered buffer size in bytes
+ * @param buf             buffer containing the data to filter
+ * @param buf_size        size in bytes of buf
+ * @param keyframe        set to non-zero if the buffer to filter corresponds to a key-frame packet data
+ * @return >= 0 in case of success, or a negative error code in case of failure
+ *
+ * If the return value is positive, an output buffer is allocated and
+ * is available in *poutbuf, and is distinct from the input buffer.
+ *
+ * If the return value is 0, the output buffer is not allocated and
+ * should be considered identical to the input buffer, or in case
+ * *poutbuf was set it points to the input buffer (not necessarily to
+ * its starting address).
+ */
+int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,
+                               AVCodecContext *avctx, const char *args,
+                               uint8_t **poutbuf, int *poutbuf_size,
+                               const uint8_t *buf, int buf_size, int keyframe);
+
+/**
+ * Release bitstream filter context.
+ *
+ * @param bsf the bitstream filter context created with
+ * av_bitstream_filter_init(), can be NULL
+ */
+void av_bitstream_filter_close(AVBitStreamFilterContext *bsf);
+
+/**
+ * If f is NULL, return the first registered bitstream filter,
+ * if f is non-NULL, return the next registered bitstream filter
+ * after f, or NULL if f is the last one.
+ *
+ * This function can be used to iterate over all registered bitstream
+ * filters.
+ */
+AVBitStreamFilter *av_bitstream_filter_next(const AVBitStreamFilter *f);
+
+/* memory */
+
+/**
+ * Same behaviour as av_fast_malloc(), but the buffer has additional
+ * FF_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
+ *
+ * In addition the whole buffer will initially and after resizes
+ * be 0-initialized so that no uninitialized data will ever appear.
+ */
+void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Same behaviour as av_fast_padded_malloc(), except that the buffer will always
+ * be 0-initialized after call.
+ */
+void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Encode extradata length to a buffer. Used by xiph codecs.
+ *
+ * @param s buffer to write to; must be at least (v/255+1) bytes long
+ * @param v size of extradata in bytes
+ * @return number of bytes written to the buffer.
+ */
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
+
+#if FF_API_MISSING_SAMPLE
+/**
+ * Log a generic warning message about a missing feature. This function is
+ * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] feature string containing the name of the missing feature
+ * @param[in] want_sample indicates if samples are wanted which exhibit this feature.
+ * If want_sample is non-zero, additional verbiage will be added to the log
+ * message which tells the user how to report samples to the development
+ * mailing list.
+ * @deprecated Use avpriv_report_missing_feature() instead.
+ */
+attribute_deprecated
+void av_log_missing_feature(void *avc, const char *feature, int want_sample);
+
+/**
+ * Log a generic warning message asking for a sample. This function is
+ * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] msg string containing an optional message, or NULL if no message
+ * @deprecated Use avpriv_request_sample() instead.
+ */
+attribute_deprecated
+void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3);
+#endif /* FF_API_MISSING_SAMPLE */
+
+/**
+ * Register the hardware accelerator hwaccel.
+ */
+void av_register_hwaccel(AVHWAccel *hwaccel);
+
+/**
+ * If hwaccel is NULL, returns the first registered hardware accelerator,
+ * if hwaccel is non-NULL, returns the next registered hardware accelerator
+ * after hwaccel, or NULL if hwaccel is the last one.
+ */
+AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel);
+
+
+/**
+ * Lock operation used by lockmgr
+ */
+enum AVLockOp {
+  AV_LOCK_CREATE,  ///< Create a mutex and store it in *mutex (NULL on entry)
+  AV_LOCK_OBTAIN,  ///< Lock the mutex
+  AV_LOCK_RELEASE, ///< Unlock the mutex
+  AV_LOCK_DESTROY, ///< Free mutex resources; always assumed to succeed
+};
+
+/**
+ * Register a user provided lock manager supporting the operations
+ * specified by AVLockOp. The "mutex" argument to the function points
+ * to a (void *) where the lockmgr should store/get a pointer to a user
+ * allocated mutex. It is NULL upon AV_LOCK_CREATE and equal to the
+ * value left by the last call for all other ops. If the lock manager is
+ * unable to perform the op then it should leave the mutex in the same
+ * state as when it was called and return a non-zero value. However,
+ * when called with AV_LOCK_DESTROY the mutex will always be assumed to
+ * have been successfully destroyed. If av_lockmgr_register succeeds
+ * it will return a non-negative value, if it fails it will return a
+ * negative value and destroy all mutex and unregister all callbacks.
+ * av_lockmgr_register is not thread-safe, it must be called from a
+ * single thread before any calls which make use of locking are used.
+ *
+ * @param cb User defined callback. av_lockmgr_register invokes calls
+ *           to this callback and the previously registered callback.
+ *           The callback will be used to create more than one mutex
+ *           each of which must be backed by its own underlying locking
+ *           mechanism (i.e. do not use a single static object to
+ *           implement your lock manager). If cb is set to NULL the
+ *           lockmgr will be unregistered.
+ */
+int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));
+
+/**
+ * Get the type of the given codec.
+ */
+enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
+
+/**
+ * Get the name of a codec.
+ * @return  a static string identifying the codec; never NULL
+ */
+const char *avcodec_get_name(enum AVCodecID id);
+
+/**
+ * @return a positive value if s is open (i.e. avcodec_open2() was called on it
+ * with no corresponding avcodec_close()), 0 otherwise.
+ */
+int avcodec_is_open(AVCodecContext *s);
+
+/**
+ * @return a non-zero number if codec is an encoder, zero otherwise
+ */
+int av_codec_is_encoder(const AVCodec *codec);
+
+/**
+ * @return a non-zero number if codec is a decoder, zero otherwise
+ */
+int av_codec_is_decoder(const AVCodec *codec);
+
+/**
+ * @return descriptor for given codec ID or NULL if no descriptor exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id);
+
+/**
+ * Iterate over all codec descriptors known to libavcodec.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev);
+
+/**
+ * @return codec descriptor with the given name or NULL if no such descriptor
+ *         exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVCODEC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/bit_depth_template.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,98 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "mathops.h"
+#include "rnd_avg.h"
+#include "libavutil/intreadwrite.h"
+
+#ifndef BIT_DEPTH
+#define BIT_DEPTH 8   /* default when the including file does not choose a depth */
+#endif
+
+#ifdef AVCODEC_BIT_DEPTH_TEMPLATE_C   /* re-inclusion: drop the previous depth's macros first */
+#   undef pixel
+#   undef pixel2
+#   undef pixel4
+#   undef dctcoef
+#   undef INIT_CLIP
+#   undef no_rnd_avg_pixel4
+#   undef rnd_avg_pixel4
+#   undef AV_RN2P
+#   undef AV_RN4P
+#   undef AV_RN4PA
+#   undef AV_WN2P
+#   undef AV_WN4P
+#   undef AV_WN4PA
+#   undef CLIP
+#   undef FUNC
+#   undef FUNCC
+#   undef av_clip_pixel
+#   undef PIXEL_SPLAT_X4
+#else
+#   define AVCODEC_BIT_DEPTH_TEMPLATE_C   /* first inclusion: just mark the template as active */
+#endif
+
+#if defined(USE_VAR_BIT_DEPTH) || BIT_DEPTH > 8   /* pixels stored in 16-bit units */
+#   define pixel  uint16_t
+#   define pixel2 uint32_t
+#   define pixel4 uint64_t
+#   define dctcoef int32_t
+
+#   define INIT_CLIP
+#   define no_rnd_avg_pixel4 no_rnd_avg64
+#   define    rnd_avg_pixel4    rnd_avg64
+#   define AV_RN2P  AV_RN32
+#   define AV_RN4P  AV_RN64
+#   define AV_RN4PA AV_RN64A
+#   define AV_WN2P  AV_WN32
+#   define AV_WN4P  AV_WN64
+#   define AV_WN4PA AV_WN64A
+#   define PIXEL_SPLAT_X4(x) ((x)*0x0001000100010001ULL)   /* replicate one 16-bit pixel into all 4 lanes of a pixel4 */
+
+#   define av_clip_pixel(a) av_clip_uintp2(a, BIT_DEPTH)
+#   define CLIP(a)          av_clip_uintp2(a, BIT_DEPTH)
+#else   /* BIT_DEPTH == 8: pixels stored in 8-bit units */
+#   define pixel  uint8_t
+#   define pixel2 uint16_t
+#   define pixel4 uint32_t
+#   define dctcoef int16_t
+
+#   define INIT_CLIP
+#   define no_rnd_avg_pixel4 no_rnd_avg32
+#   define    rnd_avg_pixel4    rnd_avg32
+#   define AV_RN2P  AV_RN16
+#   define AV_RN4P  AV_RN32
+#   define AV_RN4PA AV_RN32A
+#   define AV_WN2P  AV_WN16
+#   define AV_WN4P  AV_WN32
+#   define AV_WN4PA AV_WN32A
+#   define PIXEL_SPLAT_X4(x) ((x)*0x01010101U)   /* replicate one 8-bit pixel into all 4 lanes of a pixel4 */
+
+#   define av_clip_pixel(a) av_clip_uint8(a)
+#   define CLIP(a) av_clip_uint8(a)
+#endif
+
+#define FUNC3(a, b, c)  a ## _ ## b ## c   /* paste: a_<b><c> */
+#define FUNC2(a, b, c)  FUNC3(a, b, c)     /* indirection so BIT_DEPTH expands before ## pasting */
+#ifdef USE_VAR_BIT_DEPTH
+#define FUNC(a)  FUNC2(a, var,)            /* foo -> foo_var */
+#define FUNCC(a) FUNC2(a, var, _c)         /* foo -> foo_var_c */
+#else
+#define FUNC(a)  FUNC2(a, BIT_DEPTH,)      /* e.g. foo -> foo_8 */
+#define FUNCC(a) FUNC2(a, BIT_DEPTH, _c)   /* e.g. foo -> foo_8_c */
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/bswapdsp.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,32 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_BSWAP_BUF_H
+#define AVCODEC_BSWAP_BUF_H
+
+#include <stdint.h>
+
+typedef struct BswapDSPContext {
+    void (*bswap_buf)(uint32_t *dst, const uint32_t *src, int w);     /* byte-swap w 32-bit words from src into dst */
+    void (*bswap16_buf)(uint16_t *dst, const uint16_t *src, int len); /* byte-swap len 16-bit words from src into dst */
+} BswapDSPContext;
+
+void ff_bswapdsp_init(BswapDSPContext *c);
+void ff_bswapdsp_init_x86(BswapDSPContext *c);
+
+#endif /* AVCODEC_BSWAP_BUF_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/bytestream.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,374 @@
+/*
+ * Bytestream functions
+ * copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
+ * Copyright (c) 2012 Aneesh Dogra (lionaneesh) <lionaneesh@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_BYTESTREAM_H
+#define AVCODEC_BYTESTREAM_H
+
+#include <stdint.h>
+#include <string.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/intreadwrite.h"
+
+typedef struct GetByteContext {
+    const uint8_t *buffer, *buffer_end, *buffer_start;
+} GetByteContext;
+
+typedef struct PutByteContext {
+    uint8_t *buffer, *buffer_end, *buffer_start;
+    int eof;
+} PutByteContext;
+
+#define DEF(type, name, bytes, read, write)                                  \
+static av_always_inline type bytestream_get_ ## name(const uint8_t **b)        \
+{                                                                              \
+    (*b) += bytes;                                                             \
+    return read(*b - bytes);                                                   \
+}                                                                              \
+static av_always_inline void bytestream_put_ ## name(uint8_t **b,              \
+                                                     const type value)         \
+{                                                                              \
+    write(*b, value);                                                          \
+    (*b) += bytes;                                                             \
+}                                                                              \
+static av_always_inline void bytestream2_put_ ## name ## u(PutByteContext *p,  \
+                                                           const type value)   \
+{                                                                              \
+    bytestream_put_ ## name(&p->buffer, value);                                \
+}                                                                              \
+static av_always_inline void bytestream2_put_ ## name(PutByteContext *p,       \
+                                                      const type value)        \
+{                                                                              \
+    if (!p->eof && (p->buffer_end - p->buffer >= bytes)) {                     \
+        write(p->buffer, value);                                               \
+        p->buffer += bytes;                                                    \
+    } else                                                                     \
+        p->eof = 1;                                                            \
+}                                                                              \
+static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g)  \
+{                                                                              \
+    return bytestream_get_ ## name(&g->buffer);                                \
+}                                                                              \
+static av_always_inline type bytestream2_get_ ## name(GetByteContext *g)       \
+{                                                                              \
+    if (g->buffer_end - g->buffer < bytes)                                     \
+        return 0;                                                              \
+    return bytestream2_get_ ## name ## u(g);                                   \
+}                                                                              \
+static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g)      \
+{                                                                              \
+    if (g->buffer_end - g->buffer < bytes)                                     \
+        return 0;                                                              \
+    return read(g->buffer);                                                    \
+}
+
+DEF(uint64_t,     le64, 8, AV_RL64, AV_WL64)
+DEF(unsigned int, le32, 4, AV_RL32, AV_WL32)
+DEF(unsigned int, le24, 3, AV_RL24, AV_WL24)
+DEF(unsigned int, le16, 2, AV_RL16, AV_WL16)
+DEF(uint64_t,     be64, 8, AV_RB64, AV_WB64)
+DEF(unsigned int, be32, 4, AV_RB32, AV_WB32)
+DEF(unsigned int, be24, 3, AV_RB24, AV_WB24)
+DEF(unsigned int, be16, 2, AV_RB16, AV_WB16)
+DEF(unsigned int, byte, 1, AV_RB8 , AV_WB8)
+
+#if HAVE_BIGENDIAN
+#   define bytestream2_get_ne16  bytestream2_get_be16
+#   define bytestream2_get_ne24  bytestream2_get_be24
+#   define bytestream2_get_ne32  bytestream2_get_be32
+#   define bytestream2_get_ne64  bytestream2_get_be64
+#   define bytestream2_get_ne16u bytestream2_get_be16u
+#   define bytestream2_get_ne24u bytestream2_get_be24u
+#   define bytestream2_get_ne32u bytestream2_get_be32u
+#   define bytestream2_get_ne64u bytestream2_get_be64u
+#   define bytestream2_put_ne16  bytestream2_put_be16
+#   define bytestream2_put_ne24  bytestream2_put_be24
+#   define bytestream2_put_ne32  bytestream2_put_be32
+#   define bytestream2_put_ne64  bytestream2_put_be64
+#   define bytestream2_peek_ne16 bytestream2_peek_be16
+#   define bytestream2_peek_ne24 bytestream2_peek_be24
+#   define bytestream2_peek_ne32 bytestream2_peek_be32
+#   define bytestream2_peek_ne64 bytestream2_peek_be64
+#else
+#   define bytestream2_get_ne16  bytestream2_get_le16
+#   define bytestream2_get_ne24  bytestream2_get_le24
+#   define bytestream2_get_ne32  bytestream2_get_le32
+#   define bytestream2_get_ne64  bytestream2_get_le64
+#   define bytestream2_get_ne16u bytestream2_get_le16u
+#   define bytestream2_get_ne24u bytestream2_get_le24u
+#   define bytestream2_get_ne32u bytestream2_get_le32u
+#   define bytestream2_get_ne64u bytestream2_get_le64u
+#   define bytestream2_put_ne16  bytestream2_put_le16
+#   define bytestream2_put_ne24  bytestream2_put_le24
+#   define bytestream2_put_ne32  bytestream2_put_le32
+#   define bytestream2_put_ne64  bytestream2_put_le64
+#   define bytestream2_peek_ne16 bytestream2_peek_le16
+#   define bytestream2_peek_ne24 bytestream2_peek_le24
+#   define bytestream2_peek_ne32 bytestream2_peek_le32
+#   define bytestream2_peek_ne64 bytestream2_peek_le64
+#endif
+
+static av_always_inline void bytestream2_init(GetByteContext *g,
+                                              const uint8_t *buf,
+                                              int buf_size)
+{
+    av_assert0(buf_size >= 0);
+    g->buffer       = buf;
+    g->buffer_start = buf;
+    g->buffer_end   = buf + buf_size;
+}
+
+static av_always_inline void bytestream2_init_writer(PutByteContext *p,
+                                                     uint8_t *buf,
+                                                     int buf_size)
+{
+    av_assert0(buf_size >= 0);
+    p->buffer       = buf;
+    p->buffer_start = buf;
+    p->buffer_end   = buf + buf_size;
+    p->eof          = 0;
+}
+
+static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
+{
+    return g->buffer_end - g->buffer;
+}
+
+static av_always_inline unsigned int bytestream2_get_bytes_left_p(PutByteContext *p)
+{
+    return p->buffer_end - p->buffer;
+}
+
+static av_always_inline void bytestream2_skip(GetByteContext *g,
+                                              unsigned int size)
+{
+    g->buffer += FFMIN(g->buffer_end - g->buffer, size);
+}
+
+static av_always_inline void bytestream2_skipu(GetByteContext *g,
+                                               unsigned int size)
+{
+    g->buffer += size;
+}
+
+static av_always_inline void bytestream2_skip_p(PutByteContext *p,
+                                                unsigned int size)
+{
+    int size2;
+    if (p->eof)
+        return;
+    size2 = FFMIN(p->buffer_end - p->buffer, size);
+    if (size2 != size)
+        p->eof = 1;
+    p->buffer += size2;
+}
+
+static av_always_inline int bytestream2_tell(GetByteContext *g)
+{
+    return (int)(g->buffer - g->buffer_start);
+}
+
+static av_always_inline int bytestream2_tell_p(PutByteContext *p)
+{
+    return (int)(p->buffer - p->buffer_start);
+}
+
+static av_always_inline int bytestream2_size(GetByteContext *g)
+{
+    return (int)(g->buffer_end - g->buffer_start);
+}
+
+static av_always_inline int bytestream2_size_p(PutByteContext *p)
+{
+    return (int)(p->buffer_end - p->buffer_start);
+}
+
+static av_always_inline int bytestream2_seek(GetByteContext *g,
+                                             int offset,
+                                             int whence)
+{
+    switch (whence) {
+    case SEEK_CUR:
+        offset     = av_clip(offset, -(g->buffer - g->buffer_start),
+                             g->buffer_end - g->buffer);
+        g->buffer += offset;
+        break;
+    case SEEK_END:
+        offset    = av_clip(offset, -(g->buffer_end - g->buffer_start), 0);
+        g->buffer = g->buffer_end + offset;
+        break;
+    case SEEK_SET:
+        offset    = av_clip(offset, 0, g->buffer_end - g->buffer_start);
+        g->buffer = g->buffer_start + offset;
+        break;
+    default:
+        return AVERROR(EINVAL);
+    }
+    return bytestream2_tell(g);
+}
+
+static av_always_inline int bytestream2_seek_p(PutByteContext *p,
+                                               int offset,
+                                               int whence)
+{
+    p->eof = 0;
+    switch (whence) {
+    case SEEK_CUR:
+        if (p->buffer_end - p->buffer < offset)
+            p->eof = 1;
+        offset     = av_clip(offset, -(p->buffer - p->buffer_start),
+                             p->buffer_end - p->buffer);
+        p->buffer += offset;
+        break;
+    case SEEK_END:
+        if (offset > 0)
+            p->eof = 1;
+        offset    = av_clip(offset, -(p->buffer_end - p->buffer_start), 0);
+        p->buffer = p->buffer_end + offset;
+        break;
+    case SEEK_SET:
+        if (p->buffer_end - p->buffer_start < offset)
+            p->eof = 1;
+        offset    = av_clip(offset, 0, p->buffer_end - p->buffer_start);
+        p->buffer = p->buffer_start + offset;
+        break;
+    default:
+        return AVERROR(EINVAL);
+    }
+    return bytestream2_tell_p(p);
+}
+
+static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
+                                                            uint8_t *dst,
+                                                            unsigned int size)
+{
+    int size2 = FFMIN(g->buffer_end - g->buffer, size);
+    memcpy(dst, g->buffer, size2);
+    g->buffer += size2;
+    return size2;
+}
+
+static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g,
+                                                             uint8_t *dst,
+                                                             unsigned int size)
+{
+    memcpy(dst, g->buffer, size);
+    g->buffer += size;
+    return size;
+}
+
+static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p,
+                                                            const uint8_t *src,
+                                                            unsigned int size)
+{
+    int size2;
+    if (p->eof)
+        return 0;
+    size2 = FFMIN(p->buffer_end - p->buffer, size);
+    if (size2 != size)
+        p->eof = 1;
+    memcpy(p->buffer, src, size2);
+    p->buffer += size2;
+    return size2;
+}
+
+static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p,
+                                                             const uint8_t *src,
+                                                             unsigned int size)
+{
+    memcpy(p->buffer, src, size);
+    p->buffer += size;
+    return size;
+}
+
+static av_always_inline void bytestream2_set_buffer(PutByteContext *p,
+                                                    const uint8_t c,
+                                                    unsigned int size)
+{
+    int size2;
+    if (p->eof)
+        return;
+    size2 = FFMIN(p->buffer_end - p->buffer, size);
+    if (size2 != size)
+        p->eof = 1;
+    memset(p->buffer, c, size2);
+    p->buffer += size2;
+}
+
+static av_always_inline void bytestream2_set_bufferu(PutByteContext *p,
+                                                     const uint8_t c,
+                                                     unsigned int size)
+{
+    memset(p->buffer, c, size);
+    p->buffer += size;
+}
+
+static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
+{
+    return p->eof;
+}
+
+static av_always_inline unsigned int bytestream2_copy_bufferu(PutByteContext *p,
+                                                              GetByteContext *g,
+                                                              unsigned int size)
+{
+    memcpy(p->buffer, g->buffer, size);
+    p->buffer += size;
+    g->buffer += size;
+    return size;
+}
+
+static av_always_inline unsigned int bytestream2_copy_buffer(PutByteContext *p,
+                                                             GetByteContext *g,
+                                                             unsigned int size)
+{
+    int size2;
+
+    if (p->eof)
+        return 0;
+    size  = FFMIN(g->buffer_end - g->buffer, size);
+    size2 = FFMIN(p->buffer_end - p->buffer, size);
+    if (size2 != size)
+        p->eof = 1;
+
+    return bytestream2_copy_bufferu(p, g, size2);
+}
+
+static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b,
+                                                           uint8_t *dst,
+                                                           unsigned int size)
+{
+    memcpy(dst, *b, size);
+    (*b) += size;
+    return size;
+}
+
+static av_always_inline void bytestream_put_buffer(uint8_t **b,
+                                                   const uint8_t *src,
+                                                   unsigned int size)
+{
+    memcpy(*b, src, size);
+    (*b) += size;
+}
+
+#endif /* AVCODEC_BYTESTREAM_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/cabac.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,227 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Context Adaptive Binary Arithmetic Coder.
+ */
+
+#include <string.h>
+
+#include "libavutil/common.h"
+#include "libavutil/timer.h"
+#include "get_bits.h"
+#include "cabac.h"
+#include "cabac_functions.h"
+
+#include "cabac_tablegen.h"
+
+/**
+ * Initialize the CABAC encoder to write into buf.
+ * @param buf_size size of buf in bytes
+ */
+void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size){
+    init_put_bits(&c->pb, buf, buf_size);
+
+    c->low= 0;
+    c->range= 0x1FE;
+    c->outstanding_count= 0;
+    c->pb.bit_left++; //avoids firstBitFlag
+}
+
+/**
+ * Initialize the CABAC decoder to read from buf.
+ * @param buf_size size of buf in bytes
+ */
+void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size){
+    c->bytestream_start=
+    c->bytestream= buf;
+    c->bytestream_end= buf + buf_size;
+
+#if CABAC_BITS == 16
+    c->low =  (*c->bytestream++)<<18;
+    c->low+=  (*c->bytestream++)<<10;
+#else
+    c->low =  (*c->bytestream++)<<10;
+#endif
+    c->low+= ((*c->bytestream++)<<2) + 2;
+    c->range= 0x1FE;
+}
+
+void ff_init_cabac_states(void)
+{
+    static int initialized = 0;
+
+    if (initialized)
+        return;
+
+    cabac_tableinit();
+
+    initialized = 1;
+}
+
+#ifdef TEST
+#define SIZE 10240
+
+#include "libavutil/lfg.h"
+#include "avcodec.h"
+
+static inline void put_cabac_bit(CABACContext *c, int b){
+    put_bits(&c->pb, 1, b);
+    for(;c->outstanding_count; c->outstanding_count--){
+        put_bits(&c->pb, 1, 1-b);
+    }
+}
+
+static inline void renorm_cabac_encoder(CABACContext *c){
+    while(c->range < 0x100){
+        //FIXME optimize
+        if(c->low<0x100){
+            put_cabac_bit(c, 0);
+        }else if(c->low<0x200){
+            c->outstanding_count++;
+            c->low -= 0x100;
+        }else{
+            put_cabac_bit(c, 1);
+            c->low -= 0x200;
+        }
+
+        c->range+= c->range;
+        c->low += c->low;
+    }
+}
+
+static void put_cabac(CABACContext *c, uint8_t * const state, int bit){
+    int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + *state];
+
+    if(bit == ((*state)&1)){
+        c->range -= RangeLPS;
+        *state    = ff_h264_mlps_state[128 + *state];
+    }else{
+        c->low += c->range - RangeLPS;
+        c->range = RangeLPS;
+        *state= ff_h264_mlps_state[127 - *state];
+    }
+
+    renorm_cabac_encoder(c);
+}
+
+/**
+ * @param bit 0 -> write zero bit, !=0 write one bit
+ */
+static void put_cabac_bypass(CABACContext *c, int bit){
+    c->low += c->low;
+
+    if(bit){
+        c->low += c->range;
+    }
+//FIXME optimize
+    if(c->low<0x200){
+        put_cabac_bit(c, 0);
+    }else if(c->low<0x400){
+        c->outstanding_count++;
+        c->low -= 0x200;
+    }else{
+        put_cabac_bit(c, 1);
+        c->low -= 0x400;
+    }
+}
+
+/**
+ *
+ * @return the number of bytes written
+ */
+static int put_cabac_terminate(CABACContext *c, int bit){
+    c->range -= 2;
+
+    if(!bit){
+        renorm_cabac_encoder(c);
+    }else{
+        c->low += c->range;
+        c->range= 2;
+
+        renorm_cabac_encoder(c);
+
+        av_assert0(c->low <= 0x1FF);
+        put_cabac_bit(c, c->low>>9);
+        put_bits(&c->pb, 2, ((c->low>>7)&3)|1);
+
+        flush_put_bits(&c->pb); //FIXME FIXME FIXME XXX wrong
+    }
+
+    return (put_bits_count(&c->pb)+7)>>3;
+}
+
+int main(void){
+    CABACContext c;
+    uint8_t b[9*SIZE];
+    uint8_t r[9*SIZE];
+    int i;
+    uint8_t state[10]= {0};
+    AVLFG prng;
+
+    av_lfg_init(&prng, 1);
+    ff_init_cabac_encoder(&c, b, SIZE);
+    ff_init_cabac_states();
+
+    for(i=0; i<SIZE; i++){
+        if(2*i<SIZE) r[i] = av_lfg_get(&prng) % 7;
+        else         r[i] = (i>>8)&1;
+    }
+
+    for(i=0; i<SIZE; i++){
+START_TIMER
+        put_cabac_bypass(&c, r[i]&1);
+STOP_TIMER("put_cabac_bypass")
+    }
+
+    for(i=0; i<SIZE; i++){
+START_TIMER
+        put_cabac(&c, state, r[i]&1);
+STOP_TIMER("put_cabac")
+    }
+
+    put_cabac_terminate(&c, 1);
+
+    ff_init_cabac_decoder(&c, b, SIZE);
+
+    memset(state, 0, sizeof(state));
+
+    for(i=0; i<SIZE; i++){
+START_TIMER
+        if( (r[i]&1) != get_cabac_bypass(&c) )
+            av_log(NULL, AV_LOG_ERROR, "CABAC bypass failure at %d\n", i);
+STOP_TIMER("get_cabac_bypass")
+    }
+
+    for(i=0; i<SIZE; i++){
+START_TIMER
+        if( (r[i]&1) != get_cabac_noinline(&c, state) )
+            av_log(NULL, AV_LOG_ERROR, "CABAC failure at %d\n", i);
+STOP_TIMER("get_cabac")
+    }
+    if(!get_cabac_terminate(&c))
+        av_log(NULL, AV_LOG_ERROR, "where's the Terminator?\n");
+
+    return 0;
+}
+
+#endif /* TEST */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/cabac.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,62 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Context Adaptive Binary Arithmetic Coder.
+ */
+
+#ifndef AVCODEC_CABAC_H
+#define AVCODEC_CABAC_H
+
+#include <stdint.h>
+
+#include "put_bits.h"
+
+#if CONFIG_HARDCODED_TABLES
+#define CABAC_TABLE_CONST const
+#else
+#define CABAC_TABLE_CONST
+#endif
+extern CABAC_TABLE_CONST uint8_t ff_h264_cabac_tables[512 + 4*2*64 + 4*64 + 63];
+#define H264_NORM_SHIFT_OFFSET 0
+#define H264_LPS_RANGE_OFFSET 512
+#define H264_MLPS_STATE_OFFSET 1024
+#define H264_LAST_COEFF_FLAG_OFFSET_8x8_OFFSET 1280
+
+#define CABAC_BITS 16
+#define CABAC_MASK ((1<<CABAC_BITS)-1)
+
+typedef struct CABACContext{
+    int low;
+    int range;
+    int outstanding_count;
+    const uint8_t *bytestream_start;
+    const uint8_t *bytestream;
+    const uint8_t *bytestream_end;
+    PutBitContext pb;
+}CABACContext;
+
+void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size);
+void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size);
+void ff_init_cabac_states(void);
+
+#endif /* AVCODEC_CABAC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/cabac_functions.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,198 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Context Adaptive Binary Arithmetic Coder inline functions
+ */
+
+#ifndef AVCODEC_CABAC_FUNCTIONS_H
+#define AVCODEC_CABAC_FUNCTIONS_H
+
+#include <stdint.h>
+
+#include "cabac.h"
+#include "config.h"
+
+#ifndef UNCHECKED_BITSTREAM_READER
+#define UNCHECKED_BITSTREAM_READER !CONFIG_SAFE_BITSTREAM_READER
+#endif
+
+#if ARCH_AARCH64
+#   include "aarch64/cabac.h"
+#endif
+#if ARCH_ARM
+#   include "arm/cabac.h"
+#endif
+#if ARCH_X86
+#   include "x86/cabac.h"
+#endif
+
+static CABAC_TABLE_CONST uint8_t * const ff_h264_norm_shift = ff_h264_cabac_tables + H264_NORM_SHIFT_OFFSET;
+static CABAC_TABLE_CONST uint8_t * const ff_h264_lps_range = ff_h264_cabac_tables + H264_LPS_RANGE_OFFSET;
+static CABAC_TABLE_CONST uint8_t * const ff_h264_mlps_state = ff_h264_cabac_tables + H264_MLPS_STATE_OFFSET;
+static CABAC_TABLE_CONST uint8_t * const ff_h264_last_coeff_flag_offset_8x8 = ff_h264_cabac_tables + H264_LAST_COEFF_FLAG_OFFSET_8x8_OFFSET;
+
+static void refill(CABACContext *c){
+#if CABAC_BITS == 16
+        c->low+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1);
+#else
+        c->low+= c->bytestream[0]<<1;
+#endif
+    c->low -= CABAC_MASK;
+#if !UNCHECKED_BITSTREAM_READER
+    if (c->bytestream < c->bytestream_end)
+#endif
+        c->bytestream += CABAC_BITS / 8;
+}
+
+static inline void renorm_cabac_decoder_once(CABACContext *c){
+    int shift= (uint32_t)(c->range - 0x100)>>31;
+    c->range<<= shift;
+    c->low  <<= shift;
+    if(!(c->low & CABAC_MASK))
+        refill(c);
+}
+
+#ifndef get_cabac_inline
+static void refill2(CABACContext *c){
+    int i, x;
+
+    x= c->low ^ (c->low-1);
+    i= 7 - ff_h264_norm_shift[x>>(CABAC_BITS-1)];
+
+    x= -CABAC_MASK;
+
+#if CABAC_BITS == 16
+        x+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1);
+#else
+        x+= c->bytestream[0]<<1;
+#endif
+
+    c->low += x<<i;
+#if !UNCHECKED_BITSTREAM_READER
+    if (c->bytestream < c->bytestream_end)
+#endif
+        c->bytestream += CABAC_BITS/8;
+}
+
+static av_always_inline int get_cabac_inline(CABACContext *c, uint8_t * const state){
+    int s = *state;
+    int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + s];
+    int bit, lps_mask;
+
+    c->range -= RangeLPS;
+    lps_mask= ((c->range<<(CABAC_BITS+1)) - c->low)>>31;
+
+    c->low -= (c->range<<(CABAC_BITS+1)) & lps_mask;
+    c->range += (RangeLPS - c->range) & lps_mask;
+
+    s^=lps_mask;
+    *state= (ff_h264_mlps_state+128)[s];
+    bit= s&1;
+
+    lps_mask= ff_h264_norm_shift[c->range];
+    c->range<<= lps_mask;
+    c->low  <<= lps_mask;
+    if(!(c->low & CABAC_MASK))
+        refill2(c);
+    return bit;
+}
+#endif
+
+static int av_noinline av_unused get_cabac_noinline(CABACContext *c, uint8_t * const state){
+    return get_cabac_inline(c,state);
+}
+
+static int av_unused get_cabac(CABACContext *c, uint8_t * const state){
+    return get_cabac_inline(c,state);
+}
+
+#ifndef get_cabac_bypass
+static int av_unused get_cabac_bypass(CABACContext *c){
+    int range;
+    c->low += c->low;
+
+    if(!(c->low & CABAC_MASK))
+        refill(c);
+
+    range= c->range<<(CABAC_BITS+1);
+    if(c->low < range){
+        return 0;
+    }else{
+        c->low -= range;
+        return 1;
+    }
+}
+#endif
+
+#ifndef get_cabac_bypass_sign
+static av_always_inline int get_cabac_bypass_sign(CABACContext *c, int val){
+    int range, mask;
+    c->low += c->low;
+
+    if(!(c->low & CABAC_MASK))
+        refill(c);
+
+    range= c->range<<(CABAC_BITS+1);
+    c->low -= range;
+    mask= c->low >> 31;
+    range &= mask;
+    c->low += range;
+    return (val^mask)-mask;
+}
+#endif
+
+/**
+ *
+ * @return the number of bytes read or 0 if no end
+ */
+static int av_unused get_cabac_terminate(CABACContext *c){
+    c->range -= 2;
+    if(c->low < c->range<<(CABAC_BITS+1)){
+        renorm_cabac_decoder_once(c);
+        return 0;
+    }else{
+        return c->bytestream - c->bytestream_start;
+    }
+}
+
+/**
+ * Skip @p n bytes and reset the decoder.
+ * @return the address of the first skipped byte, or NULL if there are fewer than @p n bytes left
+ */
+static av_unused const uint8_t* skip_bytes(CABACContext *c, int n) {
+    const uint8_t *ptr = c->bytestream;
+
+    if (c->low & 0x1)
+        ptr--;
+#if CABAC_BITS == 16
+    if (c->low & 0x1FF)
+        ptr--;
+#endif
+    if ((int) (c->bytestream_end - ptr) < n)
+        return NULL;
+    ff_init_cabac_decoder(c, ptr + n, c->bytestream_end - ptr - n);
+
+    return ptr;
+}
+
+#endif /* AVCODEC_CABAC_FUNCTIONS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/cabac_tablegen.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,108 @@
+/*
+ * Header file for hardcoded CABAC table
+ *
+ * Copyright (c) 2014 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CABAC_TABLEGEN_H
+#define AVCODEC_CABAC_TABLEGEN_H
+
+#if CONFIG_HARDCODED_TABLES
+#define cabac_tableinit()
+#include "libavcodec/cabac_tables.h"
+#else
+uint8_t ff_h264_cabac_tables[512 + 4*2*64 + 4*64 + 63];
+
+static const uint8_t lps_range[64][4]= {
+{128,176,208,240}, {128,167,197,227}, {128,158,187,216}, {123,150,178,205},
+{116,142,169,195}, {111,135,160,185}, {105,128,152,175}, {100,122,144,166},
+{ 95,116,137,158}, { 90,110,130,150}, { 85,104,123,142}, { 81, 99,117,135},
+{ 77, 94,111,128}, { 73, 89,105,122}, { 69, 85,100,116}, { 66, 80, 95,110},
+{ 62, 76, 90,104}, { 59, 72, 86, 99}, { 56, 69, 81, 94}, { 53, 65, 77, 89},
+{ 51, 62, 73, 85}, { 48, 59, 69, 80}, { 46, 56, 66, 76}, { 43, 53, 63, 72},
+{ 41, 50, 59, 69}, { 39, 48, 56, 65}, { 37, 45, 54, 62}, { 35, 43, 51, 59},
+{ 33, 41, 48, 56}, { 32, 39, 46, 53}, { 30, 37, 43, 50}, { 29, 35, 41, 48},
+{ 27, 33, 39, 45}, { 26, 31, 37, 43}, { 24, 30, 35, 41}, { 23, 28, 33, 39},
+{ 22, 27, 32, 37}, { 21, 26, 30, 35}, { 20, 24, 29, 33}, { 19, 23, 27, 31},
+{ 18, 22, 26, 30}, { 17, 21, 25, 28}, { 16, 20, 23, 27}, { 15, 19, 22, 25},
+{ 14, 18, 21, 24}, { 14, 17, 20, 23}, { 13, 16, 19, 22}, { 12, 15, 18, 21},
+{ 12, 14, 17, 20}, { 11, 14, 16, 19}, { 11, 13, 15, 18}, { 10, 12, 15, 17},
+{ 10, 12, 14, 16}, {  9, 11, 13, 15}, {  9, 11, 12, 14}, {  8, 10, 12, 14},
+{  8,  9, 11, 13}, {  7,  9, 11, 12}, {  7,  9, 10, 12}, {  7,  8, 10, 11},
+{  6,  8,  9, 11}, {  6,  7,  9, 10}, {  6,  7,  8,  9}, {  2,  2,  2,  2},
+};
+
+static const uint8_t mps_state[64]= {
+  1, 2, 3, 4, 5, 6, 7, 8,
+  9,10,11,12,13,14,15,16,
+ 17,18,19,20,21,22,23,24,
+ 25,26,27,28,29,30,31,32,
+ 33,34,35,36,37,38,39,40,
+ 41,42,43,44,45,46,47,48,
+ 49,50,51,52,53,54,55,56,
+ 57,58,59,60,61,62,62,63,
+};
+
+static const uint8_t lps_state[64]= {
+  0, 0, 1, 2, 2, 4, 4, 5,
+  6, 7, 8, 9, 9,11,11,12,
+ 13,13,15,15,16,16,18,18,
+ 19,19,21,21,22,22,23,24,
+ 24,25,26,26,27,27,28,29,
+ 29,30,30,30,31,32,32,33,
+ 33,33,34,34,35,35,35,36,
+ 36,36,37,37,37,38,38,63,
+};
+
+static const uint8_t last_coeff_flag_offset_8x8[63] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8
+};
+
+static av_cold void cabac_tableinit(void)
+{
+    int i, j;
+    for (i = 0; i < 512; i++)
+        ff_h264_norm_shift[i] = i ? 8 - av_log2(i) : 9;
+
+    for(i=0; i<64; i++){
+        for(j=0; j<4; j++){ //FIXME check if this is worth the 1 shift we save
+            ff_h264_lps_range[j*2*64+2*i+0]=
+            ff_h264_lps_range[j*2*64+2*i+1]= lps_range[i][j];
+        }
+        ff_h264_mlps_state[128 + 2 * i + 0] = 2 * mps_state[i] + 0;
+        ff_h264_mlps_state[128 + 2 * i + 1] = 2 * mps_state[i] + 1;
+
+        if( i ){
+            ff_h264_mlps_state[128-2*i-1]= 2*lps_state[i]+0;
+            ff_h264_mlps_state[128-2*i-2]= 2*lps_state[i]+1;
+        }else{
+            ff_h264_mlps_state[128-2*i-1]= 1;
+            ff_h264_mlps_state[128-2*i-2]= 0;
+        }
+    }
+    for(i=0; i< 63; i++){
+      ff_h264_last_coeff_flag_offset_8x8[i] = last_coeff_flag_offset_8x8[i];
+    }
+}
+#endif /* CONFIG_HARDCODED_TABLES */
+
+#endif /* AVCODEC_CABAC_TABLEGEN_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/get_bits.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,707 @@
+/*
+ * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * bitstream reader API header.
+ */
+
+#ifndef AVCODEC_GET_BITS_H
+#define AVCODEC_GET_BITS_H
+
+#include <stdint.h>
+
+#include "libavutil/common.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/log.h"
+#include "libavutil/avassert.h"
+#include "mathops.h"
+
+/*
+ * Safe bitstream reading:
+ * optionally, the get_bits API can check to ensure that we
+ * don't read past input buffer boundaries. This is protected
+ * with CONFIG_SAFE_BITSTREAM_READER at the global level, and
+ * then below that with UNCHECKED_BITSTREAM_READER at the per-
+ * decoder level. This means that decoders that check internally
+ * can "#define UNCHECKED_BITSTREAM_READER 1" to disable
+ * overread checks.
+ * Boundary checking causes a minor performance penalty so for
+ * applications that won't want/need this, it can be disabled
+ * globally using "#define CONFIG_SAFE_BITSTREAM_READER 0".
+ */
+#ifndef UNCHECKED_BITSTREAM_READER
+#define UNCHECKED_BITSTREAM_READER !CONFIG_SAFE_BITSTREAM_READER
+#endif
+
+typedef struct GetBitContext {
+    const uint8_t *buffer, *buffer_end;
+    int index;
+    int size_in_bits;
+    int size_in_bits_plus8;
+} GetBitContext;
+
+#define VLC_TYPE int16_t
+
+typedef struct VLC {
+    int bits;
+    VLC_TYPE (*table)[2]; ///< code, bits
+    int table_size, table_allocated;
+} VLC;
+
+typedef struct RL_VLC_ELEM {
+    int16_t level;
+    int8_t len;
+    uint8_t run;
+} RL_VLC_ELEM;
+
+/* Bitstream reader API docs:
+ * name
+ *   arbitrary name which is used as prefix for the internal variables
+ *
+ * gb
+ *   getbitcontext
+ *
+ * OPEN_READER(name, gb)
+ *   load gb into local variables
+ *
+ * CLOSE_READER(name, gb)
+ *   store local vars in gb
+ *
+ * UPDATE_CACHE(name, gb)
+ *   Refill the internal cache from the bitstream.
+ *   After this call at least MIN_CACHE_BITS will be available.
+ *
+ * GET_CACHE(name, gb)
+ *   Will output the contents of the internal cache,
+ *   next bit is MSB of 32 or 64 bit (FIXME 64bit).
+ *
+ * SHOW_UBITS(name, gb, num)
+ *   Will return the next num bits.
+ *
+ * SHOW_SBITS(name, gb, num)
+ *   Will return the next num bits and do sign extension.
+ *
+ * SKIP_BITS(name, gb, num)
+ *   Will skip over the next num bits.
+ *   Note, this is equivalent to SKIP_CACHE; SKIP_COUNTER.
+ *
+ * SKIP_CACHE(name, gb, num)
+ *   Will remove the next num bits from the cache (note SKIP_COUNTER
+ *   MUST be called before UPDATE_CACHE / CLOSE_READER).
+ *
+ * SKIP_COUNTER(name, gb, num)
+ *   Will increment the internal bit counter (see SKIP_CACHE & SKIP_BITS).
+ *
+ * LAST_SKIP_BITS(name, gb, num)
+ *   Like SKIP_BITS, to be used if next call is UPDATE_CACHE or CLOSE_READER.
+ *
+ * BITS_LEFT(name, gb)
+ *   Return the number of bits left
+ *
+ * For examples see get_bits, show_bits, skip_bits, get_vlc.
+ */
+
+#ifdef LONG_BITSTREAM_READER
+#   define MIN_CACHE_BITS 32
+#else
+#   define MIN_CACHE_BITS 25
+#endif
+
+#define OPEN_READER_NOSIZE(name, gb)            \
+    unsigned int name ## _index = (gb)->index;  \
+    unsigned int av_unused name ## _cache
+
+#if UNCHECKED_BITSTREAM_READER
+#define OPEN_READER(name, gb) OPEN_READER_NOSIZE(name, gb)
+
+#define BITS_AVAILABLE(name, gb) 1
+#else
+#define OPEN_READER(name, gb)                   \
+    OPEN_READER_NOSIZE(name, gb);               \
+    unsigned int name ## _size_plus8 = (gb)->size_in_bits_plus8
+
+#define BITS_AVAILABLE(name, gb) name ## _index < name ## _size_plus8
+#endif
+
+#define CLOSE_READER(name, gb) (gb)->index = name ## _index
+
+# ifdef LONG_BITSTREAM_READER
+
+# define UPDATE_CACHE_LE(name, gb) name ## _cache = \
+      AV_RL64((gb)->buffer + (name ## _index >> 3)) >> (name ## _index & 7)
+
+# define UPDATE_CACHE_BE(name, gb) name ## _cache = \
+      AV_RB64((gb)->buffer + (name ## _index >> 3)) >> (32 - (name ## _index & 7))
+
+#else
+
+# define UPDATE_CACHE_LE(name, gb) name ## _cache = \
+      AV_RL32((gb)->buffer + (name ## _index >> 3)) >> (name ## _index & 7)
+
+# define UPDATE_CACHE_BE(name, gb) name ## _cache = \
+      AV_RB32((gb)->buffer + (name ## _index >> 3)) << (name ## _index & 7)
+
+#endif
+
+
+#ifdef BITSTREAM_READER_LE
+
+# define UPDATE_CACHE(name, gb) UPDATE_CACHE_LE(name, gb)
+
+# define SKIP_CACHE(name, gb, num) name ## _cache >>= (num)
+
+#else
+
+# define UPDATE_CACHE(name, gb) UPDATE_CACHE_BE(name, gb)
+
+# define SKIP_CACHE(name, gb, num) name ## _cache <<= (num)
+
+#endif
+
+#if UNCHECKED_BITSTREAM_READER
+#   define SKIP_COUNTER(name, gb, num) name ## _index += (num)
+#else
+#   define SKIP_COUNTER(name, gb, num) \
+    name ## _index = FFMIN(name ## _size_plus8, name ## _index + (num))
+#endif
+
+#define BITS_LEFT(name, gb) ((int)((gb)->size_in_bits - name ## _index))
+
+#define SKIP_BITS(name, gb, num)                \
+    do {                                        \
+        SKIP_CACHE(name, gb, num);              \
+        SKIP_COUNTER(name, gb, num);            \
+    } while (0)
+
+#define LAST_SKIP_BITS(name, gb, num) SKIP_COUNTER(name, gb, num)
+
+#define SHOW_UBITS_LE(name, gb, num) zero_extend(name ## _cache, num)
+#define SHOW_SBITS_LE(name, gb, num) sign_extend(name ## _cache, num)
+
+#define SHOW_UBITS_BE(name, gb, num) NEG_USR32(name ## _cache, num)
+#define SHOW_SBITS_BE(name, gb, num) NEG_SSR32(name ## _cache, num)
+
+#ifdef BITSTREAM_READER_LE
+#   define SHOW_UBITS(name, gb, num) SHOW_UBITS_LE(name, gb, num)
+#   define SHOW_SBITS(name, gb, num) SHOW_SBITS_LE(name, gb, num)
+#else
+#   define SHOW_UBITS(name, gb, num) SHOW_UBITS_BE(name, gb, num)
+#   define SHOW_SBITS(name, gb, num) SHOW_SBITS_BE(name, gb, num)
+#endif
+
+#define GET_CACHE(name, gb) ((uint32_t) name ## _cache)
+
+static inline int get_bits_count(const GetBitContext *s)  /* current read position, in bits from the start of the buffer */
+{
+    return s->index;
+}
+
+static inline void skip_bits_long(GetBitContext *s, int n)  /* skip n bits; n may be negative to seek backwards */
+{
+#if UNCHECKED_BITSTREAM_READER
+    s->index += n;
+#else
+    s->index += av_clip(n, -s->index, s->size_in_bits_plus8 - s->index);  /* clamp so the index never leaves [0, size_in_bits_plus8] */
+#endif
+}
+
+/**
+ * read mpeg1 dc style vlc (sign bit + mantissa with no MSB).
+ * if MSB not set it is negative
+ * @param n length in bits
+ */
+static inline int get_xbits(GetBitContext *s, int n)
+{
+    register int sign;
+    register int32_t cache;
+    OPEN_READER(re, s);
+    av_assert2(n>0 && n<=25);
+    UPDATE_CACHE(re, s);
+    cache = GET_CACHE(re, s);
+    sign  = ~cache >> 31;  /* all-ones when the leading bit is 0 (i.e. value is negative), else 0 */
+    LAST_SKIP_BITS(re, s, n);
+    CLOSE_READER(re, s);
+    return (NEG_USR32(sign ^ cache, n) ^ sign) - sign;  /* branch-free conditional two's-complement negation */
+}
+
+static inline int get_sbits(GetBitContext *s, int n)  /* read 1-25 bits and sign-extend the result */
+{
+    register int tmp;
+    OPEN_READER(re, s);
+    av_assert2(n>0 && n<=25);
+    UPDATE_CACHE(re, s);
+    tmp = SHOW_SBITS(re, s, n);
+    LAST_SKIP_BITS(re, s, n);
+    CLOSE_READER(re, s);
+    return tmp;
+}
+
+#ifdef CONFIG_SMALL
+unsigned int get_bits(GetBitContext *s, int n);
+unsigned int show_bits(GetBitContext *s, int n);
+void skip_bits(GetBitContext *s, int n);
+unsigned int get_bits1(GetBitContext *s);
+unsigned int show_bits1(GetBitContext *s);
+void skip_bits1(GetBitContext *s);
+unsigned int get_bits_long(GetBitContext *s, int n);
+
+#else
+/**
+ * Read 1-25 bits.
+ */
+static inline unsigned int get_bits(GetBitContext *s, int n)
+{
+    register int tmp;
+    OPEN_READER(re, s);
+    av_assert2(n>0 && n<=25);
+    UPDATE_CACHE(re, s);
+    tmp = SHOW_UBITS(re, s, n);
+    LAST_SKIP_BITS(re, s, n);
+    CLOSE_READER(re, s);
+    return tmp;
+}
+
+/**
+ * Show 1-25 bits.
+ */
+static inline unsigned int show_bits(GetBitContext *s, int n)
+{
+    register int tmp;
+    OPEN_READER_NOSIZE(re, s);  /* no bounds bookkeeping needed: the position is never advanced */
+    av_assert2(n>0 && n<=25);
+    UPDATE_CACHE(re, s);
+    tmp = SHOW_UBITS(re, s, n);
+    return tmp;
+}
+
+static inline void skip_bits(GetBitContext *s, int n)  /* skip 1-25 bits */
+{
+    OPEN_READER(re, s);
+    LAST_SKIP_BITS(re, s, n);
+    CLOSE_READER(re, s);
+}
+
+static inline unsigned int get_bits1(GetBitContext *s)  /* read a single bit */
+{
+    unsigned int index = s->index;
+    uint8_t result     = s->buffer[index >> 3];
+#ifdef BITSTREAM_READER_LE
+    result >>= index & 7;
+    result  &= 1;
+#else
+    result <<= index & 7;
+    result >>= 8 - 1;
+#endif
+#if !UNCHECKED_BITSTREAM_READER
+    if (s->index < s->size_in_bits_plus8)  /* only advance while still inside the padded buffer */
+#endif
+        index++;
+    s->index = index;
+
+    return result;
+}
+
+static inline unsigned int show_bits1(GetBitContext *s)  /* peek at the next bit without consuming it */
+{
+    return show_bits(s, 1);
+}
+
+static inline void skip_bits1(GetBitContext *s)
+{
+    skip_bits(s, 1);
+}
+
+/**
+ * Read 0-32 bits.
+ */
+static inline unsigned int get_bits_long(GetBitContext *s, int n)
+{
+    if (!n) {
+        return 0;
+    } else if (n <= MIN_CACHE_BITS) {
+        return get_bits(s, n);
+    } else {
+#ifdef BITSTREAM_READER_LE
+        unsigned ret = get_bits(s, 16);  /* wide reads split in two: low 16 bits first in LE */
+        return ret | (get_bits(s, n - 16) << 16);
+#else
+        unsigned ret = get_bits(s, 16) << (n - 16);  /* high 16 bits first in BE */
+        return ret | get_bits(s, n - 16);
+#endif
+    }
+}
+#endif /* !CONFIG_SMALL */
+
+static inline unsigned int get_bits_le(GetBitContext *s, int n)  /* read 1-25 bits little-endian regardless of the build default */
+{
+    register int tmp;
+    OPEN_READER(re, s);
+    av_assert2(n>0 && n<=25);
+    UPDATE_CACHE_LE(re, s);
+    tmp = SHOW_UBITS_LE(re, s, n);
+    LAST_SKIP_BITS(re, s, n);
+    CLOSE_READER(re, s);
+    return tmp;
+}
+
+/**
+ * Read 0-64 bits.
+ */
+static inline uint64_t get_bits64(GetBitContext *s, int n)
+{
+    if (n <= 32) {
+        return get_bits_long(s, n);
+    } else {
+#ifdef BITSTREAM_READER_LE
+        uint64_t ret = get_bits_long(s, 32);  /* low half first in LE */
+        return ret | (uint64_t) get_bits_long(s, n - 32) << 32;
+#else
+        uint64_t ret = (uint64_t) get_bits_long(s, n - 32) << 32;  /* high bits first in BE */
+        return ret | get_bits_long(s, 32);
+#endif
+    }
+}
+
+/**
+ * Read 0-32 bits as a signed integer.
+ */
+static inline int get_sbits_long(GetBitContext *s, int n)
+{
+    return sign_extend(get_bits_long(s, n), n);
+}
+
+/**
+ * Show 0-32 bits.
+ */
+static inline unsigned int show_bits_long(GetBitContext *s, int n)
+{
+    if (n <= MIN_CACHE_BITS) {
+        return show_bits(s, n);
+    } else {
+        GetBitContext gb = *s;  /* read from a copy so the real position is left untouched */
+        return get_bits_long(&gb, n);
+    }
+}
+
+static inline int check_marker(GetBitContext *s, const char *msg)  /* consume one bit expected to be 1; log msg when it is not */
+{
+    int bit = get_bits1(s);
+    if (!bit)
+        av_log(NULL, AV_LOG_INFO, "Marker bit missing %s\n", msg);
+
+    return bit;
+}
+
+/**
+ * Initialize GetBitContext.
+ * @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes
+ *        larger than the actual read bits because some optimized bitstream
+ *        readers read 32 or 64 bit at once and could read over the end
+ * @param bit_size the size of the buffer in bits
+ * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
+ */
+static inline int init_get_bits(GetBitContext *s, const uint8_t *buffer,
+                                int bit_size)
+{
+    int buffer_size;
+    int ret = 0;
+
+    if (bit_size >= INT_MAX - 7 || bit_size < 0 || !buffer) {
+        bit_size    = 0;  /* invalid input: still set up an empty, safely-readable context */
+        buffer      = NULL;
+        ret         = AVERROR_INVALIDDATA;
+    }
+
+    buffer_size = (bit_size + 7) >> 3;
+
+    s->buffer             = buffer;
+    s->size_in_bits       = bit_size;
+    s->size_in_bits_plus8 = bit_size + 8;
+    s->buffer_end         = buffer + buffer_size;
+    s->index              = 0;
+
+    return ret;
+}
+
+/**
+ * Initialize GetBitContext.
+ * @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes
+ *        larger than the actual read bits because some optimized bitstream
+ *        readers read 32 or 64 bit at once and could read over the end
+ * @param byte_size the size of the buffer in bytes
+ * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
+ */
+static inline int init_get_bits8(GetBitContext *s, const uint8_t *buffer,
+                                 int byte_size)
+{
+    if (byte_size > INT_MAX / 8 || byte_size < 0)
+        byte_size = -1;  /* force init_get_bits down its invalid-input path */
+    return init_get_bits(s, buffer, byte_size * 8);
+}
+
+static inline const uint8_t *align_get_bits(GetBitContext *s)  /* advance to the next byte boundary; return a byte pointer there */
+{
+    int n = -get_bits_count(s) & 7;
+    if (n)
+        skip_bits(s, n);
+    return s->buffer + (s->index >> 3);
+}
+
+#define init_vlc(vlc, nb_bits, nb_codes,                \
+                 bits, bits_wrap, bits_size,            \
+                 codes, codes_wrap, codes_size,         \
+                 flags)                                 \
+    ff_init_vlc_sparse(vlc, nb_bits, nb_codes,          \
+                       bits, bits_wrap, bits_size,      \
+                       codes, codes_wrap, codes_size,   \
+                       NULL, 0, 0, flags)
+
+int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
+                       const void *bits, int bits_wrap, int bits_size,
+                       const void *codes, int codes_wrap, int codes_size,
+                       const void *symbols, int symbols_wrap, int symbols_size,
+                       int flags);
+void ff_free_vlc(VLC *vlc);
+
+#define INIT_VLC_LE             2
+#define INIT_VLC_USE_NEW_STATIC 4
+
+#define INIT_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, static_size)       \
+    do {                                                                   \
+        static VLC_TYPE table[static_size][2];                             \
+        (vlc)->table           = table;                                    \
+        (vlc)->table_allocated = static_size;                              \
+        init_vlc(vlc, bits, a, b, c, d, e, f, g, INIT_VLC_USE_NEW_STATIC); \
+    } while (0)
+
+/**
+ * If the vlc code is invalid and max_depth=1, then no bits will be removed.
+ * If the vlc code is invalid and max_depth>1, then the number of bits removed
+ * is undefined.
+ */
+#define GET_VLC(code, name, gb, table, bits, max_depth)         \
+    do {                                                        \
+        int n, nb_bits;                                         \
+        unsigned int index;                                     \
+                                                                \
+        index = SHOW_UBITS(name, gb, bits);                     \
+        code  = table[index][0];                                \
+        n     = table[index][1];                                \
+                                                                \
+        if (max_depth > 1 && n < 0) {                           \
+            LAST_SKIP_BITS(name, gb, bits);                     \
+            UPDATE_CACHE(name, gb);                             \
+                                                                \
+            nb_bits = -n;                                       \
+                                                                \
+            index = SHOW_UBITS(name, gb, nb_bits) + code;       \
+            code  = table[index][0];                            \
+            n     = table[index][1];                            \
+            if (max_depth > 2 && n < 0) {                       \
+                LAST_SKIP_BITS(name, gb, nb_bits);              \
+                UPDATE_CACHE(name, gb);                         \
+                                                                \
+                nb_bits = -n;                                   \
+                                                                \
+                index = SHOW_UBITS(name, gb, nb_bits) + code;   \
+                code  = table[index][0];                        \
+                n     = table[index][1];                        \
+            }                                                   \
+        }                                                       \
+        SKIP_BITS(name, gb, n);                                 \
+    } while (0)
+
+#define GET_RL_VLC_INTERNAL(level, run, name, gb, table, bits,  \
+                   max_depth, need_update)                      \
+    do {                                                        \
+        int n, nb_bits;                                         \
+        unsigned int index;                                     \
+                                                                \
+        index = SHOW_UBITS(name, gb, bits);                     \
+        level = table[index].level;                             \
+        n     = table[index].len;                               \
+                                                                \
+        if (max_depth > 1 && n < 0) {                           \
+            SKIP_BITS(name, gb, bits);                          \
+            if (need_update) {                                  \
+                UPDATE_CACHE(name, gb);                         \
+            }                                                   \
+                                                                \
+            nb_bits = -n;                                       \
+                                                                \
+            index = SHOW_UBITS(name, gb, nb_bits) + level;      \
+            level = table[index].level;                         \
+            n     = table[index].len;                           \
+        }                                                       \
+        run = table[index].run;                                 \
+        SKIP_BITS(name, gb, n);                                 \
+    } while (0)
+
+/**
+ * Parse a vlc code.
+ * @param bits is the number of bits which will be read at once, must be
+ *             identical to nb_bits in init_vlc()
+ * @param max_depth is the number of times bits bits must be read to completely
+ *                  read the longest vlc code
+ *                  = (max_vlc_length + bits - 1) / bits
+ */
+static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE (*table)[2],
+                                     int bits, int max_depth)
+{
+    int code;
+
+    OPEN_READER(re, s);
+    UPDATE_CACHE(re, s);
+
+    GET_VLC(code, re, s, table, bits, max_depth);  /* multi-level table walk; skips exactly the matched code's bits */
+
+    CLOSE_READER(re, s);
+
+    return code;
+}
+
+static inline int decode012(GetBitContext *gb)  /* decode bit pattern 0 -> 0, 10 -> 1, 11 -> 2 */
+{
+    int n;
+    n = get_bits1(gb);
+    if (n == 0)
+        return 0;
+    else
+        return get_bits1(gb) + 1;
+}
+
+static inline int decode210(GetBitContext *gb)  /* decode bit pattern 1 -> 0, 01 -> 1, 00 -> 2 */
+{
+    if (get_bits1(gb))
+        return 0;
+    else
+        return 2 - get_bits1(gb);
+}
+
+static inline int get_bits_left(GetBitContext *gb)  /* unread bits remaining; can go negative after an overread */
+{
+    return gb->size_in_bits - get_bits_count(gb);
+}
+
+static inline int skip_1stop_8data_bits(GetBitContext *gb)  /* skip "1 + 8 data bits" units until a 0 stop bit; error on overread */
+{
+    if (get_bits_left(gb) <= 0)
+        return AVERROR_INVALIDDATA;
+
+    while (get_bits1(gb)) {
+        skip_bits(gb, 8);
+        if (get_bits_left(gb) <= 0)
+            return AVERROR_INVALIDDATA;
+    }
+
+    return 0;
+}
+
+//#define TRACE
+
+#ifdef TRACE
+static inline void print_bin(int bits, int n)  /* TRACE helper: dump n bits as binary, padded out to 24 columns */
+{
+    int i;
+
+    for (i = n - 1; i >= 0; i--)
+        av_log(NULL, AV_LOG_DEBUG, "%d", (bits >> i) & 1);
+    for (i = n; i < 24; i++)
+        av_log(NULL, AV_LOG_DEBUG, " ");
+}
+
+static inline int get_bits_trace(GetBitContext *s, int n, const char *file,
+                                 const char *func, int line)
+{
+    int r = get_bits(s, n);
+
+    print_bin(r, n);
+    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d bit @%5d in %s %s:%d\n",
+           r, n, r, get_bits_count(s) - n, file, func, line);
+
+    return r;
+}
+
+static inline int get_vlc_trace(GetBitContext *s, VLC_TYPE (*table)[2],
+                                int bits, int max_depth, const char *file,
+                                const char *func, int line)
+{
+    int show  = show_bits(s, 24);  /* snapshot before decoding so the consumed bits can be reconstructed */
+    int pos   = get_bits_count(s);
+    int r     = get_vlc2(s, table, bits, max_depth);
+    int len   = get_bits_count(s) - pos;
+    int bits2 = show >> (24 - len);
+
+    print_bin(bits2, len);
+
+    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d vlc @%5d in %s %s:%d\n",
+           bits2, len, r, pos, file, func, line);
+
+    return r;
+}
+
+#define GET_RL_VLC(level, run, name, gb, table, bits,           \
+                   max_depth, need_update)                      \
+    do {                                                        \
+        int show  = SHOW_UBITS(name, gb, 24);                   \
+        int len;                                                \
+        int pos = name ## _index;                               \
+                                                                \
+        GET_RL_VLC_INTERNAL(level, run, name, gb, table, bits,max_depth, need_update); \
+                                                                \
+        len = name ## _index - pos + 1;                         \
+        show = show >> (24 - len);                              \
+                                                                \
+        print_bin(show, len);                                   \
+                                                                \
+        av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d/%-3d rlv @%5d in %s %s:%d\n",\
+               show, len, run-1, level, pos, __FILE__, __PRETTY_FUNCTION__, __LINE__);\
+    } while (0)                                                 \
+
+
+static inline int get_xbits_trace(GetBitContext *s, int n, const char *file,
+                                  const char *func, int line)
+{
+    int show = show_bits(s, n);  /* peek first: get_xbits consumes the bits */
+    int r    = get_xbits(s, n);
+
+    print_bin(show, n);
+    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d xbt @%5d in %s %s:%d\n",
+           show, n, r, get_bits_count(s) - n, file, func, line);
+
+    return r;
+}
+
+#define get_bits(s, n)  get_bits_trace(s , n, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_bits1(s)    get_bits_trace(s,  1, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_xbits(s, n) get_xbits_trace(s, n, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+
+#define get_vlc(s, vlc)             get_vlc_trace(s, (vlc)->table, (vlc)->bits,   3, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_vlc2(s, tab, bits, max) get_vlc_trace(s,          tab,        bits, max, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+
+#define tprintf(p, ...) av_log(p, AV_LOG_DEBUG, __VA_ARGS__)
+
+#else //TRACE
+#define tprintf(p, ...) { }
+#define GET_RL_VLC GET_RL_VLC_INTERNAL
+#endif
+
+#endif /* AVCODEC_GET_BITS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/golomb.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,285 @@
+/*
+ * exp golomb vlc stuff
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @brief
+ *     exp golomb vlc stuff
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "libavutil/common.h"
+
+#ifdef CONFIG_SMALL
+
+#include "golomb.h"
+
+unsigned int get_bits(GetBitContext *s, int n)  /* out-of-line copy of the get_bits.h inline, built when CONFIG_SMALL */
+{
+    register int tmp;
+    OPEN_READER(re, s);
+    av_assert2(n>0 && n<=25);
+    UPDATE_CACHE(re, s);
+    tmp = SHOW_UBITS(re, s, n);
+    LAST_SKIP_BITS(re, s, n);
+    CLOSE_READER(re, s);
+    return tmp;
+}
+
+/**
+ * Show 1-25 bits.
+ */
+unsigned int show_bits(GetBitContext *s, int n)
+{
+    register int tmp;
+    OPEN_READER_NOSIZE(re, s);
+    av_assert2(n>0 && n<=25);
+    UPDATE_CACHE(re, s);
+    tmp = SHOW_UBITS(re, s, n);
+    return tmp;
+}
+
+void skip_bits(GetBitContext *s, int n)
+{
+    OPEN_READER(re, s);
+    LAST_SKIP_BITS(re, s, n);
+    CLOSE_READER(re, s);
+}
+
+unsigned int get_bits1(GetBitContext *s)
+{
+    unsigned int index = s->index;
+    uint8_t result     = s->buffer[index >> 3];
+#ifdef BITSTREAM_READER_LE
+    result >>= index & 7;
+    result  &= 1;
+#else
+    result <<= index & 7;
+    result >>= 8 - 1;
+#endif
+#if !UNCHECKED_BITSTREAM_READER
+    if (s->index < s->size_in_bits_plus8)  /* only advance while still inside the padded buffer */
+#endif
+        index++;
+    s->index = index;
+
+    return result;
+}
+
+unsigned int show_bits1(GetBitContext *s)
+{
+    return show_bits(s, 1);
+}
+
+void skip_bits1(GetBitContext *s)
+{
+    skip_bits(s, 1);
+}
+
+/**
+ * Read 0-32 bits.
+ */
+unsigned int get_bits_long(GetBitContext *s, int n)
+{
+    if (!n) {
+        return 0;
+    } else if (n <= MIN_CACHE_BITS) {
+        return get_bits(s, n);
+    } else {
+#ifdef BITSTREAM_READER_LE
+        unsigned ret = get_bits(s, 16);
+        return ret | (get_bits(s, n - 16) << 16);
+#else
+        unsigned ret = get_bits(s, 16) << (n - 16);
+        return ret | get_bits(s, n - 16);
+#endif
+    }
+}
+
+unsigned get_ue_golomb_long(GetBitContext *gb)  /* unsigned Exp-Golomb code: prefix of zeros, then 1, then payload */
+{
+    unsigned buf, log;
+
+    buf = show_bits_long(gb, 32);
+    log = 31 - av_log2(buf);  /* count of leading zero bits == prefix length */
+    skip_bits_long(gb, log);
+
+    return get_bits_long(gb, log + 1) - 1;
+}
+
+int get_se_golomb_long(GetBitContext *gb)  /* signed Exp-Golomb: ue codes 0,1,2,3,... map to 0,1,-1,2,... */
+{
+    unsigned int buf = get_ue_golomb_long(gb);
+
+    if (buf & 1)
+        buf = (buf + 1) >> 1;  /* odd ue codes map to positive values */
+    else
+        buf = -(buf >> 1);
+
+    return buf;
+}
+
+#else
+
+const uint8_t ff_golomb_vlc_len[512]={
+19,17,15,15,13,13,13,13,11,11,11,11,11,11,11,11,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
+};
+
+const uint8_t ff_ue_golomb_vlc_code[512]={
+32,32,32,32,32,32,32,32,31,32,32,32,32,32,32,32,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,
+ 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9,10,10,10,10,11,11,11,11,12,12,12,12,13,13,13,13,14,14,14,14,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+const int8_t ff_se_golomb_vlc_code[512]={
+ 17, 17, 17, 17, 17, 17, 17, 17, 16, 17, 17, 17, 17, 17, 17, 17,  8, -8,  9, -9, 10,-10, 11,-11, 12,-12, 13,-13, 14,-14, 15,-15,
+  4,  4,  4,  4, -4, -4, -4, -4,  5,  5,  5,  5, -5, -5, -5, -5,  6,  6,  6,  6, -6, -6, -6, -6,  7,  7,  7,  7, -7, -7, -7, -7,
+  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3,
+  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
+  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+};
+
+const uint8_t ff_ue_golomb_len[256]={
+ 1, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,11,
+11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,13,
+13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,
+13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,17,
+};
+
+const uint8_t ff_interleaved_golomb_vlc_len[256]={
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+};
+
+const uint8_t ff_interleaved_ue_golomb_vlc_code[256]={
+ 15,16,7, 7, 17,18,8, 8, 3, 3, 3, 3, 3, 3, 3, 3,
+ 19,20,9, 9, 21,22,10,10,4, 4, 4, 4, 4, 4, 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 23,24,11,11,25,26,12,12,5, 5, 5, 5, 5, 5, 5, 5,
+ 27,28,13,13,29,30,14,14,6, 6, 6, 6, 6, 6, 6, 6,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+const int8_t ff_interleaved_se_golomb_vlc_code[256]={
+  8, -8,  4,  4,  9, -9, -4, -4,  2,  2,  2,  2,  2,  2,  2,  2,
+ 10,-10,  5,  5, 11,-11, -5, -5, -2, -2, -2, -2, -2, -2, -2, -2,
+  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
+  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
+ 12,-12,  6,  6, 13,-13, -6, -6,  3,  3,  3,  3,  3,  3,  3,  3,
+ 14,-14,  7,  7, 15,-15, -7, -7, -3, -3, -3, -3, -3, -3, -3, -3,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+};
+
+const uint8_t ff_interleaved_dirac_golomb_vlc_code[256]={
+0, 1, 0, 0, 2, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+4, 5, 2, 2, 6, 7, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+8, 9, 4, 4, 10,11,5, 5, 2, 2, 2, 2, 2, 2, 2, 2,
+12,13,6, 6, 14,15,7, 7, 3, 3, 3, 3, 3, 3, 3, 3,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,};
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/golomb.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,623 @@
+/*
+ * exp golomb vlc stuff
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2004 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @brief
+ *     exp golomb vlc stuff
+ * @author Michael Niedermayer <michaelni@gmx.at> and Alex Beregszaszi
+ */
+
+#ifndef AVCODEC_GOLOMB_H
+#define AVCODEC_GOLOMB_H
+
+#include <stdint.h>
+
+#include "get_bits.h"
+#include "put_bits.h"
+
+#define INVALID_VLC           0x80000000
+
+extern const uint8_t ff_golomb_vlc_len[512];
+extern const uint8_t ff_ue_golomb_vlc_code[512];
+extern const  int8_t ff_se_golomb_vlc_code[512];
+extern const uint8_t ff_ue_golomb_len[256];
+
+extern const uint8_t ff_interleaved_golomb_vlc_len[256];
+extern const uint8_t ff_interleaved_ue_golomb_vlc_code[256];
+extern const  int8_t ff_interleaved_se_golomb_vlc_code[256];
+extern const uint8_t ff_interleaved_dirac_golomb_vlc_code[256];
+
+#ifdef CONFIG_SMALL
+unsigned get_ue_golomb_long(GetBitContext *gb);
+
+static inline int get_ue_golomb(GetBitContext *gb)
+{
+    return get_ue_golomb_long(gb);
+}
+#else
+/**
+ * Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
+ */
+static inline unsigned get_ue_golomb_long(GetBitContext *gb)
+{
+    unsigned buf, log;
+
+    buf = show_bits_long(gb, 32);
+    log = 31 - av_log2(buf);
+    skip_bits_long(gb, log);
+
+    return get_bits_long(gb, log + 1) - 1;
+}
+
+/**
+ * read unsigned exp golomb code.
+ */
+static inline int get_ue_golomb(GetBitContext *gb)
+{
+    unsigned int buf;
+
+    OPEN_READER(re, gb);
+    UPDATE_CACHE(re, gb);
+    buf = GET_CACHE(re, gb);
+
+    if (buf >= (1 << 27)) {
+        buf >>= 32 - 9;
+        LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+        CLOSE_READER(re, gb);
+
+        return ff_ue_golomb_vlc_code[buf];
+    } else {
+        int log = 2 * av_log2(buf) - 31;
+        LAST_SKIP_BITS(re, gb, 32 - log);
+        CLOSE_READER(re, gb);
+        if (CONFIG_FTRAPV && log < 0) {
+            av_log(0, AV_LOG_ERROR, "Invalid UE golomb code\n");
+            return AVERROR_INVALIDDATA;
+        }
+        buf >>= log;
+        buf--;
+
+        return buf;
+    }
+}
+#endif
+
+/**
+ * read unsigned exp golomb code, constraint to a max of 31.
+ * the return value is undefined if the stored value exceeds 31.
+ */
+static inline int get_ue_golomb_31(GetBitContext *gb)
+{
+    unsigned int buf;
+
+    OPEN_READER(re, gb);
+    UPDATE_CACHE(re, gb);
+    buf = GET_CACHE(re, gb);
+
+    buf >>= 32 - 9;
+    LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+    CLOSE_READER(re, gb);
+
+    return ff_ue_golomb_vlc_code[buf];
+}
+
+static inline unsigned svq3_get_ue_golomb(GetBitContext *gb)
+{
+    uint32_t buf;
+
+    OPEN_READER(re, gb);
+    UPDATE_CACHE(re, gb);
+    buf = GET_CACHE(re, gb);
+
+    if (buf & 0xAA800000) {
+        buf >>= 32 - 8;
+        LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
+        CLOSE_READER(re, gb);
+
+        return ff_interleaved_ue_golomb_vlc_code[buf];
+    } else {
+        unsigned ret = 1;
+
+        do {
+            buf >>= 32 - 8;
+            LAST_SKIP_BITS(re, gb,
+                           FFMIN(ff_interleaved_golomb_vlc_len[buf], 8));
+
+            if (ff_interleaved_golomb_vlc_len[buf] != 9) {
+                ret <<= (ff_interleaved_golomb_vlc_len[buf] - 1) >> 1;
+                ret  |= ff_interleaved_dirac_golomb_vlc_code[buf];
+                break;
+            }
+            ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
+            UPDATE_CACHE(re, gb);
+            buf = GET_CACHE(re, gb);
+        } while (ret<0x8000000U && BITS_AVAILABLE(re, gb));
+
+        CLOSE_READER(re, gb);
+        return ret - 1;
+    }
+}
+
+/**
+ * read unsigned truncated exp golomb code.
+ */
+static inline int get_te0_golomb(GetBitContext *gb, int range)
+{
+    av_assert2(range >= 1);
+
+    if (range == 1)
+        return 0;
+    else if (range == 2)
+        return get_bits1(gb) ^ 1;
+    else
+        return get_ue_golomb(gb);
+}
+
+/**
+ * read unsigned truncated exp golomb code.
+ */
+static inline int get_te_golomb(GetBitContext *gb, int range)
+{
+    av_assert2(range >= 1);
+
+    if (range == 2)
+        return get_bits1(gb) ^ 1;
+    else
+        return get_ue_golomb(gb);
+}
+
+#ifdef CONFIG_SMALL
+int get_se_golomb_long(GetBitContext *gb);
+
+static inline int get_se_golomb(GetBitContext *gb)
+{
+    return get_se_golomb_long(gb);
+}
+#else
+static inline int get_se_golomb_long(GetBitContext *gb)
+{
+    unsigned int buf = get_ue_golomb_long(gb);
+
+    if (buf & 1)
+        buf = (buf + 1) >> 1;
+    else
+        buf = -(buf >> 1);
+
+    return buf;
+}
+
+/**
+ * read signed exp golomb code.
+ */
+static inline int get_se_golomb(GetBitContext *gb)
+{
+    unsigned int buf;
+
+    OPEN_READER(re, gb);
+    UPDATE_CACHE(re, gb);
+    buf = GET_CACHE(re, gb);
+
+    if (buf >= (1 << 27)) {
+        buf >>= 32 - 9;
+        LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+        CLOSE_READER(re, gb);
+
+        return ff_se_golomb_vlc_code[buf];
+    } else {
+        int log = av_log2(buf);
+        LAST_SKIP_BITS(re, gb, 31 - log);
+        UPDATE_CACHE(re, gb);
+        buf = GET_CACHE(re, gb);
+
+        buf >>= log;
+
+        LAST_SKIP_BITS(re, gb, 32 - log);
+        CLOSE_READER(re, gb);
+
+        if (buf & 1)
+            buf = -(buf >> 1);
+        else
+            buf = (buf >> 1);
+
+        return buf;
+    }
+}
+#endif
+
+static inline int svq3_get_se_golomb(GetBitContext *gb)
+{
+    unsigned int buf;
+
+    OPEN_READER(re, gb);
+    UPDATE_CACHE(re, gb);
+    buf = GET_CACHE(re, gb);
+
+    if (buf & 0xAA800000) {
+        buf >>= 32 - 8;
+        LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
+        CLOSE_READER(re, gb);
+
+        return ff_interleaved_se_golomb_vlc_code[buf];
+    } else {
+        int log;
+        LAST_SKIP_BITS(re, gb, 8);
+        UPDATE_CACHE(re, gb);
+        buf |= 1 | (GET_CACHE(re, gb) >> 8);
+
+        if ((buf & 0xAAAAAAAA) == 0)
+            return INVALID_VLC;
+
+        for (log = 31; (buf & 0x80000000) == 0; log--)
+            buf = (buf << 2) - ((buf << log) >> (log - 1)) + (buf >> 30);
+
+        LAST_SKIP_BITS(re, gb, 63 - 2 * log - 8);
+        CLOSE_READER(re, gb);
+
+        return (signed) (((((buf << log) >> log) - 1) ^ -(buf & 0x1)) + 1) >> 1;
+    }
+}
+
+static inline int dirac_get_se_golomb(GetBitContext *gb)
+{
+    uint32_t ret = svq3_get_ue_golomb(gb);
+
+    if (ret) {
+        uint32_t buf;
+        OPEN_READER(re, gb);
+        UPDATE_CACHE(re, gb);
+        buf = SHOW_SBITS(re, gb, 1);
+        LAST_SKIP_BITS(re, gb, 1);
+        ret = (ret ^ buf) - buf;
+        CLOSE_READER(re, gb);
+    }
+
+    return ret;
+}
+
+/**
+ * read unsigned golomb rice code (ffv1).
+ */
+static inline int get_ur_golomb(GetBitContext *gb, int k, int limit,
+                                int esc_len)
+{
+    unsigned int buf;
+    int log;
+
+    OPEN_READER(re, gb);
+    UPDATE_CACHE(re, gb);
+    buf = GET_CACHE(re, gb);
+
+    log = av_log2(buf);
+
+    if (log > 31 - limit) {
+        buf >>= log - k;
+        buf  += (30 - log) << k;
+        LAST_SKIP_BITS(re, gb, 32 + k - log);
+        CLOSE_READER(re, gb);
+
+        return buf;
+    } else {
+        LAST_SKIP_BITS(re, gb, limit);
+        UPDATE_CACHE(re, gb);
+
+        buf = SHOW_UBITS(re, gb, esc_len);
+
+        LAST_SKIP_BITS(re, gb, esc_len);
+        CLOSE_READER(re, gb);
+
+        return buf + limit - 1;
+    }
+}
+
+/**
+ * read unsigned golomb rice code (jpegls).
+ */
+static inline int get_ur_golomb_jpegls(GetBitContext *gb, int k, int limit,
+                                       int esc_len)
+{
+    unsigned int buf;
+    int log;
+
+    OPEN_READER(re, gb);
+    UPDATE_CACHE(re, gb);
+    buf = GET_CACHE(re, gb);
+
+    log = av_log2(buf);
+
+    if (log - k >= 32 - MIN_CACHE_BITS + (MIN_CACHE_BITS == 32) &&
+        32 - log < limit) {
+        buf >>= log - k;
+        buf  += (30 - log) << k;
+        LAST_SKIP_BITS(re, gb, 32 + k - log);
+        CLOSE_READER(re, gb);
+
+        return buf;
+    } else {
+        int i;
+        for (i = 0; i < limit && SHOW_UBITS(re, gb, 1) == 0; i++) {
+            if (gb->size_in_bits <= re_index)
+                return -1;
+            LAST_SKIP_BITS(re, gb, 1);
+            UPDATE_CACHE(re, gb);
+        }
+        SKIP_BITS(re, gb, 1);
+
+        if (i < limit - 1) {
+            if (k) {
+                buf = SHOW_UBITS(re, gb, k);
+                LAST_SKIP_BITS(re, gb, k);
+            } else {
+                buf = 0;
+            }
+
+            CLOSE_READER(re, gb);
+            return buf + (i << k);
+        } else if (i == limit - 1) {
+            buf = SHOW_UBITS(re, gb, esc_len);
+            LAST_SKIP_BITS(re, gb, esc_len);
+            CLOSE_READER(re, gb);
+
+            return buf + 1;
+        } else
+            return -1;
+    }
+}
+
+/**
+ * read signed golomb rice code (ffv1).
+ */
+static inline int get_sr_golomb(GetBitContext *gb, int k, int limit,
+                                int esc_len)
+{
+    int v = get_ur_golomb(gb, k, limit, esc_len);
+
+    v++;
+    if (v & 1)
+        return v >> 1;
+    else
+        return -(v >> 1);
+
+//    return (v>>1) ^ -(v&1);
+}
+
+/**
+ * read signed golomb rice code (flac).
+ */
+static inline int get_sr_golomb_flac(GetBitContext *gb, int k, int limit,
+                                     int esc_len)
+{
+    int v = get_ur_golomb_jpegls(gb, k, limit, esc_len);
+    return (v >> 1) ^ -(v & 1);
+}
+
+/**
+ * read unsigned golomb rice code (shorten).
+ */
+static inline unsigned int get_ur_golomb_shorten(GetBitContext *gb, int k)
+{
+    return get_ur_golomb_jpegls(gb, k, INT_MAX, 0);
+}
+
+/**
+ * read signed golomb rice code (shorten).
+ */
+static inline int get_sr_golomb_shorten(GetBitContext *gb, int k)
+{
+    int uvar = get_ur_golomb_jpegls(gb, k + 1, INT_MAX, 0);
+    if (uvar & 1)
+        return ~(uvar >> 1);
+    else
+        return uvar >> 1;
+}
+
+#ifdef TRACE
+
+static inline int get_ue(GetBitContext *s, const char *file, const char *func,
+                         int line)
+{
+    int show = show_bits(s, 24);
+    int pos  = get_bits_count(s);
+    int i    = get_ue_golomb(s);
+    int len  = get_bits_count(s) - pos;
+    int bits = show >> (24 - len);
+
+    print_bin(bits, len);
+
+    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d ue  @%5d in %s %s:%d\n",
+           bits, len, i, pos, file, func, line);
+
+    return i;
+}
+
+static inline int get_se(GetBitContext *s, const char *file, const char *func,
+                         int line)
+{
+    int show = show_bits(s, 24);
+    int pos  = get_bits_count(s);
+    int i    = get_se_golomb(s);
+    int len  = get_bits_count(s) - pos;
+    int bits = show >> (24 - len);
+
+    print_bin(bits, len);
+
+    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d se  @%5d in %s %s:%d\n",
+           bits, len, i, pos, file, func, line);
+
+    return i;
+}
+
+static inline int get_te(GetBitContext *s, int r, char *file, const char *func,
+                         int line)
+{
+    int show = show_bits(s, 24);
+    int pos  = get_bits_count(s);
+    int i    = get_te0_golomb(s, r);
+    int len  = get_bits_count(s) - pos;
+    int bits = show >> (24 - len);
+
+    print_bin(bits, len);
+
+    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d te  @%5d in %s %s:%d\n",
+           bits, len, i, pos, file, func, line);
+
+    return i;
+}
+
+#define get_ue_golomb(a) get_ue(a, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_se_golomb(a) get_se(a, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_te_golomb(a, r)  get_te(a, r, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_te0_golomb(a, r) get_te(a, r, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+
+#endif /* TRACE */
+
+/**
+ * write unsigned exp golomb code.
+ */
+static inline void set_ue_golomb(PutBitContext *pb, int i)
+{
+    av_assert2(i >= 0);
+
+#if 0
+    if (i = 0) {
+        put_bits(pb, 1, 1);
+        return;
+    }
+#endif
+    if (i < 256)
+        put_bits(pb, ff_ue_golomb_len[i], i + 1);
+    else {
+        int e = av_log2(i + 1);
+        put_bits(pb, 2 * e + 1, i + 1);
+    }
+}
+
+/**
+ * write truncated unsigned exp golomb code.
+ */
+static inline void set_te_golomb(PutBitContext *pb, int i, int range)
+{
+    av_assert2(range >= 1);
+    av_assert2(i <= range);
+
+    if (range == 2)
+        put_bits(pb, 1, i ^ 1);
+    else
+        set_ue_golomb(pb, i);
+}
+
+/**
+ * write signed exp golomb code. 16 bits at most.
+ */
+static inline void set_se_golomb(PutBitContext *pb, int i)
+{
+#if 0
+    if (i <= 0)
+        i = -2 * i;
+    else
+        i = 2 * i - 1;
+#elif 1
+    i = 2 * i - 1;
+    if (i < 0)
+        i ^= -1;    //FIXME check if gcc does the right thing
+#else
+    i  = 2 * i - 1;
+    i ^= (i >> 31);
+#endif
+    set_ue_golomb(pb, i);
+}
+
+/**
+ * write unsigned golomb rice code (ffv1).
+ */
+static inline void set_ur_golomb(PutBitContext *pb, int i, int k, int limit,
+                                 int esc_len)
+{
+    int e;
+
+    av_assert2(i >= 0);
+
+    e = i >> k;
+    if (e < limit)
+        put_bits(pb, e + k + 1, (1 << k) + (i & ((1 << k) - 1)));
+    else
+        put_bits(pb, limit + esc_len, i - limit + 1);
+}
+
+/**
+ * write unsigned golomb rice code (jpegls).
+ */
+static inline void set_ur_golomb_jpegls(PutBitContext *pb, int i, int k,
+                                        int limit, int esc_len)
+{
+    int e;
+
+    av_assert2(i >= 0);
+
+    e = (i >> k) + 1;
+    if (e < limit) {
+        while (e > 31) {
+            put_bits(pb, 31, 0);
+            e -= 31;
+        }
+        put_bits(pb, e, 1);
+        if (k)
+            put_sbits(pb, k, i);
+    } else {
+        while (limit > 31) {
+            put_bits(pb, 31, 0);
+            limit -= 31;
+        }
+        put_bits(pb, limit, 1);
+        put_bits(pb, esc_len, i - 1);
+    }
+}
+
+/**
+ * write signed golomb rice code (ffv1).
+ */
+static inline void set_sr_golomb(PutBitContext *pb, int i, int k, int limit,
+                                 int esc_len)
+{
+    int v;
+
+    v  = -2 * i - 1;
+    v ^= (v >> 31);
+
+    set_ur_golomb(pb, v, k, limit, esc_len);
+}
+
+/**
+ * write signed golomb rice code (flac).
+ */
+static inline void set_sr_golomb_flac(PutBitContext *pb, int i, int k,
+                                      int limit, int esc_len)
+{
+    int v;
+
+    v  = -2 * i - 1;
+    v ^= (v >> 31);
+
+    set_ur_golomb_jpegls(pb, v, k, limit, esc_len);
+}
+
+#endif /* AVCODEC_GOLOMB_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevc.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,3648 @@
+/*
+ * HEVC video Decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ * Copyright (C) 2012 - 2013 Mickael Raulet
+ * Copyright (C) 2012 - 2013 Gildas Cocherel
+ * Copyright (C) 2012 - 2013 Wassim Hamidouche
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/atomic.h"
+#include "libavutil/attributes.h"
+#include "libavutil/common.h"
+#include "libavutil/display.h"
+#include "libavutil/internal.h"
+#include "libavutil/md5.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/stereo3d.h"
+
+#include "bswapdsp.h"
+#include "bytestream.h"
+#include "cabac_functions.h"
+#include "golomb.h"
+#include "hevc.h"
+
+const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
+
+/**
+ * NOTE: Each function hls_foo correspond to the function foo in the
+ * specification (HLS stands for High Level Syntax).
+ */
+
+/**
+ * Section 5.7
+ */
+
+/* free everything allocated  by pic_arrays_init() */
+static void pic_arrays_free(HEVCContext *s)
+{
+    av_freep(&s->sao);
+    av_freep(&s->deblock);
+
+    av_freep(&s->skip_flag);
+    av_freep(&s->tab_ct_depth);
+
+    av_freep(&s->tab_ipm);
+    av_freep(&s->cbf_luma);
+    av_freep(&s->is_pcm);
+
+    av_freep(&s->qp_y_tab);
+    av_freep(&s->tab_slice_address);
+    av_freep(&s->filter_slice_edges);
+
+    av_freep(&s->horizontal_bs);
+    av_freep(&s->vertical_bs);
+
+    av_freep(&s->sh.entry_point_offset);
+    av_freep(&s->sh.size);
+    av_freep(&s->sh.offset);
+#ifdef USE_PRED
+    av_buffer_pool_uninit(&s->tab_mvf_pool);
+    av_buffer_pool_uninit(&s->rpl_tab_pool);
+#endif
+}
+
+/* allocate arrays that depend on frame dimensions */
+static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
+{
+    int log2_min_cb_size = sps->log2_min_cb_size;
+    int width            = sps->width;
+    int height           = sps->height;
+    int pic_size_in_ctb  = ((width  >> log2_min_cb_size) + 1) *
+                           ((height >> log2_min_cb_size) + 1);
+    int ctb_count        = sps->ctb_width * sps->ctb_height;
+    int min_pu_size      = sps->min_pu_width * sps->min_pu_height;
+
+    s->bs_width  = (width  >> 2) + 1;
+    s->bs_height = (height >> 2) + 1;
+
+    s->sao           = av_mallocz_array(ctb_count, sizeof(*s->sao));
+    s->deblock       = av_mallocz_array(ctb_count, sizeof(*s->deblock));
+    if (!s->sao || !s->deblock)
+        goto fail;
+
+    s->skip_flag    = av_malloc(sps->min_cb_height * sps->min_cb_width);
+    s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
+    if (!s->skip_flag || !s->tab_ct_depth)
+        goto fail;
+
+    s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
+    s->tab_ipm  = av_mallocz(min_pu_size);
+    s->is_pcm   = av_malloc((sps->min_pu_width + 1) * (sps->min_pu_height + 1));
+    if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
+        goto fail;
+
+    s->filter_slice_edges = av_malloc(ctb_count);
+    s->tab_slice_address  = av_malloc_array(pic_size_in_ctb,
+                                      sizeof(*s->tab_slice_address));
+    s->qp_y_tab           = av_malloc_array(pic_size_in_ctb,
+                                      sizeof(*s->qp_y_tab));
+    if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
+        goto fail;
+
+    s->horizontal_bs = av_mallocz_array(s->bs_width, s->bs_height);
+    s->vertical_bs   = av_mallocz_array(s->bs_width, s->bs_height);
+    if (!s->horizontal_bs || !s->vertical_bs)
+        goto fail;
+#ifdef USE_PRED
+    s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
+                                          av_buffer_allocz);
+    s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
+                                          av_buffer_allocz);
+    if (!s->tab_mvf_pool || !s->rpl_tab_pool)
+        goto fail;
+#endif
+
+    return 0;
+
+fail:
+    pic_arrays_free(s);
+    return AVERROR(ENOMEM);
+}
+
+#ifdef USE_PRED
+static void pred_weight_table(HEVCContext *s, GetBitContext *gb)
+{
+    int i = 0;
+    int j = 0;
+    uint8_t luma_weight_l0_flag[16];
+    uint8_t chroma_weight_l0_flag[16];
+    uint8_t luma_weight_l1_flag[16];
+    uint8_t chroma_weight_l1_flag[16];
+
+    s->sh.luma_log2_weight_denom = get_ue_golomb_long(gb);
+    if (s->sps->chroma_format_idc != 0) {
+        int delta = get_se_golomb(gb);
+        s->sh.chroma_log2_weight_denom = av_clip(s->sh.luma_log2_weight_denom + delta, 0, 7);
+    }
+
+    for (i = 0; i < s->sh.nb_refs[L0]; i++) {
+        luma_weight_l0_flag[i] = get_bits1(gb);
+        if (!luma_weight_l0_flag[i]) {
+            s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
+            s->sh.luma_offset_l0[i] = 0;
+        }
+    }
+    if (s->sps->chroma_format_idc != 0) {
+        for (i = 0; i < s->sh.nb_refs[L0]; i++)
+            chroma_weight_l0_flag[i] = get_bits1(gb);
+    } else {
+        for (i = 0; i < s->sh.nb_refs[L0]; i++)
+            chroma_weight_l0_flag[i] = 0;
+    }
+    for (i = 0; i < s->sh.nb_refs[L0]; i++) {
+        if (luma_weight_l0_flag[i]) {
+            int delta_luma_weight_l0 = get_se_golomb(gb);
+            s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
+            s->sh.luma_offset_l0[i] = get_se_golomb(gb);
+        }
+        if (chroma_weight_l0_flag[i]) {
+            for (j = 0; j < 2; j++) {
+                int delta_chroma_weight_l0 = get_se_golomb(gb);
+                int delta_chroma_offset_l0 = get_se_golomb(gb);
+                s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
+                s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
+                                                                                    >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
+            }
+        } else {
+            s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
+            s->sh.chroma_offset_l0[i][0] = 0;
+            s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
+            s->sh.chroma_offset_l0[i][1] = 0;
+        }
+    }
+#ifdef USE_BIPRED
+    if (s->sh.slice_type == B_SLICE) {
+        for (i = 0; i < s->sh.nb_refs[L1]; i++) {
+            luma_weight_l1_flag[i] = get_bits1(gb);
+            if (!luma_weight_l1_flag[i]) {
+                s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
+                s->sh.luma_offset_l1[i] = 0;
+            }
+        }
+        if (s->sps->chroma_format_idc != 0) {
+            for (i = 0; i < s->sh.nb_refs[L1]; i++)
+                chroma_weight_l1_flag[i] = get_bits1(gb);
+        } else {
+            for (i = 0; i < s->sh.nb_refs[L1]; i++)
+                chroma_weight_l1_flag[i] = 0;
+        }
+        for (i = 0; i < s->sh.nb_refs[L1]; i++) {
+            if (luma_weight_l1_flag[i]) {
+                int delta_luma_weight_l1 = get_se_golomb(gb);
+                s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
+                s->sh.luma_offset_l1[i] = get_se_golomb(gb);
+            }
+            if (chroma_weight_l1_flag[i]) {
+                for (j = 0; j < 2; j++) {
+                    int delta_chroma_weight_l1 = get_se_golomb(gb);
+                    int delta_chroma_offset_l1 = get_se_golomb(gb);
+                    s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
+                    s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
+                                                                                        >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
+                }
+            } else {
+                s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
+                s->sh.chroma_offset_l1[i][0] = 0;
+                s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
+                s->sh.chroma_offset_l1[i][1] = 0;
+            }
+        }
+    }
+#endif
+}
+
+static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
+{
+    const HEVCSPS *sps = s->sps;
+    int max_poc_lsb    = 1 << sps->log2_max_poc_lsb;
+    int prev_delta_msb = 0;
+    unsigned int nb_sps = 0, nb_sh;
+    int i;
+
+    rps->nb_refs = 0;
+    if (!sps->long_term_ref_pics_present_flag)
+        return 0;
+
+    if (sps->num_long_term_ref_pics_sps > 0)
+        nb_sps = get_ue_golomb_long(gb);
+    nb_sh = get_ue_golomb_long(gb);
+
+    if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
+        return AVERROR_INVALIDDATA;
+
+    rps->nb_refs = nb_sh + nb_sps;
+
+    for (i = 0; i < rps->nb_refs; i++) {
+        uint8_t delta_poc_msb_present;
+
+        if (i < nb_sps) {
+            uint8_t lt_idx_sps = 0;
+
+            if (sps->num_long_term_ref_pics_sps > 1)
+                lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
+
+            rps->poc[i]  = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
+            rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
+        } else {
+            rps->poc[i]  = get_bits(gb, sps->log2_max_poc_lsb);
+            rps->used[i] = get_bits1(gb);
+        }
+
+        delta_poc_msb_present = get_bits1(gb);
+        if (delta_poc_msb_present) {
+            int delta = get_ue_golomb_long(gb);
+
+            if (i && i != nb_sps)
+                delta += prev_delta_msb;
+
+            rps->poc[i] += s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
+            prev_delta_msb = delta;
+        }
+    }
+
+    return 0;
+}
+#endif
+
+static int get_buffer_sao(HEVCContext *s, AVFrame *frame, const HEVCSPS *sps)
+{
+    int ret, i;
+
+    frame->width  = s->avctx->coded_width  + 2;
+    frame->height = s->avctx->coded_height + 2;
+    if ((ret = ff_get_buffer(s->avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
+        return ret;
+    for (i = 0; frame->data[i]; i++) {
+        int offset = frame->linesize[i] + (1 << sps->pixel_shift);
+        frame->data[i] += offset;
+    }
+    frame->width  = s->avctx->coded_width;
+    frame->height = s->avctx->coded_height;
+
+    return 0;
+}
+
+static int set_sps(HEVCContext *s, const HEVCSPS *sps)
+{
+    int ret;
+
+    pic_arrays_free(s);
+    ret = pic_arrays_init(s, sps);
+    if (ret < 0)
+        goto fail;
+
+    s->avctx->coded_width         = sps->width;
+    s->avctx->coded_height        = sps->height;
+    s->avctx->width               = sps->output_width;
+    s->avctx->height              = sps->output_height;
+    s->avctx->pix_fmt             = sps->pix_fmt;
+    s->avctx->has_b_frames        = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
+
+    ff_set_sar(s->avctx, sps->vui.sar);
+
+    if (sps->vui.video_signal_type_present_flag)
+        s->avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
+                                                               : AVCOL_RANGE_MPEG;
+    else
+        s->avctx->color_range = AVCOL_RANGE_MPEG;
+
+    if (sps->vui.colour_description_present_flag) {
+        s->avctx->color_primaries = sps->vui.colour_primaries;
+        s->avctx->color_trc       = sps->vui.transfer_characteristic;
+        s->avctx->colorspace      = sps->vui.matrix_coeffs;
+    } else {
+        s->avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
+        s->avctx->color_trc       = AVCOL_TRC_UNSPECIFIED;
+        s->avctx->colorspace      = AVCOL_SPC_UNSPECIFIED;
+    }
+
+#ifdef USE_FUNC_PTR
+    ff_hevc_pred_init(&s->hpc,     sps->bit_depth);
+#endif
+    ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
+#ifdef USE_PRED
+    ff_videodsp_init (&s->vdsp,    sps->bit_depth);
+#endif
+
+    if (sps->sao_enabled) {
+#ifdef USE_SAO_SMALL_BUFFER
+        {
+            int ctb_size = 1 << s->sps->log2_ctb_size;
+            int c_count = (s->sps->chroma_format_idc != 0) ? 3 : 1;
+            int c_idx;
+
+            s->sao_pixel_buffer = 
+                av_malloc(((ctb_size + 2) * (ctb_size + 2)) << 
+                          s->sps->pixel_shift);
+            for(c_idx = 0; c_idx < c_count; c_idx++) {
+                int w = s->sps->width >> s->sps->hshift[c_idx];
+                int h = s->sps->height >> s->sps->vshift[c_idx];
+                s->sao_pixel_buffer_h[c_idx] = 
+                    av_malloc((w * 2 * s->sps->ctb_height) <<
+                              s->sps->pixel_shift);
+                s->sao_pixel_buffer_v[c_idx] = 
+                    av_malloc((h * 2 * s->sps->ctb_width) << 
+                              s->sps->pixel_shift);
+            }
+        }
+#else
+        av_frame_unref(s->tmp_frame);
+        ret = get_buffer_sao(s, s->tmp_frame, sps);
+        s->sao_frame = s->tmp_frame;
+#endif
+    }
+
+    s->sps = sps;
+    s->vps = (HEVCVPS*) s->vps_list[s->sps->vps_id]->data;
+
+#ifdef USE_FULL
+    {
+        unsigned int num = 0, den = 0;
+        if (s->vps->vps_timing_info_present_flag) {
+            num = s->vps->vps_num_units_in_tick;
+            den = s->vps->vps_time_scale;
+        } else if (sps->vui.vui_timing_info_present_flag) {
+            num = sps->vui.vui_num_units_in_tick;
+            den = sps->vui.vui_time_scale;
+        }
+        
+        if (num != 0 && den != 0)
+            av_reduce(&s->avctx->framerate.den, &s->avctx->framerate.num,
+                      num, den, 1 << 30);
+    }
+#endif
+
+    return 0;
+
+fail:
+    pic_arrays_free(s);
+    s->sps = NULL;
+    return ret;
+}
+
+static int hls_slice_header(HEVCContext *s)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+    SliceHeader *sh   = &s->sh;
+    int i, j, ret;
+
+    // Coded parameters
+    sh->first_slice_in_pic_flag = get_bits1(gb);
+    if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
+        s->seq_decode = (s->seq_decode + 1) & 0xff;
+        s->max_ra     = INT_MAX;
+        if (IS_IDR(s))
+            ff_hevc_clear_refs(s);
+    }
+    sh->no_output_of_prior_pics_flag = 0;
+    if (IS_IRAP(s))
+        sh->no_output_of_prior_pics_flag = get_bits1(gb);
+
+    sh->pps_id = get_ue_golomb_long(gb);
+    if (sh->pps_id >= MAX_PPS_COUNT || !s->pps_list[sh->pps_id]) {
+        av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
+        return AVERROR_INVALIDDATA;
+    }
+    if (!sh->first_slice_in_pic_flag &&
+        s->pps != (HEVCPPS*)s->pps_list[sh->pps_id]->data) {
+        av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
+        return AVERROR_INVALIDDATA;
+    }
+    s->pps = (HEVCPPS*)s->pps_list[sh->pps_id]->data;
+    if (s->nal_unit_type == NAL_CRA_NUT && s->last_eos == 1)
+        sh->no_output_of_prior_pics_flag = 1;
+
+    if (s->sps != (HEVCSPS*)s->sps_list[s->pps->sps_id]->data) {
+        const HEVCSPS* last_sps = s->sps;
+        s->sps = (HEVCSPS*)s->sps_list[s->pps->sps_id]->data;
+        if (last_sps && IS_IRAP(s) && s->nal_unit_type != NAL_CRA_NUT) {
+            if (s->sps->width !=  last_sps->width || s->sps->height != last_sps->height ||
+                s->sps->temporal_layer[s->sps->max_sub_layers - 1].max_dec_pic_buffering !=
+                last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
+                sh->no_output_of_prior_pics_flag = 0;
+        }
+        ff_hevc_clear_refs(s);
+        ret = set_sps(s, s->sps);
+        if (ret < 0)
+            return ret;
+
+        s->seq_decode = (s->seq_decode + 1) & 0xff;
+        s->max_ra     = INT_MAX;
+    }
+
+    s->avctx->profile = s->sps->ptl.general_ptl.profile_idc;
+    s->avctx->level   = s->sps->ptl.general_ptl.level_idc;
+
+    sh->dependent_slice_segment_flag = 0;
+    if (!sh->first_slice_in_pic_flag) {
+        int slice_address_length;
+
+        if (s->pps->dependent_slice_segments_enabled_flag)
+            sh->dependent_slice_segment_flag = get_bits1(gb);
+
+        slice_address_length = av_ceil_log2(s->sps->ctb_width *
+                                            s->sps->ctb_height);
+        sh->slice_segment_addr = get_bits(gb, slice_address_length);
+        if (sh->slice_segment_addr >= s->sps->ctb_width * s->sps->ctb_height) {
+            av_log(s->avctx, AV_LOG_ERROR,
+                   "Invalid slice segment address: %u.\n",
+                   sh->slice_segment_addr);
+            return AVERROR_INVALIDDATA;
+        }
+
+        if (!sh->dependent_slice_segment_flag) {
+            sh->slice_addr = sh->slice_segment_addr;
+            s->slice_idx++;
+        }
+    } else {
+        sh->slice_segment_addr = sh->slice_addr = 0;
+        s->slice_idx           = 0;
+        s->slice_initialized   = 0;
+    }
+
+    if (!sh->dependent_slice_segment_flag) {
+        s->slice_initialized = 0;
+
+        for (i = 0; i < s->pps->num_extra_slice_header_bits; i++)
+            skip_bits(gb, 1);  // slice_reserved_undetermined_flag[]
+
+        sh->slice_type = get_ue_golomb_long(gb);
+        if (!(sh->slice_type == I_SLICE ||
+              sh->slice_type == P_SLICE ||
+              sh->slice_type == B_SLICE)) {
+            av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
+                   sh->slice_type);
+            return AVERROR_INVALIDDATA;
+        }
+        if (IS_IRAP(s) && sh->slice_type != I_SLICE) {
+            av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
+            return AVERROR_INVALIDDATA;
+        }
+
+        // when flag is not present, picture is inferred to be output
+        sh->pic_output_flag = 1;
+        if (s->pps->output_flag_present_flag)
+            sh->pic_output_flag = get_bits1(gb);
+
+        if (s->sps->separate_colour_plane_flag)
+            sh->colour_plane_id = get_bits(gb, 2);
+        
+        if (!IS_IDR(s)) {
+#ifdef USE_PRED
+            int short_term_ref_pic_set_sps_flag, poc;
+
+            sh->pic_order_cnt_lsb = get_bits(gb, s->sps->log2_max_poc_lsb);
+            poc = ff_hevc_compute_poc(s, sh->pic_order_cnt_lsb);
+            if (!sh->first_slice_in_pic_flag && poc != s->poc) {
+                av_log(s->avctx, AV_LOG_WARNING,
+                       "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
+                if (s->avctx->err_recognition & AV_EF_EXPLODE)
+                    return AVERROR_INVALIDDATA;
+                poc = s->poc;
+            }
+            s->poc = poc;
+
+            short_term_ref_pic_set_sps_flag = get_bits1(gb);
+            if (!short_term_ref_pic_set_sps_flag) {
+                ret = ff_hevc_decode_short_term_rps(s, &sh->slice_rps, s->sps, 1);
+                if (ret < 0)
+                    return ret;
+
+                sh->short_term_rps = &sh->slice_rps;
+            } else {
+                int numbits, rps_idx;
+
+                if (!s->sps->nb_st_rps) {
+                    av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
+                    return AVERROR_INVALIDDATA;
+                }
+
+                numbits = av_ceil_log2(s->sps->nb_st_rps);
+                rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
+                sh->short_term_rps = &s->sps->st_rps[rps_idx];
+            }
+
+            ret = decode_lt_rps(s, &sh->long_term_rps, gb);
+            if (ret < 0) {
+                av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
+                if (s->avctx->err_recognition & AV_EF_EXPLODE)
+                    return AVERROR_INVALIDDATA;
+            }
+
+            if (s->sps->sps_temporal_mvp_enabled_flag)
+                sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
+            else
+                sh->slice_temporal_mvp_enabled_flag = 0;
+#else
+            abort();
+#endif
+        } else {
+            s->sh.short_term_rps = NULL;
+            s->poc               = 0;
+        }
+
+        /* 8.3.1 */
+        if (s->temporal_id == 0 &&
+            s->nal_unit_type != NAL_TRAIL_N &&
+            s->nal_unit_type != NAL_TSA_N   &&
+            s->nal_unit_type != NAL_STSA_N  &&
+            s->nal_unit_type != NAL_RADL_N  &&
+            s->nal_unit_type != NAL_RADL_R  &&
+            s->nal_unit_type != NAL_RASL_N  &&
+            s->nal_unit_type != NAL_RASL_R)
+            s->pocTid0 = s->poc;
+
+        if (s->sps->sao_enabled) {
+            sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
+            if (s->sps->chroma_format_idc != 0) {
+                sh->slice_sample_adaptive_offset_flag[1] =
+                    sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
+            } else {
+                sh->slice_sample_adaptive_offset_flag[1] = 0;
+                sh->slice_sample_adaptive_offset_flag[2] = 0;
+            }
+        } else {
+            sh->slice_sample_adaptive_offset_flag[0] = 0;
+            sh->slice_sample_adaptive_offset_flag[1] = 0;
+            sh->slice_sample_adaptive_offset_flag[2] = 0;
+        }
+
+        sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
+#ifdef USE_PRED
+        if (sh->slice_type == P_SLICE || sh->slice_type == B_SLICE) {
+            int nb_refs;
+
+            sh->nb_refs[L0] = s->pps->num_ref_idx_l0_default_active;
+            if (sh->slice_type == B_SLICE)
+                sh->nb_refs[L1] = s->pps->num_ref_idx_l1_default_active;
+
+            if (get_bits1(gb)) { // num_ref_idx_active_override_flag
+                sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
+                if (sh->slice_type == B_SLICE)
+                    sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
+            }
+            if (sh->nb_refs[L0] > MAX_REFS || sh->nb_refs[L1] > MAX_REFS) {
+                av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
+                       sh->nb_refs[L0], sh->nb_refs[L1]);
+                return AVERROR_INVALIDDATA;
+            }
+
+            sh->rpl_modification_flag[0] = 0;
+            sh->rpl_modification_flag[1] = 0;
+            nb_refs = ff_hevc_frame_nb_refs(s);
+            if (!nb_refs) {
+                av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
+                return AVERROR_INVALIDDATA;
+            }
+
+            if (s->pps->lists_modification_present_flag && nb_refs > 1) {
+                sh->rpl_modification_flag[0] = get_bits1(gb);
+                if (sh->rpl_modification_flag[0]) {
+                    for (i = 0; i < sh->nb_refs[L0]; i++)
+                        sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
+                }
+
+                if (sh->slice_type == B_SLICE) {
+                    sh->rpl_modification_flag[1] = get_bits1(gb);
+                    if (sh->rpl_modification_flag[1] == 1)
+                        for (i = 0; i < sh->nb_refs[L1]; i++)
+                            sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
+                }
+            }
+
+            if (sh->slice_type == B_SLICE)
+                sh->mvd_l1_zero_flag = get_bits1(gb);
+
+            if (s->pps->cabac_init_present_flag)
+                sh->cabac_init_flag = get_bits1(gb);
+            else
+                sh->cabac_init_flag = 0;
+
+            sh->collocated_ref_idx = 0;
+            if (sh->slice_temporal_mvp_enabled_flag) {
+                sh->collocated_list = L0;
+                if (sh->slice_type == B_SLICE)
+                    sh->collocated_list = !get_bits1(gb);
+
+                if (sh->nb_refs[sh->collocated_list] > 1) {
+                    sh->collocated_ref_idx = get_ue_golomb_long(gb);
+                    if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Invalid collocated_ref_idx: %d.\n",
+                               sh->collocated_ref_idx);
+                        return AVERROR_INVALIDDATA;
+                    }
+                }
+            }
+
+            if ((s->pps->weighted_pred_flag   && sh->slice_type == P_SLICE) ||
+                (s->pps->weighted_bipred_flag && sh->slice_type == B_SLICE)) {
+                pred_weight_table(s, gb);
+            }
+
+            sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
+            if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "Invalid number of merging MVP candidates: %d.\n",
+                       sh->max_num_merge_cand);
+                return AVERROR_INVALIDDATA;
+            }
+        }
+#endif
+
+        sh->slice_qp_delta = get_se_golomb(gb);
+               
+        if (s->pps->pic_slice_level_chroma_qp_offsets_present_flag) {
+            sh->slice_cb_qp_offset = get_se_golomb(gb);
+            sh->slice_cr_qp_offset = get_se_golomb(gb);
+        } else {
+            sh->slice_cb_qp_offset = 0;
+            sh->slice_cr_qp_offset = 0;
+        }
+
+        if (s->pps->chroma_qp_offset_list_enabled_flag)
+            sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
+        else
+            sh->cu_chroma_qp_offset_enabled_flag = 0;
+
+        if (s->pps->deblocking_filter_control_present_flag) {
+            int deblocking_filter_override_flag = 0;
+
+            if (s->pps->deblocking_filter_override_enabled_flag)
+                deblocking_filter_override_flag = get_bits1(gb);
+
+            if (deblocking_filter_override_flag) {
+                sh->disable_deblocking_filter_flag = get_bits1(gb);
+                if (!sh->disable_deblocking_filter_flag) {
+                    sh->beta_offset = get_se_golomb(gb) * 2;
+                    sh->tc_offset   = get_se_golomb(gb) * 2;
+                }
+            } else {
+                sh->disable_deblocking_filter_flag = s->pps->disable_dbf;
+                sh->beta_offset                    = s->pps->beta_offset;
+                sh->tc_offset                      = s->pps->tc_offset;
+            }
+        } else {
+            sh->disable_deblocking_filter_flag = 0;
+            sh->beta_offset                    = 0;
+            sh->tc_offset                      = 0;
+        }
+
+        if (s->pps->seq_loop_filter_across_slices_enabled_flag &&
+            (sh->slice_sample_adaptive_offset_flag[0] ||
+             sh->slice_sample_adaptive_offset_flag[1] ||
+             !sh->disable_deblocking_filter_flag)) {
+            sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
+        } else {
+            sh->slice_loop_filter_across_slices_enabled_flag = s->pps->seq_loop_filter_across_slices_enabled_flag;
+        }
+    } else if (!s->slice_initialized) {
+        av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    sh->num_entry_point_offsets = 0;
+    if (s->pps->tiles_enabled_flag || s->pps->entropy_coding_sync_enabled_flag) {
+        sh->num_entry_point_offsets = get_ue_golomb_long(gb);
+        if (sh->num_entry_point_offsets > 0) {
+            int offset_len = get_ue_golomb_long(gb) + 1;
+            int segments = offset_len >> 4;
+            int rest = (offset_len & 15);
+            av_freep(&sh->entry_point_offset);
+            av_freep(&sh->offset);
+            av_freep(&sh->size);
+            sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
+            sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
+            sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
+            if (!sh->entry_point_offset || !sh->offset || !sh->size) {
+                sh->num_entry_point_offsets = 0;
+                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
+                return AVERROR(ENOMEM);
+            }
+            for (i = 0; i < sh->num_entry_point_offsets; i++) {
+                int val = 0;
+                for (j = 0; j < segments; j++) {
+                    val <<= 16;
+                    val += get_bits(gb, 16);
+                }
+                if (rest) {
+                    val <<= rest;
+                    val += get_bits(gb, rest);
+                }
+                sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size
+            }
+            if (s->threads_number > 1 && (s->pps->num_tile_rows > 1 || s->pps->num_tile_columns > 1)) {
+                s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
+                s->threads_number = 1;
+            } else
+                s->enable_parallel_tiles = 0;
+        } else
+            s->enable_parallel_tiles = 0;
+    }
+
+    if (s->pps->slice_header_extension_present_flag) {
+        unsigned int length = get_ue_golomb_long(gb);
+        if (length*8LL > get_bits_left(gb)) {
+            av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
+            return AVERROR_INVALIDDATA;
+        }
+        for (i = 0; i < length; i++)
+            skip_bits(gb, 8);  // slice_header_extension_data_byte
+    }
+
+    // Inferred parameters
+    sh->slice_qp = 26U + s->pps->pic_init_qp_minus26 + sh->slice_qp_delta;
+    if (sh->slice_qp > 51 ||
+        sh->slice_qp < -s->sps->qp_bd_offset) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "The slice_qp %d is outside the valid range "
+               "[%d, 51].\n",
+               sh->slice_qp,
+               -s->sps->qp_bd_offset);
+        return AVERROR_INVALIDDATA;
+    }
+
+    sh->slice_ctb_addr_rs = sh->slice_segment_addr;
+
+    if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
+        av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    if (get_bits_left(gb) < 0) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "Overread slice header by %d bits\n", -get_bits_left(gb));
+        return AVERROR_INVALIDDATA;
+    }
+
+    s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
+
+    if (!s->pps->cu_qp_delta_enabled_flag)
+        s->HEVClc->qp_y = s->sh.slice_qp;
+
+    s->slice_initialized = 1;
+    s->HEVClc->tu.cu_qp_offset_cb = 0;
+    s->HEVClc->tu.cu_qp_offset_cr = 0;
+
+    return 0;
+}
+
+/* Index a raster-ordered per-CTB table at CTB coordinates (x, y). */
+#define CTB(tab, x, y) ((tab)[(y) * s->sps->ctb_width + (x)])
+
+/* Assign one SAO syntax element: decode it with `value` when neither
+ * merge flag is set, otherwise copy it from the left or upper CTB's
+ * SAOParams.  Relies on sao_merge_left_flag, sao_merge_up_flag, sao,
+ * rx, ry and s being in scope at the expansion site. */
+#define SET_SAO(elem, value)                            \
+do {                                                    \
+    if (!sao_merge_up_flag && !sao_merge_left_flag)     \
+        sao->elem = value;                              \
+    else if (sao_merge_left_flag)                       \
+        sao->elem = CTB(s->sao, rx-1, ry).elem;         \
+    else if (sao_merge_up_flag)                         \
+        sao->elem = CTB(s->sao, rx, ry-1).elem;         \
+    else                                                \
+        sao->elem = 0;                                  \
+} while (0)
+
+/* Decode the SAO (sample adaptive offset) parameters for the CTB at
+ * raster position (rx, ry) into s->sao.  Parameters may be merged from
+ * the left or upper neighbour CTB instead of being explicitly coded.
+ * The CABAC decode calls below must stay in exactly this order — it is
+ * the bitstream syntax order. */
+static void hls_sao_param(HEVCContext *s, int rx, int ry)
+{
+    HEVCLocalContext *lc    = s->HEVClc;
+    int sao_merge_left_flag = 0;
+    int sao_merge_up_flag   = 0;
+    SAOParams *sao          = &CTB(s->sao, rx, ry);
+    int c_idx, i, c_count;
+
+    /* Merge flags are only present when SAO is enabled for some
+     * component and an already-decoded neighbour is available. */
+    if (s->sh.slice_sample_adaptive_offset_flag[0] ||
+        s->sh.slice_sample_adaptive_offset_flag[1]) {
+        if (rx > 0) {
+            if (lc->ctb_left_flag)
+                sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
+        }
+        if (ry > 0 && !sao_merge_left_flag) {
+            if (lc->ctb_up_flag)
+                sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
+        }
+    }
+
+    /* Monochrome (chroma_format_idc == 0): luma plane only. */
+    c_count = (s->sps->chroma_format_idc != 0) ? 3 : 1;
+    for (c_idx = 0; c_idx < c_count; c_idx++) {
+        int log2_sao_offset_scale = c_idx == 0 ? s->pps->log2_sao_offset_scale_luma :
+                                                 s->pps->log2_sao_offset_scale_chroma;
+        if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
+            sao->type_idx[c_idx] = SAO_NOT_APPLIED;
+            continue;
+        }
+
+        /* Cr (c_idx == 2) shares the SAO type and EO class of Cb. */
+        if (c_idx == 2) {
+            sao->type_idx[2] = sao->type_idx[1];
+            sao->eo_class[2] = sao->eo_class[1];
+        } else {
+            SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
+        }
+
+        if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
+            continue;
+
+        for (i = 0; i < 4; i++)
+            SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));
+
+        /* Band offsets carry explicit signs; edge offsets derive their
+         * sign from the category (handled below). */
+        if (sao->type_idx[c_idx] == SAO_BAND) {
+            for (i = 0; i < 4; i++) {
+                if (sao->offset_abs[c_idx][i]) {
+                    SET_SAO(offset_sign[c_idx][i],
+                            ff_hevc_sao_offset_sign_decode(s));
+                } else {
+                    sao->offset_sign[c_idx][i] = 0;
+                }
+            }
+            SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
+        } else if (c_idx != 2) {
+            SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
+        }
+
+        // Inferred parameters
+        sao->offset_val[c_idx][0] = 0;
+        for (i = 0; i < 4; i++) {
+            sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
+            if (sao->type_idx[c_idx] == SAO_EDGE) {
+                /* Edge categories 3 and 4 (i > 1) are negative. */
+                if (i > 1)
+                    sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
+            } else if (sao->offset_sign[c_idx][i]) {
+                sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
+            }
+            sao->offset_val[c_idx][i + 1] <<= log2_sao_offset_scale;
+        }
+    }
+}
+
+#undef SET_SAO
+#undef CTB
+
+/* Decode the cross-component prediction scale for chroma component
+ * `idx` (0 = Cb, 1 = Cr) and store it in lc->tu.res_scale_val.
+ * The sign flag is only present in the bitstream when the coded
+ * magnitude is non-zero.  Always returns 0. */
+static int hls_cross_component_pred(HEVCContext *s, int idx) {
+    HEVCLocalContext *lc = s->HEVClc;
+    const int log2_scale_plus1 = ff_hevc_log2_res_scale_abs(s, idx);
+
+    lc->tu.res_scale_val = 0;
+    if (log2_scale_plus1) {
+        const int negate = ff_hevc_res_scale_sign_flag(s, idx);
+        /* scale = +/- 2^(log2_scale_plus1 - 1) */
+        lc->tu.res_scale_val = (1 - 2 * negate) * (1 << (log2_scale_plus1 - 1));
+    }
+
+    return 0;
+}
+
+/* Intra-prediction dispatch: with USE_FUNC_PTR the call goes through a
+ * per-block-size function pointer table (indexed by log2 size - 2);
+ * otherwise it goes through the generic intra_pred() helper that takes
+ * the log2 size as an argument. */
+#ifdef USE_FUNC_PTR
+#define INTRA_PRED(log2_trafo_size, s, x, y, c_idx) s->hpc.intra_pred[log2_trafo_size - 2](s, x, y, c_idx)
+#else
+#define INTRA_PRED(log2_trafo_size, s, x, y, c_idx) intra_pred(s, x, y, log2_trafo_size, c_idx)
+#endif
+
+/* Decode one transform unit: run intra prediction for the covered
+ * block(s), parse the optional cu_qp_delta / chroma QP offset syntax,
+ * and decode the luma and chroma residuals.  The CABAC/bitstream reads
+ * below must keep their exact order.  Returns 0 on success or a
+ * negative AVERROR code on invalid data. */
+static int hls_transform_unit(HEVCContext *s, int x0, int y0,
+                              int xBase, int yBase, int cb_xBase, int cb_yBase,
+                              int log2_cb_size, int log2_trafo_size,
+                              int trafo_depth, int blk_idx,
+                              int cbf_luma, int *cbf_cb, int *cbf_cr)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    const int log2_trafo_size_c = log2_trafo_size - s->sps->hshift[1];
+    int i;
+
+    if (lc->cu.pred_mode == MODE_INTRA) {
+        int trafo_size = 1 << log2_trafo_size;
+        ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
+
+        INTRA_PRED(log2_trafo_size, s, x0, y0, 0);
+    }
+
+    /* Residual data is only present if some coded block flag is set
+     * (4:2:2 carries a second chroma cbf per component). */
+    if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
+        (s->sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
+        int scan_idx   = SCAN_DIAG;
+        int scan_idx_c = SCAN_DIAG;
+        int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
+                         (s->sps->chroma_format_idc == 2 &&
+                         (cbf_cb[1] || cbf_cr[1]));
+
+        /* cu_qp_delta is coded at most once per quantization group. */
+        if (s->pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
+            lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s);
+            if (lc->tu.cu_qp_delta != 0)
+                if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
+                    lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
+            lc->tu.is_cu_qp_delta_coded = 1;
+
+            if (lc->tu.cu_qp_delta < -(26 + s->sps->qp_bd_offset / 2) ||
+                lc->tu.cu_qp_delta >  (25 + s->sps->qp_bd_offset / 2)) {
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "The cu_qp_delta %d is outside the valid range "
+                       "[%d, %d].\n",
+                       lc->tu.cu_qp_delta,
+                       -(26 + s->sps->qp_bd_offset / 2),
+                        (25 + s->sps->qp_bd_offset / 2));
+                return AVERROR_INVALIDDATA;
+            }
+
+            ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size);
+        }
+
+        /* Optional per-CU chroma QP offset (range extensions), likewise
+         * coded at most once per CU. */
+        if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
+            !lc->cu.cu_transquant_bypass_flag  &&  !lc->tu.is_cu_chroma_qp_offset_coded) {
+            int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s);
+            if (cu_chroma_qp_offset_flag) {
+                int cu_chroma_qp_offset_idx  = 0;
+                if (s->pps->chroma_qp_offset_list_len_minus1 > 0) {
+                    cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s);
+                    av_log(s->avctx, AV_LOG_ERROR,
+                        "cu_chroma_qp_offset_idx not yet tested.\n");
+                }
+                lc->tu.cu_qp_offset_cb = s->pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
+                lc->tu.cu_qp_offset_cr = s->pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
+            } else {
+                lc->tu.cu_qp_offset_cb = 0;
+                lc->tu.cu_qp_offset_cr = 0;
+            }
+            lc->tu.is_cu_chroma_qp_offset_coded = 1;
+        }
+
+        /* Small intra blocks use mode-dependent coefficient scanning:
+         * near-horizontal prediction modes scan vertically and
+         * near-vertical modes scan horizontally. */
+        if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
+            if (lc->tu.intra_pred_mode >= 6 &&
+                lc->tu.intra_pred_mode <= 14) {
+                scan_idx = SCAN_VERT;
+            } else if (lc->tu.intra_pred_mode >= 22 &&
+                       lc->tu.intra_pred_mode <= 30) {
+                scan_idx = SCAN_HORIZ;
+            }
+
+            if (lc->tu.intra_pred_mode_c >=  6 &&
+                lc->tu.intra_pred_mode_c <= 14) {
+                scan_idx_c = SCAN_VERT;
+            } else if (lc->tu.intra_pred_mode_c >= 22 &&
+                       lc->tu.intra_pred_mode_c <= 30) {
+                scan_idx_c = SCAN_HORIZ;
+            }
+        }
+
+        lc->tu.cross_pf = 0;
+
+        if (cbf_luma)
+            ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
+        if (s->sps->chroma_format_idc != 0) {
+        if (log2_trafo_size > 2 || s->sps->chroma_format_idc == 3) {
+            int trafo_size_h = 1 << (log2_trafo_size_c + s->sps->hshift[1]);
+            int trafo_size_v = 1 << (log2_trafo_size_c + s->sps->vshift[1]);
+            lc->tu.cross_pf  = (s->pps->cross_component_prediction_enabled_flag && cbf_luma &&
+                                (lc->cu.pred_mode == MODE_INTER ||
+                                 (lc->tu.chroma_mode_c ==  4)));
+
+            if (lc->tu.cross_pf) {
+                hls_cross_component_pred(s, 0);
+            }
+            for (i = 0; i < (s->sps->chroma_format_idc == 2 ? 2 : 1); i++) {
+                if (lc->cu.pred_mode == MODE_INTRA) {
+                    ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
+                    INTRA_PRED(log2_trafo_size_c, s, x0, y0 + (i << log2_trafo_size_c), 1);
+                }
+                if (cbf_cb[i])
+                    ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
+                                                log2_trafo_size_c, scan_idx_c, 1);
+                else
+                    /* No coded Cb coefficients: with cross-component
+                     * prediction active, derive the chroma residual by
+                     * scaling the luma residual. */
+                    if (lc->tu.cross_pf) {
+                        ptrdiff_t stride = s->frame->linesize[1];
+                        int hshift = s->sps->hshift[1];
+                        int vshift = s->sps->vshift[1];
+                        int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
+                        int16_t *coeffs   = (int16_t*)lc->edge_emu_buffer2;
+                        int size = 1 << log2_trafo_size_c;
+
+                        uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
+                                                              ((x0 >> hshift) << s->sps->pixel_shift)];
+                        /* NOTE(review): this reuses the outer loop
+                         * variable i; appears benign because the outer
+                         * loop runs once when cross_pf is set (cross-
+                         * component prediction implies 4:4:4) — confirm. */
+                        for (i = 0; i < (size * size); i++) {
+                            coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
+                        }
+                        s->hevcdsp.transform_add[log2_trafo_size_c-2](dst, coeffs, stride BIT_DEPTH_ARG2(s->sps->bit_depth));
+                    }
+            }
+
+            if (lc->tu.cross_pf) {
+                hls_cross_component_pred(s, 1);
+            }
+            for (i = 0; i < (s->sps->chroma_format_idc == 2 ? 2 : 1); i++) {
+                if (lc->cu.pred_mode == MODE_INTRA) {
+                    ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
+                    INTRA_PRED(log2_trafo_size_c, s, x0, y0 + (i << log2_trafo_size_c), 2);
+                }
+                if (cbf_cr[i])
+                    ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
+                                                log2_trafo_size_c, scan_idx_c, 2);
+                else
+                    /* Same cross-component fallback for Cr. */
+                    if (lc->tu.cross_pf) {
+                        ptrdiff_t stride = s->frame->linesize[2];
+                        int hshift = s->sps->hshift[2];
+                        int vshift = s->sps->vshift[2];
+                        int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
+                        int16_t *coeffs   = (int16_t*)lc->edge_emu_buffer2;
+                        int size = 1 << log2_trafo_size_c;
+
+                        uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
+                                                          ((x0 >> hshift) << s->sps->pixel_shift)];
+                        for (i = 0; i < (size * size); i++) {
+                            coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
+                        }
+                        s->hevcdsp.transform_add[log2_trafo_size_c-2](dst, coeffs, stride BIT_DEPTH_ARG2(s->sps->bit_depth));
+                    }
+            }
+        } else if (blk_idx == 3) {
+            /* 4x4 chroma case: chroma residuals of the four 4x4 luma
+             * blocks are coded with the last (blk_idx == 3) block, at
+             * the CB base position. */
+            int trafo_size_h = 1 << (log2_trafo_size + 1);
+            int trafo_size_v = 1 << (log2_trafo_size + s->sps->vshift[1]);
+            for (i = 0; i < (s->sps->chroma_format_idc == 2 ? 2 : 1); i++) {
+                if (lc->cu.pred_mode == MODE_INTRA) {
+                    ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
+                                                    trafo_size_h, trafo_size_v);
+                    INTRA_PRED(log2_trafo_size, s, xBase, yBase + (i << log2_trafo_size), 1);
+                }
+                if (cbf_cb[i])
+                    ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
+                                                log2_trafo_size, scan_idx_c, 1);
+            }
+            for (i = 0; i < (s->sps->chroma_format_idc == 2 ? 2 : 1); i++) {
+                if (lc->cu.pred_mode == MODE_INTRA) {
+                    ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
+                                                trafo_size_h, trafo_size_v);
+                    INTRA_PRED(log2_trafo_size, s, xBase, yBase + (i << log2_trafo_size), 2);
+                }
+                if (cbf_cr[i])
+                    ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
+                                                log2_trafo_size, scan_idx_c, 2);
+            }
+        }
+        } /* chroma_format_idc != 0 */
+    } else if (lc->cu.pred_mode == MODE_INTRA && 
+               s->sps->chroma_format_idc != 0) {
+        /* No residual at all: still perform chroma intra prediction. */
+        if (log2_trafo_size > 2 || s->sps->chroma_format_idc == 3) {
+            int trafo_size_h = 1 << (log2_trafo_size_c + s->sps->hshift[1]);
+            int trafo_size_v = 1 << (log2_trafo_size_c + s->sps->vshift[1]);
+            ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v);
+            INTRA_PRED(log2_trafo_size_c, s, x0, y0, 1);
+            INTRA_PRED(log2_trafo_size_c, s, x0, y0, 2);
+            if (s->sps->chroma_format_idc == 2) {
+                ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c),
+                                                trafo_size_h, trafo_size_v);
+                INTRA_PRED(log2_trafo_size_c, s, x0, y0 + (1 << log2_trafo_size_c), 1);
+                INTRA_PRED(log2_trafo_size_c, s, x0, y0 + (1 << log2_trafo_size_c), 2);
+            }
+        } else if (blk_idx == 3) {
+            int trafo_size_h = 1 << (log2_trafo_size + 1);
+            int trafo_size_v = 1 << (log2_trafo_size + s->sps->vshift[1]);
+            ff_hevc_set_neighbour_available(s, xBase, yBase,
+                                            trafo_size_h, trafo_size_v);
+            INTRA_PRED(log2_trafo_size, s, xBase, yBase, 1);
+            INTRA_PRED(log2_trafo_size, s, xBase, yBase, 2);
+            if (s->sps->chroma_format_idc == 2) {
+                ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)),
+                                                trafo_size_h, trafo_size_v);
+                INTRA_PRED(log2_trafo_size, s, xBase, yBase + (1 << (log2_trafo_size)), 1);
+                INTRA_PRED(log2_trafo_size, s, xBase, yBase + (1 << (log2_trafo_size)), 2);
+            }
+        }
+    }
+
+    return 0;
+}
+
+/* Mark every minimum-PU cell covered by the coding block at (x0, y0)
+ * of size 2^log2_cb_size as deblocking-bypassed (is_pcm = 2), clipped
+ * to the picture boundaries. */
+static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
+{
+    const int cb_size          = 1 << log2_cb_size;
+    const int log2_min_pu_size = s->sps->log2_min_pu_size;
+    const int min_pu_width     = s->sps->min_pu_width;
+    /* Clip the block to the picture, then convert to min-PU units. */
+    const int px_first = x0 >> log2_min_pu_size;
+    const int py_first = y0 >> log2_min_pu_size;
+    const int px_end   = FFMIN(x0 + cb_size, s->sps->width)  >> log2_min_pu_size;
+    const int py_end   = FFMIN(y0 + cb_size, s->sps->height) >> log2_min_pu_size;
+    int px, py;
+
+    for (py = py_first; py < py_end; py++)
+        for (px = px_first; px < px_end; px++)
+            s->is_pcm[py * min_pu_width + px] = 2;
+}
+
+/* Recursively parse one transform-tree node (HEVC transform_tree syntax).
+ * Decodes split_transform_flag and the chroma coded-block flags, then
+ * either recurses into the four half-size quadrants or decodes the leaf
+ * transform unit.  base_cbf_cb/base_cbf_cr are the parent node's chroma
+ * CBFs; a child only decodes its own CBF when the parent's was set.
+ * Returns 0 on success or a negative error code from the TU decode. */
+static int hls_transform_tree(HEVCContext *s, int x0, int y0,
+                              int xBase, int yBase, int cb_xBase, int cb_yBase,
+                              int log2_cb_size, int log2_trafo_size,
+                              int trafo_depth, int blk_idx,
+                              const int *base_cbf_cb, const int *base_cbf_cr)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    uint8_t split_transform_flag;
+    int cbf_cb[2];
+    int cbf_cr[2];
+    int ret;
+
+    /* Inherit the chroma CBFs decoded at the parent depth.  Index [1]
+     * is only read/written under chroma_format_idc == 2 (4:2:2), where
+     * a luma TB covers two vertically stacked chroma TBs. */
+    cbf_cb[0] = base_cbf_cb[0];
+    cbf_cb[1] = base_cbf_cb[1];
+    cbf_cr[0] = base_cbf_cr[0];
+    cbf_cr[1] = base_cbf_cr[1];
+
+    /* Select the intra prediction modes used by this TU: with intra NxN
+     * partitioning each depth-1 sub-block has its own mode (and its own
+     * chroma mode only in 4:4:4); otherwise the whole CU shares [0]. */
+    if (lc->cu.intra_split_flag) {
+        if (trafo_depth == 1) {
+            lc->tu.intra_pred_mode   = lc->pu.intra_pred_mode[blk_idx];
+            if (s->sps->chroma_format_idc == 3) {
+                lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
+                lc->tu.chroma_mode_c     = lc->pu.chroma_mode_c[blk_idx];
+            } else {
+                lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
+                lc->tu.chroma_mode_c     = lc->pu.chroma_mode_c[0];
+            }
+        }
+    } else {
+        lc->tu.intra_pred_mode   = lc->pu.intra_pred_mode[0];
+        lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
+        lc->tu.chroma_mode_c     = lc->pu.chroma_mode_c[0];
+    }
+
+    /* split_transform_flag is only coded when splitting is optional;
+     * otherwise it is inferred: forced split when the TB exceeds the
+     * max TB size, for intra NxN at depth 0, and for non-2Nx2N inter
+     * CUs when the inter transform hierarchy depth is 0. */
+    if (log2_trafo_size <= s->sps->log2_max_trafo_size &&
+        log2_trafo_size >  s->sps->log2_min_tb_size    &&
+        trafo_depth     < lc->cu.max_trafo_depth       &&
+        !(lc->cu.intra_split_flag && trafo_depth == 0)) {
+        split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
+    } else {
+        int inter_split = s->sps->max_transform_hierarchy_depth_inter == 0 &&
+                          lc->cu.pred_mode == MODE_INTER &&
+                          lc->cu.part_mode != PART_2Nx2N &&
+                          trafo_depth == 0;
+
+        split_transform_flag = log2_trafo_size > s->sps->log2_max_trafo_size ||
+                               (lc->cu.intra_split_flag && trafo_depth == 0) ||
+                               inter_split;
+    }
+
+    /* Chroma CBFs are present only when this node actually has chroma
+     * TBs: always in 4:4:4, and only for luma TBs larger than 4x4
+     * otherwise.  A CBF is re-decoded at a child only if the parent's
+     * was set; in 4:2:2 the second chroma TB gets its own flag when no
+     * further split happens (or the TB is 8x8). */
+    if ((log2_trafo_size > 2 || s->sps->chroma_format_idc == 3) &&
+        s->sps->chroma_format_idc != 0) {
+        if (trafo_depth == 0 || cbf_cb[0]) {
+            cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
+            if (s->sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
+                cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
+            }
+        }
+
+        if (trafo_depth == 0 || cbf_cr[0]) {
+            cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
+            if (s->sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
+                cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
+            }
+        }
+    }
+
+    if (split_transform_flag) {
+        /* Recurse into the four half-size quadrants in z-scan order. */
+        const int trafo_size_split = 1 << (log2_trafo_size - 1);
+        const int x1 = x0 + trafo_size_split;
+        const int y1 = y0 + trafo_size_split;
+
+#define SUBDIVIDE(x, y, idx)                                                    \
+do {                                                                            \
+    ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
+                             log2_trafo_size - 1, trafo_depth + 1, idx,         \
+                             cbf_cb, cbf_cr);                                   \
+    if (ret < 0)                                                                \
+        return ret;                                                             \
+} while (0)
+
+        SUBDIVIDE(x0, y0, 0);
+        SUBDIVIDE(x1, y0, 1);
+        SUBDIVIDE(x0, y1, 2);
+        SUBDIVIDE(x1, y1, 3);
+
+#undef SUBDIVIDE
+    } else {
+        int min_tu_size      = 1 << s->sps->log2_min_tb_size;
+        int log2_min_tu_size = s->sps->log2_min_tb_size;
+        int min_tu_width     = s->sps->min_tb_width;
+        int cbf_luma         = 1;
+
+        /* Leaf node: cbf_luma is coded unless it can be inferred as 1
+         * (inter CU at depth 0 with no chroma residual signalled). */
+        if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
+            cbf_cb[0] || cbf_cr[0] ||
+            (s->sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
+            cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);
+        }
+
+        ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
+                                 log2_cb_size, log2_trafo_size, trafo_depth,
+                                 blk_idx, cbf_luma, cbf_cb, cbf_cr);
+        if (ret < 0)
+            return ret;
+        // TODO: store cbf_luma somewhere else
+        if (cbf_luma) {
+            /* Record the luma CBF per minimal TU; read back later when
+             * computing deblocking boundary strengths. */
+            int i, j;
+            for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
+                for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
+                    int x_tu = (x0 + j) >> log2_min_tu_size;
+                    int y_tu = (y0 + i) >> log2_min_tu_size;
+                    s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
+                }
+        }
+        if (!s->sh.disable_deblocking_filter_flag) {
+            ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
+            if (s->pps->transquant_bypass_enable_flag &&
+                lc->cu.cu_transquant_bypass_flag)
+                set_deblocking_bypass(s, x0, y0, log2_trafo_size);
+        }
+    }
+    return 0;
+}
+
+/* Decode a PCM-coded CB: the samples are stored uncompressed in the
+ * bitstream.  Computes the payload size in bits, skips the rounded-up
+ * byte count in the CABAC byte stream, then re-reads the bytes with a
+ * plain bit reader and stores the raw samples into all three planes.
+ * Returns 0 on success or a negative error code from init_get_bits. */
+static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    GetBitContext gb;
+    int cb_size   = 1 << log2_cb_size;
+    /* Destination pointers for the Y/Cb/Cr planes at (x0, y0), with
+     * chroma coordinates scaled by the plane subsampling shifts. */
+    int stride0   = s->frame->linesize[0];
+    uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->sps->pixel_shift)];
+    int   stride1 = s->frame->linesize[1];
+    uint8_t *dst1 = &s->frame->data[1][(y0 >> s->sps->vshift[1]) * stride1 + ((x0 >> s->sps->hshift[1]) << s->sps->pixel_shift)];
+    int   stride2 = s->frame->linesize[2];
+    uint8_t *dst2 = &s->frame->data[2][(y0 >> s->sps->vshift[2]) * stride2 + ((x0 >> s->sps->hshift[2]) << s->sps->pixel_shift)];
+
+    /* Payload length in bits: luma samples at pcm.bit_depth plus both
+     * subsampled chroma planes at pcm.bit_depth_chroma. */
+    int length         = cb_size * cb_size * s->sps->pcm.bit_depth +
+                         (((cb_size >> s->sps->hshift[1]) * (cb_size >> s->sps->vshift[1])) +
+                          ((cb_size >> s->sps->hshift[2]) * (cb_size >> s->sps->vshift[2]))) *
+                          s->sps->pcm.bit_depth_chroma;
+    const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
+    int ret;
+
+    if (!s->sh.disable_deblocking_filter_flag)
+        ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
+
+    ret = init_get_bits(&gb, pcm, length);
+    if (ret < 0)
+        return ret;
+
+    s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size,     &gb, s->sps->pcm.bit_depth BIT_DEPTH_ARG2(s->sps->bit_depth));
+    s->hevcdsp.put_pcm(dst1, stride1,
+                       cb_size >> s->sps->hshift[1],
+                       cb_size >> s->sps->vshift[1],
+                       &gb, s->sps->pcm.bit_depth_chroma BIT_DEPTH_ARG2(s->sps->bit_depth));
+    s->hevcdsp.put_pcm(dst2, stride2,
+                       cb_size >> s->sps->hshift[2],
+                       cb_size >> s->sps->vshift[2],
+                       &gb, s->sps->pcm.bit_depth_chroma BIT_DEPTH_ARG2(s->sps->bit_depth));
+    return 0;
+}
+
+#ifdef USE_PRED
+/**
+ * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
+ *
+ * @param s HEVC decoding context
+ * @param dst target buffer for block data at block position
+ * @param dststride stride of the dst buffer
+ * @param ref reference picture buffer at origin (0, 0)
+ * @param mv motion vector (relative to block position) to get pixel data from
+ * @param x_off horizontal position of block from origin (0, 0)
+ * @param y_off vertical position of block from origin (0, 0)
+ * @param block_w width of block
+ * @param block_h height of block
+ * @param luma_weight weighting factor applied to the luma prediction
+ * @param luma_offset additive offset applied to the luma prediction value
+ */
+
+static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
+                        AVFrame *ref, const Mv *mv, int x_off, int y_off,
+                        int block_w, int block_h, int luma_weight, int luma_offset)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    uint8_t *src         = ref->data[0];
+    ptrdiff_t srcstride  = ref->linesize[0];
+    int pic_width        = s->sps->width;
+    int pic_height       = s->sps->height;
+    /* Luma mv is in quarter-sample units: low 2 bits are the fractional
+     * phase for the qpel filter, the rest is the integer offset. */
+    int mx               = mv->x & 3;
+    int my               = mv->y & 3;
+    int weight_flag      = (s->sh.slice_type == P_SLICE && s->pps->weighted_pred_flag) ||
+                           (s->sh.slice_type == B_SLICE && s->pps->weighted_bipred_flag);
+    int idx              = ff_hevc_pel_weight[block_w];
+
+    x_off += mv->x >> 2;
+    y_off += mv->y >> 2;
+    src   += y_off * srcstride + (x_off << s->sps->pixel_shift);
+
+    /* If the block plus the QPEL filter margin would read outside the
+     * picture, build an edge-replicated copy in edge_emu_buffer and
+     * interpolate from that instead. */
+    if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
+        x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
+        y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
+        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->sps->pixel_shift;
+        int offset     = QPEL_EXTRA_BEFORE * srcstride       + (QPEL_EXTRA_BEFORE << s->sps->pixel_shift);
+        int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->sps->pixel_shift);
+
+        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
+                                 edge_emu_stride, srcstride,
+                                 block_w + QPEL_EXTRA,
+                                 block_h + QPEL_EXTRA,
+                                 x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
+                                 pic_width, pic_height);
+        src = lc->edge_emu_buffer + buf_offset;
+        srcstride = edge_emu_stride;
+    }
+
+    /* Plain interpolation, or explicit weighted prediction when the PPS
+     * enables it for this slice type. */
+    if (!weight_flag)
+        s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
+                                                      block_h, mx, my, block_w BIT_DEPTH_ARG2(s->sps->bit_depth));
+    else
+        s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
+                                                        block_h, s->sh.luma_log2_weight_denom,
+                                                        luma_weight, luma_offset, mx, my, block_w BIT_DEPTH_ARG2(s->sps->bit_depth));
+}
+
+#ifdef USE_BIPRED
+/**
+ * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
+ *
+ * @param s HEVC decoding context
+ * @param dst target buffer for block data at block position
+ * @param dststride stride of the dst buffer
+ * @param ref0 reference picture0 buffer at origin (0, 0)
+ * @param mv0 motion vector0 (relative to block position) to get pixel data from
+ * @param x_off horizontal position of block from origin (0, 0)
+ * @param y_off vertical position of block from origin (0, 0)
+ * @param block_w width of block
+ * @param block_h height of block
+ * @param ref1 reference picture1 buffer at origin (0, 0)
+ * @param mv1 motion vector1 (relative to block position) to get pixel data from
+ * @param current_mv current motion vector structure (supplies the L0/L1
+ *        reference indices used to look up the weighting tables)
+ */
+ static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
+                       AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
+                       int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    ptrdiff_t src0stride  = ref0->linesize[0];
+    ptrdiff_t src1stride  = ref1->linesize[0];
+    int pic_width        = s->sps->width;
+    int pic_height       = s->sps->height;
+    /* Quarter-sample mvs: low 2 bits select the qpel filter phase. */
+    int mx0              = mv0->x & 3;
+    int my0              = mv0->y & 3;
+    int mx1              = mv1->x & 3;
+    int my1              = mv1->y & 3;
+    int weight_flag      = (s->sh.slice_type == P_SLICE && s->pps->weighted_pred_flag) ||
+                           (s->sh.slice_type == B_SLICE && s->pps->weighted_bipred_flag);
+    int x_off0           = x_off + (mv0->x >> 2);
+    int y_off0           = y_off + (mv0->y >> 2);
+    int x_off1           = x_off + (mv1->x >> 2);
+    int y_off1           = y_off + (mv1->y >> 2);
+    int idx              = ff_hevc_pel_weight[block_w];
+
+    uint8_t *src0  = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->sps->pixel_shift);
+    uint8_t *src1  = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->sps->pixel_shift);
+
+    /* Edge-emulate the L0 source when the filtered region crosses the
+     * picture border (edge_emu_buffer holds the padded copy). */
+    if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
+        x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
+        y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
+        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->sps->pixel_shift;
+        int offset     = QPEL_EXTRA_BEFORE * src0stride       + (QPEL_EXTRA_BEFORE << s->sps->pixel_shift);
+        int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->sps->pixel_shift);
+
+        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
+                                 edge_emu_stride, src0stride,
+                                 block_w + QPEL_EXTRA,
+                                 block_h + QPEL_EXTRA,
+                                 x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
+                                 pic_width, pic_height);
+        src0 = lc->edge_emu_buffer + buf_offset;
+        src0stride = edge_emu_stride;
+    }
+
+    /* Same for the L1 source, using the second emulation buffer so the
+     * two padded copies can coexist. */
+    if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
+        x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
+        y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
+        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->sps->pixel_shift;
+        int offset     = QPEL_EXTRA_BEFORE * src1stride       + (QPEL_EXTRA_BEFORE << s->sps->pixel_shift);
+        int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->sps->pixel_shift);
+
+        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
+                                 edge_emu_stride, src1stride,
+                                 block_w + QPEL_EXTRA,
+                                 block_h + QPEL_EXTRA,
+                                 x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
+                                 pic_width, pic_height);
+        src1 = lc->edge_emu_buffer2 + buf_offset;
+        src1stride = edge_emu_stride;
+    }
+
+    /* Interpolate the L0 block into the intermediate buffer lc->tmp,
+     * then combine it with the interpolated L1 block — either a plain
+     * bi-prediction average or explicit weighted bi-prediction. */
+    s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
+                                                block_h, mx0, my0, block_w BIT_DEPTH_ARG2(s->sps->bit_depth));
+    if (!weight_flag)
+        s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
+                                                       block_h, mx1, my1, block_w BIT_DEPTH_ARG2(s->sps->bit_depth));
+    else
+        s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
+                                                         block_h, s->sh.luma_log2_weight_denom,
+                                                         s->sh.luma_weight_l0[current_mv->ref_idx[0]],
+                                                         s->sh.luma_weight_l1[current_mv->ref_idx[1]],
+                                                         s->sh.luma_offset_l0[current_mv->ref_idx[0]],
+                                                         s->sh.luma_offset_l1[current_mv->ref_idx[1]],
+                                                         mx1, my1, block_w BIT_DEPTH_ARG2(s->sps->bit_depth));
+
+}
+#endif
+
+/**
+ * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
+ *
+ * @param s HEVC decoding context
+ * @param dst0 target buffer for block data at block position
+ * @param dststride stride of the dst0 buffer
+ * @param src0 source (reference) chroma plane buffer at origin (0, 0)
+ * @param srcstride stride of the src0 buffer
+ * @param reflist reference picture list index selecting current_mv->mv[reflist]
+ * @param x_off horizontal position of block from origin (0, 0), in chroma samples
+ * @param y_off vertical position of block from origin (0, 0), in chroma samples
+ * @param block_w width of block
+ * @param block_h height of block
+ * @param current_mv motion vector structure providing the mv to use
+ * @param chroma_weight weighting factor applied to the chroma prediction
+ * @param chroma_offset additive offset applied to the chroma prediction value
+ */
+
+static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0,
+                          ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
+                          int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    int pic_width        = s->sps->width >> s->sps->hshift[1];
+    int pic_height       = s->sps->height >> s->sps->vshift[1];
+    const Mv *mv         = &current_mv->mv[reflist];
+    int weight_flag      = (s->sh.slice_type == P_SLICE && s->pps->weighted_pred_flag) ||
+                           (s->sh.slice_type == B_SLICE && s->pps->weighted_bipred_flag);
+    int idx              = ff_hevc_pel_weight[block_w];
+    int hshift           = s->sps->hshift[1];
+    int vshift           = s->sps->vshift[1];
+    /* The chroma fractional phase uses 2+shift bits of the mv; _mx/_my
+     * rescale it to the common scale expected by the epel filters. */
+    intptr_t mx          = mv->x & ((1 << (2 + hshift)) - 1);
+    intptr_t my          = mv->y & ((1 << (2 + vshift)) - 1);
+    intptr_t _mx         = mx << (1 - hshift);
+    intptr_t _my         = my << (1 - vshift);
+
+    x_off += mv->x >> (2 + hshift);
+    y_off += mv->y >> (2 + vshift);
+    src0  += y_off * srcstride + (x_off << s->sps->pixel_shift);
+
+    /* Edge-emulate when the block plus the EPEL filter margin would
+     * read outside the (subsampled) picture. */
+    if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
+        x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
+        y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
+        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->sps->pixel_shift;
+        int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->sps->pixel_shift));
+        int buf_offset0 = EPEL_EXTRA_BEFORE *
+                          (edge_emu_stride + (1 << s->sps->pixel_shift));
+        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
+                                 edge_emu_stride, srcstride,
+                                 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
+                                 x_off - EPEL_EXTRA_BEFORE,
+                                 y_off - EPEL_EXTRA_BEFORE,
+                                 pic_width, pic_height);
+
+        src0 = lc->edge_emu_buffer + buf_offset0;
+        srcstride = edge_emu_stride;
+    }
+    /* Plain or explicitly weighted uni-prediction. */
+    if (!weight_flag)
+        s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
+                                                  block_h, _mx, _my, block_w BIT_DEPTH_ARG2(s->sps->bit_depth));
+    else
+        s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
+                                                        block_h, s->sh.chroma_log2_weight_denom,
+                                                        chroma_weight, chroma_offset, _mx, _my, block_w BIT_DEPTH_ARG2(s->sps->bit_depth));
+}
+
+#ifdef USE_BIPRED
+/**
+ * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
+ *
+ * @param s HEVC decoding context
+ * @param dst0 target buffer for block data at block position
+ * @param dststride stride of the dst0 buffer (note: the DSP calls below
+ *        use s->frame->linesize[cidx+1] directly)
+ * @param ref0 reference picture0 buffer at origin (0, 0)
+ * @param ref1 reference picture1 buffer at origin (0, 0)
+ * @param x_off horizontal position of block from origin (0, 0), in chroma samples
+ * @param y_off vertical position of block from origin (0, 0), in chroma samples
+ * @param block_w width of block
+ * @param block_h height of block
+ * @param current_mv current motion vector structure (mv[0]/mv[1] and ref indices)
+ * @param cidx chroma component (0 = cb, 1 = cr)
+ */
+static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1,
+                         int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    uint8_t *src1        = ref0->data[cidx+1];
+    uint8_t *src2        = ref1->data[cidx+1];
+    ptrdiff_t src1stride = ref0->linesize[cidx+1];
+    ptrdiff_t src2stride = ref1->linesize[cidx+1];
+    int weight_flag      = (s->sh.slice_type == P_SLICE && s->pps->weighted_pred_flag) ||
+                           (s->sh.slice_type == B_SLICE && s->pps->weighted_bipred_flag);
+    int pic_width        = s->sps->width >> s->sps->hshift[1];
+    int pic_height       = s->sps->height >> s->sps->vshift[1];
+    Mv *mv0              = &current_mv->mv[0];
+    Mv *mv1              = &current_mv->mv[1];
+    int hshift = s->sps->hshift[1];
+    int vshift = s->sps->vshift[1];
+
+    /* Chroma fractional phases use 2+shift mv bits; _mx*/_my* rescale
+     * them to the common scale expected by the epel filters. */
+    intptr_t mx0 = mv0->x & ((1 << (2 + hshift)) - 1);
+    intptr_t my0 = mv0->y & ((1 << (2 + vshift)) - 1);
+    intptr_t mx1 = mv1->x & ((1 << (2 + hshift)) - 1);
+    intptr_t my1 = mv1->y & ((1 << (2 + vshift)) - 1);
+    intptr_t _mx0 = mx0 << (1 - hshift);
+    intptr_t _my0 = my0 << (1 - vshift);
+    intptr_t _mx1 = mx1 << (1 - hshift);
+    intptr_t _my1 = my1 << (1 - vshift);
+
+    int x_off0 = x_off + (mv0->x >> (2 + hshift));
+    int y_off0 = y_off + (mv0->y >> (2 + vshift));
+    int x_off1 = x_off + (mv1->x >> (2 + hshift));
+    int y_off1 = y_off + (mv1->y >> (2 + vshift));
+    int idx = ff_hevc_pel_weight[block_w];
+    src1  += y_off0 * src1stride + (int)((unsigned)x_off0 << s->sps->pixel_shift);
+    src2  += y_off1 * src2stride + (int)((unsigned)x_off1 << s->sps->pixel_shift);
+
+    /* Edge-emulate the L0 source when the filtered region crosses the
+     * picture border. */
+    if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
+        x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
+        y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
+        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->sps->pixel_shift;
+        int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->sps->pixel_shift));
+        int buf_offset1 = EPEL_EXTRA_BEFORE *
+                          (edge_emu_stride + (1 << s->sps->pixel_shift));
+
+        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
+                                 edge_emu_stride, src1stride,
+                                 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
+                                 x_off0 - EPEL_EXTRA_BEFORE,
+                                 y_off0 - EPEL_EXTRA_BEFORE,
+                                 pic_width, pic_height);
+
+        src1 = lc->edge_emu_buffer + buf_offset1;
+        src1stride = edge_emu_stride;
+    }
+
+    /* Same for the L1 source, in the second emulation buffer. */
+    if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
+        x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
+        y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
+        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->sps->pixel_shift;
+        int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->sps->pixel_shift));
+        int buf_offset1 = EPEL_EXTRA_BEFORE *
+                          (edge_emu_stride + (1 << s->sps->pixel_shift));
+
+        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
+                                 edge_emu_stride, src2stride,
+                                 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
+                                 x_off1 - EPEL_EXTRA_BEFORE,
+                                 y_off1 - EPEL_EXTRA_BEFORE,
+                                 pic_width, pic_height);
+
+        src2 = lc->edge_emu_buffer2 + buf_offset1;
+        src2stride = edge_emu_stride;
+    }
+
+    /* Interpolate L0 into lc->tmp, then combine with the interpolated
+     * L1 block (plain bi average or explicit weighted bi-prediction). */
+    s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
+                                                block_h, _mx0, _my0, block_w BIT_DEPTH_ARG2(s->sps->bit_depth));
+    if (!weight_flag)
+        s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
+                                                       src2, src2stride, lc->tmp,
+                                                       block_h, _mx1, _my1, block_w BIT_DEPTH_ARG2(s->sps->bit_depth));
+    else
+        s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
+                                                         src2, src2stride, lc->tmp,
+                                                         block_h,
+                                                         s->sh.chroma_log2_weight_denom,
+                                                         s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
+                                                         s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
+                                                         s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
+                                                         s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
+                                                         _mx1, _my1, block_w BIT_DEPTH_ARG2(s->sps->bit_depth));
+}
+#endif /* USE_BIPRED */
+#endif /* USE_PRED */
+
+#ifdef USE_FULL
+/* Frame-threading helper: block until the reference frame has been
+ * decoded down to the last row this motion-compensated block will read.
+ * NOTE(review): the +9 margin presumably covers the interpolation
+ * filter extent below the block — confirm against the qpel tap count. */
+static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
+                                const Mv *mv, int y0, int height)
+{
+    int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
+
+    /* Only meaningful with frame-level threading; no-op otherwise. */
+    if (s->threads_type == FF_THREAD_FRAME )
+        ff_thread_await_progress(&ref->tf, y, 0);
+}
+#endif
+
+#ifdef USE_PRED
+/* Decode one inter prediction unit: parse its motion information
+ * (skip/merge or explicit AMVP), store the resulting MvField for every
+ * minimal PU it covers, then run luma and chroma motion compensation
+ * into the current frame.  (nPbW, nPbH) is the PU size, partIdx the
+ * partition index inside the CU. */
+static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
+                                int nPbW, int nPbH,
+                                int log2_cb_size, int partIdx, int idx)
+{
+/* Pointer to plane c_idx at pixel (x, y) of the current frame. */
+#define POS(c_idx, x, y)                                                              \
+    &s->frame->data[c_idx][((y) >> s->sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
+                           (((x) >> s->sps->hshift[c_idx]) << s->sps->pixel_shift)]
+    HEVCLocalContext *lc = s->HEVClc;
+    int merge_idx = 0;
+    struct MvField current_mv = {{{ 0 }}};
+
+    int min_pu_width = s->sps->min_pu_width;
+
+    MvField *tab_mvf = s->ref->tab_mvf;
+    RefPicList  *refPicList = s->ref->refPicList;
+    HEVCFrame *ref0, *ref1;
+    uint8_t *dst0 = POS(0, x0, y0);
+    uint8_t *dst1 = POS(1, x0, y0);
+    uint8_t *dst2 = POS(2, x0, y0);
+    int log2_min_cb_size = s->sps->log2_min_cb_size;
+    int min_cb_width     = s->sps->min_cb_width;
+    int x_cb             = x0 >> log2_min_cb_size;
+    int y_cb             = y0 >> log2_min_cb_size;
+    int ref_idx[2];
+    int mvp_flag[2];
+    int x_pu, y_pu;
+    int i, j;
+
+    /* Skip mode: merge candidate only, PU covers the whole CU. */
+    if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
+        if (s->sh.max_num_merge_cand > 1)
+            merge_idx = ff_hevc_merge_idx_decode(s);
+        else
+            merge_idx = 0;
+
+        ff_hevc_luma_mv_merge_mode(s, x0, y0,
+                                   1 << log2_cb_size,
+                                   1 << log2_cb_size,
+                                   log2_cb_size, partIdx,
+                                   merge_idx, &current_mv);
+        x_pu = x0 >> s->sps->log2_min_pu_size;
+        y_pu = y0 >> s->sps->log2_min_pu_size;
+
+        /* Store the motion field for every minimal PU in this block. */
+        for (j = 0; j < nPbH >> s->sps->log2_min_pu_size; j++)
+            for (i = 0; i < nPbW >> s->sps->log2_min_pu_size; i++)
+                tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
+    } else { /* MODE_INTER */
+        lc->pu.merge_flag = ff_hevc_merge_flag_decode(s);
+        if (lc->pu.merge_flag) {
+            /* Merge mode: motion is copied from the selected candidate. */
+            if (s->sh.max_num_merge_cand > 1)
+                merge_idx = ff_hevc_merge_idx_decode(s);
+            else
+                merge_idx = 0;
+
+            ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
+                                       partIdx, merge_idx, &current_mv);
+            x_pu = x0 >> s->sps->log2_min_pu_size;
+            y_pu = y0 >> s->sps->log2_min_pu_size;
+
+            for (j = 0; j < nPbH >> s->sps->log2_min_pu_size; j++)
+                for (i = 0; i < nPbW >> s->sps->log2_min_pu_size; i++)
+                    tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
+        } else {
+            /* Explicit (AMVP) mode: decode inter_pred_idc, then per
+             * list the reference index, mvd and mvp flag; final mv is
+             * predictor + decoded difference. */
+            enum InterPredIdc inter_pred_idc = PRED_L0;
+            ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
+            current_mv.pred_flag = 0;
+            if (s->sh.slice_type == B_SLICE)
+                inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);
+
+            if (inter_pred_idc != PRED_L1) {
+                if (s->sh.nb_refs[L0]) {
+                    ref_idx[0] = ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);
+                    current_mv.ref_idx[0] = ref_idx[0];
+                }
+                current_mv.pred_flag = PF_L0;
+                ff_hevc_hls_mvd_coding(s, x0, y0, 0);
+                mvp_flag[0] = ff_hevc_mvp_lx_flag_decode(s);
+                ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
+                                         partIdx, merge_idx, &current_mv,
+                                         mvp_flag[0], 0);
+                current_mv.mv[0].x += lc->pu.mvd.x;
+                current_mv.mv[0].y += lc->pu.mvd.y;
+            }
+
+            if (inter_pred_idc != PRED_L0) {
+                if (s->sh.nb_refs[L1]) {
+                    ref_idx[1] = ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);
+                    current_mv.ref_idx[1] = ref_idx[1];
+                }
+
+                /* mvd_l1_zero_flag: L1 mvd is forced to zero for BI. */
+                if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
+                    AV_ZERO32(&lc->pu.mvd);
+                } else {
+                    ff_hevc_hls_mvd_coding(s, x0, y0, 1);
+                }
+
+                current_mv.pred_flag += PF_L1;
+                mvp_flag[1] = ff_hevc_mvp_lx_flag_decode(s);
+                ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
+                                         partIdx, merge_idx, &current_mv,
+                                         mvp_flag[1], 1);
+                current_mv.mv[1].x += lc->pu.mvd.x;
+                current_mv.mv[1].y += lc->pu.mvd.y;
+            }
+
+            x_pu = x0 >> s->sps->log2_min_pu_size;
+            y_pu = y0 >> s->sps->log2_min_pu_size;
+
+            for(j = 0; j < nPbH >> s->sps->log2_min_pu_size; j++)
+                for (i = 0; i < nPbW >> s->sps->log2_min_pu_size; i++)
+                    tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
+        }
+    }
+
+    /* Resolve the reference frames; bail out silently if a reference is
+     * missing (corrupt stream). */
+    if (current_mv.pred_flag & PF_L0) {
+        ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
+        if (!ref0)
+            return;
+#ifdef USE_FULL
+        hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
+#endif
+    }
+    if (current_mv.pred_flag & PF_L1) {
+        ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
+        if (!ref1)
+            return;
+#ifdef USE_FULL
+        hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
+#endif
+    }
+
+    /* Motion compensation: uni-prediction from L0 or L1, or
+     * bi-prediction combining both. */
+    if (current_mv.pred_flag == PF_L0) {
+        luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame,
+                    &current_mv.mv[0], x0, y0, nPbW, nPbH,
+                    s->sh.luma_weight_l0[current_mv.ref_idx[0]],
+                    s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
+        
+        if (s->sps->chroma_format_idc != 0) {
+            int x0_c = x0 >> s->sps->hshift[1];
+            int y0_c = y0 >> s->sps->vshift[1];
+            int nPbW_c = nPbW >> s->sps->hshift[1];
+            int nPbH_c = nPbH >> s->sps->vshift[1];
+            
+            chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
+                          0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
+                          s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
+            chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
+                          0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
+                          s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
+        }
+    } else if (current_mv.pred_flag == PF_L1) {
+        luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame,
+                    &current_mv.mv[1], x0, y0, nPbW, nPbH,
+                    s->sh.luma_weight_l1[current_mv.ref_idx[1]],
+                    s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
+
+        if (s->sps->chroma_format_idc != 0) {
+            int x0_c = x0 >> s->sps->hshift[1];
+            int y0_c = y0 >> s->sps->vshift[1];
+            int nPbW_c = nPbW >> s->sps->hshift[1];
+            int nPbH_c = nPbH >> s->sps->vshift[1];
+
+            chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
+                          1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
+                          s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
+            
+            chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
+                          1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
+                          s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
+        }
+    } else if (current_mv.pred_flag == PF_BI) {
+#ifdef USE_BIPRED
+        luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame,
+                   &current_mv.mv[0], x0, y0, nPbW, nPbH,
+                   ref1->frame, &current_mv.mv[1], &current_mv);
+        if (s->sps->chroma_format_idc != 0) {
+            int x0_c = x0 >> s->sps->hshift[1];
+            int y0_c = y0 >> s->sps->vshift[1];
+            int nPbW_c = nPbW >> s->sps->hshift[1];
+            int nPbH_c = nPbH >> s->sps->vshift[1];
+            
+            chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
+                         x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
+            
+            chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
+                         x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
+        }
+#else
+        /* Bi-prediction was compiled out; a conforming stream using it
+         * cannot be decoded in this build. */
+        abort();
+#endif
+    }
+}
+#endif
+
+/**
+ * 8.4.1 — derive the luma intra prediction mode of a prediction unit.
+ *
+ * Builds the 3-entry most-probable-mode (MPM) candidate list from the
+ * left and up neighbour modes, then either selects candidate[mpm_idx]
+ * (when prev_intra_luma_pred_flag is set) or maps the already-decoded
+ * rem_intra_luma_pred_mode onto the 32 non-candidate modes.  The chosen
+ * mode is also written into s->tab_ipm for every min-PU covered by the
+ * PU, so later PUs can use it as a neighbour candidate.
+ *
+ * @param pu_size width/height of the (square) prediction unit in luma samples
+ * @param prev_intra_luma_pred_flag non-zero when the mode is signalled
+ *        through mpm_idx rather than rem_intra_luma_pred_mode
+ * @return the luma intra prediction mode (0..34)
+ */
+static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
+                                int prev_intra_luma_pred_flag)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    int x_pu             = x0 >> s->sps->log2_min_pu_size;
+    int y_pu             = y0 >> s->sps->log2_min_pu_size;
+    int min_pu_width     = s->sps->min_pu_width;
+    int size_in_pus      = pu_size >> s->sps->log2_min_pu_size;
+    int x0b              = x0 & ((1 << s->sps->log2_ctb_size) - 1);
+    int y0b              = y0 & ((1 << s->sps->log2_ctb_size) - 1);
+
+    /* Neighbour modes; INTRA_DC is substituted when the neighbour CTB is
+     * unavailable and the PU sits on the corresponding CTB edge. */
+    int cand_up   = (lc->ctb_up_flag || y0b) ?
+                    s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
+    int cand_left = (lc->ctb_left_flag || x0b) ?
+                    s->tab_ipm[y_pu * min_pu_width + x_pu - 1]   : INTRA_DC;
+
+    int y_ctb = (y0 >> (s->sps->log2_ctb_size)) << (s->sps->log2_ctb_size);
+
+#ifdef USE_PRED
+    MvField *tab_mvf = s->ref->tab_mvf;
+    int j;
+#endif
+    int intra_pred_mode;
+    int candidate[3];
+    int i;
+
+    // intra_pred_mode prediction does not cross vertical CTB boundaries
+    if ((y0 - 1) < y_ctb)
+        cand_up = INTRA_DC;
+
+    if (cand_left == cand_up) {
+        if (cand_left < 2) {
+            /* both neighbours planar/DC: use the fixed default list */
+            candidate[0] = INTRA_PLANAR;
+            candidate[1] = INTRA_DC;
+            candidate[2] = INTRA_ANGULAR_26;
+        } else {
+            /* angular neighbour: itself plus its two adjacent angles
+             * (modulo-32 arithmetic over the angular modes 2..34) */
+            candidate[0] = cand_left;
+            candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
+            candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
+        }
+    } else {
+        candidate[0] = cand_left;
+        candidate[1] = cand_up;
+        if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
+            candidate[2] = INTRA_PLANAR;
+        } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
+            candidate[2] = INTRA_DC;
+        } else {
+            candidate[2] = INTRA_ANGULAR_26;
+        }
+    }
+
+    if (prev_intra_luma_pred_flag) {
+        intra_pred_mode = candidate[lc->pu.mpm_idx];
+    } else {
+        /* sort the candidates, then remap the remainder index past them */
+        if (candidate[0] > candidate[1])
+            FFSWAP(uint8_t, candidate[0], candidate[1]);
+        if (candidate[0] > candidate[2])
+            FFSWAP(uint8_t, candidate[0], candidate[2]);
+        if (candidate[1] > candidate[2])
+            FFSWAP(uint8_t, candidate[1], candidate[2]);
+
+        intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
+        for (i = 0; i < 3; i++)
+            if (intra_pred_mode >= candidate[i])
+                intra_pred_mode++;
+    }
+
+    /* write the intra prediction units into the mv array */
+    if (!size_in_pus)
+        size_in_pus = 1;
+    for (i = 0; i < size_in_pus; i++) {
+        memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
+               intra_pred_mode, size_in_pus);
+#ifdef USE_PRED
+        for (j = 0; j < size_in_pus; j++) {
+            tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
+        }
+#endif
+    }
+
+    return intra_pred_mode;
+}
+
+/* Record the coding-tree depth of a coding block: fill the square of
+ * min-CB entries covered by the CB at (x0, y0) with ct_depth. */
+static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
+                                          int log2_cb_size, int ct_depth)
+{
+    int stride = s->sps->min_cb_width;
+    int size   = (1 << log2_cb_size) >> s->sps->log2_min_cb_size;
+    int x_cb   = x0 >> s->sps->log2_min_cb_size;
+    int y_cb   = y0 >> s->sps->log2_min_cb_size;
+    int row;
+
+    for (row = 0; row < size; row++)
+        memset(&s->tab_ct_depth[(y_cb + row) * stride + x_cb],
+               ct_depth, size);
+}
+
+/* Remaps a luma-derived chroma intra mode (0..34) for 4:2:2 video
+ * (chroma_format_idc == 2); used only by intra_prediction_unit(). */
+static const uint8_t tab_mode_idx[] = {
+     0,  1,  2,  2,  2,  2,  3,  5,  7,  8, 10, 12, 13, 15, 17, 18, 19, 20,
+    21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
+
+/**
+ * Decode the intra-prediction syntax of a coding unit and derive the luma
+ * and chroma prediction modes of its 1 (PART_2Nx2N) or 4 (PART_NxN) PUs.
+ *
+ * Bitstream order (must not be changed): all prev_intra_luma_pred_flag
+ * bits first, then per-PU mpm_idx / rem_intra_luma_pred_mode, then the
+ * chroma mode(s) — one per PU for 4:4:4, a single one otherwise.
+ */
+static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
+                                  int log2_cb_size)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    /* chroma modes signalled by intra_chroma_pred_mode 0..3; value 4 means
+     * "same as luma" (DM mode) */
+    static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
+    uint8_t prev_intra_luma_pred_flag[4];
+    int split   = lc->cu.part_mode == PART_NxN;
+    int pb_size = (1 << log2_cb_size) >> split;
+    int side    = split + 1;
+    int chroma_mode;
+    int i, j;
+
+    for (i = 0; i < side; i++)
+        for (j = 0; j < side; j++)
+            prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);
+
+    for (i = 0; i < side; i++) {
+        for (j = 0; j < side; j++) {
+            if (prev_intra_luma_pred_flag[2 * i + j])
+                lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s);
+            else
+                lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s);
+
+            lc->pu.intra_pred_mode[2 * i + j] =
+                luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
+                                     prev_intra_luma_pred_flag[2 * i + j]);
+        }
+    }
+
+    if (s->sps->chroma_format_idc == 3) {
+        /* 4:4:4 — one chroma mode per PU */
+        for (i = 0; i < side; i++) {
+            for (j = 0; j < side; j++) {
+                lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
+                if (chroma_mode != 4) {
+                    /* collision with the luma mode selects ANGULAR_34 */
+                    if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
+                        lc->pu.intra_pred_mode_c[2 * i + j] = 34;
+                    else
+                        lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
+                } else {
+                    lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
+                }
+            }
+        }
+    } else if (s->sps->chroma_format_idc == 2) {
+        /* 4:2:2 — single chroma mode, remapped through tab_mode_idx */
+        int mode_idx;
+        lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
+        if (chroma_mode != 4) {
+            if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
+                mode_idx = 34;
+            else
+                mode_idx = intra_chroma_table[chroma_mode];
+        } else {
+            mode_idx = lc->pu.intra_pred_mode[0];
+        }
+        lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
+    } else if (s->sps->chroma_format_idc != 0) {
+        /* 4:2:0 — single chroma mode; monochrome (idc 0) has none */
+        chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
+        if (chroma_mode != 4) {
+            if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
+                lc->pu.intra_pred_mode_c[0] = 34;
+            else
+                lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
+        } else {
+            lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
+        }
+    }
+}
+
+/* Reset the per-PU prediction tables for a coding block: mark every
+ * min-PU covered by the CB as INTRA_DC in tab_ipm and — for intra CUs,
+ * when inter prediction is compiled in — as PF_INTRA in the mv field
+ * table. */
+static void intra_prediction_unit_default_value(HEVCContext *s,
+                                                int x0, int y0,
+                                                int log2_cb_size)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    int min_pu_width     = s->sps->min_pu_width;
+    int x_pu             = x0 >> s->sps->log2_min_pu_size;
+    int y_pu             = y0 >> s->sps->log2_min_pu_size;
+    int size_in_pus      = (1 << log2_cb_size) >> s->sps->log2_min_pu_size;
+#ifdef USE_PRED
+    MvField *tab_mvf     = s->ref->tab_mvf;
+#endif
+    int j, k;
+
+    /* a CB smaller than a min-PU still occupies one table entry */
+    if (!size_in_pus)
+        size_in_pus = 1;
+
+    for (j = 0; j < size_in_pus; j++)
+        memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu],
+               INTRA_DC, size_in_pus);
+
+#ifdef USE_PRED
+    if (lc->cu.pred_mode == MODE_INTRA) {
+        for (j = 0; j < size_in_pus; j++)
+            for (k = 0; k < size_in_pus; k++)
+                tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
+    }
+#endif
+}
+
+/**
+ * Decode one coding unit (coding_unit() syntax, H.265 section 7.3.8.5):
+ * skip flag, prediction mode, partitioning, PCM/intra/inter prediction
+ * data and, unless skipped or PCM, the residual transform tree.  Also
+ * updates the per-CB bookkeeping tables (skip_flag, qp_y_tab, ct_depth).
+ *
+ * Syntax elements are consumed strictly in bitstream order; the call
+ * sequence below must not be reordered.
+ *
+ * @return 0 on success, a negative error code on failure
+ */
+static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
+{
+    int cb_size          = 1 << log2_cb_size;
+    HEVCLocalContext *lc = s->HEVClc;
+    int log2_min_cb_size = s->sps->log2_min_cb_size;
+    int length           = cb_size >> log2_min_cb_size;
+    int min_cb_width     = s->sps->min_cb_width;
+    int x_cb             = x0 >> log2_min_cb_size;
+    int y_cb             = y0 >> log2_min_cb_size;
+#ifdef USE_PRED
+    int idx              = log2_cb_size - 2;
+#endif
+    int qp_block_mask    = (1<<(s->sps->log2_ctb_size - s->pps->diff_cu_qp_delta_depth)) - 1;
+    int x, y, ret;
+
+    /* defaults; overwritten below as syntax elements are decoded */
+    lc->cu.x                = x0;
+    lc->cu.y                = y0;
+    lc->cu.rqt_root_cbf     = 1;
+    lc->cu.pred_mode        = MODE_INTRA;
+    lc->cu.part_mode        = PART_2Nx2N;
+    lc->cu.intra_split_flag = 0;
+    lc->cu.pcm_flag         = 0;
+
+    SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
+    for (x = 0; x < 4; x++)
+        lc->pu.intra_pred_mode[x] = 1;
+    if (s->pps->transquant_bypass_enable_flag) {
+        lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
+        if (lc->cu.cu_transquant_bypass_flag)
+            set_deblocking_bypass(s, x0, y0, log2_cb_size);
+    } else
+        lc->cu.cu_transquant_bypass_flag = 0;
+
+#ifdef USE_PRED
+    /* cu_skip_flag: only present in P/B slices; propagated to every
+     * min-CB entry covered by this CB */
+    if (s->sh.slice_type != I_SLICE) {
+        uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);
+
+        x = y_cb * min_cb_width + x_cb;
+        for (y = 0; y < length; y++) {
+            memset(&s->skip_flag[x], skip_flag, length);
+            x += min_cb_width;
+        }
+        lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
+    } else 
+#endif
+    {
+        x = y_cb * min_cb_width + x_cb;
+        for (y = 0; y < length; y++) {
+            memset(&s->skip_flag[x], 0, length);
+            x += min_cb_width;
+        }
+    }
+
+#ifdef USE_PRED
+    if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
+        /* skipped CU: one merge-mode PU, no residual */
+        hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
+        intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
+
+        if (!s->sh.disable_deblocking_filter_flag)
+            ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
+    } else 
+#endif
+    {
+#ifdef USE_PRED
+        if (s->sh.slice_type != I_SLICE)
+            lc->cu.pred_mode = ff_hevc_pred_mode_decode(s);
+#endif
+        /* part_mode is only signalled for inter CUs or at min CB size */
+        if (lc->cu.pred_mode != MODE_INTRA ||
+            log2_cb_size == s->sps->log2_min_cb_size) {
+            lc->cu.part_mode        = ff_hevc_part_mode_decode(s, log2_cb_size);
+            lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
+                                      lc->cu.pred_mode == MODE_INTRA;
+        }
+
+        if (lc->cu.pred_mode == MODE_INTRA) {
+            if (lc->cu.part_mode == PART_2Nx2N && s->sps->pcm_enabled_flag &&
+                log2_cb_size >= s->sps->pcm.log2_min_pcm_cb_size &&
+                log2_cb_size <= s->sps->pcm.log2_max_pcm_cb_size) {
+                lc->cu.pcm_flag = ff_hevc_pcm_flag_decode(s);
+            }
+            if (lc->cu.pcm_flag) {
+                /* raw PCM samples replace both prediction and residual */
+                intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
+                ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
+                if (s->sps->pcm.loop_filter_disable_flag)
+                    set_deblocking_bypass(s, x0, y0, log2_cb_size);
+
+                if (ret < 0)
+                    return ret;
+            } else {
+                intra_prediction_unit(s, x0, y0, log2_cb_size);
+            }
+        } else {
+#ifdef USE_PRED
+            /* inter CU: decode one PU per partition of part_mode */
+            intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
+            switch (lc->cu.part_mode) {
+            case PART_2Nx2N:
+                hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
+                break;
+            case PART_2NxN:
+                hls_prediction_unit(s, x0, y0,               cb_size, cb_size / 2, log2_cb_size, 0, idx);
+                hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
+                break;
+            case PART_Nx2N:
+                hls_prediction_unit(s, x0,               y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
+                hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
+                break;
+            case PART_2NxnU:
+                hls_prediction_unit(s, x0, y0,               cb_size, cb_size     / 4, log2_cb_size, 0, idx);
+                hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
+                break;
+            case PART_2NxnD:
+                hls_prediction_unit(s, x0, y0,                   cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
+                hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size     / 4, log2_cb_size, 1, idx);
+                break;
+            case PART_nLx2N:
+                hls_prediction_unit(s, x0,               y0, cb_size     / 4, cb_size, log2_cb_size, 0, idx - 2);
+                hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
+                break;
+            case PART_nRx2N:
+                hls_prediction_unit(s, x0,                   y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
+                hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size     / 4, cb_size, log2_cb_size, 1, idx - 2);
+                break;
+            case PART_NxN:
+                hls_prediction_unit(s, x0,               y0,               cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
+                hls_prediction_unit(s, x0 + cb_size / 2, y0,               cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
+                hls_prediction_unit(s, x0,               y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
+                hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
+                break;
+            }
+#else
+            abort();
+#endif
+        }
+
+        if (!lc->cu.pcm_flag) {
+#ifdef USE_PRED
+            if (lc->cu.pred_mode != MODE_INTRA &&
+                !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
+                lc->cu.rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
+            }
+#endif
+            if (lc->cu.rqt_root_cbf) {
+                const static int cbf[2] = { 0 };
+                lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
+                                         s->sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
+                                         s->sps->max_transform_hierarchy_depth_inter;
+                ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
+                                         log2_cb_size,
+                                         log2_cb_size, 0, 0, cbf, cbf);
+                if (ret < 0)
+                    return ret;
+            } else {
+                if (!s->sh.disable_deblocking_filter_flag)
+                    ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
+            }
+        }
+    }
+
+    if (s->pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
+        ff_hevc_set_qPy(s, x0, y0, log2_cb_size);
+
+    /* record the final QP of this CB for every covered min-CB */
+    x = y_cb * min_cb_width + x_cb;
+    for (y = 0; y < length; y++) {
+        memset(&s->qp_y_tab[x], lc->qp_y, length);
+        x += min_cb_width;
+    }
+
+    /* crossing a QP-group boundary updates the QP predictor */
+    if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
+       ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
+        lc->qPy_pred = lc->qp_y;
+    }
+
+    set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
+
+    return 0;
+}
+
+/**
+ * Recursively decode the coding quadtree (coding_quadtree() syntax,
+ * H.265 section 7.3.8.4): either split the current block into four
+ * quadrants or decode it as a single coding unit.
+ *
+ * @return a negative error code on failure, 0 when the slice segment
+ *         ends here, 1 when more CTU data follows
+ */
+static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
+                               int log2_cb_size, int cb_depth)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    const int cb_size    = 1 << log2_cb_size;
+    int ret;
+    int qp_block_mask = (1<<(s->sps->log2_ctb_size - s->pps->diff_cu_qp_delta_depth)) - 1;
+    int split_cu;
+
+    lc->ct_depth = cb_depth;
+    /* split_cu_flag is only coded when the block fits in the picture and
+     * is above the minimum CB size; blocks overhanging the border must
+     * split, minimum-size blocks must not */
+    if (x0 + cb_size <= s->sps->width  &&
+        y0 + cb_size <= s->sps->height &&
+        log2_cb_size > s->sps->log2_min_cb_size) {
+        split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
+    } else {
+        split_cu = (log2_cb_size > s->sps->log2_min_cb_size);
+    }
+    /* entering a new QP group resets the pending cu_qp_delta */
+    if (s->pps->cu_qp_delta_enabled_flag &&
+        log2_cb_size >= s->sps->log2_ctb_size - s->pps->diff_cu_qp_delta_depth) {
+        lc->tu.is_cu_qp_delta_coded = 0;
+        lc->tu.cu_qp_delta          = 0;
+    }
+
+    if (s->sh.cu_chroma_qp_offset_enabled_flag &&
+        log2_cb_size >= s->sps->log2_ctb_size - s->pps->diff_cu_chroma_qp_offset_depth) {
+        lc->tu.is_cu_chroma_qp_offset_coded = 0;
+    }
+
+    if (split_cu) {
+        const int cb_size_split = cb_size >> 1;
+        const int x1 = x0 + cb_size_split;
+        const int y1 = y0 + cb_size_split;
+
+        int more_data = 0;
+
+        /* recurse into the quadrants that lie inside the picture */
+        more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1);
+        if (more_data < 0)
+            return more_data;
+
+        if (more_data && x1 < s->sps->width) {
+            more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
+            if (more_data < 0)
+                return more_data;
+        }
+        if (more_data && y1 < s->sps->height) {
+            more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
+            if (more_data < 0)
+                return more_data;
+        }
+        if (more_data && x1 < s->sps->width &&
+            y1 < s->sps->height) {
+            more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
+            if (more_data < 0)
+                return more_data;
+        }
+
+        if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
+            ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
+            lc->qPy_pred = lc->qp_y;
+
+        if (more_data)
+            return ((x1 + cb_size_split) < s->sps->width ||
+                    (y1 + cb_size_split) < s->sps->height);
+        else
+            return 0;
+    } else {
+        ret = hls_coding_unit(s, x0, y0, log2_cb_size);
+        if (ret < 0)
+            return ret;
+        /* end_of_slice_segment_flag is only present after the last CU of
+         * a CTU (block ends on a CTB or picture boundary in both axes) */
+        if ((!((x0 + cb_size) %
+               (1 << (s->sps->log2_ctb_size))) ||
+             (x0 + cb_size >= s->sps->width)) &&
+            (!((y0 + cb_size) %
+               (1 << (s->sps->log2_ctb_size))) ||
+             (y0 + cb_size >= s->sps->height))) {
+            int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
+            return !end_of_slice_flag;
+        } else {
+            return 1;
+        }
+    }
+
+    return 0;
+}
+
+/**
+ * Set up the neighbour-availability state for the CTB at (x_ctb, y_ctb):
+ * tile/slice boundary flags, the left/up/up-left/up-right availability
+ * flags used by intra prediction and CABAC context derivation, and the
+ * end-of-tile coordinates.
+ */
+static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
+                                 int ctb_addr_ts)
+{
+    HEVCLocalContext *lc  = s->HEVClc;
+    int ctb_size          = 1 << s->sps->log2_ctb_size;
+    int ctb_addr_rs       = s->pps->ctb_addr_ts_to_rs[ctb_addr_ts];
+    int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
+
+    s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
+
+    if (s->pps->entropy_coding_sync_enabled_flag) {
+        /* WPP: each CTB row starts a new QP group */
+        if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
+            lc->first_qp_group = 1;
+        lc->end_of_tiles_x = s->sps->width;
+    } else if (s->pps->tiles_enabled_flag) {
+        if (ctb_addr_ts && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[ctb_addr_ts - 1]) {
+            int idxX = s->pps->col_idxX[x_ctb >> s->sps->log2_ctb_size];
+            lc->end_of_tiles_x   = x_ctb + (s->pps->column_width[idxX] << s->sps->log2_ctb_size);
+            lc->first_qp_group   = 1;
+        }
+    } else {
+        lc->end_of_tiles_x = s->sps->width;
+    }
+
+    lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->sps->height);
+
+    lc->boundary_flags = 0;
+    if (s->pps->tiles_enabled_flag) {
+        if (x_ctb > 0 && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
+            lc->boundary_flags |= BOUNDARY_LEFT_TILE;
+        if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
+            lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
+        if (y_ctb > 0 && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->sps->ctb_width]])
+            lc->boundary_flags |= BOUNDARY_UPPER_TILE;
+        if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->sps->ctb_width])
+            lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
+    } else {
+        /* Fixed: was "if (!ctb_addr_in_slice > 0)", which parses as
+         * "(!ctb_addr_in_slice) > 0" and only matched address 0, missing
+         * the negative addresses of dependent slice segments (same fix
+         * as upstream FFmpeg hevcdec). */
+        if (ctb_addr_in_slice <= 0)
+            lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
+        if (ctb_addr_in_slice < s->sps->ctb_width)
+            lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
+    }
+
+    lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
+    lc->ctb_up_flag   = ((y_ctb > 0) && (ctb_addr_in_slice >= s->sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
+    lc->ctb_up_right_flag = ((y_ctb > 0)  && (ctb_addr_in_slice+1 >= s->sps->ctb_width) && (s->pps->tile_id[ctb_addr_ts] == s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->sps->ctb_width]]));
+    lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0)  && (ctb_addr_in_slice-1 >= s->sps->ctb_width) && (s->pps->tile_id[ctb_addr_ts] == s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->sps->ctb_width]]));
+}
+
+/**
+ * Serial slice-data decoding entry point (runs as an avctx->execute job).
+ * Walks the CTBs of the slice segment in tile-scan order: per CTB it sets
+ * up neighbour state, (re)initialises CABAC, decodes SAO parameters and
+ * the coding quadtree, then saves CABAC state and runs the in-loop
+ * filters.
+ *
+ * @return the tile-scan address one past the last decoded CTB, or a
+ *         negative error code
+ */
+static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
+{
+    HEVCContext *s  = avctxt->priv_data;
+    int ctb_size    = 1 << s->sps->log2_ctb_size;
+    int more_data   = 1;
+    int x_ctb       = 0;
+    int y_ctb       = 0;
+    int ctb_addr_ts = s->pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
+
+    if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
+        av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    /* a dependent segment needs the preceding segment's state */
+    if (s->sh.dependent_slice_segment_flag) {
+        int prev_rs = s->pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
+        if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
+            av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
+            return AVERROR_INVALIDDATA;
+        }
+    }
+
+    while (more_data && ctb_addr_ts < s->sps->ctb_size) {
+        int ctb_addr_rs = s->pps->ctb_addr_ts_to_rs[ctb_addr_ts];
+
+        x_ctb = (ctb_addr_rs % ((s->sps->width + ctb_size - 1) >> s->sps->log2_ctb_size)) << s->sps->log2_ctb_size;
+        y_ctb = (ctb_addr_rs / ((s->sps->width + ctb_size - 1) >> s->sps->log2_ctb_size)) << s->sps->log2_ctb_size;
+        hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
+
+        ff_hevc_cabac_init(s, ctb_addr_ts);
+
+        hls_sao_param(s, x_ctb >> s->sps->log2_ctb_size, y_ctb >> s->sps->log2_ctb_size);
+
+        s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
+        s->deblock[ctb_addr_rs].tc_offset   = s->sh.tc_offset;
+        s->filter_slice_edges[ctb_addr_rs]  = s->sh.slice_loop_filter_across_slices_enabled_flag;
+
+        more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->sps->log2_ctb_size, 0);
+        if (more_data < 0) {
+            /* invalidate the CTB so later segments detect the failure */
+            s->tab_slice_address[ctb_addr_rs] = -1;
+            return more_data;
+        }
+
+
+        ctb_addr_ts++;
+        ff_hevc_save_states(s, ctb_addr_ts);
+        ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
+    }
+
+    /* filter the final CTB if the slice reaches the picture corner */
+    if (x_ctb + ctb_size >= s->sps->width &&
+        y_ctb + ctb_size >= s->sps->height)
+        ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
+
+    return ctb_addr_ts;
+}
+
+/* Decode the slice data serially by running hls_decode_entry as a
+ * single execute() job and returning its result. */
+static int hls_slice_data(HEVCContext *s)
+{
+    int arg[2] = { 0, 1 };
+    int ret[2] = { 0, 0 };
+
+    s->avctx->execute(s->avctx, hls_decode_entry, arg, ret, 1, sizeof(int));
+    return ret[0];
+}
+
+#ifdef USE_FULL
+/**
+ * Wavefront-parallel (WPP) per-row decoding entry point (execute2 job).
+ * Decodes one CTB row of the slice, synchronising with the row above
+ * through ff_thread_await_progress2/ff_thread_report_progress2 so that
+ * each CTB starts only after its top-right neighbour is done.
+ *
+ * @param input_ctb_row array of row indices; entry [job] is this row
+ * @return the tile-scan address past the last CTB when the row finishes
+ *         the picture, 0 otherwise, or a negative error code
+ */
+static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
+{
+    HEVCContext *s1  = avctxt->priv_data, *s;
+    HEVCLocalContext *lc;
+    int ctb_size    = 1<< s1->sps->log2_ctb_size;
+    int more_data   = 1;
+    int *ctb_row_p    = input_ctb_row;
+    int ctb_row = ctb_row_p[job];
+    int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->sps->width + ctb_size - 1) >> s1->sps->log2_ctb_size);
+    int ctb_addr_ts = s1->pps->ctb_addr_rs_to_ts[ctb_addr_rs];
+    int thread = ctb_row % s1->threads_number;
+    int ret;
+
+    /* each job works on its own per-thread context copy */
+    s = s1->sList[self_id];
+    lc = s->HEVClc;
+
+    /* rows after the first read from their own entry point offset */
+    if(ctb_row) {
+        ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
+
+        if (ret < 0)
+            return ret;
+        ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
+    }
+
+    while(more_data && ctb_addr_ts < s->sps->ctb_size) {
+        int x_ctb = (ctb_addr_rs % s->sps->ctb_width) << s->sps->log2_ctb_size;
+        int y_ctb = (ctb_addr_rs / s->sps->ctb_width) << s->sps->log2_ctb_size;
+
+        hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
+
+        /* wait until the row above is far enough ahead */
+        ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
+
+        /* another row hit an error: abandon this row */
+        if (avpriv_atomic_int_get(&s1->wpp_err)){
+            ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
+            return 0;
+        }
+
+        ff_hevc_cabac_init(s, ctb_addr_ts);
+        hls_sao_param(s, x_ctb >> s->sps->log2_ctb_size, y_ctb >> s->sps->log2_ctb_size);
+        more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->sps->log2_ctb_size, 0);
+
+        if (more_data < 0) {
+            s->tab_slice_address[ctb_addr_rs] = -1;
+            return more_data;
+        }
+
+        ctb_addr_ts++;
+
+        ff_hevc_save_states(s, ctb_addr_ts);
+        ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
+        ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
+
+        /* premature end-of-slice in mid-row: flag the error globally */
+        if (!more_data && (x_ctb+ctb_size) < s->sps->width && ctb_row != s->sh.num_entry_point_offsets) {
+            avpriv_atomic_int_set(&s1->wpp_err,  1);
+            ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
+            return 0;
+        }
+
+        if ((x_ctb+ctb_size) >= s->sps->width && (y_ctb+ctb_size) >= s->sps->height ) {
+            ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
+            ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
+            return ctb_addr_ts;
+        }
+        ctb_addr_rs       = s->pps->ctb_addr_ts_to_rs[ctb_addr_ts];
+        x_ctb+=ctb_size;
+
+        /* this job only decodes a single CTB row */
+        if(x_ctb >= s->sps->width) {
+            break;
+        }
+    }
+    ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
+
+    return 0;
+}
+
+/**
+ * Decode the slice data with wavefront parallel processing: compute the
+ * per-row substream offsets/sizes from the slice header entry points
+ * (compensating for emulation-prevention bytes already stripped), clone
+ * the context for each worker thread and run one hls_decode_entry_wpp
+ * job per CTB row.
+ *
+ * @param nal    pointer to the (unescaped) slice NAL payload
+ * @param length payload size in bytes
+ * @return the sum of the per-row return values, or a negative error code
+ */
+static int hls_slice_data_wpp(HEVCContext *s, const uint8_t *nal, int length)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
+    int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
+    int offset;
+    int startheader, cmpt = 0;
+    int i, j, res = 0;
+
+    /* bail out on OOM instead of dereferencing NULL below */
+    if (!ret || !arg) {
+        av_free(ret);
+        av_free(arg);
+        return AVERROR(ENOMEM);
+    }
+
+    if (!s->sList[1]) {
+        ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
+
+        /* lazily create one context clone per worker thread */
+        for (i = 1; i < s->threads_number; i++) {
+            s->sList[i] = av_malloc(sizeof(HEVCContext));
+            s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
+            if (!s->sList[i] || !s->HEVClcList[i]) {
+                av_free(ret);
+                av_free(arg);
+                return AVERROR(ENOMEM);
+            }
+            memcpy(s->sList[i], s, sizeof(HEVCContext));
+            s->sList[i]->HEVClc = s->HEVClcList[i];
+        }
+    }
+
+    offset = (lc->gb.index >> 3);
+
+    /* entry_point_offset values count bytes of the escaped bitstream;
+     * subtract the emulation-prevention bytes (skipped_bytes_pos) that
+     * fall inside each substream to get offsets into `nal` */
+    for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < s->skipped_bytes; j++) {
+        if (s->skipped_bytes_pos[j] >= offset && s->skipped_bytes_pos[j] < startheader) {
+            startheader--;
+            cmpt++;
+        }
+    }
+
+    for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
+        offset += (s->sh.entry_point_offset[i - 1] - cmpt);
+        for (j = 0, cmpt = 0, startheader = offset
+             + s->sh.entry_point_offset[i]; j < s->skipped_bytes; j++) {
+            if (s->skipped_bytes_pos[j] >= offset && s->skipped_bytes_pos[j] < startheader) {
+                startheader--;
+                cmpt++;
+            }
+        }
+        s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
+        s->sh.offset[i - 1] = offset;
+
+    }
+    /* the last substream runs to the end of the payload */
+    if (s->sh.num_entry_point_offsets != 0) {
+        offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
+        s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
+        s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
+
+    }
+    s->data = nal;
+
+    /* refresh the clones with the current slice state */
+    for (i = 1; i < s->threads_number; i++) {
+        s->sList[i]->HEVClc->first_qp_group = 1;
+        s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
+        memcpy(s->sList[i], s, sizeof(HEVCContext));
+        s->sList[i]->HEVClc = s->HEVClcList[i];
+    }
+
+    avpriv_atomic_int_set(&s->wpp_err, 0);
+    ff_reset_entries(s->avctx);
+
+    for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
+        arg[i] = i;
+        ret[i] = 0;
+    }
+
+    if (s->pps->entropy_coding_sync_enabled_flag)
+        s->avctx->execute2(s->avctx, (void *) hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1);
+
+    for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
+        res += ret[i];
+    av_free(ret);
+    av_free(arg);
+    return res;
+}
+#endif
+
+/**
+ * Parse the two-byte NAL unit header (H.265 section 7.3.1.2) into
+ * s->nal_unit_type and s->temporal_id.
+ *
+ * @return AVERROR_INVALIDDATA if the packet is not a valid NAL unit,
+ * 0 if the unit should be skipped, 1 otherwise
+ */
+static int hls_nal_unit(HEVCContext *s)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+    int nuh_layer_id;
+
+    /* forbidden_zero_bit must be zero */
+    if (get_bits1(gb))
+        return AVERROR_INVALIDDATA;
+
+    s->nal_unit_type = get_bits(gb, 6);
+    nuh_layer_id     = get_bits(gb, 6);
+    s->temporal_id   = get_bits(gb, 3) - 1;
+
+    if (s->temporal_id < 0)
+        return AVERROR_INVALIDDATA;
+
+    av_log(s->avctx, AV_LOG_DEBUG,
+           "nal_unit_type: %d, nuh_layer_id: %d, temporal_id: %d\n",
+           s->nal_unit_type, nuh_layer_id, s->temporal_id);
+
+    /* only the base layer (nuh_layer_id == 0) is decoded */
+    return nuh_layer_id == 0;
+}
+
+/* Attach frame side data derived from SEI messages to the output frame:
+ * stereo-3D packing info (frame packing arrangement SEI) and a display
+ * matrix (display orientation SEI).  Compiled out in reduced builds.
+ * Returns 0 on success or AVERROR(ENOMEM). */
+static int set_side_data(HEVCContext *s)
+{
+#ifdef USE_FULL
+    AVFrame *out = s->ref->frame;
+
+    if (s->sei_frame_packing_present &&
+        s->frame_packing_arrangement_type >= 3 &&
+        s->frame_packing_arrangement_type <= 5 &&
+        s->content_interpretation_type > 0 &&
+        s->content_interpretation_type < 3) {
+        AVStereo3D *stereo = av_stereo3d_create_side_data(out);
+
+        if (!stereo)
+            return AVERROR(ENOMEM);
+
+        switch (s->frame_packing_arrangement_type) {
+        case 3:
+            stereo->type = s->quincunx_subsampling ?
+                           AV_STEREO3D_SIDEBYSIDE_QUINCUNX :
+                           AV_STEREO3D_SIDEBYSIDE;
+            break;
+        case 4:
+            stereo->type = AV_STEREO3D_TOPBOTTOM;
+            break;
+        case 5:
+            stereo->type = AV_STEREO3D_FRAMESEQUENCE;
+            break;
+        }
+
+        if (s->content_interpretation_type == 2)
+            stereo->flags = AV_STEREO3D_FLAG_INVERT;
+    }
+
+    if (s->sei_display_orientation_present &&
+        (s->sei_anticlockwise_rotation || s->sei_hflip || s->sei_vflip)) {
+        /* rotation is signalled in 1/65536-degree units of a full turn */
+        double angle = s->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
+        AVFrameSideData *rotation =
+            av_frame_new_side_data(out, AV_FRAME_DATA_DISPLAYMATRIX,
+                                   sizeof(int32_t) * 9);
+
+        if (!rotation)
+            return AVERROR(ENOMEM);
+
+        av_display_rotation_set((int32_t *)rotation->data, angle);
+        av_display_matrix_flip((int32_t *)rotation->data,
+                               s->sei_hflip, s->sei_vflip);
+    }
+#endif
+    return 0;
+}
+
+/* Per-frame initialization, run when the first slice of a new picture is
+ * encountered: clears the per-frame bookkeeping tables, allocates a new
+ * reference frame for the current POC, builds the reference picture sets,
+ * attaches side data and pushes any frame that became ready for output.
+ * Returns 0 on success or a negative AVERROR; on failure the frame's
+ * decoding progress is reported as complete so that frame-threading
+ * consumers waiting on it do not deadlock. */
+static int hevc_frame_start(HEVCContext *s)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    /* CTB table size on the min-CB grid, with one extra guard row and
+     * column (the "+ 1" terms). */
+    int pic_size_in_ctb  = ((s->sps->width  >> s->sps->log2_min_cb_size) + 1) *
+                           ((s->sps->height >> s->sps->log2_min_cb_size) + 1);
+    int ret;
+
+    /* Reset deblocking boundary strengths, luma CBF, PCM flags and the
+     * per-CTB slice-address map (-1 == not yet decoded). */
+    memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
+    memset(s->vertical_bs,   0, s->bs_width * s->bs_height);
+    memset(s->cbf_luma,      0, s->sps->min_tb_width * s->sps->min_tb_height);
+    memset(s->is_pcm,        0, (s->sps->min_pu_width + 1) * (s->sps->min_pu_height + 1));
+    memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
+
+    s->is_decoded        = 0;
+    /* Remember the NAL type of the first VCL NALU; later slices of the
+     * same picture must match it (checked in decode_nal_unit()). */
+    s->first_nal_type    = s->nal_unit_type;
+
+    if (s->pps->tiles_enabled_flag)
+        lc->end_of_tiles_x = s->pps->column_width[0] << s->sps->log2_ctb_size;
+
+    ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
+    if (ret < 0)
+        goto fail;
+
+#ifdef USE_PRED
+    ret = ff_hevc_frame_rps(s);
+    if (ret < 0) {
+        av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
+        goto fail;
+    }
+#endif
+
+    s->ref->frame->key_frame = IS_IRAP(s);
+
+    ret = set_side_data(s);
+    if (ret < 0)
+        goto fail;
+
+    /* Map the HEVC slice_type to AVPictureType — assumes the B=0 / P=1 /
+     * I=2 ordering from the spec; TODO(review): confirm against the
+     * slice-header parser. */
+    s->frame->pict_type = 3 - s->sh.slice_type;
+
+#ifdef USE_PRED
+    if (!IS_IRAP(s))
+        ff_hevc_bump_frame(s);
+#endif
+
+    /* Hand out any frame that became ready for presentation. */
+    av_frame_unref(s->output_frame);
+    ret = ff_hevc_output_frame(s, s->output_frame, 0);
+    if (ret < 0)
+        goto fail;
+
+    /* From this point on, other frame threads may start decoding. */
+    ff_thread_finish_setup(s->avctx);
+
+    return 0;
+
+fail:
+    /* Unblock any frame-thread waiting on this frame's progress. */
+    if (s->ref && s->threads_type == FF_THREAD_FRAME)
+        ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
+    s->ref = NULL;
+    return ret;
+}
+
+/* Decode a single (already unescaped) NAL unit of `length` bytes:
+ * parse its two-byte header, then dispatch on nal_unit_type to the
+ * parameter-set / SEI / slice decoders.  Returns 0 on success and also
+ * 0 on most recoverable errors (they are only propagated when the user
+ * enabled AV_EF_EXPLODE); returns a negative AVERROR otherwise. */
+static int decode_nal_unit(HEVCContext *s, const uint8_t *nal, int length)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    GetBitContext *gb    = &lc->gb;
+    int ctb_addr_ts, ret;
+
+    ret = init_get_bits8(gb, nal, length);
+    if (ret < 0)
+        return ret;
+
+    /* hls_nal_unit() returns (nuh_layer_id == 0); NAL units belonging
+     * to a non-base layer are silently ignored. */
+    ret = hls_nal_unit(s);
+    if (ret < 0) {
+        av_log(s->avctx, AV_LOG_ERROR, "Invalid NAL unit %d, skipping.\n",
+               s->nal_unit_type);
+        goto fail;
+    } else if (!ret)
+        return 0;
+
+    switch (s->nal_unit_type) {
+#ifdef USE_MSPS
+    /* Modified/merged SPS variant used by the BPG bitstream. */
+    case 48:
+        ret = ff_hevc_decode_nal_sps(s);
+        if (ret < 0)
+            goto fail;
+        break;
+#else
+    case NAL_VPS:
+        ret = ff_hevc_decode_nal_vps(s);
+        if (ret < 0)
+            goto fail;
+        break;
+    case NAL_SPS:
+        ret = ff_hevc_decode_nal_sps(s);
+        if (ret < 0)
+            goto fail;
+        break;
+#endif
+    case NAL_PPS:
+        ret = ff_hevc_decode_nal_pps(s);
+        if (ret < 0)
+            goto fail;
+        break;
+    case NAL_SEI_PREFIX:
+    case NAL_SEI_SUFFIX:
+        ret = ff_hevc_decode_nal_sei(s);
+        if (ret < 0)
+            goto fail;
+        break;
+    /* All VCL (slice-carrying) NAL unit types: */
+    case NAL_TRAIL_R:
+    case NAL_TRAIL_N:
+    case NAL_TSA_N:
+    case NAL_TSA_R:
+    case NAL_STSA_N:
+    case NAL_STSA_R:
+    case NAL_BLA_W_LP:
+    case NAL_BLA_W_RADL:
+    case NAL_BLA_N_LP:
+    case NAL_IDR_W_RADL:
+    case NAL_IDR_N_LP:
+    case NAL_CRA_NUT:
+    case NAL_RADL_N:
+    case NAL_RADL_R:
+    case NAL_RASL_N:
+    case NAL_RASL_R:
+        ret = hls_slice_header(s);
+        if (ret < 0)
+            return ret;
+
+        /* Establish the random-access threshold: after a CRA/BLA, RASL
+         * pictures with poc <= max_ra cannot be decoded and are
+         * skipped; IDR resets the threshold entirely. */
+        if (s->max_ra == INT_MAX) {
+            if (s->nal_unit_type == NAL_CRA_NUT || IS_BLA(s)) {
+                s->max_ra = s->poc;
+            } else {
+                if (IS_IDR(s))
+                    s->max_ra = INT_MIN;
+            }
+        }
+
+        if ((s->nal_unit_type == NAL_RASL_R || s->nal_unit_type == NAL_RASL_N) &&
+            s->poc <= s->max_ra) {
+            s->is_decoded = 0;
+            break;
+        } else {
+            if (s->nal_unit_type == NAL_RASL_R && s->poc > s->max_ra)
+                s->max_ra = INT_MIN;
+        }
+
+        if (s->sh.first_slice_in_pic_flag) {
+            ret = hevc_frame_start(s);
+            if (ret < 0)
+                return ret;
+        } else if (!s->ref) {
+            av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
+            goto fail;
+        }
+
+        /* All slices of one picture must carry the same NAL type. */
+        if (s->nal_unit_type != s->first_nal_type) {
+            av_log(s->avctx, AV_LOG_ERROR,
+                   "Non-matching NAL types of the VCL NALUs: %d %d\n",
+                   s->first_nal_type, s->nal_unit_type);
+            return AVERROR_INVALIDDATA;
+        }
+
+#ifdef USE_PRED
+        if (!s->sh.dependent_slice_segment_flag &&
+            s->sh.slice_type != I_SLICE) {
+            ret = ff_hevc_slice_rpl(s);
+            if (ret < 0) {
+                av_log(s->avctx, AV_LOG_WARNING,
+                       "Error constructing the reference lists for the current slice.\n");
+                goto fail;
+            }
+        }
+#endif
+
+#ifdef USE_FULL
+        /* Wavefront parallel processing path when entry points exist. */
+        if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
+            ctb_addr_ts = hls_slice_data_wpp(s, nal, length);
+        else
+#endif
+            ctb_addr_ts = hls_slice_data(s);
+        /* The picture is complete once every CTB has been decoded. */
+        if (ctb_addr_ts >= (s->sps->ctb_width * s->sps->ctb_height)) {
+            s->is_decoded = 1;
+        }
+
+        if (ctb_addr_ts < 0) {
+            ret = ctb_addr_ts;
+            goto fail;
+        }
+        break;
+    case NAL_EOS_NUT:
+    case NAL_EOB_NUT:
+        /* End of sequence/bitstream: bump the sequence id and re-arm
+         * the random-access threshold. */
+        s->seq_decode = (s->seq_decode + 1) & 0xff;
+        s->max_ra     = INT_MAX;
+        break;
+    case NAL_AUD:
+    case NAL_FD_NUT:
+        /* Access-unit delimiters and filler data carry no payload here. */
+        break;
+    default:
+        av_log(s->avctx, AV_LOG_INFO,
+               "Skipping NAL unit %d\n", s->nal_unit_type);
+    }
+
+    return 0;
+fail:
+    /* Errors are swallowed unless the caller asked for AV_EF_EXPLODE. */
+    if (s->avctx->err_recognition & AV_EF_EXPLODE)
+        return ret;
+    return 0;
+}
+
+/* FIXME: This is adapted from ff_h264_decode_nal, avoiding duplication
+ * between these functions would be nice. */
+/* Unescape one NAL unit: copy src[0..length) into nal->rbsp_buffer with
+ * the 00 00 03 emulation-prevention bytes removed, recording in
+ * s->skipped_bytes_pos the output offset of every removed byte.  If no
+ * escape byte is present, nal->data aliases src directly (no copy).
+ * Returns the number of source bytes consumed, or a negative AVERROR. */
+int ff_hevc_extract_rbsp(HEVCContext *s, const uint8_t *src, int length,
+                         HEVCNAL *nal)
+{
+    int i, si, di;
+    uint8_t *dst;
+
+    s->skipped_bytes = 0;
+/* At a 00 byte followed by 00 0x: a real start code (00 00 00 / 00 00 01
+ * / 00 00 02) means the current NAL ends here; 00 00 03 means an escape
+ * was found, so leave the fast scan and fall into the slow copy loop. */
+#define STARTCODE_TEST                                                  \
+        if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) {     \
+            if (src[i + 2] != 3) {                                      \
+                /* startcode, so we must be past the end */             \
+                length = i;                                             \
+            }                                                           \
+            break;                                                      \
+        }
+#if HAVE_FAST_UNALIGNED
+/* Back up to the first zero byte of a possible 00 00 run. */
+#define FIND_FIRST_ZERO                                                 \
+        if (i > 0 && !src[i])                                           \
+            i--;                                                        \
+        while (src[i])                                                  \
+            i++
+#if HAVE_FAST_64BIT
+    /* Word-at-a-time scan for zero bytes (classic haszero() trick);
+     * the stride of 9 and the i -= 7 rewind keep the scan from ever
+     * skipping a byte after a hit. */
+    for (i = 0; i + 1 < length; i += 9) {
+        if (!((~AV_RN64A(src + i) &
+               (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
+              0x8000800080008080ULL))
+            continue;
+        FIND_FIRST_ZERO;
+        STARTCODE_TEST;
+        i -= 7;
+    }
+#else
+    /* 32-bit variant of the same zero-byte scan. */
+    for (i = 0; i + 1 < length; i += 5) {
+        if (!((~AV_RN32A(src + i) &
+               (AV_RN32A(src + i) - 0x01000101U)) &
+              0x80008080U))
+            continue;
+        FIND_FIRST_ZERO;
+        STARTCODE_TEST;
+        i -= 3;
+    }
+#endif /* HAVE_FAST_64BIT */
+#else
+    /* Portable byte-wise scan: check every other byte, stepping back
+     * when the preceding byte is also zero. */
+    for (i = 0; i + 1 < length; i += 2) {
+        if (src[i])
+            continue;
+        if (i > 0 && src[i - 1] == 0)
+            i--;
+        STARTCODE_TEST;
+    }
+#endif /* HAVE_FAST_UNALIGNED */
+
+    if (i >= length - 1) { // no escaped 0
+        nal->data = src;
+        nal->size = length;
+        return length;
+    }
+
+    av_fast_malloc(&nal->rbsp_buffer, (unsigned int *)&nal->rbsp_buffer_size,
+                   length + FF_INPUT_BUFFER_PADDING_SIZE);
+    if (!nal->rbsp_buffer)
+        return AVERROR(ENOMEM);
+
+    dst = nal->rbsp_buffer;
+
+    /* Bytes before the first escape can be copied verbatim. */
+    memcpy(dst, src, i);
+    si = di = i;
+    while (si + 2 < length) {
+        // remove escapes (very rare 1:2^22)
+        if (src[si + 2] > 3) {
+            dst[di++] = src[si++];
+            dst[di++] = src[si++];
+        } else if (src[si] == 0 && src[si + 1] == 0) {
+            if (src[si + 2] == 3) { // escape
+                dst[di++] = 0;
+                dst[di++] = 0;
+                si       += 3;
+
+                /* Record where the escape byte was dropped so the slice
+                 * decoder can account for it; grow the position array
+                 * geometrically as needed. */
+                s->skipped_bytes++;
+                if (s->skipped_bytes_pos_size < s->skipped_bytes) {
+                    s->skipped_bytes_pos_size *= 2;
+                    av_reallocp_array(&s->skipped_bytes_pos,
+                            s->skipped_bytes_pos_size,
+                            sizeof(*s->skipped_bytes_pos));
+                    if (!s->skipped_bytes_pos)
+                        return AVERROR(ENOMEM);
+                }
+                if (s->skipped_bytes_pos)
+                    s->skipped_bytes_pos[s->skipped_bytes-1] = di - 1;
+                continue;
+            } else // next start code
+                goto nsc;
+        }
+
+        dst[di++] = src[si++];
+    }
+    /* Copy the last one or two trailing bytes. */
+    while (si < length)
+        dst[di++] = src[si++];
+
+nsc:
+    /* Zero padding so downstream bitstream readers can over-read safely. */
+    memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+
+    nal->data = dst;
+    nal->size = di;
+    return si;
+}
+
+/* Split an input packet into NAL units (either length-prefixed "nalff"
+ * / hvcC framing or Annex-B start codes), unescape each into s->nals[],
+ * then decode them in order.  Per-NAL escape-position bookkeeping is
+ * swapped in and out of the context around each ff_hevc_extract_rbsp()
+ * call.  Returns 0 on success or a negative AVERROR. */
+static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
+{
+    int i, consumed, ret = 0;
+
+    s->ref = NULL;
+    s->last_eos = s->eos;
+    s->eos = 0;
+
+    /* split the input packet into NAL units, so we know the upper bound on the
+     * number of slices in the frame */
+    s->nb_nals = 0;
+    while (length >= 4) {
+        HEVCNAL *nal;
+        int extract_length = 0;
+
+        if (s->is_nalff) {
+            /* Big-endian length prefix of nal_length_size bytes. */
+            int i;
+            for (i = 0; i < s->nal_length_size; i++)
+                extract_length = (extract_length << 8) | buf[i];
+            buf    += s->nal_length_size;
+            length -= s->nal_length_size;
+
+            if (extract_length > length) {
+                av_log(s->avctx, AV_LOG_ERROR, "Invalid NAL unit size.\n");
+                ret = AVERROR_INVALIDDATA;
+                goto fail;
+            }
+        } else {
+            /* search start code */
+            while (buf[0] != 0 || buf[1] != 0 || buf[2] != 1) {
+                ++buf;
+                --length;
+                if (length < 4) {
+                    av_log(s->avctx, AV_LOG_ERROR, "No start code is found.\n");
+                    ret = AVERROR_INVALIDDATA;
+                    goto fail;
+                }
+            }
+
+            buf           += 3;
+            length        -= 3;
+        }
+
+        /* Annex-B: let the extractor find the next start code itself. */
+        if (!s->is_nalff)
+            extract_length = length;
+
+        if (s->nals_allocated < s->nb_nals + 1) {
+            /* Grow all per-NAL arrays by one entry, zeroing the new slot.
+             * NOTE(review): the av_reallocp_array() return values below
+             * are not checked; on allocation failure the arrays are freed
+             * and NULLed, and the writes that follow would dereference
+             * NULL — TODO: add OOM handling here. */
+            int new_size = s->nals_allocated + 1;
+            HEVCNAL *tmp = av_realloc_array(s->nals, new_size, sizeof(*tmp));
+            if (!tmp) {
+                ret = AVERROR(ENOMEM);
+                goto fail;
+            }
+            s->nals = tmp;
+            memset(s->nals + s->nals_allocated, 0,
+                   (new_size - s->nals_allocated) * sizeof(*tmp));
+            av_reallocp_array(&s->skipped_bytes_nal, new_size, sizeof(*s->skipped_bytes_nal));
+            av_reallocp_array(&s->skipped_bytes_pos_size_nal, new_size, sizeof(*s->skipped_bytes_pos_size_nal));
+            av_reallocp_array(&s->skipped_bytes_pos_nal, new_size, sizeof(*s->skipped_bytes_pos_nal));
+            s->skipped_bytes_pos_size_nal[s->nals_allocated] = 1024; // initial buffer size
+            s->skipped_bytes_pos_nal[s->nals_allocated] = av_malloc_array(s->skipped_bytes_pos_size_nal[s->nals_allocated], sizeof(*s->skipped_bytes_pos));
+            s->nals_allocated = new_size;
+        }
+        /* Swap in this NAL's escape-position buffer for the extractor. */
+        s->skipped_bytes_pos_size = s->skipped_bytes_pos_size_nal[s->nb_nals];
+        s->skipped_bytes_pos = s->skipped_bytes_pos_nal[s->nb_nals];
+        nal = &s->nals[s->nb_nals];
+
+        consumed = ff_hevc_extract_rbsp(s, buf, extract_length, nal);
+
+        /* ... and store the (possibly reallocated) buffer back. */
+        s->skipped_bytes_nal[s->nb_nals] = s->skipped_bytes;
+        s->skipped_bytes_pos_size_nal[s->nb_nals] = s->skipped_bytes_pos_size;
+        s->skipped_bytes_pos_nal[s->nb_nals++] = s->skipped_bytes_pos;
+
+
+        if (consumed < 0) {
+            ret = consumed;
+            goto fail;
+        }
+
+        /* Peek the NAL header here only to detect end-of-sequence /
+         * end-of-bitstream before the decode pass. */
+        ret = init_get_bits8(&s->HEVClc->gb, nal->data, nal->size);
+        if (ret < 0)
+            goto fail;
+        hls_nal_unit(s);
+
+        if (s->nal_unit_type == NAL_EOB_NUT ||
+            s->nal_unit_type == NAL_EOS_NUT)
+            s->eos = 1;
+
+        buf    += consumed;
+        length -= consumed;
+    }
+
+    /* parse the NAL units */
+    for (i = 0; i < s->nb_nals; i++) {
+        int ret;
+        s->skipped_bytes = s->skipped_bytes_nal[i];
+        s->skipped_bytes_pos = s->skipped_bytes_pos_nal[i];
+
+        ret = decode_nal_unit(s, s->nals[i].data, s->nals[i].size);
+        if (ret < 0) {
+            av_log(s->avctx, AV_LOG_WARNING,
+                   "Error parsing NAL unit #%d.\n", i);
+            goto fail;
+        }
+    }
+
+fail:
+    /* Always unblock frame-thread waiters on the current frame. */
+    if (s->ref && s->threads_type == FF_THREAD_FRAME)
+        ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
+
+    return ret;
+}
+
+#ifdef USE_MD5
+/* Log a 16-byte MD5 digest as 32 lowercase hex characters (no newline). */
+static void print_md5(void *log_ctx, int level, uint8_t md5[16])
+{
+    int i;
+    for (i = 0; i < 16; i++)
+        av_log(log_ctx, level, "%02"PRIx8, md5[i]);
+}
+
+/* Verify the decoded frame against the per-plane MD5 checksums carried
+ * in the picture-hash SEI (stored in s->md5[]).  Returns 0 when every
+ * plane matches, AVERROR_INVALIDDATA on a mismatch, or another negative
+ * AVERROR on setup failure. */
+static int verify_md5(HEVCContext *s, AVFrame *frame)
+{
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+    int pixel_shift;
+    int i, j;
+
+    if (!desc)
+        return AVERROR(EINVAL);
+
+    /* pixel_shift == 1 when samples occupy two bytes each. */
+#ifdef USE_VAR_BIT_DEPTH
+    pixel_shift = s->sps->bit_depth > 8;
+#else
+    pixel_shift = desc->comp[0].depth_minus1 > 7;
+#endif
+
+    av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
+           s->poc);
+
+    /* the checksums are LE, so we have to byteswap for >8bpp formats
+     * on BE arches */
+#if HAVE_BIGENDIAN
+    if (pixel_shift && !s->checksum_buf) {
+        av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
+                       FFMAX3(frame->linesize[0], frame->linesize[1],
+                              frame->linesize[2]));
+        if (!s->checksum_buf)
+            return AVERROR(ENOMEM);
+    }
+#endif
+#ifdef USE_VAR_BIT_DEPTH
+    /* In this build frames are stored as 16-bit samples even for 8-bit
+     * content; a scratch row is needed to narrow them before hashing. */
+    if (pixel_shift == 0) {
+        av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
+                       FFMAX3(frame->linesize[0], frame->linesize[1],
+                              frame->linesize[2]));
+        if (!s->checksum_buf)
+            return AVERROR(ENOMEM);
+    }
+#endif
+
+    /* Hash each plane row by row; chroma planes use subsampled sizes. */
+    for (i = 0; frame->data[i]; i++) {
+        int width  = s->avctx->coded_width;
+        int height = s->avctx->coded_height;
+        int w = (i == 1 || i == 2) ? (width  >> desc->log2_chroma_w) : width;
+        int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
+        uint8_t md5[16];
+
+        av_md5_init(s->md5_ctx);
+        for (j = 0; j < h; j++) {
+            const uint8_t *src = frame->data[i] + j * frame->linesize[i];
+#if HAVE_BIGENDIAN
+            if (pixel_shift) {
+                s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
+                                    (const uint16_t *) src, w);
+                src = s->checksum_buf;
+            }
+#endif
+#ifdef USE_VAR_BIT_DEPTH
+            /* convert from 16 to 8 bits */
+            if (pixel_shift == 0) {
+                int j;
+                for(j = 0; j < w; j++)
+                    s->checksum_buf[j] = ((uint16_t *)src)[j];
+                src = s->checksum_buf;
+            }
+#endif
+            /* w << pixel_shift == bytes per row of this plane. */
+            av_md5_update(s->md5_ctx, src, w << pixel_shift);
+        }
+        av_md5_final(s->md5_ctx, md5);
+
+        if (!memcmp(md5, s->md5[i], 16)) {
+            av_log   (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
+            print_md5(s->avctx, AV_LOG_DEBUG, md5);
+            av_log   (s->avctx, AV_LOG_DEBUG, "; ");
+        } else {
+            av_log   (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
+            print_md5(s->avctx, AV_LOG_ERROR, md5);
+            av_log   (s->avctx, AV_LOG_ERROR, " != ");
+            print_md5(s->avctx, AV_LOG_ERROR, s->md5[i]);
+            av_log   (s->avctx, AV_LOG_ERROR, "\n");
+            return AVERROR_INVALIDDATA;
+        }
+    }
+
+    av_log(s->avctx, AV_LOG_DEBUG, "\n");
+    return 0;
+}
+#endif
+
+/* AVCodec.decode entry point.  An empty packet drains one delayed frame
+ * from the DPB; otherwise the whole packet is decoded, the SEI MD5 is
+ * optionally verified, and a finished frame (if any) is moved into
+ * `data`.  Returns the number of consumed bytes (always avpkt->size on
+ * success) or a negative AVERROR. */
+static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
+                             AVPacket *avpkt)
+{
+    int ret;
+    HEVCContext *s = avctx->priv_data;
+
+    if (!avpkt->size) {
+        /* Flush mode: output remaining buffered frames, one per call. */
+        ret = ff_hevc_output_frame(s, data, 1);
+        if (ret < 0)
+            return ret;
+
+        *got_output = ret;
+        return 0;
+    }
+
+    s->ref = NULL;
+#ifdef USE_FRAME_DURATION_SEI
+    s->frame_duration = 1;
+#endif
+    ret    = decode_nal_units(s, avpkt->data, avpkt->size);
+    if (ret < 0)
+        return ret;
+
+#ifdef USE_MD5
+    /* verify the SEI checksum */
+    if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
+        s->is_md5) {
+        ret = verify_md5(s, s->ref->frame);
+        if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
+            /* On a hard mismatch, drop the corrupt frame entirely. */
+            ff_hevc_unref_frame(s, s->ref, ~0);
+            return ret;
+        }
+    }
+    s->is_md5 = 0;
+#endif
+
+    if (s->is_decoded) {
+        av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
+        s->is_decoded = 0;
+    }
+
+    /* Hand over the frame selected for output during decoding. */
+    if (s->output_frame->buf[0]) {
+#ifdef USE_FRAME_DURATION_SEI
+        /* pts is repurposed to carry the SEI frame duration here —
+         * NOTE(review): confirm the caller expects this convention. */
+        s->output_frame->pts = s->frame_duration;
+#endif
+        av_frame_move_ref(data, s->output_frame);
+        *got_output = 1;
+    }
+
+    return avpkt->size;
+}
+
+#ifdef USE_FULL
+/* Make dst an additional reference to src (used when copying DPB state
+ * between frame threads): references the ThreadFrame plus the motion
+ * vector and reference-list buffers, and copies the scalar metadata.
+ * On any failure, everything acquired so far is released and
+ * AVERROR(ENOMEM) is returned. */
+static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
+{
+    int ret;
+
+    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
+    if (ret < 0)
+        return ret;
+
+    dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
+    if (!dst->tab_mvf_buf)
+        goto fail;
+    dst->tab_mvf = src->tab_mvf;
+
+    dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
+    if (!dst->rpl_tab_buf)
+        goto fail;
+    dst->rpl_tab = src->rpl_tab;
+
+    dst->rpl_buf = av_buffer_ref(src->rpl_buf);
+    if (!dst->rpl_buf)
+        goto fail;
+
+    dst->poc        = src->poc;
+    dst->ctb_count  = src->ctb_count;
+    dst->window     = src->window;
+    dst->flags      = src->flags;
+    dst->sequence   = src->sequence;
+
+    return 0;
+fail:
+    ff_hevc_unref_frame(s, dst, ~0);
+    return AVERROR(ENOMEM);
+}
+#endif
+
+/* AVCodec.close: release everything owned by the context — picture
+ * arrays, per-NAL bookkeeping, CABAC state, SAO buffers, the DPB,
+ * parameter-set lists, slice-header arrays, per-thread local contexts
+ * and the NAL unescape buffers.  Also used as the error-unwind path of
+ * hevc_init_context(), so every free below must tolerate fields that
+ * were never allocated. */
+static av_cold int hevc_decode_free(AVCodecContext *avctx)
+{
+    HEVCContext       *s = avctx->priv_data;
+    int i;
+
+    pic_arrays_free(s);
+
+#ifdef USE_MD5
+    av_freep(&s->md5_ctx);
+    av_freep(&s->checksum_buf);
+#endif
+
+    /* Per-NAL escape-position arrays allocated in decode_nal_units(). */
+    for(i=0; i < s->nals_allocated; i++) {
+        av_freep(&s->skipped_bytes_pos_nal[i]);
+    }
+    av_freep(&s->skipped_bytes_pos_size_nal);
+    av_freep(&s->skipped_bytes_nal);
+    av_freep(&s->skipped_bytes_pos_nal);
+
+    av_freep(&s->cabac_state);
+
+#ifdef USE_SAO_SMALL_BUFFER
+    av_freep(&s->sao_pixel_buffer);
+    for(i = 0; i < 3; i++) {
+        av_freep(&s->sao_pixel_buffer_h[i]);
+        av_freep(&s->sao_pixel_buffer_v[i]);
+    }
+#else
+    av_frame_free(&s->tmp_frame);
+#endif
+    av_frame_free(&s->output_frame);
+
+    /* Drop every DPB entry, then the frames themselves. */
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        ff_hevc_unref_frame(s, &s->DPB[i], ~0);
+        av_frame_free(&s->DPB[i].frame);
+    }
+
+    for (i = 0; i < FF_ARRAY_ELEMS(s->vps_list); i++)
+        av_buffer_unref(&s->vps_list[i]);
+    for (i = 0; i < FF_ARRAY_ELEMS(s->sps_list); i++)
+        av_buffer_unref(&s->sps_list[i]);
+    for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++)
+        av_buffer_unref(&s->pps_list[i]);
+    /* The active pointers aliased entries of the lists just freed. */
+    s->sps = NULL;
+    s->pps = NULL;
+    s->vps = NULL;
+
+    av_buffer_unref(&s->current_sps);
+
+    av_freep(&s->sh.entry_point_offset);
+    av_freep(&s->sh.offset);
+    av_freep(&s->sh.size);
+
+    /* Slice-thread local contexts; index 0 is the main context below. */
+    for (i = 1; i < s->threads_number; i++) {
+        HEVCLocalContext *lc = s->HEVClcList[i];
+        if (lc) {
+            av_freep(&s->HEVClcList[i]);
+            av_freep(&s->sList[i]);
+        }
+    }
+    if (s->HEVClc == s->HEVClcList[0])
+        s->HEVClc = NULL;
+    av_freep(&s->HEVClcList[0]);
+
+    for (i = 0; i < s->nals_allocated; i++)
+        av_freep(&s->nals[i].rbsp_buffer);
+    av_freep(&s->nals);
+    s->nals_allocated = 0;
+
+    return 0;
+}
+
+/* Allocate everything the context needs that does not depend on the
+ * stream parameters: the main local context, CABAC state, output/DPB
+ * frames and the MD5 context.  On any failure the partially-built
+ * context is torn down via hevc_decode_free() and AVERROR(ENOMEM) is
+ * returned. */
+static av_cold int hevc_init_context(AVCodecContext *avctx)
+{
+    HEVCContext *s = avctx->priv_data;
+    int i;
+
+    s->avctx = avctx;
+
+    s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
+    if (!s->HEVClc)
+        goto fail;
+    /* Slot 0 of the per-thread lists is the main thread's context. */
+    s->HEVClcList[0] = s->HEVClc;
+    s->sList[0] = s;
+
+    s->cabac_state = av_malloc(HEVC_CONTEXTS);
+    if (!s->cabac_state)
+        goto fail;
+
+#ifndef USE_SAO_SMALL_BUFFER
+    s->tmp_frame = av_frame_alloc();
+    if (!s->tmp_frame)
+        goto fail;
+#endif
+
+    s->output_frame = av_frame_alloc();
+    if (!s->output_frame)
+        goto fail;
+
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        s->DPB[i].frame = av_frame_alloc();
+        if (!s->DPB[i].frame)
+            goto fail;
+        /* ThreadFrame wraps the same AVFrame for progress reporting. */
+        s->DPB[i].tf.f = s->DPB[i].frame;
+    }
+
+    /* INT_MAX == "random-access threshold not set yet"
+     * (see decode_nal_unit()). */
+    s->max_ra = INT_MAX;
+
+#ifdef USE_MD5
+    s->md5_ctx = av_md5_alloc();
+    if (!s->md5_ctx)
+        goto fail;
+#endif
+
+#if HAVE_BIGENDIAN
+    ff_bswapdsp_init(&s->bdsp);
+#endif
+
+    s->context_initialized = 1;
+    s->eos = 0;
+
+    return 0;
+
+fail:
+    hevc_decode_free(avctx);
+    return AVERROR(ENOMEM);
+}
+
+#ifdef USE_FULL
+/* Frame-threading state transfer: copy the decoding state a new frame
+ * thread needs (DPB references, parameter-set buffers, POC/sequence
+ * counters) from the source thread's context into dst.  Returns 0 or a
+ * negative AVERROR. */
+static int hevc_update_thread_context(AVCodecContext *dst,
+                                      const AVCodecContext *src)
+{
+    HEVCContext *s  = dst->priv_data;
+    HEVCContext *s0 = src->priv_data;
+    int i, ret;
+
+    if (!s->context_initialized) {
+        ret = hevc_init_context(dst);
+        if (ret < 0)
+            return ret;
+    }
+
+    /* Re-reference every occupied DPB slot from the source context. */
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        ff_hevc_unref_frame(s, &s->DPB[i], ~0);
+        if (s0->DPB[i].frame->buf[0]) {
+            ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
+            if (ret < 0)
+                return ret;
+        }
+    }
+
+    /* If the active SPS changed, it will be re-activated below once the
+     * parameter-set lists have been copied. */
+    if (s->sps != s0->sps)
+        s->sps = NULL;
+    for (i = 0; i < FF_ARRAY_ELEMS(s->vps_list); i++) {
+        av_buffer_unref(&s->vps_list[i]);
+        if (s0->vps_list[i]) {
+            s->vps_list[i] = av_buffer_ref(s0->vps_list[i]);
+            if (!s->vps_list[i])
+                return AVERROR(ENOMEM);
+        }
+    }
+
+    for (i = 0; i < FF_ARRAY_ELEMS(s->sps_list); i++) {
+        av_buffer_unref(&s->sps_list[i]);
+        if (s0->sps_list[i]) {
+            s->sps_list[i] = av_buffer_ref(s0->sps_list[i]);
+            if (!s->sps_list[i])
+                return AVERROR(ENOMEM);
+        }
+    }
+
+    for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++) {
+        av_buffer_unref(&s->pps_list[i]);
+        if (s0->pps_list[i]) {
+            s->pps_list[i] = av_buffer_ref(s0->pps_list[i]);
+            if (!s->pps_list[i])
+                return AVERROR(ENOMEM);
+        }
+    }
+
+    av_buffer_unref(&s->current_sps);
+    if (s0->current_sps) {
+        s->current_sps = av_buffer_ref(s0->current_sps);
+        if (!s->current_sps)
+            return AVERROR(ENOMEM);
+    }
+
+    if (s->sps != s0->sps)
+        if ((ret = set_sps(s, s0->sps)) < 0)
+            return ret;
+
+    s->seq_decode = s0->seq_decode;
+    s->seq_output = s0->seq_output;
+    s->pocTid0    = s0->pocTid0;
+    s->max_ra     = s0->max_ra;
+    s->eos        = s0->eos;
+
+    s->is_nalff        = s0->is_nalff;
+    s->nal_length_size = s0->nal_length_size;
+
+    s->threads_number      = s0->threads_number;
+    s->threads_type        = s0->threads_type;
+
+    /* Mirror the EOS handling from decode_nal_unit(): a new sequence
+     * starts and the random-access threshold is re-armed. */
+    if (s0->eos) {
+        s->seq_decode = (s->seq_decode + 1) & 0xff;
+        s->max_ra = INT_MAX;
+    }
+
+    return 0;
+}
+
+/* Parse codec extradata: either an hvcC configuration record (detected
+ * by its non-Annex-B leading bytes), whose parameter-set NAL units are
+ * decoded one by one, or plain Annex-B data decoded directly.  Sets
+ * s->is_nalff / s->nal_length_size for subsequent packets.
+ * Returns 0 or a negative AVERROR. */
+static int hevc_decode_extradata(HEVCContext *s)
+{
+    AVCodecContext *avctx = s->avctx;
+    GetByteContext gb;
+    int ret;
+
+    bytestream2_init(&gb, avctx->extradata, avctx->extradata_size);
+
+    if (avctx->extradata_size > 3 &&
+        (avctx->extradata[0] || avctx->extradata[1] ||
+         avctx->extradata[2] > 1)) {
+        /* It seems the extradata is encoded as hvcC format.
+         * Temporarily, we support configurationVersion==0 until 14496-15 3rd
+         * is finalized. When finalized, configurationVersion will be 1 and we
+         * can recognize hvcC by checking if avctx->extradata[0]==1 or not. */
+        int i, j, num_arrays, nal_len_size;
+
+        s->is_nalff = 1;
+
+        /* Skip the fixed 21-byte header up to lengthSizeMinusOne. */
+        bytestream2_skip(&gb, 21);
+        nal_len_size = (bytestream2_get_byte(&gb) & 3) + 1;
+        num_arrays   = bytestream2_get_byte(&gb);
+
+        /* nal units in the hvcC always have length coded with 2 bytes,
+         * so put a fake nal_length_size = 2 while parsing them */
+        s->nal_length_size = 2;
+
+        /* Decode nal units from hvcC. */
+        for (i = 0; i < num_arrays; i++) {
+            int type = bytestream2_get_byte(&gb) & 0x3f;
+            int cnt  = bytestream2_get_be16(&gb);
+
+            for (j = 0; j < cnt; j++) {
+                // +2 for the nal size field
+                int nalsize = bytestream2_peek_be16(&gb) + 2;
+                if (bytestream2_get_bytes_left(&gb) < nalsize) {
+                    av_log(s->avctx, AV_LOG_ERROR,
+                           "Invalid NAL unit size in extradata.\n");
+                    return AVERROR_INVALIDDATA;
+                }
+
+                ret = decode_nal_units(s, gb.buffer, nalsize);
+                if (ret < 0) {
+                    av_log(avctx, AV_LOG_ERROR,
+                           "Decoding nal unit %d %d from hvcC failed\n",
+                           type, i);
+                    return ret;
+                }
+                bytestream2_skip(&gb, nalsize);
+            }
+        }
+
+        /* Now store right nal length size, that will be used to parse
+         * all other nals */
+        s->nal_length_size = nal_len_size;
+    } else {
+        /* Annex-B extradata: decode it as a regular NAL stream. */
+        s->is_nalff = 0;
+        ret = decode_nal_units(s, avctx->extradata, avctx->extradata_size);
+        if (ret < 0)
+            return ret;
+    }
+    return 0;
+}
+#endif
+
+/* AVCodec.init: set up global CABAC tables, allocate the context,
+ * choose the threading model and decode any extradata.
+ * Returns 0 or a negative AVERROR. */
+static av_cold int hevc_decode_init(AVCodecContext *avctx)
+{
+    HEVCContext *s = avctx->priv_data;
+    int ret;
+
+    ff_init_cabac_states();
+#ifdef CONFIG_SMALL
+    hevc_transform_init();
+#endif
+
+#ifdef USE_FULL
+    avctx->internal->allocate_progress = 1;
+#endif
+
+    ret = hevc_init_context(avctx);
+    if (ret < 0)
+        return ret;
+
+    s->enable_parallel_tiles = 0;
+    s->picture_struct = 0;
+
+    if(avctx->active_thread_type & FF_THREAD_SLICE)
+        s->threads_number = avctx->thread_count;
+    else
+        s->threads_number = 1;
+
+#ifdef USE_FULL
+    if (avctx->extradata_size > 0 && avctx->extradata) {
+        ret = hevc_decode_extradata(s);
+        if (ret < 0) {
+            hevc_decode_free(avctx);
+            return ret;
+        }
+    }
+#endif
+    /* NOTE(review): the indentation below is misleading (the else pairs
+     * with the if on the previous line); the logic itself is correct —
+     * frame threading is selected only when enabled and thread_count > 1.
+     * Consider reformatting. */
+    if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
+            s->threads_type = FF_THREAD_FRAME;
+        else
+            s->threads_type = FF_THREAD_SLICE;
+
+    return 0;
+}
+
+#ifdef USE_FULL
+/* Frame-threading worker init: start each copy from a zeroed context
+ * (the memset clears whatever libavcodec pre-copied into priv_data)
+ * and allocate its own buffers via hevc_init_context().  The actual
+ * state comes later through hevc_update_thread_context(). */
+static av_cold int hevc_init_thread_copy(AVCodecContext *avctx)
+{
+    HEVCContext *s = avctx->priv_data;
+    int ret;
+
+    memset(s, 0, sizeof(*s));
+
+    ret = hevc_init_context(avctx);
+    if (ret < 0)
+        return ret;
+
+    return 0;
+}
+#endif
+
+/* AVCodec.flush: empty the DPB (e.g. on seek) and re-arm the
+ * random-access threshold so RASL pictures after the next CRA/BLA are
+ * skipped again. */
+static void hevc_decode_flush(AVCodecContext *avctx)
+{
+    HEVCContext *s = avctx->priv_data;
+    ff_hevc_flush_dpb(s);
+    s->max_ra = INT_MAX;
+}
+
+#define OFFSET(x) offsetof(HEVCContext, x)
+#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
+
+/* Profile table, AVOptions and AVClass from the upstream decoder;
+ * currently compiled out (see the commented-out .priv_class /
+ * .profiles fields in ff_hevc_decoder below). */
+#if 0
+static const AVProfile profiles[] = {
+    { FF_PROFILE_HEVC_MAIN,                 "Main"                },
+    { FF_PROFILE_HEVC_MAIN_10,              "Main 10"             },
+    { FF_PROFILE_HEVC_MAIN_STILL_PICTURE,   "Main Still Picture"  },
+    { FF_PROFILE_HEVC_REXT,                 "Rext"  },
+    { FF_PROFILE_UNKNOWN },
+};
+
+static const AVOption options[] = {
+    { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
+        AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, PAR },
+    { "strict-displaywin", "stricly apply default display window size", OFFSET(apply_defdispwin),
+        AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, PAR },
+    { NULL },
+};
+
+static const AVClass hevc_decoder_class = {
+    .class_name = "HEVC decoder",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+#endif
+
+/* Decoder registration.
+ * NOTE(review): CODEC_CAP_FRAME_THREADS is advertised while
+ * .update_thread_context and .init_thread_copy are commented out —
+ * verify that frame threading is actually safe in this configuration,
+ * or drop the capability flag. */
+AVCodec ff_hevc_decoder = {
+    .name                  = "hevc",
+    .long_name             = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
+    .type                  = AVMEDIA_TYPE_VIDEO,
+    .id                    = AV_CODEC_ID_HEVC,
+    .priv_data_size        = sizeof(HEVCContext),
+    //    .priv_class            = &hevc_decoder_class,
+    .init                  = hevc_decode_init,
+    .close                 = hevc_decode_free,
+    .decode                = hevc_decode_frame,
+    .flush                 = hevc_decode_flush,
+    //    .update_thread_context = hevc_update_thread_context,
+    //    .init_thread_copy      = hevc_init_thread_copy,
+    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_DELAY |
+                             CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS,
+    //    .profiles              = NULL_IF_CONFIG_SMALL(profiles),
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevc.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1082 @@
+/*
+ * HEVC video decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_HEVC_H
+#define AVCODEC_HEVC_H
+
+#include "libavutil/buffer.h"
+#include "libavutil/md5.h"
+
+#include "avcodec.h"
+#include "bswapdsp.h"
+#include "cabac.h"
+#include "get_bits.h"
+#include "hevcdsp.h"
+#include "hevcpred.h"
+#include "internal.h"
+#include "thread.h"
+#include "videodsp.h"
+
+#define MAX_DPB_SIZE 16 // A.4.1
+#define MAX_REFS 16
+
+#define MAX_NB_THREADS 16
+#define SHIFT_CTB_WPP 2
+
+/**
+ * 7.4.2.1
+ */
+#define MAX_SUB_LAYERS 7
+#ifdef USE_MSPS
+#define MAX_VPS_COUNT 16
+#define MAX_SPS_COUNT 32
+#ifdef USE_PRED
+#define MAX_DPB_COUNT 32
+#else
+#define MAX_DPB_COUNT 1
+#endif
+#else
+#define MAX_VPS_COUNT 1
+#define MAX_SPS_COUNT 1
+#define MAX_DPB_COUNT 32
+#endif
+#define MAX_PPS_COUNT 256
+#define MAX_SHORT_TERM_RPS_COUNT 64
+#define MAX_CU_SIZE 128
+
+//TODO: check if this is really the maximum
+#define MAX_TRANSFORM_DEPTH 5
+
+#define MAX_TB_SIZE 32
+#define MAX_LOG2_CTB_SIZE 6
+#define MAX_QP 51
+#define DEFAULT_INTRA_TC_OFFSET 2
+
+#define HEVC_CONTEXTS 199
+
+#define MRG_MAX_NUM_CANDS     5
+
+#define L0 0
+#define L1 1
+
+#define EPEL_EXTRA_BEFORE 1
+#define EPEL_EXTRA_AFTER  2
+#define EPEL_EXTRA        3
+#define QPEL_EXTRA_BEFORE 3
+#define QPEL_EXTRA_AFTER  4
+#define QPEL_EXTRA        7
+
+#define EDGE_EMU_BUFFER_STRIDE 80
+
+/**
+ * Value of the luma sample at position (x, y) in the 2D array tab.
+ */
+#define SAMPLE(tab, x, y) ((tab)[(y) * s->sps->width + (x)])
+#define SAMPLE_CTB(tab, x, y) ((tab)[(y) * min_cb_width + (x)])
+
+#define IS_IDR(s) ((s)->nal_unit_type == NAL_IDR_W_RADL || (s)->nal_unit_type == NAL_IDR_N_LP)
+#define IS_BLA(s) ((s)->nal_unit_type == NAL_BLA_W_RADL || (s)->nal_unit_type == NAL_BLA_W_LP || \
+                   (s)->nal_unit_type == NAL_BLA_N_LP)
+#define IS_IRAP(s) ((s)->nal_unit_type >= 16 && (s)->nal_unit_type <= 23)
+
+/**
+ * Table 7-3: NAL unit type codes
+ */
+enum NALUnitType {
+    NAL_TRAIL_N    = 0,
+    NAL_TRAIL_R    = 1,
+    NAL_TSA_N      = 2,
+    NAL_TSA_R      = 3,
+    NAL_STSA_N     = 4,
+    NAL_STSA_R     = 5,
+    NAL_RADL_N     = 6,
+    NAL_RADL_R     = 7,
+    NAL_RASL_N     = 8,
+    NAL_RASL_R     = 9,
+    NAL_BLA_W_LP   = 16,
+    NAL_BLA_W_RADL = 17,
+    NAL_BLA_N_LP   = 18,
+    NAL_IDR_W_RADL = 19,
+    NAL_IDR_N_LP   = 20,
+    NAL_CRA_NUT    = 21,
+    NAL_VPS        = 32,
+    NAL_SPS        = 33,
+    NAL_PPS        = 34,
+    NAL_AUD        = 35,
+    NAL_EOS_NUT    = 36,
+    NAL_EOB_NUT    = 37,
+    NAL_FD_NUT     = 38,
+    NAL_SEI_PREFIX = 39,
+    NAL_SEI_SUFFIX = 40,
+};
+
+enum RPSType {
+    ST_CURR_BEF = 0,
+    ST_CURR_AFT,
+    ST_FOLL,
+    LT_CURR,
+    LT_FOLL,
+    NB_RPS_TYPE,
+};
+
+enum SliceType {
+    B_SLICE = 0,
+    P_SLICE = 1,
+    I_SLICE = 2,
+};
+
+enum SyntaxElement {
+    SAO_MERGE_FLAG = 0,
+    SAO_TYPE_IDX,
+    SAO_EO_CLASS,
+    SAO_BAND_POSITION,
+    SAO_OFFSET_ABS,
+    SAO_OFFSET_SIGN,
+    END_OF_SLICE_FLAG,
+    SPLIT_CODING_UNIT_FLAG,
+    CU_TRANSQUANT_BYPASS_FLAG,
+    SKIP_FLAG,
+    CU_QP_DELTA,
+    PRED_MODE_FLAG,
+    PART_MODE,
+    PCM_FLAG,
+    PREV_INTRA_LUMA_PRED_FLAG,
+    MPM_IDX,
+    REM_INTRA_LUMA_PRED_MODE,
+    INTRA_CHROMA_PRED_MODE,
+    MERGE_FLAG,
+    MERGE_IDX,
+    INTER_PRED_IDC,
+    REF_IDX_L0,
+    REF_IDX_L1,
+    ABS_MVD_GREATER0_FLAG,
+    ABS_MVD_GREATER1_FLAG,
+    ABS_MVD_MINUS2,
+    MVD_SIGN_FLAG,
+    MVP_LX_FLAG,
+    NO_RESIDUAL_DATA_FLAG,
+    SPLIT_TRANSFORM_FLAG,
+    CBF_LUMA,
+    CBF_CB_CR,
+    TRANSFORM_SKIP_FLAG,
+    EXPLICIT_RDPCM_FLAG,
+    EXPLICIT_RDPCM_DIR_FLAG,
+    LAST_SIGNIFICANT_COEFF_X_PREFIX,
+    LAST_SIGNIFICANT_COEFF_Y_PREFIX,
+    LAST_SIGNIFICANT_COEFF_X_SUFFIX,
+    LAST_SIGNIFICANT_COEFF_Y_SUFFIX,
+    SIGNIFICANT_COEFF_GROUP_FLAG,
+    SIGNIFICANT_COEFF_FLAG,
+    COEFF_ABS_LEVEL_GREATER1_FLAG,
+    COEFF_ABS_LEVEL_GREATER2_FLAG,
+    COEFF_ABS_LEVEL_REMAINING,
+    COEFF_SIGN_FLAG,
+    LOG2_RES_SCALE_ABS,
+    RES_SCALE_SIGN_FLAG,
+    CU_CHROMA_QP_OFFSET_FLAG,
+    CU_CHROMA_QP_OFFSET_IDX,
+};
+
+enum PartMode {
+    PART_2Nx2N = 0,
+    PART_2NxN  = 1,
+    PART_Nx2N  = 2,
+    PART_NxN   = 3,
+    PART_2NxnU = 4,
+    PART_2NxnD = 5,
+    PART_nLx2N = 6,
+    PART_nRx2N = 7,
+};
+
+enum PredMode {
+    MODE_INTER = 0,
+    MODE_INTRA,
+    MODE_SKIP,
+};
+
+enum InterPredIdc {
+    PRED_L0 = 0,
+    PRED_L1,
+    PRED_BI,
+};
+
+enum PredFlag {
+    PF_INTRA = 0,
+    PF_L0,
+    PF_L1,
+    PF_BI,
+};
+
+enum IntraPredMode {
+    INTRA_PLANAR = 0,
+    INTRA_DC,
+    INTRA_ANGULAR_2,
+    INTRA_ANGULAR_3,
+    INTRA_ANGULAR_4,
+    INTRA_ANGULAR_5,
+    INTRA_ANGULAR_6,
+    INTRA_ANGULAR_7,
+    INTRA_ANGULAR_8,
+    INTRA_ANGULAR_9,
+    INTRA_ANGULAR_10,
+    INTRA_ANGULAR_11,
+    INTRA_ANGULAR_12,
+    INTRA_ANGULAR_13,
+    INTRA_ANGULAR_14,
+    INTRA_ANGULAR_15,
+    INTRA_ANGULAR_16,
+    INTRA_ANGULAR_17,
+    INTRA_ANGULAR_18,
+    INTRA_ANGULAR_19,
+    INTRA_ANGULAR_20,
+    INTRA_ANGULAR_21,
+    INTRA_ANGULAR_22,
+    INTRA_ANGULAR_23,
+    INTRA_ANGULAR_24,
+    INTRA_ANGULAR_25,
+    INTRA_ANGULAR_26,
+    INTRA_ANGULAR_27,
+    INTRA_ANGULAR_28,
+    INTRA_ANGULAR_29,
+    INTRA_ANGULAR_30,
+    INTRA_ANGULAR_31,
+    INTRA_ANGULAR_32,
+    INTRA_ANGULAR_33,
+    INTRA_ANGULAR_34,
+};
+
+enum SAOType {
+    SAO_NOT_APPLIED = 0,
+    SAO_BAND,
+    SAO_EDGE,
+    SAO_APPLIED
+};
+
+enum SAOEOClass {
+    SAO_EO_HORIZ = 0,
+    SAO_EO_VERT,
+    SAO_EO_135D,
+    SAO_EO_45D,
+};
+
+enum ScanType {
+    SCAN_DIAG = 0,
+    SCAN_HORIZ,
+    SCAN_VERT,
+};
+
+typedef struct ShortTermRPS {
+    unsigned int num_negative_pics;
+    int num_delta_pocs;
+    int32_t delta_poc[32];
+    uint8_t used[32];
+} ShortTermRPS;
+
+typedef struct LongTermRPS {
+    int     poc[32];
+    uint8_t used[32];
+    uint8_t nb_refs;
+} LongTermRPS;
+
+typedef struct RefPicList {
+    struct HEVCFrame *ref[MAX_REFS];
+    int list[MAX_REFS];
+    int isLongTerm[MAX_REFS];
+    int nb_refs;
+} RefPicList;
+
+typedef struct RefPicListTab {
+    RefPicList refPicList[2];
+} RefPicListTab;
+
+typedef struct HEVCWindow {
+    int left_offset;
+    int right_offset;
+    int top_offset;
+    int bottom_offset;
+} HEVCWindow;
+
+typedef struct VUI {
+    AVRational sar;
+
+    int overscan_info_present_flag;
+    int overscan_appropriate_flag;
+
+    int video_signal_type_present_flag;
+    int video_format;
+    int video_full_range_flag;
+    int colour_description_present_flag;
+    uint8_t colour_primaries;
+    uint8_t transfer_characteristic;
+    uint8_t matrix_coeffs;
+
+    int chroma_loc_info_present_flag;
+    int chroma_sample_loc_type_top_field;
+    int chroma_sample_loc_type_bottom_field;
+    int neutra_chroma_indication_flag;
+
+    int field_seq_flag;
+    int frame_field_info_present_flag;
+
+    int default_display_window_flag;
+    HEVCWindow def_disp_win;
+
+    int vui_timing_info_present_flag;
+    uint32_t vui_num_units_in_tick;
+    uint32_t vui_time_scale;
+    int vui_poc_proportional_to_timing_flag;
+    int vui_num_ticks_poc_diff_one_minus1;
+    int vui_hrd_parameters_present_flag;
+
+    int bitstream_restriction_flag;
+    int tiles_fixed_structure_flag;
+    int motion_vectors_over_pic_boundaries_flag;
+    int restricted_ref_pic_lists_flag;
+    int min_spatial_segmentation_idc;
+    int max_bytes_per_pic_denom;
+    int max_bits_per_min_cu_denom;
+    int log2_max_mv_length_horizontal;
+    int log2_max_mv_length_vertical;
+} VUI;
+
+typedef struct PTLCommon {
+    uint8_t profile_space;
+    uint8_t tier_flag;
+    uint8_t profile_idc;
+    uint8_t profile_compatibility_flag[32];
+    uint8_t level_idc;
+    uint8_t progressive_source_flag;
+    uint8_t interlaced_source_flag;
+    uint8_t non_packed_constraint_flag;
+    uint8_t frame_only_constraint_flag;
+} PTLCommon;
+
+typedef struct PTL {
+    PTLCommon general_ptl;
+    PTLCommon sub_layer_ptl[MAX_SUB_LAYERS];
+
+    uint8_t sub_layer_profile_present_flag[MAX_SUB_LAYERS];
+    uint8_t sub_layer_level_present_flag[MAX_SUB_LAYERS];
+} PTL;
+
+typedef struct HEVCVPS {
+    uint8_t vps_temporal_id_nesting_flag;
+    int vps_max_layers;
+    int vps_max_sub_layers; ///< vps_max_temporal_layers_minus1 + 1
+
+    PTL ptl;
+    int vps_sub_layer_ordering_info_present_flag;
+    unsigned int vps_max_dec_pic_buffering[MAX_SUB_LAYERS];
+    unsigned int vps_num_reorder_pics[MAX_SUB_LAYERS];
+    unsigned int vps_max_latency_increase[MAX_SUB_LAYERS];
+    int vps_max_layer_id;
+    int vps_num_layer_sets; ///< vps_num_layer_sets_minus1 + 1
+    uint8_t vps_timing_info_present_flag;
+    uint32_t vps_num_units_in_tick;
+    uint32_t vps_time_scale;
+    uint8_t vps_poc_proportional_to_timing_flag;
+    int vps_num_ticks_poc_diff_one; ///< vps_num_ticks_poc_diff_one_minus1 + 1
+    int vps_num_hrd_parameters;
+} HEVCVPS;
+
+typedef struct ScalingList {
+    /* This is a little wasteful, since sizeID 0 only needs 8 coeffs,
+     * and size ID 3 only has 2 arrays, not 6. */
+    uint8_t sl[4][6][64];
+    uint8_t sl_dc[2][6];
+} ScalingList;
+
+typedef struct HEVCSPS {
+    unsigned vps_id;
+    int chroma_format_idc;
+    uint8_t separate_colour_plane_flag;
+
+    ///< output (i.e. cropped) values
+    int output_width, output_height;
+    HEVCWindow output_window;
+
+    HEVCWindow pic_conf_win;
+
+    int bit_depth;
+    int pixel_shift;
+    enum AVPixelFormat pix_fmt;
+
+    unsigned int log2_max_poc_lsb;
+    int pcm_enabled_flag;
+
+    int max_sub_layers;
+    struct {
+        int max_dec_pic_buffering;
+        int num_reorder_pics;
+        int max_latency_increase;
+    } temporal_layer[MAX_SUB_LAYERS];
+
+    VUI vui;
+    PTL ptl;
+
+    uint8_t scaling_list_enable_flag;
+    ScalingList scaling_list;
+
+    unsigned int nb_st_rps;
+    ShortTermRPS st_rps[MAX_SHORT_TERM_RPS_COUNT];
+
+    uint8_t amp_enabled_flag;
+    uint8_t sao_enabled;
+
+    uint8_t long_term_ref_pics_present_flag;
+    uint16_t lt_ref_pic_poc_lsb_sps[32];
+    uint8_t used_by_curr_pic_lt_sps_flag[32];
+    uint8_t num_long_term_ref_pics_sps;
+
+    struct {
+        uint8_t bit_depth;
+        uint8_t bit_depth_chroma;
+        unsigned int log2_min_pcm_cb_size;
+        unsigned int log2_max_pcm_cb_size;
+        uint8_t loop_filter_disable_flag;
+    } pcm;
+    uint8_t sps_temporal_mvp_enabled_flag;
+    uint8_t sps_strong_intra_smoothing_enable_flag;
+
+    unsigned int log2_min_cb_size;
+    unsigned int log2_diff_max_min_coding_block_size;
+    unsigned int log2_min_tb_size;
+    unsigned int log2_max_trafo_size;
+    unsigned int log2_ctb_size;
+    unsigned int log2_min_pu_size;
+
+    int max_transform_hierarchy_depth_inter;
+    int max_transform_hierarchy_depth_intra;
+
+    int transform_skip_rotation_enabled_flag;
+    int transform_skip_context_enabled_flag;
+    int implicit_rdpcm_enabled_flag;
+    int explicit_rdpcm_enabled_flag;
+    int intra_smoothing_disabled_flag;
+    int persistent_rice_adaptation_enabled_flag;
+
+    ///< coded frame dimension in various units
+    int width;
+    int height;
+    int ctb_width;
+    int ctb_height;
+    int ctb_size;
+    int min_cb_width;
+    int min_cb_height;
+    int min_tb_width;
+    int min_tb_height;
+    int min_pu_width;
+    int min_pu_height;
+    int tb_mask;
+
+    int hshift[3];
+    int vshift[3];
+
+    int qp_bd_offset;
+} HEVCSPS;
+
+typedef struct HEVCPPS {
+    unsigned int sps_id; ///< seq_parameter_set_id
+
+    uint8_t sign_data_hiding_flag;
+
+    uint8_t cabac_init_present_flag;
+
+    int num_ref_idx_l0_default_active; ///< num_ref_idx_l0_default_active_minus1 + 1
+    int num_ref_idx_l1_default_active; ///< num_ref_idx_l1_default_active_minus1 + 1
+    int pic_init_qp_minus26;
+
+    uint8_t constrained_intra_pred_flag;
+    uint8_t transform_skip_enabled_flag;
+
+    uint8_t cu_qp_delta_enabled_flag;
+    int diff_cu_qp_delta_depth;
+
+    int cb_qp_offset;
+    int cr_qp_offset;
+    uint8_t pic_slice_level_chroma_qp_offsets_present_flag;
+    uint8_t weighted_pred_flag;
+    uint8_t weighted_bipred_flag;
+    uint8_t output_flag_present_flag;
+    uint8_t transquant_bypass_enable_flag;
+
+    uint8_t dependent_slice_segments_enabled_flag;
+    uint8_t tiles_enabled_flag;
+    uint8_t entropy_coding_sync_enabled_flag;
+
+    int num_tile_columns;   ///< num_tile_columns_minus1 + 1
+    int num_tile_rows;      ///< num_tile_rows_minus1 + 1
+    uint8_t uniform_spacing_flag;
+    uint8_t loop_filter_across_tiles_enabled_flag;
+
+    uint8_t seq_loop_filter_across_slices_enabled_flag;
+
+    uint8_t deblocking_filter_control_present_flag;
+    uint8_t deblocking_filter_override_enabled_flag;
+    uint8_t disable_dbf;
+    int beta_offset;    ///< beta_offset_div2 * 2
+    int tc_offset;      ///< tc_offset_div2 * 2
+
+    uint8_t scaling_list_data_present_flag;
+    ScalingList scaling_list;
+
+    uint8_t lists_modification_present_flag;
+    int log2_parallel_merge_level; ///< log2_parallel_merge_level_minus2 + 2
+    int num_extra_slice_header_bits;
+    uint8_t slice_header_extension_present_flag;
+    uint8_t log2_max_transform_skip_block_size;
+    uint8_t cross_component_prediction_enabled_flag;
+    uint8_t chroma_qp_offset_list_enabled_flag;
+    uint8_t diff_cu_chroma_qp_offset_depth;
+    uint8_t chroma_qp_offset_list_len_minus1;
+    int8_t  cb_qp_offset_list[5];
+    int8_t  cr_qp_offset_list[5];
+    uint8_t log2_sao_offset_scale_luma;
+    uint8_t log2_sao_offset_scale_chroma;
+
+    // Inferred parameters
+    unsigned int *column_width;  ///< ColumnWidth
+    unsigned int *row_height;    ///< RowHeight
+    unsigned int *col_bd;        ///< ColBd
+    unsigned int *row_bd;        ///< RowBd
+    int *col_idxX;
+
+    int *ctb_addr_rs_to_ts; ///< CtbAddrRSToTS
+    int *ctb_addr_ts_to_rs; ///< CtbAddrTSToRS
+    int *tile_id;           ///< TileId
+    int *tile_pos_rs;       ///< TilePosRS
+    int *min_tb_addr_zs;    ///< MinTbAddrZS
+    int *min_tb_addr_zs_tab;///< MinTbAddrZS
+} HEVCPPS;
+
+typedef struct SliceHeader {
+    unsigned int pps_id;
+
+    ///< address (in raster order) of the first block in the current slice segment
+    unsigned int   slice_segment_addr;
+    ///< address (in raster order) of the first block in the current slice
+    unsigned int   slice_addr;
+
+    enum SliceType slice_type;
+
+    int pic_order_cnt_lsb;
+
+    uint8_t first_slice_in_pic_flag;
+    uint8_t dependent_slice_segment_flag;
+    uint8_t pic_output_flag;
+    uint8_t colour_plane_id;
+
+    ///< RPS coded in the slice header itself is stored here
+    ShortTermRPS slice_rps;
+    const ShortTermRPS *short_term_rps;
+    LongTermRPS long_term_rps;
+    unsigned int list_entry_lx[2][32];
+
+    uint8_t rpl_modification_flag[2];
+    uint8_t no_output_of_prior_pics_flag;
+    uint8_t slice_temporal_mvp_enabled_flag;
+
+    unsigned int nb_refs[2];
+
+    uint8_t slice_sample_adaptive_offset_flag[3];
+    uint8_t mvd_l1_zero_flag;
+
+    uint8_t cabac_init_flag;
+    uint8_t disable_deblocking_filter_flag; ///< slice_header_disable_deblocking_filter_flag
+    uint8_t slice_loop_filter_across_slices_enabled_flag;
+    uint8_t collocated_list;
+
+    unsigned int collocated_ref_idx;
+
+    int slice_qp_delta;
+    int slice_cb_qp_offset;
+    int slice_cr_qp_offset;
+
+    uint8_t cu_chroma_qp_offset_enabled_flag;
+
+    int beta_offset;    ///< beta_offset_div2 * 2
+    int tc_offset;      ///< tc_offset_div2 * 2
+
+    unsigned int max_num_merge_cand; ///< 5 - 5_minus_max_num_merge_cand
+
+    int *entry_point_offset;
+    int * offset;
+    int * size;
+    int num_entry_point_offsets;
+
+    int8_t slice_qp;
+
+    uint8_t luma_log2_weight_denom;
+    int16_t chroma_log2_weight_denom;
+
+    int16_t luma_weight_l0[16];
+    int16_t chroma_weight_l0[16][2];
+    int16_t chroma_weight_l1[16][2];
+    int16_t luma_weight_l1[16];
+
+    int16_t luma_offset_l0[16];
+    int16_t chroma_offset_l0[16][2];
+
+    int16_t luma_offset_l1[16];
+    int16_t chroma_offset_l1[16][2];
+
+    int slice_ctb_addr_rs;
+} SliceHeader;
+
+typedef struct CodingUnit {
+    int x;
+    int y;
+
+    enum PredMode pred_mode;    ///< PredMode
+    enum PartMode part_mode;    ///< PartMode
+
+    uint8_t rqt_root_cbf;
+
+    uint8_t pcm_flag;
+
+    // Inferred parameters
+    uint8_t intra_split_flag;   ///< IntraSplitFlag
+    uint8_t max_trafo_depth;    ///< MaxTrafoDepth
+    uint8_t cu_transquant_bypass_flag;
+} CodingUnit;
+
+typedef struct Mv {
+    int16_t x;  ///< horizontal component of motion vector
+    int16_t y;  ///< vertical component of motion vector
+} Mv;
+
+typedef struct MvField {
+    DECLARE_ALIGNED(4, Mv, mv)[2];
+    int8_t ref_idx[2];
+    int8_t pred_flag;
+} MvField;
+
+typedef struct NeighbourAvailable {
+    int cand_bottom_left;
+    int cand_left;
+    int cand_up;
+    int cand_up_left;
+    int cand_up_right;
+    int cand_up_right_sap;
+} NeighbourAvailable;
+
+typedef struct PredictionUnit {
+    int mpm_idx;
+    int rem_intra_luma_pred_mode;
+    uint8_t intra_pred_mode[4];
+    Mv mvd;
+    uint8_t merge_flag;
+    uint8_t intra_pred_mode_c[4];
+    uint8_t chroma_mode_c[4];
+} PredictionUnit;
+
+typedef struct TransformUnit {
+    int cu_qp_delta;
+
+    int res_scale_val;
+
+    // Inferred parameters
+    int intra_pred_mode;
+    int intra_pred_mode_c;
+    int chroma_mode_c;
+    uint8_t is_cu_qp_delta_coded;
+    uint8_t is_cu_chroma_qp_offset_coded;
+    int8_t  cu_qp_offset_cb;
+    int8_t  cu_qp_offset_cr;
+    uint8_t cross_pf;
+} TransformUnit;
+
+typedef struct DBParams {
+    int beta_offset;
+    int tc_offset;
+} DBParams;
+
+#define HEVC_FRAME_FLAG_OUTPUT    (1 << 0)
+#define HEVC_FRAME_FLAG_SHORT_REF (1 << 1)
+#define HEVC_FRAME_FLAG_LONG_REF  (1 << 2)
+#define HEVC_FRAME_FLAG_BUMPING   (1 << 3)
+
+typedef struct HEVCFrame {
+    AVFrame *frame;
+    ThreadFrame tf;
+#ifdef USE_PRED
+    MvField *tab_mvf;
+    RefPicList *refPicList;
+    RefPicListTab **rpl_tab;
+#endif
+    int ctb_count;
+    int poc;
+    struct HEVCFrame *collocated_ref;
+
+    HEVCWindow window;
+
+#ifdef USE_PRED
+    AVBufferRef *tab_mvf_buf;
+    AVBufferRef *rpl_tab_buf;
+    AVBufferRef *rpl_buf;
+#endif
+
+    /**
+     * A sequence counter, so that old frames are output first
+     * after a POC reset
+     */
+    uint16_t sequence;
+
+    /**
+     * A combination of HEVC_FRAME_FLAG_*
+     */
+    uint8_t flags;
+} HEVCFrame;
+
+typedef struct HEVCNAL {
+    uint8_t *rbsp_buffer;
+    int rbsp_buffer_size;
+
+    int size;
+    const uint8_t *data;
+} HEVCNAL;
+
+typedef struct HEVCLocalContext {
+    uint8_t cabac_state[HEVC_CONTEXTS];
+
+    uint8_t stat_coeff[4];
+
+    uint8_t first_qp_group;
+
+    GetBitContext gb;
+    CABACContext cc;
+
+    int8_t qp_y;
+    int8_t curr_qp_y;
+
+    int qPy_pred;
+
+    TransformUnit tu;
+
+    uint8_t ctb_left_flag;
+    uint8_t ctb_up_flag;
+    uint8_t ctb_up_right_flag;
+    uint8_t ctb_up_left_flag;
+    int     end_of_tiles_x;
+    int     end_of_tiles_y;
+    /* +7 is for subpixel interpolation, *2 for high bit depths */
+    DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer)[(MAX_PB_SIZE + 7) * EDGE_EMU_BUFFER_STRIDE * 2];
+    DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer2)[(MAX_PB_SIZE + 7) * EDGE_EMU_BUFFER_STRIDE * 2];
+    DECLARE_ALIGNED(16, int16_t, tmp [MAX_PB_SIZE * MAX_PB_SIZE]);
+
+    int ct_depth;
+    CodingUnit cu;
+    PredictionUnit pu;
+    NeighbourAvailable na;
+
+#define BOUNDARY_LEFT_SLICE     (1 << 0)
+#define BOUNDARY_LEFT_TILE      (1 << 1)
+#define BOUNDARY_UPPER_SLICE    (1 << 2)
+#define BOUNDARY_UPPER_TILE     (1 << 3)
+    /* properties of the boundary of the current CTB for the purposes
+     * of the deblocking filter */
+    int boundary_flags;
+} HEVCLocalContext;
+
+typedef struct HEVCContext {
+    const AVClass *c;  // needed by private avoptions
+    AVCodecContext *avctx;
+
+    struct HEVCContext  *sList[MAX_NB_THREADS];
+
+    HEVCLocalContext    *HEVClcList[MAX_NB_THREADS];
+    HEVCLocalContext    *HEVClc;
+
+    uint8_t             threads_type;
+    uint8_t             threads_number;
+
+    int                 width;
+    int                 height;
+
+    uint8_t *cabac_state;
+
+    /** 1 if the independent slice segment header was successfully parsed */
+    uint8_t slice_initialized;
+
+    AVFrame *frame;
+    AVFrame *output_frame;
+#ifdef USE_SAO_SMALL_BUFFER
+    uint8_t *sao_pixel_buffer;
+    uint8_t *sao_pixel_buffer_h[3];
+    uint8_t *sao_pixel_buffer_v[3];
+#else
+    AVFrame *tmp_frame;
+    AVFrame *sao_frame;
+#endif
+    const HEVCVPS *vps;
+    const HEVCSPS *sps;
+    const HEVCPPS *pps;
+    AVBufferRef *vps_list[MAX_VPS_COUNT];
+    AVBufferRef *sps_list[MAX_SPS_COUNT];
+    AVBufferRef *pps_list[MAX_PPS_COUNT];
+
+    AVBufferRef *current_sps;
+
+#ifdef USE_PRED
+    AVBufferPool *tab_mvf_pool;
+    AVBufferPool *rpl_tab_pool;
+
+    ///< candidate references for the current frame
+    RefPicList rps[5];
+#endif
+
+    SliceHeader sh;
+    SAOParams *sao;
+    DBParams *deblock;
+    enum NALUnitType nal_unit_type;
+    int temporal_id;  ///< temporal_id_plus1 - 1
+    HEVCFrame *ref;
+    HEVCFrame DPB[MAX_DPB_COUNT];
+    int poc;
+    int pocTid0;
+    int slice_idx; ///< number of the slice being currently decoded
+    int eos;       ///< current packet contains an EOS/EOB NAL
+    int last_eos;  ///< last packet contains an EOS/EOB NAL
+    int max_ra;
+    int bs_width;
+    int bs_height;
+
+    int is_decoded;
+
+#ifdef USE_FUNC_PTR
+    HEVCPredContext hpc;
+#endif
+    HEVCDSPContext hevcdsp;
+#ifdef USE_PRED
+    VideoDSPContext vdsp;
+#endif
+#if HAVE_BIGENDIAN
+    BswapDSPContext bdsp;
+#endif
+    int8_t *qp_y_tab;
+    uint8_t *horizontal_bs;
+    uint8_t *vertical_bs;
+
+    int32_t *tab_slice_address;
+
+    //  CU
+    uint8_t *skip_flag;
+    uint8_t *tab_ct_depth;
+    // PU
+    uint8_t *tab_ipm;
+
+    uint8_t *cbf_luma; // cbf_luma of colocated TU
+    uint8_t *is_pcm;
+
+    // CTB-level flags affecting loop filter operation
+    uint8_t *filter_slice_edges;
+
+    /** used on BE to byteswap the lines for checksumming */
+    uint8_t *checksum_buf;
+    int      checksum_buf_size;
+
+    /**
+     * Sequence counters for decoded and output frames, so that old
+     * frames are output first after a POC reset
+     */
+    uint16_t seq_decode;
+    uint16_t seq_output;
+
+    int enable_parallel_tiles;
+    int wpp_err;
+    int skipped_bytes;
+    int *skipped_bytes_pos;
+    int skipped_bytes_pos_size;
+
+    int *skipped_bytes_nal;
+    int **skipped_bytes_pos_nal;
+    int *skipped_bytes_pos_size_nal;
+
+    const uint8_t *data;
+
+    HEVCNAL *nals;
+    int nb_nals;
+    int nals_allocated;
+    // type of the first VCL NAL of the current frame
+    enum NALUnitType first_nal_type;
+
+#ifdef USE_MD5
+    // for checking the frame checksums
+    struct AVMD5 *md5_ctx;
+#endif
+    uint8_t       md5[3][16];
+    uint8_t is_md5;
+
+    uint8_t context_initialized;
+    uint8_t is_nalff;       ///< this flag is != 0 if bitstream is encapsulated
+                            ///< as a format defined in 14496-15
+    int apply_defdispwin;
+
+    int active_seq_parameter_set_id;
+
+    int nal_length_size;    ///< Number of bytes used for nal length (1, 2 or 4)
+    int nuh_layer_id;
+
+    /** frame packing arrangement variables */
+    int sei_frame_packing_present;
+    int frame_packing_arrangement_type;
+    int content_interpretation_type;
+    int quincunx_subsampling;
+
+    /** display orientation */
+    int sei_display_orientation_present;
+    int sei_anticlockwise_rotation;
+    int sei_hflip, sei_vflip;
+
+    int picture_struct;
+#ifdef USE_FRAME_DURATION_SEI
+    uint16_t frame_duration;
+#endif
+} HEVCContext;
+
+int ff_hevc_decode_short_term_rps(HEVCContext *s, ShortTermRPS *rps,
+                                  const HEVCSPS *sps, int is_slice_header);
+int ff_hevc_decode_nal_vps(HEVCContext *s);
+int ff_hevc_decode_nal_sps(HEVCContext *s);
+int ff_hevc_decode_nal_pps(HEVCContext *s);
+int ff_hevc_decode_nal_sei(HEVCContext *s);
+
+int ff_hevc_extract_rbsp(HEVCContext *s, const uint8_t *src, int length,
+                         HEVCNAL *nal);
+
+/**
+ * Mark all frames in DPB as unused for reference.
+ */
+void ff_hevc_clear_refs(HEVCContext *s);
+
+/**
+ * Drop all frames currently in DPB.
+ */
+void ff_hevc_flush_dpb(HEVCContext *s);
+
+/**
+ * Compute POC of the current frame and return it.
+ */
+int ff_hevc_compute_poc(HEVCContext *s, int poc_lsb);
+
+RefPicList *ff_hevc_get_ref_list(HEVCContext *s, HEVCFrame *frame,
+                                 int x0, int y0);
+
+/**
+ * Construct the reference picture sets for the current frame.
+ */
+int ff_hevc_frame_rps(HEVCContext *s);
+
+/**
+ * Construct the reference picture list(s) for the current slice.
+ */
+int ff_hevc_slice_rpl(HEVCContext *s);
+
+void ff_hevc_save_states(HEVCContext *s, int ctb_addr_ts);
+void ff_hevc_cabac_init(HEVCContext *s, int ctb_addr_ts);
+int ff_hevc_sao_merge_flag_decode(HEVCContext *s);
+int ff_hevc_sao_type_idx_decode(HEVCContext *s);
+int ff_hevc_sao_band_position_decode(HEVCContext *s);
+int ff_hevc_sao_offset_abs_decode(HEVCContext *s);
+int ff_hevc_sao_offset_sign_decode(HEVCContext *s);
+int ff_hevc_sao_eo_class_decode(HEVCContext *s);
+int ff_hevc_end_of_slice_flag_decode(HEVCContext *s);
+int ff_hevc_cu_transquant_bypass_flag_decode(HEVCContext *s);
+int ff_hevc_skip_flag_decode(HEVCContext *s, int x0, int y0,
+                             int x_cb, int y_cb);
+int ff_hevc_pred_mode_decode(HEVCContext *s);
+int ff_hevc_split_coding_unit_flag_decode(HEVCContext *s, int ct_depth,
+                                          int x0, int y0);
+int ff_hevc_part_mode_decode(HEVCContext *s, int log2_cb_size);
+int ff_hevc_pcm_flag_decode(HEVCContext *s);
+int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCContext *s);
+int ff_hevc_mpm_idx_decode(HEVCContext *s);
+int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCContext *s);
+int ff_hevc_intra_chroma_pred_mode_decode(HEVCContext *s);
+int ff_hevc_merge_idx_decode(HEVCContext *s);
+int ff_hevc_merge_flag_decode(HEVCContext *s);
+int ff_hevc_inter_pred_idc_decode(HEVCContext *s, int nPbW, int nPbH);
+int ff_hevc_ref_idx_lx_decode(HEVCContext *s, int num_ref_idx_lx);
+int ff_hevc_mvp_lx_flag_decode(HEVCContext *s);
+int ff_hevc_no_residual_syntax_flag_decode(HEVCContext *s);
+int ff_hevc_split_transform_flag_decode(HEVCContext *s, int log2_trafo_size);
+int ff_hevc_cbf_cb_cr_decode(HEVCContext *s, int trafo_depth);
+int ff_hevc_cbf_luma_decode(HEVCContext *s, int trafo_depth);
+int ff_hevc_log2_res_scale_abs(HEVCContext *s, int idx);
+int ff_hevc_res_scale_sign_flag(HEVCContext *s, int idx);
+
+/**
+ * Get the number of candidate references for the current frame.
+ */
+int ff_hevc_frame_nb_refs(HEVCContext *s);
+
+int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc);
+
+/**
+ * Find next frame in output order and put a reference to it in frame.
+ * @return 1 if a frame was output, 0 otherwise
+ */
+int ff_hevc_output_frame(HEVCContext *s, AVFrame *frame, int flush);
+
+void ff_hevc_bump_frame(HEVCContext *s);
+
+void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags);
+
+void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0,
+                                     int nPbW, int nPbH);
+void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0,
+                                int nPbW, int nPbH, int log2_cb_size,
+                                int part_idx, int merge_idx, MvField *mv);
+void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0,
+                              int nPbW, int nPbH, int log2_cb_size,
+                              int part_idx, int merge_idx,
+                              MvField *mv, int mvp_lx_flag, int LX);
+void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase,
+                     int log2_cb_size);
+void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0,
+                                           int log2_trafo_size);
+int ff_hevc_cu_qp_delta_sign_flag(HEVCContext *s);
+int ff_hevc_cu_qp_delta_abs(HEVCContext *s);
+int ff_hevc_cu_chroma_qp_offset_flag(HEVCContext *s);
+int ff_hevc_cu_chroma_qp_offset_idx(HEVCContext *s);
+void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size);
+void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size);
+void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0,
+                                 int log2_trafo_size, enum ScanType scan_idx,
+                                 int c_idx);
+
+void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size);
+
+#ifndef USE_FUNC_PTR
+void intra_pred (HEVCContext *s, int x0, int y0, int log2_size, int c_idx);
+#endif
+
+extern const uint8_t ff_hevc_qpel_extra_before[4];
+extern const uint8_t ff_hevc_qpel_extra_after[4];
+extern const uint8_t ff_hevc_qpel_extra[4];
+
+extern const uint8_t ff_hevc_diag_scan4x4_x[16];
+extern const uint8_t ff_hevc_diag_scan4x4_y[16];
+extern const uint8_t ff_hevc_diag_scan8x8_x[64];
+extern const uint8_t ff_hevc_diag_scan8x8_y[64];
+
+#endif /* AVCODEC_HEVC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevc_cabac.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1601 @@
+/*
+ * HEVC CABAC decoding
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ * Copyright (C) 2012 - 2013 Gildas Cocherel
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/common.h"
+
+#include "cabac_functions.h"
+#include "hevc.h"
+
+#define CABAC_MAX_BIN 31
+
+/**
+ * Number of bins per SyntaxElement.
+ */
+av_unused static const int8_t num_bins_in_se[] = {
+     1, // sao_merge_flag
+     1, // sao_type_idx
+     0, // sao_eo_class
+     0, // sao_band_position
+     0, // sao_offset_abs
+     0, // sao_offset_sign
+     0, // end_of_slice_flag
+     3, // split_coding_unit_flag
+     1, // cu_transquant_bypass_flag
+     3, // skip_flag
+     3, // cu_qp_delta
+     1, // pred_mode
+     4, // part_mode
+     0, // pcm_flag
+     1, // prev_intra_luma_pred_mode
+     0, // mpm_idx
+     0, // rem_intra_luma_pred_mode
+     2, // intra_chroma_pred_mode
+     1, // merge_flag
+     1, // merge_idx
+     5, // inter_pred_idc
+     2, // ref_idx_l0
+     2, // ref_idx_l1
+     2, // abs_mvd_greater0_flag
+     2, // abs_mvd_greater1_flag
+     0, // abs_mvd_minus2
+     0, // mvd_sign_flag
+     1, // mvp_lx_flag
+     1, // no_residual_data_flag
+     3, // split_transform_flag
+     2, // cbf_luma
+     4, // cbf_cb, cbf_cr
+     2, // transform_skip_flag[][]
+     2, // explicit_rdpcm_flag[][]
+     2, // explicit_rdpcm_dir_flag[][]
+    18, // last_significant_coeff_x_prefix
+    18, // last_significant_coeff_y_prefix
+     0, // last_significant_coeff_x_suffix
+     0, // last_significant_coeff_y_suffix
+     4, // significant_coeff_group_flag
+    44, // significant_coeff_flag
+    24, // coeff_abs_level_greater1_flag
+     6, // coeff_abs_level_greater2_flag
+     0, // coeff_abs_level_remaining
+     0, // coeff_sign_flag
+     8, // log2_res_scale_abs
+     2, // res_scale_sign_flag
+     1, // cu_chroma_qp_offset_flag
+     1, // cu_chroma_qp_offset_idx
+};
+
+/**
+ * Offset to ctxIdx 0 in init_values and states, indexed by SyntaxElement.
+ */
+static const int elem_offset[sizeof(num_bins_in_se)] = {
+    0, // sao_merge_flag
+    1, // sao_type_idx
+    2, // sao_eo_class
+    2, // sao_band_position
+    2, // sao_offset_abs
+    2, // sao_offset_sign
+    2, // end_of_slice_flag
+    2, // split_coding_unit_flag
+    5, // cu_transquant_bypass_flag
+    6, // skip_flag
+    9, // cu_qp_delta
+    12, // pred_mode
+    13, // part_mode
+    17, // pcm_flag
+    17, // prev_intra_luma_pred_mode
+    18, // mpm_idx
+    18, // rem_intra_luma_pred_mode
+    18, // intra_chroma_pred_mode
+    20, // merge_flag
+    21, // merge_idx
+    22, // inter_pred_idc
+    27, // ref_idx_l0
+    29, // ref_idx_l1
+    31, // abs_mvd_greater0_flag
+    33, // abs_mvd_greater1_flag
+    35, // abs_mvd_minus2
+    35, // mvd_sign_flag
+    35, // mvp_lx_flag
+    36, // no_residual_data_flag
+    37, // split_transform_flag
+    40, // cbf_luma
+    42, // cbf_cb, cbf_cr
+    46, // transform_skip_flag[][]
+    48, // explicit_rdpcm_flag[][]
+    50, // explicit_rdpcm_dir_flag[][]
+    52, // last_significant_coeff_x_prefix
+    70, // last_significant_coeff_y_prefix
+    88, // last_significant_coeff_x_suffix
+    88, // last_significant_coeff_y_suffix
+    88, // significant_coeff_group_flag
+    92, // significant_coeff_flag
+    136, // coeff_abs_level_greater1_flag
+    160, // coeff_abs_level_greater2_flag
+    166, // coeff_abs_level_remaining
+    166, // coeff_sign_flag
+    166, // log2_res_scale_abs
+    174, // res_scale_sign_flag
+    176, // cu_chroma_qp_offset_flag
+    177, // cu_chroma_qp_offset_idx
+};
+
+#define CNU 154
+/**
+ * Indexed by init_type
+ */
+static const uint8_t init_values[3][HEVC_CONTEXTS] = {
+    { // sao_merge_flag
+      153,
+      // sao_type_idx
+      200,
+      // split_coding_unit_flag
+      139, 141, 157,
+      // cu_transquant_bypass_flag
+      154,
+      // skip_flag
+      CNU, CNU, CNU,
+      // cu_qp_delta
+      154, 154, 154,
+      // pred_mode
+      CNU,
+      // part_mode
+      184, CNU, CNU, CNU,
+      // prev_intra_luma_pred_mode
+      184,
+      // intra_chroma_pred_mode
+      63, 139,
+      // merge_flag
+      CNU,
+      // merge_idx
+      CNU,
+      // inter_pred_idc
+      CNU, CNU, CNU, CNU, CNU,
+      // ref_idx_l0
+      CNU, CNU,
+      // ref_idx_l1
+      CNU, CNU,
+      // abs_mvd_greater0_flag
+      CNU, CNU,
+      // abs_mvd_greater1_flag
+      CNU, CNU,
+      // mvp_lx_flag
+      CNU,
+      // no_residual_data_flag
+      CNU,
+      // split_transform_flag
+      153, 138, 138,
+      // cbf_luma
+      111, 141,
+      // cbf_cb, cbf_cr
+      94, 138, 182, 154,
+      // transform_skip_flag
+      139, 139,
+      // explicit_rdpcm_flag
+      139, 139,
+      // explicit_rdpcm_dir_flag
+      139, 139,
+      // last_significant_coeff_x_prefix
+      110, 110, 124, 125, 140, 153, 125, 127, 140, 109, 111, 143, 127, 111,
+       79, 108, 123,  63,
+      // last_significant_coeff_y_prefix
+      110, 110, 124, 125, 140, 153, 125, 127, 140, 109, 111, 143, 127, 111,
+       79, 108, 123,  63,
+      // significant_coeff_group_flag
+      91, 171, 134, 141,
+      // significant_coeff_flag
+      111, 111, 125, 110, 110,  94, 124, 108, 124, 107, 125, 141, 179, 153,
+      125, 107, 125, 141, 179, 153, 125, 107, 125, 141, 179, 153, 125, 140,
+      139, 182, 182, 152, 136, 152, 136, 153, 136, 139, 111, 136, 139, 111,
+      141, 111,
+      // coeff_abs_level_greater1_flag
+      140,  92, 137, 138, 140, 152, 138, 139, 153,  74, 149,  92, 139, 107,
+      122, 152, 140, 179, 166, 182, 140, 227, 122, 197,
+      // coeff_abs_level_greater2_flag
+      138, 153, 136, 167, 152, 152,
+      // log2_res_scale_abs
+      154, 154, 154, 154, 154, 154, 154, 154,
+      // res_scale_sign_flag
+      154, 154,
+      // cu_chroma_qp_offset_flag
+      154,
+      // cu_chroma_qp_offset_idx
+      154,
+    },
+    { // sao_merge_flag
+      153,
+      // sao_type_idx
+      185,
+      // split_coding_unit_flag
+      107, 139, 126,
+      // cu_transquant_bypass_flag
+      154,
+      // skip_flag
+      197, 185, 201,
+      // cu_qp_delta
+      154, 154, 154,
+      // pred_mode
+      149,
+      // part_mode
+      154, 139, 154, 154,
+      // prev_intra_luma_pred_mode
+      154,
+      // intra_chroma_pred_mode
+      152, 139,
+      // merge_flag
+      110,
+      // merge_idx
+      122,
+      // inter_pred_idc
+      95, 79, 63, 31, 31,
+      // ref_idx_l0
+      153, 153,
+      // ref_idx_l1
+      153, 153,
+      // abs_mvd_greater0_flag
+      140, 198,
+      // abs_mvd_greater1_flag
+      140, 198,
+      // mvp_lx_flag
+      168,
+      // no_residual_data_flag
+      79,
+      // split_transform_flag
+      124, 138, 94,
+      // cbf_luma
+      153, 111,
+      // cbf_cb, cbf_cr
+      149, 107, 167, 154,
+      // transform_skip_flag
+      139, 139,
+      // explicit_rdpcm_flag
+      139, 139,
+      // explicit_rdpcm_dir_flag
+      139, 139,
+      // last_significant_coeff_x_prefix
+      125, 110,  94, 110,  95,  79, 125, 111, 110,  78, 110, 111, 111,  95,
+       94, 108, 123, 108,
+      // last_significant_coeff_y_prefix
+      125, 110,  94, 110,  95,  79, 125, 111, 110,  78, 110, 111, 111,  95,
+       94, 108, 123, 108,
+      // significant_coeff_group_flag
+      121, 140, 61, 154,
+      // significant_coeff_flag
+      155, 154, 139, 153, 139, 123, 123,  63, 153, 166, 183, 140, 136, 153,
+      154, 166, 183, 140, 136, 153, 154, 166, 183, 140, 136, 153, 154, 170,
+      153, 123, 123, 107, 121, 107, 121, 167, 151, 183, 140, 151, 183, 140,
+      140, 140,
+      // coeff_abs_level_greater1_flag
+      154, 196, 196, 167, 154, 152, 167, 182, 182, 134, 149, 136, 153, 121,
+      136, 137, 169, 194, 166, 167, 154, 167, 137, 182,
+      // coeff_abs_level_greater2_flag
+      107, 167, 91, 122, 107, 167,
+      // log2_res_scale_abs
+      154, 154, 154, 154, 154, 154, 154, 154,
+      // res_scale_sign_flag
+      154, 154,
+      // cu_chroma_qp_offset_flag
+      154,
+      // cu_chroma_qp_offset_idx
+      154,
+    },
+    { // sao_merge_flag
+      153,
+      // sao_type_idx
+      160,
+      // split_coding_unit_flag
+      107, 139, 126,
+      // cu_transquant_bypass_flag
+      154,
+      // skip_flag
+      197, 185, 201,
+      // cu_qp_delta
+      154, 154, 154,
+      // pred_mode
+      134,
+      // part_mode
+      154, 139, 154, 154,
+      // prev_intra_luma_pred_mode
+      183,
+      // intra_chroma_pred_mode
+      152, 139,
+      // merge_flag
+      154,
+      // merge_idx
+      137,
+      // inter_pred_idc
+      95, 79, 63, 31, 31,
+      // ref_idx_l0
+      153, 153,
+      // ref_idx_l1
+      153, 153,
+      // abs_mvd_greater0_flag
+      169, 198,
+      // abs_mvd_greater1_flag
+      169, 198,
+      // mvp_lx_flag
+      168,
+      // no_residual_data_flag
+      79,
+      // split_transform_flag
+      224, 167, 122,
+      // cbf_luma
+      153, 111,
+      // cbf_cb, cbf_cr
+      149, 92, 167, 154,
+      // transform_skip_flag
+      139, 139,
+      // explicit_rdpcm_flag
+      139, 139,
+      // explicit_rdpcm_dir_flag
+      139, 139,
+      // last_significant_coeff_x_prefix
+      125, 110, 124, 110,  95,  94, 125, 111, 111,  79, 125, 126, 111, 111,
+       79, 108, 123,  93,
+      // last_significant_coeff_y_prefix
+      125, 110, 124, 110,  95,  94, 125, 111, 111,  79, 125, 126, 111, 111,
+       79, 108, 123,  93,
+      // significant_coeff_group_flag
+      121, 140, 61, 154,
+      // significant_coeff_flag
+      170, 154, 139, 153, 139, 123, 123,  63, 124, 166, 183, 140, 136, 153,
+      154, 166, 183, 140, 136, 153, 154, 166, 183, 140, 136, 153, 154, 170,
+      153, 138, 138, 122, 121, 122, 121, 167, 151, 183, 140, 151, 183, 140,
+      140, 140,
+      // coeff_abs_level_greater1_flag
+      154, 196, 167, 167, 154, 152, 167, 182, 182, 134, 149, 136, 153, 121,
+      136, 122, 169, 208, 166, 167, 154, 152, 167, 182,
+      // coeff_abs_level_greater2_flag
+      107, 167, 91, 107, 107, 167,
+      // log2_res_scale_abs
+      154, 154, 154, 154, 154, 154, 154, 154,
+      // res_scale_sign_flag
+      154, 154,
+      // cu_chroma_qp_offset_flag
+      154,
+      // cu_chroma_qp_offset_idx
+      154,
+    },
+};
+
+static const uint8_t scan_1x1[1] = {
+    0,
+};
+
+static const uint8_t horiz_scan2x2_x[4] = {
+    0, 1, 0, 1,
+};
+
+static const uint8_t horiz_scan2x2_y[4] = {
+    0, 0, 1, 1
+};
+
+static const uint8_t horiz_scan4x4_x[16] = {
+    0, 1, 2, 3,
+    0, 1, 2, 3,
+    0, 1, 2, 3,
+    0, 1, 2, 3,
+};
+
+static const uint8_t horiz_scan4x4_y[16] = {
+    0, 0, 0, 0,
+    1, 1, 1, 1,
+    2, 2, 2, 2,
+    3, 3, 3, 3,
+};
+
+static const uint8_t horiz_scan8x8_inv[8][8] = {
+    {  0,  1,  2,  3, 16, 17, 18, 19, },
+    {  4,  5,  6,  7, 20, 21, 22, 23, },
+    {  8,  9, 10, 11, 24, 25, 26, 27, },
+    { 12, 13, 14, 15, 28, 29, 30, 31, },
+    { 32, 33, 34, 35, 48, 49, 50, 51, },
+    { 36, 37, 38, 39, 52, 53, 54, 55, },
+    { 40, 41, 42, 43, 56, 57, 58, 59, },
+    { 44, 45, 46, 47, 60, 61, 62, 63, },
+};
+
+static const uint8_t diag_scan2x2_x[4] = {
+    0, 0, 1, 1,
+};
+
+static const uint8_t diag_scan2x2_y[4] = {
+    0, 1, 0, 1,
+};
+
+static const uint8_t diag_scan2x2_inv[2][2] = {
+    { 0, 2, },
+    { 1, 3, },
+};
+
+const uint8_t ff_hevc_diag_scan4x4_x[16] = {
+    0, 0, 1, 0,
+    1, 2, 0, 1,
+    2, 3, 1, 2,
+    3, 2, 3, 3,
+};
+
+const uint8_t ff_hevc_diag_scan4x4_y[16] = {
+    0, 1, 0, 2,
+    1, 0, 3, 2,
+    1, 0, 3, 2,
+    1, 3, 2, 3,
+};
+
+static const uint8_t diag_scan4x4_inv[4][4] = {
+    { 0,  2,  5,  9, },
+    { 1,  4,  8, 12, },
+    { 3,  7, 11, 14, },
+    { 6, 10, 13, 15, },
+};
+
+const uint8_t ff_hevc_diag_scan8x8_x[64] = {
+    0, 0, 1, 0,
+    1, 2, 0, 1,
+    2, 3, 0, 1,
+    2, 3, 4, 0,
+    1, 2, 3, 4,
+    5, 0, 1, 2,
+    3, 4, 5, 6,
+    0, 1, 2, 3,
+    4, 5, 6, 7,
+    1, 2, 3, 4,
+    5, 6, 7, 2,
+    3, 4, 5, 6,
+    7, 3, 4, 5,
+    6, 7, 4, 5,
+    6, 7, 5, 6,
+    7, 6, 7, 7,
+};
+
+const uint8_t ff_hevc_diag_scan8x8_y[64] = {
+    0, 1, 0, 2,
+    1, 0, 3, 2,
+    1, 0, 4, 3,
+    2, 1, 0, 5,
+    4, 3, 2, 1,
+    0, 6, 5, 4,
+    3, 2, 1, 0,
+    7, 6, 5, 4,
+    3, 2, 1, 0,
+    7, 6, 5, 4,
+    3, 2, 1, 7,
+    6, 5, 4, 3,
+    2, 7, 6, 5,
+    4, 3, 7, 6,
+    5, 4, 7, 6,
+    5, 7, 6, 7,
+};
+
+static const uint8_t diag_scan8x8_inv[8][8] = {
+    {  0,  2,  5,  9, 14, 20, 27, 35, },
+    {  1,  4,  8, 13, 19, 26, 34, 42, },
+    {  3,  7, 12, 18, 25, 33, 41, 48, },
+    {  6, 11, 17, 24, 32, 40, 47, 53, },
+    { 10, 16, 23, 31, 39, 46, 52, 57, },
+    { 15, 22, 30, 38, 45, 51, 56, 60, },
+    { 21, 29, 37, 44, 50, 55, 59, 62, },
+    { 28, 36, 43, 49, 54, 58, 61, 63, },
+};
+
+void ff_hevc_save_states(HEVCContext *s, int ctb_addr_ts)
+{
+    if (s->pps->entropy_coding_sync_enabled_flag &&
+        (ctb_addr_ts % s->sps->ctb_width == 2 ||
+         (s->sps->ctb_width == 2 &&
+          ctb_addr_ts % s->sps->ctb_width == 0))) {
+        memcpy(s->cabac_state, s->HEVClc->cabac_state, HEVC_CONTEXTS);
+    }
+}
+
+static void load_states(HEVCContext *s)
+{
+    memcpy(s->HEVClc->cabac_state, s->cabac_state, HEVC_CONTEXTS);
+}
+
+static void cabac_reinit(HEVCLocalContext *lc)
+{
+    skip_bytes(&lc->cc, 0);
+}
+
+static void cabac_init_decoder(HEVCContext *s)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+    skip_bits(gb, 1);
+    align_get_bits(gb);
+    ff_init_cabac_decoder(&s->HEVClc->cc,
+                          gb->buffer + get_bits_count(gb) / 8,
+                          (get_bits_left(gb) + 7) / 8);
+}
+
+static void cabac_init_state(HEVCContext *s)
+{
+    int init_type = 2 - s->sh.slice_type;
+    int i;
+
+    if (s->sh.cabac_init_flag && s->sh.slice_type != I_SLICE)
+        init_type ^= 3;
+
+    for (i = 0; i < HEVC_CONTEXTS; i++) {
+        int init_value = init_values[init_type][i];
+        int m = (init_value >> 4) * 5 - 45;
+        int n = ((init_value & 15) << 3) - 16;
+        int pre = 2 * (((m * av_clip(s->sh.slice_qp, 0, 51)) >> 4) + n) - 127;
+
+        pre ^= pre >> 31;
+        if (pre > 124)
+            pre = 124 + (pre & 1);
+        s->HEVClc->cabac_state[i] = pre;
+    }
+
+    for (i = 0; i < 4; i++)
+        s->HEVClc->stat_coeff[i] = 0;
+}
+
+void ff_hevc_cabac_init(HEVCContext *s, int ctb_addr_ts)
+{
+    if (ctb_addr_ts == s->pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs]) {
+        cabac_init_decoder(s);
+        if (s->sh.dependent_slice_segment_flag == 0 ||
+            (s->pps->tiles_enabled_flag &&
+             s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[ctb_addr_ts - 1]))
+            cabac_init_state(s);
+
+        if (!s->sh.first_slice_in_pic_flag &&
+            s->pps->entropy_coding_sync_enabled_flag) {
+            if (ctb_addr_ts % s->sps->ctb_width == 0) {
+                if (s->sps->ctb_width == 1)
+                    cabac_init_state(s);
+                else if (s->sh.dependent_slice_segment_flag == 1)
+                    load_states(s);
+            }
+        }
+    } else {
+        if (s->pps->tiles_enabled_flag &&
+            s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[ctb_addr_ts - 1]) {
+            if (s->threads_number == 1)
+                cabac_reinit(s->HEVClc);
+            else
+                cabac_init_decoder(s);
+            cabac_init_state(s);
+        }
+        if (s->pps->entropy_coding_sync_enabled_flag) {
+            if (ctb_addr_ts % s->sps->ctb_width == 0) {
+                get_cabac_terminate(&s->HEVClc->cc);
+                if (s->threads_number == 1)
+                    cabac_reinit(s->HEVClc);
+                else
+                    cabac_init_decoder(s);
+
+                if (s->sps->ctb_width == 1)
+                    cabac_init_state(s);
+                else
+                    load_states(s);
+            }
+        }
+    }
+}
+
+#define GET_CABAC(ctx) get_cabac(&s->HEVClc->cc, &s->HEVClc->cabac_state[ctx])
+
+int ff_hevc_sao_merge_flag_decode(HEVCContext *s)
+{
+    return GET_CABAC(elem_offset[SAO_MERGE_FLAG]);
+}
+
+int ff_hevc_sao_type_idx_decode(HEVCContext *s)
+{
+    if (!GET_CABAC(elem_offset[SAO_TYPE_IDX]))
+        return 0;
+
+    if (!get_cabac_bypass(&s->HEVClc->cc))
+        return SAO_BAND;
+    return SAO_EDGE;
+}
+
+int ff_hevc_sao_band_position_decode(HEVCContext *s)
+{
+    int i;
+    int value = get_cabac_bypass(&s->HEVClc->cc);
+
+    for (i = 0; i < 4; i++)
+        value = (value << 1) | get_cabac_bypass(&s->HEVClc->cc);
+    return value;
+}
+
+int ff_hevc_sao_offset_abs_decode(HEVCContext *s)
+{
+    int i = 0;
+    int length = (1 << (FFMIN(s->sps->bit_depth, 10) - 5)) - 1;
+
+    while (i < length && get_cabac_bypass(&s->HEVClc->cc))
+        i++;
+    return i;
+}
+
+int ff_hevc_sao_offset_sign_decode(HEVCContext *s)
+{
+    return get_cabac_bypass(&s->HEVClc->cc);
+}
+
+int ff_hevc_sao_eo_class_decode(HEVCContext *s)
+{
+    int ret = get_cabac_bypass(&s->HEVClc->cc) << 1;
+    ret    |= get_cabac_bypass(&s->HEVClc->cc);
+    return ret;
+}
+
+int ff_hevc_end_of_slice_flag_decode(HEVCContext *s)
+{
+    return get_cabac_terminate(&s->HEVClc->cc);
+}
+
+int ff_hevc_cu_transquant_bypass_flag_decode(HEVCContext *s)
+{
+    return GET_CABAC(elem_offset[CU_TRANSQUANT_BYPASS_FLAG]);
+}
+
+int ff_hevc_skip_flag_decode(HEVCContext *s, int x0, int y0, int x_cb, int y_cb)
+{
+    int min_cb_width = s->sps->min_cb_width;
+    int inc = 0;
+    int x0b = x0 & ((1 << s->sps->log2_ctb_size) - 1);
+    int y0b = y0 & ((1 << s->sps->log2_ctb_size) - 1);
+
+    if (s->HEVClc->ctb_left_flag || x0b)
+        inc = !!SAMPLE_CTB(s->skip_flag, x_cb - 1, y_cb);
+    if (s->HEVClc->ctb_up_flag || y0b)
+        inc += !!SAMPLE_CTB(s->skip_flag, x_cb, y_cb - 1);
+
+    return GET_CABAC(elem_offset[SKIP_FLAG] + inc);
+}
+
+int ff_hevc_cu_qp_delta_abs(HEVCContext *s)
+{
+    int prefix_val = 0;
+    int suffix_val = 0;
+    int inc = 0;
+
+    while (prefix_val < 5 && GET_CABAC(elem_offset[CU_QP_DELTA] + inc)) {
+        prefix_val++;
+        inc = 1;
+    }
+    if (prefix_val >= 5) {
+        int k = 0;
+        while (k < CABAC_MAX_BIN && get_cabac_bypass(&s->HEVClc->cc)) {
+            suffix_val += 1 << k;
+            k++;
+        }
+        if (k == CABAC_MAX_BIN)
+            av_log(s->avctx, AV_LOG_ERROR, "CABAC_MAX_BIN : %d\n", k);
+
+        while (k--)
+            suffix_val += get_cabac_bypass(&s->HEVClc->cc) << k;
+    }
+    return prefix_val + suffix_val;
+}
+
+int ff_hevc_cu_qp_delta_sign_flag(HEVCContext *s)
+{
+    return get_cabac_bypass(&s->HEVClc->cc);
+}
+
+int ff_hevc_cu_chroma_qp_offset_flag(HEVCContext *s)
+{
+    return GET_CABAC(elem_offset[CU_CHROMA_QP_OFFSET_FLAG]);
+}
+
+int ff_hevc_cu_chroma_qp_offset_idx(HEVCContext *s)
+{
+    int c_max= FFMAX(5, s->pps->chroma_qp_offset_list_len_minus1);
+    int i = 0;
+
+    while (i < c_max && GET_CABAC(elem_offset[CU_CHROMA_QP_OFFSET_IDX]))
+        i++;
+
+    return i;
+}
+
+int ff_hevc_pred_mode_decode(HEVCContext *s)
+{
+    return GET_CABAC(elem_offset[PRED_MODE_FLAG]);
+}
+
+int ff_hevc_split_coding_unit_flag_decode(HEVCContext *s, int ct_depth, int x0, int y0)
+{
+    int inc = 0, depth_left = 0, depth_top = 0;
+    int x0b  = x0 & ((1 << s->sps->log2_ctb_size) - 1);
+    int y0b  = y0 & ((1 << s->sps->log2_ctb_size) - 1);
+    int x_cb = x0 >> s->sps->log2_min_cb_size;
+    int y_cb = y0 >> s->sps->log2_min_cb_size;
+
+    if (s->HEVClc->ctb_left_flag || x0b)
+        depth_left = s->tab_ct_depth[(y_cb) * s->sps->min_cb_width + x_cb - 1];
+    if (s->HEVClc->ctb_up_flag || y0b)
+        depth_top = s->tab_ct_depth[(y_cb - 1) * s->sps->min_cb_width + x_cb];
+
+    inc += (depth_left > ct_depth);
+    inc += (depth_top  > ct_depth);
+
+    return GET_CABAC(elem_offset[SPLIT_CODING_UNIT_FLAG] + inc);
+}
+
+
+#ifdef USE_PRED
+int ff_hevc_part_mode_decode(HEVCContext *s, int log2_cb_size)
+{
+    if (GET_CABAC(elem_offset[PART_MODE])) // 1
+        return PART_2Nx2N;
+    if (log2_cb_size == s->sps->log2_min_cb_size) {
+        if (s->HEVClc->cu.pred_mode == MODE_INTRA) // 0
+            return PART_NxN;
+        if (GET_CABAC(elem_offset[PART_MODE] + 1)) // 01
+            return PART_2NxN;
+        if (log2_cb_size == 3) // 00
+            return PART_Nx2N;
+        if (GET_CABAC(elem_offset[PART_MODE] + 2)) // 001
+            return PART_Nx2N;
+        return PART_NxN; // 000
+    }
+
+    if (!s->sps->amp_enabled_flag) {
+        if (GET_CABAC(elem_offset[PART_MODE] + 1)) // 01
+            return PART_2NxN;
+        return PART_Nx2N;
+    }
+
+    if (GET_CABAC(elem_offset[PART_MODE] + 1)) { // 01X, 01XX
+        if (GET_CABAC(elem_offset[PART_MODE] + 3)) // 011
+            return PART_2NxN;
+        if (get_cabac_bypass(&s->HEVClc->cc)) // 0101
+            return PART_2NxnD;
+        return PART_2NxnU; // 0100
+    }
+
+    if (GET_CABAC(elem_offset[PART_MODE] + 3)) // 001
+        return PART_Nx2N;
+    if (get_cabac_bypass(&s->HEVClc->cc)) // 0001
+        return PART_nRx2N;
+    return PART_nLx2N;  // 0000
+}
+#else
+int ff_hevc_part_mode_decode(HEVCContext *s, int log2_cb_size)
+{
+    if (GET_CABAC(elem_offset[PART_MODE])) // 1
+        return PART_2Nx2N;
+    else
+        return PART_NxN;
+}
+#endif
+
+int ff_hevc_pcm_flag_decode(HEVCContext *s)
+{
+    return get_cabac_terminate(&s->HEVClc->cc);
+}
+
+int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCContext *s)
+{
+    return GET_CABAC(elem_offset[PREV_INTRA_LUMA_PRED_FLAG]);
+}
+
+int ff_hevc_mpm_idx_decode(HEVCContext *s)
+{
+    int i = 0;
+    while (i < 2 && get_cabac_bypass(&s->HEVClc->cc))
+        i++;
+    return i;
+}
+
+int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCContext *s)
+{
+    int i;
+    int value = get_cabac_bypass(&s->HEVClc->cc);
+
+    for (i = 0; i < 4; i++)
+        value = (value << 1) | get_cabac_bypass(&s->HEVClc->cc);
+    return value;
+}
+
+int ff_hevc_intra_chroma_pred_mode_decode(HEVCContext *s)
+{
+    int ret;
+    if (!GET_CABAC(elem_offset[INTRA_CHROMA_PRED_MODE]))
+        return 4;
+
+    ret  = get_cabac_bypass(&s->HEVClc->cc) << 1;
+    ret |= get_cabac_bypass(&s->HEVClc->cc);
+    return ret;
+}
+
+int ff_hevc_merge_idx_decode(HEVCContext *s)
+{
+    int i = GET_CABAC(elem_offset[MERGE_IDX]);
+
+    if (i != 0) {
+        while (i < s->sh.max_num_merge_cand-1 && get_cabac_bypass(&s->HEVClc->cc))
+            i++;
+    }
+    return i;
+}
+
+int ff_hevc_merge_flag_decode(HEVCContext *s)
+{
+    return GET_CABAC(elem_offset[MERGE_FLAG]);
+}
+
+int ff_hevc_inter_pred_idc_decode(HEVCContext *s, int nPbW, int nPbH)
+{
+    if (nPbW + nPbH == 12)
+        return GET_CABAC(elem_offset[INTER_PRED_IDC] + 4);
+    if (GET_CABAC(elem_offset[INTER_PRED_IDC] + s->HEVClc->ct_depth))
+        return PRED_BI;
+
+    return GET_CABAC(elem_offset[INTER_PRED_IDC] + 4);
+}
+
+int ff_hevc_ref_idx_lx_decode(HEVCContext *s, int num_ref_idx_lx)
+{
+    int i = 0;
+    int max = num_ref_idx_lx - 1;
+    int max_ctx = FFMIN(max, 2);
+
+    while (i < max_ctx && GET_CABAC(elem_offset[REF_IDX_L0] + i))
+        i++;
+    if (i == 2) {
+        while (i < max && get_cabac_bypass(&s->HEVClc->cc))
+            i++;
+    }
+
+    return i;
+}
+
+int ff_hevc_mvp_lx_flag_decode(HEVCContext *s)
+{
+    return GET_CABAC(elem_offset[MVP_LX_FLAG]);
+}
+
+int ff_hevc_no_residual_syntax_flag_decode(HEVCContext *s)
+{
+    return GET_CABAC(elem_offset[NO_RESIDUAL_DATA_FLAG]);
+}
+
+static av_always_inline int abs_mvd_greater0_flag_decode(HEVCContext *s)
+{
+    return GET_CABAC(elem_offset[ABS_MVD_GREATER0_FLAG]);
+}
+
+static av_always_inline int abs_mvd_greater1_flag_decode(HEVCContext *s)
+{
+    return GET_CABAC(elem_offset[ABS_MVD_GREATER1_FLAG] + 1);
+}
+
+static av_always_inline int mvd_decode(HEVCContext *s)
+{
+    int ret = 2;
+    int k = 1;
+
+    while (k < CABAC_MAX_BIN && get_cabac_bypass(&s->HEVClc->cc)) {
+        ret += 1 << k;
+        k++;
+    }
+    if (k == CABAC_MAX_BIN)
+        av_log(s->avctx, AV_LOG_ERROR, "CABAC_MAX_BIN : %d\n", k);
+    while (k--)
+        ret += get_cabac_bypass(&s->HEVClc->cc) << k;
+    return get_cabac_bypass_sign(&s->HEVClc->cc, -ret);
+}
+
+static av_always_inline int mvd_sign_flag_decode(HEVCContext *s)
+{
+    return get_cabac_bypass_sign(&s->HEVClc->cc, -1);
+}
+
+int ff_hevc_split_transform_flag_decode(HEVCContext *s, int log2_trafo_size)
+{
+    return GET_CABAC(elem_offset[SPLIT_TRANSFORM_FLAG] + 5 - log2_trafo_size);
+}
+
+int ff_hevc_cbf_cb_cr_decode(HEVCContext *s, int trafo_depth)
+{
+    return GET_CABAC(elem_offset[CBF_CB_CR] + trafo_depth);
+}
+
+int ff_hevc_cbf_luma_decode(HEVCContext *s, int trafo_depth)
+{
+    return GET_CABAC(elem_offset[CBF_LUMA] + !trafo_depth);
+}
+
+static int ff_hevc_transform_skip_flag_decode(HEVCContext *s, int c_idx)
+{
+    return GET_CABAC(elem_offset[TRANSFORM_SKIP_FLAG] + !!c_idx);
+}
+
+static int explicit_rdpcm_flag_decode(HEVCContext *s, int c_idx)
+{
+    return GET_CABAC(elem_offset[EXPLICIT_RDPCM_FLAG] + !!c_idx);
+}
+
+static int explicit_rdpcm_dir_flag_decode(HEVCContext *s, int c_idx)
+{
+    return GET_CABAC(elem_offset[EXPLICIT_RDPCM_DIR_FLAG] + !!c_idx);
+}
+
+int ff_hevc_log2_res_scale_abs(HEVCContext *s, int idx) {
+    int i =0;
+
+    while (i < 4 && GET_CABAC(elem_offset[LOG2_RES_SCALE_ABS] + 4 * idx + i))
+        i++;
+
+    return i;
+}
+
+int ff_hevc_res_scale_sign_flag(HEVCContext *s, int idx) {
+    return GET_CABAC(elem_offset[RES_SCALE_SIGN_FLAG] + idx);
+}
+
+static av_always_inline void last_significant_coeff_xy_prefix_decode(HEVCContext *s, int c_idx,
+                                                   int log2_size, int *last_scx_prefix, int *last_scy_prefix)
+{
+    int i = 0;
+    int max = (log2_size << 1) - 1;
+    int ctx_offset, ctx_shift;
+
+    if (!c_idx) {
+        ctx_offset = 3 * (log2_size - 2)  + ((log2_size - 1) >> 2);
+        ctx_shift = (log2_size + 1) >> 2;
+    } else {
+        ctx_offset = 15;
+        ctx_shift = log2_size - 2;
+    }
+    while (i < max &&
+           GET_CABAC(elem_offset[LAST_SIGNIFICANT_COEFF_X_PREFIX] + (i >> ctx_shift) + ctx_offset))
+        i++;
+    *last_scx_prefix = i;
+
+    i = 0;
+    while (i < max &&
+           GET_CABAC(elem_offset[LAST_SIGNIFICANT_COEFF_Y_PREFIX] + (i >> ctx_shift) + ctx_offset))
+        i++;
+    *last_scy_prefix = i;
+}
+
+static av_always_inline int last_significant_coeff_suffix_decode(HEVCContext *s,
+                                                 int last_significant_coeff_prefix)
+{
+    int i;
+    int length = (last_significant_coeff_prefix >> 1) - 1;
+    int value = get_cabac_bypass(&s->HEVClc->cc);
+
+    for (i = 1; i < length; i++)
+        value = (value << 1) | get_cabac_bypass(&s->HEVClc->cc);
+    return value;
+}
+
+static av_always_inline int significant_coeff_group_flag_decode(HEVCContext *s, int c_idx, int ctx_cg)
+{
+    int inc;
+
+    inc = FFMIN(ctx_cg, 1) + (c_idx>0 ? 2 : 0);
+
+    return GET_CABAC(elem_offset[SIGNIFICANT_COEFF_GROUP_FLAG] + inc);
+}
+static av_always_inline int significant_coeff_flag_decode(HEVCContext *s, int x_c, int y_c,
+                                           int offset, const uint8_t *ctx_idx_map)
+{
+    int inc = ctx_idx_map[(y_c << 2) + x_c] + offset;
+    return GET_CABAC(elem_offset[SIGNIFICANT_COEFF_FLAG] + inc);
+}
+
+static av_always_inline int significant_coeff_flag_decode_0(HEVCContext *s, int c_idx, int offset)
+{
+    return GET_CABAC(elem_offset[SIGNIFICANT_COEFF_FLAG] + offset);
+}
+
+static av_always_inline int coeff_abs_level_greater1_flag_decode(HEVCContext *s, int c_idx, int inc)
+{
+
+    if (c_idx > 0)
+        inc += 16;
+
+    return GET_CABAC(elem_offset[COEFF_ABS_LEVEL_GREATER1_FLAG] + inc);
+}
+
+static av_always_inline int coeff_abs_level_greater2_flag_decode(HEVCContext *s, int c_idx, int inc)
+{
+    if (c_idx > 0)
+        inc += 4;
+
+    return GET_CABAC(elem_offset[COEFF_ABS_LEVEL_GREATER2_FLAG] + inc);
+}
+
+static av_always_inline int coeff_abs_level_remaining_decode(HEVCContext *s, int rc_rice_param)
+{
+    int prefix = 0;
+    int suffix = 0;
+    int last_coeff_abs_level_remaining;
+    int i;
+
+    while (prefix < CABAC_MAX_BIN && get_cabac_bypass(&s->HEVClc->cc))
+        prefix++;
+    if (prefix == CABAC_MAX_BIN)
+        av_log(s->avctx, AV_LOG_ERROR, "CABAC_MAX_BIN : %d\n", prefix);
+    if (prefix < 3) {
+        for (i = 0; i < rc_rice_param; i++)
+            suffix = (suffix << 1) | get_cabac_bypass(&s->HEVClc->cc);
+        last_coeff_abs_level_remaining = (prefix << rc_rice_param) + suffix;
+    } else {
+        int prefix_minus3 = prefix - 3;
+        for (i = 0; i < prefix_minus3 + rc_rice_param; i++)
+            suffix = (suffix << 1) | get_cabac_bypass(&s->HEVClc->cc);
+        last_coeff_abs_level_remaining = (((1 << prefix_minus3) + 3 - 1)
+                                              << rc_rice_param) + suffix;
+    }
+    return last_coeff_abs_level_remaining;
+}
+
+static av_always_inline int coeff_sign_flag_decode(HEVCContext *s, uint8_t nb)
+{
+    int i;
+    int ret = 0;
+
+    for (i = 0; i < nb; i++)
+        ret = (ret << 1) | get_cabac_bypass(&s->HEVClc->cc);
+    return ret;
+}
+
+/*
+ * Decode the residual_coding() syntax of one transform block at (x0, y0)
+ * with size (1 << log2_trafo_size) for plane c_idx, dequantize the
+ * coefficient levels, run the inverse transform (or transform skip /
+ * RDPCM), and add the reconstructed residual onto the prediction already
+ * present in s->frame.
+ *
+ * Coefficients are parsed in 4x4 coefficient groups (CGs), scanned from
+ * the last significant coefficient back towards DC.  The order of the
+ * *_decode() calls below mirrors the bitstream syntax exactly and must
+ * not be changed.
+ */
+void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0,
+                                int log2_trafo_size, enum ScanType scan_idx,
+                                int c_idx)
+{
+/* Map scan position n inside the current CG (x_cg, y_cg) to absolute
+ * coefficient coordinates (x_c, y_c). */
+#define GET_COORD(offset, n)                                    \
+    do {                                                        \
+        x_c = (x_cg << 2) + scan_x_off[n];                      \
+        y_c = (y_cg << 2) + scan_y_off[n];                      \
+    } while (0)
+    HEVCLocalContext *lc = s->HEVClc;
+    int transform_skip_flag = 0;
+
+    int last_significant_coeff_x, last_significant_coeff_y;
+    int last_scan_pos;
+    int n_end;
+    int num_coeff = 0;
+    int greater1_ctx = 1;
+
+    int num_last_subset;
+    int x_cg_last_sig, y_cg_last_sig;
+
+    const uint8_t *scan_x_cg, *scan_y_cg, *scan_x_off, *scan_y_off;
+
+    ptrdiff_t stride = s->frame->linesize[c_idx];
+    int hshift = s->sps->hshift[c_idx];
+    int vshift = s->sps->vshift[c_idx];
+    uint8_t *dst = &s->frame->data[c_idx][(y0 >> vshift) * stride +
+                                          ((x0 >> hshift) << s->sps->pixel_shift)];
+    int16_t *coeffs = (int16_t*)(c_idx ? lc->edge_emu_buffer2 : lc->edge_emu_buffer);
+    uint8_t significant_coeff_group_flag[8][8] = {{0}};
+    int explicit_rdpcm_flag = 0;
+    int explicit_rdpcm_dir_flag;
+
+    int trafo_size = 1 << log2_trafo_size;
+    int i;
+    int qp,shift,add,scale,scale_m;
+    const uint8_t level_scale[] = { 40, 45, 51, 57, 64, 72 };
+    const uint8_t *scale_matrix = NULL;
+    uint8_t dc_scale;
+    int pred_mode_intra = (c_idx == 0) ? lc->tu.intra_pred_mode :
+                                         lc->tu.intra_pred_mode_c;
+
+    /* all levels default to zero; only significant ones are written below */
+    memset(coeffs, 0, trafo_size * trafo_size * sizeof(int16_t));
+
+    // Derive QP for dequant
+    if (!lc->cu.cu_transquant_bypass_flag) {
+        static const int qp_c[] = { 29, 30, 31, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37 };
+        /* rem6[qp] == qp % 6, div6[qp] == qp / 6, precomputed */
+        static const uint8_t rem6[51 + 4 * 6 + 1] = {
+            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
+            3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+            4, 5, 0, 1, 2, 3, 4, 5, 0, 1
+        };
+
+        static const uint8_t div6[51 + 4 * 6 + 1] = {
+            0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3,  3,  3,
+            3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6,  6,  6,
+            7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+            10, 10, 11, 11, 11, 11, 11, 11, 12, 12
+        };
+        int qp_y = lc->qp_y;
+
+        if (s->pps->transform_skip_enabled_flag &&
+            log2_trafo_size <= s->pps->log2_max_transform_skip_block_size) {
+            transform_skip_flag = ff_hevc_transform_skip_flag_decode(s, c_idx);
+        }
+
+        if (c_idx == 0) {
+            qp = qp_y + s->sps->qp_bd_offset;
+        } else {
+            int qp_i, offset;
+
+            if (c_idx == 1)
+                offset = s->pps->cb_qp_offset + s->sh.slice_cb_qp_offset +
+                         lc->tu.cu_qp_offset_cb;
+            else
+                offset = s->pps->cr_qp_offset + s->sh.slice_cr_qp_offset +
+                         lc->tu.cu_qp_offset_cr;
+
+            /* map luma QP to chroma QP; 4:2:0 uses the qp_c table */
+            qp_i = av_clip(qp_y + offset, - s->sps->qp_bd_offset, 57);
+            if (s->sps->chroma_format_idc == 1) {
+                if (qp_i < 30)
+                    qp = qp_i;
+                else if (qp_i > 43)
+                    qp = qp_i - 6;
+                else
+                    qp = qp_c[qp_i - 30];
+            } else {
+                if (qp_i > 51)
+                    qp = 51;
+                else
+                    qp = qp_i;
+            }
+
+            qp += s->sps->qp_bd_offset;
+        }
+
+        /* dequant parameters: scale = level_scale[qp % 6] << (qp / 6) */
+        shift    = s->sps->bit_depth + log2_trafo_size - 5;
+        add      = 1 << (shift-1);
+        scale    = level_scale[rem6[qp]] << (div6[qp]);
+        scale_m  = 16; // default when no custom scaling lists.
+        dc_scale = 16;
+
+        /* custom scaling matrices override the flat default of 16 */
+        if (s->sps->scaling_list_enable_flag && !(transform_skip_flag && log2_trafo_size > 2)) {
+            const ScalingList *sl = s->pps->scaling_list_data_present_flag ?
+            &s->pps->scaling_list : &s->sps->scaling_list;
+            int matrix_id = lc->cu.pred_mode != MODE_INTRA;
+
+            matrix_id = 3 * matrix_id + c_idx;
+
+            scale_matrix = sl->sl[log2_trafo_size - 2][matrix_id];
+            if (log2_trafo_size >= 4)
+                dc_scale = sl->sl_dc[log2_trafo_size - 4][matrix_id];
+        }
+    } else {
+        /* transquant bypass (lossless): no dequantization takes place */
+        shift        = 0;
+        add          = 0;
+        scale        = 0;
+        dc_scale     = 0;
+    }
+
+#ifdef USE_PRED
+    /* explicit RDPCM is only signalled for inter blocks coded with
+     * transform skip or in lossless mode */
+    if (lc->cu.pred_mode == MODE_INTER && s->sps->explicit_rdpcm_enabled_flag &&
+        (transform_skip_flag || lc->cu.cu_transquant_bypass_flag)) {
+        explicit_rdpcm_flag = explicit_rdpcm_flag_decode(s, c_idx);
+        if (explicit_rdpcm_flag) {
+            explicit_rdpcm_dir_flag = explicit_rdpcm_dir_flag_decode(s, c_idx);
+        }
+    }
+#endif
+
+    /* parse the (x, y) position of the last significant coefficient
+     * (prefix, then an optional suffix for values > 3) */
+    last_significant_coeff_xy_prefix_decode(s, c_idx, log2_trafo_size,
+                                           &last_significant_coeff_x, &last_significant_coeff_y);
+
+    if (last_significant_coeff_x > 3) {
+        int suffix = last_significant_coeff_suffix_decode(s, last_significant_coeff_x);
+        last_significant_coeff_x = (1 << ((last_significant_coeff_x >> 1) - 1)) *
+        (2 + (last_significant_coeff_x & 1)) +
+        suffix;
+    }
+
+    if (last_significant_coeff_y > 3) {
+        int suffix = last_significant_coeff_suffix_decode(s, last_significant_coeff_y);
+        last_significant_coeff_y = (1 << ((last_significant_coeff_y >> 1) - 1)) *
+        (2 + (last_significant_coeff_y & 1)) +
+        suffix;
+    }
+
+    if (scan_idx == SCAN_VERT)
+        FFSWAP(int, last_significant_coeff_x, last_significant_coeff_y);
+
+    x_cg_last_sig = last_significant_coeff_x >> 2;
+    y_cg_last_sig = last_significant_coeff_y >> 2;
+
+    /* select the CG scan and in-CG scan tables and convert the last
+     * significant position into a scan index (num_coeff) */
+    switch (scan_idx) {
+    case SCAN_DIAG: {
+        int last_x_c = last_significant_coeff_x & 3;
+        int last_y_c = last_significant_coeff_y & 3;
+
+        scan_x_off = ff_hevc_diag_scan4x4_x;
+        scan_y_off = ff_hevc_diag_scan4x4_y;
+        num_coeff = diag_scan4x4_inv[last_y_c][last_x_c];
+        if (trafo_size == 4) {
+            scan_x_cg = scan_1x1;
+            scan_y_cg = scan_1x1;
+        } else if (trafo_size == 8) {
+            num_coeff += diag_scan2x2_inv[y_cg_last_sig][x_cg_last_sig] << 4;
+            scan_x_cg = diag_scan2x2_x;
+            scan_y_cg = diag_scan2x2_y;
+        } else if (trafo_size == 16) {
+            num_coeff += diag_scan4x4_inv[y_cg_last_sig][x_cg_last_sig] << 4;
+            scan_x_cg = ff_hevc_diag_scan4x4_x;
+            scan_y_cg = ff_hevc_diag_scan4x4_y;
+        } else { // trafo_size == 32
+            num_coeff += diag_scan8x8_inv[y_cg_last_sig][x_cg_last_sig] << 4;
+            scan_x_cg = ff_hevc_diag_scan8x8_x;
+            scan_y_cg = ff_hevc_diag_scan8x8_y;
+        }
+        break;
+    }
+    case SCAN_HORIZ:
+        scan_x_cg = horiz_scan2x2_x;
+        scan_y_cg = horiz_scan2x2_y;
+        scan_x_off = horiz_scan4x4_x;
+        scan_y_off = horiz_scan4x4_y;
+        num_coeff = horiz_scan8x8_inv[last_significant_coeff_y][last_significant_coeff_x];
+        break;
+    default: //SCAN_VERT
+        scan_x_cg = horiz_scan2x2_y;
+        scan_y_cg = horiz_scan2x2_x;
+        scan_x_off = horiz_scan4x4_y;
+        scan_y_off = horiz_scan4x4_x;
+        num_coeff = horiz_scan8x8_inv[last_significant_coeff_x][last_significant_coeff_y];
+        break;
+    }
+    num_coeff++;
+    num_last_subset = (num_coeff - 1) >> 4;
+
+    /* iterate over 4x4 coefficient groups from the last one down to DC */
+    for (i = num_last_subset; i >= 0; i--) {
+        int n, m;
+        int x_cg, y_cg, x_c, y_c, pos;
+        int implicit_non_zero_coeff = 0;
+        int64_t trans_coeff_level;
+        int prev_sig = 0;
+        int offset = i << 4;
+        int rice_init = 0;
+
+        uint8_t significant_coeff_flag_idx[16];
+        uint8_t nb_significant_coeff_flag = 0;
+
+        x_cg = scan_x_cg[i];
+        y_cg = scan_y_cg[i];
+
+        /* coded_sub_block_flag: implicit (always 1) for the first and
+         * last CG, coded with a neighbour-derived context otherwise */
+        if ((i < num_last_subset) && (i > 0)) {
+            int ctx_cg = 0;
+            if (x_cg < (1 << (log2_trafo_size - 2)) - 1)
+                ctx_cg += significant_coeff_group_flag[x_cg + 1][y_cg];
+            if (y_cg < (1 << (log2_trafo_size - 2)) - 1)
+                ctx_cg += significant_coeff_group_flag[x_cg][y_cg + 1];
+
+            significant_coeff_group_flag[x_cg][y_cg] =
+                significant_coeff_group_flag_decode(s, c_idx, ctx_cg);
+            implicit_non_zero_coeff = 1;
+        } else {
+            significant_coeff_group_flag[x_cg][y_cg] =
+            ((x_cg == x_cg_last_sig && y_cg == y_cg_last_sig) ||
+             (x_cg == 0 && y_cg == 0));
+        }
+
+        last_scan_pos = num_coeff - offset - 1;
+
+        if (i == num_last_subset) {
+            n_end = last_scan_pos - 1;
+            significant_coeff_flag_idx[0] = last_scan_pos;
+            nb_significant_coeff_flag = 1;
+        } else {
+            n_end = 15;
+        }
+
+        if (x_cg < ((1 << log2_trafo_size) - 1) >> 2)
+            prev_sig = !!significant_coeff_group_flag[x_cg + 1][y_cg];
+        if (y_cg < ((1 << log2_trafo_size) - 1) >> 2)
+            prev_sig += (!!significant_coeff_group_flag[x_cg][y_cg + 1] << 1);
+
+        if (significant_coeff_group_flag[x_cg][y_cg] && n_end >= 0) {
+            static const uint8_t ctx_idx_map[] = {
+                0, 1, 4, 5, 2, 3, 4, 5, 6, 6, 8, 8, 7, 7, 8, 8, // log2_trafo_size == 2
+                1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, // prev_sig == 0
+                2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, // prev_sig == 1
+                2, 1, 0, 0, 2, 1, 0, 0, 2, 1, 0, 0, 2, 1, 0, 0, // prev_sig == 2
+                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2  // default
+            };
+            const uint8_t *ctx_idx_map_p;
+            int scf_offset = 0;
+            if (s->sps->transform_skip_context_enabled_flag &&
+                (transform_skip_flag || lc->cu.cu_transquant_bypass_flag)) {
+                ctx_idx_map_p = (uint8_t*) &ctx_idx_map[4 * 16];
+                if (c_idx == 0) {
+                    scf_offset = 40;
+                } else {
+                    scf_offset = 14 + 27;
+                }
+            } else {
+                if (c_idx != 0)
+                    scf_offset = 27;
+                if (log2_trafo_size == 2) {
+                    ctx_idx_map_p = (uint8_t*) &ctx_idx_map[0];
+                } else {
+                    ctx_idx_map_p = (uint8_t*) &ctx_idx_map[(prev_sig + 1) << 4];
+                    if (c_idx == 0) {
+                        if ((x_cg > 0 || y_cg > 0))
+                            scf_offset += 3;
+                        if (log2_trafo_size == 3) {
+                            scf_offset += (scan_idx == SCAN_DIAG) ? 9 : 15;
+                        } else {
+                            scf_offset += 21;
+                        }
+                    } else {
+                        if (log2_trafo_size == 3)
+                            scf_offset += 9;
+                        else
+                            scf_offset += 12;
+                    }
+                }
+            }
+            /* significance map for scan positions n_end..1;
+             * position 0 (the CG's DC) is handled separately below */
+            for (n = n_end; n > 0; n--) {
+                x_c = scan_x_off[n];
+                y_c = scan_y_off[n];
+                if (significant_coeff_flag_decode(s, x_c, y_c, scf_offset, ctx_idx_map_p)) {
+                    significant_coeff_flag_idx[nb_significant_coeff_flag] = n;
+                    nb_significant_coeff_flag++;
+                    implicit_non_zero_coeff = 0;
+                }
+            }
+            if (implicit_non_zero_coeff == 0) {
+                if (s->sps->transform_skip_context_enabled_flag &&
+                    (transform_skip_flag || lc->cu.cu_transquant_bypass_flag)) {
+                    if (c_idx == 0) {
+                        scf_offset = 42;
+                    } else {
+                        scf_offset = 16 + 27;
+                    }
+                } else {
+                    if (i == 0) {
+                        if (c_idx == 0)
+                            scf_offset = 0;
+                        else
+                            scf_offset = 27;
+                    } else {
+                        scf_offset = 2 + scf_offset;
+                    }
+                }
+                if (significant_coeff_flag_decode_0(s, c_idx, scf_offset) == 1) {
+                    significant_coeff_flag_idx[nb_significant_coeff_flag] = 0;
+                    nb_significant_coeff_flag++;
+                }
+            } else {
+                /* a coded CG with no other significant coefficient must
+                 * have a non-zero DC: no flag is transmitted */
+                significant_coeff_flag_idx[nb_significant_coeff_flag] = 0;
+                nb_significant_coeff_flag++;
+            }
+        }
+
+        n_end = nb_significant_coeff_flag;
+
+
+        if (n_end) {
+            int first_nz_pos_in_cg;
+            int last_nz_pos_in_cg;
+            int c_rice_param = 0;
+            int first_greater1_coeff_idx = -1;
+            uint8_t coeff_abs_level_greater1_flag[8];
+            uint16_t coeff_sign_flag;
+            int sum_abs = 0;
+            int sign_hidden;
+            int sb_type;
+
+
+            // initialize first elem of coeff_abs_level_greater1_flag
+            int ctx_set = (i > 0 && c_idx == 0) ? 2 : 0;
+
+            if (s->sps->persistent_rice_adaptation_enabled_flag) {
+                if (!transform_skip_flag && !lc->cu.cu_transquant_bypass_flag)
+                    sb_type = 2 * (c_idx == 0 ? 1 : 0);
+                else
+                    sb_type = 2 * (c_idx == 0 ? 1 : 0) + 1;
+                c_rice_param = lc->stat_coeff[sb_type] / 4;
+            }
+
+            if (!(i == num_last_subset) && greater1_ctx == 0)
+                ctx_set++;
+            greater1_ctx = 1;
+            last_nz_pos_in_cg = significant_coeff_flag_idx[0];
+
+            /* greater1 flags are coded for at most the first 8
+             * significant coefficients of the CG */
+            for (m = 0; m < (n_end > 8 ? 8 : n_end); m++) {
+                int inc = (ctx_set << 2) + greater1_ctx;
+                coeff_abs_level_greater1_flag[m] =
+                    coeff_abs_level_greater1_flag_decode(s, c_idx, inc);
+                if (coeff_abs_level_greater1_flag[m]) {
+                    greater1_ctx = 0;
+                    if (first_greater1_coeff_idx == -1)
+                        first_greater1_coeff_idx = m;
+                } else if (greater1_ctx > 0 && greater1_ctx < 3) {
+                    greater1_ctx++;
+                }
+            }
+            first_nz_pos_in_cg = significant_coeff_flag_idx[n_end - 1];
+
+            /* sign data hiding is never used for lossless/RDPCM blocks */
+            if (lc->cu.cu_transquant_bypass_flag || 
+                (lc->cu.pred_mode ==  MODE_INTRA  &&
+                 s->sps->implicit_rdpcm_enabled_flag  &&  transform_skip_flag  &&
+                 (pred_mode_intra == 10 || pred_mode_intra  ==  26 )) ||
+                 explicit_rdpcm_flag)
+                sign_hidden = 0;
+            else
+                sign_hidden = (last_nz_pos_in_cg - first_nz_pos_in_cg >= 4);
+
+            if (first_greater1_coeff_idx != -1) {
+                coeff_abs_level_greater1_flag[first_greater1_coeff_idx] += coeff_abs_level_greater2_flag_decode(s, c_idx, ctx_set);
+            }
+            /* when the sign is hidden, one sign bit fewer is transmitted */
+            if (!s->pps->sign_data_hiding_flag || !sign_hidden ) {
+                coeff_sign_flag = coeff_sign_flag_decode(s, nb_significant_coeff_flag) << (16 - nb_significant_coeff_flag);
+            } else {
+                coeff_sign_flag = coeff_sign_flag_decode(s, nb_significant_coeff_flag - 1) << (16 - (nb_significant_coeff_flag - 1));
+            }
+
+            /* reconstruct each significant level: greater1/greater2 flags
+             * plus a Golomb-Rice coded remainder, then dequantize */
+            for (m = 0; m < n_end; m++) {
+                n = significant_coeff_flag_idx[m];
+                GET_COORD(offset, n);
+                if (m < 8) {
+                    trans_coeff_level = 1 + coeff_abs_level_greater1_flag[m];
+                    if (trans_coeff_level == ((m == first_greater1_coeff_idx) ? 3 : 2)) {
+                        int last_coeff_abs_level_remaining = coeff_abs_level_remaining_decode(s, c_rice_param);
+
+                        trans_coeff_level += last_coeff_abs_level_remaining;
+                        if (trans_coeff_level > (3 << c_rice_param))
+                            c_rice_param = s->sps->persistent_rice_adaptation_enabled_flag ? c_rice_param + 1 : FFMIN(c_rice_param + 1, 4);
+                        if (s->sps->persistent_rice_adaptation_enabled_flag && !rice_init) {
+                            int c_rice_p_init = lc->stat_coeff[sb_type] / 4;
+                            if (last_coeff_abs_level_remaining >= (3 << c_rice_p_init))
+                                lc->stat_coeff[sb_type]++;
+                            else if (2 * last_coeff_abs_level_remaining < (1 << c_rice_p_init))
+                                if (lc->stat_coeff[sb_type] > 0)
+                                    lc->stat_coeff[sb_type]--;
+                            rice_init = 1;
+                        }
+                    }
+                } else {
+                    int last_coeff_abs_level_remaining = coeff_abs_level_remaining_decode(s, c_rice_param);
+
+                    trans_coeff_level = 1 + last_coeff_abs_level_remaining;
+                    if (trans_coeff_level > (3 << c_rice_param))
+                        c_rice_param = s->sps->persistent_rice_adaptation_enabled_flag ? c_rice_param + 1 : FFMIN(c_rice_param + 1, 4);
+                    if (s->sps->persistent_rice_adaptation_enabled_flag && !rice_init) {
+                        int c_rice_p_init = lc->stat_coeff[sb_type] / 4;
+                        if (last_coeff_abs_level_remaining >= (3 << c_rice_p_init))
+                            lc->stat_coeff[sb_type]++;
+                        else if (2 * last_coeff_abs_level_remaining < (1 << c_rice_p_init))
+                            if (lc->stat_coeff[sb_type] > 0)
+                                lc->stat_coeff[sb_type]--;
+                        rice_init = 1;
+                    }
+                }
+                if (s->pps->sign_data_hiding_flag && sign_hidden) {
+                    sum_abs += trans_coeff_level;
+                    if (n == first_nz_pos_in_cg && (sum_abs&1))
+                        trans_coeff_level = -trans_coeff_level;
+                }
+                if (coeff_sign_flag >> 15)
+                    trans_coeff_level = -trans_coeff_level;
+                coeff_sign_flag <<= 1;
+                if(!lc->cu.cu_transquant_bypass_flag) {
+                    if (s->sps->scaling_list_enable_flag && !(transform_skip_flag && log2_trafo_size > 2)) {
+                        if(y_c || x_c || log2_trafo_size < 4) {
+                            /* scaling matrices are stored at 8x8 granularity
+                             * for the larger transform sizes */
+                            switch(log2_trafo_size) {
+                                case 3: pos = (y_c << 3) + x_c; break;
+                                case 4: pos = ((y_c >> 1) << 3) + (x_c >> 1); break;
+                                case 5: pos = ((y_c >> 2) << 3) + (x_c >> 2); break;
+                                default: pos = (y_c << 2) + x_c; break;
+                            }
+                            scale_m = scale_matrix[pos];
+                        } else {
+                            scale_m = dc_scale;
+                        }
+                    }
+                    /* dequantize and clip to the int16 coefficient range.
+                     * NOTE(review): the first mask has 15 hex digits (bits
+                     * 15..59) while the second has 16 — presumably enough
+                     * for the value range produced here, but worth
+                     * confirming against upstream. */
+                    trans_coeff_level = (trans_coeff_level * (int64_t)scale * (int64_t)scale_m + add) >> shift;
+                    if(trans_coeff_level < 0) {
+                        if((~trans_coeff_level) & 0xFffffffffff8000)
+                            trans_coeff_level = -32768;
+                    } else {
+                        if(trans_coeff_level & 0xffffffffffff8000)
+                            trans_coeff_level = 32767;
+                    }
+                }
+                coeffs[y_c * trafo_size + x_c] = trans_coeff_level;
+            }
+        }
+    }
+
+    /* inverse transform stage: RDPCM, transform skip or IDCT */
+    if (lc->cu.cu_transquant_bypass_flag) {
+        if (explicit_rdpcm_flag || (s->sps->implicit_rdpcm_enabled_flag &&
+                                    (pred_mode_intra == 10 || pred_mode_intra == 26))) {
+            int mode = s->sps->implicit_rdpcm_enabled_flag ? (pred_mode_intra == 26) : explicit_rdpcm_dir_flag;
+
+            s->hevcdsp.transform_rdpcm(coeffs, log2_trafo_size, mode);
+        }
+    } else {
+        if (transform_skip_flag) {
+            int rot = s->sps->transform_skip_rotation_enabled_flag &&
+                      log2_trafo_size == 2 &&
+                      lc->cu.pred_mode == MODE_INTRA;
+            if (rot) {
+                /* rotate the 4x4 block by 180 degrees */
+                for (i = 0; i < 8; i++)
+                    FFSWAP(int16_t, coeffs[i], coeffs[16 - i - 1]);
+            }
+
+            s->hevcdsp.transform_skip(coeffs, log2_trafo_size BIT_DEPTH_ARG2(s->sps->bit_depth));
+
+            if (explicit_rdpcm_flag || (s->sps->implicit_rdpcm_enabled_flag &&
+                                        lc->cu.pred_mode == MODE_INTRA &&
+                                        (pred_mode_intra == 10 || pred_mode_intra == 26))) {
+                int mode = explicit_rdpcm_flag ? explicit_rdpcm_dir_flag : (pred_mode_intra == 26);
+
+                s->hevcdsp.transform_rdpcm(coeffs, log2_trafo_size, mode);
+            }
+        } else if (lc->cu.pred_mode == MODE_INTRA && c_idx == 0 && log2_trafo_size == 2) {
+            s->hevcdsp.idct_4x4_luma(coeffs BIT_DEPTH_ARG2(s->sps->bit_depth));
+        } else {
+            /* col_limit lets the IDCT skip columns known to be zero */
+            int max_xy = FFMAX(last_significant_coeff_x, last_significant_coeff_y);
+            if (max_xy == 0)
+                s->hevcdsp.idct_dc[log2_trafo_size-2](coeffs BIT_DEPTH_ARG2(s->sps->bit_depth));
+            else {
+                int col_limit = last_significant_coeff_x + last_significant_coeff_y + 4;
+                if (max_xy < 4)
+                    col_limit = FFMIN(4, col_limit);
+                else if (max_xy < 8)
+                    col_limit = FFMIN(8, col_limit);
+                else if (max_xy < 12)
+                    col_limit = FFMIN(24, col_limit);
+                s->hevcdsp.idct[log2_trafo_size-2](coeffs, col_limit BIT_DEPTH_ARG2(s->sps->bit_depth));
+            }
+        }
+    }
+    /* cross-component prediction: the chroma residual is predicted from
+     * the luma residual left in edge_emu_buffer */
+    if (lc->tu.cross_pf) {
+        int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
+
+        for (i = 0; i < (trafo_size * trafo_size); i++) {
+            coeffs[i] = coeffs[i] + ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
+        }
+    }
+    s->hevcdsp.transform_add[log2_trafo_size-2](dst, coeffs, stride BIT_DEPTH_ARG2(s->sps->bit_depth));
+}
+
+#ifdef USE_PRED
+/*
+ * Parse one motion vector difference from the CABAC bitstream into
+ * lc->pu.mvd.  The syntax elements are interleaved exactly as the
+ * bitstream requires: greater0(x), greater0(y), greater1(x),
+ * greater1(y), then the x remainder/sign followed by the y
+ * remainder/sign.
+ */
+void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    int coded_x = abs_mvd_greater0_flag_decode(s);
+    int coded_y = abs_mvd_greater0_flag_decode(s);
+
+    if (coded_x)
+        coded_x += abs_mvd_greater1_flag_decode(s);
+    if (coded_y)
+        coded_y += abs_mvd_greater1_flag_decode(s);
+
+    /* coded_* is 0 (mvd == 0), 1 (|mvd| == 1, sign only) or 2 (full value) */
+    if (coded_x == 2)
+        lc->pu.mvd.x = mvd_decode(s);
+    else if (coded_x == 1)
+        lc->pu.mvd.x = mvd_sign_flag_decode(s);
+    else
+        lc->pu.mvd.x = 0;
+
+    if (coded_y == 2)
+        lc->pu.mvd.y = mvd_decode(s);
+    else if (coded_y == 1)
+        lc->pu.mvd.y = mvd_sign_flag_decode(s);
+    else
+        lc->pu.mvd.y = 0;
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevc_filter.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,917 @@
+/*
+ * HEVC video decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ * Copyright (C) 2013 Seppo Tomperi
+ * Copyright (C) 2013 Wassim Hamidouche
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/internal.h"
+
+#include "cabac_functions.h"
+#include "golomb.h"
+#include "hevc.h"
+
+#include "bit_depth_template.c"
+
+#define LUMA 0
+#define CB 1
+#define CR 2
+
+/* Deblocking-filter tc threshold table, indexed by a QP-derived index
+ * clipped to [0, 53] (see chroma_tc() below). */
+static const uint8_t tctable[54] = {
+    0, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 0, 0, 1, // QP  0...18
+    1, 1, 1, 1, 1, 1, 1,  1,  2,  2,  2,  2,  3,  3,  3,  3, 4, 4, 4, // QP 19...37
+    5, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16, 18, 20, 22, 24           // QP 38...53
+};
+
+/* Deblocking-filter beta threshold table, indexed by QP clipped to
+ * [0, 51]. */
+static const uint8_t betatable[52] = {
+     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  6,  7,  8, // QP 0...18
+     9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, // QP 19...37
+    38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64                      // QP 38...51
+};
+
+/*
+ * Compute the tc deblocking threshold for one chroma plane.
+ * qp_y is the (average) luma QP of the edge, c_idx selects the Cb/Cr
+ * QP offset, tc_offset is the slice-level tc offset (already scaled).
+ * Returns the tc value from the spec lookup table.
+ */
+static int chroma_tc(HEVCContext *s, int qp_y, int c_idx, int tc_offset)
+{
+    static const uint8_t qp_c[] = {
+        29, 30, 31, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37
+    };
+    int qp, idxt;
+    /* slice qp offset is not used for deblocking */
+    int offset = (c_idx == 1) ? s->pps->cb_qp_offset : s->pps->cr_qp_offset;
+    int qp_i   = av_clip(qp_y + offset, 0, 57);
+
+    if (s->sps->chroma_format_idc == 1) {
+        /* 4:2:0: luma QP maps to chroma QP through the qp_c table */
+        if (qp_i < 30)
+            qp = qp_i;
+        else if (qp_i > 43)
+            qp = qp_i - 6;
+        else
+            qp = qp_c[qp_i - 30];
+    } else {
+        qp = av_clip(qp_i, 0, 51);
+    }
+
+    idxt = av_clip(qp + DEFAULT_INTRA_TC_OFFSET + tc_offset, 0, 53);
+    return tctable[idxt];
+}
+
+/*
+ * Predict the luma QP for the quantization group (QG) containing
+ * (xBase, yBase): the average of the left and above neighbour QPs when
+ * those neighbours lie inside the same CTB, with the previous QP in
+ * decoding order (or the slice QP at the start of a slice) used as a
+ * fallback for unavailable neighbours.
+ */
+static int get_qPy_pred(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
+{
+    HEVCLocalContext *lc     = s->HEVClc;
+    int ctb_size_mask        = (1 << s->sps->log2_ctb_size) - 1;
+    int MinCuQpDeltaSizeMask = (1 << (s->sps->log2_ctb_size -
+                                      s->pps->diff_cu_qp_delta_depth)) - 1;
+    // top-left corner of the quantization group containing (xBase, yBase)
+    int xQgBase              = xBase - (xBase & MinCuQpDeltaSizeMask);
+    int yQgBase              = yBase - (yBase & MinCuQpDeltaSizeMask);
+    int min_cb_width         = s->sps->min_cb_width;
+    int x_cb                 = xQgBase >> s->sps->log2_min_cb_size;
+    int y_cb                 = yQgBase >> s->sps->log2_min_cb_size;
+    // a neighbour is available only if it lies in the same CTB
+    int availableA           = (xBase   & ctb_size_mask) &&
+                               (xQgBase & ctb_size_mask);
+    int availableB           = (yBase   & ctb_size_mask) &&
+                               (yQgBase & ctb_size_mask);
+    int qPy_pred, qPy_a, qPy_b;
+
+    // qPy_pred: previous QP in decoding order, or the slice QP for the
+    // first QP group of the slice (note: updates lc->first_qp_group)
+    if (lc->first_qp_group || (!xQgBase && !yQgBase)) {
+        lc->first_qp_group = !lc->tu.is_cu_qp_delta_coded;
+        qPy_pred = s->sh.slice_qp;
+    } else {
+        qPy_pred = lc->qPy_pred;
+    }
+
+    // qPy_a: QP of the min-CB to the left of the QG
+    if (availableA == 0)
+        qPy_a = qPy_pred;
+    else
+        qPy_a = s->qp_y_tab[(x_cb - 1) + y_cb * min_cb_width];
+
+    // qPy_b: QP of the min-CB above the QG
+    if (availableB == 0)
+        qPy_b = qPy_pred;
+    else
+        qPy_b = s->qp_y_tab[x_cb + (y_cb - 1) * min_cb_width];
+
+    av_assert2(qPy_a >= -s->sps->qp_bd_offset && qPy_a < 52);
+    av_assert2(qPy_b >= -s->sps->qp_bd_offset && qPy_b < 52);
+
+    // rounded average of the two neighbour QPs
+    return (qPy_a + qPy_b + 1) >> 1;
+}
+
+/*
+ * Derive the final luma QP for the current coding unit from the QP
+ * predictor and the coded cu_qp_delta, and store it in the local
+ * context (s->HEVClc->qp_y).
+ */
+void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    int pred  = get_qPy_pred(s, xBase, yBase, log2_cb_size);
+    int delta = lc->tu.cu_qp_delta;
+
+    if (delta != 0) {
+        /* wrap the result into [-qp_bd_offset, 51] */
+        int off = s->sps->qp_bd_offset;
+        lc->qp_y = FFUMOD(pred + delta + 52 + 2 * off, 52 + off) - off;
+    } else {
+        lc->qp_y = pred;
+    }
+}
+
+/* Return the luma QP stored for the minimum coding block containing the
+ * luma sample (xC, yC). */
+static int get_qPy(HEVCContext *s, int xC, int yC)
+{
+    int shift = s->sps->log2_min_cb_size;
+    int cb_x  = xC >> shift;
+    int cb_y  = yC >> shift;
+
+    return s->qp_y_tab[cb_x + cb_y * s->sps->min_cb_width];
+}
+
+/* Copy a width x height block of bytes between two strided buffers. */
+static void copy_CTB(uint8_t *dst, const uint8_t *src,
+                     int width, int height, int stride_dst, int stride_src)
+{
+    int row;
+
+    for (row = 0; row < height; row++)
+        memcpy(dst + (ptrdiff_t)row * stride_dst,
+               src + (ptrdiff_t)row * stride_src, width);
+}
+
+#if defined(USE_SAO_SMALL_BUFFER)
+/* Copy one pixel of 1 << pixel_shift bytes (8-bit or 16-bit samples). */
+static void copy_pixel(uint8_t *dst, const uint8_t *src, int pixel_shift)
+{
+    if (pixel_shift == 0)
+        *dst = *src;
+    else
+        *(uint16_t *)dst = *(uint16_t *)src;
+}
+
+/* Copy a one-pixel-wide vertical strip of `height` pixels between two
+ * strided buffers; pixel_shift selects 8-bit vs 16-bit samples (the
+ * test is hoisted out of the copy loop). */
+static void copy_vert(uint8_t *dst, const uint8_t *src,
+                      int pixel_shift, int height,
+                      int stride_dst, int stride_src)
+{
+    int i;
+
+    if (pixel_shift) {
+        for (i = 0; i < height; i++) {
+            *(uint16_t *)dst = *(uint16_t *)src;
+            dst += stride_dst;
+            src += stride_src;
+        }
+    } else {
+        for (i = 0; i < height; i++) {
+            *dst = *src;
+            dst += stride_dst;
+            src += stride_src;
+        }
+    }
+}
+
+/*
+ * Save the four one-pixel-wide edges of a CTB for plane c_idx into the
+ * SAO line buffers: the top and bottom rows go into
+ * sao_pixel_buffer_h[c_idx] (two rows per CTB row), the left and right
+ * columns into sao_pixel_buffer_v[c_idx] (two columns per CTB column),
+ * so the SAO filter can later read neighbour pixels across CTB
+ * boundaries.
+ */
+static void copy_CTB_to_hv(HEVCContext *s, const uint8_t *src,
+                           int stride_src, int x, int y, int width, int height,
+                           int c_idx, int x_ctb, int y_ctb)
+{
+    int sh = s->sps->pixel_shift;
+    int w = s->sps->width >> s->sps->hshift[c_idx];
+    int h = s->sps->height >> s->sps->vshift[c_idx];
+
+    /* copy horizontal edges: first and last row of the CTB */
+    memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb) * w + x) << sh), 
+        src, width << sh);
+    memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb + 1) * w + x) << sh), 
+        src + stride_src * (height - 1), width << sh);
+    
+    /* copy vertical edges: first and last column of the CTB */
+    copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb) * h + y) << sh), src, sh, height, 1 << sh, stride_src);
+        
+    copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 1) * h + y) << sh), src + ((width - 1) << sh), sh, height, 1 << sh, stride_src);
+}
+#endif
+
+/*
+ * For every minimum prediction unit inside the (x0, y0, width, height)
+ * rectangle that is marked in s->is_pcm (lossless / unfiltered PCM),
+ * copy the pixels from dst1 back into src1 so that in-loop filtering
+ * does not alter them.  Note the inverted naming: data flows from dst1
+ * (the pristine copy) into src1 (the buffer being filtered).
+ * No-op unless transquant bypass or unfiltered PCM is possible in the
+ * stream.
+ */
+static void restore_tqb_pixels(HEVCContext *s, 
+                               uint8_t *src1, const uint8_t *dst1,
+                               ptrdiff_t stride_src, ptrdiff_t stride_dst,
+                               int x0, int y0, int width, int height, int c_idx)
+{
+    if ( s->pps->transquant_bypass_enable_flag ||
+            (s->sps->pcm.loop_filter_disable_flag && s->sps->pcm_enabled_flag)) {
+        int x, y;
+        int min_pu_size  = 1 << s->sps->log2_min_pu_size;
+        int hshift       = s->sps->hshift[c_idx];
+        int vshift       = s->sps->vshift[c_idx];
+        // rectangle bounds expressed on the min-PU grid
+        int x_min        = ((x0         ) >> s->sps->log2_min_pu_size);
+        int y_min        = ((y0         ) >> s->sps->log2_min_pu_size);
+        int x_max        = ((x0 + width ) >> s->sps->log2_min_pu_size);
+        int y_max        = ((y0 + height) >> s->sps->log2_min_pu_size);
+        // bytes per copied line of one min-PU in this plane
+        int len          = (min_pu_size >> hshift) << s->sps->pixel_shift;
+        for (y = y_min; y < y_max; y++) {
+            for (x = x_min; x < x_max; x++) {
+                if (s->is_pcm[y * s->sps->min_pu_width + x]) {
+                    int n;
+                    uint8_t *src = src1 + (((y - y0) << s->sps->log2_min_pu_size) >> vshift) * stride_src + ((((x - x0) << s->sps->log2_min_pu_size) >> hshift) << s->sps->pixel_shift);
+                    const uint8_t *dst = dst1 + (((y - y0) << s->sps->log2_min_pu_size) >> vshift) * stride_dst + ((((x - x0) << s->sps->log2_min_pu_size) >> hshift) << s->sps->pixel_shift);
+                    for (n = 0; n < (min_pu_size >> vshift); n++) {
+                        memcpy(src, dst, len);
+                        src += stride_src;
+                        dst += stride_dst;
+                    }
+                }
+            }
+        }
+    }
+}
+
+/* Index a per-CTB table by CTB raster coordinates (x, y). */
+#define CTB(tab, x, y) ((tab)[(y) * s->sps->ctb_width + (x)])
+
+/* Apply SAO (sample adaptive offset) filtering to the CTB whose top-left
+ * luma sample is at pixel position (x, y).  Handles both band- and
+ * edge-offset modes; edges[] / vert_edge[] / horiz_edge[] / diag_edge[]
+ * flag picture, slice and tile boundaries across which filtering must be
+ * suppressed. */
+static void sao_filter_CTB(HEVCContext *s, int x, int y)
+{
+    int c_idx, c_count;
+    int edges[4];  // 0 left 1 top 2 right 3 bottom
+    int x_ctb                = x >> s->sps->log2_ctb_size;
+    int y_ctb                = y >> s->sps->log2_ctb_size;
+    int ctb_addr_rs          = y_ctb * s->sps->ctb_width + x_ctb;
+    int ctb_addr_ts          = s->pps->ctb_addr_rs_to_ts[ctb_addr_rs];
+    SAOParams *sao           = &CTB(s->sao, x_ctb, y_ctb);
+    // flags indicating unfilterable edges
+    uint8_t vert_edge[]      = { 0, 0 };
+    uint8_t horiz_edge[]     = { 0, 0 };
+    uint8_t diag_edge[]      = { 0, 0, 0, 0 };
+    uint8_t lfase            = CTB(s->filter_slice_edges, x_ctb, y_ctb);
+    uint8_t no_tile_filter   = s->pps->tiles_enabled_flag &&
+                               !s->pps->loop_filter_across_tiles_enabled_flag;
+    uint8_t restore          = no_tile_filter || !lfase;
+    uint8_t left_tile_edge   = 0;
+    uint8_t right_tile_edge  = 0;
+    uint8_t up_tile_edge     = 0;
+    uint8_t bottom_tile_edge = 0;
+
+    // picture-boundary flags for this CTB
+    edges[0]   = x_ctb == 0;
+    edges[1]   = y_ctb == 0;
+    edges[2]   = x_ctb == s->sps->ctb_width  - 1;
+    edges[3]   = y_ctb == s->sps->ctb_height - 1;
+
+    if (restore) {
+        /* mark each neighbouring edge/corner as unfilterable when it
+         * crosses a tile boundary (with cross-tile filtering off) or a
+         * slice boundary (with cross-slice filtering off) */
+        if (!edges[0]) {
+            left_tile_edge  = no_tile_filter && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs-1]];
+            vert_edge[0]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb)) || left_tile_edge;
+        }
+        if (!edges[2]) {
+            right_tile_edge = no_tile_filter && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs+1]];
+            vert_edge[1]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb)) || right_tile_edge;
+        }
+        if (!edges[1]) {
+            up_tile_edge     = no_tile_filter && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->sps->ctb_width]];
+            horiz_edge[0]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb, y_ctb - 1)) || up_tile_edge;
+        }
+        if (!edges[3]) {
+            bottom_tile_edge = no_tile_filter && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs + s->sps->ctb_width]];
+            horiz_edge[1]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb, y_ctb + 1)) || bottom_tile_edge;
+        }
+        if (!edges[0] && !edges[1]) {
+            diag_edge[0] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb - 1)) || left_tile_edge || up_tile_edge;
+        }
+        if (!edges[1] && !edges[2]) {
+            diag_edge[1] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb - 1)) || right_tile_edge || up_tile_edge;
+        }
+        if (!edges[2] && !edges[3]) {
+            diag_edge[2] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb + 1)) || right_tile_edge || bottom_tile_edge;
+        }
+        if (!edges[0] && !edges[3]) {
+            diag_edge[3] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb + 1)) || left_tile_edge || bottom_tile_edge;
+        }
+    }
+    
+    /* monochrome (chroma_format_idc == 0): only the luma plane exists */
+    c_count = (s->sps->chroma_format_idc != 0) ? 3 : 1;
+    for (c_idx = 0; c_idx < c_count; c_idx++) {
+        int x0       = x >> s->sps->hshift[c_idx];
+        int y0       = y >> s->sps->vshift[c_idx];
+        int stride_src = s->frame->linesize[c_idx];
+        int ctb_size_h = (1 << (s->sps->log2_ctb_size)) >> s->sps->hshift[c_idx];
+        int ctb_size_v = (1 << (s->sps->log2_ctb_size)) >> s->sps->vshift[c_idx];
+        int width    = FFMIN(ctb_size_h, (s->sps->width  >> s->sps->hshift[c_idx]) - x0);
+        int height   = FFMIN(ctb_size_v, (s->sps->height >> s->sps->vshift[c_idx]) - y0);
+        uint8_t *src = &s->frame->data[c_idx][y0 * stride_src + (x0 << s->sps->pixel_shift)];
+#if defined(USE_SAO_SMALL_BUFFER)
+        /* small-buffer mode: one CTB-sized scratch area with a 1-pixel
+         * border instead of a full copy of the frame */
+        int stride_dst = ((1 << (s->sps->log2_ctb_size)) + 2) << s->sps->pixel_shift;
+        uint8_t *dst = s->sao_pixel_buffer + (1 * stride_dst) + (1 << s->sps->pixel_shift);
+#else
+        int stride_dst = s->sao_frame->linesize[c_idx];
+        uint8_t *dst = &s->sao_frame->data[c_idx][y0 * stride_dst + (x0 << s->sps->pixel_shift)];
+#endif
+
+        switch (sao->type_idx[c_idx]) {
+        case SAO_BAND:
+            copy_CTB(dst, src, width << s->sps->pixel_shift, height, stride_dst, stride_src);
+#if defined(USE_SAO_SMALL_BUFFER)
+            copy_CTB_to_hv(s, src, stride_src, x0, y0, width, height, c_idx,
+                           x_ctb, y_ctb);
+#endif
+            s->hevcdsp.sao_band_filter(src, dst,
+                                       stride_src, stride_dst,
+                                       sao,
+                                       edges, width,
+                                       height, c_idx BIT_DEPTH_ARG2(s->sps->bit_depth));
+            restore_tqb_pixels(s, src, dst, stride_src, stride_dst,
+                               x, y, width, height, c_idx);
+            sao->type_idx[c_idx] = SAO_APPLIED;
+            break;
+        case SAO_EDGE:
+        {
+#if defined(USE_SAO_SMALL_BUFFER)
+            int w = s->sps->width >> s->sps->hshift[c_idx];
+            int h = s->sps->height >> s->sps->vshift[c_idx];
+            int left_edge = edges[0];
+            int top_edge = edges[1];
+            int right_edge = edges[2];
+            int bottom_edge = edges[3];
+            int sh = s->sps->pixel_shift;
+            int left_pixels, right_pixels;
+
+            /* rebuild the 1-pixel rows above and below the CTB: each pixel
+             * comes either from the frame or from the saved pre-SAO line
+             * buffer, depending on whether the neighbouring CTB has already
+             * been SAO filtered (type_idx == SAO_APPLIED) */
+            if (!top_edge) {
+                int left = 1 - left_edge;
+                int right = 1 - right_edge;
+                const uint8_t *src1[2];
+                uint8_t *dst1;
+                int src_idx, pos;
+                
+                dst1 = dst - stride_dst - (left << sh);
+                src1[0] = src - stride_src - (left << sh);
+                src1[1] = s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb - 1) * w + x0 - left) << sh);
+                pos = 0;
+                if (left) {
+                    src_idx = (CTB(s->sao, x_ctb-1, y_ctb-1).type_idx[c_idx] == 
+                               SAO_APPLIED);
+                    copy_pixel(dst1, src1[src_idx], sh);
+                    pos += (1 << sh);
+                }
+                src_idx = (CTB(s->sao, x_ctb, y_ctb-1).type_idx[c_idx] == 
+                           SAO_APPLIED);
+                memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
+                if (right) {
+                    pos += width << sh;
+                    src_idx = (CTB(s->sao, x_ctb+1, y_ctb-1).type_idx[c_idx] == 
+                               SAO_APPLIED);
+                    copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
+                }
+            }
+            if (!bottom_edge) {
+                int left = 1 - left_edge;
+                int right = 1 - right_edge;
+                const uint8_t *src1[2];
+                uint8_t *dst1;
+                int src_idx, pos;
+
+                dst1 = dst + height * stride_dst - (left << sh);
+                src1[0] = src + height * stride_src - (left << sh);
+                src1[1] = s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb + 2) * w + x0 - left) << sh);
+                pos = 0;
+                if (left) {
+                    src_idx = (CTB(s->sao, x_ctb-1, y_ctb+1).type_idx[c_idx] == 
+                               SAO_APPLIED);
+                    copy_pixel(dst1, src1[src_idx], sh);
+                    pos += (1 << sh);
+                }
+                src_idx = (CTB(s->sao, x_ctb, y_ctb+1).type_idx[c_idx] == 
+                           SAO_APPLIED);
+                memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
+                if (right) {
+                    pos += width << sh;
+                    src_idx = (CTB(s->sao, x_ctb+1, y_ctb+1).type_idx[c_idx] == 
+                               SAO_APPLIED);
+                    copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
+                }
+            }
+            /* left/right 1-pixel columns: from the saved pre-SAO column
+             * buffer when the neighbour was already filtered, otherwise
+             * widen the copy_CTB below to pick them up from the frame */
+            left_pixels = 0;
+            if (!left_edge) {
+                if (CTB(s->sao, x_ctb-1, y_ctb).type_idx[c_idx] == SAO_APPLIED) {
+                    copy_vert(dst - (1 << sh), 
+                              s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb - 1) * h + y0) << sh),
+                              sh, height, stride_dst, 1 << sh);
+                } else {
+                    left_pixels = 1;
+                }
+            }
+            right_pixels = 0;
+            if (!right_edge) {
+                if (CTB(s->sao, x_ctb+1, y_ctb).type_idx[c_idx] == SAO_APPLIED) {
+                    copy_vert(dst + (width << sh), 
+                              s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 2) * h + y0) << sh),
+                              sh, height, stride_dst, 1 << sh);
+                } else {
+                    right_pixels = 1;
+                }
+            }
+
+            copy_CTB(dst - (left_pixels << sh),
+                     src - (left_pixels << sh),
+                     (width + left_pixels + right_pixels) << sh, 
+                     height, stride_dst, stride_src);
+
+            copy_CTB_to_hv(s, src, stride_src, x0, y0, width, height, c_idx,
+                           x_ctb, y_ctb);
+#else
+            uint8_t left_pixels;
+            /* get the CTB edge pixels from the SAO pixel buffer */
+            left_pixels = !edges[0] && (CTB(s->sao, x_ctb-1, y_ctb).type_idx[c_idx] != SAO_APPLIED);
+            if (!edges[1]) {
+                uint8_t top_left  = !edges[0] && (CTB(s->sao, x_ctb-1, y_ctb-1).type_idx[c_idx] != SAO_APPLIED);
+                uint8_t top_right = !edges[2] && (CTB(s->sao, x_ctb+1, y_ctb-1).type_idx[c_idx] != SAO_APPLIED);
+                if (CTB(s->sao, x_ctb  , y_ctb-1).type_idx[c_idx] == 0)
+                    memcpy( dst - stride_dst - (top_left << s->sps->pixel_shift),
+                            src - stride_src - (top_left << s->sps->pixel_shift),
+                            (top_left + width + top_right) << s->sps->pixel_shift);
+                else {
+                    if (top_left)
+                        memcpy( dst - stride_dst - (1 << s->sps->pixel_shift),
+                                src - stride_src - (1 << s->sps->pixel_shift),
+                                1 << s->sps->pixel_shift);
+                    if(top_right)
+                        memcpy( dst - stride_dst + (width << s->sps->pixel_shift),
+                                src - stride_src + (width << s->sps->pixel_shift),
+                                1 << s->sps->pixel_shift);
+                }
+            }
+            if (!edges[3]) {                                                                // bottom and bottom right
+                uint8_t bottom_left = !edges[0] && (CTB(s->sao, x_ctb-1, y_ctb+1).type_idx[c_idx] != SAO_APPLIED);
+                memcpy( dst + height * stride_dst - (bottom_left << s->sps->pixel_shift),
+                        src + height * stride_src - (bottom_left << s->sps->pixel_shift),
+                        (width + 1 + bottom_left) << s->sps->pixel_shift);
+            }
+            copy_CTB(dst - (left_pixels << s->sps->pixel_shift),
+                     src - (left_pixels << s->sps->pixel_shift),
+                     (width + 1 + left_pixels) << s->sps->pixel_shift, height, stride_dst, stride_src);
+#endif
+            /* XXX: could handle the restoration here to simplify the
+               DSP functions */
+            s->hevcdsp.sao_edge_filter[restore](src, dst,
+                                                stride_src, stride_dst,
+                                                sao,
+                                                edges, width,
+                                                height, c_idx,
+                                                vert_edge,
+                                                horiz_edge,
+                                                diag_edge BIT_DEPTH_ARG2(s->sps->bit_depth));
+            restore_tqb_pixels(s, src, dst, stride_src, stride_dst,
+                               x, y, width, height, c_idx);
+            sao->type_idx[c_idx] = SAO_APPLIED;
+            break;
+        }
+        }
+    }
+}
+
+/* Query the per-minimum-PU PCM table at luma sample position (x, y).
+ * Positions outside the picture (negative or beyond the PU grid)
+ * return 2. */
+static int get_pcm(HEVCContext *s, int x, int y)
+{
+    const int shift = s->sps->log2_min_pu_size;
+    int pu_x, pu_y;
+
+    if (x < 0 || y < 0)
+        return 2;
+
+    pu_x = x >> shift;
+    pu_y = y >> shift;
+    if (pu_x >= s->sps->min_pu_width || pu_y >= s->sps->min_pu_height)
+        return 2;
+
+    return s->is_pcm[pu_y * s->sps->min_pu_width + pu_x];
+}
+
+/* Look up the tc deblocking threshold for a given average QP and boundary
+ * strength bs; DEFAULT_INTRA_TC_OFFSET * (bs - 1) adds the intra offset
+ * only for bs == 2.  Evaluates the local variable tc_offset. */
+#define TC_CALC(qp, bs)                                                 \
+    tctable[av_clip((qp) + DEFAULT_INTRA_TC_OFFSET * ((bs) - 1) +       \
+                    (tc_offset >> 1 << 1),                              \
+                    0, MAX_QP + DEFAULT_INTRA_TC_OFFSET)]
+
+/* Deblock the CTB at luma position (x0, y0): vertical then horizontal
+ * luma edges on the 8x8 grid, then chroma edges (only where bs == 2)
+ * when the stream is not monochrome.  Boundary strengths are read from
+ * s->vertical_bs / s->horizontal_bs (one entry per 4 pixels). */
+static void deblocking_filter_CTB(HEVCContext *s, int x0, int y0)
+{
+    uint8_t *src;
+    int x, y;
+    int chroma, beta;
+    int32_t c_tc[2], tc[2];
+    uint8_t no_p[2] = { 0 };
+    uint8_t no_q[2] = { 0 };
+
+    int log2_ctb_size = s->sps->log2_ctb_size;
+    int x_end, x_end2, y_end;
+    int ctb_size        = 1 << log2_ctb_size;
+    int ctb             = (x0 >> log2_ctb_size) +
+                          (y0 >> log2_ctb_size) * s->sps->ctb_width;
+    int cur_tc_offset   = s->deblock[ctb].tc_offset;
+    int cur_beta_offset = s->deblock[ctb].beta_offset;
+    int left_tc_offset, left_beta_offset;
+    int tc_offset, beta_offset;
+    /* per-sample filtering may be disabled for PCM / transquant-bypass
+     * blocks; when possible, the no_p/no_q arrays carry the flags */
+    int pcmf = (s->sps->pcm_enabled_flag &&
+                s->sps->pcm.loop_filter_disable_flag) ||
+               s->pps->transquant_bypass_enable_flag;
+    int bit_depth =  s->sps->bit_depth;
+
+    if (x0) {
+        left_tc_offset   = s->deblock[ctb - 1].tc_offset;
+        left_beta_offset = s->deblock[ctb - 1].beta_offset;
+    } else {
+        left_tc_offset   = 0;
+        left_beta_offset = 0;
+    }
+
+    /* clip the filtered area to the picture */
+    x_end = x0 + ctb_size;
+    if (x_end > s->sps->width)
+        x_end = s->sps->width;
+    y_end = y0 + ctb_size;
+    if (y_end > s->sps->height)
+        y_end = s->sps->height;
+
+    tc_offset   = cur_tc_offset;
+    beta_offset = cur_beta_offset;
+
+    /* horizontal filtering lags 8 pixels behind unless at the picture edge */
+    x_end2 = x_end;
+    if (x_end2 != s->sps->width)
+        x_end2 -= 8;
+    for (y = y0; y < y_end; y += 8) {
+        // vertical filtering luma
+        for (x = x0 ? x0 : 8; x < x_end; x += 8) {
+            const int bs0 = s->vertical_bs[(x +  y      * s->bs_width) >> 2];
+            const int bs1 = s->vertical_bs[(x + (y + 4) * s->bs_width) >> 2];
+            if (bs0 || bs1) {
+                const int qp = (get_qPy(s, x - 1, y)     + get_qPy(s, x, y)     + 1) >> 1;
+
+                beta = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];
+
+                tc[0]   = bs0 ? TC_CALC(qp, bs0) : 0;
+                tc[1]   = bs1 ? TC_CALC(qp, bs1) : 0;
+                src     = &s->frame->data[LUMA][y * s->frame->linesize[LUMA] + (x << s->sps->pixel_shift)];
+                if (pcmf) {
+                    no_p[0] = get_pcm(s, x - 1, y);
+                    no_p[1] = get_pcm(s, x - 1, y + 4);
+                    no_q[0] = get_pcm(s, x, y);
+                    no_q[1] = get_pcm(s, x, y + 4);
+                    s->hevcdsp.hevc_v_loop_filter_luma_c(src,
+                                                         s->frame->linesize[LUMA],
+                                                         beta, tc, no_p, no_q BIT_DEPTH_ARG);
+                } else
+                    s->hevcdsp.hevc_v_loop_filter_luma(src,
+                                                       s->frame->linesize[LUMA],
+                                                       beta, tc, no_p, no_q BIT_DEPTH_ARG);
+            }
+        }
+
+        if(!y)
+             continue;
+
+        // horizontal filtering luma
+        for (x = x0 ? x0 - 8 : 0; x < x_end2; x += 8) {
+            const int bs0 = s->horizontal_bs[( x      + y * s->bs_width) >> 2];
+            const int bs1 = s->horizontal_bs[((x + 4) + y * s->bs_width) >> 2];
+            if (bs0 || bs1) {
+                const int qp = (get_qPy(s, x, y - 1)     + get_qPy(s, x, y)     + 1) >> 1;
+
+                /* columns left of x0 belong to the previous CTB and use
+                 * its deblock offsets */
+                tc_offset   = x >= x0 ? cur_tc_offset : left_tc_offset;
+                beta_offset = x >= x0 ? cur_beta_offset : left_beta_offset;
+
+                beta = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];
+                tc[0]   = bs0 ? TC_CALC(qp, bs0) : 0;
+                tc[1]   = bs1 ? TC_CALC(qp, bs1) : 0;
+                src     = &s->frame->data[LUMA][y * s->frame->linesize[LUMA] + (x << s->sps->pixel_shift)];
+                if (pcmf) {
+                    no_p[0] = get_pcm(s, x, y - 1);
+                    no_p[1] = get_pcm(s, x + 4, y - 1);
+                    no_q[0] = get_pcm(s, x, y);
+                    no_q[1] = get_pcm(s, x + 4, y);
+                    s->hevcdsp.hevc_h_loop_filter_luma_c(src,
+                                                         s->frame->linesize[LUMA],
+                                                         beta, tc, no_p, no_q BIT_DEPTH_ARG);
+                } else
+                    s->hevcdsp.hevc_h_loop_filter_luma(src,
+                                                       s->frame->linesize[LUMA],
+                                                       beta, tc, no_p, no_q BIT_DEPTH_ARG);
+            }
+        }
+    }
+
+    /* chroma deblocking: skipped entirely for monochrome streams */
+    if (s->sps->chroma_format_idc != 0) {
+    for (chroma = 1; chroma <= 2; chroma++) {
+        int h = 1 << s->sps->hshift[chroma];
+        int v = 1 << s->sps->vshift[chroma];
+
+        // vertical filtering chroma
+        for (y = y0; y < y_end; y += (8 * v)) {
+            for (x = x0 ? x0 : 8 * h; x < x_end; x += (8 * h)) {
+                const int bs0 = s->vertical_bs[(x +  y            * s->bs_width) >> 2];
+                const int bs1 = s->vertical_bs[(x + (y + (4 * v)) * s->bs_width) >> 2];
+
+                if ((bs0 == 2) || (bs1 == 2)) {
+                    const int qp0 = (get_qPy(s, x - 1, y)           + get_qPy(s, x, y)           + 1) >> 1;
+                    const int qp1 = (get_qPy(s, x - 1, y + (4 * v)) + get_qPy(s, x, y + (4 * v)) + 1) >> 1;
+
+                    c_tc[0] = (bs0 == 2) ? chroma_tc(s, qp0, chroma, tc_offset) : 0;
+                    c_tc[1] = (bs1 == 2) ? chroma_tc(s, qp1, chroma, tc_offset) : 0;
+                    src       = &s->frame->data[chroma][(y >> s->sps->vshift[chroma]) * s->frame->linesize[chroma] + ((x >> s->sps->hshift[chroma]) << s->sps->pixel_shift)];
+                    if (pcmf) {
+                        no_p[0] = get_pcm(s, x - 1, y);
+                        no_p[1] = get_pcm(s, x - 1, y + (4 * v));
+                        no_q[0] = get_pcm(s, x, y);
+                        no_q[1] = get_pcm(s, x, y + (4 * v));
+                        s->hevcdsp.hevc_v_loop_filter_chroma_c(src,
+                                                               s->frame->linesize[chroma],
+                                                               c_tc, no_p, no_q BIT_DEPTH_ARG);
+                    } else
+                        s->hevcdsp.hevc_v_loop_filter_chroma(src,
+                                                             s->frame->linesize[chroma],
+                                                             c_tc, no_p, no_q BIT_DEPTH_ARG);
+                }
+            }
+
+            if(!y)
+                 continue;
+
+            // horizontal filtering chroma
+            tc_offset = x0 ? left_tc_offset : cur_tc_offset;
+            x_end2 = x_end;
+            if (x_end != s->sps->width)
+                x_end2 = x_end - 8 * h;
+            for (x = x0 ? x0 - 8 * h : 0; x < x_end2; x += (8 * h)) {
+                const int bs0 = s->horizontal_bs[( x          + y * s->bs_width) >> 2];
+                const int bs1 = s->horizontal_bs[((x + 4 * h) + y * s->bs_width) >> 2];
+                if ((bs0 == 2) || (bs1 == 2)) {
+                    const int qp0 = bs0 == 2 ? (get_qPy(s, x,           y - 1) + get_qPy(s, x,           y) + 1) >> 1 : 0;
+                    const int qp1 = bs1 == 2 ? (get_qPy(s, x + (4 * h), y - 1) + get_qPy(s, x + (4 * h), y) + 1) >> 1 : 0;
+
+                    /* NOTE(review): c_tc[0] uses tc_offset but c_tc[1]
+                     * uses cur_tc_offset — looks asymmetric; confirm
+                     * against the reference decoder */
+                    c_tc[0]   = bs0 == 2 ? chroma_tc(s, qp0, chroma, tc_offset)     : 0;
+                    c_tc[1]   = bs1 == 2 ? chroma_tc(s, qp1, chroma, cur_tc_offset) : 0;
+                    /* NOTE(review): shifts indexed with [1] rather than
+                     * [chroma]; equivalent when both chroma planes share
+                     * the same subsampling (e.g. 4:2:0) — confirm for
+                     * other chroma formats */
+                    src       = &s->frame->data[chroma][(y >> s->sps->vshift[1]) * s->frame->linesize[chroma] + ((x >> s->sps->hshift[1]) << s->sps->pixel_shift)];
+                    if (pcmf) {
+                        no_p[0] = get_pcm(s, x,           y - 1);
+                        no_p[1] = get_pcm(s, x + (4 * h), y - 1);
+                        no_q[0] = get_pcm(s, x,           y);
+                        no_q[1] = get_pcm(s, x + (4 * h), y);
+                        s->hevcdsp.hevc_h_loop_filter_chroma_c(src,
+                                                               s->frame->linesize[chroma],
+                                                               c_tc, no_p, no_q BIT_DEPTH_ARG);
+                    } else
+                        s->hevcdsp.hevc_h_loop_filter_chroma(src,
+                                                             s->frame->linesize[chroma],
+                                                             c_tc, no_p, no_q BIT_DEPTH_ARG);
+                }
+            }
+        }
+    }
+    } /* chroma_format_idc != 0 */
+}
+
+#ifdef USE_PRED
+/* Compute the motion-based deblocking boundary strength (0 or 1) between
+ * the current minimum PU and a neighbouring one, given the neighbour's
+ * reference picture lists.  Only reached when neither PU is intra and
+ * neither has coded luma residual (those cases yield bs 2 / 1 in the
+ * callers); returns 1 when reference pictures differ or any motion
+ * vector component differs by >= 4 (one luma sample in quarter-pel). */
+static int boundary_strength(HEVCContext *s, MvField *curr, MvField *neigh,
+                             RefPicList *neigh_refPicList)
+{
+    if (curr->pred_flag == PF_BI &&  neigh->pred_flag == PF_BI) {
+        // same L0 and L1
+        if (s->ref->refPicList[0].list[curr->ref_idx[0]] == neigh_refPicList[0].list[neigh->ref_idx[0]]  &&
+            s->ref->refPicList[0].list[curr->ref_idx[0]] == s->ref->refPicList[1].list[curr->ref_idx[1]] &&
+            neigh_refPicList[0].list[neigh->ref_idx[0]] == neigh_refPicList[1].list[neigh->ref_idx[1]]) {
+            /* all four references identical: both MV pairings must differ
+             * for the edge to be filtered */
+            if ((FFABS(neigh->mv[0].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[0].y) >= 4 ||
+                 FFABS(neigh->mv[1].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[1].y) >= 4) &&
+                (FFABS(neigh->mv[1].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[0].y) >= 4 ||
+                 FFABS(neigh->mv[0].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[1].y) >= 4))
+                return 1;
+            else
+                return 0;
+        } else if (neigh_refPicList[0].list[neigh->ref_idx[0]] == s->ref->refPicList[0].list[curr->ref_idx[0]] &&
+                   neigh_refPicList[1].list[neigh->ref_idx[1]] == s->ref->refPicList[1].list[curr->ref_idx[1]]) {
+            /* references match list-for-list: compare MVs directly */
+            if (FFABS(neigh->mv[0].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[0].y) >= 4 ||
+                FFABS(neigh->mv[1].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[1].y) >= 4)
+                return 1;
+            else
+                return 0;
+        } else if (neigh_refPicList[1].list[neigh->ref_idx[1]] == s->ref->refPicList[0].list[curr->ref_idx[0]] &&
+                   neigh_refPicList[0].list[neigh->ref_idx[0]] == s->ref->refPicList[1].list[curr->ref_idx[1]]) {
+            /* references match cross-list: compare MVs swapped */
+            if (FFABS(neigh->mv[1].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[0].y) >= 4 ||
+                FFABS(neigh->mv[0].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[1].y) >= 4)
+                return 1;
+            else
+                return 0;
+        } else {
+            return 1;
+        }
+    } else if ((curr->pred_flag != PF_BI) && (neigh->pred_flag != PF_BI)){ // 1 MV
+        Mv A, B;
+        int ref_A, ref_B;
+
+        if (curr->pred_flag & 1) {
+            A     = curr->mv[0];
+            ref_A = s->ref->refPicList[0].list[curr->ref_idx[0]];
+        } else {
+            A     = curr->mv[1];
+            ref_A = s->ref->refPicList[1].list[curr->ref_idx[1]];
+        }
+
+        if (neigh->pred_flag & 1) {
+            B     = neigh->mv[0];
+            ref_B = neigh_refPicList[0].list[neigh->ref_idx[0]];
+        } else {
+            B     = neigh->mv[1];
+            ref_B = neigh_refPicList[1].list[neigh->ref_idx[1]];
+        }
+
+        if (ref_A == ref_B) {
+            if (FFABS(A.x - B.x) >= 4 || FFABS(A.y - B.y) >= 4)
+                return 1;
+            else
+                return 0;
+        } else
+            return 1;
+    }
+    /* mixed uni/bi prediction: always filter */
+    return 1;
+}
+#endif
+
+/* Compute deblocking boundary strengths for the edges of the transform
+ * unit at (x0, y0) of size (1 << log2_trafo_size) and store them into
+ * s->horizontal_bs / s->vertical_bs (one value per 4 luma samples).
+ * Edges on slice/tile boundaries with cross-filtering disabled are
+ * skipped.  In intra-only builds (no USE_PRED) every filtered edge
+ * gets bs = 2. */
+void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0,
+                                           int log2_trafo_size)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    int log2_min_pu_size = s->sps->log2_min_pu_size;
+    int log2_min_tu_size = s->sps->log2_min_tb_size;
+    int min_pu_width     = s->sps->min_pu_width;
+    int min_tu_width     = s->sps->min_tb_width;
+#ifdef USE_PRED
+    MvField *tab_mvf     = s->ref->tab_mvf;
+    int is_intra = tab_mvf[(y0 >> log2_min_pu_size) * min_pu_width +
+                           (x0 >> log2_min_pu_size)].pred_flag == PF_INTRA;
+    int j;
+#endif
+    int boundary_upper, boundary_left;
+    int i, bs;
+
+    // bs for the horizontal TU boundary at the top of the TU
+    boundary_upper = y0 > 0 && !(y0 & 7);
+    if (boundary_upper &&
+        ((!s->sh.slice_loop_filter_across_slices_enabled_flag &&
+          lc->boundary_flags & BOUNDARY_UPPER_SLICE &&
+          (y0 % (1 << s->sps->log2_ctb_size)) == 0) ||
+         (!s->pps->loop_filter_across_tiles_enabled_flag &&
+          lc->boundary_flags & BOUNDARY_UPPER_TILE &&
+          (y0 % (1 << s->sps->log2_ctb_size)) == 0)))
+        boundary_upper = 0;
+
+    if (boundary_upper) {
+#ifdef USE_PRED
+        RefPicList *rpl_top = (lc->boundary_flags & BOUNDARY_UPPER_SLICE) ?
+                              ff_hevc_get_ref_list(s, s->ref, x0, y0 - 1) :
+                              s->ref->refPicList;
+        int yp_pu = (y0 - 1) >> log2_min_pu_size;
+        int yq_pu =  y0      >> log2_min_pu_size;
+        int yp_tu = (y0 - 1) >> log2_min_tu_size;
+        int yq_tu =  y0      >> log2_min_tu_size;
+
+            for (i = 0; i < (1 << log2_trafo_size); i += 4) {
+                int x_pu = (x0 + i) >> log2_min_pu_size;
+                int x_tu = (x0 + i) >> log2_min_tu_size;
+                MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
+                MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];
+                uint8_t top_cbf_luma  = s->cbf_luma[yp_tu * min_tu_width + x_tu];
+                uint8_t curr_cbf_luma = s->cbf_luma[yq_tu * min_tu_width + x_tu];
+
+                /* bs = 2 for intra edges, 1 when either side has coded
+                 * luma residual, else decided by motion data */
+                if (curr->pred_flag == PF_INTRA || top->pred_flag == PF_INTRA)
+                    bs = 2;
+                else if (curr_cbf_luma || top_cbf_luma)
+                    bs = 1;
+                else
+                    bs = boundary_strength(s, curr, top, rpl_top);
+                s->horizontal_bs[((x0 + i) + y0 * s->bs_width) >> 2] = bs;
+            }
+#else
+            for (i = 0; i < (1 << log2_trafo_size); i += 4) {
+                s->horizontal_bs[((x0 + i) + y0 * s->bs_width) >> 2] = 2;
+            }
+#endif
+    }
+
+    // bs for vertical TU boundaries
+    boundary_left = x0 > 0 && !(x0 & 7);
+    if (boundary_left &&
+        ((!s->sh.slice_loop_filter_across_slices_enabled_flag &&
+          lc->boundary_flags & BOUNDARY_LEFT_SLICE &&
+          (x0 % (1 << s->sps->log2_ctb_size)) == 0) ||
+         (!s->pps->loop_filter_across_tiles_enabled_flag &&
+          lc->boundary_flags & BOUNDARY_LEFT_TILE &&
+          (x0 % (1 << s->sps->log2_ctb_size)) == 0)))
+        boundary_left = 0;
+
+    if (boundary_left) {
+#ifdef USE_PRED
+        RefPicList *rpl_left = (lc->boundary_flags & BOUNDARY_LEFT_SLICE) ?
+                               ff_hevc_get_ref_list(s, s->ref, x0 - 1, y0) :
+                               s->ref->refPicList;
+        int xp_pu = (x0 - 1) >> log2_min_pu_size;
+        int xq_pu =  x0      >> log2_min_pu_size;
+        int xp_tu = (x0 - 1) >> log2_min_tu_size;
+        int xq_tu =  x0      >> log2_min_tu_size;
+
+            for (i = 0; i < (1 << log2_trafo_size); i += 4) {
+                int y_pu      = (y0 + i) >> log2_min_pu_size;
+                int y_tu      = (y0 + i) >> log2_min_tu_size;
+                MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
+                MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];
+                uint8_t left_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xp_tu];
+                uint8_t curr_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xq_tu];
+
+                if (curr->pred_flag == PF_INTRA || left->pred_flag == PF_INTRA)
+                    bs = 2;
+                else if (curr_cbf_luma || left_cbf_luma)
+                    bs = 1;
+                else
+                    bs = boundary_strength(s, curr, left, rpl_left);
+                s->vertical_bs[(x0 + (y0 + i) * s->bs_width) >> 2] = bs;
+            }
+#else
+            for (i = 0; i < (1 << log2_trafo_size); i += 4) {
+                s->vertical_bs[(x0 + (y0 + i) * s->bs_width) >> 2] = 2;
+            }
+#endif
+    }
+
+#ifdef USE_PRED
+    /* PU boundaries strictly inside the TU only matter for inter blocks
+     * with PUs smaller than the TU */
+    if (log2_trafo_size > log2_min_pu_size && !is_intra) {
+        RefPicList *rpl = s->ref->refPicList;
+
+        // bs for TU internal horizontal PU boundaries
+        for (j = 8; j < (1 << log2_trafo_size); j += 8) {
+            int yp_pu = (y0 + j - 1) >> log2_min_pu_size;
+            int yq_pu = (y0 + j)     >> log2_min_pu_size;
+
+            for (i = 0; i < (1 << log2_trafo_size); i += 4) {
+                int x_pu = (x0 + i) >> log2_min_pu_size;
+                MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
+                MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];
+
+                bs = boundary_strength(s, curr, top, rpl);
+                s->horizontal_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs;
+            }
+        }
+
+        // bs for TU internal vertical PU boundaries
+        for (j = 0; j < (1 << log2_trafo_size); j += 4) {
+            int y_pu = (y0 + j) >> log2_min_pu_size;
+
+            for (i = 8; i < (1 << log2_trafo_size); i += 8) {
+                int xp_pu = (x0 + i - 1) >> log2_min_pu_size;
+                int xq_pu = (x0 + i)     >> log2_min_pu_size;
+                MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
+                MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];
+
+                bs = boundary_strength(s, curr, left, rpl);
+                s->vertical_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs;
+            }
+        }
+    }
+#endif
+}
+
+#undef LUMA
+#undef CB
+#undef CR
+
+/* Deblock the CTB at (x, y) and, when SAO is enabled, SAO-filter the
+ * neighbouring CTBs whose surrounding pixels are now fully deblocked
+ * (SAO runs one CTB behind deblocking, as the call offsets show).
+ * Progress is reported per row to frame-threading consumers. */
+void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
+{
+    int x_end = x >= s->sps->width  - ctb_size;
+    deblocking_filter_CTB(s, x, y);
+    if (s->sps->sao_enabled) {
+        int y_end = y >= s->sps->height - ctb_size;
+        if (y && x)
+            sao_filter_CTB(s, x - ctb_size, y - ctb_size);
+        if (x && y_end)
+            sao_filter_CTB(s, x - ctb_size, y);
+        if (y && x_end) {
+            sao_filter_CTB(s, x, y - ctb_size);
+            if (s->threads_type & FF_THREAD_FRAME )
+                ff_thread_report_progress(&s->ref->tf, y, 0);
+        }
+        if (x_end && y_end) {
+            sao_filter_CTB(s, x , y);
+            if (s->threads_type & FF_THREAD_FRAME )
+                ff_thread_report_progress(&s->ref->tf, y + ctb_size, 0);
+        }
+    } else if (s->threads_type & FF_THREAD_FRAME && x_end)
+        /* NOTE(review): the "- 4" presumably keeps the last few rows
+         * unreported because deblocking of the next CTB row may still
+         * touch them — confirm */
+        ff_thread_report_progress(&s->ref->tf, y + ctb_size - 4, 0);
+}
+
+/* Run the in-loop filters for the CTBs that become ready once the CTB
+ * at (x_ctb, y_ctb) has been decoded: the upper-left neighbour, plus the
+ * CTB above when this is the last column and the CTB to the left when
+ * this is the last row. */
+void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
+{
+    const int last_col = x_ctb >= s->sps->width  - ctb_size;
+    const int last_row = y_ctb >= s->sps->height - ctb_size;
+
+    if (x_ctb && y_ctb)
+        ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb - ctb_size, ctb_size);
+    if (last_col && y_ctb)
+        ff_hevc_hls_filter(s, x_ctb, y_ctb - ctb_size, ctb_size);
+    if (last_row && x_ctb)
+        ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb, ctb_size);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevc_mvs.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,777 @@
+/*
+ * HEVC video decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ * Copyright (C) 2013 Anand Meher Kotra
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "hevc.h"
+
+#ifdef USE_PRED
+/* (L0 candidate index, L1 candidate index) pairs used to build combined
+ * bi-predictive merge candidates in derive_spatial_merge_candidates();
+ * indexed by comb_idx in the order the pairs are tried. */
+static const uint8_t l0_l1_cand_idx[12][2] = {
+    { 0, 1, },
+    { 1, 0, },
+    { 0, 2, },
+    { 2, 0, },
+    { 1, 2, },
+    { 2, 1, },
+    { 0, 3, },
+    { 3, 0, },
+    { 1, 3, },
+    { 3, 1, },
+    { 2, 3, },
+    { 3, 2, },
+};
+#endif
+
+/* Compute the neighbour-availability flags (lc->na.*) for the prediction
+ * block at (x0, y0) of size nPbW x nPbH. x0b/y0b are the position within
+ * the current CTB; a neighbour inside the CTB is always available, while
+ * one in another CTB depends on the per-CTB flags set by the slice/tile
+ * boundary logic (ctb_up_flag, ctb_left_flag, ...). */
+void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0,
+                                     int nPbW, int nPbH)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    int x0b = x0 & ((1 << s->sps->log2_ctb_size) - 1);
+    int y0b = y0 & ((1 << s->sps->log2_ctb_size) - 1);
+
+    lc->na.cand_up       = (lc->ctb_up_flag   || y0b);
+    lc->na.cand_left     = (lc->ctb_left_flag || x0b);
+    lc->na.cand_up_left  = (!x0b && !y0b) ? lc->ctb_up_left_flag : lc->na.cand_left && lc->na.cand_up;
+    /* up-right: when the block touches the CTB's right edge, it depends on
+     * the up-right CTB; otherwise on the above row inside this CTB */
+    lc->na.cand_up_right_sap =
+            ((x0b + nPbW) == (1 << s->sps->log2_ctb_size)) ?
+                    lc->ctb_up_right_flag && !y0b : lc->na.cand_up;
+    lc->na.cand_up_right =
+            lc->na.cand_up_right_sap
+                     && (x0 + nPbW) < lc->end_of_tiles_x;
+    lc->na.cand_bottom_left = ((y0 + nPbH) >= lc->end_of_tiles_y) ? 0 : lc->na.cand_left;
+}
+
+#ifdef USE_PRED
+/*
+ * 6.4.1 Derivation process for z-scan order block availability.
+ * Returns non-zero when the block containing (xN, yN) has already been
+ * decoded (i.e. precedes (xCurr, yCurr) in z-scan order): either it lies
+ * in a CTB above/left of the current one, or its min-TB z-scan address is
+ * not greater than the current block's.
+ */
+static av_always_inline int z_scan_block_avail(HEVCContext *s, int xCurr, int yCurr,
+                              int xN, int yN)
+{
+#define MIN_TB_ADDR_ZS(x, y)                                            \
+    s->pps->min_tb_addr_zs[(y) * (s->sps->tb_mask+2) + (x)]
+
+    int xCurr_ctb = xCurr >> s->sps->log2_ctb_size;
+    int yCurr_ctb = yCurr >> s->sps->log2_ctb_size;
+    int xN_ctb    = xN    >> s->sps->log2_ctb_size;
+    int yN_ctb    = yN    >> s->sps->log2_ctb_size;
+    if( yN_ctb < yCurr_ctb || xN_ctb < xCurr_ctb )
+        return 1;
+    else {
+        int Curr = MIN_TB_ADDR_ZS((xCurr >> s->sps->log2_min_tb_size) & s->sps->tb_mask,
+                (yCurr >> s->sps->log2_min_tb_size) & s->sps->tb_mask);
+        int N    = MIN_TB_ADDR_ZS((xN >> s->sps->log2_min_tb_size) & s->sps->tb_mask,
+                (yN >> s->sps->log2_min_tb_size) & s->sps->tb_mask);
+        return N <= Curr;
+    }
+}
+
+// Check if the two luma locations belong to the same motion estimation region.
+// NOTE(review): despite the name ("is_diff"), this returns non-zero when
+// (xN, yN) and (xP, yP) fall in the SAME parallel-merge region; callers use
+// a true result to exclude the candidate (see derive_spatial_merge_candidates).
+static av_always_inline int is_diff_mer(HEVCContext *s, int xN, int yN, int xP, int yP)
+{
+    uint8_t plevel = s->pps->log2_parallel_merge_level;
+
+    return xN >> plevel == xP >> plevel &&
+           yN >> plevel == yP >> plevel;
+}
+
+/* MATCH_MV compares a whole Mv (x,y packed in 32 bits) in one load;
+ * MATCH compares a single scalar field. */
+#define MATCH_MV(x) (AV_RN32A(&A.x) == AV_RN32A(&B.x))
+#define MATCH(x) (A.x == B.x)
+
+// check if the mv's and refidx are the same between A and B
+// (only the lists selected by the common pred_flag are compared)
+static av_always_inline int compare_mv_ref_idx(struct MvField A, struct MvField B)
+{
+    int a_pf = A.pred_flag;
+    int b_pf = B.pred_flag;
+    if (a_pf == b_pf) {
+        if (a_pf == PF_BI) {
+            return MATCH(ref_idx[0]) && MATCH_MV(mv[0]) &&
+                   MATCH(ref_idx[1]) && MATCH_MV(mv[1]);
+        } else if (a_pf == PF_L0) {
+            return MATCH(ref_idx[0]) && MATCH_MV(mv[0]);
+        } else if (a_pf == PF_L1) {
+            return MATCH(ref_idx[1]) && MATCH_MV(mv[1]);
+        }
+    }
+    return 0;
+}
+
+/* Scale motion vector src by the POC-distance ratio tb/td into dst
+ * (temporal MV scaling, fixed-point per the spec).
+ * Precondition: td != 0 (callers guarantee a non-zero POC difference,
+ * otherwise the division below would be undefined). dst may alias src. */
+static av_always_inline void mv_scale(Mv *dst, Mv *src, int td, int tb)
+{
+    int tx, scale_factor;
+
+    td = av_clip_int8(td);
+    tb = av_clip_int8(tb);
+    tx = (0x4000 + abs(td / 2)) / td;
+    scale_factor = av_clip((tb * tx + 32) >> 6, -4096, 4095);
+    dst->x = av_clip_int16((scale_factor * src->x + 127 +
+                           (scale_factor * src->x < 0)) >> 8);
+    dst->y = av_clip_int16((scale_factor * src->y + 127 +
+                           (scale_factor * src->y < 0)) >> 8);
+}
+
+/* Derive mvLXCol from a collocated motion vector mvCol (list listCol,
+ * ref index refidxCol of the collocated picture colPic).
+ * Returns 0 (unavailable) when the long-term flags of the current and
+ * collocated references differ; otherwise copies mvCol directly (long-term
+ * ref, equal POC distances, or zero collocated distance) or POC-scales it,
+ * and returns 1. */
+static int check_mvset(Mv *mvLXCol, Mv *mvCol,
+                       int colPic, int poc,
+                       RefPicList *refPicList, int X, int refIdxLx,
+                       RefPicList *refPicList_col, int listCol, int refidxCol)
+{
+    int cur_lt = refPicList[X].isLongTerm[refIdxLx];
+    int col_lt = refPicList_col[listCol].isLongTerm[refidxCol];
+    int col_poc_diff, cur_poc_diff;
+
+    if (cur_lt != col_lt) {
+        mvLXCol->x = 0;
+        mvLXCol->y = 0;
+        return 0;
+    }
+
+    col_poc_diff = colPic - refPicList_col[listCol].list[refidxCol];
+    cur_poc_diff = poc    - refPicList[X].list[refIdxLx];
+
+    if (cur_lt || col_poc_diff == cur_poc_diff || !col_poc_diff) {
+        mvLXCol->x = mvCol->x;
+        mvLXCol->y = mvCol->y;
+    } else {
+        mv_scale(mvLXCol, mvCol, col_poc_diff, cur_poc_diff);
+    }
+    return 1;
+}
+
+/* Shorthand: run check_mvset() on list l (0 or 1) of temp_col. */
+#define CHECK_MVSET(l)                                          \
+    check_mvset(mvLXCol, temp_col.mv + l,                       \
+                colPic, s->poc,                                 \
+                refPicList, X, refIdxLx,                        \
+                refPicList_col, L ## l, temp_col.ref_idx[l])
+
+// derive the motion vectors section 8.5.3.1.8
+// Picks which list of the collocated MvField to use: the only coded list
+// for uni-prediction; for bi-prediction, list X when no reference in either
+// list follows the current picture in POC order, otherwise the list opposite
+// to collocated_list. Returns the availability flag from check_mvset().
+static int derive_temporal_colocated_mvs(HEVCContext *s, MvField temp_col,
+                                         int refIdxLx, Mv *mvLXCol, int X,
+                                         int colPic, RefPicList *refPicList_col)
+{
+    RefPicList *refPicList = s->ref->refPicList;
+
+    if (temp_col.pred_flag == PF_INTRA)
+        return 0;
+
+    if (!(temp_col.pred_flag & PF_L0))
+        return CHECK_MVSET(1);
+    else if (temp_col.pred_flag == PF_L0)
+        return CHECK_MVSET(0);
+    else if (temp_col.pred_flag == PF_BI) {
+        /* check_diffpicount: set when some reference in either list has a
+         * POC greater than the current picture's */
+        int check_diffpicount = 0;
+        int i, j;
+        for (j = 0; j < 2; j++) {
+            for (i = 0; i < refPicList[j].nb_refs; i++) {
+                if (refPicList[j].list[i] > s->poc) {
+                    check_diffpicount++;
+                    break;
+                }
+            }
+        }
+        if (!check_diffpicount) {
+            if (X==0)
+                return CHECK_MVSET(0);
+            else
+                return CHECK_MVSET(1);
+        } else {
+            if (s->sh.collocated_list == L1)
+                return CHECK_MVSET(0);
+            else
+                return CHECK_MVSET(1);
+        }
+    }
+
+    return 0;
+}
+
+/* Access the MvField table at min-PU coordinates (x, y); requires a local
+ * `tab_mvf` and `min_pu_width` in scope. */
+#define TAB_MVF(x, y)                                                   \
+    tab_mvf[(y) * min_pu_width + x]
+
+/* TAB_MVF at the pixel position named x<v>/y<v> (e.g. TAB_MVF_PU(A1)). */
+#define TAB_MVF_PU(v)                                                   \
+    TAB_MVF(((x ## v) >> s->sps->log2_min_pu_size),                     \
+            ((y ## v) >> s->sps->log2_min_pu_size))
+
+#define DERIVE_TEMPORAL_COLOCATED_MVS                                   \
+    derive_temporal_colocated_mvs(s, temp_col,                          \
+                                  refIdxLx, mvLXCol, X, colPic,         \
+                                  ff_hevc_get_ref_list(s, ref, x, y))
+
+/*
+ * 8.5.3.1.7  temporal luma motion vector prediction
+ * Tries the bottom-right collocated position first (if still inside the
+ * current CTB row and the picture), then the center position. Positions
+ * are rounded down to 16-pixel granularity, and with frame threading the
+ * collocated row must have been decoded before reading its MvField.
+ * Returns the availability flag; *mvLXCol is zeroed when there is no
+ * collocated reference frame.
+ */
+static int temporal_luma_motion_vector(HEVCContext *s, int x0, int y0,
+                                       int nPbW, int nPbH, int refIdxLx,
+                                       Mv *mvLXCol, int X)
+{
+    MvField *tab_mvf;
+    MvField temp_col;
+    int x, y, x_pu, y_pu;
+    int min_pu_width = s->sps->min_pu_width;
+    int availableFlagLXCol = 0;
+    int colPic;
+
+    HEVCFrame *ref = s->ref->collocated_ref;
+
+    if (!ref) {
+        memset(mvLXCol, 0, sizeof(*mvLXCol));
+        return 0;
+    }
+
+    tab_mvf = ref->tab_mvf;
+    colPic  = ref->poc;
+
+    //bottom right collocated motion vector
+    x = x0 + nPbW;
+    y = y0 + nPbH;
+
+    if (tab_mvf &&
+        (y0 >> s->sps->log2_ctb_size) == (y >> s->sps->log2_ctb_size) &&
+        y < s->sps->height &&
+        x < s->sps->width) {
+        x                 &= ~15;
+        y                 &= ~15;
+        if (s->threads_type == FF_THREAD_FRAME)
+            ff_thread_await_progress(&ref->tf, y, 0);
+        x_pu               = x >> s->sps->log2_min_pu_size;
+        y_pu               = y >> s->sps->log2_min_pu_size;
+        temp_col           = TAB_MVF(x_pu, y_pu);
+        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
+    }
+
+    // derive center collocated motion vector
+    if (tab_mvf && !availableFlagLXCol) {
+        x                  = x0 + (nPbW >> 1);
+        y                  = y0 + (nPbH >> 1);
+        x                 &= ~15;
+        y                 &= ~15;
+        if (s->threads_type == FF_THREAD_FRAME)
+            ff_thread_await_progress(&ref->tf, y, 0);
+        x_pu               = x >> s->sps->log2_min_pu_size;
+        y_pu               = y >> s->sps->log2_min_pu_size;
+        temp_col           = TAB_MVF(x_pu, y_pu);
+        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
+    }
+    return availableFlagLXCol;
+}
+
+/* A neighbour PU is usable iff its availability flag is set and it is not
+ * intra-coded. */
+#define AVAILABLE(cand, v)                                      \
+    (cand && !(TAB_MVF_PU(v).pred_flag == PF_INTRA))
+
+/* z-scan availability of the position named x<v>/y<v> relative to (x0, y0) */
+#define PRED_BLOCK_AVAILABLE(v)                                 \
+    z_scan_block_avail(s, x0, y0, x ## v, y ## v)
+
+/* compare the MvFields at the two named neighbour positions */
+#define COMPARE_MV_REFIDX(a, b)                                 \
+    compare_mv_ref_idx(TAB_MVF_PU(a), TAB_MVF_PU(b))
+
+/*
+ * 8.5.3.1.2  Derivation process for spatial merging candidates
+ *
+ * Fills mergecandlist[] with up to max_num_merge_cand candidates in spec
+ * order: spatial A1, B1, B0, A0, B2; then the temporal candidate; then
+ * combined bi-predictive candidates (B slices); then zero-MV candidates.
+ * Returns early as soon as the entry selected by merge_idx is written,
+ * so later list entries may be left uninitialized.
+ */
+static void derive_spatial_merge_candidates(HEVCContext *s, int x0, int y0,
+                                            int nPbW, int nPbH,
+                                            int log2_cb_size,
+                                            int singleMCLFlag, int part_idx,
+                                            int merge_idx,
+                                            struct MvField mergecandlist[])
+{
+    HEVCLocalContext *lc   = s->HEVClc;
+    RefPicList *refPicList = s->ref->refPicList;
+    MvField *tab_mvf       = s->ref->tab_mvf;
+
+    const int min_pu_width = s->sps->min_pu_width;
+
+    const int cand_bottom_left = lc->na.cand_bottom_left;
+    const int cand_left        = lc->na.cand_left;
+    const int cand_up_left     = lc->na.cand_up_left;
+    const int cand_up          = lc->na.cand_up;
+    const int cand_up_right    = lc->na.cand_up_right_sap;
+
+    /* pixel positions of the five spatial neighbour candidates */
+    const int xA1    = x0 - 1;
+    const int yA1    = y0 + nPbH - 1;
+
+    const int xB1    = x0 + nPbW - 1;
+    const int yB1    = y0 - 1;
+
+    const int xB0    = x0 + nPbW;
+    const int yB0    = y0 - 1;
+
+    const int xA0    = x0 - 1;
+    const int yA0    = y0 + nPbH;
+
+    const int xB2    = x0 - 1;
+    const int yB2    = y0 - 1;
+
+    const int nb_refs = (s->sh.slice_type == P_SLICE) ?
+                        s->sh.nb_refs[0] : FFMIN(s->sh.nb_refs[0], s->sh.nb_refs[1]);
+
+    int zero_idx = 0;
+
+    int nb_merge_cand = 0;
+    int nb_orig_merge_cand = 0;
+
+    int is_available_a0;
+    int is_available_a1;
+    int is_available_b0;
+    int is_available_b1;
+    int is_available_b2;
+
+
+    /* left (A1): excluded for the 2nd PU of vertical splits (it would
+     * duplicate the 1st PU's motion) or when it is in the same merge
+     * estimation region. NOTE(review): `&&` binds tighter than `||` here,
+     * i.e. the condition is (split-case) || is_diff_mer(...) — this matches
+     * upstream FFmpeg and is intentional. */
+    if (!singleMCLFlag && part_idx == 1 &&
+        (lc->cu.part_mode == PART_Nx2N ||
+         lc->cu.part_mode == PART_nLx2N ||
+         lc->cu.part_mode == PART_nRx2N) ||
+        is_diff_mer(s, xA1, yA1, x0, y0)) {
+        is_available_a1 = 0;
+    } else {
+        is_available_a1 = AVAILABLE(cand_left, A1);
+        if (is_available_a1) {
+            mergecandlist[nb_merge_cand] = TAB_MVF_PU(A1);
+            if (merge_idx == 0)
+                return;
+            nb_merge_cand++;
+        }
+    }
+
+    /* above (B1): analogous exclusion for the 2nd PU of horizontal splits */
+    if (!singleMCLFlag && part_idx == 1 &&
+        (lc->cu.part_mode == PART_2NxN ||
+         lc->cu.part_mode == PART_2NxnU ||
+         lc->cu.part_mode == PART_2NxnD) ||
+        is_diff_mer(s, xB1, yB1, x0, y0)) {
+        is_available_b1 = 0;
+    } else {
+        is_available_b1 = AVAILABLE(cand_up, B1);
+        if (is_available_b1 &&
+            !(is_available_a1 && COMPARE_MV_REFIDX(B1, A1))) {
+            mergecandlist[nb_merge_cand] = TAB_MVF_PU(B1);
+            if (merge_idx == nb_merge_cand)
+                return;
+            nb_merge_cand++;
+        }
+    }
+
+    // above right spatial merge candidate
+    is_available_b0 = AVAILABLE(cand_up_right, B0) &&
+                      xB0 < s->sps->width &&
+                      PRED_BLOCK_AVAILABLE(B0) &&
+                      !is_diff_mer(s, xB0, yB0, x0, y0);
+
+    if (is_available_b0 &&
+        !(is_available_b1 && COMPARE_MV_REFIDX(B0, B1))) {
+        mergecandlist[nb_merge_cand] = TAB_MVF_PU(B0);
+        if (merge_idx == nb_merge_cand)
+            return;
+        nb_merge_cand++;
+    }
+
+    // left bottom spatial merge candidate
+    is_available_a0 = AVAILABLE(cand_bottom_left, A0) &&
+                      yA0 < s->sps->height &&
+                      PRED_BLOCK_AVAILABLE(A0) &&
+                      !is_diff_mer(s, xA0, yA0, x0, y0);
+
+    if (is_available_a0 &&
+        !(is_available_a1 && COMPARE_MV_REFIDX(A0, A1))) {
+        mergecandlist[nb_merge_cand] = TAB_MVF_PU(A0);
+        if (merge_idx == nb_merge_cand)
+            return;
+        nb_merge_cand++;
+    }
+
+    // above left spatial merge candidate (only used while fewer than 4
+    // spatial candidates were added)
+    is_available_b2 = AVAILABLE(cand_up_left, B2) &&
+                      !is_diff_mer(s, xB2, yB2, x0, y0);
+
+    if (is_available_b2 &&
+        !(is_available_a1 && COMPARE_MV_REFIDX(B2, A1)) &&
+        !(is_available_b1 && COMPARE_MV_REFIDX(B2, B1)) &&
+        nb_merge_cand != 4) {
+        mergecandlist[nb_merge_cand] = TAB_MVF_PU(B2);
+        if (merge_idx == nb_merge_cand)
+            return;
+        nb_merge_cand++;
+    }
+
+    // temporal motion vector candidate
+    if (s->sh.slice_temporal_mvp_enabled_flag &&
+        nb_merge_cand < s->sh.max_num_merge_cand) {
+        Mv mv_l0_col = { 0 }, mv_l1_col = { 0 };
+        int available_l0 = temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
+                                                       0, &mv_l0_col, 0);
+        int available_l1 = (s->sh.slice_type == B_SLICE) ?
+                           temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
+                                                       0, &mv_l1_col, 1) : 0;
+
+        if (available_l0 || available_l1) {
+            mergecandlist[nb_merge_cand].pred_flag = available_l0 + (available_l1 << 1);
+            AV_ZERO16(mergecandlist[nb_merge_cand].ref_idx);
+            mergecandlist[nb_merge_cand].mv[0]      = mv_l0_col;
+            mergecandlist[nb_merge_cand].mv[1]      = mv_l1_col;
+
+            if (merge_idx == nb_merge_cand)
+                return;
+            nb_merge_cand++;
+        }
+    }
+
+    nb_orig_merge_cand = nb_merge_cand;
+
+    // combined bi-predictive merge candidates  (applies for B slices)
+    if (s->sh.slice_type == B_SLICE && nb_orig_merge_cand > 1 &&
+        nb_orig_merge_cand < s->sh.max_num_merge_cand) {
+        int comb_idx = 0;
+
+        for (comb_idx = 0; nb_merge_cand < s->sh.max_num_merge_cand &&
+                           comb_idx < nb_orig_merge_cand * (nb_orig_merge_cand - 1); comb_idx++) {
+            int l0_cand_idx = l0_l1_cand_idx[comb_idx][0];
+            int l1_cand_idx = l0_l1_cand_idx[comb_idx][1];
+            MvField l0_cand = mergecandlist[l0_cand_idx];
+            MvField l1_cand = mergecandlist[l1_cand_idx];
+
+            /* combine only when the pair does not reduce to an existing
+             * uni-directional candidate (different ref picture or mv) */
+            if ((l0_cand.pred_flag & PF_L0) && (l1_cand.pred_flag & PF_L1) &&
+                (refPicList[0].list[l0_cand.ref_idx[0]] !=
+                 refPicList[1].list[l1_cand.ref_idx[1]] ||
+                 AV_RN32A(&l0_cand.mv[0]) != AV_RN32A(&l1_cand.mv[1]))) {
+                mergecandlist[nb_merge_cand].ref_idx[0]   = l0_cand.ref_idx[0];
+                mergecandlist[nb_merge_cand].ref_idx[1]   = l1_cand.ref_idx[1];
+                mergecandlist[nb_merge_cand].pred_flag    = PF_BI;
+                AV_COPY32(&mergecandlist[nb_merge_cand].mv[0], &l0_cand.mv[0]);
+                AV_COPY32(&mergecandlist[nb_merge_cand].mv[1], &l1_cand.mv[1]);
+                if (merge_idx == nb_merge_cand)
+                    return;
+                nb_merge_cand++;
+            }
+        }
+    }
+
+    // append Zero motion vector candidates
+    while (nb_merge_cand < s->sh.max_num_merge_cand) {
+        mergecandlist[nb_merge_cand].pred_flag    = PF_L0 + ((s->sh.slice_type == B_SLICE) << 1);
+        AV_ZERO32(mergecandlist[nb_merge_cand].mv + 0);
+        AV_ZERO32(mergecandlist[nb_merge_cand].mv + 1);
+        mergecandlist[nb_merge_cand].ref_idx[0]   = zero_idx < nb_refs ? zero_idx : 0;
+        mergecandlist[nb_merge_cand].ref_idx[1]   = zero_idx < nb_refs ? zero_idx : 0;
+
+        if (merge_idx == nb_merge_cand)
+            return;
+        nb_merge_cand++;
+        zero_idx++;
+    }
+}
+
+/*
+ * 8.5.3.1.1 Derivation process of luma Mvs for merge mode.
+ * Builds the merge candidate list and writes the candidate selected by
+ * merge_idx into *mv. With a large parallel merge level and an 8x8 CU, all
+ * PUs of the CU share one candidate list derived at the CU position
+ * (singleMCLFlag). A 4x8/8x4 bi-predictive candidate is demoted to L0-only,
+ * as bi-prediction is not allowed for those block sizes.
+ */
+void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW,
+                                int nPbH, int log2_cb_size, int part_idx,
+                                int merge_idx, MvField *mv)
+{
+    int singleMCLFlag = 0;
+    int nCS = 1 << log2_cb_size;
+    LOCAL_ALIGNED(4, MvField, mergecand_list, [MRG_MAX_NUM_CANDS]);
+    int nPbW2 = nPbW;   /* original PU size, kept for the 4x8/8x4 check */
+    int nPbH2 = nPbH;
+    HEVCLocalContext *lc = s->HEVClc;
+
+    if (s->pps->log2_parallel_merge_level > 2 && nCS == 8) {
+        singleMCLFlag = 1;
+        x0            = lc->cu.x;
+        y0            = lc->cu.y;
+        nPbW          = nCS;
+        nPbH          = nCS;
+        part_idx      = 0;
+    }
+
+    ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
+    derive_spatial_merge_candidates(s, x0, y0, nPbW, nPbH, log2_cb_size,
+                                    singleMCLFlag, part_idx,
+                                    merge_idx, mergecand_list);
+
+    if (mergecand_list[merge_idx].pred_flag == PF_BI &&
+        (nPbW2 + nPbH2) == 12) {
+        mergecand_list[merge_idx].pred_flag = PF_L0;
+    }
+
+    *mv = mergecand_list[merge_idx];
+}
+
+/* Scale *mv in place by the POC-distance ratio between the current
+ * reference (ref_idx_curr/ref_idx) and the reference used by the neighbour
+ * PU at min-PU position (x, y) in list elist. No-op when both point to the
+ * same reference picture; a zero neighbour distance is clamped to 1. */
+static av_always_inline void dist_scale(HEVCContext *s, Mv *mv,
+                                        int min_pu_width, int x, int y,
+                                        int elist, int ref_idx_curr, int ref_idx)
+{
+    RefPicList *refPicList = s->ref->refPicList;
+    MvField *tab_mvf       = s->ref->tab_mvf;
+    int ref_pic_elist      = refPicList[elist].list[TAB_MVF(x, y).ref_idx[elist]];
+    int ref_pic_curr       = refPicList[ref_idx_curr].list[ref_idx];
+
+    if (ref_pic_elist != ref_pic_curr) {
+        int poc_diff = s->poc - ref_pic_elist;
+        if (!poc_diff)
+            poc_diff = 1;
+        mv_scale(mv, mv, poc_diff, s->poc - ref_pic_curr);
+    }
+}
+
+/* MVP candidate, exact match: take the neighbour PU's mv at min-PU (x, y)
+ * from list pred_flag_index only if that PU uses that list AND references
+ * the very same picture as the current (ref_idx_curr, ref_idx).
+ * Returns 1 and sets *mv on success, 0 otherwise. */
+static int mv_mp_mode_mx(HEVCContext *s, int x, int y, int pred_flag_index,
+                         Mv *mv, int ref_idx_curr, int ref_idx)
+{
+    MvField *tab_mvf = s->ref->tab_mvf;
+    int min_pu_width = s->sps->min_pu_width;
+
+    RefPicList *refPicList = s->ref->refPicList;
+
+    if (((TAB_MVF(x, y).pred_flag) & (1 << pred_flag_index)) &&
+        refPicList[pred_flag_index].list[TAB_MVF(x, y).ref_idx[pred_flag_index]] == refPicList[ref_idx_curr].list[ref_idx]) {
+        *mv = TAB_MVF(x, y).mv[pred_flag_index];
+        return 1;
+    }
+    return 0;
+}
+
+/* MVP candidate, scaled variant: like mv_mp_mode_mx() but accepts a
+ * neighbour referencing a different picture, provided its long-term flag
+ * matches the current reference's; short-term mvs are POC-scaled via
+ * dist_scale(). Returns 1 and sets *mv on success, 0 otherwise. */
+static int mv_mp_mode_mx_lt(HEVCContext *s, int x, int y, int pred_flag_index,
+                            Mv *mv, int ref_idx_curr, int ref_idx)
+{
+    MvField *tab_mvf = s->ref->tab_mvf;
+    int min_pu_width = s->sps->min_pu_width;
+
+    RefPicList *refPicList = s->ref->refPicList;
+
+    if ((TAB_MVF(x, y).pred_flag) & (1 << pred_flag_index)) {
+        int currIsLongTerm     = refPicList[ref_idx_curr].isLongTerm[ref_idx];
+
+        int colIsLongTerm =
+            refPicList[pred_flag_index].isLongTerm[(TAB_MVF(x, y).ref_idx[pred_flag_index])];
+
+        if (colIsLongTerm == currIsLongTerm) {
+            *mv = TAB_MVF(x, y).mv[pred_flag_index];
+            /* long-term references are never POC-scaled */
+            if (!currIsLongTerm)
+                dist_scale(s, mv, min_pu_width, x, y,
+                           pred_flag_index, ref_idx_curr, ref_idx);
+            return 1;
+        }
+    }
+    return 0;
+}
+
+/* Try the exact-match MVP candidate at the neighbour named x<v>/y<v>. */
+#define MP_MX(v, pred, mx)                                      \
+    mv_mp_mode_mx(s,                                            \
+                  (x ## v) >> s->sps->log2_min_pu_size,         \
+                  (y ## v) >> s->sps->log2_min_pu_size,         \
+                  pred, &mx, ref_idx_curr, ref_idx)
+
+/* Try the long-term/scaled MVP candidate at the neighbour named x<v>/y<v>. */
+#define MP_MX_LT(v, pred, mx)                                   \
+    mv_mp_mode_mx_lt(s,                                         \
+                     (x ## v) >> s->sps->log2_min_pu_size,      \
+                     (y ## v) >> s->sps->log2_min_pu_size,      \
+                     pred, &mx, ref_idx_curr, ref_idx)
+
+/*
+ * 8.5.3.1.6  Derivation process for motion vector predictor candidates
+ * (AMVP). Builds up to two predictors — mxA from the left neighbours
+ * (A0, A1) and mxB from the above neighbours (B0, B1, B2) — falling back
+ * to the temporal predictor, then writes the one selected by mvp_lx_flag
+ * into mv->mv[LX]. mv->ref_idx[LX] must already hold the current
+ * reference index on entry.
+ */
+void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
+                              int nPbH, int log2_cb_size, int part_idx,
+                              int merge_idx, MvField *mv,
+                              int mvp_lx_flag, int LX)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    MvField *tab_mvf = s->ref->tab_mvf;
+    int isScaledFlag_L0 = 0;
+    int availableFlagLXA0 = 1;
+    int availableFlagLXB0 = 1;
+    int numMVPCandLX = 0;
+    int min_pu_width = s->sps->min_pu_width;
+
+    int xA0, yA0;
+    int is_available_a0;
+    int xA1, yA1;
+    int is_available_a1;
+    int xB0, yB0;
+    int is_available_b0;
+    int xB1, yB1;
+    int is_available_b1;
+    int xB2, yB2;
+    int is_available_b2;
+
+    Mv mvpcand_list[2] = { { 0 } };
+    Mv mxA;
+    Mv mxB;
+    int ref_idx_curr;
+    int ref_idx = 0;
+    int pred_flag_index_l0;
+    int pred_flag_index_l1;
+
+    const int cand_bottom_left = lc->na.cand_bottom_left;
+    const int cand_left        = lc->na.cand_left;
+    const int cand_up_left     = lc->na.cand_up_left;
+    const int cand_up          = lc->na.cand_up;
+    const int cand_up_right    = lc->na.cand_up_right_sap;
+    ref_idx_curr       = LX;
+    ref_idx            = mv->ref_idx[LX];
+    pred_flag_index_l0 = LX;    /* same list first, then the opposite list */
+    pred_flag_index_l1 = !LX;
+
+    // left bottom spatial candidate
+    xA0 = x0 - 1;
+    yA0 = y0 + nPbH;
+
+    is_available_a0 = AVAILABLE(cand_bottom_left, A0) &&
+                      yA0 < s->sps->height &&
+                      PRED_BLOCK_AVAILABLE(A0);
+
+    //left spatial merge candidate
+    xA1    = x0 - 1;
+    yA1    = y0 + nPbH - 1;
+
+    is_available_a1 = AVAILABLE(cand_left, A1);
+    if (is_available_a0 || is_available_a1)
+        isScaledFlag_L0 = 1;
+
+    /* mxA: first try exact reference matches on A0 then A1, then the
+     * scaled/long-term variants; any hit jumps to the B candidates */
+    if (is_available_a0) {
+        if (MP_MX(A0, pred_flag_index_l0, mxA)) {
+            goto b_candidates;
+        }
+        if (MP_MX(A0, pred_flag_index_l1, mxA)) {
+            goto b_candidates;
+        }
+    }
+
+    if (is_available_a1) {
+        if (MP_MX(A1, pred_flag_index_l0, mxA)) {
+            goto b_candidates;
+        }
+        if (MP_MX(A1, pred_flag_index_l1, mxA)) {
+            goto b_candidates;
+        }
+    }
+
+    if (is_available_a0) {
+        if (MP_MX_LT(A0, pred_flag_index_l0, mxA)) {
+            goto b_candidates;
+        }
+        if (MP_MX_LT(A0, pred_flag_index_l1, mxA)) {
+            goto b_candidates;
+        }
+    }
+
+    if (is_available_a1) {
+        if (MP_MX_LT(A1, pred_flag_index_l0, mxA)) {
+            goto b_candidates;
+        }
+        if (MP_MX_LT(A1, pred_flag_index_l1, mxA)) {
+            goto b_candidates;
+        }
+    }
+    availableFlagLXA0 = 0;
+
+b_candidates:
+    // B candidates
+    // above right spatial merge candidate
+    xB0    = x0 + nPbW;
+    yB0    = y0 - 1;
+
+    is_available_b0 =  AVAILABLE(cand_up_right, B0) &&
+                       xB0 < s->sps->width &&
+                       PRED_BLOCK_AVAILABLE(B0);
+
+    // above spatial merge candidate
+    xB1    = x0 + nPbW - 1;
+    yB1    = y0 - 1;
+    is_available_b1 = AVAILABLE(cand_up, B1);
+
+    // above left spatial merge candidate
+    xB2 = x0 - 1;
+    yB2 = y0 - 1;
+    is_available_b2 = AVAILABLE(cand_up_left, B2);
+
+    /* mxB: exact reference matches only on B0, B1, B2 */
+    // above right spatial merge candidate
+    if (is_available_b0) {
+        if (MP_MX(B0, pred_flag_index_l0, mxB)) {
+            goto scalef;
+        }
+        if (MP_MX(B0, pred_flag_index_l1, mxB)) {
+            goto scalef;
+        }
+    }
+
+    // above spatial merge candidate
+    if (is_available_b1) {
+        if (MP_MX(B1, pred_flag_index_l0, mxB)) {
+            goto scalef;
+        }
+        if (MP_MX(B1, pred_flag_index_l1, mxB)) {
+            goto scalef;
+        }
+    }
+
+    // above left spatial merge candidate
+    if (is_available_b2) {
+        if (MP_MX(B2, pred_flag_index_l0, mxB)) {
+            goto scalef;
+        }
+        if (MP_MX(B2, pred_flag_index_l1, mxB)) {
+            goto scalef;
+        }
+    }
+    availableFlagLXB0 = 0;
+
+scalef:
+    /* no usable left neighbour at all: promote mxB to the A slot and
+     * re-derive mxB with the scaled/long-term variants */
+    if (!isScaledFlag_L0) {
+        if (availableFlagLXB0) {
+            availableFlagLXA0 = 1;
+            mxA = mxB;
+        }
+        availableFlagLXB0 = 0;
+
+        // XB0 and L1
+        if (is_available_b0) {
+            availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l0, mxB);
+            if (!availableFlagLXB0)
+                availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l1, mxB);
+        }
+
+        if (is_available_b1 && !availableFlagLXB0) {
+            availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l0, mxB);
+            if (!availableFlagLXB0)
+                availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l1, mxB);
+        }
+
+        if (is_available_b2 && !availableFlagLXB0) {
+            availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l0, mxB);
+            if (!availableFlagLXB0)
+                availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l1, mxB);
+        }
+    }
+
+    if (availableFlagLXA0)
+        mvpcand_list[numMVPCandLX++] = mxA;
+
+    /* drop mxB when it duplicates mxA */
+    if (availableFlagLXB0 && (!availableFlagLXA0 || mxA.x != mxB.x || mxA.y != mxB.y))
+        mvpcand_list[numMVPCandLX++] = mxB;
+
+    //temporal motion vector prediction candidate
+    if (numMVPCandLX < 2 && s->sh.slice_temporal_mvp_enabled_flag &&
+        mvp_lx_flag == numMVPCandLX) {
+        Mv mv_col;
+        int available_col = temporal_luma_motion_vector(s, x0, y0, nPbW,
+                                                        nPbH, ref_idx,
+                                                        &mv_col, LX);
+        if (available_col)
+            mvpcand_list[numMVPCandLX++] = mv_col;
+    }
+
+    mv->mv[LX] = mvpcand_list[mvp_lx_flag];
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevc_ps.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1704 @@
+/*
+ * HEVC Parameter Set decoding
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ * Copyright (C) 2012 - 2013 Mickael Raulet
+ * Copyright (C) 2012 - 2013 Gildas Cocherel
+ * Copyright (C) 2013 Vittorio Giovara
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "golomb.h"
+#include "hevc.h"
+
+static const uint8_t default_scaling_list_intra[] = {
+    16, 16, 16, 16, 17, 18, 21, 24,
+    16, 16, 16, 16, 17, 19, 22, 25,
+    16, 16, 17, 18, 20, 22, 25, 29,
+    16, 16, 18, 21, 24, 27, 31, 36,
+    17, 17, 20, 24, 30, 35, 41, 47,
+    18, 19, 22, 27, 35, 44, 54, 65,
+    21, 22, 25, 31, 41, 54, 70, 88,
+    24, 25, 29, 36, 47, 65, 88, 115
+};
+
+static const uint8_t default_scaling_list_inter[] = {
+    16, 16, 16, 16, 17, 18, 20, 24,
+    16, 16, 16, 17, 18, 20, 24, 25,
+    16, 16, 17, 18, 20, 24, 25, 28,
+    16, 17, 18, 20, 24, 25, 28, 33,
+    17, 18, 20, 24, 25, 28, 33, 41,
+    18, 20, 24, 25, 28, 33, 41, 54,
+    20, 24, 25, 28, 33, 41, 54, 71,
+    24, 25, 28, 33, 41, 54, 71, 91
+};
+
+#ifdef USE_PRED
+int ff_hevc_decode_short_term_rps(HEVCContext *s, ShortTermRPS *rps,
+                                  const HEVCSPS *sps, int is_slice_header)
+{
+    HEVCLocalContext *lc = s->HEVClc;
+    uint8_t rps_predict = 0;
+    int delta_poc;
+    int k0 = 0;
+    int k1 = 0;
+    int k  = 0;
+    int i;
+
+    GetBitContext *gb = &lc->gb;
+
+    if (rps != sps->st_rps && sps->nb_st_rps)
+        rps_predict = get_bits1(gb);
+
+    if (rps_predict) {
+        const ShortTermRPS *rps_ridx;
+        int delta_rps;
+        unsigned abs_delta_rps;
+        uint8_t use_delta_flag = 0;
+        uint8_t delta_rps_sign;
+
+        if (is_slice_header) {
+            unsigned int delta_idx = get_ue_golomb_long(gb) + 1;
+            if (delta_idx > sps->nb_st_rps) {
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "Invalid value of delta_idx in slice header RPS: %d > %d.\n",
+                       delta_idx, sps->nb_st_rps);
+                return AVERROR_INVALIDDATA;
+            }
+            rps_ridx = &sps->st_rps[sps->nb_st_rps - delta_idx];
+        } else
+            rps_ridx = &sps->st_rps[rps - sps->st_rps - 1];
+
+        delta_rps_sign = get_bits1(gb);
+        abs_delta_rps  = get_ue_golomb_long(gb) + 1;
+        if (abs_delta_rps < 1 || abs_delta_rps > 32768) {
+            av_log(s->avctx, AV_LOG_ERROR,
+                   "Invalid value of abs_delta_rps: %d\n",
+                   abs_delta_rps);
+            return AVERROR_INVALIDDATA;
+        }
+        delta_rps      = (1 - (delta_rps_sign << 1)) * abs_delta_rps;
+        for (i = 0; i <= rps_ridx->num_delta_pocs; i++) {
+            int used = rps->used[k] = get_bits1(gb);
+
+            if (!used)
+                use_delta_flag = get_bits1(gb);
+
+            if (used || use_delta_flag) {
+                if (i < rps_ridx->num_delta_pocs)
+                    delta_poc = delta_rps + rps_ridx->delta_poc[i];
+                else
+                    delta_poc = delta_rps;
+                rps->delta_poc[k] = delta_poc;
+                if (delta_poc < 0)
+                    k0++;
+                else
+                    k1++;
+                k++;
+            }
+        }
+
+        rps->num_delta_pocs    = k;
+        rps->num_negative_pics = k0;
+        // sort in increasing order (smallest first)
+        if (rps->num_delta_pocs != 0) {
+            int used, tmp;
+            for (i = 1; i < rps->num_delta_pocs; i++) {
+                delta_poc = rps->delta_poc[i];
+                used      = rps->used[i];
+                for (k = i - 1; k >= 0; k--) {
+                    tmp = rps->delta_poc[k];
+                    if (delta_poc < tmp) {
+                        rps->delta_poc[k + 1] = tmp;
+                        rps->used[k + 1]      = rps->used[k];
+                        rps->delta_poc[k]     = delta_poc;
+                        rps->used[k]          = used;
+                    }
+                }
+            }
+        }
+        if ((rps->num_negative_pics >> 1) != 0) {
+            int used;
+            k = rps->num_negative_pics - 1;
+            // flip the negative values to largest first
+            for (i = 0; i < rps->num_negative_pics >> 1; i++) {
+                delta_poc         = rps->delta_poc[i];
+                used              = rps->used[i];
+                rps->delta_poc[i] = rps->delta_poc[k];
+                rps->used[i]      = rps->used[k];
+                rps->delta_poc[k] = delta_poc;
+                rps->used[k]      = used;
+                k--;
+            }
+        }
+    } else {
+        unsigned int prev, nb_positive_pics;
+        rps->num_negative_pics = get_ue_golomb_long(gb);
+        nb_positive_pics       = get_ue_golomb_long(gb);
+
+        if (rps->num_negative_pics >= MAX_REFS ||
+            nb_positive_pics >= MAX_REFS) {
+            av_log(s->avctx, AV_LOG_ERROR, "Too many refs in a short term RPS.\n");
+            return AVERROR_INVALIDDATA;
+        }
+
+        rps->num_delta_pocs = rps->num_negative_pics + nb_positive_pics;
+        if (rps->num_delta_pocs) {
+            prev = 0;
+            for (i = 0; i < rps->num_negative_pics; i++) {
+                delta_poc = get_ue_golomb_long(gb) + 1;
+                prev -= delta_poc;
+                rps->delta_poc[i] = prev;
+                rps->used[i]      = get_bits1(gb);
+            }
+            prev = 0;
+            for (i = 0; i < nb_positive_pics; i++) {
+                delta_poc = get_ue_golomb_long(gb) + 1;
+                prev += delta_poc;
+                rps->delta_poc[rps->num_negative_pics + i] = prev;
+                rps->used[rps->num_negative_pics + i]      = get_bits1(gb);
+            }
+        }
+    }
+    return 0;
+}
+#endif
+
+#ifndef USE_MSPS
+static const AVRational vui_sar[] = {
+    {  0,   1 },
+    {  1,   1 },
+    { 12,  11 },
+    { 10,  11 },
+    { 16,  11 },
+    { 40,  33 },
+    { 24,  11 },
+    { 20,  11 },
+    { 32,  11 },
+    { 80,  33 },
+    { 18,  11 },
+    { 15,  11 },
+    { 64,  33 },
+    { 160, 99 },
+    {  4,   3 },
+    {  3,   2 },
+    {  2,   1 },
+};
+
+static int decode_profile_tier_level(HEVCContext *s, PTLCommon *ptl)
+{
+    int i;
+    HEVCLocalContext *lc = s->HEVClc;
+    GetBitContext *gb = &lc->gb;
+
+    if (get_bits_left(gb) < 2+1+5 + 32 + 4 + 16 + 16 + 12)
+        return -1;
+
+    ptl->profile_space = get_bits(gb, 2);
+    ptl->tier_flag     = get_bits1(gb);
+    ptl->profile_idc   = get_bits(gb, 5);
+    if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN)
+        av_log(s->avctx, AV_LOG_DEBUG, "Main profile bitstream\n");
+    else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_10)
+        av_log(s->avctx, AV_LOG_DEBUG, "Main 10 profile bitstream\n");
+    else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_STILL_PICTURE)
+        av_log(s->avctx, AV_LOG_DEBUG, "Main Still Picture profile bitstream\n");
+    else if (ptl->profile_idc == FF_PROFILE_HEVC_REXT)
+        av_log(s->avctx, AV_LOG_DEBUG, "Range Extension profile bitstream\n");
+    else
+        av_log(s->avctx, AV_LOG_WARNING, "Unknown HEVC profile: %d\n", ptl->profile_idc);
+
+    for (i = 0; i < 32; i++)
+        ptl->profile_compatibility_flag[i] = get_bits1(gb);
+    ptl->progressive_source_flag    = get_bits1(gb);
+    ptl->interlaced_source_flag     = get_bits1(gb);
+    ptl->non_packed_constraint_flag = get_bits1(gb);
+    ptl->frame_only_constraint_flag = get_bits1(gb);
+
+    skip_bits(gb, 16); // XXX_reserved_zero_44bits[0..15]
+    skip_bits(gb, 16); // XXX_reserved_zero_44bits[16..31]
+    skip_bits(gb, 12); // XXX_reserved_zero_44bits[32..43]
+
+    return 0;
+}
+
+static int parse_ptl(HEVCContext *s, PTL *ptl, int max_num_sub_layers)
+{
+    int i;
+    HEVCLocalContext *lc = s->HEVClc;
+    GetBitContext *gb = &lc->gb;
+    if (decode_profile_tier_level(s, &ptl->general_ptl) < 0 ||
+        get_bits_left(gb) < 8 + 8*2) {
+        av_log(s->avctx, AV_LOG_ERROR, "PTL information too short\n");
+        return -1;
+    }
+
+    ptl->general_ptl.level_idc = get_bits(gb, 8);
+
+    for (i = 0; i < max_num_sub_layers - 1; i++) {
+        ptl->sub_layer_profile_present_flag[i] = get_bits1(gb);
+        ptl->sub_layer_level_present_flag[i]   = get_bits1(gb);
+    }
+
+    if (max_num_sub_layers - 1> 0)
+        for (i = max_num_sub_layers - 1; i < 8; i++)
+            skip_bits(gb, 2); // reserved_zero_2bits[i]
+    for (i = 0; i < max_num_sub_layers - 1; i++) {
+        if (ptl->sub_layer_profile_present_flag[i] &&
+            decode_profile_tier_level(s, &ptl->sub_layer_ptl[i]) < 0) {
+            av_log(s->avctx, AV_LOG_ERROR,
+                   "PTL information for sublayer %i too short\n", i);
+            return -1;
+        }
+        if (ptl->sub_layer_level_present_flag[i]) {
+            if (get_bits_left(gb) < 8) {
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "Not enough data for sublayer %i level_idc\n", i);
+                return -1;
+            } else
+                ptl->sub_layer_ptl[i].level_idc = get_bits(gb, 8);
+        }
+    }
+
+    return 0;
+}
+
+static void decode_sublayer_hrd(HEVCContext *s, unsigned int nb_cpb,
+                                int subpic_params_present)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+    int i;
+
+    for (i = 0; i < nb_cpb; i++) {
+        get_ue_golomb_long(gb); // bit_rate_value_minus1
+        get_ue_golomb_long(gb); // cpb_size_value_minus1
+
+        if (subpic_params_present) {
+            get_ue_golomb_long(gb); // cpb_size_du_value_minus1
+            get_ue_golomb_long(gb); // bit_rate_du_value_minus1
+        }
+        skip_bits1(gb); // cbr_flag
+    }
+}
+
+static int decode_hrd(HEVCContext *s, int common_inf_present,
+                       int max_sublayers)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+    int nal_params_present = 0, vcl_params_present = 0;
+    int subpic_params_present = 0;
+    int i;
+
+    if (common_inf_present) {
+        nal_params_present = get_bits1(gb);
+        vcl_params_present = get_bits1(gb);
+
+        if (nal_params_present || vcl_params_present) {
+            subpic_params_present = get_bits1(gb);
+
+            if (subpic_params_present) {
+                skip_bits(gb, 8); // tick_divisor_minus2
+                skip_bits(gb, 5); // du_cpb_removal_delay_increment_length_minus1
+                skip_bits(gb, 1); // sub_pic_cpb_params_in_pic_timing_sei_flag
+                skip_bits(gb, 5); // dpb_output_delay_du_length_minus1
+            }
+
+            skip_bits(gb, 4); // bit_rate_scale
+            skip_bits(gb, 4); // cpb_size_scale
+
+            if (subpic_params_present)
+                skip_bits(gb, 4);  // cpb_size_du_scale
+
+            skip_bits(gb, 5); // initial_cpb_removal_delay_length_minus1
+            skip_bits(gb, 5); // au_cpb_removal_delay_length_minus1
+            skip_bits(gb, 5); // dpb_output_delay_length_minus1
+        }
+    }
+
+    for (i = 0; i < max_sublayers; i++) {
+        int low_delay = 0;
+        unsigned int nb_cpb = 1;
+        int fixed_rate = get_bits1(gb);
+
+        if (!fixed_rate)
+            fixed_rate = get_bits1(gb);
+
+        if (fixed_rate)
+            get_ue_golomb_long(gb);  // elemental_duration_in_tc_minus1
+        else
+            low_delay = get_bits1(gb);
+
+        if (!low_delay) {
+            nb_cpb = get_ue_golomb_long(gb) + 1;
+            if (nb_cpb < 1 || nb_cpb > 32) {
+                av_log(s->avctx, AV_LOG_ERROR, "nb_cpb %d invalid\n", nb_cpb);
+                return AVERROR_INVALIDDATA;
+            }
+        }
+
+        if (nal_params_present)
+            decode_sublayer_hrd(s, nb_cpb, subpic_params_present);
+        if (vcl_params_present)
+            decode_sublayer_hrd(s, nb_cpb, subpic_params_present);
+    }
+    return 0;
+}
+#endif /* !USE_MSPS */
+
+#ifdef USE_MSPS
+static int create_dummy_vps(HEVCContext *s)
+{
+    int i;
+    int vps_id = 0;
+    HEVCVPS *vps;
+    AVBufferRef *vps_buf = av_buffer_allocz(sizeof(*vps));
+
+    if (!vps_buf)
+        return AVERROR(ENOMEM);
+    vps = (HEVCVPS*)vps_buf->data;
+
+    vps_id = 0;
+    vps->vps_max_layers               = 1;
+    vps->vps_max_sub_layers           = 1;
+    vps->vps_temporal_id_nesting_flag = 0;
+
+    vps->vps_sub_layer_ordering_info_present_flag = 1;
+
+    i = vps->vps_sub_layer_ordering_info_present_flag ? 0 : vps->vps_max_sub_layers - 1;
+    for (; i < vps->vps_max_sub_layers; i++) {
+        vps->vps_max_dec_pic_buffering[i] = 1;
+        vps->vps_num_reorder_pics[i]      = 0;
+        vps->vps_max_latency_increase[i]  = -1;
+    }
+
+    vps->vps_max_layer_id   = 0;
+    vps->vps_num_layer_sets = 1;
+
+    vps->vps_timing_info_present_flag = 0;
+    av_buffer_unref(&s->vps_list[vps_id]);
+    s->vps_list[vps_id] = vps_buf;
+    return 0;
+}
+#else
+int ff_hevc_decode_nal_vps(HEVCContext *s)
+{
+    int i,j;
+    GetBitContext *gb = &s->HEVClc->gb;
+    int vps_id = 0;
+    HEVCVPS *vps;
+    AVBufferRef *vps_buf = av_buffer_allocz(sizeof(*vps));
+
+    if (!vps_buf)
+        return AVERROR(ENOMEM);
+    vps = (HEVCVPS*)vps_buf->data;
+
+    av_log(s->avctx, AV_LOG_DEBUG, "Decoding VPS\n");
+
+    vps_id = get_bits(gb, 4);
+    if (vps_id >= MAX_VPS_COUNT) {
+        av_log(s->avctx, AV_LOG_ERROR, "VPS id out of range: %d\n", vps_id);
+        goto err;
+    }
+
+    if (get_bits(gb, 2) != 3) { // vps_reserved_three_2bits
+        av_log(s->avctx, AV_LOG_ERROR, "vps_reserved_three_2bits is not three\n");
+        goto err;
+    }
+
+    vps->vps_max_layers               = get_bits(gb, 6) + 1;
+    vps->vps_max_sub_layers           = get_bits(gb, 3) + 1;
+    vps->vps_temporal_id_nesting_flag = get_bits1(gb);
+
+    if (get_bits(gb, 16) != 0xffff) { // vps_reserved_ffff_16bits
+        av_log(s->avctx, AV_LOG_ERROR, "vps_reserved_ffff_16bits is not 0xffff\n");
+        goto err;
+    }
+
+    if (vps->vps_max_sub_layers > MAX_SUB_LAYERS) {
+        av_log(s->avctx, AV_LOG_ERROR, "vps_max_sub_layers out of range: %d\n",
+               vps->vps_max_sub_layers);
+        goto err;
+    }
+
+    if (parse_ptl(s, &vps->ptl, vps->vps_max_sub_layers) < 0)
+        goto err;
+
+    vps->vps_sub_layer_ordering_info_present_flag = get_bits1(gb);
+
+    i = vps->vps_sub_layer_ordering_info_present_flag ? 0 : vps->vps_max_sub_layers - 1;
+    for (; i < vps->vps_max_sub_layers; i++) {
+        vps->vps_max_dec_pic_buffering[i] = get_ue_golomb_long(gb) + 1;
+        vps->vps_num_reorder_pics[i]      = get_ue_golomb_long(gb);
+        vps->vps_max_latency_increase[i]  = get_ue_golomb_long(gb) - 1;
+
+        if (vps->vps_max_dec_pic_buffering[i] > MAX_DPB_SIZE || !vps->vps_max_dec_pic_buffering[i]) {
+            av_log(s->avctx, AV_LOG_ERROR, "vps_max_dec_pic_buffering_minus1 out of range: %d\n",
+                   vps->vps_max_dec_pic_buffering[i] - 1);
+            goto err;
+        }
+        if (vps->vps_num_reorder_pics[i] > vps->vps_max_dec_pic_buffering[i] - 1) {
+            av_log(s->avctx, AV_LOG_WARNING, "vps_max_num_reorder_pics out of range: %d\n",
+                   vps->vps_num_reorder_pics[i]);
+            if (s->avctx->err_recognition & AV_EF_EXPLODE)
+                goto err;
+        }
+    }
+
+    vps->vps_max_layer_id   = get_bits(gb, 6);
+    vps->vps_num_layer_sets = get_ue_golomb_long(gb) + 1;
+    if ((vps->vps_num_layer_sets - 1LL) * (vps->vps_max_layer_id + 1LL) > get_bits_left(gb)) {
+        av_log(s->avctx, AV_LOG_ERROR, "too many layer_id_included_flags\n");
+        goto err;
+    }
+
+    for (i = 1; i < vps->vps_num_layer_sets; i++)
+        for (j = 0; j <= vps->vps_max_layer_id; j++)
+            skip_bits(gb, 1);  // layer_id_included_flag[i][j]
+
+    vps->vps_timing_info_present_flag = get_bits1(gb);
+    if (vps->vps_timing_info_present_flag) {
+        vps->vps_num_units_in_tick               = get_bits_long(gb, 32);
+        vps->vps_time_scale                      = get_bits_long(gb, 32);
+        vps->vps_poc_proportional_to_timing_flag = get_bits1(gb);
+        if (vps->vps_poc_proportional_to_timing_flag)
+            vps->vps_num_ticks_poc_diff_one = get_ue_golomb_long(gb) + 1;
+        vps->vps_num_hrd_parameters = get_ue_golomb_long(gb);
+        for (i = 0; i < vps->vps_num_hrd_parameters; i++) {
+            int common_inf_present = 1;
+
+            get_ue_golomb_long(gb); // hrd_layer_set_idx
+            if (i)
+                common_inf_present = get_bits1(gb);
+            decode_hrd(s, common_inf_present, vps->vps_max_sub_layers);
+        }
+    }
+    get_bits1(gb); /* vps_extension_flag */
+
+    if (get_bits_left(gb) < 0) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "Overread VPS by %d bits\n", -get_bits_left(gb));
+        goto err;
+    }
+
+    av_buffer_unref(&s->vps_list[vps_id]);
+    s->vps_list[vps_id] = vps_buf;
+    return 0;
+
+err:
+    av_buffer_unref(&vps_buf);
+    return AVERROR_INVALIDDATA;
+}
+#endif
+
+#ifndef USE_MSPS
+static void decode_vui(HEVCContext *s, HEVCSPS *sps)
+{
+    VUI *vui          = &sps->vui;
+    GetBitContext *gb = &s->HEVClc->gb;
+    GetBitContext backup;
+    int sar_present, alt = 0;
+
+    av_log(s->avctx, AV_LOG_DEBUG, "Decoding VUI\n");
+
+    sar_present = get_bits1(gb);
+    if (sar_present) {
+        uint8_t sar_idx = get_bits(gb, 8);
+        if (sar_idx < FF_ARRAY_ELEMS(vui_sar))
+            vui->sar = vui_sar[sar_idx];
+        else if (sar_idx == 255) {
+            vui->sar.num = get_bits(gb, 16);
+            vui->sar.den = get_bits(gb, 16);
+        } else
+            av_log(s->avctx, AV_LOG_WARNING,
+                   "Unknown SAR index: %u.\n", sar_idx);
+    }
+
+    vui->overscan_info_present_flag = get_bits1(gb);
+    if (vui->overscan_info_present_flag)
+        vui->overscan_appropriate_flag = get_bits1(gb);
+
+    vui->video_signal_type_present_flag = get_bits1(gb);
+    if (vui->video_signal_type_present_flag) {
+        vui->video_format                    = get_bits(gb, 3);
+        vui->video_full_range_flag           = get_bits1(gb);
+        vui->colour_description_present_flag = get_bits1(gb);
+        if (vui->video_full_range_flag && sps->pix_fmt == AV_PIX_FMT_YUV420P)
+            sps->pix_fmt = AV_PIX_FMT_YUVJ420P;
+        if (vui->colour_description_present_flag) {
+            vui->colour_primaries        = get_bits(gb, 8);
+            vui->transfer_characteristic = get_bits(gb, 8);
+            vui->matrix_coeffs           = get_bits(gb, 8);
+
+            // Set invalid values to "unspecified"
+            if (vui->colour_primaries >= AVCOL_PRI_NB)
+                vui->colour_primaries = AVCOL_PRI_UNSPECIFIED;
+            if (vui->transfer_characteristic >= AVCOL_TRC_NB)
+                vui->transfer_characteristic = AVCOL_TRC_UNSPECIFIED;
+            if (vui->matrix_coeffs >= AVCOL_SPC_NB)
+                vui->matrix_coeffs = AVCOL_SPC_UNSPECIFIED;
+        }
+    }
+
+    vui->chroma_loc_info_present_flag = get_bits1(gb);
+    if (vui->chroma_loc_info_present_flag) {
+        vui->chroma_sample_loc_type_top_field    = get_ue_golomb_long(gb);
+        vui->chroma_sample_loc_type_bottom_field = get_ue_golomb_long(gb);
+    }
+
+    vui->neutra_chroma_indication_flag = get_bits1(gb);
+    vui->field_seq_flag                = get_bits1(gb);
+    vui->frame_field_info_present_flag = get_bits1(gb);
+
+    if (get_bits_left(gb) >= 68 && show_bits_long(gb, 21) == 0x100000) {
+        vui->default_display_window_flag = 0;
+        av_log(s->avctx, AV_LOG_WARNING, "Invalid default display window\n");
+    } else
+        vui->default_display_window_flag = get_bits1(gb);
+    // Backup context in case an alternate header is detected
+    memcpy(&backup, gb, sizeof(backup));
+
+    if (vui->default_display_window_flag) {
+        //TODO: * 2 is only valid for 420
+        vui->def_disp_win.left_offset   = get_ue_golomb_long(gb) * 2;
+        vui->def_disp_win.right_offset  = get_ue_golomb_long(gb) * 2;
+        vui->def_disp_win.top_offset    = get_ue_golomb_long(gb) * 2;
+        vui->def_disp_win.bottom_offset = get_ue_golomb_long(gb) * 2;
+
+        if (s->apply_defdispwin &&
+            s->avctx->flags2 & CODEC_FLAG2_IGNORE_CROP) {
+            av_log(s->avctx, AV_LOG_DEBUG,
+                   "discarding vui default display window, "
+                   "original values are l:%u r:%u t:%u b:%u\n",
+                   vui->def_disp_win.left_offset,
+                   vui->def_disp_win.right_offset,
+                   vui->def_disp_win.top_offset,
+                   vui->def_disp_win.bottom_offset);
+
+            vui->def_disp_win.left_offset   =
+            vui->def_disp_win.right_offset  =
+            vui->def_disp_win.top_offset    =
+            vui->def_disp_win.bottom_offset = 0;
+        }
+    }
+
+    vui->vui_timing_info_present_flag = get_bits1(gb);
+
+    if (vui->vui_timing_info_present_flag) {
+        if( get_bits_left(gb) < 66) {
+            // The alternate syntax seem to have timing info located
+            // at where def_disp_win is normally located
+            av_log(s->avctx, AV_LOG_WARNING,
+                   "Strange VUI timing information, retrying...\n");
+            vui->default_display_window_flag = 0;
+            memset(&vui->def_disp_win, 0, sizeof(vui->def_disp_win));
+            memcpy(gb, &backup, sizeof(backup));
+            alt = 1;
+        }
+        vui->vui_num_units_in_tick               = get_bits_long(gb, 32);
+        vui->vui_time_scale                      = get_bits_long(gb, 32);
+        if (alt) {
+            av_log(s->avctx, AV_LOG_INFO, "Retry got %i/%i fps\n",
+                   vui->vui_time_scale, vui->vui_num_units_in_tick);
+        }
+        vui->vui_poc_proportional_to_timing_flag = get_bits1(gb);
+        if (vui->vui_poc_proportional_to_timing_flag)
+            vui->vui_num_ticks_poc_diff_one_minus1 = get_ue_golomb_long(gb);
+        vui->vui_hrd_parameters_present_flag = get_bits1(gb);
+        if (vui->vui_hrd_parameters_present_flag)
+            decode_hrd(s, 1, sps->max_sub_layers);
+    }
+
+    vui->bitstream_restriction_flag = get_bits1(gb);
+    if (vui->bitstream_restriction_flag) {
+        vui->tiles_fixed_structure_flag              = get_bits1(gb);
+        vui->motion_vectors_over_pic_boundaries_flag = get_bits1(gb);
+        vui->restricted_ref_pic_lists_flag           = get_bits1(gb);
+        vui->min_spatial_segmentation_idc            = get_ue_golomb_long(gb);
+        vui->max_bytes_per_pic_denom                 = get_ue_golomb_long(gb);
+        vui->max_bits_per_min_cu_denom               = get_ue_golomb_long(gb);
+        vui->log2_max_mv_length_horizontal           = get_ue_golomb_long(gb);
+        vui->log2_max_mv_length_vertical             = get_ue_golomb_long(gb);
+    }
+}
+#endif /* !USE_MSPS */
+
+static void set_default_scaling_list_data(ScalingList *sl)
+{
+    int matrixId;
+
+    for (matrixId = 0; matrixId < 6; matrixId++) {
+        // 4x4 default is 16
+        memset(sl->sl[0][matrixId], 16, 16);
+        sl->sl_dc[0][matrixId] = 16; // default for 16x16
+        sl->sl_dc[1][matrixId] = 16; // default for 32x32
+    }
+    memcpy(sl->sl[1][0], default_scaling_list_intra, 64);
+    memcpy(sl->sl[1][1], default_scaling_list_intra, 64);
+    memcpy(sl->sl[1][2], default_scaling_list_intra, 64);
+    memcpy(sl->sl[1][3], default_scaling_list_inter, 64);
+    memcpy(sl->sl[1][4], default_scaling_list_inter, 64);
+    memcpy(sl->sl[1][5], default_scaling_list_inter, 64);
+    memcpy(sl->sl[2][0], default_scaling_list_intra, 64);
+    memcpy(sl->sl[2][1], default_scaling_list_intra, 64);
+    memcpy(sl->sl[2][2], default_scaling_list_intra, 64);
+    memcpy(sl->sl[2][3], default_scaling_list_inter, 64);
+    memcpy(sl->sl[2][4], default_scaling_list_inter, 64);
+    memcpy(sl->sl[2][5], default_scaling_list_inter, 64);
+    memcpy(sl->sl[3][0], default_scaling_list_intra, 64);
+    memcpy(sl->sl[3][1], default_scaling_list_intra, 64);
+    memcpy(sl->sl[3][2], default_scaling_list_intra, 64);
+    memcpy(sl->sl[3][3], default_scaling_list_inter, 64);
+    memcpy(sl->sl[3][4], default_scaling_list_inter, 64);
+    memcpy(sl->sl[3][5], default_scaling_list_inter, 64);
+}
+
+static int scaling_list_data(HEVCContext *s, ScalingList *sl, HEVCSPS *sps)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+    uint8_t scaling_list_pred_mode_flag;
+    int32_t scaling_list_dc_coef[2][6];
+    int size_id, matrix_id, pos;
+    int i;
+
+    for (size_id = 0; size_id < 4; size_id++)
+        for (matrix_id = 0; matrix_id < 6; matrix_id += ((size_id == 3) ? 3 : 1)) {
+            scaling_list_pred_mode_flag = get_bits1(gb);
+            if (!scaling_list_pred_mode_flag) {
+                unsigned int delta = get_ue_golomb_long(gb);
+                /* Only need to handle non-zero delta. Zero means default,
+                 * which should already be in the arrays. */
+                if (delta) {
+                    // Copy from previous array.
+                    if (matrix_id < delta) {
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Invalid delta in scaling list data: %d.\n", delta);
+                        return AVERROR_INVALIDDATA;
+                    }
+
+                    memcpy(sl->sl[size_id][matrix_id],
+                           sl->sl[size_id][matrix_id - delta],
+                           size_id > 0 ? 64 : 16);
+                    if (size_id > 1)
+                        sl->sl_dc[size_id - 2][matrix_id] = sl->sl_dc[size_id - 2][matrix_id - delta];
+                }
+            } else {
+                int next_coef, coef_num;
+                int32_t scaling_list_delta_coef;
+
+                next_coef = 8;
+                coef_num  = FFMIN(64, 1 << (4 + (size_id << 1)));
+                if (size_id > 1) {
+                    scaling_list_dc_coef[size_id - 2][matrix_id] = get_se_golomb(gb) + 8;
+                    next_coef = scaling_list_dc_coef[size_id - 2][matrix_id];
+                    sl->sl_dc[size_id - 2][matrix_id] = next_coef;
+                }
+                for (i = 0; i < coef_num; i++) {
+                    if (size_id == 0)
+                        pos = 4 * ff_hevc_diag_scan4x4_y[i] +
+                                  ff_hevc_diag_scan4x4_x[i];
+                    else
+                        pos = 8 * ff_hevc_diag_scan8x8_y[i] +
+                                  ff_hevc_diag_scan8x8_x[i];
+
+                    scaling_list_delta_coef = get_se_golomb(gb);
+                    next_coef = (next_coef + scaling_list_delta_coef + 256) % 256;
+                    sl->sl[size_id][matrix_id][pos] = next_coef;
+                }
+            }
+        }
+
+    if (sps->chroma_format_idc == 3) {
+        for (i = 0; i < 64; i++) {
+            sl->sl[3][1][i] = sl->sl[2][1][i];
+            sl->sl[3][2][i] = sl->sl[2][2][i];
+            sl->sl[3][4][i] = sl->sl[2][4][i];
+            sl->sl[3][5][i] = sl->sl[2][5][i];
+        }
+        sl->sl_dc[1][1] = sl->sl_dc[0][1];
+        sl->sl_dc[1][2] = sl->sl_dc[0][2];
+        sl->sl_dc[1][4] = sl->sl_dc[0][4];
+        sl->sl_dc[1][5] = sl->sl_dc[0][5];
+    }
+
+
+    return 0;
+}
+
+int ff_hevc_decode_nal_sps(HEVCContext *s)
+{
+    const AVPixFmtDescriptor *desc;
+    GetBitContext *gb = &s->HEVClc->gb;
+    int ret = 0;
+    unsigned int sps_id = 0;
+    int log2_diff_max_min_transform_block_size;
+#ifndef USE_MSPS
+    int bit_depth_chroma, start, vui_present, sublayer_ordering_info;
+#endif
+    int i;
+
+    HEVCSPS *sps;
+    AVBufferRef *sps_buf = av_buffer_allocz(sizeof(*sps));
+
+    if (!sps_buf)
+        return AVERROR(ENOMEM);
+    sps = (HEVCSPS*)sps_buf->data;
+
+    av_log(s->avctx, AV_LOG_DEBUG, "Decoding SPS\n");
+
+    // Coded parameters
+#ifdef USE_MSPS
+    ret = create_dummy_vps(s);
+    if (ret < 0)
+        return ret;
+    sps->vps_id = 0;
+    sps->max_sub_layers = 1;
+
+    sps_id = 0;
+
+    sps->chroma_format_idc = get_bits(gb, 8);
+    if (sps->chroma_format_idc > 3) {
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+    sps->separate_colour_plane_flag = 0;
+    
+    sps->width  = get_bits_long(gb, 32);
+    sps->height = get_bits_long(gb, 32);
+    if ((ret = av_image_check_size(sps->width,
+                                   sps->height, 0, s->avctx)) < 0)
+        goto err;
+    sps->bit_depth = get_bits(gb, 8) + 8;
+
+#ifdef USE_VAR_BIT_DEPTH
+    /* Note: in order to simplify the code we always use 16 bit pixfmt */
+    switch(sps->chroma_format_idc) {
+    case 0:
+        sps->pix_fmt = AV_PIX_FMT_GRAY16;
+        break;
+    case 1:
+        sps->pix_fmt = AV_PIX_FMT_YUV420P16;
+        break;
+    case 2:
+        sps->pix_fmt = AV_PIX_FMT_YUV422P16;
+        break;
+    default:
+    case 3:
+        sps->pix_fmt = AV_PIX_FMT_YUV444P16;
+        break;
+    }
+    sps->pixel_shift = 1;
+#else
+    if (sps->bit_depth != 8) {
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+
+    switch(sps->chroma_format_idc) {
+    case 0:
+        sps->pix_fmt = AV_PIX_FMT_GRAY8;
+        break;
+    case 1:
+        sps->pix_fmt = AV_PIX_FMT_YUV420P;
+        break;
+    case 2:
+        sps->pix_fmt = AV_PIX_FMT_YUV422P;
+        break;
+    default:
+    case 3:
+        sps->pix_fmt = AV_PIX_FMT_YUV444P;
+        break;
+    }
+    sps->pixel_shift = 0;
+#endif /* !USE_VAR_BIT_DEPTH */
+
+#else
+    sps->vps_id = get_bits(gb, 4);
+    if (sps->vps_id >= MAX_VPS_COUNT) {
+        av_log(s->avctx, AV_LOG_ERROR, "VPS id out of range: %d\n", sps->vps_id);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+    if (!s->vps_list[sps->vps_id]) {
+        av_log(s->avctx, AV_LOG_ERROR, "VPS %d does not exist\n",
+               sps->vps_id);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+
+    sps->max_sub_layers = get_bits(gb, 3) + 1;
+    if (sps->max_sub_layers > MAX_SUB_LAYERS) {
+        av_log(s->avctx, AV_LOG_ERROR, "sps_max_sub_layers out of range: %d\n",
+               sps->max_sub_layers);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+
+    skip_bits1(gb); // temporal_id_nesting_flag
+
+    if (parse_ptl(s, &sps->ptl, sps->max_sub_layers) < 0)
+        goto err;
+
+    sps_id = get_ue_golomb_long(gb);
+    if (sps_id >= MAX_SPS_COUNT) {
+        av_log(s->avctx, AV_LOG_ERROR, "SPS id out of range: %d\n", sps_id);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+
+    sps->chroma_format_idc = get_ue_golomb_long(gb);
+    if (!(sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2 || sps->chroma_format_idc == 3 || sps->chroma_format_idc == 0)) {
+        avpriv_report_missing_feature(s->avctx, "chroma_format_idc != {0, 1, 2, 3}\n");
+        ret = AVERROR_PATCHWELCOME;
+        goto err;
+    }
+
+    if (sps->chroma_format_idc == 3)
+        sps->separate_colour_plane_flag = get_bits1(gb);
+
+    if (sps->separate_colour_plane_flag) {
+        sps->chroma_format_idc = 0;
+        avpriv_report_missing_feature(s->avctx, "separate_colour_plane_flag = 1\n");
+        ret = AVERROR_PATCHWELCOME;
+        goto err;
+    }
+
+    sps->width  = get_ue_golomb_long(gb);
+    sps->height = get_ue_golomb_long(gb);
+    if ((ret = av_image_check_size(sps->width,
+                                   sps->height, 0, s->avctx)) < 0)
+        goto err;
+
+    if (get_bits1(gb)) { // pic_conformance_flag
+        int hshift, vshift;
+        switch(sps->chroma_format_idc) {
+        default:
+        case 0:
+        case 3:
+            hshift = vshift = 0;
+            break;
+        case 1:
+            hshift = vshift = 1;
+            break;
+        case 2:
+            hshift = 1;
+            vshift = 0;
+            break;
+        }
+        sps->pic_conf_win.left_offset   = get_ue_golomb_long(gb) << hshift;
+        sps->pic_conf_win.right_offset  = get_ue_golomb_long(gb) << hshift;
+        sps->pic_conf_win.top_offset    = get_ue_golomb_long(gb) << vshift;
+        sps->pic_conf_win.bottom_offset = get_ue_golomb_long(gb) << vshift;
+
+        if (s->avctx->flags2 & CODEC_FLAG2_IGNORE_CROP) {
+            av_log(s->avctx, AV_LOG_DEBUG,
+                   "discarding sps conformance window, "
+                   "original values are l:%u r:%u t:%u b:%u\n",
+                   sps->pic_conf_win.left_offset,
+                   sps->pic_conf_win.right_offset,
+                   sps->pic_conf_win.top_offset,
+                   sps->pic_conf_win.bottom_offset);
+
+            sps->pic_conf_win.left_offset   =
+            sps->pic_conf_win.right_offset  =
+            sps->pic_conf_win.top_offset    =
+            sps->pic_conf_win.bottom_offset = 0;
+        }
+        sps->output_window = sps->pic_conf_win;
+    }
+
+    sps->bit_depth   = get_ue_golomb_long(gb) + 8;
+    bit_depth_chroma = get_ue_golomb_long(gb) + 8;
+    if (bit_depth_chroma != sps->bit_depth && sps->chroma_format_idc != 0) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "Luma bit depth (%d) is different from chroma bit depth (%d), "
+               "this is unsupported.\n",
+               sps->bit_depth, bit_depth_chroma);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+    switch (sps->bit_depth) {
+    case 8:
+        if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P;
+        if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P;
+        if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P;
+       break;
+    case 9:
+        if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P9;
+        if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P9;
+        if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P9;
+        break;
+    case 10:
+        if (sps->chroma_format_idc == 0) {
+            if (sps->separate_colour_plane_flag)
+                goto format_not_supported;
+            sps->pix_fmt = AV_PIX_FMT_GRAY16; /* XXX: should have gray10 */
+        }
+        if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P10;
+        if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P10;
+        if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P10;
+        break;
+    case 12:
+        if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P12;
+        if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P12;
+        if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P12;
+        break;
+    default:
+    format_not_supported:
+        av_log(s->avctx, AV_LOG_ERROR,
+               "4:2:0, 4:2:2, 4:4:4 supports are currently specified for 8, 10 and 12 bits.\n");
+        ret = AVERROR_PATCHWELCOME;
+        goto err;
+    }
+    sps->pixel_shift = sps->bit_depth > 8;
+#endif /* USE_MSPS */
+
+    desc = av_pix_fmt_desc_get(sps->pix_fmt);
+    if (!desc) {
+        ret = AVERROR(EINVAL);
+        goto err;
+    }
+
+    sps->hshift[0] = sps->vshift[0] = 0;
+    sps->hshift[2] = sps->hshift[1] = desc->log2_chroma_w;
+    sps->vshift[2] = sps->vshift[1] = desc->log2_chroma_h;
+
+
+#ifdef USE_MSPS
+    sps->log2_max_poc_lsb = 8; /* not used for intra */
+    for (i = 0; i < sps->max_sub_layers; i++) {
+        sps->temporal_layer[i].max_dec_pic_buffering = 1;
+        sps->temporal_layer[i].num_reorder_pics      = 0;
+        sps->temporal_layer[i].max_latency_increase  = -1;
+    }
+    sps->log2_min_cb_size                    = get_ue_golomb_long(gb) + 3;
+    /* update the width & heigth to be a multiple of min_cb_size */
+    {
+        int m;
+        m = (1 << sps->log2_min_cb_size) - 1;
+        sps->width = (sps->width + m) & ~m;
+        sps->height = (sps->height + m) & ~m;
+    }
+#else
+    sps->log2_max_poc_lsb = get_ue_golomb_long(gb) + 4;
+    if (sps->log2_max_poc_lsb > 16) {
+        av_log(s->avctx, AV_LOG_ERROR, "log2_max_pic_order_cnt_lsb_minus4 out range: %d\n",
+               sps->log2_max_poc_lsb - 4);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+
+    sublayer_ordering_info = get_bits1(gb);
+    start = sublayer_ordering_info ? 0 : sps->max_sub_layers - 1;
+    for (i = start; i < sps->max_sub_layers; i++) {
+        sps->temporal_layer[i].max_dec_pic_buffering = get_ue_golomb_long(gb) + 1;
+        sps->temporal_layer[i].num_reorder_pics      = get_ue_golomb_long(gb);
+        sps->temporal_layer[i].max_latency_increase  = get_ue_golomb_long(gb) - 1;
+        if (sps->temporal_layer[i].max_dec_pic_buffering > MAX_DPB_SIZE) {
+            av_log(s->avctx, AV_LOG_ERROR, "sps_max_dec_pic_buffering_minus1 out of range: %d\n",
+                   sps->temporal_layer[i].max_dec_pic_buffering - 1);
+            ret = AVERROR_INVALIDDATA;
+            goto err;
+        }
+        if (sps->temporal_layer[i].num_reorder_pics > sps->temporal_layer[i].max_dec_pic_buffering - 1) {
+            av_log(s->avctx, AV_LOG_WARNING, "sps_max_num_reorder_pics out of range: %d\n",
+                   sps->temporal_layer[i].num_reorder_pics);
+            if (s->avctx->err_recognition & AV_EF_EXPLODE ||
+                sps->temporal_layer[i].num_reorder_pics > MAX_DPB_SIZE - 1) {
+                ret = AVERROR_INVALIDDATA;
+                goto err;
+            }
+            sps->temporal_layer[i].max_dec_pic_buffering = sps->temporal_layer[i].num_reorder_pics + 1;
+        }
+    }
+
+    if (!sublayer_ordering_info) {
+        for (i = 0; i < start; i++) {
+            sps->temporal_layer[i].max_dec_pic_buffering = sps->temporal_layer[start].max_dec_pic_buffering;
+            sps->temporal_layer[i].num_reorder_pics      = sps->temporal_layer[start].num_reorder_pics;
+            sps->temporal_layer[i].max_latency_increase  = sps->temporal_layer[start].max_latency_increase;
+        }
+    }
+    sps->log2_min_cb_size                    = get_ue_golomb_long(gb) + 3;
+#endif
+    sps->log2_diff_max_min_coding_block_size = get_ue_golomb_long(gb);
+    sps->log2_min_tb_size                    = get_ue_golomb_long(gb) + 2;
+    log2_diff_max_min_transform_block_size   = get_ue_golomb_long(gb);
+    sps->log2_max_trafo_size                 = log2_diff_max_min_transform_block_size +
+                                               sps->log2_min_tb_size;
+    if (sps->log2_min_tb_size >= sps->log2_min_cb_size) {
+        av_log(s->avctx, AV_LOG_ERROR, "Invalid value for log2_min_tb_size");
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+#ifdef USE_MSPS
+    sps->max_transform_hierarchy_depth_intra = get_ue_golomb_long(gb);
+    sps->max_transform_hierarchy_depth_inter = 
+        sps->max_transform_hierarchy_depth_intra; /* not used for intra */
+    sps->amp_enabled_flag = 1; /* not used for intra */
+#else
+    sps->max_transform_hierarchy_depth_inter = get_ue_golomb_long(gb);
+    sps->max_transform_hierarchy_depth_intra = get_ue_golomb_long(gb);
+
+    sps->scaling_list_enable_flag = get_bits1(gb);
+    if (sps->scaling_list_enable_flag) {
+#ifdef USE_FULL
+        set_default_scaling_list_data(&sps->scaling_list);
+
+        if (get_bits1(gb)) {
+            ret = scaling_list_data(s, &sps->scaling_list, sps);
+            if (ret < 0)
+                goto err;
+        }
+#else
+        abort();
+#endif
+    }
+    sps->amp_enabled_flag = get_bits1(gb);
+#endif
+
+    sps->sao_enabled      = get_bits1(gb);
+
+    sps->pcm_enabled_flag = get_bits1(gb);
+    if (sps->pcm_enabled_flag) {
+        sps->pcm.bit_depth   = get_bits(gb, 4) + 1;
+        sps->pcm.bit_depth_chroma = get_bits(gb, 4) + 1;
+        sps->pcm.log2_min_pcm_cb_size = get_ue_golomb_long(gb) + 3;
+        sps->pcm.log2_max_pcm_cb_size = sps->pcm.log2_min_pcm_cb_size +
+                                        get_ue_golomb_long(gb);
+        if (sps->pcm.bit_depth > sps->bit_depth) {
+            av_log(s->avctx, AV_LOG_ERROR,
+                   "PCM bit depth (%d) is greater than normal bit depth (%d)\n",
+                   sps->pcm.bit_depth, sps->bit_depth);
+            ret = AVERROR_INVALIDDATA;
+            goto err;
+        }
+
+        sps->pcm.loop_filter_disable_flag = get_bits1(gb);
+    }
+
+#ifdef USE_MSPS
+    sps->nb_st_rps = 0; /* not used for intra */
+    sps->long_term_ref_pics_present_flag = 0; /* not used for intra */
+    sps->sps_temporal_mvp_enabled_flag = 1; /* not used for intra */
+    sps->sps_strong_intra_smoothing_enable_flag = get_bits1(gb);
+    sps->vui.sar = (AVRational){0, 1};
+#else
+    sps->nb_st_rps = get_ue_golomb_long(gb);
+    if (sps->nb_st_rps > MAX_SHORT_TERM_RPS_COUNT) {
+        av_log(s->avctx, AV_LOG_ERROR, "Too many short term RPS: %d.\n",
+               sps->nb_st_rps);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+    for (i = 0; i < sps->nb_st_rps; i++) {
+        if ((ret = ff_hevc_decode_short_term_rps(s, &sps->st_rps[i],
+                                                 sps, 0)) < 0)
+            goto err;
+    }
+
+    sps->long_term_ref_pics_present_flag = get_bits1(gb);
+    if (sps->long_term_ref_pics_present_flag) {
+        sps->num_long_term_ref_pics_sps = get_ue_golomb_long(gb);
+        for (i = 0; i < sps->num_long_term_ref_pics_sps; i++) {
+            sps->lt_ref_pic_poc_lsb_sps[i]       = get_bits(gb, sps->log2_max_poc_lsb);
+            sps->used_by_curr_pic_lt_sps_flag[i] = get_bits1(gb);
+        }
+    }
+
+    sps->sps_temporal_mvp_enabled_flag          = get_bits1(gb);
+    sps->sps_strong_intra_smoothing_enable_flag = get_bits1(gb);
+    sps->vui.sar = (AVRational){0, 1};
+    vui_present = get_bits1(gb);
+    if (vui_present)
+        decode_vui(s, sps);
+#endif
+
+    if (get_bits1(gb)) { // sps_extension_flag
+        int sps_extension_flag[1];
+        for (i = 0; i < 1; i++)
+            sps_extension_flag[i] = get_bits1(gb);
+        skip_bits(gb, 7); //sps_extension_7bits = get_bits(gb, 7);
+        if (sps_extension_flag[0]) {
+            int extended_precision_processing_flag;
+            int high_precision_offsets_enabled_flag;
+            int cabac_bypass_alignment_enabled_flag;
+
+            sps->transform_skip_rotation_enabled_flag = get_bits1(gb);
+            sps->transform_skip_context_enabled_flag  = get_bits1(gb);
+            sps->implicit_rdpcm_enabled_flag = get_bits1(gb);
+            sps->explicit_rdpcm_enabled_flag = get_bits1(gb);
+
+            extended_precision_processing_flag = get_bits1(gb);
+            if (extended_precision_processing_flag)
+                av_log(s->avctx, AV_LOG_WARNING,
+                   "extended_precision_processing_flag not yet implemented\n");
+
+            sps->intra_smoothing_disabled_flag       = get_bits1(gb);
+            high_precision_offsets_enabled_flag  = get_bits1(gb);
+            if (high_precision_offsets_enabled_flag)
+                av_log(s->avctx, AV_LOG_WARNING,
+                   "high_precision_offsets_enabled_flag not yet implemented\n");
+
+            sps->persistent_rice_adaptation_enabled_flag = get_bits1(gb);
+
+            cabac_bypass_alignment_enabled_flag  = get_bits1(gb);
+            if (cabac_bypass_alignment_enabled_flag)
+                av_log(s->avctx, AV_LOG_WARNING,
+                   "cabac_bypass_alignment_enabled_flag not yet implemented\n");
+        }
+    }
+#ifdef USE_MSPS
+    sps->output_width  = sps->width;
+    sps->output_height = sps->height;
+#else
+    if (s->apply_defdispwin) {
+        sps->output_window.left_offset   += sps->vui.def_disp_win.left_offset;
+        sps->output_window.right_offset  += sps->vui.def_disp_win.right_offset;
+        sps->output_window.top_offset    += sps->vui.def_disp_win.top_offset;
+        sps->output_window.bottom_offset += sps->vui.def_disp_win.bottom_offset;
+    }
+    if (sps->output_window.left_offset & (0x1F >> (sps->pixel_shift)) &&
+        !(s->avctx->flags & CODEC_FLAG_UNALIGNED)) {
+        sps->output_window.left_offset &= ~(0x1F >> (sps->pixel_shift));
+        av_log(s->avctx, AV_LOG_WARNING, "Reducing left output window to %d "
+               "chroma samples to preserve alignment.\n",
+               sps->output_window.left_offset);
+    }
+    sps->output_width  = sps->width -
+                         (sps->output_window.left_offset + sps->output_window.right_offset);
+    sps->output_height = sps->height -
+                         (sps->output_window.top_offset + sps->output_window.bottom_offset);
+    if (sps->output_width <= 0 || sps->output_height <= 0) {
+        av_log(s->avctx, AV_LOG_WARNING, "Invalid visible frame dimensions: %dx%d.\n",
+               sps->output_width, sps->output_height);
+        if (s->avctx->err_recognition & AV_EF_EXPLODE) {
+            ret = AVERROR_INVALIDDATA;
+            goto err;
+        }
+        av_log(s->avctx, AV_LOG_WARNING,
+               "Displaying the whole video surface.\n");
+        sps->pic_conf_win.left_offset   =
+        sps->pic_conf_win.right_offset  =
+        sps->pic_conf_win.top_offset    =
+        sps->pic_conf_win.bottom_offset = 0;
+        sps->output_width               = sps->width;
+        sps->output_height              = sps->height;
+    }
+#endif
+
+    // Inferred parameters
+    sps->log2_ctb_size = sps->log2_min_cb_size +
+                         sps->log2_diff_max_min_coding_block_size;
+    sps->log2_min_pu_size = sps->log2_min_cb_size - 1;
+
+    sps->ctb_width  = (sps->width  + (1 << sps->log2_ctb_size) - 1) >> sps->log2_ctb_size;
+    sps->ctb_height = (sps->height + (1 << sps->log2_ctb_size) - 1) >> sps->log2_ctb_size;
+    sps->ctb_size   = sps->ctb_width * sps->ctb_height;
+
+    sps->min_cb_width  = sps->width  >> sps->log2_min_cb_size;
+    sps->min_cb_height = sps->height >> sps->log2_min_cb_size;
+    sps->min_tb_width  = sps->width  >> sps->log2_min_tb_size;
+    sps->min_tb_height = sps->height >> sps->log2_min_tb_size;
+    sps->min_pu_width  = sps->width  >> sps->log2_min_pu_size;
+    sps->min_pu_height = sps->height >> sps->log2_min_pu_size;
+    sps->tb_mask       = (1 << (sps->log2_ctb_size - sps->log2_min_tb_size)) - 1;
+
+    sps->qp_bd_offset = 6 * (sps->bit_depth - 8);
+
+    if (sps->width  & ((1 << sps->log2_min_cb_size) - 1) ||
+        sps->height & ((1 << sps->log2_min_cb_size) - 1)) {
+        av_log(s->avctx, AV_LOG_ERROR, "Invalid coded frame dimensions.\n");
+        goto err;
+    }
+
+    if (sps->log2_ctb_size > MAX_LOG2_CTB_SIZE) {
+        av_log(s->avctx, AV_LOG_ERROR, "CTB size out of range: 2^%d\n", sps->log2_ctb_size);
+        goto err;
+    }
+    if (sps->max_transform_hierarchy_depth_inter > sps->log2_ctb_size - sps->log2_min_tb_size) {
+        av_log(s->avctx, AV_LOG_ERROR, "max_transform_hierarchy_depth_inter out of range: %d\n",
+               sps->max_transform_hierarchy_depth_inter);
+        goto err;
+    }
+    if (sps->max_transform_hierarchy_depth_intra > sps->log2_ctb_size - sps->log2_min_tb_size) {
+        av_log(s->avctx, AV_LOG_ERROR, "max_transform_hierarchy_depth_intra out of range: %d\n",
+               sps->max_transform_hierarchy_depth_intra);
+        goto err;
+    }
+    if (sps->log2_max_trafo_size > FFMIN(sps->log2_ctb_size, 5)) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "max transform block size out of range: %d\n",
+               sps->log2_max_trafo_size);
+        goto err;
+    }
+
+    if (get_bits_left(gb) < 0) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "Overread SPS by %d bits\n", -get_bits_left(gb));
+        goto err;
+    }
+
+    if (s->avctx->debug & FF_DEBUG_BITSTREAM) {
+        av_log(s->avctx, AV_LOG_DEBUG,
+               "Parsed SPS: id %d; coded wxh: %dx%d; "
+               "cropped wxh: %dx%d; pix_fmt: %s.\n",
+               sps_id, sps->width, sps->height,
+               sps->output_width, sps->output_height,
+#ifdef USE_FULL
+               av_get_pix_fmt_name(sps->pix_fmt)
+#else
+               "?"
+#endif
+               );
+    }
+
+    /* check if this is a repeat of an already parsed SPS, then keep the
+     * original one.
+     * otherwise drop all PPSes that depend on it */
+    if (s->sps_list[sps_id] &&
+        !memcmp(s->sps_list[sps_id]->data, sps_buf->data, sps_buf->size)) {
+        av_buffer_unref(&sps_buf);
+    } else {
+        for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++) {
+            if (s->pps_list[i] && ((HEVCPPS*)s->pps_list[i]->data)->sps_id == sps_id)
+                av_buffer_unref(&s->pps_list[i]);
+        }
+        if (s->sps_list[sps_id] && s->sps == (HEVCSPS*)s->sps_list[sps_id]->data) {
+            av_buffer_unref(&s->current_sps);
+            s->current_sps = av_buffer_ref(s->sps_list[sps_id]);
+            if (!s->current_sps)
+                s->sps = NULL;
+        }
+        av_buffer_unref(&s->sps_list[sps_id]);
+        s->sps_list[sps_id] = sps_buf;
+    }
+
+    return 0;
+
+err:
+    av_buffer_unref(&sps_buf);
+    return ret;
+}
+
+/* AVBuffer free callback for a HEVCPPS: releases every dynamically
+ * allocated table referenced by the PPS, then the PPS struct itself.
+ * (Free order is irrelevant; the tables are grouped here by purpose.) */
+static void hevc_pps_free(void *opaque, uint8_t *data)
+{
+    HEVCPPS *pps = (HEVCPPS*)data;
+
+    /* CTB/TB address-mapping tables (raster <-> tile-scan, z-scan) */
+    av_freep(&pps->ctb_addr_rs_to_ts);
+    av_freep(&pps->ctb_addr_ts_to_rs);
+    av_freep(&pps->min_tb_addr_zs_tab);
+    av_freep(&pps->tile_id);
+    av_freep(&pps->tile_pos_rs);
+
+    /* tile geometry tables */
+    av_freep(&pps->column_width);
+    av_freep(&pps->row_height);
+    av_freep(&pps->col_bd);
+    av_freep(&pps->row_bd);
+    av_freep(&pps->col_idxX);
+
+    av_freep(&pps);
+}
+
+/* Parse the PPS range-extension syntax (Rec. ITU-T H.265, pps_range_extension):
+ * max transform-skip block size, cross-component prediction flag, the per-CU
+ * chroma QP offset lists and the SAO offset scaling factors.
+ * Returns 0 on success or AVERROR_INVALIDDATA on an out-of-range field. */
+static int pps_range_extensions(HEVCContext *s, HEVCPPS *pps, HEVCSPS *sps) {
+    GetBitContext *gb = &s->HEVClc->gb;
+    int i;
+
+    if (pps->transform_skip_enabled_flag) {
+        pps->log2_max_transform_skip_block_size = get_ue_golomb_long(gb) + 2;
+    }
+    pps->cross_component_prediction_enabled_flag = get_bits1(gb);
+    pps->chroma_qp_offset_list_enabled_flag = get_bits1(gb);
+    if (pps->chroma_qp_offset_list_enabled_flag) {
+        pps->diff_cu_chroma_qp_offset_depth = get_ue_golomb_long(gb);
+        pps->chroma_qp_offset_list_len_minus1 = get_ue_golomb_long(gb);
+        /* The spec restricts chroma_qp_offset_list_len_minus1 to [0, 5].
+         * The previous test ("x && x >= 5") also rejected the legal value 5
+         * and its first clause was redundant for a non-negative ue(v). */
+        if (pps->chroma_qp_offset_list_len_minus1 > 5) {
+            av_log(s->avctx, AV_LOG_ERROR,
+                   "chroma_qp_offset_list_len_minus1 shall be in the range [0, 5].\n");
+            return AVERROR_INVALIDDATA;
+        }
+        for (i = 0; i <= pps->chroma_qp_offset_list_len_minus1; i++) {
+            pps->cb_qp_offset_list[i] = get_se_golomb_long(gb);
+            if (pps->cb_qp_offset_list[i]) {
+                av_log(s->avctx, AV_LOG_WARNING,
+                       "cb_qp_offset_list not tested yet.\n");
+            }
+            pps->cr_qp_offset_list[i] = get_se_golomb_long(gb);
+            if (pps->cr_qp_offset_list[i]) {
+                /* was a copy/paste of the cb message; name the right field */
+                av_log(s->avctx, AV_LOG_WARNING,
+                       "cr_qp_offset_list not tested yet.\n");
+            }
+        }
+    }
+    pps->log2_sao_offset_scale_luma = get_ue_golomb_long(gb);
+    pps->log2_sao_offset_scale_chroma = get_ue_golomb_long(gb);
+
+    return 0;
+}
+
+/* Parse a Picture Parameter Set NAL unit from the bitstream position in
+ * s->HEVClc->gb, derive the tile/CTB address tables that depend on it and
+ * on the referenced SPS, and install the result in s->pps_list[pps_id].
+ * Returns 0 on success or a negative AVERROR code; on any error the
+ * partially built PPS is released through its AVBuffer free callback.
+ *
+ * Fixes vs. the previous revision:
+ *  - the "Overread PPS" path used to goto err with ret still 0, making the
+ *    caller believe the (already freed) PPS was stored successfully;
+ *  - the return value of pps_range_extensions() was silently discarded. */
+int ff_hevc_decode_nal_pps(HEVCContext *s)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+    HEVCSPS      *sps = NULL;
+    int pic_area_in_ctbs;
+    int log2_diff_ctb_min_tb_size;
+    int i, j, x, y, ctb_addr_rs, tile_id;
+    int ret = 0;
+    unsigned int pps_id = 0;
+
+    AVBufferRef *pps_buf;
+    HEVCPPS *pps = av_mallocz(sizeof(*pps));
+
+    if (!pps)
+        return AVERROR(ENOMEM);
+
+    /* From here on pps is owned by pps_buf; hevc_pps_free() releases it
+     * together with every table allocated below. */
+    pps_buf = av_buffer_create((uint8_t *)pps, sizeof(*pps),
+                               hevc_pps_free, NULL, 0);
+    if (!pps_buf) {
+        av_freep(&pps);
+        return AVERROR(ENOMEM);
+    }
+
+    av_log(s->avctx, AV_LOG_DEBUG, "Decoding PPS\n");
+
+    // Default values
+    pps->loop_filter_across_tiles_enabled_flag = 1;
+    pps->num_tile_columns                      = 1;
+    pps->num_tile_rows                         = 1;
+    pps->uniform_spacing_flag                  = 1;
+    pps->disable_dbf                           = 0;
+    pps->beta_offset                           = 0;
+    pps->tc_offset                             = 0;
+    pps->log2_max_transform_skip_block_size    = 2;
+
+    // Coded parameters
+    pps_id = get_ue_golomb_long(gb);
+    if (pps_id >= MAX_PPS_COUNT) {
+        av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", pps_id);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+    pps->sps_id = get_ue_golomb_long(gb);
+    if (pps->sps_id >= MAX_SPS_COUNT) {
+        av_log(s->avctx, AV_LOG_ERROR, "SPS id out of range: %d\n", pps->sps_id);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+    if (!s->sps_list[pps->sps_id]) {
+        av_log(s->avctx, AV_LOG_ERROR, "SPS %u does not exist.\n", pps->sps_id);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+    sps = (HEVCSPS *)s->sps_list[pps->sps_id]->data;
+
+    pps->dependent_slice_segments_enabled_flag = get_bits1(gb);
+    pps->output_flag_present_flag              = get_bits1(gb);
+    pps->num_extra_slice_header_bits           = get_bits(gb, 3);
+
+    pps->sign_data_hiding_flag = get_bits1(gb);
+
+    pps->cabac_init_present_flag = get_bits1(gb);
+
+    pps->num_ref_idx_l0_default_active = get_ue_golomb_long(gb) + 1;
+    pps->num_ref_idx_l1_default_active = get_ue_golomb_long(gb) + 1;
+
+    pps->pic_init_qp_minus26 = get_se_golomb(gb);
+
+    pps->constrained_intra_pred_flag = get_bits1(gb);
+    pps->transform_skip_enabled_flag = get_bits1(gb);
+
+    pps->cu_qp_delta_enabled_flag = get_bits1(gb);
+    pps->diff_cu_qp_delta_depth   = 0;
+    if (pps->cu_qp_delta_enabled_flag)
+        pps->diff_cu_qp_delta_depth = get_ue_golomb_long(gb);
+
+    /* chroma QP offsets: spec range is [-12, 12] for both components */
+    pps->cb_qp_offset = get_se_golomb(gb);
+    if (pps->cb_qp_offset < -12 || pps->cb_qp_offset > 12) {
+        av_log(s->avctx, AV_LOG_ERROR, "pps_cb_qp_offset out of range: %d\n",
+               pps->cb_qp_offset);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+    pps->cr_qp_offset = get_se_golomb(gb);
+    if (pps->cr_qp_offset < -12 || pps->cr_qp_offset > 12) {
+        av_log(s->avctx, AV_LOG_ERROR, "pps_cr_qp_offset out of range: %d\n",
+               pps->cr_qp_offset);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+    pps->pic_slice_level_chroma_qp_offsets_present_flag = get_bits1(gb);
+
+    pps->weighted_pred_flag   = get_bits1(gb);
+    pps->weighted_bipred_flag = get_bits1(gb);
+
+    pps->transquant_bypass_enable_flag    = get_bits1(gb);
+    pps->tiles_enabled_flag               = get_bits1(gb);
+    pps->entropy_coding_sync_enabled_flag = get_bits1(gb);
+
+    /* Tile layout: either uniform spacing or explicit per-column widths and
+     * per-row heights (in CTB units, last entry inferred as the remainder). */
+    if (pps->tiles_enabled_flag) {
+        pps->num_tile_columns = get_ue_golomb_long(gb) + 1;
+        pps->num_tile_rows    = get_ue_golomb_long(gb) + 1;
+        if (pps->num_tile_columns == 0 ||
+            pps->num_tile_columns >= sps->width) {
+            av_log(s->avctx, AV_LOG_ERROR, "num_tile_columns_minus1 out of range: %d\n",
+                   pps->num_tile_columns - 1);
+            ret = AVERROR_INVALIDDATA;
+            goto err;
+        }
+        if (pps->num_tile_rows == 0 ||
+            pps->num_tile_rows >= sps->height) {
+            av_log(s->avctx, AV_LOG_ERROR, "num_tile_rows_minus1 out of range: %d\n",
+                   pps->num_tile_rows - 1);
+            ret = AVERROR_INVALIDDATA;
+            goto err;
+        }
+
+        pps->column_width = av_malloc_array(pps->num_tile_columns, sizeof(*pps->column_width));
+        pps->row_height   = av_malloc_array(pps->num_tile_rows,    sizeof(*pps->row_height));
+        if (!pps->column_width || !pps->row_height) {
+            ret = AVERROR(ENOMEM);
+            goto err;
+        }
+
+        pps->uniform_spacing_flag = get_bits1(gb);
+        if (!pps->uniform_spacing_flag) {
+            uint64_t sum = 0;
+            for (i = 0; i < pps->num_tile_columns - 1; i++) {
+                pps->column_width[i] = get_ue_golomb_long(gb) + 1;
+                sum                 += pps->column_width[i];
+            }
+            if (sum >= sps->ctb_width) {
+                av_log(s->avctx, AV_LOG_ERROR, "Invalid tile widths.\n");
+                ret = AVERROR_INVALIDDATA;
+                goto err;
+            }
+            pps->column_width[pps->num_tile_columns - 1] = sps->ctb_width - sum;
+
+            sum = 0;
+            for (i = 0; i < pps->num_tile_rows - 1; i++) {
+                pps->row_height[i] = get_ue_golomb_long(gb) + 1;
+                sum               += pps->row_height[i];
+            }
+            if (sum >= sps->ctb_height) {
+                av_log(s->avctx, AV_LOG_ERROR, "Invalid tile heights.\n");
+                ret = AVERROR_INVALIDDATA;
+                goto err;
+            }
+            pps->row_height[pps->num_tile_rows - 1] = sps->ctb_height - sum;
+        }
+        pps->loop_filter_across_tiles_enabled_flag = get_bits1(gb);
+    }
+
+    pps->seq_loop_filter_across_slices_enabled_flag = get_bits1(gb);
+
+    pps->deblocking_filter_control_present_flag = get_bits1(gb);
+    if (pps->deblocking_filter_control_present_flag) {
+        pps->deblocking_filter_override_enabled_flag = get_bits1(gb);
+        pps->disable_dbf                             = get_bits1(gb);
+        if (!pps->disable_dbf) {
+            /* offsets are coded as div2 values; spec range is [-6, 6] */
+            pps->beta_offset = get_se_golomb(gb) * 2;
+            pps->tc_offset = get_se_golomb(gb) * 2;
+            if (pps->beta_offset/2 < -6 || pps->beta_offset/2 > 6) {
+                av_log(s->avctx, AV_LOG_ERROR, "pps_beta_offset_div2 out of range: %d\n",
+                       pps->beta_offset/2);
+                ret = AVERROR_INVALIDDATA;
+                goto err;
+            }
+            if (pps->tc_offset/2 < -6 || pps->tc_offset/2 > 6) {
+                av_log(s->avctx, AV_LOG_ERROR, "pps_tc_offset_div2 out of range: %d\n",
+                       pps->tc_offset/2);
+                ret = AVERROR_INVALIDDATA;
+                goto err;
+            }
+        }
+    }
+
+    pps->scaling_list_data_present_flag = get_bits1(gb);
+    if (pps->scaling_list_data_present_flag) {
+        set_default_scaling_list_data(&pps->scaling_list);
+        ret = scaling_list_data(s, &pps->scaling_list, sps);
+        if (ret < 0)
+            goto err;
+    }
+    pps->lists_modification_present_flag = get_bits1(gb);
+    pps->log2_parallel_merge_level       = get_ue_golomb_long(gb) + 2;
+    if (pps->log2_parallel_merge_level > sps->log2_ctb_size) {
+        av_log(s->avctx, AV_LOG_ERROR, "log2_parallel_merge_level_minus2 out of range: %d\n",
+               pps->log2_parallel_merge_level - 2);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+
+    pps->slice_header_extension_present_flag = get_bits1(gb);
+
+    if (get_bits1(gb)) { // pps_extension_present_flag
+        int pps_range_extensions_flag = get_bits1(gb);
+        /* int pps_extension_7bits = */ get_bits(gb, 7);
+        if (
+#ifndef USE_MSPS
+            /* XXX: check if testing the profile is correct */
+            sps->ptl.general_ptl.profile_idc == FF_PROFILE_HEVC_REXT &&
+#endif
+            pps_range_extensions_flag) {
+            /* propagate the error instead of discarding it */
+            ret = pps_range_extensions(s, pps, sps);
+            if (ret < 0)
+                goto err;
+        }
+    }
+
+    // Inferred parameters
+    pps->col_bd   = av_malloc_array(pps->num_tile_columns + 1, sizeof(*pps->col_bd));
+    pps->row_bd   = av_malloc_array(pps->num_tile_rows + 1,    sizeof(*pps->row_bd));
+    pps->col_idxX = av_malloc_array(sps->ctb_width,    sizeof(*pps->col_idxX));
+    if (!pps->col_bd || !pps->row_bd || !pps->col_idxX) {
+        ret = AVERROR(ENOMEM);
+        goto err;
+    }
+
+    /* Uniform spacing: spread CTB columns/rows as evenly as possible.
+     * column_width/row_height may still be unallocated here when
+     * tiles_enabled_flag was 0 (defaults: 1x1 tile grid). */
+    if (pps->uniform_spacing_flag) {
+        if (!pps->column_width) {
+            pps->column_width = av_malloc_array(pps->num_tile_columns, sizeof(*pps->column_width));
+            pps->row_height   = av_malloc_array(pps->num_tile_rows,    sizeof(*pps->row_height));
+        }
+        if (!pps->column_width || !pps->row_height) {
+            ret = AVERROR(ENOMEM);
+            goto err;
+        }
+
+        for (i = 0; i < pps->num_tile_columns; i++) {
+            pps->column_width[i] = ((i + 1) * sps->ctb_width) / pps->num_tile_columns -
+                                   (i * sps->ctb_width) / pps->num_tile_columns;
+        }
+
+        for (i = 0; i < pps->num_tile_rows; i++) {
+            pps->row_height[i] = ((i + 1) * sps->ctb_height) / pps->num_tile_rows -
+                                 (i * sps->ctb_height) / pps->num_tile_rows;
+        }
+    }
+
+    /* Tile boundary prefix sums (in CTB units) */
+    pps->col_bd[0] = 0;
+    for (i = 0; i < pps->num_tile_columns; i++)
+        pps->col_bd[i + 1] = pps->col_bd[i] + pps->column_width[i];
+
+    pps->row_bd[0] = 0;
+    for (i = 0; i < pps->num_tile_rows; i++)
+        pps->row_bd[i + 1] = pps->row_bd[i] + pps->row_height[i];
+
+    /* NOTE(review): j advances once i exceeds col_bd[j] (not col_bd[j+1]);
+     * verify the intended semantics of col_idxX against its users before
+     * changing this longstanding upstream behaviour. */
+    for (i = 0, j = 0; i < sps->ctb_width; i++) {
+        if (i > pps->col_bd[j])
+            j++;
+        pps->col_idxX[i] = j;
+    }
+
+    /**
+     * 6.5 — CTB raster-scan <-> tile-scan conversion tables
+     */
+    pic_area_in_ctbs     = sps->ctb_width    * sps->ctb_height;
+
+    pps->ctb_addr_rs_to_ts = av_malloc_array(pic_area_in_ctbs,    sizeof(*pps->ctb_addr_rs_to_ts));
+    pps->ctb_addr_ts_to_rs = av_malloc_array(pic_area_in_ctbs,    sizeof(*pps->ctb_addr_ts_to_rs));
+    pps->tile_id           = av_malloc_array(pic_area_in_ctbs,    sizeof(*pps->tile_id));
+    pps->min_tb_addr_zs_tab = av_malloc_array((sps->tb_mask+2) * (sps->tb_mask+2), sizeof(*pps->min_tb_addr_zs_tab));
+    if (!pps->ctb_addr_rs_to_ts || !pps->ctb_addr_ts_to_rs ||
+        !pps->tile_id || !pps->min_tb_addr_zs_tab) {
+        ret = AVERROR(ENOMEM);
+        goto err;
+    }
+
+    for (ctb_addr_rs = 0; ctb_addr_rs < pic_area_in_ctbs; ctb_addr_rs++) {
+        int tb_x   = ctb_addr_rs % sps->ctb_width;
+        int tb_y   = ctb_addr_rs / sps->ctb_width;
+        int tile_x = 0;
+        int tile_y = 0;
+        int val    = 0;
+
+        /* locate the tile containing this CTB */
+        for (i = 0; i < pps->num_tile_columns; i++) {
+            if (tb_x < pps->col_bd[i + 1]) {
+                tile_x = i;
+                break;
+            }
+        }
+
+        for (i = 0; i < pps->num_tile_rows; i++) {
+            if (tb_y < pps->row_bd[i + 1]) {
+                tile_y = i;
+                break;
+            }
+        }
+
+        /* CTBs in all tiles preceding this one in tile-scan order... */
+        for (i = 0; i < tile_x; i++)
+            val += pps->row_height[tile_y] * pps->column_width[i];
+        for (i = 0; i < tile_y; i++)
+            val += sps->ctb_width * pps->row_height[i];
+
+        /* ...plus the raster offset within the tile itself */
+        val += (tb_y - pps->row_bd[tile_y]) * pps->column_width[tile_x] +
+               tb_x - pps->col_bd[tile_x];
+
+        pps->ctb_addr_rs_to_ts[ctb_addr_rs] = val;
+        pps->ctb_addr_ts_to_rs[val]         = ctb_addr_rs;
+    }
+
+    /* per-CTB tile id (in tile-scan order) */
+    for (j = 0, tile_id = 0; j < pps->num_tile_rows; j++)
+        for (i = 0; i < pps->num_tile_columns; i++, tile_id++)
+            for (y = pps->row_bd[j]; y < pps->row_bd[j + 1]; y++)
+                for (x = pps->col_bd[i]; x < pps->col_bd[i + 1]; x++)
+                    pps->tile_id[pps->ctb_addr_rs_to_ts[y * sps->ctb_width + x]] = tile_id;
+
+    /* tile_id now equals num_tile_rows * num_tile_columns */
+    pps->tile_pos_rs = av_malloc_array(tile_id, sizeof(*pps->tile_pos_rs));
+    if (!pps->tile_pos_rs) {
+        ret = AVERROR(ENOMEM);
+        goto err;
+    }
+
+    for (j = 0; j < pps->num_tile_rows; j++)
+        for (i = 0; i < pps->num_tile_columns; i++)
+            pps->tile_pos_rs[j * pps->num_tile_columns + i] = pps->row_bd[j] * sps->ctb_width + pps->col_bd[i];
+
+    /* min-TB raster -> z-scan table, padded with a -1 border row/column so
+     * neighbour lookups at picture edges stay in-bounds */
+    log2_diff_ctb_min_tb_size = sps->log2_ctb_size - sps->log2_min_tb_size;
+    pps->min_tb_addr_zs = &pps->min_tb_addr_zs_tab[1*(sps->tb_mask+2)+1];
+    for (y = 0; y < sps->tb_mask+2; y++) {
+        pps->min_tb_addr_zs_tab[y*(sps->tb_mask+2)] = -1;
+        pps->min_tb_addr_zs_tab[y]    = -1;
+    }
+    for (y = 0; y < sps->tb_mask+1; y++) {
+        for (x = 0; x < sps->tb_mask+1; x++) {
+            int tb_x        = x >> log2_diff_ctb_min_tb_size;
+            int tb_y        = y >> log2_diff_ctb_min_tb_size;
+            int ctb_addr_rs = sps->ctb_width * tb_y + tb_x;
+            int val         = pps->ctb_addr_rs_to_ts[ctb_addr_rs] <<
+                              (log2_diff_ctb_min_tb_size * 2);
+            /* interleave x/y bits to build the Morton (z-scan) offset */
+            for (i = 0; i < log2_diff_ctb_min_tb_size; i++) {
+                int m = 1 << i;
+                val += (m & x ? m * m : 0) + (m & y ? 2 * m * m : 0);
+            }
+            pps->min_tb_addr_zs[y * (sps->tb_mask+2) + x] = val;
+        }
+    }
+
+    if (get_bits_left(gb) < 0) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "Overread PPS by %d bits\n", -get_bits_left(gb));
+        /* was missing: fell through to err with ret == 0 (reported success) */
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+
+    av_buffer_unref(&s->pps_list[pps_id]);
+    s->pps_list[pps_id] = pps_buf;
+
+    return 0;
+
+err:
+    av_buffer_unref(&pps_buf);
+    return ret;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevc_refs.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,551 @@
+/*
+ * HEVC video decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ * Copyright (C) 2012 - 2013 Gildas Cocherel
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/pixdesc.h"
+
+#include "internal.h"
+#include "thread.h"
+#include "hevc.h"
+
+/* Clear the given usage flags (output/short-ref/long-ref/bumping) on a DPB
+ * frame; once no flags remain the frame is fully released: the picture
+ * buffer goes back to the thread pool and the prediction side tables
+ * (motion vectors, reference picture lists) are dropped. */
+void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
+{
+    /* frame->frame can be NULL if context init failed */
+    if (!frame->frame || !frame->frame->buf[0])
+        return;
+
+    frame->flags &= ~flags;
+    if (!frame->flags) {
+        /* no user left: release everything attached to this slot */
+        ff_thread_release_buffer(s->avctx, &frame->tf);
+
+#ifdef USE_PRED
+        av_buffer_unref(&frame->tab_mvf_buf);
+        frame->tab_mvf = NULL;
+        av_buffer_unref(&frame->rpl_buf);
+        av_buffer_unref(&frame->rpl_tab_buf);
+        frame->rpl_tab    = NULL;
+        frame->refPicList = NULL;
+#endif
+
+        frame->collocated_ref = NULL;
+    }
+}
+
+#ifdef USE_PRED
+/* Return the reference picture list in effect at luma position (x0, y0) of
+ * 'ref'.  The list is stored per CTB in tile-scan order (rpl_tab), since
+ * each slice installs its own lists over its CTB range. */
+RefPicList *ff_hevc_get_ref_list(HEVCContext *s, HEVCFrame *ref, int x0, int y0)
+{
+    int x_cb         = x0 >> s->sps->log2_ctb_size;
+    int y_cb         = y0 >> s->sps->log2_ctb_size;
+    int pic_width_cb = s->sps->ctb_width;
+    int ctb_addr_ts  = s->pps->ctb_addr_rs_to_ts[y_cb * pic_width_cb + x_cb];
+    return (RefPicList *)ref->rpl_tab[ctb_addr_ts];
+}
+#endif
+
+/* Drop the short- and long-term reference marks from every DPB slot;
+ * frames still flagged for output stay alive until they are emitted. */
+void ff_hevc_clear_refs(HEVCContext *s)
+{
+    int i;
+
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        HEVCFrame *frame = &s->DPB[i];
+        ff_hevc_unref_frame(s, frame,
+                            HEVC_FRAME_FLAG_SHORT_REF | HEVC_FRAME_FLAG_LONG_REF);
+    }
+}
+
+/* Fully release every frame in the DPB (all usage flags cleared),
+ * e.g. on seek/flush. */
+void ff_hevc_flush_dpb(HEVCContext *s)
+{
+    int i;
+
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        HEVCFrame *frame = &s->DPB[i];
+        ff_hevc_unref_frame(s, frame, ~0);
+    }
+}
+
+/* Take the first free DPB slot, obtain its picture buffer from the thread
+ * frame pool and, when prediction is enabled, allocate the per-slice
+ * reference-picture-list storage and the motion-vector table.
+ * Returns NULL on allocation failure or when the DPB is full. */
+static HEVCFrame *alloc_frame(HEVCContext *s)
+{
+    int i, j, ret;
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        HEVCFrame *frame = &s->DPB[i];
+        if (frame->frame->buf[0])
+            continue;
+
+        ret = ff_thread_get_buffer(s->avctx, &frame->tf,
+                                   AV_GET_BUFFER_FLAG_REF);
+        if (ret < 0)
+            return NULL;
+
+        frame->ctb_count = s->sps->ctb_width * s->sps->ctb_height;
+#ifdef USE_PRED
+        /* one RefPicListTab per NAL unit: upper bound on slices per frame */
+        frame->rpl_buf = av_buffer_allocz(s->nb_nals * sizeof(RefPicListTab));
+        if (!frame->rpl_buf)
+            goto fail;
+
+        frame->tab_mvf_buf = av_buffer_pool_get(s->tab_mvf_pool);
+        if (!frame->tab_mvf_buf)
+            goto fail;
+        frame->tab_mvf = (MvField *)frame->tab_mvf_buf->data;
+
+        frame->rpl_tab_buf = av_buffer_pool_get(s->rpl_tab_pool);
+        if (!frame->rpl_tab_buf)
+            goto fail;
+        frame->rpl_tab   = (RefPicListTab **)frame->rpl_tab_buf->data;
+        /* initially every CTB points at slice 0's table; init_slice_rpl()
+         * repoints entries as each slice is decoded */
+        for (j = 0; j < frame->ctb_count; j++)
+            frame->rpl_tab[j] = (RefPicListTab *)frame->rpl_buf->data;
+#endif
+
+        frame->frame->top_field_first  = s->picture_struct == AV_PICTURE_STRUCTURE_TOP_FIELD;
+        frame->frame->interlaced_frame = (s->picture_struct == AV_PICTURE_STRUCTURE_TOP_FIELD) || (s->picture_struct == AV_PICTURE_STRUCTURE_BOTTOM_FIELD);
+        return frame;
+fail:
+        ff_hevc_unref_frame(s, frame, ~0);
+        return NULL;
+    }
+    av_log(s->avctx, AV_LOG_ERROR, "Error allocating frame, DPB full.\n");
+    return NULL;
+}
+
+/* Allocate a frame for the picture with the given POC, make it the current
+ * decoding target (s->ref) and return its AVFrame in *frame.  Fails with
+ * AVERROR_INVALIDDATA when this POC already exists in the sequence. */
+int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
+{
+    HEVCFrame *ref;
+    int i;
+
+    /* check that this POC doesn't already exist */
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        HEVCFrame *frame = &s->DPB[i];
+
+        if (frame->frame->buf[0] && frame->sequence == s->seq_decode &&
+            frame->poc == poc) {
+            av_log(s->avctx, AV_LOG_ERROR, "Duplicate POC in a sequence: %d.\n",
+                   poc);
+            return AVERROR_INVALIDDATA;
+        }
+    }
+
+    ref = alloc_frame(s);
+    if (!ref)
+        return AVERROR(ENOMEM);
+
+    *frame = ref->frame;
+    s->ref = ref;
+
+    /* a new picture always starts as a short-term reference; it is queued
+     * for output only when the slice header requests it */
+    if (s->sh.pic_output_flag)
+        ref->flags = HEVC_FRAME_FLAG_OUTPUT | HEVC_FRAME_FLAG_SHORT_REF;
+    else
+        ref->flags = HEVC_FRAME_FLAG_SHORT_REF;
+
+    ref->poc      = poc;
+    ref->sequence = s->seq_decode;
+    ref->window   = s->sps->output_window;
+
+    return 0;
+}
+
+/* Output one frame from the DPB in POC order.
+ * Returns 1 when a frame was written into 'out', 0 when no frame is ready
+ * (reordering delay not yet satisfied), or a negative error code.
+ * With 'flush' set, the reordering delay is ignored. */
+int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
+{
+    do {
+        int nb_output = 0;
+        int min_poc   = INT_MAX;
+        int i, ret;
+        /* index of the lowest-POC pending frame; initialized so the value
+         * is never indeterminate (it is only consumed when nb_output > 0,
+         * but the compiler cannot prove that) */
+        int min_idx   = 0;
+
+        /* no_output_of_prior_pics: discard everything still queued for
+         * output from the current output sequence */
+        if (s->sh.no_output_of_prior_pics_flag == 1) {
+            for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+                HEVCFrame *frame = &s->DPB[i];
+                if (!(frame->flags & HEVC_FRAME_FLAG_BUMPING) && frame->poc != s->poc &&
+                        frame->sequence == s->seq_output) {
+                    ff_hevc_unref_frame(s, frame, HEVC_FRAME_FLAG_OUTPUT);
+                }
+            }
+        }
+
+        /* find the output-pending frame with the smallest POC */
+        for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+            HEVCFrame *frame = &s->DPB[i];
+            if ((frame->flags & HEVC_FRAME_FLAG_OUTPUT) &&
+                frame->sequence == s->seq_output) {
+                nb_output++;
+                if (frame->poc < min_poc) {
+                    min_poc = frame->poc;
+                    min_idx = i;
+                }
+            }
+        }
+
+        /* wait for more frames before output */
+        if (!flush && s->seq_output == s->seq_decode && s->sps &&
+            nb_output <= s->sps->temporal_layer[s->sps->max_sub_layers - 1].num_reorder_pics)
+            return 0;
+
+        if (nb_output) {
+            HEVCFrame *frame = &s->DPB[min_idx];
+            AVFrame *src = frame->frame;
+
+            ret = av_frame_ref(out, src);
+            /* clear the output (and bumping) flags regardless of the ref
+             * result so the slot can be reclaimed */
+            if (frame->flags & HEVC_FRAME_FLAG_BUMPING)
+                ff_hevc_unref_frame(s, frame, HEVC_FRAME_FLAG_OUTPUT | HEVC_FRAME_FLAG_BUMPING);
+            else
+                ff_hevc_unref_frame(s, frame, HEVC_FRAME_FLAG_OUTPUT);
+            if (ret < 0)
+                return ret;
+
+#ifndef USE_MSPS
+            {
+                /* apply the conformance-window crop by offsetting the plane
+                 * data pointers of the output frame */
+                AVFrame *dst = out;
+                const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
+                int pixel_shift = !!(desc->comp[0].depth_minus1 > 7);
+                for (i = 0; i < 3; i++) {
+                    int hshift = (i > 0) ? desc->log2_chroma_w : 0;
+                    int vshift = (i > 0) ? desc->log2_chroma_h : 0;
+                    int off = ((frame->window.left_offset >> hshift) << pixel_shift) +
+                        (frame->window.top_offset   >> vshift) * dst->linesize[i];
+                    dst->data[i] += off;
+                }
+                av_log(s->avctx, AV_LOG_DEBUG,
+                       "Output frame with POC %d.\n", frame->poc);
+            }
+#endif
+            return 1;
+        }
+
+        /* nothing pending: advance to the next output sequence if the
+         * decoder has moved on, otherwise there is nothing to emit */
+        if (s->seq_output != s->seq_decode)
+            s->seq_output = (s->seq_output + 1) & 0xff;
+        else
+            break;
+    } while (1);
+
+    return 0;
+}
+
+#ifdef USE_PRED
+/* DPB bumping: when the buffer holds at least max_dec_pic_buffering frames
+ * from the current output sequence (excluding the picture being decoded),
+ * mark the lowest-POC output-pending frames with the BUMPING flag so
+ * ff_hevc_output_frame() releases them even before the reorder delay. */
+void ff_hevc_bump_frame(HEVCContext *s)
+{
+    int dpb = 0;
+    int min_poc = INT_MAX;
+    int i;
+
+    /* count occupied DPB slots belonging to this output sequence */
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        HEVCFrame *frame = &s->DPB[i];
+        if ((frame->flags) &&
+            frame->sequence == s->seq_output &&
+            frame->poc != s->poc) {
+            dpb++;
+        }
+    }
+
+    if (s->sps && dpb >= s->sps->temporal_layer[s->sps->max_sub_layers - 1].max_dec_pic_buffering) {
+        /* find the smallest POC among frames that are output-only */
+        for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+            HEVCFrame *frame = &s->DPB[i];
+            if ((frame->flags) &&
+                frame->sequence == s->seq_output &&
+                frame->poc != s->poc) {
+                if (frame->flags == HEVC_FRAME_FLAG_OUTPUT && frame->poc < min_poc) {
+                    min_poc = frame->poc;
+                }
+            }
+        }
+
+        /* flag all output-pending frames up to that POC for bumping */
+        for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+            HEVCFrame *frame = &s->DPB[i];
+            if (frame->flags & HEVC_FRAME_FLAG_OUTPUT &&
+                frame->sequence == s->seq_output &&
+                frame->poc <= min_poc) {
+                frame->flags |= HEVC_FRAME_FLAG_BUMPING;
+            }
+        }
+        /* (a dead "dpb--" was removed here: the local was never read again) */
+    }
+}
+
+/* Point the per-CTB RPL table entries, from the current slice's first CTB
+ * (in tile-scan order) to the end of the picture, at this slice's
+ * RefPicListTab slot, and publish it as the frame's active refPicList. */
+static int init_slice_rpl(HEVCContext *s)
+{
+    HEVCFrame *frame = s->ref;
+    int ctb_count    = frame->ctb_count;
+    int ctb_addr_ts  = s->pps->ctb_addr_rs_to_ts[s->sh.slice_segment_addr];
+    int i;
+
+    /* rpl_buf holds one RefPicListTab per slice; reject excess slices */
+    if (s->slice_idx >= frame->rpl_buf->size / sizeof(RefPicListTab))
+        return AVERROR_INVALIDDATA;
+
+    for (i = ctb_addr_ts; i < ctb_count; i++)
+        frame->rpl_tab[i] = (RefPicListTab *)frame->rpl_buf->data + s->slice_idx;
+
+    frame->refPicList = (RefPicList *)frame->rpl_tab[ctb_addr_ts];
+
+    return 0;
+}
+
+/* Construct the L0 (and for B slices L1) reference picture lists for the
+ * current slice by concatenating the frame RPS candidate sets, applying
+ * any explicit reordering from the slice header, and latching the
+ * collocated reference used for temporal MV prediction. */
+int ff_hevc_slice_rpl(HEVCContext *s)
+{
+    SliceHeader *sh = &s->sh;
+
+    uint8_t nb_list = sh->slice_type == B_SLICE ? 2 : 1;
+    uint8_t list_idx;
+    int i, j, ret;
+
+    ret = init_slice_rpl(s);
+    if (ret < 0)
+        return ret;
+
+    if (!(s->rps[ST_CURR_BEF].nb_refs + s->rps[ST_CURR_AFT].nb_refs +
+          s->rps[LT_CURR].nb_refs)) {
+        av_log(s->avctx, AV_LOG_ERROR, "Zero refs in the frame RPS.\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    for (list_idx = 0; list_idx < nb_list; list_idx++) {
+        RefPicList  rpl_tmp = { { 0 } };
+        RefPicList *rpl     = &s->ref->refPicList[list_idx];
+
+        /* The order of the elements is
+         * ST_CURR_BEF - ST_CURR_AFT - LT_CURR for the L0 and
+         * ST_CURR_AFT - ST_CURR_BEF - LT_CURR for the L1 */
+        int cand_lists[3] = { list_idx ? ST_CURR_AFT : ST_CURR_BEF,
+                              list_idx ? ST_CURR_BEF : ST_CURR_AFT,
+                              LT_CURR };
+
+        /* concatenate the candidate lists for the current frame;
+         * candidates repeat until nb_refs[list_idx] entries exist.
+         * NOTE(review): termination assumes sh->nb_refs[list_idx] <= MAX_REFS
+         * (enforced at slice-header parse time, presumably) — otherwise the
+         * inner cap at MAX_REFS would make this loop spin; confirm. */
+        while (rpl_tmp.nb_refs < sh->nb_refs[list_idx]) {
+            for (i = 0; i < FF_ARRAY_ELEMS(cand_lists); i++) {
+                RefPicList *rps = &s->rps[cand_lists[i]];
+                for (j = 0; j < rps->nb_refs && rpl_tmp.nb_refs < MAX_REFS; j++) {
+                    rpl_tmp.list[rpl_tmp.nb_refs]       = rps->list[j];
+                    rpl_tmp.ref[rpl_tmp.nb_refs]        = rps->ref[j];
+                    rpl_tmp.isLongTerm[rpl_tmp.nb_refs] = i == 2;
+                    rpl_tmp.nb_refs++;
+                }
+            }
+        }
+
+        /* reorder the references if necessary */
+        if (sh->rpl_modification_flag[list_idx]) {
+            for (i = 0; i < sh->nb_refs[list_idx]; i++) {
+                int idx = sh->list_entry_lx[list_idx][i];
+
+                if (idx >= rpl_tmp.nb_refs) {
+                    av_log(s->avctx, AV_LOG_ERROR, "Invalid reference index.\n");
+                    return AVERROR_INVALIDDATA;
+                }
+
+                rpl->list[i]       = rpl_tmp.list[idx];
+                rpl->ref[i]        = rpl_tmp.ref[idx];
+                rpl->isLongTerm[i] = rpl_tmp.isLongTerm[idx];
+                rpl->nb_refs++;
+            }
+        } else {
+            /* no explicit reordering: truncate to the requested length */
+            memcpy(rpl, &rpl_tmp, sizeof(*rpl));
+            rpl->nb_refs = FFMIN(rpl->nb_refs, sh->nb_refs[list_idx]);
+        }
+
+        /* remember the collocated picture for temporal MV prediction */
+        if (sh->collocated_list == list_idx &&
+            sh->collocated_ref_idx < rpl->nb_refs)
+            s->ref->collocated_ref = rpl->ref[sh->collocated_ref_idx];
+    }
+
+    return 0;
+}
+
+/* Look up a DPB frame matching 'poc' in the current decode sequence.
+ * The first pass compares only the POC LSBs; the second also accepts an
+ * exact POC match.  Logs an error and returns NULL when nothing matches. */
+static HEVCFrame *find_ref_idx(HEVCContext *s, int poc)
+{
+    int i;
+    int LtMask = (1 << s->sps->log2_max_poc_lsb) - 1;
+
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        HEVCFrame *ref = &s->DPB[i];
+        if (ref->frame->buf[0] && (ref->sequence == s->seq_decode)) {
+            if ((ref->poc & LtMask) == poc)
+                return ref;
+        }
+    }
+
+    /* second pass: full-POC comparison as well */
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        HEVCFrame *ref = &s->DPB[i];
+        if (ref->frame->buf[0] && ref->sequence == s->seq_decode) {
+            if (ref->poc == poc || (ref->poc & LtMask) == poc)
+                return ref;
+        }
+    }
+
+    av_log(s->avctx, AV_LOG_ERROR,
+           "Could not find ref with POC %d\n", poc);
+    return NULL;
+}
+
+/* Replace a frame's reference-type marking (short/long-term) with 'flag',
+ * leaving any other flags (output, bumping) untouched. */
+static void mark_ref(HEVCFrame *frame, int flag)
+{
+    int kept = frame->flags & ~(HEVC_FRAME_FLAG_LONG_REF | HEVC_FRAME_FLAG_SHORT_REF);
+    frame->flags = kept | flag;
+}
+
+/* Conceal a missing reference: allocate a fresh DPB frame with the given
+ * POC and fill every plane with mid-grey (1 << (bit_depth - 1)). */
+static HEVCFrame *generate_missing_ref(HEVCContext *s, int poc)
+{
+    HEVCFrame *frame;
+    int i, x, y;
+
+    frame = alloc_frame(s);
+    if (!frame)
+        return NULL;
+
+    if (!s->sps->pixel_shift) {
+        /* 8-bit samples: a plain memset of each plane buffer suffices */
+        for (i = 0; frame->frame->buf[i]; i++)
+            memset(frame->frame->buf[i]->data, 1 << (s->sps->bit_depth - 1),
+                   frame->frame->buf[i]->size);
+    } else {
+        /* >8-bit samples: write the 16-bit grey value sample by sample */
+        for (i = 0; frame->frame->data[i]; i++)
+            for (y = 0; y < (s->sps->height >> s->sps->vshift[i]); y++)
+                for (x = 0; x < (s->sps->width >> s->sps->hshift[i]); x++) {
+                    AV_WN16(frame->frame->data[i] + y * frame->frame->linesize[i] + 2 * x,
+                            1 << (s->sps->bit_depth - 1));
+                }
+    }
+
+    frame->poc      = poc;
+    frame->sequence = s->seq_decode;
+    frame->flags    = 0;
+
+    /* the synthetic frame is immediately complete for frame threading */
+    if (s->threads_type == FF_THREAD_FRAME)
+        ff_thread_report_progress(&frame->tf, INT_MAX, 0);
+
+    return frame;
+}
+
+/* add a reference with the given poc to the list and mark it as used in DPB.
+ * Returns 0, AVERROR_INVALIDDATA on self-reference or a full list, or
+ * AVERROR(ENOMEM) when a missing ref cannot be synthesized. */
+static int add_candidate_ref(HEVCContext *s, RefPicList *list,
+                             int poc, int ref_flag)
+{
+    HEVCFrame *ref = find_ref_idx(s, poc);
+
+    /* the current picture must not reference itself, and a crafted RPS must
+     * not overflow the fixed-size list arrays */
+    if (ref == s->ref || list->nb_refs >= MAX_REFS)
+        return AVERROR_INVALIDDATA;
+
+    if (!ref) {
+        /* conceal the missing reference with a grey frame */
+        ref = generate_missing_ref(s, poc);
+        if (!ref)
+            return AVERROR(ENOMEM);
+    }
+
+    list->list[list->nb_refs] = ref->poc;
+    list->ref[list->nb_refs]  = ref;
+    list->nb_refs++;
+
+    mark_ref(ref, ref_flag);
+    return 0;
+}
+
+/* Build the frame-level reference picture sets for the current picture:
+ * classify every POC from the slice header's short- and long-term RPS into
+ * the rps[] buckets, re-mark DPB frames as short/long-term accordingly,
+ * and release frames that are no longer referenced nor pending output. */
+int ff_hevc_frame_rps(HEVCContext *s)
+{
+    const ShortTermRPS *short_rps = s->sh.short_term_rps;
+    const LongTermRPS  *long_rps  = &s->sh.long_term_rps;
+    RefPicList               *rps = s->rps;
+    int i, ret;
+
+    if (!short_rps) {
+        rps[0].nb_refs = rps[1].nb_refs = 0;
+        return 0;
+    }
+
+    /* clear the reference flags on all frames except the current one */
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
+        HEVCFrame *frame = &s->DPB[i];
+
+        if (frame == s->ref)
+            continue;
+
+        mark_ref(frame, 0);
+    }
+
+    for (i = 0; i < NB_RPS_TYPE; i++)
+        rps[i].nb_refs = 0;
+
+    /* add the short refs */
+    for (i = 0; i < short_rps->num_delta_pocs; i++) {
+        int poc = s->poc + short_rps->delta_poc[i];
+        int list;
+
+        /* unused entries go to the "follow" set; used ones are split by
+         * whether they precede or follow the current picture */
+        if (!short_rps->used[i])
+            list = ST_FOLL;
+        else if (i < short_rps->num_negative_pics)
+            list = ST_CURR_BEF;
+        else
+            list = ST_CURR_AFT;
+
+        ret = add_candidate_ref(s, &rps[list], poc, HEVC_FRAME_FLAG_SHORT_REF);
+        if (ret < 0)
+            return ret;
+    }
+
+    /* add the long refs */
+    for (i = 0; i < long_rps->nb_refs; i++) {
+        int poc  = long_rps->poc[i];
+        int list = long_rps->used[i] ? LT_CURR : LT_FOLL;
+
+        ret = add_candidate_ref(s, &rps[list], poc, HEVC_FRAME_FLAG_LONG_REF);
+        if (ret < 0)
+            return ret;
+    }
+
+    /* release any frames that are now unused */
+    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++)
+        ff_hevc_unref_frame(s, &s->DPB[i], 0);
+
+    return 0;
+}
+
+/* Reconstruct the full picture order count from its transmitted LSBs,
+ * handling LSB wrap-around relative to the previous Tid0 picture
+ * (H.265 section 8.3.1 PicOrderCntMsb derivation). */
+int ff_hevc_compute_poc(HEVCContext *s, int poc_lsb)
+{
+    int max_poc_lsb  = 1 << s->sps->log2_max_poc_lsb;
+    int prev_poc_lsb = s->pocTid0 % max_poc_lsb;
+    int prev_poc_msb = s->pocTid0 - prev_poc_lsb;
+    int poc_msb      = prev_poc_msb;
+
+    /* detect an LSB counter wrap in either direction */
+    if (poc_lsb < prev_poc_lsb && prev_poc_lsb - poc_lsb >= max_poc_lsb / 2)
+        poc_msb += max_poc_lsb;
+    else if (poc_lsb > prev_poc_lsb && poc_lsb - prev_poc_lsb > max_poc_lsb / 2)
+        poc_msb -= max_poc_lsb;
+
+    /* For BLA picture types, POCmsb is set to 0. */
+    switch (s->nal_unit_type) {
+    case NAL_BLA_W_LP:
+    case NAL_BLA_W_RADL:
+    case NAL_BLA_N_LP:
+        poc_msb = 0;
+        break;
+    default:
+        break;
+    }
+
+    return poc_msb + poc_lsb;
+}
+
+/* Count the references actually used by the current picture: every "used"
+ * entry of the short-term RPS plus every used long-term reference. */
+int ff_hevc_frame_nb_refs(HEVCContext *s)
+{
+    const ShortTermRPS *rps = s->sh.short_term_rps;
+    LongTermRPS *long_rps   = &s->sh.long_term_rps;
+    int count = 0;
+    int i;
+
+    if (rps)
+        for (i = 0; i < rps->num_delta_pocs; i++)
+            count += rps->used[i] ? 1 : 0;
+
+    if (long_rps)
+        for (i = 0; i < long_rps->nb_refs; i++)
+            count += long_rps->used[i] ? 1 : 0;
+
+    return count;
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevc_sei.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,217 @@
+/*
+ * HEVC Supplementary Enhancement Information messages
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ * Copyright (C) 2012 - 2013 Gildas Cocherel
+ * Copyright (C) 2013 Vittorio Giovara
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "golomb.h"
+#include "hevc.h"
+
+/* decoded_picture_hash SEI: when the hash type is MD5 (0), record the
+ * per-plane MD5 so the decoder can verify its output; CRC (1) and
+ * checksum (2) payloads are parsed but discarded. */
+static void decode_nal_sei_decoded_picture_hash(HEVCContext *s)
+{
+    int cIdx, i;
+    uint8_t hash_type;
+    //uint16_t picture_crc;
+    //uint32_t picture_checksum;
+    GetBitContext *gb = &s->HEVClc->gb;
+    hash_type = get_bits(gb, 8);
+
+    /* always iterates 3 planes; the commented expression suggests
+     * monochrome should use 1 — kept as-is */
+    for (cIdx = 0; cIdx < 3/*((s->sps->chroma_format_idc == 0) ? 1 : 3)*/; cIdx++) {
+        if (hash_type == 0) {
+            s->is_md5 = 1;
+            for (i = 0; i < 16; i++)
+                s->md5[cIdx][i] = get_bits(gb, 8);
+        } else if (hash_type == 1) {
+            // picture_crc = get_bits(gb, 16);
+            skip_bits(gb, 16);
+        } else if (hash_type == 2) {
+            // picture_checksum = get_bits_long(gb, 32);
+            skip_bits(gb, 32);
+        }
+    }
+}
+
+#ifdef USE_FULL
+/* frame_packing_arrangement SEI (stereo 3D signalling): active while the
+ * cancel flag is unset; records packing type, quincunx and content
+ * interpretation, skipping fields the decoder does not use. */
+static void decode_nal_sei_frame_packing_arrangement(HEVCContext *s)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+
+    get_ue_golomb(gb);                  // frame_packing_arrangement_id
+    s->sei_frame_packing_present = !get_bits1(gb); // !cancel_flag
+
+    if (s->sei_frame_packing_present) {
+        s->frame_packing_arrangement_type = get_bits(gb, 7);
+        s->quincunx_subsampling           = get_bits1(gb);
+        s->content_interpretation_type    = get_bits(gb, 6);
+
+        // the following skips spatial_flipping_flag frame0_flipped_flag
+        // field_views_flag current_frame_is_frame0_flag
+        // frame0_self_contained_flag frame1_self_contained_flag
+        skip_bits(gb, 6);
+
+        if (!s->quincunx_subsampling && s->frame_packing_arrangement_type != 5)
+            skip_bits(gb, 16);  // frame[01]_grid_position_[xy]
+        skip_bits(gb, 8);       // frame_packing_arrangement_reserved_byte
+        skip_bits1(gb);         // frame_packing_arrangement_persistance_flag
+    }
+    skip_bits1(gb);             // upsampled_aspect_ratio_flag
+}
+
+/* display_orientation SEI: flip/rotation hints for presentation, active
+ * while the cancel flag is unset. */
+static void decode_nal_sei_display_orientation(HEVCContext *s)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+
+    s->sei_display_orientation_present = !get_bits1(gb); // !cancel_flag
+
+    if (s->sei_display_orientation_present) {
+        s->sei_hflip = get_bits1(gb);     // hor_flip
+        s->sei_vflip = get_bits1(gb);     // ver_flip
+
+        s->sei_anticlockwise_rotation = get_bits(gb, 16);
+        skip_bits1(gb);     // display_orientation_persistence_flag
+    }
+}
+
+/* pic_timing SEI: derive the picture structure (frame / top field /
+ * bottom field) from pic_struct when the active SPS signals
+ * frame_field_info.  Returns 1 on success. */
+static int decode_pic_timing(HEVCContext *s)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+    HEVCSPS *sps;
+
+    /* NOTE(review): ENOMEM is a misleading error for a missing SPS —
+     * AVERROR_INVALIDDATA would fit better; behavior left unchanged. */
+    if (!s->sps_list[s->active_seq_parameter_set_id])
+        return(AVERROR(ENOMEM));
+    sps = (HEVCSPS*)s->sps_list[s->active_seq_parameter_set_id]->data;
+
+    if (sps->vui.frame_field_info_present_flag) {
+        int pic_struct = get_bits(gb, 4);
+        s->picture_struct = AV_PICTURE_STRUCTURE_UNKNOWN;
+        if (pic_struct == 2) {
+            av_log(s->avctx, AV_LOG_DEBUG, "BOTTOM Field\n");
+            s->picture_struct = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
+        } else if (pic_struct == 1) {
+            av_log(s->avctx, AV_LOG_DEBUG, "TOP Field\n");
+            s->picture_struct = AV_PICTURE_STRUCTURE_TOP_FIELD;
+        }
+        get_bits(gb, 2);                   // source_scan_type
+        get_bits(gb, 1);                   // duplicate_flag
+    }
+    return 1;
+}
+
+/* active_parameter_sets SEI: records the active SPS id so later SEI
+ * messages (e.g. pic_timing) can look up the correct SPS. */
+static int active_parameter_sets(HEVCContext *s)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+    int num_sps_ids_minus1;
+    int i;
+    unsigned active_seq_parameter_set_id;
+
+    get_bits(gb, 4); // active_video_parameter_set_id
+    get_bits(gb, 1); // self_contained_cvs_flag
+    get_bits(gb, 1); // no_parameter_set_update_flag (was mislabelled "num_sps_ids_minus1")
+    num_sps_ids_minus1 = get_ue_golomb_long(gb); // num_sps_ids_minus1
+
+    active_seq_parameter_set_id = get_ue_golomb_long(gb);
+    if (active_seq_parameter_set_id >= MAX_SPS_COUNT) {
+        av_log(s->avctx, AV_LOG_ERROR, "active_parameter_set_id %d invalid\n", active_seq_parameter_set_id);
+        return AVERROR_INVALIDDATA;
+    }
+    s->active_seq_parameter_set_id = active_seq_parameter_set_id;
+
+    /* remaining SPS ids are parsed but not used */
+    for (i = 1; i <= num_sps_ids_minus1; i++)
+        get_ue_golomb_long(gb); // active_seq_parameter_set_id[i]
+
+    return 0;
+}
+#endif
+
+/* Parse one SEI message: the 0xFF-extended payload type and size, then
+ * dispatch on the type depending on prefix/suffix NAL.  Unknown payloads
+ * are skipped by their declared size.  Returns >= 1 on success. */
+static int decode_nal_sei_message(HEVCContext *s)
+{
+    GetBitContext *gb = &s->HEVClc->gb;
+
+    int payload_type = 0;
+    int payload_size = 0;
+    int byte = 0xFF;
+    av_log(s->avctx, AV_LOG_DEBUG, "Decoding SEI\n");
+
+    /* payload type/size use ff_byte extension coding:
+     * keep summing bytes while they read 0xFF */
+    while (byte == 0xFF) {
+        byte          = get_bits(gb, 8);
+        payload_type += byte;
+    }
+    byte = 0xFF;
+    while (byte == 0xFF) {
+        byte          = get_bits(gb, 8);
+        payload_size += byte;
+    }
+    if (s->nal_unit_type == NAL_SEI_PREFIX) {
+        /* NOTE(review): type 256 for the prefix picture hash appears to be
+         * a local (BPG) extension — standard FFmpeg uses suffix type 132 */
+        if (payload_type == 256 /*&& s->decode_checksum_sei*/) {
+            decode_nal_sei_decoded_picture_hash(s);
+        } else 
+#ifdef USE_FULL
+        if (payload_type == 45) {
+            decode_nal_sei_frame_packing_arrangement(s);
+        } else if (payload_type == 47) {
+            decode_nal_sei_display_orientation(s);
+        } else if (payload_type == 1){
+            int ret = decode_pic_timing(s);
+            av_log(s->avctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", payload_type);
+            skip_bits(gb, 8 * payload_size);
+            return ret;
+        } else if (payload_type == 129){
+            active_parameter_sets(s);
+            av_log(s->avctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", payload_type);
+        } else 
+#endif
+#ifdef USE_FRAME_DURATION_SEI
+        if (payload_type == 257) {
+            /* frame duration in multiples of the frame rate period */
+            s->frame_duration = get_bits(gb, 16);
+        } else
+#endif
+        {
+            av_log(s->avctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", payload_type);
+            skip_bits(gb, 8*payload_size);
+        }
+    } else { /* nal_unit_type == NAL_SEI_SUFFIX */
+        if (payload_type == 132 /* && s->decode_checksum_sei */)
+            decode_nal_sei_decoded_picture_hash(s);
+        else {
+            av_log(s->avctx, AV_LOG_DEBUG, "Skipped SUFFIX SEI %d\n", payload_type);
+            skip_bits(gb, 8 * payload_size);
+        }
+    }
+    return 1;
+}
+
+/* True while the RBSP still holds SEI payload data, i.e. the next byte is
+ * not the rbsp_trailing_bits pattern (0x80). */
+static int more_rbsp_data(GetBitContext *gb)
+{
+    if (get_bits_left(gb) <= 0)
+        return 0;
+    return show_bits(gb, 8) != 0x80;
+}
+
+/* Parse every SEI message in the current NAL unit until the RBSP trailing
+ * bits.  NOTE(review): any message failure is reported as AVERROR(ENOMEM)
+ * regardless of the underlying error; callers should only test for < 0. */
+int ff_hevc_decode_nal_sei(HEVCContext *s)
+{
+    int ret;
+
+    do {
+        ret = decode_nal_sei_message(s);
+        if (ret < 0)
+            return(AVERROR(ENOMEM));
+    } while (more_rbsp_data(&s->HEVClc->gb));
+    return 1;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevcdsp.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,325 @@
+/*
+ * HEVC video decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ * Copyright (C) 2013 - 2014 Pierre-Edouard Lepere
+ *
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "hevcdsp.h"
+
+#ifdef CONFIG_SMALL
+static int8_t transform[32][32]; /* DCT matrix, built once at runtime by hevc_transform_init() */
+
+static const int8_t dct_coefs[32] = { /* quarter-period table the full 32x32 matrix is folded from */
+    64, 90, 90, 90, 89, 88, 87, 85, 83, 82, 80, 78, 75, 73, 70, 67, 64, 61, 57, 54, 50, 46, 43, 38, 36, 31, 25, 22, 18, 13, 9, 4
+};
+
+void hevc_transform_init(void) /* build the 32x32 transform matrix from the 32-entry folded table (CONFIG_SMALL) */
+{
+    int i, j, k, s;
+
+    if (transform[0][0]) /* already initialized ([0][0] becomes 64 once built) */
+        return;
+    for(i = 0; i < 32; i++) {
+        for(j = 0; j < 32; j++) {
+            k = (unsigned)((2 * j + 1) * i) % 128; /* cosine argument index, period 128 */
+            s = 1;
+            if (k >= 64) { /* second half-period: negate */
+                k -= 64;
+                s = -1;
+            }
+            if (k >= 32) { /* mirror into the first quarter, flipping sign */
+                k = 64 - k;
+                s = -s;
+            }
+            transform[i][j] = dct_coefs[k] * s; /* index stays in [0,31] for i < 32 */
+        }
+    }
+}
+
+#else
+static const int8_t transform[32][32] = {
+    { 64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,
+      64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64,  64 },
+    { 90,  90,  88,  85,  82,  78,  73,  67,  61,  54,  46,  38,  31,  22,  13,   4,
+      -4, -13, -22, -31, -38, -46, -54, -61, -67, -73, -78, -82, -85, -88, -90, -90 },
+    { 90,  87,  80,  70,  57,  43,  25,   9,  -9, -25, -43, -57, -70, -80, -87, -90,
+     -90, -87, -80, -70, -57, -43, -25,  -9,   9,  25,  43,  57,  70,  80,  87,  90 },
+    { 90,  82,  67,  46,  22,  -4, -31, -54, -73, -85, -90, -88, -78, -61, -38, -13,
+      13,  38,  61,  78,  88,  90,  85,  73,  54,  31,   4, -22, -46, -67, -82, -90 },
+    { 89,  75,  50,  18, -18, -50, -75, -89, -89, -75, -50, -18,  18,  50,  75,  89,
+      89,  75,  50,  18, -18, -50, -75, -89, -89, -75, -50, -18,  18,  50,  75,  89 },
+    { 88,  67,  31, -13, -54, -82, -90, -78, -46, -4,   38,  73,  90,  85,  61,  22,
+     -22, -61, -85, -90, -73, -38,   4,  46,  78,  90,  82,  54,  13, -31, -67, -88 },
+    { 87,  57,   9, -43, -80, -90, -70, -25,  25,  70,  90,  80,  43,  -9, -57, -87,
+     -87, -57,  -9,  43,  80,  90,  70,  25, -25, -70, -90, -80, -43,   9,  57,  87 },
+    { 85,  46, -13, -67, -90, -73, -22,  38,  82,  88,  54,  -4, -61, -90, -78, -31,
+      31,  78,  90,  61,   4, -54, -88, -82, -38,  22,  73,  90,  67,  13, -46, -85 },
+    { 83,  36, -36, -83, -83, -36,  36,  83,  83,  36, -36, -83, -83, -36,  36,  83,
+      83,  36, -36, -83, -83, -36,  36,  83,  83,  36, -36, -83, -83, -36,  36,  83 },
+    { 82,  22, -54, -90, -61,  13,  78,  85,  31, -46, -90, -67,   4,  73,  88,  38,
+     -38, -88, -73,  -4,  67,  90,  46, -31, -85, -78, -13,  61,  90,  54, -22, -82 },
+    { 80,   9, -70, -87, -25,  57,  90,  43, -43, -90, -57,  25,  87,  70,  -9, -80,
+     -80,  -9,  70,  87,  25, -57, -90, -43,  43,  90,  57, -25, -87, -70,   9,  80 },
+    { 78,  -4, -82, -73,  13,  85,  67, -22, -88, -61,  31,  90,  54, -38, -90, -46,
+      46,  90,  38, -54, -90, -31,  61,  88,  22, -67, -85, -13,  73,  82,   4, -78 },
+    { 75, -18, -89, -50,  50,  89,  18, -75, -75,  18,  89,  50, -50, -89, -18,  75,
+      75, -18, -89, -50,  50,  89,  18, -75, -75,  18,  89,  50, -50, -89, -18,  75 },
+    { 73, -31, -90, -22,  78,  67, -38, -90, -13,  82,  61, -46, -88,  -4,  85,  54,
+     -54, -85,   4,  88,  46, -61, -82,  13,  90,  38, -67, -78,  22,  90,  31, -73 },
+    { 70, -43, -87,   9,  90,  25, -80, -57,  57,  80, -25, -90,  -9,  87,  43, -70,
+     -70,  43,  87,  -9, -90, -25,  80,  57, -57, -80,  25,  90,   9, -87, -43,  70 },
+    { 67, -54, -78,  38,  85, -22, -90,   4,  90,  13, -88, -31,  82,  46, -73, -61,
+      61,  73, -46, -82,  31,  88, -13, -90,  -4,  90,  22, -85, -38,  78,  54, -67 },
+    { 64, -64, -64,  64,  64, -64, -64,  64,  64, -64, -64,  64,  64, -64, -64,  64,
+      64, -64, -64,  64,  64, -64, -64,  64,  64, -64, -64,  64,  64, -64, -64,  64 },
+    { 61, -73, -46,  82,  31, -88, -13,  90,  -4, -90,  22,  85, -38, -78,  54,  67,
+     -67, -54,  78,  38, -85, -22,  90,   4, -90,  13,  88, -31, -82,  46,  73, -61 },
+    { 57, -80, -25,  90,  -9, -87,  43,  70, -70, -43,  87,   9, -90,  25,  80, -57,
+     -57,  80,  25, -90,   9,  87, -43, -70,  70,  43, -87,  -9,  90, -25, -80,  57 },
+    { 54, -85,  -4,  88, -46, -61,  82,  13, -90,  38,  67, -78, -22,  90, -31, -73,
+      73,  31, -90,  22,  78, -67, -38,  90, -13, -82,  61,  46, -88,   4,  85, -54 },
+    { 50, -89,  18,  75, -75, -18,  89, -50, -50,  89, -18, -75,  75,  18, -89,  50,
+      50, -89,  18,  75, -75, -18,  89, -50, -50,  89, -18, -75,  75,  18, -89,  50 },
+    { 46, -90,  38,  54, -90,  31,  61, -88,  22,  67, -85,  13,  73, -82,   4,  78,
+     -78,  -4,  82, -73, -13,  85, -67, -22,  88, -61, -31,  90, -54, -38,  90, -46 },
+    { 43, -90,  57,  25, -87,  70,   9, -80,  80,  -9, -70,  87, -25, -57,  90, -43,
+     -43,  90, -57, -25,  87, -70,  -9,  80, -80,   9,  70, -87,  25,  57, -90,  43 },
+    { 38, -88,  73,  -4, -67,  90, -46, -31,  85, -78,  13,  61, -90,  54,  22, -82,
+      82, -22, -54,  90, -61, -13,  78, -85,  31,  46, -90,  67,   4, -73,  88, -38 },
+    { 36, -83,  83, -36, -36,  83, -83,  36,  36, -83,  83, -36, -36,  83, -83,  36,
+      36, -83,  83, -36, -36,  83, -83,  36,  36, -83,  83, -36, -36,  83, -83,  36 },
+    { 31, -78,  90, -61,   4,  54, -88,  82, -38, -22,  73, -90,  67, -13, -46,  85,
+     -85,  46,  13, -67,  90, -73,  22,  38, -82,  88, -54,  -4,  61, -90,  78, -31 },
+    { 25, -70,  90, -80,  43,   9, -57,  87, -87,  57,  -9, -43,  80, -90,  70, -25,
+     -25,  70, -90,  80, -43,  -9,  57, -87,  87, -57,   9,  43, -80,  90, -70,  25 },
+    { 22, -61,  85, -90,  73, -38,  -4,  46, -78,  90, -82,  54, -13, -31,  67, -88,
+      88, -67,  31,  13, -54,  82, -90,  78, -46,   4,  38, -73,  90, -85,  61, -22 },
+    { 18, -50,  75, -89,  89, -75,  50, -18, -18,  50, -75,  89, -89,  75, -50,  18,
+      18, -50,  75, -89,  89, -75,  50, -18, -18,  50, -75,  89, -89,  75, -50,  18 },
+    { 13, -38,  61, -78,  88, -90,  85, -73,  54, -31,   4,  22, -46,  67, -82,  90,
+     -90,  82, -67,  46, -22,  -4,  31, -54,  73, -85,  90, -88,  78, -61,  38, -13 },
+    {  9, -25,  43, -57,  70, -80,  87, -90,  90, -87,  80, -70,  57, -43,  25, -9,
+      -9,  25, -43,  57, -70,  80, -87,  90, -90,  87, -80,  70, -57,  43, -25,   9 },
+    {  4, -13,  22, -31,  38, -46,  54, -61,  67, -73,  78, -82,  85, -88,  90, -90,
+      90, -90,  88, -85,  82, -78,  73, -67,  61, -54,  46, -38,  31, -22,  13,  -4 },
+};
+#endif
+
+DECLARE_ALIGNED(16, const int8_t, ff_hevc_epel_filters[7][4]) = { /* 4-tap chroma (EPEL) filters for fractional positions 1..7 */
+    { -2, 58, 10, -2},
+    { -4, 54, 16, -2},
+    { -6, 46, 28, -4},
+    { -4, 36, 36, -4},
+    { -4, 28, 46, -6},
+    { -2, 16, 54, -4},
+    { -2, 10, 58, -2},
+};
+
+DECLARE_ALIGNED(16, const int8_t, ff_hevc_qpel_filters[3][16]) = { /* 8-tap luma (QPEL) filters, coefficients repeated for SIMD loads */
+    { -1,  4,-10, 58, 17, -5,  1,  0, -1,  4,-10, 58, 17, -5,  1,  0},
+    { -1,  4,-11, 40, 40,-11,  4, -1, -1,  4,-11, 40, 40,-11,  4, -1},
+    {  0,  1, -5, 17, 58,-10,  4, -1,  0,  1, -5, 17, 58,-10,  4, -1}
+};
+
+#if defined(USE_VAR_BIT_DEPTH)
+
+#define BIT_DEPTH bit_depth
+#include "hevcdsp_template.c"
+#undef BIT_DEPTH
+
+#else
+
+#define BIT_DEPTH 8
+#include "hevcdsp_template.c"
+#undef BIT_DEPTH
+
+#ifdef USE_FULL
+#define BIT_DEPTH 9
+#include "hevcdsp_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 10
+#include "hevcdsp_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 12
+#include "hevcdsp_template.c"
+#undef BIT_DEPTH
+#endif /* USE_FULL */
+
+#endif /* !USE_VAR_BIT_DEPTH */
+
+void ff_hevc_dsp_init(HEVCDSPContext *hevcdsp, int bit_depth) /* fill the DSP function table for the requested bit depth */
+{
+#undef FUNC
+#define FUNC(a, depth) a ## _ ## depth
+
+#undef PEL_FUNC
+#define PEL_FUNC(dst1, idx1, idx2, a, depth)                                   \
+    for(i = 0 ; i < 10 ; i++)                                                  \
+{                                                                              \
+    hevcdsp->dst1[i][idx1][idx2] = a ## _ ## depth;                            \
+}
+
+#undef EPEL_FUNCS
+#define EPEL_FUNCS(depth)                                                     \
+    PEL_FUNC(put_hevc_epel, 0, 0, put_hevc_pel_pixels, depth);                \
+    PEL_FUNC(put_hevc_epel, 0, 1, put_hevc_epel_h, depth);                    \
+    PEL_FUNC(put_hevc_epel, 1, 0, put_hevc_epel_v, depth);                    \
+    PEL_FUNC(put_hevc_epel, 1, 1, put_hevc_epel_hv, depth)
+
+#undef EPEL_UNI_FUNCS
+#define EPEL_UNI_FUNCS(depth)                                                 \
+    PEL_FUNC(put_hevc_epel_uni, 0, 0, put_hevc_pel_uni_pixels, depth);        \
+    PEL_FUNC(put_hevc_epel_uni, 0, 1, put_hevc_epel_uni_h, depth);            \
+    PEL_FUNC(put_hevc_epel_uni, 1, 0, put_hevc_epel_uni_v, depth);            \
+    PEL_FUNC(put_hevc_epel_uni, 1, 1, put_hevc_epel_uni_hv, depth);           \
+    PEL_FUNC(put_hevc_epel_uni_w, 0, 0, put_hevc_pel_uni_w_pixels, depth);    \
+    PEL_FUNC(put_hevc_epel_uni_w, 0, 1, put_hevc_epel_uni_w_h, depth);        \
+    PEL_FUNC(put_hevc_epel_uni_w, 1, 0, put_hevc_epel_uni_w_v, depth);        \
+    PEL_FUNC(put_hevc_epel_uni_w, 1, 1, put_hevc_epel_uni_w_hv, depth)
+
+#undef EPEL_BI_FUNCS
+#define EPEL_BI_FUNCS(depth)                                                \
+    PEL_FUNC(put_hevc_epel_bi, 0, 0, put_hevc_pel_bi_pixels, depth);        \
+    PEL_FUNC(put_hevc_epel_bi, 0, 1, put_hevc_epel_bi_h, depth);            \
+    PEL_FUNC(put_hevc_epel_bi, 1, 0, put_hevc_epel_bi_v, depth);            \
+    PEL_FUNC(put_hevc_epel_bi, 1, 1, put_hevc_epel_bi_hv, depth);           \
+    PEL_FUNC(put_hevc_epel_bi_w, 0, 0, put_hevc_pel_bi_w_pixels, depth);    \
+    PEL_FUNC(put_hevc_epel_bi_w, 0, 1, put_hevc_epel_bi_w_h, depth);        \
+    PEL_FUNC(put_hevc_epel_bi_w, 1, 0, put_hevc_epel_bi_w_v, depth);        \
+    PEL_FUNC(put_hevc_epel_bi_w, 1, 1, put_hevc_epel_bi_w_hv, depth)
+
+#undef QPEL_FUNCS
+#define QPEL_FUNCS(depth)                                                     \
+    PEL_FUNC(put_hevc_qpel, 0, 0, put_hevc_pel_pixels, depth);                \
+    PEL_FUNC(put_hevc_qpel, 0, 1, put_hevc_qpel_h, depth);                    \
+    PEL_FUNC(put_hevc_qpel, 1, 0, put_hevc_qpel_v, depth);                    \
+    PEL_FUNC(put_hevc_qpel, 1, 1, put_hevc_qpel_hv, depth)
+
+#undef QPEL_UNI_FUNCS
+#define QPEL_UNI_FUNCS(depth)                                                 \
+    PEL_FUNC(put_hevc_qpel_uni, 0, 0, put_hevc_pel_uni_pixels, depth);        \
+    PEL_FUNC(put_hevc_qpel_uni, 0, 1, put_hevc_qpel_uni_h, depth);            \
+    PEL_FUNC(put_hevc_qpel_uni, 1, 0, put_hevc_qpel_uni_v, depth);            \
+    PEL_FUNC(put_hevc_qpel_uni, 1, 1, put_hevc_qpel_uni_hv, depth);           \
+    PEL_FUNC(put_hevc_qpel_uni_w, 0, 0, put_hevc_pel_uni_w_pixels, depth);    \
+    PEL_FUNC(put_hevc_qpel_uni_w, 0, 1, put_hevc_qpel_uni_w_h, depth);        \
+    PEL_FUNC(put_hevc_qpel_uni_w, 1, 0, put_hevc_qpel_uni_w_v, depth);        \
+    PEL_FUNC(put_hevc_qpel_uni_w, 1, 1, put_hevc_qpel_uni_w_hv, depth)
+
+#undef QPEL_BI_FUNCS
+#define QPEL_BI_FUNCS(depth)                                                  \
+    PEL_FUNC(put_hevc_qpel_bi, 0, 0, put_hevc_pel_bi_pixels, depth);          \
+    PEL_FUNC(put_hevc_qpel_bi, 0, 1, put_hevc_qpel_bi_h, depth);              \
+    PEL_FUNC(put_hevc_qpel_bi, 1, 0, put_hevc_qpel_bi_v, depth);              \
+    PEL_FUNC(put_hevc_qpel_bi, 1, 1, put_hevc_qpel_bi_hv, depth);             \
+    PEL_FUNC(put_hevc_qpel_bi_w, 0, 0, put_hevc_pel_bi_w_pixels, depth);      \
+    PEL_FUNC(put_hevc_qpel_bi_w, 0, 1, put_hevc_qpel_bi_w_h, depth);          \
+    PEL_FUNC(put_hevc_qpel_bi_w, 1, 0, put_hevc_qpel_bi_w_v, depth);          \
+    PEL_FUNC(put_hevc_qpel_bi_w, 1, 1, put_hevc_qpel_bi_w_hv, depth)
+
+#ifdef USE_PRED
+
+#ifdef USE_BIPRED
+
+#define HEVC_DSP_PRED(depth)                                                   \
+    QPEL_FUNCS(depth);                                                         \
+    QPEL_UNI_FUNCS(depth);                                                     \
+    QPEL_BI_FUNCS(depth);                                                      \
+    EPEL_FUNCS(depth);                                                         \
+    EPEL_UNI_FUNCS(depth);                                                     \
+    EPEL_BI_FUNCS(depth);
+
+#else
+
+#define HEVC_DSP_PRED(depth)                                                   \
+    QPEL_UNI_FUNCS(depth);                                                     \
+    EPEL_UNI_FUNCS(depth);
+
+#endif
+
+#else
+
+#define HEVC_DSP_PRED(depth)
+
+#endif
+
+#define HEVC_DSP(depth)                                                     \
+    hevcdsp->put_pcm                = FUNC(put_pcm, depth);                 \
+    hevcdsp->transform_add[0]       = FUNC(transform_add4x4, depth);        \
+    hevcdsp->transform_add[1]       = FUNC(transform_add8x8, depth);        \
+    hevcdsp->transform_add[2]       = FUNC(transform_add16x16, depth);      \
+    hevcdsp->transform_add[3]       = FUNC(transform_add32x32, depth);      \
+    hevcdsp->transform_skip         = FUNC(transform_skip, depth);          \
+    hevcdsp->transform_rdpcm        = FUNC(transform_rdpcm, depth);         \
+    hevcdsp->idct_4x4_luma          = FUNC(transform_4x4_luma, depth);      \
+    hevcdsp->idct[0]                = FUNC(idct_4x4, depth);                \
+    hevcdsp->idct[1]                = FUNC(idct_8x8, depth);                \
+    hevcdsp->idct[2]                = FUNC(idct_16x16, depth);              \
+    hevcdsp->idct[3]                = FUNC(idct_32x32, depth);              \
+                                                                            \
+    hevcdsp->idct_dc[0]             = FUNC(idct_4x4_dc, depth);             \
+    hevcdsp->idct_dc[1]             = FUNC(idct_8x8_dc, depth);             \
+    hevcdsp->idct_dc[2]             = FUNC(idct_16x16_dc, depth);           \
+    hevcdsp->idct_dc[3]             = FUNC(idct_32x32_dc, depth);           \
+    HEVC_DSP_PRED(depth)                                                    \
+    hevcdsp->sao_band_filter    = FUNC(sao_band_filter_0, depth);              \
+    hevcdsp->sao_edge_filter[0] = FUNC(sao_edge_filter_0, depth);              \
+    hevcdsp->sao_edge_filter[1] = FUNC(sao_edge_filter_1, depth);              \
+                                                                               \
+    hevcdsp->hevc_h_loop_filter_luma     = FUNC(hevc_h_loop_filter_luma, depth);   \
+    hevcdsp->hevc_v_loop_filter_luma     = FUNC(hevc_v_loop_filter_luma, depth);   \
+    hevcdsp->hevc_h_loop_filter_chroma   = FUNC(hevc_h_loop_filter_chroma, depth); \
+    hevcdsp->hevc_v_loop_filter_chroma   = FUNC(hevc_v_loop_filter_chroma, depth); \
+    hevcdsp->hevc_h_loop_filter_luma_c   = FUNC(hevc_h_loop_filter_luma, depth);   \
+    hevcdsp->hevc_v_loop_filter_luma_c   = FUNC(hevc_v_loop_filter_luma, depth);   \
+    hevcdsp->hevc_h_loop_filter_chroma_c = FUNC(hevc_h_loop_filter_chroma, depth); \
+    hevcdsp->hevc_v_loop_filter_chroma_c = FUNC(hevc_v_loop_filter_chroma, depth)
+
+#ifdef USE_PRED
+int i = 0; /* loop counter consumed by the PEL_FUNC expansions below */
+#endif
+
+#if defined(USE_VAR_BIT_DEPTH)
+    HEVC_DSP(var); /* one runtime-parameterized code path covers every bit depth */
+#else
+    switch (bit_depth) { /* pick the compiled-in depth-specialized functions */
+#ifdef USE_FULL
+    case 9:
+        HEVC_DSP(9);
+        break;
+    case 10:
+        HEVC_DSP(10);
+        break;
+    case 12:
+        HEVC_DSP(12);
+        break;
+#endif /* USE_FULL */
+    default:
+        HEVC_DSP(8);
+        break;
+    }
+#endif /* USE_VAR_BIT_DEPTH */
+
+    if (ARCH_X86) /* let x86 SIMD implementations override the C versions */
+        ff_hevc_dsp_init_x86(hevcdsp, bit_depth);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevcdsp.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,144 @@
+/*
+ * HEVC video decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ * Copyright (C) 2013 - 2014 Pierre-Edouard Lepere
+ *
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_HEVCDSP_H
+#define AVCODEC_HEVCDSP_H
+
+#include "get_bits.h"
+
+#define MAX_PB_SIZE 64
+
+#ifdef USE_VAR_BIT_DEPTH
+#define BIT_DEPTH_PARAM ,int bit_depth
+#define BIT_DEPTH_ARG , bit_depth
+#define BIT_DEPTH_ARG2(x) , x
+#else
+#define BIT_DEPTH_PARAM 
+#define BIT_DEPTH_ARG
+#define BIT_DEPTH_ARG2(x)
+#endif
+
+typedef struct SAOParams { /* per-CTB sample-adaptive-offset parameters; first index = colour component */
+    int offset_abs[3][4];   ///< sao_offset_abs
+    int offset_sign[3][4];  ///< sao_offset_sign
+
+    uint8_t band_position[3];   ///< sao_band_position
+
+    int eo_class[3];        ///< sao_eo_class
+
+    int16_t offset_val[3][5];   ///< SaoOffsetVal
+
+    uint8_t type_idx[3];    ///< sao_type_idx
+} SAOParams;
+
+typedef struct HEVCDSPContext {
+    void (*put_pcm)(uint8_t *_dst, ptrdiff_t _stride, int width, int height,
+                    struct GetBitContext *gb, int pcm_bit_depth BIT_DEPTH_PARAM);
+
+    void (*transform_add[4])(uint8_t *_dst, int16_t *coeffs, ptrdiff_t _stride BIT_DEPTH_PARAM);
+
+    void (*transform_skip)(int16_t *coeffs, int16_t log2_size BIT_DEPTH_PARAM);
+
+    void (*transform_rdpcm)(int16_t *coeffs, int16_t log2_size, int mode);
+
+    void (*idct_4x4_luma)(int16_t *coeffs BIT_DEPTH_PARAM);
+
+    void (*idct[4])(int16_t *coeffs, int col_limit BIT_DEPTH_PARAM);
+
+    void (*idct_dc[4])(int16_t *coeffs BIT_DEPTH_PARAM);
+
+    void (*sao_band_filter)(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src,
+                            struct SAOParams *sao, int *borders,
+                            int width, int height, int c_idx BIT_DEPTH_PARAM);
+
+    void (*sao_edge_filter[2])(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src,
+                               struct SAOParams *sao, int *borders, int _width,
+                               int _height, int c_idx, uint8_t *vert_edge,
+                               uint8_t *horiz_edge, uint8_t *diag_edge BIT_DEPTH_PARAM);
+
+    void (*put_hevc_qpel[10][2][2])(int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
+                                    int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM);
+    void (*put_hevc_qpel_uni[10][2][2])(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
+                                        int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM);
+    void (*put_hevc_qpel_uni_w[10][2][2])(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                          int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM);
+
+    void (*put_hevc_qpel_bi[10][2][2])(uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                       int16_t *src2,
+                                       int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM);
+    void (*put_hevc_qpel_bi_w[10][2][2])(uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                         int16_t *src2,
+                                         int height, int denom, int wx0, int wx1,
+                                         int ox0, int ox1, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM);
+    void (*put_hevc_epel[10][2][2])(int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
+                                    int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM);
+
+    void (*put_hevc_epel_uni[10][2][2])(uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                        int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM);
+    void (*put_hevc_epel_uni_w[10][2][2])(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                          int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM);
+    void (*put_hevc_epel_bi[10][2][2])(uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                       int16_t *src2,
+                                       int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM);
+    void (*put_hevc_epel_bi_w[10][2][2])(uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                         int16_t *src2,
+                                         int height, int denom, int wx0, int ox0, int wx1,
+                                         int ox1, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM);
+
+    void (*hevc_h_loop_filter_luma)(uint8_t *pix, ptrdiff_t stride,
+                                    int beta, int32_t *tc,
+                                    uint8_t *no_p, uint8_t *no_q BIT_DEPTH_PARAM);
+    void (*hevc_v_loop_filter_luma)(uint8_t *pix, ptrdiff_t stride,
+                                    int beta, int32_t *tc,
+                                    uint8_t *no_p, uint8_t *no_q BIT_DEPTH_PARAM);
+    void (*hevc_h_loop_filter_chroma)(uint8_t *pix, ptrdiff_t stride,
+                                      int32_t *tc, uint8_t *no_p, uint8_t *no_q BIT_DEPTH_PARAM);
+    void (*hevc_v_loop_filter_chroma)(uint8_t *pix, ptrdiff_t stride,
+                                      int32_t *tc, uint8_t *no_p, uint8_t *no_q BIT_DEPTH_PARAM);
+    void (*hevc_h_loop_filter_luma_c)(uint8_t *pix, ptrdiff_t stride,
+                                      int beta, int32_t *tc,
+                                      uint8_t *no_p, uint8_t *no_q BIT_DEPTH_PARAM);
+    void (*hevc_v_loop_filter_luma_c)(uint8_t *pix, ptrdiff_t stride,
+                                      int beta, int32_t *tc,
+                                      uint8_t *no_p, uint8_t *no_q BIT_DEPTH_PARAM);
+    void (*hevc_h_loop_filter_chroma_c)(uint8_t *pix, ptrdiff_t stride,
+                                        int32_t *tc, uint8_t *no_p,
+                                        uint8_t *no_q BIT_DEPTH_PARAM);
+    void (*hevc_v_loop_filter_chroma_c)(uint8_t *pix, ptrdiff_t stride,
+                                        int32_t *tc, uint8_t *no_p,
+                                        uint8_t *no_q BIT_DEPTH_PARAM);
+} HEVCDSPContext;
+
+void ff_hevc_dsp_init(HEVCDSPContext *hpc, int bit_depth);
+
+extern const int8_t ff_hevc_epel_filters[7][4];
+extern const int8_t ff_hevc_qpel_filters[3][16];
+
+void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth);
+
+#ifdef CONFIG_SMALL
+void hevc_transform_init(void);
+#endif
+
+#endif /* AVCODEC_HEVCDSP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevcdsp_template.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1697 @@
+/*
+ * HEVC video decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "get_bits.h"
+#include "hevc.h"
+
+#include "bit_depth_template.c"
+#include "hevcdsp.h"
+
+
+static void FUNC(put_pcm)(uint8_t *_dst, ptrdiff_t stride, int width, int height,
+                          GetBitContext *gb, int pcm_bit_depth BIT_DEPTH_PARAM) /* copy raw PCM samples from the bitstream */
+{
+    int x, y;
+    pixel *dst = (pixel *)_dst;
+
+    stride /= sizeof(pixel); /* byte stride -> pixel stride */
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = get_bits(gb, pcm_bit_depth) << (BIT_DEPTH - pcm_bit_depth); /* scale up to the output bit depth */
+        dst += stride;
+    }
+}
+
+static void FUNC(transform_add4x4)(uint8_t *_dst, int16_t *coeffs,
+                                       ptrdiff_t stride BIT_DEPTH_PARAM) /* add 4x4 residual to the prediction, clipped to pixel range */
+{
+    int x, y;
+    pixel *dst = (pixel *)_dst;
+
+    stride /= sizeof(pixel); /* byte stride -> pixel stride */
+
+    for (y = 0; y < 4; y++) {
+        for (x = 0; x < 4; x++) {
+            dst[x] = av_clip_pixel(dst[x] + *coeffs);
+            coeffs++;
+        }
+        dst += stride;
+    }
+}
+
+static void FUNC(transform_add8x8)(uint8_t *_dst, int16_t *coeffs,
+                                       ptrdiff_t stride BIT_DEPTH_PARAM) /* add 8x8 residual to the prediction, clipped to pixel range */
+{
+    int x, y;
+    pixel *dst = (pixel *)_dst;
+
+    stride /= sizeof(pixel); /* byte stride -> pixel stride */
+
+    for (y = 0; y < 8; y++) {
+        for (x = 0; x < 8; x++) {
+            dst[x] = av_clip_pixel(dst[x] + *coeffs);
+            coeffs++;
+        }
+        dst += stride;
+    }
+}
+
+static void FUNC(transform_add16x16)(uint8_t *_dst, int16_t *coeffs,
+                                         ptrdiff_t stride BIT_DEPTH_PARAM) /* add 16x16 residual to the prediction, clipped to pixel range */
+{
+    int x, y;
+    pixel *dst = (pixel *)_dst;
+
+    stride /= sizeof(pixel); /* byte stride -> pixel stride */
+
+    for (y = 0; y < 16; y++) {
+        for (x = 0; x < 16; x++) {
+            dst[x] = av_clip_pixel(dst[x] + *coeffs);
+            coeffs++;
+        }
+        dst += stride;
+    }
+}
+
+static void FUNC(transform_add32x32)(uint8_t *_dst, int16_t *coeffs,
+                                         ptrdiff_t stride BIT_DEPTH_PARAM) /* add 32x32 residual to the prediction, clipped to pixel range */
+{
+    int x, y;
+    pixel *dst = (pixel *)_dst;
+
+    stride /= sizeof(pixel); /* byte stride -> pixel stride */
+
+    for (y = 0; y < 32; y++) {
+        for (x = 0; x < 32; x++) {
+            dst[x] = av_clip_pixel(dst[x] + *coeffs);
+            coeffs++;
+        }
+        dst += stride;
+    }
+}
+
+
+static void FUNC(transform_rdpcm)(int16_t *_coeffs, int16_t log2_size, int mode) /* undo residual DPCM: mode != 0 vertical, else horizontal */
+{
+    int16_t *coeffs = (int16_t *) _coeffs;
+    int x, y;
+    int size = 1 << log2_size;
+
+    if (mode) {
+        coeffs += size; /* row 0 is the predictor; start accumulating from row 1 */
+        for (y = 0; y < size - 1; y++) {
+            for (x = 0; x < size; x++)
+                coeffs[x] += coeffs[x - size]; /* add the sample directly above */
+            coeffs += size;
+        }
+    } else {
+        for (y = 0; y < size; y++) {
+            for (x = 1; x < size; x++)
+                coeffs[x] += coeffs[x - 1]; /* add the sample to the left */
+            coeffs += size;
+        }
+    }
+}
+
+static void FUNC(transform_skip)(int16_t *_coeffs, int16_t log2_size BIT_DEPTH_PARAM) /* scale transform-skip residual to the output range */
+{
+    int shift  = 15 - BIT_DEPTH - log2_size;
+    int x, y;
+    int size = 1 << log2_size;
+    int16_t *coeffs = _coeffs;
+
+
+    if (shift > 0) {
+        int offset = 1 << (shift - 1); /* rounding offset */
+        for (y = 0; y < size; y++) {
+            for (x = 0; x < size; x++) {
+                *coeffs = (*coeffs + offset) >> shift;
+                coeffs++;
+            }
+        }
+    } else {
+        for (y = 0; y < size; y++) {
+            for (x = 0; x < size; x++) {
+                *coeffs = *coeffs * (1 << -shift); /* multiply: left-shifting a negative value is UB (C11 6.5.7) */
+                coeffs++;
+            }
+        }
+    }
+}
+
+#define SET(dst, x)   (dst) = (x)
+#define SCALE(dst, x) (dst) = av_clip_int16(((x) + add) >> shift)
+#define ADD_AND_SCALE(dst, x)                                           \
+    (dst) = av_clip_pixel((dst) + av_clip_int16(((x) + add) >> shift))
+
+#define TR_4x4_LUMA(dst, src, step, assign)                             \
+    do {                                                                \
+        int c0 = src[0 * step] + src[2 * step];                         \
+        int c1 = src[2 * step] + src[3 * step];                         \
+        int c2 = src[0 * step] - src[3 * step];                         \
+        int c3 = 74 * src[1 * step];                                    \
+                                                                        \
+        assign(dst[2 * step], 74 * (src[0 * step] -                     \
+                                    src[2 * step] +                     \
+                                    src[3 * step]));                    \
+        assign(dst[0 * step], 29 * c0 + 55 * c1 + c3);                  \
+        assign(dst[1 * step], 55 * c2 - 29 * c1 + c3);                  \
+        assign(dst[3 * step], 55 * c0 + 29 * c2 - c3);                  \
+    } while (0)
+
+static void FUNC(transform_4x4_luma)(int16_t *coeffs BIT_DEPTH_PARAM) /* inverse 4x4 alternative transform for intra luma (TR_4x4_LUMA) */
+{
+    int i;
+    int shift    = 7;
+    int add      = 1 << (shift - 1);
+    int16_t *src = coeffs;
+
+    for (i = 0; i < 4; i++) {
+        TR_4x4_LUMA(src, src, 4, SCALE); /* first pass: column-wise, in place */
+        src++;
+    }
+
+    shift = 20 - BIT_DEPTH;
+    add   = 1 << (shift - 1);
+    for (i = 0; i < 4; i++) {
+        TR_4x4_LUMA(coeffs, coeffs, 1, SCALE); /* second pass: row-wise, in place */
+        coeffs += 4;
+    }
+}
+
+#undef TR_4x4_LUMA
+
+#define TR_4(dst, src, dstep, sstep, assign, end)                              \
+    do {                                                                       \
+        const int e0 = 64 * src[0 * sstep] + 64 * src[2 * sstep];              \
+        const int e1 = 64 * src[0 * sstep] - 64 * src[2 * sstep];              \
+        const int o0 = 83 * src[1 * sstep] + 36 * src[3 * sstep];              \
+        const int o1 = 36 * src[1 * sstep] - 83 * src[3 * sstep];              \
+                                                                               \
+        assign(dst[0 * dstep], e0 + o0);                                       \
+        assign(dst[1 * dstep], e1 + o1);                                       \
+        assign(dst[2 * dstep], e1 - o1);                                       \
+        assign(dst[3 * dstep], e0 - o0);                                       \
+    } while (0)
+
+/* Inverse 8-point transform: odd part accumulated from the odd rows of the
+ * `transform` coefficient table (declared elsewhere in this template), even
+ * part computed recursively via TR_4 on the even-indexed inputs. `end` limits
+ * the odd-coefficient loop so that known-zero high-frequency coefficients
+ * are skipped. */
+#define TR_8(dst, src, dstep, sstep, assign, end)                              \
+    do {                                                                       \
+        int i, j;                                                              \
+        int e_8[4];                                                            \
+        int o_8[4] = { 0 };                                                    \
+        for (i = 0; i < 4; i++)                                                \
+            for (j = 1; j < end; j += 2)                                       \
+                o_8[i] += transform[4 * j][i] * src[j * sstep];                \
+        TR_4(e_8, src, 1, 2 * sstep, SET, 4);                                  \
+                                                                               \
+        for (i = 0; i < 4; i++) {                                              \
+            assign(dst[i * dstep], e_8[i] + o_8[i]);                           \
+            assign(dst[(7 - i) * dstep], e_8[i] - o_8[i]);                     \
+        }                                                                      \
+    } while (0)
+
+/* Inverse 16-point transform: same structure as TR_8 — odd part from the
+ * `transform` table (row stride 2 here vs. 4 in TR_8), even part via TR_8 on
+ * the even-indexed inputs, then butterfly combine into dst. */
+#define TR_16(dst, src, dstep, sstep, assign, end)                             \
+    do {                                                                       \
+        int i, j;                                                              \
+        int e_16[8];                                                           \
+        int o_16[8] = { 0 };                                                   \
+        for (i = 0; i < 8; i++)                                                \
+            for (j = 1; j < end; j += 2)                                       \
+                o_16[i] += transform[2 * j][i] * src[j * sstep];               \
+        TR_8(e_16, src, 1, 2 * sstep, SET, 8);                                 \
+                                                                               \
+        for (i = 0; i < 8; i++) {                                              \
+            assign(dst[i * dstep], e_16[i] + o_16[i]);                         \
+            assign(dst[(15 - i) * dstep], e_16[i] - o_16[i]);                  \
+        }                                                                      \
+    } while (0)
+
+/* Inverse 32-point transform: top of the recursive even/odd decomposition.
+ * The odd part reads the `transform` table directly (row stride 1); the even
+ * part delegates to TR_16 with half the column limit (end/2). */
+#define TR_32(dst, src, dstep, sstep, assign, end)                             \
+    do {                                                                       \
+        int i, j;                                                              \
+        int e_32[16];                                                          \
+        int o_32[16] = { 0 };                                                  \
+        for (i = 0; i < 16; i++)                                               \
+            for (j = 1; j < end; j += 2)                                       \
+                o_32[i] += transform[j][i] * src[j * sstep];                   \
+        TR_16(e_32, src, 1, 2 * sstep, SET, end/2);                            \
+                                                                               \
+        for (i = 0; i < 16; i++) {                                             \
+            assign(dst[i * dstep], e_32[i] + o_32[i]);                         \
+            assign(dst[(31 - i) * dstep], e_32[i] - o_32[i]);                  \
+        }                                                                      \
+    } while (0)
+
+/* Per-size local variables for the IDCT template below. `limit`/`limit2`
+ * clamp the number of non-zero coefficient columns/rows processed, derived
+ * from the caller-supplied col_limit. The 4x4 case needs only limit2 because
+ * TR_4 ignores its `end` argument. */
+#define IDCT_VAR4(H)                                                          \
+    int      limit2   = FFMIN(col_limit + 4, H)
+#define IDCT_VAR8(H)                                                          \
+        int      limit   = FFMIN(col_limit, H);                               \
+        int      limit2   = FFMIN(col_limit + 4, H)
+#define IDCT_VAR16(H)   IDCT_VAR8(H)
+#define IDCT_VAR32(H)   IDCT_VAR8(H)
+
+/* Generates idct_HxH(): full 2-D inverse transform done as two 1-D passes.
+ * First pass runs over columns with shift 7 (and shrinking limit2 so fewer
+ * rows are processed for columns known to be zero); second pass runs over
+ * rows with the bit-depth-dependent shift 20 - BIT_DEPTH. `add` is consumed
+ * by the SCALE store macro (defined elsewhere). */
+#define IDCT(H)                                                              \
+static void FUNC(idct_##H ##x ##H )(                                         \
+                   int16_t *coeffs, int col_limit BIT_DEPTH_PARAM) {         \
+    int i;                                                                   \
+    int      shift   = 7;                                                    \
+    int      add     = 1 << (shift - 1);                                     \
+    int16_t *src     = coeffs;                                               \
+    IDCT_VAR ##H(H);                                                         \
+                                                                             \
+    for (i = 0; i < H; i++) {                                                \
+        TR_ ## H(src, src, H, H, SCALE, limit2);                             \
+        if (limit2 < H && i%4 == 0 && !!i)                                   \
+            limit2 -= 4;                                                     \
+        src++;                                                               \
+    }                                                                        \
+                                                                             \
+    shift   = 20 - BIT_DEPTH;                                                \
+    add     = 1 << (shift - 1);                                              \
+    for (i = 0; i < H; i++) {                                                \
+        TR_ ## H(coeffs, coeffs, 1, 1, SCALE, limit);                        \
+        coeffs += H;                                                         \
+    }                                                                        \
+}
+
+/* Generates idct_HxH_dc(): fast path when only the DC coefficient is
+ * non-zero — the whole HxH block is filled with the single rounded,
+ * bit-depth-scaled DC value. */
+#define IDCT_DC(H)                                                           \
+static void FUNC(idct_##H ##x ##H ##_dc)(                                    \
+                   int16_t *coeffs BIT_DEPTH_PARAM) {                        \
+    int i, j;                                                                \
+    int      shift   = 14 - BIT_DEPTH;                                       \
+    int      add     = 1 << (shift - 1);                                     \
+    int      coeff   = (((coeffs[0] + 1) >> 1) + add) >> shift;              \
+                                                                             \
+    for (j = 0; j < H; j++) {                                                \
+        for (i = 0; i < H; i++) {                                            \
+            coeffs[i+j*H] = coeff;                                           \
+        }                                                                    \
+    }                                                                        \
+}
+
+/* Instantiate the inverse transforms for all HEVC transform sizes, then drop
+ * the helper macros so they cannot leak into later template inclusions. */
+IDCT( 4)
+IDCT( 8)
+IDCT(16)
+IDCT(32)
+
+IDCT_DC( 4)
+IDCT_DC( 8)
+IDCT_DC(16)
+IDCT_DC(32)
+
+#undef TR_4
+#undef TR_8
+#undef TR_16
+#undef TR_32
+
+#undef SET
+#undef SCALE
+#undef ADD_AND_SCALE
+
+/* SAO band-offset filter: the sample range is split into 32 bands of width
+ * 2^(BIT_DEPTH-5); the four consecutive bands starting at band_position get
+ * the signalled offsets (wrapping mod 32), all others get offset 0. `borders`
+ * is unused here but kept for signature parity with the edge filters. */
+static void FUNC(sao_band_filter_0)(uint8_t *_dst, uint8_t *_src,
+                                  ptrdiff_t stride_dst, ptrdiff_t stride_src, SAOParams *sao,
+                                  int *borders, int width, int height,
+                                  int c_idx BIT_DEPTH_PARAM)
+{
+    pixel *dst = (pixel *)_dst;
+    pixel *src = (pixel *)_src;
+    int offset_table[32] = { 0 };
+    int k, y, x;
+    int shift  = BIT_DEPTH - 5;
+    int16_t *sao_offset_val = sao->offset_val[c_idx];
+    int sao_left_class  = sao->band_position[c_idx];
+
+    /* strides arrive in bytes; convert to pixel units */
+    stride_dst /= sizeof(pixel);
+    stride_src /= sizeof(pixel);
+
+    for (k = 0; k < 4; k++)
+        offset_table[(k + sao_left_class) & 31] = sao_offset_val[k + 1];
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(src[x] + offset_table[src[x] >> shift]);
+        dst += stride_dst;
+        src += stride_src;
+    }
+}
+
+/* Three-way comparison: 1, 0 or -1. Note both arguments are evaluated once
+ * or twice depending on the outcome — callers pass simple loads only. */
+#define CMP(a, b) ((a) > (b) ? 1 : ((a) == (b) ? 0 : -1))
+
+/* SAO edge-offset core: for each sample, compare against its two neighbours
+ * along the signalled direction (horizontal / vertical / 45 / 135 degree),
+ * map the pair of sign comparisons through edge_idx to one of five
+ * categories, and add that category's offset. Strides here are already in
+ * pixel units (callers divide by sizeof(pixel) before invoking). */
+static void FUNC(sao_edge_filter)(uint8_t *_dst, uint8_t *_src,
+                                  ptrdiff_t stride_dst, ptrdiff_t stride_src, SAOParams *sao,
+                                  int width, int height,
+                                  int c_idx, int init_x, int init_y BIT_DEPTH_PARAM) {
+
+    static const uint8_t edge_idx[] = { 1, 2, 0, 3, 4 };
+    static const int8_t pos[4][2][2] = {
+        { { -1,  0 }, {  1, 0 } }, // horizontal
+        { {  0, -1 }, {  0, 1 } }, // vertical
+        { { -1, -1 }, {  1, 1 } }, // 45 degree
+        { {  1, -1 }, { -1, 1 } }, // 135 degree
+    };
+    int16_t *sao_offset_val = sao->offset_val[c_idx];
+    int sao_eo_class    = sao->eo_class[c_idx];
+    pixel *dst = (pixel *)_dst;
+    pixel *src = (pixel *)_src;
+
+    int y_stride_src = init_y * stride_src;
+    int y_stride_dst = init_y * stride_dst;
+    int pos_0_0  = pos[sao_eo_class][0][0];
+    int pos_0_1  = pos[sao_eo_class][0][1];
+    int pos_1_0  = pos[sao_eo_class][1][0];
+    int pos_1_1  = pos[sao_eo_class][1][1];
+    int x, y;
+
+    /* row offsets of the two neighbour rows, advanced with the main row */
+    int y_stride_0_1 = (init_y + pos_0_1) * stride_src;
+    int y_stride_1_1 = (init_y + pos_1_1) * stride_src;
+    for (y = init_y; y < height; y++) {
+        for (x = init_x; x < width; x++) {
+            int diff0             = CMP(src[x + y_stride_src], src[x + pos_0_0 + y_stride_0_1]);
+            int diff1             = CMP(src[x + y_stride_src], src[x + pos_1_0 + y_stride_1_1]);
+            int offset_val        = edge_idx[2 + diff0 + diff1];
+            dst[x + y_stride_dst] = av_clip_pixel(src[x + y_stride_src] + sao_offset_val[offset_val]);
+        }
+        y_stride_src += stride_src;
+        y_stride_dst += stride_dst;
+        y_stride_0_1 += stride_src;
+        y_stride_1_1 += stride_src;
+    }
+}
+
+/* SAO edge-offset wrapper for blocks at picture borders: border columns/rows
+ * (where a directional neighbour would fall outside the picture) are copied
+ * with sao_offset_val[0] applied, the processed window is shrunk accordingly,
+ * and the interior is handled by sao_edge_filter(). vert_edge/horiz_edge/
+ * diag_edge are unused in this variant (see sao_edge_filter_1). */
+static void FUNC(sao_edge_filter_0)(uint8_t *_dst, uint8_t *_src,
+                                    ptrdiff_t stride_dst, ptrdiff_t stride_src, SAOParams *sao,
+                                    int *borders, int _width, int _height,
+                                    int c_idx, uint8_t *vert_edge,
+                                    uint8_t *horiz_edge, uint8_t *diag_edge BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *dst = (pixel *)_dst;
+    pixel *src = (pixel *)_src;
+    int16_t *sao_offset_val = sao->offset_val[c_idx];
+    int sao_eo_class    = sao->eo_class[c_idx];
+    int init_x = 0, init_y = 0, width = _width, height = _height;
+
+    /* strides arrive in bytes; convert to pixel units */
+    stride_dst /= sizeof(pixel);
+    stride_src /= sizeof(pixel);
+
+    if (sao_eo_class != SAO_EO_VERT) {
+        if (borders[0]) {
+            /* left picture border: process first column separately */
+            int offset_val = sao_offset_val[0];
+            for (y = 0; y < height; y++) {
+                dst[y * stride_dst] = av_clip_pixel(src[y * stride_src] + offset_val);
+            }
+            init_x = 1;
+        }
+        if (borders[2]) {
+            /* right picture border: process last column separately */
+            int offset_val = sao_offset_val[0];
+            int offset     = width - 1;
+            for (x = 0; x < height; x++) {
+                dst[x * stride_dst + offset] = av_clip_pixel(src[x * stride_src + offset] + offset_val);
+            }
+            width--;
+        }
+    }
+    if (sao_eo_class != SAO_EO_HORIZ) {
+        if (borders[1]) {
+            /* top picture border: process first row separately */
+            int offset_val = sao_offset_val[0];
+            for (x = init_x; x < width; x++)
+                dst[x] = av_clip_pixel(src[x] + offset_val);
+            init_y = 1;
+        }
+        if (borders[3]) {
+            /* bottom picture border: process last row separately */
+            int offset_val   = sao_offset_val[0];
+            int y_stride_dst = stride_dst * (height - 1);
+            int y_stride_src = stride_src * (height - 1);
+            for (x = init_x; x < width; x++)
+                dst[x + y_stride_dst] = av_clip_pixel(src[x + y_stride_src] + offset_val);
+            height--;
+        }
+    }
+
+    FUNC(sao_edge_filter)((uint8_t *)dst, (uint8_t *)src, stride_dst, stride_src, sao, width, height, c_idx, init_x, init_y BIT_DEPTH_ARG);
+}
+
+/* SAO edge-offset variant that, after filtering like sao_edge_filter_0,
+ * restores edge/corner pixels flagged in vert_edge/horiz_edge/diag_edge —
+ * presumably pixels whose neighbours in an adjacent CTB are not yet final
+ * and therefore must not be modified (TODO confirm against the caller). */
+static void FUNC(sao_edge_filter_1)(uint8_t *_dst, uint8_t *_src,
+                                    ptrdiff_t stride_dst, ptrdiff_t stride_src, SAOParams *sao,
+                                    int *borders, int _width, int _height,
+                                    int c_idx, uint8_t *vert_edge,
+                                    uint8_t *horiz_edge, uint8_t *diag_edge BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *dst = (pixel *)_dst;
+    pixel *src = (pixel *)_src;
+    int16_t *sao_offset_val = sao->offset_val[c_idx];
+    int sao_eo_class    = sao->eo_class[c_idx];
+    int init_x = 0, init_y = 0, width = _width, height = _height;
+
+    /* strides arrive in bytes; convert to pixel units */
+    stride_dst /= sizeof(pixel);
+    stride_src /= sizeof(pixel);
+
+    if (sao_eo_class != SAO_EO_VERT) {
+        if (borders[0]) {
+            /* left picture border: process first column separately */
+            int offset_val = sao_offset_val[0];
+            for (y = 0; y < height; y++) {
+                dst[y * stride_dst] = av_clip_pixel(src[y * stride_src] + offset_val);
+            }
+            init_x = 1;
+        }
+        if (borders[2]) {
+            /* right picture border: process last column separately */
+            int offset_val = sao_offset_val[0];
+            int offset     = width - 1;
+            for (x = 0; x < height; x++) {
+                dst[x * stride_dst + offset] = av_clip_pixel(src[x * stride_src + offset] + offset_val);
+            }
+            width--;
+        }
+    }
+    if (sao_eo_class != SAO_EO_HORIZ) {
+        if (borders[1]) {
+            /* top picture border: process first row separately */
+            int offset_val = sao_offset_val[0];
+            for (x = init_x; x < width; x++)
+                dst[x] = av_clip_pixel(src[x] + offset_val);
+            init_y = 1;
+        }
+        if (borders[3]) {
+            /* bottom picture border: process last row separately */
+            int offset_val   = sao_offset_val[0];
+            int y_stride_dst = stride_dst * (height - 1);
+            int y_stride_src = stride_src * (height - 1);
+            for (x = init_x; x < width; x++)
+                dst[x + y_stride_dst] = av_clip_pixel(src[x + y_stride_src] + offset_val);
+            height--;
+        }
+    }
+
+    FUNC(sao_edge_filter)((uint8_t *)dst, (uint8_t *)src, stride_dst, stride_src, sao, width, height, c_idx, init_x, init_y BIT_DEPTH_ARG);
+
+    {
+        /* corners are kept filtered only when the diagonal neighbour is safe
+         * for the signalled direction and no picture border is involved */
+        int save_upper_left  = !diag_edge[0] && sao_eo_class == SAO_EO_135D && !borders[0] && !borders[1];
+        int save_upper_right = !diag_edge[1] && sao_eo_class == SAO_EO_45D  && !borders[1] && !borders[2];
+        int save_lower_right = !diag_edge[2] && sao_eo_class == SAO_EO_135D && !borders[2] && !borders[3];
+        int save_lower_left  = !diag_edge[3] && sao_eo_class == SAO_EO_45D  && !borders[0] && !borders[3];
+
+        // Restore pixels that can't be modified
+        if(vert_edge[0] && sao_eo_class != SAO_EO_VERT) {
+            for(y = init_y+save_upper_left; y< height-save_lower_left; y++)
+                dst[y*stride_dst] = src[y*stride_src];
+        }
+        if(vert_edge[1] && sao_eo_class != SAO_EO_VERT) {
+            for(y = init_y+save_upper_right; y< height-save_lower_right; y++)
+                dst[y*stride_dst+width-1] = src[y*stride_src+width-1];
+        }
+
+        if(horiz_edge[0] && sao_eo_class != SAO_EO_HORIZ) {
+            for(x = init_x+save_upper_left; x < width-save_upper_right; x++)
+                dst[x] = src[x];
+        }
+        if(horiz_edge[1] && sao_eo_class != SAO_EO_HORIZ) {
+            for(x = init_x+save_lower_left; x < width-save_lower_right; x++)
+                dst[(height-1)*stride_dst+x] = src[(height-1)*stride_src+x];
+        }
+        if(diag_edge[0] && sao_eo_class == SAO_EO_135D)
+            dst[0] = src[0];
+        if(diag_edge[1] && sao_eo_class == SAO_EO_45D)
+            dst[width-1] = src[width-1];
+        if(diag_edge[2] && sao_eo_class == SAO_EO_135D)
+            dst[stride_dst*(height-1)+width-1] = src[stride_src*(height-1)+width-1];
+        if(diag_edge[3] && sao_eo_class == SAO_EO_45D)
+            dst[stride_dst*(height-1)] = src[stride_src*(height-1)];
+
+    }
+}
+
+#undef CMP
+
+#ifdef USE_PRED
+////////////////////////////////////////////////////////////////////////////////
+//
+////////////////////////////////////////////////////////////////////////////////
+#ifdef USE_BIPRED
+/* Full-pel copy into the intermediate int16 buffer (row pitch MAX_PB_SIZE),
+ * up-shifted to the 14-bit intermediate precision. mx/my are ignored —
+ * present only for signature parity with the fractional-pel functions. */
+static void FUNC(put_hevc_pel_pixels)(int16_t *dst,
+                                      uint8_t *_src, ptrdiff_t _srcstride,
+                                      int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src          = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = src[x] << (14 - BIT_DEPTH);
+        src += srcstride;
+        dst += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Uni-prediction full-pel copy: plain row-by-row memcpy from src to dst.
+ * mx/my are unused (signature parity with the fractional-pel variants). */
+static void FUNC(put_hevc_pel_uni_pixels)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                          int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int y;
+    pixel *src          = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+
+    for (y = 0; y < height; y++) {
+        memcpy(dst, src, width * sizeof(pixel));
+        src += srcstride;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* Bi-prediction full-pel: average the full-pel reference (raised to 14-bit
+ * precision) with the second prediction src2 (int16, pitch MAX_PB_SIZE),
+ * with rounding, then clip to pixel range. */
+static void FUNC(put_hevc_pel_bi_pixels)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                         int16_t *src2,
+                                         int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src          = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+
+    int shift = 14  + 1 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((src[x] << (14 - BIT_DEPTH)) + src2[x] + offset) >> shift);
+        src  += srcstride;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Weighted uni-prediction full-pel: apply explicit weight wx (with rounding,
+ * denominator 2^denom) and bit-depth-scaled offset ox to each sample. */
+static void FUNC(put_hevc_pel_uni_w_pixels)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                            int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src          = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    int shift = denom + 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    /* offset is signalled at 8-bit precision; scale to the real bit depth */
+    ox     = ox * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel((((src[x] << (14 - BIT_DEPTH)) * wx + offset) >> shift) + ox);
+        src += srcstride;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* Weighted bi-prediction full-pel: combine the reference (weight wx1) with
+ * the second prediction src2 (weight wx0), add the averaged offsets, and
+ * normalize by log2Wd+1 with rounding before clipping. */
+static void FUNC(put_hevc_pel_bi_w_pixels)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                           int16_t *src2,
+                                           int height, int denom, int wx0, int wx1,
+                                           int ox0, int ox1, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src          = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+
+    int shift = 14  + 1 - BIT_DEPTH;
+    int log2Wd = denom + shift - 1;
+
+    /* offsets are signalled at 8-bit precision; scale to the real bit depth */
+    ox0     = ox0 * (1 << (BIT_DEPTH - 8));
+    ox1     = ox1 * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++) {
+            dst[x] = av_clip_pixel(( (src[x] << (14 - BIT_DEPTH)) * wx1 + src2[x] * wx0 + ((ox0 + ox1 + 1) << log2Wd)) >> (log2Wd + 1));
+        }
+        src  += srcstride;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+//
+////////////////////////////////////////////////////////////////////////////////
+/* 8-tap luma interpolation filter applied at position x; `filter` and `x`
+ * are expected in the caller's scope, `stride` selects horizontal (1) or
+ * vertical (row stride) filtering. Reads taps from src[x-3*stride] through
+ * src[x+4*stride]. */
+#define QPEL_FILTER(src, stride)                                               \
+    (filter[0] * src[x - 3 * stride] +                                         \
+     filter[1] * src[x - 2 * stride] +                                         \
+     filter[2] * src[x -     stride] +                                         \
+     filter[3] * src[x             ] +                                         \
+     filter[4] * src[x +     stride] +                                         \
+     filter[5] * src[x + 2 * stride] +                                         \
+     filter[6] * src[x + 3 * stride] +                                         \
+     filter[7] * src[x + 4 * stride])
+
+#ifdef USE_BIPRED
+/* Horizontal qpel filtering into the int16 intermediate buffer (pitch
+ * MAX_PB_SIZE), keeping 14-bit intermediate precision. mx selects the
+ * fractional-phase filter; my is unused here. */
+static void FUNC(put_hevc_qpel_h)(int16_t *dst,
+                                  uint8_t *_src, ptrdiff_t _srcstride,
+                                  int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel        *src       = (pixel*)_src;
+    ptrdiff_t     srcstride = _srcstride / sizeof(pixel);
+    const int8_t *filter    = ff_hevc_qpel_filters[mx - 1];
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = QPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        dst += MAX_PB_SIZE;
+    }
+}
+
+/* Vertical qpel filtering into the int16 intermediate buffer; my selects
+ * the fractional-phase filter, mx is unused here. */
+static void FUNC(put_hevc_qpel_v)(int16_t *dst,
+                                  uint8_t *_src, ptrdiff_t _srcstride,
+                                  int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel        *src       = (pixel*)_src;
+    ptrdiff_t     srcstride = _srcstride / sizeof(pixel);
+    const int8_t *filter    = ff_hevc_qpel_filters[my - 1];
+    for (y = 0; y < height; y++)  {
+        for (x = 0; x < width; x++)
+            dst[x] = QPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        dst += MAX_PB_SIZE;
+    }
+}
+
+/* Separable 2-D qpel filtering: first horizontal pass over height+QPEL_EXTRA
+ * rows (starting QPEL_EXTRA_BEFORE rows above) into a stack temp buffer,
+ * then vertical pass from the temp buffer into the int16 destination. */
+static void FUNC(put_hevc_qpel_hv)(int16_t *dst,
+                                   uint8_t *_src,
+                                   ptrdiff_t _srcstride,
+                                   int height, intptr_t mx,
+                                   intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    const int8_t *filter;
+    pixel *src = (pixel*)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    int16_t tmp_array[(MAX_PB_SIZE + QPEL_EXTRA) * MAX_PB_SIZE];
+    int16_t *tmp = tmp_array;
+
+    src   -= QPEL_EXTRA_BEFORE * srcstride;
+    filter = ff_hevc_qpel_filters[mx - 1];
+    for (y = 0; y < height + QPEL_EXTRA; y++) {
+        for (x = 0; x < width; x++)
+            tmp[x] = QPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        tmp += MAX_PB_SIZE;
+    }
+
+    tmp    = tmp_array + QPEL_EXTRA_BEFORE * MAX_PB_SIZE;
+    filter = ff_hevc_qpel_filters[my - 1];
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = QPEL_FILTER(tmp, MAX_PB_SIZE) >> 6;
+        tmp += MAX_PB_SIZE;
+        dst += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Uni-prediction horizontal qpel: filter, round and shift back down to the
+ * output bit depth, and clip directly into the pixel destination. */
+static void FUNC(put_hevc_qpel_uni_h)(uint8_t *_dst,  ptrdiff_t _dststride,
+                                      uint8_t *_src, ptrdiff_t _srcstride,
+                                      int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel        *src       = (pixel*)_src;
+    ptrdiff_t     srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter    = ff_hevc_qpel_filters[mx - 1];
+    int shift = 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((QPEL_FILTER(src, 1) >> (BIT_DEPTH - 8)) + offset) >> shift);
+        src += srcstride;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* Bi-prediction horizontal qpel: average the filtered reference with the
+ * second prediction src2 (int16, pitch MAX_PB_SIZE), round and clip. */
+static void FUNC(put_hevc_qpel_bi_h)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                     int16_t *src2,
+                                     int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel        *src       = (pixel*)_src;
+    ptrdiff_t     srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+
+    const int8_t *filter    = ff_hevc_qpel_filters[mx - 1];
+
+    int shift = 14  + 1 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((QPEL_FILTER(src, 1) >> (BIT_DEPTH - 8)) + src2[x] + offset) >> shift);
+        src  += srcstride;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Uni-prediction vertical qpel: same as put_hevc_qpel_uni_h but filtering
+ * along columns (stride = srcstride, phase selected by my). */
+static void FUNC(put_hevc_qpel_uni_v)(uint8_t *_dst,  ptrdiff_t _dststride,
+                                     uint8_t *_src, ptrdiff_t _srcstride,
+                                     int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel        *src       = (pixel*)_src;
+    ptrdiff_t     srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter    = ff_hevc_qpel_filters[my - 1];
+    int shift = 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((QPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8)) + offset) >> shift);
+        src += srcstride;
+        dst += dststride;
+    }
+}
+
+
+#ifdef USE_BIPRED
+/* Bi-prediction vertical qpel: filtered reference averaged with src2
+ * (int16, pitch MAX_PB_SIZE), rounded and clipped. */
+static void FUNC(put_hevc_qpel_bi_v)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                     int16_t *src2,
+                                     int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel        *src       = (pixel*)_src;
+    ptrdiff_t     srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+
+    const int8_t *filter    = ff_hevc_qpel_filters[my - 1];
+
+    int shift = 14 + 1 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((QPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8)) + src2[x] + offset) >> shift);
+        src  += srcstride;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Uni-prediction 2-D qpel: horizontal pass (with QPEL_EXTRA margin rows)
+ * into a stack temp buffer, then vertical pass with final rounding, shift
+ * to output bit depth and clipping into the pixel destination. */
+static void FUNC(put_hevc_qpel_uni_hv)(uint8_t *_dst,  ptrdiff_t _dststride,
+                                       uint8_t *_src, ptrdiff_t _srcstride,
+                                       int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    const int8_t *filter;
+    pixel *src = (pixel*)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    int16_t tmp_array[(MAX_PB_SIZE + QPEL_EXTRA) * MAX_PB_SIZE];
+    int16_t *tmp = tmp_array;
+    int shift =  14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    src   -= QPEL_EXTRA_BEFORE * srcstride;
+    filter = ff_hevc_qpel_filters[mx - 1];
+    for (y = 0; y < height + QPEL_EXTRA; y++) {
+        for (x = 0; x < width; x++)
+            tmp[x] = QPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        tmp += MAX_PB_SIZE;
+    }
+
+    tmp    = tmp_array + QPEL_EXTRA_BEFORE * MAX_PB_SIZE;
+    filter = ff_hevc_qpel_filters[my - 1];
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((QPEL_FILTER(tmp, MAX_PB_SIZE) >> 6) + offset) >> shift);
+        tmp += MAX_PB_SIZE;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* Bi-prediction 2-D qpel: horizontal then vertical pass via a stack temp
+ * buffer, result averaged with src2 (int16, pitch MAX_PB_SIZE), rounded
+ * and clipped. */
+static void FUNC(put_hevc_qpel_bi_hv)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                      int16_t *src2,
+                                      int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    const int8_t *filter;
+    pixel *src = (pixel*)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    int16_t tmp_array[(MAX_PB_SIZE + QPEL_EXTRA) * MAX_PB_SIZE];
+    int16_t *tmp = tmp_array;
+    int shift = 14 + 1 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    src   -= QPEL_EXTRA_BEFORE * srcstride;
+    filter = ff_hevc_qpel_filters[mx - 1];
+    for (y = 0; y < height + QPEL_EXTRA; y++) {
+        for (x = 0; x < width; x++)
+            tmp[x] = QPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        tmp += MAX_PB_SIZE;
+    }
+
+    tmp    = tmp_array + QPEL_EXTRA_BEFORE * MAX_PB_SIZE;
+    filter = ff_hevc_qpel_filters[my - 1];
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((QPEL_FILTER(tmp, MAX_PB_SIZE) >> 6) + src2[x] + offset) >> shift);
+        tmp  += MAX_PB_SIZE;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Weighted uni-prediction horizontal qpel: filtered sample scaled by wx,
+ * rounded with denominator 2^denom, then bit-depth-scaled offset ox added. */
+static void FUNC(put_hevc_qpel_uni_w_h)(uint8_t *_dst,  ptrdiff_t _dststride,
+                                        uint8_t *_src, ptrdiff_t _srcstride,
+                                        int height, int denom, int wx, int ox,
+                                        intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel        *src       = (pixel*)_src;
+    ptrdiff_t     srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter    = ff_hevc_qpel_filters[mx - 1];
+    int shift = denom + 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    /* offset is signalled at 8-bit precision; scale to the real bit depth */
+    ox = ox * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel((((QPEL_FILTER(src, 1) >> (BIT_DEPTH - 8)) * wx + offset) >> shift) + ox);
+        src += srcstride;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* Weighted bi-prediction horizontal qpel: filtered reference (weight wx1)
+ * combined with src2 (weight wx0) plus the averaged offsets, normalized by
+ * log2Wd+1 with rounding and clipped. */
+static void FUNC(put_hevc_qpel_bi_w_h)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                       int16_t *src2,
+                                       int height, int denom, int wx0, int wx1,
+                                       int ox0, int ox1, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel        *src       = (pixel*)_src;
+    ptrdiff_t     srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+
+    const int8_t *filter    = ff_hevc_qpel_filters[mx - 1];
+
+    int shift = 14  + 1 - BIT_DEPTH;
+    int log2Wd = denom + shift - 1;
+
+    /* offsets are signalled at 8-bit precision; scale to the real bit depth */
+    ox0     = ox0 * (1 << (BIT_DEPTH - 8));
+    ox1     = ox1 * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((QPEL_FILTER(src, 1) >> (BIT_DEPTH - 8)) * wx1 + src2[x] * wx0 +
+                                    ((ox0 + ox1 + 1) << log2Wd)) >> (log2Wd + 1));
+        src  += srcstride;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Weighted uni-prediction vertical qpel: same as put_hevc_qpel_uni_w_h but
+ * filtering along columns (phase selected by my). */
+static void FUNC(put_hevc_qpel_uni_w_v)(uint8_t *_dst,  ptrdiff_t _dststride,
+                                        uint8_t *_src, ptrdiff_t _srcstride,
+                                        int height, int denom, int wx, int ox,
+                                        intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel        *src       = (pixel*)_src;
+    ptrdiff_t     srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter    = ff_hevc_qpel_filters[my - 1];
+    int shift = denom + 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    /* offset is signalled at 8-bit precision; scale to the real bit depth */
+    ox = ox * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel((((QPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8)) * wx + offset) >> shift) + ox);
+        src += srcstride;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* Weighted bi-prediction vertical qpel: vertical counterpart of
+ * put_hevc_qpel_bi_w_h (phase selected by my). */
+static void FUNC(put_hevc_qpel_bi_w_v)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                       int16_t *src2,
+                                       int height, int denom, int wx0, int wx1,
+                                       int ox0, int ox1, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel        *src       = (pixel*)_src;
+    ptrdiff_t     srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+
+    const int8_t *filter    = ff_hevc_qpel_filters[my - 1];
+
+    int shift = 14 + 1 - BIT_DEPTH;
+    int log2Wd = denom + shift - 1;
+
+    /* offsets are signalled at 8-bit precision; scale to the real bit depth */
+    ox0     = ox0 * (1 << (BIT_DEPTH - 8));
+    ox1     = ox1 * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((QPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8)) * wx1 + src2[x] * wx0 +
+                                    ((ox0 + ox1 + 1) << log2Wd)) >> (log2Wd + 1));
+        src  += srcstride;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* 2-D (horizontal then vertical) qpel interpolation with explicit weighted
+ * prediction.  Pass 1 filters horizontally (phase mx) into the int16
+ * tmp_array, producing QPEL_EXTRA extra rows needed by the vertical taps;
+ * pass 2 filters tmp vertically (phase my), applies wx/denom/ox and clips. */
+static void FUNC(put_hevc_qpel_uni_w_hv)(uint8_t *_dst,  ptrdiff_t _dststride,
+                                         uint8_t *_src, ptrdiff_t _srcstride,
+                                         int height, int denom, int wx, int ox,
+                                         intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    const int8_t *filter;
+    pixel *src = (pixel*)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    int16_t tmp_array[(MAX_PB_SIZE + QPEL_EXTRA) * MAX_PB_SIZE];
+    int16_t *tmp = tmp_array;
+    int shift = denom + 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    src   -= QPEL_EXTRA_BEFORE * srcstride; /* back up to feed the vertical taps */
+    filter = ff_hevc_qpel_filters[mx - 1];
+    for (y = 0; y < height + QPEL_EXTRA; y++) {
+        for (x = 0; x < width; x++)
+            tmp[x] = QPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        tmp += MAX_PB_SIZE;
+    }
+
+    tmp    = tmp_array + QPEL_EXTRA_BEFORE * MAX_PB_SIZE; /* first row of the output block */
+    filter = ff_hevc_qpel_filters[my - 1];
+
+    ox = ox * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel((((QPEL_FILTER(tmp, MAX_PB_SIZE) >> 6) * wx + offset) >> shift) + ox);
+        tmp += MAX_PB_SIZE;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* 2-D qpel interpolation for weighted bi-prediction: horizontal pass
+ * (phase mx) into int16 tmp_array, vertical pass (phase my) combined with
+ * the intermediate plane src2 using wx0/wx1 and ox0/ox1, then clipped. */
+static void FUNC(put_hevc_qpel_bi_w_hv)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                        int16_t *src2,
+                                        int height, int denom, int wx0, int wx1,
+                                        int ox0, int ox1, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    const int8_t *filter;
+    pixel *src = (pixel*)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    int16_t tmp_array[(MAX_PB_SIZE + QPEL_EXTRA) * MAX_PB_SIZE];
+    int16_t *tmp = tmp_array;
+    int shift = 14 + 1 - BIT_DEPTH;
+    int log2Wd = denom + shift - 1;
+
+    src   -= QPEL_EXTRA_BEFORE * srcstride; /* back up to feed the vertical taps */
+    filter = ff_hevc_qpel_filters[mx - 1];
+    for (y = 0; y < height + QPEL_EXTRA; y++) {
+        for (x = 0; x < width; x++)
+            tmp[x] = QPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        tmp += MAX_PB_SIZE;
+    }
+
+    tmp    = tmp_array + QPEL_EXTRA_BEFORE * MAX_PB_SIZE; /* first row of the output block */
+    filter = ff_hevc_qpel_filters[my - 1];
+
+    ox0     = ox0 * (1 << (BIT_DEPTH - 8));
+    ox1     = ox1 * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((QPEL_FILTER(tmp, MAX_PB_SIZE) >> 6) * wx1 + src2[x] * wx0 +
+                                    ((ox0 + ox1 + 1) << log2Wd)) >> (log2Wd + 1));
+        tmp  += MAX_PB_SIZE;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+//
+////////////////////////////////////////////////////////////////////////////////
+/* 4-tap epel (chroma) interpolation at position x: weighted sum of the
+ * samples src[x - stride .. x + 2*stride] using the coefficient table
+ * currently bound to the local variable "filter". */
+#define EPEL_FILTER(src, stride)                                               \
+    (filter[0] * src[x - stride] +                                             \
+     filter[1] * src[x]          +                                             \
+     filter[2] * src[x + stride] +                                             \
+     filter[3] * src[x + 2 * stride])
+
+#ifdef USE_BIPRED
+/* Horizontal 4-tap epel filtering into the int16 intermediate plane dst
+ * (fixed stride MAX_PB_SIZE), down-shifted by (BIT_DEPTH - 8).  mx selects
+ * the filter phase; used to build one half of a bi-prediction. */
+static void FUNC(put_hevc_epel_h)(int16_t *dst,
+                                  uint8_t *_src, ptrdiff_t _srcstride,
+                                  int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride  = _srcstride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[mx - 1];
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        dst += MAX_PB_SIZE;
+    }
+}
+
+/* Vertical 4-tap epel filtering into the int16 intermediate plane dst
+ * (stride MAX_PB_SIZE); my selects the filter phase. */
+static void FUNC(put_hevc_epel_v)(int16_t *dst,
+                                  uint8_t *_src, ptrdiff_t _srcstride,
+                                  int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[my - 1];
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = EPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        dst += MAX_PB_SIZE;
+    }
+}
+
+/* 2-D epel filtering into the int16 intermediate plane: horizontal pass
+ * (phase mx) into tmp_array with EPEL_EXTRA extra rows, then vertical
+ * pass (phase my) with a fixed >> 6 normalization. */
+static void FUNC(put_hevc_epel_hv)(int16_t *dst,
+                                   uint8_t *_src, ptrdiff_t _srcstride,
+                                   int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[mx - 1];
+    int16_t tmp_array[(MAX_PB_SIZE + EPEL_EXTRA) * MAX_PB_SIZE];
+    int16_t *tmp = tmp_array;
+
+    src -= EPEL_EXTRA_BEFORE * srcstride; /* back up to feed the vertical taps */
+
+    for (y = 0; y < height + EPEL_EXTRA; y++) {
+        for (x = 0; x < width; x++)
+            tmp[x] = EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        tmp += MAX_PB_SIZE;
+    }
+
+    tmp      = tmp_array + EPEL_EXTRA_BEFORE * MAX_PB_SIZE; /* first row of the output block */
+    filter = ff_hevc_epel_filters[my - 1];
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = EPEL_FILTER(tmp, MAX_PB_SIZE) >> 6;
+        tmp += MAX_PB_SIZE;
+        dst += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Horizontal epel interpolation for uni-prediction: filters _src (phase mx),
+ * rounds with offset, shifts back to pixel precision and clips. */
+static void FUNC(put_hevc_epel_uni_h)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                      int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride  = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[mx - 1];
+    int shift = 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1; /* rounding term */
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8)) + offset) >> shift);
+        src += srcstride;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* Horizontal epel interpolation for bi-prediction: averages the filtered
+ * samples with the intermediate plane src2 (stride MAX_PB_SIZE) and clips. */
+static void FUNC(put_hevc_epel_bi_h)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                     int16_t *src2,
+                                     int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride  = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[mx - 1];
+    int shift = 14 + 1 - BIT_DEPTH; /* +1: the two predictions are averaged */
+    int offset = (1 << shift) >> 1;
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++) {
+            dst[x] = av_clip_pixel(((EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8)) + src2[x] + offset) >> shift);
+        }
+        dst  += dststride;
+        src  += srcstride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Vertical epel interpolation for uni-prediction (phase my); rounds,
+ * shifts back to pixel precision and clips. */
+static void FUNC(put_hevc_epel_uni_v)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                      int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride  = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[my - 1];
+    int shift = 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1; /* rounding term */
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((EPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8)) + offset) >> shift);
+        src += srcstride;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* Vertical epel interpolation for bi-prediction: averages the filtered
+ * samples (phase my) with the intermediate plane src2 and clips. */
+static void FUNC(put_hevc_epel_bi_v)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                     int16_t *src2,
+                                     int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride  = _srcstride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[my - 1];
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    int shift = 14 + 1 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((EPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8)) + src2[x] + offset) >> shift);
+        dst  += dststride;
+        src  += srcstride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* 2-D epel interpolation for uni-prediction: horizontal pass (phase mx)
+ * into int16 tmp_array (EPEL_EXTRA extra rows for the vertical taps),
+ * then vertical pass (phase my) with rounding, shift and clip. */
+static void FUNC(put_hevc_epel_uni_hv)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                       int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[mx - 1];
+    int16_t tmp_array[(MAX_PB_SIZE + EPEL_EXTRA) * MAX_PB_SIZE];
+    int16_t *tmp = tmp_array;
+    int shift = 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    src -= EPEL_EXTRA_BEFORE * srcstride; /* back up to feed the vertical taps */
+
+    for (y = 0; y < height + EPEL_EXTRA; y++) {
+        for (x = 0; x < width; x++)
+            tmp[x] = EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        tmp += MAX_PB_SIZE;
+    }
+
+    tmp      = tmp_array + EPEL_EXTRA_BEFORE * MAX_PB_SIZE; /* first row of the output block */
+    filter = ff_hevc_epel_filters[my - 1];
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((EPEL_FILTER(tmp, MAX_PB_SIZE) >> 6) + offset) >> shift);
+        tmp += MAX_PB_SIZE;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* 2-D epel interpolation for bi-prediction: two-pass filtering as in the
+ * uni_hv variant, with the vertical result averaged against src2. */
+static void FUNC(put_hevc_epel_bi_hv)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                      int16_t *src2,
+                                      int height, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[mx - 1];
+    int16_t tmp_array[(MAX_PB_SIZE + EPEL_EXTRA) * MAX_PB_SIZE];
+    int16_t *tmp = tmp_array;
+    int shift = 14 + 1 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    src -= EPEL_EXTRA_BEFORE * srcstride; /* back up to feed the vertical taps */
+
+    for (y = 0; y < height + EPEL_EXTRA; y++) {
+        for (x = 0; x < width; x++)
+            tmp[x] = EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        tmp += MAX_PB_SIZE;
+    }
+
+    tmp      = tmp_array + EPEL_EXTRA_BEFORE * MAX_PB_SIZE; /* first row of the output block */
+    filter = ff_hevc_epel_filters[my - 1];
+
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((EPEL_FILTER(tmp, MAX_PB_SIZE) >> 6) + src2[x] + offset) >> shift);
+        tmp  += MAX_PB_SIZE;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Horizontal epel interpolation with explicit weighted prediction:
+ * filter phase mx, weight wx (log2 denominator denom), offset ox. */
+static void FUNC(put_hevc_epel_uni_w_h)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                        int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride  = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[mx - 1];
+    int shift = denom + 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1; /* rounding term for the weighted shift */
+
+    ox     = ox * (1 << (BIT_DEPTH - 8)); /* scale the offset up to the operating bit depth */
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++) {
+            dst[x] = av_clip_pixel((((EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8)) * wx + offset) >> shift) + ox);
+        }
+        dst += dststride;
+        src += srcstride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* Horizontal epel filtering for weighted bi-prediction: combines the
+ * filtered samples with src2 using wx0/wx1 and offsets ox0/ox1, clips. */
+static void FUNC(put_hevc_epel_bi_w_h)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                       int16_t *src2,
+                                       int height, int denom, int wx0, int wx1,
+                                       int ox0, int ox1, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride  = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[mx - 1];
+    int shift = 14 + 1 - BIT_DEPTH;
+    int log2Wd = denom + shift - 1;
+
+    ox0     = ox0 * (1 << (BIT_DEPTH - 8)); /* offsets scaled to the operating bit depth */
+    ox1     = ox1 * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8)) * wx1 + src2[x] * wx0 +
+                                    ((ox0 + ox1 + 1) << log2Wd)) >> (log2Wd + 1));
+        src  += srcstride;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* Vertical epel interpolation with explicit weighted prediction:
+ * filter phase my, weight wx (log2 denominator denom), offset ox. */
+static void FUNC(put_hevc_epel_uni_w_v)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                        int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride  = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[my - 1];
+    int shift = denom + 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    ox     = ox * (1 << (BIT_DEPTH - 8)); /* scale the offset up to the operating bit depth */
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++) {
+            dst[x] = av_clip_pixel((((EPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8)) * wx + offset) >> shift) + ox);
+        }
+        dst += dststride;
+        src += srcstride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* Vertical epel filtering for weighted bi-prediction: combines the
+ * filtered samples (phase my) with src2 using wx0/wx1 and ox0/ox1. */
+static void FUNC(put_hevc_epel_bi_w_v)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                       int16_t *src2,
+                                       int height, int denom, int wx0, int wx1,
+                                       int ox0, int ox1, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride  = _srcstride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[my - 1];
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    int shift = 14 + 1 - BIT_DEPTH;
+    int log2Wd = denom + shift - 1;
+
+    ox0     = ox0 * (1 << (BIT_DEPTH - 8)); /* offsets scaled to the operating bit depth */
+    ox1     = ox1 * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((EPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8)) * wx1 + src2[x] * wx0 +
+                                    ((ox0 + ox1 + 1) << log2Wd)) >> (log2Wd + 1));
+        src  += srcstride;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+/* 2-D epel interpolation with explicit weighted prediction: horizontal
+ * pass (phase mx) into int16 tmp_array, vertical pass (phase my) with
+ * weight wx, log2 denominator denom and offset ox, clipped. */
+static void FUNC(put_hevc_epel_uni_w_hv)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                         int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[mx - 1];
+    int16_t tmp_array[(MAX_PB_SIZE + EPEL_EXTRA) * MAX_PB_SIZE];
+    int16_t *tmp = tmp_array;
+    int shift = denom + 14 - BIT_DEPTH;
+    int offset = (1 << shift) >> 1;
+
+    src -= EPEL_EXTRA_BEFORE * srcstride; /* back up to feed the vertical taps */
+
+    for (y = 0; y < height + EPEL_EXTRA; y++) {
+        for (x = 0; x < width; x++)
+            tmp[x] = EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        tmp += MAX_PB_SIZE;
+    }
+
+    tmp      = tmp_array + EPEL_EXTRA_BEFORE * MAX_PB_SIZE; /* first row of the output block */
+    filter = ff_hevc_epel_filters[my - 1];
+
+    ox     = ox * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel((((EPEL_FILTER(tmp, MAX_PB_SIZE) >> 6) * wx + offset) >> shift) + ox);
+        tmp += MAX_PB_SIZE;
+        dst += dststride;
+    }
+}
+
+#ifdef USE_BIPRED
+/* 2-D epel interpolation for weighted bi-prediction: horizontal pass
+ * (phase mx) into int16 tmp_array, vertical pass (phase my) combined with
+ * src2 using weights wx0/wx1 and offsets ox0/ox1, then clipped. */
+static void FUNC(put_hevc_epel_bi_w_hv)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                        int16_t *src2,
+                                        int height, int denom, int wx0, int wx1,
+                                        int ox0, int ox1, intptr_t mx, intptr_t my, int width BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src = (pixel *)_src;
+    ptrdiff_t srcstride = _srcstride / sizeof(pixel);
+    pixel *dst          = (pixel *)_dst;
+    ptrdiff_t dststride = _dststride / sizeof(pixel);
+    const int8_t *filter = ff_hevc_epel_filters[mx - 1];
+    int16_t tmp_array[(MAX_PB_SIZE + EPEL_EXTRA) * MAX_PB_SIZE];
+    int16_t *tmp = tmp_array;
+    int shift = 14 + 1 - BIT_DEPTH;
+    int log2Wd = denom + shift - 1;
+
+    src -= EPEL_EXTRA_BEFORE * srcstride; /* back up to feed the vertical taps */
+
+    for (y = 0; y < height + EPEL_EXTRA; y++) {
+        for (x = 0; x < width; x++)
+            tmp[x] = EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8);
+        src += srcstride;
+        tmp += MAX_PB_SIZE;
+    }
+
+    tmp      = tmp_array + EPEL_EXTRA_BEFORE * MAX_PB_SIZE; /* first row of the output block */
+    filter = ff_hevc_epel_filters[my - 1];
+
+    ox0     = ox0 * (1 << (BIT_DEPTH - 8));
+    ox1     = ox1 * (1 << (BIT_DEPTH - 8));
+    for (y = 0; y < height; y++) {
+        for (x = 0; x < width; x++)
+            dst[x] = av_clip_pixel(((EPEL_FILTER(tmp, MAX_PB_SIZE) >> 6) * wx1 + src2[x] * wx0 +
+                                    ((ox0 + ox1 + 1) << log2Wd)) >> (log2Wd + 1));
+        tmp  += MAX_PB_SIZE;
+        dst  += dststride;
+        src2 += MAX_PB_SIZE;
+    }
+}
+#endif
+
+#endif /* USE_PRED */
+
+
+// line zero. samples around the current edge position: P* on one side,
+// Q* on the other; xstride steps across the edge.
+#define P3 pix[-4 * xstride]
+#define P2 pix[-3 * xstride]
+#define P1 pix[-2 * xstride]
+#define P0 pix[-1 * xstride]
+#define Q0 pix[0 * xstride]
+#define Q1 pix[1 * xstride]
+#define Q2 pix[2 * xstride]
+#define Q3 pix[3 * xstride]
+
+// line three. used only for deblocking decision
+#define TP3 pix[-4 * xstride + 3 * ystride]
+#define TP2 pix[-3 * xstride + 3 * ystride]
+#define TP1 pix[-2 * xstride + 3 * ystride]
+#define TP0 pix[-1 * xstride + 3 * ystride]
+#define TQ0 pix[0  * xstride + 3 * ystride]
+#define TQ1 pix[1  * xstride + 3 * ystride]
+#define TQ2 pix[2  * xstride + 3 * ystride]
+#define TQ3 pix[3  * xstride + 3 * ystride]
+
+/* HEVC luma deblocking for one 8-sample edge (two 4-line halves, j = 0, 1).
+ * _xstride steps across the edge and _ystride along it, so the same code
+ * serves both horizontal and vertical edges.  Per half it evaluates the
+ * gradient-based activity decision against beta, then applies either the
+ * strong filter (3 samples each side) or the normal filter (up to 2),
+ * honouring the per-half tc clip and the no_p/no_q suppression flags. */
+static void FUNC(hevc_loop_filter_luma)(uint8_t *_pix,
+                                        ptrdiff_t _xstride, ptrdiff_t _ystride,
+                                        int beta, int *_tc,
+                                        uint8_t *_no_p, uint8_t *_no_q BIT_DEPTH_PARAM)
+{
+    int d, j;
+    pixel *pix        = (pixel *)_pix;
+    ptrdiff_t xstride = _xstride / sizeof(pixel);
+    ptrdiff_t ystride = _ystride / sizeof(pixel);
+
+    beta <<= BIT_DEPTH - 8; /* threshold scaled to the operating bit depth */
+
+    for (j = 0; j < 2; j++) {
+        /* second derivatives on lines 0 and 3 measure local activity */
+        const int dp0  = abs(P2  - 2 * P1  + P0);
+        const int dq0  = abs(Q2  - 2 * Q1  + Q0);
+        const int dp3  = abs(TP2 - 2 * TP1 + TP0);
+        const int dq3  = abs(TQ2 - 2 * TQ1 + TQ0);
+        const int d0   = dp0 + dq0;
+        const int d3   = dp3 + dq3;
+        const int tc   = _tc[j]   << (BIT_DEPTH - 8);
+        const int no_p = _no_p[j];
+        const int no_q = _no_q[j];
+
+        if (d0 + d3 >= beta) {
+            /* too much activity: leave this 4-line half unfiltered */
+            pix += 4 * ystride;
+            continue;
+        } else {
+            const int beta_3 = beta >> 3;
+            const int beta_2 = beta >> 2;
+            const int tc25   = ((tc * 5 + 1) >> 1);
+
+            if (abs(P3  -  P0) + abs(Q3  -  Q0) < beta_3 && abs(P0  -  Q0) < tc25 &&
+                abs(TP3 - TP0) + abs(TQ3 - TQ0) < beta_3 && abs(TP0 - TQ0) < tc25 &&
+                                      (d0 << 1) < beta_2 &&      (d3 << 1) < beta_2) {
+                // strong filtering
+                const int tc2 = tc << 1; /* strong filter uses a 2*tc clip window */
+                for (d = 0; d < 4; d++) {
+                    const int p3 = P3;
+                    const int p2 = P2;
+                    const int p1 = P1;
+                    const int p0 = P0;
+                    const int q0 = Q0;
+                    const int q1 = Q1;
+                    const int q2 = Q2;
+                    const int q3 = Q3;
+                    if (!no_p) {
+                        P0 = p0 + av_clip(((p2 + 2 * p1 + 2 * p0 + 2 * q0 + q1 + 4) >> 3) - p0, -tc2, tc2);
+                        P1 = p1 + av_clip(((p2 + p1 + p0 + q0 + 2) >> 2) - p1, -tc2, tc2);
+                        P2 = p2 + av_clip(((2 * p3 + 3 * p2 + p1 + p0 + q0 + 4) >> 3) - p2, -tc2, tc2);
+                    }
+                    if (!no_q) {
+                        Q0 = q0 + av_clip(((p1 + 2 * p0 + 2 * q0 + 2 * q1 + q2 + 4) >> 3) - q0, -tc2, tc2);
+                        Q1 = q1 + av_clip(((p0 + q0 + q1 + q2 + 2) >> 2) - q1, -tc2, tc2);
+                        Q2 = q2 + av_clip(((2 * q3 + 3 * q2 + q1 + q0 + p0 + 4) >> 3) - q2, -tc2, tc2);
+                    }
+                    pix += ystride;
+                }
+            } else { // normal filtering
+                int nd_p = 1; /* number of samples to modify on each side */
+                int nd_q = 1;
+                const int tc_2 = tc >> 1;
+                if (dp0 + dp3 < ((beta + (beta >> 1)) >> 3))
+                    nd_p = 2;
+                if (dq0 + dq3 < ((beta + (beta >> 1)) >> 3))
+                    nd_q = 2;
+
+                for (d = 0; d < 4; d++) {
+                    const int p2 = P2;
+                    const int p1 = P1;
+                    const int p0 = P0;
+                    const int q0 = Q0;
+                    const int q1 = Q1;
+                    const int q2 = Q2;
+                    int delta0   = (9 * (q0 - p0) - 3 * (q1 - p1) + 8) >> 4;
+                    if (abs(delta0) < 10 * tc) { /* skip lines that look like a real edge */
+                        delta0 = av_clip(delta0, -tc, tc);
+                        if (!no_p)
+                            P0 = av_clip_pixel(p0 + delta0);
+                        if (!no_q)
+                            Q0 = av_clip_pixel(q0 - delta0);
+                        if (!no_p && nd_p > 1) {
+                            const int deltap1 = av_clip((((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1, -tc_2, tc_2);
+                            P1 = av_clip_pixel(p1 + deltap1);
+                        }
+                        if (!no_q && nd_q > 1) {
+                            const int deltaq1 = av_clip((((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1, -tc_2, tc_2);
+                            Q1 = av_clip_pixel(q1 + deltaq1);
+                        }
+                    }
+                    pix += ystride;
+                }
+            }
+        }
+    }
+}
+
+/* HEVC chroma deblocking for one 8-sample edge (two 4-line halves).
+ * Only P0/Q0 are modified; each half uses its own tc clip and can be
+ * suppressed via no_p/no_q.  tc <= 0 means the half is not filtered. */
+static void FUNC(hevc_loop_filter_chroma)(uint8_t *_pix, ptrdiff_t _xstride,
+                                          ptrdiff_t _ystride, int *_tc,
+                                          uint8_t *_no_p, uint8_t *_no_q BIT_DEPTH_PARAM)
+{
+    int d, j, no_p, no_q;
+    pixel *pix        = (pixel *)_pix;
+    ptrdiff_t xstride = _xstride / sizeof(pixel);
+    ptrdiff_t ystride = _ystride / sizeof(pixel);
+
+    for (j = 0; j < 2; j++) {
+        const int tc = _tc[j] << (BIT_DEPTH - 8); /* clip scaled to bit depth */
+        if (tc <= 0) {
+            pix += 4 * ystride;
+            continue;
+        }
+        no_p = _no_p[j];
+        no_q = _no_q[j];
+
+        for (d = 0; d < 4; d++) {
+            int delta0;
+            const int p1 = P1;
+            const int p0 = P0;
+            const int q0 = Q0;
+            const int q1 = Q1;
+            delta0 = av_clip((((q0 - p0) * 4) + p1 - q1 + 4) >> 3, -tc, tc);
+            if (!no_p)
+                P0 = av_clip_pixel(p0 + delta0);
+            if (!no_q)
+                Q0 = av_clip_pixel(q0 - delta0);
+            pix += ystride;
+        }
+    }
+}
+
+/* Horizontal chroma edge: across-the-edge step is the row stride,
+ * along-the-edge step is one pixel. */
+static void FUNC(hevc_h_loop_filter_chroma)(uint8_t *pix, ptrdiff_t stride,
+                                            int32_t *tc, uint8_t *no_p,
+                                            uint8_t *no_q BIT_DEPTH_PARAM)
+{
+    FUNC(hevc_loop_filter_chroma)(pix, stride, sizeof(pixel), tc, no_p, no_q BIT_DEPTH_ARG);
+}
+
+/* Vertical chroma edge: across-the-edge step is one pixel,
+ * along-the-edge step is the row stride. */
+static void FUNC(hevc_v_loop_filter_chroma)(uint8_t *pix, ptrdiff_t stride,
+                                            int32_t *tc, uint8_t *no_p,
+                                            uint8_t *no_q BIT_DEPTH_PARAM)
+{
+    FUNC(hevc_loop_filter_chroma)(pix, sizeof(pixel), stride, tc, no_p, no_q BIT_DEPTH_ARG);
+}
+
+/* Horizontal luma edge wrapper (see hevc_h_loop_filter_chroma for the
+ * stride convention). */
+static void FUNC(hevc_h_loop_filter_luma)(uint8_t *pix, ptrdiff_t stride,
+                                          int beta, int32_t *tc, uint8_t *no_p,
+                                          uint8_t *no_q BIT_DEPTH_PARAM)
+{
+    FUNC(hevc_loop_filter_luma)(pix, stride, sizeof(pixel),
+                                beta, tc, no_p, no_q BIT_DEPTH_ARG);
+}
+
+/* Vertical luma edge wrapper: across-the-edge step is one pixel,
+ * along-the-edge step is the row stride. */
+static void FUNC(hevc_v_loop_filter_luma)(uint8_t *pix, ptrdiff_t stride,
+                                          int beta, int32_t *tc, uint8_t *no_p,
+                                          uint8_t *no_q BIT_DEPTH_PARAM)
+{
+    FUNC(hevc_loop_filter_luma)(pix, sizeof(pixel), stride,
+                                beta, tc, no_p, no_q BIT_DEPTH_ARG);
+}
+
+#undef P3
+#undef P2
+#undef P1
+#undef P0
+#undef Q0
+#undef Q1
+#undef Q2
+#undef Q3
+
+#undef TP3
+#undef TP2
+#undef TP1
+#undef TP0
+#undef TQ0
+#undef TQ1
+#undef TQ2
+#undef TQ3
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevcpred.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,104 @@
+/*
+ * HEVC video Decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "hevc.h"
+
+#include "hevcpred.h"
+
+#if defined(USE_FULL)
+#define HEVC_INLINE av_always_inline
+#else
+/* Note: slower but smaller */
+#define HEVC_INLINE av_noinline
+#endif
+
+#ifdef USE_VAR_BIT_DEPTH
+
+#define BIT_DEPTH bit_depth
+#include "hevcpred_template.c"
+#undef BIT_DEPTH
+
+#else
+
+#define BIT_DEPTH 8
+#include "hevcpred_template.c"
+#undef BIT_DEPTH
+
+#ifdef USE_FULL
+#define BIT_DEPTH 9
+#include "hevcpred_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 10
+#include "hevcpred_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 12
+#include "hevcpred_template.c"
+#undef BIT_DEPTH
+#endif /* USE_FULL */
+
+#endif /* USE_VAR_BIT_DEPTH */
+
+#ifdef USE_FUNC_PTR
+/* Fill the HEVCPredContext function-pointer table with the intra-prediction
+ * implementations matching bit_depth.  With USE_VAR_BIT_DEPTH a single
+ * variable-depth build is used; otherwise 9/10/12-bit variants are selected
+ * when USE_FULL is enabled, and everything else falls back to 8-bit. */
+void ff_hevc_pred_init(HEVCPredContext *hpc, int bit_depth)
+{
+#undef FUNC
+#define FUNC(a, depth) a ## _ ## depth
+
+/* Expand to the full set of assignments for one bit-depth suffix. */
+#define HEVC_PRED(depth)                                \
+    hpc->intra_pred[0]   = FUNC(intra_pred_2, depth);   \
+    hpc->intra_pred[1]   = FUNC(intra_pred_3, depth);   \
+    hpc->intra_pred[2]   = FUNC(intra_pred_4, depth);   \
+    hpc->intra_pred[3]   = FUNC(intra_pred_5, depth);   \
+    hpc->pred_planar[0]  = FUNC(pred_planar_0, depth);  \
+    hpc->pred_planar[1]  = FUNC(pred_planar_1, depth);  \
+    hpc->pred_planar[2]  = FUNC(pred_planar_2, depth);  \
+    hpc->pred_planar[3]  = FUNC(pred_planar_3, depth);  \
+    hpc->pred_dc         = FUNC(pred_dc, depth);        \
+    hpc->pred_angular[0] = FUNC(pred_angular_0, depth); \
+    hpc->pred_angular[1] = FUNC(pred_angular_1, depth); \
+    hpc->pred_angular[2] = FUNC(pred_angular_2, depth); \
+    hpc->pred_angular[3] = FUNC(pred_angular_3, depth);
+
+#ifdef USE_VAR_BIT_DEPTH
+    HEVC_PRED(var);
+#else
+    switch (bit_depth) {
+#ifdef USE_FULL
+    case 9:
+        HEVC_PRED(9);
+        break;
+    case 10:
+        HEVC_PRED(10);
+        break;
+    case 12:
+        HEVC_PRED(12);
+        break;
+#endif /* USE_FULL */
+    default:
+        HEVC_PRED(8);
+        break;
+    }
+#endif /* !USE_VAR_BIT_DEPTH */
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevcpred.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,46 @@
+/*
+ * HEVC video Decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_HEVCPRED_H
+#define AVCODEC_HEVCPRED_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+struct HEVCContext;
+
+/* Dispatch table of intra-prediction entry points.  The [4]-element arrays
+ * hold one implementation per block-size class (presumably indexed by log2
+ * transform size — confirm against the callers that fill/use this table). */
+typedef struct HEVCPredContext {
+    void (*intra_pred[4])(struct HEVCContext *s, int x0, int y0, int c_idx);
+
+    void (*pred_planar[4])(uint8_t *src, const uint8_t *top,
+                           const uint8_t *left, ptrdiff_t stride);
+    void (*pred_dc)(uint8_t *src, const uint8_t *top, const uint8_t *left,
+                    ptrdiff_t stride, int log2_size, int c_idx);
+    void (*pred_angular[4])(uint8_t *src, const uint8_t *top,
+                            const uint8_t *left, ptrdiff_t stride,
+                            int c_idx, int mode, 
+                            int disable_intra_boundary_filter BIT_DEPTH_PARAM);
+} HEVCPredContext;
+
+void ff_hevc_pred_init(HEVCPredContext *hpc, int bit_depth);
+
+#endif /* AVCODEC_HEVCPRED_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/hevcpred_template.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,620 @@
+/*
+ * HEVC video decoder
+ *
+ * Copyright (C) 2012 - 2013 Guillaume Martres
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/pixdesc.h"
+
+#include "bit_depth_template.c"
+#include "hevcpred.h"
+
+#ifndef USE_FUNC_PTR
+static HEVC_INLINE void FUNC(pred_planar)(uint8_t *_src, const uint8_t *_top,
+                                          const uint8_t *_left, ptrdiff_t stride,
+                                          int trafo_size);
+static void FUNC(pred_dc)(uint8_t *_src, const uint8_t *_top,
+                          const uint8_t *_left,
+                          ptrdiff_t stride, int log2_size, int c_idx);
+static HEVC_INLINE void FUNC(pred_angular)(uint8_t *_src,
+                                           const uint8_t *_top,
+                                           const uint8_t *_left,
+                                           ptrdiff_t stride, int c_idx,
+                                           int mode, int size, 
+                                           int disable_intra_boundary_filter BIT_DEPTH_PARAM);
+#endif
+
+#define POS(x, y) src[(x) + stride * (y)]
+
+#ifdef USE_FUNC_PTR
+static HEVC_INLINE void FUNC(intra_pred)
+#else
+void intra_pred
+#endif
+(HEVCContext *s, int x0, int y0, int log2_size, int c_idx)
+{
+#define PU(x) \
+    ((x) >> s->sps->log2_min_pu_size)
+#ifdef USE_PRED
+#define MVF(x, y) \
+    (s->ref->tab_mvf[(x) + (y) * min_pu_width].pred_flag)
+#else
+#define MVF(x, y) PF_INTRA
+#endif
+#define MVF_PU(x, y) \
+    MVF(PU(x0 + ((x) << hshift)), PU(y0 + ((y) << vshift)))
+#define IS_INTRA(x, y) \
+    (MVF_PU(x, y) == PF_INTRA)
+#define MIN_TB_ADDR_ZS(x, y) \
+    s->pps->min_tb_addr_zs[(y) * (s->sps->tb_mask+2) + (x)]
+#define EXTEND(ptr, val, len)         \
+do {                                  \
+    pixel4 pix = PIXEL_SPLAT_X4(val); \
+    for (i = 0; i < (len); i += 4)    \
+        AV_WN4P(ptr + i, pix);        \
+} while (0)
+
+#define EXTEND_RIGHT_CIP(ptr, start, length)                                   \
+        for (i = start; i < (start) + (length); i += 4)                        \
+            if (!IS_INTRA(i, -1))                                              \
+                AV_WN4P(&ptr[i], a);                                           \
+            else                                                               \
+                a = PIXEL_SPLAT_X4(ptr[i+3])
+#define EXTEND_LEFT_CIP(ptr, start, length) \
+        for (i = start; i > (start) - (length); i--) \
+            if (!IS_INTRA(i - 1, -1)) \
+                ptr[i - 1] = ptr[i]
+#define EXTEND_UP_CIP(ptr, start, length)                                      \
+        for (i = (start); i > (start) - (length); i -= 4)                      \
+            if (!IS_INTRA(-1, i - 3))                                          \
+                AV_WN4P(&ptr[i - 3], a);                                       \
+            else                                                               \
+                a = PIXEL_SPLAT_X4(ptr[i - 3])
+#define EXTEND_DOWN_CIP(ptr, start, length)                                    \
+        for (i = start; i < (start) + (length); i += 4)                        \
+            if (!IS_INTRA(-1, i))                                              \
+                AV_WN4P(&ptr[i], a);                                           \
+            else                                                               \
+                a = PIXEL_SPLAT_X4(ptr[i + 3])
+
+    HEVCLocalContext *lc = s->HEVClc;
+#ifdef USE_VAR_BIT_DEPTH
+    int bit_depth = s->sps->bit_depth;
+#endif
+    int i;
+    int hshift = s->sps->hshift[c_idx];
+    int vshift = s->sps->vshift[c_idx];
+    int size = (1 << log2_size);
+    int size_in_luma_h = size << hshift;
+    int size_in_tbs_h  = size_in_luma_h >> s->sps->log2_min_tb_size;
+    int size_in_luma_v = size << vshift;
+    int size_in_tbs_v  = size_in_luma_v >> s->sps->log2_min_tb_size;
+    int x = x0 >> hshift;
+    int y = y0 >> vshift;
+    int x_tb = (x0 >> s->sps->log2_min_tb_size) & s->sps->tb_mask;
+    int y_tb = (y0 >> s->sps->log2_min_tb_size) & s->sps->tb_mask;
+
+    int cur_tb_addr = MIN_TB_ADDR_ZS(x_tb, y_tb);
+
+    ptrdiff_t stride = s->frame->linesize[c_idx] / sizeof(pixel);
+    pixel *src = (pixel*)s->frame->data[c_idx] + x + y * stride;
+
+    int min_pu_width = s->sps->min_pu_width;
+
+    enum IntraPredMode mode = c_idx ? lc->tu.intra_pred_mode_c :
+                              lc->tu.intra_pred_mode;
+    pixel4 a;
+    pixel  left_array[2 * MAX_TB_SIZE + 1];
+    pixel  filtered_left_array[2 * MAX_TB_SIZE + 1];
+    pixel  top_array[2 * MAX_TB_SIZE + 1];
+    pixel  filtered_top_array[2 * MAX_TB_SIZE + 1];
+
+    pixel  *left          = left_array + 1;
+    pixel  *top           = top_array  + 1;
+    pixel  *filtered_left = filtered_left_array + 1;
+    pixel  *filtered_top  = filtered_top_array  + 1;
+    int cand_bottom_left = lc->na.cand_bottom_left && cur_tb_addr > MIN_TB_ADDR_ZS( x_tb - 1, (y_tb + size_in_tbs_v) & s->sps->tb_mask);
+    int cand_left        = lc->na.cand_left;
+    int cand_up_left     = lc->na.cand_up_left;
+    int cand_up          = lc->na.cand_up;
+    int cand_up_right    = lc->na.cand_up_right    && cur_tb_addr > MIN_TB_ADDR_ZS((x_tb + size_in_tbs_h) & s->sps->tb_mask, y_tb - 1);
+
+    int bottom_left_size = (FFMIN(y0 + 2 * size_in_luma_v, s->sps->height) -
+                           (y0 + size_in_luma_v)) >> vshift;
+    int top_right_size   = (FFMIN(x0 + 2 * size_in_luma_h, s->sps->width) -
+                           (x0 + size_in_luma_h)) >> hshift;
+    int disable_intra_boundary_filter;
+
+    if (s->pps->constrained_intra_pred_flag == 1) {
+        int size_in_luma_pu_v = PU(size_in_luma_v);
+        int size_in_luma_pu_h = PU(size_in_luma_h);
+        int on_pu_edge_x    = !(x0 & ((1 << s->sps->log2_min_pu_size) - 1));
+        int on_pu_edge_y    = !(y0 & ((1 << s->sps->log2_min_pu_size) - 1));
+        if (!size_in_luma_pu_h)
+            size_in_luma_pu_h++;
+        if (cand_bottom_left == 1 && on_pu_edge_x) {
+            int x_left_pu   = PU(x0 - 1);
+            int y_bottom_pu = PU(y0 + size_in_luma_v);
+            int max = FFMIN(size_in_luma_pu_v, s->sps->min_pu_height - y_bottom_pu);
+            cand_bottom_left = 0;
+            for (i = 0; i < max; i += 2)
+                cand_bottom_left |= (MVF(x_left_pu, y_bottom_pu + i) == PF_INTRA);
+        }
+        if (cand_left == 1 && on_pu_edge_x) {
+            int x_left_pu   = PU(x0 - 1);
+            int y_left_pu   = PU(y0);
+            int max = FFMIN(size_in_luma_pu_v, s->sps->min_pu_height - y_left_pu);
+            cand_left = 0;
+            for (i = 0; i < max; i += 2)
+                cand_left |= (MVF(x_left_pu, y_left_pu + i) == PF_INTRA);
+        }
+        if (cand_up_left == 1) {
+            int x_left_pu   = PU(x0 - 1);
+            int y_top_pu    = PU(y0 - 1);
+            cand_up_left = MVF(x_left_pu, y_top_pu) == PF_INTRA;
+        }
+        if (cand_up == 1 && on_pu_edge_y) {
+            int x_top_pu    = PU(x0);
+            int y_top_pu    = PU(y0 - 1);
+            int max = FFMIN(size_in_luma_pu_h, s->sps->min_pu_width - x_top_pu);
+            cand_up = 0;
+            for (i = 0; i < max; i += 2)
+                cand_up |= (MVF(x_top_pu + i, y_top_pu) == PF_INTRA);
+        }
+        if (cand_up_right == 1 && on_pu_edge_y) {
+            int y_top_pu    = PU(y0 - 1);
+            int x_right_pu  = PU(x0 + size_in_luma_h);
+            int max = FFMIN(size_in_luma_pu_h, s->sps->min_pu_width - x_right_pu);
+            cand_up_right = 0;
+            for (i = 0; i < max; i += 2)
+                cand_up_right |= (MVF(x_right_pu + i, y_top_pu) == PF_INTRA);
+        }
+        memset(left, 128, 2 * MAX_TB_SIZE*sizeof(pixel));
+        memset(top , 128, 2 * MAX_TB_SIZE*sizeof(pixel));
+        top[-1] = 128;
+    }
+    if (cand_up_left) {
+        left[-1] = POS(-1, -1);
+        top[-1]  = left[-1];
+    }
+    if (cand_up)
+        memcpy(top, src - stride, size * sizeof(pixel));
+    if (cand_up_right) {
+        memcpy(top + size, src - stride + size, size * sizeof(pixel));
+        EXTEND(top + size + top_right_size, POS(size + top_right_size - 1, -1),
+               size - top_right_size);
+    }
+    if (cand_left)
+        for (i = 0; i < size; i++)
+            left[i] = POS(-1, i);
+    if (cand_bottom_left) {
+        for (i = size; i < size + bottom_left_size; i++)
+            left[i] = POS(-1, i);
+        EXTEND(left + size + bottom_left_size, POS(-1, size + bottom_left_size - 1),
+               size - bottom_left_size);
+    }
+
+    if (s->pps->constrained_intra_pred_flag == 1) {
+        if (cand_bottom_left || cand_left || cand_up_left || cand_up || cand_up_right) {
+            int size_max_x = x0 + ((2 * size) << hshift) < s->sps->width ?
+                                    2 * size : (s->sps->width - x0) >> hshift;
+            int size_max_y = y0 + ((2 * size) << vshift) < s->sps->height ?
+                                    2 * size : (s->sps->height - y0) >> vshift;
+            int j = size + (cand_bottom_left? bottom_left_size: 0) -1;
+            if (!cand_up_right) {
+                size_max_x = x0 + ((size) << hshift) < s->sps->width ?
+                                                    size : (s->sps->width - x0) >> hshift;
+            }
+            if (!cand_bottom_left) {
+                size_max_y = y0 + (( size) << vshift) < s->sps->height ?
+                                                     size : (s->sps->height - y0) >> vshift;
+            }
+            if (cand_bottom_left || cand_left || cand_up_left) {
+                while (j > -1 && !IS_INTRA(-1, j))
+                    j--;
+                if (!IS_INTRA(-1, j)) {
+                    j = 0;
+                    while (j < size_max_x && !IS_INTRA(j, -1))
+                        j++;
+                    EXTEND_LEFT_CIP(top, j, j + 1);
+                    left[-1] = top[-1];
+                }
+            } else {
+                j = 0;
+                while (j < size_max_x && !IS_INTRA(j, -1))
+                    j++;
+                if (j > 0)
+                    if (x0 > 0) {
+                        EXTEND_LEFT_CIP(top, j, j + 1);
+                    } else {
+                        EXTEND_LEFT_CIP(top, j, j);
+                        top[-1] = top[0];
+                    }
+                left[-1] = top[-1];
+            }
+            left[-1] = top[-1];
+            if (cand_bottom_left || cand_left) {
+                a = PIXEL_SPLAT_X4(left[-1]);
+                EXTEND_DOWN_CIP(left, 0, size_max_y);
+            }
+            if (!cand_left)
+                EXTEND(left, left[-1], size);
+            if (!cand_bottom_left)
+                EXTEND(left + size, left[size - 1], size);
+            if (x0 != 0 && y0 != 0) {
+                a = PIXEL_SPLAT_X4(left[size_max_y - 1]);
+                EXTEND_UP_CIP(left, size_max_y - 1, size_max_y);
+                if (!IS_INTRA(-1, - 1))
+                    left[-1] = left[0];
+            } else if (x0 == 0) {
+                EXTEND(left, 0, size_max_y);
+            } else {
+                a = PIXEL_SPLAT_X4(left[size_max_y - 1]);
+                EXTEND_UP_CIP(left, size_max_y - 1, size_max_y);
+            }
+            top[-1] = left[-1];
+            if (y0 != 0) {
+                a = PIXEL_SPLAT_X4(left[-1]);
+                EXTEND_RIGHT_CIP(top, 0, size_max_x);
+            }
+        }
+    }
+    // Infer the unavailable samples
+    if (!cand_bottom_left) {
+        if (cand_left) {
+            EXTEND(left + size, left[size - 1], size);
+        } else if (cand_up_left) {
+            EXTEND(left, left[-1], 2 * size);
+            cand_left = 1;
+        } else if (cand_up) {
+            left[-1] = top[0];
+            EXTEND(left, left[-1], 2 * size);
+            cand_up_left = 1;
+            cand_left    = 1;
+        } else if (cand_up_right) {
+            EXTEND(top, top[size], size);
+            left[-1] = top[size];
+            EXTEND(left, left[-1], 2 * size);
+            cand_up      = 1;
+            cand_up_left = 1;
+            cand_left    = 1;
+        } else { // No samples available
+            left[-1] = (1 << (BIT_DEPTH - 1));
+            EXTEND(top,  left[-1], 2 * size);
+            EXTEND(left, left[-1], 2 * size);
+        }
+    }
+
+    if (!cand_left)
+        EXTEND(left, left[size], size);
+    if (!cand_up_left) {
+        left[-1] = left[0];
+    }
+    if (!cand_up)
+        EXTEND(top, left[-1], size);
+    if (!cand_up_right)
+        EXTEND(top + size, top[size - 1], size);
+
+    top[-1] = left[-1];
+
+    // Filtering process
+    if (!s->sps->intra_smoothing_disabled_flag && (c_idx == 0  || s->sps->chroma_format_idc == 3)) {
+        if (mode != INTRA_DC && size != 4){
+            int intra_hor_ver_dist_thresh[] = { 7, 1, 0 };
+            int min_dist_vert_hor = FFMIN(FFABS((int)(mode - 26U)),
+                                          FFABS((int)(mode - 10U)));
+            if (min_dist_vert_hor > intra_hor_ver_dist_thresh[log2_size - 3]) {
+                int threshold = 1 << (BIT_DEPTH - 5);
+                if (s->sps->sps_strong_intra_smoothing_enable_flag && c_idx == 0 &&
+                    log2_size == 5 &&
+                    FFABS(top[-1]  + top[63]  - 2 * top[31])  < threshold &&
+                    FFABS(left[-1] + left[63] - 2 * left[31]) < threshold) {
+                    // We can't just overwrite values in top because it could be
+                    // a pointer into src
+                    filtered_top[-1] = top[-1];
+                    filtered_top[63] = top[63];
+                    for (i = 0; i < 63; i++)
+                        filtered_top[i] = ((64 - (i + 1)) * top[-1] +
+                                           (i + 1)  * top[63] + 32) >> 6;
+                    for (i = 0; i < 63; i++)
+                        left[i] = ((64 - (i + 1)) * left[-1] +
+                                   (i + 1)  * left[63] + 32) >> 6;
+                    top = filtered_top;
+                } else {
+                    filtered_left[2 * size - 1] = left[2 * size - 1];
+                    filtered_top[2 * size - 1]  = top[2 * size - 1];
+                    for (i = 2 * size - 2; i >= 0; i--)
+                        filtered_left[i] = (left[i + 1] + 2 * left[i] +
+                                            left[i - 1] + 2) >> 2;
+                    filtered_top[-1]  =
+                    filtered_left[-1] = (left[0] + 2 * left[-1] + top[0] + 2) >> 2;
+                    for (i = 2 * size - 2; i >= 0; i--)
+                        filtered_top[i] = (top[i + 1] + 2 * top[i] +
+                                           top[i - 1] + 2) >> 2;
+                    left = filtered_left;
+                    top  = filtered_top;
+                }
+            }
+        }
+    }
+
+    switch (mode) {
+    case INTRA_PLANAR:
+#ifdef USE_FUNC_PTR
+        s->hpc.pred_planar[log2_size - 2]((uint8_t *)src, (uint8_t *)top,
+                                          (uint8_t *)left, stride);
+#else
+        FUNC(pred_planar)((uint8_t *)src, (uint8_t *)top,
+                          (uint8_t *)left, stride, log2_size);
+#endif
+        break;
+    case INTRA_DC:
+#ifdef USE_FUNC_PTR
+        s->hpc.pred_dc((uint8_t *)src, (uint8_t *)top,
+                       (uint8_t *)left, stride, log2_size, c_idx);
+#else
+        FUNC(pred_dc)((uint8_t *)src, (uint8_t *)top,
+                      (uint8_t *)left, stride, log2_size, c_idx);
+#endif
+        break;
+    default:
+        disable_intra_boundary_filter = (s->sps->implicit_rdpcm_enabled_flag &&
+                                         lc->cu.cu_transquant_bypass_flag);
+#ifdef USE_FUNC_PTR
+        s->hpc.pred_angular[log2_size - 2]((uint8_t *)src, (uint8_t *)top,
+                                           (uint8_t *)left, stride, c_idx,
+                                           mode, 
+                                           disable_intra_boundary_filter BIT_DEPTH_ARG);
+#else
+        FUNC(pred_angular)((uint8_t *)src, (uint8_t *)top,
+                           (uint8_t *)left, stride, c_idx,
+                           mode, 1 << log2_size,
+                           disable_intra_boundary_filter BIT_DEPTH_ARG);
+#endif
+        break;
+    }
+}
+
+#ifdef USE_FUNC_PTR
+
+#define INTRA_PRED(size)                                                            \
+static void FUNC(intra_pred_ ## size)(HEVCContext *s, int x0, int y0, int c_idx)    \
+{                                                                                   \
+    FUNC(intra_pred)(s, x0, y0, size, c_idx);                                       \
+}
+
+INTRA_PRED(2)
+INTRA_PRED(3)
+INTRA_PRED(4)
+INTRA_PRED(5)
+
+#undef INTRA_PRED
+
+#endif
+
+static HEVC_INLINE void FUNC(pred_planar)(uint8_t *_src, const uint8_t *_top,
+                                  const uint8_t *_left, ptrdiff_t stride,
+                                  int trafo_size)
+{
+    int x, y;
+    pixel *src        = (pixel *)_src;
+    const pixel *top  = (const pixel *)_top;
+    const pixel *left = (const pixel *)_left;
+    int size = 1 << trafo_size;
+    for (y = 0; y < size; y++)
+        for (x = 0; x < size; x++)
+            POS(x, y) = ((size - 1 - x) * left[y] + (x + 1) * top[size]  +
+                         (size - 1 - y) * top[x]  + (y + 1) * left[size] + size) >> (trafo_size + 1);
+}
+
+#ifdef USE_FUNC_PTR
+
+#define PRED_PLANAR(size)\
+static void FUNC(pred_planar_ ## size)(uint8_t *src, const uint8_t *top,        \
+                                       const uint8_t *left, ptrdiff_t stride)   \
+{                                                                               \
+    FUNC(pred_planar)(src, top, left, stride, size + 2);                        \
+}
+
+PRED_PLANAR(0)
+PRED_PLANAR(1)
+PRED_PLANAR(2)
+PRED_PLANAR(3)
+
+#undef PRED_PLANAR
+
+#endif
+
+static void FUNC(pred_dc)(uint8_t *_src, const uint8_t *_top,
+                          const uint8_t *_left,
+                          ptrdiff_t stride, int log2_size, int c_idx)
+{
+    int i, j, x, y;
+    int size          = (1 << log2_size);
+    pixel *src        = (pixel *)_src;
+    const pixel *top  = (const pixel *)_top;
+    const pixel *left = (const pixel *)_left;
+    int dc            = size;
+    pixel4 a;
+    for (i = 0; i < size; i++)
+        dc += left[i] + top[i];
+
+    dc >>= log2_size + 1;
+
+    a = PIXEL_SPLAT_X4(dc);
+
+    for (i = 0; i < size; i++)
+        for (j = 0; j < size; j+=4)
+            AV_WN4P(&POS(j, i), a);
+
+    if (c_idx == 0 && size < 32) {
+        POS(0, 0) = (left[0] + 2 * dc + top[0] + 2) >> 2;
+        for (x = 1; x < size; x++)
+            POS(x, 0) = (top[x] + 3 * dc + 2) >> 2;
+        for (y = 1; y < size; y++)
+            POS(0, y) = (left[y] + 3 * dc + 2) >> 2;
+    }
+}
+
+static HEVC_INLINE void FUNC(pred_angular)(uint8_t *_src,
+                                           const uint8_t *_top,
+                                           const uint8_t *_left,
+                                           ptrdiff_t stride, int c_idx,
+                                           int mode, int size, 
+                                           int disable_intra_boundary_filter BIT_DEPTH_PARAM)
+{
+    int x, y;
+    pixel *src        = (pixel *)_src;
+    const pixel *top  = (const pixel *)_top;
+    const pixel *left = (const pixel *)_left;
+
+    static const int8_t intra_pred_angle[] = {
+         32,  26,  21,  17, 13,  9,  5, 2, 0, -2, -5, -9, -13, -17, -21, -26, -32,
+        -26, -21, -17, -13, -9, -5, -2, 0, 2,  5,  9, 13,  17,  21,  26,  32
+    };
+    static const int16_t inv_angle[] = {
+        -4096, -1638, -910, -630, -482, -390, -315, -256, -315, -390, -482,
+        -630, -910, -1638, -4096
+    };
+
+    int angle = intra_pred_angle[mode - 2];
+    pixel ref_array[3 * MAX_TB_SIZE + 4];
+    pixel *ref_tmp = ref_array + size;
+    const pixel *ref;
+    int last = (size * angle) >> 5;
+
+    if (mode >= 18) {
+        ref = top - 1;
+        if (angle < 0 && last < -1) {
+            for (x = 0; x <= size; x += 4)
+                AV_WN4P(&ref_tmp[x], AV_RN4P(&top[x - 1]));
+            for (x = last; x <= -1; x++)
+                ref_tmp[x] = left[-1 + ((x * inv_angle[mode - 11] + 128) >> 8)];
+            ref = ref_tmp;
+        }
+
+        for (y = 0; y < size; y++) {
+            int idx  = ((y + 1) * angle) >> 5;
+            int fact = ((y + 1) * angle) & 31;
+            if (fact) {
+                for (x = 0; x < size; x += 4) {
+                    POS(x    , y) = ((32 - fact) * ref[x + idx + 1] +
+                                           fact  * ref[x + idx + 2] + 16) >> 5;
+                    POS(x + 1, y) = ((32 - fact) * ref[x + 1 + idx + 1] +
+                                           fact  * ref[x + 1 + idx + 2] + 16) >> 5;
+                    POS(x + 2, y) = ((32 - fact) * ref[x + 2 + idx + 1] +
+                                           fact  * ref[x + 2 + idx + 2] + 16) >> 5;
+                    POS(x + 3, y) = ((32 - fact) * ref[x + 3 + idx + 1] +
+                                           fact  * ref[x + 3 + idx + 2] + 16) >> 5;
+                }
+            } else {
+                for (x = 0; x < size; x += 4)
+                    AV_WN4P(&POS(x, y), AV_RN4P(&ref[x + idx + 1]));
+            }
+        }
+        if (mode == 26 && c_idx == 0 && size < 32 && 
+            !disable_intra_boundary_filter) {
+            for (y = 0; y < size; y++)
+                POS(0, y) = av_clip_pixel(top[0] + ((left[y] - left[-1]) >> 1));
+        }
+    } else {
+        ref = left - 1;
+        if (angle < 0 && last < -1) {
+            for (x = 0; x <= size; x += 4)
+                AV_WN4P(&ref_tmp[x], AV_RN4P(&left[x - 1]));
+            for (x = last; x <= -1; x++)
+                ref_tmp[x] = top[-1 + ((x * inv_angle[mode - 11] + 128) >> 8)];
+            ref = ref_tmp;
+        }
+
+        for (x = 0; x < size; x++) {
+            int idx  = ((x + 1) * angle) >> 5;
+            int fact = ((x + 1) * angle) & 31;
+            if (fact) {
+                for (y = 0; y < size; y++) {
+                    POS(x, y) = ((32 - fact) * ref[y + idx + 1] +
+                                       fact  * ref[y + idx + 2] + 16) >> 5;
+                }
+            } else {
+                for (y = 0; y < size; y++)
+                    POS(x, y) = ref[y + idx + 1];
+            }
+        }
+        if (mode == 10 && c_idx == 0 && size < 32 && 
+            !disable_intra_boundary_filter) {
+            for (x = 0; x < size; x += 4) {
+                POS(x,     0) = av_clip_pixel(left[0] + ((top[x    ] - top[-1]) >> 1));
+                POS(x + 1, 0) = av_clip_pixel(left[0] + ((top[x + 1] - top[-1]) >> 1));
+                POS(x + 2, 0) = av_clip_pixel(left[0] + ((top[x + 2] - top[-1]) >> 1));
+                POS(x + 3, 0) = av_clip_pixel(left[0] + ((top[x + 3] - top[-1]) >> 1));
+            }
+        }
+    }
+}
+
+#ifdef USE_FUNC_PTR
+
+static void FUNC(pred_angular_0)(uint8_t *src, const uint8_t *top,
+                                 const uint8_t *left,
+                                 ptrdiff_t stride, int c_idx, int mode,
+                                 int disable_intra_boundary_filter BIT_DEPTH_PARAM)
+{
+    FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 2,
+                       disable_intra_boundary_filter BIT_DEPTH_ARG);
+}
+
+static void FUNC(pred_angular_1)(uint8_t *src, const uint8_t *top,
+                                 const uint8_t *left,
+                                 ptrdiff_t stride, int c_idx, int mode,
+                                 int disable_intra_boundary_filter BIT_DEPTH_PARAM)
+{
+    FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 3,
+                       disable_intra_boundary_filter BIT_DEPTH_ARG);
+}
+
+static void FUNC(pred_angular_2)(uint8_t *src, const uint8_t *top,
+                                 const uint8_t *left,
+                                 ptrdiff_t stride, int c_idx, int mode,
+                                 int disable_intra_boundary_filter BIT_DEPTH_PARAM)
+{
+    FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 4,
+                       disable_intra_boundary_filter BIT_DEPTH_ARG);
+}
+
+static void FUNC(pred_angular_3)(uint8_t *src, const uint8_t *top,
+                                 const uint8_t *left,
+                                 ptrdiff_t stride, int c_idx, int mode,
+                                 int disable_intra_boundary_filter BIT_DEPTH_PARAM)
+{
+    FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 5,
+                       disable_intra_boundary_filter BIT_DEPTH_ARG);
+}
+#endif
+
+#undef EXTEND_LEFT_CIP
+#undef EXTEND_RIGHT_CIP
+#undef EXTEND_UP_CIP
+#undef EXTEND_DOWN_CIP
+#undef IS_INTRA
+#undef MVF_PU
+#undef MVF
+#undef PU
+#undef EXTEND
+#undef MIN_TB_ADDR_ZS
+#undef POS
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/internal.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,267 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal api header.
+ */
+
+#ifndef AVCODEC_INTERNAL_H
+#define AVCODEC_INTERNAL_H
+
+#include <stdint.h>
+
+#include "libavutil/buffer.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/pixfmt.h"
+#include "avcodec.h"
+#include "config.h"
+
+#define FF_SANE_NB_CHANNELS 63U
+
+#if HAVE_AVX
+#   define STRIDE_ALIGN 32
+#elif HAVE_SIMD_ALIGN_16
+#   define STRIDE_ALIGN 16
+#else
+#   define STRIDE_ALIGN 8
+#endif
+
+typedef struct FramePool {
+    /**
+     * Pools for each data plane. For audio all the planes have the same size,
+     * so only pools[0] is used.
+     */
+    AVBufferPool *pools[4];
+
+    /*
+     * Pool parameters
+     */
+    int format;
+    int width, height;
+    int stride_align[AV_NUM_DATA_POINTERS];
+    int linesize[4];
+    int planes;
+    int channels;
+    int samples;
+} FramePool;
+
+typedef struct AVCodecInternal {
+    /**
+     * Whether the parent AVCodecContext is a copy of the context which had
+     * init() called on it.
+     * This is used by multithreading - shared tables and picture pointers
+     * should be freed from the original context only.
+     */
+    int is_copy;
+
+    /**
+     * Whether to allocate progress for frame threading.
+     *
+     * The codec must set it to 1 if it uses ff_thread_await/report_progress(),
+     * then progress will be allocated in ff_thread_get_buffer(). The frames
+     * then MUST be freed with ff_thread_release_buffer().
+     *
+     * If the codec does not need to call the progress functions (there are no
+     * dependencies between the frames), it should leave this at 0. Then it can
+     * decode straight to the user-provided frames (which the user will then
+     * free with av_frame_unref()), there is no need to call
+     * ff_thread_release_buffer().
+     */
+    int allocate_progress;
+
+#if FF_API_OLD_ENCODE_AUDIO
+    /**
+     * Internal sample count used by avcodec_encode_audio() to fabricate pts.
+     * Can be removed along with avcodec_encode_audio().
+     */
+    int64_t sample_count;
+#endif
+
+    /**
+     * An audio frame with less than required samples has been submitted and
+     * padded with silence. Reject all subsequent frames.
+     */
+    int last_audio_frame;
+
+    AVFrame *to_free;
+
+    FramePool *pool;
+
+    void *thread_ctx;
+
+    /**
+     * Current packet as passed into the decoder, to avoid having to pass the
+     * packet into every function.
+     */
+    AVPacket *pkt;
+
+    /**
+     * temporary buffer used for encoders to store their bitstream
+     */
+    uint8_t *byte_buffer;
+    unsigned int byte_buffer_size;
+
+    void *frame_thread_encoder;
+
+    /**
+     * Number of audio samples to skip at the start of the next decoded frame
+     */
+    int skip_samples;
+
+    /**
+     * hwaccel-specific private data
+     */
+    void *hwaccel_priv_data;
+} AVCodecInternal;
+
+struct AVCodecDefault {
+    const uint8_t *key;
+    const uint8_t *value;
+};
+
+extern const uint8_t ff_log2_run[41];
+
+/**
+ * Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
+ * If there is no such matching pair then size is returned.
+ */
+int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b);
+
+unsigned int avpriv_toupper4(unsigned int x);
+
+/**
+ * does needed setup of pkt_pts/pos and such for (re)get_buffer();
+ */
+int ff_init_buffer_info(AVCodecContext *s, AVFrame *frame);
+
+
+void avpriv_color_frame(AVFrame *frame, const int color[4]);
+
+extern volatile int ff_avcodec_locked;
+int ff_lock_avcodec(AVCodecContext *log_ctx);
+int ff_unlock_avcodec(void);
+
+int avpriv_lock_avformat(void);
+int avpriv_unlock_avformat(void);
+
+/**
+ * Maximum size in bytes of extradata.
+ * This value was chosen such that every bit of the buffer is
+ * addressable by a 32-bit signed integer as used by get_bits.
+ */
+#define FF_MAX_EXTRADATA_SIZE ((1 << 28) - FF_INPUT_BUFFER_PADDING_SIZE)
+
+/**
+ * Check AVPacket size and/or allocate data.
+ *
+ * Encoders supporting AVCodec.encode2() can use this as a convenience to
+ * ensure the output packet data is large enough, whether provided by the user
+ * or allocated in this function.
+ *
+ * @param avctx   the AVCodecContext of the encoder
+ * @param avpkt   the AVPacket
+ *                If avpkt->data is already set, avpkt->size is checked
+ *                to ensure it is large enough.
+ *                If avpkt->data is NULL, a new buffer is allocated.
+ *                avpkt->size is set to the specified size.
+ *                All other AVPacket fields will be reset with av_init_packet().
+ * @param size    the minimum required packet size
+ * @return        0 on success, negative error code on failure
+ */
+int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size);
+
+int ff_alloc_packet(AVPacket *avpkt, int size);
+
+/**
+ * Rescale from sample rate to AVCodecContext.time_base.
+ */
+static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx,
+                                                        int64_t samples)
+{
+    if(samples == AV_NOPTS_VALUE)
+        return AV_NOPTS_VALUE;
+    return av_rescale_q(samples, (AVRational){ 1, avctx->sample_rate },
+                        avctx->time_base);
+}
+
+/**
+ * Get a buffer for a frame. This is a wrapper around
+ * AVCodecContext.get_buffer() and should be used instead calling get_buffer()
+ * directly.
+ */
+int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
+
+/**
+ * Identical in function to av_frame_make_writable(), except it uses
+ * ff_get_buffer() to allocate the buffer when needed.
+ */
+int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame);
+
+int ff_thread_can_start_frame(AVCodecContext *avctx);
+
+int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx);
+
+/**
+ * Call avcodec_open2 recursively by decrementing counter, unlocking mutex,
+ * calling the function and then restoring again. Assumes the mutex is
+ * already locked
+ */
+int ff_codec_open2_recursive(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
+
+/**
+ * Finalize buf into extradata and set its size appropriately.
+ */
+int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf);
+
+const uint8_t *avpriv_find_start_code(const uint8_t *p,
+                                      const uint8_t *end,
+                                      uint32_t *state);
+
+/**
+ * Check that the provided frame dimensions are valid and set them on the codec
+ * context.
+ */
+int ff_set_dimensions(AVCodecContext *s, int width, int height);
+
+/**
+ * Check that the provided sample aspect ratio is valid and set it on the codec
+ * context.
+ */
+int ff_set_sar(AVCodecContext *avctx, AVRational sar);
+
+/**
+ * Add or update AV_FRAME_DATA_MATRIXENCODING side data.
+ */
+int ff_side_data_update_matrix_encoding(AVFrame *frame,
+                                        enum AVMatrixEncoding matrix_encoding);
+
+/**
+ * Select the (possibly hardware accelerated) pixel format.
+ * This is a wrapper around AVCodecContext.get_format() and should be used
+ * instead of calling get_format() directly.
+ */
+int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt);
+
+/**
+ * Set various frame properties from the codec context / packet data.
+ */
+int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame);
+
+#endif /* AVCODEC_INTERNAL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/mathops.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,230 @@
+/*
+ * simple math operations
+ * Copyright (c) 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef AVCODEC_MATHOPS_H
+#define AVCODEC_MATHOPS_H
+
+#include <stdint.h>
+
+#include "libavutil/common.h"
+#include "config.h"
+
+#define MAX_NEG_CROP 1024
+
+extern const uint32_t ff_inverse[257];
+extern const uint8_t  ff_reverse[256];
+extern const uint8_t ff_sqrt_tab[256];
+extern const uint8_t ff_crop_tab[256 + 2 * MAX_NEG_CROP];
+extern const uint8_t ff_zigzag_direct[64];
+
+#if   ARCH_ARM
+#   include "arm/mathops.h"
+#elif ARCH_AVR32
+#   include "avr32/mathops.h"
+#elif ARCH_MIPS
+#   include "mips/mathops.h"
+#elif ARCH_PPC
+#   include "ppc/mathops.h"
+#elif ARCH_X86
+#   include "x86/mathops.h"
+#endif
+
+/* generic implementation */
+
+#ifndef MUL64
+#   define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
+#endif
+
+#ifndef MULL
+#   define MULL(a,b,s) (MUL64(a, b) >> (s))
+#endif
+
+#ifndef MULH
+/* Signed 32x32 -> 64 multiply; return the high 32 bits of the product. */
+static av_always_inline int MULH(int a, int b){
+    return MUL64(a, b) >> 32;
+}
+#endif
+
+#ifndef UMULH
+/* Unsigned 32x32 -> 64 multiply; return the high 32 bits of the product. */
+static av_always_inline unsigned UMULH(unsigned a, unsigned b){
+    return ((uint64_t)(a) * (uint64_t)(b))>>32;
+}
+#endif
+
+#ifndef MAC64
+#   define MAC64(d, a, b) ((d) += MUL64(a, b))
+#endif
+
+#ifndef MLS64
+#   define MLS64(d, a, b) ((d) -= MUL64(a, b))
+#endif
+
+/* signed 16x16 -> 32 multiply add accumulate */
+#ifndef MAC16
+#   define MAC16(rt, ra, rb) rt += (ra) * (rb)
+#endif
+
+/* signed 16x16 -> 32 multiply */
+#ifndef MUL16
+#   define MUL16(ra, rb) ((ra) * (rb))
+#endif
+
+#ifndef MLS16
+#   define MLS16(rt, ra, rb) ((rt) -= (ra) * (rb))
+#endif
+
+/* median of 3 */
+#ifndef mid_pred
+#define mid_pred mid_pred
+/* Return the median of a, b and c (used e.g. for motion-vector prediction).
+ * The disabled #if 0 branch is a branchless bit-twiddling variant kept for
+ * reference; the live code is a plain three-way comparison tree that
+ * overwrites b with the median and returns it. */
+static inline av_const int mid_pred(int a, int b, int c)
+{
+#if 0
+    int t= (a-b)&((a-b)>>31);
+    a-=t;
+    b+=t;
+    b-= (b-c)&((b-c)>>31);
+    b+= (a-b)&((a-b)>>31);
+
+    return b;
+#else
+    /* b currently holds the candidate median; each branch replaces it
+     * with whichever of a/c lies between the other two. */
+    if(a>b){
+        if(c>b){
+            if(c>a) b=a;
+            else    b=c;
+        }
+    }else{
+        if(b>c){
+            if(c>a) b=c;
+            else    b=a;
+        }
+    }
+    return b;
+#endif
+}
+#endif
+
+#ifndef sign_extend
+/* Sign-extend the low 'bits' bits of val to a full int.
+ * The left shift is done on an unsigned value via the union to avoid the
+ * undefined behavior of shifting into/out of the sign bit; the final signed
+ * right shift relies on arithmetic shift semantics (implementation-defined
+ * in ISO C, but true on all compilers FFmpeg supports). */
+static inline av_const int sign_extend(int val, unsigned bits)
+{
+    unsigned shift = 8 * sizeof(int) - bits;
+    union { unsigned u; int s; } v = { (unsigned) val << shift };
+    return v.s >> shift;
+}
+#endif
+
+#ifndef zero_extend
+/* Keep only the low 'bits' bits of val (zero-extension). Shifting up and
+ * back down avoids building a (1 << bits) - 1 mask, which would overflow
+ * for bits == 32. */
+static inline av_const unsigned zero_extend(unsigned val, unsigned bits)
+{
+    return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits);
+}
+#endif
+
+#ifndef COPY3_IF_LT
+#define COPY3_IF_LT(x, y, a, b, c, d)\
+if ((y) < (x)) {\
+    (x) = (y);\
+    (a) = (b);\
+    (c) = (d);\
+}
+#endif
+
+#ifndef MASK_ABS
+#define MASK_ABS(mask, level) do {              \
+        mask  = level >> 31;                    \
+        level = (level ^ mask) - mask;          \
+    } while (0)
+#endif
+
+#ifndef NEG_SSR32
+#   define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s)))
+#endif
+
+#ifndef NEG_USR32
+#   define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s)))
+#endif
+
+#if HAVE_BIGENDIAN
+# ifndef PACK_2U8
+#   define PACK_2U8(a,b)     (((a) <<  8) | (b))
+# endif
+# ifndef PACK_4U8
+#   define PACK_4U8(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
+# endif
+# ifndef PACK_2U16
+#   define PACK_2U16(a,b)    (((a) << 16) | (b))
+# endif
+#else
+# ifndef PACK_2U8
+#   define PACK_2U8(a,b)     (((b) <<  8) | (a))
+# endif
+# ifndef PACK_4U8 /* guard must name the macro it protects (was typo PACK_4U2) */
+#   define PACK_4U8(a,b,c,d) (((d) << 24) | ((c) << 16) | ((b) << 8) | (a))
+# endif
+# ifndef PACK_2U16
+#   define PACK_2U16(a,b)    (((b) << 16) | (a))
+# endif
+#endif
+
+#ifndef PACK_2S8
+#   define PACK_2S8(a,b)     PACK_2U8((a)&255, (b)&255)
+#endif
+#ifndef PACK_4S8
+#   define PACK_4S8(a,b,c,d) PACK_4U8((a)&255, (b)&255, (c)&255, (d)&255)
+#endif
+#ifndef PACK_2S16
+#   define PACK_2S16(a,b)    PACK_2U16((a)&0xffff, (b)&0xffff)
+#endif
+
+#ifndef FASTDIV
+#   define FASTDIV(a,b) ((uint32_t)((((uint64_t)a) * ff_inverse[b]) >> 32))
+#endif /* FASTDIV */
+
+/* Integer square root of a (appears to return floor(sqrt(a)); the final
+ * line corrects a possible overestimate by one — TODO confirm exact
+ * rounding for all ranges). Small inputs are read straight from
+ * ff_sqrt_tab; larger inputs use a scaled table estimate refined with one
+ * division step via FASTDIV. */
+static inline av_const unsigned int ff_sqrt(unsigned int a)
+{
+    unsigned int b;
+
+    /* tiny values: direct table lookup */
+    if (a < 255) return (ff_sqrt_tab[a + 1] - 1) >> 4;
+    else if (a < (1 << 12)) b = ff_sqrt_tab[a >> 4] >> 2;
+#if !CONFIG_SMALL
+    /* extra mid-range table branches traded for size when CONFIG_SMALL */
+    else if (a < (1 << 14)) b = ff_sqrt_tab[a >> 6] >> 1;
+    else if (a < (1 << 16)) b = ff_sqrt_tab[a >> 8]   ;
+#endif
+    else {
+        /* large values: scale down by a power of four, look up an initial
+         * estimate, then refine with one Newton-style step (c/b + b)/2
+         * folded into FASTDIV and the shift */
+        int s = av_log2_16bit(a >> 16) >> 1;
+        unsigned int c = a >> (s + 2);
+        b = ff_sqrt_tab[c >> (s + 8)];
+        b = FASTDIV(c,b) + (b << s);
+    }
+
+    /* subtract 1 if the estimate overshot (b*b > a) */
+    return b - (a < b * b);
+}
+
+/* Reinterpret a uint8_t bit pattern as int8_t via a union, avoiding the
+ * implementation-defined behavior of an out-of-range signed conversion. */
+static inline int8_t ff_u8_to_s8(uint8_t a)
+{
+    union {
+        uint8_t u8;
+        int8_t  s8;
+    } b;
+    b.u8 = a;
+    return b.s8;
+}
+
+#endif /* AVCODEC_MATHOPS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/old_codec_ids.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,397 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_OLD_CODEC_IDS_H
+#define AVCODEC_OLD_CODEC_IDS_H
+
+/*
+ * This header exists to prevent new codec IDs from being accidentally added to
+ * the deprecated list.
+ * Do not include it directly. It will be removed on next major bump
+ *
+ * Do not add new items to this list. Use the AVCodecID enum instead.
+ */
+
+    CODEC_ID_NONE = AV_CODEC_ID_NONE,
+
+    /* video codecs */
+    CODEC_ID_MPEG1VIDEO,
+    CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+#if FF_API_XVMC
+    CODEC_ID_MPEG2VIDEO_XVMC,
+#endif
+    CODEC_ID_H261,
+    CODEC_ID_H263,
+    CODEC_ID_RV10,
+    CODEC_ID_RV20,
+    CODEC_ID_MJPEG,
+    CODEC_ID_MJPEGB,
+    CODEC_ID_LJPEG,
+    CODEC_ID_SP5X,
+    CODEC_ID_JPEGLS,
+    CODEC_ID_MPEG4,
+    CODEC_ID_RAWVIDEO,
+    CODEC_ID_MSMPEG4V1,
+    CODEC_ID_MSMPEG4V2,
+    CODEC_ID_MSMPEG4V3,
+    CODEC_ID_WMV1,
+    CODEC_ID_WMV2,
+    CODEC_ID_H263P,
+    CODEC_ID_H263I,
+    CODEC_ID_FLV1,
+    CODEC_ID_SVQ1,
+    CODEC_ID_SVQ3,
+    CODEC_ID_DVVIDEO,
+    CODEC_ID_HUFFYUV,
+    CODEC_ID_CYUV,
+    CODEC_ID_H264,
+    CODEC_ID_INDEO3,
+    CODEC_ID_VP3,
+    CODEC_ID_THEORA,
+    CODEC_ID_ASV1,
+    CODEC_ID_ASV2,
+    CODEC_ID_FFV1,
+    CODEC_ID_4XM,
+    CODEC_ID_VCR1,
+    CODEC_ID_CLJR,
+    CODEC_ID_MDEC,
+    CODEC_ID_ROQ,
+    CODEC_ID_INTERPLAY_VIDEO,
+    CODEC_ID_XAN_WC3,
+    CODEC_ID_XAN_WC4,
+    CODEC_ID_RPZA,
+    CODEC_ID_CINEPAK,
+    CODEC_ID_WS_VQA,
+    CODEC_ID_MSRLE,
+    CODEC_ID_MSVIDEO1,
+    CODEC_ID_IDCIN,
+    CODEC_ID_8BPS,
+    CODEC_ID_SMC,
+    CODEC_ID_FLIC,
+    CODEC_ID_TRUEMOTION1,
+    CODEC_ID_VMDVIDEO,
+    CODEC_ID_MSZH,
+    CODEC_ID_ZLIB,
+    CODEC_ID_QTRLE,
+    CODEC_ID_TSCC,
+    CODEC_ID_ULTI,
+    CODEC_ID_QDRAW,
+    CODEC_ID_VIXL,
+    CODEC_ID_QPEG,
+    CODEC_ID_PNG,
+    CODEC_ID_PPM,
+    CODEC_ID_PBM,
+    CODEC_ID_PGM,
+    CODEC_ID_PGMYUV,
+    CODEC_ID_PAM,
+    CODEC_ID_FFVHUFF,
+    CODEC_ID_RV30,
+    CODEC_ID_RV40,
+    CODEC_ID_VC1,
+    CODEC_ID_WMV3,
+    CODEC_ID_LOCO,
+    CODEC_ID_WNV1,
+    CODEC_ID_AASC,
+    CODEC_ID_INDEO2,
+    CODEC_ID_FRAPS,
+    CODEC_ID_TRUEMOTION2,
+    CODEC_ID_BMP,
+    CODEC_ID_CSCD,
+    CODEC_ID_MMVIDEO,
+    CODEC_ID_ZMBV,
+    CODEC_ID_AVS,
+    CODEC_ID_SMACKVIDEO,
+    CODEC_ID_NUV,
+    CODEC_ID_KMVC,
+    CODEC_ID_FLASHSV,
+    CODEC_ID_CAVS,
+    CODEC_ID_JPEG2000,
+    CODEC_ID_VMNC,
+    CODEC_ID_VP5,
+    CODEC_ID_VP6,
+    CODEC_ID_VP6F,
+    CODEC_ID_TARGA,
+    CODEC_ID_DSICINVIDEO,
+    CODEC_ID_TIERTEXSEQVIDEO,
+    CODEC_ID_TIFF,
+    CODEC_ID_GIF,
+    CODEC_ID_DXA,
+    CODEC_ID_DNXHD,
+    CODEC_ID_THP,
+    CODEC_ID_SGI,
+    CODEC_ID_C93,
+    CODEC_ID_BETHSOFTVID,
+    CODEC_ID_PTX,
+    CODEC_ID_TXD,
+    CODEC_ID_VP6A,
+    CODEC_ID_AMV,
+    CODEC_ID_VB,
+    CODEC_ID_PCX,
+    CODEC_ID_SUNRAST,
+    CODEC_ID_INDEO4,
+    CODEC_ID_INDEO5,
+    CODEC_ID_MIMIC,
+    CODEC_ID_RL2,
+    CODEC_ID_ESCAPE124,
+    CODEC_ID_DIRAC,
+    CODEC_ID_BFI,
+    CODEC_ID_CMV,
+    CODEC_ID_MOTIONPIXELS,
+    CODEC_ID_TGV,
+    CODEC_ID_TGQ,
+    CODEC_ID_TQI,
+    CODEC_ID_AURA,
+    CODEC_ID_AURA2,
+    CODEC_ID_V210X,
+    CODEC_ID_TMV,
+    CODEC_ID_V210,
+    CODEC_ID_DPX,
+    CODEC_ID_MAD,
+    CODEC_ID_FRWU,
+    CODEC_ID_FLASHSV2,
+    CODEC_ID_CDGRAPHICS,
+    CODEC_ID_R210,
+    CODEC_ID_ANM,
+    CODEC_ID_BINKVIDEO,
+    CODEC_ID_IFF_ILBM,
+    CODEC_ID_IFF_BYTERUN1,
+    CODEC_ID_KGV1,
+    CODEC_ID_YOP,
+    CODEC_ID_VP8,
+    CODEC_ID_PICTOR,
+    CODEC_ID_ANSI,
+    CODEC_ID_A64_MULTI,
+    CODEC_ID_A64_MULTI5,
+    CODEC_ID_R10K,
+    CODEC_ID_MXPEG,
+    CODEC_ID_LAGARITH,
+    CODEC_ID_PRORES,
+    CODEC_ID_JV,
+    CODEC_ID_DFA,
+    CODEC_ID_WMV3IMAGE,
+    CODEC_ID_VC1IMAGE,
+    CODEC_ID_UTVIDEO,
+    CODEC_ID_BMV_VIDEO,
+    CODEC_ID_VBLE,
+    CODEC_ID_DXTORY,
+    CODEC_ID_V410,
+    CODEC_ID_XWD,
+    CODEC_ID_CDXL,
+    CODEC_ID_XBM,
+    CODEC_ID_ZEROCODEC,
+    CODEC_ID_MSS1,
+    CODEC_ID_MSA1,
+    CODEC_ID_TSCC2,
+    CODEC_ID_MTS2,
+    CODEC_ID_CLLC,
+    CODEC_ID_Y41P       = MKBETAG('Y','4','1','P'),
+    CODEC_ID_ESCAPE130  = MKBETAG('E','1','3','0'),
+    CODEC_ID_EXR        = MKBETAG('0','E','X','R'),
+    CODEC_ID_AVRP       = MKBETAG('A','V','R','P'),
+
+    CODEC_ID_G2M        = MKBETAG( 0 ,'G','2','M'),
+    CODEC_ID_AVUI       = MKBETAG('A','V','U','I'),
+    CODEC_ID_AYUV       = MKBETAG('A','Y','U','V'),
+    CODEC_ID_V308       = MKBETAG('V','3','0','8'),
+    CODEC_ID_V408       = MKBETAG('V','4','0','8'),
+    CODEC_ID_YUV4       = MKBETAG('Y','U','V','4'),
+    CODEC_ID_SANM       = MKBETAG('S','A','N','M'),
+    CODEC_ID_PAF_VIDEO  = MKBETAG('P','A','F','V'),
+    CODEC_ID_SNOW       = AV_CODEC_ID_SNOW,
+
+    /* various PCM "codecs" */
+    CODEC_ID_FIRST_AUDIO = 0x10000,     ///< A dummy id pointing at the start of audio codecs
+    CODEC_ID_PCM_S16LE = 0x10000,
+    CODEC_ID_PCM_S16BE,
+    CODEC_ID_PCM_U16LE,
+    CODEC_ID_PCM_U16BE,
+    CODEC_ID_PCM_S8,
+    CODEC_ID_PCM_U8,
+    CODEC_ID_PCM_MULAW,
+    CODEC_ID_PCM_ALAW,
+    CODEC_ID_PCM_S32LE,
+    CODEC_ID_PCM_S32BE,
+    CODEC_ID_PCM_U32LE,
+    CODEC_ID_PCM_U32BE,
+    CODEC_ID_PCM_S24LE,
+    CODEC_ID_PCM_S24BE,
+    CODEC_ID_PCM_U24LE,
+    CODEC_ID_PCM_U24BE,
+    CODEC_ID_PCM_S24DAUD,
+    CODEC_ID_PCM_ZORK,
+    CODEC_ID_PCM_S16LE_PLANAR,
+    CODEC_ID_PCM_DVD,
+    CODEC_ID_PCM_F32BE,
+    CODEC_ID_PCM_F32LE,
+    CODEC_ID_PCM_F64BE,
+    CODEC_ID_PCM_F64LE,
+    CODEC_ID_PCM_BLURAY,
+    CODEC_ID_PCM_LXF,
+    CODEC_ID_S302M,
+    CODEC_ID_PCM_S8_PLANAR,
+
+    /* various ADPCM codecs */
+    CODEC_ID_ADPCM_IMA_QT = 0x11000,
+    CODEC_ID_ADPCM_IMA_WAV,
+    CODEC_ID_ADPCM_IMA_DK3,
+    CODEC_ID_ADPCM_IMA_DK4,
+    CODEC_ID_ADPCM_IMA_WS,
+    CODEC_ID_ADPCM_IMA_SMJPEG,
+    CODEC_ID_ADPCM_MS,
+    CODEC_ID_ADPCM_4XM,
+    CODEC_ID_ADPCM_XA,
+    CODEC_ID_ADPCM_ADX,
+    CODEC_ID_ADPCM_EA,
+    CODEC_ID_ADPCM_G726,
+    CODEC_ID_ADPCM_CT,
+    CODEC_ID_ADPCM_SWF,
+    CODEC_ID_ADPCM_YAMAHA,
+    CODEC_ID_ADPCM_SBPRO_4,
+    CODEC_ID_ADPCM_SBPRO_3,
+    CODEC_ID_ADPCM_SBPRO_2,
+    CODEC_ID_ADPCM_THP,
+    CODEC_ID_ADPCM_IMA_AMV,
+    CODEC_ID_ADPCM_EA_R1,
+    CODEC_ID_ADPCM_EA_R3,
+    CODEC_ID_ADPCM_EA_R2,
+    CODEC_ID_ADPCM_IMA_EA_SEAD,
+    CODEC_ID_ADPCM_IMA_EA_EACS,
+    CODEC_ID_ADPCM_EA_XAS,
+    CODEC_ID_ADPCM_EA_MAXIS_XA,
+    CODEC_ID_ADPCM_IMA_ISS,
+    CODEC_ID_ADPCM_G722,
+    CODEC_ID_ADPCM_IMA_APC,
+    CODEC_ID_VIMA       = MKBETAG('V','I','M','A'),
+
+    /* AMR */
+    CODEC_ID_AMR_NB = 0x12000,
+    CODEC_ID_AMR_WB,
+
+    /* RealAudio codecs*/
+    CODEC_ID_RA_144 = 0x13000,
+    CODEC_ID_RA_288,
+
+    /* various DPCM codecs */
+    CODEC_ID_ROQ_DPCM = 0x14000,
+    CODEC_ID_INTERPLAY_DPCM,
+    CODEC_ID_XAN_DPCM,
+    CODEC_ID_SOL_DPCM,
+
+    /* audio codecs */
+    CODEC_ID_MP2 = 0x15000,
+    CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+    CODEC_ID_AAC,
+    CODEC_ID_AC3,
+    CODEC_ID_DTS,
+    CODEC_ID_VORBIS,
+    CODEC_ID_DVAUDIO,
+    CODEC_ID_WMAV1,
+    CODEC_ID_WMAV2,
+    CODEC_ID_MACE3,
+    CODEC_ID_MACE6,
+    CODEC_ID_VMDAUDIO,
+    CODEC_ID_FLAC,
+    CODEC_ID_MP3ADU,
+    CODEC_ID_MP3ON4,
+    CODEC_ID_SHORTEN,
+    CODEC_ID_ALAC,
+    CODEC_ID_WESTWOOD_SND1,
+    CODEC_ID_GSM, ///< as in Berlin toast format
+    CODEC_ID_QDM2,
+    CODEC_ID_COOK,
+    CODEC_ID_TRUESPEECH,
+    CODEC_ID_TTA,
+    CODEC_ID_SMACKAUDIO,
+    CODEC_ID_QCELP,
+    CODEC_ID_WAVPACK,
+    CODEC_ID_DSICINAUDIO,
+    CODEC_ID_IMC,
+    CODEC_ID_MUSEPACK7,
+    CODEC_ID_MLP,
+    CODEC_ID_GSM_MS, /* as found in WAV */
+    CODEC_ID_ATRAC3,
+    CODEC_ID_VOXWARE,
+    CODEC_ID_APE,
+    CODEC_ID_NELLYMOSER,
+    CODEC_ID_MUSEPACK8,
+    CODEC_ID_SPEEX,
+    CODEC_ID_WMAVOICE,
+    CODEC_ID_WMAPRO,
+    CODEC_ID_WMALOSSLESS,
+    CODEC_ID_ATRAC3P,
+    CODEC_ID_EAC3,
+    CODEC_ID_SIPR,
+    CODEC_ID_MP1,
+    CODEC_ID_TWINVQ,
+    CODEC_ID_TRUEHD,
+    CODEC_ID_MP4ALS,
+    CODEC_ID_ATRAC1,
+    CODEC_ID_BINKAUDIO_RDFT,
+    CODEC_ID_BINKAUDIO_DCT,
+    CODEC_ID_AAC_LATM,
+    CODEC_ID_QDMC,
+    CODEC_ID_CELT,
+    CODEC_ID_G723_1,
+    CODEC_ID_G729,
+    CODEC_ID_8SVX_EXP,
+    CODEC_ID_8SVX_FIB,
+    CODEC_ID_BMV_AUDIO,
+    CODEC_ID_RALF,
+    CODEC_ID_IAC,
+    CODEC_ID_ILBC,
+    CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
+    CODEC_ID_SONIC       = MKBETAG('S','O','N','C'),
+    CODEC_ID_SONIC_LS    = MKBETAG('S','O','N','L'),
+    CODEC_ID_PAF_AUDIO   = MKBETAG('P','A','F','A'),
+    CODEC_ID_OPUS        = MKBETAG('O','P','U','S'),
+
+    /* subtitle codecs */
+    CODEC_ID_FIRST_SUBTITLE = 0x17000,          ///< A dummy ID pointing at the start of subtitle codecs.
+    CODEC_ID_DVD_SUBTITLE = 0x17000,
+    CODEC_ID_DVB_SUBTITLE,
+    CODEC_ID_TEXT,  ///< raw UTF-8 text
+    CODEC_ID_XSUB,
+    CODEC_ID_SSA,
+    CODEC_ID_MOV_TEXT,
+    CODEC_ID_HDMV_PGS_SUBTITLE,
+    CODEC_ID_DVB_TELETEXT,
+    CODEC_ID_SRT,
+    CODEC_ID_MICRODVD   = MKBETAG('m','D','V','D'),
+    CODEC_ID_EIA_608    = MKBETAG('c','6','0','8'),
+    CODEC_ID_JACOSUB    = MKBETAG('J','S','U','B'),
+    CODEC_ID_SAMI       = MKBETAG('S','A','M','I'),
+    CODEC_ID_REALTEXT   = MKBETAG('R','T','X','T'),
+    CODEC_ID_SUBVIEWER  = MKBETAG('S','u','b','V'),
+
+    /* other specific kind of codecs (generally used for attachments) */
+    CODEC_ID_FIRST_UNKNOWN = 0x18000,           ///< A dummy ID pointing at the start of various fake codecs.
+    CODEC_ID_TTF = 0x18000,
+    CODEC_ID_BINTEXT    = MKBETAG('B','T','X','T'),
+    CODEC_ID_XBIN       = MKBETAG('X','B','I','N'),
+    CODEC_ID_IDF        = MKBETAG( 0 ,'I','D','F'),
+    CODEC_ID_OTF        = MKBETAG( 0 ,'O','T','F'),
+
+    CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it
+
+    CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+                                * stream (only used by libavformat) */
+    CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+                                * stream (only used by libavformat) */
+    CODEC_ID_FFMETADATA = 0x21000,   ///< Dummy codec for streams containing only metadata information.
+
+#endif /* AVCODEC_OLD_CODEC_IDS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/put_bits.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,258 @@
+/*
+ * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * bitstream writer API
+ */
+
+#ifndef AVCODEC_PUT_BITS_H
+#define AVCODEC_PUT_BITS_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <assert.h>
+
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avassert.h"
+
+typedef struct PutBitContext {
+    uint32_t bit_buf;
+    int bit_left;
+    uint8_t *buf, *buf_ptr, *buf_end;
+    int size_in_bits;
+} PutBitContext;
+
+/**
+ * Initialize the PutBitContext s.
+ *
+ * @param buffer the buffer where to put bits
+ * @param buffer_size the size in bytes of buffer
+ */
+static inline void init_put_bits(PutBitContext *s, uint8_t *buffer,
+                                 int buffer_size)
+{
+    /* a negative size is treated as an empty (NULL) buffer */
+    if (buffer_size < 0) {
+        buffer_size = 0;
+        buffer      = NULL;
+    }
+
+    s->size_in_bits = 8 * buffer_size;
+    s->buf          = buffer;
+    s->buf_end      = s->buf + buffer_size;
+    s->buf_ptr      = s->buf;
+    s->bit_left     = 32; /* free bits remaining in the 32-bit accumulator */
+    s->bit_buf      = 0;
+}
+
+/**
+ * Rebase the bit writer onto a reallocated buffer.
+ *
+ * @param buffer the buffer where to put bits
+ * @param buffer_size the size in bytes of buffer,
+ *                    must be larger than the previous size
+ */
+static inline void rebase_put_bits(PutBitContext *s, uint8_t *buffer,
+                                   int buffer_size)
+{
+    /* the new buffer must be strictly larger; already-written bytes are
+     * assumed to have been copied into it by the caller (realloc) */
+    av_assert0(8*buffer_size > s->size_in_bits);
+
+    s->buf_end = buffer + buffer_size;
+    /* preserve the write offset relative to the new base */
+    s->buf_ptr = buffer + (s->buf_ptr - s->buf);
+    s->buf     = buffer;
+    s->size_in_bits = 8 * buffer_size;
+}
+
+/**
+ * @return the total number of bits written to the bitstream.
+ */
+static inline int put_bits_count(PutBitContext *s)
+{
+    /* bytes flushed so far, plus (32 - bit_left) bits still pending in
+     * the accumulator */
+    return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left;
+}
+
+/**
+ * @return the number of bits available in the bitstream.
+ */
+static inline int put_bits_left(PutBitContext* s)
+{
+    /* capacity to buf_end minus the bits already pending in bit_buf */
+    return (s->buf_end - s->buf_ptr) * 8 - 32 + s->bit_left;
+}
+
+/**
+ * Pad the end of the output stream with zeros.
+ */
+static inline void flush_put_bits(PutBitContext *s)
+{
+#ifndef BITSTREAM_WRITER_LE
+    /* big-endian mode keeps pending bits in the low part of bit_buf;
+     * shift them up so bytes can be emitted from the top */
+    if (s->bit_left < 32)
+        s->bit_buf <<= s->bit_left;
+#endif
+    /* emit one byte per iteration until the accumulator is empty,
+     * zero-padding the final partial byte */
+    while (s->bit_left < 32) {
+        /* XXX: should test end of buffer */
+#ifdef BITSTREAM_WRITER_LE
+        *s->buf_ptr++ = s->bit_buf;
+        s->bit_buf  >>= 8;
+#else
+        *s->buf_ptr++ = s->bit_buf >> 24;
+        s->bit_buf  <<= 8;
+#endif
+        s->bit_left  += 8;
+    }
+    /* reset the accumulator to the empty state */
+    s->bit_left = 32;
+    s->bit_buf  = 0;
+}
+
+#ifdef BITSTREAM_WRITER_LE
+#define avpriv_align_put_bits align_put_bits_unsupported_here
+#define avpriv_put_string ff_put_string_unsupported_here
+#define avpriv_copy_bits avpriv_copy_bits_unsupported_here
+#else
+/**
+ * Pad the bitstream with zeros up to the next byte boundary.
+ */
+void avpriv_align_put_bits(PutBitContext *s);
+
+/**
+ * Put the string string in the bitstream.
+ *
+ * @param terminate_string 0-terminates the written string if value is 1
+ */
+void avpriv_put_string(PutBitContext *pb, const char *string,
+                       int terminate_string);
+
+/**
+ * Copy the content of src to the bitstream.
+ *
+ * @param length the number of bits of src to copy
+ */
+void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length);
+#endif
+
+/**
+ * Write up to 31 bits into a bitstream.
+ * Use put_bits32 to write 32 bits.
+ */
+static inline void put_bits(PutBitContext *s, int n, unsigned int value)
+{
+    unsigned int bit_buf;
+    int bit_left;
+
+    /* caller contract: at most 31 bits, and no stray high bits in value */
+    av_assert2(n <= 31 && value < (1U << n));
+
+    /* work on locals; state is written back once at the end */
+    bit_buf  = s->bit_buf;
+    bit_left = s->bit_left;
+
+    /* XXX: optimize */
+#ifdef BITSTREAM_WRITER_LE
+    /* little-endian mode: new bits go above the (32 - bit_left) bits
+     * already accumulated; flush 4 bytes when the accumulator fills */
+    bit_buf |= value << (32 - bit_left);
+    if (n >= bit_left) {
+        av_assert2(s->buf_ptr+3<s->buf_end);
+        AV_WL32(s->buf_ptr, bit_buf);
+        s->buf_ptr += 4;
+        /* carry the bits of value that did not fit (none if it fit exactly) */
+        bit_buf     = (bit_left == 32) ? 0 : value >> bit_left;
+        bit_left   += 32;
+    }
+    bit_left -= n;
+#else
+    /* big-endian (MSB-first) mode */
+    if (n < bit_left) {
+        /* fits: append below the existing bits */
+        bit_buf     = (bit_buf << n) | value;
+        bit_left   -= n;
+    } else {
+        /* does not fit: top up the accumulator, flush 4 bytes, and keep
+         * the low (n - bit_left) bits of value for the next word */
+        bit_buf   <<= bit_left;
+        bit_buf    |= value >> (n - bit_left);
+        av_assert2(s->buf_ptr+3<s->buf_end);
+        AV_WB32(s->buf_ptr, bit_buf);
+        s->buf_ptr += 4;
+        bit_left   += 32 - n;
+        bit_buf     = value;
+    }
+#endif
+
+    s->bit_buf  = bit_buf;
+    s->bit_left = bit_left;
+}
+
+/* Write the low n bits of a signed value. The mask keeps only those bits
+ * so the "value < (1U << n)" assert inside put_bits() holds even for
+ * negative inputs. */
+static inline void put_sbits(PutBitContext *pb, int n, int32_t value)
+{
+    av_assert2(n >= 0 && n <= 31);
+
+    put_bits(pb, n, value & ((1 << n) - 1));
+}
+
+/**
+ * Write exactly 32 bits into a bitstream.
+ */
+static void av_unused put_bits32(PutBitContext *s, uint32_t value)
+{
+    /* put_bits() handles at most 31 bits, so split into two 16-bit halves;
+     * the half order depends on the writer endianness */
+    int lo = value & 0xffff;
+    int hi = value >> 16;
+#ifdef BITSTREAM_WRITER_LE
+    put_bits(s, 16, lo);
+    put_bits(s, 16, hi);
+#else
+    put_bits(s, 16, hi);
+    put_bits(s, 16, lo);
+#endif
+}
+
+/**
+ * Return the pointer to the byte where the bitstream writer will put
+ * the next bit.
+ */
+static inline uint8_t *put_bits_ptr(PutBitContext *s)
+{
+    /* note: bits still pending in bit_buf are NOT reflected here */
+    return s->buf_ptr;
+}
+
+/**
+ * Skip the given number of bytes.
+ * PutBitContext must be flushed & aligned to a byte boundary before calling this.
+ */
+static inline void skip_put_bytes(PutBitContext *s, int n)
+{
+    /* both asserts enforce the documented precondition: flushed and
+     * byte-aligned, i.e. an empty accumulator */
+    av_assert2((put_bits_count(s) & 7) == 0);
+    av_assert2(s->bit_left == 32);
+    s->buf_ptr += n;
+}
+
+/**
+ * Skip the given number of bits.
+ * Must only be used if the actual values in the bitstream do not matter.
+ * If n is 0 the behavior is undefined.
+ */
+static inline void skip_put_bits(PutBitContext *s, int n)
+{
+    /* advance the write position by whole 32-bit words plus a remainder;
+     * NOTE(review): when bit_left goes negative, ">> 5" relies on
+     * arithmetic right shift of a negative int (implementation-defined
+     * in ISO C, assumed by FFmpeg on all supported compilers) */
+    s->bit_left -= n;
+    s->buf_ptr  -= 4 * (s->bit_left >> 5);
+    s->bit_left &= 31;
+}
+
+/**
+ * Change the end of the buffer.
+ *
+ * @param size the new size in bytes of the buffer where to put bits
+ */
+static inline void set_put_bits_buffer_size(PutBitContext *s, int size)
+{
+    /* only moves the end marker; size_in_bits is deliberately untouched */
+    s->buf_end = s->buf + size;
+}
+
+#endif /* AVCODEC_PUT_BITS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/rnd_avg.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
+ * Copyright (c) 2011 Oskar Arvidsson
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_RND_AVG_H
+#define AVCODEC_RND_AVG_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define BYTE_VEC32(c) ((c) * 0x01010101UL)
+#define BYTE_VEC64(c) ((c) * 0x0001000100010001UL)
+
+/* SWAR per-byte average of two packed 4-byte vectors, rounding halves up:
+ * (a|b) - ((a^b) & ~1)/2 computes (a+b+1)/2 independently in each byte lane
+ * without cross-byte carries. */
+static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
+{
+    return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
+}
+
+/* SWAR per-byte average of two packed 4-byte vectors, rounding halves down:
+ * (a&b) + ((a^b) & ~1)/2 computes (a+b)/2 in each byte lane. */
+static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
+{
+    return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
+}
+
+/* 64-bit variant of rnd_avg32: per-byte (a+b+1)/2 over 8 packed bytes. */
+static inline uint64_t rnd_avg64(uint64_t a, uint64_t b)
+{
+    return (a | b) - (((a ^ b) & ~BYTE_VEC64(0x01)) >> 1);
+}
+
+/* 64-bit variant of no_rnd_avg32: per-byte (a+b)/2 over 8 packed bytes. */
+static inline uint64_t no_rnd_avg64(uint64_t a, uint64_t b)
+{
+    return (a & b) + (((a ^ b) & ~BYTE_VEC64(0x01)) >> 1);
+}
+
+#endif /* AVCODEC_RND_AVG_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/thread.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2008 Alexander Strange <astrange@ithinksw.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Multithreading support functions
+ * @author Alexander Strange <astrange@ithinksw.com>
+ */
+
+#ifndef AVCODEC_THREAD_H
+#define AVCODEC_THREAD_H
+
+#include "libavutil/buffer.h"
+
+#include "config.h"
+#include "avcodec.h"
+
+/* A frame shared between decoding threads, with per-field decode progress. */
+typedef struct ThreadFrame {
+    AVFrame *f;           ///< the underlying frame
+    AVCodecContext *owner; ///< context that allocated the frame
+    // progress->data is an array of 2 ints holding progress for top/bottom
+    // fields
+    AVBufferRef *progress;
+} ThreadFrame;
+
+/**
+ * Wait for decoding threads to finish and reset internal state.
+ * Called by avcodec_flush_buffers().
+ *
+ * @param avctx The context.
+ */
+void ff_thread_flush(AVCodecContext *avctx);
+
+/**
+ * Submit a new frame to a decoding thread.
+ * Returns the next available frame in picture. *got_picture_ptr
+ * will be 0 if none is available.
+ * The return value on success is the size of the consumed packet for
+ * compatibility with avcodec_decode_video2(). This means the decoder
+ * has to consume the full packet.
+ *
+ * Parameters are the same as avcodec_decode_video2().
+ */
+int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture,
+                           int *got_picture_ptr, AVPacket *avpkt);
+
+/**
+ * If the codec defines update_thread_context(), call this
+ * when they are ready for the next thread to start decoding
+ * the next frame. After calling it, do not change any variables
+ * read by the update_thread_context() method, or call ff_thread_get_buffer().
+ *
+ * @param avctx The context.
+ */
+void ff_thread_finish_setup(AVCodecContext *avctx);
+
+/**
+ * Notify later decoding threads when part of their reference picture is ready.
+ * Call this when some part of the picture is finished decoding.
+ * Later calls with lower values of progress have no effect.
+ *
+ * @param f The picture being decoded.
+ * @param progress Value, in arbitrary units, of how much of the picture has decoded.
+ * @param field The field being decoded, for field-picture codecs.
+ * 0 for top field or frame pictures, 1 for bottom field.
+ */
+void ff_thread_report_progress(ThreadFrame *f, int progress, int field);
+
+/**
+ * Wait for earlier decoding threads to finish reference pictures.
+ * Call this before accessing some part of a picture, with a given
+ * value for progress, and it will return after the responsible decoding
+ * thread calls ff_thread_report_progress() with the same or
+ * higher value for progress.
+ *
+ * @param f The picture being referenced.
+ * @param progress Value, in arbitrary units, to wait for.
+ * @param field The field being referenced, for field-picture codecs.
+ * 0 for top field or frame pictures, 1 for bottom field.
+ */
+void ff_thread_await_progress(ThreadFrame *f, int progress, int field);
+
+/**
+ * Wrapper around get_format() for frame-multithreaded codecs.
+ * Call this function instead of avctx->get_format().
+ * Cannot be called after the codec has called ff_thread_finish_setup().
+ *
+ * @param avctx The current context.
+ * @param fmt The list of available formats.
+ */
+enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt);
+
+/**
+ * Wrapper around get_buffer() for frame-multithreaded codecs.
+ * Call this function instead of ff_get_buffer(f).
+ * Cannot be called after the codec has called ff_thread_finish_setup().
+ *
+ * @param avctx The current context.
+ * @param f The frame to write into.
+ */
+int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
+
+/**
+ * Wrapper around release_buffer() frame-for multithreaded codecs.
+ * Call this function instead of avctx->release_buffer(f).
+ * The AVFrame will be copied and the actual release_buffer() call
+ * will be performed later. The contents of data pointed to by the
+ * AVFrame should not be changed until ff_thread_get_buffer() is called
+ * on it.
+ *
+ * @param avctx The current context.
+ * @param f The picture being released.
+ */
+void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f);
+
+int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src);
+
+int ff_thread_init(AVCodecContext *s);
+void ff_thread_free(AVCodecContext *s);
+
+int ff_alloc_entries(AVCodecContext *avctx, int count);
+void ff_reset_entries(AVCodecContext *avctx);
+void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n);
+void ff_thread_await_progress2(AVCodecContext *avctx,  int field, int thread, int shift);
+
+#endif /* AVCODEC_THREAD_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/utils.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,369 @@
+/*
+ * utils for libavcodec
+ * Copyright (c) 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * utils.
+ */
+
+#include "config.h"
+#include "libavutil/atomic.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/crc.h"
+#include "libavutil/frame.h"
+#include "libavutil/internal.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/dict.h"
+#include "avcodec.h"
+#include "libavutil/opt.h"
+#include "thread.h"
+#include "internal.h"
+#include "bytestream.h"
+#include "version.h"
+#include <stdlib.h>
+#include <stdarg.h>
+#include <limits.h>
+#include <float.h>
+#if CONFIG_ICONV
+# include <iconv.h>
+#endif
+
/* No-op: this reduced libavcodec build has nothing to register. */
void avcodec_register_all(void)
{
}
+
+int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
+{
+    int ret = 0;
+
+    if (codec->priv_data_size > 0) {
+        if (!avctx->priv_data) {
+            avctx->priv_data = av_mallocz(codec->priv_data_size);
+            if (!avctx->priv_data) {
+                ret = AVERROR(ENOMEM);
+                goto end;
+            }
+        }
+    } else {
+        avctx->priv_data = NULL;
+    }
+    avctx->codec = codec;
+    avctx->frame_number = 0;
+    //    avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id);
+
+    avctx->thread_count = 1;
+
+    avctx->pts_correction_num_faulty_pts =
+    avctx->pts_correction_num_faulty_dts = 0;
+    avctx->pts_correction_last_pts =
+    avctx->pts_correction_last_dts = INT64_MIN;
+
+    ret = avctx->codec->init(avctx);
+    if (ret < 0) {
+        goto free_and_end;
+    }
+
+    return 0;
+free_and_end:
+    av_freep(&avctx->priv_data);
+    avctx->codec = NULL;
+ end:
+    return ret;
+}
+
+av_cold int avcodec_close(AVCodecContext *avctx)
+{
+    if (!avctx)
+        return 0;
+
+    if (avctx->codec && avctx->codec->close)
+        avctx->codec->close(avctx);
+    avctx->coded_frame = NULL;
+
+    av_freep(&avctx->priv_data);
+    avctx->codec = NULL;
+    avctx->active_thread_type = 0;
+
+    return 0;
+}
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
+{
+    int i;
+
+    for (i = 0; i < count; i++) {
+        int r = func(c, (char *)arg + i * size);
+        if (ret)
+            ret[i] = r;
+    }
+    return 0;
+}
+
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count)
+{
+    int i;
+
+    for (i = 0; i < count; i++) {
+        int r = func(c, arg, i, 0);
+        if (ret)
+            ret[i] = r;
+    }
+    return 0;
+}
+
+int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
+{
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+    int i, h, linesize, bpp;
+
+    for (i = 0; i < desc->nb_components; i++) {
+        bpp = (desc->comp[i].depth_minus1 + 8) >> 3;
+        linesize = FFALIGN(frame->width * bpp, 32);
+        if (i == 1 || i == 2)
+            linesize = FF_CEIL_RSHIFT(linesize, desc->log2_chroma_w);
+
+        frame->linesize[i] = linesize;
+
+        h = FFALIGN(frame->height, 32);
+        if (i == 1 || i == 2)
+            h = FF_CEIL_RSHIFT(h, desc->log2_chroma_h);
+        
+        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 32);
+        if (!frame->buf[i])
+            goto fail;
+
+        frame->data[i] = frame->buf[i]->data;
+    }
+    return 0;
+ fail:
+    return -1;
+}
+
+int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec)
+{
+    memset(s, 0, sizeof(AVCodecContext));
+
+    s->codec_type = codec ? codec->type : AVMEDIA_TYPE_UNKNOWN;
+    if (codec)
+        s->codec_id = codec->id;
+
+    s->time_base           = (AVRational){0,1};
+    s->framerate           = (AVRational){ 0, 1 };
+    s->pkt_timebase        = (AVRational){ 0, 1 };
+    s->get_buffer2         = avcodec_default_get_buffer2;
+    //    s->get_format          = avcodec_default_get_format;
+    s->execute             = avcodec_default_execute;
+    s->execute2            = avcodec_default_execute2;
+    s->sample_aspect_ratio = (AVRational){0,1};
+    s->pix_fmt             = AV_PIX_FMT_NONE;
+    s->sample_fmt          = AV_SAMPLE_FMT_NONE;
+
+    s->reordered_opaque    = AV_NOPTS_VALUE;
+    if(codec && codec->priv_data_size){
+        if(!s->priv_data){
+            s->priv_data= av_mallocz(codec->priv_data_size);
+            if (!s->priv_data) {
+                return AVERROR(ENOMEM);
+            }
+        }
+    }
+    return 0;
+}
+
/* Single-threaded build: no cross-thread frame referencing is needed,
 * so this is a no-op that reports success. */
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
{
    return 0;
}
+
+AVCodecContext *avcodec_alloc_context3(const AVCodec *codec)
+{
+    AVCodecContext *avctx= av_malloc(sizeof(AVCodecContext));
+
+    if (!avctx)
+        return NULL;
+
+    if(avcodec_get_context_defaults3(avctx, codec) < 0){
+        av_free(avctx);
+        return NULL;
+    }
+
+    return avctx;
+}
+
/**
 * Decode one video packet into a frame.
 *
 * @param avctx           an opened video decoder context
 * @param picture         frame to receive output; unreferenced before decoding
 * @param got_picture_ptr set to non-zero iff a frame was produced
 * @param avpkt           input packet; not modified (a copy is handed to the
 *                        decoder)
 * @return the decoder's return value, 0 when decoding was skipped, or a
 *         negative AVERROR code on invalid arguments
 */
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
                                              int *got_picture_ptr,
                                              const AVPacket *avpkt)
{
    int ret;
    // copy to ensure we do not change avpkt
    AVPacket tmp = *avpkt;

    /* Reject contexts that were never opened or are not video decoders. */
    if (!avctx->codec)
        return AVERROR(EINVAL);
    if (avctx->codec->type != AVMEDIA_TYPE_VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "Invalid media type for video\n");
        return AVERROR(EINVAL);
    }

    *got_picture_ptr = 0;
    /* If coded dimensions are set, make sure they are sane before decoding. */
    if ((avctx->coded_width || avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx))
        return AVERROR(EINVAL);

    av_frame_unref(picture);

    /* Invoke the decoder when there is input data, when the codec buffers
     * frames internally (CODEC_CAP_DELAY: an empty packet flushes it), or
     * when frame threading is active. Otherwise there is nothing to do. */
    if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {

        ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
                                   &tmp);

        if (*got_picture_ptr) {
            avctx->frame_number++;
        } else {
            /* No output: drop whatever the decoder left in the frame. */
            av_frame_unref(picture);
        }
    } else {
        ret = 0;
    }

    /* many decoders assign whole AVFrames, thus overwriting extended_data;
     * make sure it's set correctly */
    //    av_assert0(!picture->extended_data || picture->extended_data == picture->data);

    return ret;
}
+
+int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
+{
+    if ((int)w>0 && (int)h>0 && (w+128) < (INT_MAX/8) / (h + 128))
+        return 0;
+    else
+        return AVERROR(EINVAL);
+}
+
/* Stub: the sample aspect ratio is ignored in this build; always succeeds. */
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
{
    return 0;
}
+
+static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
+{
+    int override_dimensions = 1;
+    int ret;
+
+    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+        if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0 || avctx->pix_fmt<0) {
+            av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
+            return AVERROR(EINVAL);
+        }
+    }
+    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+        if (frame->width <= 0 || frame->height <= 0) {
+            frame->width  = FFMAX(avctx->width,  FF_CEIL_RSHIFT(avctx->coded_width,  avctx->lowres));
+            frame->height = FFMAX(avctx->height, FF_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
+            override_dimensions = 0;
+        }
+        frame->format              = avctx->pix_fmt;
+    }
+    ret = avctx->get_buffer2(avctx, frame, flags);
+
+    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions) {
+        frame->width  = avctx->width;
+        frame->height = avctx->height;
+    }
+
+    return ret;
+}
+
+int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
+{
+    int ret = get_buffer_internal(avctx, frame, flags);
+    if (ret < 0)
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+    return ret;
+}
+
/* Single-threaded stand-in for the frame-threading buffer allocator:
 * record the owning context and allocate through the normal path. */
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    f->owner = avctx;
    return ff_get_buffer(avctx, f->f, flags);
}
+
/* Single-threaded build: release the frame's buffers immediately instead
 * of deferring the release to another thread. */
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
    if (f->f)
        av_frame_unref(f->f);
}
+
/* Single-threaded build: no setup-completion handoff is needed. */
void ff_thread_finish_setup(AVCodecContext *avctx)
{
}
+
/* Single-threaded build: decode progress reporting is a no-op. */
void ff_thread_report_progress(ThreadFrame *f, int progress, int field)
{
}
+
/* Single-threaded build: nothing to wait for; returns immediately. */
void ff_thread_await_progress(ThreadFrame *f, int progress, int field)
{
}
+
/* Single-threaded build: a new frame can always be started. */
int ff_thread_can_start_frame(AVCodecContext *avctx)
{
    return 1;
}
+
/* Single-threaded build: no per-slice progress entries to allocate. */
int ff_alloc_entries(AVCodecContext *avctx, int count)
{
    return 0;
}
+
/* Single-threaded build: no progress entries exist, so nothing to reset. */
void ff_reset_entries(AVCodecContext *avctx)
{
}
+
/* Single-threaded build: nothing to wait for; returns immediately. */
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
{
}
+
/* Single-threaded build: slice progress reporting is a no-op. */
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
{
}
+
+void av_init_packet(AVPacket *pkt)
+{
+    pkt->pts                  = AV_NOPTS_VALUE;
+    pkt->dts                  = AV_NOPTS_VALUE;
+    pkt->pos                  = -1;
+    pkt->duration             = 0;
+    pkt->convergence_duration = 0;
+    pkt->flags                = 0;
+    pkt->stream_index         = 0;
+    pkt->buf                  = NULL;
+    pkt->side_data            = NULL;
+    pkt->side_data_elems      = 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/version.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,188 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_H
+#define AVCODEC_VERSION_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
+#include "libavutil/version.h"
+
+#define LIBAVCODEC_VERSION_MAJOR 56
+#define LIBAVCODEC_VERSION_MINOR  12
+#define LIBAVCODEC_VERSION_MICRO 101
+
+#define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
+                                               LIBAVCODEC_VERSION_MINOR, \
+                                               LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_VERSION      AV_VERSION(LIBAVCODEC_VERSION_MAJOR,    \
+                                           LIBAVCODEC_VERSION_MINOR,    \
+                                           LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_BUILD        LIBAVCODEC_VERSION_INT
+
+#define LIBAVCODEC_IDENT        "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ */
+
+#ifndef FF_API_REQUEST_CHANNELS
+#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_OLD_DECODE_AUDIO
+#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_OLD_ENCODE_AUDIO
+#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_OLD_ENCODE_VIDEO
+#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_CODEC_ID
+#define FF_API_CODEC_ID          (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_AUDIO_CONVERT
+#define FF_API_AUDIO_CONVERT     (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_AVCODEC_RESAMPLE
+#define FF_API_AVCODEC_RESAMPLE  FF_API_AUDIO_CONVERT
+#endif
+#ifndef FF_API_DEINTERLACE
+#define FF_API_DEINTERLACE       (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_DESTRUCT_PACKET
+#define FF_API_DESTRUCT_PACKET   (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_GET_BUFFER
+#define FF_API_GET_BUFFER        (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_MISSING_SAMPLE
+#define FF_API_MISSING_SAMPLE    (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_LOWRES
+#define FF_API_LOWRES            (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_CAP_VDPAU
+#define FF_API_CAP_VDPAU         (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_BUFS_VDPAU
+#define FF_API_BUFS_VDPAU        (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_VOXWARE
+#define FF_API_VOXWARE           (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_SET_DIMENSIONS
+#define FF_API_SET_DIMENSIONS    (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_DEBUG_MV
+#define FF_API_DEBUG_MV          (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_AC_VLC
+#define FF_API_AC_VLC            (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_OLD_MSMPEG4
+#define FF_API_OLD_MSMPEG4       (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_ASPECT_EXTENDED
+#define FF_API_ASPECT_EXTENDED   (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_THREAD_OPAQUE
+#define FF_API_THREAD_OPAQUE     (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_CODEC_PKT
+#define FF_API_CODEC_PKT         (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_ARCH_ALPHA
+#define FF_API_ARCH_ALPHA        (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_XVMC
+#define FF_API_XVMC              (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_ERROR_RATE
+#define FF_API_ERROR_RATE        (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_QSCALE_TYPE
+#define FF_API_QSCALE_TYPE       (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_MB_TYPE
+#define FF_API_MB_TYPE           (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_MAX_BFRAMES
+#define FF_API_MAX_BFRAMES       (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_NEG_LINESIZES
+#define FF_API_NEG_LINESIZES     (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_EMU_EDGE
+#define FF_API_EMU_EDGE          (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_ARCH_SH4
+#define FF_API_ARCH_SH4          (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_ARCH_SPARC
+#define FF_API_ARCH_SPARC        (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_UNUSED_MEMBERS
+#define FF_API_UNUSED_MEMBERS    (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_IDCT_XVIDMMX
+#define FF_API_IDCT_XVIDMMX      (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_INPUT_PRESERVED
+#define FF_API_INPUT_PRESERVED   (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_NORMALIZE_AQP
+#define FF_API_NORMALIZE_AQP     (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_GMC
+#define FF_API_GMC               (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_MV0
+#define FF_API_MV0               (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_CODEC_NAME
+#define FF_API_CODEC_NAME        (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_AFD
+#define FF_API_AFD               (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_VISMV
+/* XXX: don't forget to drop the -vismv documentation */
+#define FF_API_VISMV             (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_DV_FRAME_PROFILE
+#define FF_API_DV_FRAME_PROFILE  (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_AUDIOENC_DELAY
+#define FF_API_AUDIOENC_DELAY    (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_AVCTX_TIMEBASE
+#define FF_API_AVCTX_TIMEBASE    (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_MPV_OPT
+#define FF_API_MPV_OPT           (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+
+#endif /* AVCODEC_VERSION_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/videodsp.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2012 Ronald S. Bultje
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "videodsp.h"
+
+#ifdef USE_PRED
+
+#if !defined(USE_VAR_BIT_DEPTH)
+#define BIT_DEPTH 8
+#include "videodsp_template.c"
+#undef BIT_DEPTH
+#endif
+
+#if defined(USE_VAR_BIT_DEPTH) || defined(USE_FULL)
+#define BIT_DEPTH 16
+#include "videodsp_template.c"
+#undef BIT_DEPTH
+#endif
+
+#ifdef USE_FULL
/* Default no-op prefetch implementation (prefetching is purely a hint). */
static void just_return(uint8_t *buf, ptrdiff_t stride, int h)
{
}
+#endif
+
/**
 * Install the C implementations of the video DSP functions selected by the
 * build configuration, then let the architecture-specific initializers
 * override them where applicable.
 *
 * @param ctx context to fill in
 * @param bpc bits per component; selects the 8- vs 16-bit edge emulation
 *            in full builds
 */
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
{
#ifdef USE_FULL
    /* Default prefetch does nothing; arch init may replace it. */
    ctx->prefetch = just_return;
#endif

#ifdef USE_FULL
    /* Full build: pick the fixed-depth implementation matching bpc. */
    if (bpc <= 8) {
        ctx->emulated_edge_mc = ff_emulated_edge_mc_8;
    } else {
        ctx->emulated_edge_mc = ff_emulated_edge_mc_16;
    }
#else
#if defined(USE_VAR_BIT_DEPTH)
    /* Reduced build with runtime-variable bit depth. */
    ctx->emulated_edge_mc = ff_emulated_edge_mc_var;
#else
    /* Reduced build fixed at 8-bit. */
    ctx->emulated_edge_mc = ff_emulated_edge_mc_8;
#endif
#endif /* !USE_FULL */

    /* ARCH_* are compile-time 0/1 constants; dead branches drop out. */
    if (ARCH_AARCH64)
        ff_videodsp_init_aarch64(ctx, bpc);
    if (ARCH_ARM)
        ff_videodsp_init_arm(ctx, bpc);
    if (ARCH_PPC)
        ff_videodsp_init_ppc(ctx, bpc);
    if (ARCH_X86)
        ff_videodsp_init_x86(ctx, bpc);
}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/videodsp.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Ronald S. Bultje
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Core video DSP helper functions
+ */
+
+#ifndef AVCODEC_VIDEODSP_H
+#define AVCODEC_VIDEODSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define EMULATED_EDGE(depth) \
+void ff_emulated_edge_mc_ ## depth(uint8_t *dst, const uint8_t *src, \
+                                   ptrdiff_t dst_stride, ptrdiff_t src_stride, \
+                                   int block_w, int block_h,\
+                                   int src_x, int src_y, int w, int h);
+
+EMULATED_EDGE(8)
+EMULATED_EDGE(16)
+
typedef struct VideoDSPContext {
    /**
     * Copy a rectangular area of samples to a temporary buffer and replicate
     * the border samples.
     *
     * @param dst destination buffer
     * @param src source buffer
     * @param dst_linesize number of bytes between 2 vertically adjacent
     *                     samples in the destination buffer
     * @param src_linesize number of bytes between 2 vertically adjacent
     *                     samples in the source buffer
     * @param block_w width of block
     * @param block_h height of block
     * @param src_x x coordinate of the top left sample of the block in the
     *                source buffer
     * @param src_y y coordinate of the top left sample of the block in the
     *                source buffer
     * @param w width of the source buffer
     * @param h height of the source buffer
     */
    void (*emulated_edge_mc)(uint8_t *dst, const uint8_t *src,
                             ptrdiff_t dst_linesize,
                             ptrdiff_t src_linesize,
                             int block_w, int block_h,
                             int src_x, int src_y, int w, int h);

    /**
     * Prefetch memory into cache (if supported by hardware).
     *
     * @param buf    pointer to buffer to prefetch memory from
     * @param stride distance between two lines of buf (in bytes)
     * @param h      number of lines to prefetch
     */
    void (*prefetch)(uint8_t *buf, ptrdiff_t stride, int h);
} VideoDSPContext;
+
+void ff_videodsp_init(VideoDSPContext *ctx, int bpc);
+
+/* for internal use only (i.e. called by ff_videodsp_init()) */
+void ff_videodsp_init_aarch64(VideoDSPContext *ctx, int bpc);
+void ff_videodsp_init_arm(VideoDSPContext *ctx, int bpc);
+void ff_videodsp_init_ppc(VideoDSPContext *ctx, int bpc);
+void ff_videodsp_init_x86(VideoDSPContext *ctx, int bpc);
+
+#endif /* AVCODEC_VIDEODSP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavcodec/videodsp_template.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2002-2012 Michael Niedermayer
+ * Copyright (C) 2012 Ronald S. Bultje
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <assert.h>
+
+#include "bit_depth_template.c"
/**
 * Copy a block_w x block_h block of pixels from src (a w x h image) into
 * buf, replicating the image's edge pixels for any part of the block that
 * lies outside the source image.
 *
 * @param buf          destination buffer
 * @param src          source image
 * @param buf_linesize bytes between vertically adjacent destination rows
 * @param src_linesize bytes between vertically adjacent source rows
 * @param block_w      block width in pixels
 * @param block_h      block height in pixels
 * @param src_x        x of the block's top-left pixel in the source image
 *                     (may be negative or >= w)
 * @param src_y        y of the block's top-left pixel in the source image
 *                     (may be negative or >= h)
 * @param w            source image width in pixels
 * @param h            source image height in pixels
 */
void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
                               ptrdiff_t buf_linesize,
                               ptrdiff_t src_linesize,
                               int block_w, int block_h,
                               int src_x, int src_y, int w, int h)
{
    int x, y;
    int start_y, start_x, end_y, end_x;

    if (!w || !h)
        return;

    /* Clamp src_y so that the block overlaps the image vertically by at
     * least one row, moving src along with it so it still points at the
     * block's top-left position. */
    if (src_y >= h) {
        src -= src_y * src_linesize;
        src += (h - 1) * src_linesize;
        src_y = h - 1;
    } else if (src_y <= -block_h) {
        src -= src_y * src_linesize;
        src += (1 - block_h) * src_linesize;
        src_y = 1 - block_h;
    }
    /* Same horizontally: keep at least one column of overlap. */
    if (src_x >= w) {
        src  += (w - 1 - src_x) * sizeof(pixel);
        src_x = w - 1;
    } else if (src_x <= -block_w) {
        src  += (1 - block_w - src_x) * sizeof(pixel);
        src_x = 1 - block_w;
    }

    /* Sub-rectangle of the block that is covered by real image data. */
    start_y = FFMAX(0, -src_y);
    start_x = FFMAX(0, -src_x);
    end_y = FFMIN(block_h, h-src_y);
    end_x = FFMIN(block_w, w-src_x);
    av_assert2(start_y < end_y && block_h);
    av_assert2(start_x < end_x && block_w);

    /* From here on, w is the width (in pixels) of the valid region. */
    w    = end_x - start_x;
    src += start_y * src_linesize + start_x * sizeof(pixel);
    buf += start_x * sizeof(pixel);

    // top: replicate the first available source row upwards
    for (y = 0; y < start_y; y++) {
        memcpy(buf, src, w * sizeof(pixel));
        buf += buf_linesize;
    }

    // copy existing part
    for (; y < end_y; y++) {
        memcpy(buf, src, w * sizeof(pixel));
        src += src_linesize;
        buf += buf_linesize;
    }

    // bottom: replicate the last copied source row downwards
    src -= src_linesize;
    for (; y < block_h; y++) {
        memcpy(buf, src, w * sizeof(pixel));
        buf += buf_linesize;
    }

    /* Second pass over every destination row: extend sideways by
     * replicating the leftmost and rightmost valid pixels. */
    buf -= block_h * buf_linesize + start_x * sizeof(pixel);
    while (block_h--) {
        pixel *bufp = (pixel *) buf;

        // left
        for(x = 0; x < start_x; x++) {
            bufp[x] = bufp[start_x];
        }

        // right
        for (x = end_x; x < block_w; x++) {
            bufp[x] = bufp[end_x - 1];
        }
        buf += buf_linesize;
    }
}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/adler32.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,55 @@
+/*
+ * copyright (c) 2006 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ADLER32_H
+#define AVUTIL_ADLER32_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+/**
+ * @file
+ * Public header for libavutil Adler32 hasher
+ *
+ * @defgroup lavu_adler32 Adler32
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+/**
+ * Calculate the Adler32 checksum of a buffer.
+ *
+ * Passing the return value to a subsequent av_adler32_update() call
+ * allows the checksum of multiple buffers to be calculated as though
+ * they were concatenated.
+ *
+ * @param adler initial checksum value
+ * @param buf   pointer to input buffer
+ * @param len   size of input buffer
+ * @return      updated checksum
+ */
+unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,
+                                unsigned int len) av_pure;
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ADLER32_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/aes.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,65 @@
+/*
+ * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AES_H
+#define AVUTIL_AES_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_aes AES
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_aes_size;
+
+struct AVAES;
+
+/**
+ * Allocate an AVAES context.
+ */
+struct AVAES *av_aes_alloc(void);
+
+/**
+ * Initialize an AVAES context.
+ * @param key_bits 128, 192 or 256
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ * @param count number of 16 byte blocks
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AES_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/atomic.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ATOMIC_H
+#define AVUTIL_ATOMIC_H
+
+#include "config.h"
+
+#if HAVE_ATOMICS_NATIVE
+
+#if HAVE_ATOMICS_GCC
+#include "atomic_gcc.h"
+#elif HAVE_ATOMICS_WIN32
+#include "atomic_win32.h"
+#elif HAVE_ATOMICS_SUNCC
+#include "atomic_suncc.h"
+#endif
+
+#else
+
+/**
+ * Load the current value stored in an atomic integer.
+ *
+ * @param ptr atomic integer
+ * @return the current value of the atomic integer
+ * @note This acts as a memory barrier.
+ */
+int avpriv_atomic_int_get(volatile int *ptr);
+
+/**
+ * Store a new value in an atomic integer.
+ *
+ * @param ptr atomic integer
+ * @param val the value to store in the atomic integer
+ * @note This acts as a memory barrier.
+ */
+void avpriv_atomic_int_set(volatile int *ptr, int val);
+
+/**
+ * Add a value to an atomic integer.
+ *
+ * @param ptr atomic integer
+ * @param inc the value to add to the atomic integer (may be negative)
+ * @return the new value of the atomic integer.
+ * @note This does NOT act as a memory barrier. This is primarily
+ *       intended for reference counting.
+ */
+int avpriv_atomic_int_add_and_fetch(volatile int *ptr, int inc);
+
+/**
+ * Atomic pointer compare and swap.
+ *
+ * @param ptr pointer to the pointer to operate on
+ * @param oldval do the swap if the current value of *ptr equals to oldval
+ * @param newval value to replace *ptr with
+ * @return the value of *ptr before comparison
+ */
+void *avpriv_atomic_ptr_cas(void * volatile *ptr, void *oldval, void *newval);
+
+#endif /* HAVE_ATOMICS_NATIVE */
+
+#endif /* AVUTIL_ATOMIC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/atomic_gcc.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ATOMIC_GCC_H
+#define AVUTIL_ATOMIC_GCC_H
+
+#include <stdint.h>
+
+#include "atomic.h"
+
+#define avpriv_atomic_int_get atomic_int_get_gcc
+static inline int atomic_int_get_gcc(volatile int *ptr)
+{
+#if HAVE_ATOMIC_COMPARE_EXCHANGE
+    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
+#else
+    __sync_synchronize();
+    return *ptr;
+#endif
+}
+
+#define avpriv_atomic_int_set atomic_int_set_gcc
+static inline void atomic_int_set_gcc(volatile int *ptr, int val)
+{
+#if HAVE_ATOMIC_COMPARE_EXCHANGE
+    __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);
+#else
+    *ptr = val;
+    __sync_synchronize();
+#endif
+}
+
+#define avpriv_atomic_int_add_and_fetch atomic_int_add_and_fetch_gcc
+static inline int atomic_int_add_and_fetch_gcc(volatile int *ptr, int inc)
+{
+#if HAVE_ATOMIC_COMPARE_EXCHANGE
+    return __atomic_add_fetch(ptr, inc, __ATOMIC_SEQ_CST);
+#else
+    return __sync_add_and_fetch(ptr, inc);
+#endif
+}
+
+#define avpriv_atomic_ptr_cas atomic_ptr_cas_gcc
+static inline void *atomic_ptr_cas_gcc(void * volatile *ptr,
+                                       void *oldval, void *newval)
+{
+#if HAVE_SYNC_VAL_COMPARE_AND_SWAP
+#ifdef __ARMCC_VERSION
+    // armcc will throw an error if ptr is not an integer type
+    volatile uintptr_t *tmp = (volatile uintptr_t*)ptr;
+    return (void*)__sync_val_compare_and_swap(tmp, oldval, newval);
+#else
+    return __sync_val_compare_and_swap(ptr, oldval, newval);
+#endif
+#else
+    __atomic_compare_exchange_n(ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+    return oldval;
+#endif
+}
+
+#endif /* AVUTIL_ATOMIC_GCC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/atomic_suncc.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,55 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ATOMIC_SUNCC_H
+#define AVUTIL_ATOMIC_SUNCC_H
+
+#include <atomic.h>
+#include <mbarrier.h>
+
+#include "atomic.h"
+
+#define avpriv_atomic_int_get atomic_int_get_suncc
+static inline int atomic_int_get_suncc(volatile int *ptr)
+{
+    __machine_rw_barrier();
+    return *ptr;
+}
+
+#define avpriv_atomic_int_set atomic_int_set_suncc
+static inline void atomic_int_set_suncc(volatile int *ptr, int val)
+{
+    *ptr = val;
+    __machine_rw_barrier();
+}
+
+#define avpriv_atomic_int_add_and_fetch atomic_int_add_and_fetch_suncc
+static inline int atomic_int_add_and_fetch_suncc(volatile int *ptr, int inc)
+{
+    return atomic_add_int_nv(ptr, inc);
+}
+
+#define avpriv_atomic_ptr_cas atomic_ptr_cas_suncc
+static inline void *atomic_ptr_cas_suncc(void * volatile *ptr,
+                                         void *oldval, void *newval)
+{
+    return atomic_cas_ptr(ptr, oldval, newval);
+}
+
+#endif /* AVUTIL_ATOMIC_SUNCC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/atomic_win32.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ATOMIC_WIN32_H
+#define AVUTIL_ATOMIC_WIN32_H
+
+#include <windows.h>
+
+#define avpriv_atomic_int_get atomic_int_get_win32
+static inline int atomic_int_get_win32(volatile int *ptr)
+{
+    MemoryBarrier();
+    return *ptr;
+}
+
+#define avpriv_atomic_int_set atomic_int_set_win32
+static inline void atomic_int_set_win32(volatile int *ptr, int val)
+{
+    *ptr = val;
+    MemoryBarrier();
+}
+
+#define avpriv_atomic_int_add_and_fetch atomic_int_add_and_fetch_win32
+static inline int atomic_int_add_and_fetch_win32(volatile int *ptr, int inc)
+{
+    return inc + InterlockedExchangeAdd(ptr, inc);
+}
+
+#define avpriv_atomic_ptr_cas atomic_ptr_cas_win32
+static inline void *atomic_ptr_cas_win32(void * volatile *ptr,
+                                         void *oldval, void *newval)
+{
+    return InterlockedCompareExchangePointer(ptr, newval, oldval);
+}
+
+#endif /* AVUTIL_ATOMIC_WIN32_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/attributes.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,160 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Macro definitions for various function/variable attributes
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_H
+#define AVUTIL_ATTRIBUTES_H
+
+#ifdef __GNUC__
+#    define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y)
+#else
+#    define AV_GCC_VERSION_AT_LEAST(x,y) 0
+#endif
+
+#ifndef av_always_inline
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+#    define av_always_inline __attribute__((always_inline)) inline
+#elif defined(_MSC_VER)
+#    define av_always_inline __forceinline
+#else
+#    define av_always_inline inline
+#endif
+#endif
+
+#ifndef av_extern_inline
+#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__)
+#    define av_extern_inline extern inline
+#else
+#    define av_extern_inline inline
+#endif
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+#    define av_noinline __attribute__((noinline))
+#elif defined(_MSC_VER)
+#    define av_noinline __declspec(noinline)
+#else
+#    define av_noinline
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+#    define av_pure __attribute__((pure))
+#else
+#    define av_pure
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,6)
+#    define av_const __attribute__((const))
+#else
+#    define av_const
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+#    define av_cold __attribute__((cold))
+#else
+#    define av_cold
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__)
+#    define av_flatten __attribute__((flatten))
+#else
+#    define av_flatten
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+#    define attribute_deprecated __attribute__((deprecated))
+#elif defined(_MSC_VER)
+#    define attribute_deprecated __declspec(deprecated)
+#else
+#    define attribute_deprecated
+#endif
+
+/**
+ * Disable warnings about deprecated features
+ * This is useful for sections of code kept for backward compatibility and
+ * scheduled for removal.
+ */
+#ifndef AV_NOWARN_DEPRECATED
+#if AV_GCC_VERSION_AT_LEAST(4,6)
+#    define AV_NOWARN_DEPRECATED(code) \
+        _Pragma("GCC diagnostic push") \
+        _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
+        code \
+        _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+#    define AV_NOWARN_DEPRECATED(code) \
+        __pragma(warning(push)) \
+        __pragma(warning(disable : 4996)) \
+        code; \
+        __pragma(warning(pop))
+#else
+#    define AV_NOWARN_DEPRECATED(code) code
+#endif
+#endif
+
+
+#if defined(__GNUC__)
+#    define av_unused __attribute__((unused))
+#else
+#    define av_unused
+#endif
+
+/**
+ * Mark a variable as used and prevent the compiler from optimizing it
+ * away.  This is useful for variables accessed only from inline
+ * assembler without the compiler being aware.
+ */
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+#    define av_used __attribute__((used))
+#else
+#    define av_used
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,3)
+#   define av_alias __attribute__((may_alias))
+#else
+#   define av_alias
+#endif
+
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
+#    define av_uninit(x) x=x
+#else
+#    define av_uninit(x) x
+#endif
+
+#ifdef __GNUC__
+#    define av_builtin_constant_p __builtin_constant_p
+#    define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))
+#else
+#    define av_builtin_constant_p(x) 0
+#    define av_printf_format(fmtpos, attrpos)
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,5)
+#    define av_noreturn __attribute__((noreturn))
+#else
+#    define av_noreturn
+#endif
+
+#endif /* AVUTIL_ATTRIBUTES_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/audio_fifo.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,153 @@
+/*
+ * Audio FIFO
+ * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio FIFO Buffer
+ */
+
+#ifndef AVUTIL_AUDIO_FIFO_H
+#define AVUTIL_AUDIO_FIFO_H
+
+#include "avutil.h"
+#include "fifo.h"
+#include "samplefmt.h"
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ *
+ * @defgroup lavu_audiofifo Audio FIFO Buffer
+ * @{
+ */
+
+/**
+ * Context for an Audio FIFO Buffer.
+ *
+ * - Operates at the sample level rather than the byte level.
+ * - Supports multiple channels with either planar or packed sample format.
+ * - Automatic reallocation when writing to a full buffer.
+ */
+typedef struct AVAudioFifo AVAudioFifo;
+
+/**
+ * Free an AVAudioFifo.
+ *
+ * @param af  AVAudioFifo to free
+ */
+void av_audio_fifo_free(AVAudioFifo *af);
+
+/**
+ * Allocate an AVAudioFifo.
+ *
+ * @param sample_fmt  sample format
+ * @param channels    number of channels
+ * @param nb_samples  initial allocation size, in samples
+ * @return            newly allocated AVAudioFifo, or NULL on error
+ */
+AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels,
+                                 int nb_samples);
+
+/**
+ * Reallocate an AVAudioFifo.
+ *
+ * @param af          AVAudioFifo to reallocate
+ * @param nb_samples  new allocation size, in samples
+ * @return            0 if OK, or negative AVERROR code on failure
+ */
+int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples);
+
+/**
+ * Write data to an AVAudioFifo.
+ *
+ * The AVAudioFifo will be reallocated automatically if the available space
+ * is less than nb_samples.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param af          AVAudioFifo to write to
+ * @param data        audio data plane pointers
+ * @param nb_samples  number of samples to write
+ * @return            number of samples actually written, or negative AVERROR
+ *                    code on failure. If successful, the number of samples
+ *                    actually written will always be nb_samples.
+ */
+int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples);
+
+/**
+ * Read data from an AVAudioFifo.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param af          AVAudioFifo to read from
+ * @param data        audio data plane pointers
+ * @param nb_samples  number of samples to read
+ * @return            number of samples actually read, or negative AVERROR code
+ *                    on failure. The number of samples actually read will not
+ *                    be greater than nb_samples, and will only be less than
+ *                    nb_samples if av_audio_fifo_size is less than nb_samples.
+ */
+int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples);
+
+/**
+ * Drain data from an AVAudioFifo.
+ *
+ * Removes the data without reading it.
+ *
+ * @param af          AVAudioFifo to drain
+ * @param nb_samples  number of samples to drain
+ * @return            0 if OK, or negative AVERROR code on failure
+ */
+int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples);
+
+/**
+ * Reset the AVAudioFifo buffer.
+ *
+ * This empties all data in the buffer.
+ *
+ * @param af  AVAudioFifo to reset
+ */
+void av_audio_fifo_reset(AVAudioFifo *af);
+
+/**
+ * Get the current number of samples in the AVAudioFifo available for reading.
+ *
+ * @param af  the AVAudioFifo to query
+ * @return    number of samples available for reading
+ */
+int av_audio_fifo_size(AVAudioFifo *af);
+
+/**
+ * Get the current number of samples in the AVAudioFifo available for writing.
+ *
+ * @param af  the AVAudioFifo to query
+ * @return    number of samples available for writing
+ */
+int av_audio_fifo_space(AVAudioFifo *af);
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_AUDIO_FIFO_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/audioconvert.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,6 @@
+
+#include "version.h"
+
+#if FF_API_AUDIOCONVERT
+#include "channel_layout.h"
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/avassert.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,66 @@
+/*
+ * copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple assert() macros that are a bit more flexible than ISO C assert().
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_AVASSERT_H
+#define AVUTIL_AVASSERT_H
+
+#include <stdlib.h>
+#include "avutil.h"
+#include "log.h"
+
+/**
+ * assert() equivalent, that is always enabled.
+ */
+#define av_assert0(cond) do {                                           \
+    if (!(cond)) {                                                      \
+        av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n",    \
+               AV_STRINGIFY(cond), __FILE__, __LINE__);                 \
+        abort();                                                        \
+    }                                                                   \
+} while (0)
+
+
+/**
+ * assert() equivalent, that does not lie in speed critical code.
+ * These asserts() thus can be enabled without fearing speedloss.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0
+#define av_assert1(cond) av_assert0(cond)
+#else
+#define av_assert1(cond) ((void)0)
+#endif
+
+
+/**
+ * assert() equivalent, that does lie in speed critical code.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
+#define av_assert2(cond) av_assert0(cond)
+#else
+#define av_assert2(cond) ((void)0)
+#endif
+
+#endif /* AVUTIL_AVASSERT_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/avconfig.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,7 @@
+/* Generated by ffconf */
+#ifndef AVUTIL_AVCONFIG_H
+#define AVUTIL_AVCONFIG_H
+#define AV_HAVE_BIGENDIAN 0
+#define AV_HAVE_FAST_UNALIGNED 0
+#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0
+#endif /* AVUTIL_AVCONFIG_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/avstring.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2007 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVSTRING_H
+#define AVUTIL_AVSTRING_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_string
+ * @{
+ */
+
+/**
+ * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to
+ * the address of the first character in str after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_strstart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Return non-zero if pfx is a prefix of str independent of case. If
+ * it is, *ptr is set to the address of the first character in str
+ * after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_stristart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Locate the first case-independent occurrence in the string haystack
+ * of the string needle.  A zero-length string needle is considered to
+ * match at the start of haystack.
+ *
+ * This function is a case-insensitive version of the standard strstr().
+ *
+ * @param haystack string to search in
+ * @param needle   string to search for
+ * @return         pointer to the located match within haystack
+ *                 or a null pointer if no match
+ */
+char *av_stristr(const char *haystack, const char *needle);
+
+/**
+ * Locate the first occurrence of the string needle in the string haystack
+ * where not more than hay_length characters are searched. A zero-length
+ * string needle is considered to match at the start of haystack.
+ *
+ * This function is a length-limited version of the standard strstr().
+ *
+ * @param haystack   string to search in
+ * @param needle     string to search for
+ * @param hay_length length of string to search in
+ * @return           pointer to the located match within haystack
+ *                   or a null pointer if no match
+ */
+char *av_strnstr(const char *haystack, const char *needle, size_t hay_length);
+
+/**
+ * Copy the string src to dst, but no more than size - 1 bytes, and
+ * null-terminate dst.
+ *
+ * This function is the same as BSD strlcpy().
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the length of src
+ *
+ * @warning since the return value is the length of src, src absolutely
+ * _must_ be a properly 0-terminated string, otherwise this will read beyond
+ * the end of the buffer and possibly crash.
+ */
+size_t av_strlcpy(char *dst, const char *src, size_t size);
+
+/**
+ * Append the string src to the string dst, but to a total length of
+ * no more than size - 1 bytes, and null-terminate dst.
+ *
+ * This function is similar to BSD strlcat(), but differs when
+ * size <= strlen(dst).
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the total length of src and dst
+ *
+ * @warning since the return value uses the lengths of src and dst, these
+ * absolutely _must_ be properly 0-terminated strings, otherwise this
+ * will read beyond the end of the buffer and possibly crash.
+ */
+size_t av_strlcat(char *dst, const char *src, size_t size);
+
+/**
+ * Append output to a string, according to a format. Never write out of
+ * the destination buffer, and always put a terminating 0 within
+ * the buffer.
+ * @param dst destination buffer (string to which the output is
+ *  appended)
+ * @param size total size of the destination buffer
+ * @param fmt printf-compatible format string, specifying how the
+ *  following parameters are used
+ * @return the length of the string that would have been generated
+ *  if enough space had been available
+ */
+size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4);
+
+/**
+ * Get the count of continuous non-zero chars starting from the beginning.
+ *
+ * @param len maximum number of characters to check in the string, that
+ *            is the maximum value which is returned by the function
+ */
+static inline size_t av_strnlen(const char *s, size_t len)
+{
+    size_t i;
+    for (i = 0; i < len && s[i]; i++)
+        ;
+    return i;
+}
+
+/**
+ * Print arguments following specified format into a large enough auto
+ * allocated buffer. It is similar to GNU asprintf().
+ * @param fmt printf-compatible format string, specifying how the
+ *            following parameters are used.
+ * @return the allocated string
+ * @note You have to free the string yourself with av_free().
+ */
+char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2);
+
+/**
+ * Convert a number to an av_malloc'ed string.
+ */
+char *av_d2str(double d);
+
+/**
+ * Unescape the given string until a non escaped terminating char,
+ * and return the token corresponding to the unescaped string.
+ *
+ * The normal \ and ' escaping is supported. Leading and trailing
+ * whitespaces are removed, unless they are escaped with '\' or are
+ * enclosed between ''.
+ *
+ * @param buf the buffer to parse, buf will be updated to point to the
+ * terminating char
+ * @param term a 0-terminated list of terminating chars
+ * @return the malloced unescaped string, which must be av_freed by
+ * the user, NULL in case of allocation failure
+ */
+char *av_get_token(const char **buf, const char *term);
+
+/**
+ * Split the string into several tokens which can be accessed by
+ * successive calls to av_strtok().
+ *
+ * A token is defined as a sequence of characters not belonging to the
+ * set specified in delim.
+ *
+ * On the first call to av_strtok(), s should point to the string to
+ * parse, and the value of saveptr is ignored. In subsequent calls, s
+ * should be NULL, and saveptr should be unchanged since the previous
+ * call.
+ *
+ * This function is similar to strtok_r() defined in POSIX.1.
+ *
+ * @param s the string to parse, may be NULL
+ * @param delim 0-terminated list of token delimiters, must be non-NULL
+ * @param saveptr user-provided pointer which points to stored
+ * information necessary for av_strtok() to continue scanning the same
+ * string. saveptr is updated to point to the next character after the
+ * first delimiter found, or to NULL if the string was terminated
+ * @return the found token, or NULL when no token is found
+ */
+char *av_strtok(char *s, const char *delim, char **saveptr);
+
+/**
+ * Locale-independent conversion of ASCII isdigit.
+ */
+av_const int av_isdigit(int c);
+
+/**
+ * Locale-independent conversion of ASCII isgraph.
+ */
+av_const int av_isgraph(int c);
+
+/**
+ * Locale-independent conversion of ASCII isspace.
+ */
+av_const int av_isspace(int c);
+
+/**
+ * Locale-independent conversion of ASCII characters to uppercase.
+ */
+static inline av_const int av_toupper(int c)
+{
+    if (c >= 'a' && c <= 'z')
+        c ^= 0x20;
+    return c;
+}
+
+/**
+ * Locale-independent conversion of ASCII characters to lowercase.
+ */
+static inline av_const int av_tolower(int c)
+{
+    if (c >= 'A' && c <= 'Z')
+        c ^= 0x20;
+    return c;
+}
+
+/**
+ * Locale-independent conversion of ASCII isxdigit.
+ */
+av_const int av_isxdigit(int c);
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strcasecmp(const char *a, const char *b);
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strncasecmp(const char *a, const char *b, size_t n);
+
+
+/**
+ * Thread safe basename.
+ * @param path the path, on DOS both \ and / are considered separators.
+ * @return pointer to the basename substring.
+ */
+const char *av_basename(const char *path);
+
+/**
+ * Thread safe dirname.
+ * @param path the path, on DOS both \ and / are considered separators.
+ * @return the path with the separator replaced by the string terminator or ".".
+ * @note the function may change the input string.
+ */
+const char *av_dirname(char *path);
+
+/**
+ * Match instances of a name in a comma-separated list of names.
+ * @param name  Name to look for.
+ * @param names List of names.
+ * @return 1 on match, 0 otherwise.
+ */
+int av_match_name(const char *name, const char *names);
+
+enum AVEscapeMode {
+    AV_ESCAPE_MODE_AUTO,      ///< Use auto-selected escaping mode.
+    AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping.
+    AV_ESCAPE_MODE_QUOTE,     ///< Use single-quote escaping.
+};
+
+/**
+ * Consider spaces special and escape them even in the middle of the
+ * string.
+ *
+ * This is equivalent to adding the whitespace characters to the special
+ * characters lists, except it is guaranteed to use the exact same list
+ * of whitespace characters as the rest of libavutil.
+ */
+#define AV_ESCAPE_FLAG_WHITESPACE 0x01
+
+/**
+ * Escape only specified special characters.
+ * Without this flag, escape also any characters that may be considered
+ * special by av_get_token(), such as the single quote.
+ */
+#define AV_ESCAPE_FLAG_STRICT 0x02
+
+/**
+ * Escape string in src, and put the escaped string in an allocated
+ * string in *dst, which must be freed with av_free().
+ *
+ * @param dst           pointer where an allocated string is put
+ * @param src           string to escape, must be non-NULL
+ * @param special_chars string containing the special characters which
+ *                      need to be escaped, can be NULL
+ * @param mode          escape mode to employ, see AV_ESCAPE_MODE_* macros.
+ *                      Any unknown value for mode will be considered equivalent to
+ *                      AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without
+ *                      notice.
+ * @param flags         flags which control how to escape, see AV_ESCAPE_FLAG_ macros
+ * @return the length of the allocated string, or a negative error code in case of error
+ * @see av_bprint_escape()
+ */
+int av_escape(char **dst, const char *src, const char *special_chars,
+              enum AVEscapeMode mode, int flags);
+
+#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES          1 ///< accept codepoints over 0x10FFFF
+#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS             2 ///< accept non-characters - 0xFFFE and 0xFFFF
+#define AV_UTF8_FLAG_ACCEPT_SURROGATES                 4 ///< accept UTF-16 surrogate codes
+#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML
+
+#define AV_UTF8_FLAG_ACCEPT_ALL \
+    AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES
+
+/**
+ * Read and decode a single UTF-8 code point (character) from the
+ * buffer in *buf, and update *buf to point to the next byte to
+ * decode.
+ *
+ * In case of an invalid byte sequence, the pointer will be updated to
+ * the next byte after the invalid sequence and the function will
+ * return an error code.
+ *
+ * Depending on the specified flags, the function will also fail in
+ * case the decoded code point does not belong to a valid range.
+ *
+ * @note For speed-relevant code a carefully implemented use of
+ * GET_UTF8() may be preferred.
+ *
+ * @param codep   pointer used to return the parsed code in case of success.
+ *                The value in *codep is set even in case the range check fails.
+ * @param bufp    pointer to the address the first byte of the sequence
+ *                to decode, updated by the function to point to the
+ *                byte next after the decoded sequence
+ * @param buf_end pointer to the end of the buffer, points to the next
+ *                byte past the last in the buffer. This is used to
+ *                avoid buffer overreads (in case of an unfinished
+ *                UTF-8 sequence towards the end of the buffer).
+ * @param flags   a collection of AV_UTF8_FLAG_* flags
+ * @return >= 0 in case a sequence was successfully read, a negative
+ * value in case of invalid sequence
+ */
+int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end,
+                   unsigned int flags);
+
+/**
+ * Check if a name is in a list.
+ * @returns 0 if not found, or the 1 based index where it has been found in the
+ *            list.
+ */
+int av_match_list(const char *name, const char *list, char separator);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AVSTRING_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/avutil.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,344 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVUTIL_H
+#define AVUTIL_AVUTIL_H
+
+/**
+ * @file
+ * external API header
+ */
+
+/**
+ * @mainpage
+ *
+ * @section ffmpeg_intro Introduction
+ *
+ * This document describes the usage of the different libraries
+ * provided by FFmpeg.
+ *
+ * @li @ref libavc "libavcodec" encoding/decoding library
+ * @li @ref lavfi "libavfilter" graph-based frame editing library
+ * @li @ref libavf "libavformat" I/O and muxing/demuxing library
+ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
+ * @li @ref lavu "libavutil" common utility library
+ * @li @ref lswr "libswresample" audio resampling, format conversion and mixing
+ * @li @ref lpp  "libpostproc" post processing library
+ * @li @ref libsws "libswscale" color conversion and scaling library
+ *
+ * @section ffmpeg_versioning Versioning and compatibility
+ *
+ * Each of the FFmpeg libraries contains a version.h header, which defines a
+ * major, minor and micro version number with the
+ * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version
+ * number is incremented with backward incompatible changes - e.g. removing
+ * parts of the public API, reordering public struct members, etc. The minor
+ * version number is incremented for backward compatible API changes or major
+ * new features - e.g. adding a new public function or a new decoder. The micro
+ * version number is incremented for smaller changes that a calling program
+ * might still want to check for - e.g. changing behavior in a previously
+ * unspecified situation.
+ *
+ * FFmpeg guarantees backward API and ABI compatibility for each library as long
+ * as its major version number is unchanged. This means that no public symbols
+ * will be removed or renamed. Types and names of the public struct members and
+ * values of public macros and enums will remain the same (unless they were
+ * explicitly declared as not part of the public API). Documented behavior will
+ * not change.
+ *
+ * In other words, any correct program that works with a given FFmpeg snapshot
+ * should work just as well without any changes with any later snapshot with the
+ * same major versions. This applies to both rebuilding the program against new
+ * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program
+ * links against.
+ *
+ * However, new public symbols may be added and new members may be appended to
+ * public structs whose size is not part of public ABI (most public structs in
+ * FFmpeg). New macros and enum values may be added. Behavior in undocumented
+ * situations may change slightly (and be documented). All those are accompanied
+ * by an entry in doc/APIchanges and incrementing either the minor or micro
+ * version number.
+ */
+
+/**
+ * @defgroup lavu Common utility functions
+ *
+ * @brief
+ * libavutil contains the code shared across all the other FFmpeg
+ * libraries
+ *
+ * @note In order to use the functions provided by avutil you must include
+ * the specific header.
+ *
+ * @{
+ *
+ * @defgroup lavu_crypto Crypto and Hashing
+ *
+ * @{
+ * @}
+ *
+ * @defgroup lavu_math Maths
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_string String Manipulation
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_mem Memory Management
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_data Data Structures
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_audio Audio related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_error Error Codes
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_log Logging Facility
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_misc Other
+ *
+ * @{
+ *
+ * @defgroup lavu_internal Internal
+ *
+ * Not exported functions, for internal usage only
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup preproc_misc Preprocessor String Macros
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup version_utils Library Version Macros
+ *
+ * @{
+ *
+ * @}
+ */
+
+
+/**
+ * @addtogroup lavu_ver
+ * @{
+ */
+
+/**
+ * Return the LIBAVUTIL_VERSION_INT constant.
+ */
+unsigned avutil_version(void);
+
+/**
+ * Return the libavutil build-time configuration.
+ */
+const char *avutil_configuration(void);
+
+/**
+ * Return the libavutil license.
+ */
+const char *avutil_license(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_media Media Type
+ * @brief Media Type
+ */
+
+enum AVMediaType {
+    AVMEDIA_TYPE_UNKNOWN = -1,  ///< Usually treated as AVMEDIA_TYPE_DATA
+    AVMEDIA_TYPE_VIDEO,
+    AVMEDIA_TYPE_AUDIO,
+    AVMEDIA_TYPE_DATA,          ///< Opaque data information usually continuous
+    AVMEDIA_TYPE_SUBTITLE,
+    AVMEDIA_TYPE_ATTACHMENT,    ///< Opaque data information usually sparse
+    AVMEDIA_TYPE_NB
+};
+
+/**
+ * Return a string describing the media_type enum, NULL if media_type
+ * is unknown.
+ */
+const char *av_get_media_type_string(enum AVMediaType media_type);
+
+/**
+ * @defgroup lavu_const Constants
+ * @{
+ *
+ * @defgroup lavu_enc Encoding specific
+ *
+ * @note those definition should move to avcodec
+ * @{
+ */
+
+#define FF_LAMBDA_SHIFT 7
+#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)
+#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
+#define FF_LAMBDA_MAX (256*128-1)
+
+#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove
+
+/**
+ * @}
+ * @defgroup lavu_time Timestamp specific
+ *
+ * FFmpeg internal timebase and timestamp definitions
+ *
+ * @{
+ */
+
+/**
+ * @brief Undefined timestamp value
+ *
+ * Usually reported by demuxer that work on containers that do not provide
+ * either pts or dts.
+ */
+
+#define AV_NOPTS_VALUE          ((int64_t)UINT64_C(0x8000000000000000))
+
+/**
+ * Internal time base represented as integer
+ */
+
+#define AV_TIME_BASE            1000000
+
+/**
+ * Internal time base represented as fractional value
+ */
+
+#define AV_TIME_BASE_Q          (AVRational){1, AV_TIME_BASE}
+
+/**
+ * @}
+ * @}
+ * @defgroup lavu_picture Image related
+ *
+ * AVPicture types, pixel formats and basic image planes manipulation.
+ *
+ * @{
+ */
+
+enum AVPictureType {
+    AV_PICTURE_TYPE_NONE = 0, ///< Undefined
+    AV_PICTURE_TYPE_I,     ///< Intra
+    AV_PICTURE_TYPE_P,     ///< Predicted
+    AV_PICTURE_TYPE_B,     ///< Bi-dir predicted
+    AV_PICTURE_TYPE_S,     ///< S(GMC)-VOP MPEG4
+    AV_PICTURE_TYPE_SI,    ///< Switching Intra
+    AV_PICTURE_TYPE_SP,    ///< Switching Predicted
+    AV_PICTURE_TYPE_BI,    ///< BI type
+};
+
+/**
+ * Return a single letter to describe the given picture type
+ * pict_type.
+ *
+ * @param[in] pict_type the picture type @return a single character
+ * representing the picture type, '?' if pict_type is unknown
+ */
+char av_get_picture_type_char(enum AVPictureType pict_type);
+
+/**
+ * @}
+ */
+
+#include "common.h"
+#include "error.h"
+#include "rational.h"
+#include "version.h"
+#include "macros.h"
+#include "mathematics.h"
+#include "log.h"
+#include "pixfmt.h"
+
+/**
+ * Return x default pointer in case p is NULL.
+ */
+static inline void *av_x_if_null(const void *p, const void *x)
+{
+    return (void *)(intptr_t)(p ? p : x);
+}
+
+/**
+ * Compute the length of an integer list.
+ *
+ * @param elsize  size in bytes of each list element (only 1, 2, 4 or 8)
+ * @param term    list terminator (usually 0 or -1)
+ * @param list    pointer to the list
+ * @return  length of the list, in elements, not counting the terminator
+ */
+unsigned av_int_list_length_for_size(unsigned elsize,
+                                     const void *list, uint64_t term) av_pure;
+
+/**
+ * Compute the length of an integer list.
+ *
+ * @param term  list terminator (usually 0 or -1)
+ * @param list  pointer to the list
+ * @return  length of the list, in elements, not counting the terminator
+ */
+#define av_int_list_length(list, term) \
+    av_int_list_length_for_size(sizeof(*(list)), list, term)
+
+/**
+ * Open a file using a UTF-8 filename.
+ * The API of this function matches POSIX fopen(), errors are returned through
+ * errno.
+ */
+FILE *av_fopen_utf8(const char *path, const char *mode);
+
+/**
+ * Return the fractional representation of the internal time base.
+ */
+AVRational av_get_time_base_q(void);
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_AVUTIL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/base64.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BASE64_H
+#define AVUTIL_BASE64_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_base64 Base64
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+
+/**
+ * Decode a base64-encoded string.
+ *
+ * @param out      buffer for decoded data
+ * @param in       null-terminated input string
+ * @param out_size size in bytes of the out buffer, must be at
+ *                 least 3/4 of the length of in
+ * @return         number of bytes written, or a negative value in case of
+ *                 invalid input
+ */
+int av_base64_decode(uint8_t *out, const char *in, int out_size);
+
+/**
+ * Encode data to base64 and null-terminate.
+ *
+ * @param out      buffer for encoded data
+ * @param out_size size in bytes of the out buffer (including the
+ *                 null terminator), must be at least AV_BASE64_SIZE(in_size)
+ * @param in       input buffer containing the data to encode
+ * @param in_size  size in bytes of the in buffer
+ * @return         out or NULL in case of error
+ */
+char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size);
+
+/**
+ * Calculate the output size needed to base64-encode x bytes to a
+ * null-terminated string.
+ */
+#define AV_BASE64_SIZE(x)  (((x)+2) / 3 * 4 + 1)
+
+ /**
+  * @}
+  */
+
+#endif /* AVUTIL_BASE64_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/blowfish.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,77 @@
+/*
+ * Blowfish algorithm
+ * Copyright (c) 2012 Samuel Pitoiset
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BLOWFISH_H
+#define AVUTIL_BLOWFISH_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_blowfish Blowfish
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#define AV_BF_ROUNDS 16
+
+typedef struct AVBlowfish {
+    uint32_t p[AV_BF_ROUNDS + 2];
+    uint32_t s[4][256];
+} AVBlowfish;
+
+/**
+ * Initialize an AVBlowfish context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param key a key
+ * @param key_len length of the key
+ */
+void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param xl left four bytes halves of input to be encrypted
+ * @param xr right four bytes halves of input to be encrypted
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr,
+                           int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, if NULL ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src,
+                       int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BLOWFISH_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/bprint.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BPRINT_H
+#define AVUTIL_BPRINT_H
+
+#include <stdarg.h>
+
+#include "attributes.h"
+#include "avstring.h"
+
+/**
+ * Define a structure with extra padding to a fixed size
+ * This helps ensuring binary compatibility with future versions.
+ */
+#define FF_PAD_STRUCTURE(size, ...) \
+    __VA_ARGS__ \
+    char reserved_padding[size - sizeof(struct { __VA_ARGS__ })];
+
+/**
+ * Buffer to print data progressively
+ *
+ * The string buffer grows as necessary and is always 0-terminated.
+ * The content of the string is never accessed, and thus is
+ * encoding-agnostic and can even hold binary data.
+ *
+ * Small buffers are kept in the structure itself, and thus require no
+ * memory allocation at all (unless the contents of the buffer is needed
+ * after the structure goes out of scope). This is almost as lightweight as
+ * declaring a local "char buf[512]".
+ *
+ * The length of the string can go beyond the allocated size: the buffer is
+ * then truncated, but the functions still keep account of the actual total
+ * length.
+ *
+ * In other words, buf->len can be greater than buf->size and records the
+ * total length of what would have been to the buffer if there had been
+ * enough memory.
+ *
+ * Append operations do not need to be tested for failure: if a memory
+ * allocation fails, data stop being appended to the buffer, but the length
+ * is still updated. This situation can be tested with
+ * av_bprint_is_complete().
+ *
+ * The size_max field determines several possible behaviours:
+ *
+ * size_max = -1 (= UINT_MAX) or any large value will let the buffer be
+ * reallocated as necessary, with an amortized linear cost.
+ *
+ * size_max = 0 prevents writing anything to the buffer: only the total
+ * length is computed. The write operations can then possibly be repeated in
+ * a buffer with exactly the necessary size
+ * (using size_init = size_max = len + 1).
+ *
+ * size_max = 1 is automatically replaced by the exact size available in the
+ * structure itself, thus ensuring no dynamic memory allocation. The
+ * internal buffer is large enough to hold a reasonable paragraph of text,
+ * such as the current paragraph.
+ */
+typedef struct AVBPrint {
+    FF_PAD_STRUCTURE(1024,
+    char *str;         /**< string so far */
+    unsigned len;      /**< length so far */
+    unsigned size;     /**< allocated memory */
+    unsigned size_max; /**< maximum allocated memory */
+    char reserved_internal_buffer[1];
+    )
+} AVBPrint;
+
+/**
+ * Convenience macros for special values for av_bprint_init() size_max
+ * parameter.
+ */
+#define AV_BPRINT_SIZE_UNLIMITED  ((unsigned)-1)
+#define AV_BPRINT_SIZE_AUTOMATIC  1
+#define AV_BPRINT_SIZE_COUNT_ONLY 0
+
+/**
+ * Init a print buffer.
+ *
+ * @param buf        buffer to init
+ * @param size_init  initial size (including the final 0)
+ * @param size_max   maximum size;
+ *                   0 means do not write anything, just count the length;
+ *                   1 is replaced by the maximum value for automatic storage;
+ *                   any large value means that the internal buffer will be
+ *                   reallocated as needed up to that limit; -1 is converted to
+ *                   UINT_MAX, the largest limit possible.
+ *                   Check also AV_BPRINT_SIZE_* macros.
+ */
+void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max);
+
+/**
+ * Init a print buffer using a pre-existing buffer.
+ *
+ * The buffer will not be reallocated.
+ *
+ * @param buf     buffer structure to init
+ * @param buffer  byte buffer to use for the string data
+ * @param size    size of buffer
+ */
+void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size);
+
+/**
+ * Append a formatted string to a print buffer.
+ */
+void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3);
+
+/**
+ * Append a formatted string to a print buffer.
+ */
+void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg);
+
+/**
+ * Append char c n times to a print buffer.
+ */
+void av_bprint_chars(AVBPrint *buf, char c, unsigned n);
+
+/**
+ * Append data to a print buffer.
+ *
+ * @param buf  bprint buffer to use
+ * @param data pointer to data
+ * @param size size of data
+ */
+void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size);
+
+struct tm;
+/**
+ * Append a formatted date and time to a print buffer.
+ *
+ * @param buf  bprint buffer to use
+ * @param fmt  date and time format string, see strftime()
+ * @param tm   broken-down time structure to translate
+ *
+ * @note due to poor design of the standard strftime function, it may
+ * produce poor results if the format string expands to a very long text and
+ * the bprint buffer is near the limit stated by the size_max option.
+ */
+void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm);
+
+/**
+ * Allocate bytes in the buffer for external use.
+ *
+ * @param[in]  buf          buffer structure
+ * @param[in]  size         required size
+ * @param[out] mem          pointer to the memory area
+ * @param[out] actual_size  size of the memory area after allocation;
+ *                          can be larger or smaller than size
+ */
+void av_bprint_get_buffer(AVBPrint *buf, unsigned size,
+                          unsigned char **mem, unsigned *actual_size);
+
+/**
+ * Reset the string to "" but keep internal allocated data.
+ */
+void av_bprint_clear(AVBPrint *buf);
+
+/**
+ * Test if the print buffer is complete (not truncated).
+ *
+ * It may have been truncated due to a memory allocation failure
+ * or the size_max limit (compare size and size_max if necessary).
+ */
+static inline int av_bprint_is_complete(const AVBPrint *buf)
+{
+    return buf->len < buf->size;
+}
+
+/**
+ * Finalize a print buffer.
+ *
+ * The print buffer can no longer be used afterwards,
+ * but the len and size fields are still valid.
+ *
+ * @arg[out] ret_str  if not NULL, used to return a permanent copy of the
+ *                    buffer contents, or NULL if memory allocation fails;
+ *                    if NULL, the buffer is discarded and freed
+ * @return  0 for success or error code (probably AVERROR(ENOMEM))
+ */
+int av_bprint_finalize(AVBPrint *buf, char **ret_str);
+
+/**
+ * Escape the content in src and append it to dstbuf.
+ *
+ * @param dstbuf        already inited destination bprint buffer
+ * @param src           string containing the text to escape
+ * @param special_chars string containing the special characters which
+ *                      need to be escaped, can be NULL
+ * @param mode          escape mode to employ, see AV_ESCAPE_MODE_* macros.
+ *                      Any unknown value for mode will be considered equivalent to
+ *                      AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without
+ *                      notice.
+ * @param flags         flags which control how to escape, see AV_ESCAPE_FLAG_* macros
+ */
+void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars,
+                      enum AVEscapeMode mode, int flags);
+
+#endif /* AVUTIL_BPRINT_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/bswap.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,109 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * byte swapping routines
+ */
+
+#ifndef AVUTIL_BSWAP_H
+#define AVUTIL_BSWAP_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if   ARCH_AARCH64
+#   include "aarch64/bswap.h"
+#elif ARCH_ARM
+#   include "arm/bswap.h"
+#elif ARCH_AVR32
+#   include "avr32/bswap.h"
+#elif ARCH_SH4
+#   include "sh4/bswap.h"
+#elif ARCH_X86
+#   include "x86/bswap.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+#define AV_BSWAP16C(x) (((x) << 8 & 0xff00)  | ((x) >> 8 & 0x00ff))
+#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16))
+#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32))
+
+#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x)
+
+#ifndef av_bswap16
+static av_always_inline av_const uint16_t av_bswap16(uint16_t x)
+{
+    x= (x>>8) | (x<<8);
+    return x;
+}
+#endif
+
+#ifndef av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+    return AV_BSWAP32C(x);
+}
+#endif
+
+#ifndef av_bswap64
+static inline uint64_t av_const av_bswap64(uint64_t x)
+{
+    return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32);
+}
+#endif
+
+// be2ne ... big-endian to native-endian
+// le2ne ... little-endian to native-endian
+
+#if AV_HAVE_BIGENDIAN
+#define av_be2ne16(x) (x)
+#define av_be2ne32(x) (x)
+#define av_be2ne64(x) (x)
+#define av_le2ne16(x) av_bswap16(x)
+#define av_le2ne32(x) av_bswap32(x)
+#define av_le2ne64(x) av_bswap64(x)
+#define AV_BE2NEC(s, x) (x)
+#define AV_LE2NEC(s, x) AV_BSWAPC(s, x)
+#else
+#define av_be2ne16(x) av_bswap16(x)
+#define av_be2ne32(x) av_bswap32(x)
+#define av_be2ne64(x) av_bswap64(x)
+#define av_le2ne16(x) (x)
+#define av_le2ne32(x) (x)
+#define av_le2ne64(x) (x)
+#define AV_BE2NEC(s, x) AV_BSWAPC(s, x)
+#define AV_LE2NEC(s, x) (x)
+#endif
+
+#define AV_BE2NE16C(x) AV_BE2NEC(16, x)
+#define AV_BE2NE32C(x) AV_BE2NEC(32, x)
+#define AV_BE2NE64C(x) AV_BE2NEC(64, x)
+#define AV_LE2NE16C(x) AV_LE2NEC(16, x)
+#define AV_LE2NE32C(x) AV_LE2NEC(32, x)
+#define AV_LE2NE64C(x) AV_LE2NEC(64, x)
+
+#endif /* AVUTIL_BSWAP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/buffer.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,358 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "atomic.h"
+#include "buffer_internal.h"
+#include "common.h"
+#include "mem.h"
+
+AVBufferRef *av_buffer_create(uint8_t *data, int size,
+                              void (*free)(void *opaque, uint8_t *data),
+                              void *opaque, int flags)
+{
+    AVBufferRef *ref = NULL;
+    AVBuffer    *buf = NULL;
+
+    buf = av_mallocz(sizeof(*buf));
+    if (!buf)
+        return NULL;
+
+    buf->data     = data;
+    buf->size     = size;
+    buf->free     = free ? free : av_buffer_default_free;
+    buf->opaque   = opaque;
+    buf->refcount = 1;
+
+    if (flags & AV_BUFFER_FLAG_READONLY)
+        buf->flags |= BUFFER_FLAG_READONLY;
+
+    ref = av_mallocz(sizeof(*ref));
+    if (!ref) {
+        av_freep(&buf);
+        return NULL;
+    }
+
+    ref->buffer = buf;
+    ref->data   = data;
+    ref->size   = size;
+
+    return ref;
+}
+
+void av_buffer_default_free(void *opaque, uint8_t *data)
+{
+    av_free(data);
+}
+
+AVBufferRef *av_buffer_alloc(int size)
+{
+    AVBufferRef *ret = NULL;
+    uint8_t    *data = NULL;
+
+    data = av_malloc(size);
+    if (!data)
+        return NULL;
+
+    ret = av_buffer_create(data, size, av_buffer_default_free, NULL, 0);
+    if (!ret)
+        av_freep(&data);
+
+    return ret;
+}
+
+AVBufferRef *av_buffer_allocz(int size)
+{
+    AVBufferRef *ret = av_buffer_alloc(size);
+    if (!ret)
+        return NULL;
+
+    memset(ret->data, 0, size);
+    return ret;
+}
+
+AVBufferRef *av_buffer_ref(AVBufferRef *buf)
+{
+    AVBufferRef *ret = av_mallocz(sizeof(*ret));
+
+    if (!ret)
+        return NULL;
+
+    *ret = *buf;
+
+    avpriv_atomic_int_add_and_fetch(&buf->buffer->refcount, 1);
+
+    return ret;
+}
+
+void av_buffer_unref(AVBufferRef **buf)
+{
+    AVBuffer *b;
+
+    if (!buf || !*buf)
+        return;
+    b = (*buf)->buffer;
+    av_freep(buf);
+
+    if (!avpriv_atomic_int_add_and_fetch(&b->refcount, -1)) {
+        b->free(b->opaque, b->data);
+        av_freep(&b);
+    }
+}
+
+int av_buffer_is_writable(const AVBufferRef *buf)
+{
+    if (buf->buffer->flags & AV_BUFFER_FLAG_READONLY)
+        return 0;
+
+    return avpriv_atomic_int_get(&buf->buffer->refcount) == 1;
+}
+
+void *av_buffer_get_opaque(const AVBufferRef *buf)
+{
+    return buf->buffer->opaque;
+}
+
+int av_buffer_get_ref_count(const AVBufferRef *buf)
+{
+    return buf->buffer->refcount;
+}
+
+int av_buffer_make_writable(AVBufferRef **pbuf)
+{
+    AVBufferRef *newbuf, *buf = *pbuf;
+
+    if (av_buffer_is_writable(buf))
+        return 0;
+
+    newbuf = av_buffer_alloc(buf->size);
+    if (!newbuf)
+        return AVERROR(ENOMEM);
+
+    memcpy(newbuf->data, buf->data, buf->size);
+    av_buffer_unref(pbuf);
+    *pbuf = newbuf;
+
+    return 0;
+}
+
+int av_buffer_realloc(AVBufferRef **pbuf, int size)
+{
+    AVBufferRef *buf = *pbuf;
+    uint8_t *tmp;
+
+    if (!buf) {
+        /* allocate a new buffer with av_realloc(), so it will be reallocatable
+         * later */
+        uint8_t *data = av_realloc(NULL, size);
+        if (!data)
+            return AVERROR(ENOMEM);
+
+        buf = av_buffer_create(data, size, av_buffer_default_free, NULL, 0);
+        if (!buf) {
+            av_freep(&data);
+            return AVERROR(ENOMEM);
+        }
+
+        buf->buffer->flags |= BUFFER_FLAG_REALLOCATABLE;
+        *pbuf = buf;
+
+        return 0;
+    } else if (buf->size == size)
+        return 0;
+
+    if (!(buf->buffer->flags & BUFFER_FLAG_REALLOCATABLE) ||
+        !av_buffer_is_writable(buf)) {
+        /* cannot realloc, allocate a new reallocatable buffer and copy data */
+        AVBufferRef *new = NULL;
+
+        av_buffer_realloc(&new, size);
+        if (!new)
+            return AVERROR(ENOMEM);
+
+        memcpy(new->data, buf->data, FFMIN(size, buf->size));
+
+        av_buffer_unref(pbuf);
+        *pbuf = new;
+        return 0;
+    }
+
+    tmp = av_realloc(buf->buffer->data, size);
+    if (!tmp)
+        return AVERROR(ENOMEM);
+
+    buf->buffer->data = buf->data = tmp;
+    buf->buffer->size = buf->size = size;
+    return 0;
+}
+
+AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size))
+{
+    AVBufferPool *pool = av_mallocz(sizeof(*pool));
+    if (!pool)
+        return NULL;
+
+    pool->size     = size;
+    pool->alloc    = alloc ? alloc : av_buffer_alloc;
+
+    avpriv_atomic_int_set(&pool->refcount, 1);
+
+    return pool;
+}
+
+/*
+ * This function gets called when the pool has been uninited and
+ * all the buffers returned to it.
+ */
+static void buffer_pool_free(AVBufferPool *pool)
+{
+    while (pool->pool) {
+        BufferPoolEntry *buf = pool->pool;
+        pool->pool = buf->next;
+
+        buf->free(buf->opaque, buf->data);
+        av_freep(&buf);
+    }
+    av_freep(&pool);
+}
+
+void av_buffer_pool_uninit(AVBufferPool **ppool)
+{
+    AVBufferPool *pool;
+
+    if (!ppool || !*ppool)
+        return;
+    pool   = *ppool;
+    *ppool = NULL;
+
+    if (!avpriv_atomic_int_add_and_fetch(&pool->refcount, -1))
+        buffer_pool_free(pool);
+}
+
+/* remove the whole buffer list from the pool and return it */
+static BufferPoolEntry *get_pool(AVBufferPool *pool)
+{
+    BufferPoolEntry *cur = *(void * volatile *)&pool->pool, *last = NULL;
+
+    while (cur != last) {
+        last = cur;
+        cur = avpriv_atomic_ptr_cas((void * volatile *)&pool->pool, last, NULL);
+        if (!cur)
+            return NULL;
+    }
+
+    return cur;
+}
+
+static void add_to_pool(BufferPoolEntry *buf)
+{
+    AVBufferPool *pool;
+    BufferPoolEntry *cur, *end = buf;
+
+    if (!buf)
+        return;
+    pool = buf->pool;
+
+    while (end->next)
+        end = end->next;
+
+    while (avpriv_atomic_ptr_cas((void * volatile *)&pool->pool, NULL, buf)) {
+        /* pool is not empty, retrieve it and append it to our list */
+        cur = get_pool(pool);
+        end->next = cur;
+        while (end->next)
+            end = end->next;
+    }
+}
+
+static void pool_release_buffer(void *opaque, uint8_t *data)
+{
+    BufferPoolEntry *buf = opaque;
+    AVBufferPool *pool = buf->pool;
+
+    if(CONFIG_MEMORY_POISONING)
+        memset(buf->data, FF_MEMORY_POISON, pool->size);
+
+    add_to_pool(buf);
+    if (!avpriv_atomic_int_add_and_fetch(&pool->refcount, -1))
+        buffer_pool_free(pool);
+}
+
+/* allocate a new buffer and override its free() callback so that
+ * it is returned to the pool on free */
+static AVBufferRef *pool_alloc_buffer(AVBufferPool *pool)
+{
+    BufferPoolEntry *buf;
+    AVBufferRef     *ret;
+
+    ret = pool->alloc(pool->size);
+    if (!ret)
+        return NULL;
+
+    buf = av_mallocz(sizeof(*buf));
+    if (!buf) {
+        av_buffer_unref(&ret);
+        return NULL;
+    }
+
+    buf->data   = ret->buffer->data;
+    buf->opaque = ret->buffer->opaque;
+    buf->free   = ret->buffer->free;
+    buf->pool   = pool;
+
+    ret->buffer->opaque = buf;
+    ret->buffer->free   = pool_release_buffer;
+
+    avpriv_atomic_int_add_and_fetch(&pool->refcount, 1);
+    avpriv_atomic_int_add_and_fetch(&pool->nb_allocated, 1);
+
+    return ret;
+}
+
+AVBufferRef *av_buffer_pool_get(AVBufferPool *pool)
+{
+    AVBufferRef *ret;
+    BufferPoolEntry *buf;
+
+    /* check whether the pool is empty */
+    buf = get_pool(pool);
+    if (!buf && pool->refcount <= pool->nb_allocated) {
+        av_log(NULL, AV_LOG_DEBUG, "Pool race dectected, spining to avoid overallocation and eventual OOM\n");
+        while (!buf && avpriv_atomic_int_get(&pool->refcount) <= avpriv_atomic_int_get(&pool->nb_allocated))
+            buf = get_pool(pool);
+    }
+
+    if (!buf)
+        return pool_alloc_buffer(pool);
+
+    /* keep the first entry, return the rest of the list to the pool */
+    add_to_pool(buf->next);
+    buf->next = NULL;
+
+    ret = av_buffer_create(buf->data, pool->size, pool_release_buffer,
+                           buf, 0);
+    if (!ret) {
+        add_to_pool(buf);
+        return NULL;
+    }
+    avpriv_atomic_int_add_and_fetch(&pool->refcount, 1);
+
+    return ret;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/buffer.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,274 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_buffer
+ * refcounted data buffer API
+ */
+
+#ifndef AVUTIL_BUFFER_H
+#define AVUTIL_BUFFER_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_buffer AVBuffer
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBuffer is an API for reference-counted data buffers.
+ *
+ * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer
+ * represents the data buffer itself; it is opaque and not meant to be accessed
+ * by the caller directly, but only through AVBufferRef. However, the caller may
+ * e.g. compare two AVBuffer pointers to check whether two different references
+ * are describing the same data buffer. AVBufferRef represents a single
+ * reference to an AVBuffer and it is the object that may be manipulated by the
+ * caller directly.
+ *
+ * There are two functions provided for creating a new AVBuffer with a single
+ * reference -- av_buffer_alloc() to just allocate a new buffer, and
+ * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing
+ * reference, additional references may be created with av_buffer_ref().
+ * Use av_buffer_unref() to free a reference (this will automatically free the
+ * data once all the references are freed).
+ *
+ * The convention throughout this API and the rest of FFmpeg is such that the
+ * buffer is considered writable if there exists only one reference to it (and
+ * it has not been marked as read-only). The av_buffer_is_writable() function is
+ * provided to check whether this is true and av_buffer_make_writable() will
+ * automatically create a new writable buffer when necessary.
+ * Of course nothing prevents the calling code from violating this convention,
+ * however that is safe only when all the existing references are under its
+ * control.
+ *
+ * @note Referencing and unreferencing the buffers is thread-safe and thus
+ * may be done from multiple threads simultaneously without any need for
+ * additional locking.
+ *
+ * @note Two different references to the same buffer can point to different
+ * parts of the buffer (i.e. their AVBufferRef.data will not be equal).
+ */
+
+/**
+ * A reference counted buffer type. It is opaque and is meant to be used through
+ * references (AVBufferRef).
+ */
+typedef struct AVBuffer AVBuffer;
+
+/**
+ * A reference to a data buffer.
+ *
+ * The size of this struct is not a part of the public ABI and it is not meant
+ * to be allocated directly.
+ */
+typedef struct AVBufferRef {
+    AVBuffer *buffer;
+
+    /**
+     * The data buffer. It is considered writable if and only if
+     * this is the only reference to the buffer, in which case
+     * av_buffer_is_writable() returns 1.
+     */
+    uint8_t *data;
+    /**
+     * Size of data in bytes.
+     */
+    int      size;
+} AVBufferRef;
+
+/**
+ * Allocate an AVBuffer of the given size using av_malloc().
+ *
+ * @return an AVBufferRef of given size or NULL when out of memory
+ */
+AVBufferRef *av_buffer_alloc(int size);
+
+/**
+ * Same as av_buffer_alloc(), except the returned buffer will be initialized
+ * to zero.
+ */
+AVBufferRef *av_buffer_allocz(int size);
+
+/**
+ * Always treat the buffer as read-only, even when it has only one
+ * reference.
+ */
+#define AV_BUFFER_FLAG_READONLY (1 << 0)
+
+/**
+ * Create an AVBuffer from an existing array.
+ *
+ * If this function is successful, data is owned by the AVBuffer. The caller may
+ * only access data through the returned AVBufferRef and references derived from
+ * it.
+ * If this function fails, data is left untouched.
+ * @param data   data array
+ * @param size   size of data in bytes
+ * @param free   a callback for freeing this buffer's data
+ * @param opaque parameter to be retrieved with av_buffer_get_opaque() or
+ *               passed to the free callback
+ * @param flags  a combination of AV_BUFFER_FLAG_*
+ *
+ * @return an AVBufferRef referring to data on success, NULL on failure.
+ */
+AVBufferRef *av_buffer_create(uint8_t *data, int size,
+                              void (*free)(void *opaque, uint8_t *data),
+                              void *opaque, int flags);
+
+/**
+ * Default free callback, which calls av_free() on the buffer data.
+ * This function is meant to be passed to av_buffer_create(), not called
+ * directly.
+ */
+void av_buffer_default_free(void *opaque, uint8_t *data);
+
+/**
+ * Create a new reference to an AVBuffer.
+ *
+ * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on
+ * failure.
+ */
+AVBufferRef *av_buffer_ref(AVBufferRef *buf);
+
+/**
+ * Free a given reference and automatically free the buffer if there are no more
+ * references to it.
+ *
+ * @param buf the reference to be freed. The pointer is set to NULL on return.
+ */
+void av_buffer_unref(AVBufferRef **buf);
+
+/**
+ * @return 1 if the caller may write to the data referred to by buf (which is
+ * true if and only if buf is the only reference to the underlying AVBuffer).
+ * Return 0 otherwise.
+ * A positive answer is valid until av_buffer_ref() is called on buf.
+ */
+int av_buffer_is_writable(const AVBufferRef *buf);
+
+/**
+ * @return the opaque parameter set by av_buffer_create.
+ */
+void *av_buffer_get_opaque(const AVBufferRef *buf);
+
+int av_buffer_get_ref_count(const AVBufferRef *buf);
+
+/**
+ * Create a writable reference from a given buffer reference, avoiding data copy
+ * if possible.
+ *
+ * @param buf buffer reference to make writable. On success, buf is either left
+ *            untouched, or it is unreferenced and a new writable AVBufferRef is
+ *            written in its place. On failure, buf is left untouched.
+ * @return 0 on success, a negative AVERROR on failure.
+ */
+int av_buffer_make_writable(AVBufferRef **buf);
+
+/**
+ * Reallocate a given buffer.
+ *
+ * @param buf  a buffer reference to reallocate. On success, buf will be
+ *             unreferenced and a new reference with the required size will be
+ *             written in its place. On failure buf will be left untouched. *buf
+ *             may be NULL, then a new buffer is allocated.
+ * @param size required new buffer size.
+ * @return 0 on success, a negative AVERROR on failure.
+ *
+ * @note the buffer is actually reallocated with av_realloc() only if it was
+ * initially allocated through av_buffer_realloc(NULL) and there is only one
+ * reference to it (i.e. the one passed to this function). In all other cases
+ * a new buffer is allocated and the data is copied.
+ */
+int av_buffer_realloc(AVBufferRef **buf, int size);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_bufferpool AVBufferPool
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers.
+ *
+ * Frequently allocating and freeing large buffers may be slow. AVBufferPool is
+ * meant to solve this in cases when the caller needs a set of buffers of the
+ * same size (the most obvious use case being buffers for raw video or audio
+ * frames).
+ *
+ * At the beginning, the user must call av_buffer_pool_init() to create the
+ * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to
+ * get a reference to a new buffer, similar to av_buffer_alloc(). This new
+ * reference works in all aspects the same way as the one created by
+ * av_buffer_alloc(). However, when the last reference to this buffer is
+ * unreferenced, it is returned to the pool instead of being freed and will be
+ * reused for subsequent av_buffer_pool_get() calls.
+ *
+ * When the caller is done with the pool and no longer needs to allocate any new
+ * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable.
+ * Once all the buffers are released, it will automatically be freed.
+ *
+ * Allocating and releasing buffers with this API is thread-safe as long as
+ * either the default alloc callback is used, or the user-supplied one is
+ * thread-safe.
+ */
+
+/**
+ * The buffer pool. This structure is opaque and not meant to be accessed
+ * directly. It is allocated with av_buffer_pool_init() and freed with
+ * av_buffer_pool_uninit().
+ */
+typedef struct AVBufferPool AVBufferPool;
+
+/**
+ * Allocate and initialize a buffer pool.
+ *
+ * @param size size of each buffer in this pool
+ * @param alloc a function that will be used to allocate new buffers when the
+ * pool is empty. May be NULL, then the default allocator will be used
+ * (av_buffer_alloc()).
+ * @return newly created buffer pool on success, NULL on error.
+ */
+AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size));
+
+/**
+ * Mark the pool as being available for freeing. It will actually be freed only
+ * once all the allocated buffers associated with the pool are released. Thus it
+ * is safe to call this function while some of the allocated buffers are still
+ * in use.
+ *
+ * @param pool pointer to the pool to be freed. It will be set to NULL.
+ * @see av_buffer_pool_can_uninit()
+ */
+void av_buffer_pool_uninit(AVBufferPool **pool);
+
+/**
+ * Allocate a new AVBuffer, reusing an old buffer from the pool when available.
+ * This function may be called simultaneously from multiple threads.
+ *
+ * @return a reference to the new buffer on success, NULL on error.
+ */
+AVBufferRef *av_buffer_pool_get(AVBufferPool *pool);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BUFFER_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/buffer_internal.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,94 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BUFFER_INTERNAL_H
+#define AVUTIL_BUFFER_INTERNAL_H
+
+#include <stdint.h>
+
+#include "buffer.h"
+
+/**
+ * The buffer is always treated as read-only.
+ */
+#define BUFFER_FLAG_READONLY      (1 << 0)
+/**
+ * The buffer was av_realloc()ed, so it is reallocatable.
+ */
+#define BUFFER_FLAG_REALLOCATABLE (1 << 1)
+
+struct AVBuffer {
+    uint8_t *data; /**< data described by this buffer */
+    int      size; /**< size of data in bytes */
+
+    /**
+     *  number of existing AVBufferRef instances referring to this buffer
+     */
+    volatile int refcount;
+
+    /**
+     * a callback for freeing the data
+     */
+    void (*free)(void *opaque, uint8_t *data);
+
+    /**
+     * an opaque pointer, to be used by the freeing callback
+     */
+    void *opaque;
+
+    /**
+     * A combination of BUFFER_FLAG_*
+     */
+    int flags;
+};
+
+typedef struct BufferPoolEntry {
+    uint8_t *data;
+
+    /*
+     * Backups of the original opaque/free of the AVBuffer corresponding to
+     * data. They will be used to free the buffer when the pool is freed.
+     */
+    void *opaque;
+    void (*free)(void *opaque, uint8_t *data);
+
+    AVBufferPool *pool;
+    struct BufferPoolEntry * volatile next;
+} BufferPoolEntry;
+
+struct AVBufferPool {
+    /* list of buffers currently available for reuse; accessed through
+     * get_pool()/add_to_pool() */
+    BufferPoolEntry * volatile pool;
+
+    /*
+     * This is used to track when the pool is to be freed.
+     * The pointer to the pool itself held by the caller is considered to
+     * be one reference. Each buffer requested by the caller increases refcount
+     * by one, returning the buffer to the pool decreases it by one.
+     * refcount reaches zero when the buffer has been uninited AND all the
+     * buffers have been released, then it's safe to free the pool and all
+     * the buffers in it.
+     */
+    volatile int refcount;
+
+    /* total number of buffers allocated for this pool so far; compared
+     * against refcount in av_buffer_pool_get() to detect the case where
+     * a buffer is about to be returned by another thread */
+    volatile int nb_allocated;
+
+    int size;                        /* size in bytes of each buffer */
+    AVBufferRef* (*alloc)(int size); /* allocator used when the pool is empty */
+};
+
+#endif /* AVUTIL_BUFFER_INTERNAL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/cast5.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,67 @@
+/*
+ * An implementation of the CAST128 algorithm as mentioned in RFC2144
+ * Copyright (c) 2014 Supraja Meedinti
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CAST5_H
+#define AVUTIL_CAST5_H
+
+#include <stdint.h>
+
+
+/**
+  * @file
+  * @brief Public header for libavutil CAST5 algorithm
+  * @defgroup lavu_cast5 CAST5
+  * @ingroup lavu_crypto
+  * @{
+  */
+
+extern const int av_cast5_size;
+
+struct AVCAST5;
+
+/**
+  * Allocate an AVCAST5 context
+  * To free the struct: av_free(ptr)
+  */
+struct AVCAST5 *av_cast5_alloc(void);
+/**
+  * Initialize an AVCAST5 context.
+  *
+  * @param ctx an AVCAST5 context
+  * @param key a key of 5,6,...16 bytes used for encryption/decryption
+  * @param key_bits number of keybits: possible are 40,48,...,128
+ */
+int av_cast5_init(struct AVCAST5 *ctx, const uint8_t *key, int key_bits);
+
+/**
+  * Encrypt or decrypt a buffer using a previously initialized context
+  *
+  * @param ctx an AVCAST5 context
+  * @param dst destination array, can be equal to src
+  * @param src source array, can be equal to dst
+  * @param count number of 8 byte blocks
+  * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_cast5_crypt(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count,int decrypt);
+/**
+ * @}
+ */
+#endif /* AVUTIL_CAST5_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/channel_layout.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2008 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CHANNEL_LAYOUT_H
+#define AVUTIL_CHANNEL_LAYOUT_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * audio channel layout utility functions
+ */
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * @defgroup channel_masks Audio channel masks
+ *
+ * A channel layout is a 64-bits integer with a bit set for every channel.
+ * The number of bits set must be equal to the number of channels.
+ * The value 0 means that the channel layout is not known.
+ * @note this data structure is not powerful enough to handle channels
+ * combinations that have the same channel multiple times, such as
+ * dual-mono.
+ *
+ * @{
+ */
+#define AV_CH_FRONT_LEFT             0x00000001
+#define AV_CH_FRONT_RIGHT            0x00000002
+#define AV_CH_FRONT_CENTER           0x00000004
+#define AV_CH_LOW_FREQUENCY          0x00000008
+#define AV_CH_BACK_LEFT              0x00000010
+#define AV_CH_BACK_RIGHT             0x00000020
+#define AV_CH_FRONT_LEFT_OF_CENTER   0x00000040
+#define AV_CH_FRONT_RIGHT_OF_CENTER  0x00000080
+#define AV_CH_BACK_CENTER            0x00000100
+#define AV_CH_SIDE_LEFT              0x00000200
+#define AV_CH_SIDE_RIGHT             0x00000400
+#define AV_CH_TOP_CENTER             0x00000800
+#define AV_CH_TOP_FRONT_LEFT         0x00001000
+#define AV_CH_TOP_FRONT_CENTER       0x00002000
+#define AV_CH_TOP_FRONT_RIGHT        0x00004000
+#define AV_CH_TOP_BACK_LEFT          0x00008000
+#define AV_CH_TOP_BACK_CENTER        0x00010000
+#define AV_CH_TOP_BACK_RIGHT         0x00020000
+#define AV_CH_STEREO_LEFT            0x20000000  ///< Stereo downmix.
+#define AV_CH_STEREO_RIGHT           0x40000000  ///< See AV_CH_STEREO_LEFT.
+#define AV_CH_WIDE_LEFT              0x0000000080000000ULL
+#define AV_CH_WIDE_RIGHT             0x0000000100000000ULL
+#define AV_CH_SURROUND_DIRECT_LEFT   0x0000000200000000ULL
+#define AV_CH_SURROUND_DIRECT_RIGHT  0x0000000400000000ULL
+#define AV_CH_LOW_FREQUENCY_2        0x0000000800000000ULL
+
+/** Channel mask value used for AVCodecContext.request_channel_layout
+    to indicate that the user requests the channel order of the decoder output
+    to be the native codec channel order. */
+#define AV_CH_LAYOUT_NATIVE          0x8000000000000000ULL
+
+/**
+ * @}
+ * @defgroup channel_mask_c Audio channel layouts
+ * @{
+ * */
+#define AV_CH_LAYOUT_MONO              (AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_STEREO            (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)
+#define AV_CH_LAYOUT_2POINT1           (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_1               (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_SURROUND          (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_3POINT1           (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_4POINT0           (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_4POINT1           (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_2               (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_QUAD              (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT0           (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_5POINT1           (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_5POINT0_BACK      (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT1_BACK      (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_6POINT0           (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT0_FRONT     (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_HEXAGONAL         (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1           (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_BACK      (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_FRONT     (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_7POINT0           (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT0_FRONT     (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1           (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT1_WIDE      (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_OCTAGONAL         (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_STEREO_DOWNMIX    (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
+
+enum AVMatrixEncoding {
+    AV_MATRIX_ENCODING_NONE,
+    AV_MATRIX_ENCODING_DOLBY,
+    AV_MATRIX_ENCODING_DPLII,
+    AV_MATRIX_ENCODING_DPLIIX,
+    AV_MATRIX_ENCODING_DPLIIZ,
+    AV_MATRIX_ENCODING_DOLBYEX,
+    AV_MATRIX_ENCODING_DOLBYHEADPHONE,
+    AV_MATRIX_ENCODING_NB
+};
+
+/**
+ * Return a channel layout id that matches name, or 0 if no match is found.
+ *
+ * name can be one or several of the following notations,
+ * separated by '+' or '|':
+ * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0,
+ *   5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix);
+ * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC,
+ *   SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR);
+ * - a number of channels, in decimal, optionally followed by 'c', yielding
+ *   the default channel layout for that number of channels (@see
+ *   av_get_default_channel_layout);
+ * - a channel layout mask, in hexadecimal starting with "0x" (see the
+ *   AV_CH_* macros).
+ *
+ * @warning Starting from the next major bump the trailing character
+ * 'c' to specify a number of channels will be required, while a
+ * channel layout mask could also be specified as a decimal number
+ * (if and only if not followed by "c").
+ *
+ * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7"
+ */
+uint64_t av_get_channel_layout(const char *name);
+
+/**
+ * Return a description of a channel layout.
+ * If nb_channels is <= 0, it is guessed from the channel_layout.
+ *
+ * @param buf put here the string containing the channel layout
+ * @param buf_size size in bytes of the buffer
+ */
+void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);
+
+struct AVBPrint;
+/**
+ * Append a description of a channel layout to a bprint buffer.
+ */
+void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout);
+
+/**
+ * Return the number of channels in the channel layout.
+ */
+int av_get_channel_layout_nb_channels(uint64_t channel_layout);
+
+/**
+ * Return default channel layout for a given number of channels.
+ */
+int64_t av_get_default_channel_layout(int nb_channels);
+
+/**
+ * Get the index of a channel in channel_layout.
+ *
+ * @param channel a channel layout describing exactly one channel which must be
+ *                present in channel_layout.
+ *
+ * @return index of channel in channel_layout on success, a negative AVERROR
+ *         on error.
+ */
+int av_get_channel_layout_channel_index(uint64_t channel_layout,
+                                        uint64_t channel);
+
+/**
+ * Get the channel with the given index in channel_layout.
+ */
+uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);
+
+/**
+ * Get the name of a given channel.
+ *
+ * @return channel name on success, NULL on error.
+ */
+const char *av_get_channel_name(uint64_t channel);
+
+/**
+ * Get the description of a given channel.
+ *
+ * @param channel  a channel layout with a single channel
+ * @return  channel description on success, NULL on error
+ */
+const char *av_get_channel_description(uint64_t channel);
+
+/**
+ * Get the value and name of a standard channel layout.
+ *
+ * @param[in]  index   index in an internal list, starting at 0
+ * @param[out] layout  channel layout mask
+ * @param[out] name    name of the layout
+ * @return  0  if the layout exists,
+ *          <0 if index is beyond the limits
+ */
+int av_get_standard_channel_layout(unsigned index, uint64_t *layout,
+                                   const char **name);
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_CHANNEL_LAYOUT_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/colorspace.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,111 @@
+/*
+ * Colorspace conversion defines
+ * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Various defines for YUV<->RGB conversion
+ */
+
+#ifndef AVUTIL_COLORSPACE_H
+#define AVUTIL_COLORSPACE_H
+
+#define SCALEBITS 10
+#define ONE_HALF  (1 << (SCALEBITS - 1))
+#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
+#define YUV_TO_RGB1_CCIR(cb1, cr1)\
+{\
+    cb = (cb1) - 128;\
+    cr = (cr1) - 128;\
+    r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
+    g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
+            ONE_HALF;\
+    b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
+}
+
+#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
+{\
+    y = ((y1) - 16) * FIX(255.0/219.0);\
+    r = cm[(y + r_add) >> SCALEBITS];\
+    g = cm[(y + g_add) >> SCALEBITS];\
+    b = cm[(y + b_add) >> SCALEBITS];\
+}
+
+#define YUV_TO_RGB1(cb1, cr1)\
+{\
+    cb = (cb1) - 128;\
+    cr = (cr1) - 128;\
+    r_add = FIX(1.40200) * cr + ONE_HALF;\
+    g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
+    b_add = FIX(1.77200) * cb + ONE_HALF;\
+}
+
+#define YUV_TO_RGB2(r, g, b, y1)\
+{\
+    y = (y1) << SCALEBITS;\
+    r = cm[(y + r_add) >> SCALEBITS];\
+    g = cm[(y + g_add) >> SCALEBITS];\
+    b = cm[(y + b_add) >> SCALEBITS];\
+}
+
+#define Y_CCIR_TO_JPEG(y)\
+ cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]
+
+#define Y_JPEG_TO_CCIR(y)\
+ (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
+
+#define C_CCIR_TO_JPEG(y)\
+ cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
+
+/* NOTE: the clamp is really necessary! */
+static inline int C_JPEG_TO_CCIR(int y) {
+    /* rescale full-range JPEG chroma towards CCIR-601 levels and clamp
+     * the low end; only the lower bound is clamped here — presumably the
+     * arithmetic cannot exceed the upper bound (NOTE(review): confirm) */
+    y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
+    if (y < 16)
+        y = 16;
+    return y;
+}
+
+
+#define RGB_TO_Y(r, g, b) \
+((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
+  FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
+
+#define RGB_TO_U(r1, g1, b1, shift)\
+(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 +         \
+     FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_V(r1, g1, b1, shift)\
+(((FIX(0.50000) * r1 - FIX(0.41869) * g1 -           \
+   FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_Y_CCIR(r, g, b) \
+((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
+  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
+
+#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
+(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
+     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
+(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
+   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#endif /* AVUTIL_COLORSPACE_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/common.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,469 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C)
+#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS
+#endif
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "version.h"
+#include "libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+#   define AV_NE(be, le) (be)
+#else
+#   define AV_NE(be, le) (le)
+#endif
+
+//rounded division & shift
+#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
+/* assume b>0 */
+#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+/* assume a>0 and b>0 */
+#define FF_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \
+                                                       : ((a) + (1<<(b)) - 1) >> (b))
+#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))
+#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
+
+#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))
+
+/* misc math functions */
+
+/**
+ * Reverse the order of the bits of an 8-bits unsigned integer.
+ */
+#if FF_API_AV_REVERSE
+extern attribute_deprecated const uint8_t av_reverse[256];
+#endif
+
+#ifdef HAVE_AV_CONFIG_H
+#   include "config.h"
+#   include "intmath.h"
+#endif
+
+/* Pull in unguarded fallback defines at the end of this file. */
+#include "common.h"
+
+#ifndef av_log2
+av_const int av_log2(unsigned v);
+#endif
+
+#ifndef av_log2_16bit
+av_const int av_log2_16bit(unsigned v);
+#endif
+
+/**
+ * Clip a signed integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_c(int a, int amin, int amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if      (a < amin) return amin;
+    else if (a > amax) return amax;
+    else               return a;
+}
+
+/**
+ * Clip a signed 64bit integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if      (a < amin) return amin;
+    else if (a > amax) return amax;
+    else               return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-255 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint8_t av_clip_uint8_c(int a)
+{
+    if (a&(~0xFF)) return (-a)>>31;
+    else           return a;
+}
+
+/**
+ * Clip a signed integer value into the -128,127 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int8_t av_clip_int8_c(int a)
+{
+    if ((a+0x80) & ~0xFF) return (a>>31) ^ 0x7F;
+    else                  return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-65535 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint16_t av_clip_uint16_c(int a)
+{
+    if (a&(~0xFFFF)) return (-a)>>31;
+    else             return a;
+}
+
+/**
+ * Clip a signed integer value into the -32768,32767 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int16_t av_clip_int16_c(int a)
+{
+    if ((a+0x8000) & ~0xFFFF) return (a>>31) ^ 0x7FFF;
+    else                      return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)
+{
+    if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF);
+    else                                         return (int32_t)a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param  a value to clip
+ * @param  p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
+{
+    if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);
+    else                   return  a;
+}
+
+/**
+ * Add two signed 32-bit values with saturation.
+ *
+ * @param  a one value
+ * @param  b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_add32_c(int a, int b)
+{
+    return av_clipl_int32((int64_t)a + b);
+}
+
+/**
+ * Add a doubled value to another value with saturation at both stages.
+ *
+ * @param  a first value
+ * @param  b value doubled and added to a
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_dadd32_c(int a, int b)
+{
+    return av_sat_add32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin, float amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if      (a < amin) return amin;
+    else if (a > amax) return amax;
+    else               return a;
+}
+
+/**
+ * Clip a double value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const double av_clipd_c(double a, double amin, double amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if      (a < amin) return amin;
+    else if (a > amax) return amax;
+    else               return a;
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x))
+ * @return computed ceiling of log2(x)
+ */
+static av_always_inline av_const int av_ceil_log2_c(int x)
+{
+    return av_log2((x - 1) << 1);
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount_c(uint32_t x)
+{
+    x -= (x >> 1) & 0x55555555;
+    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+    x = (x + (x >> 4)) & 0x0F0F0F0F;
+    x += x >> 8;
+    return (x + (x >> 16)) & 0x3F;
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount64_c(uint64_t x)
+{
+    return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32));
+}
+
+#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
+#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
+
+/**
+ * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val      Output value, must be an lvalue of type uint32_t.
+ * @param GET_BYTE Expression reading one byte from the input.
+ *                 Evaluated up to 7 times (4 for the currently
+ *                 assigned Unicode range).  With a memory buffer
+ *                 input, this could be *ptr++.
+ * @param ERROR    Expression to be evaluated on invalid input,
+ *                 typically a goto statement.
+ *
+ * @warning ERROR should not contain a loop control statement which
+ * could interact with the internal while loop, and should force an
+ * exit from the macro code (e.g. through a goto or a return) in order
+ * to prevent undefined results.
+ */
+#define GET_UTF8(val, GET_BYTE, ERROR)\
+    val= GET_BYTE;\
+    {\
+        uint32_t top = (val & 128) >> 1;\
+        if ((val & 0xc0) == 0x80 || val >= 0xFE)\
+            ERROR\
+        while (val & top) {\
+            int tmp= GET_BYTE - 128;\
+            if(tmp>>6)\
+                ERROR\
+            val= (val<<6) + tmp;\
+            top <<= 5;\
+        }\
+        val &= (top << 1) - 1;\
+    }
+
+/**
+ * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val       Output value, must be an lvalue of type uint32_t.
+ * @param GET_16BIT Expression returning two bytes of UTF-16 data converted
+ *                  to native byte order.  Evaluated one or two times.
+ * @param ERROR     Expression to be evaluated on invalid input,
+ *                  typically a goto statement.
+ */
+#define GET_UTF16(val, GET_16BIT, ERROR)\
+    val = GET_16BIT;\
+    {\
+        unsigned int hi = val - 0xD800;\
+        if (hi < 0x800) {\
+            val = GET_16BIT - 0xDC00;\
+            if (val > 0x3FFU || hi > 0x3FFU)\
+                ERROR\
+            val += (hi<<10) + 0x10000;\
+        }\
+    }\
+
+/**
+ * @def PUT_UTF8(val, tmp, PUT_BYTE)
+ * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint8_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_BYTE.
+ * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.
+ * It could be a function or a statement, and uses tmp as the input byte.
+ * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be
+ * executed up to 4 times for values in the valid UTF-8 range and up to
+ * 7 times in the general case, depending on the length of the converted
+ * Unicode character.
+ */
+#define PUT_UTF8(val, tmp, PUT_BYTE)\
+    {\
+        int bytes, shift;\
+        uint32_t in = val;\
+        if (in < 0x80) {\
+            tmp = in;\
+            PUT_BYTE\
+        } else {\
+            bytes = (av_log2(in) + 4) / 5;\
+            shift = (bytes - 1) * 6;\
+            tmp = (256 - (256 >> bytes)) | (in >> shift);\
+            PUT_BYTE\
+            while (shift >= 6) {\
+                shift -= 6;\
+                tmp = 0x80 | ((in >> shift) & 0x3f);\
+                PUT_BYTE\
+            }\
+        }\
+    }
+
+/**
+ * @def PUT_UTF16(val, tmp, PUT_16BIT)
+ * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint16_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_16BIT.
+ * @param PUT_16BIT writes the converted UTF-16 data to any proper destination
+ * in desired endianness. It could be a function or a statement, and uses tmp
+ * as the input byte.  For example, PUT_16BIT could be "*output++ = tmp;"
+ * PUT_16BIT will be executed 1 or 2 times depending on input character.
+ */
+#define PUT_UTF16(val, tmp, PUT_16BIT)\
+    {\
+        uint32_t in = val;\
+        if (in < 0x10000) {\
+            tmp = in;\
+            PUT_16BIT\
+        } else {\
+            tmp = 0xD800 | ((in - 0x10000) >> 10);\
+            PUT_16BIT\
+            tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\
+            PUT_16BIT\
+        }\
+    }\
+
+
+
+#include "mem.h"
+
+#ifdef HAVE_AV_CONFIG_H
+#    include "internal.h"
+#endif /* HAVE_AV_CONFIG_H */
+
+#endif /* AVUTIL_COMMON_H */
+
+/*
+ * The following definitions are outside the multiple inclusion guard
+ * to ensure they are immediately available in intmath.h.
+ */
+
+#ifndef av_ceil_log2
+#   define av_ceil_log2     av_ceil_log2_c
+#endif
+#ifndef av_clip
+#   define av_clip          av_clip_c
+#endif
+#ifndef av_clip64
+#   define av_clip64        av_clip64_c
+#endif
+#ifndef av_clip_uint8
+#   define av_clip_uint8    av_clip_uint8_c
+#endif
+#ifndef av_clip_int8
+#   define av_clip_int8     av_clip_int8_c
+#endif
+#ifndef av_clip_uint16
+#   define av_clip_uint16   av_clip_uint16_c
+#endif
+#ifndef av_clip_int16
+#   define av_clip_int16    av_clip_int16_c
+#endif
+#ifndef av_clipl_int32
+#   define av_clipl_int32   av_clipl_int32_c
+#endif
+#ifndef av_clip_uintp2
+#   define av_clip_uintp2   av_clip_uintp2_c
+#endif
+#ifndef av_sat_add32
+#   define av_sat_add32     av_sat_add32_c
+#endif
+#ifndef av_sat_dadd32
+#   define av_sat_dadd32    av_sat_dadd32_c
+#endif
+#ifndef av_clipf
+#   define av_clipf         av_clipf_c
+#endif
+#ifndef av_clipd
+#   define av_clipd         av_clipd_c
+#endif
+#ifndef av_popcount
+#   define av_popcount      av_popcount_c
+#endif
+#ifndef av_popcount64
+#   define av_popcount64    av_popcount64_c
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/cpu.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_H
+#define AVUTIL_CPU_H
+
+#include "attributes.h"
+
+#define AV_CPU_FLAG_FORCE    0x80000000 /* force usage of selected flags (OR) */
+
+    /* lower 16 bits - CPU features */
+#define AV_CPU_FLAG_MMX          0x0001 ///< standard MMX
+#define AV_CPU_FLAG_MMXEXT       0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_MMX2         0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_3DNOW        0x0004 ///< AMD 3DNOW
+#define AV_CPU_FLAG_SSE          0x0008 ///< SSE functions
+#define AV_CPU_FLAG_SSE2         0x0010 ///< PIV SSE2 functions
+#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
+                                        ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_3DNOWEXT     0x0020 ///< AMD 3DNowExt
+#define AV_CPU_FLAG_SSE3         0x0040 ///< Prescott SSE3 functions
+#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster
+                                        ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_SSSE3        0x0080 ///< Conroe SSSE3 functions
+#define AV_CPU_FLAG_ATOM     0x10000000 ///< Atom processor, some SSSE3 instructions are slower
+#define AV_CPU_FLAG_SSE4         0x0100 ///< Penryn SSE4.1 functions
+#define AV_CPU_FLAG_SSE42        0x0200 ///< Nehalem SSE4.2 functions
+#define AV_CPU_FLAG_AVX          0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_XOP          0x0400 ///< Bulldozer XOP functions
+#define AV_CPU_FLAG_FMA4         0x0800 ///< Bulldozer FMA4 functions
+// #if LIBAVUTIL_VERSION_MAJOR <52
+#define AV_CPU_FLAG_CMOV      0x1001000 ///< supports cmov instruction
+// #else
+// #define AV_CPU_FLAG_CMOV         0x1000 ///< supports cmov instruction
+// #endif
+#define AV_CPU_FLAG_AVX2         0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_FMA3        0x10000 ///< Haswell FMA3 functions
+#define AV_CPU_FLAG_BMI1        0x20000 ///< Bit Manipulation Instruction Set 1
+#define AV_CPU_FLAG_BMI2        0x40000 ///< Bit Manipulation Instruction Set 2
+
+#define AV_CPU_FLAG_ALTIVEC      0x0001 ///< standard
+
+#define AV_CPU_FLAG_ARMV5TE      (1 << 0)
+#define AV_CPU_FLAG_ARMV6        (1 << 1)
+#define AV_CPU_FLAG_ARMV6T2      (1 << 2)
+#define AV_CPU_FLAG_VFP          (1 << 3)
+#define AV_CPU_FLAG_VFPV3        (1 << 4)
+#define AV_CPU_FLAG_NEON         (1 << 5)
+#define AV_CPU_FLAG_ARMV8        (1 << 6)
+#define AV_CPU_FLAG_SETEND       (1 <<16)
+
+/**
+ * Return the flags which specify extensions supported by the CPU.
+ * The returned value is affected by av_force_cpu_flags() if that was used
+ * before. So av_get_cpu_flags() can easily be used in an application to
+ * detect the enabled cpu flags.
+ */
+int av_get_cpu_flags(void);
+
+/**
+ * Disables cpu detection and forces the specified flags.
+ * -1 is a special case that disables forcing of specific flags.
+ */
+void av_force_cpu_flags(int flags);
+
+/**
+ * Set a mask on flags returned by av_get_cpu_flags().
+ * This function is mainly useful for testing.
+ * Please use av_force_cpu_flags() and av_get_cpu_flags() instead, which are more flexible.
+ *
+ * @warning this function is not thread safe.
+ */
+attribute_deprecated void av_set_cpu_flags_mask(int mask);
+
+/**
+ * Parse CPU flags from a string.
+ *
+ * The returned flags contain the specified flags as well as related unspecified flags.
+ *
+ * This function exists only for compatibility with libav.
+ * Please use av_parse_cpu_caps() when possible.
+ * @return a combination of AV_CPU_* flags, negative on error.
+ */
+attribute_deprecated
+int av_parse_cpu_flags(const char *s);
+
+/**
+ * Parse CPU caps from a string and update the given AV_CPU_* flags based on that.
+ *
+ * @return negative on error.
+ */
+int av_parse_cpu_caps(unsigned *flags, const char *s);
+
+/**
+ * @return the number of logical CPU cores present.
+ */
+int av_cpu_count(void);
+
+#endif /* AVUTIL_CPU_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/cpu_internal.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,34 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_INTERNAL_H
+#define AVUTIL_CPU_INTERNAL_H
+
+#include "cpu.h"
+
+#define CPUEXT_SUFFIX(flags, suffix, cpuext)                            \
+    (HAVE_ ## cpuext ## suffix && ((flags) & AV_CPU_FLAG_ ## cpuext))
+
+#define CPUEXT(flags, cpuext) CPUEXT_SUFFIX(flags, , cpuext)
+
+int ff_get_cpu_flags_aarch64(void);
+int ff_get_cpu_flags_arm(void);
+int ff_get_cpu_flags_ppc(void);
+int ff_get_cpu_flags_x86(void);
+
+#endif /* AVUTIL_CPU_INTERNAL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/crc.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,86 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CRC_H
+#define AVUTIL_CRC_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include "attributes.h"
+
+/**
+ * @defgroup lavu_crc32 CRC32
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+typedef uint32_t AVCRC;
+
+typedef enum {
+    AV_CRC_8_ATM,
+    AV_CRC_16_ANSI,
+    AV_CRC_16_CCITT,
+    AV_CRC_32_IEEE,
+    AV_CRC_32_IEEE_LE,  /**< reversed bitorder version of AV_CRC_32_IEEE */
+    AV_CRC_16_ANSI_LE,  /**< reversed bitorder version of AV_CRC_16_ANSI */
+    AV_CRC_24_IEEE = 12,
+    AV_CRC_MAX,         /**< Not part of public API! Do not use outside libavutil. */
+}AVCRCId;
+
+/**
+ * Initialize a CRC table.
+ * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024
+ * @param le If 1, the lowest bit represents the coefficient for the highest
+ *           exponent of the corresponding polynomial (both for poly and
+ *           actual CRC).
+ *           If 0, you must swap the CRC parameter and the result of av_crc
+ *           if you need the standard representation (can be simplified in
+ *           most cases to e.g. bswap16):
+ *           av_bswap32(crc << (32-bits))
+ * @param bits number of bits for the CRC
+ * @param poly generator polynomial without the x**bits coefficient, in the
+ *             representation as specified by le
+ * @param ctx_size size of ctx in bytes
+ * @return <0 on failure
+ */
+int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);
+
+/**
+ * Get an initialized standard CRC table.
+ * @param crc_id ID of a standard CRC
+ * @return a pointer to the CRC table or NULL on failure
+ */
+const AVCRC *av_crc_get_table(AVCRCId crc_id);
+
+/**
+ * Calculate the CRC of a block.
+ * @param crc CRC of previous blocks if any or initial value for CRC
+ * @return CRC updated with the data from the given block
+ *
+ * @see av_crc_init() "le" parameter
+ */
+uint32_t av_crc(const AVCRC *ctx, uint32_t crc,
+                const uint8_t *buffer, size_t length) av_pure;
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_CRC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/des.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,61 @@
+/*
+ * DES encryption/decryption
+ * Copyright (c) 2007 Reimar Doeffinger
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_DES_H
+#define AVUTIL_DES_H
+
+#include <stdint.h>
+
+struct AVDES {
+    uint64_t round_keys[3][16];
+    int triple_des;
+};
+
+/**
+ * @brief Initializes an AVDES context.
+ *
+ * @param key_bits must be 64 or 192
+ * @param decrypt 0 for encryption/CBC-MAC, 1 for decryption
+ */
+int av_des_init(struct AVDES *d, const uint8_t *key, int key_bits, int decrypt);
+
+/**
+ * @brief Encrypts / decrypts using the DES algorithm.
+ *
+ * @param count number of 8 byte blocks
+ * @param dst destination array, can be equal to src, must be 8-byte aligned
+ * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used,
+ *           must be 8-byte aligned
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_des_crypt(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
+
+/**
+ * @brief Calculates CBC-MAC using the DES algorithm.
+ *
+ * @param count number of 8 byte blocks
+ * @param dst destination array, can be equal to src, must be 8-byte aligned
+ * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL
+ */
+void av_des_mac(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count);
+
+#endif /* AVUTIL_DES_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/dict.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,178 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Public dictionary API.
+ * @deprecated
+ *  AVDictionary is provided for compatibility with libav. It is both in
+ *  implementation as well as API inefficient. It does not scale and is
+ *  extremely slow with large dictionaries.
+ *  It is recommended that new code uses our tree container from tree.c/h
+ *  where applicable, which uses AVL trees to achieve O(log n) performance.
+ */
+
+#ifndef AVUTIL_DICT_H
+#define AVUTIL_DICT_H
+
+#include <stdint.h>
+
+#include "version.h"
+
+/**
+ * @addtogroup lavu_dict AVDictionary
+ * @ingroup lavu_data
+ *
+ * @brief Simple key:value store
+ *
+ * @{
+ * Dictionaries are used for storing key:value pairs. To create
+ * an AVDictionary, simply pass an address of a NULL pointer to
+ * av_dict_set(). NULL can be used as an empty dictionary wherever
+ * a pointer to an AVDictionary is required.
+ * Use av_dict_get() to retrieve an entry or iterate over all
+ * entries and finally av_dict_free() to free the dictionary
+ * and all its contents.
+ *
+ @code
+   AVDictionary *d = NULL;           // "create" an empty dictionary
+   AVDictionaryEntry *t = NULL;
+
+   av_dict_set(&d, "foo", "bar", 0); // add an entry
+
+   char *k = av_strdup("key");       // if your strings are already allocated,
+   char *v = av_strdup("value");     // you can avoid copying them like this
+   av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
+
+   while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) {
+       <....>                             // iterate over all entries in d
+   }
+   av_dict_free(&d);
+ @endcode
+ *
+ */
+
+#define AV_DICT_MATCH_CASE      1   /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */
+#define AV_DICT_IGNORE_SUFFIX   2   /**< Return first entry in a dictionary whose first part corresponds to the search key,
+                                         ignoring the suffix of the found key string. Only relevant in av_dict_get(). */
+#define AV_DICT_DONT_STRDUP_KEY 4   /**< Take ownership of a key that's been
+                                         allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_STRDUP_VAL 8   /**< Take ownership of a value that's been
+                                         allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_OVERWRITE 16   ///< Don't overwrite existing entries.
+#define AV_DICT_APPEND         32   /**< If the entry already exists, append to it.  Note that no
+                                      delimiter is added, the strings are simply concatenated. */
+
+typedef struct AVDictionaryEntry {
+    char *key;
+    char *value;
+} AVDictionaryEntry;
+
+typedef struct AVDictionary AVDictionary;
+
+/**
+ * Get a dictionary entry with matching key.
+ *
+ * The returned entry key or value must not be changed, or it will
+ * cause undefined behavior.
+ *
+ * To iterate through all the dictionary entries, you can set the matching key
+ * to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag.
+ *
+ * @param prev Set to the previous matching element to find the next.
+ *             If set to NULL the first matching element is returned.
+ * @param key matching key
+ * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved
+ * @return found entry or NULL in case no matching entry was found in the dictionary
+ */
+AVDictionaryEntry *av_dict_get(FF_CONST_AVUTIL53 AVDictionary *m, const char *key,
+                               const AVDictionaryEntry *prev, int flags);
+
+/**
+ * Get number of entries in dictionary.
+ *
+ * @param m dictionary
+ * @return  number of entries in dictionary
+ */
+int av_dict_count(const AVDictionary *m);
+
+/**
+ * Set the given entry in *pm, overwriting an existing entry.
+ *
+ * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,
+ * these arguments will be freed on error.
+ *
+ * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key entry key to add to *pm (will be av_strduped depending on flags)
+ * @param value entry value to add to *pm (will be av_strduped depending on flags).
+ *        Passing a NULL value will cause an existing entry to be deleted.
+ * @return >= 0 on success otherwise an error code <0
+ */
+int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
+
+/**
+ * Convenience wrapper for av_dict_set that converts the value to a string
+ * and stores it.
+ *
+ * Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
+ */
+int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags);
+
+/**
+ * Parse the key/value pairs list and add the parsed entries to a dictionary.
+ *
+ * In case of failure, all the successfully set entries are stored in
+ * *pm. You may need to manually free the created dictionary.
+ *
+ * @param key_val_sep  a 0-terminated list of characters used to separate
+ *                     key from value
+ * @param pairs_sep    a 0-terminated list of characters used to separate
+ *                     two pairs from each other
+ * @param flags        flags to use when adding to dictionary.
+ *                     AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL
+ *                     are ignored since the key/value tokens will always
+ *                     be duplicated.
+ * @return             0 on success, negative AVERROR code on failure
+ */
+int av_dict_parse_string(AVDictionary **pm, const char *str,
+                         const char *key_val_sep, const char *pairs_sep,
+                         int flags);
+
+/**
+ * Copy entries from one AVDictionary struct into another.
+ * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL,
+ *            this function will allocate a struct for you and put it in *dst
+ * @param src pointer to source AVDictionary struct
+ * @param flags flags to use when setting entries in *dst
+ * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
+ */
+void av_dict_copy(AVDictionary **dst, FF_CONST_AVUTIL53 AVDictionary *src, int flags);
+
+/**
+ * Free all the memory allocated for an AVDictionary struct
+ * and all keys and values.
+ */
+void av_dict_free(AVDictionary **m);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DICT_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/display.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014 Vittorio Giovara <vittorio.giovara@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_DISPLAY_H
+#define AVUTIL_DISPLAY_H
+
+#include <stdint.h>
+
+/**
+ * The display transformation matrix specifies an affine transformation that
+ * should be applied to video frames for correct presentation. It is compatible
+ * with the matrices stored in the ISO/IEC 14496-12 container format.
+ *
+ * The data is a 3x3 matrix represented as a 9-element array:
+ *
+ *                                  | a b u |
+ *   (a, b, u, c, d, v, x, y, w) -> | c d v |
+ *                                  | x y w |
+ *
+ * All numbers are stored in native endianness, as 16.16 fixed-point values,
+ * except for u, v and w, which are stored as 2.30 fixed-point values.
+ *
+ * The transformation maps a point (p, q) in the source (pre-transformation)
+ * frame to the point (p', q') in the destination (post-transformation) frame as
+ * follows:
+ *               | a b u |
+ *   (p, q, 1) . | c d v | = z * (p', q', 1)
+ *               | x y w |
+ *
+ * The transformation can also be more explicitly written in components as
+ * follows:
+ *   p' = (a * p + c * q + x) / z;
+ *   q' = (b * p + d * q + y) / z;
+ *   z  =  u * p + v * q + w
+ */
+
+/**
+ * Extract the rotation component of the transformation matrix.
+ *
+ * @param matrix the transformation matrix
+ * @return the angle (in degrees) by which the transformation rotates the frame.
+ *         The angle will be in range [-180.0, 180.0], or NaN if the matrix is
+ *         singular.
+ *
+ * @note floating point numbers are inherently inexact, so callers are
+ *       recommended to round the return value to nearest integer before use.
+ */
+double av_display_rotation_get(const int32_t matrix[9]);
+
+/**
+ * Initialize a transformation matrix describing a pure rotation by the
+ * specified angle (in degrees).
+ *
+ * @param matrix an allocated transformation matrix (will be fully overwritten
+ *               by this function)
+ * @param angle rotation angle in degrees.
+ */
+void av_display_rotation_set(int32_t matrix[9], double angle);
+
+/**
+ * Flip the input matrix horizontally and/or vertically.
+ *
+ * @param matrix an allocated transformation matrix
+ * @param hflip whether the matrix should be flipped horizontally
+ * @param vflip whether the matrix should be flipped vertically
+ */
+void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip);
+
+#endif /* AVUTIL_DISPLAY_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/downmix_info.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2014 Tim Walker <tdskywalker@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_DOWNMIX_INFO_H
+#define AVUTIL_DOWNMIX_INFO_H
+
+#include "frame.h"
+
+/**
+ * @file
+ * audio downmix metadata
+ */
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * @defgroup downmix_info Audio downmix metadata
+ * @{
+ */
+
+/**
+ * Possible downmix types.
+ */
+enum AVDownmixType {
+    AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */
+    AV_DOWNMIX_TYPE_LORO,    /**< Lo/Ro 2-channel downmix (Stereo). */
+    AV_DOWNMIX_TYPE_LTRT,    /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */
+    AV_DOWNMIX_TYPE_DPLII,   /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */
+    AV_DOWNMIX_TYPE_NB       /**< Number of downmix types. Not part of ABI. */
+};
+
+/**
+ * This structure describes optional metadata relevant to a downmix procedure.
+ *
+ * All fields are set by the decoder to the value indicated in the audio
+ * bitstream (if present), or to a "sane" default otherwise.
+ */
+typedef struct AVDownmixInfo {
+    /**
+     * Type of downmix preferred by the mastering engineer.
+     */
+    enum AVDownmixType preferred_downmix_type;
+
+    /**
+     * Absolute scale factor representing the nominal level of the center
+     * channel during a regular downmix.
+     */
+    double center_mix_level;
+
+    /**
+     * Absolute scale factor representing the nominal level of the center
+     * channel during an Lt/Rt compatible downmix.
+     */
+    double center_mix_level_ltrt;
+
+    /**
+     * Absolute scale factor representing the nominal level of the surround
+     * channels during a regular downmix.
+     */
+    double surround_mix_level;
+
+    /**
+     * Absolute scale factor representing the nominal level of the surround
+     * channels during an Lt/Rt compatible downmix.
+     */
+    double surround_mix_level_ltrt;
+
+    /**
+     * Absolute scale factor representing the level at which the LFE data is
+     * mixed into L/R channels during downmixing.
+     */
+    double lfe_mix_level;
+} AVDownmixInfo;
+
+/**
+ * Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing.
+ *
+ * If the side data is absent, it is created and added to the frame.
+ *
+ * @param frame the frame for which the side data is to be obtained or created
+ *
+ * @return the AVDownmixInfo structure to be edited by the caller, or NULL if
+ *         the structure cannot be allocated.
+ */
+AVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame);
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DOWNMIX_INFO_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/dynarray.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,70 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_DYNARRAY_H
+#define AVUTIL_DYNARRAY_H
+
+#include "log.h"
+#include "mem.h"
+
+/**
+ * Add an element to a dynamic array.
+ *
+ * The array is reallocated when its number of elements reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the size is incremented.
+ *
+ * @param av_size_max  maximum size of the array, usually the MAX macro of
+ *                     the type of the size
+ * @param av_elt_size  size of the elements in the array, in bytes
+ * @param av_array     pointer to the array, must be an lvalue
+ * @param av_size      size of the array, must be an integer lvalue
+ * @param av_success   statement to execute on success; at this point, the
+ *                     size variable is not yet incremented
+ * @param av_failure   statement to execute on failure; if this happens, the
+ *                     array and size are not changed; the statement can end
+ *                     with a return or a goto
+ */
+#define AV_DYNARRAY_ADD(av_size_max, av_elt_size, av_array, av_size, \
+                        av_success, av_failure) \
+    do { \
+        size_t av_size_new = (av_size); \
+        if (!((av_size) & ((av_size) - 1))) { \
+            av_size_new = (av_size) ? (av_size) << 1 : 1; \
+            if (av_size_new > (av_size_max) / (av_elt_size)) { \
+                av_size_new = 0; \
+            } else { \
+                void *av_array_new = \
+                    av_realloc((av_array), av_size_new * (av_elt_size)); \
+                if (!av_array_new) \
+                    av_size_new = 0; \
+                else \
+                    (av_array) = av_array_new; \
+            } \
+        } \
+        if (av_size_new) { \
+            { av_success } \
+            (av_size)++; \
+        } else { \
+            av_failure \
+        } \
+    } while (0)
+
+#endif /* AVUTIL_DYNARRAY_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/error.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,126 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * error code definitions
+ */
+
+#ifndef AVUTIL_ERROR_H
+#define AVUTIL_ERROR_H
+
+#include <errno.h>
+#include <stddef.h>
+
+/**
+ * @addtogroup lavu_error
+ *
+ * @{
+ */
+
+
+/* error handling */
+#if EDOM > 0
+#define AVERROR(e) (-(e))   ///< Returns a negative error code from a POSIX error code, to return from library functions.
+#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value.
+#else
+/* Some platforms have E* and errno already negated. */
+#define AVERROR(e) (e)
+#define AVUNERROR(e) (e)
+#endif
+
+#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d))
+
+#define AVERROR_BSF_NOT_FOUND      FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found
+#define AVERROR_BUG                FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2
+#define AVERROR_BUFFER_TOO_SMALL   FFERRTAG( 'B','U','F','S') ///< Buffer too small
+#define AVERROR_DECODER_NOT_FOUND  FFERRTAG(0xF8,'D','E','C') ///< Decoder not found
+#define AVERROR_DEMUXER_NOT_FOUND  FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found
+#define AVERROR_ENCODER_NOT_FOUND  FFERRTAG(0xF8,'E','N','C') ///< Encoder not found
+#define AVERROR_EOF                FFERRTAG( 'E','O','F',' ') ///< End of file
+#define AVERROR_EXIT               FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted
+#define AVERROR_EXTERNAL           FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library
+#define AVERROR_FILTER_NOT_FOUND   FFERRTAG(0xF8,'F','I','L') ///< Filter not found
+#define AVERROR_INVALIDDATA        FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input
+#define AVERROR_MUXER_NOT_FOUND    FFERRTAG(0xF8,'M','U','X') ///< Muxer not found
+#define AVERROR_OPTION_NOT_FOUND   FFERRTAG(0xF8,'O','P','T') ///< Option not found
+#define AVERROR_PATCHWELCOME       FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome
+#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found
+
+#define AVERROR_STREAM_NOT_FOUND   FFERRTAG(0xF8,'S','T','R') ///< Stream not found
+/**
+ * This is semantically identical to AVERROR_BUG;
+ * it was introduced in Libav after our AVERROR_BUG and with a modified value.
+ */
+#define AVERROR_BUG2               FFERRTAG( 'B','U','G',' ')
+#define AVERROR_UNKNOWN            FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library
+#define AVERROR_EXPERIMENTAL       (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
+#define AVERROR_INPUT_CHANGED      (-0x636e6701) ///< Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED)
+#define AVERROR_OUTPUT_CHANGED     (-0x636e6702) ///< Output changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_INPUT_CHANGED)
+/* HTTP & RTSP errors */
+#define AVERROR_HTTP_BAD_REQUEST   FFERRTAG(0xF8,'4','0','0')
+#define AVERROR_HTTP_UNAUTHORIZED  FFERRTAG(0xF8,'4','0','1')
+#define AVERROR_HTTP_FORBIDDEN     FFERRTAG(0xF8,'4','0','3')
+#define AVERROR_HTTP_NOT_FOUND     FFERRTAG(0xF8,'4','0','4')
+#define AVERROR_HTTP_OTHER_4XX     FFERRTAG(0xF8,'4','X','X')
+#define AVERROR_HTTP_SERVER_ERROR  FFERRTAG(0xF8,'5','X','X')
+
+#define AV_ERROR_MAX_STRING_SIZE 64
+
+/**
+ * Put a description of the AVERROR code errnum in errbuf.
+ * In case of failure the global variable errno is set to indicate the
+ * error. Even in case of failure av_strerror() will print a generic
+ * error message indicating the errnum provided to errbuf.
+ *
+ * @param errnum      error code to describe
+ * @param errbuf      buffer to which description is written
+ * @param errbuf_size the size in bytes of errbuf
+ * @return 0 on success, a negative value if a description for errnum
+ * cannot be found
+ */
+int av_strerror(int errnum, char *errbuf, size_t errbuf_size);
+
+/**
+ * Fill the provided buffer with a string containing an error string
+ * corresponding to the AVERROR code errnum.
+ *
+ * @param errbuf         a buffer
+ * @param errbuf_size    size in bytes of errbuf
+ * @param errnum         error code to describe
+ * @return the buffer in input, filled with the error description
+ * @see av_strerror()
+ */
+static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum)
+{
+    av_strerror(errnum, errbuf, errbuf_size);
+    return errbuf;
+}
+
+/**
+ * Convenience macro, the return value should be used only directly in
+ * function arguments but never stand-alone.
+ */
+#define av_err2str(errnum) \
+    av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum)
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ERROR_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/eval.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple arithmetic expression evaluator
+ */
+
+#ifndef AVUTIL_EVAL_H
+#define AVUTIL_EVAL_H
+
+#include "avutil.h"
+
+typedef struct AVExpr AVExpr;
+
+/**
+ * Parse and evaluate an expression.
+ * Note, this is significantly slower than av_expr_eval().
+ *
+ * @param res a pointer to a double where the result value of
+ * the expression is put, or NAN in case of error
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param const_values a zero terminated array of values for the identifiers from const_names
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @param log_ctx parent logging context
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse_and_eval(double *res, const char *s,
+                           const char * const *const_names, const double *const_values,
+                           const char * const *func1_names, double (* const *funcs1)(void *, double),
+                           const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+                           void *opaque, int log_offset, void *log_ctx);
+
+/**
+ * Parse an expression.
+ *
+ * @param expr a pointer where an AVExpr containing the parsed
+ * value is put in case of successful parsing, or NULL otherwise.
+ * The pointed to AVExpr must be freed with av_expr_free() by the user
+ * when it is not needed anymore.
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param log_ctx parent logging context
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse(AVExpr **expr, const char *s,
+                  const char * const *const_names,
+                  const char * const *func1_names, double (* const *funcs1)(void *, double),
+                  const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+                  int log_offset, void *log_ctx);
+
+/**
+ * Evaluate a previously parsed expression.
+ *
+ * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @return the value of the expression
+ */
+double av_expr_eval(AVExpr *e, const double *const_values, void *opaque);
+
+/**
+ * Free a parsed expression previously created with av_expr_parse().
+ */
+void av_expr_free(AVExpr *e);
+
+/**
+ * Parse the string in numstr and return its value as a double. If
+ * the string is empty, contains only whitespaces, or does not contain
+ * an initial substring that has the expected syntax for a
+ * floating-point number, no conversion is performed. In this case,
+ * returns a value of zero and the value returned in tail is the value
+ * of numstr.
+ *
+ * @param numstr a string representing a number, may contain one of
+ * the International System number postfixes, for example 'K', 'M',
+ * 'G'. If 'i' is appended after the postfix, powers of 2 are used
+ * instead of powers of 10. The 'B' postfix multiplies the value by
+ * 8, and can be appended after another postfix or used alone. This
+ * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix.
+ * @param tail if non-NULL puts here the pointer to the char next
+ * after the last parsed character
+ */
+double av_strtod(const char *numstr, char **tail);
+
+#endif /* AVUTIL_EVAL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/ffversion.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,4 @@
+#ifndef AVUTIL_FFVERSION_H
+#define AVUTIL_FFVERSION_H
+#define FFMPEG_VERSION "N-67732-g530eb6a"
+#endif /* AVUTIL_FFVERSION_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/fifo.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,158 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * a very simple circular buffer FIFO implementation
+ */
+
+#ifndef AVUTIL_FIFO_H
+#define AVUTIL_FIFO_H
+
+#include <stdint.h>
+#include "avutil.h"
+#include "attributes.h"
+
+typedef struct AVFifoBuffer {
+    uint8_t *buffer;
+    uint8_t *rptr, *wptr, *end;
+    uint32_t rndx, wndx;
+} AVFifoBuffer;
+
+/**
+ * Initialize an AVFifoBuffer.
+ * @param size of FIFO
+ * @return AVFifoBuffer or NULL in case of memory allocation failure
+ */
+AVFifoBuffer *av_fifo_alloc(unsigned int size);
+
+/**
+ * Initialize an AVFifoBuffer.
+ * @param nmemb number of elements
+ * @param size  size of the single element
+ * @return AVFifoBuffer or NULL in case of memory allocation failure
+ */
+AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size);
+
+/**
+ * Free an AVFifoBuffer.
+ * @param f AVFifoBuffer to free
+ */
+void av_fifo_free(AVFifoBuffer *f);
+
+/**
+ * Free an AVFifoBuffer and reset pointer to NULL.
+ * @param f AVFifoBuffer to free
+ */
+void av_fifo_freep(AVFifoBuffer **f);
+
+/**
+ * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied.
+ * @param f AVFifoBuffer to reset
+ */
+void av_fifo_reset(AVFifoBuffer *f);
+
+/**
+ * Return the amount of data in bytes in the AVFifoBuffer, that is the
+ * amount of data you can read from it.
+ * @param f AVFifoBuffer to read from
+ * @return size
+ */
+int av_fifo_size(FF_CONST_AVUTIL53 AVFifoBuffer *f);
+
+/**
+ * Return the amount of space in bytes in the AVFifoBuffer, that is the
+ * amount of data you can write into it.
+ * @param f AVFifoBuffer to write into
+ * @return size
+ */
+int av_fifo_space(FF_CONST_AVUTIL53 AVFifoBuffer *f);
+
+/**
+ * Feed data from an AVFifoBuffer to a user-supplied callback.
+ * @param f AVFifoBuffer to read from
+ * @param buf_size number of bytes to read
+ * @param func generic read function
+ * @param dest data destination
+ */
+int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
+
+/**
+ * Feed data from a user-supplied callback to an AVFifoBuffer.
+ * @param f AVFifoBuffer to write to
+ * @param src data source; non-const since it may be used as a
+ * modifiable context by the function defined in func
+ * @param size number of bytes to write
+ * @param func generic write function; the first parameter is src,
+ * the second is dest_buf, the third is dest_buf_size.
+ * func must return the number of bytes written to dest_buf, or <= 0 to
+ * indicate no more data available to write.
+ * If func is NULL, src is interpreted as a simple byte array for source data.
+ * @return the number of bytes written to the FIFO
+ */
+int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));
+
+/**
+ * Resize an AVFifoBuffer.
+ * In case of reallocation failure, the old FIFO is kept unchanged.
+ *
+ * @param f AVFifoBuffer to resize
+ * @param size new AVFifoBuffer size in bytes
+ * @return <0 for failure, >=0 otherwise
+ */
+int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
+
+/**
+ * Enlarge an AVFifoBuffer.
+ * In case of reallocation failure, the old FIFO is kept unchanged.
+ * The new fifo size may be larger than the requested size.
+ *
+ * @param f AVFifoBuffer to resize
+ * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size()
+ * @return <0 for failure, >=0 otherwise
+ */
+int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space);
+
+/**
+ * Read and discard the specified amount of data from an AVFifoBuffer.
+ * @param f AVFifoBuffer to read from
+ * @param size amount of data to read in bytes
+ */
+void av_fifo_drain(AVFifoBuffer *f, int size);
+
+/**
+ * Return a pointer to the data stored in a FIFO buffer at a certain offset.
+ * The FIFO buffer is not modified.
+ *
+ * @param f    AVFifoBuffer to peek at, f must be non-NULL
+ * @param offs an offset in bytes, its absolute value must be less
+ *             than the used buffer size or the returned pointer will
+ *             point outside the buffer data.
+ *             The used buffer size can be checked with av_fifo_size().
+ */
+static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
+{
+    uint8_t *ptr = f->rptr + offs;
+    if (ptr >= f->end)
+        ptr = f->buffer + (ptr - f->end);
+    else if (ptr < f->buffer)
+        ptr = f->end - (f->buffer - ptr);
+    return ptr;
+}
+
+#endif /* AVUTIL_FIFO_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/file.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,66 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_FILE_H
+#define AVUTIL_FILE_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+
+/**
+ * @file
+ * Misc file utilities.
+ */
+
+/**
+ * Read the file with name filename, and put its content in a newly
+ * allocated buffer or map it with mmap() when available.
+ * In case of success set *bufptr to the read or mmapped buffer, and
+ * *size to the size in bytes of the buffer in *bufptr.
+ * The returned buffer must be released with av_file_unmap().
+ *
+ * @param log_offset loglevel offset used for logging
+ * @param log_ctx context used for logging
+ * @return a non negative number in case of success, a negative value
+ * corresponding to an AVERROR error code in case of failure
+ */
+int av_file_map(const char *filename, uint8_t **bufptr, size_t *size,
+                int log_offset, void *log_ctx);
+
+/**
+ * Unmap or free the buffer bufptr created by av_file_map().
+ *
+ * @param size size in bytes of bufptr, must be the same as returned
+ * by av_file_map()
+ */
+void av_file_unmap(uint8_t *bufptr, size_t size);
+
+/**
+ * Wrapper to work around the lack of mkstemp() on mingw.
+ * Also, tries to create file in /tmp first, if possible.
+ * *prefix can be a character constant; *filename will be allocated internally.
+ * @return file descriptor of opened file (or -1 on error)
+ * and opened file name in **filename.
+ * @note On very old libcs it is necessary to set a secure umask before
+ *       calling this, av_tempfile() can't call umask itself as it is used in
+ *       libraries and could interfere with the calling application.
+ */
+int av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx);
+
+#endif /* AVUTIL_FILE_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/fixed_dsp.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2012
+ *      MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author:  Nedeljko Babic (nbabic@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_FIXED_DSP_H
+#define AVUTIL_FIXED_DSP_H
+
+#include <stdint.h>
+#include "attributes.h"
+#include "common.h"
+#include "libavcodec/mathops.h"
+
+typedef struct AVFixedDSPContext {
+    /**
+     * Overlap/add with window function.
+     * Used primarily by MDCT-based audio codecs.
+     * Source and destination vectors must overlap exactly or not at all.
+     *
+     * @param dst  result vector
+     *             constraints: 16-byte aligned
+     * @param src0 first source vector
+     *             constraints: 16-byte aligned
+     * @param src1 second source vector
+     *             constraints: 16-byte aligned
+     * @param win  half-window vector
+     *             constraints: 16-byte aligned
+     * @param len  length of vector
+     *             constraints: multiple of 4
+     * @param bits scaling parameter
+     *
+     */
+    void (*vector_fmul_window_scaled)(int16_t *dst, const int32_t *src0, const int32_t *src1, const int32_t *win, int len, uint8_t bits);
+
+    /**
+     * Overlap/add with window function.
+     * Used primarily by MDCT-based audio codecs.
+     * Source and destination vectors must overlap exactly or not at all.
+     *
+     * @param dst  result vector
+     *             constraints: 32-byte aligned
+     * @param src0 first source vector
+     *             constraints: 16-byte aligned
+     * @param src1 second source vector
+     *             constraints: 16-byte aligned
+     * @param win  half-window vector
+     *             constraints: 16-byte aligned
+     * @param len  length of vector
+     *             constraints: multiple of 4
+     */
+    void (*vector_fmul_window)(int32_t *dst, const int32_t *src0, const int32_t *src1, const int32_t *win, int len);
+
+} AVFixedDSPContext;
+
+/**
+ * Allocate and initialize a fixed DSP context.
+ * note: should be freed with an av_free call when no longer needed.
+ *
+ * @param strict  setting to non-zero avoids using functions which may not be IEEE-754 compliant
+ */
+AVFixedDSPContext * avpriv_alloc_fixed_dsp(int strict);
+
+/**
+ * Calculate the square root
+ *
+ * @param x    input fixed point number
+ *
+ * @param bits format of fixed point number (32 - bits).bits
+ *
+ * note: input is normalized to (0, 1) fixed point value
+ */
+
+static av_always_inline int fixed_sqrt(int x, int bits)
+{
+    int retval, bit_mask, guess, square, i;
+    int64_t accu;
+    int shift1 = 30 - bits;
+    int shift2 = bits - 15;
+
+    if (shift1 > 0) retval = ff_sqrt(x << shift1);
+    else retval = ff_sqrt(x >> -shift1);
+
+    if (shift2 > 0) {
+        retval = retval << shift2;
+        bit_mask = (1 << (shift2 - 1));
+
+        for (i=0; i<shift2; i++){
+            guess = retval + bit_mask;
+            accu = (int64_t)guess * guess;
+            square = (int)((accu + bit_mask) >> bits);
+            if (x >= square)
+                retval += bit_mask;
+            bit_mask >>= 1;
+        }
+
+    }
+    else retval >>= (-shift2);
+
+    return retval;
+}
+
+#endif /* AVUTIL_FIXED_DSP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/float_dsp.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,188 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_FLOAT_DSP_H
+#define AVUTIL_FLOAT_DSP_H
+
+#include "config.h"
+
+typedef struct AVFloatDSPContext {
+    /**
+     * Calculate the product of two vectors of floats and store the result in
+     * a vector of floats.
+     *
+     * @param dst  output vector
+     *             constraints: 32-byte aligned
+     * @param src0 first input vector
+     *             constraints: 32-byte aligned
+     * @param src1 second input vector
+     *             constraints: 32-byte aligned
+     * @param len  number of elements in the input
+     *             constraints: multiple of 16
+     */
+    void (*vector_fmul)(float *dst, const float *src0, const float *src1,
+                        int len);
+
+    /**
+     * Multiply a vector of floats by a scalar float and add to
+     * destination vector.  Source and destination vectors must
+     * overlap exactly or not at all.
+     *
+     * @param dst result vector
+     *            constraints: 32-byte aligned
+     * @param src input vector
+     *            constraints: 32-byte aligned
+     * @param mul scalar value
+     * @param len length of vector
+     *            constraints: multiple of 16
+     */
+    void (*vector_fmac_scalar)(float *dst, const float *src, float mul,
+                               int len);
+
+    /**
+     * Multiply a vector of floats by a scalar float.  Source and
+     * destination vectors must overlap exactly or not at all.
+     *
+     * @param dst result vector
+     *            constraints: 16-byte aligned
+     * @param src input vector
+     *            constraints: 16-byte aligned
+     * @param mul scalar value
+     * @param len length of vector
+     *            constraints: multiple of 4
+     */
+    void (*vector_fmul_scalar)(float *dst, const float *src, float mul,
+                               int len);
+
+    /**
+     * Multiply a vector of double by a scalar double.  Source and
+     * destination vectors must overlap exactly or not at all.
+     *
+     * @param dst result vector
+     *            constraints: 32-byte aligned
+     * @param src input vector
+     *            constraints: 32-byte aligned
+     * @param mul scalar value
+     * @param len length of vector
+     *            constraints: multiple of 8
+     */
+    void (*vector_dmul_scalar)(double *dst, const double *src, double mul,
+                               int len);
+
+    /**
+     * Overlap/add with window function.
+     * Used primarily by MDCT-based audio codecs.
+     * Source and destination vectors must overlap exactly or not at all.
+     *
+     * @param dst  result vector
+     *             constraints: 16-byte aligned
+     * @param src0 first source vector
+     *             constraints: 16-byte aligned
+     * @param src1 second source vector
+     *             constraints: 16-byte aligned
+     * @param win  half-window vector
+     *             constraints: 16-byte aligned
+     * @param len  length of vector
+     *             constraints: multiple of 4
+     */
+    void (*vector_fmul_window)(float *dst, const float *src0,
+                               const float *src1, const float *win, int len);
+
+    /**
+     * Calculate the product of two vectors of floats, add a third vector of
+     * floats and store the result in a vector of floats.
+     *
+     * @param dst  output vector
+     *             constraints: 32-byte aligned
+     * @param src0 first input vector
+     *             constraints: 32-byte aligned
+     * @param src1 second input vector
+     *             constraints: 32-byte aligned
+     * @param src2 third input vector
+     *             constraints: 32-byte aligned
+     * @param len  number of elements in the input
+     *             constraints: multiple of 16
+     */
+    void (*vector_fmul_add)(float *dst, const float *src0, const float *src1,
+                            const float *src2, int len);
+
+    /**
+     * Calculate the product of two vectors of floats, and store the result
+     * in a vector of floats. The second vector of floats is iterated over
+     * in reverse order.
+     *
+     * @param dst  output vector
+     *             constraints: 32-byte aligned
+     * @param src0 first input vector
+     *             constraints: 32-byte aligned
+     * @param src1 second input vector
+     *             constraints: 32-byte aligned
+     * @param len  number of elements in the input
+     *             constraints: multiple of 16
+     */
+    void (*vector_fmul_reverse)(float *dst, const float *src0,
+                                const float *src1, int len);
+
+    /**
+     * Calculate the sum and difference of two vectors of floats.
+     *
+     * @param v1  first input vector, sum output, 16-byte aligned
+     * @param v2  second input vector, difference output, 16-byte aligned
+     * @param len length of vectors, multiple of 4
+     */
+    void (*butterflies_float)(float *av_restrict v1, float *av_restrict v2, int len);
+
+    /**
+     * Calculate the scalar product of two vectors of floats.
+     *
+     * @param v1  first vector, 16-byte aligned
+     * @param v2  second vector, 16-byte aligned
+     * @param len length of vectors, multiple of 4
+     *
+     * @return sum of elementwise products
+     */
+    float (*scalarproduct_float)(const float *v1, const float *v2, int len);
+} AVFloatDSPContext;
+
+/**
+ * Return the scalar product of two vectors.
+ *
+ * @param v1  first input vector
+ * @param v2  second input vector
+ * @param len number of elements
+ *
+ * @return sum of elementwise products
+ */
+float avpriv_scalarproduct_float_c(const float *v1, const float *v2, int len);
+
+/**
+ * Initialize a float DSP context.
+ *
+ * @param fdsp    float DSP context
+ * @param strict  setting to non-zero avoids using functions which may not be IEEE-754 compliant
+ */
+void avpriv_float_dsp_init(AVFloatDSPContext *fdsp, int strict);
+
+
+void ff_float_dsp_init_aarch64(AVFloatDSPContext *fdsp);
+void ff_float_dsp_init_arm(AVFloatDSPContext *fdsp);
+void ff_float_dsp_init_ppc(AVFloatDSPContext *fdsp, int strict);
+void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp);
+void ff_float_dsp_init_mips(AVFloatDSPContext *fdsp);
+
+#endif /* AVUTIL_FLOAT_DSP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/frame.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,661 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "channel_layout.h"
+#include "avassert.h"
+#include "buffer.h"
+#include "common.h"
+#include "dict.h"
+#include "frame.h"
+#include "imgutils.h"
+#include "mem.h"
+#include "samplefmt.h"
+
+MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
+MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
+MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
+MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
+MAKE_ACCESSORS(AVFrame, frame, int,     channels)
+MAKE_ACCESSORS(AVFrame, frame, int,     sample_rate)
+MAKE_ACCESSORS(AVFrame, frame, int,     decode_error_flags)
+MAKE_ACCESSORS(AVFrame, frame, int,     pkt_size)
+MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
+MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
+
+#define CHECK_CHANNELS_CONSISTENCY(frame) \
+    av_assert2(!(frame)->channel_layout || \
+               (frame)->channels == \
+               av_get_channel_layout_nb_channels((frame)->channel_layout))
+
+static void get_frame_defaults(AVFrame *frame)
+{
+    memset(frame, 0, sizeof(*frame));
+
+    frame->pts                   =
+    frame->pkt_dts               =
+    frame->pkt_pts               = AV_NOPTS_VALUE;
+    av_frame_set_best_effort_timestamp(frame, AV_NOPTS_VALUE);
+    av_frame_set_pkt_duration         (frame, 0);
+    av_frame_set_pkt_pos              (frame, -1);
+    av_frame_set_pkt_size             (frame, -1);
+    frame->key_frame           = 1;
+    frame->sample_aspect_ratio = (AVRational){ 0, 1 };
+    frame->format              = -1; /* unknown */
+    frame->color_primaries     = AVCOL_PRI_UNSPECIFIED;
+    frame->color_trc           = AVCOL_TRC_UNSPECIFIED;
+    frame->colorspace          = AVCOL_SPC_UNSPECIFIED;
+    frame->color_range         = AVCOL_RANGE_UNSPECIFIED;
+    frame->chroma_location     = AVCHROMA_LOC_UNSPECIFIED;
+}
+
+static void free_side_data(AVFrameSideData **ptr_sd)
+{
+    AVFrameSideData *sd = *ptr_sd;
+
+    av_freep(&sd->data);
+    av_freep(ptr_sd);
+}
+
+AVFrame *av_frame_alloc(void)
+{
+    AVFrame *frame = av_mallocz(sizeof(*frame));
+
+    if (!frame)
+        return NULL;
+
+    get_frame_defaults(frame);
+
+    return frame;
+}
+
+void av_frame_free(AVFrame **frame)
+{
+    if (!frame || !*frame)
+        return;
+
+    av_frame_unref(*frame);
+    av_freep(frame);
+}
+
+void av_frame_unref(AVFrame *frame)
+{
+    int i;
+
+    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
+        av_buffer_unref(&frame->buf[i]);
+
+    get_frame_defaults(frame);
+}
+
+void av_frame_move_ref(AVFrame *dst, AVFrame *src)
+{
+    *dst = *src;
+    memset(src, 0, sizeof(*src));
+    get_frame_defaults(src);
+}
+
+int av_frame_ref(AVFrame *dst, const AVFrame *src)
+{
+    int i, ret = 0;
+
+    dst->format         = src->format;
+    dst->width          = src->width;
+    dst->height         = src->height;
+    dst->channels       = src->channels;
+    dst->channel_layout = src->channel_layout;
+    dst->nb_samples     = src->nb_samples;
+
+    /* duplicate the frame data if it's not refcounted */
+    if (!src->buf[0]) {
+        abort();
+#if 0
+        ret = av_frame_get_buffer(dst, 32);
+        if (ret < 0)
+            return ret;
+
+        ret = av_frame_copy(dst, src);
+        if (ret < 0)
+            av_frame_unref(dst);
+
+        return ret;
+#endif
+    }
+
+    /* ref the buffers */
+    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
+        if (!src->buf[i])
+            continue;
+        dst->buf[i] = av_buffer_ref(src->buf[i]);
+        if (!dst->buf[i]) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+
+    memcpy(dst->data,     src->data,     sizeof(src->data));
+    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
+
+    return 0;
+
+fail:
+    av_frame_unref(dst);
+    return ret;
+}
+
+#if 0
+static int get_video_buffer(AVFrame *frame, int align)
+{
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+    int ret, i;
+
+    if (!desc)
+        return AVERROR(EINVAL);
+
+    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
+        return ret;
+
+    if (!frame->linesize[0]) {
+        for(i=1; i<=align; i+=i) {
+            ret = av_image_fill_linesizes(frame->linesize, frame->format,
+                                          FFALIGN(frame->width, i));
+            if (ret < 0)
+                return ret;
+            if (!(frame->linesize[0] & (align-1)))
+                break;
+        }
+
+        for (i = 0; i < 4 && frame->linesize[i]; i++)
+            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
+    }
+
+    for (i = 0; i < 4 && frame->linesize[i]; i++) {
+        int h = FFALIGN(frame->height, 32);
+        if (i == 1 || i == 2)
+            h = FF_CEIL_RSHIFT(h, desc->log2_chroma_h);
+
+        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
+        if (!frame->buf[i])
+            goto fail;
+
+        frame->data[i] = frame->buf[i]->data;
+    }
+    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
+        av_buffer_unref(&frame->buf[1]);
+        frame->buf[1] = av_buffer_alloc(1024);
+        if (!frame->buf[1])
+            goto fail;
+        frame->data[1] = frame->buf[1]->data;
+    }
+
+    frame->extended_data = frame->data;
+
+    return 0;
+fail:
+    av_frame_unref(frame);
+    return AVERROR(ENOMEM);
+}
+
+static int get_audio_buffer(AVFrame *frame, int align)
+{
+    int channels;
+    int planar   = av_sample_fmt_is_planar(frame->format);
+    int planes;
+    int ret, i;
+
+    if (!frame->channels)
+        frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);
+
+    channels = frame->channels;
+    planes = planar ? channels : 1;
+
+    CHECK_CHANNELS_CONSISTENCY(frame);
+    if (!frame->linesize[0]) {
+        ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
+                                         frame->nb_samples, frame->format,
+                                         align);
+        if (ret < 0)
+            return ret;
+    }
+
+    if (planes > AV_NUM_DATA_POINTERS) {
+        frame->extended_data = av_mallocz_array(planes,
+                                          sizeof(*frame->extended_data));
+        frame->extended_buf  = av_mallocz_array((planes - AV_NUM_DATA_POINTERS),
+                                          sizeof(*frame->extended_buf));
+        if (!frame->extended_data || !frame->extended_buf) {
+            av_freep(&frame->extended_data);
+            av_freep(&frame->extended_buf);
+            return AVERROR(ENOMEM);
+        }
+        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
+    } else
+        frame->extended_data = frame->data;
+
+    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
+        frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
+        if (!frame->buf[i]) {
+            av_frame_unref(frame);
+            return AVERROR(ENOMEM);
+        }
+        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
+    }
+    for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
+        frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
+        if (!frame->extended_buf[i]) {
+            av_frame_unref(frame);
+            return AVERROR(ENOMEM);
+        }
+        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
+    }
+    return 0;
+
+}
+
+int av_frame_get_buffer(AVFrame *frame, int align)
+{
+    if (frame->format < 0)
+        return AVERROR(EINVAL);
+
+    if (frame->width > 0 && frame->height > 0)
+        return get_video_buffer(frame, align);
+    else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
+        return get_audio_buffer(frame, align);
+
+    return AVERROR(EINVAL);
+}
+
+
+int av_frame_ref(AVFrame *dst, const AVFrame *src)
+{
+    int i, ret = 0;
+
+    dst->format         = src->format;
+    dst->width          = src->width;
+    dst->height         = src->height;
+    dst->channels       = src->channels;
+    dst->channel_layout = src->channel_layout;
+    dst->nb_samples     = src->nb_samples;
+
+    ret = av_frame_copy_props(dst, src);
+    if (ret < 0)
+        return ret;
+
+    /* duplicate the frame data if it's not refcounted */
+    if (!src->buf[0]) {
+        ret = av_frame_get_buffer(dst, 32);
+        if (ret < 0)
+            return ret;
+
+        ret = av_frame_copy(dst, src);
+        if (ret < 0)
+            av_frame_unref(dst);
+
+        return ret;
+    }
+
+    /* ref the buffers */
+    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
+        if (!src->buf[i])
+            continue;
+        dst->buf[i] = av_buffer_ref(src->buf[i]);
+        if (!dst->buf[i]) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+
+    if (src->extended_buf) {
+        dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
+                                       src->nb_extended_buf);
+        if (!dst->extended_buf) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+        dst->nb_extended_buf = src->nb_extended_buf;
+
+        for (i = 0; i < src->nb_extended_buf; i++) {
+            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
+            if (!dst->extended_buf[i]) {
+                ret = AVERROR(ENOMEM);
+                goto fail;
+            }
+        }
+    }
+
+    /* duplicate extended data */
+    if (src->extended_data != src->data) {
+        int ch = src->channels;
+
+        if (!ch) {
+            ret = AVERROR(EINVAL);
+            goto fail;
+        }
+        CHECK_CHANNELS_CONSISTENCY(src);
+
+        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
+        if (!dst->extended_data) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
+    } else
+        dst->extended_data = dst->data;
+
+    memcpy(dst->data,     src->data,     sizeof(src->data));
+    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
+
+    return 0;
+
+fail:
+    av_frame_unref(dst);
+    return ret;
+}
+
+AVFrame *av_frame_clone(const AVFrame *src)
+{
+    AVFrame *ret = av_frame_alloc();
+
+    if (!ret)
+        return NULL;
+
+    if (av_frame_ref(ret, src) < 0)
+        av_frame_free(&ret);
+
+    return ret;
+}
+
+int av_frame_is_writable(AVFrame *frame)
+{
+    int i, ret = 1;
+
+    /* assume non-refcounted frames are not writable */
+    if (!frame->buf[0])
+        return 0;
+
+    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
+        if (frame->buf[i])
+            ret &= !!av_buffer_is_writable(frame->buf[i]);
+    for (i = 0; i < frame->nb_extended_buf; i++)
+        ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
+
+    return ret;
+}
+
+int av_frame_make_writable(AVFrame *frame)
+{
+    AVFrame tmp;
+    int ret;
+
+    if (!frame->buf[0])
+        return AVERROR(EINVAL);
+
+    if (av_frame_is_writable(frame))
+        return 0;
+
+    memset(&tmp, 0, sizeof(tmp));
+    tmp.format         = frame->format;
+    tmp.width          = frame->width;
+    tmp.height         = frame->height;
+    tmp.channels       = frame->channels;
+    tmp.channel_layout = frame->channel_layout;
+    tmp.nb_samples     = frame->nb_samples;
+    ret = av_frame_get_buffer(&tmp, 32);
+    if (ret < 0)
+        return ret;
+
+    ret = av_frame_copy(&tmp, frame);
+    if (ret < 0) {
+        av_frame_unref(&tmp);
+        return ret;
+    }
+
+    ret = av_frame_copy_props(&tmp, frame);
+    if (ret < 0) {
+        av_frame_unref(&tmp);
+        return ret;
+    }
+
+    av_frame_unref(frame);
+
+    *frame = tmp;
+    if (tmp.data == tmp.extended_data)
+        frame->extended_data = frame->data;
+
+    return 0;
+}
+
+int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
+{
+    int i;
+
+    dst->key_frame              = src->key_frame;
+    dst->pict_type              = src->pict_type;
+    dst->sample_aspect_ratio    = src->sample_aspect_ratio;
+    dst->pts                    = src->pts;
+    dst->repeat_pict            = src->repeat_pict;
+    dst->interlaced_frame       = src->interlaced_frame;
+    dst->top_field_first        = src->top_field_first;
+    dst->palette_has_changed    = src->palette_has_changed;
+    dst->sample_rate            = src->sample_rate;
+    dst->opaque                 = src->opaque;
+    dst->pkt_pts                = src->pkt_pts;
+    dst->pkt_dts                = src->pkt_dts;
+    dst->pkt_pos                = src->pkt_pos;
+    dst->pkt_size               = src->pkt_size;
+    dst->pkt_duration           = src->pkt_duration;
+    dst->reordered_opaque       = src->reordered_opaque;
+    dst->quality                = src->quality;
+    dst->best_effort_timestamp  = src->best_effort_timestamp;
+    dst->coded_picture_number   = src->coded_picture_number;
+    dst->display_picture_number = src->display_picture_number;
+    dst->flags                  = src->flags;
+    dst->decode_error_flags     = src->decode_error_flags;
+    dst->color_primaries        = src->color_primaries;
+    dst->color_trc              = src->color_trc;
+    dst->colorspace             = src->colorspace;
+    dst->color_range            = src->color_range;
+    dst->chroma_location        = src->chroma_location;
+
+    memcpy(dst->error, src->error, sizeof(dst->error));
+
+    for (i = 0; i < src->nb_side_data; i++) {
+        const AVFrameSideData *sd_src = src->side_data[i];
+        AVFrameSideData *sd_dst;
+        if (   sd_src->type == AV_FRAME_DATA_PANSCAN
+            && (src->width != dst->width || src->height != dst->height))
+            continue;
+        sd_dst = av_frame_new_side_data(dst, sd_src->type,
+                                                         sd_src->size);
+        if (!sd_dst) {
+            for (i = 0; i < dst->nb_side_data; i++) {
+                free_side_data(&dst->side_data[i]);
+            }
+            av_freep(&dst->side_data);
+            return AVERROR(ENOMEM);
+        }
+        memcpy(sd_dst->data, sd_src->data, sd_src->size);
+    }
+
+    return 0;
+}
+
+AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
+{
+    uint8_t *data;
+    int planes, i;
+
+    if (frame->nb_samples) {
+        int channels = frame->channels;
+        if (!channels)
+            return NULL;
+        CHECK_CHANNELS_CONSISTENCY(frame);
+        planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
+    } else
+        planes = 4;
+
+    if (plane < 0 || plane >= planes || !frame->extended_data[plane])
+        return NULL;
+    data = frame->extended_data[plane];
+
+    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
+        AVBufferRef *buf = frame->buf[i];
+        if (data >= buf->data && data < buf->data + buf->size)
+            return buf;
+    }
+    for (i = 0; i < frame->nb_extended_buf; i++) {
+        AVBufferRef *buf = frame->extended_buf[i];
+        if (data >= buf->data && data < buf->data + buf->size)
+            return buf;
+    }
+    return NULL;
+}
+
+AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
+                                        enum AVFrameSideDataType type,
+                                        int size)
+{
+    AVFrameSideData *ret, **tmp;
+
+    if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
+        return NULL;
+
+    tmp = av_realloc(frame->side_data,
+                     (frame->nb_side_data + 1) * sizeof(*frame->side_data));
+    if (!tmp)
+        return NULL;
+    frame->side_data = tmp;
+
+    ret = av_mallocz(sizeof(*ret));
+    if (!ret)
+        return NULL;
+
+    ret->data = av_malloc(size);
+    if (!ret->data) {
+        av_freep(&ret);
+        return NULL;
+    }
+
+    ret->size = size;
+    ret->type = type;
+
+    frame->side_data[frame->nb_side_data++] = ret;
+
+    return ret;
+}
+
+AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
+                                        enum AVFrameSideDataType type)
+{
+    int i;
+
+    for (i = 0; i < frame->nb_side_data; i++) {
+        if (frame->side_data[i]->type == type)
+            return frame->side_data[i];
+    }
+    return NULL;
+}
+
+static int frame_copy_video(AVFrame *dst, const AVFrame *src)
+{
+    const uint8_t *src_data[4];
+    int i, planes;
+
+    if (dst->width  < src->width ||
+        dst->height < src->height)
+        return AVERROR(EINVAL);
+
+    planes = av_pix_fmt_count_planes(dst->format);
+    for (i = 0; i < planes; i++)
+        if (!dst->data[i] || !src->data[i])
+            return AVERROR(EINVAL);
+
+    memcpy(src_data, src->data, sizeof(src_data));
+    av_image_copy(dst->data, dst->linesize,
+                  src_data, src->linesize,
+                  dst->format, src->width, src->height);
+
+    return 0;
+}
+
+static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
+{
+    int planar   = av_sample_fmt_is_planar(dst->format);
+    int channels = dst->channels;
+    int planes   = planar ? channels : 1;
+    int i;
+
+    if (dst->nb_samples     != src->nb_samples ||
+        dst->channels       != src->channels ||
+        dst->channel_layout != src->channel_layout)
+        return AVERROR(EINVAL);
+
+    CHECK_CHANNELS_CONSISTENCY(src);
+
+    for (i = 0; i < planes; i++)
+        if (!dst->extended_data[i] || !src->extended_data[i])
+            return AVERROR(EINVAL);
+
+    av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
+                    dst->nb_samples, channels, dst->format);
+
+    return 0;
+}
+
+int av_frame_copy(AVFrame *dst, const AVFrame *src)
+{
+    if (dst->format != src->format || dst->format < 0)
+        return AVERROR(EINVAL);
+
+    if (dst->width > 0 && dst->height > 0)
+        return frame_copy_video(dst, src);
+    else if (dst->nb_samples > 0 && dst->channel_layout)
+        return frame_copy_audio(dst, src);
+
+    return AVERROR(EINVAL);
+}
+
+void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
+{
+    int i;
+
+    for (i = 0; i < frame->nb_side_data; i++) {
+        AVFrameSideData *sd = frame->side_data[i];
+        if (sd->type == type) {
+            free_side_data(&frame->side_data[i]);
+            frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
+            frame->nb_side_data--;
+        }
+    }
+}
+
+const char *av_frame_side_data_name(enum AVFrameSideDataType type)
+{
+    switch(type) {
+    case AV_FRAME_DATA_PANSCAN:         return "AVPanScan";
+    case AV_FRAME_DATA_A53_CC:          return "ATSC A53 Part 4 Closed Captions";
+    case AV_FRAME_DATA_STEREO3D:        return "Stereoscopic 3d metadata";
+    case AV_FRAME_DATA_MATRIXENCODING:  return "AVMatrixEncoding";
+    case AV_FRAME_DATA_DOWNMIX_INFO:    return "Metadata relevant to a downmix procedure";
+    case AV_FRAME_DATA_REPLAYGAIN:      return "AVReplayGain";
+    case AV_FRAME_DATA_DISPLAYMATRIX:   return "3x3 displaymatrix";
+    case AV_FRAME_DATA_MOTION_VECTORS:  return "Motion vectors";
+    }
+    return NULL;
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/frame.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,703 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_frame
+ * reference-counted frame API
+ */
+
+#ifndef AVUTIL_FRAME_H
+#define AVUTIL_FRAME_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+#include "buffer.h"
+#include "dict.h"
+#include "rational.h"
+#include "samplefmt.h"
+#include "pixfmt.h"
+#include "version.h"
+
+
+/**
+ * @defgroup lavu_frame AVFrame
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVFrame is an abstraction for reference-counted raw multimedia data.
+ */
+
+enum AVFrameSideDataType {
+    /**
+     * The data is the AVPanScan struct defined in libavcodec.
+     */
+    AV_FRAME_DATA_PANSCAN,
+    /**
+     * ATSC A53 Part 4 Closed Captions.
+     * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.
+     * The number of bytes of CC data is AVFrameSideData.size.
+     */
+    AV_FRAME_DATA_A53_CC,
+    /**
+     * Stereoscopic 3d metadata.
+     * The data is the AVStereo3D struct defined in libavutil/stereo3d.h.
+     */
+    AV_FRAME_DATA_STEREO3D,
+    /**
+     * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
+     */
+    AV_FRAME_DATA_MATRIXENCODING,
+    /**
+     * Metadata relevant to a downmix procedure.
+     * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
+     */
+    AV_FRAME_DATA_DOWNMIX_INFO,
+    /**
+     * ReplayGain information in the form of the AVReplayGain struct.
+     */
+    AV_FRAME_DATA_REPLAYGAIN,
+    /**
+     * This side data contains a 3x3 transformation matrix describing an affine
+     * transformation that needs to be applied to the frame for correct
+     * presentation.
+     *
+     * See libavutil/display.h for a detailed description of the data.
+     */
+    AV_FRAME_DATA_DISPLAYMATRIX,
+    /**
+     * Active Format Description data consisting of a single byte as specified
+     * in ETSI TS 101 154 using AVActiveFormatDescription enum.
+     */
+    AV_FRAME_DATA_AFD,
+    /**
+     * Motion vectors exported by some codecs (on demand through the export_mvs
+     * flag set in the libavcodec AVCodecContext flags2 option).
+     * The data is the AVMotionVector struct defined in
+     * libavutil/motion_vector.h.
+     */
+    AV_FRAME_DATA_MOTION_VECTORS,
+    /**
+     * Recommends skipping the specified number of samples. This is exported
+     * only if the "skip_manual" AVOption is set in libavcodec.
+     * This has the same format as AV_PKT_DATA_SKIP_SAMPLES.
+     * @code
+     * u32le number of samples to skip from start of this packet
+     * u32le number of samples to skip from end of this packet
+     * u8    reason for start skip
+     * u8    reason for end   skip (0=padding silence, 1=convergence)
+     * @endcode
+     */
+    AV_FRAME_DATA_SKIP_SAMPLES,
+};
+
+enum AVActiveFormatDescription {
+    AV_AFD_SAME         = 8,
+    AV_AFD_4_3          = 9,
+    AV_AFD_16_9         = 10,
+    AV_AFD_14_9         = 11,
+    AV_AFD_4_3_SP_14_9  = 13,
+    AV_AFD_16_9_SP_14_9 = 14,
+    AV_AFD_SP_4_3       = 15,
+};
+
+typedef struct AVFrameSideData {
+    enum AVFrameSideDataType type;
+    uint8_t *data;
+    int      size;
+    AVDictionary *metadata;
+} AVFrameSideData;
+
+/**
+ * This structure describes decoded (raw) audio or video data.
+ *
+ * AVFrame must be allocated using av_frame_alloc(). Note that this only
+ * allocates the AVFrame itself, the buffers for the data must be managed
+ * through other means (see below).
+ * AVFrame must be freed with av_frame_free().
+ *
+ * AVFrame is typically allocated once and then reused multiple times to hold
+ * different data (e.g. a single AVFrame to hold frames received from a
+ * decoder). In such a case, av_frame_unref() will free any references held by
+ * the frame and reset it to its original clean state before it
+ * is reused again.
+ *
+ * The data described by an AVFrame is usually reference counted through the
+ * AVBuffer API. The underlying buffer references are stored in AVFrame.buf /
+ * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at
+ * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,
+ * every single data plane must be contained in one of the buffers in
+ * AVFrame.buf or AVFrame.extended_buf.
+ * There may be a single buffer for all the data, or one separate buffer for
+ * each plane, or anything in between.
+ *
+ * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
+ * to the end with a minor bump.
+ * Similarly fields that are marked as to be only accessed by
+ * av_opt_ptr() can be reordered. This allows 2 forks to add fields
+ * without breaking compatibility with each other.
+ */
+typedef struct AVFrame {
+#define AV_NUM_DATA_POINTERS 8
+    /**
+     * pointer to the picture/channel planes.
+     * This might be different from the first allocated byte
+     *
+     * Some decoders access areas outside 0,0 - width,height, please
+     * see avcodec_align_dimensions2(). Some filters and swscale can read
+     * up to 16 bytes beyond the planes, if these filters are to be used,
+     * then 16 extra bytes must be allocated.
+     */
+    uint8_t *data[AV_NUM_DATA_POINTERS];
+
+    /**
+     * For video, size in bytes of each picture line.
+     * For audio, size in bytes of each plane.
+     *
+     * For audio, only linesize[0] may be set. For planar audio, each channel
+     * plane must be the same size.
+     *
+     * For video the linesizes should be multiples of the CPUs alignment
+     * preference, this is 16 or 32 for modern desktop CPUs.
+     * Some code requires such alignment other code can be slower without
+     * correct alignment, for yet other it makes no difference.
+     *
+     * @note The linesize may be larger than the size of usable data -- there
+     * may be extra padding present for performance reasons.
+     */
+    int linesize[AV_NUM_DATA_POINTERS];
+
+#if 0
+    /**
+     * pointers to the data planes/channels.
+     *
+     * For video, this should simply point to data[].
+     *
+     * For planar audio, each channel has a separate data pointer, and
+     * linesize[0] contains the size of each channel buffer.
+     * For packed audio, there is just one data pointer, and linesize[0]
+     * contains the total size of the buffer for all channels.
+     *
+     * Note: Both data and extended_data should always be set in a valid frame,
+     * but for planar audio with more channels than can fit in data,
+     * extended_data must be used in order to access all channels.
+     */
+    uint8_t **extended_data;
+#endif
+
+    /**
+     * width and height of the video frame
+     */
+    int width, height;
+
+    /**
+     * number of audio samples (per channel) described by this frame
+     */
+    int nb_samples;
+
+    /**
+     * format of the frame, -1 if unknown or unset
+     * Values correspond to enum AVPixelFormat for video frames,
+     * enum AVSampleFormat for audio)
+     */
+    int format;
+
+    /**
+     * 1 -> keyframe, 0-> not
+     */
+    int key_frame;
+
+    /**
+     * Picture type of the frame.
+     */
+    enum AVPictureType pict_type;
+
+#if FF_API_AVFRAME_LAVC
+    attribute_deprecated
+    uint8_t *base[AV_NUM_DATA_POINTERS];
+#endif
+
+    /**
+     * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
+     */
+    AVRational sample_aspect_ratio;
+
+    /**
+     * Presentation timestamp in time_base units (time when frame should be shown to user).
+     */
+    int64_t pts;
+
+    /**
+     * PTS copied from the AVPacket that was decoded to produce this frame.
+     */
+    int64_t pkt_pts;
+
+    /**
+     * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used)
+     * This is also the Presentation time of this AVFrame calculated from
+     * only AVPacket.dts values without pts values.
+     */
+    int64_t pkt_dts;
+
+    /**
+     * picture number in bitstream order
+     */
+    int coded_picture_number;
+    /**
+     * picture number in display order
+     */
+    int display_picture_number;
+
+    /**
+     * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+     */
+    int quality;
+
+    /**
+     * for some private data of the user
+     */
+    void *opaque;
+
+    /**
+     * error
+     */
+    uint64_t error[AV_NUM_DATA_POINTERS];
+
+#if FF_API_AVFRAME_LAVC
+    attribute_deprecated
+    int type;
+#endif
+
+    /**
+     * When decoding, this signals how much the picture must be delayed.
+     * extra_delay = repeat_pict / (2*fps)
+     */
+    int repeat_pict;
+
+    /**
+     * The content of the picture is interlaced.
+     */
+    int interlaced_frame;
+
+    /**
+     * If the content is interlaced, is top field displayed first.
+     */
+    int top_field_first;
+
+    /**
+     * Tell user application that palette has changed from previous frame.
+     */
+    int palette_has_changed;
+
+#if FF_API_AVFRAME_LAVC
+    attribute_deprecated
+    int buffer_hints;
+
+    /**
+     * Pan scan.
+     */
+    attribute_deprecated
+    struct AVPanScan *pan_scan;
+#endif
+
+    /**
+     * reordered opaque 64bit (generally an integer or a double precision float
+     * PTS but can be anything).
+     * The user sets AVCodecContext.reordered_opaque to represent the input at
+     * that time,
+     * the decoder reorders values as needed and sets AVFrame.reordered_opaque
+     * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
+     * @deprecated in favor of pkt_pts
+     */
+    int64_t reordered_opaque;
+
+#if FF_API_AVFRAME_LAVC
+    /**
+     * @deprecated this field is unused
+     */
+    attribute_deprecated void *hwaccel_picture_private;
+
+    attribute_deprecated
+    struct AVCodecContext *owner;
+    attribute_deprecated
+    void *thread_opaque;
+
+    /**
+     * log2 of the size of the block which a single vector in motion_val represents:
+     * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
+     */
+    uint8_t motion_subsample_log2;
+#endif
+
+    /**
+     * Sample rate of the audio data.
+     */
+    int sample_rate;
+
+    /**
+     * Channel layout of the audio data.
+     */
+    uint64_t channel_layout;
+
+    /**
+     * AVBuffer references backing the data for this frame. If all elements of
+     * this array are NULL, then this frame is not reference counted.
+     *
+     * There may be at most one AVBuffer per data plane, so for video this array
+     * always contains all the references. For planar audio with more than
+     * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
+     * this array. Then the extra AVBufferRef pointers are stored in the
+     * extended_buf array.
+     */
+    AVBufferRef *buf[AV_NUM_DATA_POINTERS];
+
+#if 0
+    /**
+     * For planar audio which requires more than AV_NUM_DATA_POINTERS
+     * AVBufferRef pointers, this array will hold all the references which
+     * cannot fit into AVFrame.buf.
+     *
+     * Note that this is different from AVFrame.extended_data, which always
+     * contains all the pointers. This array only contains the extra pointers,
+     * which cannot fit into AVFrame.buf.
+     *
+     * This array is always allocated using av_malloc() by whoever constructs
+     * the frame. It is freed in av_frame_unref().
+     */
+    AVBufferRef **extended_buf;
+    /**
+     * Number of elements in extended_buf.
+     */
+    int        nb_extended_buf;
+
+    AVFrameSideData **side_data;
+    int            nb_side_data;
+#endif
+
+/**
+ * @defgroup lavu_frame_flags AV_FRAME_FLAGS
+ * Flags describing additional frame properties.
+ *
+ * @{
+ */
+
+/**
+ * The frame data may be corrupted, e.g. due to decoding errors.
+ */
+#define AV_FRAME_FLAG_CORRUPT       (1 << 0)
+/**
+ * @}
+ */
+
+    /**
+     * Frame flags, a combination of @ref lavu_frame_flags
+     */
+    int flags;
+
+    /**
+     * MPEG vs JPEG YUV range.
+     * It must be accessed using av_frame_get_color_range() and
+     * av_frame_set_color_range().
+     * - encoding: Set by user
+     * - decoding: Set by libavcodec
+     */
+    enum AVColorRange color_range;
+
+    enum AVColorPrimaries color_primaries;
+
+    enum AVColorTransferCharacteristic color_trc;
+
+    /**
+     * YUV colorspace type.
+     * It must be accessed using av_frame_get_colorspace() and
+     * av_frame_set_colorspace().
+     * - encoding: Set by user
+     * - decoding: Set by libavcodec
+     */
+    enum AVColorSpace colorspace;
+
+    enum AVChromaLocation chroma_location;
+
+    /**
+     * frame timestamp estimated using various heuristics, in stream time base
+     * Code outside libavcodec should access this field using:
+     * av_frame_get_best_effort_timestamp(frame)
+     * - encoding: unused
+     * - decoding: set by libavcodec, read by user.
+     */
+    int64_t best_effort_timestamp;
+
+    /**
+     * reordered pos from the last AVPacket that has been input into the decoder
+     * Code outside libavcodec should access this field using:
+     * av_frame_get_pkt_pos(frame)
+     * - encoding: unused
+     * - decoding: Read by user.
+     */
+    int64_t pkt_pos;
+
+    /**
+     * duration of the corresponding packet, expressed in
+     * AVStream->time_base units, 0 if unknown.
+     * Code outside libavcodec should access this field using:
+     * av_frame_get_pkt_duration(frame)
+     * - encoding: unused
+     * - decoding: Read by user.
+     */
+    int64_t pkt_duration;
+
+    /**
+     * decode error flags of the frame, set to a combination of
+     * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
+     * were errors during the decoding.
+     * Code outside libavcodec should access this field using:
+     * av_frame_get_decode_error_flags(frame)
+     * - encoding: unused
+     * - decoding: set by libavcodec, read by user.
+     */
+    int decode_error_flags;
+#define FF_DECODE_ERROR_INVALID_BITSTREAM   1
+#define FF_DECODE_ERROR_MISSING_REFERENCE   2
+
+    /**
+     * number of audio channels, only used for audio.
+     * Code outside libavcodec should access this field using:
+     * av_frame_get_channels(frame)
+     * - encoding: unused
+     * - decoding: Read by user.
+     */
+    int channels;
+
+    /**
+     * size of the corresponding packet containing the compressed
+     * frame. It must be accessed using av_frame_get_pkt_size() and
+     * av_frame_set_pkt_size().
+     * It is set to a negative value if unknown.
+     * - encoding: unused
+     * - decoding: set by libavcodec, read by user.
+     */
+    int pkt_size;
+} AVFrame;
+
+/**
+ * Accessors for some AVFrame fields.
+ * The position of these field in the structure is not part of the ABI,
+ * they should not be accessed directly outside libavcodec.
+ */
+int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame);
+void    av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val);
+int64_t av_frame_get_pkt_duration         (const AVFrame *frame);
+void    av_frame_set_pkt_duration         (AVFrame *frame, int64_t val);
+int64_t av_frame_get_pkt_pos              (const AVFrame *frame);
+void    av_frame_set_pkt_pos              (AVFrame *frame, int64_t val);
+int64_t av_frame_get_channel_layout       (const AVFrame *frame);
+void    av_frame_set_channel_layout       (AVFrame *frame, int64_t val);
+int     av_frame_get_channels             (const AVFrame *frame);
+void    av_frame_set_channels             (AVFrame *frame, int     val);
+int     av_frame_get_sample_rate          (const AVFrame *frame);
+void    av_frame_set_sample_rate          (AVFrame *frame, int     val);
+AVDictionary *av_frame_get_metadata       (const AVFrame *frame);
+void          av_frame_set_metadata       (AVFrame *frame, AVDictionary *val);
+int     av_frame_get_decode_error_flags   (const AVFrame *frame);
+void    av_frame_set_decode_error_flags   (AVFrame *frame, int     val);
+int     av_frame_get_pkt_size(const AVFrame *frame);
+void    av_frame_set_pkt_size(AVFrame *frame, int val);
+AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame);
+int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type);
+int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type);
+enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame);
+void    av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val);
+enum AVColorRange av_frame_get_color_range(const AVFrame *frame);
+void    av_frame_set_color_range(AVFrame *frame, enum AVColorRange val);
+
+/**
+ * Get the name of a colorspace.
+ * @return a static string identifying the colorspace; can be NULL.
+ */
+const char *av_get_colorspace_name(enum AVColorSpace val);
+
+/**
+ * Allocate an AVFrame and set its fields to default values.  The resulting
+ * struct must be freed using av_frame_free().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVFrame itself, not the data buffers. Those
+ * must be allocated through other means, e.g. with av_frame_get_buffer() or
+ * manually.
+ */
+AVFrame *av_frame_alloc(void);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data. If the frame is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ */
+void av_frame_free(AVFrame **frame);
+
+/**
+ * Set up a new reference to the data described by the source frame.
+ *
+ * Copy frame properties from src to dst and create a new reference for each
+ * AVBufferRef from src.
+ *
+ * If src is not reference counted, new buffers are allocated and the data is
+ * copied.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_frame_ref(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Create a new frame that references the same data as src.
+ *
+ * This is a shortcut for av_frame_alloc()+av_frame_ref().
+ *
+ * @return newly created AVFrame on success, NULL on error.
+ */
+AVFrame *av_frame_clone(const AVFrame *src);
+
+/**
+ * Unreference all the buffers referenced by frame and reset the frame fields.
+ */
+void av_frame_unref(AVFrame *frame);
+
+/**
+ * Move everything contained in src to dst and reset src.
+ */
+void av_frame_move_ref(AVFrame *dst, AVFrame *src);
+
+/**
+ * Allocate new buffer(s) for audio or video data.
+ *
+ * The following fields must be set on frame before calling this function:
+ * - format (pixel format for video, sample format for audio)
+ * - width and height for video
+ * - nb_samples and channel_layout for audio
+ *
+ * This function will fill AVFrame.data and AVFrame.buf arrays and, if
+ * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.
+ * For planar formats, one buffer will be allocated for each plane.
+ *
+ * @param frame frame in which to store the new buffers.
+ * @param align required buffer size alignment
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_frame_get_buffer(AVFrame *frame, int align);
+
+/**
+ * Check if the frame data is writable.
+ *
+ * @return A positive value if the frame data is writable (which is true if and
+ * only if each of the underlying buffers has only one reference, namely the one
+ * stored in this frame). Return 0 otherwise.
+ *
+ * If 1 is returned the answer is valid until av_buffer_ref() is called on any
+ * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).
+ *
+ * @see av_frame_make_writable(), av_buffer_is_writable()
+ */
+int av_frame_is_writable(AVFrame *frame);
+
+/**
+ * Ensure that the frame data is writable, avoiding data copy if possible.
+ *
+ * Do nothing if the frame is writable, allocate new buffers and copy the data
+ * if it is not.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ *
+ * @see av_frame_is_writable(), av_buffer_is_writable(),
+ * av_buffer_make_writable()
+ */
+int av_frame_make_writable(AVFrame *frame);
+
+/**
+ * Copy the frame data from src to dst.
+ *
+ * This function does not allocate anything, dst must be already initialized and
+ * allocated with the same parameters as src.
+ *
+ * This function only copies the frame data (i.e. the contents of the data /
+ * extended data arrays), not any other properties.
+ *
+ * @return >= 0 on success, a negative AVERROR on error.
+ */
+int av_frame_copy(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Copy only "metadata" fields from src to dst.
+ *
+ * Metadata for the purpose of this function are those fields that do not affect
+ * the data layout in the buffers.  E.g. pts, sample rate (for audio) or sample
+ * aspect ratio (for video), but not width/height or channel layout.
+ * Side data is also copied.
+ */
+int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Get the buffer reference a given data plane is stored in.
+ *
+ * @param plane index of the data plane of interest in frame->extended_data.
+ *
+ * @return the buffer reference that contains the plane or NULL if the input
+ * frame is not valid.
+ */
+AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);
+
+/**
+ * Add a new side data to a frame.
+ *
+ * @param frame a frame to which the side data should be added
+ * @param type type of the added side data
+ * @param size size of the side data
+ *
+ * @return newly added side data on success, NULL on error
+ */
+AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
+                                        enum AVFrameSideDataType type,
+                                        int size);
+
+/**
+ * @return a pointer to the side data of a given type on success, NULL if there
+ * is no side data with such type in this frame.
+ */
+AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
+                                        enum AVFrameSideDataType type);
+
+/**
+ * If side data of the supplied type exists in the frame, free it and remove it
+ * from the frame.
+ */
+void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type);
+
+/**
+ * @return a string identifying the side data type
+ */
+const char *av_frame_side_data_name(enum AVFrameSideDataType type);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_FRAME_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/hash.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HASH_H
+#define AVUTIL_HASH_H
+
+#include <stdint.h>
+
+struct AVHashContext;
+
+/**
+ * Allocate a hash context for the algorithm specified by name.
+ *
+ * @return  >= 0 for success, a negative error code for failure
+ * @note  The context is not initialized, you must call av_hash_init().
+ */
+int av_hash_alloc(struct AVHashContext **ctx, const char *name);
+
+/**
+ * Get the names of available hash algorithms.
+ *
+ * This function can be used to enumerate the algorithms.
+ *
+ * @param i  index of the hash algorithm, starting from 0
+ * @return   a pointer to a static string or NULL if i is out of range
+ */
+const char *av_hash_names(int i);
+
+/**
+ * Get the name of the algorithm corresponding to the given hash context.
+ */
+const char *av_hash_get_name(const struct AVHashContext *ctx);
+
+/**
+ * Maximum value that av_hash_get_size will currently return.
+ *
+ * You can use this if you absolutely want or need to use static allocation
+ * and are fine with not supporting hashes newly added to libavutil without
+ * recompilation.
+ * Note that you still need to check against av_hash_get_size, adding new hashes
+ * with larger sizes will not be considered an ABI change and should not cause
+ * your code to overflow a buffer.
+ */
+#define AV_HASH_MAX_SIZE 64
+
+/**
+ * Get the size of the resulting hash value in bytes.
+ *
+ * The pointer passed to av_hash_final must have space for at least this many bytes.
+ */
+int av_hash_get_size(const struct AVHashContext *ctx);
+
+/**
+ * Initialize or reset a hash context.
+ */
+void av_hash_init(struct AVHashContext *ctx);
+
+/**
+ * Update a hash context with additional data.
+ */
+void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len);
+
+/**
+ * Finalize a hash context and compute the actual hash value.
+ */
+void av_hash_final(struct AVHashContext *ctx, uint8_t *dst);
+
+/**
+ * Finalize a hash context and compute the actual hash value.
+ * If size is smaller than the hash size, the hash is truncated;
+ * if size is larger, the buffer is padded with 0.
+ */
+void av_hash_final_bin(struct AVHashContext *ctx, uint8_t *dst, int size);
+
+/**
+ * Finalize a hash context and compute the actual hash value as a hex string.
+ * The string is always 0-terminated.
+ * If size is smaller than 2 * hash_size + 1, the hex string is truncated.
+ */
+void av_hash_final_hex(struct AVHashContext *ctx, uint8_t *dst, int size);
+
+/**
+ * Finalize a hash context and compute the actual hash value as a base64 string.
+ * The string is always 0-terminated.
+ * If size is smaller than AV_BASE64_SIZE(hash_size), the base64 string is
+ * truncated.
+ */
+void av_hash_final_b64(struct AVHashContext *ctx, uint8_t *dst, int size);
+
+/**
+ * Free hash context.
+ */
+void av_hash_freep(struct AVHashContext **ctx);
+
+#endif /* AVUTIL_HASH_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/hmac.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2012 Martin Storsjo
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HMAC_H
+#define AVUTIL_HMAC_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_hmac HMAC
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+enum AVHMACType {
+    AV_HMAC_MD5,
+    AV_HMAC_SHA1,
+    AV_HMAC_SHA224 = 10,
+    AV_HMAC_SHA256,
+    AV_HMAC_SHA384,
+    AV_HMAC_SHA512,
+};
+
+typedef struct AVHMAC AVHMAC;
+
+/**
+ * Allocate an AVHMAC context.
+ * @param type The hash function used for the HMAC.
+ */
+AVHMAC *av_hmac_alloc(enum AVHMACType type);
+
+/**
+ * Free an AVHMAC context.
+ * @param ctx The context to free, may be NULL
+ */
+void av_hmac_free(AVHMAC *ctx);
+
+/**
+ * Initialize an AVHMAC context with an authentication key.
+ * @param ctx    The HMAC context
+ * @param key    The authentication key
+ * @param keylen The length of the key, in bytes
+ */
+void av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen);
+
+/**
+ * Hash data with the HMAC.
+ * @param ctx  The HMAC context
+ * @param data The data to hash
+ * @param len  The length of the data, in bytes
+ */
+void av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len);
+
+/**
+ * Finish hashing and output the HMAC digest.
+ * @param ctx    The HMAC context
+ * @param out    The output buffer to write the digest into
+ * @param outlen The length of the out buffer, in bytes
+ * @return       The number of bytes written to out, or a negative error code.
+ */
+int av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen);
+
+/**
+ * Hash an array of data with a key.
+ * @param ctx    The HMAC context
+ * @param data   The data to hash
+ * @param len    The length of the data, in bytes
+ * @param key    The authentication key
+ * @param keylen The length of the key, in bytes
+ * @param out    The output buffer to write the digest into
+ * @param outlen The length of the out buffer, in bytes
+ * @return       The number of bytes written to out, or a negative error code.
+ */
+int av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len,
+                 const uint8_t *key, unsigned int keylen,
+                 uint8_t *out, unsigned int outlen);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_HMAC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/imgutils.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,213 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_IMGUTILS_H
+#define AVUTIL_IMGUTILS_H
+
+/**
+ * @file
+ * misc image utilities
+ *
+ * @addtogroup lavu_picture
+ * @{
+ */
+
+#include "avutil.h"
+#include "pixdesc.h"
+#include "rational.h"
+
+/**
+ * Compute the max pixel step for each plane of an image with a
+ * format described by pixdesc.
+ *
+ * The pixel step is the distance in bytes between the first byte of
+ * the group of bytes which describe a pixel component and the first
+ * byte of the successive group in the same plane for the same
+ * component.
+ *
+ * @param max_pixsteps an array which is filled with the max pixel step
+ * for each plane. Since a plane may contain different pixel
+ * components, the computed max_pixsteps[plane] is relative to the
+ * component in the plane with the max pixel step.
+ * @param max_pixstep_comps an array which is filled with the component
+ * for each plane which has the max pixel step. May be NULL.
+ */
+void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4],
+                                const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * Compute the size of an image line with format pix_fmt and width
+ * width for the plane plane.
+ *
+ * @return the computed size in bytes
+ */
+int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane);
+
+/**
+ * Fill plane linesizes for an image with pixel format pix_fmt and
+ * width width.
+ *
+ * @param linesizes array to be filled with the linesize for each plane
+ * @return >= 0 in case of success, a negative error code otherwise
+ */
+int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width);
+
+/**
+ * Fill plane data pointers for an image with pixel format pix_fmt and
+ * height height.
+ *
+ * @param data pointers array to be filled with the pointer for each image plane
+ * @param ptr the pointer to a buffer which will contain the image
+ * @param linesizes the array containing the linesize for each
+ * plane, should be filled by av_image_fill_linesizes()
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,
+                           uint8_t *ptr, const int linesizes[4]);
+
+/**
+ * Allocate an image with size w and h and pixel format pix_fmt, and
+ * fill pointers and linesizes accordingly.
+ * The allocated image buffer has to be freed by using
+ * av_freep(&pointers[0]).
+ *
+ * @param align the value to use for buffer size alignment
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
+                   int w, int h, enum AVPixelFormat pix_fmt, int align);
+
+/**
+ * Copy image plane from src to dst.
+ * That is, copy "height" number of lines of "bytewidth" bytes each.
+ * The first byte of each successive line is separated by *_linesize
+ * bytes.
+ *
+ * bytewidth must be contained by both absolute values of dst_linesize
+ * and src_linesize, otherwise the function behavior is undefined.
+ *
+ * @param dst_linesize linesize for the image plane in dst
+ * @param src_linesize linesize for the image plane in src
+ */
+void av_image_copy_plane(uint8_t       *dst, int dst_linesize,
+                         const uint8_t *src, int src_linesize,
+                         int bytewidth, int height);
+
+/**
+ * Copy image in src_data to dst_data.
+ *
+ * @param dst_linesizes linesizes for the image in dst_data
+ * @param src_linesizes linesizes for the image in src_data
+ */
+void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],
+                   const uint8_t *src_data[4], const int src_linesizes[4],
+                   enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Setup the data pointers and linesizes based on the specified image
+ * parameters and the provided array.
+ *
+ * The fields of the given image are filled in by using the src
+ * address which points to the image data buffer. Depending on the
+ * specified pixel format, one or multiple image data pointers and
+ * line sizes will be set.  If a planar format is specified, several
+ * pointers will be set pointing to the different picture planes and
+ * the line sizes of the different planes will be stored in the
+ * lines_sizes array. Call with !src to get the required
+ * size for the src buffer.
+ *
+ * To allocate the buffer and fill in the dst_data and dst_linesize in
+ * one call, use av_image_alloc().
+ *
+ * @param dst_data      data pointers to be filled in
+ * @param dst_linesizes linesizes for the image in dst_data to be filled in
+ * @param src           buffer which will contain or contains the actual image data, can be NULL
+ * @param pix_fmt       the pixel format of the image
+ * @param width         the width of the image in pixels
+ * @param height        the height of the image in pixels
+ * @param align         the value used in src for linesize alignment
+ * @return the size in bytes required for src, a negative error code
+ * in case of failure
+ */
+int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4],
+                         const uint8_t *src,
+                         enum AVPixelFormat pix_fmt, int width, int height, int align);
+
+/**
+ * Return the size in bytes of the amount of data required to store an
+ * image with the given parameters.
+ *
+ * @param[in] align the assumed linesize alignment
+ */
+int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align);
+
+/**
+ * Copy image data from an image into a buffer.
+ *
+ * av_image_get_buffer_size() can be used to compute the required size
+ * for the buffer to fill.
+ *
+ * @param dst           a buffer into which picture data will be copied
+ * @param dst_size      the size in bytes of dst
+ * @param src_data      pointers containing the source image data
+ * @param src_linesizes linesizes for the image in src_data
+ * @param pix_fmt       the pixel format of the source image
+ * @param width         the width of the source image in pixels
+ * @param height        the height of the source image in pixels
+ * @param align         the assumed linesize alignment for dst
+ * @return the number of bytes written to dst, or a negative value
+ * (error code) on error
+ */
+int av_image_copy_to_buffer(uint8_t *dst, int dst_size,
+                            const uint8_t * const src_data[4], const int src_linesize[4],
+                            enum AVPixelFormat pix_fmt, int width, int height, int align);
+
+/**
+ * Check if the given dimension of an image is valid, meaning that all
+ * bytes of the image can be addressed with a signed int.
+ *
+ * @param w the width of the picture
+ * @param h the height of the picture
+ * @param log_offset the offset to sum to the log level for logging with log_ctx
+ * @param log_ctx the parent logging context, it may be NULL
+ * @return >= 0 if valid, a negative error code otherwise
+ */
+int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx);
+
+/**
+ * Check if the given sample aspect ratio of an image is valid.
+ *
+ * It is considered invalid if the denominator is 0 or if applying the ratio
+ * to the image size would make the smaller dimension less than 1. If the
+ * sar numerator is 0, it is considered unknown and will return as valid.
+ *
+ * @param w width of the image
+ * @param h height of the image
+ * @param sar sample aspect ratio of the image
+ * @return 0 if valid, a negative AVERROR code otherwise
+ */
+int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar);
+
+/**
+ * @}
+ */
+
+
+#endif /* AVUTIL_IMGUTILS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/integer.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,86 @@
+/*
+ * arbitrary precision integers
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * arbitrary precision integers
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_INTEGER_H
+#define AVUTIL_INTEGER_H
+
+#include <stdint.h>
+#include "common.h"
+
+#define AV_INTEGER_SIZE 8
+
+typedef struct AVInteger{
+    uint16_t v[AV_INTEGER_SIZE];
+} AVInteger;
+
+AVInteger av_add_i(AVInteger a, AVInteger b) av_const;
+AVInteger av_sub_i(AVInteger a, AVInteger b) av_const;
+
+/**
+ * Return the rounded-down value of the base 2 logarithm of the given
+ * AVInteger. This is simply the index of the most significant bit
+ * which is 1, or 0 if all bits are 0.
+ */
+int av_log2_i(AVInteger a) av_const;
+AVInteger av_mul_i(AVInteger a, AVInteger b) av_const;
+
+/**
+ * Return 0 if a==b, 1 if a>b and -1 if a<b.
+ */
+int av_cmp_i(AVInteger a, AVInteger b) av_const;
+
+/**
+ * bitwise shift
+ * @param s the number of bits by which the value should be shifted right,
+            may be negative for shifting left
+ */
+AVInteger av_shr_i(AVInteger a, int s) av_const;
+
+/**
+ * Return a % b.
+ * @param quot a/b will be stored here.
+ */
+AVInteger av_mod_i(AVInteger *quot, AVInteger a, AVInteger b);
+
+/**
+ * Return a/b.
+ */
+AVInteger av_div_i(AVInteger a, AVInteger b) av_const;
+
+/**
+ * Convert the given int64_t to an AVInteger.
+ */
+AVInteger av_int2i(int64_t a) av_const;
+
+/**
+ * Convert the given AVInteger to an int64_t.
+ * If the AVInteger is too large to fit into an int64_t,
+ * then only the least significant 64 bits will be used.
+ */
+int64_t av_i2int(AVInteger a) av_const;
+
+#endif /* AVUTIL_INTEGER_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/internal.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,269 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal API header
+ */
+
+#ifndef AVUTIL_INTERNAL_H
+#define AVUTIL_INTERNAL_H
+
+#if !defined(DEBUG) && !defined(NDEBUG)
+#    define NDEBUG
+#endif
+
+#include <limits.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <assert.h>
+#include "config.h"
+#include "attributes.h"
+#include "timer.h"
+#include "cpu.h"
+#include "dict.h"
+#include "pixfmt.h"
+#include "version.h"
+
+#if ARCH_X86
+#   include "x86/emms.h"
+#endif
+
+#ifndef emms_c
+#   define emms_c()
+#endif
+
+#ifndef attribute_align_arg
+#if ARCH_X86_32 && AV_GCC_VERSION_AT_LEAST(4,2)
+#    define attribute_align_arg __attribute__((force_align_arg_pointer))
+#else
+#    define attribute_align_arg
+#endif
+#endif
+
+#if defined(_MSC_VER) && CONFIG_SHARED
+#    define av_export __declspec(dllimport)
+#else
+#    define av_export
+#endif
+
+#if HAVE_PRAGMA_DEPRECATED
+#    if defined(__ICL) || defined (__INTEL_COMPILER)
+#        define FF_DISABLE_DEPRECATION_WARNINGS __pragma(warning(push)) __pragma(warning(disable:1478))
+#        define FF_ENABLE_DEPRECATION_WARNINGS  __pragma(warning(pop))
+#    elif defined(_MSC_VER)
+#        define FF_DISABLE_DEPRECATION_WARNINGS __pragma(warning(push)) __pragma(warning(disable:4996))
+#        define FF_ENABLE_DEPRECATION_WARNINGS  __pragma(warning(pop))
+#    else
+#        define FF_DISABLE_DEPRECATION_WARNINGS _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#        define FF_ENABLE_DEPRECATION_WARNINGS  _Pragma("GCC diagnostic warning \"-Wdeprecated-declarations\"")
+#    endif
+#else
+#    define FF_DISABLE_DEPRECATION_WARNINGS
+#    define FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+#ifndef INT_BIT
+#    define INT_BIT (CHAR_BIT * sizeof(int))
+#endif
+
+#define FF_MEMORY_POISON 0x2a
+
+#define MAKE_ACCESSORS(str, name, type, field) \
+    type av_##name##_get_##field(const str *s) { return s->field; } \
+    void av_##name##_set_##field(str *s, type v) { s->field = v; }
+
+// Some broken preprocessors need a second expansion
+// to be forced to tokenize __VA_ARGS__
+#define E1(x) x
+
+/* Check if the hard coded offset of a struct member still matches reality.
+ * Induce a compilation failure if not.
+ */
+#define AV_CHECK_OFFSET(s, m, o) struct check_##o {    \
+        int x_##o[offsetof(s, m) == o? 1: -1];         \
+    }
+
+#define LOCAL_ALIGNED_A(a, t, v, s, o, ...)             \
+    uint8_t la_##v[sizeof(t s o) + (a)];                \
+    t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)
+
+#define LOCAL_ALIGNED_D(a, t, v, s, o, ...)             \
+    DECLARE_ALIGNED(a, t, la_##v) s o;                  \
+    t (*v) o = la_##v
+
+#define LOCAL_ALIGNED(a, t, v, ...) E1(LOCAL_ALIGNED_A(a, t, v, __VA_ARGS__,,))
+
+#if HAVE_LOCAL_ALIGNED_8
+#   define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
+#else
+#   define LOCAL_ALIGNED_8(t, v, ...) LOCAL_ALIGNED(8, t, v, __VA_ARGS__)
+#endif
+
+#if HAVE_LOCAL_ALIGNED_16
+#   define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
+#else
+#   define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED(16, t, v, __VA_ARGS__)
+#endif
+
+#if HAVE_LOCAL_ALIGNED_32
+#   define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
+#else
+#   define LOCAL_ALIGNED_32(t, v, ...) LOCAL_ALIGNED(32, t, v, __VA_ARGS__)
+#endif
+
+#define FF_ALLOC_OR_GOTO(ctx, p, size, label)\
+{\
+    p = av_malloc(size);\
+    if (!(p) && (size) != 0) {\
+        av_log(ctx, AV_LOG_ERROR, "Cannot allocate memory.\n");\
+        goto label;\
+    }\
+}
+
+#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)\
+{\
+    p = av_mallocz(size);\
+    if (!(p) && (size) != 0) {\
+        av_log(ctx, AV_LOG_ERROR, "Cannot allocate memory.\n");\
+        goto label;\
+    }\
+}
+
+#define FF_ALLOC_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)\
+{\
+    p = av_malloc_array(nelem, elsize);\
+    if (!p) {\
+        av_log(ctx, AV_LOG_ERROR, "Cannot allocate memory.\n");\
+        goto label;\
+    }\
+}
+
+#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)\
+{\
+    p = av_mallocz_array(nelem, elsize);\
+    if (!p) {\
+        av_log(ctx, AV_LOG_ERROR, "Cannot allocate memory.\n");\
+        goto label;\
+    }\
+}
+
+#include "libm.h"
+
+#if defined(_MSC_VER)
+#pragma comment(linker, "/include:"EXTERN_PREFIX"avpriv_strtod")
+#pragma comment(linker, "/include:"EXTERN_PREFIX"avpriv_snprintf")
+#endif
+
+/**
+ * Return NULL if CONFIG_SMALL is true, otherwise the argument
+ * without modification. Used to disable the definition of strings
+ * (for example AVCodec long_names).
+ */
+#if CONFIG_SMALL
+#   define NULL_IF_CONFIG_SMALL(x) NULL
+#else
+#   define NULL_IF_CONFIG_SMALL(x) x
+#endif
+
+/**
+ * Define a function with only the non-default version specified.
+ *
+ * On systems with ELF shared libraries, all symbols exported from
+ * FFmpeg libraries are tagged with the name and major version of the
+ * library to which they belong.  If a function is moved from one
+ * library to another, a wrapper must be retained in the original
+ * location to preserve binary compatibility.
+ *
+ * Functions defined with this macro will never be used to resolve
+ * symbols by the build-time linker.
+ *
+ * @param type return type of function
+ * @param name name of function
+ * @param args argument list of function
+ * @param ver  version tag to assign function
+ */
+#if HAVE_SYMVER_ASM_LABEL
+#   define FF_SYMVER(type, name, args, ver)                     \
+    type ff_##name args __asm__ (EXTERN_PREFIX #name "@" ver);  \
+    type ff_##name args
+#elif HAVE_SYMVER_GNU_ASM
+#   define FF_SYMVER(type, name, args, ver)                             \
+    __asm__ (".symver ff_" #name "," EXTERN_PREFIX #name "@" ver);      \
+    type ff_##name args;                                                \
+    type ff_##name args
+#endif
+
+/**
+ * Return NULL if a threading library has not been enabled.
+ * Used to disable threading functions in AVCodec definitions
+ * when not needed.
+ */
+#if HAVE_THREADS
+#   define ONLY_IF_THREADS_ENABLED(x) x
+#else
+#   define ONLY_IF_THREADS_ENABLED(x) NULL
+#endif
+
+/**
+ * Log a generic warning message about a missing feature.
+ *
+ * @param[in] avc a pointer to an arbitrary struct of which the first
+ *                field is a pointer to an AVClass struct
+ * @param[in] msg string containing the name of the missing feature
+ */
+#ifdef USE_AV_LOG
+void avpriv_report_missing_feature(void *avc,
+                                   const char *msg, ...) av_printf_format(2, 3);
+#else
+#define avpriv_report_missing_feature(avc, msg, ...) do { } while (0)
+#endif
+/**
+ * Log a generic warning message about a missing feature.
+ * Additionally request that a sample showcasing the feature be uploaded.
+ *
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ *                a pointer to an AVClass struct
+ * @param[in] msg string containing the name of the missing feature
+ */
+void avpriv_request_sample(void *avc,
+                           const char *msg, ...) av_printf_format(2, 3);
+
+#if HAVE_LIBC_MSVCRT
+#define avpriv_open ff_open
+#define PTRDIFF_SPECIFIER "Id"
+#define SIZE_SPECIFIER "Iu"
+#else
+#define PTRDIFF_SPECIFIER "td"
+#define SIZE_SPECIFIER "zu"
+#endif
+
+/**
+ * A wrapper for open() setting O_CLOEXEC.
+ */
+int avpriv_open(const char *filename, int flags, ...);
+
+int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt);
+
+#if FF_API_GET_CHANNEL_LAYOUT_COMPAT
+uint64_t ff_get_channel_layout(const char *name, int compat);
+#endif
+
+#endif /* AVUTIL_INTERNAL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/intfloat.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTFLOAT_H
+#define AVUTIL_INTFLOAT_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+union av_intfloat32 {
+    uint32_t i;
+    float    f;
+};
+
+union av_intfloat64 {
+    uint64_t i;
+    double   f;
+};
+
+/**
+ * Reinterpret a 32-bit integer as a float.
+ */
+static av_always_inline float av_int2float(uint32_t i)
+{
+    union av_intfloat32 v;
+    v.i = i;
+    return v.f;
+}
+
+/**
+ * Reinterpret a float as a 32-bit integer.
+ */
+static av_always_inline uint32_t av_float2int(float f)
+{
+    union av_intfloat32 v;
+    v.f = f;
+    return v.i;
+}
+
+/**
+ * Reinterpret a 64-bit integer as a double.
+ */
+static av_always_inline double av_int2double(uint64_t i)
+{
+    union av_intfloat64 v;
+    v.i = i;
+    return v.f;
+}
+
+/**
+ * Reinterpret a double as a 64-bit integer.
+ */
+static av_always_inline uint64_t av_double2int(double f)
+{
+    union av_intfloat64 v;
+    v.f = f;
+    return v.i;
+}
+
+#endif /* AVUTIL_INTFLOAT_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/intmath.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTMATH_H
+#define AVUTIL_INTMATH_H
+
+#include <stdint.h>
+
+#include "config.h"
+#include "attributes.h"
+
+#if ARCH_ARM
+#   include "arm/intmath.h"
+#endif
+
+/**
+ * @addtogroup lavu_internal
+ * @{
+ */
+
+#if HAVE_FAST_CLZ
+#if AV_GCC_VERSION_AT_LEAST(3,4)
+#ifndef ff_log2
+#   define ff_log2(x) (31 - __builtin_clz((x)|1))
+#   ifndef ff_log2_16bit
+#      define ff_log2_16bit av_log2
+#   endif
+#endif /* ff_log2 */
+#elif defined( __INTEL_COMPILER )
+#ifndef ff_log2
+#   define ff_log2(x) (_bit_scan_reverse(x|1))
+#   ifndef ff_log2_16bit
+#      define ff_log2_16bit av_log2
+#   endif
+#endif /* ff_log2 */
+#endif
+#endif /* AV_GCC_VERSION_AT_LEAST(3,4) */
+
+extern const uint8_t ff_log2_tab[256];
+
+#ifndef ff_log2
+#define ff_log2 ff_log2_c
+#if !defined( _MSC_VER )
+static av_always_inline av_const int ff_log2_c(unsigned int v)
+{
+    int n = 0;
+    if (v & 0xffff0000) {
+        v >>= 16;
+        n += 16;
+    }
+    if (v & 0xff00) {
+        v >>= 8;
+        n += 8;
+    }
+    n += ff_log2_tab[v];
+
+    return n;
+}
+#else
+static av_always_inline av_const int ff_log2_c(unsigned int v)
+{
+    unsigned long n;
+    _BitScanReverse(&n, v|1);
+    return n;
+}
+#define ff_log2_16bit av_log2
+#endif
+#endif
+
+#ifndef ff_log2_16bit
+#define ff_log2_16bit ff_log2_16bit_c
+static av_always_inline av_const int ff_log2_16bit_c(unsigned int v)
+{
+    int n = 0;
+    if (v & 0xff00) {
+        v >>= 8;
+        n += 8;
+    }
+    n += ff_log2_tab[v];
+
+    return n;
+}
+#endif
+
+#define av_log2       ff_log2
+#define av_log2_16bit ff_log2_16bit
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+#if HAVE_FAST_CLZ
+#if AV_GCC_VERSION_AT_LEAST(3,4)
+#ifndef ff_ctz
+#define ff_ctz(v) __builtin_ctz(v)
+#endif
+#elif defined( __INTEL_COMPILER )
+#ifndef ff_ctz
+#define ff_ctz(v) _bit_scan_forward(v)
+#endif
+#endif
+#endif
+
+#ifndef ff_ctz
+#define ff_ctz ff_ctz_c
+#if !defined( _MSC_VER )
+static av_always_inline av_const int ff_ctz_c(int v)
+{
+    int c;
+
+    if (v & 0x1)
+        return 0;
+
+    c = 1;
+    if (!(v & 0xffff)) {
+        v >>= 16;
+        c += 16;
+    }
+    if (!(v & 0xff)) {
+        v >>= 8;
+        c += 8;
+    }
+    if (!(v & 0xf)) {
+        v >>= 4;
+        c += 4;
+    }
+    if (!(v & 0x3)) {
+        v >>= 2;
+        c += 2;
+    }
+    c -= v & 0x1;
+
+    return c;
+}
+#else
+static av_always_inline av_const int ff_ctz_c( int v )
+{
+    unsigned long c;
+    _BitScanForward(&c, v);
+    return c;
+}
+#endif
+#endif
+
+/**
+ * Trailing zero bit count.
+ *
+ * @param v  input value. If v is 0, the result is undefined.
+ * @return   the number of trailing 0-bits
+ */
+int av_ctz(int v);
+
+/**
+ * @}
+ */
+#endif /* AVUTIL_INTMATH_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/intreadwrite.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,629 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTREADWRITE_H
+#define AVUTIL_INTREADWRITE_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+#include "bswap.h"
+
+typedef union {
+    uint64_t u64;
+    uint32_t u32[2];
+    uint16_t u16[4];
+    uint8_t  u8 [8];
+    double   f64;
+    float    f32[2];
+} av_alias av_alias64;
+
+typedef union {
+    uint32_t u32;
+    uint16_t u16[2];
+    uint8_t  u8 [4];
+    float    f32;
+} av_alias av_alias32;
+
+typedef union {
+    uint16_t u16;
+    uint8_t  u8 [2];
+} av_alias av_alias16;
+
+/*
+ * Arch-specific headers can provide any combination of
+ * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
+ * Preprocessor symbols must be defined, even if these are implemented
+ * as inline functions.
+ *
+ * R/W means read/write, B/L/N means big/little/native endianness.
+ * The following macros require aligned access, compared to their
+ * unaligned variants: AV_(COPY|SWAP|ZERO)(64|128), AV_[RW]N[8-64]A.
+ * Incorrect usage may range from abysmal performance to crash
+ * depending on the platform.
+ *
+ * The unaligned variants are AV_[RW][BLN][8-64] and AV_COPY*U.
+ */
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if   ARCH_ARM
+#   include "arm/intreadwrite.h"
+#elif ARCH_AVR32
+#   include "avr32/intreadwrite.h"
+#elif ARCH_MIPS
+#   include "mips/intreadwrite.h"
+#elif ARCH_PPC
+#   include "ppc/intreadwrite.h"
+#elif ARCH_TOMI
+#   include "tomi/intreadwrite.h"
+#elif ARCH_X86
+#   include "x86/intreadwrite.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+/*
+ * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
+ */
+
+#if AV_HAVE_BIGENDIAN
+
+#   if    defined(AV_RN16) && !defined(AV_RB16)
+#       define AV_RB16(p) AV_RN16(p)
+#   elif !defined(AV_RN16) &&  defined(AV_RB16)
+#       define AV_RN16(p) AV_RB16(p)
+#   endif
+
+#   if    defined(AV_WN16) && !defined(AV_WB16)
+#       define AV_WB16(p, v) AV_WN16(p, v)
+#   elif !defined(AV_WN16) &&  defined(AV_WB16)
+#       define AV_WN16(p, v) AV_WB16(p, v)
+#   endif
+
+#   if    defined(AV_RN24) && !defined(AV_RB24)
+#       define AV_RB24(p) AV_RN24(p)
+#   elif !defined(AV_RN24) &&  defined(AV_RB24)
+#       define AV_RN24(p) AV_RB24(p)
+#   endif
+
+#   if    defined(AV_WN24) && !defined(AV_WB24)
+#       define AV_WB24(p, v) AV_WN24(p, v)
+#   elif !defined(AV_WN24) &&  defined(AV_WB24)
+#       define AV_WN24(p, v) AV_WB24(p, v)
+#   endif
+
+#   if    defined(AV_RN32) && !defined(AV_RB32)
+#       define AV_RB32(p) AV_RN32(p)
+#   elif !defined(AV_RN32) &&  defined(AV_RB32)
+#       define AV_RN32(p) AV_RB32(p)
+#   endif
+
+#   if    defined(AV_WN32) && !defined(AV_WB32)
+#       define AV_WB32(p, v) AV_WN32(p, v)
+#   elif !defined(AV_WN32) &&  defined(AV_WB32)
+#       define AV_WN32(p, v) AV_WB32(p, v)
+#   endif
+
+#   if    defined(AV_RN48) && !defined(AV_RB48)
+#       define AV_RB48(p) AV_RN48(p)
+#   elif !defined(AV_RN48) &&  defined(AV_RB48)
+#       define AV_RN48(p) AV_RB48(p)
+#   endif
+
+#   if    defined(AV_WN48) && !defined(AV_WB48)
+#       define AV_WB48(p, v) AV_WN48(p, v)
+#   elif !defined(AV_WN48) &&  defined(AV_WB48)
+#       define AV_WN48(p, v) AV_WB48(p, v)
+#   endif
+
+#   if    defined(AV_RN64) && !defined(AV_RB64)
+#       define AV_RB64(p) AV_RN64(p)
+#   elif !defined(AV_RN64) &&  defined(AV_RB64)
+#       define AV_RN64(p) AV_RB64(p)
+#   endif
+
+#   if    defined(AV_WN64) && !defined(AV_WB64)
+#       define AV_WB64(p, v) AV_WN64(p, v)
+#   elif !defined(AV_WN64) &&  defined(AV_WB64)
+#       define AV_WN64(p, v) AV_WB64(p, v)
+#   endif
+
+#else /* AV_HAVE_BIGENDIAN */
+
+#   if    defined(AV_RN16) && !defined(AV_RL16)
+#       define AV_RL16(p) AV_RN16(p)
+#   elif !defined(AV_RN16) &&  defined(AV_RL16)
+#       define AV_RN16(p) AV_RL16(p)
+#   endif
+
+#   if    defined(AV_WN16) && !defined(AV_WL16)
+#       define AV_WL16(p, v) AV_WN16(p, v)
+#   elif !defined(AV_WN16) &&  defined(AV_WL16)
+#       define AV_WN16(p, v) AV_WL16(p, v)
+#   endif
+
+#   if    defined(AV_RN24) && !defined(AV_RL24)
+#       define AV_RL24(p) AV_RN24(p)
+#   elif !defined(AV_RN24) &&  defined(AV_RL24)
+#       define AV_RN24(p) AV_RL24(p)
+#   endif
+
+#   if    defined(AV_WN24) && !defined(AV_WL24)
+#       define AV_WL24(p, v) AV_WN24(p, v)
+#   elif !defined(AV_WN24) &&  defined(AV_WL24)
+#       define AV_WN24(p, v) AV_WL24(p, v)
+#   endif
+
+#   if    defined(AV_RN32) && !defined(AV_RL32)
+#       define AV_RL32(p) AV_RN32(p)
+#   elif !defined(AV_RN32) &&  defined(AV_RL32)
+#       define AV_RN32(p) AV_RL32(p)
+#   endif
+
+#   if    defined(AV_WN32) && !defined(AV_WL32)
+#       define AV_WL32(p, v) AV_WN32(p, v)
+#   elif !defined(AV_WN32) &&  defined(AV_WL32)
+#       define AV_WN32(p, v) AV_WL32(p, v)
+#   endif
+
+#   if    defined(AV_RN48) && !defined(AV_RL48)
+#       define AV_RL48(p) AV_RN48(p)
+#   elif !defined(AV_RN48) &&  defined(AV_RL48)
+#       define AV_RN48(p) AV_RL48(p)
+#   endif
+
+#   if    defined(AV_WN48) && !defined(AV_WL48)
+#       define AV_WL48(p, v) AV_WN48(p, v)
+#   elif !defined(AV_WN48) &&  defined(AV_WL48)
+#       define AV_WN48(p, v) AV_WL48(p, v)
+#   endif
+
+#   if    defined(AV_RN64) && !defined(AV_RL64)
+#       define AV_RL64(p) AV_RN64(p)
+#   elif !defined(AV_RN64) &&  defined(AV_RL64)
+#       define AV_RN64(p) AV_RL64(p)
+#   endif
+
+#   if    defined(AV_WN64) && !defined(AV_WL64)
+#       define AV_WL64(p, v) AV_WN64(p, v)
+#   elif !defined(AV_WN64) &&  defined(AV_WL64)
+#       define AV_WN64(p, v) AV_WL64(p, v)
+#   endif
+
+#endif /* !AV_HAVE_BIGENDIAN */
+
+/*
+ * Define AV_[RW]N helper macros to simplify definitions not provided
+ * by per-arch headers.
+ */
+
+#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)
+
+union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
+union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
+union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
+
+#   define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
+#   define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))
+
+#elif defined(__DECC)
+
+#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
+#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))
+
+#elif AV_HAVE_FAST_UNALIGNED
+
+#   define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
+#   define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#else
+
+#ifndef AV_RB16
+#   define AV_RB16(x)                           \
+    ((((const uint8_t*)(x))[0] << 8) |          \
+      ((const uint8_t*)(x))[1])
+#endif
+#ifndef AV_WB16
+#   define AV_WB16(p, darg) do {                \
+        unsigned d = (darg);                    \
+        ((uint8_t*)(p))[1] = (d);               \
+        ((uint8_t*)(p))[0] = (d)>>8;            \
+    } while(0)
+#endif
+
+#ifndef AV_RL16
+#   define AV_RL16(x)                           \
+    ((((const uint8_t*)(x))[1] << 8) |          \
+      ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL16
+#   define AV_WL16(p, darg) do {                \
+        unsigned d = (darg);                    \
+        ((uint8_t*)(p))[0] = (d);               \
+        ((uint8_t*)(p))[1] = (d)>>8;            \
+    } while(0)
+#endif
+
+#ifndef AV_RB32
+#   define AV_RB32(x)                                \
+    (((uint32_t)((const uint8_t*)(x))[0] << 24) |    \
+               (((const uint8_t*)(x))[1] << 16) |    \
+               (((const uint8_t*)(x))[2] <<  8) |    \
+                ((const uint8_t*)(x))[3])
+#endif
+#ifndef AV_WB32
+#   define AV_WB32(p, darg) do {                \
+        unsigned d = (darg);                    \
+        ((uint8_t*)(p))[3] = (d);               \
+        ((uint8_t*)(p))[2] = (d)>>8;            \
+        ((uint8_t*)(p))[1] = (d)>>16;           \
+        ((uint8_t*)(p))[0] = (d)>>24;           \
+    } while(0)
+#endif
+
+#ifndef AV_RL32
+#   define AV_RL32(x)                                \
+    (((uint32_t)((const uint8_t*)(x))[3] << 24) |    \
+               (((const uint8_t*)(x))[2] << 16) |    \
+               (((const uint8_t*)(x))[1] <<  8) |    \
+                ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL32
+#   define AV_WL32(p, darg) do {                \
+        unsigned d = (darg);                    \
+        ((uint8_t*)(p))[0] = (d);               \
+        ((uint8_t*)(p))[1] = (d)>>8;            \
+        ((uint8_t*)(p))[2] = (d)>>16;           \
+        ((uint8_t*)(p))[3] = (d)>>24;           \
+    } while(0)
+#endif
+
+#ifndef AV_RB64
+#   define AV_RB64(x)                                   \
+    (((uint64_t)((const uint8_t*)(x))[0] << 56) |       \
+     ((uint64_t)((const uint8_t*)(x))[1] << 48) |       \
+     ((uint64_t)((const uint8_t*)(x))[2] << 40) |       \
+     ((uint64_t)((const uint8_t*)(x))[3] << 32) |       \
+     ((uint64_t)((const uint8_t*)(x))[4] << 24) |       \
+     ((uint64_t)((const uint8_t*)(x))[5] << 16) |       \
+     ((uint64_t)((const uint8_t*)(x))[6] <<  8) |       \
+      (uint64_t)((const uint8_t*)(x))[7])
+#endif
+#ifndef AV_WB64
+#   define AV_WB64(p, darg) do {                \
+        uint64_t d = (darg);                    \
+        ((uint8_t*)(p))[7] = (d);               \
+        ((uint8_t*)(p))[6] = (d)>>8;            \
+        ((uint8_t*)(p))[5] = (d)>>16;           \
+        ((uint8_t*)(p))[4] = (d)>>24;           \
+        ((uint8_t*)(p))[3] = (d)>>32;           \
+        ((uint8_t*)(p))[2] = (d)>>40;           \
+        ((uint8_t*)(p))[1] = (d)>>48;           \
+        ((uint8_t*)(p))[0] = (d)>>56;           \
+    } while(0)
+#endif
+
+#ifndef AV_RL64
+#   define AV_RL64(x)                                   \
+    (((uint64_t)((const uint8_t*)(x))[7] << 56) |       \
+     ((uint64_t)((const uint8_t*)(x))[6] << 48) |       \
+     ((uint64_t)((const uint8_t*)(x))[5] << 40) |       \
+     ((uint64_t)((const uint8_t*)(x))[4] << 32) |       \
+     ((uint64_t)((const uint8_t*)(x))[3] << 24) |       \
+     ((uint64_t)((const uint8_t*)(x))[2] << 16) |       \
+     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |       \
+      (uint64_t)((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL64
+#   define AV_WL64(p, darg) do {                \
+        uint64_t d = (darg);                    \
+        ((uint8_t*)(p))[0] = (d);               \
+        ((uint8_t*)(p))[1] = (d)>>8;            \
+        ((uint8_t*)(p))[2] = (d)>>16;           \
+        ((uint8_t*)(p))[3] = (d)>>24;           \
+        ((uint8_t*)(p))[4] = (d)>>32;           \
+        ((uint8_t*)(p))[5] = (d)>>40;           \
+        ((uint8_t*)(p))[6] = (d)>>48;           \
+        ((uint8_t*)(p))[7] = (d)>>56;           \
+    } while(0)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+#   define AV_RN(s, p)    AV_RB##s(p)
+#   define AV_WN(s, p, v) AV_WB##s(p, v)
+#else
+#   define AV_RN(s, p)    AV_RL##s(p)
+#   define AV_WN(s, p, v) AV_WL##s(p, v)
+#endif
+
+#endif /* HAVE_FAST_UNALIGNED */
+
+#ifndef AV_RN16
+#   define AV_RN16(p) AV_RN(16, p)
+#endif
+
+#ifndef AV_RN32
+#   define AV_RN32(p) AV_RN(32, p)
+#endif
+
+#ifndef AV_RN64
+#   define AV_RN64(p) AV_RN(64, p)
+#endif
+
+#ifndef AV_WN16
+#   define AV_WN16(p, v) AV_WN(16, p, v)
+#endif
+
+#ifndef AV_WN32
+#   define AV_WN32(p, v) AV_WN(32, p, v)
+#endif
+
+#ifndef AV_WN64
+#   define AV_WN64(p, v) AV_WN(64, p, v)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+#   define AV_RB(s, p)    AV_RN##s(p)
+#   define AV_WB(s, p, v) AV_WN##s(p, v)
+#   define AV_RL(s, p)    av_bswap##s(AV_RN##s(p))
+#   define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
+#else
+#   define AV_RB(s, p)    av_bswap##s(AV_RN##s(p))
+#   define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
+#   define AV_RL(s, p)    AV_RN##s(p)
+#   define AV_WL(s, p, v) AV_WN##s(p, v)
+#endif
+
+#define AV_RB8(x)     (((const uint8_t*)(x))[0])
+#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)
+
+#define AV_RL8(x)     AV_RB8(x)
+#define AV_WL8(p, d)  AV_WB8(p, d)
+
+#ifndef AV_RB16
+#   define AV_RB16(p)    AV_RB(16, p)
+#endif
+#ifndef AV_WB16
+#   define AV_WB16(p, v) AV_WB(16, p, v)
+#endif
+
+#ifndef AV_RL16
+#   define AV_RL16(p)    AV_RL(16, p)
+#endif
+#ifndef AV_WL16
+#   define AV_WL16(p, v) AV_WL(16, p, v)
+#endif
+
+#ifndef AV_RB32
+#   define AV_RB32(p)    AV_RB(32, p)
+#endif
+#ifndef AV_WB32
+#   define AV_WB32(p, v) AV_WB(32, p, v)
+#endif
+
+#ifndef AV_RL32
+#   define AV_RL32(p)    AV_RL(32, p)
+#endif
+#ifndef AV_WL32
+#   define AV_WL32(p, v) AV_WL(32, p, v)
+#endif
+
+#ifndef AV_RB64
+#   define AV_RB64(p)    AV_RB(64, p)
+#endif
+#ifndef AV_WB64
+#   define AV_WB64(p, v) AV_WB(64, p, v)
+#endif
+
+#ifndef AV_RL64
+#   define AV_RL64(p)    AV_RL(64, p)
+#endif
+#ifndef AV_WL64
+#   define AV_WL64(p, v) AV_WL(64, p, v)
+#endif
+
+#ifndef AV_RB24
+#   define AV_RB24(x)                           \
+    ((((const uint8_t*)(x))[0] << 16) |         \
+     (((const uint8_t*)(x))[1] <<  8) |         \
+      ((const uint8_t*)(x))[2])
+#endif
+#ifndef AV_WB24
+#   define AV_WB24(p, d) do {                   \
+        ((uint8_t*)(p))[2] = (d);               \
+        ((uint8_t*)(p))[1] = (d)>>8;            \
+        ((uint8_t*)(p))[0] = (d)>>16;           \
+    } while(0)
+#endif
+
+#ifndef AV_RL24
+#   define AV_RL24(x)                           \
+    ((((const uint8_t*)(x))[2] << 16) |         \
+     (((const uint8_t*)(x))[1] <<  8) |         \
+      ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL24
+#   define AV_WL24(p, d) do {                   \
+        ((uint8_t*)(p))[0] = (d);               \
+        ((uint8_t*)(p))[1] = (d)>>8;            \
+        ((uint8_t*)(p))[2] = (d)>>16;           \
+    } while(0)
+#endif
+
+#ifndef AV_RB48
+#   define AV_RB48(x)                                     \
+    (((uint64_t)((const uint8_t*)(x))[0] << 40) |         \
+     ((uint64_t)((const uint8_t*)(x))[1] << 32) |         \
+     ((uint64_t)((const uint8_t*)(x))[2] << 24) |         \
+     ((uint64_t)((const uint8_t*)(x))[3] << 16) |         \
+     ((uint64_t)((const uint8_t*)(x))[4] <<  8) |         \
+      (uint64_t)((const uint8_t*)(x))[5])
+#endif
+#ifndef AV_WB48
+#   define AV_WB48(p, darg) do {                \
+        uint64_t d = (darg);                    \
+        ((uint8_t*)(p))[5] = (d);               \
+        ((uint8_t*)(p))[4] = (d)>>8;            \
+        ((uint8_t*)(p))[3] = (d)>>16;           \
+        ((uint8_t*)(p))[2] = (d)>>24;           \
+        ((uint8_t*)(p))[1] = (d)>>32;           \
+        ((uint8_t*)(p))[0] = (d)>>40;           \
+    } while(0)
+#endif
+
+#ifndef AV_RL48
+#   define AV_RL48(x)                                     \
+    (((uint64_t)((const uint8_t*)(x))[5] << 40) |         \
+     ((uint64_t)((const uint8_t*)(x))[4] << 32) |         \
+     ((uint64_t)((const uint8_t*)(x))[3] << 24) |         \
+     ((uint64_t)((const uint8_t*)(x))[2] << 16) |         \
+     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |         \
+      (uint64_t)((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL48
+#   define AV_WL48(p, darg) do {                \
+        uint64_t d = (darg);                    \
+        ((uint8_t*)(p))[0] = (d);               \
+        ((uint8_t*)(p))[1] = (d)>>8;            \
+        ((uint8_t*)(p))[2] = (d)>>16;           \
+        ((uint8_t*)(p))[3] = (d)>>24;           \
+        ((uint8_t*)(p))[4] = (d)>>32;           \
+        ((uint8_t*)(p))[5] = (d)>>40;           \
+    } while(0)
+#endif
+
+/*
+ * The AV_[RW]NA macros access naturally aligned data
+ * in a type-safe way.
+ */
+
+#define AV_RNA(s, p)    (((const av_alias##s*)(p))->u##s)
+#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#ifndef AV_RN16A
+#   define AV_RN16A(p) AV_RNA(16, p)
+#endif
+
+#ifndef AV_RN32A
+#   define AV_RN32A(p) AV_RNA(32, p)
+#endif
+
+#ifndef AV_RN64A
+#   define AV_RN64A(p) AV_RNA(64, p)
+#endif
+
+#ifndef AV_WN16A
+#   define AV_WN16A(p, v) AV_WNA(16, p, v)
+#endif
+
+#ifndef AV_WN32A
+#   define AV_WN32A(p, v) AV_WNA(32, p, v)
+#endif
+
+#ifndef AV_WN64A
+#   define AV_WN64A(p, v) AV_WNA(64, p, v)
+#endif
+
+/*
+ * The AV_COPYxxU macros are suitable for copying data to/from unaligned
+ * memory locations.
+ */
+
+#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s));
+
+#ifndef AV_COPY16U
+#   define AV_COPY16U(d, s) AV_COPYU(16, d, s)
+#endif
+
+#ifndef AV_COPY32U
+#   define AV_COPY32U(d, s) AV_COPYU(32, d, s)
+#endif
+
+#ifndef AV_COPY64U
+#   define AV_COPY64U(d, s) AV_COPYU(64, d, s)
+#endif
+
+#ifndef AV_COPY128U
+#   define AV_COPY128U(d, s)                                    \
+    do {                                                        \
+        AV_COPY64U(d, s);                                       \
+        AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8);     \
+    } while(0)
+#endif
+
+/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
+ * naturally aligned. They may be implemented using MMX,
+ * so emms_c() must be called before using any float code
+ * afterwards.
+ */
+
+#define AV_COPY(n, d, s) \
+    (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)
+
+#ifndef AV_COPY16
+#   define AV_COPY16(d, s) AV_COPY(16, d, s)
+#endif
+
+#ifndef AV_COPY32
+#   define AV_COPY32(d, s) AV_COPY(32, d, s)
+#endif
+
+#ifndef AV_COPY64
+#   define AV_COPY64(d, s) AV_COPY(64, d, s)
+#endif
+
+#ifndef AV_COPY128
+#   define AV_COPY128(d, s)                    \
+    do {                                       \
+        AV_COPY64(d, s);                       \
+        AV_COPY64((char*)(d)+8, (char*)(s)+8); \
+    } while(0)
+#endif
+
+#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))
+
+#ifndef AV_SWAP64
+#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)
+#endif
+
+#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)
+
+#ifndef AV_ZERO16
+#   define AV_ZERO16(d) AV_ZERO(16, d)
+#endif
+
+#ifndef AV_ZERO32
+#   define AV_ZERO32(d) AV_ZERO(32, d)
+#endif
+
+#ifndef AV_ZERO64
+#   define AV_ZERO64(d) AV_ZERO(64, d)
+#endif
+
+#ifndef AV_ZERO128
+#   define AV_ZERO128(d)         \
+    do {                         \
+        AV_ZERO64(d);            \
+        AV_ZERO64((char*)(d)+8); \
+    } while(0)
+#endif
+
+#endif /* AVUTIL_INTREADWRITE_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/lfg.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,62 @@
+/*
+ * Lagged Fibonacci PRNG
+ * Copyright (c) 2008 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LFG_H
+#define AVUTIL_LFG_H
+
+typedef struct AVLFG {
+    unsigned int state[64];
+    int index;
+} AVLFG;
+
+void av_lfg_init(AVLFG *c, unsigned int seed);
+
+/**
+ * Get the next random unsigned 32-bit number using an ALFG.
+ *
+ * Please also consider a simple LCG like state= state*1664525+1013904223,
+ * it may be good enough and faster for your specific use case.
+ */
+static inline unsigned int av_lfg_get(AVLFG *c){
+    c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63];
+    return c->state[c->index++ & 63];
+}
+
+/**
+ * Get the next random unsigned 32-bit number using a MLFG.
+ *
+ * Please also consider av_lfg_get() above, it is faster.
+ */
+static inline unsigned int av_mlfg_get(AVLFG *c){
+    unsigned int a= c->state[(c->index-55) & 63];
+    unsigned int b= c->state[(c->index-24) & 63];
+    return c->state[c->index++ & 63] = 2*a*b+a+b;
+}
+
+/**
+ * Get the next two numbers generated by a Box-Muller Gaussian
+ * generator using the random numbers issued by lfg.
+ *
+ * @param out array where the two generated numbers are placed
+ */
+void av_bmg_get(AVLFG *lfg, double out[2]);
+
+#endif /* AVUTIL_LFG_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/libm.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,189 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Replacements for frequently missing libm functions
+ */
+
+#ifndef AVUTIL_LIBM_H
+#define AVUTIL_LIBM_H
+
+#include <math.h>
+#include "config.h"
+#include "attributes.h"
+#include "intfloat.h"
+
+#if HAVE_MIPSFPU && HAVE_INLINE_ASM
+#include "libavutil/mips/libm_mips.h"
+#endif /* HAVE_MIPSFPU && HAVE_INLINE_ASM*/
+
+#if !HAVE_ATANF
+#undef atanf
+#define atanf(x) ((float)atan(x))
+#endif
+
+#if !HAVE_ATAN2F
+#undef atan2f
+#define atan2f(y, x) ((float)atan2(y, x))
+#endif
+
+#if !HAVE_POWF
+#undef powf
+#define powf(x, y) ((float)pow(x, y))
+#endif
+
+#if !HAVE_CBRT
+static av_always_inline double cbrt(double x)
+{
+    return x < 0 ? -pow(-x, 1.0 / 3.0) : pow(x, 1.0 / 3.0);
+}
+#endif
+
+#if !HAVE_CBRTF
+static av_always_inline float cbrtf(float x)
+{
+    return x < 0 ? -powf(-x, 1.0 / 3.0) : powf(x, 1.0 / 3.0);
+}
+#endif
+
+#if !HAVE_COSF
+#undef cosf
+#define cosf(x) ((float)cos(x))
+#endif
+
+#if !HAVE_EXPF
+#undef expf
+#define expf(x) ((float)exp(x))
+#endif
+
+#if !HAVE_EXP2
+#undef exp2
+#define exp2(x) exp((x) * 0.693147180559945)
+#endif /* HAVE_EXP2 */
+
+#if !HAVE_EXP2F
+#undef exp2f
+#define exp2f(x) ((float)exp2(x))
+#endif /* HAVE_EXP2F */
+
+#if !HAVE_ISINF
+static av_always_inline av_const int isinf(float x)
+{
+    uint32_t v = av_float2int(x);
+    if ((v & 0x7f800000) != 0x7f800000)
+        return 0;
+    return !(v & 0x007fffff);
+}
+#endif /* HAVE_ISINF */
+
+#if !HAVE_ISNAN
+static av_always_inline av_const int isnan(float x)
+{
+    uint32_t v = av_float2int(x);
+    if ((v & 0x7f800000) != 0x7f800000)
+        return 0;
+    return v & 0x007fffff;
+}
+#endif /* HAVE_ISNAN */
+
+#if !HAVE_LDEXPF
+#undef ldexpf
+#define ldexpf(x, exp) ((float)ldexp(x, exp))
+#endif
+
+#if !HAVE_LLRINT
+#undef llrint
+#define llrint(x) ((long long)rint(x))
+#endif /* HAVE_LLRINT */
+
+#if !HAVE_LLRINTF
+#undef llrintf
+#define llrintf(x) ((long long)rint(x))
+#endif /* HAVE_LLRINT */
+
+#if !HAVE_LOG2
+#undef log2
+#define log2(x) (log(x) * 1.44269504088896340736)
+#endif /* HAVE_LOG2 */
+
+#if !HAVE_LOG2F
+#undef log2f
+#define log2f(x) ((float)log2(x))
+#endif /* HAVE_LOG2F */
+
+#if !HAVE_LOG10F
+#undef log10f
+#define log10f(x) ((float)log10(x))
+#endif
+
+#if !HAVE_SINF
+#undef sinf
+#define sinf(x) ((float)sin(x))
+#endif
+
+#if !HAVE_RINT
+static inline double rint(double x)
+{
+    return x >= 0 ? floor(x + 0.5) : ceil(x - 0.5);
+}
+#endif /* HAVE_RINT */
+
+#if !HAVE_LRINT
+static av_always_inline av_const long int lrint(double x)
+{
+    return rint(x);
+}
+#endif /* HAVE_LRINT */
+
+#if !HAVE_LRINTF
+static av_always_inline av_const long int lrintf(float x)
+{
+    return (int)(rint(x));
+}
+#endif /* HAVE_LRINTF */
+
+#if !HAVE_ROUND
+static av_always_inline av_const double round(double x)
+{
+    return (x > 0) ? floor(x + 0.5) : ceil(x - 0.5);
+}
+#endif /* HAVE_ROUND */
+
+#if !HAVE_ROUNDF
+static av_always_inline av_const float roundf(float x)
+{
+    return (x > 0) ? floor(x + 0.5) : ceil(x - 0.5);
+}
+#endif /* HAVE_ROUNDF */
+
+#if !HAVE_TRUNC
+static av_always_inline av_const double trunc(double x)
+{
+    return (x > 0) ? floor(x) : ceil(x);
+}
+#endif /* HAVE_TRUNC */
+
+#if !HAVE_TRUNCF
+static av_always_inline av_const float truncf(float x)
+{
+    return (x > 0) ? floor(x) : ceil(x);
+}
+#endif /* HAVE_TRUNCF */
+
+#endif /* AVUTIL_LIBM_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/lls.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,64 @@
+/*
+ * linear least squares model
+ *
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LLS_H
+#define AVUTIL_LLS_H
+
+#include "common.h"
+#include "mem.h"
+#include "version.h"
+
+#define MAX_VARS 32
+#define MAX_VARS_ALIGN FFALIGN(MAX_VARS+1,4)
+
+//FIXME avoid direct access to LLSModel from outside
+
+/**
+ * Linear least squares model.
+ */
+typedef struct LLSModel {
+    DECLARE_ALIGNED(32, double, covariance[MAX_VARS_ALIGN][MAX_VARS_ALIGN]);
+    DECLARE_ALIGNED(32, double, coeff[MAX_VARS][MAX_VARS]);
+    double variance[MAX_VARS];
+    int indep_count;
+    /**
+     * Take the outer-product of var[] with itself, and add to the covariance matrix.
+     * @param m this context
+     * @param var training samples, starting with the value to be predicted
+     *            32-byte aligned, and any padding elements must be initialized
+     *            (i.e not denormal/nan).
+     */
+    void (*update_lls)(struct LLSModel *m, const double *var);
+    /**
+     * Inner product of var[] and the LPC coefs.
+     * @param m this context
+     * @param var training samples, excluding the value to be predicted. unaligned.
+     * @param order lpc order
+     */
+    double (*evaluate_lls)(struct LLSModel *m, const double *var, int order);
+} LLSModel;
+
+void avpriv_init_lls(LLSModel *m, int indep_count);
+void ff_init_lls_x86(LLSModel *m);
+void avpriv_solve_lls(LLSModel *m, double threshold, unsigned short min_order);
+
+#endif /* AVUTIL_LLS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/log.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,353 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LOG_H
+#define AVUTIL_LOG_H
+
+#include <stdarg.h>
+#include "avutil.h"
+#include "attributes.h"
+
+typedef enum {
+    AV_CLASS_CATEGORY_NA = 0,
+    AV_CLASS_CATEGORY_INPUT,
+    AV_CLASS_CATEGORY_OUTPUT,
+    AV_CLASS_CATEGORY_MUXER,
+    AV_CLASS_CATEGORY_DEMUXER,
+    AV_CLASS_CATEGORY_ENCODER,
+    AV_CLASS_CATEGORY_DECODER,
+    AV_CLASS_CATEGORY_FILTER,
+    AV_CLASS_CATEGORY_BITSTREAM_FILTER,
+    AV_CLASS_CATEGORY_SWSCALER,
+    AV_CLASS_CATEGORY_SWRESAMPLER,
+    AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40,
+    AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+    AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+    AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
+    AV_CLASS_CATEGORY_DEVICE_OUTPUT,
+    AV_CLASS_CATEGORY_DEVICE_INPUT,
+    AV_CLASS_CATEGORY_NB, ///< not part of ABI/API
+}AVClassCategory;
+
+#define AV_IS_INPUT_DEVICE(category) \
+    (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \
+     ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \
+     ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT))
+
+#define AV_IS_OUTPUT_DEVICE(category) \
+    (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \
+     ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \
+     ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT))
+
+struct AVOptionRanges;
+
+/**
+ * Describe the class of an AVClass context structure. That is an
+ * arbitrary struct of which the first field is a pointer to an
+ * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).
+ */
+typedef struct AVClass {
+    /**
+     * The name of the class; usually it is the same name as the
+     * context structure type to which the AVClass is associated.
+     */
+    const char* class_name;
+
+    /**
+     * A pointer to a function which returns the name of a context
+     * instance ctx associated with the class.
+     */
+    const char* (*item_name)(void* ctx);
+
+    /**
+     * a pointer to the first option specified in the class if any or NULL
+     *
+     * @see av_set_default_options()
+     */
+    const struct AVOption *option;
+
+    /**
+     * LIBAVUTIL_VERSION with which this structure was created.
+     * This is used to allow fields to be added without requiring major
+     * version bumps everywhere.
+     */
+
+    int version;
+
+    /**
+     * Offset in the structure where log_level_offset is stored.
+     * 0 means there is no such variable
+     */
+    int log_level_offset_offset;
+
+    /**
+     * Offset in the structure where a pointer to the parent context for
+     * logging is stored. For example a decoder could pass its AVCodecContext
+     * to eval as such a parent context, which an av_log() implementation
+     * could then leverage to display the parent context.
+     * The offset can be NULL.
+     */
+    int parent_log_context_offset;
+
+    /**
+     * Return next AVOptions-enabled child or NULL
+     */
+    void* (*child_next)(void *obj, void *prev);
+
+    /**
+     * Return an AVClass corresponding to the next potential
+     * AVOptions-enabled child.
+     *
+     * The difference between child_next and this is that
+     * child_next iterates over _already existing_ objects, while
+     * child_class_next iterates over _all possible_ children.
+     */
+    const struct AVClass* (*child_class_next)(const struct AVClass *prev);
+
+    /**
+     * Category used for visualization (like color)
+     * This is only set if the category is equal for all objects using this class.
+     * available since version (51 << 16 | 56 << 8 | 100)
+     */
+    AVClassCategory category;
+
+    /**
+     * Callback to return the category.
+     * available since version (51 << 16 | 59 << 8 | 100)
+     */
+    AVClassCategory (*get_category)(void* ctx);
+
+    /**
+     * Callback to return the supported/allowed ranges.
+     * available since version (52.12)
+     */
+    int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags);
+} AVClass;
+
+/**
+ * @addtogroup lavu_log
+ *
+ * @{
+ *
+ * @defgroup lavu_log_constants Logging Constants
+ *
+ * @{
+ */
+
+/**
+ * Print no output.
+ */
+#define AV_LOG_QUIET    -8
+
+/**
+ * Something went really wrong and we will crash now.
+ */
+#define AV_LOG_PANIC     0
+
+/**
+ * Something went wrong and recovery is not possible.
+ * For example, no header was found for a format which depends
+ * on headers or an illegal combination of parameters is used.
+ */
+#define AV_LOG_FATAL     8
+
+/**
+ * Something went wrong and cannot losslessly be recovered.
+ * However, not all future data is affected.
+ */
+#define AV_LOG_ERROR    16
+
+/**
+ * Something somehow does not look correct. This may or may not
+ * lead to problems. An example would be the use of '-vstrict -2'.
+ */
+#define AV_LOG_WARNING  24
+
+/**
+ * Standard information.
+ */
+#define AV_LOG_INFO     32
+
+/**
+ * Detailed information.
+ */
+#define AV_LOG_VERBOSE  40
+
+/**
+ * Stuff which is only useful for libav* developers.
+ */
+#define AV_LOG_DEBUG    48
+
+#define AV_LOG_MAX_OFFSET (AV_LOG_DEBUG - AV_LOG_QUIET)
+
+/**
+ * @}
+ */
+
+/**
+ * Sets additional colors for extended debugging sessions.
+ * @code
+   av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n");
+   @endcode
+ * Requires 256color terminal support. Uses outside debugging is not
+ * recommended.
+ */
+#define AV_LOG_C(x) (x << 8)
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ *        pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ *        lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ *        subsequent arguments are converted to output.
+ */
+#ifdef USE_AV_LOG
+void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4);
+#else
+#define av_log(avcl, level, fmt, ...) do { } while (0)
+#endif
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ *        pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ *        lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ *        subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_vlog(void *avcl, int level, const char *fmt, va_list vl);
+
+/**
+ * Get the current log level
+ *
+ * @see lavu_log_constants
+ *
+ * @return Current log level
+ */
+int av_log_get_level(void);
+
+/**
+ * Set the log level
+ *
+ * @see lavu_log_constants
+ *
+ * @param level Logging level
+ */
+void av_log_set_level(int level);
+
+/**
+ * Set the logging callback
+ *
+ * @note The callback must be thread safe, even if the application does not use
+ *       threads itself as some codecs are multithreaded.
+ *
+ * @see av_log_default_callback
+ *
+ * @param callback A logging function with a compatible signature.
+ */
+void av_log_set_callback(void (*callback)(void*, int, const char*, va_list));
+
+/**
+ * Default logging callback
+ *
+ * It prints the message to stderr, optionally colorizing it.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ *        pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ *        lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ *        subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_log_default_callback(void *avcl, int level, const char *fmt,
+                             va_list vl);
+
+/**
+ * Return the context name
+ *
+ * @param  ctx The AVClass context
+ *
+ * @return The AVClass class_name
+ */
+const char* av_default_item_name(void* ctx);
+AVClassCategory av_default_get_category(void *ptr);
+
+/**
+ * Format a line of log the same way as the default callback.
+ * @param line          buffer to receive the formated line
+ * @param line_size     size of the buffer
+ * @param print_prefix  used to store whether the prefix must be printed;
+ *                      must point to a persistent integer initially set to 1
+ */
+void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl,
+                        char *line, int line_size, int *print_prefix);
+
+/**
+ * av_dlog macros
+ * Useful to print debug messages that shouldn't get compiled in normally.
+ */
+
+#ifdef DEBUG
+#    define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
+#else
+#    define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)
+#endif
+
+/**
+ * Skip repeated messages, this requires the user app to use av_log() instead of
+ * (f)printf as the 2 would otherwise interfere and lead to
+ * "Last message repeated x times" messages below (f)printf messages with some
+ * bad luck.
+ * Also to receive the last, "last repeated" line if any, the user app must
+ * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end
+ */
+#define AV_LOG_SKIP_REPEATED 1
+
+/**
+ * Include the log severity in messages originating from codecs.
+ *
+ * Results in messages such as:
+ * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts
+ */
+#define AV_LOG_PRINT_LEVEL 2
+
+void av_log_set_flags(int arg);
+int av_log_get_flags(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LOG_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/log2_tab.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2003-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+/* Lookup table: ff_log2_tab[i] == floor(log2(i)) for i in [1,255]
+ * (entry 0 is 0 by convention). */
+const uint8_t ff_log2_tab[256]={
+        0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
+        5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+        6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+        6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+        7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+        7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+        7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+        7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/lzo.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,66 @@
+/*
+ * LZO 1x decompression
+ * copyright (c) 2006 Reimar Doeffinger
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LZO_H
+#define AVUTIL_LZO_H
+
+/**
+ * @defgroup lavu_lzo LZO
+ * @ingroup lavu_crypto
+ *
+ * @{
+ */
+
+#include <stdint.h>
+
+/** @name Error flags returned by av_lzo1x_decode
+ * @{ */
+/// end of the input buffer reached before decoding finished
+#define AV_LZO_INPUT_DEPLETED  1
+/// decoded data did not fit into output buffer
+#define AV_LZO_OUTPUT_FULL     2
+/// a reference to previously decoded data was wrong
+#define AV_LZO_INVALID_BACKPTR 4
+/// a non-specific error in the compressed bitstream
+#define AV_LZO_ERROR           8
+/** @} */
+
+#define AV_LZO_INPUT_PADDING   8
+#define AV_LZO_OUTPUT_PADDING 12
+
+/**
+ * @brief Decodes LZO 1x compressed data.
+ * @param out output buffer
+ * @param outlen size of output buffer, number of bytes left are returned here
+ * @param in input buffer
+ * @param inlen size of input buffer, number of bytes left are returned here
+ * @return 0 on success, otherwise a combination of the error flags above
+ *
+ * Make sure all buffers are appropriately padded, in must provide
+ * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes.
+ */
+int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LZO_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/macros.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,48 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Utility Preprocessor macros
+ */
+
+#ifndef AVUTIL_MACROS_H
+#define AVUTIL_MACROS_H
+
+/**
+ * @addtogroup preproc_misc Preprocessor String Macros
+ *
+ * String manipulation macros
+ *
+ * @{
+ */
+
+#define AV_STRINGIFY(s)         AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define AV_GLUE(a, b) a ## b
+#define AV_JOIN(a, b) AV_GLUE(a, b)
+
+/**
+ * @}
+ */
+
+#define AV_PRAGMA(s) _Pragma(#s)
+
+#endif /* AVUTIL_MACROS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/mathematics.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,164 @@
+/*
+ * copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MATHEMATICS_H
+#define AVUTIL_MATHEMATICS_H
+
+#include <stdint.h>
+#include <math.h>
+#include "attributes.h"
+#include "rational.h"
+#include "intfloat.h"
+
+#ifndef M_E
+#define M_E            2.7182818284590452354   /* e */
+#endif
+#ifndef M_LN2
+#define M_LN2          0.69314718055994530942  /* log_e 2 */
+#endif
+#ifndef M_LN10
+#define M_LN10         2.30258509299404568402  /* log_e 10 */
+#endif
+#ifndef M_LOG2_10
+#define M_LOG2_10      3.32192809488736234787  /* log_2 10 */
+#endif
+#ifndef M_PHI
+#define M_PHI          1.61803398874989484820   /* phi / golden ratio */
+#endif
+#ifndef M_PI
+#define M_PI           3.14159265358979323846  /* pi */
+#endif
+#ifndef M_PI_2
+#define M_PI_2         1.57079632679489661923  /* pi/2 */
+#endif
+#ifndef M_SQRT1_2
+#define M_SQRT1_2      0.70710678118654752440  /* 1/sqrt(2) */
+#endif
+#ifndef M_SQRT2
+#define M_SQRT2        1.41421356237309504880  /* sqrt(2) */
+#endif
+#ifndef NAN
+#define NAN            av_int2float(0x7fc00000)
+#endif
+#ifndef INFINITY
+#define INFINITY       av_int2float(0x7f800000)
+#endif
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+
+enum AVRounding {
+    AV_ROUND_ZERO     = 0, ///< Round toward zero.
+    AV_ROUND_INF      = 1, ///< Round away from zero.
+    AV_ROUND_DOWN     = 2, ///< Round toward -infinity.
+    AV_ROUND_UP       = 3, ///< Round toward +infinity.
+    AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero.
+    AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE
+};
+
+/**
+ * Return the greatest common divisor of a and b.
+ * If both a and b are 0 or either or both are <0 then behavior is
+ * undefined.
+ */
+int64_t av_const av_gcd(int64_t a, int64_t b);
+
+/**
+ * Rescale a 64-bit integer with rounding to nearest.
+ * A simple a*b/c isn't possible as it can overflow.
+ */
+int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
+
+/**
+ * Rescale a 64-bit integer with specified rounding.
+ * A simple a*b/c isn't possible as it can overflow.
+ *
+ * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is
+ *         INT64_MIN or INT64_MAX then a is passed through unchanged.
+ */
+int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers.
+ */
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers with specified rounding.
+ *
+ * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is
+ *         INT64_MIN or INT64_MAX then a is passed through unchanged.
+ */
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+                         enum AVRounding) av_const;
+
+/**
+ * Compare 2 timestamps each in its own timebases.
+ * The result of the function is undefined if one of the timestamps
+ * is outside the int64_t range when represented in the others timebase.
+ * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they represent the same position
+ */
+int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
+
+/**
+ * Compare 2 integers modulo mod.
+ * That is we compare integers a and b for which only the least
+ * significant log2(mod) bits are known.
+ *
+ * @param mod must be a power of 2
+ * @return a negative value if a is smaller than b
+ *         a positive value if a is greater than b
+ *         0                if a equals          b
+ */
+int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
+
+/**
+ * Rescale a timestamp while preserving known durations.
+ *
+ * @param in_ts Input timestamp
+ * @param in_tb Input timebase
+ * @param fs_tb Duration and *last timebase
+ * @param duration duration till the next call
+ * @param out_tb Output timebase
+ */
+int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts,  AVRational fs_tb, int duration, int64_t *last, AVRational out_tb);
+
+/**
+ * Add a value to a timestamp.
+ *
+ * This function guarantees that when the same value is repeatedly added,
+ * no accumulation of rounding errors occurs.
+ *
+ * @param ts Input timestamp
+ * @param ts_tb Input timestamp timebase
+ * @param inc value to add to ts
+ * @param inc_tb inc timebase
+ */
+int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc);
+
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MATHEMATICS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/md5.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2006 Michael Niedermayer (michaelni@gmx.at)
+ * Copyright (C) 2003-2005 by Christopher R. Hertel (crh@ubiqx.mn.org)
+ *
+ * References:
+ *  IETF RFC 1321: The MD5 Message-Digest Algorithm
+ *       Ron Rivest. IETF, April, 1992
+ *
+ * based on http://ubiqx.org/libcifs/source/Auth/MD5.c
+ *          from Christopher R. Hertel (crh@ubiqx.mn.org)
+ * Simplified, cleaned and IMO redundant comments removed by michael.
+ *
+ * If you use gcc, then version 4.1 or later and -fomit-frame-pointer is
+ * strongly recommended.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include "bswap.h"
+#include "intreadwrite.h"
+#include "md5.h"
+#include "mem.h"
+
+typedef struct AVMD5{
+    uint64_t len;
+    uint8_t  block[64];
+    uint32_t ABCD[4];
+} AVMD5;
+
+const int av_md5_size = sizeof(AVMD5);
+
+/* Allocate a zero-initialized AVMD5 context; the caller releases it
+ * with av_free(). Returns NULL on allocation failure. */
+struct AVMD5 *av_md5_alloc(void)
+{
+    return av_mallocz(sizeof(struct AVMD5));
+}
+
+static const uint8_t S[4][4] = {
+    { 7, 12, 17, 22 },  /* round 1 */
+    { 5,  9, 14, 20 },  /* round 2 */
+    { 4, 11, 16, 23 },  /* round 3 */
+    { 6, 10, 15, 21 }   /* round 4 */
+};
+
+static const uint32_t T[64] = { // T[i]= fabs(sin(i+1)<<32)
+    0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,   /* round 1 */
+    0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
+    0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
+    0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
+
+    0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,   /* round 2 */
+    0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
+    0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
+    0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
+
+    0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,   /* round 3 */
+    0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
+    0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05,
+    0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
+
+    0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,   /* round 4 */
+    0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
+    0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
+    0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
+};
+
+/* One MD5 step (RFC 1321): selects the round function by step index i,
+ * adds the message word X[...] and constant T[i], then rotate-adds.
+ * Evaluates each argument more than once — call with plain lvalues only. */
+#define CORE(i, a, b, c, d) do {                                        \
+        t = S[i >> 4][i & 3];                                           \
+        a += T[i];                                                      \
+                                                                        \
+        if (i < 32) {                                                   \
+            if (i < 16) a += (d ^ (b & (c ^ d)))  + X[       i  & 15];  \
+            else        a += ((d & b) | (~d & c)) + X[(1 + 5*i) & 15];  \
+        } else {                                                        \
+            if (i < 48) a += (b ^ c ^ d)          + X[(5 + 3*i) & 15];  \
+            else        a += (c ^ (b | ~d))       + X[(    7*i) & 15];  \
+        }                                                               \
+        a = b + (a << t | a >> (32 - t));                               \
+    } while (0)
+
+/* Run the MD5 compression function over nblocks 64-byte blocks at src,
+ * updating the running state in ABCD.
+ *
+ * Note: ABCD[] stores the state REVERSED relative to RFC 1321 —
+ * ABCD[3] is the RFC's A word (see the init values in av_md5_init()
+ * and the reversed store in av_md5_final()).
+ *
+ * On big-endian hosts the input words are byte-swapped IN PLACE, so
+ * src must be writable there (av_md5_update() copies through
+ * ctx->block in that case). */
+static void body(uint32_t ABCD[4], uint32_t *src, int nblocks)
+{
+    int i av_unused;
+    int n;
+    uint32_t a, b, c, d, t, *X;
+
+    for (n = 0; n < nblocks; n++) {
+        a = ABCD[3];
+        b = ABCD[2];
+        c = ABCD[1];
+        d = ABCD[0];
+
+        X = src + n * 16;
+
+#if HAVE_BIGENDIAN
+        for (i = 0; i < 16; i++)
+            X[i] = av_bswap32(X[i]);
+#endif
+
+#if CONFIG_SMALL
+        /* Compact variant: one rolled loop, rotating the four state
+         * variables each step. */
+        for (i = 0; i < 64; i++) {
+            CORE(i, a, b, c, d);
+            t = d;
+            d = c;
+            c = b;
+            b = a;
+            a = t;
+        }
+#else
+        /* Fast variant: fully unrolled 64 steps with the variable
+         * rotation baked into the macro arguments. */
+#define CORE2(i)                                                        \
+        CORE( i,   a,b,c,d); CORE((i+1),d,a,b,c);                       \
+        CORE((i+2),c,d,a,b); CORE((i+3),b,c,d,a)
+#define CORE4(i) CORE2(i); CORE2((i+4)); CORE2((i+8)); CORE2((i+12))
+        CORE4(0); CORE4(16); CORE4(32); CORE4(48);
+#endif
+
+        /* Feed-forward: add this block's result into the chaining state. */
+        ABCD[0] += d;
+        ABCD[1] += c;
+        ABCD[2] += b;
+        ABCD[3] += a;
+    }
+}
+
+/* Reset ctx to the RFC 1321 initial state. The four constants are the
+ * standard A/B/C/D init values stored in reverse order (ABCD[3] holds
+ * the RFC's A = 0x67452301) to match body()'s load order. */
+void av_md5_init(AVMD5 *ctx)
+{
+    ctx->len     = 0;
+
+    ctx->ABCD[0] = 0x10325476;
+    ctx->ABCD[1] = 0x98badcfe;
+    ctx->ABCD[2] = 0xefcdab89;
+    ctx->ABCD[3] = 0x67452301;
+}
+
+/* Absorb len bytes from src into the hash state.
+ * Partial blocks are buffered in ctx->block; full 64-byte blocks are
+ * compressed immediately. */
+void av_md5_update(AVMD5 *ctx, const uint8_t *src, int len)
+{
+    const uint8_t *end;
+    int j;
+
+    /* j = number of bytes already buffered from a previous call. */
+    j = ctx->len & 63;
+    ctx->len += len;
+
+    if (j) {
+        /* Top up the buffered partial block first. */
+        int cnt = FFMIN(len, 64 - j);
+        memcpy(ctx->block + j, src, cnt);
+        src += cnt;
+        len -= cnt;
+        if (j + cnt < 64)
+            return;
+        body(ctx->ABCD, (uint32_t *)ctx->block, 1);
+    }
+
+    end = src + (len & ~63);
+    if (HAVE_BIGENDIAN || (!HAVE_FAST_UNALIGNED && ((intptr_t)src & 3))) {
+        /* body() byte-swaps in place on big-endian, and may read
+         * 32-bit words: bounce each block through ctx->block when the
+         * input must not be modified or is misaligned. */
+       while (src < end) {
+           memcpy(ctx->block, src, 64);
+           body(ctx->ABCD, (uint32_t *) ctx->block, 1);
+           src += 64;
+        }
+    } else {
+        /* Fast path: hash all whole blocks directly from src. */
+        int nblocks = len / 64;
+        body(ctx->ABCD, (uint32_t *)src, nblocks);
+        src = end;
+    }
+    /* Stash the trailing partial block for the next call/final. */
+    len &= 63;
+    if (len > 0)
+        memcpy(ctx->block, src, len);
+}
+
+/* Apply RFC 1321 padding and write the 16-byte digest to dst.
+ * The message bit length is captured BEFORE the padding updates below
+ * mutate ctx->len. */
+void av_md5_final(AVMD5 *ctx, uint8_t *dst)
+{
+    int i;
+    uint64_t finalcount = av_le2ne64(ctx->len << 3);
+
+    /* Pad: a single 0x80 byte, then zero bytes (the "" literal's NUL)
+     * until the length is 56 mod 64, then the 64-bit LE bit count. */
+    av_md5_update(ctx, "\200", 1);
+    while ((ctx->len & 63) != 56)
+        av_md5_update(ctx, "", 1);
+
+    av_md5_update(ctx, (uint8_t *)&finalcount, 8);
+
+    /* State is stored reversed (see av_md5_init), so emit ABCD[3]..[0]
+     * as little-endian words. */
+    for (i = 0; i < 4; i++)
+        AV_WL32(dst + 4*i, ctx->ABCD[3 - i]);
+}
+
+/* One-shot convenience wrapper: MD5 of the len bytes at src into dst
+ * (16 bytes), using a stack-local context. */
+void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len)
+{
+    AVMD5 ctx;
+
+    av_md5_init(&ctx);
+    av_md5_update(&ctx, src, len);
+    av_md5_final(&ctx, dst);
+}
+
+/* Stand-alone self-test, built only with -DTEST: hashes a few inputs
+ * around the 64-byte block boundary and prints the hex digests. */
+#ifdef TEST
+#include <stdio.h>
+
+/* Print a 16-byte digest as 32 lowercase hex characters. */
+static void print_md5(uint8_t *md5)
+{
+    int i;
+    for (i = 0; i < 16; i++)
+        printf("%02x", md5[i]);
+    printf("\n");
+}
+
+int main(void){
+    uint8_t md5val[16];
+    int i;
+    volatile uint8_t in[1000]; // volatile to workaround http://llvm.org/bugs/show_bug.cgi?id=20849
+    // FIXME remove volatile once it has been fixed and all fate clients are updated
+
+    for (i = 0; i < 1000; i++)
+        in[i] = i * i;
+    /* 63/64/65 exercise the partial-block / exact-block / spill paths. */
+    av_md5_sum(md5val, in, 1000); print_md5(md5val);
+    av_md5_sum(md5val, in,   63); print_md5(md5val);
+    av_md5_sum(md5val, in,   64); print_md5(md5val);
+    av_md5_sum(md5val, in,   65); print_md5(md5val);
+    for (i = 0; i < 1000; i++)
+        in[i] = i % 127;
+    av_md5_sum(md5val, in,  999); print_md5(md5val);
+
+    return 0;
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/md5.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,81 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MD5_H
+#define AVUTIL_MD5_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_md5 MD5
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_md5_size;
+
+struct AVMD5;
+
+/**
+ * Allocate an AVMD5 context.
+ */
+struct AVMD5 *av_md5_alloc(void);
+
+/**
+ * Initialize MD5 hashing.
+ *
+ * @param ctx pointer to the function context (of size av_md5_size)
+ */
+void av_md5_init(struct AVMD5 *ctx);
+
+/**
+ * Update hash value.
+ *
+ * @param ctx hash function context
+ * @param src input data to update hash with
+ * @param len input data length
+ */
+void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param ctx hash function context
+ * @param dst buffer where output digest value is stored
+ */
+void av_md5_final(struct AVMD5 *ctx, uint8_t *dst);
+
+/**
+ * Hash an array of data.
+ *
+ * @param dst The output buffer to write the digest into
+ * @param src The data to hash
+ * @param len The length of the data, in bytes
+ */
+void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MD5_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/mem.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,571 @@
+/*
+ * default memory allocator for libavutil
+ * Copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * default memory allocator for libavutil
+ */
+
+#define _XOPEN_SOURCE 600
+
+#include "config.h"
+
+#include <limits.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#if HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
+#include "avassert.h"
+#include "avutil.h"
+#include "common.h"
+#include "dynarray.h"
+#include "intreadwrite.h"
+#include "mem.h"
+
+//#define USE_MEM_STATS
+
+#ifdef USE_MEM_STATS
+#include <malloc.h>
+static int mem_cur, mem_max;
+static int block_cur, block_max;
+#endif
+
+#ifdef MALLOC_PREFIX
+
+#define malloc         AV_JOIN(MALLOC_PREFIX, malloc)
+#define memalign       AV_JOIN(MALLOC_PREFIX, memalign)
+#define posix_memalign AV_JOIN(MALLOC_PREFIX, posix_memalign)
+#define realloc        AV_JOIN(MALLOC_PREFIX, realloc)
+#define free           AV_JOIN(MALLOC_PREFIX, free)
+
+void *malloc(size_t size);
+void *memalign(size_t align, size_t size);
+int   posix_memalign(void **ptr, size_t align, size_t size);
+void *realloc(void *ptr, size_t size);
+void  free(void *ptr);
+
+#endif /* MALLOC_PREFIX */
+
+#define ALIGN (HAVE_AVX ? 32 : 16)
+
+/* NOTE: if you want to override these functions with your own
+ * implementations (not recommended) you have to link libav* as
+ * dynamic libraries and remove -Wl,-Bsymbolic from the linker flags.
+ * Note that this will cost performance. */
+
+/* Upper bound enforced by av_malloc()/av_realloc(); INT_MAX by default. */
+static size_t max_alloc_size= INT_MAX;
+
+/* Set the maximum size a single allocation request may have. */
+void av_max_alloc(size_t max){
+    max_alloc_size = max;
+}
+
+/*
+ * Allocate a block of at least size bytes, aligned to ALIGN bytes
+ * (32 with AVX, 16 otherwise) on platforms with an aligned allocator.
+ * Returns NULL on failure or if size exceeds max_alloc_size - 32.
+ * A size of 0 still yields a unique, freeable pointer.
+ */
+void *av_malloc(size_t size)
+{
+    void *ptr = NULL;
+#if CONFIG_MEMALIGN_HACK
+    long diff;
+#endif
+
+    /* let's disallow possibly ambiguous cases */
+    if (size > (max_alloc_size - 32))
+        return NULL;
+
+#if CONFIG_MEMALIGN_HACK
+    /* No aligned allocator available: over-allocate by ALIGN, shift the
+     * pointer up to an ALIGN boundary, and record the shift in the byte
+     * just before the returned pointer so av_free()/av_realloc() can
+     * recover the original malloc pointer. */
+    ptr = malloc(size + ALIGN);
+    if (!ptr)
+        return ptr;
+    diff              = ((~(long)ptr)&(ALIGN - 1)) + 1;
+    ptr               = (char *)ptr + diff;
+    ((char *)ptr)[-1] = diff;
+#elif HAVE_POSIX_MEMALIGN
+    if (size) //OS X on SDK 10.6 has a broken posix_memalign implementation
+    if (posix_memalign(&ptr, ALIGN, size))
+        ptr = NULL;
+#elif HAVE_ALIGNED_MALLOC
+    ptr = _aligned_malloc(size, ALIGN);
+#elif HAVE_MEMALIGN
+#ifndef __DJGPP__
+    ptr = memalign(ALIGN, size);
+#else
+    ptr = memalign(size, ALIGN);
+#endif
+    /* Why 64?
+     * Indeed, we should align it:
+     *   on  4 for 386
+     *   on 16 for 486
+     *   on 32 for 586, PPro - K6-III
+     *   on 64 for K7 (maybe for P3 too).
+     * Because L1 and L2 caches are aligned on those values.
+     * But I don't want to code such logic here!
+     */
+    /* Why 32?
+     * For AVX ASM. SSE / NEON needs only 16.
+     * Why not larger? Because I did not see a difference in benchmarks ...
+     */
+    /* benchmarks with P3
+     * memalign(64) + 1          3071, 3051, 3032
+     * memalign(64) + 2          3051, 3032, 3041
+     * memalign(64) + 4          2911, 2896, 2915
+     * memalign(64) + 8          2545, 2554, 2550
+     * memalign(64) + 16         2543, 2572, 2563
+     * memalign(64) + 32         2546, 2545, 2571
+     * memalign(64) + 64         2570, 2533, 2558
+     *
+     * BTW, malloc seems to do 8-byte alignment by default here.
+     */
+#else
+    ptr = malloc(size);
+#ifdef USE_MEM_STATS
+    printf("malloc(%ld) -> %p\n", size, ptr);
+    if (ptr) {
+        mem_cur += malloc_usable_size(ptr);
+        if (mem_cur > mem_max) {
+            mem_max = mem_cur;
+            printf("mem_max=%d\n", mem_max);
+        }
+        if (++block_cur > block_max) {
+            block_max = block_cur;
+            printf("block_max=%d\n", block_max);
+        }
+    }
+#endif
+#endif
+    /* size == 0 with a NULL result: retry with 1 byte so the caller
+     * always gets a distinct pointer it can pass to av_free(). */
+    if(!ptr && !size) {
+        size = 1;
+        ptr= av_malloc(1);
+    }
+#if CONFIG_MEMORY_POISONING
+    if (ptr)
+        memset(ptr, FF_MEMORY_POISON, size);
+#endif
+    return ptr;
+}
+
+/*
+ * Resize a block previously returned by av_malloc()/av_realloc().
+ * Returns the (possibly moved) block, or NULL on failure — in which
+ * case the original block is left untouched.  Note the "size + !size"
+ * trick: a 0-byte request is bumped to 1 so a valid pointer is still
+ * returned.
+ */
+void *av_realloc(void *ptr, size_t size)
+{
+#if CONFIG_MEMALIGN_HACK
+    int diff;
+#endif
+
+    /* let's disallow possibly ambiguous cases */
+    if (size > (max_alloc_size - 32))
+        return NULL;
+
+#if CONFIG_MEMALIGN_HACK
+    //FIXME this isn't aligned correctly, though it probably isn't needed
+    if (!ptr)
+        return av_malloc(size);
+    /* Recover the alignment shift stored by av_malloc() in the byte
+     * before the user pointer, realloc the real block, then re-apply
+     * the same shift. */
+    diff = ((char *)ptr)[-1];
+    av_assert0(diff>0 && diff<=ALIGN);
+    ptr = realloc((char *)ptr - diff, size + diff);
+    if (ptr)
+        ptr = (char *)ptr + diff;
+    return ptr;
+#elif HAVE_ALIGNED_MALLOC
+    return _aligned_realloc(ptr, size + !size, ALIGN);
+#else
+#ifdef USE_MEM_STATS
+    if (ptr) {
+        mem_cur -= malloc_usable_size(ptr);
+        block_cur--;
+    }
+    printf("realloc(%p, %ld)\n", ptr, size);
+    ptr = realloc(ptr, size + !size);
+    if (ptr) {
+        mem_cur += malloc_usable_size(ptr);
+        if (mem_cur > mem_max) {
+            mem_max = mem_cur;
+            printf("mem_max=%d\n", mem_max);
+        }
+        if (++block_cur > block_max) {
+            block_max = block_cur;
+            printf("block_max=%d\n", block_max);
+        }
+    }
+    return ptr;
+#else
+    return realloc(ptr, size + !size);
+#endif
+#endif
+}
+
+/*
+ * realloc() variant that frees the buffer on failure ("f" = free).
+ * Resizes ptr to nelem * elsize bytes; if the multiplication overflows
+ * or the reallocation fails (with a nonzero size), ptr is freed and
+ * NULL is returned — the caller must not use the old pointer.
+ */
+void *av_realloc_f(void *ptr, size_t nelem, size_t elsize)
+{
+    size_t size;
+    void *r;
+
+    if (av_size_mult(elsize, nelem, &size)) {
+        av_free(ptr);
+        return NULL;
+    }
+    r = av_realloc(ptr, size);
+    if (!r && size)
+        av_free(ptr);
+    return r;
+}
+
+/*
+ * Resize the buffer pointed to by *ptr (ptr is a pointer-to-pointer
+ * passed as void* for convenience).  On failure the buffer is freed,
+ * *ptr is set to NULL and AVERROR(ENOMEM) is returned; size == 0
+ * frees the buffer and returns 0.
+ */
+int av_reallocp(void *ptr, size_t size)
+{
+    void **ptrptr = ptr;
+    void *ret;
+
+    if (!size) {
+        av_freep(ptr);
+        return 0;
+    }
+    ret = av_realloc(*ptrptr, size);
+
+    if (!ret) {
+        av_freep(ptr);
+        return AVERROR(ENOMEM);
+    }
+
+    *ptrptr = ret;
+    return 0;
+}
+
+/* Resize an array to nmemb elements of size bytes each.  Returns NULL
+ * when size is 0 or nmemb * size would reach INT_MAX (overflow guard);
+ * on plain allocation failure the original block is left intact. */
+void *av_realloc_array(void *ptr, size_t nmemb, size_t size)
+{
+    if (!size || nmemb >= INT_MAX / size)
+        return NULL;
+    return av_realloc(ptr, nmemb * size);
+}
+
+/* In-place array resize via av_realloc_f(): on overflow or allocation
+ * failure the buffer is freed, *ptr becomes NULL and AVERROR(ENOMEM)
+ * is returned.  nmemb == 0 or size == 0 frees and returns 0. */
+int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
+{
+    void **ptrptr = ptr;
+    *ptrptr = av_realloc_f(*ptrptr, nmemb, size);
+    if (!*ptrptr && nmemb && size)
+        return AVERROR(ENOMEM);
+    return 0;
+}
+
+/*
+ * Free a block allocated by av_malloc()/av_realloc().  NULL is a
+ * no-op.  The free path must mirror the allocation path: with the
+ * memalign hack the stored alignment shift is undone first; with
+ * _aligned_malloc the matching _aligned_free is used.
+ */
+void av_free(void *ptr)
+{
+#if CONFIG_MEMALIGN_HACK
+    if (ptr) {
+        int v= ((char *)ptr)[-1];
+        av_assert0(v>0 && v<=ALIGN);
+        free((char *)ptr - v);
+    }
+#elif HAVE_ALIGNED_MALLOC
+    _aligned_free(ptr);
+#else
+#ifdef USE_MEM_STATS
+    if (ptr) {
+        printf("free(%p)\n", ptr);
+        mem_cur -= malloc_usable_size(ptr);
+        block_cur--;
+    }
+#endif
+    free(ptr);
+#endif
+}
+
+/* Free the buffer *arg points to and set *arg to NULL, guarding
+ * against later use-after-free / double-free through that pointer. */
+void av_freep(void *arg)
+{
+    void **ptr = (void **)arg;
+    av_free(*ptr);
+    *ptr = NULL;
+}
+
+/* av_malloc() followed by zero-fill; NULL on allocation failure. */
+void *av_mallocz(size_t size)
+{
+    void *ptr = av_malloc(size);
+    if (ptr)
+        memset(ptr, 0, size);
+    return ptr;
+}
+
+#ifdef USE_FULL
+/* calloc() analogue: zeroed array of nmemb elements of size bytes,
+ * NULL on overflow (nmemb * size would reach INT_MAX) or failure. */
+void *av_calloc(size_t nmemb, size_t size)
+{
+    if (size <= 0 || nmemb >= INT_MAX / size)
+        return NULL;
+    return av_mallocz(nmemb * size);
+}
+
+/* Duplicate a NUL-terminated string with av_realloc(); returns NULL
+ * for a NULL input or on allocation failure.  Caller frees. */
+char *av_strdup(const char *s)
+{
+    char *ptr = NULL;
+    if (s) {
+        int len = strlen(s) + 1;
+        ptr = av_realloc(NULL, len);
+        if (ptr)
+            memcpy(ptr, s, len);
+    }
+    return ptr;
+}
+
+/* Duplicate at most len bytes of s, stopping early at an embedded NUL;
+ * the copy is always NUL-terminated.  Returns NULL for NULL input or
+ * on allocation failure.  Caller frees. */
+char *av_strndup(const char *s, size_t len)
+{
+    char *ret = NULL, *end;
+
+    if (!s)
+        return NULL;
+
+    /* Shrink len to the first NUL within the window, if any. */
+    end = memchr(s, 0, len);
+    if (end)
+        len = end - s;
+
+    ret = av_realloc(NULL, len + 1);
+    if (!ret)
+        return NULL;
+
+    memcpy(ret, s, len);
+    ret[len] = 0;
+    return ret;
+}
+
+/* Duplicate size bytes from p into a freshly av_malloc()ed buffer;
+ * returns NULL for NULL input or on allocation failure. */
+void *av_memdup(const void *p, size_t size)
+{
+    void *ptr = NULL;
+    if (p) {
+        ptr = av_malloc(size);
+        if (ptr)
+            memcpy(ptr, p, size);
+    }
+    return ptr;
+}
+
+/* Append elem to the growing pointer array *tab_ptr (count in *nb_ptr)
+ * using the AV_DYNARRAY_ADD helper.  Unlike av_dynarray_add(), the
+ * existing array is kept intact on failure and AVERROR(ENOMEM) is
+ * returned; 0 on success. */
+int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
+{
+    void **tab = *(void ***)tab_ptr;
+
+    AV_DYNARRAY_ADD(INT_MAX, sizeof(*tab), tab, *nb_ptr, {
+        tab[*nb_ptr] = elem;
+        *(void ***)tab_ptr = tab;
+    }, {
+        return AVERROR(ENOMEM);
+    });
+    return 0;
+}
+
+/* Append elem to the growing pointer array *tab_ptr (count in *nb_ptr).
+ * On allocation failure the WHOLE array is freed and the count reset
+ * to 0 — callers needing the old contents preserved should use
+ * av_dynarray_add_nofree() instead. */
+void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem)
+{
+    void **tab = *(void ***)tab_ptr;
+
+    AV_DYNARRAY_ADD(INT_MAX, sizeof(*tab), tab, *nb_ptr, {
+        tab[*nb_ptr] = elem;
+        *(void ***)tab_ptr = tab;
+    }, {
+        *nb_ptr = 0;
+        av_freep(tab_ptr);
+    });
+}
+
+/* Grow an array of elem_size-byte elements by one slot and return a
+ * pointer to the new slot.  If elem_data is non-NULL it is copied in;
+ * otherwise the slot is poisoned when memory poisoning is enabled.
+ * On failure the array is freed, *nb_ptr reset to 0 and NULL returned. */
+void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,
+                       const uint8_t *elem_data)
+{
+    uint8_t *tab_elem_data = NULL;
+
+    AV_DYNARRAY_ADD(INT_MAX, elem_size, *tab_ptr, *nb_ptr, {
+        tab_elem_data = (uint8_t *)*tab_ptr + (*nb_ptr) * elem_size;
+        if (elem_data)
+            memcpy(tab_elem_data, elem_data, elem_size);
+        else if (CONFIG_MEMORY_POISONING)
+            memset(tab_elem_data, FF_MEMORY_POISON, elem_size);
+    }, {
+        av_freep(tab_ptr);
+        *nb_ptr = 0;
+    });
+    return tab_elem_data;
+}
+
+/* Helper for av_memcpy_backptr(back == 2): replicate the 2-byte
+ * pattern ending just before dst forward over len bytes, writing
+ * 4 bytes at a time where possible. */
+static void fill16(uint8_t *dst, int len)
+{
+    uint32_t v = AV_RN16(dst - 2);
+
+    /* Duplicate the 16-bit pattern into both halves of a 32-bit word. */
+    v |= v << 16;
+
+    while (len >= 4) {
+        AV_WN32(dst, v);
+        dst += 4;
+        len -= 4;
+    }
+
+    /* Byte-wise tail. */
+    while (len--) {
+        *dst = dst[-2];
+        dst++;
+    }
+}
+
+/* Helper for av_memcpy_backptr(back == 3): replicate the 3-byte
+ * pattern ending just before dst over len bytes.  Three rotated
+ * 32-bit words (a, b, c) cover 12 bytes per iteration, since
+ * lcm(3, 4) = 12. */
+static void fill24(uint8_t *dst, int len)
+{
+#if HAVE_BIGENDIAN
+    uint32_t v = AV_RB24(dst - 3);
+    uint32_t a = v << 8  | v >> 16;
+    uint32_t b = v << 16 | v >> 8;
+    uint32_t c = v << 24 | v;
+#else
+    uint32_t v = AV_RL24(dst - 3);
+    uint32_t a = v       | v << 24;
+    uint32_t b = v >> 8  | v << 16;
+    uint32_t c = v >> 16 | v << 8;
+#endif
+
+    while (len >= 12) {
+        AV_WN32(dst,     a);
+        AV_WN32(dst + 4, b);
+        AV_WN32(dst + 8, c);
+        dst += 12;
+        len -= 12;
+    }
+
+    /* Up to two more whole words from the rotation sequence... */
+    if (len >= 4) {
+        AV_WN32(dst, a);
+        dst += 4;
+        len -= 4;
+    }
+
+    if (len >= 4) {
+        AV_WN32(dst, b);
+        dst += 4;
+        len -= 4;
+    }
+
+    /* ...then a byte-wise tail. */
+    while (len--) {
+        *dst = dst[-3];
+        dst++;
+    }
+}
+
+/* Helper for av_memcpy_backptr(back == 4): replicate the 4-byte
+ * pattern ending just before dst forward over len bytes. */
+static void fill32(uint8_t *dst, int len)
+{
+    uint32_t v = AV_RN32(dst - 4);
+
+    while (len >= 4) {
+        AV_WN32(dst, v);
+        dst += 4;
+        len -= 4;
+    }
+
+    /* Byte-wise tail. */
+    while (len--) {
+        *dst = dst[-4];
+        dst++;
+    }
+}
+
+/*
+ * Overlapping LZ77-style copy: copy cnt bytes to dst from the data
+ * `back` bytes before it, with the overlapping-source semantics
+ * (patterns shorter than cnt are repeated).  back == 0 is a no-op.
+ * Small periods (1-4) use dedicated pattern-fill helpers; larger
+ * periods double the non-overlapping copy length each pass.
+ */
+void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
+{
+    const uint8_t *src = &dst[-back];
+    if (!back)
+        return;
+
+    if (back == 1) {
+        memset(dst, *src, cnt);
+    } else if (back == 2) {
+        fill16(dst, cnt);
+    } else if (back == 3) {
+        fill24(dst, cnt);
+    } else if (back == 4) {
+        fill32(dst, cnt);
+    } else {
+        if (cnt >= 16) {
+            /* Doubling strategy: each memcpy is non-overlapping because
+             * dst has advanced past the region already written, and the
+             * valid pattern length doubles every iteration. */
+            int blocklen = back;
+            while (cnt > blocklen) {
+                memcpy(dst, src, blocklen);
+                dst       += blocklen;
+                cnt       -= blocklen;
+                blocklen <<= 1;
+            }
+            memcpy(dst, src, cnt);
+            return;
+        }
+        /* cnt < 16 and back > 4: unaligned word copies never overlap. */
+        if (cnt >= 8) {
+            AV_COPY32U(dst,     src);
+            AV_COPY32U(dst + 4, src + 4);
+            src += 8;
+            dst += 8;
+            cnt -= 8;
+        }
+        if (cnt >= 4) {
+            AV_COPY32U(dst, src);
+            src += 4;
+            dst += 4;
+            cnt -= 4;
+        }
+        if (cnt >= 2) {
+            AV_COPY16U(dst, src);
+            src += 2;
+            dst += 2;
+            cnt -= 2;
+        }
+        if (cnt)
+            *dst = *src;
+    }
+}
+
+/*
+ * Grow-only realloc for buffers that are resized often: if the current
+ * capacity *size already covers min_size, return ptr unchanged;
+ * otherwise grow by ~17/16 plus slack to amortize future growth.
+ * Existing contents are preserved by av_realloc().  On failure *size
+ * is set to 0 and NULL returned.
+ */
+void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
+{
+    if (min_size < *size)
+        return ptr;
+
+    /* Over-allocate to make the next few grows free. */
+    min_size = FFMAX(17 * min_size / 16 + 32, min_size);
+
+    ptr = av_realloc(ptr, min_size);
+    /* we could set this to the unmodified min_size but this is safer
+     * if the user lost the ptr and uses NULL now
+     */
+    if (!ptr)
+        min_size = 0;
+
+    *size = min_size;
+
+    return ptr;
+}
+#endif
+
+/*
+ * Shared implementation behind av_fast_malloc(): ensure the buffer
+ * *ptr (capacity in *size) holds at least min_size bytes.  NOTE:
+ * unlike av_fast_realloc() the old buffer is FREED and replaced, so
+ * existing contents are NOT preserved.  Returns 1 if a (re)allocation
+ * happened, 0 if the buffer was already large enough; on failure *ptr
+ * is NULL and *size is 0.
+ */
+static inline int ff_fast_malloc(void *ptr, unsigned int *size, size_t min_size, int zero_realloc)
+{
+    void **p = ptr;
+    if (min_size < *size)
+        return 0;
+    /* Same ~17/16 + slack growth policy as av_fast_realloc(). */
+    min_size = FFMAX(17 * min_size / 16 + 32, min_size);
+    av_free(*p);
+    *p = zero_realloc ? av_mallocz(min_size) : av_malloc(min_size);
+    if (!*p)
+        min_size = 0;
+    *size = min_size;
+    return 1;
+}
+
+/* Public wrapper: grow the buffer at *ptr to at least min_size bytes
+ * without preserving contents (see ff_fast_malloc above). */
+void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
+{
+    ff_fast_malloc(ptr, size, min_size, 0);
+}
+
+/* Allocate an array of nmemb elements of size bytes; NULL when size
+ * is 0 or nmemb * size would reach INT_MAX (overflow guard). */
+void *av_malloc_array(size_t nmemb, size_t size)
+{
+    if (!size || nmemb >= INT_MAX / size)
+        return NULL;
+    return av_malloc(nmemb * size);
+}
+
+/* Zeroed counterpart of av_malloc_array(); same overflow guard. */
+void *av_mallocz_array(size_t nmemb, size_t size)
+{
+    if (!size || nmemb >= INT_MAX / size)
+        return NULL;
+    return av_mallocz(nmemb * size);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/mem.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,379 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * memory handling functions
+ */
+
+#ifndef AVUTIL_MEM_H
+#define AVUTIL_MEM_H
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "attributes.h"
+#include "error.h"
+#include "avutil.h"
+
+/**
+ * @addtogroup lavu_mem
+ * @{
+ */
+
+
+#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
+    #define DECLARE_ALIGNED(n,t,v)      t __attribute__ ((aligned (n))) v
+    #define DECLARE_ASM_CONST(n,t,v)    const t __attribute__ ((aligned (n))) v
+#elif defined(__TI_COMPILER_VERSION__)
+    #define DECLARE_ALIGNED(n,t,v)                      \
+        AV_PRAGMA(DATA_ALIGN(v,n))                      \
+        t __attribute__((aligned(n))) v
+    #define DECLARE_ASM_CONST(n,t,v)                    \
+        AV_PRAGMA(DATA_ALIGN(v,n))                      \
+        static const t __attribute__((aligned(n))) v
+#elif defined(__GNUC__)
+    #define DECLARE_ALIGNED(n,t,v)      t __attribute__ ((aligned (n))) v
+    #define DECLARE_ASM_CONST(n,t,v)    static const t av_used __attribute__ ((aligned (n))) v
+#elif defined(_MSC_VER)
+    #define DECLARE_ALIGNED(n,t,v)      __declspec(align(n)) t v
+    #define DECLARE_ASM_CONST(n,t,v)    __declspec(align(n)) static const t v
+#else
+    #define DECLARE_ALIGNED(n,t,v)      t v
+    #define DECLARE_ASM_CONST(n,t,v)    static const t v
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+    #define av_malloc_attrib __attribute__((__malloc__))
+#else
+    #define av_malloc_attrib
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+    #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
+#else
+    #define av_alloc_size(...)
+#endif
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU).
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ */
+void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a block of size * nmemb bytes with av_malloc().
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_malloc()
+ */
+av_alloc_size(1, 2) void *av_malloc_array(size_t nmemb, size_t size);
+
+/**
+ * Allocate or reallocate a block of memory.
+ * If ptr is NULL and size > 0, allocate a new block. If
+ * size is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or NULL.
+ * @param size Size in bytes of the memory block to be allocated or
+ * reallocated.
+ * @return Pointer to a newly-reallocated block or NULL if the block
+ * cannot be reallocated or the function is used to free the memory block.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ *          not be passed to av_realloc(). The former can be implemented using
+ *          memalign() (or other functions), and there is no guarantee that
+ *          pointers from such functions can be passed to realloc() at all.
+ *          The situation is undefined according to POSIX and may crash with
+ *          some libc implementations.
+ * @see av_fast_realloc()
+ */
+void *av_realloc(void *ptr, size_t size) av_alloc_size(2);
+
+/**
+ * Allocate or reallocate a block of memory.
+ * This function does the same thing as av_realloc, except:
+ * - It takes two arguments and checks the result of the multiplication for
+ *   integer overflow.
+ * - It frees the input block in case of failure, thus avoiding the memory
+ *   leak with the classic "buf = realloc(buf); if (!buf) return -1;".
+ */
+void *av_realloc_f(void *ptr, size_t nelem, size_t elsize);
+
+/**
+ * Allocate or reallocate a block of memory.
+ * If *ptr is NULL and size > 0, allocate a new block. If
+ * size is zero, free the memory block pointed to by ptr.
+ * @param   ptr Pointer to a pointer to a memory block already allocated
+ *          with av_realloc(), or pointer to a pointer to NULL.
+ *          The pointer is updated on success, or freed on failure.
+ * @param   size Size in bytes for the memory block to be allocated or
+ *          reallocated
+ * @return  Zero on success, an AVERROR error code on failure.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ *          not be passed to av_reallocp(). The former can be implemented using
+ *          memalign() (or other functions), and there is no guarantee that
+ *          pointers from such functions can be passed to realloc() at all.
+ *          The situation is undefined according to POSIX and may crash with
+ *          some libc implementations.
+ */
+int av_reallocp(void *ptr, size_t size);
+
+/**
+ * Allocate or reallocate an array.
+ * If ptr is NULL and nmemb > 0, allocate a new block. If
+ * nmemb is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or NULL.
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to a newly-reallocated block or NULL if the block
+ * cannot be reallocated or the function is used to free the memory block.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ *          not be passed to av_realloc(). The former can be implemented using
+ *          memalign() (or other functions), and there is no guarantee that
+ *          pointers from such functions can be passed to realloc() at all.
+ *          The situation is undefined according to POSIX and may crash with
+ *          some libc implementations.
+ */
+av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Allocate or reallocate an array through a pointer to a pointer.
+ * If *ptr is NULL and nmemb > 0, allocate a new block. If
+ * nmemb is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a pointer to a memory block already allocated
+ * with av_realloc(), or pointer to a pointer to NULL.
+ * The pointer is updated on success, or freed on failure.
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Zero on success, an AVERROR error code on failure.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ *          not be passed to av_realloc(). The former can be implemented using
+ *          memalign() (or other functions), and there is no guarantee that
+ *          pointers from such functions can be passed to realloc() at all.
+ *          The situation is undefined according to POSIX and may crash with
+ *          some libc implementations.
+ */
+av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc().
+ * @param ptr Pointer to the memory block which should be freed.
+ * @note ptr = NULL is explicitly allowed.
+ * @note It is recommended that you use av_freep() instead.
+ * @see av_freep()
+ */
+void av_free(void *ptr);
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU) and
+ * zero all the bytes of the block.
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if it cannot be allocated.
+ * @see av_malloc()
+ */
+void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a block of nmemb * size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU) and
+ * zero all the bytes of the block.
+ * The allocation will fail if nmemb * size is greater than or equal
+ * to INT_MAX.
+ * @param nmemb
+ * @param size
+ * @return Pointer to the allocated block, NULL if it cannot be allocated.
+ */
+void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib;
+
+/**
+ * Allocate a block of size * nmemb bytes with av_mallocz().
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ * @see av_malloc_array()
+ */
+av_alloc_size(1, 2) void *av_mallocz_array(size_t nmemb, size_t size);
+
+/**
+ * Duplicate the string s.
+ * @param s string to be duplicated
+ * @return Pointer to a newly-allocated string containing a
+ * copy of s or NULL if the string cannot be allocated.
+ */
+char *av_strdup(const char *s) av_malloc_attrib;
+
+/**
+ * Duplicate a substring of the string s.
+ * @param s string to be duplicated
+ * @param len the maximum length of the resulting string (not counting the
+ *            terminating byte).
+ * @return Pointer to a newly-allocated string containing a
+ * copy of s or NULL if the string cannot be allocated.
+ */
+char *av_strndup(const char *s, size_t len) av_malloc_attrib;
+
+/**
+ * Duplicate the buffer p.
+ * @param p buffer to be duplicated
+ * @return Pointer to a newly allocated buffer containing a
+ * copy of p or NULL if the buffer cannot be allocated.
+ */
+void *av_memdup(const void *p, size_t size);
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc() and set the pointer pointing to it to NULL.
+ * @param ptr Pointer to the pointer to the memory block which should
+ * be freed.
+ * @note passing a pointer to a NULL pointer is safe and leads to no action.
+ * @see av_free()
+ */
+void av_freep(void *ptr);
+
+/**
+ * Add an element to a dynamic array.
+ *
+ * The array to grow is supposed to be an array of pointers to
+ * structures, and the element to add must be a pointer to an already
+ * allocated structure.
+ *
+ * The array is reallocated when its size reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the number pointed to by nb_ptr
+ * is incremented.
+ * In case of failure, the array is freed, *tab_ptr is set to NULL and
+ * *nb_ptr is set to 0.
+ *
+ * @param tab_ptr pointer to the array to grow
+ * @param nb_ptr  pointer to the number of elements in the array
+ * @param elem    element to add
+ * @see av_dynarray_add_nofree(), av_dynarray2_add()
+ */
+void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem);
+
+/**
+ * Add an element to a dynamic array.
+ *
+ * Function has the same functionality as av_dynarray_add(),
+ * but it doesn't free memory on fails. It returns error code
+ * instead and leave current buffer untouched.
+ *
+ * @param tab_ptr pointer to the array to grow
+ * @param nb_ptr  pointer to the number of elements in the array
+ * @param elem    element to add
+ * @return >=0 on success, negative otherwise.
+ * @see av_dynarray_add(), av_dynarray2_add()
+ */
+int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem);
+
+/**
+ * Add an element of size elem_size to a dynamic array.
+ *
+ * The array is reallocated when its number of elements reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the number pointed to by nb_ptr
+ * is incremented.
+ * In case of failure, the array is freed, *tab_ptr is set to NULL and
+ * *nb_ptr is set to 0.
+ *
+ * @param tab_ptr   pointer to the array to grow
+ * @param nb_ptr    pointer to the number of elements in the array
+ * @param elem_size size in bytes of the elements in the array
+ * @param elem_data pointer to the data of the element to add. If NULL, the space of
+ *                  the new added element is not filled.
+ * @return          pointer to the data of the element to copy in the new allocated space.
+ *                  If NULL, the new allocated space is left uninitialized.
+ * @see av_dynarray_add(), av_dynarray_add_nofree()
+ */
+void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,
+                       const uint8_t *elem_data);
+
+/**
+ * Multiply two size_t values checking for overflow.
+ * @return  0 if success, AVERROR(EINVAL) if overflow.
+ */
+static inline int av_size_mult(size_t a, size_t b, size_t *r)
+{
+    size_t t = a * b;
+    /* Hack inspired from glibc: only try the division if nelem and elsize
+     * are both greater than sqrt(SIZE_MAX). */
+    if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b)
+        return AVERROR(EINVAL);
+    *r = t;
+    return 0;
+}
+
+/**
+ * Set the maximum size that may be allocated in one block.
+ */
+void av_max_alloc(size_t max);
+
+/**
+ * deliberately overlapping memcpy implementation
+ * @param dst destination buffer
+ * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0
+ * @param cnt number of bytes to copy, must be >= 0
+ *
+ * cnt > back is valid, this will copy the bytes we just copied,
+ * thus creating a repeating pattern with a period length of back.
+ */
+void av_memcpy_backptr(uint8_t *dst, int back, int cnt);
+
+/**
+ * Reallocate the given block if it is not large enough, otherwise do nothing.
+ *
+ * @see av_realloc
+ */
+void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate a buffer, reusing the given one if large enough.
+ *
+ * Contrary to av_fast_realloc the current buffer contents might not be
+ * preserved and on error the old buffer is freed, thus no special
+ * handling to avoid memleaks is necessary.
+ *
+ * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer
+ * @param size size of the buffer *ptr points to
+ * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and
+ *                 *size 0 if an error occurred.
+ */
+void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MEM_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/motion_vector.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,50 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MOTION_VECTOR_H
+#define AVUTIL_MOTION_VECTOR_H
+
+#include <stdint.h>
+
+typedef struct AVMotionVector {
+    /**
+     * Where the current macroblock comes from; negative value when it comes
+     * from the past, positive value when it comes from the future.
+     * XXX: set exact relative ref frame reference instead of a +/- 1 "direction".
+     */
+    int32_t source;
+    /**
+     * Width and height of the block.
+     */
+    uint8_t w, h;
+    /**
+     * Absolute source position. Can be outside the frame area.
+     */
+    int16_t src_x, src_y;
+    /**
+     * Absolute destination position. Can be outside the frame area.
+     */
+    int16_t dst_x, dst_y;
+    /**
+     * Extra flag information.
+     * Currently unused.
+     */
+    uint64_t flags;
+} AVMotionVector;
+
+#endif /* AVUTIL_MOTION_VECTOR_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/murmur3.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MURMUR3_H
+#define AVUTIL_MURMUR3_H
+
+#include <stdint.h>
+
+struct AVMurMur3 *av_murmur3_alloc(void);
+void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed);
+void av_murmur3_init(struct AVMurMur3 *c);
+void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len);
+void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]);
+
+#endif /* AVUTIL_MURMUR3_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/old_pix_fmts.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,177 @@
+/*
+ * copyright (c) 2006-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OLD_PIX_FMTS_H
+#define AVUTIL_OLD_PIX_FMTS_H
+
+/*
+ * This header exists to prevent new pixel formats from being accidentally added
+ * to the deprecated list.
+ * Do not include it directly. It will be removed on next major bump
+ *
+ * Do not add new items to this list. Use the AVPixelFormat enum instead.
+ */
+    PIX_FMT_NONE = AV_PIX_FMT_NONE,
+    PIX_FMT_YUV420P,   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+    PIX_FMT_YUYV422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+    PIX_FMT_RGB24,     ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+    PIX_FMT_BGR24,     ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+    PIX_FMT_YUV422P,   ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+    PIX_FMT_YUV444P,   ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+    PIX_FMT_YUV410P,   ///< planar YUV 4:1:0,  9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+    PIX_FMT_YUV411P,   ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+    PIX_FMT_GRAY8,     ///<        Y        ,  8bpp
+    PIX_FMT_MONOWHITE, ///<        Y        ,  1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+    PIX_FMT_MONOBLACK, ///<        Y        ,  1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+    PIX_FMT_PAL8,      ///< 8 bit with PIX_FMT_RGB32 palette
+    PIX_FMT_YUVJ420P,  ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+    PIX_FMT_YUVJ422P,  ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+    PIX_FMT_YUVJ444P,  ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+#if FF_API_XVMC
+    PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+    PIX_FMT_XVMC_MPEG2_IDCT,
+#endif /* FF_API_XVMC */
+    PIX_FMT_UYVY422,   ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+    PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+    PIX_FMT_BGR8,      ///< packed RGB 3:3:2,  8bpp, (msb)2B 3G 3R(lsb)
+    PIX_FMT_BGR4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+    PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1B 2G 1R(lsb)
+    PIX_FMT_RGB8,      ///< packed RGB 3:3:2,  8bpp, (msb)2R 3G 3B(lsb)
+    PIX_FMT_RGB4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+    PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1R 2G 1B(lsb)
+    PIX_FMT_NV12,      ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+    PIX_FMT_NV21,      ///< as above, but U and V bytes are swapped
+
+    PIX_FMT_ARGB,      ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+    PIX_FMT_RGBA,      ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+    PIX_FMT_ABGR,      ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+    PIX_FMT_BGRA,      ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+    PIX_FMT_GRAY16BE,  ///<        Y        , 16bpp, big-endian
+    PIX_FMT_GRAY16LE,  ///<        Y        , 16bpp, little-endian
+    PIX_FMT_YUV440P,   ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+    PIX_FMT_YUVJ440P,  ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+    PIX_FMT_YUVA420P,  ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+#if FF_API_VDPAU
+    PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+    PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+    PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+    PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+    PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+    PIX_FMT_RGB48BE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+    PIX_FMT_RGB48LE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+    PIX_FMT_RGB565BE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), big-endian
+    PIX_FMT_RGB565LE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), little-endian
+    PIX_FMT_RGB555BE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
+    PIX_FMT_RGB555LE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+
+    PIX_FMT_BGR565BE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), big-endian
+    PIX_FMT_BGR565LE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), little-endian
+    PIX_FMT_BGR555BE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
+    PIX_FMT_BGR555LE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+
+    PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+    PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+    PIX_FMT_VAAPI_VLD,  ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+
+    PIX_FMT_YUV420P16LE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+    PIX_FMT_YUV420P16BE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+    PIX_FMT_YUV422P16LE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    PIX_FMT_YUV422P16BE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+    PIX_FMT_YUV444P16LE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+    PIX_FMT_YUV444P16BE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+#if FF_API_VDPAU
+    PIX_FMT_VDPAU_MPEG4,  ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+    PIX_FMT_DXVA2_VLD,    ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+    PIX_FMT_RGB444LE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
+    PIX_FMT_RGB444BE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
+    PIX_FMT_BGR444LE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+    PIX_FMT_BGR444BE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+    PIX_FMT_GRAY8A,    ///< 8bit gray, 8bit alpha
+    PIX_FMT_BGR48BE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+    PIX_FMT_BGR48LE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+
+    //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus
+    //If you want to support multiple bit depths, then using PIX_FMT_YUV420P16* with the bpp stored separately
+    //is better
+    PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+    PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+    PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+    PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+    PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+    PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+    PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+    PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+    PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+    PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+    PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    PIX_FMT_VDA_VLD,    ///< hardware decoding through VDA
+
+#ifdef AV_PIX_FMT_ABI_GIT_MASTER
+    PIX_FMT_RGBA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+    PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+    PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+    PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+#endif
+    PIX_FMT_GBRP,      ///< planar GBR 4:4:4 24bpp
+    PIX_FMT_GBRP9BE,   ///< planar GBR 4:4:4 27bpp, big endian
+    PIX_FMT_GBRP9LE,   ///< planar GBR 4:4:4 27bpp, little endian
+    PIX_FMT_GBRP10BE,  ///< planar GBR 4:4:4 30bpp, big endian
+    PIX_FMT_GBRP10LE,  ///< planar GBR 4:4:4 30bpp, little endian
+    PIX_FMT_GBRP16BE,  ///< planar GBR 4:4:4 48bpp, big endian
+    PIX_FMT_GBRP16LE,  ///< planar GBR 4:4:4 48bpp, little endian
+
+#ifndef AV_PIX_FMT_ABI_GIT_MASTER
+    PIX_FMT_RGBA64BE=0x123,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+    PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+    PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+    PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+#endif
+    PIX_FMT_0RGB=0x123+4,      ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...
+    PIX_FMT_RGB0,      ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...
+    PIX_FMT_0BGR,      ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...
+    PIX_FMT_BGR0,      ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...
+    PIX_FMT_YUVA444P,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+    PIX_FMT_YUVA422P,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+
+    PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+    PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+    PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+    PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+    PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+    PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+    PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+    PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+    PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+    PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+    PIX_FMT_GBRP12BE,    ///< planar GBR 4:4:4 36bpp, big endian
+    PIX_FMT_GBRP12LE,    ///< planar GBR 4:4:4 36bpp, little endian
+    PIX_FMT_GBRP14BE,    ///< planar GBR 4:4:4 42bpp, big endian
+    PIX_FMT_GBRP14LE,    ///< planar GBR 4:4:4 42bpp, little endian
+
+    PIX_FMT_NB,        ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+#endif /* AVUTIL_OLD_PIX_FMTS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/opencl.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2012 Peng  Gao     <peng@multicorewareinc.com>
+ * Copyright (C) 2012 Li    Cao     <li@multicorewareinc.com>
+ * Copyright (C) 2012 Wei   Gao     <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang    <lwanghpc@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * OpenCL wrapper
+ *
+ * This interface is considered still experimental and its API and ABI may
+ * change without prior notice.
+ */
+
+#ifndef LIBAVUTIL_OPENCL_H
+#define LIBAVUTIL_OPENCL_H
+
+#include "config.h"
+#if HAVE_CL_CL_H
+#include <CL/cl.h>
+#else
+#include <OpenCL/cl.h>
+#endif
+#include <stdint.h>
+#include "dict.h"
+
+#include "libavutil/version.h"
+
+#define AV_OPENCL_KERNEL( ... )# __VA_ARGS__
+
+#define AV_OPENCL_MAX_KERNEL_NAME_SIZE 150
+
+#define AV_OPENCL_MAX_DEVICE_NAME_SIZE 100
+
+#define AV_OPENCL_MAX_PLATFORM_NAME_SIZE 100
+
+typedef struct {
+    int device_type;
+    char device_name[AV_OPENCL_MAX_DEVICE_NAME_SIZE];
+    cl_device_id device_id;
+} AVOpenCLDeviceNode;
+
+typedef struct {
+    cl_platform_id platform_id;
+    char platform_name[AV_OPENCL_MAX_PLATFORM_NAME_SIZE];
+    int device_num;
+    AVOpenCLDeviceNode **device_node;
+} AVOpenCLPlatformNode;
+
+typedef struct {
+    int platform_num;
+    AVOpenCLPlatformNode **platform_node;
+} AVOpenCLDeviceList;
+
+typedef struct {
+    cl_platform_id platform_id;
+    cl_device_type device_type;
+    cl_context context;
+    cl_device_id  device_id;
+    cl_command_queue command_queue;
+    char *platform_name;
+} AVOpenCLExternalEnv;
+
+/**
+ * Get OpenCL device list.
+ *
+ * It must be freed with av_opencl_free_device_list().
+ *
+ * @param device_list pointer to OpenCL environment device list,
+ *                    should be released by av_opencl_free_device_list()
+ *
+ * @return  >=0 on success, a negative error code in case of failure
+ */
+int av_opencl_get_device_list(AVOpenCLDeviceList **device_list);
+
+/**
+  * Free OpenCL device list.
+  *
+  * @param device_list pointer to OpenCL environment device list
+  *                       created by av_opencl_get_device_list()
+  */
+void av_opencl_free_device_list(AVOpenCLDeviceList **device_list);
+
+/**
+ * Set option in the global OpenCL context.
+ *
+ * These options affect the operation performed by the next
+ * av_opencl_init() operation.
+ *
+ * The currently accepted options are:
+ * - platform: set index of platform in device list
+ * - device: set index of device in device list
+ *
+ * See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
+ *
+ * @param key                 option key
+ * @param val                 option value
+ * @return >=0 on success, a negative error code in case of failure
+ * @see av_opencl_get_option()
+ */
+int av_opencl_set_option(const char *key, const char *val);
+
+/**
+ * Get option value from the global OpenCL context.
+ *
+ * @param key        option key
+ * @param out_val  pointer to location where option value will be
+ *                         written, must be freed with av_freep()
+ * @return  >=0 on success, a negative error code in case of failure
+ * @see av_opencl_set_option()
+ */
+int av_opencl_get_option(const char *key, uint8_t **out_val);
+
+/**
+ * Free option values of the global OpenCL context.
+ *
+ */
+void av_opencl_free_option(void);
+
+/**
+ * Allocate OpenCL external environment.
+ *
+ * It must be freed with av_opencl_free_external_env().
+ *
+ * @return pointer to allocated OpenCL external environment
+ */
+AVOpenCLExternalEnv *av_opencl_alloc_external_env(void);
+
+/**
+ * Free OpenCL external environment.
+ *
+ * @param ext_opencl_env pointer to OpenCL external environment
+ *                       created by av_opencl_alloc_external_env()
+ */
+void av_opencl_free_external_env(AVOpenCLExternalEnv **ext_opencl_env);
+
+/**
+ * Get OpenCL error string.
+ *
+ * @param status    OpenCL error code
+ * @return OpenCL error string
+ */
+const char *av_opencl_errstr(cl_int status);
+
+/**
+ * Register kernel code.
+ *
+ *  The registered kernel code is stored in a global context, and compiled
+ *  in the runtime environment when av_opencl_init() is called.
+ *
+ * @param kernel_code    kernel code to be compiled in the OpenCL runtime environment
+ * @return  >=0 on success, a negative error code in case of failure
+ */
+int av_opencl_register_kernel_code(const char *kernel_code);
+
+/**
+ * Initialize the run time OpenCL environment
+ *
+ * @param ext_opencl_env external OpenCL environment, created by an
+ *                       application program, ignored if set to NULL
+ * @return >=0 on success, a negative error code in case of failure
+ */
+int av_opencl_init(AVOpenCLExternalEnv *ext_opencl_env);
+
+/**
+ * compile specific OpenCL kernel source
+ *
+ * @param program_name  pointer to a program name used for identification
+ * @param build_opts    pointer to a string that describes the preprocessor
+ *                      build options to be used for building the program
+ * @return a cl_program object
+ */
+cl_program av_opencl_compile(const char *program_name, const char* build_opts);
+
+/**
+ * get OpenCL command queue
+ *
+ * @return a cl_command_queue object
+ */
+cl_command_queue av_opencl_get_command_queue(void);
+
+/**
+ * Create OpenCL buffer.
+ *
+ * The buffer is used to save the data used or created by an OpenCL
+ * kernel.
+ * The created buffer must be released with av_opencl_buffer_release().
+ *
+ * See clCreateBuffer() function reference for more information about
+ * the parameters.
+ *
+ * @param cl_buf       pointer to OpenCL buffer
+ * @param cl_buf_size  size in bytes of the OpenCL buffer to create
+ * @param flags        flags used to control buffer attributes
+ * @param host_ptr     host pointer of the OpenCL buffer
+ * @return >=0 on success, a negative error code in case of failure
+ */
+int av_opencl_buffer_create(cl_mem *cl_buf, size_t cl_buf_size, int flags, void *host_ptr);
+
+/**
+ * Write OpenCL buffer with data from src_buf.
+ *
+ * @param dst_cl_buf        pointer to OpenCL destination buffer
+ * @param src_buf           pointer to source buffer
+ * @param buf_size          size in bytes of the source and destination buffers
+ * @return >=0 on success, a negative error code in case of failure
+ */
+int av_opencl_buffer_write(cl_mem dst_cl_buf, uint8_t *src_buf, size_t buf_size);
+
+/**
+ * Read data from OpenCL buffer to memory buffer.
+ *
+ * @param dst_buf           pointer to destination buffer (CPU memory)
+ * @param src_cl_buf        pointer to source OpenCL buffer
+ * @param buf_size          size in bytes of the source and destination buffers
+ * @return >=0 on success, a negative error code in case of failure
+ */
+int av_opencl_buffer_read(uint8_t *dst_buf, cl_mem src_cl_buf, size_t buf_size);
+
+/**
+ * Write image data from memory to OpenCL buffer.
+ *
+ * The source must be an array of pointers to image plane buffers.
+ *
+ * @param dst_cl_buf         pointer to destination OpenCL buffer
+ * @param cl_buffer_size     size in bytes of the destination OpenCL buffer
+ * @param dst_cl_offset      the offset of the OpenCL buffer start position
+ * @param src_data           array of pointers to source plane buffers
+ * @param plane_size         array of sizes in bytes of the source plane buffers
+ * @param plane_num          number of source image planes
+ * @return >=0 on success, a negative error code in case of failure
+ */
+int av_opencl_buffer_write_image(cl_mem dst_cl_buf, size_t cl_buffer_size, int dst_cl_offset,
+                                 uint8_t **src_data, int *plane_size, int plane_num);
+
+/**
+ * Read image data from OpenCL buffer.
+ *
+ * @param dst_data           array of pointers to destination plane buffers
+ * @param plane_size         array of sizes in bytes of the destination plane buffers
+ * @param plane_num          number of destination image planes
+ * @param src_cl_buf         pointer to source OpenCL buffer
+ * @param cl_buffer_size     size in bytes of the source OpenCL buffer
+ * @return >=0 on success, a negative error code in case of failure
+ */
+int av_opencl_buffer_read_image(uint8_t **dst_data, int *plane_size, int plane_num,
+                                cl_mem src_cl_buf, size_t cl_buffer_size);
+
+/**
+ * Release OpenCL buffer.
+ *
+ * @param cl_buf pointer to OpenCL buffer to release, which was
+ *               previously filled with av_opencl_buffer_create()
+ */
+void av_opencl_buffer_release(cl_mem *cl_buf);
+
+/**
+ * Release OpenCL environment.
+ *
+ * The OpenCL environment is effectively released only if all the created
+ * kernels had been released with av_opencl_release_kernel().
+ */
+void av_opencl_uninit(void);
+
+/**
+ * Benchmark an OpenCL device with a user defined callback function.  This function
+ * sets up an external OpenCL environment including context and command queue on
+ * the device then tears it down in the end.  The callback function should perform
+ * the rest of the work.
+ *
+ * @param device            pointer to the OpenCL device to be used
+ * @param platform          cl_platform_id handle to which the device belongs to
+ * @param benchmark         callback function to perform the benchmark, return a
+ *                          negative value in case of failure
+ * @return the score passed from the callback function, a negative error code in case
+ * of failure
+ */
+int64_t av_opencl_benchmark(AVOpenCLDeviceNode *device, cl_platform_id platform,
+                            int64_t (*benchmark)(AVOpenCLExternalEnv *ext_opencl_env));
+
+#endif /* LIBAVUTIL_OPENCL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/opencl_internal.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2012 Peng Gao <peng@multicorewareinc.com>
+ * Copyright (C) 2012 Li   Cao <li@multicorewareinc.com>
+ * Copyright (C) 2012 Wei  Gao <weigao@multicorewareinc.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "opencl.h"
+
+#define FF_OPENCL_PARAM_INFO(a) ((void*)(&(a))), (sizeof(a))
+
+typedef struct {
+    cl_kernel kernel;
+    int param_num;
+    void *ctx;
+} FFOpenclParam;
+
+int avpriv_opencl_set_parameter(FFOpenclParam *opencl_param, ...);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/opt.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,897 @@
+/*
+ * AVOptions
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OPT_H
+#define AVUTIL_OPT_H
+
+/**
+ * @file
+ * AVOptions
+ */
+
+#include "rational.h"
+#include "avutil.h"
+#include "dict.h"
+#include "log.h"
+#include "pixfmt.h"
+#include "samplefmt.h"
+
+/**
+ * @defgroup avoptions AVOptions
+ * @ingroup lavu_data
+ * @{
+ * AVOptions provide a generic system to declare options on arbitrary structs
+ * ("objects"). An option can have a help text, a type and a range of possible
+ * values. Options may then be enumerated, read and written to.
+ *
+ * @section avoptions_implement Implementing AVOptions
+ * This section describes how to add AVOptions capabilities to a struct.
+ *
+ * All AVOptions-related information is stored in an AVClass. Therefore
+ * the first member of the struct should be a pointer to an AVClass describing it.
+ * The option field of the AVClass must be set to a NULL-terminated static array
+ * of AVOptions. Each AVOption must have a non-empty name, a type, a default
+ * value and for number-type AVOptions also a range of allowed values. It must
+ * also declare an offset in bytes from the start of the struct, where the field
+ * associated with this AVOption is located. Other fields in the AVOption struct
+ * should also be set when applicable, but are not required.
+ *
+ * The following example illustrates an AVOptions-enabled struct:
+ * @code
+ * typedef struct test_struct {
+ *     AVClass *class;
+ *     int      int_opt;
+ *     char    *str_opt;
+ *     uint8_t *bin_opt;
+ *     int      bin_len;
+ * } test_struct;
+ *
+ * static const AVOption test_options[] = {
+ *   { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt),
+ *     AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX },
+ *   { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt),
+ *     AV_OPT_TYPE_STRING },
+ *   { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt),
+ *     AV_OPT_TYPE_BINARY },
+ *   { NULL },
+ * };
+ *
+ * static const AVClass test_class = {
+ *     .class_name = "test class",
+ *     .item_name  = av_default_item_name,
+ *     .option     = test_options,
+ *     .version    = LIBAVUTIL_VERSION_INT,
+ * };
+ * @endcode
+ *
+ * Next, when allocating your struct, you must ensure that the AVClass pointer
+ * is set to the correct value. Then, av_opt_set_defaults() can be called to
+ * initialize defaults. After that the struct is ready to be used with the
+ * AVOptions API.
+ *
+ * When cleaning up, you may use the av_opt_free() function to automatically
+ * free all the allocated string and binary options.
+ *
+ * Continuing with the above example:
+ *
+ * @code
+ * test_struct *alloc_test_struct(void)
+ * {
+ *     test_struct *ret = av_malloc(sizeof(*ret));
+ *     ret->class = &test_class;
+ *     av_opt_set_defaults(ret);
+ *     return ret;
+ * }
+ * void free_test_struct(test_struct **foo)
+ * {
+ *     av_opt_free(*foo);
+ *     av_freep(foo);
+ * }
+ * @endcode
+ *
+ * @subsection avoptions_implement_nesting Nesting
+ *      It may happen that an AVOptions-enabled struct contains another
+ *      AVOptions-enabled struct as a member (e.g. AVCodecContext in
+ *      libavcodec exports generic options, while its priv_data field exports
+ *      codec-specific options). In such a case, it is possible to set up the
+ *      parent struct to export a child's options. To do that, simply
+ *      implement AVClass.child_next() and AVClass.child_class_next() in the
+ *      parent struct's AVClass.
+ *      Assuming that the test_struct from above now also contains a
+ *      child_struct field:
+ *
+ *      @code
+ *      typedef struct child_struct {
+ *          AVClass *class;
+ *          int flags_opt;
+ *      } child_struct;
+ *      static const AVOption child_opts[] = {
+ *          { "test_flags", "This is a test option of flags type.",
+ *            offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX },
+ *          { NULL },
+ *      };
+ *      static const AVClass child_class = {
+ *          .class_name = "child class",
+ *          .item_name  = av_default_item_name,
+ *          .option     = child_opts,
+ *          .version    = LIBAVUTIL_VERSION_INT,
+ *      };
+ *
+ *      void *child_next(void *obj, void *prev)
+ *      {
+ *          test_struct *t = obj;
+ *          if (!prev && t->child_struct)
+ *              return t->child_struct;
+ *          return NULL;
+ *      }
+ *      const AVClass *child_class_next(const AVClass *prev)
+ *      {
+ *          return prev ? NULL : &child_class;
+ *      }
+ *      @endcode
+ *      Putting child_next() and child_class_next() as defined above into
+ *      test_class will now make child_struct's options accessible through
+ *      test_struct (again, proper setup as described above needs to be done on
+ *      child_struct right after it is created).
+ *
+ *      From the above example it might not be clear why both child_next()
+ *      and child_class_next() are needed. The distinction is that child_next()
+ *      iterates over actually existing objects, while child_class_next()
+ *      iterates over all possible child classes. E.g. if an AVCodecContext
+ *      was initialized to use a codec which has private options, then its
+ *      child_next() will return AVCodecContext.priv_data and finish
+ *      iterating. OTOH child_class_next() on AVCodecContext.av_class will
+ *      iterate over all available codecs with private options.
+ *
+ * @subsection avoptions_implement_named_constants Named constants
+ *      It is possible to create named constants for options. Simply set the unit
+ *      field of the option the constants should apply to a string and
+ *      create the constants themselves as options of type AV_OPT_TYPE_CONST
+ *      with their unit field set to the same string.
+ *      Their default_val field should contain the value of the named
+ *      constant.
+ *      For example, to add some named constants for the test_flags option
+ *      above, put the following into the child_opts array:
+ *      @code
+ *      { "test_flags", "This is a test option of flags type.",
+ *        offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" },
+ *      { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" },
+ *      @endcode
+ *
+ * @section avoptions_use Using AVOptions
+ * This section deals with accessing options in an AVOptions-enabled struct.
+ * Such structs in FFmpeg are e.g. AVCodecContext in libavcodec or
+ * AVFormatContext in libavformat.
+ *
+ * @subsection avoptions_use_examine Examining AVOptions
+ * The basic functions for examining options are av_opt_next(), which iterates
+ * over all options defined for one object, and av_opt_find(), which searches
+ * for an option with the given name.
+ *
+ * The situation is more complicated with nesting. An AVOptions-enabled struct
+ * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag
+ * to av_opt_find() will make the function search children recursively.
+ *
+ * For enumerating there are basically two cases. The first is when you want to
+ * get all options that may potentially exist on the struct and its children
+ * (e.g.  when constructing documentation). In that case you should call
+ * av_opt_child_class_next() recursively on the parent struct's AVClass.  The
+ * second case is when you have an already initialized struct with all its
+ * children and you want to get all options that can be actually written or read
+ * from it. In that case you should call av_opt_child_next() recursively (and
+ * av_opt_next() on each result).
+ *
+ * @subsection avoptions_use_get_set Reading and writing AVOptions
+ * When setting options, you often have a string read directly from the
+ * user. In such a case, simply passing it to av_opt_set() is enough. For
+ * non-string type options, av_opt_set() will parse the string according to the
+ * option type.
+ *
+ * Similarly av_opt_get() will read any option type and convert it to a string
+ * which will be returned. Do not forget that the string is allocated, so you
+ * have to free it with av_free().
+ *
+ * In some cases it may be more convenient to put all options into an
+ * AVDictionary and call av_opt_set_dict() on it. A specific case of this
+ * are the format/codec open functions in lavf/lavc which take a dictionary
+ * filled with option as a parameter. This allows to set some options
+ * that cannot be set otherwise, since e.g. the input file format is not known
+ * before the file is actually opened.
+ */
+
+enum AVOptionType{
+    AV_OPT_TYPE_FLAGS,
+    AV_OPT_TYPE_INT,
+    AV_OPT_TYPE_INT64,
+    AV_OPT_TYPE_DOUBLE,
+    AV_OPT_TYPE_FLOAT,
+    AV_OPT_TYPE_STRING,
+    AV_OPT_TYPE_RATIONAL,
+    AV_OPT_TYPE_BINARY,  ///< offset must point to a pointer immediately followed by an int for the length
+    AV_OPT_TYPE_DICT,
+    AV_OPT_TYPE_CONST = 128,
+    AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers
+    AV_OPT_TYPE_PIXEL_FMT  = MKBETAG('P','F','M','T'),
+    AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'),
+    AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational
+    AV_OPT_TYPE_DURATION   = MKBETAG('D','U','R',' '),
+    AV_OPT_TYPE_COLOR      = MKBETAG('C','O','L','R'),
+    AV_OPT_TYPE_CHANNEL_LAYOUT = MKBETAG('C','H','L','A'),
+#if FF_API_OLD_AVOPTIONS
+    FF_OPT_TYPE_FLAGS = 0,
+    FF_OPT_TYPE_INT,
+    FF_OPT_TYPE_INT64,
+    FF_OPT_TYPE_DOUBLE,
+    FF_OPT_TYPE_FLOAT,
+    FF_OPT_TYPE_STRING,
+    FF_OPT_TYPE_RATIONAL,
+    FF_OPT_TYPE_BINARY,  ///< offset must point to a pointer immediately followed by an int for the length
+    FF_OPT_TYPE_CONST=128,
+#endif
+};
+
+/**
+ * AVOption
+ */
+typedef struct AVOption {
+    const char *name;
+
+    /**
+     * short English help text
+     * @todo What about other languages?
+     */
+    const char *help;
+
+    /**
+     * The offset relative to the context structure where the option
+     * value is stored. It should be 0 for named constants.
+     */
+    int offset;
+    enum AVOptionType type;
+
+    /**
+     * the default value for scalar options
+     */
+    union {
+        int64_t i64;
+        double dbl;
+        const char *str;
+        /* TODO those are unused now */
+        AVRational q;
+    } default_val;
+    double min;                 ///< minimum valid value for the option
+    double max;                 ///< maximum valid value for the option
+
+    int flags;
+#define AV_OPT_FLAG_ENCODING_PARAM  1   ///< a generic parameter which can be set by the user for muxing or encoding
+#define AV_OPT_FLAG_DECODING_PARAM  2   ///< a generic parameter which can be set by the user for demuxing or decoding
+#if FF_API_OPT_TYPE_METADATA
+#define AV_OPT_FLAG_METADATA        4   ///< some data extracted or inserted into the file like title, comment, ...
+#endif
+#define AV_OPT_FLAG_AUDIO_PARAM     8
+#define AV_OPT_FLAG_VIDEO_PARAM     16
+#define AV_OPT_FLAG_SUBTITLE_PARAM  32
+/**
+ * The option is intended for exporting values to the caller.
+ */
+#define AV_OPT_FLAG_EXPORT          64
+/**
+ * The option may not be set through the AVOptions API, only read.
+ * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set.
+ */
+#define AV_OPT_FLAG_READONLY        128
+#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering
+//FIXME think about enc-audio, ... style flags
+
+    /**
+     * The logical unit to which the option belongs. Non-constant
+     * options and corresponding named constants share the same
+     * unit. May be NULL.
+     */
+    const char *unit;
+} AVOption;
+
+/**
+ * A single allowed range of values, or a single allowed value.
+ */
+typedef struct AVOptionRange {
+    const char *str;
+    /**
+     * Value range.
+     * For string ranges this represents the min/max length.
+     * For dimensions this represents the min/max pixel count or width/height in multi-component case.
+     */
+    double value_min, value_max;
+    /**
+     * Value's component range.
+     * For string this represents the unicode range for chars, 0-127 limits to ASCII.
+     */
+    double component_min, component_max;
+    /**
+     * Range flag.
+     * If set to 1 the struct encodes a range, if set to 0 a single value.
+     */
+    int is_range;
+} AVOptionRange;
+
+/**
+ * List of AVOptionRange structs.
+ */
+typedef struct AVOptionRanges {
+    /**
+     * Array of option ranges.
+     *
+     * Most of option types use just one component.
+     * Following describes multi-component option types:
+     *
+     * AV_OPT_TYPE_IMAGE_SIZE:
+     * component index 0: range of pixel count (width * height).
+     * component index 1: range of width.
+     * component index 2: range of height.
+     *
+     * @note To obtain multi-component version of this structure, user must
+     *       provide AV_OPT_MULTI_COMPONENT_RANGE to av_opt_query_ranges or
+     *       av_opt_query_ranges_default function.
+     *
+     * Multi-component range can be read as in following example:
+     *
+     * @code
+     * int range_index, component_index;
+     * AVOptionRanges *ranges;
+     * AVOptionRange *range[3]; //may require more than 3 in the future.
+     * av_opt_query_ranges(&ranges, obj, key, AV_OPT_MULTI_COMPONENT_RANGE);
+     * for (range_index = 0; range_index < ranges->nb_ranges; range_index++) {
+     *     for (component_index = 0; component_index < ranges->nb_components; component_index++)
+     *         range[component_index] = ranges->range[ranges->nb_ranges * component_index + range_index];
+     *     //do something with range here.
+     * }
+     * av_opt_freep_ranges(&ranges);
+     * @endcode
+     */
+    AVOptionRange **range;
+    /**
+     * Number of ranges per component.
+     */
+    int nb_ranges;
+    /**
+     * Number of components.
+     */
+    int nb_components;
+} AVOptionRanges;
+
+
+#if FF_API_OLD_AVOPTIONS
+/**
+ * Set the field of obj with the given name to value.
+ *
+ * @param[in] obj A struct whose first element is a pointer to an
+ * AVClass.
+ * @param[in] name the name of the field to set
+ * @param[in] val The value to set. If the field is not of a string
+ * type, then the given string is parsed.
+ * SI postfixes and some named scalars are supported.
+ * If the field is of a numeric type, it has to be a numeric or named
+ * scalar. Behavior with more than one scalar and +- infix operators
+ * is undefined.
+ * If the field is of a flags type, it has to be a sequence of numeric
+ * scalars or named flags separated by '+' or '-'. Prefixing a flag
+ * with '+' causes it to be set without affecting the other flags;
+ * similarly, '-' unsets a flag.
+ * @param[out] o_out if non-NULL put here a pointer to the AVOption
+ * found
+ * @param alloc this parameter is currently ignored
+ * @return 0 if the value has been set, or an AVERROR code in case of
+ * error:
+ * AVERROR_OPTION_NOT_FOUND if no matching option exists
+ * AVERROR(ERANGE) if the value is out of range
+ * AVERROR(EINVAL) if the value is not valid
+ * @deprecated use av_opt_set()
+ */
+attribute_deprecated
+int av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out);
+
+attribute_deprecated const AVOption *av_set_double(void *obj, const char *name, double n);
+attribute_deprecated const AVOption *av_set_q(void *obj, const char *name, AVRational n);
+attribute_deprecated const AVOption *av_set_int(void *obj, const char *name, int64_t n);
+
+double av_get_double(void *obj, const char *name, const AVOption **o_out);
+AVRational av_get_q(void *obj, const char *name, const AVOption **o_out);
+int64_t av_get_int(void *obj, const char *name, const AVOption **o_out);
+attribute_deprecated const char *av_get_string(void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len);
+attribute_deprecated const AVOption *av_next_option(void *obj, const AVOption *last);
+#endif
+
+/**
+ * Show the obj options.
+ *
+ * @param req_flags requested flags for the options to show. Show only the
+ * options for which it is opt->flags & req_flags.
+ * @param rej_flags rejected flags for the options to show. Show only the
+ * options for which it is !(opt->flags & rej_flags).
+ * @param av_log_obj log context to use for showing the options
+ */
+int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags);
+
+/**
+ * Set the values of all AVOption fields to their default values.
+ *
+ * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass)
+ */
+void av_opt_set_defaults(void *s);
+
+#if FF_API_OLD_AVOPTIONS
+attribute_deprecated
+void av_opt_set_defaults2(void *s, int mask, int flags);
+#endif
+
+/**
+ * Parse the key/value pairs list in opts. For each key/value pair
+ * found, stores the value in the field in ctx that is named like the
+ * key. ctx must be an AVClass context, storing is done using
+ * AVOptions.
+ *
+ * @param opts options string to parse, may be NULL
+ * @param key_val_sep a 0-terminated list of characters used to
+ * separate key from value
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @return the number of successfully set key/value pairs, or a negative
+ * value corresponding to an AVERROR code in case of error:
+ * AVERROR(EINVAL) if opts cannot be parsed,
+ * the error code issued by av_opt_set() if a key/value pair
+ * cannot be set
+ */
+int av_set_options_string(void *ctx, const char *opts,
+                          const char *key_val_sep, const char *pairs_sep);
+
+/**
+ * Parse the key-value pairs list in opts. For each key=value pair found,
+ * set the value of the corresponding option in ctx.
+ *
+ * @param ctx          the AVClass object to set options on
+ * @param opts         the options string, key-value pairs separated by a
+ *                     delimiter
+ * @param shorthand    a NULL-terminated array of options names for shorthand
+ *                     notation: if the first field in opts has no key part,
+ *                     the key is taken from the first element of shorthand;
+ *                     then again for the second, etc., until either opts is
+ *                     finished, shorthand is finished or a named option is
+ *                     found; after that, all options must be named
+ * @param key_val_sep  a 0-terminated list of characters used to separate
+ *                     key from value, for example '='
+ * @param pairs_sep    a 0-terminated list of characters used to separate
+ *                     two pairs from each other, for example ':' or ','
+ * @return  the number of successfully set key=value pairs, or a negative
+ *          value corresponding to an AVERROR code in case of error:
+ *          AVERROR(EINVAL) if opts cannot be parsed,
+ *          the error code issued by av_set_string3() if a key/value pair
+ *          cannot be set
+ *
+ * Options names must use only the following characters: a-z A-Z 0-9 - . / _
+ * Separators must use characters distinct from option names and from each
+ * other.
+ */
+int av_opt_set_from_string(void *ctx, const char *opts,
+                           const char *const *shorthand,
+                           const char *key_val_sep, const char *pairs_sep);
+/**
+ * Free all allocated objects in obj.
+ */
+void av_opt_free(void *obj);
+
+/**
+ * Check whether a particular flag is set in a flags field.
+ *
+ * @param field_name the name of the flag field option
+ * @param flag_name the name of the flag to check
+ * @return non-zero if the flag is set, zero if the flag isn't set,
+ *         isn't of the right type, or the flags field doesn't exist.
+ */
+int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name);
+
+/**
+ * Set all the options from a given dictionary on an object.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass
+ * @param options options to process. This dictionary will be freed and replaced
+ *                by a new one containing all options not found in obj.
+ *                Of course this new dictionary needs to be freed by caller
+ *                with av_dict_free().
+ *
+ * @return 0 on success, a negative AVERROR if some option was found in obj,
+ *         but could not be set.
+ *
+ * @see av_dict_copy()
+ */
+int av_opt_set_dict(void *obj, struct AVDictionary **options);
+
+
+/**
+ * Set all the options from a given dictionary on an object.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass
+ * @param options options to process. This dictionary will be freed and replaced
+ *                by a new one containing all options not found in obj.
+ *                Of course this new dictionary needs to be freed by caller
+ *                with av_dict_free().
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ *
+ * @return 0 on success, a negative AVERROR if some option was found in obj,
+ *         but could not be set.
+ *
+ * @see av_dict_copy()
+ */
+int av_opt_set_dict2(void *obj, struct AVDictionary **options, int search_flags);
+
+/**
+ * Extract a key-value pair from the beginning of a string.
+ *
+ * @param ropts        pointer to the options string, will be updated to
+ *                     point to the rest of the string (one of the pairs_sep
+ *                     or the final NUL)
+ * @param key_val_sep  a 0-terminated list of characters used to separate
+ *                     key from value, for example '='
+ * @param pairs_sep    a 0-terminated list of characters used to separate
+ *                     two pairs from each other, for example ':' or ','
+ * @param flags        flags; see the AV_OPT_FLAG_* values below
+ * @param rkey         parsed key; must be freed using av_free()
+ * @param rval         parsed value; must be freed using av_free()
+ *
+ * @return  >=0 for success, or a negative value corresponding to an
+ *          AVERROR code in case of error; in particular:
+ *          AVERROR(EINVAL) if no key is present
+ *
+ */
+int av_opt_get_key_value(const char **ropts,
+                         const char *key_val_sep, const char *pairs_sep,
+                         unsigned flags,
+                         char **rkey, char **rval);
+
+enum {
+
+    /**
+     * Accept to parse a value without a key; the key will then be returned
+     * as NULL.
+     */
+    AV_OPT_FLAG_IMPLICIT_KEY = 1,
+};
+
+/**
+ * @defgroup opt_eval_funcs Evaluating option strings
+ * @{
+ * This group of functions can be used to evaluate option strings
+ * and get numbers out of them. They do the same thing as av_opt_set(),
+ * except the result is written into the caller-supplied pointer.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass.
+ * @param o an option for which the string is to be evaluated.
+ * @param val string to be evaluated.
+ * @param *_out value of the string will be written here.
+ *
+ * @return 0 on success, a negative number on failure.
+ */
+int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int        *flags_out);
+int av_opt_eval_int   (void *obj, const AVOption *o, const char *val, int        *int_out);
+int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t    *int64_out);
+int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float      *float_out);
+int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double     *double_out);
+int av_opt_eval_q     (void *obj, const AVOption *o, const char *val, AVRational *q_out);
+/**
+ * @}
+ */
+
+#define AV_OPT_SEARCH_CHILDREN   0x0001 /**< Search in possible children of the
+                                             given object first. */
+/**
+ *  The obj passed to av_opt_find() is fake -- only a double pointer to AVClass
+ *  instead of a required pointer to a struct containing AVClass. This is
+ *  useful for searching for options without needing to allocate the corresponding
+ *  object.
+ */
+#define AV_OPT_SEARCH_FAKE_OBJ   0x0002
+
+/**
+ *  Allows av_opt_query_ranges and av_opt_query_ranges_default to return more than
+ *  one component for certain option types.
+ *  @see AVOptionRanges for details.
+ */
+#define AV_OPT_MULTI_COMPONENT_RANGE 0x1000
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ *                pointer to an AVClass.
+ *                Alternatively a double pointer to an AVClass, if
+ *                AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ *                 it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ *         was found.
+ *
+ * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable
+ * directly with av_opt_set(). Use special calls which take an options
+ * AVDictionary (e.g. avformat_open_input()) to set options found with this
+ * flag.
+ */
+const AVOption *av_opt_find(void *obj, const char *name, const char *unit,
+                            int opt_flags, int search_flags);
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ *                pointer to an AVClass.
+ *                Alternatively a double pointer to an AVClass, if
+ *                AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ *                 it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ * @param[out] target_obj if non-NULL, an object to which the option belongs will be
+ * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present
+ * in search_flags. This parameter is ignored if search_flags contain
+ * AV_OPT_SEARCH_FAKE_OBJ.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ *         was found.
+ */
+const AVOption *av_opt_find2(void *obj, const char *name, const char *unit,
+                             int opt_flags, int search_flags, void **target_obj);
+
+/**
+ * Iterate over all AVOptions belonging to obj.
+ *
+ * @param obj an AVOptions-enabled struct or a double pointer to an
+ *            AVClass describing it.
+ * @param prev result of the previous call to av_opt_next() on this object
+ *             or NULL
+ * @return next AVOption or NULL
+ */
+const AVOption *av_opt_next(void *obj, const AVOption *prev);
+
+/**
+ * Iterate over AVOptions-enabled children of obj.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return next AVOptions-enabled child or NULL
+ */
+void *av_opt_child_next(void *obj, void *prev);
+
+/**
+ * Iterate over potential AVOptions-enabled children of parent.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return AVClass corresponding to next potential child or NULL
+ */
+const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev);
+
+/**
+ * @defgroup opt_set_funcs Option setting functions
+ * @{
+ * Those functions set the field of obj with the given name to value.
+ *
+ * @param[in] obj A struct whose first element is a pointer to an AVClass.
+ * @param[in] name the name of the field to set
+ * @param[in] val The value to set. In case of av_opt_set() if the field is not
+ * of a string type, then the given string is parsed.
+ * SI postfixes and some named scalars are supported.
+ * If the field is of a numeric type, it has to be a numeric or named
+ * scalar. Behavior with more than one scalar and +- infix operators
+ * is undefined.
+ * If the field is of a flags type, it has to be a sequence of numeric
+ * scalars or named flags separated by '+' or '-'. Prefixing a flag
+ * with '+' causes it to be set without affecting the other flags;
+ * similarly, '-' unsets a flag.
+ * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be set on a child of obj.
+ *
+ * @return 0 if the value has been set, or an AVERROR code in case of
+ * error:
+ * AVERROR_OPTION_NOT_FOUND if no matching option exists
+ * AVERROR(ERANGE) if the value is out of range
+ * AVERROR(EINVAL) if the value is not valid
+ */
+int av_opt_set         (void *obj, const char *name, const char *val, int search_flags);
+int av_opt_set_int     (void *obj, const char *name, int64_t     val, int search_flags);
+int av_opt_set_double  (void *obj, const char *name, double      val, int search_flags);
+int av_opt_set_q       (void *obj, const char *name, AVRational  val, int search_flags);
+int av_opt_set_bin     (void *obj, const char *name, const uint8_t *val, int size, int search_flags);
+int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags);
+int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags);
+int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags);
+int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags);
+int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags);
+/**
+ * @note Any old dictionary present is discarded and replaced with a copy of the new one. The
+ * caller still owns val and is responsible for freeing it.
+ */
+int av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val, int search_flags);
+
+/**
+ * Set a binary option to an integer list.
+ *
+ * @param obj    AVClass object to set options on
+ * @param name   name of the binary option
+ * @param val    pointer to an integer list (must have the correct type with
+ *               regard to the contents of the list)
+ * @param term   list terminator (usually 0 or -1)
+ * @param flags  search flags
+ */
+#define av_opt_set_int_list(obj, name, val, term, flags) \
+    (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? \
+     AVERROR(EINVAL) : \
+     av_opt_set_bin(obj, name, (const uint8_t *)(val), \
+                    av_int_list_length(val, term) * sizeof(*(val)), flags))
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup opt_get_funcs Option getting functions
+ * @{
+ * Those functions get a value of the option with the given name from an object.
+ *
+ * @param[in] obj a struct whose first element is a pointer to an AVClass.
+ * @param[in] name name of the option to get.
+ * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be found in a child of obj.
+ * @param[out] out_val value of the option will be written here
+ * @return >=0 on success, a negative error code otherwise
+ */
+/**
+ * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller
+ */
+int av_opt_get         (void *obj, const char *name, int search_flags, uint8_t   **out_val);
+int av_opt_get_int     (void *obj, const char *name, int search_flags, int64_t    *out_val);
+int av_opt_get_double  (void *obj, const char *name, int search_flags, double     *out_val);
+int av_opt_get_q       (void *obj, const char *name, int search_flags, AVRational *out_val);
+int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out);
+int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt);
+int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt);
+int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val);
+int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout);
+/**
+ * @param[out] out_val The returned dictionary is a copy of the actual value and must
+ * be freed with av_dict_free() by the caller
+ */
+int av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val);
+/**
+ * @}
+ */
+/**
+ * Gets a pointer to the requested field in a struct.
+ * This function allows accessing a struct even when its fields are moved or
+ * renamed since the application making the access has been compiled,
+ *
+ * @returns a pointer to the field, it can be cast to the correct type and read
+ *          or written to.
+ */
+void *av_opt_ptr(const AVClass *avclass, void *obj, const char *name);
+
+/**
+ * Free an AVOptionRanges struct and set it to NULL.
+ */
+void av_opt_freep_ranges(AVOptionRanges **ranges);
+
+/**
+ * Get a list of allowed ranges for the given option.
+ *
+ * The returned list may depend on other fields in obj like for example profile.
+ *
+ * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored
+ *              AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance
+ *              AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges
+ *
+ * The result must be freed with av_opt_freep_ranges.
+ *
+ * @return number of components returned on success, a negative error code otherwise
+ */
+int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags);
+
+/**
+ * Copy options from src object into dest object.
+ *
+ * Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object.
+ * Original memory allocated for such options is freed unless both src and dest options points to the same memory.
+ *
+ * @param dest Object to copy options into
+ * @param src  Object to copy options from
+ * @return 0 on success, negative on error
+ */
+int av_opt_copy(void *dest, void *src);
+
+/**
+ * Get a default list of allowed ranges for the given option.
+ *
+ * This list is constructed without using the AVClass.query_ranges() callback
+ * and can be used as fallback from within the callback.
+ *
+ * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored
+ *              AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance
+ *              AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges
+ *
+ * The result must be freed with av_opt_freep_ranges.
+ *
+ * @return number of components returned on success, a negative error code otherwise
+ */
+int av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags);
+
+/**
+ * Check if given option is set to its default value.
+ *
+ * Options o must belong to the obj. This function must not be called to check child's options state.
+ * @see av_opt_is_set_to_default_by_name().
+ *
+ * @param obj  AVClass object to check option on
+ * @param o    option to be checked
+ * @return     >0 when option is set to its default,
+ *              0 when option is not set to its default,
+ *             <0 on error
+ */
+int av_opt_is_set_to_default(void *obj, const AVOption *o);
+
+/**
+ * Check if given option is set to its default value.
+ *
+ * @param obj          AVClass object to check option on
+ * @param name         option name
+ * @param search_flags combination of AV_OPT_SEARCH_*
+ * @return             >0 when option is set to its default,
+ *                     0 when option is not set to its default,
+ *                     <0 on error
+ */
+int av_opt_is_set_to_default_by_name(void *obj, const char *name, int search_flags);
+
+
+#define AV_OPT_SERIALIZE_SKIP_DEFAULTS              0x00000001  ///< Serialize options that are not set to default values only.
+#define AV_OPT_SERIALIZE_OPT_FLAGS_EXACT            0x00000002  ///< Serialize options that exactly match opt_flags only.
+
+/**
+ * Serialize object's options.
+ *
+ * Create a string containing object's serialized options.
+ * Such string may be passed back to av_opt_set_from_string() in order to restore option values.
+ *
+ * @param[in]  obj           AVClass object to serialize
+ * @param[in]  opt_flags     serialize options with all the specified flags set (AV_OPT_FLAG)
+ * @param[in]  flags         combination of AV_OPT_SERIALIZE_* flags
+ * @param[out] buffer        Pointer to buffer that will be allocated with string containing serialized options.
+ *                           Buffer must be freed by the caller when is no longer needed.
+ * @param[in]  key_val_sep   character used to separate key from value
+ * @param[in]  pairs_sep     character used to separate two pairs from each other
+ * @return                   >= 0 on success, negative on error
+ */
+int av_opt_serialize(void *obj, int opt_flags, int flags, char **buffer,
+                     const char key_val_sep, const char pairs_sep);
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_OPT_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/parseutils.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,187 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PARSEUTILS_H
+#define AVUTIL_PARSEUTILS_H
+
+#include <time.h>
+
+#include "rational.h"
+
+/**
+ * @file
+ * misc parsing utilities
+ */
+
+/**
+ * Parse str and store the parsed ratio in q.
+ *
+ * Note that a ratio with infinite (1/0) or negative value is
+ * considered valid, so you should check on the returned value if you
+ * want to exclude those values.
+ *
+ * The undefined value can be expressed using the "0:0" string.
+ *
+ * @param[in,out] q pointer to the AVRational which will contain the ratio
+ * @param[in] str the string to parse: it has to be a string in the format
+ * num:den, a float number or an expression
+ * @param[in] max the maximum allowed numerator and denominator
+ * @param[in] log_offset log level offset which is applied to the log
+ * level of log_ctx
+ * @param[in] log_ctx parent logging context
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_ratio(AVRational *q, const char *str, int max,
+                   int log_offset, void *log_ctx);
+
+#define av_parse_ratio_quiet(rate, str, max) \
+    av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL)
+
+/**
+ * Parse str and put in width_ptr and height_ptr the detected values.
+ *
+ * @param[in,out] width_ptr pointer to the variable which will contain the detected
+ * width value
+ * @param[in,out] height_ptr pointer to the variable which will contain the detected
+ * height value
+ * @param[in] str the string to parse: it has to be a string in the format
+ * width x height or a valid video size abbreviation.
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str);
+
+/**
+ * Parse str and store the detected values in *rate.
+ *
+ * @param[in,out] rate pointer to the AVRational which will contain the detected
+ * frame rate
+ * @param[in] str the string to parse: it has to be a string in the format
+ * rate_num / rate_den, a float number or a valid video rate abbreviation
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_rate(AVRational *rate, const char *str);
+
+/**
+ * Put the RGBA values that correspond to color_string in rgba_color.
+ *
+ * @param color_string a string specifying a color. It can be the name of
+ * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence,
+ * possibly followed by "@" and a string representing the alpha
+ * component.
+ * The alpha component may be a string composed by "0x" followed by an
+ * hexadecimal number or a decimal number between 0.0 and 1.0, which
+ * represents the opacity value (0x00/0.0 means completely transparent,
+ * 0xff/1.0 completely opaque).
+ * If the alpha component is not specified then 0xff is assumed.
+ * The string "random" will result in a random color.
+ * @param slen length of the initial part of color_string containing the
+ * color. It can be set to -1 if color_string is a null terminated string
+ * containing nothing else than the color.
+ * @return >= 0 in case of success, a negative value in case of
+ * failure (for example if color_string cannot be parsed).
+ */
+int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
+                   void *log_ctx);
+
+/**
+ * Get the name of a color from the internal table of hard-coded named
+ * colors.
+ *
+ * This function is meant to enumerate the color names recognized by
+ * av_parse_color().
+ *
+ * @param color_idx index of the requested color, starting from 0
+ * @param rgb       if not NULL, will point to a 3-element array with the color value in RGB
+ * @return the color name string or NULL if color_idx is not in the array
+ */
+const char *av_get_known_color_name(int color_idx, const uint8_t **rgb);
+
+/**
+ * Parse timestr and return in *time a corresponding number of
+ * microseconds.
+ *
+ * @param timeval puts here the number of microseconds corresponding
+ * to the string in timestr. If the string represents a duration, it
+ * is the number of microseconds contained in the time interval.  If
+ * the string is a date, is the number of microseconds since 1st of
+ * January, 1970 up to the time of the parsed date.  If timestr cannot
+ * be successfully parsed, set *time to INT64_MIN.
+ *
+ * @param timestr a string representing a date or a duration.
+ * - If a date the syntax is:
+ * @code
+ * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z]
+ * now
+ * @endcode
+ * If the value is "now" it takes the current time.
+ * Time is local time unless Z is appended, in which case it is
+ * interpreted as UTC.
+ * If the year-month-day part is not specified it takes the current
+ * year-month-day.
+ * - If a duration the syntax is:
+ * @code
+ * [-][HH:]MM:SS[.m...]
+ * [-]S+[.m...]
+ * @endcode
+ * @param duration flag which tells how to interpret timestr, if not
+ * zero timestr is interpreted as a duration, otherwise as a date
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_parse_time(int64_t *timeval, const char *timestr, int duration);
+
+/**
+ * Parse the input string p according to the format string fmt and
+ * store its results in the structure dt.
+ * This implementation supports only a subset of the formats supported
+ * by the standard strptime().
+ *
+ * In particular it actually supports the parameters:
+ * - %H: the hour as a decimal number, using a 24-hour clock, in the
+ * range '00' through '23'
+ * - %J: hours as a decimal number, in the range '0' through INT_MAX
+ * - %M: the minute as a decimal number, using a 24-hour clock, in the
+ * range '00' through '59'
+ * - %S: the second as a decimal number, using a 24-hour clock, in the
+ * range '00' through '59'
+ * - %Y: the year as a decimal number, using the Gregorian calendar
+ * - %m: the month as a decimal number, in the range '1' through '12'
+ * - %d: the day of the month as a decimal number, in the range '1'
+ * through '31'
+ * - %%: a literal '%'
+ *
+ * @return a pointer to the first character not processed in this
+ * function call, or NULL in case the function fails to match all of
+ * the fmt string and therefore an error occurred
+ */
+char *av_small_strptime(const char *p, const char *fmt, struct tm *dt);
+
+/**
+ * Attempt to find a specific tag in a URL.
+ *
+ * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.
+ * Return 1 if found.
+ */
+int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
+
+/**
+ * Convert the decomposed UTC time in tm to a time_t value.
+ */
+time_t av_timegm(struct tm *tm);
+
+#endif /* AVUTIL_PARSEUTILS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/pca.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,35 @@
+/*
+ * principal component analysis (PCA)
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * principal component analysis (PCA)
+ */
+
+#ifndef AVUTIL_PCA_H
+#define AVUTIL_PCA_H
+
+struct PCA *ff_pca_init(int n);
+void ff_pca_free(struct PCA *pca);
+void ff_pca_add(struct PCA *pca, const double *v);
+int ff_pca(struct PCA *pca, double *eigenvector, double *eigenvalue);
+
+#endif /* AVUTIL_PCA_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/pixdesc.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,170 @@
+/*
+ * pixel format descriptor
+ * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "avassert.h"
+#include "avstring.h"
+#include "common.h"
+#include "pixfmt.h"
+#include "pixdesc.h"
+#include "internal.h"
+#include "intreadwrite.h"
+#include "version.h"
+
+typedef struct {
+    enum AVPixelFormat pix_fmt;   /* pixel format identifier this entry describes */
+    AVPixFmtDescriptor desc;      /* descriptor returned by av_pix_fmt_desc_get() for pix_fmt */
+} AVPixFmtDescriptorEntry;
+
+
+static const AVPixFmtDescriptorEntry pix_desc[] = { /* table searched linearly by av_pix_fmt_desc_get() */
+#ifdef USE_VAR_BIT_DEPTH /* 16-bit little-endian sample layouts */
+    {
+        AV_PIX_FMT_YUV420P16LE,
+        {
+            //.name = "yuv420p16le",
+            .nb_components = 3,
+            .log2_chroma_w = 1,
+            .log2_chroma_h = 1,
+            .comp = { /* { plane, step_minus1, offset_plus1, shift, depth_minus1 } */
+                { 0, 1, 1, 0, 15 },        /* Y */
+                { 1, 1, 1, 0, 15 },        /* U */
+                { 2, 1, 1, 0, 15 },        /* V */
+            },
+        } /* NOTE(review): no AV_PIX_FMT_FLAG_PLANAR here, unlike 422/444 — verify intentional */
+    },
+    {
+        AV_PIX_FMT_YUV422P16LE,
+        {
+            //.name = "yuv422p16le",
+            .nb_components = 3,
+            .log2_chroma_w = 1,
+            .log2_chroma_h = 0,
+            .comp = {
+                { 0, 1, 1, 0, 15 },        /* Y */
+                { 1, 1, 1, 0, 15 },        /* U */
+                { 2, 1, 1, 0, 15 },        /* V */
+            },
+            .flags = AV_PIX_FMT_FLAG_PLANAR,
+        }
+    },
+    {
+        AV_PIX_FMT_YUV444P16LE,
+        {
+            //.name = "yuv444p16le",
+            .nb_components = 3,
+            .log2_chroma_w = 0,
+            .log2_chroma_h = 0,
+            .comp = {
+                { 0, 1, 1, 0, 15 },        /* Y */
+                { 1, 1, 1, 0, 15 },        /* U */
+                { 2, 1, 1, 0, 15 },        /* V */
+            },
+            .flags = AV_PIX_FMT_FLAG_PLANAR,
+        }
+    },
+    {
+        AV_PIX_FMT_GRAY16LE,
+        {
+            //.name = "gray16le",
+            .nb_components = 1,
+            .log2_chroma_w = 0,
+            .log2_chroma_h = 0,
+            .comp = {
+                { 0, 1, 1, 0, 15 },       /* Y */
+            },
+            //.alias = "y16le",
+        },
+    },
+#else /* 8-bit sample layouts */
+    {
+        AV_PIX_FMT_YUV420P,
+        {
+            //.name = "yuv420p",
+            .nb_components = 3,
+            .log2_chroma_w = 1,
+            .log2_chroma_h = 1,
+            .comp = {
+                { 0, 1, 1, 0, 7 },        /* Y */
+                { 1, 1, 1, 0, 7 },        /* U */
+                { 2, 1, 1, 0, 7 },        /* V */
+            },
+        } /* NOTE(review): no AV_PIX_FMT_FLAG_PLANAR here, unlike 422/444 — verify intentional */
+    },
+    {
+        AV_PIX_FMT_YUV422P,
+        {
+            //.name = "yuv422p",
+            .nb_components = 3,
+            .log2_chroma_w = 1,
+            .log2_chroma_h = 0,
+            .comp = {
+                { 0, 1, 1, 0, 7 },        /* Y */
+                { 1, 1, 1, 0, 7 },        /* U */
+                { 2, 1, 1, 0, 7 },        /* V */
+            },
+            .flags = AV_PIX_FMT_FLAG_PLANAR,
+        }
+    },
+    {
+        AV_PIX_FMT_YUV444P,
+        {
+            //.name = "yuv444p",
+            .nb_components = 3,
+            .log2_chroma_w = 0,
+            .log2_chroma_h = 0,
+            .comp = {
+                { 0, 1, 1, 0, 7 },        /* Y */
+                { 1, 1, 1, 0, 7 },        /* U */
+                { 2, 1, 1, 0, 7 },        /* V */
+            },
+            .flags = AV_PIX_FMT_FLAG_PLANAR,
+        }
+    },
+    {
+        AV_PIX_FMT_GRAY8,
+        {
+            //.name = "gray",
+            .nb_components = 1,
+            .log2_chroma_w = 0,
+            .log2_chroma_h = 0,
+            .comp = {
+                { 0, 1, 1, 0, 7 },       /* Y */
+            },
+        },
+    },
+#endif
+};
+
+#define countof(x) (sizeof(x) / sizeof(x[0])) /* element count of a true array; wrong for pointers */
+
+const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt) /* descriptor lookup for the reduced format set above */
+{
+    int i; /* int vs size_t countof() is fine for this small static table */
+    for(i = 0; i < countof(pix_desc); i++) { /* linear scan over pix_desc[] */
+        if (pix_desc[i].pix_fmt == pix_fmt) {
+            return &pix_desc[i].desc; /* found: pointer into static table, never freed by caller */
+        }
+    }
+    return NULL; /* pixel format not in the compiled-in table */
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/pixdesc.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,385 @@
+/*
+ * pixel format descriptor
+ * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXDESC_H
+#define AVUTIL_PIXDESC_H
+
+#include <inttypes.h>
+
+#include "attributes.h"
+#include "pixfmt.h"
+
+typedef struct AVComponentDescriptor {
+    /**
+     * Which of the 4 planes contains the component.
+     */
+    uint16_t plane        : 2;
+
+    /**
+     * Number of elements between 2 horizontally consecutive pixels minus 1.
+     * Elements are bits for bitstream formats, bytes otherwise.
+     */
+    uint16_t step_minus1  : 3;
+
+    /**
+     * Number of elements before the component of the first pixel plus 1.
+     * Elements are bits for bitstream formats, bytes otherwise.
+     */
+    uint16_t offset_plus1 : 3;
+
+    /**
+     * Number of least significant bits that must be shifted away
+     * to get the value.
+     */
+    uint16_t shift        : 3;
+
+    /**
+     * Number of bits in the component minus 1.
+     */
+    uint16_t depth_minus1 : 4;
+} AVComponentDescriptor;
+
+/**
+ * Descriptor that unambiguously describes how the bits of a pixel are
+ * stored in the up to 4 data planes of an image. It also stores the
+ * subsampling factors and number of components.
+ *
+ * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV
+ *       and all the YUV variants) AVPixFmtDescriptor just stores how values
+ *       are stored not what these values represent.
+ */
+typedef struct AVPixFmtDescriptor {
+    const char *name;
+    uint8_t nb_components;  ///< The number of components each pixel has, (1-4)
+
+    /**
+     * Amount to shift the luma width right to find the chroma width.
+     * For YV12 this is 1 for example.
+     * chroma_width = -((-luma_width) >> log2_chroma_w)
+     * The note above is needed to ensure rounding up.
+     * This value only refers to the chroma components.
+     */
+    uint8_t log2_chroma_w;  ///< chroma_width = -((-luma_width )>>log2_chroma_w)
+
+    /**
+     * Amount to shift the luma height right to find the chroma height.
+     * For YV12 this is 1 for example.
+     * chroma_height= -((-luma_height) >> log2_chroma_h)
+     * The note above is needed to ensure rounding up.
+     * This value only refers to the chroma components.
+     */
+    uint8_t log2_chroma_h;
+    uint8_t flags;
+
+    /**
+     * Parameters that describe how pixels are packed.
+     * If the format has 2 or 4 components, then alpha is last.
+     * If the format has 1 or 2 components, then luma is 0.
+     * If the format has 3 or 4 components,
+     * if the RGB flag is set then 0 is red, 1 is green and 2 is blue;
+     * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V.
+     */
+    AVComponentDescriptor comp[4];
+
+    /**
+     * Alternative comma-separated names.
+     */
+    const char *alias;
+} AVPixFmtDescriptor;
+
+/**
+ * Pixel format is big-endian.
+ */
+#define AV_PIX_FMT_FLAG_BE           (1 << 0)
+/**
+ * Pixel format has a palette in data[1], values are indexes in this palette.
+ */
+#define AV_PIX_FMT_FLAG_PAL          (1 << 1)
+/**
+ * All values of a component are bit-wise packed end to end.
+ */
+#define AV_PIX_FMT_FLAG_BITSTREAM    (1 << 2)
+/**
+ * Pixel format is an HW accelerated format.
+ */
+#define AV_PIX_FMT_FLAG_HWACCEL      (1 << 3)
+/**
+ * At least one pixel component is not in the first data plane.
+ */
+#define AV_PIX_FMT_FLAG_PLANAR       (1 << 4)
+/**
+ * The pixel format contains RGB-like data (as opposed to YUV/grayscale).
+ */
+#define AV_PIX_FMT_FLAG_RGB          (1 << 5)
+/**
+ * The pixel format is "pseudo-paletted". This means that FFmpeg treats it as
+ * paletted internally, but the palette is generated by the decoder and is not
+ * stored in the file.
+ */
+#define AV_PIX_FMT_FLAG_PSEUDOPAL    (1 << 6)
+/**
+ * The pixel format has an alpha channel.
+ */
+#define AV_PIX_FMT_FLAG_ALPHA        (1 << 7)
+
+#if FF_API_PIX_FMT
+/**
+ * @deprecated use the AV_PIX_FMT_FLAG_* flags
+ */
+#define PIX_FMT_BE        AV_PIX_FMT_FLAG_BE
+#define PIX_FMT_PAL       AV_PIX_FMT_FLAG_PAL
+#define PIX_FMT_BITSTREAM AV_PIX_FMT_FLAG_BITSTREAM
+#define PIX_FMT_HWACCEL   AV_PIX_FMT_FLAG_HWACCEL
+#define PIX_FMT_PLANAR    AV_PIX_FMT_FLAG_PLANAR
+#define PIX_FMT_RGB       AV_PIX_FMT_FLAG_RGB
+#define PIX_FMT_PSEUDOPAL AV_PIX_FMT_FLAG_PSEUDOPAL
+#define PIX_FMT_ALPHA     AV_PIX_FMT_FLAG_ALPHA
+#endif
+
+#if FF_API_PIX_FMT_DESC
+/**
+ * The array of all the pixel format descriptors.
+ */
+extern attribute_deprecated const AVPixFmtDescriptor av_pix_fmt_descriptors[];
+#endif
+
+/**
+ * Read a line from an image, and write the values of the
+ * pixel format component c to dst.
+ *
+ * @param data the array containing the pointers to the planes of the image
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to read
+ * @param y the vertical coordinate of the first pixel to read
+ * @param w the width of the line to read, that is the number of
+ * values to write to dst
+ * @param read_pal_component if not zero and the format is a paletted
+ * format writes the values corresponding to the palette
+ * component c in data[1] to dst, rather than the palette indexes in
+ * data[0]. The behavior is undefined if the format is not paletted.
+ */
+void av_read_image_line(uint16_t *dst, const uint8_t *data[4],
+                        const int linesize[4], const AVPixFmtDescriptor *desc,
+                        int x, int y, int c, int w, int read_pal_component);
+
+/**
+ * Write the values from src to the pixel format component c of an
+ * image line.
+ *
+ * @param src array containing the values to write
+ * @param data the array containing the pointers to the planes of the
+ * image to write into. It is supposed to be zeroed.
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to write
+ * @param y the vertical coordinate of the first pixel to write
+ * @param w the width of the line to write, that is the number of
+ * values to write to the image line
+ */
+void av_write_image_line(const uint16_t *src, uint8_t *data[4],
+                         const int linesize[4], const AVPixFmtDescriptor *desc,
+                         int x, int y, int c, int w);
+
+/**
+ * Return the pixel format corresponding to name.
+ *
+ * If there is no pixel format with name name, then looks for a
+ * pixel format with the name corresponding to the native endian
+ * format of name.
+ * For example in a little-endian system, first looks for "gray16",
+ * then for "gray16le".
+ *
+ * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE.
+ */
+enum AVPixelFormat av_get_pix_fmt(const char *name);
+
+/**
+ * Return the short name for a pixel format, NULL in case pix_fmt is
+ * unknown.
+ *
+ * @see av_get_pix_fmt(), av_get_pix_fmt_string()
+ */
+const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt);
+
+/**
+ * Print in buf the string corresponding to the pixel format with
+ * number pix_fmt, or a header if pix_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param pix_fmt the number of the pixel format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ */
+char *av_get_pix_fmt_string(char *buf, int buf_size,
+                            enum AVPixelFormat pix_fmt);
+
+/**
+ * Return the number of bits per pixel used by the pixel format
+ * described by pixdesc. Note that this is not the same as the number
+ * of bits per sample.
+ *
+ * The returned number of bits refers to the number of bits actually
+ * used for storing the pixel information, that is padding bits are
+ * not counted.
+ */
+int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * Return the number of bits per pixel for the pixel format
+ * described by pixdesc, including any padding or unused bits.
+ */
+int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * @return a pixel format descriptor for provided pixel format or NULL if
+ * this pixel format is unknown.
+ */
+const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt);
+
+/**
+ * Iterate over all pixel format descriptors known to libavutil.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev);
+
+/**
+ * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc
+ * is not a valid pointer to a pixel format descriptor.
+ */
+enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc);
+
+/**
+ * Utility function to access log2_chroma_w log2_chroma_h from
+ * the pixel format AVPixFmtDescriptor.
+ *
+ * See av_get_chroma_sub_sample() for a function that asserts a
+ * valid pixel format instead of returning an error code.
+ * It's recommended that you use avcodec_get_chroma_sub_sample unless
+ * you do check the return code!
+ *
+ * @param[in]  pix_fmt the pixel format
+ * @param[out] h_shift store log2_chroma_w
+ * @param[out] v_shift store log2_chroma_h
+ *
+ * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format
+ */
+int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt,
+                                     int *h_shift, int *v_shift);
+
+/**
+ * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a
+ * valid pixel format.
+ */
+int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt);
+
+void ff_check_pixfmt_descriptors(void);
+
+/**
+ * Utility function to swap the endianness of a pixel format.
+ *
+ * @param[in]  pix_fmt the pixel format
+ *
+ * @return pixel format with swapped endianness if it exists,
+ * otherwise AV_PIX_FMT_NONE
+ */
+enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt);
+
+#define FF_LOSS_RESOLUTION  0x0001 /**< loss due to resolution change */
+#define FF_LOSS_DEPTH       0x0002 /**< loss due to color depth change */
+#define FF_LOSS_COLORSPACE  0x0004 /**< loss due to color space conversion */
+#define FF_LOSS_ALPHA       0x0008 /**< loss of alpha bits */
+#define FF_LOSS_COLORQUANT  0x0010 /**< loss due to color quantization */
+#define FF_LOSS_CHROMA      0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
+
+/**
+ * Compute what kind of losses will occur when converting from one specific
+ * pixel format to another.
+ * When converting from one pixel format to another, information loss may occur.
+ * For example, when converting from RGB24 to GRAY, the color information will
+ * be lost. Similarly, other losses occur when converting from some formats to
+ * other formats. These losses can involve loss of chroma, but also loss of
+ * resolution, loss of color depth, loss due to the color space conversion, loss
+ * of the alpha bits or loss due to color quantization.
+ * av_get_pix_fmt_loss() informs you about the various types of losses
+ * which will occur when converting from one pixel format to another.
+ *
+ * @param[in] dst_pix_fmt destination pixel format
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @return Combination of flags informing you what kind of losses will occur
+ * (maximum loss for an invalid dst_pix_fmt).
+ */
+int av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt,
+                        enum AVPixelFormat src_pix_fmt,
+                        int has_alpha);
+
+/**
+ * Compute the best of the two given destination pixel formats to use when
+ * converting from a given source pixel format.
+ * Converting to either candidate may lose information (chroma, resolution,
+ * color depth, color space, alpha, or precision due to color quantization);
+ * this function weighs those losses for both candidates and returns the
+ * destination format that preserves the source best.
+ * See av_get_pix_fmt_loss() for the per-conversion loss computation.
+ *
+ * @param[in] dst_pix_fmt1 first candidate destination pixel format
+ * @param[in] dst_pix_fmt2 second candidate destination pixel format
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[in,out] loss_ptr Combination of loss flags. In: loss types to
+ * consider; out: the loss that occurs when converting to the returned format.
+ * @return The best candidate pixel format, or AV_PIX_FMT_NONE if neither
+ * is usable.
+ */
+enum AVPixelFormat av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
+                                             enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
+
+/**
+ * @return the name for provided color range or NULL if unknown.
+ */
+const char *av_color_range_name(enum AVColorRange range);
+
+/**
+ * @return the name for provided color primaries or NULL if unknown.
+ */
+const char *av_color_primaries_name(enum AVColorPrimaries primaries);
+
+/**
+ * @return the name for provided color transfer or NULL if unknown.
+ */
+const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer);
+
+/**
+ * @return the name for provided color space or NULL if unknown.
+ */
+const char *av_color_space_name(enum AVColorSpace space);
+
+/**
+ * @return the name for provided chroma location or NULL if unknown.
+ */
+const char *av_chroma_location_name(enum AVChromaLocation location);
+
+#endif /* AVUTIL_PIXDESC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/pixelutils.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,52 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXELUTILS_H
+#define AVUTIL_PIXELUTILS_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include "common.h"
+
+/**
+ * Sum of abs(src1[x] - src2[x])
+ */
+typedef int (*av_pixelutils_sad_fn)(const uint8_t *src1, ptrdiff_t stride1,
+                                    const uint8_t *src2, ptrdiff_t stride2);
+
+/**
+ * Get a potentially optimized pointer to a Sum-of-absolute-differences
+ * function (see the av_pixelutils_sad_fn prototype).
+ *
+ * @param w_bits  1<<w_bits is the requested width of the block size
+ * @param h_bits  1<<h_bits is the requested height of the block size
+ * @param aligned If set to 2, the returned sad function will assume src1 and
+ *                src2 addresses are aligned on the block size.
+ *                If set to 1, the returned sad function will assume src1 is
+ *                aligned on the block size.
+ *                If set to 0, the returned sad function assume no particular
+ *                alignment.
+ * @param log_ctx context used for logging, can be NULL
+ *
+ * @return a pointer to the SAD function or NULL in case of error (because of
+ *         invalid parameters)
+ */
+av_pixelutils_sad_fn av_pixelutils_get_sad_fn(int w_bits, int h_bits,
+                                              int aligned, void *log_ctx);
+
+#endif /* AVUTIL_PIXELUTILS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/pixfmt.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,521 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * @file
+ * pixel format definitions
+ *
+ */
+
+#include "libavutil/avconfig.h"
+#include "version.h"
+
+#define AVPALETTE_SIZE 1024
+#define AVPALETTE_COUNT 256
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ *  (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
+ *
+ * @par
+ * When the pixel format is palettized RGB (AV_PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
+ * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ *
+ * @par
+ * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like
+ * for pal8. This palette is filled in automatically by the function
+ * allocating the picture.
+ *
+ * @note
+ * Make sure that all newly added big-endian formats have (pix_fmt & 1) == 1
+ * and that all newly added little-endian formats have (pix_fmt & 1) == 0.
+ * This allows simpler detection of big vs little-endian.
+ */
+enum AVPixelFormat {
+    AV_PIX_FMT_NONE = -1,
+    AV_PIX_FMT_YUV420P,   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+    AV_PIX_FMT_YUYV422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+    AV_PIX_FMT_RGB24,     ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+    AV_PIX_FMT_BGR24,     ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+    AV_PIX_FMT_YUV422P,   ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+    AV_PIX_FMT_YUV444P,   ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+    AV_PIX_FMT_YUV410P,   ///< planar YUV 4:1:0,  9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+    AV_PIX_FMT_YUV411P,   ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+    AV_PIX_FMT_GRAY8,     ///<        Y        ,  8bpp
+    AV_PIX_FMT_MONOWHITE, ///<        Y        ,  1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+    AV_PIX_FMT_MONOBLACK, ///<        Y        ,  1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+    AV_PIX_FMT_PAL8,      ///< 8 bit with PIX_FMT_RGB32 palette
+    AV_PIX_FMT_YUVJ420P,  ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+    AV_PIX_FMT_YUVJ422P,  ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+    AV_PIX_FMT_YUVJ444P,  ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+#if FF_API_XVMC
+    AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+    AV_PIX_FMT_XVMC_MPEG2_IDCT,
+#define AV_PIX_FMT_XVMC AV_PIX_FMT_XVMC_MPEG2_IDCT
+#endif /* FF_API_XVMC */
+    AV_PIX_FMT_UYVY422,   ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+    AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+    AV_PIX_FMT_BGR8,      ///< packed RGB 3:3:2,  8bpp, (msb)2B 3G 3R(lsb)
+    AV_PIX_FMT_BGR4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+    AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1B 2G 1R(lsb)
+    AV_PIX_FMT_RGB8,      ///< packed RGB 3:3:2,  8bpp, (msb)2R 3G 3B(lsb)
+    AV_PIX_FMT_RGB4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+    AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1R 2G 1B(lsb)
+    AV_PIX_FMT_NV12,      ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+    AV_PIX_FMT_NV21,      ///< as above, but U and V bytes are swapped
+
+    AV_PIX_FMT_ARGB,      ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+    AV_PIX_FMT_RGBA,      ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+    AV_PIX_FMT_ABGR,      ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+    AV_PIX_FMT_BGRA,      ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+    AV_PIX_FMT_GRAY16BE,  ///<        Y        , 16bpp, big-endian
+    AV_PIX_FMT_GRAY16LE,  ///<        Y        , 16bpp, little-endian
+    AV_PIX_FMT_YUV440P,   ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+    AV_PIX_FMT_YUVJ440P,  ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+    AV_PIX_FMT_YUVA420P,  ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+#if FF_API_VDPAU
+    AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+    AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+    AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+    AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+    AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+    AV_PIX_FMT_RGB48BE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+    AV_PIX_FMT_RGB48LE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+    AV_PIX_FMT_RGB565BE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), big-endian
+    AV_PIX_FMT_RGB565LE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), little-endian
+    AV_PIX_FMT_RGB555BE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
+    AV_PIX_FMT_RGB555LE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+
+    AV_PIX_FMT_BGR565BE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), big-endian
+    AV_PIX_FMT_BGR565LE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), little-endian
+    AV_PIX_FMT_BGR555BE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
+    AV_PIX_FMT_BGR555LE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+
+    AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+    AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+    AV_PIX_FMT_VAAPI_VLD,  ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+
+    AV_PIX_FMT_YUV420P16LE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+    AV_PIX_FMT_YUV420P16BE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+    AV_PIX_FMT_YUV422P16LE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    AV_PIX_FMT_YUV422P16BE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+    AV_PIX_FMT_YUV444P16LE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+    AV_PIX_FMT_YUV444P16BE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+#if FF_API_VDPAU
+    AV_PIX_FMT_VDPAU_MPEG4,  ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+    AV_PIX_FMT_DXVA2_VLD,    ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+    AV_PIX_FMT_RGB444LE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
+    AV_PIX_FMT_RGB444BE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
+    AV_PIX_FMT_BGR444LE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+    AV_PIX_FMT_BGR444BE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+    AV_PIX_FMT_YA8,       ///< 8bit gray, 8bit alpha
+
+    AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+    AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+
+    AV_PIX_FMT_BGR48BE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+    AV_PIX_FMT_BGR48LE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+
+    /**
+     * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
+     * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
+     * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
+     */
+    AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+    AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+    AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+    AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+    AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+    AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+    AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+    AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+    AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+    AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+    AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    AV_PIX_FMT_VDA_VLD,    ///< hardware decoding through VDA
+
+#ifdef AV_PIX_FMT_ABI_GIT_MASTER
+    AV_PIX_FMT_RGBA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+    AV_PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+    AV_PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+    AV_PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+#endif
+    AV_PIX_FMT_GBRP,      ///< planar GBR 4:4:4 24bpp
+    AV_PIX_FMT_GBRP9BE,   ///< planar GBR 4:4:4 27bpp, big-endian
+    AV_PIX_FMT_GBRP9LE,   ///< planar GBR 4:4:4 27bpp, little-endian
+    AV_PIX_FMT_GBRP10BE,  ///< planar GBR 4:4:4 30bpp, big-endian
+    AV_PIX_FMT_GBRP10LE,  ///< planar GBR 4:4:4 30bpp, little-endian
+    AV_PIX_FMT_GBRP16BE,  ///< planar GBR 4:4:4 48bpp, big-endian
+    AV_PIX_FMT_GBRP16LE,  ///< planar GBR 4:4:4 48bpp, little-endian
+
+    /**
+     * duplicated pixel formats for compatibility with libav.
+     * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55)
+     * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85)
+     */
+    AV_PIX_FMT_YUVA422P_LIBAV,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+    AV_PIX_FMT_YUVA444P_LIBAV,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+
+    AV_PIX_FMT_YUVA420P9BE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
+    AV_PIX_FMT_YUVA420P9LE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
+    AV_PIX_FMT_YUVA422P9BE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
+    AV_PIX_FMT_YUVA422P9LE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
+    AV_PIX_FMT_YUVA444P9BE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+    AV_PIX_FMT_YUVA444P9LE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+    AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+    AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+    AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+    AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+    AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+    AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+    AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+    AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+    AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+    AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+    AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+    AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+
+    AV_PIX_FMT_VDPAU,     ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
+
+    AV_PIX_FMT_XYZ12LE,      ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
+    AV_PIX_FMT_XYZ12BE,      ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
+    AV_PIX_FMT_NV16,         ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+    AV_PIX_FMT_NV20LE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    AV_PIX_FMT_NV20BE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+
+    /**
+     * duplicated pixel formats for compatibility with libav.
+     * FFmpeg supports these formats since Sat Sep 24 06:01:45 2011 +0200 (commits 9569a3c9f41387a8c7d1ce97d8693520477a66c3)
+     * also see Fri Nov 25 01:38:21 2011 +0100 92afb431621c79155fcb7171d26f137eb1bee028
+     * Libav added them Sun Mar 16 23:05:47 2014 +0100 with incompatible values (commit 1481d24c3a0abf81e1d7a514547bd5305232be30)
+     */
+    AV_PIX_FMT_RGBA64BE_LIBAV,     ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+    AV_PIX_FMT_RGBA64LE_LIBAV,     ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+    AV_PIX_FMT_BGRA64BE_LIBAV,     ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+    AV_PIX_FMT_BGRA64LE_LIBAV,     ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+
+    AV_PIX_FMT_YVYU422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
+
+    AV_PIX_FMT_VDA,          ///< HW acceleration through VDA, data[3] contains a CVPixelBufferRef
+
+    AV_PIX_FMT_YA16BE,       ///< 16bit gray, 16bit alpha (big-endian)
+    AV_PIX_FMT_YA16LE,       ///< 16bit gray, 16bit alpha (little-endian)
+
+
+#ifndef AV_PIX_FMT_ABI_GIT_MASTER
+    AV_PIX_FMT_RGBA64BE=0x123,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+    AV_PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+    AV_PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+    AV_PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+#endif
+    AV_PIX_FMT_0RGB=0x123+4,      ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...
+    AV_PIX_FMT_RGB0,      ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...
+    AV_PIX_FMT_0BGR,      ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...
+    AV_PIX_FMT_BGR0,      ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...
+    AV_PIX_FMT_YUVA444P,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+    AV_PIX_FMT_YUVA422P,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+
+    AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+    AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+    AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+    AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+    AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+    AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+    AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+    AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+    AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+    AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+    AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+    AV_PIX_FMT_GBRP12BE,    ///< planar GBR 4:4:4 36bpp, big-endian
+    AV_PIX_FMT_GBRP12LE,    ///< planar GBR 4:4:4 36bpp, little-endian
+    AV_PIX_FMT_GBRP14BE,    ///< planar GBR 4:4:4 42bpp, big-endian
+    AV_PIX_FMT_GBRP14LE,    ///< planar GBR 4:4:4 42bpp, little-endian
+    AV_PIX_FMT_GBRAP,       ///< planar GBRA 4:4:4:4 32bpp
+    AV_PIX_FMT_GBRAP16BE,   ///< planar GBRA 4:4:4:4 64bpp, big-endian
+    AV_PIX_FMT_GBRAP16LE,   ///< planar GBRA 4:4:4:4 64bpp, little-endian
+    AV_PIX_FMT_YUVJ411P,    ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range
+
+    AV_PIX_FMT_BAYER_BGGR8,    ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */
+    AV_PIX_FMT_BAYER_RGGB8,    ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */
+    AV_PIX_FMT_BAYER_GBRG8,    ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */
+    AV_PIX_FMT_BAYER_GRBG8,    ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */
+    AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */
+    AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */
+    AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */
+    AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */
+    AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */
+    AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */
+    AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */
+    AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */
+#if !FF_API_XVMC
+    AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing
+#endif /* !FF_API_XVMC */
+
+    AV_PIX_FMT_NB,        ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+
+#if FF_API_PIX_FMT
+#include "old_pix_fmts.h"
+#endif
+};
+
+#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI
+#define AV_PIX_FMT_YUVA422P AV_PIX_FMT_YUVA422P_LIBAV
+#define AV_PIX_FMT_YUVA444P AV_PIX_FMT_YUVA444P_LIBAV
+#define AV_PIX_FMT_RGBA64BE AV_PIX_FMT_RGBA64BE_LIBAV
+#define AV_PIX_FMT_RGBA64LE AV_PIX_FMT_RGBA64LE_LIBAV
+#define AV_PIX_FMT_BGRA64BE AV_PIX_FMT_BGRA64BE_LIBAV
+#define AV_PIX_FMT_BGRA64LE AV_PIX_FMT_BGRA64LE_LIBAV
+#endif
+
+
+#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A
+#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP
+
+#if AV_HAVE_BIGENDIAN
+#   define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
+#else
+#   define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
+#endif
+
+#define AV_PIX_FMT_RGB32   AV_PIX_FMT_NE(ARGB, BGRA)
+#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)
+#define AV_PIX_FMT_BGR32   AV_PIX_FMT_NE(ABGR, RGBA)
+#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)
+#define AV_PIX_FMT_0RGB32  AV_PIX_FMT_NE(0RGB, BGR0)
+#define AV_PIX_FMT_0BGR32  AV_PIX_FMT_NE(0BGR, RGB0)
+
+#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
+#define AV_PIX_FMT_YA16   AV_PIX_FMT_NE(YA16BE,   YA16LE)
+#define AV_PIX_FMT_RGB48  AV_PIX_FMT_NE(RGB48BE,  RGB48LE)
+#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)
+#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)
+#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)
+#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)
+#define AV_PIX_FMT_BGR48  AV_PIX_FMT_NE(BGR48BE,  BGR48LE)
+#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)
+#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)
+#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)
+#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)
+
+#define AV_PIX_FMT_YUV420P9  AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
+#define AV_PIX_FMT_YUV422P9  AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)
+#define AV_PIX_FMT_YUV444P9  AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE)
+#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
+#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
+#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE)
+#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE)
+#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE)
+#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE)
+#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE)
+#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE)
+#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
+#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
+#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
+
+#define AV_PIX_FMT_GBRP9     AV_PIX_FMT_NE(GBRP9BE ,    GBRP9LE)
+#define AV_PIX_FMT_GBRP10    AV_PIX_FMT_NE(GBRP10BE,    GBRP10LE)
+#define AV_PIX_FMT_GBRP12    AV_PIX_FMT_NE(GBRP12BE,    GBRP12LE)
+#define AV_PIX_FMT_GBRP14    AV_PIX_FMT_NE(GBRP14BE,    GBRP14LE)
+#define AV_PIX_FMT_GBRP16    AV_PIX_FMT_NE(GBRP16BE,    GBRP16LE)
+#define AV_PIX_FMT_GBRAP16   AV_PIX_FMT_NE(GBRAP16BE,   GBRAP16LE)
+
+#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE,    BAYER_BGGR16LE)
+#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE,    BAYER_RGGB16LE)
+#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE,    BAYER_GBRG16LE)
+#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE,    BAYER_GRBG16LE)
+
+
+#define AV_PIX_FMT_YUVA420P9  AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
+#define AV_PIX_FMT_YUVA422P9  AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
+#define AV_PIX_FMT_YUVA444P9  AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)
+#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)
+#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)
+#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)
+#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)
+#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)
+#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
+
+#define AV_PIX_FMT_XYZ12      AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)
+#define AV_PIX_FMT_NV20       AV_PIX_FMT_NE(NV20BE,  NV20LE)
+
+
+#if FF_API_PIX_FMT
+#define PixelFormat AVPixelFormat
+
+#define PIX_FMT_Y400A AV_PIX_FMT_Y400A
+#define PIX_FMT_GBR24P AV_PIX_FMT_GBR24P
+
+#define PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le)
+
+#define PIX_FMT_RGB32   AV_PIX_FMT_RGB32
+#define PIX_FMT_RGB32_1 AV_PIX_FMT_RGB32_1
+#define PIX_FMT_BGR32   AV_PIX_FMT_BGR32
+#define PIX_FMT_BGR32_1 AV_PIX_FMT_BGR32_1
+#define PIX_FMT_0RGB32  AV_PIX_FMT_0RGB32
+#define PIX_FMT_0BGR32  AV_PIX_FMT_0BGR32
+
+#define PIX_FMT_GRAY16 AV_PIX_FMT_GRAY16
+#define PIX_FMT_RGB48  AV_PIX_FMT_RGB48
+#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565
+#define PIX_FMT_RGB555 AV_PIX_FMT_RGB555
+#define PIX_FMT_RGB444 AV_PIX_FMT_RGB444
+#define PIX_FMT_BGR48  AV_PIX_FMT_BGR48
+#define PIX_FMT_BGR565 AV_PIX_FMT_BGR565
+#define PIX_FMT_BGR555 AV_PIX_FMT_BGR555
+#define PIX_FMT_BGR444 AV_PIX_FMT_BGR444
+
+#define PIX_FMT_YUV420P9  AV_PIX_FMT_YUV420P9
+#define PIX_FMT_YUV422P9  AV_PIX_FMT_YUV422P9
+#define PIX_FMT_YUV444P9  AV_PIX_FMT_YUV444P9
+#define PIX_FMT_YUV420P10 AV_PIX_FMT_YUV420P10
+#define PIX_FMT_YUV422P10 AV_PIX_FMT_YUV422P10
+#define PIX_FMT_YUV444P10 AV_PIX_FMT_YUV444P10
+#define PIX_FMT_YUV420P12 AV_PIX_FMT_YUV420P12
+#define PIX_FMT_YUV422P12 AV_PIX_FMT_YUV422P12
+#define PIX_FMT_YUV444P12 AV_PIX_FMT_YUV444P12
+#define PIX_FMT_YUV420P14 AV_PIX_FMT_YUV420P14
+#define PIX_FMT_YUV422P14 AV_PIX_FMT_YUV422P14
+#define PIX_FMT_YUV444P14 AV_PIX_FMT_YUV444P14
+#define PIX_FMT_YUV420P16 AV_PIX_FMT_YUV420P16
+#define PIX_FMT_YUV422P16 AV_PIX_FMT_YUV422P16
+#define PIX_FMT_YUV444P16 AV_PIX_FMT_YUV444P16
+
+#define PIX_FMT_RGBA64 AV_PIX_FMT_RGBA64
+#define PIX_FMT_BGRA64 AV_PIX_FMT_BGRA64
+#define PIX_FMT_GBRP9  AV_PIX_FMT_GBRP9
+#define PIX_FMT_GBRP10 AV_PIX_FMT_GBRP10
+#define PIX_FMT_GBRP12 AV_PIX_FMT_GBRP12
+#define PIX_FMT_GBRP14 AV_PIX_FMT_GBRP14
+#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16
+#endif
+
+/**
+  * Chromaticity coordinates of the source primaries.
+  */
+enum AVColorPrimaries {
+    AVCOL_PRI_RESERVED0   = 0,
+    AVCOL_PRI_BT709       = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
+    AVCOL_PRI_UNSPECIFIED = 2,
+    AVCOL_PRI_RESERVED    = 3,
+    AVCOL_PRI_BT470M      = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+
+    AVCOL_PRI_BT470BG     = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
+    AVCOL_PRI_SMPTE170M   = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+    AVCOL_PRI_SMPTE240M   = 7, ///< functionally identical to above
+    AVCOL_PRI_FILM        = 8, ///< colour filters using Illuminant C
+    AVCOL_PRI_BT2020      = 9, ///< ITU-R BT2020
+    AVCOL_PRI_NB,              ///< Not part of ABI
+};
+
+/**
+ * Color Transfer Characteristic.
+ */
+enum AVColorTransferCharacteristic {
+    AVCOL_TRC_RESERVED0    = 0,
+    AVCOL_TRC_BT709        = 1,  ///< also ITU-R BT1361
+    AVCOL_TRC_UNSPECIFIED  = 2,
+    AVCOL_TRC_RESERVED     = 3,
+    AVCOL_TRC_GAMMA22      = 4,  ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+    AVCOL_TRC_GAMMA28      = 5,  ///< also ITU-R BT470BG
+    AVCOL_TRC_SMPTE170M    = 6,  ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
+    AVCOL_TRC_SMPTE240M    = 7,
+    AVCOL_TRC_LINEAR       = 8,  ///< "Linear transfer characteristics"
+    AVCOL_TRC_LOG          = 9,  ///< "Logarithmic transfer characteristic (100:1 range)"
+    AVCOL_TRC_LOG_SQRT     = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
+    AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
+    AVCOL_TRC_BT1361_ECG   = 12, ///< ITU-R BT1361 Extended Colour Gamut
+    AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
+    AVCOL_TRC_BT2020_10    = 14, ///< ITU-R BT2020 for 10 bit system
+    AVCOL_TRC_BT2020_12    = 15, ///< ITU-R BT2020 for 12 bit system
+    AVCOL_TRC_NB,                ///< Not part of ABI
+};
+
+/**
+ * YUV colorspace type.
+ */
+enum AVColorSpace {
+    AVCOL_SPC_RGB         = 0,  ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
+    AVCOL_SPC_BT709       = 1,  ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
+    AVCOL_SPC_UNSPECIFIED = 2,
+    AVCOL_SPC_RESERVED    = 3,
+    AVCOL_SPC_FCC         = 4,  ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+    AVCOL_SPC_BT470BG     = 5,  ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+    AVCOL_SPC_SMPTE170M   = 6,  ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
+    AVCOL_SPC_SMPTE240M   = 7,
+    AVCOL_SPC_YCOCG       = 8,  ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+    AVCOL_SPC_BT2020_NCL  = 9,  ///< ITU-R BT2020 non-constant luminance system
+    AVCOL_SPC_BT2020_CL   = 10, ///< ITU-R BT2020 constant luminance system
+    AVCOL_SPC_NB,               ///< Not part of ABI
+};
+#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG
+
+
+/**
+ * MPEG vs JPEG YUV range.
+ */
+enum AVColorRange {
+    AVCOL_RANGE_UNSPECIFIED = 0,
+    AVCOL_RANGE_MPEG        = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
+    AVCOL_RANGE_JPEG        = 2, ///< the normal     2^n-1   "JPEG" YUV ranges
+    AVCOL_RANGE_NB,              ///< Not part of ABI
+};
+
+/**
+ * Location of chroma samples.
+ *
+ *  X   X      3 4 X      X are luma samples,
+ *             1 2        1-6 are possible chroma positions
+ *  X   X      5 6 X      0 is undefined/unknown position
+ */
+enum AVChromaLocation {
+    AVCHROMA_LOC_UNSPECIFIED = 0,
+    AVCHROMA_LOC_LEFT        = 1, ///< mpeg2/4, h264 default
+    AVCHROMA_LOC_CENTER      = 2, ///< mpeg1, jpeg, h263
+    AVCHROMA_LOC_TOPLEFT     = 3, ///< DV
+    AVCHROMA_LOC_TOP         = 4,
+    AVCHROMA_LOC_BOTTOMLEFT  = 5,
+    AVCHROMA_LOC_BOTTOM      = 6,
+    AVCHROMA_LOC_NB,              ///< Not part of ABI
+};
+
+#endif /* AVUTIL_PIXFMT_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/qsort.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,117 @@
+/*
+ * copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "common.h"
+
+
+/**
+ * Quicksort
+ * This sort is fast and fully in-place, but not stable. It is possible
+ * to construct input that requires O(n^2) time, but this is very unlikely to
+ * happen with non-constructed input.
+ */
+#define AV_QSORT(p, num, type, cmp) {\
+    void *stack[64][2];\
+    int sp= 1;\
+    stack[0][0] = p;\
+    stack[0][1] = (p)+(num)-1;\
+    while(sp){\
+        type *start= stack[--sp][0];\
+        type *end  = stack[  sp][1];\
+        while(start < end){\
+            if(start < end-1) {\
+                int checksort=0;\
+                type *right = end-2;\
+                type *left  = start+1;\
+                type *mid = start + ((end-start)>>1);\
+                if(cmp(start, end) > 0) {\
+                    if(cmp(  end, mid) > 0) FFSWAP(type, *start, *mid);\
+                    else                    FFSWAP(type, *start, *end);\
+                }else{\
+                    if(cmp(start, mid) > 0) FFSWAP(type, *start, *mid);\
+                    else checksort= 1;\
+                }\
+                if(cmp(mid, end) > 0){ \
+                    FFSWAP(type, *mid, *end);\
+                    checksort=0;\
+                }\
+                if(start == end-2) break;\
+                FFSWAP(type, end[-1], *mid);\
+                while(left <= right){\
+                    while(left<=right && cmp(left, end-1) < 0)\
+                        left++;\
+                    while(left<=right && cmp(right, end-1) > 0)\
+                        right--;\
+                    if(left <= right){\
+                        FFSWAP(type, *left, *right);\
+                        left++;\
+                        right--;\
+                    }\
+                }\
+                FFSWAP(type, end[-1], *left);\
+                if(checksort && (mid == left-1 || mid == left)){\
+                    mid= start;\
+                    while(mid<end && cmp(mid, mid+1) <= 0)\
+                        mid++;\
+                    if(mid==end)\
+                        break;\
+                }\
+                if(end-left < left-start){\
+                    stack[sp  ][0]= start;\
+                    stack[sp++][1]= right;\
+                    start = left+1;\
+                }else{\
+                    stack[sp  ][0]= left+1;\
+                    stack[sp++][1]= end;\
+                    end = right;\
+                }\
+            }else{\
+                if(cmp(start, end) > 0)\
+                    FFSWAP(type, *start, *end);\
+                break;\
+            }\
+        }\
+    }\
+}
+
+/**
+ * Merge sort, this sort requires a temporary buffer and is stable, its worst
+ * case time is O(n log n)
+ * @param p     must be an lvalue pointer; this macro may exchange it with tmp
+ * @param tmp   must be an lvalue pointer; this macro may exchange it with p
+ */
+#define AV_MSORT(p, tmp, num, type, cmp) {\
+    unsigned i, j, step;\
+    for(step=1; step<(num); step+=step){\
+        for(i=0; i<(num); i+=2*step){\
+            unsigned a[2] = {i, i+step};\
+            unsigned end = FFMIN(i+2*step, (num));\
+            for(j=i; a[0]<i+step && a[1]<end; j++){\
+                int idx= cmp(p+a[0], p+a[1]) > 0;\
+                tmp[j] = p[ a[idx]++ ];\
+            }\
+            if(a[0]>=i+step) a[0] = a[1];\
+            for(; j<end; j++){\
+                tmp[j] = p[ a[0]++ ];\
+            }\
+        }\
+        FFSWAP(type*, p, tmp);\
+    }\
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/random_seed.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2009 Baptiste Coudurier <baptiste.coudurier@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_RANDOM_SEED_H
+#define AVUTIL_RANDOM_SEED_H
+
+#include <stdint.h>
+/**
+ * @addtogroup lavu_crypto
+ * @{
+ */
+
+/**
+ * Get a seed to use in conjunction with random functions.
+ * This function tries to provide a good seed on a best-effort basis.
+ * It is possible to call this function multiple times if more bits are needed.
+ * It can be quite slow, which is why it should only be used as seed for a faster
+ * PRNG. The quality of the seed depends on the platform.
+ */
+uint32_t av_get_random_seed(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RANDOM_SEED_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/rational.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,166 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rational numbers
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_RATIONAL_H
+#define AVUTIL_RATIONAL_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+/**
+ * rational number numerator/denominator
+ */
+typedef struct AVRational{
+    int num; ///< numerator
+    int den; ///< denominator
+} AVRational;
+
+/**
+ * Create a rational.
+ * Useful for compilers that do not support compound literals.
+ * @note  The return value is not reduced.
+ */
+static inline AVRational av_make_q(int num, int den)
+{
+    AVRational r = { num, den };
+    return r;
+}
+
+/**
+ * Compare two rationals.
+ * @param a first rational
+ * @param b second rational
+ * @return 0 if a==b, 1 if a>b, -1 if a<b, and INT_MIN if one of the
+ * values is of the form 0/0
+ */
+static inline int av_cmp_q(AVRational a, AVRational b){
+    const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;
+
+    if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1;
+    else if(b.den && a.den) return 0;
+    else if(a.num && b.num) return (a.num>>31) - (b.num>>31);
+    else                    return INT_MIN;
+}
+
+/**
+ * Convert rational to double.
+ * @param a rational to convert
+ * @return (double) a
+ */
+static inline double av_q2d(AVRational a){
+    return a.num / (double) a.den;
+}
+
+/**
+ * Reduce a fraction.
+ * This is useful for framerate calculations.
+ * @param dst_num destination numerator
+ * @param dst_den destination denominator
+ * @param num source numerator
+ * @param den source denominator
+ * @param max the maximum allowed for dst_num & dst_den
+ * @return 1 if exact, 0 otherwise
+ */
+int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max);
+
+/**
+ * Multiply two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b*c
+ */
+AVRational av_mul_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Divide one rational by another.
+ * @param b first rational
+ * @param c second rational
+ * @return b/c
+ */
+AVRational av_div_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Add two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b+c
+ */
+AVRational av_add_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Subtract one rational from another.
+ * @param b first rational
+ * @param c second rational
+ * @return b-c
+ */
+AVRational av_sub_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Invert a rational.
+ * @param q value
+ * @return 1 / q
+ */
+static av_always_inline AVRational av_inv_q(AVRational q)
+{
+    AVRational r = { q.den, q.num };
+    return r;
+}
+
+/**
+ * Convert a double precision floating point number to a rational.
+ * inf is expressed as {1,0} or {-1,0} depending on the sign.
+ *
+ * @param d double to convert
+ * @param max the maximum allowed numerator and denominator
+ * @return (AVRational) d
+ */
+AVRational av_d2q(double d, int max) av_const;
+
+/**
+ * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer
+ * than q1, 0 if they have the same distance.
+ */
+int av_nearer_q(AVRational q, AVRational q1, AVRational q2);
+
+/**
+ * Find the nearest value in q_list to q.
+ * @param q_list an array of rationals terminated by {0, 0}
+ * @return the index of the nearest value found in the array
+ */
+int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RATIONAL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/rc4.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,50 @@
+/*
+ * RC4 encryption/decryption/pseudo-random number generator
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_RC4_H
+#define AVUTIL_RC4_H
+
+#include <stdint.h>
+
+struct AVRC4 {
+    uint8_t state[256];
+    int x, y;
+};
+
+/**
+ * @brief Initializes an AVRC4 context.
+ *
+ * @param key_bits must be a multiple of 8
+ * @param decrypt 0 for encryption, 1 for decryption, currently has no effect
+ */
+int av_rc4_init(struct AVRC4 *d, const uint8_t *key, int key_bits, int decrypt);
+
+/**
+ * @brief Encrypts / decrypts using the RC4 algorithm.
+ *
+ * @param count number of bytes
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst, may be NULL
+ * @param iv not (yet) used for RC4, should be NULL
+ * @param decrypt 0 for encryption, 1 for decryption, not (yet) used
+ */
+void av_rc4_crypt(struct AVRC4 *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
+
+#endif /* AVUTIL_RC4_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/replaygain.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,51 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_REPLAYGAIN_H
+#define AVUTIL_REPLAYGAIN_H
+
+#include <stdint.h>
+
+/**
+ * ReplayGain information (see
+ * http://wiki.hydrogenaudio.org/index.php?title=ReplayGain_1.0_specification).
+ * The size of this struct is a part of the public ABI.
+ */
+typedef struct AVReplayGain {
+    /**
+     * Track replay gain in microbels (divide by 100000 to get the value in dB).
+     * Should be set to INT32_MIN when unknown.
+     */
+    int32_t track_gain;
+    /**
+     * Peak track amplitude, with 100000 representing full scale (but values
+     * may overflow). 0 when unknown.
+     */
+    uint32_t track_peak;
+    /**
+     * Same as track_gain, but for the whole album.
+     */
+    int32_t album_gain;
+    /**
+     * Same as track_peak, but for the whole album.
+     */
+    uint32_t album_peak;
+} AVReplayGain;
+
+#endif /* AVUTIL_REPLAYGAIN_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/ripemd.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2013 James Almer <jamrial@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_RIPEMD_H
+#define AVUTIL_RIPEMD_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_ripemd RIPEMD
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_ripemd_size;
+
+struct AVRIPEMD;
+
+/**
+ * Allocate an AVRIPEMD context.
+ */
+struct AVRIPEMD *av_ripemd_alloc(void);
+
+/**
+ * Initialize RIPEMD hashing.
+ *
+ * @param context pointer to the function context (of size av_ripemd_size)
+ * @param bits    number of bits in digest (128, 160, 256 or 320 bits)
+ * @return        zero if initialization succeeded, -1 otherwise
+ */
+int av_ripemd_init(struct AVRIPEMD* context, int bits);
+
+/**
+ * Update hash value.
+ *
+ * @param context hash function context
+ * @param data    input data to update hash with
+ * @param len     input data length
+ */
+void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param context hash function context
+ * @param digest  buffer where output digest value is stored
+ */
+void av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RIPEMD_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/samplefmt.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,271 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SAMPLEFMT_H
+#define AVUTIL_SAMPLEFMT_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ *
+ * @defgroup lavu_sampfmts Audio sample formats
+ *
+ * Audio sample format enumeration and related convenience functions.
+ * @{
+ *
+ */
+
+/**
+ * Audio sample formats
+ *
+ * - The data described by the sample format is always in native-endian order.
+ *   Sample values can be expressed by native C types, hence the lack of a signed
+ *   24-bit sample format even though it is a common raw audio data format.
+ *
+ * - The floating-point formats are based on full volume being in the range
+ *   [-1.0, 1.0]. Any values outside this range are beyond full volume level.
+ *
+ * - The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg
+ *   (such as AVFrame in libavcodec) is as follows:
+ *
+ * @par
+ * For planar sample formats, each audio channel is in a separate data plane,
+ * and linesize is the buffer size, in bytes, for a single plane. All data
+ * planes must be the same size. For packed sample formats, only the first data
+ * plane is used, and samples for each channel are interleaved. In this case,
+ * linesize is the buffer size, in bytes, for the 1 plane.
+ *
+ */
+enum AVSampleFormat {
+    AV_SAMPLE_FMT_NONE = -1,
+    AV_SAMPLE_FMT_U8,          ///< unsigned 8 bits
+    AV_SAMPLE_FMT_S16,         ///< signed 16 bits
+    AV_SAMPLE_FMT_S32,         ///< signed 32 bits
+    AV_SAMPLE_FMT_FLT,         ///< float
+    AV_SAMPLE_FMT_DBL,         ///< double
+
+    AV_SAMPLE_FMT_U8P,         ///< unsigned 8 bits, planar
+    AV_SAMPLE_FMT_S16P,        ///< signed 16 bits, planar
+    AV_SAMPLE_FMT_S32P,        ///< signed 32 bits, planar
+    AV_SAMPLE_FMT_FLTP,        ///< float, planar
+    AV_SAMPLE_FMT_DBLP,        ///< double, planar
+
+    AV_SAMPLE_FMT_NB           ///< Number of sample formats. DO NOT USE if linking dynamically
+};
+
+/**
+ * Return the name of sample_fmt, or NULL if sample_fmt is not
+ * recognized.
+ */
+const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);
+
+/**
+ * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE
+ * on error.
+ */
+enum AVSampleFormat av_get_sample_fmt(const char *name);
+
+/**
+ * Return the planar<->packed alternative form of the given sample format, or
+ * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the
+ * requested planar/packed format, the format returned is the same as the
+ * input.
+ */
+enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar);
+
+/**
+ * Get the packed alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in packed format, the format returned is
+ * the same as the input.
+ *
+ * @return  the packed alternative form of the given sample format or
+            AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the planar alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in planar format, the format returned is
+ * the same as the input.
+ *
+ * @return  the planar alternative form of the given sample format or
+            AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Generate a string corresponding to the sample format with
+ * sample_fmt, or a header if sample_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param sample_fmt the number of the sample format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ * @return the pointer to the filled buffer or NULL if sample_fmt is
+ * unknown or in case of other errors
+ */
+char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt);
+
+/**
+ * Return number of bytes per sample.
+ *
+ * @param sample_fmt the sample format
+ * @return number of bytes per sample or zero if unknown for the given
+ * sample format
+ */
+int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);
+
+/**
+ * Check if the sample format is planar.
+ *
+ * @param sample_fmt the sample format to inspect
+ * @return 1 if the sample format is planar, 0 if it is interleaved
+ */
+int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the required buffer size for the given audio parameters.
+ *
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param nb_channels   the number of channels
+ * @param nb_samples    the number of samples in a single channel
+ * @param sample_fmt    the sample format
+ * @param align         buffer size alignment (0 = default, 1 = no alignment)
+ * @return              required buffer size, or negative error code on failure
+ */
+int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
+                               enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * @}
+ *
+ * @defgroup lavu_sampmanip Samples manipulation
+ *
+ * Functions that manipulate audio samples
+ * @{
+ */
+
+/**
+ * Fill plane data pointers and linesize for samples with sample
+ * format sample_fmt.
+ *
+ * The audio_data array is filled with the pointers to the samples data planes:
+ * for planar, set the start point of each channel's data within the buffer,
+ * for packed, set the start point of the entire buffer only.
+ *
+ * The value pointed to by linesize is set to the aligned size of each
+ * channel's data buffer for planar layout, or to the aligned size of the
+ * buffer for all channels for packed layout.
+ *
+ * The buffer in buf must be big enough to contain all the samples
+ * (use av_samples_get_buffer_size() to compute its minimum size),
+ * otherwise the audio_data pointers will point to invalid data.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data  array to be filled with the pointer for each channel
+ * @param[out] linesize    calculated linesize, may be NULL
+ * @param buf              the pointer to a buffer containing the samples
+ * @param nb_channels      the number of channels
+ * @param nb_samples       the number of samples in a single channel
+ * @param sample_fmt       the sample format
+ * @param align            buffer size alignment (0 = default, 1 = no alignment)
+ * @return                 >=0 on success or a negative error code on failure
+ * @todo return minimum size in bytes required for the buffer in case
+ * of success at the next bump
+ */
+int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
+                           const uint8_t *buf,
+                           int nb_channels, int nb_samples,
+                           enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a samples buffer for nb_samples samples, and fill data pointers and
+ * linesize accordingly.
+ * The allocated samples buffer can be freed by using av_freep(&audio_data[0])
+ * Allocated data will be initialized to silence.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data  array to be filled with the pointer for each channel
+ * @param[out] linesize    aligned size for audio buffer(s), may be NULL
+ * @param nb_channels      number of audio channels
+ * @param nb_samples       number of samples per channel
+ * @param align            buffer size alignment (0 = default, 1 = no alignment)
+ * @return                 >=0 on success or a negative error code on failure
+ * @todo return the size of the allocated buffer in case of success at the next bump
+ * @see av_samples_fill_arrays()
+ * @see av_samples_alloc_array_and_samples()
+ */
+int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
+                     int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a data pointers array, samples buffer for nb_samples
+ * samples, and fill data pointers and linesize accordingly.
+ *
+ * This is the same as av_samples_alloc(), but also allocates the data
+ * pointers array.
+ *
+ * @see av_samples_alloc()
+ */
+int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels,
+                                       int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Copy samples from src to dst.
+ *
+ * @param dst destination array of pointers to data planes
+ * @param src source array of pointers to data planes
+ * @param dst_offset offset in samples at which the data will be written to dst
+ * @param src_offset offset in samples at which the data will be read from src
+ * @param nb_samples number of samples to be copied
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset,
+                    int src_offset, int nb_samples, int nb_channels,
+                    enum AVSampleFormat sample_fmt);
+
+/**
+ * Fill an audio buffer with silence.
+ *
+ * @param audio_data  array of pointers to data planes
+ * @param offset      offset in samples at which to start filling
+ * @param nb_samples  number of samples to fill
+ * @param nb_channels number of audio channels
+ * @param sample_fmt  audio sample format
+ */
+int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples,
+                           int nb_channels, enum AVSampleFormat sample_fmt);
+
+/**
+ * @}
+ * @}
+ */
+#endif /* AVUTIL_SAMPLEFMT_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/sha.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SHA_H
+#define AVUTIL_SHA_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_sha SHA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_sha_size;
+
+struct AVSHA;
+
+/**
+ * Allocate an AVSHA context.
+ */
+struct AVSHA *av_sha_alloc(void);
+
+/**
+ * Initialize SHA-1 or SHA-2 hashing.
+ *
+ * @param context pointer to the function context (of size av_sha_size)
+ * @param bits    number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits)
+ * @return        zero if initialization succeeded, -1 otherwise
+ */
+int av_sha_init(struct AVSHA* context, int bits);
+
+/**
+ * Update hash value.
+ *
+ * @param context hash function context
+ * @param data    input data to update hash with
+ * @param len     input data length
+ */
+void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param context hash function context
+ * @param digest  buffer where output digest value is stored
+ */
+void av_sha_final(struct AVSHA* context, uint8_t *digest);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_SHA_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/sha512.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2013 James Almer <jamrial@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SHA512_H
+#define AVUTIL_SHA512_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_sha512 SHA512
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_sha512_size;
+
+struct AVSHA512;
+
+/**
+ * Allocate an AVSHA512 context.
+ */
+struct AVSHA512 *av_sha512_alloc(void);
+
+/**
+ * Initialize SHA-2 512 hashing.
+ *
+ * @param context pointer to the function context (of size av_sha512_size)
+ * @param bits    number of bits in digest (224, 256, 384 or 512 bits)
+ * @return        zero if initialization succeeded, -1 otherwise
+ */
+int av_sha512_init(struct AVSHA512* context, int bits);
+
+/**
+ * Update hash value.
+ *
+ * @param context hash function context
+ * @param data    input data to update hash with
+ * @param len     input data length
+ */
+void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param context hash function context
+ * @param digest  buffer where output digest value is stored
+ */
+void av_sha512_final(struct AVSHA512* context, uint8_t *digest);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_SHA512_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/softfloat.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SOFTFLOAT_H
+#define AVUTIL_SOFTFLOAT_H
+
+#include <stdint.h>
+#include "common.h"
+
+#include "avassert.h"
+
+#define MIN_EXP -126
+#define MAX_EXP  126
+#define ONE_BITS 29
+
+typedef struct SoftFloat{
+    int32_t  exp;
+    int32_t mant;
+}SoftFloat;
+
+static av_const SoftFloat av_normalize_sf(SoftFloat a){
+    if(a.mant){
+#if 1
+        while((a.mant + 0x20000000U)<0x40000000U){
+            a.mant += a.mant;
+            a.exp  -= 1;
+        }
+#else
+        int s=ONE_BITS + 1 - av_log2(a.mant ^ (a.mant<<1));
+        a.exp   -= s;
+        a.mant <<= s;
+#endif
+        if(a.exp < MIN_EXP){
+            a.exp = MIN_EXP;
+            a.mant= 0;
+        }
+    }else{
+        a.exp= MIN_EXP;
+    }
+    return a;
+}
+
+static inline av_const SoftFloat av_normalize1_sf(SoftFloat a){
+#if 1
+    if((int32_t)(a.mant + 0x40000000U) < 0){
+        a.exp++;
+        a.mant>>=1;
+    }
+    av_assert2(a.mant < 0x40000000 && a.mant > -0x40000000);
+    return a;
+#elif 1
+    int t= a.mant + 0x40000000 < 0;
+    return (SoftFloat){a.exp+t, a.mant>>t};
+#else
+    int t= (a.mant + 0x40000000U)>>31;
+    return (SoftFloat){a.exp+t, a.mant>>t};
+#endif
+}
+
+/**
+ * @return Will not be more denormalized than a+b. So if either input is
+ *         normalized, then the output will not be worse than the other input.
+ *         If both are normalized, then the output will be normalized.
+ */
+static inline av_const SoftFloat av_mul_sf(SoftFloat a, SoftFloat b){
+    a.exp += b.exp;
+    av_assert2((int32_t)((a.mant * (int64_t)b.mant) >> ONE_BITS) == (a.mant * (int64_t)b.mant) >> ONE_BITS);
+    a.mant = (a.mant * (int64_t)b.mant) >> ONE_BITS;
+    return av_normalize1_sf(a);
+}
+
+/**
+ * b has to be normalized and not zero.
+ * @return Will not be more denormalized than a.
+ */
+static av_const SoftFloat av_div_sf(SoftFloat a, SoftFloat b){
+    a.exp -= b.exp+1;
+    a.mant = ((int64_t)a.mant<<(ONE_BITS+1)) / b.mant;
+    return av_normalize1_sf(a);
+}
+
+static inline av_const int av_cmp_sf(SoftFloat a, SoftFloat b){
+    int t= a.exp - b.exp;
+    if(t<0) return (a.mant >> (-t)) -  b.mant      ;
+    else    return  a.mant          - (b.mant >> t);
+}
+
+static inline av_const SoftFloat av_add_sf(SoftFloat a, SoftFloat b){
+    int t= a.exp - b.exp;
+    if      (t <-31) return b;
+    else if (t <  0) return av_normalize1_sf((SoftFloat){b.exp, b.mant + (a.mant >> (-t))});
+    else if (t < 32) return av_normalize1_sf((SoftFloat){a.exp, a.mant + (b.mant >>   t )});
+    else             return a;
+}
+
+static inline av_const SoftFloat av_sub_sf(SoftFloat a, SoftFloat b){
+    return av_add_sf(a, (SoftFloat){b.exp, -b.mant});
+}
+
+//FIXME sqrt, log, exp, pow, sin, cos
+
+static inline av_const SoftFloat av_int2sf(int v, int frac_bits){
+    return av_normalize_sf((SoftFloat){ONE_BITS-frac_bits, v});
+}
+
+/**
+ * Rounding is to -inf.
+ */
+static inline av_const int av_sf2int(SoftFloat v, int frac_bits){
+    v.exp += frac_bits - ONE_BITS;
+    if(v.exp >= 0) return v.mant <<  v.exp ;
+    else           return v.mant >>(-v.exp);
+}
+
+#endif /* AVUTIL_SOFTFLOAT_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/stereo3d.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2013 Vittorio Giovara <vittorio.giovara@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_STEREO3D_H
+#define AVUTIL_STEREO3D_H
+
+#include <stdint.h>
+
+#include "frame.h"
+
+/**
+ * List of possible 3D Types
+ */
+enum AVStereo3DType {
+    /**
+     * Video is not stereoscopic (and metadata has to be there).
+     */
+    AV_STEREO3D_2D,
+
+    /**
+     * Views are next to each other.
+     *
+     *    LLLLRRRR
+     *    LLLLRRRR
+     *    LLLLRRRR
+     *    ...
+     */
+    AV_STEREO3D_SIDEBYSIDE,
+
+    /**
+     * Views are on top of each other.
+     *
+     *    LLLLLLLL
+     *    LLLLLLLL
+     *    RRRRRRRR
+     *    RRRRRRRR
+     */
+    AV_STEREO3D_TOPBOTTOM,
+
+    /**
+     * Views are alternated temporally.
+     *
+     *     frame0   frame1   frame2   ...
+     *    LLLLLLLL RRRRRRRR LLLLLLLL
+     *    LLLLLLLL RRRRRRRR LLLLLLLL
+     *    LLLLLLLL RRRRRRRR LLLLLLLL
+     *    ...      ...      ...
+     */
+    AV_STEREO3D_FRAMESEQUENCE,
+
+    /**
+     * Views are packed in a checkerboard-like structure per pixel.
+     *
+     *    LRLRLRLR
+     *    RLRLRLRL
+     *    LRLRLRLR
+     *    ...
+     */
+    AV_STEREO3D_CHECKERBOARD,
+
+    /**
+     * Views are next to each other, but when upscaling
+     * apply a checkerboard pattern.
+     *
+     *     LLLLRRRR          L L L L    R R R R
+     *     LLLLRRRR    =>     L L L L  R R R R
+     *     LLLLRRRR          L L L L    R R R R
+     *     LLLLRRRR           L L L L  R R R R
+     */
+    AV_STEREO3D_SIDEBYSIDE_QUINCUNX,
+
+    /**
+     * Views are packed per line, as if interlaced.
+     *
+     *    LLLLLLLL
+     *    RRRRRRRR
+     *    LLLLLLLL
+     *    ...
+     */
+    AV_STEREO3D_LINES,
+
+    /**
+     * Views are packed per column.
+     *
+     *    LRLRLRLR
+     *    LRLRLRLR
+     *    LRLRLRLR
+     *    ...
+     */
+    AV_STEREO3D_COLUMNS,
+};
+
+
+/**
+ * Inverted views, Right/Bottom represents the left view.
+ */
+#define AV_STEREO3D_FLAG_INVERT     (1 << 0)
+
+/**
+ * Stereo 3D type: this structure describes how two videos are packed
+ * within a single video surface, with additional information as needed.
+ *
+ * @note The struct must be allocated with av_stereo3d_alloc() and
+ *       its size is not a part of the public ABI.
+ */
+typedef struct AVStereo3D {
+    /**
+     * How views are packed within the video.
+     */
+    enum AVStereo3DType type;
+
+    /**
+     * Additional information about the frame packing.
+     */
+    int flags;
+} AVStereo3D;
+
+/**
+ * Allocate an AVStereo3D structure and set its fields to default values.
+ * The resulting struct can be freed using av_freep().
+ *
+ * @return An AVStereo3D filled with default values or NULL on failure.
+ */
+AVStereo3D *av_stereo3d_alloc(void);
+
+/**
+ * Allocate a complete AVFrameSideData and add it to the frame.
+ *
+ * @param frame The frame which side data is added to.
+ *
+ * @return The AVStereo3D structure to be filled by caller.
+ */
+AVStereo3D *av_stereo3d_create_side_data(AVFrame *frame);
+
+#endif /* AVUTIL_STEREO3D_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/threadmessage.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,91 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_THREADMESSAGE_H
+#define AVUTIL_THREADMESSAGE_H
+
+typedef struct AVThreadMessageQueue AVThreadMessageQueue;
+
+typedef enum AVThreadMessageFlags {
+
+    /**
+     * Perform non-blocking operation.
+     * If this flag is set, send and recv operations are non-blocking and
+     * return AVERROR(EAGAIN) immediately if they can not proceed.
+     */
+    AV_THREAD_MESSAGE_NONBLOCK = 1,
+
+} AVThreadMessageFlags;
+
+/**
+ * Allocate a new message queue.
+ *
+ * @param mq      pointer to the message queue
+ * @param nelem   maximum number of elements in the queue
+ * @param elsize  size of each element in the queue
+ * @return  >=0 for success; <0 for error, in particular AVERROR(ENOSYS) if
+ *          lavu was built without thread support
+ */
+int av_thread_message_queue_alloc(AVThreadMessageQueue **mq,
+                                  unsigned nelem,
+                                  unsigned elsize);
+
+/**
+ * Free a message queue.
+ *
+ * The message queue must no longer be in use by another thread.
+ */
+void av_thread_message_queue_free(AVThreadMessageQueue **mq);
+
+/**
+ * Send a message on the queue.
+ */
+int av_thread_message_queue_send(AVThreadMessageQueue *mq,
+                                 void *msg,
+                                 unsigned flags);
+
+/**
+ * Receive a message from the queue.
+ */
+int av_thread_message_queue_recv(AVThreadMessageQueue *mq,
+                                 void *msg,
+                                 unsigned flags);
+
+/**
+ * Set the sending error code.
+ *
+ * If the error code is set to non-zero, av_thread_message_queue_recv() will
+ * return it immediately when there are no longer available messages.
+ * Conventional values, such as AVERROR_EOF or AVERROR(EAGAIN), can be used
+ * to cause the receiving thread to stop or suspend its operation.
+ */
+void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq,
+                                          int err);
+
+/**
+ * Set the receiving error code.
+ *
+ * If the error code is set to non-zero, av_thread_message_queue_send() will
+ * return it immediately. Conventional values, such as AVERROR_EOF or
+ * AVERROR(EAGAIN), can be used to cause the sending thread to stop or
+ * suspend its operation.
+ */
+void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq,
+                                          int err);
+
+#endif /* AVUTIL_THREADMESSAGE_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/time.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2000-2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_TIME_H
+#define AVUTIL_TIME_H
+
+#include <stdint.h>
+
+/**
+ * Get the current time in microseconds.
+ */
+int64_t av_gettime(void);
+
+/**
+ * Get the current time in microseconds since some unspecified starting point.
+ * On platforms that support it, the time comes from a monotonic clock.
+ * This property makes this time source ideal for measuring relative time.
+ * The returned values may not be monotonic on platforms where a monotonic
+ * clock is not available.
+ */
+int64_t av_gettime_relative(void);
+
+/**
+ * Indicates with a boolean result if the av_gettime_relative() time source
+ * is monotonic.
+ */
+int av_gettime_relative_is_monotonic(void);
+
+/**
+ * Sleep for a period of time.  Although the duration is expressed in
+ * microseconds, the actual delay may be rounded to the precision of the
+ * system timer.
+ *
+ * @param  usec Number of microseconds to sleep.
+ * @return zero on success or (negative) error code.
+ */
+int av_usleep(unsigned usec);
+
+#endif /* AVUTIL_TIME_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/time_internal.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,47 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_TIME_INTERNAL_H
+#define AVUTIL_TIME_INTERNAL_H
+
+#include <time.h>
+#include "config.h"
+
+#if !HAVE_GMTIME_R && !defined(gmtime_r)
+static inline struct tm *gmtime_r(const time_t* clock, struct tm *result)
+{
+    struct tm *ptr = gmtime(clock);
+    if (!ptr)
+        return NULL;
+    *result = *ptr;
+    return result;
+}
+#endif
+
+#if !HAVE_LOCALTIME_R && !defined(localtime_r)
+static inline struct tm *localtime_r(const time_t* clock, struct tm *result)
+{
+    struct tm *ptr = localtime(clock);
+    if (!ptr)
+        return NULL;
+    *result = *ptr;
+    return result;
+}
+#endif
+
+#endif /* AVUTIL_TIME_INTERNAL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/timecode.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier <baptiste.coudurier@gmail.com>
+ * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Timecode helpers header
+ */
+
+#ifndef AVUTIL_TIMECODE_H
+#define AVUTIL_TIMECODE_H
+
+#include <stdint.h>
+#include "rational.h"
+
+#define AV_TIMECODE_STR_SIZE 16
+
+enum AVTimecodeFlag {
+    AV_TIMECODE_FLAG_DROPFRAME      = 1<<0, ///< timecode is drop frame
+    AV_TIMECODE_FLAG_24HOURSMAX     = 1<<1, ///< timecode wraps after 24 hours
+    AV_TIMECODE_FLAG_ALLOWNEGATIVE  = 1<<2, ///< negative time values are allowed
+};
+
+typedef struct {
+    int start;          ///< timecode frame start (first base frame number)
+    uint32_t flags;     ///< flags such as drop frame, +24 hours support, ...
+    AVRational rate;    ///< frame rate in rational form
+    unsigned fps;       ///< frame per second; must be consistent with the rate field
+} AVTimecode;
+
+/**
+ * Adjust frame number for NTSC drop frame time code.
+ *
+ * @param framenum frame number to adjust
+ * @param fps      frame per second, 30 or 60
+ * @return         adjusted frame number
+ * @warning        adjustment is only valid in NTSC 29.97 and 59.94
+ */
+int av_timecode_adjust_ntsc_framenum2(int framenum, int fps);
+
+/**
+ * Convert frame number to SMPTE 12M binary representation.
+ *
+ * @param tc       timecode data correctly initialized
+ * @param framenum frame number
+ * @return         the SMPTE binary representation
+ *
+ * @note Frame number adjustment is automatically done in case of drop timecode,
+ *       you do NOT have to call av_timecode_adjust_ntsc_framenum2().
+ * @note The frame number is relative to tc->start.
+ * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity
+ *       correction (PC) bits are set to zero.
+ */
+uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum);
+
+/**
+ * Load timecode string in buf.
+ *
+ * @param buf      destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tc       timecode data correctly initialized
+ * @param framenum frame number
+ * @return         the buf parameter
+ *
+ * @note Timecode representation can be a negative timecode and have more than
+ *       24 hours, but will only be honored if the flags are correctly set.
+ * @note The frame number is relative to tc->start.
+ */
+char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum);
+
+/**
+ * Get the timecode string from the SMPTE timecode format.
+ *
+ * @param buf        destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tcsmpte    the 32-bit SMPTE timecode
+ * @param prevent_df prevent the use of a drop flag when it is known the DF bit
+ *                   is arbitrary
+ * @return           the buf parameter
+ */
+char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df);
+
+/**
+ * Get the timecode string from the 25-bit timecode format (MPEG GOP format).
+ *
+ * @param buf     destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tc25bit the 25-bits timecode
+ * @return        the buf parameter
+ */
+char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit);
+
+/**
+ * Init a timecode struct with the passed parameters.
+ *
+ * @param log_ctx     a pointer to an arbitrary struct of which the first field
+ *                    is a pointer to an AVClass struct (used for av_log)
+ * @param tc          pointer to an allocated AVTimecode
+ * @param rate        frame rate in rational form
+ * @param flags       miscellaneous flags such as drop frame, +24 hours, ...
+ *                    (see AVTimecodeFlag)
+ * @param frame_start the first frame number
+ * @return            0 on success, AVERROR otherwise
+ */
+int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx);
+
+/**
+ * Parse timecode representation (hh:mm:ss[:;.]ff).
+ *
+ * @param log_ctx a pointer to an arbitrary struct of which the first field is a
+ *                pointer to an AVClass struct (used for av_log).
+ * @param tc      pointer to an allocated AVTimecode
+ * @param rate    frame rate in rational form
+ * @param str     timecode string which will determine the frame start
+ * @return        0 on success, AVERROR otherwise
+ */
+int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx);
+
+/**
+ * Check if the timecode feature is available for the given frame rate
+ *
+ * @return 0 if supported, <0 otherwise
+ */
+int av_timecode_check_frame_rate(AVRational rate);
+
+#endif /* AVUTIL_TIMECODE_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/timer.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,90 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * high precision timer, useful to profile code
+ */
+
+#ifndef AVUTIL_TIMER_H
+#define AVUTIL_TIMER_H
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "config.h"
+
+#if HAVE_MACH_MACH_TIME_H
+#include <mach/mach_time.h>
+#endif
+
+#include "log.h"
+
+#if   ARCH_ARM
+#   include "arm/timer.h"
+#elif ARCH_PPC
+#   include "ppc/timer.h"
+#elif ARCH_X86
+#   include "x86/timer.h"
+#endif
+
+#if !defined(AV_READ_TIME)
+#   if HAVE_GETHRTIME
+#       define AV_READ_TIME gethrtime
+#   elif HAVE_MACH_ABSOLUTE_TIME
+#       define AV_READ_TIME mach_absolute_time
+#   endif
+#endif
+
+#ifndef FF_TIMER_UNITS
+#   define FF_TIMER_UNITS "UNITS"
+#endif
+
+#ifdef AV_READ_TIME
+#define START_TIMER                             \
+    uint64_t tend;                              \
+    uint64_t tstart = AV_READ_TIME();           \
+
+#define STOP_TIMER(id)                                                    \
+    tend = AV_READ_TIME();                                                \
+    {                                                                     \
+        static uint64_t tsum   = 0;                                       \
+        static int tcount      = 0;                                       \
+        static int tskip_count = 0;                                       \
+        if (tcount < 2                        ||                          \
+            tend - tstart < 8 * tsum / tcount ||                          \
+            tend - tstart < 2000) {                                       \
+            tsum+= tend - tstart;                                         \
+            tcount++;                                                     \
+        } else                                                            \
+            tskip_count++;                                                \
+        if (((tcount + tskip_count) & (tcount + tskip_count - 1)) == 0) { \
+            av_log(NULL, AV_LOG_ERROR,                                    \
+                   "%"PRIu64" " FF_TIMER_UNITS " in %s, %d runs, %d skips\n",          \
+                   tsum * 10 / tcount, id, tcount, tskip_count);          \
+        }                                                                 \
+    }
+#else
+#define START_TIMER
+#define STOP_TIMER(id) { }
+#endif
+
+#endif /* AVUTIL_TIMER_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/timestamp.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,78 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * timestamp utils, mostly useful for debugging/logging purposes
+ */
+
+#ifndef AVUTIL_TIMESTAMP_H
+#define AVUTIL_TIMESTAMP_H
+
+#include "common.h"
+
+#if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS) && !defined(PRId64)
+#error missing -D__STDC_FORMAT_MACROS / #define __STDC_FORMAT_MACROS
+#endif
+
+#define AV_TS_MAX_STRING_SIZE 32
+
+/**
+ * Fill the provided buffer with a string containing a timestamp
+ * representation.
+ *
+ * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE
+ * @param ts the timestamp to represent
+ * @return the buffer in input
+ */
+static inline char *av_ts_make_string(char *buf, int64_t ts)
+{
+    if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS");
+    else                      snprintf(buf, AV_TS_MAX_STRING_SIZE, "%"PRId64, ts);
+    return buf;
+}
+
+/**
+ * Convenience macro, the return value should be used only directly in
+ * function arguments but never stand-alone.
+ */
+#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts)
+
+/**
+ * Fill the provided buffer with a string containing a timestamp time
+ * representation.
+ *
+ * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE
+ * @param ts the timestamp to represent
+ * @param tb the timebase of the timestamp
+ * @return the buffer in input
+ */
+static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb)
+{
+    if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS");
+    else                      snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts);
+    return buf;
+}
+
+/**
+ * Convenience macro, the return value should be used only directly in
+ * function arguments but never stand-alone.
+ */
+#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb)
+
+#endif /* AVUTIL_TIMESTAMP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/tree.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,132 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * A tree container.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_TREE_H
+#define AVUTIL_TREE_H
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @addtogroup lavu_tree AVTree
+ * @ingroup lavu_data
+ *
+ * Low-complexity tree container
+ *
+ * Insertion, removal, finding equal, largest which is smaller than and
+ * smallest which is larger than, all have O(log n) worst-case complexity.
+ * @{
+ */
+
+
+struct AVTreeNode;
+extern const int av_tree_node_size;
+
+/**
+ * Allocate an AVTreeNode.
+ */
+struct AVTreeNode *av_tree_node_alloc(void);
+
+/**
+ * Find an element.
+ * @param root a pointer to the root node of the tree
+ * @param next If next is not NULL, then next[0] will contain the previous
+ *             element and next[1] the next element. If either does not exist,
+ *             then the corresponding entry in next is unchanged.
+ * @return An element with cmp(key, elem) == 0 or NULL if no such element
+ *         exists in the tree.
+ */
+void *av_tree_find(const struct AVTreeNode *root, void *key,
+                   int (*cmp)(void *key, const void *b), void *next[2]);
+
+/**
+ * Insert or remove an element.
+ *
+ * If *next is NULL, then the supplied element will be removed if it exists.
+ * If *next is non-NULL, then the supplied element will be inserted, unless
+ * it already exists in the tree.
+ *
+ * @param rootp A pointer to a pointer to the root node of the tree; note that
+ *              the root node can change during insertions, this is required
+ *              to keep the tree balanced.
+ * @param key  pointer to the element key to insert in the tree
+ * @param next Used to allocate and free AVTreeNodes. For insertion the user
+ *             must set it to an allocated and zeroed object of at least
+ *             av_tree_node_size bytes size. av_tree_insert() will set it to
+ *             NULL if it has been consumed.
+ *             For deleting elements *next is set to NULL by the user and
+ *             av_tree_insert() will set it to the AVTreeNode which was
+ *             used for the removed element.
+ *             This allows the use of flat arrays, which have
+ *             lower overhead compared to many malloced elements.
+ *             You might want to define a function like:
+ *             @code
+ *             void *tree_insert(struct AVTreeNode **rootp, void *key,
+ *                               int (*cmp)(void *key, const void *b),
+ *                               AVTreeNode **next)
+ *             {
+ *                 if (!*next)
+ *                     *next = av_mallocz(av_tree_node_size);
+ *                 return av_tree_insert(rootp, key, cmp, next);
+ *             }
+ *             void *tree_remove(struct AVTreeNode **rootp, void *key,
+ *                               int (*cmp)(void *key, const void *b, AVTreeNode **next))
+ *             {
+ *                 av_freep(next);
+ *                 return av_tree_insert(rootp, key, cmp, next);
+ *             }
+ *             @endcode
+ * @param cmp compare function used to compare elements in the tree
+ * @return If no insertion happened, the found element; if an insertion or
+ *         removal happened, then either key or NULL will be returned.
+ *         Which one it is depends on the tree state and the implementation. You
+ *         should make no assumptions that it's one or the other in the code.
+ */
+void *av_tree_insert(struct AVTreeNode **rootp, void *key,
+                     int (*cmp)(void *key, const void *b),
+                     struct AVTreeNode **next);
+
+void av_tree_destroy(struct AVTreeNode *t);
+
+/**
+ * Apply enu(opaque, &elem) to all the elements in the tree in a given range.
+ *
+ * @param cmp a comparison function that returns < 0 for a element below the
+ *            range, > 0 for a element above the range and == 0 for a
+ *            element inside the range
+ *
+ * @note The cmp function should use the same ordering used to construct the
+ *       tree.
+ */
+void av_tree_enumerate(struct AVTreeNode *t, void *opaque,
+                       int (*cmp)(void *opaque, void *elem),
+                       int (*enu)(void *opaque, void *elem));
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_TREE_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/version.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,137 @@
+/*
+ * copyright (c) 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_VERSION_H
+#define AVUTIL_VERSION_H
+
+#include "macros.h"
+
+/**
+ * @addtogroup version_utils
+ *
+ * Useful to check and match library version in order to maintain
+ * backward compatibility.
+ *
+ * @{
+ */
+
+#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c)
+#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c
+#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
+
+/**
+ * @}
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Libavutil version macros
+ */
+
+/**
+ * @defgroup lavu_ver Version and Build diagnostics
+ *
+ * Macros and function useful to check at compiletime and at runtime
+ * which version of libavutil is in use.
+ *
+ * @{
+ */
+
+#define LIBAVUTIL_VERSION_MAJOR  54
+#define LIBAVUTIL_VERSION_MINOR  13
+#define LIBAVUTIL_VERSION_MICRO 100
+
+#define LIBAVUTIL_VERSION_INT   AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
+                                               LIBAVUTIL_VERSION_MINOR, \
+                                               LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_VERSION       AV_VERSION(LIBAVUTIL_VERSION_MAJOR,     \
+                                           LIBAVUTIL_VERSION_MINOR,     \
+                                           LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_BUILD         LIBAVUTIL_VERSION_INT
+
+#define LIBAVUTIL_IDENT         "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
+
+/**
+ * @}
+ *
+ * @defgroup depr_guards Deprecation guards
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @{
+ */
+
+#ifndef FF_API_OLD_AVOPTIONS
+#define FF_API_OLD_AVOPTIONS            (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_PIX_FMT
+#define FF_API_PIX_FMT                  (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_CONTEXT_SIZE
+#define FF_API_CONTEXT_SIZE             (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_PIX_FMT_DESC
+#define FF_API_PIX_FMT_DESC             (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_AV_REVERSE
+#define FF_API_AV_REVERSE               (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_AUDIOCONVERT
+#define FF_API_AUDIOCONVERT             (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_CPU_FLAG_MMX2
+#define FF_API_CPU_FLAG_MMX2            (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_LLS_PRIVATE
+#define FF_API_LLS_PRIVATE              (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_AVFRAME_LAVC
+#define FF_API_AVFRAME_LAVC             (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_VDPAU
+#define FF_API_VDPAU                    (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_GET_CHANNEL_LAYOUT_COMPAT
+#define FF_API_GET_CHANNEL_LAYOUT_COMPAT (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_XVMC
+#define FF_API_XVMC                     (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_OPT_TYPE_METADATA
+#define FF_API_OPT_TYPE_METADATA        (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+
+
+#ifndef FF_CONST_AVUTIL53
+#if LIBAVUTIL_VERSION_MAJOR >= 53
+#define FF_CONST_AVUTIL53 const
+#else
+#define FF_CONST_AVUTIL53
+#endif
+#endif
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_VERSION_H */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/x86_cpu.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1 @@
+#include "libavutil/x86/asm.h"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/xga_font_data.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,35 @@
+/*
+ * CGA/EGA/VGA ROM font data
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * CGA/EGA/VGA ROM font data
+ */
+
+#ifndef AVUTIL_XGA_FONT_DATA_H
+#define AVUTIL_XGA_FONT_DATA_H
+
+#include <stdint.h>
+#include "internal.h"
+
+extern av_export const uint8_t avpriv_cga_font[2048];
+extern av_export const uint8_t avpriv_vga16_font[4096];
+
+#endif /* AVUTIL_XGA_FONT_DATA_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libavutil/xtea.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,64 @@
+/*
+ * A 32-bit implementation of the XTEA algorithm
+ * Copyright (c) 2012 Samuel Pitoiset
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_XTEA_H
+#define AVUTIL_XTEA_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * @brief Public header for libavutil XTEA algorithm
+ * @defgroup lavu_xtea XTEA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+typedef struct AVXTEA {
+    uint32_t key[16];
+} AVXTEA;
+
+/**
+ * Initialize an AVXTEA context.
+ *
+ * @param ctx an AVXTEA context
+ * @param key a key of 16 bytes used for encryption/decryption
+ */
+void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVXTEA context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src,
+                   int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_XTEA_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libbpg.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1958 @@
+/*
+ * libbpg
+ * 
+ * Copyright (c) 2014 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <math.h>
+#ifdef EMSCRIPTEN
+#include <emscripten.h>
+#endif
+
+#include <libavutil/opt.h>
+#include <libavcodec/avcodec.h>
+#include <libavutil/common.h>
+
+/* The following global defines are used:
+   - USE_VAR_BIT_DEPTH : support of bit depth > 8 bits
+   - USE_PRED : support of animations 
+*/
+   
+#ifndef EMSCRIPTEN
+#define USE_RGB48 /* support all pixel formats */
+//#define DEBUG
+#endif
+
+#if !defined(DEBUG)
+#define NDEBUG
+#endif
+
+#include <assert.h>
+#include "libbpg.h"
+
+#define BPG_HEADER_MAGIC 0x425047fb
+
+#define ITAPS2 4 
+#define ITAPS (2 * ITAPS2) /* number of taps of the interpolation filter */
+
+#ifdef USE_VAR_BIT_DEPTH
+typedef uint16_t PIXEL;
+#else
+typedef uint8_t PIXEL;
+#endif
+
+#define MAX_DATA_SIZE ((1 << 30) - 1)
+
+typedef struct {
+    int c_shift;
+    int c_rnd;
+    int c_one;
+    int y_one, y_offset;
+    int c_r_cr, c_g_cb, c_g_cr, c_b_cb;
+    int c_center;
+    int bit_depth;
+    int limited_range;
+} ColorConvertState;
+
+typedef void ColorConvertFunc(ColorConvertState *s, 
+                              uint8_t *dst, const PIXEL *y_ptr,
+                              const PIXEL *cb_ptr, const PIXEL *cr_ptr,
+                              int n, int incr);
+
+struct BPGDecoderContext {
+    AVCodecContext *dec_ctx;
+    AVCodecContext *alpha_dec_ctx;
+    AVFrame *frame;
+    AVFrame *alpha_frame;
+    int w, h;
+    BPGImageFormatEnum format;
+    uint8_t c_h_phase; /* only used for 422 and 420 */
+    uint8_t has_alpha; /* true if alpha or W plane */
+    uint8_t bit_depth;
+    uint8_t has_w_plane;
+    uint8_t limited_range;
+    uint8_t premultiplied_alpha;
+    uint8_t has_animation;
+    BPGColorSpaceEnum color_space;
+    uint8_t keep_extension_data; /* true if the extension data must be
+                                    kept during parsing */
+    uint8_t decode_animation; /* true if animation decoding is enabled */
+    BPGExtensionData *first_md;
+
+    /* animation */
+    uint16_t loop_count;
+    uint16_t frame_delay_num;
+    uint16_t frame_delay_den;
+    uint8_t *input_buf;
+    int input_buf_pos;
+    int input_buf_len;
+
+    /* the following is used for format conversion */
+    uint8_t output_inited;
+    BPGDecoderOutputFormat out_fmt;
+    uint8_t is_rgba;
+    uint8_t is_16bpp;
+    uint8_t is_cmyk;
+    int y; /* current line */
+    int w2, h2;
+    const uint8_t *y_buf, *cb_buf, *cr_buf, *a_buf;
+    int y_linesize, cb_linesize, cr_linesize, a_linesize;
+    PIXEL *cb_buf2, *cr_buf2, *cb_buf3[ITAPS], *cr_buf3[ITAPS];
+    int16_t *c_buf4;
+    ColorConvertState cvt;
+    ColorConvertFunc *cvt_func;
+};
+
+/* ffmpeg utilities */
+#ifdef USE_AV_LOG
+void av_log(void* avcl, int level, const char *fmt, ...)
+{
+#ifdef DEBUG
+    va_list ap;
+
+    va_start(ap, fmt);
+    vfprintf(stderr, fmt, ap);
+    va_end(ap);
+#endif
+}
+
+void avpriv_report_missing_feature(void *avc, const char *msg, ...)
+{
+#ifdef DEBUG
+    va_list ap;
+
+    va_start(ap, msg);
+    vfprintf(stderr, msg, ap);
+    va_end(ap);
+#endif
+}
+#endif /* USE_AV_LOG */
+
+/* return < 0 if error, otherwise the consumed length */
+static int get_ue32(uint32_t *pv, const uint8_t *buf, int len)
+{
+    const uint8_t *p;
+    uint32_t v;
+    int a;
+
+    if (len <= 0) 
+        return -1;
+    p = buf;
+    a = *p++;
+    len--;
+    if (a < 0x80) {
+        *pv = a;
+        return 1;
+    } else if (a == 0x80) {
+        /* we don't accept non canonical encodings */
+        return -1;
+    }
+    v = a & 0x7f;
+    for(;;) {
+        if (len <= 0)
+            return -1;
+        a = *p++;
+        len--;
+        v = (v << 7) | (a & 0x7f);
+        if (!(a & 0x80))
+            break;
+    }
+    *pv = v;
+    return p - buf;
+}
+
+static int get_ue(uint32_t *pv, const uint8_t *buf, int len)
+{
+    int ret;
+    ret = get_ue32(pv, buf, len);
+    if (ret < 0)
+        return ret;
+    /* limit the maximum size to avoid overflows in buffer
+       computations */
+    if (*pv > MAX_DATA_SIZE)
+        return -1;
+    return ret;
+}
+
+static int build_msps(uint8_t **pbuf, int *pbuf_len,
+                      const uint8_t *input_data, int input_data_len1,
+                      int width, int height, int chroma_format_idc,
+                      int bit_depth)
+{
+    int input_data_len = input_data_len1;
+    int idx, msps_len, ret, buf_len, i;
+    uint32_t len;
+    uint8_t *buf, *msps_buf;
+
+    *pbuf = NULL;
+
+    /* build the modified SPS header to please libavcodec */
+    ret = get_ue(&len, input_data, input_data_len);
+    if (ret < 0)
+        return -1;
+    input_data += ret;
+    input_data_len -= ret;
+    
+    if (len > input_data_len)
+        return -1;
+
+    msps_len = 1 + 4 + 4 + 1 + len;
+    msps_buf = av_malloc(msps_len);
+    idx = 0;
+    msps_buf[idx++] = chroma_format_idc;
+    msps_buf[idx++] = (width >> 24);
+    msps_buf[idx++] = (width >> 16);
+    msps_buf[idx++] = (width >> 8);
+    msps_buf[idx++] = (width >> 0);
+    msps_buf[idx++] = (height >> 24);
+    msps_buf[idx++] = (height >> 16);
+    msps_buf[idx++] = (height >> 8);
+    msps_buf[idx++] = (height >> 0);
+    msps_buf[idx++] = bit_depth - 8;
+    memcpy(msps_buf + idx, input_data, len);
+    idx += len;
+    assert(idx == msps_len);
+    input_data += len;
+    input_data_len -= len;
+    
+    buf_len = 4 + 2 + msps_len * 2 + 4 + (input_data_len - len);
+    buf = av_malloc(buf_len);
+
+    idx = 0;
+    /* NAL header */
+    buf[idx++] = 0x00;
+    buf[idx++] = 0x00;
+    buf[idx++] = 0x00;
+    buf[idx++] = 0x01; 
+    buf[idx++] = (48 << 1); /* application specific NAL unit type */
+    buf[idx++] = 1;
+
+    /* add the modified SPS with the correct escape codes */
+    i = 0;
+    while (i < msps_len) {
+        if ((i + 1) < msps_len && msps_buf[i] == 0 && msps_buf[i + 1] == 0) {
+            buf[idx++] = 0x00;
+            buf[idx++] = 0x00;
+            buf[idx++] = 0x03;
+            i += 2;
+        } else {
+            buf[idx++] = msps_buf[i++];
+        }
+    }
+    /* the last byte cannot be 0 */
+    if (idx == 0 || buf[idx - 1] == 0x00)
+        buf[idx++] = 0x80;
+    av_free(msps_buf);
+    
+    *pbuf_len = idx;
+    *pbuf = buf;
+    return input_data_len1 - input_data_len;
+}
+
+/* Return the offset just past the current NAL unit, i.e. the position
+   of the next start code (00 00 01 or 00 00 00 01), or buf_len if no
+   further start code is present. Return -1 on error (expected start
+   code missing, or truncated NAL header). */
+static int find_nal_end(const uint8_t *buf, int buf_len, int has_startcode)
+{
+    int idx;
+
+    idx = 0;
+    if (has_startcode) {
+        /* skip the leading 4 or 3 byte start code */
+        if (buf_len >= 4 &&
+            buf[0] == 0 && buf[1] == 0 && buf[2] == 0 && buf[3] == 1) {
+            idx = 4;
+        } else if (buf_len >= 3 &&
+                   buf[0] == 0 && buf[1] == 0 && buf[2] == 1) {
+            idx = 3;
+        } else {
+            return -1;
+        }
+    }
+    /* NAL header: at least 2 bytes must remain */
+    if (idx + 2 > buf_len)
+        return -1;
+    /* find the last byte: scan until the next 3- or 4-byte start code */
+    for(;;) {
+        if (idx + 2 >= buf_len) {
+            idx = buf_len;
+            break;
+        }
+        if (buf[idx] == 0 && buf[idx + 1] == 0 && buf[idx + 2] == 1)
+            break;
+        if (idx + 3 < buf_len &&
+            buf[idx] == 0 && buf[idx + 1] == 0 && buf[idx + 2] == 0 && buf[idx + 3] == 1)
+            break;
+        idx++;
+    }
+    return idx;
+}
+
+/* Growable byte buffer: 'buf' holds 'size' allocated bytes of which
+   the first 'len' are in use. */
+typedef struct {
+    uint8_t *buf;
+    int size;
+    int len;
+} DynBuf;
+
+/* Initialize an empty buffer; no storage is allocated yet. */
+static void dyn_buf_init(DynBuf *s)
+{
+    s->buf = NULL;
+    s->size = 0;
+    s->len = 0;
+}
+
+/* Ensure the allocation can hold at least 'size' bytes. Grows
+   geometrically (factor 3/2) to amortize reallocations. Return 0 on
+   success, -1 on allocation failure (existing contents preserved). */
+static int dyn_buf_resize(DynBuf *s, int size)
+{
+    int new_size;
+    uint8_t *new_buf;
+
+    if (size <= s->size)
+        return 0;
+    new_size = (s->size * 3) / 2;
+    if (new_size < size)
+        new_size = size;
+    new_buf = av_realloc(s->buf, new_size);
+    if (!new_buf) 
+        return -1;
+    s->buf = new_buf;
+    s->size = new_size;
+    return 0;
+}
+
+/* Append 'len' bytes from 'data'. Return 0 on success, -1 on
+   allocation failure. */
+static int dyn_buf_push(DynBuf *s, const uint8_t *data, int len)
+{
+    if (dyn_buf_resize(s, s->len + len) < 0)
+        return -1;
+    memcpy(s->buf + s->len, data, len);
+    s->len += len;
+    return 0;
+}
+
+extern AVCodec ff_hevc_decoder;
+
+/* Build the modified SPS NAL from the BPG header data in 'buf',
+   append it to 'pbuf', then create the libavcodec HEVC decoder
+   context (*pc) and its output frame (*pframe). Return the number of
+   bytes consumed from 'buf', or -1 on error.
+   NOTE(review): 'c' is not freed if av_frame_alloc() fails, and is
+   also leaked when avcodec_open2() fails — verify whether these error
+   paths matter in practice. */
+static int hevc_decode_init1(DynBuf *pbuf, AVFrame **pframe,
+                             AVCodecContext **pc, 
+                             const uint8_t *buf, int buf_len,
+                             int width, int height, int chroma_format_idc,
+                             int bit_depth)
+{
+    AVCodec *codec;
+    AVCodecContext *c;
+    AVFrame *frame;
+    uint8_t *nal_buf;
+    int nal_len, ret, ret1;
+
+    ret = build_msps(&nal_buf, &nal_len, buf, buf_len,
+                     width, height, chroma_format_idc, bit_depth);
+    if (ret < 0)
+        return -1;
+    ret1 = dyn_buf_push(pbuf, nal_buf, nal_len);
+    av_free(nal_buf);
+    if (ret1 < 0)
+        return -1;
+    
+    /* the HEVC decoder is linked statically (no avcodec_find_decoder) */
+    codec = &ff_hevc_decoder;
+
+    c = avcodec_alloc_context3(codec);
+    if (!c) 
+        return -1;
+    frame = av_frame_alloc();
+    if (!frame) 
+        return -1;
+    /* for testing: use the MD5 or CRC in SEI to check the decoded bit
+       stream. */
+    c->err_recognition |= AV_EF_CRCCHECK; 
+    /* open it */
+    if (avcodec_open2(c, codec, NULL) < 0) {
+        av_frame_free(&frame);
+        return -1;
+    }
+    *pc = c;
+    *pframe = frame;
+    return ret;
+}
+
+/* Feed one complete access unit to the decoder and require that a
+   picture was decoded into 'frame'. Return 0 on success, -1 otherwise.
+   The caller must have reserved FF_INPUT_BUFFER_PADDING_SIZE extra
+   bytes after buf[buf_len]: they are zeroed here. */
+static int hevc_write_frame(AVCodecContext *avctx,
+                            AVFrame *frame,
+                            uint8_t *buf, int buf_len)
+{
+    AVPacket avpkt;
+    int len, got_frame;
+
+    av_init_packet(&avpkt);
+    avpkt.data = (uint8_t *)buf;
+    avpkt.size = buf_len;
+    /* avoid using uninitialized data */
+    memset(buf + buf_len, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+    len = avcodec_decode_video2(avctx, frame, &got_frame, &avpkt);
+    if (len < 0 || !got_frame)
+        return -1;
+    else
+        return 0;
+}
+
+/* Demultiplex the NAL units of one frame between the alpha stream
+   (nuh_layer_id == 1, only when an alpha decoder exists) and the
+   color stream, prefix each NAL with a 00 00 01 start code, then feed
+   the two rebuilt access units to their decoders. 'first_nal' means
+   'buf' starts directly on a NAL header with no start code. Return
+   the number of bytes consumed from 'buf', or -1 on error. */
+static int hevc_decode_frame_internal(BPGDecoderContext *s,
+                                      DynBuf *abuf, DynBuf *cbuf,
+                                      const uint8_t *buf, int buf_len1,
+                                      int first_nal)
+{
+    int nal_len, start, nal_buf_len, ret, nuh_layer_id, buf_len, has_alpha;
+    int nut, frame_start_found[2];
+    DynBuf *pbuf;
+    uint8_t *nal_buf;
+
+    has_alpha = (s->alpha_dec_ctx != NULL);
+    buf_len = buf_len1;
+    frame_start_found[0] = 0;
+    frame_start_found[1] = 0;
+    while (buf_len > 0) {
+        if (buf_len < (first_nal ? 3 : 0) + 2)
+            goto fail;
+        /* 'start' skips the 3 or 4 byte start code, if present */
+        if (first_nal)
+            start = 0;
+        else
+            start = 3 + (buf[2] == 0);
+        if (buf_len < start + 3)
+            goto fail;
+        /* parse the 2-byte NAL header: type and layer id */
+        nuh_layer_id = ((buf[start] & 1) << 5) | (buf[start + 1] >> 3);
+        nut = (buf[start] >> 1) & 0x3f;
+#if 0
+        printf("nal: type=%d layer_id=%d fs=%d %d\n", 
+               nut, nuh_layer_id, frame_start_found[0], frame_start_found[1]);
+#endif
+        /* Note: we assume the alpha and color data are correctly
+           interleaved */
+        if ((nut >= 32 && nut <= 35) || nut == 39 || nut >= 41) {
+            /* non-slice NAL: stop once a first slice was collected for
+               every expected layer (next frame begins) */
+            if (frame_start_found[0] && frame_start_found[has_alpha])
+                break;
+        } else if ((nut <= 9 || (nut >= 16 && nut <= 21)) &&  
+                   start + 2 < buf_len && (buf[start + 2] & 0x80)) {
+            /* first slice segment */
+            if (frame_start_found[0] && frame_start_found[has_alpha])
+                break;
+            if (has_alpha && nuh_layer_id == 1)
+                frame_start_found[1] = 1;
+            else
+                frame_start_found[0] = 1;
+        }
+        
+        nal_len = find_nal_end(buf, buf_len, !first_nal);
+        if (nal_len < 0)
+            goto fail;
+        /* copy the NAL with a 3 byte start code prepended */
+        nal_buf_len = nal_len - start + 3;
+        if (has_alpha && nuh_layer_id == 1)
+            pbuf = abuf;
+        else
+            pbuf = cbuf;
+        if (dyn_buf_resize(pbuf, pbuf->len + nal_buf_len) < 0)
+            goto fail;
+        nal_buf = pbuf->buf + pbuf->len;
+        nal_buf[0] = 0x00;
+        nal_buf[1] = 0x00;
+        nal_buf[2] = 0x01;
+        memcpy(nal_buf + 3, buf + start, nal_len - start);
+        if (has_alpha && nuh_layer_id == 1)
+            nal_buf[4] &= 0x7; /* clear nuh_layer_id bits, keep temporal id */
+        pbuf->len += nal_buf_len;
+        buf += nal_len;
+        buf_len -= nal_len;
+        first_nal = 0;
+    }
+    
+    if (s->alpha_dec_ctx) {
+        /* reserve the padding required by hevc_write_frame() */
+        if (dyn_buf_resize(abuf, abuf->len + FF_INPUT_BUFFER_PADDING_SIZE) < 0)
+            goto fail;
+        ret = hevc_write_frame(s->alpha_dec_ctx, s->alpha_frame, abuf->buf, abuf->len);
+        if (ret < 0)
+            goto fail;
+    }
+
+    if (dyn_buf_resize(cbuf, cbuf->len + FF_INPUT_BUFFER_PADDING_SIZE) < 0)
+        goto fail;
+    ret = hevc_write_frame(s->dec_ctx, s->frame, cbuf->buf, cbuf->len);
+    if (ret < 0)
+        goto fail;
+    ret = buf_len1 - buf_len;
+ done:
+    return ret;
+ fail:
+    ret = -1;
+    goto done;
+}
+
+/* Decode the first frame: create the (optional) alpha decoder and the
+   color decoder from the header data, then decode the first access
+   unit. Return the number of bytes consumed from 'buf', or -1 on
+   error. */
+static int hevc_decode_start(BPGDecoderContext *s,
+                             const uint8_t *buf, int buf_len1,
+                             int width, int height, int chroma_format_idc,
+                             int bit_depth, int has_alpha)
+{
+    int ret, buf_len;
+    DynBuf abuf_s, *abuf = &abuf_s;
+    DynBuf cbuf_s, *cbuf = &cbuf_s;
+
+    dyn_buf_init(abuf);
+    dyn_buf_init(cbuf);
+
+    buf_len = buf_len1;
+    if (has_alpha) {
+        /* the alpha plane is monochrome (chroma_format_idc = 0) */
+        ret = hevc_decode_init1(abuf, &s->alpha_frame, &s->alpha_dec_ctx,
+                                buf, buf_len, width, height, 0, bit_depth);
+        if (ret < 0)
+            goto fail;
+        buf += ret;
+        buf_len -= ret;
+    }
+    
+    ret = hevc_decode_init1(cbuf, &s->frame, &s->dec_ctx,
+                            buf, buf_len, width, height, chroma_format_idc, 
+                            bit_depth);
+    if (ret < 0)
+        goto fail;
+    buf += ret;
+    buf_len -= ret;
+    
+    ret = hevc_decode_frame_internal(s, abuf, cbuf, buf, buf_len, 1);
+    av_free(abuf->buf);
+    av_free(cbuf->buf);
+    if (ret < 0)
+        goto fail;
+    buf_len -= ret;
+    return buf_len1 - buf_len;
+ fail:
+    return -1;
+}
+
+#ifdef USE_PRED
+/* Decode one additional (animation) frame; the decoders were already
+   created by hevc_decode_start(). Return the number of bytes
+   consumed, or -1 on error. */
+static int hevc_decode_frame(BPGDecoderContext *s,
+                             const uint8_t *buf, int buf_len)
+{
+    int ret;
+    DynBuf abuf_s, *abuf = &abuf_s;
+    DynBuf cbuf_s, *cbuf = &cbuf_s;
+
+    dyn_buf_init(abuf);
+    dyn_buf_init(cbuf);
+    ret = hevc_decode_frame_internal(s, abuf, cbuf, buf, buf_len, 0);
+    av_free(abuf->buf);
+    av_free(cbuf->buf);
+    return ret;
+}
+#endif
+
+/* Close and free the alpha and color decoder contexts (idempotent:
+   the pointers are reset to NULL). */
+static void hevc_decode_end(BPGDecoderContext *s)
+{
+    if (s->alpha_dec_ctx) {
+        avcodec_close(s->alpha_dec_ctx);
+        av_free(s->alpha_dec_ctx);
+        s->alpha_dec_ctx = NULL;
+    }
+    if (s->dec_ctx) {
+        avcodec_close(s->dec_ctx);
+        av_free(s->dec_ctx);
+        s->dec_ctx = NULL;
+    }
+}
+
+/* Return a pointer to a decoded plane and store its line size in
+   *pline_size. Planes 0..c_count-1 are the image planes (1 plane for
+   grayscale, 3 otherwise); plane index c_count is the alpha plane
+   when present. Returns NULL (and *pline_size = 0) for an invalid
+   plane index. */
+uint8_t *bpg_decoder_get_data(BPGDecoderContext *img, int *pline_size, int plane)
+{
+    int c_count;
+    if (img->format == BPG_FORMAT_GRAY) 
+        c_count = 1;
+    else
+        c_count = 3;
+    if (plane < c_count) {
+        *pline_size = img->frame->linesize[plane];
+        return img->frame->data[plane];
+    } else if (img->has_alpha && plane == c_count) {
+        *pline_size = img->alpha_frame->linesize[0];
+        return img->alpha_frame->data[0];
+    } else {
+        *pline_size = 0;
+        return NULL;
+    }
+}
+
+/* Fill 'p' with the parameters of the decoded image. Return 0 on
+   success, -1 if no frame has been decoded yet. Note: a W plane
+   (CMYK) image does not report has_alpha to the caller. */
+int bpg_decoder_get_info(BPGDecoderContext *img, BPGImageInfo *p)
+{
+    if (!img->frame)
+        return -1;
+    p->width = img->w;
+    p->height = img->h;
+    p->format = img->format;
+    p->has_alpha = img->has_alpha && !img->has_w_plane;
+    p->premultiplied_alpha = img->premultiplied_alpha;
+    p->has_w_plane = img->has_w_plane;
+    p->limited_range = img->limited_range;
+    p->color_space = img->color_space;
+    p->bit_depth = img->bit_depth;
+    p->has_animation = img->has_animation;
+    p->loop_count = img->loop_count;
+    return 0;
+}
+
+/* Clamp 'a' to the range [0, pixel_max]. */
+static inline int clamp_pix(int a, int pixel_max)
+{
+    if (a < 0)
+        return 0;
+    else if (a > pixel_max)
+        return pixel_max;
+    else
+        return a;
+}
+
+/* Clamp 'a' to the range [0, 255]. */
+static inline int clamp8(int a)
+{
+    if (a < 0)
+        return 0;
+    else if (a > 255)
+        return 255;
+    else
+        return a;
+}
+
+/* 8 tap Lanczos interpolator (phase=0, symmetric) */
+#define IP0C0 40
+#define IP0C1 (-11)
+#define IP0C2 4
+#define IP0C3 (-1)
+
+/* 7 tap Lanczos interpolator (phase=0.5) */
+#define IP1C0 (-1)
+#define IP1C1 4
+#define IP1C2 (-10)
+#define IP1C3 57
+#define IP1C4 18
+#define IP1C5 (-6)
+#define IP1C6 2
+
+/* interpolate by a factor of two assuming chroma is aligned with the
+   luma samples (phase 0: even outputs are copies, odd outputs use the
+   symmetric 8-tap IP0 filter). 'src' must have 3 valid samples before
+   and 4 after the accessed range (the caller pads, see interp2_h). */
+static void interp2p0_simple(PIXEL *dst, const PIXEL *src, int n, int bit_depth)
+{
+    int pixel_max;
+
+    pixel_max = (1 << bit_depth) - 1;
+    while (n >= 2) {
+        dst[0] = src[0];
+        dst[1] = clamp_pix(((src[-3] + src[4]) * IP0C3 + 
+                            (src[-2] + src[3]) * IP0C2 + 
+                            (src[-1] + src[2]) * IP0C1 + 
+                            (src[0] + src[1]) * IP0C0 + 32) >> 6, pixel_max);
+        dst += 2;
+        src++;
+        n -= 2;
+    }
+    if (n) {
+        dst[0] = src[0];
+    }
+}
+
+/* Same as interp2p0_simple but the input is the 16-bit intermediate
+   output of the vertical pass; shift0/offset0 renormalize the copied
+   samples, shift1/offset1 the filtered ones. */
+static void interp2p0_simple16(PIXEL *dst, const int16_t *src, int n, int bit_depth)
+{
+    int shift1, offset1, shift0, offset0, pixel_max;
+
+    pixel_max = (1 << bit_depth) - 1;
+    shift0 = 14 - bit_depth;
+    offset0 = (1 << shift0) >> 1;
+    shift1 = 20 - bit_depth;
+    offset1 = 1 << (shift1 - 1);
+
+    while (n >= 2) {
+        dst[0] = clamp_pix((src[0] + offset0) >> shift0, pixel_max);
+        dst[1] = clamp_pix(((src[-3] + src[4]) * IP0C3 + 
+                            (src[-2] + src[3]) * IP0C2 + 
+                            (src[-1] + src[2]) * IP0C1 + 
+                            (src[0] + src[1]) * IP0C0 + offset1) >> shift1,
+                           pixel_max);
+        dst += 2;
+        src++;
+        n -= 2;
+    }
+    if (n) {
+        dst[0] = clamp_pix((src[0] + offset0) >> shift0, pixel_max);
+    }
+}
+
+/* interpolate by a factor of two assuming chroma is between the luma
+   samples (phase 0.5): both outputs are filtered, using the 7-tap IP1
+   filter and its mirror. a0..a6 form a sliding window over src[-3..3];
+   the caller pads the source (see interp2_h). */
+static void interp2p1_simple(PIXEL *dst, const PIXEL *src, int n, int bit_depth)
+{
+    int pixel_max, a0, a1, a2, a3, a4, a5, a6;
+
+    pixel_max = (1 << bit_depth) - 1;
+
+    a1 = src[-3];
+    a2 = src[-2];
+    a3 = src[-1];
+    a4 = src[0];
+    a5 = src[1];
+    a6 = src[2];
+
+    while (n >= 2) {
+        a0 = a1;
+        a1 = a2;
+        a2 = a3;
+        a3 = a4;
+        a4 = a5;
+        a5 = a6;
+        a6 = src[3];
+        dst[0] = clamp_pix((a0 * IP1C6 + a1 * IP1C5 + a2 * IP1C4 + a3 * IP1C3 + 
+                            a4 * IP1C2 + a5 * IP1C1 + a6 * IP1C0 + 32) >> 6, 
+                           pixel_max);
+        dst[1] = clamp_pix((a0 * IP1C0 + a1 * IP1C1 + a2 * IP1C2 + a3 * IP1C3 +
+                            a4 * IP1C4 + a5 * IP1C5 + a6 * IP1C6 + 32) >> 6, 
+                           pixel_max);
+        dst += 2;
+        src++;
+        n -= 2;
+    }
+    if (n) {
+        /* odd width: emit only the first phase for the final sample */
+        a0 = a1;
+        a1 = a2;
+        a2 = a3;
+        a3 = a4;
+        a4 = a5;
+        a5 = a6;
+        a6 = src[3];
+        dst[0] = clamp_pix((a0 * IP1C6 + a1 * IP1C5 + a2 * IP1C4 + a3 * IP1C3 + 
+                            a4 * IP1C2 + a5 * IP1C1 + a6 * IP1C0 + 32) >> 6, 
+                           pixel_max);
+    }
+}
+
+/* Same as interp2p1_simple but the input is the 16-bit intermediate
+   output of the vertical pass (see interp2_vh); shift/offset fold the
+   two passes' scaling into one rounding step. */
+static void interp2p1_simple16(PIXEL *dst, const int16_t *src, int n, 
+                               int bit_depth)
+{
+    int shift, offset, pixel_max, a0, a1, a2, a3, a4, a5, a6;
+
+    pixel_max = (1 << bit_depth) - 1;
+    shift = 20 - bit_depth;
+    offset = 1 << (shift - 1);
+
+    a1 = src[-3];
+    a2 = src[-2];
+    a3 = src[-1];
+    a4 = src[0];
+    a5 = src[1];
+    a6 = src[2];
+
+    while (n >= 2) {
+        a0 = a1;
+        a1 = a2;
+        a2 = a3;
+        a3 = a4;
+        a4 = a5;
+        a5 = a6;
+        a6 = src[3];
+        dst[0] = clamp_pix((a0 * IP1C6 + a1 * IP1C5 + a2 * IP1C4 + a3 * IP1C3 +
+                            a4 * IP1C2 + a5 * IP1C1 + a6 * IP1C0 + offset) >> shift,
+                           pixel_max);
+        dst[1] = clamp_pix((a0 * IP1C0 + a1 * IP1C1 + a2 * IP1C2 + a3 * IP1C3 +
+                            a4 * IP1C4 + a5 * IP1C5 + a6 * IP1C6 + offset) >> shift,
+                           pixel_max);
+        dst += 2;
+        src++;
+        n -= 2;
+    }
+    if (n) {
+        /* odd width: emit only the first phase for the final sample */
+        a0 = a1;
+        a1 = a2;
+        a2 = a3;
+        a3 = a4;
+        a4 = a5;
+        a5 = a6;
+        a6 = src[3];
+        dst[0] = clamp_pix((a0 * IP1C6 + a1 * IP1C5 + a2 * IP1C4 + a3 * IP1C3 +
+                            a4 * IP1C2 + a5 * IP1C1 + a6 * IP1C0 + offset) >> shift, 
+                           pixel_max);
+    }
+}
+
+/* Horizontal 2x chroma upsampling of one line of n2 = ceil(n/2)
+   samples into n output samples. The source is copied into tmp_buf
+   (length n2 + 2 * ITAPS2 - 1) with the edge samples replicated so
+   the filters can read past both ends. 'phase' selects co-sited (0)
+   or centered (non-zero) chroma. */
+static void interp2_h(PIXEL *dst, const PIXEL *src, int n, int bit_depth,
+                      int phase, PIXEL *tmp_buf)
+{
+    PIXEL *src1 = tmp_buf, v;
+    int i, n2;
+
+    /* add extra pixels and do the interpolation (XXX: could go faster) */
+    n2 = (n + 1) / 2;
+    memcpy(src1 + ITAPS2 - 1, src, n2 * sizeof(PIXEL));
+
+    /* replicate the left edge */
+    v = src[0];
+    for(i = 0; i < ITAPS2 - 1; i++)
+        src1[i] = v;
+
+    /* replicate the right edge */
+    v = src[n2 - 1];
+    for(i = 0; i < ITAPS2; i++)
+        src1[ITAPS2 - 1 + n2 + i] = v;
+    if (phase == 0)
+        interp2p0_simple(dst, src1 + ITAPS2 - 1, n, bit_depth);
+    else
+        interp2p1_simple(dst, src1 + ITAPS2 - 1, n, bit_depth);
+}
+
+/* Combined vertical + horizontal 2x chroma upsampling (for 4:2:0).
+   'src' is an 8-entry circular buffer of chroma lines; y_pos is the
+   index of the current line. The vertical 7-tap pass writes 16-bit
+   intermediate samples into tmp_buf (length n2 + 2 * ITAPS2 - 1),
+   edges are replicated, then the horizontal pass produces the final
+   pixels. frac_pos selects the vertical phase (mirrored filter),
+   c_h_phase the horizontal one. */
+static void interp2_vh(PIXEL *dst, PIXEL **src, int n, int y_pos,
+                       int16_t *tmp_buf, int bit_depth, int frac_pos,
+                       int c_h_phase)
+{
+    const PIXEL *src0, *src1, *src2, *src3, *src4, *src5, *src6;
+    int i, n2, shift, rnd;
+    int16_t v;
+
+    src0 = src[(y_pos - 3) & 7];
+    src1 = src[(y_pos - 2) & 7];
+    src2 = src[(y_pos - 1) & 7];
+    src3 = src[(y_pos + 0) & 7];
+    src4 = src[(y_pos + 1) & 7];
+    src5 = src[(y_pos + 2) & 7];
+    src6 = src[(y_pos + 3) & 7];
+
+    /* vertical interpolation first */
+    shift = bit_depth - 8;
+    rnd = (1 << shift) >> 1;
+    n2 = (n + 1) / 2;
+    if (frac_pos == 0) {
+        for(i = 0; i < n2; i++) {
+            tmp_buf[ITAPS2 - 1 + i] = 
+                (src0[i] * IP1C6 + src1[i] * IP1C5 + 
+                 src2[i] * IP1C4 + src3[i] * IP1C3 + 
+                 src4[i] * IP1C2 + src5[i] * IP1C1 + 
+                 src6[i] * IP1C0 + rnd) >> shift;
+        }
+    } else {
+        for(i = 0; i < n2; i++) {
+            tmp_buf[ITAPS2 - 1 + i] = 
+                (src0[i] * IP1C0 + src1[i] * IP1C1 + 
+                 src2[i] * IP1C2 + src3[i] * IP1C3 + 
+                 src4[i] * IP1C4 + src5[i] * IP1C5 + 
+                 src6[i] * IP1C6 + rnd) >> shift;
+        }
+    }
+
+    /* then horizontal interpolation (replicate the edges first) */
+    v = tmp_buf[ITAPS2 - 1];
+    for(i = 0; i < ITAPS2 - 1; i++)
+        tmp_buf[i] = v;
+    v = tmp_buf[ITAPS2 - 1 + n2 - 1];
+    for(i = 0; i < ITAPS2; i++)
+        tmp_buf[ITAPS2 - 1 + n2 + i] = v;
+    if (c_h_phase == 0)
+        interp2p0_simple16(dst, tmp_buf + ITAPS2 - 1, n, bit_depth);
+    else
+        interp2p1_simple16(dst, tmp_buf + ITAPS2 - 1, n, bit_depth);
+}
+
+/* Convert one line of YCbCr to packed 8-bit RGB using the fixed-point
+   coefficients prepared by convert_init(); 'incr' is the output pixel
+   stride in bytes. */
+static void ycc_to_rgb24(ColorConvertState *s, uint8_t *dst, const PIXEL *y_ptr,
+                         const PIXEL *cb_ptr, const PIXEL *cr_ptr,
+                         int n, int incr)
+{
+    uint8_t *q = dst;
+    int y_val, cb_val, cr_val, x;
+    int c_r_cr, c_g_cb, c_g_cr, c_b_cb, rnd, shift, center, c_one;
+
+    c_r_cr = s->c_r_cr;
+    c_g_cb = s->c_g_cb;
+    c_g_cr = s->c_g_cr;
+    c_b_cb = s->c_b_cb;
+    c_one = s->y_one;
+    rnd = s->y_offset;
+    shift = s->c_shift;
+    center = s->c_center;
+    for(x = 0; x < n; x++) {
+        y_val = y_ptr[x] * c_one;
+        cb_val = cb_ptr[x] - center;
+        cr_val = cr_ptr[x] - center;
+        q[0] = clamp8((y_val + c_r_cr * cr_val + rnd) >> shift);
+        q[1] = clamp8((y_val - c_g_cb * cb_val - c_g_cr * cr_val + rnd) >> shift);
+        q[2] = clamp8((y_val + c_b_cb * cb_val + rnd) >> shift);
+        q += incr;
+    }
+}
+
+/* Convert one line of YCgCo to packed 8-bit RGB. The inverse
+   transform is exact in integers: R = Y - Cg + Co, G = Y + Cg,
+   B = Y - Cg - Co (cb carries Cg, cr carries Co). */
+static void ycgco_to_rgb24(ColorConvertState *s, 
+                           uint8_t *dst, const PIXEL *y_ptr,
+                           const PIXEL *cb_ptr, const PIXEL *cr_ptr,
+                           int n, int incr)
+{
+    uint8_t *q = dst;
+    int y_val, cb_val, cr_val, x;
+    int rnd, shift, center, c_one;
+
+    c_one = s->y_one;
+    rnd = s->y_offset;
+    shift = s->c_shift;
+    center = s->c_center;
+    for(x = 0; x < n; x++) {
+        y_val = y_ptr[x];
+        cb_val = cb_ptr[x] - center;
+        cr_val = cr_ptr[x] - center;
+        q[0] = clamp8(((y_val - cb_val + cr_val) * c_one + rnd) >> shift);
+        q[1] = clamp8(((y_val + cb_val) * c_one + rnd) >> shift);
+        q[2] = clamp8(((y_val - cb_val - cr_val) * c_one + rnd) >> shift);
+        q += incr;
+    }
+}
+
+/* c = c * alpha: premultiply the three 8-bit components at q[0..2]
+   in place by the alpha plane (alpha has 's->bit_depth' bits). */
+static void alpha_combine8(ColorConvertState *s, 
+                           uint8_t *dst, const PIXEL *a_ptr, int n, int incr)
+{
+    uint8_t *q = dst;
+    int x, a_val, shift, rnd;
+
+    shift = s->bit_depth;
+    rnd = 1 << (shift - 1);
+    for(x = 0; x < n; x++) {
+        a_val = a_ptr[x];
+        /* XXX: not accurate enough */
+        q[0] = (q[0] * a_val + rnd) >> shift;
+        q[1] = (q[1] * a_val + rnd) >> shift;
+        q[2] = (q[2] * a_val + rnd) >> shift;
+        q += incr;
+    }
+}
+
+/* divide8_table[a] is a fixed-point reciprocal 255 / a in DIV8_BITS
+   precision, used to undo alpha premultiplication without a division
+   per component. */
+static uint32_t divide8_table[256];
+
+#define DIV8_BITS 16
+
+static void alpha_divide8_init(void)
+{
+    int i;
+    for(i = 1; i < 256; i++) {
+        /* Note: the 128 is added to have 100% correct results for all
+           the values */
+        divide8_table[i] = ((255 << DIV8_BITS) + (i / 2) + 128) / i;
+    }
+}
+
+/* Return min(255, round(val * 255 / alpha)) using the precomputed
+   reciprocal 'alpha_inv'. */
+static inline unsigned int comp_divide8(unsigned int val, unsigned int alpha,
+                                        unsigned int alpha_inv)
+{
+    if (val >= alpha)
+        return 255;
+    return (val * alpha_inv + (1 << (DIV8_BITS - 1))) >> DIV8_BITS;
+}
+
+/* c = c / alpha: undo premultiplication in place on packed RGBA
+   (4 bytes per pixel, alpha at q[3]); fully transparent pixels are
+   set to white.
+   NOTE(review): the lazy one-time table init is not thread-safe. */
+static void alpha_divide8(uint8_t *dst, int n)
+{
+    static int inited;
+    uint8_t *q = dst;
+    int x;
+    unsigned int a_val, a_inv;
+
+    if (!inited) {
+        inited = 1;
+        alpha_divide8_init();
+    }
+
+    for(x = 0; x < n; x++) {
+        a_val = q[3];
+        if (a_val == 0) {
+            q[0] = 255;
+            q[1] = 255;
+            q[2] = 255;
+        } else {
+            a_inv = divide8_table[a_val];
+            q[0] = comp_divide8(q[0], a_val, a_inv);
+            q[1] = comp_divide8(q[1], a_val, a_inv);
+            q[2] = comp_divide8(q[2], a_val, a_inv);
+        }
+        q += 4;
+    }
+}
+
+/* Replicate one grayscale line into the R, G and B components of a
+   packed 8-bit RGB line; the 8-bit full-range case needs no scaling. */
+static void gray_to_rgb24(ColorConvertState *s, 
+                          uint8_t *dst, const PIXEL *y_ptr,
+                          const PIXEL *cb_ptr, const PIXEL *cr_ptr,
+                          int n, int incr)
+{
+    uint8_t *q = dst;
+    int x, y_val, c, rnd, shift;
+
+    if (s->bit_depth == 8 && !s->limited_range) {
+        for(x = 0; x < n; x++) {
+            y_val = y_ptr[x];
+            q[0] = y_val;
+            q[1] = y_val;
+            q[2] = y_val;
+            q += incr;
+        }
+    } else {
+        c = s->y_one;
+        rnd = s->y_offset;
+        shift = s->c_shift;
+        for(x = 0; x < n; x++) {
+            y_val = clamp8((y_ptr[x] * c + rnd) >> shift);
+            q[0] = y_val;
+            q[1] = y_val;
+            q[2] = y_val;
+            q += incr;
+        }
+    }
+}
+
+/* Copy an RGB-coded image line to packed 8-bit RGB. In BPG RGB mode
+   the planes map as: y plane -> G, cb plane -> B, cr plane -> R. */
+static void rgb_to_rgb24(ColorConvertState *s, uint8_t *dst, const PIXEL *y_ptr,
+                         const PIXEL *cb_ptr, const PIXEL *cr_ptr,
+                         int n, int incr)
+{
+    uint8_t *q = dst;
+    int x, c, rnd, shift;
+
+    if (s->bit_depth == 8 && !s->limited_range) {
+        for(x = 0; x < n; x++) {
+            q[0] = cr_ptr[x];
+            q[1] = y_ptr[x];
+            q[2] = cb_ptr[x];
+            q += incr;
+        }
+    } else {
+        c = s->y_one;
+        rnd = s->y_offset;
+        shift = s->c_shift;
+        for(x = 0; x < n; x++) {
+            q[0] = clamp8((cr_ptr[x] * c + rnd) >> shift);
+            q[1] = clamp8((y_ptr[x] * c + rnd) >> shift);
+            q[2] = clamp8((cb_ptr[x] * c + rnd) >> shift);
+            q += incr;
+        }
+    }
+}
+
+/* Fill one 8-bit component with the opaque value 0xff (used when no
+   real alpha data is available). */
+static void put_dummy_gray8(uint8_t *dst, int n, int incr)
+{
+    int x;
+    for(x = 0; x < n; x++) {
+        dst[0] = 0xff;
+        dst += incr;
+    }
+}
+
+/* Copy one grayscale line to an 8-bit component, rescaling from
+   's->bit_depth' bits using the full-range c_one/c_rnd factors. */
+static void gray_to_gray8(ColorConvertState *s, 
+                          uint8_t *dst, const PIXEL *y_ptr,
+                          int n, int incr)
+{
+    uint8_t *q = dst;
+    int x, y_val, c, rnd, shift;
+
+    if (s->bit_depth == 8) {
+        for(x = 0; x < n; x++) {
+            y_val = y_ptr[x];
+            q[0] = y_val;
+            q += incr;
+        }
+    } else {
+        c = s->c_one;
+        rnd = s->c_rnd;
+        shift = s->c_shift;
+        for(x = 0; x < n; x++) {
+            y_val = (y_ptr[x] * c + rnd) >> shift;
+            q[0] = y_val;
+            q += incr;
+        }
+    }
+}
+
+/* Per-color-space 8-bit conversion functions, indexed by
+   BPGColorSpaceEnum. The three YCbCr variants (BT.601/709/2020) share
+   one routine: only the coefficients set up by convert_init() differ. */
+static ColorConvertFunc *cs_to_rgb24[BPG_CS_COUNT] = {
+    ycc_to_rgb24,
+    rgb_to_rgb24,
+    ycgco_to_rgb24,
+    ycc_to_rgb24,
+    ycc_to_rgb24,
+};
+
+#ifdef USE_RGB48
+
+/* 16 bit output */
+
+/* Clamp 'a' to the range [0, 65535]. */
+static inline int clamp16(int a)
+{
+    if (a < 0)
+        return 0;
+    else if (a > 65535)
+        return 65535;
+    else
+        return a;
+}
+
+/* 16-bit output variant of ycc_to_rgb24(); 'incr' is the output pixel
+   stride in uint16_t units. */
+static void ycc_to_rgb48(ColorConvertState *s, uint8_t *dst, const PIXEL *y_ptr,
+                         const PIXEL *cb_ptr, const PIXEL *cr_ptr,
+                         int n, int incr)
+{
+    uint16_t *q = (uint16_t *)dst;
+    int y_val, cb_val, cr_val, x;
+    int c_r_cr, c_g_cb, c_g_cr, c_b_cb, rnd, shift, center, c_one;
+
+    c_r_cr = s->c_r_cr;
+    c_g_cb = s->c_g_cb;
+    c_g_cr = s->c_g_cr;
+    c_b_cb = s->c_b_cb;
+    c_one = s->y_one;
+    rnd = s->y_offset;
+    shift = s->c_shift;
+    center = s->c_center;
+    for(x = 0; x < n; x++) {
+        y_val = y_ptr[x] * c_one;
+        cb_val = cb_ptr[x] - center;
+        cr_val = cr_ptr[x] - center;
+        q[0] = clamp16((y_val + c_r_cr * cr_val + rnd) >> shift);
+        q[1] = clamp16((y_val - c_g_cb * cb_val - c_g_cr * cr_val + rnd) >> shift);
+        q[2] = clamp16((y_val + c_b_cb * cb_val + rnd) >> shift);
+        q += incr;
+    }
+}
+
+/* 16-bit output variant of ycgco_to_rgb24() (exact integer inverse
+   transform, cb carries Cg and cr carries Co). */
+static void ycgco_to_rgb48(ColorConvertState *s, 
+                           uint8_t *dst, const PIXEL *y_ptr,
+                           const PIXEL *cb_ptr, const PIXEL *cr_ptr,
+                           int n, int incr)
+{
+    uint16_t *q = (uint16_t *)dst;
+    int y_val, cb_val, cr_val, x;
+    int rnd, shift, center, c_one;
+
+    c_one = s->y_one;
+    rnd = s->y_offset;
+    shift = s->c_shift;
+    center = s->c_center;
+    for(x = 0; x < n; x++) {
+        y_val = y_ptr[x];
+        cb_val = cb_ptr[x] - center;
+        cr_val = cr_ptr[x] - center;
+        q[0] = clamp16(((y_val - cb_val + cr_val) * c_one + rnd) >> shift);
+        q[1] = clamp16(((y_val + cb_val) * c_one + rnd) >> shift);
+        q[2] = clamp16(((y_val - cb_val - cr_val) * c_one + rnd) >> shift);
+        q += incr;
+    }
+}
+
+/* Replicate one grayscale line into R, G and B of a packed 16-bit RGB
+   line, using the limited-range-aware y_one/y_offset scaling. */
+static void gray_to_rgb48(ColorConvertState *s, 
+                          uint8_t *dst, const PIXEL *y_ptr,
+                          const PIXEL *cb_ptr, const PIXEL *cr_ptr,
+                          int n, int incr)
+{
+    uint16_t *q = (uint16_t *)dst;
+    int x, y_val, c, rnd, shift;
+
+    c = s->y_one;
+    rnd = s->y_offset;
+    shift = s->c_shift;
+    for(x = 0; x < n; x++) {
+        y_val = clamp16((y_ptr[x] * c + rnd) >> shift);
+        q[0] = y_val;
+        q[1] = y_val;
+        q[2] = y_val;
+        q += incr;
+    }
+}
+
+/* Copy one plane to a 16-bit component using the full-range
+   c_one/c_rnd scaling (no clamping: the value fits by construction). */
+static void gray_to_gray16(ColorConvertState *s, 
+                           uint16_t *dst, const PIXEL *y_ptr,
+                           int n, int incr)
+{
+    uint16_t *q = dst;
+    int x, y_val, c, rnd, shift;
+
+    c = s->c_one;
+    rnd = s->c_rnd;
+    shift = s->c_shift;
+    for(x = 0; x < n; x++) {
+        y_val = (y_ptr[x] * c + rnd) >> shift;
+        q[0] = y_val;
+        q += incr;
+    }
+}
+
+/* Same as gray_to_gray16() but with the limited-range-aware
+   y_one/y_offset scaling, hence the clamp. */
+static void luma_to_gray16(ColorConvertState *s, 
+                           uint16_t *dst, const PIXEL *y_ptr,
+                           int n, int incr)
+{
+    uint16_t *q = dst;
+    int x, y_val, c, rnd, shift;
+
+    c = s->y_one;
+    rnd = s->y_offset;
+    shift = s->c_shift;
+    for(x = 0; x < n; x++) {
+        y_val = clamp16((y_ptr[x] * c + rnd) >> shift);
+        q[0] = y_val;
+        q += incr;
+    }
+}
+
+/* Copy an RGB-coded image line to packed 16-bit RGB; plane mapping as
+   in rgb_to_rgb24(): y -> G, cb -> B, cr -> R. */
+static void rgb_to_rgb48(ColorConvertState *s, 
+                         uint8_t *dst, const PIXEL *y_ptr,
+                         const PIXEL *cb_ptr, const PIXEL *cr_ptr,
+                         int n, int incr)
+{
+    luma_to_gray16(s, (uint16_t *)dst + 1, y_ptr, n, incr);
+    luma_to_gray16(s, (uint16_t *)dst + 2, cb_ptr, n, incr);
+    luma_to_gray16(s, (uint16_t *)dst + 0, cr_ptr, n, incr);
+}
+
+/* Fill one 16-bit component with the opaque value 0xffff. */
+static void put_dummy_gray16(uint16_t *dst, int n, int incr)
+{
+    int x;
+    for(x = 0; x < n; x++) {
+        dst[0] = 0xffff;
+        dst += incr;
+    }
+}
+
+/* c = c * alpha: premultiply the three 16-bit components at q[0..2]
+   in place by the alpha plane (alpha has 's->bit_depth' bits). */
+static void alpha_combine16(ColorConvertState *s, 
+                            uint16_t *dst, const PIXEL *a_ptr, int n, int incr)
+{
+    uint16_t *q = dst;
+    int x, a_val, shift, rnd;
+
+    shift = s->bit_depth;
+    rnd = 1 << (shift - 1);
+    for(x = 0; x < n; x++) {
+        a_val = a_ptr[x];
+        /* XXX: not accurate enough */
+        q[0] = (q[0] * a_val + rnd) >> shift;
+        q[1] = (q[1] * a_val + rnd) >> shift;
+        q[2] = (q[2] * a_val + rnd) >> shift;
+        q += incr;
+    }
+}
+
+#define DIV16_BITS 15
+
+/* Return min(65535, val * alpha_inv rounded) where alpha_inv is the
+   fixed-point reciprocal 65535 / alpha in DIV16_BITS precision. */
+static unsigned int comp_divide16(unsigned int val, unsigned int alpha,
+                                  unsigned int alpha_inv)
+{
+    if (val >= alpha)
+        return 65535;
+    return (val * alpha_inv + (1 << (DIV16_BITS - 1))) >> DIV16_BITS;
+}
+
+/* c = c / alpha: undo premultiplication in place on packed 16-bit
+   RGBA (4 components per pixel, alpha at q[3]); fully transparent
+   pixels are set to white. Unlike the 8-bit path there is no lookup
+   table: the reciprocal is computed per pixel. */
+static void alpha_divide16(uint16_t *dst, int n)
+{
+    uint16_t *q = dst;
+    int x;
+    unsigned int a_val, a_inv;
+
+    for(x = 0; x < n; x++) {
+        a_val = q[3];
+        if (a_val == 0) {
+            q[0] = 65535;
+            q[1] = 65535;
+            q[2] = 65535;
+        } else {
+            a_inv = ((65535 << DIV16_BITS) + (a_val / 2)) / a_val;
+            q[0] = comp_divide16(q[0], a_val, a_inv);
+            q[1] = comp_divide16(q[1], a_val, a_inv);
+            q[2] = comp_divide16(q[2], a_val, a_inv);
+        }
+        q += 4;
+    }
+}
+
+/* Invert one 8-bit component in place (255 - x). */
+static void gray_one_minus8(uint8_t *dst, int n, int incr)
+{
+    int x;
+    for(x = 0; x < n; x++) {
+        dst[0] = 255 - dst[0];
+        dst += incr;
+    }
+}
+
+/* Invert one 16-bit component in place (65535 - x). */
+static void gray_one_minus16(uint16_t *dst, int n, int incr)
+{
+    int x;
+    for(x = 0; x < n; x++) {
+        dst[0] = 65535 - dst[0];
+        dst += incr;
+    }
+}
+
+/* Per-color-space 16-bit conversion functions, indexed by
+   BPGColorSpaceEnum (same layout as cs_to_rgb24). */
+static ColorConvertFunc *cs_to_rgb48[BPG_CS_COUNT] = {
+    ycc_to_rgb48,
+    rgb_to_rgb48,
+    ycgco_to_rgb48,
+    ycc_to_rgb48,
+    ycc_to_rgb48,
+};
+#endif
+
+/* Precompute the fixed-point coefficients used by the color
+   conversion routines. The base scale is out_pixel_max * 2^c_shift /
+   in_pixel_max with c_shift = 30 - out_bit_depth; limited range uses
+   the 219 (luma) and 224 (chroma) scaled excursions of video levels
+   instead of the full range. The k_r/k_b pairs are the standard
+   BT.601 / BT.709 / BT.2020 luma coefficients. */
+static void convert_init(ColorConvertState *s, 
+                         int in_bit_depth, int out_bit_depth,
+                         BPGColorSpaceEnum color_space,
+                         int limited_range)
+{
+    int c_shift, in_pixel_max, out_pixel_max;
+    double mult, k_r, k_b, mult_y, mult_c;
+
+    c_shift = 30 - out_bit_depth;
+    in_pixel_max = (1 << in_bit_depth) - 1;
+    out_pixel_max = (1 << out_bit_depth) - 1;
+    mult = (double)out_pixel_max * (1 << c_shift) / (double)in_pixel_max;
+    if (limited_range) {
+        mult_y = (double)out_pixel_max * (1 << c_shift) / 
+            (double)(219 << (in_bit_depth - 8));
+        mult_c = (double)out_pixel_max * (1 << c_shift) / 
+            (double)(224 << (in_bit_depth - 8));
+    } else {
+        mult_y = mult;
+        mult_c = mult;
+    }
+    switch(color_space) {
+    case BPG_CS_YCbCr:
+        k_r = 0.299;
+        k_b = 0.114;
+        goto convert_ycc;
+    case BPG_CS_YCbCr_BT709:
+        k_r = 0.2126; 
+        k_b = 0.0722;
+        goto convert_ycc;
+    case BPG_CS_YCbCr_BT2020:
+        k_r = 0.2627;
+        k_b = 0.0593;
+    convert_ycc:
+        /* standard YCbCr -> RGB matrix entries, scaled to fixed point */
+        s->c_r_cr = lrint(2*(1-k_r) * mult_c);
+        s->c_g_cb = lrint(2*k_b*(1-k_b)/(1-k_b-k_r) * mult_c);
+        s->c_g_cr = lrint(2*k_r*(1-k_r)/(1-k_b-k_r) * mult_c);
+        s->c_b_cb = lrint(2*(1-k_b) * mult_c);
+        break;
+    default:
+        break;
+    }
+    s->c_one = lrint(mult);
+    s->c_shift = c_shift;
+    s->c_rnd = (1 << (c_shift - 1));
+    s->c_center = 1 << (in_bit_depth - 1);
+    if (limited_range) {
+        /* fold the 16 << (bits - 8) black level into the offset */
+        s->y_one = lrint(mult_y);
+        s->y_offset = -(16 << (in_bit_depth - 8)) * s->y_one + s->c_rnd;
+    } else {
+        s->y_one = s->c_one;
+        s->y_offset = s->c_rnd;
+    }
+    s->bit_depth = in_bit_depth;
+    s->limited_range = limited_range;
+}
+
+/* Validate the requested output format, allocate the chroma
+   upsampling buffers (4:2:0 / 4:2:2 only) and select the color
+   conversion function. Return 0 on success, -1 if the format is out
+   of range.
+   NOTE(review): the av_malloc() results below are not checked —
+   verify whether allocation failure is handled elsewhere. */
+static int bpg_decoder_output_init(BPGDecoderContext *s,
+                                   BPGDecoderOutputFormat out_fmt)
+{
+    int i;
+
+#ifdef USE_RGB48
+    if ((unsigned)out_fmt > BPG_OUTPUT_FORMAT_CMYK64)
+        return -1;
+#else
+    if ((unsigned)out_fmt > BPG_OUTPUT_FORMAT_RGBA32)
+        return -1;
+#endif
+    s->is_rgba = (out_fmt == BPG_OUTPUT_FORMAT_RGBA32 ||
+                    out_fmt == BPG_OUTPUT_FORMAT_RGBA64);
+    s->is_16bpp = (out_fmt == BPG_OUTPUT_FORMAT_RGB48 ||
+                   out_fmt == BPG_OUTPUT_FORMAT_RGBA64 ||
+                   out_fmt == BPG_OUTPUT_FORMAT_CMYK64);
+    s->is_cmyk = (out_fmt == BPG_OUTPUT_FORMAT_CMYK32 ||
+                  out_fmt == BPG_OUTPUT_FORMAT_CMYK64);
+    
+    if (s->format == BPG_FORMAT_420 || s->format == BPG_FORMAT_422) {
+        s->w2 = (s->w + 1) / 2;
+        s->h2 = (s->h + 1) / 2;
+        /* horizontally upsampled chroma lines */
+        s->cb_buf2 = av_malloc(s->w * sizeof(PIXEL));
+        s->cr_buf2 = av_malloc(s->w * sizeof(PIXEL));
+        /* Note: too large if 422 and sizeof(PIXEL) = 1 */
+        s->c_buf4 = av_malloc((s->w2 + 2 * ITAPS2 - 1) * sizeof(int16_t));
+
+        if (s->format == BPG_FORMAT_420) {
+            /* circular buffers of chroma lines for the vertical pass */
+            for(i = 0; i < ITAPS; i++) {
+                s->cb_buf3[i] = av_malloc(s->w2 * sizeof(PIXEL));
+                s->cr_buf3[i] = av_malloc(s->w2 * sizeof(PIXEL));
+            }
+        }
+    }
+    convert_init(&s->cvt, s->bit_depth, s->is_16bpp ? 16 : 8,
+                 s->color_space, s->limited_range);
+
+    if (s->format == BPG_FORMAT_GRAY) {
+#ifdef USE_RGB48
+        if (s->is_16bpp) {
+            s->cvt_func = gray_to_rgb48;
+        } else 
+#endif
+        {
+            s->cvt_func = gray_to_rgb24;
+        }
+    } else {
+#ifdef USE_RGB48
+        if (s->is_16bpp) {
+            s->cvt_func = cs_to_rgb48[s->color_space];
+        } else
+#endif
+        {
+            s->cvt_func = cs_to_rgb24[s->color_space];
+        }
+    }
+    return 0;
+}
+
+static void bpg_decoder_output_end(BPGDecoderContext *s)
+{
+    int i;
+
+    av_free(s->cb_buf2);
+    av_free(s->cr_buf2);
+    for(i = 0; i < ITAPS; i++) {
+        av_free(s->cb_buf3[i]);
+        av_free(s->cr_buf3[i]);
+    }
+    av_free(s->c_buf4);
+}
+
+int bpg_decoder_start(BPGDecoderContext *s, BPGDecoderOutputFormat out_fmt)
+{
+    int ret, c_idx;
+
+    if (!s->frame)
+        return -1;
+    
+    if (!s->output_inited) {
+        /* first frame is already decoded */
+        ret = bpg_decoder_output_init(s, out_fmt);
+        if (ret)
+            return ret;
+        s->output_inited = 1;
+        s->out_fmt = out_fmt;
+    } else {
+#ifdef USE_PRED
+        if (s->has_animation && s->decode_animation) {
+            if (out_fmt != s->out_fmt)
+                return -1;
+            if (s->input_buf_pos >= s->input_buf_len) {
+                return -1;
+            } else {
+                ret = hevc_decode_frame(s, s->input_buf + s->input_buf_pos, 
+                                        s->input_buf_len - s->input_buf_pos);
+                if (ret < 0)
+                    return -1;
+                s->input_buf_pos += ret;
+            }
+        } else 
+#endif
+        {
+            return -1;
+        }
+    }
+    s->y_buf = bpg_decoder_get_data(s, &s->y_linesize, 0);
+    if (s->format != BPG_FORMAT_GRAY) {
+        s->cb_buf = bpg_decoder_get_data(s, &s->cb_linesize, 1);
+        s->cr_buf = bpg_decoder_get_data(s, &s->cr_linesize, 2);
+        c_idx = 3;
+    } else {
+        c_idx = 1;
+    }
+    if (s->has_alpha)
+        s->a_buf = bpg_decoder_get_data(s, &s->a_linesize, c_idx);
+    else
+        s->a_buf = NULL;
+    s->y = 0;
+    return 0;
+}
+
+void bpg_decoder_get_frame_duration(BPGDecoderContext *s, int *pnum, int *pden)
+{
+#ifdef USE_PRED
+    if (s->frame && s->has_animation) {
+        *pnum = s->frame_delay_num * (s->frame->pts);
+        *pden = s->frame_delay_den;
+    } else 
+#endif
+    {
+        *pnum = 0;
+        *pden = 1;
+    }
+}
+
+int bpg_decoder_get_line(BPGDecoderContext *s, void *rgb_line1)
+{
+    uint8_t *rgb_line = rgb_line1;
+    int w, y, pos, y2, y1, incr, y_frac;
+    PIXEL *y_ptr, *cb_ptr, *cr_ptr, *a_ptr;
+
+    y = s->y;
+    if ((unsigned)y >= s->h) 
+        return -1;
+    w = s->w;
+    
+    y_ptr = (PIXEL *)(s->y_buf + y * s->y_linesize);
+    incr = 3 + (s->is_rgba || s->is_cmyk);
+    switch(s->format) {
+    case BPG_FORMAT_GRAY:
+        s->cvt_func(&s->cvt, rgb_line, y_ptr, NULL, NULL, w, incr);
+        break;
+    case BPG_FORMAT_420:
+        if (y == 0) {
+            int i;
+            /* init the vertical interpolation buffer */
+            for(i = 0; i < ITAPS; i++) {
+                y1 = i;
+                if (y1 > ITAPS2)
+                    y1 -= ITAPS;
+                if (y1 < 0)
+                    y1 = 0;
+                else if (y1 >= s->h2)
+                    y1 = s->h2 - 1;
+                cb_ptr = (PIXEL *)(s->cb_buf + y1 * s->cb_linesize);
+                cr_ptr = (PIXEL *)(s->cr_buf + y1 * s->cr_linesize);
+                memcpy(s->cb_buf3[i], cb_ptr, s->w2 * sizeof(PIXEL));
+                memcpy(s->cr_buf3[i], cr_ptr, s->w2 * sizeof(PIXEL));
+            }
+        }
+        y2 = y >> 1;
+        pos = y2 % ITAPS;
+        y_frac = y & 1;
+        interp2_vh(s->cb_buf2, s->cb_buf3, w, pos, s->c_buf4,
+                   s->bit_depth, y_frac, s->c_h_phase);
+        interp2_vh(s->cr_buf2, s->cr_buf3, w, pos, s->c_buf4,
+                   s->bit_depth, y_frac, s->c_h_phase);
+        if (y_frac) {
+            /* add a new line in the circular buffer */
+            pos = (pos + ITAPS2 + 1) % ITAPS;
+            y1 = y2 + ITAPS2 + 1;
+            if (y1 >= s->h2)
+                y1 = s->h2 - 1;
+            cb_ptr = (PIXEL *)(s->cb_buf + y1 * s->cb_linesize);
+            cr_ptr = (PIXEL *)(s->cr_buf + y1 * s->cr_linesize);
+            memcpy(s->cb_buf3[pos], cb_ptr, s->w2 * sizeof(PIXEL));
+            memcpy(s->cr_buf3[pos], cr_ptr, s->w2 * sizeof(PIXEL));
+        }
+        s->cvt_func(&s->cvt, rgb_line, y_ptr, s->cb_buf2, s->cr_buf2, w, incr);
+        break;
+    case BPG_FORMAT_422:
+        cb_ptr = (PIXEL *)(s->cb_buf + y * s->cb_linesize);
+        cr_ptr = (PIXEL *)(s->cr_buf + y * s->cr_linesize);
+        interp2_h(s->cb_buf2, cb_ptr, w, s->bit_depth, s->c_h_phase, 
+                  (PIXEL *)s->c_buf4);
+        interp2_h(s->cr_buf2, cr_ptr, w, s->bit_depth, s->c_h_phase,
+                  (PIXEL *)s->c_buf4);
+        s->cvt_func(&s->cvt, rgb_line, y_ptr, s->cb_buf2, s->cr_buf2, w, incr);
+        break;
+    case BPG_FORMAT_444:
+        cb_ptr = (PIXEL *)(s->cb_buf + y * s->cb_linesize);
+        cr_ptr = (PIXEL *)(s->cr_buf + y * s->cr_linesize);
+        s->cvt_func(&s->cvt, rgb_line, y_ptr, cb_ptr, cr_ptr, w, incr);
+        break;
+    default:
+        return -1;
+    }
+
+    /* alpha output or CMYK handling */
+#ifdef USE_RGB48
+    if (s->is_cmyk) {
+        int i;
+        /* convert RGBW to CMYK */
+        if (s->is_16bpp) {
+            if (!s->has_w_plane)
+                put_dummy_gray16((uint16_t *)rgb_line + 3, w, 4);
+            for(i = 0; i < 4; i++)
+                gray_one_minus16((uint16_t *)rgb_line + i, w, 4);
+        } else {
+            if (!s->has_w_plane)
+                put_dummy_gray8(rgb_line + 3, w, 4);
+            for(i = 0; i < 4; i++)
+                gray_one_minus8(rgb_line + i, w, 4);
+        }
+    } else
+#endif
+    if (s->has_w_plane) {
+        a_ptr = (PIXEL *)(s->a_buf + y * s->a_linesize);
+#ifdef USE_RGB48
+        if (s->is_16bpp) {
+            alpha_combine16(&s->cvt, (uint16_t *)rgb_line, a_ptr, w, incr);
+            if (s->is_rgba)
+                put_dummy_gray16((uint16_t *)rgb_line + 3, w, 4);
+        } else
+#endif
+        {
+            alpha_combine8(&s->cvt, rgb_line, a_ptr, w, incr);
+            if (s->is_rgba)
+                put_dummy_gray8(rgb_line + 3, w, 4);
+        }
+    } else if (s->is_rgba) {
+#ifdef USE_RGB48
+        if (s->is_16bpp) {
+            if (s->has_alpha) {
+                a_ptr = (PIXEL *)(s->a_buf + y * s->a_linesize);
+                gray_to_gray16(&s->cvt, 
+                               (uint16_t *)rgb_line + 3, a_ptr, w, 4);
+                if (s->premultiplied_alpha)
+                    alpha_divide16((uint16_t *)rgb_line, w);
+            } else {
+                put_dummy_gray16((uint16_t *)rgb_line + 3, w, 4);
+            }
+        } else
+#endif
+        {
+            if (s->has_alpha) {
+                a_ptr = (PIXEL *)(s->a_buf + y * s->a_linesize);
+                gray_to_gray8(&s->cvt, rgb_line + 3, a_ptr, w, 4);
+                if (s->premultiplied_alpha)
+                    alpha_divide8((uint8_t *)rgb_line, w);
+            } else {
+                put_dummy_gray8(rgb_line + 3, w, 4);
+            }
+            }
+    }
+
+    /* go to next line */
+    s->y++;
+    return 0;
+}
+
+BPGDecoderContext *bpg_decoder_open(void)
+{
+    BPGDecoderContext *s;
+
+    s = av_mallocz(sizeof(BPGDecoderContext));
+    if (!s)
+        return NULL;
+    return s;
+}
+
+typedef struct {
+    uint32_t width, height;
+    BPGImageFormatEnum format;
+    uint8_t has_alpha;
+    uint8_t bit_depth;
+    uint8_t has_w_plane;
+    uint8_t premultiplied_alpha;
+    uint8_t limited_range;
+    uint8_t has_animation;
+    uint16_t loop_count;
+    uint16_t frame_delay_num;
+    uint16_t frame_delay_den;
+    BPGColorSpaceEnum color_space;
+    uint32_t hevc_data_len;
+    BPGExtensionData *first_md;
+} BPGHeaderData;
+
+static int bpg_decode_header(BPGHeaderData *h,
+                             const uint8_t *buf, int buf_len,
+                             int header_only, int load_extensions)
+{
+    int idx, flags1, flags2, has_extension, ret, alpha1_flag, alpha2_flag;
+    uint32_t extension_data_len;
+
+    if (buf_len < 6)
+        return -1;
+    /* check magic */
+    if (buf[0] != ((BPG_HEADER_MAGIC >> 24) & 0xff) ||
+        buf[1] != ((BPG_HEADER_MAGIC >> 16) & 0xff) ||
+        buf[2] != ((BPG_HEADER_MAGIC >> 8) & 0xff) ||
+        buf[3] != ((BPG_HEADER_MAGIC >> 0) & 0xff))
+        return -1;
+    idx = 4;
+    flags1 = buf[idx++];
+    h->format = flags1 >> 5;
+    if (h->format > 5)
+        return -1;
+    alpha1_flag = (flags1 >> 4) & 1;
+    h->bit_depth = (flags1 & 0xf) + 8;
+    if (h->bit_depth > 14)
+        return -1;
+    flags2 = buf[idx++];
+    h->color_space = (flags2 >> 4) & 0xf;
+    has_extension = (flags2 >> 3) & 1;
+    alpha2_flag = (flags2 >> 2) & 1;
+    h->limited_range = (flags2 >> 1) & 1;
+    h->has_animation = flags2 & 1;
+    h->loop_count = 0;
+    h->frame_delay_num = 0;
+    h->frame_delay_den = 0;
+    h->has_alpha = 0;
+    h->has_w_plane = 0;
+    h->premultiplied_alpha = 0;
+    
+    if (alpha1_flag) {
+        h->has_alpha = 1;
+        h->premultiplied_alpha = alpha2_flag;
+    } else if (alpha2_flag) {
+        h->has_alpha = 1;
+        h->has_w_plane = 1;
+    }
+
+    if (h->color_space >= BPG_CS_COUNT || 
+        (h->format == BPG_FORMAT_GRAY && h->color_space != 0) ||
+        (h->has_w_plane && h->format == BPG_FORMAT_GRAY))
+        return -1;
+    ret = get_ue(&h->width, buf + idx, buf_len - idx);
+    if (ret < 0)
+        return -1;
+    idx += ret;
+    ret = get_ue(&h->height, buf + idx, buf_len - idx);
+    if (ret < 0)
+        return -1;
+    idx += ret;
+    if (h->width == 0 || h->height == 0)
+        return -1;
+    if (header_only)
+        return idx;
+
+    ret = get_ue(&h->hevc_data_len, buf + idx, buf_len - idx);
+    if (ret < 0)
+        return -1;
+    idx += ret;
+           
+    extension_data_len = 0;
+    if (has_extension) {
+        ret = get_ue(&extension_data_len, buf + idx, buf_len - idx);
+        if (ret < 0)
+            return -1;
+        idx += ret;
+    }
+
+    h->first_md = NULL;
+    if (has_extension) {
+        int ext_end;
+
+        ext_end = idx + extension_data_len;
+        if (ext_end > buf_len)
+            return -1;
+        if (load_extensions || h->has_animation) {
+            BPGExtensionData *md, **plast_md;
+            uint32_t tag, buf_len;
+
+            plast_md = &h->first_md;
+            while (idx < ext_end) {
+                ret = get_ue32(&tag, buf + idx, ext_end - idx);
+                if (ret < 0) 
+                    goto fail;
+                idx += ret;
+
+                ret = get_ue(&buf_len, buf + idx, ext_end - idx);
+                if (ret < 0) 
+                    goto fail;
+                idx += ret;
+                
+                if (idx + buf_len > ext_end) {
+                fail:
+                    bpg_decoder_free_extension_data(h->first_md);
+                    return -1;
+                }
+                if (h->has_animation && tag == BPG_EXTENSION_TAG_ANIM_CONTROL) {
+                    int idx1;
+                    uint32_t loop_count, frame_delay_num, frame_delay_den;
+
+                    idx1 = idx;
+                    ret = get_ue(&loop_count, buf + idx1, ext_end - idx1);
+                    if (ret < 0) 
+                        goto fail;
+                    idx1 += ret;
+                    ret = get_ue(&frame_delay_num, buf + idx1, ext_end - idx1);
+                    if (ret < 0) 
+                        goto fail;
+                    idx1 += ret;
+                    ret = get_ue(&frame_delay_den, buf + idx1, ext_end - idx1);
+                    if (ret < 0) 
+                        goto fail;
+                    idx1 += ret;
+                    if (frame_delay_num == 0 || frame_delay_den == 0 ||
+                        (uint16_t)frame_delay_num != frame_delay_num ||
+                        (uint16_t)frame_delay_den != frame_delay_den ||
+                        (uint16_t)loop_count != loop_count)
+                        goto fail;
+                    h->loop_count = loop_count;
+                    h->frame_delay_num = frame_delay_num;
+                    h->frame_delay_den = frame_delay_den;
+                }
+                if (load_extensions) {
+                    md = av_malloc(sizeof(BPGExtensionData));
+                    md->tag = tag;
+                    md->buf_len = buf_len;
+                    md->next = NULL;
+                    *plast_md = md;
+                    plast_md = &md->next;
+                    
+                    md->buf = av_malloc(md->buf_len);
+                    memcpy(md->buf, buf + idx, md->buf_len);
+                }
+                idx += buf_len;
+            }
+        } else
+        {
+            /* skip extension data */
+            idx += extension_data_len;
+        }
+    }
+
+    /* must have animation control extension for animations */
+    if (h->has_animation && h->frame_delay_num == 0)
+        goto fail;
+
+    if (h->hevc_data_len == 0)
+        h->hevc_data_len = buf_len - idx;
+    
+    return idx;
+}
+
+int bpg_decoder_decode(BPGDecoderContext *img, const uint8_t *buf, int buf_len)
+{
+    int idx, has_alpha, bit_depth, color_space, ret;
+    uint32_t width, height;
+    BPGHeaderData h_s, *h = &h_s;
+
+    idx = bpg_decode_header(h, buf, buf_len, 0, img->keep_extension_data);
+    if (idx < 0)
+        return idx;
+    width = h->width;
+    height = h->height;
+    has_alpha = h->has_alpha;
+    color_space = h->color_space;
+    bit_depth = h->bit_depth;
+    
+    img->w = width;
+    img->h = height;
+    img->format = h->format;
+    if (h->format == BPG_FORMAT_422_VIDEO) {
+        img->format = BPG_FORMAT_422;
+        img->c_h_phase = 0;
+    } else if (h->format == BPG_FORMAT_420_VIDEO) {
+        img->format = BPG_FORMAT_420;
+        img->c_h_phase = 0;
+    } else {
+        img->format = h->format;
+        img->c_h_phase = 1;
+    }
+    img->has_alpha = has_alpha;
+    img->premultiplied_alpha = h->premultiplied_alpha;
+    img->has_w_plane = h->has_w_plane;
+    img->limited_range = h->limited_range;
+    img->color_space = color_space;
+    img->bit_depth = bit_depth;
+    img->has_animation = h->has_animation;
+    img->loop_count = h->loop_count;
+    img->frame_delay_num = h->frame_delay_num;
+    img->frame_delay_den = h->frame_delay_den;
+
+    img->first_md = h->first_md;
+
+    if (idx + h->hevc_data_len > buf_len)
+        goto fail;
+
+    /* decode the first frame */
+    ret = hevc_decode_start(img, buf + idx, buf_len - idx,
+                            width, height, img->format, bit_depth, has_alpha);
+    if (ret < 0)
+        goto fail;
+    idx += ret;
+
+#ifdef USE_PRED
+    /* XXX: add an option to avoid decoding animations ? */
+    img->decode_animation = 1;
+    if (img->has_animation && img->decode_animation) { 
+        int len;
+        /* keep trailing bitstream to decode the next frames */
+        len = buf_len - idx;
+        img->input_buf = av_malloc(len);
+        if (!img->input_buf)
+            goto fail;
+        memcpy(img->input_buf, buf + idx, len);
+        img->input_buf_len = len;
+        img->input_buf_pos = 0;
+    } else 
+#endif
+    {
+        hevc_decode_end(img);
+    }
+    if (img->frame->width < img->w || img->frame->height < img->h)
+        goto fail;
+    img->y = -1;
+    return 0;
+
+ fail:
+    av_frame_free(&img->frame);
+    av_frame_free(&img->alpha_frame);
+    bpg_decoder_free_extension_data(img->first_md);
+    img->first_md = NULL;
+    return -1;
+}
+
+void bpg_decoder_close(BPGDecoderContext *s)
+{
+    bpg_decoder_output_end(s);
+    av_free(s->input_buf);
+    hevc_decode_end(s);
+    av_frame_free(&s->frame);
+    av_frame_free(&s->alpha_frame);
+    bpg_decoder_free_extension_data(s->first_md);
+    av_free(s);
+}
+
+void bpg_decoder_free_extension_data(BPGExtensionData *first_md)
+{
+#ifndef EMSCRIPTEN
+    BPGExtensionData *md, *md_next;
+    
+    for(md = first_md; md != NULL; md = md_next) {
+        md_next = md->next;
+        av_free(md->buf);
+        av_free(md);
+    }
+#endif
+}
+
+#ifndef EMSCRIPTEN
+void bpg_decoder_keep_extension_data(BPGDecoderContext *s, int enable)
+{
+    s->keep_extension_data = enable;
+}
+
+BPGExtensionData *bpg_decoder_get_extension_data(BPGDecoderContext *s)
+{
+    return s->first_md;
+}
+
+int bpg_decoder_get_info_from_buf(BPGImageInfo *p, 
+                                  BPGExtensionData **pfirst_md,
+                                  const uint8_t *buf, int buf_len)
+{
+    BPGHeaderData h_s, *h = &h_s;
+    int parse_extension;
+
+    parse_extension = (pfirst_md != NULL);
+    if (bpg_decode_header(h, buf, buf_len, 
+                          !parse_extension, parse_extension) < 0)
+        return -1;
+    p->width = h->width;
+    p->height = h->height;
+    p->format = h->format;
+    p->has_alpha = h->has_alpha && !h->has_w_plane;
+    p->premultiplied_alpha = h->premultiplied_alpha;
+    p->has_w_plane = h->has_w_plane;
+    p->limited_range = h->limited_range;
+    p->color_space = h->color_space;
+    p->bit_depth = h->bit_depth;
+    p->has_animation = h->has_animation;
+    p->loop_count = h->loop_count;
+    if (pfirst_md)
+        *pfirst_md = h->first_md;
+    return 0;
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libbpg.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,138 @@
+/*
+ * BPG decoder
+ * 
+ * Copyright (c) 2014 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef _LIBBPG_H
+#define _LIBBPG_H
+
+#include <inttypes.h>
+
+typedef struct BPGDecoderContext BPGDecoderContext;
+
+typedef enum {
+    BPG_FORMAT_GRAY,
+    BPG_FORMAT_420, /* chroma at offset (0.5, 0.5) (JPEG) */
+    BPG_FORMAT_422, /* chroma at offset (0.5, 0) (JPEG) */
+    BPG_FORMAT_444,
+    BPG_FORMAT_420_VIDEO, /* chroma at offset (0, 0.5) (MPEG2) */
+    BPG_FORMAT_422_VIDEO, /* chroma at offset (0, 0) (MPEG2) */
+} BPGImageFormatEnum;
+
+typedef enum {
+    BPG_CS_YCbCr,
+    BPG_CS_RGB,
+    BPG_CS_YCgCo,
+    BPG_CS_YCbCr_BT709,
+    BPG_CS_YCbCr_BT2020,
+
+    BPG_CS_COUNT,
+} BPGColorSpaceEnum;
+
+typedef struct {
+    uint32_t width;
+    uint32_t height;
+    uint8_t format; /* see BPGImageFormatEnum */
+    uint8_t has_alpha; /* TRUE if an alpha plane is present */
+    uint8_t color_space; /* see BPGColorSpaceEnum */
+    uint8_t bit_depth;
+    uint8_t premultiplied_alpha; /* TRUE if the color is alpha premultiplied */
+    uint8_t has_w_plane; /* TRUE if a W plane is present (for CMYK encoding) */
+    uint8_t limited_range; /* TRUE if limited range for the color */
+    uint8_t has_animation; /* TRUE if the image contains animations */
+    uint16_t loop_count; /* animations: number of loop, 0 = infinity */
+} BPGImageInfo;
+
+typedef enum {
+    BPG_EXTENSION_TAG_EXIF = 1,
+    BPG_EXTENSION_TAG_ICCP = 2,
+    BPG_EXTENSION_TAG_XMP = 3,
+    BPG_EXTENSION_TAG_THUMBNAIL = 4,
+    BPG_EXTENSION_TAG_ANIM_CONTROL = 5,
+} BPGExtensionTagEnum;
+
+typedef struct BPGExtensionData {
+    BPGExtensionTagEnum tag;
+    uint32_t buf_len;
+    uint8_t *buf;
+    struct BPGExtensionData *next;
+} BPGExtensionData;
+
+typedef enum {
+    BPG_OUTPUT_FORMAT_RGB24,
+    BPG_OUTPUT_FORMAT_RGBA32, /* not premultiplied alpha */
+    BPG_OUTPUT_FORMAT_RGB48,
+    BPG_OUTPUT_FORMAT_RGBA64, /* not premultiplied alpha */
+    BPG_OUTPUT_FORMAT_CMYK32,
+    BPG_OUTPUT_FORMAT_CMYK64,
+} BPGDecoderOutputFormat;
+
+#define BPG_DECODER_INFO_BUF_SIZE 16
+
+BPGDecoderContext *bpg_decoder_open(void);
+
+/* If enable is true, extension data are kept during the image
+   decoding and can be accessed after bpg_decoder_decode() with
+   bpg_decoder_get_extension_data(). By default, the extension data are
+   discarded. */
+void bpg_decoder_keep_extension_data(BPGDecoderContext *s, int enable);
+
+/* return 0 if OK, < 0 if error */
+int bpg_decoder_decode(BPGDecoderContext *s, const uint8_t *buf, int buf_len);
+
+/* Return the first element of the extension data list */
+BPGExtensionData *bpg_decoder_get_extension_data(BPGDecoderContext *s);
+
+/* return 0 if OK, < 0 if error */
+int bpg_decoder_get_info(BPGDecoderContext *s, BPGImageInfo *p);
+
+/* return 0 if OK, < 0 if error */
+int bpg_decoder_start(BPGDecoderContext *s, BPGDecoderOutputFormat out_fmt);
+
+/* return the frame delay for animations as a fraction (*pnum) / (*pden)
+   in seconds. In case there is no animation, 0 / 1 is returned. */
+void bpg_decoder_get_frame_duration(BPGDecoderContext *s, int *pnum, int *pden);
+
+/* return 0 if OK, < 0 if error */
+int bpg_decoder_get_line(BPGDecoderContext *s, void *buf);
+
+void bpg_decoder_close(BPGDecoderContext *s);
+
+/* only useful for low level access to the image data */
+uint8_t *bpg_decoder_get_data(BPGDecoderContext *s, int *pline_size, int plane);
+
+/* Get information from the start of the image data in 'buf' (at least
+   min(BPG_DECODER_INFO_BUF_SIZE, file_size) bytes must be given).
+
+   If pfirst_md != NULL, the extension data are also parsed and the
+   first element of the list is returned in *pfirst_md. The list must
+   be freed with bpg_decoder_free_extension_data().
+
+   BPGImageInfo.loop_count is only set if extension data are parsed.
+
+   Return 0 if OK, < 0 if unrecognized data. */
+int bpg_decoder_get_info_from_buf(BPGImageInfo *p, 
+                                  BPGExtensionData **pfirst_md,
+                                  const uint8_t *buf, int buf_len);
+/* Free the extension data returned by bpg_decoder_get_info_from_buf() */
+void bpg_decoder_free_extension_data(BPGExtensionData *first_md);
+
+#endif /* _LIBBPG_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/post.js	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,217 @@
+/*
+ * BPG Javascript decoder
+ *
+ * Copyright (c) 2014 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+window['BPGDecoder'] = function(ctx) {
+    this.ctx = ctx;
+    this['imageData'] = null;
+    this['onload'] = null;
+    this['frames'] = null;
+    this['loop_count'] = 0;
+}
+
+window['BPGDecoder'].prototype = {
+
+malloc: Module['cwrap']('malloc', 'number', [ 'number' ]),
+
+free: Module['cwrap']('free', 'void', [ 'number' ]),
+
+bpg_decoder_open: Module['cwrap']('bpg_decoder_open', 'number', [ ]),
+
+bpg_decoder_decode: Module['cwrap']('bpg_decoder_decode', 'number', [ 'number', 'array', 'number' ]),
+
+bpg_decoder_get_info: Module['cwrap']('bpg_decoder_get_info', 'number', [ 'number', 'number' ]),
+
+bpg_decoder_start: Module['cwrap']('bpg_decoder_start', 'number', [ 'number', 'number' ]),
+
+bpg_decoder_get_frame_duration: Module['cwrap']('bpg_decoder_get_frame_duration', 'void', [ 'number', 'number', 'number' ]),
+
+bpg_decoder_get_line: Module['cwrap']('bpg_decoder_get_line', 'number', [ 'number', 'number' ]),
+
+bpg_decoder_close: Module['cwrap']('bpg_decoder_close', 'void', [ 'number' ] ),
+
+load: function(url) 
+{
+    var request = new XMLHttpRequest();
+    var this1 = this;
+
+    request.open("get", url, true);
+    request.responseType = "arraybuffer";
+    request.onload = function(event) {
+        this1._onload(request, event);
+    };
+    request.send();
+},
+
+_onload: function(request, event)
+{
+    var data = request.response;
+    var array = new Uint8Array(data);
+    var img, w, h, img_info_buf, cimg, p0, rgba_line, w4, frame_count;
+    var heap8, heap16, heap32, dst, v, i, y, func, duration, frames, loop_count;
+
+    //    console.log("loaded " + data.byteLength + " bytes");
+
+    img = this.bpg_decoder_open();
+
+    if (this.bpg_decoder_decode(img, array, array.length) < 0) {
+        console.log("could not decode image");
+        return;
+    }
+    
+    img_info_buf = this.malloc(5 * 4);
+    this.bpg_decoder_get_info(img, img_info_buf);
+    /* extract the image info */
+    heap8 = Module['HEAPU8'];
+    heap16 = Module['HEAPU16'];
+    heap32 = Module['HEAPU32'];
+    w = heap32[img_info_buf >> 2];
+    h = heap32[(img_info_buf + 4) >> 2];
+    loop_count = heap16[(img_info_buf + 16) >> 1];
+    //    console.log("image: w=" + w + " h=" + h + " loop_count=" + loop_count);
+    
+    w4 = w * 4;
+    rgba_line = this.malloc(w4);
+
+    frame_count = 0;
+    frames = [];
+    for(;;) {
+        /* select RGBA32 output */
+        if (this.bpg_decoder_start(img, 1) < 0)
+            break;
+        this.bpg_decoder_get_frame_duration(img, img_info_buf, 
+                                            img_info_buf + 4);
+        duration = (heap32[img_info_buf >> 2] * 1000) / heap32[(img_info_buf + 4) >> 2];
+
+        cimg = this.ctx.createImageData(w, h);
+        dst = cimg.data;
+        p0 = 0;
+        for(y = 0; y < h; y++) {
+            this.bpg_decoder_get_line(img, rgba_line);
+            for(i = 0; i < w4; i = (i + 1) | 0) {
+                dst[p0] = heap8[(rgba_line + i) | 0] | 0;
+                p0 = (p0 + 1) | 0;
+            }
+        }
+        frames[frame_count++] = { 'img': cimg, 'duration': duration };
+    }
+
+    this.free(rgba_line);
+    this.free(img_info_buf);
+
+    this.bpg_decoder_close(img);
+
+    this['loop_count'] = loop_count;
+    this['frames'] = frames;
+    this['imageData'] = frames[0]['img'];
+
+    if (this['onload'])
+        this['onload']();
+}
+
+};
+
+window.onload = function() { 
+    var i, n, el, tab, tab1, url, dec, canvas, id, style, ctx, dw, dh;
+
+    /* put all images to load in a separate array */
+    tab = document.images;
+    n = tab.length;
+    tab1 = [];
+    for(i = 0; i < n; i++) {
+        el = tab[i];
+        url = el.src;
+        if (url.substr(-4,4).toLowerCase() == ".bpg") {
+            tab1[tab1.length] = el;
+        }
+    }
+
+    /* change the tags to canvas */
+    n = tab1.length;
+    for(i = 0; i < n; i++) {
+        el = tab1[i];
+        url = el.src;
+        canvas = document.createElement("canvas");
+
+        if (el.id)
+            canvas.id = el.id;
+        if (el.className)
+            canvas.className = el.className;
+
+        /* handle simple attribute cases to resize the canvas */
+        dw = el.getAttribute("width") | 0;
+        if (dw) {
+            canvas.style.width = dw + "px";
+        }
+        dh = el.getAttribute("height") | 0;
+        if (dh) {
+            canvas.style.height = dh + "px";
+        }
+
+        el.parentNode.replaceChild(canvas, el);
+
+        ctx = canvas.getContext("2d");
+        dec = new BPGDecoder(ctx);
+        dec.onload = (function(canvas, ctx) {
+            var dec = this;
+            var frames = this['frames'];
+            var imageData = frames[0]['img'];
+            function next_frame() {
+                var frame_index = dec.frame_index;
+                
+                /* compute next frame index */
+                if (++frame_index >= frames.length) {
+                    if (dec['loop_count'] == 0 ||
+                        dec.loop_counter < dec['loop_count']) {
+                        frame_index = 0;
+                        dec.loop_counter++;
+                    } else {
+                        frame_index = -1;
+                    }
+                }
+                if (frame_index >= 0) {
+                    dec.frame_index = frame_index;
+                    ctx.putImageData(frames[frame_index]['img'], 0, 0);
+                    setTimeout(next_frame, frames[frame_index]['duration']);
+                }
+            };
+
+            /* resize the canvas to the image size */
+            canvas.width = imageData.width;
+            canvas.height = imageData.height;
+
+            /* draw the image */
+            ctx.putImageData(imageData, 0, 0);
+
+            /* if it is an animation, add a timer to display the next frame */
+            if (frames.length > 1) {
+                dec.frame_index = 0;
+                dec.loop_counter = 0;
+                setTimeout(next_frame, frames[0]['duration']);
+            }
+        }).bind(dec, canvas, ctx);
+        dec.load(url);
+    }
+};
+
+/* end of dummy function enclosing all the emscripten code */
+})();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pre.js	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,25 @@
+/*
+ * BPG Javascript decoder
+ *
+ * Copyright (c) 2014 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+(function () {
+    var Module = {};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tmalloc.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,314 @@
+/*
+ * Tiny malloc
+ * 
+ * Copyright (c) 2014 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <string.h>
+#include <limits.h>
+#ifndef MALLOC_TEST
+#define NDEBUG
+#endif
+#include <assert.h>
+
+/*
+ * Note: only works for 32 bit pointers
+ */
+#define MALLOC_ALIGN 8
+#define MALLOC_BLOCK_SIZE 32
+
+#define STATE_FREE      0xaa
+#define STATE_ALLOCATED 0x55
+
+struct list_head {
+    struct list_head *prev, *next;
+};
+
+#define list_entry(el, type, member) \
+    ((type *)((uint8_t *)(el) - offsetof(type, member)))
+
+/* Note: the 'state' byte is stored just before the MemBlock header,
+   so at most 23 bytes can be allocated in a single block. */
+typedef struct MemBlock {
+    struct list_head link;
+    union {
+        uint8_t data[0] __attribute((aligned(MALLOC_ALIGN)));
+        struct list_head free_link;
+    } u;
+} MemBlock;
+
+void *sbrk(intptr_t increment);
+
+/* Invariants: the last block is always a free block. The last free
+   block is always the last block. */
+static struct list_head free_list;
+static struct list_head block_list;
+static uint8_t *mem_top;
+
+/* insert 'el' after prev */
+static void list_add(struct list_head *el, struct list_head *prev)
+{
+    struct list_head *next = prev->next;
+    prev->next = el;
+    el->prev = prev;
+    el->next = next;
+    next->prev = el;
+}
+
+static void list_del(struct list_head *el)
+{
+    struct list_head *prev, *next;
+    prev = el->prev;
+    next = el->next;
+    prev->next = next;
+    next->prev = prev;
+}
+
+static size_t get_alloc_size(size_t size)
+{
+    size = offsetof(MemBlock, u.data) + size;
+    /* one more byte for the state byte from the next block */
+    size = (size + MALLOC_BLOCK_SIZE) & ~(MALLOC_BLOCK_SIZE - 1);
+    return size;
+}
+
+/* Note: this size includes the 'state' byte from the next block */
+static size_t get_block_size(MemBlock *p)
+{
+    uint8_t *end;
+    struct list_head *el;
+    el = p->link.next;
+    if (el == &block_list)
+        end = mem_top;
+    else
+        end = (uint8_t *)list_entry(el, MemBlock, link);
+    return end - (uint8_t *)p;
+}
+
+static inline void set_block_state(MemBlock *p, int state)
+{
+    ((uint8_t *)p)[-1] = state;
+}
+
+static inline int get_block_state(const MemBlock *p)
+{
+    return ((const uint8_t *)p)[-1];
+}
+
+void *malloc(size_t size)
+{
+    MemBlock *p, *p1;
+    struct list_head *el;
+    size_t block_size;
+
+    if (size == 0 || size > (INT_MAX - 2 * MALLOC_BLOCK_SIZE))
+        return NULL;
+    if (free_list.next == NULL) {
+        /* init */
+        p = sbrk(MALLOC_BLOCK_SIZE * 2);
+        if (p == (void *)-1)
+            return NULL;
+        
+        mem_top = sbrk(0);
+        free_list.prev = free_list.next = &free_list;
+        block_list.prev = block_list.next = &block_list;
+        p++;
+        set_block_state(p, STATE_FREE);
+        list_add(&p->link, &block_list);
+        list_add(&p->u.free_link, &free_list);
+    }
+
+    size = get_alloc_size(size);
+    el = free_list.next;
+    for(;;) {
+        p = list_entry(el, MemBlock, u.free_link);
+        assert(get_block_state(p) == STATE_FREE);
+        block_size = get_block_size(p);
+        if (size < block_size) {
+            goto done1;
+        } else if (el == free_list.prev) {
+            /* last free block: increase its size */
+            if (sbrk(size + MALLOC_BLOCK_SIZE - block_size) == (void *)-1)
+                return NULL;
+            mem_top = sbrk(0);
+        done1:
+            p1 = (MemBlock *)((uint8_t *)p + size);
+            list_add(&p1->link, &p->link);
+            list_add(&p1->u.free_link, &p->u.free_link);
+            set_block_state(p1, STATE_FREE);
+            list_del(&p->u.free_link);
+        done:
+            set_block_state(p, STATE_ALLOCATED);
+            return p->u.data;
+        } else if (size == block_size) {
+            list_del(&p->u.free_link);
+            goto done;
+        }
+        el = el->next;
+    }
+}
+
+void free(void *ptr)
+{
+    MemBlock *p, *p1;
+    struct list_head *el;
+
+    if (!ptr)
+        return;
+    p = (MemBlock *)((uint8_t *)ptr - offsetof(MemBlock, u.data));
+    assert(get_block_state(p) == STATE_ALLOCATED);
+
+    /* mark as free */
+    list_add(&p->u.free_link, &free_list);
+    set_block_state(p, STATE_FREE);
+
+    /* merge with previous free block if possible */
+    el = p->link.prev;
+    if (el != &block_list) {
+        p1 = list_entry(el, MemBlock, link);
+        if (get_block_state(p1) == STATE_FREE) {
+            list_del(&p->link);
+            list_del(&p->u.free_link);
+            p = p1;
+        }
+    }
+    /* merge with next block if possible */
+    el = p->link.next;
+    if (el != &block_list) {
+        p1 = list_entry(el, MemBlock, link);
+        if (get_block_state(p1) == STATE_FREE) {
+            list_del(&p1->link);
+            /* keep p in the same position in free_list as p1 */
+            list_del(&p->u.free_link);
+            list_add(&p->u.free_link, &p1->u.free_link);
+            list_del(&p1->u.free_link);
+        }
+    }
+}
+
+void *realloc(void *ptr, size_t size)
+{
+    MemBlock *p;
+    void *ptr1;
+    size_t size1;
+
+    if (ptr == NULL) {
+        return malloc(size);
+    } else if (size == 0) {
+        free(ptr);
+        return NULL;
+    } else {
+        p = (MemBlock *)((uint8_t *)ptr - offsetof(MemBlock, u.data));
+        assert(get_block_state(p) == STATE_ALLOCATED);
+        ptr1 = malloc(size);
+        if (!ptr1)
+            return NULL;
+        /* Note: never the last block so it is valid */
+        size1 = (uint8_t *)list_entry(p->link.next, MemBlock, link) - 
+            p->u.data - 1;
+        if (size < size1)
+            size1 = size;
+        memcpy(ptr1, ptr, size1);
+        free(ptr);
+        return ptr1;
+    }
+}
+
+#ifdef MALLOC_TEST
+static void malloc_check(void)
+{
+    MemBlock *p;
+    struct list_head *el;
+    int state;
+
+    for(el = block_list.next; el != &block_list; el = el->next) {
+        p = list_entry(el, MemBlock, link);
+        state = get_block_state(p);
+        assert(state == STATE_FREE || state == STATE_ALLOCATED);
+        if (el->next != &block_list)
+            assert(el->next > el);
+    }
+    for(el = free_list.next; el != &free_list; el = el->next) {
+        p = list_entry(el, MemBlock, u.free_link);
+        assert(get_block_state(p) == STATE_FREE);
+    }
+
+    /* check invariant */
+    el = free_list.prev;
+    if (el != &free_list) {
+        p = list_entry(el, MemBlock, u.free_link);
+        assert(&p->link == block_list.prev);
+    }
+}
+
+static void malloc_dump(void)
+{
+    MemBlock *p;
+    struct list_head *el;
+    
+    printf("blocks:\n");
+    for(el = block_list.next; el != &block_list; el = el->next) {
+        p = list_entry(el, MemBlock, link);
+        printf("block: %p next=%p free=%d size=%u\n", p, p->link.next, 
+               get_block_state(p) == STATE_FREE,
+               (unsigned int)get_block_size(p));
+    }
+    printf("free list:\n");
+    for(el = free_list.next; el != &free_list; el = el->next) {
+        p = list_entry(el, MemBlock, u.free_link);
+        printf("block: %p size=%u\n", p, (unsigned int)get_block_size(p));
+    }
+}
+
+int main(int argc, char **argv)
+{
+    int i, n, j, size;
+    void **tab;
+
+    n = 100;
+    tab = malloc(sizeof(void *) * n);
+    memset(tab, 0, n * sizeof(void *));
+
+    for(i = 0; i < n * 1000; i++) {
+        j = random() % n;
+
+        free(tab[j]);
+
+        malloc_check();
+
+        size = random() % 500;
+        tab[j] = malloc(size);
+        memset(tab[j], 0x11, size);
+
+        malloc_check();
+    }
+
+    malloc_dump();
+
+    for(i = 0; i < n; i++) {
+        free(tab[i]);
+    }
+    return 0;
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/COPYING	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,343 @@
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year  name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
+
+This program is also available under a commercial proprietary license.
+For more information, contact us at license @ x265.com.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/README.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,71 @@
+= Mandatory Prerequisites =
+
+* GCC, MSVC (9, 10, 11, 12), Xcode or Intel C/C++
+* CMake 2.8.8 or later http://www.cmake.org
+* On linux, ccmake is helpful, usually a package named cmake-curses-gui 
+
+Note: MSVC12 requires cmake 2.8.11 or later
+
+
+= Optional Prerequisites =
+
+1. Yasm 1.2.0 or later, to compile assembly primitives (performance)
+
+   For Windows, download the latest yasm executable
+   http://yasm.tortall.net/Download.html and copy the EXE into
+   C:\Windows or somewhere else in your %PATH% that a 32-bit app (cmake)
+   can find it. If it is not in the path, you must manually tell cmake
+   where to find it.  Note: you do not need the vsyasm packages, x265
+   does not use them.  You only need the yasm executable itself.
+
+   On Linux, the packaged yasm may be older than 1.2, in which case
+   you will need to get the latest source and build it yourself.
+
+   Once YASM is properly installed, run cmake to regenerate projects. If you
+   do not see the below line in the cmake output, YASM is not in the PATH.
+
+   -- Found Yasm 1.3.0 to build assembly primitives
+
+   Now build the encoder and run x265 -V:
+
+   x265 [info]: using cpu capabilities: MMX, SSE2, ...
+
+   If cpu capabilities line says 'none!', then the encoder was built
+   without yasm.
+
+2. VisualLeakDetector (Windows Only)
+
+   Download from https://vld.codeplex.com/releases and install. May need
+   to re-login in order for it to be in your %PATH%.  Cmake will find it
+   and enable leak detection in debug builds without any additional work.
+
+   If VisualLeakDetector is not installed, cmake will complain a bit, but
+   it is completely harmless.
+
+
+= Build Instructions Linux =
+
+1. Use cmake to generate Makefiles: cmake ../source
+2. Build x265:                      make
+
+  Or use our shell script which runs cmake then opens the curses GUI to
+  configure build options
+
+1. cd build/linux ; ./make-Makefiles.bash
+2. make
+
+
+= Build Instructions Windows =
+
+We recommend you use one of the make-solutions.bat files in the appropriate
+build/ sub-folder for your preferred compiler.  They will open the cmake-gui
+to configure build options, click configure until no more red options remain,
+then click generate and exit.  There should now be an x265.sln file in the
+same folder, open this in Visual Studio and build it.
+
+= Version number considerations =
+
+Note that cmake will update X265_VERSION each time cmake runs, if you are
+building out of a Mercurial source repository.  If you are building out of
+a release source package, the version will not change.  If Mercurial is not
+found, the version will be "unknown".
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/linux/make-Makefiles.bash	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,3 @@
+#!/bin/bash
+# Run this from within a bash shell
+cmake -G "Unix Makefiles" ../../source && ccmake ../../source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/linux/multilib.sh	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+mkdir -p 8bit 10bit 12bit
+
+cd 12bit
+cmake ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF -DMAIN12=ON
+make ${MAKEFLAGS}
+
+cd ../10bit
+cmake ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF
+make ${MAKEFLAGS}
+
+cd ../8bit
+ln -sf ../10bit/libx265.a libx265_main10.a
+ln -sf ../12bit/libx265.a libx265_main12.a
+cmake ../../../source -DEXTRA_LIB="x265_main10.a;x265_main12.a" -DEXTRA_LINK_FLAGS=-L. -DLINKED_10BIT=ON -DLINKED_12BIT=ON
+make ${MAKEFLAGS}
+
+# rename the 8bit library, then combine all three into libx265.a
+mv libx265.a libx265_main.a
+
+uname=`uname`
+if [ "$uname" = "Linux" ]
+then
+
+# On Linux, we use GNU ar to combine the static libraries together
+ar -M <<EOF
+CREATE libx265.a
+ADDLIB libx265_main.a
+ADDLIB libx265_main10.a
+ADDLIB libx265_main12.a
+SAVE
+END
+EOF
+
+else
+
+# Mac/BSD libtool
+libtool -static -o libx265.a libx265_main.a libx265_main10.a libx265_main12.a 2>/dev/null
+
+fi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/msys/make-Makefiles.sh	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,3 @@
+#!/bin/sh
+# Run this from within an MSYS bash shell
+cmake -G "MSYS Makefiles" ../../source && cmake-gui ../../source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/msys/make-x86_64-w64-mingw32-Makefiles.sh	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+# This will generate a cross-compile environment, compiling an x86_64
+# Win64 target from a 32bit MinGW32 host environment.  If your MinGW
+# install is 64bit, you can use the native compiler batch file:
+# make-Makefiles.sh
+
+cmake -G "MSYS Makefiles" -DCMAKE_TOOLCHAIN_FILE=toolchain-x86_64-w64-mingw32.cmake ../../source && cmake-gui ../../source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/msys/multilib.sh	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+mkdir -p 8bit 10bit 12bit
+
+cd 12bit
+cmake -G "MSYS Makefiles" ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF -DMAIN12=ON
+make ${MAKEFLAGS}
+cp libx265.a ../8bit/libx265_main12.a
+
+cd ../10bit
+cmake -G "MSYS Makefiles" ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF
+make ${MAKEFLAGS}
+cp libx265.a ../8bit/libx265_main10.a
+
+cd ../8bit
+cmake -G "MSYS Makefiles" ../../../source -DEXTRA_LIB="x265_main10.a;x265_main12.a" -DEXTRA_LINK_FLAGS=-L. -DLINKED_10BIT=ON -DLINKED_12BIT=ON
+make ${MAKEFLAGS}
+
+# rename the 8bit library, then combine all three into libx265.a using GNU ar
+mv libx265.a libx265_main.a
+
+ar -M <<EOF
+CREATE libx265.a
+ADDLIB libx265_main.a
+ADDLIB libx265_main10.a
+ADDLIB libx265_main12.a
+SAVE
+END
+EOF
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/msys/toolchain-mingw32.cmake	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,9 @@
+# cmake -DCMAKE_TOOLCHAIN_FILE=toolchain-mingw32.cmake
+# this one is important
+SET(CMAKE_SYSTEM_NAME Windows)
+
+# specify the cross compiler
+SET(CMAKE_C_COMPILER   i686-w64-mingw32-gcc)
+SET(CMAKE_CXX_COMPILER i686-w64-mingw32-g++)
+SET(CMAKE_RC_COMPILER i686-w64-mingw32-windres)
+SET(CMAKE_ASM_YASM_COMPILER yasm)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/msys/toolchain-x86_64-w64-mingw32.cmake	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,6 @@
+SET(CMAKE_SYSTEM_NAME Windows)
+SET(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc)
+SET(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++)
+SET(CMAKE_RC_COMPILER x86_64-w64-mingw32-windres)
+SET(CMAKE_RANLIB x86_64-w64-mingw32-ranlib)
+SET(CMAKE_ASM_YASM_COMPILER yasm)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc10-x86/build-all.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,14 @@
+@echo off
+if "%VS100COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 10 not detected"
+  exit 1
+)
+if not exist x265.sln (
+  call make-solutions.bat
+)
+if exist x265.sln (
+  call "%VS100COMNTOOLS%\..\..\VC\vcvarsall.bat"
+  MSBuild /property:Configuration="Release" x265.sln
+  MSBuild /property:Configuration="Debug" x265.sln
+  MSBuild /property:Configuration="RelWithDebInfo" x265.sln
+)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc10-x86/make-solutions.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,6 @@
+@echo off
+::
+:: run this batch file to create a Visual Studio solution file for this project.
+:: See the cmake documentation for other generator targets
+::
+cmake -G "Visual Studio 10" ..\..\source && cmake-gui ..\..\source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc10-x86_64/build-all.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,14 @@
+@echo off
+if "%VS100COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 10 not detected"
+  exit 1
+)
+if not exist x265.sln (
+  call make-solutions.bat
+)
+if exist x265.sln (
+  call "%VS100COMNTOOLS%\..\..\VC\vcvarsall.bat"
+  MSBuild /property:Configuration="Release" x265.sln
+  MSBuild /property:Configuration="Debug" x265.sln
+  MSBuild /property:Configuration="RelWithDebInfo" x265.sln
+)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc10-x86_64/make-solutions.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,6 @@
+@echo off
+::
+:: run this batch file to create a Visual Studio solution file for this project.
+:: See the cmake documentation for other generator targets
+::
+cmake -G "Visual Studio 10 Win64" ..\..\source && cmake-gui ..\..\source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc10-x86_64/multilib.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,44 @@
+@echo off
+if "%VS100COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 10 not detected"
+  exit 1
+)
+
+call "%VS100COMNTOOLS%\..\..\VC\vcvarsall.bat"
+
+@mkdir 12bit
+@mkdir 10bit
+@mkdir 8bit
+
+@cd 12bit
+cmake -G "Visual Studio 10 Win64" ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF -DMAIN12=ON
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  copy/y Release\x265-static.lib ..\8bit\x265-static-main12.lib
+)
+
+@cd ..\10bit
+cmake -G "Visual Studio 10 Win64" ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  copy/y Release\x265-static.lib ..\8bit\x265-static-main10.lib
+)
+
+@cd ..\8bit
+if not exist x265-static-main10.lib (
+  msg "%username%" "10bit build failed"
+  exit 1
+)
+if not exist x265-static-main12.lib (
+  msg "%username%" "12bit build failed"
+  exit 1
+)
+cmake -G "Visual Studio 10 Win64" ../../../source -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib" -DLINKED_10BIT=ON -DLINKED_12BIT=ON
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  REM combine static libraries (ignore warnings caused by winxp.cpp hacks)
+  move Release\x265-static.lib x265-static-main.lib
+  LIB.EXE /ignore:4006 /ignore:4221 /OUT:Release\x265-static.lib x265-static-main.lib x265-static-main10.lib x265-static-main12.lib
+)
+
+pause
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc11-x86/build-all.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,14 @@
+@echo off
+if "%VS110COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 11 not detected"
+  exit 1
+)
+if not exist x265.sln (
+  call make-solutions.bat
+)
+if exist x265.sln (
+  call "%VS110COMNTOOLS%\..\..\VC\vcvarsall.bat"
+  MSBuild /property:Configuration="Release" x265.sln
+  MSBuild /property:Configuration="Debug" x265.sln
+  MSBuild /property:Configuration="RelWithDebInfo" x265.sln
+)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc11-x86/make-solutions.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,6 @@
+@echo off
+::
+:: run this batch file to create a Visual Studio solution file for this project.
+:: See the cmake documentation for other generator targets
+::
+cmake -G "Visual Studio 11" ..\..\source && cmake-gui ..\..\source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc11-x86_64/build-all.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,14 @@
+@echo off
+if "%VS110COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 11 not detected"
+  exit 1
+)
+if not exist x265.sln (
+  call make-solutions.bat
+)
+if exist x265.sln (
+  call "%VS110COMNTOOLS%\..\..\VC\vcvarsall.bat"
+  MSBuild /property:Configuration="Release" x265.sln
+  MSBuild /property:Configuration="Debug" x265.sln
+  MSBuild /property:Configuration="RelWithDebInfo" x265.sln
+)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc11-x86_64/make-solutions.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,6 @@
+@echo off
+::
+:: run this batch file to create a Visual Studio solution file for this project.
+:: See the cmake documentation for other generator targets
+::
+cmake -G "Visual Studio 11 Win64" ..\..\source && cmake-gui ..\..\source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc11-x86_64/multilib.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,44 @@
+@echo off
+if "%VS110COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 11 not detected"
+  exit 1
+)
+
+call "%VS110COMNTOOLS%\..\..\VC\vcvarsall.bat"
+
+@mkdir 12bit
+@mkdir 10bit
+@mkdir 8bit
+
+@cd 12bit
+cmake -G "Visual Studio 11 Win64" ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF -DMAIN12=ON
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  copy/y Release\x265-static.lib ..\8bit\x265-static-main12.lib
+)
+
+@cd ..\10bit
+cmake -G "Visual Studio 11 Win64" ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  copy/y Release\x265-static.lib ..\8bit\x265-static-main10.lib
+)
+
+@cd ..\8bit
+if not exist x265-static-main10.lib (
+  msg "%username%" "10bit build failed"
+  exit 1
+)
+if not exist x265-static-main12.lib (
+  msg "%username%" "12bit build failed"
+  exit 1
+)
+cmake -G "Visual Studio 11 Win64" ../../../source -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib" -DLINKED_10BIT=ON -DLINKED_12BIT=ON
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  REM combine static libraries (ignore warnings caused by winxp.cpp hacks)
+  move Release\x265-static.lib x265-static-main.lib
+  LIB.EXE /ignore:4006 /ignore:4221 /OUT:Release\x265-static.lib x265-static-main.lib x265-static-main10.lib x265-static-main12.lib
+)
+
+pause
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc12-x86/build-all.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,14 @@
+@echo off
+if "%VS120COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 12 not detected"
+  exit 1
+)
+if not exist x265.sln (
+  call make-solutions.bat
+)
+if exist x265.sln (
+  call "%VS120COMNTOOLS%\..\..\VC\vcvarsall.bat"
+  MSBuild /property:Configuration="Release" x265.sln
+  MSBuild /property:Configuration="Debug" x265.sln
+  MSBuild /property:Configuration="RelWithDebInfo" x265.sln
+)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc12-x86/make-solutions.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,6 @@
+@echo off
+::
+:: run this batch file to create a Visual Studio solution file for this project.
+:: See the cmake documentation for other generator targets
+::
+cmake -G "Visual Studio 12" ..\..\source && cmake-gui ..\..\source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc12-x86_64/build-all.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,14 @@
+@echo off
+if "%VS120COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 12 not detected"
+  exit 1
+)
+if not exist x265.sln (
+  call make-solutions.bat
+)
+if exist x265.sln (
+  call "%VS120COMNTOOLS%\..\..\VC\vcvarsall.bat"
+  MSBuild /property:Configuration="Release" x265.sln
+  MSBuild /property:Configuration="Debug" x265.sln
+  MSBuild /property:Configuration="RelWithDebInfo" x265.sln
+)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc12-x86_64/make-solutions.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,6 @@
+@echo off
+::
+:: run this batch file to create a Visual Studio solution file for this project.
+:: See the cmake documentation for other generator targets
+::
+cmake -G "Visual Studio 12 Win64" ..\..\source && cmake-gui ..\..\source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc12-x86_64/multilib.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,44 @@
+@echo off
+if "%VS120COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 12 not detected"
+  exit 1
+)
+
+call "%VS120COMNTOOLS%\..\..\VC\vcvarsall.bat"
+
+@mkdir 12bit
+@mkdir 10bit
+@mkdir 8bit
+
+@cd 12bit
+cmake -G "Visual Studio 12 Win64" ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF -DMAIN12=ON
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  copy/y Release\x265-static.lib ..\8bit\x265-static-main12.lib
+)
+
+@cd ..\10bit
+cmake -G "Visual Studio 12 Win64" ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  copy/y Release\x265-static.lib ..\8bit\x265-static-main10.lib
+)
+
+@cd ..\8bit
+if not exist x265-static-main10.lib (
+  msg "%username%" "10bit build failed"
+  exit 1
+)
+if not exist x265-static-main12.lib (
+  msg "%username%" "12bit build failed"
+  exit 1
+)
+cmake -G "Visual Studio 12 Win64" ../../../source -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib" -DLINKED_10BIT=ON -DLINKED_12BIT=ON
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  REM combine static libraries (ignore warnings caused by winxp.cpp hacks)
+  move Release\x265-static.lib x265-static-main.lib
+  LIB.EXE /ignore:4006 /ignore:4221 /OUT:Release\x265-static.lib x265-static-main.lib x265-static-main10.lib x265-static-main12.lib
+)
+
+pause
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc9-x86/build-all.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,14 @@
+@echo off
+if "%VS90COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 9 not detected"
+  exit 1
+)
+if not exist x265.sln (
+  call make-solutions.bat
+)
+if exist x265.sln (
+  call "%VS90COMNTOOLS%\..\..\VC\vcvarsall.bat"
+  MSBuild /property:Configuration="Release" x265.sln
+  MSBuild /property:Configuration="Debug" x265.sln
+  MSBuild /property:Configuration="RelWithDebInfo" x265.sln
+)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc9-x86/make-solutions.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,6 @@
+@echo off
+::
+:: run this batch file to create a Visual Studio solution file for this project.
+:: See the cmake documentation for other generator targets
+::
+cmake -G "Visual Studio 9 2008" ..\..\source && cmake-gui ..\..\source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc9-x86_64/build-all.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,14 @@
+@echo off
+if "%VS90COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 9 not detected"
+  exit 1
+)
+if not exist x265.sln (
+  call make-solutions.bat
+)
+if exist x265.sln (
+  call "%VS90COMNTOOLS%\..\..\VC\vcvarsall.bat"
+  MSBuild /property:Configuration="Release" x265.sln
+  MSBuild /property:Configuration="Debug" x265.sln
+  MSBuild /property:Configuration="RelWithDebInfo" x265.sln
+)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc9-x86_64/make-solutions.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,6 @@
+@echo off
+::
+:: run this batch file to create a Visual Studio solution file for this project.
+:: See the cmake documentation for other generator targets
+::
+cmake -G "Visual Studio 9 2008 Win64" ..\..\source && cmake-gui ..\..\source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/vc9-x86_64/multilib.bat	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,44 @@
+@echo off
+if "%VS90COMNTOOLS%" == "" (
+  msg "%username%" "Visual Studio 9 not detected"
+  exit 1
+)
+
+call "%VS90COMNTOOLS%\..\..\VC\vcvarsall.bat"
+
+@mkdir 12bit
+@mkdir 10bit
+@mkdir 8bit
+
+@cd 12bit
+cmake -G "Visual Studio 9 2008 Win64" ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF -DMAIN12=ON
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  copy/y Release\x265-static.lib ..\8bit\x265-static-main12.lib
+)
+
+@cd ..\10bit
+cmake -G "Visual Studio 9 2008 Win64" ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  copy/y Release\x265-static.lib ..\8bit\x265-static-main10.lib
+)
+
+@cd ..\8bit
+if not exist x265-static-main10.lib (
+  msg "%username%" "10bit build failed"
+  exit 1
+)
+if not exist x265-static-main12.lib (
+  msg "%username%" "12bit build failed"
+  exit 1
+)
+cmake -G "Visual Studio 9 2008 Win64" ../../../source -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib" -DLINKED_10BIT=ON -DLINKED_12BIT=ON
+if exist x265.sln (
+  MSBuild /property:Configuration="Release" x265.sln
+  REM combine static libraries (ignore warnings caused by winxp.cpp hacks)
+  move Release\x265-static.lib x265-static-main.lib
+  LIB.EXE /ignore:4006 /ignore:4221 /OUT:Release\x265-static.lib x265-static-main.lib x265-static-main10.lib x265-static-main12.lib
+)
+
+pause
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/build/xcode/make-project.sh	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2 @@
+#!/bin/sh
+cmake -G "Xcode" ../../source && ccmake ../../source
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/intra/intra-16x16.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,561 @@
+--- 16x16, Mode= 2 [F]---
+[ 0]: Fact= 0:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,            *
+[ 1]: Fact= 0:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,            *
+[ 2]: Fact= 0:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,            *
+[ 3]: Fact= 0:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,            *
+[ 4]: Fact= 0:	 -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,            *
+[ 5]: Fact= 0:	 -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,            *
+[ 6]: Fact= 0:	 -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,            *
+[ 7]: Fact= 0:	 -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,            *
+[ 8]: Fact= 0:	-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,            *
+[ 9]: Fact= 0:	-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,            *
+[10]: Fact= 0:	-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,            *
+[11]: Fact= 0:	-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,            *
+[12]: Fact= 0:	-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,            *
+[13]: Fact= 0:	-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,            *
+[14]: Fact= 0:	-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,            *
+[15]: Fact= 0:	-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+--- 16x16, Mode= 3 [F]---
+[ 0]: Fact=26:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 1]: Fact=20:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 2]: Fact=14:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[ 3]: Fact= 8:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[ 4]: Fact= 2:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[ 5]: Fact=28:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[ 6]: Fact=22:	 -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[ 7]: Fact=16:	 -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[ 8]: Fact=10:	 -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[ 9]: Fact= 4:	 -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[10]: Fact=30:	 -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[11]: Fact=24:	-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[12]: Fact=18:	-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[13]: Fact=12:	-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[14]: Fact= 6:	-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[15]: Fact= 0:	-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,            *
+--- 16x16, Mode= 4 [F]---
+[ 0]: Fact=21:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 1]: Fact=10:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 2]: Fact=31:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 3]: Fact=20:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[ 4]: Fact= 9:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[ 5]: Fact=30:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[ 6]: Fact=19:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[ 7]: Fact= 8:	 -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[ 8]: Fact=29:	 -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[ 9]: Fact=18:	 -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[10]: Fact= 7:	 -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[11]: Fact=28:	 -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[12]: Fact=17:	 -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[13]: Fact= 6:	-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[14]: Fact=27:	-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[15]: Fact=16:	-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+--- 16x16, Mode= 5 [F]---
+[ 0]: Fact=17:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 1]: Fact= 2:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 2]: Fact=19:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 3]: Fact= 4:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[ 4]: Fact=21:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[ 5]: Fact= 6:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[ 6]: Fact=23:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[ 7]: Fact= 8:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[ 8]: Fact=25:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[ 9]: Fact=10:	 -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[10]: Fact=27:	 -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[11]: Fact=12:	 -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[12]: Fact=29:	 -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[13]: Fact=14:	 -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[14]: Fact=31:	 -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[15]: Fact=16:	 -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+--- 16x16, Mode= 6 [F]---
+[ 0]: Fact=13:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 1]: Fact=26:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 2]: Fact= 7:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 3]: Fact=20:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 4]: Fact= 1:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[ 5]: Fact=14:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[ 6]: Fact=27:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[ 7]: Fact= 8:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[ 8]: Fact=21:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[ 9]: Fact= 2:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[10]: Fact=15:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[11]: Fact=28:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[12]: Fact= 9:	 -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[13]: Fact=22:	 -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[14]: Fact= 3:	 -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[15]: Fact=16:	 -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+--- 16x16, Mode= 7 [F]---
+[ 0]: Fact= 9:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 1]: Fact=18:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 2]: Fact=27:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 3]: Fact= 4:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 4]: Fact=13:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 5]: Fact=22:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 6]: Fact=31:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 7]: Fact= 8:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[ 8]: Fact=17:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[ 9]: Fact=26:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[10]: Fact= 3:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[11]: Fact=12:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[12]: Fact=21:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[13]: Fact=30:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[14]: Fact= 7:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[15]: Fact=16:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+--- 16x16, Mode= 8 [F]---
+[ 0]: Fact= 5:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 1]: Fact=10:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 2]: Fact=15:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 3]: Fact=20:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 4]: Fact=25:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 5]: Fact=30:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 6]: Fact= 3:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 7]: Fact= 8:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 8]: Fact=13:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[ 9]: Fact=18:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[10]: Fact=23:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[11]: Fact=28:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[12]: Fact= 1:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[13]: Fact= 6:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[14]: Fact=11:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[15]: Fact=16:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+--- 16x16, Mode= 9 [ ]---
+[ 0]: Fact= 2:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 1]: Fact= 4:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 2]: Fact= 6:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 3]: Fact= 8:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 4]: Fact=10:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 5]: Fact=12:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 6]: Fact=14:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 7]: Fact=16:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 8]: Fact=18:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[ 9]: Fact=20:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[10]: Fact=22:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[11]: Fact=24:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[12]: Fact=26:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[13]: Fact=28:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[14]: Fact=30:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[15]: Fact= 0:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,            *
+--- 16x16, Mode=10 [ ]---
+[ 0]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[ 1]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[ 2]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[ 3]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[ 4]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[ 5]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[ 6]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[ 7]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[ 8]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[ 9]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[10]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[11]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[12]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[13]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[14]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+[15]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,            *
+--- 16x16, Mode=11 [ ]---
+[ 0]: Fact=30:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 1]: Fact=28:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 2]: Fact=26:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 3]: Fact=24:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 4]: Fact=22:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 5]: Fact=20:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 6]: Fact=18:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 7]: Fact=16:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 8]: Fact=14:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 9]: Fact=12:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[10]: Fact=10:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[11]: Fact= 8:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[12]: Fact= 6:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[13]: Fact= 4:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[14]: Fact= 2:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[15]: Fact= 0:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,            *
+--- 16x16, Mode=12 [F]---
+[ 0]: Fact=27:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 1]: Fact=22:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 2]: Fact=17:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 3]: Fact=12:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 4]: Fact= 7:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 5]: Fact= 2:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 6]: Fact=29:	  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 7]: Fact=24:	  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 8]: Fact=19:	  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 9]: Fact=14:	  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[10]: Fact= 9:	  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[11]: Fact= 4:	  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[12]: Fact=31:	 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[13]: Fact=26:	 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[14]: Fact=21:	 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[15]: Fact=16:	 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+--- 16x16, Mode=13 [F]---
+[ 0]: Fact=23:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 1]: Fact=14:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 2]: Fact= 5:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 3]: Fact=28:	  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 4]: Fact=19:	  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 5]: Fact=10:	  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 6]: Fact= 1:	  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 7]: Fact=24:	  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[ 8]: Fact=15:	  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[ 9]: Fact= 6:	  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[10]: Fact=29:	 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[11]: Fact=20:	 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[12]: Fact=11:	 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[13]: Fact= 2:	 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[14]: Fact=25:	 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[15]: Fact=16:	 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+--- 16x16, Mode=14 [F]---
+[ 0]: Fact=19:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 1]: Fact= 6:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 2]: Fact=25:	  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 3]: Fact=12:	  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 4]: Fact=31:	  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[ 5]: Fact=18:	  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[ 6]: Fact= 5:	  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[ 7]: Fact=24:	  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[ 8]: Fact=11:	  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[ 9]: Fact=30:	 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[10]: Fact=17:	 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[11]: Fact= 4:	 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[12]: Fact=23:	 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[13]: Fact=10:	 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[14]: Fact=29:	 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[15]: Fact=16:	 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+--- 16x16, Mode=15 [F]---
+[ 0]: Fact=15:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 1]: Fact=30:	  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 2]: Fact=13:	  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 3]: Fact=28:	  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[ 4]: Fact=11:	  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[ 5]: Fact=26:	  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[ 6]: Fact= 9:	  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[ 7]: Fact=24:	  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[ 8]: Fact= 7:	  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[ 9]: Fact=22:	  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[10]: Fact= 5:	  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[11]: Fact=20:	 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[12]: Fact= 3:	 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[13]: Fact=18:	 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[14]: Fact= 1:	 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[15]: Fact=16:	 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+--- 16x16, Mode=16 [F]---
+[ 0]: Fact=11:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 1]: Fact=22:	  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 2]: Fact= 1:	  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 3]: Fact=12:	  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[ 4]: Fact=23:	  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[ 5]: Fact= 2:	  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[ 6]: Fact=13:	  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[ 7]: Fact=24:	  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[ 8]: Fact= 3:	  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[ 9]: Fact=14:	  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[10]: Fact=25:	 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[11]: Fact= 4:	 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[12]: Fact=15:	 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[13]: Fact=26:	 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[14]: Fact= 5:	 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[15]: Fact=16:	 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6,        *
+--- 16x16, Mode=17 [F]---
+[ 0]: Fact= 6:	  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[ 1]: Fact=12:	  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[ 2]: Fact=18:	  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[ 3]: Fact=24:	  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[ 4]: Fact=30:	  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[ 5]: Fact= 4:	  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[ 6]: Fact=10:	  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[ 7]: Fact=16:	  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 8]: Fact=22:	  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 9]: Fact=28:	 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[10]: Fact= 2:	 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[11]: Fact= 8:	 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[12]: Fact=14:	 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6,        *
+[13]: Fact=20:	 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5,        *
+[14]: Fact=26:	 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4,        *
+[15]: Fact= 0:	 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3,            *
+--- 16x16, Mode=18 [F]---
+[ 0]: Fact= 0:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,            *
+[ 1]: Fact= 0:	 -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,            *
+[ 2]: Fact= 0:	 -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,            *
+[ 3]: Fact= 0:	 -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,            *
+[ 4]: Fact= 0:	 -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,            *
+[ 5]: Fact= 0:	 -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,            *
+[ 6]: Fact= 0:	 -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,            *
+[ 7]: Fact= 0:	 -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,            *
+[ 8]: Fact= 0:	 -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,            *
+[ 9]: Fact= 0:	 -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,            *
+[10]: Fact= 0:	-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,            *
+[11]: Fact= 0:	-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,            *
+[12]: Fact= 0:	-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,            *
+[13]: Fact= 0:	-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,            *
+[14]: Fact= 0:	-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,            *
+[15]: Fact= 0:	-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,            *
+--- 16x16, Mode=19 [F]---
+[ 0]: Fact= 6:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 1]: Fact=12:	 -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 2]: Fact=18:	 -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[ 3]: Fact=24:	 -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[ 4]: Fact=30:	 -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[ 5]: Fact= 4:	 -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[ 6]: Fact=10:	 -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[ 7]: Fact=16:	 -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 8]: Fact=22:	 -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 9]: Fact=28:	-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[10]: Fact= 2:	-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[11]: Fact= 8:	-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[12]: Fact=14:	-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,        *
+[13]: Fact=20:	-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,        *
+[14]: Fact=26:	-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,        *
+[15]: Fact= 0:	-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,            *
+--- 16x16, Mode=20 [F]---
+[ 0]: Fact=11:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 1]: Fact=22:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 2]: Fact= 1:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 3]: Fact=12:	 -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[ 4]: Fact=23:	 -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[ 5]: Fact= 2:	 -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[ 6]: Fact=13:	 -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[ 7]: Fact=24:	 -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[ 8]: Fact= 3:	 -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[ 9]: Fact=14:	 -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[10]: Fact=25:	-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[11]: Fact= 4:	-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[12]: Fact=15:	-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[13]: Fact=26:	-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[14]: Fact= 5:	-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[15]: Fact=16:	-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,        *
+--- 16x16, Mode=21 [F]---
+[ 0]: Fact=15:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 1]: Fact=30:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 2]: Fact=13:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 3]: Fact=28:	 -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[ 4]: Fact=11:	 -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[ 5]: Fact=26:	 -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[ 6]: Fact= 9:	 -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[ 7]: Fact=24:	 -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[ 8]: Fact= 7:	 -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[ 9]: Fact=22:	 -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[10]: Fact= 5:	 -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[11]: Fact=20:	-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[12]: Fact= 3:	-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[13]: Fact=18:	-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[14]: Fact= 1:	-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[15]: Fact=16:	-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+--- 16x16, Mode=22 [F]---
+[ 0]: Fact=19:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 1]: Fact= 6:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 2]: Fact=25:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 3]: Fact=12:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 4]: Fact=31:	 -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[ 5]: Fact=18:	 -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[ 6]: Fact= 5:	 -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[ 7]: Fact=24:	 -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[ 8]: Fact=11:	 -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[ 9]: Fact=30:	-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[10]: Fact=17:	-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[11]: Fact= 4:	-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[12]: Fact=23:	-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[13]: Fact=10:	-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[14]: Fact=29:	-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[15]: Fact=16:	-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+--- 16x16, Mode=23 [F]---
+[ 0]: Fact=23:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 1]: Fact=14:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 2]: Fact= 5:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 3]: Fact=28:	 -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 4]: Fact=19:	 -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 5]: Fact=10:	 -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 6]: Fact= 1:	 -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 7]: Fact=24:	 -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[ 8]: Fact=15:	 -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[ 9]: Fact= 6:	 -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[10]: Fact=29:	-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[11]: Fact=20:	-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[12]: Fact=11:	-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[13]: Fact= 2:	-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[14]: Fact=25:	-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[15]: Fact=16:	-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+--- 16x16, Mode=24 [F]---
+[ 0]: Fact=27:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 1]: Fact=22:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 2]: Fact=17:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 3]: Fact=12:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 4]: Fact= 7:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 5]: Fact= 2:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 6]: Fact=29:	 -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 7]: Fact=24:	 -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 8]: Fact=19:	 -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[ 9]: Fact=14:	 -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[10]: Fact= 9:	 -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[11]: Fact= 4:	 -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+[12]: Fact=31:	-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[13]: Fact=26:	-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[14]: Fact=21:	-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[15]: Fact=16:	-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+--- 16x16, Mode=25 [ ]---
+[ 0]: Fact=30:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 1]: Fact=28:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 2]: Fact=26:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 3]: Fact=24:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 4]: Fact=22:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 5]: Fact=20:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 6]: Fact=18:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 7]: Fact=16:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 8]: Fact=14:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[ 9]: Fact=12:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[10]: Fact=10:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[11]: Fact= 8:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[12]: Fact= 6:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[13]: Fact= 4:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[14]: Fact= 2:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,        *
+[15]: Fact= 0:	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,            *
+--- 16x16, Mode=26 [ ]---
+[ 0]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[ 1]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[ 2]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[ 3]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[ 4]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[ 5]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[ 6]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[ 7]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[ 8]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[ 9]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[10]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[11]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[12]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[13]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[14]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[15]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+--- 16x16, Mode=27 [ ]---
+[ 0]: Fact= 2:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 1]: Fact= 4:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 2]: Fact= 6:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 3]: Fact= 8:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 4]: Fact=10:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 5]: Fact=12:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 6]: Fact=14:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 7]: Fact=16:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 8]: Fact=18:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 9]: Fact=20:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[10]: Fact=22:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[11]: Fact=24:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[12]: Fact=26:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[13]: Fact=28:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[14]: Fact=30:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[15]: Fact= 0:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,            *
+--- 16x16, Mode=28 [F]---
+[ 0]: Fact= 5:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 1]: Fact=10:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 2]: Fact=15:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 3]: Fact=20:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 4]: Fact=25:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 5]: Fact=30:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 6]: Fact= 3:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 7]: Fact= 8:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 8]: Fact=13:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 9]: Fact=18:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[10]: Fact=23:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[11]: Fact=28:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[12]: Fact= 1:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[13]: Fact= 6:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[14]: Fact=11:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[15]: Fact=16:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+--- 16x16, Mode=29 [F]---
+[ 0]: Fact= 9:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 1]: Fact=18:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 2]: Fact=27:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 3]: Fact= 4:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 4]: Fact=13:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 5]: Fact=22:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 6]: Fact=31:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 7]: Fact= 8:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[ 8]: Fact=17:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[ 9]: Fact=26:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[10]: Fact= 3:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[11]: Fact=12:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[12]: Fact=21:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[13]: Fact=30:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[14]: Fact= 7:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,        *
+[15]: Fact=16:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,        *
+--- 16x16, Mode=30 [F]---
+[ 0]: Fact=13:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 1]: Fact=26:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 2]: Fact= 7:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 3]: Fact=20:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 4]: Fact= 1:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[ 5]: Fact=14:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[ 6]: Fact=27:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[ 7]: Fact= 8:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[ 8]: Fact=21:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[ 9]: Fact= 2:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,        *
+[10]: Fact=15:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,        *
+[11]: Fact=28:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,        *
+[12]: Fact= 9:	  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,        *
+[13]: Fact=22:	  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,        *
+[14]: Fact= 3:	  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,        *
+[15]: Fact=16:	  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,        *
+--- 16x16, Mode=31 [F]---
+[ 0]: Fact=17:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 1]: Fact= 2:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 2]: Fact=19:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 3]: Fact= 4:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[ 4]: Fact=21:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[ 5]: Fact= 6:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[ 6]: Fact=23:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[ 7]: Fact= 8:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,        *
+[ 8]: Fact=25:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,        *
+[ 9]: Fact=10:	  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,        *
+[10]: Fact=27:	  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,        *
+[11]: Fact=12:	  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,        *
+[12]: Fact=29:	  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,        *
+[13]: Fact=14:	  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,        *
+[14]: Fact=31:	  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,        *
+[15]: Fact=16:	  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,        *
+--- 16x16, Mode=32 [F]---
+[ 0]: Fact=21:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 1]: Fact=10:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 2]: Fact=31:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 3]: Fact=20:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[ 4]: Fact= 9:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[ 5]: Fact=30:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[ 6]: Fact=19:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,        *
+[ 7]: Fact= 8:	  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,        *
+[ 8]: Fact=29:	  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,        *
+[ 9]: Fact=18:	  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,        *
+[10]: Fact= 7:	  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,        *
+[11]: Fact=28:	  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,        *
+[12]: Fact=17:	  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,        *
+[13]: Fact= 6:	 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,        *
+[14]: Fact=27:	 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,        *
+[15]: Fact=16:	 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,        *
+--- 16x16, Mode=33 [F]---
+[ 0]: Fact=26:	  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,        *
+[ 1]: Fact=20:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,        *
+[ 2]: Fact=14:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,        *
+[ 3]: Fact= 8:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,        *
+[ 4]: Fact= 2:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,        *
+[ 5]: Fact=28:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,        *
+[ 6]: Fact=22:	  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,        *
+[ 7]: Fact=16:	  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,        *
+[ 8]: Fact=10:	  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,        *
+[ 9]: Fact= 4:	  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,        *
+[10]: Fact=30:	  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,        *
+[11]: Fact=24:	 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,        *
+[12]: Fact=18:	 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,        *
+[13]: Fact=12:	 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,        *
+[14]: Fact= 6:	 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,        *
+[15]: Fact= 0:	 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,            *
+--- 16x16, Mode=34 [F]---
+[ 0]: Fact= 0:	  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,            *
+[ 1]: Fact= 0:	  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,            *
+[ 2]: Fact= 0:	  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,            *
+[ 3]: Fact= 0:	  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,            *
+[ 4]: Fact= 0:	  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,            *
+[ 5]: Fact= 0:	  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,            *
+[ 6]: Fact= 0:	  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,            *
+[ 7]: Fact= 0:	  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,            *
+[ 8]: Fact= 0:	 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,            *
+[ 9]: Fact= 0:	 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,            *
+[10]: Fact= 0:	 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,            *
+[11]: Fact= 0:	 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,            *
+[12]: Fact= 0:	 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,            *
+[13]: Fact= 0:	 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,            *
+[14]: Fact= 0:	 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,            *
+[15]: Fact= 0:	 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/intra/intra-32x32.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1089 @@
+--- 32x32, Mode= 2 [F]---
+[ 0]: Fact= 0:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,            *
+[ 1]: Fact= 0:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,            *
+[ 2]: Fact= 0:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,            *
+[ 3]: Fact= 0:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,            *
+[ 4]: Fact= 0:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,            *
+[ 5]: Fact= 0:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,            *
+[ 6]: Fact= 0:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,            *
+[ 7]: Fact= 0:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,            *
+[ 8]: Fact= 0:  -10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,            *
+[ 9]: Fact= 0:  -11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,            *
+[10]: Fact= 0:  -12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,            *
+[11]: Fact= 0:  -13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,            *
+[12]: Fact= 0:  -14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,            *
+[13]: Fact= 0:  -15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,            *
+[14]: Fact= 0:  -16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,            *
+[15]: Fact= 0:  -17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,            *
+[16]: Fact= 0:  -18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,            *
+[17]: Fact= 0:  -19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,            *
+[18]: Fact= 0:  -20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,            *
+[19]: Fact= 0:  -21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,            *
+[20]: Fact= 0:  -22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,            *
+[21]: Fact= 0:  -23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,            *
+[22]: Fact= 0:  -24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,            *
+[23]: Fact= 0:  -25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,            *
+[24]: Fact= 0:  -26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,            *
+[25]: Fact= 0:  -27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,            *
+[26]: Fact= 0:  -28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,            *
+[27]: Fact= 0:  -29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,            *
+[28]: Fact= 0:  -30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,            *
+[29]: Fact= 0:  -31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,            *
+[30]: Fact= 0:  -32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,            *
+[31]: Fact= 0:  -33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,            *
+--- 32x32, Mode= 3 [F]---
+[ 0]: Fact=26:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 1]: Fact=20:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 2]: Fact=14:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[ 3]: Fact= 8:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[ 4]: Fact= 2:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[ 5]: Fact=28:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[ 6]: Fact=22:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[ 7]: Fact=16:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,        *
+[ 8]: Fact=10:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[ 9]: Fact= 4:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,        *
+[10]: Fact=30:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,        *
+[11]: Fact=24:  -10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,        *
+[12]: Fact=18:  -11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,        *
+[13]: Fact=12:  -12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,        *
+[14]: Fact= 6:  -13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,        *
+[15]: Fact= 0:  -14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,            *
+[16]: Fact=26:  -14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,        *
+[17]: Fact=20:  -15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,        *
+[18]: Fact=14:  -16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,        *
+[19]: Fact= 8:  -17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,        *
+[20]: Fact= 2:  -18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,        *
+[21]: Fact=28:  -18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,        *
+[22]: Fact=22:  -19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,        *
+[23]: Fact=16:  -20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,        *
+[24]: Fact=10:  -21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,        *
+[25]: Fact= 4:  -22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,        *
+[26]: Fact=30:  -22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,        *
+[27]: Fact=24:  -23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,        *
+[28]: Fact=18:  -24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,        *
+[29]: Fact=12:  -25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,        *
+[30]: Fact= 6:  -26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,        *
+[31]: Fact= 0:  -27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,
+--- 32x32, Mode= 4 [F]---
+[ 0]: Fact=21:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 1]: Fact=10:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 2]: Fact=31:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 3]: Fact=20:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[ 4]: Fact= 9:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[ 5]: Fact=30:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[ 6]: Fact=19:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[ 7]: Fact= 8:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[ 8]: Fact=29:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[ 9]: Fact=18:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,        *
+[10]: Fact= 7:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[11]: Fact=28:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[12]: Fact=17:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,        *
+[13]: Fact= 6:  -10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,        *
+[14]: Fact=27:  -10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,        *
+[15]: Fact=16:  -11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,        *
+[16]: Fact= 5:  -12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,        *
+[17]: Fact=26:  -12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,        *
+[18]: Fact=15:  -13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,        *
+[19]: Fact= 4:  -14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,        *
+[20]: Fact=25:  -14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,        *
+[21]: Fact=14:  -15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,        *
+[22]: Fact= 3:  -16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,        *
+[23]: Fact=24:  -16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,        *
+[24]: Fact=13:  -17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,        *
+[25]: Fact= 2:  -18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,        *
+[26]: Fact=23:  -18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,        *
+[27]: Fact=12:  -19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,        *
+[28]: Fact= 1:  -20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,        *
+[29]: Fact=22:  -20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,        *
+[30]: Fact=11:  -21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,        *
+[31]: Fact= 0:  -22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,            *
+--- 32x32, Mode= 5 [F]---
+[ 0]: Fact=17:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 1]: Fact= 2:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 2]: Fact=19:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 3]: Fact= 4:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[ 4]: Fact=21:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[ 5]: Fact= 6:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[ 6]: Fact=23:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[ 7]: Fact= 8:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[ 8]: Fact=25:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[ 9]: Fact=10:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[10]: Fact=27:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[11]: Fact=12:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,        *
+[12]: Fact=29:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,        *
+[13]: Fact=14:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[14]: Fact=31:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[15]: Fact=16:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,        *
+[16]: Fact= 1:  -10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,        *
+[17]: Fact=18:  -10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,        *
+[18]: Fact= 3:  -11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,        *
+[19]: Fact=20:  -11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,        *
+[20]: Fact= 5:  -12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,        *
+[21]: Fact=22:  -12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,        *
+[22]: Fact= 7:  -13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,        *
+[23]: Fact=24:  -13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,        *
+[24]: Fact= 9:  -14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,        *
+[25]: Fact=26:  -14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,        *
+[26]: Fact=11:  -15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,        *
+[27]: Fact=28:  -15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,        *
+[28]: Fact=13:  -16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,        *
+[29]: Fact=30:  -16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,        *
+[30]: Fact=15:  -17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,        *
+[31]: Fact= 0:  -18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,            *
+--- 32x32, Mode= 6 [F]---
+[ 0]: Fact=13:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 1]: Fact=26:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 2]: Fact= 7:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 3]: Fact=20:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 4]: Fact= 1:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[ 5]: Fact=14:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[ 6]: Fact=27:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[ 7]: Fact= 8:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[ 8]: Fact=21:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[ 9]: Fact= 2:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[10]: Fact=15:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[11]: Fact=28:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[12]: Fact= 9:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[13]: Fact=22:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[14]: Fact= 3:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,        *
+[15]: Fact=16:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,        *
+[16]: Fact=29:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,        *
+[17]: Fact=10:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[18]: Fact=23:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[19]: Fact= 4:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,        *
+[20]: Fact=17:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,        *
+[21]: Fact=30:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,        *
+[22]: Fact=11:  -10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,        *
+[23]: Fact=24:  -10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,        *
+[24]: Fact= 5:  -11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,        *
+[25]: Fact=18:  -11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,        *
+[26]: Fact=31:  -11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,        *
+[27]: Fact=12:  -12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,        *
+[28]: Fact=25:  -12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,        *
+[29]: Fact= 6:  -13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,        *
+[30]: Fact=19:  -13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,        *
+[31]: Fact= 0:  -14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,            *
+--- 32x32, Mode= 7 [F]---
+[ 0]: Fact= 9:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 1]: Fact=18:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 2]: Fact=27:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 3]: Fact= 4:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 4]: Fact=13:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 5]: Fact=22:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 6]: Fact=31:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 7]: Fact= 8:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[ 8]: Fact=17:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[ 9]: Fact=26:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[10]: Fact= 3:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[11]: Fact=12:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[12]: Fact=21:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[13]: Fact=30:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[14]: Fact= 7:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[15]: Fact=16:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[16]: Fact=25:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[17]: Fact= 2:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[18]: Fact=11:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[19]: Fact=20:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[20]: Fact=29:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,        *
+[21]: Fact= 6:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,        *
+[22]: Fact=15:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,        *
+[23]: Fact=24:   -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,        *
+[24]: Fact= 1:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[25]: Fact=10:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[26]: Fact=19:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[27]: Fact=28:   -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,        *
+[28]: Fact= 5:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,        *
+[29]: Fact=14:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,        *
+[30]: Fact=23:   -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,        *
+[31]: Fact= 0:  -10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,            *
+--- 32x32, Mode= 8 [F]---
+[ 0]: Fact= 5:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 1]: Fact=10:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 2]: Fact=15:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 3]: Fact=20:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 4]: Fact=25:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 5]: Fact=30:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 6]: Fact= 3:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 7]: Fact= 8:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 8]: Fact=13:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[ 9]: Fact=18:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[10]: Fact=23:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[11]: Fact=28:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[12]: Fact= 1:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[13]: Fact= 6:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[14]: Fact=11:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[15]: Fact=16:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[16]: Fact=21:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[17]: Fact=26:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[18]: Fact=31:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,        *
+[19]: Fact= 4:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[20]: Fact= 9:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[21]: Fact=14:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[22]: Fact=19:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[23]: Fact=24:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[24]: Fact=29:   -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,        *
+[25]: Fact= 2:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[26]: Fact= 7:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[27]: Fact=12:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[28]: Fact=17:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[29]: Fact=22:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[30]: Fact=27:   -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,        *
+[31]: Fact= 0:   -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,            *
+--- 32x32, Mode= 9 [F]---
+[ 0]: Fact= 2:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 1]: Fact= 4:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 2]: Fact= 6:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 3]: Fact= 8:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 4]: Fact=10:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 5]: Fact=12:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 6]: Fact=14:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 7]: Fact=16:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 8]: Fact=18:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[ 9]: Fact=20:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[10]: Fact=22:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[11]: Fact=24:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[12]: Fact=26:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[13]: Fact=28:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[14]: Fact=30:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,        *
+[15]: Fact= 0:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,            *
+[16]: Fact= 2:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[17]: Fact= 4:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[18]: Fact= 6:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[19]: Fact= 8:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[20]: Fact=10:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[21]: Fact=12:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[22]: Fact=14:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[23]: Fact=16:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[24]: Fact=18:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[25]: Fact=20:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[26]: Fact=22:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[27]: Fact=24:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[28]: Fact=26:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[29]: Fact=28:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[30]: Fact=30:   -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,        *
+[31]: Fact= 0:   -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,            *
+--- 32x32, Mode=10 [ ]---
+[ 0]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[ 1]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[ 2]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[ 3]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[ 4]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[ 5]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[ 6]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[ 7]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[ 8]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[ 9]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[10]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[11]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[12]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[13]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[14]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[15]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[16]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[17]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[18]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[19]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[20]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[21]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[22]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[23]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[24]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[25]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[26]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[27]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[28]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[29]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[30]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+[31]: Fact= 0:   -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,            *
+--- 32x32, Mode=11 [F]---
+[ 0]: Fact=30:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 1]: Fact=28:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 2]: Fact=26:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 3]: Fact=24:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 4]: Fact=22:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 5]: Fact=20:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 6]: Fact=18:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 7]: Fact=16:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 8]: Fact=14:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 9]: Fact=12:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[10]: Fact=10:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[11]: Fact= 8:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[12]: Fact= 6:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[13]: Fact= 4:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[14]: Fact= 2:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[15]: Fact= 0:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,            *
+[16]: Fact=30:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[17]: Fact=28:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[18]: Fact=26:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[19]: Fact=24:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[20]: Fact=22:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[21]: Fact=20:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[22]: Fact=18:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[23]: Fact=16:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[24]: Fact=14:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[25]: Fact=12:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[26]: Fact=10:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[27]: Fact= 8:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[28]: Fact= 6:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[29]: Fact= 4:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[30]: Fact= 2:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[31]: Fact= 0:   16,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,            *
+--- 32x32, Mode=12 [F]---
+[ 0]: Fact=27:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 1]: Fact=22:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 2]: Fact=17:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 3]: Fact=12:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 4]: Fact= 7:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 5]: Fact= 2:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 6]: Fact=29:    6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 7]: Fact=24:    6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 8]: Fact=19:    6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 9]: Fact=14:    6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[10]: Fact= 9:    6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[11]: Fact= 4:    6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[12]: Fact=31:   13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[13]: Fact=26:   13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[14]: Fact=21:   13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[15]: Fact=16:   13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[16]: Fact=11:   13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[17]: Fact= 6:   13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[18]: Fact= 1:   13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[19]: Fact=28:   19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[20]: Fact=23:   19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[21]: Fact=18:   19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[22]: Fact=13:   19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[23]: Fact= 8:   19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[24]: Fact= 3:   19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[25]: Fact=30:   26, 19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[26]: Fact=25:   26, 19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[27]: Fact=20:   26, 19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[28]: Fact=15:   26, 19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[29]: Fact=10:   26, 19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[30]: Fact= 5:   26, 19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[31]: Fact= 0:   26, 19, 13,  6,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,            *
+--- 32x32, Mode=13 [F]---
+[ 0]: Fact=23:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 1]: Fact=14:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 2]: Fact= 5:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 3]: Fact=28:    4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 4]: Fact=19:    4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 5]: Fact=10:    4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 6]: Fact= 1:    4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 7]: Fact=24:    7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[ 8]: Fact=15:    7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[ 9]: Fact= 6:    7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[10]: Fact=29:   11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[11]: Fact=20:   11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[12]: Fact=11:   11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[13]: Fact= 2:   11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[14]: Fact=25:   14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[15]: Fact=16:   14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[16]: Fact= 7:   14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[17]: Fact=30:   18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[18]: Fact=21:   18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[19]: Fact=12:   18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[20]: Fact= 3:   18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[21]: Fact=26:   21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[22]: Fact=17:   21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[23]: Fact= 8:   21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[24]: Fact=31:   25, 21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[25]: Fact=22:   25, 21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[26]: Fact=13:   25, 21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[27]: Fact= 4:   25, 21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[28]: Fact=27:   28, 25, 21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[29]: Fact=18:   28, 25, 21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[30]: Fact= 9:   28, 25, 21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[31]: Fact= 0:   28, 25, 21, 18, 14, 11,  7,  4,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,            *
+--- 32x32, Mode=14 [F]---
+[ 0]: Fact=19:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 1]: Fact= 6:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 2]: Fact=25:    2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 3]: Fact=12:    2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 4]: Fact=31:    5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[ 5]: Fact=18:    5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[ 6]: Fact= 5:    5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[ 7]: Fact=24:    7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[ 8]: Fact=11:    7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[ 9]: Fact=30:   10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[10]: Fact=17:   10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[11]: Fact= 4:   10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[12]: Fact=23:   12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[13]: Fact=10:   12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[14]: Fact=29:   15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[15]: Fact=16:   15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[16]: Fact= 3:   15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[17]: Fact=22:   17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[18]: Fact= 9:   17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[19]: Fact=28:   20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[20]: Fact=15:   20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[21]: Fact= 2:   20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[22]: Fact=21:   22, 20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[23]: Fact= 8:   22, 20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[24]: Fact=27:   25, 22, 20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[25]: Fact=14:   25, 22, 20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[26]: Fact= 1:   25, 22, 20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[27]: Fact=20:   27, 25, 22, 20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[28]: Fact= 7:   27, 25, 22, 20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[29]: Fact=26:   30, 27, 25, 22, 20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[30]: Fact=13:   30, 27, 25, 22, 20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[31]: Fact= 0:   30, 27, 25, 22, 20, 17, 15, 12, 10,  7,  5,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,            *
+--- 32x32, Mode=15 [F]---
+[ 0]: Fact=15:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 1]: Fact=30:    2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 2]: Fact=13:    2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 3]: Fact=28:    4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[ 4]: Fact=11:    4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[ 5]: Fact=26:    6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[ 6]: Fact= 9:    6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[ 7]: Fact=24:    8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[ 8]: Fact= 7:    8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[ 9]: Fact=22:    9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[10]: Fact= 5:    9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[11]: Fact=20:   11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[12]: Fact= 3:   11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[13]: Fact=18:   13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[14]: Fact= 1:   13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[15]: Fact=16:   15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[16]: Fact=31:   17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[17]: Fact=14:   17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[18]: Fact=29:   19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[19]: Fact=12:   19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[20]: Fact=27:   21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[21]: Fact=10:   21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[22]: Fact=25:   23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[23]: Fact= 8:   23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[24]: Fact=23:   24, 23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[25]: Fact= 6:   24, 23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[26]: Fact=21:   26, 24, 23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[27]: Fact= 4:   26, 24, 23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[28]: Fact=19:   28, 26, 24, 23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[29]: Fact= 2:   28, 26, 24, 23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[30]: Fact=17:   30, 28, 26, 24, 23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[31]: Fact= 0:   30, 28, 26, 24, 23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,            *
+--- 32x32, Mode=16 [F]---
+[ 0]: Fact=11:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 1]: Fact=22:    2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 2]: Fact= 1:    2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 3]: Fact=12:    3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[ 4]: Fact=23:    5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[ 5]: Fact= 2:    5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[ 6]: Fact=13:    6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[ 7]: Fact=24:    8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[ 8]: Fact= 3:    8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[ 9]: Fact=14:    9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[10]: Fact=25:   11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[11]: Fact= 4:   11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[12]: Fact=15:   12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[13]: Fact=26:   14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[14]: Fact= 5:   14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[15]: Fact=16:   15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[16]: Fact=27:   17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[17]: Fact= 6:   17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[18]: Fact=17:   18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[19]: Fact=28:   20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[20]: Fact= 7:   20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[21]: Fact=18:   21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[22]: Fact=29:   23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[23]: Fact= 8:   23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[24]: Fact=19:   24, 23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[25]: Fact=30:   26, 24, 23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[26]: Fact= 9:   26, 24, 23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[27]: Fact=20:   27, 26, 24, 23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[28]: Fact=31:   29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[29]: Fact=10:   29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[30]: Fact=21:   30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[31]: Fact= 0:   30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,            *
+--- 32x32, Mode=17 [F]---
+[ 0]: Fact= 6:    0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,        *
+[ 1]: Fact=12:    1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,        *
+[ 2]: Fact=18:    2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,        *
+[ 3]: Fact=24:    4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,        *
+[ 4]: Fact=30:    5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[ 5]: Fact= 4:    5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,        *
+[ 6]: Fact=10:    6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,        *
+[ 7]: Fact=16:    7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,        *
+[ 8]: Fact=22:    9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,        *
+[ 9]: Fact=28:   10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[10]: Fact= 2:   10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,        *
+[11]: Fact= 8:   11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,        *
+[12]: Fact=14:   12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,        *
+[13]: Fact=20:   14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,        *
+[14]: Fact=26:   15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,        *
+[15]: Fact= 0:   15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,            *
+[16]: Fact= 6:   16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,        *
+[17]: Fact=12:   17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,-18,        *
+[18]: Fact=18:   18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,-17,        *
+[19]: Fact=24:   20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,-16,        *
+[20]: Fact=30:   21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[21]: Fact= 4:   21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+[22]: Fact=10:   22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[23]: Fact=16:   23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[24]: Fact=22:   25, 23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[25]: Fact=28:   26, 25, 23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[26]: Fact= 2:   26, 25, 23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[27]: Fact= 8:   27, 26, 25, 23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[28]: Fact=14:   28, 27, 26, 25, 23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[29]: Fact=20:   30, 28, 27, 26, 25, 23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[30]: Fact=26:   31, 30, 28, 27, 26, 25, 23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[31]: Fact= 0:   31, 30, 28, 27, 26, 25, 23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2, -3, -4, -5, -6,            *
+--- 32x32, Mode=18 [F]---
+[ 0]: Fact= 0:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,            *
+[ 1]: Fact= 0:   -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,            *
+[ 2]: Fact= 0:   -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,            *
+[ 3]: Fact= 0:   -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,            *
+[ 4]: Fact= 0:   -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,            *
+[ 5]: Fact= 0:   -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,            *
+[ 6]: Fact= 0:   -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,            *
+[ 7]: Fact= 0:   -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,            *
+[ 8]: Fact= 0:   -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,            *
+[ 9]: Fact= 0:   -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,            *
+[10]: Fact= 0:  -10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,            *
+[11]: Fact= 0:  -11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,            *
+[12]: Fact= 0:  -12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,            *
+[13]: Fact= 0:  -13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,            *
+[14]: Fact= 0:  -14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,            *
+[15]: Fact= 0:  -15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,            *
+[16]: Fact= 0:  -16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,            *
+[17]: Fact= 0:  -17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,            *
+[18]: Fact= 0:  -18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,            *
+[19]: Fact= 0:  -19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,            *
+[20]: Fact= 0:  -20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,            *
+[21]: Fact= 0:  -21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,            *
+[22]: Fact= 0:  -22,-21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,            *
+[23]: Fact= 0:  -23,-22,-21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,            *
+[24]: Fact= 0:  -24,-23,-22,-21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,            *
+[25]: Fact= 0:  -25,-24,-23,-22,-21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,  6,            *
+[26]: Fact= 0:  -26,-25,-24,-23,-22,-21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5,            *
+[27]: Fact= 0:  -27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,  4,            *
+[28]: Fact= 0:  -28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,  3,            *
+[29]: Fact= 0:  -29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,  2,            *
+[30]: Fact= 0:  -30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,  1,            *
+[31]: Fact= 0:  -31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10, -9, -8, -7, -6, -5, -4, -3, -2, -1,  0,            *
+--- 32x32, Mode=19 [F]---
+[ 0]: Fact= 6:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 1]: Fact=12:   -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 2]: Fact=18:   -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[ 3]: Fact=24:   -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[ 4]: Fact=30:   -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[ 5]: Fact= 4:   -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[ 6]: Fact=10:   -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[ 7]: Fact=16:   -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+[ 8]: Fact=22:   -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[ 9]: Fact=28:  -10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+[10]: Fact= 2:  -10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+[11]: Fact= 8:  -11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+[12]: Fact=14:  -12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+[13]: Fact=20:  -14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+[14]: Fact=26:  -15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+[15]: Fact= 0:  -15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+[16]: Fact= 6:  -16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+[17]: Fact=12:  -17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+[18]: Fact=18:  -18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,
+[19]: Fact=24:  -20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,
+[20]: Fact=30:  -21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+[21]: Fact= 4:  -21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+[22]: Fact=10:  -22,-21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,
+[23]: Fact=16:  -23,-22,-21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
+[24]: Fact=22:  -25,-23,-22,-21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,
+[25]: Fact=28:  -26,-25,-23,-22,-21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,
+[26]: Fact= 2:  -26,-25,-23,-22,-21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,
+[27]: Fact= 8:  -27,-26,-25,-23,-22,-21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,
+[28]: Fact=14:  -28,-27,-26,-25,-23,-22,-21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
+[29]: Fact=20:  -30,-28,-27,-26,-25,-23,-22,-21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8,
+[30]: Fact=26:  -31,-30,-28,-27,-26,-25,-23,-22,-21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,
+[31]: Fact= 0:  -31,-30,-28,-27,-26,-25,-23,-22,-21,-20,-18,-17,-16,-15,-14,-12,-11,-10, -9, -7, -6, -5, -4, -2, -1,  0,  1,  2,  3,  4,  5,  6,
+--- 32x32, Mode=20 [F]---
+[ 0]: Fact=11:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 1]: Fact=22:   -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 2]: Fact= 1:   -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 3]: Fact=12:   -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[ 4]: Fact=23:   -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[ 5]: Fact= 2:   -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[ 6]: Fact=13:   -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[ 7]: Fact=24:   -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[ 8]: Fact= 3:   -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[ 9]: Fact=14:   -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+[10]: Fact=25:  -11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[11]: Fact= 4:  -11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[12]: Fact=15:  -12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+[13]: Fact=26:  -14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+[14]: Fact= 5:  -14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+[15]: Fact=16:  -15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+[16]: Fact=27:  -17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+[17]: Fact= 6:  -17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+[18]: Fact=17:  -18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+[19]: Fact=28:  -20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+[20]: Fact= 7:  -20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+[21]: Fact=18:  -21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+[22]: Fact=29:  -23,-21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,
+[23]: Fact= 8:  -23,-21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,
+[24]: Fact=19:  -24,-23,-21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,
+[25]: Fact=30:  -26,-24,-23,-21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+[26]: Fact= 9:  -26,-24,-23,-21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+[27]: Fact=20:  -27,-26,-24,-23,-21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,
+[28]: Fact=31:  -29,-27,-26,-24,-23,-21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
+[29]: Fact=10:  -29,-27,-26,-24,-23,-21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
+[30]: Fact=21:  -30,-29,-27,-26,-24,-23,-21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,
+[31]: Fact= 0:  -30,-29,-27,-26,-24,-23,-21,-20,-18,-17,-15,-14,-12,-11, -9, -8, -6, -5, -3, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,
+--- 32x32, Mode=21 [F]---
+[ 0]: Fact=15:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 1]: Fact=30:   -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 2]: Fact=13:   -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 3]: Fact=28:   -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[ 4]: Fact=11:   -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[ 5]: Fact=26:   -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[ 6]: Fact= 9:   -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[ 7]: Fact=24:   -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[ 8]: Fact= 7:   -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[ 9]: Fact=22:   -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[10]: Fact= 5:   -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[11]: Fact=20:  -11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+[12]: Fact= 3:  -11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+[13]: Fact=18:  -13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[14]: Fact= 1:  -13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[15]: Fact=16:  -15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+[16]: Fact=31:  -17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+[17]: Fact=14:  -17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+[18]: Fact=29:  -19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+[19]: Fact=12:  -19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+[20]: Fact=27:  -21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+[21]: Fact=10:  -21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+[22]: Fact=25:  -23,-21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+[23]: Fact= 8:  -23,-21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+[24]: Fact=23:  -24,-23,-21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+[25]: Fact= 6:  -24,-23,-21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+[26]: Fact=21:  -26,-24,-23,-21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+[27]: Fact= 4:  -26,-24,-23,-21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+[28]: Fact=19:  -28,-26,-24,-23,-21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,
+[29]: Fact= 2:  -28,-26,-24,-23,-21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,
+[30]: Fact=17:  -30,-28,-26,-24,-23,-21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,
+[31]: Fact= 0:  -30,-28,-26,-24,-23,-21,-19,-17,-15,-13,-11, -9, -8, -6, -4, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+--- 32x32, Mode=22 [F]---
+[ 0]: Fact=19:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 1]: Fact= 6:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 2]: Fact=25:   -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 3]: Fact=12:   -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 4]: Fact=31:   -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[ 5]: Fact=18:   -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[ 6]: Fact= 5:   -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[ 7]: Fact=24:   -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[ 8]: Fact=11:   -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[ 9]: Fact=30:  -10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[10]: Fact=17:  -10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[11]: Fact= 4:  -10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[12]: Fact=23:  -12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[13]: Fact=10:  -12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[14]: Fact=29:  -15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+[15]: Fact=16:  -15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+[16]: Fact= 3:  -15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+[17]: Fact=22:  -17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[18]: Fact= 9:  -17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[19]: Fact=28:  -20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+[20]: Fact=15:  -20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+[21]: Fact= 2:  -20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+[22]: Fact=21:  -22,-20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+[23]: Fact= 8:  -22,-20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+[24]: Fact=27:  -25,-22,-20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+[25]: Fact=14:  -25,-22,-20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+[26]: Fact= 1:  -25,-22,-20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+[27]: Fact=20:  -27,-25,-22,-20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+[28]: Fact= 7:  -27,-25,-22,-20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+[29]: Fact=26:  -30,-27,-25,-22,-20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+[30]: Fact=13:  -30,-27,-25,-22,-20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+[31]: Fact= 0:  -30,-27,-25,-22,-20,-17,-15,-12,-10, -7, -5, -2,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+--- 32x32, Mode=23 [F]---
+[ 0]: Fact=23:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 1]: Fact=14:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 2]: Fact= 5:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 3]: Fact=28:   -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 4]: Fact=19:   -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 5]: Fact=10:   -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 6]: Fact= 1:   -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 7]: Fact=24:   -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[ 8]: Fact=15:   -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[ 9]: Fact= 6:   -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[10]: Fact=29:  -11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[11]: Fact=20:  -11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[12]: Fact=11:  -11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[13]: Fact= 2:  -11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[14]: Fact=25:  -14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[15]: Fact=16:  -14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[16]: Fact= 7:  -14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[17]: Fact=30:  -18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[18]: Fact=21:  -18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[19]: Fact=12:  -18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[20]: Fact= 3:  -18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+[21]: Fact=26:  -21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+[22]: Fact=17:  -21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+[23]: Fact= 8:  -21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+[24]: Fact=31:  -25,-21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[25]: Fact=22:  -25,-21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[26]: Fact=13:  -25,-21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[27]: Fact= 4:  -25,-21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+[28]: Fact=27:  -28,-25,-21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+[29]: Fact=18:  -28,-25,-21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+[30]: Fact= 9:  -28,-25,-21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+[31]: Fact= 0:  -28,-25,-21,-18,-14,-11, -7, -4,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+--- 32x32, Mode=24 [F]---
+[ 0]: Fact=27:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 1]: Fact=22:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 2]: Fact=17:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 3]: Fact=12:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 4]: Fact= 7:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 5]: Fact= 2:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 6]: Fact=29:   -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 7]: Fact=24:   -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 8]: Fact=19:   -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[ 9]: Fact=14:   -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[10]: Fact= 9:   -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[11]: Fact= 4:   -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[12]: Fact=31:  -13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[13]: Fact=26:  -13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[14]: Fact=21:  -13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[15]: Fact=16:  -13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[16]: Fact=11:  -13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[17]: Fact= 6:  -13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[18]: Fact= 1:  -13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+[19]: Fact=28:  -19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[20]: Fact=23:  -19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[21]: Fact=18:  -19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[22]: Fact=13:  -19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[23]: Fact= 8:  -19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[24]: Fact= 3:  -19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+[25]: Fact=30:  -26,-19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[26]: Fact=25:  -26,-19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[27]: Fact=20:  -26,-19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[28]: Fact=15:  -26,-19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[29]: Fact=10:  -26,-19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[30]: Fact= 5:  -26,-19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+[31]: Fact= 0:  -26,-19,-13, -6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+--- 32x32, Mode=25 [F]---
+[ 0]: Fact=30:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 1]: Fact=28:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 2]: Fact=26:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 3]: Fact=24:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 4]: Fact=22:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 5]: Fact=20:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 6]: Fact=18:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 7]: Fact=16:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 8]: Fact=14:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[ 9]: Fact=12:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[10]: Fact=10:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[11]: Fact= 8:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[12]: Fact= 6:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[13]: Fact= 4:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[14]: Fact= 2:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+[15]: Fact= 0:    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[16]: Fact=30:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[17]: Fact=28:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[18]: Fact=26:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[19]: Fact=24:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[20]: Fact=22:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[21]: Fact=20:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[22]: Fact=18:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[23]: Fact=16:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[24]: Fact=14:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[25]: Fact=12:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[26]: Fact=10:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[27]: Fact= 8:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[28]: Fact= 6:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[29]: Fact= 4:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[30]: Fact= 2:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+[31]: Fact= 0:  -16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+--- 32x32, Mode=26 [ ]---
+[ 0]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[ 1]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[ 2]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[ 3]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[ 4]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[ 5]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[ 6]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[ 7]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[ 8]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[ 9]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[10]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[11]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[12]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[13]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[14]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[15]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[16]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[17]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[18]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[19]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[20]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[21]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[22]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[23]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[24]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[25]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[26]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[27]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[28]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[29]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[30]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+[31]: Fact= 0:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,            *
+--- 32x32, Mode=27 [F]---
+[ 0]: Fact= 2:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 1]: Fact= 4:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 2]: Fact= 6:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 3]: Fact= 8:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 4]: Fact=10:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 5]: Fact=12:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 6]: Fact=14:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 7]: Fact=16:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 8]: Fact=18:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 9]: Fact=20:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[10]: Fact=22:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[11]: Fact=24:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[12]: Fact=26:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[13]: Fact=28:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[14]: Fact=30:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[15]: Fact= 0:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[16]: Fact= 2:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[17]: Fact= 4:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[18]: Fact= 6:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[19]: Fact= 8:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[20]: Fact=10:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[21]: Fact=12:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[22]: Fact=14:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[23]: Fact=16:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[24]: Fact=18:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[25]: Fact=20:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[26]: Fact=22:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[27]: Fact=24:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[28]: Fact=26:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[29]: Fact=28:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[30]: Fact=30:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[31]: Fact= 0:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+--- 32x32, Mode=28 [F]---
+[ 0]: Fact= 5:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 1]: Fact=10:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 2]: Fact=15:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 3]: Fact=20:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 4]: Fact=25:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 5]: Fact=30:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 6]: Fact= 3:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 7]: Fact= 8:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 8]: Fact=13:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 9]: Fact=18:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[10]: Fact=23:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[11]: Fact=28:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[12]: Fact= 1:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[13]: Fact= 6:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[14]: Fact=11:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[15]: Fact=16:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[16]: Fact=21:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[17]: Fact=26:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[18]: Fact=31:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[19]: Fact= 4:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[20]: Fact= 9:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[21]: Fact=14:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[22]: Fact=19:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[23]: Fact=24:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[24]: Fact=29:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[25]: Fact= 2:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[26]: Fact= 7:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[27]: Fact=12:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[28]: Fact=17:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[29]: Fact=22:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[30]: Fact=27:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[31]: Fact= 0:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+--- 32x32, Mode=29 [F]---
+[ 0]: Fact= 9:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 1]: Fact=18:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 2]: Fact=27:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 3]: Fact= 4:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 4]: Fact=13:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 5]: Fact=22:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 6]: Fact=31:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 7]: Fact= 8:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[ 8]: Fact=17:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[ 9]: Fact=26:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[10]: Fact= 3:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[11]: Fact=12:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[12]: Fact=21:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[13]: Fact=30:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[14]: Fact= 7:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[15]: Fact=16:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[16]: Fact=25:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[17]: Fact= 2:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[18]: Fact=11:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[19]: Fact=20:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[20]: Fact=29:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[21]: Fact= 6:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[22]: Fact=15:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[23]: Fact=24:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[24]: Fact= 1:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[25]: Fact=10:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[26]: Fact=19:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[27]: Fact=28:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[28]: Fact= 5:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[29]: Fact=14:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[30]: Fact=23:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[31]: Fact= 0:   10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+--- 32x32, Mode=30 [F]---
+[ 0]: Fact=13:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 1]: Fact=26:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 2]: Fact= 7:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 3]: Fact=20:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 4]: Fact= 1:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[ 5]: Fact=14:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[ 6]: Fact=27:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[ 7]: Fact= 8:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[ 8]: Fact=21:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[ 9]: Fact= 2:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[10]: Fact=15:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[11]: Fact=28:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[12]: Fact= 9:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[13]: Fact=22:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[14]: Fact= 3:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[15]: Fact=16:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[16]: Fact=29:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[17]: Fact=10:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[18]: Fact=23:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[19]: Fact= 4:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[20]: Fact=17:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[21]: Fact=30:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[22]: Fact=11:   10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+[23]: Fact=24:   10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+[24]: Fact= 5:   11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+[25]: Fact=18:   11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+[26]: Fact=31:   11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+[27]: Fact=12:   12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+[28]: Fact=25:   12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+[29]: Fact= 6:   13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+[30]: Fact=19:   13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+[31]: Fact= 0:   14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+--- 32x32, Mode=31 [F]---
+[ 0]: Fact=17:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 1]: Fact= 2:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 2]: Fact=19:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 3]: Fact= 4:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[ 4]: Fact=21:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[ 5]: Fact= 6:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[ 6]: Fact=23:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[ 7]: Fact= 8:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[ 8]: Fact=25:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[ 9]: Fact=10:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[10]: Fact=27:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[11]: Fact=12:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[12]: Fact=29:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[13]: Fact=14:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[14]: Fact=31:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[15]: Fact=16:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[16]: Fact= 1:   10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+[17]: Fact=18:   10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+[18]: Fact= 3:   11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+[19]: Fact=20:   11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+[20]: Fact= 5:   12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+[21]: Fact=22:   12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+[22]: Fact= 7:   13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+[23]: Fact=24:   13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+[24]: Fact= 9:   14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+[25]: Fact=26:   14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+[26]: Fact=11:   15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+[27]: Fact=28:   15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+[28]: Fact=13:   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+[29]: Fact=30:   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+[30]: Fact=15:   17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+[31]: Fact= 0:   18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+--- 32x32, Mode=32 [F]---
+[ 0]: Fact=21:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 1]: Fact=10:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 2]: Fact=31:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 3]: Fact=20:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[ 4]: Fact= 9:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[ 5]: Fact=30:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[ 6]: Fact=19:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[ 7]: Fact= 8:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[ 8]: Fact=29:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[ 9]: Fact=18:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[10]: Fact= 7:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[11]: Fact=28:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[12]: Fact=17:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[13]: Fact= 6:   10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+[14]: Fact=27:   10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+[15]: Fact=16:   11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+[16]: Fact= 5:   12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+[17]: Fact=26:   12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+[18]: Fact=15:   13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+[19]: Fact= 4:   14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+[20]: Fact=25:   14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+[21]: Fact=14:   15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+[22]: Fact= 3:   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+[23]: Fact=24:   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+[24]: Fact=13:   17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+[25]: Fact= 2:   18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+[26]: Fact=23:   18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+[27]: Fact=12:   19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+[28]: Fact= 1:   20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+[29]: Fact=22:   20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+[30]: Fact=11:   21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+[31]: Fact= 0:   22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+--- 32x32, Mode=33 [F]---
+[ 0]: Fact=26:    1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 1]: Fact=20:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 2]: Fact=14:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[ 3]: Fact= 8:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[ 4]: Fact= 2:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[ 5]: Fact=28:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[ 6]: Fact=22:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[ 7]: Fact=16:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[ 8]: Fact=10:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[ 9]: Fact= 4:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[10]: Fact=30:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[11]: Fact=24:   10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+[12]: Fact=18:   11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+[13]: Fact=12:   12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+[14]: Fact= 6:   13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+[15]: Fact= 0:   14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+[16]: Fact=26:   14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+[17]: Fact=20:   15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+[18]: Fact=14:   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+[19]: Fact= 8:   17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+[20]: Fact= 2:   18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+[21]: Fact=28:   18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+[22]: Fact=22:   19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+[23]: Fact=16:   20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+[24]: Fact=10:   21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+[25]: Fact= 4:   22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+[26]: Fact=30:   22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+[27]: Fact=24:   23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+[28]: Fact=18:   24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+[29]: Fact=12:   25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+[30]: Fact= 6:   26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+[31]: Fact= 0:   27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+--- 32x32, Mode=34 [F]---
+[ 0]: Fact= 0:    2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+[ 1]: Fact= 0:    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+[ 2]: Fact= 0:    4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+[ 3]: Fact= 0:    5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+[ 4]: Fact= 0:    6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+[ 5]: Fact= 0:    7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+[ 6]: Fact= 0:    8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+[ 7]: Fact= 0:    9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+[ 8]: Fact= 0:   10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+[ 9]: Fact= 0:   11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+[10]: Fact= 0:   12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+[11]: Fact= 0:   13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+[12]: Fact= 0:   14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+[13]: Fact= 0:   15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+[14]: Fact= 0:   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+[15]: Fact= 0:   17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+[16]: Fact= 0:   18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+[17]: Fact= 0:   19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+[18]: Fact= 0:   20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+[19]: Fact= 0:   21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+[20]: Fact= 0:   22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+[21]: Fact= 0:   23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+[22]: Fact= 0:   24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+[23]: Fact= 0:   25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+[24]: Fact= 0:   26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+[25]: Fact= 0:   27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+[26]: Fact= 0:   28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+[27]: Fact= 0:   29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+[28]: Fact= 0:   30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+[29]: Fact= 0:   31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+[30]: Fact= 0:   32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+[31]: Fact= 0:   33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/intra/intra-4x4.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,166 @@
+--- 4x4, Mode= 2 --- [refPix]
+[ 0]: Fact= 0:   10, 11, 12, 13                 *
+[ 1]: Fact= 0:   11, 12, 13, 14                 *
+[ 2]: Fact= 0:   12, 13, 14, 15                 *
+[ 3]: Fact= 0:   13, 14, 15, 16                 *
+--- 4x4, Mode= 3 ---
+[ 0]: Fact=26:    9, 10, 11, 12, 13             *
+[ 1]: Fact=20:   10, 11, 12, 13, 14             *
+[ 2]: Fact=14:   11, 12, 13, 14, 15             *
+[ 3]: Fact= 8:   12, 13, 14, 15, 16             *
+--- 4x4, Mode= 4 ---
+[ 0]: Fact=21:    9, 10, 11, 12, 13             *
+[ 1]: Fact=10:   10, 11, 12, 13, 14             *
+[ 2]: Fact=31:   10, 11, 12, 13, 14             *
+[ 3]: Fact=20:   11, 12, 13, 14, 15             *
+--- 4x4, Mode= 5 ---
+[ 0]: Fact=17:    9, 10, 11, 12, 13             *
+[ 1]: Fact= 2:   10, 11, 12, 13, 14             *
+[ 2]: Fact=19:   10, 11, 12, 13, 14             *
+[ 3]: Fact= 4:   11, 12, 13, 14, 15             *
+--- 4x4, Mode= 6 ---
+[ 0]: Fact=13:    9, 10, 11, 12, 13             *
+[ 1]: Fact=26:    9, 10, 11, 12, 13             *
+[ 2]: Fact= 7:   10, 11, 12, 13, 14             *
+[ 3]: Fact=20:   10, 11, 12, 13, 14             *
+--- 4x4, Mode= 7 ---
+[ 0]: Fact= 9:    9, 10, 11, 12, 13             *
+[ 1]: Fact=18:    9, 10, 11, 12, 13             *
+[ 2]: Fact=27:    9, 10, 11, 12, 13             *
+[ 3]: Fact= 4:   10, 11, 12, 13, 14             *
+--- 4x4, Mode= 8 ---
+[ 0]: Fact= 5:    9, 10, 11, 12, 13             *
+[ 1]: Fact=10:    9, 10, 11, 12, 13             *
+[ 2]: Fact=15:    9, 10, 11, 12, 13             *
+[ 3]: Fact=20:    9, 10, 11, 12, 13             *
+--- 4x4, Mode= 9 ---
+[ 0]: Fact= 2:    9, 10, 11, 12, 13             *
+[ 1]: Fact= 4:    9, 10, 11, 12, 13             *
+[ 2]: Fact= 6:    9, 10, 11, 12, 13             *
+[ 3]: Fact= 8:    9, 10, 11, 12, 13             *
+--- 4x4, Mode=10 --- filtPix
+[ 0]: Fact= 0:    9, 10, 11, 12                 *
+[ 1]: Fact= 0:    9, 10, 11, 12                 *
+[ 2]: Fact= 0:    9, 10, 11, 12                 *
+[ 3]: Fact= 0:    9, 10, 11, 12                 *
+--- 4x4, Mode=11 ---
+[ 0]: Fact=30:    0, 9, 10, 11, 12              *
+[ 1]: Fact=28:    0, 9, 10, 11, 12              *
+[ 2]: Fact=26:    0, 9, 10, 11, 12              *
+[ 3]: Fact=24:    0, 9, 10, 11, 12              *
+--- 4x4, Mode=12 ---
+[ 0]: Fact=27:    0, 9, 10, 11, 12              *
+[ 1]: Fact=22:    0, 9, 10, 11, 12              *
+[ 2]: Fact=17:    0, 9, 10, 11, 12              *
+[ 3]: Fact=12:    0, 9, 10, 11, 12              *
+--- 4x4, Mode=13 ---
+[ 0]: Fact=23:    0, 9, 10, 11, 12              *
+[ 1]: Fact=14:    0, 9, 10, 11, 12              *
+[ 2]: Fact= 5:    0, 9, 10, 11, 12              *
+[ 3]: Fact=28:    4, 0,  9, 10, 11              *
+--- 4x4, Mode=14 ---
+[ 0]: Fact=19:    0, 9, 10, 11, 12              *
+[ 1]: Fact= 6:    0, 9, 10, 11, 12              *
+[ 2]: Fact=25:    2, 0,  9, 10, 11              *
+[ 3]: Fact=12:    2, 0,  9, 10, 11              *
+--- 4x4, Mode=15 ---
+[ 0]: Fact=15:    0, 9, 10, 11, 12              *
+[ 1]: Fact=30:    2, 0,  9, 10, 11              *
+[ 2]: Fact=13:    2, 0,  9, 10, 11              *
+[ 3]: Fact=28:    4, 2,  0,  9, 10              *
+--- 4x4, Mode=16 ---
+[ 0]: Fact=11:    0,  9, 10, 11, 12             *
+[ 1]: Fact=22:    2,  0, 9,  10, 11             *
+[ 2]: Fact= 1:    2,  0, 9,  10, 11             *
+[ 3]: Fact=12:    3,  2, 0,  9,  10             *
+--- 4x4, Mode=17 ---
+[ 0]: Fact= 6:    0,  9, 10, 11, 12             *
+[ 1]: Fact=12:    1,  0,  9, 10, 11             *
+[ 2]: Fact=18:    2,  1,  0,  9, 10             *
+[ 3]: Fact=24:    4,  2,  1,  0,  9             *
+--- 4x4, Mode=18 ---
+[ 0]: Fact= 0:    0,  1,  2,  3                 *
+[ 1]: Fact= 0:    9,  0,  1,  2                 *
+[ 2]: Fact= 0:    10, 9,  0,  1                 *
+[ 3]: Fact= 0:    11, 10, 9,  0                 *
+--- 4x4, Mode=19 ---
+[ 0]: Fact= 6:    0,  1,  2,  3,  4             *
+[ 1]: Fact=12:    9,  0,  1,  2,  3             *
+[ 2]: Fact=18:    10, 9,  0,  1,  2             *
+[ 3]: Fact=24:    12, 10, 9,  0,  1             *
+--- 4x4, Mode=20 ---
+[ 0]: Fact=11:    0,  1,  2,  3,  4             *
+[ 1]: Fact=22:    10, 0,  1,  2,  3             *
+[ 2]: Fact= 1:    10, 0,  1,  2,  3             *
+[ 3]: Fact=12:    11, 10, 0,  1,  2             *
+--- 4x4, Mode=21 ---
+[ 0]: Fact=15:    0,  1,  2,  3,  4             *
+[ 1]: Fact=30:    10, 0,  1,  2,  3             *
+[ 2]: Fact=13:    10, 0,  1,  2,  3             *
+[ 3]: Fact=28:    12, 10, 0,  1,  2             *
+--- 4x4, Mode=22 ---
+[ 0]: Fact=19:    0,  1,  2,  3,  4             *
+[ 1]: Fact= 6:    0,  1,  2,  3,  4             *
+[ 2]: Fact=25:    10, 0,  1,  2,  3             *
+[ 3]: Fact=12:    10, 0,  1,  2,  3             *
+--- 4x4, Mode=23 ---
+[ 0]: Fact=23:    0,  1,  2,  3,  4             *
+[ 1]: Fact=14:    0,  1,  2,  3,  4             *
+[ 2]: Fact= 5:    0,  1,  2,  3,  4             *
+[ 3]: Fact=28:    12, 0,  1,  2,  3             *
+--- 4x4, Mode=24 ---
+[ 0]: Fact=27:    0,  1,  2,  3,  4             *
+[ 1]: Fact=22:    0,  1,  2,  3,  4             *
+[ 2]: Fact=17:    0,  1,  2,  3,  4             *
+[ 3]: Fact=12:    0,  1,  2,  3,  4             *
+--- 4x4, Mode=25 ---
+[ 0]: Fact=30:    0,  1,  2,  3,  4             *
+[ 1]: Fact=28:    0,  1,  2,  3,  4             *
+[ 2]: Fact=26:    0,  1,  2,  3,  4             *
+[ 3]: Fact=24:    0,  1,  2,  3,  4             *
+--- 4x4, Mode=26 ---
+[ 0]: Fact= 0:    1,  2,  3,  4,                *
+[ 1]: Fact= 0:    1,  2,  3,  4,                *
+[ 2]: Fact= 0:    1,  2,  3,  4,                *
+[ 3]: Fact= 0:    1,  2,  3,  4,                *
+--- 4x4, Mode=27 ---
+[ 0]: Fact= 2:    1,  2,  3,  4,  5,            *
+[ 1]: Fact= 4:    1,  2,  3,  4,  5,            *
+[ 2]: Fact= 6:    1,  2,  3,  4,  5,            *
+[ 3]: Fact= 8:    1,  2,  3,  4,  5,            *
+--- 4x4, Mode=28 ---
+[ 0]: Fact= 5:    1,  2,  3,  4,  5,            *
+[ 1]: Fact=10:    1,  2,  3,  4,  5,            *
+[ 2]: Fact=15:    1,  2,  3,  4,  5,            *
+[ 3]: Fact=20:    1,  2,  3,  4,  5,            *
+--- 4x4, Mode=29 ---
+[ 0]: Fact= 9:    1,  2,  3,  4,  5,            *
+[ 1]: Fact=18:    1,  2,  3,  4,  5,            *
+[ 2]: Fact=27:    1,  2,  3,  4,  5,            *
+[ 3]: Fact= 4:    2,  3,  4,  5,  6,            *
+--- 4x4, Mode=30 ---
+[ 0]: Fact=13:    1,  2,  3,  4,  5,            *
+[ 1]: Fact=26:    1,  2,  3,  4,  5,            *
+[ 2]: Fact= 7:    2,  3,  4,  5,  6,            *
+[ 3]: Fact=20:    2,  3,  4,  5,  6,            *
+--- 4x4, Mode=31 ---
+[ 0]: Fact=17:    1,  2,  3,  4,  5,            *
+[ 1]: Fact= 2:    2,  3,  4,  5,  6,            *
+[ 2]: Fact=19:    2,  3,  4,  5,  6,            *
+[ 3]: Fact= 4:    3,  4,  5,  6,  7,            *
+--- 4x4, Mode=32 ---
+[ 0]: Fact=21:    1,  2,  3,  4,  5,            *
+[ 1]: Fact=10:    2,  3,  4,  5,  6,            *
+[ 2]: Fact=31:    2,  3,  4,  5,  6,            *
+[ 3]: Fact=20:    3,  4,  5,  6,  7,            *
+--- 4x4, Mode=33 ---
+[ 0]: Fact=26:    1,  2,  3,  4,  5,            *
+[ 1]: Fact=20:    2,  3,  4,  5,  6,            *
+[ 2]: Fact=14:    3,  4,  5,  6,  7,            *
+[ 3]: Fact= 8:    4,  5,  6,  7,  8,            *
+--- 4x4, Mode=34 ---
+[ 0]: Fact= 0:    2,  3,  4,  5,                *
+[ 1]: Fact= 0:    3,  4,  5,  6,                *
+[ 2]: Fact= 0:    4,  5,  6,  7,                *
+[ 3]: Fact= 0:    5,  6,  7,  8,                *
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/intra/intra-8x8.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,298 @@
+--- 8x8, Mode= 2 ---
+[ 0]: Fact= 0:	 -2, -3, -4, -5, -6, -7, -8, -9,            *
+[ 1]: Fact= 0:	 -3, -4, -5, -6, -7, -8, -9,-10,            *
+[ 2]: Fact= 0:	 -4, -5, -6, -7, -8, -9,-10,-11,            *
+[ 3]: Fact= 0:	 -5, -6, -7, -8, -9,-10,-11,-12,            *
+[ 4]: Fact= 0:	 -6, -7, -8, -9,-10,-11,-12,-13,            *
+[ 5]: Fact= 0:	 -7, -8, -9,-10,-11,-12,-13,-14,            *
+[ 6]: Fact= 0:	 -8, -9,-10,-11,-12,-13,-14,-15,            *
+[ 7]: Fact= 0:	 -9,-10,-11,-12,-13,-14,-15,-16,            *
+--- 8x8, Mode= 3 ---                                        
+[ 0]: Fact=26:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 1]: Fact=20:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 2]: Fact=14:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[ 3]: Fact= 8:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[ 4]: Fact= 2:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[ 5]: Fact=28:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[ 6]: Fact=22:	 -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+[ 7]: Fact=16:	 -7, -8, -9,-10,-11,-12,-13,-14,-15,        *
+--- 8x8, Mode= 4 ---
+[ 0]: Fact=21:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 1]: Fact=10:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 2]: Fact=31:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 3]: Fact=20:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[ 4]: Fact= 9:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[ 5]: Fact=30:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[ 6]: Fact=19:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+[ 7]: Fact= 8:	 -6, -7, -8, -9,-10,-11,-12,-13,-14,        *
+--- 8x8, Mode= 5 ---
+[ 0]: Fact=17:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 1]: Fact= 2:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 2]: Fact=19:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 3]: Fact= 4:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[ 4]: Fact=21:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[ 5]: Fact= 6:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[ 6]: Fact=23:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+[ 7]: Fact= 8:	 -5, -6, -7, -8, -9,-10,-11,-12,-13,        *
+--- 8x8, Mode= 6 ---
+[ 0]: Fact=13:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 1]: Fact=26:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 2]: Fact= 7:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 3]: Fact=20:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 4]: Fact= 1:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[ 5]: Fact=14:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[ 6]: Fact=27:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+[ 7]: Fact= 8:	 -4, -5, -6, -7, -8, -9,-10,-11,-12,        *
+--- 8x8, Mode= 7 ---
+[ 0]: Fact= 9:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 1]: Fact=18:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 2]: Fact=27:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 3]: Fact= 4:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 4]: Fact=13:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 5]: Fact=22:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 6]: Fact=31:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 7]: Fact= 8:	 -3, -4, -5, -6, -7, -8, -9,-10,-11,        *
+--- 8x8, Mode= 8 ---
+[ 0]: Fact= 5:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 1]: Fact=10:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 2]: Fact=15:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 3]: Fact=20:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 4]: Fact=25:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 5]: Fact=30:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 6]: Fact= 3:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+[ 7]: Fact= 8:	 -2, -3, -4, -5, -6, -7, -8, -9,-10,        *
+--- 8x8, Mode= 9 ---
+[ 0]: Fact= 2:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 1]: Fact= 4:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 2]: Fact= 6:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 3]: Fact= 8:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 4]: Fact=10:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 5]: Fact=12:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 6]: Fact=14:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+[ 7]: Fact=16:	 -1, -2, -3, -4, -5, -6, -7, -8, -9,        *
+--- 8x8, Mode=10 ---
+[ 0]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8,            *
+[ 1]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8,            *
+[ 2]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8,            *
+[ 3]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8,            *
+[ 4]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8,            *
+[ 5]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8,            *
+[ 6]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8,            *
+[ 7]: Fact= 0:	 -1, -2, -3, -4, -5, -6, -7, -8,            *
+--- 8x8, Mode=11 ---
+[ 0]: Fact=30:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 1]: Fact=28:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 2]: Fact=26:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 3]: Fact=24:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 4]: Fact=22:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 5]: Fact=20:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 6]: Fact=18:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 7]: Fact=16:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+--- 8x8, Mode=12 ---
+[ 0]: Fact=27:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 1]: Fact=22:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 2]: Fact=17:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 3]: Fact=12:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 4]: Fact= 7:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 5]: Fact= 2:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 6]: Fact=29:	  6,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 7]: Fact=24:	  6,  0, -1, -2, -3, -4, -5, -6, -7,        *
+--- 8x8, Mode=13 ---
+[ 0]: Fact=23:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 1]: Fact=14:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 2]: Fact= 5:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 3]: Fact=28:	  4,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 4]: Fact=19:	  4,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 5]: Fact=10:	  4,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 6]: Fact= 1:	  4,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 7]: Fact=24:	  7,  4,  0, -1, -2, -3, -4, -5, -6,        *
+--- 8x8, Mode=14 ---
+[ 0]: Fact=19:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 1]: Fact= 6:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 2]: Fact=25:	  2,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 3]: Fact=12:	  2,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 4]: Fact=31:	  5,  2,  0, -1, -2, -3, -4, -5, -6,        *
+[ 5]: Fact=18:	  5,  2,  0, -1, -2, -3, -4, -5, -6,        *
+[ 6]: Fact= 5:	  5,  2,  0, -1, -2, -3, -4, -5, -6,        *
+[ 7]: Fact=24:	  7,  5,  2,  0, -1, -2, -3, -4, -5,        *
+--- 8x8, Mode=15 ---
+[ 0]: Fact=15:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 1]: Fact=30:	  2,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 2]: Fact=13:	  2,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 3]: Fact=28:	  4,  2,  0, -1, -2, -3, -4, -5, -6,        *
+[ 4]: Fact=11:	  4,  2,  0, -1, -2, -3, -4, -5, -6,        *
+[ 5]: Fact=26:	  6,  4,  2,  0, -1, -2, -3, -4, -5,        *
+[ 6]: Fact= 9:	  6,  4,  2,  0, -1, -2, -3, -4, -5,        *
+[ 7]: Fact=24:	  8,  6,  4,  2,  0, -1, -2, -3, -4,        *
+--- 8x8, Mode=16 ---
+[ 0]: Fact=11:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        *
+[ 1]: Fact=22:	  2,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 2]: Fact= 1:	  2,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 3]: Fact=12:	  3,  2,  0, -1, -2, -3, -4, -5, -6,        *
+[ 4]: Fact=23:	  5,  3,  2,  0, -1, -2, -3, -4, -5,        *
+[ 5]: Fact= 2:	  5,  3,  2,  0, -1, -2, -3, -4, -5,        *
+[ 6]: Fact=13:	  6,  5,  3,  2,  0, -1, -2, -3, -4,        *
+[ 7]: Fact=24:	  8,  6,  5,  3,  2,  0, -1, -2, -3,        *
+--- 8x8, Mode=17 ---
+[ 0]: Fact= 6:	  0, -1, -2, -3, -4, -5, -6, -7, -8,        x
+[ 1]: Fact=12:	  1,  0, -1, -2, -3, -4, -5, -6, -7,        *
+[ 2]: Fact=18:	  2,  1,  0, -1, -2, -3, -4, -5, -6,        *
+[ 3]: Fact=24:	  4,  2,  1,  0, -1, -2, -3, -4, -5,        *
+[ 4]: Fact=30:	  5,  4,  2,  1,  0, -1, -2, -3, -4,        *
+[ 5]: Fact= 4:	  5,  4,  2,  1,  0, -1, -2, -3, -4,        *
+[ 6]: Fact=10:	  6,  5,  4,  2,  1,  0, -1, -2, -3,        *
+[ 7]: Fact=16:	  7,  6,  5,  4,  2,  1,  0, -1, -2,        *
+--- 8x8, Mode=18 ---
+[ 0]: Fact= 0:	  0,  1,  2,  3,  4,  5,  6,  7,            *
+[ 1]: Fact= 0:	 -1,  0,  1,  2,  3,  4,  5,  6,            *
+[ 2]: Fact= 0:	 -2, -1,  0,  1,  2,  3,  4,  5,            *
+[ 3]: Fact= 0:	 -3, -2, -1,  0,  1,  2,  3,  4,            *
+[ 4]: Fact= 0:	 -4, -3, -2, -1,  0,  1,  2,  3,            *
+[ 5]: Fact= 0:	 -5, -4, -3, -2, -1,  0,  1,  2,            *
+[ 6]: Fact= 0:	 -6, -5, -4, -3, -2, -1,  0,  1,            *
+[ 7]: Fact= 0:	 -7, -6, -5, -4, -3, -2, -1,  0,            *
+--- 8x8, Mode=19 ---
+[ 0]: Fact= 6:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        x
+[ 1]: Fact=12:	 -1,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 2]: Fact=18:	 -2, -1,  0,  1,  2,  3,  4,  5,  6,        *
+[ 3]: Fact=24:	 -4, -2, -1,  0,  1,  2,  3,  4,  5,        *
+[ 4]: Fact=30:	 -5, -4, -2, -1,  0,  1,  2,  3,  4,        *
+[ 5]: Fact= 4:	 -5, -4, -2, -1,  0,  1,  2,  3,  4,        *
+[ 6]: Fact=10:	 -6, -5, -4, -2, -1,  0,  1,  2,  3,        *
+[ 7]: Fact=16:	 -7, -6, -5, -4, -2, -1,  0,  1,  2,        *
+--- 8x8, Mode=20 ---
+[ 0]: Fact=11:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 1]: Fact=22:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 2]: Fact= 1:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 3]: Fact=12:	 -3, -2,  0,  1,  2,  3,  4,  5,  6,        *
+[ 4]: Fact=23:	 -5, -3, -2,  0,  1,  2,  3,  4,  5,        *
+[ 5]: Fact= 2:	 -5, -3, -2,  0,  1,  2,  3,  4,  5,        *
+[ 6]: Fact=13:	 -6, -5, -3, -2,  0,  1,  2,  3,  4,        *
+[ 7]: Fact=24:	 -8, -6, -5, -3, -2,  0,  1,  2,  3,        *
+--- 8x8, Mode=21 ---
+[ 0]: Fact=15:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 1]: Fact=30:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 2]: Fact=13:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 3]: Fact=28:	 -4, -2,  0,  1,  2,  3,  4,  5,  6,        *
+[ 4]: Fact=11:	 -4, -2,  0,  1,  2,  3,  4,  5,  6,        *
+[ 5]: Fact=26:	 -6, -4, -2,  0,  1,  2,  3,  4,  5,        *
+[ 6]: Fact= 9:	 -6, -4, -2,  0,  1,  2,  3,  4,  5,        *
+[ 7]: Fact=24:	 -8, -6, -4, -2,  0,  1,  2,  3,  4,        *
+--- 8x8, Mode=22 ---
+[ 0]: Fact=19:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 1]: Fact= 6:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 2]: Fact=25:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 3]: Fact=12:	 -2,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 4]: Fact=31:	 -5, -2,  0,  1,  2,  3,  4,  5,  6,        *
+[ 5]: Fact=18:	 -5, -2,  0,  1,  2,  3,  4,  5,  6,        *
+[ 6]: Fact= 5:	 -5, -2,  0,  1,  2,  3,  4,  5,  6,        *
+[ 7]: Fact=24:	 -7, -5, -2,  0,  1,  2,  3,  4,  5,        *
+--- 8x8, Mode=23 ---
+[ 0]: Fact=23:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 1]: Fact=14:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 2]: Fact= 5:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 3]: Fact=28:	 -4,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 4]: Fact=19:	 -4,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 5]: Fact=10:	 -4,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 6]: Fact= 1:	 -4,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 7]: Fact=24:	 -7, -4,  0,  1,  2,  3,  4,  5,  6,        *
+--- 8x8, Mode=24 ---
+[ 0]: Fact=27:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 1]: Fact=22:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 2]: Fact=17:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 3]: Fact=12:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 4]: Fact= 7:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 5]: Fact= 2:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 6]: Fact=29:	 -6,  0,  1,  2,  3,  4,  5,  6,  7,        *
+[ 7]: Fact=24:	 -6,  0,  1,  2,  3,  4,  5,  6,  7,        *
+--- 8x8, Mode=25 ---
+[ 0]: Fact=30:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 1]: Fact=28:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 2]: Fact=26:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 3]: Fact=24:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 4]: Fact=22:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 5]: Fact=20:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 6]: Fact=18:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+[ 7]: Fact=16:	  0,  1,  2,  3,  4,  5,  6,  7,  8,        *
+--- 8x8, Mode=26 ---
+[ 0]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,            *
+[ 1]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,            *
+[ 2]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,            *
+[ 3]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,            *
+[ 4]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,            *
+[ 5]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,            *
+[ 6]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,            *
+[ 7]: Fact= 0:	  1,  2,  3,  4,  5,  6,  7,  8,            *
+--- 8x8, Mode=27 ---
+[ 0]: Fact= 2:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 1]: Fact= 4:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 2]: Fact= 6:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 3]: Fact= 8:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 4]: Fact=10:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 5]: Fact=12:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 6]: Fact=14:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 7]: Fact=16:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+--- 8x8, Mode=28 ---
+[ 0]: Fact= 5:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 1]: Fact=10:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 2]: Fact=15:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 3]: Fact=20:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 4]: Fact=25:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 5]: Fact=30:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 6]: Fact= 3:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 7]: Fact= 8:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+--- 8x8, Mode=29 ---
+[ 0]: Fact= 9:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 1]: Fact=18:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 2]: Fact=27:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 3]: Fact= 4:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 4]: Fact=13:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 5]: Fact=22:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 6]: Fact=31:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 7]: Fact= 8:	  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+--- 8x8, Mode=30 ---
+[ 0]: Fact=13:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 1]: Fact=26:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 2]: Fact= 7:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 3]: Fact=20:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 4]: Fact= 1:	  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[ 5]: Fact=14:	  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[ 6]: Fact=27:	  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[ 7]: Fact= 8:	  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+--- 8x8, Mode=31 ---
+[ 0]: Fact=17:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 1]: Fact= 2:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 2]: Fact=19:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 3]: Fact= 4:	  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[ 4]: Fact=21:	  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[ 5]: Fact= 6:	  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[ 6]: Fact=23:	  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[ 7]: Fact= 8:	  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+--- 8x8, Mode=32 ---
+[ 0]: Fact=21:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 1]: Fact=10:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 2]: Fact=31:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 3]: Fact=20:	  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[ 4]: Fact= 9:	  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[ 5]: Fact=30:	  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[ 6]: Fact=19:	  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[ 7]: Fact= 8:	  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+--- 8x8, Mode=33 ---
+[ 0]: Fact=26:	  1,  2,  3,  4,  5,  6,  7,  8,  9,        *
+[ 1]: Fact=20:	  2,  3,  4,  5,  6,  7,  8,  9, 10,        *
+[ 2]: Fact=14:	  3,  4,  5,  6,  7,  8,  9, 10, 11,        *
+[ 3]: Fact= 8:	  4,  5,  6,  7,  8,  9, 10, 11, 12,        *
+[ 4]: Fact= 2:	  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[ 5]: Fact=28:	  5,  6,  7,  8,  9, 10, 11, 12, 13,        *
+[ 6]: Fact=22:	  6,  7,  8,  9, 10, 11, 12, 13, 14,        *
+[ 7]: Fact=16:	  7,  8,  9, 10, 11, 12, 13, 14, 15,        *
+--- 8x8, Mode=34 ---
+[ 0]: Fact= 0:	  2,  3,  4,  5,  6,  7,  8,  9,            *
+[ 1]: Fact= 0:	  3,  4,  5,  6,  7,  8,  9, 10,            *
+[ 2]: Fact= 0:	  4,  5,  6,  7,  8,  9, 10, 11,            *
+[ 3]: Fact= 0:	  5,  6,  7,  8,  9, 10, 11, 12,            *
+[ 4]: Fact= 0:	  6,  7,  8,  9, 10, 11, 12, 13,            *
+[ 5]: Fact= 0:	  7,  8,  9, 10, 11, 12, 13, 14,            *
+[ 6]: Fact= 0:	  8,  9, 10, 11, 12, 13, 14, 15,            *
+[ 7]: Fact= 0:	  9, 10, 11, 12, 13, 14, 15, 16,            *
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/reST/Makefile	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,109 @@
+# Makefile for (Sphinx based) restructured text documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+
+QCOLLECTIONGENERATOR = qcollectiongenerator
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean distclean html web pickle htmlhelp qthelp qhc latex changes linkcheck
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  qthelp    to make HTML files and a qthelp project"
+	@echo "  qhc       to make QHC file"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  man       to make manpages"
+	@echo "  changes   to make an overview over all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+
+clean:
+	-rm -rf build/*
+
+distclean: clean
+	-rmdir build/
+
+html:
+	mkdir -p build/html build/doctrees
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html
+	@echo
+	@echo "Build finished. The HTML pages are in build/html."
+
+zip: html
+	(cd build ; zip TortoiseHg.html.zip -r html)
+
+pickle:
+	mkdir -p build/pickle build/doctrees
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+web: pickle
+
+json:
+	mkdir -p build/json build/doctrees
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	mkdir -p build/htmlhelp build/doctrees
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in build/htmlhelp."
+
+qthelp:
+	mkdir -p build/qthelp build/doctrees
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in build/qthelp, like this:"
+	@echo "# qcollectiongenerator build/qthelp/foo.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile build/qthelp/foo.qhc"
+
+qhc: qthelp
+	$(QCOLLECTIONGENERATOR) build/qthelp/TortoiseHg.qhcp
+	@echo "Build finished. To view the help file:"
+	@echo "# assistant -collectionFile build/qthelp/TortoiseHg.qhc"
+
+latex:
+	mkdir -p build/latex build/doctrees
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in build/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+man:
+	mkdir -p build/man build/doctrees
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) build/man
+	@echo
+	@echo "Build finished; the manpages are in build/man."
+	@echo "Run \`man -l build/man/x265.1' or \`man -l build/man/libx265.3'" \
+	      "to view them."
+
+changes:
+	mkdir -p build/changes build/doctrees
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes
+	@echo
+	@echo "The overview file is in build/changes."
+
+linkcheck:
+	mkdir -p build/linkcheck build/doctrees
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in build/linkcheck/output.txt."
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/reST/api.rst	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,511 @@
+*********************************
+Application Programming Interface
+*********************************
+
+Introduction
+============
+
+x265 is written primarily in C++ and x86 assembly language but the
+public facing programming interface is C for the widest possible
+portability.  This C interface is wholly defined within :file:`x265.h`
+in the source/ folder of our source tree.  All of the functions and
+variables and enumerations meant to be used by the end-user are present
+in this header.
+
+Where possible, x265 has tried to keep its public API as close as
+possible to x264's public API. So those familiar with using x264 through
+its C interface will find x265 quite familiar.
+
+This file is meant to be read in-order; the narrative follows linearly
+through the various sections.
+
+Build Considerations
+====================
+
+The choice of Main or Main10 profile encodes is made at compile time;
+the internal pixel depth influences a great deal of variable sizes and
+thus 8 and 10bit pixels are handled as different build options
+(primarily to maintain the performance of the 8bit builds). libx265
+exports a variable **x265_max_bit_depth** which indicates how the
+library was compiled (it will contain a value of 8 or 10). Further,
+**x265_version_str** is a pointer to a string indicating the version of
+x265 which was compiled, and **x265_build_info_str** is a pointer to a
+string identifying the compiler and build options.
+
+.. Note::
+
+	**x265_version_str** is only updated when **cmake** runs. If you are
+	making binaries for others to use, it is recommended to run
+	**cmake** prior to **make** in your build scripts.
+
+x265 will accept input pixels of any depth between 8 and 16 bits
+regardless of the depth of its internal pixels (8 or 10).  It will shift
+and mask input pixels as required to reach the internal depth. If
+downshifting is being performed using our CLI application (to 8 bits),
+the :option:`--dither` option may be enabled to reduce banding. This
+feature is not available through the C interface.
+
+Encoder
+=======
+
+The primary object in x265 is the encoder object, and this is
+represented in the public API as an opaque typedef **x265_encoder**.
+Pointers of this type are passed to most encoder functions.
+
+A single encoder generates a single output bitstream from a sequence of
+raw input pictures.  Thus if you need multiple output bitstreams you
+must allocate multiple encoders.  You may pass the same input pictures
+to multiple encoders, the encode function does not modify the input
+picture structures (the pictures are copied into the encoder as the
+first step of encode).
+
+Encoder allocation is a reentrant function, so multiple encoders may be
+safely allocated in a single process. The encoder access functions are
+not reentrant for a single encoder, so the recommended use case is to
+allocate one client thread per encoder instance (one thread for all
+encoder instances is possible, but some encoder access functions are
+blocking and thus this would be less efficient).
+
+.. Note::
+
+	There is one caveat to having multiple encoders within a single
+	process. All of the encoders must use the same maximum CTU size
+	because many global variables are configured based on this size.
+	Encoder allocation will fail if a mis-matched CTU size is attempted.
+	If no encoders are open, **x265_cleanup()** can be called to reset
+	the configured CTU size so a new size can be used.
+
+An encoder is allocated by calling **x265_encoder_open()**::
+
+	/* x265_encoder_open:
+	 *      create a new encoder handler, all parameters from x265_param are copied */
+	x265_encoder* x265_encoder_open(x265_param *);
+
+The returned pointer is then passed to all of the functions pertaining
+to this encode. A large amount of memory is allocated during this
+function call, but the encoder will continue to allocate memory as the
+first pictures are passed to the encoder; until its pool of picture
+structures is large enough to handle all of the pictures it must keep
+internally.  The pool size is determined by the lookahead depth, the
+number of frame threads, and the maximum number of references.
+
+As indicated in the comment, **x265_param** is copied internally so the user
+may release their copy after allocating the encoder.  Changes made to
+their copy of the param structure have no effect on the encoder after it
+has been allocated.
+
+Param
+=====
+
+The **x265_param** structure describes everything the encoder needs to
+know about the input pictures and the output bitstream and most
+everything in between.
+
+The recommended way to handle these param structures is to allocate them
+from libx265 via::
+
+	/* x265_param_alloc:
+	 *  Allocates an x265_param instance. The returned param structure is not
+	 *  special in any way, but using this method together with x265_param_free()
+	 *  and x265_param_parse() to set values by name allows the application to treat
+	 *  x265_param as an opaque data struct for version safety */
+	x265_param *x265_param_alloc();
+
+In this way, your application does not need to know the exact size of
+the param structure (the build of x265 could potentially be a bit newer
+than the copy of :file:`x265.h` that your application compiled against).
+
+Next you perform the initial *rough cut* configuration of the encoder by
+choosing a performance preset and optional tune factor.
+**x265_preset_names** and **x265_tune_names** respectively hold the
+string names of the presets and tune factors (see :ref:`presets
+<preset-tune-ref>` for more detail on presets and tune factors)::
+
+	/*      returns 0 on success, negative on failure (e.g. invalid preset/tune name). */
+	int x265_param_default_preset(x265_param *, const char *preset, const char *tune);
+
+Now you may optionally specify a profile. **x265_profile_names**
+contains the string names this function accepts::
+
+	/*      (can be NULL, in which case the function will do nothing)
+	 *      returns 0 on success, negative on failure (e.g. invalid profile name). */
+	int x265_param_apply_profile(x265_param *, const char *profile);
+
+Finally you configure any remaining options by name using repeated calls to::
+
+	/* x265_param_parse:
+	 *  set one parameter by name.
+	 *  returns 0 on success, or returns one of the following errors.
+	 *  note: BAD_VALUE occurs only if it can't even parse the value,
+	 *  numerical range is not checked until x265_encoder_open().
+	 *  value=NULL means "true" for boolean options, but is a BAD_VALUE for non-booleans. */
+	#define X265_PARAM_BAD_NAME  (-1)
+	#define X265_PARAM_BAD_VALUE (-2)
+	int x265_param_parse(x265_param *p, const char *name, const char *value);
+
+See :ref:`string options <string-options-ref>` for the list of options (and their
+descriptions) which can be set by **x265_param_parse()**.
+
+After the encoder has been created, you may release the param structure::
+
+	/* x265_param_free:
+	 *  Use x265_param_free() to release storage for an x265_param instance
+	 *  allocated by x265_param_alloc() */
+	void x265_param_free(x265_param *);
+
+.. Note::
+
+	Using these methods to allocate and release the param structures
+	helps future-proof your code in many ways, but the x265 API is
+	versioned in such a way that we prevent linkage against a build of
+	x265 that does not match the version of the header you are compiling
+	against (unless you use x265_api_query() to acquire the library's
+	interfaces). This is a function of the X265_BUILD macro.
+
+**x265_encoder_parameters()** may be used to get a copy of the param
+structure from the encoder after it has been opened, in order to see the
+changes made to the parameters for auto-detection and other reasons::
+
+	/* x265_encoder_parameters:
+	 *      copies the current internal set of parameters to the pointer provided
+	 *      by the caller.  useful when the calling application needs to know
+	 *      how x265_encoder_open has changed the parameters.
+	 *      note that the data accessible through pointers in the returned param struct
+	 *      (e.g. filenames) should not be modified by the calling application. */
+	void x265_encoder_parameters(x265_encoder *, x265_param *);
+
+**x265_encoder_reconfig()** may be used to reconfigure encoder parameters mid-encode::
+
+	/* x265_encoder_reconfig:
+	 *       used to modify encoder parameters.
+	 *      various parameters from x265_param are copied.
+	 *      this takes effect immediately, on whichever frame is encoded next;
+	 *      returns 0 on success, negative on parameter validation error.
+	 *
+	 *      not all parameters can be changed; see the actual function for a
+	 *      detailed breakdown.  since not all parameters can be changed, moving
+	 *      from preset to preset may not always fully copy all relevant parameters,
+	 *      but should still work usably in practice. however, more so than for
+	 *      other presets, many of the speed shortcuts used in ultrafast cannot be
+	 *      switched out of; using reconfig to switch between ultrafast and other
+	 *      presets is not recommended without a more fine-grained breakdown of
+	 *      parameters to take this into account. */
+	int x265_encoder_reconfig(x265_encoder *, x265_param *);
+
+Pictures
+========
+
+Raw pictures are passed to the encoder via the **x265_picture** structure.
+Just like the param structure we recommend you allocate this structure
+from the encoder to avoid potential size mismatches::
+
+	/* x265_picture_alloc:
+	 *  Allocates an x265_picture instance. The returned picture structure is not
+	 *  special in any way, but using this method together with x265_picture_free()
+	 *  and x265_picture_init() allows some version safety. New picture fields will
+	 *  always be added to the end of x265_picture */
+	x265_picture *x265_picture_alloc();
+
+Regardless of whether you allocate your picture structure this way or
+whether you simply declare it on the stack, your next step is to
+initialize the structure via::
+
+	/***
+	 * Initialize an x265_picture structure to default values. It sets the pixel
+	 * depth and color space to the encoder's internal values and sets the slice
+	 * type to auto - so the lookahead will determine slice type.
+	 */
+	void x265_picture_init(x265_param *param, x265_picture *pic);
+
+x265 does not perform any color space conversions, so the raw picture's
+color space (chroma sampling) must match the color space specified in
+the param structure used to allocate the encoder. **x265_picture_init**
+initializes this field to the internal color space and it is best to
+leave it unmodified.
+
+The picture bit depth is initialized to be the encoder's internal bit
+depth but this value should be changed to the actual depth of the pixels
+being passed into the encoder.  If the picture bit depth is more than 8,
+the encoder assumes two bytes are used to represent each sample
+(little-endian shorts).
+
+The user is responsible for setting the plane pointers and plane strides
+(in units of bytes, not pixels). The presentation time stamp (**pts**)
+is optional, depending on whether you need accurate decode time stamps
+(**dts**) on output.
+
+If you wish to override the lookahead or rate control for a given
+picture you may specify a slicetype other than X265_TYPE_AUTO, or a
+forceQP value other than 0.
+
+x265 does not modify the picture structure provided as input, so you may
+reuse a single **x265_picture** for all pictures passed to a single
+encoder, or even all pictures passed to multiple encoders.
+
+Structures allocated from the library should eventually be released::
+
+	/* x265_picture_free:
+	 *  Use x265_picture_free() to release storage for an x265_picture instance
+	 *  allocated by x265_picture_alloc() */
+	void x265_picture_free(x265_picture *);
+
+
+Analysis Buffers
+================
+
+Analysis information can be saved and reused between encodes of the
+same video sequence (generally for multiple bitrate encodes).  The best
+results are attained by saving the analysis information of the highest
+bitrate encode and reusing it in lower bitrate encodes.
+
+When saving or loading analysis data, buffers must be allocated for
+every picture passed into the encoder using::
+
+	/* x265_alloc_analysis_data:
+	 *  Allocate memory to hold analysis meta data, returns 1 on success else 0 */
+	int x265_alloc_analysis_data(x265_picture*);
+
+Note that this is very different from the typical semantics of
+**x265_picture**, which can be reused many times. The analysis buffers must
+be re-allocated for every input picture.
+
+Analysis buffers passed to the encoder are owned by the encoder until
+they pass the buffers back via an output **x265_picture**. The user is
+responsible for releasing the buffers when they are finished with them
+via::
+
+	/* x265_free_analysis_data:
+	 *  Use x265_free_analysis_data to release storage of members allocated by
+	 *  x265_alloc_analysis_data */
+	void x265_free_analysis_data(x265_picture*);
+
+
+Encode Process
+==============
+
+The output of the encoder is a series of NAL packets, which are always
+returned concatenated in consecutive memory. HEVC streams have SPS and
+PPS and VPS headers which describe how the following packets are to be
+decoded. If you specified :option:`--repeat-headers` then those headers
+will be output with every keyframe.  Otherwise you must explicitly query
+those headers using::
+
+	/* x265_encoder_headers:
+	 *      return the SPS and PPS that will be used for the whole stream.
+	 *      *pi_nal is the number of NAL units outputted in pp_nal.
+	 *      returns negative on error, total byte size of payload data on success
+	 *      the payloads of all output NALs are guaranteed to be sequential in memory. */
+	int x265_encoder_headers(x265_encoder *, x265_nal **pp_nal, uint32_t *pi_nal);
+
+Now we get to the main encode loop. Raw input pictures are passed to the
+encoder in display order via::
+
+	/* x265_encoder_encode:
+	 *      encode one picture.
+	 *      *pi_nal is the number of NAL units outputted in pp_nal.
+	 *      returns negative on error, zero if no NAL units returned.
+	 *      the payloads of all output NALs are guaranteed to be sequential in memory. */
+	int x265_encoder_encode(x265_encoder *encoder, x265_nal **pp_nal, uint32_t *pi_nal, x265_picture *pic_in, x265_picture *pic_out);
+
+These pictures are queued up until the lookahead is full, and then the
+frame encoders in turn are filled, and then finally you begin receiving
+output NALs (corresponding to a single output picture) with each input
+picture you pass into the encoder.
+
+Once the pipeline is completely full, **x265_encoder_encode()** will
+block until the next output picture is complete.
+
+.. note:: 
+
+	Optionally, if the pointer of a second **x265_picture** structure is
+	provided, the encoder will fill it with data pertaining to the
+	output picture corresponding to the output NALs, including the
+	reconstructed image, POC and decode timestamp. These pictures will be
+	in encode (or decode) order.
+
+When the last of the raw input pictures has been sent to the encoder,
+**x265_encoder_encode()** must still be called repeatedly with a
+*pic_in* argument of 0, indicating a pipeline flush, until the function
+returns a value less than or equal to 0 (indicating the output bitstream
+is complete).
+
+At any time during this process, the application may query running
+statistics from the encoder::
+
+	/* x265_encoder_get_stats:
+	 *       returns encoder statistics */
+	void x265_encoder_get_stats(x265_encoder *encoder, x265_stats *, uint32_t statsSizeBytes);
+
+Cleanup
+=======
+
+Finally, the encoder must be closed in order to free all of its
+resources. An encoder that has been flushed cannot be restarted and
+reused. Once **x265_encoder_close()** has been called, the encoder
+handle must be discarded::
+
+	/* x265_encoder_close:
+	 *      close an encoder handler */
+	void x265_encoder_close(x265_encoder *);
+
+When the application has completed all encodes, it should call
+**x265_cleanup()** to free process global resources, particularly if a memory-leak
+detection tool is being used. **x265_cleanup()** also resets the saved
+CTU size so it will be possible to create a new encoder with a different
+CTU size::
+
+	/* x265_cleanup:
+	 *     release library static allocations, reset configured CTU size */
+	void x265_cleanup(void);
+
+
+Multi-library Interface
+=======================
+
+If your application might want to make a runtime bit-depth selection, it
+will need to use one of these bit-depth introspection interfaces which
+returns an API structure containing the public function entry points and
+constants.
+
+Instead of directly using all of the **x265_** methods documented above,
+you query an x265_api structure from your libx265 and then use the
+function pointers of the same name (minus the **x265_** prefix) within
+that structure.  For instance **x265_param_default()** becomes
+**api->param_default()**.
+
+x265_api_get
+------------
+
+The first bit-depth introspection method is x265_api_get(). It is designed
+for applications that might statically link with libx265, or will at
+least be tied to a particular SONAME or API version::
+
+	/* x265_api_get:
+	 *   Retrieve the programming interface for a linked x265 library.
+	 *   May return NULL if no library is available that supports the
+	 *   requested bit depth. If bitDepth is 0, the function is guaranteed
+	 *   to return a non-NULL x265_api pointer from the system default
+	 *   libx265 */
+	const x265_api* x265_api_get(int bitDepth);
+
+Like **x265_encoder_encode()**, this function has the build number
+automatically appended to the function name via macros. This ties your
+application to a particular binary API version of libx265 (the one you
+compile against). If you attempt to link with a libx265 with a different
+API version number, the link will fail.
+
+Obviously this has no meaningful effect on applications which statically
+link to libx265.
+
+x265_api_query
+--------------
+
+The second bit-depth introspection method is designed for applications
+which need more flexibility in API versioning.  If you use
+**x265_api_query()** and dynamically link to libx265 at runtime (using
+dlopen() on POSIX or LoadLibrary() on Windows) your application is no
+longer directly tied to the API version that it was compiled against::
+
+	/* x265_api_query:
+	 *   Retrieve the programming interface for a linked x265 library, like
+	 *   x265_api_get(), except this function accepts X265_BUILD as the second
+	 *   argument rather than using the build number as part of the function name.
+	 *   Applications which dynamically link to libx265 can use this interface to
+	 *   query the library API and achieve a relative amount of version skew
+	 *   flexibility. The function may return NULL if the library determines that
+	 *   the apiVersion that your application was compiled against is not compatible
+	 *   with the library you have linked with.
+	 *
+	 *   api_major_version will be incremented any time non-backward compatible
+	 *   changes are made to any public structures or functions. If
+	 *   api_major_version does not match X265_MAJOR_VERSION from the x265.h your
+	 *   application compiled against, your application must not use the returned
+	 *   x265_api pointer.
+	 *
+	 *   Users of this API *must* also validate the sizes of any structures which
+	 *   are not treated as opaque in application code. For instance, if your
+	 *   application dereferences a x265_param pointer, then it must check that
+	 *   api->sizeof_param matches the sizeof(x265_param) that your application
+	 *   compiled with. */
+	const x265_api* x265_api_query(int bitDepth, int apiVersion, int* err);
+
+A number of validations must be performed on the returned API structure
+in order to determine if it is safe for use by your application. If you
+do not perform these checks, your application is liable to crash::
+
+	if (api->api_major_version != X265_MAJOR_VERSION) /* do not use */
+	if (api->sizeof_param != sizeof(x265_param))      /* do not use */
+	if (api->sizeof_picture != sizeof(x265_picture))  /* do not use */
+	if (api->sizeof_stats != sizeof(x265_stats))      /* do not use */
+	if (api->sizeof_zone != sizeof(x265_zone))        /* do not use */
+	etc.
+
+Note that if your application does not directly allocate or dereference
+one of these structures, if it treats the structure as opaque or does
+not use it at all, then it can skip the size check for that structure.
+
+In particular, if your application uses api->param_alloc(),
+api->param_free(), api->param_parse(), etc and never directly accesses
+any x265_param fields, then it can skip the check on the
+sizeof(x265_param) and thereby ignore changes to that structure (which
+account for a large percentage of X265_BUILD bumps).
+
+Build Implications
+------------------
+
+By default libx265 will place all of its internal C++ classes and
+functions within an x265 namespace and export all of the C functions
+documented in this file. Obviously this prevents 8bit and 10bit builds
+of libx265 from being statically linked into a single binary, all of
+those symbols would collide.
+
+However, if you set the EXPORT_C_API cmake option to OFF then libx265
+will use a bit-depth specific namespace and prefix for its assembly
+functions (x265_8bit, x265_10bit or x265_12bit) and export no C
+functions.
+
+In this way you can build one or more libx265 libraries without any
+exported C interface and link them into a libx265 build that does export
+a C interface. The build which exported the C functions becomes the
+*default* bit depth for the combined library, and the other bit depths
+are available via the bit-depth introspection methods.
+
+.. Note::
+
+	When setting EXPORT_C_API cmake option to OFF, it is recommended to
+	also set ENABLE_SHARED and ENABLE_CLI to OFF to prevent build
+	problems.  We only need the static library from these builds.
+
+If an application requests a bit-depth that is not supported by the
+default library or any of the additionally linked libraries, the
+introspection method will fall-back to an attempt to dynamically bind a
+shared library with a name appropriate for the requested bit-depth::
+
+	8-bit:  libx265_main
+	10-bit: libx265_main10
+	12-bit: libx265_main12
+
+If the profile-named library is not found, it will then try to bind a
+generic libx265 in the hopes that it is a multilib library with all bit
+depths.
+
+Packaging and Distribution
+--------------------------
+
+We recommend that packagers distribute a single combined shared/static
+library build which includes all the bit depth libraries linked
+together. See the multilib scripts in our :file:`build/` subdirectories
+for examples of how to effect these combined library builds. It is the
+packager's discretion which bit-depth exports the public C functions and
+thus becomes the default bit-depth for the combined library.
+
+.. Note::
+
+	Windows packagers might want to build libx265 with WINXP_SUPPORT
+	enabled. This makes the resulting binaries functional on XP and
+	Vista. Without this flag, the minimum supported host O/S is Windows
+	7. Also note that binaries built with WINXP_SUPPORT will *not* have
+	NUMA support and they will have slightly less performance.
+
+	STATIC_LINK_CRT is also recommended so end-users will not need to
+	install any additional MSVC C runtime libraries.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/reST/cli.rst	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1770 @@
+*********************
+Command Line Options
+*********************
+
+.. _string-options-ref:
+
+Note that unless an option is listed as **CLI ONLY** the option is also
+supported by x265_param_parse(). The CLI uses getopt to parse the
+command line options so the short or long versions may be used and the
+long options may be truncated to the shortest unambiguous abbreviation.
+Users of the API must pass x265_param_parse() the full option name.
+
+Preset and tune have special implications. The API user must call
+x265_param_default_preset() with the preset and tune parameters they
+wish to use, prior to calling x265_param_parse() to set any additional
+fields. The CLI does this for the user implicitly, so all CLI options
+are applied after the user's preset and tune choices, regardless of the
+order of the arguments on the command line.
+
+If there is an extra command line argument (not an option or an option
+value) the CLI will treat it as the input filename.  This effectively
+makes the :option:`--input` specifier optional for the input file. If
+there are two extra arguments, the second is treated as the output
+bitstream filename, making :option:`--output` also optional if the input
+filename was implied. This makes :command:`x265 in.y4m out.hevc` a valid
+command line. If there are more than two extra arguments, the CLI will
+consider this an error and abort.
+
+Generally, when an option expects a string value from a list of strings
+the user may specify the integer ordinal of the value they desire, i.e.:
+:option:`--log-level` 3 is equivalent to :option:`--log-level` debug.
+
+Executable Options
+==================
+
+.. option:: --help, -h
+
+	Display help text
+
+	**CLI ONLY**
+
+.. option:: --version, -V
+
+	Display version details
+
+	**CLI ONLY**
+
+Command line executable return codes::
+
+	0. encode successful
+	1. unable to parse command line
+	2. unable to open encoder
+	3. unable to generate stream headers
+	4. encoder abort
+	5. unable to open csv file
+
+Logging/Statistic Options
+=========================
+
+.. option:: --log-level <integer|string>
+
+	Logging level. Debug level enables per-frame QP, metric, and bitrate
+	logging. If a CSV file is being generated, frame level makes the log
+	be per-frame rather than per-encode. Full level enables hash and
+	weight logging. -1 disables all logging, except certain fatal
+	errors, and can be specified by the string "none".
+
+	0. error
+	1. warning
+	2. info **(default)**
+	3. debug
+	4. full
+
+.. option:: --no-progress
+
+	Disable periodic progress reports from the CLI
+
+	**CLI ONLY**
+
+.. option:: --csv <filename>
+
+	Writes encoding results to a comma separated value log file. Creates
+	the file if it doesn't already exist. If :option:`--csv-log-level` is 0,
+	it adds one line per run. If :option:`--csv-log-level` is greater than
+	0, it writes one line per frame. Default none
+
+	Several frame performance statistics are available when 
+	:option:`--csv-log-level` is greater than or equal to 2:
+
+	**DecideWait ms** number of milliseconds the frame encoder had to
+	wait, since the previous frame was retrieved by the API thread,
+	before a new frame has been given to it. This is the latency
+	introduced by slicetype decisions (lookahead).
+	
+	**Row0Wait ms** number of milliseconds since the frame encoder
+	received a frame to encode before its first row of CTUs is allowed
+	to begin compression. This is the latency introduced by reference
+	frames making reconstructed and filtered rows available.
+	
+	**Wall time ms** number of milliseconds between the first CTU
+	being ready to be compressed and the entire frame being compressed
+	and the output NALs being completed.
+	
+	**Ref Wait Wall ms** number of milliseconds between the first
+	reference row being available and the last reference row becoming
+	available.
+	
+	**Total CTU time ms** the total time (measured in milliseconds)
+	spent by worker threads compressing and filtering CTUs for this
+	frame.
+	
+	**Stall Time ms** the number of milliseconds of the reported wall
+	time that were spent with zero worker threads, aka all compression
+	was completely stalled.
+
+	**Avg WPP** the average number of worker threads working on this
+	frame, at any given time. This value is sampled at the completion of
+	each CTU. This shows the effectiveness of Wavefront Parallel
+	Processing.
+
+	**Row Blocks** the number of times a worker thread had to abandon
+	the row of CTUs it was encoding because the row above it was not far
+	enough ahead for the necessary reference data to be available. This
+	is more of a problem for P frames where some blocks are much more
+	expensive than others.
+	
+	**CLI ONLY**
+
+.. option:: --csv-log-level <integer>
+
+	CSV logging level. Default 0
+
+	0. summary
+	1. frame level logging
+	2. frame level logging with performance statistics
+
+	**CLI ONLY**
+
+.. option:: --ssim, --no-ssim
+
+	Calculate and report Structural Similarity values. It is
+	recommended to use :option:`--tune` ssim if you are measuring ssim,
+	else the results should not be used for comparison purposes.
+	Default disabled
+
+.. option:: --psnr, --no-psnr
+
+	Calculate and report Peak Signal to Noise Ratio.  It is recommended
+	to use :option:`--tune` psnr if you are measuring PSNR, else the
+	results should not be used for comparison purposes.  Default
+	disabled
+
+Performance Options
+===================
+
+.. option:: --asm <integer:false:string>, --no-asm
+
+	x265 will use all detected CPU SIMD architectures by default. You can
+	disable all assembly by using :option:`--no-asm` or you can specify
+	a comma separated list of SIMD architectures to use, matching these
+	strings: MMX2, SSE, SSE2, SSE3, SSSE3, SSE4, SSE4.1, SSE4.2, AVX, XOP, FMA4, AVX2, FMA3
+
+	Some higher architectures imply lower ones being present, this is
+	handled implicitly.
+
+	One may also directly supply the CPU capability bitmap as an integer.
+	
+	Note that by specifying this option you are overriding x265's CPU
+	detection and it is possible to do this wrong. You can cause encoder
+	crashes by specifying SIMD architectures which are not supported on
+	your CPU.
+
+	Default: auto-detected SIMD architectures
+
+.. option:: --frame-threads, -F <integer>
+
+	Number of concurrently encoded frames. Using a single frame thread
+	gives a slight improvement in compression, since the entire reference
+	frames are always available for motion compensation, but it has
+	severe performance implications. Default is an autodetected count
+	based on the number of CPU cores and whether WPP is enabled or not.
+
+	Over-allocation of frame threads will not improve performance, it
+	will generally just increase memory use.
+
+	**Values:** any value between 0 and 16. Default is 0, auto-detect
+
+.. option:: --pools <string>, --numa-pools <string>
+
+	Comma separated list of threads per NUMA node. If "none", then no worker
+	pools are created and only frame parallelism is possible. If NULL or ""
+	(default) x265 will use all available threads on each NUMA node::
+
+	'+'  is a special value indicating all cores detected on the node
+	'*'  is a special value indicating all cores detected on the node and all remaining nodes
+	'-'  is a special value indicating no cores on the node, same as '0'
+
+	example strings for a 4-node system::
+
+	""        - default, unspecified, all numa nodes are used for thread pools
+	"*"       - same as default
+	"none"    - no thread pools are created, only frame parallelism possible
+	"-"       - same as "none"
+	"10"      - allocate one pool, using up to 10 cores on node 0
+	"-,+"     - allocate one pool, using all cores on node 1
+	"+,-,+"   - allocate one pool, using only cores on nodes 0 and 2
+	"+,-,+,-" - allocate one pool, using only cores on nodes 0 and 2
+	"-,*"     - allocate one pool, using all cores on nodes 1, 2 and 3
+	"8,8,8,8" - allocate four pools with up to 8 threads in each pool
+	"8,+,+,+" - allocate two pools, the first with 8 threads on node 0, and the second with all cores on node 1,2,3
+
+	A thread pool dedicated to a given NUMA node is enabled only when the
+	number of threads to be created on that NUMA node is explicitly mentioned
+	in that corresponding position with the --pools option. Else, all threads
+	are spawned from a single pool. The total number of threads will be
+	determined by the number of threads assigned to the enabled NUMA nodes for
+	that pool. The worker threads are given affinity to all the enabled
+	NUMA nodes for that pool and may migrate between them, unless explicitly
+	specified as described above.
+
+	In the case that any threadpool has more than 64 threads, the threadpool
+	may be broken down into multiple pools of 64 threads each; on 32-bit
+	machines, this number is 32. All pools are given affinity to the NUMA
+	nodes on which the original pool had affinity. For performance reasons,
+	the last thread pool is spawned only if it has more than 32 threads for
+	64-bit machines, or 16 for 32-bit machines. If the total number of threads
+	in the system doesn't obey this constraint, we may spawn fewer threads
+	than cores, which has been empirically shown to be better for performance.
+
+	If the four pool features: :option:`--wpp`, :option:`--pmode`,
+	:option:`--pme` and :option:`--lookahead-slices` are all disabled,
+	then :option:`--pools` is ignored and no thread pools are created.
+
+	If "none" is specified, then all four of the thread pool features are
+	implicitly disabled.
+
+	Frame encoders are distributed between the available thread pools,
+	and the encoder will never generate more thread pools than
+	:option:`--frame-threads`.  The pools are used for WPP and for
+	distributed analysis and motion search.
+
+	On Windows, the native APIs offer sufficient functionality to
+	discover the NUMA topology and enforce the thread affinity that
+	libx265 needs (so long as you have not chosen to target XP or
+	Vista), but on POSIX systems it relies on libnuma for this
+	functionality. If your target POSIX system is single socket, then
+	building without libnuma is a perfectly reasonable option, as it
+	will have no effect on the runtime behavior. On a multiple-socket
+	system, a POSIX build of libx265 without libnuma will be less work
+	efficient. See :ref:`thread pools <pools>` for more detail.
+
+	Default "", one pool is created across all available NUMA nodes, with
+	one thread allocated per detected hardware thread
+	(logical CPU cores). In the case that the total number of threads is more
+	than the maximum size that ATOMIC operations can handle (32 for 32-bit
+	compiles, and 64 for 64-bit compiles), multiple thread pools may be
+	spawned subject to the performance constraint described above.
+
+	Note that the string value will need to be escaped or quoted to
+	protect against shell expansion on many platforms
+
+.. option:: --wpp, --no-wpp
+
+	Enable Wavefront Parallel Processing. The encoder may begin encoding
+	a row as soon as the row above it is at least two CTUs ahead in the
+	encode process. This gives a 3-5x gain in parallelism for about 1%
+	overhead in compression efficiency.
+
+	This feature is implicitly disabled when no thread pool is present.
+
+	Default: Enabled
+
+.. option:: --pmode, --no-pmode
+
+	Parallel mode decision, or distributed mode analysis. When enabled
+	the encoder will distribute the analysis work of each CU (merge,
+	inter, intra) across multiple worker threads. Only recommended if
+	x265 is not already saturating the CPU cores. In RD levels 3 and 4
+	it will be most effective if --rect is enabled. At RD levels 5 and
+	6 there is generally always enough work to distribute to warrant the
+	overhead, assuming your CPUs are not already saturated.
+	
+	--pmode will increase utilization without reducing compression
+	efficiency. In fact, since the modes are all measured in parallel it
+	makes certain early-outs impractical and thus you usually get
+	slightly better compression when it is enabled (at the expense of
+	not skipping improbable modes). This bypassing of early-outs can
+	cause pmode to slow down encodes, especially at faster presets.
+
+	This feature is implicitly disabled when no thread pool is present.
+
+	Default disabled
+
+.. option:: --pme, --no-pme
+
+	Parallel motion estimation. When enabled the encoder will distribute
+	motion estimation across multiple worker threads when more than two
+	references require motion searches for a given CU. Only recommended
+	if x265 is not already saturating CPU cores. :option:`--pmode` is
+	much more effective than this option, since the amount of work it
+	distributes is substantially higher. With --pme it is not unusual
+	for the overhead of distributing the work to outweigh the
+	parallelism benefits.
+	
+	This feature is implicitly disabled when no thread pool is present.
+
+	--pme will increase utilization on many core systems with no effect
+	on the output bitstream.
+	
+	Default disabled
+
+.. option:: --preset, -p <integer|string>
+
+	Sets parameters to preselected values, trading off compression efficiency against 
+	encoding speed. These parameters are applied before all other input parameters are 
+	applied, and so you can override any parameters that these values control.  See
+	:ref:`presets <presets>` for more detail.
+
+	0. ultrafast
+	1. superfast
+	2. veryfast
+	3. faster
+	4. fast
+	5. medium **(default)**
+	6. slow
+	7. slower
+	8. veryslow
+	9. placebo
+
+.. option:: --tune, -t <string>
+
+	Tune the settings for a particular type of source or situation. The changes will
+	be applied after :option:`--preset` but before all other parameters. Default none.
+	See :ref:`tunings <tunings>` for more detail.
+
+	**Values:** psnr, ssim, grain, zero-latency, fast-decode.
+
+Input/Output File Options
+=========================
+
+These options all describe the input video sequence or, in the case of
+:option:`--dither`, operations that are performed on the sequence prior
+to encode. All options dealing with files (names, formats, offsets or
+frame counts) are only applicable to the CLI application.
+
+.. option:: --input <filename>
+
+	Input filename, only raw YUV or Y4M supported. Use single dash for
+	stdin. This option name will be implied for the first "extra"
+	command line argument.
+
+	**CLI ONLY**
+
+.. option:: --y4m
+
+	Parse input stream as YUV4MPEG2 regardless of file extension,
+	primarily intended for use with stdin (ie: :option:`--input` -
+	:option:`--y4m`).  This option is implied if the input filename has
+	a ".y4m" extension
+
+	**CLI ONLY**
+
+.. option:: --input-depth <integer>
+
+	YUV only: Bit-depth of input file or stream
+
+	**Values:** any value between 8 and 16. Default is internal depth.
+
+	**CLI ONLY**
+
+.. option:: --total-frames <integer>
+
+	The number of frames intended to be encoded.  It may be left
+	unspecified, but when it is specified rate control can make use of
+	this information. It is also used to determine if an encode is
+	actually a stillpicture profile encode (single frame)
+
+.. option:: --dither
+
+	Enable high quality downscaling. Dithering is based on the diffusion
+	of errors from one row of pixels to the next row of pixels in a
+	picture. Only applicable when the input bit depth is larger than
+	8bits and internal bit depth is 8bits. Default disabled
+
+	**CLI ONLY**
+
+.. option:: --input-res <wxh>
+
+	YUV only: Source picture size [w x h]
+
+	**CLI ONLY**
+
+.. option:: --input-csp <integer|string>
+
+	YUV only: Source color space. Only i420, i422, and i444 are
+	supported at this time. The internal color space is always the
+	same as the source color space (libx265 does not support any color
+	space conversions).
+
+	0. i400
+	1. i420 **(default)**
+	2. i422
+	3. i444
+	4. nv12
+	5. nv16
+
+.. option:: --fps <integer|float|numerator/denominator>
+
+	YUV only: Source frame rate
+
+	**Range of values:** positive int or float, or num/denom
+
+.. option:: --interlace <false|tff|bff>, --no-interlace
+
+	0. progressive pictures **(default)**
+	1. top field first 
+	2. bottom field first
+
+	HEVC encodes interlaced content as fields. Fields must be provided to
+	the encoder in the correct temporal order. The source dimensions
+	must be field dimensions and the FPS must be in units of fields per
+	second. The decoder must re-combine the fields in their correct
+	orientation for display.
+
+.. option:: --seek <integer>
+
+	Number of frames to skip at start of input file. Default 0
+
+	**CLI ONLY**
+
+.. option:: --frames, -f <integer>
+
+	Number of frames of input sequence to be encoded. Default 0 (all)
+
+	**CLI ONLY**
+
+.. option:: --output, -o <filename>
+
+	Bitstream output file name. If there are two extra CLI options, the
+	first is implicitly the input filename and the second is the output
+	filename, making the :option:`--output` option optional.
+
+	The output file will always contain a raw HEVC bitstream, the CLI
+	does not support any container file formats.
+
+	**CLI ONLY**
+
+.. option:: --output-depth, -D 8|10|12
+
+	Bitdepth of output HEVC bitstream, which is also the internal bit
+	depth of the encoder. If the requested bit depth is not the bit
+	depth of the linked libx265, it will attempt to bind libx265_main
+	for an 8bit encoder, libx265_main10 for a 10bit encoder, or
+	libx265_main12 for a 12bit encoder, with the same API version as the
+	linked libx265.
+
+	If the output depth is not specified but :option:`--profile` is
+	specified, the output depth will be derived from the profile name.
+
+	**CLI ONLY**
+
+Profile, Level, Tier
+====================
+
+.. option:: --profile, -P <string>
+
+	Enforce the requirements of the specified profile, ensuring the
+	output stream will be decodable by a decoder which supports that
+	profile.  May abort the encode if the specified profile is
+	impossible to be supported by the compile options chosen for the
+	encoder (a high bit depth encoder will be unable to output
+	bitstreams compliant with Main or MainStillPicture).
+
+	The first version of the HEVC specification only described Main,
+	Main10, and MainStillPicture. All other profiles were added by the
+	Range Extensions additions in HEVC version two.
+
+	8bit profiles::
+
+	main, main-intra, mainstillpicture (or msp for short)
+	main444-8 main444-intra main444-stillpicture
+
+	10bit profiles::
+
+	main10, main10-intra
+	main422-10, main422-10-intra
+	main444-10, main444-10-intra
+
+	12bit profiles::
+
+	main12, main12-intra
+	main422-12, main422-12-intra
+	main444-12, main444-12-intra
+
+	16bit profiles::
+
+	main444-16-intra main444-16-stillpicture
+
+	**CLI ONLY**
+
+	API users must call x265_param_apply_profile() after configuring
+	their param structure. Any changes made to the param structure after
+	this call might make the encode non-compliant.
+
+	The CLI application will derive the output bit depth from the
+	profile name if :option:`--output-depth` is not specified.
+
+.. option:: --level-idc <integer|float>
+
+	Minimum decoder requirement level. Defaults to 0, which implies
+	auto-detection by the encoder. If specified, the encoder will
+	attempt to bring the encode specifications within that specified
+	level. If the encoder is unable to reach the level it issues a
+	warning and aborts the encode. If the requested requirement level is
+	higher than the actual level, the actual requirement level is
+	signaled.
+
+	Beware, specifying a decoder level will force the encoder to enable
+	VBV for constant rate factor encodes, which may introduce
+	non-determinism.
+
+	The value is specified as a float or as an integer with the level
+	times 10, for example level **5.1** is specified as "5.1" or "51",
+	and level **5.0** is specified as "5.0" or "50".
+
+	Annex A levels: 1, 2, 2.1, 3, 3.1, 4, 4.1, 5, 5.1, 5.2, 6, 6.1, 6.2, 8.5
+
+.. option:: --high-tier, --no-high-tier
+
+	If :option:`--level-idc` has been specified, the option adds the
+	intention to support the High tier of that level. If your specified
+	level does not support a High tier, a warning is issued and this
+	modifier flag is ignored. If :option:`--level-idc` has been specified,
+	but not --high-tier, then the encoder will attempt to encode at the 
+	specified level, main tier first, turning on high tier only if 
+	necessary and available at that level.
+
+	If :option:`--level-idc` has not been specified, this argument is
+	ignored.
+
+.. option:: --ref <1..16>
+
+	Max number of L0 references to be allowed. This number has a linear
+	multiplier effect on the amount of work performed in motion search,
+	but will generally have a beneficial effect on compression and
+	distortion.
+	
+	Note that x265 allows up to 16 L0 references but the HEVC
+	specification only allows a maximum of 8 total reference frames. So
+	if you have B frames enabled only 7 L0 refs are valid and if you
+	have :option:`--b-pyramid` enabled (which is enabled by default in
+	all presets), then only 6 L0 refs are the maximum allowed by the
+	HEVC specification.  If x265 detects that the total reference count
+	is greater than 8, it will issue a warning that the resulting stream
+	is non-compliant and it signals the stream as profile NONE and level
+	NONE and will abort the encode unless
+	:option:`--allow-non-conformance` is specified.  Compliant HEVC
+	decoders may refuse to decode such streams.
+	
+	Default 3
+
+.. option:: --allow-non-conformance, --no-allow-non-conformance
+
+	Allow libx265 to generate a bitstream with profile and level NONE.
+	By default it will abort any encode which does not meet strict level
+	compliance. The most likely causes for non-conformance are
+	:option:`--ctu` being too small, :option:`--ref` being too high,
+	or the bitrate or resolution being out of specification.
+
+	Default: disabled
+
+.. note::
+
+	:option:`--profile`, :option:`--level-idc`, and
+	:option:`--high-tier` are only intended for use when you are
+	targeting a particular decoder (or decoders) with fixed resource
+	limitations and must constrain the bitstream within those limits.
+	Specifying a profile or level may lower the encode quality
+	parameters to meet those requirements but it will never raise
+	them. It may enable VBV constraints on a CRF encode.
+
+	Also note that x265 determines the decoder requirement profile and
+	level in three steps.  First, the user configures an x265_param
+	structure with their suggested encoder options and then optionally
+	calls x265_param_apply_profile() to enforce a specific profile
+	(main, main10, etc). Second, an encoder is created from this
+	x265_param instance and the :option:`--level-idc` and
+	:option:`--high-tier` parameters are used to reduce bitrate or other
+	features in order to enforce the target level. Finally, the encoder
+	re-examines the final set of parameters and detects the actual
+	minimum decoder requirement level and this is what is signaled in
+	the bitstream headers. The detected decoder level will only use High
+	tier if the user specified a High tier level.
+
+	The signaled profile will be determined by the encoder's internal
+	bitdepth and input color space. If :option:`--keyint` is 0 or 1,
+	then an intra variant of the profile will be signaled.
+
+	If :option:`--total-frames` is 1, then a stillpicture variant will
+	be signaled, but this parameter is not always set by applications,
+	particularly not when the CLI uses stdin streaming or when libx265
+	is used by third-party applications.
+
+
+Mode decision / Analysis
+========================
+
+.. option:: --rd <0..6>
+
+	Level of RDO in mode decision. The higher the value, the more
+	exhaustive the analysis and the more rate distortion optimization is
+	used. The lower the value the faster the encode, the higher the
+	value the smaller the bitstream (in general). Default 3
+
+	Note that this table aims for accuracy, but is not necessarily our
+	final target behavior for each mode.
+
+	+-------+---------------------------------------------------------------+
+	| Level | Description                                                   |
+	+=======+===============================================================+
+	| 0     | sa8d mode and split decisions, intra w/ source pixels         |
+	+-------+---------------------------------------------------------------+
+	| 1     | recon generated (better intra), RDO merge/skip selection      |
+	+-------+---------------------------------------------------------------+
+	| 2     | RDO splits and merge/skip selection                           |
+	+-------+---------------------------------------------------------------+
+	| 3     | RDO mode and split decisions, chroma residual used for sa8d   |
+	+-------+---------------------------------------------------------------+
+	| 4     | Currently same as 3                                           |
+	+-------+---------------------------------------------------------------+
+	| 5     | Adds RDO prediction decisions                                 |
+	+-------+---------------------------------------------------------------+
+	| 6     | Currently same as 5                                           |
+	+-------+---------------------------------------------------------------+
+
+	**Range of values:** 0: least .. 6: full RDO analysis
+
+Options which affect the coding unit quad-tree, sometimes referred to as
+the prediction quad-tree.
+
+.. option:: --ctu, -s <64|32|16>
+
+	Maximum CU size (width and height). The larger the maximum CU size,
+	the more efficiently x265 can encode flat areas of the picture,
+	giving large reductions in bitrate. However this comes at a loss of
+	parallelism with fewer rows of CUs that can be encoded in parallel,
+	and less frame parallelism as well. Because of this the faster
+	presets use a CU size of 32. Default: 64
+
+.. option:: --min-cu-size <64|32|16|8>
+
+	Minimum CU size (width and height). By using 16 or 32 the encoder
+	will not analyze the cost of CUs below that minimum threshold,
+	saving considerable amounts of compute with a predictable increase
+	in bitrate. This setting has a large effect on performance on the
+	faster presets.
+
+	Default: 8 (minimum 8x8 CU for HEVC, best compression efficiency)
+
+.. note::
+
+	All encoders within a single process must use the same settings for
+	the CU size range. :option:`--ctu` and :option:`--min-cu-size` must
+	be consistent for all of them since the encoder configures several
+	key global data structures based on this range.
+
+.. option:: --limit-refs <0|1|2|3>
+
+	When set to X265_REF_LIMIT_DEPTH (1) x265 will limit the references
+	analyzed at the current depth based on the references used to code
+	the 4 sub-blocks at the next depth.  For example, a 16x16 CU will
+	only use the references used to code its four 8x8 CUs.
+
+	When set to X265_REF_LIMIT_CU (2), the rectangular and asymmetrical
+	partitions will only use references selected by the 2Nx2N motion
+	search (including at the lowest depth which is otherwise unaffected
+	by the depth limit).
+
+	When set to 3 (X265_REF_LIMIT_DEPTH && X265_REF_LIMIT_CU), the 2Nx2N 
+	motion search at each depth will only use references from the split 
+	CUs and the rect/amp motion searches at that depth will only use the 
+	reference(s) selected by 2Nx2N. 
+
+	For all non-zero values of limit-refs, the current depth will evaluate
+	intra mode (in inter slices), only if intra mode was chosen as the best
+	mode for at least one of the 4 sub-blocks.
+
+	You can often increase the number of references you are using
+	(within your decoder level limits) if you enable one or
+	both of these flags.
+
+	This feature is EXPERIMENTAL and functional at all RD levels.
+
+.. option:: --rect, --no-rect
+
+	Enable analysis of rectangular motion partitions Nx2N and 2NxN
+	(50/50 splits, two directions). Default disabled
+
+.. option:: --amp, --no-amp
+
+	Enable analysis of asymmetric motion partitions (75/25 splits, four
+	directions). At RD levels 0 through 4, AMP partitions are only
+	considered at CU sizes 32x32 and below. At RD levels 5 and 6, it
+	will only consider AMP partitions as merge candidates (no motion
+	search) at 64x64, and as merge or inter candidates below 64x64.
+
+	The AMP partitions which are searched are derived from the current
+	best inter partition. If Nx2N (vertical rectangular) is the best
+	current prediction, then left and right asymmetrical splits will be
+	evaluated. If 2NxN (horizontal rectangular) is the best current
+	prediction, then top and bottom asymmetrical splits will be
+	evaluated. If 2Nx2N is the best prediction, and the block is not a
+	merge/skip, then all four AMP partitions are evaluated.
+
+	This setting has no effect if rectangular partitions are disabled.
+	Default disabled
+
+.. option:: --early-skip, --no-early-skip
+
+	Measure full CU size (2Nx2N) merge candidates first; if no residual
+	is found the analysis is short circuited. Default disabled
+
+.. option:: --fast-intra, --no-fast-intra
+
+	Perform an initial scan of every fifth intra angular mode, then
+	check modes +/- 2 distance from the best mode, then +/- 1 distance
+	from the best mode, effectively performing a gradient descent. When
+	enabled 10 modes in total are checked. When disabled all 33 angular
+	modes are checked.  Only applicable for :option:`--rd` levels 4 and
+	below (medium preset and faster).
+
+.. option:: --b-intra, --no-b-intra
+
+	Enables the evaluation of intra modes in B slices. Default disabled.
+
+.. option:: --cu-lossless, --no-cu-lossless
+
+	For each CU, evaluate lossless (transform and quant bypass) encode
+	of the best non-lossless mode option as a potential rate distortion
+	optimization. If the global option :option:`--lossless` has been
+	specified, all CUs will be encoded as lossless unconditionally
+	regardless of whether this option was enabled. Default disabled.
+
+	Only effective at RD levels 3 and above, which perform RDO mode
+	decisions.
+
+.. option:: --tskip-fast, --no-tskip-fast
+
+	Only evaluate transform skip for NxN intra predictions (4x4 blocks).
+	Only applicable if transform skip is enabled. For chroma, only
+	evaluate if luma used tskip. Inter block tskip analysis is
+	unmodified. Default disabled
+
+Analysis re-use options, to improve performance when encoding the same
+sequence multiple times (presumably at varying bitrates). The encoder
+will not reuse analysis if the resolution and slice type parameters do
+not match.
+
+.. option:: --analysis-mode <string|int>
+
+	Specify whether analysis information of each frame is output by encoder
+	or input for reuse. By reading the analysis data written by an
+	earlier encode of the same sequence, substantial redundant work may
+	be avoided.
+
+	The following data may be stored and reused:
+	I frames   - split decisions and luma intra directions of all CUs.
+	P/B frames - motion vectors are dumped at each depth for all CUs.
+
+	**Values:** off(0), save(1): dump analysis data, load(2): read analysis data
+
+.. option:: --analysis-file <filename>
+
+	Specify a filename for analysis data (see :option:`--analysis-mode`)
+	If no filename is specified, x265_analysis.dat is used.
+
+Options which affect the transform unit quad-tree, sometimes referred to
+as the residual quad-tree (RQT).
+
+.. option:: --rdoq-level <0|1|2>, --no-rdoq-level
+
+	Specify the amount of rate-distortion analysis to use within
+	quantization::
+
+	At level 0 rate-distortion cost is not considered in quant
+	
+	At level 1 rate-distortion cost is used to find optimal rounding
+	values for each level (and allows psy-rdoq to be effective). It
+	trades-off the signaling cost of the coefficient vs its post-inverse
+	quant distortion from the pre-quant coefficient. When
+	:option:`--psy-rdoq` is enabled, this formula is biased in favor of
+	more energy in the residual (larger coefficient absolute levels)
+	
+	At level 2 rate-distortion cost is used to make decimate decisions
+	on each 4x4 coding group, including the cost of signaling the group
+	within the group bitmap. If the total distortion of not signaling
+	the entire coding group is less than the rate cost, the block is
+	decimated. Next, it applies rate-distortion cost analysis to the
+	last non-zero coefficient, which can result in many (or all) of the
+	coding groups being decimated. Psy-rdoq is less effective at
+	preserving energy when RDOQ is at level 2, since it only has
+	influence over the level distortion costs.
+
+.. option:: --tu-intra-depth <1..4>
+
+	The transform unit (residual) quad-tree begins with the same depth
+	as the coding unit quad-tree, but the encoder may decide to further
+	split the transform unit tree if it improves compression efficiency.
+	This setting limits the number of extra recursion depth which can be
+	attempted for intra coded units. Default: 1, which means the
+	residual quad-tree is always at the same depth as the coded unit
+	quad-tree
+	
+	Note that when the CU intra prediction is NxN (only possible with
+	8x8 CUs), a TU split is implied, and thus the residual quad-tree
+	begins at 4x4 and cannot split any further.
+
+.. option:: --tu-inter-depth <1..4>
+
+	The transform unit (residual) quad-tree begins with the same depth
+	as the coding unit quad-tree, but the encoder may decide to further
+	split the transform unit tree if it improves compression efficiency.
+	This setting limits the number of extra recursion depth which can be
+	attempted for inter coded units. Default: 1. which means the
+	residual quad-tree is always at the same depth as the coded unit
+	quad-tree unless the CU was coded with rectangular or AMP
+	partitions, in which case a TU split is implied and thus the
+	residual quad-tree begins one layer below the CU quad-tree.
+
+.. option:: --nr-intra <integer>, --nr-inter <integer>
+
+	Noise reduction - an adaptive deadzone applied after DCT
+	(subtracting from DCT coefficients), before quantization.  It does
+	no pixel-level filtering, doesn't cross DCT block boundaries, has no
+	overlap. The higher the strength value parameter, the more
+	aggressively it will reduce noise.
+
+	Enabling noise reduction will make outputs diverge between different
+	numbers of frame threads. Outputs will be deterministic but the
+	outputs of -F2 will no longer match the outputs of -F3, etc.
+
+	**Values:** any value in range of 0 to 2000. Default 0 (disabled).
+
+.. option:: --tskip, --no-tskip
+
+	Enable evaluation of transform skip (bypass DCT but still use
+	quantization) coding for 4x4 TU coded blocks.
+
+	Only effective at RD levels 3 and above, which perform RDO mode
+	decisions. Default disabled
+
+.. option:: --rdpenalty <0..2>
+
+	When set to 1, transform units of size 32x32 are given a 4x bit cost
+	penalty compared to smaller transform units, in intra coded CUs in P
+	or B slices.
+
+	When set to 2, transform units of size 32x32 are not even attempted,
+	unless otherwise required by the maximum recursion depth.  For this
+	option to be effective with 32x32 intra CUs,
+	:option:`--tu-intra-depth` must be at least 2.  For it to be
+	effective with 64x64 intra CUs, :option:`--tu-intra-depth` must be
+	at least 3.
+
+	Note that in HEVC an intra transform unit (a block of the residual
+	quad-tree) is also a prediction unit, meaning that the intra
+	prediction signal is generated for each TU block, the residual
+	subtracted and then coded. The coding unit simply provides the
+	prediction modes that will be used when predicting all of the
+	transform units within the CU. This means that when you prevent
+	32x32 intra transform units, you are preventing 32x32 intra
+	predictions.
+
+	Default 0, disabled.
+
+	**Values:** 0:disabled 1:4x cost penalty 2:force splits
+
+.. option:: --max-tu-size <32|16|8|4>
+
+	Maximum TU size (width and height). The residual can be more
+	efficiently compressed by the DCT transform when the max TU size
+	is larger, but at the expense of more computation. Transform unit
+	quad-tree begins at the same depth of the coded tree unit, but if the
+	maximum TU size is smaller than the CU size then transform QT begins 
+	at the depth of the max-tu-size. Default: 32.
+
+Temporal / motion search options
+================================
+
+.. option:: --max-merge <1..5>
+
+	Maximum number of neighbor (spatial and temporal) candidate blocks
+	that the encoder may consider for merging motion predictions. If a
+	merge candidate results in no residual, it is immediately selected
+	as a "skip".  Otherwise the merge candidates are tested as part of
+	motion estimation when searching for the least cost inter option.
+	The max candidate number is encoded in the SPS and determines the
+	bit cost of signaling merge CUs. Default 2
+
+.. option:: --me <integer|string>
+
+	Motion search method. Generally, the higher the number the harder
+	the ME method will try to find an optimal match. Diamond search is
+	the simplest. Hexagon search is a little better. Uneven
+	Multi-Hexagon is an adaptation of the search method used by x264 for
+	slower presets. Star is a three step search adapted from the HM
+	encoder: a star-pattern search followed by an optional radix scan
+	followed by an optional star-search refinement. Full is an
+	exhaustive search; an order of magnitude slower than all other
+	searches but not much better than umh or star.
+
+	0. dia
+	1. hex **(default)**
+	2. umh
+	3. star
+	4. full
+
+.. option:: --subme, -m <0..7>
+
+	Amount of subpel refinement to perform. The higher the number the
+	more subpel iterations and steps are performed. Default 2
+
+	+----+------------+-----------+------------+-----------+-----------+
+	| -m | HPEL iters | HPEL dirs | QPEL iters | QPEL dirs | HPEL SATD |
+	+====+============+===========+============+===========+===========+
+	|  0 | 1          | 4         | 0          | 4         | false     |
+	+----+------------+-----------+------------+-----------+-----------+
+	|  1 | 1          | 4         | 1          | 4         | false     |
+	+----+------------+-----------+------------+-----------+-----------+
+	|  2 | 1          | 4         | 1          | 4         | true      |
+	+----+------------+-----------+------------+-----------+-----------+
+	|  3 | 2          | 4         | 1          | 4         | true      |
+	+----+------------+-----------+------------+-----------+-----------+
+	|  4 | 2          | 4         | 2          | 4         | true      |
+	+----+------------+-----------+------------+-----------+-----------+
+	|  5 | 1          | 8         | 1          | 8         | true      |
+	+----+------------+-----------+------------+-----------+-----------+
+	|  6 | 2          | 8         | 1          | 8         | true      |
+	+----+------------+-----------+------------+-----------+-----------+
+	|  7 | 2          | 8         | 2          | 8         | true      |
+	+----+------------+-----------+------------+-----------+-----------+
+
+	At --subme values larger than 2, chroma residual cost is included
+	in all subpel refinement steps and chroma residual is included in
+	all motion estimation decisions (selecting the best reference
+	picture in each list, and choosing between merge, uni-directional
+	motion and bi-directional motion). The 'slow' preset is the first
+	preset to enable the use of chroma residual.
+
+.. option:: --merange <integer>
+
+	Motion search range. Default 57
+
+	The default is derived from the default CTU size (64) minus the luma
+	interpolation half-length (4) minus maximum subpel distance (2)
+	minus one extra pixel just in case the hex search method is used. If
+	the search range were any larger than this, another CTU row of
+	latency would be required for reference frames.
+
+	**Range of values:** an integer from 0 to 32768
+
+.. option:: --temporal-mvp, --no-temporal-mvp
+
+	Enable temporal motion vector predictors in P and B slices.
+	This enables the use of the motion vector from the collocated block
+	in the previous frame to be used as a predictor. Default is enabled
+
+.. option:: --weightp, -w, --no-weightp
+
+	Enable weighted prediction in P slices. This enables weighting
+	analysis in the lookahead, which influences slice decisions, and
+	enables weighting analysis in the main encoder which allows P
+	reference samples to have a weight function applied to them prior to
+	using them for motion compensation.  In video which has lighting
+	changes, it can give a large improvement in compression efficiency.
+	Default is enabled
+
+.. option:: --weightb, --no-weightb
+
+	Enable weighted prediction in B slices. Default disabled
+
+Spatial/intra options
+=====================
+
+.. option:: --strong-intra-smoothing, --no-strong-intra-smoothing
+
+	Enable strong intra smoothing for 32x32 intra blocks. This flag 
+	performs bi-linear interpolation of the corner reference samples 
+	for a strong smoothing effect. The purpose is to prevent blocking 
+	or banding artifacts in regions with few/zero AC coefficients. 
+	Default enabled
+
+.. option:: --constrained-intra, --no-constrained-intra
+
+	Constrained intra prediction. When generating intra predictions for
+	blocks in inter slices, only intra-coded reference pixels are used.
+	Inter-coded reference pixels are replaced with intra-coded neighbor
+	pixels or default values. The general idea is to block the
+	propagation of reference errors that may have resulted from lossy
+	signals. Default disabled
+
+Psycho-visual options
+=====================
+
+Left to its own devices, the encoder will make mode decisions based on a
+simple rate distortion formula, trading distortion for bitrate. This is
+generally effective except for the manner in which this distortion is
+measured. It tends to favor blurred reconstructed blocks over blocks
+which have wrong motion. The human eye generally prefers the wrong
+motion over the blur and thus x265 offers psycho-visual adjustments to
+the rate distortion algorithm.
+
+:option:`--psy-rd` will add an extra cost to reconstructed blocks which
+do not match the visual energy of the source block. The higher the
+strength of :option:`--psy-rd` the more strongly it will favor similar
+energy over blur and the more aggressively it will ignore rate
+	distortion. If it is too high, it will introduce visual artifacts and
+increase bitrate enough for rate control to increase quantization
+globally, reducing overall quality. psy-rd will tend to reduce the use
+of blurred prediction modes, like DC and planar intra and bi-directional
+inter prediction.
+
+:option:`--psy-rdoq` will adjust the distortion cost used in
+rate-distortion optimized quantization (RDO quant), enabled by
+:option:`--rdoq-level` 1 or 2, favoring the preservation of energy in the
+reconstructed image.  :option:`--psy-rdoq` prevents RDOQ from blurring
+	all of the encoding options which psy-rd has to choose from.  At low
+strength levels, psy-rdoq will influence the quantization level
+decisions, favoring higher AC energy in the reconstructed image. As
+psy-rdoq strength is increased, more non-zero coefficient levels are
+added and fewer coefficients are zeroed by RDOQ's rate distortion
+analysis. High levels of psy-rdoq can double the bitrate which can have
+a drastic effect on rate control, forcing higher overall QP, and can
+cause ringing artifacts. psy-rdoq is less accurate than psy-rd, it is
+biasing towards energy in general while psy-rd biases towards the energy
+of the source image. But very large psy-rdoq values can sometimes be
+beneficial, preserving film grain for instance.
+
+As a general rule, when both psycho-visual features are disabled, the
+encoder will tend to blur blocks in areas of difficult motion. Turning
+on small amounts of psy-rd and psy-rdoq will improve the perceived
+visual quality. Increasing psycho-visual strength further will improve
+quality and begin introducing artifacts and increase bitrate, which may
+force rate control to increase global QP. Finding the optimal
+psycho-visual parameters for a given video requires experimentation. Our
+recommended defaults (1.0 for both) are generally on the low end of the
+spectrum.
+
+The lower the bitrate, the lower the optimal psycho-visual settings. If
+the bitrate is too low for the psycho-visual settings, you will begin to
+see temporal artifacts (motion judder). This is caused when the encoder
+is forced to code skip blocks (no residual) in areas of difficult motion
+because it is the best option psycho-visually (they have great amounts
+of energy and no residual cost). One can lower psy-rd settings when
+judder is happening, and allow the encoder to use some blur in these
+areas of high motion.
+
+.. option:: --psy-rd <float>
+
+	Influence rate distortion optimized mode decision to preserve the
+	energy of the source image in the encoded image at the expense of
+	compression efficiency. It only has effect on presets which use
+	RDO-based mode decisions (:option:`--rd` 3 and above). 1.0 is a
+	typical value. Default 0.3
+
+	**Range of values:** 0 .. 2.0
+
+.. option:: --psy-rdoq <float>
+
+	Influence rate distortion optimized quantization by favoring higher
+	energy in the reconstructed image. This generally improves perceived
+	visual quality at the cost of lower quality metric scores.  It only
+	has effect when :option:`--rdoq-level` is 1 or 2. High values can
+	be beneficial in preserving high-frequency detail like film grain.
+	Default: 1.0
+
+	**Range of values:** 0 .. 50.0
+
+
+Slice decision options
+======================
+
+.. option:: --open-gop, --no-open-gop
+
+	Enable open GOP, allow I-slices to be non-IDR. Default enabled
+
+.. option:: --keyint, -I <integer>
+
+	Max intra period in frames. A special case of infinite-gop (single
+	keyframe at the beginning of the stream) can be triggered with
+	argument -1. Use 1 to force all-intra. Default 250
+
+.. option:: --min-keyint, -i <integer>
+
+	Minimum GOP size. Scenecuts closer together than this are coded as I
+	or P, not IDR. Minimum keyint is clamped to be at least half of
+	:option:`--keyint`. If you wish to force regular keyframe intervals
+	and disable adaptive I frame placement, you must use
+	:option:`--no-scenecut`.
+
+	**Range of values:** >=0 (0: auto)
+
+.. option:: --scenecut <integer>, --no-scenecut
+
+	How aggressively I-frames need to be inserted. The higher the
+	threshold value, the more aggressive the I-frame placement.
+	:option:`--scenecut` 0 or :option:`--no-scenecut` disables adaptive
+	I frame placement. Default 40
+
+.. option:: --rc-lookahead <integer>
+
+	Number of frames for slice-type decision lookahead (a key
+	determining factor for encoder latency). The longer the lookahead
+	buffer the more accurate scenecut decisions will be, and the more
+	effective cuTree will be at improving adaptive quant. Having a
+	lookahead larger than the max keyframe interval is not helpful.
+	Default 20
+
+	**Range of values:** Between the maximum consecutive bframe count (:option:`--bframes`) and 250
+
+.. option:: --lookahead-slices <0..16>
+
+	Use multiple worker threads to measure the estimated cost of each
+	frame within the lookahead. When :option:`--b-adapt` is 2, most
+	frame cost estimates will be performed in batch mode, many cost
+	estimates at the same time, and lookahead-slices is ignored for
+	batched estimates. The effect on performance can be quite small.
+	The higher this parameter, the less accurate the frame costs will be
+	(since context is lost across slice boundaries) which will result in
+	less accurate B-frame and scene-cut decisions.
+
+	The encoder may internally lower the number of slices to ensure
+	each slice codes at least 10 16x16 rows of lowres blocks. If slices
+	are used in lookahead, they are logged in the list of tools as
+	*lslices*.
+	
+	**Values:** 0 - disabled (default). 1 is the same as 0. Max 16
+
+.. option:: --b-adapt <integer>
+
+	Set the level of effort in determining B frame placement.
+
+	With b-adapt 0, the GOP structure is fixed based on the values of
+	:option:`--keyint` and :option:`--bframes`.
+	
+	With b-adapt 1 a light lookahead is used to choose B frame placement.
+
+	With b-adapt 2 (trellis) a viterbi B path selection is performed
+
+	**Values:** 0:none; 1:fast; 2:full(trellis) **default**
+
+.. option:: --bframes, -b <0..16>
+
+	Maximum number of consecutive b-frames. Use :option:`--bframes` 0 to
+	force all P/I low-latency encodes. Default 4. This parameter has a
+	quadratic effect on the amount of memory allocated and the amount of
+	work performed by the full trellis version of :option:`--b-adapt`
+	lookahead.
+
+.. option:: --bframe-bias <integer>
+
+	Bias towards B frames in slicetype decision. The higher the bias the
+	more likely x265 is to use B frames. Can be any value between -90
+	and 100 and is clipped to that range. Default 0
+
+.. option:: --b-pyramid, --no-b-pyramid
+
+	Use B-frames as references, when possible. Default enabled
+
+Quality, rate control and rate distortion options
+=================================================
+
+.. option:: --bitrate <integer>
+
+	Enables single-pass ABR rate control. Specify the target bitrate in
+	kbps. Default is 0 (CRF)
+
+	**Range of values:** An integer greater than 0
+
+.. option:: --crf <0..51.0>
+
+	Quality-controlled variable bitrate. CRF is the default rate control
+	method; it does not try to reach any particular bitrate target,
+	instead it tries to achieve a given uniform quality and the size of
+	the bitstream is determined by the complexity of the source video.
+	The higher the rate factor the higher the quantization and the lower
+	the quality. Default rate factor is 28.0.
+
+.. option:: --crf-max <0..51.0>
+
+	Specify an upper limit to the rate factor which may be assigned to
+	any given frame (ensuring a max QP).  This is dangerous when CRF is
+	used in combination with VBV as it may result in buffer underruns.
+	Default disabled
+        
+.. option:: --crf-min <0..51.0>
+
+	Specify a lower limit to the rate factor which may be assigned to
+	any given frame (ensuring a min compression factor).
+
+.. option:: --vbv-bufsize <integer>
+
+	Specify the size of the VBV buffer (kbits). Enables VBV in ABR
+	mode.  In CRF mode, :option:`--vbv-maxrate` must also be specified.
+	Default 0 (vbv disabled)
+
+.. option:: --vbv-maxrate <integer>
+
+	Maximum local bitrate (kbits/sec). Will be used only if vbv-bufsize
+	is also non-zero. Both vbv-bufsize and vbv-maxrate are required to
+	enable VBV in CRF mode. Default 0 (disabled)
+
+	Note that when VBV is enabled (with a valid :option:`--vbv-bufsize`),
+	VBV emergency denoising is turned on. This will turn on aggressive 
+	denoising at the frame level when frame QP > QP_MAX_SPEC (51), drastically
+	reducing bitrate and allowing ratecontrol to assign lower QPs for
+	the following frames. The visual effect is blurring, but removes 
+	significant blocking/displacement artifacts.
+
+.. option:: --vbv-init <float>
+
+	Initial buffer occupancy. The portion of the decode buffer which
+	must be full before the decoder will begin decoding.  Determines
+	absolute maximum frame size. May be specified as a fractional value
+	between 0 and 1, or in kbits. In other words these two option pairs
+	are equivalent::
+
+	--vbv-bufsize 1000 --vbv-init 900
+	--vbv-bufsize 1000 --vbv-init 0.9
+
+	Default 0.9
+
+	**Range of values:** fractional: 0 - 1.0, or kbits: 2 .. bufsize
+
+.. option:: --qp, -q <integer>
+
+	Specify base quantization parameter for Constant QP rate control.
+	Using this option enables Constant QP rate control. The specified QP
+	is assigned to P slices. I and B slices are given QPs relative to P
+	slices using param->rc.ipFactor and param->rc.pbFactor unless QP 0
+	is specified, in which case QP 0 is used for all slice types.  Note
+	that QP 0 does not cause lossless encoding, it only disables
+	quantization. Default disabled (CRF)
+
+	**Range of values:** an integer from 0 to 51
+
+.. option:: --lossless, --no-lossless
+
+	Enables true lossless coding by bypassing scaling, transform,
+	quantization and in-loop filter processes. This is used for
+	ultra-high bitrates with zero loss of quality. Reconstructed output
+	pictures are bit-exact to the input pictures. Lossless encodes
+	implicitly have no rate control, all rate control options are
+	ignored. Slower presets will generally achieve better compression
+	efficiency (and generate smaller bitstreams). Default disabled.
+
+.. option:: --aq-mode <0|1|2|3>
+
+	Adaptive Quantization operating mode. Raise or lower per-block
+	quantization based on complexity analysis of the source image. The
+	more complex the block, the more quantization is used. This offsets
+	the tendency of the encoder to spend too many bits on complex areas
+	and not enough in flat areas.
+
+	0. disabled
+	1. AQ enabled **(default)**
+	2. AQ enabled with auto-variance
+	3. AQ enabled with auto-variance and bias to dark scenes
+
+.. option:: --aq-strength <float>
+
+	Adjust the strength of the adaptive quantization offsets. Setting
+	:option:`--aq-strength` to 0 disables AQ. Default 1.0.
+
+	**Range of values:** 0.0 to 3.0
+
+.. option:: --qg-size <64|32|16>
+
+	Enable adaptive quantization for sub-CTUs. This parameter specifies 
+	the minimum CU size at which QP can be adjusted, i.e. Quantization Group
+	size. Allowed range of values are 64, 32, 16 provided this falls within 
+	the inclusive range [maxCUSize, minCUSize]. Experimental.
+	Default: same as maxCUSize
+
+.. option:: --cutree, --no-cutree
+
+	Enable the use of lookahead's lowres motion vector fields to
+	determine the amount of reuse of each block to tune adaptive
+	quantization factors. CU blocks which are heavily reused as motion
+	reference for later frames are given a lower QP (more bits) while CU
+	blocks which are quickly changed and are not referenced are given
+	less bits. This tends to improve detail in the backgrounds of video
+	with less detail in areas of high motion. Default enabled
+
+.. option:: --pass <integer>
+
+	Enable multi-pass rate control mode. Input is encoded multiple times,
+	storing the encoded information of each pass in a stats file from which
+	the consecutive pass tunes the qp of each frame to improve the quality
+	of the output. Default disabled
+
+	1. First pass, creates stats file
+	2. Last pass, does not overwrite stats file
+	3. Nth pass, overwrites stats file
+
+	**Range of values:** 1 to 3
+
+.. option:: --stats <filename>
+
+	Specify file name of the multi-pass stats file. If unspecified
+	the encoder will use x265_2pass.log
+
+.. option:: --slow-firstpass, --no-slow-firstpass
+
+	Enable a slow and more detailed first pass encode in multi-pass rate
+	control mode.  Speed of the first pass encode is slightly lesser and
+	quality mildly improved when compared to the default settings in a
+	multi-pass encode. Default disabled (turbo mode enabled)
+
+	When **turbo** first pass is not disabled, these options are
+	set on the first pass to improve performance:
+	
+	* :option:`--fast-intra`
+	* :option:`--no-rect`
+	* :option:`--no-amp`
+	* :option:`--early-skip`
+	* :option:`--ref` = 1
+	* :option:`--max-merge` = 1
+	* :option:`--me` = DIA
+	* :option:`--subme` = MIN(2, :option:`--subme`)
+	* :option:`--rd` = MIN(2, :option:`--rd`)
+
+.. option:: --strict-cbr, --no-strict-cbr
+	
+	Enables stricter conditions to control bitrate deviance from the 
+	target bitrate in ABR mode. Bit rate adherence is prioritised
+	over quality. Rate tolerance is reduced to 50%. Default disabled.
+	
+	This option is for use-cases which require the final average bitrate 
+	to be within very strict limits of the target; preventing overshoots, 
+	while keeping the bit rate within 5% of the target setting, 
+	especially in short segment encodes. Typically, the encoder stays 
+	conservative, waiting until there is enough feedback in terms of 
+	encoded frames to control QP. strict-cbr allows the encoder to be 
+	more aggressive in hitting the target bitrate even for short segment 
+	videos. Experimental.
+	
+.. option:: --cbqpoffs <integer>
+
+	Offset of Cb chroma QP from the luma QP selected by rate control.
+	This is a general way to spend more or less bits on the chroma
+	channel.  Default 0
+
+	**Range of values:** -12 to 12
+
+.. option:: --crqpoffs <integer>
+
+	Offset of Cr chroma QP from the luma QP selected by rate control.
+	This is a general way to spend more or less bits on the chroma
+	channel.  Default 0
+
+	**Range of values:**  -12 to 12
+
+.. option:: --ipratio <float>
+
+	QP ratio factor between I and P slices. This ratio is used in all of
+	the rate control modes. Some :option:`--tune` options may change the
+	default value. It is not typically manually specified. Default 1.4
+
+.. option:: --pbratio <float>
+
+	QP ratio factor between P and B slices. This ratio is used in all of
+	the rate control modes. Some :option:`--tune` options may change the
+	default value. It is not typically manually specified. Default 1.3
+
+.. option:: --qcomp <float>
+
+	qComp sets the quantizer curve compression factor. It weights the
+	frame quantizer based on the complexity of residual (measured by
+	lookahead).  Default value is 0.6. Increasing it to 1 will
+	effectively generate CQP
+
+.. option:: --qpstep <integer>
+
+	The maximum single adjustment in QP allowed to rate control. Default
+	4
+
+.. option:: --qblur <float>
+
+	Temporally blur quants. Default 0.5
+
+.. option:: --cplxblur <float>
+
+	temporally blur complexity. default 20
+
+.. option:: --zones <zone0>/<zone1>/...
+
+	Tweak the bitrate of regions of the video. Each zone takes the form:
+
+	<start frame>,<end frame>,<option> where <option> is either q=<integer>
+	(force QP) or b=<float> (bitrate multiplier).
+
+	If zones overlap, whichever comes later in the list takes precedence.
+	Default none
+
+Quantization Options
+====================
+
+Note that rate-distortion optimized quantization (RDOQ) is enabled
+implicitly at :option:`--rd` 4, 5, and 6 and disabled implicitly at all
+other levels.
+ 
+.. option:: --signhide, --no-signhide
+
+	Hide sign bit of one coeff per TU (rdo). The last sign is implied.
+	This requires analyzing all the coefficients to determine if a sign
+	must be toggled, and then to determine which one can be toggled with
+	the least amount of distortion. Default enabled
+
+.. option:: --qpfile <filename>
+
+	Specify a text file which contains frametypes and QPs for some or
+	all frames. The format of each line is:
+
+	framenumber frametype QP
+
+	Frametype can be one of [I,i,P,B,b]. **B** is a referenced B frame,
+	**b** is an unreferenced B frame.  **I** is a keyframe (random
+	access point) while **i** is a I frame that is not a keyframe
+	(references are not broken).
+
+	Specifying QP (integer) is optional, and if specified they are
+	clamped within the encoder to qpmin/qpmax.
+
+.. option:: --scaling-list <filename>
+
+	Quantization scaling lists. HEVC supports 6 quantization scaling
+	lists to be defined; one each for Y, Cb, Cr for intra prediction and
+	one each for inter prediction.
+
+	x265 does not use scaling lists by default, but this can also be
+	made explicit by :option:`--scaling-list` *off*.
+
+	HEVC specifies a default set of scaling lists which may be enabled
+	without requiring them to be signaled in the SPS. Those scaling
+	lists can be enabled via :option:`--scaling-list` *default*.
+    
+	All other strings indicate a filename containing custom scaling
+	lists in the HM format. The encode will abort if the file is not
+	parsed correctly. Custom lists must be signaled in the SPS
+
+.. option:: --lambda-file <filename>
+
+	Specify a text file containing values for x265_lambda_tab and
+	x265_lambda2_tab. Each table requires MAX_MAX_QP+1 (70) float
+	values.
+	
+	The text file syntax is simple. Comma is considered to be
+	white-space. All white-space is ignored. Lines must be less than 2k
+	bytes in length. Content following hash (#) characters are ignored.
+	The values read from the file are logged at :option:`--log-level`
+	debug.
+
+	Note that the lambda tables are process-global and so the new values
+	affect all encoders running in the same process. 
+	
+	Lambda values affect encoder mode decisions, the lower the lambda
+	the more bits it will try to spend on signaling information (motion
+	vectors and splits) and less on residual. This feature is intended
+	for experimentation.
+
+Loop filters
+============
+
+.. option:: --deblock=<int>:<int>, --no-deblock
+
+	Toggle deblocking loop filter, optionally specify deblocking
+	strength offsets.
+
+	<int>:<int> - parsed as tC offset and Beta offset
+	<int>,<int> - parsed as tC offset and Beta offset
+	<int>       - both tC and Beta offsets assigned the same value
+
+	If unspecified, the offsets default to 0. The offsets must be in a
+	range of -6 (lowest strength) to 6 (highest strength).
+
+	To disable the deblocking filter entirely, use --no-deblock or
+	--deblock=false. Default enabled, with both offsets defaulting to 0
+
+	If deblocking is disabled, or the offsets are non-zero, these
+	changes from the default configuration are signaled in the PPS.
+
+.. option:: --sao, --no-sao
+
+	Toggle Sample Adaptive Offset loop filter, default enabled
+
+.. option:: --sao-non-deblock, --no-sao-non-deblock
+
+	Specify how to handle dependency between SAO and deblocking filter.
+	When enabled, non-deblocked pixels are used for SAO analysis. When
+	disabled, SAO analysis skips the right/bottom boundary areas.
+	Default disabled
+
+VUI (Video Usability Information) options
+=========================================
+
+x265 emits a VUI with only the timing info by default. If the SAR is
+specified (or read from a Y4M header) it is also included.  All other
+VUI fields must be manually specified.
+
+.. option:: --sar <integer|w:h>
+
+	Sample Aspect Ratio, the ratio of width to height of an individual
+	sample (pixel). The user may supply the width and height explicitly
+	or specify an integer from the predefined list of aspect ratios
+	defined in the HEVC specification.  Default undefined (not signaled)
+
+	1. 1:1 (square)
+	2. 12:11
+	3. 10:11
+	4. 16:11
+	5. 40:33
+	6. 24:11
+	7. 20:11
+	8. 32:11
+	9. 80:33
+	10. 18:11
+	11. 15:11
+	12. 64:33
+	13. 160:99
+	14. 4:3
+	15. 3:2
+	16. 2:1
+
+.. option:: --display-window <left,top,right,bottom>
+
+	Define the (overscan) region of the image that does not contain
+	information because it was added to achieve certain resolution or
+	aspect ratio (the areas are typically black bars). The decoder may
+	be directed to crop away this region before displaying the images
+	via the :option:`--overscan` option.  Default undefined (not
+	signaled).
+
+	Note that this has nothing to do with padding added internally by
+	the encoder to ensure the picture size is a multiple of the minimum
+	coding unit (4x4). That padding is signaled in a separate
+	"conformance window" and is not user-configurable.
+
+.. option:: --overscan <show|crop>
+
+	Specify whether it is appropriate for the decoder to display or crop
+	the overscan area. Default unspecified (not signaled)
+
+.. option:: --videoformat <integer|string>
+
+	Specify the source format of the original analog video prior to
+	digitizing and encoding. Default undefined (not signaled)
+
+	0. component
+	1. pal
+	2. ntsc
+	3. secam
+	4. mac
+	5. undefined
+
+.. option:: --range <full|limited>
+
+	Specify output range of black level and range of luma and chroma
+	signals. Default undefined (not signaled)
+
+.. option:: --colorprim <integer|string>
+
+	Specify color primaries to use when converting to RGB. Default
+	undefined (not signaled)
+
+	1. bt709
+	2. undef
+	3. **reserved**
+	4. bt470m
+	5. bt470bg
+	6. smpte170m
+	7. smpte240m
+	8. film
+	9. bt2020
+
+.. option:: --transfer <integer|string>
+
+	Specify transfer characteristics. Default undefined (not signaled)
+
+	1. bt709
+	2. undef
+	3. **reserved**
+	4. bt470m
+	5. bt470bg
+	6. smpte170m
+	7. smpte240m
+	8. linear
+	9. log100
+	10. log316
+	11. iec61966-2-4
+	12. bt1361e
+	13. iec61966-2-1
+	14. bt2020-10
+	15. bt2020-12
+	16. smpte-st-2084
+	17. smpte-st-428
+	18. arib-std-b67
+
+.. option:: --colormatrix <integer|string>
+
+	Specify color matrix setting i.e set the matrix coefficients used in
+	deriving the luma and chroma. Default undefined (not signaled)
+
+	0. GBR
+	1. bt709
+	2. undef 
+	3. **reserved**
+	4. fcc
+	5. bt470bg
+	6. smpte170m
+	7. smpte240m
+	8. YCgCo
+	9. bt2020nc
+	10. bt2020c
+
+.. option:: --chromaloc <0..5>
+
+	Specify chroma sample location for 4:2:0 inputs. Consult the HEVC
+	specification for a description of these values. Default undefined
+	(not signaled)
+
+.. option:: --master-display <string>
+
+	SMPTE ST 2086 mastering display color volume SEI info, specified as
+	a string which is parsed when the stream header SEI are emitted. The
+	string format is "G(%hu,%hu)B(%hu,%hu)R(%hu,%hu)WP(%hu,%hu)L(%u,%u)"
+	where %hu are unsigned 16bit integers and %u are unsigned 32bit
+	integers. The SEI includes X,Y display primaries for RGB channels,
+	white point X,Y and max,min luminance values. (HDR)
+
+	Example for D65P3 1000-nits:
+
+		G(13200,34500)B(7500,3000)R(34000,16000)WP(15635,16450)L(10000000,1)
+
+	Note that this string value will need to be escaped or quoted to
+	protect against shell expansion on many platforms. No default.
+
+.. option:: --max-cll <string>
+
+	Maximum content light level and maximum frame average light level as
+	required by the Consumer Electronics Association 861.3 specification.
+
+	Specified as a string which is parsed when the stream header SEI are
+	emitted. The string format is "%hu,%hu" where %hu are unsigned 16bit
+	integers. The first value is the max content light level (or 0 if no
+	maximum is indicated), the second value is the maximum picture
+	average light level (or 0). (HDR)
+
+	Note that this string value will need to be escaped or quoted to
+	protect against shell expansion on many platforms. No default.
+
+.. option:: --min-luma <integer>
+
+	Minimum luma value allowed for input pictures. Any values below min-luma
+	are clipped. Experimental. No default.
+
+.. option:: --max-luma <integer>
+
+	Maximum luma value allowed for input pictures. Any values above max-luma
+	are clipped. Experimental. No default.
+
+Bitstream options
+=================
+
+.. option:: --annexb, --no-annexb
+
+	If enabled, x265 will produce Annex B bitstream format, which places
+	start codes before NAL. If disabled, x265 will produce file format,
+	which places length before NAL. x265 CLI will choose the right option
+	based on output format. Default enabled
+
+	**API ONLY**
+
+.. option:: --repeat-headers, --no-repeat-headers
+
+	If enabled, x265 will emit VPS, SPS, and PPS headers with every
+	keyframe. This is intended for use when you do not have a container
+	to keep the stream headers for you and you want keyframes to be
+	random access points. Default disabled
+
+.. option:: --aud, --no-aud
+
+	Emit an access unit delimiter NAL at the start of each slice access
+	unit. If :option:`--repeat-headers` is not enabled (indicating the
+	user will be writing headers manually at the start of the stream)
+	the very first AUD will be skipped since it cannot be placed at the
+	start of the access unit, where it belongs. Default disabled
+
+.. option:: --hrd, --no-hrd
+
+	Enable the signalling of HRD parameters to the decoder. The HRD
+	parameters are carried by the Buffering Period SEI messages and
+	Picture Timing SEI messages providing timing information to the
+	decoder. Default disabled
+
+.. option:: --info, --no-info
+
+	Emit an informational SEI with the stream headers which describes
+	the encoder version, build info, and encode parameters. This is very
+	helpful for debugging purposes but encoding version numbers and
+	build info could make your bitstreams diverge and interfere with
+	regression testing. Default enabled
+
+.. option:: --hash <integer>
+
+	Emit decoded picture hash SEI, so the decoder may validate the
+	reconstructed pictures and detect data loss. Also useful as a
+	debug feature to validate the encoder state. Default None
+
+	1. MD5
+	2. CRC
+	3. Checksum
+
+.. option:: --temporal-layers,--no-temporal-layers
+
+	Enable a temporal sub layer. All referenced I/P/B frames are in the
+	base layer and all unreferenced B frames are placed in a temporal
+	enhancement layer. A decoder may chose to drop the enhancement layer 
+	and only decode and display the base layer slices.
+	
+	If used with a fixed GOP (:option:`b-adapt` 0) and :option:`bframes`
+	3 then the two layers evenly split the frame rate, with a cadence of
+	PbBbP. You probably also want :option:`--no-scenecut` and a keyframe
+	interval that is a multiple of 4.
+
+Debugging options
+=================
+
+.. option:: --recon, -r <filename>
+
+	Output file containing reconstructed images in display order. If the
+	file extension is ".y4m" the file will contain a YUV4MPEG2 stream
+	header and frame headers. Otherwise it will be a raw YUV file in the
+	encoder's internal bit depth.
+
+	**CLI ONLY**
+
+.. option:: --recon-depth <integer>
+
+	Bit-depth of output file. This value defaults to the internal bit
+	depth and currently cannot be modified.
+
+	**CLI ONLY**
+
+.. option:: --recon-y4m-exec <string>
+
+	If you have an application which can play a Y4MPEG stream received
+	on stdin, the x265 CLI can feed it reconstructed pictures in display
+	order.  The pictures will have no timing info, obviously, so the
+	picture timing will be determined primarily by encoding elapsed time
+	and latencies, but it can be useful to preview the pictures being
+	output by the encoder to validate input settings and rate control
+	parameters.
+
+	Example command for ffplay (assuming it is in your PATH):
+
+	--recon-y4m-exec "ffplay -i pipe:0 -autoexit"
+
+	**CLI ONLY**
+
+.. vim: noet
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/reST/conf.py	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+#
+# -- General configuration -----------------------------------------------------
+
+source_suffix = '.rst'
+
+# Name of the master file 
+master_doc = 'index'
+
+# General information about the project.
+project = u'x265'
+
+# This is the Copyright Information that will appear on the bottom of the document
+copyright = u'2014 MulticoreWare Inc'
+
+# -- Options for HTML output ---------------------------------------------------
+html_theme = "default"
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'libx265', 'Full x265 Documentation',
+    ['MulticoreWare Inc'], 3),
+    ('x265', 'x265', 'x265 CLI Documentation',
+    ['MulticoreWare Inc'], 1)
+]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/reST/index.rst	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,11 @@
+x265 Documentation
+======================
+.. toctree::
+   :maxdepth: 2
+
+   introduction
+   cli
+   api
+   threading
+   presets
+   lossless
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/reST/introduction.rst	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,82 @@
+************
+Introduction
+************
+
+Increasing demand for high definition and ultra-high definition video,
+along with an increasing desire for video on demand has led to
+exponential growth in demand for bandwidth and storage requirements.
+These challenges can be met by the new High Efficiency Video Coding
+(HEVC) standard, also known as H.265. The x265 HEVC encoder project was
+launched by MulticoreWare in 2013, aiming to provide the most efficient,
+highest performance HEVC video encoder.
+
+About HEVC
+==========
+
+The High Efficiency Video Coding (HEVC) was developed by the ISO/IEC
+Moving Picture Experts Group (MPEG) and ITU-T Video Coding Experts Group
+(VCEG), through their Joint Collaborative Team on Video Coding (JCT-VC).
+HEVC is also known as ISO/IEC 23008-2 MPEG-H Part 2 and ITU-T H.265.
+HEVC provides superior video quality and up to twice the data
+compression as the previous standard (H.264/MPEG-4 AVC).  HEVC can
+support 8K Ultra High Definition video, with a picture size up to
+8192x4320 pixels.
+
+About x265
+==========
+
+The primary objective of x265 is to become the best H.265/HEVC encoder
+available anywhere, offering the highest compression efficiency and the
+highest performance on a wide variety of hardware platforms. The x265
+encoder is available as an open source library, published under the
+GPLv2 license. It is also available under a commercial license, enabling
+commercial companies to utilize and distribute x265 in their solutions
+without being subject to the restrictions of the GPL license.
+
+x265 is developed by `MulticoreWare <http://www.multicorewareinc.com>`_,
+leaders in high performance software solutions, with backing from
+leading video technology providers including `Telestream
+<http://www.telestream.com>`_ and `Doremi Labs
+<http://www.doremilabs.com>`_ (and other companies who want to remain
+anonymous at this time), and with contributions from open source
+developers.  x265 leverages many of the outstanding video encoding
+features and optimizations from the x264 AVC encoder project.
+
+The x265 software is available for free under the GNU GPL 2 license,
+from https://bitbucket.org/multicoreware/x265.  For commercial companies
+that wish to distribute x265 without being subject to the open source
+requirements of the GPL 2 license, commercial licenses are available
+with competitive terms.  Contact license @ x265.com to inquire about
+commercial license terms.  
+
+While x265 is primarily designed as a video encoder software library, a
+command-line executable is provided to facilitate testing and
+development.  We expect x265 to be utilized in many leading video
+hardware and software products and services in the coming months.
+
+LEGAL NOTICES
+=============
+
+The x265 software is owned and copyrighted by MulticoreWare, Inc.
+MulticoreWare is committed to offering the x265 software under the GNU
+GPL v2 license.  Companies who do not wish to integrate the x265
+Software in their products under the terms of the GPL license can
+contact MulticoreWare (license @ x265.com) to obtain a commercial
+license agreement.  Companies who use x265 under the GPL may also wish
+to work with MulticoreWare to accelerate the development of specific
+features or optimized support for specific hardware or software
+platforms, or to contract for support.
+
+The GNU GPL v2 license or the x265 commercial license agreement govern
+your rights to access the copyrighted x265 software source code, but do
+not cover any patents that may be applicable to the function of binary
+executable software created from the x265 source code.  You are
+responsible for understanding the laws in your country, and for
+licensing all applicable patent rights needed for use or distribution of
+software applications created from the x265 source code.  A good place
+to start is with the `Motion Picture Experts Group - Licensing Authority
+- HEVC Licensing Program <http://www.mpegla.com/main/PID/HEVC/default.aspx>`_.
+
+x265 is a registered trademark of MulticoreWare, Inc.  The x265 logo is
+a trademark of MulticoreWare, and may only be used with explicit written
+permission.  All rights reserved.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/reST/lossless.rst	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,162 @@
+Lossless
+--------
+
+Lossless Encoding
+=================
+
+x265 can encode HEVC bitstreams that are entirely lossless (the
+reconstructed images are bit-exact to the source images) by using the
+:option:`--lossless` option.  Lossless operation is theoretically
+simple. Rate control, by definition, is disabled and the encoder
+disables all quality metrics since they would only waste CPU cycles.
+Instead, x265 reports only a compression factor at the end of the
+encode.
+
+In HEVC, lossless coding means bypassing both the DCT transforms and
+bypassing quantization (often referred to as transquant bypass).  Normal
+predictions are still allowed, so the encoder will find optimal inter or
+intra predictions and then losslessly code the residual (with transquant
+bypass).
+
+All :option:`--preset` options are capable of generating lossless video
+streams, but in general the slower the preset the better the compression
+ratio (and the slower the encode). Here are some examples::
+
+	./x265 ../test-720p.y4m o.bin --preset ultrafast --lossless
+	... <snip> ...
+	encoded 721 frames in 238.38s (3.02 fps), 57457.94 kb/s
+
+	./x265 ../test-720p.y4m o.bin --preset faster --lossless
+	... <snip> ...
+	x265 [info]: lossless compression ratio 3.11::1
+	encoded 721 frames in 258.46s (2.79 fps), 56787.65 kb/s
+
+	./x265 ../test-720p.y4m o.bin --preset slow --lossless
+	... <snip> ...
+	x265 [info]: lossless compression ratio 3.36::1
+	encoded 721 frames in 576.73s (1.25 fps), 52668.25 kb/s
+
+	./x265 ../test-720p.y4m o.bin --preset veryslow --lossless
+	x265 [info]: lossless compression ratio 3.76::1
+	encoded 721 frames in 6298.22s (0.11 fps), 47008.65 kb/s
+ 
+.. Note::
+	In HEVC, only QP=4 is truly lossless quantization, and thus when
+	encoding losslessly x265 uses QP=4 internally in its RDO decisions.
+
+Near-lossless Encoding
+======================
+
+Near-lossless conditions are quite a bit more interesting.  Normal ABR
+rate control will allow one to scale the bitrate up to the point where 
+quantization is entirely bypassed (QP <= 4), but even at this point
+there is a lot of SSIM left on the table because of the DCT transforms,
+which are not lossless::
+
+	./x265 ../test-720p.y4m o.bin --preset medium --bitrate 40000 --ssim
+	encoded 721 frames in 326.62s (2.21 fps), 39750.56 kb/s, SSIM Mean Y: 0.9990703 (30.317 dB)
+	
+	./x265 ../test-720p.y4m o.bin --preset medium --bitrate 50000 --ssim
+	encoded 721 frames in 349.27s (2.06 fps), 44326.84 kb/s, SSIM Mean Y: 0.9994134 (32.316 dB)
+	
+	./x265 ../test-720p.y4m o.bin --preset medium --bitrate 60000 --ssim
+	encoded 721 frames in 360.04s (2.00 fps), 45394.50 kb/s, SSIM Mean Y: 0.9994823 (32.859 dB)
+
+For the encoder to get over this quality plateau, one must enable
+lossless coding at the CU level with :option:`--cu-lossless`.  It tells
+the encoder to evaluate trans-quant bypass as a coding option for each
+CU, and to pick the option with the best rate-distortion
+characteristics.
+
+The :option:`--cu-lossless` option is very expensive, computationally,
+and it only has a positive effect when the QP is extremely low, allowing
+RDO to spend a large amount of bits to make small improvements to
+quality.  So this option should only be enabled when you are encoding
+near-lossless bitstreams::
+
+	./x265 ../test-720p.y4m o.bin --preset medium --bitrate 40000 --ssim --cu-lossless
+	encoded 721 frames in 500.51s (1.44 fps), 40017.10 kb/s, SSIM Mean Y: 0.9997790 (36.557 dB)
+	
+	./x265 ../test-720p.y4m o.bin --preset medium --bitrate 50000 --ssim --cu-lossless
+	encoded 721 frames in 524.60s (1.37 fps), 46083.37 kb/s, SSIM Mean Y: 0.9999432 (42.456 dB)
+	
+	./x265 ../test-720p.y4m o.bin --preset medium --bitrate 60000 --ssim --cu-lossless
+	encoded 721 frames in 523.63s (1.38 fps), 46552.92 kb/s, SSIM Mean Y: 0.9999489 (42.917 dB)
+
+.. Note::
+	It is not unusual for bitrate to drop as you increase lossless coding.
+	Having "perfectly coded" reference blocks reduces residual in later
+	frames. It is quite possible for a near-lossless encode to spend
+	more bits than a lossless encode.
+
+Enabling psycho-visual rate distortion will improve lossless coding.
+:option:`--psy-rd` influences the RDO decisions in favor of energy
+(detail) preservation over bit cost and results in more blocks being
+losslessly coded.  Our psy-rd feature is not yet assembly optimized, so
+this makes the encodes run even slower::
+
+	./x265 ../test-720p.y4m o.bin --preset medium --bitrate 40000 --ssim --cu-lossless --psy-rd 1.0
+	encoded 721 frames in 581.83s (1.24 fps), 40112.15 kb/s, SSIM Mean Y: 0.9998632 (38.638 dB)
+	
+	./x265 ../test-720p.y4m o.bin --preset medium --bitrate 50000 --ssim --cu-lossless --psy-rd 1.0
+	encoded 721 frames in 587.54s (1.23 fps), 46284.55 kb/s, SSIM Mean Y: 0.9999663 (44.721 dB)
+	
+	./x265 ../test-720p.y4m o.bin --preset medium --bitrate 60000 --ssim --cu-lossless --psy-rd 1.0
+	encoded 721 frames in 592.93s (1.22 fps), 46839.51 kb/s, SSIM Mean Y: 0.9999707 (45.334 dB)
+
+:option:`--cu-lossless` will also be more effective at slower
+presets which perform RDO at more levels and thus may find smaller
+blocks that would benefit from lossless coding::
+
+	./x265 ../test-720p.y4m o.bin --preset veryslow --bitrate 40000 --ssim --cu-lossless
+	encoded 721 frames in 12969.25s (0.06 fps), 37331.96 kb/s, SSIM Mean Y: 0.9998108 (37.231 dB)
+	
+	./x265 ../test-720p.y4m o.bin --preset veryslow --bitrate 50000 --ssim --cu-lossless
+	encoded 721 frames in 46217.84s (0.05 fps), 42976.28 kb/s, SSIM Mean Y: 0.9999482 (42.856 dB)
+	
+	./x265 ../test-720p.y4m o.bin --preset veryslow --bitrate 60000 --ssim --cu-lossless
+	encoded 721 frames in 13738.17s (0.05 fps), 43864.21 kb/s, SSIM Mean Y: 0.9999633 (44.348 dB)
+	
+And with psy-rd and a slow preset together, very high SSIMs are
+possible::
+
+	./x265 ../test-720p.y4m o.bin --preset veryslow --bitrate 40000 --ssim --cu-lossless --psy-rd 1.0
+	encoded 721 frames in 11675.81s (0.06 fps), 37819.45 kb/s, SSIM Mean Y: 0.9999181 (40.867 dB)
+	    
+	./x265 ../test-720p.y4m o.bin --preset veryslow --bitrate 50000 --ssim --cu-lossless --psy-rd 1.0
+	encoded 721 frames in 12414.56s (0.06 fps), 42815.75 kb/s, SSIM Mean Y: 0.9999758 (46.168 dB)
+	
+	./x265 ../test-720p.y4m o.bin --preset veryslow --bitrate 60000 --ssim --cu-lossless --psy-rd 1.0
+	encoded 721 frames in 11684.89s (0.06 fps), 43324.48 kb/s, SSIM Mean Y: 0.9999793 (46.835 dB)
+
+
+It's important to note in the end that it is easier (less work) for the
+encoder to encode the video losslessly than it is to encode it
+near-losslessly. If the encoder knows up front the encode must be
+lossless, it does not need to evaluate any lossy coding methods. The
+encoder only needs to find the most efficient prediction for each block
+and then entropy code the residual.
+
+It is not feasible for :option:`--cu-lossless` to turn itself on when
+the encoder determines it is encoding a near-lossless bitstream (ie:
+when rate control nearly disables all quantization) because the feature
+requires a flag to be enabled in the stream headers. At the time the
+stream headers are being coded we do not know whether
+:option:`--cu-lossless` would be a help or a hindrance.  If very few or no
+blocks end up being coded as lossless, then having the feature enabled
+is a net loss in compression efficiency because it adds a flag that must
+be coded for every CU. So ignoring even the performance aspects of the
+feature, it can be a compression loss if enabled without being used. So
+it is up to the user to only enable this feature when they are coding at
+near-lossless quality.
+
+Transform Skip
+==============
+
+A somewhat related feature, :option:`--tskip` tells the encoder to
+evaluate transform-skip (bypass DCT but with quantization still enabled)
+when coding small 4x4 transform blocks. This feature is intended to
+improve the coding efficiency of screen content (aka: text on a screen)
+and is not really intended for lossless coding.  This feature should
+only be enabled if the content has a lot of very sharp edges in it, and
+is mostly unrelated to lossless coding.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/reST/presets.rst	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,182 @@
+Preset Options
+--------------
+
+.. _presets:
+
+Presets
+=======
+
+x265 has a number of predefined :option:`--preset` options that make
+trade-offs between encode speed (encoded frames per second) and
+compression efficiency (quality per bit in the bitstream).  The default
+preset is medium, it does a reasonably good job of finding the best
+possible quality without spending enormous CPU cycles looking for the
+absolute most efficient way to achieve that quality.  As you go higher
+than medium, the encoder takes shortcuts to improve performance at the
+expense of quality and compression efficiency.  As you go lower than
+medium, the encoder tries harder and harder to achieve the best quality
+per bit compression ratio.
+
+The presets adjust encoder parameters to affect these trade-offs.
+
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+|              | ultrafast | superfast | veryfast | faster | fast | medium | slow | slower | veryslow | placebo |
++==============+===========+===========+==========+========+======+========+======+========+==========+=========+
+| ctu          |   32      |    32     |   32     |  64    |  64  |   64   |  64  |  64    |   64     |   64    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| min-cu-size  |   16      |     8     |    8     |   8    |   8  |    8   |   8  |   8    |    8     |    8    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| bframes      |    3      |     3     |    4     |   4    |  4   |    4   |  4   |   8    |    8     |    8    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| b-adapt      |    0      |     0     |    0     |   0    |  0   |    2   |  2   |   2    |    2     |    2    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| rc-lookahead |    5      |    10     |   15     |  15    |  15  |   20   |  25  |   30   |   40     |   60    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| scenecut     |    0      |    40     |   40     |  40    |  40  |   40   |  40  |   40   |   40     |   40    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| refs         |    1      |     1     |    1     |   1    |  2   |    3   |  3   |   3    |    5     |    5    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| me           |   dia     |   hex     |   hex    |  hex   | hex  |   hex  | star |  star  |   star   |   star  |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| merange      |   57      |    57     |   57     |  57    |  57  |   57   | 57   |  57    |   57     |   92    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| subme        |    0      |     1     |    1     |   2    |  2   |    2   |  3   |   3    |    4     |    5    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| rect         |    0      |     0     |    0     |   0    |  0   |    0   |  1   |   1    |    1     |    1    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| amp          |    0      |     0     |    0     |   0    |  0   |    0   |  0   |   1    |    1     |    1    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| max-merge    |    2      |     2     |    2     |   2    |  2   |    2   |  3   |   3    |    4     |    5    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| early-skip   |    1      |     1     |    1     |   1    |  0   |    0   |  0   |   0    |    0     |    0    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| fast-intra   |    1      |     1     |    1     |   1    |  1   |    0   |  0   |   0    |    0     |    0    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| b-intra      |    0      |     0     |    0     |   0    |  0   |    0   |  0   |   1    |    1     |    1    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| sao          |    0      |     0     |    1     |   1    |  1   |    1   |  1   |   1    |    1     |    1    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| signhide     |    0      |     1     |    1     |   1    |  1   |    1   |  1   |   1    |    1     |    1    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| weightp      |    0      |     0     |    1     |   1    |  1   |    1   |  1   |   1    |    1     |    1    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| weightb      |    0      |     0     |    0     |   0    |  0   |    0   |  0   |   1    |    1     |    1    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| aq-mode      |    0      |     0     |    1     |   1    |  1   |    1   |  1   |   1    |    1     |    1    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| cuTree       |    0      |     0     |    0     |   0    |  1   |    1   |  1   |   1    |    1     |    1    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| rdLevel      |    2      |     2     |    2     |   2    |  2   |    3   |  4   |   6    |    6     |    6    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| rdoq-level   |    0      |     0     |    0     |   0    |  0   |    0   |  2   |   2    |    2     |    2    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| tu-intra     |    1      |     1     |    1     |   1    |  1   |    1   |  1   |   2    |    3     |    4    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+| tu-inter     |    1      |     1     |    1     |   1    |  1   |    1   |  1   |   2    |    3     |    4    |
++--------------+-----------+-----------+----------+--------+------+--------+------+--------+----------+---------+
+
+Placebo mode enables transform-skip prediction evaluation.
+
+.. _tunings:
+
+Tuning
+======
+
+There are a few :option:`--tune` options available, which are applied
+after the preset.
+
+.. Note::
+
+	The *psnr* and *ssim* tune options disable all optimizations that
+	sacrifice metric scores for perceived visual quality (also known as
+	psycho-visual optimizations). By default x265 always tunes for
+	highest perceived visual quality but if one intends to measure an
+	encode using PSNR or SSIM for the purpose of benchmarking, we highly
+	recommend you configure x265 to tune for that particular metric.
+
++--------------+-----------------------------------------------------+
+| --tune       | effect                                              |
++==============+=====================================================+
+| psnr         | disables adaptive quant, psy-rd, and cutree         |
++--------------+-----------------------------------------------------+
+| ssim         | enables adaptive quant auto-mode, disables psy-rd   |
++--------------+-----------------------------------------------------+
+| grain        | improves retention of film grain. more below        |
++--------------+-----------------------------------------------------+
+| fastdecode   | no loop filters, no weighted pred, no intra in B    |
++--------------+-----------------------------------------------------+
+| zerolatency  | no lookahead, no B frames, no cutree                |
++--------------+-----------------------------------------------------+
+
+
+
+Film Grain Retention
+~~~~~~~~~~~~~~~~~~~~
+
+:option:`--tune` *grain* tries to improve the retention of film grain in
+the reconstructed output. It disables rate distortion optimizations in
+quantization, and increases the default psy-rd.
+
+    * :option:`--psy-rd` 0.5
+    * :option:`--rdoq-level` 0
+    * :option:`--psy-rdoq` 0
+
+It lowers the strength of adaptive quantization, so residual energy can
+be more evenly distributed across the (noisy) picture:
+
+    * :option:`--aq-strength` 0.3
+
+And it similarly tunes rate control to prevent the slice QP from
+swinging too wildly from frame to frame:
+
+    * :option:`--ipratio` 1.1
+    * :option:`--pbratio` 1.1
+    * :option:`--qcomp` 0.8
+
+And lastly it reduces the strength of deblocking to prevent grain being
+blurred on block boundaries:
+
+    * :option:`--deblock` -2
+
+Fast Decode
+~~~~~~~~~~~
+
+:option:`--tune` *fastdecode* disables encoder features which tend to be
+bottlenecks for the decoder. It is intended for use with 4K content at
+high bitrates which can cause decoders to struggle. It disables both
+HEVC loop filters, which tend to be process bottlenecks:
+
+    * :option:`--no-deblock`
+    * :option:`--no-sao`
+
+It disables weighted prediction, which tends to be a bandwidth bottleneck:
+
+    * :option:`--no-weightp`
+    * :option:`--no-weightb`
+
+And it disables intra blocks in B frames with :option:`--no-b-intra`
+since intra predicted blocks cause serial dependencies in the decoder.
+
+Zero Latency
+~~~~~~~~~~~~
+
+There are two halves to the latency problem. There is latency at the
+decoder and latency at the encoder. :option:`--tune` *zerolatency*
+removes latency from both sides. The decoder latency is removed by:
+
+    * :option:`--bframes` 0
+
+Encoder latency is removed by:
+
+    * :option:`--b-adapt` 0
+    * :option:`--rc-lookahead` 0
+    * :option:`--no-scenecut`
+    * :option:`--no-cutree`
+    * :option:`--frame-threads` 1
+
+With all of these settings x265_encoder_encode() will run synchronously,
+the picture passed as pic_in will be encoded and returned as NALs. These
+settings disable frame parallelism, which is an important component for
+x265 performance. If you can tolerate any latency on the encoder, you
+can increase performance by increasing the number of frame threads. Each
+additional frame thread adds one frame of latency.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/reST/threading.rst	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,266 @@
+*********
+Threading
+*********
+
+.. _pools:
+
+Thread Pools
+============
+
+x265 creates one or more thread pools per encoder, one pool per NUMA
+node (typically a CPU socket). :option:`--pools` specifies the number of
+pools and the number of threads per pool the encoder will allocate. By
+default x265 allocates one thread per (hyperthreaded) CPU core on each
+NUMA node.
+
+If you are running multiple encoders on a system with multiple NUMA
+nodes, it is recommended to isolate each of them to a single node in
+order to avoid the NUMA overhead of remote memory access.
+
+Work distribution is job based. Idle worker threads scan the job
+providers assigned to their thread pool for jobs to perform. When no
+jobs are available, the idle worker threads block and consume no CPU
+cycles.
+
+Objects which desire to distribute work to worker threads are known as
+job providers (and they derive from the JobProvider class).  The thread
+pool has a method to **poke** awake a blocked idle thread, and job
+providers are recommended to call this method when they make new jobs
+available.
+
+Worker jobs are not allowed to block except when absolutely necessary
+for data locking. If a job becomes blocked, the work function is
+expected to drop that job so the worker thread may go back to the pool
+and find more work.
+
+On Windows, the native APIs offer sufficient functionality to discover
+the NUMA topology and enforce the thread affinity that libx265 needs (so
+long as you have not chosen to target XP or Vista), but on POSIX systems
+it relies on libnuma for this functionality. If your target POSIX system
+is single socket, then building without libnuma is a perfectly
+reasonable option, as it will have no effect on the runtime behavior. On
+a multiple-socket system, a POSIX build of libx265 without libnuma will
+be less work efficient, but will still function correctly. You lose the
+work isolation effect that keeps each frame encoder from only using the
+threads of a single socket and so you incur a heavier context switching
+cost.
+
+Wavefront Parallel Processing
+=============================
+
+New with HEVC, Wavefront Parallel Processing allows each row of CTUs to
+be encoded in parallel, so long as each row stays at least two CTUs
+behind the row above it, to ensure the intra references and other data
+of the blocks above and above-right are available. WPP has almost no
+effect on the analysis and compression of each CTU and so it has a very
+small impact on compression efficiency relative to slices or tiles. The
+compression loss from WPP has been found to be less than 1% in most of
+our tests.
+
+WPP has three effects which can impact efficiency. The first is the row
+starts must be signaled in the slice header, the second is each row must
+be padded to an even byte in length, and the third is the state of the
+entropy coder is transferred from the second CTU of each row to the
+first CTU of the row below it.  In some conditions this transfer of
+state actually improves compression since the above-right state may have
+better locality than the end of the previous row.
+
+Parabola Research have published an excellent HEVC
+`animation <http://www.parabolaresearch.com/blog/2013-12-01-hevc-wavefront-animation.html>`_
+which visualizes WPP very well.  It even correctly visualizes some of
+WPPs key drawbacks, such as:
+
+1. the low thread utilization at the start and end of each frame
+2. a difficult block may stall the wave-front and it takes a while for
+   the wave-front to recover.
+3. 64x64 CTUs are big! there are far fewer rows than with H.264 and
+   similar codecs
+
+Because of these stall issues you rarely get the full parallelisation
+benefit one would expect from row threading. 30% to 50% of the
+theoretical perfect threading is typical.
+
+In x265 WPP is enabled by default since it not only improves performance
+at encode but it also makes it possible for the decoder to be threaded.
+
+If WPP is disabled by :option:`--no-wpp` the frame will be encoded in
+scan order and the entropy overheads will be avoided.  If frame
+threading is not disabled, the encoder will change the default frame
+thread count to be higher than if WPP was enabled.  The exact formulas
+are described in the next section.
+
+Bonded Task Groups
+==================
+
+If a worker thread job has work which can be performed in parallel by
+many threads, it may allocate a bonded task group and enlist the help of
+other idle worker threads from the same thread pool. Those threads will
+cooperate to complete the work of the bonded task group and then return
+to their idle states. The larger and more uniform those tasks are, the
+better the bonded task group will perform.
+
+Parallel Mode Analysis
+~~~~~~~~~~~~~~~~~~~~~~
+
+When :option:`--pmode` is enabled, each CU (at all depths from 64x64 to
+8x8) will distribute its analysis work to the thread pool via a bonded
+task group. Each analysis job will measure the cost of one prediction
+for the CU: merge, skip, intra, inter (2Nx2N, Nx2N, 2NxN, and AMP).
+
+At slower presets, the amount of increased parallelism from pmode is
+often enough to be able to reduce or disable frame parallelism while
+achieving the same overall CPU utilization. Reducing frame threads is
+often beneficial to ABR and VBV rate control.
+
+Parallel Motion Estimation
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When :option:`--pme` is enabled all of the analysis functions which
+perform motion searches to reference frames will distribute those motion
+searches to other worker threads via a bonded task group (if more than
+two motion searches are required).
+
+Frame Threading
+===============
+
+Frame threading is the act of encoding multiple frames at the same time.
+It is a challenge because each frame will generally use one or more of
+the previously encoded frames as motion references and those frames may
+still be in the process of being encoded themselves.
+
+Previous encoders such as x264 worked around this problem by limiting
+the motion search region within these reference frames to just one
+macroblock row below the coincident row being encoded. Thus a frame
+could be encoded at the same time as its reference frames so long as it
+stayed one row behind the encode progress of its references (glossing
+over a few details). 
+
+x265 has the same frame threading mechanism, but we generally have much
+less frame parallelism to exploit than x264 because of the size of our
+CTU rows. For instance, with 1080p video x264 has 68 16x16 macroblock
+rows available each frame while x265 only has 17 64x64 CTU rows.
+
+The second extenuating circumstance is the loop filters. The pixels used
+for motion reference must be processed by the loop filters and the loop
+filters cannot run until a full row has been encoded, and it must run a
+full row behind the encode process so that the pixels below the row
+being filtered are available. On top of this, HEVC has two loop filters:
+deblocking and SAO, which must be run in series with a row lag between
+them. When you add up all the row lags each frame ends up being 3 CTU
+rows behind its reference frames (the equivalent of 12 macroblock rows
+for x264). And keep in mind the wave-front progression pattern; by the
+time the reference frame finishes the third row of CTUs, nearly half of
+the CTUs in the frame may be compressed (depending on the display aspect
+ratio).
+
+The third extenuating circumstance is that when a frame being encoded
+becomes blocked by a reference frame row being available, that frame's
+wave-front becomes completely stalled and when the row becomes available
+again it can take quite some time for the wave to be restarted, if it
+ever does. This makes WPP less effective when frame parallelism is in
+use.
+
+:option:`--merange` can have a negative impact on frame parallelism. If
+the range is too large, more rows of CTU lag must be added to ensure
+those pixels are available in the reference frames.
+
+.. note::
+
+	Even though the merange is used to determine the amount of reference
+	pixels that must be available in the reference frames, the actual
+	motion search is not necessarily centered around the coincident
+	block. The motion search is actually centered around the motion
+	predictor, but the available pixel area (mvmin, mvmax) is determined
+	by merange and the interpolation filter half-heights.
+
+When frame threading is disabled, the entirety of all reference frames
+are always fully available (by definition) and thus the available pixel
+area is not restricted at all, and this can sometimes improve
+compression efficiency. Because of this, the output of encodes with
+frame parallelism disabled will not match the output of encodes with
+frame parallelism enabled; but when enabled the number of frame threads
+should have no effect on the output bitstream except when using ABR or
+VBV rate control or noise reduction.
+
+When :option:`--nr` is enabled, the outputs of each number of frame threads
+will be deterministic but none of them will match because each frame
+encoder maintains a cumulative noise reduction state.
+
+VBV introduces non-determinism in the encoder, at this point in time,
+regardless of the amount of frame parallelism.
+
+By default frame parallelism and WPP are enabled together. The number of
+frame threads used is auto-detected from the (hyperthreaded) CPU core
+count, but may be manually specified via :option:`--frame-threads`
+
+	+-------+--------+
+	| Cores | Frames |
+	+=======+========+
+	|  > 32 |  6..8  |
+	+-------+--------+
+	| >= 16 |   5    |
+	+-------+--------+
+	| >= 8  |   3    |
+	+-------+--------+
+	| >= 4  |   2    |
+	+-------+--------+
+
+If WPP is disabled, then the frame thread count defaults to **min(cpuCount, ctuRows / 2)**
+
+Over-allocating frame threads can be very counter-productive. They
+each allocate a large amount of memory and because of the limited number
+of CTU rows and the reference lag, you generally get limited benefit
+from adding frame encoders beyond the auto-detected count, and often
+the extra frame encoders reduce performance.
+
+Given these considerations, you can understand why the faster presets
+lower the max CTU size to 32x32 (making twice as many CTU rows available
+for WPP and for finer grained frame parallelism) and reduce
+:option:`--merange`
+
+Each frame encoder runs in its own thread (allocated separately from the
+worker pool). This frame thread has some pre-processing responsibilities
+and some post-processing responsibilities for each frame, but it spends
+the bulk of its time managing the wave-front processing by making CTU
+rows available to the worker threads when their dependencies are
+resolved.  The frame encoder threads spend nearly all of their time
+blocked in one of 4 possible locations:
+
+1. blocked, waiting for a frame to process
+2. blocked on a reference frame, waiting for a CTU row of reconstructed
+   and loop-filtered reference pixels to become available
+3. blocked waiting for wave-front completion
+4. blocked waiting for the main thread to consume an encoded frame
+
+Lookahead
+=========
+
+The lookahead module of x265 (the lowres pre-encode which determines
+scene cuts and slice types) uses the thread pool to distribute the
+lowres cost analysis to worker threads. It will use bonded task groups
+to perform batches of frame cost estimates, and it may optionally use
+bonded task groups to measure single frame cost estimates using slices.
+(see :option:`--lookahead-slices`)
+
+The main slicetypeDecide() function itself is also performed by a worker
+thread if your encoder has a thread pool, else it runs within the
+context of the thread which calls the x265_encoder_encode().
+
+SAO
+===
+
+The Sample Adaptive Offset loopfilter has a large effect on encode
+performance because of the peculiar way it must be analyzed and coded.
+
+SAO flags and data are encoded at the CTU level before the CTU itself is
+coded, but SAO analysis (deciding whether to enable SAO and with what
+parameters) cannot be performed until that CTU is completely analyzed
+(reconstructed pixels are available) as well as the CTUs to the right
+and below.  So in effect the encoder must perform SAO analysis in a
+wavefront at least a full row behind the CTU compression wavefront.
+
+This extra latency forces the encoder to save the encode data of every
+CTU until the entire frame has been analyzed, at which point a function
+can code the final slice bitstream with the decided SAO flags and data
+interleaved between each CTU.  This second pass over the CTUs can be
+expensive, particularly at large resolutions and high bitrates.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/reST/x265.rst	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,49 @@
+x265 CLI Documentation
+######################
+
+
+SYNOPSIS
+========
+
+**x265** [options] infile [-o] outfile
+
+Bit depth: 8
+
+
+**x265-10bit** [options] infile [-o] outfile
+
+Bit depth: 10
+
+
+infile can be YUV or Y4M
+
+outfile is raw HEVC bitstream
+
+
+DESCRIPTION
+===========
+
+.. toctree::
+   :maxdepth: 2
+
+   introduction
+
+
+OPTIONS
+=======
+
+.. toctree::
+   :maxdepth: 2
+
+   cli
+   presets
+   lossless
+
+
+SEE ALSO
+========
+
+**libx265**\(3)
+
+Online documentation: http://x265.readthedocs.org/en/default/cli.html
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/doc/uncrustify/codingstyle.cfg	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,232 @@
+align_func_params=true
+align_keep_tabs=false
+align_left_shift=true
+align_mix_var_proto=false
+align_nl_cont=false
+align_number_left=false
+align_oc_decl_colon=false
+align_on_operator=false
+align_on_tabstop=false
+align_right_cmt_mix=false
+align_single_line_brace=false
+align_single_line_func=false
+align_var_def_attribute=false
+align_var_def_colon=false
+align_var_def_inline=false
+align_with_tabs=false
+cmt_c_group=true
+cmt_c_nl_end=false
+cmt_c_nl_start=true
+cmt_cpp_group=true
+cmt_cpp_nl_end=false
+cmt_cpp_nl_start=false
+cmt_cpp_to_c=false
+cmt_indent_multi=false
+cmt_insert_before_preproc=false
+cmt_multi_check_last=true
+cmt_reflow_mode=1
+cmt_sp_before_star_cont=0
+cmt_star_cont=true
+cmt_width=130
+#code_width=130
+eat_blanks_after_open_brace=true
+eat_blanks_before_close_brace=true
+indent_access_spec_body=false
+indent_align_assign=false
+indent_align_string=false
+indent_bool_paren=false
+indent_brace_parent=false
+indent_braces=false
+indent_braces_no_class=false
+indent_braces_no_func=false
+indent_braces_no_struct=false
+indent_class=true
+indent_class_colon=false
+indent_cmt_with_tabs=false
+indent_col1_comment=false
+indent_columns=4
+indent_comma_paren=false
+indent_else_if=false
+indent_extern=false
+indent_first_bool_expr=false
+indent_func_call_param=false
+indent_func_class_param=false
+indent_func_ctor_var_param=false
+indent_func_def_param=false
+indent_func_param_double=false
+indent_func_proto_param=false
+indent_namespace=false
+indent_paren_nl=false
+indent_preserve_sql=false
+indent_relative_single_line_comments=false
+indent_square_nl=false
+indent_template_param=false
+indent_var_def_cont=false
+indent_with_tabs=0
+input_tab_size=2
+ls_for_split_full=true
+ls_func_split_full=true
+mod_add_long_ifdef_else_comment=10
+mod_add_long_ifdef_endif_comment=10
+mod_full_brace_do=add
+mod_full_brace_for=add
+mod_full_brace_if=ignore
+mod_full_brace_if_chain=false
+mod_full_brace_while=add
+mod_full_paren_if_bool=false
+mod_move_case_break=false
+mod_paren_on_return=remove
+mod_pawn_semicolon=false
+mod_remove_empty_return=true
+mod_remove_extra_semicolon=true
+mod_sort_import=false
+mod_sort_include=false
+mod_sort_using=false
+newlines=lf
+nl_after_access_spec=2
+#nl_after_brace_close=ignore
+#nl_after_brace_open=ignore
+nl_after_brace_open_cmt=true
+nl_after_case=false
+nl_after_class=2
+nl_after_for=add
+nl_after_func_body=2
+nl_after_func_body_one_liner=2
+nl_after_if=ignore
+nl_after_multiline_comment=true
+nl_after_return=false
+nl_after_semicolon=true
+nl_after_struct=2
+nl_after_switch=add
+nl_after_vbrace_close=false
+nl_after_vbrace_open=false
+nl_after_vbrace_open_empty=false
+nl_after_while=add
+nl_assign_brace=add
+nl_assign_leave_one_liners=true
+nl_before_access_spec=2
+nl_before_block_comment=2
+nl_before_case=false
+nl_brace_else=add
+nl_brace_while=add
+nl_case_colon_brace=add
+nl_class_brace=add
+nl_class_init_args=ignore
+nl_class_leave_one_liners=true
+nl_collapse_empty_body=false
+nl_create_for_one_liner=false
+nl_create_if_one_liner=false
+nl_create_while_one_liner=false
+nl_define_macro=false
+nl_do_brace=add
+nl_ds_struct_enum_close_brace=false
+nl_ds_struct_enum_cmt=false
+nl_else_brace=add
+nl_else_if=remove
+nl_elseif_brace=add
+nl_end_of_file=add
+nl_end_of_file_min=1
+nl_enum_brace=add
+nl_enum_leave_one_liners=true
+nl_fdef_brace=add
+nl_for_brace=add
+nl_func_decl_end=remove
+nl_func_decl_start=remove
+nl_func_def_paren=remove
+nl_func_def_start=remove
+nl_func_leave_one_liners=true
+nl_func_paren=remove
+nl_func_proto_type_name=remove
+nl_func_type_name=remove
+nl_func_type_name_class=remove
+nl_func_var_def_blk=2
+nl_getset_leave_one_liners=true
+nl_if_brace=add
+nl_if_leave_one_liners=true
+nl_max=2
+nl_multi_line_cond=false
+nl_multi_line_define=false
+nl_namespace_brace=remove
+nl_return_expr=remove
+nl_squeeze_ifdef=false
+nl_start_of_file=remove
+nl_struct_brace=add
+nl_switch_brace=add
+nl_template_class=add
+nl_while_brace=add
+pp_define_at_level=false
+pp_if_indent_code=false
+pp_indent=remove
+pp_indent_at_level=false
+pp_region_indent_code=false
+sp_addr=remove
+sp_after_angle=remove
+sp_after_cast=remove
+sp_after_class_colon=add
+sp_after_comma=add
+sp_after_dc=remove
+sp_after_new=add
+sp_after_operator=add
+sp_after_operator_sym=remove
+sp_after_type=ignore
+sp_angle_paren=remove
+sp_angle_word=add
+sp_arith=add
+sp_assign=add
+sp_assign_default=add
+sp_attribute_paren=remove
+sp_balance_nested_parens=false
+sp_before_angle=remove
+sp_before_case_colon=remove
+sp_before_class_colon=add
+sp_before_comma=remove
+sp_before_dc=remove
+sp_before_nl_cont=add
+sp_before_semi=remove
+sp_before_semi_for=remove
+sp_before_semi_for_empty=remove
+sp_before_sparen=add
+sp_before_square=remove
+sp_before_squares=ignore
+sp_before_tr_emb_cmt=add
+sp_bool=add
+sp_brace_else=add
+sp_cmt_cpp_start=ignore
+sp_compare=add
+sp_cond_colon=add
+sp_cond_question=add
+sp_cpp_cast_paren=remove
+sp_defined_paren=remove
+sp_deref=remove
+sp_else_brace=add
+sp_endif_cmt=add
+sp_enum_assign=add
+sp_fparen_brace=add
+sp_func_call_paren=remove
+sp_func_class_paren=remove
+sp_func_def_paren=remove
+sp_func_proto_paren=remove
+sp_incdec=remove
+sp_inside_angle=remove
+sp_inside_braces=add
+#sp_inside_braces_empty=remove
+sp_inside_fparen=remove
+sp_inside_fparens=remove
+sp_inside_paren=remove
+sp_inside_paren_cast=remove
+sp_inside_sparen=remove
+sp_inside_square=remove
+sp_inv=remove
+sp_member=remove
+sp_not=remove
+sp_paren_brace=add
+sp_paren_paren=remove
+sp_pp_concat=add
+sp_sign=remove
+sp_sizeof_paren=remove
+sp_special_semi=ignore
+sp_template_angle=remove
+tok_split_gte=false
+utf8_bom=remove
+utf8_byte=false
+utf8_force=false
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/readme.rst	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,14 @@
+=================
+x265 HEVC Encoder
+=================
+
+| **Read:** | Online `documentation <http://x265.readthedocs.org/en/default/>`_ | Developer `wiki <http://bitbucket.org/multicoreware/x265/wiki/>`_
+| **Download:** | `releases <http://ftp.videolan.org/pub/videolan/x265/>`_ 
+| **Interact:** | #x265 on freenode.irc.net | `x265-devel@videolan.org <http://mailman.videolan.org/listinfo/x265-devel>`_ | `Report an issue <https://bitbucket.org/multicoreware/x265/issues?status=new&status=open>`_
+
+`x265 <https://www.videolan.org/developers/x265.html>`_ is an open
+source HEVC encoder. See the developer wiki for instructions for
+downloading and building the source.
+
+x265 is free to use under the `GNU GPL <http://www.gnu.org/licenses/gpl-2.0.html>`_ 
+and is also available under a commercial `license <http://x265.org>`_ 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/CMakeLists.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,586 @@
+# vim: syntax=cmake
+if(NOT CMAKE_BUILD_TYPE)
+    # default to Release build for GCC builds
+    set(CMAKE_BUILD_TYPE Release CACHE STRING
+        "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel."
+        FORCE)
+endif()
+message(STATUS "cmake version ${CMAKE_VERSION}")
+if(POLICY CMP0025)
+    cmake_policy(SET CMP0025 OLD) # report Apple's Clang as just Clang
+endif()
+if(POLICY CMP0042)
+    cmake_policy(SET CMP0042 NEW) # MACOSX_RPATH
+endif()
+if(POLICY CMP0054)
+    cmake_policy(SET CMP0054 OLD) # Only interpret if() arguments as variables or keywords when unquoted
+endif()
+
+project (x265)
+cmake_minimum_required (VERSION 2.8.8) # OBJECT libraries require 2.8.8
+include(CheckIncludeFiles)
+include(CheckFunctionExists)
+include(CheckSymbolExists)
+include(CheckCXXCompilerFlag)
+
+option(FPROFILE_GENERATE "Compile executable to generate usage data" OFF)
+option(FPROFILE_USE "Compile executable using generated usage data" OFF)
+option(NATIVE_BUILD "Target the build CPU" OFF)
+option(STATIC_LINK_CRT "Statically link C runtime for release builds" OFF)
+mark_as_advanced(FPROFILE_USE FPROFILE_GENERATE NATIVE_BUILD)
+
+# X265_BUILD must be incremented each time the public API is changed
+set(X265_BUILD 75)
+configure_file("${PROJECT_SOURCE_DIR}/x265.def.in"
+               "${PROJECT_BINARY_DIR}/x265.def")
+configure_file("${PROJECT_SOURCE_DIR}/x265_config.h.in"
+               "${PROJECT_BINARY_DIR}/x265_config.h")
+
+SET(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake" "${CMAKE_MODULE_PATH}")
+
+# System architecture detection
+string(TOLOWER "${CMAKE_SYSTEM_PROCESSOR}" SYSPROC)
+set(X86_ALIASES x86 i386 i686 x86_64 amd64)
+list(FIND X86_ALIASES "${SYSPROC}" X86MATCH)
+set(POWER_ALIASES ppc64 ppc64le)
+list(FIND POWER_ALIASES "${SYSPROC}" POWERMATCH)
+if("${SYSPROC}" STREQUAL "" OR X86MATCH GREATER "-1")
+    message(STATUS "Detected x86 target processor")
+    set(X86 1)
+    add_definitions(-DX265_ARCH_X86=1)
+    if("${CMAKE_SIZEOF_VOID_P}" MATCHES 8)
+        set(X64 1)
+        add_definitions(-DX86_64=1)
+    endif()
+elseif(POWERMATCH GREATER "-1")
+    message(STATUS "Detected POWER target processor")
+    set(POWER 1)
+    add_definitions(-DX265_ARCH_POWER=1)
+elseif(${SYSPROC} STREQUAL "armv6l")
+    message(STATUS "Detected ARM target processor")
+    set(ARM 1)
+    add_definitions(-DX265_ARCH_ARM=1 -DHAVE_ARMV6=1)
+else()
+    message(STATUS "CMAKE_SYSTEM_PROCESSOR value `${CMAKE_SYSTEM_PROCESSOR}` is unknown")
+    message(STATUS "Please add this value near ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE}")
+endif()
+
+if(UNIX)
+    list(APPEND PLATFORM_LIBS pthread)
+    find_library(LIBRT rt)
+    if(LIBRT)
+        list(APPEND PLATFORM_LIBS rt)
+    endif()
+    mark_as_advanced(LIBRT)
+    find_library(LIBDL dl)
+    if(LIBDL)
+        list(APPEND PLATFORM_LIBS dl)
+    endif()
+    option(ENABLE_LIBNUMA "Enable libnuma usage (Linux only)" ON)
+    if(ENABLE_LIBNUMA)
+        find_package(Numa)
+        if(NUMA_FOUND)
+            link_directories(${NUMA_LIBRARY_DIR})
+            list(APPEND CMAKE_REQUIRED_LIBRARIES numa)
+            check_symbol_exists(numa_node_of_cpu numa.h NUMA_V2)
+            if(NUMA_V2)
+                add_definitions(-DHAVE_LIBNUMA)
+                message(STATUS "libnuma found, building with support for NUMA nodes")
+                list(APPEND PLATFORM_LIBS numa)
+                include_directories(${NUMA_INCLUDE_DIR})
+            endif()
+        endif()
+        mark_as_advanced(NUMA_FOUND)
+    endif(ENABLE_LIBNUMA)
+    option(NO_ATOMICS "Use a slow mutex to replace atomics" OFF)
+    if(NO_ATOMICS)
+        add_definitions(-DNO_ATOMICS=1)
+    endif(NO_ATOMICS)
+endif(UNIX)
+
+if(X64 AND NOT WIN32)
+    option(ENABLE_PIC "Enable Position Independent Code" ON)
+else()
+    option(ENABLE_PIC "Enable Position Independent Code" OFF)
+endif(X64 AND NOT WIN32)
+
+# Compiler detection
+if(CMAKE_GENERATOR STREQUAL "Xcode")
+  set(XCODE 1)
+endif()
+if(APPLE)
+  add_definitions(-DMACOS)
+endif()
+
+if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
+    set(CLANG 1)
+endif()
+if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel")
+    set(INTEL_CXX 1)
+endif()
+if(${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU")
+    set(GCC 1)
+endif()
+
+if(INTEL_CXX AND WIN32)
+    # treat icl roughly like MSVC
+    set(MSVC 1)
+endif()
+if(MSVC)
+    if(STATIC_LINK_CRT)
+        set(CompilerFlags CMAKE_CXX_FLAGS_RELEASE CMAKE_C_FLAGS_RELEASE)
+        foreach(CompilerFlag ${CompilerFlags})
+            string(REPLACE "/MD" "/MT" ${CompilerFlag} "${${CompilerFlag}}")
+        endforeach()
+    endif(STATIC_LINK_CRT)
+    add_definitions(/W4)  # Full warnings
+    add_definitions(/Ob2) # always inline
+    add_definitions(/MP)  # multithreaded build
+
+    # disable Microsoft's suggestions for proprietary secure APIs
+    add_definitions(/D_CRT_SECURE_NO_WARNINGS)
+
+    check_include_files(stdint.h HAVE_STDINT_H)
+    if(NOT HAVE_STDINT_H)
+        include_directories(compat/msvc)
+    endif()
+endif(MSVC)
+
+check_include_files(inttypes.h HAVE_INT_TYPES_H)
+if(HAVE_INT_TYPES_H)
+    add_definitions(-DHAVE_INT_TYPES_H=1)
+endif()
+
+if(INTEL_CXX AND UNIX)
+    set(GCC 1) # treat icpc roughly like gcc
+elseif(CLANG)
+    set(GCC 1) # treat clang roughly like gcc
+elseif(CMAKE_COMPILER_IS_GNUCXX)
+    set(GCC 1)
+endif()
+if(GCC)
+    add_definitions(-Wall -Wextra -Wshadow)
+    add_definitions(-D__STDC_LIMIT_MACROS=1)
+    if(ENABLE_PIC)
+         add_definitions(-fPIC)
+    endif(ENABLE_PIC)
+    if(NATIVE_BUILD)
+        if(INTEL_CXX)
+            add_definitions(-xhost)
+        else()
+            add_definitions(-march=native)
+        endif()
+    elseif(X86 AND NOT X64)
+        add_definitions(-march=i686)
+    endif()
+    if(ARM)
+        add_definitions(-march=armv6 -mfloat-abi=hard -mfpu=vfp)
+    endif()
+    if(FPROFILE_GENERATE)
+        if(INTEL_CXX)
+            add_definitions(-prof-gen -prof-dir="${CMAKE_CURRENT_BINARY_DIR}")
+            list(APPEND LINKER_OPTIONS "-prof-gen")
+        else()
+            check_cxx_compiler_flag(-fprofile-generate CC_HAS_PROFILE_GENERATE)
+            if(CC_HAS_PROFILE_GENERATE)
+                add_definitions(-fprofile-generate)
+                list(APPEND LINKER_OPTIONS "-fprofile-generate")
+            endif(CC_HAS_PROFILE_GENERATE)
+        endif(INTEL_CXX)
+    endif(FPROFILE_GENERATE)
+    if(FPROFILE_USE)
+        if(INTEL_CXX)
+            add_definitions(-prof-use -prof-dir="${CMAKE_CURRENT_BINARY_DIR}")
+            list(APPEND LINKER_OPTIONS "-prof-use")
+        else()
+            check_cxx_compiler_flag(-fprofile-use CC_HAS_PROFILE_USE)
+            check_cxx_compiler_flag(-fprofile-correction CC_HAS_PROFILE_CORRECTION)
+            check_cxx_compiler_flag(-Wno-error=coverage-mismatch CC_HAS_COVMISMATCH)
+            if(CC_HAS_PROFILE_USE)
+                add_definitions(-fprofile-use)
+                list(APPEND LINKER_OPTIONS "-fprofile-use")
+            endif(CC_HAS_PROFILE_USE)
+            if(CC_HAS_PROFILE_CORRECTION)
+                # auto-correct corrupted counters (happens a lot with x265)
+                add_definitions(-fprofile-correction)
+            endif(CC_HAS_PROFILE_CORRECTION)
+            if(CC_HAS_COVMISMATCH)
+                # ignore coverage mismatches (also happens a lot)
+                add_definitions(-Wno-error=coverage-mismatch)
+            endif(CC_HAS_COVMISMATCH)
+        endif(INTEL_CXX)
+    endif(FPROFILE_USE)
+    if(STATIC_LINK_CRT)
+        add_definitions(-static)
+        list(APPEND LINKER_OPTIONS "-static")
+    endif(STATIC_LINK_CRT)
+    check_cxx_compiler_flag(-Wno-strict-overflow CC_HAS_NO_STRICT_OVERFLOW)
+    check_cxx_compiler_flag(-Wno-narrowing CC_HAS_NO_NARROWING) 
+    check_cxx_compiler_flag(-Wno-array-bounds CC_HAS_NO_ARRAY_BOUNDS) 
+    if (CC_HAS_NO_ARRAY_BOUNDS)
+        add_definitions(-Wno-array-bounds) # these are unhelpful
+    endif()
+    check_cxx_compiler_flag(-ffast-math CC_HAS_FAST_MATH) 
+    if (CC_HAS_FAST_MATH)
+        add_definitions(-ffast-math)
+    endif()
+    check_cxx_compiler_flag(-mstackrealign CC_HAS_STACK_REALIGN) 
+    if (CC_HAS_STACK_REALIGN)
+        add_definitions(-mstackrealign)
+    endif()
+    # Disable exceptions. Reduce executable size, increase compatibility.
+    check_cxx_compiler_flag(-fno-exceptions CC_HAS_FNO_EXCEPTIONS_FLAG)
+    if(CC_HAS_FNO_EXCEPTIONS_FLAG)
+        add_definitions(-fno-exceptions)
+    endif()
+    set(FSANITIZE "" CACHE STRING "-fsanitize options for GCC/clang")
+    if(FSANITIZE)
+        add_definitions(-fsanitize=${FSANITIZE})
+        # clang and gcc need the sanitize options to be passed at link
+        # time so the appropriate ASAN/TSAN runtime libraries can be
+        # linked.
+        list(APPEND LINKER_OPTIONS "-fsanitize=${FSANITIZE}")
+    endif()
+    option(ENABLE_AGGRESSIVE_CHECKS "Enable stack protection and -ftrapv" OFF)
+    if(ENABLE_AGGRESSIVE_CHECKS)
+        # use with care, -ftrapv can cause testbench SIGILL exceptions
+        # since it is testing corner cases of signed integer math
+        add_definitions(-DUSING_FTRAPV=1)
+        check_cxx_compiler_flag(-fsanitize=undefined-trap CC_HAS_CATCH_UNDEFINED) # clang
+        check_cxx_compiler_flag(-ftrapv CC_HAS_FTRAPV)                            # gcc
+        check_cxx_compiler_flag(-fstack-protector-all CC_HAS_STACK_PROTECT)       # gcc
+        if(CC_HAS_FTRAPV)
+            add_definitions(-ftrapv)
+        endif()
+        if(CC_HAS_CATCH_UNDEFINED)
+            add_definitions(-fsanitize=undefined-trap -fsanitize-undefined-trap-on-error)
+        endif()
+        if(CC_HAS_STACK_PROTECT)
+            add_definitions(-fstack-protector-all)
+            if(MINGW)
+                list(APPEND PLATFORM_LIBS ssp)
+            endif()
+        endif()
+    endif(ENABLE_AGGRESSIVE_CHECKS)
+    execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE CC_VERSION)
+endif(GCC)
+
+find_package(Yasm)
+if(YASM_FOUND AND X86)
+    if (YASM_VERSION_STRING VERSION_LESS "1.2.0")
+        message(STATUS "Yasm version ${YASM_VERSION_STRING} is too old. 1.2.0 or later required")
+        option(ENABLE_ASSEMBLY "Enable use of assembly coded primitives" OFF)
+    else()
+        message(STATUS "Found Yasm ${YASM_VERSION_STRING} to build assembly primitives")
+        option(ENABLE_ASSEMBLY "Enable use of assembly coded primitives" ON)
+    endif()
+else()
+    option(ENABLE_ASSEMBLY "Enable use of assembly coded primitives" OFF)
+endif()
+
+# no need to have slow encoding on x86
+if(X86 AND NOT ENABLE_ASSEMBLY)
+    message(FATAL_ERROR "Yasm 1.2.0 or later must be installed")
+endif()
+
+option(CHECKED_BUILD "Enable run-time sanity checks (debugging)" OFF)
+if(CHECKED_BUILD)
+    add_definitions(-DCHECKED_BUILD=1)
+endif()
+
+# Build options
+set(LIB_INSTALL_DIR lib CACHE STRING "Install location of libraries")
+set(BIN_INSTALL_DIR bin CACHE STRING "Install location of executables")
+set(EXTRA_LIB "" CACHE STRING "Extra libraries to link against")
+set(EXTRA_LINK_FLAGS "" CACHE STRING "Extra link flags")
+if(EXTRA_LINK_FLAGS)
+    list(APPEND LINKER_OPTIONS ${EXTRA_LINK_FLAGS})
+endif()
+
+option(LINKED_8BIT  "8bit libx265 is being linked with this library" OFF)
+option(LINKED_10BIT "10bit libx265 is being linked with this library" OFF)
+option(LINKED_12BIT "12bit libx265 is being linked with this library" OFF)
+
+mark_as_advanced(EXTRA_LIB EXTRA_LINK_FLAGS)
+
+if(X64)
+    # NOTE: We only officially support high-bit-depth compiles of x265
+    # on 64bit architectures. Main10 plus large resolution plus slow
+    # preset plus 32bit address space usually means malloc failure.  You
+    # can disable this if(X64) check if you desperately need a 32bit
+    # build with 10bit/12bit support, but this violates the "shrink wrap
+    # license" so to speak.  If it breaks you get to keep both halves.
+    # You will need to disable assembly manually.
+    option(HIGH_BIT_DEPTH "Store pixel samples as 16bit values (Main10/Main12)" OFF)
+endif(X64)
+if(HIGH_BIT_DEPTH)
+    option(MAIN12 "Support Main12 instead of Main10" OFF)
+    if(MAIN12)
+        add_definitions(-DHIGH_BIT_DEPTH=1 -DX265_DEPTH=12)
+    else()
+        add_definitions(-DHIGH_BIT_DEPTH=1 -DX265_DEPTH=10)
+    endif()
+else(HIGH_BIT_DEPTH)
+    add_definitions(-DHIGH_BIT_DEPTH=0 -DX265_DEPTH=8)
+endif(HIGH_BIT_DEPTH)
+
+# this option can only be used when linking multiple libx265 libraries
+# together, and some alternate API access method is implemented.
+option(EXPORT_C_API "Implement public C programming interface" ON)
+mark_as_advanced(EXPORT_C_API)
+if(EXPORT_C_API)
+    set(X265_NS x265)
+    add_definitions(-DEXPORT_C_API=1)
+elseif(HIGH_BIT_DEPTH)
+    if(MAIN12)
+        set(X265_NS x265_12bit)
+    else()
+        set(X265_NS x265_10bit)
+    endif()
+    add_definitions(-DEXPORT_C_API=0)
+else()
+    set(X265_NS x265_8bit)
+    add_definitions(-DEXPORT_C_API=0)
+endif()
+add_definitions(-DX265_NS=${X265_NS})
+
+option(WARNINGS_AS_ERRORS "Stop compiles on first warning" OFF)
+if(WARNINGS_AS_ERRORS)
+    if(GCC)
+        add_definitions(-Werror)
+    elseif(MSVC)
+        add_definitions(/WX)
+    endif()
+endif(WARNINGS_AS_ERRORS)
+
+if(WIN32)
+    # Visual leak detector
+    find_package(VLD QUIET)
+    if(VLD_FOUND)
+        add_definitions(-DHAVE_VLD)
+        include_directories(${VLD_INCLUDE_DIRS})
+        list(APPEND PLATFORM_LIBS ${VLD_LIBRARIES})
+        link_directories(${VLD_LIBRARY_DIRS})
+    endif()
+    option(WINXP_SUPPORT "Make binaries compatible with Windows XP and Vista" OFF)
+    if(WINXP_SUPPORT)
+        # force use of workarounds for CONDITION_VARIABLE and atomic
+        # intrinsics introduced after XP
+        add_definitions(-D_WIN32_WINNT=_WIN32_WINNT_WINXP -D_WIN32_WINNT_WIN7=0x0601)
+    else(WINXP_SUPPORT)
+        # default to targeting Windows 7 for the NUMA APIs
+        add_definitions(-D_WIN32_WINNT=_WIN32_WINNT_WIN7)
+    endif(WINXP_SUPPORT)
+endif()
+
+include(version) # determine X265_VERSION and X265_LATEST_TAG
+include_directories(. common encoder "${PROJECT_BINARY_DIR}")
+
+option(ENABLE_PPA "Enable PPA profiling instrumentation" OFF)
+if(ENABLE_PPA)
+    add_definitions(-DENABLE_PPA)
+    list(APPEND PLATFORM_LIBS PPA)
+    if(UNIX)
+        list(APPEND PLATFORM_LIBS dl)
+    endif(UNIX)
+    add_subdirectory(profile/PPA)
+endif(ENABLE_PPA)
+
+option(ENABLE_VTUNE "Enable Vtune profiling instrumentation" OFF)
+if(ENABLE_VTUNE)
+    add_definitions(-DENABLE_VTUNE)
+    include_directories($ENV{VTUNE_AMPLIFIER_XE_2015_DIR}/include)
+    list(APPEND PLATFORM_LIBS vtune)
+    link_directories($ENV{VTUNE_AMPLIFIER_XE_2015_DIR}/lib64)
+    if(WIN32)
+        list(APPEND PLATFORM_LIBS libittnotify.lib)
+    else()
+        list(APPEND PLATFORM_LIBS libittnotify.a dl)
+    endif()
+    add_subdirectory(profile/vtune)
+endif(ENABLE_VTUNE)
+
+option(DETAILED_CU_STATS "Enable internal profiling of encoder work" OFF)
+if(DETAILED_CU_STATS)
+    add_definitions(-DDETAILED_CU_STATS)
+endif(DETAILED_CU_STATS)
+
+add_subdirectory(encoder)
+add_subdirectory(common)
+
+if((MSVC_IDE OR XCODE) AND ENABLE_ASSEMBLY)
+    # this is required because of this cmake bug
+    # http://www.cmake.org/Bug/print_bug_page.php?bug_id=8170
+    if(WIN32)
+        set(SUFFIX obj)
+    else()
+        set(SUFFIX o)
+    endif()
+    foreach(ASM ${MSVC_ASMS})
+        set(YASM_SRC ${CMAKE_CURRENT_SOURCE_DIR}/common/x86/${ASM})
+        list(APPEND YASM_SRCS ${YASM_SRC})
+        list(APPEND YASM_OBJS ${ASM}.${SUFFIX})
+        add_custom_command(
+            OUTPUT ${ASM}.${SUFFIX}
+            COMMAND ${YASM_EXECUTABLE} ARGS ${YASM_FLAGS} ${YASM_SRC} -o ${ASM}.${SUFFIX}
+            DEPENDS ${YASM_SRC})
+    endforeach()
+endif()
+
+source_group(ASM FILES ${YASM_SRCS})
+add_library(x265-static STATIC $<TARGET_OBJECTS:encoder> $<TARGET_OBJECTS:common> ${YASM_OBJS} ${YASM_SRCS})
+if(NOT MSVC)
+    set_target_properties(x265-static PROPERTIES OUTPUT_NAME x265)
+endif()
+if(EXTRA_LIB)
+    target_link_libraries(x265-static ${EXTRA_LIB})
+endif()
+install(TARGETS x265-static
+    LIBRARY DESTINATION ${LIB_INSTALL_DIR}
+    ARCHIVE DESTINATION ${LIB_INSTALL_DIR})
+install(FILES x265.h "${PROJECT_BINARY_DIR}/x265_config.h" DESTINATION include)
+
+if(CMAKE_RC_COMPILER)
+    # The resource compiler does not need CFLAGS or macro defines. It
+    # often breaks them
+    string(REPLACE "<FLAGS>" "" CMAKE_RC_COMPILE_OBJECT "${CMAKE_RC_COMPILE_OBJECT}")
+    string(REPLACE "<DEFINES>" "" CMAKE_RC_COMPILE_OBJECT "${CMAKE_RC_COMPILE_OBJECT}")
+
+    # convert X265_LATEST_TAG (ex: 0.7) and X265_TAG_DISTANCE (ex: 103) to
+    # @X265_VERSION_MAJOR@,@X265_VERSION_MINOR@,@X265_BRANCH_ID@,@X265_TAG_DISTANCE@
+    string(REPLACE "." ";" VERSION_LIST "${X265_LATEST_TAG}")
+    list(GET VERSION_LIST 0 X265_VERSION_MAJOR)
+    list(GET VERSION_LIST 1 X265_VERSION_MINOR)
+    set(X265_BRANCH_ID 0) # TODO: 0 - stable, 1 - default or other
+    set(X265_RC_FILE "${CMAKE_CURRENT_BINARY_DIR}/x265.rc")
+    configure_file("${CMAKE_CURRENT_SOURCE_DIR}/x265.rc.in" "${X265_RC_FILE}" @ONLY)
+endif()
+
+if(NOT (MSVC_IDE OR XCODE))
+    add_custom_target(clean-generated COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_SOURCE_DIR}/cmake/clean-generated.cmake)
+endif()
+
+option(ENABLE_SHARED "Build shared library" ON)
+if(ENABLE_SHARED)
+    add_library(x265-shared SHARED "${PROJECT_BINARY_DIR}/x265.def" ${YASM_OBJS}
+                ${X265_RC_FILE} $<TARGET_OBJECTS:encoder> $<TARGET_OBJECTS:common>)
+    target_link_libraries(x265-shared ${PLATFORM_LIBS})
+    if(MSVC)
+        set_target_properties(x265-shared PROPERTIES OUTPUT_NAME libx265)
+    else()
+        set_target_properties(x265-shared PROPERTIES OUTPUT_NAME x265)
+    endif()
+    if(UNIX)
+        set_target_properties(x265-shared PROPERTIES VERSION ${X265_BUILD})
+        if(APPLE)
+            set_target_properties(x265-shared PROPERTIES MACOSX_RPATH 1)
+        else()
+            list(APPEND LINKER_OPTIONS "-Wl,-Bsymbolic,-znoexecstack")
+        endif()
+    endif()
+    set_target_properties(x265-shared PROPERTIES SOVERSION ${X265_BUILD})
+    if(X265_LATEST_TAG)
+        if(WINDOWS)
+            set_target_properties(x265-shared PROPERTIES VERSION ${X265_LATEST_TAG})
+        endif()
+        # shared library is not installed if a tag is not found
+        install(TARGETS x265-shared
+                LIBRARY DESTINATION ${LIB_INSTALL_DIR}
+                ARCHIVE DESTINATION ${LIB_INSTALL_DIR}
+                RUNTIME DESTINATION ${BIN_INSTALL_DIR})
+    endif()
+    if(EXTRA_LIB)
+        target_link_libraries(x265-shared ${EXTRA_LIB})
+    endif()
+    if(LINKER_OPTIONS)
+        # set_target_properties can't do list expansion
+        string(REPLACE ";" " " LINKER_OPTION_STR "${LINKER_OPTIONS}")
+        set_target_properties(x265-shared PROPERTIES LINK_FLAGS "${LINKER_OPTION_STR}")
+    endif()
+endif()
+
+if(X265_LATEST_TAG)
+    # convert lists of link libraries into -lstdc++ -lm etc..
+    foreach(LIB ${CMAKE_CXX_IMPLICIT_LINK_LIBRARIES} ${PLATFORM_LIBS})
+        if(IS_ABSOLUTE ${LIB} AND EXISTS ${LIB})
+            list(APPEND PLIBLIST "${LIB}")
+        else()
+            list(APPEND PLIBLIST "-l${LIB}")
+        endif()
+    endforeach()
+    if(PLIBLIST)
+        # blacklist of libraries that should not be in Libs.private
+        list(REMOVE_ITEM PLIBLIST "-lc" "-lpthread")
+        string(REPLACE ";" " " PRIVATE_LIBS "${PLIBLIST}")
+    else()
+        set(PRIVATE_LIBS "")
+    endif(PLIBLIST)
+
+    # Produce a pkg-config file
+    configure_file("x265.pc.in" "x265.pc" @ONLY)
+    install(FILES       "${CMAKE_CURRENT_BINARY_DIR}/x265.pc"
+            DESTINATION "${LIB_INSTALL_DIR}/pkgconfig")
+endif()
+
+if(NOT WIN32)
+    configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
+                   "${CMAKE_CURRENT_BINARY_DIR}/cmake/cmake_uninstall.cmake"
+                   IMMEDIATE @ONLY)
+    add_custom_target(uninstall
+                      "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake/cmake_uninstall.cmake")
+endif()
+
+# Main CLI application
+set(ENABLE_CLI ON CACHE BOOL "Build standalone CLI application")
+if(ENABLE_CLI)
+    file(GLOB InputFiles input/input.cpp input/yuv.cpp input/y4m.cpp input/*.h)
+    file(GLOB OutputFiles output/output.cpp output/reconplay.cpp output/*.h
+                          output/yuv.cpp output/y4m.cpp # recon
+                          output/raw.cpp)               # muxers
+    source_group(input FILES ${InputFiles})
+    source_group(output FILES ${OutputFiles})
+
+    check_include_files(getopt.h HAVE_GETOPT_H)
+    if(NOT HAVE_GETOPT_H)
+        if(MSVC)
+            set_source_files_properties(compat/getopt/getopt.c PROPERTIES COMPILE_FLAGS "/wd4100 /wd4131 -DHAVE_STRING_H=1")
+        endif(MSVC)
+        include_directories(compat/getopt)
+        set(GETOPT compat/getopt/getopt.c compat/getopt/getopt.h)
+    endif(NOT HAVE_GETOPT_H)
+    if(WIN32)
+        set(ExportDefs "${PROJECT_BINARY_DIR}/x265.def")
+    endif(WIN32)
+
+    if(XCODE)
+        # Xcode seems unable to link the CLI with libs, so link as one target
+        add_executable(cli ../COPYING ${InputFiles} ${OutputFiles} ${GETOPT}
+                       x265.cpp x265.h x265cli.h x265-extras.h x265-extras.cpp
+                       $<TARGET_OBJECTS:encoder> $<TARGET_OBJECTS:common> ${YASM_OBJS} ${YASM_SRCS})
+    else()
+        add_executable(cli ../COPYING ${InputFiles} ${OutputFiles} ${GETOPT} ${X265_RC_FILE}
+                       ${ExportDefs} x265.cpp x265.h x265cli.h x265-extras.h x265-extras.cpp)
+        if(WIN32 OR NOT ENABLE_SHARED OR INTEL_CXX)
+            # The CLI cannot link to the shared library on Windows, it
+            # requires internal APIs not exported from the DLL
+            target_link_libraries(cli x265-static ${PLATFORM_LIBS})
+        else()
+            target_link_libraries(cli x265-shared ${PLATFORM_LIBS})
+        endif()
+    endif()
+    set_target_properties(cli PROPERTIES OUTPUT_NAME x265)
+    if(LINKER_OPTIONS)
+        # set_target_properties can't do list expansion
+        string(REPLACE ";" " " LINKER_OPTION_STR "${LINKER_OPTIONS}")
+        set_target_properties(cli PROPERTIES LINK_FLAGS "${LINKER_OPTION_STR}")
+    endif()
+
+    install(TARGETS cli DESTINATION ${BIN_INSTALL_DIR})
+endif(ENABLE_CLI)
+
+if(ENABLE_ASSEMBLY AND NOT XCODE)
+    option(ENABLE_TESTS "Enable Unit Tests" OFF)
+    if(ENABLE_TESTS)
+        add_subdirectory(test)
+    endif()
+endif()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/cmake/CMakeASM_YASMInformation.cmake	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,68 @@
+set(ASM_DIALECT "_YASM")
+set(CMAKE_ASM${ASM_DIALECT}_SOURCE_FILE_EXTENSIONS asm)
+
+if(X64)
+    list(APPEND ASM_FLAGS -DARCH_X86_64=1)
+    if(ENABLE_PIC)
+        list(APPEND ASM_FLAGS -DPIC)
+    endif()
+    if(APPLE)
+        set(ARGS -f macho64 -m amd64 -DPREFIX)
+    elseif(UNIX AND NOT CYGWIN)
+        set(ARGS -f elf64 -m amd64)
+    else()
+        set(ARGS -f win64 -m amd64)
+    endif()
+else()
+    list(APPEND ASM_FLAGS -DARCH_X86_64=0)
+    if(APPLE)
+        set(ARGS -f macho -DPREFIX)
+    elseif(UNIX AND NOT CYGWIN)
+        set(ARGS -f elf32)
+    else()
+        set(ARGS -f win32 -DPREFIX)
+    endif()
+endif()
+
+if(GCC)
+    list(APPEND ASM_FLAGS -DHAVE_ALIGNED_STACK=1)
+else()
+    list(APPEND ASM_FLAGS -DHAVE_ALIGNED_STACK=0)
+endif()
+
+if(HIGH_BIT_DEPTH)
+    if(MAIN12)
+        list(APPEND ASM_FLAGS -DHIGH_BIT_DEPTH=1 -DBIT_DEPTH=12 -DX265_NS=${X265_NS})
+    else()
+        list(APPEND ASM_FLAGS -DHIGH_BIT_DEPTH=1 -DBIT_DEPTH=10 -DX265_NS=${X265_NS})
+    endif()
+else()
+    list(APPEND ASM_FLAGS -DHIGH_BIT_DEPTH=0 -DBIT_DEPTH=8 -DX265_NS=${X265_NS})
+endif()
+
+list(APPEND ASM_FLAGS "${CMAKE_ASM_YASM_FLAGS}")
+
+if(CMAKE_BUILD_TYPE MATCHES Release)
+    list(APPEND ASM_FLAGS "${CMAKE_ASM_YASM_FLAGS_RELEASE}")
+elseif(CMAKE_BUILD_TYPE MATCHES Debug)
+    list(APPEND ASM_FLAGS "${CMAKE_ASM_YASM_FLAGS_DEBUG}")
+elseif(CMAKE_BUILD_TYPE MATCHES MinSizeRel)
+    list(APPEND ASM_FLAGS "${CMAKE_ASM_YASM_FLAGS_MINSIZEREL}")
+elseif(CMAKE_BUILD_TYPE MATCHES RelWithDebInfo)
+    list(APPEND ASM_FLAGS "${CMAKE_ASM_YASM_FLAGS_RELWITHDEBINFO}")
+endif()
+
+set(YASM_FLAGS ${ARGS} ${ASM_FLAGS} PARENT_SCOPE)
+string(REPLACE ";" " " CMAKE_ASM_YASM_COMPILER_ARG1 "${ARGS}")
+
+# This section exists to override the one in CMakeASMInformation.cmake
+# (the default Information file). This removes the <FLAGS>
+# thing so that your C compiler flags that have been set via
+# set_target_properties don't get passed to yasm and confuse it.
+if(NOT CMAKE_ASM${ASM_DIALECT}_COMPILE_OBJECT)
+    string(REPLACE ";" " " STR_ASM_FLAGS "${ASM_FLAGS}")
+    set(CMAKE_ASM${ASM_DIALECT}_COMPILE_OBJECT "<CMAKE_ASM${ASM_DIALECT}_COMPILER> ${STR_ASM_FLAGS} -o <OBJECT> <SOURCE>")
+endif()
+
+include(CMakeASMInformation)
+set(ASM_DIALECT)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/cmake/CMakeDetermineASM_YASMCompiler.cmake	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,5 @@
+set(ASM_DIALECT "_YASM")
+set(CMAKE_ASM${ASM_DIALECT}_COMPILER ${YASM_EXECUTABLE})
+set(CMAKE_ASM${ASM_DIALECT}_COMPILER_INIT ${_CMAKE_TOOLCHAIN_PREFIX}yasm)
+include(CMakeDetermineASMCompiler)
+set(ASM_DIALECT)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/cmake/CMakeTestASM_YASMCompiler.cmake	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,3 @@
+set(ASM_DIALECT "_YASM")
+include(CMakeTestASMCompiler)
+set(ASM_DIALECT)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/cmake/FindNuma.cmake	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,43 @@
+# Module for locating libnuma
+#
+# Read-only variables:
+#   NUMA_FOUND
+#     Indicates that the library has been found.
+#
+#   NUMA_INCLUDE_DIR
+#     Points to the libnuma include directory.
+#
+#   NUMA_LIBRARY_DIR
+#     Points to the directory that contains the libraries.
+#     The content of this variable can be passed to link_directories.
+#
+#   NUMA_LIBRARY
+#     Points to the libnuma that can be passed to target_link_libraries.
+#
+# Copyright (c) 2015 Steve Borho
+
+include(FindPackageHandleStandardArgs)
+
+find_path(NUMA_ROOT_DIR
+  NAMES include/numa.h
+  PATHS ENV NUMA_ROOT
+  DOC "NUMA root directory")
+
+find_path(NUMA_INCLUDE_DIR
+  NAMES numa.h
+  HINTS ${NUMA_ROOT_DIR}
+  PATH_SUFFIXES include
+  DOC "NUMA include directory")
+
+find_library(NUMA_LIBRARY
+  NAMES numa
+  HINTS ${NUMA_ROOT_DIR}
+  DOC "NUMA library")
+
+if (NUMA_LIBRARY)
+    get_filename_component(NUMA_LIBRARY_DIR ${NUMA_LIBRARY} PATH)
+endif()
+
+mark_as_advanced(NUMA_INCLUDE_DIR NUMA_LIBRARY_DIR NUMA_LIBRARY)
+
+find_package_handle_standard_args(NUMA REQUIRED_VARS NUMA_ROOT_DIR NUMA_INCLUDE_DIR NUMA_LIBRARY)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/cmake/FindVLD.cmake	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,126 @@
+# Module for locating Visual Leak Detector.
+#
+# Customizable variables:
+#   VLD_ROOT_DIR
+#     This variable points to the Visual Leak Detector root directory. By
+#     default, the module looks for the installation directory by examining the
+#     Program Files/Program Files (x86) folders and the VLDROOT environment
+#     variable.
+#
+# Read-only variables:
+#   VLD_FOUND
+#     Indicates that the library has been found.
+#
+#   VLD_INCLUDE_DIRS
+#     Points to the Visual Leak Detector include directory.
+#
+#   VLD_LIBRARY_DIRS
+#     Points to the Visual Leak Detector directory that contains the libraries.
+#     The content of this variable can be passed to link_directories.
+#
+#   VLD_LIBRARIES
+#     Points to the Visual Leak Detector libraries that can be passed to
+#     target_link_libraries.
+#
+#
+# Copyright (c) 2012 Sergiu Dotenco
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+INCLUDE (FindPackageHandleStandardArgs)
+
+SET (_VLD_POSSIBLE_LIB_SUFFIXES lib)
+
+# Version 2.0 uses vld_x86 and vld_x64 instead of simply vld as library names
+IF (CMAKE_SIZEOF_VOID_P EQUAL 4)
+  LIST (APPEND _VLD_POSSIBLE_LIB_SUFFIXES lib/Win32)
+ELSEIF (CMAKE_SIZEOF_VOID_P EQUAL 8)
+  LIST (APPEND _VLD_POSSIBLE_LIB_SUFFIXES lib/Win64)
+ENDIF (CMAKE_SIZEOF_VOID_P EQUAL 4)
+
+SET (PFILES "ProgramFiles")
+SET (PFILES_X86 "ProgramFiles(x86)") # hack to avoid escaping issues in cmake 3.1
+
+FIND_PATH (VLD_ROOT_DIR
+  NAMES include/vld.h
+  PATHS ENV VLDROOT
+        "$ENV{PFILES}/Visual Leak Detector"
+        "$ENV{PFILES_X86}/Visual Leak Detector"
+        "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\Visual Leak Detector;InstallLocation]"
+        "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\Visual Leak Detector;InstallLocation]"
+  DOC "VLD root directory")
+
+FIND_PATH (VLD_INCLUDE_DIR
+  NAMES vld.h
+  HINTS ${VLD_ROOT_DIR}
+  PATH_SUFFIXES include
+  DOC "VLD include directory")
+
+FIND_LIBRARY (VLD_LIBRARY_DEBUG
+  NAMES vld
+  HINTS ${VLD_ROOT_DIR}
+  PATH_SUFFIXES ${_VLD_POSSIBLE_LIB_SUFFIXES}
+  DOC "VLD debug library")
+
+IF (VLD_ROOT_DIR)
+  SET (_VLD_VERSION_FILE ${VLD_ROOT_DIR}/CHANGES.txt)
+
+  IF (EXISTS ${_VLD_VERSION_FILE})
+    SET (_VLD_VERSION_REGEX
+      "Visual Leak Detector \\(VLD\\) Version (([0-9]+)\\.([0-9]+)([a-z]|(.([0-9]+)))?)")
+    FILE (STRINGS ${_VLD_VERSION_FILE} _VLD_VERSION_TMP REGEX
+      ${_VLD_VERSION_REGEX})
+
+    STRING (REGEX REPLACE ${_VLD_VERSION_REGEX} "\\1" _VLD_VERSION_TMP
+      "${_VLD_VERSION_TMP}")
+
+    STRING (REGEX REPLACE "([0-9]+).([0-9]+).*" "\\1" VLD_VERSION_MAJOR
+      "${_VLD_VERSION_TMP}")
+    STRING (REGEX REPLACE "([0-9]+).([0-9]+).*" "\\2" VLD_VERSION_MINOR
+      "${_VLD_VERSION_TMP}")
+
+    SET (VLD_VERSION ${VLD_VERSION_MAJOR}.${VLD_VERSION_MINOR})
+
+    IF ("${_VLD_VERSION_TMP}" MATCHES "^([0-9]+).([0-9]+).([0-9]+)$")
+      # major.minor.patch version numbering scheme
+      STRING (REGEX REPLACE "([0-9]+).([0-9]+).([0-9]+)" "\\3"
+        VLD_VERSION_PATCH "${_VLD_VERSION_TMP}")
+      SET (VLD_VERSION "${VLD_VERSION}.${VLD_VERSION_PATCH}")
+      SET (VLD_VERSION_COUNT 3)
+    ELSE ("${_VLD_VERSION_TMP}" MATCHES "^([0-9]+).([0-9]+).([0-9]+)$")
+      # major.minor version numbering scheme. The trailing letter is ignored.
+      SET (VLD_VERSION_COUNT 2)
+    ENDIF ("${_VLD_VERSION_TMP}" MATCHES "^([0-9]+).([0-9]+).([0-9]+)$")
+  ENDIF (EXISTS ${_VLD_VERSION_FILE})
+ENDIF (VLD_ROOT_DIR)
+
+IF (VLD_LIBRARY_DEBUG)
+  SET (VLD_LIBRARY debug ${VLD_LIBRARY_DEBUG} CACHE DOC "VLD library")
+  GET_FILENAME_COMPONENT (_VLD_LIBRARY_DIR ${VLD_LIBRARY_DEBUG} PATH)
+  SET (VLD_LIBRARY_DIR ${_VLD_LIBRARY_DIR} CACHE PATH "VLD library directory")
+ENDIF (VLD_LIBRARY_DEBUG)
+
+SET (VLD_INCLUDE_DIRS ${VLD_INCLUDE_DIR})
+SET (VLD_LIBRARY_DIRS ${VLD_LIBRARY_DIR})
+SET (VLD_LIBRARIES ${VLD_LIBRARY})
+
+MARK_AS_ADVANCED (VLD_INCLUDE_DIR VLD_LIBRARY_DIR VLD_LIBRARY_DEBUG VLD_LIBRARY)
+
+FIND_PACKAGE_HANDLE_STANDARD_ARGS (VLD REQUIRED_VARS VLD_ROOT_DIR
+  VLD_INCLUDE_DIR VLD_LIBRARY VERSION_VAR VLD_VERSION)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/cmake/FindYasm.cmake	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,25 @@
+include(FindPackageHandleStandardArgs)
+
+# Simple path search with YASM_ROOT environment variable override
+find_program(YASM_EXECUTABLE 
+ NAMES yasm yasm-1.2.0-win32 yasm-1.2.0-win64 yasm yasm-1.3.0-win32 yasm-1.3.0-win64
+ HINTS $ENV{YASM_ROOT} ${YASM_ROOT}
+ PATH_SUFFIXES bin
+)
+
+if(YASM_EXECUTABLE)
+    execute_process(COMMAND ${YASM_EXECUTABLE} --version
+        OUTPUT_VARIABLE yasm_version
+        ERROR_QUIET
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+        )
+    if(yasm_version MATCHES "^yasm ([0-9\\.]*)")
+        set(YASM_VERSION_STRING "${CMAKE_MATCH_1}")
+    endif()
+    unset(yasm_version)
+endif()
+
+# Provide standardized success/failure messages
+find_package_handle_standard_args(yasm
+    REQUIRED_VARS YASM_EXECUTABLE
+    VERSION_VAR YASM_VERSION_STRING)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/cmake/clean-generated.cmake	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,10 @@
+set(generated "${CMAKE_CURRENT_BINARY_DIR}/x265.rc"
+              "${CMAKE_CURRENT_BINARY_DIR}/x265.pc"
+              "${CMAKE_CURRENT_BINARY_DIR}/x265.def"
+              "${CMAKE_CURRENT_BINARY_DIR}/x265_config.h")
+
+foreach(file ${generated})
+  if(EXISTS ${file})
+     file(REMOVE ${file})
+  endif()
+endforeach(file)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/cmake/cmake_uninstall.cmake.in	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,19 @@
+if(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
+    message(FATAL_ERROR "Cannot find install manifest: '@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt'")
+endif()
+
+file(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files)
+string(REGEX REPLACE "\n" ";" files "${files}")
+foreach(file ${files})
+    message(STATUS "Uninstalling $ENV{DESTDIR}${file}")
+    if(EXISTS "$ENV{DESTDIR}${file}" OR IS_SYMLINK "$ENV{DESTDIR}${file}")
+        exec_program("@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
+                     OUTPUT_VARIABLE rm_out
+                     RETURN_VALUE rm_retval)
+        if(NOT "${rm_retval}" STREQUAL 0)
+            message(FATAL_ERROR "Problem when removing '$ENV{DESTDIR}${file}'")
+        endif(NOT "${rm_retval}" STREQUAL 0)
+    else()
+        message(STATUS "File '$ENV{DESTDIR}${file}' does not exist.")
+    endif()
+endforeach(file)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/cmake/version.cmake	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,90 @@
+if(CMAKE_VERSION VERSION_LESS "2.8.10")
+    find_program(HG_EXECUTABLE hg)
+else()
+    find_package(Hg QUIET)
+endif()
+find_package(Git QUIET) # present in 2.8.8
+
+# defaults, in case everything below fails
+set(X265_VERSION "unknown")
+set(X265_LATEST_TAG "0.0")
+set(X265_TAG_DISTANCE "0")
+
+if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../.hg_archival.txt)
+    # read the lines of the archive summary file to extract the version
+    file(READ ${CMAKE_CURRENT_SOURCE_DIR}/../.hg_archival.txt archive)
+    STRING(REGEX REPLACE "\n" ";" archive "${archive}")
+    foreach(f ${archive})
+        string(FIND "${f}" ": " pos)
+        string(SUBSTRING "${f}" 0 ${pos} key)
+        string(SUBSTRING "${f}" ${pos} -1 value)
+        string(SUBSTRING "${value}" 2 -1 value)
+        set(hg_${key} ${value})
+    endforeach()
+    if(DEFINED hg_tag)
+        set(X265_VERSION ${hg_tag})
+        set(X265_LATEST_TAG ${hg_tag})
+        set(X265_TAG_DISTANCE "0")
+    elseif(DEFINED hg_node)
+        string(SUBSTRING "${hg_node}" 0 16 hg_id)
+        set(X265_VERSION "${hg_latesttag}+${hg_latesttagdistance}-${hg_id}")
+    endif()
+elseif(HG_EXECUTABLE AND EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../.hg)
+    if(EXISTS "${HG_EXECUTABLE}.bat")
+        # mercurial source installs on Windows require .bat extension
+        set(HG_EXECUTABLE "${HG_EXECUTABLE}.bat")
+    endif()
+    message(STATUS "hg found at ${HG_EXECUTABLE}")
+
+    execute_process(COMMAND
+        ${HG_EXECUTABLE} log -r. --template "{latesttag}"
+        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+        OUTPUT_VARIABLE X265_LATEST_TAG
+        ERROR_QUIET
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+        )
+    execute_process(COMMAND
+        ${HG_EXECUTABLE} log -r. --template "{latesttagdistance}"
+        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+        OUTPUT_VARIABLE X265_TAG_DISTANCE
+        ERROR_QUIET
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+        )
+    execute_process(
+        COMMAND
+        ${HG_EXECUTABLE} log -r. --template "{node|short}"
+        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+        OUTPUT_VARIABLE HG_REVISION_ID
+        ERROR_QUIET
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+        )
+
+    if(X265_LATEST_TAG MATCHES "^r")
+        string(SUBSTRING ${X265_LATEST_TAG} 1 -1 X265_LATEST_TAG)
+    endif()
+    if(X265_TAG_DISTANCE STREQUAL "0")
+        set(X265_VERSION "${X265_LATEST_TAG}")
+    else()
+        set(X265_VERSION "${X265_LATEST_TAG}+${X265_TAG_DISTANCE}-${HG_REVISION_ID}")
+    endif()
+elseif(GIT_EXECUTABLE AND EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../.git)
+    execute_process(
+        COMMAND
+        ${GIT_EXECUTABLE} describe --tags --abbrev=0
+        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+        OUTPUT_VARIABLE X265_LATEST_TAG
+        ERROR_QUIET
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+        )
+
+    execute_process(
+        COMMAND
+        ${GIT_EXECUTABLE} describe --tags
+        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+        OUTPUT_VARIABLE X265_VERSION
+        ERROR_QUIET
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+        )
+endif()
+
+message(STATUS "x265 version ${X265_VERSION}")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/CMakeLists.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,127 @@
+# vim: syntax=cmake
+
+list(APPEND VFLAGS "-DX265_VERSION=${X265_VERSION}")
+if(EXTRA_LIB)
+    if(LINKED_8BIT)
+        list(APPEND VFLAGS "-DLINKED_8BIT=1")
+    endif(LINKED_8BIT)
+    if(LINKED_10BIT)
+        list(APPEND VFLAGS "-DLINKED_10BIT=1")
+    endif(LINKED_10BIT)
+    if(LINKED_12BIT)
+        list(APPEND VFLAGS "-DLINKED_12BIT=1")
+    endif(LINKED_12BIT)
+endif(EXTRA_LIB)
+
+if(ENABLE_ASSEMBLY)
+    set_source_files_properties(threading.cpp primitives.cpp PROPERTIES COMPILE_FLAGS -DENABLE_ASSEMBLY=1)
+    list(APPEND VFLAGS "-DENABLE_ASSEMBLY=1")
+
+    set(SSE3  vec/dct-sse3.cpp)
+    set(SSSE3 vec/dct-ssse3.cpp)
+    set(SSE41 vec/dct-sse41.cpp)
+
+    if(MSVC AND X86)
+        set(PRIMITIVES ${SSE3} ${SSSE3} ${SSE41})
+        set(WARNDISABLE "/wd4100") # unreferenced formal parameter
+        if(INTEL_CXX)
+            add_definitions(/Qwd111) # statement is unreachable
+            add_definitions(/Qwd128) # loop is unreachable
+            add_definitions(/Qwd177) # declared function is unused
+            add_definitions(/Qwd185) # dynamic initialization in unreachable code
+            add_definitions(/Qwd280) # conditional expression is constant
+        endif()
+        if(X64)
+            set_source_files_properties(${SSE3} ${SSSE3} ${SSE41} PROPERTIES COMPILE_FLAGS "${WARNDISABLE}")
+        else()
+            # x64 implies SSE4, so only add /arch:SSE2 if building for Win32
+            set_source_files_properties(${SSE3} ${SSSE3} ${SSE41} PROPERTIES COMPILE_FLAGS "${WARNDISABLE} /arch:SSE2")
+        endif()
+    endif()
+    if(GCC AND X86)
+        if(CLANG)
+            # llvm intrinsic headers cause shadow warnings
+            set(WARNDISABLE "-Wno-shadow -Wno-unused-parameter")
+        else()
+            set(WARNDISABLE "-Wno-unused-parameter")
+        endif()
+        if(INTEL_CXX OR CLANG OR (NOT CC_VERSION VERSION_LESS 4.3))
+            set(PRIMITIVES ${SSE3} ${SSSE3} ${SSE41})
+            set_source_files_properties(${SSE3}  PROPERTIES COMPILE_FLAGS "${WARNDISABLE} -msse3")
+            set_source_files_properties(${SSSE3} PROPERTIES COMPILE_FLAGS "${WARNDISABLE} -mssse3")
+            set_source_files_properties(${SSE41} PROPERTIES COMPILE_FLAGS "${WARNDISABLE} -msse4.1")
+        endif()
+    endif()
+    set(VEC_PRIMITIVES vec/vec-primitives.cpp ${PRIMITIVES})
+    source_group(Intrinsics FILES ${VEC_PRIMITIVES})
+
+    set(C_SRCS asm-primitives.cpp pixel.h mc.h ipfilter8.h blockcopy8.h dct8.h loopfilter.h)
+    set(A_SRCS pixel-a.asm const-a.asm cpu-a.asm ssd-a.asm mc-a.asm
+               mc-a2.asm pixel-util8.asm blockcopy8.asm
+               pixeladd8.asm dct8.asm)
+    if(HIGH_BIT_DEPTH)
+        set(A_SRCS ${A_SRCS} sad16-a.asm intrapred16.asm ipfilter16.asm loopfilter.asm)
+    else()
+        set(A_SRCS ${A_SRCS} sad-a.asm intrapred8.asm intrapred8_allangs.asm ipfilter8.asm loopfilter.asm)
+    endif()
+
+    if(NOT X64)
+        set(A_SRCS ${A_SRCS} pixel-32.asm)
+    endif()
+
+    if(MSVC_IDE OR XCODE)
+        # MSVC requires custom build rules in the main cmake script for yasm
+        set(MSVC_ASMS "${A_SRCS}" CACHE INTERNAL "yasm sources")
+        set(A_SRCS)
+    endif()
+
+    enable_language(ASM_YASM)
+
+    foreach(SRC ${A_SRCS} ${C_SRCS})
+        set(ASM_PRIMITIVES ${ASM_PRIMITIVES} x86/${SRC})
+    endforeach()
+    source_group(Assembly FILES ${ASM_PRIMITIVES})
+endif(ENABLE_ASSEMBLY)
+
+# set_target_properties can't do list expansion
+string(REPLACE ";" " " VERSION_FLAGS "${VFLAGS}")
+set_source_files_properties(version.cpp PROPERTIES COMPILE_FLAGS ${VERSION_FLAGS})
+
+check_symbol_exists(strtok_r "string.h" HAVE_STRTOK_R)
+if(HAVE_STRTOK_R)
+    set_source_files_properties(param.cpp PROPERTIES COMPILE_FLAGS -DHAVE_STRTOK_R=1)
+endif()
+
+if(GCC AND CC_HAS_NO_NARROWING)
+    set_source_files_properties(cpu.cpp PROPERTIES COMPILE_FLAGS -Wno-narrowing)
+endif()
+if(WIN32)
+    set(WINXP winxp.h winxp.cpp)
+endif(WIN32)
+
+add_library(common OBJECT
+    ${ASM_PRIMITIVES} ${VEC_PRIMITIVES} ${WINXP}
+    primitives.cpp primitives.h
+    pixel.cpp dct.cpp ipfilter.cpp intrapred.cpp loopfilter.cpp
+    constants.cpp constants.h
+    cpu.cpp cpu.h version.cpp
+    threading.cpp threading.h
+    threadpool.cpp threadpool.h
+    wavefront.h wavefront.cpp
+    md5.cpp md5.h
+    bitstream.h bitstream.cpp
+    yuv.cpp yuv.h
+    shortyuv.cpp shortyuv.h
+    picyuv.cpp picyuv.h
+    common.cpp common.h
+    param.cpp param.h
+    frame.cpp frame.h
+    framedata.cpp framedata.h
+    cudata.cpp cudata.h
+    slice.cpp slice.h
+    lowres.cpp lowres.h mv.h 
+    piclist.cpp piclist.h
+    predict.cpp  predict.h
+    scalinglist.cpp scalinglist.h
+    quant.cpp quant.h contexts.h
+    deblock.cpp deblock.h)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/bitstream.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,129 @@
+#include "common.h"
+#include "bitstream.h"
+
+using namespace X265_NS;
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244)
+#endif
+
+#define MIN_FIFO_SIZE 1000
+
+Bitstream::Bitstream()
+{
+    m_fifo = X265_MALLOC(uint8_t, MIN_FIFO_SIZE);
+    m_byteAlloc = MIN_FIFO_SIZE;
+    resetBits();
+}
+
+void Bitstream::push_back(uint8_t val)
+{
+    if (!m_fifo)
+        return;
+
+    if (m_byteOccupancy >= m_byteAlloc)
+    {
+        /** reallocate buffer with doubled size */
+        uint8_t *temp = X265_MALLOC(uint8_t, m_byteAlloc * 2);
+        if (temp)
+        {
+            memcpy(temp, m_fifo, m_byteOccupancy);
+            X265_FREE(m_fifo);
+            m_fifo = temp;
+            m_byteAlloc *= 2;
+        }
+        else
+        {
+            x265_log(NULL, X265_LOG_ERROR, "Unable to realloc bitstream buffer");
+            return;
+        }
+    }
+    m_fifo[m_byteOccupancy++] = val;
+}
+
+void Bitstream::write(uint32_t val, uint32_t numBits)
+{
+    X265_CHECK(numBits <= 32, "numBits out of range\n");
+    X265_CHECK(numBits == 32 || ((val & (~0u << numBits)) == 0), "numBits & val out of range\n");
+
+    uint32_t totalPartialBits = m_partialByteBits + numBits;
+    uint32_t nextPartialBits = totalPartialBits & 7;
+    uint8_t  nextHeldByte = val << (8 - nextPartialBits);
+    uint32_t writeBytes = totalPartialBits >> 3;
+
+    if (writeBytes)
+    {
+        /* topword aligns m_partialByte with the msb of val */
+        uint32_t topword = (numBits - nextPartialBits) & ~7;
+#if USING_FTRAPV
+        uint32_t write_bits = (topword < 32 ? m_partialByte << topword : 0) | (val >> nextPartialBits);
+#else
+        uint32_t write_bits = (m_partialByte << topword) | (val >> nextPartialBits);
+#endif
+
+        switch (writeBytes)
+        {
+        case 4: push_back(write_bits >> 24);
+        case 3: push_back(write_bits >> 16);
+        case 2: push_back(write_bits >> 8);
+        case 1: push_back(write_bits);
+        }
+
+        m_partialByte = nextHeldByte;
+        m_partialByteBits = nextPartialBits;
+    }
+    else
+    {
+        m_partialByte |= nextHeldByte;
+        m_partialByteBits = nextPartialBits;
+    }
+}
+
+void Bitstream::writeByte(uint32_t val)
+{
+    // Only CABAC will call writeByte, the fifo must be byte aligned
+    X265_CHECK(!m_partialByteBits, "expecting m_partialByteBits = 0\n");
+
+    push_back(val);
+}
+
+void Bitstream::writeAlignOne()
+{
+    uint32_t numBits = (8 - m_partialByteBits) & 0x7;
+
+    write((1 << numBits) - 1, numBits);
+}
+
+void Bitstream::writeAlignZero()
+{
+    if (m_partialByteBits)
+    {
+        push_back(m_partialByte);
+        m_partialByte = 0;
+        m_partialByteBits = 0;
+    }
+}
+
+void Bitstream::writeByteAlignment()
+{
+    write(1, 1);
+    writeAlignZero();
+}
+
+void SyntaxElementWriter::writeUvlc(uint32_t code)
+{
+    uint32_t length = 1;
+    uint32_t temp = ++code;
+
+    X265_CHECK(temp, "writing -1 code, will cause infinite loop\n");
+
+    while (1 != temp)
+    {
+        temp >>= 1;
+        length += 2;
+    }
+
+    // Take care of cases where length > 32
+    m_bitIf->write(0, length >> 1);
+    m_bitIf->write(code, (length + 1) >> 1);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/bitstream.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,158 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Author: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_BITSTREAM_H
+#define X265_BITSTREAM_H 1
+
+namespace X265_NS {
+// private namespace
+
+class BitInterface
+{
+public:
+
+    virtual void     write(uint32_t val, uint32_t numBits)  = 0;
+    virtual void     writeByte(uint32_t val)                = 0;
+    virtual void     resetBits()                            = 0;
+    virtual uint32_t getNumberOfWrittenBits() const         = 0;
+    virtual void     writeAlignOne()                        = 0;
+    virtual void     writeAlignZero()                       = 0;
+    virtual ~BitInterface() {}
+};
+
+class BitCounter : public BitInterface
+{
+protected:
+
+    uint32_t  m_bitCounter;
+
+public:
+
+    BitCounter() : m_bitCounter(0) {}
+
+    void     write(uint32_t, uint32_t num)  { m_bitCounter += num; }
+    void     writeByte(uint32_t)            { m_bitCounter += 8;   }
+    void     resetBits()                    { m_bitCounter = 0;    }
+    uint32_t getNumberOfWrittenBits() const { return m_bitCounter; }
+    void     writeAlignOne()                { }
+    void     writeAlignZero()               { }
+};
+
+
+class Bitstream : public BitInterface
+{
+public:
+
+    Bitstream();
+    ~Bitstream()                             { X265_FREE(m_fifo); }
+
+    void     resetBits()                     { m_partialByteBits = m_byteOccupancy = 0; m_partialByte = 0; }
+    uint32_t getNumberOfWrittenBytes() const { return m_byteOccupancy; }
+    uint32_t getNumberOfWrittenBits()  const { return m_byteOccupancy * 8 + m_partialByteBits; }
+    const uint8_t* getFIFO() const           { return m_fifo; }
+
+    void     write(uint32_t val, uint32_t numBits);
+    void     writeByte(uint32_t val);
+
+    void     writeAlignOne();      // insert one bits until the bitstream is byte-aligned
+    void     writeAlignZero();     // insert zero bits until the bitstream is byte-aligned
+    void     writeByteAlignment(); // insert 1 bit, then pad to byte-align with zero
+
+private:
+
+    uint8_t *m_fifo;
+    uint32_t m_byteAlloc;
+    uint32_t m_byteOccupancy;
+    uint32_t m_partialByteBits;
+    uint8_t  m_partialByte;
+
+    void     push_back(uint8_t val);
+};
+
+static const uint8_t bitSize[256] =
+{
+    1, 1, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7,
+    9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+};
+
+static inline int bs_size_ue(unsigned int val)
+{
+    return bitSize[val + 1];
+}
+
+static inline int bs_size_ue_big(unsigned int val)
+{
+    if (val < 255)
+        return bitSize[val + 1];
+    else
+        return bitSize[(val + 1) >> 8] + 16;
+}
+
+static inline int bs_size_se(int val)
+{
+    int tmp = 1 - val * 2;
+
+    if (tmp < 0) tmp = val * 2;
+    if (tmp < 256)
+        return bitSize[tmp];
+    else
+        return bitSize[tmp >> 8] + 16;
+}
+
+class SyntaxElementWriter
+{
+public:
+
+    BitInterface* m_bitIf;
+
+    SyntaxElementWriter() : m_bitIf(NULL) {}
+
+    /* silently discard the name of the syntax element */
+    inline void WRITE_CODE(uint32_t code, uint32_t length, const char *) { writeCode(code, length); }
+    inline void WRITE_UVLC(uint32_t code,                  const char *) { writeUvlc(code); }
+    inline void WRITE_SVLC(int32_t  code,                  const char *) { writeSvlc(code); }
+    inline void WRITE_FLAG(bool flag,                      const char *) { writeFlag(flag); }
+
+    void writeCode(uint32_t code, uint32_t length) { m_bitIf->write(code, length); }
+    void writeUvlc(uint32_t code);
+    void writeSvlc(int32_t code)                   { uint32_t ucode = (code <= 0) ? -code << 1 : (code << 1) - 1; writeUvlc(ucode); }
+    void writeFlag(bool code)                      { m_bitIf->write(code, 1); }
+};
+
+}
+
+#endif // ifndef X265_BITSTREAM_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/common.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,219 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Deepthi Nandakumar <deepthi@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "slice.h"
+#include "threading.h"
+#include "x265.h"
+
+#if _WIN32
+#include <sys/types.h>
+#include <sys/timeb.h>
+#else
+#include <sys/time.h>
+#endif
+
+namespace X265_NS {
+
+#if CHECKED_BUILD || _DEBUG
+int g_checkFailures;
+#endif
+
+/* Current time in microseconds (platform-dependent epoch); intended for
+ * elapsed-time measurement rather than calendar time. */
+int64_t x265_mdate(void)
+{
+#if _WIN32
+    struct timeb tb;
+    ftime(&tb);
+    return ((int64_t)tb.time * 1000 + (int64_t)tb.millitm) * 1000;   // ms resolution scaled to us
+#else
+    struct timeval tv_date;
+    gettimeofday(&tv_date, NULL);
+    return (int64_t)tv_date.tv_sec * 1000000 + (int64_t)tv_date.tv_usec;
+#endif
+}
+
+#define X265_ALIGNBYTES 32   // alignment for all x265_malloc allocations
+
+#if _WIN32
+#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR)
+#define _aligned_malloc __mingw_aligned_malloc
+#define _aligned_free   __mingw_aligned_free
+#include "malloc.h"
+#endif
+
+/* Allocate 'size' bytes aligned to X265_ALIGNBYTES; returns NULL on failure.
+ * Release with x265_free() only (not plain free()). */
+void *x265_malloc(size_t size)
+{
+    return _aligned_malloc(size, X265_ALIGNBYTES);
+}
+
+/* Release memory obtained from x265_malloc(); NULL is accepted. */
+void x265_free(void *ptr)
+{
+    if (ptr) _aligned_free(ptr);
+}
+
+#else // if _WIN32
+/* Allocate 'size' bytes aligned to X265_ALIGNBYTES; returns NULL on failure. */
+void *x265_malloc(size_t size)
+{
+    void *ptr;
+
+    if (posix_memalign((void**)&ptr, X265_ALIGNBYTES, size) == 0)
+        return ptr;
+    else
+        return NULL;
+}
+
+/* Release memory obtained from x265_malloc(); NULL is accepted. */
+void x265_free(void *ptr)
+{
+    if (ptr) free(ptr);
+}
+
+#endif // if _WIN32
+
+/* Not a general-purpose function; multiplies input by -1/6 to convert
+ * qp to qscale. */
+// Fixed-point 2^(-x/6) via the 64-entry x265_exp2_lut; result is rounded,
+// shifted by whole octaves (i >> 6) and clamped to [0, 0xffff].
+int x265_exp2fix8(double x)
+{
+    int i = (int)(x * (-64.f / 6.f) + 512.5f);   // 64 LUT steps per octave, +512 bias
+
+    if (i < 0) return 0;
+    if (i > 1023) return 0xffff;
+    return (x265_exp2_lut[i & 63] + 256) << (i >> 6) >> 8;
+}
+
+/* Write one log line "caller [level]: message" to stderr.  Messages whose
+ * level exceeds param->logLevel are dropped; a NULL param logs
+ * unconditionally.  The formatted text is truncated to a 4 KB buffer.
+ * Fix: use snprintf for the prefix -- the original used sprintf, so an
+ * oversized 'caller' string could overflow the stack buffer. */
+void general_log(const x265_param* param, const char* caller, int level, const char* fmt, ...)
+{
+    if (param && level > param->logLevel)
+        return;
+    const int bufferSize = 4096;
+    char buffer[bufferSize];
+    int p = 0;
+    const char* log_level;
+    switch (level)
+    {
+    case X265_LOG_ERROR:
+        log_level = "error";
+        break;
+    case X265_LOG_WARNING:
+        log_level = "warning";
+        break;
+    case X265_LOG_INFO:
+        log_level = "info";
+        break;
+    case X265_LOG_DEBUG:
+        log_level = "debug";
+        break;
+    case X265_LOG_FULL:
+        log_level = "full";
+        break;
+    default:
+        log_level = "unknown";
+        break;
+    }
+
+    if (caller)
+    {
+        /* bounded prefix write; clamp p on truncation or encoding error so
+         * the vsnprintf below always receives a valid range */
+        p = snprintf(buffer, bufferSize, "%-4s [%s]: ", caller, log_level);
+        if (p < 0 || p >= bufferSize)
+            p = bufferSize - 1;
+    }
+    va_list arg;
+    va_start(arg, fmt);
+    vsnprintf(buffer + p, bufferSize - p, fmt, arg);
+    va_end(arg);
+    fputs(buffer, stderr);
+}
+
+/* Convert an SSIM score to decibels: -10 * log10(1 - ssim), capped at
+ * 100 dB as ssim approaches 1.0. */
+double x265_ssim2dB(double ssim)
+{
+    double inv_ssim = 1 - ssim;
+
+    if (inv_ssim <= 0.0000000001) /* Max 100dB */
+        return 100;
+
+    return -10.0 * log10(inv_ssim);
+}
+
+/* The qscale - qp conversion is specified in the standards.
+ * Approx qscale increases by 12%  with every qp increment */
+// Inverse of x265_qp2qScale(): qp = 12 + 6 * log2(qscale / 0.85).
+double x265_qScale2qp(double qScale)
+{
+    return 12.0 + 6.0 * (double)X265_LOG2(qScale / 0.85);
+}
+
+// Map quantizer qp to qscale: 0.85 * 2^((qp - 12) / 6); inverse of x265_qScale2qp().
+double x265_qp2qScale(double qp)
+{
+    return 0.85 * pow(2.0, (qp - 12.0) / 6.0);
+}
+
+/* Size in pixels of one plane of a picture, applying the per-plane
+ * subsampling shifts of color space 'csp' from the x265_cli_csps table. */
+uint32_t x265_picturePlaneSize(int csp, int width, int height, int plane)
+{
+    uint32_t size = (uint32_t)(width >> x265_cli_csps[csp].width[plane]) * (height >> x265_cli_csps[csp].height[plane]);
+
+    return size;
+}
+
+/* Read an entire file into a heap buffer (X265_MALLOC), guaranteeing a
+ * trailing newline and NUL terminator.  Returns NULL on any error; the
+ * caller owns the buffer and must release it with X265_FREE.
+ * Fixes: (1) ftell()'s result is kept in a signed long so its -1 failure
+ * value is detected -- assigning it directly to a size_t made the '<= 0'
+ * test always false on error; (2) the fread result is checked BEFORE
+ * buf[fSize - 1] is inspected, avoiding a read of uninitialized memory
+ * after a short read. */
+char* x265_slurp_file(const char *filename)
+{
+    if (!filename)
+        return NULL;
+
+    int bError = 0;
+    size_t fSize;
+    long fPos;
+    char *buf = NULL;
+
+    FILE *fh = fopen(filename, "rb");
+    if (!fh)
+    {
+        x265_log(NULL, X265_LOG_ERROR, "unable to open file %s\n", filename);
+        return NULL;
+    }
+
+    bError |= fseek(fh, 0, SEEK_END) < 0;
+    bError |= (fPos = ftell(fh)) <= 0;   // signed: catches ftell() == -1
+    bError |= fseek(fh, 0, SEEK_SET) < 0;
+    if (bError)
+        goto error;
+    fSize = (size_t)fPos;
+
+    buf = X265_MALLOC(char, fSize + 2);  // +2: room for added '\n' and NUL
+    if (!buf)
+    {
+        x265_log(NULL, X265_LOG_ERROR, "unable to allocate memory\n");
+        goto error;
+    }
+
+    if (fread(buf, 1, fSize, fh) != fSize)
+    {
+        x265_log(NULL, X265_LOG_ERROR, "unable to read the file\n");
+        X265_FREE(buf);
+        fclose(fh);
+        return NULL;
+    }
+    fclose(fh);
+
+    if (buf[fSize - 1] != '\n')
+        buf[fSize++] = '\n';   // guarantee newline-terminated content
+    buf[fSize] = 0;            // NUL terminator for string consumers
+    return buf;
+
+error:
+    fclose(fh);
+    return NULL;
+}
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/common.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,441 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Deepthi Nandakumar <deepthi@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_COMMON_H
+#define X265_COMMON_H
+
+#include <algorithm>
+#include <climits>
+#include <cmath>
+#include <cstdarg>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <cctype>
+#include <ctime>
+
+#include <stdint.h>
+#include <memory.h>
+#include <assert.h>
+
+#include "x265.h"
+
+#if ENABLE_PPA && ENABLE_VTUNE
+#error "PPA and VTUNE cannot both be enabled. Disable one of them."
+#endif
+#if ENABLE_PPA
+#include "profile/PPA/ppa.h"
+#define ProfileScopeEvent(x) PPAScopeEvent(x)
+#define THREAD_NAME(n,i)
+#define PROFILE_INIT()       PPA_INIT()
+#define PROFILE_PAUSE()
+#define PROFILE_RESUME()
+#elif ENABLE_VTUNE
+#include "profile/vtune/vtune.h"
+#define ProfileScopeEvent(x) VTuneScopeEvent _vtuneTask(x)
+#define THREAD_NAME(n,i)     vtuneSetThreadName(n, i)
+#define PROFILE_INIT()       vtuneInit()
+#define PROFILE_PAUSE()      __itt_pause()
+#define PROFILE_RESUME()     __itt_resume()
+#else
+#define ProfileScopeEvent(x)
+#define THREAD_NAME(n,i)
+#define PROFILE_INIT()
+#define PROFILE_PAUSE()
+#define PROFILE_RESUME()
+#endif
+
+#define FENC_STRIDE 64
+#define NUM_INTRA_MODE 35
+
+#if defined(__GNUC__)
+#define ALIGN_VAR_8(T, var)  T var __attribute__((aligned(8)))
+#define ALIGN_VAR_16(T, var) T var __attribute__((aligned(16)))
+#define ALIGN_VAR_32(T, var) T var __attribute__((aligned(32)))
+
+#if defined(__MINGW32__)
+#define fseeko fseeko64
+#endif
+
+#elif defined(_MSC_VER)
+
+#define ALIGN_VAR_8(T, var)  __declspec(align(8)) T var
+#define ALIGN_VAR_16(T, var) __declspec(align(16)) T var
+#define ALIGN_VAR_32(T, var) __declspec(align(32)) T var
+#define fseeko _fseeki64
+
+#endif // if defined(__GNUC__)
+
+#if HAVE_INT_TYPES_H
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+#define X265_LL "%" PRIu64
+#else
+#define X265_LL "%lld"
+#endif
+
+#if _DEBUG && defined(_MSC_VER)
+#define DEBUG_BREAK() __debugbreak()
+#elif __APPLE_CC__
+#define DEBUG_BREAK() __builtin_trap()
+#else
+#define DEBUG_BREAK() abort()
+#endif
+
+/* If compiled with CHECKED_BUILD perform run-time checks and log any that
+ * fail, both to stderr and to a file */
+#if CHECKED_BUILD || _DEBUG
+namespace X265_NS { extern int g_checkFailures; }
+#define X265_CHECK(expr, ...) if (!(expr)) { \
+    x265_log(NULL, X265_LOG_ERROR, __VA_ARGS__); \
+    FILE *fp = fopen("x265_check_failures.txt", "a"); \
+    if (fp) { fprintf(fp, "%s:%d\n", __FILE__, __LINE__); fprintf(fp, __VA_ARGS__); fclose(fp); } \
+    g_checkFailures++; DEBUG_BREAK(); \
+}
+#if _MSC_VER
+#pragma warning(disable: 4127) // some checks have constant conditions
+#endif
+#else
+#define X265_CHECK(expr, ...)
+#endif
+
+#if HIGH_BIT_DEPTH
+typedef uint16_t pixel;
+typedef uint32_t sum_t;
+typedef uint64_t sum2_t;
+typedef uint64_t pixel4;
+typedef int64_t  ssum2_t;
+#else
+typedef uint8_t  pixel;
+typedef uint16_t sum_t;
+typedef uint32_t sum2_t;
+typedef uint32_t pixel4;
+typedef int32_t  ssum2_t; // Signed sum
+#endif // if HIGH_BIT_DEPTH
+
+#if X265_DEPTH <= 10
+typedef uint32_t sse_ret_t;
+#else
+typedef uint64_t sse_ret_t;
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define MAX_UINT        0xFFFFFFFFU // max. value of unsigned 32-bit integer
+#define MAX_INT         2147483647  // max. value of signed 32-bit integer
+#define MAX_INT64       0x7FFFFFFFFFFFFFFFLL  // max. value of signed 64-bit integer
+#define MAX_DOUBLE      1.7e+308    // max. value of double-type value
+
+#define QP_MIN          0
+#define QP_MAX_SPEC     51 /* max allowed signaled QP in HEVC */
+#define QP_MAX_MAX      69 /* max allowed QP to be output by rate control */
+
+#define MIN_QPSCALE     0.21249999999999999
+#define MAX_MAX_QPSCALE 615.46574234477100
+
+#define BITS_FOR_POC 8
+
+// Generic minimum of two comparable values.
+template<typename T>
+inline T x265_min(T a, T b) { return a < b ? a : b; }
+
+// Generic maximum of two comparable values.
+template<typename T>
+inline T x265_max(T a, T b) { return a > b ? a : b; }
+
+// Clamp 'a' into the inclusive range [minVal, maxVal].
+template<typename T>
+inline T x265_clip3(T minVal, T maxVal, T a) { return x265_min(x265_max(minVal, a), maxVal); }
+
+template<typename T> /* clip to pixel range, 0..255 or 0..1023 */
+inline pixel x265_clip(T x) { return (pixel)x265_min<T>(T((1 << X265_DEPTH) - 1), x265_max<T>(T(0), x)); }
+
+typedef int16_t  coeff_t;      // transform coefficient
+
+#define X265_MIN(a, b) ((a) < (b) ? (a) : (b))
+#define X265_MAX(a, b) ((a) > (b) ? (a) : (b))
+#define COPY1_IF_LT(x, y) if ((y) < (x)) (x) = (y);
+#define COPY2_IF_LT(x, y, a, b) \
+    if ((y) < (x)) \
+    { \
+        (x) = (y); \
+        (a) = (b); \
+    }
+#define COPY3_IF_LT(x, y, a, b, c, d) \
+    if ((y) < (x)) \
+    { \
+        (x) = (y); \
+        (a) = (b); \
+        (c) = (d); \
+    }
+#define COPY4_IF_LT(x, y, a, b, c, d, e, f) \
+    if ((y) < (x)) \
+    { \
+        (x) = (y); \
+        (a) = (b); \
+        (c) = (d); \
+        (e) = (f); \
+    }
+#define X265_MIN3(a, b, c) X265_MIN((a), X265_MIN((b), (c)))
+#define X265_MAX3(a, b, c) X265_MAX((a), X265_MAX((b), (c)))
+#define X265_MIN4(a, b, c, d) X265_MIN((a), X265_MIN3((b), (c), (d)))
+#define X265_MAX4(a, b, c, d) X265_MAX((a), X265_MAX3((b), (c), (d)))
+#define QP_BD_OFFSET (6 * (X265_DEPTH - 8))
+#define MAX_CHROMA_LAMBDA_OFFSET 36
+
+// arbitrary, but low because SATD scores are 1/4 normal
+#define X265_LOOKAHEAD_QP (12 + QP_BD_OFFSET)
+#define X265_LOOKAHEAD_MAX 250
+
+// Use the same size blocks as x264.  Using larger blocks seems to give artificially
+// high cost estimates (intra and inter both suffer)
+#define X265_LOWRES_CU_SIZE   8
+#define X265_LOWRES_CU_BITS   3
+
+#define X265_MALLOC(type, count)    (type*)x265_malloc(sizeof(type) * (count))
+#define X265_FREE(ptr)              x265_free(ptr)
+#define CHECKED_MALLOC(var, type, count) \
+    { \
+        var = (type*)x265_malloc(sizeof(type) * (count)); \
+        if (!var) \
+        { \
+            x265_log(NULL, X265_LOG_ERROR, "malloc of size %d failed\n", sizeof(type) * (count)); \
+            goto fail; \
+        } \
+    }
+#define CHECKED_MALLOC_ZERO(var, type, count) \
+    { \
+        var = (type*)x265_malloc(sizeof(type) * (count)); \
+        if (var) \
+            memset((void*)var, 0, sizeof(type) * (count)); \
+        else \
+        { \
+            x265_log(NULL, X265_LOG_ERROR, "malloc of size %d failed\n", sizeof(type) * (count)); \
+            goto fail; \
+        } \
+    }
+
+#if defined(_MSC_VER)
+#define X265_LOG2F(x) (logf((float)(x)) * 1.44269504088896405f)
+#define X265_LOG2(x) (log((double)(x)) * 1.4426950408889640513713538072172)
+#else
+#define X265_LOG2F(x) log2f(x)
+#define X265_LOG2(x)  log2(x)
+#endif
+
+#define NUM_CU_DEPTH            4                           // maximum number of CU depths
+#define NUM_FULL_DEPTH          5                           // maximum number of full depths
+#define MIN_LOG2_CU_SIZE        3                           // log2(minCUSize)
+#define MAX_LOG2_CU_SIZE        6                           // log2(maxCUSize)
+#define MIN_CU_SIZE             (1 << MIN_LOG2_CU_SIZE)     // minimum allowable size of CU
+#define MAX_CU_SIZE             (1 << MAX_LOG2_CU_SIZE)     // maximum allowable size of CU
+
+#define LOG2_UNIT_SIZE          2                           // log2(unitSize)
+#define UNIT_SIZE               (1 << LOG2_UNIT_SIZE)       // unit size of CU partition
+
+#define MAX_NUM_PARTITIONS      256
+#define NUM_4x4_PARTITIONS      (1U << (g_unitSizeDepth << 1)) // number of 4x4 units in max CU size
+
+#define MIN_PU_SIZE             4
+#define MIN_TU_SIZE             4
+#define MAX_NUM_SPU_W           (MAX_CU_SIZE / MIN_PU_SIZE) // maximum number of SPU in horizontal line
+
+#define MAX_LOG2_TR_SIZE 5
+#define MAX_LOG2_TS_SIZE 2 // TODO: RExt
+#define MAX_TR_SIZE (1 << MAX_LOG2_TR_SIZE)
+#define MAX_TS_SIZE (1 << MAX_LOG2_TS_SIZE)
+
+#define COEF_REMAIN_BIN_REDUCTION   3 // indicates the level at which the VLC
+                                      // transitions from Golomb-Rice to TU+EG(k)
+
+#define SBH_THRESHOLD               4 // fixed sign bit hiding controlling threshold
+
+#define C1FLAG_NUMBER               8 // maximum number of largerThan1 flag coded in one chunk:  16 in HM5
+#define C2FLAG_NUMBER               1 // maximum number of largerThan2 flag coded in one chunk:  16 in HM5
+
+#define SAO_ENCODING_RATE           0.75
+#define SAO_ENCODING_RATE_CHROMA    0.5
+
+#define MLS_GRP_NUM                 64 // Max number of coefficient groups, max(16, 64)
+#define MLS_CG_SIZE                 4  // Coefficient group size of 4x4
+#define MLS_CG_BLK_SIZE             (MLS_CG_SIZE * MLS_CG_SIZE)
+#define MLS_CG_LOG2_SIZE            2
+
+#define QUANT_IQUANT_SHIFT          20 // Q(QP%6) * IQ(QP%6) = 2^20
+#define QUANT_SHIFT                 14 // Q(4) = 2^14
+#define SCALE_BITS                  15 // Inherited from TMuC, presumably for fractional bit estimates in RDOQ
+#define MAX_TR_DYNAMIC_RANGE        15 // Maximum transform dynamic range (excluding sign bit)
+
+#define SHIFT_INV_1ST               7  // Shift after first inverse transform stage
+#define SHIFT_INV_2ND               12 // Shift after second inverse transform stage
+
+#define AMVP_DECIMATION_FACTOR      4
+
+#define SCAN_SET_SIZE               16
+#define LOG2_SCAN_SET_SIZE          4
+
+#define ALL_IDX                     -1
+#define PLANAR_IDX                  0
+#define VER_IDX                     26 // index for intra VERTICAL   mode
+#define HOR_IDX                     10 // index for intra HORIZONTAL mode
+#define DC_IDX                      1  // index for intra DC mode
+#define NUM_CHROMA_MODE             5  // total number of chroma modes
+#define DM_CHROMA_IDX               36 // chroma mode index for derived from luma intra mode
+
+#define MDCS_ANGLE_LIMIT            4 // distance from true angle that horiz or vertical scan is allowed
+#define MDCS_LOG2_MAX_SIZE          3 // TUs with log2 of size greater than this can only use diagonal scan
+
+#define MAX_NUM_REF_PICS            16 // max. number of pictures used for reference
+#define MAX_NUM_REF                 16 // max. number of entries in picture reference list
+
+#define REF_NOT_VALID               -1
+
+#define AMVP_NUM_CANDS              2 // number of AMVP candidates
+#define MRG_MAX_NUM_CANDS           5 // max number of final merge candidates
+
+#define CHROMA_H_SHIFT(x) (x == X265_CSP_I420 || x == X265_CSP_I422)
+#define CHROMA_V_SHIFT(x) (x == X265_CSP_I420)
+#define X265_MAX_PRED_MODE_PER_CTU 85 * 2 * 8
+
+#define MAX_NUM_TR_COEFFS           MAX_TR_SIZE * MAX_TR_SIZE // Maximum number of transform coefficients, for a 32x32 transform
+#define MAX_NUM_TR_CATEGORIES       16                        // 32, 16, 8, 4 transform categories each for luma and chroma
+
+namespace X265_NS {
+
+enum { SAO_NUM_OFFSET = 4 };
+
+enum SaoMergeMode
+{
+    SAO_MERGE_NONE,
+    SAO_MERGE_LEFT,
+    SAO_MERGE_UP
+};
+
+/* Per-CTU SAO parameters: merge mode, filter type, band position and the
+ * four signaled offsets. */
+struct SaoCtuParam
+{
+    SaoMergeMode mergeMode;
+    int  typeIdx;        // SAO filter type index; -1 means unset/disabled
+    uint32_t bandPos;    // BO band position
+    int  offset[SAO_NUM_OFFSET];
+
+    // Restore the default "SAO disabled" state.
+    void reset()
+    {
+        mergeMode = SAO_MERGE_NONE;
+        typeIdx = -1;
+        bandPos = 0;
+        offset[0] = 0;
+        offset[1] = 0;
+        offset[2] = 0;
+        offset[3] = 0;
+    }
+};
+
+/* Frame-level SAO state: one SaoCtuParam array per color plane (Y/Cb/Cr).
+ * NOTE(review): the destructor delete[]s the arrays but no copy
+ * constructor/assignment is declared, so copying a SAOParam would double
+ * delete -- confirm instances are never copied. */
+struct SAOParam
+{
+    SaoCtuParam* ctuParam[3];
+    bool         bSaoFlag[2];   // [0]=luma enabled, [1]=chroma enabled -- TODO confirm against users
+    int          numCuInWidth;
+
+    SAOParam()
+    {
+        for (int i = 0; i < 3; i++)
+            ctuParam[i] = NULL;
+    }
+
+    ~SAOParam()
+    {
+        delete[] ctuParam[0];
+        delete[] ctuParam[1];
+        delete[] ctuParam[2];
+    }
+};
+
+/* Stores inter analysis data for a single frame */
+struct analysis_inter_data
+{
+    int32_t*    ref;
+    uint8_t*    depth;
+    uint8_t*    modes;
+    uint32_t*   bestMergeCand;
+};
+
+/* Stores intra analysis data for a single frame. This struct needs better packing */
+struct analysis_intra_data
+{
+    uint8_t*  depth;
+    uint8_t*  modes;
+    char*     partSizes;
+    uint8_t*  chromaModes;
+};
+
+enum TextType
+{
+    TEXT_LUMA     = 0,  // luma
+    TEXT_CHROMA_U = 1,  // chroma U
+    TEXT_CHROMA_V = 2,  // chroma V
+    MAX_NUM_COMPONENT = 3
+};
+
+// coefficient scanning type used in ACS
+enum ScanType
+{
+    SCAN_DIAG = 0,     // up-right diagonal scan
+    SCAN_HOR = 1,      // horizontal first scan
+    SCAN_VER = 2,      // vertical first scan
+    NUM_SCAN_TYPE = 3
+};
+
+enum SignificanceMapContextType
+{
+    CONTEXT_TYPE_4x4 = 0,
+    CONTEXT_TYPE_8x8 = 1,
+    CONTEXT_TYPE_NxN = 2,
+    CONTEXT_NUMBER_OF_TYPES = 3
+};
+
+/* located in pixel.cpp */
+void extendPicBorder(pixel* recon, intptr_t stride, int width, int height, int marginX, int marginY);
+
+/* located in common.cpp */
+int64_t  x265_mdate(void);
+#define  x265_log(param, ...) general_log(param, "x265", __VA_ARGS__)
+void     general_log(const x265_param* param, const char* caller, int level, const char* fmt, ...);
+int      x265_exp2fix8(double x);
+
+double   x265_ssim2dB(double ssim);
+double   x265_qScale2qp(double qScale);
+double   x265_qp2qScale(double qp);
+uint32_t x265_picturePlaneSize(int csp, int width, int height, int plane);
+
+void*    x265_malloc(size_t size);
+void     x265_free(void *ptr);
+char*    x265_slurp_file(const char *filename);
+
+/* located in primitives.cpp */
+void     x265_setup_primitives(x265_param* param);
+void     x265_report_simd(x265_param* param);
+}
+
+#include "constants.h"
+
+#endif // ifndef X265_COMMON_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/constants.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,582 @@
+/*****************************************************************************
+* Copyright (C) 2015 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "common.h"
+#include "constants.h"
+#include "threading.h"
+
+namespace X265_NS {
+
+#if X265_DEPTH == 12
+
+// lambda = pow(2, (double)q / 6 - 2) * (1 << (12 - 8));
+double x265_lambda_tab[QP_MAX_MAX + 1] =
+{
+    4.0000,    4.4898,    5.0397,    5.6569,     6.3496,
+    7.1272,    8.0000,    8.9797,    10.0794,    11.3137,
+    12.6992,   14.2544,   16.0000,   17.9594,    20.1587,
+    22.6274,   25.3984,   28.5088,   32.0000,    35.9188,
+    40.3175,   45.2548,   50.7968,   57.0175,    64.0000,
+    71.8376,   80.6349,   90.5097,   101.5937,   114.0350,
+    128.0000,  143.6751,  161.2699,  181.0193,   203.1873,
+    228.0701,  256.0000,  287.3503,  322.5398,   362.0387,
+    406.3747,  456.1401,  512.0000,  574.7006,   645.0796,
+    724.0773,  812.7493,  912.2803,  1024.0000,  1149.4011,
+    1290.1592, 1448.1547, 1625.4987, 1824.5606,  2048.0000,
+    2298.8023, 2580.3183, 2896.3094, 3250.9974,  3649.1211,
+    4096.0000, 4597.6045, 5160.6366, 5792.6188,  6501.9947,
+    7298.2423, 8192.0000, 9195.2091, 10321.2732, 11585.2375
+};
+
+// lambda2 = pow(lambda, 2) * scale (0.85);
+double x265_lambda2_tab[QP_MAX_MAX + 1] =
+{
+    13.6000,       17.1349,       21.5887,       27.2000,       34.2699,
+    43.1773,       54.4000,       68.5397,       86.3546,       108.8000,
+    137.0794,      172.7092,      217.6000,      274.1588,      345.4185,
+    435.2000,      548.3176,      690.8369,      870.4000,      1096.6353,
+    1381.6739,     1740.8000,     2193.2706,     2763.3478,     3481.6000,
+    4386.5411,     5526.6955,     6963.2000,     8773.0822,     11053.3910,
+    13926.4000,    17546.1645,    22106.7819,    27852.8000,    35092.3291,
+    44213.5641,    55705.6000,    70184.6579,    88427.1282,    111411.2000,
+    140369.3159,   176854.2563,   222822.4000,   280738.6324,   353708.5127,
+    445644.8001,   561477.2648,   707417.0237,   891289.6000,   1122954.5277,
+    1414834.0484,  1782579.2003,  2245909.0566,  2829668.0981,  3565158.4000,
+    4491818.1146,  5659336.1938,  7130316.8013,  8983636.2264,  11318672.3923,
+    14260633.6000, 17967272.4585, 22637344.7751, 28521267.1953, 35934544.9165,
+    45274689.5567, 57042534.4000, 71869089.8338, 90549379.1181, 114085068.8008
+};
+
+#elif X265_DEPTH == 10
+
+// lambda = pow(2, (double)q / 6 - 2) * (1 << (X265_DEPTH - 8));
+double x265_lambda_tab[QP_MAX_MAX + 1] =
+{
+    1.0000, 1.1225, 1.2599, 1.4142, 1.5874, 
+    1.7818, 2.0000, 2.2449, 2.5198, 2.8284, 
+    3.1748, 3.5636, 4.0000, 4.4898, 5.0397, 
+    5.6569, 6.3496, 7.1272, 8.0000, 8.9797, 
+    10.0794, 11.3137, 12.6992, 14.2544, 16.0000, 
+    17.9594, 20.1587, 22.6274, 25.3984, 28.5088, 
+    32.0000, 35.9188, 40.3175, 45.2548, 50.7968, 
+    57.0175, 64.0000, 71.8376, 80.6349, 90.5097, 
+    101.5937, 114.0350, 128.0000, 143.6751, 161.2699, 
+    181.0193, 203.1873, 228.0701, 256.0000, 287.3503, 
+    322.5398, 362.0387, 406.3747, 456.1401, 512.0000, 
+    574.7006, 645.0796, 724.0773, 812.7493, 912.2803, 
+    1024.0000, 1149.4011, 1290.1592, 1448.1547, 1625.4987, 
+    1824.5606, 2048.0000, 2298.8023, 2580.3183, 2896.3094,
+};
+
+// lambda2 = pow(lambda, 2) * scale (0.85);
+double x265_lambda2_tab[QP_MAX_MAX + 1] =
+{
+    0.8500, 1.0709, 1.3493, 1.7000, 2.1419, 
+    2.6986, 3.4000, 4.2837, 5.3972, 6.8000, 
+    8.5675, 10.7943, 13.6000, 17.1349, 21.5887, 
+    27.2000, 34.2699, 43.1773, 54.4000, 68.5397, 
+    86.3546, 108.8000, 137.0794, 172.7092, 217.6000, 
+    274.1588, 345.4185, 435.2000, 548.3176, 690.8369, 
+    870.4000, 1096.6353, 1381.6739, 1740.8000, 2193.2706, 
+    2763.3478, 3481.6000, 4386.5411, 5526.6955, 6963.2000, 
+    8773.0823, 11053.3910, 13926.4000, 17546.1645, 22106.7820, 
+    27852.8000, 35092.3290, 44213.5640, 55705.6000, 70184.6580, 
+    88427.1280, 111411.2000, 140369.3161, 176854.2561, 222822.4000, 
+    280738.6321, 353708.5122, 445644.8000, 561477.2643, 707417.0243, 
+    891289.6000, 1122954.5286, 1414834.0486, 1782579.2000, 2245909.0572, 
+    2829668.0973, 3565158.4000, 4491818.1144, 5659336.1946, 7130316.8000, 
+};
+
+#else /* !HIGH_BIT_DEPTH */
+
+// lambda = pow(2, (double)q / 6 - 2);
+double x265_lambda_tab[QP_MAX_MAX + 1] =
+{
+    0.2500, 0.2806, 0.3150, 0.3536, 0.3969,
+    0.4454, 0.5000, 0.5612, 0.6300, 0.7071,
+    0.7937, 0.8909, 1.0000, 1.1225, 1.2599,
+    1.4142, 1.5874, 1.7818, 2.0000, 2.2449,
+    2.5198, 2.8284, 3.1748, 3.5636, 4.0000,
+    4.4898, 5.0397, 5.6569, 6.3496, 7.1272,
+    8.0000, 8.9797, 10.0794, 11.3137, 12.6992,
+    14.2544, 16.0000, 17.9594, 20.1587, 22.6274,
+    25.3984, 28.5088, 32.0000, 35.9188, 40.3175,
+    45.2548, 50.7968, 57.0175, 64.0000, 71.8376,
+    80.6349, 90.5097, 101.5937, 114.0350, 128.0000,
+    143.6751, 161.2699, 181.0193, 203.1873, 228.0701,
+    256.0000, 287.3503, 322.5398, 362.0387, 406.3747,
+    456.1401, 512.0000, 574.7006, 645.0796, 724.0773
+};
+
+// lambda2 = pow(lambda, 2) * scale (0.85);
+double x265_lambda2_tab[QP_MAX_MAX + 1] =
+{
+    0.0531, 0.0669, 0.0843, 0.1063, 0.1339,
+    0.1687, 0.2125, 0.2677, 0.3373, 0.4250,
+    0.5355, 0.6746, 0.8500, 1.0709, 1.3493,
+    1.7000, 2.1419, 2.6986, 3.4000, 4.2837,
+    5.3970, 6.8000, 8.5675, 10.7943, 13.6000,
+    17.1345, 21.5887, 27.2004, 34.2699, 43.1773,
+    54.4000, 68.5397, 86.3551, 108.7998, 137.0792,
+    172.7097, 217.6000, 274.1590, 345.4172, 435.1993,
+    548.3169, 690.8389, 870.4000, 1096.6362, 1381.6757,
+    1740.7974, 2193.2676, 2763.3460, 3481.6000, 4386.5446,
+    5526.6890, 6963.2049, 8773.0879, 11053.3840, 13926.4000,
+    17546.1542, 22106.7835, 27852.7889, 35092.3170, 44213.5749,
+    55705.6000, 70184.6657, 88427.1342, 111411.2172, 140369.3373,
+    176854.2222, 222822.4000, 280738.6627, 353708.5368, 445644.7459
+};
+
+#endif
+
+const uint16_t x265_chroma_lambda2_offset_tab[MAX_CHROMA_LAMBDA_OFFSET+1] =
+{
+       16,    20,    25,    32,    40,    50,
+       64,    80,   101,   128,   161,   203,
+      256,   322,   406,   512,   645,   812,
+     1024,  1290,  1625,  2048,  2580,  3250,
+     4096,  5160,  6501,  8192, 10321, 13003,
+    16384, 20642, 26007, 32768, 41285, 52015,
+    65535
+};
+
+int      g_ctuSizeConfigured = 0;
+uint32_t g_maxLog2CUSize = MAX_LOG2_CU_SIZE;
+uint32_t g_maxCUSize     = MAX_CU_SIZE;
+uint32_t g_unitSizeDepth = NUM_CU_DEPTH;
+uint32_t g_maxCUDepth    = NUM_CU_DEPTH - 1;
+uint32_t g_zscanToRaster[MAX_NUM_PARTITIONS] = { 0, };
+uint32_t g_rasterToZscan[MAX_NUM_PARTITIONS] = { 0, };
+
+const uint8_t g_zscanToPelX[MAX_NUM_PARTITIONS] =
+{
+    0, 4, 0, 4, 8, 12, 8, 12, 0, 4, 0, 4, 8, 12, 8, 12,
+    16, 20, 16, 20, 24, 28, 24, 28, 16, 20, 16, 20, 24, 28, 24, 28,
+    0, 4, 0, 4, 8, 12, 8, 12, 0, 4, 0, 4, 8, 12, 8, 12,
+    16, 20, 16, 20, 24, 28, 24, 28, 16, 20, 16, 20, 24, 28, 24, 28,
+    32, 36, 32, 36, 40, 44, 40, 44, 32, 36, 32, 36, 40, 44, 40, 44,
+    48, 52, 48, 52, 56, 60, 56, 60, 48, 52, 48, 52, 56, 60, 56, 60,
+    32, 36, 32, 36, 40, 44, 40, 44, 32, 36, 32, 36, 40, 44, 40, 44,
+    48, 52, 48, 52, 56, 60, 56, 60, 48, 52, 48, 52, 56, 60, 56, 60,
+    0, 4, 0, 4, 8, 12, 8, 12, 0, 4, 0, 4, 8, 12, 8, 12,
+    16, 20, 16, 20, 24, 28, 24, 28, 16, 20, 16, 20, 24, 28, 24, 28,
+    0, 4, 0, 4, 8, 12, 8, 12, 0, 4, 0, 4, 8, 12, 8, 12,
+    16, 20, 16, 20, 24, 28, 24, 28, 16, 20, 16, 20, 24, 28, 24, 28,
+    32, 36, 32, 36, 40, 44, 40, 44, 32, 36, 32, 36, 40, 44, 40, 44,
+    48, 52, 48, 52, 56, 60, 56, 60, 48, 52, 48, 52, 56, 60, 56, 60,
+    32, 36, 32, 36, 40, 44, 40, 44, 32, 36, 32, 36, 40, 44, 40, 44,
+    48, 52, 48, 52, 56, 60, 56, 60, 48, 52, 48, 52, 56, 60, 56, 60
+};
+
+const uint8_t g_zscanToPelY[MAX_NUM_PARTITIONS] =
+{
+    0, 0, 4, 4, 0, 0, 4, 4, 8, 8, 12, 12, 8, 8, 12, 12,
+    0, 0, 4, 4, 0, 0, 4, 4, 8, 8, 12, 12, 8, 8, 12, 12,
+    16, 16, 20, 20, 16, 16, 20, 20, 24, 24, 28, 28, 24, 24, 28, 28,
+    16, 16, 20, 20, 16, 16, 20, 20, 24, 24, 28, 28, 24, 24, 28, 28,
+    0, 0, 4, 4, 0, 0, 4, 4, 8, 8, 12, 12, 8, 8, 12, 12,
+    0, 0, 4, 4, 0, 0, 4, 4, 8, 8, 12, 12, 8, 8, 12, 12,
+    16, 16, 20, 20, 16, 16, 20, 20, 24, 24, 28, 28, 24, 24, 28, 28,
+    16, 16, 20, 20, 16, 16, 20, 20, 24, 24, 28, 28, 24, 24, 28, 28,
+    32, 32, 36, 36, 32, 32, 36, 36, 40, 40, 44, 44, 40, 40, 44, 44,
+    32, 32, 36, 36, 32, 32, 36, 36, 40, 40, 44, 44, 40, 40, 44, 44,
+    48, 48, 52, 52, 48, 48, 52, 52, 56, 56, 60, 60, 56, 56, 60, 60,
+    48, 48, 52, 52, 48, 48, 52, 52, 56, 56, 60, 60, 56, 56, 60, 60,
+    32, 32, 36, 36, 32, 32, 36, 36, 40, 40, 44, 44, 40, 40, 44, 44,
+    32, 32, 36, 36, 32, 32, 36, 36, 40, 40, 44, 44, 40, 40, 44, 44,
+    48, 48, 52, 52, 48, 48, 52, 52, 56, 56, 60, 60, 56, 56, 60, 60,
+    48, 48, 52, 52, 48, 48, 52, 52, 56, 56, 60, 60, 56, 56, 60, 60
+};
+
+/* Recursively fill g_zscanToRaster: walks the CU partition quad-tree in
+ * z-order (top-left, top-right, bottom-left, bottom-right) and writes the
+ * raster index of each leaf unit through 'curIdx', which advances by one
+ * per emitted leaf.  'startVal' is the raster index of the current
+ * sub-block's top-left unit. */
+void initZscanToRaster(uint32_t maxFullDepth, uint32_t depth, uint32_t startVal, uint32_t*& curIdx)
+{
+    uint32_t stride = 1 << maxFullDepth;   // units per row at full depth
+
+    if (depth > maxFullDepth)
+    {
+        curIdx[0] = startVal;   // leaf: record raster index, advance output
+        curIdx++;
+    }
+    else
+    {
+        int step = stride >> depth;   // half-width of this sub-block in units
+        initZscanToRaster(maxFullDepth, depth + 1, startVal,                        curIdx);
+        initZscanToRaster(maxFullDepth, depth + 1, startVal + step,                 curIdx);
+        initZscanToRaster(maxFullDepth, depth + 1, startVal + step * stride,        curIdx);
+        initZscanToRaster(maxFullDepth, depth + 1, startVal + step * stride + step, curIdx);
+    }
+}
+
+/* Build the inverse table g_rasterToZscan from g_zscanToRaster; must run
+ * after initZscanToRaster() has populated the forward mapping. */
+void initRasterToZscan(uint32_t maxFullDepth)
+{
+    uint32_t numPartitions = 1 << (maxFullDepth * 2);   // leaves in the quad-tree
+
+    for (uint32_t i = 0; i < numPartitions; i++)
+        g_rasterToZscan[g_zscanToRaster[i]] = i;
+}
+
+const int16_t g_lumaFilter[4][NTAPS_LUMA] =
+{
+    {  0, 0,   0, 64,  0,   0, 0,  0 },
+    { -1, 4, -10, 58, 17,  -5, 1,  0 },
+    { -1, 4, -11, 40, 40, -11, 4, -1 },
+    {  0, 1,  -5, 17, 58, -10, 4, -1 }
+};
+
+const int16_t g_chromaFilter[8][NTAPS_CHROMA] =
+{
+    {  0, 64,  0,  0 },
+    { -2, 58, 10, -2 },
+    { -4, 54, 16, -2 },
+    { -6, 46, 28, -4 },
+    { -4, 36, 36, -4 },
+    { -4, 28, 46, -6 },
+    { -2, 16, 54, -4 },
+    { -2, 10, 58, -2 }
+};
+
+const int16_t g_t4[4][4] =
+{
+    { 64, 64, 64, 64 },
+    { 83, 36, -36, -83 },
+    { 64, -64, -64, 64 },
+    { 36, -83, 83, -36 }
+};
+
+const int16_t g_t8[8][8] =
+{
+    { 64, 64, 64, 64, 64, 64, 64, 64 },
+    { 89, 75, 50, 18, -18, -50, -75, -89 },
+    { 83, 36, -36, -83, -83, -36, 36, 83 },
+    { 75, -18, -89, -50, 50, 89, 18, -75 },
+    { 64, -64, -64, 64, 64, -64, -64, 64 },
+    { 50, -89, 18, 75, -75, -18, 89, -50 },
+    { 36, -83, 83, -36, -36, 83, -83, 36 },
+    { 18, -50, 75, -89, 89, -75, 50, -18 }
+};
+
+/* 16x16 integer core-transform matrix (row 0 = DC). */
+const int16_t g_t16[16][16] =
+{
+    { 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64 },
+    { 90, 87, 80, 70, 57, 43, 25,  9, -9, -25, -43, -57, -70, -80, -87, -90 },
+    { 89, 75, 50, 18, -18, -50, -75, -89, -89, -75, -50, -18, 18, 50, 75, 89 },
+    { 87, 57,  9, -43, -80, -90, -70, -25, 25, 70, 90, 80, 43, -9, -57, -87 },
+    { 83, 36, -36, -83, -83, -36, 36, 83, 83, 36, -36, -83, -83, -36, 36, 83 },
+    { 80,  9, -70, -87, -25, 57, 90, 43, -43, -90, -57, 25, 87, 70, -9, -80 },
+    { 75, -18, -89, -50, 50, 89, 18, -75, -75, 18, 89, 50, -50, -89, -18, 75 },
+    { 70, -43, -87,  9, 90, 25, -80, -57, 57, 80, -25, -90, -9, 87, 43, -70 },
+    { 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64 },
+    { 57, -80, -25, 90, -9, -87, 43, 70, -70, -43, 87,  9, -90, 25, 80, -57 },
+    { 50, -89, 18, 75, -75, -18, 89, -50, -50, 89, -18, -75, 75, 18, -89, 50 },
+    { 43, -90, 57, 25, -87, 70,  9, -80, 80, -9, -70, 87, -25, -57, 90, -43 },
+    { 36, -83, 83, -36, -36, 83, -83, 36, 36, -83, 83, -36, -36, 83, -83, 36 },
+    { 25, -70, 90, -80, 43,  9, -57, 87, -87, 57, -9, -43, 80, -90, 70, -25 },
+    { 18, -50, 75, -89, 89, -75, 50, -18, -18, 50, -75, 89, -89, 75, -50, 18 },
+    {  9, -25, 43, -57, 70, -80, 87, -90, 90, -87, 80, -70, 57, -43, 25, -9 }
+};
+
+/* 32x32 integer core-transform matrix (row 0 = DC). Do not edit by hand;
+ * values are the fixed transform basis and must match the bitstream spec. */
+const int16_t g_t32[32][32] =
+{
+    { 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64 },
+    { 90, 90, 88, 85, 82, 78, 73, 67, 61, 54, 46, 38, 31, 22, 13,  4, -4, -13, -22, -31, -38, -46, -54, -61, -67, -73, -78, -82, -85, -88, -90, -90 },
+    { 90, 87, 80, 70, 57, 43, 25,  9, -9, -25, -43, -57, -70, -80, -87, -90, -90, -87, -80, -70, -57, -43, -25, -9,  9, 25, 43, 57, 70, 80, 87, 90 },
+    { 90, 82, 67, 46, 22, -4, -31, -54, -73, -85, -90, -88, -78, -61, -38, -13, 13, 38, 61, 78, 88, 90, 85, 73, 54, 31,  4, -22, -46, -67, -82, -90 },
+    { 89, 75, 50, 18, -18, -50, -75, -89, -89, -75, -50, -18, 18, 50, 75, 89, 89, 75, 50, 18, -18, -50, -75, -89, -89, -75, -50, -18, 18, 50, 75, 89 },
+    { 88, 67, 31, -13, -54, -82, -90, -78, -46, -4, 38, 73, 90, 85, 61, 22, -22, -61, -85, -90, -73, -38,  4, 46, 78, 90, 82, 54, 13, -31, -67, -88 },
+    { 87, 57,  9, -43, -80, -90, -70, -25, 25, 70, 90, 80, 43, -9, -57, -87, -87, -57, -9, 43, 80, 90, 70, 25, -25, -70, -90, -80, -43,  9, 57, 87 },
+    { 85, 46, -13, -67, -90, -73, -22, 38, 82, 88, 54, -4, -61, -90, -78, -31, 31, 78, 90, 61,  4, -54, -88, -82, -38, 22, 73, 90, 67, 13, -46, -85 },
+    { 83, 36, -36, -83, -83, -36, 36, 83, 83, 36, -36, -83, -83, -36, 36, 83, 83, 36, -36, -83, -83, -36, 36, 83, 83, 36, -36, -83, -83, -36, 36, 83 },
+    { 82, 22, -54, -90, -61, 13, 78, 85, 31, -46, -90, -67,  4, 73, 88, 38, -38, -88, -73, -4, 67, 90, 46, -31, -85, -78, -13, 61, 90, 54, -22, -82 },
+    { 80,  9, -70, -87, -25, 57, 90, 43, -43, -90, -57, 25, 87, 70, -9, -80, -80, -9, 70, 87, 25, -57, -90, -43, 43, 90, 57, -25, -87, -70,  9, 80 },
+    { 78, -4, -82, -73, 13, 85, 67, -22, -88, -61, 31, 90, 54, -38, -90, -46, 46, 90, 38, -54, -90, -31, 61, 88, 22, -67, -85, -13, 73, 82,  4, -78 },
+    { 75, -18, -89, -50, 50, 89, 18, -75, -75, 18, 89, 50, -50, -89, -18, 75, 75, -18, -89, -50, 50, 89, 18, -75, -75, 18, 89, 50, -50, -89, -18, 75 },
+    { 73, -31, -90, -22, 78, 67, -38, -90, -13, 82, 61, -46, -88, -4, 85, 54, -54, -85,  4, 88, 46, -61, -82, 13, 90, 38, -67, -78, 22, 90, 31, -73 },
+    { 70, -43, -87,  9, 90, 25, -80, -57, 57, 80, -25, -90, -9, 87, 43, -70, -70, 43, 87, -9, -90, -25, 80, 57, -57, -80, 25, 90,  9, -87, -43, 70 },
+    { 67, -54, -78, 38, 85, -22, -90,  4, 90, 13, -88, -31, 82, 46, -73, -61, 61, 73, -46, -82, 31, 88, -13, -90, -4, 90, 22, -85, -38, 78, 54, -67 },
+    { 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64 },
+    { 61, -73, -46, 82, 31, -88, -13, 90, -4, -90, 22, 85, -38, -78, 54, 67, -67, -54, 78, 38, -85, -22, 90,  4, -90, 13, 88, -31, -82, 46, 73, -61 },
+    { 57, -80, -25, 90, -9, -87, 43, 70, -70, -43, 87,  9, -90, 25, 80, -57, -57, 80, 25, -90,  9, 87, -43, -70, 70, 43, -87, -9, 90, -25, -80, 57 },
+    { 54, -85, -4, 88, -46, -61, 82, 13, -90, 38, 67, -78, -22, 90, -31, -73, 73, 31, -90, 22, 78, -67, -38, 90, -13, -82, 61, 46, -88,  4, 85, -54 },
+    { 50, -89, 18, 75, -75, -18, 89, -50, -50, 89, -18, -75, 75, 18, -89, 50, 50, -89, 18, 75, -75, -18, 89, -50, -50, 89, -18, -75, 75, 18, -89, 50 },
+    { 46, -90, 38, 54, -90, 31, 61, -88, 22, 67, -85, 13, 73, -82,  4, 78, -78, -4, 82, -73, -13, 85, -67, -22, 88, -61, -31, 90, -54, -38, 90, -46 },
+    { 43, -90, 57, 25, -87, 70,  9, -80, 80, -9, -70, 87, -25, -57, 90, -43, -43, 90, -57, -25, 87, -70, -9, 80, -80,  9, 70, -87, 25, 57, -90, 43 },
+    { 38, -88, 73, -4, -67, 90, -46, -31, 85, -78, 13, 61, -90, 54, 22, -82, 82, -22, -54, 90, -61, -13, 78, -85, 31, 46, -90, 67,  4, -73, 88, -38 },
+    { 36, -83, 83, -36, -36, 83, -83, 36, 36, -83, 83, -36, -36, 83, -83, 36, 36, -83, 83, -36, -36, 83, -83, 36, 36, -83, 83, -36, -36, 83, -83, 36 },
+    { 31, -78, 90, -61,  4, 54, -88, 82, -38, -22, 73, -90, 67, -13, -46, 85, -85, 46, 13, -67, 90, -73, 22, 38, -82, 88, -54, -4, 61, -90, 78, -31 },
+    { 25, -70, 90, -80, 43,  9, -57, 87, -87, 57, -9, -43, 80, -90, 70, -25, -25, 70, -90, 80, -43, -9, 57, -87, 87, -57,  9, 43, -80, 90, -70, 25 },
+    { 22, -61, 85, -90, 73, -38, -4, 46, -78, 90, -82, 54, -13, -31, 67, -88, 88, -67, 31, 13, -54, 82, -90, 78, -46,  4, 38, -73, 90, -85, 61, -22 },
+    { 18, -50, 75, -89, 89, -75, 50, -18, -18, 50, -75, 89, -89, 75, -50, 18, 18, -50, 75, -89, 89, -75, 50, -18, -18, 50, -75, 89, -89, 75, -50, 18 },
+    { 13, -38, 61, -78, 88, -90, 85, -73, 54, -31,  4, 22, -46, 67, -82, 90, -90, 82, -67, 46, -22, -4, 31, -54, 73, -85, 90, -88, 78, -61, 38, -13 },
+    {  9, -25, 43, -57, 70, -80, 87, -90, 90, -87, 80, -70, 57, -43, 25, -9, -9, 25, -43, 57, -70, 80, -87, 90, -90, 87, -80, 70, -57, 43, -25,  9 },
+    {  4, -13, 22, -31, 38, -46, 54, -61, 67, -73, 78, -82, 85, -88, 90, -90, 90, -90, 88, -85, 82, -78, 73, -67, 61, -54, 46, -38, 31, -22, 13, -4 }
+};
+
+/* Luma-QP -> chroma-QP mapping: identity up to 29, then compressed in the
+ * 30..45 region, saturating at 51 for the tail of the table. */
+const uint8_t g_chromaScale[ChromaQPMappingTableSize] =
+{
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 29, 30, 31, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+    51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51
+};
+
+/* Remaps an intra prediction mode to the mode actually used for 4:2:2
+ * chroma; last entry maps to DM_CHROMA_IDX (presumably the derived-from-luma
+ * mode -- confirm against the prediction code). */
+const uint8_t g_chroma422IntraAngleMappingTable[AngleMapping422TableSize] =
+{ 0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31, DM_CHROMA_IDX };
+
+/* g_log2Size[s] = floor(log2(s)) for s in [1, MAX_CU_SIZE];
+ * entry 0 is a filler (log2(0) is undefined). */
+const uint8_t g_log2Size[MAX_CU_SIZE + 1] =
+{
+    0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    6
+};
+
+/* 2x2 coefficient-group scan orders: row 0 = diagonal, row 1 = horizontal.
+ * Only two variants exist; the vertical scan type reuses row 0
+ * (see g_scanOrderCG below). */
+const uint16_t g_scan2x2[][2*2] =
+{
+    { 0, 2, 1, 3 },
+    { 0, 1, 2, 3 },
+};
+
+/* 8x8 coefficient scan orders, one row per scan type (diagonal, horizontal,
+ * vertical); entries are coefficient indices listed in scan order, traversed
+ * one 4x4 coefficient group at a time. */
+const uint16_t g_scan8x8[NUM_SCAN_TYPE][8 * 8] =
+{
+    { 0,   8,  1, 16,  9,  2, 24, 17, 10,  3, 25, 18, 11, 26, 19, 27, 32, 40, 33, 48, 41, 34, 56, 49, 42, 35, 57, 50, 43, 58, 51, 59,
+      4,  12,  5, 20, 13,  6, 28, 21, 14,  7, 29, 22, 15, 30, 23, 31, 36, 44, 37, 52, 45, 38, 60, 53, 46, 39, 61, 54, 47, 62, 55, 63 },
+    { 0,   1,  2,  3,  8,  9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27,  4,  5,  6,  7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31,
+      32, 33, 34, 35, 40, 41, 42, 43, 48, 49, 50, 51, 56, 57, 58, 59, 36, 37, 38, 39, 44, 45, 46, 47, 52, 53, 54, 55, 60, 61, 62, 63 },
+    { 0,   8, 16, 24,  1,  9, 17, 25,  2, 10, 18, 26,  3, 11, 19, 27, 32, 40, 48, 56, 33, 41, 49, 57, 34, 42, 50, 58, 35, 43, 51, 59,
+      4,  12, 20, 28,  5, 13, 21, 29,  6, 14, 22, 30,  7, 15, 23, 31, 36, 44, 52, 60, 37, 45, 53, 61, 38, 46, 54, 62, 39, 47, 55, 63 }
+};
+
+/* 4x4 scan orders (diagonal, horizontal, vertical). The extra all-zero row
+ * is a guard area so assembly codeCoeffNxN may safely over-read up to 15
+ * bytes past the selected scan (see the note in constants.h). 16-byte
+ * aligned for SIMD loads. */
+ALIGN_VAR_16(const uint16_t, g_scan4x4[NUM_SCAN_TYPE + 1][4 * 4]) =
+{
+    { 0,  4,  1,  8,  5,  2, 12,  9,  6,  3, 13, 10,  7, 14, 11, 15 },
+    { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
+    { 0,  4,  8, 12,  1,  5,  9, 13,  2,  6, 10, 14,  3,  7, 11, 15 },
+    { 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }
+};
+
+/* 16x16 diagonal scan order: 4x4 coefficient groups are themselves visited
+ * diagonally, and coefficients within each group diagonally. Only the
+ * diagonal variant exists at this size (see g_scanOrder). */
+const uint16_t g_scan16x16[16 * 16] =
+{
+    0, 16, 1, 32, 17, 2, 48, 33, 18, 3, 49, 34, 19, 50, 35, 51,
+    64, 80, 65, 96, 81, 66, 112, 97, 82, 67, 113, 98, 83, 114, 99, 115,
+    4, 20, 5, 36, 21, 6, 52, 37, 22, 7, 53, 38, 23, 54, 39, 55,
+    128, 144, 129, 160, 145, 130, 176, 161, 146, 131, 177, 162, 147, 178, 163, 179,
+    68, 84, 69, 100, 85, 70, 116, 101, 86, 71, 117, 102, 87, 118, 103, 119,
+    8, 24, 9, 40, 25, 10, 56, 41, 26, 11, 57, 42, 27, 58, 43, 59,
+    192,208, 193,224,209, 194,240,225,210, 195,241,226,211,242,227,243,
+    132, 148, 133, 164, 149, 134, 180, 165, 150, 135, 181, 166, 151, 182, 167, 183,
+    72, 88, 73, 104, 89, 74, 120, 105, 90, 75, 121, 106, 91, 122, 107, 123,
+    12, 28, 13, 44, 29, 14, 60, 45, 30, 15, 61, 46, 31, 62, 47, 63,
+    196,212, 197,228,213, 198,244,229,214, 199,245,230,215,246,231,247,
+    136, 152, 137, 168, 153, 138, 184, 169, 154, 139, 185, 170, 155, 186, 171, 187,
+    76, 92, 77, 108, 93, 78, 124, 109, 94, 79, 125, 110, 95, 126, 111, 127,
+    200,216,201,232,217,202,248,233,218,203,249,234,219,250,235,251,
+    140, 156, 141, 172, 157, 142, 188, 173, 158, 143, 189, 174, 159, 190, 175, 191,
+    204,220,205,236,221,206,252,237,222,207,253,238,223,254,239,255
+};
+
+/* Plain 8x8 diagonal scan (no 4x4 grouping) -- used as the coefficient-group
+ * scan for 32x32 blocks (see g_scanOrderCG). */
+const uint16_t g_scan8x8diag[8 * 8] =
+{
+    0,   8,  1, 16,  9,  2, 24, 17,
+    10,  3, 32, 25, 18, 11,  4, 40,
+    33, 26, 19, 12,  5, 48, 41, 34,
+    27, 20, 13,  6, 56, 49, 42, 35,
+    28, 21, 14,  7, 57, 50, 43, 36,
+    29, 22, 15, 58, 51, 44, 37, 30,
+    23, 59, 52, 45, 38, 31, 60, 53,
+    46, 39, 61, 54, 47, 62, 55, 63
+};
+
+/* 32x32 diagonal scan order, 4x4-coefficient-group grouped (same structure
+ * as g_scan16x16). Only the diagonal variant exists at this size. */
+const uint16_t g_scan32x32[32 * 32] =
+{
+    0,32,1,64,33,2,96,65,34,3,97,66,35,98,67,99,128,160,129,192,161,130,224,193,162,131,225,194,163,226,195,227,
+    4,36,5,68,37,6,100,69,38,7,101,70,39,102,71,103,256,288,257,320,289,258,352,321,290,259,353,322,291,354,323,355,
+    132,164,133,196,165,134,228,197,166,135,229,198,167,230,199,231,8,40,9,72,41,10,104,73,42,11,105,74,43,106,75,107,
+    384,416,385,448,417,386,480,449,418,387,481,450,419,482,451,483,260,292,261,324,293,262,356,325,294,263,357,326,295,358,327,359,
+    136,168,137,200,169,138,232,201,170,139,233,202,171,234,203,235,12,44,13,76,45,14,108,77,46,15,109,78,47,110,79,111,
+    512,544,513,576,545,514,608,577,546,515,609,578,547,610,579,611,388,420,389,452,421,390,484,453,422,391,485,454,423,486,455,487,
+    264,296,265,328,297,266,360,329,298,267,361,330,299,362,331,363,140,172,141,204,173,142,236,205,174,143,237,206,175,238,207,239,
+    16,48,17,80,49,18,112,81,50,19,113,82,51,114,83,115,640,672,641,704,673,642,736,705,674,643,737,706,675,738,707,739,
+    516,548,517,580,549,518,612,581,550,519,613,582,551,614,583,615,392,424,393,456,425,394,488,457,426,395,489,458,427,490,459,491,
+    268,300,269,332,301,270,364,333,302,271,365,334,303,366,335,367,144,176,145,208,177,146,240,209,178,147,241,210,179,242,211,243,
+    20,52,21,84,53,22,116,85,54,23,117,86,55,118,87,119,768,800,769,832,801,770,864,833,802,771,865,834,803,866,835,867,
+    644,676,645,708,677,646,740,709,678,647,741,710,679,742,711,743,520,552,521,584,553,522,616,585,554,523,617,586,555,618,587,619,
+    396,428,397,460,429,398,492,461,430,399,493,462,431,494,463,495,272,304,273,336,305,274,368,337,306,275,369,338,307,370,339,371,
+    148,180,149,212,181,150,244,213,182,151,245,214,183,246,215,247,24,56,25,88,57,26,120,89,58,27,121,90,59,122,91,123,
+    896,928,897,960,929,898,992,961,930,899,993,962,931,994,963,995,772,804,773,836,805,774,868,837,806,775,869,838,807,870,839,871,
+    648,680,649,712,681,650,744,713,682,651,745,714,683,746,715,747,524,556,525,588,557,526,620,589,558,527,621,590,559,622,591,623,
+    400,432,401,464,433,402,496,465,434,403,497,466,435,498,467,499,276,308,277,340,309,278,372,341,310,279,373,342,311,374,343,375,
+    152,184,153,216,185,154,248,217,186,155,249,218,187,250,219,251,28,60,29,92,61,30,124,93,62,31,125,94,63,126,95,127,
+    900,932,901,964,933,902,996,965,934,903,997,966,935,998,967,999,776,808,777,840,809,778,872,841,810,779,873,842,811,874,843,875,
+    652,684,653,716,685,654,748,717,686,655,749,718,687,750,719,751,528,560,529,592,561,530,624,593,562,531,625,594,563,626,595,627,
+    404,436,405,468,437,406,500,469,438,407,501,470,439,502,471,503,280,312,281,344,313,282,376,345,314,283,377,346,315,378,347,379,
+    156,188,157,220,189,158,252,221,190,159,253,222,191,254,223,255,904,936,905,968,937,906,1000,969,938,907,1001,970,939,1002,971,1003,
+    780,812,781,844,813,782,876,845,814,783,877,846,815,878,847,879,656,688,657,720,689,658,752,721,690,659,753,722,691,754,723,755,
+    532,564,533,596,565,534,628,597,566,535,629,598,567,630,599,631,408,440,409,472,441,410,504,473,442,411,505,474,443,506,475,507,
+    284,316,285,348,317,286,380,349,318,287,381,350,319,382,351,383,908,940,909,972,941,910,1004,973,942,911,1005,974,943,1006,975,1007,
+    784,816,785,848,817,786,880,849,818,787,881,850,819,882,851,883,660,692,661,724,693,662,756,725,694,663,757,726,695,758,727,759,
+    536,568,537,600,569,538,632,601,570,539,633,602,571,634,603,635,412,444,413,476,445,414,508,477,446,415,509,478,447,510,479,511,
+    912,944,913,976,945,914,1008,977,946,915,1009,978,947,1010,979,1011,788,820,789,852,821,790,884,853,822,791,885,854,823,886,855,887,
+    664,696,665,728,697,666,760,729,698,667,761,730,699,762,731,763,540,572,541,604,573,542,636,605,574,543,637,606,575,638,607,639,
+    916,948,917,980,949,918,1012,981,950,919,1013,982,951,1014,983,1015,792,824,793,856,825,794,888,857,826,795,889,858,827,890,859,891,
+    668,700,669,732,701,670,764,733,702,671,765,734,703,766,735,767,920,952,921,984,953,922,1016,985,954,923,1017,986,955,1018,987,1019,
+    796,828,797,860,829,798,892,861,830,799,893,862,831,894,863,895,924,956,925,988,957,926,1020,989,958,927,1021,990,959,1022,991,1023
+};
+
+/* Coefficient scan dispatch table, indexed [scanType][log2(TU size) - 2].
+ * 16x16 and 32x32 share the diagonal scan across all scan types since only
+ * the diagonal variant exists at those sizes. */
+const uint16_t* const g_scanOrder[NUM_SCAN_TYPE][NUM_SCAN_SIZE] =
+{
+    { g_scan4x4[0], g_scan8x8[0], g_scan16x16, g_scan32x32 },
+    { g_scan4x4[1], g_scan8x8[1], g_scan16x16, g_scan32x32 },
+    { g_scan4x4[2], g_scan8x8[2], g_scan16x16, g_scan32x32 }
+};
+
+/* Coefficient-GROUP scan dispatch, indexed [scanType][log2(TU size) - 2]:
+ * a TU of size 8 has 2x2 groups, 16 has 4x4 groups, 32 has 8x8 groups.
+ * For sizes >= 16 only the diagonal group scan is used; the vertical scan
+ * type (row 2) reuses the diagonal 2x2 group order for 8x8 TUs. */
+const uint16_t* const g_scanOrderCG[NUM_SCAN_TYPE][NUM_SCAN_SIZE] =
+{
+    { g_scan4x4[0], g_scan2x2[0], g_scan4x4[0], g_scan8x8diag },
+    { g_scan4x4[1], g_scan2x2[1], g_scan4x4[0], g_scan8x8diag },
+    { g_scan4x4[2], g_scan2x2[0], g_scan4x4[0], g_scan8x8diag }
+};
+
+// Table used for encoding the last coefficient position. The index is the position.
+// The low 4 bits are the number of "1" in the prefix and the high 4 bits are the number
+// of bits in the suffix.
+// e.g. 0x26 = prefix of six "1" bits, 2 suffix bits (covers positions 8-11).
+const uint8_t g_lastCoeffTable[32] =
+{
+    0x00, 0x01, 0x02, 0x03, 0x14, 0x14, 0x15, 0x15,
+    0x26, 0x26, 0x26, 0x26, 0x27, 0x27, 0x27, 0x27,
+    0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
+    0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+};
+
+// Rice parameters for absolute transform levels,
+// indexed by the current Golomb-Rice parameter (0-4): the largest value
+// codable before escaping to exp-Golomb.
+const uint8_t g_goRiceRange[5] = { 7, 14, 26, 46, 78 };
+
+/* CABAC LPS range table, indexed [probability state][quantized range].
+ * Gives the sub-range assigned to the least-probable symbol; the final row
+ * (state 63) is the fixed value 2 -- presumably the terminate state, as in
+ * standard CABAC (confirm against the arithmetic coder). */
+const uint8_t g_lpsTable[64][4] =
+{
+    { 128, 176, 208, 240 },
+    { 128, 167, 197, 227 },
+    { 128, 158, 187, 216 },
+    { 123, 150, 178, 205 },
+    { 116, 142, 169, 195 },
+    { 111, 135, 160, 185 },
+    { 105, 128, 152, 175 },
+    { 100, 122, 144, 166 },
+    {  95, 116, 137, 158 },
+    {  90, 110, 130, 150 },
+    {  85, 104, 123, 142 },
+    {  81,  99, 117, 135 },
+    {  77,  94, 111, 128 },
+    {  73,  89, 105, 122 },
+    {  69,  85, 100, 116 },
+    {  66,  80,  95, 110 },
+    {  62,  76,  90, 104 },
+    {  59,  72,  86,  99 },
+    {  56,  69,  81,  94 },
+    {  53,  65,  77,  89 },
+    {  51,  62,  73,  85 },
+    {  48,  59,  69,  80 },
+    {  46,  56,  66,  76 },
+    {  43,  53,  63,  72 },
+    {  41,  50,  59,  69 },
+    {  39,  48,  56,  65 },
+    {  37,  45,  54,  62 },
+    {  35,  43,  51,  59 },
+    {  33,  41,  48,  56 },
+    {  32,  39,  46,  53 },
+    {  30,  37,  43,  50 },
+    {  29,  35,  41,  48 },
+    {  27,  33,  39,  45 },
+    {  26,  31,  37,  43 },
+    {  24,  30,  35,  41 },
+    {  23,  28,  33,  39 },
+    {  22,  27,  32,  37 },
+    {  21,  26,  30,  35 },
+    {  20,  24,  29,  33 },
+    {  19,  23,  27,  31 },
+    {  18,  22,  26,  30 },
+    {  17,  21,  25,  28 },
+    {  16,  20,  23,  27 },
+    {  15,  19,  22,  25 },
+    {  14,  18,  21,  24 },
+    {  14,  17,  20,  23 },
+    {  13,  16,  19,  22 },
+    {  12,  15,  18,  21 },
+    {  12,  14,  17,  20 },
+    {  11,  14,  16,  19 },
+    {  11,  13,  15,  18 },
+    {  10,  12,  15,  17 },
+    {  10,  12,  14,  16 },
+    {   9,  11,  13,  15 },
+    {   9,  11,  12,  14 },
+    {   8,  10,  12,  14 },
+    {   8,   9,  11,  13 },
+    {   7,   9,  11,  12 },
+    {   7,   9,  10,  12 },
+    {   7,   8,  10,  11 },
+    {   6,   8,   9,  11 },
+    {   6,   7,   9,  10 },
+    {   6,   7,   8,   9 },
+    {   2,   2,   2,   2 }
+};
+
+/* Fractional power-of-two lookup: entry i is approximately
+ * 256 * (2^(i/64) - 1), i.e. the fractional part of exp2 at 1/64 steps
+ * (spot-checked: i=32 -> 106 = 256*(sqrt(2)-1)). Usage is outside this
+ * file view -- presumably lambda/bit-cost scaling; confirm at call sites. */
+const uint8_t x265_exp2_lut[64] =
+{
+    0,  3,  6,  8,  11, 14,  17,  20,  23,  26,  29,  32,  36,  39,  42,  45,
+    48,  52,  55,  58,  62,  65,  69,  72,  76,  80,  83,  87,  91,  94,  98,  102,
+    106,  110,  114,  118,  122,  126,  130,  135,  139,  143,  147,  152,  156,  161,  165,  170,
+    175,  179,  184,  189,  194,  198,  203,  208,  214,  219,  224,  229,  234,  240,  245,  250
+};
+
+/* bFilter = g_intraFilterFlags[dir] & trSize
+ * Per-mode bitmask of transform sizes for which intra reference-sample
+ * smoothing is enabled: bit set at (1 << k) enables size 1<<k, so
+ * 0x38 = 8/16/32, 0x30 = 16/32, 0x20 = 32 only, 0x00 = never (DC and
+ * near-horizontal/vertical modes). */
+const uint8_t g_intraFilterFlags[NUM_INTRA_MODE] =
+{
+    0x38, 0x00,
+    0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x20, 0x00, 0x20, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+    0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x20, 0x00, 0x20, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+    0x38, 
+};
+
+/* Contains how much to increment shared depth buffer for different ctu sizes to get next best depth
+ * here, depth 0 = 64x64, depth 1 = 32x32, depth 2 = 16x16 and depth 3 = 8x8
+ * if ctu = 64, depth buffer size is 256 combination of depth values 0, 1, 2, 3
+ * if ctu = 32, depth buffer size is 64 combination of depth values 1, 2, 3
+ * if ctu = 16, depth buffer size is 16 combination of depth values 2, 3
+ * Indexed [ctu-size row][depth]; row 0 = CTU 16, row 1 = CTU 32, row 2 = CTU 64
+ * (each increment is 4^(remaining depth levels), 0/1 marking the leaf). */
+const uint32_t g_depthInc[3][4] =
+{
+    { 16,  4,  0, 0},
+    { 64, 16,  4, 1},
+    {256, 64, 16, 4}
+};
+
+/* g_depthScanIdx [y][x]
+ * Z-order (Morton) index of position (x, y) within an 8x8 grid of 8x8
+ * sub-blocks -- maps raster coordinates into the depth buffer layout. */
+const uint32_t g_depthScanIdx[8][8] =
+{
+    {   0,   1,   4,   5,  16,  17,  20,  21,  },
+    {   2,   3,   6,   7,  18,  19,  22,  23,  },
+    {   8,   9,  12,  13,  24,  25,  28,  29,  },
+    {  10,  11,  14,  15,  26,  27,  30,  31,  },
+    {  32,  33,  36,  37,  48,  49,  52,  53,  },
+    {  34,  35,  38,  39,  50,  51,  54,  55,  },
+    {  40,  41,  44,  45,  56,  57,  60,  61,  },
+    {  42,  43,  46,  47,  58,  59,  62,  63,  }
+};
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/constants.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,103 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_CONSTANTS_H
+#define X265_CONSTANTS_H
+
+#include "common.h"
+
+namespace X265_NS {
+// private namespace
+
+extern int g_ctuSizeConfigured;
+
+/* Fill g_zscanToRaster (recursive quad-split walk writing through curIdx),
+ * then invert it into g_rasterToZscan. initZscanToRaster must run first. */
+void initZscanToRaster(uint32_t maxFullDepth, uint32_t depth, uint32_t startVal, uint32_t*& curIdx);
+void initRasterToZscan(uint32_t maxFullDepth);
+
+extern double x265_lambda_tab[QP_MAX_MAX + 1];
+extern double x265_lambda2_tab[QP_MAX_MAX + 1];
+extern const uint16_t x265_chroma_lambda2_offset_tab[MAX_CHROMA_LAMBDA_OFFSET + 1];
+
+enum { ChromaQPMappingTableSize = 70 };
+enum { AngleMapping422TableSize = 36 };
+
+extern const uint8_t g_chromaScale[ChromaQPMappingTableSize];
+extern const uint8_t g_chroma422IntraAngleMappingTable[AngleMapping422TableSize];
+
+// flexible conversion from relative to absolute index
+extern uint32_t g_zscanToRaster[MAX_NUM_PARTITIONS];
+extern uint32_t g_rasterToZscan[MAX_NUM_PARTITIONS];
+
+// conversion of partition index to picture pel position
+extern const uint8_t g_zscanToPelX[MAX_NUM_PARTITIONS];
+extern const uint8_t g_zscanToPelY[MAX_NUM_PARTITIONS];
+extern const uint8_t g_log2Size[MAX_CU_SIZE + 1]; // from size to log2(size)
+
+// global variable (CTU width/height, max. CU depth)
+extern uint32_t g_maxLog2CUSize;
+extern uint32_t g_maxCUSize;
+extern uint32_t g_maxCUDepth;
+extern uint32_t g_unitSizeDepth; // Depth at which 4x4 unit occurs from max CU size
+
+// Integer core-transform matrices, one per transform size (row 0 = DC)
+extern const int16_t g_t4[4][4];
+extern const int16_t g_t8[8][8];
+extern const int16_t g_t16[16][16];
+extern const int16_t g_t32[32][32];
+
+// Subpel interpolation defines and constants
+
+#define NTAPS_LUMA        8                            // Number of taps for luma
+#define NTAPS_CHROMA      4                            // Number of taps for chroma
+#define IF_INTERNAL_PREC 14                            // Number of bits for internal precision
+#define IF_FILTER_PREC    6                            // Log2 of sum of filter taps
+#define IF_INTERNAL_OFFS (1 << (IF_INTERNAL_PREC - 1)) // Offset used internally
+#define SLFASE_CONSTANT  0x5f4e4a53                    // magic constant; its purpose is not visible in this header -- see uses
+
+extern const int16_t g_lumaFilter[4][NTAPS_LUMA];      // Luma filter taps
+extern const int16_t g_chromaFilter[8][NTAPS_CHROMA];  // Chroma filter taps
+
+// Scanning order & context mapping table
+
+#define NUM_SCAN_SIZE 4                                // scan tables exist for 4x4, 8x8, 16x16, 32x32
+
+extern const uint16_t* const g_scanOrder[NUM_SCAN_TYPE][NUM_SCAN_SIZE];
+extern const uint16_t* const g_scanOrderCG[NUM_SCAN_TYPE][NUM_SCAN_SIZE];
+extern const uint16_t g_scan8x8diag[8 * 8];
+extern const uint16_t g_scan4x4[NUM_SCAN_TYPE + 1][4 * 4];  // +1 for safe buffer area for codeCoeffNxN assembly optimize, there have up to 15 bytes beyond bound read
+
+extern const uint8_t g_lastCoeffTable[32];
+extern const uint8_t g_goRiceRange[5]; // maximum value coded with Rice codes
+
+// CABAC tables
+extern const uint8_t g_lpsTable[64][4];
+extern const uint8_t x265_exp2_lut[64];
+
+// Intra tables
+extern const uint8_t g_intraFilterFlags[NUM_INTRA_MODE];
+
+extern const uint32_t g_depthInc[3][4];
+extern const uint32_t g_depthScanIdx[8][8];
+
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/contexts.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,311 @@
+/*****************************************************************************
+* Copyright (C) 2015 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_CONTEXTS_H
+#define X265_CONTEXTS_H
+
+#include "common.h"
+
+/* Counts of CABAC context models per syntax element. These sizes feed the
+ * OFF_* layout below, which packs all models into one flat array. */
+#define NUM_SPLIT_FLAG_CTX          3   // number of context models for split flag
+#define NUM_SKIP_FLAG_CTX           3   // number of context models for skip flag
+
+#define NUM_MERGE_FLAG_EXT_CTX      1   // number of context models for merge flag of merge extended
+#define NUM_MERGE_IDX_EXT_CTX       1   // number of context models for merge index of merge extended
+
+#define NUM_PART_SIZE_CTX           4   // number of context models for partition size
+#define NUM_PRED_MODE_CTX           1   // number of context models for prediction mode
+
+#define NUM_ADI_CTX                 1   // number of context models for intra prediction
+
+#define NUM_CHROMA_PRED_CTX         2   // number of context models for intra prediction (chroma)
+#define NUM_INTER_DIR_CTX           5   // number of context models for inter prediction direction
+#define NUM_MV_RES_CTX              2   // number of context models for motion vector difference
+
+#define NUM_REF_NO_CTX              2   // number of context models for reference index
+#define NUM_TRANS_SUBDIV_FLAG_CTX   3   // number of context models for transform subdivision flags
+#define NUM_QT_CBF_CTX              7   // number of context models for QT CBF
+#define NUM_QT_ROOT_CBF_CTX         1   // number of context models for QT ROOT CBF
+#define NUM_DELTA_QP_CTX            3   // number of context models for dQP
+
+#define NUM_SIG_CG_FLAG_CTX         2   // number of context models for MULTI_LEVEL_SIGNIFICANCE
+
+#define NUM_SIG_FLAG_CTX            42  // number of context models for sig flag
+#define NUM_SIG_FLAG_CTX_LUMA       27  // number of context models for luma sig flag
+#define NUM_SIG_FLAG_CTX_CHROMA     15  // number of context models for chroma sig flag
+
+#define NUM_CTX_LAST_FLAG_XY        18  // number of context models for last coefficient position
+#define NUM_CTX_LAST_FLAG_XY_LUMA   15  // number of context models for last coefficient position of luma
+#define NUM_CTX_LAST_FLAG_XY_CHROMA 3   // number of context models for last coefficient position of chroma
+
+#define NUM_ONE_FLAG_CTX            24  // number of context models for greater than 1 flag
+#define NUM_ONE_FLAG_CTX_LUMA       16  // number of context models for greater than 1 flag of luma
+#define NUM_ONE_FLAG_CTX_CHROMA     8   // number of context models for greater than 1 flag of chroma
+#define NUM_ABS_FLAG_CTX            6   // number of context models for greater than 2 flag
+#define NUM_ABS_FLAG_CTX_LUMA       4   // number of context models for greater than 2 flag of luma
+#define NUM_ABS_FLAG_CTX_CHROMA     2   // number of context models for greater than 2 flag of chroma
+
+#define NUM_MVP_IDX_CTX             1   // number of context models for MVP index
+
+#define NUM_SAO_MERGE_FLAG_CTX      1   // number of context models for SAO merge flags
+#define NUM_SAO_TYPE_IDX_CTX        1   // number of context models for SAO type index
+
+#define NUM_TRANSFORMSKIP_FLAG_CTX  1   // number of context models for transform skipping
+#define NUM_TQUANT_BYPASS_FLAG_CTX  1
+#define CNU                         154 // dummy initialization value for unused context models 'Context model Not Used'
+
+// Offset for context
+// Flat layout of all context models; MAX_OFF_CTX_MOD is the total count.
+// The 2* factors reserve a second copy of those contexts (presumably one
+// per channel type, luma/chroma -- confirm against the entropy coder).
+#define OFF_SPLIT_FLAG_CTX         (0)
+#define OFF_SKIP_FLAG_CTX          (OFF_SPLIT_FLAG_CTX         +     NUM_SPLIT_FLAG_CTX)
+#define OFF_MERGE_FLAG_EXT_CTX     (OFF_SKIP_FLAG_CTX          +     NUM_SKIP_FLAG_CTX)
+#define OFF_MERGE_IDX_EXT_CTX      (OFF_MERGE_FLAG_EXT_CTX     +     NUM_MERGE_FLAG_EXT_CTX)
+#define OFF_PART_SIZE_CTX          (OFF_MERGE_IDX_EXT_CTX      +     NUM_MERGE_IDX_EXT_CTX)
+#define OFF_PRED_MODE_CTX          (OFF_PART_SIZE_CTX          +     NUM_PART_SIZE_CTX)
+#define OFF_ADI_CTX                (OFF_PRED_MODE_CTX          +     NUM_PRED_MODE_CTX)
+#define OFF_CHROMA_PRED_CTX        (OFF_ADI_CTX                +     NUM_ADI_CTX)
+#define OFF_DELTA_QP_CTX           (OFF_CHROMA_PRED_CTX        +     NUM_CHROMA_PRED_CTX)
+#define OFF_INTER_DIR_CTX          (OFF_DELTA_QP_CTX           +     NUM_DELTA_QP_CTX)
+#define OFF_REF_NO_CTX             (OFF_INTER_DIR_CTX          +     NUM_INTER_DIR_CTX)
+#define OFF_MV_RES_CTX             (OFF_REF_NO_CTX             +     NUM_REF_NO_CTX)
+#define OFF_QT_CBF_CTX             (OFF_MV_RES_CTX             +     NUM_MV_RES_CTX)
+#define OFF_TRANS_SUBDIV_FLAG_CTX  (OFF_QT_CBF_CTX             +     NUM_QT_CBF_CTX)
+#define OFF_QT_ROOT_CBF_CTX        (OFF_TRANS_SUBDIV_FLAG_CTX  +     NUM_TRANS_SUBDIV_FLAG_CTX)
+#define OFF_SIG_CG_FLAG_CTX        (OFF_QT_ROOT_CBF_CTX        +     NUM_QT_ROOT_CBF_CTX)
+#define OFF_SIG_FLAG_CTX           (OFF_SIG_CG_FLAG_CTX        + 2 * NUM_SIG_CG_FLAG_CTX)
+#define OFF_CTX_LAST_FLAG_X        (OFF_SIG_FLAG_CTX           +     NUM_SIG_FLAG_CTX)
+#define OFF_CTX_LAST_FLAG_Y        (OFF_CTX_LAST_FLAG_X        +     NUM_CTX_LAST_FLAG_XY)
+#define OFF_ONE_FLAG_CTX           (OFF_CTX_LAST_FLAG_Y        +     NUM_CTX_LAST_FLAG_XY)
+#define OFF_ABS_FLAG_CTX           (OFF_ONE_FLAG_CTX           +     NUM_ONE_FLAG_CTX)
+#define OFF_MVP_IDX_CTX            (OFF_ABS_FLAG_CTX           +     NUM_ABS_FLAG_CTX)
+#define OFF_SAO_MERGE_FLAG_CTX     (OFF_MVP_IDX_CTX            +     NUM_MVP_IDX_CTX)
+#define OFF_SAO_TYPE_IDX_CTX       (OFF_SAO_MERGE_FLAG_CTX     +     NUM_SAO_MERGE_FLAG_CTX)
+#define OFF_TRANSFORMSKIP_FLAG_CTX (OFF_SAO_TYPE_IDX_CTX       +     NUM_SAO_TYPE_IDX_CTX)
+#define OFF_TQUANT_BYPASS_FLAG_CTX (OFF_TRANSFORMSKIP_FLAG_CTX + 2 * NUM_TRANSFORMSKIP_FLAG_CTX)
+#define MAX_OFF_CTX_MOD            (OFF_TQUANT_BYPASS_FLAG_CTX +     NUM_TQUANT_BYPASS_FLAG_CTX)
+
+// Assembly-visible copy of the entropy bit-cost table (C linkage, PFX-mangled)
+extern "C" const uint32_t PFX(entropyStateBits)[128];
+
+namespace X265_NS {
+// private namespace
+
+extern const uint32_t g_entropyBits[128];
+extern const uint8_t g_nextState[128][2];
+
+/* Context state encoding: bit 0 is the MPS value, the upper bits are the
+ * probability state index. Index 126 (used by the Trm variant) is
+ * presumably the terminating-symbol state -- confirm against the coder. */
+#define sbacGetMps(S)            ((S) & 1)
+#define sbacGetState(S)          ((S) >> 1)
+#define sbacNext(S, V)           (g_nextState[(S)][(V)])
+#define sbacGetEntropyBits(S, V) (g_entropyBits[(S) ^ (V)])
+#define sbacGetEntropyBitsTrm(V) (g_entropyBits[126 ^ (V)])
+
+#define MAX_NUM_CHANNEL_TYPE     2
+
+/* Context-index mapping helpers for residual coding; first dimension of the
+ * first table is presumably plane (Y/Cb/Cr), second is transform depth --
+ * confirm against the coefficient coder. */
+static const uint32_t ctxCbf[3][5] = { { 1, 0, 0, 0, 0 }, { 2, 3, 4, 5, 6 }, { 2, 3, 4, 5, 6 } };
+static const uint32_t significanceMapContextSetStart[MAX_NUM_CHANNEL_TYPE][3] = { { 0,  9, 21 }, { 0,  9, 12 } };
+static const uint32_t significanceMapContextSetSize[MAX_NUM_CHANNEL_TYPE][3]  = { { 9, 12,  6 }, { 9,  3,  3 } };
+static const uint32_t nonDiagonalScan8x8ContextOffset[MAX_NUM_CHANNEL_TYPE]   = {  6, 0  };
+static const uint32_t notFirstGroupNeighbourhoodContextOffset[MAX_NUM_CHANNEL_TYPE] = { 3, 0 };
+
+/* Initial CABAC context states. Each table has three rows -- presumably one
+ * per slice-type initialization set (confirm against how the entropy coder
+ * indexes them). CNU (154) marks context models unused for that set. */
+// initial probability for cu_transquant_bypass flag
+static const uint8_t INIT_CU_TRANSQUANT_BYPASS_FLAG[3][NUM_TQUANT_BYPASS_FLAG_CTX] =
+{
+    { 154 },
+    { 154 },
+    { 154 },
+};
+
+// initial probability for split flag
+static const uint8_t INIT_SPLIT_FLAG[3][NUM_SPLIT_FLAG_CTX] =
+{
+    { 107,  139,  126, },
+    { 107,  139,  126, },
+    { 139,  141,  157, },
+};
+
+// initial probability for skip flag
+static const uint8_t INIT_SKIP_FLAG[3][NUM_SKIP_FLAG_CTX] =
+{
+    { 197,  185,  201, },
+    { 197,  185,  201, },
+    { CNU,  CNU,  CNU, },
+};
+
+// initial probability for merge flag
+static const uint8_t INIT_MERGE_FLAG_EXT[3][NUM_MERGE_FLAG_EXT_CTX] =
+{
+    { 154, },
+    { 110, },
+    { CNU, },
+};
+
+// initial probability for merge index
+static const uint8_t INIT_MERGE_IDX_EXT[3][NUM_MERGE_IDX_EXT_CTX] =
+{
+    { 137, },
+    { 122, },
+    { CNU, },
+};
+
+// initial probability for partition size
+static const uint8_t INIT_PART_SIZE[3][NUM_PART_SIZE_CTX] =
+{
+    { 154,  139,  154, 154 },
+    { 154,  139,  154, 154 },
+    { 184,  CNU,  CNU, CNU },
+};
+
+// initial probability for prediction mode (intra/inter)
+static const uint8_t INIT_PRED_MODE[3][NUM_PRED_MODE_CTX] =
+{
+    { 134, },
+    { 149, },
+    { CNU, },
+};
+
+// initial probability for luma intra prediction mode
+static const uint8_t INIT_INTRA_PRED_MODE[3][NUM_ADI_CTX] =
+{
+    { 183, },
+    { 154, },
+    { 184, },
+};
+
+// initial probability for chroma intra prediction mode
+static const uint8_t INIT_CHROMA_PRED_MODE[3][NUM_CHROMA_PRED_CTX] =
+{
+    { 152,  139, },
+    { 152,  139, },
+    {  63,  139, },
+};
+
+// initial probability for inter prediction direction
+static const uint8_t INIT_INTER_DIR[3][NUM_INTER_DIR_CTX] =
+{
+    {  95,   79,   63,   31,  31, },
+    {  95,   79,   63,   31,  31, },
+    { CNU,  CNU,  CNU,  CNU, CNU, },
+};
+
+// initial probability for motion vector difference
+static const uint8_t INIT_MVD[3][NUM_MV_RES_CTX] =
+{
+    { 169,  198, },
+    { 140,  198, },
+    { CNU,  CNU, },
+};
+
+// initial probability for reference picture index
+static const uint8_t INIT_REF_PIC[3][NUM_REF_NO_CTX] =
+{
+    { 153,  153 },
+    { 153,  153 },
+    { CNU,  CNU },
+};
+
+// initial probability for delta QP
+static const uint8_t INIT_DQP[3][NUM_DELTA_QP_CTX] =
+{
+    { 154,  154,  154, },
+    { 154,  154,  154, },
+    { 154,  154,  154, },
+};
+
+// initial probability for coded block flags
+static const uint8_t INIT_QT_CBF[3][NUM_QT_CBF_CTX] =
+{
+    { 153,  111,  149,   92,  167,  154,  154 },
+    { 153,  111,  149,  107,  167,  154,  154 },
+    { 111,  141,   94,  138,  182,  154,  154 },
+};
+
+// initial probability for root coded block flag
+static const uint8_t INIT_QT_ROOT_CBF[3][NUM_QT_ROOT_CBF_CTX] =
+{
+    {  79, },
+    {  79, },
+    { CNU, },
+};
+
+// initial probability for last significant coefficient position (X then Y)
+static const uint8_t INIT_LAST[3][NUM_CTX_LAST_FLAG_XY] =
+{
+    { 125,  110,  124,  110,   95,   94,  125,  111,  111,   79,  125,  126,  111,  111,   79,
+      108,  123,   93 },
+    { 125,  110,   94,  110,   95,   79,  125,  111,  110,   78,  110,  111,  111,   95,   94,
+      108,  123,  108 },
+    { 110,  110,  124,  125,  140,  153,  125,  127,  140,  109,  111,  143,  127,  111,   79,
+      108,  123,   63 },
+};
+
+// initial probability for significant-coefficient-group flag (two per channel type)
+static const uint8_t INIT_SIG_CG_FLAG[3][2 * NUM_SIG_CG_FLAG_CTX] =
+{
+    { 121,  140,
+      61,  154, },
+    { 121,  140,
+      61,  154, },
+    {  91,  171,
+       134,  141, },
+};
+
+static const uint8_t INIT_SIG_FLAG[3][NUM_SIG_FLAG_CTX] =
+{
+    { 170,  154,  139,  153,  139,  123,  123,   63,  124,  166,  183,  140,  136,  153,  154,  166,  183,  140,  136,  153,  154,  166,  183,  140,  136,  153,  154,  170,  153,  138,  138,  122,  121,  122,  121,  167,  151,  183,  140,  151,  183,  140,  },
+    { 155,  154,  139,  153,  139,  123,  123,   63,  153,  166,  183,  140,  136,  153,  154,  166,  183,  140,  136,  153,  154,  166,  183,  140,  136,  153,  154,  170,  153,  123,  123,  107,  121,  107,  121,  167,  151,  183,  140,  151,  183,  140,  },
+    { 111,  111,  125,  110,  110,   94,  124,  108,  124,  107,  125,  141,  179,  153,  125,  107,  125,  141,  179,  153,  125,  107,  125,  141,  179,  153,  125,  140,  139,  182,  182,  152,  136,  152,  136,  153,  136,  139,  111,  136,  139,  111,  },
+};
+
+static const uint8_t INIT_ONE_FLAG[3][NUM_ONE_FLAG_CTX] =
+{
+    { 154,  196,  167,  167,  154,  152,  167,  182,  182,  134,  149,  136,  153,  121,  136,  122,  169,  208,  166,  167,  154,  152,  167,  182, },
+    { 154,  196,  196,  167,  154,  152,  167,  182,  182,  134,  149,  136,  153,  121,  136,  137,  169,  194,  166,  167,  154,  167,  137,  182, },
+    { 140,   92,  137,  138,  140,  152,  138,  139,  153,   74,  149,   92,  139,  107,  122,  152,  140,  179,  166,  182,  140,  227,  122,  197, },
+};
+
+static const uint8_t INIT_ABS_FLAG[3][NUM_ABS_FLAG_CTX] =
+{
+    { 107,  167,   91,  107,  107,  167, },
+    { 107,  167,   91,  122,  107,  167, },
+    { 138,  153,  136,  167,  152,  152, },
+};
+
+static const uint8_t INIT_MVP_IDX[3][NUM_MVP_IDX_CTX] =
+{
+    { 168 },
+    { 168 },
+    { CNU },
+};
+
+static const uint8_t INIT_SAO_MERGE_FLAG[3][NUM_SAO_MERGE_FLAG_CTX] =
+{
+    { 153,  },
+    { 153,  },
+    { 153,  },
+};
+
+static const uint8_t INIT_SAO_TYPE_IDX[3][NUM_SAO_TYPE_IDX_CTX] =
+{
+    { 160, },
+    { 185, },
+    { 200, },
+};
+
+static const uint8_t INIT_TRANS_SUBDIV_FLAG[3][NUM_TRANS_SUBDIV_FLAG_CTX] =
+{
+    { 224,  167,  122, },
+    { 124,  138,   94, },
+    { 153,  138,  138, },
+};
+
+static const uint8_t INIT_TRANSFORMSKIP_FLAG[3][2 * NUM_TRANSFORMSKIP_FLAG_CTX] =
+{
+    { 139,  139 },
+    { 139,  139 },
+    { 139,  139 },
+};
+}
+
+#endif // ifndef X265_CONTEXTS_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/cpu.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,374 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Loren Merritt <lorenm@u.washington.edu>
+ *          Laurent Aimar <fenrir@via.ecp.fr>
+ *          Fiona Glaser <fiona@x264.com>
+ *          Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "cpu.h"
+#include "common.h"
+
+#if MACOS || SYS_FREEBSD
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+#if SYS_OPENBSD
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#include <machine/cpu.h>
+#endif
+
+#if X265_ARCH_ARM && !defined(HAVE_NEON)
+#include <signal.h>
+#include <setjmp.h>
+static sigjmp_buf jmpbuf;
+static volatile sig_atomic_t canjump = 0;
+
+static void sigill_handler(int sig)
+{
+    if (!canjump)
+    {
+        signal(sig, SIG_DFL);
+        raise(sig);
+    }
+
+    canjump = 0;
+    siglongjmp(jmpbuf, 1);
+}
+
+#endif // if X265_ARCH_ARM
+
+namespace X265_NS {
+const cpu_name_t cpu_names[] =
+{
+#if X265_ARCH_X86
+#define MMX2 X265_CPU_MMX | X265_CPU_MMX2 | X265_CPU_CMOV
+    { "MMX2",        MMX2 },
+    { "MMXEXT",      MMX2 },
+    { "SSE",         MMX2 | X265_CPU_SSE },
+#define SSE2 MMX2 | X265_CPU_SSE | X265_CPU_SSE2
+    { "SSE2Slow",    SSE2 | X265_CPU_SSE2_IS_SLOW },
+    { "SSE2",        SSE2 },
+    { "SSE2Fast",    SSE2 | X265_CPU_SSE2_IS_FAST },
+    { "SSE3",        SSE2 | X265_CPU_SSE3 },
+    { "SSSE3",       SSE2 | X265_CPU_SSE3 | X265_CPU_SSSE3 },
+    { "SSE4.1",      SSE2 | X265_CPU_SSE3 | X265_CPU_SSSE3 | X265_CPU_SSE4 },
+    { "SSE4",        SSE2 | X265_CPU_SSE3 | X265_CPU_SSSE3 | X265_CPU_SSE4 },
+    { "SSE4.2",      SSE2 | X265_CPU_SSE3 | X265_CPU_SSSE3 | X265_CPU_SSE4 | X265_CPU_SSE42 },
+#define AVX SSE2 | X265_CPU_SSE3 | X265_CPU_SSSE3 | X265_CPU_SSE4 | X265_CPU_SSE42 | X265_CPU_AVX
+    { "AVX",         AVX },
+    { "XOP",         AVX | X265_CPU_XOP },
+    { "FMA4",        AVX | X265_CPU_FMA4 },
+    { "AVX2",        AVX | X265_CPU_AVX2 },
+    { "FMA3",        AVX | X265_CPU_FMA3 },
+#undef AVX
+#undef SSE2
+#undef MMX2
+    { "Cache32",         X265_CPU_CACHELINE_32 },
+    { "Cache64",         X265_CPU_CACHELINE_64 },
+    { "LZCNT",           X265_CPU_LZCNT },
+    { "BMI1",            X265_CPU_BMI1 },
+    { "BMI2",            X265_CPU_BMI1 | X265_CPU_BMI2 },
+    { "SlowCTZ",         X265_CPU_SLOW_CTZ },
+    { "SlowAtom",        X265_CPU_SLOW_ATOM },
+    { "SlowPshufb",      X265_CPU_SLOW_PSHUFB },
+    { "SlowPalignr",     X265_CPU_SLOW_PALIGNR },
+    { "SlowShuffle",     X265_CPU_SLOW_SHUFFLE },
+    { "UnalignedStack",  X265_CPU_STACK_MOD4 },
+
+#elif X265_ARCH_ARM
+    { "ARMv6",           X265_CPU_ARMV6 },
+    { "NEON",            X265_CPU_NEON },
+    { "FastNeonMRC",     X265_CPU_FAST_NEON_MRC },
+#endif // if X265_ARCH_X86
+    { "", 0 },
+};
+
+#if X265_ARCH_X86
+
+extern "C" {
+/* cpu-a.asm */
+int PFX(cpu_cpuid_test)(void);
+void PFX(cpu_cpuid)(uint32_t op, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
+void PFX(cpu_xgetbv)(uint32_t op, uint32_t *eax, uint32_t *edx);
+}
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4309) // truncation of constant value
+#endif
+
+uint32_t cpu_detect(void)
+{
+    uint32_t cpu = 0;
+
+    uint32_t eax, ebx, ecx, edx;
+    uint32_t vendor[4] = { 0 };
+    uint32_t max_extended_cap, max_basic_cap;
+
+#if !X86_64
+    if (!PFX(cpu_cpuid_test)())
+        return 0;
+#endif
+
+    PFX(cpu_cpuid)(0, &eax, vendor + 0, vendor + 2, vendor + 1);
+    max_basic_cap = eax;
+    if (max_basic_cap == 0)
+        return 0;
+
+    PFX(cpu_cpuid)(1, &eax, &ebx, &ecx, &edx);
+    if (edx & 0x00800000)
+        cpu |= X265_CPU_MMX;
+    else
+        return cpu;
+    if (edx & 0x02000000)
+        cpu |= X265_CPU_MMX2 | X265_CPU_SSE;
+    if (edx & 0x00008000)
+        cpu |= X265_CPU_CMOV;
+    else
+        return cpu;
+    if (edx & 0x04000000)
+        cpu |= X265_CPU_SSE2;
+    if (ecx & 0x00000001)
+        cpu |= X265_CPU_SSE3;
+    if (ecx & 0x00000200)
+        cpu |= X265_CPU_SSSE3;
+    if (ecx & 0x00080000)
+        cpu |= X265_CPU_SSE4;
+    if (ecx & 0x00100000)
+        cpu |= X265_CPU_SSE42;
+    /* Check OSXSAVE and AVX bits */
+    if ((ecx & 0x18000000) == 0x18000000)
+    {
+        /* Check for OS support */
+        PFX(cpu_xgetbv)(0, &eax, &edx);
+        if ((eax & 0x6) == 0x6)
+        {
+            cpu |= X265_CPU_AVX;
+            if (ecx & 0x00001000)
+                cpu |= X265_CPU_FMA3;
+        }
+    }
+
+    if (max_basic_cap >= 7)
+    {
+        PFX(cpu_cpuid)(7, &eax, &ebx, &ecx, &edx);
+        /* AVX2 requires OS support, but BMI1/2 don't. */
+        if ((cpu & X265_CPU_AVX) && (ebx & 0x00000020))
+            cpu |= X265_CPU_AVX2;
+        if (ebx & 0x00000008)
+        {
+            cpu |= X265_CPU_BMI1;
+            if (ebx & 0x00000100)
+                cpu |= X265_CPU_BMI2;
+        }
+    }
+
+    if (cpu & X265_CPU_SSSE3)
+        cpu |= X265_CPU_SSE2_IS_FAST;
+
+    PFX(cpu_cpuid)(0x80000000, &eax, &ebx, &ecx, &edx);
+    max_extended_cap = eax;
+
+    if (max_extended_cap >= 0x80000001)
+    {
+        PFX(cpu_cpuid)(0x80000001, &eax, &ebx, &ecx, &edx);
+
+        if (ecx & 0x00000020)
+            cpu |= X265_CPU_LZCNT; /* Supported by Intel chips starting with Haswell */
+        if (ecx & 0x00000040) /* SSE4a, AMD only */
+        {
+            int family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+            cpu |= X265_CPU_SSE2_IS_FAST;      /* Phenom and later CPUs have fast SSE units */
+            if (family == 0x14)
+            {
+                cpu &= ~X265_CPU_SSE2_IS_FAST; /* SSSE3 doesn't imply fast SSE anymore... */
+                cpu |= X265_CPU_SSE2_IS_SLOW;  /* Bobcat has 64-bit SIMD units */
+                cpu |= X265_CPU_SLOW_PALIGNR;  /* palignr is insanely slow on Bobcat */
+            }
+            if (family == 0x16)
+            {
+                cpu |= X265_CPU_SLOW_PSHUFB;   /* Jaguar's pshufb isn't that slow, but it's slow enough
+                                                * compared to alternate instruction sequences that this
+                                                * is equal or faster on almost all such functions. */
+            }
+        }
+
+        if (cpu & X265_CPU_AVX)
+        {
+            if (ecx & 0x00000800) /* XOP */
+                cpu |= X265_CPU_XOP;
+            if (ecx & 0x00010000) /* FMA4 */
+                cpu |= X265_CPU_FMA4;
+        }
+
+        if (!strcmp((char*)vendor, "AuthenticAMD"))
+        {
+            if (edx & 0x00400000)
+                cpu |= X265_CPU_MMX2;
+            if (!(cpu & X265_CPU_LZCNT))
+                cpu |= X265_CPU_SLOW_CTZ;
+            if ((cpu & X265_CPU_SSE2) && !(cpu & X265_CPU_SSE2_IS_FAST))
+                cpu |= X265_CPU_SSE2_IS_SLOW; /* AMD CPUs come in two types: terrible at SSE and great at it */
+        }
+    }
+
+    if (!strcmp((char*)vendor, "GenuineIntel"))
+    {
+        PFX(cpu_cpuid)(1, &eax, &ebx, &ecx, &edx);
+        int family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+        int model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
+        if (family == 6)
+        {
+            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and 6/14 (core1 "yonah")
+             * theoretically support sse2, but it's significantly slower than mmx for
+             * almost all of x265's functions, so let's just pretend they don't. */
+            if (model == 9 || model == 13 || model == 14)
+            {
+                cpu &= ~(X265_CPU_SSE2 | X265_CPU_SSE3);
+                X265_CHECK(!(cpu & (X265_CPU_SSSE3 | X265_CPU_SSE4)), "unexpected CPU ID %d\n", cpu);
+            }
+            /* Detect Atom CPU */
+            else if (model == 28)
+            {
+                cpu |= X265_CPU_SLOW_ATOM;
+                cpu |= X265_CPU_SLOW_CTZ;
+                cpu |= X265_CPU_SLOW_PSHUFB;
+            }
+
+            /* Conroe has a slow shuffle unit. Check the model number to make sure not
+             * to include crippled low-end Penryns and Nehalems that don't have SSE4. */
+            else if ((cpu & X265_CPU_SSSE3) && !(cpu & X265_CPU_SSE4) && model < 23)
+                cpu |= X265_CPU_SLOW_SHUFFLE;
+        }
+    }
+
+    if ((!strcmp((char*)vendor, "GenuineIntel") || !strcmp((char*)vendor, "CyrixInstead")) && !(cpu & X265_CPU_SSE42))
+    {
+        /* cacheline size is specified in 3 places, any of which may be missing */
+        PFX(cpu_cpuid)(1, &eax, &ebx, &ecx, &edx);
+        int cache = (ebx & 0xff00) >> 5; // clflush size
+        if (!cache && max_extended_cap >= 0x80000006)
+        {
+            PFX(cpu_cpuid)(0x80000006, &eax, &ebx, &ecx, &edx);
+            cache = ecx & 0xff; // cacheline size
+        }
+        if (!cache && max_basic_cap >= 2)
+        {
+            // Cache and TLB Information
+            static const char cache32_ids[] = { 0x0a, 0x0c, 0x41, 0x42, 0x43, 0x44, 0x45, 0x82, 0x83, 0x84, 0x85, 0 };
+            static const char cache64_ids[] = { 0x22, 0x23, 0x25, 0x29, 0x2c, 0x46, 0x47, 0x49, 0x60, 0x66, 0x67,
+                                                0x68, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7c, 0x7f, 0x86, 0x87, 0 };
+            uint32_t buf[4];
+            int max, i = 0;
+            do
+            {
+                PFX(cpu_cpuid)(2, buf + 0, buf + 1, buf + 2, buf + 3);
+                max = buf[0] & 0xff;
+                buf[0] &= ~0xff;
+                for (int j = 0; j < 4; j++)
+                {
+                    if (!(buf[j] >> 31))
+                        while (buf[j])
+                        {
+                            if (strchr(cache32_ids, buf[j] & 0xff))
+                                cache = 32;
+                            if (strchr(cache64_ids, buf[j] & 0xff))
+                                cache = 64;
+                            buf[j] >>= 8;
+                        }
+                }
+            }
+            while (++i < max);
+        }
+
+        if (cache == 32)
+            cpu |= X265_CPU_CACHELINE_32;
+        else if (cache == 64)
+            cpu |= X265_CPU_CACHELINE_64;
+        else
+            x265_log(NULL, X265_LOG_WARNING, "unable to determine cacheline size\n");
+    }
+
+#if BROKEN_STACK_ALIGNMENT
+    cpu |= X265_CPU_STACK_MOD4;
+#endif
+
+    return cpu;
+}
+
+#elif X265_ARCH_ARM
+
+extern "C" {
+void PFX(cpu_neon_test)(void);
+int PFX(cpu_fast_neon_mrc_test)(void);
+}
+
+uint32_t cpu_detect(void)
+{
+    int flags = 0;
+
+#if HAVE_ARMV6
+    flags |= X265_CPU_ARMV6;
+
+    // don't do this hack if compiled with -mfpu=neon
+#if !HAVE_NEON
+    static void (* oldsig)(int);
+    oldsig = signal(SIGILL, sigill_handler);
+    if (sigsetjmp(jmpbuf, 1))
+    {
+        signal(SIGILL, oldsig);
+        return flags;
+    }
+
+    canjump = 1;
+    PFX(cpu_neon_test)();
+    canjump = 0;
+    signal(SIGILL, oldsig);
+#endif // if !HAVE_NEON
+
+    flags |= X265_CPU_NEON;
+
+    // fast neon -> arm (Cortex-A9) detection relies on user access to the
+    // cycle counter; this assumes ARMv7 performance counters.
+    // NEON requires at least ARMv7, ARMv8 may require changes here, but
+    // hopefully this hacky detection method will have been replaced by then.
+    // Note that there is potential for a race condition if another program or
+    // x265 instance disables or reinits the counters while x265 is using them,
+    // which may result in incorrect detection and the counters stuck enabled.
+    // right now Apple does not seem to support performance counters for this test
+#ifndef __MACH__
+    flags |= PFX(cpu_fast_neon_mrc_test)() ? X265_CPU_FAST_NEON_MRC : 0;
+#endif
+    // TODO: write dual issue test? currently it's A8 (dual issue) vs. A9 (fast mrc)
+#endif // if HAVE_ARMV6
+    return flags;
+}
+
+#else // if X265_ARCH_X86
+
+uint32_t cpu_detect(void)
+{
+    return 0;
+}
+
+#endif // if X265_ARCH_X86
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/cpu.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,64 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Loren Merritt <lorenm@u.washington.edu>
+ *          Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_CPU_H
+#define X265_CPU_H
+
+#include "common.h"
+
+/* All assembly functions are prefixed with X265_NS (macro expanded) */
+#define PFX3(prefix, name) prefix ## _ ## name
+#define PFX2(prefix, name) PFX3(prefix, name)
+#define PFX(name)          PFX2(X265_NS, name)
+
+// from cpu-a.asm, if ASM primitives are compiled, else primitives.cpp
+extern "C" void PFX(cpu_emms)(void);
+extern "C" void PFX(safe_intel_cpu_indicator_init)(void);
+
+#if _MSC_VER && _WIN64
+#define x265_emms() PFX(cpu_emms)()
+#elif _MSC_VER
+#include <mmintrin.h>
+#define x265_emms() _mm_empty()
+#elif __GNUC__
+// Cannot use _mm_empty() directly without compiling all the source with
+// a fixed CPU arch, which we would like to avoid at the moment
+#define x265_emms() PFX(cpu_emms)()
+#else
+#define x265_emms() PFX(cpu_emms)()
+#endif
+
+namespace X265_NS {
+uint32_t cpu_detect(void);
+
+struct cpu_name_t
+{
+    char name[16];
+    uint32_t flags;
+};
+
+extern const cpu_name_t cpu_names[];
+}
+
+#endif // ifndef X265_CPU_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/cudata.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2035 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "frame.h"
+#include "framedata.h"
+#include "picyuv.h"
+#include "mv.h"
+#include "cudata.h"
+
+using namespace X265_NS;
+
+/* for all bcast* and copy* functions, dst and src are aligned to MIN(size, 32) */
+
+static void bcast1(uint8_t* dst, uint8_t val)  { dst[0] = val; }
+
+static void copy4(uint8_t* dst, uint8_t* src)  { ((uint32_t*)dst)[0] = ((uint32_t*)src)[0]; }
+static void bcast4(uint8_t* dst, uint8_t val)  { ((uint32_t*)dst)[0] = 0x01010101u * val; }
+
+static void copy16(uint8_t* dst, uint8_t* src) { ((uint64_t*)dst)[0] = ((uint64_t*)src)[0]; ((uint64_t*)dst)[1] = ((uint64_t*)src)[1]; }
+static void bcast16(uint8_t* dst, uint8_t val) { uint64_t bval = 0x0101010101010101ULL * val; ((uint64_t*)dst)[0] = bval; ((uint64_t*)dst)[1] = bval; }
+
+static void copy64(uint8_t* dst, uint8_t* src) { ((uint64_t*)dst)[0] = ((uint64_t*)src)[0]; ((uint64_t*)dst)[1] = ((uint64_t*)src)[1]; 
+                                                 ((uint64_t*)dst)[2] = ((uint64_t*)src)[2]; ((uint64_t*)dst)[3] = ((uint64_t*)src)[3];
+                                                 ((uint64_t*)dst)[4] = ((uint64_t*)src)[4]; ((uint64_t*)dst)[5] = ((uint64_t*)src)[5];
+                                                 ((uint64_t*)dst)[6] = ((uint64_t*)src)[6]; ((uint64_t*)dst)[7] = ((uint64_t*)src)[7]; }
+static void bcast64(uint8_t* dst, uint8_t val) { uint64_t bval = 0x0101010101010101ULL * val;
+                                                 ((uint64_t*)dst)[0] = bval; ((uint64_t*)dst)[1] = bval; ((uint64_t*)dst)[2] = bval; ((uint64_t*)dst)[3] = bval;
+                                                 ((uint64_t*)dst)[4] = bval; ((uint64_t*)dst)[5] = bval; ((uint64_t*)dst)[6] = bval; ((uint64_t*)dst)[7] = bval; }
+
+/* at 256 bytes, memset/memcpy will probably use SIMD more effectively than our uint64_t hack,
+ * but hand-written assembly would beat it. */
+static void copy256(uint8_t* dst, uint8_t* src) { memcpy(dst, src, 256); }
+static void bcast256(uint8_t* dst, uint8_t val) { memset(dst, val, 256); }
+
+namespace {
+// file private namespace
+
+/* Check whether 2 addresses point to the same column */
+inline bool isEqualCol(int addrA, int addrB, int numUnits)
+{
+    // addrA % numUnits == addrB % numUnits
+    return ((addrA ^ addrB) &  (numUnits - 1)) == 0;
+}
+
+/* Check whether 2 addresses point to the same row */
+inline bool isEqualRow(int addrA, int addrB, int numUnits)
+{
+    // addrA / numUnits == addrB / numUnits
+    return ((addrA ^ addrB) & ~(numUnits - 1)) == 0;
+}
+
+/* Check whether 2 addresses point to the same row or column */
+inline bool isEqualRowOrCol(int addrA, int addrB, int numUnits)
+{
+    return isEqualCol(addrA, addrB, numUnits) | isEqualRow(addrA, addrB, numUnits);
+}
+
+/* Check whether one address points to the first column */
+inline bool isZeroCol(int addr, int numUnits)
+{
+    // addr % numUnits == 0
+    return (addr & (numUnits - 1)) == 0;
+}
+
+/* Check whether one address points to the first row */
+inline bool isZeroRow(int addr, int numUnits)
+{
+    // addr / numUnits == 0
+    return (addr & ~(numUnits - 1)) == 0;
+}
+
+/* Check whether one address points to a column whose index is smaller than a given value */
+inline bool lessThanCol(int addr, int val, int numUnits)
+{
+    // addr % numUnits < val
+    return (addr & (numUnits - 1)) < val;
+}
+
+/* Check whether one address points to a row whose index is smaller than a given value */
+inline bool lessThanRow(int addr, int val, int numUnits)
+{
+    // addr / numUnits < val
+    return addr < val * numUnits;
+}
+
+inline MV scaleMv(MV mv, int scale)
+{
+    int mvx = x265_clip3(-32768, 32767, (scale * mv.x + 127 + (scale * mv.x < 0)) >> 8);
+    int mvy = x265_clip3(-32768, 32767, (scale * mv.y + 127 + (scale * mv.y < 0)) >> 8);
+
+    return MV((int16_t)mvx, (int16_t)mvy);
+}
+
+}
+
+cubcast_t CUData::s_partSet[NUM_FULL_DEPTH] = { NULL, NULL, NULL, NULL, NULL };
+uint32_t CUData::s_numPartInCUSize;
+
+CUData::CUData()
+{
+    memset(this, 0, sizeof(*this));
+}
+
+void CUData::initialize(const CUDataMemPool& dataPool, uint32_t depth, int csp, int instance)
+{
+    m_chromaFormat  = csp;
+    m_hChromaShift  = CHROMA_H_SHIFT(csp);
+    m_vChromaShift  = CHROMA_V_SHIFT(csp);
+    m_numPartitions = NUM_4x4_PARTITIONS >> (depth * 2);
+
+    if (!s_partSet[0])
+    {
+        s_numPartInCUSize = 1 << g_unitSizeDepth;
+        switch (g_maxLog2CUSize)
+        {
+        case 6:
+            s_partSet[0] = bcast256;
+            s_partSet[1] = bcast64;
+            s_partSet[2] = bcast16;
+            s_partSet[3] = bcast4;
+            s_partSet[4] = bcast1;
+            break;
+        case 5:
+            s_partSet[0] = bcast64;
+            s_partSet[1] = bcast16;
+            s_partSet[2] = bcast4;
+            s_partSet[3] = bcast1;
+            s_partSet[4] = NULL;
+            break;
+        case 4:
+            s_partSet[0] = bcast16;
+            s_partSet[1] = bcast4;
+            s_partSet[2] = bcast1;
+            s_partSet[3] = NULL;
+            s_partSet[4] = NULL;
+            break;
+        default:
+            X265_CHECK(0, "unexpected CTU size\n");
+            break;
+        }
+    }
+
+    switch (m_numPartitions)
+    {
+    case 256: // 64x64 CU
+        m_partCopy = copy256;
+        m_partSet = bcast256;
+        m_subPartCopy = copy64;
+        m_subPartSet = bcast64;
+        break;
+    case 64:  // 32x32 CU
+        m_partCopy = copy64;
+        m_partSet = bcast64;
+        m_subPartCopy = copy16;
+        m_subPartSet = bcast16;
+        break;
+    case 16:  // 16x16 CU
+        m_partCopy = copy16;
+        m_partSet = bcast16;
+        m_subPartCopy = copy4;
+        m_subPartSet = bcast4;
+        break;
+    case 4:   // 8x8 CU
+        m_partCopy = copy4;
+        m_partSet = bcast4;
+        m_subPartCopy = NULL;
+        m_subPartSet = NULL;
+        break;
+    default:
+        X265_CHECK(0, "unexpected CU partition count\n");
+        break;
+    }
+
+    /* Each CU's data is laid out sequentially within the charMemBlock */
+    uint8_t *charBuf = dataPool.charMemBlock + (m_numPartitions * BytesPerPartition) * instance;
+
+    m_qp        = (int8_t*)charBuf; charBuf += m_numPartitions;
+    m_log2CUSize         = charBuf; charBuf += m_numPartitions;
+    m_lumaIntraDir       = charBuf; charBuf += m_numPartitions;
+    m_tqBypass           = charBuf; charBuf += m_numPartitions;
+    m_refIdx[0] = (int8_t*)charBuf; charBuf += m_numPartitions;
+    m_refIdx[1] = (int8_t*)charBuf; charBuf += m_numPartitions;
+    m_cuDepth            = charBuf; charBuf += m_numPartitions;
+    m_predMode           = charBuf; charBuf += m_numPartitions; /* the order up to here is important in initCTU() and initSubCU() */
+    m_partSize           = charBuf; charBuf += m_numPartitions;
+    m_mergeFlag          = charBuf; charBuf += m_numPartitions;
+    m_interDir           = charBuf; charBuf += m_numPartitions;
+    m_mvpIdx[0]          = charBuf; charBuf += m_numPartitions;
+    m_mvpIdx[1]          = charBuf; charBuf += m_numPartitions;
+    m_tuDepth            = charBuf; charBuf += m_numPartitions;
+    m_transformSkip[0]   = charBuf; charBuf += m_numPartitions;
+    m_transformSkip[1]   = charBuf; charBuf += m_numPartitions;
+    m_transformSkip[2]   = charBuf; charBuf += m_numPartitions;
+    m_cbf[0]             = charBuf; charBuf += m_numPartitions;
+    m_cbf[1]             = charBuf; charBuf += m_numPartitions;
+    m_cbf[2]             = charBuf; charBuf += m_numPartitions;
+    m_chromaIntraDir     = charBuf; charBuf += m_numPartitions;
+
+    X265_CHECK(charBuf == dataPool.charMemBlock + (m_numPartitions * BytesPerPartition) * (instance + 1), "CU data layout is broken\n");
+
+    m_mv[0]  = dataPool.mvMemBlock + (instance * 4) * m_numPartitions;
+    m_mv[1]  = m_mv[0] +  m_numPartitions;
+    m_mvd[0] = m_mv[1] +  m_numPartitions;
+    m_mvd[1] = m_mvd[0] + m_numPartitions;
+
+    uint32_t cuSize = g_maxCUSize >> depth;
+    uint32_t sizeL = cuSize * cuSize;
+    uint32_t sizeC = sizeL >> (m_hChromaShift + m_vChromaShift);
+    m_trCoeff[0] = dataPool.trCoeffMemBlock + instance * (sizeL + sizeC * 2);
+    m_trCoeff[1] = m_trCoeff[0] + sizeL;
+    m_trCoeff[2] = m_trCoeff[0] + sizeL + sizeC;
+}
+
+void CUData::initCTU(const Frame& frame, uint32_t cuAddr, int qp)
+{
+    m_encData       = frame.m_encData;
+    m_slice         = m_encData->m_slice;
+    m_cuAddr        = cuAddr;
+    m_cuPelX        = (cuAddr % m_slice->m_sps->numCuInWidth) << g_maxLog2CUSize;
+    m_cuPelY        = (cuAddr / m_slice->m_sps->numCuInWidth) << g_maxLog2CUSize;
+    m_absIdxInCTU   = 0;
+    m_numPartitions = NUM_4x4_PARTITIONS;
+
+    /* sequential memsets */
+    m_partSet((uint8_t*)m_qp, (uint8_t)qp);
+    m_partSet(m_log2CUSize,   (uint8_t)g_maxLog2CUSize);
+    m_partSet(m_lumaIntraDir, (uint8_t)DC_IDX);
+    m_partSet(m_tqBypass,     (uint8_t)frame.m_encData->m_param->bLossless);
+    if (m_slice->m_sliceType != I_SLICE)
+    {
+        m_partSet((uint8_t*)m_refIdx[0], (uint8_t)REF_NOT_VALID);
+        m_partSet((uint8_t*)m_refIdx[1], (uint8_t)REF_NOT_VALID);
+    }
+
+    X265_CHECK(!(frame.m_encData->m_param->bLossless && !m_slice->m_pps->bTransquantBypassEnabled), "lossless enabled without TQbypass in PPS\n");
+
+    /* initialize the remaining CU data in one memset */
+    memset(m_cuDepth, 0, (BytesPerPartition - 6) * m_numPartitions);
+
+    uint32_t widthInCU = m_slice->m_sps->numCuInWidth;
+    m_cuLeft = (m_cuAddr % widthInCU) ? m_encData->getPicCTU(m_cuAddr - 1) : NULL;
+    m_cuAbove = (m_cuAddr / widthInCU) ? m_encData->getPicCTU(m_cuAddr - widthInCU) : NULL;
+    m_cuAboveLeft = (m_cuLeft && m_cuAbove) ? m_encData->getPicCTU(m_cuAddr - widthInCU - 1) : NULL;
+    m_cuAboveRight = (m_cuAbove && ((m_cuAddr % widthInCU) < (widthInCU - 1))) ? m_encData->getPicCTU(m_cuAddr - widthInCU + 1) : NULL;
+}
+
+// initialize Sub partition
+void CUData::initSubCU(const CUData& ctu, const CUGeom& cuGeom, int qp)
+{
+    m_absIdxInCTU   = cuGeom.absPartIdx;
+    m_encData       = ctu.m_encData;
+    m_slice         = ctu.m_slice;
+    m_cuAddr        = ctu.m_cuAddr;
+    m_cuPelX        = ctu.m_cuPelX + g_zscanToPelX[cuGeom.absPartIdx];
+    m_cuPelY        = ctu.m_cuPelY + g_zscanToPelY[cuGeom.absPartIdx];
+    m_cuLeft        = ctu.m_cuLeft;
+    m_cuAbove       = ctu.m_cuAbove;
+    m_cuAboveLeft   = ctu.m_cuAboveLeft;
+    m_cuAboveRight  = ctu.m_cuAboveRight;
+    X265_CHECK(m_numPartitions == cuGeom.numPartitions, "initSubCU() size mismatch\n");
+
+    m_partSet((uint8_t*)m_qp, (uint8_t)qp);
+
+    m_partSet(m_log2CUSize,   (uint8_t)cuGeom.log2CUSize);
+    m_partSet(m_lumaIntraDir, (uint8_t)DC_IDX);
+    m_partSet(m_tqBypass,     (uint8_t)m_encData->m_param->bLossless);
+    m_partSet((uint8_t*)m_refIdx[0], (uint8_t)REF_NOT_VALID);
+    m_partSet((uint8_t*)m_refIdx[1], (uint8_t)REF_NOT_VALID);
+    m_partSet(m_cuDepth,      (uint8_t)cuGeom.depth);
+
+    /* initialize the remaining CU data in one memset */
+    memset(m_predMode, 0, (BytesPerPartition - 7) * m_numPartitions);
+}
+
+/* Copy the results of a sub-part (split) CU to the parent CU */
+// subPartIdx selects which quadrant (0..3) of the parent receives subCU's
+// per-partition arrays, motion fields and transform coefficients.
+void CUData::copyPartFrom(const CUData& subCU, const CUGeom& childGeom, uint32_t subPartIdx)
+{
+    X265_CHECK(subPartIdx < 4, "part unit should be less than 4\n");
+
+    // destination offset of this quadrant within the parent's part arrays
+    uint32_t offset = childGeom.numPartitions * subPartIdx;
+
+    m_subPartCopy((uint8_t*)m_qp + offset, (uint8_t*)subCU.m_qp);
+    m_subPartCopy(m_log2CUSize + offset, subCU.m_log2CUSize);
+    m_subPartCopy(m_lumaIntraDir + offset, subCU.m_lumaIntraDir);
+    m_subPartCopy(m_tqBypass + offset, subCU.m_tqBypass);
+    m_subPartCopy((uint8_t*)m_refIdx[0] + offset, (uint8_t*)subCU.m_refIdx[0]);
+    m_subPartCopy((uint8_t*)m_refIdx[1] + offset, (uint8_t*)subCU.m_refIdx[1]);
+    m_subPartCopy(m_cuDepth + offset, subCU.m_cuDepth);
+    m_subPartCopy(m_predMode + offset, subCU.m_predMode);
+    m_subPartCopy(m_partSize + offset, subCU.m_partSize);
+    m_subPartCopy(m_mergeFlag + offset, subCU.m_mergeFlag);
+    m_subPartCopy(m_interDir + offset, subCU.m_interDir);
+    m_subPartCopy(m_mvpIdx[0] + offset, subCU.m_mvpIdx[0]);
+    m_subPartCopy(m_mvpIdx[1] + offset, subCU.m_mvpIdx[1]);
+    m_subPartCopy(m_tuDepth + offset, subCU.m_tuDepth);
+    m_subPartCopy(m_transformSkip[0] + offset, subCU.m_transformSkip[0]);
+    m_subPartCopy(m_transformSkip[1] + offset, subCU.m_transformSkip[1]);
+    m_subPartCopy(m_transformSkip[2] + offset, subCU.m_transformSkip[2]);
+    m_subPartCopy(m_cbf[0] + offset, subCU.m_cbf[0]);
+    m_subPartCopy(m_cbf[1] + offset, subCU.m_cbf[1]);
+    m_subPartCopy(m_cbf[2] + offset, subCU.m_cbf[2]);
+    m_subPartCopy(m_chromaIntraDir + offset, subCU.m_chromaIntraDir);
+
+    // motion fields are MV-typed, so plain memcpy instead of m_subPartCopy
+    memcpy(m_mv[0] + offset, subCU.m_mv[0], childGeom.numPartitions * sizeof(MV));
+    memcpy(m_mv[1] + offset, subCU.m_mv[1], childGeom.numPartitions * sizeof(MV));
+    memcpy(m_mvd[0] + offset, subCU.m_mvd[0], childGeom.numPartitions * sizeof(MV));
+    memcpy(m_mvd[1] + offset, subCU.m_mvd[1], childGeom.numPartitions * sizeof(MV));
+
+    // tmp = luma coefficient count of the child CU; coeff buffers are indexed
+    // in coefficient units (pels squared), not 4x4-partition units
+    uint32_t tmp = 1 << ((g_maxLog2CUSize - childGeom.depth) * 2);
+    uint32_t tmp2 = subPartIdx * tmp;
+    memcpy(m_trCoeff[0] + tmp2, subCU.m_trCoeff[0], sizeof(coeff_t) * tmp);
+
+    // chroma planes are scaled down by the combined chroma subsampling shifts
+    uint32_t tmpC = tmp >> (m_hChromaShift + m_vChromaShift);
+    uint32_t tmpC2 = tmp2 >> (m_hChromaShift + m_vChromaShift);
+    memcpy(m_trCoeff[1] + tmpC2, subCU.m_trCoeff[1], sizeof(coeff_t) * tmpC);
+    memcpy(m_trCoeff[2] + tmpC2, subCU.m_trCoeff[2], sizeof(coeff_t) * tmpC);
+}
+
+/* If a sub-CU part is not present (off the edge of the picture) its depth and
+ * log2size should still be configured */
+void CUData::setEmptyPart(const CUGeom& childGeom, uint32_t subPartIdx)
+{
+    // only geometry is written, so later neighbour/depth scans that touch
+    // this quadrant still read consistent values
+    uint32_t offset = childGeom.numPartitions * subPartIdx;
+    m_subPartSet(m_cuDepth + offset, (uint8_t)childGeom.depth);
+    m_subPartSet(m_log2CUSize + offset, (uint8_t)childGeom.log2CUSize);
+}
+
+/* Copy all CU data from one instance to the next, except set lossless flag
+ * This will only get used when --cu-lossless is enabled but --lossless is not. */
+void CUData::initLosslessCU(const CUData& cu, const CUGeom& cuGeom)
+{
+    /* Start by making an exact copy */
+    m_encData      = cu.m_encData;
+    m_slice        = cu.m_slice;
+    m_cuAddr       = cu.m_cuAddr;
+    m_cuPelX       = cu.m_cuPelX;
+    m_cuPelY       = cu.m_cuPelY;
+    m_cuLeft       = cu.m_cuLeft;
+    m_cuAbove      = cu.m_cuAbove;
+    m_cuAboveLeft  = cu.m_cuAboveLeft;
+    m_cuAboveRight = cu.m_cuAboveRight;
+    m_absIdxInCTU  = cuGeom.absPartIdx;
+    m_numPartitions = cuGeom.numPartitions;
+    // single memcpy covers all byte-sized per-partition arrays; m_qp is
+    // assumed to start the contiguous BytesPerPartition-wide allocation
+    memcpy(m_qp, cu.m_qp, BytesPerPartition * m_numPartitions);
+    memcpy(m_mv[0],  cu.m_mv[0],  m_numPartitions * sizeof(MV));
+    memcpy(m_mv[1],  cu.m_mv[1],  m_numPartitions * sizeof(MV));
+    memcpy(m_mvd[0], cu.m_mvd[0], m_numPartitions * sizeof(MV));
+    memcpy(m_mvd[1], cu.m_mvd[1], m_numPartitions * sizeof(MV));
+
+    /* force TQBypass to true */
+    m_partSet(m_tqBypass, true);
+
+    /* clear residual coding flags */
+    // keep only the intra/inter bits of predMode; drops skip (and any other)
+    // flag bits carried in the source CU
+    m_partSet(m_predMode, cu.m_predMode[0] & (MODE_INTRA | MODE_INTER));
+    m_partSet(m_tuDepth, 0);
+    m_partSet(m_transformSkip[0], 0);
+    m_partSet(m_transformSkip[1], 0);
+    m_partSet(m_transformSkip[2], 0);
+    m_partSet(m_cbf[0], 0);
+    m_partSet(m_cbf[1], 0);
+    m_partSet(m_cbf[2], 0);
+}
+
+/* Copy completed predicted CU to CTU in picture */
+// Writes this CU's per-partition arrays, motion fields and transform
+// coefficients back into the owning CTU at offset m_absIdxInCTU.
+void CUData::copyToPic(uint32_t depth) const
+{
+    CUData& ctu = *m_encData->getPicCTU(m_cuAddr);
+
+    m_partCopy((uint8_t*)ctu.m_qp + m_absIdxInCTU, (uint8_t*)m_qp);
+    m_partCopy(ctu.m_log2CUSize + m_absIdxInCTU, m_log2CUSize);
+    m_partCopy(ctu.m_lumaIntraDir + m_absIdxInCTU, m_lumaIntraDir);
+    m_partCopy(ctu.m_tqBypass + m_absIdxInCTU, m_tqBypass);
+    m_partCopy((uint8_t*)ctu.m_refIdx[0] + m_absIdxInCTU, (uint8_t*)m_refIdx[0]);
+    m_partCopy((uint8_t*)ctu.m_refIdx[1] + m_absIdxInCTU, (uint8_t*)m_refIdx[1]);
+    m_partCopy(ctu.m_cuDepth + m_absIdxInCTU, m_cuDepth);
+    m_partCopy(ctu.m_predMode + m_absIdxInCTU, m_predMode);
+    m_partCopy(ctu.m_partSize + m_absIdxInCTU, m_partSize);
+    m_partCopy(ctu.m_mergeFlag + m_absIdxInCTU, m_mergeFlag);
+    m_partCopy(ctu.m_interDir + m_absIdxInCTU, m_interDir);
+    m_partCopy(ctu.m_mvpIdx[0] + m_absIdxInCTU, m_mvpIdx[0]);
+    m_partCopy(ctu.m_mvpIdx[1] + m_absIdxInCTU, m_mvpIdx[1]);
+    m_partCopy(ctu.m_tuDepth + m_absIdxInCTU, m_tuDepth);
+    m_partCopy(ctu.m_transformSkip[0] + m_absIdxInCTU, m_transformSkip[0]);
+    m_partCopy(ctu.m_transformSkip[1] + m_absIdxInCTU, m_transformSkip[1]);
+    m_partCopy(ctu.m_transformSkip[2] + m_absIdxInCTU, m_transformSkip[2]);
+    m_partCopy(ctu.m_cbf[0] + m_absIdxInCTU, m_cbf[0]);
+    m_partCopy(ctu.m_cbf[1] + m_absIdxInCTU, m_cbf[1]);
+    m_partCopy(ctu.m_cbf[2] + m_absIdxInCTU, m_cbf[2]);
+    m_partCopy(ctu.m_chromaIntraDir + m_absIdxInCTU, m_chromaIntraDir);
+
+    memcpy(ctu.m_mv[0] + m_absIdxInCTU,  m_mv[0],  m_numPartitions * sizeof(MV));
+    memcpy(ctu.m_mv[1] + m_absIdxInCTU,  m_mv[1],  m_numPartitions * sizeof(MV));
+    memcpy(ctu.m_mvd[0] + m_absIdxInCTU, m_mvd[0], m_numPartitions * sizeof(MV));
+    memcpy(ctu.m_mvd[1] + m_absIdxInCTU, m_mvd[1], m_numPartitions * sizeof(MV));
+
+    // tmpY = luma coeff count at this depth; tmpY2 = coeff offset of this CU
+    // within the CTU (each 4x4 part contributes 1<<(LOG2_UNIT_SIZE*2) coeffs)
+    uint32_t tmpY = 1 << ((g_maxLog2CUSize - depth) * 2);
+    uint32_t tmpY2 = m_absIdxInCTU << (LOG2_UNIT_SIZE * 2);
+    memcpy(ctu.m_trCoeff[0] + tmpY2, m_trCoeff[0], sizeof(coeff_t) * tmpY);
+
+    // chroma counts/offsets scaled down by the chroma subsampling shifts
+    uint32_t tmpC = tmpY >> (m_hChromaShift + m_vChromaShift);
+    uint32_t tmpC2 = tmpY2 >> (m_hChromaShift + m_vChromaShift);
+    memcpy(ctu.m_trCoeff[1] + tmpC2, m_trCoeff[1], sizeof(coeff_t) * tmpC);
+    memcpy(ctu.m_trCoeff[2] + tmpC2, m_trCoeff[2], sizeof(coeff_t) * tmpC);
+}
+
+/* The reverse of copyToPic, called only by encodeResidue */
+// Loads this CU's prediction info back out of the owning CTU, then resets
+// all residual-coding state (tuDepth/transformSkip/cbf) to zero.
+void CUData::copyFromPic(const CUData& ctu, const CUGeom& cuGeom)
+{
+    m_encData       = ctu.m_encData;
+    m_slice         = ctu.m_slice;
+    m_cuAddr        = ctu.m_cuAddr;
+    // pixel origin = CTU origin + z-scan part pel offset
+    m_cuPelX        = ctu.m_cuPelX + g_zscanToPelX[cuGeom.absPartIdx];
+    m_cuPelY        = ctu.m_cuPelY + g_zscanToPelY[cuGeom.absPartIdx];
+    m_absIdxInCTU   = cuGeom.absPartIdx;
+    m_numPartitions = cuGeom.numPartitions;
+
+    /* copy out all prediction info for this part */
+    m_partCopy((uint8_t*)m_qp, (uint8_t*)ctu.m_qp + m_absIdxInCTU);
+    m_partCopy(m_log2CUSize,   ctu.m_log2CUSize + m_absIdxInCTU);
+    m_partCopy(m_lumaIntraDir, ctu.m_lumaIntraDir + m_absIdxInCTU);
+    m_partCopy(m_tqBypass,     ctu.m_tqBypass + m_absIdxInCTU);
+    m_partCopy((uint8_t*)m_refIdx[0], (uint8_t*)ctu.m_refIdx[0] + m_absIdxInCTU);
+    m_partCopy((uint8_t*)m_refIdx[1], (uint8_t*)ctu.m_refIdx[1] + m_absIdxInCTU);
+    m_partCopy(m_cuDepth,      ctu.m_cuDepth + m_absIdxInCTU);
+    m_partSet(m_predMode, ctu.m_predMode[m_absIdxInCTU] & (MODE_INTRA | MODE_INTER)); /* clear skip flag */
+    m_partCopy(m_partSize,     ctu.m_partSize + m_absIdxInCTU);
+    m_partCopy(m_mergeFlag,    ctu.m_mergeFlag + m_absIdxInCTU);
+    m_partCopy(m_interDir,     ctu.m_interDir + m_absIdxInCTU);
+    m_partCopy(m_mvpIdx[0],    ctu.m_mvpIdx[0] + m_absIdxInCTU);
+    m_partCopy(m_mvpIdx[1],    ctu.m_mvpIdx[1] + m_absIdxInCTU);
+    m_partCopy(m_chromaIntraDir, ctu.m_chromaIntraDir + m_absIdxInCTU);
+
+    memcpy(m_mv[0],  ctu.m_mv[0] + m_absIdxInCTU,  m_numPartitions * sizeof(MV));
+    memcpy(m_mv[1],  ctu.m_mv[1] + m_absIdxInCTU,  m_numPartitions * sizeof(MV));
+    memcpy(m_mvd[0], ctu.m_mvd[0] + m_absIdxInCTU, m_numPartitions * sizeof(MV));
+    memcpy(m_mvd[1], ctu.m_mvd[1] + m_absIdxInCTU, m_numPartitions * sizeof(MV));
+
+    /* clear residual coding flags */
+    m_partSet(m_tuDepth, 0);
+    m_partSet(m_transformSkip[0], 0);
+    m_partSet(m_transformSkip[1], 0);
+    m_partSet(m_transformSkip[2], 0);
+    m_partSet(m_cbf[0], 0);
+    m_partSet(m_cbf[1], 0);
+    m_partSet(m_cbf[2], 0);
+}
+
+/* Only called by encodeResidue, these fields can be modified during inter/intra coding */
+// Partial write-back to the owning CTU: only qp, residual flags, intra
+// chroma dir and transform coefficients (the fields residue coding touches).
+void CUData::updatePic(uint32_t depth) const
+{
+    CUData& ctu = *m_encData->getPicCTU(m_cuAddr);
+
+    m_partCopy((uint8_t*)ctu.m_qp + m_absIdxInCTU, (uint8_t*)m_qp);
+    m_partCopy(ctu.m_transformSkip[0] + m_absIdxInCTU, m_transformSkip[0]);
+    m_partCopy(ctu.m_transformSkip[1] + m_absIdxInCTU, m_transformSkip[1]);
+    m_partCopy(ctu.m_transformSkip[2] + m_absIdxInCTU, m_transformSkip[2]);
+    m_partCopy(ctu.m_predMode + m_absIdxInCTU, m_predMode);
+    m_partCopy(ctu.m_tuDepth + m_absIdxInCTU, m_tuDepth);
+    m_partCopy(ctu.m_cbf[0] + m_absIdxInCTU, m_cbf[0]);
+    m_partCopy(ctu.m_cbf[1] + m_absIdxInCTU, m_cbf[1]);
+    m_partCopy(ctu.m_cbf[2] + m_absIdxInCTU, m_cbf[2]);
+    m_partCopy(ctu.m_chromaIntraDir + m_absIdxInCTU, m_chromaIntraDir);
+
+    // luma coeff count/offset, as in copyToPic
+    uint32_t tmpY = 1 << ((g_maxLog2CUSize - depth) * 2);
+    uint32_t tmpY2 = m_absIdxInCTU << (LOG2_UNIT_SIZE * 2);
+    memcpy(ctu.m_trCoeff[0] + tmpY2, m_trCoeff[0], sizeof(coeff_t) * tmpY);
+    // reuse tmpY/tmpY2 as the chroma count/offset after subsampling shift
+    tmpY  >>= m_hChromaShift + m_vChromaShift;
+    tmpY2 >>= m_hChromaShift + m_vChromaShift;
+    memcpy(ctu.m_trCoeff[1] + tmpY2, m_trCoeff[1], sizeof(coeff_t) * tmpY);
+    memcpy(ctu.m_trCoeff[2] + tmpY2, m_trCoeff[2], sizeof(coeff_t) * tmpY);
+}
+
+// Find the PU immediately left of z-scan part curPartUnitIdx. Returns the CU
+// containing it (this CU, the owning CTU, or the left-neighbour CTU; NULL if
+// m_cuLeft is NULL) and writes the neighbour's z-scan index to lPartUnitIdx.
+const CUData* CUData::getPULeft(uint32_t& lPartUnitIdx, uint32_t curPartUnitIdx) const
+{
+    uint32_t absPartIdx = g_zscanToRaster[curPartUnitIdx];
+
+    if (!isZeroCol(absPartIdx, s_numPartInCUSize))
+    {
+        uint32_t absZorderCUIdx   = g_zscanToRaster[m_absIdxInCTU];
+        lPartUnitIdx = g_rasterToZscan[absPartIdx - 1];
+        // same column as this CU's origin => neighbour lies outside this CU
+        // but inside the CTU (CTU-relative index); otherwise it is inside
+        // this CU, so convert to a CU-relative index
+        if (isEqualCol(absPartIdx, absZorderCUIdx, s_numPartInCUSize))
+            return m_encData->getPicCTU(m_cuAddr);
+        else
+        {
+            lPartUnitIdx -= m_absIdxInCTU;
+            return this;
+        }
+    }
+
+    // column 0: the neighbour is the rightmost part of the same row in the
+    // left CTU
+    lPartUnitIdx = g_rasterToZscan[absPartIdx + s_numPartInCUSize - 1];
+    return m_cuLeft;
+}
+
+// Find the PU immediately above z-scan part curPartUnitIdx (vertical analog
+// of getPULeft); writes the neighbour's z-scan index to aPartUnitIdx.
+const CUData* CUData::getPUAbove(uint32_t& aPartUnitIdx, uint32_t curPartUnitIdx) const
+{
+    uint32_t absPartIdx = g_zscanToRaster[curPartUnitIdx];
+
+    if (!isZeroRow(absPartIdx, s_numPartInCUSize))
+    {
+        uint32_t absZorderCUIdx = g_zscanToRaster[m_absIdxInCTU];
+        aPartUnitIdx = g_rasterToZscan[absPartIdx - s_numPartInCUSize];
+        if (isEqualRow(absPartIdx, absZorderCUIdx, s_numPartInCUSize))
+            return m_encData->getPicCTU(m_cuAddr);
+        else
+            aPartUnitIdx -= m_absIdxInCTU;
+        // NOTE: reached only via the else branch (the if returns), so this
+        // matches getPULeft's structure despite the different bracing
+        return this;
+    }
+
+    // row 0: neighbour is the bottom row of the above CTU
+    aPartUnitIdx = g_rasterToZscan[absPartIdx + NUM_4x4_PARTITIONS - s_numPartInCUSize];
+    return m_cuAbove;
+}
+
+// Find the PU diagonally above-left of z-scan part curPartUnitIdx; the
+// result may live in this CU, the owning CTU, or the left / above /
+// above-left neighbour CTU depending on which CTU edges are crossed.
+const CUData* CUData::getPUAboveLeft(uint32_t& alPartUnitIdx, uint32_t curPartUnitIdx) const
+{
+    uint32_t absPartIdx = g_zscanToRaster[curPartUnitIdx];
+
+    if (!isZeroCol(absPartIdx, s_numPartInCUSize))
+    {
+        if (!isZeroRow(absPartIdx, s_numPartInCUSize))
+        {
+            // interior: neighbour is up-left one raster unit
+            uint32_t absZorderCUIdx  = g_zscanToRaster[m_absIdxInCTU];
+            alPartUnitIdx = g_rasterToZscan[absPartIdx - s_numPartInCUSize - 1];
+            if (isEqualRowOrCol(absPartIdx, absZorderCUIdx, s_numPartInCUSize))
+                return m_encData->getPicCTU(m_cuAddr);
+            else
+            {
+                alPartUnitIdx -= m_absIdxInCTU;
+                return this;
+            }
+        }
+        // top row: wrap into the bottom row of the above CTU
+        alPartUnitIdx = g_rasterToZscan[absPartIdx + NUM_4x4_PARTITIONS - s_numPartInCUSize - 1];
+        return m_cuAbove;
+    }
+
+    if (!isZeroRow(absPartIdx, s_numPartInCUSize))
+    {
+        // left column: wrap into the rightmost column of the left CTU
+        alPartUnitIdx = g_rasterToZscan[absPartIdx - 1];
+        return m_cuLeft;
+    }
+
+    // top-left corner of the CTU: neighbour is the bottom-right part of the
+    // above-left CTU
+    alPartUnitIdx = g_rasterToZscan[NUM_4x4_PARTITIONS - 1];
+    return m_cuAboveLeft;
+}
+
+// Find the PU diagonally above-right of z-scan part curPartUnitIdx.
+// Returns NULL when off the right picture edge or when the candidate has not
+// yet been coded (z-scan availability check).
+const CUData* CUData::getPUAboveRight(uint32_t& arPartUnitIdx, uint32_t curPartUnitIdx) const
+{
+    // off the right edge of the picture => unavailable
+    if ((m_encData->getPicCTU(m_cuAddr)->m_cuPelX + g_zscanToPelX[curPartUnitIdx] + UNIT_SIZE) >= m_slice->m_sps->picWidthInLumaSamples)
+        return NULL;
+
+    uint32_t absPartIdxRT = g_zscanToRaster[curPartUnitIdx];
+
+    if (lessThanCol(absPartIdxRT, s_numPartInCUSize - 1, s_numPartInCUSize))
+    {
+        if (!isZeroRow(absPartIdxRT, s_numPartInCUSize))
+        {
+            // candidate is available only if it precedes us in coding
+            // (z-scan) order
+            if (curPartUnitIdx > g_rasterToZscan[absPartIdxRT - s_numPartInCUSize + 1])
+            {
+                // right-edge column of this CU, for the same-row/col test
+                uint32_t absZorderCUIdx = g_zscanToRaster[m_absIdxInCTU] + (1 << (m_log2CUSize[0] - LOG2_UNIT_SIZE)) - 1;
+                arPartUnitIdx = g_rasterToZscan[absPartIdxRT - s_numPartInCUSize + 1];
+                if (isEqualRowOrCol(absPartIdxRT, absZorderCUIdx, s_numPartInCUSize))
+                    return m_encData->getPicCTU(m_cuAddr);
+                else
+                {
+                    arPartUnitIdx -= m_absIdxInCTU;
+                    return this;
+                }
+            }
+            return NULL;
+        }
+        // top row: wrap into the bottom row of the above CTU
+        arPartUnitIdx = g_rasterToZscan[absPartIdxRT + NUM_4x4_PARTITIONS - s_numPartInCUSize + 1];
+        return m_cuAbove;
+    }
+
+    // rightmost column but not top row: above-right lies in an uncoded CTU
+    if (!isZeroRow(absPartIdxRT, s_numPartInCUSize))
+        return NULL;
+
+    // top-right corner of the CTU: bottom-left part of the above-right CTU
+    arPartUnitIdx = g_rasterToZscan[NUM_4x4_PARTITIONS - s_numPartInCUSize];
+    return m_cuAboveRight;
+}
+
+// Find the PU diagonally below-left of z-scan part curPartUnitIdx.
+// Returns NULL when off the bottom picture edge or when the candidate has
+// not yet been coded.
+const CUData* CUData::getPUBelowLeft(uint32_t& blPartUnitIdx, uint32_t curPartUnitIdx) const
+{
+    // off the bottom edge of the picture => unavailable
+    if ((m_encData->getPicCTU(m_cuAddr)->m_cuPelY + g_zscanToPelY[curPartUnitIdx] + UNIT_SIZE) >= m_slice->m_sps->picHeightInLumaSamples)
+        return NULL;
+
+    uint32_t absPartIdxLB = g_zscanToRaster[curPartUnitIdx];
+
+    if (lessThanRow(absPartIdxLB, s_numPartInCUSize - 1, s_numPartInCUSize))
+    {
+        if (!isZeroCol(absPartIdxLB, s_numPartInCUSize))
+        {
+            // candidate must precede us in z-scan (coding) order
+            if (curPartUnitIdx > g_rasterToZscan[absPartIdxLB + s_numPartInCUSize - 1])
+            {
+                // bottom-edge row of this CU, for the same-row/col test
+                uint32_t absZorderCUIdxLB = g_zscanToRaster[m_absIdxInCTU] + ((1 << (m_log2CUSize[0] - LOG2_UNIT_SIZE)) - 1) * s_numPartInCUSize;
+                blPartUnitIdx = g_rasterToZscan[absPartIdxLB + s_numPartInCUSize - 1];
+                if (isEqualRowOrCol(absPartIdxLB, absZorderCUIdxLB, s_numPartInCUSize))
+                    return m_encData->getPicCTU(m_cuAddr);
+                else
+                {
+                    blPartUnitIdx -= m_absIdxInCTU;
+                    return this;
+                }
+            }
+            return NULL;
+        }
+        // left column: below-left lies in the left CTU, one row down
+        blPartUnitIdx = g_rasterToZscan[absPartIdxLB + s_numPartInCUSize * 2 - 1];
+        return m_cuLeft;
+    }
+
+    // bottom row of the CTU: the below-left CTU is not coded yet
+    return NULL;
+}
+
+// Generalization of getPUBelowLeft used for intra reference construction
+// (ADI): looks partUnitOffset 4x4 units below-left instead of exactly one.
+const CUData* CUData::getPUBelowLeftAdi(uint32_t& blPartUnitIdx,  uint32_t curPartUnitIdx, uint32_t partUnitOffset) const
+{
+    // off the bottom edge of the picture at the requested offset
+    if ((m_encData->getPicCTU(m_cuAddr)->m_cuPelY + g_zscanToPelY[curPartUnitIdx] + (partUnitOffset << LOG2_UNIT_SIZE)) >= m_slice->m_sps->picHeightInLumaSamples)
+        return NULL;
+
+    uint32_t absPartIdxLB = g_zscanToRaster[curPartUnitIdx];
+
+    if (lessThanRow(absPartIdxLB, s_numPartInCUSize - partUnitOffset, s_numPartInCUSize))
+    {
+        if (!isZeroCol(absPartIdxLB, s_numPartInCUSize))
+        {
+            // candidate must precede us in z-scan (coding) order
+            if (curPartUnitIdx > g_rasterToZscan[absPartIdxLB + partUnitOffset * s_numPartInCUSize - 1])
+            {
+                uint32_t absZorderCUIdxLB = g_zscanToRaster[m_absIdxInCTU] + ((1 << (m_log2CUSize[0] - LOG2_UNIT_SIZE)) - 1) * s_numPartInCUSize;
+                blPartUnitIdx = g_rasterToZscan[absPartIdxLB + partUnitOffset * s_numPartInCUSize - 1];
+                if (isEqualRowOrCol(absPartIdxLB, absZorderCUIdxLB, s_numPartInCUSize))
+                    return m_encData->getPicCTU(m_cuAddr);
+                else
+                {
+                    blPartUnitIdx -= m_absIdxInCTU;
+                    return this;
+                }
+            }
+            return NULL;
+        }
+        // left column: target lies partUnitOffset rows down in the left CTU
+        blPartUnitIdx = g_rasterToZscan[absPartIdxLB + (1 + partUnitOffset) * s_numPartInCUSize - 1];
+        return m_cuLeft;
+    }
+
+    // would cross the CTU's bottom edge: below CTU row not coded yet
+    return NULL;
+}
+
+// Generalization of getPUAboveRight used for intra reference construction
+// (ADI): looks partUnitOffset 4x4 units above-right instead of exactly one.
+const CUData* CUData::getPUAboveRightAdi(uint32_t& arPartUnitIdx, uint32_t curPartUnitIdx, uint32_t partUnitOffset) const
+{
+    // off the right edge of the picture at the requested offset
+    if ((m_encData->getPicCTU(m_cuAddr)->m_cuPelX + g_zscanToPelX[curPartUnitIdx] + (partUnitOffset << LOG2_UNIT_SIZE)) >= m_slice->m_sps->picWidthInLumaSamples)
+        return NULL;
+
+    uint32_t absPartIdxRT = g_zscanToRaster[curPartUnitIdx];
+
+    if (lessThanCol(absPartIdxRT, s_numPartInCUSize - partUnitOffset, s_numPartInCUSize))
+    {
+        if (!isZeroRow(absPartIdxRT, s_numPartInCUSize))
+        {
+            // candidate must precede us in z-scan (coding) order
+            if (curPartUnitIdx > g_rasterToZscan[absPartIdxRT - s_numPartInCUSize + partUnitOffset])
+            {
+                uint32_t absZorderCUIdx = g_zscanToRaster[m_absIdxInCTU] + (1 << (m_log2CUSize[0] - LOG2_UNIT_SIZE)) - 1;
+                arPartUnitIdx = g_rasterToZscan[absPartIdxRT - s_numPartInCUSize + partUnitOffset];
+                if (isEqualRowOrCol(absPartIdxRT, absZorderCUIdx, s_numPartInCUSize))
+                    return m_encData->getPicCTU(m_cuAddr);
+                else
+                {
+                    arPartUnitIdx -= m_absIdxInCTU;
+                    return this;
+                }
+            }
+            return NULL;
+        }
+        // top row: wrap into the bottom row of the above CTU
+        arPartUnitIdx = g_rasterToZscan[absPartIdxRT + NUM_4x4_PARTITIONS - s_numPartInCUSize + partUnitOffset];
+        return m_cuAbove;
+    }
+
+    if (!isZeroRow(absPartIdxRT, s_numPartInCUSize))
+        return NULL;
+
+    // top-right corner: offset part in the bottom row of the above-right CTU
+    arPartUnitIdx = g_rasterToZscan[NUM_4x4_PARTITIONS - s_numPartInCUSize + partUnitOffset - 1];
+    return m_cuAboveRight;
+}
+
+/* Get left QpMinCu */
+// Returns the CU left of the quantization group containing curAbsIdxInCTU,
+// or NULL at the left CTU boundary; writes its z-scan index to lPartUnitIdx.
+const CUData* CUData::getQpMinCuLeft(uint32_t& lPartUnitIdx, uint32_t curAbsIdxInCTU) const
+{
+    // round curAbsIdxInCTU down to the top-left part of its QP group
+    // NOTE(review): the 0xFF mask presumes <= 256 partitions per CTU --
+    // confirm against NUM_4x4_PARTITIONS
+    uint32_t absZorderQpMinCUIdx = curAbsIdxInCTU & (0xFF << (g_unitSizeDepth - m_slice->m_pps->maxCuDQPDepth) * 2);
+    uint32_t absRorderQpMinCUIdx = g_zscanToRaster[absZorderQpMinCUIdx];
+
+    // check for left CTU boundary
+    if (isZeroCol(absRorderQpMinCUIdx, s_numPartInCUSize))
+        return NULL;
+
+    // get index of left-CU relative to top-left corner of current quantization group
+    lPartUnitIdx = g_rasterToZscan[absRorderQpMinCUIdx - 1];
+
+    // return pointer to current CTU
+    return m_encData->getPicCTU(m_cuAddr);
+}
+
+/* Get above QpMinCu */
+// Vertical analog of getQpMinCuLeft: CU above the quantization group, or
+// NULL at the top CTU boundary.
+const CUData* CUData::getQpMinCuAbove(uint32_t& aPartUnitIdx, uint32_t curAbsIdxInCTU) const
+{
+    // round curAbsIdxInCTU down to the top-left part of its QP group
+    // (same 0xFF-mask assumption as getQpMinCuLeft)
+    uint32_t absZorderQpMinCUIdx = curAbsIdxInCTU & (0xFF << (g_unitSizeDepth - m_slice->m_pps->maxCuDQPDepth) * 2);
+    uint32_t absRorderQpMinCUIdx = g_zscanToRaster[absZorderQpMinCUIdx];
+
+    // check for top CTU boundary
+    if (isZeroRow(absRorderQpMinCUIdx, s_numPartInCUSize))
+        return NULL;
+
+    // get index of top-CU relative to top-left corner of current quantization group
+    aPartUnitIdx = g_rasterToZscan[absRorderQpMinCUIdx - s_numPartInCUSize];
+
+    // return pointer to current CTU
+    return m_encData->getPicCTU(m_cuAddr);
+}
+
+/* Get reference QP from left QpMinCu or latest coded QP */
+// Predicted QP = rounded average of the left and above QG neighbours' QPs;
+// an unavailable neighbour is substituted with the last coded QP.
+int8_t CUData::getRefQP(uint32_t curAbsIdxInCTU) const
+{
+    uint32_t lPartIdx = 0, aPartIdx = 0;
+    const CUData* cULeft = getQpMinCuLeft(lPartIdx, m_absIdxInCTU + curAbsIdxInCTU);
+    const CUData* cUAbove = getQpMinCuAbove(aPartIdx, m_absIdxInCTU + curAbsIdxInCTU);
+
+    // (left + above + 1) >> 1 rounds to nearest
+    return ((cULeft ? cULeft->m_qp[lPartIdx] : getLastCodedQP(curAbsIdxInCTU)) + (cUAbove ? cUAbove->m_qp[aPartIdx] : getLastCodedQP(curAbsIdxInCTU)) + 1) >> 1;
+}
+
+// Scan backwards from absPartIdx - 1 for the last part whose prediction mode
+// is valid (not MODE_NONE); returns -1 if none precedes it.
+int CUData::getLastValidPartIdx(int absPartIdx) const
+{
+    int lastValidPartIdx = absPartIdx - 1;
+
+    while (lastValidPartIdx >= 0 && m_predMode[lastValidPartIdx] == MODE_NONE)
+    {
+        // step back by a whole CU's worth of partitions at that CU's depth
+        uint32_t depth = m_cuDepth[lastValidPartIdx];
+        lastValidPartIdx -= m_numPartitions >> (depth << 1);
+    }
+
+    return lastValidPartIdx;
+}
+
+// Return the QP of the last coded part before absPartIdx.
+// Fallback chain: last valid part in this CU -> owning CTU's scan ->
+// previous CTU (unless WPP makes it a row start) -> slice QP.
+int8_t CUData::getLastCodedQP(uint32_t absPartIdx) const
+{
+    // mask that snaps a part index to its quantization-group origin
+    uint32_t quPartIdxMask = 0xFF << (g_unitSizeDepth - m_slice->m_pps->maxCuDQPDepth) * 2;
+    int lastValidPartIdx = getLastValidPartIdx(absPartIdx & quPartIdxMask);
+
+    if (lastValidPartIdx >= 0)
+        return m_qp[lastValidPartIdx];
+    else
+    {
+        if (m_absIdxInCTU)
+            return m_encData->getPicCTU(m_cuAddr)->getLastCodedQP(m_absIdxInCTU);
+        // with entropy-coding sync (WPP) a CTU-row start must not read the
+        // previous CTU, which belongs to the row above
+        else if (m_cuAddr > 0 && !(m_slice->m_pps->bEntropyCodingSyncEnabled && !(m_cuAddr % m_slice->m_sps->numCuInWidth)))
+            return m_encData->getPicCTU(m_cuAddr - 1)->getLastCodedQP(NUM_4x4_PARTITIONS);
+        else
+            return (int8_t)m_slice->m_sliceQp;
+    }
+}
+
+/* Get allowed chroma intra modes */
+// Fills modeList with the five chroma candidates (planar, ver, hor, DC,
+// derived-from-luma); if the co-located luma mode duplicates one of the
+// first four it is replaced by angular mode 34 so all entries stay distinct.
+void CUData::getAllowedChromaDir(uint32_t absPartIdx, uint32_t* modeList) const
+{
+    modeList[0] = PLANAR_IDX;
+    modeList[1] = VER_IDX;
+    modeList[2] = HOR_IDX;
+    modeList[3] = DC_IDX;
+    modeList[4] = DM_CHROMA_IDX;
+
+    uint32_t lumaMode = m_lumaIntraDir[absPartIdx];
+
+    for (int i = 0; i < NUM_CHROMA_MODE - 1; i++)
+    {
+        if (lumaMode == modeList[i])
+        {
+            modeList[i] = 34; // VER+8 mode
+            break;
+        }
+    }
+}
+
+/* Get most probable intra modes */
+// Derives the three MPM candidates from the left and above neighbouring PUs
+// (unavailable or inter neighbours count as DC). Returns 1 when left==above
+// (one distinct source mode), else 2.
+int CUData::getIntraDirLumaPredictor(uint32_t absPartIdx, uint32_t* intraDirPred) const
+{
+    const CUData* tempCU;
+    uint32_t tempPartIdx;
+    uint32_t leftIntraDir, aboveIntraDir;
+
+    // Get intra direction of left PU
+    tempCU = getPULeft(tempPartIdx, m_absIdxInCTU + absPartIdx);
+
+    leftIntraDir = (tempCU && tempCU->isIntra(tempPartIdx)) ? tempCU->m_lumaIntraDir[tempPartIdx] : DC_IDX;
+
+    // Get intra direction of above PU
+    // above PU only considered inside the current CTU row (pelY > 0)
+    tempCU = g_zscanToPelY[m_absIdxInCTU + absPartIdx] > 0 ? getPUAbove(tempPartIdx, m_absIdxInCTU + absPartIdx) : NULL;
+
+    aboveIntraDir = (tempCU && tempCU->isIntra(tempPartIdx)) ? tempCU->m_lumaIntraDir[tempPartIdx] : DC_IDX;
+
+    if (leftIntraDir == aboveIntraDir)
+    {
+        if (leftIntraDir >= 2) // angular modes
+        {
+            // the two extra candidates are the +/-1 angular neighbours,
+            // wrapped within the 32 angular modes (2..33)
+            intraDirPred[0] = leftIntraDir;
+            intraDirPred[1] = ((leftIntraDir - 2 + 31) & 31) + 2;
+            intraDirPred[2] = ((leftIntraDir - 2 +  1) & 31) + 2;
+        }
+        else //non-angular
+        {
+            intraDirPred[0] = PLANAR_IDX;
+            intraDirPred[1] = DC_IDX;
+            intraDirPred[2] = VER_IDX;
+        }
+        return 1;
+    }
+    else
+    {
+        intraDirPred[0] = leftIntraDir;
+        intraDirPred[1] = aboveIntraDir;
+
+        if (leftIntraDir && aboveIntraDir) //both modes are non-planar
+            intraDirPred[2] = PLANAR_IDX;
+        else
+            // one of them is planar: pick VER unless DC already present
+            intraDirPred[2] =  (leftIntraDir + aboveIntraDir) < 2 ? VER_IDX : DC_IDX;
+        return 2;
+    }
+}
+
+// CABAC context for the split flag: counts how many of the left/above
+// neighbours are coded deeper than 'depth' (result 0..2).
+uint32_t CUData::getCtxSplitFlag(uint32_t absPartIdx, uint32_t depth) const
+{
+    const CUData* tempCU;
+    uint32_t    tempPartIdx;
+    uint32_t    ctx;
+
+    // Get left split flag
+    tempCU = getPULeft(tempPartIdx, m_absIdxInCTU + absPartIdx);
+    ctx  = (tempCU) ? ((tempCU->m_cuDepth[tempPartIdx] > depth) ? 1 : 0) : 0;
+
+    // Get above split flag
+    tempCU = getPUAbove(tempPartIdx, m_absIdxInCTU + absPartIdx);
+    ctx += (tempCU) ? ((tempCU->m_cuDepth[tempPartIdx] > depth) ? 1 : 0) : 0;
+
+    return ctx;
+}
+
+// Compute [min,max] log2 TU size allowed for intra coding of this CU.
+// tuDepthRange[1] is the SPS max; tuDepthRange[0] is the SPS min raised so
+// the intra TU quadtree depth limit is respected (NxN partitions get one
+// extra implicit split via splitFlag).
+void CUData::getIntraTUQtDepthRange(uint32_t tuDepthRange[2], uint32_t absPartIdx) const
+{
+    uint32_t log2CUSize = m_log2CUSize[absPartIdx];
+    uint32_t splitFlag = m_partSize[absPartIdx] != SIZE_2Nx2N;
+
+    tuDepthRange[0] = m_slice->m_sps->quadtreeTULog2MinSize;
+    tuDepthRange[1] = m_slice->m_sps->quadtreeTULog2MaxSize;
+
+    tuDepthRange[0] = x265_clip3(tuDepthRange[0], tuDepthRange[1], log2CUSize - (m_slice->m_sps->quadtreeTUMaxDepthIntra - 1 + splitFlag));
+}
+
+// Inter-coding analog of getIntraTUQtDepthRange; here the extra implicit
+// split applies only when the TU depth limit is 1 and the CU is partitioned.
+void CUData::getInterTUQtDepthRange(uint32_t tuDepthRange[2], uint32_t absPartIdx) const
+{
+    uint32_t log2CUSize = m_log2CUSize[absPartIdx];
+    uint32_t quadtreeTUMaxDepth = m_slice->m_sps->quadtreeTUMaxDepthInter;
+    uint32_t splitFlag = quadtreeTUMaxDepth == 1 && m_partSize[absPartIdx] != SIZE_2Nx2N;
+
+    tuDepthRange[0] = m_slice->m_sps->quadtreeTULog2MinSize;
+    tuDepthRange[1] = m_slice->m_sps->quadtreeTULog2MaxSize;
+
+    tuDepthRange[0] = x265_clip3(tuDepthRange[0], tuDepthRange[1], log2CUSize - (quadtreeTUMaxDepth - 1 + splitFlag));
+}
+
+// CABAC context for the skip flag: counts skipped left/above neighbours
+// (result 0..2).
+uint32_t CUData::getCtxSkipFlag(uint32_t absPartIdx) const
+{
+    const CUData* tempCU;
+    uint32_t tempPartIdx;
+    uint32_t ctx;
+
+    // Get BCBP of left PU
+    tempCU = getPULeft(tempPartIdx, m_absIdxInCTU + absPartIdx);
+    ctx    = tempCU ? tempCU->isSkipped(tempPartIdx) : 0;
+
+    // Get BCBP of above PU
+    tempCU = getPUAbove(tempPartIdx, m_absIdxInCTU + absPartIdx);
+    ctx   += tempCU ? tempCU->isSkipped(tempPartIdx) : 0;
+
+    return ctx;
+}
+
+// Recursively propagate 'qp' to all leaf CUs below (absPartIdx, depth) that
+// carry no coded residual. Returns true as soon as any leaf has a root cbf,
+// signalling the caller that QP cannot be forced over this subtree.
+bool CUData::setQPSubCUs(int8_t qp, uint32_t absPartIdx, uint32_t depth)
+{
+    // partition counts at this depth and for one quadrant
+    uint32_t curPartNumb = NUM_4x4_PARTITIONS >> (depth << 1);
+    uint32_t curPartNumQ = curPartNumb >> 2;
+
+    if (m_cuDepth[absPartIdx] > depth)
+    {
+        // this node is split further: recurse into the four quadrants
+        for (uint32_t subPartIdx = 0; subPartIdx < 4; subPartIdx++)
+            if (setQPSubCUs(qp, absPartIdx + subPartIdx * curPartNumQ, depth + 1))
+                return true;
+    }
+    else
+    {
+        if (getQtRootCbf(absPartIdx))
+            return true;
+        else
+            setQPSubParts(qp, absPartIdx, depth);
+    }
+
+    return false;
+}
+
+// Set the inter prediction direction for PU 'puIdx' (0 or 1) of the CU at
+// absPartIdx. Each case memsets the z-scan quadrant strips that the PU's
+// shape covers; curPartNumQ is one quarter of the CU's partitions (one
+// quadrant), and the asymmetric (AMP) shapes address half/quarter strips
+// within the quadrants.
+void CUData::setPUInterDir(uint8_t dir, uint32_t absPartIdx, uint32_t puIdx)
+{
+    uint32_t curPartNumQ = m_numPartitions >> 2;
+    X265_CHECK(puIdx < 2, "unexpected part unit index\n");
+
+    switch (m_partSize[absPartIdx])
+    {
+    case SIZE_2Nx2N:
+        // whole CU
+        memset(m_interDir + absPartIdx, dir, 4 * curPartNumQ);
+        break;
+    case SIZE_2NxN:
+        // top or bottom half (caller passes the PU's own absPartIdx)
+        memset(m_interDir + absPartIdx, dir, 2 * curPartNumQ);
+        break;
+    case SIZE_Nx2N:
+        // left or right half = two vertically separated quadrants in z-scan
+        memset(m_interDir + absPartIdx, dir, curPartNumQ);
+        memset(m_interDir + absPartIdx + 2 * curPartNumQ, dir, curPartNumQ);
+        break;
+    case SIZE_NxN:
+        // single quadrant
+        memset(m_interDir + absPartIdx, dir, curPartNumQ);
+        break;
+    case SIZE_2NxnU:
+        if (!puIdx)
+        {
+            memset(m_interDir + absPartIdx, dir, (curPartNumQ >> 1));
+            memset(m_interDir + absPartIdx + curPartNumQ, dir, (curPartNumQ >> 1));
+        }
+        else
+        {
+            memset(m_interDir + absPartIdx, dir, (curPartNumQ >> 1));
+            memset(m_interDir + absPartIdx + curPartNumQ, dir, ((curPartNumQ >> 1) + (curPartNumQ << 1)));
+        }
+        break;
+    case SIZE_2NxnD:
+        if (!puIdx)
+        {
+            memset(m_interDir + absPartIdx, dir, ((curPartNumQ << 1) + (curPartNumQ >> 1)));
+            memset(m_interDir + absPartIdx + (curPartNumQ << 1) + curPartNumQ, dir, (curPartNumQ >> 1));
+        }
+        else
+        {
+            memset(m_interDir + absPartIdx, dir, (curPartNumQ >> 1));
+            memset(m_interDir + absPartIdx + curPartNumQ, dir, (curPartNumQ >> 1));
+        }
+        break;
+    case SIZE_nLx2N:
+        if (!puIdx)
+        {
+            memset(m_interDir + absPartIdx, dir, (curPartNumQ >> 2));
+            memset(m_interDir + absPartIdx + (curPartNumQ >> 1), dir, (curPartNumQ >> 2));
+            memset(m_interDir + absPartIdx + (curPartNumQ << 1), dir, (curPartNumQ >> 2));
+            memset(m_interDir + absPartIdx + (curPartNumQ << 1) + (curPartNumQ >> 1), dir, (curPartNumQ >> 2));
+        }
+        else
+        {
+            memset(m_interDir + absPartIdx, dir, (curPartNumQ >> 2));
+            memset(m_interDir + absPartIdx + (curPartNumQ >> 1), dir, (curPartNumQ + (curPartNumQ >> 2)));
+            memset(m_interDir + absPartIdx + (curPartNumQ << 1), dir, (curPartNumQ >> 2));
+            memset(m_interDir + absPartIdx + (curPartNumQ << 1) + (curPartNumQ >> 1), dir, (curPartNumQ + (curPartNumQ >> 2)));
+        }
+        break;
+    case SIZE_nRx2N:
+        if (!puIdx)
+        {
+            memset(m_interDir + absPartIdx, dir, (curPartNumQ + (curPartNumQ >> 2)));
+            memset(m_interDir + absPartIdx + curPartNumQ + (curPartNumQ >> 1), dir, (curPartNumQ >> 2));
+            memset(m_interDir + absPartIdx + (curPartNumQ << 1), dir, (curPartNumQ + (curPartNumQ >> 2)));
+            memset(m_interDir + absPartIdx + (curPartNumQ << 1) + curPartNumQ + (curPartNumQ >> 1), dir, (curPartNumQ >> 2));
+        }
+        else
+        {
+            memset(m_interDir + absPartIdx, dir, (curPartNumQ >> 2));
+            memset(m_interDir + absPartIdx + (curPartNumQ >> 1), dir, (curPartNumQ >> 2));
+            memset(m_interDir + absPartIdx + (curPartNumQ << 1), dir, (curPartNumQ >> 2));
+            memset(m_interDir + absPartIdx + (curPartNumQ << 1) + (curPartNumQ >> 1), dir, (curPartNumQ >> 2));
+        }
+        break;
+    default:
+        X265_CHECK(0, "unexpected part type\n");
+        break;
+    }
+}
+
+template<typename T>
+// Typed analog of setPUInterDir: assign 'val' to every element of array 'p'
+// covered by PU 'puIdx' of the CU at absPartIdx, using the same z-scan strip
+// decomposition per partition shape. Used for MV and refIdx arrays where a
+// byte memset cannot apply.
+void CUData::setAllPU(T* p, const T& val, int absPartIdx, int puIdx)
+{
+    int i;
+
+    p += absPartIdx;
+    int numElements = m_numPartitions;
+
+    switch (m_partSize[absPartIdx])
+    {
+    case SIZE_2Nx2N:
+        // whole CU
+        for (i = 0; i < numElements; i++)
+            p[i] = val;
+        break;
+
+    case SIZE_2NxN:
+        // top or bottom half
+        numElements >>= 1;
+        for (i = 0; i < numElements; i++)
+            p[i] = val;
+        break;
+
+    case SIZE_Nx2N:
+        // left/right half: two quadrants separated by 2*quadrant in z-scan
+        numElements >>= 2;
+        for (i = 0; i < numElements; i++)
+        {
+            p[i] = val;
+            p[i + 2 * numElements] = val;
+        }
+        break;
+
+    case SIZE_2NxnU:
+    {
+        int curPartNumQ = numElements >> 2;
+        if (!puIdx)
+        {
+            // upper (narrow) PU: half-strips of the two top quadrants
+            T *pT  = p;
+            T *pT2 = p + curPartNumQ;
+            for (i = 0; i < (curPartNumQ >> 1); i++)
+            {
+                pT[i] = val;
+                pT2[i] = val;
+            }
+        }
+        else
+        {
+            // lower (wide) PU: remainder of the CU
+            T *pT  = p;
+            for (i = 0; i < (curPartNumQ >> 1); i++)
+                pT[i] = val;
+
+            pT = p + curPartNumQ;
+            for (i = 0; i < ((curPartNumQ >> 1) + (curPartNumQ << 1)); i++)
+                pT[i] = val;
+        }
+        break;
+    }
+
+    case SIZE_2NxnD:
+    {
+        int curPartNumQ = numElements >> 2;
+        if (!puIdx)
+        {
+            // upper (wide) PU
+            T *pT  = p;
+            for (i = 0; i < ((curPartNumQ >> 1) + (curPartNumQ << 1)); i++)
+                pT[i] = val;
+
+            pT = p + (numElements - curPartNumQ);
+            for (i = 0; i < (curPartNumQ >> 1); i++)
+                pT[i] = val;
+        }
+        else
+        {
+            // lower (narrow) PU
+            T *pT  = p;
+            T *pT2 = p + curPartNumQ;
+            for (i = 0; i < (curPartNumQ >> 1); i++)
+            {
+                pT[i] = val;
+                pT2[i] = val;
+            }
+        }
+        break;
+    }
+
+    case SIZE_nLx2N:
+    {
+        int curPartNumQ = numElements >> 2;
+        if (!puIdx)
+        {
+            // left (narrow) PU: quarter-strips at four quadrant offsets
+            T *pT  = p;
+            T *pT2 = p + (curPartNumQ << 1);
+            T *pT3 = p + (curPartNumQ >> 1);
+            T *pT4 = p + (curPartNumQ << 1) + (curPartNumQ >> 1);
+
+            for (i = 0; i < (curPartNumQ >> 2); i++)
+            {
+                pT[i] = val;
+                pT2[i] = val;
+                pT3[i] = val;
+                pT4[i] = val;
+            }
+        }
+        else
+        {
+            // right (wide) PU
+            T *pT  = p;
+            T *pT2 = p + (curPartNumQ << 1);
+            for (i = 0; i < (curPartNumQ >> 2); i++)
+            {
+                pT[i] = val;
+                pT2[i] = val;
+            }
+
+            pT  = p + (curPartNumQ >> 1);
+            pT2 = p + (curPartNumQ << 1) + (curPartNumQ >> 1);
+            for (i = 0; i < ((curPartNumQ >> 2) + curPartNumQ); i++)
+            {
+                pT[i] = val;
+                pT2[i] = val;
+            }
+        }
+        break;
+    }
+
+    case SIZE_nRx2N:
+    {
+        int curPartNumQ = numElements >> 2;
+        if (!puIdx)
+        {
+            // left (wide) PU
+            T *pT  = p;
+            T *pT2 = p + (curPartNumQ << 1);
+            for (i = 0; i < ((curPartNumQ >> 2) + curPartNumQ); i++)
+            {
+                pT[i] = val;
+                pT2[i] = val;
+            }
+
+            pT  = p + curPartNumQ + (curPartNumQ >> 1);
+            pT2 = p + numElements - curPartNumQ + (curPartNumQ >> 1);
+            for (i = 0; i < (curPartNumQ >> 2); i++)
+            {
+                pT[i] = val;
+                pT2[i] = val;
+            }
+        }
+        else
+        {
+            // right (narrow) PU
+            T *pT  = p;
+            T *pT2 = p + (curPartNumQ >> 1);
+            T *pT3 = p + (curPartNumQ << 1);
+            T *pT4 = p + (curPartNumQ << 1) + (curPartNumQ >> 1);
+            for (i = 0; i < (curPartNumQ >> 2); i++)
+            {
+                pT[i] = val;
+                pT2[i] = val;
+                pT3[i] = val;
+                pT4[i] = val;
+            }
+        }
+        break;
+    }
+
+    case SIZE_NxN:
+    default:
+        // NxN is handled by callers passing per-quadrant absPartIdx elsewhere
+        X265_CHECK(0, "unknown partition type\n");
+        break;
+    }
+}
+
+void CUData::setPUMv(int list, const MV& mv, int absPartIdx, int puIdx)
+{
+    setAllPU(m_mv[list], mv, absPartIdx, puIdx);
+}
+
+void CUData::setPURefIdx(int list, int8_t refIdx, int absPartIdx, int puIdx)
+{
+    setAllPU(m_refIdx[list], refIdx, absPartIdx, puIdx);
+}
+
+void CUData::getPartIndexAndSize(uint32_t partIdx, uint32_t& outPartAddr, int& outWidth, int& outHeight) const
+{
+    int cuSize = 1 << m_log2CUSize[0];
+    int partType = m_partSize[0];
+
+    int tmp = partTable[partType][partIdx][0];
+    outWidth = ((tmp >> 4) * cuSize) >> 2;
+    outHeight = ((tmp & 0xF) * cuSize) >> 2;
+    outPartAddr = (partAddrTable[partType][partIdx] * m_numPartitions) >> 4;
+}
+
+void CUData::getMvField(const CUData* cu, uint32_t absPartIdx, int picList, MVField& outMvField) const
+{
+    if (cu)
+    {
+        outMvField.mv = cu->m_mv[picList][absPartIdx];
+        outMvField.refIdx = cu->m_refIdx[picList][absPartIdx];
+    }
+    else
+    {
+        // OUT OF BOUNDARY
+        outMvField.mv = 0;
+        outMvField.refIdx = REF_NOT_VALID;
+    }
+}
+
+void CUData::deriveLeftRightTopIdx(uint32_t partIdx, uint32_t& partIdxLT, uint32_t& partIdxRT) const
+{
+    partIdxLT = m_absIdxInCTU;
+    partIdxRT = g_rasterToZscan[g_zscanToRaster[partIdxLT] + (1 << (m_log2CUSize[0] - LOG2_UNIT_SIZE)) - 1];
+
+    switch (m_partSize[0])
+    {
+    case SIZE_2Nx2N: break;
+    case SIZE_2NxN:
+        partIdxLT += (partIdx == 0) ? 0 : m_numPartitions >> 1;
+        partIdxRT += (partIdx == 0) ? 0 : m_numPartitions >> 1;
+        break;
+    case SIZE_Nx2N:
+        partIdxLT += (partIdx == 0) ? 0 : m_numPartitions >> 2;
+        partIdxRT -= (partIdx == 1) ? 0 : m_numPartitions >> 2;
+        break;
+    case SIZE_NxN:
+        partIdxLT += (m_numPartitions >> 2) * partIdx;
+        partIdxRT +=  (m_numPartitions >> 2) * (partIdx - 1);
+        break;
+    case SIZE_2NxnU:
+        partIdxLT += (partIdx == 0) ? 0 : m_numPartitions >> 3;
+        partIdxRT += (partIdx == 0) ? 0 : m_numPartitions >> 3;
+        break;
+    case SIZE_2NxnD:
+        partIdxLT += (partIdx == 0) ? 0 : (m_numPartitions >> 1) + (m_numPartitions >> 3);
+        partIdxRT += (partIdx == 0) ? 0 : (m_numPartitions >> 1) + (m_numPartitions >> 3);
+        break;
+    case SIZE_nLx2N:
+        partIdxLT += (partIdx == 0) ? 0 : m_numPartitions >> 4;
+        partIdxRT -= (partIdx == 1) ? 0 : (m_numPartitions >> 2) + (m_numPartitions >> 4);
+        break;
+    case SIZE_nRx2N:
+        partIdxLT += (partIdx == 0) ? 0 : (m_numPartitions >> 2) + (m_numPartitions >> 4);
+        partIdxRT -= (partIdx == 1) ? 0 : m_numPartitions >> 4;
+        break;
+    default:
+        X265_CHECK(0, "unexpected part index\n");
+        break;
+    }
+}
+
+uint32_t CUData::deriveLeftBottomIdx(uint32_t puIdx) const
+{
+    uint32_t outPartIdxLB;
+    outPartIdxLB = g_rasterToZscan[g_zscanToRaster[m_absIdxInCTU] + ((1 << (m_log2CUSize[0] - LOG2_UNIT_SIZE - 1)) - 1) * s_numPartInCUSize];
+
+    switch (m_partSize[0])
+    {
+    case SIZE_2Nx2N:
+        outPartIdxLB += m_numPartitions >> 1;
+        break;
+    case SIZE_2NxN:
+        outPartIdxLB += puIdx ? m_numPartitions >> 1 : 0;
+        break;
+    case SIZE_Nx2N:
+        outPartIdxLB += puIdx ? (m_numPartitions >> 2) * 3 : m_numPartitions >> 1;
+        break;
+    case SIZE_NxN:
+        outPartIdxLB += (m_numPartitions >> 2) * puIdx;
+        break;
+    case SIZE_2NxnU:
+        outPartIdxLB += puIdx ? m_numPartitions >> 1 : -((int)m_numPartitions >> 3);
+        break;
+    case SIZE_2NxnD:
+        outPartIdxLB += puIdx ? m_numPartitions >> 1 : (m_numPartitions >> 2) + (m_numPartitions >> 3);
+        break;
+    case SIZE_nLx2N:
+        outPartIdxLB += puIdx ? (m_numPartitions >> 1) + (m_numPartitions >> 4) : m_numPartitions >> 1;
+        break;
+    case SIZE_nRx2N:
+        outPartIdxLB += puIdx ? (m_numPartitions >> 1) + (m_numPartitions >> 2) + (m_numPartitions >> 4) : m_numPartitions >> 1;
+        break;
+    default:
+        X265_CHECK(0, "unexpected part index\n");
+        break;
+    }
+    return outPartIdxLB;
+}
+
+/* Derives the partition index of neighboring bottom right block */
+uint32_t CUData::deriveRightBottomIdx(uint32_t puIdx) const
+{
+    uint32_t outPartIdxRB;
+    outPartIdxRB = g_rasterToZscan[g_zscanToRaster[m_absIdxInCTU] +
+                                   ((1 << (m_log2CUSize[0] - LOG2_UNIT_SIZE - 1)) - 1) * s_numPartInCUSize +
+                                   (1 << (m_log2CUSize[0] - LOG2_UNIT_SIZE)) - 1];
+
+    switch (m_partSize[0])
+    {
+    case SIZE_2Nx2N:
+        outPartIdxRB += m_numPartitions >> 1;
+        break;
+    case SIZE_2NxN:
+        outPartIdxRB += puIdx ? m_numPartitions >> 1 : 0;
+        break;
+    case SIZE_Nx2N:
+        outPartIdxRB += puIdx ? m_numPartitions >> 1 : m_numPartitions >> 2;
+        break;
+    case SIZE_NxN:
+        outPartIdxRB += (m_numPartitions >> 2) * (puIdx - 1);
+        break;
+    case SIZE_2NxnU:
+        outPartIdxRB += puIdx ? m_numPartitions >> 1 : -((int)m_numPartitions >> 3);
+        break;
+    case SIZE_2NxnD:
+        outPartIdxRB += puIdx ? m_numPartitions >> 1 : (m_numPartitions >> 2) + (m_numPartitions >> 3);
+        break;
+    case SIZE_nLx2N:
+        outPartIdxRB += puIdx ? m_numPartitions >> 1 : (m_numPartitions >> 3) + (m_numPartitions >> 4);
+        break;
+    case SIZE_nRx2N:
+        outPartIdxRB += puIdx ? m_numPartitions >> 1 : (m_numPartitions >> 2) + (m_numPartitions >> 3) + (m_numPartitions >> 4);
+        break;
+    default:
+        X265_CHECK(0, "unexpected part index\n");
+        break;
+    }
+    return outPartIdxRB;
+}
+
+bool CUData::hasEqualMotion(uint32_t absPartIdx, const CUData& candCU, uint32_t candAbsPartIdx) const
+{
+    if (m_interDir[absPartIdx] != candCU.m_interDir[candAbsPartIdx])
+        return false;
+
+    for (uint32_t refListIdx = 0; refListIdx < 2; refListIdx++)
+    {
+        if (m_interDir[absPartIdx] & (1 << refListIdx))
+        {
+            if (m_mv[refListIdx][absPartIdx] != candCU.m_mv[refListIdx][candAbsPartIdx] ||
+                m_refIdx[refListIdx][absPartIdx] != candCU.m_refIdx[refListIdx][candAbsPartIdx])
+                return false;
+        }
+    }
+
+    return true;
+}
+
+/* Construct list of merging candidates, returns count */
+uint32_t CUData::getInterMergeCandidates(uint32_t absPartIdx, uint32_t puIdx, MVField(*candMvField)[2], uint8_t* candDir) const
+{
+    uint32_t absPartAddr = m_absIdxInCTU + absPartIdx;
+    const bool isInterB = m_slice->isInterB();
+
+    const uint32_t maxNumMergeCand = m_slice->m_maxNumMergeCand;
+
+    for (uint32_t i = 0; i < maxNumMergeCand; ++i)
+    {
+        candMvField[i][0].mv = 0;
+        candMvField[i][1].mv = 0;
+        candMvField[i][0].refIdx = REF_NOT_VALID;
+        candMvField[i][1].refIdx = REF_NOT_VALID;
+    }
+
+    /* calculate the location of upper-left corner pixel and size of the current PU */
+    int xP, yP, nPSW, nPSH;
+
+    int cuSize = 1 << m_log2CUSize[0];
+    int partMode = m_partSize[0];
+
+    int tmp = partTable[partMode][puIdx][0];
+    nPSW = ((tmp >> 4) * cuSize) >> 2;
+    nPSH = ((tmp & 0xF) * cuSize) >> 2;
+
+    tmp = partTable[partMode][puIdx][1];
+    xP = ((tmp >> 4) * cuSize) >> 2;
+    yP = ((tmp & 0xF) * cuSize) >> 2;
+
+    uint32_t count = 0;
+
+    uint32_t partIdxLT, partIdxRT, partIdxLB = deriveLeftBottomIdx(puIdx);
+    PartSize curPS = (PartSize)m_partSize[absPartIdx];
+    
+    // left
+    uint32_t leftPartIdx = 0;
+    const CUData* cuLeft = getPULeft(leftPartIdx, partIdxLB);
+    bool isAvailableA1 = cuLeft &&
+        cuLeft->isDiffMER(xP - 1, yP + nPSH - 1, xP, yP) &&
+        !(puIdx == 1 && (curPS == SIZE_Nx2N || curPS == SIZE_nLx2N || curPS == SIZE_nRx2N)) &&
+        cuLeft->isInter(leftPartIdx);
+    if (isAvailableA1)
+    {
+        // get Inter Dir
+        candDir[count] = cuLeft->m_interDir[leftPartIdx];
+        // get Mv from Left
+        cuLeft->getMvField(cuLeft, leftPartIdx, 0, candMvField[count][0]);
+        if (isInterB)
+            cuLeft->getMvField(cuLeft, leftPartIdx, 1, candMvField[count][1]);
+
+        if (++count == maxNumMergeCand)
+            return maxNumMergeCand;
+    }
+
+    deriveLeftRightTopIdx(puIdx, partIdxLT, partIdxRT);
+
+    // above
+    uint32_t abovePartIdx = 0;
+    const CUData* cuAbove = getPUAbove(abovePartIdx, partIdxRT);
+    bool isAvailableB1 = cuAbove &&
+        cuAbove->isDiffMER(xP + nPSW - 1, yP - 1, xP, yP) &&
+        !(puIdx == 1 && (curPS == SIZE_2NxN || curPS == SIZE_2NxnU || curPS == SIZE_2NxnD)) &&
+        cuAbove->isInter(abovePartIdx);
+    if (isAvailableB1 && (!isAvailableA1 || !cuLeft->hasEqualMotion(leftPartIdx, *cuAbove, abovePartIdx)))
+    {
+        // get Inter Dir
+        candDir[count] = cuAbove->m_interDir[abovePartIdx];
+        // get Mv from Above
+        cuAbove->getMvField(cuAbove, abovePartIdx, 0, candMvField[count][0]);
+        if (isInterB)
+            cuAbove->getMvField(cuAbove, abovePartIdx, 1, candMvField[count][1]);
+
+        if (++count == maxNumMergeCand)
+            return maxNumMergeCand;
+    }
+
+    // above right
+    uint32_t aboveRightPartIdx = 0;
+    const CUData* cuAboveRight = getPUAboveRight(aboveRightPartIdx, partIdxRT);
+    bool isAvailableB0 = cuAboveRight &&
+        cuAboveRight->isDiffMER(xP + nPSW, yP - 1, xP, yP) &&
+        cuAboveRight->isInter(aboveRightPartIdx);
+    if (isAvailableB0 && (!isAvailableB1 || !cuAbove->hasEqualMotion(abovePartIdx, *cuAboveRight, aboveRightPartIdx)))
+    {
+        // get Inter Dir
+        candDir[count] = cuAboveRight->m_interDir[aboveRightPartIdx];
+        // get Mv from Above Right
+        cuAboveRight->getMvField(cuAboveRight, aboveRightPartIdx, 0, candMvField[count][0]);
+        if (isInterB)
+            cuAboveRight->getMvField(cuAboveRight, aboveRightPartIdx, 1, candMvField[count][1]);
+
+        if (++count == maxNumMergeCand)
+            return maxNumMergeCand;
+    }
+
+    // left bottom
+    uint32_t leftBottomPartIdx = 0;
+    const CUData* cuLeftBottom = this->getPUBelowLeft(leftBottomPartIdx, partIdxLB);
+    bool isAvailableA0 = cuLeftBottom &&
+        cuLeftBottom->isDiffMER(xP - 1, yP + nPSH, xP, yP) &&
+        cuLeftBottom->isInter(leftBottomPartIdx);
+    if (isAvailableA0 && (!isAvailableA1 || !cuLeft->hasEqualMotion(leftPartIdx, *cuLeftBottom, leftBottomPartIdx)))
+    {
+        // get Inter Dir
+        candDir[count] = cuLeftBottom->m_interDir[leftBottomPartIdx];
+        // get Mv from Below Left
+        cuLeftBottom->getMvField(cuLeftBottom, leftBottomPartIdx, 0, candMvField[count][0]);
+        if (isInterB)
+            cuLeftBottom->getMvField(cuLeftBottom, leftBottomPartIdx, 1, candMvField[count][1]);
+
+        if (++count == maxNumMergeCand)
+            return maxNumMergeCand;
+    }
+
+    // above left
+    if (count < 4)
+    {
+        uint32_t aboveLeftPartIdx = 0;
+        const CUData* cuAboveLeft = getPUAboveLeft(aboveLeftPartIdx, absPartAddr);
+        bool isAvailableB2 = cuAboveLeft &&
+            cuAboveLeft->isDiffMER(xP - 1, yP - 1, xP, yP) &&
+            cuAboveLeft->isInter(aboveLeftPartIdx);
+        if (isAvailableB2 && (!isAvailableA1 || !cuLeft->hasEqualMotion(leftPartIdx, *cuAboveLeft, aboveLeftPartIdx))
+            && (!isAvailableB1 || !cuAbove->hasEqualMotion(abovePartIdx, *cuAboveLeft, aboveLeftPartIdx)))
+        {
+            // get Inter Dir
+            candDir[count] = cuAboveLeft->m_interDir[aboveLeftPartIdx];
+            // get Mv from Above Left
+            cuAboveLeft->getMvField(cuAboveLeft, aboveLeftPartIdx, 0, candMvField[count][0]);
+            if (isInterB)
+                cuAboveLeft->getMvField(cuAboveLeft, aboveLeftPartIdx, 1, candMvField[count][1]);
+
+            if (++count == maxNumMergeCand)
+                return maxNumMergeCand;
+        }
+    }
+    if (m_slice->m_sps->bTemporalMVPEnabled)
+    {
+        uint32_t partIdxRB = deriveRightBottomIdx(puIdx);
+        MV colmv;
+        int ctuIdx = -1;
+
+        // image boundary check
+        if (m_encData->getPicCTU(m_cuAddr)->m_cuPelX + g_zscanToPelX[partIdxRB] + UNIT_SIZE < m_slice->m_sps->picWidthInLumaSamples &&
+            m_encData->getPicCTU(m_cuAddr)->m_cuPelY + g_zscanToPelY[partIdxRB] + UNIT_SIZE < m_slice->m_sps->picHeightInLumaSamples)
+        {
+            uint32_t absPartIdxRB = g_zscanToRaster[partIdxRB];
+            uint32_t numUnits = s_numPartInCUSize;
+            bool bNotLastCol = lessThanCol(absPartIdxRB, numUnits - 1, numUnits); // is not at the last column of CTU
+            bool bNotLastRow = lessThanRow(absPartIdxRB, numUnits - 1, numUnits); // is not at the last row    of CTU
+
+            if (bNotLastCol && bNotLastRow)
+            {
+                absPartAddr = g_rasterToZscan[absPartIdxRB + numUnits + 1];
+                ctuIdx = m_cuAddr;
+            }
+            else if (bNotLastCol)
+                absPartAddr = g_rasterToZscan[(absPartIdxRB + numUnits + 1) & (numUnits - 1)];
+            else if (bNotLastRow)
+            {
+                absPartAddr = g_rasterToZscan[absPartIdxRB + 1];
+                ctuIdx = m_cuAddr + 1;
+            }
+            else // is the right bottom corner of CTU
+                absPartAddr = 0;
+        }
+
+        int maxList = isInterB ? 2 : 1;
+        int dir = 0, refIdx = 0;
+        for (int list = 0; list < maxList; list++)
+        {
+            bool bExistMV = ctuIdx >= 0 && getColMVP(colmv, refIdx, list, ctuIdx, absPartAddr);
+            if (!bExistMV)
+            {
+                uint32_t partIdxCenter = deriveCenterIdx(puIdx);
+                bExistMV = getColMVP(colmv, refIdx, list, m_cuAddr, partIdxCenter);
+            }
+            if (bExistMV)
+            {
+                dir |= (1 << list);
+                candMvField[count][list].mv = colmv;
+                candMvField[count][list].refIdx = refIdx;
+            }
+        }
+
+        if (dir != 0)
+        {
+            candDir[count] = (uint8_t)dir;
+
+            if (++count == maxNumMergeCand)
+                return maxNumMergeCand;
+        }
+    }
+
+    if (isInterB)
+    {
+        const uint32_t cutoff = count * (count - 1);
+        uint32_t priorityList0 = 0xEDC984; // { 0, 1, 0, 2, 1, 2, 0, 3, 1, 3, 2, 3 }
+        uint32_t priorityList1 = 0xB73621; // { 1, 0, 2, 0, 2, 1, 3, 0, 3, 1, 3, 2 }
+
+        for (uint32_t idx = 0; idx < cutoff; idx++, priorityList0 >>= 2, priorityList1 >>= 2)
+        {
+            int i = priorityList0 & 3;
+            int j = priorityList1 & 3;
+
+            if ((candDir[i] & 0x1) && (candDir[j] & 0x2))
+            {
+                // get Mv from cand[i] and cand[j]
+                int refIdxL0 = candMvField[i][0].refIdx;
+                int refIdxL1 = candMvField[j][1].refIdx;
+                int refPOCL0 = m_slice->m_refPOCList[0][refIdxL0];
+                int refPOCL1 = m_slice->m_refPOCList[1][refIdxL1];
+                if (!(refPOCL0 == refPOCL1 && candMvField[i][0].mv == candMvField[j][1].mv))
+                {
+                    candMvField[count][0].mv = candMvField[i][0].mv;
+                    candMvField[count][0].refIdx = refIdxL0;
+                    candMvField[count][1].mv = candMvField[j][1].mv;
+                    candMvField[count][1].refIdx = refIdxL1;
+                    candDir[count] = 3;
+
+                    if (++count == maxNumMergeCand)
+                        return maxNumMergeCand;
+                }
+            }
+        }
+    }
+    int numRefIdx = (isInterB) ? X265_MIN(m_slice->m_numRefIdx[0], m_slice->m_numRefIdx[1]) : m_slice->m_numRefIdx[0];
+    int r = 0;
+    int refcnt = 0;
+    while (count < maxNumMergeCand)
+    {
+        candDir[count] = 1;
+        candMvField[count][0].mv.word = 0;
+        candMvField[count][0].refIdx = r;
+
+        if (isInterB)
+        {
+            candDir[count] = 3;
+            candMvField[count][1].mv.word = 0;
+            candMvField[count][1].refIdx = r;
+        }
+
+        count++;
+
+        if (refcnt == numRefIdx - 1)
+            r = 0;
+        else
+        {
+            ++r;
+            ++refcnt;
+        }
+    }
+
+    return count;
+}
+
+// Create the PMV list. Called for each reference index.
+int CUData::getPMV(InterNeighbourMV *neighbours, uint32_t picList, uint32_t refIdx, MV* amvpCand, MV* pmv) const
+{
+    MV directMV[MD_ABOVE_LEFT + 1];
+    MV indirectMV[MD_ABOVE_LEFT + 1];
+    bool validDirect[MD_ABOVE_LEFT + 1];
+    bool validIndirect[MD_ABOVE_LEFT + 1];
+
+    // Left candidate.
+    validDirect[MD_BELOW_LEFT]  = getDirectPMV(directMV[MD_BELOW_LEFT], neighbours + MD_BELOW_LEFT, picList, refIdx);
+    validDirect[MD_LEFT]        = getDirectPMV(directMV[MD_LEFT], neighbours + MD_LEFT, picList, refIdx);
+    // Top candidate.
+    validDirect[MD_ABOVE_RIGHT] = getDirectPMV(directMV[MD_ABOVE_RIGHT], neighbours + MD_ABOVE_RIGHT, picList, refIdx);
+    validDirect[MD_ABOVE]       = getDirectPMV(directMV[MD_ABOVE], neighbours + MD_ABOVE, picList, refIdx);
+    validDirect[MD_ABOVE_LEFT]  = getDirectPMV(directMV[MD_ABOVE_LEFT], neighbours + MD_ABOVE_LEFT, picList, refIdx);
+
+    // Left candidate.
+    validIndirect[MD_BELOW_LEFT]  = getIndirectPMV(indirectMV[MD_BELOW_LEFT], neighbours + MD_BELOW_LEFT, picList, refIdx);
+    validIndirect[MD_LEFT]        = getIndirectPMV(indirectMV[MD_LEFT], neighbours + MD_LEFT, picList, refIdx);
+    // Top candidate.
+    validIndirect[MD_ABOVE_RIGHT] = getIndirectPMV(indirectMV[MD_ABOVE_RIGHT], neighbours + MD_ABOVE_RIGHT, picList, refIdx);
+    validIndirect[MD_ABOVE]       = getIndirectPMV(indirectMV[MD_ABOVE], neighbours + MD_ABOVE, picList, refIdx);
+    validIndirect[MD_ABOVE_LEFT]  = getIndirectPMV(indirectMV[MD_ABOVE_LEFT], neighbours + MD_ABOVE_LEFT, picList, refIdx);
+
+    int num = 0;
+    // Left predictor search
+    if (validDirect[MD_BELOW_LEFT])
+        amvpCand[num++] = directMV[MD_BELOW_LEFT];
+    else if (validDirect[MD_LEFT])
+        amvpCand[num++] = directMV[MD_LEFT];
+    else if (validIndirect[MD_BELOW_LEFT])
+        amvpCand[num++] = indirectMV[MD_BELOW_LEFT];
+    else if (validIndirect[MD_LEFT])
+        amvpCand[num++] = indirectMV[MD_LEFT];
+
+    bool bAddedSmvp = num > 0;
+
+    // Above predictor search
+    if (validDirect[MD_ABOVE_RIGHT])
+        amvpCand[num++] = directMV[MD_ABOVE_RIGHT];
+    else if (validDirect[MD_ABOVE])
+        amvpCand[num++] = directMV[MD_ABOVE];
+    else if (validDirect[MD_ABOVE_LEFT])
+        amvpCand[num++] = directMV[MD_ABOVE_LEFT];
+
+    if (!bAddedSmvp)
+    {
+        if (validIndirect[MD_ABOVE_RIGHT])
+            amvpCand[num++] = indirectMV[MD_ABOVE_RIGHT];
+        else if (validIndirect[MD_ABOVE])
+            amvpCand[num++] = indirectMV[MD_ABOVE];
+        else if (validIndirect[MD_ABOVE_LEFT])
+            amvpCand[num++] = indirectMV[MD_ABOVE_LEFT];
+    }
+
+    int numMvc = 0;
+    for (int dir = MD_LEFT; dir <= MD_ABOVE_LEFT; dir++)
+    {
+        if (validDirect[dir] && directMV[dir].notZero())
+            pmv[numMvc++] = directMV[dir];
+
+        if (validIndirect[dir] && indirectMV[dir].notZero())
+            pmv[numMvc++] = indirectMV[dir];
+    }
+
+    if (num == 2)
+        num -= amvpCand[0] == amvpCand[1];
+
+    // Get the collocated candidate. At this step, either the first candidate
+    // was found or its value is 0.
+    if (m_slice->m_sps->bTemporalMVPEnabled && num < 2)
+    {
+        int tempRefIdx = neighbours[MD_COLLOCATED].refIdx[picList];
+        if (tempRefIdx != -1)
+        {
+            uint32_t cuAddr = neighbours[MD_COLLOCATED].cuAddr[picList];
+            const Frame* colPic = m_slice->m_refFrameList[m_slice->isInterB() && !m_slice->m_colFromL0Flag][m_slice->m_colRefIdx];
+            const CUData* colCU = colPic->m_encData->getPicCTU(cuAddr);
+
+            // Scale the vector
+            int colRefPOC = colCU->m_slice->m_refPOCList[tempRefIdx >> 4][tempRefIdx & 0xf];
+            int colPOC = colCU->m_slice->m_poc;
+
+            int curRefPOC = m_slice->m_refPOCList[picList][refIdx];
+            int curPOC = m_slice->m_poc;
+
+            pmv[numMvc++] = amvpCand[num++] = scaleMvByPOCDist(neighbours[MD_COLLOCATED].mv[picList], curPOC, curRefPOC, colPOC, colRefPOC);
+        }
+    }
+
+    while (num < AMVP_NUM_CANDS)
+        amvpCand[num++] = 0;
+
+    return numMvc;
+}
+
+/* Constructs a list of candidates for AMVP, and a larger list of motion candidates */
+void CUData::getNeighbourMV(uint32_t puIdx, uint32_t absPartIdx, InterNeighbourMV* neighbours) const
+{
+    // Set the temporal neighbour to unavailable by default.
+    neighbours[MD_COLLOCATED].unifiedRef = -1;
+
+    uint32_t partIdxLT, partIdxRT, partIdxLB = deriveLeftBottomIdx(puIdx);
+    deriveLeftRightTopIdx(puIdx, partIdxLT, partIdxRT);
+
+    // Load the spatial MVs.
+    getInterNeighbourMV(neighbours + MD_BELOW_LEFT, partIdxLB, MD_BELOW_LEFT);
+    getInterNeighbourMV(neighbours + MD_LEFT,       partIdxLB, MD_LEFT);
+    getInterNeighbourMV(neighbours + MD_ABOVE_RIGHT,partIdxRT, MD_ABOVE_RIGHT);
+    getInterNeighbourMV(neighbours + MD_ABOVE,      partIdxRT, MD_ABOVE);
+    getInterNeighbourMV(neighbours + MD_ABOVE_LEFT, partIdxLT, MD_ABOVE_LEFT);
+
+    if (m_slice->m_sps->bTemporalMVPEnabled)
+    {
+        uint32_t absPartAddr = m_absIdxInCTU + absPartIdx;
+        uint32_t partIdxRB = deriveRightBottomIdx(puIdx);
+
+        // co-located RightBottom temporal predictor (H)
+        int ctuIdx = -1;
+
+        // image boundary check
+        if (m_encData->getPicCTU(m_cuAddr)->m_cuPelX + g_zscanToPelX[partIdxRB] + UNIT_SIZE < m_slice->m_sps->picWidthInLumaSamples &&
+            m_encData->getPicCTU(m_cuAddr)->m_cuPelY + g_zscanToPelY[partIdxRB] + UNIT_SIZE < m_slice->m_sps->picHeightInLumaSamples)
+        {
+            uint32_t absPartIdxRB = g_zscanToRaster[partIdxRB];
+            uint32_t numUnits = s_numPartInCUSize;
+            bool bNotLastCol = lessThanCol(absPartIdxRB, numUnits - 1, numUnits); // is not at the last column of CTU
+            bool bNotLastRow = lessThanRow(absPartIdxRB, numUnits - 1, numUnits); // is not at the last row    of CTU
+
+            if (bNotLastCol && bNotLastRow)
+            {
+                absPartAddr = g_rasterToZscan[absPartIdxRB + numUnits + 1];
+                ctuIdx = m_cuAddr;
+            }
+            else if (bNotLastCol)
+                absPartAddr = g_rasterToZscan[(absPartIdxRB + numUnits + 1) & (numUnits - 1)];
+            else if (bNotLastRow)
+            {
+                absPartAddr = g_rasterToZscan[absPartIdxRB + 1];
+                ctuIdx = m_cuAddr + 1;
+            }
+            else // is the right bottom corner of CTU
+                absPartAddr = 0;
+        }
+
+        if (!(ctuIdx >= 0 && getCollocatedMV(ctuIdx, absPartAddr, neighbours + MD_COLLOCATED)))
+        {
+            uint32_t partIdxCenter =  deriveCenterIdx(puIdx);
+            uint32_t curCTUIdx = m_cuAddr;
+            getCollocatedMV(curCTUIdx, partIdxCenter, neighbours + MD_COLLOCATED);
+        }
+    }
+}
+
+void CUData::getInterNeighbourMV(InterNeighbourMV *neighbour, uint32_t partUnitIdx, MVP_DIR dir) const
+{
+    const CUData* tmpCU = NULL;
+    uint32_t idx = 0;
+
+    switch (dir)
+    {
+    case MD_LEFT:
+        tmpCU = getPULeft(idx, partUnitIdx);
+        break;
+    case MD_ABOVE:
+        tmpCU = getPUAbove(idx, partUnitIdx);
+        break;
+    case MD_ABOVE_RIGHT:
+        tmpCU = getPUAboveRight(idx, partUnitIdx);
+        break;
+    case MD_BELOW_LEFT:
+        tmpCU = getPUBelowLeft(idx, partUnitIdx);
+        break;
+    case MD_ABOVE_LEFT:
+        tmpCU = getPUAboveLeft(idx, partUnitIdx);
+        break;
+    default:
+        break;
+    }
+
+    if (!tmpCU)
+    {
+        // Mark the PMV as unavailable.
+        for (int i = 0; i < 2; i++)
+            neighbour->refIdx[i] = -1;
+        return;
+    }
+
+    for (int i = 0; i < 2; i++)
+    {
+        // Get the MV.
+        neighbour->mv[i] = tmpCU->m_mv[i][idx];
+
+        // Get the reference idx.
+        neighbour->refIdx[i] = tmpCU->m_refIdx[i][idx];
+    }
+}
+
+/* Clip motion vector to within slightly padded boundary of picture (the
+ * MV may reference a block that is completely within the padded area).
+ * Note this function is unaware of how much of this picture is actually
+ * available for use (re: frame parallelism) */
+void CUData::clipMv(MV& outMV) const
+{
+    const uint32_t mvshift = 2;
+    uint32_t offset = 8;
+
+    int16_t xmax = (int16_t)((m_slice->m_sps->picWidthInLumaSamples + offset - m_cuPelX - 1) << mvshift);
+    int16_t xmin = -(int16_t)((g_maxCUSize + offset + m_cuPelX - 1) << mvshift);
+
+    int16_t ymax = (int16_t)((m_slice->m_sps->picHeightInLumaSamples + offset - m_cuPelY - 1) << mvshift);
+    int16_t ymin = -(int16_t)((g_maxCUSize + offset + m_cuPelY - 1) << mvshift);
+
+    outMV.x = X265_MIN(xmax, X265_MAX(xmin, outMV.x));
+    outMV.y = X265_MIN(ymax, X265_MAX(ymin, outMV.y));
+}
+
+// Load direct spatial MV if available.
+bool CUData::getDirectPMV(MV& pmv, InterNeighbourMV *neighbours, uint32_t picList, uint32_t refIdx) const
+{
+    int curRefPOC = m_slice->m_refPOCList[picList][refIdx];
+    for (int i = 0; i < 2; i++, picList = !picList)
+    {
+        int partRefIdx = neighbours->refIdx[picList];
+        if (partRefIdx >= 0 && curRefPOC == m_slice->m_refPOCList[picList][partRefIdx])
+        {
+            pmv = neighbours->mv[picList];
+            return true;
+        }
+    }
+    return false;
+}
+
+// Load indirect spatial MV if available. An indirect MV has to be scaled.
+bool CUData::getIndirectPMV(MV& outMV, InterNeighbourMV *neighbours, uint32_t picList, uint32_t refIdx) const
+{
+    int curPOC = m_slice->m_poc;
+    int neibPOC = curPOC;
+    int curRefPOC = m_slice->m_refPOCList[picList][refIdx];
+
+    for (int i = 0; i < 2; i++, picList = !picList)
+    {
+        int partRefIdx = neighbours->refIdx[picList];
+        if (partRefIdx >= 0)
+        {
+            int neibRefPOC = m_slice->m_refPOCList[picList][partRefIdx];
+            MV mvp = neighbours->mv[picList];
+
+            outMV = scaleMvByPOCDist(mvp, curPOC, curRefPOC, neibPOC, neibRefPOC);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool CUData::getColMVP(MV& outMV, int& outRefIdx, int picList, int cuAddr, int partUnitIdx) const
+{
+    const Frame* colPic = m_slice->m_refFrameList[m_slice->isInterB() && !m_slice->m_colFromL0Flag][m_slice->m_colRefIdx];
+    const CUData* colCU = colPic->m_encData->getPicCTU(cuAddr);
+
+    uint32_t absPartAddr = partUnitIdx & TMVP_UNIT_MASK;
+    if (colCU->m_predMode[partUnitIdx] == MODE_NONE || colCU->isIntra(absPartAddr))
+        return false;
+
+    int colRefPicList = m_slice->m_bCheckLDC ? picList : m_slice->m_colFromL0Flag;
+
+    int colRefIdx = colCU->m_refIdx[colRefPicList][absPartAddr];
+
+    if (colRefIdx < 0)
+    {
+        colRefPicList = !colRefPicList;
+        colRefIdx = colCU->m_refIdx[colRefPicList][absPartAddr];
+
+        if (colRefIdx < 0)
+            return false;
+    }
+
+    // Scale the vector
+    int colRefPOC = colCU->m_slice->m_refPOCList[colRefPicList][colRefIdx];
+    int colPOC = colCU->m_slice->m_poc;
+    MV colmv = colCU->m_mv[colRefPicList][absPartAddr];
+
+    int curRefPOC = m_slice->m_refPOCList[picList][outRefIdx];
+    int curPOC = m_slice->m_poc;
+
+    outMV = scaleMvByPOCDist(colmv, curPOC, curRefPOC, colPOC, colRefPOC);
+    return true;
+}
+
+// Cache the collocated MV.
+bool CUData::getCollocatedMV(int cuAddr, int partUnitIdx, InterNeighbourMV *neighbour) const
+{
+    const Frame* colPic = m_slice->m_refFrameList[m_slice->isInterB() && !m_slice->m_colFromL0Flag][m_slice->m_colRefIdx];
+    const CUData* colCU = colPic->m_encData->getPicCTU(cuAddr);
+
+    uint32_t absPartAddr = partUnitIdx & TMVP_UNIT_MASK;
+    if (colCU->m_predMode[partUnitIdx] == MODE_NONE || colCU->isIntra(absPartAddr))
+        return false;
+
+    for (int list = 0; list < 2; list++)
+    {
+        neighbour->cuAddr[list] = cuAddr;
+        int colRefPicList = m_slice->m_bCheckLDC ? list : m_slice->m_colFromL0Flag;
+        int colRefIdx = colCU->m_refIdx[colRefPicList][absPartAddr];
+
+        if (colRefIdx < 0)
+            colRefPicList = !colRefPicList;
+
+        neighbour->refIdx[list] = colCU->m_refIdx[colRefPicList][absPartAddr];
+        neighbour->refIdx[list] |= colRefPicList << 4;
+
+        neighbour->mv[list] = colCU->m_mv[colRefPicList][absPartAddr];
+    }
+
+    return neighbour->unifiedRef != -1;
+}
+
+MV CUData::scaleMvByPOCDist(const MV& inMV, int curPOC, int curRefPOC, int colPOC, int colRefPOC) const
+{
+    int diffPocD = colPOC - colRefPOC;
+    int diffPocB = curPOC - curRefPOC;
+
+    if (diffPocD == diffPocB)
+        return inMV;
+    else
+    {
+        int tdb   = x265_clip3(-128, 127, diffPocB);
+        int tdd   = x265_clip3(-128, 127, diffPocD);
+        int x     = (0x4000 + abs(tdd / 2)) / tdd;
+        int scale = x265_clip3(-4096, 4095, (tdb * x + 32) >> 6);
+        return scaleMv(inMV, scale);
+    }
+}
+
+uint32_t CUData::deriveCenterIdx(uint32_t puIdx) const
+{
+    uint32_t absPartIdx;
+    int puWidth, puHeight;
+
+    getPartIndexAndSize(puIdx, absPartIdx, puWidth, puHeight);
+
+    return g_rasterToZscan[g_zscanToRaster[m_absIdxInCTU + absPartIdx]
+                           + (puHeight >> (LOG2_UNIT_SIZE + 1)) * s_numPartInCUSize
+                           + (puWidth  >> (LOG2_UNIT_SIZE + 1))];
+}
+
+void CUData::getTUEntropyCodingParameters(TUEntropyCodingParameters &result, uint32_t absPartIdx, uint32_t log2TrSize, bool bIsLuma) const
+{
+    bool bIsIntra = isIntra(absPartIdx);
+
+    // set the group layout
+    result.log2TrSizeCG = log2TrSize - 2;
+
+    // set the scan orders
+    if (bIsIntra)
+    {
+        uint32_t dirMode;
+
+        if (bIsLuma)
+            dirMode = m_lumaIntraDir[absPartIdx];
+        else
+        {
+            dirMode = m_chromaIntraDir[absPartIdx];
+            if (dirMode == DM_CHROMA_IDX)
+            {
+                dirMode = m_lumaIntraDir[(m_chromaFormat == X265_CSP_I444) ? absPartIdx : absPartIdx & 0xFC];
+                dirMode = (m_chromaFormat == X265_CSP_I422) ? g_chroma422IntraAngleMappingTable[dirMode] : dirMode;
+            }
+        }
+
+        if (log2TrSize <= (MDCS_LOG2_MAX_SIZE - m_hChromaShift) || (bIsLuma && log2TrSize == MDCS_LOG2_MAX_SIZE))
+            result.scanType = dirMode >= 22 && dirMode <= 30 ? SCAN_HOR : dirMode >= 6 && dirMode <= 14 ? SCAN_VER : SCAN_DIAG;
+        else
+            result.scanType = SCAN_DIAG;
+    }
+    else
+        result.scanType = SCAN_DIAG;
+
+    result.scan     = g_scanOrder[result.scanType][log2TrSize - 2];
+    result.scanCG   = g_scanOrderCG[result.scanType][result.log2TrSizeCG];
+
+    if (log2TrSize == 2)
+        result.firstSignificanceMapContext = 0;
+    else if (log2TrSize == 3)
+        result.firstSignificanceMapContext = (result.scanType != SCAN_DIAG && bIsLuma) ? 15 : 9;
+    else
+        result.firstSignificanceMapContext = bIsLuma ? 21 : 12;
+}
+
+#define CU_SET_FLAG(bitfield, flag, value) (bitfield) = ((bitfield) & (~(flag))) | ((~((value) - 1)) & (flag))
+
+void CUData::calcCTUGeoms(uint32_t ctuWidth, uint32_t ctuHeight, uint32_t maxCUSize, uint32_t minCUSize, CUGeom cuDataArray[CUGeom::MAX_GEOMS])
+{
+    // Initialize the coding blocks inside the CTB
+    for (uint32_t log2CUSize = g_log2Size[maxCUSize], rangeCUIdx = 0; log2CUSize >= g_log2Size[minCUSize]; log2CUSize--)
+    {
+        uint32_t blockSize = 1 << log2CUSize;
+        uint32_t sbWidth   = 1 << (g_log2Size[maxCUSize] - log2CUSize);
+        int32_t lastLevelFlag = log2CUSize == g_log2Size[minCUSize];
+
+        for (uint32_t sbY = 0; sbY < sbWidth; sbY++)
+        {
+            for (uint32_t sbX = 0; sbX < sbWidth; sbX++)
+            {
+                uint32_t depthIdx = g_depthScanIdx[sbY][sbX];
+                uint32_t cuIdx = rangeCUIdx + depthIdx;
+                uint32_t childIdx = rangeCUIdx + sbWidth * sbWidth + (depthIdx << 2);
+                uint32_t px = sbX * blockSize;
+                uint32_t py = sbY * blockSize;
+                int32_t presentFlag = px < ctuWidth && py < ctuHeight;
+                int32_t splitMandatoryFlag = presentFlag && !lastLevelFlag && (px + blockSize > ctuWidth || py + blockSize > ctuHeight);
+                
+                /* Offset of the luma CU in the X, Y direction in terms of pixels from the CTU origin */
+                uint32_t xOffset = (sbX * blockSize) >> 3;
+                uint32_t yOffset = (sbY * blockSize) >> 3;
+                X265_CHECK(cuIdx < CUGeom::MAX_GEOMS, "CU geom index bug\n");
+
+                CUGeom *cu = cuDataArray + cuIdx;
+                cu->log2CUSize = log2CUSize;
+                cu->childOffset = childIdx - cuIdx;
+                cu->absPartIdx = g_depthScanIdx[yOffset][xOffset] * 4;
+                cu->numPartitions = (NUM_4x4_PARTITIONS >> ((g_maxLog2CUSize - cu->log2CUSize) * 2));
+                cu->depth = g_log2Size[maxCUSize] - log2CUSize;
+
+                cu->flags = 0;
+                CU_SET_FLAG(cu->flags, CUGeom::PRESENT, presentFlag);
+                CU_SET_FLAG(cu->flags, CUGeom::SPLIT_MANDATORY | CUGeom::SPLIT, splitMandatoryFlag);
+                CU_SET_FLAG(cu->flags, CUGeom::LEAF, lastLevelFlag);
+            }
+        }
+        rangeCUIdx += sbWidth * sbWidth;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/cudata.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,362 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_CUDATA_H
+#define X265_CUDATA_H
+
+#include "common.h"
+#include "slice.h"
+#include "mv.h"
+
+namespace X265_NS {
+// private namespace
+
+class FrameData;
+class Slice;
+struct TUEntropyCodingParameters;
+struct CUDataMemPool;
+
+// Prediction-unit partitioning modes of a CU (2N = CU width/height).
+// The enumerator order indexes nbPartsTable/partTable/partAddrTable below.
+enum PartSize
+{
+    SIZE_2Nx2N, // symmetric motion partition,  2Nx2N
+    SIZE_2NxN,  // symmetric motion partition,  2Nx N
+    SIZE_Nx2N,  // symmetric motion partition,   Nx2N
+    SIZE_NxN,   // symmetric motion partition,   Nx N
+    SIZE_2NxnU, // asymmetric motion partition, 2Nx( N/2) + 2Nx(3N/2)
+    SIZE_2NxnD, // asymmetric motion partition, 2Nx(3N/2) + 2Nx( N/2)
+    SIZE_nLx2N, // asymmetric motion partition, ( N/2)x2N + (3N/2)x2N
+    SIZE_nRx2N, // asymmetric motion partition, (3N/2)x2N + ( N/2)x2N
+    NUM_SIZES
+};
+
+// CU prediction modes, defined as flag bits: MODE_SKIP includes the
+// MODE_INTER bit, so (predMode & MODE_INTER) matches both (see
+// CUData::isInter() / isSkipped() below).
+enum PredMode
+{
+    MODE_NONE  = 0,
+    MODE_INTER = (1 << 0),
+    MODE_INTRA = (1 << 1),
+    MODE_SKIP  = (1 << 2) | MODE_INTER
+};
+
+// motion vector predictor direction used in AMVP
+// (spatial candidates come first; MD_COLLOCATED is the temporal candidate)
+enum MVP_DIR
+{
+    MD_LEFT = 0,    // MVP of left block
+    MD_ABOVE,       // MVP of above block
+    MD_ABOVE_RIGHT, // MVP of above right block
+    MD_BELOW_LEFT,  // MVP of below left block
+    MD_ABOVE_LEFT,  // MVP of above left block
+    MD_COLLOCATED   // MVP of temporal neighbour
+};
+
+// Per-CU geometry within a CTU, precomputed by CUData::calcCTUGeoms();
+// one entry exists for every possible CU position/size in the CTU.
+struct CUGeom
+{
+    enum {
+        INTRA           = 1<<0, // CU is intra predicted
+        PRESENT         = 1<<1, // CU is not completely outside the frame
+        SPLIT_MANDATORY = 1<<2, // CU split is mandatory if CU is inside frame and can be split
+        LEAF            = 1<<3, // CU is a leaf node of the CTU
+        SPLIT           = 1<<4, // CU is currently split in four child CUs.
+    };
+    
+    // (1 + 4 + 16 + 64) = 85: one geom per CU at each of the four depths.
+    enum { MAX_GEOMS = 85 };
+
+    uint32_t log2CUSize;    // Log of the CU size.
+    uint32_t childOffset;   // offset of the first child CU from current CU
+    uint32_t absPartIdx;    // Part index of this CU in terms of 4x4 blocks.
+    uint32_t numPartitions; // Number of 4x4 blocks in the CU
+    uint32_t flags;         // CU flags.
+    uint32_t depth;         // depth of this CU relative from CTU
+};
+
+// A motion vector together with the reference picture index it refers to.
+struct MVField
+{
+    MV  mv;
+    int refIdx;
+};
+
+// Structure that keeps the neighbour's MV information.
+struct InterNeighbourMV
+{
+    // Neighbour MV. The index represents the list.
+    MV mv[2];
+
+    // Collocated right bottom CU addr.
+    uint32_t cuAddr[2];
+
+    // For spatial prediction, this field contains the reference index
+    // in each list (-1 if not available).
+    //
+    // For temporal prediction, the first value is used for the
+    // prediction with list 0. The second value is used for the prediction
+    // with list 1. For each value, the first four bits are the reference index
+    // associated to the PMV, and the fifth bit is the list associated to the PMV.
+    // If both reference indices are -1, then unifiedRef is also -1, so the
+    // availability of both lists can be tested with a single 32-bit compare.
+    union { int16_t refIdx[2]; int32_t unifiedRef; };
+};
+
+// Primitive function-pointer types used to copy/broadcast the contiguous
+// per-part CU arrays (see CUData below).
+typedef void(*cucopy_t)(uint8_t* dst, uint8_t* src); // dst and src are aligned to MIN(size, 32)
+typedef void(*cubcast_t)(uint8_t* dst, uint8_t val); // dst is aligned to MIN(size, 32)
+
+// Partition count table, index represents partitioning mode (PartSize);
+// e.g. nbPartsTable[SIZE_NxN] == 4 prediction units.
+const uint32_t nbPartsTable[8] = { 1, 2, 2, 4, 2, 2, 2, 2 };
+
+// Partition table.
+// First index is partitioning mode. Second index is partition index.
+// Third index is 0 for partition sizes, 1 for partition offsets. The
+// sizes and offsets are encoded as two packed 4-bit values (X,Y).
+// X and Y represent 1/4 fractions of the block size.
+// e.g. 0x42 (SIZE_2NxN, part 0): X = 4/4 of the width, Y = 2/4 of the height.
+const uint32_t partTable[8][4][2] =
+{
+    //        XY
+    { { 0x44, 0x00 }, { 0x00, 0x00 }, { 0x00, 0x00 }, { 0x00, 0x00 } }, // SIZE_2Nx2N.
+    { { 0x42, 0x00 }, { 0x42, 0x02 }, { 0x00, 0x00 }, { 0x00, 0x00 } }, // SIZE_2NxN.
+    { { 0x24, 0x00 }, { 0x24, 0x20 }, { 0x00, 0x00 }, { 0x00, 0x00 } }, // SIZE_Nx2N.
+    { { 0x22, 0x00 }, { 0x22, 0x20 }, { 0x22, 0x02 }, { 0x22, 0x22 } }, // SIZE_NxN.
+    { { 0x41, 0x00 }, { 0x43, 0x01 }, { 0x00, 0x00 }, { 0x00, 0x00 } }, // SIZE_2NxnU.
+    { { 0x43, 0x00 }, { 0x41, 0x03 }, { 0x00, 0x00 }, { 0x00, 0x00 } }, // SIZE_2NxnD.
+    { { 0x14, 0x00 }, { 0x34, 0x10 }, { 0x00, 0x00 }, { 0x00, 0x00 } }, // SIZE_nLx2N.
+    { { 0x34, 0x00 }, { 0x14, 0x30 }, { 0x00, 0x00 }, { 0x00, 0x00 } }  // SIZE_nRx2N.
+};
+
+// Partition Address table.
+// First index is partitioning mode. Second index is partition address.
+// Entries are part-unit offsets, scaled per CU depth in CUData::getPUOffset().
+const uint32_t partAddrTable[8][4] =
+{
+    { 0x00, 0x00, 0x00, 0x00 }, // SIZE_2Nx2N.
+    { 0x00, 0x08, 0x08, 0x08 }, // SIZE_2NxN.
+    { 0x00, 0x04, 0x04, 0x04 }, // SIZE_Nx2N.
+    { 0x00, 0x04, 0x08, 0x0C }, // SIZE_NxN.
+    { 0x00, 0x02, 0x02, 0x02 }, // SIZE_2NxnU.
+    { 0x00, 0x0A, 0x0A, 0x0A }, // SIZE_2NxnD.
+    { 0x00, 0x01, 0x01, 0x01 }, // SIZE_nLx2N.
+    { 0x00, 0x05, 0x05, 0x05 }  // SIZE_nRx2N.
+};
+
+// Holds part data for a CU of a given size, from an 8x8 CU to a CTU.
+// Per-part (m_*) arrays are indexed by absPartIdx, the Z-scan order index
+// of 4x4 blocks within the CTU (see m_absIdxInCTU below).
+class CUData
+{
+public:
+
+    static cubcast_t s_partSet[NUM_FULL_DEPTH]; // pointer to broadcast set functions per absolute depth
+    static uint32_t  s_numPartInCUSize;
+
+    FrameData*    m_encData;
+    const Slice*  m_slice;
+
+    cucopy_t      m_partCopy;         // pointer to function that copies m_numPartitions elements
+    cubcast_t     m_partSet;          // pointer to function that sets m_numPartitions elements
+    cucopy_t      m_subPartCopy;      // pointer to function that copies m_numPartitions/4 elements, may be NULL
+    cubcast_t     m_subPartSet;       // pointer to function that sets m_numPartitions/4 elements, may be NULL
+
+    uint32_t      m_cuAddr;           // address of CTU within the picture in raster order
+    uint32_t      m_absIdxInCTU;      // address of CU within its CTU in Z scan order
+    uint32_t      m_cuPelX;           // CU position within the picture, in pixels (X)
+    uint32_t      m_cuPelY;           // CU position within the picture, in pixels (Y)
+    uint32_t      m_numPartitions;    // maximum number of 4x4 partitions within this CU
+
+    uint32_t      m_chromaFormat;
+    uint32_t      m_hChromaShift;
+    uint32_t      m_vChromaShift;
+
+    /* Per-part data, stored contiguously */
+    int8_t*       m_qp;               // array of QP values
+    uint8_t*      m_log2CUSize;       // array of cu log2Size TODO: seems redundant to depth
+    uint8_t*      m_lumaIntraDir;     // array of intra directions (luma)
+    uint8_t*      m_tqBypass;         // array of CU lossless flags
+    int8_t*       m_refIdx[2];        // array of motion reference indices per list
+    uint8_t*      m_cuDepth;          // array of depths
+    uint8_t*      m_predMode;         // array of prediction modes
+    uint8_t*      m_partSize;         // array of partition sizes
+    uint8_t*      m_mergeFlag;        // array of merge flags
+    uint8_t*      m_interDir;         // array of inter directions
+    uint8_t*      m_mvpIdx[2];        // array of motion vector predictor candidates or merge candidate indices [0]
+    uint8_t*      m_tuDepth;          // array of transform indices
+    uint8_t*      m_transformSkip[3]; // array of transform skipping flags per plane
+    uint8_t*      m_cbf[3];           // array of coded block flags (CBF) per plane
+    uint8_t*      m_chromaIntraDir;   // array of intra directions (chroma)
+    // NOTE(review): keep BytesPerPartition in sync with the per-part arrays
+    // above; CUDataMemPool::create() sizes charMemBlock from it.
+    enum { BytesPerPartition = 21 };  // combined sizeof() of all per-part data
+
+    coeff_t*      m_trCoeff[3];       // transformed coefficient buffer per plane
+
+    MV*           m_mv[2];            // array of motion vectors per list
+    MV*           m_mvd[2];           // array of coded motion vector deltas per list
+    enum { TMVP_UNIT_MASK = 0xF0 };  // mask for mapping index to into a compressed (reference) MV field
+
+    const CUData* m_cuAboveLeft;      // pointer to above-left neighbor CTU
+    const CUData* m_cuAboveRight;     // pointer to above-right neighbor CTU
+    const CUData* m_cuAbove;          // pointer to above neighbor CTU
+    const CUData* m_cuLeft;           // pointer to left neighbor CTU
+
+    CUData();
+
+    void     initialize(const CUDataMemPool& dataPool, uint32_t depth, int csp, int instance);
+    static void calcCTUGeoms(uint32_t ctuWidth, uint32_t ctuHeight, uint32_t maxCUSize, uint32_t minCUSize, CUGeom cuDataArray[CUGeom::MAX_GEOMS]);
+
+    void     initCTU(const Frame& frame, uint32_t cuAddr, int qp);
+    void     initSubCU(const CUData& ctu, const CUGeom& cuGeom, int qp);
+    void     initLosslessCU(const CUData& cu, const CUGeom& cuGeom);
+
+    void     copyPartFrom(const CUData& cu, const CUGeom& childGeom, uint32_t subPartIdx);
+    void     setEmptyPart(const CUGeom& childGeom, uint32_t subPartIdx);
+    void     copyToPic(uint32_t depth) const;
+
+    /* RD-0 methods called only from encodeResidue */
+    void     copyFromPic(const CUData& ctu, const CUGeom& cuGeom);
+    void     updatePic(uint32_t depth) const;
+
+    void     setPartSizeSubParts(PartSize size)    { m_partSet(m_partSize, (uint8_t)size); }
+    void     setPredModeSubParts(PredMode mode)    { m_partSet(m_predMode, (uint8_t)mode); }
+    void     clearCbf()                            { m_partSet(m_cbf[0], 0); m_partSet(m_cbf[1], 0); m_partSet(m_cbf[2], 0); }
+
+    /* these functions all take depth as an absolute depth from CTU, it is used to calculate the number of parts to copy */
+    void     setQPSubParts(int8_t qp, uint32_t absPartIdx, uint32_t depth)                    { s_partSet[depth]((uint8_t*)m_qp + absPartIdx, (uint8_t)qp); }
+    void     setTUDepthSubParts(uint8_t tuDepth, uint32_t absPartIdx, uint32_t depth)         { s_partSet[depth](m_tuDepth + absPartIdx, tuDepth); }
+    void     setLumaIntraDirSubParts(uint8_t dir, uint32_t absPartIdx, uint32_t depth)        { s_partSet[depth](m_lumaIntraDir + absPartIdx, dir); }
+    void     setChromIntraDirSubParts(uint8_t dir, uint32_t absPartIdx, uint32_t depth)       { s_partSet[depth](m_chromaIntraDir + absPartIdx, dir); }
+    void     setCbfSubParts(uint8_t cbf, TextType ttype, uint32_t absPartIdx, uint32_t depth) { s_partSet[depth](m_cbf[ttype] + absPartIdx, cbf); }
+    void     setCbfPartRange(uint8_t cbf, TextType ttype, uint32_t absPartIdx, uint32_t coveredPartIdxes) { memset(m_cbf[ttype] + absPartIdx, cbf, coveredPartIdxes); }
+    void     setTransformSkipSubParts(uint8_t tskip, TextType ttype, uint32_t absPartIdx, uint32_t depth) { s_partSet[depth](m_transformSkip[ttype] + absPartIdx, tskip); }
+    void     setTransformSkipPartRange(uint8_t tskip, TextType ttype, uint32_t absPartIdx, uint32_t coveredPartIdxes) { memset(m_transformSkip[ttype] + absPartIdx, tskip, coveredPartIdxes); }
+
+    bool     setQPSubCUs(int8_t qp, uint32_t absPartIdx, uint32_t depth);
+
+    void     setPUInterDir(uint8_t dir, uint32_t absPartIdx, uint32_t puIdx);
+    void     setPUMv(int list, const MV& mv, int absPartIdx, int puIdx);
+    void     setPURefIdx(int list, int8_t refIdx, int absPartIdx, int puIdx);
+
+    uint8_t  getCbf(uint32_t absPartIdx, TextType ttype, uint32_t tuDepth) const { return (m_cbf[ttype][absPartIdx] >> tuDepth) & 0x1; }
+    uint8_t  getQtRootCbf(uint32_t absPartIdx) const                             { return m_cbf[0][absPartIdx] || m_cbf[1][absPartIdx] || m_cbf[2][absPartIdx]; }
+    int8_t   getRefQP(uint32_t currAbsIdxInCTU) const;
+    uint32_t getInterMergeCandidates(uint32_t absPartIdx, uint32_t puIdx, MVField (*candMvField)[2], uint8_t* candDir) const;
+    void     clipMv(MV& outMV) const;
+    int      getPMV(InterNeighbourMV *neighbours, uint32_t reference_list, uint32_t refIdx, MV* amvpCand, MV* pmv) const;
+    void     getNeighbourMV(uint32_t puIdx, uint32_t absPartIdx, InterNeighbourMV* neighbours) const;
+    void     getIntraTUQtDepthRange(uint32_t tuDepthRange[2], uint32_t absPartIdx) const;
+    void     getInterTUQtDepthRange(uint32_t tuDepthRange[2], uint32_t absPartIdx) const;
+    // packs both lists' usable refIdx into one bitmask (list 1 offset by 16)
+    uint32_t getBestRefIdx(uint32_t subPartIdx) const { return ((m_interDir[subPartIdx] & 1) << m_refIdx[0][subPartIdx]) | 
+                                                              (((m_interDir[subPartIdx] >> 1) & 1) << (m_refIdx[1][subPartIdx] + 16)); }
+    uint32_t getPUOffset(uint32_t puIdx, uint32_t absPartIdx) const { return (partAddrTable[(int)m_partSize[absPartIdx]][puIdx] << (g_unitSizeDepth - m_cuDepth[absPartIdx]) * 2) >> 4; }
+
+    uint32_t getNumPartInter(uint32_t absPartIdx) const              { return nbPartsTable[(int)m_partSize[absPartIdx]]; }
+    bool     isIntra(uint32_t absPartIdx) const   { return m_predMode[absPartIdx] == MODE_INTRA; }
+    bool     isInter(uint32_t absPartIdx) const   { return !!(m_predMode[absPartIdx] & MODE_INTER); }
+    bool     isSkipped(uint32_t absPartIdx) const { return m_predMode[absPartIdx] == MODE_SKIP; }
+    bool     isBipredRestriction() const          { return m_log2CUSize[0] == 3 && m_partSize[0] != SIZE_2Nx2N; }
+
+    void     getPartIndexAndSize(uint32_t puIdx, uint32_t& absPartIdx, int& puWidth, int& puHeight) const;
+    void     getMvField(const CUData* cu, uint32_t absPartIdx, int picList, MVField& mvField) const;
+
+    void     getAllowedChromaDir(uint32_t absPartIdx, uint32_t* modeList) const;
+    int      getIntraDirLumaPredictor(uint32_t absPartIdx, uint32_t* intraDirPred) const;
+
+    uint32_t getSCUAddr() const                  { return (m_cuAddr << g_unitSizeDepth * 2) + m_absIdxInCTU; }
+    uint32_t getCtxSplitFlag(uint32_t absPartIdx, uint32_t depth) const;
+    uint32_t getCtxSkipFlag(uint32_t absPartIdx) const;
+    void     getTUEntropyCodingParameters(TUEntropyCodingParameters &result, uint32_t absPartIdx, uint32_t log2TrSize, bool bIsLuma) const;
+
+    const CUData* getPULeft(uint32_t& lPartUnitIdx, uint32_t curPartUnitIdx) const;
+    const CUData* getPUAbove(uint32_t& aPartUnitIdx, uint32_t curPartUnitIdx) const;
+    const CUData* getPUAboveLeft(uint32_t& alPartUnitIdx, uint32_t curPartUnitIdx) const;
+    const CUData* getPUAboveRight(uint32_t& arPartUnitIdx, uint32_t curPartUnitIdx) const;
+    const CUData* getPUBelowLeft(uint32_t& blPartUnitIdx, uint32_t curPartUnitIdx) const;
+
+    const CUData* getQpMinCuLeft(uint32_t& lPartUnitIdx, uint32_t currAbsIdxInCTU) const;
+    const CUData* getQpMinCuAbove(uint32_t& aPartUnitIdx, uint32_t currAbsIdxInCTU) const;
+
+    const CUData* getPUAboveRightAdi(uint32_t& arPartUnitIdx, uint32_t curPartUnitIdx, uint32_t partUnitOffset) const;
+    const CUData* getPUBelowLeftAdi(uint32_t& blPartUnitIdx, uint32_t curPartUnitIdx, uint32_t partUnitOffset) const;
+
+protected:
+
+    template<typename T>
+    void setAllPU(T *p, const T& val, int absPartIdx, int puIdx);
+
+    int8_t getLastCodedQP(uint32_t absPartIdx) const;
+    int  getLastValidPartIdx(int absPartIdx) const;
+
+    bool hasEqualMotion(uint32_t absPartIdx, const CUData& candCU, uint32_t candAbsPartIdx) const;
+
+    /* Check whether the current PU and a spatial neighboring PU are in same merge region */
+    bool isDiffMER(int xN, int yN, int xP, int yP) const { return ((xN >> 2) != (xP >> 2)) || ((yN >> 2) != (yP >> 2)); }
+
+    // add possible motion vector predictor candidates
+    bool getDirectPMV(MV& pmv, InterNeighbourMV *neighbours, uint32_t picList, uint32_t refIdx) const;
+    bool getIndirectPMV(MV& outMV, InterNeighbourMV *neighbours, uint32_t reference_list, uint32_t refIdx) const;
+    void getInterNeighbourMV(InterNeighbourMV *neighbour, uint32_t partUnitIdx, MVP_DIR dir) const;
+
+    bool getColMVP(MV& outMV, int& outRefIdx, int picList, int cuAddr, int absPartIdx) const;
+    bool getCollocatedMV(int cuAddr, int partUnitIdx, InterNeighbourMV *neighbour) const;
+
+    MV scaleMvByPOCDist(const MV& inMV, int curPOC, int curRefPOC, int colPOC, int colRefPOC) const;
+
+    void     deriveLeftRightTopIdx(uint32_t puIdx, uint32_t& partIdxLT, uint32_t& partIdxRT) const;
+
+    uint32_t deriveCenterIdx(uint32_t puIdx) const;
+    uint32_t deriveRightBottomIdx(uint32_t puIdx) const;
+    uint32_t deriveLeftBottomIdx(uint32_t puIdx) const;
+};
+
+// TU settings for entropy encoding: scan-order tables and context selection
+// for one transform unit, filled by CUData::getTUEntropyCodingParameters().
+struct TUEntropyCodingParameters
+{
+    const uint16_t *scan;
+    const uint16_t *scanCG;
+    ScanType        scanType;
+    uint32_t        log2TrSizeCG;
+    uint32_t        firstSignificanceMapContext;
+};
+
+// Raw backing memory for the per-part arrays, transform coefficients and
+// MVs of a set of CUData instances (consumed via CUData::initialize()).
+struct CUDataMemPool
+{
+    uint8_t* charMemBlock;
+    coeff_t* trCoeffMemBlock;
+    MV*      mvMemBlock;
+
+    CUDataMemPool() { charMemBlock = NULL; trCoeffMemBlock = NULL; mvMemBlock = NULL; }
+
+    // Allocate pooled memory for numInstances CUs at the given depth and
+    // chroma sampling. Returns false on allocation failure; blocks allocated
+    // before the failing CHECKED_MALLOC are not freed here — presumably the
+    // caller invokes destroy() on failure — TODO confirm.
+    bool create(uint32_t depth, uint32_t csp, uint32_t numInstances)
+    {
+        uint32_t numPartition = NUM_4x4_PARTITIONS >> (depth * 2);
+        uint32_t cuSize = g_maxCUSize >> depth;
+        uint32_t sizeL = cuSize * cuSize;
+        uint32_t sizeC = sizeL >> (CHROMA_H_SHIFT(csp) + CHROMA_V_SHIFT(csp));
+        CHECKED_MALLOC(trCoeffMemBlock, coeff_t, (sizeL + sizeC * 2) * numInstances);
+        CHECKED_MALLOC(charMemBlock, uint8_t, numPartition * numInstances * CUData::BytesPerPartition);
+        CHECKED_MALLOC(mvMemBlock, MV, numPartition * 4 * numInstances);
+        return true;
+
+    fail:
+        return false;
+    }
+
+    // Free all pooled blocks. NOTE(review): pointers are not reset to NULL,
+    // so calling destroy() twice would double-free — verify call sites.
+    void destroy()
+    {
+        X265_FREE(trCoeffMemBlock);
+        X265_FREE(mvMemBlock);
+        X265_FREE(charMemBlock);
+    }
+};
+}
+
+#endif // ifndef X265_CUDATA_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/dct.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1010 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Mandar Gurav <mandar@multicorewareinc.com>
+ *          Deepthi Devaki Akkoorath <deepthidevaki@multicorewareinc.com>
+ *          Mahesh Pittala <mahesh@multicorewareinc.com>
+ *          Rajesh Paulraj <rajesh@multicorewareinc.com>
+ *          Min Chen <min.chen@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *          Nabajit Deka <nabajit@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "contexts.h"   // costCoeffNxN_c
+#include "threading.h"  // CLZ
+
+using namespace X265_NS;
+
+#if _MSC_VER
+#pragma warning(disable: 4127) // conditional expression is constant, typical for templated functions
+#endif
+
+// Fast DST Algorithm. Full matrix multiplication for DST and Fast DST algorithm
+// give identical results
+// Applies the 4x4 DST-VII to a row-major 4x4 block; row i of the input
+// produces column i of `coeff` (transposed output). Assumes shift >= 1.
+static void fastForwardDst(const int16_t* block, int16_t* coeff, int shift)  // input block, output coeff
+{
+    int c[4];
+    int rnd_factor = 1 << (shift - 1);
+
+    for (int i = 0; i < 4; i++)
+    {
+        // Intermediate Variables
+        c[0] = block[4 * i + 0] + block[4 * i + 3];
+        c[1] = block[4 * i + 1] + block[4 * i + 3];
+        c[2] = block[4 * i + 0] - block[4 * i + 1];
+        c[3] = 74 * block[4 * i + 2];
+
+        coeff[i] =      (int16_t)((29 * c[0] + 55 * c[1]  + c[3] + rnd_factor) >> shift);
+        coeff[4 + i] =  (int16_t)((74 * (block[4 * i + 0] + block[4 * i + 1] - block[4 * i + 3]) + rnd_factor) >> shift);
+        coeff[8 + i] =  (int16_t)((29 * c[2] + 55 * c[0]  - c[3] + rnd_factor) >> shift);
+        coeff[12 + i] = (int16_t)((55 * c[2] - 29 * c[1] + c[3] + rnd_factor) >> shift);
+    }
+}
+
+// Inverse 4x4 DST-VII (companion to fastForwardDst above); results are
+// rounded, shifted and clipped to the int16 range. Assumes shift >= 1.
+static void inversedst(const int16_t* tmp, int16_t* block, int shift)  // input tmp, output block
+{
+    int i, c[4];
+    int rnd_factor = 1 << (shift - 1);
+
+    for (i = 0; i < 4; i++)
+    {
+        // Intermediate Variables
+        c[0] = tmp[i] + tmp[8 + i];
+        c[1] = tmp[8 + i] + tmp[12 + i];
+        c[2] = tmp[i] - tmp[12 + i];
+        c[3] = 74 * tmp[4 + i];
+
+        block[4 * i + 0] = (int16_t)x265_clip3(-32768, 32767, (29 * c[0] + 55 * c[1]     + c[3]               + rnd_factor) >> shift);
+        block[4 * i + 1] = (int16_t)x265_clip3(-32768, 32767, (55 * c[2] - 29 * c[1]     + c[3]               + rnd_factor) >> shift);
+        block[4 * i + 2] = (int16_t)x265_clip3(-32768, 32767, (74 * (tmp[i] - tmp[8 + i]  + tmp[12 + i])      + rnd_factor) >> shift);
+        block[4 * i + 3] = (int16_t)x265_clip3(-32768, 32767, (55 * c[0] + 29 * c[2]     - c[3]               + rnd_factor) >> shift);
+    }
+}
+
+// Forward 16-point DCT over `line` rows of 16 samples each, using the
+// partial-butterfly decomposition (even/odd split into E/O, EE/EO, EEE/EEO).
+// Output is transposed: coefficient k of input row j lands at dst[k*line+j].
+// Assumes shift >= 1.
+static void partialButterfly16(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[8], O[8];
+    int EE[4], EO[4];
+    int EEE[2], EEO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* E and O */
+        for (k = 0; k < 8; k++)
+        {
+            E[k] = src[k] + src[15 - k];
+            O[k] = src[k] - src[15 - k];
+        }
+
+        /* EE and EO */
+        for (k = 0; k < 4; k++)
+        {
+            EE[k] = E[k] + E[7 - k];
+            EO[k] = E[k] - E[7 - k];
+        }
+
+        /* EEE and EEO */
+        EEE[0] = EE[0] + EE[3];
+        EEO[0] = EE[0] - EE[3];
+        EEE[1] = EE[1] + EE[2];
+        EEO[1] = EE[1] - EE[2];
+
+        dst[0] = (int16_t)((g_t16[0][0] * EEE[0] + g_t16[0][1] * EEE[1] + add) >> shift);
+        dst[8 * line] = (int16_t)((g_t16[8][0] * EEE[0] + g_t16[8][1] * EEE[1] + add) >> shift);
+        dst[4 * line] = (int16_t)((g_t16[4][0] * EEO[0] + g_t16[4][1] * EEO[1] + add) >> shift);
+        dst[12 * line] = (int16_t)((g_t16[12][0] * EEO[0] + g_t16[12][1] * EEO[1] + add) >> shift);
+
+        for (k = 2; k < 16; k += 4)
+        {
+            dst[k * line] = (int16_t)((g_t16[k][0] * EO[0] + g_t16[k][1] * EO[1] + g_t16[k][2] * EO[2] +
+                                       g_t16[k][3] * EO[3] + add) >> shift);
+        }
+
+        for (k = 1; k < 16; k += 2)
+        {
+            dst[k * line] =  (int16_t)((g_t16[k][0] * O[0] + g_t16[k][1] * O[1] + g_t16[k][2] * O[2] + g_t16[k][3] * O[3] +
+                                        g_t16[k][4] * O[4] + g_t16[k][5] * O[5] + g_t16[k][6] * O[6] + g_t16[k][7] * O[7] +
+                                        add) >> shift);
+        }
+
+        src += 16;
+        dst++;
+    }
+}
+
+// Forward 32-point DCT over `line` rows of 32 samples each, via the
+// partial-butterfly decomposition (four even/odd levels: E/O .. EEEE/EEEO).
+// Output is transposed: coefficient k of input row j lands at dst[k*line+j].
+// Assumes shift >= 1.
+static void partialButterfly32(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[16], O[16];
+    int EE[8], EO[8];
+    int EEE[4], EEO[4];
+    int EEEE[2], EEEO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* E and O*/
+        for (k = 0; k < 16; k++)
+        {
+            E[k] = src[k] + src[31 - k];
+            O[k] = src[k] - src[31 - k];
+        }
+
+        /* EE and EO */
+        for (k = 0; k < 8; k++)
+        {
+            EE[k] = E[k] + E[15 - k];
+            EO[k] = E[k] - E[15 - k];
+        }
+
+        /* EEE and EEO */
+        for (k = 0; k < 4; k++)
+        {
+            EEE[k] = EE[k] + EE[7 - k];
+            EEO[k] = EE[k] - EE[7 - k];
+        }
+
+        /* EEEE and EEEO */
+        EEEE[0] = EEE[0] + EEE[3];
+        EEEO[0] = EEE[0] - EEE[3];
+        EEEE[1] = EEE[1] + EEE[2];
+        EEEO[1] = EEE[1] - EEE[2];
+
+        dst[0] = (int16_t)((g_t32[0][0] * EEEE[0] + g_t32[0][1] * EEEE[1] + add) >> shift);
+        dst[16 * line] = (int16_t)((g_t32[16][0] * EEEE[0] + g_t32[16][1] * EEEE[1] + add) >> shift);
+        dst[8 * line] = (int16_t)((g_t32[8][0] * EEEO[0] + g_t32[8][1] * EEEO[1] + add) >> shift);
+        dst[24 * line] = (int16_t)((g_t32[24][0] * EEEO[0] + g_t32[24][1] * EEEO[1] + add) >> shift);
+        for (k = 4; k < 32; k += 8)
+        {
+            dst[k * line] = (int16_t)((g_t32[k][0] * EEO[0] + g_t32[k][1] * EEO[1] + g_t32[k][2] * EEO[2] +
+                                       g_t32[k][3] * EEO[3] + add) >> shift);
+        }
+
+        for (k = 2; k < 32; k += 4)
+        {
+            dst[k * line] = (int16_t)((g_t32[k][0] * EO[0] + g_t32[k][1] * EO[1] + g_t32[k][2] * EO[2] +
+                                       g_t32[k][3] * EO[3] + g_t32[k][4] * EO[4] + g_t32[k][5] * EO[5] +
+                                       g_t32[k][6] * EO[6] + g_t32[k][7] * EO[7] + add) >> shift);
+        }
+
+        for (k = 1; k < 32; k += 2)
+        {
+            dst[k * line] = (int16_t)((g_t32[k][0] * O[0] + g_t32[k][1] * O[1] + g_t32[k][2] * O[2] + g_t32[k][3] * O[3] +
+                                       g_t32[k][4] * O[4] + g_t32[k][5] * O[5] + g_t32[k][6] * O[6] + g_t32[k][7] * O[7] +
+                                       g_t32[k][8] * O[8] + g_t32[k][9] * O[9] + g_t32[k][10] * O[10] + g_t32[k][11] *
+                                       O[11] + g_t32[k][12] * O[12] + g_t32[k][13] * O[13] + g_t32[k][14] * O[14] +
+                                       g_t32[k][15] * O[15] + add) >> shift);
+        }
+
+        src += 32;
+        dst++;
+    }
+}
+
+// Forward 8-point DCT over `line` rows of 8 samples each (partial
+// butterfly: E/O then EE/EO). Output is transposed: coefficient k of input
+// row j lands at dst[k*line+j]. Assumes shift >= 1.
+static void partialButterfly8(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[4], O[4];
+    int EE[2], EO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* E and O*/
+        for (k = 0; k < 4; k++)
+        {
+            E[k] = src[k] + src[7 - k];
+            O[k] = src[k] - src[7 - k];
+        }
+
+        /* EE and EO */
+        EE[0] = E[0] + E[3];
+        EO[0] = E[0] - E[3];
+        EE[1] = E[1] + E[2];
+        EO[1] = E[1] - E[2];
+
+        dst[0] = (int16_t)((g_t8[0][0] * EE[0] + g_t8[0][1] * EE[1] + add) >> shift);
+        dst[4 * line] = (int16_t)((g_t8[4][0] * EE[0] + g_t8[4][1] * EE[1] + add) >> shift);
+        dst[2 * line] = (int16_t)((g_t8[2][0] * EO[0] + g_t8[2][1] * EO[1] + add) >> shift);
+        dst[6 * line] = (int16_t)((g_t8[6][0] * EO[0] + g_t8[6][1] * EO[1] + add) >> shift);
+
+        dst[line] = (int16_t)((g_t8[1][0] * O[0] + g_t8[1][1] * O[1] + g_t8[1][2] * O[2] + g_t8[1][3] * O[3] + add) >> shift);
+        dst[3 * line] = (int16_t)((g_t8[3][0] * O[0] + g_t8[3][1] * O[1] + g_t8[3][2] * O[2] + g_t8[3][3] * O[3] + add) >> shift);
+        dst[5 * line] = (int16_t)((g_t8[5][0] * O[0] + g_t8[5][1] * O[1] + g_t8[5][2] * O[2] + g_t8[5][3] * O[3] + add) >> shift);
+        dst[7 * line] = (int16_t)((g_t8[7][0] * O[0] + g_t8[7][1] * O[1] + g_t8[7][2] * O[2] + g_t8[7][3] * O[3] + add) >> shift);
+
+        src += 8;
+        dst++;
+    }
+}
+
+// Inverse 4-point DCT (partial butterfly). Coefficients are read
+// column-wise (element stride `line`); each reconstructed row of 4 samples
+// is written contiguously and clipped to the int16 range. Assumes shift >= 1.
+static void partialButterflyInverse4(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j;
+    int E[2], O[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+        O[0] = g_t4[1][0] * src[line] + g_t4[3][0] * src[3 * line];
+        O[1] = g_t4[1][1] * src[line] + g_t4[3][1] * src[3 * line];
+        E[0] = g_t4[0][0] * src[0] + g_t4[2][0] * src[2 * line];
+        E[1] = g_t4[0][1] * src[0] + g_t4[2][1] * src[2 * line];
+
+        /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+        dst[0] = (int16_t)(x265_clip3(-32768, 32767, (E[0] + O[0] + add) >> shift));
+        dst[1] = (int16_t)(x265_clip3(-32768, 32767, (E[1] + O[1] + add) >> shift));
+        dst[2] = (int16_t)(x265_clip3(-32768, 32767, (E[1] - O[1] + add) >> shift));
+        dst[3] = (int16_t)(x265_clip3(-32768, 32767, (E[0] - O[0] + add) >> shift));
+
+        src++;
+        dst += 4;
+    }
+}
+
+// Inverse 8-point DCT (partial butterfly). Coefficients are read
+// column-wise (element stride `line`); each reconstructed row of 8 samples
+// is written contiguously and clipped to the int16 range. Assumes shift >= 1.
+static void partialButterflyInverse8(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[4], O[4];
+    int EE[2], EO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+        for (k = 0; k < 4; k++)
+        {
+            O[k] = g_t8[1][k] * src[line] + g_t8[3][k] * src[3 * line] + g_t8[5][k] * src[5 * line] + g_t8[7][k] * src[7 * line];
+        }
+
+        EO[0] = g_t8[2][0] * src[2 * line] + g_t8[6][0] * src[6 * line];
+        EO[1] = g_t8[2][1] * src[2 * line] + g_t8[6][1] * src[6 * line];
+        EE[0] = g_t8[0][0] * src[0] + g_t8[4][0] * src[4 * line];
+        EE[1] = g_t8[0][1] * src[0] + g_t8[4][1] * src[4 * line];
+
+        /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+        E[0] = EE[0] + EO[0];
+        E[3] = EE[0] - EO[0];
+        E[1] = EE[1] + EO[1];
+        E[2] = EE[1] - EO[1];
+        for (k = 0; k < 4; k++)
+        {
+            dst[k] = (int16_t)x265_clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
+            dst[k + 4] = (int16_t)x265_clip3(-32768, 32767, (E[3 - k] - O[3 - k] + add) >> shift);
+        }
+
+        src++;
+        dst += 8;
+    }
+}
+
+// Inverse 16-point DCT (partial butterfly). Coefficients are read
+// column-wise (element stride `line`); each reconstructed row of 16 samples
+// is written contiguously and clipped to the int16 range. Assumes shift >= 1.
+static void partialButterflyInverse16(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[8], O[8];
+    int EE[4], EO[4];
+    int EEE[2], EEO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+        for (k = 0; k < 8; k++)
+        {
+            O[k] = g_t16[1][k] * src[line] + g_t16[3][k] * src[3 * line] + g_t16[5][k] * src[5 * line] + g_t16[7][k] * src[7 * line] +
+                g_t16[9][k] * src[9 * line] + g_t16[11][k] * src[11 * line] + g_t16[13][k] * src[13 * line] + g_t16[15][k] * src[15 * line];
+        }
+
+        for (k = 0; k < 4; k++)
+        {
+            EO[k] = g_t16[2][k] * src[2 * line] + g_t16[6][k] * src[6 * line] + g_t16[10][k] * src[10 * line] + g_t16[14][k] * src[14 * line];
+        }
+
+        EEO[0] = g_t16[4][0] * src[4 * line] + g_t16[12][0] * src[12 * line];
+        EEE[0] = g_t16[0][0] * src[0] + g_t16[8][0] * src[8 * line];
+        EEO[1] = g_t16[4][1] * src[4 * line] + g_t16[12][1] * src[12 * line];
+        EEE[1] = g_t16[0][1] * src[0] + g_t16[8][1] * src[8 * line];
+
+        /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+        for (k = 0; k < 2; k++)
+        {
+            EE[k] = EEE[k] + EEO[k];
+            EE[k + 2] = EEE[1 - k] - EEO[1 - k];
+        }
+
+        for (k = 0; k < 4; k++)
+        {
+            E[k] = EE[k] + EO[k];
+            E[k + 4] = EE[3 - k] - EO[3 - k];
+        }
+
+        for (k = 0; k < 8; k++)
+        {
+            dst[k]   = (int16_t)x265_clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
+            dst[k + 8] = (int16_t)x265_clip3(-32768, 32767, (E[7 - k] - O[7 - k] + add) >> shift);
+        }
+
+        src++;
+        dst += 16;
+    }
+}
+
+/* Inverse 32-point partial butterfly (one 1-D IDCT pass).
+ * Same layout as the 16-point version: one column of coefficients is read
+ * per iteration (stride 'line'), 32 contiguous outputs are written, and
+ * results are rounded with 'add', shifted by 'shift', and clipped. */
+static void partialButterflyInverse32(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[16], O[16];      /* even/odd partial sums, one level per array */
+    int EE[8], EO[8];
+    int EEE[4], EEO[4];
+    int EEEE[2], EEEO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+        for (k = 0; k < 16; k++)
+        {
+            O[k] = g_t32[1][k] * src[line] + g_t32[3][k] * src[3 * line] + g_t32[5][k] * src[5 * line] + g_t32[7][k] * src[7 * line] +
+                g_t32[9][k] * src[9 * line] + g_t32[11][k] * src[11 * line] + g_t32[13][k] * src[13 * line] + g_t32[15][k] * src[15 * line] +
+                g_t32[17][k] * src[17 * line] + g_t32[19][k] * src[19 * line] + g_t32[21][k] * src[21 * line] + g_t32[23][k] * src[23 * line] +
+                g_t32[25][k] * src[25 * line] + g_t32[27][k] * src[27 * line] + g_t32[29][k] * src[29 * line] + g_t32[31][k] * src[31 * line];
+        }
+
+        for (k = 0; k < 8; k++)
+        {
+            EO[k] = g_t32[2][k] * src[2 * line] + g_t32[6][k] * src[6 * line] + g_t32[10][k] * src[10 * line] + g_t32[14][k] * src[14 * line] +
+                g_t32[18][k] * src[18 * line] + g_t32[22][k] * src[22 * line] + g_t32[26][k] * src[26 * line] + g_t32[30][k] * src[30 * line];
+        }
+
+        for (k = 0; k < 4; k++)
+        {
+            EEO[k] = g_t32[4][k] * src[4 * line] + g_t32[12][k] * src[12 * line] + g_t32[20][k] * src[20 * line] + g_t32[28][k] * src[28 * line];
+        }
+
+        EEEO[0] = g_t32[8][0] * src[8 * line] + g_t32[24][0] * src[24 * line];
+        EEEO[1] = g_t32[8][1] * src[8 * line] + g_t32[24][1] * src[24 * line];
+        EEEE[0] = g_t32[0][0] * src[0] + g_t32[16][0] * src[16 * line];
+        EEEE[1] = g_t32[0][1] * src[0] + g_t32[16][1] * src[16 * line];
+
+        /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+        EEE[0] = EEEE[0] + EEEO[0];
+        EEE[3] = EEEE[0] - EEEO[0];
+        EEE[1] = EEEE[1] + EEEO[1];
+        EEE[2] = EEEE[1] - EEEO[1];
+        for (k = 0; k < 4; k++)
+        {
+            EE[k] = EEE[k] + EEO[k];
+            EE[k + 4] = EEE[3 - k] - EEO[3 - k];
+        }
+
+        for (k = 0; k < 8; k++)
+        {
+            E[k] = EE[k] + EO[k];
+            E[k + 8] = EE[7 - k] - EO[7 - k];
+        }
+
+        for (k = 0; k < 16; k++)
+        {
+            dst[k] = (int16_t)x265_clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
+            dst[k + 16] = (int16_t)x265_clip3(-32768, 32767, (E[15 - k] - O[15 - k] + add) >> shift);
+        }
+
+        src++;
+        dst += 32;
+    }
+}
+
+/* Forward 4-point partial butterfly (one 1-D DCT pass).
+ * Reads 4 contiguous input samples per iteration and writes the 4
+ * outputs with a stride of 'line', transposing as it goes.  'shift'
+ * is the post-multiply right shift with rounding offset 'add'. */
+static void partialButterfly4(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j;
+    int E[2], O[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* E and O */
+        E[0] = src[0] + src[3];
+        O[0] = src[0] - src[3];
+        E[1] = src[1] + src[2];
+        O[1] = src[1] - src[2];
+
+        /* even rows (0,2) come from E, odd rows (1,3) from O */
+        dst[0] = (int16_t)((g_t4[0][0] * E[0] + g_t4[0][1] * E[1] + add) >> shift);
+        dst[2 * line] = (int16_t)((g_t4[2][0] * E[0] + g_t4[2][1] * E[1] + add) >> shift);
+        dst[line] = (int16_t)((g_t4[1][0] * O[0] + g_t4[1][1] * O[1] + add) >> shift);
+        dst[3 * line] = (int16_t)((g_t4[3][0] * O[0] + g_t4[3][1] * O[1] + add) >> shift);
+
+        src += 4;
+        dst++;
+    }
+}
+
+/* 4x4 forward DST: gather the strided source rows into a contiguous
+ * block, then run the fast DST once per dimension. */
+static void dst4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+{
+    const int shift_1st = 1 + X265_DEPTH - 8;
+    const int shift_2nd = 8;
+
+    ALIGN_VAR_32(int16_t, coef[4 * 4]);
+    ALIGN_VAR_32(int16_t, block[4 * 4]);
+
+    for (int row = 0; row < 4; row++)
+        memcpy(block + row * 4, src + row * srcStride, 4 * sizeof(int16_t));
+
+    fastForwardDst(block, coef, shift_1st); // first 1-D pass
+    fastForwardDst(coef, dst, shift_2nd);   // second 1-D pass
+}
+
+/* 4x4 forward DCT: gather the strided source rows into a contiguous
+ * block, then apply the 4-point partial butterfly once per dimension. */
+static void dct4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+{
+    const int shift_1st = 1 + X265_DEPTH - 8;
+    const int shift_2nd = 8;
+
+    ALIGN_VAR_32(int16_t, coef[4 * 4]);
+    ALIGN_VAR_32(int16_t, block[4 * 4]);
+
+    for (int row = 0; row < 4; row++)
+        memcpy(block + row * 4, src + row * srcStride, 4 * sizeof(int16_t));
+
+    partialButterfly4(block, coef, shift_1st, 4); // first 1-D pass
+    partialButterfly4(coef, dst, shift_2nd, 4);   // second 1-D pass
+}
+
+/* 8x8 forward DCT: gather the strided source rows into a contiguous
+ * block, then apply the 8-point partial butterfly once per dimension. */
+static void dct8_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+{
+    const int shift_1st = 2 + X265_DEPTH - 8;
+    const int shift_2nd = 9;
+
+    ALIGN_VAR_32(int16_t, coef[8 * 8]);
+    ALIGN_VAR_32(int16_t, block[8 * 8]);
+
+    for (int row = 0; row < 8; row++)
+        memcpy(block + row * 8, src + row * srcStride, 8 * sizeof(int16_t));
+
+    partialButterfly8(block, coef, shift_1st, 8); // first 1-D pass
+    partialButterfly8(coef, dst, shift_2nd, 8);   // second 1-D pass
+}
+
+/* 16x16 forward DCT: gather the strided source rows into a contiguous
+ * block, then apply the 16-point partial butterfly once per dimension. */
+static void dct16_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+{
+    const int shift_1st = 3 + X265_DEPTH - 8;
+    const int shift_2nd = 10;
+
+    ALIGN_VAR_32(int16_t, coef[16 * 16]);
+    ALIGN_VAR_32(int16_t, block[16 * 16]);
+
+    for (int row = 0; row < 16; row++)
+        memcpy(block + row * 16, src + row * srcStride, 16 * sizeof(int16_t));
+
+    partialButterfly16(block, coef, shift_1st, 16); // first 1-D pass
+    partialButterfly16(coef, dst, shift_2nd, 16);   // second 1-D pass
+}
+
+/* 32x32 forward DCT: gather the strided source rows into a contiguous
+ * block, then apply the 32-point partial butterfly once per dimension. */
+static void dct32_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+{
+    const int shift_1st = 4 + X265_DEPTH - 8;
+    const int shift_2nd = 11;
+
+    ALIGN_VAR_32(int16_t, coef[32 * 32]);
+    ALIGN_VAR_32(int16_t, block[32 * 32]);
+
+    for (int row = 0; row < 32; row++)
+        memcpy(block + row * 32, src + row * srcStride, 32 * sizeof(int16_t));
+
+    partialButterfly32(block, coef, shift_1st, 32); // first 1-D pass
+    partialButterfly32(coef, dst, shift_2nd, 32);   // second 1-D pass
+}
+
+/* 4x4 inverse DST: two 1-D inverse DST passes into a contiguous block,
+ * then scatter the rows to the strided destination. */
+static void idst4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+{
+    const int shift_1st = 7;
+    const int shift_2nd = 12 - (X265_DEPTH - 8);
+
+    ALIGN_VAR_32(int16_t, coef[4 * 4]);
+    ALIGN_VAR_32(int16_t, block[4 * 4]);
+
+    inversedst(src, coef, shift_1st); // inverse DST, first pass (src -> coef)
+    inversedst(coef, block, shift_2nd); // inverse DST, second pass (coef -> block)
+
+    for (int i = 0; i < 4; i++)
+    {
+        memcpy(&dst[i * dstStride], &block[i * 4], 4 * sizeof(int16_t));
+    }
+}
+
+/* 4x4 inverse DCT: two 1-D inverse partial-butterfly passes into a
+ * contiguous block, then scatter the rows to the strided destination. */
+static void idct4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+{
+    const int shift_1st = 7;
+    const int shift_2nd = 12 - (X265_DEPTH - 8);
+
+    ALIGN_VAR_32(int16_t, coef[4 * 4]);
+    ALIGN_VAR_32(int16_t, block[4 * 4]);
+
+    partialButterflyInverse4(src, coef, shift_1st, 4); // inverse DCT, first pass (src -> coef)
+    partialButterflyInverse4(coef, block, shift_2nd, 4); // inverse DCT, second pass (coef -> block)
+
+    for (int i = 0; i < 4; i++)
+    {
+        memcpy(&dst[i * dstStride], &block[i * 4], 4 * sizeof(int16_t));
+    }
+}
+
+/* 8x8 inverse DCT: two inverse partial-butterfly passes into a
+ * contiguous block, then scatter the rows to the strided destination. */
+static void idct8_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+{
+    const int shift_1st = 7;
+    const int shift_2nd = 12 - (X265_DEPTH - 8);
+
+    ALIGN_VAR_32(int16_t, coef[8 * 8]);
+    ALIGN_VAR_32(int16_t, block[8 * 8]);
+
+    partialButterflyInverse8(src, coef, shift_1st, 8);
+    partialButterflyInverse8(coef, block, shift_2nd, 8);
+
+    for (int row = 0; row < 8; row++)
+        memcpy(dst + row * dstStride, block + row * 8, 8 * sizeof(int16_t));
+}
+
+/* 16x16 inverse DCT: two inverse partial-butterfly passes into a
+ * contiguous block, then scatter the rows to the strided destination. */
+static void idct16_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+{
+    const int shift_1st = 7;
+    const int shift_2nd = 12 - (X265_DEPTH - 8);
+
+    ALIGN_VAR_32(int16_t, coef[16 * 16]);
+    ALIGN_VAR_32(int16_t, block[16 * 16]);
+
+    partialButterflyInverse16(src, coef, shift_1st, 16);
+    partialButterflyInverse16(coef, block, shift_2nd, 16);
+
+    for (int row = 0; row < 16; row++)
+        memcpy(dst + row * dstStride, block + row * 16, 16 * sizeof(int16_t));
+}
+
+/* 32x32 inverse DCT: two inverse partial-butterfly passes into a
+ * contiguous block, then scatter the rows to the strided destination. */
+static void idct32_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+{
+    const int shift_1st = 7;
+    const int shift_2nd = 12 - (X265_DEPTH - 8);
+
+    ALIGN_VAR_32(int16_t, coef[32 * 32]);
+    ALIGN_VAR_32(int16_t, block[32 * 32]);
+
+    partialButterflyInverse32(src, coef, shift_1st, 32);
+    partialButterflyInverse32(coef, block, shift_2nd, 32);
+
+    for (int row = 0; row < 32; row++)
+        memcpy(dst + row * dstStride, block + row * 32, 32 * sizeof(int16_t));
+}
+
+/* Flat de-quantization: coef[i] = clip16((quantCoef[i] * scale + round) >> shift). */
+static void dequant_normal_c(const int16_t* quantCoef, int16_t* coef, int num, int scale, int shift)
+{
+#if HIGH_BIT_DEPTH
+    X265_CHECK(scale < 32768 || ((scale & 3) == 0 && shift > (X265_DEPTH - 8)), "dequant invalid scale %d\n", scale);
+#else
+    // NOTE: maximum of scale is (72 * 256)
+    X265_CHECK(scale < 32768, "dequant invalid scale %d\n", scale);
+#endif
+    X265_CHECK(num <= 32 * 32, "dequant num %d too large\n", num);
+    X265_CHECK((num % 8) == 0, "dequant num %d not multiple of 8\n", num);
+    X265_CHECK(shift <= 10, "shift too large %d\n", shift);
+    X265_CHECK(((intptr_t)coef & 31) == 0, "dequant coef buffer not aligned\n");
+
+    const int round = 1 << (shift - 1);
+
+    for (int n = 0; n < num; n++)
+    {
+        const int coeffQ = (quantCoef[n] * scale + round) >> shift;
+        coef[n] = (int16_t)x265_clip3(-32768, 32767, coeffQ);
+    }
+}
+
+/* De-quantize using a per-coefficient scaling list.
+ * quantCoef:   quantized input coefficients
+ * deQuantCoef: per-coefficient dequant scale factors
+ * num:         coefficient count (at most 32x32)
+ * per, shift:  quantizer period and base shift; shift is biased by 4 here */
+static void dequant_scaling_c(const int16_t* quantCoef, const int32_t* deQuantCoef, int16_t* coef, int num, int per, int shift)
+{
+    X265_CHECK(num <= 32 * 32, "dequant num %d too large\n", num);
+
+    int add, coeffQ;
+
+    shift += 4;
+
+    if (shift > per)
+    {
+        add = 1 << (shift - per - 1);
+
+        for (int n = 0; n < num; n++)
+        {
+            coeffQ = ((quantCoef[n] * deQuantCoef[n]) + add) >> (shift - per);
+            coef[n] = (int16_t)x265_clip3(-32768, 32767, coeffQ);
+        }
+    }
+    else
+    {
+        /* Use a multiply instead of "coeffQ << (per - shift)": left-shifting
+         * a negative value is undefined behavior in C/C++ (C11 6.5.7).
+         * coeffQ is clipped to [-32768, 32767] first, so the product fits
+         * in 32 bits for the small (per - shift) values used here. */
+        const int upscale = 1 << (per - shift);
+
+        for (int n = 0; n < num; n++)
+        {
+            coeffQ  = x265_clip3(-32768, 32767, quantCoef[n] * deQuantCoef[n]);
+            coef[n] = (int16_t)x265_clip3(-32768, 32767, coeffQ * upscale);
+        }
+    }
+}
+
+/* Reference quantizer:
+ *   qCoef[i] = clip16(sign(coef[i]) * ((|coef[i]| * quantCoeff[i] + add) >> qBits))
+ * deltaU receives the per-coefficient rounding residue (consumed elsewhere).
+ * Returns the number of non-zero quantized coefficients. */
+static uint32_t quant_c(const int16_t* coef, const int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff)
+{
+    X265_CHECK(qBits >= 8, "qBits less than 8\n");
+    X265_CHECK((numCoeff % 16) == 0, "numCoeff must be multiple of 16\n");
+
+    const int qBits8 = qBits - 8;
+    uint32_t numSig = 0;
+
+    for (int pos = 0; pos < numCoeff; pos++)
+    {
+        const int inLevel = coef[pos];
+        const int scaled = abs(inLevel) * quantCoeff[pos];
+        int outLevel = (scaled + add) >> qBits;
+
+        deltaU[pos] = (scaled - (outLevel << qBits)) >> qBits8;
+
+        if (outLevel)
+            numSig++;
+
+        if (inLevel < 0)
+            outLevel = -outLevel;
+        qCoef[pos] = (int16_t)x265_clip3(-32768, 32767, outLevel);
+    }
+
+    return numSig;
+}
+
+/* Reference quantizer without the deltaU side output:
+ *   qCoef[i] = clip16(sign(coef[i]) * ((|coef[i]| * quantCoeff[i] + add) >> qBits))
+ * Returns the number of non-zero quantized coefficients. */
+static uint32_t nquant_c(const int16_t* coef, const int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff)
+{
+    X265_CHECK((numCoeff % 16) == 0, "number of quant coeff is not multiple of 4x4\n");
+    X265_CHECK((uint32_t)add < ((uint32_t)1 << qBits), "2 ^ qBits less than add\n");
+    X265_CHECK(((intptr_t)quantCoeff & 31) == 0, "quantCoeff buffer not aligned\n");
+
+    uint32_t numSig = 0;
+
+    for (int pos = 0; pos < numCoeff; pos++)
+    {
+        const int inLevel = coef[pos];
+        int outLevel = (abs(inLevel) * quantCoeff[pos] + add) >> qBits;
+
+        if (outLevel)
+            numSig++;
+
+        if (inLevel < 0)
+            outLevel = -outLevel;
+        qCoef[pos] = (int16_t)x265_clip3(-32768, 32767, outLevel);
+    }
+
+    return numSig;
+}
+/* Count the non-zero entries of a trSize x trSize coefficient block. */
+template<int trSize>
+int  count_nonzero_c(const int16_t* quantCoeff)
+{
+    X265_CHECK(((intptr_t)quantCoeff & 15) == 0, "quant buffer not aligned\n");
+
+    const int total = trSize * trSize;
+    int nonZero = 0;
+
+    for (int i = 0; i < total; i++)
+    {
+        if (quantCoeff[i])
+            nonZero++;
+    }
+
+    return nonZero;
+}
+
+/* Copy a trSize x trSize residual block (row stride resiStride) into a
+ * contiguous coeff block, returning the count of non-zero samples. */
+template<int trSize>
+uint32_t copy_count(int16_t* coeff, const int16_t* residual, intptr_t resiStride)
+{
+    uint32_t numSig = 0;
+
+    for (int row = 0; row < trSize; row++)
+    {
+        const int16_t* srcRow = residual + row * resiStride;
+        int16_t* dstRow = coeff + row * trSize;
+
+        for (int col = 0; col < trSize; col++)
+        {
+            dstRow[col] = srcRow[col];
+            numSig += (srcRow[col] != 0);
+        }
+    }
+
+    return numSig;
+}
+
+/* DCT denoising: subtract a per-position offset from the magnitude of
+ * each coefficient (clamping at zero) and accumulate the magnitudes in
+ * resSum.  The sign is stripped and restored with branchless bit tricks. */
+static void denoiseDct_c(int16_t* dctCoef, uint32_t* resSum, const uint16_t* offset, int numCoeff)
+{
+    for (int i = 0; i < numCoeff; i++)
+    {
+        int level = dctCoef[i];
+        int sign = level >> 31;              // 0 or -1 (relies on arithmetic right shift of negative int)
+        level = (level + sign) ^ sign;       // abs(level)
+        resSum[i] += level;
+        level -= offset[i];
+        dctCoef[i] = (int16_t)(level < 0 ? 0 : (level ^ sign) - sign); // clamp at 0, re-apply sign
+    }
+}
+
+/* Walk the coefficients in 'scan' order until all numSig significant
+ * coefficients have been seen.  For each 4x4 coding group (CG) this fills:
+ *   coeffNum[cg]  - count of non-zero coefficients in the CG,
+ *   coeffFlag[cg] - significance bitmap, shifted in scan order,
+ *   coeffSign[cg] - sign bits packed at positions given by coeffNum.
+ * Returns the scan position of the last significant coefficient. */
+static int scanPosLast_c(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* /*scanCG4x4*/, const int /*trSize*/)
+{
+    memset(coeffNum, 0, MLS_GRP_NUM * sizeof(*coeffNum));
+    memset(coeffFlag, 0, MLS_GRP_NUM * sizeof(*coeffFlag));
+    memset(coeffSign, 0, MLS_GRP_NUM * sizeof(*coeffSign));
+
+    int scanPosLast = 0;
+    do
+    {
+        // coding-group index of the current scan position
+        const uint32_t cgIdx = (uint32_t)scanPosLast >> MLS_CG_SIZE;
+
+        const uint32_t posLast = scan[scanPosLast++];
+
+        const int curCoeff = coeff[posLast];
+        const uint32_t isNZCoeff = (curCoeff != 0);
+        // get L1 sig map
+        // NOTE: the new algorithm is complicated, so I keep reference code here
+        //uint32_t posy   = posLast >> log2TrSize;
+        //uint32_t posx   = posLast - (posy << log2TrSize);
+        //uint32_t blkIdx0 = ((posy >> MLS_CG_LOG2_SIZE) << codingParameters.log2TrSizeCG) + (posx >> MLS_CG_LOG2_SIZE);
+        //const uint32_t blkIdx = ((posLast >> (2 * MLS_CG_LOG2_SIZE)) & ~maskPosXY) + ((posLast >> MLS_CG_LOG2_SIZE) & maskPosXY);
+        //sigCoeffGroupFlag64 |= ((uint64_t)isNZCoeff << blkIdx);
+        numSig -= isNZCoeff;
+
+        // TODO: optimize by instruction BTS
+        coeffSign[cgIdx] += (uint16_t)(((uint32_t)curCoeff >> 31) << coeffNum[cgIdx]);
+        coeffFlag[cgIdx] = (coeffFlag[cgIdx] << 1) + (uint16_t)isNZCoeff;
+        coeffNum[cgIdx] += (uint8_t)isNZCoeff;
+    }
+    while (numSig > 0);
+    return scanPosLast - 1;
+}
+
+/* Locate the first and last non-zero coefficient within one 4x4 coding
+ * group, scanning in the order given by scanTbl.  Returns the two scan
+ * positions packed as (lastNZPosInCG << 16) | firstNZPosInCG. */
+static uint32_t findPosFirstLast_c(const int16_t *dstCoeff, const intptr_t trSize, const uint16_t scanTbl[16])
+{
+    int n;
+
+    // backward scan: last significant coefficient in the group
+    for (n = SCAN_SET_SIZE - 1; n >= 0; --n)
+    {
+        const uint32_t idx = scanTbl[n];
+        const uint32_t idxY = idx / MLS_CG_SIZE;
+        const uint32_t idxX = idx % MLS_CG_SIZE;
+        if (dstCoeff[idxY * trSize + idxX])
+            break;
+    }
+
+    X265_CHECK(n >= -1, "non-zero coeff scan failure!\n");
+
+    uint32_t lastNZPosInCG = (uint32_t)n;
+
+    // forward scan: first significant coefficient in the group
+    for (n = 0; n < SCAN_SET_SIZE; n++)
+    {
+        const uint32_t idx = scanTbl[n];
+        const uint32_t idxY = idx / MLS_CG_SIZE;
+        const uint32_t idxX = idx % MLS_CG_SIZE;
+        if (dstCoeff[idxY * trSize + idxX])
+            break;
+    }
+
+    uint32_t firstNZPosInCG = (uint32_t)n;
+
+    // NOTE: when coeff block all ZERO, the lastNZPosInCG is undefined and firstNZPosInCG is 16
+    return ((lastNZPosInCG << 16) | firstNZPosInCG);
+}
+
+
+/* Estimate the CABAC bit cost of the significant_coeff flags for one 4x4
+ * coefficient group, updating the significance contexts in baseCtx and
+ * collecting the absolute values of significant coefficients into absCoeff
+ * (in reverse scan order).  Returns the fractional-bit sum (low 24 bits).
+ * NOTE(review): this mirrors an assembly kernel; the pointer bias on
+ * absCoeff below exists to match that implementation — do not "fix" it. */
+static uint32_t costCoeffNxN_c(const uint16_t *scan, const coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, const uint8_t *tabSigCtx, uint32_t scanFlagMask, uint8_t *baseCtx, int offset, int scanPosSigOff, int subPosBase)
+{
+    ALIGN_VAR_32(uint16_t, tmpCoeff[SCAN_SET_SIZE]);
+    uint32_t numNonZero = (scanPosSigOff < (SCAN_SET_SIZE - 1) ? 1 : 0);
+    uint32_t sum = 0;
+
+    // correct offset to match assembly
+    absCoeff -= numNonZero;
+
+    // gather |coeff| for the 4x4 group into a contiguous buffer
+    for (int i = 0; i < MLS_CG_SIZE; i++)
+    {
+        tmpCoeff[i * MLS_CG_SIZE + 0] = (uint16_t)abs(coeff[i * trSize + 0]);
+        tmpCoeff[i * MLS_CG_SIZE + 1] = (uint16_t)abs(coeff[i * trSize + 1]);
+        tmpCoeff[i * MLS_CG_SIZE + 2] = (uint16_t)abs(coeff[i * trSize + 2]);
+        tmpCoeff[i * MLS_CG_SIZE + 3] = (uint16_t)abs(coeff[i * trSize + 3]);
+    }
+
+    // walk the group from scanPosSigOff down to 0, costing each sig flag
+    do
+    {
+        uint32_t blkPos, sig, ctxSig;
+        blkPos = scan[scanPosSigOff];
+        // position (0,0) of the whole block uses context 0 regardless of table
+        const uint32_t posZeroMask = (subPosBase + scanPosSigOff) ? ~0 : 0;
+        sig     = scanFlagMask & 1;
+        scanFlagMask >>= 1;
+        X265_CHECK((uint32_t)(tmpCoeff[blkPos] != 0) == sig, "sign bit mistake\n");
+        if ((scanPosSigOff != 0) || (subPosBase == 0) || numNonZero)
+        {
+            const uint32_t cnt = tabSigCtx[blkPos] + offset;
+            ctxSig = cnt & posZeroMask;
+
+            //X265_CHECK(ctxSig == Quant::getSigCtxInc(patternSigCtx, log2TrSize, trSize, codingParameters.scan[subPosBase + scanPosSigOff], bIsLuma, codingParameters.firstSignificanceMapContext), "sigCtx mistake!\n");;
+            //encodeBin(sig, baseCtx[ctxSig]);
+            // table-driven CABAC state transition + fractional bit cost
+            const uint32_t mstate = baseCtx[ctxSig];
+            const uint32_t mps = mstate & 1;
+            const uint32_t stateBits = PFX(entropyStateBits)[mstate ^ sig];
+            uint32_t nextState = (stateBits >> 24) + mps;
+            if ((mstate ^ sig) == 1)
+                nextState = sig;
+            X265_CHECK(sbacNext(mstate, sig) == nextState, "nextState check failure\n");
+            X265_CHECK(sbacGetEntropyBits(mstate, sig) == (stateBits & 0xFFFFFF), "entropyBits check failure\n");
+            baseCtx[ctxSig] = (uint8_t)nextState;
+            sum += stateBits;
+        }
+        assert(numNonZero <= 15);
+        assert(blkPos <= 15);
+        absCoeff[numNonZero] = tmpCoeff[blkPos];
+        numNonZero += sig;
+        scanPosSigOff--;
+    }
+    while(scanPosSigOff >= 0);
+
+    return (sum & 0xFFFFFF);
+}
+
+/* Estimate the bit cost of coeff_abs_level_remaining for the coefficients
+ * absCoeff[idx .. numNonZero-1] using Golomb-Rice / Exp-Golomb coding,
+ * adapting the Rice parameter as it goes.  Returns the total bit count. */
+static uint32_t costCoeffRemain_c(uint16_t *absCoeff, int numNonZero, int idx)
+{
+    uint32_t goRiceParam = 0;
+
+    uint32_t sum = 0;
+    int baseLevel = 3;
+    do
+    {
+        // beyond C1FLAG_NUMBER coefficients no gt1/gt2 flags are coded
+        if (idx >= C1FLAG_NUMBER)
+            baseLevel = 1;
+
+        // TODO: the IDX is not really idx, so this check inactive
+        //X265_CHECK(baseLevel == ((idx < C1FLAG_NUMBER) ? (2 + firstCoeff2) : 1), "baseLevel check failurr\n");
+        int codeNumber = absCoeff[idx] - baseLevel;
+
+        if (codeNumber >= 0)
+        {
+            //writeCoefRemainExGolomb(absCoeff[idx] - baseLevel, goRiceParam);
+            uint32_t length = 0;
+
+            // values past the Rice range switch to Exp-Golomb (2*length extra bits)
+            codeNumber = ((uint32_t)codeNumber >> goRiceParam) - COEF_REMAIN_BIN_REDUCTION;
+            if (codeNumber >= 0)
+            {
+                {
+                    unsigned long cidx;
+                    CLZ(cidx, codeNumber + 1);
+                    length = cidx;
+                }
+                X265_CHECK((codeNumber != 0) || (length == 0), "length check failure\n");
+
+                codeNumber = (length + length);
+            }
+            sum += (COEF_REMAIN_BIN_REDUCTION + 1 + goRiceParam + codeNumber);
+
+            // Rice parameter adaptation (capped at 4)
+            if (absCoeff[idx] > (COEF_REMAIN_BIN_REDUCTION << goRiceParam))
+                goRiceParam = (goRiceParam + 1) - (goRiceParam >> 2);
+            X265_CHECK(goRiceParam <= 4, "goRiceParam check failure\n");
+        }
+        baseLevel = 2;
+        idx++;
+    }
+    while(idx < numNonZero);
+
+    return sum;
+}
+
+
+/* Estimate the CABAC bit cost of the greater-than-1 flags (and, when the
+ * group qualifies, one greater-than-2 flag) for up to numC1Flag coeffs.
+ * Returns: bit cost in the low 24 bits, the final c1 context state in
+ * bits 26-27, and firstC2Idx in bits 28-31. */
+static uint32_t costC1C2Flag_c(uint16_t *absCoeff, intptr_t numC1Flag, uint8_t *baseCtxMod, intptr_t ctxOffset)
+{
+    uint32_t sum = 0;
+    uint32_t c1 = 1;
+    uint32_t firstC2Idx = 8;       // sentinel: no gt1 coefficient seen yet
+    uint32_t firstC2Flag = 2;      // sentinel: gt2 flag not captured yet
+    uint32_t c1Next = 0xFFFFFFFE;  // packed sequence of future c1 values, 2 bits each
+
+    int idx = 0;
+    do
+    {
+        uint32_t symbol1 = absCoeff[idx] > 1;
+        uint32_t symbol2 = absCoeff[idx] > 2;
+        //encodeBin(symbol1, baseCtxMod[c1]);
+        {
+            const uint32_t mstate = baseCtxMod[c1];
+            baseCtxMod[c1] = sbacNext(mstate, symbol1);
+            sum += sbacGetEntropyBits(mstate, symbol1);
+        }
+
+        // once a gt1 coefficient is seen, c1 freezes at 0 from the next coeff on
+        if (symbol1)
+            c1Next = 0;
+
+        // capture the gt2 flag of the first gt1 coefficient only
+        if (symbol1 + firstC2Flag == 3)
+            firstC2Flag = symbol2;
+
+        // remember the index of the first gt1 coefficient
+        if (symbol1 + firstC2Idx == 9)
+            firstC2Idx  = idx;
+
+        c1 = (c1Next & 3);
+        c1Next >>= 2;
+        X265_CHECK(c1 <= 3, "c1 check failure\n");
+        idx++;
+    }
+    while(idx < numC1Flag);
+
+    if (!c1)
+    {
+        X265_CHECK((firstC2Flag <= 1), "firstC2FlagIdx check failure\n");
+
+        baseCtxMod += ctxOffset;
+
+        //encodeBin(firstC2Flag, baseCtxMod[0]);
+        {
+            const uint32_t mstate = baseCtxMod[0];
+            baseCtxMod[0] = sbacNext(mstate, firstC2Flag);
+            sum += sbacGetEntropyBits(mstate, firstC2Flag);
+        }
+    }
+
+    return (sum & 0x00FFFFFF) + (c1 << 26) + (firstC2Idx << 28);
+}
+
+namespace X265_NS {
+// x265 private namespace
+
+/* Register the portable C reference implementations of the transform,
+ * quantization and coefficient-costing primitives in the primitive table.
+ * NOTE(review): presumably overridden later by optimized (asm) versions
+ * where available — confirm against the primitive setup call order. */
+void setupDCTPrimitives_c(EncoderPrimitives& p)
+{
+    p.dequant_scaling = dequant_scaling_c;
+    p.dequant_normal = dequant_normal_c;
+    p.quant = quant_c;
+    p.nquant = nquant_c;
+    p.dst4x4 = dst4_c;
+    p.cu[BLOCK_4x4].dct   = dct4_c;
+    p.cu[BLOCK_8x8].dct   = dct8_c;
+    p.cu[BLOCK_16x16].dct = dct16_c;
+    p.cu[BLOCK_32x32].dct = dct32_c;
+    p.idst4x4 = idst4_c;
+    p.cu[BLOCK_4x4].idct   = idct4_c;
+    p.cu[BLOCK_8x8].idct   = idct8_c;
+    p.cu[BLOCK_16x16].idct = idct16_c;
+    p.cu[BLOCK_32x32].idct = idct32_c;
+    p.denoiseDct = denoiseDct_c;
+    p.cu[BLOCK_4x4].count_nonzero = count_nonzero_c<4>;
+    p.cu[BLOCK_8x8].count_nonzero = count_nonzero_c<8>;
+    p.cu[BLOCK_16x16].count_nonzero = count_nonzero_c<16>;
+    p.cu[BLOCK_32x32].count_nonzero = count_nonzero_c<32>;
+
+    p.cu[BLOCK_4x4].copy_cnt   = copy_count<4>;
+    p.cu[BLOCK_8x8].copy_cnt   = copy_count<8>;
+    p.cu[BLOCK_16x16].copy_cnt = copy_count<16>;
+    p.cu[BLOCK_32x32].copy_cnt = copy_count<32>;
+
+    p.scanPosLast = scanPosLast_c;
+    p.findPosFirstLast = findPosFirstLast_c;
+    p.costCoeffNxN = costCoeffNxN_c;
+    p.costCoeffRemain = costCoeffRemain_c;
+    p.costC1C2Flag = costC1C2Flag_c;
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/deblock.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,555 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Author: Gopu Govindaswamy <gopu@multicorewareinc.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "common.h"
+#include "deblock.h"
+#include "framedata.h"
+#include "picyuv.h"
+#include "slice.h"
+#include "mv.h"
+
+using namespace X265_NS;
+
+#define DEBLOCK_SMALLEST_BLOCK  8
+#define DEFAULT_INTRA_TC_OFFSET 2
+
+/* Deblock one CTU in the given direction: zero the per-unit boundary
+ * strength map, then recurse through the CU tree. */
+void Deblock::deblockCTU(const CUData* ctu, const CUGeom& cuGeom, int32_t dir)
+{
+    uint8_t bs[MAX_NUM_PARTITIONS];
+
+    memset(bs, 0, cuGeom.numPartitions * sizeof(uint8_t));
+
+    deblockCU(ctu, cuGeom, dir, bs);
+}
+
+/* Boundary-strength seed for a CU edge: 2 when a neighbouring PU exists
+ * across the edge, 0 at a picture boundary or when no neighbour exists. */
+static inline uint8_t bsCuEdge(const CUData* cu, uint32_t absPartIdx, int32_t dir)
+{
+    uint32_t tempPartIdx;
+
+    if (dir == Deblock::EDGE_VER)
+    {
+        if (cu->m_cuPelX + g_zscanToPelX[absPartIdx] == 0)
+            return 0; // edge lies on the left picture border
+        return cu->getPULeft(tempPartIdx, absPartIdx) ? 2 : 0;
+    }
+
+    if (cu->m_cuPelY + g_zscanToPelY[absPartIdx] == 0)
+        return 0; // edge lies on the top picture border
+    return cu->getPUAbove(tempPartIdx, absPartIdx) ? 2 : 0;
+}
+
+/* Deblocking filter process in CU-based (the same function as conventional's)
+ * param dir the direction of the edge in block boundary (horizontal/vertical);
+ * blockStrength accumulates the per-4x4-unit boundary strength for this CTU */
+void Deblock::deblockCU(const CUData* cu, const CUGeom& cuGeom, const int32_t dir, uint8_t blockStrength[])
+{
+    uint32_t absPartIdx = cuGeom.absPartIdx;
+    uint32_t depth = cuGeom.depth;
+    if (cu->m_predMode[absPartIdx] == MODE_NONE)
+        return;
+
+    if (cu->m_cuDepth[absPartIdx] > depth)
+    {
+        // CU is split at this depth: recurse into the four child geometries
+        for (uint32_t subPartIdx = 0; subPartIdx < 4; subPartIdx++)
+        {
+            const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + subPartIdx);
+            if (childGeom.flags & CUGeom::PRESENT)
+                deblockCU(cu, childGeom, dir, blockStrength);
+        }
+        return;
+    }
+
+    uint32_t numUnits = 1 << (cuGeom.log2CUSize - LOG2_UNIT_SIZE);
+    // mark PU, TU and CU edges first, then resolve the final strengths
+    setEdgefilterPU(cu, absPartIdx, dir, blockStrength, numUnits);
+    setEdgefilterTU(cu, absPartIdx, 0, dir, blockStrength);
+    setEdgefilterMultiple(cu, absPartIdx, dir, 0, bsCuEdge(cu, absPartIdx, dir), blockStrength, numUnits);
+
+    uint32_t numParts = cuGeom.numPartitions;
+    for (uint32_t partIdx = absPartIdx; partIdx < absPartIdx + numParts; partIdx++)
+    {
+        // NOTE(review): bit test on the z-order index appears to restrict the
+        // strength refresh to units on the deblocking grid — confirm
+        uint32_t bsCheck = !(partIdx & (1 << dir));
+
+        if (bsCheck && blockStrength[partIdx])
+            blockStrength[partIdx] = getBoundaryStrength(cu, dir, partIdx, blockStrength);
+    }
+
+    const uint32_t partIdxIncr = DEBLOCK_SMALLEST_BLOCK >> LOG2_UNIT_SIZE;
+    uint32_t shiftFactor = (dir == EDGE_VER) ? cu->m_hChromaShift : cu->m_vChromaShift;
+    uint32_t chromaMask = ((DEBLOCK_SMALLEST_BLOCK << shiftFactor) >> LOG2_UNIT_SIZE) - 1;
+    uint32_t e0 = (dir == EDGE_VER ? g_zscanToPelX[absPartIdx] : g_zscanToPelY[absPartIdx]) >> LOG2_UNIT_SIZE;
+
+    for (uint32_t e = 0; e < numUnits; e += partIdxIncr)
+    {
+        edgeFilterLuma(cu, absPartIdx, depth, dir, e, blockStrength);
+        // chroma edges only on the subsampled chroma grid; none for 4:0:0
+        if (cu->m_chromaFormat != X265_CSP_I400 && !((e0 + e) & chromaMask))
+            edgeFilterChroma(cu, absPartIdx, depth, dir, e, blockStrength);
+    }
+}
+
+/* Map (absPartIdx, edgeIdx, baseUnitIdx) to the z-scan index of the unit
+ * whose boundary strength is being addressed; dir selects row/column order. */
+static inline uint32_t calcBsIdx(const CUData* cu, uint32_t absPartIdx, int32_t dir, int32_t edgeIdx, int32_t baseUnitIdx)
+{
+    const uint32_t numUnits = cu->m_slice->m_sps->numPartInCUSize;
+    const uint32_t raster = g_zscanToRaster[absPartIdx];
+
+    return dir ? g_rasterToZscan[raster + edgeIdx * numUnits + baseUnitIdx]
+               : g_rasterToZscan[raster + baseUnitIdx * numUnits + edgeIdx];
+}
+
+/* Write boundary strength 'value' into all numUnits units along the edge
+ * identified by (scanIdx, dir, edgeIdx). */
+void Deblock::setEdgefilterMultiple(const CUData* cu, uint32_t scanIdx, int32_t dir, int32_t edgeIdx, uint8_t value, uint8_t blockStrength[], uint32_t numUnits)
+{
+    X265_CHECK(numUnits > 0, "numUnits edge filter check\n");
+
+    for (uint32_t unit = 0; unit < numUnits; unit++)
+        blockStrength[calcBsIdx(cu, scanIdx, dir, edgeIdx, unit)] = value;
+}
+
+/* Recursively mark TU boundaries (strength 2) down the transform tree. */
+void Deblock::setEdgefilterTU(const CUData* cu, uint32_t absPartIdx, uint32_t tuDepth, int32_t dir, uint8_t blockStrength[])
+{
+    const uint32_t log2TrSize = cu->m_log2CUSize[absPartIdx] - tuDepth;
+
+    if (cu->m_tuDepth[absPartIdx] <= tuDepth)
+    {
+        // leaf TU: mark its leading edge across the full TU width
+        setEdgefilterMultiple(cu, absPartIdx, dir, 0, 2, blockStrength, 1 << (log2TrSize - LOG2_UNIT_SIZE));
+        return;
+    }
+
+    // TU is split: descend into the four quadrants
+    const uint32_t qNumParts = 1 << (log2TrSize - LOG2_UNIT_SIZE - 1) * 2;
+    for (uint32_t qIdx = 0; qIdx < 4; ++qIdx, absPartIdx += qNumParts)
+        setEdgefilterTU(cu, absPartIdx, tuDepth + 1, dir, blockStrength);
+}
+
+/* Mark internal PU boundaries (strength 1) according to the partition
+ * shape; only edges perpendicular to 'dir' are marked. */
+void Deblock::setEdgefilterPU(const CUData* cu, uint32_t absPartIdx, int32_t dir, uint8_t blockStrength[], uint32_t numUnits)
+{
+    const uint32_t half = numUnits >> 1;
+    const uint32_t quarter = numUnits >> 2;
+
+    switch (cu->m_partSize[absPartIdx])
+    {
+    case SIZE_2NxN:
+        if (dir == EDGE_HOR)
+            setEdgefilterMultiple(cu, absPartIdx, dir, half, 1, blockStrength, numUnits);
+        break;
+    case SIZE_Nx2N:
+        if (dir == EDGE_VER)
+            setEdgefilterMultiple(cu, absPartIdx, dir, half, 1, blockStrength, numUnits);
+        break;
+    case SIZE_NxN:
+        // quad split: the mid-line edge exists in both directions
+        setEdgefilterMultiple(cu, absPartIdx, dir, half, 1, blockStrength, numUnits);
+        break;
+    case SIZE_2NxnU:
+        if (dir == EDGE_HOR)
+            setEdgefilterMultiple(cu, absPartIdx, dir, quarter, 1, blockStrength, numUnits);
+        break;
+    case SIZE_nLx2N:
+        if (dir == EDGE_VER)
+            setEdgefilterMultiple(cu, absPartIdx, dir, quarter, 1, blockStrength, numUnits);
+        break;
+    case SIZE_2NxnD:
+        if (dir == EDGE_HOR)
+            setEdgefilterMultiple(cu, absPartIdx, dir, numUnits - quarter, 1, blockStrength, numUnits);
+        break;
+    case SIZE_nRx2N:
+        if (dir == EDGE_VER)
+            setEdgefilterMultiple(cu, absPartIdx, dir, numUnits - quarter, 1, blockStrength, numUnits);
+        break;
+
+    case SIZE_2Nx2N:
+    default:
+        // no internal PU edge
+        break;
+    }
+}
+
+/* Compute the final boundary strength (0, 1 or 2) between partition partQ
+ * and its neighbour across the edge in direction 'dir':
+ *   2 - either side is intra;
+ *   1 - TU edge with coded residual, or a motion discontinuity;
+ *   0 - otherwise (no filtering).
+ * NOTE(review): cuP is dereferenced without a null check — assumes this is
+ * only reached for edges whose neighbour exists (pre-marked strength). */
+uint8_t Deblock::getBoundaryStrength(const CUData* cuQ, int32_t dir, uint32_t partQ, const uint8_t blockStrength[])
+{
+    // Calculate block index
+    uint32_t partP;
+    const CUData* cuP = (dir == EDGE_VER ? cuQ->getPULeft(partP, partQ) : cuQ->getPUAbove(partP, partQ));
+
+    // Set BS for Intra MB : BS = 2
+    if (cuP->isIntra(partP) || cuQ->isIntra(partQ))
+        return 2;
+
+    // Set BS for not Intra MB : BS = 1 or 0
+    if (blockStrength[partQ] > 1 &&
+        (cuQ->getCbf(partQ, TEXT_LUMA, cuQ->m_tuDepth[partQ]) ||
+         cuP->getCbf(partP, TEXT_LUMA, cuP->m_tuDepth[partP])))
+        return 1;
+
+    static const MV zeroMv(0, 0);
+    const Slice* const sliceQ = cuQ->m_slice;
+    const Slice* const sliceP = cuP->m_slice;
+
+    const Frame* refP0 = sliceP->m_refFrameList[0][cuP->m_refIdx[0][partP]];
+    const Frame* refQ0 = sliceQ->m_refFrameList[0][cuQ->m_refIdx[0][partQ]];
+    const MV& mvP0 = refP0 ? cuP->m_mv[0][partP] : zeroMv;
+    const MV& mvQ0 = refQ0 ? cuQ->m_mv[0][partQ] : zeroMv;
+
+    if (sliceQ->isInterP() && sliceP->isInterP())
+    {
+        // P slices: different reference or MV delta of 4 or more in either component
+        return ((refP0 != refQ0) ||
+                (abs(mvQ0.x - mvP0.x) >= 4) || (abs(mvQ0.y - mvP0.y) >= 4)) ? 1 : 0;
+    }
+
+    // (sliceQ->isInterB() || sliceP->isInterB())
+    const Frame* refP1 = sliceP->m_refFrameList[1][cuP->m_refIdx[1][partP]];
+    const Frame* refQ1 = sliceQ->m_refFrameList[1][cuQ->m_refIdx[1][partQ]];
+    const MV& mvP1 = refP1 ? cuP->m_mv[1][partP] : zeroMv;
+    const MV& mvQ1 = refQ1 ? cuQ->m_mv[1][partQ] : zeroMv;
+
+    if (((refP0 == refQ0) && (refP1 == refQ1)) || ((refP0 == refQ1) && (refP1 == refQ0)))
+    {
+        if (refP0 != refP1) // Different L0 & L1
+        {
+            // compare the MVs that point at the same reference frame
+            if (refP0 == refQ0)
+                return ((abs(mvQ0.x - mvP0.x) >= 4) || (abs(mvQ0.y - mvP0.y) >= 4) ||
+                        (abs(mvQ1.x - mvP1.x) >= 4) || (abs(mvQ1.y - mvP1.y) >= 4)) ? 1 : 0;
+            else
+                return ((abs(mvQ1.x - mvP0.x) >= 4) || (abs(mvQ1.y - mvP0.y) >= 4) ||
+                        (abs(mvQ0.x - mvP1.x) >= 4) || (abs(mvQ0.y - mvP1.y) >= 4)) ? 1 : 0;
+        }
+        else // Same L0 & L1
+        {
+            // both pairings must differ for the edge to be filtered
+            return (((abs(mvQ0.x - mvP0.x) >= 4) || (abs(mvQ0.y - mvP0.y) >= 4) ||
+                     (abs(mvQ1.x - mvP1.x) >= 4) || (abs(mvQ1.y - mvP1.y) >= 4)) &&
+                    ((abs(mvQ1.x - mvP0.x) >= 4) || (abs(mvQ1.y - mvP0.y) >= 4) ||
+                     (abs(mvQ0.x - mvP1.x) >= 4) || (abs(mvQ0.y - mvP1.y) >= 4))) ? 1 : 0;
+        }
+    }
+
+    // for all different Ref_Idx
+    return 1;
+}
+
+/* Second-derivative activity measure on the P (before-edge) side of one
+ * line: |p2 - 2*p1 + p0|, where 'offset' steps across the edge. */
+static inline int32_t calcDP(pixel* src, intptr_t offset)
+{
+    return abs(static_cast<int32_t>(src[-offset * 3]) - 2 * src[-offset * 2] + src[-offset]);
+}
+
+/* Second-derivative activity measure on the Q (after-edge) side of one
+ * line: |q0 - 2*q1 + q2|, where 'offset' steps across the edge. */
+static inline int32_t calcDQ(pixel* src, intptr_t offset)
+{
+    return abs(static_cast<int32_t>(src[0]) - 2 * src[offset] + src[offset * 2]);
+}
+
+/* Strong/weak filter decision for a single line across the edge: true when
+ * the signal is flat on both sides (|m0-m3| + |m7-m4| < beta/8) and the
+ * step across the edge itself is small (|m3-m4| < (5*tc+1)/2). */
+static inline bool useStrongFiltering(intptr_t offset, int32_t beta, int32_t tc, pixel* src)
+{
+    int16_t m4     = (int16_t)src[0];
+    int16_t m3     = (int16_t)src[-offset];
+    int16_t m7     = (int16_t)src[offset * 3];
+    int16_t m0     = (int16_t)src[-offset * 4];
+    int32_t strong = abs(m0 - m3) + abs(m7 - m4);
+
+    return (strong < (beta >> 3)) && (abs(m3 - m4) < ((tc * 5 + 1) >> 1));
+}
+
+/* Strong deblocking filter for the luminance component; modifies three
+ * samples on each side of the edge for UNIT_SIZE consecutive lines.
+ * (The original header documented maskP1/maskQ1, which belong to the weak
+ * filter variant below, not to this function.)
+ * \param src     pointer to picture data at the edge (sample m4, Q side)
+ * \param srcStep step between successive lines along the edge
+ * \param offset  step across the edge (1 = vertical edge, stride = horizontal)
+ * \param tc      clipping threshold; sample deltas are clipped to +/- 2*tc
+ * \param maskP   all-ones to enable filtering on partP, 0 to disable
+ * \param maskQ   all-ones to enable filtering on partQ, 0 to disable */
+static inline void pelFilterLumaStrong(pixel* src, intptr_t srcStep, intptr_t offset, int32_t tc, int32_t maskP, int32_t maskQ)
+{
+    int32_t tc2 = 2 * tc;
+    int32_t tcP = (tc2 & maskP);   // clip range collapses to 0 when P is masked off
+    int32_t tcQ = (tc2 & maskQ);   // likewise for Q
+    for (int32_t i = 0; i < UNIT_SIZE; i++, src += srcStep)
+    {
+        // m0..m3 lie on the P side of the edge, m4..m7 on the Q side
+        int16_t m4  = (int16_t)src[0];
+        int16_t m3  = (int16_t)src[-offset];
+        int16_t m5  = (int16_t)src[offset];
+        int16_t m2  = (int16_t)src[-offset * 2];
+        int16_t m6  = (int16_t)src[offset * 2];
+        int16_t m1  = (int16_t)src[-offset * 3];
+        int16_t m7  = (int16_t)src[offset * 3];
+        int16_t m0  = (int16_t)src[-offset * 4];
+        src[-offset * 3] = (pixel)(x265_clip3(-tcP, tcP, ((2 * m0 + 3 * m1 + m2 + m3 + m4 + 4) >> 3) - m1) + m1);
+        src[-offset * 2] = (pixel)(x265_clip3(-tcP, tcP, ((m1 + m2 + m3 + m4 + 2) >> 2) - m2) + m2);
+        src[-offset]     = (pixel)(x265_clip3(-tcP, tcP, ((m1 + 2 * m2 + 2 * m3 + 2 * m4 + m5 + 4) >> 3) - m3) + m3);
+        src[0]           = (pixel)(x265_clip3(-tcQ, tcQ, ((m2 + 2 * m3 + 2 * m4 + 2 * m5 + m6 + 4) >> 3) - m4) + m4);
+        src[offset]      = (pixel)(x265_clip3(-tcQ, tcQ, ((m3 + m4 + m5 + m6 + 2) >> 2) - m5) + m5);
+        src[offset * 2]  = (pixel)(x265_clip3(-tcQ, tcQ, ((m3 + m4 + m5 + 3 * m6 + 2 * m7 + 4) >> 3) - m6) + m6);
+    }
+}
+
+/* Weak deblocking filter for the luminance component; modifies at most two
+ * samples on each side of the edge for UNIT_SIZE consecutive lines.
+ * maskP/maskQ gate the first sample on each side; maskP1/maskQ1 (ANDed
+ * with maskP/maskQ) additionally gate the second sample. */
+static inline void pelFilterLuma(pixel* src, intptr_t srcStep, intptr_t offset, int32_t tc, int32_t maskP, int32_t maskQ,
+                                 int32_t maskP1, int32_t maskQ1)
+{
+    int32_t thrCut = tc * 10;
+    int32_t tc2 = tc >> 1;    // tighter clip range for the second sample
+    maskP1 &= maskP;
+    maskQ1 &= maskQ;
+
+    for (int32_t i = 0; i < UNIT_SIZE; i++, src += srcStep)
+    {
+        int16_t m4  = (int16_t)src[0];
+        int16_t m3  = (int16_t)src[-offset];
+        int16_t m5  = (int16_t)src[offset];
+        int16_t m2  = (int16_t)src[-offset * 2];
+
+        int32_t delta = (9 * (m4 - m3) - 3 * (m5 - m2) + 8) >> 4;
+
+        // a large delta indicates a natural edge: leave this line unfiltered
+        if (abs(delta) < thrCut)
+        {
+            delta = x265_clip3(-tc, tc, delta);
+
+            src[-offset] = x265_clip(m3 + (delta & maskP));
+            src[0] = x265_clip(m4 - (delta & maskQ));
+            if (maskP1)
+            {
+                int16_t m1  = (int16_t)src[-offset * 3];
+                int32_t delta1 = x265_clip3(-tc2, tc2, ((((m1 + m3 + 1) >> 1) - m2 + delta) >> 1));
+                src[-offset * 2] = x265_clip(m2 + delta1);
+            }
+            if (maskQ1)
+            {
+                int16_t m6  = (int16_t)src[offset * 2];
+                int32_t delta2 = x265_clip3(-tc2, tc2, ((((m6 + m4 + 1) >> 1) - m5 - delta) >> 1));
+                src[offset] = x265_clip(m5 + delta2);
+            }
+        }
+    }
+}
+
+/* Deblocking of one line/column for the chrominance component; modifies one
+ * sample on each side of the edge for UNIT_SIZE consecutive lines.
+ * \param src     pointer to picture data
+ * \param offset  offset value for picture data
+ * \param tc      tc value
+ * \param maskP   all-ones to enable filtering on partP, 0 to disable
+ *                (the original comment said "disable"; the mask is ANDed
+ *                with delta, so all-ones enables, matching the luma filters)
+ * \param maskQ   all-ones to enable filtering on partQ, 0 to disable */
+static inline void pelFilterChroma(pixel* src, intptr_t srcStep, intptr_t offset, int32_t tc, int32_t maskP, int32_t maskQ)
+{
+    for (int32_t i = 0; i < UNIT_SIZE; i++, src += srcStep)
+    {
+        int16_t m4  = (int16_t)src[0];
+        int16_t m3  = (int16_t)src[-offset];
+        int16_t m5  = (int16_t)src[offset];
+        int16_t m2  = (int16_t)src[-offset * 2];
+
+        int32_t delta = x265_clip3(-tc, tc, ((((m4 - m3) * 4) + m2 - m5 + 4) >> 3));
+        src[-offset] = x265_clip(m3 + (delta & maskP));
+        src[0] = x265_clip(m4 - (delta & maskQ));
+    }
+}
+
+/* Apply the luma deblocking filter along one edge (vertical or horizontal)
+ * of the CU at absPartIdx.  For each 4x4 unit on the edge with non-zero
+ * boundary strength, derive beta/tc thresholds from the averaged QP of the
+ * two sides, then apply the strong or weak filter (or skip the segment). */
+void Deblock::edgeFilterLuma(const CUData* cuQ, uint32_t absPartIdx, uint32_t depth, int32_t dir, int32_t edge, const uint8_t blockStrength[])
+{
+    PicYuv* reconPic = cuQ->m_encData->m_reconPic;
+    pixel* src = reconPic->getLumaAddr(cuQ->m_cuAddr, absPartIdx);
+    intptr_t stride = reconPic->m_stride;
+    const PPS* pps = cuQ->m_slice->m_pps;
+
+    intptr_t offset, srcStep;
+
+    int32_t maskP = -1;
+    int32_t maskQ = -1;
+    // the PPS signals these offsets divided by two
+    int32_t betaOffset = pps->deblockingFilterBetaOffsetDiv2 << 1;
+    int32_t tcOffset = pps->deblockingFilterTcOffsetDiv2 << 1;
+    bool bCheckNoFilter = pps->bTransquantBypassEnabled;
+
+    if (dir == EDGE_VER)
+    {
+        offset = 1;        // step across the edge
+        srcStep = stride;  // step along the edge
+        src += (edge << LOG2_UNIT_SIZE);
+    }
+    else // (dir == EDGE_HOR)
+    {
+        offset = stride;
+        srcStep = 1;
+        src += (edge << LOG2_UNIT_SIZE) * stride;
+    }
+
+    uint32_t numUnits = cuQ->m_slice->m_sps->numPartInCUSize >> depth;
+    for (uint32_t idx = 0; idx < numUnits; idx++)
+    {
+        uint32_t partQ = calcBsIdx(cuQ, absPartIdx, dir, edge, idx);
+        uint32_t bs = blockStrength[partQ];
+
+        if (!bs)
+            continue;
+
+        // Derive neighboring PU index
+        uint32_t partP;
+        const CUData* cuP = (dir == EDGE_VER ? cuQ->getPULeft(partP, partQ) : cuQ->getPUAbove(partP, partQ));
+
+        if (bCheckNoFilter)
+        {
+            // check if each of PUs is lossless coded
+            maskP = cuP->m_tqBypass[partP] - 1;   // bypass 1 -> mask 0 (skip), 0 -> -1 (filter)
+            maskQ = cuQ->m_tqBypass[partQ] - 1;
+            if (!(maskP | maskQ))
+                continue;
+        }
+
+        int32_t qpQ = cuQ->m_qp[partQ];
+        int32_t qpP = cuP->m_qp[partP];
+        int32_t qp  = (qpP + qpQ + 1) >> 1;
+
+        int32_t indexB = x265_clip3(0, QP_MAX_SPEC, qp + betaOffset);
+
+        const int32_t bitdepthShift = X265_DEPTH - 8;
+        int32_t beta = s_betaTable[indexB] << bitdepthShift;
+
+        intptr_t unitOffset = idx * srcStep << LOG2_UNIT_SIZE;
+        // local activity (second derivative) sampled on lines 0 and 3 of the unit
+        int32_t dp0 = calcDP(src + unitOffset              , offset);
+        int32_t dq0 = calcDQ(src + unitOffset              , offset);
+        int32_t dp3 = calcDP(src + unitOffset + srcStep * 3, offset);
+        int32_t dq3 = calcDQ(src + unitOffset + srcStep * 3, offset);
+        int32_t d0 = dp0 + dq0;
+        int32_t d3 = dp3 + dq3;
+
+        int32_t d =  d0 + d3;
+
+        if (d >= beta)
+            continue;   // too much local activity: likely a real edge, leave unfiltered
+
+        int32_t indexTC = x265_clip3(0, QP_MAX_SPEC + DEFAULT_INTRA_TC_OFFSET, int32_t(qp + DEFAULT_INTRA_TC_OFFSET * (bs - 1) + tcOffset));
+        int32_t tc = s_tcTable[indexTC] << bitdepthShift;
+
+        bool sw = (2 * d0 < (beta >> 2) &&
+                   2 * d3 < (beta >> 2) &&
+                   useStrongFiltering(offset, beta, tc, src + unitOffset              ) &&
+                   useStrongFiltering(offset, beta, tc, src + unitOffset + srcStep * 3));
+
+        if (sw)
+            pelFilterLumaStrong(src + unitOffset, srcStep, offset, tc, maskP, maskQ);
+        else
+        {
+            int32_t sideThreshold = (beta + (beta >> 1)) >> 3;
+            int32_t dp = dp0 + dp3;
+            int32_t dq = dq0 + dq3;
+            // per-side decision whether the second sample is also filtered
+            int32_t maskP1 = (dp < sideThreshold ? -1 : 0);
+            int32_t maskQ1 = (dq < sideThreshold ? -1 : 0);
+
+            pelFilterLuma(src + unitOffset, srcStep, offset, tc, maskP, maskQ, maskP1, maskQ1);
+        }
+    }
+}
+
+/* Apply the chroma (Cb and Cr) deblocking filter along one edge of the CU.
+ * Only units with boundary strength 2 are filtered (BS <= 1 is skipped). */
+void Deblock::edgeFilterChroma(const CUData* cuQ, uint32_t absPartIdx, uint32_t depth, int32_t dir, int32_t edge, const uint8_t blockStrength[])
+{
+    int32_t chFmt = cuQ->m_chromaFormat, chromaShift;
+    intptr_t offset, srcStep;
+    const PPS* pps = cuQ->m_slice->m_pps;
+
+    int32_t maskP = -1;
+    int32_t maskQ = -1;
+    int32_t tcOffset = pps->deblockingFilterTcOffsetDiv2 << 1;
+
+    X265_CHECK(((dir == EDGE_VER)
+                ? ((g_zscanToPelX[absPartIdx] + edge * UNIT_SIZE) >> cuQ->m_hChromaShift)
+                : ((g_zscanToPelY[absPartIdx] + edge * UNIT_SIZE) >> cuQ->m_vChromaShift)) % DEBLOCK_SMALLEST_BLOCK == 0,
+               "invalid edge\n");
+
+    PicYuv* reconPic = cuQ->m_encData->m_reconPic;
+    intptr_t stride = reconPic->m_strideC;
+    intptr_t srcOffset = reconPic->getChromaAddrOffset(cuQ->m_cuAddr, absPartIdx);
+    bool bCheckNoFilter = pps->bTransquantBypassEnabled;
+
+    if (dir == EDGE_VER)
+    {
+        // units along a vertical edge run vertically, hence the v shift here
+        chromaShift = cuQ->m_vChromaShift;
+        srcOffset += (edge << (LOG2_UNIT_SIZE - cuQ->m_hChromaShift));
+        offset     = 1;
+        srcStep    = stride;
+    }
+    else // (dir == EDGE_HOR)
+    {
+        chromaShift = cuQ->m_hChromaShift;
+        srcOffset += edge * stride << (LOG2_UNIT_SIZE - cuQ->m_vChromaShift);
+        offset     = stride;
+        srcStep    = 1;
+    }
+
+    pixel* srcChroma[2];
+    srcChroma[0] = reconPic->m_picOrg[1] + srcOffset;   // Cb
+    srcChroma[1] = reconPic->m_picOrg[2] + srcOffset;   // Cr
+
+    uint32_t numUnits = cuQ->m_slice->m_sps->numPartInCUSize >> (depth + chromaShift);
+    for (uint32_t idx = 0; idx < numUnits; idx++)
+    {
+        uint32_t partQ = calcBsIdx(cuQ, absPartIdx, dir, edge, idx << chromaShift);
+        uint32_t bs = blockStrength[partQ];
+
+        // chroma is filtered only where BS == 2
+        if (bs <= 1)
+            continue;
+
+        // Derive neighboring PU index
+        uint32_t partP;
+        const CUData* cuP = (dir == EDGE_VER ? cuQ->getPULeft(partP, partQ) : cuQ->getPUAbove(partP, partQ));
+
+        if (bCheckNoFilter)
+        {
+            // check if each of PUs is lossless coded
+            maskP = (cuP->m_tqBypass[partP] ? 0 : -1);
+            maskQ = (cuQ->m_tqBypass[partQ] ? 0 : -1);
+            if (!(maskP | maskQ))
+                continue;
+        }
+
+        int32_t qpQ = cuQ->m_qp[partQ];
+        int32_t qpP = cuP->m_qp[partP];
+        int32_t qpA = (qpP + qpQ + 1) >> 1;
+
+        intptr_t unitOffset = idx * srcStep << LOG2_UNIT_SIZE;
+        for (uint32_t chromaIdx = 0; chromaIdx < 2; chromaIdx++)
+        {
+            int32_t qp = qpA + pps->chromaQpOffset[chromaIdx];
+            // luma-to-chroma QP mapping: table lookup for 4:2:0, clamp otherwise
+            if (qp >= 30)
+                qp = chFmt == X265_CSP_I420 ? g_chromaScale[qp] : X265_MIN(qp, QP_MAX_SPEC);
+
+            int32_t indexTC = x265_clip3(0, QP_MAX_SPEC + DEFAULT_INTRA_TC_OFFSET, int32_t(qp + DEFAULT_INTRA_TC_OFFSET + tcOffset));
+            const int32_t bitdepthShift = X265_DEPTH - 8;
+            int32_t tc = s_tcTable[indexTC] << bitdepthShift;
+            pixel* srcC = srcChroma[chromaIdx];
+
+            pelFilterChroma(srcC + unitOffset, srcStep, offset, tc, maskP, maskQ);
+        }
+    }
+}
+
+/* tc clipping-threshold lookup, indexed by the clipped QP + tc offset
+ * (see indexTC computation in edgeFilterLuma/edgeFilterChroma) */
+const uint8_t Deblock::s_tcTable[54] =
+{
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+    2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16, 18, 20, 22, 24
+};
+
+/* beta activity-threshold lookup, indexed by the clipped QP + beta offset
+ * (see indexB computation in edgeFilterLuma) */
+const uint8_t Deblock::s_betaTable[52] =
+{
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+    18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64
+};
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/deblock.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,63 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Author: Gopu Govindaswamy <gopu@multicorewareinc.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_DEBLOCK_H
+#define X265_DEBLOCK_H
+
+#include "common.h"
+
+namespace X265_NS {
+// private namespace
+
+class CUData;
+struct CUGeom;
+
+/* HEVC in-loop deblocking filter.  deblockCTU() processes one CTU for one
+ * edge direction (EDGE_VER or EDGE_HOR); the protected helpers mark which
+ * TU/PU edges need filtering, derive per-4x4 boundary strengths, and run
+ * the luma/chroma edge filters using the s_tcTable/s_betaTable thresholds. */
+class Deblock
+{
+public:
+    enum { EDGE_VER, EDGE_HOR };
+
+    void deblockCTU(const CUData* ctu, const CUGeom& cuGeom, int32_t dir);
+
+protected:
+
+    // CU-level deblocking function
+    void deblockCU(const CUData* cu, const CUGeom& cuGeom, const int32_t dir, uint8_t blockStrength[]);
+
+    // set filtering functions
+    void setEdgefilterTU(const CUData* cu, uint32_t absPartIdx, uint32_t tuDepth, int32_t dir, uint8_t blockStrength[]);
+    void setEdgefilterPU(const CUData* cu, uint32_t absPartIdx, int32_t dir, uint8_t blockStrength[], uint32_t numUnits);
+    void setEdgefilterMultiple(const CUData* cu, uint32_t absPartIdx, int32_t dir, int32_t edgeIdx, uint8_t value, uint8_t blockStrength[], uint32_t numUnits);
+
+    // get filtering functions
+    uint8_t getBoundaryStrength(const CUData* cuQ, int32_t dir, uint32_t partQ, const uint8_t blockStrength[]);
+
+    // filter luma/chroma functions
+    void edgeFilterLuma(const CUData* cuQ, uint32_t absPartIdx, uint32_t depth, int32_t dir, int32_t edge, const uint8_t blockStrength[]);
+    void edgeFilterChroma(const CUData* cuQ, uint32_t absPartIdx, uint32_t depth, int32_t dir, int32_t edge, const uint8_t blockStrength[]);
+
+    static const uint8_t s_tcTable[54];
+    static const uint8_t s_betaTable[52];
+};
+}
+#endif // ifndef X265_DEBLOCK_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/frame.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,129 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Author: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "common.h"
+#include "frame.h"
+#include "picyuv.h"
+#include "framedata.h"
+
+using namespace X265_NS;
+
+/* Default-construct a Frame with every pointer and flag cleared, so that
+ * destroy() is safe even when create()/allocEncodeData() were never run.
+ * Fix: m_fencPic was previously left uninitialized, but destroy() tests
+ * `if (m_fencPic)` -- destroying a never-created Frame read an
+ * indeterminate pointer (undefined behavior). */
+Frame::Frame()
+{
+    m_bChromaExtended = false;
+    m_lowresInit = false;
+    m_reconRowCount.set(0);
+    m_countRefEncoders = 0;
+    m_encData = NULL;
+    m_reconPic = NULL;
+    m_fencPic = NULL;
+    m_quantOffsets = NULL;
+    m_next = NULL;
+    m_prev = NULL;
+    m_param = NULL;
+    memset(&m_lowres, 0, sizeof(m_lowres));
+}
+
+/* Allocate the source-picture PicYuv and the lowres (lookahead) structures.
+ * When quantOffsets is non-NULL, a per-CU offset array is also allocated.
+ * NOTE(review): the quantOffsets contents are not copied here -- presumably
+ * the array is filled from the x265_picture elsewhere; confirm at callers.
+ * Returns false when either PicYuv or lowres creation fails (partially
+ * constructed members are released later by destroy()). */
+bool Frame::create(x265_param *param, float* quantOffsets)
+{
+    m_fencPic = new PicYuv;
+    m_param = param;
+
+    if (m_fencPic->create(param->sourceWidth, param->sourceHeight, param->internalCsp) &&
+        m_lowres.create(m_fencPic, param->bframes, !!param->rc.aqMode))
+    {
+        if (quantOffsets)
+        {
+            int32_t cuCount = m_lowres.maxBlocksInRow * m_lowres.maxBlocksInCol;
+            m_quantOffsets = new float[cuCount];
+        }
+        return true;
+    }
+    return false;
+}
+
+/* Attach encode-time data (FrameData) and a reconstructed PicYuv to this
+ * frame.  The recon planes are zero-filled because SAO may read past the
+ * true picture boundary; cu/pu offset tables are shared from the SPS
+ * rather than recomputed.  Returns false when allocation fails. */
+bool Frame::allocEncodeData(x265_param *param, const SPS& sps)
+{
+    m_encData = new FrameData;
+    m_reconPic = new PicYuv;
+    m_encData->m_reconPic = m_reconPic;
+    bool ok = m_encData->create(*param, sps) && m_reconPic->create(param->sourceWidth, param->sourceHeight, param->internalCsp);
+    if (ok)
+    {
+        /* initialize right border of m_reconpicYuv as SAO may read beyond the
+         * end of the picture accessing uninitialized pixels */
+        int maxHeight = sps.numCuInHeight * g_maxCUSize;
+        memset(m_reconPic->m_picOrg[0], 0, sizeof(pixel) * m_reconPic->m_stride * maxHeight);
+        if (m_reconPic->m_picCsp != X265_CSP_I400) {
+            memset(m_reconPic->m_picOrg[1], 0, sizeof(pixel) * m_reconPic->m_strideC * (maxHeight >> m_reconPic->m_vChromaShift));
+            memset(m_reconPic->m_picOrg[2], 0, sizeof(pixel) * m_reconPic->m_strideC * (maxHeight >> m_reconPic->m_vChromaShift));
+        }
+
+        /* use pre-calculated cu/pu offsets cached in the SPS structure */
+        m_reconPic->m_cuOffsetY = sps.cuOffsetY;
+        m_reconPic->m_buOffsetY = sps.buOffsetY;
+        if (m_reconPic->m_picCsp != X265_CSP_I400) {
+            m_reconPic->m_cuOffsetC = sps.cuOffsetC;
+            m_reconPic->m_buOffsetC = sps.buOffsetC;
+        }
+    }
+    return ok;
+}
+
+/* prepare to re-use a FrameData instance to encode a new picture */
+void Frame::reinit(const SPS& sps)
+{
+    m_bChromaExtended = false;
+    m_reconPic = m_encData->m_reconPic;   // assumes allocEncodeData() ran earlier
+    m_encData->reinit(sps);
+}
+
+/* Release everything owned by this Frame.  Pointers are reset to NULL so
+ * the object stays in a safely destroyable state (a second destroy() is a
+ * no-op).  Fix: m_quantOffsets is now nulled after release like the other
+ * members, and the redundant null guard is dropped -- delete[] on a null
+ * pointer is defined as a no-op in C++. */
+void Frame::destroy()
+{
+    if (m_encData)
+    {
+        m_encData->destroy();
+        delete m_encData;
+        m_encData = NULL;
+    }
+
+    if (m_fencPic)
+    {
+        m_fencPic->destroy();
+        delete m_fencPic;
+        m_fencPic = NULL;
+    }
+
+    if (m_reconPic)
+    {
+        m_reconPic->destroy();
+        delete m_reconPic;
+        m_reconPic = NULL;
+    }
+
+    delete[] m_quantOffsets;
+    m_quantOffsets = NULL;
+
+    m_lowres.destroy();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/frame.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,81 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Author: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_FRAME_H
+#define X265_FRAME_H
+
+#include "common.h"
+#include "lowres.h"
+#include "threading.h"
+
+namespace X265_NS {
+// private namespace
+
+class FrameData;
+class PicYuv;
+struct SPS;
+
+#define IS_REFERENCED(frame) (frame->m_lowres.sliceType != X265_TYPE_B) 
+
+/* One source picture plus, once encoding begins, its encode-time data.
+ * Frames are linked into doubly linked PicLists via m_next/m_prev. */
+class Frame
+{
+public:
+
+    /* These two items will be NULL until the Frame begins to be encoded, at which point
+     * it will be assigned a FrameData instance, which comes with a reconstructed image PicYuv */
+    FrameData*             m_encData;
+    PicYuv*                m_reconPic;
+
+    /* Data associated with x265_picture */
+    PicYuv*                m_fencPic;
+    int                    m_poc;
+    int64_t                m_pts;                // user provided presentation time stamp
+    int64_t                m_reorderedPts;
+    int64_t                m_dts;
+    int32_t                m_forceqp;            // Force to use the qp specified in qp file
+    void*                  m_userData;           // user provided pointer passed in with this picture
+
+    Lowres                 m_lowres;
+    bool                   m_lowresInit;         // lowres init complete (pre-analysis)
+    bool                   m_bChromaExtended;    // orig chroma planes motion extended for weight analysis
+
+    float*                 m_quantOffsets;       // per-CU qp offsets; allocated in create() when x265_picture supplies quantOffsets (not a pointer into x265_picture, despite the original comment)
+
+    /* Frame Parallelism - notification between FrameEncoders of available motion reference rows */
+    ThreadSafeInteger      m_reconRowCount;      // count of CTU rows completely reconstructed and extended for motion reference
+    volatile uint32_t      m_countRefEncoders;   // count of FrameEncoder threads monitoring m_reconRowCount
+
+    Frame*                 m_next;               // PicList doubly linked list pointers
+    Frame*                 m_prev;
+    x265_param*            m_param;              // Points to the latest param set for the frame.
+    x265_analysis_data     m_analysisData;
+    Frame();
+
+    bool create(x265_param *param, float* quantOffsets);
+    bool allocEncodeData(x265_param *param, const SPS& sps);
+    void reinit(const SPS& sps);
+    void destroy();
+};
+}
+
+#endif // ifndef X265_FRAME_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/framedata.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,69 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Author: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "framedata.h"
+#include "picyuv.h"
+
+using namespace X265_NS;
+
+/* Zero-initialize every member.
+ * NOTE(review): memset over *this assumes the class is effectively trivial
+ * (no vtable, no members with non-trivial constructors); holds for the
+ * members declared in framedata.h, but re-verify when adding members. */
+FrameData::FrameData()
+{
+    memset(this, 0, sizeof(*this));
+}
+
+/* Allocate the Slice, the per-CTU CUData array and the per-CU/per-row
+ * rate-control stat arrays, then reset the stats.  Returns false when a
+ * CHECKED_MALLOC fails.
+ * NOTE(review): on the failure path the earlier new/create allocations are
+ * not released here -- presumably the caller invokes destroy(); confirm. */
+bool FrameData::create(const x265_param& param, const SPS& sps)
+{
+    m_param = &param;
+    m_slice  = new Slice;
+    m_picCTU = new CUData[sps.numCUsInFrame];
+
+    m_cuMemPool.create(0, param.internalCsp, sps.numCUsInFrame);
+    for (uint32_t ctuAddr = 0; ctuAddr < sps.numCUsInFrame; ctuAddr++)
+        m_picCTU[ctuAddr].initialize(m_cuMemPool, 0, param.internalCsp, ctuAddr);
+
+    CHECKED_MALLOC(m_cuStat, RCStatCU, sps.numCUsInFrame);
+    CHECKED_MALLOC(m_rowStat, RCStatRow, sps.numCuInHeight);
+    reinit(sps);
+    return true;
+
+fail:
+    return false;
+}
+
+/* Clear the per-CU and per-row rate-control stats before this FrameData is
+ * re-used for a new picture. */
+void FrameData::reinit(const SPS& sps)
+{
+    memset(m_cuStat, 0, sps.numCUsInFrame * sizeof(*m_cuStat));
+    memset(m_rowStat, 0, sps.numCuInHeight * sizeof(*m_rowStat));
+}
+
+/* Release everything owned by this FrameData.  delete on a null pointer is
+ * a no-op, so members that were never allocated (e.g. m_saoParam) are safe
+ * to delete unconditionally. */
+void FrameData::destroy()
+{
+    delete [] m_picCTU;
+    delete m_slice;
+    delete m_saoParam;
+
+    m_cuMemPool.destroy();
+
+    X265_FREE(m_cuStat);
+    X265_FREE(m_rowStat);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/framedata.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,152 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Author: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_FRAMEDATA_H
+#define X265_FRAMEDATA_H
+
+#include "common.h"
+#include "slice.h"
+#include "cudata.h"
+
+namespace X265_NS {
+// private namespace
+
+class PicYuv;
+class JobProvider;
+
+#define INTER_MODES 4 // 2Nx2N, 2NxN, Nx2N, AMP modes
+#define INTRA_MODES 3 // DC, Planar, Angular modes
+
+/* Current frame stats for 2 pass encoding and summary logging; counters
+ * are accumulated during the encode, percentages derived from them. */
+struct FrameStats
+{
+    int         mvBits;    /* MV bits (MV+Ref+Block Type) */
+    int         coeffBits; /* Texture bits (DCT coefs) */
+    int         miscBits;
+
+    int         intra8x8Cnt;
+    int         inter8x8Cnt;
+    int         skip8x8Cnt;
+
+    /* CU type counts stored as percentage */
+    double      percent8x8Intra;
+    double      percent8x8Inter;
+    double      percent8x8Skip;
+    double      avgLumaDistortion;
+    double      avgChromaDistortion;
+    double      avgPsyEnergy;
+    double      avgResEnergy;
+    double      percentIntraNxN;
+    double      percentSkipCu[NUM_CU_DEPTH];
+    double      percentMergeCu[NUM_CU_DEPTH];
+    double      percentIntraDistribution[NUM_CU_DEPTH][INTRA_MODES];
+    double      percentInterDistribution[NUM_CU_DEPTH][3];           // 2Nx2N, RECT, AMP modes percentage
+
+    uint64_t    cntIntraNxN;
+    uint64_t    totalCu;
+    uint64_t    totalCtu;
+    uint64_t    lumaDistortion;
+    uint64_t    chromaDistortion;
+    uint64_t    psyEnergy;
+    uint64_t    resEnergy;
+    uint64_t    cntSkipCu[NUM_CU_DEPTH];
+    uint64_t    cntMergeCu[NUM_CU_DEPTH];
+    uint64_t    cntInter[NUM_CU_DEPTH];
+    uint64_t    cntIntra[NUM_CU_DEPTH];
+    uint64_t    cuInterDistribution[NUM_CU_DEPTH][INTER_MODES];
+    uint64_t    cuIntraDistribution[NUM_CU_DEPTH][INTRA_MODES];
+
+    FrameStats()
+    {
+        // zero every counter; valid because all members are scalar/arrays
+        memset(this, 0, sizeof(FrameStats));
+    }
+};
+
+/* Per-frame data that is used during encodes and referenced while the picture
+ * is available for reference. A FrameData instance is attached to a Frame as it
+ * comes out of the lookahead. Frames which are not being encoded do not have a
+ * FrameData instance. These instances are re-used once the encoded frame has
+ * no active references. They hold the Slice instance and the 'official' CTU
+ * data structures. They are maintained in a free-list pool along together with
+ * a reconstructed image PicYuv in order to conserve memory. */
+class FrameData
+{
+public:
+
+    Slice*         m_slice;
+    SAOParam*      m_saoParam;
+    const x265_param* m_param;
+
+    FrameData*     m_freeListNext;     // next entry in the free-list pool
+    PicYuv*        m_reconPic;
+    bool           m_bHasReferences;   /* used during DPB/RPS updates */
+    int            m_frameEncoderID;   /* the ID of the FrameEncoder encoding this frame */
+    JobProvider*   m_jobProvider;
+
+    CUDataMemPool  m_cuMemPool;
+    CUData*        m_picCTU;           // one CUData per CTU; allocated in create()
+
+    /* Rate control data used during encode and by references */
+    struct RCStatCU
+    {
+        uint32_t totalBits;     /* total bits to encode this CTU */
+        uint32_t vbvCost;       /* sum of lowres costs for 16x16 sub-blocks */
+        uint32_t intraVbvCost;  /* sum of lowres intra costs for 16x16 sub-blocks */
+        uint64_t avgCost[4];    /* stores the avg cost of CU's in frame for each depth */
+        uint32_t count[4];      /* count and avgCost only used by Analysis at RD0..4 */
+        double   baseQp;        /* Qp of Cu set from RateControl/Vbv (only used by frame encoder) */
+    };
+
+    struct RCStatRow
+    {
+        uint32_t numEncodedCUs; /* ctuAddr of last encoded CTU in row */
+        uint32_t encodedBits;   /* sum of 'totalBits' of encoded CTUs */
+        uint32_t satdForVbv;    /* sum of lowres (estimated) costs for entire row */
+        uint32_t intraSatdForVbv; /* sum of lowres (estimated) intra costs for entire row */
+        uint32_t diagSatd;
+        uint32_t diagIntraSatd;
+        double   diagQp;
+        double   diagQpScale;
+        double   sumQpRc;
+        double   sumQpAq;
+    };
+
+    RCStatCU*      m_cuStat;
+    RCStatRow*     m_rowStat;
+    FrameStats     m_frameStats; // stats of current frame for multi-pass encodes
+
+    double         m_avgQpRc;    /* avg QP as decided by rate-control */
+    double         m_avgQpAq;    /* avg QP as decided by AQ in addition to rate-control */
+    double         m_rateFactor; /* calculated based on the Frame QP */
+
+    FrameData();
+
+    bool create(const x265_param& param, const SPS& sps);
+    void reinit(const SPS& sps);
+    void destroy();
+
+    inline CUData* getPicCTU(uint32_t ctuAddr) { return &m_picCTU[ctuAddr]; }
+};
+}
+
+#endif // ifndef X265_FRAMEDATA_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/intrapred.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,270 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Min Chen <chenm003@163.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+
+using namespace X265_NS;
+
+namespace {
+
+template<int tuSize>
+void intraFilter(const pixel* samples, pixel* filtered) /* 1:2:1 filtering of left and top reference samples */
+{
+    const int tuSize2 = tuSize << 1;
+
+    pixel topLeft = samples[0], topLast = samples[tuSize2], leftLast = samples[tuSize2 + tuSize2];
+
+    // filtering top
+    for (int i = 1; i < tuSize2; i++)
+        filtered[i] = ((samples[i] << 1) + samples[i - 1] + samples[i + 1] + 2) >> 2;
+    filtered[tuSize2] = topLast;
+    
+    // filtering top-left
+    filtered[0] = ((topLeft << 1) + samples[1] + samples[tuSize2 + 1] + 2) >> 2;
+
+    // filtering left
+    filtered[tuSize2 + 1] = ((samples[tuSize2 + 1] << 1) + topLeft + samples[tuSize2 + 2] + 2) >> 2;
+    for (int i = tuSize2 + 2; i < tuSize2 + tuSize2; i++)
+        filtered[i] = ((samples[i] << 1) + samples[i - 1] + samples[i + 1] + 2) >> 2;
+    filtered[tuSize2 + tuSize2] = leftLast;
+}
+
+static void dcPredFilter(const pixel* above, const pixel* left, pixel* dst, intptr_t dststride, int size)
+{
+    // boundary pixels processing
+    dst[0] = (pixel)((above[0] + left[0] + 2 * dst[0] + 2) >> 2);
+
+    for (int x = 1; x < size; x++)
+        dst[x] = (pixel)((above[x] +  3 * dst[x] + 2) >> 2);
+
+    dst += dststride;
+    for (int y = 1; y < size; y++)
+    {
+        *dst = (pixel)((left[y] + 3 * *dst + 2) >> 2);
+        dst += dststride;
+    }
+}
+
+template<int width>
+void intra_pred_dc_c(pixel* dst, intptr_t dstStride, const pixel* srcPix, int /*dirMode*/, int bFilter)
+{
+    int k, l;
+
+    int dcVal = width;
+    for (int i = 0; i < width; i++)
+        dcVal += srcPix[1 + i] + srcPix[2 * width + 1 + i];
+
+    dcVal = dcVal / (width + width);
+    for (k = 0; k < width; k++)
+        for (l = 0; l < width; l++)
+            dst[k * dstStride + l] = (pixel)dcVal;
+
+    if (bFilter)
+        dcPredFilter(srcPix + 1, srcPix + (2 * width + 1), dst, dstStride, width);
+}
+
+template<int log2Size>
+void planar_pred_c(pixel* dst, intptr_t dstStride, const pixel* srcPix, int /*dirMode*/, int /*bFilter*/)
+{
+    const int blkSize = 1 << log2Size;
+
+    const pixel* above = srcPix + 1;
+    const pixel* left  = srcPix + (2 * blkSize + 1);
+
+    pixel topRight = above[blkSize];
+    pixel bottomLeft = left[blkSize];
+    for (int y = 0; y < blkSize; y++)
+        for (int x = 0; x < blkSize; x++)
+            dst[y * dstStride + x] = (pixel) (((blkSize - 1 - x) * left[y] + (blkSize - 1 -y) * above[x] + (x + 1) * topRight + (y + 1) * bottomLeft + blkSize) >> (log2Size + 1));
+}
+
/* Angular intra prediction (HEVC modes 2..34) for a width x width block.
 * srcPix0 layout: [0] top-left, [1..2w] top row, [2w+1..4w] left column.
 * Horizontal modes (< 18) are handled by swapping the top/left neighbours up
 * front and transposing the result back at the end, so one vertical-style
 * core serves both directions. */
template<int width>
void intra_pred_ang_c(pixel* dst, intptr_t dstStride, const pixel *srcPix0, int dirMode, int bFilter)
{
    int width2 = width << 1;
    // Flip the neighbours in the horizontal case.
    int horMode = dirMode < 18;
    pixel neighbourBuf[129];
    const pixel *srcPix = srcPix0;

    if (horMode)
    {
        // swap the top-row and left-column arms of the reference buffer
        neighbourBuf[0] = srcPix[0];
        for (int i = 0; i < width << 1; i++)
        {
            neighbourBuf[1 + i] = srcPix[width2 + 1 + i];
            neighbourBuf[width2 + 1 + i] = srcPix[1 + i];
        }
        srcPix = neighbourBuf;
    }

    // Intra prediction angle and inverse angle tables.
    const int8_t angleTable[17] = { -32, -26, -21, -17, -13, -9, -5, -2, 0, 2, 5, 9, 13, 17, 21, 26, 32 };
    const int16_t invAngleTable[8] = { 4096, 1638, 910, 630, 482, 390, 315, 256 };

    // Get the prediction angle.
    int angleOffset = horMode ? 10 - dirMode : dirMode - 26;
    int angle = angleTable[8 + angleOffset];

    // Vertical Prediction.
    if (!angle)
    {
        for (int y = 0; y < width; y++)
            for (int x = 0; x < width; x++)
                dst[y * dstStride + x] = srcPix[1 + x];

        if (bFilter)
        {
            // edge filter: adjust first column by half the left-reference gradient
            int topLeft = srcPix[0], top = srcPix[1];
            for (int y = 0; y < width; y++)
                dst[y * dstStride] = x265_clip((int16_t)(top + ((srcPix[width2 + 1 + y] - topLeft) >> 1)));
        }
    }
    else // Angular prediction.
    {
        // Get the reference pixels. The reference base is the first pixel to the top (neighbourBuf[1]).
        pixel refBuf[64];
        const pixel *ref;

        // Use the projected left neighbours and the top neighbours.
        if (angle < 0)
        {
            // Number of neighbours projected.
            int nbProjected = -((width * angle) >> 5) - 1;
            pixel *ref_pix = refBuf + nbProjected + 1;

            // Project the neighbours (map left samples onto the extended top row
            // using the fixed-point inverse angle, rounded via the 128 bias).
            int invAngle = invAngleTable[- angleOffset - 1];
            int invAngleSum = 128;
            for (int i = 0; i < nbProjected; i++)
            {
                invAngleSum += invAngle;
                ref_pix[- 2 - i] = srcPix[width2 + (invAngleSum >> 8)];
            }

            // Copy the top-left and top pixels.
            for (int i = 0; i < width + 1; i++)
                ref_pix[-1 + i] = srcPix[i];
            ref = ref_pix;
        }
        else // Use the top and top-right neighbours.
            ref = srcPix + 1;

        // Pass every row: 5.5 fixed-point step along the reference row.
        int angleSum = 0;
        for (int y = 0; y < width; y++)
        {
            angleSum += angle;
            int offset = angleSum >> 5;
            int fraction = angleSum & 31;

            if (fraction) // Interpolate
                for (int x = 0; x < width; x++)
                    dst[y * dstStride + x] = (pixel)(((32 - fraction) * ref[offset + x] + fraction * ref[offset + x + 1] + 16) >> 5);
            else // Copy.
                for (int x = 0; x < width; x++)
                    dst[y * dstStride + x] = ref[offset + x];
        }
    }

    // Flip for horizontal: transpose the block in place.
    if (horMode)
    {
        for (int y = 0; y < width - 1; y++)
        {
            for (int x = y + 1; x < width; x++)
            {
                pixel tmp              = dst[y * dstStride + x];
                dst[y * dstStride + x] = dst[x * dstStride + y];
                dst[x * dstStride + y] = tmp;
            }
        }
    }
}
+
/* Compute predictions for all 33 angular modes (2..34) into one contiguous
 * buffer, one size*size block per mode. Uses the filtered reference samples
 * when g_intraFilterFlags enables smoothing for this mode and block size. */
template<int log2Size>
void all_angs_pred_c(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma)
{
    const int size = 1 << log2Size;
    for (int mode = 2; mode <= 34; mode++)
    {
        pixel *srcPix  = (g_intraFilterFlags[mode] & size ? filtPix  : refPix);
        pixel *out = dest + ((mode - 2) << (log2Size * 2));

        intra_pred_ang_c<size>(out, size, srcPix, mode, bLuma);

        // Horizontal modes are stored transposed: undo the flip that
        // intra_pred_ang_c applied, leaving the block in transposed layout
        // (presumably so downstream cost analysis can skip the flip — the
        // original comment said "Optimize code don't flip buffer").
        bool modeHor = (mode < 18);

        // transpose the block if this is a horizontal mode
        if (modeHor)
        {
            for (int k = 0; k < size - 1; k++)
            {
                for (int l = k + 1; l < size; l++)
                {
                    pixel tmp         = out[k * size + l];
                    out[k * size + l] = out[l * size + k];
                    out[l * size + k] = tmp;
                }
            }
        }
    }
}
+}
+
+namespace X265_NS {
+// x265 private namespace
+
/* Wire up the C reference implementations of the intra-prediction
 * primitives for every CU size (4x4 .. 32x32). */
void setupIntraPrimitives_c(EncoderPrimitives& p)
{
    p.cu[BLOCK_4x4].intra_filter = intraFilter<4>;
    p.cu[BLOCK_8x8].intra_filter = intraFilter<8>;
    p.cu[BLOCK_16x16].intra_filter = intraFilter<16>;
    p.cu[BLOCK_32x32].intra_filter = intraFilter<32>;

    p.cu[BLOCK_4x4].intra_pred[PLANAR_IDX] = planar_pred_c<2>;
    p.cu[BLOCK_8x8].intra_pred[PLANAR_IDX] = planar_pred_c<3>;
    p.cu[BLOCK_16x16].intra_pred[PLANAR_IDX] = planar_pred_c<4>;
    p.cu[BLOCK_32x32].intra_pred[PLANAR_IDX] = planar_pred_c<5>;

    p.cu[BLOCK_4x4].intra_pred[DC_IDX] = intra_pred_dc_c<4>;
    p.cu[BLOCK_8x8].intra_pred[DC_IDX] = intra_pred_dc_c<8>;
    p.cu[BLOCK_16x16].intra_pred[DC_IDX] = intra_pred_dc_c<16>;
    p.cu[BLOCK_32x32].intra_pred[DC_IDX] = intra_pred_dc_c<32>;

    /* all 33 angular modes (2..NUM_INTRA_MODE-1) share one implementation
     * per block size; the mode is passed at call time via dirMode */
    for (int i = 2; i < NUM_INTRA_MODE; i++)
    {
        p.cu[BLOCK_4x4].intra_pred[i] = intra_pred_ang_c<4>;
        p.cu[BLOCK_8x8].intra_pred[i] = intra_pred_ang_c<8>;
        p.cu[BLOCK_16x16].intra_pred[i] = intra_pred_ang_c<16>;
        p.cu[BLOCK_32x32].intra_pred[i] = intra_pred_ang_c<32>;
    }

    p.cu[BLOCK_4x4].intra_pred_allangs = all_angs_pred_c<2>;
    p.cu[BLOCK_8x8].intra_pred_allangs = all_angs_pred_c<3>;
    p.cu[BLOCK_16x16].intra_pred_allangs = all_angs_pred_c<4>;
    p.cu[BLOCK_32x32].intra_pred_allangs = all_angs_pred_c<5>;
}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/ipfilter.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,520 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Deepthi Devaki <deepthidevaki@multicorewareinc.com>,
+ *          Rajesh Paulraj <rajesh@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "x265.h"
+
+using namespace X265_NS;
+
+#if _MSC_VER
+#pragma warning(disable: 4127) // conditional expression is constant, typical for templated functions
+#endif
+
+namespace {
+// file local namespace
+
+template<int width, int height>
+void filterPixelToShort_c(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride)
+{
+    int shift = IF_INTERNAL_PREC - X265_DEPTH;
+    int row, col;
+
+    for (row = 0; row < height; row++)
+    {
+        for (col = 0; col < width; col++)
+        {
+            int16_t val = src[col] << shift;
+            dst[col] = val - (int16_t)IF_INTERNAL_OFFS;
+        }
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+
/* Replicate the left and right edge pixel of each row into the horizontal
 * margins (marginX pixels on each side). The 8-bit build uses memset because
 * each margin is filled with a single byte value; the high-bit-depth build
 * must copy sample-by-sample. */
static void extendCURowColBorder(pixel* txt, intptr_t stride, int width, int height, int marginX)
{
    for (int y = 0; y < height; y++)
    {
#if HIGH_BIT_DEPTH
        for (int x = 0; x < marginX; x++)
        {
            txt[-marginX + x] = txt[0];
            txt[width + x] = txt[width - 1];
        }

#else
        memset(txt - marginX, txt[0], marginX);
        memset(txt + width, txt[width - 1], marginX);
#endif

        txt += stride;
    }
}
+
+template<int N, int width, int height>
+void interp_horiz_pp_c(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx)
+{
+    const int16_t* coeff = (N == 4) ? g_chromaFilter[coeffIdx] : g_lumaFilter[coeffIdx];
+    int headRoom = IF_FILTER_PREC;
+    int offset =  (1 << (headRoom - 1));
+    uint16_t maxVal = (1 << X265_DEPTH) - 1;
+    int cStride = 1;
+
+    src -= (N / 2 - 1) * cStride;
+
+    int row, col;
+    for (row = 0; row < height; row++)
+    {
+        for (col = 0; col < width; col++)
+        {
+            int sum;
+
+            sum  = src[col + 0 * cStride] * coeff[0];
+            sum += src[col + 1 * cStride] * coeff[1];
+            sum += src[col + 2 * cStride] * coeff[2];
+            sum += src[col + 3 * cStride] * coeff[3];
+            if (N == 8)
+            {
+                sum += src[col + 4 * cStride] * coeff[4];
+                sum += src[col + 5 * cStride] * coeff[5];
+                sum += src[col + 6 * cStride] * coeff[6];
+                sum += src[col + 7 * cStride] * coeff[7];
+            }
+            int16_t val = (int16_t)((sum + offset) >> headRoom);
+
+            if (val < 0) val = 0;
+            if (val > maxVal) val = maxVal;
+            dst[col] = (pixel)val;
+        }
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+
+template<int N, int width, int height>
+void interp_horiz_ps_c(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+{
+    const int16_t* coeff = (N == 4) ? g_chromaFilter[coeffIdx] : g_lumaFilter[coeffIdx];
+    int headRoom = IF_INTERNAL_PREC - X265_DEPTH;
+    int shift = IF_FILTER_PREC - headRoom;
+    int offset = -IF_INTERNAL_OFFS << shift;
+    int blkheight = height;
+
+    src -= N / 2 - 1;
+
+    if (isRowExt)
+    {
+        src -= (N / 2 - 1) * srcStride;
+        blkheight += N - 1;
+    }
+
+    int row, col;
+    for (row = 0; row < blkheight; row++)
+    {
+        for (col = 0; col < width; col++)
+        {
+            int sum;
+
+            sum  = src[col + 0] * coeff[0];
+            sum += src[col + 1] * coeff[1];
+            sum += src[col + 2] * coeff[2];
+            sum += src[col + 3] * coeff[3];
+            if (N == 8)
+            {
+                sum += src[col + 4] * coeff[4];
+                sum += src[col + 5] * coeff[5];
+                sum += src[col + 6] * coeff[6];
+                sum += src[col + 7] * coeff[7];
+            }
+
+            int16_t val = (int16_t)((sum + offset) >> shift);
+            dst[col] = val;
+        }
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+
+template<int N, int width, int height>
+void interp_vert_pp_c(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx)
+{
+    const int16_t* c = (N == 4) ? g_chromaFilter[coeffIdx] : g_lumaFilter[coeffIdx];
+    int shift = IF_FILTER_PREC;
+    int offset = 1 << (shift - 1);
+    uint16_t maxVal = (1 << X265_DEPTH) - 1;
+
+    src -= (N / 2 - 1) * srcStride;
+
+    int row, col;
+    for (row = 0; row < height; row++)
+    {
+        for (col = 0; col < width; col++)
+        {
+            int sum;
+
+            sum  = src[col + 0 * srcStride] * c[0];
+            sum += src[col + 1 * srcStride] * c[1];
+            sum += src[col + 2 * srcStride] * c[2];
+            sum += src[col + 3 * srcStride] * c[3];
+            if (N == 8)
+            {
+                sum += src[col + 4 * srcStride] * c[4];
+                sum += src[col + 5 * srcStride] * c[5];
+                sum += src[col + 6 * srcStride] * c[6];
+                sum += src[col + 7 * srcStride] * c[7];
+            }
+
+            int16_t val = (int16_t)((sum + offset) >> shift);
+            val = (val < 0) ? 0 : val;
+            val = (val > maxVal) ? maxVal : val;
+
+            dst[col] = (pixel)val;
+        }
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+
+template<int N, int width, int height>
+void interp_vert_ps_c(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx)
+{
+    const int16_t* c = (N == 4) ? g_chromaFilter[coeffIdx] : g_lumaFilter[coeffIdx];
+    int headRoom = IF_INTERNAL_PREC - X265_DEPTH;
+    int shift = IF_FILTER_PREC - headRoom;
+    int offset = -IF_INTERNAL_OFFS << shift;
+
+    src -= (N / 2 - 1) * srcStride;
+
+    int row, col;
+    for (row = 0; row < height; row++)
+    {
+        for (col = 0; col < width; col++)
+        {
+            int sum;
+
+            sum  = src[col + 0 * srcStride] * c[0];
+            sum += src[col + 1 * srcStride] * c[1];
+            sum += src[col + 2 * srcStride] * c[2];
+            sum += src[col + 3 * srcStride] * c[3];
+            if (N == 8)
+            {
+                sum += src[col + 4 * srcStride] * c[4];
+                sum += src[col + 5 * srcStride] * c[5];
+                sum += src[col + 6 * srcStride] * c[6];
+                sum += src[col + 7 * srcStride] * c[7];
+            }
+
+            int16_t val = (int16_t)((sum + offset) >> shift);
+            dst[col] = val;
+        }
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+
+template<int N, int width, int height>
+void interp_vert_sp_c(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx)
+{
+    int headRoom = IF_INTERNAL_PREC - X265_DEPTH;
+    int shift = IF_FILTER_PREC + headRoom;
+    int offset = (1 << (shift - 1)) + (IF_INTERNAL_OFFS << IF_FILTER_PREC);
+    uint16_t maxVal = (1 << X265_DEPTH) - 1;
+    const int16_t* coeff = (N == 8 ? g_lumaFilter[coeffIdx] : g_chromaFilter[coeffIdx]);
+
+    src -= (N / 2 - 1) * srcStride;
+
+    int row, col;
+    for (row = 0; row < height; row++)
+    {
+        for (col = 0; col < width; col++)
+        {
+            int sum;
+
+            sum  = src[col + 0 * srcStride] * coeff[0];
+            sum += src[col + 1 * srcStride] * coeff[1];
+            sum += src[col + 2 * srcStride] * coeff[2];
+            sum += src[col + 3 * srcStride] * coeff[3];
+            if (N == 8)
+            {
+                sum += src[col + 4 * srcStride] * coeff[4];
+                sum += src[col + 5 * srcStride] * coeff[5];
+                sum += src[col + 6 * srcStride] * coeff[6];
+                sum += src[col + 7 * srcStride] * coeff[7];
+            }
+
+            int16_t val = (int16_t)((sum + offset) >> shift);
+
+            val = (val < 0) ? 0 : val;
+            val = (val > maxVal) ? maxVal : val;
+
+            dst[col] = (pixel)val;
+        }
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+
+template<int N, int width, int height>
+void interp_vert_ss_c(const int16_t* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx)
+{
+    const int16_t* c = (N == 8 ? g_lumaFilter[coeffIdx] : g_chromaFilter[coeffIdx]);
+    int shift = IF_FILTER_PREC;
+    int row, col;
+
+    src -= (N / 2 - 1) * srcStride;
+    for (row = 0; row < height; row++)
+    {
+        for (col = 0; col < width; col++)
+        {
+            int sum;
+
+            sum  = src[col + 0 * srcStride] * c[0];
+            sum += src[col + 1 * srcStride] * c[1];
+            sum += src[col + 2 * srcStride] * c[2];
+            sum += src[col + 3 * srcStride] * c[3];
+            if (N == 8)
+            {
+                sum += src[col + 4 * srcStride] * c[4];
+                sum += src[col + 5 * srcStride] * c[5];
+                sum += src[col + 6 * srcStride] * c[6];
+                sum += src[col + 7 * srcStride] * c[7];
+            }
+
+            int16_t val = (int16_t)((sum) >> shift);
+            dst[col] = val;
+        }
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+
+template<int N>
+void filterVertical_sp_c(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int coeffIdx)
+{
+    int headRoom = IF_INTERNAL_PREC - X265_DEPTH;
+    int shift = IF_FILTER_PREC + headRoom;
+    int offset = (1 << (shift - 1)) + (IF_INTERNAL_OFFS << IF_FILTER_PREC);
+    uint16_t maxVal = (1 << X265_DEPTH) - 1;
+    const int16_t* coeff = (N == 8 ? g_lumaFilter[coeffIdx] : g_chromaFilter[coeffIdx]);
+
+    src -= (N / 2 - 1) * srcStride;
+
+    int row, col;
+    for (row = 0; row < height; row++)
+    {
+        for (col = 0; col < width; col++)
+        {
+            int sum;
+
+            sum  = src[col + 0 * srcStride] * coeff[0];
+            sum += src[col + 1 * srcStride] * coeff[1];
+            sum += src[col + 2 * srcStride] * coeff[2];
+            sum += src[col + 3 * srcStride] * coeff[3];
+            if (N == 8)
+            {
+                sum += src[col + 4 * srcStride] * coeff[4];
+                sum += src[col + 5 * srcStride] * coeff[5];
+                sum += src[col + 6 * srcStride] * coeff[6];
+                sum += src[col + 7 * srcStride] * coeff[7];
+            }
+
+            int16_t val = (int16_t)((sum + offset) >> shift);
+
+            val = (val < 0) ? 0 : val;
+            val = (val > maxVal) ? maxVal : val;
+
+            dst[col] = (pixel)val;
+        }
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+
/* Separable 2-D (horizontal then vertical) interpolation, pixel in / pixel
 * out. The horizontal pass produces a row-extended 16-bit intermediate
 * block; the vertical pass consumes it starting 3 rows in, matching the
 * N/2 - 1 = 3 extra top rows emitted when isRowExt != 0. This assumes
 * N == 8, which is the only instantiation wired up (luma, see LUMA macro). */
template<int N, int width, int height>
void interp_hv_pp_c(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int idxX, int idxY)
{
    short immedVals[(64 + 8) * (64 + 8)];  /* sized for the largest (64x64) block plus filter margin */

    interp_horiz_ps_c<N, width, height>(src, srcStride, immedVals, width, idxX, 1);
    filterVertical_sp_c<N>(immedVals + 3 * width, width, dst, dstStride, width, height, idxY);
}
+}
+
+namespace X265_NS {
+// x265 private namespace
+
+#define CHROMA_420(W, H) \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_hpp = interp_horiz_pp_c<4, W, H>; \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_hps = interp_horiz_ps_c<4, W, H>; \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_vpp = interp_vert_pp_c<4, W, H>;  \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_vps = interp_vert_ps_c<4, W, H>;  \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_vsp = interp_vert_sp_c<4, W, H>;  \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_vss = interp_vert_ss_c<4, W, H>; \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].p2s = filterPixelToShort_c<W, H>;
+
+#define CHROMA_422(W, H) \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_hpp = interp_horiz_pp_c<4, W, H>; \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_hps = interp_horiz_ps_c<4, W, H>; \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_vpp = interp_vert_pp_c<4, W, H>;  \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_vps = interp_vert_ps_c<4, W, H>;  \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_vsp = interp_vert_sp_c<4, W, H>;  \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_vss = interp_vert_ss_c<4, W, H>; \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].p2s = filterPixelToShort_c<W, H>;
+
+#define CHROMA_444(W, H) \
+    p.chroma[X265_CSP_I444].pu[LUMA_ ## W ## x ## H].filter_hpp = interp_horiz_pp_c<4, W, H>; \
+    p.chroma[X265_CSP_I444].pu[LUMA_ ## W ## x ## H].filter_hps = interp_horiz_ps_c<4, W, H>; \
+    p.chroma[X265_CSP_I444].pu[LUMA_ ## W ## x ## H].filter_vpp = interp_vert_pp_c<4, W, H>;  \
+    p.chroma[X265_CSP_I444].pu[LUMA_ ## W ## x ## H].filter_vps = interp_vert_ps_c<4, W, H>;  \
+    p.chroma[X265_CSP_I444].pu[LUMA_ ## W ## x ## H].filter_vsp = interp_vert_sp_c<4, W, H>;  \
+    p.chroma[X265_CSP_I444].pu[LUMA_ ## W ## x ## H].filter_vss = interp_vert_ss_c<4, W, H>; \
+    p.chroma[X265_CSP_I444].pu[LUMA_ ## W ## x ## H].p2s = filterPixelToShort_c<W, H>;
+
+#define LUMA(W, H) \
+    p.pu[LUMA_ ## W ## x ## H].luma_hpp     = interp_horiz_pp_c<8, W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].luma_hps     = interp_horiz_ps_c<8, W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].luma_vpp     = interp_vert_pp_c<8, W, H>;  \
+    p.pu[LUMA_ ## W ## x ## H].luma_vps     = interp_vert_ps_c<8, W, H>;  \
+    p.pu[LUMA_ ## W ## x ## H].luma_vsp     = interp_vert_sp_c<8, W, H>;  \
+    p.pu[LUMA_ ## W ## x ## H].luma_vss     = interp_vert_ss_c<8, W, H>;  \
+    p.pu[LUMA_ ## W ## x ## H].luma_hvpp    = interp_hv_pp_c<8, W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].convert_p2s = filterPixelToShort_c<W, H>;
+
/* Wire up the C reference implementations of the interpolation filter
 * primitives for every luma PU size and every chroma PU size of the three
 * chroma sampling formats. Each LUMA(W,H) is followed by the 4:2:0 chroma
 * partition of half its dimensions. */
void setupFilterPrimitives_c(EncoderPrimitives& p)
{
    LUMA(4, 4);
    LUMA(8, 8);
    CHROMA_420(4,  4);
    LUMA(4, 8);
    CHROMA_420(2,  4);
    LUMA(8, 4);
    CHROMA_420(4,  2);
    LUMA(16, 16);
    CHROMA_420(8,  8);
    LUMA(16,  8);
    CHROMA_420(8,  4);
    LUMA(8, 16);
    CHROMA_420(4,  8);
    LUMA(16, 12);
    CHROMA_420(8,  6);
    LUMA(12, 16);
    CHROMA_420(6,  8);
    LUMA(16,  4);
    CHROMA_420(8,  2);
    LUMA(4, 16);
    CHROMA_420(2,  8);
    LUMA(32, 32);
    CHROMA_420(16, 16);
    LUMA(32, 16);
    CHROMA_420(16, 8);
    LUMA(16, 32);
    CHROMA_420(8,  16);
    LUMA(32, 24);
    CHROMA_420(16, 12);
    LUMA(24, 32);
    CHROMA_420(12, 16);
    LUMA(32,  8);
    CHROMA_420(16, 4);
    LUMA(8, 32);
    CHROMA_420(4,  16);
    LUMA(64, 64);
    CHROMA_420(32, 32);
    LUMA(64, 32);
    CHROMA_420(32, 16);
    LUMA(32, 64);
    CHROMA_420(16, 32);
    LUMA(64, 48);
    CHROMA_420(32, 24);
    LUMA(48, 64);
    CHROMA_420(24, 32);
    LUMA(64, 16);
    CHROMA_420(32, 8);
    LUMA(16, 64);
    CHROMA_420(8,  32);

    /* 4:2:2 chroma partitions (half width, full height of the luma PU) */
    CHROMA_422(4, 8);
    CHROMA_422(4, 4);
    CHROMA_422(2, 4);
    CHROMA_422(2, 8);
    CHROMA_422(8,  16);
    CHROMA_422(8,  8);
    CHROMA_422(4,  16);
    CHROMA_422(8,  12);
    CHROMA_422(6,  16);
    CHROMA_422(8,  4);
    CHROMA_422(2,  16);
    CHROMA_422(16, 32);
    CHROMA_422(16, 16);
    CHROMA_422(8,  32);
    CHROMA_422(16, 24);
    CHROMA_422(12, 32);
    CHROMA_422(16, 8);
    CHROMA_422(4,  32);
    CHROMA_422(32, 64);
    CHROMA_422(32, 32);
    CHROMA_422(16, 64);
    CHROMA_422(32, 48);
    CHROMA_422(24, 64);
    CHROMA_422(32, 16);
    CHROMA_422(8,  64);

    /* 4:4:4 chroma uses the luma partition sizes directly */
    CHROMA_444(4,  4);
    CHROMA_444(8,  8);
    CHROMA_444(4,  8);
    CHROMA_444(8,  4);
    CHROMA_444(16, 16);
    CHROMA_444(16, 8);
    CHROMA_444(8,  16);
    CHROMA_444(16, 12);
    CHROMA_444(12, 16);
    CHROMA_444(16, 4);
    CHROMA_444(4,  16);
    CHROMA_444(32, 32);
    CHROMA_444(32, 16);
    CHROMA_444(16, 32);
    CHROMA_444(32, 24);
    CHROMA_444(24, 32);
    CHROMA_444(32, 8);
    CHROMA_444(8,  32);
    CHROMA_444(64, 64);
    CHROMA_444(64, 32);
    CHROMA_444(32, 64);
    CHROMA_444(64, 48);
    CHROMA_444(48, 64);
    CHROMA_444(64, 16);
    CHROMA_444(16, 64);

    p.extendRowBorder = extendCURowColBorder;
}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/loopfilter.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,154 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+*          Dnyaneshwar Gorade <dnyaneshwar@multicorewareinc.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+
+#define PIXEL_MIN 0
+#define PIXEL_MAX ((1 << X265_DEPTH) - 1)
+
+namespace {
+
/* get the sign of input variable (TODO: this is a dup, make common) */
static inline int8_t signOf(int x)
{
    /* (x > 0) - (x < 0) is the portable signum idiom. The previous
     * shift-based form relied on implementation-defined right-shift of a
     * negative value and on int being exactly 32 bits. */
    return (int8_t)((x > 0) - (x < 0));
}
+
+static void calSign(int8_t *dst, const pixel *src1, const pixel *src2, const int endX)
+{
+    for (int x = 0; x < endX; x++)
+        dst[x] = signOf(src1[x] - src2[x]);
+}
+
+static void processSaoCUE0(pixel * rec, int8_t * offsetEo, int width, int8_t* signLeft, intptr_t stride)
+{
+    int x, y;
+    int8_t signRight, signLeft0;
+    int8_t edgeType;
+
+    for (y = 0; y < 2; y++)
+    {
+        signLeft0 = signLeft[y];
+        for (x = 0; x < width; x++)
+        {
+            signRight = ((rec[x] - rec[x + 1]) < 0) ? -1 : ((rec[x] - rec[x + 1]) > 0) ? 1 : 0;
+            edgeType = signRight + signLeft0 + 2;
+            signLeft0 = -signRight;
+            rec[x] = x265_clip(rec[x] + offsetEo[edgeType]);
+        }
+        rec += stride;
+    }
+}
+
+static void processSaoCUE1(pixel* rec, int8_t* upBuff1, int8_t* offsetEo, intptr_t stride, int width)
+{
+    int x;
+    int8_t signDown;
+    int edgeType;
+
+    for (x = 0; x < width; x++)
+    {
+        signDown = signOf(rec[x] - rec[x + stride]);
+        edgeType = signDown + upBuff1[x] + 2;
+        upBuff1[x] = -signDown;
+        rec[x] = x265_clip(rec[x] + offsetEo[edgeType]);
+    }
+}
+
+static void processSaoCUE1_2Rows(pixel* rec, int8_t* upBuff1, int8_t* offsetEo, intptr_t stride, int width)
+{
+    int x, y;
+    int8_t signDown;
+    int edgeType;
+
+    for (y = 0; y < 2; y++)
+    {
+        for (x = 0; x < width; x++)
+        {
+            signDown = signOf(rec[x] - rec[x + stride]);
+            edgeType = signDown + upBuff1[x] + 2;
+            upBuff1[x] = -signDown;
+            rec[x] = x265_clip(rec[x] + offsetEo[edgeType]);
+        }
+        rec += stride;
+    }
+}
+
+static void processSaoCUE2(pixel * rec, int8_t * bufft, int8_t * buff1, int8_t * offsetEo, int width, intptr_t stride)
+{
+    int x;
+    for (x = 0; x < width; x++)
+    {
+        int8_t signDown = signOf(rec[x] - rec[x + stride + 1]);
+        int edgeType = signDown + buff1[x] + 2;
+        bufft[x + 1] = -signDown;
+        rec[x] = x265_clip(rec[x] + offsetEo[edgeType]);;
+    }
+}
+
+static void processSaoCUE3(pixel *rec, int8_t *upBuff1, int8_t *offsetEo, intptr_t stride, int startX, int endX)
+{
+    int8_t signDown;
+    int8_t edgeType;
+
+    for (int x = startX + 1; x < endX; x++)
+    {
+        signDown = signOf(rec[x] - rec[x + stride]);
+        edgeType = signDown + upBuff1[x] + 2;
+        upBuff1[x - 1] = -signDown;
+        rec[x] = x265_clip(rec[x] + offsetEo[edgeType]);
+    }
+}
+
+static void processSaoCUB0(pixel* rec, const int8_t* offset, int ctuWidth, int ctuHeight, intptr_t stride)
+{
+    #define SAO_BO_BITS 5
+    const int boShift = X265_DEPTH - SAO_BO_BITS;
+    int x, y;
+    for (y = 0; y < ctuHeight; y++)
+    {
+        for (x = 0; x < ctuWidth; x++)
+        {
+            rec[x] = x265_clip(rec[x] + offset[rec[x] >> boShift]);
+        }
+        rec += stride;
+    }
+}
+}
+
+namespace X265_NS {
/* Wire up the C reference implementations of the SAO primitives. */
void setupLoopFilterPrimitives_c(EncoderPrimitives &p)
{
    p.saoCuOrgE0 = processSaoCUE0;
    p.saoCuOrgE1 = processSaoCUE1;
    p.saoCuOrgE1_2Rows = processSaoCUE1_2Rows;
    /* both entries of each E2/E3 pair point at the same C routine */
    p.saoCuOrgE2[0] = processSaoCUE2;
    p.saoCuOrgE2[1] = processSaoCUE2;
    p.saoCuOrgE3[0] = processSaoCUE3;
    p.saoCuOrgE3[1] = processSaoCUE3;
    p.saoCuOrgB0 = processSaoCUB0;
    p.sign = calSign;
}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/lowres.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,165 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Gopu Govindaswamy <gopu@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "picyuv.h"
+#include "lowres.h"
+#include "mv.h"
+
+using namespace X265_NS;
+
+/* Allocate all half-resolution planes and lookahead data for one frame.
+ * Returns false if any allocation fails (CHECKED_MALLOC jumps to the
+ * fail label); the caller is then expected to call destroy() to release
+ * whatever was allocated before the failure. */
+bool Lowres::create(PicYuv *origPic, int _bframes, bool bAQEnabled)
+{
+    isLowres = true;
+    bframes = _bframes;
+    width = origPic->m_picWidth / 2;
+    lines = origPic->m_picHeight / 2;
+    lumaStride = width + 2 * origPic->m_lumaMarginX;
+    if (lumaStride & 31)
+        lumaStride += 32 - (lumaStride & 31); // round stride up to a multiple of 32
+    maxBlocksInRow = (width + X265_LOWRES_CU_SIZE - 1) >> X265_LOWRES_CU_BITS;
+    maxBlocksInCol = (lines + X265_LOWRES_CU_SIZE - 1) >> X265_LOWRES_CU_BITS;
+    int cuCount = maxBlocksInRow * maxBlocksInCol;
+
+    /* rounding the width to multiple of lowres CU size */
+    width = maxBlocksInRow * X265_LOWRES_CU_SIZE;
+    lines = maxBlocksInCol * X265_LOWRES_CU_SIZE;
+
+    size_t planesize = lumaStride * (lines + 2 * origPic->m_lumaMarginY);
+    size_t padoffset = lumaStride * origPic->m_lumaMarginY + origPic->m_lumaMarginX;
+
+    if (bAQEnabled)
+    {
+        // per-CU adaptive-quant data, only needed when AQ is on
+        CHECKED_MALLOC(qpAqOffset, double, cuCount);
+        CHECKED_MALLOC(invQscaleFactor, int, cuCount);
+        CHECKED_MALLOC(qpCuTreeOffset, double, cuCount);
+    }
+    CHECKED_MALLOC(propagateCost, uint16_t, cuCount);
+
+    /* allocate lowres buffers */
+    // one contiguous zeroed allocation holds the integer-pel plane and
+    // the three half-pel interpolated planes
+    CHECKED_MALLOC_ZERO(buffer[0], pixel, 4 * planesize);
+
+    buffer[1] = buffer[0] + planesize;
+    buffer[2] = buffer[1] + planesize;
+    buffer[3] = buffer[2] + planesize;
+
+    // plane pointers skip the top/left padding margins
+    lowresPlane[0] = buffer[0] + padoffset;
+    lowresPlane[1] = buffer[1] + padoffset;
+    lowresPlane[2] = buffer[2] + padoffset;
+    lowresPlane[3] = buffer[3] + padoffset;
+
+    CHECKED_MALLOC(intraCost, int32_t, cuCount);
+    CHECKED_MALLOC(intraMode, uint8_t, cuCount);
+
+    // cost tables indexed by [p-slice distance][b-slice distance]
+    for (int i = 0; i < bframes + 2; i++)
+    {
+        for (int j = 0; j < bframes + 2; j++)
+        {
+            CHECKED_MALLOC(rowSatds[i][j], int32_t, maxBlocksInCol); // one satd per CU row
+            CHECKED_MALLOC(lowresCosts[i][j], uint16_t, cuCount);
+        }
+    }
+
+    // motion vectors / costs per reference distance, for both lists
+    for (int i = 0; i < bframes + 1; i++)
+    {
+        CHECKED_MALLOC(lowresMvs[0][i], MV, cuCount);
+        CHECKED_MALLOC(lowresMvs[1][i], MV, cuCount);
+        CHECKED_MALLOC(lowresMvCosts[0][i], int32_t, cuCount);
+        CHECKED_MALLOC(lowresMvCosts[1][i], int32_t, cuCount);
+    }
+
+    return true;
+
+fail:
+    return false;
+}
+
+/* Release everything allocated by create().  Presumably X265_FREE is a
+ * NULL-tolerant free (it is used here even after a partially failed
+ * create) -- TODO confirm against its definition. */
+void Lowres::destroy()
+{
+    X265_FREE(buffer[0]);
+    X265_FREE(intraCost);
+    X265_FREE(intraMode);
+
+    for (int p = 0; p < bframes + 2; p++)
+        for (int b = 0; b < bframes + 2; b++)
+        {
+            X265_FREE(rowSatds[p][b]);
+            X265_FREE(lowresCosts[p][b]);
+        }
+
+    for (int ref = 0; ref < bframes + 1; ref++)
+        for (int list = 0; list < 2; list++)
+        {
+            X265_FREE(lowresMvs[list][ref]);
+            X265_FREE(lowresMvCosts[list][ref]);
+        }
+
+    X265_FREE(qpAqOffset);
+    X265_FREE(invQscaleFactor);
+    X265_FREE(qpCuTreeOffset);
+    X265_FREE(propagateCost);
+}
+
+// (re) initialize lowres state
+void Lowres::init(PicYuv *origPic, int poc)
+{
+    bLastMiniGopBFrame = false;
+    bScenecut = false;  // could be a scene-cut, until ruled out by flash detection
+    bKeyframe = false; // Not a keyframe unless identified by lookahead
+    frameNum = poc;
+    leadingBframes = 0;
+    indB = 0;
+    satdCost = (int64_t)-1;
+    memset(costEst, -1, sizeof(costEst));
+    memset(weightedCostDelta, 0, sizeof(weightedCostDelta));
+
+    if (qpAqOffset && invQscaleFactor)
+        memset(costEstAq, -1, sizeof(costEstAq));
+
+    for (int y = 0; y < bframes + 2; y++)
+        for (int x = 0; x < bframes + 2; x++)
+            rowSatds[y][x][0] = -1;
+
+    for (int i = 0; i < bframes + 1; i++)
+    {
+        lowresMvs[0][i][0].x = 0x7FFF;
+        lowresMvs[1][i][0].x = 0x7FFF;
+    }
+
+    for (int i = 0; i < bframes + 2; i++)
+        intraMbs[i] = 0;
+
+    /* downscale and generate 4 hpel planes for lookahead */
+    primitives.frameInitLowres(origPic->m_picOrg[0],
+                               lowresPlane[0], lowresPlane[1], lowresPlane[2], lowresPlane[3],
+                               origPic->m_stride, lumaStride, width, lines);
+
+    /* extend hpel planes for motion search */
+    extendPicBorder(lowresPlane[0], lumaStride, width, lines, origPic->m_lumaMarginX, origPic->m_lumaMarginY);
+    extendPicBorder(lowresPlane[1], lumaStride, width, lines, origPic->m_lumaMarginX, origPic->m_lumaMarginY);
+    extendPicBorder(lowresPlane[2], lumaStride, width, lines, origPic->m_lumaMarginX, origPic->m_lumaMarginY);
+    extendPicBorder(lowresPlane[3], lumaStride, width, lines, origPic->m_lumaMarginX, origPic->m_lumaMarginY);
+    fpelPlane[0] = lowresPlane[0];
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/lowres.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,159 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Gopu Govindaswamy <gopu@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_LOWRES_H
+#define X265_LOWRES_H
+
+#include "primitives.h"
+#include "common.h"
+#include "picyuv.h"
+#include "mv.h"
+
+namespace X265_NS {
+// private namespace
+
+/* A reference frame's pixel planes (full-pel plus lowres half-pel
+ * interpolations) together with weighted-prediction parameters. */
+struct ReferencePlanes
+{
+    ReferencePlanes() { memset(this, 0, sizeof(ReferencePlanes)); }
+
+    pixel*   fpelPlane[3];   // full-pel planes: luma + two chroma
+    pixel*   lowresPlane[4]; // integer-pel plane plus three half-pel planes
+    PicYuv*  reconPic;
+
+    bool     isWeighted;
+    bool     isLowres;
+
+    intptr_t lumaStride;
+    intptr_t chromaStride;
+
+    // weighted-prediction parameters, one set per plane
+    struct {
+        int      weight;
+        int      offset;
+        int      shift;
+        int      round;
+    } w[3];
+
+    pixel* getLumaAddr(uint32_t ctuAddr, uint32_t absPartIdx) { return fpelPlane[0] + reconPic->m_cuOffsetY[ctuAddr] + reconPic->m_buOffsetY[absPartIdx]; }
+    pixel* getCbAddr(uint32_t ctuAddr, uint32_t absPartIdx)   { return fpelPlane[1] + reconPic->m_cuOffsetC[ctuAddr] + reconPic->m_buOffsetC[absPartIdx]; }
+    pixel* getCrAddr(uint32_t ctuAddr, uint32_t absPartIdx)   { return fpelPlane[2] + reconPic->m_cuOffsetC[ctuAddr] + reconPic->m_buOffsetC[absPartIdx]; }
+
+    /* lowres motion compensation, you must provide a buffer and stride for QPEL averaged pixels
+     * in case QPEL is required.  Else it returns a pointer to the HPEL pixels */
+    inline pixel *lowresMC(intptr_t blockOffset, const MV& qmv, pixel *buf, intptr_t& outstride)
+    {
+        if ((qmv.x | qmv.y) & 1) // any quarter-pel component?
+        {
+            // average the two nearest half-pel planes into the caller's buffer
+            int hpelA = (qmv.y & 2) | ((qmv.x & 2) >> 1); // plane index from hpel bits
+            pixel *frefA = lowresPlane[hpelA] + blockOffset + (qmv.x >> 2) + (qmv.y >> 2) * lumaStride;
+            int qmvx = qmv.x + (qmv.x & 1); // round qpel up to next hpel position
+            int qmvy = qmv.y + (qmv.y & 1);
+            int hpelB = (qmvx & 2) | ((qmvx & 2) >> 1);
+            pixel *frefB = lowresPlane[hpelB] + blockOffset + (qmvx >> 2) + (qmvy >> 2) * lumaStride;
+            primitives.pu[LUMA_8x8].pixelavg_pp(buf, outstride, frefA, lumaStride, frefB, lumaStride, 32);
+            return buf;
+        }
+        else
+        {
+            // pure half-pel MV: point directly into the matching plane
+            outstride = lumaStride;
+            int hpel = (qmv.y & 2) | ((qmv.x & 2) >> 1);
+            return lowresPlane[hpel] + blockOffset + (qmv.x >> 2) + (qmv.y >> 2) * lumaStride;
+        }
+    }
+
+    /* Cost one candidate QPEL motion vector: interpolate (if needed) and
+     * run the given pixel comparison against the 8x8 fenc block. */
+    inline int lowresQPelCost(pixel *fenc, intptr_t blockOffset, const MV& qmv, pixelcmp_t comp)
+    {
+        if ((qmv.x | qmv.y) & 1)
+        {
+            ALIGN_VAR_16(pixel, subpelbuf[8 * 8]);
+            int hpelA = (qmv.y & 2) | ((qmv.x & 2) >> 1);
+            pixel *frefA = lowresPlane[hpelA] + blockOffset + (qmv.x >> 2) + (qmv.y >> 2) * lumaStride;
+            int qmvx = qmv.x + (qmv.x & 1);
+            int qmvy = qmv.y + (qmv.y & 1);
+            int hpelB = (qmvy & 2) | ((qmvx & 2) >> 1);
+            pixel *frefB = lowresPlane[hpelB] + blockOffset + (qmvx >> 2) + (qmvy >> 2) * lumaStride;
+            primitives.pu[LUMA_8x8].pixelavg_pp(subpelbuf, 8, frefA, lumaStride, frefB, lumaStride, 32);
+            return comp(fenc, FENC_STRIDE, subpelbuf, 8);
+        }
+        else
+        {
+            int hpel = (qmv.y & 2) | ((qmv.x & 2) >> 1);
+            pixel *fref = lowresPlane[hpel] + blockOffset + (qmv.x >> 2) + (qmv.y >> 2) * lumaStride;
+            return comp(fenc, FENC_STRIDE, fref, lumaStride);
+        }
+    }
+};
+
+/* lowres buffers, sizes and strides */
+struct Lowres : public ReferencePlanes
+{
+    pixel *buffer[4];
+
+    int    frameNum;         // Presentation frame number
+    int    sliceType;        // Slice type decided by lookahead
+    int    width;            // width of lowres frame in pixels
+    int    lines;            // height of lowres frame in pixel lines
+    int    leadingBframes;   // number of leading B frames for P or I
+
+    bool   bScenecut;        // Set to false if the frame cannot possibly be part of a real scenecut.
+    bool   bKeyframe;
+    bool   bLastMiniGopBFrame;
+
+    /* lookahead output data */
+    int64_t   costEst[X265_BFRAME_MAX + 2][X265_BFRAME_MAX + 2];
+    int64_t   costEstAq[X265_BFRAME_MAX + 2][X265_BFRAME_MAX + 2];
+    int32_t*  rowSatds[X265_BFRAME_MAX + 2][X265_BFRAME_MAX + 2];
+    int       intraMbs[X265_BFRAME_MAX + 2];
+    int32_t*  intraCost;
+    uint8_t*  intraMode;
+    int64_t   satdCost;
+    uint16_t* lowresCostForRc;
+    uint16_t(*lowresCosts[X265_BFRAME_MAX + 2][X265_BFRAME_MAX + 2]);
+    int32_t*  lowresMvCosts[2][X265_BFRAME_MAX + 1];
+    MV*       lowresMvs[2][X265_BFRAME_MAX + 1];
+    uint32_t  maxBlocksInRow;
+    uint32_t  maxBlocksInCol;
+
+    /* used for vbvLookahead */
+    int       plannedType[X265_LOOKAHEAD_MAX + 1];
+    int64_t   plannedSatd[X265_LOOKAHEAD_MAX + 1];
+    int       indB;
+    int       bframes;
+
+    /* rate control / adaptive quant data */
+    double*   qpAqOffset;      // AQ QP offset values for each 16x16 CU
+    double*   qpCuTreeOffset;  // cuTree QP offset values for each 16x16 CU
+    int*      invQscaleFactor; // qScale values for qp Aq Offsets
+    uint64_t  wp_ssd[3];       // This is different than SSDY, this is sum(pixel^2) - sum(pixel)^2 for entire frame
+    uint64_t  wp_sum[3];
+
+    /* cutree intermediate data */
+    uint16_t* propagateCost;
+    double    weightedCostDelta[X265_BFRAME_MAX + 2];
+
+    bool create(PicYuv *origPic, int _bframes, bool bAqEnabled);
+    void destroy();
+    void init(PicYuv *origPic, int poc);
+};
+}
+
+#endif // ifndef X265_LOWRES_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/md5.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,268 @@
+/*****************************************************************************
+ * md5.cpp: Calculate MD5 for SEI
+ *****************************************************************************
+ * Copyright (C) 2011-2012 x265 project
+ *
+ * Authors: Min Chen <chenm003@163.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at chenm003@163.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "md5.h"
+
+namespace X265_NS {
+// private x265 namespace
+
+#ifndef ARCH_BIG_ENDIAN
+#define byteReverse(buf, len)   /* Nothing */
+#else
+/* In-place byte swap of nSize 32-bit little-endian words; only compiled
+ * on big-endian hosts.  Fixed: parameter type was the non-existent
+ * "uint8_t_t" (the big-endian build could never compile), and the loop
+ * counter is now unsigned to match nSize. */
+static void byteReverse(uint8_t *buf, unsigned int nSize)
+{
+    unsigned int i;
+    uint32_t tmp;
+
+    for (i = 0; i < nSize; i++)
+    {
+        tmp = ((unsigned int)buf[3] << 8 | buf[2]) << 16 |
+            ((unsigned int)buf[1] << 8 | buf[0]);
+        *(uint32_t*)buf = tmp;
+        buf += 4;
+    }
+}
+
+#endif // ifndef ARCH_BIG_ENDIAN
+
+void MD5Transform(uint32_t *buf, uint32_t *in);
+
+/*
+ * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
+ * initialization constants.
+ */
+void MD5Init(MD5Context *ctx)
+{
+    // RFC 1321 magic initialization constants (state words A, B, C, D)
+    ctx->buf[0] = 0x67452301;
+    ctx->buf[1] = 0xefcdab89;
+    ctx->buf[2] = 0x98badcfe;
+    ctx->buf[3] = 0x10325476;
+
+    // 64-bit message bit-count, kept as two 32-bit halves (low, high)
+    ctx->bits[0] = 0;
+    ctx->bits[1] = 0;
+}
+
+/*
+ * Update context to reflect the concatenation of another buffer full
+ * of bytes.
+ */
+void MD5Update(MD5Context *ctx, uint8_t *buf, uint32_t len)
+{
+    uint32_t t;
+
+    /* Update bitcount */
+
+    t = ctx->bits[0];
+    if ((ctx->bits[0] = t + ((uint32_t)len << 3)) < t)
+        ctx->bits[1]++; /* Carry from low to high */
+    ctx->bits[1] += len >> 29; // high bits of len*8 go into the upper word
+
+    t = (t >> 3) & 0x3F;        /* Bytes already in shsInfo->data */
+
+    /* Handle any leading odd-sized chunks */
+
+    if (t)
+    {
+        // fill the partially-buffered 64-byte block first
+        uint8_t *p = (uint8_t*)ctx->in + t;
+
+        t = 64 - t;
+        if (len < t)
+        {
+            // still not a full block: just buffer and return
+            memcpy(p, buf, len);
+            return;
+        }
+        memcpy(p, buf, t);
+        byteReverse(ctx->in, 16);
+        MD5Transform(ctx->buf, (uint32_t*)ctx->in);
+        buf += t;
+        len -= t;
+    }
+    /* Process data in 64-byte chunks */
+
+    while (len >= 64)
+    {
+        memcpy(ctx->in, buf, 64);
+        byteReverse(ctx->in, 16);
+        MD5Transform(ctx->buf, (uint32_t*)ctx->in);
+        buf += 64;
+        len -= 64;
+    }
+
+    /* Handle any remaining bytes of data. */
+    // tail (< 64 bytes) is buffered until the next update or MD5Final
+    memcpy(ctx->in, buf, len);
+}
+
+/*
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
+ * 1 0* (64-bit count of bits processed, MSB-first)
+ */
+void MD5Final(MD5Context *ctx, uint8_t *digest)
+{
+    uint32_t count;
+    uint8_t  *p;
+
+    /* Compute number of bytes mod 64 */
+    count = (ctx->bits[0] >> 3) & 0x3F;
+
+    /* Set the first char of padding to 0x80.  This is safe since there is
+       always at least one byte free */
+    p = ctx->in + count;
+    *p++ = 0x80;
+
+    /* Bytes of padding needed to make 64 bytes */
+    count = 64 - 1 - count;
+
+    /* Pad out to 56 mod 64 */
+    if (count < 8)
+    {
+        /* Two lots of padding:  Pad the first block to 64 bytes */
+        memset(p, 0, count);
+        byteReverse(ctx->in, 16);
+        MD5Transform(ctx->buf, (uint32_t*)ctx->in);
+
+        /* Now fill the next block with 56 bytes */
+        memset(ctx->in, 0, 56);
+    }
+    else
+    {
+        /* Pad block to 56 bytes */
+        memset(p, 0, count - 8);
+    }
+    byteReverse(ctx->in, 14); // swap only the 14 data words; length words are appended below
+
+    /* Append length in bits and transform */
+    // CHECK_ME: Always use 32-bits operator
+    // NOTE(review): casting the byte buffer to uint32_t* relies on
+    // ctx->in being suitably aligned; true here because it follows two
+    // uint32_t arrays in MD5Context, but it is a strict-aliasing risk.
+    uint32_t *table = (uint32_t*)&ctx->in;
+    table[14] = ctx->bits[0];
+    table[15] = ctx->bits[1];
+
+    MD5Transform(ctx->buf, (uint32_t*)ctx->in);
+    byteReverse((uint8_t*)ctx->buf, 4);
+    memcpy(digest, ctx->buf, 16);
+
+    memset(ctx, 0, sizeof(*ctx));        /* In case it's sensitive */
+}
+
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f, w, x, y, z, data, s) \
+    (w += f(x, y, z) + data,  w = w << s | w >> (32 - s),  w += x)
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data.  MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
+/* Process one 64-byte (16-word) block, updating the four state words in
+ * buf.  Round constants and shift amounts are per RFC 1321.
+ * Fixed: dropped the 'register' storage class -- it is a no-op hint,
+ * deprecated since C++11 and removed in C++17 (this is a .cpp file). */
+void MD5Transform(uint32_t *buf, uint32_t *in)
+{
+    uint32_t a, b, c, d;
+
+    a = buf[0];
+    b = buf[1];
+    c = buf[2];
+    d = buf[3];
+
+    MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+    MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+    MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+    MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+    MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+    MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+    MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+    MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+    MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+    MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+    MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+    MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+    MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+    MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+    MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+    MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+    MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+    MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+    MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+    MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+    MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+    MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+    MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+    MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+    MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+    MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+    MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+    MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+    MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+    MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+    MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+    MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+    MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+    MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+    MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+    MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+    MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+    MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+    MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+    MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+    MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+    MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+    MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+    MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+    MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+    MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+    MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+    MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+    MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+    MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+    MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+    MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+    MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+    MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+    MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+    MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+    MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+    MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+    MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+    MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+    MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+    MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+    MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+    MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+    buf[0] += a;
+    buf[1] += b;
+    buf[2] += c;
+    buf[3] += d;
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/md5.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,79 @@
+/*****************************************************************************
+ * md5.h: Calculate MD5
+ *****************************************************************************
+ * Copyright (C) 2011-2012 x265 project
+ *
+ * Authors: Min Chen <chenm003@163.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at chenm003@163.com.
+ *****************************************************************************/
+
+#ifndef X265_MD5_H
+#define X265_MD5_H
+
+#include "common.h"
+
+namespace X265_NS {
+//private x265 namespace
+
+/* Streaming MD5 state: chaining variables, 64-bit message bit count
+ * (as two 32-bit halves), and the 64-byte input block buffer. */
+typedef struct MD5Context
+{
+    uint32_t buf[4];
+    uint32_t bits[2];
+    unsigned char in[64];
+} MD5Context;
+
+// C-style streaming API: Init once, Update repeatedly, Final to finish
+void MD5Init(MD5Context *context);
+void MD5Update(MD5Context *context, unsigned char *buf, uint32_t len);
+void MD5Final(MD5Context *ctx, uint8_t *digest);
+
+/* Thin RAII-style C++ wrapper over the C MD5 streaming functions. */
+class MD5
+{
+public:
+
+    /**
+     * initialize digest state
+     */
+    MD5()
+    {
+        MD5Init(&m_state);
+    }
+
+    /**
+     * compute digest over buf of length len.
+     * multiple calls may extend the digest over more data.
+     */
+    void update(unsigned char *buf, unsigned len)
+    {
+        MD5Update(&m_state, buf, len);
+    }
+
+    /**
+     * flush any outstanding MD5 data, write the digest into digest.
+     * note: MD5Final also zeroes m_state, so the object must be
+     * re-initialized before reuse.
+     */
+    void finalize(unsigned char digest[16])
+    {
+        MD5Final(&m_state, digest);
+    }
+
+private:
+
+    MD5Context m_state;
+};
+}
+
+#endif // ifndef X265_MD5_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/mv.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,111 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_MV_H
+#define X265_MV_H
+
+#include "common.h"
+#include "primitives.h"
+
+namespace X265_NS {
+// private x265 namespace
+
+#if _MSC_VER
+#pragma warning(disable: 4201) // non-standard extension used (nameless struct/union)
+#endif
+
+/* 32-bit packed motion vector: two int16_t components (x, y) sharing
+ * storage with a single int32_t word for fast compare/copy/zero tests.
+ * Components are in quarter-pel units unless stated otherwise. */
+struct MV
+{
+public:
+
+    union {
+        struct { int16_t x, y; };
+
+        int32_t word; // aliases both components for whole-MV operations
+    };
+
+    MV()                                       {}
+    MV(int32_t w) : word(w)                    {}
+    MV(int16_t _x, int16_t _y) : x(_x), y(_y)  {}
+
+    // NOTE(review): takes uint32_t while the converting constructor
+    // takes int32_t -- inconsistent signedness; behavior is the same
+    // bit pattern, but confirm before unifying (overload resolution).
+    MV& operator =(uint32_t w)                 { word = w; return *this; }
+
+    MV& operator +=(const MV& other)           { x += other.x; y += other.y; return *this; }
+
+    MV& operator -=(const MV& other)           { x -= other.x; y -= other.y; return *this; }
+
+    MV& operator >>=(int i)                    { x >>= i; y >>= i; return *this; }
+
+#if USING_FTRAPV
+    /* avoid signed left-shifts when -ftrapv is enabled */
+    MV& operator <<=(int i)                    { x *= (1 << i); y *= (1 << i); return *this; }
+    MV operator <<(int i) const                { return MV(x * (1 << i), y * (1 << i)); }
+#else
+    MV& operator <<=(int i)                    { x <<= i; y <<= i; return *this; }
+    MV operator <<(int i) const                { return MV(x << i, y << i); }
+#endif
+
+    MV operator >>(int i) const                { return MV(x >> i, y >> i); }
+
+    MV operator *(int16_t i) const             { return MV(x * i, y * i); }
+
+    MV operator -(const MV& other) const       { return MV(x - other.x, y - other.y); }
+
+    MV operator +(const MV& other) const       { return MV(x + other.x, y + other.y); }
+
+    // whole-word comparison covers both components at once
+    bool operator ==(const MV& other) const    { return word == other.word; }
+
+    bool operator !=(const MV& other) const    { return word != other.word; }
+
+    bool operator !() const                    { return !word; }
+
+    // Scale down a QPEL mv to FPEL mv, rounding up by one HPEL offset
+    MV roundToFPel() const                     { return MV((x + 2) >> 2, (y + 2) >> 2); }
+
+    // Scale up an FPEL mv to QPEL by shifting up two bits
+    MV toQPel() const                          { return *this << 2; }
+
+    bool inline notZero() const                { return this->word != 0; }
+
+    // true if either component has a sub-pel (quarter/half) fraction
+    bool inline isSubpel() const               { return (this->word & 0x00030003) != 0; }
+
+    MV mvmin(const MV& m) const                { return MV(x > m.x ? m.x : x, y > m.y ? m.y : y); }
+
+    MV mvmax(const MV& m) const                { return MV(x < m.x ? m.x : x, y < m.y ? m.y : y); }
+
+    // component-wise clamp into [_min, _max]
+    MV clipped(const MV& _min, const MV& _max) const
+    {
+        MV cl = mvmin(_max);
+
+        return cl.mvmax(_min);
+    }
+
+    // returns true if MV is within range (inclusive)
+    bool checkRange(const MV& _min, const MV& _max) const
+    {
+        return x >= _min.x && x <= _max.x && y >= _min.y && y <= _max.y;
+    }
+};
+}
+
+#endif // ifndef X265_MV_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/param.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1562 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Deepthi Nandakumar <deepthi@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "slice.h"
+#include "threading.h"
+#include "param.h"
+#include "cpu.h"
+#include "x265.h"
+
+#if _MSC_VER
+#pragma warning(disable: 4996) // POSIX functions are just fine, thanks
+#pragma warning(disable: 4706) // assignment within conditional
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
+
+#if _WIN32
+#define strcasecmp _stricmp
+#endif
+
+#if !defined(HAVE_STRTOK_R)
+
+/*
+ * adapted from public domain strtok_r() by Charlie Gordon
+ *
+ *   from comp.lang.c  9/14/2007
+ *
+ *      http://groups.google.com/group/comp.lang.c/msg/2ab1ecbb86646684
+ *
+ *     (Declaration that it's public domain):
+ *      http://groups.google.com/group/comp.lang.c/msg/7c7b39328fefab9c
+ */
+
+#undef strtok_r
/*
 * Reentrant tokenizer fallback (public-domain algorithm) for platforms
 * without a native strtok_r().  *nextp carries the scan position between
 * calls; pass the string on the first call and NULL on subsequent calls.
 * Returns NULL when no tokens remain.  Mutates the input buffer.
 */
static char* strtok_r(char* str, const char* delim, char** nextp)
{
    char *start = str ? str : *nextp;

    /* skip leading delimiters; an empty remainder means no more tokens */
    start += strspn(start, delim);
    if (*start == '\0')
        return NULL;

    char *token = start;
    char *end = token + strcspn(token, delim);

    /* terminate the token unless the scan already stopped on the final NUL */
    if (*end != '\0')
        *end++ = '\0';

    *nextp = end;
    return token;
}
+
+#endif // if !defined(HAVE_STRTOK_R)
+
+#if EXPORT_C_API
+
+/* these functions are exported as C functions (default) */
+using namespace X265_NS;
+extern "C" {
+
+#else
+
+/* these functions exist within private namespace (multilib) */
+namespace X265_NS {
+
+#endif
+
+x265_param *x265_param_alloc()
+{
+    return (x265_param*)x265_malloc(sizeof(x265_param));
+}
+
/* Release an x265_param obtained from x265_param_alloc().  Thin wrapper
 * around x265_free(); note it does NOT free strings the param may point at
 * (statFileName, analysisFileName, ...) — those have separate ownership. */
void x265_param_free(x265_param* p)
{
    x265_free(p);
}
+
/* Populate *param with the library's built-in defaults (the baseline that the
 * "medium" preset leaves untouched).  The structure is zeroed first, so any
 * member not explicitly assigned below defaults to 0/NULL. */
void x265_param_default(x265_param* param)
{
    memset(param, 0, sizeof(x265_param));

    /* Applying default values to all elements in the param structure */
    param->cpuid = X265_NS::cpu_detect(); /* runtime SIMD capability bitmap */
    param->bEnableWavefront = 1;
    param->frameNumThreads = 0;           /* 0 = choose automatically */

    param->logLevel = X265_LOG_INFO;
    param->csvfn = NULL;
    param->rc.lambdaFileName = NULL;
    param->bLogCuStats = 0;
    param->decodedPictureHashSEI = 0;

    /* Quality Measurement Metrics */
    param->bEnablePsnr = 0;
    param->bEnableSsim = 0;

    /* Source specifications */
    param->internalBitDepth = X265_DEPTH;
    param->internalCsp = X265_CSP_I420;

    param->levelIdc = 0;                  /* 0 = auto-detect level */
    param->bHighTier = 0;
    param->interlaceMode = 0;
    param->bAnnexB = 1;
    param->bRepeatHeaders = 0;
    param->bEnableAccessUnitDelimiters = 0;
    param->bEmitHRDSEI = 0;
    param->bEmitInfoSEI = 1;

    /* CU definitions */
    param->maxCUSize = 64;
    param->minCUSize = 8;
    param->tuQTMaxInterDepth = 1;
    param->tuQTMaxIntraDepth = 1;
    param->maxTUSize = 32;

    /* Coding Structure */
    param->keyframeMin = 0;
    param->keyframeMax = 250;
    param->bOpenGOP = 1;
    param->bframes = 4;
    param->lookaheadDepth = 20;
    param->bFrameAdaptive = X265_B_ADAPT_TRELLIS;
    param->bBPyramid = 1;
    param->scenecutThreshold = 40; /* Magic number pulled in from x264 */
    param->lookaheadSlices = 0;

    /* Intra Coding Tools */
    param->bEnableConstrainedIntra = 0;
    param->bEnableStrongIntraSmoothing = 1;
    param->bEnableFastIntra = 0;

    /* Inter Coding tools */
    param->searchMethod = X265_HEX_SEARCH;
    param->subpelRefine = 2;
    param->searchRange = 57;
    param->maxNumMergeCand = 2;
    param->limitReferences = 0;
    param->bEnableWeightedPred = 1;
    param->bEnableWeightedBiPred = 0;
    param->bEnableEarlySkip = 0;
    param->bEnableAMP = 0;
    param->bEnableRectInter = 0;
    param->rdLevel = 3;
    param->rdoqLevel = 0;
    param->bEnableSignHiding = 1;
    param->bEnableTransformSkip = 0;
    param->bEnableTSkipFast = 0;
    param->maxNumReferences = 3;
    param->bEnableTemporalMvp = 1;

    /* Loop Filter */
    param->bEnableLoopFilter = 1;

    /* SAO Loop Filter */
    param->bEnableSAO = 1;
    param->bSaoNonDeblocked = 0;

    /* Coding Quality */
    param->cbQpOffset = 0;
    param->crQpOffset = 0;
    param->rdPenalty = 0;
    param->psyRd = 0.3;
    param->psyRdoq = 0.0;
    param->analysisMode = 0;
    param->analysisFileName = NULL;
    param->bIntraInBFrames = 0;
    param->bLossless = 0;
    param->bCULossless = 0;
    param->bEnableTemporalSubLayers = 0;

    /* Rate control options */
    param->rc.vbvMaxBitrate = 0;
    param->rc.vbvBufferSize = 0;
    param->rc.vbvBufferInit = 0.9;
    param->rc.rfConstant = 28;            /* default CRF value */
    param->rc.bitrate = 0;
    param->rc.qCompress = 0.6;
    param->rc.ipFactor = 1.4f;
    param->rc.pbFactor = 1.3f;
    param->rc.qpStep = 4;
    param->rc.rateControlMode = X265_RC_CRF;
    param->rc.qp = 32;
    param->rc.aqMode = X265_AQ_VARIANCE;
    param->rc.qgSize = 32;
    param->rc.aqStrength = 1.0;
    param->rc.cuTree = 1;
    param->rc.rfConstantMax = 0;
    param->rc.rfConstantMin = 0;
    param->rc.bStatRead = 0;
    param->rc.bStatWrite = 0;
    param->rc.statFileName = NULL;
    param->rc.complexityBlur = 20;
    param->rc.qblur = 0.5;
    param->rc.zoneCount = 0;
    param->rc.zones = NULL;
    param->rc.bEnableSlowFirstPass = 0;
    param->rc.bStrictCbr = 0;

    /* Video Usability Information (VUI) */
    param->vui.aspectRatioIdc = 0;
    param->vui.sarWidth = 0;
    param->vui.sarHeight = 0;
    param->vui.bEnableOverscanAppropriateFlag = 0;
    param->vui.bEnableVideoSignalTypePresentFlag = 0;
    param->vui.videoFormat = 5;               /* presumably "unspecified" per HEVC VUI semantics — verify */
    param->vui.bEnableVideoFullRangeFlag = 0;
    param->vui.bEnableColorDescriptionPresentFlag = 0;
    param->vui.colorPrimaries = 2;            /* presumably "unspecified" per HEVC VUI semantics — verify */
    param->vui.transferCharacteristics = 2;
    param->vui.matrixCoeffs = 2;
    param->vui.bEnableChromaLocInfoPresentFlag = 0;
    param->vui.chromaSampleLocTypeTopField = 0;
    param->vui.chromaSampleLocTypeBottomField = 0;
    param->vui.bEnableDefaultDisplayWindowFlag = 0;
    param->vui.defDispWinLeftOffset = 0;
    param->vui.defDispWinRightOffset = 0;
    param->vui.defDispWinTopOffset = 0;
    param->vui.defDispWinBottomOffset = 0;
    param->maxCLL = 0;
    param->maxFALL = 0;
    param->minLuma = 0;
    param->maxLuma = (1 << X265_DEPTH) - 1;   /* full code range for the compiled bit depth */
}
+
/* Reset *param to defaults, then overlay a named preset and an optional tune.
 * `preset` may be a name ("ultrafast".."placebo") or a numeric index into
 * x265_preset_names; presets trade encode speed for compression efficiency.
 * `tune` biases the settings toward a metric or use case.  Either argument
 * may be NULL to skip that stage.  Returns 0 on success, -1 for an unknown
 * preset or tune name. */
int x265_param_default_preset(x265_param* param, const char* preset, const char* tune)
{
#if EXPORT_C_API
    ::x265_param_default(param);
#else
    X265_NS::x265_param_default(param);
#endif

    if (preset)
    {
        /* accept a numeric preset index as well as a name */
        char *end;
        int i = strtol(preset, &end, 10);
        if (*end == 0 && i >= 0 && i < (int)(sizeof(x265_preset_names) / sizeof(*x265_preset_names) - 1))
            preset = x265_preset_names[i];

        if (!strcmp(preset, "ultrafast"))
        {
            param->lookaheadDepth = 5;
            param->scenecutThreshold = 0; // disable lookahead
            param->maxCUSize = 32;
            param->minCUSize = 16;
            param->bframes = 3;
            param->bFrameAdaptive = 0;
            param->subpelRefine = 0;
            param->searchMethod = X265_DIA_SEARCH;
            param->bEnableEarlySkip = 1;
            param->bEnableSAO = 0;
            param->bEnableSignHiding = 0;
            param->bEnableWeightedPred = 0;
            param->rdLevel = 2;
            param->maxNumReferences = 1;
            param->rc.aqStrength = 0.0;
            param->rc.aqMode = X265_AQ_NONE;
            param->rc.cuTree = 0;
            param->rc.qgSize = 32;
            param->bEnableFastIntra = 1;
        }
        else if (!strcmp(preset, "superfast"))
        {
            param->lookaheadDepth = 10;
            param->maxCUSize = 32;
            param->bframes = 3;
            param->bFrameAdaptive = 0;
            param->subpelRefine = 1;
            param->bEnableEarlySkip = 1;
            param->bEnableWeightedPred = 0;
            param->rdLevel = 2;
            param->maxNumReferences = 1;
            param->rc.aqStrength = 0.0;
            param->rc.aqMode = X265_AQ_NONE;
            param->rc.cuTree = 0;
            param->rc.qgSize = 32;
            param->bEnableSAO = 0;
            param->bEnableFastIntra = 1;
        }
        else if (!strcmp(preset, "veryfast"))
        {
            param->lookaheadDepth = 15;
            param->maxCUSize = 32;
            param->bFrameAdaptive = 0;
            param->subpelRefine = 1;
            param->bEnableEarlySkip = 1;
            param->rdLevel = 2;
            param->maxNumReferences = 1;
            param->rc.cuTree = 0;
            param->rc.qgSize = 32;
            param->bEnableFastIntra = 1;
        }
        else if (!strcmp(preset, "faster"))
        {
            param->lookaheadDepth = 15;
            param->bFrameAdaptive = 0;
            param->bEnableEarlySkip = 1;
            param->rdLevel = 2;
            param->maxNumReferences = 1;
            param->rc.cuTree = 0;
            param->bEnableFastIntra = 1;
        }
        else if (!strcmp(preset, "fast"))
        {
            param->lookaheadDepth = 15;
            param->bFrameAdaptive = 0;
            param->rdLevel = 2;
            param->maxNumReferences = 2;
            param->bEnableFastIntra = 1;
        }
        else if (!strcmp(preset, "medium"))
        {
            /* defaults */
        }
        else if (!strcmp(preset, "slow"))
        {
            param->bEnableRectInter = 1;
            param->lookaheadDepth = 25;
            param->rdLevel = 4;
            param->rdoqLevel = 2;
            param->psyRdoq = 1.0;
            param->subpelRefine = 3;
            param->maxNumMergeCand = 3;
            param->searchMethod = X265_STAR_SEARCH;
        }
        else if (!strcmp(preset, "slower"))
        {
            param->bEnableWeightedBiPred = 1;
            param->bEnableAMP = 1;
            param->bEnableRectInter = 1;
            param->lookaheadDepth = 30;
            param->bframes = 8;
            param->tuQTMaxInterDepth = 2;
            param->tuQTMaxIntraDepth = 2;
            param->rdLevel = 6;
            param->rdoqLevel = 2;
            param->psyRdoq = 1.0;
            param->subpelRefine = 3;
            param->maxNumMergeCand = 3;
            param->searchMethod = X265_STAR_SEARCH;
            param->bIntraInBFrames = 1;
        }
        else if (!strcmp(preset, "veryslow"))
        {
            param->bEnableWeightedBiPred = 1;
            param->bEnableAMP = 1;
            param->bEnableRectInter = 1;
            param->lookaheadDepth = 40;
            param->bframes = 8;
            param->tuQTMaxInterDepth = 3;
            param->tuQTMaxIntraDepth = 3;
            param->rdLevel = 6;
            param->rdoqLevel = 2;
            param->psyRdoq = 1.0;
            param->subpelRefine = 4;
            param->maxNumMergeCand = 4;
            param->searchMethod = X265_STAR_SEARCH;
            param->maxNumReferences = 5;
            param->bIntraInBFrames = 1;
        }
        else if (!strcmp(preset, "placebo"))
        {
            param->bEnableWeightedBiPred = 1;
            param->bEnableAMP = 1;
            param->bEnableRectInter = 1;
            param->lookaheadDepth = 60;
            param->searchRange = 92;
            param->bframes = 8;
            param->tuQTMaxInterDepth = 4;
            param->tuQTMaxIntraDepth = 4;
            param->rdLevel = 6;
            param->rdoqLevel = 2;
            param->psyRdoq = 1.0;
            param->subpelRefine = 5;
            param->maxNumMergeCand = 5;
            param->searchMethod = X265_STAR_SEARCH;
            param->bEnableTransformSkip = 1;
            param->maxNumReferences = 5;
            param->rc.bEnableSlowFirstPass = 1;
            param->bIntraInBFrames = 1;
            // TODO: optimized esa
        }
        else
            return -1;
    }
    if (tune)
    {
        if (!strcmp(tune, "psnr"))
        {
            /* disable perceptual optimizations that hurt PSNR */
            param->rc.aqStrength = 0.0;
            param->psyRd = 0.0;
            param->psyRdoq = 0.0;
        }
        else if (!strcmp(tune, "ssim"))
        {
            param->rc.aqMode = X265_AQ_AUTO_VARIANCE;
            param->psyRd = 0.0;
            param->psyRdoq = 0.0;
        }
        else if (!strcmp(tune, "fastdecode") ||
                 !strcmp(tune, "fast-decode"))
        {
            /* drop the in-loop filters and weighting tools that cost decode time */
            param->bEnableLoopFilter = 0;
            param->bEnableSAO = 0;
            param->bEnableWeightedPred = 0;
            param->bEnableWeightedBiPred = 0;
            param->bIntraInBFrames = 0;
        }
        else if (!strcmp(tune, "zerolatency") ||
                 !strcmp(tune, "zero-latency"))
        {
            /* remove every source of frame buffering/reordering */
            param->bFrameAdaptive = 0;
            param->bframes = 0;
            param->lookaheadDepth = 0;
            param->scenecutThreshold = 0;
            param->rc.cuTree = 0;
            param->frameNumThreads = 1;
        }
        else if (!strcmp(tune, "grain"))
        {
            param->deblockingFilterBetaOffset = -2;
            param->deblockingFilterTCOffset = -2;
            param->bIntraInBFrames = 0;
            param->rdoqLevel = 0;
            param->psyRdoq = 0;
            param->psyRd = 0.5;
            param->rc.ipFactor = 1.1;
            param->rc.pbFactor = 1.1;
            param->rc.aqStrength = 0.3;
            param->rc.qCompress = 0.8;
        }
        else
            return -1;
    }

    return 0;
}
+
/* Parse a boolean-ish string: "1"/"true"/"yes" -> 1, "0"/"false"/"no" -> 0.
 * Any other input sets bError and returns 0.  bError is only ever set, never
 * cleared, so callers can accumulate errors across several parses. */
static int x265_atobool(const char* str, bool& bError)
{
    static const char* const affirmative[] = { "1", "true", "yes" };
    static const char* const negative[]    = { "0", "false", "no" };

    for (int i = 0; i < 3; i++)
    {
        if (strcmp(str, affirmative[i]) == 0)
            return 1;
        if (strcmp(str, negative[i]) == 0)
            return 0;
    }

    bError = true;
    return 0;
}
+
+static int parseName(const char* arg, const char* const* names, bool& bError)
+{
+    for (int i = 0; names[i]; i++)
+        if (!strcmp(arg, names[i]))
+            return i;
+
+    return x265_atoi(arg, bError);
+}
+
+/* internal versions of string-to-int with additional error checking */
+#undef atoi
+#undef atof
+#define atoi(str) x265_atoi(str, bError)
+#define atof(str) x265_atof(str, bError)
+#define atobool(str) (bNameWasBool = true, x265_atobool(str, bError))
+
+int x265_param_parse(x265_param* p, const char* name, const char* value)
+{
+    bool bError = false;
+    bool bNameWasBool = false;
+    bool bValueWasNull = !value;
+    char nameBuf[64];
+
+    if (!name)
+        return X265_PARAM_BAD_NAME;
+
+    // skip -- prefix if provided
+    if (name[0] == '-' && name[1] == '-')
+        name += 2;
+
+    // s/_/-/g
+    if (strlen(name) + 1 < sizeof(nameBuf) && strchr(name, '_'))
+    {
+        char *c;
+        strcpy(nameBuf, name);
+        while ((c = strchr(nameBuf, '_')) != 0)
+            *c = '-';
+
+        name = nameBuf;
+    }
+
+    if (!strncmp(name, "no-", 3))
+    {
+        name += 3;
+        value = !value || x265_atobool(value, bError) ? "false" : "true";
+    }
+    else if (!strncmp(name, "no", 2))
+    {
+        name += 2;
+        value = !value || x265_atobool(value, bError) ? "false" : "true";
+    }
+    else if (!value)
+        value = "true";
+    else if (value[0] == '=')
+        value++;
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
+#define OPT(STR) else if (!strcmp(name, STR))
+#define OPT2(STR1, STR2) else if (!strcmp(name, STR1) || !strcmp(name, STR2))
+    if (0) ;
+    OPT("asm")
+    {
+        if (bValueWasNull)
+            p->cpuid = atobool(value);
+        else
+            p->cpuid = parseCpuName(value, bError);
+    }
+    OPT("fps")
+    {
+        if (sscanf(value, "%u/%u", &p->fpsNum, &p->fpsDenom) == 2)
+            ;
+        else
+        {
+            float fps = (float)atof(value);
+            if (fps > 0 && fps <= INT_MAX / 1000)
+            {
+                p->fpsNum = (int)(fps * 1000 + .5);
+                p->fpsDenom = 1000;
+            }
+            else
+            {
+                p->fpsNum = atoi(value);
+                p->fpsDenom = 1;
+            }
+        }
+    }
+    OPT("frame-threads") p->frameNumThreads = atoi(value);
+    OPT("pmode") p->bDistributeModeAnalysis = atobool(value);
+    OPT("pme") p->bDistributeMotionEstimation = atobool(value);
+    OPT2("level-idc", "level")
+    {
+        /* allow "5.1" or "51", both converted to integer 51 */
+        if (atof(value) < 7)
+            p->levelIdc = (int)(10 * atof(value) + .5);
+        else
+            p->levelIdc = atoi(value);
+    }
+    OPT("high-tier") p->bHighTier = atobool(value);
+    OPT("allow-non-conformance") p->bAllowNonConformance = atobool(value);
+    OPT2("log-level", "log")
+    {
+        p->logLevel = atoi(value);
+        if (bError)
+        {
+            bError = false;
+            p->logLevel = parseName(value, logLevelNames, bError) - 1;
+        }
+    }
+    OPT("cu-stats") p->bLogCuStats = atobool(value);
+    OPT("total-frames") p->totalFrames = atoi(value);
+    OPT("annexb") p->bAnnexB = atobool(value);
+    OPT("repeat-headers") p->bRepeatHeaders = atobool(value);
+    OPT("wpp") p->bEnableWavefront = atobool(value);
+    OPT("ctu") p->maxCUSize = (uint32_t)atoi(value);
+    OPT("min-cu-size") p->minCUSize = (uint32_t)atoi(value);
+    OPT("tu-intra-depth") p->tuQTMaxIntraDepth = (uint32_t)atoi(value);
+    OPT("tu-inter-depth") p->tuQTMaxInterDepth = (uint32_t)atoi(value);
+    OPT("max-tu-size") p->maxTUSize = (uint32_t)atoi(value);
+    OPT("subme") p->subpelRefine = atoi(value);
+    OPT("merange") p->searchRange = atoi(value);
+    OPT("rect") p->bEnableRectInter = atobool(value);
+    OPT("amp") p->bEnableAMP = atobool(value);
+    OPT("max-merge") p->maxNumMergeCand = (uint32_t)atoi(value);
+    OPT("temporal-mvp") p->bEnableTemporalMvp = atobool(value);
+    OPT("early-skip") p->bEnableEarlySkip = atobool(value);
+    OPT("rdpenalty") p->rdPenalty = atoi(value);
+    OPT("tskip") p->bEnableTransformSkip = atobool(value);
+    OPT("no-tskip-fast") p->bEnableTSkipFast = atobool(value);
+    OPT("tskip-fast") p->bEnableTSkipFast = atobool(value);
+    OPT("strong-intra-smoothing") p->bEnableStrongIntraSmoothing = atobool(value);
+    OPT("lossless") p->bLossless = atobool(value);
+    OPT("cu-lossless") p->bCULossless = atobool(value);
+    OPT2("constrained-intra", "cip") p->bEnableConstrainedIntra = atobool(value);
+    OPT("fast-intra") p->bEnableFastIntra = atobool(value);
+    OPT("open-gop") p->bOpenGOP = atobool(value);
+    OPT("lookahead-slices") p->lookaheadSlices = atoi(value);
+    OPT("scenecut")
+    {
+        p->scenecutThreshold = atobool(value);
+        if (bError || p->scenecutThreshold)
+        {
+            bError = false;
+            p->scenecutThreshold = atoi(value);
+        }
+    }
+    OPT("temporal-layers") p->bEnableTemporalSubLayers = atobool(value);
+    OPT("keyint") p->keyframeMax = atoi(value);
+    OPT("min-keyint") p->keyframeMin = atoi(value);
+    OPT("rc-lookahead") p->lookaheadDepth = atoi(value);
+    OPT("bframes") p->bframes = atoi(value);
+    OPT("bframe-bias") p->bFrameBias = atoi(value);
+    OPT("b-adapt")
+    {
+        p->bFrameAdaptive = atobool(value);
+        if (bError || p->bFrameAdaptive)
+        {
+            bError = false;
+            p->bFrameAdaptive = atoi(value);
+        }
+    }
+    OPT("interlace")
+    {
+        p->interlaceMode = atobool(value);
+        if (bError || p->interlaceMode)
+        {
+            bError = false;
+            p->interlaceMode = parseName(value, x265_interlace_names, bError);
+        }
+    }
+    OPT("ref") p->maxNumReferences = atoi(value);
+    OPT("limit-refs") p->limitReferences = atoi(value);
+    OPT("weightp") p->bEnableWeightedPred = atobool(value);
+    OPT("weightb") p->bEnableWeightedBiPred = atobool(value);
+    OPT("cbqpoffs") p->cbQpOffset = atoi(value);
+    OPT("crqpoffs") p->crQpOffset = atoi(value);
+    OPT("rd") p->rdLevel = atoi(value);
+    OPT2("rdoq", "rdoq-level")
+    {
+        int bval = atobool(value);
+        if (bError || bval)
+        {
+            bError = false;
+            p->rdoqLevel = atoi(value);
+        }
+        else
+            p->rdoqLevel = 0;
+    }
+    OPT("psy-rd")
+    {
+        int bval = atobool(value);
+        if (bError || bval)
+        {
+            bError = false;
+            p->psyRd = atof(value);
+        }
+        else
+            p->psyRd = 0.0;
+    }
+    OPT("psy-rdoq")
+    {
+        int bval = atobool(value);
+        if (bError || bval)
+        {
+            bError = false;
+            p->psyRdoq = atof(value);
+        }
+        else
+            p->psyRdoq = 0.0;
+    }
+    OPT("signhide") p->bEnableSignHiding = atobool(value);
+    OPT("b-intra") p->bIntraInBFrames = atobool(value);
+    OPT("lft") p->bEnableLoopFilter = atobool(value); /* DEPRECATED */
+    OPT("deblock")
+    {
+        if (2 == sscanf(value, "%d:%d", &p->deblockingFilterTCOffset, &p->deblockingFilterBetaOffset) ||
+            2 == sscanf(value, "%d,%d", &p->deblockingFilterTCOffset, &p->deblockingFilterBetaOffset))
+        {
+            p->bEnableLoopFilter = true;
+        }
+        else if (sscanf(value, "%d", &p->deblockingFilterTCOffset))
+        {
+            p->bEnableLoopFilter = 1;
+            p->deblockingFilterBetaOffset = p->deblockingFilterTCOffset;
+        }
+        else
+            p->bEnableLoopFilter = atobool(value);
+    }
+    OPT("sao") p->bEnableSAO = atobool(value);
+    OPT("sao-non-deblock") p->bSaoNonDeblocked = atobool(value);
+    OPT("ssim") p->bEnableSsim = atobool(value);
+    OPT("psnr") p->bEnablePsnr = atobool(value);
+    OPT("hash") p->decodedPictureHashSEI = atoi(value);
+    OPT("aud") p->bEnableAccessUnitDelimiters = atobool(value);
+    OPT("info") p->bEmitInfoSEI = atobool(value);
+    OPT("b-pyramid") p->bBPyramid = atobool(value);
+    OPT("hrd") p->bEmitHRDSEI = atobool(value);
+    OPT2("ipratio", "ip-factor") p->rc.ipFactor = atof(value);
+    OPT2("pbratio", "pb-factor") p->rc.pbFactor = atof(value);
+    OPT("qcomp") p->rc.qCompress = atof(value);
+    OPT("qpstep") p->rc.qpStep = atoi(value);
+    OPT("cplxblur") p->rc.complexityBlur = atof(value);
+    OPT("qblur") p->rc.qblur = atof(value);
+    OPT("aq-mode") p->rc.aqMode = atoi(value);
+    OPT("aq-strength") p->rc.aqStrength = atof(value);
+    OPT("vbv-maxrate") p->rc.vbvMaxBitrate = atoi(value);
+    OPT("vbv-bufsize") p->rc.vbvBufferSize = atoi(value);
+    OPT("vbv-init")    p->rc.vbvBufferInit = atof(value);
+    OPT("crf-max")     p->rc.rfConstantMax = atof(value);
+    OPT("crf-min")     p->rc.rfConstantMin = atof(value);
+    OPT("crf")
+    {
+        p->rc.rfConstant = atof(value);
+        p->rc.rateControlMode = X265_RC_CRF;
+    }
+    OPT("bitrate")
+    {
+        p->rc.bitrate = atoi(value);
+        p->rc.rateControlMode = X265_RC_ABR;
+    }
+    OPT("qp")
+    {
+        p->rc.qp = atoi(value);
+        p->rc.rateControlMode = X265_RC_CQP;
+    }
+    OPT("zones")
+    {
+        p->rc.zoneCount = 1;
+        const char* c;
+
+        for (c = value; *c; c++)
+            p->rc.zoneCount += (*c == '/');
+
+        p->rc.zones = X265_MALLOC(x265_zone, p->rc.zoneCount);
+        c = value;
+        for (int i = 0; i < p->rc.zoneCount; i++ )
+        {
+            int len;
+            if (3 == sscanf(c, "%d,%d,q=%d%n", &p->rc.zones[i].startFrame, &p->rc.zones[i].endFrame, &p->rc.zones[i].qp, &len))
+                p->rc.zones[i].bForceQp = 1;
+            else if (3 == sscanf(c, "%d,%d,b=%f%n", &p->rc.zones[i].startFrame, &p->rc.zones[i].endFrame, &p->rc.zones[i].bitrateFactor, &len))
+                p->rc.zones[i].bForceQp = 0;
+            else
+            {
+                bError = true;
+                break;
+            }
+            c += len + 1;
+        }
+    }
+    OPT("input-res") bError |= sscanf(value, "%dx%d", &p->sourceWidth, &p->sourceHeight) != 2;
+    OPT("input-csp") p->internalCsp = parseName(value, x265_source_csp_names, bError);
+    OPT("me")        p->searchMethod = parseName(value, x265_motion_est_names, bError);
+    OPT("cutree")    p->rc.cuTree = atobool(value);
+    OPT("slow-firstpass") p->rc.bEnableSlowFirstPass = atobool(value);
+    OPT("strict-cbr")
+    {
+        p->rc.bStrictCbr = atobool(value);
+        p->rc.pbFactor = 1.0;
+    }
+    OPT("analysis-mode") p->analysisMode = parseName(value, x265_analysis_names, bError);
+    OPT("sar")
+    {
+        p->vui.aspectRatioIdc = parseName(value, x265_sar_names, bError);
+        if (bError)
+        {
+            p->vui.aspectRatioIdc = X265_EXTENDED_SAR;
+            bError = sscanf(value, "%d:%d", &p->vui.sarWidth, &p->vui.sarHeight) != 2;
+        }
+    }
+    OPT("overscan")
+    {
+        if (!strcmp(value, "show"))
+            p->vui.bEnableOverscanInfoPresentFlag = 1;
+        else if (!strcmp(value, "crop"))
+        {
+            p->vui.bEnableOverscanInfoPresentFlag = 1;
+            p->vui.bEnableOverscanAppropriateFlag = 1;
+        }
+        else if (!strcmp(value, "undef"))
+            p->vui.bEnableOverscanInfoPresentFlag = 0;
+        else
+            bError = true;
+    }
+    OPT("videoformat")
+    {
+        p->vui.bEnableVideoSignalTypePresentFlag = 1;
+        p->vui.videoFormat = parseName(value, x265_video_format_names, bError);
+    }
+    OPT("range")
+    {
+        p->vui.bEnableVideoSignalTypePresentFlag = 1;
+        p->vui.bEnableVideoFullRangeFlag = parseName(value, x265_fullrange_names, bError);
+    }
+    OPT("colorprim")
+    {
+        p->vui.bEnableVideoSignalTypePresentFlag = 1;
+        p->vui.bEnableColorDescriptionPresentFlag = 1;
+        p->vui.colorPrimaries = parseName(value, x265_colorprim_names, bError);
+    }
+    OPT("transfer")
+    {
+        p->vui.bEnableVideoSignalTypePresentFlag = 1;
+        p->vui.bEnableColorDescriptionPresentFlag = 1;
+        p->vui.transferCharacteristics = parseName(value, x265_transfer_names, bError);
+    }
+    OPT("colormatrix")
+    {
+        p->vui.bEnableVideoSignalTypePresentFlag = 1;
+        p->vui.bEnableColorDescriptionPresentFlag = 1;
+        p->vui.matrixCoeffs = parseName(value, x265_colmatrix_names, bError);
+    }
+    OPT("chromaloc")
+    {
+        p->vui.bEnableChromaLocInfoPresentFlag = 1;
+        p->vui.chromaSampleLocTypeTopField = atoi(value);
+        p->vui.chromaSampleLocTypeBottomField = p->vui.chromaSampleLocTypeTopField;
+    }
+    OPT2("display-window", "crop-rect")
+    {
+        p->vui.bEnableDefaultDisplayWindowFlag = 1;
+        bError |= sscanf(value, "%d,%d,%d,%d",
+                         &p->vui.defDispWinLeftOffset,
+                         &p->vui.defDispWinTopOffset,
+                         &p->vui.defDispWinRightOffset,
+                         &p->vui.defDispWinBottomOffset) != 4;
+    }
+    OPT("nr-intra") p->noiseReductionIntra = atoi(value);
+    OPT("nr-inter") p->noiseReductionInter = atoi(value);
+    OPT("pass")
+    {
+        int pass = x265_clip3(0, 3, atoi(value));
+        p->rc.bStatWrite = pass & 1;
+        p->rc.bStatRead = pass & 2;
+    }
+    OPT("stats") p->rc.statFileName = strdup(value);
+    OPT("scaling-list") p->scalingLists = strdup(value);
+    OPT2("pools", "numa-pools") p->numaPools = strdup(value);
+    OPT("lambda-file") p->rc.lambdaFileName = strdup(value);
+    OPT("analysis-file") p->analysisFileName = strdup(value);
+    OPT("qg-size") p->rc.qgSize = atoi(value);
+    OPT("master-display") p->masteringDisplayColorVolume = strdup(value);
+    OPT("max-cll") bError |= sscanf(value, "%hu,%hu", &p->maxCLL, &p->maxFALL) != 2;
+    OPT("min-luma") p->minLuma = (uint16_t)atoi(value);
+    OPT("max-luma") p->maxLuma = (uint16_t)atoi(value);
+    else
+        return X265_PARAM_BAD_NAME;
+#undef OPT
+#undef atobool
+#undef atoi
+#undef atof
+
+    bError |= bValueWasNull && !bNameWasBool;
+    return bError ? X265_PARAM_BAD_VALUE : 0;
+}
+
+} /* end extern "C" or namespace */
+
+namespace X265_NS {
+// internal encoder functions
+
/* Parse an integer in any C base (0 => auto: decimal, "0x" hex, leading-0
 * octal).  Sets bError (never clears it) when the string is empty, contains
 * trailing garbage, or the value does not fit in an int.  The previous
 * version ignored ERANGE and silently truncated long -> int, so wildly
 * out-of-range input produced a wrong value with no error flagged. */
int x265_atoi(const char* str, bool& bError)
{
    char *end;

    errno = 0;
    long v = strtol(str, &end, 0);

    if (end == str || *end != '\0' || errno == ERANGE || v < INT_MIN || v > INT_MAX)
        bError = true;
    return (int)v;
}
+
/* Parse a floating-point value; sets bError (never clears it) when the
 * string is empty or has trailing garbage after the number. */
double x265_atof(const char* str, bool& bError)
{
    char *endptr = NULL;
    double result = strtod(str, &endptr);

    bError |= (endptr == str) || (*endptr != '\0');
    return result;
}
+
+/* cpu name can be:
+ *   auto || true - x265::cpu_detect()
+ *   false || no  - disabled
+ *   integer bitmap value
+ *   comma separated list of SIMD names, eg: SSE4.1,XOP */
+int parseCpuName(const char* value, bool& bError)
+{
+    if (!value)
+    {
+        bError = 1;
+        return 0;
+    }
+    int cpu;
+    if (isdigit(value[0]))
+        cpu = x265_atoi(value, bError);
+    else
+        cpu = !strcmp(value, "auto") || x265_atobool(value, bError) ? X265_NS::cpu_detect() : 0;
+
+    if (bError)
+    {
+        char *buf = strdup(value);
+        char *tok, *saveptr = NULL, *init;
+        bError = 0;
+        cpu = 0;
+        for (init = buf; (tok = strtok_r(init, ",", &saveptr)); init = NULL)
+        {
+            int i;
+            for (i = 0; X265_NS::cpu_names[i].flags && strcasecmp(tok, X265_NS::cpu_names[i].name); i++)
+            {
+            }
+
+            cpu |= X265_NS::cpu_names[i].flags;
+            if (!X265_NS::cpu_names[i].flags)
+                bError = 1;
+        }
+
+        free(buf);
+        if ((cpu & X265_CPU_SSSE3) && !(cpu & X265_CPU_SSE2_IS_SLOW))
+            cpu |= X265_CPU_SSE2_IS_FAST;
+    }
+
+    return cpu;
+}
+
/* Predefined sample-aspect-ratio (width, height) pairs: aspectRatioIdc value
 * N (1-based) selects fixedRatios[N - 1]; ratios outside this table use
 * X265_EXTENDED_SAR with explicit sarWidth/sarHeight.  Values appear to
 * follow the HEVC VUI aspect_ratio_idc table — verify against the spec. */
static const int fixedRatios[][2] =
{
    { 1,  1 },
    { 12, 11 },
    { 10, 11 },
    { 16, 11 },
    { 40, 33 },
    { 24, 11 },
    { 20, 11 },
    { 32, 11 },
    { 80, 33 },
    { 18, 11 },
    { 15, 11 },
    { 64, 33 },
    { 160, 99 },
    { 4, 3 },
    { 3, 2 },
    { 2, 1 },
};
+
+void setParamAspectRatio(x265_param* p, int width, int height)
+{
+    p->vui.aspectRatioIdc = X265_EXTENDED_SAR;
+    p->vui.sarWidth = width;
+    p->vui.sarHeight = height;
+    for (size_t i = 0; i < sizeof(fixedRatios) / sizeof(fixedRatios[0]); i++)
+    {
+        if (width == fixedRatios[i][0] && height == fixedRatios[i][1])
+        {
+            p->vui.aspectRatioIdc = (int)i + 1;
+            return;
+        }
+    }
+}
+
+/* Return via width/height the effective sample aspect ratio stored in
+ * p->vui: a fixed aspectRatioIdc (1-16) is looked up in fixedRatios,
+ * X265_EXTENDED_SAR yields the explicit sarWidth/sarHeight, and an unset
+ * or unrecognized idc yields 0/0. */
+void getParamAspectRatio(x265_param* p, int& width, int& height)
+{
+    if (!p->vui.aspectRatioIdc)
+        width = height = 0;
+    else if ((size_t)p->vui.aspectRatioIdc <= sizeof(fixedRatios) / sizeof(fixedRatios[0]))
+    {
+        width  = fixedRatios[p->vui.aspectRatioIdc - 1][0];
+        height = fixedRatios[p->vui.aspectRatioIdc - 1][1];
+    }
+    else if (p->vui.aspectRatioIdc == X265_EXTENDED_SAR)
+    {
+        width  = p->vui.sarWidth;
+        height = p->vui.sarHeight;
+    }
+    else
+        width = height = 0;
+}
+
+/* Helper behind the CHECK() macro in x265_check_params(): when bflag is
+ * true the message is logged at error level and 1 is returned, otherwise
+ * the check passed and 0 is returned. */
+static inline int _confirm(x265_param* param, bool bflag, const char* message)
+{
+    if (!bflag)
+        return 0;
+
+    x265_log(param, X265_LOG_ERROR, "%s\n", message);
+    return 1;
+}
+
+/* Validate every field of `param` against its legal range. Each violated
+ * constraint is logged at X265_LOG_ERROR through CHECK()/_confirm();
+ * returns non-zero if any check failed, 0 when the configuration is
+ * acceptable. */
+int x265_check_params(x265_param* param)
+{
+#define CHECK(expr, msg) check_failed |= _confirm(param, expr, msg)
+    int check_failed = 0; /* abort if there is a fatal configuration problem */
+
+    /* CU size is validated before anything else: the g_log2Size lookup
+     * below is only meaningful for a legal maxCUSize, so bail out early */
+    CHECK(param->maxCUSize != 64 && param->maxCUSize != 32 && param->maxCUSize != 16,
+          "max cu size must be 16, 32, or 64");
+    if (check_failed == 1)
+        return check_failed;
+
+    uint32_t maxLog2CUSize = (uint32_t)g_log2Size[param->maxCUSize];
+    uint32_t tuQTMaxLog2Size = X265_MIN(maxLog2CUSize, 5);
+    uint32_t tuQTMinLog2Size = 2; //log2(4)
+
+    CHECK(param->internalBitDepth != X265_DEPTH,
+          "internalBitDepth must match compiled bit depth");
+    /* NOTE(review): "minimim" is a typo for "minimum" in the message below */
+    CHECK(param->minCUSize != 64 && param->minCUSize != 32 && param->minCUSize != 16 && param->minCUSize != 8,
+          "minimim CU size must be 8, 16, 32, or 64");
+    CHECK(param->minCUSize > param->maxCUSize,
+          "min CU size must be less than or equal to max CU size");
+    CHECK(param->rc.qp < -6 * (param->internalBitDepth - 8) || param->rc.qp > QP_MAX_SPEC,
+          "QP exceeds supported range (-QpBDOffsety to 51)");
+    CHECK(param->fpsNum == 0 || param->fpsDenom == 0,
+          "Frame rate numerator and denominator must be specified");
+    CHECK(param->interlaceMode < 0 || param->interlaceMode > 2,
+          "Interlace mode must be 0 (progressive) 1 (top-field first) or 2 (bottom field first)");
+    CHECK(param->searchMethod<0 || param->searchMethod> X265_FULL_SEARCH,
+          "Search method is not supported value (0:DIA 1:HEX 2:UMH 3:HM 5:FULL)");
+    CHECK(param->searchRange < 0,
+          "Search Range must be more than 0");
+    CHECK(param->searchRange >= 32768,
+          "Search Range must be less than 32768");
+    CHECK(param->subpelRefine > X265_MAX_SUBPEL_LEVEL,
+          "subme must be less than or equal to X265_MAX_SUBPEL_LEVEL (7)");
+    CHECK(param->subpelRefine < 0,
+          "subme must be greater than or equal to 0");
+    CHECK(param->limitReferences > 3,
+          "limitReferences must be 0, 1, 2 or 3");
+    CHECK(param->frameNumThreads < 0 || param->frameNumThreads > X265_MAX_FRAME_THREADS,
+          "frameNumThreads (--frame-threads) must be [0 .. X265_MAX_FRAME_THREADS)");
+    CHECK(param->cbQpOffset < -12, "Min. Chroma Cb QP Offset is -12");
+    CHECK(param->cbQpOffset >  12, "Max. Chroma Cb QP Offset is  12");
+    CHECK(param->crQpOffset < -12, "Min. Chroma Cr QP Offset is -12");
+    CHECK(param->crQpOffset >  12, "Max. Chroma Cr QP Offset is  12");
+
+    CHECK(tuQTMaxLog2Size > maxLog2CUSize,
+          "QuadtreeTULog2MaxSize must be log2(maxCUSize) or smaller.");
+
+    CHECK(param->tuQTMaxInterDepth < 1 || param->tuQTMaxInterDepth > 4,
+          "QuadtreeTUMaxDepthInter must be greater than 0 and less than 5");
+    CHECK(maxLog2CUSize < tuQTMinLog2Size + param->tuQTMaxInterDepth - 1,
+          "QuadtreeTUMaxDepthInter must be less than or equal to the difference between log2(maxCUSize) and QuadtreeTULog2MinSize plus 1");
+    CHECK(param->tuQTMaxIntraDepth < 1 || param->tuQTMaxIntraDepth > 4,
+          "QuadtreeTUMaxDepthIntra must be greater 0 and less than 5");
+    /* NOTE(review): the check below validates the INTRA depth but its
+     * message says "QuadtreeTUMaxDepthInter" - looks copy-pasted */
+    CHECK(maxLog2CUSize < tuQTMinLog2Size + param->tuQTMaxIntraDepth - 1,
+          "QuadtreeTUMaxDepthInter must be less than or equal to the difference between log2(maxCUSize) and QuadtreeTULog2MinSize plus 1");
+    CHECK((param->maxTUSize != 32 && param->maxTUSize != 16 && param->maxTUSize != 8 && param->maxTUSize != 4),
+          "max TU size must be 4, 8, 16, or 32");
+    CHECK(param->maxNumMergeCand < 1, "MaxNumMergeCand must be 1 or greater.");
+    CHECK(param->maxNumMergeCand > 5, "MaxNumMergeCand must be 5 or smaller.");
+
+    CHECK(param->maxNumReferences < 1, "maxNumReferences must be 1 or greater.");
+    CHECK(param->maxNumReferences > MAX_NUM_REF, "maxNumReferences must be 16 or smaller.");
+
+#if 0
+    CHECK(param->sourceWidth < (int)param->maxCUSize || param->sourceHeight < (int)param->maxCUSize,
+          "Picture size must be at least one CTU");
+#endif
+    CHECK(param->internalCsp < X265_CSP_I400 || X265_CSP_I444 < param->internalCsp,
+          "Color space must be i420, i422, or i444");
+    /* the !! reduces the chroma shift to 0/1, so only the lowest bit of the
+     * picture dimension is tested against the subsampling requirement */
+    CHECK(param->sourceWidth & !!CHROMA_H_SHIFT(param->internalCsp),
+          "Picture width must be an integer multiple of the specified chroma subsampling");
+    CHECK(param->sourceHeight & !!CHROMA_V_SHIFT(param->internalCsp),
+          "Picture height must be an integer multiple of the specified chroma subsampling");
+
+    CHECK(param->rc.rateControlMode > X265_RC_CRF || param->rc.rateControlMode < X265_RC_ABR,
+          "Rate control mode is out of range");
+    CHECK(param->rdLevel < 0 || param->rdLevel > 6,
+          "RD Level is out of range");
+    CHECK(param->rdoqLevel < 0 || param->rdoqLevel > 2,
+        "RDOQ Level is out of range");
+    CHECK(param->bframes && param->bframes >= param->lookaheadDepth && !param->rc.bStatRead,
+          "Lookahead depth must be greater than the max consecutive bframe count");
+    CHECK(param->bframes < 0,
+          "bframe count should be greater than zero");
+    CHECK(param->bframes > X265_BFRAME_MAX,
+          "max consecutive bframe count must be 16 or smaller");
+    CHECK(param->lookaheadDepth > X265_LOOKAHEAD_MAX,
+          "Lookahead depth must be less than 256");
+    CHECK(param->lookaheadSlices > 16 || param->lookaheadSlices < 0,
+          "Lookahead slices must between 0 and 16");
+    CHECK(param->rc.aqMode < X265_AQ_NONE || X265_AQ_AUTO_VARIANCE_BIASED < param->rc.aqMode,
+          "Aq-Mode is out of range");
+    CHECK(param->rc.aqStrength < 0 || param->rc.aqStrength > 3,
+          "Aq-Strength is out of range");
+    CHECK(param->deblockingFilterTCOffset < -6 || param->deblockingFilterTCOffset > 6,
+          "deblocking filter tC offset must be in the range of -6 to +6");
+    CHECK(param->deblockingFilterBetaOffset < -6 || param->deblockingFilterBetaOffset > 6,
+          "deblocking filter Beta offset must be in the range of -6 to +6");
+    CHECK(param->psyRd < 0 || 2.0 < param->psyRd, "Psy-rd strength must be between 0 and 2.0");
+    CHECK(param->psyRdoq < 0 || 50.0 < param->psyRdoq, "Psy-rdoq strength must be between 0 and 50.0");
+    CHECK(param->bEnableWavefront < 0, "WaveFrontSynchro cannot be negative");
+    CHECK((param->vui.aspectRatioIdc < 0
+           || param->vui.aspectRatioIdc > 16)
+          && param->vui.aspectRatioIdc != X265_EXTENDED_SAR,
+          "Sample Aspect Ratio must be 0-16 or 255");
+    CHECK(param->vui.aspectRatioIdc == X265_EXTENDED_SAR && param->vui.sarWidth <= 0,
+          "Sample Aspect Ratio width must be greater than 0");
+    CHECK(param->vui.aspectRatioIdc == X265_EXTENDED_SAR && param->vui.sarHeight <= 0,
+          "Sample Aspect Ratio height must be greater than 0");
+    CHECK(param->vui.videoFormat < 0 || param->vui.videoFormat > 5,
+          "Video Format must be component,"
+          " pal, ntsc, secam, mac or undef");
+    CHECK(param->vui.colorPrimaries < 0
+          || param->vui.colorPrimaries > 9
+          || param->vui.colorPrimaries == 3,
+          "Color Primaries must be undef, bt709, bt470m,"
+          " bt470bg, smpte170m, smpte240m, film or bt2020");
+    CHECK(param->vui.transferCharacteristics < 0
+          || param->vui.transferCharacteristics > 18
+          || param->vui.transferCharacteristics == 3,
+          "Transfer Characteristics must be undef, bt709, bt470m, bt470bg,"
+          " smpte170m, smpte240m, linear, log100, log316, iec61966-2-4, bt1361e,"
+          " iec61966-2-1, bt2020-10, bt2020-12, smpte-st-2084, smpte-st-428 or arib-std-b67");
+    CHECK(param->vui.matrixCoeffs < 0
+          || param->vui.matrixCoeffs > 10
+          || param->vui.matrixCoeffs == 3,
+          "Matrix Coefficients must be undef, bt709, fcc, bt470bg, smpte170m,"
+          " smpte240m, GBR, YCgCo, bt2020nc or bt2020c");
+    CHECK(param->vui.chromaSampleLocTypeTopField < 0
+          || param->vui.chromaSampleLocTypeTopField > 5,
+          "Chroma Sample Location Type Top Field must be 0-5");
+    CHECK(param->vui.chromaSampleLocTypeBottomField < 0
+          || param->vui.chromaSampleLocTypeBottomField > 5,
+          "Chroma Sample Location Type Bottom Field must be 0-5");
+    CHECK(param->vui.defDispWinLeftOffset < 0,
+          "Default Display Window Left Offset must be 0 or greater");
+    CHECK(param->vui.defDispWinRightOffset < 0,
+          "Default Display Window Right Offset must be 0 or greater");
+    CHECK(param->vui.defDispWinTopOffset < 0,
+          "Default Display Window Top Offset must be 0 or greater");
+    CHECK(param->vui.defDispWinBottomOffset < 0,
+          "Default Display Window Bottom Offset must be 0 or greater");
+    CHECK(param->rc.rfConstant < -6 * (param->internalBitDepth - 8) || param->rc.rfConstant > 51,
+          "Valid quality based range: -qpBDOffsetY to 51");
+    CHECK(param->rc.rfConstantMax < -6 * (param->internalBitDepth - 8) || param->rc.rfConstantMax > 51,
+          "Valid quality based range: -qpBDOffsetY to 51");
+    CHECK(param->rc.rfConstantMin < -6 * (param->internalBitDepth - 8) || param->rc.rfConstantMin > 51,
+          "Valid quality based range: -qpBDOffsetY to 51");
+    CHECK(param->bFrameAdaptive < 0 || param->bFrameAdaptive > 2,
+          "Valid adaptive b scheduling values 0 - none, 1 - fast, 2 - full");
+    CHECK(param->logLevel<-1 || param->logLevel> X265_LOG_FULL,
+          "Valid Logging level -1:none 0:error 1:warning 2:info 3:debug 4:full");
+    CHECK(param->scenecutThreshold < 0,
+          "scenecutThreshold must be greater than 0");
+    CHECK(param->rdPenalty < 0 || param->rdPenalty > 2,
+          "Valid penalty for 32x32 intra TU in non-I slices. 0:disabled 1:RD-penalty 2:maximum");
+    CHECK(param->keyframeMax < -1,
+          "Invalid max IDR period in frames. value should be greater than -1");
+    CHECK(param->decodedPictureHashSEI < 0 || param->decodedPictureHashSEI > 3,
+          "Invalid hash option. Decoded Picture Hash SEI 0: disabled, 1: MD5, 2: CRC, 3: Checksum");
+    CHECK(param->rc.vbvBufferSize < 0,
+          "Size of the vbv buffer can not be less than zero");
+    CHECK(param->rc.vbvMaxBitrate < 0,
+          "Maximum local bit rate can not be less than zero");
+    CHECK(param->rc.vbvBufferInit < 0,
+          "Valid initial VBV buffer occupancy must be a fraction 0 - 1, or size in kbits");
+    CHECK(param->rc.bitrate < 0,
+          "Target bitrate can not be less than zero");
+    CHECK(param->rc.qCompress < 0.5 || param->rc.qCompress > 1.0,
+          "qCompress must be between 0.5 and 1.0");
+    if (param->noiseReductionIntra)
+        CHECK(0 > param->noiseReductionIntra || param->noiseReductionIntra > 2000, "Valid noise reduction range 0 - 2000");
+    if (param->noiseReductionInter)
+        CHECK(0 > param->noiseReductionInter || param->noiseReductionInter > 2000, "Valid noise reduction range 0 - 2000");
+    CHECK(param->rc.rateControlMode == X265_RC_CRF && param->rc.bStatRead,
+          "Constant rate-factor is incompatible with 2pass");
+    CHECK(param->rc.rateControlMode == X265_RC_CQP && param->rc.bStatRead,
+          "Constant QP is incompatible with 2pass");
+    CHECK(param->rc.bStrictCbr && (param->rc.bitrate <= 0 || param->rc.vbvBufferSize <=0),
+          "Strict-cbr cannot be applied without specifying target bitrate or vbv bufsize");
+    return check_failed;
+}
+
+/* When a turbo first pass is in effect (writing 2-pass stats but not
+ * reading them), clamp the analysis options to faster settings; the
+ * final pass will restore full quality from the user's configuration. */
+void x265_param_apply_fastfirstpass(x265_param* param)
+{
+    /* Set faster options in case of turbo firstpass */
+    if (param->rc.bStatWrite && !param->rc.bStatRead)
+    {
+        param->maxNumReferences = 1;
+        param->maxNumMergeCand = 1;
+        param->bEnableRectInter = 0;
+        param->bEnableFastIntra = 1;
+        param->bEnableAMP = 0;
+        param->searchMethod = X265_DIA_SEARCH;
+        param->subpelRefine = X265_MIN(2, param->subpelRefine);
+        param->bEnableEarlySkip = 1;
+        param->rdLevel = X265_MIN(2, param->rdLevel);
+    }
+}
+
+/* Initialize the process-wide CTU geometry globals (g_maxCUSize,
+ * g_maxCUDepth, z-scan/raster tables) from param. Only the first caller
+ * (tracked atomically via g_ctuSizeConfigured) performs the setup; any
+ * later caller must request identical CU geometry or -1 is returned,
+ * since these globals are shared by all encoders in the process. */
+int x265_set_globals(x265_param* param)
+{
+    uint32_t maxLog2CUSize = (uint32_t)g_log2Size[param->maxCUSize];
+    uint32_t minLog2CUSize = (uint32_t)g_log2Size[param->minCUSize];
+
+    if (ATOMIC_INC(&g_ctuSizeConfigured) > 1)
+    {
+        if (g_maxCUSize != param->maxCUSize)
+        {
+            x265_log(param, X265_LOG_ERROR, "maxCUSize must be the same for all encoders in a single process");
+            return -1;
+        }
+        if (g_maxCUDepth != maxLog2CUSize - minLog2CUSize)
+        {
+            x265_log(param, X265_LOG_ERROR, "maxCUDepth must be the same for all encoders in a single process");
+            return -1;
+        }
+    }
+    else
+    {
+        // set max CU width & height
+        g_maxCUSize     = param->maxCUSize;
+        g_maxLog2CUSize = maxLog2CUSize;
+
+        // compute actual CU depth with respect to config depth and max transform size
+        g_maxCUDepth    = maxLog2CUSize - minLog2CUSize;
+        g_unitSizeDepth = maxLog2CUSize - LOG2_UNIT_SIZE;
+
+        // initialize partition order
+        uint32_t* tmp = &g_zscanToRaster[0];
+        initZscanToRaster(g_unitSizeDepth, 1, 0, tmp);
+        initRasterToZscan(g_unitSizeDepth);
+    }
+    return 0;
+}
+
+/* Append " toolstr" to the tools summary in buf (capacity `size`). When
+ * the combined length plus the "x265 [info]: tools: " log prefix would
+ * exceed the line, the current buffer is flushed via x265_log and buf is
+ * restarted with toolstr.
+ * NOTE(review): assumes any single toolstr fits within `size` on its own;
+ * a longer string would overflow the sprintf in the flush branch. */
+static void appendtool(x265_param* param, char* buf, size_t size, const char* toolstr)
+{
+    static const int overhead = (int)strlen("x265 [info]: tools: ");
+
+    if (strlen(buf) + strlen(toolstr) + overhead >= size)
+    {
+        x265_log(param, X265_LOG_INFO, "tools:%s\n", buf);
+        sprintf(buf, " %s", toolstr);
+    }
+    else
+    {
+        strcat(buf, " ");
+        strcat(buf, toolstr);
+    }
+}
+
+/* Print a human-readable summary of the encoder configuration at
+ * X265_LOG_INFO level (CU/TU geometry, motion search, rate control,
+ * and the one-line "tools:" feature list). No-op at lower log levels. */
+void x265_print_params(x265_param* param)
+{
+    if (param->logLevel < X265_LOG_INFO)
+        return;
+
+    if (param->interlaceMode)
+        x265_log(param, X265_LOG_INFO, "Interlaced field inputs             : %s\n", x265_interlace_names[param->interlaceMode]);
+
+    x265_log(param, X265_LOG_INFO, "Coding QT: max CU size, min CU size : %d / %d\n", param->maxCUSize, param->minCUSize);
+
+    x265_log(param, X265_LOG_INFO, "Residual QT: max TU size, max depth : %d / %d inter / %d intra\n",
+             param->maxTUSize, param->tuQTMaxInterDepth, param->tuQTMaxIntraDepth);
+
+    x265_log(param, X265_LOG_INFO, "ME / range / subpel / merge         : %s / %d / %d / %d\n",
+             x265_motion_est_names[param->searchMethod], param->searchRange, param->subpelRefine, param->maxNumMergeCand);
+
+    if (param->keyframeMax != INT_MAX || param->scenecutThreshold)
+        x265_log(param, X265_LOG_INFO, "Keyframe min / max / scenecut       : %d / %d / %d\n", param->keyframeMin, param->keyframeMax, param->scenecutThreshold);
+    else
+        x265_log(param, X265_LOG_INFO, "Keyframe min / max / scenecut       : disabled\n");
+
+    if (param->cbQpOffset || param->crQpOffset)
+        x265_log(param, X265_LOG_INFO, "Cb/Cr QP Offset                     : %d / %d\n", param->cbQpOffset, param->crQpOffset);
+
+    if (param->rdPenalty)
+        x265_log(param, X265_LOG_INFO, "Intra 32x32 TU penalty type         : %d\n", param->rdPenalty);
+
+    x265_log(param, X265_LOG_INFO, "Lookahead / bframes / badapt        : %d / %d / %d\n", param->lookaheadDepth, param->bframes, param->bFrameAdaptive);
+    x265_log(param, X265_LOG_INFO, "b-pyramid / weightp / weightb       : %d / %d / %d\n",
+             param->bBPyramid, param->bEnableWeightedPred, param->bEnableWeightedBiPred);
+    x265_log(param, X265_LOG_INFO, "References / ref-limit  cu / depth  : %d / %d / %d\n",
+             param->maxNumReferences, !!(param->limitReferences & X265_REF_LIMIT_CU), !!(param->limitReferences & X265_REF_LIMIT_DEPTH));
+
+    if (param->rc.aqMode)
+        x265_log(param, X265_LOG_INFO, "AQ: mode / str / qg-size / cu-tree  : %d / %0.1f / %d / %d\n", param->rc.aqMode,
+                 param->rc.aqStrength, param->rc.qgSize, param->rc.cuTree);
+
+    if (param->bLossless)
+        x265_log(param, X265_LOG_INFO, "Rate Control                        : Lossless\n");
+    else switch (param->rc.rateControlMode)
+    {
+    case X265_RC_ABR:
+        x265_log(param, X265_LOG_INFO, "Rate Control / qCompress            : ABR-%d kbps / %0.2f\n", param->rc.bitrate, param->rc.qCompress); break;
+    case X265_RC_CQP:
+        x265_log(param, X265_LOG_INFO, "Rate Control                        : CQP-%d\n", param->rc.qp); break;
+    case X265_RC_CRF:
+        x265_log(param, X265_LOG_INFO, "Rate Control / qCompress            : CRF-%0.1f / %0.2f\n", param->rc.rfConstant, param->rc.qCompress); break;
+    }
+
+    if (param->rc.vbvBufferSize)
+        x265_log(param, X265_LOG_INFO, "VBV/HRD buffer / max-rate / init    : %d / %d / %.3f\n",
+                 param->rc.vbvBufferSize, param->rc.vbvMaxBitrate, param->rc.vbvBufferInit);
+
+    /* accumulate enabled tools into buf; appendtool flushes a full line */
+    char buf[80] = { 0 };
+    char tmp[40];
+#define TOOLOPT(FLAG, STR) if (FLAG) appendtool(param, buf, sizeof(buf), STR);
+#define TOOLVAL(VAL, STR)  if (VAL) { sprintf(tmp, STR, VAL); appendtool(param, buf, sizeof(buf), tmp); }
+    TOOLOPT(param->bEnableRectInter, "rect");
+    TOOLOPT(param->bEnableAMP, "amp");
+    TOOLVAL(param->rdLevel, "rd=%d");
+    TOOLVAL(param->psyRd, "psy-rd=%.2lf");
+    TOOLVAL(param->rdoqLevel, "rdoq=%d");
+    TOOLVAL(param->psyRdoq, "psy-rdoq=%.2lf");
+    TOOLOPT(param->bEnableEarlySkip, "early-skip");
+    TOOLVAL(param->noiseReductionIntra, "nr-intra=%d");
+    TOOLVAL(param->noiseReductionInter, "nr-inter=%d");
+    TOOLOPT(param->bEnableTSkipFast, "tskip-fast");
+    TOOLOPT(!param->bEnableTSkipFast && param->bEnableTransformSkip, "tskip");
+    TOOLOPT(param->bCULossless, "cu-lossless");
+    TOOLOPT(param->bEnableSignHiding, "signhide");
+    TOOLOPT(param->bEnableTemporalMvp, "tmvp");
+    TOOLOPT(param->bEnableConstrainedIntra, "cip");
+    TOOLOPT(param->bIntraInBFrames, "b-intra");
+    TOOLOPT(param->bEnableFastIntra, "fast-intra");
+    TOOLOPT(param->bEnableStrongIntraSmoothing, "strong-intra-smoothing");
+    TOOLVAL(param->lookaheadSlices, "lslices=%d");
+    if (param->bEnableLoopFilter)
+    {
+        if (param->deblockingFilterBetaOffset || param->deblockingFilterTCOffset)
+        {
+            sprintf(tmp, "deblock(tC=%d:B=%d)", param->deblockingFilterTCOffset, param->deblockingFilterBetaOffset);
+            appendtool(param, buf, sizeof(buf), tmp);
+        }
+        else
+            TOOLOPT(param->bEnableLoopFilter, "deblock");
+    }
+    TOOLOPT(param->bSaoNonDeblocked, "sao-non-deblock");
+    TOOLOPT(!param->bSaoNonDeblocked && param->bEnableSAO, "sao");
+    TOOLOPT(param->rc.bStatWrite, "stats-write");
+    TOOLOPT(param->rc.bStatRead,  "stats-read");
+    x265_log(param, X265_LOG_INFO, "tools:%s\n", buf);
+    fflush(stderr);
+}
+
+/* Log only the options whose values differ between the active `param`
+ * and the newly requested `reconfiguredParam`; the values printed are
+ * the reconfigured ones. No-op when either pointer is NULL. */
+void x265_print_reconfigured_params(x265_param* param, x265_param* reconfiguredParam)
+{
+    if (!param || !reconfiguredParam)
+        return;
+
+    x265_log(param,X265_LOG_INFO, "Reconfigured param options :\n");
+
+    char buf[80] = { 0 };
+    char tmp[40];
+#define TOOLCMP(COND1, COND2, STR, VAL)  if (COND1 != COND2) { sprintf(tmp, STR, VAL); appendtool(param, buf, sizeof(buf), tmp); }
+    TOOLCMP(param->maxNumReferences, reconfiguredParam->maxNumReferences, "ref=%d", reconfiguredParam->maxNumReferences);
+    TOOLCMP(param->maxTUSize, reconfiguredParam->maxTUSize, "max-tu-size=%d", reconfiguredParam->maxTUSize);
+    TOOLCMP(param->searchRange, reconfiguredParam->searchRange, "merange=%d", reconfiguredParam->searchRange);
+    TOOLCMP(param->subpelRefine, reconfiguredParam->subpelRefine, "subme= %d", reconfiguredParam->subpelRefine);
+    TOOLCMP(param->rdLevel, reconfiguredParam->rdLevel, "rd=%d", reconfiguredParam->rdLevel);
+    TOOLCMP(param->psyRd, reconfiguredParam->psyRd, "psy-rd=%.2lf", reconfiguredParam->psyRd);
+    TOOLCMP(param->rdoqLevel, reconfiguredParam->rdoqLevel, "rdoq=%d", reconfiguredParam->rdoqLevel);
+    TOOLCMP(param->psyRdoq, reconfiguredParam->psyRdoq, "psy-rdoq=%.2lf", reconfiguredParam->psyRdoq);
+    TOOLCMP(param->noiseReductionIntra, reconfiguredParam->noiseReductionIntra, "nr-intra=%d", reconfiguredParam->noiseReductionIntra);
+    TOOLCMP(param->noiseReductionInter, reconfiguredParam->noiseReductionInter, "nr-inter=%d", reconfiguredParam->noiseReductionInter);
+    TOOLCMP(param->bEnableTSkipFast, reconfiguredParam->bEnableTSkipFast, "tskip-fast=%d", reconfiguredParam->bEnableTSkipFast);
+    TOOLCMP(param->bEnableSignHiding, reconfiguredParam->bEnableSignHiding, "signhide=%d", reconfiguredParam->bEnableSignHiding);
+    TOOLCMP(param->bEnableFastIntra, reconfiguredParam->bEnableFastIntra, "fast-intra=%d", reconfiguredParam->bEnableFastIntra);
+    if (param->bEnableLoopFilter && (param->deblockingFilterBetaOffset != reconfiguredParam->deblockingFilterBetaOffset 
+        || param->deblockingFilterTCOffset != reconfiguredParam->deblockingFilterTCOffset))
+    {
+        sprintf(tmp, "deblock(tC=%d:B=%d)", param->deblockingFilterTCOffset, param->deblockingFilterBetaOffset);
+        appendtool(param, buf, sizeof(buf), tmp);
+    }
+    else
+        TOOLCMP(param->bEnableLoopFilter,  reconfiguredParam->bEnableLoopFilter, "deblock=%d", reconfiguredParam->bEnableLoopFilter);
+
+    TOOLCMP(param->bEnableTemporalMvp, reconfiguredParam->bEnableTemporalMvp, "tmvp=%d", reconfiguredParam->bEnableTemporalMvp);
+    TOOLCMP(param->bEnableEarlySkip, reconfiguredParam->bEnableEarlySkip, "early-skip=%d", reconfiguredParam->bEnableEarlySkip);
+    x265_log(param, X265_LOG_INFO, "tools:%s\n", buf);
+}
+
+/* Serialize `p` into one command-line-style option string (e.g. for the
+ * SEI/info header). Returns an X265_MALLOC'd buffer of MAXPARAMSIZE bytes
+ * that the caller must free, or NULL on allocation failure.
+ * NOTE(review): the sprintf calls are unbounded; correctness relies on
+ * MAXPARAMSIZE (2000) exceeding the worst-case serialized length. */
+char *x265_param2string(x265_param* p)
+{
+    char *buf, *s;
+
+    buf = s = X265_MALLOC(char, MAXPARAMSIZE);
+    if (!buf)
+        return NULL;
+
+#define BOOL(param, cliopt) \
+    s += sprintf(s, " %s", (param) ? cliopt : "no-"cliopt);
+
+    s += sprintf(s, "%dx%d", p->sourceWidth,p->sourceHeight);
+    s += sprintf(s, " fps=%u/%u", p->fpsNum, p->fpsDenom);
+    s += sprintf(s, " bitdepth=%d", p->internalBitDepth);
+    BOOL(p->bEnableWavefront, "wpp");
+    s += sprintf(s, " ctu=%d", p->maxCUSize);
+    s += sprintf(s, " min-cu-size=%d", p->minCUSize);
+    s += sprintf(s, " max-tu-size=%d", p->maxTUSize);
+    s += sprintf(s, " tu-intra-depth=%d", p->tuQTMaxIntraDepth);
+    s += sprintf(s, " tu-inter-depth=%d", p->tuQTMaxInterDepth);
+    s += sprintf(s, " me=%d", p->searchMethod);
+    s += sprintf(s, " subme=%d", p->subpelRefine);
+    s += sprintf(s, " merange=%d", p->searchRange);
+    BOOL(p->bEnableRectInter, "rect");
+    BOOL(p->bEnableAMP, "amp");
+    s += sprintf(s, " max-merge=%d", p->maxNumMergeCand);
+    BOOL(p->bEnableTemporalMvp, "temporal-mvp");
+    BOOL(p->bEnableEarlySkip, "early-skip");
+    s += sprintf(s, " rdpenalty=%d", p->rdPenalty);
+    BOOL(p->bEnableTransformSkip, "tskip");
+    BOOL(p->bEnableTSkipFast, "tskip-fast");
+    BOOL(p->bEnableStrongIntraSmoothing, "strong-intra-smoothing");
+    BOOL(p->bLossless, "lossless");
+    BOOL(p->bCULossless, "cu-lossless");
+    BOOL(p->bEnableConstrainedIntra, "constrained-intra");
+    BOOL(p->bEnableFastIntra, "fast-intra");
+    BOOL(p->bOpenGOP, "open-gop");
+    BOOL(p->bEnableTemporalSubLayers, "temporal-layers");
+    s += sprintf(s, " interlace=%d", p->interlaceMode);
+    s += sprintf(s, " keyint=%d", p->keyframeMax);
+    s += sprintf(s, " min-keyint=%d", p->keyframeMin);
+    s += sprintf(s, " scenecut=%d", p->scenecutThreshold);
+    s += sprintf(s, " rc-lookahead=%d", p->lookaheadDepth);
+    s += sprintf(s, " lookahead-slices=%d", p->lookaheadSlices);
+    s += sprintf(s, " bframes=%d", p->bframes);
+    s += sprintf(s, " bframe-bias=%d", p->bFrameBias);
+    s += sprintf(s, " b-adapt=%d", p->bFrameAdaptive);
+    s += sprintf(s, " ref=%d", p->maxNumReferences);
+    s += sprintf(s, " limit-refs=%d", p->limitReferences);
+    BOOL(p->bEnableWeightedPred, "weightp");
+    BOOL(p->bEnableWeightedBiPred, "weightb");
+    s += sprintf(s, " aq-mode=%d", p->rc.aqMode);
+    s += sprintf(s, " qg-size=%d", p->rc.qgSize);
+    s += sprintf(s, " aq-strength=%.2f", p->rc.aqStrength);
+    s += sprintf(s, " cbqpoffs=%d", p->cbQpOffset);
+    s += sprintf(s, " crqpoffs=%d", p->crQpOffset);
+    s += sprintf(s, " rd=%d", p->rdLevel);
+    s += sprintf(s, " psy-rd=%.2f", p->psyRd);
+    s += sprintf(s, " rdoq-level=%d", p->rdoqLevel);
+    s += sprintf(s, " psy-rdoq=%.2f", p->psyRdoq);
+    BOOL(p->bEnableSignHiding, "signhide");
+    BOOL(p->bEnableLoopFilter, "deblock");
+    if (p->bEnableLoopFilter && (p->deblockingFilterBetaOffset || p->deblockingFilterTCOffset))
+        s += sprintf(s, "=%d:%d", p->deblockingFilterTCOffset, p->deblockingFilterBetaOffset);
+    BOOL(p->bEnableSAO, "sao");
+    BOOL(p->bSaoNonDeblocked, "sao-non-deblock");
+    BOOL(p->bBPyramid, "b-pyramid");
+    BOOL(p->rc.cuTree, "cutree");
+    s += sprintf(s, " rc=%s", p->rc.rateControlMode == X265_RC_ABR ? (
+         p->rc.bStatRead ? "2 pass" : p->rc.bitrate == p->rc.vbvMaxBitrate ? "cbr" : "abr")
+         : p->rc.rateControlMode == X265_RC_CRF ? "crf" : "cqp");
+    if (p->rc.rateControlMode == X265_RC_ABR || p->rc.rateControlMode == X265_RC_CRF)
+    {
+        if (p->rc.rateControlMode == X265_RC_CRF)
+            s += sprintf(s, " crf=%.1f", p->rc.rfConstant);
+        else
+            s += sprintf(s, " bitrate=%d", p->rc.bitrate);
+        s += sprintf(s, " qcomp=%.2f qpmin=%d qpmax=%d qpstep=%d",
+                     p->rc.qCompress, QP_MIN, QP_MAX_SPEC, p->rc.qpStep);
+        if (p->rc.bStatRead)
+            s += sprintf( s, " cplxblur=%.1f qblur=%.1f",
+                          p->rc.complexityBlur, p->rc.qblur);
+        if (p->rc.vbvBufferSize)
+        {
+            s += sprintf(s, " vbv-maxrate=%d vbv-bufsize=%d",
+                          p->rc.vbvMaxBitrate, p->rc.vbvBufferSize);
+            if (p->rc.rateControlMode == X265_RC_CRF)
+                s += sprintf(s, " crf-max=%.1f", p->rc.rfConstantMax);
+        }
+    }
+    else if (p->rc.rateControlMode == X265_RC_CQP)
+        s += sprintf(s, " qp=%d", p->rc.qp);
+    if (!(p->rc.rateControlMode == X265_RC_CQP && p->rc.qp == 0))
+    {
+        s += sprintf(s, " ipratio=%.2f", p->rc.ipFactor);
+        if (p->bframes)
+            s += sprintf(s, " pbratio=%.2f", p->rc.pbFactor);
+    }
+#undef BOOL
+    return buf;
+}
+
+/* Load custom lambda tables from param->rc.lambdaFileName. The file must
+ * contain exactly 2 * (QP_MAX_MAX + 1) numeric values (whitespace or
+ * comma separated; '#' starts a comment), filling x265_lambda_tab first
+ * and then x265_lambda2_tab. Returns true on error (unreadable file, too
+ * few or too many values), false on success or when no lambda file was
+ * configured. */
+bool parseLambdaFile(x265_param* param)
+{
+    if (!param->rc.lambdaFileName)
+        return false;
+
+    FILE *lfn = fopen(param->rc.lambdaFileName, "r");
+    if (!lfn)
+    {
+        x265_log(param, X265_LOG_ERROR, "unable to read lambda file <%s>\n", param->rc.lambdaFileName);
+        return true;
+    }
+
+    char line[2048];
+    char *toksave = NULL, *tok = NULL, *buf = NULL;
+
+    /* t == 0 fills lambda_tab, t == 1 fills lambda2_tab; the t == 2 pass
+     * exists only to detect trailing surplus values in the file */
+    for (int t = 0; t < 3; t++)
+    {
+        double *table = t ? x265_lambda2_tab : x265_lambda_tab;
+
+        for (int i = 0; i < QP_MAX_MAX + 1; i++)
+        {
+            double value;
+
+            do
+            {
+                if (!tok)
+                {
+                    /* consume a line of text file */
+                    if (!fgets(line, sizeof(line), lfn))
+                    {
+                        fclose(lfn);
+
+                        /* EOF during the first two passes means the file is
+                         * short; EOF on the surplus-detection pass is success */
+                        if (t < 2)
+                        {
+                            x265_log(param, X265_LOG_ERROR, "lambda file is incomplete\n");
+                            return true;
+                        }
+                        else
+                            return false;
+                    }
+
+                    /* truncate at first hash */
+                    char *hash = strchr(line, '#');
+                    if (hash) *hash = 0;
+                    buf = line;
+                }
+
+                tok = strtok_r(buf, " ,", &toksave);
+                buf = NULL;
+                if (tok && sscanf(tok, "%lf", &value) == 1)
+                    break;
+            }
+            while (1);
+
+            if (t == 2)
+            {
+                x265_log(param, X265_LOG_ERROR, "lambda file contains too many values\n");
+                fclose(lfn);
+                return true;
+            }
+            else
+                x265_log(param, X265_LOG_DEBUG, "lambda%c[%d] = %lf\n", t ? '2' : ' ', i, value);
+            table[i] = value;
+        }
+    }
+
+    fclose(lfn);
+    return false;
+}
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/param.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,62 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Deepthi Nandakumar <deepthi@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_PARAM_H
+#define X265_PARAM_H
+
+namespace X265_NS {
+
+/* Internal param handling helpers implemented in common/param.cpp:
+ * validation, global setup, printing/serialization and option parsing. */
+int   x265_check_params(x265_param *param);
+int   x265_set_globals(x265_param *param);
+void  x265_print_params(x265_param *param);
+void  x265_print_reconfigured_params(x265_param* param, x265_param* reconfiguredParam);
+void  x265_param_apply_fastfirstpass(x265_param *p);
+char* x265_param2string(x265_param *param);
+int   x265_atoi(const char *str, bool& bError);
+double x265_atof(const char *str, bool& bError);
+int   parseCpuName(const char *value, bool& bError);
+void  setParamAspectRatio(x265_param *p, int width, int height);
+void  getParamAspectRatio(x265_param *p, int& width, int& height);
+bool  parseLambdaFile(x265_param *param);
+
+/* this table is kept internal to avoid confusion, since log level indices start at -1 */
+static const char * const logLevelNames[] = { "none", "error", "warning", "info", "debug", "full", 0 };
+
+#if EXPORT_C_API
+#define PARAM_NS
+#else
+/* declare param functions within private namespace */
+void x265_param_free(x265_param *);
+x265_param* x265_param_alloc();
+void x265_param_default(x265_param *param);
+int x265_param_default_preset(x265_param *, const char *preset, const char *tune);
+int x265_param_apply_profile(x265_param *, const char *profile);
+int x265_param_parse(x265_param *p, const char *name, const char *value);
+#define PARAM_NS X265_NS
+#endif
+
+/* upper bound on the buffer x265_param2string() allocates */
+#define MAXPARAMSIZE 2000
+}
+
+#endif // ifndef X265_PARAM_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/piclist.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,151 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Gopu Govindaswamy <gopu@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "piclist.h"
+#include "frame.h"
+
+using namespace X265_NS;
+
+void PicList::pushFront(Frame& curFrame)
+{
+    X265_CHECK(!curFrame.m_next && !curFrame.m_prev, "piclist: picture already in list\n"); // ensure frame is not in a list
+
+    /* New head: link the frame ahead of the current start */
+    curFrame.m_prev = NULL;
+    curFrame.m_next = m_start;
+
+    if (!m_count)
+        m_start = m_end = &curFrame;
+    else
+    {
+        m_start->m_prev = &curFrame;
+        m_start = &curFrame;
+    }
+    m_count++;
+}
+
+void PicList::pushBack(Frame& curFrame)
+{
+    X265_CHECK(!curFrame.m_next && !curFrame.m_prev, "piclist: picture already in list\n"); // ensure frame is not in a list
+
+    /* New tail: link the frame behind the current end */
+    curFrame.m_next = NULL;
+    curFrame.m_prev = m_end;
+
+    if (!m_count)
+        m_start = m_end = &curFrame;
+    else
+    {
+        m_end->m_next = &curFrame;
+        m_end = &curFrame;
+    }
+    m_count++;
+}
+
+/* Detach and return the head of the list, or NULL when the list is empty */
+Frame *PicList::popFront()
+{
+    Frame *head = m_start;
+    if (!head)
+        return NULL;
+
+    m_count--;
+    if (!m_count)
+    {
+        m_start = m_end = NULL;
+    }
+    else
+    {
+        m_start = head->m_next;
+        m_start->m_prev = NULL;
+    }
+
+    /* clear the links so the frame can be pushed onto another list */
+    head->m_next = head->m_prev = NULL;
+    return head;
+}
+
+/* Linear scan from the head for a frame with the requested POC; NULL if absent */
+Frame* PicList::getPOC(int poc)
+{
+    for (Frame *f = m_start; f; f = f->m_next)
+        if (f->m_poc == poc)
+            return f;
+    return NULL;
+}
+
+/* Detach and return the tail of the list, or NULL when the list is empty */
+Frame *PicList::popBack()
+{
+    Frame *tail = m_end;
+    if (!tail)
+        return NULL;
+
+    m_count--;
+    if (!m_count)
+    {
+        m_start = m_end = NULL;
+    }
+    else
+    {
+        m_end = tail->m_prev;
+        m_end->m_next = NULL;
+    }
+
+    /* clear the links so the frame can be pushed onto another list */
+    tail->m_next = tail->m_prev = NULL;
+    return tail;
+}
+
+/* Unlink curFrame from this list. In debug builds, membership is verified
+ * first; in release builds the caller must guarantee the frame is in this
+ * list. The frame's links are cleared on return. */
+void PicList::remove(Frame& curFrame)
+{
+#if _DEBUG
+    Frame *tmp = m_start;
+    while (tmp && tmp != &curFrame)
+    {
+        tmp = tmp->m_next;
+    }
+
+    X265_CHECK(tmp == &curFrame, "piclist: pic being removed was not in list\n"); // verify pic is in this list
+#endif
+
+    m_count--;
+    if (m_count)
+    {
+        // patch head/tail if the removed frame sits at either end
+        if (m_start == &curFrame)
+            m_start = curFrame.m_next;
+        if (m_end == &curFrame)
+            m_end = curFrame.m_prev;
+
+        // splice the neighbours together around the removed frame
+        if (curFrame.m_next)
+            curFrame.m_next->m_prev = curFrame.m_prev;
+        if (curFrame.m_prev)
+            curFrame.m_prev->m_next = curFrame.m_next;
+    }
+    else
+    {
+        m_start = m_end = NULL;
+    }
+
+    curFrame.m_next = curFrame.m_prev = NULL;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/piclist.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,80 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Gopu Govindaswamy <gopu@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_PICLIST_H
+#define X265_PICLIST_H
+
+#include "common.h"
+
+namespace X265_NS {
+
+class Frame;
+
+/** Intrusive doubly linked list of Frames; the link pointers (m_next/m_prev)
+ *  live inside Frame itself, so a frame can belong to at most one list. */
+class PicList
+{
+protected:
+
+    Frame*   m_start;   // head of the list (NULL when empty)
+    Frame*   m_end;     // tail of the list (NULL when empty)
+    int      m_count;   // number of frames currently linked
+
+public:
+
+    PicList() : m_start(NULL), m_end(NULL), m_count(0) {}
+
+    /** Push picture to end of the list */
+    void pushBack(Frame& pic);
+
+    /** Push picture to beginning of the list */
+    void pushFront(Frame& pic);
+
+    /** Pop picture from end of the list */
+    Frame* popBack();
+
+    /** Pop picture from beginning of the list */
+    Frame* popFront();
+
+    /** Find frame with specified POC */
+    Frame* getPOC(int poc);
+
+    /** Remove picture from list */
+    void remove(Frame& pic);
+
+    Frame* first()        { return m_start;   }
+    Frame* last()         { return m_end;     }
+    int size()            { return m_count;   }
+    bool empty() const    { return !m_count;  }
+
+    /* truthy when the list is non-empty */
+    operator bool() const { return !!m_count; }
+};
+}
+
+#endif // ifndef X265_PICLIST_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/picyuv.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,427 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "picyuv.h"
+#include "slice.h"
+#include "primitives.h"
+
+using namespace X265_NS;
+
+PicYuv::PicYuv()
+{
+    /* start with no allocated planes or offset tables */
+    for (int plane = 0; plane < 3; plane++)
+        m_picBuf[plane] = m_picOrg[plane] = NULL;
+
+    m_cuOffsetY = m_cuOffsetC = NULL;
+    m_buOffsetY = m_buOffsetC = NULL;
+
+    m_maxLumaLevel = 0;
+    m_avgLumaLevel = 0;
+}
+
+/* Allocate the luma plane (and chroma planes, unless picCsp is X265_CSP_I400)
+ * with filter/search margins for a picture of the given dimensions.
+ * Returns false if any allocation fails. */
+bool PicYuv::create(uint32_t picWidth, uint32_t picHeight, uint32_t picCsp)
+{
+    m_picWidth  = picWidth;
+    m_picHeight = picHeight;
+    m_hChromaShift = CHROMA_H_SHIFT(picCsp);
+    m_vChromaShift = CHROMA_V_SHIFT(picCsp);
+    m_picCsp = picCsp;
+
+    // picture dimensions rounded up to whole CTUs
+    uint32_t numCuInWidth = (m_picWidth + g_maxCUSize - 1)  / g_maxCUSize;
+    uint32_t numCuInHeight = (m_picHeight + g_maxCUSize - 1) / g_maxCUSize;
+
+    m_lumaMarginX = g_maxCUSize + 32; // search margin and 8-tap filter half-length, padded for 32-byte alignment
+    m_lumaMarginY = g_maxCUSize + 16; // margin for 8-tap filter and infinite padding
+    m_stride = (numCuInWidth * g_maxCUSize) + (m_lumaMarginX << 1);
+
+    m_chromaMarginX = m_lumaMarginX;  // keep 16-byte alignment for chroma CTUs
+    m_chromaMarginY = m_lumaMarginY >> m_vChromaShift;
+
+    m_strideC = ((numCuInWidth * g_maxCUSize) >> m_hChromaShift) + (m_chromaMarginX * 2);
+    int maxHeight = numCuInHeight * g_maxCUSize;
+
+    // full buffer includes top/bottom margins; m_picOrg points at sample (0,0)
+    CHECKED_MALLOC(m_picBuf[0], pixel, m_stride * (maxHeight + (m_lumaMarginY * 2)));
+    m_picOrg[0] = m_picBuf[0] + m_lumaMarginY   * m_stride  + m_lumaMarginX;
+
+    if (m_picCsp != X265_CSP_I400) {
+        CHECKED_MALLOC(m_picBuf[1], pixel, m_strideC * ((maxHeight >> m_vChromaShift) + (m_chromaMarginY * 2)));
+        CHECKED_MALLOC(m_picBuf[2], pixel, m_strideC * ((maxHeight >> m_vChromaShift) + (m_chromaMarginY * 2)));
+
+        m_picOrg[1] = m_picBuf[1] + m_chromaMarginY * m_strideC + m_chromaMarginX;
+        m_picOrg[2] = m_picBuf[2] + m_chromaMarginY * m_strideC + m_chromaMarginX;
+    }
+
+    return true;
+
+fail:
+    // CHECKED_MALLOC jumps here on allocation failure
+    return false;
+}
+
+/* the first picture allocated by the encoder will be asked to generate these
+ * offset arrays. Once generated, they will be provided to all future PicYuv
+ * allocated by the same encoder. */
+bool PicYuv::createOffsets(const SPS& sps)
+{
+    // one entry per partition of a CTU, indexed by z-scan partition index
+    uint32_t numPartitions = 1 << (g_unitSizeDepth * 2);
+
+    // per-CTU plane offsets, raster ordered (chroma table only when chroma exists)
+    CHECKED_MALLOC(m_cuOffsetY, intptr_t, sps.numCuInWidth * sps.numCuInHeight);
+    if (m_picCsp != X265_CSP_I400) {
+        CHECKED_MALLOC(m_cuOffsetC, intptr_t, sps.numCuInWidth * sps.numCuInHeight);
+    }
+    for (uint32_t cuRow = 0; cuRow < sps.numCuInHeight; cuRow++)
+    {
+        for (uint32_t cuCol = 0; cuCol < sps.numCuInWidth; cuCol++)
+        {
+            m_cuOffsetY[cuRow * sps.numCuInWidth + cuCol] = m_stride * cuRow * g_maxCUSize + cuCol * g_maxCUSize;
+            if (m_picCsp != X265_CSP_I400) {
+                m_cuOffsetC[cuRow * sps.numCuInWidth + cuCol] = m_strideC * cuRow * (g_maxCUSize >> m_vChromaShift) + cuCol * (g_maxCUSize >> m_hChromaShift);
+            }
+        }
+    }
+
+    // per-partition offsets within a CTU (z-scan to pel via global tables)
+    CHECKED_MALLOC(m_buOffsetY, intptr_t, (size_t)numPartitions);
+    if (m_picCsp != X265_CSP_I400) {
+        CHECKED_MALLOC(m_buOffsetC, intptr_t, (size_t)numPartitions);
+    }
+    for (uint32_t idx = 0; idx < numPartitions; ++idx)
+    {
+        intptr_t x = g_zscanToPelX[idx];
+        intptr_t y = g_zscanToPelY[idx];
+        m_buOffsetY[idx] = m_stride * y + x;
+        if (m_picCsp != X265_CSP_I400) {
+            m_buOffsetC[idx] = m_strideC * (y >> m_vChromaShift) + (x >> m_hChromaShift);
+        }
+    }
+
+    return true;
+
+fail:
+    // CHECKED_MALLOC jumps here on allocation failure
+    return false;
+}
+
+/* Release the full plane allocations; m_picBuf[1]/[2] may be NULL for I400 input */
+void PicYuv::destroy()
+{
+    for (int plane = 0; plane < 3; plane++)
+        X265_FREE(m_picBuf[plane]);
+}
+
+/* Copy pixels from an x265_picture into internal PicYuv instance.
+ * Shift pixels as necessary, mask off bits above X265_DEPTH for safety. */
+/* Copy pixels from an x265_picture into this PicYuv, converting the input
+ * bit depth to X265_DEPTH, then extend the right and bottom edges so the
+ * encoded area becomes a multiple of 16 (plus one pad row/col). Chroma
+ * planes are skipped entirely for X265_CSP_I400 input. */
+void PicYuv::copyFromPicture(const x265_picture& pic, const x265_param& param, int padx, int pady)
+{
+    /* m_picWidth is the width that is being encoded, padx indicates how many
+     * of those pixels are padding to reach multiple of MinCU(4) size.
+     *
+     * Internally, we need to extend rows out to a multiple of 16 for lowres
+     * downscale and other operations. But those padding pixels are never
+     * encoded.
+     *
+     * The same applies to m_picHeight and pady */
+
+    /* width and height - without padsize (input picture raw width and height) */
+    int width = m_picWidth - padx;
+    int height = m_picHeight - pady;
+
+    /* internal pad to multiple of 16x16 blocks */
+    uint8_t rem = width & 15;
+
+    padx = rem ? 16 - rem : padx;
+    rem = height & 15;
+    pady = rem ? 16 - rem : pady;
+
+    /* add one more row and col of pad for downscale interpolation, fixes
+     * warnings from valgrind about using uninitialized pixels */
+    padx++;
+    pady++;
+
+    X265_CHECK(pic.bitDepth >= 8, "pic.bitDepth check failure");
+
+    // 8-bit input: widen (or straight-copy) into the internal pixel buffers
+    if (pic.bitDepth == 8)
+    {
+#if (X265_DEPTH > 8)
+        {
+            pixel *yPixel = m_picOrg[0];
+            pixel *uPixel = m_picOrg[1];
+            pixel *vPixel = m_picOrg[2];
+
+            uint8_t *yChar = (uint8_t*)pic.planes[0];
+            uint8_t *uChar = (uint8_t*)pic.planes[1];
+            uint8_t *vChar = (uint8_t*)pic.planes[2];
+            int shift = (X265_DEPTH - 8);
+
+            // left-shift 8-bit samples up to the internal depth
+            primitives.planecopy_cp(yChar, pic.stride[0] / sizeof(*yChar), yPixel, m_stride, width, height, shift);
+            if (m_picCsp != X265_CSP_I400) {
+                primitives.planecopy_cp(uChar, pic.stride[1] / sizeof(*uChar), uPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift);
+                primitives.planecopy_cp(vChar, pic.stride[2] / sizeof(*vChar), vPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift);
+            }
+        }
+#else /* Case for (X265_DEPTH == 8) */
+        // TODO: do we need this path? may merge into above in future
+        {
+            pixel *yPixel = m_picOrg[0];
+            pixel *uPixel = m_picOrg[1];
+            pixel *vPixel = m_picOrg[2];
+
+            uint8_t *yChar = (uint8_t*)pic.planes[0];
+            uint8_t *uChar = (uint8_t*)pic.planes[1];
+            uint8_t *vChar = (uint8_t*)pic.planes[2];
+
+            // same depth: plain row-by-row copies
+            for (int r = 0; r < height; r++)
+            {
+                memcpy(yPixel, yChar, width * sizeof(pixel));
+
+                yPixel += m_stride;
+                yChar += pic.stride[0] / sizeof(*yChar);
+            }
+
+            if (m_picCsp != X265_CSP_I400) {
+                for (int r = 0; r < height >> m_vChromaShift; r++)
+                    {
+                        memcpy(uPixel, uChar, (width >> m_hChromaShift) * sizeof(pixel));
+                        memcpy(vPixel, vChar, (width >> m_hChromaShift) * sizeof(pixel));
+
+                        uPixel += m_strideC;
+                        vPixel += m_strideC;
+                        uChar += pic.stride[1] / sizeof(*uChar);
+                        vChar += pic.stride[2] / sizeof(*vChar);
+                    }
+            }
+        }
+#endif /* (X265_DEPTH > 8) */
+    }
+    else /* pic.bitDepth > 8 */
+    {
+        /* defensive programming, mask off bits that are supposed to be zero */
+        uint16_t mask = (1 << X265_DEPTH) - 1;
+        int shift = abs(pic.bitDepth - X265_DEPTH);
+        pixel *yPixel = m_picOrg[0];
+        pixel *uPixel = m_picOrg[1];
+        pixel *vPixel = m_picOrg[2];
+
+        uint16_t *yShort = (uint16_t*)pic.planes[0];
+        uint16_t *uShort = (uint16_t*)pic.planes[1];
+        uint16_t *vShort = (uint16_t*)pic.planes[2];
+
+        if (pic.bitDepth > X265_DEPTH)
+        {
+            /* shift right and mask pixels to final size */
+            primitives.planecopy_sp(yShort, pic.stride[0] / sizeof(*yShort), yPixel, m_stride, width, height, shift, mask);
+            if (m_picCsp != X265_CSP_I400) {
+                primitives.planecopy_sp(uShort, pic.stride[1] / sizeof(*uShort), uPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift, mask);
+                primitives.planecopy_sp(vShort, pic.stride[2] / sizeof(*vShort), vPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift, mask);
+            }
+        }
+        else /* Case for (pic.bitDepth <= X265_DEPTH) */
+        {
+            /* shift left and mask pixels to final size */
+            primitives.planecopy_sp_shl(yShort, pic.stride[0] / sizeof(*yShort), yPixel, m_stride, width, height, shift, mask);
+            if (m_picCsp != X265_CSP_I400) {
+                primitives.planecopy_sp_shl(uShort, pic.stride[1] / sizeof(*uShort), uPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift, mask);
+                primitives.planecopy_sp_shl(vShort, pic.stride[2] / sizeof(*vShort), vPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift, mask);
+            }
+        }
+    }
+
+    /* extend the right edge if width was not multiple of the minimum CU size */
+
+    pixel *Y = m_picOrg[0];
+    pixel *U = m_picOrg[1];
+    pixel *V = m_picOrg[2];
+
+    // clip luma to [minLuma, maxLuma] and record the max and average levels
+    // NOTE(review): sumLuma covers width x height samples but the average
+    // divides by the padded m_picHeight * m_picWidth -- confirm intended
+    uint64_t sumLuma;
+    m_maxLumaLevel = primitives.planeClipAndMax(Y, m_stride, width, height, &sumLuma, (pixel)param.minLuma, (pixel)param.maxLuma);
+    m_avgLumaLevel = (double)(sumLuma) / (m_picHeight * m_picWidth);
+
+    // replicate the last real column padx times to the right
+    for (int r = 0; r < height; r++)
+    {
+        for (int x = 0; x < padx; x++)
+            Y[width + x] = Y[width - 1];
+
+        Y += m_stride;
+    }
+
+    if (m_picCsp != X265_CSP_I400) {
+        for (int r = 0; r < height >> m_vChromaShift; r++)
+            {
+                for (int x = 0; x < padx >> m_hChromaShift; x++)
+                    {
+                        U[(width >> m_hChromaShift) + x] = U[(width >> m_hChromaShift) - 1];
+                        V[(width >> m_hChromaShift) + x] = V[(width >> m_hChromaShift) - 1];
+                    }
+
+                U += m_strideC;
+                V += m_strideC;
+            }
+    }
+
+    /* extend the bottom if height was not multiple of the minimum CU size */
+    Y = m_picOrg[0] + (height - 1) * m_stride;
+
+    // replicate the last real row (including its right-edge padding) pady times
+    for (int i = 1; i <= pady; i++)
+        memcpy(Y + i * m_stride, Y, (width + padx) * sizeof(pixel));
+
+    if (m_picCsp != X265_CSP_I400) {
+        U = m_picOrg[1] + ((height >> m_vChromaShift) - 1) * m_strideC;
+        V = m_picOrg[2] + ((height >> m_vChromaShift) - 1) * m_strideC;
+        for (int j = 1; j <= pady >> m_vChromaShift; j++)
+            {
+                memcpy(U + j * m_strideC, U, ((width + padx) >> m_hChromaShift) * sizeof(pixel));
+                memcpy(V + j * m_strideC, V, ((width + padx) >> m_hChromaShift) * sizeof(pixel));
+            }
+    }
+}
+
+namespace X265_NS {
+
+/* Feed n samples into the MD5 context, serializing each sample as
+ * OUTPUT_BITDEPTH_DIV8 little-endian bytes. n samples must fit in 64 bytes. */
+template<uint32_t OUTPUT_BITDEPTH_DIV8>
+static void md5_block(MD5Context& md5, const pixel* plane, uint32_t n)
+{
+    /* 64-byte staging buffer, viewed as samples of OUTPUT_BITDEPTH_DIV8 bytes */
+    uint8_t buf[64 / OUTPUT_BITDEPTH_DIV8][OUTPUT_BITDEPTH_DIV8];
+
+    for (uint32_t i = 0; i < n; i++)
+        for (uint32_t d = 0; d < OUTPUT_BITDEPTH_DIV8; d++)
+            buf[i][d] = (uint8_t)(plane[i] >> (d * 8));
+
+    MD5Update(&md5, (uint8_t*)buf, n * OUTPUT_BITDEPTH_DIV8);
+}
+
+/* Update md5 with all samples in plane in raster order, each sample
+ * is adjusted to OUTBIT_BITDEPTH_DIV8 */
+template<uint32_t OUTPUT_BITDEPTH_DIV8>
+static void md5_plane(MD5Context& md5, const pixel* plane, uint32_t width, uint32_t height, intptr_t stride)
+{
+    /* N is the number of samples to process per md5 update.
+     * All N samples must fit in buf */
+    uint32_t N = 32;
+    uint32_t width_modN = width % N;
+    uint32_t width_less_modN = width - width_modN;
+
+    // process each row in N-sample chunks, then the remainder
+    for (uint32_t y = 0; y < height; y++)
+    {
+        /* convert pel's into uint32_t chars in little endian byte order.
+         * NB, for 8bit data, data is truncated to 8bits. */
+        for (uint32_t x = 0; x < width_less_modN; x += N)
+            md5_block<OUTPUT_BITDEPTH_DIV8>(md5, &plane[y * stride + x], N);
+
+        /* mop up any of the remaining line */
+        md5_block<OUTPUT_BITDEPTH_DIV8>(md5, &plane[y * stride + width_less_modN], width_modN);
+    }
+}
+
+/* Fold every sample of the plane into crcVal, bit by bit, using the 16-bit
+ * CRC polynomial 0x1021. The low 8 bits of each sample are processed first;
+ * for builds with X265_DEPTH > 8 the upper byte is processed as well. */
+void updateCRC(const pixel* plane, uint32_t& crcVal, uint32_t height, uint32_t width, intptr_t stride)
+{
+    uint32_t crcMsb;
+    uint32_t bitVal;
+    uint32_t bitIdx;
+
+    for (uint32_t y = 0; y < height; y++)
+    {
+        for (uint32_t x = 0; x < width; x++)
+        {
+            // take CRC of first pictureData byte
+            for (bitIdx = 0; bitIdx < 8; bitIdx++)
+            {
+                crcMsb = (crcVal >> 15) & 1;
+                bitVal = (plane[y * stride + x] >> (7 - bitIdx)) & 1;
+                crcVal = (((crcVal << 1) + bitVal) & 0xffff) ^ (crcMsb * 0x1021);
+            }
+
+#if _MSC_VER
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
+            // take CRC of second pictureData byte if bit depth is greater than 8-bits
+            if (X265_DEPTH > 8)
+            {
+                for (bitIdx = 0; bitIdx < 8; bitIdx++)
+                {
+                    crcMsb = (crcVal >> 15) & 1;
+                    bitVal = (plane[y * stride + x] >> (15 - bitIdx)) & 1;
+                    crcVal = (((crcVal << 1) + bitVal) & 0xffff) ^ (crcMsb * 0x1021);
+                }
+            }
+        }
+    }
+}
+
+/* Flush 16 zero bits through the CRC register (polynomial 0x1021), then
+ * store the final 16-bit CRC big-endian in digest[0..1]; the remaining
+ * digest bytes are left untouched. */
+void crcFinish(uint32_t& crcVal, uint8_t digest[16])
+{
+    for (int i = 0; i < 16; i++)
+    {
+        uint32_t msb = (crcVal >> 15) & 1;
+        crcVal = ((crcVal << 1) & 0xffff) ^ (msb * 0x1021);
+    }
+
+    digest[0] = (uint8_t)((crcVal >> 8) & 0xff);
+    digest[1] = (uint8_t)(crcVal & 0xff);
+}
+
+/* Accumulate a coordinate-masked 32-bit checksum over the band of rows
+ * [row*cuHeight, row*cuHeight + height) of the plane. Each sample byte is
+ * XORed with a mask derived from its (x, y) position before summing. */
+void updateChecksum(const pixel* plane, uint32_t& checksumVal, uint32_t height, uint32_t width, intptr_t stride, int row, uint32_t cuHeight)
+{
+    uint8_t xor_mask;
+
+    for (uint32_t y = row * cuHeight; y < ((row * cuHeight) + height); y++)
+    {
+        for (uint32_t x = 0; x < width; x++)
+        {
+            xor_mask = (uint8_t)((x & 0xff) ^ (y & 0xff) ^ (x >> 8) ^ (y >> 8));
+            checksumVal = (checksumVal + ((plane[y * stride + x] & 0xff) ^ xor_mask)) & 0xffffffff;
+
+            // for depths above 8, also fold in the upper sample bits
+            // (>> 7 >> 1 avoids an invalid-shift warning on 8-bit pixel builds)
+            if (X265_DEPTH > 8)
+                checksumVal = (checksumVal + ((plane[y * stride + x] >> 7 >> 1) ^ xor_mask)) & 0xffffffff;
+        }
+    }
+}
+
+/* Store the 32-bit checksum big-endian in the first four digest bytes;
+ * the remaining digest bytes are left untouched. */
+void checksumFinish(uint32_t checksum, uint8_t digest[16])
+{
+    for (int i = 0; i < 4; i++)
+        digest[i] = (uint8_t)(checksum >> (24 - 8 * i));
+}
+
+/* Hash every sample of the plane into md5, packing one byte per sample for
+ * 8-bit builds and two bytes per sample otherwise. */
+void updateMD5Plane(MD5Context& md5, const pixel* plane, uint32_t width, uint32_t height, intptr_t stride)
+{
+    if (X265_DEPTH <= 8)
+        md5_plane<1>(md5, plane, width, height, stride);
+    else
+        md5_plane<2>(md5, plane, width, height, stride);
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/picyuv.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,106 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_PICYUV_H
+#define X265_PICYUV_H
+
+#include "common.h"
+#include "md5.h"
+#include "x265.h"
+
+namespace X265_NS {
+// private namespace
+
+class ShortYuv;
+struct SPS;
+
+/* A single reconstructed/source picture: three pixel planes (luma + two
+ * chroma) with margins, plus shared offset tables mapping CTU addresses and
+ * z-scan partition indices to plane offsets. */
+class PicYuv
+{
+public:
+
+    pixel*   m_picBuf[3];  // full allocated buffers, including margins
+    pixel*   m_picOrg[3];  // pointers to plane starts
+
+    uint32_t m_picWidth;
+    uint32_t m_picHeight;
+    intptr_t m_stride;     // luma row stride, in pixels
+    intptr_t m_strideC;    // chroma row stride, in pixels
+
+    uint32_t m_picCsp;         // chroma subsampling (X265_CSP_*)
+    uint32_t m_hChromaShift;
+    uint32_t m_vChromaShift;
+
+    intptr_t* m_cuOffsetY;  /* these four buffers are owned by the top-level encoder */
+    intptr_t* m_cuOffsetC;
+    intptr_t* m_buOffsetY;
+    intptr_t* m_buOffsetC;
+
+    uint32_t m_lumaMarginX;
+    uint32_t m_lumaMarginY;
+    uint32_t m_chromaMarginX;
+    uint32_t m_chromaMarginY;
+
+    uint16_t m_maxLumaLevel;   // set by copyFromPicture
+    double   m_avgLumaLevel;   // set by copyFromPicture
+
+    PicYuv();
+
+    bool  create(uint32_t picWidth, uint32_t picHeight, uint32_t csp);
+    bool  createOffsets(const SPS& sps);
+    void  destroy();
+
+    void  copyFromPicture(const x265_picture&, const x265_param& param, int padx, int pady);
+
+    intptr_t getChromaAddrOffset(uint32_t ctuAddr, uint32_t absPartIdx) const { return m_cuOffsetC[ctuAddr] + m_buOffsetC[absPartIdx]; }
+
+    /* get pointer to CTU start address */
+    pixel*  getLumaAddr(uint32_t ctuAddr)                      { return m_picOrg[0] + m_cuOffsetY[ctuAddr]; }
+    pixel*  getCbAddr(uint32_t ctuAddr)                        { return m_picOrg[1] + m_cuOffsetC[ctuAddr]; }
+    pixel*  getCrAddr(uint32_t ctuAddr)                        { return m_picOrg[2] + m_cuOffsetC[ctuAddr]; }
+    pixel*  getChromaAddr(uint32_t chromaId, uint32_t ctuAddr) { return m_picOrg[chromaId] + m_cuOffsetC[ctuAddr]; }
+    pixel*  getPlaneAddr(uint32_t plane, uint32_t ctuAddr)     { return m_picOrg[plane] + (plane ? m_cuOffsetC[ctuAddr] : m_cuOffsetY[ctuAddr]); }
+    const pixel* getLumaAddr(uint32_t ctuAddr) const           { return m_picOrg[0] + m_cuOffsetY[ctuAddr]; }
+    const pixel* getCbAddr(uint32_t ctuAddr) const             { return m_picOrg[1] + m_cuOffsetC[ctuAddr]; }
+    const pixel* getCrAddr(uint32_t ctuAddr) const             { return m_picOrg[2] + m_cuOffsetC[ctuAddr]; }
+    const pixel* getChromaAddr(uint32_t chromaId, uint32_t ctuAddr) const { return m_picOrg[chromaId] + m_cuOffsetC[ctuAddr]; }
+    const pixel* getPlaneAddr(uint32_t plane, uint32_t ctuAddr) const     { return m_picOrg[plane] + (plane ? m_cuOffsetC[ctuAddr] : m_cuOffsetY[ctuAddr]); }
+
+    /* get pointer to CU start address */
+    pixel*  getLumaAddr(uint32_t ctuAddr, uint32_t absPartIdx) { return m_picOrg[0] + m_cuOffsetY[ctuAddr] + m_buOffsetY[absPartIdx]; }
+    pixel*  getCbAddr(uint32_t ctuAddr, uint32_t absPartIdx)   { return m_picOrg[1] + m_cuOffsetC[ctuAddr] + m_buOffsetC[absPartIdx]; }
+    pixel*  getCrAddr(uint32_t ctuAddr, uint32_t absPartIdx)   { return m_picOrg[2] + m_cuOffsetC[ctuAddr] + m_buOffsetC[absPartIdx]; }
+    pixel*  getChromaAddr(uint32_t chromaId, uint32_t ctuAddr, uint32_t absPartIdx) { return m_picOrg[chromaId] + m_cuOffsetC[ctuAddr] + m_buOffsetC[absPartIdx]; }
+    const pixel* getLumaAddr(uint32_t ctuAddr, uint32_t absPartIdx) const { return m_picOrg[0] + m_cuOffsetY[ctuAddr] + m_buOffsetY[absPartIdx]; }
+    const pixel* getCbAddr(uint32_t ctuAddr, uint32_t absPartIdx) const   { return m_picOrg[1] + m_cuOffsetC[ctuAddr] + m_buOffsetC[absPartIdx]; }
+    const pixel* getCrAddr(uint32_t ctuAddr, uint32_t absPartIdx) const   { return m_picOrg[2] + m_cuOffsetC[ctuAddr] + m_buOffsetC[absPartIdx]; }
+    const pixel* getChromaAddr(uint32_t chromaId, uint32_t ctuAddr, uint32_t absPartIdx) const { return m_picOrg[chromaId] + m_cuOffsetC[ctuAddr] + m_buOffsetC[absPartIdx]; }
+};
+
+void updateChecksum(const pixel* plane, uint32_t& checksumVal, uint32_t height, uint32_t width, intptr_t stride, int row, uint32_t cuHeight);
+void updateCRC(const pixel* plane, uint32_t& crcVal, uint32_t height, uint32_t width, intptr_t stride);
+void crcFinish(uint32_t & crc, uint8_t digest[16]);
+void checksumFinish(uint32_t checksum, uint8_t digest[16]);
+void updateMD5Plane(MD5Context& md5, const pixel* plane, uint32_t width, uint32_t height, intptr_t stride);
+}
+
+#endif // ifndef X265_PICYUV_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/pixel.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1289 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Mandar Gurav <mandar@multicorewareinc.com>
+ *          Mahesh Pittala <mahesh@multicorewareinc.com>
+ *          Min Chen <min.chen@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "x265.h"
+
+#include <cstdlib> // abs()
+
+using namespace X265_NS;
+
+namespace {
+// place functions in anonymous namespace (file static)
+
+/* Sum of Absolute Differences over an lx x ly block (reference C).
+ * Each input advances by its own row stride; returns the scalar SAD. */
+template<int lx, int ly>
+int sad(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
+{
+    int sum = 0;
+
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+            sum += abs(pix1[x] - pix2[x]);
+
+        pix1 += stride_pix1;
+        pix2 += stride_pix2;
+    }
+
+    return sum;
+}
+
+/* SAD overload for 16-bit residual/coefficient planes; identical loop
+ * structure to the pixel version above. */
+template<int lx, int ly>
+int sad(const int16_t* pix1, intptr_t stride_pix1, const int16_t* pix2, intptr_t stride_pix2)
+{
+    int sum = 0;
+
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+            sum += abs(pix1[x] - pix2[x]);
+
+        pix1 += stride_pix1;
+        pix2 += stride_pix2;
+    }
+
+    return sum;
+}
+
+/* Compute three SADs at once: one fixed-stride (FENC_STRIDE) source block
+ * (pix1) against three reference blocks (pix2..pix4) that share frefstride.
+ * Results are written to res[0..2]. Used for multi-candidate motion search. */
+template<int lx, int ly>
+void sad_x3(const pixel* pix1, const pixel* pix2, const pixel* pix3, const pixel* pix4, intptr_t frefstride, int32_t* res)
+{
+    res[0] = 0;
+    res[1] = 0;
+    res[2] = 0;
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+        {
+            res[0] += abs(pix1[x] - pix2[x]);
+            res[1] += abs(pix1[x] - pix3[x]);
+            res[2] += abs(pix1[x] - pix4[x]);
+        }
+
+        /* source block always lives in the fixed-stride fenc buffer */
+        pix1 += FENC_STRIDE;
+        pix2 += frefstride;
+        pix3 += frefstride;
+        pix4 += frefstride;
+    }
+}
+
+/* Same as sad_x3 but with four reference candidates (pix2..pix5); results
+ * go to res[0..3]. */
+template<int lx, int ly>
+void sad_x4(const pixel* pix1, const pixel* pix2, const pixel* pix3, const pixel* pix4, const pixel* pix5, intptr_t frefstride, int32_t* res)
+{
+    res[0] = 0;
+    res[1] = 0;
+    res[2] = 0;
+    res[3] = 0;
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+        {
+            res[0] += abs(pix1[x] - pix2[x]);
+            res[1] += abs(pix1[x] - pix3[x]);
+            res[2] += abs(pix1[x] - pix4[x]);
+            res[3] += abs(pix1[x] - pix5[x]);
+        }
+
+        /* source block always lives in the fixed-stride fenc buffer */
+        pix1 += FENC_STRIDE;
+        pix2 += frefstride;
+        pix3 += frefstride;
+        pix4 += frefstride;
+        pix5 += frefstride;
+    }
+}
+
+template<int lx, int ly, class T1, class T2>
+sse_ret_t sse(const T1* pix1, intptr_t stride_pix1, const T2* pix2, intptr_t stride_pix2)
+{
+    sse_ret_t sum = 0;
+    int tmp;
+
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+        {
+            tmp = pix1[x] - pix2[x];
+            sum += (tmp * tmp);
+        }
+
+        pix1 += stride_pix1;
+        pix2 += stride_pix2;
+    }
+
+    return sum;
+}
+
+/* Width in bits of one packed half-lane: the SWAR code below packs two
+ * sum_t values into a single sum2_t, low half and high half. */
+#define BITS_PER_SUM (8 * sizeof(sum_t))
+
+/* One 4-point Hadamard butterfly stage, operating on (possibly packed)
+ * sum2_t lanes: (d0..d3) = H4 * (s0..s3). */
+#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) { \
+        sum2_t t0 = s0 + s1; \
+        sum2_t t1 = s0 - s1; \
+        sum2_t t2 = s2 + s3; \
+        sum2_t t3 = s2 - s3; \
+        d0 = t0 + t2; \
+        d2 = t0 - t2; \
+        d1 = t1 + t3; \
+        d3 = t1 - t3; \
+}
+
+// in: a pseudo-simd number of the form x+(y<<16)
+// return: abs(x)+(abs(y)<<16)
+// (SWAR absolute value: builds a per-lane sign mask from each lane's sign
+// bit, then applies the two's-complement identity abs(v) = (v + s) ^ s.)
+inline sum2_t abs2(sum2_t a)
+{
+    sum2_t s = ((a >> (BITS_PER_SUM - 1)) & (((sum2_t)1 << BITS_PER_SUM) + 1)) * ((sum_t)-1);
+
+    return (a + s) ^ s;
+}
+
+/* 4x4 SATD (Sum of Absolute Transformed Differences) of two pixel blocks.
+ * Uses the x264 SWAR trick: two columns are processed per sum2_t by packing
+ * one transform lane in the low half and one in the high half, so each
+ * HADAMARD4/abs2 pass covers two lanes at once. Result is halved at the end
+ * (Hadamard gain normalization). */
+static int satd_4x4(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
+{
+    sum2_t tmp[4][2];
+    sum2_t a0, a1, a2, a3, b0, b1;
+    sum2_t sum = 0;
+
+    for (int i = 0; i < 4; i++, pix1 += stride_pix1, pix2 += stride_pix2)
+    {
+        a0 = pix1[0] - pix2[0];
+        a1 = pix1[1] - pix2[1];
+        b0 = (a0 + a1) + ((a0 - a1) << BITS_PER_SUM);
+        a2 = pix1[2] - pix2[2];
+        a3 = pix1[3] - pix2[3];
+        b1 = (a2 + a3) + ((a2 - a3) << BITS_PER_SUM);
+        tmp[i][0] = b0 + b1;
+        tmp[i][1] = b0 - b1;
+    }
+
+    for (int i = 0; i < 2; i++)
+    {
+        HADAMARD4(a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i]);
+        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
+        /* fold the packed high lane down onto the low lane before summing */
+        sum += ((sum_t)a0) + (a0 >> BITS_PER_SUM);
+    }
+
+    return (int)(sum >> 1);
+}
+
+/* 4x4 Hadamard "SATD" of a single 16-bit residual block (no second operand;
+ * the block itself is transformed). Plain int32 math, no SWAR packing. */
+static int satd_4x4(const int16_t* pix1, intptr_t stride_pix1)
+{
+    int32_t tmp[4][4];
+    int32_t s01, s23, d01, d23;
+    int32_t satd = 0;
+    int d;
+
+    /* horizontal 4-point transform of each row */
+    for (d = 0; d < 4; d++, pix1 += stride_pix1)
+    {
+        s01 = pix1[0] + pix1[1];
+        s23 = pix1[2] + pix1[3];
+        d01 = pix1[0] - pix1[1];
+        d23 = pix1[2] - pix1[3];
+
+        tmp[d][0] = s01 + s23;
+        tmp[d][1] = s01 - s23;
+        tmp[d][2] = d01 - d23;
+        tmp[d][3] = d01 + d23;
+    }
+
+    /* vertical transform of each column, accumulating absolute values */
+    for (d = 0; d < 4; d++)
+    {
+        s01 = tmp[0][d] + tmp[1][d];
+        s23 = tmp[2][d] + tmp[3][d];
+        d01 = tmp[0][d] - tmp[1][d];
+        d23 = tmp[2][d] - tmp[3][d];
+        satd += abs(s01 + s23) + abs(s01 - s23) + abs(d01 - d23) + abs(d01 + d23);
+    }
+    return (int)(satd / 2);
+}
+
+// x264's SWAR version of satd 8x4, performs two 4x4 SATDs at once
+// (columns 0-3 in the low halves of each sum2_t, columns 4-7 in the high
+// halves; the two packed results are folded together before the final >>1).
+static int satd_8x4(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
+{
+    sum2_t tmp[4][4];
+    sum2_t a0, a1, a2, a3;
+    sum2_t sum = 0;
+
+    for (int i = 0; i < 4; i++, pix1 += stride_pix1, pix2 += stride_pix2)
+    {
+        a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
+        a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
+        a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
+        a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
+        HADAMARD4(tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0, a1, a2, a3);
+    }
+
+    for (int i = 0; i < 4; i++)
+    {
+        HADAMARD4(a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i]);
+        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
+    }
+
+    return (((sum_t)sum) + (sum >> BITS_PER_SUM)) >> 1;
+}
+
+template<int w, int h>
+// calculate satd in blocks of 4x4
+// (tiling wrapper: sums satd_4x4 over every 4x4 tile of a w x h block;
+// used for partition widths that are not a multiple of 8)
+int satd4(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
+{
+    int satd = 0;
+
+    for (int row = 0; row < h; row += 4)
+        for (int col = 0; col < w; col += 4)
+            satd += satd_4x4(pix1 + row * stride_pix1 + col, stride_pix1,
+                             pix2 + row * stride_pix2 + col, stride_pix2);
+
+    return satd;
+}
+
+template<int w, int h>
+// calculate satd in blocks of 8x4
+// (tiling wrapper over the SWAR satd_8x4 kernel; rows step by 4, cols by 8)
+int satd8(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
+{
+    int satd = 0;
+
+    for (int row = 0; row < h; row += 4)
+        for (int col = 0; col < w; col += 8)
+            satd += satd_8x4(pix1 + row * stride_pix1 + col, stride_pix1,
+                             pix2 + row * stride_pix2 + col, stride_pix2);
+
+    return satd;
+}
+
+/* Unnormalized 8x8 SA8D (sum of absolute 8x8 Hadamard-transformed
+ * differences) of two pixel blocks. SWAR-packed like satd_8x4: each sum2_t
+ * carries two lanes. The caller applies the (+2)>>2 normalization. */
+inline int _sa8d_8x8(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+{
+    sum2_t tmp[8][4];
+    sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
+    sum2_t sum = 0;
+
+    for (int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2)
+    {
+        a0 = pix1[0] - pix2[0];
+        a1 = pix1[1] - pix2[1];
+        b0 = (a0 + a1) + ((a0 - a1) << BITS_PER_SUM);
+        a2 = pix1[2] - pix2[2];
+        a3 = pix1[3] - pix2[3];
+        b1 = (a2 + a3) + ((a2 - a3) << BITS_PER_SUM);
+        a4 = pix1[4] - pix2[4];
+        a5 = pix1[5] - pix2[5];
+        b2 = (a4 + a5) + ((a4 - a5) << BITS_PER_SUM);
+        a6 = pix1[6] - pix2[6];
+        a7 = pix1[7] - pix2[7];
+        b3 = (a6 + a7) + ((a6 - a7) << BITS_PER_SUM);
+        HADAMARD4(tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0, b1, b2, b3);
+    }
+
+    for (int i = 0; i < 4; i++)
+    {
+        HADAMARD4(a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i]);
+        HADAMARD4(a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i]);
+        b0  = abs2(a0 + a4) + abs2(a0 - a4);
+        b0 += abs2(a1 + a5) + abs2(a1 - a5);
+        b0 += abs2(a2 + a6) + abs2(a2 - a6);
+        b0 += abs2(a3 + a7) + abs2(a3 - a7);
+        /* fold packed high lane onto low lane */
+        sum += (sum_t)b0 + (b0 >> BITS_PER_SUM);
+    }
+
+    return (int)sum;
+}
+
+/* Normalized 8x8 SA8D: rounds and divides the raw transform sum by 4. */
+inline int sa8d_8x8(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+{
+    return (int)((_sa8d_8x8(pix1, i_pix1, pix2, i_pix2) + 2) >> 2);
+}
+
+/* Unnormalized 8x8 Hadamard energy of a single 16-bit residual block
+ * (one-operand SA8D). Plain int32 math, no SWAR packing. */
+inline int _sa8d_8x8(const int16_t* pix1, intptr_t i_pix1)
+{
+    int32_t tmp[8][8];
+    int32_t a0, a1, a2, a3, a4, a5, a6, a7;
+    int32_t sum = 0;
+
+    /* horizontal 8-point transform of each row */
+    for (int i = 0; i < 8; i++, pix1 += i_pix1)
+    {
+        a0 = pix1[0] + pix1[1];
+        a1 = pix1[2] + pix1[3];
+        a2 = pix1[4] + pix1[5];
+        a3 = pix1[6] + pix1[7];
+        a4 = pix1[0] - pix1[1];
+        a5 = pix1[2] - pix1[3];
+        a6 = pix1[4] - pix1[5];
+        a7 = pix1[6] - pix1[7];
+        tmp[i][0] = (a0 + a1) + (a2 + a3);
+        tmp[i][1] = (a0 + a1) - (a2 + a3);
+        tmp[i][2] = (a0 - a1) + (a2 - a3);
+        tmp[i][3] = (a0 - a1) - (a2 - a3);
+        tmp[i][4] = (a4 + a5) + (a6 + a7);
+        tmp[i][5] = (a4 + a5) - (a6 + a7);
+        tmp[i][6] = (a4 - a5) + (a6 - a7);
+        tmp[i][7] = (a4 - a5) - (a6 - a7);
+    }
+
+    /* vertical 8-point transform of each column, accumulating |coeff| */
+    for (int i = 0; i < 8; i++)
+    {
+        a0 = (tmp[0][i] + tmp[1][i]) + (tmp[2][i] + tmp[3][i]);
+        a2 = (tmp[0][i] + tmp[1][i]) - (tmp[2][i] + tmp[3][i]);
+        a1 = (tmp[0][i] - tmp[1][i]) + (tmp[2][i] - tmp[3][i]);
+        a3 = (tmp[0][i] - tmp[1][i]) - (tmp[2][i] - tmp[3][i]);
+        a4 = (tmp[4][i] + tmp[5][i]) + (tmp[6][i] + tmp[7][i]);
+        a6 = (tmp[4][i] + tmp[5][i]) - (tmp[6][i] + tmp[7][i]);
+        a5 = (tmp[4][i] - tmp[5][i]) + (tmp[6][i] - tmp[7][i]);
+        a7 = (tmp[4][i] - tmp[5][i]) - (tmp[6][i] - tmp[7][i]);
+        a0 = abs(a0 + a4) + abs(a0 - a4);
+        a0 += abs(a1 + a5) + abs(a1 - a5);
+        a0 += abs(a2 + a6) + abs(a2 - a6);
+        a0 += abs(a3 + a7) + abs(a3 - a7);
+        sum += a0;
+    }
+
+    return (int)sum;
+}
+
+/* Normalized one-operand 8x8 SA8D: round and divide the raw sum by 4. */
+static int sa8d_8x8(const int16_t* pix1, intptr_t i_pix1)
+{
+    return (int)((_sa8d_8x8(pix1, i_pix1) + 2) >> 2);
+}
+
+/* 16x16 SA8D built from four unnormalized 8x8 quadrants, with a single
+ * rounding step applied to the combined sum. */
+static int sa8d_16x16(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+{
+    int sum = _sa8d_8x8(pix1, i_pix1, pix2, i_pix2)
+        + _sa8d_8x8(pix1 + 8, i_pix1, pix2 + 8, i_pix2)
+        + _sa8d_8x8(pix1 + 8 * i_pix1, i_pix1, pix2 + 8 * i_pix2, i_pix2)
+        + _sa8d_8x8(pix1 + 8 + 8 * i_pix1, i_pix1, pix2 + 8 + 8 * i_pix2, i_pix2);
+
+    // This matches x264 sa8d_16x16, but is slightly different from HM's behavior because
+    // this version only rounds once at the end
+    return (sum + 2) >> 2;
+}
+
+template<int w, int h>
+// Calculate sa8d in blocks of 8x8
+// (tiling wrapper: each 8x8 tile is individually normalized by sa8d_8x8)
+int sa8d8(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+{
+    int cost = 0;
+
+    for (int y = 0; y < h; y += 8)
+        for (int x = 0; x < w; x += 8)
+            cost += sa8d_8x8(pix1 + i_pix1 * y + x, i_pix1, pix2 + i_pix2 * y + x, i_pix2);
+
+    return cost;
+}
+
+template<int w, int h>
+// Calculate sa8d in blocks of 16x16
+// (tiling wrapper over sa8d_16x16, which rounds once per 16x16 tile)
+int sa8d16(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+{
+    int cost = 0;
+
+    for (int y = 0; y < h; y += 16)
+        for (int x = 0; x < w; x += 16)
+            cost += sa8d_16x16(pix1 + i_pix1 * y + x, i_pix1, pix2 + i_pix2 * y + x, i_pix2);
+
+    return cost;
+}
+
+/* Sum of squares of a size x size 16-bit residual block (SSD against zero).
+ * NOTE(review): the int accumulator can overflow for size==32 at high bit
+ * depths (1024 squares of up to ~2^30) -- presumably acceptable for the
+ * ranges actually fed in; confirm against callers before widening. */
+template<int size>
+int pixel_ssd_s_c(const int16_t* a, intptr_t dstride)
+{
+    int sum = 0;
+    for (int y = 0; y < size; y++)
+    {
+        for (int x = 0; x < size; x++)
+            sum += a[x] * a[x];
+
+        a += dstride;
+    }
+    return sum;
+}
+
+/* Fill a size x size 16-bit block (stride dstride) with the constant val. */
+template<int size>
+void blockfill_s_c(int16_t* dst, intptr_t dstride, int16_t val)
+{
+    for (int y = 0; y < size; y++)
+        for (int x = 0; x < size; x++)
+            dst[y * dstride + x] = val;
+}
+
+/* Copy a strided 2D 16-bit block into a packed 1D buffer, left-shifting
+ * each coefficient by 'shift'. Alignment asserts mirror the SIMD versions'
+ * ABI requirements. */
+template<int size>
+void cpy2Dto1D_shl(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift)
+{
+    X265_CHECK(((intptr_t)dst & 15) == 0, "dst alignment error\n");
+    X265_CHECK((((intptr_t)src | (srcStride * sizeof(*src))) & 15) == 0 || size == 4, "src alignment error\n");
+    X265_CHECK(shift >= 0, "invalid shift\n");
+
+    for (int i = 0; i < size; i++)
+    {
+        for (int j = 0; j < size; j++)
+            dst[j] = src[j] << shift;
+
+        src += srcStride;
+        dst += size;
+    }
+}
+
+/* Copy a strided 2D 16-bit block into a packed 1D buffer, right-shifting
+ * with round-to-nearest (adds 1 << (shift-1) before the shift). */
+template<int size>
+void cpy2Dto1D_shr(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift)
+{
+    X265_CHECK(((intptr_t)dst & 15) == 0, "dst alignment error\n");
+    X265_CHECK((((intptr_t)src | (srcStride * sizeof(*src))) & 15) == 0 || size == 4, "src alignment error\n");
+    X265_CHECK(shift > 0, "invalid shift\n");
+
+    int16_t round = 1 << (shift - 1);
+    for (int i = 0; i < size; i++)
+    {
+        for (int j = 0; j < size; j++)
+            dst[j] = (src[j] + round) >> shift;
+
+        src += srcStride;
+        dst += size;
+    }
+}
+
+/* Inverse of cpy2Dto1D_shl: expand a packed 1D 16-bit buffer into a strided
+ * 2D block, left-shifting each coefficient by 'shift'. */
+template<int size>
+void cpy1Dto2D_shl(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+{
+    X265_CHECK((((intptr_t)dst | (dstStride * sizeof(*dst))) & 15) == 0 || size == 4, "dst alignment error\n");
+    X265_CHECK(((intptr_t)src & 15) == 0, "src alignment error\n");
+    X265_CHECK(shift >= 0, "invalid shift\n");
+
+    for (int i = 0; i < size; i++)
+    {
+        for (int j = 0; j < size; j++)
+            dst[j] = src[j] << shift;
+
+        src += size;
+        dst += dstStride;
+    }
+}
+
+/* Inverse of cpy2Dto1D_shr: expand a packed 1D buffer into a strided 2D
+ * block with rounding right-shift. */
+template<int size>
+void cpy1Dto2D_shr(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+{
+    X265_CHECK((((intptr_t)dst | (dstStride * sizeof(*dst))) & 15) == 0 || size == 4, "dst alignment error\n");
+    X265_CHECK(((intptr_t)src & 15) == 0, "src alignment error\n");
+    X265_CHECK(shift > 0, "invalid shift\n");
+
+    int16_t round = 1 << (shift - 1);
+    for (int i = 0; i < size; i++)
+    {
+        for (int j = 0; j < size; j++)
+            dst[j] = (src[j] + round) >> shift;
+
+        src += size;
+        dst += dstStride;
+    }
+}
+
+/* Compute the residual block: residual = fenc - pred, per sample. All three
+ * planes share the same stride here. */
+template<int blockSize>
+void getResidual(const pixel* fenc, const pixel* pred, int16_t* residual, intptr_t stride)
+{
+    for (int y = 0; y < blockSize; y++)
+    {
+        for (int x = 0; x < blockSize; x++)
+            residual[x] = static_cast<int16_t>(fenc[x]) - static_cast<int16_t>(pred[x]);
+
+        fenc += stride;
+        residual += stride;
+        pred += stride;
+    }
+}
+
+/* Transpose a strided blockSize x blockSize pixel block into a packed
+ * (stride == blockSize) destination. */
+template<int blockSize>
+void transpose(pixel* dst, const pixel* src, intptr_t stride)
+{
+    for (int k = 0; k < blockSize; k++)
+        for (int l = 0; l < blockSize; l++)
+            dst[k * blockSize + l] = src[l * stride + k];
+}
+
+/* Weighted prediction, 16-bit source to pixel destination:
+ * dst = clip(((w0 * (src + IF_INTERNAL_OFFS) + round) >> shift) + offset).
+ * The checked-build asserts document the value ranges the assembly
+ * implementations assume. */
+static void weight_sp_c(const int16_t* src, pixel* dst, intptr_t srcStride, intptr_t dstStride, int width, int height, int w0, int round, int shift, int offset)
+{
+    int x, y;
+
+#if CHECKED_BUILD || _DEBUG
+    const int correction = (IF_INTERNAL_PREC - X265_DEPTH);
+    X265_CHECK(!((w0 << 6) > 32767), "w0 using more than 16 bits, asm output will mismatch\n");
+    X265_CHECK(!(round > 32767), "round using more than 16 bits, asm output will mismatch\n");
+    X265_CHECK((shift >= correction), "shift must be include factor correction, please update ASM ABI\n");
+#endif
+
+    for (y = 0; y <= height - 1; y++)
+    {
+        for (x = 0; x <= width - 1; )
+        {
+            // note: width can be odd
+            dst[x] = x265_clip(((w0 * (src[x] + IF_INTERNAL_OFFS) + round) >> shift) + offset);
+            x++;
+        }
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+
+/* Weighted prediction, pixel to pixel. Each source sample is first promoted
+ * to the interpolation precision (<< correction) to simulate the
+ * pixel-to-short conversion the asm performs, then weighted and clipped.
+ * Requires width to be a multiple of 16 (asserted). */
+static void weight_pp_c(const pixel* src, pixel* dst, intptr_t stride, int width, int height, int w0, int round, int shift, int offset)
+{
+    int x, y;
+
+    const int correction = (IF_INTERNAL_PREC - X265_DEPTH);
+
+    X265_CHECK(!(width & 15), "weightp alignment error\n");
+    X265_CHECK(!((w0 << 6) > 32767), "w0 using more than 16 bits, asm output will mismatch\n");
+    X265_CHECK(!(round > 32767), "round using more than 16 bits, asm output will mismatch\n");
+    X265_CHECK((shift >= correction), "shift must be include factor correction, please update ASM ABI\n");
+    X265_CHECK(!(round & ((1 << correction) - 1)), "round must be include factor correction, please update ASM ABI\n");
+
+    for (y = 0; y <= height - 1; y++)
+    {
+        for (x = 0; x <= width - 1; )
+        {
+            // simulating pixel to short conversion
+            int16_t val = src[x] << correction;
+            dst[x] = x265_clip(((w0 * (val) + round) >> shift) + offset);
+            x++;
+        }
+
+        src += stride;
+        dst += stride;
+    }
+}
+
+/* Bi-prediction average of two pixel blocks with round-to-nearest:
+ * dst = (src0 + src1 + 1) >> 1. The trailing unnamed int parameter is
+ * unused here (kept for primitive-table signature compatibility). */
+template<int lx, int ly>
+void pixelavg_pp(pixel* dst, intptr_t dstride, const pixel* src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+{
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+            dst[x] = (src0[x] + src1[x] + 1) >> 1;
+
+        src0 += sstride0;
+        src1 += sstride1;
+        dst += dstride;
+    }
+}
+
+/* Downscale two adjacent 128-sample rows to two 64-sample rows by averaging
+ * horizontal pixel pairs with rounding. src holds the rows back-to-back
+ * (second row at src + 128); dst likewise at dst + 64. */
+static void scale1D_128to64(pixel *dst, const pixel *src)
+{
+    int x;
+    const pixel* src1 = src;
+    const pixel* src2 = src + 128;
+
+    pixel* dst1 = dst;
+    pixel* dst2 = dst + 64/*128*/;
+
+    for (x = 0; x < 128; x += 2)
+    {
+        // Top pixel
+        pixel pix0 = src1[(x + 0)];
+        pixel pix1 = src1[(x + 1)];
+
+        // Left pixel
+        pixel pix2 = src2[(x + 0)];
+        pixel pix3 = src2[(x + 1)];
+        int sum1 = pix0 + pix1;
+        int sum2 = pix2 + pix3;
+
+        dst1[x >> 1] = (pixel)((sum1 + 1) >> 1);
+        dst2[x >> 1] = (pixel)((sum2 + 1) >> 1);
+    }
+}
+
+/* Downscale a strided 64x64 pixel block to a packed 32x32 block by
+ * averaging each 2x2 quad with rounding ((sum + 2) >> 2). */
+static void scale2D_64to32(pixel* dst, const pixel* src, intptr_t stride)
+{
+    uint32_t x, y;
+
+    for (y = 0; y < 64; y += 2)
+    {
+        for (x = 0; x < 64; x += 2)
+        {
+            pixel pix0 = src[(y + 0) * stride + (x + 0)];
+            pixel pix1 = src[(y + 0) * stride + (x + 1)];
+            pixel pix2 = src[(y + 1) * stride + (x + 0)];
+            pixel pix3 = src[(y + 1) * stride + (x + 1)];
+            int sum = pix0 + pix1 + pix2 + pix3;
+
+            dst[y / 2 * 32 + x / 2] = (pixel)((sum + 2) >> 2);
+        }
+    }
+}
+
+/* Build the four half-pel lowres planes used by lookahead: dst0 (integer),
+ * dsth (half-pel H), dstv (half-pel V), dstc (half-pel center). Source is
+ * consumed two rows per output row (2x decimation). */
+static
+void frame_init_lowres_core(const pixel* src0, pixel* dst0, pixel* dsth, pixel* dstv, pixel* dstc,
+                            intptr_t src_stride, intptr_t dst_stride, int width, int height)
+{
+    for (int y = 0; y < height; y++)
+    {
+        const pixel* src1 = src0 + src_stride;
+        const pixel* src2 = src1 + src_stride;
+        for (int x = 0; x < width; x++)
+        {
+            // slower than naive bilinear, but matches asm
+#define FILTER(a, b, c, d) ((((a + b + 1) >> 1) + ((c + d + 1) >> 1) + 1) >> 1)
+            dst0[x] = FILTER(src0[2 * x], src1[2 * x], src0[2 * x + 1], src1[2 * x + 1]);
+            dsth[x] = FILTER(src0[2 * x + 1], src1[2 * x + 1], src0[2 * x + 2], src1[2 * x + 2]);
+            dstv[x] = FILTER(src1[2 * x], src2[2 * x], src1[2 * x + 1], src2[2 * x + 1]);
+            dstc[x] = FILTER(src1[2 * x + 1], src2[2 * x + 1], src1[2 * x + 2], src2[2 * x + 2]);
+#undef FILTER
+        }
+        src0 += src_stride * 2;
+        dst0 += dst_stride;
+        dsth += dst_stride;
+        dstv += dst_stride;
+        dstc += dst_stride;
+    }
+}
+
+/* structural similarity metric */
+/* Accumulate SSIM statistics for two horizontally adjacent 4x4 windows
+ * (z = 0 and z = 1, 4 pixels apart). For each window, sums[z] receives
+ * {sum(a), sum(b), sum(a^2)+sum(b^2), sum(a*b)}. */
+static void ssim_4x4x2_core(const pixel* pix1, intptr_t stride1, const pixel* pix2, intptr_t stride2, int sums[2][4])
+{
+    for (int z = 0; z < 2; z++)
+    {
+        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
+        for (int y = 0; y < 4; y++)
+        {
+            for (int x = 0; x < 4; x++)
+            {
+                int a = pix1[x + y * stride1];
+                int b = pix2[x + y * stride2];
+                s1 += a;
+                s2 += b;
+                ss += a * a;
+                ss += b * b;
+                s12 += a * b;
+            }
+        }
+
+        sums[z][0] = s1;
+        sums[z][1] = s2;
+        sums[z][2] = ss;
+        sums[z][3] = s12;
+        pix1 += 4;
+        pix2 += 4;
+    }
+}
+
+/* Compute one SSIM value from accumulated window statistics (s1/s2 = pixel
+ * sums, ss = sum of squares of both images, s12 = cross sum). Uses float
+ * math for high bit depth (overflow headroom, see note below) and int math
+ * for 8-bit; the 'type' macro selects between them. */
+static float ssim_end_1(int s1, int s2, int ss, int s12)
+{
+/* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
+ * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
+ * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
+
+#define PIXEL_MAX ((1 << X265_DEPTH) - 1)
+#if HIGH_BIT_DEPTH
+    X265_CHECK((X265_DEPTH == 10) || (X265_DEPTH == 12), "ssim invalid depth\n");
+#define type float
+    static const float ssim_c1 = (float)(.01 * .01 * PIXEL_MAX * PIXEL_MAX * 64);
+    static const float ssim_c2 = (float)(.03 * .03 * PIXEL_MAX * PIXEL_MAX * 64 * 63);
+#else
+    X265_CHECK(X265_DEPTH == 8, "ssim invalid depth\n");
+#define type int
+    static const int ssim_c1 = (int)(.01 * .01 * PIXEL_MAX * PIXEL_MAX * 64 + .5);
+    static const int ssim_c2 = (int)(.03 * .03 * PIXEL_MAX * PIXEL_MAX * 64 * 63 + .5);
+#endif
+    type fs1 = (type)s1;
+    type fs2 = (type)s2;
+    type fss = (type)ss;
+    type fs12 = (type)s12;
+    type vars = (type)(fss * 64 - fs1 * fs1 - fs2 * fs2);
+    type covar = (type)(fs12 * 64 - fs1 * fs2);
+    return (float)(2 * fs1 * fs2 + ssim_c1) * (float)(2 * covar + ssim_c2)
+           / ((float)(fs1 * fs1 + fs2 * fs2 + ssim_c1) * (float)(vars + ssim_c2));
+#undef type
+#undef PIXEL_MAX
+}
+
+/* Sum SSIM over 'width' overlapping 8x8 windows: each window's statistics
+ * are the sum of four neighboring 4x4 sums (i and i+1 from two sum rows). */
+static float ssim_end_4(int sum0[5][4], int sum1[5][4], int width)
+{
+    float ssim = 0.0;
+
+    for (int i = 0; i < width; i++)
+    {
+        ssim += ssim_end_1(sum0[i][0] + sum0[i + 1][0] + sum1[i][0] + sum1[i + 1][0],
+                           sum0[i][1] + sum0[i + 1][1] + sum1[i][1] + sum1[i + 1][1],
+                           sum0[i][2] + sum0[i + 1][2] + sum1[i][2] + sum1[i + 1][2],
+                           sum0[i][3] + sum0[i + 1][3] + sum1[i][3] + sum1[i + 1][3]);
+    }
+
+    return ssim;
+}
+
+/* Accumulate pixel sum and sum-of-squares over a size x size block, packed
+ * into one uint64_t: low 32 bits = sum, high 32 bits = sum of squares.
+ * Callers unpack both to derive the variance. */
+template<int size>
+uint64_t pixel_var(const pixel* pix, intptr_t i_stride)
+{
+    uint32_t sum = 0, sqr = 0;
+
+    for (int y = 0; y < size; y++)
+    {
+        for (int x = 0; x < size; x++)
+        {
+            sum += pix[x];
+            sqr += pix[x] * pix[x];
+        }
+
+        pix += i_stride;
+    }
+
+    return sum + ((uint64_t)sqr << 32);
+}
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
+
+/* Psycho-visual cost between source and reconstruction. 'size' is the block
+ * index template parameter (0 presumably meaning 4x4, larger values 8x8 and
+ * up -- instantiated with BLOCK_WxH enums by setupPixelPrimitives_c).
+ * Per 8x8 tile, "AC energy" is sa8d against a zero block (AC+DC) minus
+ * SAD/4 (the DC part); the cost is the summed |sourceEnergy - reconEnergy|. */
+template<int size>
+int psyCost_pp(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride)
+{
+    static pixel zeroBuf[8] /* = { 0 } */;
+
+    if (size)
+    {
+        int dim = 1 << (size + 2);
+        uint32_t totEnergy = 0;
+        for (int i = 0; i < dim; i += 8)
+        {
+            for (int j = 0; j < dim; j+= 8)
+            {
+                /* AC energy, measured by sa8d (AC + DC) minus SAD (DC) */
+                int sourceEnergy = sa8d_8x8(source + i * sstride + j, sstride, zeroBuf, 0) - 
+                                   (sad<8, 8>(source + i * sstride + j, sstride, zeroBuf, 0) >> 2);
+                int reconEnergy =  sa8d_8x8(recon + i * rstride + j, rstride, zeroBuf, 0) - 
+                                   (sad<8, 8>(recon + i * rstride + j, rstride, zeroBuf, 0) >> 2);
+
+                totEnergy += abs(sourceEnergy - reconEnergy);
+            }
+        }
+        return totEnergy;
+    }
+    else
+    {
+        /* 4x4 is too small for sa8d */
+        int sourceEnergy = satd_4x4(source, sstride, zeroBuf, 0) - (sad<4, 4>(source, sstride, zeroBuf, 0) >> 2);
+        int reconEnergy = satd_4x4(recon, rstride, zeroBuf, 0) - (sad<4, 4>(recon, rstride, zeroBuf, 0) >> 2);
+        return abs(sourceEnergy - reconEnergy);
+    }
+}
+
+/* Psycho-visual cost on 16-bit residual planes; same structure as
+ * psyCost_pp but the sa8d terms use the one-operand int16_t overloads
+ * (the block's own Hadamard energy), while the DC term still uses SAD
+ * against a zero buffer. */
+template<int size>
+int psyCost_ss(const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride)
+{
+    static int16_t zeroBuf[8] /* = { 0 } */;
+
+    if (size)
+    {
+        int dim = 1 << (size + 2);
+        uint32_t totEnergy = 0;
+        for (int i = 0; i < dim; i += 8)
+        {
+            for (int j = 0; j < dim; j+= 8)
+            {
+                /* AC energy, measured by sa8d (AC + DC) minus SAD (DC) */
+                int sourceEnergy = sa8d_8x8(source + i * sstride + j, sstride) - 
+                                   (sad<8, 8>(source + i * sstride + j, sstride, zeroBuf, 0) >> 2);
+                int reconEnergy =  sa8d_8x8(recon + i * rstride + j, rstride) - 
+                                   (sad<8, 8>(recon + i * rstride + j, rstride, zeroBuf, 0) >> 2);
+
+                totEnergy += abs(sourceEnergy - reconEnergy);
+            }
+        }
+        return totEnergy;
+    }
+    else
+    {
+        /* 4x4 is too small for sa8d */
+        int sourceEnergy = satd_4x4(source, sstride) - (sad<4, 4>(source, sstride, zeroBuf, 0) >> 2);
+        int reconEnergy = satd_4x4(recon, rstride) - (sad<4, 4>(recon, rstride, zeroBuf, 0) >> 2);
+        return abs(sourceEnergy - reconEnergy);
+    }
+}
+
+/* Strided block copy, pixel to pixel. */
+template<int bx, int by>
+void blockcopy_pp_c(pixel* a, intptr_t stridea, const pixel* b, intptr_t strideb)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+            a[x] = b[x];
+
+        a += stridea;
+        b += strideb;
+    }
+}
+
+/* Strided block copy, int16_t to int16_t. */
+template<int bx, int by>
+void blockcopy_ss_c(int16_t* a, intptr_t stridea, const int16_t* b, intptr_t strideb)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+            a[x] = b[x];
+
+        a += stridea;
+        b += strideb;
+    }
+}
+
+/* Strided block copy, int16_t source narrowed to pixel; asserts each value
+ * already fits the pixel range (no clipping is performed). */
+template<int bx, int by>
+void blockcopy_sp_c(pixel* a, intptr_t stridea, const int16_t* b, intptr_t strideb)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+        {
+            X265_CHECK((b[x] >= 0) && (b[x] <= ((1 << X265_DEPTH) - 1)), "blockcopy pixel size fail\n");
+            a[x] = (pixel)b[x];
+        }
+
+        a += stridea;
+        b += strideb;
+    }
+}
+
+/* Strided block copy, pixel source widened to int16_t. */
+template<int bx, int by>
+void blockcopy_ps_c(int16_t* a, intptr_t stridea, const pixel* b, intptr_t strideb)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+            a[x] = (int16_t)b[x];
+
+        a += stridea;
+        b += strideb;
+    }
+}
+
+/* Per-sample subtraction of two pixel blocks into a 16-bit destination:
+ * a = b0 - b1. Each plane has its own stride. */
+template<int bx, int by>
+void pixel_sub_ps_c(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+            a[x] = (int16_t)(b0[x] - b1[x]);
+
+        b0 += sstride0;
+        b1 += sstride1;
+        a += dstride;
+    }
+}
+
+/* Add a 16-bit residual (b1) onto a pixel prediction (b0), clipping back to
+ * the valid pixel range: a = clip(b0 + b1). */
+template<int bx, int by>
+void pixel_add_ps_c(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+            a[x] = x265_clip(b0[x] + b1[x]);
+
+        b0 += sstride0;
+        b1 += sstride1;
+        a += dstride;
+    }
+}
+
+/* Bi-directional weighted average of two 16-bit interpolated blocks back to
+ * pixels: dst = clip((src0 + src1 + offset) >> shiftNum). The offset folds
+ * the rounding term and removal of both IF_INTERNAL_OFFS biases. Processes
+ * two samples per iteration (bx is even for all instantiated sizes). */
+template<int bx, int by>
+void addAvg(const int16_t* src0, const int16_t* src1, pixel* dst, intptr_t src0Stride, intptr_t src1Stride, intptr_t dstStride)
+{
+    int shiftNum, offset;
+
+    shiftNum = IF_INTERNAL_PREC + 1 - X265_DEPTH;
+    offset = (1 << (shiftNum - 1)) + 2 * IF_INTERNAL_OFFS;
+
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x += 2)
+        {
+            dst[x + 0] = x265_clip((src0[x + 0] + src1[x + 0] + offset) >> shiftNum);
+            dst[x + 1] = x265_clip((src0[x + 1] + src1[x + 1] + offset) >> shiftNum);
+        }
+
+        src0 += src0Stride;
+        src1 += src1Stride;
+        dst  += dstStride;
+    }
+}
+
+/* Copy an 8-bit plane into the internal pixel type, left-shifting each
+ * sample by 'shift' (used to promote 8-bit input at high bit depth). */
+static void planecopy_cp_c(const uint8_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift)
+{
+    for (int r = 0; r < height; r++)
+    {
+        for (int c = 0; c < width; c++)
+            dst[c] = ((pixel)src[c]) << shift;
+
+        dst += dstStride;
+        src += srcStride;
+    }
+}
+
+/* Copy a 16-bit plane into pixels, right-shifting then masking each sample
+ * (demotes over-deep input to the internal bit depth). */
+static void planecopy_sp_c(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
+{
+    for (int r = 0; r < height; r++)
+    {
+        for (int c = 0; c < width; c++)
+            dst[c] = (pixel)((src[c] >> shift) & mask);
+
+        dst += dstStride;
+        src += srcStride;
+    }
+}
+
+/* Copy a 16-bit plane into pixels, left-shifting then masking each sample
+ * (promotes under-deep input to the internal bit depth). */
+static void planecopy_sp_shl_c(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
+{
+    for (int r = 0; r < height; r++)
+    {
+        for (int c = 0; c < width; c++)
+            dst[c] = (pixel)((src[c] << shift) & mask);
+
+        dst += dstStride;
+        src += srcStride;
+    }
+}
+
+/* Estimate the total amount of influence on future quality that could be had if we
+ * were to improve the reference samples used to inter predict any given CU. */
+/* Per CU: propagateAmount = propagateIn + intraCost*invQscale*fps; the
+ * fraction of that which propagates is (intra - inter)/intra, where the
+ * inter cost keeps only its low 14 bits (upper bits presumably carry list/
+ * ref metadata -- confirm against the lookahead caller). Result is rounded
+ * to int. NOTE(review): divides by intraCosts[i]; callers presumably
+ * guarantee it is non-zero. */
+static void estimateCUPropagateCost(int* dst, const uint16_t* propagateIn, const int32_t* intraCosts, const uint16_t* interCosts,
+                             const int32_t* invQscales, const double* fpsFactor, int len)
+{
+    double fps = *fpsFactor / 256;
+
+    for (int i = 0; i < len; i++)
+    {
+        double intraCost       = intraCosts[i] * invQscales[i];
+        double propagateAmount = (double)propagateIn[i] + intraCost * fps;
+        double propagateNum    = (double)intraCosts[i] - (interCosts[i] & ((1 << 14) - 1));
+        double propagateDenom  = (double)intraCosts[i];
+        dst[i] = (int)(propagateAmount * propagateNum / propagateDenom + 0.5);
+    }
+}
+
+/* Clip every sample of a luma plane in place to [minPix, maxPix], while
+ * accumulating the plane's luma sum into *outsum and returning the maximum
+ * (post-clip) luma level observed. */
+static pixel planeClipAndMax_c(pixel *src, intptr_t stride, int width, int height, uint64_t *outsum, const pixel minPix, const pixel maxPix)
+{
+    pixel maxLumaLevel = 0;
+    uint64_t sumLuma = 0;
+
+    for (int r = 0; r < height; r++)
+    {
+        for (int c = 0; c < width; c++)
+        {
+            /* Clip luma of source picture to max and min values before extending edges of picYuv */
+            src[c] = x265_clip3((pixel)minPix, (pixel)maxPix, src[c]);
+
+            /* Determine maximum and average luma level in a picture */
+            maxLumaLevel = X265_MAX(src[c], maxLumaLevel);
+            sumLuma += src[c];
+        }
+
+        src += stride;
+    }
+
+    *outsum = sumLuma;
+    return maxLumaLevel;
+}
+
+}  // end anonymous namespace
+
+namespace X265_NS {
+// x265 private namespace
+
+/* Extend the edges of a picture so that it may safely be used for motion
+ * compensation. This function assumes the picture is stored in a buffer with
+ * sufficient padding for the X and Y margins */
+/* Horizontal extension is delegated to the (possibly SIMD) extendRowBorder
+ * primitive; the vertical margins are then filled by replicating the first
+ * and last (already horizontally-extended) rows. */
+void extendPicBorder(pixel* pic, intptr_t stride, int width, int height, int marginX, int marginY)
+{
+    /* extend left and right margins */
+    primitives.extendRowBorder(pic, stride, width, height, marginX);
+
+    /* copy top row to create above margin */
+    pixel* top = pic - marginX;
+    for (int y = 0; y < marginY; y++)
+        memcpy(top - (y + 1) * stride, top, stride * sizeof(pixel));
+
+    /* copy bottom row to create below margin */
+    pixel* bot = pic - marginX + (height - 1) * stride;
+    for (int y = 0; y < marginY; y++)
+        memcpy(bot + (y + 1) * stride, bot, stride * sizeof(pixel));
+}
+
+/* Initialize entries for pixel functions defined in this file */
+void setupPixelPrimitives_c(EncoderPrimitives &p)  // bind the C reference implementations into the primitives dispatch table
+{
+#define LUMA_PU(W, H) \
+    p.pu[LUMA_ ## W ## x ## H].copy_pp = blockcopy_pp_c<W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].addAvg = addAvg<W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].sad = sad<W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].sad_x3 = sad_x3<W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].sad_x4 = sad_x4<W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].pixelavg_pp = pixelavg_pp<W, H>;
+
+#define LUMA_CU(W, H) \
+    p.cu[BLOCK_ ## W ## x ## H].sub_ps        = pixel_sub_ps_c<W, H>; \
+    p.cu[BLOCK_ ## W ## x ## H].add_ps        = pixel_add_ps_c<W, H>; \
+    p.cu[BLOCK_ ## W ## x ## H].copy_sp       = blockcopy_sp_c<W, H>; \
+    p.cu[BLOCK_ ## W ## x ## H].copy_ps       = blockcopy_ps_c<W, H>; \
+    p.cu[BLOCK_ ## W ## x ## H].copy_ss       = blockcopy_ss_c<W, H>; \
+    p.cu[BLOCK_ ## W ## x ## H].blockfill_s   = blockfill_s_c<W>;  \
+    p.cu[BLOCK_ ## W ## x ## H].cpy2Dto1D_shl = cpy2Dto1D_shl<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].cpy2Dto1D_shr = cpy2Dto1D_shr<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].cpy1Dto2D_shl = cpy1Dto2D_shl<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].cpy1Dto2D_shr = cpy1Dto2D_shr<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].psy_cost_pp   = psyCost_pp<BLOCK_ ## W ## x ## H>; \
+    p.cu[BLOCK_ ## W ## x ## H].psy_cost_ss   = psyCost_ss<BLOCK_ ## W ## x ## H>; \
+    p.cu[BLOCK_ ## W ## x ## H].transpose     = transpose<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].ssd_s         = pixel_ssd_s_c<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].var           = pixel_var<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].calcresidual  = getResidual<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].sse_pp        = sse<W, H, pixel, pixel>; \
+    p.cu[BLOCK_ ## W ## x ## H].sse_ss        = sse<W, H, int16_t, int16_t>;
+
+    LUMA_PU(4, 4);   // square luma PU sizes first
+    LUMA_PU(8, 8);
+    LUMA_PU(16, 16);
+    LUMA_PU(32, 32);
+    LUMA_PU(64, 64);
+    LUMA_PU(4, 8);   // rectangular and AMP (asymmetric) PU sizes
+    LUMA_PU(8, 4);
+    LUMA_PU(16,  8);
+    LUMA_PU(8, 16);
+    LUMA_PU(16, 12);
+    LUMA_PU(12, 16);
+    LUMA_PU(16,  4);
+    LUMA_PU(4, 16);
+    LUMA_PU(32, 16);
+    LUMA_PU(16, 32);
+    LUMA_PU(32, 24);
+    LUMA_PU(24, 32);
+    LUMA_PU(32,  8);
+    LUMA_PU(8, 32);
+    LUMA_PU(64, 32);
+    LUMA_PU(32, 64);
+    LUMA_PU(64, 48);
+    LUMA_PU(48, 64);
+    LUMA_PU(64, 16);
+    LUMA_PU(16, 64);
+
+    p.pu[LUMA_4x4].satd   = satd_4x4;  // satd bound per size: satd8<> iterates 8x4 kernels, satd4<> iterates 4x4 kernels
+    p.pu[LUMA_8x8].satd   = satd8<8, 8>;
+    p.pu[LUMA_8x4].satd   = satd_8x4;
+    p.pu[LUMA_4x8].satd   = satd4<4, 8>;
+    p.pu[LUMA_16x16].satd = satd8<16, 16>;
+    p.pu[LUMA_16x8].satd  = satd8<16, 8>;
+    p.pu[LUMA_8x16].satd  = satd8<8, 16>;
+    p.pu[LUMA_16x12].satd = satd8<16, 12>;
+    p.pu[LUMA_12x16].satd = satd4<12, 16>;
+    p.pu[LUMA_16x4].satd  = satd8<16, 4>;
+    p.pu[LUMA_4x16].satd  = satd4<4, 16>;
+    p.pu[LUMA_32x32].satd = satd8<32, 32>;
+    p.pu[LUMA_32x16].satd = satd8<32, 16>;
+    p.pu[LUMA_16x32].satd = satd8<16, 32>;
+    p.pu[LUMA_32x24].satd = satd8<32, 24>;
+    p.pu[LUMA_24x32].satd = satd8<24, 32>;
+    p.pu[LUMA_32x8].satd  = satd8<32, 8>;
+    p.pu[LUMA_8x32].satd  = satd8<8, 32>;
+    p.pu[LUMA_64x64].satd = satd8<64, 64>;
+    p.pu[LUMA_64x32].satd = satd8<64, 32>;
+    p.pu[LUMA_32x64].satd = satd8<32, 64>;
+    p.pu[LUMA_64x48].satd = satd8<64, 48>;
+    p.pu[LUMA_48x64].satd = satd8<48, 64>;
+    p.pu[LUMA_64x16].satd = satd8<64, 16>;
+    p.pu[LUMA_16x64].satd = satd8<16, 64>;
+
+    LUMA_CU(4, 4);
+    LUMA_CU(8, 8);
+    LUMA_CU(16, 16);
+    LUMA_CU(32, 32);
+    LUMA_CU(64, 64);
+
+    p.cu[BLOCK_4x4].sa8d   = satd_4x4;  // 4x4 is smaller than the 8x8 Hadamard; fall back to 4x4 satd
+    p.cu[BLOCK_8x8].sa8d   = sa8d_8x8;
+    p.cu[BLOCK_16x16].sa8d = sa8d_16x16;
+    p.cu[BLOCK_32x32].sa8d = sa8d16<32, 32>;
+    p.cu[BLOCK_64x64].sa8d = sa8d16<64, 64>;
+
+#define CHROMA_PU_420(W, H) \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].addAvg  = addAvg<W, H>;         \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].copy_pp = blockcopy_pp_c<W, H>; \
+
+    CHROMA_PU_420(2, 2);
+    CHROMA_PU_420(2, 4);
+    CHROMA_PU_420(4, 4);
+    CHROMA_PU_420(8, 8);
+    CHROMA_PU_420(16, 16);
+    CHROMA_PU_420(32, 32);
+    CHROMA_PU_420(4, 2);
+    CHROMA_PU_420(8, 4);
+    CHROMA_PU_420(4, 8);
+    CHROMA_PU_420(8, 6);
+    CHROMA_PU_420(6, 8);
+    CHROMA_PU_420(8, 2);
+    CHROMA_PU_420(2, 8);
+    CHROMA_PU_420(16, 8);
+    CHROMA_PU_420(8,  16);
+    CHROMA_PU_420(16, 12);
+    CHROMA_PU_420(12, 16);
+    CHROMA_PU_420(16, 4);
+    CHROMA_PU_420(4,  16);
+    CHROMA_PU_420(32, 16);
+    CHROMA_PU_420(16, 32);
+    CHROMA_PU_420(32, 24);
+    CHROMA_PU_420(24, 32);
+    CHROMA_PU_420(32, 8);
+    CHROMA_PU_420(8,  32);
+
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_2x2].satd   = NULL;  // blocks with a dimension < 4 have no satd primitive
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].satd   = satd_4x4;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].satd   = satd8<8, 8>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].satd = satd8<16, 16>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].satd = satd8<32, 32>;
+
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].satd   = satd_8x4;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].satd   = satd4<4, 8>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].satd  = satd8<16, 8>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].satd  = satd8<8, 16>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].satd = satd8<32, 16>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].satd = satd8<16, 32>;
+
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].satd = satd4<16, 12>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].satd = satd4<12, 16>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].satd  = satd4<16, 4>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].satd  = satd4<4, 16>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].satd = satd8<32, 24>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].satd = satd8<24, 32>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].satd  = satd8<32, 8>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].satd  = satd8<8, 32>;
+
+#define CHROMA_CU_420(W, H) \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].sse_pp  = sse<W, H, pixel, pixel>; \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].copy_sp = blockcopy_sp_c<W, H>; \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].copy_ps = blockcopy_ps_c<W, H>; \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].copy_ss = blockcopy_ss_c<W, H>; \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].sub_ps = pixel_sub_ps_c<W, H>;  \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].add_ps = pixel_add_ps_c<W, H>;
+
+    CHROMA_CU_420(2, 2)
+    CHROMA_CU_420(4, 4)
+    CHROMA_CU_420(8, 8)
+    CHROMA_CU_420(16, 16)
+    CHROMA_CU_420(32, 32)
+
+    p.chroma[X265_CSP_I420].cu[BLOCK_8x8].sa8d   = p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].satd;  // chroma of an 8x8 CU is 4x4 in 4:2:0; reuse the satd binding
+    p.chroma[X265_CSP_I420].cu[BLOCK_16x16].sa8d = sa8d8<8, 8>;
+    p.chroma[X265_CSP_I420].cu[BLOCK_32x32].sa8d = sa8d16<16, 16>;
+    p.chroma[X265_CSP_I420].cu[BLOCK_64x64].sa8d = sa8d16<32, 32>;
+
+#define CHROMA_PU_422(W, H) \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].addAvg  = addAvg<W, H>;         \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].copy_pp = blockcopy_pp_c<W, H>; \
+
+    CHROMA_PU_422(2, 4);
+    CHROMA_PU_422(4, 8);
+    CHROMA_PU_422(8, 16);
+    CHROMA_PU_422(16, 32);
+    CHROMA_PU_422(32, 64);
+    CHROMA_PU_422(4, 4);
+    CHROMA_PU_422(2, 8);
+    CHROMA_PU_422(8, 8);
+    CHROMA_PU_422(4, 16);
+    CHROMA_PU_422(8, 12);
+    CHROMA_PU_422(6, 16);
+    CHROMA_PU_422(8, 4);
+    CHROMA_PU_422(2, 16);
+    CHROMA_PU_422(16, 16);
+    CHROMA_PU_422(8, 32);
+    CHROMA_PU_422(16, 24);
+    CHROMA_PU_422(12, 32);
+    CHROMA_PU_422(16, 8);
+    CHROMA_PU_422(4,  32);
+    CHROMA_PU_422(32, 32);
+    CHROMA_PU_422(16, 64);
+    CHROMA_PU_422(32, 48);
+    CHROMA_PU_422(24, 64);
+    CHROMA_PU_422(32, 16);
+    CHROMA_PU_422(8,  64);
+
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_2x4].satd   = NULL;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].satd   = satd4<4, 8>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].satd  = satd8<8, 16>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].satd = satd8<16, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].satd = satd8<32, 64>;
+
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].satd   = satd_4x4;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].satd   = NULL;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].satd   = satd8<8, 8>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].satd  = satd4<4, 16>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].satd = satd8<16, 16>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].satd  = satd8<8, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].satd = satd8<32, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].satd = satd8<16, 64>;
+
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].satd  = satd4<8, 12>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].satd  = NULL;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].satd   = satd4<8, 4>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].satd  = NULL;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].satd = satd8<16, 24>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].satd = satd4<12, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].satd  = satd8<16, 8>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].satd  = satd4<4, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].satd = satd8<32, 48>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].satd = satd8<24, 64>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].satd = satd8<32, 16>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].satd  = satd8<8, 64>;
+
+#define CHROMA_CU_422(W, H) \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].sse_pp  = sse<W, H, pixel, pixel>; \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].copy_sp = blockcopy_sp_c<W, H>; \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].copy_ps = blockcopy_ps_c<W, H>; \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].copy_ss = blockcopy_ss_c<W, H>; \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].sub_ps = pixel_sub_ps_c<W, H>; \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].add_ps = pixel_add_ps_c<W, H>;
+
+    CHROMA_CU_422(2, 4)
+    CHROMA_CU_422(4, 8)
+    CHROMA_CU_422(8, 16)
+    CHROMA_CU_422(16, 32)
+    CHROMA_CU_422(32, 64)
+
+    p.chroma[X265_CSP_I422].cu[BLOCK_8x8].sa8d   = p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].satd;  // chroma of an 8x8 CU is 4x8 in 4:2:2
+    p.chroma[X265_CSP_I422].cu[BLOCK_16x16].sa8d = sa8d8<8, 16>;
+    p.chroma[X265_CSP_I422].cu[BLOCK_32x32].sa8d = sa8d16<16, 32>;
+    p.chroma[X265_CSP_I422].cu[BLOCK_64x64].sa8d = sa8d16<32, 64>;
+
+    p.weight_pp = weight_pp_c;
+    p.weight_sp = weight_sp_c;
+
+    p.scale1D_128to64 = scale1D_128to64;
+    p.scale2D_64to32 = scale2D_64to32;
+    p.frameInitLowres = frame_init_lowres_core;
+    p.ssim_4x4x2_core = ssim_4x4x2_core;
+    p.ssim_end_4 = ssim_end_4;
+
+    p.planecopy_cp = planecopy_cp_c;
+    p.planecopy_sp = planecopy_sp_c;
+    p.planecopy_sp_shl = planecopy_sp_shl_c;
+    p.planeClipAndMax = planeClipAndMax_c;
+    p.propagateCost = estimateCUPropagateCost;
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/predict.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,990 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Deepthi Nandakumar <deepthi@multicorewareinc.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "common.h"
+#include "slice.h"
+#include "framedata.h"
+#include "picyuv.h"
+#include "predict.h"
+#include "primitives.h"
+
+using namespace X265_NS;
+
+#if _MSC_VER
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
+
+PredictionUnit::PredictionUnit(const CUData& cu, const CUGeom& cuGeom, int puIdx)  // snapshot CTU address, CU offset, and the PU's offset/size for puIdx
+{
+    /* address of CTU */
+    ctuAddr = cu.m_cuAddr;
+
+    /* offset of CU */
+    cuAbsPartIdx = cuGeom.absPartIdx;
+
+    /* offset and dimensions of PU */
+    cu.getPartIndexAndSize(puIdx, puAbsPartIdx, width, height);
+}
+
+namespace
+{
+inline pixel weightBidir(int w0, int16_t P0, int w1, int16_t P1, int round, int shift, int offset)  // weighted average of two 16-bit predictions, clipped to pixel range
+{
+    return x265_clip((w0 * (P0 + IF_INTERNAL_OFFS) + w1 * (P1 + IF_INTERNAL_OFFS) + round + (offset * (1 << (shift - 1)))) >> shift);  // IF_INTERNAL_OFFS re-centers the intermediate-precision samples
+}
+}
+
+Predict::Predict()  // buffers are allocated later by allocBuffers()
+{
+    m_immedVals = NULL;
+}
+
+Predict::~Predict()
+{
+    X265_FREE(m_immedVals);  // NOTE(review): presumably a NULL-tolerant free, making this safe if allocBuffers() was never called — confirm
+    m_predShortYuv[0].destroy();
+    m_predShortYuv[1].destroy();
+}
+
+bool Predict::allocBuffers(int csp)  // allocate intermediate prediction buffers for the given chroma sampling; returns false on allocation failure
+{
+    m_csp = csp;
+    m_hChromaShift = CHROMA_H_SHIFT(csp);
+    m_vChromaShift = CHROMA_V_SHIFT(csp);
+    CHECKED_MALLOC(m_immedVals, int16_t, 64 * (64 + NTAPS_LUMA - 1));  // scratch for separable filtering: max width x (max height + luma taps - 1); jumps to 'fail' on OOM
+
+    return m_predShortYuv[0].create(MAX_CU_SIZE, csp) && m_predShortYuv[1].create(MAX_CU_SIZE, csp);
+
+fail:
+    return false;
+}
+
+void Predict::motionCompensation(const CUData& cu, const PredictionUnit& pu, Yuv& predYuv, bool bLuma, bool bChroma)  // produce the inter prediction for one PU into predYuv (uni/bi, optionally weighted)
+{
+    int refIdx0 = cu.m_refIdx[0][pu.puAbsPartIdx];
+    int refIdx1 = cu.m_refIdx[1][pu.puAbsPartIdx];
+
+    /* XXX: disable chroma at a higher level ? */
+    if (cu.m_chromaFormat == X265_CSP_I400)
+        bChroma = false;
+    
+    if (cu.m_slice->isInterP())
+    {
+        /* P Slice */
+        WeightValues wv0[3];
+
+        X265_CHECK(refIdx0 >= 0, "invalid P refidx\n");
+        X265_CHECK(refIdx0 < cu.m_slice->m_numRefIdx[0], "P refidx out of range\n");
+        const WeightParam *wp0 = cu.m_slice->m_weightPredTable[0][refIdx0];
+
+        MV mv0 = cu.m_mv[0][pu.puAbsPartIdx];
+        cu.clipMv(mv0);
+
+        if (cu.m_slice->m_pps->bUseWeightPred && wp0->bPresentFlag)  // explicit weighted uni-prediction
+        {
+            int num_planes;
+            if (cu.m_chromaFormat != X265_CSP_I400)
+                num_planes = 3;
+            else
+                num_planes = 1;
+            for (int plane = 0; plane < num_planes; plane++)
+            {
+                wv0[plane].w      = wp0[plane].inputWeight;
+                wv0[plane].offset = wp0[plane].inputOffset * (1 << (X265_DEPTH - 8));  // offsets are specified at 8-bit depth; scale to internal depth
+                wv0[plane].shift  = wp0[plane].log2WeightDenom;
+                wv0[plane].round  = wp0[plane].log2WeightDenom >= 1 ? 1 << (wp0[plane].log2WeightDenom - 1) : 0;
+            }
+
+            ShortYuv& shortYuv = m_predShortYuv[0];
+
+            if (bLuma)
+                predInterLumaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
+            if (bChroma)
+                predInterChromaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
+
+            addWeightUni(pu, predYuv, shortYuv, wv0, bLuma, bChroma);
+        }
+        else
+        {
+            if (bLuma)
+                predInterLumaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
+            if (bChroma)
+                predInterChromaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
+        }
+    }
+    else
+    {
+        /* B Slice */
+
+        WeightValues wv0[3], wv1[3];
+        const WeightParam *pwp0, *pwp1;
+
+        X265_CHECK(refIdx0 < cu.m_slice->m_numRefIdx[0], "bidir refidx0 out of range\n");
+        X265_CHECK(refIdx1 < cu.m_slice->m_numRefIdx[1], "bidir refidx1 out of range\n");
+
+        if (cu.m_slice->m_pps->bUseWeightedBiPred)
+        {
+            int num_planes;
+            
+            pwp0 = refIdx0 >= 0 ? cu.m_slice->m_weightPredTable[0][refIdx0] : NULL;
+            pwp1 = refIdx1 >= 0 ? cu.m_slice->m_weightPredTable[1][refIdx1] : NULL;
+            if (cu.m_chromaFormat != X265_CSP_I400)
+                num_planes = 3;
+            else
+                num_planes = 1;
+            if (pwp0 && pwp1 && (pwp0->bPresentFlag || pwp1->bPresentFlag))
+            {
+                /* biprediction weighting */
+                for (int plane = 0; plane < num_planes; plane++)
+                {
+                    wv0[plane].w = pwp0[plane].inputWeight;
+                    wv0[plane].o = pwp0[plane].inputOffset * (1 << (X265_DEPTH - 8));
+                    wv0[plane].shift = pwp0[plane].log2WeightDenom;
+                    wv0[plane].round = 1 << pwp0[plane].log2WeightDenom;  // NOTE(review): addWeightBi recomputes round from shift and never reads this field — confirm intended
+
+                    wv1[plane].w = pwp1[plane].inputWeight;
+                    wv1[plane].o = pwp1[plane].inputOffset * (1 << (X265_DEPTH - 8));
+                    wv1[plane].shift = wv0[plane].shift;
+                    wv1[plane].round = wv0[plane].round;
+                }
+            }
+            else
+            {
+                /* uniprediction weighting, always outputs to wv0 */
+                const WeightParam* pwp = (refIdx0 >= 0) ? pwp0 : pwp1;
+                for (int plane = 0; plane < num_planes; plane++)
+                {
+                    wv0[plane].w = pwp[plane].inputWeight;
+                    wv0[plane].offset = pwp[plane].inputOffset * (1 << (X265_DEPTH - 8));
+                    wv0[plane].shift = pwp[plane].log2WeightDenom;
+                    wv0[plane].round = pwp[plane].log2WeightDenom >= 1 ? 1 << (pwp[plane].log2WeightDenom - 1) : 0;
+                }
+            }
+        }
+        else
+            pwp0 = pwp1 = NULL;
+
+        if (refIdx0 >= 0 && refIdx1 >= 0)  // true bi-prediction: average (or weight) L0 and L1
+        {
+            MV mv0 = cu.m_mv[0][pu.puAbsPartIdx];
+            MV mv1 = cu.m_mv[1][pu.puAbsPartIdx];
+            cu.clipMv(mv0);
+            cu.clipMv(mv1);
+
+            if (bLuma)
+            {
+                predInterLumaShort(pu, m_predShortYuv[0], *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
+                predInterLumaShort(pu, m_predShortYuv[1], *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
+            }
+            if (bChroma)
+            {
+                predInterChromaShort(pu, m_predShortYuv[0], *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
+                predInterChromaShort(pu, m_predShortYuv[1], *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
+            }
+
+            if (pwp0 && pwp1 && (pwp0->bPresentFlag || pwp1->bPresentFlag))
+                addWeightBi(pu, predYuv, m_predShortYuv[0], m_predShortYuv[1], wv0, wv1, bLuma, bChroma);
+            else
+                predYuv.addAvg(m_predShortYuv[0], m_predShortYuv[1], pu.puAbsPartIdx, pu.width, pu.height, bLuma, bChroma);
+        }
+        else if (refIdx0 >= 0)  // uni-prediction from list 0
+        {
+            MV mv0 = cu.m_mv[0][pu.puAbsPartIdx];
+            cu.clipMv(mv0);
+
+            if (pwp0 && pwp0->bPresentFlag)
+            {
+                ShortYuv& shortYuv = m_predShortYuv[0];
+
+                if (bLuma)
+                    predInterLumaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
+                if (bChroma)
+                    predInterChromaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
+
+                addWeightUni(pu, predYuv, shortYuv, wv0, bLuma, bChroma);
+            }
+            else
+            {
+                if (bLuma)
+                    predInterLumaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
+                if (bChroma)
+                    predInterChromaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
+            }
+        }
+        else
+        {
+            MV mv1 = cu.m_mv[1][pu.puAbsPartIdx];
+            cu.clipMv(mv1);
+
+            /* uniprediction to L1 */
+            X265_CHECK(refIdx1 >= 0, "refidx1 was not positive\n");
+
+            if (pwp1 && pwp1->bPresentFlag)
+            {
+                ShortYuv& shortYuv = m_predShortYuv[0];
+
+                if (bLuma)
+                    predInterLumaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
+                if (bChroma)
+                    predInterChromaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
+
+                addWeightUni(pu, predYuv, shortYuv, wv0, bLuma, bChroma);
+            }
+            else
+            {
+                if (bLuma)
+                    predInterLumaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
+                if (bChroma)
+                    predInterChromaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
+            }
+        }
+    }
+}
+
+void Predict::predInterLumaPixel(const PredictionUnit& pu, Yuv& dstYuv, const PicYuv& refPic, const MV& mv) const  // interpolate the luma PU from refPic at quarter-pel mv, writing pixels directly
+{
+    pixel* dst = dstYuv.getLumaAddr(pu.puAbsPartIdx);
+    intptr_t dstStride = dstYuv.m_size;
+
+    intptr_t srcStride = refPic.m_stride;
+    intptr_t srcOffset = (mv.x >> 2) + (mv.y >> 2) * srcStride;  // integer-pel part of the motion vector
+    int partEnum = partitionFromSizes(pu.width, pu.height);
+    const pixel* src = refPic.getLumaAddr(pu.ctuAddr, pu.cuAbsPartIdx + pu.puAbsPartIdx) + srcOffset;
+
+    int xFrac = mv.x & 0x3;  // quarter-pel fractional parts
+    int yFrac = mv.y & 0x3;
+
+    if (!(yFrac | xFrac))
+        primitives.pu[partEnum].copy_pp(dst, dstStride, src, srcStride);  // integer mv: plain copy
+    else if (!yFrac)
+        primitives.pu[partEnum].luma_hpp(src, srcStride, dst, dstStride, xFrac);
+    else if (!xFrac)
+        primitives.pu[partEnum].luma_vpp(src, srcStride, dst, dstStride, yFrac);
+    else
+        primitives.pu[partEnum].luma_hvpp(src, srcStride, dst, dstStride, xFrac, yFrac);  // separable horizontal+vertical filtering
+}
+
+void Predict::predInterLumaShort(const PredictionUnit& pu, ShortYuv& dstSYuv, const PicYuv& refPic, const MV& mv) const  // like predInterLumaPixel, but keeps 16-bit intermediate precision for later weighting/averaging
+{
+    int16_t* dst = dstSYuv.getLumaAddr(pu.puAbsPartIdx);
+    intptr_t dstStride = dstSYuv.m_size;
+
+    intptr_t srcStride = refPic.m_stride;
+    intptr_t srcOffset = (mv.x >> 2) + (mv.y >> 2) * srcStride;
+    const pixel* src = refPic.getLumaAddr(pu.ctuAddr, pu.cuAbsPartIdx + pu.puAbsPartIdx) + srcOffset;
+
+    int xFrac = mv.x & 0x3;
+    int yFrac = mv.y & 0x3;
+
+    int partEnum = partitionFromSizes(pu.width, pu.height);
+
+    X265_CHECK((pu.width % 4) + (pu.height % 4) == 0, "width or height not divisible by 4\n");
+    X265_CHECK(dstStride == MAX_CU_SIZE, "stride expected to be max cu size\n");
+
+    if (!(yFrac | xFrac))
+        primitives.pu[partEnum].convert_p2s(src, srcStride, dst, dstStride);  // integer mv: widen pixels to 16-bit
+    else if (!yFrac)
+        primitives.pu[partEnum].luma_hps(src, srcStride, dst, dstStride, xFrac, 0);
+    else if (!xFrac)
+        primitives.pu[partEnum].luma_vps(src, srcStride, dst, dstStride, yFrac);
+    else
+    {
+        int tmpStride = pu.width;
+        int filterSize = NTAPS_LUMA;
+        int halfFilterSize = (filterSize >> 1);
+        primitives.pu[partEnum].luma_hps(src, srcStride, m_immedVals, tmpStride, xFrac, 1);  // last arg 1: extend rows above/below for the vertical pass
+        primitives.pu[partEnum].luma_vss(m_immedVals + (halfFilterSize - 1) * tmpStride, tmpStride, dst, dstStride, yFrac);
+    }
+}
+
+void Predict::predInterChromaPixel(const PredictionUnit& pu, Yuv& dstYuv, const PicYuv& refPic, const MV& mv) const  // interpolate both chroma planes of the PU at fractional mv, writing pixels directly
+{
+    intptr_t dstStride = dstYuv.m_csize;
+    intptr_t refStride = refPic.m_strideC;
+
+    int shiftHor = (2 + m_hChromaShift);  // quarter-pel plus chroma subsampling
+    int shiftVer = (2 + m_vChromaShift);
+
+    intptr_t refOffset = (mv.x >> shiftHor) + (mv.y >> shiftVer) * refStride;
+
+    const pixel* refCb = refPic.getCbAddr(pu.ctuAddr, pu.cuAbsPartIdx + pu.puAbsPartIdx) + refOffset;
+    const pixel* refCr = refPic.getCrAddr(pu.ctuAddr, pu.cuAbsPartIdx + pu.puAbsPartIdx) + refOffset;
+
+    pixel* dstCb = dstYuv.getCbAddr(pu.puAbsPartIdx);
+    pixel* dstCr = dstYuv.getCrAddr(pu.puAbsPartIdx);
+
+    int xFrac = mv.x & ((1 << shiftHor) - 1);
+    int yFrac = mv.y & ((1 << shiftVer) - 1);
+
+    int partEnum = partitionFromSizes(pu.width, pu.height);
+    
+    if (!(yFrac | xFrac))
+    {
+        primitives.chroma[m_csp].pu[partEnum].copy_pp(dstCb, dstStride, refCb, refStride);
+        primitives.chroma[m_csp].pu[partEnum].copy_pp(dstCr, dstStride, refCr, refStride);
+    }
+    else if (!yFrac)
+    {
+        primitives.chroma[m_csp].pu[partEnum].filter_hpp(refCb, refStride, dstCb, dstStride, xFrac << (1 - m_hChromaShift));  // convert luma-resolution frac to chroma filter phase
+        primitives.chroma[m_csp].pu[partEnum].filter_hpp(refCr, refStride, dstCr, dstStride, xFrac << (1 - m_hChromaShift));
+    }
+    else if (!xFrac)
+    {
+        primitives.chroma[m_csp].pu[partEnum].filter_vpp(refCb, refStride, dstCb, dstStride, yFrac << (1 - m_vChromaShift));
+        primitives.chroma[m_csp].pu[partEnum].filter_vpp(refCr, refStride, dstCr, dstStride, yFrac << (1 - m_vChromaShift));
+    }
+    else
+    {
+        int extStride = pu.width >> m_hChromaShift;
+        int filterSize = NTAPS_CHROMA;
+        int halfFilterSize = (filterSize >> 1);
+
+        primitives.chroma[m_csp].pu[partEnum].filter_hps(refCb, refStride, m_immedVals, extStride, xFrac << (1 - m_hChromaShift), 1);
+        primitives.chroma[m_csp].pu[partEnum].filter_vsp(m_immedVals + (halfFilterSize - 1) * extStride, extStride, dstCb, dstStride, yFrac << (1 - m_vChromaShift));
+
+        primitives.chroma[m_csp].pu[partEnum].filter_hps(refCr, refStride, m_immedVals, extStride, xFrac << (1 - m_hChromaShift), 1);
+        primitives.chroma[m_csp].pu[partEnum].filter_vsp(m_immedVals + (halfFilterSize - 1) * extStride, extStride, dstCr, dstStride, yFrac << (1 - m_vChromaShift));
+    }
+}
+
+void Predict::predInterChromaShort(const PredictionUnit& pu, ShortYuv& dstSYuv, const PicYuv& refPic, const MV& mv) const  // chroma interpolation keeping 16-bit intermediate precision for later weighting/averaging
+{
+    intptr_t refStride = refPic.m_strideC;
+    intptr_t dstStride = dstSYuv.m_csize;
+
+    int shiftHor = (2 + m_hChromaShift);  // quarter-pel plus chroma subsampling
+    int shiftVer = (2 + m_vChromaShift);
+
+    intptr_t refOffset = (mv.x >> shiftHor) + (mv.y >> shiftVer) * refStride;
+
+    const pixel* refCb = refPic.getCbAddr(pu.ctuAddr, pu.cuAbsPartIdx + pu.puAbsPartIdx) + refOffset;
+    const pixel* refCr = refPic.getCrAddr(pu.ctuAddr, pu.cuAbsPartIdx + pu.puAbsPartIdx) + refOffset;
+
+    int16_t* dstCb = dstSYuv.getCbAddr(pu.puAbsPartIdx);
+    int16_t* dstCr = dstSYuv.getCrAddr(pu.puAbsPartIdx);
+
+    int xFrac = mv.x & ((1 << shiftHor) - 1);
+    int yFrac = mv.y & ((1 << shiftVer) - 1);
+
+    int partEnum = partitionFromSizes(pu.width, pu.height);
+    
+    uint32_t cxWidth  = pu.width >> m_hChromaShift;
+
+    X265_CHECK(((cxWidth | (pu.height >> m_vChromaShift)) % 2) == 0, "chroma block size expected to be multiple of 2\n");
+
+    if (!(yFrac | xFrac))
+    {
+        primitives.chroma[m_csp].pu[partEnum].p2s(refCb, refStride, dstCb, dstStride);  // integer mv: widen pixels to 16-bit
+        primitives.chroma[m_csp].pu[partEnum].p2s(refCr, refStride, dstCr, dstStride);
+    }
+    else if (!yFrac)
+    {
+        primitives.chroma[m_csp].pu[partEnum].filter_hps(refCb, refStride, dstCb, dstStride, xFrac << (1 - m_hChromaShift), 0);  // convert luma-resolution frac to chroma filter phase
+        primitives.chroma[m_csp].pu[partEnum].filter_hps(refCr, refStride, dstCr, dstStride, xFrac << (1 - m_hChromaShift), 0);
+    }
+    else if (!xFrac)
+    {
+        primitives.chroma[m_csp].pu[partEnum].filter_vps(refCb, refStride, dstCb, dstStride, yFrac << (1 - m_vChromaShift));
+        primitives.chroma[m_csp].pu[partEnum].filter_vps(refCr, refStride, dstCr, dstStride, yFrac << (1 - m_vChromaShift));
+    }
+    else
+    {
+        int extStride = cxWidth;
+        int filterSize = NTAPS_CHROMA;
+        int halfFilterSize = (filterSize >> 1);
+        primitives.chroma[m_csp].pu[partEnum].filter_hps(refCb, refStride, m_immedVals, extStride, xFrac << (1 - m_hChromaShift), 1);  // last arg 1: extend rows for the vertical pass
+        primitives.chroma[m_csp].pu[partEnum].filter_vss(m_immedVals + (halfFilterSize - 1) * extStride, extStride, dstCb, dstStride, yFrac << (1 - m_vChromaShift));
+        primitives.chroma[m_csp].pu[partEnum].filter_hps(refCr, refStride, m_immedVals, extStride, xFrac << (1 - m_hChromaShift), 1);
+        primitives.chroma[m_csp].pu[partEnum].filter_vss(m_immedVals + (halfFilterSize - 1) * extStride, extStride, dstCr, dstStride, yFrac << (1 - m_vChromaShift));
+    }
+}
+
+/* weighted averaging for bi-pred */
+void Predict::addWeightBi(const PredictionUnit& pu, Yuv& predYuv, const ShortYuv& srcYuv0, const ShortYuv& srcYuv1, const WeightValues wp0[3], const WeightValues wp1[3], bool bLuma, bool bChroma) const  // combine two 16-bit predictions with explicit bi-prediction weights into predYuv
+{
+    int x, y;
+
+    int w0, w1, offset, shiftNum, shift, round;
+    uint32_t src0Stride, src1Stride, dststride;
+
+    if (bLuma)
+    {
+        pixel* dstY = predYuv.getLumaAddr(pu.puAbsPartIdx);
+        const int16_t* srcY0 = srcYuv0.getLumaAddr(pu.puAbsPartIdx);
+        const int16_t* srcY1 = srcYuv1.getLumaAddr(pu.puAbsPartIdx);
+
+        // Luma
+        w0      = wp0[0].w;
+        offset  = wp0[0].o + wp1[0].o;
+        shiftNum = IF_INTERNAL_PREC - X265_DEPTH;  // precision restored when collapsing 16-bit intermediates to pixels
+        shift   = wp0[0].shift + shiftNum + 1;     // +1 accounts for the two summed predictions
+        round   = shift ? (1 << (shift - 1)) : 0;
+        w1      = wp1[0].w;
+
+        src0Stride = srcYuv0.m_size;
+        src1Stride = srcYuv1.m_size;
+        dststride = predYuv.m_size;
+
+        // TODO: can we use weight_sp here?
+        for (y = pu.height - 1; y >= 0; y--)
+        {
+            for (x = pu.width - 1; x >= 0; )
+            {
+                // note: luma min width is 4 (loop manually unrolled by 4)
+                dstY[x] = weightBidir(w0, srcY0[x], w1, srcY1[x], round, shift, offset);
+                x--;
+                dstY[x] = weightBidir(w0, srcY0[x], w1, srcY1[x], round, shift, offset);
+                x--;
+                dstY[x] = weightBidir(w0, srcY0[x], w1, srcY1[x], round, shift, offset);
+                x--;
+                dstY[x] = weightBidir(w0, srcY0[x], w1, srcY1[x], round, shift, offset);
+                x--;
+            }
+
+            srcY0 += src0Stride;
+            srcY1 += src1Stride;
+            dstY  += dststride;
+        }
+    }
+
+    if (bChroma)
+    {
+        pixel* dstU = predYuv.getCbAddr(pu.puAbsPartIdx);
+        pixel* dstV = predYuv.getCrAddr(pu.puAbsPartIdx);
+        const int16_t* srcU0 = srcYuv0.getCbAddr(pu.puAbsPartIdx);
+        const int16_t* srcV0 = srcYuv0.getCrAddr(pu.puAbsPartIdx);
+        const int16_t* srcU1 = srcYuv1.getCbAddr(pu.puAbsPartIdx);
+        const int16_t* srcV1 = srcYuv1.getCrAddr(pu.puAbsPartIdx);
+
+        // Chroma U
+        w0      = wp0[1].w;
+        offset  = wp0[1].o + wp1[1].o;
+        shiftNum = IF_INTERNAL_PREC - X265_DEPTH;
+        shift   = wp0[1].shift + shiftNum + 1;
+        round   = shift ? (1 << (shift - 1)) : 0;
+        w1      = wp1[1].w;
+
+        src0Stride = srcYuv0.m_csize;
+        src1Stride = srcYuv1.m_csize;
+        dststride  = predYuv.m_csize;
+
+        uint32_t cwidth = pu.width >> srcYuv0.m_hChromaShift;
+        uint32_t cheight = pu.height >> srcYuv0.m_vChromaShift;
+
+        // TODO: can we use weight_sp here?
+        for (y = cheight - 1; y >= 0; y--)
+        {
+            for (x = cwidth - 1; x >= 0;)
+            {
+                // note: chroma min width is 2 (loop manually unrolled by 2)
+                dstU[x] = weightBidir(w0, srcU0[x], w1, srcU1[x], round, shift, offset);
+                x--;
+                dstU[x] = weightBidir(w0, srcU0[x], w1, srcU1[x], round, shift, offset);
+                x--;
+            }
+
+            srcU0 += src0Stride;
+            srcU1 += src1Stride;
+            dstU  += dststride;
+        }
+
+        // Chroma V (shiftNum still holds the value computed for U)
+        w0     = wp0[2].w;
+        offset = wp0[2].o + wp1[2].o;
+        shift  = wp0[2].shift + shiftNum + 1;
+        round  = shift ? (1 << (shift - 1)) : 0;
+        w1     = wp1[2].w;
+
+        for (y = cheight - 1; y >= 0; y--)
+        {
+            for (x = cwidth - 1; x >= 0;)
+            {
+                // note: chroma min width is 2
+                dstV[x] = weightBidir(w0, srcV0[x], w1, srcV1[x], round, shift, offset);
+                x--;
+                dstV[x] = weightBidir(w0, srcV0[x], w1, srcV1[x], round, shift, offset);
+                x--;
+            }
+
+            srcV0 += src0Stride;
+            srcV1 += src1Stride;
+            dstV  += dststride;
+        }
+    }
+}
+
+/* weighted averaging for uni-pred: apply the per-plane explicit weighted
+ * prediction parameters wp[0..2] (Y/Cb/Cr) to the 16-bit intermediate
+ * samples in srcYuv and write the pixel results into predYuv.
+ * bLuma/bChroma select which planes are processed. */
+void Predict::addWeightUni(const PredictionUnit& pu, Yuv& predYuv, const ShortYuv& srcYuv, const WeightValues wp[3], bool bLuma, bool bChroma) const
+{
+    int w0, offset, shiftNum, shift, round;
+    uint32_t srcStride, dstStride;
+
+    if (bLuma)
+    {
+        pixel* dstY = predYuv.getLumaAddr(pu.puAbsPartIdx);
+        const int16_t* srcY0 = srcYuv.getLumaAddr(pu.puAbsPartIdx);
+
+        // Luma
+        w0      = wp[0].w;
+        offset  = wp[0].offset;
+        // shiftNum removes the interpolation filter's extra internal precision
+        shiftNum = IF_INTERNAL_PREC - X265_DEPTH;
+        shift   = wp[0].shift + shiftNum;
+        round   = shift ? (1 << (shift - 1)) : 0;
+        srcStride = srcYuv.m_size;
+        dstStride = predYuv.m_size;
+
+        primitives.weight_sp(srcY0, dstY, srcStride, dstStride, pu.width, pu.height, w0, round, shift, offset);
+    }
+
+    if (bChroma)
+    {
+        pixel* dstU = predYuv.getCbAddr(pu.puAbsPartIdx);
+        pixel* dstV = predYuv.getCrAddr(pu.puAbsPartIdx);
+        const int16_t* srcU0 = srcYuv.getCbAddr(pu.puAbsPartIdx);
+        const int16_t* srcV0 = srcYuv.getCrAddr(pu.puAbsPartIdx);
+
+        // Chroma U
+        w0      = wp[1].w;
+        offset  = wp[1].offset;
+        shiftNum = IF_INTERNAL_PREC - X265_DEPTH;
+        shift   = wp[1].shift + shiftNum;
+        round   = shift ? (1 << (shift - 1)) : 0;
+
+        srcStride = srcYuv.m_csize;
+        dstStride = predYuv.m_csize;
+
+        // chroma block dimensions derived from the luma PU size and the CSP shifts
+        uint32_t cwidth = pu.width >> srcYuv.m_hChromaShift;
+        uint32_t cheight = pu.height >> srcYuv.m_vChromaShift;
+
+        primitives.weight_sp(srcU0, dstU, srcStride, dstStride, cwidth, cheight, w0, round, shift, offset);
+
+        // Chroma V (reuses the U-plane strides, dimensions and shiftNum)
+        w0     = wp[2].w;
+        offset = wp[2].offset;
+        shift  = wp[2].shift + shiftNum;
+        round  = shift ? (1 << (shift - 1)) : 0;
+
+        primitives.weight_sp(srcV0, dstV, srcStride, dstStride, cwidth, cheight, w0, round, shift, offset);
+    }
+}
+
+/* Run intra prediction mode dirMode for a luma TU of (1 << log2TrSize)
+ * samples, writing the predicted block to dst with the given stride. */
+void Predict::predIntraLumaAng(uint32_t dirMode, pixel* dst, intptr_t stride, uint32_t log2TrSize)
+{
+    const int blkSize = 1 << log2TrSize;
+    const int blkIdx  = log2TrSize - 2;
+    X265_CHECK(blkIdx >= 0 && blkIdx < 4, "intra block size is out of range\n");
+
+    // select the filtered neighbour buffer when this mode/size pair requires it
+    const int useFiltered = (g_intraFilterFlags[dirMode] & blkSize) ? 1 : 0;
+    const bool bEdgeFilter = (log2TrSize <= 4);
+    primitives.cu[blkIdx].intra_pred[dirMode](dst, stride, intraNeighbourBuf[useFiltered], dirMode, bEdgeFilter);
+}
+
+/* Run intra prediction mode dirMode for a chroma TU of (1 << log2TrSizeC)
+ * samples; the trailing 0 disables the luma-only edge filter. */
+void Predict::predIntraChromaAng(uint32_t dirMode, pixel* dst, intptr_t stride, uint32_t log2TrSizeC)
+{
+    const int blkSizeC = 1 << log2TrSizeC;
+    const int blkIdx   = log2TrSizeC - 2;
+    X265_CHECK(blkIdx >= 0 && blkIdx < 4, "intra block size is out of range\n");
+
+    // only 4:4:4 chroma ever uses the smoothed reference samples
+    int useFiltered = 0;
+    if (m_csp == X265_CSP_I444 && (g_intraFilterFlags[dirMode] & blkSizeC))
+        useFiltered = 1;
+    primitives.cu[blkIdx].intra_pred[dirMode](dst, stride, intraNeighbourBuf[useFiltered], dirMode, 0);
+}
+
+/* Prepare the luma intra reference samples for one PU: fill
+ * intraNeighbourBuf[0] with the raw (padded) neighbours, and when the
+ * mode/size combination requires it derive the smoothed set into
+ * intraNeighbourBuf[1].  dirMode == ALL_IDX requests filtering for every
+ * size any mode could need (8/16/32). */
+void Predict::initAdiPattern(const CUData& cu, const CUGeom& cuGeom, uint32_t puAbsPartIdx, const IntraNeighbors& intraNeighbors, int dirMode)
+{
+    int tuSize = 1 << intraNeighbors.log2TrSize;
+    int tuSize2 = tuSize << 1;
+
+    PicYuv* reconPic = cu.m_encData->m_reconPic;
+    pixel* adiOrigin = reconPic->getLumaAddr(cu.m_cuAddr, cuGeom.absPartIdx + puAbsPartIdx);
+    intptr_t picStride = reconPic->m_stride;
+
+    fillReferenceSamples(adiOrigin, picStride, intraNeighbors, intraNeighbourBuf[0]);
+
+    pixel* refBuf = intraNeighbourBuf[0];
+    pixel* fltBuf = intraNeighbourBuf[1];
+
+    // buffer layout: [0] = top-left, [1..2*tuSize] = above row, then left column
+    pixel topLeft = refBuf[0], topLast = refBuf[tuSize2], leftLast = refBuf[tuSize2 + tuSize2];
+
+    if (dirMode == ALL_IDX ? (8 | 16 | 32) & tuSize : g_intraFilterFlags[dirMode] & tuSize)
+    {
+        // generate filtered intra prediction samples
+
+        if (cu.m_slice->m_sps->bUseStrongIntraSmoothing && tuSize == 32)
+        {
+            const int threshold = 1 << (X265_DEPTH - 5);
+
+            pixel topMiddle = refBuf[32], leftMiddle = refBuf[tuSize2 + 32];
+
+            // when both borders are nearly linear (midpoint close to the corner
+            // average), replace the normal smoothing with bilinear interpolation
+            // between the corner samples
+            if (abs(topLeft + topLast  - (topMiddle  << 1)) < threshold &&
+                abs(topLeft + leftLast - (leftMiddle << 1)) < threshold)
+            {
+                // "strong" bilinear interpolation
+                const int shift = 5 + 1;
+                int init = (topLeft << shift) + tuSize;
+                int deltaL, deltaR;
+
+                deltaL = leftLast - topLeft; deltaR = topLast - topLeft;
+
+                fltBuf[0] = topLeft;
+                for (int i = 1; i < tuSize2; i++)
+                {
+                    fltBuf[i + tuSize2] = (pixel)((init + deltaL * i) >> shift); // Left Filtering
+                    fltBuf[i] = (pixel)((init + deltaR * i) >> shift);           // Above Filtering
+                }
+                // pin the far ends so they are exact, not interpolated
+                fltBuf[tuSize2] = topLast;
+                fltBuf[tuSize2 + tuSize2] = leftLast;
+                return;
+            }
+        }
+
+        primitives.cu[intraNeighbors.log2TrSize - 2].intra_filter(refBuf, fltBuf);
+    }
+}
+
+/* Build the intra reference sample array for one chroma plane (chromaId);
+ * only 4:4:4 additionally produces the filtered set, matching luma. */
+void Predict::initAdiPatternChroma(const CUData& cu, const CUGeom& cuGeom, uint32_t puAbsPartIdx, const IntraNeighbors& intraNeighbors, uint32_t chromaId)
+{
+    PicYuv* recon = cu.m_encData->m_reconPic;
+    const pixel* srcOrigin = recon->getChromaAddr(chromaId, cu.m_cuAddr, cuGeom.absPartIdx + puAbsPartIdx);
+    const intptr_t srcStride = recon->m_strideC;
+
+    fillReferenceSamples(srcOrigin, srcStride, intraNeighbors, intraNeighbourBuf[0]);
+
+    if (m_csp == X265_CSP_I444)
+        primitives.cu[intraNeighbors.log2TrSize - 2].intra_filter(intraNeighbourBuf[0], intraNeighbourBuf[1]);
+}
+
+/* Compute neighbour availability flags and unit geometry for intra
+ * prediction of the TU at absPartIdx/tuDepth, writing the result into
+ * *intraNeighbors.  bNeighborFlags is laid out below-left -> left ->
+ * top-left -> above -> above-right, with index leftUnits being top-left. */
+void Predict::initIntraNeighbors(const CUData& cu, uint32_t absPartIdx, uint32_t tuDepth, bool isLuma, IntraNeighbors *intraNeighbors)
+{
+    uint32_t log2TrSize = cu.m_log2CUSize[0] - tuDepth;
+    int log2UnitWidth = LOG2_UNIT_SIZE;
+    int log2UnitHeight = LOG2_UNIT_SIZE;
+
+    if (!isLuma)
+    {
+        // chroma planes are subsampled: shrink the TU and unit dimensions
+        log2TrSize -= cu.m_hChromaShift;
+        log2UnitWidth -= cu.m_hChromaShift;
+        log2UnitHeight -= cu.m_vChromaShift;
+    }
+
+    int numIntraNeighbor;
+    bool* bNeighborFlags = intraNeighbors->bNeighborFlags;
+
+    // corner part indices of the TU in z-scan order (LT/RT/LB)
+    uint32_t numPartInWidth = 1 << (cu.m_log2CUSize[0] - LOG2_UNIT_SIZE - tuDepth);
+    uint32_t partIdxLT = cu.m_absIdxInCTU + absPartIdx;
+    uint32_t partIdxRT = g_rasterToZscan[g_zscanToRaster[partIdxLT] + numPartInWidth - 1];
+
+    uint32_t tuSize = 1 << log2TrSize;
+    int  tuWidthInUnits = tuSize >> log2UnitWidth;
+    int  tuHeightInUnits = tuSize >> log2UnitHeight;
+    int  aboveUnits = tuWidthInUnits << 1;
+    int  leftUnits = tuHeightInUnits << 1;
+    int  partIdxStride = cu.m_slice->m_sps->numPartInCUSize;
+    uint32_t partIdxLB = g_rasterToZscan[g_zscanToRaster[partIdxLT] + ((tuHeightInUnits - 1) * partIdxStride)];
+
+    if (cu.m_slice->isIntra() || !cu.m_slice->m_pps->bConstrainedIntraPred)
+    {
+        // any reconstructed neighbour counts as available
+        bNeighborFlags[leftUnits] = isAboveLeftAvailable<false>(cu, partIdxLT);
+        numIntraNeighbor  = (int)(bNeighborFlags[leftUnits]);
+        numIntraNeighbor += isAboveAvailable<false>(cu, partIdxLT, partIdxRT, bNeighborFlags + leftUnits + 1);
+        numIntraNeighbor += isAboveRightAvailable<false>(cu, partIdxRT, bNeighborFlags + leftUnits + 1 + tuWidthInUnits, tuWidthInUnits);
+        numIntraNeighbor += isLeftAvailable<false>(cu, partIdxLT, partIdxLB, bNeighborFlags + leftUnits - 1);
+        numIntraNeighbor += isBelowLeftAvailable<false>(cu, partIdxLB, bNeighborFlags + tuHeightInUnits - 1, tuHeightInUnits);
+    }
+    else
+    {
+        // constrained intra prediction: only intra-coded neighbours are usable
+        bNeighborFlags[leftUnits] = isAboveLeftAvailable<true>(cu, partIdxLT);
+        numIntraNeighbor  = (int)(bNeighborFlags[leftUnits]);
+        numIntraNeighbor += isAboveAvailable<true>(cu, partIdxLT, partIdxRT, bNeighborFlags + leftUnits + 1);
+        numIntraNeighbor += isAboveRightAvailable<true>(cu, partIdxRT, bNeighborFlags + leftUnits + 1 + tuWidthInUnits, tuWidthInUnits);
+        numIntraNeighbor += isLeftAvailable<true>(cu, partIdxLT, partIdxLB, bNeighborFlags + leftUnits - 1);
+        numIntraNeighbor += isBelowLeftAvailable<true>(cu, partIdxLB, bNeighborFlags + tuHeightInUnits - 1, tuHeightInUnits);
+    }
+
+    intraNeighbors->numIntraNeighbor = numIntraNeighbor;
+    intraNeighbors->totalUnits = aboveUnits + leftUnits + 1;
+    intraNeighbors->aboveUnits = aboveUnits;
+    intraNeighbors->leftUnits = leftUnits;
+    intraNeighbors->unitWidth = 1 << log2UnitWidth;
+    intraNeighbors->unitHeight = 1 << log2UnitHeight;
+    intraNeighbors->log2TrSize = log2TrSize;
+}
+
+/* Assemble the intra reference samples for a TU into dst: the top row of
+ * 2*tuSize+1 samples (including the top-left corner) at dst[0..refSize),
+ * followed by the left column of 2*tuSize samples.  Unavailable units are
+ * padded from the nearest available neighbour; if nothing is available the
+ * whole border is set to the mid-range DC value. */
+void Predict::fillReferenceSamples(const pixel* adiOrigin, intptr_t picStride, const IntraNeighbors& intraNeighbors, pixel dst[258])
+{
+    const pixel dcValue = (pixel)(1 << (X265_DEPTH - 1));
+    int numIntraNeighbor = intraNeighbors.numIntraNeighbor;
+    int totalUnits = intraNeighbors.totalUnits;
+    uint32_t tuSize = 1 << intraNeighbors.log2TrSize;
+    uint32_t refSize = tuSize * 2 + 1;
+
+    // Nothing is available, perform DC prediction.
+    if (numIntraNeighbor == 0)
+    {
+        // Fill top border with DC value
+        for (uint32_t i = 0; i < refSize; i++)
+            dst[i] = dcValue;
+
+        // Fill left border with DC value
+        for (uint32_t i = 0; i < refSize - 1; i++)
+            dst[i + refSize] = dcValue;
+    }
+    else if (numIntraNeighbor == totalUnits)
+    {
+        // everything available: copy straight from the reconstructed picture
+        // Fill top border with rec. samples
+        const pixel* adiTemp = adiOrigin - picStride - 1;
+        memcpy(dst, adiTemp, refSize * sizeof(pixel));
+
+        // Fill left border with rec. samples
+        adiTemp = adiOrigin - 1;
+        for (uint32_t i = 0; i < refSize - 1; i++)
+        {
+            dst[i + refSize] = adiTemp[0];
+            adiTemp += picStride;
+        }
+    }
+    else // reference samples are partially available
+    {
+        const bool *bNeighborFlags = intraNeighbors.bNeighborFlags;
+        const bool *pNeighborFlags;
+        int aboveUnits = intraNeighbors.aboveUnits;
+        int leftUnits = intraNeighbors.leftUnits;
+        int unitWidth = intraNeighbors.unitWidth;
+        int unitHeight = intraNeighbors.unitHeight;
+        int totalSamples = (leftUnits * unitHeight) + ((aboveUnits + 1) * unitWidth);
+        // scratch line: left column stored bottom-up, then top-left, then top row
+        pixel adiLineBuffer[5 * MAX_CU_SIZE];
+        pixel *adi;
+
+        // Initialize
+        for (int i = 0; i < totalSamples; i++)
+            adiLineBuffer[i] = dcValue;
+
+        // Fill top-left sample (replicated across one unit width)
+        const pixel* adiTemp = adiOrigin - picStride - 1;
+        adi = adiLineBuffer + (leftUnits * unitHeight);
+        pNeighborFlags = bNeighborFlags + leftUnits;
+        if (*pNeighborFlags)
+        {
+            pixel topLeftVal = adiTemp[0];
+            for (int i = 0; i < unitWidth; i++)
+                adi[i] = topLeftVal;
+        }
+
+        // Fill left & below-left samples
+        adiTemp += picStride;
+        adi--;
+        // NOTE: over copy here, but reduce condition operators
+        for (int j = 0; j < leftUnits * unitHeight; j++)
+        {
+            adi[-j] = adiTemp[j * picStride];
+        }
+
+        // Fill above & above-right samples
+        adiTemp = adiOrigin - picStride;
+        adi = adiLineBuffer + (leftUnits * unitHeight) + unitWidth;
+        // NOTE: over copy here, but reduce condition operators
+        memcpy(adi, adiTemp, aboveUnits * unitWidth * sizeof(*adiTemp));
+
+        // Pad reference samples when necessary
+        // curr walks the units starting from the very bottom-left (index 0)
+        int curr = 0;
+        int next = 1;
+        adi = adiLineBuffer;
+        int pAdiLineTopRowOffset = leftUnits * (unitHeight - unitWidth);
+        if (!bNeighborFlags[0])
+        {
+            // very bottom unit of bottom-left; at least one unit will be valid.
+            while (next < totalUnits && !bNeighborFlags[next])
+                next++;
+
+            pixel* pAdiLineNext = adiLineBuffer + ((next < leftUnits) ? (next * unitHeight) : (pAdiLineTopRowOffset + (next * unitWidth)));
+            const pixel refSample = *pAdiLineNext;
+            // Pad unavailable samples with new value
+            int nextOrTop = X265_MIN(next, leftUnits);
+
+            // fill left column
+#if HIGH_BIT_DEPTH
+            while (curr < nextOrTop)
+            {
+                for (int i = 0; i < unitHeight; i++)
+                    adi[i] = refSample;
+
+                adi += unitHeight;
+                curr++;
+            }
+
+            // fill top row
+            while (curr < next)
+            {
+                for (int i = 0; i < unitWidth; i++)
+                    adi[i] = refSample;
+
+                adi += unitWidth;
+                curr++;
+            }
+#else
+            // pixel is a single byte in this build, so memset can fill it
+            X265_CHECK(curr <= nextOrTop, "curr must be less than or equal to nextOrTop\n");
+            if (curr < nextOrTop)
+            {
+                const int fillSize = unitHeight * (nextOrTop - curr);
+                memset(adi, refSample, fillSize * sizeof(pixel));
+                curr = nextOrTop;
+                adi += fillSize;
+            }
+
+            if (curr < next)
+            {
+                const int fillSize = unitWidth * (next - curr);
+                memset(adi, refSample, fillSize * sizeof(pixel));
+                curr = next;
+                adi += fillSize;
+            }
+#endif
+        }
+
+        // pad all other reference samples (each missing unit replicates the
+        // sample just before it in the line buffer)
+        while (curr < totalUnits)
+        {
+            if (!bNeighborFlags[curr]) // samples not available
+            {
+                int numSamplesInCurrUnit = (curr >= leftUnits) ? unitWidth : unitHeight;
+                const pixel refSample = *(adi - 1);
+                for (int i = 0; i < numSamplesInCurrUnit; i++)
+                    adi[i] = refSample;
+
+                adi += numSamplesInCurrUnit;
+                curr++;
+            }
+            else
+            {
+                adi += (curr >= leftUnits) ? unitWidth : unitHeight;
+                curr++;
+            }
+        }
+
+        // Copy processed samples: top row (incl. top-left) into dst[0..refSize)
+        adi = adiLineBuffer + refSize + unitWidth - 2;
+        memcpy(dst, adi, refSize * sizeof(pixel));
+
+        // left column is reversed (line buffer stores it bottom-up)
+        adi = adiLineBuffer + refSize - 1;
+        for (int i = 0; i < (int)refSize - 1; i++)
+            dst[i + refSize] = adi[-(i + 1)];
+    }
+}
+
+/* True when the above-left neighbour exists (and, under constrained
+ * intra prediction, is intra-coded). */
+template<bool cip>
+bool Predict::isAboveLeftAvailable(const CUData& cu, uint32_t partIdxLT)
+{
+    uint32_t alPartIdx;
+    const CUData* alCU = cu.getPUAboveLeft(alPartIdx, partIdxLT);
+    if (!alCU)
+        return false;
+    return !cip || alCU->isIntra(alPartIdx);
+}
+
+/* Record an availability flag for each above-neighbour unit between the
+ * top-left and top-right part indices; returns how many were available. */
+template<bool cip>
+int Predict::isAboveAvailable(const CUData& cu, uint32_t partIdxLT, uint32_t partIdxRT, bool* bValidFlags)
+{
+    const uint32_t rasterBegin = g_zscanToRaster[partIdxLT];
+    const uint32_t rasterEnd = g_zscanToRaster[partIdxRT];
+    int count = 0;
+
+    for (uint32_t raster = rasterBegin; raster <= rasterEnd; raster++, bValidFlags++)
+    {
+        uint32_t abovePartIdx;
+        const CUData* aboveCU = cu.getPUAbove(abovePartIdx, g_rasterToZscan[raster]);
+        const bool ok = aboveCU && (!cip || aboveCU->isIntra(abovePartIdx));
+        *bValidFlags = ok;
+        count += ok ? 1 : 0;
+    }
+
+    return count;
+}
+
+/* Record an availability flag for each left-neighbour unit between the
+ * top-left and bottom-left part indices; flags are written in decreasing
+ * address order to match the bottom-up flag layout. */
+template<bool cip>
+int Predict::isLeftAvailable(const CUData& cu, uint32_t partIdxLT, uint32_t partIdxLB, bool* bValidFlags)
+{
+    const uint32_t rasterBegin = g_zscanToRaster[partIdxLT];
+    const uint32_t rasterEnd = g_zscanToRaster[partIdxLB];
+    const uint32_t rowStep = cu.m_slice->m_sps->numPartInCUSize;
+    int count = 0;
+
+    for (uint32_t raster = rasterBegin; raster <= rasterEnd; raster += rowStep, bValidFlags--) // opposite direction
+    {
+        uint32_t leftPartIdx;
+        const CUData* leftCU = cu.getPULeft(leftPartIdx, g_rasterToZscan[raster]);
+        const bool ok = leftCU && (!cip || leftCU->isIntra(leftPartIdx));
+        *bValidFlags = ok;
+        count += ok ? 1 : 0;
+    }
+
+    return count;
+}
+
+/* Record an availability flag for each of numUnits above-right neighbour
+ * units; returns how many were available. */
+template<bool cip>
+int Predict::isAboveRightAvailable(const CUData& cu, uint32_t partIdxRT, bool* bValidFlags, uint32_t numUnits)
+{
+    int count = 0;
+
+    for (uint32_t unit = 1; unit <= numUnits; unit++, bValidFlags++)
+    {
+        uint32_t arPartIdx;
+        const CUData* arCU = cu.getPUAboveRightAdi(arPartIdx, partIdxRT, unit);
+        const bool ok = arCU && (!cip || arCU->isIntra(arPartIdx));
+        *bValidFlags = ok;
+        count += ok ? 1 : 0;
+    }
+
+    return count;
+}
+
+/* Record an availability flag for each of numUnits below-left neighbour
+ * units; flags are written in decreasing address order (bottom-up layout). */
+template<bool cip>
+int Predict::isBelowLeftAvailable(const CUData& cu, uint32_t partIdxLB, bool* bValidFlags, uint32_t numUnits)
+{
+    int count = 0;
+
+    for (uint32_t unit = 1; unit <= numUnits; unit++, bValidFlags--) // opposite direction
+    {
+        uint32_t blPartIdx;
+        const CUData* blCU = cu.getPUBelowLeftAdi(blPartIdx, partIdxLB, unit);
+        const bool ok = blCU && (!cip || blCU->isIntra(blPartIdx));
+        *bValidFlags = ok;
+        count += ok ? 1 : 0;
+    }
+
+    return count;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/predict.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,124 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Deepthi Nandakumar <deepthi@multicorewareinc.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_PREDICT_H
+#define X265_PREDICT_H
+
+#include "common.h"
+#include "frame.h"
+#include "quant.h"
+#include "shortyuv.h"
+#include "yuv.h"
+
+namespace X265_NS {
+
+class CUData;
+class Slice;
+struct CUGeom;
+
+/* Geometry of one prediction unit: its CTU, its CU within that CTU, and
+ * its own offset and luma dimensions.  Built once and passed by const ref. */
+struct PredictionUnit
+{
+    uint32_t     ctuAddr;      // raster index of current CTU within its picture
+    uint32_t     cuAbsPartIdx; // z-order offset of current CU within its CTU
+    uint32_t     puAbsPartIdx; // z-order offset of current PU within its CU
+    int          width;        // PU width in luma samples
+    int          height;       // PU height in luma samples
+
+    PredictionUnit(const CUData& cu, const CUGeom& cuGeom, int puIdx);
+};
+
+/* Prediction engine: owns the weighted-prediction scratch buffers and the
+ * intra reference sample arrays, and exposes the inter/intra prediction
+ * entry points used by the encoder. */
+class Predict
+{
+public:
+
+    enum { ADI_BUF_STRIDE = (2 * MAX_CU_SIZE + 1 + 15) }; // alignment to 16 bytes
+
+    /* Weighted prediction scaling values built from slice parameters (bitdepth scaled) */
+    struct WeightValues
+    {
+        int w, o, offset, shift, round;
+    };
+
+    /* Neighbour availability and unit geometry, produced by
+     * initIntraNeighbors() and consumed by fillReferenceSamples() */
+    struct IntraNeighbors
+    {
+        int      numIntraNeighbor;  // count of available neighbour units
+        int      totalUnits;        // aboveUnits + leftUnits + 1 (top-left)
+        int      aboveUnits;
+        int      leftUnits;
+        int      unitWidth;         // width of one neighbour unit in samples
+        int      unitHeight;        // height of one neighbour unit in samples
+        int      log2TrSize;
+        bool     bNeighborFlags[4 * MAX_NUM_SPU_W + 1];
+    };
+
+    ShortYuv  m_predShortYuv[2]; /* temporary storage for weighted prediction */
+    int16_t*  m_immedVals;
+
+    // Unfiltered/filtered neighbours of the current partition.
+    pixel     intraNeighbourBuf[2][258];
+
+    /* Slice information */
+    int       m_csp;
+    int       m_hChromaShift;
+    int       m_vChromaShift;
+
+    Predict();
+    ~Predict();
+
+    bool allocBuffers(int csp);
+
+    // motion compensation functions
+    void predInterLumaPixel(const PredictionUnit& pu, Yuv& dstYuv, const PicYuv& refPic, const MV& mv) const;
+    void predInterChromaPixel(const PredictionUnit& pu, Yuv& dstYuv, const PicYuv& refPic, const MV& mv) const;
+
+    void predInterLumaShort(const PredictionUnit& pu, ShortYuv& dstSYuv, const PicYuv& refPic, const MV& mv) const;
+    void predInterChromaShort(const PredictionUnit& pu, ShortYuv& dstSYuv, const PicYuv& refPic, const MV& mv) const;
+
+    void addWeightBi(const PredictionUnit& pu, Yuv& predYuv, const ShortYuv& srcYuv0, const ShortYuv& srcYuv1, const WeightValues wp0[3], const WeightValues wp1[3], bool bLuma, bool bChroma) const;
+    void addWeightUni(const PredictionUnit& pu, Yuv& predYuv, const ShortYuv& srcYuv, const WeightValues wp[3], bool bLuma, bool bChroma) const;
+
+    void motionCompensation(const CUData& cu, const PredictionUnit& pu, Yuv& predYuv, bool bLuma, bool bChroma);
+
+    /* Angular Intra */
+    void predIntraLumaAng(uint32_t dirMode, pixel* pred, intptr_t stride, uint32_t log2TrSize);
+    void predIntraChromaAng(uint32_t dirMode, pixel* pred, intptr_t stride, uint32_t log2TrSizeC);
+    void initAdiPattern(const CUData& cu, const CUGeom& cuGeom, uint32_t puAbsPartIdx, const IntraNeighbors& intraNeighbors, int dirMode);
+    void initAdiPatternChroma(const CUData& cu, const CUGeom& cuGeom, uint32_t puAbsPartIdx, const IntraNeighbors& intraNeighbors, uint32_t chromaId);
+
+    /* Intra prediction helper functions */
+    // NOTE: parameter renamed from "IntraNeighbors" which shadowed the type name
+    static void initIntraNeighbors(const CUData& cu, uint32_t absPartIdx, uint32_t tuDepth, bool isLuma, IntraNeighbors *intraNeighbors);
+    static void fillReferenceSamples(const pixel* adiOrigin, intptr_t picStride, const IntraNeighbors& intraNeighbors, pixel dst[258]);
+    template<bool cip>
+    static bool isAboveLeftAvailable(const CUData& cu, uint32_t partIdxLT);
+    template<bool cip>
+    static int  isAboveAvailable(const CUData& cu, uint32_t partIdxLT, uint32_t partIdxRT, bool* bValidFlags);
+    template<bool cip>
+    static int  isLeftAvailable(const CUData& cu, uint32_t partIdxLT, uint32_t partIdxLB, bool* bValidFlags);
+    template<bool cip>
+    static int  isAboveRightAvailable(const CUData& cu, uint32_t partIdxRT, bool* bValidFlags, uint32_t numUnits);
+    template<bool cip>
+    static int  isBelowLeftAvailable(const CUData& cu, uint32_t partIdxLB, bool* bValidFlags, uint32_t numUnits);
+};
+}
+
+#endif // ifndef X265_PREDICT_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/primitives.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,264 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+
+namespace X265_NS {
+// x265 private namespace
+
+/* Maps a (width, height) pair in multiples of 4 samples to the LUMA_WxH
+ * partition enum; 255 marks a pair that is not a valid luma PU size.
+ * Rows are width (4..64), columns are height (4..64) — presumably indexed
+ * as ((w>>2)-1)*16 + ((h>>2)-1); verify against callers. */
+extern const uint8_t lumaPartitionMapTable[] =
+{
+//  4          8          12          16          20  24          28  32          36  40  44  48          52  56  60  64
+    LUMA_4x4,  LUMA_4x8,  255,        LUMA_4x16,  255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, 255,        // 4
+    LUMA_8x4,  LUMA_8x8,  255,        LUMA_8x16,  255, 255,        255, LUMA_8x32,  255, 255, 255, 255,        255, 255, 255, 255,        // 8
+    255,        255,      255,        LUMA_12x16, 255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, 255,        // 12
+    LUMA_16x4, LUMA_16x8, LUMA_16x12, LUMA_16x16, 255, 255,        255, LUMA_16x32, 255, 255, 255, 255,        255, 255, 255, LUMA_16x64, // 16
+    255,        255,      255,        255,        255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, 255,        // 20
+    255,        255,      255,        255,        255, 255,        255, LUMA_24x32, 255, 255, 255, 255,        255, 255, 255, 255,        // 24
+    255,        255,      255,        255,        255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, 255,        // 28
+    255,        LUMA_32x8, 255,       LUMA_32x16, 255, LUMA_32x24, 255, LUMA_32x32, 255, 255, 255, 255,        255, 255, 255, LUMA_32x64, // 32
+    255,        255,      255,        255,        255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, 255,        // 36
+    255,        255,      255,        255,        255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, 255,        // 40
+    255,        255,      255,        255,        255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, 255,        // 44
+    255,        255,      255,        255,        255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, LUMA_48x64, // 48
+    255,        255,      255,        255,        255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, 255,        // 52
+    255,        255,      255,        255,        255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, 255,        // 56
+    255,        255,      255,        255,        255, 255,        255, 255,        255, 255, 255, 255,        255, 255, 255, 255,        // 60
+    255,        255,      255,        LUMA_64x16, 255, 255,        255, LUMA_64x32, 255, 255, 255, LUMA_64x48, 255, 255, 255, LUMA_64x64  // 64
+};
+
+/* the "authoritative" set of encoder primitives */
+EncoderPrimitives primitives;
+
+/* forward declarations of the per-module C primitive installers */
+void setupPixelPrimitives_c(EncoderPrimitives &p);
+void setupDCTPrimitives_c(EncoderPrimitives &p);
+void setupFilterPrimitives_c(EncoderPrimitives &p);
+void setupIntraPrimitives_c(EncoderPrimitives &p);
+void setupLoopFilterPrimitives_c(EncoderPrimitives &p);
+void setupSaoPrimitives_c(EncoderPrimitives &p);
+
+/* Install the portable C reference implementation of every primitive into
+ * p; each helper is defined in the source file noted beside its call. */
+void setupCPrimitives(EncoderPrimitives &p)
+{
+    setupPixelPrimitives_c(p);      // pixel.cpp
+    setupDCTPrimitives_c(p);        // dct.cpp
+    setupFilterPrimitives_c(p);     // ipfilter.cpp
+    setupIntraPrimitives_c(p);      // intrapred.cpp
+    setupLoopFilterPrimitives_c(p); // loopfilter.cpp
+    setupSaoPrimitives_c(p);        // sao.cpp
+}
+
+void setupAliasPrimitives(EncoderPrimitives &p)
+{
+#if HIGH_BIT_DEPTH
+    /* at HIGH_BIT_DEPTH, pixel == short so we can alias many primitives */
+    for (int i = 0; i < NUM_CU_SIZES; i++)
+    {
+        p.cu[i].sse_pp = (pixel_sse_t)p.cu[i].sse_ss;
+
+        p.cu[i].copy_ps = (copy_ps_t)p.pu[i].copy_pp;
+        p.cu[i].copy_sp = (copy_sp_t)p.pu[i].copy_pp;
+        p.cu[i].copy_ss = (copy_ss_t)p.pu[i].copy_pp;
+
+        p.chroma[X265_CSP_I420].cu[i].copy_ps = (copy_ps_t)p.chroma[X265_CSP_I420].pu[i].copy_pp;
+        p.chroma[X265_CSP_I420].cu[i].copy_sp = (copy_sp_t)p.chroma[X265_CSP_I420].pu[i].copy_pp;
+        p.chroma[X265_CSP_I420].cu[i].copy_ss = (copy_ss_t)p.chroma[X265_CSP_I420].pu[i].copy_pp;
+
+        p.chroma[X265_CSP_I422].cu[i].copy_ps = (copy_ps_t)p.chroma[X265_CSP_I422].pu[i].copy_pp;
+        p.chroma[X265_CSP_I422].cu[i].copy_sp = (copy_sp_t)p.chroma[X265_CSP_I422].pu[i].copy_pp;
+        p.chroma[X265_CSP_I422].cu[i].copy_ss = (copy_ss_t)p.chroma[X265_CSP_I422].pu[i].copy_pp;
+    }
+#endif
+
+    /* alias chroma 4:4:4 from luma primitives (all but chroma filters) */
+
+    p.chroma[X265_CSP_I444].cu[BLOCK_4x4].sa8d = NULL;
+
+    for (int i = 0; i < NUM_PU_SIZES; i++)
+    {
+        p.chroma[X265_CSP_I444].pu[i].copy_pp = p.pu[i].copy_pp;
+        p.chroma[X265_CSP_I444].pu[i].addAvg  = p.pu[i].addAvg;
+        p.chroma[X265_CSP_I444].pu[i].satd    = p.pu[i].satd;
+        p.chroma[X265_CSP_I444].pu[i].p2s     = p.pu[i].convert_p2s;
+    }
+
+    for (int i = 0; i < NUM_CU_SIZES; i++)
+    {
+        p.chroma[X265_CSP_I444].cu[i].sa8d    = p.cu[i].sa8d;
+        p.chroma[X265_CSP_I444].cu[i].sse_pp  = p.cu[i].sse_pp;
+        p.chroma[X265_CSP_I444].cu[i].sub_ps  = p.cu[i].sub_ps;
+        p.chroma[X265_CSP_I444].cu[i].add_ps  = p.cu[i].add_ps;
+        p.chroma[X265_CSP_I444].cu[i].copy_ps = p.cu[i].copy_ps;
+        p.chroma[X265_CSP_I444].cu[i].copy_sp = p.cu[i].copy_sp;
+        p.chroma[X265_CSP_I444].cu[i].copy_ss = p.cu[i].copy_ss;
+    }
+
+    p.cu[BLOCK_4x4].sa8d = p.pu[LUMA_4x4].satd;
+
+    /* Chroma PU can often use luma satd primitives */
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].satd   = p.pu[LUMA_4x4].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].satd   = p.pu[LUMA_8x8].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].satd = p.pu[LUMA_16x16].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].satd = p.pu[LUMA_32x32].satd;
+
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].satd   = p.pu[LUMA_8x4].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].satd   = p.pu[LUMA_4x8].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].satd  = p.pu[LUMA_16x8].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].satd  = p.pu[LUMA_8x16].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].satd = p.pu[LUMA_32x16].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].satd = p.pu[LUMA_16x32].satd;
+
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].satd = p.pu[LUMA_16x12].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].satd = p.pu[LUMA_12x16].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].satd  = p.pu[LUMA_16x4].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].satd  = p.pu[LUMA_4x16].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].satd = p.pu[LUMA_32x24].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].satd = p.pu[LUMA_24x32].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].satd  = p.pu[LUMA_32x8].satd;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].satd  = p.pu[LUMA_8x32].satd;
+
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].satd   = p.pu[LUMA_4x8].satd;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].satd  = p.pu[LUMA_8x16].satd;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].satd = p.pu[LUMA_16x32].satd;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].satd = p.pu[LUMA_32x64].satd;
+
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].satd   = p.pu[LUMA_4x4].satd;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].satd   = p.pu[LUMA_8x8].satd;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].satd  = p.pu[LUMA_4x16].satd;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].satd = p.pu[LUMA_16x16].satd;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].satd  = p.pu[LUMA_8x32].satd;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].satd = p.pu[LUMA_32x32].satd;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].satd = p.pu[LUMA_16x64].satd;
+
+    //p.chroma[X265_CSP_I422].satd[CHROMA_422_8x12]  = satd4<8, 12>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].satd  = p.pu[LUMA_8x4].satd;
+    //p.chroma[X265_CSP_I422].satd[CHROMA_422_16x24] = satd8<16, 24>;
+    //p.chroma[X265_CSP_I422].satd[CHROMA_422_12x32] = satd4<12, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].satd = p.pu[LUMA_16x8].satd;
+    //p.chroma[X265_CSP_I422].satd[CHROMA_422_4x32]  = satd4<4, 32>;
+    //p.chroma[X265_CSP_I422].satd[CHROMA_422_32x48] = satd8<32, 48>;
+    //p.chroma[X265_CSP_I422].satd[CHROMA_422_24x64] = satd8<24, 64>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].satd = p.pu[LUMA_32x16].satd;
+    //p.chroma[X265_CSP_I422].satd[CHROMA_422_8x64]  = satd8<8, 64>;
+
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_2x2].sa8d = NULL;
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].sa8d = p.pu[LUMA_4x4].satd;
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sa8d = p.cu[BLOCK_8x8].sa8d;
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sa8d = p.cu[BLOCK_16x16].sa8d;
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sa8d = p.cu[BLOCK_32x32].sa8d;
+
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_2x4].sa8d = NULL;
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].sa8d = p.pu[LUMA_4x8].satd;
+
+    /* alias CU copy_pp from square PU copy_pp */
+    for (int i = 0; i < NUM_CU_SIZES; i++)
+    {
+        p.cu[i].copy_pp = p.pu[i].copy_pp;
+
+        for (int c = 0; c < X265_CSP_COUNT; c++)
+            p.chroma[c].cu[i].copy_pp = p.chroma[c].pu[i].copy_pp;
+    }
+
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_2x2].sse_pp = NULL;
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].sse_pp = p.cu[BLOCK_4x4].sse_pp;
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sse_pp = p.cu[BLOCK_8x8].sse_pp;
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sse_pp = p.cu[BLOCK_16x16].sse_pp;
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sse_pp = p.cu[BLOCK_32x32].sse_pp;
+
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_2x4].sse_pp = NULL;
+}
+
+void x265_report_simd(x265_param* param)
+{
+    if (param->logLevel >= X265_LOG_INFO)
+    {
+        int cpuid = param->cpuid;
+
+        char buf[1000];
+        char *p = buf + sprintf(buf, "using cpu capabilities:");
+        char *none = p;
+        for (int i = 0; X265_NS::cpu_names[i].flags; i++)
+        {
+            if (!strcmp(X265_NS::cpu_names[i].name, "SSE")
+                && (cpuid & X265_CPU_SSE2))
+                continue;
+            if (!strcmp(X265_NS::cpu_names[i].name, "SSE2")
+                && (cpuid & (X265_CPU_SSE2_IS_FAST | X265_CPU_SSE2_IS_SLOW)))
+                continue;
+            if (!strcmp(X265_NS::cpu_names[i].name, "SSE3")
+                && (cpuid & X265_CPU_SSSE3 || !(cpuid & X265_CPU_CACHELINE_64)))
+                continue;
+            if (!strcmp(X265_NS::cpu_names[i].name, "SSE4.1")
+                && (cpuid & X265_CPU_SSE42))
+                continue;
+            if (!strcmp(X265_NS::cpu_names[i].name, "BMI1")
+                && (cpuid & X265_CPU_BMI2))
+                continue;
+            if ((cpuid & X265_NS::cpu_names[i].flags) == X265_NS::cpu_names[i].flags
+                && (!i || X265_NS::cpu_names[i].flags != X265_NS::cpu_names[i - 1].flags))
+                p += sprintf(p, " %s", X265_NS::cpu_names[i].name);
+        }
+
+        if (p == none)
+            sprintf(p, " none!");
+        x265_log(param, X265_LOG_INFO, "%s\n", buf);
+    }
+}
+
+void x265_setup_primitives(x265_param *param)
+{
+    if (!primitives.pu[0].sad)
+    {
+        setupCPrimitives(primitives);
+
+        /* We do not want the encoder to use the un-optimized intra all-angles
+         * C references. It is better to call the individual angle functions
+         * instead. We must check for NULL before using this primitive */
+        for (int i = 0; i < NUM_TR_SIZE; i++)
+            primitives.cu[i].intra_pred_allangs = NULL;
+
+#if ENABLE_ASSEMBLY
+        setupInstrinsicPrimitives(primitives, param->cpuid);
+        setupAssemblyPrimitives(primitives, param->cpuid);
+#endif
+
+        setupAliasPrimitives(primitives);
+    }
+
+    x265_report_simd(param);
+}
+}
+
+#if ENABLE_ASSEMBLY
+/* these functions are implemented in assembly. When assembly is not being
+ * compiled, they are unnecessary and can be NOPs */
+#else
+extern "C" {
+int PFX(cpu_cpuid_test)(void) { return 0; }
+void PFX(cpu_emms)(void) {}
+void PFX(cpu_cpuid)(uint32_t, uint32_t *eax, uint32_t *, uint32_t *, uint32_t *) { *eax = 0; }
+void PFX(cpu_xgetbv)(uint32_t, uint32_t *, uint32_t *) {}
+void PFX(cpu_neon_test)(void) {}
+int PFX(cpu_fast_neon_mrc_test)(void) { return 0; }
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/primitives.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,414 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Mandar Gurav <mandar@multicorewareinc.com>
+ *          Deepthi Devaki Akkoorath <deepthidevaki@multicorewareinc.com>
+ *          Mahesh Pittala <mahesh@multicorewareinc.com>
+ *          Rajesh Paulraj <rajesh@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *          Min Chen <chenm003@163.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_PRIMITIVES_H
+#define X265_PRIMITIVES_H
+
+#include "common.h"
+#include "cpu.h"
+
+namespace X265_NS {
+// x265 private namespace
+
+enum LumaPU
+{
+    // Square (the first 5 PUs match the block sizes)
+    LUMA_4x4,   LUMA_8x8,   LUMA_16x16, LUMA_32x32, LUMA_64x64,
+    // Rectangular
+    LUMA_8x4,   LUMA_4x8,
+    LUMA_16x8,  LUMA_8x16,
+    LUMA_32x16, LUMA_16x32,
+    LUMA_64x32, LUMA_32x64,
+    // Asymmetrical (0.75, 0.25)
+    LUMA_16x12, LUMA_12x16, LUMA_16x4,  LUMA_4x16,
+    LUMA_32x24, LUMA_24x32, LUMA_32x8,  LUMA_8x32,
+    LUMA_64x48, LUMA_48x64, LUMA_64x16, LUMA_16x64,
+    NUM_PU_SIZES
+};
+
+enum LumaCU // can be indexed using log2n(width)-2
+{
+    BLOCK_4x4,
+    BLOCK_8x8,
+    BLOCK_16x16,
+    BLOCK_32x32,
+    BLOCK_64x64,
+    NUM_CU_SIZES
+};
+
+enum { NUM_TR_SIZE = 4 }; // TU are 4x4, 8x8, 16x16, and 32x32
+
+
+/* Chroma partition sizes. These enums are only a convenience for indexing into
+ * the chroma primitive arrays when instantiating macros or templates. The
+ * chroma function tables should always be indexed by a LumaPU enum when used. */
+enum ChromaPU420
+{
+    CHROMA_420_2x2,   CHROMA_420_4x4,   CHROMA_420_8x8,  CHROMA_420_16x16, CHROMA_420_32x32,
+    CHROMA_420_4x2,   CHROMA_420_2x4,
+    CHROMA_420_8x4,   CHROMA_420_4x8,
+    CHROMA_420_16x8,  CHROMA_420_8x16,
+    CHROMA_420_32x16, CHROMA_420_16x32,
+    CHROMA_420_8x6,   CHROMA_420_6x8,   CHROMA_420_8x2,  CHROMA_420_2x8,
+    CHROMA_420_16x12, CHROMA_420_12x16, CHROMA_420_16x4, CHROMA_420_4x16,
+    CHROMA_420_32x24, CHROMA_420_24x32, CHROMA_420_32x8, CHROMA_420_8x32,
+};
+
+enum ChromaCU420
+{
+    BLOCK_420_2x2,
+    BLOCK_420_4x4,
+    BLOCK_420_8x8,
+    BLOCK_420_16x16,
+    BLOCK_420_32x32
+};
+
+enum ChromaPU422
+{
+    CHROMA_422_2x4,   CHROMA_422_4x8,   CHROMA_422_8x16,  CHROMA_422_16x32, CHROMA_422_32x64,
+    CHROMA_422_4x4,   CHROMA_422_2x8,
+    CHROMA_422_8x8,   CHROMA_422_4x16,
+    CHROMA_422_16x16, CHROMA_422_8x32,
+    CHROMA_422_32x32, CHROMA_422_16x64,
+    CHROMA_422_8x12,  CHROMA_422_6x16,  CHROMA_422_8x4,   CHROMA_422_2x16,
+    CHROMA_422_16x24, CHROMA_422_12x32, CHROMA_422_16x8,  CHROMA_422_4x32,
+    CHROMA_422_32x48, CHROMA_422_24x64, CHROMA_422_32x16, CHROMA_422_8x64,
+};
+
+enum ChromaCU422
+{
+    BLOCK_422_2x4,
+    BLOCK_422_4x8,
+    BLOCK_422_8x16,
+    BLOCK_422_16x32,
+    BLOCK_422_32x64
+};
+
+typedef int  (*pixelcmp_t)(const pixel* fenc, intptr_t fencstride, const pixel* fref, intptr_t frefstride); // fenc is aligned
+typedef int  (*pixelcmp_ss_t)(const int16_t* fenc, intptr_t fencstride, const int16_t* fref, intptr_t frefstride);
+typedef sse_ret_t (*pixel_sse_t)(const pixel* fenc, intptr_t fencstride, const pixel* fref, intptr_t frefstride); // fenc is aligned
+typedef sse_ret_t (*pixel_sse_ss_t)(const int16_t* fenc, intptr_t fencstride, const int16_t* fref, intptr_t frefstride);
+typedef int  (*pixel_ssd_s_t)(const int16_t* fenc, intptr_t fencstride);
+typedef void (*pixelcmp_x4_t)(const pixel* fenc, const pixel* fref0, const pixel* fref1, const pixel* fref2, const pixel* fref3, intptr_t frefstride, int32_t* res);
+typedef void (*pixelcmp_x3_t)(const pixel* fenc, const pixel* fref0, const pixel* fref1, const pixel* fref2, intptr_t frefstride, int32_t* res);
+typedef void (*blockfill_s_t)(int16_t* dst, intptr_t dstride, int16_t val);
+
+typedef void (*intra_pred_t)(pixel* dst, intptr_t dstStride, const pixel *srcPix, int dirMode, int bFilter);
+typedef void (*intra_allangs_t)(pixel *dst, pixel *refPix, pixel *filtPix, int bLuma);
+typedef void (*intra_filter_t)(const pixel* references, pixel* filtered);
+
+typedef void (*cpy2Dto1D_shl_t)(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+typedef void (*cpy2Dto1D_shr_t)(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+typedef void (*cpy1Dto2D_shl_t)(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift);
+typedef void (*cpy1Dto2D_shr_t)(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift);
+typedef uint32_t (*copy_cnt_t)(int16_t* coeff, const int16_t* residual, intptr_t resiStride);
+
+typedef void (*dct_t)(const int16_t* src, int16_t* dst, intptr_t srcStride);
+typedef void (*idct_t)(const int16_t* src, int16_t* dst, intptr_t dstStride);
+typedef void (*denoiseDct_t)(int16_t* dctCoef, uint32_t* resSum, const uint16_t* offset, int numCoeff);
+
+typedef void (*calcresidual_t)(const pixel* fenc, const pixel* pred, int16_t* residual, intptr_t stride);
+typedef void (*transpose_t)(pixel* dst, const pixel* src, intptr_t stride);
+typedef uint32_t (*quant_t)(const int16_t* coef, const int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff);
+typedef uint32_t (*nquant_t)(const int16_t* coef, const int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff);
+typedef void (*dequant_scaling_t)(const int16_t* src, const int32_t* dequantCoef, int16_t* dst, int num, int mcqp_miper, int shift);
+typedef void (*dequant_normal_t)(const int16_t* quantCoef, int16_t* coef, int num, int scale, int shift);
+typedef int(*count_nonzero_t)(const int16_t* quantCoeff);
+typedef void (*weightp_pp_t)(const pixel* src, pixel* dst, intptr_t stride, int width, int height, int w0, int round, int shift, int offset);
+typedef void (*weightp_sp_t)(const int16_t* src, pixel* dst, intptr_t srcStride, intptr_t dstStride, int width, int height, int w0, int round, int shift, int offset);
+typedef void (*scale1D_t)(pixel* dst, const pixel* src);
+typedef void (*scale2D_t)(pixel* dst, const pixel* src, intptr_t stride);
+typedef void (*downscale_t)(const pixel* src0, pixel* dstf, pixel* dsth, pixel* dstv, pixel* dstc,
+                            intptr_t src_stride, intptr_t dst_stride, int width, int height);
+typedef void (*extendCURowBorder_t)(pixel* txt, intptr_t stride, int width, int height, int marginX);
+typedef void (*ssim_4x4x2_core_t)(const pixel* pix1, intptr_t stride1, const pixel* pix2, intptr_t stride2, int sums[2][4]);
+typedef float (*ssim_end4_t)(int sum0[5][4], int sum1[5][4], int width);
+typedef uint64_t (*var_t)(const pixel* pix, intptr_t stride);
+typedef void (*plane_copy_deinterleave_t)(pixel* dstu, intptr_t dstuStride, pixel* dstv, intptr_t dstvStride, const pixel* src, intptr_t srcStride, int w, int h);
+
+typedef void (*filter_pp_t) (const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+typedef void (*filter_hps_t) (const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx, int isRowExt);
+typedef void (*filter_ps_t) (const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx);
+typedef void (*filter_sp_t) (const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+typedef void (*filter_ss_t) (const int16_t* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx);
+typedef void (*filter_hv_pp_t) (const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int idxX, int idxY);
+typedef void (*filter_p2s_t)(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);
+
+typedef void (*copy_pp_t)(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride); // dst is aligned
+typedef void (*copy_sp_t)(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride);
+typedef void (*copy_ps_t)(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+typedef void (*copy_ss_t)(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride);
+
+typedef void (*pixel_sub_ps_t)(int16_t* dst, intptr_t dstride, const pixel* src0, const pixel* src1, intptr_t sstride0, intptr_t sstride1);
+typedef void (*pixel_add_ps_t)(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1);
+typedef void (*pixelavg_pp_t)(pixel* dst, intptr_t dstride, const pixel* src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int weight);
+typedef void (*addAvg_t)(const int16_t* src0, const int16_t* src1, pixel* dst, intptr_t src0Stride, intptr_t src1Stride, intptr_t dstStride);
+
+typedef void (*saoCuOrgE0_t)(pixel* rec, int8_t* offsetEo, int width, int8_t* signLeft, intptr_t stride);
+typedef void (*saoCuOrgE1_t)(pixel* rec, int8_t* upBuff1, int8_t* offsetEo, intptr_t stride, int width);
+typedef void (*saoCuOrgE2_t)(pixel* rec, int8_t* pBufft, int8_t* pBuff1, int8_t* offsetEo, int lcuWidth, intptr_t stride);
+typedef void (*saoCuOrgE3_t)(pixel* rec, int8_t* upBuff1, int8_t* m_offsetEo, intptr_t stride, int startX, int endX);
+typedef void (*saoCuOrgB0_t)(pixel* rec, const int8_t* offsetBo, int ctuWidth, int ctuHeight, intptr_t stride);
+
+typedef void (*saoCuStatsBO_t)(const pixel *fenc, const pixel *rec, intptr_t stride, int endX, int endY, int32_t *stats, int32_t *count);
+typedef void (*saoCuStatsE0_t)(const pixel *fenc, const pixel *rec, intptr_t stride, int endX, int endY, int32_t *stats, int32_t *count);
+typedef void (*saoCuStatsE1_t)(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count);
+typedef void (*saoCuStatsE2_t)(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int8_t *upBuff, int endX, int endY, int32_t *stats, int32_t *count);
+typedef void (*saoCuStatsE3_t)(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count);
+
+typedef void (*sign_t)(int8_t *dst, const pixel *src1, const pixel *src2, const int endX);
+typedef void (*planecopy_cp_t) (const uint8_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift);
+typedef void (*planecopy_sp_t) (const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask);
+typedef pixel (*planeClipAndMax_t)(pixel *src, intptr_t stride, int width, int height, uint64_t *outsum, const pixel minPix, const pixel maxPix);
+
+typedef void (*cutree_propagate_cost) (int* dst, const uint16_t* propagateIn, const int32_t* intraCosts, const uint16_t* interCosts, const int32_t* invQscales, const double* fpsFactor, int len);
+
+typedef int (*scanPosLast_t)(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* scanCG4x4, const int trSize);
+typedef uint32_t (*findPosFirstLast_t)(const int16_t *dstCoeff, const intptr_t trSize, const uint16_t scanTbl[16]);
+
+typedef uint32_t (*costCoeffNxN_t)(const uint16_t *scan, const coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, const uint8_t *tabSigCtx, uint32_t scanFlagMask, uint8_t *baseCtx, int offset, int scanPosSigOff, int subPosBase);
+typedef uint32_t (*costCoeffRemain_t)(uint16_t *absCoeff, int numNonZero, int idx);
+typedef uint32_t (*costC1C2Flag_t)(uint16_t *absCoeff, intptr_t numC1Flag, uint8_t *baseCtxMod, intptr_t ctxOffset);
+
+/* Function pointers to optimized encoder primitives. Each pointer can reference
+ * either an assembly routine, a SIMD intrinsic primitive, or a C function */
+struct EncoderPrimitives
+{
+    /* These primitives can be used for any sized prediction unit (from 4x4 to
+     * 64x64, square, rectangular - 50/50 or asymmetrical - 25/75) and are
+     * generally restricted to motion estimation and motion compensation (inter
+     * prediction). Note that the 4x4 PU can only be used for intra, which is
+     * really a 4x4 TU, so at most copy_pp and satd will use 4x4. This array is
+     * indexed by LumaPU values, which can be retrieved by partitionFromSizes() */
+    struct PU
+    {
+        pixelcmp_t     sad;         // Sum of Absolute Differences
+        pixelcmp_x3_t  sad_x3;      // Sum of Absolute Differences, 3 mv offsets at once
+        pixelcmp_x4_t  sad_x4;      // Sum of Absolute Differences, 4 mv offsets at once
+        pixelcmp_t     satd;        // Sum of Absolute Transformed Differences (4x4 Hadamard)
+
+        filter_pp_t    luma_hpp;    // 8-tap luma motion compensation interpolation filters
+        filter_hps_t   luma_hps;
+        filter_pp_t    luma_vpp;
+        filter_ps_t    luma_vps;
+        filter_sp_t    luma_vsp;
+        filter_ss_t    luma_vss;
+        filter_hv_pp_t luma_hvpp;   // combines hps + vsp
+
+        pixelavg_pp_t  pixelavg_pp; // quick bidir using pixels (borrowed from x264)
+        addAvg_t       addAvg;      // bidir motion compensation, uses 16bit values
+
+        copy_pp_t      copy_pp;
+        filter_p2s_t   convert_p2s;
+    }
+    pu[NUM_PU_SIZES];
+
+    /* These primitives can be used for square TU blocks (4x4 to 32x32) or
+     * possibly square CU blocks (8x8 to 64x64). Some primitives are used for
+     * both CU and TU so we merge them into one array that is indexed uniformly.
+     * This keeps the index logic uniform and simple and improves cache
+     * coherency. CU only primitives will leave 4x4 pointers NULL while TU only
+     * primitives will leave 64x64 pointers NULL.  Indexed by LumaCU */
+    struct CU
+    {
+        dct_t           dct;
+        idct_t          idct;
+        calcresidual_t  calcresidual;
+        pixel_sub_ps_t  sub_ps;
+        pixel_add_ps_t  add_ps;
+        blockfill_s_t   blockfill_s;   // block fill, for DC transforms
+        copy_cnt_t      copy_cnt;      // copy coeff while counting non-zero
+        count_nonzero_t count_nonzero;
+        cpy2Dto1D_shl_t cpy2Dto1D_shl;
+        cpy2Dto1D_shr_t cpy2Dto1D_shr;
+        cpy1Dto2D_shl_t cpy1Dto2D_shl;
+        cpy1Dto2D_shr_t cpy1Dto2D_shr;
+
+        copy_sp_t       copy_sp;
+        copy_ps_t       copy_ps;
+        copy_ss_t       copy_ss;
+        copy_pp_t       copy_pp;       // alias to pu[].copy_pp
+
+        var_t           var;           // block internal variance
+
+        pixel_sse_t     sse_pp;        // Sum of Square Error (pixel, pixel) fenc alignment not assumed
+        pixel_sse_ss_t  sse_ss;        // Sum of Square Error (short, short) fenc alignment not assumed
+        pixelcmp_t      psy_cost_pp;   // difference in AC energy between two pixel blocks
+        pixelcmp_ss_t   psy_cost_ss;   // difference in AC energy between two signed residual blocks
+        pixel_ssd_s_t   ssd_s;         // Sum of Square Error (residual coeff to self)
+        pixelcmp_t      sa8d;          // Sum of Transformed Differences (8x8 Hadamard), uses satd for 4x4 intra TU
+
+        transpose_t     transpose;     // transpose pixel block; for use with intra all-angs
+        intra_allangs_t intra_pred_allangs;
+        intra_filter_t  intra_filter;
+        intra_pred_t    intra_pred[NUM_INTRA_MODE];
+    }
+    cu[NUM_CU_SIZES];
+
+    /* These remaining primitives work on either fixed block sizes or take
+     * block dimensions as arguments and thus do not belong in either the PU or
+     * the CU arrays */
+    dct_t                 dst4x4;
+    idct_t                idst4x4;
+
+    quant_t               quant;
+    nquant_t              nquant;
+    dequant_scaling_t     dequant_scaling;
+    dequant_normal_t      dequant_normal;
+    denoiseDct_t          denoiseDct;
+    scale1D_t             scale1D_128to64;
+    scale2D_t             scale2D_64to32;
+
+    ssim_4x4x2_core_t     ssim_4x4x2_core;
+    ssim_end4_t           ssim_end_4;
+
+    sign_t                sign;
+    saoCuOrgE0_t          saoCuOrgE0;
+
+    /* To avoid the overhead in avx2 optimization in handling width=16, SAO_E0_1 is split
+     * into two parts: saoCuOrgE1, saoCuOrgE1_2Rows */
+    saoCuOrgE1_t          saoCuOrgE1, saoCuOrgE1_2Rows;
+
+    // saoCuOrgE2[0] is used for width<=16 and saoCuOrgE2[1] is used for width > 16.
+    saoCuOrgE2_t          saoCuOrgE2[2];
+
+    /* In avx2 optimization, two rows cannot be handled simultaneously since it requires 
+     * a pixel from the previous row. So, saoCuOrgE3[0] is used for width<=16 and 
+     * saoCuOrgE3[1] is used for width > 16. */
+    saoCuOrgE3_t          saoCuOrgE3[2];
+    saoCuOrgB0_t          saoCuOrgB0;
+
+    saoCuStatsBO_t        saoCuStatsBO;
+    saoCuStatsE0_t        saoCuStatsE0;
+    saoCuStatsE1_t        saoCuStatsE1;
+    saoCuStatsE2_t        saoCuStatsE2;
+    saoCuStatsE3_t        saoCuStatsE3;
+
+    downscale_t           frameInitLowres;
+    cutree_propagate_cost propagateCost;
+
+    extendCURowBorder_t   extendRowBorder;
+    planecopy_cp_t        planecopy_cp;
+    planecopy_sp_t        planecopy_sp;
+    planecopy_sp_t        planecopy_sp_shl;
+    planeClipAndMax_t     planeClipAndMax;
+
+    weightp_sp_t          weight_sp;
+    weightp_pp_t          weight_pp;
+
+
+    scanPosLast_t         scanPosLast;
+    findPosFirstLast_t    findPosFirstLast;
+
+    costCoeffNxN_t        costCoeffNxN;
+    costCoeffRemain_t     costCoeffRemain;
+    costC1C2Flag_t        costC1C2Flag;
+
+
+    /* There is one set of chroma primitives per color space. An encoder will
+     * have just a single color space and thus it will only ever use one entry
+     * in this array. However we always fill all entries in the array in case
+     * multiple encoders with different color spaces share the primitive table
+     * in a single process. Note that 4:2:0 PU and CU are 1/2 width and 1/2
+     * height of their luma counterparts. 4:2:2 PU and CU are 1/2 width and full
+     * height, while 4:4:4 directly uses the luma block sizes and shares luma
+     * primitives for all cases except for the interpolation filters. 4:4:4
+     * interpolation filters have luma partition sizes but are only 4-tap. */
+    struct Chroma
+    {
+        /* Chroma prediction unit primitives. Indexed by LumaPU */
+        struct PUChroma
+        {
+            pixelcmp_t   satd;      // if chroma PU is not multiple of 4x4, will be NULL
+            filter_pp_t  filter_vpp;
+            filter_ps_t  filter_vps;
+            filter_sp_t  filter_vsp;
+            filter_ss_t  filter_vss;
+            filter_pp_t  filter_hpp;
+            filter_hps_t filter_hps;
+            addAvg_t     addAvg;
+            copy_pp_t    copy_pp;
+            filter_p2s_t p2s;
+
+        }
+        pu[NUM_PU_SIZES];
+
+        /* Chroma transform and coding unit primitives. Indexed by LumaCU */
+        struct CUChroma
+        {
+            pixelcmp_t     sa8d;    // if chroma CU is not multiple of 8x8, will use satd
+            pixel_sse_t    sse_pp;
+            pixel_sub_ps_t sub_ps;
+            pixel_add_ps_t add_ps;
+
+            copy_ps_t      copy_ps;
+            copy_sp_t      copy_sp;
+            copy_ss_t      copy_ss;
+            copy_pp_t      copy_pp;
+        }
+        cu[NUM_CU_SIZES];
+
+    }
+    chroma[X265_CSP_COUNT];
+};
+
+/* This copy of the table is what gets used by the encoder */
+extern EncoderPrimitives primitives;
+
+/* Returns a LumaPU enum for the given size, always expected to return a valid enum */
+inline int partitionFromSizes(int width, int height)
+{
+    X265_CHECK(((width | height) & ~(4 | 8 | 16 | 32 | 64)) == 0, "Invalid block width/height\n");
+    extern const uint8_t lumaPartitionMapTable[];
+    int w = (width >> 2) - 1;
+    int h = (height >> 2) - 1;
+    int part = (int)lumaPartitionMapTable[(w << 4) + h];
+    X265_CHECK(part != 255, "Invalid block width %d height %d\n", width, height);
+    return part;
+}
+
+inline int partitionFromLog2Size(int log2Size)
+{
+    X265_CHECK(2 <= log2Size && log2Size <= 6, "Invalid block size\n");
+    return log2Size - 2;
+}
+
+void setupCPrimitives(EncoderPrimitives &p);
+void setupInstrinsicPrimitives(EncoderPrimitives &p, int cpuMask);
+void setupAssemblyPrimitives(EncoderPrimitives &p, int cpuMask);
+void setupAliasPrimitives(EncoderPrimitives &p);
+}
+
+#if !EXPORT_C_API
+extern const int   PFX(max_bit_depth);
+extern const char* PFX(version_str);
+extern const char* PFX(build_info_str);
+#endif
+
+#endif // ifndef X265_PRIMITIVES_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/quant.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1431 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "quant.h"
+#include "framedata.h"
+#include "entropy.h"
+#include "yuv.h"
+#include "cudata.h"
+#include "contexts.h"
+
+using namespace X265_NS;
+
+#define SIGN(x,y) ((x^(y >> 31))-(y >> 31))
+
+namespace {
+
+/* Per coefficient-group rate/distortion statistics accumulated while costing
+ * one 4x4 coding group during RDOQ */
+struct coeffGroupRDStats
+{
+    int     nnzBeforePos0;     /* indicates coeff other than pos 0 are coded */
+    int64_t codedLevelAndDist; /* distortion and level cost of coded coefficients */
+    int64_t uncodedDist;       /* uncoded distortion cost of coded coefficients */
+    int64_t sigCost;           /* cost of signaling significant coeff bitmap */
+    int64_t sigCost0;          /* cost of signaling sig coeff bit of coeff 0 */
+};
+
+/* Branchless integer min: (x - y) >> (bits-1) yields an all-ones mask when
+ * x < y, selecting (x - y) to add to y; otherwise the mask is zero */
+inline int fastMin(int x, int y)
+{
+    return y + ((x - y) & ((x - y) >> (sizeof(int) * CHAR_BIT - 1))); // min(x, y)
+}
+
+/* Estimate the rate (FIX15, i.e. 1/32768-bit units via the << 15 shifts below)
+ * of coding absolute level 'absLevel'.
+ *   diffLevel      - remaining level after the context-coded flags; negative
+ *                    means the level is fully covered by the gt1/gt2 flags
+ *   greaterOneBits - estimated bits of the greater-than-1 flag [0]=off,[1]=on
+ *   levelAbsBits   - estimated bits of the greater-than-2 flag
+ *   absGoRice      - Golomb-Rice parameter (0..4)
+ *   maxVlc         - largest symbol codeable without the exp-Golomb escape
+ *   c1c2Idx        - bit0: gt1 flag is coded; bit1: gt2 flag is coded */
+inline int getICRate(uint32_t absLevel, int32_t diffLevel, const int* greaterOneBits, const int* levelAbsBits, const uint32_t absGoRice, const uint32_t maxVlc, uint32_t c1c2Idx)
+{
+    X265_CHECK(c1c2Idx <= 3, "c1c2Idx check failure\n");
+    X265_CHECK(absGoRice <= 4, "absGoRice check failure\n");
+    if (!absLevel)
+    {
+        X265_CHECK(diffLevel < 0, "diffLevel check failure\n");
+        return 0;
+    }
+    int rate = 0;
+
+    if (diffLevel < 0)
+    {
+        /* level is 1 or 2: only the context-coded flags contribute */
+        X265_CHECK(absLevel <= 2, "absLevel check failure\n");
+        rate += greaterOneBits[(absLevel == 2)];
+
+        if (absLevel == 2)
+            rate += levelAbsBits[0];
+    }
+    else
+    {
+        uint32_t symbol = diffLevel;
+        bool expGolomb = (symbol > maxVlc);
+
+        if (expGolomb)
+        {
+            absLevel = symbol - maxVlc;
+
+            // NOTE: mapping to x86 hardware instruction BSR
+            unsigned long size;
+            CLZ(size, absLevel);
+            int egs = size * 2 + 1;  /* exp-Golomb code length for the escape part */
+
+            rate += egs << 15;
+
+            // NOTE: in here, expGolomb=true means (symbol >= maxVlc + 1)
+            X265_CHECK(fastMin(symbol, (maxVlc + 1)) == (int)maxVlc + 1, "min check failure\n");
+            symbol = maxVlc + 1;
+        }
+
+        /* Golomb-Rice prefix + suffix bins, capped at 8 */
+        uint32_t prefLen = (symbol >> absGoRice) + 1;
+        uint32_t numBins = fastMin(prefLen + absGoRice, 8 /* g_goRicePrefixLen[absGoRice] + absGoRice */);
+
+        rate += numBins << 15;
+
+        if (c1c2Idx & 1)
+            rate += greaterOneBits[1];
+
+        if (c1c2Idx == 3)
+            rate += levelAbsBits[1];
+    }
+    return rate;
+}
+
+#if CHECKED_BUILD || _DEBUG
+/* Debug-build reference: rate of a level fully covered by the gt1/gt2 flags
+ * (the diffLevel < 0 case of getICRate); used only in X265_CHECK validation */
+inline int getICRateNegDiff(uint32_t absLevel, const int* greaterOneBits, const int* levelAbsBits)
+{
+    X265_CHECK(absLevel <= 2, "absLevel check failure\n");
+
+    int rate;
+    if (absLevel == 0)
+        rate = 0;
+    else if (absLevel == 2)
+        rate = greaterOneBits[1] + levelAbsBits[0];
+    else
+        rate = greaterOneBits[0];
+    return rate;
+}
+#endif
+
+/* Fast-path rate estimate for levels known to need no exp-Golomb escape and
+ * no gt1/gt2 flag bits: only the Golomb-Rice bins are counted (FIX15 units) */
+inline int getICRateLessVlc(uint32_t absLevel, int32_t diffLevel, const uint32_t absGoRice)
+{
+    X265_CHECK(absGoRice <= 4, "absGoRice check failure\n");
+    if (!absLevel)
+    {
+        X265_CHECK(diffLevel < 0, "diffLevel check failure\n");
+        return 0;
+    }
+    int rate;
+
+    uint32_t symbol = diffLevel;
+    uint32_t prefLen = (symbol >> absGoRice) + 1;
+    uint32_t numBins = fastMin(prefLen + absGoRice, 8 /* g_goRicePrefixLen[absGoRice] + absGoRice */);
+
+    rate = numBins << 15;
+
+    return rate;
+}
+
+/* Calculates the cost for specific absolute transform level (FIX15 units).
+ * Unlike getICRate, the escape case here computes the exact combined
+ * Rice-prefix + exp-Golomb length instead of capping at maxVlc. */
+inline uint32_t getICRateCost(uint32_t absLevel, int32_t diffLevel, const int* greaterOneBits, const int* levelAbsBits, uint32_t absGoRice, uint32_t c1c2Idx)
+{
+    X265_CHECK(absLevel, "absLevel should not be zero\n");
+
+    if (diffLevel < 0)
+    {
+        /* level 1 or 2: cost is the context-coded flag bits only */
+        X265_CHECK((absLevel == 1) || (absLevel == 2), "absLevel range check failure\n");
+
+        uint32_t rate = greaterOneBits[(absLevel == 2)];
+        if (absLevel == 2)
+            rate += levelAbsBits[0];
+        return rate;
+    }
+    else
+    {
+        uint32_t rate;
+        uint32_t symbol = diffLevel;
+        if ((symbol >> absGoRice) < COEF_REMAIN_BIN_REDUCTION)
+        {
+            /* plain Golomb-Rice code: unary prefix + absGoRice suffix bits */
+            uint32_t length = symbol >> absGoRice;
+            rate = (length + 1 + absGoRice) << 15;
+        }
+        else
+        {
+            /* exp-Golomb escape for the remainder beyond the Rice range */
+            uint32_t length = 0;
+            symbol = (symbol >> absGoRice) - COEF_REMAIN_BIN_REDUCTION;
+            if (symbol)
+            {
+                unsigned long idx;
+                CLZ(idx, symbol + 1);
+                length = idx;
+            }
+
+            rate = (COEF_REMAIN_BIN_REDUCTION + length + absGoRice + 1 + length) << 15;
+        }
+        if (c1c2Idx & 1)
+            rate += greaterOneBits[1];
+        if (c1c2Idx == 3)
+            rate += levelAbsBits[1];
+        return rate;
+    }
+}
+
+}
+
+/* Constructor: null all buffer pointers; allocation is deferred to init()
+ * and allocNoiseReduction() so construction cannot fail */
+Quant::Quant()
+{
+    m_resiDctCoeff = NULL;
+    m_fencDctCoeff = NULL;
+    m_fencShortBuf = NULL;
+    m_frameNr      = NULL;
+    m_nr           = NULL;
+}
+
+/* Allocate working DCT buffers and record quantization configuration.
+ * Returns false if any allocation failed (caller must treat the object as
+ * unusable). m_fencDctCoeff shares the tail half of the m_resiDctCoeff
+ * allocation, so only m_resiDctCoeff and m_fencShortBuf are checked/freed. */
+bool Quant::init(int rdoqLevel, double psyScale, const ScalingList& scalingList, Entropy& entropy)
+{
+    m_entropyCoder = &entropy;
+    m_rdoqLevel    = rdoqLevel;
+    m_psyRdoqScale = (int32_t)(psyScale * 256.0);  /* fixed-point 8.8 scale */
+    X265_CHECK((psyScale * 256.0) < (double)MAX_INT, "psyScale value too large\n");
+    m_scalingList  = &scalingList;
+    m_resiDctCoeff = X265_MALLOC(int16_t, MAX_TR_SIZE * MAX_TR_SIZE * 2);
+    m_fencDctCoeff = m_resiDctCoeff + (MAX_TR_SIZE * MAX_TR_SIZE);
+    m_fencShortBuf = X265_MALLOC(int16_t, MAX_TR_SIZE * MAX_TR_SIZE);
+
+    return m_resiDctCoeff && m_fencShortBuf;
+}
+
+/* Allocate one zero-initialized NoiseReduction state per frame encoder thread.
+ * Returns false on allocation failure. */
+bool Quant::allocNoiseReduction(const x265_param& param)
+{
+    m_frameNr = X265_MALLOC(NoiseReduction, param.frameNumThreads);
+    if (m_frameNr)
+        memset(m_frameNr, 0, sizeof(NoiseReduction) * param.frameNumThreads);
+    else
+        return false;
+    return true;
+}
+
+/* Release buffers allocated by init()/allocNoiseReduction(); m_fencDctCoeff
+ * is not freed separately because it aliases the second half of m_resiDctCoeff */
+Quant::~Quant()
+{
+    X265_FREE(m_frameNr);
+    X265_FREE(m_resiDctCoeff);
+    X265_FREE(m_fencShortBuf);
+}
+
+/* Configure per-CTU QP state: select this frame encoder's noise-reduction
+ * context (if allocated) and derive luma/chroma QP parameters, applying the
+ * PPS chroma QP offsets for Cb and Cr */
+void Quant::setQPforQuant(const CUData& ctu, int qp)
+{
+    m_nr = m_frameNr ? &m_frameNr[ctu.m_encData->m_frameEncoderID] : NULL;
+    m_qpParam[TEXT_LUMA].setQpParam(qp + QP_BD_OFFSET);
+    setChromaQP(qp + ctu.m_slice->m_pps->chromaQpOffset[0], TEXT_CHROMA_U, ctu.m_chromaFormat);
+    setChromaQP(qp + ctu.m_slice->m_pps->chromaQpOffset[1], TEXT_CHROMA_V, ctu.m_chromaFormat);
+}
+
+/* Derive the chroma QP for one component: clip the offset-adjusted QP, then
+ * for QP >= 30 remap through g_chromaScale (4:2:0 only) or clamp to the
+ * spec maximum for other chroma formats */
+void Quant::setChromaQP(int qpin, TextType ttype, int chFmt)
+{
+    int qp = x265_clip3(-QP_BD_OFFSET, 57, qpin);
+    if (qp >= 30)
+    {
+        if (chFmt == X265_CSP_I420)
+            qp = g_chromaScale[qp];
+        else
+            qp = X265_MIN(qp, QP_MAX_SPEC);
+    }
+    m_qpParam[ttype].setQpParam(qp + QP_BD_OFFSET);
+}
+
+/* To minimize the distortion only. No rate is considered */
+/* Sign bit hiding for hard-decision quantization: in each 4x4 coding group
+ * whose first/last nonzero span is wide enough, the sign of the first nonzero
+ * coefficient is inferred from the parity of the group's absolute sum; when
+ * the parity disagrees, adjust the single coefficient whose deltaU says the
+ * +/-1 change costs the least distortion. Returns the updated nonzero count. */
+uint32_t Quant::signBitHidingHDQ(int16_t* coeff, int32_t* deltaU, uint32_t numSig, const TUEntropyCodingParameters &codeParams, uint32_t log2TrSize)
+{
+    uint32_t trSize = 1 << log2TrSize;
+    const uint16_t* scan = codeParams.scan;
+
+    uint8_t coeffNum[MLS_GRP_NUM];      // value range[0, 16]
+    uint16_t coeffSign[MLS_GRP_NUM];    // bit mask map for non-zero coeff sign
+    uint16_t coeffFlag[MLS_GRP_NUM];    // bit mask map for non-zero coeff
+
+#if CHECKED_BUILD || _DEBUG
+    // clean output buffer, the asm version of scanPosLast Never output anything after latest non-zero coeff group
+    memset(coeffNum, 0, sizeof(coeffNum));
+    memset(coeffSign, 0, sizeof(coeffNum));
+    memset(coeffFlag, 0, sizeof(coeffNum));
+#endif
+    const int lastScanPos = primitives.scanPosLast(codeParams.scan, coeff, coeffSign, coeffFlag, coeffNum, numSig, g_scan4x4[codeParams.scanType], trSize);
+    const int cgLastScanPos = (lastScanPos >> LOG2_SCAN_SET_SIZE);
+    unsigned long tmp;
+
+    // the last coeff group needs special handling: shift its flag bitmap so
+    // the last significant coefficient aligns with bit 15
+    const uint32_t correctOffset = 0x0F & (lastScanPos ^ 0xF);
+    coeffFlag[cgLastScanPos] <<= correctOffset;
+
+    /* walk coding groups in reverse scan order */
+    for (int cg = cgLastScanPos; cg >= 0; cg--)
+    {
+        int cgStartPos = cg << LOG2_SCAN_SET_SIZE;
+        int n;
+
+#if CHECKED_BUILD || _DEBUG
+        /* reference scan for the last nonzero position, to validate the
+         * bitmask-derived value below */
+        for (n = SCAN_SET_SIZE - 1; n >= 0; --n)
+            if (coeff[scan[n + cgStartPos]])
+                break;
+        int lastNZPosInCG0 = n;
+#endif
+
+        if (coeffNum[cg] == 0)
+        {
+            X265_CHECK(lastNZPosInCG0 < 0, "all zero block check failure\n");
+            continue;
+        }
+
+#if CHECKED_BUILD || _DEBUG
+        /* reference scan for the first nonzero position */
+        for (n = 0;; n++)
+            if (coeff[scan[n + cgStartPos]])
+                break;
+
+        int firstNZPosInCG0 = n;
+#endif
+
+        /* derive first/last nonzero positions from the flag bitmask via
+         * leading/trailing-zero counts (bit 15 maps to scan position 0) */
+        CLZ(tmp, coeffFlag[cg]);
+        const int firstNZPosInCG = (15 ^ tmp);
+
+        CTZ(tmp, coeffFlag[cg]);
+        const int lastNZPosInCG = (15 ^ tmp);
+
+        X265_CHECK(firstNZPosInCG0 == firstNZPosInCG, "firstNZPosInCG0 check failure\n");
+        X265_CHECK(lastNZPosInCG0 == lastNZPosInCG, "lastNZPosInCG0 check failure\n");
+
+        if (lastNZPosInCG - firstNZPosInCG >= SBH_THRESHOLD)
+        {
+            uint32_t signbit = coeff[scan[cgStartPos + firstNZPosInCG]] > 0 ? 0 : 1;
+            uint32_t absSum = 0;
+
+            for (n = firstNZPosInCG; n <= lastNZPosInCG; n++)
+                absSum += coeff[scan[n + cgStartPos]];
+
+            if (signbit != (absSum & 0x1)) // compare signbit with sum_parity
+            {
+                /* parity mismatch: find the cheapest single +/-1 adjustment */
+                int minCostInc = MAX_INT,  minPos = -1, curCost = MAX_INT;
+                int32_t finalChange = 0, curChange = 0;
+                uint32_t cgFlags = coeffFlag[cg];
+                if (cg == cgLastScanPos)
+                    cgFlags >>= correctOffset;
+
+                for (n = (cg == cgLastScanPos ? lastNZPosInCG : SCAN_SET_SIZE - 1); n >= 0; --n)
+                {
+                    uint32_t blkPos = scan[n + cgStartPos];
+                    X265_CHECK(!!coeff[blkPos] == !!(cgFlags & 1), "non zero coeff check failure\n");
+
+                    if (cgFlags & 1)
+                    {
+                        /* nonzero coefficient: deltaU sign picks raise vs lower */
+                        if (deltaU[blkPos] > 0)
+                        {
+                            curCost = -deltaU[blkPos];
+                            curChange = 1;
+                        }
+                        else
+                        {
+                            if ((cgFlags == 1) && (abs(coeff[blkPos]) == 1))
+                            {
+                                /* lowering the only remaining nonzero (level 1)
+                                 * would flip the hidden sign; forbid it */
+                                X265_CHECK(n == firstNZPosInCG, "firstNZPosInCG position check failure\n");
+                                curCost = MAX_INT;
+                            }
+                            else
+                            {
+                                curCost = deltaU[blkPos];
+                                curChange = -1;
+                            }
+                        }
+                    }
+                    else
+                    {
+                        if (cgFlags == 0)
+                        {
+                            /* raising a zero coeff before the first nonzero would
+                             * move the hidden-sign position; only allow it when the
+                             * pre-quant residual sign matches the hidden sign */
+                            X265_CHECK(n < firstNZPosInCG, "firstNZPosInCG position check failure\n");
+                            uint32_t thisSignBit = m_resiDctCoeff[blkPos] >= 0 ? 0 : 1;
+                            if (thisSignBit != signbit)
+                                curCost = MAX_INT;
+                            else
+                            {
+                                curCost = -deltaU[blkPos];
+                                curChange = 1;
+                            }
+                        }
+                        else
+                        {
+                            curCost = -deltaU[blkPos];
+                            curChange = 1;
+                        }
+                    }
+
+                    if (curCost < minCostInc)
+                    {
+                        minCostInc = curCost;
+                        finalChange = curChange;
+                        minPos = blkPos;
+                    }
+                    cgFlags>>=1;
+                }
+
+                /* do not allow change to violate coeff clamp */
+                if (coeff[minPos] == 32767 || coeff[minPos] == -32768)
+                    finalChange = -1;
+
+                /* keep the significant-coefficient count in sync */
+                if (!coeff[minPos])
+                    numSig++;
+                else if (finalChange == -1 && abs(coeff[minPos]) == 1)
+                    numSig--;
+
+                {
+                    /* apply +/-finalChange in the direction of the residual sign */
+                    const int16_t sigMask = ((int16_t)m_resiDctCoeff[minPos]) >> 15;
+                    coeff[minPos] += ((int16_t)finalChange ^ sigMask) - sigMask;
+                }
+            }
+        }
+    }
+
+    return numSig;
+}
+
+/* Forward transform + quantization of one TU. Returns the number of nonzero
+ * quantized coefficients written to 'coeff'. Handles lossless bypass,
+ * transform-skip, the intra 4x4 luma DST special case, optional psy-rdoq
+ * source DCT, noise-reduction denoising, and either RDOQ or hard-decision
+ * quantization with optional sign-bit hiding. */
+uint32_t Quant::transformNxN(const CUData& cu, const pixel* fenc, uint32_t fencStride, const int16_t* residual, uint32_t resiStride,
+                             coeff_t* coeff, uint32_t log2TrSize, TextType ttype, uint32_t absPartIdx, bool useTransformSkip)
+{
+    const uint32_t sizeIdx = log2TrSize - 2;
+
+    if (cu.m_tqBypass[0])
+    {
+        /* lossless: residual is passed through untransformed/unquantized */
+        X265_CHECK(log2TrSize >= 2 && log2TrSize <= 5, "Block size mistake!\n");
+        return primitives.cu[sizeIdx].copy_cnt(coeff, residual, resiStride);
+    }
+
+    bool isLuma  = ttype == TEXT_LUMA;
+    bool usePsy  = m_psyRdoqScale && isLuma && !useTransformSkip;
+    int transformShift = MAX_TR_DYNAMIC_RANGE - X265_DEPTH - log2TrSize; // Represents scaling through forward transform
+
+    X265_CHECK((cu.m_slice->m_sps->quadtreeTULog2MaxSize >= log2TrSize), "transform size too large\n");
+    if (useTransformSkip)
+    {
+#if X265_DEPTH <= 10
+        X265_CHECK(transformShift >= 0, "invalid transformShift\n");
+        primitives.cu[sizeIdx].cpy2Dto1D_shl(m_resiDctCoeff, residual, resiStride, transformShift);
+#else
+        /* at 12-bit depth transformShift can go negative for large TUs */
+        if (transformShift >= 0)
+            primitives.cu[sizeIdx].cpy2Dto1D_shl(m_resiDctCoeff, residual, resiStride, transformShift);
+        else
+            primitives.cu[sizeIdx].cpy2Dto1D_shr(m_resiDctCoeff, residual, resiStride, -transformShift);
+#endif
+    }
+    else
+    {
+        bool isIntra = cu.isIntra(absPartIdx);
+
+        /* intra 4x4 luma uses DST instead of DCT per the HEVC spec */
+        if (!sizeIdx && isLuma && isIntra)
+            primitives.dst4x4(residual, m_resiDctCoeff, resiStride);
+        else
+            primitives.cu[sizeIdx].dct(residual, m_resiDctCoeff, resiStride);
+
+        /* NOTE: if RDOQ is disabled globally, psy-rdoq is also disabled, so
+         * there is no risk of performing this DCT unnecessarily */
+        if (usePsy)
+        {
+            int trSize = 1 << log2TrSize;
+            /* perform DCT on source pixels for psy-rdoq */
+            primitives.cu[sizeIdx].copy_ps(m_fencShortBuf, trSize, fenc, fencStride);
+            primitives.cu[sizeIdx].dct(m_fencShortBuf, m_fencDctCoeff, trSize);
+        }
+
+        if (m_nr && m_nr->offset)
+        {
+            /* denoise is not applied to intra residual, so DST can be ignored */
+            int cat = sizeIdx + 4 * !isLuma + 8 * !isIntra;
+            int numCoeff = 1 << (log2TrSize * 2);
+            primitives.denoiseDct(m_resiDctCoeff, m_nr->residualSum[cat], m_nr->offset[cat], numCoeff);
+            m_nr->count[cat]++;
+        }
+    }
+
+    if (m_rdoqLevel)
+        return rdoQuant(cu, coeff, log2TrSize, ttype, absPartIdx, usePsy);
+    else
+    {
+        int deltaU[32 * 32];
+
+        int scalingListType = (cu.isIntra(absPartIdx) ? 0 : 3) + ttype;
+        int rem = m_qpParam[ttype].rem;
+        int per = m_qpParam[ttype].per;
+        const int32_t* quantCoeff = m_scalingList->m_quantCoef[log2TrSize - 2][scalingListType][rem];
+
+        int qbits = QUANT_SHIFT + per + transformShift;
+        /* rounding offset: 1/3 for I slices, 1/6 otherwise (171/512, 85/512) */
+        int add = (cu.m_slice->m_sliceType == I_SLICE ? 171 : 85) << (qbits - 9);
+        int numCoeff = 1 << (log2TrSize * 2);
+
+        uint32_t numSig = primitives.quant(m_resiDctCoeff, quantCoeff, deltaU, coeff, qbits, add, numCoeff);
+
+        if (numSig >= 2 && cu.m_slice->m_pps->bSignHideEnabled)
+        {
+            TUEntropyCodingParameters codeParams;
+            cu.getTUEntropyCodingParameters(codeParams, absPartIdx, log2TrSize, isLuma);
+            return signBitHidingHDQ(coeff, deltaU, numSig, codeParams, log2TrSize);
+        }
+        else
+            return numSig;
+    }
+}
+
+/* Dequantization + inverse transform of one TU into 'residual'. Handles
+ * lossless bypass, scaling-list vs flat dequant, transform-skip, a fast
+ * DC-only reconstruction path, and the intra 4x4 luma inverse DST. */
+void Quant::invtransformNxN(const CUData& cu, int16_t* residual, uint32_t resiStride, const coeff_t* coeff,
+                            uint32_t log2TrSize, TextType ttype, bool bIntra, bool useTransformSkip, uint32_t numSig)
+{
+    const uint32_t sizeIdx = log2TrSize - 2;
+
+    if (cu.m_tqBypass[0])
+    {
+        /* lossless: coefficients are the residual, copied with no shift */
+        primitives.cu[sizeIdx].cpy1Dto2D_shl(residual, coeff, resiStride, 0);
+        return;
+    }
+
+    // Values need to pass as input parameter in dequant
+    int rem = m_qpParam[ttype].rem;
+    int per = m_qpParam[ttype].per;
+    int transformShift = MAX_TR_DYNAMIC_RANGE - X265_DEPTH - log2TrSize;
+    int shift = QUANT_IQUANT_SHIFT - QUANT_SHIFT - transformShift;
+    int numCoeff = 1 << (log2TrSize * 2);
+
+    if (m_scalingList->m_bEnabled)
+    {
+        int scalingListType = (bIntra ? 0 : 3) + ttype;
+        const int32_t* dequantCoef = m_scalingList->m_dequantCoef[sizeIdx][scalingListType][rem];
+        primitives.dequant_scaling(coeff, dequantCoef, m_resiDctCoeff, numCoeff, per, shift);
+    }
+    else
+    {
+        int scale = m_scalingList->s_invQuantScales[rem] << per;
+        primitives.dequant_normal(coeff, m_resiDctCoeff, numCoeff, scale, shift);
+    }
+
+    if (useTransformSkip)
+    {
+#if X265_DEPTH <= 10
+        X265_CHECK(transformShift > 0, "invalid transformShift\n");
+        primitives.cu[sizeIdx].cpy1Dto2D_shr(residual, m_resiDctCoeff, resiStride, transformShift);
+#else
+        /* at 12-bit depth transformShift can go non-positive for large TUs */
+        if (transformShift > 0)
+            primitives.cu[sizeIdx].cpy1Dto2D_shr(residual, m_resiDctCoeff, resiStride, transformShift);
+        else
+            primitives.cu[sizeIdx].cpy1Dto2D_shl(residual, m_resiDctCoeff, resiStride, -transformShift);
+#endif
+    }
+    else
+    {
+        int useDST = !sizeIdx && ttype == TEXT_LUMA && bIntra;
+        X265_CHECK((int)numSig == primitives.cu[log2TrSize - 2].count_nonzero(coeff), "numSig differ\n");
+        // DC only
+        if (numSig == 1 && coeff[0] != 0 && !useDST)
+        {
+            /* a lone DC coefficient yields a constant block; fill directly
+             * instead of running the full inverse transform */
+            const int shift_1st = 7 - 6;
+            const int add_1st = 1 << (shift_1st - 1);
+            const int shift_2nd = 12 - (X265_DEPTH - 8) - 3;
+            const int add_2nd = 1 << (shift_2nd - 1);
+
+            int dc_val = (((m_resiDctCoeff[0] * (64 >> 6) + add_1st) >> shift_1st) * (64 >> 3) + add_2nd) >> shift_2nd;
+            primitives.cu[sizeIdx].blockfill_s(residual, resiStride, (int16_t)dc_val);
+            return;
+        }
+
+        if (useDST)
+            primitives.idst4x4(m_resiDctCoeff, residual, resiStride);
+        else
+            primitives.cu[sizeIdx].idct(m_resiDctCoeff, residual, resiStride);
+    }
+}
+
+/* Rate distortion optimized quantization for entropy coding engines using
+ * probability models like CABAC */
+uint32_t Quant::rdoQuant(const CUData& cu, int16_t* dstCoeff, uint32_t log2TrSize, TextType ttype, uint32_t absPartIdx, bool usePsy)
+{
+    int transformShift = MAX_TR_DYNAMIC_RANGE - X265_DEPTH - log2TrSize; /* Represents scaling through forward transform */
+    int scalingListType = (cu.isIntra(absPartIdx) ? 0 : 3) + ttype;
+    const uint32_t usePsyMask = usePsy ? -1 : 0;
+
+    X265_CHECK(scalingListType < 6, "scaling list type out of range\n");
+
+    int rem = m_qpParam[ttype].rem;
+    int per = m_qpParam[ttype].per;
+    int qbits = QUANT_SHIFT + per + transformShift; /* Right shift of non-RDOQ quantizer level = (coeff*Q + offset)>>q_bits */
+    int add = (1 << (qbits - 1));
+    const int32_t* qCoef = m_scalingList->m_quantCoef[log2TrSize - 2][scalingListType][rem];
+
+    int numCoeff = 1 << (log2TrSize * 2);
+    uint32_t numSig = primitives.nquant(m_resiDctCoeff, qCoef, dstCoeff, qbits, add, numCoeff);
+    X265_CHECK((int)numSig == primitives.cu[log2TrSize - 2].count_nonzero(dstCoeff), "numSig differ\n");
+    if (!numSig)
+        return 0;
+
+    uint32_t trSize = 1 << log2TrSize;
+    int64_t lambda2 = m_qpParam[ttype].lambda2;
+    const int64_t psyScale = ((int64_t)m_psyRdoqScale * m_qpParam[ttype].lambda);
+
+    /* unquant constants for measuring distortion. Scaling list quant coefficients have a (1 << 4)
+     * scale applied that must be removed during unquant. Note that in real dequant there is clipping
+     * at several stages. We skip the clipping for simplicity when measuring RD cost */
+    const int32_t* unquantScale = m_scalingList->m_dequantCoef[log2TrSize - 2][scalingListType][rem];
+    int unquantShift = QUANT_IQUANT_SHIFT - QUANT_SHIFT - transformShift + (m_scalingList->m_bEnabled ? 4 : 0);
+    int unquantRound = (unquantShift > per) ? 1 << (unquantShift - per - 1) : 0;
+    int scaleBits = SCALE_BITS - 2 * transformShift;
+
+#define UNQUANT(lvl)    (((lvl) * (unquantScale[blkPos] << per) + unquantRound) >> unquantShift)
+#define SIGCOST(bits)   ((lambda2 * (bits)) >> 8)
+#define RDCOST(d, bits) ((((int64_t)d * d) << scaleBits) + SIGCOST(bits))
+#define PSYVALUE(rec)   ((psyScale * (rec)) >> (2 * transformShift + 1))
+
+    int64_t costCoeff[32 * 32];   /* d*d + lambda * bits */
+    int64_t costUncoded[32 * 32]; /* d*d + lambda * 0    */
+    int64_t costSig[32 * 32];     /* lambda * bits       */
+
+    int rateIncUp[32 * 32];      /* signal overhead of increasing level */
+    int rateIncDown[32 * 32];    /* signal overhead of decreasing level */
+    int sigRateDelta[32 * 32];   /* signal difference between zero and non-zero */
+
+    int64_t costCoeffGroupSig[MLS_GRP_NUM]; /* lambda * bits of group coding cost */
+    uint64_t sigCoeffGroupFlag64 = 0;
+
+    const uint32_t cgSize = (1 << MLS_CG_SIZE); /* 4x4 num coef = 16 */
+    bool bIsLuma = ttype == TEXT_LUMA;
+
+    /* total rate distortion cost of transform block, as CBF=0 */
+    int64_t totalUncodedCost = 0;
+
+    /* Total rate distortion cost of this transform block, counting te distortion of uncoded blocks,
+     * the distortion and signal cost of coded blocks, and the coding cost of significant
+     * coefficient and coefficient group bitmaps */
+    int64_t totalRdCost = 0;
+
+    TUEntropyCodingParameters codeParams;
+    cu.getTUEntropyCodingParameters(codeParams, absPartIdx, log2TrSize, bIsLuma);
+    const uint32_t cgNum = 1 << (codeParams.log2TrSizeCG * 2);
+    const uint32_t cgStride = (trSize >> MLS_CG_LOG2_SIZE);
+
+    uint8_t coeffNum[MLS_GRP_NUM];      // value range[0, 16]
+    uint16_t coeffSign[MLS_GRP_NUM];    // bit mask map for non-zero coeff sign
+    uint16_t coeffFlag[MLS_GRP_NUM];    // bit mask map for non-zero coeff
+
+#if CHECKED_BUILD || _DEBUG
+    // clean output buffer, the asm version of scanPosLast Never output anything after latest non-zero coeff group
+    memset(coeffNum, 0, sizeof(coeffNum));
+    memset(coeffSign, 0, sizeof(coeffNum));
+    memset(coeffFlag, 0, sizeof(coeffNum));
+#endif
+    const int lastScanPos = primitives.scanPosLast(codeParams.scan, dstCoeff, coeffSign, coeffFlag, coeffNum, numSig, g_scan4x4[codeParams.scanType], trSize);
+    const int cgLastScanPos = (lastScanPos >> LOG2_SCAN_SET_SIZE);
+
+
+    /* TODO: update bit estimates if dirty */
+    EstBitsSbac& estBitsSbac = m_entropyCoder->m_estBitsSbac;
+
+    uint32_t scanPos = 0;
+    uint32_t c1 = 1;
+
+    // process trail all zero Coeff Group
+
+    /* coefficients after lastNZ have no distortion signal cost */
+    const int zeroCG = cgNum - 1 - cgLastScanPos;
+    memset(&costCoeff[(cgLastScanPos + 1) << MLS_CG_SIZE], 0, zeroCG * MLS_CG_BLK_SIZE * sizeof(int64_t));
+    memset(&costSig[(cgLastScanPos + 1) << MLS_CG_SIZE], 0, zeroCG * MLS_CG_BLK_SIZE * sizeof(int64_t));
+
+    /* sum zero coeff (uncodec) cost */
+
+    // TODO: does we need these cost?
+    if (usePsyMask)
+    {
+        for (int cgScanPos = cgLastScanPos + 1; cgScanPos < (int)cgNum ; cgScanPos++)
+        {
+            X265_CHECK(coeffNum[cgScanPos] == 0, "count of coeff failure\n");
+
+            uint32_t scanPosBase = (cgScanPos << MLS_CG_SIZE);
+            uint32_t blkPos      = codeParams.scan[scanPosBase];
+
+            // TODO: we can't SIMD optimize because PSYVALUE need 64-bits multiplication, convert to Double can work faster by FMA
+            for (int y = 0; y < MLS_CG_SIZE; y++)
+            {
+                for (int x = 0; x < MLS_CG_SIZE; x++)
+                {
+                    int signCoef         = m_resiDctCoeff[blkPos + x];            /* pre-quantization DCT coeff */
+                    int predictedCoef    = m_fencDctCoeff[blkPos + x] - signCoef; /* predicted DCT = source DCT - residual DCT*/
+
+                    costUncoded[blkPos + x] = ((int64_t)signCoef * signCoef) << scaleBits;
+
+                    /* when no residual coefficient is coded, predicted coef == recon coef */
+                    costUncoded[blkPos + x] -= PSYVALUE(predictedCoef);
+
+                    totalUncodedCost += costUncoded[blkPos + x];
+                    totalRdCost += costUncoded[blkPos + x];
+                }
+                blkPos += trSize;
+            }
+        }
+    }
+    else
+    {
+        // non-psy path
+        for (int cgScanPos = cgLastScanPos + 1; cgScanPos < (int)cgNum ; cgScanPos++)
+        {
+            X265_CHECK(coeffNum[cgScanPos] == 0, "count of coeff failure\n");
+
+            uint32_t scanPosBase = (cgScanPos << MLS_CG_SIZE);
+            uint32_t blkPos      = codeParams.scan[scanPosBase];
+
+            for (int y = 0; y < MLS_CG_SIZE; y++)
+            {
+                for (int x = 0; x < MLS_CG_SIZE; x++)
+                {
+                    int signCoef = m_resiDctCoeff[blkPos + x];            /* pre-quantization DCT coeff */
+                    costUncoded[blkPos + x] = ((int64_t)signCoef * signCoef) << scaleBits;
+
+                    totalUncodedCost += costUncoded[blkPos + x];
+                    totalRdCost += costUncoded[blkPos + x];
+                }
+                blkPos += trSize;
+            }
+        }
+    }
+
+    static const uint8_t table_cnt[5][SCAN_SET_SIZE] =
+    {
+        // patternSigCtx = 0
+        {
+            2, 1, 1, 0,
+            1, 1, 0, 0,
+            1, 0, 0, 0,
+            0, 0, 0, 0,
+        },
+        // patternSigCtx = 1
+        {
+            2, 2, 2, 2,
+            1, 1, 1, 1,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+        },
+        // patternSigCtx = 2
+        {
+            2, 1, 0, 0,
+            2, 1, 0, 0,
+            2, 1, 0, 0,
+            2, 1, 0, 0,
+        },
+        // patternSigCtx = 3
+        {
+            2, 2, 2, 2,
+            2, 2, 2, 2,
+            2, 2, 2, 2,
+            2, 2, 2, 2,
+        },
+        // 4x4
+        {
+            0, 1, 4, 5,
+            2, 3, 4, 5,
+            6, 6, 8, 8,
+            7, 7, 8, 8
+        }
+    };
+
+    /* iterate over coding groups in reverse scan order */
+    for (int cgScanPos = cgLastScanPos; cgScanPos >= 0; cgScanPos--)
+    {
+        uint32_t ctxSet = (cgScanPos && bIsLuma) ? 2 : 0;
+        const uint32_t cgBlkPos = codeParams.scanCG[cgScanPos];
+        const uint32_t cgPosY   = cgBlkPos >> codeParams.log2TrSizeCG;
+        const uint32_t cgPosX   = cgBlkPos - (cgPosY << codeParams.log2TrSizeCG);
+        const uint64_t cgBlkPosMask = ((uint64_t)1 << cgBlkPos);
+        const int patternSigCtx = calcPatternSigCtx(sigCoeffGroupFlag64, cgPosX, cgPosY, cgBlkPos, cgStride);
+        const int ctxSigOffset = codeParams.firstSignificanceMapContext + (cgScanPos && bIsLuma ? 3 : 0);
+
+        if (c1 == 0)
+            ctxSet++;
+        c1 = 1;
+
+        if (cgScanPos && (coeffNum[cgScanPos] == 0))
+        {
+            // TODO: does we need zero-coeff cost?
+            const uint32_t scanPosBase = (cgScanPos << MLS_CG_SIZE);
+            uint32_t blkPos = codeParams.scan[scanPosBase];
+
+            if (usePsyMask)
+            {
+                // TODO: we can't SIMD optimize because PSYVALUE need 64-bits multiplication, convert to Double can work faster by FMA
+                for (int y = 0; y < MLS_CG_SIZE; y++)
+                {
+                    for (int x = 0; x < MLS_CG_SIZE; x++)
+                    {
+                        int signCoef         = m_resiDctCoeff[blkPos + x];            /* pre-quantization DCT coeff */
+                        int predictedCoef    = m_fencDctCoeff[blkPos + x] - signCoef; /* predicted DCT = source DCT - residual DCT*/
+
+                        costUncoded[blkPos + x] = ((int64_t)signCoef * signCoef) << scaleBits;
+
+                        /* when no residual coefficient is coded, predicted coef == recon coef */
+                        costUncoded[blkPos + x] -= PSYVALUE(predictedCoef);
+
+                        totalUncodedCost += costUncoded[blkPos + x];
+                        totalRdCost += costUncoded[blkPos + x];
+
+                        const uint32_t scanPosOffset =  y * MLS_CG_SIZE + x;
+                        const uint32_t ctxSig = table_cnt[patternSigCtx][g_scan4x4[codeParams.scanType][scanPosOffset]] + ctxSigOffset;
+                        X265_CHECK(trSize > 4, "trSize check failure\n");
+                        X265_CHECK(ctxSig == getSigCtxInc(patternSigCtx, log2TrSize, trSize, codeParams.scan[scanPosBase + scanPosOffset], bIsLuma, codeParams.firstSignificanceMapContext), "sigCtx check failure\n");
+
+                        costSig[scanPosBase + scanPosOffset] = SIGCOST(estBitsSbac.significantBits[0][ctxSig]);
+                        costCoeff[scanPosBase + scanPosOffset] = costUncoded[blkPos + x];
+                        sigRateDelta[blkPos + x] = estBitsSbac.significantBits[1][ctxSig] - estBitsSbac.significantBits[0][ctxSig];
+                    }
+                    blkPos += trSize;
+                }
+            }
+            else
+            {
+                // non-psy path
+                for (int y = 0; y < MLS_CG_SIZE; y++)
+                {
+                    for (int x = 0; x < MLS_CG_SIZE; x++)
+                    {
+                        int signCoef = m_resiDctCoeff[blkPos + x];            /* pre-quantization DCT coeff */
+                        costUncoded[blkPos + x] = ((int64_t)signCoef * signCoef) << scaleBits;
+
+                        totalUncodedCost += costUncoded[blkPos + x];
+                        totalRdCost += costUncoded[blkPos + x];
+
+                        const uint32_t scanPosOffset =  y * MLS_CG_SIZE + x;
+                        const uint32_t ctxSig = table_cnt[patternSigCtx][g_scan4x4[codeParams.scanType][scanPosOffset]] + ctxSigOffset;
+                        X265_CHECK(trSize > 4, "trSize check failure\n");
+                        X265_CHECK(ctxSig == getSigCtxInc(patternSigCtx, log2TrSize, trSize, codeParams.scan[scanPosBase + scanPosOffset], bIsLuma, codeParams.firstSignificanceMapContext), "sigCtx check failure\n");
+
+                        costSig[scanPosBase + scanPosOffset] = SIGCOST(estBitsSbac.significantBits[0][ctxSig]);
+                        costCoeff[scanPosBase + scanPosOffset] = costUncoded[blkPos + x];
+                        sigRateDelta[blkPos + x] = estBitsSbac.significantBits[1][ctxSig] - estBitsSbac.significantBits[0][ctxSig];
+                    }
+                    blkPos += trSize;
+                }
+            }
+
+            /* there were no coded coefficients in this coefficient group */
+            {
+                uint32_t ctxSig = getSigCoeffGroupCtxInc(sigCoeffGroupFlag64, cgPosX, cgPosY, cgBlkPos, cgStride);
+                costCoeffGroupSig[cgScanPos] = SIGCOST(estBitsSbac.significantCoeffGroupBits[ctxSig][0]);
+                totalRdCost += costCoeffGroupSig[cgScanPos];  /* add cost of 0 bit in significant CG bitmap */
+            }
+            continue;
+        }
+
+        coeffGroupRDStats cgRdStats;
+        memset(&cgRdStats, 0, sizeof(coeffGroupRDStats));
+
+        uint32_t subFlagMask = coeffFlag[cgScanPos];
+        int    c2            = 0;
+        uint32_t goRiceParam = 0;
+        uint32_t c1Idx       = 0;
+        uint32_t c2Idx       = 0;
+        /* iterate over coefficients in each group in reverse scan order */
+        for (int scanPosinCG = cgSize - 1; scanPosinCG >= 0; scanPosinCG--)
+        {
+            scanPos              = (cgScanPos << MLS_CG_SIZE) + scanPosinCG;
+            uint32_t blkPos      = codeParams.scan[scanPos];
+            uint32_t maxAbsLevel = abs(dstCoeff[blkPos]);             /* abs(quantized coeff) */
+            int signCoef         = m_resiDctCoeff[blkPos];            /* pre-quantization DCT coeff */
+            int predictedCoef    = m_fencDctCoeff[blkPos] - signCoef; /* predicted DCT = source DCT - residual DCT*/
+
+            /* RDOQ measures distortion as the squared difference between the unquantized coded level
+             * and the original DCT coefficient. The result is shifted scaleBits to account for the
+             * FIX15 nature of the CABAC cost tables minus the forward transform scale */
+
+            /* cost of not coding this coefficient (all distortion, no signal bits) */
+            costUncoded[blkPos] = ((int64_t)signCoef * signCoef) << scaleBits;
+            X265_CHECK((!!scanPos ^ !!blkPos) == 0, "failed on (blkPos=0 && scanPos!=0)\n");
+            if (usePsyMask & scanPos)
+                /* when no residual coefficient is coded, predicted coef == recon coef */
+                costUncoded[blkPos] -= PSYVALUE(predictedCoef);
+
+            totalUncodedCost += costUncoded[blkPos];
+
+            // coefficient level estimation
+            const int* greaterOneBits = estBitsSbac.greaterOneBits[4 * ctxSet + c1];
+            //const uint32_t ctxSig = (blkPos == 0) ? 0 : table_cnt[(trSize == 4) ? 4 : patternSigCtx][g_scan4x4[codeParams.scanType][scanPosinCG]] + ctxSigOffset;
+            static const uint64_t table_cnt64[4] = {0x0000000100110112ULL, 0x0000000011112222ULL, 0x0012001200120012ULL, 0x2222222222222222ULL};
+            uint64_t ctxCnt = table_cnt64[patternSigCtx];
+            if (trSize == 4)
+                ctxCnt = 0x8877886654325410ULL;
+            const uint32_t ctxSig = (blkPos == 0) ? 0 : ((ctxCnt >> (4 * g_scan4x4[codeParams.scanType][scanPosinCG])) & 0xF) + ctxSigOffset;
+            // NOTE: above equal to 'table_cnt[(trSize == 4) ? 4 : patternSigCtx][g_scan4x4[codeParams.scanType][scanPosinCG]] + ctxSigOffset'
+            X265_CHECK(ctxSig == getSigCtxInc(patternSigCtx, log2TrSize, trSize, blkPos, bIsLuma, codeParams.firstSignificanceMapContext), "sigCtx check failure\n");
+
+            // before reaching the last non-zero coeff (reverse scan order)
+            if (scanPos > (uint32_t)lastScanPos)
+            {
+                /* coefficients after lastNZ have no distortion signal cost */
+                costCoeff[scanPos] = 0;
+                costSig[scanPos] = 0;
+
+                /* No non-zero coefficient yet found, but this does not mean
+                 * there is no uncoded-cost for this coefficient. Pre-
+                 * quantization the coefficient may have been non-zero */
+                totalRdCost += costUncoded[blkPos];
+            }
+            else if (!(subFlagMask & 1))
+            {
+                // fast zero coeff path
+                /* set default costs to uncoded costs */
+                costSig[scanPos] = SIGCOST(estBitsSbac.significantBits[0][ctxSig]);
+                costCoeff[scanPos] = costUncoded[blkPos] + costSig[scanPos];
+                sigRateDelta[blkPos] = estBitsSbac.significantBits[1][ctxSig] - estBitsSbac.significantBits[0][ctxSig];
+                totalRdCost += costCoeff[scanPos];
+                rateIncUp[blkPos] = greaterOneBits[0];
+
+                subFlagMask >>= 1;
+            }
+            else
+            {
+                subFlagMask >>= 1;
+
+                const uint32_t c1c2Idx = ((c1Idx - 8) >> (sizeof(int) * CHAR_BIT - 1)) + (((-(int)c2Idx) >> (sizeof(int) * CHAR_BIT - 1)) + 1) * 2;
+                const uint32_t baseLevel = ((uint32_t)0xD9 >> (c1c2Idx * 2)) & 3;  // {1, 2, 1, 3}
+
+                X265_CHECK(!!((int)c1Idx < C1FLAG_NUMBER) == (int)((c1Idx - 8) >> (sizeof(int) * CHAR_BIT - 1)), "scan validation 1\n");
+                X265_CHECK(!!(c2Idx == 0) == ((-(int)c2Idx) >> (sizeof(int) * CHAR_BIT - 1)) + 1, "scan validation 2\n");
+                X265_CHECK((int)baseLevel == ((c1Idx < C1FLAG_NUMBER) ? (2 + (c2Idx == 0)) : 1), "scan validation 3\n");
+
+                // coefficient level estimation
+                const int* levelAbsBits = estBitsSbac.levelAbsBits[ctxSet + c2];
+
+                uint32_t level = 0;
+                uint32_t sigCoefBits = 0;
+                costCoeff[scanPos] = MAX_INT64;
+
+                if ((int)scanPos == lastScanPos)
+                    sigRateDelta[blkPos] = 0;
+                else
+                {
+                    if (maxAbsLevel < 3)
+                    {
+                        /* set default costs to uncoded costs */
+                        costSig[scanPos] = SIGCOST(estBitsSbac.significantBits[0][ctxSig]);
+                        costCoeff[scanPos] = costUncoded[blkPos] + costSig[scanPos];
+                    }
+                    sigRateDelta[blkPos] = estBitsSbac.significantBits[1][ctxSig] - estBitsSbac.significantBits[0][ctxSig];
+                    sigCoefBits = estBitsSbac.significantBits[1][ctxSig];
+                }
+
+                // NOTE: X265_MAX(maxAbsLevel - 1, 1) ==> (X>=2 -> X-1), (X<2 -> 1)  | (0 < X < 2 ==> X=1)
+                if (maxAbsLevel == 1)
+                {
+                    uint32_t levelBits = (c1c2Idx & 1) ? greaterOneBits[0] + IEP_RATE : ((1 + goRiceParam) << 15) + IEP_RATE;
+                    X265_CHECK(levelBits == getICRateCost(1, 1 - baseLevel, greaterOneBits, levelAbsBits, goRiceParam, c1c2Idx) + IEP_RATE, "levelBits mistake\n");
+
+                    int unquantAbsLevel = UNQUANT(1);
+                    int d = abs(signCoef) - unquantAbsLevel;
+                    int64_t curCost = RDCOST(d, sigCoefBits + levelBits);
+
+                    /* Psy RDOQ: bias in favor of higher AC coefficients in the reconstructed frame */
+                    if (usePsyMask & scanPos)
+                    {
+                        int reconCoef = abs(unquantAbsLevel + SIGN(predictedCoef, signCoef));
+                        curCost -= PSYVALUE(reconCoef);
+                    }
+
+                    if (curCost < costCoeff[scanPos])
+                    {
+                        level = 1;
+                        costCoeff[scanPos] = curCost;
+                        costSig[scanPos] = SIGCOST(sigCoefBits);
+                    }
+                }
+                else if (maxAbsLevel)
+                {
+                    uint32_t levelBits0 = getICRateCost(maxAbsLevel,     maxAbsLevel     - baseLevel, greaterOneBits, levelAbsBits, goRiceParam, c1c2Idx) + IEP_RATE;
+                    uint32_t levelBits1 = getICRateCost(maxAbsLevel - 1, maxAbsLevel - 1 - baseLevel, greaterOneBits, levelAbsBits, goRiceParam, c1c2Idx) + IEP_RATE;
+
+                    int unquantAbsLevel0 = UNQUANT(maxAbsLevel);
+                    int d0 = abs(signCoef) - unquantAbsLevel0;
+                    int64_t curCost0 = RDCOST(d0, sigCoefBits + levelBits0);
+
+                    int unquantAbsLevel1 = UNQUANT(maxAbsLevel - 1);
+                    int d1 = abs(signCoef) - unquantAbsLevel1;
+                    int64_t curCost1 = RDCOST(d1, sigCoefBits + levelBits1);
+
+                    /* Psy RDOQ: bias in favor of higher AC coefficients in the reconstructed frame */
+                    if (usePsyMask & scanPos)
+                    {
+                        int reconCoef;
+                        reconCoef = abs(unquantAbsLevel0 + SIGN(predictedCoef, signCoef));
+                        curCost0 -= PSYVALUE(reconCoef);
+
+                        reconCoef = abs(unquantAbsLevel1 + SIGN(predictedCoef, signCoef));
+                        curCost1 -= PSYVALUE(reconCoef);
+                    }
+                    if (curCost0 < costCoeff[scanPos])
+                    {
+                        level = maxAbsLevel;
+                        costCoeff[scanPos] = curCost0;
+                        costSig[scanPos] = SIGCOST(sigCoefBits);
+                    }
+                    if (curCost1 < costCoeff[scanPos])
+                    {
+                        level = maxAbsLevel - 1;
+                        costCoeff[scanPos] = curCost1;
+                        costSig[scanPos] = SIGCOST(sigCoefBits);
+                    }
+                }
+
+                dstCoeff[blkPos] = (int16_t)level;
+                totalRdCost += costCoeff[scanPos];
+
+                /* record costs for sign-hiding performed at the end */
+                if ((cu.m_slice->m_pps->bSignHideEnabled ? ~0 : 0) & level)
+                {
+                    const int32_t diff0 = level - 1 - baseLevel;
+                    const int32_t diff2 = level + 1 - baseLevel;
+                    const int32_t maxVlc = g_goRiceRange[goRiceParam];
+                    int rate0, rate1, rate2;
+
+                    if (diff0 < -2)  // prob (92.9, 86.5, 74.5)%
+                    {
+                        // NOTE: Min: L - 1 - {1,2,1,3} < -2 ==> L < {0,1,0,2}
+                        //            additional L > 0, so I got (L > 0 && L < 2) ==> L = 1
+                        X265_CHECK(level == 1, "absLevel check failure\n");
+
+                        const int rateEqual2 = greaterOneBits[1] + levelAbsBits[0];;
+                        const int rateNotEqual2 = greaterOneBits[0];
+
+                        rate0 = 0;
+                        rate2 = rateEqual2;
+                        rate1 = rateNotEqual2;
+
+                        X265_CHECK(rate1 == getICRateNegDiff(level + 0, greaterOneBits, levelAbsBits), "rate1 check failure!\n");
+                        X265_CHECK(rate2 == getICRateNegDiff(level + 1, greaterOneBits, levelAbsBits), "rate1 check failure!\n");
+                        X265_CHECK(rate0 == getICRateNegDiff(level - 1, greaterOneBits, levelAbsBits), "rate1 check failure!\n");
+                    }
+                    else if (diff0 >= 0 && diff2 <= maxVlc)     // prob except from above path (98.6, 97.9, 96.9)%
+                    {
+                        // NOTE: no c1c2 correct rate since all of rate include this factor
+                        rate1 = getICRateLessVlc(level + 0, diff0 + 1, goRiceParam);
+                        rate2 = getICRateLessVlc(level + 1, diff0 + 2, goRiceParam);
+                        rate0 = getICRateLessVlc(level - 1, diff0 + 0, goRiceParam);
+                    }
+                    else
+                    {
+                        rate1 = getICRate(level + 0, diff0 + 1, greaterOneBits, levelAbsBits, goRiceParam, maxVlc, c1c2Idx);
+                        rate2 = getICRate(level + 1, diff0 + 2, greaterOneBits, levelAbsBits, goRiceParam, maxVlc, c1c2Idx);
+                        rate0 = getICRate(level - 1, diff0 + 0, greaterOneBits, levelAbsBits, goRiceParam, maxVlc, c1c2Idx);
+                    }
+                    rateIncUp[blkPos] = rate2 - rate1;
+                    rateIncDown[blkPos] = rate0 - rate1;
+                }
+                else
+                {
+                    rateIncUp[blkPos] = greaterOneBits[0];
+                    rateIncDown[blkPos] = 0;
+                }
+
+                /* Update CABAC estimation state */
+                if (level >= baseLevel && goRiceParam < 4 && level > (3U << goRiceParam))
+                    goRiceParam++;
+
+                c1Idx -= (-(int32_t)level) >> 31;
+
+                /* update bin model */
+                if (level > 1)
+                {
+                    c1 = 0;
+                    c2 += (uint32_t)(c2 - 2) >> 31;
+                    c2Idx++;
+                }
+                else if ((c1 < 3) && (c1 > 0) && level)
+                    c1++;
+
+                if (dstCoeff[blkPos])
+                {
+                    sigCoeffGroupFlag64 |= cgBlkPosMask;
+                    cgRdStats.codedLevelAndDist += costCoeff[scanPos] - costSig[scanPos];
+                    cgRdStats.uncodedDist += costUncoded[blkPos];
+                    cgRdStats.nnzBeforePos0 += scanPosinCG;
+                }
+            }
+
+            cgRdStats.sigCost += costSig[scanPos];
+        } /* end for (scanPosinCG) */
+
+        X265_CHECK((cgScanPos << MLS_CG_SIZE) == (int)scanPos, "scanPos mistake\n");
+        cgRdStats.sigCost0 = costSig[scanPos];
+
+        costCoeffGroupSig[cgScanPos] = 0;
+
+        /* nothing to do in this case */
+        X265_CHECK(cgLastScanPos >= 0, "cgLastScanPos check failure\n");
+
+        if (!cgScanPos || cgScanPos == cgLastScanPos)
+        {
+            /* coeff group 0 is implied to be present, no signal cost */
+            /* coeff group with last NZ is implied to be present, handled below */
+        }
+        else if (sigCoeffGroupFlag64 & cgBlkPosMask)
+        {
+            if (!cgRdStats.nnzBeforePos0)
+            {
+                /* if only coeff 0 in this CG is coded, its significant coeff bit is implied */
+                totalRdCost -= cgRdStats.sigCost0;
+                cgRdStats.sigCost -= cgRdStats.sigCost0;
+            }
+
+            /* there are coded coefficients in this group, but now we include the signaling cost
+             * of the significant coefficient group flag and evaluate whether the RD cost of the
+             * coded group is more than the RD cost of the uncoded group */
+
+            uint32_t sigCtx = getSigCoeffGroupCtxInc(sigCoeffGroupFlag64, cgPosX, cgPosY, cgBlkPos, cgStride);
+
+            int64_t costZeroCG = totalRdCost + SIGCOST(estBitsSbac.significantCoeffGroupBits[sigCtx][0]);
+            costZeroCG += cgRdStats.uncodedDist;       /* add distortion for resetting non-zero levels to zero levels */
+            costZeroCG -= cgRdStats.codedLevelAndDist; /* remove distortion and level cost of coded coefficients */
+            costZeroCG -= cgRdStats.sigCost;           /* remove signaling cost of significant coeff bitmap */
+
+            costCoeffGroupSig[cgScanPos] = SIGCOST(estBitsSbac.significantCoeffGroupBits[sigCtx][1]);
+            totalRdCost += costCoeffGroupSig[cgScanPos];  /* add the cost of 1 bit in significant CG bitmap */
+
+            if (costZeroCG < totalRdCost && m_rdoqLevel > 1)
+            {
+                sigCoeffGroupFlag64 &= ~cgBlkPosMask;
+                totalRdCost = costZeroCG;
+                costCoeffGroupSig[cgScanPos] = SIGCOST(estBitsSbac.significantCoeffGroupBits[sigCtx][0]);
+
+                /* reset all coeffs to 0. UNCODE THIS COEFF GROUP! */
+                const uint32_t blkPos = codeParams.scan[cgScanPos * cgSize];
+                memset(&dstCoeff[blkPos + 0 * trSize], 0, 4 * sizeof(*dstCoeff));
+                memset(&dstCoeff[blkPos + 1 * trSize], 0, 4 * sizeof(*dstCoeff));
+                memset(&dstCoeff[blkPos + 2 * trSize], 0, 4 * sizeof(*dstCoeff));
+                memset(&dstCoeff[blkPos + 3 * trSize], 0, 4 * sizeof(*dstCoeff));
+            }
+        }
+        else
+        {
+            /* there were no coded coefficients in this coefficient group */
+            uint32_t ctxSig = getSigCoeffGroupCtxInc(sigCoeffGroupFlag64, cgPosX, cgPosY, cgBlkPos, cgStride);
+            costCoeffGroupSig[cgScanPos] = SIGCOST(estBitsSbac.significantCoeffGroupBits[ctxSig][0]);
+            totalRdCost += costCoeffGroupSig[cgScanPos];  /* add cost of 0 bit in significant CG bitmap */
+            totalRdCost -= cgRdStats.sigCost;             /* remove cost of significant coefficient bitmap */
+        }
+    } /* end for (cgScanPos) */
+
+    X265_CHECK(lastScanPos >= 0, "numSig non zero, but no coded CG\n");
+
+    /* calculate RD cost of uncoded block CBF=0, and add cost of CBF=1 to total */
+    int64_t bestCost;
+    if (!cu.isIntra(absPartIdx) && bIsLuma && !cu.m_tuDepth[absPartIdx])
+    {
+        bestCost = totalUncodedCost + SIGCOST(estBitsSbac.blockRootCbpBits[0]);
+        totalRdCost += SIGCOST(estBitsSbac.blockRootCbpBits[1]);
+    }
+    else
+    {
+        int ctx = ctxCbf[ttype][cu.m_tuDepth[absPartIdx]];
+        bestCost = totalUncodedCost + SIGCOST(estBitsSbac.blockCbpBits[ctx][0]);
+        totalRdCost += SIGCOST(estBitsSbac.blockCbpBits[ctx][1]);
+    }
+
+    /* This loop starts with the last non-zero found in the first loop and then refines this last
+     * non-zero by measuring the true RD cost of the last NZ at this position, and then the RD costs
+     * at all previous coefficients until a coefficient greater than 1 is encountered or we run out
+     * of coefficients to evaluate.  This will factor in the cost of coding empty groups and empty
+     * coeff prior to the last NZ. The base best cost is the RD cost of CBF=0 */
+    int  bestLastIdx = 0;
+    bool foundLast = false;
+    for (int cgScanPos = cgLastScanPos; cgScanPos >= 0 && !foundLast; cgScanPos--)
+    {
+        if (!cgScanPos || cgScanPos == cgLastScanPos)
+        {
+            /* the presence of these coefficient groups are inferred, they have no bit in
+             * sigCoeffGroupFlag64 and no saved costCoeffGroupSig[] cost */
+        }
+        else if (sigCoeffGroupFlag64 & (1ULL << codeParams.scanCG[cgScanPos]))
+        {
+            /* remove cost of significant coeff group flag, the group's presence would be inferred
+             * from lastNZ if it were present in this group */
+            totalRdCost -= costCoeffGroupSig[cgScanPos];
+        }
+        else
+        {
+            /* remove cost of signaling this empty group as not present */
+            totalRdCost -= costCoeffGroupSig[cgScanPos];
+            continue;
+        }
+
+        for (int scanPosinCG = cgSize - 1; scanPosinCG >= 0; scanPosinCG--)
+        {
+            scanPos = cgScanPos * cgSize + scanPosinCG;
+            if ((int)scanPos > lastScanPos)
+                continue;
+
+            /* if the coefficient was coded, measure the RD cost of it as the last non-zero and then
+             * continue as if it were uncoded. If the coefficient was already uncoded, remove the
+             * cost of signaling it as not-significant */
+            uint32_t blkPos = codeParams.scan[scanPos];
+            if (dstCoeff[blkPos])
+            {
+                // Calculates the cost of signaling the last significant coefficient in the block 
+                uint32_t pos[2] = { (blkPos & (trSize - 1)), (blkPos >> log2TrSize) };
+                if (codeParams.scanType == SCAN_VER)
+                    std::swap(pos[0], pos[1]);
+                uint32_t bitsLastNZ = 0;
+
+                for (int i = 0; i < 2; i++)
+                {
+                    int temp = g_lastCoeffTable[pos[i]];
+                    int prefixOnes = temp & 15;
+                    int suffixLen = temp >> 4;
+
+                    bitsLastNZ += m_entropyCoder->m_estBitsSbac.lastBits[i][prefixOnes];
+                    bitsLastNZ += IEP_RATE * suffixLen;
+                }
+
+                int64_t costAsLast = totalRdCost - costSig[scanPos] + SIGCOST(bitsLastNZ);
+
+                if (costAsLast < bestCost)
+                {
+                    bestLastIdx = scanPos + 1;
+                    bestCost = costAsLast;
+                }
+                if (dstCoeff[blkPos] > 1 || m_rdoqLevel == 1)
+                {
+                    foundLast = true;
+                    break;
+                }
+
+                totalRdCost -= costCoeff[scanPos];
+                totalRdCost += costUncoded[blkPos];
+            }
+            else
+                totalRdCost -= costSig[scanPos];
+        }
+    }
+
+    /* recount non-zero coefficients and re-apply sign of DCT coef */
+    numSig = 0;
+    for (int pos = 0; pos < bestLastIdx; pos++)
+    {
+        int blkPos = codeParams.scan[pos];
+        int level  = dstCoeff[blkPos];
+        numSig += (level != 0);
+
+        uint32_t mask = (int32_t)m_resiDctCoeff[blkPos] >> 31;
+        dstCoeff[blkPos] = (int16_t)((level ^ mask) - mask);
+    }
+
+    // Average 49.62 pixels
+    /* clean uncoded coefficients */
+    for (int pos = bestLastIdx; pos <= fastMin(lastScanPos, (bestLastIdx | (SCAN_SET_SIZE - 1))); pos++)
+    {
+        dstCoeff[codeParams.scan[pos]] = 0;
+    }
+    for (int pos = (bestLastIdx & ~(SCAN_SET_SIZE - 1)) + SCAN_SET_SIZE; pos <= lastScanPos; pos += SCAN_SET_SIZE)
+    {
+        const uint32_t blkPos = codeParams.scan[pos];
+        memset(&dstCoeff[blkPos + 0 * trSize], 0, 4 * sizeof(*dstCoeff));
+        memset(&dstCoeff[blkPos + 1 * trSize], 0, 4 * sizeof(*dstCoeff));
+        memset(&dstCoeff[blkPos + 2 * trSize], 0, 4 * sizeof(*dstCoeff));
+        memset(&dstCoeff[blkPos + 3 * trSize], 0, 4 * sizeof(*dstCoeff));
+    }
+
+    /* rate-distortion based sign-hiding */
+    if (cu.m_slice->m_pps->bSignHideEnabled && numSig >= 2)
+    {
+        const int realLastScanPos = (bestLastIdx - 1) >> LOG2_SCAN_SET_SIZE;
+        int lastCG = true;
+        for (int subSet = realLastScanPos; subSet >= 0; subSet--)
+        {
+            int subPos = subSet << LOG2_SCAN_SET_SIZE;
+            int n;
+
+            if (!(sigCoeffGroupFlag64 & (1ULL << codeParams.scanCG[subSet])))
+                continue;
+
+            /* measure distance between first and last non-zero coef in this
+             * coding group */
+            const uint32_t posFirstLast = primitives.findPosFirstLast(&dstCoeff[codeParams.scan[subPos]], trSize, g_scan4x4[codeParams.scanType]);
+            int firstNZPosInCG = (uint16_t)posFirstLast;
+            int lastNZPosInCG = posFirstLast >> 16;
+
+
+            if (lastNZPosInCG - firstNZPosInCG >= SBH_THRESHOLD)
+            {
+                uint32_t signbit = (dstCoeff[codeParams.scan[subPos + firstNZPosInCG]] > 0 ? 0 : 1);
+                int absSum = 0;
+
+                for (n = firstNZPosInCG; n <= lastNZPosInCG; n++)
+                    absSum += dstCoeff[codeParams.scan[n + subPos]];
+
+                if (signbit != (absSum & 1U))
+                {
+                    /* We must find a coeff to toggle up or down so the sign bit of the first non-zero coeff
+                     * is properly implied. Note dstCoeff[] are signed by this point but curChange and
+                     * finalChange imply absolute levels (+1 is away from zero, -1 is towards zero) */
+
+                    int64_t minCostInc = MAX_INT64, curCost = MAX_INT64;
+                    int minPos = -1;
+                    int16_t finalChange = 0, curChange = 0;
+
+                    for (n = (lastCG ? lastNZPosInCG : SCAN_SET_SIZE - 1); n >= 0; --n)
+                    {
+                        uint32_t blkPos = codeParams.scan[n + subPos];
+                        int signCoef    = m_resiDctCoeff[blkPos]; /* pre-quantization DCT coeff */
+                        int absLevel    = abs(dstCoeff[blkPos]);
+
+                        int d = abs(signCoef) - UNQUANT(absLevel);
+                        int64_t origDist = (((int64_t)d * d)) << scaleBits;
+
+#define DELTARDCOST(d, deltabits) ((((int64_t)d * d) << scaleBits) - origDist + ((lambda2 * (int64_t)(deltabits)) >> 8))
+
+                        if (dstCoeff[blkPos])
+                        {
+                            d = abs(signCoef) - UNQUANT(absLevel + 1);
+                            int64_t costUp = DELTARDCOST(d, rateIncUp[blkPos]);
+
+                            /* if decrementing would make the coeff 0, we can include the
+                             * significant coeff flag cost savings */
+                            d = abs(signCoef) - UNQUANT(absLevel - 1);
+                            bool isOne = abs(dstCoeff[blkPos]) == 1;
+                            int downBits = rateIncDown[blkPos] - (isOne ? (IEP_RATE + sigRateDelta[blkPos]) : 0);
+                            int64_t costDown = DELTARDCOST(d, downBits);
+
+                            if (lastCG && lastNZPosInCG == n && isOne)
+                                costDown -= 4 * IEP_RATE;
+
+                            if (costUp < costDown)
+                            {
+                                curCost = costUp;
+                                curChange =  1;
+                            }
+                            else
+                            {
+                                curChange = -1;
+                                if (n == firstNZPosInCG && isOne)
+                                    curCost = MAX_INT64;
+                                else
+                                    curCost = costDown;
+                            }
+                        }
+                        else if (n < firstNZPosInCG && signbit != (signCoef >= 0 ? 0 : 1U))
+                        {
+                            /* don't try to make a new coded coeff before the first coeff if its
+                             * sign would be different than the first coeff, the inferred sign would
+                             * still be wrong and we'd have to do this again. */
+                            curCost = MAX_INT64;
+                        }
+                        else
+                        {
+                            /* evaluate changing an uncoded coeff 0 to a coded coeff +/-1 */
+                            d = abs(signCoef) - UNQUANT(1);
+                            curCost = DELTARDCOST(d, rateIncUp[blkPos] + IEP_RATE + sigRateDelta[blkPos]);
+                            curChange = 1;
+                        }
+
+                        if (curCost < minCostInc)
+                        {
+                            minCostInc = curCost;
+                            finalChange = curChange;
+                            minPos = blkPos;
+                        }
+                    }
+
+                    if (dstCoeff[minPos] == 32767 || dstCoeff[minPos] == -32768)
+                        /* don't allow sign hiding to violate the SPEC range */
+                        finalChange = -1;
+
+                    if (dstCoeff[minPos] == 0)
+                        numSig++;
+                    else if (finalChange == -1 && abs(dstCoeff[minPos]) == 1)
+                        numSig--;
+
+                    if (m_resiDctCoeff[minPos] >= 0)
+                        dstCoeff[minPos] += finalChange;
+                    else
+                        dstCoeff[minPos] -= finalChange;
+                }
+            }
+
+            lastCG = false;
+        }
+    }
+
+    return numSig;
+}
+
+/* Context derivation process of coeff_abs_significant_flag */
+/* Returns the CABAC context increment used when coding the significance flag of
+ * the coefficient at 'blkPos' inside a transform block of width trSize
+ * (trSize == 1 << log2TrSize). patternSigCtx encodes which neighboring 4x4
+ * coefficient groups are significant (0..3). This is the reference (slow)
+ * derivation; the RDOQ loop above uses packed lookup tables that are asserted
+ * equal to this function via X265_CHECK. NOTE(review): appears to follow the
+ * HEVC sig_coeff_flag context derivation — confirm against the spec. */
+uint32_t Quant::getSigCtxInc(uint32_t patternSigCtx, uint32_t log2TrSize, uint32_t trSize, uint32_t blkPos, bool bIsLuma,
+                             uint32_t firstSignificanceMapContext)
+{
+    /* fixed context map for 4x4 transform blocks; indexed directly by raster
+     * position, independent of neighbor-CG significance */
+    static const uint8_t ctxIndMap[16] =
+    {
+        0, 1, 4, 5,
+        2, 3, 4, 5,
+        6, 6, 8, 8,
+        7, 7, 8, 8
+    };
+
+    if (!blkPos) // special case for the DC context variable
+        return 0;
+
+    if (log2TrSize == 2) // 4x4
+        return ctxIndMap[blkPos];
+
+    /* raster coordinates of the coefficient within the transform block */
+    const uint32_t posY = blkPos >> log2TrSize;
+    const uint32_t posX = blkPos & (trSize - 1);
+    X265_CHECK((blkPos - (posY << log2TrSize)) == posX, "block pos check failed\n");
+
+    /* coordinates within the enclosing 4x4 coefficient group */
+    int posXinSubset = blkPos & 3;
+    X265_CHECK((posX & 3) == (blkPos & 3), "pos alignment fail\n");
+    int posYinSubset = posY & 3;
+
+    // NOTE: [patternSigCtx][posXinSubset][posYinSubset]
+    static const uint8_t table_cnt[4][4][4] =
+    {
+        // patternSigCtx = 0
+        {
+            { 2, 1, 1, 0 },
+            { 1, 1, 0, 0 },
+            { 1, 0, 0, 0 },
+            { 0, 0, 0, 0 },
+        },
+        // patternSigCtx = 1
+        {
+            { 2, 1, 0, 0 },
+            { 2, 1, 0, 0 },
+            { 2, 1, 0, 0 },
+            { 2, 1, 0, 0 },
+        },
+        // patternSigCtx = 2
+        {
+            { 2, 2, 2, 2 },
+            { 1, 1, 1, 1 },
+            { 0, 0, 0, 0 },
+            { 0, 0, 0, 0 },
+        },
+        // patternSigCtx = 3
+        {
+            { 2, 2, 2, 2 },
+            { 2, 2, 2, 2 },
+            { 2, 2, 2, 2 },
+            { 2, 2, 2, 2 },
+        }
+    };
+
+    int cnt = table_cnt[patternSigCtx][posXinSubset][posYinSubset];
+    int offset = firstSignificanceMapContext;
+
+    offset += cnt;
+
+    /* luma coefficients outside the top-left 4x4 region use a separate context
+     * set, offset by 3; chroma (and the top-left luma region) do not */
+    return (bIsLuma && (posX | posY) >= 4) ? 3 + offset : offset;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/quant.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,158 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_QUANT_H
+#define X265_QUANT_H
+
+#include "common.h"
+#include "scalinglist.h"
+#include "contexts.h"
+
+namespace X265_NS {
+// private namespace
+
+class CUData;
+class Entropy;
+struct TUEntropyCodingParameters;
+
+/* Cached per-QP quantization parameters; recomputed by setQpParam()
+ * only when the scaled QP actually changes */
+struct QpParam
+{
+    int rem;         // qp % 6, index into quant-scale tables
+    int per;         // qp / 6, scaling shift
+    int qp;          // scaled QP this struct was last set up for (MAX_INT = never set)
+    int64_t lambda2; /* FIX8 */
+    int32_t lambda;  /* FIX8, dynamic range is 18-bits in Main and 20-bits in Main10 */
+
+    QpParam() : qp(MAX_INT) {}
+
+    // derive rem/per and FIX8 lambda values from the scaled QP; no-op if unchanged
+    void setQpParam(int qpScaled)
+    {
+        if (qp != qpScaled)
+        {
+            rem = qpScaled % 6;
+            per = qpScaled / 6;
+            qp  = qpScaled;
+            lambda2 = (int64_t)(x265_lambda2_tab[qp - QP_BD_OFFSET] * 256. + 0.5);
+            lambda  = (int32_t)(x265_lambda_tab[qp - QP_BD_OFFSET] * 256. + 0.5);
+            X265_CHECK((x265_lambda_tab[qp - QP_BD_OFFSET] * 256. + 0.5) < (double)MAX_INT, "x265_lambda_tab[] value too large\n");
+        }
+    }
+};
+
+// NOTE: MUST be 16-byte aligned for asm code
+/* Per-category noise-reduction (denoise) accumulation state */
+struct NoiseReduction
+{
+    /* 0 = luma 4x4,   1 = luma 8x8,   2 = luma 16x16,   3 = luma 32x32
+     * 4 = chroma 4x4, 5 = chroma 8x8, 6 = chroma 16x16, 7 = chroma 32x32
+     * Intra 0..7 - Inter 8..15 */
+    ALIGN_VAR_16(uint32_t, nrResidualSum[MAX_NUM_TR_CATEGORIES][MAX_NUM_TR_COEFFS]);
+    uint32_t nrCount[MAX_NUM_TR_CATEGORIES];           // blocks accumulated per category
+    uint16_t nrOffsetDenoise[MAX_NUM_TR_CATEGORIES][MAX_NUM_TR_COEFFS];
+    uint16_t (*offset)[MAX_NUM_TR_COEFFS];             // active offset table -- wiring set up elsewhere, TODO confirm
+    uint32_t (*residualSum)[MAX_NUM_TR_COEFFS];        // active accumulation target -- TODO confirm
+    uint32_t *count;
+};
+
+/* Forward/inverse transform-and-quantize engine, plus static CABAC context
+ * derivation helpers shared with the entropy coder */
+class Quant
+{
+protected:
+
+    const ScalingList* m_scalingList;
+    Entropy*           m_entropyCoder;
+
+    QpParam            m_qpParam[3];   // one per plane (Y, Cb, Cr)
+
+    int                m_rdoqLevel;
+    int32_t            m_psyRdoqScale;  // dynamic range [0,50] * 256 = 14-bits
+    int16_t*           m_resiDctCoeff;
+    int16_t*           m_fencDctCoeff;
+    int16_t*           m_fencShortBuf;
+
+    enum { IEP_RATE = 32768 }; /* FIX15 cost of an equal probable bit */
+
+public:
+
+    NoiseReduction*    m_nr;
+    NoiseReduction*    m_frameNr; // Array of NR structures, one for each frameEncoder
+
+    Quant();
+    ~Quant();
+
+    /* one-time setup */
+    bool init(int rdoqLevel, double psyScale, const ScalingList& scalingList, Entropy& entropy);
+    bool allocNoiseReduction(const x265_param& param);
+
+    /* CU setup */
+    void setQPforQuant(const CUData& ctu, int qp);
+
+    uint32_t transformNxN(const CUData& cu, const pixel* fenc, uint32_t fencStride, const int16_t* residual, uint32_t resiStride, coeff_t* coeff,
+                          uint32_t log2TrSize, TextType ttype, uint32_t absPartIdx, bool useTransformSkip);
+
+    void invtransformNxN(const CUData& cu, int16_t* residual, uint32_t resiStride, const coeff_t* coeff,
+                         uint32_t log2TrSize, TextType ttype, bool bIntra, bool useTransformSkip, uint32_t numSig);
+
+    /* Pattern decision for context derivation process of significant_coeff_flag.
+     * Returns sigRight + 2*sigLower for the coefficient group at (cgPosX, cgPosY) */
+    static uint32_t calcPatternSigCtx(uint64_t sigCoeffGroupFlag64, uint32_t cgPosX, uint32_t cgPosY, uint32_t cgBlkPos, uint32_t trSizeCG)
+    {
+        if (trSizeCG == 1)
+            return 0;
+
+        X265_CHECK(trSizeCG <= 8, "transform CG is too large\n");
+        X265_CHECK(cgBlkPos < 64, "cgBlkPos is too large\n");
+        // NOTE: cgBlkPos+1 may more than 63, it is invalid for shift,
+        //       but in this case, both cgPosX and cgPosY equal to (trSizeCG - 1),
+        //       the sigRight and sigLower will clear value to zero, the final result will be correct
+        const uint32_t sigPos = (uint32_t)(sigCoeffGroupFlag64 >> (cgBlkPos + 1)); // just need lowest 7-bits valid
+
+        // TODO: instruction BT is faster, but _bittest64 still generate instruction 'BT m, r' in VS2012
+        const uint32_t sigRight = ((uint32_t)(cgPosX - (trSizeCG - 1)) >> 31) & sigPos;
+        const uint32_t sigLower = ((uint32_t)(cgPosY - (trSizeCG - 1)) >> 31) & (sigPos >> (trSizeCG - 1));
+        return sigRight + sigLower * 2;
+    }
+
+    /* Context derivation for the coded sub-block (coefficient group) flag:
+     * 1 if the right or lower neighbour group is coded, else 0.
+     * NOTE(review): the original comment here duplicated the
+     * coeff_abs_significant_flag label from getSigCtxInc -- mislabeled */
+    static uint32_t getSigCoeffGroupCtxInc(uint64_t cgGroupMask, uint32_t cgPosX, uint32_t cgPosY, uint32_t cgBlkPos, uint32_t trSizeCG)
+    {
+        X265_CHECK(cgBlkPos < 64, "cgBlkPos is too large\n");
+        // NOTE: unsafe shift operator, see NOTE in calcPatternSigCtx
+        const uint32_t sigPos = (uint32_t)(cgGroupMask >> (cgBlkPos + 1)); // just need lowest 8-bits valid
+        const uint32_t sigRight = ((uint32_t)(cgPosX - (trSizeCG - 1)) >> 31) & sigPos;
+        const uint32_t sigLower = ((uint32_t)(cgPosY - (trSizeCG - 1)) >> 31) & (sigPos >> (trSizeCG - 1));
+
+        return (sigRight | sigLower);
+    }
+
+    /* static methods shared with entropy.cpp */
+    static uint32_t getSigCtxInc(uint32_t patternSigCtx, uint32_t log2TrSize, uint32_t trSize, uint32_t blkPos, bool bIsLuma, uint32_t firstSignificanceMapContext);
+
+protected:
+
+    void setChromaQP(int qpin, TextType ttype, int chFmt);
+
+    uint32_t signBitHidingHDQ(int16_t* qcoeff, int32_t* deltaU, uint32_t numSig, const TUEntropyCodingParameters &codingParameters, uint32_t log2TrSize);
+
+    uint32_t rdoQuant(const CUData& cu, int16_t* dstCoeff, uint32_t log2TrSize, TextType ttype, uint32_t absPartIdx, bool usePsy);
+};
+}
+
+#endif // ifndef X265_QUANT_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/scalinglist.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,379 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "scalinglist.h"
+
+namespace {
+// file-anonymous namespace
+
+/* Strings for scaling list file parsing.
+ * NOTE(review): the 32x32 row lists only the two luma matrices; the
+ * remaining entries are zero-filled empty strings, and strstr(line, "")
+ * matches any line -- parseScalingList still iterates all NUM_LISTS */
+const char MatrixType[4][6][20] =
+{
+    {
+        "INTRA4X4_LUMA",
+        "INTRA4X4_CHROMAU",
+        "INTRA4X4_CHROMAV",
+        "INTER4X4_LUMA",
+        "INTER4X4_CHROMAU",
+        "INTER4X4_CHROMAV"
+    },
+    {
+        "INTRA8X8_LUMA",
+        "INTRA8X8_CHROMAU",
+        "INTRA8X8_CHROMAV",
+        "INTER8X8_LUMA",
+        "INTER8X8_CHROMAU",
+        "INTER8X8_CHROMAV"
+    },
+    {
+        "INTRA16X16_LUMA",
+        "INTRA16X16_CHROMAU",
+        "INTRA16X16_CHROMAV",
+        "INTER16X16_LUMA",
+        "INTER16X16_CHROMAU",
+        "INTER16X16_CHROMAV"
+    },
+    {
+        "INTRA32X32_LUMA",
+        "INTER32X32_LUMA",
+    },
+};
+/* Labels for the explicit DC coefficient entries (16x16 and 32x32 only) */
+const char MatrixType_DC[4][12][22] =
+{
+    {
+    },
+    {
+    },
+    {
+        "INTRA16X16_LUMA_DC",
+        "INTRA16X16_CHROMAU_DC",
+        "INTRA16X16_CHROMAV_DC",
+        "INTER16X16_LUMA_DC",
+        "INTER16X16_CHROMAU_DC",
+        "INTER16X16_CHROMAV_DC"
+    },
+    {
+        "INTRA32X32_LUMA_DC",
+        "INTER32X32_LUMA_DC",
+    },
+};
+
+/* flat default 4x4 matrix (all 16) */
+static int quantTSDefault4x4[16] =
+{
+    16, 16, 16, 16,
+    16, 16, 16, 16,
+    16, 16, 16, 16,
+    16, 16, 16, 16
+};
+
+/* HEVC default intra matrix, stored as 8x8; also returned as the default
+ * for 16x16/32x32, where it is expanded by ratio (see setupQuantMatrices) */
+static int quantIntraDefault8x8[64] =
+{
+    16, 16, 16, 16, 17, 18, 21, 24,
+    16, 16, 16, 16, 17, 19, 22, 25,
+    16, 16, 17, 18, 20, 22, 25, 29,
+    16, 16, 18, 21, 24, 27, 31, 36,
+    17, 17, 20, 24, 30, 35, 41, 47,
+    18, 19, 22, 27, 35, 44, 54, 65,
+    21, 22, 25, 31, 41, 54, 70, 88,
+    24, 25, 29, 36, 47, 65, 88, 115
+};
+
+/* HEVC default inter matrix, stored as 8x8 */
+static int quantInterDefault8x8[64] =
+{
+    16, 16, 16, 16, 17, 18, 20, 24,
+    16, 16, 16, 17, 18, 20, 24, 25,
+    16, 16, 17, 18, 20, 24, 25, 28,
+    16, 17, 18, 20, 24, 25, 28, 33,
+    17, 18, 20, 24, 25, 28, 33, 41,
+    18, 20, 24, 25, 28, 33, 41, 54,
+    20, 24, 25, 28, 33, 41, 54, 71,
+    24, 25, 28, 33, 41, 54, 71, 91
+};
+
+}
+
+namespace X265_NS {
+// private namespace
+
+const int     ScalingList::s_numCoefPerSize[NUM_SIZES] = { 16, 64, 256, 1024 }; // coefficients in a 4x4 / 8x8 / 16x16 / 32x32 block
+const int32_t ScalingList::s_quantScales[NUM_REM] = { 26214, 23302, 20560, 18396, 16384, 14564 }; // forward quant scale, indexed by QP % 6
+const int32_t ScalingList::s_invQuantScales[NUM_REM] = { 40, 45, 51, 57, 64, 72 }; // inverse quant scale, indexed by QP % 6
+
+/* null all table pointers so the destructor is safe even if init() was
+ * never called or only partially succeeded */
+ScalingList::ScalingList()
+{
+    memset(m_quantCoef, 0, sizeof(m_quantCoef));
+    memset(m_dequantCoef, 0, sizeof(m_dequantCoef));
+    memset(m_scalingListCoef, 0, sizeof(m_scalingListCoef));
+}
+
+/* allocate the per-size/list coefficient arrays and the per-QP%6 quant and
+ * dequant tables. Returns true only if every allocation succeeded; on
+ * partial failure the destructor releases whatever was allocated */
+bool ScalingList::init()
+{
+    bool ok = true;
+    for (int sizeId = 0; sizeId < NUM_SIZES; sizeId++)
+    {
+        for (int listId = 0; listId < NUM_LISTS; listId++)
+        {
+            // stored matrices are capped at 8x8 (64 coefficients), larger sizes are sub-sampled
+            m_scalingListCoef[sizeId][listId] = X265_MALLOC(int32_t, X265_MIN(MAX_MATRIX_COEF_NUM, s_numCoefPerSize[sizeId]));
+            ok &= !!m_scalingListCoef[sizeId][listId];
+            for (int rem = 0; rem < NUM_REM; rem++)
+            {
+                m_quantCoef[sizeId][listId][rem] = X265_MALLOC(int32_t, s_numCoefPerSize[sizeId]);
+                m_dequantCoef[sizeId][listId][rem] = X265_MALLOC(int32_t, s_numCoefPerSize[sizeId]);
+                ok &= m_quantCoef[sizeId][listId][rem] && m_dequantCoef[sizeId][listId][rem];
+            }
+        }
+    }
+    return ok;
+}
+
+/* release all tables; pointers never allocated are NULL (see constructor) */
+ScalingList::~ScalingList()
+{
+    for (int sizeId = 0; sizeId < NUM_SIZES; sizeId++)
+    {
+        for (int listId = 0; listId < NUM_LISTS; listId++)
+        {
+            X265_FREE(m_scalingListCoef[sizeId][listId]);
+            for (int rem = 0; rem < NUM_REM; rem++)
+            {
+                X265_FREE(m_quantCoef[sizeId][listId][rem]);
+                X265_FREE(m_dequantCoef[sizeId][listId][rem]);
+            }
+        }
+    }
+}
+
+/* returns predicted list index if a match is found, else -1 (matrix must
+ * then be explicitly coded). When predList == list the candidate compared
+ * against is the default matrix, signalling "use default" rather than
+ * inter-list prediction.
+ * NOTE(review): the DC values are only compared when size < BLOCK_16x16,
+ * yet explicit DC values exist only for 16x16/32x32 -- this condition
+ * looks inverted; verify against the HM reference implementation */
+int ScalingList::checkPredMode(int size, int list) const
+{
+    for (int predList = list; predList >= 0; predList--)
+    {
+        // check DC value
+        if (size < BLOCK_16x16 && m_scalingListDC[size][list] != m_scalingListDC[size][predList])
+            continue;
+
+        // check value of matrix
+        if (!memcmp(m_scalingListCoef[size][list],
+                    list == predList ? getScalingListDefaultAddress(size, predList) : m_scalingListCoef[size][predList],
+                    sizeof(int32_t) * X265_MIN(MAX_MATRIX_COEF_NUM, s_numCoefPerSize[size])))
+            return predList;
+    }
+
+    return -1;
+}
+
+/* compare every matrix (and, for 16x16+, its DC value) against the defaults.
+ * NOTE(review): despite the original comment, this returns true when the
+ * count of default-matching lists does NOT equal NUM_LISTS * NUM_SIZES - 4,
+ * i.e. when something deviates from the defaults; the caller uses it as
+ * m_bDataPresent = !checkDefaultScalingList() */
+bool ScalingList::checkDefaultScalingList() const
+{
+    int defaultCounter = 0;
+
+    for (int s = 0; s < NUM_SIZES; s++)
+        for (int l = 0; l < NUM_LISTS; l++)
+            if (!memcmp(m_scalingListCoef[s][l], getScalingListDefaultAddress(s, l),
+                        sizeof(int32_t) * X265_MIN(MAX_MATRIX_COEF_NUM, s_numCoefPerSize[s])) &&
+                ((s < BLOCK_16x16) || (m_scalingListDC[s][l] == 16)))
+                defaultCounter++;
+
+    return defaultCounter != (NUM_LISTS * NUM_SIZES - 4); // -4 for 32x32 (only two lists defined at that size)
+}
+
+/* get address of default quantization matrix; 16x16 and 32x32 defaults are
+ * the 8x8 tables (stored matrices hold at most 64 coefficients and are
+ * expanded by ratio in setupQuantMatrices). Returns NULL on bad sizeId */
+const int32_t* ScalingList::getScalingListDefaultAddress(int sizeId, int listId) const
+{
+    switch (sizeId)
+    {
+    case BLOCK_4x4:
+        return quantTSDefault4x4;
+    case BLOCK_8x8:
+        return (listId < 3) ? quantIntraDefault8x8 : quantInterDefault8x8;
+    case BLOCK_16x16:
+        return (listId < 3) ? quantIntraDefault8x8 : quantInterDefault8x8;
+    case BLOCK_32x32:
+        // 32x32 has only two lists: list 0 is intra luma, list 1 is inter luma
+        return (listId < 1) ? quantIntraDefault8x8 : quantInterDefault8x8;
+    default:
+        break;
+    }
+
+    X265_CHECK(0, "invalid scaling list size\n");
+    return NULL;
+}
+
+/* copy the default matrix (and default DC of 16) into one size/list slot.
+ * NOTE(review): "Marix" typo kept -- the name is declared in scalinglist.h
+ * and renaming it here alone would break the build */
+void ScalingList::processDefaultMarix(int sizeId, int listId)
+{
+    memcpy(m_scalingListCoef[sizeId][listId], getScalingListDefaultAddress(sizeId, listId), sizeof(int) * X265_MIN(MAX_MATRIX_COEF_NUM, s_numCoefPerSize[sizeId]));
+    m_scalingListDC[sizeId][listId] = SCALING_LIST_DC;
+}
+
+/* reset every matrix to the HEVC defaults: scaling lists enabled, but no
+ * explicit list data needs to be signalled */
+void ScalingList::setDefaultScalingList()
+{
+    for (int sizeId = 0; sizeId < NUM_SIZES; sizeId++)
+        for (int listId = 0; listId < NUM_LISTS; listId++)
+            processDefaultMarix(sizeId, listId);
+    m_bEnabled = true;
+    m_bDataPresent = false;
+}
+
+/* Load custom quantization matrices from a text file.
+ * Returns false on success, true on failure (inverted sense -- kept for
+ * compatibility with existing callers). The file is rescanned from the
+ * start for each expected matrix label.
+ * FIX: every error path after a successful fopen() previously returned
+ * without fclose(fp), leaking the FILE handle. */
+bool ScalingList::parseScalingList(const char* filename)
+{
+    FILE *fp = fopen(filename, "r");
+    if (!fp)
+    {
+        x265_log(NULL, X265_LOG_ERROR, "can't open scaling list file %s\n", filename);
+        return true;
+    }
+
+    char line[1024];
+    int32_t *src = NULL;
+
+    for (int sizeIdc = 0; sizeIdc < NUM_SIZES; sizeIdc++)
+    {
+        int size = X265_MIN(MAX_MATRIX_COEF_NUM, s_numCoefPerSize[sizeIdc]);
+        for (int listIdc = 0; listIdc < NUM_LISTS; listIdc++)
+        {
+            src = m_scalingListCoef[sizeIdc][listIdc];
+
+            // locate the matrix label, scanning from the top of the file
+            fseek(fp, 0, 0);
+            do
+            {
+                char *ret = fgets(line, 1024, fp);
+                if (!ret || (!strstr(line, MatrixType[sizeIdc][listIdc]) && feof(fp)))
+                {
+                    x265_log(NULL, X265_LOG_ERROR, "can't read matrix from %s\n", filename);
+                    fclose(fp); // was leaked on this path
+                    return true;
+                }
+            }
+            while (!strstr(line, MatrixType[sizeIdc][listIdc]));
+
+            for (int i = 0; i < size; i++)
+            {
+                int data;
+                if (fscanf(fp, "%d,", &data) != 1)
+                {
+                    x265_log(NULL, X265_LOG_ERROR, "can't read matrix from %s\n", filename);
+                    fclose(fp); // was leaked on this path
+                    return true;
+                }
+                src[i] = data;
+            }
+
+            // set DC value for default matrix check
+            m_scalingListDC[sizeIdc][listIdc] = src[0];
+
+            if (sizeIdc > BLOCK_8x8)
+            {
+                // 16x16 and larger carry an explicit DC coefficient entry
+                fseek(fp, 0, 0);
+                do
+                {
+                    char *ret = fgets(line, 1024, fp);
+                    if (!ret || (!strstr(line, MatrixType_DC[sizeIdc][listIdc]) && feof(fp)))
+                    {
+                        x265_log(NULL, X265_LOG_ERROR, "can't read DC from %s\n", filename);
+                        fclose(fp); // was leaked on this path
+                        return true;
+                    }
+                }
+                while (!strstr(line, MatrixType_DC[sizeIdc][listIdc]));
+
+                int data;
+                if (fscanf(fp, "%d,", &data) != 1)
+                {
+                    x265_log(NULL, X265_LOG_ERROR, "can't read matrix from %s\n", filename);
+                    fclose(fp); // was leaked on this path
+                    return true;
+                }
+
+                // overwrite DC value when size of matrix is larger than 16x16
+                m_scalingListDC[sizeIdc][listIdc] = data;
+            }
+        }
+    }
+
+    fclose(fp);
+
+    m_bEnabled = true;
+    m_bDataPresent = !checkDefaultScalingList();
+
+    return false;
+}
+
+/** set quantized matrix coefficient for encode.
+ * Builds, for every size/list/QP%6 combination, the quant and dequant
+ * coefficient tables: expanded from the stored (at most 8x8) scaling
+ * matrices when scaling lists are enabled, flat otherwise. */
+void ScalingList::setupQuantMatrices()
+{
+    for (int size = 0; size < NUM_SIZES; size++)
+    {
+        int width = 1 << (size + 2);                               // 4, 8, 16, 32
+        int ratio = width / X265_MIN(MAX_MATRIX_SIZE_NUM, width);  // expansion factor from the stored matrix
+        int stride = X265_MIN(MAX_MATRIX_SIZE_NUM, width);         // row stride of the stored matrix
+        int count = s_numCoefPerSize[size];
+
+        for (int list = 0; list < NUM_LISTS; list++)
+        {
+            int32_t *coeff = m_scalingListCoef[size][list];
+            int32_t dc = m_scalingListDC[size][list];
+
+            for (int rem = 0; rem < NUM_REM; rem++)
+            {
+                int32_t *quantCoeff   = m_quantCoef[size][list][rem];
+                int32_t *dequantCoeff = m_dequantCoef[size][list][rem];
+
+                if (m_bEnabled)
+                {
+                    processScalingListEnc(coeff, quantCoeff, s_quantScales[rem] << 4, width, width, ratio, stride, dc);
+                    processScalingListDec(coeff, dequantCoeff, s_invQuantScales[rem], width, width, ratio, stride, dc);
+                }
+                else
+                {
+                    /* flat quant and dequant coefficients */
+                    for (int i = 0; i < count; i++)
+                    {
+                        quantCoeff[i] = s_quantScales[rem];
+                        dequantCoeff[i] = s_invQuantScales[rem];
+                    }
+                }
+            }
+        }
+    }
+}
+
+/* expand a stored (<= 8x8) scaling matrix into a full width x height forward
+ * quant table: each output entry is quantScales / coeff, with every stored
+ * entry repeated 'ratio' times in each direction; the explicit DC value
+ * replaces entry 0 when the matrix was sub-sampled (ratio > 1) */
+void ScalingList::processScalingListEnc(int32_t *coeff, int32_t *quantcoeff, int32_t quantScales, int height, int width,
+                                        int ratio, int stride, int32_t dc)
+{
+    for (int j = 0; j < height; j++)
+        for (int i = 0; i < width; i++)
+            quantcoeff[j * width + i] = quantScales / coeff[stride * (j / ratio) + i / ratio];
+
+    if (ratio > 1)
+        quantcoeff[0] = quantScales / dc;
+}
+
+/* expand a stored (<= 8x8) scaling matrix into a full width x height inverse
+ * quant table: each output entry is invQuantScales * coeff (multiply instead
+ * of divide, see processScalingListEnc); DC replaces entry 0 when ratio > 1 */
+void ScalingList::processScalingListDec(int32_t *coeff, int32_t *dequantcoeff, int32_t invQuantScales, int height, int width,
+                                        int ratio, int stride, int32_t dc)
+{
+    for (int j = 0; j < height; j++)
+        for (int i = 0; i < width; i++)
+            dequantcoeff[j * width + i] = invQuantScales * coeff[stride * (j / ratio) + i / ratio];
+
+    if (ratio > 1)
+        dequantcoeff[0] = invQuantScales * dc;
+}
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/scalinglist.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,80 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_SCALINGLIST_H
+#define X265_SCALINGLIST_H
+
+#include "common.h"
+
+namespace X265_NS {
+// private namespace
+
+/* HEVC quantization scaling lists: storage, file parsing, defaults, and the
+ * derived per-QP quant/dequant coefficient tables */
+class ScalingList
+{
+public:
+
+    enum { NUM_SIZES = 4 };            // 4x4, 8x8, 16x16, 32x32
+    enum { NUM_LISTS = 6 };            // number of quantization matrix lists (YUV * inter/intra)
+    enum { NUM_REM = 6 };              // number of remainders of QP/6
+    enum { MAX_MATRIX_COEF_NUM = 64 }; // max coefficient number per quantization matrix
+    enum { MAX_MATRIX_SIZE_NUM = 8 };  // max size number for quantization matrix
+
+    static const int     s_numCoefPerSize[NUM_SIZES];
+    static const int32_t s_invQuantScales[NUM_REM];
+    static const int32_t s_quantScales[NUM_REM];
+
+    int32_t  m_scalingListDC[NUM_SIZES][NUM_LISTS];   // explicit DC value of the matrix coefficient, meaningful for 16x16/32x32
+    int32_t* m_scalingListCoef[NUM_SIZES][NUM_LISTS]; // quantization matrix (stored sub-sampled to at most 8x8)
+
+    int32_t* m_quantCoef[NUM_SIZES][NUM_LISTS][NUM_REM];   // array of quantization matrix coefficient 4x4
+    int32_t* m_dequantCoef[NUM_SIZES][NUM_LISTS][NUM_REM]; // array of dequantization matrix coefficient 4x4
+
+    bool     m_bEnabled;
+    bool     m_bDataPresent; // non-default scaling lists must be signaled
+
+    ScalingList();
+    ~ScalingList();
+
+    bool     init();                                  // returns true on success
+    void     setDefaultScalingList();
+    bool     parseScalingList(const char* filename);  // NOTE: returns true on FAILURE (inverted sense)
+    void     setupQuantMatrices();
+
+    /* used during SPS coding */
+    int      checkPredMode(int sizeId, int listId) const;
+
+protected:
+
+    static const int SCALING_LIST_DC = 16;    // default DC value
+
+    const int32_t* getScalingListDefaultAddress(int sizeId, int listId) const;
+    void     processDefaultMarix(int sizeId, int listId); // sic: "Marix" typo is part of the established name
+    bool     checkDefaultScalingList() const;
+
+    void     processScalingListEnc(int32_t *coeff, int32_t *quantcoeff, int32_t quantScales, int height, int width, int ratio, int stride, int32_t dc);
+    void     processScalingListDec(int32_t *coeff, int32_t *dequantcoeff, int32_t invQuantScales, int height, int width, int ratio, int stride, int32_t dc);
+};
+
+}
+
+#endif // ifndef X265_SCALINGLIST_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/shortyuv.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,131 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Deepthi Nandakumar <deepthi@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com
+ *****************************************************************************/
+
+#include "common.h"
+#include "yuv.h"
+#include "shortyuv.h"
+#include "primitives.h"
+
+#include "x265.h"
+
+using namespace X265_NS;
+
+/* no allocation until create(); destroy() is safe on a default-constructed object */
+ShortYuv::ShortYuv()
+{
+    m_buf[0] = NULL;
+    m_buf[1] = NULL;
+    m_buf[2] = NULL;
+}
+
+/* Allocate one contiguous int16_t buffer holding a size x size luma plane
+ * followed by the two chroma planes (omitted for i400/monochrome).
+ * Returns true on success, false on allocation failure.
+ * FIX: the i400 branch previously left m_hChromaShift/m_vChromaShift
+ * uninitialized (they are read by getChromaAddrOffset), and the check
+ * message lacked the trailing newline used by every other check here. */
+bool ShortYuv::create(uint32_t size, int csp)
+{
+    m_csp = csp;
+    m_size = size;
+
+    size_t sizeL = size * size;
+    size_t sizeC;
+    if (m_csp != X265_CSP_I400) {
+        m_hChromaShift = CHROMA_H_SHIFT(csp);
+        m_vChromaShift = CHROMA_V_SHIFT(csp);
+        m_csize = size >> m_hChromaShift;
+        sizeC = sizeL >> (m_hChromaShift + m_vChromaShift);
+        X265_CHECK((sizeC & 15) == 0, "invalid size\n");
+    } else {
+        /* initialize the shifts so getChromaAddrOffset() never reads
+         * uninitialized members on a monochrome buffer */
+        m_hChromaShift = 0;
+        m_vChromaShift = 0;
+        m_csize = 0;
+        sizeC = 0;
+    }
+
+    CHECKED_MALLOC(m_buf[0], int16_t, sizeL + sizeC * 2);
+    if (m_csp != X265_CSP_I400) {
+        m_buf[1] = m_buf[0] + sizeL;
+        m_buf[2] = m_buf[0] + sizeL + sizeC;
+    } else {
+        /* clear any stale chroma pointers from a previous create() */
+        m_buf[1] = m_buf[2] = NULL;
+    }
+    return true;
+
+fail:
+    return false;
+}
+
+/* single allocation: the chroma planes alias into the luma buffer (see create) */
+void ShortYuv::destroy()
+{
+    X265_FREE(m_buf[0]);
+}
+
+/* zero all planes present (chroma skipped for i400) */
+void ShortYuv::clear()
+{
+    memset(m_buf[0], 0, (m_size  * m_size) *  sizeof(int16_t));
+    if (m_csp != X265_CSP_I400) {
+        memset(m_buf[1], 0, (m_csize * m_csize) * sizeof(int16_t));
+        memset(m_buf[2], 0, (m_csize * m_csize) * sizeof(int16_t));
+    }
+}
+
+/* per-plane residual: this = srcYuv0 - srcYuv1, via the size-indexed
+ * sub_ps primitives (chroma skipped for i400) */
+void ShortYuv::subtract(const Yuv& srcYuv0, const Yuv& srcYuv1, uint32_t log2Size)
+{
+    const int sizeIdx = log2Size - 2;
+    primitives.cu[sizeIdx].sub_ps(m_buf[0], m_size, srcYuv0.m_buf[0], srcYuv1.m_buf[0], srcYuv0.m_size, srcYuv1.m_size);
+    if (m_csp != X265_CSP_I400) {
+        primitives.chroma[m_csp].cu[sizeIdx].sub_ps(m_buf[1], m_csize, srcYuv0.m_buf[1], srcYuv1.m_buf[1], srcYuv0.m_csize, srcYuv1.m_csize);
+        primitives.chroma[m_csp].cu[sizeIdx].sub_ps(m_buf[2], m_csize, srcYuv0.m_buf[2], srcYuv1.m_buf[2], srcYuv0.m_csize, srcYuv1.m_csize);
+    }
+}
+
+/* copy the luma sub-block at absPartIdx into the same position of another
+ * int16_t buffer (short-to-short copy) */
+void ShortYuv::copyPartToPartLuma(ShortYuv& dstYuv, uint32_t absPartIdx, uint32_t log2Size) const
+{
+    const int16_t* src = getLumaAddr(absPartIdx);
+    int16_t* dst = dstYuv.getLumaAddr(absPartIdx);
+
+    primitives.cu[log2Size - 2].copy_ss(dst, dstYuv.m_size, src, m_size);
+}
+
+/* copy the luma sub-block at absPartIdx into a pixel buffer
+ * (short-to-pixel copy) */
+void ShortYuv::copyPartToPartLuma(Yuv& dstYuv, uint32_t absPartIdx, uint32_t log2Size) const
+{
+    const int16_t* src = getLumaAddr(absPartIdx);
+    pixel* dst = dstYuv.getLumaAddr(absPartIdx);
+
+    primitives.cu[log2Size - 2].copy_sp(dst, dstYuv.m_size, src, m_size);
+}
+
+/* copy both chroma sub-blocks at absPartIdx into another int16_t buffer;
+ * log2SizeL is the LUMA block size, the chroma primitive is selected via
+ * partitionFromLog2Size */
+void ShortYuv::copyPartToPartChroma(ShortYuv& dstYuv, uint32_t absPartIdx, uint32_t log2SizeL) const
+{
+    int part = partitionFromLog2Size(log2SizeL);
+    const int16_t* srcU = getCbAddr(absPartIdx);
+    const int16_t* srcV = getCrAddr(absPartIdx);
+    int16_t* dstU = dstYuv.getCbAddr(absPartIdx);
+    int16_t* dstV = dstYuv.getCrAddr(absPartIdx);
+
+    primitives.chroma[m_csp].cu[part].copy_ss(dstU, dstYuv.m_csize, srcU, m_csize);
+    primitives.chroma[m_csp].cu[part].copy_ss(dstV, dstYuv.m_csize, srcV, m_csize);
+}
+
+/* copy both chroma sub-blocks at absPartIdx into a pixel buffer;
+ * log2SizeL is the LUMA block size (see the ShortYuv overload above) */
+void ShortYuv::copyPartToPartChroma(Yuv& dstYuv, uint32_t absPartIdx, uint32_t log2SizeL) const
+{
+    int part = partitionFromLog2Size(log2SizeL);
+    const int16_t* srcU = getCbAddr(absPartIdx);
+    const int16_t* srcV = getCrAddr(absPartIdx);
+    pixel* dstU = dstYuv.getCbAddr(absPartIdx);
+    pixel* dstV = dstYuv.getCrAddr(absPartIdx);
+
+    primitives.chroma[m_csp].cu[part].copy_sp(dstU, dstYuv.m_csize, srcU, m_csize);
+    primitives.chroma[m_csp].cu[part].copy_sp(dstV, dstYuv.m_csize, srcV, m_csize);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/shortyuv.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,93 @@
+/*****************************************************************************
+ * x265: ShortYUV class for short sized YUV-style frames
+ *****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Deepthi Nandakumar <deepthi@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com
+ *****************************************************************************/
+
+#ifndef X265_SHORTYUV_H
+#define X265_SHORTYUV_H
+
+#include "common.h"
+
+namespace X265_NS {
+// private namespace
+
+class Yuv;
+
+/* A ShortYuv instance holds int16_ts for a square CU (64x64 down to 8x8) for all three planes,
+ * these are typically used to hold residual or coefficients */
+/* A ShortYuv instance holds int16_ts for a square CU (64x64 down to 8x8) for all three planes,
+ * these are typically used to hold residual or coefficients */
+class ShortYuv
+{
+public:
+
+    int16_t* m_buf[3];   // plane pointers; [1] and [2] alias into the single allocation behind [0]
+
+    uint32_t m_size;     // luma block width/height in samples
+    uint32_t m_csize;    // chroma block width/height (0 for i400)
+
+    int      m_csp;
+    int      m_hChromaShift;
+    int      m_vChromaShift;
+
+    ShortYuv();
+
+    bool create(uint32_t size, int csp);
+    void destroy();
+    void clear();
+
+    int16_t* getLumaAddr(uint32_t absPartIdx)                       { return m_buf[0] + getAddrOffset(absPartIdx, m_size); }
+    int16_t* getCbAddr(uint32_t absPartIdx)                         { return m_buf[1] + getChromaAddrOffset(absPartIdx); }
+    int16_t* getCrAddr(uint32_t absPartIdx)                         { return m_buf[2] + getChromaAddrOffset(absPartIdx); }
+    int16_t* getChromaAddr(uint32_t chromaId, uint32_t partUnitIdx) { return m_buf[chromaId] + getChromaAddrOffset(partUnitIdx); }
+
+    const int16_t* getLumaAddr(uint32_t absPartIdx) const                       { return m_buf[0] + getAddrOffset(absPartIdx, m_size); }
+    const int16_t* getCbAddr(uint32_t absPartIdx) const                         { return m_buf[1] + getChromaAddrOffset(absPartIdx); }
+    const int16_t* getCrAddr(uint32_t absPartIdx) const                         { return m_buf[2] + getChromaAddrOffset(absPartIdx); }
+    const int16_t* getChromaAddr(uint32_t chromaId, uint32_t partUnitIdx) const { return m_buf[chromaId] + getChromaAddrOffset(partUnitIdx); }
+
+    void subtract(const Yuv& srcYuv0, const Yuv& srcYuv1, uint32_t log2Size);
+
+    void copyPartToPartLuma(ShortYuv& dstYuv, uint32_t absPartIdx, uint32_t log2Size) const;
+    void copyPartToPartChroma(ShortYuv& dstYuv, uint32_t absPartIdx, uint32_t log2SizeL) const;
+
+    void copyPartToPartLuma(Yuv& dstYuv, uint32_t absPartIdx, uint32_t log2Size) const;
+    void copyPartToPartChroma(Yuv& dstYuv, uint32_t absPartIdx, uint32_t log2SizeL) const;
+
+    /* NOTE(review): reads m_hChromaShift/m_vChromaShift -- confirm create()
+     * initializes them for i400 before using any chroma accessor there */
+    int getChromaAddrOffset(uint32_t idx) const
+    {
+        int blkX = g_zscanToPelX[idx] >> m_hChromaShift;
+        int blkY = g_zscanToPelY[idx] >> m_vChromaShift;
+
+        return blkX + blkY * m_csize;
+    }
+
+    // z-scan part index -> sample offset within a plane of the given width
+    static int getAddrOffset(uint32_t idx, uint32_t width)
+    {
+        int blkX = g_zscanToPelX[idx];
+        int blkY = g_zscanToPelY[idx];
+
+        return blkX + blkY * width;
+    }
+};
+}
+
+#endif // ifndef X265_SHORTYUV_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/slice.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,206 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "frame.h"
+#include "piclist.h"
+#include "picyuv.h"
+#include "slice.h"
+
+using namespace X265_NS;
+
/* Construct the L0/L1 reference frame lists for this slice from the decoded
 * picture list, as dictated by the slice's reference picture set (m_rps) */
void Slice::setRefPicList(PicList& picList)
{
    if (m_sliceType == I_SLICE)
    {
        /* intra slices reference no other frames */
        memset(m_refFrameList, 0, sizeof(m_refFrameList));
        memset(m_refReconPicList, 0, sizeof(m_refReconPicList));
        memset(m_refPOCList, 0, sizeof(m_refPOCList));
        m_numRefIdx[1] = m_numRefIdx[0] = 0;
        return;
    }

    Frame* refPic = NULL;
    Frame* refPicSetStCurr0[MAX_NUM_REF]; /* short-term refs preceding the current POC */
    Frame* refPicSetStCurr1[MAX_NUM_REF]; /* short-term refs following the current POC */
    Frame* refPicSetLtCurr[MAX_NUM_REF];  /* long-term refs; never populated here */
    int numPocStCurr0 = 0;
    int numPocStCurr1 = 0;
    int numPocLtCurr = 0; /* stays 0: long-term references are not used */
    int i;

    /* collect the used negative-delta (past) pictures, in RPS order */
    for (i = 0; i < m_rps.numberOfNegativePictures; i++)
    {
        if (m_rps.bUsed[i])
        {
            refPic = picList.getPOC(m_poc + m_rps.deltaPOC[i]);
            refPicSetStCurr0[numPocStCurr0] = refPic;
            numPocStCurr0++;
        }
    }

    /* then the used positive-delta (future) pictures */
    for (; i < m_rps.numberOfNegativePictures + m_rps.numberOfPositivePictures; i++)
    {
        if (m_rps.bUsed[i])
        {
            refPic = picList.getPOC(m_poc + m_rps.deltaPOC[i]);
            refPicSetStCurr1[numPocStCurr1] = refPic;
            numPocStCurr1++;
        }
    }

    X265_CHECK(m_rps.numberOfPictures == m_rps.numberOfNegativePictures + m_rps.numberOfPositivePictures,
               "unexpected picture in RPS\n");

    // ref_pic_list_init: list 0 favors past pictures, list 1 favors future ones
    Frame* rpsCurrList0[MAX_NUM_REF + 1];
    Frame* rpsCurrList1[MAX_NUM_REF + 1];
    int numPocTotalCurr = numPocStCurr0 + numPocStCurr1 + numPocLtCurr;

    int cIdx = 0;
    for (i = 0; i < numPocStCurr0; i++, cIdx++)
        rpsCurrList0[cIdx] = refPicSetStCurr0[i];

    for (i = 0; i < numPocStCurr1; i++, cIdx++)
        rpsCurrList0[cIdx] = refPicSetStCurr1[i];

    for (i = 0; i < numPocLtCurr; i++, cIdx++)
        rpsCurrList0[cIdx] = refPicSetLtCurr[i];

    X265_CHECK(cIdx == numPocTotalCurr, "RPS index check fail\n");

    if (m_sliceType == B_SLICE)
    {
        /* list 1 uses the opposite ordering: future pictures first */
        cIdx = 0;
        for (i = 0; i < numPocStCurr1; i++, cIdx++)
            rpsCurrList1[cIdx] = refPicSetStCurr1[i];

        for (i = 0; i < numPocStCurr0; i++, cIdx++)
            rpsCurrList1[cIdx] = refPicSetStCurr0[i];

        for (i = 0; i < numPocLtCurr; i++, cIdx++)
            rpsCurrList1[cIdx] = refPicSetLtCurr[i];

        X265_CHECK(cIdx == numPocTotalCurr, "RPS index check fail\n");
    }

    /* fill list 0 up to the requested active ref count; the modulo wraps over
     * the unique references, so a reference may legally appear more than once */
    for (int rIdx = 0; rIdx < m_numRefIdx[0]; rIdx++)
    {
        cIdx = rIdx % numPocTotalCurr;
        X265_CHECK(cIdx >= 0 && cIdx < numPocTotalCurr, "RPS index check fail\n");
        m_refFrameList[0][rIdx] = rpsCurrList0[cIdx];
    }

    if (m_sliceType != B_SLICE)
    {
        /* P slices use only list 0 */
        m_numRefIdx[1] = 0;
        memset(m_refFrameList[1], 0, sizeof(m_refFrameList[1]));
    }
    else
    {
        for (int rIdx = 0; rIdx < m_numRefIdx[1]; rIdx++)
        {
            cIdx = rIdx % numPocTotalCurr;
            X265_CHECK(cIdx >= 0 && cIdx < numPocTotalCurr, "RPS index check fail\n");
            m_refFrameList[1][rIdx] = rpsCurrList1[cIdx];
        }
    }

    /* cache the POC of every selected reference frame */
    for (int dir = 0; dir < 2; dir++)
        for (int numRefIdx = 0; numRefIdx < m_numRefIdx[dir]; numRefIdx++)
            m_refPOCList[dir][numRefIdx] = m_refFrameList[dir][numRefIdx]->m_poc;
}
+
+void Slice::disableWeights()
+{
+    for (int l = 0; l < 2; l++)
+        for (int i = 0; i < MAX_NUM_REF; i++)
+            for (int yuv = 0; yuv < 3; yuv++)
+            {
+                WeightParam& wp = m_weightPredTable[l][i][yuv];
+                wp.bPresentFlag = false;
+                wp.log2WeightDenom = 0;
+                wp.inputWeight = 1;
+                wp.inputOffset = 0;
+            }
+}
+
+/* Sorts the deltaPOC and Used by current values in the RPS based on the
+ * deltaPOC values.  deltaPOC values are sorted with -ve values before the +ve
+ * values.  -ve values are in decreasing order.  +ve values are in increasing
+ * order */
+void RPS::sortDeltaPOC()
+{
+    // sort in increasing order (smallest first)
+    for (int j = 1; j < numberOfPictures; j++)
+    {
+        int dPOC = deltaPOC[j];
+        bool used = bUsed[j];
+        for (int k = j - 1; k >= 0; k--)
+        {
+            int temp = deltaPOC[k];
+            if (dPOC < temp)
+            {
+                deltaPOC[k + 1] = temp;
+                bUsed[k + 1] = bUsed[k];
+                deltaPOC[k] = dPOC;
+                bUsed[k] = used;
+            }
+        }
+    }
+
+    // flip the negative values to largest first
+    int numNegPics = numberOfNegativePictures;
+    for (int j = 0, k = numNegPics - 1; j < numNegPics >> 1; j++, k--)
+    {
+        int dPOC = deltaPOC[j];
+        bool used = bUsed[j];
+        deltaPOC[j] = deltaPOC[k];
+        bUsed[j] = bUsed[k];
+        deltaPOC[k] = dPOC;
+        bUsed[k] = used;
+    }
+}
+
/* Convert an exclusive end-CU address (CTU raster index * NUM_4x4_PARTITIONS
 * plus a z-order partition offset) into the address of the true slice end,
 * skipping 4x4 partitions of a boundary CTU that lie outside the picture */
uint32_t Slice::realEndAddress(uint32_t endCUAddr) const
{
    // Calculate end address
    uint32_t internalAddress = (endCUAddr - 1) % NUM_4x4_PARTITIONS; // z-order partition within the CTU
    uint32_t externalAddress = (endCUAddr - 1) / NUM_4x4_PARTITIONS; // CTU raster address
    // pel extent of the picture measured from this CTU's origin
    uint32_t xmax = m_sps->picWidthInLumaSamples - (externalAddress % m_sps->numCuInWidth) * g_maxCUSize;
    uint32_t ymax = m_sps->picHeightInLumaSamples - (externalAddress / m_sps->numCuInWidth) * g_maxCUSize;

    // step back over partitions whose origin falls outside the picture
    while (g_zscanToPelX[internalAddress] >= xmax || g_zscanToPelY[internalAddress] >= ymax)
        internalAddress--;

    internalAddress++; // make the address exclusive again
    if (internalAddress == NUM_4x4_PARTITIONS)
    {
        // the whole CTU is inside the picture; roll over to the next CTU
        internalAddress = 0;
        externalAddress++;
    }

    return externalAddress * NUM_4x4_PARTITIONS + internalAddress;
}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/slice.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,391 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_SLICE_H
+#define X265_SLICE_H
+
+#include "common.h"
+
+namespace X265_NS {
+// private namespace
+
+class Frame;
+class PicList;
+class PicYuv;
+class MotionReference;
+
/* Slice coding types, in the order HEVC assigns slice_type codes
 * (B = 0, P = 1, I = 2) */
enum SliceType
{
    B_SLICE,
    P_SLICE,
    I_SLICE
};
+
/* Reference Picture Set: describes the pictures (as POC deltas relative to
 * the current picture) available for reference prediction */
struct RPS
{
    int  numberOfPictures;         // total entries (negative + positive)
    int  numberOfNegativePictures; // entries with POC before the current picture
    int  numberOfPositivePictures; // entries with POC after the current picture

    int  poc[MAX_NUM_REF_PICS];      // absolute POC of each entry
    int  deltaPOC[MAX_NUM_REF_PICS]; // POC delta of each entry vs. the current picture
    bool bUsed[MAX_NUM_REF_PICS];    // entry is used for reference by the current picture

    RPS()
        : numberOfPictures(0)
        , numberOfNegativePictures(0)
        , numberOfPositivePictures(0)
    {
        memset(deltaPOC, 0, sizeof(deltaPOC));
        memset(poc, 0, sizeof(poc));
        memset(bUsed, 0, sizeof(bUsed));
    }

    /* order entries: negatives descending, then positives ascending (slice.cpp) */
    void sortDeltaPOC();
};
+
/* HEVC profile identifiers (general_profile_idc, Annex A) */
namespace Profile {
    enum Name
    {
        NONE = 0,
        MAIN = 1,
        MAIN10 = 2,
        MAINSTILLPICTURE = 3,
        MAINREXT = 4,
        HIGHTHROUGHPUTREXT = 5
    };
}

/* HEVC tier/level identifiers; Name values are level number * 30
 * (e.g. level 5.1 -> 153) */
namespace Level {
    enum Tier
    {
        MAIN = 0,
        HIGH = 1,
    };

    enum Name
    {
        NONE = 0,
        LEVEL1 = 30,
        LEVEL2 = 60,
        LEVEL2_1 = 63,
        LEVEL3 = 90,
        LEVEL3_1 = 93,
        LEVEL4 = 120,
        LEVEL4_1 = 123,
        LEVEL5 = 150,
        LEVEL5_1 = 153,
        LEVEL5_2 = 156,
        LEVEL6 = 180,
        LEVEL6_1 = 183,
        LEVEL6_2 = 186,
        LEVEL8_5 = 255,
    };
}
+
/* Fields of the profile_tier_level() syntax structure (HEVC Annex A) */
struct ProfileTierLevel
{
    bool     tierFlag;
    bool     progressiveSourceFlag;
    bool     interlacedSourceFlag;
    bool     nonPackedConstraintFlag;
    bool     frameOnlyConstraintFlag;
    bool     profileCompatibilityFlag[32];
    bool     intraConstraintFlag;
    bool     onePictureOnlyConstraintFlag;
    bool     lowerBitRateConstraintFlag;
    int      profileIdc;
    int      levelIdc;
    uint32_t minCrForLevel;
    uint32_t maxLumaSrForLevel;
    uint32_t bitDepthConstraint;
    int      chromaFormatConstraint;
};

/* Hypothetical Reference Decoder buffering parameters */
struct HRDInfo
{
    uint32_t bitRateScale;
    uint32_t cpbSizeScale;
    uint32_t initialCpbRemovalDelayLength;
    uint32_t cpbRemovalDelayLength;
    uint32_t dpbOutputDelayLength;
    uint32_t bitRateValue;
    uint32_t cpbSizeValue;
    bool     cbrFlag;

    /* NOTE(review): bitRateValue and cpbSizeValue are intentionally left
     * uninitialized by this constructor -- presumably filled in before use;
     * confirm against the encoder's HRD setup */
    HRDInfo()
        : bitRateScale(0)
        , cpbSizeScale(0)
        , initialCpbRemovalDelayLength(1)
        , cpbRemovalDelayLength(1)
        , dpbOutputDelayLength(1)
        , cbrFlag(false)
    {
    }
};

/* VUI timing: per HEVC, frame rate = timeScale / numUnitsInTick */
struct TimingInfo
{
    uint32_t numUnitsInTick;
    uint32_t timeScale;
};

/* Video Parameter Set fields retained by the encoder */
struct VPS
{
    uint32_t         maxTempSubLayers;
    uint32_t         numReorderPics;
    uint32_t         maxDecPicBuffering;
    uint32_t         maxLatencyIncrease;
    HRDInfo          hrdParameters;
    ProfileTierLevel ptl;
};
+
/* Conformance/display cropping window; offsets are in luma samples and are
 * only meaningful when bEnabled is true */
struct Window
{
    bool bEnabled;
    int  leftOffset;
    int  rightOffset;
    int  topOffset;
    int  bottomOffset;

    Window()
    {
        /* previously only bEnabled was initialized, leaving the four offsets
         * indeterminate; zero them so a default-constructed (disabled) window
         * always reads as "no cropping" */
        bEnabled = false;
        leftOffset = rightOffset = topOffset = bottomOffset = 0;
    }
};
+
/* Video Usability Information: optional SPS metadata describing how decoded
 * pictures should be interpreted and displayed (HEVC Annex E) */
struct VUI
{
    bool       aspectRatioInfoPresentFlag;
    int        aspectRatioIdc;
    int        sarWidth;   // sample aspect ratio, used when aspectRatioIdc selects explicit SAR
    int        sarHeight;

    bool       overscanInfoPresentFlag;
    bool       overscanAppropriateFlag;

    bool       videoSignalTypePresentFlag;
    int        videoFormat;
    bool       videoFullRangeFlag;

    bool       colourDescriptionPresentFlag;
    int        colourPrimaries;
    int        transferCharacteristics;
    int        matrixCoefficients;

    bool       chromaLocInfoPresentFlag;
    int        chromaSampleLocTypeTopField;
    int        chromaSampleLocTypeBottomField;

    Window     defaultDisplayWindow;

    bool       frameFieldInfoPresentFlag;
    bool       fieldSeqFlag;

    bool       hrdParametersPresentFlag;
    HRDInfo    hrdParameters;

    TimingInfo timingInfo;
};
+
/* Sequence Parameter Set: stream geometry and coding-tool flags.
 * Fields marked "use param" are copied directly from x265_param. */
struct SPS
{
    /* cached PicYuv offset arrays, shared by all instances of
     * PicYuv created by this encoder; owned (and freed) by the SPS */
    intptr_t* cuOffsetY;
    intptr_t* cuOffsetC;
    intptr_t* buOffsetY;
    intptr_t* buOffsetC;

    int      chromaFormatIdc;        // use param
    uint32_t picWidthInLumaSamples;  // use param
    uint32_t picHeightInLumaSamples; // use param

    uint32_t numCuInWidth;
    uint32_t numCuInHeight;
    uint32_t numCUsInFrame;
    uint32_t numPartitions;
    uint32_t numPartInCUSize;

    int      log2MinCodingBlockSize;
    int      log2DiffMaxMinCodingBlockSize;

    uint32_t quadtreeTULog2MaxSize;
    uint32_t quadtreeTULog2MinSize;

    uint32_t quadtreeTUMaxDepthInter; // use param
    uint32_t quadtreeTUMaxDepthIntra; // use param

    bool     bUseSAO; // use param
    bool     bUseAMP; // use param
    uint32_t maxAMPDepth;

    uint32_t maxTempSubLayers;   // max number of Temporal Sub layers
    uint32_t maxDecPicBuffering; // these are dups of VPS values
    uint32_t maxLatencyIncrease;
    int      numReorderPics;

    bool     bUseStrongIntraSmoothing; // use param
    bool     bTemporalMVPEnabled;

    Window   conformanceWindow;
    VUI      vuiParameters;

    SPS()
    {
        /* NOTE(review): this memset runs after the member constructors, so it
         * also zeroes vuiParameters.hrdParameters (whose HRDInfo() defaults
         * several lengths to 1) and the embedded Windows; presumably all of
         * these are re-populated by the encoder before use -- confirm */
        memset(this, 0, sizeof(*this));
    }

    ~SPS()
    {
        /* release the cached offset arrays */
        X265_FREE(cuOffsetY);
        X265_FREE(cuOffsetC);
        X265_FREE(buOffsetY);
        X265_FREE(buOffsetC);
    }
};
+
/* Picture Parameter Set: per-picture coding-tool flags.
 * Fields marked "use param" are copied directly from x265_param. */
struct PPS
{
    uint32_t maxCuDQPDepth; // depth at which delta-QP may be signalled

    int      chromaQpOffset[2];      // use param

    bool     bUseWeightPred;         // use param
    bool     bUseWeightedBiPred;     // use param
    bool     bUseDQP;
    bool     bConstrainedIntraPred;  // use param

    bool     bTransquantBypassEnabled;  // Indicates presence of cu_transquant_bypass_flag in CUs.
    bool     bTransformSkipEnabled;     // use param
    bool     bEntropyCodingSyncEnabled; // use param
    bool     bSignHideEnabled;          // use param

    bool     bDeblockingFilterControlPresent;
    bool     bPicDisableDeblockingFilter;
    int      deblockingFilterBetaOffsetDiv2;
    int      deblockingFilterTcOffsetDiv2;
};
+
/* Explicit weighted-prediction parameters for one list/ref/plane, as parsed
 * from (or written to) the slice header */
struct WeightParam
{
    bool     bPresentFlag;    // weights are explicitly signalled
    uint32_t log2WeightDenom; // log2 of the weight denominator
    int      inputWeight;
    int      inputOffset;

    /* Convert a generic fixed-point weight (i.e. fix7) into a legal h265
     * weight: optionally halve weight and denominator together until the
     * weight fits, then clamp the weight to 127 */
    void setFromWeightAndOffset(int w, int o, int denom, bool bNormalize)
    {
        inputOffset = o;
        log2WeightDenom = denom;
        inputWeight = w;

        if (bNormalize)
        {
            while (log2WeightDenom > 0 && inputWeight > 127)
            {
                log2WeightDenom--;
                inputWeight >>= 1;
            }
        }

        if (inputWeight > 127)
            inputWeight = 127;
    }
};
+
/* Convenience initializer for a WeightParam: weight (s), log2 denominator
 * (d), offset (o) and the present flag (b) in one statement */
#define SET_WEIGHT(w, b, s, d, o) \
    { \
        (w).inputWeight = (s); \
        (w).log2WeightDenom = (d); \
        (w).inputOffset = (o); \
        (w).bPresentFlag = (b); \
    }
+
/* Per-slice coding state: parameter-set pointers, reference picture lists,
 * weighted-prediction tables and slice-level flags */
class Slice
{
public:

    const SPS*  m_sps;
    const PPS*  m_pps;
    WeightParam m_weightPredTable[2][MAX_NUM_REF][3]; // [list][refIdx][0:Y, 1:U, 2:V]
    MotionReference (*m_mref)[MAX_NUM_REF + 1];
    RPS         m_rps;

    NalUnitType m_nalUnitType;
    SliceType   m_sliceType;
    int         m_sliceQp;
    int         m_poc;

    int         m_lastIDR;

    bool        m_bCheckLDC;       // TODO: is this necessary?
    bool        m_sLFaseFlag;      // loop filter boundary flag
    bool        m_colFromL0Flag;   // collocated picture from List0 or List1 flag
    uint32_t    m_colRefIdx;       // never modified

    int         m_numRefIdx[2];    // active reference count per list (see setRefPicList)
    Frame*      m_refFrameList[2][MAX_NUM_REF + 1];
    PicYuv*     m_refReconPicList[2][MAX_NUM_REF + 1];
    int         m_refPOCList[2][MAX_NUM_REF + 1];

    uint32_t    m_maxNumMergeCand; // use param
    uint32_t    m_endCUAddr;

    Slice()
    {
        m_lastIDR = 0;
        m_sLFaseFlag = true;
        m_numRefIdx[0] = m_numRefIdx[1] = 0;
        memset(m_refFrameList, 0, sizeof(m_refFrameList));
        memset(m_refReconPicList, 0, sizeof(m_refReconPicList));
        memset(m_refPOCList, 0, sizeof(m_refPOCList));
        disableWeights();
    }

    /* reset the weighted-prediction table to identity weights (slice.cpp) */
    void disableWeights();

    /* build the L0/L1 reference lists from the RPS (slice.cpp) */
    void setRefPicList(PicList& picList);

    /* random-access point: IDR or CRA slice */
    bool getRapPicFlag() const
    {
        return m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR_W_RADL
            || m_nalUnitType == NAL_UNIT_CODED_SLICE_CRA;
    }

    bool getIdrPicFlag() const
    {
        return m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR_W_RADL;
    }

    /* NAL unit types 16..23 form the HEVC IRAP range */
    bool isIRAP() const   { return m_nalUnitType >= 16 && m_nalUnitType <= 23; }

    bool isIntra()  const { return m_sliceType == I_SLICE; }

    bool isInterB() const { return m_sliceType == B_SLICE; }

    bool isInterP() const { return m_sliceType == P_SLICE; }

    /* last partition address actually inside the picture (slice.cpp) */
    uint32_t realEndAddress(uint32_t endCUAddr) const;
};
+
+}
+
+#endif // ifndef X265_SLICE_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/threading.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,162 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com
+ *****************************************************************************/
+
+#include "common.h"
+#include "threading.h"
+#include "cpu.h"
+
+namespace X265_NS {
+// x265 private namespace
+
#if X265_ARCH_X86 && !defined(X86_64) && ENABLE_ASSEMBLY && defined(__GNUC__)
/* 32-bit x86 with GCC: route the call through an asm shim that realigns the
 * stack before invoking func (the 32-bit ABI does not guarantee the
 * alignment the SIMD code paths need) */
extern "C" intptr_t PFX(stack_align)(void (*func)(), ...);
#define STACK_ALIGN(func, ...) PFX(stack_align)((void (*)())func, __VA_ARGS__)
#else
/* everywhere else the stack is already sufficiently aligned */
#define STACK_ALIGN(func, ...) func(__VA_ARGS__)
#endif
+
#if NO_ATOMICS
/* Mutex-serialized stand-ins for the compiler atomic intrinsics used by
 * threading.h when no native atomics are available. Every operation shares
 * one global mutex, trading throughput for portability. */
pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;

/* atomically OR mask into *ptr; returns the PREVIOUS value */
int no_atomic_or(int* ptr, int mask)
{
    pthread_mutex_lock(&g_mutex);
    int ret = *ptr;
    *ptr |= mask;
    pthread_mutex_unlock(&g_mutex);
    return ret;
}

/* atomically AND mask into *ptr; returns the PREVIOUS value */
int no_atomic_and(int* ptr, int mask)
{
    pthread_mutex_lock(&g_mutex);
    int ret = *ptr;
    *ptr &= mask;
    pthread_mutex_unlock(&g_mutex);
    return ret;
}

/* atomically increment *ptr; returns the UPDATED value */
int no_atomic_inc(int* ptr)
{
    pthread_mutex_lock(&g_mutex);
    *ptr += 1;
    int ret = *ptr;
    pthread_mutex_unlock(&g_mutex);
    return ret;
}

/* atomically decrement *ptr; returns the UPDATED value */
int no_atomic_dec(int* ptr)
{
    pthread_mutex_lock(&g_mutex);
    *ptr -= 1;
    int ret = *ptr;
    pthread_mutex_unlock(&g_mutex);
    return ret;
}

/* atomically add val to *ptr; returns the UPDATED value.
 * NOTE(review): the native ATOMIC_ADD branches in threading.h use
 * __sync_fetch_and_add / InterlockedExchangeAdd, which return the PREVIOUS
 * value -- callers should not rely on ATOMIC_ADD's return value; confirm */
int no_atomic_add(int* ptr, int val)
{
    pthread_mutex_lock(&g_mutex);
    *ptr += val;
    int ret = *ptr;
    pthread_mutex_unlock(&g_mutex);
    return ret;
}
#endif
+
/* C shim for forced stack alignment: plain-function thread entry body that
 * STACK_ALIGN can target */
static void stackAlignMain(Thread *instance)
{
    // defer processing to the virtual function implemented in the derived class
    instance->threadMain();
}
+
#if _WIN32

/* Win32 thread entry point: run the virtual threadMain() (via the
 * stack-alignment shim) and return a zero exit code */
static DWORD WINAPI ThreadShim(Thread *instance)
{
    STACK_ALIGN(stackAlignMain, instance);

    return 0;
}
+
+bool Thread::start()
+{
+    DWORD threadId;
+
+    thread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)ThreadShim, this, 0, &threadId);
+
+    return threadId > 0;
+}
+
/* Block until the thread exits; no-op if start() never succeeded */
void Thread::stop()
{
    if (thread)
        WaitForSingleObject(thread, INFINITE);
}

Thread::~Thread()
{
    /* release the Win32 thread handle */
    if (thread)
        CloseHandle(thread);
}
+
+#else /* POSIX / pthreads */
+
/* pthread entry point: recover the Thread object and run its threadMain() */
static void *ThreadShim(void *opaque)
{
    // defer processing to the virtual function implemented in the derived class
    Thread *instance = reinterpret_cast<Thread *>(opaque);

    STACK_ALIGN(stackAlignMain, instance);

    return NULL;
}
+
+bool Thread::start()
+{
+    if (pthread_create(&thread, NULL, ThreadShim, this))
+    {
+        thread = 0;
+        return false;
+    }
+
+    return true;
+}
+
/* Block until the thread exits; a zero handle means start() never succeeded */
void Thread::stop()
{
    if (thread)
        pthread_join(thread, NULL);
}

/* pthreads needs no explicit handle cleanup after join */
Thread::~Thread() {}

#endif // if _WIN32

/* handle stays zero until start() succeeds */
Thread::Thread()
{
    thread = 0;
}
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/threading.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,491 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com
+ *****************************************************************************/
+
+#ifndef X265_THREADING_H
+#define X265_THREADING_H
+
+#include "common.h"
+#include "x265.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#include "winxp.h"  // XP workarounds for CONDITION_VARIABLE and ATOMIC_OR
+#else
+#include <pthread.h>
+#include <semaphore.h>
+#include <errno.h>
+#include <fcntl.h>
+#endif
+
+#if MACOS
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#endif
+
#if NO_ATOMICS

#include <sys/time.h>
#include <unistd.h>

namespace X265_NS {
// x265 private namespace
/* mutex-based fallbacks, defined in threading.cpp */
int no_atomic_or(int* ptr, int mask);
int no_atomic_and(int* ptr, int mask);
int no_atomic_inc(int* ptr);
int no_atomic_dec(int* ptr);
int no_atomic_add(int* ptr, int val);
}

/* CLZ/CTZ report the index of the highest/lowest set bit; ATOMIC_OR/AND
 * return the previous value, ATOMIC_INC/DEC the updated value.
 * NOTE(review): ATOMIC_ADD returns the updated value in this branch but the
 * previous value in the GCC/MSVC branches below -- callers should not rely
 * on ATOMIC_ADD's return value; confirm */
#define CLZ(id, x)            id = (unsigned long)__builtin_clz(x) ^ 31
#define CTZ(id, x)            id = (unsigned long)__builtin_ctz(x)
#define ATOMIC_OR(ptr, mask)  no_atomic_or((int*)ptr, mask)
#define ATOMIC_AND(ptr, mask) no_atomic_and((int*)ptr, mask)
#define ATOMIC_INC(ptr)       no_atomic_inc((int*)ptr)
#define ATOMIC_DEC(ptr)       no_atomic_dec((int*)ptr)
#define ATOMIC_ADD(ptr, val)  no_atomic_add((int*)ptr, val)
#define GIVE_UP_TIME()        usleep(0)

#elif __GNUC__               /* GCCs builtin atomics */

#include <sys/time.h>
#include <unistd.h>

/* fetch_and_* return the previous value; add_and_fetch the updated value */
#define CLZ(id, x)            id = (unsigned long)__builtin_clz(x) ^ 31
#define CTZ(id, x)            id = (unsigned long)__builtin_ctz(x)
#define ATOMIC_OR(ptr, mask)  __sync_fetch_and_or(ptr, mask)
#define ATOMIC_AND(ptr, mask) __sync_fetch_and_and(ptr, mask)
#define ATOMIC_INC(ptr)       __sync_add_and_fetch((volatile int32_t*)ptr, 1)
#define ATOMIC_DEC(ptr)       __sync_add_and_fetch((volatile int32_t*)ptr, -1)
#define ATOMIC_ADD(ptr, val)  __sync_fetch_and_add((volatile int32_t*)ptr, val)
#define GIVE_UP_TIME()        usleep(0)

#elif defined(_MSC_VER)       /* Windows atomic intrinsics */

#include <intrin.h>

#define CLZ(id, x)            _BitScanReverse(&id, x)
#define CTZ(id, x)            _BitScanForward(&id, x)
#define ATOMIC_INC(ptr)       InterlockedIncrement((volatile LONG*)ptr)
#define ATOMIC_DEC(ptr)       InterlockedDecrement((volatile LONG*)ptr)
#define ATOMIC_ADD(ptr, val)  InterlockedExchangeAdd((volatile LONG*)ptr, val)
#define ATOMIC_OR(ptr, mask)  _InterlockedOr((volatile LONG*)ptr, (LONG)mask)
#define ATOMIC_AND(ptr, mask) _InterlockedAnd((volatile LONG*)ptr, (LONG)mask)
#define GIVE_UP_TIME()        Sleep(0)

#endif // ifdef __GNUC__
+
+namespace X265_NS {
+// x265 private namespace
+
+#ifdef _WIN32
+
+typedef HANDLE ThreadHandle;
+
/* Mutual-exclusion lock backed by a Win32 critical section */
class Lock
{
public:

    Lock()
    {
        InitializeCriticalSection(&this->handle);
    }

    ~Lock()
    {
        DeleteCriticalSection(&this->handle);
    }

    /* blocks until the lock is owned by the calling thread */
    void acquire()
    {
        EnterCriticalSection(&this->handle);
    }

    void release()
    {
        LeaveCriticalSection(&this->handle);
    }

protected:

    CRITICAL_SECTION handle;
};
+
/* Binary signaling event; CreateEvent is called with bManualReset=FALSE so
 * each trigger() releases at most one waiter and the event auto-resets */
class Event
{
public:

    Event()
    {
        this->handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    }

    ~Event()
    {
        CloseHandle(this->handle);
    }

    /* block until the event is signaled */
    void wait()
    {
        WaitForSingleObject(this->handle, INFINITE);
    }

    bool timedWait(uint32_t milliseconds)
    {
        /* returns true if the wait timed out */
        return WaitForSingleObject(this->handle, milliseconds) == WAIT_TIMEOUT;
    }

    /* signal one waiting thread (or the next thread to wait) */
    void trigger()
    {
        SetEvent(this->handle);
    }

protected:

    HANDLE handle;
};
+
/* This class is intended for use in signaling state changes safely between CPU
 * cores. One thread should be a writer and multiple threads may be readers. The
 * mutex's main purpose is to serve as a memory fence to ensure writes made by
 * the writer thread are visible prior to readers seeing the m_val change. Its
 * secondary purpose is for use with the condition variable for blocking waits */
class ThreadSafeInteger
{
public:

    ThreadSafeInteger()
    {
        m_val = 0;
        InitializeCriticalSection(&m_cs);
        InitializeConditionVariable(&m_cv);
    }

    ~ThreadSafeInteger()
    {
        DeleteCriticalSection(&m_cs);
        /* XP_CONDITION_VAR_FREE comes from winxp.h (XP compatibility) */
        XP_CONDITION_VAR_FREE(&m_cv);
    }

    /* Block while m_val still equals 'prev', then return the current value.
     * NOTE(review): a single 'if' check means a spurious wakeup can return
     * with the value unchanged; callers appear expected to re-check -- confirm */
    int waitForChange(int prev)
    {
        EnterCriticalSection(&m_cs);
        if (m_val == prev)
            SleepConditionVariableCS(&m_cv, &m_cs, INFINITE);
        LeaveCriticalSection(&m_cs);
        return m_val;
    }

    /* read the current value under the lock (acts as a memory fence) */
    int get()
    {
        EnterCriticalSection(&m_cs);
        int ret = m_val;
        LeaveCriticalSection(&m_cs);
        return ret;
    }

    /* publish a new value and wake all waiters */
    void set(int newval)
    {
        EnterCriticalSection(&m_cs);
        m_val = newval;
        WakeAllConditionVariable(&m_cv);
        LeaveCriticalSection(&m_cs);
    }

    void poke(void)
    {
        /* awaken all waiting threads, but make no change */
        EnterCriticalSection(&m_cs);
        WakeAllConditionVariable(&m_cv);
        LeaveCriticalSection(&m_cs);
    }

    /* increment the value and wake all waiters */
    void incr()
    {
        EnterCriticalSection(&m_cs);
        m_val++;
        WakeAllConditionVariable(&m_cv);
        LeaveCriticalSection(&m_cs);
    }

protected:

    CRITICAL_SECTION   m_cs;
    CONDITION_VARIABLE m_cv;
    int                m_val;
};
+
+#else /* POSIX / pthreads */
+
+typedef pthread_t ThreadHandle;
+
/* Mutual-exclusion lock: a minimal wrapper over a pthread mutex */
class Lock
{
public:

    Lock()  { pthread_mutex_init(&handle, NULL); }

    ~Lock() { pthread_mutex_destroy(&handle); }

    /* blocks until the mutex is owned by the calling thread */
    void acquire() { pthread_mutex_lock(&handle); }

    void release() { pthread_mutex_unlock(&handle); }

protected:

    pthread_mutex_t handle;
};
+
/* Counting-semaphore style event built from a mutex and condition variable:
 * trigger() increments the counter (saturating at UINT_MAX) and wakes one
 * waiter; wait()/timedWait() block until the counter is nonzero, then
 * decrement it */
class Event
{
public:

    Event()
    {
        m_counter = 0;
        if (pthread_mutex_init(&m_mutex, NULL) ||
            pthread_cond_init(&m_cond, NULL))
        {
            x265_log(NULL, X265_LOG_ERROR, "fatal: unable to initialize conditional variable\n");
        }
    }

    ~Event()
    {
        pthread_cond_destroy(&m_cond);
        pthread_mutex_destroy(&m_mutex);
    }

    void wait()
    {
        pthread_mutex_lock(&m_mutex);

        /* blocking wait on conditional variable, mutex is atomically released
         * while blocked. When condition is signaled, mutex is re-acquired */
        while (!m_counter)
            pthread_cond_wait(&m_cond, &m_mutex);

        m_counter--;
        pthread_mutex_unlock(&m_mutex);
    }

    /* returns true if the wait timed out without the event being triggered */
    bool timedWait(uint32_t waitms)
    {
        bool bTimedOut = false;

        pthread_mutex_lock(&m_mutex);
        if (!m_counter)
        {
            /* deadline is computed on the wall clock (gettimeofday), matching
             * the default CLOCK_REALTIME basis of pthread_cond_timedwait */
            struct timeval tv;
            struct timespec ts;
            gettimeofday(&tv, NULL);
            /* convert current time from (sec, usec) to (sec, nsec) */
            ts.tv_sec = tv.tv_sec;
            ts.tv_nsec = tv.tv_usec * 1000;

            ts.tv_nsec += 1000 * 1000 * (waitms % 1000);    /* add ms to tv_nsec */
            ts.tv_sec += ts.tv_nsec / (1000 * 1000 * 1000); /* overflow tv_nsec */
            ts.tv_nsec %= (1000 * 1000 * 1000);             /* clamp tv_nsec */
            ts.tv_sec += waitms / 1000;                     /* add seconds */

            /* blocking wait on conditional variable, mutex is atomically released
             * while blocked. When condition is signaled, mutex is re-acquired.
             * ts is absolute time to stop waiting */
            bTimedOut = pthread_cond_timedwait(&m_cond, &m_mutex, &ts) == ETIMEDOUT;
        }
        /* a trigger that raced the timeout still counts as success */
        if (m_counter > 0)
        {
            m_counter--;
            bTimedOut = false;
        }
        pthread_mutex_unlock(&m_mutex);
        return bTimedOut;
    }

    void trigger()
    {
        pthread_mutex_lock(&m_mutex);
        if (m_counter < UINT_MAX)
            m_counter++;
        /* Signal a single blocking thread */
        pthread_cond_signal(&m_cond);
        pthread_mutex_unlock(&m_mutex);
    }

protected:

    pthread_mutex_t m_mutex;
    pthread_cond_t  m_cond;
    uint32_t        m_counter;
};
+
/* This class is intended for use in signaling state changes safely between CPU
 * cores. One thread should be a writer and multiple threads may be readers. The
 * mutex's main purpose is to serve as a memory fence to ensure writes made by
 * the writer thread are visible prior to readers seeing the m_val change. Its
 * secondary purpose is for use with the condition variable for blocking waits */
class ThreadSafeInteger
{
public:

    ThreadSafeInteger()
    {
        m_val = 0;
        if (pthread_mutex_init(&m_mutex, NULL) ||
            pthread_cond_init(&m_cond, NULL))
        {
            x265_log(NULL, X265_LOG_ERROR, "fatal: unable to initialize conditional variable\n");
        }
    }

    ~ThreadSafeInteger()
    {
        pthread_cond_destroy(&m_cond);
        pthread_mutex_destroy(&m_mutex);
    }

    /* Block while m_val still equals 'prev', then return the current value.
     * NOTE(review): pthread_cond_wait may wake spuriously and only a single
     * 'if' check guards the wait, so this can return with the value still
     * equal to prev; callers appear expected to re-check -- confirm */
    int waitForChange(int prev)
    {
        pthread_mutex_lock(&m_mutex);
        if (m_val == prev)
            pthread_cond_wait(&m_cond, &m_mutex);
        pthread_mutex_unlock(&m_mutex);
        return m_val;
    }

    /* read the current value under the lock (acts as a memory fence) */
    int get()
    {
        pthread_mutex_lock(&m_mutex);
        int ret = m_val;
        pthread_mutex_unlock(&m_mutex);
        return ret;
    }

    /* publish a new value and wake all waiters */
    void set(int newval)
    {
        pthread_mutex_lock(&m_mutex);
        m_val = newval;
        pthread_cond_broadcast(&m_cond);
        pthread_mutex_unlock(&m_mutex);
    }

    void poke(void)
    {
        /* awaken all waiting threads, but make no change */
        pthread_mutex_lock(&m_mutex);
        pthread_cond_broadcast(&m_cond);
        pthread_mutex_unlock(&m_mutex);
    }

    /* increment the value and wake all waiters */
    void incr()
    {
        pthread_mutex_lock(&m_mutex);
        m_val++;
        pthread_cond_broadcast(&m_cond);
        pthread_mutex_unlock(&m_mutex);
    }

protected:

    pthread_mutex_t m_mutex;
    pthread_cond_t  m_cond;
    int             m_val;
};
+
+#endif // ifdef _WIN32
+
/* RAII guard: acquires the given Lock for the lifetime of the object */
class ScopedLock
{
public:

    ScopedLock(Lock &instance) : inst(instance)
    {
        this->inst.acquire();
    }

    ~ScopedLock()
    {
        this->inst.release();
    }

protected:

    // do not allow assignments
    ScopedLock &operator =(const ScopedLock &);

    Lock &inst;
};
+
+// Utility class which adds elapsed time of the scope of the object into the
+// accumulator provided to the constructor
+struct ScopedElapsedTime
+{
+    ScopedElapsedTime(int64_t& accum) : accumlatedTime(accum) { startTime = x265_mdate(); }
+
+    ~ScopedElapsedTime() { accumlatedTime += x265_mdate() - startTime; }
+
+protected:
+
+    int64_t  startTime;
+    int64_t& accumlatedTime;
+
+    // do not allow assignments
+    ScopedElapsedTime &operator =(const ScopedElapsedTime &);
+};
+
//< Simplistic portable thread class.  Shutdown signalling left to derived class
class Thread
{
private:

    ThreadHandle thread; // OS thread handle; zero until start() succeeds

public:

    Thread();

    virtual ~Thread();

    //< Derived class must implement ThreadMain.
    virtual void threadMain() = 0;

    //< Returns true if thread was successfully created
    bool start();

    //< Blocks until the thread (if started) has exited
    void stop();
};
+} // end namespace X265_NS
+
+#endif // ifndef X265_THREADING_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/threadpool.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,545 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com
+ *****************************************************************************/
+
+#include "common.h"
+#include "threadpool.h"
+#include "threading.h"
+
+#include <new>
+
+#if X86_64
+
+#ifdef __GNUC__
+
+#define SLEEPBITMAP_CTZ(id, x)     id = (unsigned long)__builtin_ctzll(x)
+#define SLEEPBITMAP_OR(ptr, mask)  __sync_fetch_and_or(ptr, mask)
+#define SLEEPBITMAP_AND(ptr, mask) __sync_fetch_and_and(ptr, mask)
+
+#elif defined(_MSC_VER)
+
+#define SLEEPBITMAP_CTZ(id, x)     _BitScanForward64(&id, x)
+/* BUG FIX: the mask must be widened to LONG64, not LONG.  Casting to the
+ * 32-bit LONG truncated the 64-bit sleep bitmap, corrupting sleep/owner
+ * state for worker thread ids >= 32 on Win64 builds */
+#define SLEEPBITMAP_OR(ptr, mask)  InterlockedOr64((volatile LONG64*)ptr, (LONG64)mask)
+#define SLEEPBITMAP_AND(ptr, mask) InterlockedAnd64((volatile LONG64*)ptr, (LONG64)mask)
+
+#endif // ifdef __GNUC__
+
+#else
+
+/* use 32-bit primitives defined in threading.h */
+#define SLEEPBITMAP_CTZ CTZ
+#define SLEEPBITMAP_OR  ATOMIC_OR
+#define SLEEPBITMAP_AND ATOMIC_AND
+
+#endif
+
+#if MACOS
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#endif
+#if HAVE_LIBNUMA
+#include <numa.h>
+#endif
+
+namespace X265_NS {
+// x265 private namespace
+
+/* One pool worker.  Runs the scheduling loop in threadMain(); providers and
+ * bond masters rouse a sleeping worker by triggering its wake event */
+class WorkerThread : public Thread
+{
+private:
+
+    ThreadPool&  m_pool;       // owning pool
+    int          m_id;         // this worker's index within the pool
+    Event        m_wakeEvent;  // signalled to wake this worker from sleep
+
+    WorkerThread& operator =(const WorkerThread&);  // non-assignable (reference member)
+
+public:
+
+    JobProvider*     m_curJobProvider;  // provider this worker currently services
+    BondedTaskGroup* m_bondMaster;      // non-NULL while bonded to a task group
+
+    WorkerThread(ThreadPool& pool, int id) : m_pool(pool), m_id(id) {}
+    virtual ~WorkerThread() {}
+
+    void threadMain();
+    void awaken()           { m_wakeEvent.trigger(); }
+};
+
+/* Worker thread entry point: repeatedly drain jobs from the current job
+ * provider (or run bonded task-group work), then publish this worker's bit in
+ * m_pool.m_sleepBitmap and sleep until re-awakened.  Loop exits when the
+ * pool clears m_isActive */
+void WorkerThread::threadMain()
+{
+    THREAD_NAME("Worker", m_id);
+
+#if _WIN32
+    SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_BELOW_NORMAL);
+#else
+    /* lower scheduling priority; cast-to-unused silences the warn-unused-result */
+    __attribute__((unused)) int val = nice(10);
+#endif
+
+    m_pool.setCurrentThreadAffinity();
+
+    /* this worker's bit within the pool-wide sleep/owner bitmaps */
+    sleepbitmap_t idBit = (sleepbitmap_t)1 << m_id;
+    m_curJobProvider = m_pool.m_jpTable[0];
+    m_bondMaster = NULL;
+
+    /* register with the first provider, advertise as sleeping, and wait for
+     * the initial wake-up before entering the work loop */
+    SLEEPBITMAP_OR(&m_curJobProvider->m_ownerBitmap, idBit);
+    SLEEPBITMAP_OR(&m_pool.m_sleepBitmap, idBit);
+    m_wakeEvent.wait();
+
+    while (m_pool.m_isActive)
+    {
+        if (m_bondMaster)
+        {
+            /* a peer bonded this worker to a task group while it slept; run
+             * the group's tasks, then signal the master that we exited */
+            m_bondMaster->processTasks(m_id);
+            m_bondMaster->m_exitedPeerCount.incr();
+            m_bondMaster = NULL;
+        }
+
+        do
+        {
+            /* do pending work for current job provider */
+            m_curJobProvider->findJob(m_id);
+
+            /* if the current job provider still wants help, only switch to a
+             * higher priority provider (lower slice type). Else take the first
+             * available job provider with the highest priority */
+            int curPriority = (m_curJobProvider->m_helpWanted) ? m_curJobProvider->m_sliceType :
+                                                                 INVALID_SLICE_PRIORITY + 1;
+            int nextProvider = -1;
+            for (int i = 0; i < m_pool.m_numProviders; i++)
+            {
+                if (m_pool.m_jpTable[i]->m_helpWanted &&
+                    m_pool.m_jpTable[i]->m_sliceType < curPriority)
+                {
+                    nextProvider = i;
+                    curPriority = m_pool.m_jpTable[i]->m_sliceType;
+                }
+            }
+            if (nextProvider != -1 && m_curJobProvider != m_pool.m_jpTable[nextProvider])
+            {
+                /* migrate ownership: drop this worker's bit from the old
+                 * provider's bitmap, then add it to the new provider's */
+                SLEEPBITMAP_AND(&m_curJobProvider->m_ownerBitmap, ~idBit);
+                m_curJobProvider = m_pool.m_jpTable[nextProvider];
+                SLEEPBITMAP_OR(&m_curJobProvider->m_ownerBitmap, idBit);
+            }
+        }
+        while (m_curJobProvider->m_helpWanted);
+
+        /* While the worker sleeps, a job-provider or bond-group may acquire this
+         * worker's sleep bitmap bit. Once acquired, that thread may modify
+         * m_bondMaster or m_curJobProvider, then waken the thread */
+        SLEEPBITMAP_OR(&m_pool.m_sleepBitmap, idBit);
+        m_wakeEvent.wait();
+    }
+
+    /* publish the sleep bit one last time so stopWorkers() can observe that
+     * this thread has left the work loop before joining it */
+    SLEEPBITMAP_OR(&m_pool.m_sleepBitmap, idBit);
+}
+
+/* Wake one sleeping worker to service this provider.  Workers already listed
+ * in m_ownerBitmap are preferred (they last worked for this provider); any
+ * sleeping pool thread is the fallback.  If none is available, set
+ * m_helpWanted so busy workers migrate here when they next look for work */
+void JobProvider::tryWakeOne()
+{
+    int id = m_pool->tryAcquireSleepingThread(m_ownerBitmap, ALL_POOL_THREADS);
+    if (id < 0)
+    {
+        m_helpWanted = true;
+        return;
+    }
+
+    WorkerThread& worker = m_pool->m_workers[id];
+    if (worker.m_curJobProvider != this) /* poaching */
+    {
+        /* re-home the poached worker: move its ownership bit from its old
+         * provider's bitmap to this one's before waking it */
+        sleepbitmap_t bit = (sleepbitmap_t)1 << id;
+        SLEEPBITMAP_AND(&worker.m_curJobProvider->m_ownerBitmap, ~bit);
+        worker.m_curJobProvider = this;
+        SLEEPBITMAP_OR(&worker.m_curJobProvider->m_ownerBitmap, bit);
+    }
+    worker.awaken();
+}
+
+/* Atomically claim one sleeping worker: first from firstTryBitmap, then from
+ * secondTryBitmap.  A worker is claimed by clearing its sleep bit with an
+ * atomic AND; the claim succeeds only if the bit was still set in the value
+ * returned by the atomic op (another thread may race us).  Returns the
+ * claimed worker id, or -1 if no candidate could be claimed */
+int ThreadPool::tryAcquireSleepingThread(sleepbitmap_t firstTryBitmap, sleepbitmap_t secondTryBitmap)
+{
+    unsigned long id;
+    const sleepbitmap_t tryMaps[2] = { firstTryBitmap, secondTryBitmap };
+
+    for (int attempt = 0; attempt < 2; attempt++)
+    {
+        sleepbitmap_t candidates = m_sleepBitmap & tryMaps[attempt];
+        while (candidates)
+        {
+            SLEEPBITMAP_CTZ(id, candidates);
+
+            sleepbitmap_t bit = (sleepbitmap_t)1 << id;
+            if (SLEEPBITMAP_AND(&m_sleepBitmap, ~bit) & bit)
+                return (int)id;
+
+            /* lost the race for this bit; re-sample and retry */
+            candidates = m_sleepBitmap & tryMaps[attempt];
+        }
+    }
+
+    return -1;
+}
+
+/* Claim sleeping workers from peerBitmap, bond each to the given task group,
+ * and wake it.  At least one acquisition is always attempted (matching the
+ * original do/while); returns the number of workers actually bonded */
+int ThreadPool::tryBondPeers(int maxPeers, sleepbitmap_t peerBitmap, BondedTaskGroup& master)
+{
+    int bondCount = 0;
+    for (;;)
+    {
+        int id = tryAcquireSleepingThread(peerBitmap, 0);
+        if (id < 0)
+            break;
+
+        m_workers[id].m_bondMaster = &master;
+        m_workers[id].awaken();
+        bondCount++;
+
+        if (bondCount >= maxPeers)
+            break;
+    }
+
+    return bondCount;
+}
+
+/* Allocate one or more thread pools sized to the machine's processor
+ * topology and the user's --pools string (p->numaPools).  On success returns
+ * an array of numPools pools allocated with new[] (release with delete[]);
+ * on failure returns NULL with numPools set to 0 */
+ThreadPool* ThreadPool::allocThreadPools(x265_param* p, int& numPools)
+{
+    enum { MAX_NODE_NUM = 127 };
+    int cpusPerNode[MAX_NODE_NUM + 1];
+    int threadsPerPool[MAX_NODE_NUM + 2];
+    uint32_t nodeMaskPerPool[MAX_NODE_NUM + 2];
+
+    memset(cpusPerNode, 0, sizeof(cpusPerNode));
+    memset(threadsPerPool, 0, sizeof(threadsPerPool));
+    memset(nodeMaskPerPool, 0, sizeof(nodeMaskPerPool));
+
+    int numNumaNodes = X265_MIN(getNumaNodeCount(), MAX_NODE_NUM);
+    int cpuCount = getCpuCount();
+    bool bNumaSupport = false;
+
+#if defined(_WIN32_WINNT) && _WIN32_WINNT >= _WIN32_WINNT_WIN7
+    bNumaSupport = true;
+#elif HAVE_LIBNUMA
+    bNumaSupport = numa_available() >= 0;
+#endif
+
+    /* count logical cores per NUMA node (all in node 0 without NUMA support) */
+    for (int i = 0; i < cpuCount; i++)
+    {
+#if defined(_WIN32_WINNT) && _WIN32_WINNT >= _WIN32_WINNT_WIN7
+        UCHAR node;
+        if (GetNumaProcessorNode((UCHAR)i, &node))
+            cpusPerNode[X265_MIN(node, (UCHAR)MAX_NODE_NUM)]++;
+        else
+#elif HAVE_LIBNUMA
+        /* BUG FIX: bNumaSupport is a bool, so the original test
+         * 'bNumaSupport >= 0' was always true and queried libnuma even when
+         * numa_available() had reported no support */
+        if (bNumaSupport)
+            cpusPerNode[X265_MIN(numa_node_of_cpu(i), MAX_NODE_NUM)]++;
+        else
+#endif
+            cpusPerNode[0]++;
+    }
+
+    if (bNumaSupport && p->logLevel >= X265_LOG_DEBUG)
+        for (int i = 0; i < numNumaNodes; i++)
+            x265_log(p, X265_LOG_DEBUG, "detected NUMA node %d with %d logical cores\n", i, cpusPerNode[i]);
+
+    /* limit threads based on param->numaPools */
+    if (p->numaPools && *p->numaPools)
+    {
+        /* parse the comma-separated per-node spec: '-' = no threads,
+         * '*' = all remaining nodes pooled together, '+' = node added to the
+         * shared pool, a number = capped thread count on that node */
+        const char *nodeStr = p->numaPools;
+        for (int i = 0; i < numNumaNodes; i++)
+        {
+            if (!*nodeStr)
+            {
+                threadsPerPool[i] = 0;
+                continue;
+            }
+            else if (*nodeStr == '-')
+                threadsPerPool[i] = 0;
+            else if (*nodeStr == '*')
+            {
+                for (int j = i; j < numNumaNodes; j++)
+                {
+                    threadsPerPool[numNumaNodes] += cpusPerNode[j];
+                    nodeMaskPerPool[numNumaNodes] |= (1U << j);
+                }
+                break;
+            }
+            else if (*nodeStr == '+')
+            {
+                threadsPerPool[numNumaNodes] += cpusPerNode[i];
+                nodeMaskPerPool[numNumaNodes] = (1U << i);
+            }
+            else
+            {
+                int count = atoi(nodeStr);
+                threadsPerPool[i] = X265_MIN(count, cpusPerNode[i]);
+                nodeMaskPerPool[i] = (1U << i);
+            }
+
+            /* consume current node string, comma, and white-space */
+            while (*nodeStr && *nodeStr != ',')
+               ++nodeStr;
+            if (*nodeStr == ',' || *nodeStr == ' ')
+               ++nodeStr;
+        }
+    }
+    else
+    {
+        /* no spec: one shared pool covering every node */
+        for (int i = 0; i < numNumaNodes; i++)
+        {
+            threadsPerPool[numNumaNodes]  += cpusPerNode[i];
+            nodeMaskPerPool[numNumaNodes] |= (1U << i);
+        }
+    }
+
+    // If the last pool size is > MAX_POOL_THREADS, clip it to spawn thread pools only of size >= 1/2 max (heuristic)
+    if ((threadsPerPool[numNumaNodes] > MAX_POOL_THREADS) &&
+        ((threadsPerPool[numNumaNodes] % MAX_POOL_THREADS) < (MAX_POOL_THREADS / 2)))
+    {
+        threadsPerPool[numNumaNodes] -= (threadsPerPool[numNumaNodes] % MAX_POOL_THREADS);
+        x265_log(p, X265_LOG_DEBUG,
+                 "Creating only %d worker threads beyond specified numbers with --pools (if specified) to prevent asymmetry in pools; may not use all HW contexts\n", threadsPerPool[numNumaNodes]);
+    }
+
+    numPools = 0;
+    for (int i = 0; i < numNumaNodes + 1; i++)
+    {
+        if (bNumaSupport)
+            x265_log(p, X265_LOG_DEBUG, "NUMA node %d may use %d logical cores\n", i, cpusPerNode[i]);
+        if (threadsPerPool[i])
+            numPools += (threadsPerPool[i] + MAX_POOL_THREADS - 1) / MAX_POOL_THREADS;
+    }
+
+    if (!numPools)
+        return NULL;
+
+    if (numPools > p->frameNumThreads)
+    {
+        x265_log(p, X265_LOG_DEBUG, "Reducing number of thread pools for frame thread count\n");
+        numPools = X265_MAX(p->frameNumThreads / 2, 1);
+    }
+
+    ThreadPool *pools = new ThreadPool[numPools];
+    if (pools)
+    {
+        int maxProviders = (p->frameNumThreads + numPools - 1) / numPools + 1; /* +1 is Lookahead, always assigned to threadpool 0 */
+        int node = 0;
+        for (int i = 0; i < numPools; i++)
+        {
+            while (!threadsPerPool[node])
+                node++;
+            int numThreads = X265_MIN(MAX_POOL_THREADS, threadsPerPool[node]);
+            if (!pools[i].create(numThreads, maxProviders, nodeMaskPerPool[node]))
+            {
+                /* BUG FIX: pools was allocated with new[]; releasing it with
+                 * X265_FREE (free()) is undefined behavior and skips the
+                 * ThreadPool destructors.  Use delete[] to match new[] */
+                delete [] pools;
+                numPools = 0;
+                return NULL;
+            }
+            if (numNumaNodes > 1)
+                x265_log(p, X265_LOG_INFO, "Thread pool %d using %d threads with NUMA node mask %lx\n", i, numThreads, nodeMaskPerPool[node]);
+            else
+                x265_log(p, X265_LOG_INFO, "Thread pool created using %d threads\n", numThreads);
+            threadsPerPool[node] -= numThreads;
+        }
+    }
+    else
+        numPools = 0;
+    return pools;
+}
+
+/* Zero-initialize every member.  NOTE(review): memset of *this is only safe
+ * because ThreadPool (see threadpool.h) has no virtual functions and only
+ * trivially-copyable members — keep it that way or replace this with an
+ * explicit initializer list */
+ThreadPool::ThreadPool()
+{
+    memset(this, 0, sizeof(*this));
+}
+
+/* Allocate per-pool resources: resolve the NUMA/cpu affinity mask for the
+ * requested node set, allocate and placement-new the worker array, and
+ * allocate the job-provider table.  Worker threads are NOT launched here
+ * (see start()).  Returns false if either allocation failed */
+bool ThreadPool::create(int numThreads, int maxProviders, uint32_t nodeMask)
+{
+    X265_CHECK(numThreads <= MAX_POOL_THREADS, "a single thread pool cannot have more than MAX_POOL_THREADS threads\n");
+
+#if defined(_WIN32_WINNT) && _WIN32_WINNT >= _WIN32_WINNT_WIN7 
+    /* translate the NUMA node mask into a Win32 processor affinity mask */
+    m_winCpuMask = 0x0;
+    GROUP_AFFINITY groupAffinity;
+    for (int i = 0; i < getNumaNodeCount(); i++)
+    {
+        int numaNode = ((nodeMask >> i) & 0x1U) ? i : -1;
+        if (numaNode != -1)
+            if (GetNumaNodeProcessorMaskEx((USHORT)numaNode, &groupAffinity))
+                m_winCpuMask |= groupAffinity.Mask;
+    }
+    m_numaMask = &m_winCpuMask;
+#elif HAVE_LIBNUMA
+    if (numa_available() >= 0)
+    {
+        struct bitmask* nodemask = numa_allocate_nodemask();
+        if (nodemask)
+        {
+            /* NOTE(review): writes only the first word of the libnuma mask;
+             * assumes fewer NUMA nodes than bits in unsigned long — confirm */
+            *(nodemask->maskp) = nodeMask;
+            m_numaMask = nodemask;
+        }
+        else
+            x265_log(NULL, X265_LOG_ERROR, "unable to get NUMA node mask for %lx\n", nodeMask);
+    }
+#else
+    (void)nodeMask;
+#endif
+
+    m_numWorkers = numThreads;
+
+    m_workers = X265_MALLOC(WorkerThread, numThreads);
+    /* placement new initialization */
+    if (m_workers)
+        for (int i = 0; i < numThreads; i++)
+            new (m_workers + i)WorkerThread(*this, i);
+
+    m_jpTable = X265_MALLOC(JobProvider*, maxProviders);
+    m_numProviders = 0;
+
+    return m_workers && m_jpTable;
+}
+
+/* Launch all worker threads.  On the first failure the pool is marked
+ * inactive and false is returned; previously launched workers are left
+ * running and observe the cleared m_isActive flag */
+bool ThreadPool::start()
+{
+    m_isActive = true;
+
+    for (int w = 0; w < m_numWorkers; w++)
+    {
+        if (!m_workers[w].start())
+        {
+            m_isActive = false;
+            return false;
+        }
+    }
+
+    return true;
+}
+
+/* Shut down all workers: clear m_isActive, then for each worker spin until
+ * it has published its sleep bit (parked on its wake event), wake it so it
+ * observes the cleared flag and exits threadMain(), then stop it */
+void ThreadPool::stopWorkers()
+{
+    if (m_workers)
+    {
+        m_isActive = false;
+        for (int i = 0; i < m_numWorkers; i++)
+        {
+            /* busy-wait until worker i is asleep; GIVE_UP_TIME yields the CPU */
+            while (!(m_sleepBitmap & ((sleepbitmap_t)1 << i)))
+                GIVE_UP_TIME();
+            m_workers[i].awaken();
+            m_workers[i].stop();
+        }
+    }
+}
+
+/* Destructor: run worker destructors explicitly (they were constructed via
+ * placement new into X265_MALLOC storage), free the raw arrays, and release
+ * the libnuma node mask if one was allocated.  NOTE(review): threads are not
+ * stopped here — callers are expected to invoke stopWorkers() first */
+ThreadPool::~ThreadPool()
+{
+    if (m_workers)
+    {
+        for (int i = 0; i < m_numWorkers; i++)
+            m_workers[i].~WorkerThread();
+    }
+
+    X265_FREE(m_workers);
+    X265_FREE(m_jpTable);
+
+#if HAVE_LIBNUMA
+    if(m_numaMask)
+        numa_free_nodemask((struct bitmask*)m_numaMask);
+#endif
+}
+
+/* Bind the calling thread to this pool's NUMA node / cpu mask */
+void ThreadPool::setCurrentThreadAffinity()
+{
+    setThreadNodeAffinity(m_numaMask);
+}
+
+/* static */
+/* Bind the calling thread to the given affinity mask.  On Windows numaMask
+ * points to a DWORD cpu mask; with libnuma it is a struct bitmask of NUMA
+ * nodes (thread execution and memory allocation are both bound); on other
+ * platforms this is a no-op */
+void ThreadPool::setThreadNodeAffinity(void *numaMask)
+{
+#if defined(_WIN32_WINNT) && _WIN32_WINNT >= _WIN32_WINNT_WIN7 
+    if (SetThreadAffinityMask(GetCurrentThread(), (DWORD_PTR)(*((DWORD*)numaMask))))
+        return;
+    else
+        x265_log(NULL, X265_LOG_ERROR, "unable to set thread affinity for NUMA node mask\n");
+#elif HAVE_LIBNUMA
+    if (numa_available() >= 0)
+    {
+        numa_run_on_node_mask((struct bitmask*)numaMask);
+        numa_set_interleave_mask((struct bitmask*)numaMask);
+        numa_set_localalloc();
+        return;
+    }
+    x265_log(NULL, X265_LOG_ERROR, "unable to set thread affinity for NUMA node mask\n");
+#else
+    (void)numaMask;
+#endif
+    return;
+}
+
+/* static */
+/* Number of NUMA nodes on this machine; 1 when NUMA is unavailable */
+int ThreadPool::getNumaNodeCount()
+{
+#if defined(_WIN32_WINNT) && _WIN32_WINNT >= _WIN32_WINNT_WIN7 
+    ULONG highest = 1;
+    if (GetNumaHighestNodeNumber(&highest))
+        return (int)(highest + 1);
+    return (int)highest;
+#elif HAVE_LIBNUMA
+    return (numa_available() >= 0) ? (numa_max_node() + 1) : 1;
+#else
+    return 1;
+#endif
+}
+
+/* static */
+/* Best-effort count of logical CPUs available to the process; defaults to 2
+ * on platforms with no detection mechanism */
+int ThreadPool::getCpuCount()
+{
+#if _WIN32
+    SYSTEM_INFO sysinfo;
+    GetSystemInfo(&sysinfo);
+    return sysinfo.dwNumberOfProcessors;
+#elif __unix__
+    return (int)sysconf(_SC_NPROCESSORS_ONLN);
+#elif MACOS
+    int nm[2];
+    uint32_t count = 0;          /* BUG FIX: was uninitialized if sysctl failed */
+    size_t len = sizeof(count);  /* BUG FIX: was hard-coded 4 */
+
+    nm[0] = CTL_HW;
+    nm[1] = HW_AVAILCPU;
+    if (sysctl(nm, 2, &count, &len, NULL, 0) < 0 || count < 1)
+    {
+        /* HW_AVAILCPU may be unsupported; retry with HW_NCPU.  Reset len,
+         * since a failing sysctl call may have modified it */
+        nm[1] = HW_NCPU;
+        len = sizeof(count);
+        if (sysctl(nm, 2, &count, &len, NULL, 0) < 0 || count < 1)
+            count = 1;
+    }
+
+    return count;
+#else
+    return 2; // default to 2 threads, everywhere else
+#endif
+}
+
+} // end namespace X265_NS
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/threadpool.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,175 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com
+ *****************************************************************************/
+
+#ifndef X265_THREADPOOL_H
+#define X265_THREADPOOL_H
+
+#include "common.h"
+#include "threading.h"
+
+namespace X265_NS {
+// x265 private namespace
+
+class ThreadPool;
+class WorkerThread;
+class BondedTaskGroup;
+
+#if X86_64
+typedef uint64_t sleepbitmap_t;  // one bit per pool worker: up to 64 on x86-64
+#else
+typedef uint32_t sleepbitmap_t;  // 32-bit builds are limited to 32 workers per pool
+#endif
+
+static const sleepbitmap_t ALL_POOL_THREADS = (sleepbitmap_t)-1;  // mask selecting every worker
+enum { MAX_POOL_THREADS = sizeof(sleepbitmap_t) * 8 };
+enum { INVALID_SLICE_PRIORITY = 10 }; // a value larger than any X265_TYPE_* macro
+
+// Frame level job providers. FrameEncoder and Lookahead derive from
+// this class and implement findJob()
+class JobProvider
+{
+public:
+
+    ThreadPool*   m_pool;           // pool this provider draws workers from
+    sleepbitmap_t m_ownerBitmap;    // bits of the workers currently associated with this provider
+    int           m_jpId;           // provider id (presumably an index into the pool's m_jpTable — assigned elsewhere)
+    int           m_sliceType;      // scheduling priority: lower values are serviced first
+    bool          m_helpWanted;     // set by tryWakeOne() when no idle worker was available
+    bool          m_isFrameEncoder; /* rather ugly hack, but nothing better presents itself */
+
+    JobProvider()
+        : m_pool(NULL)
+        , m_ownerBitmap(0)
+        , m_jpId(-1)
+        , m_sliceType(INVALID_SLICE_PRIORITY)
+        , m_helpWanted(false)
+        , m_isFrameEncoder(false)
+    {}
+
+    virtual ~JobProvider() {}
+
+    // Worker threads will call this method to perform work
+    virtual void findJob(int workerThreadId) = 0;
+
+    // Will awaken one idle thread, preferring a thread which most recently
+    // performed work for this provider.
+    void tryWakeOne();
+};
+
+/* A pool of worker threads shared among registered JobProviders.  Sleeping
+ * workers are tracked one-bit-per-worker in m_sleepBitmap and claimed with
+ * atomic bit operations */
+class ThreadPool
+{
+public:
+
+    sleepbitmap_t m_sleepBitmap;   // bit i set while worker i is asleep and claimable
+    int           m_numProviders;  // number of valid entries in m_jpTable
+    int           m_numWorkers;    // number of entries in m_workers
+    void*         m_numaMask; // node mask in linux, cpu mask in windows
+#if defined(_WIN32_WINNT) && _WIN32_WINNT >= _WIN32_WINNT_WIN7 
+    DWORD         m_winCpuMask;    // storage that m_numaMask points at on Windows
+#endif
+    bool          m_isActive;      // cleared to instruct workers to exit their loop
+
+    JobProvider** m_jpTable;       // registered job providers
+    WorkerThread* m_workers;       // worker array (placement-new constructed)
+
+    ThreadPool();
+    ~ThreadPool();
+
+    bool create(int numThreads, int maxProviders, uint32_t nodeMask);
+    bool start();
+    void stopWorkers();
+    void setCurrentThreadAffinity();
+    int  tryAcquireSleepingThread(sleepbitmap_t firstTryBitmap, sleepbitmap_t secondTryBitmap);
+    int  tryBondPeers(int maxPeers, sleepbitmap_t peerBitmap, BondedTaskGroup& master);
+
+    static ThreadPool* allocThreadPools(x265_param* p, int& numPools);
+
+    static int  getCpuCount();
+    static int  getNumaNodeCount();
+    static void setThreadNodeAffinity(void *numaMask);
+};
+
+/* Any worker thread may enlist the help of idle worker threads from the same
+ * job provider. They must derive from this class and implement the
+ * processTasks() method.  To use, an instance must be instantiated by a worker
+ * thread (referred to as the master thread) and then tryBondPeers() must be
+ * called. If it returns non-zero then some number of slave worker threads are
+ * already in the process of calling your processTasks() function. The master
+ * thread should participate and call processTasks() itself. When
+ * waitForExit() returns, all bonded peer threads are guaranteed to have
+ * exited processTasks(). Since the thread count is small, it uses explicit
+ * locking instead of atomic counters and bitmasks */
+class BondedTaskGroup
+{
+public:
+
+    Lock              m_lock;            // for use by derived classes (unused by the base class itself)
+    ThreadSafeInteger m_exitedPeerCount; // incremented by each peer as it leaves processTasks()
+    int               m_bondedPeerCount; // number of peers successfully bonded so far
+    int               m_jobTotal;        // managed by derived classes (unused by the base class)
+    int               m_jobAcquired;     // managed by derived classes (unused by the base class)
+
+    BondedTaskGroup()  { m_bondedPeerCount = m_jobTotal = m_jobAcquired = 0; }
+
+    /* Do not allow the instance to be destroyed before all bonded peers have
+     * exited processTasks() */
+    ~BondedTaskGroup() { waitForExit(); }
+
+    /* Try to enlist the help of idle worker threads most recently associated
+     * with the given job provider and "bond" them to work on your tasks. Up to
+     * maxPeers worker threads will call your processTasks() method. */
+    int tryBondPeers(JobProvider& jp, int maxPeers)
+    {
+        int count = jp.m_pool->tryBondPeers(maxPeers, jp.m_ownerBitmap, *this);
+        m_bondedPeerCount += count;
+        return count;
+    }
+
+    /* Try to enlist the help of any idle worker threads and "bond" them to work
+     * on your tasks. Up to maxPeers worker threads will call your
+     * processTasks() method. */
+    int tryBondPeers(ThreadPool& pool, int maxPeers)
+    {
+        int count = pool.tryBondPeers(maxPeers, ALL_POOL_THREADS, *this);
+        m_bondedPeerCount += count;
+        return count;
+    }
+
+    /* Returns when all bonded peers have exited processTasks(). It does *NOT*
+     * ensure all tasks are completed (but this is generally implied).
+     * Blocks on the counter's condition-style wait rather than spinning */
+    void waitForExit()
+    {
+        int exited = m_exitedPeerCount.get();
+        while (m_bondedPeerCount != exited)
+            exited = m_exitedPeerCount.waitForChange(exited);
+    }
+
+    /* Derived classes must define this method. The worker thread ID may be
+     * used to index into thread local data, or ignored.  The ID will be between
+     * 0 and jp.m_numWorkers - 1 */
+    virtual void processTasks(int workerThreadId) = 0;
+};
+
+} // end namespace X265_NS
+
+#endif // ifndef X265_THREADPOOL_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/vec/dct-sse3.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1426 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Mandar Gurav <mandar@multicorewareinc.com>
+ *          Deepthi Devaki Akkoorath <deepthidevaki@multicorewareinc.com>
+ *          Mahesh Pittala <mahesh@multicorewareinc.com>
+ *          Rajesh Paulraj <rajesh@multicorewareinc.com>
+ *          Min Chen <min.chen@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *          Nabajit Deka <nabajit@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include <xmmintrin.h> // SSE
+#include <pmmintrin.h> // SSE3
+
+using namespace X265_NS;
+
+/* first inverse-transform pass: right shift 7 with rounding bias 64 */
+#define SHIFT1  7
+#define ADD1    64
+
+/* second pass shift depends on configured bit depth (12 for 8-bit input) */
+#define SHIFT2  (12 - (X265_DEPTH - 8))
+#define ADD2    (1 << ((SHIFT2) - 1))
+
+/* 8x8 inverse-transform coefficients, stored as interleaved pairs replicated
+ * four times per row so each row feeds _mm_madd_epi16 directly (rows 0-7:
+ * odd-coefficient pairs 89/75/50/18; rows 8-11: even pairs 64 and 83/36 —
+ * presumably the HEVC partial-butterfly constants; verify against spec) */
+ALIGN_VAR_32(static const int16_t, tab_idct_8x8[12][8]) =
+{
+    {  89,  75,  89,  75, 89,  75, 89,  75 },
+    {  50,  18,  50,  18, 50,  18, 50,  18 },
+    {  75, -18,  75, -18, 75, -18, 75, -18 },
+    { -89, -50, -89, -50, -89, -50, -89, -50 },
+    {  50, -89,  50, -89, 50, -89, 50, -89 },
+    {  18,  75,  18,  75, 18,  75, 18,  75 },
+    {  18, -50,  18, -50, 18, -50, 18, -50 },
+    {  75, -89,  75, -89, 75, -89, 75, -89 },
+    {  64,  64,  64,  64, 64,  64, 64,  64 },
+    {  64, -64,  64, -64, 64, -64, 64, -64 },
+    {  83,  36,  83,  36, 83,  36, 83,  36 },
+    {  36, -83,  36, -83, 36, -83, 36, -83 }
+};
+
+static void idct8(const int16_t* src, int16_t* dst, intptr_t stride)
+{
+    __m128i m128iS0, m128iS1, m128iS2, m128iS3, m128iS4, m128iS5, m128iS6, m128iS7, m128iAdd, m128Tmp0, m128Tmp1, m128Tmp2, m128Tmp3, E0h, E1h, E2h, E3h, E0l, E1l, E2l, E3l, O0h, O1h, O2h, O3h, O0l, O1l, O2l, O3l, EE0l, EE1l, E00l, E01l, EE0h, EE1h, E00h, E01h;
+    __m128i T00, T01, T02, T03, T04, T05, T06, T07;
+
+    m128iAdd = _mm_set1_epi32(ADD1);
+
+    m128iS1 = _mm_load_si128((__m128i*)&src[8 + 0]);
+    m128iS3 = _mm_load_si128((__m128i*)&src[24 + 0]);
+    m128Tmp0 = _mm_unpacklo_epi16(m128iS1, m128iS3);
+    E1l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[0])));
+    m128Tmp1 = _mm_unpackhi_epi16(m128iS1, m128iS3);
+    E1h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[0])));
+
+    m128iS5 = _mm_load_si128((__m128i*)&src[40 + 0]);
+    m128iS7 = _mm_load_si128((__m128i*)&src[56 + 0]);
+    m128Tmp2 = _mm_unpacklo_epi16(m128iS5, m128iS7);
+    E2l = _mm_madd_epi16(m128Tmp2, _mm_load_si128((__m128i*)(tab_idct_8x8[1])));
+    m128Tmp3 = _mm_unpackhi_epi16(m128iS5, m128iS7);
+    E2h = _mm_madd_epi16(m128Tmp3, _mm_load_si128((__m128i*)(tab_idct_8x8[1])));
+    O0l = _mm_add_epi32(E1l, E2l);
+    O0h = _mm_add_epi32(E1h, E2h);
+
+    E1l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[2])));
+    E1h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[2])));
+    E2l = _mm_madd_epi16(m128Tmp2, _mm_load_si128((__m128i*)(tab_idct_8x8[3])));
+    E2h = _mm_madd_epi16(m128Tmp3, _mm_load_si128((__m128i*)(tab_idct_8x8[3])));
+
+    O1l = _mm_add_epi32(E1l, E2l);
+    O1h = _mm_add_epi32(E1h, E2h);
+
+    E1l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[4])));
+    E1h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[4])));
+    E2l = _mm_madd_epi16(m128Tmp2, _mm_load_si128((__m128i*)(tab_idct_8x8[5])));
+    E2h = _mm_madd_epi16(m128Tmp3, _mm_load_si128((__m128i*)(tab_idct_8x8[5])));
+    O2l = _mm_add_epi32(E1l, E2l);
+    O2h = _mm_add_epi32(E1h, E2h);
+
+    E1l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[6])));
+    E1h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[6])));
+    E2l = _mm_madd_epi16(m128Tmp2, _mm_load_si128((__m128i*)(tab_idct_8x8[7])));
+    E2h = _mm_madd_epi16(m128Tmp3, _mm_load_si128((__m128i*)(tab_idct_8x8[7])));
+    O3h = _mm_add_epi32(E1h, E2h);
+    O3l = _mm_add_epi32(E1l, E2l);
+
+    /*    -------     */
+
+    m128iS0 = _mm_load_si128((__m128i*)&src[0 + 0]);
+    m128iS4 = _mm_load_si128((__m128i*)&src[32 + 0]);
+    m128Tmp0 = _mm_unpacklo_epi16(m128iS0, m128iS4);
+    EE0l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[8])));
+    m128Tmp1 = _mm_unpackhi_epi16(m128iS0, m128iS4);
+    EE0h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[8])));
+
+    EE1l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[9])));
+    EE1h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[9])));
+
+    /*    -------     */
+
+    m128iS2 = _mm_load_si128((__m128i*)&src[16 + 0]);
+    m128iS6 = _mm_load_si128((__m128i*)&src[48 + 0]);
+    m128Tmp0 = _mm_unpacklo_epi16(m128iS2, m128iS6);
+    E00l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[10])));
+    m128Tmp1 = _mm_unpackhi_epi16(m128iS2, m128iS6);
+    E00h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[10])));
+    E01l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[11])));
+    E01h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[11])));
+    E0l = _mm_add_epi32(EE0l, E00l);
+    E0l = _mm_add_epi32(E0l, m128iAdd);
+    E0h = _mm_add_epi32(EE0h, E00h);
+    E0h = _mm_add_epi32(E0h, m128iAdd);
+    E3l = _mm_sub_epi32(EE0l, E00l);
+    E3l = _mm_add_epi32(E3l, m128iAdd);
+    E3h = _mm_sub_epi32(EE0h, E00h);
+    E3h = _mm_add_epi32(E3h, m128iAdd);
+
+    E1l = _mm_add_epi32(EE1l, E01l);
+    E1l = _mm_add_epi32(E1l, m128iAdd);
+    E1h = _mm_add_epi32(EE1h, E01h);
+    E1h = _mm_add_epi32(E1h, m128iAdd);
+    E2l = _mm_sub_epi32(EE1l, E01l);
+    E2l = _mm_add_epi32(E2l, m128iAdd);
+    E2h = _mm_sub_epi32(EE1h, E01h);
+    E2h = _mm_add_epi32(E2h, m128iAdd);
+    m128iS0 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(E0l, O0l), SHIFT1), _mm_srai_epi32(_mm_add_epi32(E0h, O0h), SHIFT1));
+    m128iS1 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(E1l, O1l), SHIFT1), _mm_srai_epi32(_mm_add_epi32(E1h, O1h), SHIFT1));
+    m128iS2 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(E2l, O2l), SHIFT1), _mm_srai_epi32(_mm_add_epi32(E2h, O2h), SHIFT1));
+    m128iS3 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(E3l, O3l), SHIFT1), _mm_srai_epi32(_mm_add_epi32(E3h, O3h), SHIFT1));
+    m128iS4 = _mm_packs_epi32(_mm_srai_epi32(_mm_sub_epi32(E3l, O3l), SHIFT1), _mm_srai_epi32(_mm_sub_epi32(E3h, O3h), SHIFT1));
+    m128iS5 = _mm_packs_epi32(_mm_srai_epi32(_mm_sub_epi32(E2l, O2l), SHIFT1), _mm_srai_epi32(_mm_sub_epi32(E2h, O2h), SHIFT1));
+    m128iS6 = _mm_packs_epi32(_mm_srai_epi32(_mm_sub_epi32(E1l, O1l), SHIFT1), _mm_srai_epi32(_mm_sub_epi32(E1h, O1h), SHIFT1));
+    m128iS7 = _mm_packs_epi32(_mm_srai_epi32(_mm_sub_epi32(E0l, O0l), SHIFT1), _mm_srai_epi32(_mm_sub_epi32(E0h, O0h), SHIFT1));
+    /*  Inverse matrix   */
+
+    E0l = _mm_unpacklo_epi16(m128iS0, m128iS4);
+    E1l = _mm_unpacklo_epi16(m128iS1, m128iS5);
+    E2l = _mm_unpacklo_epi16(m128iS2, m128iS6);
+    E3l = _mm_unpacklo_epi16(m128iS3, m128iS7);
+    O0l = _mm_unpackhi_epi16(m128iS0, m128iS4);
+    O1l = _mm_unpackhi_epi16(m128iS1, m128iS5);
+    O2l = _mm_unpackhi_epi16(m128iS2, m128iS6);
+    O3l = _mm_unpackhi_epi16(m128iS3, m128iS7);
+    m128Tmp0 = _mm_unpacklo_epi16(E0l, E2l);
+    m128Tmp1 = _mm_unpacklo_epi16(E1l, E3l);
+    m128iS0  = _mm_unpacklo_epi16(m128Tmp0, m128Tmp1);
+    m128iS1  = _mm_unpackhi_epi16(m128Tmp0, m128Tmp1);
+    m128Tmp2 = _mm_unpackhi_epi16(E0l, E2l);
+    m128Tmp3 = _mm_unpackhi_epi16(E1l, E3l);
+    m128iS2  = _mm_unpacklo_epi16(m128Tmp2, m128Tmp3);
+    m128iS3  = _mm_unpackhi_epi16(m128Tmp2, m128Tmp3);
+    m128Tmp0 = _mm_unpacklo_epi16(O0l, O2l);
+    m128Tmp1 = _mm_unpacklo_epi16(O1l, O3l);
+    m128iS4  = _mm_unpacklo_epi16(m128Tmp0, m128Tmp1);
+    m128iS5  = _mm_unpackhi_epi16(m128Tmp0, m128Tmp1);
+    m128Tmp2 = _mm_unpackhi_epi16(O0l, O2l);
+    m128Tmp3 = _mm_unpackhi_epi16(O1l, O3l);
+    m128iS6  = _mm_unpacklo_epi16(m128Tmp2, m128Tmp3);
+    m128iS7  = _mm_unpackhi_epi16(m128Tmp2, m128Tmp3);
+
+    m128iAdd = _mm_set1_epi32(ADD2);
+
+    m128Tmp0 = _mm_unpacklo_epi16(m128iS1, m128iS3);
+    E1l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[0])));
+    m128Tmp1 = _mm_unpackhi_epi16(m128iS1, m128iS3);
+    E1h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[0])));
+    m128Tmp2 = _mm_unpacklo_epi16(m128iS5, m128iS7);
+    E2l = _mm_madd_epi16(m128Tmp2, _mm_load_si128((__m128i*)(tab_idct_8x8[1])));
+    m128Tmp3 = _mm_unpackhi_epi16(m128iS5, m128iS7);
+    E2h = _mm_madd_epi16(m128Tmp3, _mm_load_si128((__m128i*)(tab_idct_8x8[1])));
+    O0l = _mm_add_epi32(E1l, E2l);
+    O0h = _mm_add_epi32(E1h, E2h);
+    E1l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[2])));
+    E1h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[2])));
+    E2l = _mm_madd_epi16(m128Tmp2, _mm_load_si128((__m128i*)(tab_idct_8x8[3])));
+    E2h = _mm_madd_epi16(m128Tmp3, _mm_load_si128((__m128i*)(tab_idct_8x8[3])));
+    O1l = _mm_add_epi32(E1l, E2l);
+    O1h = _mm_add_epi32(E1h, E2h);
+    E1l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[4])));
+    E1h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[4])));
+    E2l = _mm_madd_epi16(m128Tmp2, _mm_load_si128((__m128i*)(tab_idct_8x8[5])));
+    E2h = _mm_madd_epi16(m128Tmp3, _mm_load_si128((__m128i*)(tab_idct_8x8[5])));
+    O2l = _mm_add_epi32(E1l, E2l);
+    O2h = _mm_add_epi32(E1h, E2h);
+    E1l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[6])));
+    E1h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[6])));
+    E2l = _mm_madd_epi16(m128Tmp2, _mm_load_si128((__m128i*)(tab_idct_8x8[7])));
+    E2h = _mm_madd_epi16(m128Tmp3, _mm_load_si128((__m128i*)(tab_idct_8x8[7])));
+    O3h = _mm_add_epi32(E1h, E2h);
+    O3l = _mm_add_epi32(E1l, E2l);
+
+    m128Tmp0 = _mm_unpacklo_epi16(m128iS0, m128iS4);
+    EE0l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[8])));
+    m128Tmp1 = _mm_unpackhi_epi16(m128iS0, m128iS4);
+    EE0h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[8])));
+    EE1l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[9])));
+    EE1h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[9])));
+
+    m128Tmp0 = _mm_unpacklo_epi16(m128iS2, m128iS6);
+    E00l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[10])));
+    m128Tmp1 = _mm_unpackhi_epi16(m128iS2, m128iS6);
+    E00h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[10])));
+    E01l = _mm_madd_epi16(m128Tmp0, _mm_load_si128((__m128i*)(tab_idct_8x8[11])));
+    E01h = _mm_madd_epi16(m128Tmp1, _mm_load_si128((__m128i*)(tab_idct_8x8[11])));
+    E0l = _mm_add_epi32(EE0l, E00l);
+    E0l = _mm_add_epi32(E0l, m128iAdd);
+    E0h = _mm_add_epi32(EE0h, E00h);
+    E0h = _mm_add_epi32(E0h, m128iAdd);
+    E3l = _mm_sub_epi32(EE0l, E00l);
+    E3l = _mm_add_epi32(E3l, m128iAdd);
+    E3h = _mm_sub_epi32(EE0h, E00h);
+    E3h = _mm_add_epi32(E3h, m128iAdd);
+    E1l = _mm_add_epi32(EE1l, E01l);
+    E1l = _mm_add_epi32(E1l, m128iAdd);
+    E1h = _mm_add_epi32(EE1h, E01h);
+    E1h = _mm_add_epi32(E1h, m128iAdd);
+    E2l = _mm_sub_epi32(EE1l, E01l);
+    E2l = _mm_add_epi32(E2l, m128iAdd);
+    E2h = _mm_sub_epi32(EE1h, E01h);
+    E2h = _mm_add_epi32(E2h, m128iAdd);
+
+    m128iS0 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(E0l, O0l), SHIFT2), _mm_srai_epi32(_mm_add_epi32(E0h, O0h), SHIFT2));
+    m128iS1 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(E1l, O1l), SHIFT2), _mm_srai_epi32(_mm_add_epi32(E1h, O1h), SHIFT2));
+    m128iS2 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(E2l, O2l), SHIFT2), _mm_srai_epi32(_mm_add_epi32(E2h, O2h), SHIFT2));
+    m128iS3 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(E3l, O3l), SHIFT2), _mm_srai_epi32(_mm_add_epi32(E3h, O3h), SHIFT2));
+    m128iS4 = _mm_packs_epi32(_mm_srai_epi32(_mm_sub_epi32(E3l, O3l), SHIFT2), _mm_srai_epi32(_mm_sub_epi32(E3h, O3h), SHIFT2));
+    m128iS5 = _mm_packs_epi32(_mm_srai_epi32(_mm_sub_epi32(E2l, O2l), SHIFT2), _mm_srai_epi32(_mm_sub_epi32(E2h, O2h), SHIFT2));
+    m128iS6 = _mm_packs_epi32(_mm_srai_epi32(_mm_sub_epi32(E1l, O1l), SHIFT2), _mm_srai_epi32(_mm_sub_epi32(E1h, O1h), SHIFT2));
+    m128iS7 = _mm_packs_epi32(_mm_srai_epi32(_mm_sub_epi32(E0l, O0l), SHIFT2), _mm_srai_epi32(_mm_sub_epi32(E0h, O0h), SHIFT2));
+
+    // [07 06 05 04 03 02 01 00]
+    // [17 16 15 14 13 12 11 10]
+    // [27 26 25 24 23 22 21 20]
+    // [37 36 35 34 33 32 31 30]
+    // [47 46 45 44 43 42 41 40]
+    // [57 56 55 54 53 52 51 50]
+    // [67 66 65 64 63 62 61 60]
+    // [77 76 75 74 73 72 71 70]
+
+    T00 = _mm_unpacklo_epi16(m128iS0, m128iS1);     // [13 03 12 02 11 01 10 00]
+    T01 = _mm_unpackhi_epi16(m128iS0, m128iS1);     // [17 07 16 06 15 05 14 04]
+    T02 = _mm_unpacklo_epi16(m128iS2, m128iS3);     // [33 23 32 22 31 21 30 20]
+    T03 = _mm_unpackhi_epi16(m128iS2, m128iS3);     // [37 27 36 26 35 25 34 24]
+    T04 = _mm_unpacklo_epi16(m128iS4, m128iS5);     // [53 43 52 42 51 41 50 40]
+    T05 = _mm_unpackhi_epi16(m128iS4, m128iS5);     // [57 47 56 46 55 45 54 44]
+    T06 = _mm_unpacklo_epi16(m128iS6, m128iS7);     // [73 63 72 62 71 61 70 60]
+    T07 = _mm_unpackhi_epi16(m128iS6, m128iS7);     // [77 67 76 66 75 65 74 64]
+
+    __m128i T10, T11;
+    T10 = _mm_unpacklo_epi32(T00, T02);                                     // [31 21 11 01 30 20 10 00]
+    T11 = _mm_unpackhi_epi32(T00, T02);                                     // [33 23 13 03 32 22 12 02]
+    _mm_storel_epi64((__m128i*)&dst[0 * stride +  0], T10);                   // [30 20 10 00]
+    _mm_storeh_pi((__m64*)&dst[1 * stride +  0], _mm_castsi128_ps(T10));  // [31 21 11 01]
+    _mm_storel_epi64((__m128i*)&dst[2 * stride +  0], T11);                   // [32 22 12 02]
+    _mm_storeh_pi((__m64*)&dst[3 * stride +  0], _mm_castsi128_ps(T11));  // [33 23 13 03]
+
+    T10 = _mm_unpacklo_epi32(T04, T06);                                     // [71 61 51 41 70 60 50 40]
+    T11 = _mm_unpackhi_epi32(T04, T06);                                     // [73 63 53 43 72 62 52 42]
+    _mm_storel_epi64((__m128i*)&dst[0 * stride +  4], T10);
+    _mm_storeh_pi((__m64*)&dst[1 * stride +  4], _mm_castsi128_ps(T10));
+    _mm_storel_epi64((__m128i*)&dst[2 * stride +  4], T11);
+    _mm_storeh_pi((__m64*)&dst[3 * stride +  4], _mm_castsi128_ps(T11));
+
+    T10 = _mm_unpacklo_epi32(T01, T03);                                     // [35 25 15 05 34 24 14 04]
+    T11 = _mm_unpackhi_epi32(T01, T03);                                     // [37 27 17 07 36 26 16 06]
+    _mm_storel_epi64((__m128i*)&dst[4 * stride +  0], T10);
+    _mm_storeh_pi((__m64*)&dst[5 * stride +  0], _mm_castsi128_ps(T10));
+    _mm_storel_epi64((__m128i*)&dst[6 * stride +  0], T11);
+    _mm_storeh_pi((__m64*)&dst[7 * stride +  0], _mm_castsi128_ps(T11));
+
+    T10 = _mm_unpacklo_epi32(T05, T07);                                     // [75 65 55 45 74 64 54 44]
+    T11 = _mm_unpackhi_epi32(T05, T07);                                     // [77 67 57 47 76 56 46 36]
+    _mm_storel_epi64((__m128i*)&dst[4 * stride +  4], T10);
+    _mm_storeh_pi((__m64*)&dst[5 * stride +  4], _mm_castsi128_ps(T10));
+    _mm_storel_epi64((__m128i*)&dst[6 * stride +  4], T11);
+    _mm_storeh_pi((__m64*)&dst[7 * stride +  4], _mm_castsi128_ps(T11));
+}
+
+/* 16x16 inverse DCT (HEVC partial-butterfly decomposition), SSE2 intrinsics.
+ * Two structurally identical passes: pass 1 transforms the 8-column halves of
+ * the input (rounding constant ADD1, right shift SHIFT1), the intermediate
+ * 16x16 result is transposed 8x8-blockwise, then pass 2 repeats the transform
+ * (ADD2/SHIFT2) and a final transpose writes the output.
+ * NOTE(review): ADD1/SHIFT1/ADD2/SHIFT2 are assumed to be #defined earlier in
+ * this file for the target bit depth -- not visible in this chunk; confirm.
+ * src is read as a 16x16 block of int16_t with a fixed row stride of 16;
+ * dst is written with the caller-supplied 'stride' (in int16_t elements).
+ * The aligned _mm_store_si128/_mm_load_si128 accesses assume src/dst and the
+ * coefficient tables are 16-byte aligned. */
+static void idct16(const int16_t *src, int16_t *dst, intptr_t stride)
+{
+/* Load eight pairs of input rows for one 8-column slice ('offset' = 0 or 8)
+ * and interleave them 16-bit-wise so _mm_madd_epi16 can form two-term dot
+ * products: rows 1/3, 5/7, 9/11, 13/15 feed the odd part, 2/6 and 10/14 the
+ * even-odd (EO) part, 4/12 the EEO part, 0/8 the EEE part.
+ * Suffix A = low 4 columns of the slice, B = high 4 columns. */
+#define READ_UNPACKHILO(offset)\
+    const __m128i T_00_00A = _mm_unpacklo_epi16(*(__m128i*)&src[1 * 16 + offset], *(__m128i*)&src[3 * 16 + offset]);\
+    const __m128i T_00_00B = _mm_unpackhi_epi16(*(__m128i*)&src[1 * 16 + offset], *(__m128i*)&src[3 * 16 + offset]);\
+    const __m128i T_00_01A = _mm_unpacklo_epi16(*(__m128i*)&src[5 * 16 + offset], *(__m128i*)&src[7 * 16 + offset]);\
+    const __m128i T_00_01B = _mm_unpackhi_epi16(*(__m128i*)&src[5 * 16 + offset], *(__m128i*)&src[7 * 16 + offset]);\
+    const __m128i T_00_02A = _mm_unpacklo_epi16(*(__m128i*)&src[9 * 16 + offset], *(__m128i*)&src[11 * 16 + offset]);\
+    const __m128i T_00_02B = _mm_unpackhi_epi16(*(__m128i*)&src[9 * 16 + offset], *(__m128i*)&src[11 * 16 + offset]);\
+    const __m128i T_00_03A = _mm_unpacklo_epi16(*(__m128i*)&src[13 * 16 + offset], *(__m128i*)&src[15 * 16 + offset]);\
+    const __m128i T_00_03B = _mm_unpackhi_epi16(*(__m128i*)&src[13 * 16 + offset], *(__m128i*)&src[15 * 16 + offset]);\
+    const __m128i T_00_04A = _mm_unpacklo_epi16(*(__m128i*)&src[2 * 16 + offset], *(__m128i*)&src[6 * 16 + offset]);\
+    const __m128i T_00_04B = _mm_unpackhi_epi16(*(__m128i*)&src[2 * 16 + offset], *(__m128i*)&src[6 * 16 + offset]);\
+    const __m128i T_00_05A = _mm_unpacklo_epi16(*(__m128i*)&src[10 * 16 + offset], *(__m128i*)&src[14 * 16 + offset]);\
+    const __m128i T_00_05B = _mm_unpackhi_epi16(*(__m128i*)&src[10 * 16 + offset], *(__m128i*)&src[14 * 16 + offset]);\
+    const __m128i T_00_06A = _mm_unpacklo_epi16(*(__m128i*)&src[4 * 16 + offset], *(__m128i*)&src[12 * 16 + offset]);\
+    const __m128i T_00_06B = _mm_unpackhi_epi16(*(__m128i*)&src[4 * 16 + offset], *(__m128i*)&src[12 * 16 + offset]);\
+    const __m128i T_00_07A = _mm_unpacklo_epi16(*(__m128i*)&src[0 * 16 + offset], *(__m128i*)&src[8 * 16 + offset]);\
+    const __m128i T_00_07B = _mm_unpackhi_epi16(*(__m128i*)&src[0 * 16 + offset], *(__m128i*)&src[8 * 16 + offset]);
+
+/* Same row-pair interleave as READ_UNPACKHILO, but sourced from the in00..in15
+ * register arrays produced by the mid-transpose; 'part' (0/1) selects the
+ * 8-column half. Both macros declare the same T_00_* names, so each expansion
+ * must live in its own brace block. */
+#define UNPACKHILO(part) \
+    const __m128i T_00_00A = _mm_unpacklo_epi16(in01[part], in03[part]);\
+    const __m128i T_00_00B = _mm_unpackhi_epi16(in01[part], in03[part]);\
+    const __m128i T_00_01A = _mm_unpacklo_epi16(in05[part], in07[part]);\
+    const __m128i T_00_01B = _mm_unpackhi_epi16(in05[part], in07[part]);\
+    const __m128i T_00_02A = _mm_unpacklo_epi16(in09[part], in11[part]);\
+    const __m128i T_00_02B = _mm_unpackhi_epi16(in09[part], in11[part]);\
+    const __m128i T_00_03A = _mm_unpacklo_epi16(in13[part], in15[part]);\
+    const __m128i T_00_03B = _mm_unpackhi_epi16(in13[part], in15[part]);\
+    const __m128i T_00_04A = _mm_unpacklo_epi16(in02[part], in06[part]);\
+    const __m128i T_00_04B = _mm_unpackhi_epi16(in02[part], in06[part]);\
+    const __m128i T_00_05A = _mm_unpacklo_epi16(in10[part], in14[part]);\
+    const __m128i T_00_05B = _mm_unpackhi_epi16(in10[part], in14[part]);\
+    const __m128i T_00_06A = _mm_unpacklo_epi16(in04[part], in12[part]);\
+    const __m128i T_00_06B = _mm_unpackhi_epi16(in04[part], in12[part]);\
+    const __m128i T_00_07A = _mm_unpacklo_epi16(in00[part], in08[part]);\
+    const __m128i T_00_07B = _mm_unpackhi_epi16(in00[part], in08[part]);
+
+/* row = eight-term dot product of the four interleaved odd row-pairs with
+ * four packed coefficient pairs (one output row of the odd-part butterfly).
+ * Requires T00/T01 __m128i temporaries in scope. */
+#define COMPUTE_ROW(row0103, row0507, row0911, row1315, c0103, c0507, c0911, c1315, row) \
+    T00 = _mm_add_epi32(_mm_madd_epi16(row0103, c0103), _mm_madd_epi16(row0507, c0507)); \
+    T01 = _mm_add_epi32(_mm_madd_epi16(row0911, c0911), _mm_madd_epi16(row1315, c1315)); \
+    row = _mm_add_epi32(T00, T01);
+
+/* Standard 8x8 transpose of 16-bit lanes via the 16/32/64-bit unpack ladder.
+ * Requires tr0_0..tr0_7 and tr1_0..tr1_7 __m128i temporaries in scope. */
+#define TRANSPOSE_8x8_16BIT(I0, I1, I2, I3, I4, I5, I6, I7, O0, O1, O2, O3, O4, O5, O6, O7) \
+    tr0_0 = _mm_unpacklo_epi16(I0, I1); \
+    tr0_1 = _mm_unpacklo_epi16(I2, I3); \
+    tr0_2 = _mm_unpackhi_epi16(I0, I1); \
+    tr0_3 = _mm_unpackhi_epi16(I2, I3); \
+    tr0_4 = _mm_unpacklo_epi16(I4, I5); \
+    tr0_5 = _mm_unpacklo_epi16(I6, I7); \
+    tr0_6 = _mm_unpackhi_epi16(I4, I5); \
+    tr0_7 = _mm_unpackhi_epi16(I6, I7); \
+    tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
+    tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); \
+    tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
+    tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); \
+    tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
+    tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); \
+    tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
+    tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); \
+    O0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
+    O1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
+    O2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
+    O3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
+    O4 = _mm_unpacklo_epi64(tr1_1, tr1_5); \
+    O5 = _mm_unpackhi_epi64(tr1_1, tr1_5); \
+    O6 = _mm_unpacklo_epi64(tr1_3, tr1_7); \
+    O7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+
+/* One transform pass over an 8-column slice: builds the odd (O0..O7),
+ * even-odd (EO0..EO3), even-even-odd (EEO0/1) and even-even-even (EEE0/1)
+ * partial sums from the T_00_* interleaves, recombines them butterfly-style
+ * (E = EE +/- EO, output = E +/- O), adds the rounding constant 'rnd',
+ * arithmetic-shifts by 'shift' and saturating-packs the 32-bit results back
+ * to 16-bit into res00..res15['part']. The macro declares local variables,
+ * so each expansion must be wrapped in its own brace block. */
+#define PROCESS(part, rnd, shift) \
+    __m128i c32_rnd = _mm_set1_epi32(rnd);\
+    int nShift = shift;\
+\
+    __m128i O0A, O1A, O2A, O3A, O4A, O5A, O6A, O7A;\
+    __m128i O0B, O1B, O2B, O3B, O4B, O5B, O6B, O7B;\
+    {\
+        __m128i T00, T01;\
+\
+        COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, c16_p87_p90, c16_p70_p80, c16_p43_p57, c16_p09_p25, O0A)\
+        COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, c16_p57_p87, c16_n43_p09, c16_n90_n80, c16_n25_n70, O1A)\
+        COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, c16_p09_p80, c16_n87_n70, c16_p57_n25, c16_p43_p90, O2A)\
+        COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, c16_n43_p70, c16_p09_n87, c16_p25_p90, c16_n57_n80, O3A)\
+        COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, c16_n80_p57, c16_p90_n25, c16_n87_n09, c16_p70_p43, O4A)\
+        COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, c16_n90_p43, c16_p25_p57, c16_p70_n87, c16_n80_p09, O5A)\
+        COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, c16_n70_p25, c16_n80_p90, c16_p09_p43, c16_p87_n57, O6A)\
+        COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, c16_n25_p09, c16_n57_p43, c16_n80_p70, c16_n90_p87, O7A)\
+\
+        COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, c16_p87_p90, c16_p70_p80, c16_p43_p57, c16_p09_p25, O0B)\
+        COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, c16_p57_p87, c16_n43_p09, c16_n90_n80, c16_n25_n70, O1B)\
+        COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, c16_p09_p80, c16_n87_n70, c16_p57_n25, c16_p43_p90, O2B)\
+        COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, c16_n43_p70, c16_p09_n87, c16_p25_p90, c16_n57_n80, O3B)\
+        COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, c16_n80_p57, c16_p90_n25, c16_n87_n09, c16_p70_p43, O4B)\
+        COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, c16_n90_p43, c16_p25_p57, c16_p70_n87, c16_n80_p09, O5B)\
+        COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, c16_n70_p25, c16_n80_p90, c16_p09_p43, c16_p87_n57, O6B)\
+        COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, c16_n25_p09, c16_n57_p43, c16_n80_p70, c16_n90_p87, O7B)\
+    }\
+\
+    __m128i EO0A, EO1A, EO2A, EO3A;\
+    __m128i EO0B, EO1B, EO2B, EO3B;\
+    EO0A = _mm_add_epi32(_mm_madd_epi16(T_00_04A, c16_p75_p89), _mm_madd_epi16(T_00_05A, c16_p18_p50));\
+    EO0B = _mm_add_epi32(_mm_madd_epi16(T_00_04B, c16_p75_p89), _mm_madd_epi16(T_00_05B, c16_p18_p50));\
+    EO1A = _mm_add_epi32(_mm_madd_epi16(T_00_04A, c16_n18_p75), _mm_madd_epi16(T_00_05A, c16_n50_n89));\
+    EO1B = _mm_add_epi32(_mm_madd_epi16(T_00_04B, c16_n18_p75), _mm_madd_epi16(T_00_05B, c16_n50_n89));\
+    EO2A = _mm_add_epi32(_mm_madd_epi16(T_00_04A, c16_n89_p50), _mm_madd_epi16(T_00_05A, c16_p75_p18));\
+    EO2B = _mm_add_epi32(_mm_madd_epi16(T_00_04B, c16_n89_p50), _mm_madd_epi16(T_00_05B, c16_p75_p18));\
+    EO3A = _mm_add_epi32(_mm_madd_epi16(T_00_04A, c16_n50_p18), _mm_madd_epi16(T_00_05A, c16_n89_p75));\
+    EO3B = _mm_add_epi32(_mm_madd_epi16(T_00_04B, c16_n50_p18), _mm_madd_epi16(T_00_05B, c16_n89_p75));\
+\
+    __m128i EEO0A, EEO1A;\
+    __m128i EEO0B, EEO1B;\
+    EEO0A = _mm_madd_epi16(T_00_06A, c16_p36_p83);\
+    EEO0B = _mm_madd_epi16(T_00_06B, c16_p36_p83);\
+    EEO1A = _mm_madd_epi16(T_00_06A, c16_n83_p36);\
+    EEO1B = _mm_madd_epi16(T_00_06B, c16_n83_p36);\
+\
+    __m128i EEE0A, EEE1A;\
+    __m128i EEE0B, EEE1B;\
+    EEE0A = _mm_madd_epi16(T_00_07A, c16_p64_p64);\
+    EEE0B = _mm_madd_epi16(T_00_07B, c16_p64_p64);\
+    EEE1A = _mm_madd_epi16(T_00_07A, c16_n64_p64);\
+    EEE1B = _mm_madd_epi16(T_00_07B, c16_n64_p64);\
+\
+    const __m128i EE0A = _mm_add_epi32(EEE0A, EEO0A);\
+    const __m128i EE0B = _mm_add_epi32(EEE0B, EEO0B);\
+    const __m128i EE1A = _mm_add_epi32(EEE1A, EEO1A);\
+    const __m128i EE1B = _mm_add_epi32(EEE1B, EEO1B);\
+    const __m128i EE3A = _mm_sub_epi32(EEE0A, EEO0A);\
+    const __m128i EE3B = _mm_sub_epi32(EEE0B, EEO0B);\
+    const __m128i EE2A = _mm_sub_epi32(EEE1A, EEO1A);\
+    const __m128i EE2B = _mm_sub_epi32(EEE1B, EEO1B);\
+\
+    const __m128i E0A = _mm_add_epi32(EE0A, EO0A);\
+    const __m128i E0B = _mm_add_epi32(EE0B, EO0B);\
+    const __m128i E1A = _mm_add_epi32(EE1A, EO1A);\
+    const __m128i E1B = _mm_add_epi32(EE1B, EO1B);\
+    const __m128i E2A = _mm_add_epi32(EE2A, EO2A);\
+    const __m128i E2B = _mm_add_epi32(EE2B, EO2B);\
+    const __m128i E3A = _mm_add_epi32(EE3A, EO3A);\
+    const __m128i E3B = _mm_add_epi32(EE3B, EO3B);\
+    const __m128i E7A = _mm_sub_epi32(EE0A, EO0A);\
+    const __m128i E7B = _mm_sub_epi32(EE0B, EO0B);\
+    const __m128i E6A = _mm_sub_epi32(EE1A, EO1A);\
+    const __m128i E6B = _mm_sub_epi32(EE1B, EO1B);\
+    const __m128i E5A = _mm_sub_epi32(EE2A, EO2A);\
+    const __m128i E5B = _mm_sub_epi32(EE2B, EO2B);\
+    const __m128i E4A = _mm_sub_epi32(EE3A, EO3A);\
+    const __m128i E4B = _mm_sub_epi32(EE3B, EO3B);\
+\
+    const __m128i T10A = _mm_add_epi32(E0A, c32_rnd);\
+    const __m128i T10B = _mm_add_epi32(E0B, c32_rnd);\
+    const __m128i T11A = _mm_add_epi32(E1A, c32_rnd);\
+    const __m128i T11B = _mm_add_epi32(E1B, c32_rnd);\
+    const __m128i T12A = _mm_add_epi32(E2A, c32_rnd);\
+    const __m128i T12B = _mm_add_epi32(E2B, c32_rnd);\
+    const __m128i T13A = _mm_add_epi32(E3A, c32_rnd);\
+    const __m128i T13B = _mm_add_epi32(E3B, c32_rnd);\
+    const __m128i T14A = _mm_add_epi32(E4A, c32_rnd);\
+    const __m128i T14B = _mm_add_epi32(E4B, c32_rnd);\
+    const __m128i T15A = _mm_add_epi32(E5A, c32_rnd);\
+    const __m128i T15B = _mm_add_epi32(E5B, c32_rnd);\
+    const __m128i T16A = _mm_add_epi32(E6A, c32_rnd);\
+    const __m128i T16B = _mm_add_epi32(E6B, c32_rnd);\
+    const __m128i T17A = _mm_add_epi32(E7A, c32_rnd);\
+    const __m128i T17B = _mm_add_epi32(E7B, c32_rnd);\
+\
+    const __m128i T20A = _mm_add_epi32(T10A, O0A);\
+    const __m128i T20B = _mm_add_epi32(T10B, O0B);\
+    const __m128i T21A = _mm_add_epi32(T11A, O1A);\
+    const __m128i T21B = _mm_add_epi32(T11B, O1B);\
+    const __m128i T22A = _mm_add_epi32(T12A, O2A);\
+    const __m128i T22B = _mm_add_epi32(T12B, O2B);\
+    const __m128i T23A = _mm_add_epi32(T13A, O3A);\
+    const __m128i T23B = _mm_add_epi32(T13B, O3B);\
+    const __m128i T24A = _mm_add_epi32(T14A, O4A);\
+    const __m128i T24B = _mm_add_epi32(T14B, O4B);\
+    const __m128i T25A = _mm_add_epi32(T15A, O5A);\
+    const __m128i T25B = _mm_add_epi32(T15B, O5B);\
+    const __m128i T26A = _mm_add_epi32(T16A, O6A);\
+    const __m128i T26B = _mm_add_epi32(T16B, O6B);\
+    const __m128i T27A = _mm_add_epi32(T17A, O7A);\
+    const __m128i T27B = _mm_add_epi32(T17B, O7B);\
+    const __m128i T2FA = _mm_sub_epi32(T10A, O0A);\
+    const __m128i T2FB = _mm_sub_epi32(T10B, O0B);\
+    const __m128i T2EA = _mm_sub_epi32(T11A, O1A);\
+    const __m128i T2EB = _mm_sub_epi32(T11B, O1B);\
+    const __m128i T2DA = _mm_sub_epi32(T12A, O2A);\
+    const __m128i T2DB = _mm_sub_epi32(T12B, O2B);\
+    const __m128i T2CA = _mm_sub_epi32(T13A, O3A);\
+    const __m128i T2CB = _mm_sub_epi32(T13B, O3B);\
+    const __m128i T2BA = _mm_sub_epi32(T14A, O4A);\
+    const __m128i T2BB = _mm_sub_epi32(T14B, O4B);\
+    const __m128i T2AA = _mm_sub_epi32(T15A, O5A);\
+    const __m128i T2AB = _mm_sub_epi32(T15B, O5B);\
+    const __m128i T29A = _mm_sub_epi32(T16A, O6A);\
+    const __m128i T29B = _mm_sub_epi32(T16B, O6B);\
+    const __m128i T28A = _mm_sub_epi32(T17A, O7A);\
+    const __m128i T28B = _mm_sub_epi32(T17B, O7B);\
+\
+    const __m128i T30A = _mm_srai_epi32(T20A, nShift);\
+    const __m128i T30B = _mm_srai_epi32(T20B, nShift);\
+    const __m128i T31A = _mm_srai_epi32(T21A, nShift);\
+    const __m128i T31B = _mm_srai_epi32(T21B, nShift);\
+    const __m128i T32A = _mm_srai_epi32(T22A, nShift);\
+    const __m128i T32B = _mm_srai_epi32(T22B, nShift);\
+    const __m128i T33A = _mm_srai_epi32(T23A, nShift);\
+    const __m128i T33B = _mm_srai_epi32(T23B, nShift);\
+    const __m128i T34A = _mm_srai_epi32(T24A, nShift);\
+    const __m128i T34B = _mm_srai_epi32(T24B, nShift);\
+    const __m128i T35A = _mm_srai_epi32(T25A, nShift);\
+    const __m128i T35B = _mm_srai_epi32(T25B, nShift);\
+    const __m128i T36A = _mm_srai_epi32(T26A, nShift);\
+    const __m128i T36B = _mm_srai_epi32(T26B, nShift);\
+    const __m128i T37A = _mm_srai_epi32(T27A, nShift);\
+    const __m128i T37B = _mm_srai_epi32(T27B, nShift);\
+\
+    const __m128i T38A = _mm_srai_epi32(T28A, nShift);\
+    const __m128i T38B = _mm_srai_epi32(T28B, nShift);\
+    const __m128i T39A = _mm_srai_epi32(T29A, nShift);\
+    const __m128i T39B = _mm_srai_epi32(T29B, nShift);\
+    const __m128i T3AA = _mm_srai_epi32(T2AA, nShift);\
+    const __m128i T3AB = _mm_srai_epi32(T2AB, nShift);\
+    const __m128i T3BA = _mm_srai_epi32(T2BA, nShift);\
+    const __m128i T3BB = _mm_srai_epi32(T2BB, nShift);\
+    const __m128i T3CA = _mm_srai_epi32(T2CA, nShift);\
+    const __m128i T3CB = _mm_srai_epi32(T2CB, nShift);\
+    const __m128i T3DA = _mm_srai_epi32(T2DA, nShift);\
+    const __m128i T3DB = _mm_srai_epi32(T2DB, nShift);\
+    const __m128i T3EA = _mm_srai_epi32(T2EA, nShift);\
+    const __m128i T3EB = _mm_srai_epi32(T2EB, nShift);\
+    const __m128i T3FA = _mm_srai_epi32(T2FA, nShift);\
+    const __m128i T3FB = _mm_srai_epi32(T2FB, nShift);\
+\
+    res00[part]  = _mm_packs_epi32(T30A, T30B);\
+    res01[part]  = _mm_packs_epi32(T31A, T31B);\
+    res02[part]  = _mm_packs_epi32(T32A, T32B);\
+    res03[part]  = _mm_packs_epi32(T33A, T33B);\
+    res04[part]  = _mm_packs_epi32(T34A, T34B);\
+    res05[part]  = _mm_packs_epi32(T35A, T35B);\
+    res06[part]  = _mm_packs_epi32(T36A, T36B);\
+    res07[part]  = _mm_packs_epi32(T37A, T37B);\
+\
+    res08[part]  = _mm_packs_epi32(T38A, T38B);\
+    res09[part]  = _mm_packs_epi32(T39A, T39B);\
+    res10[part]  = _mm_packs_epi32(T3AA, T3AB);\
+    res11[part]  = _mm_packs_epi32(T3BA, T3BB);\
+    res12[part]  = _mm_packs_epi32(T3CA, T3CB);\
+    res13[part]  = _mm_packs_epi32(T3DA, T3DB);\
+    res14[part]  = _mm_packs_epi32(T3EA, T3EB);\
+    res15[part]  = _mm_packs_epi32(T3FA, T3FB);
+
+    /* Transform coefficients: each 32-bit constant packs two 16-bit values
+     * (high half : low half), named c16_<high>_<low> with p = positive and
+     * n = negative; e.g. c16_p87_p90 = {+87, +90}, c16_n43_p09 = {-43, +9}.
+     * _mm_madd_epi16 pairs them with the row interleaves above to compute
+     * a0*90 + a1*87 etc. in one instruction. */
+    const __m128i c16_p87_p90   = _mm_set1_epi32(0x0057005A); //row0 87high - 90low address
+    const __m128i c16_p70_p80   = _mm_set1_epi32(0x00460050);
+    const __m128i c16_p43_p57   = _mm_set1_epi32(0x002B0039);
+    const __m128i c16_p09_p25   = _mm_set1_epi32(0x00090019);
+    const __m128i c16_p57_p87   = _mm_set1_epi32(0x00390057); //row1
+    const __m128i c16_n43_p09   = _mm_set1_epi32(0xFFD50009);
+    const __m128i c16_n90_n80   = _mm_set1_epi32(0xFFA6FFB0);
+    const __m128i c16_n25_n70   = _mm_set1_epi32(0xFFE7FFBA);
+    const __m128i c16_p09_p80   = _mm_set1_epi32(0x00090050); //row2
+    const __m128i c16_n87_n70   = _mm_set1_epi32(0xFFA9FFBA);
+    const __m128i c16_p57_n25   = _mm_set1_epi32(0x0039FFE7);
+    const __m128i c16_p43_p90   = _mm_set1_epi32(0x002B005A);
+    const __m128i c16_n43_p70   = _mm_set1_epi32(0xFFD50046); //row3
+    const __m128i c16_p09_n87   = _mm_set1_epi32(0x0009FFA9);
+    const __m128i c16_p25_p90   = _mm_set1_epi32(0x0019005A);
+    const __m128i c16_n57_n80   = _mm_set1_epi32(0xFFC7FFB0);
+    const __m128i c16_n80_p57   = _mm_set1_epi32(0xFFB00039); //row4
+    const __m128i c16_p90_n25   = _mm_set1_epi32(0x005AFFE7);
+    const __m128i c16_n87_n09   = _mm_set1_epi32(0xFFA9FFF7);
+    const __m128i c16_p70_p43   = _mm_set1_epi32(0x0046002B);
+    const __m128i c16_n90_p43   = _mm_set1_epi32(0xFFA6002B); //row5
+    const __m128i c16_p25_p57   = _mm_set1_epi32(0x00190039);
+    const __m128i c16_p70_n87   = _mm_set1_epi32(0x0046FFA9);
+    const __m128i c16_n80_p09   = _mm_set1_epi32(0xFFB00009);
+    const __m128i c16_n70_p25   = _mm_set1_epi32(0xFFBA0019); //row6
+    const __m128i c16_n80_p90   = _mm_set1_epi32(0xFFB0005A);
+    const __m128i c16_p09_p43   = _mm_set1_epi32(0x0009002B);
+    const __m128i c16_p87_n57   = _mm_set1_epi32(0x0057FFC7);
+    const __m128i c16_n25_p09   = _mm_set1_epi32(0xFFE70009); //row7
+    const __m128i c16_n57_p43   = _mm_set1_epi32(0xFFC7002B);
+    const __m128i c16_n80_p70   = _mm_set1_epi32(0xFFB00046);
+    const __m128i c16_n90_p87   = _mm_set1_epi32(0xFFA60057);
+
+    /* EO-part coefficients (rows 2/6/10/14). */
+    const __m128i c16_p75_p89   = _mm_set1_epi32(0x004B0059);
+    const __m128i c16_p18_p50   = _mm_set1_epi32(0x00120032);
+    const __m128i c16_n18_p75   = _mm_set1_epi32(0xFFEE004B);
+    const __m128i c16_n50_n89   = _mm_set1_epi32(0xFFCEFFA7);
+    const __m128i c16_n89_p50   = _mm_set1_epi32(0xFFA70032);
+    const __m128i c16_p75_p18   = _mm_set1_epi32(0x004B0012);
+    const __m128i c16_n50_p18   = _mm_set1_epi32(0xFFCE0012);
+    const __m128i c16_n89_p75   = _mm_set1_epi32(0xFFA7004B);
+
+    /* EEO-part coefficients (rows 4/12). */
+    const __m128i c16_p36_p83   = _mm_set1_epi32(0x00240053);
+    const __m128i c16_n83_p36   = _mm_set1_epi32(0xFFAD0024);
+
+    /* EEE-part coefficients (rows 0/8). */
+    const __m128i c16_n64_p64   = _mm_set1_epi32(0xFFC00040);
+    const __m128i c16_p64_p64   = _mm_set1_epi32(0x00400040);
+
+    // DCT1
+    /* in* and res* hold the 16x16 block as two 8-column halves ([0] = columns
+     * 0-7, [1] = columns 8-15), one __m128i of eight int16_t per row half. */
+    __m128i in00[2], in01[2], in02[2], in03[2], in04[2], in05[2], in06[2], in07[2];
+    __m128i in08[2], in09[2], in10[2], in11[2], in12[2], in13[2], in14[2], in15[2];
+    __m128i res00[2], res01[2], res02[2], res03[2], res04[2], res05[2], res06[2], res07[2];
+    __m128i res08[2], res09[2], res10[2], res11[2], res12[2], res13[2], res14[2], res15[2];
+
+    /* Pass 1: transform both 8-column halves of src (round ADD1, shift SHIFT1). */
+    {
+        READ_UNPACKHILO(0)
+        PROCESS(0, ADD1, SHIFT1)
+    }
+
+    {
+        READ_UNPACKHILO(8)
+        PROCESS(1, ADD1, SHIFT1)
+    }
+    /* Transpose the 16x16 intermediate (four 8x8 sub-blocks) into in*,
+     * turning the second pass into a column transform again. */
+    {
+        __m128i tr0_0, tr0_1, tr0_2, tr0_3, tr0_4, tr0_5, tr0_6, tr0_7;
+        __m128i tr1_0, tr1_1, tr1_2, tr1_3, tr1_4, tr1_5, tr1_6, tr1_7;
+        TRANSPOSE_8x8_16BIT(res00[0], res01[0], res02[0], res03[0], res04[0], res05[0], res06[0], res07[0], in00[0], in01[0], in02[0], in03[0], in04[0], in05[0], in06[0], in07[0])
+        TRANSPOSE_8x8_16BIT(res08[0], res09[0], res10[0], res11[0], res12[0], res13[0], res14[0], res15[0], in00[1], in01[1], in02[1], in03[1], in04[1], in05[1], in06[1], in07[1])
+        TRANSPOSE_8x8_16BIT(res00[1], res01[1], res02[1], res03[1], res04[1], res05[1], res06[1], res07[1], in08[0], in09[0], in10[0], in11[0], in12[0], in13[0], in14[0], in15[0])
+        TRANSPOSE_8x8_16BIT(res08[1], res09[1], res10[1], res11[1], res12[1], res13[1], res14[1], res15[1], in08[1], in09[1], in10[1], in11[1], in12[1], in13[1], in14[1], in15[1])
+    }
+
+    /* Pass 2: same transform on the transposed data (round ADD2, shift SHIFT2). */
+    {
+        UNPACKHILO(0)
+        PROCESS(0, ADD2, SHIFT2)
+    }
+    {
+        UNPACKHILO(1)
+        PROCESS(1, ADD2, SHIFT2)
+    }
+
+    /* Final transpose back to row order and store the four 8x8 sub-blocks. */
+    {
+        __m128i tr0_0, tr0_1, tr0_2, tr0_3, tr0_4, tr0_5, tr0_6, tr0_7;
+        __m128i tr1_0, tr1_1, tr1_2, tr1_3, tr1_4, tr1_5, tr1_6, tr1_7;
+        TRANSPOSE_8x8_16BIT(res00[0], res01[0], res02[0], res03[0], res04[0], res05[0], res06[0], res07[0], in00[0], in01[0], in02[0], in03[0], in04[0], in05[0], in06[0], in07[0])
+        _mm_store_si128((__m128i*)&dst[0 * stride + 0], in00[0]);
+        _mm_store_si128((__m128i*)&dst[1 * stride + 0], in01[0]);
+        _mm_store_si128((__m128i*)&dst[2 * stride + 0], in02[0]);
+        _mm_store_si128((__m128i*)&dst[3 * stride + 0], in03[0]);
+        _mm_store_si128((__m128i*)&dst[4 * stride + 0], in04[0]);
+        _mm_store_si128((__m128i*)&dst[5 * stride + 0], in05[0]);
+        _mm_store_si128((__m128i*)&dst[6 * stride + 0], in06[0]);
+        _mm_store_si128((__m128i*)&dst[7 * stride + 0], in07[0]);
+        TRANSPOSE_8x8_16BIT(res08[0], res09[0], res10[0], res11[0], res12[0], res13[0], res14[0], res15[0], in00[1], in01[1], in02[1], in03[1], in04[1], in05[1], in06[1], in07[1])
+        _mm_store_si128((__m128i*)&dst[0 * stride + 8], in00[1]);
+        _mm_store_si128((__m128i*)&dst[1 * stride + 8], in01[1]);
+        _mm_store_si128((__m128i*)&dst[2 * stride + 8], in02[1]);
+        _mm_store_si128((__m128i*)&dst[3 * stride + 8], in03[1]);
+        _mm_store_si128((__m128i*)&dst[4 * stride + 8], in04[1]);
+        _mm_store_si128((__m128i*)&dst[5 * stride + 8], in05[1]);
+        _mm_store_si128((__m128i*)&dst[6 * stride + 8], in06[1]);
+        _mm_store_si128((__m128i*)&dst[7 * stride + 8], in07[1]);
+        TRANSPOSE_8x8_16BIT(res00[1], res01[1], res02[1], res03[1], res04[1], res05[1], res06[1], res07[1], in08[0], in09[0], in10[0], in11[0], in12[0], in13[0], in14[0], in15[0])
+        _mm_store_si128((__m128i*)&dst[8 * stride + 0], in08[0]);
+        _mm_store_si128((__m128i*)&dst[9 * stride + 0], in09[0]);
+        _mm_store_si128((__m128i*)&dst[10 * stride + 0], in10[0]);
+        _mm_store_si128((__m128i*)&dst[11 * stride + 0], in11[0]);
+        _mm_store_si128((__m128i*)&dst[12 * stride + 0], in12[0]);
+        _mm_store_si128((__m128i*)&dst[13 * stride + 0], in13[0]);
+        _mm_store_si128((__m128i*)&dst[14 * stride + 0], in14[0]);
+        _mm_store_si128((__m128i*)&dst[15 * stride + 0], in15[0]);
+        TRANSPOSE_8x8_16BIT(res08[1], res09[1], res10[1], res11[1], res12[1], res13[1], res14[1], res15[1], in08[1], in09[1], in10[1], in11[1], in12[1], in13[1], in14[1], in15[1])
+        _mm_store_si128((__m128i*)&dst[8 * stride + 8], in08[1]);
+        _mm_store_si128((__m128i*)&dst[9 * stride + 8], in09[1]);
+        _mm_store_si128((__m128i*)&dst[10 * stride + 8], in10[1]);
+        _mm_store_si128((__m128i*)&dst[11 * stride + 8], in11[1]);
+        _mm_store_si128((__m128i*)&dst[12 * stride + 8], in12[1]);
+        _mm_store_si128((__m128i*)&dst[13 * stride + 8], in13[1]);
+        _mm_store_si128((__m128i*)&dst[14 * stride + 8], in14[1]);
+        _mm_store_si128((__m128i*)&dst[15 * stride + 8], in15[1]);
+    }
+}
+/* The helper macros are local to idct16; undefine so later functions
+ * (e.g. idct32) can reuse the names. */
+#undef PROCESS
+#undef TRANSPOSE_8x8_16BIT
+#undef COMPUTE_ROW
+#undef UNPACKHILO
+#undef READ_UNPACKHILO
+
+static void idct32(const int16_t *src, int16_t *dst, intptr_t stride)
+{
+    //Odd
+    const __m128i c16_p90_p90   = _mm_set1_epi32(0x005A005A); //column 0
+    const __m128i c16_p85_p88   = _mm_set1_epi32(0x00550058);
+    const __m128i c16_p78_p82   = _mm_set1_epi32(0x004E0052);
+    const __m128i c16_p67_p73   = _mm_set1_epi32(0x00430049);
+    const __m128i c16_p54_p61   = _mm_set1_epi32(0x0036003D);
+    const __m128i c16_p38_p46   = _mm_set1_epi32(0x0026002E);
+    const __m128i c16_p22_p31   = _mm_set1_epi32(0x0016001F);
+    const __m128i c16_p04_p13   = _mm_set1_epi32(0x0004000D);
+    const __m128i c16_p82_p90   = _mm_set1_epi32(0x0052005A); //column 1
+    const __m128i c16_p46_p67   = _mm_set1_epi32(0x002E0043);
+    const __m128i c16_n04_p22   = _mm_set1_epi32(0xFFFC0016);
+    const __m128i c16_n54_n31   = _mm_set1_epi32(0xFFCAFFE1);
+    const __m128i c16_n85_n73   = _mm_set1_epi32(0xFFABFFB7);
+    const __m128i c16_n88_n90   = _mm_set1_epi32(0xFFA8FFA6);
+    const __m128i c16_n61_n78   = _mm_set1_epi32(0xFFC3FFB2);
+    const __m128i c16_n13_n38   = _mm_set1_epi32(0xFFF3FFDA);
+    const __m128i c16_p67_p88   = _mm_set1_epi32(0x00430058); //column 2
+    const __m128i c16_n13_p31   = _mm_set1_epi32(0xFFF3001F);
+    const __m128i c16_n82_n54   = _mm_set1_epi32(0xFFAEFFCA);
+    const __m128i c16_n78_n90   = _mm_set1_epi32(0xFFB2FFA6);
+    const __m128i c16_n04_n46   = _mm_set1_epi32(0xFFFCFFD2);
+    const __m128i c16_p73_p38   = _mm_set1_epi32(0x00490026);
+    const __m128i c16_p85_p90   = _mm_set1_epi32(0x0055005A);
+    const __m128i c16_p22_p61   = _mm_set1_epi32(0x0016003D);
+    const __m128i c16_p46_p85   = _mm_set1_epi32(0x002E0055); //column 3
+    const __m128i c16_n67_n13   = _mm_set1_epi32(0xFFBDFFF3);
+    const __m128i c16_n73_n90   = _mm_set1_epi32(0xFFB7FFA6);
+    const __m128i c16_p38_n22   = _mm_set1_epi32(0x0026FFEA);
+    const __m128i c16_p88_p82   = _mm_set1_epi32(0x00580052);
+    const __m128i c16_n04_p54   = _mm_set1_epi32(0xFFFC0036);
+    const __m128i c16_n90_n61   = _mm_set1_epi32(0xFFA6FFC3);
+    const __m128i c16_n31_n78   = _mm_set1_epi32(0xFFE1FFB2);
+    const __m128i c16_p22_p82   = _mm_set1_epi32(0x00160052); //column 4
+    const __m128i c16_n90_n54   = _mm_set1_epi32(0xFFA6FFCA);
+    const __m128i c16_p13_n61   = _mm_set1_epi32(0x000DFFC3);
+    const __m128i c16_p85_p78   = _mm_set1_epi32(0x0055004E);
+    const __m128i c16_n46_p31   = _mm_set1_epi32(0xFFD2001F);
+    const __m128i c16_n67_n90   = _mm_set1_epi32(0xFFBDFFA6);
+    const __m128i c16_p73_p04   = _mm_set1_epi32(0x00490004);
+    const __m128i c16_p38_p88   = _mm_set1_epi32(0x00260058);
+    const __m128i c16_n04_p78   = _mm_set1_epi32(0xFFFC004E); //column 5
+    const __m128i c16_n73_n82   = _mm_set1_epi32(0xFFB7FFAE);
+    const __m128i c16_p85_p13   = _mm_set1_epi32(0x0055000D);
+    const __m128i c16_n22_p67   = _mm_set1_epi32(0xFFEA0043);
+    const __m128i c16_n61_n88   = _mm_set1_epi32(0xFFC3FFA8);
+    const __m128i c16_p90_p31   = _mm_set1_epi32(0x005A001F);
+    const __m128i c16_n38_p54   = _mm_set1_epi32(0xFFDA0036);
+    const __m128i c16_n46_n90   = _mm_set1_epi32(0xFFD2FFA6);
+    const __m128i c16_n31_p73   = _mm_set1_epi32(0xFFE10049); //column 6
+    const __m128i c16_n22_n90   = _mm_set1_epi32(0xFFEAFFA6);
+    const __m128i c16_p67_p78   = _mm_set1_epi32(0x0043004E);
+    const __m128i c16_n90_n38   = _mm_set1_epi32(0xFFA6FFDA);
+    const __m128i c16_p82_n13   = _mm_set1_epi32(0x0052FFF3);
+    const __m128i c16_n46_p61   = _mm_set1_epi32(0xFFD2003D);
+    const __m128i c16_n04_n88   = _mm_set1_epi32(0xFFFCFFA8);
+    const __m128i c16_p54_p85   = _mm_set1_epi32(0x00360055);
+    const __m128i c16_n54_p67   = _mm_set1_epi32(0xFFCA0043); //column 7
+    const __m128i c16_p38_n78   = _mm_set1_epi32(0x0026FFB2);
+    const __m128i c16_n22_p85   = _mm_set1_epi32(0xFFEA0055);
+    const __m128i c16_p04_n90   = _mm_set1_epi32(0x0004FFA6);
+    const __m128i c16_p13_p90   = _mm_set1_epi32(0x000D005A);
+    const __m128i c16_n31_n88   = _mm_set1_epi32(0xFFE1FFA8);
+    const __m128i c16_p46_p82   = _mm_set1_epi32(0x002E0052);
+    const __m128i c16_n61_n73   = _mm_set1_epi32(0xFFC3FFB7);
+    const __m128i c16_n73_p61   = _mm_set1_epi32(0xFFB7003D); //column 8
+    const __m128i c16_p82_n46   = _mm_set1_epi32(0x0052FFD2);
+    const __m128i c16_n88_p31   = _mm_set1_epi32(0xFFA8001F);
+    const __m128i c16_p90_n13   = _mm_set1_epi32(0x005AFFF3);
+    const __m128i c16_n90_n04   = _mm_set1_epi32(0xFFA6FFFC);
+    const __m128i c16_p85_p22   = _mm_set1_epi32(0x00550016);
+    const __m128i c16_n78_n38   = _mm_set1_epi32(0xFFB2FFDA);
+    const __m128i c16_p67_p54   = _mm_set1_epi32(0x00430036);
+    const __m128i c16_n85_p54   = _mm_set1_epi32(0xFFAB0036); //column 9
+    const __m128i c16_p88_n04   = _mm_set1_epi32(0x0058FFFC);
+    const __m128i c16_n61_n46   = _mm_set1_epi32(0xFFC3FFD2);
+    const __m128i c16_p13_p82   = _mm_set1_epi32(0x000D0052);
+    const __m128i c16_p38_n90   = _mm_set1_epi32(0x0026FFA6);
+    const __m128i c16_n78_p67   = _mm_set1_epi32(0xFFB20043);
+    const __m128i c16_p90_n22   = _mm_set1_epi32(0x005AFFEA);
+    const __m128i c16_n73_n31   = _mm_set1_epi32(0xFFB7FFE1);
+    const __m128i c16_n90_p46   = _mm_set1_epi32(0xFFA6002E); //column 10
+    const __m128i c16_p54_p38   = _mm_set1_epi32(0x00360026);
+    const __m128i c16_p31_n90   = _mm_set1_epi32(0x001FFFA6);
+    const __m128i c16_n88_p61   = _mm_set1_epi32(0xFFA8003D);
+    const __m128i c16_p67_p22   = _mm_set1_epi32(0x00430016);
+    const __m128i c16_p13_n85   = _mm_set1_epi32(0x000DFFAB);
+    const __m128i c16_n82_p73   = _mm_set1_epi32(0xFFAE0049);
+    const __m128i c16_p78_p04   = _mm_set1_epi32(0x004E0004);
+    const __m128i c16_n88_p38   = _mm_set1_epi32(0xFFA80026); //column 11
+    const __m128i c16_n04_p73   = _mm_set1_epi32(0xFFFC0049);
+    const __m128i c16_p90_n67   = _mm_set1_epi32(0x005AFFBD);
+    const __m128i c16_n31_n46   = _mm_set1_epi32(0xFFE1FFD2);
+    const __m128i c16_n78_p85   = _mm_set1_epi32(0xFFB20055);
+    const __m128i c16_p61_p13   = _mm_set1_epi32(0x003D000D);
+    const __m128i c16_p54_n90   = _mm_set1_epi32(0x0036FFA6);
+    const __m128i c16_n82_p22   = _mm_set1_epi32(0xFFAE0016);
+    const __m128i c16_n78_p31   = _mm_set1_epi32(0xFFB2001F); //column 12
+    const __m128i c16_n61_p90   = _mm_set1_epi32(0xFFC3005A);
+    const __m128i c16_p54_p04   = _mm_set1_epi32(0x00360004);
+    const __m128i c16_p82_n88   = _mm_set1_epi32(0x0052FFA8);
+    const __m128i c16_n22_n38   = _mm_set1_epi32(0xFFEAFFDA);
+    const __m128i c16_n90_p73   = _mm_set1_epi32(0xFFA60049);
+    const __m128i c16_n13_p67   = _mm_set1_epi32(0xFFF30043);
+    const __m128i c16_p85_n46   = _mm_set1_epi32(0x0055FFD2);
+    const __m128i c16_n61_p22   = _mm_set1_epi32(0xFFC30016); //column 13
+    const __m128i c16_n90_p85   = _mm_set1_epi32(0xFFA60055);
+    const __m128i c16_n38_p73   = _mm_set1_epi32(0xFFDA0049);
+    const __m128i c16_p46_n04   = _mm_set1_epi32(0x002EFFFC);
+    const __m128i c16_p90_n78   = _mm_set1_epi32(0x005AFFB2);
+    const __m128i c16_p54_n82   = _mm_set1_epi32(0x0036FFAE);
+    const __m128i c16_n31_n13   = _mm_set1_epi32(0xFFE1FFF3);
+    const __m128i c16_n88_p67   = _mm_set1_epi32(0xFFA80043);
+    const __m128i c16_n38_p13   = _mm_set1_epi32(0xFFDA000D); //column 14
+    const __m128i c16_n78_p61   = _mm_set1_epi32(0xFFB2003D);
+    const __m128i c16_n90_p88   = _mm_set1_epi32(0xFFA60058);
+    const __m128i c16_n73_p85   = _mm_set1_epi32(0xFFB70055);
+    const __m128i c16_n31_p54   = _mm_set1_epi32(0xFFE10036);
+    const __m128i c16_p22_p04   = _mm_set1_epi32(0x00160004);
+    const __m128i c16_p67_n46   = _mm_set1_epi32(0x0043FFD2);
+    const __m128i c16_p90_n82   = _mm_set1_epi32(0x005AFFAE);
+    const __m128i c16_n13_p04   = _mm_set1_epi32(0xFFF30004); //column 15
+    const __m128i c16_n31_p22   = _mm_set1_epi32(0xFFE10016);
+    const __m128i c16_n46_p38   = _mm_set1_epi32(0xFFD20026);
+    const __m128i c16_n61_p54   = _mm_set1_epi32(0xFFC30036);
+    const __m128i c16_n73_p67   = _mm_set1_epi32(0xFFB70043);
+    const __m128i c16_n82_p78   = _mm_set1_epi32(0xFFAE004E);
+    const __m128i c16_n88_p85   = _mm_set1_epi32(0xFFA80055);
+    const __m128i c16_n90_p90   = _mm_set1_epi32(0xFFA6005A);
+
+    //EO
+    const __m128i c16_p87_p90   = _mm_set1_epi32(0x0057005A); //row0 87high - 90low address
+    const __m128i c16_p70_p80   = _mm_set1_epi32(0x00460050);
+    const __m128i c16_p43_p57   = _mm_set1_epi32(0x002B0039);
+    const __m128i c16_p09_p25   = _mm_set1_epi32(0x00090019);
+    const __m128i c16_p57_p87   = _mm_set1_epi32(0x00390057); //row1
+    const __m128i c16_n43_p09   = _mm_set1_epi32(0xFFD50009);
+    const __m128i c16_n90_n80   = _mm_set1_epi32(0xFFA6FFB0);
+    const __m128i c16_n25_n70   = _mm_set1_epi32(0xFFE7FFBA);
+    const __m128i c16_p09_p80   = _mm_set1_epi32(0x00090050); //row2
+    const __m128i c16_n87_n70   = _mm_set1_epi32(0xFFA9FFBA);
+    const __m128i c16_p57_n25   = _mm_set1_epi32(0x0039FFE7);
+    const __m128i c16_p43_p90   = _mm_set1_epi32(0x002B005A);
+    const __m128i c16_n43_p70   = _mm_set1_epi32(0xFFD50046); //row3
+    const __m128i c16_p09_n87   = _mm_set1_epi32(0x0009FFA9);
+    const __m128i c16_p25_p90   = _mm_set1_epi32(0x0019005A);
+    const __m128i c16_n57_n80   = _mm_set1_epi32(0xFFC7FFB0);
+    const __m128i c16_n80_p57   = _mm_set1_epi32(0xFFB00039); //row4
+    const __m128i c16_p90_n25   = _mm_set1_epi32(0x005AFFE7);
+    const __m128i c16_n87_n09   = _mm_set1_epi32(0xFFA9FFF7);
+    const __m128i c16_p70_p43   = _mm_set1_epi32(0x0046002B);
+    const __m128i c16_n90_p43   = _mm_set1_epi32(0xFFA6002B); //row5
+    const __m128i c16_p25_p57   = _mm_set1_epi32(0x00190039);
+    const __m128i c16_p70_n87   = _mm_set1_epi32(0x0046FFA9);
+    const __m128i c16_n80_p09   = _mm_set1_epi32(0xFFB00009);
+    const __m128i c16_n70_p25   = _mm_set1_epi32(0xFFBA0019); //row6
+    const __m128i c16_n80_p90   = _mm_set1_epi32(0xFFB0005A);
+    const __m128i c16_p09_p43   = _mm_set1_epi32(0x0009002B);
+    const __m128i c16_p87_n57   = _mm_set1_epi32(0x0057FFC7);
+    const __m128i c16_n25_p09   = _mm_set1_epi32(0xFFE70009); //row7
+    const __m128i c16_n57_p43   = _mm_set1_epi32(0xFFC7002B);
+    const __m128i c16_n80_p70   = _mm_set1_epi32(0xFFB00046);
+    const __m128i c16_n90_p87   = _mm_set1_epi32(0xFFA60057);
+    //EEO
+    const __m128i c16_p75_p89   = _mm_set1_epi32(0x004B0059);
+    const __m128i c16_p18_p50   = _mm_set1_epi32(0x00120032);
+    const __m128i c16_n18_p75   = _mm_set1_epi32(0xFFEE004B);
+    const __m128i c16_n50_n89   = _mm_set1_epi32(0xFFCEFFA7);
+    const __m128i c16_n89_p50   = _mm_set1_epi32(0xFFA70032);
+    const __m128i c16_p75_p18   = _mm_set1_epi32(0x004B0012);
+    const __m128i c16_n50_p18   = _mm_set1_epi32(0xFFCE0012);
+    const __m128i c16_n89_p75   = _mm_set1_epi32(0xFFA7004B);
+    //EEEO
+    const __m128i c16_p36_p83   = _mm_set1_epi32(0x00240053);
+    const __m128i c16_n83_p36   = _mm_set1_epi32(0xFFAD0024);
+    //EEEE
+    const __m128i c16_n64_p64   = _mm_set1_epi32(0xFFC00040);
+    const __m128i c16_p64_p64   = _mm_set1_epi32(0x00400040);
+    __m128i c32_rnd             = _mm_set1_epi32(ADD1);
+
+    int nShift = SHIFT1;
+
+    // DCT1
+    __m128i in00[4], in01[4], in02[4], in03[4], in04[4], in05[4], in06[4], in07[4], in08[4], in09[4], in10[4], in11[4], in12[4], in13[4], in14[4], in15[4];
+    __m128i in16[4], in17[4], in18[4], in19[4], in20[4], in21[4], in22[4], in23[4], in24[4], in25[4], in26[4], in27[4], in28[4], in29[4], in30[4], in31[4];
+    __m128i res00[4], res01[4], res02[4], res03[4], res04[4], res05[4], res06[4], res07[4], res08[4], res09[4], res10[4], res11[4], res12[4], res13[4], res14[4], res15[4];
+    __m128i res16[4], res17[4], res18[4], res19[4], res20[4], res21[4], res22[4], res23[4], res24[4], res25[4], res26[4], res27[4], res28[4], res29[4], res30[4], res31[4];
+
+    for (int i = 0; i < 4; i++)
+    {
+        const int offset = (i << 3);
+        in00[i]  = _mm_loadu_si128((const __m128i*)&src[0  * 32 + offset]);
+        in01[i]  = _mm_loadu_si128((const __m128i*)&src[1  * 32 + offset]);
+        in02[i]  = _mm_loadu_si128((const __m128i*)&src[2  * 32 + offset]);
+        in03[i]  = _mm_loadu_si128((const __m128i*)&src[3  * 32 + offset]);
+        in04[i]  = _mm_loadu_si128((const __m128i*)&src[4  * 32 + offset]);
+        in05[i]  = _mm_loadu_si128((const __m128i*)&src[5  * 32 + offset]);
+        in06[i]  = _mm_loadu_si128((const __m128i*)&src[6  * 32 + offset]);
+        in07[i]  = _mm_loadu_si128((const __m128i*)&src[7  * 32 + offset]);
+        in08[i]  = _mm_loadu_si128((const __m128i*)&src[8  * 32 + offset]);
+        in09[i]  = _mm_loadu_si128((const __m128i*)&src[9  * 32 + offset]);
+        in10[i]  = _mm_loadu_si128((const __m128i*)&src[10 * 32 + offset]);
+        in11[i]  = _mm_loadu_si128((const __m128i*)&src[11 * 32 + offset]);
+        in12[i]  = _mm_loadu_si128((const __m128i*)&src[12 * 32 + offset]);
+        in13[i]  = _mm_loadu_si128((const __m128i*)&src[13 * 32 + offset]);
+        in14[i]  = _mm_loadu_si128((const __m128i*)&src[14 * 32 + offset]);
+        in15[i]  = _mm_loadu_si128((const __m128i*)&src[15 * 32 + offset]);
+        in16[i]  = _mm_loadu_si128((const __m128i*)&src[16 * 32 + offset]);
+        in17[i]  = _mm_loadu_si128((const __m128i*)&src[17 * 32 + offset]);
+        in18[i]  = _mm_loadu_si128((const __m128i*)&src[18 * 32 + offset]);
+        in19[i]  = _mm_loadu_si128((const __m128i*)&src[19 * 32 + offset]);
+        in20[i]  = _mm_loadu_si128((const __m128i*)&src[20 * 32 + offset]);
+        in21[i]  = _mm_loadu_si128((const __m128i*)&src[21 * 32 + offset]);
+        in22[i]  = _mm_loadu_si128((const __m128i*)&src[22 * 32 + offset]);
+        in23[i]  = _mm_loadu_si128((const __m128i*)&src[23 * 32 + offset]);
+        in24[i]  = _mm_loadu_si128((const __m128i*)&src[24 * 32 + offset]);
+        in25[i]  = _mm_loadu_si128((const __m128i*)&src[25 * 32 + offset]);
+        in26[i]  = _mm_loadu_si128((const __m128i*)&src[26 * 32 + offset]);
+        in27[i]  = _mm_loadu_si128((const __m128i*)&src[27 * 32 + offset]);
+        in28[i]  = _mm_loadu_si128((const __m128i*)&src[28 * 32 + offset]);
+        in29[i]  = _mm_loadu_si128((const __m128i*)&src[29 * 32 + offset]);
+        in30[i]  = _mm_loadu_si128((const __m128i*)&src[30 * 32 + offset]);
+        in31[i]  = _mm_loadu_si128((const __m128i*)&src[31 * 32 + offset]);
+    }
+
+    for (int pass = 0; pass < 2; pass++)
+    {
+        if (pass == 1)
+        {
+            c32_rnd = _mm_set1_epi32(ADD2);
+            nShift  = SHIFT2;
+        }
+
+        for (int part = 0; part < 4; part++)
+        {
+            const __m128i T_00_00A = _mm_unpacklo_epi16(in01[part], in03[part]);       // [33 13 32 12 31 11 30 10]
+            const __m128i T_00_00B = _mm_unpackhi_epi16(in01[part], in03[part]);       // [37 17 36 16 35 15 34 14]
+            const __m128i T_00_01A = _mm_unpacklo_epi16(in05[part], in07[part]);       // [ ]
+            const __m128i T_00_01B = _mm_unpackhi_epi16(in05[part], in07[part]);       // [ ]
+            const __m128i T_00_02A = _mm_unpacklo_epi16(in09[part], in11[part]);       // [ ]
+            const __m128i T_00_02B = _mm_unpackhi_epi16(in09[part], in11[part]);       // [ ]
+            const __m128i T_00_03A = _mm_unpacklo_epi16(in13[part], in15[part]);       // [ ]
+            const __m128i T_00_03B = _mm_unpackhi_epi16(in13[part], in15[part]);       // [ ]
+            const __m128i T_00_04A = _mm_unpacklo_epi16(in17[part], in19[part]);       // [ ]
+            const __m128i T_00_04B = _mm_unpackhi_epi16(in17[part], in19[part]);       // [ ]
+            const __m128i T_00_05A = _mm_unpacklo_epi16(in21[part], in23[part]);       // [ ]
+            const __m128i T_00_05B = _mm_unpackhi_epi16(in21[part], in23[part]);       // [ ]
+            const __m128i T_00_06A = _mm_unpacklo_epi16(in25[part], in27[part]);       // [ ]
+            const __m128i T_00_06B = _mm_unpackhi_epi16(in25[part], in27[part]);       // [ ]
+            const __m128i T_00_07A = _mm_unpacklo_epi16(in29[part], in31[part]);       //
+            const __m128i T_00_07B = _mm_unpackhi_epi16(in29[part], in31[part]);       // [ ]
+
+            const __m128i T_00_08A = _mm_unpacklo_epi16(in02[part], in06[part]);       // [ ]
+            const __m128i T_00_08B = _mm_unpackhi_epi16(in02[part], in06[part]);       // [ ]
+            const __m128i T_00_09A = _mm_unpacklo_epi16(in10[part], in14[part]);       // [ ]
+            const __m128i T_00_09B = _mm_unpackhi_epi16(in10[part], in14[part]);       // [ ]
+            const __m128i T_00_10A = _mm_unpacklo_epi16(in18[part], in22[part]);       // [ ]
+            const __m128i T_00_10B = _mm_unpackhi_epi16(in18[part], in22[part]);       // [ ]
+            const __m128i T_00_11A = _mm_unpacklo_epi16(in26[part], in30[part]);       // [ ]
+            const __m128i T_00_11B = _mm_unpackhi_epi16(in26[part], in30[part]);       // [ ]
+
+            const __m128i T_00_12A = _mm_unpacklo_epi16(in04[part], in12[part]);       // [ ]
+            const __m128i T_00_12B = _mm_unpackhi_epi16(in04[part], in12[part]);       // [ ]
+            const __m128i T_00_13A = _mm_unpacklo_epi16(in20[part], in28[part]);       // [ ]
+            const __m128i T_00_13B = _mm_unpackhi_epi16(in20[part], in28[part]);       // [ ]
+
+            const __m128i T_00_14A = _mm_unpacklo_epi16(in08[part], in24[part]);       //
+            const __m128i T_00_14B = _mm_unpackhi_epi16(in08[part], in24[part]);       // [ ]
+            const __m128i T_00_15A = _mm_unpacklo_epi16(in00[part], in16[part]);       //
+            const __m128i T_00_15B = _mm_unpackhi_epi16(in00[part], in16[part]);       // [ ]
+
+            __m128i O00A, O01A, O02A, O03A, O04A, O05A, O06A, O07A, O08A, O09A, O10A, O11A, O12A, O13A, O14A, O15A;
+            __m128i O00B, O01B, O02B, O03B, O04B, O05B, O06B, O07B, O08B, O09B, O10B, O11B, O12B, O13B, O14B, O15B;
+            {
+                __m128i T00, T01, T02, T03;
+#define COMPUTE_ROW(r0103, r0507, r0911, r1315, r1719, r2123, r2527, r2931, c0103, c0507, c0911, c1315, c1719, c2123, c2527, c2931, row) \
+    T00 = _mm_add_epi32(_mm_madd_epi16(r0103, c0103), _mm_madd_epi16(r0507, c0507)); \
+    T01 = _mm_add_epi32(_mm_madd_epi16(r0911, c0911), _mm_madd_epi16(r1315, c1315)); \
+    T02 = _mm_add_epi32(_mm_madd_epi16(r1719, c1719), _mm_madd_epi16(r2123, c2123)); \
+    T03 = _mm_add_epi32(_mm_madd_epi16(r2527, c2527), _mm_madd_epi16(r2931, c2931)); \
+    row = _mm_add_epi32(_mm_add_epi32(T00, T01), _mm_add_epi32(T02, T03));
+
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_p90_p90, c16_p85_p88, c16_p78_p82, c16_p67_p73, c16_p54_p61, c16_p38_p46, c16_p22_p31, c16_p04_p13, O00A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_p82_p90, c16_p46_p67, c16_n04_p22, c16_n54_n31, c16_n85_n73, c16_n88_n90, c16_n61_n78, c16_n13_n38, O01A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_p67_p88, c16_n13_p31, c16_n82_n54, c16_n78_n90, c16_n04_n46, c16_p73_p38, c16_p85_p90, c16_p22_p61, O02A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_p46_p85, c16_n67_n13, c16_n73_n90, c16_p38_n22, c16_p88_p82, c16_n04_p54, c16_n90_n61, c16_n31_n78, O03A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_p22_p82, c16_n90_n54, c16_p13_n61, c16_p85_p78, c16_n46_p31, c16_n67_n90, c16_p73_p04, c16_p38_p88, O04A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n04_p78, c16_n73_n82, c16_p85_p13, c16_n22_p67, c16_n61_n88, c16_p90_p31, c16_n38_p54, c16_n46_n90, O05A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n31_p73, c16_n22_n90, c16_p67_p78, c16_n90_n38, c16_p82_n13, c16_n46_p61, c16_n04_n88, c16_p54_p85, O06A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n54_p67, c16_p38_n78, c16_n22_p85, c16_p04_n90, c16_p13_p90, c16_n31_n88, c16_p46_p82, c16_n61_n73, O07A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n73_p61, c16_p82_n46, c16_n88_p31, c16_p90_n13, c16_n90_n04, c16_p85_p22, c16_n78_n38, c16_p67_p54, O08A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n85_p54, c16_p88_n04, c16_n61_n46, c16_p13_p82, c16_p38_n90, c16_n78_p67, c16_p90_n22, c16_n73_n31, O09A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n90_p46, c16_p54_p38, c16_p31_n90, c16_n88_p61, c16_p67_p22, c16_p13_n85, c16_n82_p73, c16_p78_p04, O10A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n88_p38, c16_n04_p73, c16_p90_n67, c16_n31_n46, c16_n78_p85, c16_p61_p13, c16_p54_n90, c16_n82_p22, O11A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n78_p31, c16_n61_p90, c16_p54_p04, c16_p82_n88, c16_n22_n38, c16_n90_p73, c16_n13_p67, c16_p85_n46, O12A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n61_p22, c16_n90_p85, c16_n38_p73, c16_p46_n04, c16_p90_n78, c16_p54_n82, c16_n31_n13, c16_n88_p67, O13A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n38_p13, c16_n78_p61, c16_n90_p88, c16_n73_p85, c16_n31_p54, c16_p22_p04, c16_p67_n46, c16_p90_n82, O14A)
+                COMPUTE_ROW(T_00_00A, T_00_01A, T_00_02A, T_00_03A, T_00_04A, T_00_05A, T_00_06A, T_00_07A, \
+                            c16_n13_p04, c16_n31_p22, c16_n46_p38, c16_n61_p54, c16_n73_p67, c16_n82_p78, c16_n88_p85, c16_n90_p90, O15A)
+
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_p90_p90, c16_p85_p88, c16_p78_p82, c16_p67_p73, c16_p54_p61, c16_p38_p46, c16_p22_p31, c16_p04_p13, O00B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_p82_p90, c16_p46_p67, c16_n04_p22, c16_n54_n31, c16_n85_n73, c16_n88_n90, c16_n61_n78, c16_n13_n38, O01B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_p67_p88, c16_n13_p31, c16_n82_n54, c16_n78_n90, c16_n04_n46, c16_p73_p38, c16_p85_p90, c16_p22_p61, O02B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_p46_p85, c16_n67_n13, c16_n73_n90, c16_p38_n22, c16_p88_p82, c16_n04_p54, c16_n90_n61, c16_n31_n78, O03B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_p22_p82, c16_n90_n54, c16_p13_n61, c16_p85_p78, c16_n46_p31, c16_n67_n90, c16_p73_p04, c16_p38_p88, O04B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n04_p78, c16_n73_n82, c16_p85_p13, c16_n22_p67, c16_n61_n88, c16_p90_p31, c16_n38_p54, c16_n46_n90, O05B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n31_p73, c16_n22_n90, c16_p67_p78, c16_n90_n38, c16_p82_n13, c16_n46_p61, c16_n04_n88, c16_p54_p85, O06B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n54_p67, c16_p38_n78, c16_n22_p85, c16_p04_n90, c16_p13_p90, c16_n31_n88, c16_p46_p82, c16_n61_n73, O07B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n73_p61, c16_p82_n46, c16_n88_p31, c16_p90_n13, c16_n90_n04, c16_p85_p22, c16_n78_n38, c16_p67_p54, O08B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n85_p54, c16_p88_n04, c16_n61_n46, c16_p13_p82, c16_p38_n90, c16_n78_p67, c16_p90_n22, c16_n73_n31, O09B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n90_p46, c16_p54_p38, c16_p31_n90, c16_n88_p61, c16_p67_p22, c16_p13_n85, c16_n82_p73, c16_p78_p04, O10B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n88_p38, c16_n04_p73, c16_p90_n67, c16_n31_n46, c16_n78_p85, c16_p61_p13, c16_p54_n90, c16_n82_p22, O11B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n78_p31, c16_n61_p90, c16_p54_p04, c16_p82_n88, c16_n22_n38, c16_n90_p73, c16_n13_p67, c16_p85_n46, O12B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n61_p22, c16_n90_p85, c16_n38_p73, c16_p46_n04, c16_p90_n78, c16_p54_n82, c16_n31_n13, c16_n88_p67, O13B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n38_p13, c16_n78_p61, c16_n90_p88, c16_n73_p85, c16_n31_p54, c16_p22_p04, c16_p67_n46, c16_p90_n82, O14B)
+                COMPUTE_ROW(T_00_00B, T_00_01B, T_00_02B, T_00_03B, T_00_04B, T_00_05B, T_00_06B, T_00_07B, \
+                            c16_n13_p04, c16_n31_p22, c16_n46_p38, c16_n61_p54, c16_n73_p67, c16_n82_p78, c16_n88_p85, c16_n90_p90, O15B)
+
+#undef COMPUTE_ROW
+            }
+
+            __m128i EO0A, EO1A, EO2A, EO3A, EO4A, EO5A, EO6A, EO7A;
+            __m128i EO0B, EO1B, EO2B, EO3B, EO4B, EO5B, EO6B, EO7B;
+            {
+                __m128i T00, T01;
+#define COMPUTE_ROW(row0206, row1014, row1822, row2630, c0206, c1014, c1822, c2630, row) \
+    T00 = _mm_add_epi32(_mm_madd_epi16(row0206, c0206), _mm_madd_epi16(row1014, c1014)); \
+    T01 = _mm_add_epi32(_mm_madd_epi16(row1822, c1822), _mm_madd_epi16(row2630, c2630)); \
+    row = _mm_add_epi32(T00, T01);
+
+                COMPUTE_ROW(T_00_08A, T_00_09A, T_00_10A, T_00_11A, c16_p87_p90, c16_p70_p80, c16_p43_p57, c16_p09_p25, EO0A)
+                COMPUTE_ROW(T_00_08A, T_00_09A, T_00_10A, T_00_11A, c16_p57_p87, c16_n43_p09, c16_n90_n80, c16_n25_n70, EO1A)
+                COMPUTE_ROW(T_00_08A, T_00_09A, T_00_10A, T_00_11A, c16_p09_p80, c16_n87_n70, c16_p57_n25, c16_p43_p90, EO2A)
+                COMPUTE_ROW(T_00_08A, T_00_09A, T_00_10A, T_00_11A, c16_n43_p70, c16_p09_n87, c16_p25_p90, c16_n57_n80, EO3A)
+                COMPUTE_ROW(T_00_08A, T_00_09A, T_00_10A, T_00_11A, c16_n80_p57, c16_p90_n25, c16_n87_n09, c16_p70_p43, EO4A)
+                COMPUTE_ROW(T_00_08A, T_00_09A, T_00_10A, T_00_11A, c16_n90_p43, c16_p25_p57, c16_p70_n87, c16_n80_p09, EO5A)
+                COMPUTE_ROW(T_00_08A, T_00_09A, T_00_10A, T_00_11A, c16_n70_p25, c16_n80_p90, c16_p09_p43, c16_p87_n57, EO6A)
+                COMPUTE_ROW(T_00_08A, T_00_09A, T_00_10A, T_00_11A, c16_n25_p09, c16_n57_p43, c16_n80_p70, c16_n90_p87, EO7A)
+
+                COMPUTE_ROW(T_00_08B, T_00_09B, T_00_10B, T_00_11B, c16_p87_p90, c16_p70_p80, c16_p43_p57, c16_p09_p25, EO0B)
+                COMPUTE_ROW(T_00_08B, T_00_09B, T_00_10B, T_00_11B, c16_p57_p87, c16_n43_p09, c16_n90_n80, c16_n25_n70, EO1B)
+                COMPUTE_ROW(T_00_08B, T_00_09B, T_00_10B, T_00_11B, c16_p09_p80, c16_n87_n70, c16_p57_n25, c16_p43_p90, EO2B)
+                COMPUTE_ROW(T_00_08B, T_00_09B, T_00_10B, T_00_11B, c16_n43_p70, c16_p09_n87, c16_p25_p90, c16_n57_n80, EO3B)
+                COMPUTE_ROW(T_00_08B, T_00_09B, T_00_10B, T_00_11B, c16_n80_p57, c16_p90_n25, c16_n87_n09, c16_p70_p43, EO4B)
+                COMPUTE_ROW(T_00_08B, T_00_09B, T_00_10B, T_00_11B, c16_n90_p43, c16_p25_p57, c16_p70_n87, c16_n80_p09, EO5B)
+                COMPUTE_ROW(T_00_08B, T_00_09B, T_00_10B, T_00_11B, c16_n70_p25, c16_n80_p90, c16_p09_p43, c16_p87_n57, EO6B)
+                COMPUTE_ROW(T_00_08B, T_00_09B, T_00_10B, T_00_11B, c16_n25_p09, c16_n57_p43, c16_n80_p70, c16_n90_p87, EO7B)
+#undef COMPUTE_ROW
+            }
+
+            const __m128i EEO0A = _mm_add_epi32(_mm_madd_epi16(T_00_12A, c16_p75_p89), _mm_madd_epi16(T_00_13A, c16_p18_p50)); // EEO0
+            const __m128i EEO0B = _mm_add_epi32(_mm_madd_epi16(T_00_12B, c16_p75_p89), _mm_madd_epi16(T_00_13B, c16_p18_p50));
+            const __m128i EEO1A = _mm_add_epi32(_mm_madd_epi16(T_00_12A, c16_n18_p75), _mm_madd_epi16(T_00_13A, c16_n50_n89)); // EEO1
+            const __m128i EEO1B = _mm_add_epi32(_mm_madd_epi16(T_00_12B, c16_n18_p75), _mm_madd_epi16(T_00_13B, c16_n50_n89));
+            const __m128i EEO2A = _mm_add_epi32(_mm_madd_epi16(T_00_12A, c16_n89_p50), _mm_madd_epi16(T_00_13A, c16_p75_p18)); // EEO2
+            const __m128i EEO2B = _mm_add_epi32(_mm_madd_epi16(T_00_12B, c16_n89_p50), _mm_madd_epi16(T_00_13B, c16_p75_p18));
+            const __m128i EEO3A = _mm_add_epi32(_mm_madd_epi16(T_00_12A, c16_n50_p18), _mm_madd_epi16(T_00_13A, c16_n89_p75)); // EEO3
+            const __m128i EEO3B = _mm_add_epi32(_mm_madd_epi16(T_00_12B, c16_n50_p18), _mm_madd_epi16(T_00_13B, c16_n89_p75));
+
+            const __m128i EEEO0A = _mm_madd_epi16(T_00_14A, c16_p36_p83);
+            const __m128i EEEO0B = _mm_madd_epi16(T_00_14B, c16_p36_p83);
+            const __m128i EEEO1A = _mm_madd_epi16(T_00_14A, c16_n83_p36);
+            const __m128i EEEO1B = _mm_madd_epi16(T_00_14B, c16_n83_p36);
+
+            const __m128i EEEE0A = _mm_madd_epi16(T_00_15A, c16_p64_p64);
+            const __m128i EEEE0B = _mm_madd_epi16(T_00_15B, c16_p64_p64);
+            const __m128i EEEE1A = _mm_madd_epi16(T_00_15A, c16_n64_p64);
+            const __m128i EEEE1B = _mm_madd_epi16(T_00_15B, c16_n64_p64);
+
+            const __m128i EEE0A = _mm_add_epi32(EEEE0A, EEEO0A);          // EEE0 = EEEE0 + EEEO0
+            const __m128i EEE0B = _mm_add_epi32(EEEE0B, EEEO0B);
+            const __m128i EEE1A = _mm_add_epi32(EEEE1A, EEEO1A);          // EEE1 = EEEE1 + EEEO1
+            const __m128i EEE1B = _mm_add_epi32(EEEE1B, EEEO1B);
+            const __m128i EEE3A = _mm_sub_epi32(EEEE0A, EEEO0A);          // EEE2 = EEEE0 - EEEO0
+            const __m128i EEE3B = _mm_sub_epi32(EEEE0B, EEEO0B);
+            const __m128i EEE2A = _mm_sub_epi32(EEEE1A, EEEO1A);          // EEE3 = EEEE1 - EEEO1
+            const __m128i EEE2B = _mm_sub_epi32(EEEE1B, EEEO1B);
+
+            const __m128i EE0A = _mm_add_epi32(EEE0A, EEO0A);          // EE0 = EEE0 + EEO0
+            const __m128i EE0B = _mm_add_epi32(EEE0B, EEO0B);
+            const __m128i EE1A = _mm_add_epi32(EEE1A, EEO1A);          // EE1 = EEE1 + EEO1
+            const __m128i EE1B = _mm_add_epi32(EEE1B, EEO1B);
+            const __m128i EE2A = _mm_add_epi32(EEE2A, EEO2A);          // EE2 = EEE0 + EEO0
+            const __m128i EE2B = _mm_add_epi32(EEE2B, EEO2B);
+            const __m128i EE3A = _mm_add_epi32(EEE3A, EEO3A);          // EE3 = EEE1 + EEO1
+            const __m128i EE3B = _mm_add_epi32(EEE3B, EEO3B);
+            const __m128i EE7A = _mm_sub_epi32(EEE0A, EEO0A);          // EE7 = EEE0 - EEO0
+            const __m128i EE7B = _mm_sub_epi32(EEE0B, EEO0B);
+            const __m128i EE6A = _mm_sub_epi32(EEE1A, EEO1A);          // EE6 = EEE1 - EEO1
+            const __m128i EE6B = _mm_sub_epi32(EEE1B, EEO1B);
+            const __m128i EE5A = _mm_sub_epi32(EEE2A, EEO2A);          // EE5 = EEE0 - EEO0
+            const __m128i EE5B = _mm_sub_epi32(EEE2B, EEO2B);
+            const __m128i EE4A = _mm_sub_epi32(EEE3A, EEO3A);          // EE4 = EEE1 - EEO1
+            const __m128i EE4B = _mm_sub_epi32(EEE3B, EEO3B);
+
+            const __m128i E0A = _mm_add_epi32(EE0A, EO0A);          // E0 = EE0 + EO0
+            const __m128i E0B = _mm_add_epi32(EE0B, EO0B);
+            const __m128i E1A = _mm_add_epi32(EE1A, EO1A);          // E1 = EE1 + EO1
+            const __m128i E1B = _mm_add_epi32(EE1B, EO1B);
+            const __m128i E2A = _mm_add_epi32(EE2A, EO2A);          // E2 = EE2 + EO2
+            const __m128i E2B = _mm_add_epi32(EE2B, EO2B);
+            const __m128i E3A = _mm_add_epi32(EE3A, EO3A);          // E3 = EE3 + EO3
+            const __m128i E3B = _mm_add_epi32(EE3B, EO3B);
+            const __m128i E4A = _mm_add_epi32(EE4A, EO4A);          // E4 =
+            const __m128i E4B = _mm_add_epi32(EE4B, EO4B);
+            const __m128i E5A = _mm_add_epi32(EE5A, EO5A);          // E5 =
+            const __m128i E5B = _mm_add_epi32(EE5B, EO5B);
+            const __m128i E6A = _mm_add_epi32(EE6A, EO6A);          // E6 =
+            const __m128i E6B = _mm_add_epi32(EE6B, EO6B);
+            const __m128i E7A = _mm_add_epi32(EE7A, EO7A);          // E7 =
+            const __m128i E7B = _mm_add_epi32(EE7B, EO7B);
+            const __m128i EFA = _mm_sub_epi32(EE0A, EO0A);          // EF = EE0 - EO0
+            const __m128i EFB = _mm_sub_epi32(EE0B, EO0B);
+            const __m128i EEA = _mm_sub_epi32(EE1A, EO1A);          // EE = EE1 - EO1
+            const __m128i EEB = _mm_sub_epi32(EE1B, EO1B);
+            const __m128i EDA = _mm_sub_epi32(EE2A, EO2A);          // ED = EE2 - EO2
+            const __m128i EDB = _mm_sub_epi32(EE2B, EO2B);
+            const __m128i ECA = _mm_sub_epi32(EE3A, EO3A);          // EC = EE3 - EO3
+            const __m128i ECB = _mm_sub_epi32(EE3B, EO3B);
+            const __m128i EBA = _mm_sub_epi32(EE4A, EO4A);          // EB = EE4 - EO4
+            const __m128i EBB = _mm_sub_epi32(EE4B, EO4B);
+            const __m128i EAA = _mm_sub_epi32(EE5A, EO5A);          // EA = EE5 - EO5
+            const __m128i EAB = _mm_sub_epi32(EE5B, EO5B);
+            const __m128i E9A = _mm_sub_epi32(EE6A, EO6A);          // E9 = EE6 - EO6
+            const __m128i E9B = _mm_sub_epi32(EE6B, EO6B);
+            const __m128i E8A = _mm_sub_epi32(EE7A, EO7A);          // E8 = EE7 - EO7
+            const __m128i E8B = _mm_sub_epi32(EE7B, EO7B);
+
+            const __m128i T10A = _mm_add_epi32(E0A, c32_rnd);         // E0 + rnd
+            const __m128i T10B = _mm_add_epi32(E0B, c32_rnd);
+            const __m128i T11A = _mm_add_epi32(E1A, c32_rnd);         // E1 + rnd
+            const __m128i T11B = _mm_add_epi32(E1B, c32_rnd);
+            const __m128i T12A = _mm_add_epi32(E2A, c32_rnd);         // E2 + rnd
+            const __m128i T12B = _mm_add_epi32(E2B, c32_rnd);
+            const __m128i T13A = _mm_add_epi32(E3A, c32_rnd);         // E3 + rnd
+            const __m128i T13B = _mm_add_epi32(E3B, c32_rnd);
+            const __m128i T14A = _mm_add_epi32(E4A, c32_rnd);         // E4 + rnd
+            const __m128i T14B = _mm_add_epi32(E4B, c32_rnd);
+            const __m128i T15A = _mm_add_epi32(E5A, c32_rnd);         // E5 + rnd
+            const __m128i T15B = _mm_add_epi32(E5B, c32_rnd);
+            const __m128i T16A = _mm_add_epi32(E6A, c32_rnd);         // E6 + rnd
+            const __m128i T16B = _mm_add_epi32(E6B, c32_rnd);
+            const __m128i T17A = _mm_add_epi32(E7A, c32_rnd);         // E7 + rnd
+            const __m128i T17B = _mm_add_epi32(E7B, c32_rnd);
+            const __m128i T18A = _mm_add_epi32(E8A, c32_rnd);         // E8 + rnd
+            const __m128i T18B = _mm_add_epi32(E8B, c32_rnd);
+            const __m128i T19A = _mm_add_epi32(E9A, c32_rnd);         // E9 + rnd
+            const __m128i T19B = _mm_add_epi32(E9B, c32_rnd);
+            const __m128i T1AA = _mm_add_epi32(EAA, c32_rnd);         // E10 + rnd
+            const __m128i T1AB = _mm_add_epi32(EAB, c32_rnd);
+            const __m128i T1BA = _mm_add_epi32(EBA, c32_rnd);         // E11 + rnd
+            const __m128i T1BB = _mm_add_epi32(EBB, c32_rnd);
+            const __m128i T1CA = _mm_add_epi32(ECA, c32_rnd);         // E12 + rnd
+            const __m128i T1CB = _mm_add_epi32(ECB, c32_rnd);
+            const __m128i T1DA = _mm_add_epi32(EDA, c32_rnd);         // E13 + rnd
+            const __m128i T1DB = _mm_add_epi32(EDB, c32_rnd);
+            const __m128i T1EA = _mm_add_epi32(EEA, c32_rnd);         // E14 + rnd
+            const __m128i T1EB = _mm_add_epi32(EEB, c32_rnd);
+            const __m128i T1FA = _mm_add_epi32(EFA, c32_rnd);         // E15 + rnd
+            const __m128i T1FB = _mm_add_epi32(EFB, c32_rnd);
+
+            const __m128i T2_00A = _mm_add_epi32(T10A, O00A);          // E0 + O0 + rnd
+            const __m128i T2_00B = _mm_add_epi32(T10B, O00B);
+            const __m128i T2_01A = _mm_add_epi32(T11A, O01A);          // E1 + O1 + rnd
+            const __m128i T2_01B = _mm_add_epi32(T11B, O01B);
+            const __m128i T2_02A = _mm_add_epi32(T12A, O02A);          // E2 + O2 + rnd
+            const __m128i T2_02B = _mm_add_epi32(T12B, O02B);
+            const __m128i T2_03A = _mm_add_epi32(T13A, O03A);          // E3 + O3 + rnd
+            const __m128i T2_03B = _mm_add_epi32(T13B, O03B);
+            const __m128i T2_04A = _mm_add_epi32(T14A, O04A);          // E4
+            const __m128i T2_04B = _mm_add_epi32(T14B, O04B);
+            const __m128i T2_05A = _mm_add_epi32(T15A, O05A);          // E5
+            const __m128i T2_05B = _mm_add_epi32(T15B, O05B);
+            const __m128i T2_06A = _mm_add_epi32(T16A, O06A);          // E6
+            const __m128i T2_06B = _mm_add_epi32(T16B, O06B);
+            const __m128i T2_07A = _mm_add_epi32(T17A, O07A);          // E7
+            const __m128i T2_07B = _mm_add_epi32(T17B, O07B);
+            const __m128i T2_08A = _mm_add_epi32(T18A, O08A);          // E8
+            const __m128i T2_08B = _mm_add_epi32(T18B, O08B);
+            const __m128i T2_09A = _mm_add_epi32(T19A, O09A);          // E9
+            const __m128i T2_09B = _mm_add_epi32(T19B, O09B);
+            const __m128i T2_10A = _mm_add_epi32(T1AA, O10A);          // E10
+            const __m128i T2_10B = _mm_add_epi32(T1AB, O10B);
+            const __m128i T2_11A = _mm_add_epi32(T1BA, O11A);          // E11
+            const __m128i T2_11B = _mm_add_epi32(T1BB, O11B);
+            const __m128i T2_12A = _mm_add_epi32(T1CA, O12A);          // E12
+            const __m128i T2_12B = _mm_add_epi32(T1CB, O12B);
+            const __m128i T2_13A = _mm_add_epi32(T1DA, O13A);          // E13
+            const __m128i T2_13B = _mm_add_epi32(T1DB, O13B);
+            const __m128i T2_14A = _mm_add_epi32(T1EA, O14A);          // E14
+            const __m128i T2_14B = _mm_add_epi32(T1EB, O14B);
+            const __m128i T2_15A = _mm_add_epi32(T1FA, O15A);          // E15
+            const __m128i T2_15B = _mm_add_epi32(T1FB, O15B);
+            const __m128i T2_31A = _mm_sub_epi32(T10A, O00A);          // E0 - O0 + rnd
+            const __m128i T2_31B = _mm_sub_epi32(T10B, O00B);
+            const __m128i T2_30A = _mm_sub_epi32(T11A, O01A);          // E1 - O1 + rnd
+            const __m128i T2_30B = _mm_sub_epi32(T11B, O01B);
+            const __m128i T2_29A = _mm_sub_epi32(T12A, O02A);          // E2 - O2 + rnd
+            const __m128i T2_29B = _mm_sub_epi32(T12B, O02B);
+            const __m128i T2_28A = _mm_sub_epi32(T13A, O03A);          // E3 - O3 + rnd
+            const __m128i T2_28B = _mm_sub_epi32(T13B, O03B);
+            const __m128i T2_27A = _mm_sub_epi32(T14A, O04A);          // E4
+            const __m128i T2_27B = _mm_sub_epi32(T14B, O04B);
+            const __m128i T2_26A = _mm_sub_epi32(T15A, O05A);          // E5
+            const __m128i T2_26B = _mm_sub_epi32(T15B, O05B);
+            const __m128i T2_25A = _mm_sub_epi32(T16A, O06A);          // E6
+            const __m128i T2_25B = _mm_sub_epi32(T16B, O06B);
+            const __m128i T2_24A = _mm_sub_epi32(T17A, O07A);          // E7
+            const __m128i T2_24B = _mm_sub_epi32(T17B, O07B);
+            const __m128i T2_23A = _mm_sub_epi32(T18A, O08A);          //
+            const __m128i T2_23B = _mm_sub_epi32(T18B, O08B);
+            const __m128i T2_22A = _mm_sub_epi32(T19A, O09A);          //
+            const __m128i T2_22B = _mm_sub_epi32(T19B, O09B);
+            const __m128i T2_21A = _mm_sub_epi32(T1AA, O10A);          //
+            const __m128i T2_21B = _mm_sub_epi32(T1AB, O10B);
+            const __m128i T2_20A = _mm_sub_epi32(T1BA, O11A);          //
+            const __m128i T2_20B = _mm_sub_epi32(T1BB, O11B);
+            const __m128i T2_19A = _mm_sub_epi32(T1CA, O12A);          //
+            const __m128i T2_19B = _mm_sub_epi32(T1CB, O12B);
+            const __m128i T2_18A = _mm_sub_epi32(T1DA, O13A);          //
+            const __m128i T2_18B = _mm_sub_epi32(T1DB, O13B);
+            const __m128i T2_17A = _mm_sub_epi32(T1EA, O14A);          //
+            const __m128i T2_17B = _mm_sub_epi32(T1EB, O14B);
+            const __m128i T2_16A = _mm_sub_epi32(T1FA, O15A);          //
+            const __m128i T2_16B = _mm_sub_epi32(T1FB, O15B);
+
+            const __m128i T3_00A = _mm_srai_epi32(T2_00A, nShift);             // [30 20 10 00]
+            const __m128i T3_00B = _mm_srai_epi32(T2_00B, nShift);             // [70 60 50 40]
+            const __m128i T3_01A = _mm_srai_epi32(T2_01A, nShift);             // [31 21 11 01]
+            const __m128i T3_01B = _mm_srai_epi32(T2_01B, nShift);             // [71 61 51 41]
+            const __m128i T3_02A = _mm_srai_epi32(T2_02A, nShift);             // [32 22 12 02]
+            const __m128i T3_02B = _mm_srai_epi32(T2_02B, nShift);             // [72 62 52 42]
+            const __m128i T3_03A = _mm_srai_epi32(T2_03A, nShift);             // [33 23 13 03]
+            const __m128i T3_03B = _mm_srai_epi32(T2_03B, nShift);             // [73 63 53 43]
+            const __m128i T3_04A = _mm_srai_epi32(T2_04A, nShift);             // [34 24 14 04]
+            const __m128i T3_04B = _mm_srai_epi32(T2_04B, nShift);             // [74 64 54 44]
+            const __m128i T3_05A = _mm_srai_epi32(T2_05A, nShift);             // [35 25 15 05]
+            const __m128i T3_05B = _mm_srai_epi32(T2_05B, nShift);             // [75 65 55 45]
+            const __m128i T3_06A = _mm_srai_epi32(T2_06A, nShift);             // [36 26 16 06]
+            const __m128i T3_06B = _mm_srai_epi32(T2_06B, nShift);             // [76 66 56 46]
+            const __m128i T3_07A = _mm_srai_epi32(T2_07A, nShift);             // [37 27 17 07]
+            const __m128i T3_07B = _mm_srai_epi32(T2_07B, nShift);             // [77 67 57 47]
+            const __m128i T3_08A = _mm_srai_epi32(T2_08A, nShift);             // [30 20 10 00] x8
+            const __m128i T3_08B = _mm_srai_epi32(T2_08B, nShift);             // [70 60 50 40]
+            const __m128i T3_09A = _mm_srai_epi32(T2_09A, nShift);             // [31 21 11 01] x9
+            const __m128i T3_09B = _mm_srai_epi32(T2_09B, nShift);             // [71 61 51 41]
+            const __m128i T3_10A = _mm_srai_epi32(T2_10A, nShift);             // [32 22 12 02] xA
+            const __m128i T3_10B = _mm_srai_epi32(T2_10B, nShift);             // [72 62 52 42]
+            const __m128i T3_11A = _mm_srai_epi32(T2_11A, nShift);             // [33 23 13 03] xB
+            const __m128i T3_11B = _mm_srai_epi32(T2_11B, nShift);             // [73 63 53 43]
+            const __m128i T3_12A = _mm_srai_epi32(T2_12A, nShift);             // [34 24 14 04] xC
+            const __m128i T3_12B = _mm_srai_epi32(T2_12B, nShift);             // [74 64 54 44]
+            const __m128i T3_13A = _mm_srai_epi32(T2_13A, nShift);             // [35 25 15 05] xD
+            const __m128i T3_13B = _mm_srai_epi32(T2_13B, nShift);             // [75 65 55 45]
+            const __m128i T3_14A = _mm_srai_epi32(T2_14A, nShift);             // [36 26 16 06] xE
+            const __m128i T3_14B = _mm_srai_epi32(T2_14B, nShift);             // [76 66 56 46]
+            const __m128i T3_15A = _mm_srai_epi32(T2_15A, nShift);             // [37 27 17 07] xF
+            const __m128i T3_15B = _mm_srai_epi32(T2_15B, nShift);             // [77 67 57 47]
+
+            const __m128i T3_16A = _mm_srai_epi32(T2_16A, nShift);             // [30 20 10 00]
+            const __m128i T3_16B = _mm_srai_epi32(T2_16B, nShift);             // [70 60 50 40]
+            const __m128i T3_17A = _mm_srai_epi32(T2_17A, nShift);             // [31 21 11 01]
+            const __m128i T3_17B = _mm_srai_epi32(T2_17B, nShift);             // [71 61 51 41]
+            const __m128i T3_18A = _mm_srai_epi32(T2_18A, nShift);             // [32 22 12 02]
+            const __m128i T3_18B = _mm_srai_epi32(T2_18B, nShift);             // [72 62 52 42]
+            const __m128i T3_19A = _mm_srai_epi32(T2_19A, nShift);             // [33 23 13 03]
+            const __m128i T3_19B = _mm_srai_epi32(T2_19B, nShift);             // [73 63 53 43]
+            const __m128i T3_20A = _mm_srai_epi32(T2_20A, nShift);             // [33 24 14 04]
+            const __m128i T3_20B = _mm_srai_epi32(T2_20B, nShift);             // [74 64 54 44]
+            const __m128i T3_21A = _mm_srai_epi32(T2_21A, nShift);             // [35 25 15 05]
+            const __m128i T3_21B = _mm_srai_epi32(T2_21B, nShift);             // [75 65 55 45]
+            const __m128i T3_22A = _mm_srai_epi32(T2_22A, nShift);             // [36 26 16 06]
+            const __m128i T3_22B = _mm_srai_epi32(T2_22B, nShift);             // [76 66 56 46]
+            const __m128i T3_23A = _mm_srai_epi32(T2_23A, nShift);             // [37 27 17 07]
+            const __m128i T3_23B = _mm_srai_epi32(T2_23B, nShift);             // [77 67 57 47]
+            const __m128i T3_24A = _mm_srai_epi32(T2_24A, nShift);             // [30 20 10 00] x8
+            const __m128i T3_24B = _mm_srai_epi32(T2_24B, nShift);             // [70 60 50 40]
+            const __m128i T3_25A = _mm_srai_epi32(T2_25A, nShift);             // [31 21 11 01] x9
+            const __m128i T3_25B = _mm_srai_epi32(T2_25B, nShift);             // [71 61 51 41]
+            const __m128i T3_26A = _mm_srai_epi32(T2_26A, nShift);             // [32 22 12 02] xA
+            const __m128i T3_26B = _mm_srai_epi32(T2_26B, nShift);             // [72 62 52 42]
+            const __m128i T3_27A = _mm_srai_epi32(T2_27A, nShift);             // [33 23 13 03] xB
+            const __m128i T3_27B = _mm_srai_epi32(T2_27B, nShift);             // [73 63 53 43]
+            const __m128i T3_28A = _mm_srai_epi32(T2_28A, nShift);             // [33 24 14 04] xC
+            const __m128i T3_28B = _mm_srai_epi32(T2_28B, nShift);             // [74 64 54 44]
+            const __m128i T3_29A = _mm_srai_epi32(T2_29A, nShift);             // [35 25 15 05] xD
+            const __m128i T3_29B = _mm_srai_epi32(T2_29B, nShift);             // [75 65 55 45]
+            const __m128i T3_30A = _mm_srai_epi32(T2_30A, nShift);             // [36 26 16 06] xE
+            const __m128i T3_30B = _mm_srai_epi32(T2_30B, nShift);             // [76 66 56 46]
+            const __m128i T3_31A = _mm_srai_epi32(T2_31A, nShift);             // [37 27 17 07] xF
+            const __m128i T3_31B = _mm_srai_epi32(T2_31B, nShift);             // [77 67 57 47]
+
+            res00[part]  = _mm_packs_epi32(T3_00A, T3_00B);        // [70 60 50 40 30 20 10 00]
+            res01[part]  = _mm_packs_epi32(T3_01A, T3_01B);        // [71 61 51 41 31 21 11 01]
+            res02[part]  = _mm_packs_epi32(T3_02A, T3_02B);        // [72 62 52 42 32 22 12 02]
+            res03[part]  = _mm_packs_epi32(T3_03A, T3_03B);        // [73 63 53 43 33 23 13 03]
+            res04[part]  = _mm_packs_epi32(T3_04A, T3_04B);        // [74 64 54 44 34 24 14 04]
+            res05[part]  = _mm_packs_epi32(T3_05A, T3_05B);        // [75 65 55 45 35 25 15 05]
+            res06[part]  = _mm_packs_epi32(T3_06A, T3_06B);        // [76 66 56 46 36 26 16 06]
+            res07[part]  = _mm_packs_epi32(T3_07A, T3_07B);        // [77 67 57 47 37 27 17 07]
+            res08[part]  = _mm_packs_epi32(T3_08A, T3_08B);        // [A0 ... 80]
+            res09[part]  = _mm_packs_epi32(T3_09A, T3_09B);        // [A1 ... 81]
+            res10[part]  = _mm_packs_epi32(T3_10A, T3_10B);        // [A2 ... 82]
+            res11[part]  = _mm_packs_epi32(T3_11A, T3_11B);        // [A3 ... 83]
+            res12[part]  = _mm_packs_epi32(T3_12A, T3_12B);        // [A4 ... 84]
+            res13[part]  = _mm_packs_epi32(T3_13A, T3_13B);        // [A5 ... 85]
+            res14[part]  = _mm_packs_epi32(T3_14A, T3_14B);        // [A6 ... 86]
+            res15[part]  = _mm_packs_epi32(T3_15A, T3_15B);        // [A7 ... 87]
+            res16[part]  = _mm_packs_epi32(T3_16A, T3_16B);
+            res17[part]  = _mm_packs_epi32(T3_17A, T3_17B);
+            res18[part]  = _mm_packs_epi32(T3_18A, T3_18B);
+            res19[part]  = _mm_packs_epi32(T3_19A, T3_19B);
+            res20[part]  = _mm_packs_epi32(T3_20A, T3_20B);
+            res21[part]  = _mm_packs_epi32(T3_21A, T3_21B);
+            res22[part]  = _mm_packs_epi32(T3_22A, T3_22B);
+            res23[part]  = _mm_packs_epi32(T3_23A, T3_23B);
+            res24[part]  = _mm_packs_epi32(T3_24A, T3_24B);
+            res25[part]  = _mm_packs_epi32(T3_25A, T3_25B);
+            res26[part]  = _mm_packs_epi32(T3_26A, T3_26B);
+            res27[part]  = _mm_packs_epi32(T3_27A, T3_27B);
+            res28[part]  = _mm_packs_epi32(T3_28A, T3_28B);
+            res29[part]  = _mm_packs_epi32(T3_29A, T3_29B);
+            res30[part]  = _mm_packs_epi32(T3_30A, T3_30B);
+            res31[part]  = _mm_packs_epi32(T3_31A, T3_31B);
+        }
+        //transpose matrix 8x8 16bit.
+        {
+            __m128i tr0_0, tr0_1, tr0_2, tr0_3, tr0_4, tr0_5, tr0_6, tr0_7;
+            __m128i tr1_0, tr1_1, tr1_2, tr1_3, tr1_4, tr1_5, tr1_6, tr1_7;
+#define TRANSPOSE_8x8_16BIT(I0, I1, I2, I3, I4, I5, I6, I7, O0, O1, O2, O3, O4, O5, O6, O7) \
+    tr0_0 = _mm_unpacklo_epi16(I0, I1); \
+    tr0_1 = _mm_unpacklo_epi16(I2, I3); \
+    tr0_2 = _mm_unpackhi_epi16(I0, I1); \
+    tr0_3 = _mm_unpackhi_epi16(I2, I3); \
+    tr0_4 = _mm_unpacklo_epi16(I4, I5); \
+    tr0_5 = _mm_unpacklo_epi16(I6, I7); \
+    tr0_6 = _mm_unpackhi_epi16(I4, I5); \
+    tr0_7 = _mm_unpackhi_epi16(I6, I7); \
+    tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
+    tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); \
+    tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
+    tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); \
+    tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
+    tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); \
+    tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
+    tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); \
+    O0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
+    O1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
+    O2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
+    O3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
+    O4 = _mm_unpacklo_epi64(tr1_1, tr1_5); \
+    O5 = _mm_unpackhi_epi64(tr1_1, tr1_5); \
+    O6 = _mm_unpacklo_epi64(tr1_3, tr1_7); \
+    O7 = _mm_unpackhi_epi64(tr1_3, tr1_7); \
+
+            TRANSPOSE_8x8_16BIT(res00[0], res01[0], res02[0], res03[0], res04[0], res05[0], res06[0], res07[0], in00[0], in01[0], in02[0], in03[0], in04[0], in05[0], in06[0], in07[0])
+            TRANSPOSE_8x8_16BIT(res00[1], res01[1], res02[1], res03[1], res04[1], res05[1], res06[1], res07[1], in08[0], in09[0], in10[0], in11[0], in12[0], in13[0], in14[0], in15[0])
+            TRANSPOSE_8x8_16BIT(res00[2], res01[2], res02[2], res03[2], res04[2], res05[2], res06[2], res07[2], in16[0], in17[0], in18[0], in19[0], in20[0], in21[0], in22[0], in23[0])
+            TRANSPOSE_8x8_16BIT(res00[3], res01[3], res02[3], res03[3], res04[3], res05[3], res06[3], res07[3], in24[0], in25[0], in26[0], in27[0], in28[0], in29[0], in30[0], in31[0])
+
+            TRANSPOSE_8x8_16BIT(res08[0], res09[0], res10[0], res11[0], res12[0], res13[0], res14[0], res15[0], in00[1], in01[1], in02[1], in03[1], in04[1], in05[1], in06[1], in07[1])
+            TRANSPOSE_8x8_16BIT(res08[1], res09[1], res10[1], res11[1], res12[1], res13[1], res14[1], res15[1], in08[1], in09[1], in10[1], in11[1], in12[1], in13[1], in14[1], in15[1])
+            TRANSPOSE_8x8_16BIT(res08[2], res09[2], res10[2], res11[2], res12[2], res13[2], res14[2], res15[2], in16[1], in17[1], in18[1], in19[1], in20[1], in21[1], in22[1], in23[1])
+            TRANSPOSE_8x8_16BIT(res08[3], res09[3], res10[3], res11[3], res12[3], res13[3], res14[3], res15[3], in24[1], in25[1], in26[1], in27[1], in28[1], in29[1], in30[1], in31[1])
+
+            TRANSPOSE_8x8_16BIT(res16[0], res17[0], res18[0], res19[0], res20[0], res21[0], res22[0], res23[0], in00[2], in01[2], in02[2], in03[2], in04[2], in05[2], in06[2], in07[2])
+            TRANSPOSE_8x8_16BIT(res16[1], res17[1], res18[1], res19[1], res20[1], res21[1], res22[1], res23[1], in08[2], in09[2], in10[2], in11[2], in12[2], in13[2], in14[2], in15[2])
+            TRANSPOSE_8x8_16BIT(res16[2], res17[2], res18[2], res19[2], res20[2], res21[2], res22[2], res23[2], in16[2], in17[2], in18[2], in19[2], in20[2], in21[2], in22[2], in23[2])
+            TRANSPOSE_8x8_16BIT(res16[3], res17[3], res18[3], res19[3], res20[3], res21[3], res22[3], res23[3], in24[2], in25[2], in26[2], in27[2], in28[2], in29[2], in30[2], in31[2])
+
+            TRANSPOSE_8x8_16BIT(res24[0], res25[0], res26[0], res27[0], res28[0], res29[0], res30[0], res31[0], in00[3], in01[3], in02[3], in03[3], in04[3], in05[3], in06[3], in07[3])
+            TRANSPOSE_8x8_16BIT(res24[1], res25[1], res26[1], res27[1], res28[1], res29[1], res30[1], res31[1], in08[3], in09[3], in10[3], in11[3], in12[3], in13[3], in14[3], in15[3])
+            TRANSPOSE_8x8_16BIT(res24[2], res25[2], res26[2], res27[2], res28[2], res29[2], res30[2], res31[2], in16[3], in17[3], in18[3], in19[3], in20[3], in21[3], in22[3], in23[3])
+            TRANSPOSE_8x8_16BIT(res24[3], res25[3], res26[3], res27[3], res28[3], res29[3], res30[3], res31[3], in24[3], in25[3], in26[3], in27[3], in28[3], in29[3], in30[3], in31[3])
+
+#undef TRANSPOSE_8x8_16BIT
+        }
+    }
+
+    // Add
+    for (int i = 0; i < 2; i++)
+    {
+#define STORE_LINE(L0, L1, L2, L3, L4, L5, L6, L7, H0, H1, H2, H3, H4, H5, H6, H7, offsetV, offsetH) \
+    _mm_storeu_si128((__m128i*)&dst[(0 + (offsetV)) * stride + (offsetH) + 0], L0); \
+    _mm_storeu_si128((__m128i*)&dst[(0 + (offsetV)) * stride + (offsetH) + 8], H0); \
+    _mm_storeu_si128((__m128i*)&dst[(1 + (offsetV)) * stride + (offsetH) + 0], L1); \
+    _mm_storeu_si128((__m128i*)&dst[(1 + (offsetV)) * stride + (offsetH) + 8], H1); \
+    _mm_storeu_si128((__m128i*)&dst[(2 + (offsetV)) * stride + (offsetH) + 0], L2); \
+    _mm_storeu_si128((__m128i*)&dst[(2 + (offsetV)) * stride + (offsetH) + 8], H2); \
+    _mm_storeu_si128((__m128i*)&dst[(3 + (offsetV)) * stride + (offsetH) + 0], L3); \
+    _mm_storeu_si128((__m128i*)&dst[(3 + (offsetV)) * stride + (offsetH) + 8], H3); \
+    _mm_storeu_si128((__m128i*)&dst[(4 + (offsetV)) * stride + (offsetH) + 0], L4); \
+    _mm_storeu_si128((__m128i*)&dst[(4 + (offsetV)) * stride + (offsetH) + 8], H4); \
+    _mm_storeu_si128((__m128i*)&dst[(5 + (offsetV)) * stride + (offsetH) + 0], L5); \
+    _mm_storeu_si128((__m128i*)&dst[(5 + (offsetV)) * stride + (offsetH) + 8], H5); \
+    _mm_storeu_si128((__m128i*)&dst[(6 + (offsetV)) * stride + (offsetH) + 0], L6); \
+    _mm_storeu_si128((__m128i*)&dst[(6 + (offsetV)) * stride + (offsetH) + 8], H6); \
+    _mm_storeu_si128((__m128i*)&dst[(7 + (offsetV)) * stride + (offsetH) + 0], L7); \
+    _mm_storeu_si128((__m128i*)&dst[(7 + (offsetV)) * stride + (offsetH) + 8], H7);
+
+        const int k = i * 2;
+        STORE_LINE(in00[k], in01[k], in02[k], in03[k], in04[k], in05[k], in06[k], in07[k], in00[k + 1], in01[k + 1], in02[k + 1], in03[k + 1], in04[k + 1], in05[k + 1], in06[k + 1], in07[k + 1], 0, i * 16)
+        STORE_LINE(in08[k], in09[k], in10[k], in11[k], in12[k], in13[k], in14[k], in15[k], in08[k + 1], in09[k + 1], in10[k + 1], in11[k + 1], in12[k + 1], in13[k + 1], in14[k + 1], in15[k + 1], 8, i * 16)
+        STORE_LINE(in16[k], in17[k], in18[k], in19[k], in20[k], in21[k], in22[k], in23[k], in16[k + 1], in17[k + 1], in18[k + 1], in19[k + 1], in20[k + 1], in21[k + 1], in22[k + 1], in23[k + 1], 16, i * 16)
+        STORE_LINE(in24[k], in25[k], in26[k], in27[k], in28[k], in29[k], in30[k], in31[k], in24[k + 1], in25[k + 1], in26[k + 1], in27[k + 1], in28[k + 1], in29[k + 1], in30[k + 1], in31[k + 1], 24, i * 16)
+#undef STORE_LINE
+    }
+}
+
+namespace X265_NS {
+void setupIntrinsicDCT_sse3(EncoderPrimitives &p)
+{
+    /* Register the SSE3 intrinsic inverse-DCT kernels. AVX2 assembly exists
+     * for these functions, but since AVX2 is still somewhat rare on end-user
+     * PCs these SSE3 intrinsic SIMD fallbacks are compiled and linked too. */
+    p.cu[BLOCK_8x8].idct   = idct8;   // 8x8 inverse transform
+    p.cu[BLOCK_16x16].idct = idct16;  // 16x16 inverse transform
+    p.cu[BLOCK_32x32].idct = idct32;  // 32x32 inverse transform
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/vec/dct-sse41.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,108 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Mandar Gurav <mandar@multicorewareinc.com>
+ *          Deepthi Devaki Akkoorath <deepthidevaki@multicorewareinc.com>
+ *          Mahesh Pittala <mahesh@multicorewareinc.com>
+ *          Rajesh Paulraj <rajesh@multicorewareinc.com>
+ *          Min Chen <min.chen@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *          Nabajit Deka <nabajit@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include <xmmintrin.h> // SSE
+#include <smmintrin.h> // SSE4.1
+
+using namespace X265_NS;
+
+static void dequant_scaling(const int16_t* quantCoef, const int32_t *deQuantCoef, int16_t* coef, int num, int per, int shift)
+{   // SSE4.1 dequant with a scaling list: coef[n] = quantCoef[n] * deQuantCoef[n], scaled by (shift - per), saturated to int16
+    X265_CHECK(num <= 32 * 32, "dequant num too large\n");  // largest transform unit is 32x32
+
+    int valueToAdd;
+
+    shift += 4;  // scaling-list values carry 4 extra bits of precision
+
+    if (shift > per)  // net effect is a right shift: multiply, add rounding offset, shift down
+    {
+        valueToAdd = 1 << (shift - per - 1);  // rounding offset: half of 1 << (shift - per)
+        __m128i IAdd = _mm_set1_epi32(valueToAdd);
+
+        for (int n = 0; n < num; n = n + 8)  // process 8 int16 coefficients per iteration
+        {
+            __m128i quantCoef1, quantCoef2, deQuantCoef1, deQuantCoef2, quantCoef12, sign;
+
+            quantCoef12 = _mm_loadu_si128((__m128i*)(quantCoef + n));  // 8 x int16 levels
+
+            deQuantCoef1 = _mm_loadu_si128((__m128i*)(deQuantCoef + n));  // 4 x int32 scale factors (low half)
+            deQuantCoef2 = _mm_loadu_si128((__m128i*)(deQuantCoef + n + 4));  // 4 x int32 scale factors (high half)
+
+            sign = _mm_srai_epi16(quantCoef12, 15);  // per-lane sign mask for int16 -> int32 widening
+            quantCoef1 = _mm_unpacklo_epi16(quantCoef12, sign);
+            quantCoef2 = _mm_unpackhi_epi16(quantCoef12, sign);
+
+            quantCoef1 = _mm_sra_epi32(_mm_add_epi32(_mm_mullo_epi32(quantCoef1, deQuantCoef1), IAdd), _mm_cvtsi32_si128(shift - per));  // (level * scale + round) >> (shift - per)
+            quantCoef2 = _mm_sra_epi32(_mm_add_epi32(_mm_mullo_epi32(quantCoef2, deQuantCoef2), IAdd), _mm_cvtsi32_si128(shift - per));
+
+            quantCoef12 = _mm_packs_epi32(quantCoef1, quantCoef2);  // saturate back down to int16
+            _mm_storeu_si128((__m128i*)(coef + n), quantCoef12);
+        }
+    }
+    else  // net effect is a left shift by (per - shift); no rounding needed
+    {
+        for (int n = 0; n < num; n = n + 8)
+        {
+            __m128i quantCoef1, quantCoef2, deQuantCoef1, deQuantCoef2, quantCoef12, sign;
+
+            quantCoef12 = _mm_loadu_si128((__m128i*)(quantCoef + n));  // 8 x int16 levels
+
+            deQuantCoef1 = _mm_loadu_si128((__m128i*)(deQuantCoef + n));
+            deQuantCoef2 = _mm_loadu_si128((__m128i*)(deQuantCoef + n + 4));
+
+            sign = _mm_srai_epi16(quantCoef12, 15);  // sign-extend int16 -> int32
+            quantCoef1 = _mm_unpacklo_epi16(quantCoef12, sign);
+            quantCoef2 = _mm_unpackhi_epi16(quantCoef12, sign);
+
+            quantCoef1 = _mm_mullo_epi32(quantCoef1, deQuantCoef1);  // level * scale
+            quantCoef2 = _mm_mullo_epi32(quantCoef2, deQuantCoef2);
+
+            quantCoef12 = _mm_packs_epi32(quantCoef1, quantCoef2);  // saturate the product to int16 before shifting up
+            sign = _mm_srai_epi16(quantCoef12, 15);  // re-widen the clipped products to int32
+            quantCoef1 = _mm_unpacklo_epi16(quantCoef12, sign);
+            quantCoef2 = _mm_unpackhi_epi16(quantCoef12, sign);
+
+            quantCoef1 = _mm_sll_epi32(quantCoef1, _mm_cvtsi32_si128(per - shift));  // << (per - shift)
+            quantCoef2 = _mm_sll_epi32(quantCoef2, _mm_cvtsi32_si128(per - shift));
+
+            quantCoef12 = _mm_packs_epi32(quantCoef1, quantCoef2);  // final int16 saturation
+            _mm_storeu_si128((__m128i*)(coef + n), quantCoef12);
+        }
+    }
+}
+
+namespace X265_NS {
+void setupIntrinsicDCT_sse41(EncoderPrimitives &p)
+{
+    p.dequant_scaling = dequant_scaling;  // SSE4.1 scaling-list dequant (relies on _mm_mullo_epi32)
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/vec/dct-ssse3.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1124 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Mandar Gurav <mandar@multicorewareinc.com>
+ *          Deepthi Devaki Akkoorath <deepthidevaki@multicorewareinc.com>
+ *          Mahesh Pittala <mahesh@multicorewareinc.com>
+ *          Rajesh Paulraj <rajesh@multicorewareinc.com>
+ *          Min Chen <min.chen@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *          Nabajit Deka <nabajit@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include <xmmintrin.h> // SSE
+#include <pmmintrin.h> // SSE3
+#include <tmmintrin.h> // SSSE3
+
+#define DCT16_SHIFT1  (3 + X265_DEPTH - 8)
+#define DCT16_ADD1    (1 << ((DCT16_SHIFT1) - 1))
+
+#define DCT16_SHIFT2  10
+#define DCT16_ADD2    (1 << ((DCT16_SHIFT2) - 1))
+
+#define DCT32_SHIFT1  (DCT16_SHIFT1 + 1)
+#define DCT32_ADD1    (1 << ((DCT32_SHIFT1) - 1))
+
+#define DCT32_SHIFT2  (DCT16_SHIFT2 + 1)
+#define DCT32_ADD2    (1 << ((DCT32_SHIFT2) - 1))
+
+using namespace X265_NS;
+
+ALIGN_VAR_32(static const int16_t, tab_dct_8[][8]) =  // shuffle control + 8-point DCT coefficient rows (HEVC transform constants)
+{
+    { 0x0100, 0x0F0E, 0x0706, 0x0908, 0x0302, 0x0D0C, 0x0504, 0x0B0A },  // [0] byte-shuffle control: picks 16-bit words in order 0,7,3,4,1,6,2,5 (pairs word k with word 7-k)
+
+    { 64, 64, 64, 64, 64, 64, 64, 64 },          // [1] even part: DC row (see dct16, output rows 0/8)
+    { 64, -64, 64, -64, 64, -64, 64, -64 },      // [2] even part: row 8
+    { 83, 36, 83, 36, 83, 36, 83, 36 },          // [3] even part: row 4
+    { 36, -83, 36, -83, 36, -83, 36, -83 },      // [4] even part: row 12
+    { 89, 18, 75, 50, 89, 18, 75, 50 },          // [5] odd part: row 2
+    { 75, -50, -18, -89, 75, -50, -18, -89 },    // [6] odd part: row 6
+    { 50, 75, -89, 18, 50, 75, -89, 18 },        // [7] odd part: row 10
+    { 18, -89, -50, 75, 18, -89, -50, 75 },      // [8] odd part: row 14
+
+    { 83, 83, -83, -83, 36, 36, -36, -36 },      // [9]  duplicated-lane variants -- presumably for a second-pass layout; use site not visible in this chunk
+    { 36, 36, -36, -36, -83, -83, 83, 83 },      // [10]
+    { 89, -89, 18, -18, 75, -75, 50, -50 },      // [11]
+    { 75, -75, -50, 50, -18, 18, -89, 89 },      // [12]
+    { 50, -50, 75, -75, -89, 89, 18, -18 },      // [13]
+    { 18, -18, -89, 89, -50, 50, 75, -75 },      // [14]
+};
+
+ALIGN_VAR_32(static const int16_t, tab_dct_16_0[][8]) =  // _mm_shuffle_epi8 controls used by dct16 (byte indices encoded as 16-bit pairs)
+{
+    { 0x0F0E, 0x0D0C, 0x0B0A, 0x0908, 0x0706, 0x0504, 0x0302, 0x0100 },  // 0: reverse the eight words (order 7,6,5,4,3,2,1,0); mirrors the upper half before the add/sub butterfly
+    { 0x0100, 0x0F0E, 0x0706, 0x0908, 0x0302, 0x0D0C, 0x0504, 0x0B0A },  // 1: word order 0,7,3,4,1,6,2,5 -- pairs word k with word 7-k for madd
+    { 0x0100, 0x0706, 0x0302, 0x0504, 0x0F0E, 0x0908, 0x0D0C, 0x0B0A },  // 2: word order 0,3,1,2,7,4,6,5 (second pass, lower half)
+    { 0x0F0E, 0x0908, 0x0D0C, 0x0B0A, 0x0100, 0x0706, 0x0302, 0x0504 },  // 3: word order 7,4,6,5,0,3,1,2 (second pass, upper half)
+};
+
+ALIGN_VAR_32(static const int16_t, tab_dct_16_1[][8]) =  // 16-point DCT coefficient rows (HEVC constants 90,87,80,70,57,43,25,9 and 89,75,50,18)
+{
+    { 90, 87, 80, 70, 57, 43, 25,  9 },  //  0: odd rows as plain coefficient vectors (pass-2 outputs 1..15, see MAKE_ODD in dct16's DCT2)
+    { 87, 57,  9, -43, -80, -90, -70, -25 },  //  1
+    { 80,  9, -70, -87, -25, 57, 90, 43 },  //  2
+    { 70, -43, -87,  9, 90, 25, -80, -57 },  //  3
+    { 57, -80, -25, 90, -9, -87, 43, 70 },  //  4
+    { 43, -90, 57, 25, -87, 70,  9, -80 },  //  5
+    { 25, -70, 90, -80, 43,  9, -57, 87 },  //  6
+    {  9, -25, 43, -57, 70, -80, 87, -90 },  //  7
+    { 83, 83, -83, -83, 36, 36, -36, -36 },  //  8: even-part rows with duplicated lanes (pass-2 outputs 4/12)
+    { 36, 36, -36, -36, -83, -83, 83, 83 },  //  9
+    { 89, 89, 18, 18, 75, 75, 50, 50 },  // 10: pass-2 outputs 2/6/10/14
+    { 75, 75, -50, -50, -18, -18, -89, -89 },  // 11
+    { 50, 50, 75, 75, -89, -89, 18, 18 },  // 12
+    { 18, 18, -89, -89, -50, -50, 75, 75 },  // 13
+
+#define MAKE_COEF(a0, a1, a2, a3, a4, a5, a6, a7) \
+    { (a0), -(a0), (a3), -(a3), (a1), -(a1), (a2), -(a2) \
+    }, \
+    { (a7), -(a7), (a4), -(a4), (a6), -(a6), (a5), -(a5) },
+
+    MAKE_COEF(90, 87, 80, 70, 57, 43, 25,  9)
+    MAKE_COEF(87, 57,  9, -43, -80, -90, -70, -25)
+    MAKE_COEF(80,  9, -70, -87, -25, 57, 90, 43)
+    MAKE_COEF(70, -43, -87,  9, 90, 25, -80, -57)
+    MAKE_COEF(57, -80, -25, 90, -9, -87, 43, 70)
+    MAKE_COEF(43, -90, 57, 25, -87, 70,  9, -80)
+    MAKE_COEF(25, -70, 90, -80, 43,  9, -57, 87)
+    MAKE_COEF(9, -25, 43, -57, 70, -80, 87, -90)  // rows 14..29: sign-alternating pairs consumed by pass-1 MAKE_ODD (indices 14,16,...,28)
+#undef MAKE_COEF
+};
+
+static void dct16(const int16_t *src, int16_t *dst, intptr_t stride)  // 16x16 2-D forward transform: two 1-D passes (rows into tmp, then columns into dst)
+{
+    // Const
+    __m128i c_4     = _mm_set1_epi32(DCT16_ADD1);    // rounding offset for pass-1 shift (1 << (DCT16_SHIFT1 - 1))
+    __m128i c_512   = _mm_set1_epi32(DCT16_ADD2);    // rounding offset for pass-2 shift (1 << (DCT16_SHIFT2 - 1))
+
+    int i;
+
+    ALIGN_VAR_32(int16_t, tmp[16 * 16]);    // pass-1 result, stored with the frequency index as the row (i.e. transposed for pass 2)
+
+    __m128i T00A, T01A, T02A, T03A, T04A, T05A, T06A, T07A;
+    __m128i T00B, T01B, T02B, T03B, T04B, T05B, T06B, T07B;
+    __m128i T10, T11, T12, T13, T14, T15, T16, T17;
+    __m128i T20, T21, T22, T23, T24, T25, T26, T27;
+    __m128i T30, T31, T32, T33, T34, T35, T36, T37;
+    __m128i T40, T41, T42, T43, T44, T45, T46, T47;
+    __m128i T50, T51, T52, T53;
+    __m128i T60, T61, T62, T63, T64, T65, T66, T67;
+    __m128i T70;
+
+    // DCT1: first 1-D pass over the input rows; each frequency row of tmp collects one coefficient per input row
+    for (i = 0; i < 16; i += 8)    // two iterations, 8 input rows each
+    {
+        T00A = _mm_load_si128((__m128i*)&src[(i + 0) * stride + 0]);    // [07 06 05 04 03 02 01 00]
+        T00B = _mm_load_si128((__m128i*)&src[(i + 0) * stride + 8]);    // [0F 0E 0D 0C 0B 0A 09 08]
+        T01A = _mm_load_si128((__m128i*)&src[(i + 1) * stride + 0]);    // [17 16 15 14 13 12 11 10]
+        T01B = _mm_load_si128((__m128i*)&src[(i + 1) * stride + 8]);    // [1F 1E 1D 1C 1B 1A 19 18]
+        T02A = _mm_load_si128((__m128i*)&src[(i + 2) * stride + 0]);    // [27 26 25 24 23 22 21 20]
+        T02B = _mm_load_si128((__m128i*)&src[(i + 2) * stride + 8]);    // [2F 2E 2D 2C 2B 2A 29 28]
+        T03A = _mm_load_si128((__m128i*)&src[(i + 3) * stride + 0]);    // [37 36 35 34 33 32 31 30]
+        T03B = _mm_load_si128((__m128i*)&src[(i + 3) * stride + 8]);    // [3F 3E 3D 3C 3B 3A 39 38]
+        T04A = _mm_load_si128((__m128i*)&src[(i + 4) * stride + 0]);    // [47 46 45 44 43 42 41 40]
+        T04B = _mm_load_si128((__m128i*)&src[(i + 4) * stride + 8]);    // [4F 4E 4D 4C 4B 4A 49 48]
+        T05A = _mm_load_si128((__m128i*)&src[(i + 5) * stride + 0]);    // [57 56 55 54 53 52 51 50]
+        T05B = _mm_load_si128((__m128i*)&src[(i + 5) * stride + 8]);    // [5F 5E 5D 5C 5B 5A 59 58]
+        T06A = _mm_load_si128((__m128i*)&src[(i + 6) * stride + 0]);    // [67 66 65 64 63 62 61 60]
+        T06B = _mm_load_si128((__m128i*)&src[(i + 6) * stride + 8]);    // [6F 6E 6D 6C 6B 6A 69 68]
+        T07A = _mm_load_si128((__m128i*)&src[(i + 7) * stride + 0]);    // [77 76 75 74 73 72 71 70]
+        T07B = _mm_load_si128((__m128i*)&src[(i + 7) * stride + 8]);    // [7F 7E 7D 7C 7B 7A 79 78]
+
+        // Mirror the upper half of each row so lane k faces lane 15-k for the butterfly below
+        T00B = _mm_shuffle_epi8(T00B, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T01B = _mm_shuffle_epi8(T01B, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T02B = _mm_shuffle_epi8(T02B, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T03B = _mm_shuffle_epi8(T03B, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T04B = _mm_shuffle_epi8(T04B, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T05B = _mm_shuffle_epi8(T05B, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T06B = _mm_shuffle_epi8(T06B, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T07B = _mm_shuffle_epi8(T07B, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+
+        T10  = _mm_add_epi16(T00A, T00B);    // even part: src[k] + src[15-k]
+        T11  = _mm_add_epi16(T01A, T01B);
+        T12  = _mm_add_epi16(T02A, T02B);
+        T13  = _mm_add_epi16(T03A, T03B);
+        T14  = _mm_add_epi16(T04A, T04B);
+        T15  = _mm_add_epi16(T05A, T05B);
+        T16  = _mm_add_epi16(T06A, T06B);
+        T17  = _mm_add_epi16(T07A, T07B);
+
+        T20  = _mm_sub_epi16(T00A, T00B);    // odd part: src[k] - src[15-k]
+        T21  = _mm_sub_epi16(T01A, T01B);
+        T22  = _mm_sub_epi16(T02A, T02B);
+        T23  = _mm_sub_epi16(T03A, T03B);
+        T24  = _mm_sub_epi16(T04A, T04B);
+        T25  = _mm_sub_epi16(T05A, T05B);
+        T26  = _mm_sub_epi16(T06A, T06B);
+        T27  = _mm_sub_epi16(T07A, T07B);
+
+        T30  = _mm_shuffle_epi8(T10, _mm_load_si128((__m128i*)tab_dct_16_0[1]));    // reorder even part for the 8-point recursion
+        T31  = _mm_shuffle_epi8(T11, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T32  = _mm_shuffle_epi8(T12, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T33  = _mm_shuffle_epi8(T13, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T34  = _mm_shuffle_epi8(T14, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T35  = _mm_shuffle_epi8(T15, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T36  = _mm_shuffle_epi8(T16, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T37  = _mm_shuffle_epi8(T17, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+
+        T40  = _mm_hadd_epi16(T30, T31);    // 8-point even/odd split via horizontal add/sub
+        T41  = _mm_hadd_epi16(T32, T33);
+        T42  = _mm_hadd_epi16(T34, T35);
+        T43  = _mm_hadd_epi16(T36, T37);
+        T44  = _mm_hsub_epi16(T30, T31);
+        T45  = _mm_hsub_epi16(T32, T33);
+        T46  = _mm_hsub_epi16(T34, T35);
+        T47  = _mm_hsub_epi16(T36, T37);
+
+        T50  = _mm_hadd_epi16(T40, T41);    // 4-point stage
+        T51  = _mm_hadd_epi16(T42, T43);
+        T52  = _mm_hsub_epi16(T40, T41);
+        T53  = _mm_hsub_epi16(T42, T43);
+
+        T60  = _mm_madd_epi16(T50, _mm_load_si128((__m128i*)tab_dct_8[1]));    // frequency row 0 (DC)
+        T61  = _mm_madd_epi16(T51, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), DCT16_SHIFT1);
+        T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), DCT16_SHIFT1);
+        T70  = _mm_packs_epi32(T60, T61);
+        _mm_store_si128((__m128i*)&tmp[0 * 16 + i], T70);
+
+        T60  = _mm_madd_epi16(T50, _mm_load_si128((__m128i*)tab_dct_8[2]));    // frequency row 8
+        T61  = _mm_madd_epi16(T51, _mm_load_si128((__m128i*)tab_dct_8[2]));
+        T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), DCT16_SHIFT1);
+        T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), DCT16_SHIFT1);
+        T70  = _mm_packs_epi32(T60, T61);
+        _mm_store_si128((__m128i*)&tmp[8 * 16 + i], T70);
+
+        T60  = _mm_madd_epi16(T52, _mm_load_si128((__m128i*)tab_dct_8[3]));    // frequency row 4
+        T61  = _mm_madd_epi16(T53, _mm_load_si128((__m128i*)tab_dct_8[3]));
+        T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), DCT16_SHIFT1);
+        T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), DCT16_SHIFT1);
+        T70  = _mm_packs_epi32(T60, T61);
+        _mm_store_si128((__m128i*)&tmp[4 * 16 + i], T70);
+
+        T60  = _mm_madd_epi16(T52, _mm_load_si128((__m128i*)tab_dct_8[4]));    // frequency row 12
+        T61  = _mm_madd_epi16(T53, _mm_load_si128((__m128i*)tab_dct_8[4]));
+        T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), DCT16_SHIFT1);
+        T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), DCT16_SHIFT1);
+        T70  = _mm_packs_epi32(T60, T61);
+        _mm_store_si128((__m128i*)&tmp[12 * 16 + i], T70);
+
+        T60  = _mm_madd_epi16(T44, _mm_load_si128((__m128i*)tab_dct_8[5]));    // frequency row 2
+        T61  = _mm_madd_epi16(T45, _mm_load_si128((__m128i*)tab_dct_8[5]));
+        T62  = _mm_madd_epi16(T46, _mm_load_si128((__m128i*)tab_dct_8[5]));
+        T63  = _mm_madd_epi16(T47, _mm_load_si128((__m128i*)tab_dct_8[5]));
+        T60  = _mm_hadd_epi32(T60, T61);
+        T61  = _mm_hadd_epi32(T62, T63);
+        T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), DCT16_SHIFT1);
+        T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), DCT16_SHIFT1);
+        T70  = _mm_packs_epi32(T60, T61);
+        _mm_store_si128((__m128i*)&tmp[2 * 16 + i], T70);
+
+        T60  = _mm_madd_epi16(T44, _mm_load_si128((__m128i*)tab_dct_8[6]));    // frequency row 6
+        T61  = _mm_madd_epi16(T45, _mm_load_si128((__m128i*)tab_dct_8[6]));
+        T62  = _mm_madd_epi16(T46, _mm_load_si128((__m128i*)tab_dct_8[6]));
+        T63  = _mm_madd_epi16(T47, _mm_load_si128((__m128i*)tab_dct_8[6]));
+        T60  = _mm_hadd_epi32(T60, T61);
+        T61  = _mm_hadd_epi32(T62, T63);
+        T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), DCT16_SHIFT1);
+        T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), DCT16_SHIFT1);
+        T70  = _mm_packs_epi32(T60, T61);
+        _mm_store_si128((__m128i*)&tmp[6 * 16 + i], T70);
+
+        T60  = _mm_madd_epi16(T44, _mm_load_si128((__m128i*)tab_dct_8[7]));    // frequency row 10
+        T61  = _mm_madd_epi16(T45, _mm_load_si128((__m128i*)tab_dct_8[7]));
+        T62  = _mm_madd_epi16(T46, _mm_load_si128((__m128i*)tab_dct_8[7]));
+        T63  = _mm_madd_epi16(T47, _mm_load_si128((__m128i*)tab_dct_8[7]));
+        T60  = _mm_hadd_epi32(T60, T61);
+        T61  = _mm_hadd_epi32(T62, T63);
+        T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), DCT16_SHIFT1);
+        T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), DCT16_SHIFT1);
+        T70  = _mm_packs_epi32(T60, T61);
+        _mm_store_si128((__m128i*)&tmp[10 * 16 + i], T70);
+
+        T60  = _mm_madd_epi16(T44, _mm_load_si128((__m128i*)tab_dct_8[8]));    // frequency row 14
+        T61  = _mm_madd_epi16(T45, _mm_load_si128((__m128i*)tab_dct_8[8]));
+        T62  = _mm_madd_epi16(T46, _mm_load_si128((__m128i*)tab_dct_8[8]));
+        T63  = _mm_madd_epi16(T47, _mm_load_si128((__m128i*)tab_dct_8[8]));
+        T60  = _mm_hadd_epi32(T60, T61);
+        T61  = _mm_hadd_epi32(T62, T63);
+        T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), DCT16_SHIFT1);
+        T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), DCT16_SHIFT1);
+        T70  = _mm_packs_epi32(T60, T61);
+        _mm_store_si128((__m128i*)&tmp[14 * 16 + i], T70);
+
+#define MAKE_ODD(tab, dstPos) \
+    T60  = _mm_madd_epi16(T20, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T61  = _mm_madd_epi16(T21, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T62  = _mm_madd_epi16(T22, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T63  = _mm_madd_epi16(T23, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T64  = _mm_madd_epi16(T24, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T65  = _mm_madd_epi16(T25, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T66  = _mm_madd_epi16(T26, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T67  = _mm_madd_epi16(T27, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T60  = _mm_hadd_epi32(T60, T61); \
+    T61  = _mm_hadd_epi32(T62, T63); \
+    T62  = _mm_hadd_epi32(T64, T65); \
+    T63  = _mm_hadd_epi32(T66, T67); \
+    T60  = _mm_hadd_epi32(T60, T61); \
+    T61  = _mm_hadd_epi32(T62, T63); \
+    T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), DCT16_SHIFT1); \
+    T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), DCT16_SHIFT1); \
+    T70  = _mm_packs_epi32(T60, T61); \
+    _mm_store_si128((__m128i*)&tmp[(dstPos) * 16 + i], T70);    // odd frequency rows 1,3,...,15 from the T2x (difference) vectors
+
+        MAKE_ODD(0, 1);
+        MAKE_ODD(1, 3);
+        MAKE_ODD(2, 5);
+        MAKE_ODD(3, 7);
+        MAKE_ODD(4, 9);
+        MAKE_ODD(5, 11);
+        MAKE_ODD(6, 13);
+        MAKE_ODD(7, 15);
+#undef MAKE_ODD
+    }
+
+    // DCT2: second 1-D pass over the (transposed) tmp rows; final coefficients written to dst
+    for (i = 0; i < 16; i += 4)    // four iterations, 4 tmp rows each
+    {
+        T00A = _mm_load_si128((__m128i*)&tmp[(i + 0) * 16 + 0]);    // [07 06 05 04 03 02 01 00]
+        T00B = _mm_load_si128((__m128i*)&tmp[(i + 0) * 16 + 8]);    // [0F 0E 0D 0C 0B 0A 09 08]
+        T01A = _mm_load_si128((__m128i*)&tmp[(i + 1) * 16 + 0]);    // [17 16 15 14 13 12 11 10]
+        T01B = _mm_load_si128((__m128i*)&tmp[(i + 1) * 16 + 8]);    // [1F 1E 1D 1C 1B 1A 19 18]
+        T02A = _mm_load_si128((__m128i*)&tmp[(i + 2) * 16 + 0]);    // [27 26 25 24 23 22 21 20]
+        T02B = _mm_load_si128((__m128i*)&tmp[(i + 2) * 16 + 8]);    // [2F 2E 2D 2C 2B 2A 29 28]
+        T03A = _mm_load_si128((__m128i*)&tmp[(i + 3) * 16 + 0]);    // [37 36 35 34 33 32 31 30]
+        T03B = _mm_load_si128((__m128i*)&tmp[(i + 3) * 16 + 8]);    // [3F 3E 3D 3C 3B 3A 39 38]
+
+        T00A = _mm_shuffle_epi8(T00A, _mm_load_si128((__m128i*)tab_dct_16_0[2]));    // reorder halves so unpack below pairs k with 15-k
+        T00B = _mm_shuffle_epi8(T00B, _mm_load_si128((__m128i*)tab_dct_16_0[3]));
+        T01A = _mm_shuffle_epi8(T01A, _mm_load_si128((__m128i*)tab_dct_16_0[2]));
+        T01B = _mm_shuffle_epi8(T01B, _mm_load_si128((__m128i*)tab_dct_16_0[3]));
+        T02A = _mm_shuffle_epi8(T02A, _mm_load_si128((__m128i*)tab_dct_16_0[2]));
+        T02B = _mm_shuffle_epi8(T02B, _mm_load_si128((__m128i*)tab_dct_16_0[3]));
+        T03A = _mm_shuffle_epi8(T03A, _mm_load_si128((__m128i*)tab_dct_16_0[2]));
+        T03B = _mm_shuffle_epi8(T03B, _mm_load_si128((__m128i*)tab_dct_16_0[3]));
+
+        T10  = _mm_unpacklo_epi16(T00A, T00B);    // interleave for pairwise madd against duplicated-lane coefficient rows
+        T11  = _mm_unpackhi_epi16(T00A, T00B);
+        T12  = _mm_unpacklo_epi16(T01A, T01B);
+        T13  = _mm_unpackhi_epi16(T01A, T01B);
+        T14  = _mm_unpacklo_epi16(T02A, T02B);
+        T15  = _mm_unpackhi_epi16(T02A, T02B);
+        T16  = _mm_unpacklo_epi16(T03A, T03B);
+        T17  = _mm_unpackhi_epi16(T03A, T03B);
+
+        T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_8[1]));    // outputs 0 and 8 share this all-64 row
+        T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_8[1]));
+
+        T30  = _mm_add_epi32(T20, T21);
+        T31  = _mm_add_epi32(T22, T23);
+        T32  = _mm_add_epi32(T24, T25);
+        T33  = _mm_add_epi32(T26, T27);
+
+        T30  = _mm_hadd_epi32(T30, T31);
+        T31  = _mm_hadd_epi32(T32, T33);
+
+        T40  = _mm_hadd_epi32(T30, T31);    // row 0: sum;  row 8: difference
+        T41  = _mm_hsub_epi32(T30, T31);
+        T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), DCT16_SHIFT2);
+        T41  = _mm_srai_epi32(_mm_add_epi32(T41, c_512), DCT16_SHIFT2);
+        T40  = _mm_packs_epi32(T40, T40);
+        T41  = _mm_packs_epi32(T41, T41);
+        _mm_storel_epi64((__m128i*)&dst[0 * 16 + i], T40);
+        _mm_storel_epi64((__m128i*)&dst[8 * 16 + i], T41);
+
+        T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_16_1[8]));    // output row 4
+        T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+
+        T30  = _mm_add_epi32(T20, T21);
+        T31  = _mm_add_epi32(T22, T23);
+        T32  = _mm_add_epi32(T24, T25);
+        T33  = _mm_add_epi32(T26, T27);
+
+        T30  = _mm_hadd_epi32(T30, T31);
+        T31  = _mm_hadd_epi32(T32, T33);
+
+        T40  = _mm_hadd_epi32(T30, T31);
+        T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), DCT16_SHIFT2);
+        T40  = _mm_packs_epi32(T40, T40);
+        _mm_storel_epi64((__m128i*)&dst[4 * 16 + i], T40);
+
+        T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_16_1[9]));    // output row 12
+        T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+
+        T30  = _mm_add_epi32(T20, T21);
+        T31  = _mm_add_epi32(T22, T23);
+        T32  = _mm_add_epi32(T24, T25);
+        T33  = _mm_add_epi32(T26, T27);
+
+        T30  = _mm_hadd_epi32(T30, T31);
+        T31  = _mm_hadd_epi32(T32, T33);
+
+        T40  = _mm_hadd_epi32(T30, T31);
+        T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), DCT16_SHIFT2);
+        T40  = _mm_packs_epi32(T40, T40);
+        _mm_storel_epi64((__m128i*)&dst[12 * 16 + i], T40);
+
+        T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_16_1[10]));    // output row 2 (note sub, not add, below)
+        T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_16_1[10]));
+        T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_16_1[10]));
+        T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_16_1[10]));
+        T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_16_1[10]));
+        T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_16_1[10]));
+        T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_16_1[10]));
+        T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_16_1[10]));
+
+        T30  = _mm_sub_epi32(T20, T21);
+        T31  = _mm_sub_epi32(T22, T23);
+        T32  = _mm_sub_epi32(T24, T25);
+        T33  = _mm_sub_epi32(T26, T27);
+
+        T30  = _mm_hadd_epi32(T30, T31);
+        T31  = _mm_hadd_epi32(T32, T33);
+
+        T40  = _mm_hadd_epi32(T30, T31);
+        T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), DCT16_SHIFT2);
+        T40  = _mm_packs_epi32(T40, T40);
+        _mm_storel_epi64((__m128i*)&dst[2 * 16 + i], T40);
+
+        T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_16_1[11]));    // output row 6
+        T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_16_1[11]));
+        T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_16_1[11]));
+        T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_16_1[11]));
+        T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_16_1[11]));
+        T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_16_1[11]));
+        T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_16_1[11]));
+        T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_16_1[11]));
+
+        T30  = _mm_sub_epi32(T20, T21);
+        T31  = _mm_sub_epi32(T22, T23);
+        T32  = _mm_sub_epi32(T24, T25);
+        T33  = _mm_sub_epi32(T26, T27);
+
+        T30  = _mm_hadd_epi32(T30, T31);
+        T31  = _mm_hadd_epi32(T32, T33);
+
+        T40  = _mm_hadd_epi32(T30, T31);
+        T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), DCT16_SHIFT2);
+        T40  = _mm_packs_epi32(T40, T40);
+        _mm_storel_epi64((__m128i*)&dst[6 * 16 + i], T40);
+
+        T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_16_1[12]));    // output row 10
+        T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_16_1[12]));
+        T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_16_1[12]));
+        T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_16_1[12]));
+        T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_16_1[12]));
+        T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_16_1[12]));
+        T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_16_1[12]));
+        T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_16_1[12]));
+
+        T30  = _mm_sub_epi32(T20, T21);
+        T31  = _mm_sub_epi32(T22, T23);
+        T32  = _mm_sub_epi32(T24, T25);
+        T33  = _mm_sub_epi32(T26, T27);
+
+        T30  = _mm_hadd_epi32(T30, T31);
+        T31  = _mm_hadd_epi32(T32, T33);
+
+        T40  = _mm_hadd_epi32(T30, T31);
+        T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), DCT16_SHIFT2);
+        T40  = _mm_packs_epi32(T40, T40);
+        _mm_storel_epi64((__m128i*)&dst[10 * 16 + i], T40);
+
+        T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_16_1[13]));    // output row 14
+        T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_16_1[13]));
+        T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_16_1[13]));
+        T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_16_1[13]));
+        T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_16_1[13]));
+        T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_16_1[13]));
+        T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_16_1[13]));
+        T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_16_1[13]));
+
+        T30  = _mm_sub_epi32(T20, T21);
+        T31  = _mm_sub_epi32(T22, T23);
+        T32  = _mm_sub_epi32(T24, T25);
+        T33  = _mm_sub_epi32(T26, T27);
+
+        T30  = _mm_hadd_epi32(T30, T31);
+        T31  = _mm_hadd_epi32(T32, T33);
+
+        T40  = _mm_hadd_epi32(T30, T31);
+        T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), DCT16_SHIFT2);
+        T40  = _mm_packs_epi32(T40, T40);
+        _mm_storel_epi64((__m128i*)&dst[14 * 16 + i], T40);
+
+#define MAKE_ODD(tab, dstPos) \
+    T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)]));       /* [*O2_0 *O1_0 *O3_0 *O0_0] */ \
+    T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_16_1[(tab) + 1]));   /* [*O5_0 *O6_0 *O4_0 *O7_0] */ \
+    T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_16_1[(tab) + 1])); \
+    T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_16_1[(tab) + 1])); \
+    T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
+    T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_16_1[(tab) + 1])); \
+        \
+    T30  = _mm_add_epi32(T20, T21); \
+    T31  = _mm_add_epi32(T22, T23); \
+    T32  = _mm_add_epi32(T24, T25); \
+    T33  = _mm_add_epi32(T26, T27); \
+        \
+    T30  = _mm_hadd_epi32(T30, T31); \
+    T31  = _mm_hadd_epi32(T32, T33); \
+        \
+    T40  = _mm_hadd_epi32(T30, T31); \
+    T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), DCT16_SHIFT2); \
+    T40  = _mm_packs_epi32(T40, T40); \
+    _mm_storel_epi64((__m128i*)&dst[(dstPos) * 16 + i], T40);    // odd output rows 1,3,...,15 from coefficient pairs tab/tab+1
+
+        MAKE_ODD(14,  1);
+        MAKE_ODD(16,  3);
+        MAKE_ODD(18,  5);
+        MAKE_ODD(20,  7);
+        MAKE_ODD(22,  9);
+        MAKE_ODD(24, 11);
+        MAKE_ODD(26, 13);
+        MAKE_ODD(28, 15);
+#undef MAKE_ODD
+    }
+}
+
+ALIGN_VAR_32(static const int16_t, tab_dct_32_0[][8]) =  // shuffle control for the 32-point transform (use site below this chunk)
+{
+    { 0x0F0E, 0x0100, 0x0908, 0x0706, 0x0D0C, 0x0302, 0x0B0A, 0x0504 },  // 0: selects 16-bit words in order 7,0,4,3,6,1,5,2
+};
+
+ALIGN_VAR_32(static const int16_t, tab_dct_32_1[][8]) =
+{
+    { 89, -89, 18, -18, 75, -75, 50, -50 },          //  0
+    { 75, -75, -50, 50, -18, 18, -89, 89 },          //  1
+    { 50, -50, 75, -75, -89, 89, 18, -18 },          //  2
+    { 18, -18, -89, 89, -50, 50, 75, -75 },          //  3
+
+#define MAKE_COEF8(a0, a1, a2, a3, a4, a5, a6, a7) \
+    { (a0), (a7), (a3), (a4), (a1), (a6), (a2), (a5) \
+    }, \
+
+    MAKE_COEF8(90, 87, 80, 70, 57, 43, 25,  9)   //  4
+    MAKE_COEF8(87, 57,  9, -43, -80, -90, -70, -25)   //  5
+    MAKE_COEF8(80,  9, -70, -87, -25, 57, 90, 43)   //  6
+    MAKE_COEF8(70, -43, -87,  9, 90, 25, -80, -57)   //  7
+    MAKE_COEF8(57, -80, -25, 90, -9, -87, 43, 70)   //  8
+    MAKE_COEF8(43, -90, 57, 25, -87, 70,  9, -80)   //  9
+    MAKE_COEF8(25, -70, 90, -80, 43,  9, -57, 87)   // 10
+    MAKE_COEF8(9, -25, 43, -57, 70, -80, 87, -90)   // 11
+#undef MAKE_COEF8
+
+#define MAKE_COEF16(a00, a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15) \
+    { (a00), (a07), (a03), (a04), (a01), (a06), (a02), (a05) }, \
+    { (a15), (a08), (a12), (a11), (a14), (a09), (a13), (a10) },
+
+    MAKE_COEF16(90, 90, 88, 85, 82, 78, 73, 67, 61, 54, 46, 38, 31, 22, 13,  4)    // 12
+    MAKE_COEF16(90, 82, 67, 46, 22, -4, -31, -54, -73, -85, -90, -88, -78, -61, -38, -13)    // 14
+    MAKE_COEF16(88, 67, 31, -13, -54, -82, -90, -78, -46, -4, 38, 73, 90, 85, 61, 22)    // 16
+    MAKE_COEF16(85, 46, -13, -67, -90, -73, -22, 38, 82, 88, 54, -4, -61, -90, -78, -31)    // 18
+    MAKE_COEF16(82, 22, -54, -90, -61, 13, 78, 85, 31, -46, -90, -67,  4, 73, 88, 38)    // 20
+    MAKE_COEF16(78, -4, -82, -73, 13, 85, 67, -22, -88, -61, 31, 90, 54, -38, -90, -46)    // 22
+    MAKE_COEF16(73, -31, -90, -22, 78, 67, -38, -90, -13, 82, 61, -46, -88, -4, 85, 54)    // 24
+    MAKE_COEF16(67, -54, -78, 38, 85, -22, -90,  4, 90, 13, -88, -31, 82, 46, -73, -61)    // 26
+    MAKE_COEF16(61, -73, -46, 82, 31, -88, -13, 90, -4, -90, 22, 85, -38, -78, 54, 67)    // 28
+    MAKE_COEF16(54, -85, -4, 88, -46, -61, 82, 13, -90, 38, 67, -78, -22, 90, -31, -73)    // 30
+    MAKE_COEF16(46, -90, 38, 54, -90, 31, 61, -88, 22, 67, -85, 13, 73, -82,  4, 78)    // 32
+    MAKE_COEF16(38, -88, 73, -4, -67, 90, -46, -31, 85, -78, 13, 61, -90, 54, 22, -82)    // 34
+    MAKE_COEF16(31, -78, 90, -61,  4, 54, -88, 82, -38, -22, 73, -90, 67, -13, -46, 85)    // 36
+    MAKE_COEF16(22, -61, 85, -90, 73, -38, -4, 46, -78, 90, -82, 54, -13, -31, 67, -88)    // 38
+    MAKE_COEF16(13, -38, 61, -78, 88, -90, 85, -73, 54, -31,  4, 22, -46, 67, -82, 90)    // 40
+    MAKE_COEF16(4, -13, 22, -31, 38, -46, 54, -61, 67, -73, 78, -82, 85, -88, 90, -90)    // 42
+#undef MAKE_COEF16
+
+    {
+        64, 64, 64, 64, 64, 64, 64, 64
+    },                                  // 44
+
+    { 64, 64, -64, -64, -64, -64, 64, 64 },  // 45
+
+    { 83, 83, 36, 36, -36, -36, -83, -83 },  // 46
+    { -83, -83, -36, -36, 36, 36, 83, 83 },  // 47
+
+    { 36, 36, -83, -83, 83, 83, -36, -36 },  // 48
+    { -36, -36, 83, 83, -83, -83, 36, 36 },  // 49
+
+#define MAKE_COEF16(a00, a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15) \
+    { (a00), (a00), (a01), (a01), (a02), (a02), (a03), (a03) }, \
+    { (a04), (a04), (a05), (a05), (a06), (a06), (a07), (a07) }, \
+    { (a08), (a08), (a09), (a09), (a10), (a10), (a11), (a11) }, \
+    { (a12), (a12), (a13), (a13), (a14), (a14), (a15), (a15) },
+
+    MAKE_COEF16(89, 75, 50, 18, -18, -50, -75, -89, -89, -75, -50, -18, 18, 50, 75, 89) // 50
+    MAKE_COEF16(75, -18, -89, -50, 50, 89, 18, -75, -75, 18, 89, 50, -50, -89, -18, 75) // 54
+
+    // TODO: convert below table here
+#undef MAKE_COEF16
+
+    {
+        50, 50, -89, -89, 18, 18, 75, 75
+    },                                  // 58
+    { -75, -75, -18, -18, 89, 89, -50, -50 },  // 59
+    { -50, -50, 89, 89, -18, -18, -75, -75 },  // 60
+    { 75, 75, 18, 18, -89, -89, 50, 50 },  // 61
+
+    { 18, 18, -50, -50, 75, 75, -89, -89 },  // 62
+    { 89, 89, -75, -75, 50, 50, -18, -18 },  // 63
+    { -18, -18, 50, 50, -75, -75, 89, 89 },  // 64
+    { -89, -89, 75, 75, -50, -50, 18, 18 },  // 65
+
+    { 90, 90, 87, 87, 80, 80, 70, 70 },  // 66
+    { 57, 57, 43, 43, 25, 25,  9,  9 },  // 67
+    { -9, -9, -25, -25, -43, -43, -57, -57 },  // 68
+    { -70, -70, -80, -80, -87, -87, -90, -90 },  // 69
+
+    { 87, 87, 57, 57,  9,  9, -43, -43 },  // 70
+    { -80, -80, -90, -90, -70, -70, -25, -25 },  // 71
+    { 25, 25, 70, 70, 90, 90, 80, 80 },  // 72
+    { 43, 43, -9, -9, -57, -57, -87, -87 },  // 73
+
+    { 80, 80,  9,  9, -70, -70, -87, -87 },  // 74
+    { -25, -25, 57, 57, 90, 90, 43, 43 },  // 75
+    { -43, -43, -90, -90, -57, -57, 25, 25 },  // 76
+    { 87, 87, 70, 70, -9, -9, -80, -80 },  // 77
+
+    { 70, 70, -43, -43, -87, -87,  9,  9 },  // 78
+    { 90, 90, 25, 25, -80, -80, -57, -57 },  // 79
+    { 57, 57, 80, 80, -25, -25, -90, -90 },  // 80
+    { -9, -9, 87, 87, 43, 43, -70, -70 },  // 81
+
+    { 57, 57, -80, -80, -25, -25, 90, 90 },  // 82
+    { -9, -9, -87, -87, 43, 43, 70, 70 },  // 83
+    { -70, -70, -43, -43, 87, 87,  9,  9 },  // 84
+    { -90, -90, 25, 25, 80, 80, -57, -57 },  // 85
+
+    { 43, 43, -90, -90, 57, 57, 25, 25 },  // 86
+    { -87, -87, 70, 70,  9,  9, -80, -80 },  // 87
+    { 80, 80, -9, -9, -70, -70, 87, 87 },  // 88
+    { -25, -25, -57, -57, 90, 90, -43, -43 },  // 89
+
+    { 25, 25, -70, -70, 90, 90, -80, -80 },  // 90
+    { 43, 43,  9,  9, -57, -57, 87, 87 },  // 91
+    { -87, -87, 57, 57, -9, -9, -43, -43 },  // 92
+    { 80, 80, -90, -90, 70, 70, -25, -25 },  // 93
+
+    {  9,  9, -25, -25, 43, 43, -57, -57 },  // 94
+    { 70, 70, -80, -80, 87, 87, -90, -90 },  // 95
+    { 90, 90, -87, -87, 80, 80, -70, -70 },  // 96
+    { 57, 57, -43, -43, 25, 25, -9, -9 },  // 97
+
+#define MAKE_COEF16(a00, a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15) \
+    { (a00), -(a00), (a01), -(a01), (a02), -(a02), (a03), -(a03) }, \
+    { (a04), -(a04), (a05), -(a05), (a06), -(a06), (a07), -(a07) }, \
+    { (a08), -(a08), (a09), -(a09), (a10), -(a10), (a11), -(a11) }, \
+    { (a12), -(a12), (a13), -(a13), (a14), -(a14), (a15), -(a15) },
+
+    MAKE_COEF16(90, 90, 88, 85, 82, 78, 73, 67, 61, 54, 46, 38, 31, 22, 13, 4)    // 98
+    MAKE_COEF16(90, 82, 67, 46, 22, -4, -31, -54, -73, -85, -90, -88, -78, -61, -38, -13)     //102
+    MAKE_COEF16(88, 67, 31, -13, -54, -82, -90, -78, -46, -4, 38, 73, 90, 85, 61, 22)     //106
+    MAKE_COEF16(85, 46, -13, -67, -90, -73, -22, 38, +82, 88, 54, -4, -61, -90, -78, -31)     //110
+    MAKE_COEF16(82, 22, -54, -90, -61, 13, 78, 85, +31, -46, -90, -67,  4, 73, 88, 38)     //114
+    MAKE_COEF16(78, -4, -82, -73, 13, 85, 67, -22, -88, -61, 31, 90, 54, -38, -90, -46)     //118
+    MAKE_COEF16(73, -31, -90, -22, 78, 67, -38, -90, -13, 82, 61, -46, -88, -4, 85, 54)     //122
+    MAKE_COEF16(67, -54, -78, 38, 85, -22, -90,  4, +90, 13, -88, -31, 82, 46, -73, -61)     //126
+    MAKE_COEF16(61, -73, -46, 82, 31, -88, -13, 90, -4, -90, 22, 85, -38, -78, 54, 67)     //130
+    MAKE_COEF16(54, -85, -4, 88, -46, -61, 82, 13, -90, 38, 67, -78, -22, 90, -31, -73)     //134
+    MAKE_COEF16(46, -90, 38, 54, -90, 31, 61, -88, +22, 67, -85, 13, 73, -82,  4, 78)     //138
+    MAKE_COEF16(38, -88, 73, -4, -67, 90, -46, -31, +85, -78, 13, 61, -90, 54, 22, -82)     //142
+    MAKE_COEF16(31, -78, 90, -61,  4, 54, -88, 82, -38, -22, 73, -90, 67, -13, -46, 85)     //146
+    MAKE_COEF16(22, -61, 85, -90, 73, -38, -4, 46, -78, 90, -82, 54, -13, -31, 67, -88)     //150
+    MAKE_COEF16(13, -38, 61, -78, 88, -90, 85, -73, +54, -31,  4, 22, -46, 67, -82, 90)     //154
+    MAKE_COEF16(4, -13, 22, -31, 38, -46, 54, -61, +67, -73, 78, -82, 85, -88, 90, -90)     //158
+
+#undef MAKE_COEF16
+};
+
+/* Forward 32x32 DCT, SSSE3 intrinsic implementation.
+ *
+ * src    - input 16-bit residual block; each of the 32 rows is 'stride'
+ *          int16_t apart.  Rows are read with _mm_load_si128, so each row
+ *          must be 16-byte aligned (assumed guaranteed by callers - TODO
+ *          confirm at call sites).
+ * dst    - 32x32 output; transform row 'k' is written at dst[k * 32 ..].
+ *
+ * Structure: two matrix-multiply passes.  Pass 1 ("DCT1") processes the
+ * input rows, rounds with DCT32_ADD1 >> DCT32_SHIFT1 and stores the packed
+ * 16-bit intermediates into im[coef][rowGroup].  Pass 2 ("DCT2") processes
+ * im, rounds with DCT32_ADD2 >> DCT32_SHIFT2 and writes dst.  The
+ * coefficient tables (tab_dct_8, tab_dct_16_0/1, tab_dct_32_0/1) are
+ * defined earlier in this file.
+ */
+static void dct32(const int16_t *src, int16_t *dst, intptr_t stride)
+{
+    // Const
+    __m128i c_8     = _mm_set1_epi32(DCT32_ADD1);
+    __m128i c_1024  = _mm_set1_epi32(DCT32_ADD2);
+
+    int i;
+
+    __m128i T00A, T01A, T02A, T03A, T04A, T05A, T06A, T07A;
+    __m128i T00B, T01B, T02B, T03B, T04B, T05B, T06B, T07B;
+    __m128i T00C, T01C, T02C, T03C, T04C, T05C, T06C, T07C;
+    __m128i T00D, T01D, T02D, T03D, T04D, T05D, T06D, T07D;
+    __m128i T10A, T11A, T12A, T13A, T14A, T15A, T16A, T17A;
+    __m128i T10B, T11B, T12B, T13B, T14B, T15B, T16B, T17B;
+    __m128i T20, T21, T22, T23, T24, T25, T26, T27;
+    __m128i T30, T31, T32, T33, T34, T35, T36, T37;
+    __m128i T40, T41, T42, T43, T44, T45, T46, T47;
+    __m128i T50, T51, T52, T53;
+    __m128i T60, T61, T62, T63, T64, T65, T66, T67;
+    __m128i im[32][4];
+
+    // DCT1
+    // Pass 1: 4 iterations, 8 input rows each; results go to im[][].
+    for (i = 0; i < 32 / 8; i++)
+    {
+        T00A = _mm_load_si128((__m128i*)&src[(i * 8 + 0) * stride + 0]);    // [07 06 05 04 03 02 01 00]
+        T00B = _mm_load_si128((__m128i*)&src[(i * 8 + 0) * stride + 8]);    // [15 14 13 12 11 10 09 08]
+        T00C = _mm_load_si128((__m128i*)&src[(i * 8 + 0) * stride + 16]);    // [23 22 21 20 19 18 17 16]
+        T00D = _mm_load_si128((__m128i*)&src[(i * 8 + 0) * stride + 24]);    // [31 30 29 28 27 26 25 24]
+        T01A = _mm_load_si128((__m128i*)&src[(i * 8 + 1) * stride + 0]);
+        T01B = _mm_load_si128((__m128i*)&src[(i * 8 + 1) * stride + 8]);
+        T01C = _mm_load_si128((__m128i*)&src[(i * 8 + 1) * stride + 16]);
+        T01D = _mm_load_si128((__m128i*)&src[(i * 8 + 1) * stride + 24]);
+        T02A = _mm_load_si128((__m128i*)&src[(i * 8 + 2) * stride + 0]);
+        T02B = _mm_load_si128((__m128i*)&src[(i * 8 + 2) * stride + 8]);
+        T02C = _mm_load_si128((__m128i*)&src[(i * 8 + 2) * stride + 16]);
+        T02D = _mm_load_si128((__m128i*)&src[(i * 8 + 2) * stride + 24]);
+        T03A = _mm_load_si128((__m128i*)&src[(i * 8 + 3) * stride + 0]);
+        T03B = _mm_load_si128((__m128i*)&src[(i * 8 + 3) * stride + 8]);
+        T03C = _mm_load_si128((__m128i*)&src[(i * 8 + 3) * stride + 16]);
+        T03D = _mm_load_si128((__m128i*)&src[(i * 8 + 3) * stride + 24]);
+        T04A = _mm_load_si128((__m128i*)&src[(i * 8 + 4) * stride + 0]);
+        T04B = _mm_load_si128((__m128i*)&src[(i * 8 + 4) * stride + 8]);
+        T04C = _mm_load_si128((__m128i*)&src[(i * 8 + 4) * stride + 16]);
+        T04D = _mm_load_si128((__m128i*)&src[(i * 8 + 4) * stride + 24]);
+        T05A = _mm_load_si128((__m128i*)&src[(i * 8 + 5) * stride + 0]);
+        T05B = _mm_load_si128((__m128i*)&src[(i * 8 + 5) * stride + 8]);
+        T05C = _mm_load_si128((__m128i*)&src[(i * 8 + 5) * stride + 16]);
+        T05D = _mm_load_si128((__m128i*)&src[(i * 8 + 5) * stride + 24]);
+        T06A = _mm_load_si128((__m128i*)&src[(i * 8 + 6) * stride + 0]);
+        T06B = _mm_load_si128((__m128i*)&src[(i * 8 + 6) * stride + 8]);
+        T06C = _mm_load_si128((__m128i*)&src[(i * 8 + 6) * stride + 16]);
+        T06D = _mm_load_si128((__m128i*)&src[(i * 8 + 6) * stride + 24]);
+        T07A = _mm_load_si128((__m128i*)&src[(i * 8 + 7) * stride + 0]);
+        T07B = _mm_load_si128((__m128i*)&src[(i * 8 + 7) * stride + 8]);
+        T07C = _mm_load_si128((__m128i*)&src[(i * 8 + 7) * stride + 16]);
+        T07D = _mm_load_si128((__m128i*)&src[(i * 8 + 7) * stride + 24]);
+
+        // Reorder lanes with pshufb so the following add/sub pairs samples
+        // x[j] with x[31-j] (see bracketed lane comments).
+        T00A = _mm_shuffle_epi8(T00A, _mm_load_si128((__m128i*)tab_dct_16_0[1]));    // [05 02 06 01 04 03 07 00]
+        T00B = _mm_shuffle_epi8(T00B, _mm_load_si128((__m128i*)tab_dct_32_0[0]));    // [10 13 09 14 11 12 08 15]
+        T00C = _mm_shuffle_epi8(T00C, _mm_load_si128((__m128i*)tab_dct_16_0[1]));    // [21 18 22 17 20 19 23 16]
+        T00D = _mm_shuffle_epi8(T00D, _mm_load_si128((__m128i*)tab_dct_32_0[0]));    // [26 29 25 30 27 28 24 31]
+        T01A = _mm_shuffle_epi8(T01A, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T01B = _mm_shuffle_epi8(T01B, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T01C = _mm_shuffle_epi8(T01C, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T01D = _mm_shuffle_epi8(T01D, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T02A = _mm_shuffle_epi8(T02A, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T02B = _mm_shuffle_epi8(T02B, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T02C = _mm_shuffle_epi8(T02C, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T02D = _mm_shuffle_epi8(T02D, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T03A = _mm_shuffle_epi8(T03A, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T03B = _mm_shuffle_epi8(T03B, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T03C = _mm_shuffle_epi8(T03C, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T03D = _mm_shuffle_epi8(T03D, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T04A = _mm_shuffle_epi8(T04A, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T04B = _mm_shuffle_epi8(T04B, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T04C = _mm_shuffle_epi8(T04C, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T04D = _mm_shuffle_epi8(T04D, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T05A = _mm_shuffle_epi8(T05A, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T05B = _mm_shuffle_epi8(T05B, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T05C = _mm_shuffle_epi8(T05C, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T05D = _mm_shuffle_epi8(T05D, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T06A = _mm_shuffle_epi8(T06A, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T06B = _mm_shuffle_epi8(T06B, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T06C = _mm_shuffle_epi8(T06C, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T06D = _mm_shuffle_epi8(T06D, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T07A = _mm_shuffle_epi8(T07A, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T07B = _mm_shuffle_epi8(T07B, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+        T07C = _mm_shuffle_epi8(T07C, _mm_load_si128((__m128i*)tab_dct_16_0[1]));
+        T07D = _mm_shuffle_epi8(T07D, _mm_load_si128((__m128i*)tab_dct_32_0[0]));
+
+        // Butterfly stage: E (even part) = x[j] + x[31-j], per the lane
+        // labels below; O (odd part) = x[j] - x[31-j].
+        T10A = _mm_add_epi16(T00A, T00D);   // [E05 E02 E06 E01 E04 E03 E07 E00]
+        T10B = _mm_add_epi16(T00B, T00C);   // [E10 E13 E09 E14 E11 E12 E08 E15]
+        T11A = _mm_add_epi16(T01A, T01D);
+        T11B = _mm_add_epi16(T01B, T01C);
+        T12A = _mm_add_epi16(T02A, T02D);
+        T12B = _mm_add_epi16(T02B, T02C);
+        T13A = _mm_add_epi16(T03A, T03D);
+        T13B = _mm_add_epi16(T03B, T03C);
+        T14A = _mm_add_epi16(T04A, T04D);
+        T14B = _mm_add_epi16(T04B, T04C);
+        T15A = _mm_add_epi16(T05A, T05D);
+        T15B = _mm_add_epi16(T05B, T05C);
+        T16A = _mm_add_epi16(T06A, T06D);
+        T16B = _mm_add_epi16(T06B, T06C);
+        T17A = _mm_add_epi16(T07A, T07D);
+        T17B = _mm_add_epi16(T07B, T07C);
+
+        T00A = _mm_sub_epi16(T00A, T00D);   // [O05 O02 O06 O01 O04 O03 O07 O00]
+        T00B = _mm_sub_epi16(T00B, T00C);   // [O10 O13 O09 O14 O11 O12 O08 O15]
+        T01A = _mm_sub_epi16(T01A, T01D);
+        T01B = _mm_sub_epi16(T01B, T01C);
+        T02A = _mm_sub_epi16(T02A, T02D);
+        T02B = _mm_sub_epi16(T02B, T02C);
+        T03A = _mm_sub_epi16(T03A, T03D);
+        T03B = _mm_sub_epi16(T03B, T03C);
+        T04A = _mm_sub_epi16(T04A, T04D);
+        T04B = _mm_sub_epi16(T04B, T04C);
+        T05A = _mm_sub_epi16(T05A, T05D);
+        T05B = _mm_sub_epi16(T05B, T05C);
+        T06A = _mm_sub_epi16(T06A, T06D);
+        T06B = _mm_sub_epi16(T06B, T06C);
+        T07A = _mm_sub_epi16(T07A, T07D);
+        T07B = _mm_sub_epi16(T07B, T07C);
+
+        // Second butterfly on the even part: EE[j] = E[j] + E[15-j].
+        T20  = _mm_add_epi16(T10A, T10B);   // [EE5 EE2 EE6 EE1 EE4 EE3 EE7 EE0]
+        T21  = _mm_add_epi16(T11A, T11B);
+        T22  = _mm_add_epi16(T12A, T12B);
+        T23  = _mm_add_epi16(T13A, T13B);
+        T24  = _mm_add_epi16(T14A, T14B);
+        T25  = _mm_add_epi16(T15A, T15B);
+        T26  = _mm_add_epi16(T16A, T16B);
+        T27  = _mm_add_epi16(T17A, T17B);
+
+        T30  = _mm_madd_epi16(T20, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T31  = _mm_madd_epi16(T21, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T32  = _mm_madd_epi16(T22, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T33  = _mm_madd_epi16(T23, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T34  = _mm_madd_epi16(T24, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T35  = _mm_madd_epi16(T25, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T36  = _mm_madd_epi16(T26, _mm_load_si128((__m128i*)tab_dct_8[1]));
+        T37  = _mm_madd_epi16(T27, _mm_load_si128((__m128i*)tab_dct_8[1]));
+
+        T40  = _mm_hadd_epi32(T30, T31);
+        T41  = _mm_hadd_epi32(T32, T33);
+        T42  = _mm_hadd_epi32(T34, T35);
+        T43  = _mm_hadd_epi32(T36, T37);
+
+        // Coefficient row 0 (horizontal sums) and row 16 (horizontal diffs).
+        T50  = _mm_hadd_epi32(T40, T41);
+        T51  = _mm_hadd_epi32(T42, T43);
+        T50  = _mm_srai_epi32(_mm_add_epi32(T50, c_8), DCT32_SHIFT1);
+        T51  = _mm_srai_epi32(_mm_add_epi32(T51, c_8), DCT32_SHIFT1);
+        T60  = _mm_packs_epi32(T50, T51);
+        im[0][i] = T60;
+
+        T50  = _mm_hsub_epi32(T40, T41);
+        T51  = _mm_hsub_epi32(T42, T43);
+        T50  = _mm_srai_epi32(_mm_add_epi32(T50, c_8), DCT32_SHIFT1);
+        T51  = _mm_srai_epi32(_mm_add_epi32(T51, c_8), DCT32_SHIFT1);
+        T60  = _mm_packs_epi32(T50, T51);
+        im[16][i] = T60;
+
+        T30  = _mm_madd_epi16(T20, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T31  = _mm_madd_epi16(T21, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T32  = _mm_madd_epi16(T22, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T33  = _mm_madd_epi16(T23, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T34  = _mm_madd_epi16(T24, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T35  = _mm_madd_epi16(T25, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T36  = _mm_madd_epi16(T26, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+        T37  = _mm_madd_epi16(T27, _mm_load_si128((__m128i*)tab_dct_16_1[8]));
+
+        T40  = _mm_hadd_epi32(T30, T31);
+        T41  = _mm_hadd_epi32(T32, T33);
+        T42  = _mm_hadd_epi32(T34, T35);
+        T43  = _mm_hadd_epi32(T36, T37);
+
+        T50  = _mm_hadd_epi32(T40, T41);
+        T51  = _mm_hadd_epi32(T42, T43);
+        T50  = _mm_srai_epi32(_mm_add_epi32(T50, c_8), DCT32_SHIFT1);
+        T51  = _mm_srai_epi32(_mm_add_epi32(T51, c_8), DCT32_SHIFT1);
+        T60  = _mm_packs_epi32(T50, T51);
+        im[8][i] = T60;
+
+        T30  = _mm_madd_epi16(T20, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T31  = _mm_madd_epi16(T21, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T32  = _mm_madd_epi16(T22, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T33  = _mm_madd_epi16(T23, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T34  = _mm_madd_epi16(T24, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T35  = _mm_madd_epi16(T25, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T36  = _mm_madd_epi16(T26, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+        T37  = _mm_madd_epi16(T27, _mm_load_si128((__m128i*)tab_dct_16_1[9]));
+
+        T40  = _mm_hadd_epi32(T30, T31);
+        T41  = _mm_hadd_epi32(T32, T33);
+        T42  = _mm_hadd_epi32(T34, T35);
+        T43  = _mm_hadd_epi32(T36, T37);
+
+        T50  = _mm_hadd_epi32(T40, T41);
+        T51  = _mm_hadd_epi32(T42, T43);
+        T50  = _mm_srai_epi32(_mm_add_epi32(T50, c_8), DCT32_SHIFT1);
+        T51  = _mm_srai_epi32(_mm_add_epi32(T51, c_8), DCT32_SHIFT1);
+        T60  = _mm_packs_epi32(T50, T51);
+        im[24][i] = T60;
+
+// MAKE_ODD: dot-product of each of the 8 working rows with
+// tab_dct_32_1[tab], rounded and packed into im[dstPos][i].
+#define MAKE_ODD(tab, dstPos) \
+    T30  = _mm_madd_epi16(T20, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T31  = _mm_madd_epi16(T21, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T32  = _mm_madd_epi16(T22, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T33  = _mm_madd_epi16(T23, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T34  = _mm_madd_epi16(T24, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T35  = _mm_madd_epi16(T25, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T36  = _mm_madd_epi16(T26, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T37  = _mm_madd_epi16(T27, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+        \
+    T40  = _mm_hadd_epi32(T30, T31); \
+    T41  = _mm_hadd_epi32(T32, T33); \
+    T42  = _mm_hadd_epi32(T34, T35); \
+    T43  = _mm_hadd_epi32(T36, T37); \
+        \
+    T50  = _mm_hadd_epi32(T40, T41); \
+    T51  = _mm_hadd_epi32(T42, T43); \
+    T50  = _mm_srai_epi32(_mm_add_epi32(T50, c_8), DCT32_SHIFT1); \
+    T51  = _mm_srai_epi32(_mm_add_epi32(T51, c_8), DCT32_SHIFT1); \
+    T60  = _mm_packs_epi32(T50, T51); \
+    im[(dstPos)][i] = T60;
+
+        MAKE_ODD(0, 4);
+        MAKE_ODD(1, 12);
+        MAKE_ODD(2, 20);
+        MAKE_ODD(3, 28);
+
+        T20  = _mm_sub_epi16(T10A, T10B);   // [EO5 EO2 EO6 EO1 EO4 EO3 EO7 EO0]
+        T21  = _mm_sub_epi16(T11A, T11B);
+        T22  = _mm_sub_epi16(T12A, T12B);
+        T23  = _mm_sub_epi16(T13A, T13B);
+        T24  = _mm_sub_epi16(T14A, T14B);
+        T25  = _mm_sub_epi16(T15A, T15B);
+        T26  = _mm_sub_epi16(T16A, T16B);
+        T27  = _mm_sub_epi16(T17A, T17B);
+
+        MAKE_ODD(4, 2);
+        MAKE_ODD(5, 6);
+        MAKE_ODD(6, 10);
+        MAKE_ODD(7, 14);
+        MAKE_ODD(8, 18);
+        MAKE_ODD(9, 22);
+        MAKE_ODD(10, 26);
+        MAKE_ODD(11, 30);
+#undef MAKE_ODD
+
+// Second MAKE_ODD variant: consumes the odd part (T0xA/T0xB) with paired
+// table rows tab / tab+1, producing im[dstPos] for odd coefficients.
+#define MAKE_ODD(tab, dstPos) \
+    T20  = _mm_madd_epi16(T00A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T21  = _mm_madd_epi16(T00B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab) + 1])); \
+    T22  = _mm_madd_epi16(T01A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T23  = _mm_madd_epi16(T01B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab) + 1])); \
+    T24  = _mm_madd_epi16(T02A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T25  = _mm_madd_epi16(T02B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab) + 1])); \
+    T26  = _mm_madd_epi16(T03A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T27  = _mm_madd_epi16(T03B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab) + 1])); \
+    T30  = _mm_madd_epi16(T04A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T31  = _mm_madd_epi16(T04B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab) + 1])); \
+    T32  = _mm_madd_epi16(T05A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T33  = _mm_madd_epi16(T05B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab) + 1])); \
+    T34  = _mm_madd_epi16(T06A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T35  = _mm_madd_epi16(T06B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab) + 1])); \
+    T36  = _mm_madd_epi16(T07A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab)])); \
+    T37  = _mm_madd_epi16(T07B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab) + 1])); \
+        \
+    T40  = _mm_hadd_epi32(T20, T21); \
+    T41  = _mm_hadd_epi32(T22, T23); \
+    T42  = _mm_hadd_epi32(T24, T25); \
+    T43  = _mm_hadd_epi32(T26, T27); \
+    T44  = _mm_hadd_epi32(T30, T31); \
+    T45  = _mm_hadd_epi32(T32, T33); \
+    T46  = _mm_hadd_epi32(T34, T35); \
+    T47  = _mm_hadd_epi32(T36, T37); \
+        \
+    T50  = _mm_hadd_epi32(T40, T41); \
+    T51  = _mm_hadd_epi32(T42, T43); \
+    T52  = _mm_hadd_epi32(T44, T45); \
+    T53  = _mm_hadd_epi32(T46, T47); \
+        \
+    T50  = _mm_hadd_epi32(T50, T51); \
+    T51  = _mm_hadd_epi32(T52, T53); \
+    T50  = _mm_srai_epi32(_mm_add_epi32(T50, c_8), DCT32_SHIFT1); \
+    T51  = _mm_srai_epi32(_mm_add_epi32(T51, c_8), DCT32_SHIFT1); \
+    T60  = _mm_packs_epi32(T50, T51); \
+    im[(dstPos)][i] = T60;
+
+        MAKE_ODD(12,  1);
+        MAKE_ODD(14,  3);
+        MAKE_ODD(16,  5);
+        MAKE_ODD(18,  7);
+        MAKE_ODD(20,  9);
+        MAKE_ODD(22, 11);
+        MAKE_ODD(24, 13);
+        MAKE_ODD(26, 15);
+        MAKE_ODD(28, 17);
+        MAKE_ODD(30, 19);
+        MAKE_ODD(32, 21);
+        MAKE_ODD(34, 23);
+        MAKE_ODD(36, 25);
+        MAKE_ODD(38, 27);
+        MAKE_ODD(40, 29);
+        MAKE_ODD(42, 31);
+
+#undef MAKE_ODD
+    }
+
+    // DCT2
+    // Pass 2: 8 iterations over groups of 4 intermediate rows; each
+    // MAKE_ODD below writes 4 output coefficients to dst.
+    for (i = 0; i < 32 / 4; i++)
+    {
+        // OPT_ME: to avoid register spill, I use matrix multiply, have other way?
+        T00A = im[i * 4 + 0][0];    // [07 06 05 04 03 02 01 00]
+        T00B = im[i * 4 + 0][1];    // [15 14 13 12 11 10 09 08]
+        T00C = im[i * 4 + 0][2];    // [23 22 21 20 19 18 17 16]
+        T00D = im[i * 4 + 0][3];    // [31 30 29 28 27 26 25 24]
+        T01A = im[i * 4 + 1][0];
+        T01B = im[i * 4 + 1][1];
+        T01C = im[i * 4 + 1][2];
+        T01D = im[i * 4 + 1][3];
+        T02A = im[i * 4 + 2][0];
+        T02B = im[i * 4 + 2][1];
+        T02C = im[i * 4 + 2][2];
+        T02D = im[i * 4 + 2][3];
+        T03A = im[i * 4 + 3][0];
+        T03B = im[i * 4 + 3][1];
+        T03C = im[i * 4 + 3][2];
+        T03D = im[i * 4 + 3][3];
+
+        // Reverse the upper halves so the unpacks below pair x[j] with x[31-j].
+        T00C = _mm_shuffle_epi8(T00C, _mm_load_si128((__m128i*)tab_dct_16_0[0]));    // [16 17 18 19 20 21 22 23]
+        T00D = _mm_shuffle_epi8(T00D, _mm_load_si128((__m128i*)tab_dct_16_0[0]));    // [24 25 26 27 28 29 30 31]
+        T01C = _mm_shuffle_epi8(T01C, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T01D = _mm_shuffle_epi8(T01D, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T02C = _mm_shuffle_epi8(T02C, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T02D = _mm_shuffle_epi8(T02D, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T03C = _mm_shuffle_epi8(T03C, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+        T03D = _mm_shuffle_epi8(T03D, _mm_load_si128((__m128i*)tab_dct_16_0[0]));
+
+        T10A = _mm_unpacklo_epi16(T00A, T00D);  // [28 03 29 02 30 01 31 00]
+        T10B = _mm_unpackhi_epi16(T00A, T00D);  // [24 07 25 06 26 05 27 04]
+        T00A = _mm_unpacklo_epi16(T00B, T00C);  // [20 11 21 10 22 09 23 08]
+        T00B = _mm_unpackhi_epi16(T00B, T00C);  // [16 15 17 14 18 13 19 12]
+        T11A = _mm_unpacklo_epi16(T01A, T01D);
+        T11B = _mm_unpackhi_epi16(T01A, T01D);
+        T01A = _mm_unpacklo_epi16(T01B, T01C);
+        T01B = _mm_unpackhi_epi16(T01B, T01C);
+        T12A = _mm_unpacklo_epi16(T02A, T02D);
+        T12B = _mm_unpackhi_epi16(T02A, T02D);
+        T02A = _mm_unpacklo_epi16(T02B, T02C);
+        T02B = _mm_unpackhi_epi16(T02B, T02C);
+        T13A = _mm_unpacklo_epi16(T03A, T03D);
+        T13B = _mm_unpackhi_epi16(T03A, T03D);
+        T03A = _mm_unpacklo_epi16(T03B, T03C);
+        T03B = _mm_unpackhi_epi16(T03B, T03C);
+
+// Full 32-tap dot-product against four table rows tab0..tab3; one rounded
+// 4-wide result is stored at dst[dstPos * 32 + i * 4].
+#define MAKE_ODD(tab0, tab1, tab2, tab3, dstPos) \
+    T20  = _mm_madd_epi16(T10A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab0)])); \
+    T21  = _mm_madd_epi16(T10B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab1)])); \
+    T22  = _mm_madd_epi16(T00A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab2)])); \
+    T23  = _mm_madd_epi16(T00B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab3)])); \
+    T24  = _mm_madd_epi16(T11A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab0)])); \
+    T25  = _mm_madd_epi16(T11B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab1)])); \
+    T26  = _mm_madd_epi16(T01A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab2)])); \
+    T27  = _mm_madd_epi16(T01B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab3)])); \
+    T30  = _mm_madd_epi16(T12A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab0)])); \
+    T31  = _mm_madd_epi16(T12B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab1)])); \
+    T32  = _mm_madd_epi16(T02A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab2)])); \
+    T33  = _mm_madd_epi16(T02B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab3)])); \
+    T34  = _mm_madd_epi16(T13A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab0)])); \
+    T35  = _mm_madd_epi16(T13B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab1)])); \
+    T36  = _mm_madd_epi16(T03A, _mm_load_si128((__m128i*)tab_dct_32_1[(tab2)])); \
+    T37  = _mm_madd_epi16(T03B, _mm_load_si128((__m128i*)tab_dct_32_1[(tab3)])); \
+        \
+    T60  = _mm_hadd_epi32(T20, T21); \
+    T61  = _mm_hadd_epi32(T22, T23); \
+    T62  = _mm_hadd_epi32(T24, T25); \
+    T63  = _mm_hadd_epi32(T26, T27); \
+    T64  = _mm_hadd_epi32(T30, T31); \
+    T65  = _mm_hadd_epi32(T32, T33); \
+    T66  = _mm_hadd_epi32(T34, T35); \
+    T67  = _mm_hadd_epi32(T36, T37); \
+        \
+    T60  = _mm_hadd_epi32(T60, T61); \
+    T61  = _mm_hadd_epi32(T62, T63); \
+    T62  = _mm_hadd_epi32(T64, T65); \
+    T63  = _mm_hadd_epi32(T66, T67); \
+        \
+    T60  = _mm_hadd_epi32(T60, T61); \
+    T61  = _mm_hadd_epi32(T62, T63); \
+        \
+    T60  = _mm_hadd_epi32(T60, T61); \
+        \
+    T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_1024), DCT32_SHIFT2); \
+    T60  = _mm_packs_epi32(T60, T60); \
+    _mm_storel_epi64((__m128i*)&dst[(dstPos) * 32 + (i * 4) + 0], T60); \
+
+        MAKE_ODD(44, 44, 44, 44,  0);
+        MAKE_ODD(45, 45, 45, 45, 16);
+        MAKE_ODD(46, 47, 46, 47,  8);
+        MAKE_ODD(48, 49, 48, 49, 24);
+
+        MAKE_ODD(50, 51, 52, 53,  4);
+        MAKE_ODD(54, 55, 56, 57, 12);
+        MAKE_ODD(58, 59, 60, 61, 20);
+        MAKE_ODD(62, 63, 64, 65, 28);
+
+        MAKE_ODD(66, 67, 68, 69,  2);
+        MAKE_ODD(70, 71, 72, 73,  6);
+        MAKE_ODD(74, 75, 76, 77, 10);
+        MAKE_ODD(78, 79, 80, 81, 14);
+
+        MAKE_ODD(82, 83, 84, 85, 18);
+        MAKE_ODD(86, 87, 88, 89, 22);
+        MAKE_ODD(90, 91, 92, 93, 26);
+        MAKE_ODD(94, 95, 96, 97, 30);
+
+        MAKE_ODD(98, 99, 100, 101,  1);
+        MAKE_ODD(102, 103, 104, 105,  3);
+        MAKE_ODD(106, 107, 108, 109,  5);
+        MAKE_ODD(110, 111, 112, 113,  7);
+        MAKE_ODD(114, 115, 116, 117,  9);
+        MAKE_ODD(118, 119, 120, 121, 11);
+        MAKE_ODD(122, 123, 124, 125, 13);
+        MAKE_ODD(126, 127, 128, 129, 15);
+        MAKE_ODD(130, 131, 132, 133, 17);
+        MAKE_ODD(134, 135, 136, 137, 19);
+        MAKE_ODD(138, 139, 140, 141, 21);
+        MAKE_ODD(142, 143, 144, 145, 23);
+        MAKE_ODD(146, 147, 148, 149, 25);
+        MAKE_ODD(150, 151, 152, 153, 27);
+        MAKE_ODD(154, 155, 156, 157, 29);
+        MAKE_ODD(158, 159, 160, 161, 31);
+#undef MAKE_ODD
+    }
+}
+
+namespace X265_NS {
+/* Install the SSSE3 intrinsic forward-DCT implementations into the
+ * encoder primitive table.  AVX2 assembly versions of both transforms
+ * exist, but these intrinsic builds are kept compiled and linked for
+ * CPUs that lack AVX2 support. */
+void setupIntrinsicDCT_ssse3(EncoderPrimitives &p)
+{
+    p.cu[BLOCK_32x32].dct = dct32;
+    p.cu[BLOCK_16x16].dct = dct16;
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/vec/vec-primitives.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,85 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "primitives.h"
+#include "x265.h"
+
+/* The #if logic here must match the file lists in CMakeLists.txt */
+#if X265_ARCH_X86
+#if defined(__INTEL_COMPILER)
+#define HAVE_SSE3
+#define HAVE_SSSE3
+#define HAVE_SSE4
+#define HAVE_AVX2
+#elif defined(__GNUC__)
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#if __clang__ || GCC_VERSION >= 40300 /* gcc_version >= gcc-4.3.0 */
+#define HAVE_SSE3
+#define HAVE_SSSE3
+#define HAVE_SSE4
+#endif
+#if __clang__ || GCC_VERSION >= 40700 /* gcc_version >= gcc-4.7.0 */
+#define HAVE_AVX2
+#endif
+#elif defined(_MSC_VER)
+#define HAVE_SSE3
+#define HAVE_SSSE3
+#define HAVE_SSE4
+#if _MSC_VER >= 1700 // VC11
+#define HAVE_AVX2
+#endif
+#endif // compiler checks
+#endif // if X265_ARCH_X86
+
+namespace X265_NS {
+// private x265 namespace
+
+// Forward declarations -- the definitions presumably live in the per-arch
+// intrinsic translation units listed in CMakeLists.txt (see note above).
+void setupIntrinsicDCT_sse3(EncoderPrimitives&);
+void setupIntrinsicDCT_ssse3(EncoderPrimitives&);
+void setupIntrinsicDCT_sse41(EncoderPrimitives&);
+
+/* Use primitives for the best available vector architecture */
+// Each enabled setup runs in ascending ISA order, so later (better) setups
+// presumably overwrite earlier entries -- best supported ISA wins.
+// NOTE(review): "Instrinsic" is a typo, but it is the linked public name;
+// do not rename without updating every caller and declaration.
+void setupInstrinsicPrimitives(EncoderPrimitives &p, int cpuMask)
+{
+#ifdef HAVE_SSE3
+    if (cpuMask & X265_CPU_SSE3)
+    {
+        setupIntrinsicDCT_sse3(p);
+    }
+#endif
+#ifdef HAVE_SSSE3
+    if (cpuMask & X265_CPU_SSSE3)
+    {
+        setupIntrinsicDCT_ssse3(p);
+    }
+#endif
+#ifdef HAVE_SSE4
+    if (cpuMask & X265_CPU_SSE4)
+    {
+        setupIntrinsicDCT_sse41(p);
+    }
+#endif
+    // Silence unused-parameter warnings when no HAVE_* macro is defined
+    // (non-x86 targets or compilers without SSE support).
+    (void)p;
+    (void)cpuMask;
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/version.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,131 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "x265.h"
+#include "common.h"
+#include "primitives.h"
+
+// Build-identification strings.  Each #if chain below selects exactly one
+// bracketed token; they are concatenated into PFX(build_info_str) at the
+// bottom of this file.
+
+// Two-level stringification so macro arguments are expanded before quoting.
+#define XSTR(x) STR(x)
+#define STR(x) #x
+
+#if defined(__clang__)
+#define COMPILEDBY  "[clang " XSTR(__clang_major__) "." XSTR(__clang_minor__) "." XSTR(__clang_patchlevel__) "]"
+// NOTE(review): ONARCH keys off __IA64__ (Itanium) only -- on x86-64 it
+// would claim 32-bit.  It is not referenced in build_info_str below, so it
+// appears vestigial; confirm before relying on it.
+#ifdef __IA64__
+#define ONARCH    "[on 64-bit] "
+#else
+#define ONARCH    "[on 32-bit] "
+#endif
+#endif
+
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
+#define COMPILEDBY  "[GCC " XSTR(__GNUC__) "." XSTR(__GNUC_MINOR__) "." XSTR(__GNUC_PATCHLEVEL__) "]"
+#ifdef __IA64__
+#define ONARCH    "[on 64-bit] "
+#else
+#define ONARCH    "[on 32-bit] "
+#endif
+#endif
+
+#ifdef __INTEL_COMPILER
+#define COMPILEDBY "[ICC " XSTR(__INTEL_COMPILER) "]"
+#elif  _MSC_VER
+#define COMPILEDBY "[MSVC " XSTR(_MSC_VER) "]"
+#endif
+
+// Fallback when no known compiler was detected above.
+#ifndef COMPILEDBY
+#define COMPILEDBY "[Unk-CXX]"
+#endif
+
+#ifdef _WIN32
+#define ONOS    "[Windows]"
+#elif  __linux
+#define ONOS    "[Linux]"
+#elif __OpenBSD__
+#define ONOS    "[OpenBSD]"
+#elif  __CYGWIN__
+#define ONOS    "[Cygwin]"
+#elif __APPLE__
+#define ONOS    "[Mac OS X]"
+#else
+#define ONOS    "[Unk-OS]"
+#endif
+
+#if X86_64
+#define BITS    "[64 bit]"
+#else
+#define BITS    "[32 bit]"
+#endif
+
+// Empty when assembly is enabled; tag noasm builds otherwise.
+#if defined(ENABLE_ASSEMBLY)
+#define ASM     ""
+#else
+#define ASM     "[noasm]"
+#endif
+ 
+#if NO_ATOMICS
+#define ATOMICS "[no-atomics]"
+#else
+#define ATOMICS ""
+#endif
+
+#if CHECKED_BUILD
+#define CHECKED "[CHECKED] "
+#else
+#define CHECKED " "
+#endif
+
+// Exactly one branch is selected by the build system's X265_DEPTH setting;
+// it also defines the exported max_bit_depth constant.
+#if X265_DEPTH == 12
+
+#define BITDEPTH "12bit"
+const int PFX(max_bit_depth) = 12;
+
+#elif X265_DEPTH == 10
+
+#define BITDEPTH "10bit"
+const int PFX(max_bit_depth) = 10;
+
+#elif X265_DEPTH == 8
+
+#define BITDEPTH "8bit"
+const int PFX(max_bit_depth) = 8;
+
+#endif
+
+// Tags for additional bit-depth libraries linked into this build.
+#if LINKED_8BIT
+#define ADD8 "+8bit"
+#else
+#define ADD8 ""
+#endif
+#if LINKED_10BIT
+#define ADD10 "+10bit"
+#else
+#define ADD10 ""
+#endif
+#if LINKED_12BIT
+#define ADD12 "+12bit"
+#else
+#define ADD12 ""
+#endif
+
+// Exported version and build-description strings (string-literal concatenation).
+const char* PFX(version_str) = XSTR(X265_VERSION);
+const char* PFX(build_info_str) = ONOS COMPILEDBY BITS ASM ATOMICS CHECKED BITDEPTH ADD8 ADD10 ADD12;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/wavefront.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,110 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com
+ *****************************************************************************/
+
+#include "threadpool.h"
+#include "threading.h"
+#include "wavefront.h"
+#include "common.h"
+
+namespace X265_NS {
+// x265 private namespace
+
+bool WaveFront::init(int numRows)
+{
+    m_numRows = numRows;
+
+    m_numWords = (numRows + 31) >> 5;
+    m_internalDependencyBitmap = X265_MALLOC(uint32_t, m_numWords);
+    if (m_internalDependencyBitmap)
+        memset((void*)m_internalDependencyBitmap, 0, sizeof(uint32_t) * m_numWords);
+
+    m_externalDependencyBitmap = X265_MALLOC(uint32_t, m_numWords);
+    if (m_externalDependencyBitmap)
+        memset((void*)m_externalDependencyBitmap, 0, sizeof(uint32_t) * m_numWords);
+
+    return m_internalDependencyBitmap && m_externalDependencyBitmap;
+}
+
+WaveFront::~WaveFront()
+{
+    x265_free((void*)m_internalDependencyBitmap);
+    x265_free((void*)m_externalDependencyBitmap);
+}
+
+void WaveFront::clearEnabledRowMask()
+{
+    memset((void*)m_externalDependencyBitmap, 0, sizeof(uint32_t) * m_numWords);
+    memset((void*)m_internalDependencyBitmap, 0, sizeof(uint32_t) * m_numWords);
+}
+
+void WaveFront::enqueueRow(int row)
+{
+    uint32_t bit = 1 << (row & 31);
+    ATOMIC_OR(&m_internalDependencyBitmap[row >> 5], bit);
+}
+
+void WaveFront::enableRow(int row)
+{
+    uint32_t bit = 1 << (row & 31);
+    ATOMIC_OR(&m_externalDependencyBitmap[row >> 5], bit);
+}
+
+void WaveFront::enableAllRows()
+{
+    memset((void*)m_externalDependencyBitmap, ~0, sizeof(uint32_t) * m_numWords);
+}
+
+bool WaveFront::dequeueRow(int row)
+{
+    uint32_t bit = 1 << (row & 31);
+    return !!(ATOMIC_AND(&m_internalDependencyBitmap[row >> 5], ~bit) & bit);
+}
+
+void WaveFront::findJob(int threadId)
+{
+    unsigned long id;
+
+    /* Loop over each word until all available rows are finished */
+    for (int w = 0; w < m_numWords; w++)
+    {
+        uint32_t oldval = m_internalDependencyBitmap[w] & m_externalDependencyBitmap[w];
+        while (oldval)
+        {
+            CTZ(id, oldval);
+
+            uint32_t bit = 1 << id;
+            if (ATOMIC_AND(&m_internalDependencyBitmap[w], ~bit) & bit)
+            {
+                /* we cleared the bit, we get to process the row */
+                processRow(w * 32 + id, threadId);
+                m_helpWanted = true;
+                return; /* check for a higher priority task */
+            }
+
+            oldval = m_internalDependencyBitmap[w] & m_externalDependencyBitmap[w];
+        }
+    }
+
+    m_helpWanted = false;
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/wavefront.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,97 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com
+ *****************************************************************************/
+
+#ifndef X265_WAVEFRONT_H
+#define X265_WAVEFRONT_H
+
+#include "common.h"
+#include "threadpool.h"
+
+namespace X265_NS {
+// x265 private namespace
+
+// Generic wave-front scheduler, manages busy-state of CU rows as a priority
+// queue (higher CU rows have priority over lower rows)
+//
+// Derived classes must implement ProcessRow().
+class WaveFront : public JobProvider
+{
+private:
+
+    // bitmaps of rows queued for processing, uses atomic intrinsics
+
+    // Dependencies are categorized as internal and external. Internal dependencies
+    // are caused by neighbor block availability.  External dependencies are generally
+    // reference frame reconstructed pixels being available.
+    uint32_t volatile *m_internalDependencyBitmap;
+    uint32_t volatile *m_externalDependencyBitmap;
+
+    // number of words in the bitmap
+    int m_numWords;
+
+    int m_numRows;
+
+public:
+
+    WaveFront()
+        : m_internalDependencyBitmap(NULL)
+        , m_externalDependencyBitmap(NULL)
+    {}
+
+    virtual ~WaveFront();
+
+    // If returns false, the frame must be encoded in series.
+    bool init(int numRows);
+
+    // Enqueue a row to be processed (mark its internal dependencies as resolved).
+    // A worker thread will later call processRow(row).
+    // This provider must be enqueued in the pool before enqueuing a row
+    void enqueueRow(int row);
+
+    // Mark a row as no longer having internal dependencies resolved. Returns
+    // true if bit clear was successful, false otherwise.
+    bool dequeueRow(int row);
+
+    // Mark the row's external dependencies as being resolved
+    void enableRow(int row);
+
+    // Mark all row external dependencies as being resolved. Some wavefront
+    // implementations (lookahead, for instance) have no recon pixel dependencies.
+    void enableAllRows();
+
+    // Mark all rows as having external dependencies which must be
+    // resolved before each row may proceed.
+    void clearEnabledRowMask();
+
+    // WaveFront's implementation of JobProvider::findJob. Consults
+    // m_queuedBitmap and calls ProcessRow(row) for lowest numbered queued row
+    // processes available rows and returns when no work remains
+    void findJob(int threadId);
+
+    // Start or resume encode processing of this row, must be implemented by
+    // derived classes.
+    virtual void processRow(int row, int threadId) = 0;
+};
+} // end namespace X265_NS
+
+#endif // ifndef X265_WAVEFRONT_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/winxp.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,130 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com
+ *****************************************************************************/
+
+#include "threading.h"
+
+#if defined(_WIN32) && (_WIN32_WINNT < 0x0600) // _WIN32_WINNT_VISTA
+
+namespace X265_NS {
+/* Mimic CONDITION_VARIABLE functions only supported on Vista+ */
+
+int WINAPI cond_init(ConditionVariable *cond)
+{ // InitializeConditionVariable
+    cond->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL);
+    if (!cond->semaphore)
+        return -1;
+    cond->waitersDone = CreateEvent(NULL, FALSE, FALSE, NULL);
+    if (!cond->waitersDone)
+        return -1;
+
+    InitializeCriticalSection(&cond->waiterCountMutex);
+    InitializeCriticalSection(&cond->broadcastMutex);
+    cond->waiterCount = 0;
+    cond->bIsBroadcast = false;
+
+    return 0;
+}
+
+void WINAPI cond_broadcast(ConditionVariable *cond)
+{ // WakeAllConditionVariable
+    EnterCriticalSection(&cond->broadcastMutex);
+    EnterCriticalSection(&cond->waiterCountMutex);
+    int haveWaiter = 0;
+
+    if (cond->waiterCount)
+    {
+        cond->bIsBroadcast = 1;
+        haveWaiter = 1;
+    }
+
+    if (haveWaiter)
+    {
+        ReleaseSemaphore(cond->semaphore, cond->waiterCount, NULL);
+        LeaveCriticalSection(&cond->waiterCountMutex);
+        WaitForSingleObject(cond->waitersDone, INFINITE);
+        cond->bIsBroadcast = 0;
+    }
+    else
+        LeaveCriticalSection(&cond->waiterCountMutex);
+
+    LeaveCriticalSection(&cond->broadcastMutex);
+}
+
+void WINAPI cond_signal(ConditionVariable *cond)
+{ // WakeConditionVariable
+    EnterCriticalSection(&cond->broadcastMutex);
+    EnterCriticalSection(&cond->waiterCountMutex);
+    int haveWaiter = cond->waiterCount;
+    LeaveCriticalSection(&cond->waiterCountMutex);
+
+    if (haveWaiter)
+    {
+        ReleaseSemaphore(cond->semaphore, 1, NULL);
+        WaitForSingleObject(cond->waitersDone, INFINITE);
+    }
+
+    LeaveCriticalSection(&cond->broadcastMutex);
+}
+
+BOOL WINAPI cond_wait(ConditionVariable *cond, CRITICAL_SECTION *mutex, DWORD wait)
+{ // SleepConditionVariableCS
+    EnterCriticalSection(&cond->broadcastMutex);
+    EnterCriticalSection(&cond->waiterCountMutex);
+    cond->waiterCount++;
+    LeaveCriticalSection(&cond->waiterCountMutex);
+    LeaveCriticalSection(&cond->broadcastMutex);
+
+    // unlock the external mutex
+    LeaveCriticalSection(mutex);
+    BOOL ret = WaitForSingleObject(cond->semaphore, wait);
+
+    EnterCriticalSection(&cond->waiterCountMutex);
+    cond->waiterCount--;
+    int last_waiter = !cond->waiterCount || !cond->bIsBroadcast;
+    LeaveCriticalSection(&cond->waiterCountMutex);
+
+    if (last_waiter)
+        SetEvent(cond->waitersDone);
+
+    // lock the external mutex
+    EnterCriticalSection(mutex);
+
+    // returns false on timeout or error
+    return ret;
+}
+
+/* Native CONDITION_VARIABLE instances are not freed, so this is a special case */
+void cond_destroy(ConditionVariable *cond)
+{
+    CloseHandle(cond->semaphore);
+    CloseHandle(cond->waitersDone);
+    DeleteCriticalSection(&cond->broadcastMutex);
+    DeleteCriticalSection(&cond->waiterCountMutex);
+}
+} // namespace X265_NS
+
+#elif defined(_MSC_VER)
+
+namespace { int _avoid_linker_warnings = 0; }
+
+#endif // _WIN32_WINNT <= _WIN32_WINNT_WINXP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/winxp.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,67 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com
+ *****************************************************************************/
+
+#ifndef X265_WINXP_H
+#define X265_WINXP_H
+
+#if defined(_WIN32) && (_WIN32_WINNT < 0x0600) // _WIN32_WINNT_VISTA
+
+#ifdef _MSC_VER
+#include <intrin.h> // _InterlockedCompareExchange64
+#endif
+
+namespace X265_NS {
+/* non-native condition variable */
+typedef struct
+{
+    CRITICAL_SECTION broadcastMutex;
+    CRITICAL_SECTION waiterCountMutex;
+    HANDLE semaphore;
+    HANDLE waitersDone;
+    volatile int waiterCount;
+    volatile int bIsBroadcast;
+} ConditionVariable;
+
+int WINAPI cond_init(ConditionVariable *cond);
+void WINAPI cond_broadcast(ConditionVariable *cond);
+void WINAPI cond_signal(ConditionVariable *cond);
+BOOL WINAPI cond_wait(ConditionVariable *cond, CRITICAL_SECTION *mutex, DWORD wait);
+void cond_destroy(ConditionVariable *cond);
+
+/* map missing API symbols to our structure and functions */
+#define CONDITION_VARIABLE          X265_NS::ConditionVariable
+#define InitializeConditionVariable X265_NS::cond_init
+#define SleepConditionVariableCS    X265_NS::cond_wait
+#define WakeConditionVariable       X265_NS::cond_signal
+#define WakeAllConditionVariable    X265_NS::cond_broadcast
+#define XP_CONDITION_VAR_FREE       X265_NS::cond_destroy
+
+} // namespace X265_NS
+
+#else // if defined(_WIN32) && (_WIN32_WINNT < 0x0600)
+
+#define XP_CONDITION_VAR_FREE(x)
+
+#endif // _WIN32_WINNT <= _WIN32_WINNT_WINXP
+
+#endif // ifndef X265_WINXP_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/README.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,14 @@
+The ASM source here is directly pulled from the x264 project with four
+changes:
+
+1 - FENC_STRIDE must be increased to 64 in x86util.asm because of HEVC's
+    larger CU sizes
+2 - Because of #1, we must rebrand the functions with x265_ prefixes in
+    x86inc.asm (private_prefix) and pixel-a.asm (mangle(x265_pixel_ssd))
+3 - We have modified the MMX SSD primitives to use EMMS before returning
+4 - We have added some new SATD block sizes for SSE3
+
+Current assembly is based on x264 revision:
+   configure: Support cygwin64
+   Diogo Franco (Kovensky) <diogomfranco@gmail.com>
+   2013-07-23 22:17:44 -0300
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/asm-primitives.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,3727 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *          Min Chen <chenm003@163.com> <min.chen@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "x265.h"
+#include "cpu.h"
+
+#define FUNCDEF_TU(ret, name, cpu, ...) \
+    ret PFX(name ## _4x4_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## _8x8_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## _16x16_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## _32x32_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## _64x64_ ## cpu(__VA_ARGS__))
+
+#define FUNCDEF_TU_S(ret, name, cpu, ...) \
+    ret PFX(name ## _4_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## _8_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## _16_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## _32_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## _64_ ## cpu(__VA_ARGS__))
+
+#define FUNCDEF_TU_S2(ret, name, cpu, ...) \
+    ret PFX(name ## 4_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## 8_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## 16_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## 32_ ## cpu(__VA_ARGS__)); \
+    ret PFX(name ## 64_ ## cpu(__VA_ARGS__))
+
+#define FUNCDEF_PU(ret, name, cpu, ...) \
+    ret PFX(name ## _4x4_   ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _8x8_   ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _16x16_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _32x32_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _64x64_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _8x4_   ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _4x8_   ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _16x8_  ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _8x16_  ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _16x32_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _32x16_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _64x32_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _32x64_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _16x12_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _12x16_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _16x4_  ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _4x16_  ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _32x24_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _24x32_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _32x8_  ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _8x32_  ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _64x48_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _48x64_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _64x16_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _16x64_ ## cpu)(__VA_ARGS__)
+
+#define FUNCDEF_CHROMA_PU(ret, name, cpu, ...) \
+    FUNCDEF_PU(ret, name, cpu, __VA_ARGS__); \
+    ret PFX(name ## _4x2_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _2x4_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _8x2_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _2x8_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _8x6_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _6x8_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _8x12_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _12x8_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _6x16_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _16x6_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _2x16_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _16x2_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _4x12_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _12x4_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _32x12_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _12x32_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _32x4_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _4x32_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _32x48_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _48x32_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _16x24_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _24x16_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _8x64_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _64x8_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _64x24_ ## cpu)(__VA_ARGS__); \
+    ret PFX(name ## _24x64_ ## cpu)(__VA_ARGS__);
+
+extern "C" {
+#include "pixel.h"
+#include "pixel-util.h"
+#include "mc.h"
+#include "ipfilter8.h"
+#include "loopfilter.h"
+#include "blockcopy8.h"
+#include "intrapred.h"
+#include "dct8.h"
+}
+
+#define ALL_LUMA_CU_TYPED(prim, fncdef, fname, cpu) \
+    p.cu[BLOCK_8x8].prim   = fncdef PFX(fname ## _8x8_ ## cpu); \
+    p.cu[BLOCK_16x16].prim = fncdef PFX(fname ## _16x16_ ## cpu); \
+    p.cu[BLOCK_32x32].prim = fncdef PFX(fname ## _32x32_ ## cpu); \
+    p.cu[BLOCK_64x64].prim = fncdef PFX(fname ## _64x64_ ## cpu)
+#define ALL_LUMA_CU_TYPED_S(prim, fncdef, fname, cpu) \
+    p.cu[BLOCK_8x8].prim   = fncdef PFX(fname ## 8_ ## cpu); \
+    p.cu[BLOCK_16x16].prim = fncdef PFX(fname ## 16_ ## cpu); \
+    p.cu[BLOCK_32x32].prim = fncdef PFX(fname ## 32_ ## cpu); \
+    p.cu[BLOCK_64x64].prim = fncdef PFX(fname ## 64_ ## cpu)
+#define ALL_LUMA_TU_TYPED(prim, fncdef, fname, cpu) \
+    p.cu[BLOCK_4x4].prim   = fncdef PFX(fname ## _4x4_ ## cpu); \
+    p.cu[BLOCK_8x8].prim   = fncdef PFX(fname ## _8x8_ ## cpu); \
+    p.cu[BLOCK_16x16].prim = fncdef PFX(fname ## _16x16_ ## cpu); \
+    p.cu[BLOCK_32x32].prim = fncdef PFX(fname ## _32x32_ ## cpu)
+#define ALL_LUMA_TU_TYPED_S(prim, fncdef, fname, cpu) \
+    p.cu[BLOCK_4x4].prim   = fncdef PFX(fname ## 4_ ## cpu); \
+    p.cu[BLOCK_8x8].prim   = fncdef PFX(fname ## 8_ ## cpu); \
+    p.cu[BLOCK_16x16].prim = fncdef PFX(fname ## 16_ ## cpu); \
+    p.cu[BLOCK_32x32].prim = fncdef PFX(fname ## 32_ ## cpu)
+#define ALL_LUMA_BLOCKS_TYPED(prim, fncdef, fname, cpu) \
+    p.cu[BLOCK_4x4].prim   = fncdef PFX(fname ## _4x4_ ## cpu); \
+    p.cu[BLOCK_8x8].prim   = fncdef PFX(fname ## _8x8_ ## cpu); \
+    p.cu[BLOCK_16x16].prim = fncdef PFX(fname ## _16x16_ ## cpu); \
+    p.cu[BLOCK_32x32].prim = fncdef PFX(fname ## _32x32_ ## cpu); \
+    p.cu[BLOCK_64x64].prim = fncdef PFX(fname ## _64x64_ ## cpu);
+#define ALL_LUMA_CU(prim, fname, cpu)      ALL_LUMA_CU_TYPED(prim, , fname, cpu)
+#define ALL_LUMA_CU_S(prim, fname, cpu)    ALL_LUMA_CU_TYPED_S(prim, , fname, cpu)
+#define ALL_LUMA_TU(prim, fname, cpu)      ALL_LUMA_TU_TYPED(prim, , fname, cpu)
+#define ALL_LUMA_BLOCKS(prim, fname, cpu)  ALL_LUMA_BLOCKS_TYPED(prim, , fname, cpu)
+#define ALL_LUMA_TU_S(prim, fname, cpu)    ALL_LUMA_TU_TYPED_S(prim, , fname, cpu)
+
+#define ALL_LUMA_PU_TYPED(prim, fncdef, fname, cpu) \
+    p.pu[LUMA_8x8].prim   = fncdef PFX(fname ## _8x8_ ## cpu); \
+    p.pu[LUMA_16x16].prim = fncdef PFX(fname ## _16x16_ ## cpu); \
+    p.pu[LUMA_32x32].prim = fncdef PFX(fname ## _32x32_ ## cpu); \
+    p.pu[LUMA_64x64].prim = fncdef PFX(fname ## _64x64_ ## cpu); \
+    p.pu[LUMA_8x4].prim   = fncdef PFX(fname ## _8x4_ ## cpu); \
+    p.pu[LUMA_4x8].prim   = fncdef PFX(fname ## _4x8_ ## cpu); \
+    p.pu[LUMA_16x8].prim  = fncdef PFX(fname ## _16x8_ ## cpu); \
+    p.pu[LUMA_8x16].prim  = fncdef PFX(fname ## _8x16_ ## cpu); \
+    p.pu[LUMA_16x32].prim = fncdef PFX(fname ## _16x32_ ## cpu); \
+    p.pu[LUMA_32x16].prim = fncdef PFX(fname ## _32x16_ ## cpu); \
+    p.pu[LUMA_64x32].prim = fncdef PFX(fname ## _64x32_ ## cpu); \
+    p.pu[LUMA_32x64].prim = fncdef PFX(fname ## _32x64_ ## cpu); \
+    p.pu[LUMA_16x12].prim = fncdef PFX(fname ## _16x12_ ## cpu); \
+    p.pu[LUMA_12x16].prim = fncdef PFX(fname ## _12x16_ ## cpu); \
+    p.pu[LUMA_16x4].prim  = fncdef PFX(fname ## _16x4_ ## cpu); \
+    p.pu[LUMA_4x16].prim  = fncdef PFX(fname ## _4x16_ ## cpu); \
+    p.pu[LUMA_32x24].prim = fncdef PFX(fname ## _32x24_ ## cpu); \
+    p.pu[LUMA_24x32].prim = fncdef PFX(fname ## _24x32_ ## cpu); \
+    p.pu[LUMA_32x8].prim  = fncdef PFX(fname ## _32x8_ ## cpu); \
+    p.pu[LUMA_8x32].prim  = fncdef PFX(fname ## _8x32_ ## cpu); \
+    p.pu[LUMA_64x48].prim = fncdef PFX(fname ## _64x48_ ## cpu); \
+    p.pu[LUMA_48x64].prim = fncdef PFX(fname ## _48x64_ ## cpu); \
+    p.pu[LUMA_64x16].prim = fncdef PFX(fname ## _64x16_ ## cpu); \
+    p.pu[LUMA_16x64].prim = fncdef PFX(fname ## _16x64_ ## cpu)
+#define ALL_LUMA_PU(prim, fname, cpu) ALL_LUMA_PU_TYPED(prim, , fname, cpu)
+
+#define ALL_LUMA_PU_T(prim, fname) \
+    p.pu[LUMA_8x8].prim   = fname<LUMA_8x8>; \
+    p.pu[LUMA_16x16].prim = fname<LUMA_16x16>; \
+    p.pu[LUMA_32x32].prim = fname<LUMA_32x32>; \
+    p.pu[LUMA_64x64].prim = fname<LUMA_64x64>; \
+    p.pu[LUMA_8x4].prim   = fname<LUMA_8x4>; \
+    p.pu[LUMA_4x8].prim   = fname<LUMA_4x8>; \
+    p.pu[LUMA_16x8].prim  = fname<LUMA_16x8>; \
+    p.pu[LUMA_8x16].prim  = fname<LUMA_8x16>; \
+    p.pu[LUMA_16x32].prim = fname<LUMA_16x32>; \
+    p.pu[LUMA_32x16].prim = fname<LUMA_32x16>; \
+    p.pu[LUMA_64x32].prim = fname<LUMA_64x32>; \
+    p.pu[LUMA_32x64].prim = fname<LUMA_32x64>; \
+    p.pu[LUMA_16x12].prim = fname<LUMA_16x12>; \
+    p.pu[LUMA_12x16].prim = fname<LUMA_12x16>; \
+    p.pu[LUMA_16x4].prim  = fname<LUMA_16x4>; \
+    p.pu[LUMA_4x16].prim  = fname<LUMA_4x16>; \
+    p.pu[LUMA_32x24].prim = fname<LUMA_32x24>; \
+    p.pu[LUMA_24x32].prim = fname<LUMA_24x32>; \
+    p.pu[LUMA_32x8].prim  = fname<LUMA_32x8>; \
+    p.pu[LUMA_8x32].prim  = fname<LUMA_8x32>; \
+    p.pu[LUMA_64x48].prim = fname<LUMA_64x48>; \
+    p.pu[LUMA_48x64].prim = fname<LUMA_48x64>; \
+    p.pu[LUMA_64x16].prim = fname<LUMA_64x16>; \
+    p.pu[LUMA_16x64].prim = fname<LUMA_16x64>
+
+#define ALL_CHROMA_420_CU_TYPED(prim, fncdef, fname, cpu) \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].prim   = fncdef PFX(fname ## _4x4_ ## cpu); \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].prim   = fncdef PFX(fname ## _8x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].prim = fncdef PFX(fname ## _16x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].prim = fncdef PFX(fname ## _32x32_ ## cpu)
+#define ALL_CHROMA_420_CU_TYPED_S(prim, fncdef, fname, cpu) \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].prim   = fncdef PFX(fname ## _4_ ## cpu); \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].prim   = fncdef PFX(fname ## _8_ ## cpu); \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].prim = fncdef PFX(fname ## _16_ ## cpu); \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].prim = fncdef PFX(fname ## _32_ ## cpu)
+#define ALL_CHROMA_420_CU(prim, fname, cpu) ALL_CHROMA_420_CU_TYPED(prim, , fname, cpu)
+#define ALL_CHROMA_420_CU_S(prim, fname, cpu) ALL_CHROMA_420_CU_TYPED_S(prim, , fname, cpu)
+
+#define ALL_CHROMA_420_PU_TYPED(prim, fncdef, fname, cpu) \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].prim   = fncdef PFX(fname ## _4x4_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].prim   = fncdef PFX(fname ## _8x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].prim = fncdef PFX(fname ## _16x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].prim = fncdef PFX(fname ## _32x32_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].prim   = fncdef PFX(fname ## _4x2_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].prim   = fncdef PFX(fname ## _2x4_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].prim   = fncdef PFX(fname ## _8x4_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].prim   = fncdef PFX(fname ## _4x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].prim  = fncdef PFX(fname ## _16x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].prim  = fncdef PFX(fname ## _8x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].prim = fncdef PFX(fname ## _32x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].prim = fncdef PFX(fname ## _16x32_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].prim   = fncdef PFX(fname ## _8x6_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].prim   = fncdef PFX(fname ## _6x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].prim   = fncdef PFX(fname ## _8x2_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].prim   = fncdef PFX(fname ## _2x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].prim = fncdef PFX(fname ## _16x12_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].prim = fncdef PFX(fname ## _12x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].prim  = fncdef PFX(fname ## _16x4_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].prim  = fncdef PFX(fname ## _4x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].prim = fncdef PFX(fname ## _32x24_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].prim = fncdef PFX(fname ## _24x32_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].prim  = fncdef PFX(fname ## _32x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].prim  = fncdef PFX(fname ## _8x32_ ## cpu)
+#define ALL_CHROMA_420_PU(prim, fname, cpu) ALL_CHROMA_420_PU_TYPED(prim, , fname, cpu)
+
+#define ALL_CHROMA_420_4x4_PU_TYPED(prim, fncdef, fname, cpu) \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].prim   = fncdef PFX(fname ## _4x4_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].prim   = fncdef PFX(fname ## _8x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].prim = fncdef PFX(fname ## _16x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].prim = fncdef PFX(fname ## _32x32_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].prim   = fncdef PFX(fname ## _8x4_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].prim   = fncdef PFX(fname ## _4x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].prim  = fncdef PFX(fname ## _16x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].prim  = fncdef PFX(fname ## _8x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].prim = fncdef PFX(fname ## _32x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].prim = fncdef PFX(fname ## _16x32_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].prim = fncdef PFX(fname ## _16x12_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].prim = fncdef PFX(fname ## _12x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].prim  = fncdef PFX(fname ## _16x4_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].prim  = fncdef PFX(fname ## _4x16_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].prim = fncdef PFX(fname ## _32x24_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].prim = fncdef PFX(fname ## _24x32_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].prim  = fncdef PFX(fname ## _32x8_ ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].prim  = fncdef PFX(fname ## _8x32_ ## cpu)
+#define ALL_CHROMA_420_4x4_PU(prim, fname, cpu) ALL_CHROMA_420_4x4_PU_TYPED(prim, , fname, cpu)
+
+#define ALL_CHROMA_422_CU_TYPED(prim, fncdef, fname, cpu) \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].prim   = fncdef PFX(fname ## _4x8_ ## cpu); \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].prim  = fncdef PFX(fname ## _8x16_ ## cpu); \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].prim = fncdef PFX(fname ## _16x32_ ## cpu); \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].prim = fncdef PFX(fname ## _32x64_ ## cpu)
+#define ALL_CHROMA_422_CU(prim, fname, cpu) ALL_CHROMA_422_CU_TYPED(prim, , fname, cpu)
+
+#define ALL_CHROMA_422_PU_TYPED(prim, fncdef, fname, cpu) /* bind member 'prim' for every 4:2:2 chroma PU size (square, rectangular and AMP) to the fname_WxH_cpu kernel */ \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].prim   = fncdef PFX(fname ## _4x8_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].prim  = fncdef PFX(fname ## _8x16_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].prim = fncdef PFX(fname ## _16x32_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].prim = fncdef PFX(fname ## _32x64_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].prim   = fncdef PFX(fname ## _4x4_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].prim   = fncdef PFX(fname ## _2x8_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].prim   = fncdef PFX(fname ## _8x8_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].prim  = fncdef PFX(fname ## _4x16_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].prim = fncdef PFX(fname ## _16x16_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].prim  = fncdef PFX(fname ## _8x32_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].prim = fncdef PFX(fname ## _32x32_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].prim = fncdef PFX(fname ## _16x64_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].prim  = fncdef PFX(fname ## _8x12_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].prim  = fncdef PFX(fname ## _6x16_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].prim   = fncdef PFX(fname ## _8x4_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].prim  = fncdef PFX(fname ## _2x16_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].prim = fncdef PFX(fname ## _16x24_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].prim = fncdef PFX(fname ## _12x32_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].prim  = fncdef PFX(fname ## _16x8_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].prim  = fncdef PFX(fname ## _4x32_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].prim = fncdef PFX(fname ## _32x48_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].prim = fncdef PFX(fname ## _24x64_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].prim = fncdef PFX(fname ## _32x16_ ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].prim  = fncdef PFX(fname ## _8x64_ ## cpu)
+#define ALL_CHROMA_422_PU(prim, fname, cpu) ALL_CHROMA_422_PU_TYPED(prim, , fname, cpu) /* common case: no cast (empty fncdef argument) */
+
+#define ALL_CHROMA_444_PU_TYPED(prim, fncdef, fname, cpu) /* 4:4:4 chroma has luma dimensions, so it indexes with the LUMA_* part sizes */ \
+    p.chroma[X265_CSP_I444].pu[LUMA_4x4].prim   = fncdef PFX(fname ## _4x4_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_8x8].prim   = fncdef PFX(fname ## _8x8_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_16x16].prim = fncdef PFX(fname ## _16x16_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_32x32].prim = fncdef PFX(fname ## _32x32_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_64x64].prim = fncdef PFX(fname ## _64x64_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_8x4].prim   = fncdef PFX(fname ## _8x4_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_4x8].prim   = fncdef PFX(fname ## _4x8_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_16x8].prim  = fncdef PFX(fname ## _16x8_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_8x16].prim  = fncdef PFX(fname ## _8x16_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_16x32].prim = fncdef PFX(fname ## _16x32_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_32x16].prim = fncdef PFX(fname ## _32x16_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_64x32].prim = fncdef PFX(fname ## _64x32_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_32x64].prim = fncdef PFX(fname ## _32x64_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_16x12].prim = fncdef PFX(fname ## _16x12_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_12x16].prim = fncdef PFX(fname ## _12x16_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_16x4].prim  = fncdef PFX(fname ## _16x4_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_4x16].prim  = fncdef PFX(fname ## _4x16_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_32x24].prim = fncdef PFX(fname ## _32x24_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_24x32].prim = fncdef PFX(fname ## _24x32_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_32x8].prim  = fncdef PFX(fname ## _32x8_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_8x32].prim  = fncdef PFX(fname ## _8x32_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_64x48].prim = fncdef PFX(fname ## _64x48_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_48x64].prim = fncdef PFX(fname ## _48x64_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_64x16].prim = fncdef PFX(fname ## _64x16_ ## cpu); \
+    p.chroma[X265_CSP_I444].pu[LUMA_16x64].prim = fncdef PFX(fname ## _16x64_ ## cpu)
+#define ALL_CHROMA_444_PU(prim, fname, cpu) ALL_CHROMA_444_PU_TYPED(prim, , fname, cpu) /* common case: no cast (empty fncdef argument) */
+
+#define AVC_LUMA_PU(name, cpu) /* bind pixel_<name> kernels for the small PU sizes (16x16 down to 4x4, plus 4x16); complements HEVC_SAD below */ \
+    p.pu[LUMA_16x16].name = PFX(pixel_ ## name ## _16x16_ ## cpu); \
+    p.pu[LUMA_16x8].name  = PFX(pixel_ ## name ## _16x8_ ## cpu); \
+    p.pu[LUMA_8x16].name  = PFX(pixel_ ## name ## _8x16_ ## cpu); \
+    p.pu[LUMA_8x8].name   = PFX(pixel_ ## name ## _8x8_ ## cpu); \
+    p.pu[LUMA_8x4].name   = PFX(pixel_ ## name ## _8x4_ ## cpu); \
+    p.pu[LUMA_4x8].name   = PFX(pixel_ ## name ## _4x8_ ## cpu); \
+    p.pu[LUMA_4x4].name   = PFX(pixel_ ## name ## _4x4_ ## cpu); \
+    p.pu[LUMA_4x16].name  = PFX(pixel_ ## name ## _4x16_ ## cpu)
+
+#define HEVC_SAD(cpu) /* bind SAD kernels for the remaining luma PU sizes not covered by AVC_LUMA_PU(sad, ...) */ \
+    p.pu[LUMA_8x32].sad  = PFX(pixel_sad_8x32_ ## cpu); \
+    p.pu[LUMA_16x4].sad  = PFX(pixel_sad_16x4_ ## cpu); \
+    p.pu[LUMA_16x12].sad = PFX(pixel_sad_16x12_ ## cpu); \
+    p.pu[LUMA_16x32].sad = PFX(pixel_sad_16x32_ ## cpu); \
+    p.pu[LUMA_16x64].sad = PFX(pixel_sad_16x64_ ## cpu); \
+    p.pu[LUMA_32x8].sad  = PFX(pixel_sad_32x8_ ## cpu); \
+    p.pu[LUMA_32x16].sad = PFX(pixel_sad_32x16_ ## cpu); \
+    p.pu[LUMA_32x24].sad = PFX(pixel_sad_32x24_ ## cpu); \
+    p.pu[LUMA_32x32].sad = PFX(pixel_sad_32x32_ ## cpu); \
+    p.pu[LUMA_32x64].sad = PFX(pixel_sad_32x64_ ## cpu); \
+    p.pu[LUMA_64x16].sad = PFX(pixel_sad_64x16_ ## cpu); \
+    p.pu[LUMA_64x32].sad = PFX(pixel_sad_64x32_ ## cpu); \
+    p.pu[LUMA_64x48].sad = PFX(pixel_sad_64x48_ ## cpu); \
+    p.pu[LUMA_64x64].sad = PFX(pixel_sad_64x64_ ## cpu); \
+    p.pu[LUMA_48x64].sad = PFX(pixel_sad_48x64_ ## cpu); \
+    p.pu[LUMA_24x32].sad = PFX(pixel_sad_24x32_ ## cpu); \
+    p.pu[LUMA_12x16].sad = PFX(pixel_sad_12x16_ ## cpu)
+
+#define HEVC_SAD_X3(cpu) /* bind pixel_sad_x3 (multi-reference SAD) kernels for luma PU sizes 16x8 through 64x64 */ \
+    p.pu[LUMA_16x8].sad_x3  = PFX(pixel_sad_x3_16x8_ ## cpu); \
+    p.pu[LUMA_16x12].sad_x3 = PFX(pixel_sad_x3_16x12_ ## cpu); \
+    p.pu[LUMA_16x16].sad_x3 = PFX(pixel_sad_x3_16x16_ ## cpu); \
+    p.pu[LUMA_16x32].sad_x3 = PFX(pixel_sad_x3_16x32_ ## cpu); \
+    p.pu[LUMA_16x64].sad_x3 = PFX(pixel_sad_x3_16x64_ ## cpu); \
+    p.pu[LUMA_32x8].sad_x3  = PFX(pixel_sad_x3_32x8_ ## cpu); \
+    p.pu[LUMA_32x16].sad_x3 = PFX(pixel_sad_x3_32x16_ ## cpu); \
+    p.pu[LUMA_32x24].sad_x3 = PFX(pixel_sad_x3_32x24_ ## cpu); \
+    p.pu[LUMA_32x32].sad_x3 = PFX(pixel_sad_x3_32x32_ ## cpu); \
+    p.pu[LUMA_32x64].sad_x3 = PFX(pixel_sad_x3_32x64_ ## cpu); \
+    p.pu[LUMA_24x32].sad_x3 = PFX(pixel_sad_x3_24x32_ ## cpu); \
+    p.pu[LUMA_48x64].sad_x3 = PFX(pixel_sad_x3_48x64_ ## cpu); \
+    p.pu[LUMA_64x16].sad_x3 = PFX(pixel_sad_x3_64x16_ ## cpu); \
+    p.pu[LUMA_64x32].sad_x3 = PFX(pixel_sad_x3_64x32_ ## cpu); \
+    p.pu[LUMA_64x48].sad_x3 = PFX(pixel_sad_x3_64x48_ ## cpu); \
+    p.pu[LUMA_64x64].sad_x3 = PFX(pixel_sad_x3_64x64_ ## cpu)
+
+#define HEVC_SAD_X4(cpu) /* same size set as HEVC_SAD_X3, for the pixel_sad_x4 variants */ \
+    p.pu[LUMA_16x8].sad_x4  = PFX(pixel_sad_x4_16x8_ ## cpu); \
+    p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_ ## cpu); \
+    p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_ ## cpu); \
+    p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_ ## cpu); \
+    p.pu[LUMA_16x64].sad_x4 = PFX(pixel_sad_x4_16x64_ ## cpu); \
+    p.pu[LUMA_32x8].sad_x4  = PFX(pixel_sad_x4_32x8_ ## cpu); \
+    p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_ ## cpu); \
+    p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_ ## cpu); \
+    p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_ ## cpu); \
+    p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_ ## cpu); \
+    p.pu[LUMA_24x32].sad_x4 = PFX(pixel_sad_x4_24x32_ ## cpu); \
+    p.pu[LUMA_48x64].sad_x4 = PFX(pixel_sad_x4_48x64_ ## cpu); \
+    p.pu[LUMA_64x16].sad_x4 = PFX(pixel_sad_x4_64x16_ ## cpu); \
+    p.pu[LUMA_64x32].sad_x4 = PFX(pixel_sad_x4_64x32_ ## cpu); \
+    p.pu[LUMA_64x48].sad_x4 = PFX(pixel_sad_x4_64x48_ ## cpu); \
+    p.pu[LUMA_64x64].sad_x4 = PFX(pixel_sad_x4_64x64_ ## cpu)
+
+#define ASSIGN_SSE_PP(cpu) /* SSD (sum of squared differences) for luma CUs; 4:2:2 chroma CU entries reuse the same-geometry pixel_ssd kernels */ \
+    p.cu[BLOCK_8x8].sse_pp   = PFX(pixel_ssd_8x8_ ## cpu); \
+    p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_ ## cpu); \
+    p.cu[BLOCK_32x32].sse_pp = PFX(pixel_ssd_32x32_ ## cpu); \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sse_pp = PFX(pixel_ssd_8x16_ ## cpu); \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = PFX(pixel_ssd_16x32_ ## cpu); \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sse_pp = PFX(pixel_ssd_32x64_ ## cpu);
+
+#define ASSIGN_SSE_SS(cpu) ALL_LUMA_BLOCKS(sse_ss, pixel_ssd_ss, cpu) /* short-short SSD for all luma block sizes */
+
+#define ASSIGN_SA8D(cpu) /* sa8d for all luma CUs, plus the three largest 4:2:2 chroma CU geometries */ \
+    ALL_LUMA_CU(sa8d, pixel_sa8d, cpu); \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sa8d = PFX(pixel_sa8d_8x16_ ## cpu); \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sa8d = PFX(pixel_sa8d_16x32_ ## cpu); \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sa8d = PFX(pixel_sa8d_32x64_ ## cpu)
+
+#define PIXEL_AVG(cpu) /* bi-prediction averaging for every luma PU of width >= 8; width-4 sizes are handled by PIXEL_AVG_W4 */ \
+    p.pu[LUMA_64x64].pixelavg_pp = PFX(pixel_avg_64x64_ ## cpu); \
+    p.pu[LUMA_64x48].pixelavg_pp = PFX(pixel_avg_64x48_ ## cpu); \
+    p.pu[LUMA_64x32].pixelavg_pp = PFX(pixel_avg_64x32_ ## cpu); \
+    p.pu[LUMA_64x16].pixelavg_pp = PFX(pixel_avg_64x16_ ## cpu); \
+    p.pu[LUMA_48x64].pixelavg_pp = PFX(pixel_avg_48x64_ ## cpu); \
+    p.pu[LUMA_32x64].pixelavg_pp = PFX(pixel_avg_32x64_ ## cpu); \
+    p.pu[LUMA_32x32].pixelavg_pp = PFX(pixel_avg_32x32_ ## cpu); \
+    p.pu[LUMA_32x24].pixelavg_pp = PFX(pixel_avg_32x24_ ## cpu); \
+    p.pu[LUMA_32x16].pixelavg_pp = PFX(pixel_avg_32x16_ ## cpu); \
+    p.pu[LUMA_32x8].pixelavg_pp  = PFX(pixel_avg_32x8_ ## cpu); \
+    p.pu[LUMA_24x32].pixelavg_pp = PFX(pixel_avg_24x32_ ## cpu); \
+    p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_16x64_ ## cpu); \
+    p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_16x32_ ## cpu); \
+    p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_16x16_ ## cpu); \
+    p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_16x12_ ## cpu); \
+    p.pu[LUMA_16x8].pixelavg_pp  = PFX(pixel_avg_16x8_ ## cpu); \
+    p.pu[LUMA_16x4].pixelavg_pp  = PFX(pixel_avg_16x4_ ## cpu); \
+    p.pu[LUMA_12x16].pixelavg_pp = PFX(pixel_avg_12x16_ ## cpu); \
+    p.pu[LUMA_8x32].pixelavg_pp  = PFX(pixel_avg_8x32_ ## cpu); \
+    p.pu[LUMA_8x16].pixelavg_pp  = PFX(pixel_avg_8x16_ ## cpu); \
+    p.pu[LUMA_8x8].pixelavg_pp   = PFX(pixel_avg_8x8_ ## cpu); \
+    p.pu[LUMA_8x4].pixelavg_pp   = PFX(pixel_avg_8x4_ ## cpu);
+
+#define PIXEL_AVG_W4(cpu) /* width-4 averaging kernels, split out so they can come from a different (e.g. MMX) cpu variant */ \
+    p.pu[LUMA_4x4].pixelavg_pp  = PFX(pixel_avg_4x4_ ## cpu); \
+    p.pu[LUMA_4x8].pixelavg_pp  = PFX(pixel_avg_4x8_ ## cpu); \
+    p.pu[LUMA_4x16].pixelavg_pp = PFX(pixel_avg_4x16_ ## cpu);
+
+#define CHROMA_420_FILTERS(cpu) /* all four 4-tap chroma interpolation directions/forms for every 4:2:0 PU size */ \
+    ALL_CHROMA_420_PU(filter_hpp, interp_4tap_horiz_pp, cpu); \
+    ALL_CHROMA_420_PU(filter_hps, interp_4tap_horiz_ps, cpu); \
+    ALL_CHROMA_420_PU(filter_vpp, interp_4tap_vert_pp, cpu); \
+    ALL_CHROMA_420_PU(filter_vps, interp_4tap_vert_ps, cpu);
+
+#define CHROMA_422_FILTERS(cpu) /* as above, for 4:2:2 PU sizes */ \
+    ALL_CHROMA_422_PU(filter_hpp, interp_4tap_horiz_pp, cpu); \
+    ALL_CHROMA_422_PU(filter_hps, interp_4tap_horiz_ps, cpu); \
+    ALL_CHROMA_422_PU(filter_vpp, interp_4tap_vert_pp, cpu); \
+    ALL_CHROMA_422_PU(filter_vps, interp_4tap_vert_ps, cpu);
+
+#define CHROMA_444_FILTERS(cpu) /* as above, for 4:4:4 (luma-sized) PU sizes */ \
+    ALL_CHROMA_444_PU(filter_hpp, interp_4tap_horiz_pp, cpu); \
+    ALL_CHROMA_444_PU(filter_hps, interp_4tap_horiz_ps, cpu); \
+    ALL_CHROMA_444_PU(filter_vpp, interp_4tap_vert_pp, cpu); \
+    ALL_CHROMA_444_PU(filter_vps, interp_4tap_vert_ps, cpu);
+
+#define SETUP_CHROMA_420_VSP_FUNC_DEF(W, H, cpu) /* single 4:2:0 filter_vsp binding; unlike the ALL_* macros, the cpu argument here must carry its leading underscore (no '_ ##' before cpu) */ \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_vsp = PFX(interp_4tap_vert_sp_ ## W ## x ## H ## cpu);
+
+#define CHROMA_420_VSP_FILTERS_SSE4(cpu) \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(4, 4, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(4, 2, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(2, 4, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(4, 8, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(6, 8, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(2, 8, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(16, 16, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(16, 8, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(16, 12, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(12, 16, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(16, 4, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(4, 16, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(32, 32, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(32, 16, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(16, 32, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(32, 24, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(24, 32, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(32, 8, cpu);
+
+#define CHROMA_420_VSP_FILTERS(cpu) \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(8, 2, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(8, 4, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(8, 6, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(8, 8, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(8, 16, cpu); \
+    SETUP_CHROMA_420_VSP_FUNC_DEF(8, 32, cpu);
+
+#define SETUP_CHROMA_422_VSP_FUNC_DEF(W, H, cpu) /* single 4:2:2 filter_vsp binding; cpu argument carries its leading underscore */ \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_vsp = PFX(interp_4tap_vert_sp_ ## W ## x ## H ## cpu);
+
+#define CHROMA_422_VSP_FILTERS_SSE4(cpu) \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(4, 8, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(4, 4, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(2, 8, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(4, 16, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(6, 16, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(2, 16, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(16, 32, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(16, 16, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(16, 24, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(12, 32, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(16, 8, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(4, 32, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(32, 64, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(32, 32, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(16, 64, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(32, 48, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(24, 64, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(32, 16, cpu);
+
+#define CHROMA_422_VSP_FILTERS(cpu) \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(8, 4, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(8, 8, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(8, 12, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(8, 16, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(8, 32, cpu); \
+    SETUP_CHROMA_422_VSP_FUNC_DEF(8, 64, cpu);
+
+#define SETUP_CHROMA_444_VSP_FUNC_DEF(W, H, cpu) /* single 4:4:4 filter_vsp binding (LUMA_* sizes); cpu argument carries its leading underscore */ \
+    p.chroma[X265_CSP_I444].pu[LUMA_ ## W ## x ## H].filter_vsp = PFX(interp_4tap_vert_sp_ ## W ## x ## H ## cpu);
+
+#define CHROMA_444_VSP_FILTERS_SSE4(cpu) \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(4, 4, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(4, 8, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(16, 16, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(16, 8, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(16, 12, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(12, 16, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(16, 4, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(4, 16, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(32, 32, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(32, 16, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(16, 32, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(32, 24, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(24, 32, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(32, 8, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(64, 64, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(64, 32, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(32, 64, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(64, 48, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(48, 64, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(64, 16, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(16, 64, cpu);
+
+#define CHROMA_444_VSP_FILTERS(cpu) \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(8, 8, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(8, 4, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(8, 16, cpu); \
+    SETUP_CHROMA_444_VSP_FUNC_DEF(8, 32, cpu);
+
+#define SETUP_CHROMA_420_VSS_FUNC_DEF(W, H, cpu) /* single 4:2:0 filter_vss binding; cpu argument carries its leading underscore */ \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_vss = PFX(interp_4tap_vert_ss_ ## W ## x ## H ## cpu);
+
+#define CHROMA_420_VSS_FILTERS(cpu) \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(4, 4, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(4, 2, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(8, 8, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(8, 4, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(4, 8, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(8, 6, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(8, 2, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(16, 16, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(16, 8, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(8, 16, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(16, 12, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(12, 16, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(16, 4, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(4, 16, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(32, 32, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(32, 16, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(16, 32, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(32, 24, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(24, 32, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(32, 8, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(8, 32, cpu);
+
+#define CHROMA_420_VSS_FILTERS_SSE4(cpu) \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(2, 4, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(2, 8, cpu); \
+    SETUP_CHROMA_420_VSS_FUNC_DEF(6, 8, cpu);
+
+#define SETUP_CHROMA_422_VSS_FUNC_DEF(W, H, cpu) /* single 4:2:2 filter_vss binding; cpu argument carries its leading underscore */ \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_vss = PFX(interp_4tap_vert_ss_ ## W ## x ## H ## cpu);
+
+#define CHROMA_422_VSS_FILTERS(cpu) \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(4, 8, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(4, 4, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(8, 16, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(8, 8, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(4, 16, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(8, 12, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(8, 4, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(16, 32, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(16, 16, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(8, 32, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(16, 24, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(12, 32, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(16, 8, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(4, 32, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(32, 64, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(32, 32, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(16, 64, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(32, 48, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(24, 64, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(32, 16, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(8, 64, cpu);
+
+#define CHROMA_422_VSS_FILTERS_SSE4(cpu) \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(2, 8, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(2, 16, cpu); \
+    SETUP_CHROMA_422_VSS_FUNC_DEF(6, 16, cpu);
+
+#define CHROMA_444_VSS_FILTERS(cpu) ALL_CHROMA_444_PU(filter_vss, interp_4tap_vert_ss, cpu) /* 4:4:4 vertical short-short filters for all PU sizes */
+
+#define LUMA_FILTERS(cpu) /* all 8-tap luma interpolation entries; ALL_LUMA_PU evidently omits 4x4, so each line adds it explicitly */ \
+    ALL_LUMA_PU(luma_hpp, interp_8tap_horiz_pp, cpu); p.pu[LUMA_4x4].luma_hpp = PFX(interp_8tap_horiz_pp_4x4_ ## cpu); \
+    ALL_LUMA_PU(luma_hps, interp_8tap_horiz_ps, cpu); p.pu[LUMA_4x4].luma_hps = PFX(interp_8tap_horiz_ps_4x4_ ## cpu); \
+    ALL_LUMA_PU(luma_vpp, interp_8tap_vert_pp, cpu); p.pu[LUMA_4x4].luma_vpp = PFX(interp_8tap_vert_pp_4x4_ ## cpu); \
+    ALL_LUMA_PU(luma_vps, interp_8tap_vert_ps, cpu); p.pu[LUMA_4x4].luma_vps = PFX(interp_8tap_vert_ps_4x4_ ## cpu); \
+    ALL_LUMA_PU(luma_vsp, interp_8tap_vert_sp, cpu); p.pu[LUMA_4x4].luma_vsp = PFX(interp_8tap_vert_sp_4x4_ ## cpu); \
+    ALL_LUMA_PU_T(luma_hvpp, interp_8tap_hv_pp_cpu); p.pu[LUMA_4x4].luma_hvpp = interp_8tap_hv_pp_cpu<LUMA_4x4>; /* hvpp uses the C++ template below, not an asm PFX symbol */
+
+#define LUMA_VSS_FILTERS(cpu) ALL_LUMA_PU(luma_vss, interp_8tap_vert_ss, cpu); p.pu[LUMA_4x4].luma_vss = PFX(interp_8tap_vert_ss_4x4_ ## cpu) /* vertical short-short, again with explicit 4x4 */
+
+#define LUMA_CU_BLOCKCOPY(type, cpu) /* blockcopy of the given type (e.g. pp/ps/sp) for all luma CU sizes, 4x4 added explicitly */ \
+    p.cu[BLOCK_4x4].copy_ ## type = PFX(blockcopy_ ## type ## _4x4_ ## cpu); \
+    ALL_LUMA_CU(copy_ ## type, blockcopy_ ## type, cpu);
+
+#define CHROMA_420_CU_BLOCKCOPY(type, cpu) ALL_CHROMA_420_CU(copy_ ## type, blockcopy_ ## type, cpu) /* 4:2:0 chroma CU blockcopy */
+#define CHROMA_422_CU_BLOCKCOPY(type, cpu) ALL_CHROMA_422_CU(copy_ ## type, blockcopy_ ## type, cpu) /* 4:2:2 chroma CU blockcopy */
+
+#define LUMA_PU_BLOCKCOPY(type, cpu)       ALL_LUMA_PU(copy_ ## type, blockcopy_ ## type, cpu); p.pu[LUMA_4x4].copy_ ## type = PFX(blockcopy_ ## type ## _4x4_ ## cpu) /* luma PU blockcopy, explicit 4x4 */
+#define CHROMA_420_PU_BLOCKCOPY(type, cpu) ALL_CHROMA_420_PU(copy_ ## type, blockcopy_ ## type, cpu) /* 4:2:0 chroma PU blockcopy */
+#define CHROMA_422_PU_BLOCKCOPY(type, cpu) ALL_CHROMA_422_PU(copy_ ## type, blockcopy_ ## type, cpu) /* 4:2:2 chroma PU blockcopy */
+
+#define LUMA_PIXELSUB(cpu) /* residual subtract/add for all luma CU sizes, 4x4 added explicitly */ \
+    p.cu[BLOCK_4x4].sub_ps = PFX(pixel_sub_ps_4x4_ ## cpu); \
+    p.cu[BLOCK_4x4].add_ps = PFX(pixel_add_ps_4x4_ ## cpu); \
+    ALL_LUMA_CU(sub_ps, pixel_sub_ps, cpu); \
+    ALL_LUMA_CU(add_ps, pixel_add_ps, cpu);
+
+#define CHROMA_420_PIXELSUB_PS(cpu) /* residual subtract/add for 4:2:0 chroma CU sizes */ \
+    ALL_CHROMA_420_CU(sub_ps, pixel_sub_ps, cpu); \
+    ALL_CHROMA_420_CU(add_ps, pixel_add_ps, cpu);
+
+#define CHROMA_422_PIXELSUB_PS(cpu) /* residual subtract/add for 4:2:2 chroma CU sizes */ \
+    ALL_CHROMA_422_CU(sub_ps, pixel_sub_ps, cpu); \
+    ALL_CHROMA_422_CU(add_ps, pixel_add_ps, cpu);
+
+#define LUMA_VAR(cpu)          ALL_LUMA_CU(var, pixel_var, cpu) /* variance for all luma CU sizes */
+
+#define LUMA_ADDAVG(cpu)       ALL_LUMA_PU(addAvg, addAvg, cpu); p.pu[LUMA_4x4].addAvg = PFX(addAvg_4x4_ ## cpu) /* weighted bi-pred average, explicit 4x4 */
+#define CHROMA_420_ADDAVG(cpu) ALL_CHROMA_420_PU(addAvg, addAvg, cpu); /* 4:2:0 chroma addAvg */
+#define CHROMA_422_ADDAVG(cpu) ALL_CHROMA_422_PU(addAvg, addAvg, cpu); /* 4:2:2 chroma addAvg */
+
+#define SETUP_INTRA_ANG_COMMON(mode, fno, cpu) /* bind angular intra mode 'mode' to kernel number 'fno' for all four block sizes */ \
+    p.cu[BLOCK_4x4].intra_pred[mode] = PFX(intra_pred_ang4_ ## fno ## _ ## cpu); \
+    p.cu[BLOCK_8x8].intra_pred[mode] = PFX(intra_pred_ang8_ ## fno ## _ ## cpu); \
+    p.cu[BLOCK_16x16].intra_pred[mode] = PFX(intra_pred_ang16_ ## fno ## _ ## cpu); \
+    p.cu[BLOCK_32x32].intra_pred[mode] = PFX(intra_pred_ang32_ ## fno ## _ ## cpu);
+
+#define SETUP_INTRA_ANG4(mode, fno, cpu) /* 4x4 entry only */ \
+    p.cu[BLOCK_4x4].intra_pred[mode] = PFX(intra_pred_ang4_ ## fno ## _ ## cpu);
+
+#define SETUP_INTRA_ANG16_32(mode, fno, cpu) /* 16x16 and 32x32 entries only */ \
+    p.cu[BLOCK_16x16].intra_pred[mode] = PFX(intra_pred_ang16_ ## fno ## _ ## cpu); \
+    p.cu[BLOCK_32x32].intra_pred[mode] = PFX(intra_pred_ang32_ ## fno ## _ ## cpu);
+
+#define SETUP_INTRA_ANG4_8(mode, fno, cpu) /* 4x4 and 8x8 entries only */ \
+    p.cu[BLOCK_4x4].intra_pred[mode] = PFX(intra_pred_ang4_ ## fno ## _ ## cpu); \
+    p.cu[BLOCK_8x8].intra_pred[mode] = PFX(intra_pred_ang8_ ## fno ## _ ## cpu);
+
+#define INTRA_ANG_SSSE3(cpu) /* angular modes 2 and 34 share the same kernel (fno 2) */ \
+    SETUP_INTRA_ANG_COMMON(2, 2, cpu); \
+    SETUP_INTRA_ANG_COMMON(34, 2, cpu);
+
+#define INTRA_ANG_SSE4_COMMON(cpu) /* modes 3..18 map 1:1 to their kernel numbers for every block size */ \
+    SETUP_INTRA_ANG_COMMON(3,  3,  cpu); \
+    SETUP_INTRA_ANG_COMMON(4,  4,  cpu); \
+    SETUP_INTRA_ANG_COMMON(5,  5,  cpu); \
+    SETUP_INTRA_ANG_COMMON(6,  6,  cpu); \
+    SETUP_INTRA_ANG_COMMON(7,  7,  cpu); \
+    SETUP_INTRA_ANG_COMMON(8,  8,  cpu); \
+    SETUP_INTRA_ANG_COMMON(9,  9,  cpu); \
+    SETUP_INTRA_ANG_COMMON(10, 10, cpu); \
+    SETUP_INTRA_ANG_COMMON(11, 11, cpu); \
+    SETUP_INTRA_ANG_COMMON(12, 12, cpu); \
+    SETUP_INTRA_ANG_COMMON(13, 13, cpu); \
+    SETUP_INTRA_ANG_COMMON(14, 14, cpu); \
+    SETUP_INTRA_ANG_COMMON(15, 15, cpu); \
+    SETUP_INTRA_ANG_COMMON(16, 16, cpu); \
+    SETUP_INTRA_ANG_COMMON(17, 17, cpu); \
+    SETUP_INTRA_ANG_COMMON(18, 18, cpu);
+
+#define SETUP_INTRA_ANG_HIGH(mode, fno, cpu) /* like SETUP_INTRA_ANG_COMMON but without the 4x4 entry */ \
+    p.cu[BLOCK_8x8].intra_pred[mode] = PFX(intra_pred_ang8_ ## fno ## _ ## cpu); \
+    p.cu[BLOCK_16x16].intra_pred[mode] = PFX(intra_pred_ang16_ ## fno ## _ ## cpu); \
+    p.cu[BLOCK_32x32].intra_pred[mode] = PFX(intra_pred_ang32_ ## fno ## _ ## cpu);
+
+#define INTRA_ANG_SSE4_HIGH(cpu) \
+    SETUP_INTRA_ANG_HIGH(19, 19, cpu); \
+    SETUP_INTRA_ANG_HIGH(20, 20, cpu); \
+    SETUP_INTRA_ANG_HIGH(21, 21, cpu); \
+    SETUP_INTRA_ANG_HIGH(22, 22, cpu); \
+    SETUP_INTRA_ANG_HIGH(23, 23, cpu); \
+    SETUP_INTRA_ANG_HIGH(24, 24, cpu); \
+    SETUP_INTRA_ANG_HIGH(25, 25, cpu); \
+    SETUP_INTRA_ANG_HIGH(26, 26, cpu); \
+    SETUP_INTRA_ANG_HIGH(27, 27, cpu); \
+    SETUP_INTRA_ANG_HIGH(28, 28, cpu); \
+    SETUP_INTRA_ANG_HIGH(29, 29, cpu); \
+    SETUP_INTRA_ANG_HIGH(30, 30, cpu); \
+    SETUP_INTRA_ANG_HIGH(31, 31, cpu); \
+    SETUP_INTRA_ANG_HIGH(32, 32, cpu); \
+    SETUP_INTRA_ANG_HIGH(33, 33, cpu); \
+    SETUP_INTRA_ANG4(19, 17, cpu); \
+    SETUP_INTRA_ANG4(20, 16, cpu); \
+    SETUP_INTRA_ANG4(21, 15, cpu); \
+    SETUP_INTRA_ANG4(22, 14, cpu); \
+    SETUP_INTRA_ANG4(23, 13, cpu); \
+    SETUP_INTRA_ANG4(24, 12, cpu); \
+    SETUP_INTRA_ANG4(25, 11, cpu); \
+    SETUP_INTRA_ANG4(26, 26, cpu); \
+    SETUP_INTRA_ANG4(27, 9, cpu); \
+    SETUP_INTRA_ANG4(28, 8, cpu); \
+    SETUP_INTRA_ANG4(29, 7, cpu); \
+    SETUP_INTRA_ANG4(30, 6, cpu); \
+    SETUP_INTRA_ANG4(31, 5, cpu); \
+    SETUP_INTRA_ANG4(32, 4, cpu); \
+    SETUP_INTRA_ANG4(33, 3, cpu);
+
+#define INTRA_ANG_SSE4(cpu) \
+    SETUP_INTRA_ANG4_8(19, 17, cpu); \
+    SETUP_INTRA_ANG4_8(20, 16, cpu); \
+    SETUP_INTRA_ANG4_8(21, 15, cpu); \
+    SETUP_INTRA_ANG4_8(22, 14, cpu); \
+    SETUP_INTRA_ANG4_8(23, 13, cpu); \
+    SETUP_INTRA_ANG4_8(24, 12, cpu); \
+    SETUP_INTRA_ANG4_8(25, 11, cpu); \
+    SETUP_INTRA_ANG4_8(26, 26, cpu); \
+    SETUP_INTRA_ANG4_8(27, 9, cpu); \
+    SETUP_INTRA_ANG4_8(28, 8, cpu); \
+    SETUP_INTRA_ANG4_8(29, 7, cpu); \
+    SETUP_INTRA_ANG4_8(30, 6, cpu); \
+    SETUP_INTRA_ANG4_8(31, 5, cpu); \
+    SETUP_INTRA_ANG4_8(32, 4, cpu); \
+    SETUP_INTRA_ANG4_8(33, 3, cpu); \
+    SETUP_INTRA_ANG16_32(19, 19, cpu); \
+    SETUP_INTRA_ANG16_32(20, 20, cpu); \
+    SETUP_INTRA_ANG16_32(21, 21, cpu); \
+    SETUP_INTRA_ANG16_32(22, 22, cpu); \
+    SETUP_INTRA_ANG16_32(23, 23, cpu); \
+    SETUP_INTRA_ANG16_32(24, 24, cpu); \
+    SETUP_INTRA_ANG16_32(25, 25, cpu); \
+    SETUP_INTRA_ANG16_32(26, 26, cpu); \
+    SETUP_INTRA_ANG16_32(27, 27, cpu); \
+    SETUP_INTRA_ANG16_32(28, 28, cpu); \
+    SETUP_INTRA_ANG16_32(29, 29, cpu); \
+    SETUP_INTRA_ANG16_32(30, 30, cpu); \
+    SETUP_INTRA_ANG16_32(31, 31, cpu); \
+    SETUP_INTRA_ANG16_32(32, 32, cpu); \
+    SETUP_INTRA_ANG16_32(33, 33, cpu);
+
+#define CHROMA_420_VERT_FILTERS(cpu) /* all four vertical 4-tap forms for the 4:2:0 PU sizes with width >= 4 (the 4x4 variant of the ALL_* macro) */ \
+    ALL_CHROMA_420_4x4_PU(filter_vss, interp_4tap_vert_ss, cpu); \
+    ALL_CHROMA_420_4x4_PU(filter_vpp, interp_4tap_vert_pp, cpu); \
+    ALL_CHROMA_420_4x4_PU(filter_vps, interp_4tap_vert_ps, cpu); \
+    ALL_CHROMA_420_4x4_PU(filter_vsp, interp_4tap_vert_sp, cpu)
+
+#define SETUP_CHROMA_420_VERT_FUNC_DEF(W, H, cpu) /* all four vertical forms for a single 4:2:0 WxH PU; cpu argument carries its leading underscore */ \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_vss = PFX(interp_4tap_vert_ss_ ## W ## x ## H ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_vpp = PFX(interp_4tap_vert_pp_ ## W ## x ## H ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_vps = PFX(interp_4tap_vert_ps_ ## W ## x ## H ## cpu); \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].filter_vsp = PFX(interp_4tap_vert_sp_ ## W ## x ## H ## cpu);
+
+#define CHROMA_420_VERT_FILTERS_SSE4(cpu) /* the narrow (width < 4 or 6-wide) 4:2:0 sizes excluded from CHROMA_420_VERT_FILTERS */ \
+    SETUP_CHROMA_420_VERT_FUNC_DEF(2, 4, cpu); \
+    SETUP_CHROMA_420_VERT_FUNC_DEF(2, 8, cpu); \
+    SETUP_CHROMA_420_VERT_FUNC_DEF(4, 2, cpu); \
+    SETUP_CHROMA_420_VERT_FUNC_DEF(6, 8, cpu);
+
+#define SETUP_CHROMA_422_VERT_FUNC_DEF(W, H, cpu) /* all four vertical forms for a single 4:2:2 WxH PU; cpu argument carries its leading underscore */ \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_vss = PFX(interp_4tap_vert_ss_ ## W ## x ## H ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_vpp = PFX(interp_4tap_vert_pp_ ## W ## x ## H ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_vps = PFX(interp_4tap_vert_ps_ ## W ## x ## H ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_vsp = PFX(interp_4tap_vert_sp_ ## W ## x ## H ## cpu);
+
+#define CHROMA_422_VERT_FILTERS(cpu) \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(4, 8, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(8, 16, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(8, 8, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(4, 16, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(8, 12, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(8, 4, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(16, 32, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(16, 16, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(8, 32, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(16, 24, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(12, 32, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(16, 8, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(4, 32, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(32, 64, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(32, 32, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(16, 64, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(32, 48, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(24, 64, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(32, 16, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(8, 64, cpu);
+
+#define CHROMA_422_VERT_FILTERS_SSE4(cpu) \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(2, 8, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(2, 16, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(4, 4, cpu); \
+    SETUP_CHROMA_422_VERT_FUNC_DEF(6, 16, cpu);
+
+#define CHROMA_444_VERT_FILTERS(cpu) /* all four vertical 4-tap forms for every 4:4:4 (luma-sized) PU */ \
+    ALL_CHROMA_444_PU(filter_vss, interp_4tap_vert_ss, cpu); \
+    ALL_CHROMA_444_PU(filter_vpp, interp_4tap_vert_pp, cpu); \
+    ALL_CHROMA_444_PU(filter_vps, interp_4tap_vert_ps, cpu); \
+    ALL_CHROMA_444_PU(filter_vsp, interp_4tap_vert_sp, cpu)
+
+#define CHROMA_420_HORIZ_FILTERS(cpu) /* horizontal 4-tap pp/ps forms for every 4:2:0 PU */ \
+    ALL_CHROMA_420_PU(filter_hpp, interp_4tap_horiz_pp, cpu); \
+    ALL_CHROMA_420_PU(filter_hps, interp_4tap_horiz_ps, cpu);
+
+#define SETUP_CHROMA_422_HORIZ_FUNC_DEF(W, H, cpu) /* horizontal pp/ps for a single 4:2:2 WxH PU; cpu argument carries its leading underscore */ \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_hpp = PFX(interp_4tap_horiz_pp_ ## W ## x ## H ## cpu); \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].filter_hps = PFX(interp_4tap_horiz_ps_ ## W ## x ## H ## cpu);
+
+#define CHROMA_422_HORIZ_FILTERS(cpu) \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(4, 8, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(4, 4, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(2, 8, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(8, 16, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(8, 8, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(4, 16, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(8, 12, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(6, 16, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(8, 4, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(2, 16, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(16, 32, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(16, 16, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(8, 32, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(16, 24, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(12, 32, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(16, 8, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(4, 32, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(32, 64, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(32, 32, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(16, 64, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(32, 48, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(24, 64, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(32, 16, cpu); \
+    SETUP_CHROMA_422_HORIZ_FUNC_DEF(8, 64, cpu);
+
+#define CHROMA_444_HORIZ_FILTERS(cpu) /* horizontal 4-tap pp/ps forms for every 4:4:4 (luma-sized) PU */ \
+    ALL_CHROMA_444_PU(filter_hpp, interp_4tap_horiz_pp, cpu); \
+    ALL_CHROMA_444_PU(filter_hps, interp_4tap_horiz_ps, cpu);
+
+namespace X265_NS {
+// private x265 namespace
+
+template<int size> // combined 8-tap H+V luma interpolation built from the separable luma_hps/luma_vsp primitives; idxX/idxY are passed through as the sub-pel filter indices
+void interp_8tap_hv_pp_cpu(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int idxX, int idxY)
+{
+    ALIGN_VAR_32(int16_t, immed[MAX_CU_SIZE * (MAX_CU_SIZE + NTAPS_LUMA)]); // 32-byte-aligned 16-bit intermediate, with NTAPS_LUMA extra rows for the vertical filter
+    const int filterSize = NTAPS_LUMA;
+    const int halfFilterSize = filterSize >> 1;
+
+    primitives.pu[size].luma_hps(src, srcStride, immed, MAX_CU_SIZE, idxX, 1); // horizontal pass, pixel -> short; last arg 1 presumably requests the extra margin rows — TODO confirm
+    primitives.pu[size].luma_vsp(immed + (halfFilterSize - 1) * MAX_CU_SIZE, MAX_CU_SIZE, dst, dstStride, idxY); // vertical pass starts (halfFilterSize - 1) rows into the intermediate
+}
+
+#if HIGH_BIT_DEPTH
+
+void setupAssemblyPrimitives(EncoderPrimitives &p, int cpuMask) // Main10
+{
+#if !defined(X86_64)
+#error "Unsupported build configuration (32bit x86 and HIGH_BIT_DEPTH), you must configure ENABLE_ASSEMBLY=OFF"
+#endif
+
+#if X86_64
+    p.scanPosLast = PFX(scanPosLast_x64);
+#endif
+
+    if (cpuMask & X265_CPU_SSE2)
+    {
+        /* We do not differentiate CPUs which support MMX and not SSE2. We only check
+         * for SSE2 and then use both MMX and SSE2 functions */
+        AVC_LUMA_PU(sad, mmx2);
+
+        p.pu[LUMA_16x16].sad = PFX(pixel_sad_16x16_sse2);
+        p.pu[LUMA_16x8].sad  = PFX(pixel_sad_16x8_sse2);
+        p.pu[LUMA_8x16].sad  = PFX(pixel_sad_8x16_sse2);
+        HEVC_SAD(sse2);
+
+        p.pu[LUMA_4x4].sad_x3   = PFX(pixel_sad_x3_4x4_mmx2);
+        p.pu[LUMA_4x8].sad_x3   = PFX(pixel_sad_x3_4x8_mmx2);
+        p.pu[LUMA_4x16].sad_x3  = PFX(pixel_sad_x3_4x16_mmx2);
+        p.pu[LUMA_8x4].sad_x3   = PFX(pixel_sad_x3_8x4_sse2);
+        p.pu[LUMA_8x8].sad_x3   = PFX(pixel_sad_x3_8x8_sse2);
+        p.pu[LUMA_8x16].sad_x3  = PFX(pixel_sad_x3_8x16_sse2);
+        p.pu[LUMA_8x32].sad_x3  = PFX(pixel_sad_x3_8x32_sse2);
+        p.pu[LUMA_16x4].sad_x3  = PFX(pixel_sad_x3_16x4_sse2);
+        p.pu[LUMA_12x16].sad_x3 = PFX(pixel_sad_x3_12x16_mmx2);
+        HEVC_SAD_X3(sse2);
+
+        p.pu[LUMA_4x4].sad_x4   = PFX(pixel_sad_x4_4x4_mmx2);
+        p.pu[LUMA_4x8].sad_x4   = PFX(pixel_sad_x4_4x8_mmx2);
+        p.pu[LUMA_4x16].sad_x4  = PFX(pixel_sad_x4_4x16_mmx2);
+        p.pu[LUMA_8x4].sad_x4   = PFX(pixel_sad_x4_8x4_sse2);
+        p.pu[LUMA_8x8].sad_x4   = PFX(pixel_sad_x4_8x8_sse2);
+        p.pu[LUMA_8x16].sad_x4  = PFX(pixel_sad_x4_8x16_sse2);
+        p.pu[LUMA_8x32].sad_x4  = PFX(pixel_sad_x4_8x32_sse2);
+        p.pu[LUMA_16x4].sad_x4  = PFX(pixel_sad_x4_16x4_sse2);
+        p.pu[LUMA_12x16].sad_x4 = PFX(pixel_sad_x4_12x16_mmx2);
+        HEVC_SAD_X4(sse2);
+
+        p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_mmx2);
+        ALL_LUMA_PU(satd, pixel_satd, sse2);
+
+#if X265_DEPTH <= 10
+        ASSIGN_SA8D(sse2);
+#endif /* X265_DEPTH <= 10 */
+        LUMA_PIXELSUB(sse2);
+        CHROMA_420_PIXELSUB_PS(sse2);
+        CHROMA_422_PIXELSUB_PS(sse2);
+
+        LUMA_CU_BLOCKCOPY(ss, sse2);
+        CHROMA_420_CU_BLOCKCOPY(ss, sse2);
+        CHROMA_422_CU_BLOCKCOPY(ss, sse2);
+
+        p.pu[LUMA_4x4].copy_pp = (copy_pp_t)PFX(blockcopy_ss_4x4_sse2);
+        ALL_LUMA_PU_TYPED(copy_pp, (copy_pp_t), blockcopy_ss, sse2);
+        ALL_CHROMA_420_PU_TYPED(copy_pp, (copy_pp_t), blockcopy_ss, sse2);
+        ALL_CHROMA_422_PU_TYPED(copy_pp, (copy_pp_t), blockcopy_ss, sse2);
+
+        CHROMA_420_VERT_FILTERS(sse2);
+        CHROMA_422_VERT_FILTERS(_sse2);
+        CHROMA_444_VERT_FILTERS(sse2);
+
+        ALL_LUMA_PU(luma_hpp, interp_8tap_horiz_pp, sse2);
+        p.pu[LUMA_4x4].luma_hpp = PFX(interp_8tap_horiz_pp_4x4_sse2);
+        ALL_LUMA_PU(luma_hps, interp_8tap_horiz_ps, sse2);
+        p.pu[LUMA_4x4].luma_hps = PFX(interp_8tap_horiz_ps_4x4_sse2);
+        ALL_LUMA_PU(luma_vpp, interp_8tap_vert_pp, sse2);
+        ALL_LUMA_PU(luma_vps, interp_8tap_vert_ps, sse2);
+
+        p.ssim_4x4x2_core = PFX(pixel_ssim_4x4x2_core_sse2);
+        p.ssim_end_4 = PFX(pixel_ssim_end4_sse2);
+        PIXEL_AVG(sse2);
+        PIXEL_AVG_W4(mmx2);
+        LUMA_VAR(sse2);
+
+
+        ALL_LUMA_TU(blockfill_s, blockfill_s, sse2);
+        ALL_LUMA_TU_S(cpy1Dto2D_shr, cpy1Dto2D_shr_, sse2);
+        ALL_LUMA_TU_S(cpy1Dto2D_shl, cpy1Dto2D_shl_, sse2);
+        ALL_LUMA_TU_S(cpy2Dto1D_shr, cpy2Dto1D_shr_, sse2);
+        ALL_LUMA_TU_S(cpy2Dto1D_shl, cpy2Dto1D_shl_, sse2);
+        ALL_LUMA_TU_S(ssd_s, pixel_ssd_s_, sse2);
+        ALL_LUMA_TU_S(calcresidual, getResidual, sse2);
+        ALL_LUMA_TU_S(transpose, transpose, sse2);
+
+#if X265_DEPTH <= 10
+        ALL_LUMA_TU_S(intra_pred[PLANAR_IDX], intra_pred_planar, sse2);
+#endif /* X265_DEPTH <= 10 */
+        ALL_LUMA_TU_S(intra_pred[DC_IDX], intra_pred_dc, sse2);
+
+        p.cu[BLOCK_4x4].intra_pred[2] = PFX(intra_pred_ang4_2_sse2);
+        p.cu[BLOCK_4x4].intra_pred[3] = PFX(intra_pred_ang4_3_sse2);
+        p.cu[BLOCK_4x4].intra_pred[4] = PFX(intra_pred_ang4_4_sse2);
+        p.cu[BLOCK_4x4].intra_pred[5] = PFX(intra_pred_ang4_5_sse2);
+        p.cu[BLOCK_4x4].intra_pred[6] = PFX(intra_pred_ang4_6_sse2);
+        p.cu[BLOCK_4x4].intra_pred[7] = PFX(intra_pred_ang4_7_sse2);
+        p.cu[BLOCK_4x4].intra_pred[8] = PFX(intra_pred_ang4_8_sse2);
+        p.cu[BLOCK_4x4].intra_pred[9] = PFX(intra_pred_ang4_9_sse2);
+        p.cu[BLOCK_4x4].intra_pred[10] = PFX(intra_pred_ang4_10_sse2);
+        p.cu[BLOCK_4x4].intra_pred[11] = PFX(intra_pred_ang4_11_sse2);
+        p.cu[BLOCK_4x4].intra_pred[12] = PFX(intra_pred_ang4_12_sse2);
+        p.cu[BLOCK_4x4].intra_pred[13] = PFX(intra_pred_ang4_13_sse2);
+        p.cu[BLOCK_4x4].intra_pred[14] = PFX(intra_pred_ang4_14_sse2);
+        p.cu[BLOCK_4x4].intra_pred[15] = PFX(intra_pred_ang4_15_sse2);
+        p.cu[BLOCK_4x4].intra_pred[16] = PFX(intra_pred_ang4_16_sse2);
+        p.cu[BLOCK_4x4].intra_pred[17] = PFX(intra_pred_ang4_17_sse2);
+        p.cu[BLOCK_4x4].intra_pred[18] = PFX(intra_pred_ang4_18_sse2);
+        p.cu[BLOCK_4x4].intra_pred[19] = PFX(intra_pred_ang4_19_sse2);
+        p.cu[BLOCK_4x4].intra_pred[20] = PFX(intra_pred_ang4_20_sse2);
+        p.cu[BLOCK_4x4].intra_pred[21] = PFX(intra_pred_ang4_21_sse2);
+        p.cu[BLOCK_4x4].intra_pred[22] = PFX(intra_pred_ang4_22_sse2);
+        p.cu[BLOCK_4x4].intra_pred[23] = PFX(intra_pred_ang4_23_sse2);
+        p.cu[BLOCK_4x4].intra_pred[24] = PFX(intra_pred_ang4_24_sse2);
+        p.cu[BLOCK_4x4].intra_pred[25] = PFX(intra_pred_ang4_25_sse2);
+        p.cu[BLOCK_4x4].intra_pred[26] = PFX(intra_pred_ang4_26_sse2);
+        p.cu[BLOCK_4x4].intra_pred[27] = PFX(intra_pred_ang4_27_sse2);
+        p.cu[BLOCK_4x4].intra_pred[28] = PFX(intra_pred_ang4_28_sse2);
+        p.cu[BLOCK_4x4].intra_pred[29] = PFX(intra_pred_ang4_29_sse2);
+        p.cu[BLOCK_4x4].intra_pred[30] = PFX(intra_pred_ang4_30_sse2);
+        p.cu[BLOCK_4x4].intra_pred[31] = PFX(intra_pred_ang4_31_sse2);
+        p.cu[BLOCK_4x4].intra_pred[32] = PFX(intra_pred_ang4_32_sse2);
+        p.cu[BLOCK_4x4].intra_pred[33] = PFX(intra_pred_ang4_33_sse2);
+
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_4x8_mmx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_8x16_sse2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_16x32_sse2);
+
+#if X265_DEPTH <= 10
+        p.cu[BLOCK_4x4].sse_ss = PFX(pixel_ssd_ss_4x4_mmx2);
+        ALL_LUMA_CU(sse_ss, pixel_ssd_ss, sse2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_32x64_sse2);
+#endif
+
+        p.cu[BLOCK_4x4].dct = PFX(dct4_sse2);
+        p.cu[BLOCK_8x8].dct = PFX(dct8_sse2);
+        p.cu[BLOCK_4x4].idct = PFX(idct4_sse2);
+        p.cu[BLOCK_8x8].idct = PFX(idct8_sse2);
+
+        p.idst4x4 = PFX(idst4_sse2);
+        p.dst4x4 = PFX(dst4_sse2);
+
+        LUMA_VSS_FILTERS(sse2);
+
+        p.frameInitLowres = PFX(frame_init_lowres_core_sse2);
+        // TODO: planecopy_sp is really planecopy_SC now; this must be fixed
+        //p.planecopy_sp = PFX(downShift_16_sse2);
+        p.planecopy_sp_shl = PFX(upShift_16_sse2);
+
+        ALL_CHROMA_420_PU(p2s, filterPixelToShort, sse2);
+        ALL_CHROMA_422_PU(p2s, filterPixelToShort, sse2);
+        ALL_CHROMA_444_PU(p2s, filterPixelToShort, sse2);
+        ALL_LUMA_PU(convert_p2s, filterPixelToShort, sse2);
+        ALL_LUMA_TU(count_nonzero, count_nonzero, sse2);
+    }
+    if (cpuMask & X265_CPU_SSE3)
+    {
+        ALL_CHROMA_420_PU(filter_hpp, interp_4tap_horiz_pp, sse3);
+        ALL_CHROMA_422_PU(filter_hpp, interp_4tap_horiz_pp, sse3);
+        ALL_CHROMA_444_PU(filter_hpp, interp_4tap_horiz_pp, sse3);
+        ALL_CHROMA_420_PU(filter_hps, interp_4tap_horiz_ps, sse3);
+        ALL_CHROMA_422_PU(filter_hps, interp_4tap_horiz_ps, sse3);
+        ALL_CHROMA_444_PU(filter_hps, interp_4tap_horiz_ps, sse3);
+    }
+    if (cpuMask & X265_CPU_SSSE3)
+    {
+        p.scale1D_128to64 = PFX(scale1D_128to64_ssse3);
+        p.scale2D_64to32 = PFX(scale2D_64to32_ssse3);
+
+        // p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_ssse3); this one is broken
+        ALL_LUMA_PU(satd, pixel_satd, ssse3);
+#if X265_DEPTH <= 10
+        ASSIGN_SA8D(ssse3);
+#endif
+        INTRA_ANG_SSSE3(ssse3);
+
+        p.dst4x4 = PFX(dst4_ssse3);
+        p.cu[BLOCK_8x8].idct = PFX(idct8_ssse3);
+
+        p.frameInitLowres = PFX(frame_init_lowres_core_ssse3);
+
+        ALL_LUMA_PU(convert_p2s, filterPixelToShort, ssse3);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].p2s = PFX(filterPixelToShort_4x4_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].p2s = PFX(filterPixelToShort_4x8_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].p2s = PFX(filterPixelToShort_4x16_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].p2s = PFX(filterPixelToShort_8x4_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].p2s = PFX(filterPixelToShort_8x8_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].p2s = PFX(filterPixelToShort_8x16_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].p2s = PFX(filterPixelToShort_8x32_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].p2s = PFX(filterPixelToShort_16x4_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].p2s = PFX(filterPixelToShort_16x8_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].p2s = PFX(filterPixelToShort_16x12_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].p2s = PFX(filterPixelToShort_16x16_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].p2s = PFX(filterPixelToShort_16x32_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].p2s = PFX(filterPixelToShort_32x8_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].p2s = PFX(filterPixelToShort_32x16_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].p2s = PFX(filterPixelToShort_32x24_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].p2s = PFX(filterPixelToShort_32x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].p2s = PFX(filterPixelToShort_4x4_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].p2s = PFX(filterPixelToShort_4x8_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].p2s = PFX(filterPixelToShort_4x16_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].p2s = PFX(filterPixelToShort_4x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].p2s = PFX(filterPixelToShort_8x4_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].p2s = PFX(filterPixelToShort_8x8_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].p2s = PFX(filterPixelToShort_8x12_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].p2s = PFX(filterPixelToShort_8x16_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].p2s = PFX(filterPixelToShort_8x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].p2s = PFX(filterPixelToShort_8x64_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].p2s = PFX(filterPixelToShort_12x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].p2s = PFX(filterPixelToShort_16x8_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].p2s = PFX(filterPixelToShort_16x16_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].p2s = PFX(filterPixelToShort_16x24_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].p2s = PFX(filterPixelToShort_16x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].p2s = PFX(filterPixelToShort_16x64_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].p2s = PFX(filterPixelToShort_24x64_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].p2s = PFX(filterPixelToShort_32x16_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].p2s = PFX(filterPixelToShort_32x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].p2s = PFX(filterPixelToShort_32x48_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].p2s = PFX(filterPixelToShort_32x64_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].p2s = PFX(filterPixelToShort_4x2_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].p2s = PFX(filterPixelToShort_8x2_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].p2s = PFX(filterPixelToShort_8x6_ssse3);
+        p.findPosFirstLast = PFX(findPosFirstLast_ssse3);
+    }
+    if (cpuMask & X265_CPU_SSE4)
+    {
+        p.saoCuOrgE0 = PFX(saoCuOrgE0_sse4);
+        p.saoCuOrgE1 = PFX(saoCuOrgE1_sse4);
+        p.saoCuOrgE1_2Rows = PFX(saoCuOrgE1_2Rows_sse4);
+        p.saoCuOrgE2[0] = PFX(saoCuOrgE2_sse4);
+        p.saoCuOrgE2[1] = PFX(saoCuOrgE2_sse4);
+        p.saoCuOrgE3[0] = PFX(saoCuOrgE3_sse4);
+        p.saoCuOrgE3[1] = PFX(saoCuOrgE3_sse4);
+        p.saoCuOrgB0 = PFX(saoCuOrgB0_sse4);
+        p.sign = PFX(calSign_sse4);
+
+        LUMA_ADDAVG(sse4);
+        CHROMA_420_ADDAVG(sse4);
+        CHROMA_422_ADDAVG(sse4);
+
+        LUMA_FILTERS(sse4);
+        CHROMA_420_HORIZ_FILTERS(sse4);
+        CHROMA_420_VERT_FILTERS_SSE4(_sse4);
+        CHROMA_422_HORIZ_FILTERS(_sse4);
+        CHROMA_422_VERT_FILTERS_SSE4(_sse4);
+        CHROMA_444_HORIZ_FILTERS(sse4);
+
+        p.cu[BLOCK_8x8].dct = PFX(dct8_sse4);
+        p.quant = PFX(quant_sse4);
+        p.nquant = PFX(nquant_sse4);
+        p.dequant_normal = PFX(dequant_normal_sse4);
+        p.dequant_scaling = PFX(dequant_scaling_sse4);
+
+        // p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_sse4); fails tests
+        ALL_LUMA_PU(satd, pixel_satd, sse4);
+#if X265_DEPTH <= 10
+        ASSIGN_SA8D(sse4);
+#endif
+
+        p.cu[BLOCK_4x4].intra_filter = PFX(intra_filter_4x4_sse4);
+        p.cu[BLOCK_8x8].intra_filter = PFX(intra_filter_8x8_sse4);
+        p.cu[BLOCK_16x16].intra_filter = PFX(intra_filter_16x16_sse4);
+        p.cu[BLOCK_32x32].intra_filter = PFX(intra_filter_32x32_sse4);
+
+#if X265_DEPTH <= 10
+        ALL_LUMA_TU_S(intra_pred[PLANAR_IDX], intra_pred_planar, sse4);
+#endif
+        ALL_LUMA_TU_S(intra_pred[DC_IDX], intra_pred_dc, sse4);
+        INTRA_ANG_SSE4_COMMON(sse4);
+        INTRA_ANG_SSE4_HIGH(sse4);
+
+        p.planecopy_cp = PFX(upShift_8_sse4);
+        p.weight_pp = PFX(weight_pp_sse4);
+        p.weight_sp = PFX(weight_sp_sse4);
+
+        p.cu[BLOCK_4x4].psy_cost_pp = PFX(psyCost_pp_4x4_sse4);
+        p.cu[BLOCK_4x4].psy_cost_ss = PFX(psyCost_ss_4x4_sse4);
+
+        // TODO: check POPCNT flag!
+        ALL_LUMA_TU_S(copy_cnt, copy_cnt_, sse4);
+#if X265_DEPTH <= 10
+        ALL_LUMA_CU(psy_cost_pp, psyCost_pp, sse4);
+#endif
+        ALL_LUMA_CU(psy_cost_ss, psyCost_ss, sse4);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].p2s = PFX(filterPixelToShort_2x4_sse4);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].p2s = PFX(filterPixelToShort_2x8_sse4);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].p2s = PFX(filterPixelToShort_6x8_sse4);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].p2s = PFX(filterPixelToShort_2x8_sse4);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].p2s = PFX(filterPixelToShort_2x16_sse4);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].p2s = PFX(filterPixelToShort_6x16_sse4);
+    }
+    if (cpuMask & X265_CPU_AVX)
+    {
+        // p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_avx); fails tests
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].satd = PFX(pixel_satd_16x24_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].satd = PFX(pixel_satd_32x48_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].satd = PFX(pixel_satd_24x64_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].satd = PFX(pixel_satd_8x64_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].satd = PFX(pixel_satd_8x12_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].satd = PFX(pixel_satd_12x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].satd = PFX(pixel_satd_4x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].satd = PFX(pixel_satd_4x8_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].satd = PFX(pixel_satd_8x16_avx);
+        // p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].satd = PFX(pixel_satd_4x4_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].satd = PFX(pixel_satd_8x8_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].satd = PFX(pixel_satd_4x16_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].satd = PFX(pixel_satd_8x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].satd = PFX(pixel_satd_8x4_avx);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].satd = PFX(pixel_satd_8x8_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].satd = PFX(pixel_satd_8x4_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].satd = PFX(pixel_satd_8x16_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].satd = PFX(pixel_satd_8x32_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].satd = PFX(pixel_satd_12x16_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].satd = PFX(pixel_satd_24x32_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].satd = PFX(pixel_satd_4x16_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].satd = PFX(pixel_satd_4x8_avx);
+
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sa8d = PFX(pixel_sa8d_8x8_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sa8d = PFX(pixel_sa8d_16x16_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sa8d = PFX(pixel_sa8d_32x32_avx);
+
+        // copy_pp primitives
+        // 16 x N
+        p.pu[LUMA_64x64].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x64_avx);
+        p.pu[LUMA_16x4].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x4_avx);
+        p.pu[LUMA_16x8].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x8_avx);
+        p.pu[LUMA_16x12].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x12_avx);
+        p.pu[LUMA_16x16].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x16_avx);
+        p.pu[LUMA_16x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x32_avx);
+        p.pu[LUMA_16x64].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x64_avx);
+        p.pu[LUMA_64x16].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x16_avx);
+        p.pu[LUMA_64x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x32_avx);
+        p.pu[LUMA_64x48].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x48_avx);
+        p.pu[LUMA_64x64].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x64_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x4_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x8_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x12_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x16_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x16_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x24_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x64_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].copy_pp = (copy_pp_t)PFX(blockcopy_ss_16x8_avx);
+
+        // 24 X N
+        p.pu[LUMA_24x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_24x32_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_24x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].copy_pp = (copy_pp_t)PFX(blockcopy_ss_24x64_avx);
+
+        // 32 x N
+        p.pu[LUMA_32x8].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x8_avx);
+        p.pu[LUMA_32x16].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x16_avx);
+        p.pu[LUMA_32x24].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x24_avx);
+        p.pu[LUMA_32x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x32_avx);
+        p.pu[LUMA_32x64].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x64_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x8_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x16_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x24_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x16_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x48_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].copy_pp = (copy_pp_t)PFX(blockcopy_ss_32x64_avx);
+
+        // 48 X 64
+        p.pu[LUMA_48x64].copy_pp = (copy_pp_t)PFX(blockcopy_ss_48x64_avx);
+
+        // copy_ss primitives
+        // 16 X N
+        p.cu[BLOCK_16x16].copy_ss = PFX(blockcopy_ss_16x16_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].copy_ss = PFX(blockcopy_ss_16x16_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].copy_ss = PFX(blockcopy_ss_16x32_avx);
+
+        // 32 X N
+        p.cu[BLOCK_32x32].copy_ss = PFX(blockcopy_ss_32x32_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].copy_ss = PFX(blockcopy_ss_32x32_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].copy_ss = PFX(blockcopy_ss_32x64_avx);
+
+        // 64 X N
+        p.cu[BLOCK_64x64].copy_ss = PFX(blockcopy_ss_64x64_avx);
+
+        // copy_ps primitives
+        // 16 X N
+        p.cu[BLOCK_16x16].copy_ps = (copy_ps_t)PFX(blockcopy_ss_16x16_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].copy_ps = (copy_ps_t)PFX(blockcopy_ss_16x16_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].copy_ps = (copy_ps_t)PFX(blockcopy_ss_16x32_avx);
+
+        // 32 X N
+        p.cu[BLOCK_32x32].copy_ps = (copy_ps_t)PFX(blockcopy_ss_32x32_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].copy_ps = (copy_ps_t)PFX(blockcopy_ss_32x32_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].copy_ps = (copy_ps_t)PFX(blockcopy_ss_32x64_avx);
+
+        // 64 X N
+        p.cu[BLOCK_64x64].copy_ps = (copy_ps_t)PFX(blockcopy_ss_64x64_avx);
+
+        // copy_sp primitives
+        // 16 X N
+        p.cu[BLOCK_16x16].copy_sp = (copy_sp_t)PFX(blockcopy_ss_16x16_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].copy_sp = (copy_sp_t)PFX(blockcopy_ss_16x16_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].copy_sp = (copy_sp_t)PFX(blockcopy_ss_16x32_avx);
+
+        // 32 X N
+        p.cu[BLOCK_32x32].copy_sp = (copy_sp_t)PFX(blockcopy_ss_32x32_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].copy_sp = (copy_sp_t)PFX(blockcopy_ss_32x32_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].copy_sp = (copy_sp_t)PFX(blockcopy_ss_32x64_avx);
+
+        // 64 X N
+        p.cu[BLOCK_64x64].copy_sp = (copy_sp_t)PFX(blockcopy_ss_64x64_avx);
+
+        p.frameInitLowres = PFX(frame_init_lowres_core_avx);
+
+        p.pu[LUMA_64x16].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x16_avx);
+        p.pu[LUMA_64x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x32_avx);
+        p.pu[LUMA_64x48].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x48_avx);
+        p.pu[LUMA_64x64].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x64_avx);
+
+        /* The following primitives have been disabled since performance compared to SSE is negligible/negative */
+#if 0
+        ALL_LUMA_PU(satd, pixel_satd, avx);
+
+        p.ssim_4x4x2_core = PFX(pixel_ssim_4x4x2_core_avx);
+        p.ssim_end_4 = PFX(pixel_ssim_end4_avx);
+
+        LUMA_VAR(avx);
+
+        #if X265_DEPTH <= 10
+           ASSIGN_SA8D(avx);
+        #endif
+#endif
+    }
+    if (cpuMask & X265_CPU_XOP)
+    {
+        //p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_xop); this one is broken
+        ALL_LUMA_PU(satd, pixel_satd, xop);
+#if X265_DEPTH <= 10
+        ASSIGN_SA8D(xop);
+#endif
+        LUMA_VAR(xop);
+        p.frameInitLowres = PFX(frame_init_lowres_core_xop);
+    }
+    if (cpuMask & X265_CPU_AVX2)
+    {
+        p.cu[BLOCK_4x4].intra_filter = PFX(intra_filter_4x4_avx2);
+
+        // TODO: planecopy_sp is really planecopy_SC now; this must be fixed
+        //p.planecopy_sp = PFX(downShift_16_avx2);
+        p.planecopy_sp_shl = PFX(upShift_16_avx2);
+
+        p.saoCuOrgE0 = PFX(saoCuOrgE0_avx2);
+        p.saoCuOrgE1 = PFX(saoCuOrgE1_avx2);
+        p.saoCuOrgE1_2Rows = PFX(saoCuOrgE1_2Rows_avx2);
+        p.saoCuOrgE2[0] = PFX(saoCuOrgE2_avx2);
+        p.saoCuOrgE2[1] = PFX(saoCuOrgE2_32_avx2);
+        p.saoCuOrgE3[0] = PFX(saoCuOrgE3_avx2);
+        p.saoCuOrgE3[1] = PFX(saoCuOrgE3_32_avx2);
+        p.saoCuOrgB0 = PFX(saoCuOrgB0_avx2);
+
+        p.cu[BLOCK_16x16].intra_pred[2]     = PFX(intra_pred_ang16_2_avx2);
+        p.cu[BLOCK_16x16].intra_pred[3]     = PFX(intra_pred_ang16_3_avx2);
+        p.cu[BLOCK_16x16].intra_pred[4]     = PFX(intra_pred_ang16_4_avx2);
+        p.cu[BLOCK_16x16].intra_pred[5]     = PFX(intra_pred_ang16_5_avx2);
+        p.cu[BLOCK_16x16].intra_pred[6]     = PFX(intra_pred_ang16_6_avx2);
+        p.cu[BLOCK_16x16].intra_pred[7]     = PFX(intra_pred_ang16_7_avx2);
+        p.cu[BLOCK_16x16].intra_pred[8]     = PFX(intra_pred_ang16_8_avx2);
+        p.cu[BLOCK_16x16].intra_pred[9]     = PFX(intra_pred_ang16_9_avx2);
+        p.cu[BLOCK_16x16].intra_pred[10]    = PFX(intra_pred_ang16_10_avx2);
+        p.cu[BLOCK_16x16].intra_pred[11]    = PFX(intra_pred_ang16_11_avx2);
+        p.cu[BLOCK_16x16].intra_pred[12]    = PFX(intra_pred_ang16_12_avx2);
+        p.cu[BLOCK_16x16].intra_pred[13]    = PFX(intra_pred_ang16_13_avx2);
+        p.cu[BLOCK_16x16].intra_pred[14]    = PFX(intra_pred_ang16_14_avx2);
+        p.cu[BLOCK_16x16].intra_pred[15]    = PFX(intra_pred_ang16_15_avx2);
+        p.cu[BLOCK_16x16].intra_pred[16]    = PFX(intra_pred_ang16_16_avx2);
+        p.cu[BLOCK_16x16].intra_pred[17]    = PFX(intra_pred_ang16_17_avx2);
+        p.cu[BLOCK_16x16].intra_pred[18]    = PFX(intra_pred_ang16_18_avx2);
+        p.cu[BLOCK_16x16].intra_pred[19]    = PFX(intra_pred_ang16_19_avx2);
+        p.cu[BLOCK_16x16].intra_pred[20]    = PFX(intra_pred_ang16_20_avx2);
+        p.cu[BLOCK_16x16].intra_pred[21]    = PFX(intra_pred_ang16_21_avx2);
+        p.cu[BLOCK_16x16].intra_pred[22]    = PFX(intra_pred_ang16_22_avx2);
+        p.cu[BLOCK_16x16].intra_pred[23]    = PFX(intra_pred_ang16_23_avx2);
+        p.cu[BLOCK_16x16].intra_pred[24]    = PFX(intra_pred_ang16_24_avx2);
+        p.cu[BLOCK_16x16].intra_pred[25]    = PFX(intra_pred_ang16_25_avx2);
+        p.cu[BLOCK_16x16].intra_pred[26]    = PFX(intra_pred_ang16_26_avx2);
+        p.cu[BLOCK_16x16].intra_pred[27]    = PFX(intra_pred_ang16_27_avx2);
+        p.cu[BLOCK_16x16].intra_pred[28]    = PFX(intra_pred_ang16_28_avx2);
+        p.cu[BLOCK_16x16].intra_pred[29]    = PFX(intra_pred_ang16_29_avx2);
+        p.cu[BLOCK_16x16].intra_pred[30]    = PFX(intra_pred_ang16_30_avx2);
+        p.cu[BLOCK_16x16].intra_pred[31]    = PFX(intra_pred_ang16_31_avx2);
+        p.cu[BLOCK_16x16].intra_pred[32]    = PFX(intra_pred_ang16_32_avx2);
+        p.cu[BLOCK_16x16].intra_pred[33]    = PFX(intra_pred_ang16_33_avx2);
+        p.cu[BLOCK_16x16].intra_pred[34]    = PFX(intra_pred_ang16_2_avx2);
+
+        p.cu[BLOCK_32x32].intra_pred[2]     = PFX(intra_pred_ang32_2_avx2);
+        p.cu[BLOCK_32x32].intra_pred[3]     = PFX(intra_pred_ang32_3_avx2);
+        p.cu[BLOCK_32x32].intra_pred[4]     = PFX(intra_pred_ang32_4_avx2);
+        p.cu[BLOCK_32x32].intra_pred[5]     = PFX(intra_pred_ang32_5_avx2);
+        p.cu[BLOCK_32x32].intra_pred[6]     = PFX(intra_pred_ang32_6_avx2);
+        p.cu[BLOCK_32x32].intra_pred[7]     = PFX(intra_pred_ang32_7_avx2);
+        p.cu[BLOCK_32x32].intra_pred[8]     = PFX(intra_pred_ang32_8_avx2);
+        p.cu[BLOCK_32x32].intra_pred[9]     = PFX(intra_pred_ang32_9_avx2);
+        p.cu[BLOCK_32x32].intra_pred[10]    = PFX(intra_pred_ang32_10_avx2);
+        p.cu[BLOCK_32x32].intra_pred[11]    = PFX(intra_pred_ang32_11_avx2);
+        p.cu[BLOCK_32x32].intra_pred[12]    = PFX(intra_pred_ang32_12_avx2);
+        p.cu[BLOCK_32x32].intra_pred[13]    = PFX(intra_pred_ang32_13_avx2);
+        p.cu[BLOCK_32x32].intra_pred[14]    = PFX(intra_pred_ang32_14_avx2);
+        p.cu[BLOCK_32x32].intra_pred[15]    = PFX(intra_pred_ang32_15_avx2);
+        p.cu[BLOCK_32x32].intra_pred[16]    = PFX(intra_pred_ang32_16_avx2);
+        p.cu[BLOCK_32x32].intra_pred[17]    = PFX(intra_pred_ang32_17_avx2);
+        p.cu[BLOCK_32x32].intra_pred[18]    = PFX(intra_pred_ang32_18_avx2);
+        p.cu[BLOCK_32x32].intra_pred[19]    = PFX(intra_pred_ang32_19_avx2);
+        p.cu[BLOCK_32x32].intra_pred[20]    = PFX(intra_pred_ang32_20_avx2);
+        p.cu[BLOCK_32x32].intra_pred[21]    = PFX(intra_pred_ang32_21_avx2);
+        p.cu[BLOCK_32x32].intra_pred[22]    = PFX(intra_pred_ang32_22_avx2);
+        p.cu[BLOCK_32x32].intra_pred[23]    = PFX(intra_pred_ang32_23_avx2);
+        p.cu[BLOCK_32x32].intra_pred[24]    = PFX(intra_pred_ang32_24_avx2);
+        p.cu[BLOCK_32x32].intra_pred[25]    = PFX(intra_pred_ang32_25_avx2);
+        p.cu[BLOCK_32x32].intra_pred[26]    = PFX(intra_pred_ang32_26_avx2);
+        p.cu[BLOCK_32x32].intra_pred[27]    = PFX(intra_pred_ang32_27_avx2);
+        p.cu[BLOCK_32x32].intra_pred[28]    = PFX(intra_pred_ang32_28_avx2);
+        p.cu[BLOCK_32x32].intra_pred[29]    = PFX(intra_pred_ang32_29_avx2);
+        p.cu[BLOCK_32x32].intra_pred[30]    = PFX(intra_pred_ang32_30_avx2);
+        p.cu[BLOCK_32x32].intra_pred[31]    = PFX(intra_pred_ang32_31_avx2);
+        p.cu[BLOCK_32x32].intra_pred[32]    = PFX(intra_pred_ang32_32_avx2);
+        p.cu[BLOCK_32x32].intra_pred[33]    = PFX(intra_pred_ang32_33_avx2);
+        p.cu[BLOCK_32x32].intra_pred[34]    = PFX(intra_pred_ang32_2_avx2);
+
+        p.pu[LUMA_12x16].pixelavg_pp = PFX(pixel_avg_12x16_avx2);
+        p.pu[LUMA_16x4].pixelavg_pp = PFX(pixel_avg_16x4_avx2);
+        p.pu[LUMA_16x8].pixelavg_pp = PFX(pixel_avg_16x8_avx2);
+        p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_16x12_avx2);
+        p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_16x16_avx2);
+        p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_16x32_avx2);
+        p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_16x64_avx2);
+        p.pu[LUMA_24x32].pixelavg_pp = PFX(pixel_avg_24x32_avx2);
+        p.pu[LUMA_32x8].pixelavg_pp = PFX(pixel_avg_32x8_avx2);
+        p.pu[LUMA_32x16].pixelavg_pp = PFX(pixel_avg_32x16_avx2);
+        p.pu[LUMA_32x24].pixelavg_pp = PFX(pixel_avg_32x24_avx2);
+        p.pu[LUMA_32x32].pixelavg_pp = PFX(pixel_avg_32x32_avx2);
+        p.pu[LUMA_32x64].pixelavg_pp = PFX(pixel_avg_32x64_avx2);
+        p.pu[LUMA_64x16].pixelavg_pp = PFX(pixel_avg_64x16_avx2);
+        p.pu[LUMA_64x32].pixelavg_pp = PFX(pixel_avg_64x32_avx2);
+        p.pu[LUMA_64x48].pixelavg_pp = PFX(pixel_avg_64x48_avx2);
+        p.pu[LUMA_64x64].pixelavg_pp = PFX(pixel_avg_64x64_avx2);
+        p.pu[LUMA_48x64].pixelavg_pp = PFX(pixel_avg_48x64_avx2);
+
+        p.pu[LUMA_8x16].addAvg  = PFX(addAvg_8x16_avx2);
+        p.pu[LUMA_8x32].addAvg  = PFX(addAvg_8x32_avx2);
+        p.pu[LUMA_16x4].addAvg  = PFX(addAvg_16x4_avx2);
+        p.pu[LUMA_16x8].addAvg  = PFX(addAvg_16x8_avx2);
+        p.pu[LUMA_16x12].addAvg = PFX(addAvg_16x12_avx2);
+        p.pu[LUMA_16x16].addAvg = PFX(addAvg_16x16_avx2);
+        p.pu[LUMA_16x32].addAvg = PFX(addAvg_16x32_avx2);
+        p.pu[LUMA_16x64].addAvg = PFX(addAvg_16x64_avx2);
+        p.pu[LUMA_24x32].addAvg = PFX(addAvg_24x32_avx2);
+        p.pu[LUMA_32x8].addAvg  = PFX(addAvg_32x8_avx2);
+        p.pu[LUMA_32x16].addAvg = PFX(addAvg_32x16_avx2);
+        p.pu[LUMA_32x24].addAvg = PFX(addAvg_32x24_avx2);
+        p.pu[LUMA_32x32].addAvg = PFX(addAvg_32x32_avx2);
+        p.pu[LUMA_32x64].addAvg = PFX(addAvg_32x64_avx2);
+        p.pu[LUMA_48x64].addAvg = PFX(addAvg_48x64_avx2);
+        p.pu[LUMA_64x16].addAvg = PFX(addAvg_64x16_avx2);
+        p.pu[LUMA_64x32].addAvg = PFX(addAvg_64x32_avx2);
+        p.pu[LUMA_64x48].addAvg = PFX(addAvg_64x48_avx2);
+        p.pu[LUMA_64x64].addAvg = PFX(addAvg_64x64_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg   = PFX(addAvg_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg   = PFX(addAvg_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg   = PFX(addAvg_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg  = PFX(addAvg_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg  = PFX(addAvg_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg  = PFX(addAvg_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg  = PFX(addAvg_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg = PFX(addAvg_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].addAvg = PFX(addAvg_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].addAvg = PFX(addAvg_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg  = PFX(addAvg_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg = PFX(addAvg_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg = PFX(addAvg_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg = PFX(addAvg_32x32_avx2);
+
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg = PFX(addAvg_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg = PFX(addAvg_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg = PFX(addAvg_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg = PFX(addAvg_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg = PFX(addAvg_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg = PFX(addAvg_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg = PFX(addAvg_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg = PFX(addAvg_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg = PFX(addAvg_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg = PFX(addAvg_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg = PFX(addAvg_8x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg = PFX(addAvg_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg = PFX(addAvg_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg = PFX(addAvg_32x48_avx2);
+
+        p.cu[BLOCK_4x4].psy_cost_ss = PFX(psyCost_ss_4x4_avx2);
+        p.cu[BLOCK_8x8].psy_cost_ss = PFX(psyCost_ss_8x8_avx2);
+        p.cu[BLOCK_16x16].psy_cost_ss = PFX(psyCost_ss_16x16_avx2);
+        p.cu[BLOCK_32x32].psy_cost_ss = PFX(psyCost_ss_32x32_avx2);
+        p.cu[BLOCK_64x64].psy_cost_ss = PFX(psyCost_ss_64x64_avx2);
+        p.cu[BLOCK_4x4].psy_cost_pp = PFX(psyCost_pp_4x4_avx2);
+#if X265_DEPTH <= 10
+        p.cu[BLOCK_8x8].psy_cost_pp = PFX(psyCost_pp_8x8_avx2);
+        p.cu[BLOCK_16x16].psy_cost_pp = PFX(psyCost_pp_16x16_avx2);
+        p.cu[BLOCK_32x32].psy_cost_pp = PFX(psyCost_pp_32x32_avx2);
+        p.cu[BLOCK_64x64].psy_cost_pp = PFX(psyCost_pp_64x64_avx2);
+        p.cu[BLOCK_16x16].intra_pred[PLANAR_IDX] = PFX(intra_pred_planar16_avx2);
+        p.cu[BLOCK_32x32].intra_pred[PLANAR_IDX] = PFX(intra_pred_planar32_avx2);
+#endif
+
+        p.cu[BLOCK_16x16].intra_pred[DC_IDX] = PFX(intra_pred_dc16_avx2);
+        p.cu[BLOCK_32x32].intra_pred[DC_IDX] = PFX(intra_pred_dc32_avx2);
+
+        p.pu[LUMA_48x64].satd = PFX(pixel_satd_48x64_avx2);
+
+        p.pu[LUMA_64x16].satd = PFX(pixel_satd_64x16_avx2);
+        p.pu[LUMA_64x32].satd = PFX(pixel_satd_64x32_avx2);
+        p.pu[LUMA_64x48].satd = PFX(pixel_satd_64x48_avx2);
+        p.pu[LUMA_64x64].satd = PFX(pixel_satd_64x64_avx2);
+
+        p.pu[LUMA_32x8].satd = PFX(pixel_satd_32x8_avx2);
+        p.pu[LUMA_32x16].satd = PFX(pixel_satd_32x16_avx2);
+        p.pu[LUMA_32x24].satd = PFX(pixel_satd_32x24_avx2);
+        p.pu[LUMA_32x32].satd = PFX(pixel_satd_32x32_avx2);
+        p.pu[LUMA_32x64].satd = PFX(pixel_satd_32x64_avx2);
+
+        p.pu[LUMA_16x4].satd = PFX(pixel_satd_16x4_avx2);
+        p.pu[LUMA_16x8].satd = PFX(pixel_satd_16x8_avx2);
+        p.pu[LUMA_16x12].satd = PFX(pixel_satd_16x12_avx2);
+        p.pu[LUMA_16x16].satd = PFX(pixel_satd_16x16_avx2);
+        p.pu[LUMA_16x32].satd = PFX(pixel_satd_16x32_avx2);
+        p.pu[LUMA_16x64].satd = PFX(pixel_satd_16x64_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].satd = PFX(pixel_satd_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].satd = PFX(pixel_satd_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].satd = PFX(pixel_satd_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].satd = PFX(pixel_satd_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].satd = PFX(pixel_satd_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].satd = PFX(pixel_satd_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].satd = PFX(pixel_satd_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].satd = PFX(pixel_satd_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].satd = PFX(pixel_satd_32x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].satd = PFX(pixel_satd_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].satd = PFX(pixel_satd_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].satd = PFX(pixel_satd_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].satd = PFX(pixel_satd_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].satd = PFX(pixel_satd_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].satd = PFX(pixel_satd_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].satd = PFX(pixel_satd_32x16_avx2);
+
+        p.cu[BLOCK_16x16].ssd_s = PFX(pixel_ssd_s_16_avx2);
+        p.cu[BLOCK_32x32].ssd_s = PFX(pixel_ssd_s_32_avx2);
+
+#if X265_DEPTH <= 10
+        p.cu[BLOCK_16x16].sse_ss = PFX(pixel_ssd_ss_16x16_avx2);
+        p.cu[BLOCK_32x32].sse_ss = PFX(pixel_ssd_ss_32x32_avx2);
+        p.cu[BLOCK_64x64].sse_ss = PFX(pixel_ssd_ss_64x64_avx2);
+
+        p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_avx2);
+        p.cu[BLOCK_32x32].sse_pp = PFX(pixel_ssd_32x32_avx2);
+        p.cu[BLOCK_64x64].sse_pp = PFX(pixel_ssd_64x64_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sse_pp = PFX(pixel_ssd_16x16_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sse_pp = PFX(pixel_ssd_32x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_16x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_32x64_avx2);
+#endif
+
+        p.quant = PFX(quant_avx2);
+        p.nquant = PFX(nquant_avx2);
+        p.dequant_normal  = PFX(dequant_normal_avx2);
+        p.dequant_scaling = PFX(dequant_scaling_avx2);
+        p.dst4x4 = PFX(dst4_avx2);
+        p.idst4x4 = PFX(idst4_avx2);
+        p.denoiseDct = PFX(denoise_dct_avx2);
+
+        p.scale1D_128to64 = PFX(scale1D_128to64_avx2);
+        p.scale2D_64to32 = PFX(scale2D_64to32_avx2);
+
+        p.weight_pp = PFX(weight_pp_avx2);
+        p.weight_sp = PFX(weight_sp_avx2);
+        p.sign = PFX(calSign_avx2);
+        p.planecopy_cp = PFX(upShift_8_avx2);
+
+        p.cu[BLOCK_16x16].calcresidual = PFX(getResidual16_avx2);
+        p.cu[BLOCK_32x32].calcresidual = PFX(getResidual32_avx2);
+
+        p.cu[BLOCK_16x16].blockfill_s = PFX(blockfill_s_16x16_avx2);
+        p.cu[BLOCK_32x32].blockfill_s = PFX(blockfill_s_32x32_avx2);
+
+        p.cu[BLOCK_8x8].count_nonzero = PFX(count_nonzero_8x8_avx2);
+        p.cu[BLOCK_16x16].count_nonzero = PFX(count_nonzero_16x16_avx2);
+        p.cu[BLOCK_32x32].count_nonzero = PFX(count_nonzero_32x32_avx2);
+
+        p.cu[BLOCK_16x16].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_16_avx2);
+        p.cu[BLOCK_32x32].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_32_avx2);
+        
+        p.cu[BLOCK_8x8].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_8_avx2);
+        p.cu[BLOCK_16x16].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_16_avx2);
+        p.cu[BLOCK_32x32].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_32_avx2);
+
+        p.cu[BLOCK_16x16].copy_cnt = PFX(copy_cnt_16_avx2);
+        p.cu[BLOCK_32x32].copy_cnt = PFX(copy_cnt_32_avx2);
+
+        p.cu[BLOCK_8x8].cpy2Dto1D_shl = PFX(cpy2Dto1D_shl_8_avx2);
+        p.cu[BLOCK_16x16].cpy2Dto1D_shl = PFX(cpy2Dto1D_shl_16_avx2);
+        p.cu[BLOCK_32x32].cpy2Dto1D_shl = PFX(cpy2Dto1D_shl_32_avx2);
+
+        p.cu[BLOCK_8x8].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_8_avx2);
+        p.cu[BLOCK_16x16].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_16_avx2);
+        p.cu[BLOCK_32x32].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_32_avx2);
+
+#if X265_DEPTH <= 10
+        ALL_LUMA_TU_S(dct, dct, avx2);
+        ALL_LUMA_TU_S(idct, idct, avx2);
+#endif
+        ALL_LUMA_CU_S(transpose, transpose, avx2);
+
+        ALL_LUMA_PU(luma_vpp, interp_8tap_vert_pp, avx2);
+        ALL_LUMA_PU(luma_vps, interp_8tap_vert_ps, avx2);
+#if X265_DEPTH <= 10
+        ALL_LUMA_PU(luma_vsp, interp_8tap_vert_sp, avx2);
+#endif
+        ALL_LUMA_PU(luma_vss, interp_8tap_vert_ss, avx2);
+#if X265_DEPTH <= 10
+        p.pu[LUMA_4x4].luma_vsp = PFX(interp_8tap_vert_sp_4x4_avx2);               // ALL_LUMA_PU does not cover the 4x4 size, so assign the 4x4 luma_vsp entry separately
+#endif
+
+        p.cu[BLOCK_16x16].add_ps = PFX(pixel_add_ps_16x16_avx2);
+        p.cu[BLOCK_32x32].add_ps = PFX(pixel_add_ps_32x32_avx2);
+        p.cu[BLOCK_64x64].add_ps = PFX(pixel_add_ps_64x64_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].add_ps = PFX(pixel_add_ps_16x16_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].add_ps = PFX(pixel_add_ps_32x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].add_ps = PFX(pixel_add_ps_16x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].add_ps = PFX(pixel_add_ps_32x64_avx2);
+
+        p.cu[BLOCK_16x16].sub_ps = PFX(pixel_sub_ps_16x16_avx2);
+        p.cu[BLOCK_32x32].sub_ps = PFX(pixel_sub_ps_32x32_avx2);
+        p.cu[BLOCK_64x64].sub_ps = PFX(pixel_sub_ps_64x64_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sub_ps = PFX(pixel_sub_ps_16x16_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sub_ps = PFX(pixel_sub_ps_32x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sub_ps = PFX(pixel_sub_ps_16x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sub_ps = PFX(pixel_sub_ps_32x64_avx2);
+
+        p.pu[LUMA_16x4].sad = PFX(pixel_sad_16x4_avx2);
+        p.pu[LUMA_16x8].sad = PFX(pixel_sad_16x8_avx2);
+        p.pu[LUMA_16x12].sad = PFX(pixel_sad_16x12_avx2);
+        p.pu[LUMA_16x16].sad = PFX(pixel_sad_16x16_avx2);
+        p.pu[LUMA_16x32].sad = PFX(pixel_sad_16x32_avx2);
+#if X265_DEPTH <= 10
+        p.pu[LUMA_16x64].sad = PFX(pixel_sad_16x64_avx2);
+        p.pu[LUMA_32x8].sad = PFX(pixel_sad_32x8_avx2);
+        p.pu[LUMA_32x16].sad = PFX(pixel_sad_32x16_avx2);
+        p.pu[LUMA_32x24].sad = PFX(pixel_sad_32x24_avx2);
+        p.pu[LUMA_32x32].sad = PFX(pixel_sad_32x32_avx2);
+        p.pu[LUMA_32x64].sad = PFX(pixel_sad_32x64_avx2);
+        p.pu[LUMA_48x64].sad = PFX(pixel_sad_48x64_avx2);
+        p.pu[LUMA_64x16].sad = PFX(pixel_sad_64x16_avx2);
+        p.pu[LUMA_64x32].sad = PFX(pixel_sad_64x32_avx2);
+        p.pu[LUMA_64x48].sad = PFX(pixel_sad_64x48_avx2);
+        p.pu[LUMA_64x64].sad = PFX(pixel_sad_64x64_avx2);
+#endif
+
+        p.pu[LUMA_16x4].sad_x3 = PFX(pixel_sad_x3_16x4_avx2);
+        p.pu[LUMA_16x8].sad_x3 = PFX(pixel_sad_x3_16x8_avx2);
+        p.pu[LUMA_16x12].sad_x3 = PFX(pixel_sad_x3_16x12_avx2);
+        p.pu[LUMA_16x16].sad_x3 = PFX(pixel_sad_x3_16x16_avx2);
+        p.pu[LUMA_16x32].sad_x3 = PFX(pixel_sad_x3_16x32_avx2);
+        p.pu[LUMA_16x64].sad_x3 = PFX(pixel_sad_x3_16x64_avx2);
+        p.pu[LUMA_32x8].sad_x3 = PFX(pixel_sad_x3_32x8_avx2);
+        p.pu[LUMA_32x16].sad_x3 = PFX(pixel_sad_x3_32x16_avx2);
+        p.pu[LUMA_32x24].sad_x3 = PFX(pixel_sad_x3_32x24_avx2);
+        p.pu[LUMA_32x32].sad_x3 = PFX(pixel_sad_x3_32x32_avx2);
+        p.pu[LUMA_32x64].sad_x3 = PFX(pixel_sad_x3_32x64_avx2);
+        p.pu[LUMA_48x64].sad_x3 = PFX(pixel_sad_x3_48x64_avx2);
+        p.pu[LUMA_64x16].sad_x3 = PFX(pixel_sad_x3_64x16_avx2);
+        p.pu[LUMA_64x32].sad_x3 = PFX(pixel_sad_x3_64x32_avx2);
+        p.pu[LUMA_64x48].sad_x3 = PFX(pixel_sad_x3_64x48_avx2);
+        p.pu[LUMA_64x64].sad_x3 = PFX(pixel_sad_x3_64x64_avx2);
+
+        p.pu[LUMA_16x4].sad_x4 = PFX(pixel_sad_x4_16x4_avx2);
+        p.pu[LUMA_16x8].sad_x4 = PFX(pixel_sad_x4_16x8_avx2);
+        p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_avx2);
+        p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_avx2);
+        p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_avx2);
+        p.pu[LUMA_16x64].sad_x4 = PFX(pixel_sad_x4_16x64_avx2);
+        p.pu[LUMA_32x8].sad_x4 = PFX(pixel_sad_x4_32x8_avx2);
+        p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx2);
+        p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx2);
+        p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx2);
+        p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx2);
+        p.pu[LUMA_48x64].sad_x4 = PFX(pixel_sad_x4_48x64_avx2);
+        p.pu[LUMA_64x16].sad_x4 = PFX(pixel_sad_x4_64x16_avx2);
+        p.pu[LUMA_64x32].sad_x4 = PFX(pixel_sad_x4_64x32_avx2);
+        p.pu[LUMA_64x48].sad_x4 = PFX(pixel_sad_x4_64x48_avx2);
+        p.pu[LUMA_64x64].sad_x4 = PFX(pixel_sad_x4_64x64_avx2);
+
+        p.pu[LUMA_16x4].convert_p2s = PFX(filterPixelToShort_16x4_avx2);
+        p.pu[LUMA_16x8].convert_p2s = PFX(filterPixelToShort_16x8_avx2);
+        p.pu[LUMA_16x12].convert_p2s = PFX(filterPixelToShort_16x12_avx2);
+        p.pu[LUMA_16x16].convert_p2s = PFX(filterPixelToShort_16x16_avx2);
+        p.pu[LUMA_16x32].convert_p2s = PFX(filterPixelToShort_16x32_avx2);
+        p.pu[LUMA_16x64].convert_p2s = PFX(filterPixelToShort_16x64_avx2);
+        p.pu[LUMA_32x8].convert_p2s = PFX(filterPixelToShort_32x8_avx2);
+        p.pu[LUMA_32x16].convert_p2s = PFX(filterPixelToShort_32x16_avx2);
+        p.pu[LUMA_32x24].convert_p2s = PFX(filterPixelToShort_32x24_avx2);
+        p.pu[LUMA_32x32].convert_p2s = PFX(filterPixelToShort_32x32_avx2);
+        p.pu[LUMA_32x64].convert_p2s = PFX(filterPixelToShort_32x64_avx2);
+        p.pu[LUMA_64x16].convert_p2s = PFX(filterPixelToShort_64x16_avx2);
+        p.pu[LUMA_64x32].convert_p2s = PFX(filterPixelToShort_64x32_avx2);
+        p.pu[LUMA_64x48].convert_p2s = PFX(filterPixelToShort_64x48_avx2);
+        p.pu[LUMA_64x64].convert_p2s = PFX(filterPixelToShort_64x64_avx2);
+        p.pu[LUMA_24x32].convert_p2s = PFX(filterPixelToShort_24x32_avx2);
+        p.pu[LUMA_48x64].convert_p2s = PFX(filterPixelToShort_48x64_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].p2s = PFX(filterPixelToShort_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].p2s = PFX(filterPixelToShort_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].p2s = PFX(filterPixelToShort_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].p2s = PFX(filterPixelToShort_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].p2s = PFX(filterPixelToShort_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].p2s = PFX(filterPixelToShort_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].p2s = PFX(filterPixelToShort_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].p2s = PFX(filterPixelToShort_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].p2s = PFX(filterPixelToShort_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].p2s = PFX(filterPixelToShort_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].p2s = PFX(filterPixelToShort_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].p2s = PFX(filterPixelToShort_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].p2s = PFX(filterPixelToShort_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].p2s = PFX(filterPixelToShort_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].p2s = PFX(filterPixelToShort_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].p2s = PFX(filterPixelToShort_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].p2s = PFX(filterPixelToShort_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].p2s = PFX(filterPixelToShort_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].p2s = PFX(filterPixelToShort_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].p2s = PFX(filterPixelToShort_32x64_avx2);
+
+#if X265_DEPTH <= 10
+        p.pu[LUMA_4x4].luma_hps = PFX(interp_8tap_horiz_ps_4x4_avx2);
+        p.pu[LUMA_4x8].luma_hps = PFX(interp_8tap_horiz_ps_4x8_avx2);
+        p.pu[LUMA_4x16].luma_hps = PFX(interp_8tap_horiz_ps_4x16_avx2);
+        p.pu[LUMA_8x8].luma_hps = PFX(interp_8tap_horiz_ps_8x8_avx2);
+        p.pu[LUMA_8x4].luma_hps = PFX(interp_8tap_horiz_ps_8x4_avx2);
+        p.pu[LUMA_8x16].luma_hps = PFX(interp_8tap_horiz_ps_8x16_avx2);
+        p.pu[LUMA_8x32].luma_hps = PFX(interp_8tap_horiz_ps_8x32_avx2);
+        p.pu[LUMA_16x4].luma_hps = PFX(interp_8tap_horiz_ps_16x4_avx2);
+        p.pu[LUMA_16x8].luma_hps = PFX(interp_8tap_horiz_ps_16x8_avx2);
+        p.pu[LUMA_16x12].luma_hps = PFX(interp_8tap_horiz_ps_16x12_avx2);
+        p.pu[LUMA_16x16].luma_hps = PFX(interp_8tap_horiz_ps_16x16_avx2);
+        p.pu[LUMA_16x32].luma_hps = PFX(interp_8tap_horiz_ps_16x32_avx2);
+        p.pu[LUMA_16x64].luma_hps = PFX(interp_8tap_horiz_ps_16x64_avx2);
+        p.pu[LUMA_32x8].luma_hps = PFX(interp_8tap_horiz_ps_32x8_avx2);
+        p.pu[LUMA_32x16].luma_hps = PFX(interp_8tap_horiz_ps_32x16_avx2);
+        p.pu[LUMA_32x32].luma_hps = PFX(interp_8tap_horiz_ps_32x32_avx2);
+        p.pu[LUMA_32x24].luma_hps = PFX(interp_8tap_horiz_ps_32x24_avx2);
+        p.pu[LUMA_32x64].luma_hps = PFX(interp_8tap_horiz_ps_32x64_avx2);
+        p.pu[LUMA_64x64].luma_hps = PFX(interp_8tap_horiz_ps_64x64_avx2);
+        p.pu[LUMA_64x16].luma_hps = PFX(interp_8tap_horiz_ps_64x16_avx2);
+        p.pu[LUMA_64x32].luma_hps = PFX(interp_8tap_horiz_ps_64x32_avx2);
+        p.pu[LUMA_64x48].luma_hps = PFX(interp_8tap_horiz_ps_64x48_avx2);
+        p.pu[LUMA_48x64].luma_hps = PFX(interp_8tap_horiz_ps_48x64_avx2);
+        p.pu[LUMA_24x32].luma_hps = PFX(interp_8tap_horiz_ps_24x32_avx2);
+        p.pu[LUMA_12x16].luma_hps = PFX(interp_8tap_horiz_ps_12x16_avx2);
+#endif
+
+        p.pu[LUMA_4x4].luma_hpp = PFX(interp_8tap_horiz_pp_4x4_avx2);
+        p.pu[LUMA_4x8].luma_hpp = PFX(interp_8tap_horiz_pp_4x8_avx2);
+        p.pu[LUMA_4x16].luma_hpp = PFX(interp_8tap_horiz_pp_4x16_avx2);
+        p.pu[LUMA_8x4].luma_hpp = PFX(interp_8tap_horiz_pp_8x4_avx2);
+        p.pu[LUMA_8x8].luma_hpp = PFX(interp_8tap_horiz_pp_8x8_avx2);
+        p.pu[LUMA_8x16].luma_hpp = PFX(interp_8tap_horiz_pp_8x16_avx2);
+        p.pu[LUMA_8x32].luma_hpp = PFX(interp_8tap_horiz_pp_8x32_avx2);
+        p.pu[LUMA_16x4].luma_hpp = PFX(interp_8tap_horiz_pp_16x4_avx2);
+        p.pu[LUMA_16x8].luma_hpp = PFX(interp_8tap_horiz_pp_16x8_avx2);
+        p.pu[LUMA_16x12].luma_hpp = PFX(interp_8tap_horiz_pp_16x12_avx2);
+        p.pu[LUMA_16x16].luma_hpp = PFX(interp_8tap_horiz_pp_16x16_avx2);
+        p.pu[LUMA_16x32].luma_hpp = PFX(interp_8tap_horiz_pp_16x32_avx2);
+        p.pu[LUMA_16x64].luma_hpp = PFX(interp_8tap_horiz_pp_16x64_avx2);
+        p.pu[LUMA_32x8].luma_hpp = PFX(interp_8tap_horiz_pp_32x8_avx2);
+        p.pu[LUMA_32x16].luma_hpp = PFX(interp_8tap_horiz_pp_32x16_avx2);
+        p.pu[LUMA_32x24].luma_hpp = PFX(interp_8tap_horiz_pp_32x24_avx2);
+        p.pu[LUMA_32x32].luma_hpp = PFX(interp_8tap_horiz_pp_32x32_avx2);
+        p.pu[LUMA_32x64].luma_hpp = PFX(interp_8tap_horiz_pp_32x64_avx2);
+        p.pu[LUMA_64x16].luma_hpp = PFX(interp_8tap_horiz_pp_64x16_avx2);
+        p.pu[LUMA_64x32].luma_hpp = PFX(interp_8tap_horiz_pp_64x32_avx2);
+        p.pu[LUMA_64x48].luma_hpp = PFX(interp_8tap_horiz_pp_64x48_avx2);
+        p.pu[LUMA_64x64].luma_hpp = PFX(interp_8tap_horiz_pp_64x64_avx2);
+        p.pu[LUMA_12x16].luma_hpp = PFX(interp_8tap_horiz_pp_12x16_avx2);
+        p.pu[LUMA_24x32].luma_hpp = PFX(interp_8tap_horiz_pp_24x32_avx2);
+        p.pu[LUMA_48x64].luma_hpp = PFX(interp_8tap_horiz_pp_48x64_avx2);
+
+#if X265_DEPTH <= 10
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_hps = PFX(interp_4tap_horiz_ps_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_hps = PFX(interp_4tap_horiz_ps_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_hps = PFX(interp_4tap_horiz_ps_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_hps = PFX(interp_4tap_horiz_ps_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_hps = PFX(interp_4tap_horiz_ps_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_hps = PFX(interp_4tap_horiz_ps_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_hps = PFX(interp_4tap_horiz_ps_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_hps = PFX(interp_4tap_horiz_ps_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_hps = PFX(interp_4tap_horiz_ps_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_hps = PFX(interp_4tap_horiz_ps_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_hps = PFX(interp_4tap_horiz_ps_32x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_hps = PFX(interp_4tap_horiz_ps_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_hps = PFX(interp_4tap_horiz_ps_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_hps = PFX(interp_4tap_horiz_ps_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_hps = PFX(interp_4tap_horiz_ps_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_hps = PFX(interp_4tap_horiz_ps_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_hps = PFX(interp_4tap_horiz_ps_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_hps = PFX(interp_4tap_horiz_ps_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_hps = PFX(interp_4tap_horiz_ps_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_hps = PFX(interp_4tap_horiz_ps_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_hps = PFX(interp_4tap_horiz_ps_24x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_hps = PFX(interp_4tap_horiz_ps_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_hps = PFX(interp_4tap_horiz_ps_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_hps = PFX(interp_4tap_horiz_ps_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_hps = PFX(interp_4tap_horiz_ps_8x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_hps = PFX(interp_4tap_horiz_ps_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_hps = PFX(interp_4tap_horiz_ps_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_hps = PFX(interp_4tap_horiz_ps_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_hps = PFX(interp_4tap_horiz_ps_16x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_hps = PFX(interp_4tap_horiz_ps_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_hps = PFX(interp_4tap_horiz_ps_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_hps = PFX(interp_4tap_horiz_ps_32x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_hps = PFX(interp_4tap_horiz_ps_64x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_hps = PFX(interp_4tap_horiz_ps_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_hps = PFX(interp_4tap_horiz_ps_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_hps = PFX(interp_4tap_horiz_ps_64x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_hps = PFX(interp_4tap_horiz_ps_48x64_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_hpp = PFX(interp_4tap_horiz_pp_6x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_hpp = PFX(interp_4tap_horiz_pp_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_hpp = PFX(interp_4tap_horiz_pp_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_hpp = PFX(interp_4tap_horiz_pp_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_hpp = PFX(interp_4tap_horiz_pp_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_hpp = PFX(interp_4tap_horiz_pp_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_hpp = PFX(interp_4tap_horiz_pp_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_hpp = PFX(interp_4tap_horiz_pp_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_hpp = PFX(interp_4tap_horiz_pp_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_hpp = PFX(interp_4tap_horiz_pp_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_hpp = PFX(interp_4tap_horiz_pp_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_hpp = PFX(interp_4tap_horiz_pp_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_hpp = PFX(interp_4tap_horiz_pp_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_hpp = PFX(interp_4tap_horiz_pp_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_hpp = PFX(interp_4tap_horiz_pp_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_hpp = PFX(interp_4tap_horiz_pp_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_hpp = PFX(interp_4tap_horiz_pp_24x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].filter_hpp = PFX(interp_4tap_horiz_pp_6x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_hpp = PFX(interp_4tap_horiz_pp_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_hpp = PFX(interp_4tap_horiz_pp_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_hpp = PFX(interp_4tap_horiz_pp_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_hpp = PFX(interp_4tap_horiz_pp_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_hpp = PFX(interp_4tap_horiz_pp_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_hpp = PFX(interp_4tap_horiz_pp_8x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_hpp = PFX(interp_4tap_horiz_pp_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_hpp = PFX(interp_4tap_horiz_pp_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_hpp = PFX(interp_4tap_horiz_pp_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_hpp = PFX(interp_4tap_horiz_pp_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_hpp = PFX(interp_4tap_horiz_pp_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_hpp = PFX(interp_4tap_horiz_pp_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_hpp = PFX(interp_4tap_horiz_pp_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_hpp = PFX(interp_4tap_horiz_pp_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_hpp = PFX(interp_4tap_horiz_pp_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_hpp = PFX(interp_4tap_horiz_pp_24x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_hpp = PFX(interp_4tap_horiz_pp_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_hpp = PFX(interp_4tap_horiz_pp_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_hpp = PFX(interp_4tap_horiz_pp_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_hpp = PFX(interp_4tap_horiz_pp_8x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_hpp = PFX(interp_4tap_horiz_pp_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_hpp = PFX(interp_4tap_horiz_pp_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_hpp = PFX(interp_4tap_horiz_pp_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_hpp = PFX(interp_4tap_horiz_pp_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_hpp = PFX(interp_4tap_horiz_pp_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_hpp = PFX(interp_4tap_horiz_pp_16x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_hpp = PFX(interp_4tap_horiz_pp_32x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_hpp = PFX(interp_4tap_horiz_pp_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_hpp = PFX(interp_4tap_horiz_pp_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_hpp = PFX(interp_4tap_horiz_pp_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_hpp = PFX(interp_4tap_horiz_pp_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_hpp = PFX(interp_4tap_horiz_pp_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_hpp = PFX(interp_4tap_horiz_pp_64x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_hpp = PFX(interp_4tap_horiz_pp_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_hpp = PFX(interp_4tap_horiz_pp_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_hpp = PFX(interp_4tap_horiz_pp_64x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_hpp = PFX(interp_4tap_horiz_pp_48x64_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vpp = PFX(interp_4tap_vert_pp_4x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vps = PFX(interp_4tap_vert_ps_4x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vsp = PFX(interp_4tap_vert_sp_4x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vss = PFX(interp_4tap_vert_ss_4x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vps = PFX(interp_4tap_vert_ps_4x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vsp = PFX(interp_4tap_vert_sp_4x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vss = PFX(interp_4tap_vert_ss_4x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_vpp = PFX(interp_4tap_vert_pp_4x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_vps = PFX(interp_4tap_vert_ps_4x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_vsp = PFX(interp_4tap_vert_sp_4x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_vss = PFX(interp_4tap_vert_ss_4x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_vpp = PFX(interp_4tap_vert_pp_4x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_vps = PFX(interp_4tap_vert_ps_4x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_vsp = PFX(interp_4tap_vert_sp_4x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_vss = PFX(interp_4tap_vert_ss_4x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_vpp = PFX(interp_4tap_vert_pp_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_vps = PFX(interp_4tap_vert_ps_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_vsp = PFX(interp_4tap_vert_sp_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_vss = PFX(interp_4tap_vert_ss_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_vpp = PFX(interp_4tap_vert_pp_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_vps = PFX(interp_4tap_vert_ps_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_vsp = PFX(interp_4tap_vert_sp_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_vss = PFX(interp_4tap_vert_ss_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_vpp = PFX(interp_4tap_vert_pp_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_vps = PFX(interp_4tap_vert_ps_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_vsp = PFX(interp_4tap_vert_sp_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_vss = PFX(interp_4tap_vert_ss_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_vpp = PFX(interp_4tap_vert_pp_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_vps = PFX(interp_4tap_vert_ps_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_vsp = PFX(interp_4tap_vert_sp_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_vss = PFX(interp_4tap_vert_ss_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_vpp = PFX(interp_4tap_vert_pp_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_vps = PFX(interp_4tap_vert_ps_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_vsp = PFX(interp_4tap_vert_sp_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_vss = PFX(interp_4tap_vert_ss_8x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_vpp = PFX(interp_4tap_vert_pp_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_vps = PFX(interp_4tap_vert_ps_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_vsp = PFX(interp_4tap_vert_sp_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_vss = PFX(interp_4tap_vert_ss_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_vpp = PFX(interp_4tap_vert_pp_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_vps = PFX(interp_4tap_vert_ps_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_vsp = PFX(interp_4tap_vert_sp_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_vss = PFX(interp_4tap_vert_ss_8x32_avx2);
+
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vps = PFX(interp_4tap_vert_ps_4x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vsp = PFX(interp_4tap_vert_sp_4x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vss = PFX(interp_4tap_vert_ss_4x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_vpp = PFX(interp_4tap_vert_pp_4x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_vps = PFX(interp_4tap_vert_ps_4x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_vsp = PFX(interp_4tap_vert_sp_4x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_vss = PFX(interp_4tap_vert_ss_4x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_vpp = PFX(interp_4tap_vert_pp_4x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_vps = PFX(interp_4tap_vert_ps_4x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_vsp = PFX(interp_4tap_vert_sp_4x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_vss = PFX(interp_4tap_vert_ss_4x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].filter_vpp = PFX(interp_4tap_vert_pp_4x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].filter_vps = PFX(interp_4tap_vert_ps_4x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].filter_vsp = PFX(interp_4tap_vert_sp_4x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].filter_vss = PFX(interp_4tap_vert_ss_4x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_vpp = PFX(interp_4tap_vert_pp_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_vps = PFX(interp_4tap_vert_ps_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_vsp = PFX(interp_4tap_vert_sp_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_vss = PFX(interp_4tap_vert_ss_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_vpp = PFX(interp_4tap_vert_pp_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_vps = PFX(interp_4tap_vert_ps_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_vsp = PFX(interp_4tap_vert_sp_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_vss = PFX(interp_4tap_vert_ss_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_vpp = PFX(interp_4tap_vert_pp_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_vps = PFX(interp_4tap_vert_ps_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_vsp = PFX(interp_4tap_vert_sp_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_vss = PFX(interp_4tap_vert_ss_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_vpp = PFX(interp_4tap_vert_pp_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_vps = PFX(interp_4tap_vert_ps_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_vsp = PFX(interp_4tap_vert_sp_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_vss = PFX(interp_4tap_vert_ss_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_vpp = PFX(interp_4tap_vert_pp_8x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_vps = PFX(interp_4tap_vert_ps_8x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_vsp = PFX(interp_4tap_vert_sp_8x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_vps = PFX(interp_4tap_vert_ps_4x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_vsp = PFX(interp_4tap_vert_sp_4x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_vss = PFX(interp_4tap_vert_ss_4x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_vpp = PFX(interp_4tap_vert_pp_4x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_vps = PFX(interp_4tap_vert_ps_4x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_vsp = PFX(interp_4tap_vert_sp_4x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_vss = PFX(interp_4tap_vert_ss_4x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_vpp = PFX(interp_4tap_vert_pp_4x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_vps = PFX(interp_4tap_vert_ps_4x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_vsp = PFX(interp_4tap_vert_sp_4x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_vss = PFX(interp_4tap_vert_ss_4x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_vpp = PFX(interp_4tap_vert_pp_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_vps = PFX(interp_4tap_vert_ps_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_vsp = PFX(interp_4tap_vert_sp_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_vss = PFX(interp_4tap_vert_ss_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_vpp = PFX(interp_4tap_vert_pp_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_vps = PFX(interp_4tap_vert_ps_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_vsp = PFX(interp_4tap_vert_sp_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_vss = PFX(interp_4tap_vert_ss_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_vpp = PFX(interp_4tap_vert_pp_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_vps = PFX(interp_4tap_vert_ps_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_vsp = PFX(interp_4tap_vert_sp_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_vss = PFX(interp_4tap_vert_ss_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vpp = PFX(interp_4tap_vert_pp_8x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vps = PFX(interp_4tap_vert_ps_8x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vsp = PFX(interp_4tap_vert_sp_8x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vss = PFX(interp_4tap_vert_ss_8x32_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vss = PFX(interp_4tap_vert_ss_6x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vsp = PFX(interp_4tap_vert_sp_6x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vps = PFX(interp_4tap_vert_ps_6x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vpp = PFX(interp_4tap_vert_pp_6x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vpp = PFX(interp_4tap_vert_pp_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vps = PFX(interp_4tap_vert_ps_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vss = PFX(interp_4tap_vert_ss_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vsp = PFX(interp_4tap_vert_sp_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vpp = PFX(interp_4tap_vert_pp_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vpp = PFX(interp_4tap_vert_pp_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vpp = PFX(interp_4tap_vert_pp_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vps = PFX(interp_4tap_vert_ps_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vps = PFX(interp_4tap_vert_ps_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vps = PFX(interp_4tap_vert_ps_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vps = PFX(interp_4tap_vert_ps_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vps = PFX(interp_4tap_vert_ps_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vss = PFX(interp_4tap_vert_ss_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vss = PFX(interp_4tap_vert_ss_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vss = PFX(interp_4tap_vert_ss_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vss = PFX(interp_4tap_vert_ss_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vss = PFX(interp_4tap_vert_ss_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vsp = PFX(interp_4tap_vert_sp_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vsp = PFX(interp_4tap_vert_sp_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vsp = PFX(interp_4tap_vert_sp_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vsp = PFX(interp_4tap_vert_sp_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vsp = PFX(interp_4tap_vert_sp_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vpp = PFX(interp_4tap_vert_pp_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vps = PFX(interp_4tap_vert_ps_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vss = PFX(interp_4tap_vert_ss_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vsp = PFX(interp_4tap_vert_sp_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vpp = PFX(interp_4tap_vert_pp_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vpp = PFX(interp_4tap_vert_pp_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_vpp = PFX(interp_4tap_vert_pp_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_vpp = PFX(interp_4tap_vert_pp_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vps = PFX(interp_4tap_vert_ps_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vps = PFX(interp_4tap_vert_ps_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_vps = PFX(interp_4tap_vert_ps_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_vps = PFX(interp_4tap_vert_ps_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vss = PFX(interp_4tap_vert_ss_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vss = PFX(interp_4tap_vert_ss_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_vss = PFX(interp_4tap_vert_ss_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_vss = PFX(interp_4tap_vert_ss_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vsp = PFX(interp_4tap_vert_sp_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vsp = PFX(interp_4tap_vert_sp_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_vsp = PFX(interp_4tap_vert_sp_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_vsp = PFX(interp_4tap_vert_sp_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vpp = PFX(interp_4tap_vert_pp_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vps = PFX(interp_4tap_vert_ps_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vss = PFX(interp_4tap_vert_ss_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vsp = PFX(interp_4tap_vert_sp_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vpp = PFX(interp_4tap_vert_pp_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vpp = PFX(interp_4tap_vert_pp_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vpp = PFX(interp_4tap_vert_pp_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vps = PFX(interp_4tap_vert_ps_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vps = PFX(interp_4tap_vert_ps_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vps = PFX(interp_4tap_vert_ps_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vps = PFX(interp_4tap_vert_ps_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vps = PFX(interp_4tap_vert_ps_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vss = PFX(interp_4tap_vert_ss_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vss = PFX(interp_4tap_vert_ss_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vss = PFX(interp_4tap_vert_ss_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vss = PFX(interp_4tap_vert_ss_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vss = PFX(interp_4tap_vert_ss_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vsp = PFX(interp_4tap_vert_sp_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vsp = PFX(interp_4tap_vert_sp_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vsp = PFX(interp_4tap_vert_sp_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vsp = PFX(interp_4tap_vert_sp_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vsp = PFX(interp_4tap_vert_sp_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vpp = PFX(interp_4tap_vert_pp_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vps = PFX(interp_4tap_vert_ps_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vss = PFX(interp_4tap_vert_ss_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vsp = PFX(interp_4tap_vert_sp_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_vpp = PFX(interp_4tap_vert_pp_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_vpp = PFX(interp_4tap_vert_pp_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_vpp = PFX(interp_4tap_vert_pp_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_vpp = PFX(interp_4tap_vert_pp_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_vps = PFX(interp_4tap_vert_ps_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_vps = PFX(interp_4tap_vert_ps_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_vps = PFX(interp_4tap_vert_ps_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_vps = PFX(interp_4tap_vert_ps_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_vss = PFX(interp_4tap_vert_ss_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_vss = PFX(interp_4tap_vert_ss_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_vss = PFX(interp_4tap_vert_ss_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_vss = PFX(interp_4tap_vert_ss_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_vsp = PFX(interp_4tap_vert_sp_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_vsp = PFX(interp_4tap_vert_sp_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_vsp = PFX(interp_4tap_vert_sp_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_vsp = PFX(interp_4tap_vert_sp_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vpp = PFX(interp_4tap_vert_pp_12x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vps = PFX(interp_4tap_vert_ps_12x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vss = PFX(interp_4tap_vert_ss_12x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vsp = PFX(interp_4tap_vert_sp_12x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vpp = PFX(interp_4tap_vert_pp_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_vpp = PFX(interp_4tap_vert_pp_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vpp = PFX(interp_4tap_vert_pp_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vpp = PFX(interp_4tap_vert_pp_16x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vps = PFX(interp_4tap_vert_ps_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vps = PFX(interp_4tap_vert_ps_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_vps = PFX(interp_4tap_vert_ps_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vps = PFX(interp_4tap_vert_ps_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vps = PFX(interp_4tap_vert_ps_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vps = PFX(interp_4tap_vert_ps_16x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vss = PFX(interp_4tap_vert_ss_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vss = PFX(interp_4tap_vert_ss_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_vss = PFX(interp_4tap_vert_ss_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vss = PFX(interp_4tap_vert_ss_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vss = PFX(interp_4tap_vert_ss_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vss = PFX(interp_4tap_vert_ss_16x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vsp = PFX(interp_4tap_vert_sp_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vsp = PFX(interp_4tap_vert_sp_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_vsp = PFX(interp_4tap_vert_sp_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vsp = PFX(interp_4tap_vert_sp_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vsp = PFX(interp_4tap_vert_sp_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vsp = PFX(interp_4tap_vert_sp_16x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vpp = PFX(interp_4tap_vert_pp_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vps = PFX(interp_4tap_vert_ps_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vss = PFX(interp_4tap_vert_ss_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vsp = PFX(interp_4tap_vert_sp_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vpp = PFX(interp_4tap_vert_pp_32x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vpp = PFX(interp_4tap_vert_pp_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vpp = PFX(interp_4tap_vert_pp_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vpp = PFX(interp_4tap_vert_pp_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vpp = PFX(interp_4tap_vert_pp_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vps = PFX(interp_4tap_vert_ps_32x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vps = PFX(interp_4tap_vert_ps_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vps = PFX(interp_4tap_vert_ps_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vps = PFX(interp_4tap_vert_ps_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vps = PFX(interp_4tap_vert_ps_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vss = PFX(interp_4tap_vert_ss_32x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vss = PFX(interp_4tap_vert_ss_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vss = PFX(interp_4tap_vert_ss_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vss = PFX(interp_4tap_vert_ss_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vss = PFX(interp_4tap_vert_ss_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vsp = PFX(interp_4tap_vert_sp_32x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vsp = PFX(interp_4tap_vert_sp_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vsp = PFX(interp_4tap_vert_sp_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vsp = PFX(interp_4tap_vert_sp_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vsp = PFX(interp_4tap_vert_sp_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vpp = PFX(interp_4tap_vert_pp_48x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vps = PFX(interp_4tap_vert_ps_48x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vss = PFX(interp_4tap_vert_ss_48x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vsp = PFX(interp_4tap_vert_sp_48x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vpp = PFX(interp_4tap_vert_pp_64x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vpp = PFX(interp_4tap_vert_pp_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vpp = PFX(interp_4tap_vert_pp_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vpp = PFX(interp_4tap_vert_pp_64x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vps = PFX(interp_4tap_vert_ps_64x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vps = PFX(interp_4tap_vert_ps_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vps = PFX(interp_4tap_vert_ps_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vps = PFX(interp_4tap_vert_ps_64x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vss = PFX(interp_4tap_vert_ss_64x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vss = PFX(interp_4tap_vert_ss_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vss = PFX(interp_4tap_vert_ss_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vss = PFX(interp_4tap_vert_ss_64x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vsp = PFX(interp_4tap_vert_sp_64x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vsp = PFX(interp_4tap_vert_sp_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vsp = PFX(interp_4tap_vert_sp_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vsp = PFX(interp_4tap_vert_sp_64x64_avx2);
+
+        /* The following primitives have been disabled since performance compared to SSE is negligible/negative */
+#if 0
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].filter_vss = PFX(interp_4tap_vert_ss_6x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].filter_vps = PFX(interp_4tap_vert_ps_6x16_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_hpp = PFX(interp_4tap_horiz_pp_12x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_hpp = PFX(interp_4tap_horiz_pp_12x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_hpp = PFX(interp_4tap_horiz_pp_12x32_avx2);
+
+        p.cu[BLOCK_4x4].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_4_avx2);
+        p.cu[BLOCK_8x8].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_8_avx2);
+        p.cu[BLOCK_4x4].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_4_avx2);
+        p.cu[BLOCK_4x4].count_nonzero = PFX(count_nonzero_4x4_avx2);
+        p.cu[BLOCK_8x8].copy_cnt = PFX(copy_cnt_8_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_hps = PFX(interp_4tap_horiz_ps_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_hps = PFX(interp_4tap_horiz_ps_12x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_hps = PFX(interp_4tap_horiz_ps_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].filter_hps = PFX(interp_4tap_horiz_ps_6x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_hps = PFX(interp_4tap_horiz_ps_6x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_hps = PFX(interp_4tap_horiz_ps_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_hps = PFX(interp_4tap_horiz_ps_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_hps = PFX(interp_4tap_horiz_ps_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_hps = PFX(interp_4tap_horiz_ps_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_hps = PFX(interp_4tap_horiz_ps_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_hps = PFX(interp_4tap_horiz_ps_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_hps = PFX(interp_4tap_horiz_ps_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_hps = PFX(interp_4tap_horiz_ps_24x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_hps = PFX(interp_4tap_horiz_ps_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_hps = PFX(interp_4tap_horiz_ps_8x64_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_hps = PFX(interp_4tap_horiz_ps_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_hps = PFX(interp_4tap_horiz_ps_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_hps = PFX(interp_4tap_horiz_ps_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_hps = PFX(interp_4tap_horiz_ps_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_hps = PFX(interp_4tap_horiz_ps_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_hps = PFX(interp_4tap_horiz_ps_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_vss = PFX(interp_4tap_vert_ss_8x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].filter_vsp = PFX(interp_4tap_vert_sp_6x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].filter_vpp = PFX(interp_4tap_vert_pp_6x16_avx2);
+
+        p.pu[LUMA_8x4].addAvg   = PFX(addAvg_8x4_avx2);
+        p.pu[LUMA_8x8].addAvg = PFX(addAvg_8x8_avx2);
+        p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg = PFX(addAvg_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg = PFX(addAvg_12x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg = PFX(addAvg_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg = PFX(addAvg_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg = PFX(addAvg_12x32_avx2);
+#endif
+#endif
+
+        p.frameInitLowres = PFX(frame_init_lowres_core_avx2);
+
+#if X265_DEPTH <= 10
+        // TODO: depends on hps and vsp
+        ALL_LUMA_PU_T(luma_hvpp, interp_8tap_hv_pp_cpu);                        // calling luma_hvpp for all sizes
+        p.pu[LUMA_4x4].luma_hvpp = interp_8tap_hv_pp_cpu<LUMA_4x4>;             // ALL_LUMA_PU_T has declared all sizes except 4x4, hence calling luma_hvpp[4x4] 
+#endif
+
+        if (cpuMask & X265_CPU_BMI2)
+            p.scanPosLast = PFX(scanPosLast_avx2_bmi2);
+    }
+}
+#else // if HIGH_BIT_DEPTH
+
+void setupAssemblyPrimitives(EncoderPrimitives &p, int cpuMask) // Main
+{
+#if X86_64
+    p.scanPosLast = PFX(scanPosLast_x64);
+#endif
+
+    if (cpuMask & X265_CPU_SSE2)
+    {
+        /* We do not differentiate CPUs which support MMX and not SSE2. We only check
+         * for SSE2 and then use both MMX and SSE2 functions */
+        AVC_LUMA_PU(sad, mmx2);
+        AVC_LUMA_PU(sad_x3, mmx2);
+        AVC_LUMA_PU(sad_x4, mmx2);
+
+        p.pu[LUMA_16x16].sad = PFX(pixel_sad_16x16_sse2);
+        p.pu[LUMA_16x16].sad_x3 = PFX(pixel_sad_x3_16x16_sse2);
+        p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_sse2);
+        p.pu[LUMA_16x8].sad  = PFX(pixel_sad_16x8_sse2);
+        p.pu[LUMA_16x8].sad_x3  = PFX(pixel_sad_x3_16x8_sse2);
+        p.pu[LUMA_16x8].sad_x4  = PFX(pixel_sad_x4_16x8_sse2);
+        HEVC_SAD(sse2);
+
+        p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_mmx2);
+        ALL_LUMA_PU(satd, pixel_satd, sse2);
+
+        p.cu[BLOCK_4x4].sse_pp = PFX(pixel_ssd_4x4_mmx);
+        p.cu[BLOCK_8x8].sse_pp = PFX(pixel_ssd_8x8_mmx);
+        p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_mmx);
+
+        PIXEL_AVG_W4(mmx2);
+        PIXEL_AVG(sse2);
+        LUMA_VAR(sse2);
+
+        ASSIGN_SA8D(sse2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].sse_pp = PFX(pixel_ssd_4x8_mmx);
+        ASSIGN_SSE_PP(sse2);
+        ASSIGN_SSE_SS(sse2);
+
+        LUMA_PU_BLOCKCOPY(pp, sse2);
+        CHROMA_420_PU_BLOCKCOPY(pp, sse2);
+        CHROMA_422_PU_BLOCKCOPY(pp, sse2);
+
+        LUMA_CU_BLOCKCOPY(ss, sse2);
+        LUMA_CU_BLOCKCOPY(sp, sse2);
+        CHROMA_420_CU_BLOCKCOPY(ss, sse2);
+        CHROMA_422_CU_BLOCKCOPY(ss, sse2);
+        CHROMA_420_CU_BLOCKCOPY(sp, sse2);
+        CHROMA_422_CU_BLOCKCOPY(sp, sse2);
+
+        LUMA_VSS_FILTERS(sse2);
+        CHROMA_420_VSS_FILTERS(_sse2);
+        CHROMA_422_VSS_FILTERS(_sse2);
+        CHROMA_444_VSS_FILTERS(sse2);
+        CHROMA_420_VSP_FILTERS(_sse2);
+        CHROMA_422_VSP_FILTERS(_sse2);
+        CHROMA_444_VSP_FILTERS(_sse2);
+#if X86_64
+        ALL_CHROMA_420_PU(filter_vpp, interp_4tap_vert_pp, sse2);
+        ALL_CHROMA_422_PU(filter_vpp, interp_4tap_vert_pp, sse2);
+        ALL_CHROMA_444_PU(filter_vpp, interp_4tap_vert_pp, sse2);
+        ALL_CHROMA_420_PU(filter_vps, interp_4tap_vert_ps, sse2);
+        ALL_CHROMA_422_PU(filter_vps, interp_4tap_vert_ps, sse2);
+        ALL_CHROMA_444_PU(filter_vps, interp_4tap_vert_ps, sse2);
+        ALL_LUMA_PU(luma_vpp, interp_8tap_vert_pp, sse2);
+        ALL_LUMA_PU(luma_vps, interp_8tap_vert_ps, sse2);
+#else
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].filter_vpp = PFX(interp_4tap_vert_pp_2x4_sse2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].filter_vpp = PFX(interp_4tap_vert_pp_2x8_sse2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vpp = PFX(interp_4tap_vert_pp_4x2_sse2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_sse2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_vpp = PFX(interp_4tap_vert_pp_4x8_sse2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_vpp = PFX(interp_4tap_vert_pp_4x16_sse2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].filter_vpp = PFX(interp_4tap_vert_pp_2x16_sse2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_sse2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_vpp = PFX(interp_4tap_vert_pp_4x8_sse2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_vpp = PFX(interp_4tap_vert_pp_4x16_sse2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].filter_vpp = PFX(interp_4tap_vert_pp_4x32_sse2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_sse2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_vpp = PFX(interp_4tap_vert_pp_4x8_sse2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_vpp = PFX(interp_4tap_vert_pp_4x16_sse2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].filter_vps = PFX(interp_4tap_vert_ps_2x4_sse2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].filter_vps = PFX(interp_4tap_vert_ps_2x8_sse2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vps = PFX(interp_4tap_vert_ps_4x2_sse2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vps = PFX(interp_4tap_vert_ps_4x4_sse2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_vps = PFX(interp_4tap_vert_ps_4x8_sse2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].filter_vps = PFX(interp_4tap_vert_ps_2x16_sse2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vps = PFX(interp_4tap_vert_ps_4x4_sse2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_vps = PFX(interp_4tap_vert_ps_4x8_sse2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_vps = PFX(interp_4tap_vert_ps_4x16_sse2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].filter_vps = PFX(interp_4tap_vert_ps_4x32_sse2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_vps = PFX(interp_4tap_vert_ps_4x4_sse2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_vps = PFX(interp_4tap_vert_ps_4x8_sse2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_vps = PFX(interp_4tap_vert_ps_4x16_sse2);
+#endif
+
+        ALL_LUMA_PU(luma_hpp, interp_8tap_horiz_pp, sse2);
+        p.pu[LUMA_4x4].luma_hpp = PFX(interp_8tap_horiz_pp_4x4_sse2);
+        ALL_LUMA_PU(luma_hps, interp_8tap_horiz_ps, sse2);
+        p.pu[LUMA_4x4].luma_hps = PFX(interp_8tap_horiz_ps_4x4_sse2);
+        p.pu[LUMA_8x8].luma_hvpp = PFX(interp_8tap_hv_pp_8x8_sse3);
+
+        //p.frameInitLowres = PFX(frame_init_lowres_core_mmx2);
+        p.frameInitLowres = PFX(frame_init_lowres_core_sse2);
+
+        ALL_LUMA_TU(blockfill_s, blockfill_s, sse2);
+        ALL_LUMA_TU_S(cpy2Dto1D_shl, cpy2Dto1D_shl_, sse2);
+        ALL_LUMA_TU_S(cpy2Dto1D_shr, cpy2Dto1D_shr_, sse2);
+        ALL_LUMA_TU_S(cpy1Dto2D_shl, cpy1Dto2D_shl_, sse2);
+        ALL_LUMA_TU_S(cpy1Dto2D_shr, cpy1Dto2D_shr_, sse2);
+        ALL_LUMA_TU_S(ssd_s, pixel_ssd_s_, sse2);
+
+        ALL_LUMA_TU_S(intra_pred[PLANAR_IDX], intra_pred_planar, sse2);
+        ALL_LUMA_TU_S(intra_pred[DC_IDX], intra_pred_dc, sse2);
+
+        p.cu[BLOCK_4x4].intra_pred[2] = PFX(intra_pred_ang4_2_sse2);
+        p.cu[BLOCK_4x4].intra_pred[3] = PFX(intra_pred_ang4_3_sse2);
+        p.cu[BLOCK_4x4].intra_pred[4] = PFX(intra_pred_ang4_4_sse2);
+        p.cu[BLOCK_4x4].intra_pred[5] = PFX(intra_pred_ang4_5_sse2);
+        p.cu[BLOCK_4x4].intra_pred[6] = PFX(intra_pred_ang4_6_sse2);
+        p.cu[BLOCK_4x4].intra_pred[7] = PFX(intra_pred_ang4_7_sse2);
+        p.cu[BLOCK_4x4].intra_pred[8] = PFX(intra_pred_ang4_8_sse2);
+        p.cu[BLOCK_4x4].intra_pred[9] = PFX(intra_pred_ang4_9_sse2);
+        p.cu[BLOCK_4x4].intra_pred[10] = PFX(intra_pred_ang4_10_sse2);
+        p.cu[BLOCK_4x4].intra_pred[11] = PFX(intra_pred_ang4_11_sse2);
+        p.cu[BLOCK_4x4].intra_pred[12] = PFX(intra_pred_ang4_12_sse2);
+        p.cu[BLOCK_4x4].intra_pred[13] = PFX(intra_pred_ang4_13_sse2);
+        p.cu[BLOCK_4x4].intra_pred[14] = PFX(intra_pred_ang4_14_sse2);
+        p.cu[BLOCK_4x4].intra_pred[15] = PFX(intra_pred_ang4_15_sse2);
+        p.cu[BLOCK_4x4].intra_pred[16] = PFX(intra_pred_ang4_16_sse2);
+        p.cu[BLOCK_4x4].intra_pred[17] = PFX(intra_pred_ang4_17_sse2);
+        p.cu[BLOCK_4x4].intra_pred[18] = PFX(intra_pred_ang4_18_sse2);
+        p.cu[BLOCK_4x4].intra_pred[19] = PFX(intra_pred_ang4_19_sse2);
+        p.cu[BLOCK_4x4].intra_pred[20] = PFX(intra_pred_ang4_20_sse2);
+        p.cu[BLOCK_4x4].intra_pred[21] = PFX(intra_pred_ang4_21_sse2);
+        p.cu[BLOCK_4x4].intra_pred[22] = PFX(intra_pred_ang4_22_sse2);
+        p.cu[BLOCK_4x4].intra_pred[23] = PFX(intra_pred_ang4_23_sse2);
+        p.cu[BLOCK_4x4].intra_pred[24] = PFX(intra_pred_ang4_24_sse2);
+        p.cu[BLOCK_4x4].intra_pred[25] = PFX(intra_pred_ang4_25_sse2);
+        p.cu[BLOCK_4x4].intra_pred[26] = PFX(intra_pred_ang4_26_sse2);
+        p.cu[BLOCK_4x4].intra_pred[27] = PFX(intra_pred_ang4_27_sse2);
+        p.cu[BLOCK_4x4].intra_pred[28] = PFX(intra_pred_ang4_28_sse2);
+        p.cu[BLOCK_4x4].intra_pred[29] = PFX(intra_pred_ang4_29_sse2);
+        p.cu[BLOCK_4x4].intra_pred[30] = PFX(intra_pred_ang4_30_sse2);
+        p.cu[BLOCK_4x4].intra_pred[31] = PFX(intra_pred_ang4_31_sse2);
+        p.cu[BLOCK_4x4].intra_pred[32] = PFX(intra_pred_ang4_32_sse2);
+        p.cu[BLOCK_4x4].intra_pred[33] = PFX(intra_pred_ang4_33_sse2);
+
+        p.cu[BLOCK_4x4].intra_pred_allangs = PFX(all_angs_pred_4x4_sse2);
+
+        p.cu[BLOCK_4x4].calcresidual = PFX(getResidual4_sse2);
+        p.cu[BLOCK_8x8].calcresidual = PFX(getResidual8_sse2);
+
+        ALL_LUMA_TU_S(transpose, transpose, sse2);
+        p.cu[BLOCK_64x64].transpose = PFX(transpose64_sse2);
+
+        p.ssim_4x4x2_core = PFX(pixel_ssim_4x4x2_core_sse2);
+        p.ssim_end_4 = PFX(pixel_ssim_end4_sse2);
+
+        p.cu[BLOCK_4x4].dct = PFX(dct4_sse2);
+        p.cu[BLOCK_8x8].dct = PFX(dct8_sse2);
+        p.cu[BLOCK_4x4].idct = PFX(idct4_sse2);
+#if X86_64
+        p.cu[BLOCK_8x8].idct = PFX(idct8_sse2);
+
+        // TODO: it is passed smoke test, but we need testbench, so temporary disable
+        p.costC1C2Flag = x265_costC1C2Flag_sse2;
+#endif
+        p.idst4x4 = PFX(idst4_sse2);
+        p.dst4x4 = PFX(dst4_sse2);
+
+        p.planecopy_sp = PFX(downShift_16_sse2);
+        ALL_CHROMA_420_PU(p2s, filterPixelToShort, sse2);
+        ALL_CHROMA_422_PU(p2s, filterPixelToShort, sse2);
+        ALL_CHROMA_444_PU(p2s, filterPixelToShort, sse2);
+        ALL_LUMA_PU(convert_p2s, filterPixelToShort, sse2);
+        ALL_LUMA_TU(count_nonzero, count_nonzero, sse2);
+    }
+    if (cpuMask & X265_CPU_SSE3)
+    {
+        ALL_CHROMA_420_PU(filter_hpp, interp_4tap_horiz_pp, sse3);
+        ALL_CHROMA_422_PU(filter_hpp, interp_4tap_horiz_pp, sse3);
+        ALL_CHROMA_444_PU(filter_hpp, interp_4tap_horiz_pp, sse3);
+        ALL_CHROMA_420_PU(filter_hps, interp_4tap_horiz_ps, sse3);
+        ALL_CHROMA_422_PU(filter_hps, interp_4tap_horiz_ps, sse3);
+        ALL_CHROMA_444_PU(filter_hps, interp_4tap_horiz_ps, sse3);
+    }
+    if (cpuMask & X265_CPU_SSSE3)
+    {
+        p.pu[LUMA_8x16].sad_x3 = PFX(pixel_sad_x3_8x16_ssse3);
+        p.pu[LUMA_8x32].sad_x3 = PFX(pixel_sad_x3_8x32_ssse3);
+        p.pu[LUMA_12x16].sad_x3 = PFX(pixel_sad_x3_12x16_ssse3);
+        HEVC_SAD_X3(ssse3);
+
+        p.pu[LUMA_8x4].sad_x4  = PFX(pixel_sad_x4_8x4_ssse3);
+        p.pu[LUMA_8x8].sad_x4  = PFX(pixel_sad_x4_8x8_ssse3);
+        p.pu[LUMA_8x16].sad_x4 = PFX(pixel_sad_x4_8x16_ssse3);
+        p.pu[LUMA_8x32].sad_x4 = PFX(pixel_sad_x4_8x32_ssse3);
+        p.pu[LUMA_12x16].sad_x4 = PFX(pixel_sad_x4_12x16_ssse3);
+        HEVC_SAD_X4(ssse3);
+
+        p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_ssse3);
+        ALL_LUMA_PU(satd, pixel_satd, ssse3);
+
+        ASSIGN_SA8D(ssse3);
+        PIXEL_AVG(ssse3);
+        PIXEL_AVG_W4(ssse3);
+        INTRA_ANG_SSSE3(ssse3);
+
+        ASSIGN_SSE_PP(ssse3);
+        p.cu[BLOCK_4x4].sse_pp = PFX(pixel_ssd_4x4_ssse3);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].sse_pp = PFX(pixel_ssd_4x8_ssse3);
+
+        p.dst4x4 = PFX(dst4_ssse3);
+        p.cu[BLOCK_8x8].idct = PFX(idct8_ssse3);
+
+        // MUST be done after LUMA_FILTERS() to overwrite default version
+        p.pu[LUMA_8x8].luma_hvpp = PFX(interp_8tap_hv_pp_8x8_ssse3);
+
+        p.frameInitLowres = PFX(frame_init_lowres_core_ssse3);
+        p.scale1D_128to64 = PFX(scale1D_128to64_ssse3);
+        p.scale2D_64to32 = PFX(scale2D_64to32_ssse3);
+
+        p.pu[LUMA_8x4].convert_p2s = PFX(filterPixelToShort_8x4_ssse3);
+        p.pu[LUMA_8x8].convert_p2s = PFX(filterPixelToShort_8x8_ssse3);
+        p.pu[LUMA_8x16].convert_p2s = PFX(filterPixelToShort_8x16_ssse3);
+        p.pu[LUMA_8x32].convert_p2s = PFX(filterPixelToShort_8x32_ssse3);
+        p.pu[LUMA_16x4].convert_p2s = PFX(filterPixelToShort_16x4_ssse3);
+        p.pu[LUMA_16x8].convert_p2s = PFX(filterPixelToShort_16x8_ssse3);
+        p.pu[LUMA_16x12].convert_p2s = PFX(filterPixelToShort_16x12_ssse3);
+        p.pu[LUMA_16x16].convert_p2s = PFX(filterPixelToShort_16x16_ssse3);
+        p.pu[LUMA_16x32].convert_p2s = PFX(filterPixelToShort_16x32_ssse3);
+        p.pu[LUMA_16x64].convert_p2s = PFX(filterPixelToShort_16x64_ssse3);
+        p.pu[LUMA_32x8].convert_p2s = PFX(filterPixelToShort_32x8_ssse3);
+        p.pu[LUMA_32x16].convert_p2s = PFX(filterPixelToShort_32x16_ssse3);
+        p.pu[LUMA_32x24].convert_p2s = PFX(filterPixelToShort_32x24_ssse3);
+        p.pu[LUMA_32x32].convert_p2s = PFX(filterPixelToShort_32x32_ssse3);
+        p.pu[LUMA_32x64].convert_p2s = PFX(filterPixelToShort_32x64_ssse3);
+        p.pu[LUMA_64x16].convert_p2s = PFX(filterPixelToShort_64x16_ssse3);
+        p.pu[LUMA_64x32].convert_p2s = PFX(filterPixelToShort_64x32_ssse3);
+        p.pu[LUMA_64x48].convert_p2s = PFX(filterPixelToShort_64x48_ssse3);
+        p.pu[LUMA_64x64].convert_p2s = PFX(filterPixelToShort_64x64_ssse3);
+        p.pu[LUMA_12x16].convert_p2s = PFX(filterPixelToShort_12x16_ssse3);
+        p.pu[LUMA_24x32].convert_p2s = PFX(filterPixelToShort_24x32_ssse3);
+        p.pu[LUMA_48x64].convert_p2s = PFX(filterPixelToShort_48x64_ssse3);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].p2s = PFX(filterPixelToShort_8x2_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].p2s = PFX(filterPixelToShort_8x4_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].p2s = PFX(filterPixelToShort_8x6_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].p2s = PFX(filterPixelToShort_8x8_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].p2s = PFX(filterPixelToShort_8x16_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].p2s = PFX(filterPixelToShort_8x32_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].p2s = PFX(filterPixelToShort_16x4_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].p2s = PFX(filterPixelToShort_16x8_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].p2s = PFX(filterPixelToShort_16x12_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].p2s = PFX(filterPixelToShort_16x16_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].p2s = PFX(filterPixelToShort_16x32_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].p2s = PFX(filterPixelToShort_32x8_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].p2s = PFX(filterPixelToShort_32x16_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].p2s = PFX(filterPixelToShort_32x24_ssse3);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].p2s = PFX(filterPixelToShort_32x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].p2s = PFX(filterPixelToShort_8x4_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].p2s = PFX(filterPixelToShort_8x8_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].p2s = PFX(filterPixelToShort_8x12_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].p2s = PFX(filterPixelToShort_8x16_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].p2s = PFX(filterPixelToShort_8x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].p2s = PFX(filterPixelToShort_8x64_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].p2s = PFX(filterPixelToShort_12x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].p2s = PFX(filterPixelToShort_16x8_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].p2s = PFX(filterPixelToShort_16x16_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].p2s = PFX(filterPixelToShort_16x24_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].p2s = PFX(filterPixelToShort_16x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].p2s = PFX(filterPixelToShort_16x64_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].p2s = PFX(filterPixelToShort_24x64_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].p2s = PFX(filterPixelToShort_32x16_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].p2s = PFX(filterPixelToShort_32x32_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].p2s = PFX(filterPixelToShort_32x48_ssse3);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].p2s = PFX(filterPixelToShort_32x64_ssse3);
+        p.findPosFirstLast = PFX(findPosFirstLast_ssse3);
+    }
+    if (cpuMask & X265_CPU_SSE4)
+    {
+        p.sign = PFX(calSign_sse4);
+        p.saoCuOrgE0 = PFX(saoCuOrgE0_sse4);
+        p.saoCuOrgE1 = PFX(saoCuOrgE1_sse4);
+        p.saoCuOrgE1_2Rows = PFX(saoCuOrgE1_2Rows_sse4);
+        p.saoCuOrgE2[0] = PFX(saoCuOrgE2_sse4);
+        p.saoCuOrgE2[1] = PFX(saoCuOrgE2_sse4);
+        p.saoCuOrgE3[0] = PFX(saoCuOrgE3_sse4);
+        p.saoCuOrgE3[1] = PFX(saoCuOrgE3_sse4);
+        p.saoCuOrgB0 = PFX(saoCuOrgB0_sse4);
+
+        LUMA_ADDAVG(sse4);
+        CHROMA_420_ADDAVG(sse4);
+        CHROMA_422_ADDAVG(sse4);
+
+        // TODO: check POPCNT flag!
+        ALL_LUMA_TU_S(copy_cnt, copy_cnt_, sse4);
+
+        p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_sse4);
+        ALL_LUMA_PU(satd, pixel_satd, sse4);
+        ASSIGN_SA8D(sse4);
+        ASSIGN_SSE_SS(sse4);
+        p.cu[BLOCK_64x64].sse_pp = PFX(pixel_ssd_64x64_sse4);
+
+        LUMA_PIXELSUB(sse4);
+        CHROMA_420_PIXELSUB_PS(sse4);
+        CHROMA_422_PIXELSUB_PS(sse4);
+
+        LUMA_FILTERS(sse4);
+        CHROMA_420_FILTERS(sse4);
+        CHROMA_422_FILTERS(sse4);
+        CHROMA_444_FILTERS(sse4);
+        CHROMA_420_VSS_FILTERS_SSE4(_sse4);
+        CHROMA_422_VSS_FILTERS_SSE4(_sse4);
+        CHROMA_420_VSP_FILTERS_SSE4(_sse4);
+        CHROMA_422_VSP_FILTERS_SSE4(_sse4);
+        CHROMA_444_VSP_FILTERS_SSE4(_sse4);
+
+        // MUST be done after LUMA_FILTERS() to overwrite default version
+        p.pu[LUMA_8x8].luma_hvpp = PFX(interp_8tap_hv_pp_8x8_ssse3);
+
+        LUMA_CU_BLOCKCOPY(ps, sse4);
+        CHROMA_420_CU_BLOCKCOPY(ps, sse4);
+        CHROMA_422_CU_BLOCKCOPY(ps, sse4);
+
+        p.cu[BLOCK_16x16].calcresidual = PFX(getResidual16_sse4);
+        p.cu[BLOCK_32x32].calcresidual = PFX(getResidual32_sse4);
+        p.cu[BLOCK_8x8].dct = PFX(dct8_sse4);
+        p.denoiseDct = PFX(denoise_dct_sse4);
+        p.quant = PFX(quant_sse4);
+        p.nquant = PFX(nquant_sse4);
+        p.dequant_normal = PFX(dequant_normal_sse4);
+        p.dequant_scaling = PFX(dequant_scaling_sse4);
+
+        p.weight_pp = PFX(weight_pp_sse4);
+        p.weight_sp = PFX(weight_sp_sse4);
+
+        p.cu[BLOCK_4x4].intra_filter = PFX(intra_filter_4x4_sse4);
+        p.cu[BLOCK_8x8].intra_filter = PFX(intra_filter_8x8_sse4);
+        p.cu[BLOCK_16x16].intra_filter = PFX(intra_filter_16x16_sse4);
+        p.cu[BLOCK_32x32].intra_filter = PFX(intra_filter_32x32_sse4);
+
+        ALL_LUMA_TU_S(intra_pred[PLANAR_IDX], intra_pred_planar, sse4);
+        ALL_LUMA_TU_S(intra_pred[DC_IDX], intra_pred_dc, sse4);
+        ALL_LUMA_TU(intra_pred_allangs, all_angs_pred, sse4);
+
+        INTRA_ANG_SSE4_COMMON(sse4);
+        INTRA_ANG_SSE4(sse4);
+
+        p.cu[BLOCK_4x4].psy_cost_pp = PFX(psyCost_pp_4x4_sse4);
+        p.cu[BLOCK_4x4].psy_cost_ss = PFX(psyCost_ss_4x4_sse4);
+
+        p.pu[LUMA_4x4].convert_p2s = PFX(filterPixelToShort_4x4_sse4);
+        p.pu[LUMA_4x8].convert_p2s = PFX(filterPixelToShort_4x8_sse4);
+        p.pu[LUMA_4x16].convert_p2s = PFX(filterPixelToShort_4x16_sse4);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].p2s = PFX(filterPixelToShort_2x4_sse4);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].p2s = PFX(filterPixelToShort_2x8_sse4);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].p2s = PFX(filterPixelToShort_4x2_sse4);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].p2s = PFX(filterPixelToShort_4x4_sse4);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].p2s = PFX(filterPixelToShort_4x8_sse4);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].p2s = PFX(filterPixelToShort_4x16_sse4);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].p2s = PFX(filterPixelToShort_6x8_sse4);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].p2s = PFX(filterPixelToShort_2x8_sse4);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].p2s = PFX(filterPixelToShort_2x16_sse4);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].p2s = PFX(filterPixelToShort_4x4_sse4);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].p2s = PFX(filterPixelToShort_4x8_sse4);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].p2s = PFX(filterPixelToShort_4x16_sse4);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].p2s = PFX(filterPixelToShort_4x32_sse4);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].p2s = PFX(filterPixelToShort_6x16_sse4);
+
+#if X86_64
+        p.saoCuStatsBO = PFX(saoCuStatsBO_sse4);
+        p.saoCuStatsE0 = PFX(saoCuStatsE0_sse4);
+        p.saoCuStatsE1 = PFX(saoCuStatsE1_sse4);
+        p.saoCuStatsE2 = PFX(saoCuStatsE2_sse4);
+        p.saoCuStatsE3 = PFX(saoCuStatsE3_sse4);
+
+        ALL_LUMA_CU(psy_cost_pp, psyCost_pp, sse4);
+        ALL_LUMA_CU(psy_cost_ss, psyCost_ss, sse4);
+
+        p.costCoeffNxN = PFX(costCoeffNxN_sse4);
+#endif
+        p.costCoeffRemain = PFX(costCoeffRemain_sse4);
+    }
+    if (cpuMask & X265_CPU_AVX)
+    {
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].satd = PFX(pixel_satd_16x24_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].satd = PFX(pixel_satd_32x48_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].satd = PFX(pixel_satd_24x64_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].satd = PFX(pixel_satd_8x64_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].satd = PFX(pixel_satd_8x12_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].satd = PFX(pixel_satd_12x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].satd = PFX(pixel_satd_4x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].satd = PFX(pixel_satd_16x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].satd = PFX(pixel_satd_32x64_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].satd = PFX(pixel_satd_16x16_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].satd = PFX(pixel_satd_32x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].satd = PFX(pixel_satd_16x64_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].satd = PFX(pixel_satd_32x16_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].satd = PFX(pixel_satd_8x16_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].satd = PFX(pixel_satd_8x8_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].satd = PFX(pixel_satd_8x32_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].satd = PFX(pixel_satd_8x8_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].satd = PFX(pixel_satd_16x16_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].satd = PFX(pixel_satd_32x32_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].satd = PFX(pixel_satd_8x16_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].satd = PFX(pixel_satd_32x16_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].satd = PFX(pixel_satd_16x32_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].satd = PFX(pixel_satd_16x12_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].satd = PFX(pixel_satd_32x24_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].satd = PFX(pixel_satd_24x32_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].satd = PFX(pixel_satd_8x32_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sa8d = PFX(pixel_sa8d_32x32_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sa8d = PFX(pixel_sa8d_16x16_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sa8d = PFX(pixel_sa8d_8x8_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sse_pp = PFX(pixel_ssd_8x8_avx);
+        p.pu[LUMA_16x4].sad_x4  = PFX(pixel_sad_x4_16x4_avx);
+        p.cu[BLOCK_16x16].copy_ss = PFX(blockcopy_ss_16x16_avx);
+        p.cu[BLOCK_32x32].copy_ss = PFX(blockcopy_ss_32x32_avx);
+        p.cu[BLOCK_64x64].copy_ss = PFX(blockcopy_ss_64x64_avx);
+        p.chroma[X265_CSP_I420].cu[CHROMA_420_16x16].copy_ss = PFX(blockcopy_ss_16x16_avx);
+        p.chroma[X265_CSP_I420].cu[CHROMA_420_32x32].copy_ss = PFX(blockcopy_ss_32x32_avx);
+        p.chroma[X265_CSP_I422].cu[CHROMA_422_16x32].copy_ss = PFX(blockcopy_ss_16x32_avx);
+        p.chroma[X265_CSP_I422].cu[CHROMA_422_32x64].copy_ss = PFX(blockcopy_ss_32x64_avx);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].copy_pp = PFX(blockcopy_pp_32x8_avx);
+        p.pu[LUMA_32x8].copy_pp = PFX(blockcopy_pp_32x8_avx);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].copy_pp = PFX(blockcopy_pp_32x16_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].copy_pp = PFX(blockcopy_pp_32x16_avx);
+        p.pu[LUMA_32x16].copy_pp = PFX(blockcopy_pp_32x16_avx);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].copy_pp = PFX(blockcopy_pp_32x24_avx);
+        p.pu[LUMA_32x24].copy_pp = PFX(blockcopy_pp_32x24_avx);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].copy_pp = PFX(blockcopy_pp_32x32_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].copy_pp = PFX(blockcopy_pp_32x32_avx);
+        p.pu[LUMA_32x32].copy_pp  = PFX(blockcopy_pp_32x32_avx);
+
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].copy_pp = PFX(blockcopy_pp_32x48_avx);
+
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].copy_pp = PFX(blockcopy_pp_32x64_avx);
+        p.pu[LUMA_32x64].copy_pp = PFX(blockcopy_pp_32x64_avx);
+
+        p.pu[LUMA_64x16].copy_pp = PFX(blockcopy_pp_64x16_avx);
+        p.pu[LUMA_64x32].copy_pp = PFX(blockcopy_pp_64x32_avx);
+        p.pu[LUMA_64x48].copy_pp = PFX(blockcopy_pp_64x48_avx);
+        p.pu[LUMA_64x64].copy_pp = PFX(blockcopy_pp_64x64_avx);
+
+        p.pu[LUMA_48x64].copy_pp = PFX(blockcopy_pp_48x64_avx);
+
+        /* The following primitives have been disabled since performance compared to SSE4.2 is negligible/negative */
+#if 0
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].satd = PFX(pixel_satd_16x4_avx);
+        p.pu[LUMA_16x4].satd  = PFX(pixel_satd_16x4_avx);
+        p.pu[LUMA_16x8].satd  = PFX(pixel_satd_16x8_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].satd = PFX(pixel_satd_16x8_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].satd = PFX(pixel_satd_16x8_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].satd = PFX(pixel_satd_32x8_avx);
+        p.pu[LUMA_32x8].satd  = PFX(pixel_satd_32x8_avx);
+        p.cu[BLOCK_8x8].sa8d   = PFX(pixel_sa8d_8x8_avx);
+        p.cu[BLOCK_16x16].sa8d = PFX(pixel_sa8d_16x16_avx);
+        p.cu[BLOCK_32x32].sa8d = PFX(pixel_sa8d_32x32_avx);
+        p.cu[BLOCK_64x64].sa8d = PFX(pixel_sa8d_64x64_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].sa8d = PFX(pixel_satd_4x4_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sa8d = PFX(pixel_sa8d_8x16_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sa8d = PFX(pixel_sa8d_16x32_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sa8d = PFX(pixel_sa8d_32x64_avx);
+        p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_avx);
+
+        ALL_LUMA_PU(satd, pixel_satd, avx);
+        p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].satd = PFX(pixel_satd_8x4_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].satd = PFX(pixel_satd_4x8_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].satd = PFX(pixel_satd_4x16_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].satd = PFX(pixel_satd_4x4_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].satd = PFX(pixel_satd_4x4_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].satd = PFX(pixel_satd_8x4_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].satd = PFX(pixel_satd_4x8_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].satd = PFX(pixel_satd_12x16_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].satd = PFX(pixel_satd_4x16_avx);
+        p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_avx);
+        p.cu[BLOCK_32x32].sse_pp = PFX(pixel_ssd_32x32_avx);
+        p.cu[BLOCK_8x8].sse_pp   = PFX(pixel_ssd_8x8_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sse_pp = PFX(pixel_ssd_8x16_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = PFX(pixel_ssd_16x32_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sse_pp = PFX(pixel_ssd_32x64_avx);
+        ASSIGN_SSE_SS(avx);
+
+        LUMA_VAR(avx);
+        p.pu[LUMA_12x16].sad_x3 = PFX(pixel_sad_x3_12x16_avx);
+        p.pu[LUMA_16x4].sad_x3  = PFX(pixel_sad_x3_16x4_avx);
+        HEVC_SAD_X3(avx);
+
+        p.pu[LUMA_32x8].sad_x4  = PFX(pixel_sad_x4_32x8_avx);
+        p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx);
+        p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx);
+        p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx);
+        p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx);
+        p.pu[LUMA_16x8].sad_x4  = PFX(pixel_sad_x4_16x8_avx);
+        p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_avx);
+        p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_avx);
+        p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_avx);
+        p.pu[LUMA_12x16].sad_x4 = PFX(pixel_sad_x4_12x16_avx);
+        p.pu[LUMA_16x64].sad_x4 = PFX(pixel_sad_x4_16x64_avx);
+        p.pu[LUMA_24x32].sad_x4 = PFX(pixel_sad_x4_24x32_avx);
+        p.pu[LUMA_48x64].sad_x4 = PFX(pixel_sad_x4_48x64_avx);
+        p.pu[LUMA_64x16].sad_x4 = PFX(pixel_sad_x4_64x16_avx);
+        p.pu[LUMA_64x32].sad_x4 = PFX(pixel_sad_x4_64x32_avx);
+        p.pu[LUMA_64x48].sad_x4 = PFX(pixel_sad_x4_64x48_avx);
+        p.pu[LUMA_64x64].sad_x4 = PFX(pixel_sad_x4_64x64_avx);
+
+        p.ssim_4x4x2_core = PFX(pixel_ssim_4x4x2_core_avx);
+        p.ssim_end_4 = PFX(pixel_ssim_end4_avx);
+        p.frameInitLowres = PFX(frame_init_lowres_core_avx);
+#endif
+    }
+    if (cpuMask & X265_CPU_XOP)
+    {
+        //p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_xop); this one is broken
+        ALL_LUMA_PU(satd, pixel_satd, xop);
+        ASSIGN_SA8D(xop);
+        LUMA_VAR(xop);
+        p.cu[BLOCK_8x8].sse_pp = PFX(pixel_ssd_8x8_xop);
+        p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_xop);
+        p.frameInitLowres = PFX(frame_init_lowres_core_xop);
+    }
+#if X86_64
+    if (cpuMask & X265_CPU_AVX2)
+    {
+        p.cu[BLOCK_4x4].intra_filter = PFX(intra_filter_4x4_avx2);
+
+        p.planecopy_sp = PFX(downShift_16_avx2);
+
+        p.cu[BLOCK_32x32].intra_pred[DC_IDX] = PFX(intra_pred_dc32_avx2);
+
+        p.cu[BLOCK_16x16].intra_pred[PLANAR_IDX] = PFX(intra_pred_planar16_avx2);
+        p.cu[BLOCK_32x32].intra_pred[PLANAR_IDX] = PFX(intra_pred_planar32_avx2);
+
+        p.idst4x4 = PFX(idst4_avx2);
+        p.dst4x4 = PFX(dst4_avx2);
+        p.scale2D_64to32 = PFX(scale2D_64to32_avx2);
+        p.saoCuOrgE0 = PFX(saoCuOrgE0_avx2);
+        p.saoCuOrgE1 = PFX(saoCuOrgE1_avx2);
+        p.saoCuOrgE1_2Rows = PFX(saoCuOrgE1_2Rows_avx2);
+        p.saoCuOrgE2[0] = PFX(saoCuOrgE2_avx2);
+        p.saoCuOrgE2[1] = PFX(saoCuOrgE2_32_avx2);
+        p.saoCuOrgE3[1] = PFX(saoCuOrgE3_32_avx2);
+        p.saoCuOrgB0 = PFX(saoCuOrgB0_avx2);
+        p.sign = PFX(calSign_avx2);
+
+        p.cu[BLOCK_4x4].psy_cost_ss = PFX(psyCost_ss_4x4_avx2);
+        p.cu[BLOCK_8x8].psy_cost_ss = PFX(psyCost_ss_8x8_avx2);
+        p.cu[BLOCK_16x16].psy_cost_ss = PFX(psyCost_ss_16x16_avx2);
+        p.cu[BLOCK_32x32].psy_cost_ss = PFX(psyCost_ss_32x32_avx2);
+        p.cu[BLOCK_64x64].psy_cost_ss = PFX(psyCost_ss_64x64_avx2);
+
+        p.cu[BLOCK_4x4].psy_cost_pp = PFX(psyCost_pp_4x4_avx2);
+        p.cu[BLOCK_8x8].psy_cost_pp = PFX(psyCost_pp_8x8_avx2);
+        p.cu[BLOCK_16x16].psy_cost_pp = PFX(psyCost_pp_16x16_avx2);
+        p.cu[BLOCK_32x32].psy_cost_pp = PFX(psyCost_pp_32x32_avx2);
+        p.cu[BLOCK_64x64].psy_cost_pp = PFX(psyCost_pp_64x64_avx2);
+        p.pu[LUMA_16x4].addAvg = PFX(addAvg_16x4_avx2);
+        p.pu[LUMA_16x8].addAvg = PFX(addAvg_16x8_avx2);
+        p.pu[LUMA_16x12].addAvg = PFX(addAvg_16x12_avx2);
+        p.pu[LUMA_16x16].addAvg = PFX(addAvg_16x16_avx2);
+        p.pu[LUMA_16x32].addAvg = PFX(addAvg_16x32_avx2);
+        p.pu[LUMA_16x64].addAvg = PFX(addAvg_16x64_avx2);
+        p.pu[LUMA_24x32].addAvg = PFX(addAvg_24x32_avx2);
+        p.pu[LUMA_32x8].addAvg = PFX(addAvg_32x8_avx2);
+        p.pu[LUMA_32x16].addAvg = PFX(addAvg_32x16_avx2);
+        p.pu[LUMA_32x24].addAvg = PFX(addAvg_32x24_avx2);
+        p.pu[LUMA_32x32].addAvg = PFX(addAvg_32x32_avx2);
+        p.pu[LUMA_32x64].addAvg = PFX(addAvg_32x64_avx2);
+        p.pu[LUMA_48x64].addAvg = PFX(addAvg_48x64_avx2);
+        p.pu[LUMA_64x16].addAvg = PFX(addAvg_64x16_avx2);
+        p.pu[LUMA_64x32].addAvg = PFX(addAvg_64x32_avx2);
+        p.pu[LUMA_64x48].addAvg = PFX(addAvg_64x48_avx2);
+        p.pu[LUMA_64x64].addAvg = PFX(addAvg_64x64_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg = PFX(addAvg_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg = PFX(addAvg_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg = PFX(addAvg_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].addAvg = PFX(addAvg_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].addAvg = PFX(addAvg_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg = PFX(addAvg_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg = PFX(addAvg_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg = PFX(addAvg_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg = PFX(addAvg_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg = PFX(addAvg_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg = PFX(addAvg_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg = PFX(addAvg_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg = PFX(addAvg_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg = PFX(addAvg_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg = PFX(addAvg_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg = PFX(addAvg_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg = PFX(addAvg_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg = PFX(addAvg_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg = PFX(addAvg_32x64_avx2);
+
+        p.cu[BLOCK_8x8].sa8d = PFX(pixel_sa8d_8x8_avx2);
+        p.cu[BLOCK_16x16].sa8d = PFX(pixel_sa8d_16x16_avx2);
+        p.cu[BLOCK_32x32].sa8d = PFX(pixel_sa8d_32x32_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sa8d = PFX(pixel_sa8d_8x8_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sa8d = PFX(pixel_sa8d_16x16_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sa8d = PFX(pixel_sa8d_32x32_avx2);
+
+        p.cu[BLOCK_16x16].add_ps = PFX(pixel_add_ps_16x16_avx2);
+        p.cu[BLOCK_32x32].add_ps = PFX(pixel_add_ps_32x32_avx2);
+        p.cu[BLOCK_64x64].add_ps = PFX(pixel_add_ps_64x64_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].add_ps = PFX(pixel_add_ps_16x16_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].add_ps = PFX(pixel_add_ps_32x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].add_ps = PFX(pixel_add_ps_16x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].add_ps = PFX(pixel_add_ps_32x64_avx2);
+
+        p.cu[BLOCK_16x16].sub_ps = PFX(pixel_sub_ps_16x16_avx2);
+        p.cu[BLOCK_32x32].sub_ps = PFX(pixel_sub_ps_32x32_avx2);
+        p.cu[BLOCK_64x64].sub_ps = PFX(pixel_sub_ps_64x64_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sub_ps = PFX(pixel_sub_ps_16x16_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sub_ps = PFX(pixel_sub_ps_32x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sub_ps = PFX(pixel_sub_ps_16x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sub_ps = PFX(pixel_sub_ps_32x64_avx2);
+
+        p.pu[LUMA_32x64].pixelavg_pp = PFX(pixel_avg_32x64_avx2);
+        p.pu[LUMA_32x32].pixelavg_pp = PFX(pixel_avg_32x32_avx2);
+        p.pu[LUMA_32x24].pixelavg_pp = PFX(pixel_avg_32x24_avx2);
+        p.pu[LUMA_32x16].pixelavg_pp = PFX(pixel_avg_32x16_avx2);
+        p.pu[LUMA_32x8].pixelavg_pp = PFX(pixel_avg_32x8_avx2);
+        p.pu[LUMA_64x64].pixelavg_pp = PFX(pixel_avg_64x64_avx2);
+        p.pu[LUMA_64x48].pixelavg_pp = PFX(pixel_avg_64x48_avx2);
+        p.pu[LUMA_64x32].pixelavg_pp = PFX(pixel_avg_64x32_avx2);
+        p.pu[LUMA_64x16].pixelavg_pp = PFX(pixel_avg_64x16_avx2);
+
+        p.pu[LUMA_16x16].satd = PFX(pixel_satd_16x16_avx2);
+        p.pu[LUMA_16x8].satd  = PFX(pixel_satd_16x8_avx2);
+        p.pu[LUMA_8x16].satd  = PFX(pixel_satd_8x16_avx2);
+        p.pu[LUMA_8x8].satd   = PFX(pixel_satd_8x8_avx2);
+        p.pu[LUMA_16x4].satd  = PFX(pixel_satd_16x4_avx2);
+        p.pu[LUMA_16x12].satd = PFX(pixel_satd_16x12_avx2);
+        p.pu[LUMA_16x32].satd = PFX(pixel_satd_16x32_avx2);
+        p.pu[LUMA_16x64].satd = PFX(pixel_satd_16x64_avx2);
+        p.pu[LUMA_32x8].satd   = PFX(pixel_satd_32x8_avx2);
+        p.pu[LUMA_32x16].satd   = PFX(pixel_satd_32x16_avx2);
+        p.pu[LUMA_32x24].satd   = PFX(pixel_satd_32x24_avx2);
+        p.pu[LUMA_32x32].satd   = PFX(pixel_satd_32x32_avx2);
+        p.pu[LUMA_32x64].satd   = PFX(pixel_satd_32x64_avx2);
+        p.pu[LUMA_48x64].satd   = PFX(pixel_satd_48x64_avx2);
+        p.pu[LUMA_64x16].satd   = PFX(pixel_satd_64x16_avx2);
+        p.pu[LUMA_64x32].satd   = PFX(pixel_satd_64x32_avx2);
+        p.pu[LUMA_64x48].satd   = PFX(pixel_satd_64x48_avx2);
+        p.pu[LUMA_64x64].satd   = PFX(pixel_satd_64x64_avx2);
+
+        p.pu[LUMA_32x8].sad = PFX(pixel_sad_32x8_avx2);
+        p.pu[LUMA_32x16].sad = PFX(pixel_sad_32x16_avx2);
+        p.pu[LUMA_32x24].sad = PFX(pixel_sad_32x24_avx2);
+        p.pu[LUMA_32x32].sad = PFX(pixel_sad_32x32_avx2);
+        p.pu[LUMA_32x64].sad = PFX(pixel_sad_32x64_avx2);
+        p.pu[LUMA_48x64].sad = PFX(pixel_sad_48x64_avx2);
+        p.pu[LUMA_64x16].sad = PFX(pixel_sad_64x16_avx2);
+        p.pu[LUMA_64x32].sad = PFX(pixel_sad_64x32_avx2);
+        p.pu[LUMA_64x48].sad = PFX(pixel_sad_64x48_avx2);
+        p.pu[LUMA_64x64].sad = PFX(pixel_sad_64x64_avx2);
+        p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_avx2);
+        p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx2);
+        p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx2);
+        p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx2);
+        p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx2);
+        p.pu[LUMA_32x8].sad_x4 = PFX(pixel_sad_x4_32x8_avx2);
+
+        p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_avx2);
+        p.cu[BLOCK_32x32].sse_pp = PFX(pixel_ssd_32x32_avx2);
+        p.cu[BLOCK_64x64].sse_pp = PFX(pixel_ssd_64x64_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sse_pp = PFX(pixel_ssd_16x16_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sse_pp = PFX(pixel_ssd_32x32_avx2);
+
+        p.cu[BLOCK_16x16].ssd_s = PFX(pixel_ssd_s_16_avx2);
+        p.cu[BLOCK_32x32].ssd_s = PFX(pixel_ssd_s_32_avx2);
+
+        p.cu[BLOCK_8x8].copy_cnt = PFX(copy_cnt_8_avx2);
+        p.cu[BLOCK_16x16].copy_cnt = PFX(copy_cnt_16_avx2);
+        p.cu[BLOCK_32x32].copy_cnt = PFX(copy_cnt_32_avx2);
+
+        p.cu[BLOCK_16x16].blockfill_s = PFX(blockfill_s_16x16_avx2);
+        p.cu[BLOCK_32x32].blockfill_s = PFX(blockfill_s_32x32_avx2);
+        p.cu[BLOCK_16x16].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_16_avx2);
+        p.cu[BLOCK_32x32].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_32_avx2);
+        p.cu[BLOCK_8x8].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_8_avx2);
+        p.cu[BLOCK_16x16].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_16_avx2);
+        p.cu[BLOCK_32x32].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_32_avx2);
+
+        p.cu[BLOCK_8x8].cpy2Dto1D_shl = PFX(cpy2Dto1D_shl_8_avx2);
+        p.cu[BLOCK_16x16].cpy2Dto1D_shl = PFX(cpy2Dto1D_shl_16_avx2);
+        p.cu[BLOCK_32x32].cpy2Dto1D_shl = PFX(cpy2Dto1D_shl_32_avx2);
+
+        p.cu[BLOCK_8x8].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_8_avx2);
+        p.cu[BLOCK_16x16].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_16_avx2);
+        p.cu[BLOCK_32x32].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_32_avx2);
+
+        p.cu[BLOCK_8x8].count_nonzero = PFX(count_nonzero_8x8_avx2);
+        p.cu[BLOCK_16x16].count_nonzero = PFX(count_nonzero_16x16_avx2);
+        p.cu[BLOCK_32x32].count_nonzero = PFX(count_nonzero_32x32_avx2);
+
+        p.denoiseDct = PFX(denoise_dct_avx2);
+        p.quant = PFX(quant_avx2);
+        p.nquant = PFX(nquant_avx2);
+        p.dequant_normal = PFX(dequant_normal_avx2);
+        p.dequant_scaling = PFX(dequant_scaling_avx2);
+
+        p.cu[BLOCK_16x16].calcresidual = PFX(getResidual16_avx2);
+        p.cu[BLOCK_32x32].calcresidual = PFX(getResidual32_avx2);
+
+        p.scale1D_128to64 = PFX(scale1D_128to64_avx2);
+        p.weight_pp = PFX(weight_pp_avx2);
+        p.weight_sp = PFX(weight_sp_avx2);
+
+        // intra_pred functions
+        p.cu[BLOCK_4x4].intra_pred[3] = PFX(intra_pred_ang4_3_avx2);
+        p.cu[BLOCK_4x4].intra_pred[4] = PFX(intra_pred_ang4_4_avx2);
+        p.cu[BLOCK_4x4].intra_pred[5] = PFX(intra_pred_ang4_5_avx2);
+        p.cu[BLOCK_4x4].intra_pred[6] = PFX(intra_pred_ang4_6_avx2);
+        p.cu[BLOCK_4x4].intra_pred[7] = PFX(intra_pred_ang4_7_avx2);
+        p.cu[BLOCK_4x4].intra_pred[8] = PFX(intra_pred_ang4_8_avx2);
+        p.cu[BLOCK_4x4].intra_pred[9] = PFX(intra_pred_ang4_9_avx2);
+        p.cu[BLOCK_4x4].intra_pred[11] = PFX(intra_pred_ang4_11_avx2);
+        p.cu[BLOCK_4x4].intra_pred[12] = PFX(intra_pred_ang4_12_avx2);
+        p.cu[BLOCK_4x4].intra_pred[13] = PFX(intra_pred_ang4_13_avx2);
+        p.cu[BLOCK_4x4].intra_pred[14] = PFX(intra_pred_ang4_14_avx2);
+        p.cu[BLOCK_4x4].intra_pred[15] = PFX(intra_pred_ang4_15_avx2);
+        p.cu[BLOCK_4x4].intra_pred[16] = PFX(intra_pred_ang4_16_avx2);
+        p.cu[BLOCK_4x4].intra_pred[17] = PFX(intra_pred_ang4_17_avx2);
+        p.cu[BLOCK_4x4].intra_pred[19] = PFX(intra_pred_ang4_19_avx2);
+        p.cu[BLOCK_4x4].intra_pred[20] = PFX(intra_pred_ang4_20_avx2);
+        p.cu[BLOCK_4x4].intra_pred[21] = PFX(intra_pred_ang4_21_avx2);
+        p.cu[BLOCK_4x4].intra_pred[22] = PFX(intra_pred_ang4_22_avx2);
+        p.cu[BLOCK_4x4].intra_pred[23] = PFX(intra_pred_ang4_23_avx2);
+        p.cu[BLOCK_4x4].intra_pred[24] = PFX(intra_pred_ang4_24_avx2);
+        p.cu[BLOCK_4x4].intra_pred[25] = PFX(intra_pred_ang4_25_avx2);
+        p.cu[BLOCK_4x4].intra_pred[27] = PFX(intra_pred_ang4_27_avx2);
+        p.cu[BLOCK_4x4].intra_pred[28] = PFX(intra_pred_ang4_28_avx2);
+        p.cu[BLOCK_4x4].intra_pred[29] = PFX(intra_pred_ang4_29_avx2);
+        p.cu[BLOCK_4x4].intra_pred[30] = PFX(intra_pred_ang4_30_avx2);
+        p.cu[BLOCK_4x4].intra_pred[31] = PFX(intra_pred_ang4_31_avx2);
+        p.cu[BLOCK_4x4].intra_pred[32] = PFX(intra_pred_ang4_32_avx2);
+        p.cu[BLOCK_4x4].intra_pred[33] = PFX(intra_pred_ang4_33_avx2);
+        p.cu[BLOCK_8x8].intra_pred[3] = PFX(intra_pred_ang8_3_avx2);
+        p.cu[BLOCK_8x8].intra_pred[4] = PFX(intra_pred_ang8_4_avx2);
+        p.cu[BLOCK_8x8].intra_pred[5] = PFX(intra_pred_ang8_5_avx2);
+        p.cu[BLOCK_8x8].intra_pred[6] = PFX(intra_pred_ang8_6_avx2);
+        p.cu[BLOCK_8x8].intra_pred[7] = PFX(intra_pred_ang8_7_avx2);
+        p.cu[BLOCK_8x8].intra_pred[8] = PFX(intra_pred_ang8_8_avx2);
+        p.cu[BLOCK_8x8].intra_pred[9] = PFX(intra_pred_ang8_9_avx2);
+        p.cu[BLOCK_8x8].intra_pred[11] = PFX(intra_pred_ang8_11_avx2);
+        p.cu[BLOCK_8x8].intra_pred[12] = PFX(intra_pred_ang8_12_avx2);
+        p.cu[BLOCK_8x8].intra_pred[13] = PFX(intra_pred_ang8_13_avx2);
+        p.cu[BLOCK_8x8].intra_pred[14] = PFX(intra_pred_ang8_14_avx2);
+        p.cu[BLOCK_8x8].intra_pred[15] = PFX(intra_pred_ang8_15_avx2);
+        p.cu[BLOCK_8x8].intra_pred[16] = PFX(intra_pred_ang8_16_avx2);
+        p.cu[BLOCK_8x8].intra_pred[20] = PFX(intra_pred_ang8_20_avx2);
+        p.cu[BLOCK_8x8].intra_pred[21] = PFX(intra_pred_ang8_21_avx2);
+        p.cu[BLOCK_8x8].intra_pred[22] = PFX(intra_pred_ang8_22_avx2);
+        p.cu[BLOCK_8x8].intra_pred[23] = PFX(intra_pred_ang8_23_avx2);
+        p.cu[BLOCK_8x8].intra_pred[24] = PFX(intra_pred_ang8_24_avx2);
+        p.cu[BLOCK_8x8].intra_pred[25] = PFX(intra_pred_ang8_25_avx2);
+        p.cu[BLOCK_8x8].intra_pred[27] = PFX(intra_pred_ang8_27_avx2);
+        p.cu[BLOCK_8x8].intra_pred[28] = PFX(intra_pred_ang8_28_avx2);
+        p.cu[BLOCK_8x8].intra_pred[29] = PFX(intra_pred_ang8_29_avx2);
+        p.cu[BLOCK_8x8].intra_pred[30] = PFX(intra_pred_ang8_30_avx2);
+        p.cu[BLOCK_8x8].intra_pred[31] = PFX(intra_pred_ang8_31_avx2);
+        p.cu[BLOCK_8x8].intra_pred[32] = PFX(intra_pred_ang8_32_avx2);
+        p.cu[BLOCK_8x8].intra_pred[33] = PFX(intra_pred_ang8_33_avx2);
+        p.cu[BLOCK_16x16].intra_pred[3] = PFX(intra_pred_ang16_3_avx2);
+        p.cu[BLOCK_16x16].intra_pred[4] = PFX(intra_pred_ang16_4_avx2);
+        p.cu[BLOCK_16x16].intra_pred[5] = PFX(intra_pred_ang16_5_avx2);
+        p.cu[BLOCK_16x16].intra_pred[6] = PFX(intra_pred_ang16_6_avx2);
+        p.cu[BLOCK_16x16].intra_pred[7] = PFX(intra_pred_ang16_7_avx2);
+        p.cu[BLOCK_16x16].intra_pred[8] = PFX(intra_pred_ang16_8_avx2);
+        p.cu[BLOCK_16x16].intra_pred[9] = PFX(intra_pred_ang16_9_avx2);
+        p.cu[BLOCK_16x16].intra_pred[12] = PFX(intra_pred_ang16_12_avx2);
+        p.cu[BLOCK_16x16].intra_pred[11] = PFX(intra_pred_ang16_11_avx2);
+        p.cu[BLOCK_16x16].intra_pred[13] = PFX(intra_pred_ang16_13_avx2);
+        p.cu[BLOCK_16x16].intra_pred[14] = PFX(intra_pred_ang16_14_avx2);
+        p.cu[BLOCK_16x16].intra_pred[15] = PFX(intra_pred_ang16_15_avx2);
+        p.cu[BLOCK_16x16].intra_pred[16] = PFX(intra_pred_ang16_16_avx2);
+        p.cu[BLOCK_16x16].intra_pred[17] = PFX(intra_pred_ang16_17_avx2);
+        p.cu[BLOCK_16x16].intra_pred[25] = PFX(intra_pred_ang16_25_avx2);
+        p.cu[BLOCK_16x16].intra_pred[28] = PFX(intra_pred_ang16_28_avx2);
+        p.cu[BLOCK_16x16].intra_pred[27] = PFX(intra_pred_ang16_27_avx2);
+        p.cu[BLOCK_16x16].intra_pred[29] = PFX(intra_pred_ang16_29_avx2);
+        p.cu[BLOCK_16x16].intra_pred[30] = PFX(intra_pred_ang16_30_avx2);
+        p.cu[BLOCK_16x16].intra_pred[31] = PFX(intra_pred_ang16_31_avx2);
+        p.cu[BLOCK_16x16].intra_pred[32] = PFX(intra_pred_ang16_32_avx2);
+        p.cu[BLOCK_16x16].intra_pred[33] = PFX(intra_pred_ang16_33_avx2);
+        p.cu[BLOCK_16x16].intra_pred[24] = PFX(intra_pred_ang16_24_avx2);
+        p.cu[BLOCK_16x16].intra_pred[23] = PFX(intra_pred_ang16_23_avx2);
+        p.cu[BLOCK_32x32].intra_pred[4]  = PFX(intra_pred_ang32_4_avx2);
+        p.cu[BLOCK_32x32].intra_pred[5]  = PFX(intra_pred_ang32_5_avx2);
+        p.cu[BLOCK_32x32].intra_pred[6]  = PFX(intra_pred_ang32_6_avx2);
+        p.cu[BLOCK_32x32].intra_pred[7]  = PFX(intra_pred_ang32_7_avx2);
+        p.cu[BLOCK_32x32].intra_pred[8]  = PFX(intra_pred_ang32_8_avx2);
+        p.cu[BLOCK_32x32].intra_pred[9]  = PFX(intra_pred_ang32_9_avx2);
+        p.cu[BLOCK_32x32].intra_pred[10] = PFX(intra_pred_ang32_10_avx2);
+        p.cu[BLOCK_32x32].intra_pred[11] = PFX(intra_pred_ang32_11_avx2);
+        p.cu[BLOCK_32x32].intra_pred[12] = PFX(intra_pred_ang32_12_avx2);
+        p.cu[BLOCK_32x32].intra_pred[13] = PFX(intra_pred_ang32_13_avx2);
+        p.cu[BLOCK_32x32].intra_pred[14] = PFX(intra_pred_ang32_14_avx2);
+        p.cu[BLOCK_32x32].intra_pred[15] = PFX(intra_pred_ang32_15_avx2);
+        p.cu[BLOCK_32x32].intra_pred[16] = PFX(intra_pred_ang32_16_avx2);
+        p.cu[BLOCK_32x32].intra_pred[17] = PFX(intra_pred_ang32_17_avx2);
+        p.cu[BLOCK_32x32].intra_pred[19] = PFX(intra_pred_ang32_19_avx2);
+        p.cu[BLOCK_32x32].intra_pred[20] = PFX(intra_pred_ang32_20_avx2);
+        p.cu[BLOCK_32x32].intra_pred[34] = PFX(intra_pred_ang32_34_avx2);
+        p.cu[BLOCK_32x32].intra_pred[2] = PFX(intra_pred_ang32_2_avx2);
+        p.cu[BLOCK_32x32].intra_pred[26] = PFX(intra_pred_ang32_26_avx2);
+        p.cu[BLOCK_32x32].intra_pred[27] = PFX(intra_pred_ang32_27_avx2);
+        p.cu[BLOCK_32x32].intra_pred[28] = PFX(intra_pred_ang32_28_avx2);
+        p.cu[BLOCK_32x32].intra_pred[29] = PFX(intra_pred_ang32_29_avx2);
+        p.cu[BLOCK_32x32].intra_pred[30] = PFX(intra_pred_ang32_30_avx2);
+        p.cu[BLOCK_32x32].intra_pred[31] = PFX(intra_pred_ang32_31_avx2);
+        p.cu[BLOCK_32x32].intra_pred[32] = PFX(intra_pred_ang32_32_avx2);
+        p.cu[BLOCK_32x32].intra_pred[33] = PFX(intra_pred_ang32_33_avx2);
+        p.cu[BLOCK_32x32].intra_pred[25] = PFX(intra_pred_ang32_25_avx2);
+        p.cu[BLOCK_32x32].intra_pred[24] = PFX(intra_pred_ang32_24_avx2);
+        p.cu[BLOCK_32x32].intra_pred[23] = PFX(intra_pred_ang32_23_avx2);
+        p.cu[BLOCK_32x32].intra_pred[22] = PFX(intra_pred_ang32_22_avx2);
+        p.cu[BLOCK_32x32].intra_pred[21] = PFX(intra_pred_ang32_21_avx2);
+        p.cu[BLOCK_32x32].intra_pred[18] = PFX(intra_pred_ang32_18_avx2);
+        p.cu[BLOCK_32x32].intra_pred[3]  = PFX(intra_pred_ang32_3_avx2);
+
+        // all_angs primitives
+        p.cu[BLOCK_4x4].intra_pred_allangs = PFX(all_angs_pred_4x4_avx2);
+
+        p.cu[BLOCK_32x32].copy_sp = PFX(blockcopy_sp_32x32_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].copy_sp = PFX(blockcopy_sp_32x32_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].copy_sp = PFX(blockcopy_sp_32x64_avx2);
+
+        p.cu[BLOCK_64x64].copy_sp = PFX(blockcopy_sp_64x64_avx2);
+
+        // copy_ps primitives
+        p.cu[BLOCK_16x16].copy_ps = PFX(blockcopy_ps_16x16_avx2);
+        p.chroma[X265_CSP_I420].cu[CHROMA_420_16x16].copy_ps = PFX(blockcopy_ps_16x16_avx2);
+        p.chroma[X265_CSP_I422].cu[CHROMA_422_16x32].copy_ps = PFX(blockcopy_ps_16x32_avx2);
+
+        ALL_LUMA_TU_S(dct, dct, avx2);
+        ALL_LUMA_TU_S(idct, idct, avx2);
+        ALL_LUMA_CU_S(transpose, transpose, avx2);
+
+        ALL_LUMA_PU(luma_vpp, interp_8tap_vert_pp, avx2);
+        ALL_LUMA_PU(luma_vps, interp_8tap_vert_ps, avx2);
+        ALL_LUMA_PU(luma_vsp, interp_8tap_vert_sp, avx2);
+        ALL_LUMA_PU(luma_vss, interp_8tap_vert_ss, avx2);
+        p.pu[LUMA_4x4].luma_vsp = PFX(interp_8tap_vert_sp_4x4_avx2);
+
+        // missing 4x8, 4x16, 24x32, 12x16 for the full set of luma PU
+        p.pu[LUMA_4x4].luma_hpp = PFX(interp_8tap_horiz_pp_4x4_avx2);
+        p.pu[LUMA_4x8].luma_hpp = PFX(interp_8tap_horiz_pp_4x8_avx2);
+        p.pu[LUMA_4x16].luma_hpp = PFX(interp_8tap_horiz_pp_4x16_avx2);
+        p.pu[LUMA_8x4].luma_hpp = PFX(interp_8tap_horiz_pp_8x4_avx2);
+        p.pu[LUMA_8x8].luma_hpp = PFX(interp_8tap_horiz_pp_8x8_avx2);
+        p.pu[LUMA_8x16].luma_hpp = PFX(interp_8tap_horiz_pp_8x16_avx2);
+        p.pu[LUMA_8x32].luma_hpp = PFX(interp_8tap_horiz_pp_8x32_avx2);
+        p.pu[LUMA_16x4].luma_hpp = PFX(interp_8tap_horiz_pp_16x4_avx2);
+        p.pu[LUMA_16x8].luma_hpp = PFX(interp_8tap_horiz_pp_16x8_avx2);
+        p.pu[LUMA_16x12].luma_hpp = PFX(interp_8tap_horiz_pp_16x12_avx2);
+        p.pu[LUMA_16x16].luma_hpp = PFX(interp_8tap_horiz_pp_16x16_avx2);
+        p.pu[LUMA_16x32].luma_hpp = PFX(interp_8tap_horiz_pp_16x32_avx2);
+        p.pu[LUMA_16x64].luma_hpp = PFX(interp_8tap_horiz_pp_16x64_avx2);
+        p.pu[LUMA_32x8].luma_hpp  = PFX(interp_8tap_horiz_pp_32x8_avx2);
+        p.pu[LUMA_32x16].luma_hpp = PFX(interp_8tap_horiz_pp_32x16_avx2);
+        p.pu[LUMA_32x24].luma_hpp = PFX(interp_8tap_horiz_pp_32x24_avx2);
+        p.pu[LUMA_32x32].luma_hpp = PFX(interp_8tap_horiz_pp_32x32_avx2);
+        p.pu[LUMA_32x64].luma_hpp = PFX(interp_8tap_horiz_pp_32x64_avx2);
+        p.pu[LUMA_64x64].luma_hpp = PFX(interp_8tap_horiz_pp_64x64_avx2);
+        p.pu[LUMA_64x48].luma_hpp = PFX(interp_8tap_horiz_pp_64x48_avx2);
+        p.pu[LUMA_64x32].luma_hpp = PFX(interp_8tap_horiz_pp_64x32_avx2);
+        p.pu[LUMA_64x16].luma_hpp = PFX(interp_8tap_horiz_pp_64x16_avx2);
+        p.pu[LUMA_48x64].luma_hpp = PFX(interp_8tap_horiz_pp_48x64_avx2);
+        p.pu[LUMA_24x32].luma_hpp = PFX(interp_8tap_horiz_pp_24x32_avx2);
+        p.pu[LUMA_12x16].luma_hpp = PFX(interp_8tap_horiz_pp_12x16_avx2);
+
+        p.pu[LUMA_4x4].luma_hps = PFX(interp_8tap_horiz_ps_4x4_avx2);
+        p.pu[LUMA_4x8].luma_hps = PFX(interp_8tap_horiz_ps_4x8_avx2);
+        p.pu[LUMA_4x16].luma_hps = PFX(interp_8tap_horiz_ps_4x16_avx2);
+        p.pu[LUMA_8x4].luma_hps = PFX(interp_8tap_horiz_ps_8x4_avx2);
+        p.pu[LUMA_8x8].luma_hps = PFX(interp_8tap_horiz_ps_8x8_avx2);
+        p.pu[LUMA_8x16].luma_hps = PFX(interp_8tap_horiz_ps_8x16_avx2);
+        p.pu[LUMA_8x32].luma_hps = PFX(interp_8tap_horiz_ps_8x32_avx2);
+        p.pu[LUMA_16x8].luma_hps = PFX(interp_8tap_horiz_ps_16x8_avx2);
+        p.pu[LUMA_16x16].luma_hps = PFX(interp_8tap_horiz_ps_16x16_avx2);
+        p.pu[LUMA_16x12].luma_hps = PFX(interp_8tap_horiz_ps_16x12_avx2);
+        p.pu[LUMA_16x4].luma_hps = PFX(interp_8tap_horiz_ps_16x4_avx2);
+        p.pu[LUMA_16x32].luma_hps = PFX(interp_8tap_horiz_ps_16x32_avx2);
+        p.pu[LUMA_16x64].luma_hps = PFX(interp_8tap_horiz_ps_16x64_avx2);
+        p.pu[LUMA_32x32].luma_hps = PFX(interp_8tap_horiz_ps_32x32_avx2);
+        p.pu[LUMA_32x16].luma_hps = PFX(interp_8tap_horiz_ps_32x16_avx2);
+        p.pu[LUMA_32x24].luma_hps = PFX(interp_8tap_horiz_ps_32x24_avx2);
+        p.pu[LUMA_32x8].luma_hps = PFX(interp_8tap_horiz_ps_32x8_avx2);
+        p.pu[LUMA_32x64].luma_hps = PFX(interp_8tap_horiz_ps_32x64_avx2);
+        p.pu[LUMA_48x64].luma_hps = PFX(interp_8tap_horiz_ps_48x64_avx2);
+        p.pu[LUMA_64x64].luma_hps = PFX(interp_8tap_horiz_ps_64x64_avx2);
+        p.pu[LUMA_64x48].luma_hps = PFX(interp_8tap_horiz_ps_64x48_avx2);
+        p.pu[LUMA_64x32].luma_hps = PFX(interp_8tap_horiz_ps_64x32_avx2);
+        p.pu[LUMA_64x16].luma_hps = PFX(interp_8tap_horiz_ps_64x16_avx2);
+        p.pu[LUMA_12x16].luma_hps = PFX(interp_8tap_horiz_ps_12x16_avx2);
+        p.pu[LUMA_24x32].luma_hps = PFX(interp_8tap_horiz_ps_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_hpp = PFX(interp_4tap_horiz_pp_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_hpp = PFX(interp_4tap_horiz_pp_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_hpp = PFX(interp_4tap_horiz_pp_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].filter_hpp = PFX(interp_4tap_horiz_pp_2x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].filter_hpp = PFX(interp_4tap_horiz_pp_2x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_hpp = PFX(interp_4tap_horiz_pp_4x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_hpp = PFX(interp_4tap_horiz_pp_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_hpp = PFX(interp_4tap_horiz_pp_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_hpp = PFX(interp_4tap_horiz_pp_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_hpp = PFX(interp_4tap_horiz_pp_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_hpp = PFX(interp_4tap_horiz_pp_6x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].filter_hpp = PFX(interp_4tap_horiz_pp_6x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_hpp = PFX(interp_4tap_horiz_pp_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_hpp = PFX(interp_4tap_horiz_pp_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_hpp = PFX(interp_4tap_horiz_pp_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_hpp = PFX(interp_4tap_horiz_pp_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_hpp = PFX(interp_4tap_horiz_pp_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_hpp = PFX(interp_4tap_horiz_pp_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_hpp = PFX(interp_4tap_horiz_pp_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_hpp = PFX(interp_4tap_horiz_pp_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_hpp = PFX(interp_4tap_horiz_pp_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_hps = PFX(interp_4tap_horiz_ps_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_hps = PFX(interp_4tap_horiz_ps_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_hps = PFX(interp_4tap_horiz_ps_4x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_hps = PFX(interp_4tap_horiz_ps_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_hps = PFX(interp_4tap_horiz_ps_4x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_hps = PFX(interp_4tap_horiz_ps_4x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_hps = PFX(interp_4tap_horiz_ps_4x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_hps = PFX(interp_4tap_horiz_ps_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_hps = PFX(interp_4tap_horiz_ps_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_hps = PFX(interp_4tap_horiz_ps_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_hps = PFX(interp_4tap_horiz_ps_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_hps = PFX(interp_4tap_horiz_ps_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_hps = PFX(interp_4tap_horiz_ps_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_hps = PFX(interp_4tap_horiz_ps_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_hps = PFX(interp_4tap_horiz_ps_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_hps = PFX(interp_4tap_horiz_ps_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_hps = PFX(interp_4tap_horiz_ps_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_hps = PFX(interp_4tap_horiz_ps_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_hps = PFX(interp_4tap_horiz_ps_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_hps = PFX(interp_4tap_horiz_ps_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].filter_hps = PFX(interp_4tap_horiz_ps_2x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].filter_hps = PFX(interp_4tap_horiz_ps_2x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_hps = PFX(interp_4tap_horiz_ps_6x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_hpp = PFX(interp_4tap_horiz_pp_24x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_vpp = PFX(interp_4tap_vert_pp_4x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_vpp = PFX(interp_4tap_vert_pp_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].filter_vpp = PFX(interp_4tap_vert_pp_2x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].filter_vpp = PFX(interp_4tap_vert_pp_2x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_vpp = PFX(interp_4tap_vert_pp_4x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_vpp = PFX(interp_4tap_vert_pp_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_vpp = PFX(interp_4tap_vert_pp_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_vpp = PFX(interp_4tap_vert_pp_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_vpp = PFX(interp_4tap_vert_pp_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_vpp = PFX(interp_4tap_vert_pp_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vpp = PFX(interp_4tap_vert_pp_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vpp = PFX(interp_4tap_vert_pp_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vpp = PFX(interp_4tap_vert_pp_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_vpp = PFX(interp_4tap_vert_pp_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_vpp = PFX(interp_4tap_vert_pp_32x32_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].filter_vps = PFX(interp_4tap_vert_ps_2x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].filter_vps = PFX(interp_4tap_vert_ps_2x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vps = PFX(interp_4tap_vert_ps_4x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_vps = PFX(interp_4tap_vert_ps_4x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_vps = PFX(interp_4tap_vert_ps_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_vps = PFX(interp_4tap_vert_ps_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_vps = PFX(interp_4tap_vert_ps_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_vps = PFX(interp_4tap_vert_ps_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vps = PFX(interp_4tap_vert_ps_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vps = PFX(interp_4tap_vert_ps_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vps = PFX(interp_4tap_vert_ps_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vps = PFX(interp_4tap_vert_ps_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_vps = PFX(interp_4tap_vert_ps_4x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vps = PFX(interp_4tap_vert_ps_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vps = PFX(interp_4tap_vert_ps_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_vps = PFX(interp_4tap_vert_ps_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_vps = PFX(interp_4tap_vert_ps_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vps = PFX(interp_4tap_vert_ps_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vps = PFX(interp_4tap_vert_ps_32x8_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vsp = PFX(interp_4tap_vert_sp_4x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_vsp = PFX(interp_4tap_vert_sp_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vsp = PFX(interp_4tap_vert_sp_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_vsp = PFX(interp_4tap_vert_sp_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].filter_vsp = PFX(interp_4tap_vert_sp_2x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].filter_vsp = PFX(interp_4tap_vert_sp_2x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vsp = PFX(interp_4tap_vert_sp_4x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_vsp = PFX(interp_4tap_vert_sp_4x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_vsp = PFX(interp_4tap_vert_sp_4x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vsp = PFX(interp_4tap_vert_sp_6x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_vsp = PFX(interp_4tap_vert_sp_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_vsp = PFX(interp_4tap_vert_sp_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_vsp = PFX(interp_4tap_vert_sp_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_vsp = PFX(interp_4tap_vert_sp_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_vsp = PFX(interp_4tap_vert_sp_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vsp = PFX(interp_4tap_vert_sp_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vsp = PFX(interp_4tap_vert_sp_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vsp = PFX(interp_4tap_vert_sp_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vsp = PFX(interp_4tap_vert_sp_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vsp = PFX(interp_4tap_vert_sp_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vsp = PFX(interp_4tap_vert_sp_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vsp = PFX(interp_4tap_vert_sp_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vsp = PFX(interp_4tap_vert_sp_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_vsp = PFX(interp_4tap_vert_sp_32x24_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vss = PFX(interp_4tap_vert_ss_4x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_vss = PFX(interp_4tap_vert_ss_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vss = PFX(interp_4tap_vert_ss_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_vss = PFX(interp_4tap_vert_ss_32x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].filter_vss = PFX(interp_4tap_vert_ss_2x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].filter_vss = PFX(interp_4tap_vert_ss_2x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vss = PFX(interp_4tap_vert_ss_4x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_vss = PFX(interp_4tap_vert_ss_4x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_vss = PFX(interp_4tap_vert_ss_4x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vss = PFX(interp_4tap_vert_ss_6x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].filter_vss = PFX(interp_4tap_vert_ss_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].filter_vss = PFX(interp_4tap_vert_ss_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].filter_vss = PFX(interp_4tap_vert_ss_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_vss = PFX(interp_4tap_vert_ss_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_vss = PFX(interp_4tap_vert_ss_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vss = PFX(interp_4tap_vert_ss_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vss = PFX(interp_4tap_vert_ss_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vss = PFX(interp_4tap_vert_ss_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vss = PFX(interp_4tap_vert_ss_16x12_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vss = PFX(interp_4tap_vert_ss_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vss = PFX(interp_4tap_vert_ss_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vss = PFX(interp_4tap_vert_ss_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vss = PFX(interp_4tap_vert_ss_32x16_avx2);
+
+        //i422 for chroma_vss
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_vss = PFX(interp_4tap_vert_ss_4x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_vss = PFX(interp_4tap_vert_ss_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vss = PFX(interp_4tap_vert_ss_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vss = PFX(interp_4tap_vert_ss_4x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].filter_vss = PFX(interp_4tap_vert_ss_2x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_vss = PFX(interp_4tap_vert_ss_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_vss = PFX(interp_4tap_vert_ss_4x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vss = PFX(interp_4tap_vert_ss_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_vss = PFX(interp_4tap_vert_ss_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_vss = PFX(interp_4tap_vert_ss_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_vss = PFX(interp_4tap_vert_ss_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_vss = PFX(interp_4tap_vert_ss_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vss = PFX(interp_4tap_vert_ss_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_vss = PFX(interp_4tap_vert_ss_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].filter_vss = PFX(interp_4tap_vert_ss_6x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].filter_vss = PFX(interp_4tap_vert_ss_2x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vss = PFX(interp_4tap_vert_ss_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vss = PFX(interp_4tap_vert_ss_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].filter_vss = PFX(interp_4tap_vert_ss_4x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x4].filter_vss = PFX(interp_4tap_vert_ss_2x4_avx2);
+
+        //i444 for chroma_vss
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_vss = PFX(interp_4tap_vert_ss_4x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_vss = PFX(interp_4tap_vert_ss_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vss = PFX(interp_4tap_vert_ss_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vss = PFX(interp_4tap_vert_ss_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_vss = PFX(interp_4tap_vert_ss_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_vss = PFX(interp_4tap_vert_ss_4x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vss = PFX(interp_4tap_vert_ss_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_vss = PFX(interp_4tap_vert_ss_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vss = PFX(interp_4tap_vert_ss_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vss = PFX(interp_4tap_vert_ss_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_vss = PFX(interp_4tap_vert_ss_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vss = PFX(interp_4tap_vert_ss_12x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vss = PFX(interp_4tap_vert_ss_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_vss = PFX(interp_4tap_vert_ss_4x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vss = PFX(interp_4tap_vert_ss_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vss = PFX(interp_4tap_vert_ss_32x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vss = PFX(interp_4tap_vert_ss_8x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vss = PFX(interp_4tap_vert_ss_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vss = PFX(interp_4tap_vert_ss_64x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vss = PFX(interp_4tap_vert_ss_16x64_avx2);
+        p.pu[LUMA_16x16].luma_hvpp = PFX(interp_8tap_hv_pp_16x16_avx2);
+
+        ALL_LUMA_PU_T(luma_hvpp, interp_8tap_hv_pp_cpu);
+        p.pu[LUMA_4x4].luma_hvpp = interp_8tap_hv_pp_cpu<LUMA_4x4>;
+
+        p.pu[LUMA_32x8].convert_p2s = PFX(filterPixelToShort_32x8_avx2);
+        p.pu[LUMA_32x16].convert_p2s = PFX(filterPixelToShort_32x16_avx2);
+        p.pu[LUMA_32x24].convert_p2s = PFX(filterPixelToShort_32x24_avx2);
+        p.pu[LUMA_32x32].convert_p2s = PFX(filterPixelToShort_32x32_avx2);
+        p.pu[LUMA_32x64].convert_p2s = PFX(filterPixelToShort_32x64_avx2);
+        p.pu[LUMA_64x16].convert_p2s = PFX(filterPixelToShort_64x16_avx2);
+        p.pu[LUMA_64x32].convert_p2s = PFX(filterPixelToShort_64x32_avx2);
+        p.pu[LUMA_64x48].convert_p2s = PFX(filterPixelToShort_64x48_avx2);
+        p.pu[LUMA_64x64].convert_p2s = PFX(filterPixelToShort_64x64_avx2);
+        p.pu[LUMA_48x64].convert_p2s = PFX(filterPixelToShort_48x64_avx2);
+        p.pu[LUMA_24x32].convert_p2s = PFX(filterPixelToShort_24x32_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].p2s = PFX(filterPixelToShort_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].p2s = PFX(filterPixelToShort_32x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].p2s = PFX(filterPixelToShort_32x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].p2s = PFX(filterPixelToShort_32x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].p2s = PFX(filterPixelToShort_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].p2s = PFX(filterPixelToShort_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].p2s = PFX(filterPixelToShort_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].p2s = PFX(filterPixelToShort_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].p2s = PFX(filterPixelToShort_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].p2s = PFX(filterPixelToShort_32x64_avx2);
+
+        //i422 for chroma_hpp
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_hpp = PFX(interp_4tap_horiz_pp_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_hpp = PFX(interp_4tap_horiz_pp_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].filter_hpp = PFX(interp_4tap_horiz_pp_2x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].filter_hpp = PFX(interp_4tap_horiz_pp_2x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_hpp = PFX(interp_4tap_horiz_pp_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_hpp = PFX(interp_4tap_horiz_pp_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_hpp = PFX(interp_4tap_horiz_pp_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_hpp = PFX(interp_4tap_horiz_pp_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_hpp = PFX(interp_4tap_horiz_pp_8x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_hpp = PFX(interp_4tap_horiz_pp_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_hpp = PFX(interp_4tap_horiz_pp_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_hpp = PFX(interp_4tap_horiz_pp_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_hpp = PFX(interp_4tap_horiz_pp_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_hpp = PFX(interp_4tap_horiz_pp_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_hpp = PFX(interp_4tap_horiz_pp_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_hpp = PFX(interp_4tap_horiz_pp_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_hpp = PFX(interp_4tap_horiz_pp_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_hpp = PFX(interp_4tap_horiz_pp_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_hpp = PFX(interp_4tap_horiz_pp_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].filter_hpp = PFX(interp_4tap_horiz_pp_2x8_avx2);
+
+        //i444 filters hpp
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_hpp = PFX(interp_4tap_horiz_pp_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_hpp = PFX(interp_4tap_horiz_pp_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_hpp = PFX(interp_4tap_horiz_pp_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_hpp = PFX(interp_4tap_horiz_pp_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_hpp = PFX(interp_4tap_horiz_pp_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_hpp = PFX(interp_4tap_horiz_pp_8x32_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_hpp = PFX(interp_4tap_horiz_pp_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_hpp = PFX(interp_4tap_horiz_pp_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_hpp = PFX(interp_4tap_horiz_pp_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_hpp = PFX(interp_4tap_horiz_pp_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_hpp = PFX(interp_4tap_horiz_pp_16x64_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_hpp = PFX(interp_4tap_horiz_pp_12x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_hpp = PFX(interp_4tap_horiz_pp_24x32_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_hpp = PFX(interp_4tap_horiz_pp_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_hpp = PFX(interp_4tap_horiz_pp_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_hpp = PFX(interp_4tap_horiz_pp_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_hpp = PFX(interp_4tap_horiz_pp_32x8_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_hpp = PFX(interp_4tap_horiz_pp_64x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_hpp = PFX(interp_4tap_horiz_pp_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_hpp = PFX(interp_4tap_horiz_pp_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_hpp = PFX(interp_4tap_horiz_pp_64x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_hpp = PFX(interp_4tap_horiz_pp_48x64_avx2);
+
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_hps = PFX(interp_4tap_horiz_ps_4x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_hps = PFX(interp_4tap_horiz_ps_4x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_hps = PFX(interp_4tap_horiz_ps_4x16_avx2);
+
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_hps = PFX(interp_4tap_horiz_ps_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_hps = PFX(interp_4tap_horiz_ps_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_hps = PFX(interp_4tap_horiz_ps_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_hps = PFX(interp_4tap_horiz_ps_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_hps = PFX(interp_4tap_horiz_ps_8x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_hps = PFX(interp_4tap_horiz_ps_8x12_avx2);
+
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_hps = PFX(interp_4tap_horiz_ps_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_hps = PFX(interp_4tap_horiz_ps_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_hps = PFX(interp_4tap_horiz_ps_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_hps = PFX(interp_4tap_horiz_ps_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_hps = PFX(interp_4tap_horiz_ps_16x24_avx2);
+
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_hps = PFX(interp_4tap_horiz_ps_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_hps = PFX(interp_4tap_horiz_ps_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_hps = PFX(interp_4tap_horiz_ps_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_hps = PFX(interp_4tap_horiz_ps_32x48_avx2);
+
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].filter_hps = PFX(interp_4tap_horiz_ps_2x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_hps = PFX(interp_4tap_horiz_ps_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].filter_hps = PFX(interp_4tap_horiz_ps_2x16_avx2);
+
+        //i444 chroma_hps
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_hps = PFX(interp_4tap_horiz_ps_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_hps = PFX(interp_4tap_horiz_ps_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_hps = PFX(interp_4tap_horiz_ps_64x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_hps = PFX(interp_4tap_horiz_ps_64x64_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_hps = PFX(interp_4tap_horiz_ps_4x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_hps = PFX(interp_4tap_horiz_ps_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_hps = PFX(interp_4tap_horiz_ps_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_hps = PFX(interp_4tap_horiz_ps_32x32_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_hps = PFX(interp_4tap_horiz_ps_4x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_hps = PFX(interp_4tap_horiz_ps_4x16_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_hps = PFX(interp_4tap_horiz_ps_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_hps = PFX(interp_4tap_horiz_ps_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_hps = PFX(interp_4tap_horiz_ps_8x32_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_hps = PFX(interp_4tap_horiz_ps_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_hps = PFX(interp_4tap_horiz_ps_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_hps = PFX(interp_4tap_horiz_ps_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_hps = PFX(interp_4tap_horiz_ps_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_hps = PFX(interp_4tap_horiz_ps_16x64_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_hps = PFX(interp_4tap_horiz_ps_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_hps = PFX(interp_4tap_horiz_ps_48x64_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_hps = PFX(interp_4tap_horiz_ps_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_hps = PFX(interp_4tap_horiz_ps_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_hps = PFX(interp_4tap_horiz_ps_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_hps = PFX(interp_4tap_horiz_ps_32x8_avx2);
+
+        //i422 for chroma_vsp
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_vsp = PFX(interp_4tap_vert_sp_4x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_vsp = PFX(interp_4tap_vert_sp_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vsp = PFX(interp_4tap_vert_sp_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vsp = PFX(interp_4tap_vert_sp_4x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].filter_vsp = PFX(interp_4tap_vert_sp_2x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_vsp = PFX(interp_4tap_vert_sp_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_vsp = PFX(interp_4tap_vert_sp_4x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vsp = PFX(interp_4tap_vert_sp_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_vsp = PFX(interp_4tap_vert_sp_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_vsp = PFX(interp_4tap_vert_sp_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_vsp = PFX(interp_4tap_vert_sp_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vsp = PFX(interp_4tap_vert_sp_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_vsp = PFX(interp_4tap_vert_sp_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_vsp = PFX(interp_4tap_vert_sp_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vsp = PFX(interp_4tap_vert_sp_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vsp = PFX(interp_4tap_vert_sp_24x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_vsp = PFX(interp_4tap_vert_sp_8x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_vsp = PFX(interp_4tap_vert_sp_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_vsp = PFX(interp_4tap_vert_sp_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].filter_vsp = PFX(interp_4tap_vert_sp_6x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].filter_vsp = PFX(interp_4tap_vert_sp_2x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vsp = PFX(interp_4tap_vert_sp_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vsp = PFX(interp_4tap_vert_sp_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].filter_vsp = PFX(interp_4tap_vert_sp_4x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x4].filter_vsp = PFX(interp_4tap_vert_sp_2x4_avx2);
+
+        //i444 for chroma_vsp
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_vsp = PFX(interp_4tap_vert_sp_4x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_vsp = PFX(interp_4tap_vert_sp_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vsp = PFX(interp_4tap_vert_sp_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vsp = PFX(interp_4tap_vert_sp_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vsp = PFX(interp_4tap_vert_sp_64x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_vsp = PFX(interp_4tap_vert_sp_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_vsp = PFX(interp_4tap_vert_sp_4x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vsp = PFX(interp_4tap_vert_sp_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_vsp = PFX(interp_4tap_vert_sp_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vsp = PFX(interp_4tap_vert_sp_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vsp = PFX(interp_4tap_vert_sp_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_vsp = PFX(interp_4tap_vert_sp_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vsp = PFX(interp_4tap_vert_sp_12x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vsp = PFX(interp_4tap_vert_sp_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_vsp = PFX(interp_4tap_vert_sp_4x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vsp = PFX(interp_4tap_vert_sp_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vsp = PFX(interp_4tap_vert_sp_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vsp = PFX(interp_4tap_vert_sp_32x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vsp = PFX(interp_4tap_vert_sp_8x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vsp = PFX(interp_4tap_vert_sp_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vsp = PFX(interp_4tap_vert_sp_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vsp = PFX(interp_4tap_vert_sp_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vsp = PFX(interp_4tap_vert_sp_48x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vsp = PFX(interp_4tap_vert_sp_64x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vsp = PFX(interp_4tap_vert_sp_16x64_avx2);
+
+        //i422 for chroma_vps
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_vps = PFX(interp_4tap_vert_ps_4x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vps = PFX(interp_4tap_vert_ps_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vps = PFX(interp_4tap_vert_ps_4x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].filter_vps = PFX(interp_4tap_vert_ps_2x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_vps = PFX(interp_4tap_vert_ps_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_vps = PFX(interp_4tap_vert_ps_4x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vps = PFX(interp_4tap_vert_ps_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_vps = PFX(interp_4tap_vert_ps_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_vps = PFX(interp_4tap_vert_ps_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vps = PFX(interp_4tap_vert_ps_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_vps = PFX(interp_4tap_vert_ps_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vps = PFX(interp_4tap_vert_ps_16x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_vps = PFX(interp_4tap_vert_ps_8x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_vps = PFX(interp_4tap_vert_ps_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_vps = PFX(interp_4tap_vert_ps_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vps = PFX(interp_4tap_vert_ps_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_vps = PFX(interp_4tap_vert_ps_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x4].filter_vps = PFX(interp_4tap_vert_ps_2x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vps = PFX(interp_4tap_vert_ps_16x24_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].filter_vps = PFX(interp_4tap_vert_ps_2x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].filter_vps = PFX(interp_4tap_vert_ps_4x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vps = PFX(interp_4tap_vert_ps_24x64_avx2);
+
+        //i444 for chroma_vps
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_vps = PFX(interp_4tap_vert_ps_4x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_vps = PFX(interp_4tap_vert_ps_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vps = PFX(interp_4tap_vert_ps_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vps = PFX(interp_4tap_vert_ps_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_vps = PFX(interp_4tap_vert_ps_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_vps = PFX(interp_4tap_vert_ps_4x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vps = PFX(interp_4tap_vert_ps_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vps = PFX(interp_4tap_vert_ps_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vps = PFX(interp_4tap_vert_ps_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_vps = PFX(interp_4tap_vert_ps_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vps = PFX(interp_4tap_vert_ps_12x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vps = PFX(interp_4tap_vert_ps_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_vps = PFX(interp_4tap_vert_ps_4x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vps = PFX(interp_4tap_vert_ps_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vps = PFX(interp_4tap_vert_ps_32x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vps = PFX(interp_4tap_vert_ps_16x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vps = PFX(interp_4tap_vert_ps_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vps = PFX(interp_4tap_vert_ps_48x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vps = PFX(interp_4tap_vert_ps_64x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vps = PFX(interp_4tap_vert_ps_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vps = PFX(interp_4tap_vert_ps_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vps = PFX(interp_4tap_vert_ps_64x16_avx2);
+
+        //i422 for chroma_vpp
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_vpp = PFX(interp_4tap_vert_pp_4x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_vpp = PFX(interp_4tap_vert_pp_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].filter_vpp = PFX(interp_4tap_vert_pp_2x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].filter_vpp = PFX(interp_4tap_vert_pp_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_vpp = PFX(interp_4tap_vert_pp_4x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_vpp = PFX(interp_4tap_vert_pp_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_vpp = PFX(interp_4tap_vert_pp_32x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_vpp = PFX(interp_4tap_vert_pp_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_vpp = PFX(interp_4tap_vert_pp_32x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_vpp = PFX(interp_4tap_vert_pp_8x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_vpp = PFX(interp_4tap_vert_pp_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_vpp = PFX(interp_4tap_vert_pp_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vpp = PFX(interp_4tap_vert_pp_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].filter_vpp = PFX(interp_4tap_vert_pp_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x4].filter_vpp = PFX(interp_4tap_vert_pp_2x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].filter_vpp = PFX(interp_4tap_vert_pp_2x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].filter_vpp = PFX(interp_4tap_vert_pp_4x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vpp = PFX(interp_4tap_vert_pp_24x64_avx2);
+
+        //i444 for chroma_vpp
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_vpp = PFX(interp_4tap_vert_pp_8x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vpp = PFX(interp_4tap_vert_pp_32x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_vpp = PFX(interp_4tap_vert_pp_8x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_vpp = PFX(interp_4tap_vert_pp_4x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_vpp = PFX(interp_4tap_vert_pp_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vpp = PFX(interp_4tap_vert_pp_32x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vpp = PFX(interp_4tap_vert_pp_12x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_vpp = PFX(interp_4tap_vert_pp_4x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vpp = PFX(interp_4tap_vert_pp_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vpp = PFX(interp_4tap_vert_pp_32x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vpp = PFX(interp_4tap_vert_pp_8x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vpp = PFX(interp_4tap_vert_pp_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vpp = PFX(interp_4tap_vert_pp_48x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vpp = PFX(interp_4tap_vert_pp_64x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vpp = PFX(interp_4tap_vert_pp_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vpp = PFX(interp_4tap_vert_pp_64x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vpp = PFX(interp_4tap_vert_pp_64x16_avx2);
+
+        p.frameInitLowres = PFX(frame_init_lowres_core_avx2);
+
+        if (cpuMask & X265_CPU_BMI2)
+            p.scanPosLast = PFX(scanPosLast_avx2_bmi2);
+
+        p.cu[BLOCK_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
+        p.chroma[X265_CSP_I420].cu[CHROMA_420_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
+        p.chroma[X265_CSP_I422].cu[CHROMA_422_32x64].copy_ps = PFX(blockcopy_ps_32x64_avx2);
+        p.cu[BLOCK_64x64].copy_ps = PFX(blockcopy_ps_64x64_avx2);
+        p.planeClipAndMax = PFX(planeClipAndMax_avx2);
+
+        /* The following primitives have been disabled since performance compared to SSE is negligible/negative */
+#if 0
+        p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_avx2);
+        p.pu[LUMA_8x8].addAvg = PFX(addAvg_8x8_avx2);
+        p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg = PFX(addAvg_8x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg = PFX(addAvg_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg = PFX(addAvg_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg = PFX(addAvg_12x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg = PFX(addAvg_8x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg = PFX(addAvg_12x32_avx2);
+        p.pu[LUMA_16x4].pixelavg_pp = PFX(pixel_avg_16x4_avx2);
+        p.pu[LUMA_16x8].pixelavg_pp = PFX(pixel_avg_16x8_avx2);
+        p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_16x12_avx2);
+        p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_16x16_avx2);
+        p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_16x32_avx2);
+        p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_16x64_avx2);
+        p.pu[LUMA_8x4].sad_x3 = PFX(pixel_sad_x3_8x4_avx2);
+        p.pu[LUMA_8x16].sad_x3 = PFX(pixel_sad_x3_8x16_avx2);
+        p.pu[LUMA_8x8].sad_x4 = PFX(pixel_sad_x4_8x8_avx2);
+        p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_avx2);
+        p.cu[BLOCK_4x4].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_4_avx2);
+        p.cu[BLOCK_4x4].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_4_avx2);
+        p.cu[BLOCK_4x4].count_nonzero = PFX(count_nonzero_4x4_avx2);
+        p.cu[BLOCK_16x16].intra_pred[13] = PFX(intra_pred_ang16_13_avx2);
+        p.cu[BLOCK_16x16].copy_sp = PFX(blockcopy_sp_16x16_avx2);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].copy_sp = PFX(blockcopy_sp_16x16_avx2);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].copy_sp = PFX(blockcopy_sp_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_hpp = PFX(interp_4tap_horiz_pp_4x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_hpp = PFX(interp_4tap_horiz_pp_4x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vpp = PFX(interp_4tap_vert_pp_16x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vps = PFX(interp_4tap_vert_ps_4x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vps = PFX(interp_4tap_vert_ps_6x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].filter_hpp = PFX(interp_4tap_horiz_pp_4x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].filter_hpp = PFX(interp_4tap_horiz_pp_4x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x8].filter_hpp = PFX(interp_4tap_horiz_pp_4x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x16].filter_hpp = PFX(interp_4tap_horiz_pp_4x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vpp = PFX(interp_4tap_vert_pp_16x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vpp = PFX(interp_4tap_vert_pp_16x4_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_hpp = PFX(interp_4tap_horiz_pp_4x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_4x4].filter_hpp = PFX(interp_4tap_horiz_pp_4x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].filter_hpp = PFX(interp_4tap_horiz_pp_4x4_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vpp = PFX(interp_4tap_vert_pp_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vpp = PFX(interp_4tap_vert_pp_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vpp = PFX(interp_4tap_vert_pp_6x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vpp = PFX(interp_4tap_vert_pp_16x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vpp = PFX(interp_4tap_vert_pp_16x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vpp = PFX(interp_4tap_vert_pp_16x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vpp = PFX(interp_4tap_vert_pp_16x12_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_vpp = PFX(interp_4tap_vert_pp_16x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vpp = PFX(interp_4tap_vert_pp_16x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vpp = PFX(interp_4tap_vert_pp_16x64_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vpp = PFX(interp_4tap_vert_pp_4x2_avx2);
+
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vss = PFX(interp_4tap_vert_ss_64x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_vss = PFX(interp_4tap_vert_ss_8x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vss = PFX(interp_4tap_vert_ss_64x48_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vss = PFX(interp_4tap_vert_ss_48x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_vss = PFX(interp_4tap_vert_ss_32x64_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vss = PFX(interp_4tap_vert_ss_32x64_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_vss = PFX(interp_4tap_vert_ss_32x48_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vss = PFX(interp_4tap_vert_ss_24x64_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_vss = PFX(interp_4tap_vert_ss_32x24_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vss = PFX(interp_4tap_vert_ss_32x24_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_vps = PFX(interp_4tap_vert_ps_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].filter_vps = PFX(interp_4tap_vert_ps_8x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vps = PFX(interp_4tap_vert_ps_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vps = PFX(interp_4tap_vert_ps_24x32_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vps = PFX(interp_4tap_vert_ps_24x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_vps = PFX(interp_4tap_vert_ps_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].filter_vps = PFX(interp_4tap_vert_ps_8x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_vps = PFX(interp_4tap_vert_ps_8x16_avx2);
+
+        p.pu[LUMA_8x8].sad_x3 = PFX(pixel_sad_x3_8x8_avx2);
+        p.pu[LUMA_16x8].sad_x4 = PFX(pixel_sad_x4_16x8_avx2);
+        p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_avx2);
+
+        p.pu[LUMA_8x16].addAvg = PFX(addAvg_8x16_avx2);
+        p.pu[LUMA_8x32].addAvg = PFX(addAvg_8x32_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg = PFX(addAvg_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg = PFX(addAvg_8x16_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg = PFX(addAvg_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg = PFX(addAvg_8x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg = PFX(addAvg_8x12_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg = PFX(addAvg_8x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg = PFX(addAvg_8x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg = PFX(addAvg_8x64_avx2);
+
+        p.cu[BLOCK_8x8].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_8_avx2);
+        p.saoCuOrgE3[0] = PFX(saoCuOrgE3_avx2);
+
+        p.cu[BLOCK_16x16].intra_pred[22] = PFX(intra_pred_ang16_22_avx2);
+        p.cu[BLOCK_8x8].intra_pred[21] = PFX(intra_pred_ang8_21_avx2);
+        p.cu[BLOCK_8x8].intra_pred[15] = PFX(intra_pred_ang8_15_avx2);
+#endif
+    }
+#endif
+}
+#endif // if HIGH_BIT_DEPTH
+
+} // namespace X265_NS
+
+extern "C" {
+#ifdef __INTEL_COMPILER
+
+/* Agner's patch to Intel's CPU dispatcher from pages 131-132 of
+ * http://agner.org/optimize/optimizing_cpp.pdf (2011-01-30)
+ * adapted to x265's cpu schema. */
+
+// Global variable indicating cpu
+// (read by the Intel compiler's dispatched library code to pick an ISA path)
+int __intel_cpu_indicator = 0;
+// CPU dispatcher function
+// Translates x265's detected CPU feature flags into the magic values the
+// Intel compiler's dispatcher expects; the highest supported ISA wins.
+void PFX(intel_cpu_indicator_init)(void)
+{
+    uint32_t cpu = x265::cpu_detect();
+
+    if (cpu & X265_CPU_AVX)
+        __intel_cpu_indicator = 0x20000;
+    else if (cpu & X265_CPU_SSE42)
+        __intel_cpu_indicator = 0x8000;
+    else if (cpu & X265_CPU_SSE4)
+        __intel_cpu_indicator = 0x2000;
+    else if (cpu & X265_CPU_SSSE3)
+        __intel_cpu_indicator = 0x1000;
+    else if (cpu & X265_CPU_SSE3)
+        __intel_cpu_indicator = 0x800;
+    // SSE2 is only advertised when it is not flagged as slow on this CPU
+    else if (cpu & X265_CPU_SSE2 && !(cpu & X265_CPU_SSE2_IS_SLOW))
+        __intel_cpu_indicator = 0x200;
+    else if (cpu & X265_CPU_SSE)
+        __intel_cpu_indicator = 0x80;
+    else if (cpu & X265_CPU_MMX2)
+        __intel_cpu_indicator = 8;
+    else
+        __intel_cpu_indicator = 1;
+}
+
+/* __intel_cpu_indicator_init appears to have a non-standard calling convention that
+ * assumes certain registers aren't preserved, so we'll route it through a function
+ * that backs up all the registers. */
+void __intel_cpu_indicator_init(void)
+{
+    /* Delegate to x265_safe_intel_cpu_indicator_init(), a wrapper that
+     * backs up all registers before doing the real detection work. */
+    x265_safe_intel_cpu_indicator_init();
+}
+
+#else // ifdef __INTEL_COMPILER
+void PFX(intel_cpu_indicator_init)(void) {}
+
+#endif // ifdef __INTEL_COMPILER
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/blockcopy8.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,5878 @@
+;*****************************************************************************
+;* Copyright (C) 2013 x265 project
+;*
+;* Authors: Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+;*          Murugan Vairavel <murugan@multicorewareinc.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32
+
+tab_Vm:    db 0, 2, 4, 6, 8, 10, 12, 14, 0, 0, 0, 0, 0, 0, 0, 0
+
+cextern pb_4
+cextern pb_1
+cextern pb_16
+cextern pb_64
+cextern pw_4
+cextern pb_8
+cextern pb_32
+cextern pb_128
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_2x4(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_2x4, 4, 7, 0
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; 2-pixel-wide rows are copied with 16-bit GPR moves; SIMD does not pay off.
+    mov    r4w,    [r2]
+    mov    r5w,    [r2 + r3]
+    mov    r6w,    [r2 + 2 * r3]
+    ; r3 becomes 3 * srcStride; after the next load the stride register
+    ; itself is reused as the fourth row's temporary.
+    lea    r3,     [r3 + 2 * r3]
+    mov    r3w,    [r2 + r3]
+
+    mov    [r0],          r4w
+    mov    [r0 + r1],     r5w
+    mov    [r0 + 2 * r1], r6w
+    ; r1 = 3 * dstStride (dstStride no longer needed afterwards)
+    lea    r1,            [r1 + 2 * r1]
+    mov    [r0 + r1],     r3w
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_2x8(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_2x8, 4, 7, 0
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r5 = 3 * dstStride, r6 = 3 * srcStride; rows copied 4 at a time via r4w.
+    lea     r5,      [3 * r1]
+    lea     r6,      [3 * r3]
+
+    mov     r4w,           [r2]
+    mov     [r0],          r4w
+    mov     r4w,           [r2 + r3]
+    mov     [r0 + r1],     r4w
+    mov     r4w,           [r2 + 2 * r3]
+    mov     [r0 + 2 * r1], r4w
+    mov     r4w,           [r2 + r6]
+    mov     [r0 + r5],     r4w
+
+    ; advance both pointers by four rows, then copy rows 4..7
+    lea     r2,            [r2 + 4 * r3]
+    mov     r4w,           [r2]
+    lea     r0,            [r0 + 4 * r1]
+    mov     [r0],          r4w
+
+    mov     r4w,           [r2 + r3]
+    mov     [r0 + r1],     r4w
+    mov     r4w,           [r2 + 2 * r3]
+    mov     [r0 + 2 * r1], r4w
+    mov     r4w,           [r2 + r6]
+    mov     [r0 + r5],     r4w
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_2x16(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_2x16, 4, 7, 0
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r5 = 3 * dstStride, r6 = 3 * srcStride
+    lea     r5,      [3 * r1]
+    lea     r6,      [3 * r3]
+
+    ; rows 0..3
+    mov     r4w,           [r2]
+    mov     [r0],          r4w
+    mov     r4w,           [r2 + r3]
+    mov     [r0 + r1],     r4w
+    mov     r4w,           [r2 + 2 * r3]
+    mov     [r0 + 2 * r1], r4w
+    mov     r4w,           [r2 + r6]
+    mov     [r0 + r5],     r4w
+
+; rows 4..15: three more unrolled groups of four rows
+%rep 3
+    lea     r2,            [r2 + 4 * r3]
+    mov     r4w,           [r2]
+    lea     r0,            [r0 + 4 * r1]
+    mov     [r0],          r4w
+    mov     r4w,           [r2 + r3]
+    mov     [r0 + r1],     r4w
+    mov     r4w,           [r2 + 2 * r3]
+    mov     [r0 + 2 * r1], r4w
+    mov     r4w,           [r2 + r6]
+    mov     [r0 + r5],     r4w
+%endrep
+    RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_4x2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_4x2, 4, 6, 0
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; each 4-pixel row fits in one 32-bit GPR load/store
+    mov     r4d,     [r2]
+    mov     r5d,     [r2 + r3]
+
+    mov     [r0],            r4d
+    mov     [r0 + r1],       r5d
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_4x4(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_4x4, 4, 4, 4
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; one 4-byte movd per row, all four rows loaded before any store
+    movd     m0,     [r2]
+    movd     m1,     [r2 + r3]
+    movd     m2,     [r2 + 2 * r3]
+    ; r3 = 3 * srcStride (srcStride not needed afterwards)
+    lea      r3,     [r3 + r3 * 2]
+    movd     m3,     [r2 + r3]
+
+    movd     [r0],            m0
+    movd     [r0 + r1],       m1
+    movd     [r0 + 2 * r1],   m2
+    ; r1 = 3 * dstStride
+    lea      r1,              [r1 + 2 * r1]
+    movd     [r0 + r1],       m3
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_4x8(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_4x8, 4, 6, 4
+
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r4 = 3 * dstStride, r5 = 3 * srcStride
+    lea     r4,    [3 * r1]
+    lea     r5,    [3 * r3]
+
+    ; rows 0..3 (one 4-byte movd per row)
+    movd     m0,     [r2]
+    movd     m1,     [r2 + r3]
+    movd     m2,     [r2 + 2 * r3]
+    movd     m3,     [r2 + r5]
+
+    movd     [r0],          m0
+    movd     [r0 + r1],     m1
+    movd     [r0 + 2 * r1], m2
+    movd     [r0 + r4],     m3
+
+    ; rows 4..7
+    lea      r2,     [r2 + 4 * r3]
+    movd     m0,     [r2]
+    movd     m1,     [r2 + r3]
+    movd     m2,     [r2 + 2 * r3]
+    movd     m3,     [r2 + r5]
+
+    lea      r0,            [r0 + 4 * r1]
+    movd     [r0],          m0
+    movd     [r0 + r1],     m1
+    movd     [r0 + 2 * r1], m2
+    movd     [r0 + r4],     m3
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_%1x%2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; Width-4 copy, unrolled 8 rows per loop iteration.
+; %1 = width (always 4), %2 = height (must be a multiple of 8)
+%macro BLOCKCOPY_PP_W4_H8 2
+INIT_XMM sse2
+cglobal blockcopy_pp_%1x%2, 4, 7, 4
+    ; r4 = loop count (height / 8); r5 = 3 * dstStride, r6 = 3 * srcStride
+    mov    r4d,    %2/8
+    lea    r5,     [3 * r1]
+    lea    r6,     [3 * r3]
+
+.loop:
+    ; first four rows of this iteration
+    movd     m0,     [r2]
+    movd     m1,     [r2 + r3]
+    movd     m2,     [r2 + 2 * r3]
+    movd     m3,     [r2 + r6]
+
+    movd     [r0],          m0
+    movd     [r0 + r1],     m1
+    movd     [r0 + 2 * r1], m2
+    movd     [r0 + r5],     m3
+
+    ; next four rows
+    lea      r2,     [r2 + 4 * r3]
+    movd     m0,     [r2]
+    movd     m1,     [r2 + r3]
+    movd     m2,     [r2 + 2 * r3]
+    movd     m3,     [r2 + r6]
+
+    lea      r0,            [r0 + 4 * r1]
+    movd     [r0],          m0
+    movd     [r0 + r1],     m1
+    movd     [r0 + 2 * r1], m2
+    movd     [r0 + r5],     m3
+
+    ; advance to the start of the next 8-row group
+    lea       r0,                  [r0 + 4 * r1]
+    lea       r2,                  [r2 + 4 * r3]
+
+    dec       r4d
+    jnz       .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PP_W4_H8 4, 16
+BLOCKCOPY_PP_W4_H8 4, 32
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_6x8(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_6x8, 4, 7, 3
+
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; each 6-pixel row is split into a 4-byte movd plus a 2-byte GPR move
+    movd     m0,  [r2]
+    mov      r4w, [r2 + 4]
+    movd     m1,  [r2 + r3]
+    mov      r5w, [r2 + r3 + 4]
+    movd     m2,  [r2 + 2 * r3]
+    mov      r6w, [r2 + 2 * r3 + 4]
+
+    movd     [r0],              m0
+    mov      [r0 + 4],          r4w
+    movd     [r0 + r1],         m1
+    mov      [r0 + r1 + 4],     r5w
+    movd     [r0 + 2 * r1],     m2
+    mov      [r0 + 2 * r1 + 4], r6w
+
+    ; rows 3..5: src pointer is bumped in 2-row steps, rows addressed
+    ; relative to it with r3 / 2*r3 offsets
+    lea      r2,  [r2 + 2 * r3]
+    movd     m0,  [r2 + r3]
+    mov      r4w, [r2 + r3 + 4]
+    movd     m1,  [r2 + 2 * r3]
+    mov      r5w, [r2 + 2 * r3 + 4]
+    lea      r2,  [r2 + 2 * r3]
+    movd     m2,  [r2 + r3]
+    mov      r6w, [r2 + r3 + 4]
+
+    lea      r0,                [r0 + 2 * r1]
+    movd     [r0 + r1],         m0
+    mov      [r0 + r1 + 4],     r4w
+    movd     [r0 + 2 * r1],     m1
+    mov      [r0 + 2 * r1 + 4], r5w
+    lea      r0,                [r0 + 2 * r1]
+    movd     [r0 + r1],         m2
+    mov      [r0 + r1 + 4],     r6w
+
+    ; rows 6..7
+    lea      r2,                [r2 + 2 * r3]
+    movd     m0,                [r2]
+    mov      r4w,               [r2 + 4]
+    movd     m1,                [r2 + r3]
+    mov      r5w,               [r2 + r3 + 4]
+
+    lea      r0,            [r0 + 2 * r1]
+    movd     [r0],          m0
+    mov      [r0 + 4],      r4w
+    movd     [r0 + r1],     m1
+    mov      [r0 + r1 + 4], r5w
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_6x16(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_6x16, 4, 7, 2
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r6 = loop count; two rows per iteration, each row = 4-byte movd + 2-byte GPR
+    mov     r6d,    16/2
+.loop:
+    movd    m0,     [r2]
+    mov     r4w,    [r2 + 4]
+    movd    m1,     [r2 + r3]
+    mov     r5w,    [r2 + r3 + 4]
+    lea     r2,     [r2 + r3 * 2]
+    movd    [r0],           m0
+    mov     [r0 + 4],       r4w
+    movd    [r0 + r1],      m1
+    mov     [r0 + r1 + 4],  r5w
+    lea     r0,     [r0 + r1 * 2]
+    dec     r6d
+    jnz     .loop
+    RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_8x2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_8x2, 4, 4, 2
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; one 8-byte movh per row
+    movh     m0,        [r2]
+    movh     m1,        [r2 + r3]
+
+    movh     [r0],       m0
+    movh     [r0 + r1],  m1
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_8x4(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_8x4, 4, 4, 4
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; one 8-byte movh per row; stride registers become 3*stride for row 3
+    movh     m0,     [r2]
+    movh     m1,     [r2 + r3]
+    movh     m2,     [r2 + 2 * r3]
+    lea      r3,     [r3 + r3 * 2]
+    movh     m3,     [r2 + r3]
+
+    movh     [r0],            m0
+    movh     [r0 + r1],       m1
+    movh     [r0 + 2 * r1],   m2
+    lea      r1,              [r1 + 2 * r1]
+    movh     [r0 + r1],       m3
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_8x6(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_8x6, 4, 4, 6
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; all six rows are loaded into m0..m5 (two rows per pointer bump),
+    ; then stored out in the same order
+    movh     m0,     [r2]
+    movh     m1,     [r2 + r3]
+    lea      r2,     [r2 + 2 * r3]
+    movh     m2,     [r2]
+    movh     m3,     [r2 + r3]
+    lea      r2,     [r2 + 2 * r3]
+    movh     m4,     [r2]
+    movh     m5,     [r2 + r3]
+
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    lea      r0,            [r0 + 2 * r1]
+    movh     [r0],          m2
+    movh     [r0 + r1],     m3
+    lea      r0,            [r0 + 2 * r1]
+    movh     [r0],          m4
+    movh     [r0 + r1],     m5
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_8x12(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_8x12, 4, 6, 4
+
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r4 = 3 * srcStride, r5 = 3 * dstStride
+    lea      r4, [3 * r3]
+    lea      r5, [3 * r1]
+
+    ; rows 0..3
+    movh     m0, [r2]
+    movh     m1, [r2 + r3]
+    movh     m2, [r2 + 2 * r3]
+    movh     m3, [r2 + r4]
+
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    movh     [r0 + 2 * r1], m2
+    movh     [r0 + r5],     m3
+
+    ; rows 4..11: two more unrolled groups of four rows
+    %rep 2
+    lea      r2, [r2 + 4 * r3]
+    movh     m0, [r2]
+    movh     m1, [r2 + r3]
+    movh     m2, [r2 + 2 * r3]
+    movh     m3, [r2 + r4]
+
+    lea      r0,            [r0 + 4 * r1]
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    movh     [r0 + 2 * r1], m2
+    movh     [r0 + r5],     m3
+    %endrep
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_8x8(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_8x8, 4, 6, 4
+
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r4 = 3 * srcStride, r5 = 3 * dstStride; fully unrolled, 4 rows per group
+    lea      r4, [3 * r3]
+    lea      r5, [3 * r1]
+
+    movh     m0, [r2]
+    movh     m1, [r2 + r3]
+    movh     m2, [r2 + 2 * r3]
+    movh     m3, [r2 + r4]
+
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    movh     [r0 + 2 * r1], m2
+    movh     [r0 + r5],     m3
+
+    ; rows 4..7
+    lea      r2, [r2 + 4 * r3]
+    movh     m0, [r2]
+    movh     m1, [r2 + r3]
+    movh     m2, [r2 + 2 * r3]
+    movh     m3, [r2 + r4]
+
+    lea      r0,            [r0 + 4 * r1]
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    movh     [r0 + 2 * r1], m2
+    movh     [r0 + r5],     m3
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_8x16(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_8x16, 4, 6, 4
+
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r4 = 3 * srcStride, r5 = 3 * dstStride
+    lea      r4, [3 * r3]
+    lea      r5, [3 * r1]
+
+    ; rows 0..3
+    movh     m0, [r2]
+    movh     m1, [r2 + r3]
+    movh     m2, [r2 + 2 * r3]
+    movh     m3, [r2 + r4]
+
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    movh     [r0 + 2 * r1], m2
+    movh     [r0 + r5],     m3
+
+    ; rows 4..15: three more unrolled groups of four rows
+    %rep 3
+    lea      r2, [r2 + 4 * r3]
+    movh     m0, [r2]
+    movh     m1, [r2 + r3]
+    movh     m2, [r2 + 2 * r3]
+    movh     m3, [r2 + r4]
+
+    lea      r0,            [r0 + 4 * r1]
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    movh     [r0 + 2 * r1], m2
+    movh     [r0 + r5],     m3
+    %endrep
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_8x32(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_8x32, 4, 6, 4
+
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r4 = 3 * srcStride, r5 = 3 * dstStride
+    lea      r4, [3 * r3]
+    lea      r5, [3 * r1]
+
+    ; rows 0..3
+    movh     m0, [r2]
+    movh     m1, [r2 + r3]
+    movh     m2, [r2 + 2 * r3]
+    movh     m3, [r2 + r4]
+
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    movh     [r0 + 2 * r1], m2
+    movh     [r0 + r5],     m3
+
+    ; rows 4..31: seven more unrolled groups of four rows
+    %rep 7
+    lea      r2, [r2 + 4 * r3]
+    movh     m0, [r2]
+    movh     m1, [r2 + r3]
+    movh     m2, [r2 + 2 * r3]
+    movh     m3, [r2 + r4]
+
+    lea      r0,            [r0 + 4 * r1]
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    movh     [r0 + 2 * r1], m2
+    movh     [r0 + r5],     m3
+    %endrep
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_8x64(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_pp_8x64, 4, 6, 4
+
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r4 = 3 * srcStride, r5 = 3 * dstStride
+    lea      r4, [3 * r3]
+    lea      r5, [3 * r1]
+
+    ; rows 0..3
+    movh     m0, [r2]
+    movh     m1, [r2 + r3]
+    movh     m2, [r2 + 2 * r3]
+    movh     m3, [r2 + r4]
+
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    movh     [r0 + 2 * r1], m2
+    movh     [r0 + r5],     m3
+
+    ; rows 4..63: fifteen more unrolled groups of four rows
+    %rep 15
+    lea      r2, [r2 + 4 * r3]
+    movh     m0, [r2]
+    movh     m1, [r2 + r3]
+    movh     m2, [r2 + 2 * r3]
+    movh     m3, [r2 + r4]
+
+    lea      r0,            [r0 + 4 * r1]
+    movh     [r0],          m0
+    movh     [r0 + r1],     m1
+    movh     [r0 + 2 * r1], m2
+    movh     [r0 + r5],     m3
+    %endrep
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_%1x%2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; Width-12 copy, unrolled 4 rows per loop iteration.
+; %1 = width (always 12), %2 = height (must be a multiple of 4)
+; Each 12-byte row is copied as one 8-byte movh plus one 4-byte movd.
+%macro BLOCKCOPY_PP_W12_H4 2
+INIT_XMM sse2
+cglobal blockcopy_pp_%1x%2, 4, 5, 4
+    mov         r4d,       %2/4
+
+.loop:
+    movh    m0,     [r2]
+    movd    m1,     [r2 + 8]
+    movh    m2,     [r2 + r3]
+    movd    m3,     [r2 + r3 + 8]
+    lea     r2,     [r2 + 2 * r3]
+
+    movh    [r0],             m0
+    movd    [r0 + 8],         m1
+    movh    [r0 + r1],        m2
+    movd    [r0 + r1 + 8],    m3
+    lea     r0,               [r0 + 2 * r1]
+
+    movh    m0,     [r2]
+    movd    m1,     [r2 + 8]
+    movh    m2,     [r2 + r3]
+    movd    m3,     [r2 + r3 + 8]
+
+    movh    [r0],             m0
+    movd    [r0 + 8],         m1
+    movh    [r0 + r1],        m2
+    movd    [r0 + r1 + 8],    m3
+
+    dec     r4d
+    lea     r0,               [r0 + 2 * r1]
+    lea     r2,               [r2 + 2 * r3]
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PP_W12_H4 12, 16
+
+BLOCKCOPY_PP_W12_H4 12, 32
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_16x4(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; Width-16 copy, unrolled 4 rows per loop iteration.
+; %1 = width (always 16), %2 = height (must be a multiple of 4)
+; Each 16-byte row is one unaligned xmm load/store.
+%macro BLOCKCOPY_PP_W16_H4 2
+INIT_XMM sse2
+cglobal blockcopy_pp_%1x%2, 4, 5, 4
+    mov    r4d,    %2/4
+
+.loop:
+    movu    m0,    [r2]
+    movu    m1,    [r2 + r3]
+    lea     r2,    [r2 + 2 * r3]
+    movu    m2,    [r2]
+    movu    m3,    [r2 + r3]
+
+    movu    [r0],         m0
+    movu    [r0 + r1],    m1
+    lea     r0,           [r0 + 2 * r1]
+    movu    [r0],         m2
+    movu    [r0 + r1],    m3
+
+    dec     r4d
+    lea     r0,               [r0 + 2 * r1]
+    lea     r2,               [r2 + 2 * r3]
+    jnz     .loop
+
+    RET
+%endmacro
+
+BLOCKCOPY_PP_W16_H4 16, 4
+BLOCKCOPY_PP_W16_H4 16, 12
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_%1x%2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; Width-16 copy, unrolled 8 rows per loop iteration.
+; %1 = width (always 16), %2 = height (must be a multiple of 8)
+%macro BLOCKCOPY_PP_W16_H8 2
+INIT_XMM sse2
+cglobal blockcopy_pp_%1x%2, 4, 5, 6
+    mov    r4d,    %2/8
+
+.loop:
+    ; load six rows into m0..m5, store them, then handle the last two rows
+    movu    m0,    [r2]
+    movu    m1,    [r2 + r3]
+    lea     r2,    [r2 + 2 * r3]
+    movu    m2,    [r2]
+    movu    m3,    [r2 + r3]
+    lea     r2,    [r2 + 2 * r3]
+    movu    m4,    [r2]
+    movu    m5,    [r2 + r3]
+    lea     r2,    [r2 + 2 * r3]
+
+    movu    [r0],         m0
+    movu    [r0 + r1],    m1
+    lea     r0,           [r0 + 2 * r1]
+    movu    [r0],         m2
+    movu    [r0 + r1],    m3
+    lea     r0,           [r0 + 2 * r1]
+    movu    [r0],         m4
+    movu    [r0 + r1],    m5
+    lea     r0,           [r0 + 2 * r1]
+
+    ; rows 6..7 of this group
+    movu    m0,           [r2]
+    movu    m1,           [r2 + r3]
+    movu    [r0],         m0
+    movu    [r0 + r1],    m1
+
+    dec    r4d
+    lea    r0,    [r0 + 2 * r1]
+    lea    r2,    [r2 + 2 * r3]
+    jnz    .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PP_W16_H8 16, 8
+BLOCKCOPY_PP_W16_H8 16, 16
+BLOCKCOPY_PP_W16_H8 16, 32
+BLOCKCOPY_PP_W16_H8 16, 64
+
+BLOCKCOPY_PP_W16_H8 16, 24
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_%1x%2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; Width-24 copy, unrolled 4 rows per loop iteration.
+; %1 = width (always 24), %2 = height (must be a multiple of 4)
+; Each 24-byte row is one 16-byte movu plus one 8-byte movh.
+%macro BLOCKCOPY_PP_W24_H4 2
+INIT_XMM sse2
+cglobal blockcopy_pp_%1x%2, 4, 5, 6
+    mov    r4d,    %2/4
+
+.loop:
+    movu    m0,    [r2]
+    movh    m1,    [r2 + 16]
+    movu    m2,    [r2 + r3]
+    movh    m3,    [r2 + r3 + 16]
+    lea     r2,    [r2 + 2 * r3]
+    movu    m4,    [r2]
+    movh    m5,    [r2 + 16]
+
+    movu    [r0],              m0
+    movh    [r0 + 16],         m1
+    movu    [r0 + r1],         m2
+    movh    [r0 + r1 + 16],    m3
+    lea     r0,                [r0 + 2 * r1]
+    movu    [r0],              m4
+    movh    [r0 + 16],         m5
+
+    ; fourth row of this group
+    movu    m0,                [r2 + r3]
+    movh    m1,                [r2 + r3 + 16]
+    movu    [r0 + r1],         m0
+    movh    [r0 + r1 + 16],    m1
+
+    dec    r4d
+    lea    r0,    [r0 + 2 * r1]
+    lea    r2,    [r2 + 2 * r3]
+    jnz    .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PP_W24_H4 24, 32
+
+BLOCKCOPY_PP_W24_H4 24, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_%1x%2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; Width-32 copy (SSE2), unrolled 4 rows per loop iteration.
+; %1 = width (always 32), %2 = height (must be a multiple of 4)
+; Each 32-byte row is two 16-byte unaligned xmm loads/stores.
+%macro BLOCKCOPY_PP_W32_H4 2
+INIT_XMM sse2
+cglobal blockcopy_pp_%1x%2, 4, 5, 4
+    mov    r4d,    %2/4
+
+.loop:
+    movu    m0,    [r2]
+    movu    m1,    [r2 + 16]
+    movu    m2,    [r2 + r3]
+    movu    m3,    [r2 + r3 + 16]
+    lea     r2,    [r2 + 2 * r3]
+
+    movu    [r0],              m0
+    movu    [r0 + 16],         m1
+    movu    [r0 + r1],         m2
+    movu    [r0 + r1 + 16],    m3
+    lea     r0,                [r0 + 2 * r1]
+
+    movu    m0,    [r2]
+    movu    m1,    [r2 + 16]
+    movu    m2,    [r2 + r3]
+    movu    m3,    [r2 + r3 + 16]
+
+    movu    [r0],              m0
+    movu    [r0 + 16],         m1
+    movu    [r0 + r1],         m2
+    movu    [r0 + r1 + 16],    m3
+
+    dec    r4d
+    lea    r0,    [r0 + 2 * r1]
+    lea    r2,    [r2 + 2 * r3]
+    jnz    .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PP_W32_H4 32, 8
+BLOCKCOPY_PP_W32_H4 32, 16
+BLOCKCOPY_PP_W32_H4 32, 24
+BLOCKCOPY_PP_W32_H4 32, 32
+BLOCKCOPY_PP_W32_H4 32, 64
+
+BLOCKCOPY_PP_W32_H4 32, 48
+
+; AVX variant: one 32-byte unaligned ymm load/store per row.
+INIT_YMM avx
+cglobal blockcopy_pp_32x8, 4, 6, 6
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r4 = 3 * dstStride, r5 = 3 * srcStride
+    lea    r4, [3 * r1]
+    lea    r5, [3 * r3]
+
+    ; rows 0..5
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + 2 * r3]
+    movu    m3, [r2 + r5]
+    lea     r2, [r2 + 4 * r3]
+    movu    m4, [r2]
+    movu    m5, [r2 + r3]
+
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    movu    [r0 + 2 * r1], m2
+    movu    [r0 + r4], m3
+    lea     r0, [r0 + 4 * r1]
+    movu    [r0], m4
+    movu    [r0 + r1], m5
+
+    ; rows 6..7
+    movu    m0, [r2 + 2 * r3]
+    movu    m1, [r2 + r5]
+
+    movu    [r0 + 2 * r1], m0
+    movu    [r0 + r4], m1
+    RET
+
+; AVX variant: one 32-byte unaligned ymm load/store per row, fully unrolled.
+INIT_YMM avx
+cglobal blockcopy_pp_32x16, 4, 6, 6
+    ; r0 = dst, r1 = dstStride, r2 = src, r3 = srcStride
+    ; r4 = 3 * dstStride, r5 = 3 * srcStride
+    lea    r4,  [3 * r1]
+    lea    r5,  [3 * r3]
+
+    ; rows 0..5
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + 2 * r3]
+    movu    m3, [r2 + r5]
+    lea     r2, [r2 + 4 * r3]
+    movu    m4, [r2]
+    movu    m5, [r2 + r3]
+
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    movu    [r0 + 2 * r1], m2
+    movu    [r0 + r4], m3
+    lea     r0, [r0 + 4 * r1]
+    movu    [r0], m4
+    movu    [r0 + r1], m5
+
+    ; rows 6..11
+    movu    m0, [r2 + 2 * r3]
+    movu    m1, [r2 + r5]
+    lea     r2, [r2 + 4 * r3]
+    movu    m2, [r2]
+    movu    m3, [r2 + r3]
+    movu    m4, [r2 + 2 * r3]
+    movu    m5, [r2 + r5]
+
+    movu    [r0 + 2 * r1], m0
+    movu    [r0 + r4], m1
+    lea     r0, [r0 + 4 * r1]
+    movu    [r0], m2
+    movu    [r0 + r1], m3
+    movu    [r0 + 2 * r1], m4
+    movu    [r0 + r4], m5
+
+    ; rows 12..15
+    lea     r2, [r2 + 4 * r3]
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + 2 * r3]
+    movu    m3, [r2 + r5]
+
+    lea     r0, [r0 + 4 * r1]
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    movu    [r0 + 2 * r1], m2
+    movu    [r0 + r4], m3
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_32x24(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+; AVX looped variant: each iteration copies 8 rows; r6d counts 24/8 = 3 passes.
+;-----------------------------------------------------------------------------
+INIT_YMM avx
+cglobal blockcopy_pp_32x24, 4, 7, 6
+lea    r4,  [3 * r1]
+lea    r5,  [3 * r3]
+mov    r6d, 24/8
+
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + 2 * r3]
+    movu    m3, [r2 + r5]
+    lea     r2, [r2 + 4 * r3]
+    movu    m4, [r2]
+    movu    m5, [r2 + r3]
+
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    movu    [r0 + 2 * r1], m2
+    movu    [r0 + r4], m3
+    lea     r0, [r0 + 4 * r1]
+    movu    [r0], m4
+    movu    [r0 + r1], m5
+
+    movu    m0, [r2 + 2 * r3]
+    movu    m1, [r2 + r5]
+
+    movu    [r0 + 2 * r1], m0
+    movu    [r0 + r4], m1
+
+    lea     r2, [r2 + 4 * r3]
+    lea     r0, [r0 + 4 * r1]
+    dec     r6d
+    jnz     .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_%1x%2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+; AVX looped 32-wide copy: each loop iteration moves 16 rows (r6d = %2/16).
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_PP_W32_H16_avx 2
+INIT_YMM avx
+cglobal blockcopy_pp_%1x%2, 4, 7, 6
+    lea    r4,  [3 * r1]
+    lea    r5,  [3 * r3]
+    mov    r6d, %2/16
+
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + 2 * r3]
+    movu    m3, [r2 + r5]
+    lea     r2, [r2 + 4 * r3]
+    movu    m4, [r2]
+    movu    m5, [r2 + r3]
+  
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    movu    [r0 + 2 * r1], m2
+    movu    [r0 + r4], m3
+    lea     r0, [r0 + 4 * r1]
+    movu    [r0], m4
+    movu    [r0 + r1], m5
+
+    movu    m0, [r2 + 2 * r3]
+    movu    m1, [r2 + r5]
+    lea     r2, [r2 + 4 * r3]
+    movu    m2, [r2]
+    movu    m3, [r2 + r3]
+    movu    m4, [r2 + 2 * r3]
+    movu    m5, [r2 + r5]
+
+    movu    [r0 + 2 * r1], m0
+    movu    [r0 + r4], m1
+    lea     r0, [r0 + 4 * r1]
+    movu    [r0], m2
+    movu    [r0 + r1], m3
+    movu    [r0 + 2 * r1], m4
+    movu    [r0 + r4], m5
+
+    lea     r2, [r2 + 4 * r3]
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + 2 * r3]
+    movu    m3, [r2 + r5]
+
+    lea     r0, [r0 + 4 * r1]
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    movu    [r0 + 2 * r1], m2
+    movu    [r0 + r4], m3
+
+    lea     r2, [r2 + 4 * r3]
+    lea     r0, [r0 + 4 * r1]
+    dec     r6d
+    jnz     .loop
+    RET
+%endmacro
+
+; AVX versions for heights divisible by 16 (override the SSE2 ones at runtime dispatch)
+BLOCKCOPY_PP_W32_H16_avx 32, 32
+BLOCKCOPY_PP_W32_H16_avx 32, 48
+BLOCKCOPY_PP_W32_H16_avx 32, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_%1x%2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+; SSE2 48-wide copy: three 16-byte loads per row, 4 rows per loop iteration.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_PP_W48_H2 2
+INIT_XMM sse2
+cglobal blockcopy_pp_%1x%2, 4, 5, 6
+    mov    r4d,    %2/4
+
+.loop:
+    movu    m0,    [r2]
+    movu    m1,    [r2 + 16]
+    movu    m2,    [r2 + 32]
+    movu    m3,    [r2 + r3]
+    movu    m4,    [r2 + r3 + 16]
+    movu    m5,    [r2 + r3 + 32]
+    lea     r2,    [r2 + 2 * r3]
+
+    movu    [r0],              m0
+    movu    [r0 + 16],         m1
+    movu    [r0 + 32],         m2
+    movu    [r0 + r1],         m3
+    movu    [r0 + r1 + 16],    m4
+    movu    [r0 + r1 + 32],    m5
+    lea     r0,    [r0 + 2 * r1]
+
+    movu    m0,    [r2]
+    movu    m1,    [r2 + 16]
+    movu    m2,    [r2 + 32]
+    movu    m3,    [r2 + r3]
+    movu    m4,    [r2 + r3 + 16]
+    movu    m5,    [r2 + r3 + 32]
+
+    movu    [r0],              m0
+    movu    [r0 + 16],         m1
+    movu    [r0 + 32],         m2
+    movu    [r0 + r1],         m3
+    movu    [r0 + r1 + 16],    m4
+    movu    [r0 + r1 + 32],    m5
+
+    dec    r4d
+    lea    r0,    [r0 + 2 * r1]
+    lea    r2,    [r2 + 2 * r3]
+    jnz    .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PP_W48_H2 48, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_%1x%2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+; AVX 48-wide copy: one 32-byte ymm load plus one 16-byte xmm load per row
+; (32 + 16 = 48 pixels); 4 rows per loop iteration.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_PP_W48_H4_avx 2
+INIT_YMM avx
+cglobal blockcopy_pp_%1x%2, 4, 5, 4
+    mov    r4d,    %2/4
+
+.loop:
+    movu    m0,    [r2]
+    movu    xm1,   [r2 + 32]
+    movu    m2,    [r2 + r3]
+    movu    xm3,   [r2 + r3 + 32]
+    lea     r2,    [r2 + 2 * r3]
+
+    movu    [r0],              m0
+    movu    [r0 + 32],         xm1
+    movu    [r0 + r1],         m2
+    movu    [r0 + r1 + 32],    xm3
+    lea     r0,                [r0 + 2 * r1]
+
+    movu    m0,    [r2]
+    movu    xm1,   [r2 + 32]
+    movu    m2,    [r2 + r3]
+    movu    xm3,   [r2 + r3 + 32]
+
+    movu    [r0],              m0
+    movu    [r0 + 32],         xm1
+    movu    [r0 + r1],         m2
+    movu    [r0 + r1 + 32],    xm3
+
+    dec    r4d
+    lea    r0,    [r0 + 2 * r1]
+    lea    r2,    [r2 + 2 * r3]
+    jnz    .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PP_W48_H4_avx 48, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_%1x%2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+; SSE2 64-wide copy: four 16-byte loads per row; the 6 xmm registers are
+; recycled mid-row-pair to cover 4 rows (4x4 loads) per loop iteration.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_PP_W64_H4 2
+INIT_XMM sse2
+cglobal blockcopy_pp_%1x%2, 4, 5, 6
+    mov    r4d,    %2/4
+
+.loop:
+    movu    m0,    [r2]
+    movu    m1,    [r2 + 16]
+    movu    m2,    [r2 + 32]
+    movu    m3,    [r2 + 48]
+    movu    m4,    [r2 + r3]
+    movu    m5,    [r2 + r3 + 16]
+
+    movu    [r0],              m0
+    movu    [r0 + 16],         m1
+    movu    [r0 + 32],         m2
+    movu    [r0 + 48],         m3
+    movu    [r0 + r1],         m4
+    movu    [r0 + r1 + 16],    m5
+
+    movu    m0,    [r2 + r3 + 32]
+    movu    m1,    [r2 + r3 + 48]
+    lea     r2,    [r2 + 2 * r3]
+    movu    m2,    [r2]
+    movu    m3,    [r2 + 16]
+    movu    m4,    [r2 + 32]
+    movu    m5,    [r2 + 48]
+
+    movu    [r0 + r1 + 32],    m0
+    movu    [r0 + r1 + 48],    m1
+    lea     r0,                [r0 + 2 * r1]
+    movu    [r0],              m2
+    movu    [r0 + 16],         m3
+    movu    [r0 + 32],         m4
+    movu    [r0 + 48],         m5
+
+    movu    m0,    [r2 + r3]
+    movu    m1,    [r2 + r3 + 16]
+    movu    m2,    [r2 + r3 + 32]
+    movu    m3,    [r2 + r3 + 48]
+
+    movu    [r0 + r1],         m0
+    movu    [r0 + r1 + 16],    m1
+    movu    [r0 + r1 + 32],    m2
+    movu    [r0 + r1 + 48],    m3
+
+    dec    r4d
+    lea    r0,    [r0 + 2 * r1]
+    lea    r2,    [r2 + 2 * r3]
+    jnz    .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PP_W64_H4 64, 16
+BLOCKCOPY_PP_W64_H4 64, 32
+BLOCKCOPY_PP_W64_H4 64, 48
+BLOCKCOPY_PP_W64_H4 64, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_pp_%1x%2(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride)
+; AVX 64-wide copy: two 32-byte ymm transfers per row, 4 rows per iteration.
+; r4 = 3*dstStride, r5 = 3*srcStride.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_PP_W64_H4_avx 2
+INIT_YMM avx
+cglobal blockcopy_pp_%1x%2, 4, 7, 6
+    lea    r4,  [3 * r1]
+    lea    r5,  [3 * r3]
+    mov    r6d, %2/4
+
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 32]
+    movu    m2, [r2 + r3]
+    movu    m3, [r2 + r3 + 32]
+    movu    m4, [r2 + 2 * r3]
+    movu    m5, [r2 + 2 * r3 + 32]
+
+    movu    [r0], m0
+    movu    [r0 + 32], m1
+    movu    [r0 + r1], m2
+    movu    [r0 + r1 + 32], m3
+    movu    [r0 + 2 * r1], m4
+    movu    [r0 + 2 * r1 + 32], m5
+
+    movu    m0, [r2 + r5]
+    movu    m1, [r2 + r5 + 32]
+
+    movu    [r0 + r4], m0
+    movu    [r0 + r4 + 32], m1
+
+    lea     r2, [r2 + 4 * r3]
+    lea     r0, [r0 + 4 * r1]
+    dec     r6d
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PP_W64_H4_avx 64, 16
+BLOCKCOPY_PP_W64_H4_avx 64, 32
+BLOCKCOPY_PP_W64_H4_avx 64, 48
+BLOCKCOPY_PP_W64_H4_avx 64, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_2x4(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; "sp" = 16-bit source down to 8-bit pixels via packuswb (unsigned saturation).
+; add r3,r3 converts the element stride to a byte stride for the int16 source.
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal blockcopy_sp_2x4, 4, 5, 2
+
+add        r3, r3
+
+;Row 0-1
+movd       m0, [r2]
+movd       m1, [r2 + r3]
+packuswb   m0, m1
+movd       r4d, m0
+mov        [r0], r4w
+pextrw     [r0 + r1], m0, 4
+
+;Row 2-3
+movd       m0, [r2 + 2 * r3]
+lea        r2, [r2 + 2 * r3]
+movd       m1, [r2 + r3]
+packuswb   m0, m1
+movd       r4d, m0
+mov        [r0 + 2 * r1], r4w
+lea        r0, [r0 + 2 * r1]
+pextrw     [r0 + r1], m0, 4
+
+RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_2x8(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Same row-pair pattern as blockcopy_sp_2x4, unrolled for 8 rows: each pair
+; packs two int16 rows to bytes and stores 2 pixels per row (r4w low word,
+; pextrw word 4 = first word of the packed second row).
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal blockcopy_sp_2x8, 4, 5, 2
+
+add        r3, r3
+
+;Row 0-1
+movd       m0, [r2]
+movd       m1, [r2 + r3]
+packuswb   m0, m1
+movd       r4d, m0
+mov        [r0], r4w
+pextrw     [r0 + r1], m0, 4
+
+;Row 2-3
+movd       m0, [r2 + 2 * r3]
+lea        r2, [r2 + 2 * r3]
+movd       m1, [r2 + r3]
+packuswb   m0, m1
+movd       r4d, m0
+mov        [r0 + 2 * r1], r4w
+lea        r0, [r0 + 2 * r1]
+pextrw     [r0 + r1], m0, 4
+
+;Row 4-5
+movd       m0, [r2 + 2 * r3]
+lea        r2, [r2 + 2 * r3]
+movd       m1, [r2 + r3]
+packuswb   m0, m1
+movd       r4d, m0
+mov        [r0 + 2 * r1], r4w
+lea        r0, [r0 + 2 * r1]
+pextrw     [r0 + r1], m0, 4
+
+;Row 6-7
+movd       m0, [r2 + 2 * r3]
+lea        r2, [r2 + 2 * r3]
+movd       m1, [r2 + r3]
+packuswb   m0, m1
+movd       r4d, m0
+mov        [r0 + 2 * r1], r4w
+lea        r0, [r0 + 2 * r1]
+pextrw     [r0 + r1], m0, 4
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; SSE2 2-wide looped variant (no pextrw-to-memory, which needs SSE4):
+; each iteration packs two rows independently and stores via r4w/r5w.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W2_H2 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 7, 2, dst, dstStride, src, srcStride
+    add         r3,     r3
+    mov         r6d,    %2/2
+.loop:
+    movd        m0,     [r2]
+    movd        m1,     [r2 + r3]
+    dec         r6d
+    lea         r2,     [r2 + r3 * 2]
+    packuswb    m0,     m0
+    packuswb    m1,     m1
+    movd        r4d,        m0
+    movd        r5d,        m1
+    mov         [r0],       r4w
+    mov         [r0 + r1],  r5w
+    lea         r0,         [r0 + r1 * 2]
+    jnz         .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SP_W2_H2 2,  4
+BLOCKCOPY_SP_W2_H2 2,  8
+
+BLOCKCOPY_SP_W2_H2 2, 16
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_4x2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Packs both int16 rows into one xmm; pshufd selector 2 moves dword 2 (the
+; packed second row) down to lane 0 for the second movd store.
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_sp_4x2, 4, 4, 2, dst, dstStride, src, srcStride
+
+add        r3,        r3
+
+movh       m0,        [r2]
+movh       m1,        [r2 + r3]
+
+packuswb   m0,        m1
+
+movd       [r0],      m0
+pshufd     m0,        m0,        2
+movd       [r0 + r1], m0
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_4x4(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Two row-pairs, each packed into one xmm; dword 0 / dword 2 hold the two rows.
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_sp_4x4, 4, 4, 4, dst, dstStride, src, srcStride
+
+add        r3,     r3
+
+movh       m0,     [r2]
+movh       m1,     [r2 + r3]
+movh       m2,     [r2 + 2 * r3]
+lea        r2,     [r2 + 2 * r3]
+movh       m3,     [r2 + r3]
+
+packuswb   m0,            m1
+packuswb   m2,            m3
+
+movd       [r0],          m0
+pshufd     m0,            m0,         2
+movd       [r0 + r1],     m0
+movd       [r0 + 2 * r1], m2
+lea        r0,            [r0 + 2 * r1]
+pshufd     m2,            m2,         2
+movd       [r0 + r1],     m2
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_4x8(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Fully unrolled: loads 8 int16 rows into m0-m7, packs them into 4 xmm
+; (two rows each), then stores dword 0 / shuffled dword 2 per register.
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_sp_4x8, 4, 4, 8, dst, dstStride, src, srcStride
+
+add        r3,      r3
+
+movh       m0,      [r2]
+movh       m1,      [r2 + r3]
+movh       m2,      [r2 + 2 * r3]
+lea        r2,      [r2 + 2 * r3]
+movh       m3,      [r2 + r3]
+movh       m4,      [r2 + 2 * r3]
+lea        r2,      [r2 + 2 * r3]
+movh       m5,      [r2 + r3]
+movh       m6,      [r2 + 2 * r3]
+lea        r2,      [r2 + 2 * r3]
+movh       m7,      [r2 + r3]
+
+packuswb   m0,      m1
+packuswb   m2,      m3
+packuswb   m4,      m5
+packuswb   m6,      m7
+
+movd       [r0],          m0
+pshufd     m0,            m0,         2
+movd       [r0 + r1],     m0
+movd       [r0 + 2 * r1], m2
+lea        r0,            [r0 + 2 * r1]
+pshufd     m2,            m2,         2
+movd       [r0 + r1],     m2
+movd       [r0 + 2 * r1], m4
+lea        r0,            [r0 + 2 * r1]
+pshufd     m4,            m4,         2
+movd       [r0 + r1],     m4
+movd       [r0 + 2 * r1], m6
+lea        r0,            [r0 + 2 * r1]
+pshufd     m6,            m6,         2
+movd       [r0 + r1],     m6
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Looped version of the 4x8 kernel above: 8 rows per iteration, r4d = %2/8.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W4_H8 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 8, dst, dstStride, src, srcStride
+
+mov         r4d,    %2/8
+
+add         r3,     r3
+
+.loop:
+     movh       m0,      [r2]
+     movh       m1,      [r2 + r3]
+     movh       m2,      [r2 + 2 * r3]
+     lea        r2,      [r2 + 2 * r3]
+     movh       m3,      [r2 + r3]
+     movh       m4,      [r2 + 2 * r3]
+     lea        r2,      [r2 + 2 * r3]
+     movh       m5,      [r2 + r3]
+     movh       m6,      [r2 + 2 * r3]
+     lea        r2,      [r2 + 2 * r3]
+     movh       m7,      [r2 + r3]
+
+     packuswb   m0,      m1
+     packuswb   m2,      m3
+     packuswb   m4,      m5
+     packuswb   m6,      m7
+
+     movd       [r0],          m0
+     pshufd     m0,            m0,         2
+     movd       [r0 + r1],     m0
+     movd       [r0 + 2 * r1], m2
+     lea        r0,            [r0 + 2 * r1]
+     pshufd     m2,            m2,         2
+     movd       [r0 + r1],     m2
+     movd       [r0 + 2 * r1], m4
+     lea        r0,            [r0 + 2 * r1]
+     pshufd     m4,            m4,         2
+     movd       [r0 + r1],     m4
+     movd       [r0 + 2 * r1], m6
+     lea        r0,            [r0 + 2 * r1]
+     pshufd     m6,            m6,         2
+     movd       [r0 + r1],     m6
+
+     lea        r0,            [r0 + 2 * r1]
+     lea        r2,            [r2 + 2 * r3]
+
+     dec        r4d
+     jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W4_H8 4, 16
+
+BLOCKCOPY_SP_W4_H8 4, 32
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_6x8(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Unrolled 4x the same row-pair body: pack two rows, store 4+2 bytes per row
+; (movd then pextrw word 2); movhlps exposes the second packed row.
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal blockcopy_sp_6x8, 4, 4, 2
+
+    add       r3, r3
+
+    movu      m0, [r2]
+    movu      m1, [r2 + r3]
+    packuswb  m0, m1
+
+    movd      [r0], m0
+    pextrw    [r0 + 4], m0, 2
+
+    movhlps   m0, m0
+    movd      [r0 + r1], m0
+    pextrw    [r0 + r1 + 4], m0, 2
+
+    lea       r0, [r0 + 2 * r1]
+    lea       r2, [r2 + 2 * r3]
+
+    movu      m0, [r2]
+    movu      m1, [r2 + r3]
+    packuswb  m0, m1
+
+    movd      [r0], m0
+    pextrw    [r0 + 4], m0, 2
+
+    movhlps   m0, m0
+    movd      [r0 + r1], m0
+    pextrw    [r0 + r1 + 4], m0, 2
+
+    lea       r0, [r0 + 2 * r1]
+    lea       r2, [r2 + 2 * r3]
+
+    movu      m0, [r2]
+    movu      m1, [r2 + r3]
+    packuswb  m0, m1
+
+    movd      [r0], m0
+    pextrw    [r0 + 4], m0, 2
+
+    movhlps   m0, m0
+    movd      [r0 + r1], m0
+    pextrw    [r0 + r1 + 4], m0, 2
+
+    lea       r0, [r0 + 2 * r1]
+    lea       r2, [r2 + 2 * r3]
+
+    movu      m0, [r2]
+    movu      m1, [r2 + r3]
+    packuswb  m0, m1
+
+    movd      [r0], m0
+    pextrw    [r0 + 4], m0, 2
+
+    movhlps   m0, m0
+    movd      [r0 + r1], m0
+    pextrw    [r0 + r1 + 4], m0, 2
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; SSE2 6-wide loop: per row, 4 pixels via movd plus 2 via a GPR word store
+; (r4w/r5w), since SSE2 lacks pextrw-to-memory.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W6_H2 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 7, 4, dst, dstStride, src, srcStride
+    add         r3,     r3
+    mov         r6d,    %2/2
+.loop:
+    movh        m0, [r2]
+    movd        m2, [r2 + 8]
+    movh        m1, [r2 + r3]
+    movd        m3, [r2 + r3 + 8]
+    dec         r6d
+    lea         r2, [r2 + r3 * 2]
+    packuswb    m0, m0
+    packuswb    m2, m2
+    packuswb    m1, m1
+    packuswb    m3, m3
+    movd        r4d,            m2
+    movd        r5d,            m3
+    movd        [r0],           m0
+    mov         [r0 + 4],       r4w
+    movd        [r0 + r1],      m1
+    mov         [r0 + r1 + 4],  r5w
+    lea         r0, [r0 + r1 * 2]
+    jnz         .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SP_W6_H2 6,  8
+
+BLOCKCOPY_SP_W6_H2 6, 16
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_8x2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Both rows pack into one xmm: low qword = row 0, high qword = row 1.
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_sp_8x2, 4, 4, 2, dst, dstStride, src, srcStride
+
+add        r3,         r3
+
+movu       m0,         [r2]
+movu       m1,         [r2 + r3]
+
+packuswb   m0,         m1
+
+movlps     [r0],       m0
+movhps     [r0 + r1],  m0
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_8x4(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Two row-pairs packed into m0 and m2; movlps/movhps store low/high qwords.
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_sp_8x4, 4, 4, 4, dst, dstStride, src, srcStride
+
+add        r3,     r3
+
+movu       m0,     [r2]
+movu       m1,     [r2 + r3]
+movu       m2,     [r2 + 2 * r3]
+lea        r2,     [r2 + 2 * r3]
+movu       m3,     [r2 + r3]
+
+packuswb   m0,            m1
+packuswb   m2,            m3
+
+movlps     [r0],          m0
+movhps     [r0 + r1],     m0
+movlps     [r0 + 2 * r1], m2
+lea        r0,            [r0 + 2 * r1]
+movhps     [r0 + r1],     m2
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_8x6(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Three row-pairs (6 rows) packed into m0/m2/m4, stored as qword halves.
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_sp_8x6, 4, 4, 6, dst, dstStride, src, srcStride
+
+add        r3,      r3
+
+movu       m0,      [r2]
+movu       m1,      [r2 + r3]
+movu       m2,      [r2 + 2 * r3]
+lea        r2,      [r2 + 2 * r3]
+movu       m3,      [r2 + r3]
+movu       m4,      [r2 + 2 * r3]
+lea        r2,      [r2 + 2 * r3]
+movu       m5,      [r2 + r3]
+
+packuswb   m0,            m1
+packuswb   m2,            m3
+packuswb   m4,            m5
+
+movlps     [r0],          m0
+movhps     [r0 + r1],     m0
+movlps     [r0 + 2 * r1], m2
+lea        r0,            [r0 + 2 * r1]
+movhps     [r0 + r1],     m2
+movlps     [r0 + 2 * r1], m4
+lea        r0,            [r0 + 2 * r1]
+movhps     [r0 + r1],     m4
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_8x8(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Fully unrolled: 8 int16 rows -> 4 packed xmm -> 8 qword stores.
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal blockcopy_sp_8x8, 4, 4, 8, dst, dstStride, src, srcStride
+
+add        r3,      r3
+
+movu       m0,      [r2]
+movu       m1,      [r2 + r3]
+movu       m2,      [r2 + 2 * r3]
+lea        r2,      [r2 + 2 * r3]
+movu       m3,      [r2 + r3]
+movu       m4,      [r2 + 2 * r3]
+lea        r2,      [r2 + 2 * r3]
+movu       m5,      [r2 + r3]
+movu       m6,      [r2 + 2 * r3]
+lea        r2,      [r2 + 2 * r3]
+movu       m7,      [r2 + r3]
+
+packuswb   m0,      m1
+packuswb   m2,      m3
+packuswb   m4,      m5
+packuswb   m6,      m7
+
+movlps     [r0],          m0
+movhps     [r0 + r1],     m0
+movlps     [r0 + 2 * r1], m2
+lea        r0,            [r0 + 2 * r1]
+movhps     [r0 + r1],     m2
+movlps     [r0 + 2 * r1], m4
+lea        r0,            [r0 + 2 * r1]
+movhps     [r0 + r1],     m4
+movlps     [r0 + 2 * r1], m6
+lea        r0,            [r0 + 2 * r1]
+movhps     [r0 + r1],     m6
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; 8-wide loop, 4 rows per iteration; used for the odd 8x12 partition.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W8_H4 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 4, dst, dstStride, src, srcStride
+    add         r3,     r3
+    mov         r4d,    %2/4
+.loop:
+    movu        m0,     [r2]
+    movu        m1,     [r2 + r3]
+    lea         r2,     [r2 + r3 * 2]
+    movu        m2,     [r2]
+    movu        m3,     [r2 + r3]
+    dec         r4d
+    lea         r2,     [r2 + r3 * 2]
+    packuswb    m0,     m1
+    packuswb    m2,     m3
+    movlps      [r0],       m0
+    movhps      [r0 + r1],  m0
+    lea         r0,         [r0 + r1 * 2]
+    movlps      [r0],       m2
+    movhps      [r0 + r1],  m2
+    lea         r0,         [r0 + r1 * 2]
+    jnz         .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SP_W8_H4 8, 12
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; Looped version of the unrolled 8x8 body: 8 rows per iteration, r4d = %2/8.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W8_H8 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 8, dst, dstStride, src, srcStride
+
+mov         r4d,    %2/8
+
+add         r3,     r3
+
+.loop:
+     movu       m0,      [r2]
+     movu       m1,      [r2 + r3]
+     movu       m2,      [r2 + 2 * r3]
+     lea        r2,      [r2 + 2 * r3]
+     movu       m3,      [r2 + r3]
+     movu       m4,      [r2 + 2 * r3]
+     lea        r2,      [r2 + 2 * r3]
+     movu       m5,      [r2 + r3]
+     movu       m6,      [r2 + 2 * r3]
+     lea        r2,      [r2 + 2 * r3]
+     movu       m7,      [r2 + r3]
+
+     packuswb   m0,      m1
+     packuswb   m2,      m3
+     packuswb   m4,      m5
+     packuswb   m6,      m7
+
+     movlps     [r0],          m0
+     movhps     [r0 + r1],     m0
+     movlps     [r0 + 2 * r1], m2
+     lea        r0,            [r0 + 2 * r1]
+     movhps     [r0 + r1],     m2
+     movlps     [r0 + 2 * r1], m4
+     lea        r0,            [r0 + 2 * r1]
+     movhps     [r0 + r1],     m4
+     movlps     [r0 + 2 * r1], m6
+     lea        r0,            [r0 + 2 * r1]
+     movhps     [r0 + r1],     m6
+
+    lea         r0,            [r0 + 2 * r1]
+    lea         r2,            [r2 + 2 * r3]
+
+    dec         r4d
+    jnz         .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W8_H8 8, 16
+BLOCKCOPY_SP_W8_H8 8, 32
+
+BLOCKCOPY_SP_W8_H8 8, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; 12-wide loop: each row loads 24 source bytes (16 + 8) into a register pair,
+; packs to 16 pixels, then stores 8 (movh) + 4 (shuffled movd) = 12 of them.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W12_H4 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 8, dst, dstStride, src, srcStride
+
+mov             r4d,     %2/4
+
+add             r3,      r3
+
+.loop:
+     movu       m0,      [r2]
+     movu       m1,      [r2 + 16]
+     movu       m2,      [r2 + r3]
+     movu       m3,      [r2 + r3 + 16]
+     movu       m4,      [r2 + 2 * r3]
+     movu       m5,      [r2 + 2 * r3 + 16]
+     lea        r2,      [r2 + 2 * r3]
+     movu       m6,      [r2 + r3]
+     movu       m7,      [r2 + r3 + 16]
+
+     packuswb   m0,      m1
+     packuswb   m2,      m3
+     packuswb   m4,      m5
+     packuswb   m6,      m7
+
+     movh       [r0],              m0
+     pshufd     m0,                m0,    2
+     movd       [r0 + 8],          m0
+
+     movh       [r0 + r1],         m2
+     pshufd     m2,                m2,    2
+     movd       [r0 + r1 + 8],     m2
+
+     movh       [r0 + 2 * r1],     m4
+     pshufd     m4,                m4,    2
+     movd       [r0 + 2 * r1 + 8], m4
+
+     lea        r0,                [r0 + 2 * r1]
+     movh       [r0 + r1],         m6
+     pshufd     m6,                m6,    2
+     movd       [r0 + r1 + 8],     m6
+
+     lea        r0,                [r0 + 2 * r1]
+     lea        r2,                [r2 + 2 * r3]
+
+     dec        r4d
+     jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W12_H4 12, 16
+
+BLOCKCOPY_SP_W12_H4 12, 32
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; 16-wide loop: two 16-byte int16 loads pack into one full 16-pixel store
+; per row; 4 rows per iteration.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W16_H4 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 8, dst, dstStride, src, srcStride
+
+mov             r4d,     %2/4
+
+add             r3,      r3
+
+.loop:
+     movu       m0,      [r2]
+     movu       m1,      [r2 + 16]
+     movu       m2,      [r2 + r3]
+     movu       m3,      [r2 + r3 + 16]
+     movu       m4,      [r2 + 2 * r3]
+     movu       m5,      [r2 + 2 * r3 + 16]
+     lea        r2,      [r2 + 2 * r3]
+     movu       m6,      [r2 + r3]
+     movu       m7,      [r2 + r3 + 16]
+
+     packuswb   m0,      m1
+     packuswb   m2,      m3
+     packuswb   m4,      m5
+     packuswb   m6,      m7
+
+     movu       [r0],              m0
+     movu       [r0 + r1],         m2
+     movu       [r0 + 2 * r1],     m4
+     lea        r0,                [r0 + 2 * r1]
+     movu       [r0 + r1],         m6
+
+     lea        r0,                [r0 + 2 * r1]
+     lea        r2,                [r2 + 2 * r3]
+
+     dec        r4d
+     jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W16_H4 16,  4
+BLOCKCOPY_SP_W16_H4 16,  8
+BLOCKCOPY_SP_W16_H4 16, 12
+BLOCKCOPY_SP_W16_H4 16, 16
+BLOCKCOPY_SP_W16_H4 16, 32
+BLOCKCOPY_SP_W16_H4 16, 64
+BLOCKCOPY_SP_W16_H4 16, 24
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; AVX2 16-wide loop, 8 rows per iteration. packuswb on ymm interleaves at
+; 128-bit lane granularity, so vpermq 11011000b (0,2,1,3) restores row order
+; before the two 128-bit halves are stored to consecutive rows.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W16_H8_avx2 2
+INIT_YMM avx2
+cglobal blockcopy_sp_%1x%2, 4, 7, 4, dst, dstStride, src, srcStride
+    mov    r4d, %2/8
+    add    r3,  r3
+    lea    r5,  [3 * r3]
+    lea    r6,  [3 * r1]
+
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + 2 * r3]
+    movu    m3, [r2 + r5]
+
+    packuswb    m0, m1
+    packuswb    m2, m3
+
+    vpermq    m0, m0, 11011000b
+    vpermq    m2, m2, 11011000b
+
+    vextracti128 xm1, m0, 1
+    vextracti128 xm3, m2, 1
+
+    movu    [r0],          xm0
+    movu    [r0 + r1],     xm1
+    movu    [r0 + 2 * r1], xm2
+    movu    [r0 + r6],     xm3
+
+    lea     r2, [r2 + 4 * r3]
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + 2 * r3]
+    movu    m3, [r2 + r5]
+
+    packuswb    m0, m1
+    packuswb    m2, m3
+
+    vpermq    m0, m0, 11011000b
+    vpermq    m2, m2, 11011000b
+
+    vextracti128 xm1, m0, 1
+    vextracti128 xm3, m2, 1
+
+    lea     r0,            [r0 + 4 * r1]
+    movu    [r0],          xm0
+    movu    [r0 + r1],     xm1
+    movu    [r0 + 2 * r1], xm2
+    movu    [r0 + r6],     xm3
+
+    lea    r0, [r0 + 4 * r1]
+    lea    r2, [r2 + 4 * r3]
+
+    dec    r4d
+    jnz    .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SP_W16_H8_avx2 16, 16
+BLOCKCOPY_SP_W16_H8_avx2 16, 32
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; 24-wide loop, 2 rows per iteration: three packs yield 48 pixels, stored as
+; 16 + 8 for row 0 (movu, movlps) and 8 + 16 for row 1 (movhps, movu+8).
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W24_H2 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 6, dst, dstStride, src, srcStride
+
+mov             r4d,     %2/2
+
+add             r3,      r3
+
+.loop:
+     movu       m0,      [r2]
+     movu       m1,      [r2 + 16]
+     movu       m2,      [r2 + 32]
+     movu       m3,      [r2 + r3]
+     movu       m4,      [r2 + r3 + 16]
+     movu       m5,      [r2 + r3 + 32]
+
+     packuswb   m0,      m1
+     packuswb   m2,      m3
+     packuswb   m4,      m5
+
+     movu       [r0],            m0
+     movlps     [r0 + 16],       m2
+     movhps     [r0 + r1],       m2
+     movu       [r0 + r1 + 8],   m4
+
+     lea        r0,              [r0 + 2 * r1]
+     lea        r2,              [r2 + 2 * r3]
+
+     dec        r4d
+     jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W24_H2 24, 32
+
+BLOCKCOPY_SP_W24_H2 24, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; 32-wide loop, 2 rows per iteration: four 16-byte int16 loads per row pack
+; into two 16-pixel stores.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W32_H2 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 8, dst, dstStride, src, srcStride
+
+mov             r4d,     %2/2
+
+add             r3,      r3
+
+.loop:
+     movu       m0,      [r2]
+     movu       m1,      [r2 + 16]
+     movu       m2,      [r2 + 32]
+     movu       m3,      [r2 + 48]
+     movu       m4,      [r2 + r3]
+     movu       m5,      [r2 + r3 + 16]
+     movu       m6,      [r2 + r3 + 32]
+     movu       m7,      [r2 + r3 + 48]
+
+     packuswb   m0,      m1
+     packuswb   m2,      m3
+     packuswb   m4,      m5
+     packuswb   m6,      m7
+
+     movu       [r0],            m0
+     movu       [r0 + 16],       m2
+     movu       [r0 + r1],       m4
+     movu       [r0 + r1 + 16],  m6
+
+     lea        r0,              [r0 + 2 * r1]
+     lea        r2,              [r2 + 2 * r3]
+
+     dec        r4d
+     jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W32_H2 32,  8
+BLOCKCOPY_SP_W32_H2 32, 16
+BLOCKCOPY_SP_W32_H2 32, 24
+BLOCKCOPY_SP_W32_H2 32, 32
+BLOCKCOPY_SP_W32_H2 32, 64
+
+BLOCKCOPY_SP_W32_H2 32, 48
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+; AVX2 32-wide loop, 4 rows per iteration. As in the 16-wide avx2 kernel,
+; vpermq 11011000b undoes packuswb's per-128-bit-lane interleaving so each
+; ymm store is one contiguous 32-pixel row.
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W32_H4_avx2 2
+INIT_YMM avx2
+cglobal blockcopy_sp_%1x%2, 4, 7, 4, dst, dstStride, src, srcStride
+    mov    r4d, %2/4
+    add    r3,  r3
+    lea    r5,  [3 * r3]
+    lea    r6,  [3 * r1]
+
+.loop:
+    movu       m0, [r2]
+    movu       m1, [r2 + 32]
+    movu       m2, [r2 + r3]
+    movu       m3, [r2 + r3 + 32]
+
+    packuswb   m0, m1
+    packuswb   m2, m3
+
+    vpermq    m0, m0, 11011000b
+    vpermq    m2, m2, 11011000b
+
+    movu       [r0],      m0
+    movu       [r0 + r1], m2
+
+    movu       m0, [r2 + 2 * r3]
+    movu       m1, [r2 + 2 * r3 + 32]
+    movu       m2, [r2 + r5]
+    movu       m3, [r2 + r5 + 32]
+
+    packuswb   m0, m1
+    packuswb   m2, m3
+
+    vpermq    m0, m0, 11011000b
+    vpermq    m2, m2, 11011000b
+
+    movu       [r0 + 2 * r1], m0
+    movu       [r0 + r6],     m2
+
+    lea        r0, [r0 + 4 * r1]
+    lea        r2, [r2 + 4 * r3]
+
+    dec        r4d
+    jnz        .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SP_W32_H4_avx2 32, 32
+BLOCKCOPY_SP_W32_H4_avx2 32, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; Saturating int16_t -> uint8_t copy for 48-wide blocks (96 src bytes/row).
+; NOTE(review): despite the _H2 name, this processes ONE row per iteration
+; (r4d = %2, and the pointers advance by a single stride each pass).
+%macro BLOCKCOPY_SP_W48_H2 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 6, dst, dstStride, src, srcStride
+
+mov             r4d,     %2            ; one row per iteration
+
+add             r3,      r3            ; srcStride: int16_t units -> bytes
+
+.loop:
+     movu       m0,        [r2]
+     movu       m1,        [r2 + 16]
+     movu       m2,        [r2 + 32]
+     movu       m3,        [r2 + 48]
+     movu       m4,        [r2 + 64]
+     movu       m5,        [r2 + 80]
+
+     packuswb   m0,        m1
+     packuswb   m2,        m3
+     packuswb   m4,        m5
+
+     movu       [r0],      m0
+     movu       [r0 + 16], m2
+     movu       [r0 + 32], m4
+
+     lea        r0,        [r0 + r1]
+     lea        r2,        [r2 + r3]
+
+     dec        r4d
+     jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W48_H2 48, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; Saturating int16_t -> uint8_t copy for 64-wide blocks: one full row
+; (128 src bytes in 8 xmm loads) per iteration.
+%macro BLOCKCOPY_SP_W64_H1 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 8, dst, dstStride, src, srcStride
+
+mov             r4d,       %2          ; one row per iteration
+
+add             r3,         r3         ; srcStride: int16_t units -> bytes
+
+.loop:
+      movu      m0,        [r2]
+      movu      m1,        [r2 + 16]
+      movu      m2,        [r2 + 32]
+      movu      m3,        [r2 + 48]
+      movu      m4,        [r2 + 64]
+      movu      m5,        [r2 + 80]
+      movu      m6,        [r2 + 96]
+      movu      m7,        [r2 + 112]
+
+     packuswb   m0,        m1
+     packuswb   m2,        m3
+     packuswb   m4,        m5
+     packuswb   m6,        m7
+
+      movu      [r0],      m0
+      movu      [r0 + 16], m2
+      movu      [r0 + 32], m4
+      movu      [r0 + 48], m6
+
+      lea       r0,        [r0 + r1]
+      lea       r2,        [r2 + r3]
+
+      dec       r4d
+      jnz       .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W64_H1 64, 16
+BLOCKCOPY_SP_W64_H1 64, 32
+BLOCKCOPY_SP_W64_H1 64, 48
+BLOCKCOPY_SP_W64_H1 64, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; AVX2 saturating int16_t -> uint8_t copy, 64 wide, 4 rows per iteration
+; (one unrolled stanza per row). packuswb packs per 128-bit lane, so each
+; result is fixed up with vpermq 11011000b (0xD8) before storing.
+%macro BLOCKCOPY_SP_W64_H4_avx2 2
+INIT_YMM avx2
+cglobal blockcopy_sp_%1x%2, 4, 7, 4, dst, dstStride, src, srcStride
+    mov    r4d, %2/4                   ; 4 rows per iteration
+    add    r3,  r3                     ; srcStride: int16_t units -> bytes
+    lea    r5,  [3 * r3]
+    lea    r6,  [3 * r1]
+
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 32]
+    movu    m2, [r2 + 64]
+    movu    m3, [r2 + 96]
+
+    packuswb    m0, m1
+    packuswb    m2, m3
+
+    vpermq    m0, m0, 11011000b
+    vpermq    m2, m2, 11011000b
+
+    movu    [r0],      m0
+    movu    [r0 + 32], m2
+
+    movu    m0, [r2 + r3]
+    movu    m1, [r2 + r3 + 32]
+    movu    m2, [r2 + r3 + 64]
+    movu    m3, [r2 + r3 + 96]
+
+    packuswb    m0, m1
+    packuswb    m2, m3
+
+    vpermq    m0, m0, 11011000b
+    vpermq    m2, m2, 11011000b
+
+    movu    [r0 + r1],      m0
+    movu    [r0 + r1 + 32], m2
+
+    movu    m0, [r2 + 2 * r3]
+    movu    m1, [r2 + 2 * r3 + 32]
+    movu    m2, [r2 + 2 * r3 + 64]
+    movu    m3, [r2 + 2 * r3 + 96]
+
+    packuswb    m0, m1
+    packuswb    m2, m3
+
+    vpermq    m0, m0, 11011000b
+    vpermq    m2, m2, 11011000b
+
+    movu    [r0 + 2 * r1],      m0
+    movu    [r0 + 2 * r1 + 32], m2
+
+    movu    m0, [r2 + r5]
+    movu    m1, [r2 + r5 + 32]
+    movu    m2, [r2 + r5 + 64]
+    movu    m3, [r2 + r5 + 96]
+
+    packuswb    m0, m1
+    packuswb    m2, m3
+
+    vpermq    m0, m0, 11011000b
+    vpermq    m2, m2, 11011000b
+
+    movu    [r0 + r6],      m0
+    movu    [r0 + r6 + 32], m2
+
+    lea    r0, [r0 + 4 * r1]
+    lea    r2, [r2 + 4 * r3]
+
+    dec    r4d
+    jnz    .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SP_W64_H4_avx2 64, 64
+
+;-----------------------------------------------------------------------------
+; void blockfill_s_4x4(int16_t* dst, intptr_t dstride, int16_t val)
+;-----------------------------------------------------------------------------
+; Fill a 4x4 int16_t block with val: broadcast val into the low 4 words of
+; m0 (pshuflw) and store 8 bytes per row.
+INIT_XMM sse2
+cglobal blockfill_s_4x4, 3, 3, 1, dst, dstStride, val
+
+add        r1,            r1          ; dstStride: int16_t units -> bytes
+
+movd       m0,            r2d
+pshuflw    m0,            m0,         0
+
+movh       [r0],          m0
+movh       [r0 + r1],     m0
+movh       [r0 + 2 * r1], m0
+lea        r0,            [r0 + 2 * r1]
+movh       [r0 + r1],     m0          ; row 3
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockfill_s_8x8(int16_t* dst, intptr_t dstride, int16_t val)
+;-----------------------------------------------------------------------------
+; Fill an 8x8 int16_t block with val: pshuflw+pshufd broadcast val to all
+; 8 words of m0; one 16-byte store per row, two unrolled groups of 4 rows.
+INIT_XMM sse2
+cglobal blockfill_s_8x8, 3, 4, 1, dst, dstStride, val
+
+add        r1,            r1          ; dstStride: int16_t units -> bytes
+lea        r3,            [3 * r1]
+
+movd       m0,            r2d
+pshuflw    m0,            m0,         0
+pshufd     m0,            m0,         0
+
+movu       [r0],          m0
+movu       [r0 + r1],     m0
+movu       [r0 + 2 * r1], m0
+
+movu       [r0 + r3],     m0
+
+lea        r0,            [r0 + 4 * r1]
+movu       [r0],          m0
+movu       [r0 + r1],     m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + r3],     m0
+RET
+
+;-----------------------------------------------------------------------------
+; void blockfill_s_16x16(int16_t* dst, intptr_t dstride, int16_t val)
+;-----------------------------------------------------------------------------
+; Fill a 16x16 int16_t block with val (two 16-byte stores per 32-byte row).
+; Fully unrolled; row coverage per group is 5+4+4+3 = 16 rows: the first
+; group writes rows 0-4 (offset 4*r1 included), later groups write offsets
+; r1..4*r1 relative to the advanced base.
+INIT_XMM sse2
+cglobal blockfill_s_16x16, 3, 4, 1, dst, dstStride, val
+
+add        r1,            r1          ; dstStride: int16_t units -> bytes
+lea        r3,            [3 * r1]
+
+movd       m0,            r2d
+pshuflw    m0,            m0,         0
+pshufd     m0,            m0,         0
+
+movu       [r0],           m0
+movu       [r0 + 16],      m0
+movu       [r0 + r1],      m0
+movu       [r0 + r1 + 16], m0
+movu       [r0 + 2 * r1],  m0
+movu       [r0 + 2 * r1 + 16], m0
+
+movu       [r0 + r3],          m0
+movu       [r0 + r3 + 16],     m0
+movu       [r0 + 4 * r1],      m0
+movu       [r0 + 4 * r1 + 16], m0
+
+lea        r0,                 [r0 + 4 * r1]
+movu       [r0 + r1],          m0
+movu       [r0 + r1 + 16],     m0
+movu       [r0 + 2 * r1],      m0
+movu       [r0 + 2 * r1 + 16], m0
+movu       [r0 + r3],          m0
+movu       [r0 + r3 + 16],     m0
+movu       [r0 + 4 * r1],      m0
+movu       [r0 + 4 * r1 + 16], m0
+
+lea        r0,                 [r0 + 4 * r1]
+movu       [r0 + r1],          m0
+movu       [r0 + r1 + 16],     m0
+movu       [r0 + 2 * r1],      m0
+movu       [r0 + 2 * r1 + 16], m0
+movu       [r0 + r3],          m0
+movu       [r0 + r3 + 16],     m0
+movu       [r0 + 4 * r1],      m0
+movu       [r0 + 4 * r1 + 16], m0
+
+lea        r0,                 [r0 + 4 * r1]
+movu       [r0 + r1],          m0
+movu       [r0 + r1 + 16],     m0
+movu       [r0 + 2 * r1],      m0
+movu       [r0 + 2 * r1 + 16], m0
+movu       [r0 + r3],          m0
+movu       [r0 + r3 + 16],     m0
+RET
+
+; AVX2 blockfill_s_16x16: vpbroadcastw replicates val into all 16 words of
+; one ymm; a single 32-byte store covers each row, 4 rows per unrolled group.
+INIT_YMM avx2
+cglobal blockfill_s_16x16, 3, 4, 1
+add          r1, r1                   ; dstStride: int16_t units -> bytes
+lea          r3, [3 * r1]
+movd         xm0, r2d
+vpbroadcastw m0, xm0
+
+movu       [r0], m0
+movu       [r0 + r1], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + r3], m0
+lea        r0, [r0 + 4 * r1]
+movu       [r0], m0
+movu       [r0 + r1], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + r3], m0
+lea        r0, [r0 + 4 * r1]
+movu       [r0], m0
+movu       [r0 + r1], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + r3], m0
+lea        r0, [r0 + 4 * r1]
+movu       [r0], m0
+movu       [r0 + r1], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + r3], m0
+RET
+
+;-----------------------------------------------------------------------------
+; void blockfill_s_%1x%2(int16_t* dst, intptr_t dstride, int16_t val)
+;-----------------------------------------------------------------------------
+; Fill a %1x%2 int16_t block (32 wide = 64 bytes = 4 xmm stores per row).
+; Each loop iteration covers 8 rows: offsets 0..4*r1 (5 rows), then the base
+; is advanced by 4*r1 and offsets r1..3*r1 cover the remaining 3 rows.
+%macro BLOCKFILL_S_W32_H8 2
+INIT_XMM sse2
+cglobal blockfill_s_%1x%2, 3, 5, 1, dst, dstStride, val
+
+mov        r3d,           %2/8        ; 8 rows per iteration
+
+add        r1,            r1          ; dstStride: int16_t units -> bytes
+lea        r4,            [3 * r1]
+
+movd       m0,            r2d
+pshuflw    m0,            m0,       0
+pshufd     m0,            m0,       0
+
+.loop:
+     movu       [r0],               m0
+     movu       [r0 + 16],          m0
+     movu       [r0 + 32],          m0
+     movu       [r0 + 48],          m0
+
+     movu       [r0 + r1],          m0
+     movu       [r0 + r1 + 16],     m0
+     movu       [r0 + r1 + 32],     m0
+     movu       [r0 + r1 + 48],     m0
+
+     movu       [r0 + 2 * r1],      m0
+     movu       [r0 + 2 * r1 + 16], m0
+     movu       [r0 + 2 * r1 + 32], m0
+     movu       [r0 + 2 * r1 + 48], m0
+
+     movu       [r0 + r4],          m0
+     movu       [r0 + r4 + 16],     m0
+     movu       [r0 + r4 + 32],     m0
+     movu       [r0 + r4 + 48],     m0
+
+     movu       [r0 + 4 * r1],      m0
+     movu       [r0 + 4 * r1 + 16], m0
+     movu       [r0 + 4 * r1 + 32], m0
+     movu       [r0 + 4 * r1 + 48], m0
+
+     lea        r0,                 [r0 + 4 * r1]
+     movu       [r0 + r1],          m0
+     movu       [r0 + r1 + 16],     m0
+     movu       [r0 + r1 + 32],     m0
+     movu       [r0 + r1 + 48],     m0
+
+     movu       [r0 + 2 * r1],      m0
+     movu       [r0 + 2 * r1 + 16], m0
+     movu       [r0 + 2 * r1 + 32], m0
+     movu       [r0 + 2 * r1 + 48], m0
+
+     movu       [r0 + r4],          m0
+     movu       [r0 + r4 + 16],     m0
+     movu       [r0 + r4 + 32],     m0
+     movu       [r0 + r4 + 48],     m0
+
+     lea        r0,                 [r0 + 4 * r1]
+
+     dec        r3d
+     jnz        .loop
+
+RET
+%endmacro
+
+BLOCKFILL_S_W32_H8 32, 32
+
+; AVX2 blockfill_s_32x32: vpbroadcastw replicates val to all 16 words of one
+; ymm; two 32-byte stores cover each 64-byte row. Fully unrolled as 8 groups
+; of 4 rows (offsets 0, r1, 2*r1, 3*r1), advancing the base by 4*r1 between
+; groups.
+INIT_YMM avx2
+cglobal blockfill_s_32x32, 3, 4, 1
+add          r1, r1                   ; dstStride: int16_t units -> bytes
+lea          r3, [3 * r1]
+movd         xm0, r2d
+vpbroadcastw m0, xm0
+
+movu       [r0], m0
+movu       [r0 + 32], m0
+movu       [r0 + r1], m0
+movu       [r0 + r1 + 32], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + 2 * r1 + 32], m0
+movu       [r0 + r3], m0
+movu       [r0 + r3 + 32], m0
+lea        r0, [r0 + 4 * r1]
+movu       [r0], m0
+movu       [r0 + 32], m0
+movu       [r0 + r1], m0
+movu       [r0 + r1 + 32], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + 2 * r1 + 32], m0
+movu       [r0 + r3], m0
+movu       [r0 + r3 + 32], m0
+lea        r0, [r0 + 4 * r1]
+movu       [r0], m0
+movu       [r0 + 32], m0
+movu       [r0 + r1], m0
+movu       [r0 + r1 + 32], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + 2 * r1 + 32], m0
+movu       [r0 + r3], m0
+movu       [r0 + r3 + 32], m0
+lea        r0, [r0 + 4 * r1]
+movu       [r0], m0
+movu       [r0 + 32], m0
+movu       [r0 + r1], m0
+movu       [r0 + r1 + 32], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + 2 * r1 + 32], m0
+movu       [r0 + r3], m0
+movu       [r0 + r3 + 32], m0
+lea        r0, [r0 + 4 * r1]
+movu       [r0], m0
+movu       [r0 + 32], m0
+movu       [r0 + r1], m0
+movu       [r0 + r1 + 32], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + 2 * r1 + 32], m0
+movu       [r0 + r3], m0
+movu       [r0 + r3 + 32], m0
+lea        r0, [r0 + 4 * r1]
+movu       [r0], m0
+movu       [r0 + 32], m0
+movu       [r0 + r1], m0
+movu       [r0 + r1 + 32], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + 2 * r1 + 32], m0
+movu       [r0 + r3], m0
+movu       [r0 + r3 + 32], m0
+lea        r0, [r0 + 4 * r1]
+movu       [r0], m0
+movu       [r0 + 32], m0
+movu       [r0 + r1], m0
+movu       [r0 + r1 + 32], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + 2 * r1 + 32], m0
+movu       [r0 + r3], m0
+movu       [r0 + r3 + 32], m0
+lea        r0, [r0 + 4 * r1]
+movu       [r0], m0
+movu       [r0 + 32], m0
+movu       [r0 + r1], m0
+movu       [r0 + r1 + 32], m0
+movu       [r0 + 2 * r1], m0
+movu       [r0 + 2 * r1 + 32], m0
+movu       [r0 + r3], m0
+movu       [r0 + r3 + 32], m0
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_2x4(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy: zero-extend uint8_t pixels to int16_t (pmovzxbw).
+; Width 2: each movd store writes 4 bytes = two int16 output pixels.
+INIT_XMM sse4
+cglobal blockcopy_ps_2x4, 4, 4, 1, dst, dstStride, src, srcStride
+
+add        r1,            r1          ; dstStride: int16_t units -> bytes
+
+movd       m0,            [r2]
+pmovzxbw   m0,            m0
+movd       [r0],          m0
+
+movd       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movd       [r0 + r1],     m0
+
+movd       m0,            [r2 + 2 * r3]
+pmovzxbw   m0,            m0
+movd       [r0 + 2 * r1], m0
+
+lea        r2,            [r2 + 2 * r3]
+lea        r0,            [r0 + 2 * r1]
+
+movd       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movd       [r0 + r1],     m0          ; row 3
+
+RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_2x8(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 2, 8 rows, fully unrolled: zero-extend bytes to
+; int16_t via pmovzxbw; movd stores 4 bytes = two output pixels per row.
+INIT_XMM sse4
+cglobal blockcopy_ps_2x8, 4, 4, 1, dst, dstStride, src, srcStride
+
+add        r1,            r1          ; dstStride: int16_t units -> bytes
+
+movd       m0,            [r2]
+pmovzxbw   m0,            m0
+movd       [r0],          m0
+
+movd       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movd       [r0 + r1],     m0
+
+movd       m0,            [r2 + 2 * r3]
+pmovzxbw   m0,            m0
+movd       [r0 + 2 * r1], m0
+
+lea        r2,            [r2 + 2 * r3]
+lea        r0,            [r0 + 2 * r1]
+
+movd       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movd       [r0 + r1],     m0
+
+movd       m0,            [r2 + 2 * r3]
+pmovzxbw   m0,            m0
+movd       [r0 + 2 * r1], m0
+
+lea        r2,            [r2 + 2 * r3]
+lea        r0,            [r0 + 2 * r1]
+
+movd       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movd       [r0 + r1],     m0
+
+movd       m0,            [r2 + 2 * r3]
+pmovzxbw   m0,            m0
+movd       [r0 + 2 * r1], m0
+
+lea        r2,            [r2 + 2 * r3]
+lea        r0,            [r0 + 2 * r1]
+
+movd       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movd       [r0 + r1],     m0          ; row 7
+
+RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_2x16(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 2, looped: two rows per iteration; dec is scheduled
+; early so the flags feed the jnz after the stores.
+INIT_XMM sse4
+cglobal blockcopy_ps_2x16, 4, 5, 2, dst, dstStride, src, srcStride
+    add         r1,         r1        ; dstStride: int16_t units -> bytes
+    mov         r4d,        16/2      ; 2 rows per iteration
+.loop:
+    movd        m0,         [r2]
+    movd        m1,         [r2 + r3]
+    dec         r4d
+    lea         r2,         [r2 + r3 * 2]
+    pmovzxbw    m0,         m0
+    pmovzxbw    m1,         m1
+    movd        [r0],       m0
+    movd        [r0 + r1],  m1
+    lea         r0,         [r0 + r1 * 2]
+    jnz         .loop
+    RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_4x2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 4: pmovzxbw zero-extends 4 bytes to 4 words; movh
+; stores 8 bytes = four int16 output pixels per row.
+INIT_XMM sse4
+cglobal blockcopy_ps_4x2, 4, 4, 1, dst, dstStride, src, srcStride
+
+add        r1,         r1             ; dstStride: int16_t units -> bytes
+
+movd       m0,         [r2]
+pmovzxbw   m0,         m0
+movh       [r0],       m0
+
+movd       m0,         [r2 + r3]
+pmovzxbw   m0,         m0
+movh       [r0 + r1],  m0
+
+RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_4x4(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 4, 4 rows unrolled: movd load + pmovzxbw widen +
+; movh store (8 bytes = four int16 pixels) per row.
+INIT_XMM sse4
+cglobal blockcopy_ps_4x4, 4, 4, 1, dst, dstStride, src, srcStride
+
+add        r1,            r1          ; dstStride: int16_t units -> bytes
+
+movd       m0,            [r2]
+pmovzxbw   m0,            m0
+movh       [r0],          m0
+
+movd       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movh       [r0 + r1],     m0
+
+movd       m0,            [r2 + 2 * r3]
+pmovzxbw   m0,            m0
+movh       [r0 + 2 * r1], m0
+
+lea        r2,            [r2 + 2 * r3]
+lea        r0,            [r0 + 2 * r1]
+
+movd       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movh       [r0 + r1],     m0          ; row 3
+
+RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 4, looped: 4 rows per iteration (rows 0-2 via offsets,
+; pointers advanced mid-iteration, then row 3 at offset r3/r1).
+%macro BLOCKCOPY_PS_W4_H4 2
+INIT_XMM sse4
+cglobal blockcopy_ps_%1x%2, 4, 5, 1, dst, dstStride, src, srcStride
+
+add     r1,      r1                   ; dstStride: int16_t units -> bytes
+mov    r4d,      %2/4                 ; 4 rows per iteration
+
+.loop:
+      movd       m0,            [r2]
+      pmovzxbw   m0,            m0
+      movh       [r0],          m0
+
+      movd       m0,            [r2 + r3]
+      pmovzxbw   m0,            m0
+      movh       [r0 + r1],     m0
+
+      movd       m0,            [r2 + 2 * r3]
+      pmovzxbw   m0,            m0
+      movh       [r0 + 2 * r1], m0
+
+      lea        r2,            [r2 + 2 * r3]
+      lea        r0,            [r0 + 2 * r1]
+
+      movd       m0,            [r2 + r3]
+      pmovzxbw   m0,            m0
+      movh       [r0 + r1],     m0
+
+      lea        r0,            [r0 + 2 * r1]
+      lea        r2,            [r2 + 2 * r3]
+
+      dec        r4d
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_PS_W4_H4 4, 8
+BLOCKCOPY_PS_W4_H4 4, 16
+
+BLOCKCOPY_PS_W4_H4 4, 32
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 6: pmovzxbw widens 8 src bytes; movh stores output
+; pixels 0-3 and pextrd (dword 2) stores pixels 4-5, i.e. 12 bytes per row.
+%macro BLOCKCOPY_PS_W6_H4 2
+INIT_XMM sse4
+cglobal blockcopy_ps_%1x%2, 4, 5, 1, dst, dstStride, src, srcStride
+
+add     r1,      r1                   ; dstStride: int16_t units -> bytes
+mov    r4d,      %2/4                 ; 4 rows per iteration
+
+.loop:
+      movh       m0,                [r2]
+      pmovzxbw   m0,                m0
+      movh       [r0],              m0
+      pextrd     [r0 + 8],          m0,            2
+
+      movh       m0,                [r2 + r3]
+      pmovzxbw   m0,                m0
+      movh       [r0 + r1],         m0
+      pextrd     [r0 + r1 + 8],     m0,            2
+
+      movh       m0,                [r2 + 2 * r3]
+      pmovzxbw   m0,                m0
+      movh       [r0 + 2 * r1],     m0
+      pextrd     [r0 + 2 * r1 + 8], m0,            2
+
+      lea        r2,                [r2 + 2 * r3]
+      lea        r0,                [r0 + 2 * r1]
+
+      movh       m0,                [r2 + r3]
+      pmovzxbw   m0,                m0
+      movh       [r0 + r1],         m0
+      pextrd     [r0 + r1 + 8],     m0,            2
+
+      lea        r0,                [r0 + 2 * r1]
+      lea        r2,                [r2 + 2 * r3]
+
+      dec        r4d
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_PS_W6_H4 6, 8
+
+BLOCKCOPY_PS_W6_H4 6, 16
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_8x2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 8: pmovzxbw widens 8 bytes to 8 words; one 16-byte
+; store per row.
+INIT_XMM sse4
+cglobal blockcopy_ps_8x2, 4, 4, 1, dst, dstStride, src, srcStride
+
+add        r1,         r1             ; dstStride: int16_t units -> bytes
+
+movh       m0,         [r2]
+pmovzxbw   m0,         m0
+movu       [r0],       m0
+
+movh       m0,         [r2 + r3]
+pmovzxbw   m0,         m0
+movu       [r0 + r1],  m0
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_8x4(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 8, 4 rows unrolled: movh load + pmovzxbw widen +
+; 16-byte store per row.
+INIT_XMM sse4
+cglobal blockcopy_ps_8x4, 4, 4, 1, dst, dstStride, src, srcStride
+
+add        r1,            r1          ; dstStride: int16_t units -> bytes
+
+movh       m0,            [r2]
+pmovzxbw   m0,            m0
+movu       [r0],          m0
+
+movh       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movu       [r0 + r1],     m0
+
+movh       m0,            [r2 + 2 * r3]
+pmovzxbw   m0,            m0
+movu       [r0 + 2 * r1], m0
+
+lea        r2,            [r2 + 2 * r3]
+lea        r0,            [r0 + 2 * r1]
+
+movh       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movu       [r0 + r1],     m0          ; row 3
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_8x6(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 8, 6 rows unrolled (same per-row pattern as 8x4).
+INIT_XMM sse4
+cglobal blockcopy_ps_8x6, 4, 4, 1, dst, dstStride, src, srcStride
+
+add        r1,            r1          ; dstStride: int16_t units -> bytes
+
+movh       m0,            [r2]
+pmovzxbw   m0,            m0
+movu       [r0],          m0
+
+movh       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movu       [r0 + r1],     m0
+
+movh       m0,            [r2 + 2 * r3]
+pmovzxbw   m0,            m0
+movu       [r0 + 2 * r1], m0
+
+lea        r2,            [r2 + 2 * r3]
+lea        r0,            [r0 + 2 * r1]
+
+movh       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movu       [r0 + r1],     m0
+
+movh       m0,            [r2 + 2 * r3]
+pmovzxbw   m0,            m0
+movu       [r0 + 2 * r1], m0
+
+lea        r2,            [r2 + 2 * r3]
+lea        r0,            [r0 + 2 * r1]
+
+movh       m0,            [r2 + r3]
+pmovzxbw   m0,            m0
+movu       [r0 + r1],     m0          ; row 5
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 8, looped: 4 rows per iteration.
+%macro BLOCKCOPY_PS_W8_H4 2
+INIT_XMM sse4
+cglobal blockcopy_ps_%1x%2, 4, 5, 1, dst, dstStride, src, srcStride
+
+add     r1,      r1                   ; dstStride: int16_t units -> bytes
+mov    r4d,      %2/4                 ; 4 rows per iteration
+
+.loop:
+      movh       m0,            [r2]
+      pmovzxbw   m0,            m0
+      movu       [r0],          m0
+
+      movh       m0,            [r2 + r3]
+      pmovzxbw   m0,            m0
+      movu       [r0 + r1],     m0
+
+      movh       m0,            [r2 + 2 * r3]
+      pmovzxbw   m0,            m0
+      movu       [r0 + 2 * r1], m0
+
+      lea        r2,            [r2 + 2 * r3]
+      lea        r0,            [r0 + 2 * r1]
+
+      movh       m0,            [r2 + r3]
+      pmovzxbw   m0,            m0
+      movu       [r0 + r1],     m0
+
+      lea        r0,            [r0 + 2 * r1]
+      lea        r2,            [r2 + 2 * r3]
+
+      dec        r4d
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_PS_W8_H4  8,  8
+BLOCKCOPY_PS_W8_H4  8, 16
+BLOCKCOPY_PS_W8_H4  8, 32
+
+BLOCKCOPY_PS_W8_H4  8, 12
+BLOCKCOPY_PS_W8_H4  8, 64
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 12: pmovzxbw widens the low 8 bytes (pixels 0-7);
+; punpckhbw against the zero register m0 widens the high bytes, of which
+; movh stores the first 8 bytes (pixels 8-11).
+%macro BLOCKCOPY_PS_W12_H2 2
+INIT_XMM sse4
+cglobal blockcopy_ps_%1x%2, 4, 5, 3, dst, dstStride, src, srcStride
+
+add        r1,      r1                ; dstStride: int16_t units -> bytes
+mov        r4d,     %2/2              ; 2 rows per iteration
+pxor       m0,      m0                ; zero register for punpckhbw
+
+.loop:
+      movu       m1,             [r2]
+      pmovzxbw   m2,             m1
+      movu       [r0],           m2
+      punpckhbw  m1,             m0
+      movh       [r0 + 16],      m1
+
+      movu       m1,             [r2 + r3]
+      pmovzxbw   m2,             m1
+      movu       [r0 + r1],      m2
+      punpckhbw  m1,             m0
+      movh       [r0 + r1 + 16], m1
+
+      lea        r0,             [r0 + 2 * r1]
+      lea        r2,             [r2 + 2 * r3]
+
+      dec        r4d
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_PS_W12_H2 12, 16
+
+BLOCKCOPY_PS_W12_H2 12, 32
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_16x4(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 16, 4 rows unrolled: pmovzxbw widens the low 8 bytes,
+; punpckhbw with zero (m0) widens the high 8; two 16-byte stores per row.
+INIT_XMM sse4
+cglobal blockcopy_ps_16x4, 4, 4, 3, dst, dstStride, src, srcStride
+
+add        r1,      r1                ; dstStride: int16_t units -> bytes
+pxor       m0,      m0                ; zero register for punpckhbw
+
+movu       m1,                 [r2]
+pmovzxbw   m2,                 m1
+movu       [r0],               m2
+punpckhbw  m1,                 m0
+movu       [r0 + 16],          m1
+
+movu       m1,                 [r2 + r3]
+pmovzxbw   m2,                 m1
+movu       [r0 + r1],          m2
+punpckhbw  m1,                 m0
+movu       [r0 + r1 + 16],     m1
+
+movu       m1,                 [r2 + 2 * r3]
+pmovzxbw   m2,                 m1
+movu       [r0 + 2 * r1],      m2
+punpckhbw  m1,                 m0
+movu       [r0 + 2 * r1 + 16], m1
+
+lea        r0,                 [r0 + 2 * r1]
+lea        r2,                 [r2 + 2 * r3]
+
+movu       m1,                 [r2 + r3]
+pmovzxbw   m2,                 m1
+movu       [r0 + r1],          m2
+punpckhbw  m1,                 m0
+movu       [r0 + r1 + 16],     m1
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 16, looped: 4 rows per iteration; low half widened
+; with pmovzxbw, high half with punpckhbw against the zero register m0.
+%macro BLOCKCOPY_PS_W16_H4 2
+INIT_XMM sse4
+cglobal blockcopy_ps_%1x%2, 4, 5, 3, dst, dstStride, src, srcStride
+
+add        r1,      r1                ; dstStride: int16_t units -> bytes
+mov        r4d,     %2/4              ; 4 rows per iteration
+pxor       m0,      m0                ; zero register for punpckhbw
+
+.loop:
+      movu       m1,                 [r2]
+      pmovzxbw   m2,                 m1
+      movu       [r0],               m2
+      punpckhbw  m1,                 m0
+      movu       [r0 + 16],          m1
+
+      movu       m1,                 [r2 + r3]
+      pmovzxbw   m2,                 m1
+      movu       [r0 + r1],          m2
+      punpckhbw  m1,                 m0
+      movu       [r0 + r1 + 16],     m1
+
+      movu       m1,                 [r2 + 2 * r3]
+      pmovzxbw   m2,                 m1
+      movu       [r0 + 2 * r1],      m2
+      punpckhbw  m1,                 m0
+      movu       [r0 + 2 * r1 + 16], m1
+
+      lea        r0,                 [r0 + 2 * r1]
+      lea        r2,                 [r2 + 2 * r3]
+
+      movu       m1,                 [r2 + r3]
+      pmovzxbw   m2,                 m1
+      movu       [r0 + r1],          m2
+      punpckhbw  m1,                 m0
+      movu       [r0 + r1 + 16],     m1
+
+      lea        r0,                 [r0 + 2 * r1]
+      lea        r2,                 [r2 + 2 * r3]
+
+      dec        r4d
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_PS_W16_H4 16,  8
+BLOCKCOPY_PS_W16_H4 16, 12
+BLOCKCOPY_PS_W16_H4 16, 16
+BLOCKCOPY_PS_W16_H4 16, 32
+BLOCKCOPY_PS_W16_H4 16, 64
+BLOCKCOPY_PS_W16_H4 16, 24
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; AVX2 widening copy, width 16: pmovzxbw zero-extends 16 bytes straight into
+; one ymm (m2), so no zero register / punpckhbw is needed here — the dead
+; "pxor m0, m0" of the original has been dropped (m0 was never read).
+; 4 rows per iteration.
+%macro BLOCKCOPY_PS_W16_H4_avx2 2
+INIT_YMM avx2
+cglobal blockcopy_ps_%1x%2, 4, 7, 3
+
+    add     r1, r1                    ; dstStride: int16_t units -> bytes
+    mov     r4d, %2/4                 ; 4 rows per iteration
+    lea     r5, [3 * r3]
+    lea     r6, [3 * r1]
+
+.loop:
+    movu        xm1, [r2]
+    pmovzxbw    m2, xm1
+    movu        [r0], m2
+    movu        xm1, [r2 + r3]
+    pmovzxbw    m2, xm1
+    movu        [r0 + r1], m2
+    movu        xm1, [r2 + 2 * r3]
+    pmovzxbw    m2, xm1
+    movu        [r0 + 2 * r1], m2
+    movu        xm1, [r2 + r5]
+    pmovzxbw    m2, xm1
+    movu        [r0 + r6], m2
+
+    lea         r0, [r0 + 4 * r1]
+    lea         r2, [r2 + 4 * r3]
+
+    dec         r4d
+    jnz         .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PS_W16_H4_avx2 16, 16
+BLOCKCOPY_PS_W16_H4_avx2 16, 32
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 24: pixels 0-15 via pmovzxbw + punpckhbw(zero),
+; pixels 16-23 via an 8-byte movh load widened with pmovzxbw.
+%macro BLOCKCOPY_PS_W24_H2 2
+INIT_XMM sse4
+cglobal blockcopy_ps_%1x%2, 4, 5, 3, dst, dstStride, src, srcStride
+
+add        r1,      r1                ; dstStride: int16_t units -> bytes
+mov        r4d,     %2/2              ; 2 rows per iteration
+pxor       m0,      m0                ; zero register for punpckhbw
+
+.loop:
+      movu       m1,             [r2]
+      pmovzxbw   m2,             m1
+      movu       [r0],           m2
+      punpckhbw  m1,             m0
+      movu       [r0 + 16],      m1
+
+      movh       m1,             [r2 + 16]
+      pmovzxbw   m1,             m1
+      movu       [r0 + 32],      m1
+
+      movu       m1,             [r2 + r3]
+      pmovzxbw   m2,             m1
+      movu       [r0 + r1],      m2
+      punpckhbw  m1,             m0
+      movu       [r0 + r1 + 16], m1
+
+      movh       m1,             [r2 + r3 + 16]
+      pmovzxbw   m1,             m1
+      movu       [r0 + r1 + 32], m1
+
+      lea        r0,             [r0 + 2 * r1]
+      lea        r2,             [r2 + 2 * r3]
+
+      dec        r4d
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_PS_W24_H2 24, 32
+
+BLOCKCOPY_PS_W24_H2 24, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widening copy, width 32: two 16-byte src loads per row, each split into
+; low half (pmovzxbw) and high half (punpckhbw with zero) -> four 16-byte
+; stores per row; 2 rows per iteration.
+%macro BLOCKCOPY_PS_W32_H2 2
+INIT_XMM sse4
+cglobal blockcopy_ps_%1x%2, 4, 5, 3, dst, dstStride, src, srcStride
+
+add        r1,      r1                ; dstStride: int16_t units -> bytes
+mov        r4d,     %2/2              ; 2 rows per iteration
+pxor       m0,      m0                ; zero register for punpckhbw
+
+.loop:
+      movu       m1,             [r2]
+      pmovzxbw   m2,             m1
+      movu       [r0],           m2
+      punpckhbw  m1,             m0
+      movu       [r0 + 16],      m1
+
+      movu       m1,             [r2 + 16]
+      pmovzxbw   m2,             m1
+      movu       [r0 + 32],      m2
+      punpckhbw  m1,             m0
+      movu       [r0 + 48],      m1
+
+      movu       m1,             [r2 + r3]
+      pmovzxbw   m2,             m1
+      movu       [r0 + r1],      m2
+      punpckhbw  m1,             m0
+      movu       [r0 + r1 + 16], m1
+
+      movu       m1,             [r2 + r3 + 16]
+      pmovzxbw   m2,             m1
+      movu       [r0 + r1 + 32], m2
+      punpckhbw  m1,             m0
+      movu       [r0 + r1 + 48], m1
+
+      lea        r0,             [r0 + 2 * r1]
+      lea        r2,             [r2 + 2 * r3]
+
+      dec        r4d
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_PS_W32_H2 32,  8
+BLOCKCOPY_PS_W32_H2 32, 16
+BLOCKCOPY_PS_W32_H2 32, 24
+BLOCKCOPY_PS_W32_H2 32, 32
+BLOCKCOPY_PS_W32_H2 32, 64
+
+BLOCKCOPY_PS_W32_H2 32, 48
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; AVX2 variant of the widen-copy: a single pmovzxbw on a ymm register
+; zero-extends 16 bytes at once, so each 32-pixel row needs only two
+; loads/stores.  Four rows per loop iteration; r5/r6 hold 3*stride so
+; the fourth row is addressed without extra pointer bumps.
+%macro BLOCKCOPY_PS_W32_H4_avx2 2
+INIT_YMM avx2
+cglobal blockcopy_ps_%1x%2, 4, 7, 2
+    add     r1, r1
+    mov     r4d, %2/4
+    lea     r5, [3 * r3]
+    lea     r6, [3 * r1]
+.loop:
+    pmovzxbw      m0, [r2 +  0]
+    pmovzxbw      m1, [r2 + 16]
+    movu          [r0 +  0], m0
+    movu          [r0 + 32], m1
+
+    pmovzxbw      m0, [r2 + r3 +  0]
+    pmovzxbw      m1, [r2 + r3 + 16]
+    movu          [r0 + r1 +  0], m0
+    movu          [r0 + r1 + 32], m1
+
+    pmovzxbw      m0, [r2 + r3 * 2 +  0]
+    pmovzxbw      m1, [r2 + r3 * 2 + 16]
+    movu          [r0 + r1 * 2 +  0], m0
+    movu          [r0 + r1 * 2 + 32], m1
+
+    pmovzxbw      m0, [r2 + r5 +  0]
+    pmovzxbw      m1, [r2 + r5 + 16]
+    movu          [r0 + r6 +  0], m0
+    movu          [r0 + r6 + 32], m1
+    lea           r0, [r0 + 4 * r1]
+    lea           r2, [r2 + 4 * r3]
+    dec           r4d
+    jnz           .loop
+    RET
+%endmacro
+
+BLOCKCOPY_PS_W32_H4_avx2 32, 32
+BLOCKCOPY_PS_W32_H4_avx2 32, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widen-copy for 48-wide blocks: three 16-byte source loads per row,
+; each split into a pmovzxbw low half and a punpckhbw-with-zero high
+; half.  Two rows per loop iteration.
+%macro BLOCKCOPY_PS_W48_H2 2
+INIT_XMM sse4
+cglobal blockcopy_ps_%1x%2, 4, 5, 3, dst, dstStride, src, srcStride
+
+add        r1,      r1
+mov        r4d,     %2/2
+pxor       m0,      m0
+
+.loop:
+      movu       m1,             [r2]
+      pmovzxbw   m2,             m1
+      movu       [r0],           m2
+      punpckhbw  m1,             m0
+      movu       [r0 + 16],      m1
+
+      movu       m1,             [r2 + 16]
+      pmovzxbw   m2,             m1
+      movu       [r0 + 32],      m2
+      punpckhbw  m1,             m0
+      movu       [r0 + 48],      m1
+
+      movu       m1,             [r2 + 32]
+      pmovzxbw   m2,             m1
+      movu       [r0 + 64],      m2
+      punpckhbw  m1,             m0
+      movu       [r0 + 80],      m1
+
+      movu       m1,             [r2 + r3]
+      pmovzxbw   m2,             m1
+      movu       [r0 + r1],      m2
+      punpckhbw  m1,             m0
+      movu       [r0 + r1 + 16], m1
+
+      movu       m1,             [r2 + r3 + 16]
+      pmovzxbw   m2,             m1
+      movu       [r0 + r1 + 32], m2
+      punpckhbw  m1,             m0
+      movu       [r0 + r1 + 48], m1
+
+      movu       m1,             [r2 + r3 + 32]
+      pmovzxbw   m2,             m1
+      movu       [r0 + r1 + 64], m2
+      punpckhbw  m1,             m0
+      movu       [r0 + r1 + 80], m1
+
+      lea        r0,             [r0 + 2 * r1]
+      lea        r2,             [r2 + 2 * r3]
+
+      dec        r4d
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_PS_W48_H2 48, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; Widen-copy for 64-wide blocks: four 16-byte source loads per row,
+; each zero-extended in two halves (pmovzxbw low, punpckhbw/m0 high).
+; Two rows per loop iteration.
+%macro BLOCKCOPY_PS_W64_H2 2
+INIT_XMM sse4
+cglobal blockcopy_ps_%1x%2, 4, 5, 3, dst, dstStride, src, srcStride
+
+add        r1,      r1
+mov        r4d,     %2/2
+pxor       m0,      m0
+
+.loop:
+      movu       m1,             [r2]
+      pmovzxbw   m2,             m1
+      movu       [r0],           m2
+      punpckhbw  m1,             m0
+      movu       [r0 + 16],      m1
+
+      movu       m1,             [r2 + 16]
+      pmovzxbw   m2,             m1
+      movu       [r0 + 32],      m2
+      punpckhbw  m1,             m0
+      movu       [r0 + 48],      m1
+
+      movu       m1,             [r2 + 32]
+      pmovzxbw   m2,             m1
+      movu       [r0 + 64],      m2
+      punpckhbw  m1,             m0
+      movu       [r0 + 80],      m1
+
+      movu       m1,             [r2 + 48]
+      pmovzxbw   m2,             m1
+      movu       [r0 + 96],      m2
+      punpckhbw  m1,             m0
+      movu       [r0 + 112],     m1
+
+      movu       m1,             [r2 + r3]
+      pmovzxbw   m2,             m1
+      movu       [r0 + r1],      m2
+      punpckhbw  m1,             m0
+      movu       [r0 + r1 + 16], m1
+
+      movu       m1,             [r2 + r3 + 16]
+      pmovzxbw   m2,             m1
+      movu       [r0 + r1 + 32], m2
+      punpckhbw  m1,             m0
+      movu       [r0 + r1 + 48], m1
+
+      movu       m1,             [r2 + r3 + 32]
+      pmovzxbw   m2,             m1
+      movu       [r0 + r1 + 64], m2
+      punpckhbw  m1,             m0
+      movu       [r0 + r1 + 80], m1
+
+      movu       m1,              [r2 + r3 + 48]
+      pmovzxbw   m2,              m1
+      movu       [r0 + r1 + 96],  m2
+      punpckhbw  m1,              m0
+      movu       [r0 + r1 + 112], m1
+
+      lea        r0,              [r0 + 2 * r1]
+      lea        r2,              [r2 + 2 * r3]
+
+      dec        r4d
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_PS_W64_H2 64, 16
+BLOCKCOPY_PS_W64_H2 64, 32
+BLOCKCOPY_PS_W64_H2 64, 48
+BLOCKCOPY_PS_W64_H2 64, 64
+;-----------------------------------------------------------------------------
+; void blockcopy_ps_%1x%2(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+;-----------------------------------------------------------------------------
+; AVX2 widen-copy for the 64x64 block: ymm pmovzxbw zero-extends 16
+; bytes per instruction, four loads/stores per row.  The %rep 2 body
+; copies 4 rows, so each loop iteration handles 8 rows (r4d = 64/8).
+INIT_YMM avx2
+cglobal blockcopy_ps_64x64, 4, 7, 4
+    add     r1, r1
+    mov     r4d, 64/8
+    lea     r5, [3 * r3]
+    lea     r6, [3 * r1]
+.loop:
+%rep 2
+    pmovzxbw      m0, [r2 +  0]
+    pmovzxbw      m1, [r2 + 16]
+    pmovzxbw      m2, [r2 + 32]
+    pmovzxbw      m3, [r2 + 48]
+    movu          [r0 +  0], m0
+    movu          [r0 + 32], m1
+    movu          [r0 + 64], m2
+    movu          [r0 + 96], m3
+
+    pmovzxbw      m0, [r2 + r3 +  0]
+    pmovzxbw      m1, [r2 + r3 + 16]
+    pmovzxbw      m2, [r2 + r3 + 32]
+    pmovzxbw      m3, [r2 + r3 + 48]
+    movu          [r0 + r1 +  0], m0
+    movu          [r0 + r1 + 32], m1
+    movu          [r0 + r1 + 64], m2
+    movu          [r0 + r1 + 96], m3
+
+    pmovzxbw      m0, [r2 + r3 * 2 +  0]
+    pmovzxbw      m1, [r2 + r3 * 2 + 16]
+    pmovzxbw      m2, [r2 + r3 * 2 + 32]
+    pmovzxbw      m3, [r2 + r3 * 2 + 48]
+    movu          [r0 + r1 * 2 +  0], m0
+    movu          [r0 + r1 * 2 + 32], m1
+    movu          [r0 + r1 * 2 + 64], m2
+    movu          [r0 + r1 * 2 + 96], m3
+
+    pmovzxbw      m0, [r2 + r5 +  0]
+    pmovzxbw      m1, [r2 + r5 + 16]
+    pmovzxbw      m2, [r2 + r5 + 32]
+    pmovzxbw      m3, [r2 + r5 + 48]
+    movu          [r0 + r6 +  0], m0
+    movu          [r0 + r6 + 32], m1
+    movu          [r0 + r6 + 64], m2
+    movu          [r0 + r6 + 96], m3
+    lea           r0, [r0 + 4 * r1]
+    lea           r2, [r2 + 4 * r3]
+%endrep
+    dec           r4d
+    jnz           .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_2x4(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of a 2x4 block.  A row of two int16_t pixels
+; is exactly 4 bytes, so each row moves through a 32-bit GPR; both
+; strides are doubled up front to convert element counts to bytes.
+INIT_XMM sse2
+cglobal blockcopy_ss_2x4, 4, 6, 0
+    add    r1, r1
+    add    r3, r3
+
+    mov    r4d, [r2]
+    mov    r5d, [r2 + r3]
+    mov    [r0], r4d
+    mov    [r0 + r1], r5d
+
+    lea    r2, [r2 + r3 * 2]
+    lea    r0, [r0 + 2 * r1]
+
+    mov    r4d, [r2]
+    mov    r5d, [r2 + r3]
+    mov    [r0], r4d
+    mov    [r0 + r1], r5d
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_2x8(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of a 2x8 block, fully unrolled: four
+; two-row groups, each row moved as a 4-byte GPR load/store.
+INIT_XMM sse2
+cglobal blockcopy_ss_2x8, 4, 6, 0
+    add    r1, r1
+    add    r3, r3
+
+    mov    r4d, [r2]
+    mov    r5d, [r2 + r3]
+    mov    [r0], r4d
+    mov    [r0 + r1], r5d
+
+    lea    r2, [r2 + r3 * 2]
+    lea    r0, [r0 + 2 * r1]
+
+    mov    r4d, [r2]
+    mov    r5d, [r2 + r3]
+    mov    [r0], r4d
+    mov    [r0 + r1], r5d
+
+    lea    r2, [r2 + r3 * 2]
+    lea    r0, [r0 + 2 * r1]
+
+    mov    r4d, [r2]
+    mov    r5d, [r2 + r3]
+    mov    [r0], r4d
+    mov    [r0 + r1], r5d
+
+    lea    r2, [r2 + r3 * 2]
+    lea    r0, [r0 + 2 * r1]
+
+    mov    r4d, [r2]
+    mov    r5d, [r2 + r3]
+    mov    [r0], r4d
+    mov    [r0 + r1], r5d
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_2x16(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of a 2x16 block: looped two rows at a time
+; (r6d = 16/2), each row a 4-byte GPR move.  dec is scheduled early so
+; the flags are set well before the jnz.
+INIT_XMM sse2
+cglobal blockcopy_ss_2x16, 4, 7, 0
+    add     r1, r1
+    add     r3, r3
+    mov     r6d,    16/2
+.loop:
+    mov     r4d,    [r2]
+    mov     r5d,    [r2 + r3]
+    dec     r6d
+    lea     r2, [r2 + r3 * 2]
+    mov     [r0],       r4d
+    mov     [r0 + r1],  r5d
+    lea     r0, [r0 + r1 * 2]
+    jnz     .loop
+    RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_4x2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of a 4x2 block: each 8-byte row fits one
+; movh (low 64 bits of an xmm register).
+INIT_XMM sse2
+cglobal blockcopy_ss_4x2, 4, 4, 2
+    add     r1, r1
+    add     r3, r3
+
+    movh    m0, [r2]
+    movh    m1, [r2 + r3]
+
+    movh    [r0], m0
+    movh    [r0 + r1], m1
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_4x4(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of a 4x4 block: four 8-byte rows, all loaded
+; before any store to keep loads and stores batched.
+INIT_XMM sse2
+cglobal blockcopy_ss_4x4, 4, 4, 4
+    add     r1, r1
+    add     r3, r3
+    movh    m0, [r2]
+    movh    m1, [r2 + r3]
+    lea     r2, [r2 + r3 * 2]
+    movh    m2, [r2]
+    movh    m3, [r2 + r3]
+
+    movh    [r0], m0
+    movh    [r0 + r1], m1
+    lea     r0, [r0 + 2 * r1]
+    movh    [r0], m2
+    movh    [r0 + r1], m3
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy for 4-wide blocks, eight rows per loop
+; iteration (r4d = %2/8), each row an 8-byte movh.
+%macro BLOCKCOPY_SS_W4_H8 2
+INIT_XMM sse2
+cglobal blockcopy_ss_%1x%2, 4, 5, 4
+    mov     r4d, %2/8
+    add     r1, r1
+    add     r3, r3
+.loop:
+    movh    m0, [r2]
+    movh    m1, [r2 + r3]
+    lea     r2, [r2 + r3 * 2]
+    movh    m2, [r2]
+    movh    m3, [r2 + r3]
+
+    movh    [r0], m0
+    movh    [r0 + r1], m1
+    lea     r0, [r0 + 2 * r1]
+    movh    [r0], m2
+    movh    [r0 + r1], m3
+
+    lea     r0, [r0 + 2 * r1]
+    lea     r2, [r2 + 2 * r3]
+    movh    m0, [r2]
+    movh    m1, [r2 + r3]
+    lea     r2, [r2 + r3 * 2]
+    movh    m2, [r2]
+    movh    m3, [r2 + r3]
+
+    movh    [r0], m0
+    movh    [r0 + r1], m1
+    lea     r0, [r0 + 2 * r1]
+    movh    [r0], m2
+    movh    [r0 + r1], m3
+    lea     r0, [r0 + 2 * r1]
+    lea     r2, [r2 + 2 * r3]
+
+    dec     r4d
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W4_H8 4, 8
+BLOCKCOPY_SS_W4_H8 4, 16
+
+BLOCKCOPY_SS_W4_H8 4, 32
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_6x8(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of a 6x8 block.  A 12-byte row is stored as
+; an 8-byte movh plus a 4-byte movd; pshufd with immediate 2 brings
+; dword 2 (pixels 4-5) of the 16-byte load into position for the movd.
+; Fully unrolled, two rows per group.
+INIT_XMM sse2
+cglobal blockcopy_ss_6x8, 4, 4, 4
+    add       r1, r1
+    add       r3, r3
+
+    movu      m0, [r2]
+    movu      m1, [r2 + r3]
+    pshufd    m2, m0, 2
+    pshufd    m3, m1, 2
+    movh      [r0], m0
+    movd      [r0 + 8], m2
+    movh      [r0 + r1], m1
+    movd      [r0 + r1 + 8], m3
+
+    lea       r0, [r0 + 2 * r1]
+    lea       r2, [r2 + 2 * r3]
+
+    movu      m0, [r2]
+    movu      m1, [r2 + r3]
+    pshufd    m2, m0, 2
+    pshufd    m3, m1, 2
+    movh      [r0], m0
+    movd      [r0 + 8], m2
+    movh      [r0 + r1], m1
+    movd      [r0 + r1 + 8], m3
+
+    lea       r0, [r0 + 2 * r1]
+    lea       r2, [r2 + 2 * r3]
+
+    movu      m0, [r2]
+    movu      m1, [r2 + r3]
+    pshufd    m2, m0, 2
+    pshufd    m3, m1, 2
+    movh      [r0], m0
+    movd      [r0 + 8], m2
+    movh      [r0 + r1], m1
+    movd      [r0 + r1 + 8], m3
+
+    lea       r0, [r0 + 2 * r1]
+    lea       r2, [r2 + 2 * r3]
+
+    movu      m0, [r2]
+    movu      m1, [r2 + r3]
+    pshufd    m2, m0, 2
+    pshufd    m3, m1, 2
+    movh      [r0], m0
+    movd      [r0 + 8], m2
+    movh      [r0 + r1], m1
+    movd      [r0 + r1 + 8], m3
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_6x16(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of a 6x16 block, looped two rows at a time.
+; Unlike blockcopy_ss_6x8 this loads the 12-byte row as movh + movd
+; directly, avoiding the pshufd shuffle.
+INIT_XMM sse2
+cglobal blockcopy_ss_6x16, 4, 5, 4
+    add     r1, r1
+    add     r3, r3
+    mov     r4d,    16/2
+.loop:
+    movh    m0, [r2]
+    movd    m2, [r2 + 8]
+    movh    m1, [r2 + r3]
+    movd    m3, [r2 + r3 + 8]
+    dec     r4d
+    lea     r2, [r2 + r3 * 2]
+    movh    [r0],           m0
+    movd    [r0 + 8],       m2
+    movh    [r0 + r1],      m1
+    movd    [r0 + r1 + 8],  m3
+    lea     r0, [r0 + r1 * 2]
+    jnz     .loop
+    RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_8x2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of an 8x2 block: each 16-byte row is one
+; unaligned xmm load/store.
+INIT_XMM sse2
+cglobal blockcopy_ss_8x2, 4, 4, 2
+    add     r1, r1
+    add     r3, r3
+
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_8x4(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of an 8x4 block: four 16-byte rows held in
+; m0-m3, loads batched before stores.
+INIT_XMM sse2
+cglobal blockcopy_ss_8x4, 4, 4, 4
+    add     r1, r1
+    add     r3, r3
+
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    lea     r2, [r2 + r3 * 2]
+    movu    m2, [r2]
+    movu    m3, [r2 + r3]
+
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    lea     r0, [r0 + 2 * r1]
+    movu    [r0], m2
+    movu    [r0 + r1], m3
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_8x6(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of an 8x6 block: a four-row batch followed
+; by the remaining two rows, one 16-byte movu per row.
+INIT_XMM sse2
+cglobal blockcopy_ss_8x6, 4, 4, 4
+
+    add     r1, r1
+    add     r3, r3
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    lea     r2, [r2 + r3 * 2]
+    movu    m2, [r2]
+    movu    m3, [r2 + r3]
+
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    lea     r0, [r0 + 2 * r1]
+    movu    [r0], m2
+    movu    [r0 + r1], m3
+
+    lea     r2, [r2 + r3 * 2]
+    lea     r0, [r0 + 2 * r1]
+
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_8x12(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy of an 8x12 block, looped two rows at a time
+; (r4d = 12/2), one 16-byte movu per row.
+INIT_XMM sse2
+cglobal blockcopy_ss_8x12, 4, 5, 2
+    add     r1, r1
+    add     r3, r3
+    mov     r4d, 12/2
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    lea     r2, [r2 + 2 * r3]
+    dec     r4d
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    lea     r0, [r0 + 2 * r1]
+    jnz     .loop
+    RET
+
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy for 8-wide blocks, eight rows per loop
+; iteration (two unrolled four-row batches), one 16-byte movu per row.
+%macro BLOCKCOPY_SS_W8_H8 2
+INIT_XMM sse2
+cglobal blockcopy_ss_%1x%2, 4, 5, 4
+    mov     r4d, %2/8
+    add     r1, r1
+    add     r3, r3
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    lea     r2, [r2 + r3 * 2]
+    movu    m2, [r2]
+    movu    m3, [r2 + r3]
+
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    lea     r0, [r0 + 2 * r1]
+    movu    [r0], m2
+    movu    [r0 + r1], m3
+
+
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    lea     r2, [r2 + r3 * 2]
+    movu    m2, [r2]
+    movu    m3, [r2 + r3]
+
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    lea     r0, [r0 + 2 * r1]
+    movu    [r0], m2
+    movu    [r0 + r1], m3
+
+    dec     r4d
+    lea     r0, [r0 + 2 * r1]
+    lea     r2, [r2 + 2 * r3]
+    jnz    .loop
+RET
+%endmacro
+
+BLOCKCOPY_SS_W8_H8 8, 8
+BLOCKCOPY_SS_W8_H8 8, 16
+BLOCKCOPY_SS_W8_H8 8, 32
+
+BLOCKCOPY_SS_W8_H8 8, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy for 12-wide blocks: a 24-byte row moves as a
+; 16-byte movu plus an 8-byte movh.  Four rows per loop iteration.
+%macro BLOCKCOPY_SS_W12_H4 2
+INIT_XMM sse2
+cglobal blockcopy_ss_%1x%2, 4, 5, 4
+
+    mov     r4d, %2/4
+    add     r1, r1
+    add     r3, r3
+.loop:
+    movu    m0, [r2]
+    movh    m1, [r2 + 16]
+    movu    m2, [r2 + r3]
+    movh    m3, [r2 + r3 + 16]
+    lea     r2, [r2 + 2 * r3]
+
+    movu    [r0], m0
+    movh    [r0 + 16], m1
+    movu    [r0 + r1], m2
+    movh    [r0 + r1 + 16], m3
+
+    lea     r0, [r0 + 2 * r1]
+    movu    m0, [r2]
+    movh    m1, [r2 + 16]
+    movu    m2, [r2 + r3]
+    movh    m3, [r2 + r3 + 16]
+
+    movu    [r0], m0
+    movh    [r0 + 16], m1
+    movu    [r0 + r1], m2
+    movh    [r0 + r1 + 16], m3
+
+    dec     r4d
+    lea     r0, [r0 + 2 * r1]
+    lea     r2, [r2 + 2 * r3]
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W12_H4 12, 16
+
+BLOCKCOPY_SS_W12_H4 12, 32
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_16x4(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy for 16-wide blocks: each 32-byte row is two
+; 16-byte movu pairs.  Four rows per loop iteration (two unrolled
+; two-row batches).
+%macro BLOCKCOPY_SS_W16_H4 2
+INIT_XMM sse2
+cglobal blockcopy_ss_%1x%2, 4, 5, 4
+    mov     r4d, %2/4
+    add     r1, r1
+    add     r3, r3
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + r3]
+    movu    m3, [r2 + r3 + 16]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + r1], m2
+    movu    [r0 + r1 + 16], m3
+
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + r3]
+    movu    m3, [r2 + r3 + 16]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + r1], m2
+    movu    [r0 + r1 + 16], m3
+
+    dec     r4d
+    lea     r0, [r0 + 2 * r1]
+    lea     r2, [r2 + 2 * r3]
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W16_H4 16, 4
+BLOCKCOPY_SS_W16_H4 16, 12
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; AVX variant for 16-wide blocks: a 32-byte row fits one ymm register,
+; so each of the four rows per iteration is a single load/store.
+; r5/r6 cache 3*stride for the fourth row.
+%macro BLOCKCOPY_SS_W16_H4_avx 2
+INIT_YMM avx
+cglobal blockcopy_ss_%1x%2, 4, 7, 4
+    mov     r4d, %2/4
+    add     r1, r1
+    add     r3, r3
+    lea     r5, [3 * r3]
+    lea     r6, [3 * r1]
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + 2 * r3]
+    movu    m3, [r2 + r5]
+
+    movu    [r0], m0
+    movu    [r0 + r1], m1
+    movu    [r0 + 2 * r1], m2
+    movu    [r0 + r6], m3
+
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+    dec     r4d
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W16_H4_avx 16, 4
+BLOCKCOPY_SS_W16_H4_avx 16, 12
+BLOCKCOPY_SS_W16_H4_avx 16, 8
+BLOCKCOPY_SS_W16_H4_avx 16, 16
+BLOCKCOPY_SS_W16_H4_avx 16, 24
+BLOCKCOPY_SS_W16_H4_avx 16, 32
+BLOCKCOPY_SS_W16_H4_avx 16, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; SSE2 copy for 16-wide blocks with deeper unrolling: eight rows per
+; loop iteration (four unrolled two-row batches), two 16-byte movu per
+; row.
+%macro BLOCKCOPY_SS_W16_H8 2
+INIT_XMM sse2
+cglobal blockcopy_ss_%1x%2, 4, 5, 4
+    mov     r4d, %2/8
+    add     r1, r1
+    add     r3, r3
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + r3]
+    movu    m3, [r2 + r3 + 16]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + r1], m2
+    movu    [r0 + r1 + 16], m3
+
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + r3]
+    movu    m3, [r2 + r3 + 16]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + r1], m2
+    movu    [r0 + r1 + 16], m3
+
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + r3]
+    movu    m3, [r2 + r3 + 16]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + r1], m2
+    movu    [r0 + r1 + 16], m3
+
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + r3]
+    movu    m3, [r2 + r3 + 16]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + r1], m2
+    movu    [r0 + r1 + 16], m3
+
+    dec     r4d
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W16_H8 16, 8
+BLOCKCOPY_SS_W16_H8 16, 16
+BLOCKCOPY_SS_W16_H8 16, 32
+BLOCKCOPY_SS_W16_H8 16, 64
+
+BLOCKCOPY_SS_W16_H8 16, 24
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; int16_t -> int16_t copy for 24-wide blocks: a 48-byte row moves as
+; three 16-byte movu operations.  Four rows per loop iteration (two
+; unrolled two-row batches).
+%macro BLOCKCOPY_SS_W24_H4 2
+INIT_XMM sse2
+cglobal blockcopy_ss_%1x%2, 4, 5, 6
+    mov     r4d, %2/4
+    add     r1, r1
+    add     r3, r3
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + r3]
+    movu    m4, [r2 + r3 + 16]
+    movu    m5, [r2 + r3 + 32]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + 32], m2
+    movu    [r0 + r1], m3
+    movu    [r0 + r1 + 16], m4
+    movu    [r0 + r1 + 32], m5
+
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + r3]
+    movu    m4, [r2 + r3 + 16]
+    movu    m5, [r2 + r3 + 32]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + 32], m2
+    movu    [r0 + r1], m3
+    movu    [r0 + r1 + 16], m4
+    movu    [r0 + r1 + 32], m5
+
+    dec     r4d
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W24_H4 24, 32
+
+BLOCKCOPY_SS_W24_H4 24, 64
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; AVX variant for 24-wide blocks: a 48-byte row moves as one 32-byte
+; ymm load/store plus one 16-byte xmm load/store.  Four rows per loop
+; iteration; r5/r6 cache 3*stride for the fourth row.
+%macro BLOCKCOPY_SS_W24_H4_avx 2
+INIT_YMM avx
+cglobal blockcopy_ss_%1x%2, 4, 7, 2
+
+    mov    r4d, %2/4
+    add    r1, r1
+    add    r3, r3
+    lea    r5, [3 * r3]
+    lea    r6, [3 * r1]
+
+.loop:
+    movu    m0, [r2]
+    movu    xm1, [r2 + 32]
+    movu    [r0], m0
+    movu    [r0 + 32], xm1
+    movu    m0, [r2 + r3]
+    movu    xm1, [r2 + r3 + 32]
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 32], xm1
+    movu    m0, [r2 + 2 * r3]
+    movu    xm1, [r2 + 2 * r3 + 32]
+    movu    [r0 + 2 * r1], m0
+    movu    [r0 + 2 * r1 + 32], xm1
+    movu    m0, [r2 + r5]
+    movu    xm1, [r2 + r5 + 32]
+    movu    [r0 + r6], m0
+    movu    [r0 + r6 + 32], xm1
+    dec     r4d
+    lea     r2, [r2 + 4 * r3]
+    lea     r0, [r0 + 4 * r1]
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W24_H4_avx 24, 32
+BLOCKCOPY_SS_W24_H4_avx 24, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; SSE2 copy for 32-wide blocks: a 64-byte row moves as four 16-byte
+; movu operations.  Four rows per loop iteration (two unrolled two-row
+; batches).
+%macro BLOCKCOPY_SS_W32_H4 2
+INIT_XMM sse2
+cglobal blockcopy_ss_%1x%2, 4, 5, 4
+    mov     r4d, %2/4
+    add     r1, r1
+    add     r3, r3
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + 48]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + 32], m2
+    movu    [r0 + 48], m3
+
+    movu    m0, [r2 + r3]
+    movu    m1, [r2 + r3 + 16]
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r2 + r3 + 48]
+
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 16], m1
+    movu    [r0 + r1 + 32], m2
+    movu    [r0 + r1 + 48], m3
+
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + 48]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + 32], m2
+    movu    [r0 + 48], m3
+
+    movu    m0, [r2 + r3]
+    movu    m1, [r2 + r3 + 16]
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r2 + r3 + 48]
+
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 16], m1
+    movu    [r0 + r1 + 32], m2
+    movu    [r0 + r1 + 48], m3
+
+    dec     r4d
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W32_H4 32, 8
+BLOCKCOPY_SS_W32_H4 32, 16
+BLOCKCOPY_SS_W32_H4 32, 24
+BLOCKCOPY_SS_W32_H4 32, 32
+BLOCKCOPY_SS_W32_H4 32, 64
+
+BLOCKCOPY_SS_W32_H4 32, 48
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; AVX variant for 32-wide blocks: a 64-byte row moves as two 32-byte
+; ymm load/store pairs.  Four rows per loop iteration; note r5 is
+; 3*dstStride and r6 is 3*srcStride here (swapped relative to the
+; other AVX routines in this file, but used consistently below).
+%macro BLOCKCOPY_SS_W32_H4_avx 2
+INIT_YMM avx
+cglobal blockcopy_ss_%1x%2, 4, 7, 4
+
+    mov    r4d, %2/4
+    add    r1, r1
+    add    r3, r3
+    lea    r5, [3 * r1]
+    lea    r6, [3 * r3]
+
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 32]
+
+    movu    [r0], m0
+    movu    [r0 + 32], m1
+
+    movu    m0, [r2 + r3]
+    movu    m1, [r2 + r3 + 32]
+
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 32], m1
+
+    movu    m0, [r2 + 2 * r3]
+    movu    m1, [r2 + 2 * r3 + 32]
+
+    movu    [r0 + 2 * r1], m0
+    movu    [r0 + 2 * r1 + 32], m1
+
+    movu    m0, [r2 + r6]
+    movu    m1, [r2 + r6 + 32]
+
+    movu    [r0 + r5], m0
+    movu    [r0 + r5 + 32], m1
+
+    dec     r4d
+    lea     r2, [r2 + 4 * r3]
+    lea     r0, [r0 + 4 * r1]
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W32_H4_avx 32,  8
+BLOCKCOPY_SS_W32_H4_avx 32, 16
+BLOCKCOPY_SS_W32_H4_avx 32, 24
+BLOCKCOPY_SS_W32_H4_avx 32, 32
+BLOCKCOPY_SS_W32_H4_avx 32, 48
+BLOCKCOPY_SS_W32_H4_avx 32, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; SSE2 copy for 48-wide blocks: a 96-byte row moves as six 16-byte
+; movu operations.  NOTE(review): despite the "_H2" suffix, the loop
+; counter is %2/4 and the body copies FOUR rows per iteration (two
+; unrolled two-row batches) — the macro name is a misnomer.
+%macro BLOCKCOPY_SS_W48_H2 2
+INIT_XMM sse2
+cglobal blockcopy_ss_%1x%2, 4, 5, 6
+    mov     r4d, %2/4
+    add     r1, r1
+    add     r3, r3
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + 48]
+    movu    m4, [r2 + 64]
+    movu    m5, [r2 + 80]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + 32], m2
+    movu    [r0 + 48], m3
+    movu    [r0 + 64], m4
+    movu    [r0 + 80], m5
+
+    movu    m0, [r2 + r3]
+    movu    m1, [r2 + r3 + 16]
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r2 + r3 + 48]
+    movu    m4, [r2 + r3 + 64]
+    movu    m5, [r2 + r3 + 80]
+
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 16], m1
+    movu    [r0 + r1 + 32], m2
+    movu    [r0 + r1 + 48], m3
+    movu    [r0 + r1 + 64], m4
+    movu    [r0 + r1 + 80], m5
+
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + 48]
+    movu    m4, [r2 + 64]
+    movu    m5, [r2 + 80]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + 32], m2
+    movu    [r0 + 48], m3
+    movu    [r0 + 64], m4
+    movu    [r0 + 80], m5
+
+    movu    m0, [r2 + r3]
+    movu    m1, [r2 + r3 + 16]
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r2 + r3 + 48]
+    movu    m4, [r2 + r3 + 64]
+    movu    m5, [r2 + r3 + 80]
+
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 16], m1
+    movu    [r0 + r1 + 32], m2
+    movu    [r0 + r1 + 48], m3
+    movu    [r0 + r1 + 64], m4
+    movu    [r0 + r1 + 80], m5
+
+    dec     r4d
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+    jnz     .loop
+RET
+%endmacro
+
+BLOCKCOPY_SS_W48_H2 48, 64
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_48x64(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+; AVX copy of a 48x64 int16_t block: a 96-byte row moves as three
+; 32-byte ymm load/store pairs.  Four rows per loop iteration; r5/r6
+; cache 3*stride for the fourth row.
+INIT_YMM avx
+cglobal blockcopy_ss_48x64, 4, 7, 6
+
+    mov    r4d, 64/4
+    add    r1, r1
+    add    r3, r3
+    lea    r5, [3 * r3]
+    lea    r6, [3 * r1]
+
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 32]
+    movu    m2, [r2 + 64]
+
+    movu    [r0], m0
+    movu    [r0 + 32], m1
+    movu    [r0 + 64], m2
+
+    movu    m0, [r2 + r3]
+    movu    m1, [r2 + r3 + 32]
+    movu    m2, [r2 + r3 + 64]
+
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 32], m1
+    movu    [r0 + r1 + 64], m2
+
+    movu    m0, [r2 + 2 * r3]
+    movu    m1, [r2 + 2 * r3 + 32]
+    movu    m2, [r2 + 2 * r3 + 64]
+
+    movu    [r0 + 2 * r1], m0
+    movu    [r0 + 2 * r1 + 32], m1
+    movu    [r0 + 2 * r1 + 64], m2
+
+    movu    m0, [r2 + r5]
+    movu    m1, [r2 + r5 + 32]
+    movu    m2, [r2 + r5 + 64]
+
+    movu    [r0 + r6], m0
+    movu    [r0 + r6 + 32], m1
+    movu    [r0 + r6 + 64], m2
+
+    dec     r4d
+    lea     r2, [r2 + 4 * r3]
+    lea     r0, [r0 + 4 * r1]
+    jnz     .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SS_W64_H4 2   ; %1 = width (64), %2 = height
+INIT_XMM sse2
+cglobal blockcopy_ss_%1x%2, 4, 5, 6, dst, dstStride, src, srcStride
+    mov     r4d, %2/4   ; loop counter: 4 rows per iteration (two unrolled 2-row passes)
+    add     r1, r1      ; dstStride: int16 elements -> bytes
+    add     r3, r3      ; srcStride: int16 elements -> bytes
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + 48]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + 32], m2
+    movu    [r0 + 48], m3
+
+    movu    m0,    [r2 + 64]
+    movu    m1,    [r2 + 80]
+    movu    m2,    [r2 + 96]
+    movu    m3,    [r2 + 112]
+
+    movu    [r0 + 64], m0
+    movu    [r0 + 80], m1
+    movu    [r0 + 96], m2
+    movu    [r0 + 112], m3
+
+    movu    m0, [r2 + r3]
+    movu    m1, [r2 + r3 + 16]
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r2 + r3 + 48]
+
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 16], m1
+    movu    [r0 + r1 + 32], m2
+    movu    [r0 + r1 + 48], m3
+
+    movu    m0, [r2 + r3 + 64]
+    movu    m1, [r2 + r3 + 80]
+    movu    m2, [r2 + r3 + 96]
+    movu    m3, [r2 + r3 + 112]
+
+    movu    [r0 + r1 + 64], m0
+    movu    [r0 + r1 + 80], m1
+    movu    [r0 + r1 + 96], m2
+    movu    [r0 + r1 + 112], m3
+
+    lea     r2, [r2 + 2 * r3]   ; advance to rows 2-3 of this iteration
+    lea     r0, [r0 + 2 * r1]
+
+    movu    m0, [r2]
+    movu    m1, [r2 + 16]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + 48]
+
+    movu    [r0], m0
+    movu    [r0 + 16], m1
+    movu    [r0 + 32], m2
+    movu    [r0 + 48], m3
+
+    movu    m0,    [r2 + 64]
+    movu    m1,    [r2 + 80]
+    movu    m2,    [r2 + 96]
+    movu    m3,    [r2 + 112]
+
+    movu    [r0 + 64], m0
+    movu    [r0 + 80], m1
+    movu    [r0 + 96], m2
+    movu    [r0 + 112], m3
+
+    movu    m0, [r2 + r3]
+    movu    m1, [r2 + r3 + 16]
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r2 + r3 + 48]
+
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 16], m1
+    movu    [r0 + r1 + 32], m2
+    movu    [r0 + r1 + 48], m3
+
+    movu    m0, [r2 + r3 + 64]
+    movu    m1, [r2 + r3 + 80]
+    movu    m2, [r2 + r3 + 96]
+    movu    m3, [r2 + r3 + 112]
+
+    movu    [r0 + r1 + 64], m0
+    movu    [r0 + r1 + 80], m1
+    movu    [r0 + r1 + 96], m2
+    movu    [r0 + r1 + 112], m3
+
+    dec     r4d
+    lea     r2, [r2 + 2 * r3]
+    lea     r0, [r0 + 2 * r1]
+    jnz     .loop
+
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W64_H4 64, 16
+BLOCKCOPY_SS_W64_H4 64, 32
+BLOCKCOPY_SS_W64_H4 64, 48
+BLOCKCOPY_SS_W64_H4 64, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SS_W64_H4_avx 2   ; %1 = width (64), %2 = height; AVX 256-bit variant
+INIT_YMM avx
+cglobal blockcopy_ss_%1x%2, 4, 7, 4, dst, dstStride, src, srcStride
+    mov     r4d, %2/4   ; loop counter: 4 rows per iteration
+    add     r1, r1      ; dstStride: int16 elements -> bytes
+    add     r3, r3      ; srcStride: int16 elements -> bytes
+    lea     r5, [3 * r1]   ; r5 = 3 * dstStride
+    lea     r6, [3 * r3]   ; r6 = 3 * srcStride
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r2 + 32]
+    movu    m2, [r2 + 64]
+    movu    m3, [r2 + 96]
+
+    movu    [r0], m0
+    movu    [r0 + 32], m1
+    movu    [r0 + 64], m2
+    movu    [r0 + 96], m3
+
+    movu    m0, [r2 + r3]
+    movu    m1, [r2 + r3 + 32]
+    movu    m2, [r2 + r3 + 64]
+    movu    m3, [r2 + r3 + 96]
+
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 32], m1
+    movu    [r0 + r1 + 64], m2
+    movu    [r0 + r1 + 96], m3
+
+    movu    m0, [r2 + 2 * r3]
+    movu    m1, [r2 + 2 * r3 + 32]
+    movu    m2, [r2 + 2 * r3 + 64]
+    movu    m3, [r2 + 2 * r3 + 96]
+
+    movu    [r0 + 2 * r1], m0
+    movu    [r0 + 2 * r1 + 32], m1
+    movu    [r0 + 2 * r1 + 64], m2
+    movu    [r0 + 2 * r1 + 96], m3
+
+    movu    m0, [r2 + r6]
+    movu    m1, [r2 + r6 + 32]
+    movu    m2, [r2 + r6 + 64]
+    movu    m3, [r2 + r6 + 96]
+    lea     r2, [r2 + 4 * r3]   ; advance src early, between loads and stores
+
+    movu    [r0 + r5], m0
+    movu    [r0 + r5 + 32], m1
+    movu    [r0 + r5 + 64], m2
+    movu    [r0 + r5 + 96], m3
+    lea     r0, [r0 + 4 * r1]   ; advance dst by 4 rows
+
+    dec     r4d
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W64_H4_avx 64, 16
+BLOCKCOPY_SS_W64_H4_avx 64, 32
+BLOCKCOPY_SS_W64_H4_avx 64, 48
+BLOCKCOPY_SS_W64_H4_avx 64, 64
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shr(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy2Dto1D_shr_4, 3, 4, 4   ; 4x4: strided 2D -> linear 1D with rounding right-shift
+    add             r2d, r2d       ; srcStride: int16 elements -> bytes
+    movd            m0, r3m        ; m0 = shift
+    pcmpeqw         m1, m1         ; m1 = all ones = -1 per word
+    psllw           m1, m0         ; m1 = -(1 << shift)
+    psraw           m1, 1          ; m1 = -(1 << (shift - 1)) = -round
+
+    ; register alloc
+    ; r0 - dst
+    ; r1 - src
+    ; r2 - srcStride
+    ; m0 - shift
+    ; m1 - word [-round]
+
+    ; Row 0-3
+    movh            m2, [r1]
+    movhps          m2, [r1 + r2]
+    lea             r1, [r1 + r2 * 2]
+    movh            m3, [r1]
+    movhps          m3, [r1 + r2]
+    psubw           m2, m1         ; x - (-round) == x + round
+    psubw           m3, m1
+    psraw           m2, m0         ; (x + round) >> shift
+    psraw           m3, m0
+    mova            [r0 + 0 * mmsize], m2
+    mova            [r0 + 1 * mmsize], m3
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shr(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy2Dto1D_shr_8, 3, 5, 4   ; 8x8: strided 2D -> linear 1D with rounding right-shift
+    add             r2d, r2d       ; srcStride: int16 elements -> bytes
+    movd            m0, r3m        ; m0 = shift
+    pcmpeqw         m1, m1         ; m1 = -1 per word
+    psllw           m1, m0         ; m1 = -(1 << shift)
+    psraw           m1, 1          ; m1 = -(1 << (shift - 1)) = -round
+    mov             r3d, 8/4       ; 4 rows per iteration
+    lea             r4, [r2 * 3]
+
+    ; register alloc
+    ; r0 - dst
+    ; r1 - src
+    ; r2 - srcStride
+    ; r3 - loop counter
+    ; r4 - stride * 3
+    ; m0 - shift
+    ; m1 - word [-round]
+
+.loop:
+    ; Row 0-1
+    mova            m2, [r1]
+    mova            m3, [r1 + r2]
+    psubw           m2, m1         ; x + round
+    psubw           m3, m1
+    psraw           m2, m0         ; >> shift
+    psraw           m3, m0
+    mova            [r0 + 0 * mmsize], m2
+    mova            [r0 + 1 * mmsize], m3
+
+    ; Row 2-3
+    mova            m2, [r1 + r2 * 2]
+    mova            m3, [r1 + r4]
+    psubw           m2, m1
+    psubw           m3, m1
+    psraw           m2, m0
+    psraw           m3, m0
+    mova            [r0 + 2 * mmsize], m2
+    mova            [r0 + 3 * mmsize], m3
+
+    add             r0, 4 * mmsize
+    lea             r1, [r1 + r2 * 4]
+    dec             r3d
+    jnz            .loop
+    RET
+
+INIT_YMM avx2
+cglobal cpy2Dto1D_shr_8, 3, 4, 4   ; AVX2: two 8-coeff rows per ymm register
+    add        r2d, r2d            ; srcStride: int16 elements -> bytes
+    movd       xm0, r3m            ; xm0 = shift
+    pcmpeqw    m1, m1              ; m1 = -1 per word
+    psllw      m1, xm0             ; m1 = -(1 << shift)
+    psraw      m1, 1               ; m1 = -round
+    lea        r3, [r2 * 3]
+
+    ; Row 0-3
+    movu           xm2, [r1]
+    vinserti128    m2, m2, [r1 + r2], 1
+    movu           xm3, [r1 + 2 * r2]
+    vinserti128    m3, m3, [r1 + r3], 1
+    psubw          m2, m1          ; x + round
+    psraw          m2, xm0         ; >> shift
+    psubw          m3, m1
+    psraw          m3, xm0
+    movu           [r0], m2
+    movu           [r0 + 32], m3
+
+    ; Row 4-7
+    lea            r1, [r1 + 4 * r2]
+    movu           xm2, [r1]
+    vinserti128    m2, m2, [r1 + r2], 1
+    movu           xm3, [r1 + 2 * r2]
+    vinserti128    m3, m3, [r1 + r3], 1
+    psubw          m2, m1
+    psraw          m2, xm0
+    psubw          m3, m1
+    psraw          m3, xm0
+    movu           [r0 + 64], m2
+    movu           [r0 + 96], m3
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shr(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy2Dto1D_shr_16, 3, 4, 4   ; 16x16: 2 xmm per row, 2 rows per iteration
+    add             r2d, r2d        ; srcStride: int16 elements -> bytes
+    movd            m0, r3m         ; m0 = shift
+    pcmpeqw         m1, m1          ; m1 = -1 per word
+    psllw           m1, m0          ; m1 = -(1 << shift)
+    psraw           m1, 1           ; m1 = -round
+    mov             r3d, 16/2
+
+    ; register alloc
+    ; r0 - dst
+    ; r1 - src
+    ; r2 - srcStride
+    ; r3 - loop counter
+    ; m0 - shift
+    ; m1 - word [-round]
+
+.loop:
+    ; Row 0
+    mova            m2, [r1 + 0 * mmsize]
+    mova            m3, [r1 + 1 * mmsize]
+    psubw           m2, m1          ; x + round
+    psubw           m3, m1
+    psraw           m2, m0          ; >> shift
+    psraw           m3, m0
+    mova            [r0 + 0 * mmsize], m2
+    mova            [r0 + 1 * mmsize], m3
+
+    ; Row 1
+    mova            m2, [r1 + r2 + 0 * mmsize]
+    mova            m3, [r1 + r2 + 1 * mmsize]
+    psubw           m2, m1
+    psubw           m3, m1
+    psraw           m2, m0
+    psraw           m3, m0
+    mova            [r0 + 2 * mmsize], m2
+    mova            [r0 + 3 * mmsize], m3
+
+    add             r0, 4 * mmsize
+    lea             r1, [r1 + r2 * 2]
+    dec             r3d
+    jnz            .loop
+    RET
+
+INIT_YMM avx2
+cglobal cpy2Dto1D_shr_16, 4, 5, 4   ; 4 declared args: shift is auto-loaded into r3d
+    add        r2d, r2d             ; srcStride: int16 elements -> bytes
+    movd       xm0, r3d             ; xm0 = shift (from register, not r3m)
+    pcmpeqw    m1, m1               ; m1 = -1 per word
+    psllw      m1, xm0              ; m1 = -(1 << shift)
+    psraw      m1, 1                ; m1 = -round
+    lea        r3, [r2 * 3]         ; r3 reused: 3 * srcStride
+    mov        r4d, 16/8            ; 8 rows per iteration
+
+.loop:
+    ; Row 0-1
+    movu       m2, [r1]
+    movu       m3, [r1 + r2]
+    psubw      m2, m1               ; x + round
+    psraw      m2, xm0              ; >> shift
+    psubw      m3, m1
+    psraw      m3, xm0
+    movu       [r0 + 0 * mmsize], m2
+    movu       [r0 + 1 * mmsize], m3
+
+    ; Row 2-3
+    movu       m2, [r1 + 2 * r2]
+    movu       m3, [r1 + r3]
+    psubw      m2, m1
+    psraw      m2, xm0
+    psubw      m3, m1
+    psraw      m3, xm0
+    movu       [r0 + 2 * mmsize], m2
+    movu       [r0 + 3 * mmsize], m3
+
+    ; Row 4-5
+    lea        r1, [r1 + 4 * r2]
+    movu       m2, [r1]
+    movu       m3, [r1 + r2]
+    psubw      m2, m1
+    psraw      m2, xm0
+    psubw      m3, m1
+    psraw      m3, xm0
+    movu       [r0 + 4 * mmsize], m2
+    movu       [r0 + 5 * mmsize], m3
+
+    ; Row 6-7
+    movu       m2, [r1 + 2 * r2]
+    movu       m3, [r1 + r3]
+    psubw      m2, m1
+    psraw      m2, xm0
+    psubw      m3, m1
+    psraw      m3, xm0
+    movu       [r0 + 6 * mmsize], m2
+    movu       [r0 + 7 * mmsize], m3
+
+    add        r0, 8 * mmsize
+    lea        r1, [r1 + 4 * r2]
+    dec        r4d
+    jnz        .loop
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shr(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy2Dto1D_shr_32, 3, 4, 6   ; 32x32: 4 xmm per row, 1 row per iteration
+    add             r2d, r2d        ; srcStride: int16 elements -> bytes
+    movd            m0, r3m         ; m0 = shift
+    pcmpeqw         m1, m1          ; m1 = -1 per word
+    psllw           m1, m0          ; m1 = -(1 << shift)
+    psraw           m1, 1           ; m1 = -round
+    mov             r3d, 32/1
+
+    ; register alloc
+    ; r0 - dst
+    ; r1 - src
+    ; r2 - srcStride
+    ; r3 - loop counter
+    ; m0 - shift
+    ; m1 - word [-round]
+
+.loop:
+    ; Row 0
+    mova            m2, [r1 + 0 * mmsize]
+    mova            m3, [r1 + 1 * mmsize]
+    mova            m4, [r1 + 2 * mmsize]
+    mova            m5, [r1 + 3 * mmsize]
+    psubw           m2, m1          ; x + round
+    psubw           m3, m1
+    psubw           m4, m1
+    psubw           m5, m1
+    psraw           m2, m0          ; >> shift
+    psraw           m3, m0
+    psraw           m4, m0
+    psraw           m5, m0
+    mova            [r0 + 0 * mmsize], m2
+    mova            [r0 + 1 * mmsize], m3
+    mova            [r0 + 2 * mmsize], m4
+    mova            [r0 + 3 * mmsize], m5
+
+    add             r0, 4 * mmsize
+    add             r1, r2
+    dec             r3d
+    jnz            .loop
+    RET
+
+INIT_YMM avx2
+cglobal cpy2Dto1D_shr_32, 4, 5, 4   ; 4 declared args: shift auto-loaded into r3d
+    add        r2d, r2d             ; srcStride: int16 elements -> bytes
+    movd       xm0, r3d             ; xm0 = shift
+    pcmpeqw    m1, m1               ; m1 = -1 per word
+    psllw      m1, xm0              ; m1 = -(1 << shift)
+    psraw      m1, 1                ; m1 = -round
+    lea        r3, [r2 * 3]         ; r3 reused: 3 * srcStride
+    mov        r4d, 32/4            ; 4 rows per iteration
+
+.loop:
+    ; Row 0
+    movu       m2, [r1]
+    movu       m3, [r1 + 32]
+    psubw      m2, m1               ; x + round
+    psraw      m2, xm0              ; >> shift
+    psubw      m3, m1
+    psraw      m3, xm0
+    movu       [r0 + 0 * mmsize], m2
+    movu       [r0 + 1 * mmsize], m3
+
+    ; Row 1
+    movu       m2, [r1 + r2]
+    movu       m3, [r1 + r2 + 32]
+    psubw      m2, m1
+    psraw      m2, xm0
+    psubw      m3, m1
+    psraw      m3, xm0
+    movu       [r0 + 2 * mmsize], m2
+    movu       [r0 + 3 * mmsize], m3
+
+    ; Row 2
+    movu       m2, [r1 + 2 * r2]
+    movu       m3, [r1 + 2 * r2 + 32]
+    psubw      m2, m1
+    psraw      m2, xm0
+    psubw      m3, m1
+    psraw      m3, xm0
+    movu       [r0 + 4 * mmsize], m2
+    movu       [r0 + 5 * mmsize], m3
+
+    ; Row 3
+    movu       m2, [r1 + r3]
+    movu       m3, [r1 + r3 + 32]
+    psubw      m2, m1
+    psraw      m2, xm0
+    psubw      m3, m1
+    psraw      m3, xm0
+    movu       [r0 + 6 * mmsize], m2
+    movu       [r0 + 7 * mmsize], m3
+
+    add        r0, 8 * mmsize
+    lea        r1, [r1 + 4 * r2]
+    dec        r4d
+    jnz        .loop
+    RET
+
+;--------------------------------------------------------------------------------------
+; void cpy1Dto2D_shl(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy1Dto2D_shl_4, 3, 3, 3   ; 4x4: linear 1D -> strided 2D with left-shift
+    add         r2d, r2d           ; dstStride: int16 elements -> bytes
+    movd        m0, r3m            ; m0 = shift
+
+    ; Row 0-3
+    mova        m1, [r1 + 0 * mmsize]
+    mova        m2, [r1 + 1 * mmsize]
+    psllw       m1, m0             ; x << shift
+    psllw       m2, m0
+    movh        [r0], m1
+    movhps      [r0 + r2], m1
+    movh        [r0 + r2 * 2], m2
+    lea         r2, [r2 * 3]       ; r2 = 3 * dstStride (plain stride no longer needed)
+    movhps      [r0 + r2], m2
+    RET
+
+
+INIT_YMM avx2
+cglobal cpy1Dto2D_shl_4, 3, 3, 2   ; all 16 coefficients processed in one ymm
+    add         r2d, r2d           ; dstStride: int16 elements -> bytes
+    movd        xm0, r3m           ; xm0 = shift
+
+    ; Row 0-3
+    movu        m1, [r1]
+    psllw       m1, xm0            ; x << shift
+    vextracti128 xm0, m1, 1        ; xm0 reused for rows 2-3 (shift no longer needed)
+    movq        [r0], xm1
+    movhps      [r0 + r2], xm1
+    lea         r0, [r0 + r2 * 2]
+    movq        [r0], xm0
+    movhps      [r0 + r2], xm0
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy1Dto2D_shl(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy1Dto2D_shl_8, 3, 4, 5   ; 8x8: one xmm per row, fully unrolled
+    add         r2d, r2d           ; dstStride: int16 elements -> bytes
+    movd        m0, r3m            ; m0 = shift
+    lea         r3, [r2 * 3]       ; r3 = 3 * dstStride
+
+    ; Row 0-3
+    mova        m1, [r1 + 0 * mmsize]
+    mova        m2, [r1 + 1 * mmsize]
+    mova        m3, [r1 + 2 * mmsize]
+    mova        m4, [r1 + 3 * mmsize]
+    psllw       m1, m0             ; x << shift
+    psllw       m2, m0
+    psllw       m3, m0
+    psllw       m4, m0
+    mova        [r0], m1
+    mova        [r0 + r2], m2
+    mova        [r0 + r2 * 2], m3
+    mova        [r0 + r3], m4
+    lea         r0, [r0 + r2 * 4]
+
+    ; Row 4-7
+    mova        m1, [r1 + 4 * mmsize]
+    mova        m2, [r1 + 5 * mmsize]
+    mova        m3, [r1 + 6 * mmsize]
+    mova        m4, [r1 + 7 * mmsize]
+    psllw       m1, m0
+    psllw       m2, m0
+    psllw       m3, m0
+    psllw       m4, m0
+    mova        [r0], m1
+    mova        [r0 + r2], m2
+    mova        [r0 + r2 * 2], m3
+    mova        [r0 + r3], m4
+    RET
+
+
+INIT_YMM avx2
+cglobal cpy1Dto2D_shl_8, 3, 4, 3   ; two 8-coeff rows per ymm register
+    add         r2d, r2d           ; dstStride: int16 elements -> bytes
+    movd        xm0, r3m           ; xm0 = shift
+    lea         r3, [r2 * 3]       ; r3 = 3 * dstStride
+
+    ; Row 0-3
+    movu        m1, [r1 + 0 * mmsize]
+    movu        m2, [r1 + 1 * mmsize]
+    psllw       m1, xm0            ; x << shift
+    psllw       m2, xm0
+    movu        [r0], xm1
+    vextracti128 [r0 + r2], m1, 1  ; high lane = odd row
+    movu        [r0 + r2 * 2], xm2
+    vextracti128 [r0 + r3], m2, 1
+
+    ; Row 4-7
+    movu        m1, [r1 + 2 * mmsize]
+    movu        m2, [r1 + 3 * mmsize]
+    lea         r0, [r0 + r2 * 4]
+    psllw       m1, xm0
+    psllw       m2, xm0
+    movu        [r0], xm1
+    vextracti128 [r0 + r2], m1, 1
+    movu        [r0 + r2 * 2], xm2
+    vextracti128 [r0 + r3], m2, 1
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy1Dto2D_shl(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy1Dto2D_shl_16, 3, 4, 5   ; 16x16: 2 xmm per row, 4 rows per iteration
+    add         r2d, r2d            ; dstStride: int16 elements -> bytes
+    movd        m0, r3m             ; m0 = shift
+    mov         r3d, 16/4
+
+.loop:
+    ; Row 0-1
+    mova        m1, [r1 + 0 * mmsize]
+    mova        m2, [r1 + 1 * mmsize]
+    mova        m3, [r1 + 2 * mmsize]
+    mova        m4, [r1 + 3 * mmsize]
+    psllw       m1, m0              ; x << shift
+    psllw       m2, m0
+    psllw       m3, m0
+    psllw       m4, m0
+    mova        [r0], m1
+    mova        [r0 + 16], m2
+    mova        [r0 + r2], m3
+    mova        [r0 + r2 + 16], m4
+
+    ; Row 2-3
+    mova        m1, [r1 + 4 * mmsize]
+    mova        m2, [r1 + 5 * mmsize]
+    mova        m3, [r1 + 6 * mmsize]
+    mova        m4, [r1 + 7 * mmsize]
+    lea         r0, [r0 + r2 * 2]
+    psllw       m1, m0
+    psllw       m2, m0
+    psllw       m3, m0
+    psllw       m4, m0
+    mova        [r0], m1
+    mova        [r0 + 16], m2
+    mova        [r0 + r2], m3
+    mova        [r0 + r2 + 16], m4
+
+    add         r1, 8 * mmsize
+    lea         r0, [r0 + r2 * 2]
+    dec         r3d
+    jnz        .loop
+    RET
+
+
+INIT_YMM avx2
+cglobal cpy1Dto2D_shl_16, 3, 5, 3   ; one ymm per 16-coeff row, 4 rows per iteration
+    add         r2d, r2d            ; dstStride: int16 elements -> bytes
+    movd        xm0, r3m            ; xm0 = shift
+    mov         r3d, 16/4
+    lea         r4, [r2 * 3]        ; r4 = 3 * dstStride
+
+.loop:
+    ; Row 0-1
+    movu        m1, [r1 + 0 * mmsize]
+    movu        m2, [r1 + 1 * mmsize]
+    psllw       m1, xm0             ; x << shift
+    psllw       m2, xm0
+    movu        [r0], m1
+    movu        [r0 + r2], m2
+
+    ; Row 2-3
+    movu        m1, [r1 + 2 * mmsize]
+    movu        m2, [r1 + 3 * mmsize]
+    psllw       m1, xm0
+    psllw       m2, xm0
+    movu        [r0 + r2 * 2], m1
+    movu        [r0 + r4], m2
+
+    add         r1, 4 * mmsize
+    lea         r0, [r0 + r2 * 4]
+    dec         r3d
+    jnz        .loop
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy1Dto2D_shl(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy1Dto2D_shl_32, 3, 4, 5   ; 32x32: 4 xmm per row, 2 rows per iteration
+    add         r2d, r2d            ; dstStride: int16 elements -> bytes
+    movd        m0, r3m             ; m0 = shift
+    mov         r3d, 32/2
+
+.loop:
+    ; Row 0
+    mova        m1, [r1 + 0 * mmsize]
+    mova        m2, [r1 + 1 * mmsize]
+    mova        m3, [r1 + 2 * mmsize]
+    mova        m4, [r1 + 3 * mmsize]
+    psllw       m1, m0              ; x << shift
+    psllw       m2, m0
+    psllw       m3, m0
+    psllw       m4, m0
+    mova        [r0 + 0 * mmsize], m1
+    mova        [r0 + 1 * mmsize], m2
+    mova        [r0 + 2 * mmsize], m3
+    mova        [r0 + 3 * mmsize], m4
+
+    ; Row 1
+    mova        m1, [r1 + 4 * mmsize]
+    mova        m2, [r1 + 5 * mmsize]
+    mova        m3, [r1 + 6 * mmsize]
+    mova        m4, [r1 + 7 * mmsize]
+    psllw       m1, m0
+    psllw       m2, m0
+    psllw       m3, m0
+    psllw       m4, m0
+    mova        [r0 + r2 + 0 * mmsize], m1
+    mova        [r0 + r2 + 1 * mmsize], m2
+    mova        [r0 + r2 + 2 * mmsize], m3
+    mova        [r0 + r2 + 3 * mmsize], m4
+
+    add         r1, 8 * mmsize
+    lea         r0, [r0 + r2 * 2]
+    dec         r3d
+    jnz        .loop
+    RET
+
+
+INIT_YMM avx2
+cglobal cpy1Dto2D_shl_32, 3, 4, 5   ; 2 ymm per 32-coeff row, 2 rows per iteration
+    add         r2d, r2d            ; dstStride: int16 elements -> bytes
+    movd        xm0, r3m            ; xm0 = shift
+    mov         r3d, 32/2
+
+.loop:
+    ; Row 0-1
+    movu        m1, [r1 + 0 * mmsize]
+    movu        m2, [r1 + 1 * mmsize]
+    movu        m3, [r1 + 2 * mmsize]
+    movu        m4, [r1 + 3 * mmsize]
+    psllw       m1, xm0             ; x << shift
+    psllw       m2, xm0
+    psllw       m3, xm0
+    psllw       m4, xm0
+    movu        [r0], m1
+    movu        [r0 + mmsize], m2
+    movu        [r0 + r2], m3
+    movu        [r0 + r2 + mmsize], m4
+
+    add         r1, 4 * mmsize
+    lea         r0, [r0 + r2 * 2]
+    dec         r3d
+    jnz        .loop
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; uint32_t copy_cnt(int16_t* dst, const int16_t* src, intptr_t srcStride);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal copy_cnt_4, 3,3,3         ; copies 4x4 coeff block, returns count of nonzero coeffs
+    add         r2d, r2d          ; srcStride: int16 elements -> bytes
+    pxor        m2, m2            ; m2 = 0 (comparison constant)
+
+    ; row 0 & 1
+    movh        m0, [r1]
+    movhps      m0, [r1 + r2]
+    mova        [r0], m0
+
+    ; row 2 & 3
+    movh        m1, [r1 + r2 * 2]
+    lea         r2, [r2 * 3]
+    movhps      m1, [r1 + r2]
+    mova        [r0 + 16], m1
+
+    packsswb    m0, m1            ; saturate words to bytes; zero stays zero
+    pcmpeqb     m0, m2            ; byte = 0xFF (-1) where coefficient == 0
+
+    ; get count
+    ; CHECK_ME: Intel documents said POPCNT is SSE4.2 instruction, but just implement after Nehalem
+%if 0
+    pmovmskb    eax, m0
+    not         ax
+    popcnt      ax, ax
+%else
+    mova        m1, [pb_1]
+    paddb       m0, m1            ; -1 + 1 = 0 for zero coeff, 0 + 1 = 1 for nonzero
+    psadbw      m0, m2            ; horizontal byte sums into two qwords
+    pshufd      m1, m0, 2
+    paddw       m0, m1            ; combine low/high qword sums
+    movd        eax, m0           ; eax = nonzero count
+%endif
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; uint32_t copy_cnt(int16_t* dst, const int16_t* src, intptr_t srcStride);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal copy_cnt_8, 3,3,6         ; copies 8x8 coeff block, returns count of nonzero coeffs
+    add         r2d, r2d          ; srcStride: int16 elements -> bytes
+    pxor        m4, m4            ; m4 = 0 (comparison constant)
+    pxor        m5, m5            ; m5 = per-byte accumulator of -1 per zero coeff
+
+    ; row 0 & 1
+    movu        m0, [r1]
+    movu        m1, [r1 + r2]
+    movu        [r0], m0
+    movu        [r0 + 16], m1
+
+    packsswb    m0, m1            ; saturate words to bytes; zero stays zero
+    pcmpeqb     m0, m4            ; 0xFF (-1) where coefficient == 0
+    paddb       m5, m0
+
+    ; row 2 & 3
+    lea         r1, [r1 + 2 * r2]
+    movu        m0, [r1]
+    movu        m1, [r1 + r2]
+    movu        [r0 + 32], m0
+    movu        [r0 + 48], m1
+
+    packsswb    m0, m1
+    pcmpeqb     m0, m4
+    paddb       m5, m0
+
+    ; row 4 & 5
+    lea         r1, [r1 + 2 * r2]
+    movu        m0, [r1]
+    movu        m1, [r1 + r2]
+    movu        [r0 + 64], m0
+    movu        [r0 + 80], m1
+
+    packsswb    m0, m1
+    pcmpeqb     m0, m4
+    paddb       m5, m0
+
+    ; row 6 & 7
+    lea         r1, [r1 + 2 * r2]
+    movu        m0, [r1]
+    movu        m1, [r1 + r2]
+    movu        [r0 + 96], m0
+    movu        [r0 + 112], m1
+
+    packsswb    m0, m1
+    pcmpeqb     m0, m4
+    paddb       m5, m0
+
+    ; get count: each lane saw 4 coeffs, so bias by 4 to turn -zeros into nonzeros
+    mova        m0, [pb_4]
+    paddb       m5, m0
+    psadbw      m5, m4            ; horizontal byte sums into two qwords
+    pshufd      m0, m5, 2
+    paddw       m5, m0
+    movd        eax, m5           ; eax = nonzero count
+    RET
+
+
+INIT_YMM avx2
+cglobal copy_cnt_8, 3,4,5         ; AVX2: copies 8x8, counts nonzero via pminub trick
+    add         r2d, r2d          ; srcStride: int16 elements -> bytes
+    lea         r3, [r2 * 3]      ; r3 = 3 * srcStride
+
+    ; row 0 - 1
+    movu        xm0, [r1]
+    vinserti128 m0, m0, [r1 + r2], 1
+    movu        [r0], m0
+
+    ; row 2 - 3
+    movu        xm1, [r1 + r2 * 2]
+    vinserti128 m1, m1, [r1 + r3], 1
+    movu        [r0 + 32], m1
+    lea         r1,  [r1 + r2 * 4]
+
+    ; row 4 - 5
+    movu        xm2, [r1]
+    vinserti128 m2, m2, [r1 + r2], 1
+    movu        [r0 + 64], m2
+
+    ; row 6 - 7
+    movu        xm3, [r1 + r2 * 2]
+    vinserti128 m3, m3, [r1 + r3], 1
+    movu        [r0 + 96], m3
+
+    ; get count
+    xorpd        m4, m4
+    vpacksswb    m0, m1           ; saturate words to bytes; zero stays zero
+    vpacksswb    m2, m3
+    pminub       m0, [pb_1]       ; clamp each byte to 1 if nonzero, 0 if zero
+    pminub       m2, [pb_1]
+    paddb        m0, m2
+    vextracti128 xm1, m0, 1
+    paddb        xm0, xm1
+    psadbw       xm0, xm4         ; horizontal byte sums
+    movhlps      xm1, xm0
+    paddd        xm0, xm1
+    movd         eax, xm0         ; eax = nonzero count
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; uint32_t copy_cnt(int16_t* dst, const int16_t* src, intptr_t srcStride);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal copy_cnt_16, 3,4,6        ; copies 16x16 coeff block, returns nonzero count
+    add         r2d, r2d          ; srcStride: int16 elements -> bytes
+    mov         r3d, 4            ; 4 iterations x 4 rows
+    pxor        m4, m4            ; m4 = 0 (comparison constant)
+    pxor        m5, m5            ; m5 = per-byte accumulator of -1 per zero coeff
+
+.loop:
+    ; row 0
+    movu        m0, [r1]
+    movu        m1, [r1 + 16]
+    movu        [r0], m0
+    movu        [r0 + 16], m1
+
+    packsswb    m0, m1            ; saturate words to bytes; zero stays zero
+    pcmpeqb     m0, m4            ; 0xFF (-1) where coefficient == 0
+    paddb       m5, m0
+
+    ; row 1
+    movu        m0, [r1 + r2]
+    movu        m1, [r1 + r2 + 16]
+    movu        [r0 + 32], m0
+    movu        [r0 + 48], m1
+
+    packsswb    m0, m1
+    pcmpeqb     m0, m4
+    paddb       m5, m0
+
+    ; row 2
+    movu        m0, [r1 + 2 * r2]
+    movu        m1, [r1 + 2 * r2 + 16]
+    movu        [r0 + 64], m0
+    movu        [r0 + 80], m1
+
+    packsswb    m0, m1
+    pcmpeqb     m0, m4
+    paddb       m5, m0
+
+    ; row 3
+    lea         r1, [r1 + 2 * r2]
+    movu        m0, [r1 + r2]
+    movu        m1, [r1 + r2 + 16]
+    movu        [r0 + 96], m0
+    movu        [r0 + 112], m1
+
+    packsswb    m0, m1
+    pcmpeqb     m0, m4
+    paddb       m5, m0
+
+    add         r0, 128
+    lea         r1, [r1 + 2 * r2]
+    dec         r3d
+    jnz        .loop
+
+    ; get count: each lane saw 16 coeffs, bias by 16 to turn -zeros into nonzeros
+    mova        m0, [pb_16]
+    paddb       m5, m0
+    psadbw      m5, m4            ; horizontal byte sums into two qwords
+    pshufd      m0, m5, 2
+    paddw       m5, m0
+    movd        eax, m5           ; eax = nonzero count
+    RET
+
+
+INIT_YMM avx2
+cglobal copy_cnt_16, 3, 5, 5      ; AVX2: copies 16x16, counts nonzero via pminub trick
+    add         r2d, r2d          ; srcStride: int16 elements -> bytes
+    lea         r3,  [r2 * 3]     ; r3 = 3 * srcStride
+    mov         r4d, 16/4         ; 4 rows per iteration
+
+    mova        m3, [pb_1]        ; clamp constant
+    xorpd       m4, m4            ; m4 = per-byte nonzero accumulator
+
+.loop:
+    ; row 0 - 1
+    movu        m0, [r1]
+    movu        [r0], m0
+    movu        m1, [r1 + r2]
+    movu        [r0 + 32], m1
+
+    packsswb    m0, m1            ; saturate words to bytes; zero stays zero
+    pminub      m0, m3            ; clamp each byte to 1 if nonzero, 0 if zero
+
+    ; row 2 - 3
+    movu        m1, [r1 + r2 * 2]
+    movu        [r0 + 64], m1
+    movu        m2, [r1 + r3]
+    movu        [r0 + 96], m2
+
+    packsswb    m1, m2
+    pminub      m1, m3
+    paddb       m0, m1
+    paddb       m4, m0
+
+    add         r0, 128
+    lea         r1, [r1 + 4 * r2]
+    dec         r4d
+    jnz         .loop
+
+    ; get count
+    xorpd        m0,  m0
+    vextracti128 xm1, m4, 1
+    paddb        xm4, xm1
+    psadbw       xm4, xm0         ; horizontal byte sums
+    movhlps      xm1, xm4
+    paddd        xm4, xm1
+    movd         eax, xm4         ; eax = nonzero count
+    RET
+
+;--------------------------------------------------------------------------------------
+; uint32_t copy_cnt(int16_t* dst, const int16_t* src, intptr_t srcStride);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal copy_cnt_32, 3,4,6        ; copies 32x32 coeff block, returns nonzero count
+    add         r2d, r2d          ; srcStride: int16 elements -> bytes
+    mov         r3d, 16           ; 16 iterations x 2 rows
+    pxor        m4, m4            ; m4 = 0 (comparison constant)
+    pxor        m5, m5            ; m5 = per-byte accumulator of -1 per zero coeff
+
+.loop:
+    ; row 0
+    movu        m0, [r1]
+    movu        m1, [r1 + 16]
+    movu        [r0], m0
+    movu        [r0 + 16], m1
+
+    packsswb    m0, m1            ; saturate words to bytes; zero stays zero
+    pcmpeqb     m0, m4            ; 0xFF (-1) where coefficient == 0
+    paddb       m5, m0
+
+    movu        m0, [r1 + 32]
+    movu        m1, [r1 + 48]
+    movu        [r0 + 32], m0
+    movu        [r0 + 48], m1
+
+    packsswb    m0, m1
+    pcmpeqb     m0, m4
+    paddb       m5, m0
+
+    ; row 1
+    movu        m0, [r1 + r2]
+    movu        m1, [r1 + r2 + 16]
+    movu        [r0 + 64], m0
+    movu        [r0 + 80], m1
+
+    packsswb    m0, m1
+    pcmpeqb     m0, m4
+    paddb       m5, m0
+
+    movu        m0, [r1 + r2 + 32]
+    movu        m1, [r1 + r2 + 48]
+    movu        [r0 + 96], m0
+    movu        [r0 + 112], m1
+
+    packsswb    m0, m1
+    pcmpeqb     m0, m4
+    paddb       m5, m0
+
+    add         r0, 128
+    lea         r1, [r1 + 2 * r2]
+    dec         r3d
+    jnz        .loop
+
+    ; get count: each lane saw 64 coeffs, bias by 64 to turn -zeros into nonzeros
+    mova        m0, [pb_64]
+    paddb       m5, m0
+    psadbw      m5, m4            ; horizontal byte sums into two qwords
+    pshufd      m0, m5, 2
+    paddw       m5, m0
+    movd        eax, m5           ; eax = nonzero count
+    RET
+
+
+INIT_YMM avx2
+cglobal copy_cnt_32, 3, 5, 5      ; AVX2: copies 32x32, counts nonzero via pminub trick
+    add         r2d, r2d          ; srcStride: int16 elements -> bytes
+    mov         r3d, 32/2         ; 2 rows per iteration
+
+    mova        m3, [pb_1]        ; clamp constant
+    xorpd       m4, m4            ; m4 = per-byte nonzero accumulator
+
+.loop:
+    ; row 0
+    movu        m0, [r1]
+    movu        [r0], m0
+    movu        m1, [r1 + 32]
+    movu        [r0 + 32], m1
+
+    packsswb    m0, m1            ; saturate words to bytes; zero stays zero
+    pminub      m0, m3            ; clamp each byte to 1 if nonzero, 0 if zero
+
+    ; row 1
+    movu        m1, [r1 + r2]
+    movu        [r0 + 64], m1
+    movu        m2, [r1 + r2 + 32]
+    movu        [r0 + 96], m2
+
+    packsswb    m1, m2
+    pminub      m1, m3
+    paddb       m0, m1
+    paddb       m4, m0
+
+    add         r0, 128
+    lea         r1, [r1 + 2 * r2]
+    dec         r3d
+    jnz         .loop
+
+    ; get count
+    xorpd        m0,  m0
+    vextracti128 xm1, m4, 1
+    paddb        xm4, xm1
+    psadbw       xm4, xm0         ; horizontal byte sums
+    movhlps      xm1, xm4
+    paddd        xm4, xm1
+    movd         eax, xm4         ; eax = nonzero count
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shl(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy2Dto1D_shl_4, 4, 4, 4   ; 4x4: strided 2D -> linear 1D with left-shift
+    add             r2d, r2d       ; srcStride: int16 elements -> bytes
+    movd            m0, r3d        ; m0 = shift (4 declared args: arrives in r3d)
+
+    ; register alloc
+    ; r0 - dst
+    ; r1 - src
+    ; r2 - srcStride
+    ; m0 - shift
+
+    ; Row 0-3
+    movh            m2, [r1]
+    movhps          m2, [r1 + r2]
+    lea             r1, [r1 + r2 * 2]
+    movh            m3, [r1]
+    movhps          m3, [r1 + r2]
+    psllw           m2, m0         ; x << shift
+    psllw           m3, m0
+    mova            [r0 + 0 * mmsize], m2
+    mova            [r0 + 1 * mmsize], m3
+
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shl(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy2Dto1D_shl_8, 4, 5, 4   ; 8x8: strided 2D -> linear 1D with left-shift
+    add             r2d, r2d       ; srcStride: int16 elements -> bytes
+    movd            m0, r3d        ; m0 = shift
+    mov             r3d, 8/4       ; 4 rows per iteration (r3 reused as counter)
+    lea             r4, [r2 * 3]
+
+    ; register alloc
+    ; r0 - dst
+    ; r1 - src
+    ; r2 - srcStride
+    ; r3 - loop counter
+    ; r4 - stride * 3
+    ; m0 - shift
+
+.loop:
+    ; Row 0, 1
+    mova            m2, [r1]
+    mova            m3, [r1 + r2]
+    psllw           m2, m0         ; x << shift
+    psllw           m3, m0
+    mova            [r0 + 0 * mmsize], m2
+    mova            [r0 + 1 * mmsize], m3
+
+    ; Row 2, 3
+    mova            m2, [r1 + r2 * 2]
+    mova            m3, [r1 + r4]
+    psllw           m2, m0
+    psllw           m3, m0
+    mova            [r0 + 2 * mmsize], m2
+    mova            [r0 + 3 * mmsize], m3
+
+    add             r0, 4 * mmsize
+    lea             r1, [r1 + r2 * 4]
+    dec             r3d
+    jnz            .loop
+    RET
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shl_8(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal cpy2Dto1D_shl_8, 4, 5, 2   ; two 8-coeff rows per ymm register, fully unrolled
+    add     r2d, r2d               ; srcStride: int16 elements -> bytes
+    movd    xm0, r3d               ; xm0 = shift
+    lea     r4, [3 * r2]           ; r4 = 3 * srcStride
+
+    ; Row 0, 1
+    movu           xm1, [r1]
+    vinserti128    m1, m1, [r1 + r2], 1
+    psllw          m1, xm0         ; x << shift
+    movu           [r0], m1
+
+    ; Row 2, 3
+    movu           xm1, [r1 + 2 * r2]
+    vinserti128    m1, m1, [r1 + r4], 1
+    psllw          m1, xm0
+    movu           [r0 + 32], m1
+
+    lea            r1, [r1 + 4 * r2]
+
+    ; Row 4, 5
+    movu           xm1, [r1]
+    vinserti128    m1, m1, [r1 + r2], 1
+    psllw          m1, xm0
+    movu           [r0 + 64], m1
+
+    ; Row 6, 7
+    movu           xm1, [r1 + 2 * r2]
+    vinserti128    m1, m1, [r1 + r4], 1
+    psllw          m1, xm0
+    movu           [r0 + 96], m1
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shl(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy2Dto1D_shl_16, 4, 4, 4   ; 16x16: 2 xmm per row, 2 rows per iteration
+    add             r2d, r2d        ; srcStride: int16 elements -> bytes
+    movd            m0, r3d         ; m0 = shift
+    mov             r3d, 16/2       ; r3 reused as loop counter
+
+    ; register alloc
+    ; r0 - dst
+    ; r1 - src
+    ; r2 - srcStride
+    ; r3 - loop counter
+    ; m0 - shift
+
+.loop:
+    ; Row 0
+    mova            m2, [r1 + 0 * mmsize]
+    mova            m3, [r1 + 1 * mmsize]
+    psllw           m2, m0          ; x << shift
+    psllw           m3, m0
+    mova            [r0 + 0 * mmsize], m2
+    mova            [r0 + 1 * mmsize], m3
+
+    ; Row 1
+    mova            m2, [r1 + r2 + 0 * mmsize]
+    mova            m3, [r1 + r2 + 1 * mmsize]
+    psllw           m2, m0
+    psllw           m3, m0
+    mova            [r0 + 2 * mmsize], m2
+    mova            [r0 + 3 * mmsize], m3
+
+    add             r0, 4 * mmsize
+    lea             r1, [r1 + r2 * 2]
+    dec             r3d
+    jnz            .loop
+    RET
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shl_16(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal cpy2Dto1D_shl_16, 3, 5, 3    ; 16x16 TU, AVX2: one 32-byte row per ymm register
+    add    r2d, r2d                  ; srcStride: elements -> bytes
+    movd   xm0, r3m                  ; shift from stack (only 3 args auto-loaded)
+    mov    r3d, 16/4                 ; 16 rows, 4 rows per iteration
+    lea     r4, [r2 * 3]             ; r4 = 3 * srcStride
+
+.loop:
+    ; Row 0-1
+    movu     m1, [r1]
+    movu     m2, [r1 + r2]
+    psllw    m1, xm0
+    psllw    m2, xm0
+    movu     [r0 + 0 * mmsize], m1
+    movu     [r0 + 1 * mmsize], m2
+
+    ; Row 2-3
+    movu     m1, [r1 + 2 * r2]
+    movu     m2, [r1 + r4]
+    psllw    m1, xm0
+    psllw    m2, xm0
+    movu     [r0 + 2 * mmsize], m1
+    movu     [r0 + 3 * mmsize], m2
+
+    add      r0, 4 * mmsize          ; 4 rows = 128 contiguous dst bytes
+    lea      r1, [r1 + r2 * 4]
+    dec      r3d
+    jnz      .loop
+    RET
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shl_32(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy2Dto1D_shl_32, 4, 4, 6    ; 32x32 TU: linearize strided int16 coeffs, left-shifting by 'shift'
+    add             r2d, r2d         ; srcStride: elements -> bytes
+    movd            m0, r3d          ; m0 = shift amount
+    mov             r3d, 32/1        ; 32 rows, 1 row (64 bytes) per iteration
+
+    ; register alloc
+    ; r0 - dst
+    ; r1 - src
+    ; r2 - srcStride
+    ; r3 - loop counter
+    ; m0 - shift
+
+.loop:
+    ; Row 0
+    mova            m2, [r1 + 0 * mmsize]
+    mova            m3, [r1 + 1 * mmsize]
+    mova            m4, [r1 + 2 * mmsize]
+    mova            m5, [r1 + 3 * mmsize]
+    psllw           m2, m0
+    psllw           m3, m0
+    psllw           m4, m0
+    psllw           m5, m0
+    mova            [r0 + 0 * mmsize], m2
+    mova            [r0 + 1 * mmsize], m3
+    mova            [r0 + 2 * mmsize], m4
+    mova            [r0 + 3 * mmsize], m5
+
+    add             r0, 4 * mmsize
+    add             r1, r2
+    dec             r3d
+    jnz            .loop
+    RET
+
+;--------------------------------------------------------------------------------------
+; void cpy2Dto1D_shl_32(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+;--------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal cpy2Dto1D_shl_32, 3, 5, 5    ; 32x32 TU, AVX2: each row is 64 bytes = two ymm loads
+    add     r2d, r2d                 ; srcStride: elements -> bytes
+    movd    xm0, r3m                 ; shift from stack (only 3 args auto-loaded)
+    mov     r3d, 32/4                ; 32 rows, 4 rows per iteration
+    lea     r4, [3 * r2]             ; r4 = 3 * srcStride
+
+.loop:
+    ; Row 0-1
+    movu     m1, [r1]
+    movu     m2, [r1 + 32]
+    movu     m3, [r1 + r2]
+    movu     m4, [r1 + r2 + 32]
+
+    psllw    m1, xm0
+    psllw    m2, xm0
+    psllw    m3, xm0
+    psllw    m4, xm0
+    movu     [r0], m1
+    movu     [r0 + mmsize], m2
+    movu     [r0 + 2 * mmsize], m3
+    movu     [r0 + 3 * mmsize], m4
+
+    ; Row 2-3
+    movu     m1, [r1 + 2 * r2]
+    movu     m2, [r1 + 2 * r2 + 32]
+    movu     m3, [r1 + r4]
+    movu     m4, [r1 + r4 + 32]
+
+    psllw    m1, xm0
+    psllw    m2, xm0
+    psllw    m3, xm0
+    psllw    m4, xm0
+    movu     [r0 + 4 * mmsize], m1
+    movu     [r0 + 5 * mmsize], m2
+    movu     [r0 + 6 * mmsize], m3
+    movu     [r0 + 7 * mmsize], m4
+
+    add      r0, 8 * mmsize          ; 4 rows = 256 contiguous dst bytes
+    lea      r1, [r1 + r2 * 4]
+    dec      r3d
+    jnz      .loop
+    RET
+
+;--------------------------------------------------------------------------------------
+; void cpy1Dto2D_shr(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy1Dto2D_shr_4, 3, 3, 4    ; 4x4 TU: copy linear int16 buffer to strided dst with rounded right shift
+    add         r2d, r2d            ; dstStride: elements -> bytes
+    movd        m0, r3m             ; m0 = shift amount
+    pcmpeqw     m1, m1              ; m1 = all-ones (-1 per word)
+    psllw       m1, m0              ; m1 = -(1 << shift)
+    psraw       m1, 1               ; m1 = -(1 << (shift - 1)): negated rounding offset
+
+    ; Row 0-3
+    mova        m2, [r1 + 0 * mmsize]
+    mova        m3, [r1 + 1 * mmsize]
+    psubw       m2, m1              ; src + (1 << (shift - 1))
+    psubw       m3, m1
+    psraw       m2, m0              ; arithmetic shift right -> rounded result
+    psraw       m3, m0
+    movh        [r0], m2            ; low/high 8 bytes of each xmm = one 4-coeff row
+    movhps      [r0 + r2], m2
+    movh        [r0 + r2 * 2], m3
+    lea         r2, [r2 * 3]
+    movhps      [r0 + r2], m3
+    RET
+
+
+INIT_YMM avx2
+cglobal cpy1Dto2D_shr_4, 3, 3, 3    ; 4x4 TU, AVX2: all 16 coeffs fit in one ymm
+    add         r2d, r2d            ; dstStride: elements -> bytes
+    movd        xm0, r3m            ; shift amount
+    pcmpeqw     m1, m1              ; all-ones
+    psllw       m1, xm0
+    psraw       m1, 1               ; m1 = -(1 << (shift - 1)): negated rounding offset
+
+    ; Row 0-3
+    movu        m2, [r1]
+    psubw       m2, m1              ; add rounding offset
+    psraw       m2, xm0
+    vextracti128 xm1, m2, 1         ; xm1 = rows 2-3 (m1's constant no longer needed)
+    movq        [r0], xm2
+    movhps      [r0 + r2], xm2
+    lea         r0, [r0 + r2 * 2]
+    movq        [r0], xm1
+    movhps      [r0 + r2], xm1
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy1Dto2D_shr(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy1Dto2D_shr_8, 3, 4, 6    ; 8x8 TU: linear -> strided with rounded right shift
+    add         r2d, r2d            ; dstStride: elements -> bytes
+    movd        m0, r3m             ; shift amount
+    pcmpeqw     m1, m1
+    psllw       m1, m0
+    psraw       m1, 1               ; m1 = -(1 << (shift - 1)): negated rounding offset
+    lea         r3, [r2 * 3]        ; r3 = 3 * dstStride
+
+    ; Row 0-3
+    mova        m2, [r1 + 0 * mmsize]
+    mova        m3, [r1 + 1 * mmsize]
+    mova        m4, [r1 + 2 * mmsize]
+    mova        m5, [r1 + 3 * mmsize]
+    psubw       m2, m1              ; add rounding offset
+    psubw       m3, m1
+    psubw       m4, m1
+    psubw       m5, m1
+    psraw       m2, m0
+    psraw       m3, m0
+    psraw       m4, m0
+    psraw       m5, m0
+    mova        [r0], m2
+    mova        [r0 + r2], m3
+    mova        [r0 + r2 * 2], m4
+    mova        [r0 + r3], m5
+
+    ; Row 4-7
+    mova        m2, [r1 + 4 * mmsize]
+    mova        m3, [r1 + 5 * mmsize]
+    mova        m4, [r1 + 6 * mmsize]
+    mova        m5, [r1 + 7 * mmsize]
+    lea         r0, [r0 + r2 * 4]   ; advance dst by 4 rows
+    psubw       m2, m1
+    psubw       m3, m1
+    psubw       m4, m1
+    psubw       m5, m1
+    psraw       m2, m0
+    psraw       m3, m0
+    psraw       m4, m0
+    psraw       m5, m0
+    mova        [r0], m2
+    mova        [r0 + r2], m3
+    mova        [r0 + r2 * 2], m4
+    mova        [r0 + r3], m5
+    RET
+
+
+INIT_YMM avx2
+cglobal cpy1Dto2D_shr_8, 3, 4, 4    ; 8x8 TU, AVX2: two rows per ymm
+    add         r2d, r2d            ; dstStride: elements -> bytes
+    movd        xm0, r3m            ; shift amount
+    pcmpeqw     m1, m1
+    psllw       m1, xm0
+    psraw       m1, 1               ; m1 = -(1 << (shift - 1)): negated rounding offset
+    lea         r3, [r2 * 3]        ; r3 = 3 * dstStride
+
+    ; Row 0-3
+    movu        m2, [r1 + 0 * mmsize]
+    movu        m3, [r1 + 1 * mmsize]
+    psubw       m2, m1              ; add rounding offset
+    psubw       m3, m1
+    psraw       m2, xm0
+    psraw       m3, xm0
+    movu        [r0], xm2           ; low lane -> even row, high lane -> odd row
+    vextracti128 [r0 + r2], m2, 1
+    movu        [r0 + r2 * 2], xm3
+    vextracti128 [r0 + r3], m3, 1
+
+    ; Row 4-7
+    movu        m2, [r1 + 2 * mmsize]
+    movu        m3, [r1 + 3 * mmsize]
+    lea         r0, [r0 + r2 * 4]   ; advance dst by 4 rows
+    psubw       m2, m1
+    psubw       m3, m1
+    psraw       m2, xm0
+    psraw       m3, xm0
+    movu        [r0], xm2
+    vextracti128 [r0 + r2], m2, 1
+    movu        [r0 + r2 * 2], xm3
+    vextracti128 [r0 + r3], m3, 1
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy1Dto2D_shr(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy1Dto2D_shr_16, 3, 5, 6    ; 16x16 TU: linear -> strided with rounded right shift
+    add         r2d, r2d             ; dstStride: elements -> bytes
+    movd        m0, r3m              ; shift amount
+    pcmpeqw     m1, m1
+    psllw       m1, m0
+    psraw       m1, 1                ; m1 = -(1 << (shift - 1)): negated rounding offset
+    mov         r3d, 16/4            ; 16 rows, 4 rows per iteration
+    lea         r4, [r2 * 3]         ; r4 = 3 * dstStride
+
+.loop:
+    ; Row 0-1
+    mova        m2, [r1 + 0 * mmsize]
+    mova        m3, [r1 + 1 * mmsize]
+    mova        m4, [r1 + 2 * mmsize]
+    mova        m5, [r1 + 3 * mmsize]
+    psubw       m2, m1               ; add rounding offset
+    psubw       m3, m1
+    psubw       m4, m1
+    psubw       m5, m1
+    psraw       m2, m0
+    psraw       m3, m0
+    psraw       m4, m0
+    psraw       m5, m0
+    mova        [r0], m2             ; each row spans two xmm stores (32 bytes)
+    mova        [r0 + mmsize], m3
+    mova        [r0 + r2], m4
+    mova        [r0 + r2 + mmsize], m5
+
+    ; Row 2-3
+    mova        m2, [r1 + 4 * mmsize]
+    mova        m3, [r1 + 5 * mmsize]
+    mova        m4, [r1 + 6 * mmsize]
+    mova        m5, [r1 + 7 * mmsize]
+    psubw       m2, m1
+    psubw       m3, m1
+    psubw       m4, m1
+    psubw       m5, m1
+    psraw       m2, m0
+    psraw       m3, m0
+    psraw       m4, m0
+    psraw       m5, m0
+    mova        [r0 + r2 * 2], m2
+    mova        [r0 + r2 * 2 + mmsize], m3
+    mova        [r0 + r4], m4
+    mova        [r0 + r4 + mmsize], m5
+
+    add         r1, 8 * mmsize       ; 4 rows consumed from linear src
+    lea         r0, [r0 + r2 * 4]
+    dec         r3d
+    jnz        .loop
+    RET
+
+
+INIT_YMM avx2
+cglobal cpy1Dto2D_shr_16, 3, 5, 4    ; 16x16 TU, AVX2: one 32-byte row per ymm
+    add         r2d, r2d             ; dstStride: elements -> bytes
+    movd        xm0, r3m             ; shift amount
+    pcmpeqw     m1, m1
+    psllw       m1, xm0
+    psraw       m1, 1                ; m1 = -(1 << (shift - 1)): negated rounding offset
+    mov         r3d, 16/4            ; 16 rows, 4 rows per iteration
+    lea         r4, [r2 * 3]         ; r4 = 3 * dstStride
+
+.loop:
+    ; Row 0-1
+    movu        m2, [r1 + 0 * mmsize]
+    movu        m3, [r1 + 1 * mmsize]
+    psubw       m2, m1               ; add rounding offset
+    psubw       m3, m1
+    psraw       m2, xm0
+    psraw       m3, xm0
+    movu        [r0], m2
+    movu        [r0 + r2], m3
+
+    ; Row 2-3
+    movu        m2, [r1 + 2 * mmsize]
+    movu        m3, [r1 + 3 * mmsize]
+    psubw       m2, m1
+    psubw       m3, m1
+    psraw       m2, xm0
+    psraw       m3, xm0
+    movu        [r0 + r2 * 2], m2
+    movu        [r0 + r4], m3
+
+    add         r1, 4 * mmsize       ; 4 rows consumed from linear src
+    lea         r0, [r0 + r2 * 4]
+    dec         r3d
+    jnz        .loop
+    RET
+
+
+;--------------------------------------------------------------------------------------
+; void cpy1Dto2D_shr(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+;--------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal cpy1Dto2D_shr_32, 3, 4, 6    ; 32x32 TU: linear -> strided with rounded right shift
+    add         r2d, r2d             ; dstStride: elements -> bytes
+    movd        m0, r3m              ; shift amount
+    pcmpeqw     m1, m1
+    psllw       m1, m0
+    psraw       m1, 1                ; m1 = -(1 << (shift - 1)): negated rounding offset
+    mov         r3d, 32/2            ; 32 rows, 2 rows per iteration
+
+.loop:
+    ; Row 0
+    mova        m2, [r1 + 0 * mmsize]
+    mova        m3, [r1 + 1 * mmsize]
+    mova        m4, [r1 + 2 * mmsize]
+    mova        m5, [r1 + 3 * mmsize]
+    psubw       m2, m1               ; add rounding offset
+    psubw       m3, m1
+    psubw       m4, m1
+    psubw       m5, m1
+    psraw       m2, m0
+    psraw       m3, m0
+    psraw       m4, m0
+    psraw       m5, m0
+    mova        [r0 + 0 * mmsize], m2
+    mova        [r0 + 1 * mmsize], m3
+    mova        [r0 + 2 * mmsize], m4
+    mova        [r0 + 3 * mmsize], m5
+
+    ; Row 1
+    mova        m2, [r1 + 4 * mmsize]
+    mova        m3, [r1 + 5 * mmsize]
+    mova        m4, [r1 + 6 * mmsize]
+    mova        m5, [r1 + 7 * mmsize]
+    psubw       m2, m1
+    psubw       m3, m1
+    psubw       m4, m1
+    psubw       m5, m1
+    psraw       m2, m0
+    psraw       m3, m0
+    psraw       m4, m0
+    psraw       m5, m0
+    mova        [r0 + r2 + 0 * mmsize], m2
+    mova        [r0 + r2 + 1 * mmsize], m3
+    mova        [r0 + r2 + 2 * mmsize], m4
+    mova        [r0 + r2 + 3 * mmsize], m5
+
+    add         r1, 8 * mmsize       ; 2 rows consumed from linear src
+    lea         r0, [r0 + r2 * 2]
+    dec         r3d
+    jnz        .loop
+    RET
+
+
+INIT_YMM avx2
+cglobal cpy1Dto2D_shr_32, 3, 4, 6    ; 32x32 TU, AVX2: each row is two ymm loads
+    add         r2d, r2d             ; dstStride: elements -> bytes
+    movd        xm0, r3m             ; shift amount
+    pcmpeqw     m1, m1
+    psllw       m1, xm0
+    psraw       m1, 1                ; m1 = -(1 << (shift - 1)): negated rounding offset
+    mov         r3d, 32/2            ; 32 rows, 2 rows per iteration
+
+.loop:
+    ; Row 0-1
+    movu        m2, [r1 + 0 * mmsize]
+    movu        m3, [r1 + 1 * mmsize]
+    movu        m4, [r1 + 2 * mmsize]
+    movu        m5, [r1 + 3 * mmsize]
+    psubw       m2, m1               ; add rounding offset
+    psubw       m3, m1
+    psubw       m4, m1
+    psubw       m5, m1
+    psraw       m2, xm0
+    psraw       m3, xm0
+    psraw       m4, xm0
+    psraw       m5, xm0
+    movu        [r0], m2
+    movu        [r0 + mmsize], m3
+    movu        [r0 + r2], m4
+    movu        [r0 + r2 + mmsize], m5
+
+    add         r1, 4 * mmsize       ; 2 rows consumed from linear src
+    lea         r0, [r0 + r2 * 2]
+    dec         r3d
+    jnz        .loop
+    RET
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/blockcopy8.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,63 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_BLOCKCOPY8_H
+#define X265_BLOCKCOPY8_H
+
+FUNCDEF_TU_S(void, cpy2Dto1D_shl, sse2, int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+FUNCDEF_TU_S(void, cpy2Dto1D_shl, sse4, int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+FUNCDEF_TU_S(void, cpy2Dto1D_shl, avx2, int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+
+FUNCDEF_TU_S(void, cpy2Dto1D_shr, sse2, int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+FUNCDEF_TU_S(void, cpy2Dto1D_shr, sse4, int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+FUNCDEF_TU_S(void, cpy2Dto1D_shr, avx2, int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+
+FUNCDEF_TU_S(void, cpy1Dto2D_shl, sse2, int16_t* dst, const int16_t* src, intptr_t dstStride, int shift);
+FUNCDEF_TU_S(void, cpy1Dto2D_shl, sse4, int16_t* dst, const int16_t* src, intptr_t dstStride, int shift);
+FUNCDEF_TU_S(void, cpy1Dto2D_shl, avx2, int16_t* dst, const int16_t* src, intptr_t dstStride, int shift);
+
+FUNCDEF_TU_S(void, cpy1Dto2D_shr, sse2, int16_t* dst, const int16_t* src, intptr_t dstStride, int shift);
+FUNCDEF_TU_S(void, cpy1Dto2D_shr, sse4, int16_t* dst, const int16_t* src, intptr_t dstStride, int shift);
+FUNCDEF_TU_S(void, cpy1Dto2D_shr, avx2, int16_t* dst, const int16_t* src, intptr_t dstStride, int shift);
+
+FUNCDEF_TU_S(uint32_t, copy_cnt, sse2, int16_t* dst, const int16_t* src, intptr_t srcStride);
+FUNCDEF_TU_S(uint32_t, copy_cnt, sse4, int16_t* dst, const int16_t* src, intptr_t srcStride);
+FUNCDEF_TU_S(uint32_t, copy_cnt, avx2, int16_t* dst, const int16_t* src, intptr_t srcStride);
+
+FUNCDEF_TU(void, blockfill_s, sse2, int16_t* dst, intptr_t dstride, int16_t val);
+FUNCDEF_TU(void, blockfill_s, avx2, int16_t* dst, intptr_t dstride, int16_t val);
+
+FUNCDEF_CHROMA_PU(void, blockcopy_ss, sse2, int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride);
+FUNCDEF_CHROMA_PU(void, blockcopy_ss, avx, int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride);
+
+FUNCDEF_CHROMA_PU(void, blockcopy_pp, sse2, pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+FUNCDEF_CHROMA_PU(void, blockcopy_pp, avx, pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+
+FUNCDEF_PU(void, blockcopy_sp, sse2, pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride);
+FUNCDEF_PU(void, blockcopy_sp, sse4, pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride);
+FUNCDEF_PU(void, blockcopy_sp, avx2, pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride);
+FUNCDEF_PU(void, blockcopy_ps, sse2, int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+FUNCDEF_PU(void, blockcopy_ps, sse4, int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+FUNCDEF_PU(void, blockcopy_ps, avx2, int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+
+#endif // ifndef X265_BLOCKCOPY8_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/const-a.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,146 @@
+;*****************************************************************************
+;* const-a.asm: x86 global constants
+;*****************************************************************************
+;* Copyright (C) 2010-2013 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*          Fiona Glaser <fiona@x264.com>
+;*          Min Chen <chenm003@163.com> <min.chen@multicorewareinc.com>
+;*          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%include "x86inc.asm"
+
+SECTION_RODATA 32
+
+;; 8-bit constants
+
+const pb_0,                 times 16 db 0
+const pb_1,                 times 32 db 1
+const pb_2,                 times 32 db 2
+const pb_3,                 times 16 db 3
+const pb_4,                 times 32 db 4
+const pb_8,                 times 32 db 8
+const pb_15,                times 32 db 15
+const pb_16,                times 32 db 16
+const pb_32,                times 32 db 32
+const pb_64,                times 32 db 64
+const pb_128,               times 32 db 128
+const pb_a1,                times 16 db 0xa1
+
+const pb_01,                times  8 db   0,   1
+const hsub_mul,             times 16 db   1,  -1
+const pw_swap,              times  2 db   6,   7,   4,   5,   2,   3,   0,   1
+const pb_unpackbd1,         times  2 db   0,   0,   0,   0,   1,   1,   1,   1,   2,   2,   2,   2,   3,   3,   3,   3
+const pb_unpackbd2,         times  2 db   4,   4,   4,   4,   5,   5,   5,   5,   6,   6,   6,   6,   7,   7,   7,   7
+const pb_unpackwq1,         times  1 db   0,   1,   0,   1,   0,   1,   0,   1,   2,   3,   2,   3,   2,   3,   2,   3
+const pb_unpackwq2,         times  1 db   4,   5,   4,   5,   4,   5,   4,   5,   6,   7,   6,   7,   6,   7,   6,   7
+const pb_shuf8x8c,          times  1 db   0,   0,   0,   0,   2,   2,   2,   2,   4,   4,   4,   4,   6,   6,   6,   6
+const pb_movemask,          times 16 db 0x00    ; 16 zero bytes followed by 16 0xFF: sliding byte-mask base
+                            times 16 db 0xFF
+
+const pb_movemask_32,       times 32 db 0x00    ; 32-byte variant: zeros, ones, zeros
+                            times 32 db 0xFF
+                            times 32 db 0x00
+
+const pb_0000000000000F0F,  times  2 db 0xff, 0x00
+                            times 12 db 0x00
+const pb_000000000000000F,           db 0xff
+                            times 15 db 0x00
+
+;; 16-bit constants
+
+const pw_1,                 times 16 dw 1
+const pw_2,                 times 16 dw 2
+const pw_3,                 times 16 dw 3
+const pw_7,                 times 16 dw 7
+const pw_m2,                times  8 dw -2
+const pw_4,                 times  8 dw 4
+const pw_8,                 times  8 dw 8
+const pw_16,                times 16 dw 16
+const pw_15,                times 16 dw 15
+const pw_31,                times 16 dw 31
+const pw_32,                times 16 dw 32
+const pw_64,                times  8 dw 64
+const pw_128,               times 16 dw 128
+const pw_256,               times 16 dw 256
+const pw_257,               times 16 dw 257
+const pw_512,               times 16 dw 512
+const pw_1023,              times 16 dw 1023
+const pw_1024,              times 16 dw 1024
+const pw_2048,              times 16 dw 2048
+const pw_4096,              times 16 dw 4096
+const pw_8192,              times  8 dw 8192
+const pw_00ff,              times 16 dw 0x00ff
+const pw_ff00,              times  8 dw 0xff00
+const pw_2000,              times 16 dw 0x2000
+const pw_8000,              times  8 dw 0x8000
+const pw_3fff,              times  8 dw 0x3fff
+const pw_32_0,              times  4 dw 32,
+                            times  4 dw 0
+const pw_pixel_max,         times 16 dw ((1 << BIT_DEPTH)-1)    ; BIT_DEPTH is an assemble-time define
+
+const pw_0_15,              times  2 dw   0,   1,   2,   3,   4,   5,   6,   7
+const pw_ppppmmmm,          times  1 dw   1,   1,   1,   1,  -1,  -1,  -1,  -1
+const pw_ppmmppmm,          times  1 dw   1,   1,  -1,  -1,   1,   1,  -1,  -1
+const pw_pmpmpmpm,          times 16 dw   1,  -1,   1,  -1,   1,  -1,   1,  -1
+const pw_pmmpzzzz,          times  1 dw   1,  -1,  -1,   1,   0,   0,   0,   0
+const multi_2Row,           times  1 dw   1,   2,   3,   4,   1,   2,   3,   4
+const multiH,               times  1 dw   9,  10,  11,  12,  13,  14,  15,  16
+const multiH3,              times  1 dw  25,  26,  27,  28,  29,  30,  31,  32
+const multiL,               times  1 dw   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,  14,  15,  16
+const multiH2,              times  1 dw  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,  30,  31,  32
+const pw_planar16_mul,      times  1 dw  15,  14,  13,  12,  11,  10,   9,   8,   7,   6,   5,   4,   3,   2,   1,   0
+const pw_planar32_mul,      times  1 dw  31,  30,  29,  28,  27,  26,  25,  24,  23,  22,  21,  20,  19,  18,  17,  16
+const pw_FFFFFFFFFFFFFFF0,           dw 0x00
+                            times  7 dw 0xff
+const hmul_16p,             times 16 db   1
+                            times  8 db   1,  -1
+
+
+;; 32-bit constants
+
+const pd_1,                 times  8 dd 1
+const pd_2,                 times  8 dd 2
+const pd_4,                 times  4 dd 4
+const pd_8,                 times  4 dd 8
+const pd_16,                times  8 dd 16
+const pd_31,                times  4 dd 31
+const pd_32,                times  8 dd 32
+const pd_64,                times  4 dd 64
+const pd_128,               times  4 dd 128
+const pd_256,               times  4 dd 256
+const pd_512,               times  4 dd 512
+const pd_1024,              times  4 dd 1024
+const pd_2048,              times  4 dd 2048
+const pd_ffff,              times  4 dd 0xffff
+const pd_32767,             times  4 dd 32767
+const pd_524416,            times  4 dd 524416
+const pd_n32768,            times  8 dd 0xffff8000    ; -32768 as unsigned dword pattern
+const pd_n131072,           times  4 dd 0xfffe0000    ; -131072 as unsigned dword pattern
+
+const trans8_shuf,          times  1 dd   0,   4,   1,   5,   2,   6,   3,   7
+
+const popcnt_table
+%assign x 0
+%rep 256
+; population count
+db ((x>>0)&1)+((x>>1)&1)+((x>>2)&1)+((x>>3)&1)+((x>>4)&1)+((x>>5)&1)+((x>>6)&1)+((x>>7)&1)
+%assign x x+1
+%endrep
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/cpu-a.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,197 @@
+;*****************************************************************************
+;* cpu-a.asm: x86 cpu utilities
+;*****************************************************************************
+;* Copyright (C) 2003-2013 x264 project
+;*
+;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
+;*          Loren Merritt <lorenm@u.washington.edu>
+;*          Fiona Glaser <fiona@x264.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%include "x86inc.asm"
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; void cpu_cpuid( int op, int *eax, int *ebx, int *ecx, int *edx )
+;-----------------------------------------------------------------------------
+cglobal cpu_cpuid, 5,7             ; execute CPUID leaf r0 (subleaf 0), store eax/ebx/ecx/edx via pointer args
+    push rbx                       ; rbx is callee-saved and clobbered by cpuid
+    push  r4                       ; stash the four output pointers; popped back in r1..r4 order below
+    push  r3
+    push  r2
+    push  r1
+    mov  eax, r0d
+    xor  ecx, ecx                  ; subleaf (ecx) = 0
+    cpuid
+    pop   r4                       ; r4 = saved r1 -> *eax
+    mov [r4], eax
+    pop   r4                       ; r4 = saved r2 -> *ebx
+    mov [r4], ebx
+    pop   r4                       ; r4 = saved r3 -> *ecx
+    mov [r4], ecx
+    pop   r4                       ; r4 = saved r4 -> *edx
+    mov [r4], edx
+    pop  rbx
+    RET
+
+;-----------------------------------------------------------------------------
+; void cpu_xgetbv( int op, int *eax, int *edx )
+;-----------------------------------------------------------------------------
+cglobal cpu_xgetbv, 3,7            ; read extended control register r0 (XCR), store eax/edx via pointer args
+    push  r2                       ; stash output pointers; popped back below
+    push  r1
+    mov  ecx, r0d                  ; XCR index in ecx
+    xgetbv
+    pop   r4                       ; r4 = saved r1 -> *eax
+    mov [r4], eax
+    pop   r4                       ; r4 = saved r2 -> *edx
+    mov [r4], edx
+    RET
+
+%if ARCH_X86_64
+
+;-----------------------------------------------------------------------------
+; void stack_align( void (*func)(void*), void *arg );
+;-----------------------------------------------------------------------------
+cglobal stack_align                ; call r0(r1, r2, r3) on a 32-byte-aligned stack
+    push rbp
+    mov  rbp, rsp                  ; frame pointer; restored by leave
+%if WIN64
+    sub  rsp, 32 ; shadow space
+%endif
+    and  rsp, ~31                  ; align stack to 32 bytes
+    mov  rax, r0                   ; rax = target function
+    mov   r0, r1                   ; shift arguments down one slot
+    mov   r1, r2
+    mov   r2, r3
+    call rax
+    leave
+    ret
+
+%else
+
+;-----------------------------------------------------------------------------
+; int cpu_cpuid_test( void )
+; return 0 if unsupported
+;-----------------------------------------------------------------------------
+cglobal cpu_cpuid_test             ; x86-32 only: probe EFLAGS.ID; returns 0 if CPUID unsupported
+    pushfd
+    push    ebx
+    push    ebp
+    push    esi
+    push    edi
+    pushfd
+    pop     eax                    ; eax = current EFLAGS
+    mov     ebx, eax
+    xor     eax, 0x200000          ; toggle the ID bit (bit 21)
+    push    eax
+    popfd                          ; try to write toggled flags
+    pushfd
+    pop     eax                    ; read them back
+    xor     eax, ebx               ; nonzero iff the ID bit toggled -> CPUID available
+    pop     edi
+    pop     esi
+    pop     ebp
+    pop     ebx
+    popfd
+    ret
+
+cglobal stack_align                ; x86-32: call func(arg1, arg2, arg3) on a 32-byte-aligned stack
+    push ebp
+    mov  ebp, esp
+    sub  esp, 12                   ; room for the three re-pushed arguments
+    and  esp, ~31                  ; align stack to 32 bytes
+    mov  ecx, [ebp+8]              ; ecx = target function
+    mov  edx, [ebp+12]             ; copy caller's args onto the aligned stack
+    mov  [esp], edx
+    mov  edx, [ebp+16]
+    mov  [esp+4], edx
+    mov  edx, [ebp+20]
+    mov  [esp+8], edx
+    call ecx
+    leave
+    ret
+
+%endif
+
+;-----------------------------------------------------------------------------
+; void cpu_emms( void )
+;-----------------------------------------------------------------------------
+cglobal cpu_emms                   ; clear MMX state so x87 FP can be used again
+    emms
+    ret
+
+;-----------------------------------------------------------------------------
+; void cpu_sfence( void )
+;-----------------------------------------------------------------------------
+cglobal cpu_sfence                 ; store-fence memory barrier
+    sfence
+    ret
+
+cextern intel_cpu_indicator_init
+
+;-----------------------------------------------------------------------------
+; void safe_intel_cpu_indicator_init( void );
+;-----------------------------------------------------------------------------
+cglobal safe_intel_cpu_indicator_init    ; call intel_cpu_indicator_init with every GPR preserved and stack aligned (callee's clobber list unknown)
+    push r0
+    push r1
+    push r2
+    push r3
+    push r4
+    push r5
+    push r6
+%if ARCH_X86_64
+    push r7
+    push r8
+    push r9
+    push r10
+    push r11
+    push r12
+    push r13
+    push r14
+%endif
+    push rbp
+    mov  rbp, rsp                  ; frame pointer; restored by leave
+%if WIN64
+    sub  rsp, 32 ; shadow space
+%endif
+    and  rsp, ~31                  ; align stack to 32 bytes
+    call intel_cpu_indicator_init
+    leave
+%if ARCH_X86_64
+    pop r14
+    pop r13
+    pop r12
+    pop r11
+    pop r10
+    pop r9
+    pop r8
+    pop r7
+%endif
+    pop r6
+    pop r5
+    pop r4
+    pop r3
+    pop r2
+    pop r1
+    pop r0
+    ret
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/dct8.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,3719 @@
+;*****************************************************************************
+;* Copyright (C) 2013 x265 project
+;*
+;* Authors: Nabajit Deka <nabajit@multicorewareinc.com>
+;*          Min Chen <chenm003@163.com> <min.chen@multicorewareinc.com>
+;*          Li Cao <li@multicorewareinc.com>
+;*          Praveen Kumar Tiwari <Praveen@multicorewareinc.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+;TODO: Further optimize the routines.
+
+%include "x86inc.asm"
+%include "x86util.asm"
+SECTION_RODATA 32                                           ; read-only constants, 32-byte aligned for unmasked AVX2 loads
+tab_dct8:       dw 64, 64, 64, 64, 64, 64, 64, 64           ; 8x8 forward-DCT coefficient matrix, one basis row per line
+                dw 89, 75, 50, 18, -18, -50, -75, -89
+                dw 83, 36, -36, -83, -83, -36, 36, 83
+                dw 75, -18, -89, -50, 50, 89, 18, -75
+                dw 64, -64, -64, 64, 64, -64, -64, 64
+                dw 50, -89, 18, 75, -75, -18, 89, -50
+                dw 36, -83, 83, -36, -36, 83, -83, 36
+                dw 18, -50, 75, -89, 89, -75, 50, -18
+
+dct8_shuf:      times 2 db 6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9   ; pshufb mask: reverse the words within each 8-byte half
+
+tab_dct16_1:    dw 64, 64, 64, 64, 64, 64, 64, 64
+                dw 90, 87, 80, 70, 57, 43, 25,  9
+                dw 89, 75, 50, 18, -18, -50, -75, -89
+                dw 87, 57,  9, -43, -80, -90, -70, -25
+                dw 83, 36, -36, -83, -83, -36, 36, 83
+                dw 80,  9, -70, -87, -25, 57, 90, 43
+                dw 75, -18, -89, -50, 50, 89, 18, -75
+                dw 70, -43, -87,  9, 90, 25, -80, -57
+                dw 64, -64, -64, 64, 64, -64, -64, 64
+                dw 57, -80, -25, 90, -9, -87, 43, 70
+                dw 50, -89, 18, 75, -75, -18, 89, -50
+                dw 43, -90, 57, 25, -87, 70,  9, -80
+                dw 36, -83, 83, -36, -36, 83, -83, 36
+                dw 25, -70, 90, -80, 43,  9, -57, 87
+                dw 18, -50, 75, -89, 89, -75, 50, -18
+                dw  9, -25, 43, -57, 70, -80, 87, -90
+
+
+tab_dct16_2:    dw 64, 64, 64, 64, 64, 64, 64, 64
+                dw -9, -25, -43, -57, -70, -80, -87, -90
+                dw -89, -75, -50, -18, 18, 50, 75, 89
+                dw 25, 70, 90, 80, 43, -9, -57, -87
+                dw 83, 36, -36, -83, -83, -36, 36, 83
+                dw -43, -90, -57, 25, 87, 70, -9, -80
+                dw -75, 18, 89, 50, -50, -89, -18, 75
+                dw 57, 80, -25, -90, -9, 87, 43, -70
+                dw 64, -64, -64, 64, 64, -64, -64, 64
+                dw -70, -43, 87,  9, -90, 25, 80, -57
+                dw -50, 89, -18, -75, 75, 18, -89, 50
+                dw 80, -9, -70, 87, -25, -57, 90, -43
+                dw 36, -83, 83, -36, -36, 83, -83, 36
+                dw -87, 57, -9, -43, 80, -90, 70, -25
+                dw -18, 50, -75, 89, -89, 75, -50, 18
+                dw 90, -87, 80, -70, 57, -43, 25, -9
+
+dct16_shuf1:     times 2 db 14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1
+
+dct16_shuf2:    times 2 db 0, 1, 14, 15, 2, 3, 12, 13, 4, 5, 10, 11, 6, 7, 8, 9
+
+tab_dct32_1:    dw 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+                dw 90, 90, 88, 85, 82, 78, 73, 67, 61, 54, 46, 38, 31, 22, 13,  4
+                dw 90, 87, 80, 70, 57, 43, 25,  9, -9, -25, -43, -57, -70, -80, -87, -90
+                dw 90, 82, 67, 46, 22, -4, -31, -54, -73, -85, -90, -88, -78, -61, -38, -13
+                dw 89, 75, 50, 18, -18, -50, -75, -89, -89, -75, -50, -18, 18, 50, 75, 89
+                dw 88, 67, 31, -13, -54, -82, -90, -78, -46, -4, 38, 73, 90, 85, 61, 22
+                dw 87, 57,  9, -43, -80, -90, -70, -25, 25, 70, 90, 80, 43, -9, -57, -87
+                dw 85, 46, -13, -67, -90, -73, -22, 38, 82, 88, 54, -4, -61, -90, -78, -31
+                dw 83, 36, -36, -83, -83, -36, 36, 83, 83, 36, -36, -83, -83, -36, 36, 83
+                dw 82, 22, -54, -90, -61, 13, 78, 85, 31, -46, -90, -67,  4, 73, 88, 38
+                dw 80,  9, -70, -87, -25, 57, 90, 43, -43, -90, -57, 25, 87, 70, -9, -80
+                dw 78, -4, -82, -73, 13, 85, 67, -22, -88, -61, 31, 90, 54, -38, -90, -46
+                dw 75, -18, -89, -50, 50, 89, 18, -75, -75, 18, 89, 50, -50, -89, -18, 75
+                dw 73, -31, -90, -22, 78, 67, -38, -90, -13, 82, 61, -46, -88, -4, 85, 54
+                dw 70, -43, -87,  9, 90, 25, -80, -57, 57, 80, -25, -90, -9, 87, 43, -70
+                dw 67, -54, -78, 38, 85, -22, -90,  4, 90, 13, -88, -31, 82, 46, -73, -61
+                dw 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64
+                dw 61, -73, -46, 82, 31, -88, -13, 90, -4, -90, 22, 85, -38, -78, 54, 67
+                dw 57, -80, -25, 90, -9, -87, 43, 70, -70, -43, 87,  9, -90, 25, 80, -57
+                dw 54, -85, -4, 88, -46, -61, 82, 13, -90, 38, 67, -78, -22, 90, -31, -73
+                dw 50, -89, 18, 75, -75, -18, 89, -50, -50, 89, -18, -75, 75, 18, -89, 50
+                dw 46, -90, 38, 54, -90, 31, 61, -88, 22, 67, -85, 13, 73, -82,  4, 78
+                dw 43, -90, 57, 25, -87, 70,  9, -80, 80, -9, -70, 87, -25, -57, 90, -43
+                dw 38, -88, 73, -4, -67, 90, -46, -31, 85, -78, 13, 61, -90, 54, 22, -82
+                dw 36, -83, 83, -36, -36, 83, -83, 36, 36, -83, 83, -36, -36, 83, -83, 36
+                dw 31, -78, 90, -61,  4, 54, -88, 82, -38, -22, 73, -90, 67, -13, -46, 85
+                dw 25, -70, 90, -80, 43,  9, -57, 87, -87, 57, -9, -43, 80, -90, 70, -25
+                dw 22, -61, 85, -90, 73, -38, -4, 46, -78, 90, -82, 54, -13, -31, 67, -88
+                dw 18, -50, 75, -89, 89, -75, 50, -18, -18, 50, -75, 89, -89, 75, -50, 18
+                dw 13, -38, 61, -78, 88, -90, 85, -73, 54, -31,  4, 22, -46, 67, -82, 90
+                dw 9, -25, 43, -57, 70, -80, 87, -90, 90, -87, 80, -70, 57, -43, 25, -9
+                dw 4, -13, 22, -31, 38, -46, 54, -61, 67, -73, 78, -82, 85, -88, 90, -90
+
+tab_dct32_2:    dw 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+                dw -4, -13, -22, -31, -38, -46, -54, -61, -67, -73, -78, -82, -85, -88, -90, -90
+                dw -90, -87, -80, -70, -57, -43, -25, -9,  9, 25, 43, 57, 70, 80, 87, 90
+                dw 13, 38, 61, 78, 88, 90, 85, 73, 54, 31,  4, -22, -46, -67, -82, -90
+                dw 89, 75, 50, 18, -18, -50, -75, -89, -89, -75, -50, -18, 18, 50, 75, 89
+                dw -22, -61, -85, -90, -73, -38,  4, 46, 78, 90, 82, 54, 13, -31, -67, -88
+                dw -87, -57, -9, 43, 80, 90, 70, 25, -25, -70, -90, -80, -43,  9, 57, 87
+                dw 31, 78, 90, 61,  4, -54, -88, -82, -38, 22, 73, 90, 67, 13, -46, -85
+                dw 83, 36, -36, -83, -83, -36, 36, 83, 83, 36, -36, -83, -83, -36, 36, 83
+                dw -38, -88, -73, -4, 67, 90, 46, -31, -85, -78, -13, 61, 90, 54, -22, -82
+                dw -80, -9, 70, 87, 25, -57, -90, -43, 43, 90, 57, -25, -87, -70,  9, 80
+                dw 46, 90, 38, -54, -90, -31, 61, 88, 22, -67, -85, -13, 73, 82,  4, -78
+                dw 75, -18, -89, -50, 50, 89, 18, -75, -75, 18, 89, 50, -50, -89, -18, 75
+                dw -54, -85,  4, 88, 46, -61, -82, 13, 90, 38, -67, -78, 22, 90, 31, -73
+                dw -70, 43, 87, -9, -90, -25, 80, 57, -57, -80, 25, 90,  9, -87, -43, 70
+                dw 61, 73, -46, -82, 31, 88, -13, -90, -4, 90, 22, -85, -38, 78, 54, -67
+                dw 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64
+                dw -67, -54, 78, 38, -85, -22, 90,  4, -90, 13, 88, -31, -82, 46, 73, -61
+                dw -57, 80, 25, -90,  9, 87, -43, -70, 70, 43, -87, -9, 90, -25, -80, 57
+                dw 73, 31, -90, 22, 78, -67, -38, 90, -13, -82, 61, 46, -88,  4, 85, -54
+                dw 50, -89, 18, 75, -75, -18, 89, -50, -50, 89, -18, -75, 75, 18, -89, 50
+                dw -78, -4, 82, -73, -13, 85, -67, -22, 88, -61, -31, 90, -54, -38, 90, -46
+                dw -43, 90, -57, -25, 87, -70, -9, 80, -80,  9, 70, -87, 25, 57, -90, 43
+                dw 82, -22, -54, 90, -61, -13, 78, -85, 31, 46, -90, 67,  4, -73, 88, -38
+                dw 36, -83, 83, -36, -36, 83, -83, 36, 36, -83, 83, -36, -36, 83, -83, 36
+                dw -85, 46, 13, -67, 90, -73, 22, 38, -82, 88, -54, -4, 61, -90, 78, -31
+                dw -25, 70, -90, 80, -43, -9, 57, -87, 87, -57,  9, 43, -80, 90, -70, 25
+                dw 88, -67, 31, 13, -54, 82, -90, 78, -46,  4, 38, -73, 90, -85, 61, -22
+                dw 18, -50, 75, -89, 89, -75, 50, -18, -18, 50, -75, 89, -89, 75, -50, 18
+                dw -90, 82, -67, 46, -22, -4, 31, -54, 73, -85, 90, -88, 78, -61, 38, -13
+                dw -9, 25, -43, 57, -70, 80, -87, 90, -90, 87, -80, 70, -57, 43, -25,  9
+                dw 90, -90, 88, -85, 82, -78, 73, -67, 61, -54, 46, -38, 31, -22, 13, -4
+
+avx2_idct8_1:   times 4 dw 64, 83, 64, 36                   ; inverse-DCT8 even-part coefficients, each row broadcast 4x for pmaddwd lanes
+                times 4 dw 64, 36, -64, -83
+                times 4 dw 64, -36, -64, 83
+                times 4 dw 64, -83, 64, -36
+
+avx2_idct8_2:   times 4 dw 89, 75, 50, 18                   ; inverse-DCT8 odd-part coefficients
+                times 4 dw 75, -18, -89, -50
+                times 4 dw 50, -89, 18, 75
+                times 4 dw 18, -50, 75, -89
+
+idct8_shuf1:    dd 0, 2, 4, 6, 1, 3, 5, 7                   ; vpermd mask: gather even dwords then odd dwords
+
+const idct8_shuf2,    times 2 db 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
+
+idct8_shuf3:    times 2 db 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3   ; pshufb mask: reverse dword order within each lane
+
+tab_idct16_1:   dw 90, 87, 80, 70, 57, 43, 25, 9
+                dw 87, 57, 9, -43, -80, -90, -70, -25
+                dw 80, 9, -70, -87, -25, 57, 90, 43
+                dw 70, -43, -87, 9, 90, 25, -80, -57
+                dw 57, -80, -25, 90, -9, -87, 43, 70
+                dw 43, -90, 57, 25, -87, 70, 9, -80
+                dw 25, -70, 90, -80, 43, 9, -57, 87
+                dw 9, -25, 43, -57, 70, -80, 87, -90
+
+tab_idct16_2:   dw 64, 89, 83, 75, 64, 50, 36, 18
+                dw 64, 75, 36, -18, -64, -89, -83, -50
+                dw 64, 50, -36, -89, -64, 18, 83, 75
+                dw 64, 18, -83, -50, 64, 75, -36, -89
+                dw 64, -18, -83, 50, 64, -75, -36, 89
+                dw 64, -50, -36, 89, -64, -18, 83, -75
+                dw 64, -75, 36, 18, -64, 89, -83, 50
+                dw 64, -89, 83, -75, 64, -50, 36, -18
+
+idct16_shuff:   dd 0, 4, 2, 6, 1, 5, 3, 7
+
+idct16_shuff1:  dd 2, 6, 0, 4, 3, 7, 1, 5
+
+tab_idct32_1:   dw 90 ,90 ,88 ,85, 82, 78, 73, 67, 61, 54, 46, 38, 31, 22, 13, 4
+                dw 90, 82, 67, 46, 22, -4, -31, -54, -73, -85, -90, -88, -78, -61, -38, -13
+                dw 88, 67, 31, -13, -54, -82, -90, -78, -46, -4, 38, 73, 90, 85, 61, 22
+                dw 85, 46, -13, -67, -90, -73, -22, 38, 82, 88, 54, -4, -61, -90, -78, -31
+                dw 82, 22, -54, -90, -61, 13, 78, 85, 31, -46, -90, -67, 4, 73, 88, 38
+                dw 78, -4, -82, -73, 13, 85, 67, -22, -88, -61, 31, 90, 54, -38, -90, -46
+                dw 73, -31, -90, -22, 78, 67, -38, -90, -13, 82, 61, -46, -88, -4, 85, 54
+                dw 67, -54, -78, 38, 85, -22, -90, 4, 90, 13, -88, -31, 82, 46, -73, -61
+                dw 61, -73, -46, 82, 31, -88, -13, 90, -4, -90, 22, 85, -38, -78, 54, 67
+                dw 54, -85, -4, 88, -46, -61, 82, 13, -90, 38, 67, -78, -22, 90, -31, -73
+                dw 46, -90, 38, 54, -90, 31, 61, -88, 22, 67, -85, 13, 73, -82, 4, 78
+                dw 38, -88, 73, -4, -67, 90, -46, -31, 85, -78, 13, 61, -90, 54, 22, -82
+                dw 31, -78, 90, -61, 4, 54, -88, 82, -38, -22, 73, -90, 67, -13, -46, 85
+                dw 22, -61, 85, -90, 73, -38, -4, 46, -78, 90, -82, 54, -13, -31, 67, -88
+                dw 13, -38, 61, -78, 88, -90, 85, -73, 54, -31, 4, 22, -46, 67, -82, 90
+                dw 4, -13, 22, -31, 38, -46, 54, -61, 67, -73, 78, -82, 85, -88, 90, -90
+
+
+tab_idct32_2:   dw 64, 89, 83, 75, 64, 50, 36, 18
+                dw 64, 75, 36, -18, -64, -89, -83, -50
+                dw 64, 50, -36, -89, -64, 18, 83, 75
+                dw 64, 18, -83, -50, 64, 75, -36, -89
+                dw 64, -18, -83, 50, 64, -75, -36, 89
+                dw 64, -50, -36, 89, -64, -18, 83, -75
+                dw 64, -75, 36, 18, -64, 89, -83, 50
+                dw 64, -89, 83, -75, 64, -50, 36, -18
+
+
+tab_idct32_3:   dw 90, 87, 80, 70, 57, 43, 25, 9
+                dw 87, 57, 9, -43, -80, -90, -70, -25
+                dw 80, 9, -70, -87, -25, 57, 90, 43
+                dw 70, -43, -87, 9, 90, 25, -80, -57
+                dw 57, -80, -25, 90, -9, -87, 43, 70
+                dw 43, -90, 57, 25, -87, 70, 9, -80
+                dw 25, -70, 90, -80, 43, 9, -57, 87
+                dw 9, -25, 43, -57, 70, -80, 87, -90
+
+tab_idct32_4:   dw 64, 90, 89, 87, 83, 80, 75, 70, 64, 57, 50, 43, 36, 25, 18, 9
+                dw 64, 87, 75, 57, 36, 9, -18, -43, -64, -80, -89, -90, -83, -70, -50, -25
+                dw 64, 80, 50, 9, -36, -70, -89, -87, -64, -25, 18, 57, 83, 90, 75, 43
+                dw 64, 70, 18, -43, -83, -87, -50, 9, 64, 90, 75, 25, -36, -80, -89, -57
+                dw 64, 57, -18, -80, -83, -25, 50, 90, 64, -9, -75, -87, -36, 43, 89, 70
+                dw 64, 43, -50, -90, -36, 57, 89, 25, -64, -87, -18, 70, 83, 9, -75, -80
+                dw 64, 25, -75, -70, 36, 90, 18, -80, -64, 43, 89, 9, -83, -57, 50, 87
+                dw 64, 9, -89, -25, 83, 43, -75, -57, 64, 70, -50, -80, 36, 87, -18, -90
+                dw 64, -9, -89, 25, 83, -43, -75, 57, 64, -70, -50, 80, 36, -87, -18, 90
+                dw 64, -25, -75, 70, 36, -90, 18, 80, -64, -43, 89, -9, -83, 57, 50, -87
+                dw 64, -43, -50, 90, -36, -57, 89, -25, -64, 87, -18, -70, 83, -9, -75, 80
+                dw 64, -57, -18, 80, -83, 25, 50, -90, 64, 9, -75, 87, -36, -43, 89, -70
+                dw 64, -70, 18, 43, -83, 87, -50, -9, 64, -90, 75, -25, -36, 80, -89, 57
+                dw 64, -80, 50, -9, -36, 70, -89, 87, -64, 25, 18, -57, 83, -90, 75, -43
+                dw 64, -87, 75, -57, 36, -9, -18, 43, -64, 80, -89, 90, -83, 70, -50, 25
+                dw 64, -90, 89, -87, 83, -80, 75, -70, 64, -57, 50, -43, 36, -25, 18, -9
+
+avx2_dct4:      dw 64, 64, 64, 64, 64, 64, 64, 64, 64, -64, 64, -64, 64, -64, 64, -64   ; 4-pt DCT even/odd coefficient pairs, replicated per lane
+                dw 83, 36, 83, 36, 83, 36, 83, 36, 36, -83, 36, -83, 36, -83, 36, -83
+
+avx2_idct4_1:   dw 64, 64, 64, 64, 64, 64, 64, 64, 64, -64, 64, -64, 64, -64, 64, -64   ; 4-pt inverse-DCT coefficients (same values, IDCT layout)
+                dw 83, 36, 83, 36, 83, 36, 83, 36, 36, -83, 36, -83, 36 ,-83, 36, -83
+
+avx2_idct4_2:   dw 64, 64, 64, -64, 83, 36, 36, -83
+
+const idct4_shuf1,    times 2 db 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15
+
+idct4_shuf2:    times 2 db 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8 ,9 ,10, 11
+
+tab_dct4:       times 4 dw 64, 64                            ; 4-pt DCT coefficient pairs for pmaddwd (word-interleaved)
+                times 4 dw 83, 36
+                times 4 dw 64, -64
+                times 4 dw 36, -83
+
+dct4_shuf:      db 0, 1, 2, 3, 8, 9, 10, 11, 6, 7, 4, 5, 14, 15, 12, 13
+
+tab_dst4:       times 2 dw 29, 55, 74, 84                    ; 4-pt DST-VII coefficient rows (SSE layout, 2x replicated)
+                times 2 dw 74, 74,  0, -74
+                times 2 dw 84, -29, -74, 55
+                times 2 dw 55, -84, 74, -29
+
+pw_dst4_tab:    times 4 dw 29,  55,  74,  84                 ; same DST-VII rows, 4x replicated for 256-bit AVX2 loads
+                times 4 dw 74,  74,   0, -74
+                times 4 dw 84, -29, -74,  55
+                times 4 dw 55, -84,  74, -29
+
+tab_idst4:      times 4 dw 29, +84                           ; inverse DST-VII coefficient pairs (word-interleaved for pmaddwd)
+                times 4 dw +74, +55
+                times 4 dw 55, -29
+                times 4 dw +74, -84
+                times 4 dw 74, -74
+                times 4 dw 0, +74
+                times 4 dw 84, +55
+                times 4 dw -74, -29
+
+pw_idst4_tab:   times 4 dw  29,  84
+                times 4 dw  55, -29
+                times 4 dw  74,  55
+                times 4 dw  74, -84
+                times 4 dw  74, -74
+                times 4 dw  84,  55
+                times 4 dw  0,   74
+                times 4 dw -74, -29
+pb_idst4_shuf:  times 2 db 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15
+
+tab_dct8_1:     times 2 dw 89, 50, 75, 18                    ; DCT8 odd-part coefficient pairs
+                times 2 dw 75, -89, -18, -50
+                times 2 dw 50, 18, -89, 75
+                times 2 dw 18, 75, -50, -89
+
+tab_dct8_2:     times 2 dd 83, 36                            ; DCT8 even-part constants as dwords (used after widening)
+                times 2 dd 36, 83
+                times 1 dd 89, 75, 50, 18
+                times 1 dd 75, -18, -89, -50
+                times 1 dd 50, -89, 18, 75
+                times 1 dd 18, -50, 75, -89
+
+tab_idct8_3:    times 4 dw 89, 75                            ; inverse-DCT8 odd-part coefficient pairs
+                times 4 dw 50, 18
+                times 4 dw 75, -18
+                times 4 dw -89, -50
+                times 4 dw 50, -89
+                times 4 dw 18, 75
+                times 4 dw 18, -50
+                times 4 dw 75, -89
+
+pb_unpackhlw1:  db 0,1,8,9,2,3,10,11,4,5,12,13,6,7,14,15
+
+pb_idct8even:   db 0, 1, 8, 9, 4, 5, 12, 13, 0, 1, 8, 9, 4, 5, 12, 13   ; pshufb mask selecting the even-index words (both halves)
+
+tab_idct8_1:    times 1 dw 64, -64, 36, -83, 64, 64, 83, 36
+
+tab_idct8_2:    times 1 dw 89, 75, 50, 18, 75, -18, -89, -50
+                times 1 dw 50, -89, 18, 75, 18, -50, 75, -89
+
+pb_idct8odd:    db 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15   ; pshufb mask selecting the odd-index words (both halves)
+
+SECTION .text
+cextern pd_1
+cextern pd_2
+cextern pd_4
+cextern pd_8
+cextern pd_16
+cextern pd_32
+cextern pd_64
+cextern pd_128
+cextern pd_256
+cextern pd_512
+cextern pd_1024
+cextern pd_2048
+cextern pw_ppppmmmm
+cextern trans8_shuf
+
+
+%if BIT_DEPTH == 12                                  ; first-pass shift/round pairs scale with bit depth; ROUND == 1 << (SHIFT - 1) in every case
+    %define     DCT4_SHIFT          5
+    %define     DCT4_ROUND          16
+    %define    IDCT_SHIFT           8
+    %define    IDCT_ROUND           128
+    %define     DST4_SHIFT          5
+    %define     DST4_ROUND          16
+    %define     DCT8_SHIFT1         6
+    %define     DCT8_ROUND1         32
+%elif BIT_DEPTH == 10
+    %define     DCT4_SHIFT          3
+    %define     DCT4_ROUND          4
+    %define    IDCT_SHIFT           10
+    %define    IDCT_ROUND           512
+    %define     DST4_SHIFT          3
+    %define     DST4_ROUND          4
+    %define     DCT8_SHIFT1         4
+    %define     DCT8_ROUND1         8
+%elif BIT_DEPTH == 8
+    %define     DCT4_SHIFT          1
+    %define     DCT4_ROUND          1
+    %define    IDCT_SHIFT           12
+    %define    IDCT_ROUND           2048
+    %define     DST4_SHIFT          1
+    %define     DST4_ROUND          1
+    %define     DCT8_SHIFT1         2
+    %define     DCT8_ROUND1         2
+%else
+    %error Unsupported BIT_DEPTH!
+%endif
+
+%define         DCT8_ROUND2         256              ; second-pass constants are bit-depth independent (again ROUND2 == 1 << (SHIFT2 - 1))
+%define         DCT8_SHIFT2         9
+
+;------------------------------------------------------
+;void dct4(const int16_t* src, int16_t* dst, intptr_t srcStride)
+;------------------------------------------------------
+INIT_XMM sse2
+cglobal dct4, 3, 4, 8                       ; dct4(src, dst, srcStride): two-pass 4x4 forward DCT
+    mova        m7, [pd_ %+ DCT4_ROUND]     ; pass-1 rounding constant (bit-depth dependent)
+    add         r2d, r2d                    ; stride is in int16 units -> convert to bytes
+    lea         r3, [tab_dct4]
+
+    mova        m4, [r3 + 0 * 16]
+    mova        m5, [r3 + 1 * 16]
+    mova        m6, [r3 + 2 * 16]
+    movh        m0, [r0 + 0 * r2]           ; load the four 4x int16 source rows
+    movh        m1, [r0 + 1 * r2]
+    punpcklqdq  m0, m1
+    pshufd      m0, m0, 0xD8                ; reorder words so the butterfly pairs line up
+    pshufhw     m0, m0, 0xB1
+
+    lea         r0, [r0 + 2 * r2]
+    movh        m1, [r0]
+    movh        m2, [r0 + r2]
+    punpcklqdq  m1, m2
+    pshufd      m1, m1, 0xD8
+    pshufhw     m1, m1, 0xB1
+
+    punpcklqdq  m2, m0, m1
+    punpckhqdq  m0, m1
+
+    paddw       m1, m2, m0                  ; butterfly: sums feed even rows, differences feed odd rows
+    psubw       m2, m0
+    pmaddwd     m0, m1, m4
+    paddd       m0, m7
+    psrad       m0, DCT4_SHIFT              ; pass 1: round and shift by DCT4_SHIFT
+    pmaddwd     m3, m2, m5
+    paddd       m3, m7
+    psrad       m3, DCT4_SHIFT
+    packssdw    m0, m3
+    pshufd      m0, m0, 0xD8
+    pshufhw     m0, m0, 0xB1
+    pmaddwd     m1, m6
+    paddd       m1, m7
+    psrad       m1, DCT4_SHIFT
+    pmaddwd     m2, [r3 + 3 * 16]
+    paddd       m2, m7
+    psrad       m2, DCT4_SHIFT
+    packssdw    m1, m2
+    pshufd      m1, m1, 0xD8
+    pshufhw     m1, m1, 0xB1
+
+    punpcklqdq  m2, m0, m1
+    punpckhqdq  m0, m1
+
+    mova        m7, [pd_128]                ; pass-2 rounding constant (shift is a fixed 8)
+
+    pmaddwd     m1, m2, m4
+    pmaddwd     m3, m0, m4
+    paddd       m1, m3
+    paddd       m1, m7
+    psrad       m1, 8
+
+    pmaddwd     m4, m2, m5
+    pmaddwd     m3, m0, m5
+    psubd       m4, m3
+    paddd       m4, m7
+    psrad       m4, 8
+    packssdw    m1, m4
+    movu        [r1 + 0 * 16], m1           ; dst rows 0-1
+
+    pmaddwd     m1, m2, m6
+    pmaddwd     m3, m0, m6
+    paddd       m1, m3
+    paddd       m1, m7
+    psrad       m1, 8
+
+    pmaddwd     m2, [r3 + 3 * 16]
+    pmaddwd     m0, [r3 + 3 * 16]
+    psubd       m2, m0
+    paddd       m2, m7
+    psrad       m2, 8
+    packssdw    m1, m2
+    movu        [r1 + 1 * 16], m1           ; dst rows 2-3
+    RET
+
+; DCT 4x4
+;
+; Input parameters:
+; - r0:     source
+; - r1:     destination
+; - r2:     source stride
+INIT_YMM avx2
+cglobal dct4, 3, 4, 8, src, dst, srcStride  ; AVX2 4x4 forward DCT: whole block held in one 256-bit register
+    vbroadcasti128  m7, [pd_ %+ DCT4_ROUND] ; pass-1 rounding constant in both lanes
+    add             r2d, r2d                ; int16 stride -> byte stride
+    lea             r3, [avx2_dct4]
+
+    vbroadcasti128  m4, [dct4_shuf]
+    mova            m5, [r3]
+    mova            m6, [r3 + 32]
+    movq            xm0, [r0]               ; pack the four 4x int16 rows into m0
+    movhps          xm0, [r0 + r2]
+    lea             r0, [r0 + 2 * r2]
+    movq            xm1, [r0]
+    movhps          xm1, [r0 + r2]
+
+    vinserti128     m0, m0, xm1, 1
+    pshufb          m0, m4
+    vpermq          m1, m0, 11011101b       ; split into the two butterfly operand halves
+    vpermq          m0, m0, 10001000b
+    paddw           m2, m0, m1              ; sums -> even outputs, differences -> odd outputs
+    psubw           m0, m1
+
+    pmaddwd         m2, m5
+    paddd           m2, m7
+    psrad           m2, DCT4_SHIFT          ; pass 1: bit-depth dependent shift
+
+    pmaddwd         m0, m6
+    paddd           m0, m7
+    psrad           m0, DCT4_SHIFT
+
+    packssdw        m2, m0
+    pshufb          m2, m4
+    vpermq          m1, m2, 11011101b
+    vpermq          m2, m2, 10001000b
+    vbroadcasti128  m7, [pd_128]            ; pass-2 rounding constant (fixed shift of 8)
+
+    pmaddwd         m0, m2, m5
+    pmaddwd         m3, m1, m5
+    paddd           m3, m0
+    paddd           m3, m7
+    psrad           m3, 8
+
+    pmaddwd         m2, m6
+    pmaddwd         m1, m6
+    psubd           m2, m1
+    paddd           m2, m7
+    psrad           m2, 8
+
+    packssdw        m3, m2
+    movu            [r1], m3                ; store all 16 coefficients at once
+    RET
+
+;-------------------------------------------------------
+;void idct4(const int16_t* src, int16_t* dst, intptr_t dstStride)
+;-------------------------------------------------------
+INIT_XMM sse2
+cglobal idct4, 3, 4, 6                      ; idct4(src, dst, dstStride): two-pass 4x4 inverse DCT
+    add         r2d, r2d                    ; int16 stride -> byte stride
+    lea         r3, [tab_dct4]
+
+    movu        m0, [r0 + 0 * 16]
+    movu        m1, [r0 + 1 * 16]
+
+    punpcklwd   m2, m0, m1
+    pmaddwd     m3, m2, [r3 + 0 * 16]       ; m3 = E1
+    paddd       m3, [pd_64]                 ; pass-1 round, paired with the shift-by-7 below
+
+    pmaddwd     m2, [r3 + 2 * 16]           ; m2 = E2
+    paddd       m2, [pd_64]
+
+    punpckhwd   m0, m1
+    pmaddwd     m1, m0, [r3 + 1 * 16]       ; m1 = O1
+    pmaddwd     m0, [r3 + 3 * 16]           ; m0 = O2
+
+    paddd       m4, m3, m1                  ; even +/- odd butterflies
+    psrad       m4, 7                       ; m4 = m128iA
+    paddd       m5, m2, m0
+    psrad       m5, 7
+    packssdw    m4, m5                      ; m4 = m128iA
+
+    psubd       m2, m0
+    psrad       m2, 7
+    psubd       m3, m1
+    psrad       m3, 7
+    packssdw    m2, m3                      ; m2 = m128iD
+
+    punpcklwd   m1, m4, m2                  ; m1 = S0
+    punpckhwd   m4, m2                      ; m4 = S8
+
+    punpcklwd   m0, m1, m4                  ; m0 = m128iA
+    punpckhwd   m1, m4                      ; m1 = m128iD
+
+    punpcklwd   m2, m0, m1                  ; pass 2 on the transposed data
+    pmaddwd     m3, m2, [r3 + 0 * 16]
+    paddd       m3, [pd_ %+ IDCT_ROUND]     ; m3 = E1
+
+    pmaddwd     m2, [r3 + 2 * 16]
+    paddd       m2, [pd_ %+ IDCT_ROUND]     ; m2 = E2
+
+    punpckhwd   m0, m1
+    pmaddwd     m1, m0, [r3 + 1 * 16]       ; m1 = O1
+    pmaddwd     m0, [r3 + 3 * 16]           ; m0 = O2
+
+    paddd       m4, m3, m1
+    psrad       m4, IDCT_SHIFT              ; m4 = m128iA
+    paddd       m5, m2, m0
+    psrad       m5, IDCT_SHIFT              ; pass-2 shift is bit-depth dependent
+    packssdw    m4, m5                      ; m4 = m128iA
+
+    psubd       m2, m0
+    psrad       m2, IDCT_SHIFT
+    psubd       m3, m1
+    psrad       m3, IDCT_SHIFT
+    packssdw    m2, m3                      ; m2 = m128iD
+
+    punpcklwd   m1, m4, m2                  ; transpose back before storing
+    punpckhwd   m4, m2
+
+    punpcklwd   m0, m1, m4
+    movlps      [r1 + 0 * r2], m0           ; store one 4x int16 row per strided destination line
+    movhps      [r1 + 1 * r2], m0
+
+    punpckhwd   m1, m4
+    movlps      [r1 + 2 * r2], m1
+    lea         r1, [r1 + 2 * r2]
+    movhps      [r1 + r2], m1
+    RET
+
+;------------------------------------------------------
+;void dst4(const int16_t* src, int16_t* dst, intptr_t srcStride)
+;------------------------------------------------------
+INIT_XMM sse2
+%if ARCH_X86_64                              ; on x86-64 the four coefficient rows stay resident in m8-m11
+cglobal dst4, 3, 4, 8+4
+  %define       coef0   m8
+  %define       coef1   m9
+  %define       coef2   m10
+  %define       coef3   m11
+%else ; ARCH_X86_64 = 0                      ; 32-bit has only 8 xmm regs: reload rows from memory each use
+cglobal dst4, 3, 4, 8
+  %define       coef0   [r3 + 0 * 16]
+  %define       coef1   [r3 + 1 * 16]
+  %define       coef2   [r3 + 2 * 16]
+  %define       coef3   [r3 + 3 * 16]
+%endif ; ARCH_X86_64
+
+    mova        m5, [pd_ %+ DST4_ROUND]      ; pass-1 rounding constant (bit-depth dependent)
+    add         r2d, r2d                     ; int16 stride -> byte stride
+    lea         r3, [tab_dst4]
+%if ARCH_X86_64
+    mova        coef0, [r3 + 0 * 16]
+    mova        coef1, [r3 + 1 * 16]
+    mova        coef2, [r3 + 2 * 16]
+    mova        coef3, [r3 + 3 * 16]
+%endif
+    movh        m0, [r0 + 0 * r2]            ; load
+    movhps      m0, [r0 + 1 * r2]
+    lea         r0, [r0 + 2 * r2]
+    movh        m1, [r0]
+    movhps      m1, [r0 + r2]
+    pmaddwd     m2, m0, coef0                ; DST1
+    pmaddwd     m3, m1, coef0
+    pshufd      m6, m2, q2301                ; shuffle+add emulates SSSE3 phaddd on plain SSE2
+    pshufd      m7, m3, q2301
+    paddd       m2, m6
+    paddd       m3, m7
+    pshufd      m2, m2, q3120
+    pshufd      m3, m3, q3120
+    punpcklqdq  m2, m3
+    paddd       m2, m5
+    psrad       m2, DST4_SHIFT
+    pmaddwd     m3, m0, coef1
+    pmaddwd     m4, m1, coef1
+    pshufd      m6, m4, q2301
+    pshufd      m7, m3, q2301
+    paddd       m4, m6
+    paddd       m3, m7
+    pshufd      m4, m4, q3120
+    pshufd      m3, m3, q3120
+    punpcklqdq  m3, m4
+    paddd       m3, m5
+    psrad       m3, DST4_SHIFT
+    packssdw    m2, m3                       ; m2 = T70
+    pmaddwd     m3, m0, coef2
+    pmaddwd     m4, m1, coef2
+    pshufd      m6, m4, q2301
+    pshufd      m7, m3, q2301
+    paddd       m4, m6
+    paddd       m3, m7
+    pshufd      m4, m4, q3120
+    pshufd      m3, m3, q3120
+    punpcklqdq  m3, m4
+    paddd       m3, m5
+    psrad       m3, DST4_SHIFT
+    pmaddwd     m0, coef3
+    pmaddwd     m1, coef3
+    pshufd      m6, m0, q2301
+    pshufd      m7, m1, q2301
+    paddd       m0, m6
+    paddd       m1, m7
+    pshufd      m0, m0, q3120
+    pshufd      m1, m1, q3120
+    punpcklqdq  m0, m1
+    paddd       m0, m5
+    psrad       m0, DST4_SHIFT
+    packssdw    m3, m0                       ; m3 = T71
+    mova        m5, [pd_128]                 ; pass-2 rounding constant (fixed shift of 8)
+
+    pmaddwd     m0, m2, coef0                ; DST2
+    pmaddwd     m1, m3, coef0
+    pshufd      m6, m0, q2301
+    pshufd      m7, m1, q2301
+    paddd       m0, m6
+    paddd       m1, m7
+    pshufd      m0, m0, q3120
+    pshufd      m1, m1, q3120
+    punpcklqdq  m0, m1
+    paddd       m0, m5
+    psrad       m0, 8
+
+    pmaddwd     m4, m2, coef1
+    pmaddwd     m1, m3, coef1
+    pshufd      m6, m4, q2301
+    pshufd      m7, m1, q2301
+    paddd       m4, m6
+    paddd       m1, m7
+    pshufd      m4, m4, q3120
+    pshufd      m1, m1, q3120
+    punpcklqdq  m4, m1
+    paddd       m4, m5
+    psrad       m4, 8
+    packssdw    m0, m4
+    movu        [r1 + 0 * 16], m0            ; dst rows 0-1
+
+    pmaddwd     m0, m2, coef2
+    pmaddwd     m1, m3, coef2
+    pshufd      m6, m0, q2301
+    pshufd      m7, m1, q2301
+    paddd       m0, m6
+    paddd       m1, m7
+    pshufd      m0, m0, q3120
+    pshufd      m1, m1, q3120
+    punpcklqdq  m0, m1
+    paddd       m0, m5
+    psrad       m0, 8
+
+    pmaddwd     m2, coef3
+    pmaddwd     m3, coef3
+    pshufd      m6, m2, q2301
+    pshufd      m7, m3, q2301
+    paddd       m2, m6
+    paddd       m3, m7
+    pshufd      m2, m2, q3120
+    pshufd      m3, m3, q3120
+    punpcklqdq  m2, m3
+    paddd       m2, m5
+    psrad       m2, 8
+    packssdw    m0, m2
+    movu        [r1 + 1 * 16], m0            ; dst rows 2-3
+    RET
+
+;------------------------------------------------------
+;void dst4(const int16_t* src, int16_t* dst, intptr_t srcStride)
+;------------------------------------------------------
+INIT_XMM ssse3                               ; same algorithm as the SSE2 dst4, using phaddd for the horizontal sums
+%if ARCH_X86_64
+cglobal dst4, 3, 4, 8+2
+  %define       coef2   m8
+  %define       coef3   m9
+%else ; ARCH_X86_64 = 0                      ; 32-bit: rows 2-3 are reloaded from memory
+cglobal dst4, 3, 4, 8
+  %define       coef2   [r3 + 2 * 16]
+  %define       coef3   [r3 + 3 * 16]
+%endif ; ARCH_X86_64
+%define         coef0   m6
+%define         coef1   m7
+
+    mova        m5, [pd_ %+ DST4_ROUND]      ; pass-1 rounding constant (bit-depth dependent)
+    add         r2d, r2d                     ; int16 stride -> byte stride
+    lea         r3, [tab_dst4]
+    mova        coef0, [r3 + 0 * 16]
+    mova        coef1, [r3 + 1 * 16]
+%if ARCH_X86_64
+    mova        coef2, [r3 + 2 * 16]
+    mova        coef3, [r3 + 3 * 16]
+%endif
+    movh        m0, [r0 + 0 * r2]            ; load
+    movh        m1, [r0 + 1 * r2]
+    punpcklqdq  m0, m1
+    lea         r0, [r0 + 2 * r2]
+    movh        m1, [r0]
+    movh        m2, [r0 + r2]
+    punpcklqdq  m1, m2
+    pmaddwd     m2, m0, coef0                ; DST1
+    pmaddwd     m3, m1, coef0
+    phaddd      m2, m3
+    paddd       m2, m5
+    psrad       m2, DST4_SHIFT
+    pmaddwd     m3, m0, coef1
+    pmaddwd     m4, m1, coef1
+    phaddd      m3, m4
+    paddd       m3, m5
+    psrad       m3, DST4_SHIFT
+    packssdw    m2, m3                       ; m2 = T70
+    pmaddwd     m3, m0, coef2
+    pmaddwd     m4, m1, coef2
+    phaddd      m3, m4
+    paddd       m3, m5
+    psrad       m3, DST4_SHIFT
+    pmaddwd     m0, coef3
+    pmaddwd     m1, coef3
+    phaddd      m0, m1
+    paddd       m0, m5
+    psrad       m0, DST4_SHIFT
+    packssdw    m3, m0                       ; m3 = T71
+    mova        m5, [pd_128]                 ; pass-2 rounding constant (fixed shift of 8)
+
+    pmaddwd     m0, m2, coef0                ; DST2
+    pmaddwd     m1, m3, coef0
+    phaddd      m0, m1
+    paddd       m0, m5
+    psrad       m0, 8
+
+    pmaddwd     m4, m2, coef1
+    pmaddwd     m1, m3, coef1
+    phaddd      m4, m1
+    paddd       m4, m5
+    psrad       m4, 8
+    packssdw    m0, m4
+    movu        [r1 + 0 * 16], m0            ; dst rows 0-1
+
+    pmaddwd     m0, m2, coef2
+    pmaddwd     m1, m3, coef2
+    phaddd      m0, m1
+    paddd       m0, m5
+    psrad       m0, 8
+
+    pmaddwd     m2, coef3
+    pmaddwd     m3, coef3
+    phaddd      m2, m3
+    paddd       m2, m5
+    psrad       m2, 8
+    packssdw    m0, m2
+    movu        [r1 + 1 * 16], m0            ; dst rows 2-3
+    RET
+
+;------------------------------------------------------------------
+;void dst4(const int16_t* src, int16_t* dst, intptr_t srcStride)
+;------------------------------------------------------------------
+INIT_YMM avx2
+cglobal dst4, 3, 4, 6                       ; AVX2 4x4 DST: whole block processed in one 256-bit register
+    vbroadcasti128 m5, [pd_ %+ DST4_ROUND]  ; pass-1 rounding constant in both lanes
+    mova        m4, [trans8_shuf]
+    add         r2d, r2d                    ; int16 stride -> byte stride
+    lea         r3, [pw_dst4_tab]
+
+    movq        xm0, [r0 + 0 * r2]          ; gather the four 4x int16 source rows
+    movhps      xm0, [r0 + 1 * r2]
+    lea         r0, [r0 + 2 * r2]
+    movq        xm1, [r0]
+    movhps      xm1, [r0 + r2]
+
+    vinserti128 m0, m0, xm1, 1          ; m0 = src[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
+
+    pmaddwd     m2, m0, [r3 + 0 * 32]       ; pass 1: multiply-add against each DST row
+    pmaddwd     m1, m0, [r3 + 1 * 32]
+    phaddd      m2, m1
+    paddd       m2, m5
+    psrad       m2, DST4_SHIFT
+    pmaddwd     m3, m0, [r3 + 2 * 32]
+    pmaddwd     m1, m0, [r3 + 3 * 32]
+    phaddd      m3, m1
+    paddd       m3, m5
+    psrad       m3, DST4_SHIFT
+    packssdw    m2, m3
+    vpermd      m2, m4, m2                  ; reorder lanes (transpose) before pass 2
+
+    vpbroadcastd m5, [pd_128]               ; pass-2 rounding constant (fixed shift of 8)
+    pmaddwd     m0, m2, [r3 + 0 * 32]
+    pmaddwd     m1, m2, [r3 + 1 * 32]
+    phaddd      m0, m1
+    paddd       m0, m5
+    psrad       m0, 8
+    pmaddwd     m3, m2, [r3 + 2 * 32]
+    pmaddwd     m2, m2, [r3 + 3 * 32]
+    phaddd      m3, m2
+    paddd       m3, m5
+    psrad       m3, 8
+    packssdw    m0, m3
+    vpermd      m0, m4, m0
+    movu        [r1], m0                    ; store all 16 coefficients at once
+    RET
+
+;-------------------------------------------------------
+;void idst4(const int16_t* src, int16_t* dst, intptr_t dstStride)
+;-------------------------------------------------------
+INIT_XMM sse2
+cglobal idst4, 3, 4, 7                  ; inverse 4x4 DST-VII (SSE2), two butterfly passes
+    mova        m6, [pd_ %+ IDCT_ROUND] ; m6 = pass-2 rounding (bit-depth dependent)
+    add         r2d, r2d                ; dstStride: int16_t units -> bytes
+    lea         r3, [tab_idst4]         ; r3 -> inverse-DST coefficient table
+    mova        m5, [pd_64]             ; pass-1 rounding, paired with the fixed >> 7
+
+    movu        m0, [r0 + 0 * 16]       ; coefficient rows 0-1
+    movu        m1, [r0 + 1 * 16]       ; coefficient rows 2-3
+
+    punpcklwd   m2, m0, m1                  ; m2 = m128iAC
+    punpckhwd   m0, m1                      ; m0 = m128iBD
+
+    pmaddwd     m1, m2, [r3 + 0 * 16]   ; pass 1 (columns)
+    pmaddwd     m3, m0, [r3 + 1 * 16]
+    paddd       m1, m3
+    paddd       m1, m5
+    psrad       m1, 7                       ; m1 = S0
+
+    pmaddwd     m3, m2, [r3 + 2 * 16]
+    pmaddwd     m4, m0, [r3 + 3 * 16]
+    paddd       m3, m4
+    paddd       m3, m5
+    psrad       m3, 7                       ; m3 = S8
+    packssdw    m1, m3                      ; m1 = m128iA
+
+    pmaddwd     m3, m2, [r3 + 4 * 16]
+    pmaddwd     m4, m0, [r3 + 5 * 16]
+    paddd       m3, m4
+    paddd       m3, m5
+    psrad       m3, 7                       ; m3 = S0
+
+    pmaddwd     m2, [r3 + 6 * 16]
+    pmaddwd     m0, [r3 + 7 * 16]
+    paddd       m2, m0
+    paddd       m2, m5
+    psrad       m2, 7                       ; m2 = S8
+    packssdw    m3, m2                      ; m3 = m128iD
+
+    punpcklwd   m0, m1, m3              ; 4x4 transpose of the intermediate block
+    punpckhwd   m1, m3
+
+    punpcklwd   m2, m0, m1
+    punpckhwd   m0, m1
+    punpcklwd   m1, m2, m0
+    punpckhwd   m2, m0
+    pmaddwd     m0, m1, [r3 + 0 * 16]   ; pass 2 (rows), now with IDCT_ROUND / IDCT_SHIFT
+    pmaddwd     m3, m2, [r3 + 1 * 16]
+    paddd       m0, m3
+    paddd       m0, m6
+    psrad       m0, IDCT_SHIFT              ; m0 = S0
+    pmaddwd     m3, m1, [r3 + 2 * 16]
+    pmaddwd     m4, m2, [r3 + 3 * 16]
+    paddd       m3, m4
+    paddd       m3, m6
+    psrad       m3, IDCT_SHIFT              ; m3 = S8
+    packssdw    m0, m3                      ; m0 = m128iA
+    pmaddwd     m3, m1, [r3 + 4 * 16]
+    pmaddwd     m4, m2, [r3 + 5 * 16]
+    paddd       m3, m4
+    paddd       m3, m6
+    psrad       m3, IDCT_SHIFT              ; m3 = S0
+    pmaddwd     m1, [r3 + 6 * 16]
+    pmaddwd     m2, [r3 + 7 * 16]
+    paddd       m1, m2
+    paddd       m1, m6
+    psrad       m1, IDCT_SHIFT              ; m1 = S8
+    packssdw    m3, m1                      ; m3 = m128iD
+    punpcklwd   m1, m0, m3              ; transpose back to row order for the stores
+    punpckhwd   m0, m3
+
+    punpcklwd   m2, m1, m0
+    movlps      [r1 + 0 * r2], m2       ; one 4-sample residual row per 64-bit store
+    movhps      [r1 + 1 * r2], m2
+
+    punpckhwd   m1, m0
+    movlps      [r1 + 2 * r2], m1
+    lea         r1, [r1 + 2 * r2]       ; r2*3 is not an encodable scale; step the base instead
+    movhps      [r1 + r2], m1
+    RET
+
+;-----------------------------------------------------------------
+;void idst4(const int16_t* src, int16_t* dst, intptr_t dstStride)
+;-----------------------------------------------------------------
+INIT_YMM avx2
+cglobal idst4, 3, 4, 6                  ; inverse 4x4 DST-VII (AVX2), data duplicated across lanes
+    vbroadcasti128 m4, [pd_ %+ IDCT_ROUND] ; m4 = pass-2 rounding (bit-depth dependent)
+    add         r2d, r2d                ; dstStride: int16_t units -> bytes
+    lea         r3, [pw_idst4_tab]      ; r3 -> inverse-DST coefficients, 32 bytes per row pair
+
+    movu        xm0, [r0 + 0 * 16]      ; coefficient rows 0-1
+    movu        xm1, [r0 + 1 * 16]      ; coefficient rows 2-3
+
+    punpcklwd   m2, m0, m1
+    punpckhwd   m0, m1
+
+    vinserti128 m2, m2, xm2, 1          ; replicate low lane into high lane
+    vinserti128 m0, m0, xm0, 1
+
+    vpbroadcastd m5, [pd_64]            ; pass-1 rounding, paired with the fixed >> 7
+    pmaddwd     m1, m2, [r3 + 0 * 32]   ; pass 1 (columns)
+    pmaddwd     m3, m0, [r3 + 1 * 32]
+    paddd       m1, m3
+    paddd       m1, m5
+    psrad       m1, 7
+    pmaddwd     m3, m2, [r3 + 2 * 32]
+    pmaddwd     m0, [r3 + 3 * 32]
+    paddd       m3, m0
+    paddd       m3, m5
+    psrad       m3, 7
+
+    packssdw    m0, m1, m3              ; saturate pass-1 output to int16
+    pshufb      m0, [pb_idst4_shuf]     ; byte shuffle begins the 4x4 transpose
+    vpermq      m1, m0, 11101110b       ; m1 = upper half replicated for the unpacks below
+
+    punpcklwd   m2, m0, m1
+    punpckhwd   m0, m1
+    punpcklwd   m1, m2, m0
+    punpckhwd   m2, m0
+
+    vpermq      m1, m1, 01000100b       ; duplicate low 128 bits into both lanes
+    vpermq      m2, m2, 01000100b
+
+    pmaddwd     m0, m1, [r3 + 0 * 32]   ; pass 2 (rows) with IDCT_ROUND / IDCT_SHIFT
+    pmaddwd     m3, m2, [r3 + 1 * 32]
+    paddd       m0, m3
+    paddd       m0, m4
+    psrad       m0, IDCT_SHIFT
+    pmaddwd     m3, m1, [r3 + 2 * 32]
+    pmaddwd     m2, m2, [r3 + 3 * 32]
+    paddd       m3, m2
+    paddd       m3, m4
+    psrad       m3, IDCT_SHIFT
+
+    packssdw    m0, m3
+    pshufb      m1, m0, [pb_idst4_shuf] ; transpose back to row order
+    vpermq      m0, m1, 11101110b
+
+    punpcklwd   m2, m1, m0
+    movq        [r1 + 0 * r2], xm2      ; one 4-sample residual row per 64-bit store
+    movhps      [r1 + 1 * r2], xm2
+
+    punpckhwd   m1, m0
+    movq        [r1 + 2 * r2], xm1
+    lea         r1, [r1 + 2 * r2]       ; r2*3 is not an encodable scale; step the base instead
+    movhps      [r1 + r2], xm1
+    RET
+
+;-------------------------------------------------------
+; void dct8(const int16_t* src, int16_t* dst, intptr_t srcStride)
+;-------------------------------------------------------
+INIT_XMM sse2
+cglobal dct8, 3,6,8,0-16*mmsize         ; 16*mmsize stack bytes hold the 32-bit pass-1 output
+    ;------------------------
+    ; Stack Mapping(dword)
+    ;------------------------
+    ; Row0[0-3] Row1[0-3]
+    ; ...
+    ; Row6[0-3] Row7[0-3]
+    ; Row0[4-7] Row1[4-7]
+    ; ...
+    ; Row6[4-7] Row7[4-7]
+    ;------------------------
+
+    add         r2, r2                  ; srcStride: int16_t units -> bytes
+    lea         r3, [r2 * 3]
+    mov         r5, rsp                 ; r5 walks the pass-1 output area
+%assign x 0
+%rep 2                                  ; two iterations: input rows 0-3, then rows 4-7
+    movu        m0, [r0]
+    movu        m1, [r0 + r2]
+    movu        m2, [r0 + r2 * 2]
+    movu        m3, [r0 + r3]
+
+    punpcklwd   m4, m0, m1              ; 8x4 -> 4x8 transpose via word/dword unpacks
+    punpckhwd   m0, m1
+    punpcklwd   m5, m2, m3
+    punpckhwd   m2, m3
+    punpckldq   m1, m4, m5          ; m1 = [1 0]
+    punpckhdq   m4, m5              ; m4 = [3 2]
+    punpckldq   m3, m0, m2
+    punpckhdq   m0, m2
+    pshufd      m2, m3, 0x4E        ; m2 = [4 5]
+    pshufd      m0, m0, 0x4E        ; m0 = [6 7]
+
+    paddw       m3, m1, m0              ; butterfly: sums feed even rows, diffs odd rows
+    psubw       m1, m0              ; m1 = [d1 d0]
+    paddw       m0, m4, m2
+    psubw       m4, m2              ; m4 = [d3 d2]
+    punpcklqdq  m2, m3, m0          ; m2 = [s2 s0]
+    punpckhqdq  m3, m0
+    pshufd      m3, m3, 0x4E        ; m3 = [s1 s3]
+
+    punpcklwd   m0, m1, m4          ; m0 = [d2/d0]
+    punpckhwd   m1, m4              ; m1 = [d3/d1]
+    punpckldq   m4, m0, m1          ; m4 = [d3 d1 d2 d0]
+    punpckhdq   m0, m1              ; m0 = [d3 d1 d2 d0]
+
+    ; odd
+    lea         r4, [tab_dct8_1]        ; coefficients for output rows 1,3,5,7
+    pmaddwd     m1, m4, [r4 + 0*16]
+    pmaddwd     m5, m0, [r4 + 0*16]
+    pshufd      m1, m1, 0xD8            ; shuffle pairs so the qdq add emulates phaddd (SSE2)
+    pshufd      m5, m5, 0xD8
+    mova        m7, m1
+    punpckhqdq  m7, m5
+    punpcklqdq  m1, m5
+    paddd       m1, m7
+    paddd       m1, [pd_ %+ DCT8_ROUND1] ; pass-1 round + shift (bit-depth dependent)
+    psrad       m1, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m1, m1, 0x1B            ; second half is stored reversed for pass 2
+  %endif
+    mova        [r5 + 1*2*mmsize], m1 ; Row 1
+
+    pmaddwd     m1, m4, [r4 + 1*16]
+    pmaddwd     m5, m0, [r4 + 1*16]
+    pshufd      m1, m1, 0xD8
+    pshufd      m5, m5, 0xD8
+    mova        m7, m1
+    punpckhqdq  m7, m5
+    punpcklqdq  m1, m5
+    paddd       m1, m7
+    paddd       m1, [pd_ %+ DCT8_ROUND1]
+    psrad       m1, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m1, m1, 0x1B
+  %endif
+    mova        [r5 + 3*2*mmsize], m1 ; Row 3
+
+    pmaddwd     m1, m4, [r4 + 2*16]
+    pmaddwd     m5, m0, [r4 + 2*16]
+    pshufd      m1, m1, 0xD8
+    pshufd      m5, m5, 0xD8
+    mova        m7, m1
+    punpckhqdq  m7, m5
+    punpcklqdq  m1, m5
+    paddd       m1, m7
+    paddd       m1, [pd_ %+ DCT8_ROUND1]
+    psrad       m1, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m1, m1, 0x1B
+  %endif
+    mova        [r5 + 5*2*mmsize], m1 ; Row 5
+
+    pmaddwd     m4, [r4 + 3*16]
+    pmaddwd     m0, [r4 + 3*16]
+    pshufd      m4, m4, 0xD8
+    pshufd      m0, m0, 0xD8
+    mova        m7, m4
+    punpckhqdq  m7, m0
+    punpcklqdq  m4, m0
+    paddd       m4, m7
+    paddd       m4, [pd_ %+ DCT8_ROUND1]
+    psrad       m4, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m4, m4, 0x1B
+  %endif
+    mova        [r5 + 7*2*mmsize], m4; Row 7
+
+    ; even
+    lea         r4, [tab_dct4]          ; even rows reuse the 4-point DCT table
+    paddw       m0, m2, m3          ; m0 = [EE1 EE0]
+    pshufd      m0, m0, 0xD8
+    pshuflw     m0, m0, 0xD8
+    pshufhw     m0, m0, 0xD8
+    psubw       m2, m3              ; m2 = [EO1 EO0]
+    pmullw      m2, [pw_ppppmmmm]       ; sign-flip half the lanes (SSE2 psignw substitute)
+    pshufd      m2, m2, 0xD8
+    pshuflw     m2, m2, 0xD8
+    pshufhw     m2, m2, 0xD8
+    pmaddwd     m3, m0, [r4 + 0*16]
+    paddd       m3, [pd_ %+ DCT8_ROUND1]
+    psrad       m3, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m3, m3, 0x1B
+  %endif
+    mova        [r5 + 0*2*mmsize], m3 ; Row 0
+    pmaddwd     m0, [r4 + 2*16]
+    paddd       m0, [pd_ %+ DCT8_ROUND1]
+    psrad       m0, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m0, m0, 0x1B
+  %endif
+    mova        [r5 + 4*2*mmsize], m0 ; Row 4
+    pmaddwd     m3, m2, [r4 + 1*16]
+    paddd       m3, [pd_ %+ DCT8_ROUND1]
+    psrad       m3, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m3, m3, 0x1B
+  %endif
+    mova        [r5 + 2*2*mmsize], m3 ; Row 2
+    pmaddwd     m2, [r4 + 3*16]
+    paddd       m2, [pd_ %+ DCT8_ROUND1]
+    psrad       m2, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m2, m2, 0x1B
+  %endif
+    mova        [r5 + 6*2*mmsize], m2 ; Row 6
+
+  %if x != 1
+    lea         r0, [r0 + r2 * 4]       ; advance to input rows 4-7
+    add         r5, mmsize              ; second half goes into the [4-7] stack slots
+  %endif
+%assign x x+1
+%endrep
+
+    mov         r0, rsp                 ; r0 = pointer to Low Part
+    lea         r4, [tab_dct8_2]
+
+%assign x 0
+%rep 4                                  ; pass 2: one output column pair per iteration
+    mova        m0, [r0 + 0*2*mmsize]     ; [3 2 1 0]
+    mova        m1, [r0 + 1*2*mmsize]
+    paddd       m2, m0, [r0 + (0*2+1)*mmsize]
+    pshufd      m2, m2, 0x9C            ; m2 = [s2 s1 s3 s0]
+    paddd       m3, m1, [r0 + (1*2+1)*mmsize]
+    pshufd      m3, m3, 0x9C            ; m3 = ^^
+    psubd       m0, [r0 + (0*2+1)*mmsize]     ; m0 = [d3 d2 d1 d0]
+    psubd       m1, [r0 + (1*2+1)*mmsize]     ; m1 = ^^
+
+    ; even
+    pshufd      m4, m2, 0xD8
+    pshufd      m3, m3, 0xD8
+    mova        m7, m4
+    punpckhqdq  m7, m3
+    punpcklqdq  m4, m3
+    mova        m2, m4
+    paddd       m4, m7                  ; m4 = [EE1 EE0 EE1 EE0]
+    psubd       m2, m7                  ; m2 = [EO1 EO0 EO1 EO0]
+
+    pslld       m4, 6                   ; m4 = [64*EE1 64*EE0]
+    mova        m5, m2
+    pmuludq     m5, [r4 + 0*16]         ; emulate 32-bit pmulld with pmuludq pairs (SSE2)
+    pshufd      m7, m2, 0xF5
+    movu        m6, [r4 + 0*16 + 4]
+    pmuludq     m7, m6
+    pshufd      m5, m5, 0x88
+    pshufd      m7, m7, 0x88
+    punpckldq   m5, m7                  ; m5 = [36*EO1 83*EO0]
+    pshufd      m7, m2, 0xF5
+    pmuludq     m2, [r4 + 1*16]
+    movu        m6, [r4 + 1*16 + 4]
+    pmuludq     m7, m6
+    pshufd      m2, m2, 0x88
+    pshufd      m7, m7, 0x88
+    punpckldq   m2, m7                  ; m2 = [83*EO1 36*EO0]
+
+    pshufd      m3, m4, 0xD8
+    pshufd      m5, m5, 0xD8
+    mova        m7, m3
+    punpckhqdq  m7, m5
+    punpcklqdq  m3, m5
+    paddd       m3, m7                  ; m3 = [Row2 Row0]
+    paddd       m3, [pd_ %+ DCT8_ROUND2] ; pass-2 round + shift
+    psrad       m3, DCT8_SHIFT2
+    pshufd      m4, m4, 0xD8
+    pshufd      m2, m2, 0xD8
+    mova        m7, m4
+    punpckhqdq  m7, m2
+    punpcklqdq  m4, m2
+    psubd       m4, m7                  ; m4 = [Row6 Row4]
+    paddd       m4, [pd_ %+ DCT8_ROUND2]
+    psrad       m4, DCT8_SHIFT2
+
+    packssdw    m3, m3
+    movd        [r1 + 0*mmsize], m3     ; scatter: one dword (2 coeffs) per output row
+    pshufd      m3, m3, 1
+    movd        [r1 + 2*mmsize], m3
+
+    packssdw    m4, m4
+    movd        [r1 + 4*mmsize], m4
+    pshufd      m4, m4, 1
+    movd        [r1 + 6*mmsize], m4
+
+    ; odd
+    mova        m2, m0
+    pmuludq     m2, [r4 + 2*16]
+    pshufd      m7, m0, 0xF5
+    movu        m6, [r4 + 2*16 + 4]
+    pmuludq     m7, m6
+    pshufd      m2, m2, 0x88
+    pshufd      m7, m7, 0x88
+    punpckldq   m2, m7
+    mova        m3, m1
+    pmuludq     m3, [r4 + 2*16]
+    pshufd      m7, m1, 0xF5
+    pmuludq     m7, m6
+    pshufd      m3, m3, 0x88
+    pshufd      m7, m7, 0x88
+    punpckldq   m3, m7
+    mova        m4, m0
+    pmuludq     m4, [r4 + 3*16]
+    pshufd      m7, m0, 0xF5
+    movu        m6, [r4 + 3*16 + 4]
+    pmuludq     m7, m6
+    pshufd      m4, m4, 0x88
+    pshufd      m7, m7, 0x88
+    punpckldq   m4, m7
+    mova        m5, m1
+    pmuludq     m5, [r4 + 3*16]
+    pshufd      m7, m1, 0xF5
+    pmuludq     m7, m6
+    pshufd      m5, m5, 0x88
+    pshufd      m7, m7, 0x88
+    punpckldq   m5, m7
+    pshufd      m2, m2, 0xD8            ; three phaddd-equivalents reduce to [Row3 Row1]
+    pshufd      m3, m3, 0xD8
+    mova        m7, m2
+    punpckhqdq  m7, m3
+    punpcklqdq  m2, m3
+    paddd       m2, m7
+    pshufd      m4, m4, 0xD8
+    pshufd      m5, m5, 0xD8
+    mova        m7, m4
+    punpckhqdq  m7, m5
+    punpcklqdq  m4, m5
+    paddd       m4, m7
+    pshufd      m2, m2, 0xD8
+    pshufd      m4, m4, 0xD8
+    mova        m7, m2
+    punpckhqdq  m7, m4
+    punpcklqdq  m2, m4
+    paddd       m2, m7                  ; m2 = [Row3 Row1]
+    paddd       m2, [pd_ %+ DCT8_ROUND2]
+    psrad       m2, DCT8_SHIFT2
+
+    packssdw    m2, m2
+    movd        [r1 + 1*mmsize], m2
+    pshufd      m2, m2, 1
+    movd        [r1 + 3*mmsize], m2
+
+    mova        m2, m0
+    pmuludq     m2, [r4 + 4*16]
+    pshufd      m7, m0, 0xF5
+    movu        m6, [r4 + 4*16 + 4]
+    pmuludq     m7, m6
+    pshufd      m2, m2, 0x88
+    pshufd      m7, m7, 0x88
+    punpckldq   m2, m7
+    mova        m3, m1
+    pmuludq     m3, [r4 + 4*16]
+    pshufd      m7, m1, 0xF5
+    pmuludq     m7, m6
+    pshufd      m3, m3, 0x88
+    pshufd      m7, m7, 0x88
+    punpckldq   m3, m7
+    mova        m4, m0
+    pmuludq     m4, [r4 + 5*16]
+    pshufd      m7, m0, 0xF5
+    movu        m6, [r4 + 5*16 + 4]
+    pmuludq     m7, m6
+    pshufd      m4, m4, 0x88
+    pshufd      m7, m7, 0x88
+    punpckldq   m4, m7
+    mova        m5, m1
+    pmuludq     m5, [r4 + 5*16]
+    pshufd      m7, m1, 0xF5
+    pmuludq     m7, m6
+    pshufd      m5, m5, 0x88
+    pshufd      m7, m7, 0x88
+    punpckldq   m5, m7
+    pshufd      m2, m2, 0xD8
+    pshufd      m3, m3, 0xD8
+    mova        m7, m2
+    punpckhqdq  m7, m3
+    punpcklqdq  m2, m3
+    paddd       m2, m7
+    pshufd      m4, m4, 0xD8
+    pshufd      m5, m5, 0xD8
+    mova        m7, m4
+    punpckhqdq  m7, m5
+    punpcklqdq  m4, m5
+    paddd       m4, m7
+    pshufd      m2, m2, 0xD8
+    pshufd      m4, m4, 0xD8
+    mova        m7, m2
+    punpckhqdq  m7, m4
+    punpcklqdq  m2, m4
+    paddd       m2, m7                  ; m2 = [Row7 Row5]
+    paddd       m2, [pd_ %+ DCT8_ROUND2]
+    psrad       m2, DCT8_SHIFT2
+
+    packssdw    m2, m2
+    movd        [r1 + 5*mmsize], m2
+    pshufd      m2, m2, 1
+    movd        [r1 + 7*mmsize], m2
+%if x < 3
+    add         r1, mmsize/4            ; next output column pair
+    add         r0, 2*2*mmsize          ; next pair of pass-1 row groups
+%endif
+%assign x x+1
+%endrep
+
+    RET
+
+;-------------------------------------------------------
+; void dct8(const int16_t* src, int16_t* dst, intptr_t srcStride)
+;-------------------------------------------------------
+INIT_XMM sse4
+cglobal dct8, 3,6,7,0-16*mmsize         ; SSE4 variant: phaddd/pmulld replace the SSE2 emulations
+    ;------------------------
+    ; Stack Mapping(dword)
+    ;------------------------
+    ; Row0[0-3] Row1[0-3]
+    ; ...
+    ; Row6[0-3] Row7[0-3]
+    ; Row0[4-7] Row1[4-7]
+    ; ...
+    ; Row6[4-7] Row7[4-7]
+    ;------------------------
+    mova        m6, [pd_ %+ DCT8_ROUND1] ; keep the pass-1 rounding constant resident
+
+    add         r2, r2                  ; srcStride: int16_t units -> bytes
+    lea         r3, [r2 * 3]
+    mov         r5, rsp                 ; r5 walks the pass-1 output area
+%assign x 0
+%rep 2                                  ; two iterations: input rows 0-3, then rows 4-7
+    movu        m0, [r0]
+    movu        m1, [r0 + r2]
+    movu        m2, [r0 + r2 * 2]
+    movu        m3, [r0 + r3]
+
+    punpcklwd   m4, m0, m1              ; 8x4 -> 4x8 transpose via word/dword unpacks
+    punpckhwd   m0, m1
+    punpcklwd   m5, m2, m3
+    punpckhwd   m2, m3
+    punpckldq   m1, m4, m5          ; m1 = [1 0]
+    punpckhdq   m4, m5              ; m4 = [3 2]
+    punpckldq   m3, m0, m2
+    punpckhdq   m0, m2
+    pshufd      m2, m3, 0x4E        ; m2 = [4 5]
+    pshufd      m0, m0, 0x4E        ; m0 = [6 7]
+
+    paddw       m3, m1, m0              ; butterfly: sums feed even rows, diffs odd rows
+    psubw       m1, m0              ; m1 = [d1 d0]
+    paddw       m0, m4, m2
+    psubw       m4, m2              ; m4 = [d3 d2]
+    punpcklqdq  m2, m3, m0          ; m2 = [s2 s0]
+    punpckhqdq  m3, m0
+    pshufd      m3, m3, 0x4E        ; m3 = [s1 s3]
+
+    punpcklwd   m0, m1, m4          ; m0 = [d2/d0]
+    punpckhwd   m1, m4              ; m1 = [d3/d1]
+    punpckldq   m4, m0, m1          ; m4 = [d3 d1 d2 d0]
+    punpckhdq   m0, m1              ; m0 = [d3 d1 d2 d0]
+
+    ; odd
+    lea         r4, [tab_dct8_1]        ; coefficients for output rows 1,3,5,7
+    pmaddwd     m1, m4, [r4 + 0*16]
+    pmaddwd     m5, m0, [r4 + 0*16]
+    phaddd      m1, m5
+    paddd       m1, m6
+    psrad       m1, DCT8_SHIFT1         ; pass-1 descale (bit-depth dependent)
+  %if x == 1
+    pshufd      m1, m1, 0x1B            ; second half is stored reversed for pass 2
+  %endif
+    mova        [r5 + 1*2*mmsize], m1 ; Row 1
+
+    pmaddwd     m1, m4, [r4 + 1*16]
+    pmaddwd     m5, m0, [r4 + 1*16]
+    phaddd      m1, m5
+    paddd       m1, m6
+    psrad       m1, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m1, m1, 0x1B
+  %endif
+    mova        [r5 + 3*2*mmsize], m1 ; Row 3
+
+    pmaddwd     m1, m4, [r4 + 2*16]
+    pmaddwd     m5, m0, [r4 + 2*16]
+    phaddd      m1, m5
+    paddd       m1, m6
+    psrad       m1, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m1, m1, 0x1B
+  %endif
+    mova        [r5 + 5*2*mmsize], m1 ; Row 5
+
+    pmaddwd     m4, [r4 + 3*16]
+    pmaddwd     m0, [r4 + 3*16]
+    phaddd      m4, m0
+    paddd       m4, m6
+    psrad       m4, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m4, m4, 0x1B
+  %endif
+    mova        [r5 + 7*2*mmsize], m4; Row 7
+
+    ; even
+    lea         r4, [tab_dct4]          ; even rows reuse the 4-point DCT table
+    paddw       m0, m2, m3          ; m0 = [EE1 EE0]
+    pshufb      m0, [pb_unpackhlw1]     ; single shuffle replaces the SSE2 pshufd/lw/hw trio
+    psubw       m2, m3              ; m2 = [EO1 EO0]
+    psignw      m2, [pw_ppppmmmm]       ; SSSE3 sign flip replaces the pmullw trick
+    pshufb      m2, [pb_unpackhlw1]
+    pmaddwd     m3, m0, [r4 + 0*16]
+    paddd       m3, m6
+    psrad       m3, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m3, m3, 0x1B
+  %endif
+    mova        [r5 + 0*2*mmsize], m3 ; Row 0
+    pmaddwd     m0, [r4 + 2*16]
+    paddd       m0, m6
+    psrad       m0, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m0, m0, 0x1B
+  %endif
+    mova        [r5 + 4*2*mmsize], m0 ; Row 4
+    pmaddwd     m3, m2, [r4 + 1*16]
+    paddd       m3, m6
+    psrad       m3, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m3, m3, 0x1B
+  %endif
+    mova        [r5 + 2*2*mmsize], m3 ; Row 2
+    pmaddwd     m2, [r4 + 3*16]
+    paddd       m2, m6
+    psrad       m2, DCT8_SHIFT1
+  %if x == 1
+    pshufd      m2, m2, 0x1B
+  %endif
+    mova        [r5 + 6*2*mmsize], m2 ; Row 6
+
+  %if x != 1
+    lea         r0, [r0 + r2 * 4]       ; advance to input rows 4-7
+    add         r5, mmsize              ; second half goes into the [4-7] stack slots
+  %endif
+%assign x x+1
+%endrep
+
+    mov         r2, 2                   ; r2 now a loop counter; stride no longer needed
+    mov         r0, rsp                 ; r0 = pointer to Low Part
+    lea         r4, [tab_dct8_2]
+    mova        m6, [pd_256]            ; pass-2 rounding (shift is fixed at 9)
+
+.pass2:
+%rep 2
+    mova        m0, [r0 + 0*2*mmsize]     ; [3 2 1 0]
+    mova        m1, [r0 + 1*2*mmsize]
+    paddd       m2, m0, [r0 + (0*2+1)*mmsize]
+    pshufd      m2, m2, 0x9C            ; m2 = [s2 s1 s3 s0]
+    paddd       m3, m1, [r0 + (1*2+1)*mmsize]
+    pshufd      m3, m3, 0x9C            ; m3 = ^^
+    psubd       m0, [r0 + (0*2+1)*mmsize]     ; m0 = [d3 d2 d1 d0]
+    psubd       m1, [r0 + (1*2+1)*mmsize]     ; m1 = ^^
+
+    ; even
+    phaddd      m4, m2, m3              ; m4 = [EE1 EE0 EE1 EE0]
+    phsubd      m2, m3                  ; m2 = [EO1 EO0 EO1 EO0]
+
+    pslld       m4, 6                   ; m4 = [64*EE1 64*EE0]
+    pmulld      m5, m2, [r4 + 0*16]     ; m5 = [36*EO1 83*EO0]
+    pmulld      m2, [r4 + 1*16]         ; m2 = [83*EO1 36*EO0]
+
+    phaddd      m3, m4, m5              ; m3 = [Row2 Row0]
+    paddd       m3, m6
+    psrad       m3, 9
+    phsubd      m4, m2                  ; m4 = [Row6 Row4]
+    paddd       m4, m6
+    psrad       m4, 9
+
+    packssdw    m3, m3
+    movd        [r1 + 0*mmsize], m3     ; scatter: one dword (2 coeffs) per output row
+    pshufd      m3, m3, 1
+    movd        [r1 + 2*mmsize], m3
+
+    packssdw    m4, m4
+    movd        [r1 + 4*mmsize], m4
+    pshufd      m4, m4, 1
+    movd        [r1 + 6*mmsize], m4
+
+    ; odd
+    pmulld      m2, m0, [r4 + 2*16]
+    pmulld      m3, m1, [r4 + 2*16]
+    pmulld      m4, m0, [r4 + 3*16]
+    pmulld      m5, m1, [r4 + 3*16]
+    phaddd      m2, m3
+    phaddd      m4, m5
+    phaddd      m2, m4                  ; m2 = [Row3 Row1]
+    paddd       m2, m6
+    psrad       m2, 9
+
+    packssdw    m2, m2
+    movd        [r1 + 1*mmsize], m2
+    pshufd      m2, m2, 1
+    movd        [r1 + 3*mmsize], m2
+
+    pmulld      m2, m0, [r4 + 4*16]
+    pmulld      m3, m1, [r4 + 4*16]
+    pmulld      m4, m0, [r4 + 5*16]
+    pmulld      m5, m1, [r4 + 5*16]
+    phaddd      m2, m3
+    phaddd      m4, m5
+    phaddd      m2, m4                  ; m2 = [Row7 Row5]
+    paddd       m2, m6
+    psrad       m2, 9
+
+    packssdw    m2, m2
+    movd        [r1 + 5*mmsize], m2
+    pshufd      m2, m2, 1
+    movd        [r1 + 7*mmsize], m2
+
+    add         r1, mmsize/4            ; next output column pair
+    add         r0, 2*2*mmsize          ; next pair of pass-1 row groups
+%endrep
+
+    dec         r2
+    jnz        .pass2
+    RET
+
+;-------------------------------------------------------
+; void idct8(const int16_t* src, int16_t* dst, intptr_t dstStride)
+;-------------------------------------------------------
+%if ARCH_X86_64
+INIT_XMM sse2
+cglobal idct8, 3, 6, 16, 0-5*mmsize     ; x86-64 only (uses xmm8-15); 5 stack slots for spills
+    mova        m9, [r0 + 1 * mmsize]   ; pass 1, odd part: rows 1,3,5,7
+    mova        m1, [r0 + 3 * mmsize]
+    mova        m7, m9
+    punpcklwd   m7, m1
+    punpckhwd   m9, m1
+    mova        m14, [tab_idct8_3]
+    mova        m3, m14
+    pmaddwd     m14, m7
+    pmaddwd     m3, m9
+    mova        m0, [r0 + 5 * mmsize]
+    mova        m10, [r0 + 7 * mmsize]
+    mova        m2, m0
+    punpcklwd   m2, m10
+    punpckhwd   m0, m10
+    mova        m15, [tab_idct8_3 + 1 * mmsize]
+    mova        m11, [tab_idct8_3 + 1 * mmsize]
+    pmaddwd     m15, m2
+    mova        m4, [tab_idct8_3 + 2 * mmsize]
+    pmaddwd     m11, m0
+    mova        m1, [tab_idct8_3 + 2 * mmsize]
+    paddd       m15, m14
+    mova        m5, [tab_idct8_3 + 4 * mmsize]
+    mova        m12, [tab_idct8_3 + 4 * mmsize]
+    paddd       m11, m3
+    mova        [rsp + 0 * mmsize], m11 ; spill odd-part partials; regs are exhausted
+    mova        [rsp + 1 * mmsize], m15
+    pmaddwd     m4, m7
+    pmaddwd     m1, m9
+    mova        m14, [tab_idct8_3 + 3 * mmsize]
+    mova        m3, [tab_idct8_3 + 3 * mmsize]
+    pmaddwd     m14, m2
+    pmaddwd     m3, m0
+    paddd       m14, m4
+    paddd       m3, m1
+    mova        [rsp + 2 * mmsize], m3
+    pmaddwd     m5, m9
+    pmaddwd     m9, [tab_idct8_3 + 6 * mmsize]
+    mova        m6, [tab_idct8_3 + 5 * mmsize]
+    pmaddwd     m12, m7
+    pmaddwd     m7, [tab_idct8_3 + 6 * mmsize]
+    mova        m4, [tab_idct8_3 + 5 * mmsize]
+    pmaddwd     m6, m2
+    paddd       m6, m12
+    pmaddwd     m2, [tab_idct8_3 + 7 * mmsize]
+    paddd       m7, m2
+    mova        [rsp + 3 * mmsize], m6
+    pmaddwd     m4, m0
+    pmaddwd     m0, [tab_idct8_3 + 7 * mmsize]
+    paddd       m9, m0
+    paddd       m5, m4
+    mova        m6, [r0 + 0 * mmsize]   ; pass 1, even part: rows 0,4 and 2,6
+    mova        m0, [r0 + 4 * mmsize]
+    mova        m4, m6
+    punpcklwd   m4, m0
+    punpckhwd   m6, m0
+    mova        m12, [r0 + 2 * mmsize]
+    mova        m0, [r0 + 6 * mmsize]
+    mova        m13, m12
+    mova        m8, [tab_dct4]          ; even part reuses the 4-point DCT table
+    punpcklwd   m13, m0
+    mova        m10, [tab_dct4]
+    punpckhwd   m12, m0
+    pmaddwd     m8, m4
+    mova        m3, m8
+    pmaddwd     m4, [tab_dct4 + 2 * mmsize]
+    pmaddwd     m10, m6
+    mova        m2, [tab_dct4 + 1 * mmsize]
+    mova        m1, m10
+    pmaddwd     m6, [tab_dct4 + 2 * mmsize]
+    mova        m0, [tab_dct4 + 1 * mmsize]
+    pmaddwd     m2, m13
+    paddd       m3, m2                  ; E[k] sums/diffs of the even butterfly
+    psubd       m8, m2
+    mova        m2, m6
+    pmaddwd     m13, [tab_dct4 + 3 * mmsize]
+    pmaddwd     m0, m12
+    paddd       m1, m0
+    psubd       m10, m0
+    mova        m0, m4
+    pmaddwd     m12, [tab_dct4 + 3 * mmsize]
+    paddd       m3, [pd_64]             ; pass-1 rounding, paired with the fixed >> 7
+    paddd       m1, [pd_64]
+    paddd       m8, [pd_64]
+    paddd       m10, [pd_64]
+    paddd       m0, m13
+    paddd       m2, m12
+    paddd       m0, [pd_64]
+    paddd       m2, [pd_64]
+    psubd       m4, m13
+    psubd       m6, m12
+    paddd       m4, [pd_64]
+    paddd       m6, [pd_64]
+    mova        m12, m8                 ; combine even E[k] with odd O[k]: dst = (E +/- O) >> 7
+    psubd       m8, m7
+    psrad       m8, 7
+    paddd       m15, m3
+    psubd       m3, [rsp + 1 * mmsize]
+    psrad       m15, 7
+    paddd       m12, m7
+    psrad       m12, 7
+    paddd       m11, m1
+    mova        m13, m14
+    psrad       m11, 7
+    packssdw    m15, m11
+    psubd       m1, [rsp + 0 * mmsize]
+    psrad       m1, 7
+    mova        m11, [rsp + 2 * mmsize]
+    paddd       m14, m0
+    psrad       m14, 7
+    psubd       m0, m13
+    psrad       m0, 7
+    paddd       m11, m2
+    mova        m13, [rsp + 3 * mmsize]
+    psrad       m11, 7
+    packssdw    m14, m11
+    mova        m11, m6
+    psubd       m6, m5
+    paddd       m13, m4
+    psrad       m13, 7
+    psrad       m6, 7
+    paddd       m11, m5
+    psrad       m11, 7
+    packssdw    m13, m11
+    mova        m11, m10
+    psubd       m4, [rsp + 3 * mmsize]
+    psubd       m10, m9
+    psrad       m4, 7
+    psrad       m10, 7
+    packssdw    m4, m6
+    packssdw    m8, m10
+    paddd       m11, m9
+    psrad       m11, 7
+    packssdw    m12, m11
+    psubd       m2, [rsp + 2 * mmsize]
+    mova        m5, m15
+    psrad       m2, 7
+    packssdw    m0, m2
+    mova        m2, m14
+    psrad       m3, 7
+    packssdw    m3, m1
+    mova        m6, m13
+    punpcklwd   m5, m8                  ; 8x8 word transpose between the two passes
+    punpcklwd   m2, m4
+    mova        m1, m12
+    punpcklwd   m6, m0
+    punpcklwd   m1, m3
+    mova        m9, m5
+    punpckhwd   m13, m0
+    mova        m0, m2
+    punpcklwd   m9, m6
+    punpckhwd   m5, m6
+    punpcklwd   m0, m1
+    punpckhwd   m2, m1
+    punpckhwd   m15, m8
+    mova        m1, m5
+    punpckhwd   m14, m4
+    punpckhwd   m12, m3
+    mova        m6, m9
+    punpckhwd   m9, m0
+    punpcklwd   m1, m2
+    mova        m4, [tab_idct8_3 + 0 * mmsize]
+    punpckhwd   m5, m2
+    punpcklwd   m6, m0
+    mova        m2, m15
+    mova        m0, m14
+    mova        m7, m9
+    punpcklwd   m2, m13
+    punpcklwd   m0, m12
+    punpcklwd   m7, m5
+    punpckhwd   m14, m12
+    mova        m10, m2
+    punpckhwd   m15, m13
+    punpckhwd   m9, m5
+    pmaddwd     m4, m7                  ; pass 2, odd part (same table, new data order)
+    mova        m13, m1
+    punpckhwd   m2, m0
+    punpcklwd   m10, m0
+    mova        m0, m15
+    punpckhwd   m15, m14
+    mova        m12, m1
+    mova        m3, [tab_idct8_3 + 0 * mmsize]
+    punpcklwd   m0, m14
+    pmaddwd     m3, m9
+    mova        m11, m2
+    punpckhwd   m2, m15
+    punpcklwd   m11, m15
+    mova        m8, [tab_idct8_3 + 1 * mmsize]
+    punpcklwd   m13, m0
+    punpckhwd   m12, m0
+    pmaddwd     m8, m11
+    paddd       m8, m4
+    mova        [rsp + 4 * mmsize], m8  ; spill pass-2 odd partials as before
+    mova        m4, [tab_idct8_3 + 2 * mmsize]
+    pmaddwd     m4, m7
+    mova        m15, [tab_idct8_3 + 2 * mmsize]
+    mova        m5, [tab_idct8_3 + 1 * mmsize]
+    pmaddwd     m15, m9
+    pmaddwd     m5, m2
+    paddd       m5, m3
+    mova        [rsp + 3 * mmsize], m5
+    mova        m14, [tab_idct8_3 + 3 * mmsize]
+    mova        m5, [tab_idct8_3 + 3 * mmsize]
+    pmaddwd     m14, m11
+    paddd       m14, m4
+    mova        [rsp + 2 * mmsize], m14
+    pmaddwd     m5, m2
+    paddd       m5, m15
+    mova        [rsp + 1 * mmsize], m5
+    mova        m15, [tab_idct8_3 + 4 * mmsize]
+    mova        m5, [tab_idct8_3 + 4 * mmsize]
+    pmaddwd     m15, m7
+    pmaddwd     m7, [tab_idct8_3 + 6 * mmsize]
+    pmaddwd     m5, m9
+    pmaddwd     m9, [tab_idct8_3 + 6 * mmsize]
+    mova        m4, [tab_idct8_3 + 5 * mmsize]
+    pmaddwd     m4, m2
+    paddd       m5, m4
+    mova        m4, m6
+    mova        m8, [tab_idct8_3 + 5 * mmsize]
+    punpckhwd   m6, m10
+    pmaddwd     m2, [tab_idct8_3 + 7 * mmsize]
+    punpcklwd   m4, m10
+    paddd       m9, m2
+    pmaddwd     m8, m11
+    mova        m10, [tab_dct4]         ; pass 2, even part
+    paddd       m8, m15
+    pmaddwd     m11, [tab_idct8_3 + 7 * mmsize]
+    paddd       m7, m11
+    mova        [rsp + 0 * mmsize], m8
+    pmaddwd     m10, m6
+    pmaddwd     m6, [tab_dct4 + 2 * mmsize]
+    mova        m1, m10
+    mova        m8, [tab_dct4]
+    mova        m3, [tab_dct4 + 1 * mmsize]
+    pmaddwd     m8, m4
+    pmaddwd     m4, [tab_dct4 + 2 * mmsize]
+    mova        m0, m8
+    mova        m2, [tab_dct4 + 1 * mmsize]
+    pmaddwd     m3, m13
+    psubd       m8, m3
+    paddd       m0, m3
+    mova        m3, m6
+    pmaddwd     m13, [tab_dct4 + 3 * mmsize]
+    pmaddwd     m2, m12
+    paddd       m1, m2
+    psubd       m10, m2
+    mova        m2, m4
+    pmaddwd     m12, [tab_dct4 + 3 * mmsize]
+    mova        m15, [pd_ %+ IDCT_ROUND] ; pass-2 rounding, paired with IDCT_SHIFT
+    paddd       m0, m15
+    paddd       m1, m15
+    paddd       m8, m15
+    paddd       m10, m15
+    paddd       m2, m13
+    paddd       m3, m12
+    paddd       m2, m15
+    paddd       m3, m15
+    psubd       m4, m13
+    psubd       m6, m12
+    paddd       m4, m15
+    paddd       m6, m15
+    mova        m15, [rsp + 4 * mmsize]
+    mova        m12, m8                 ; combine: dst = (E +/- O) >> IDCT_SHIFT
+    psubd       m8, m7
+    psrad       m8, IDCT_SHIFT
+    mova        m11, [rsp + 3 * mmsize]
+    paddd       m15, m0
+    psrad       m15, IDCT_SHIFT
+    psubd       m0, [rsp + 4 * mmsize]
+    psrad       m0, IDCT_SHIFT
+    paddd       m12, m7
+    paddd       m11, m1
+    mova        m14, [rsp + 2 * mmsize]
+    psrad       m11, IDCT_SHIFT
+    packssdw    m15, m11
+    psubd       m1, [rsp + 3 * mmsize]
+    psrad       m1, IDCT_SHIFT
+    mova        m11, [rsp + 1 * mmsize]
+    paddd       m14, m2
+    psrad       m14, IDCT_SHIFT
+    packssdw    m0, m1
+    psrad       m12, IDCT_SHIFT
+    psubd       m2, [rsp + 2 * mmsize]
+    paddd       m11, m3
+    mova        m13, [rsp + 0 * mmsize]
+    psrad       m11, IDCT_SHIFT
+    packssdw    m14, m11
+    mova        m11, m6
+    psubd       m6, m5
+    paddd       m13, m4
+    psrad       m13, IDCT_SHIFT
+    mova        m1, m15
+    paddd       m11, m5
+    psrad       m11, IDCT_SHIFT
+    packssdw    m13, m11
+    mova        m11, m10
+    psubd       m10, m9
+    psrad       m10, IDCT_SHIFT
+    packssdw    m8, m10
+    psrad       m6, IDCT_SHIFT
+    psubd       m4, [rsp + 0 * mmsize]
+    paddd       m11, m9
+    psrad       m11, IDCT_SHIFT
+    packssdw    m12, m11
+    punpcklwd   m1, m14                 ; final transpose back to row order
+    mova        m5, m13
+    psrad       m4, IDCT_SHIFT
+    packssdw    m4, m6
+    psubd       m3, [rsp + 1 * mmsize]
+    psrad       m2, IDCT_SHIFT
+    mova        m6, m8
+    psrad       m3, IDCT_SHIFT
+    punpcklwd   m5, m12
+    packssdw    m2, m3
+    punpcklwd   m6, m4
+    punpckhwd   m8, m4
+    mova        m4, m1
+    mova        m3, m2
+    punpckhdq   m1, m5
+    punpckldq   m4, m5
+    punpcklwd   m3, m0
+    punpckhwd   m2, m0
+    mova        m0, m6
+    lea         r2, [r2 + r2]           ; dstStride: int16_t units -> bytes
+    lea         r4, [r2 + r2]
+    lea         r3, [r4 + r2]           ; r3 = 3*stride
+    lea         r4, [r4 + r3]           ; r4 = 5*stride
+    lea         r0, [r4 + r2 * 2]       ; r0 = 7*stride (input pointer is dead by now)
+    movq        [r1], m4                ; store 8 rows, 8 bytes (4 coeffs) per movq/movhps
+    punpckhwd   m15, m14
+    movhps      [r1 + r2], m4
+    punpckhdq   m0, m3
+    movq        [r1 + r2 * 2], m1
+    punpckhwd   m13, m12
+    movhps      [r1 + r3], m1
+    mova        m1, m6
+    punpckldq   m1, m3
+    movq        [r1 + 8], m1
+    movhps      [r1 + r2 + 8], m1
+    movq        [r1 + r2 * 2 + 8], m0
+    movhps      [r1 + r3 + 8], m0
+    mova        m0, m15
+    punpckhdq   m15, m13
+    punpckldq   m0, m13
+    movq        [r1 + r2 * 4], m0
+    movhps      [r1 + r4], m0
+    mova        m0, m8
+    punpckhdq   m8, m2
+    movq        [r1 + r3 * 2], m15
+    punpckldq   m0, m2
+    movhps      [r1 + r0], m15
+    movq        [r1 + r2 * 4 + 8], m0
+    movhps      [r1 + r4 + 8], m0
+    movq        [r1 + r3 * 2 + 8], m8
+    movhps      [r1 + r0 + 8], m8
+    RET
+%endif
+
+;-------------------------------------------------------
+; void idct8(const int16_t* src, int16_t* dst, intptr_t dstStride)
+;-------------------------------------------------------
+;------------------------------------------------------------------
+; Pass 1 (column transform) of the 8x8 inverse DCT, SSSE3.
+; Register contract (set up by the idct8 caller):
+;   r0 = input coefficients, 16-byte row stride
+;   r5 = scratch output buffer, 16-byte row stride
+;   r4 = tab_idct8_3 (odd-part coefficient table)
+;   r6 = tab_dct4   (even-part coefficient table)
+; Processes 4 columns per call; sums are rounded (add 64), shifted
+; right by 7 and saturated to 16 bits before being stored.
+; NOTE(review): "patial" looks like a typo for "partial", but the
+; name is used identically at every call site, so it is kept as-is.
+;------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal patial_butterfly_inverse_internal_pass1
+    ; load even input rows 0/2/4/6 and split into EE/EO products
+    movh        m0, [r0]
+    movhps      m0, [r0 + 2 * 16]
+    movh        m1, [r0 + 4 * 16]
+    movhps      m1, [r0 + 6 * 16]
+
+    punpckhwd   m2, m0, m1                  ; [2 6]
+    punpcklwd   m0, m1                      ; [0 4]
+    pmaddwd     m1, m0, [r6]                ; EE[0]
+    pmaddwd     m0, [r6 + 32]               ; EE[1]
+    pmaddwd     m3, m2, [r6 + 16]           ; EO[0]
+    pmaddwd     m2, [r6 + 48]               ; EO[1]
+
+    paddd       m4, m1, m3                  ; E[0]
+    psubd       m1, m3                      ; E[3]
+    paddd       m3, m0, m2                  ; E[1]
+    psubd       m0, m2                      ; E[2]
+
+    ;E[K] = E[k] + add
+    ; rounding constant 64 = 1 << (7 - 1), matching the psrad-by-7 below
+    mova        m5, [pd_64]
+    paddd       m0, m5
+    paddd       m1, m5
+    paddd       m3, m5
+    paddd       m4, m5
+
+    ; load odd input rows 1/3/5/7
+    movh        m2, [r0 + 16]
+    movhps      m2, [r0 + 5 * 16]
+    movh        m5, [r0 + 3 * 16]
+    movhps      m5, [r0 + 7 * 16]
+    punpcklwd   m6, m2, m5                  ;[1 3]
+    punpckhwd   m2, m5                      ;[5 7]
+
+    pmaddwd     m5, m6, [r4]
+    pmaddwd     m7, m2, [r4 + 16]
+    paddd       m5, m7                      ; O[0]
+
+    ; dst[0] = (E[0] + O[0]) >> 7, dst[7] = (E[0] - O[0]) >> 7
+    paddd       m7, m4, m5
+    psrad       m7, 7
+
+    psubd       m4, m5
+    psrad       m4, 7
+
+    packssdw    m7, m4
+    movh        [r5 + 0 * 16], m7
+    movhps      [r5 + 7 * 16], m7
+
+    pmaddwd     m5, m6, [r4 + 32]
+    pmaddwd     m4, m2, [r4 + 48]
+    paddd       m5, m4                      ; O[1]
+
+    ; dst[1] / dst[6]
+    paddd       m4, m3, m5
+    psrad       m4, 7
+
+    psubd       m3, m5
+    psrad       m3, 7
+
+    packssdw    m4, m3
+    movh        [r5 + 1 * 16], m4
+    movhps      [r5 + 6 * 16], m4
+
+    pmaddwd     m5, m6, [r4 + 64]
+    pmaddwd     m4, m2, [r4 + 80]
+    paddd       m5, m4                      ; O[2]
+
+    ; dst[2] / dst[5]
+    paddd       m4, m0, m5
+    psrad       m4, 7
+
+    psubd       m0, m5
+    psrad       m0, 7
+
+    packssdw    m4, m0
+    movh        [r5 + 2 * 16], m4
+    movhps      [r5 + 5 * 16], m4
+
+    pmaddwd     m5, m6, [r4 + 96]
+    pmaddwd     m4, m2, [r4 + 112]
+    paddd       m5, m4                      ; O[3]
+
+    ; dst[3] / dst[4]
+    paddd       m4, m1, m5
+    psrad       m4, 7
+
+    psubd       m1, m5
+    psrad       m1, 7
+
+    packssdw    m4, m1
+    movh        [r5 + 3 * 16], m4
+    movhps      [r5 + 4 * 16], m4
+
+    ret
+
+;------------------------------------------------------------------
+; Transform one row in pass 2 of the 8x8 inverse DCT (SSSE3).
+; %1 = register holding the 8 input coefficients of the row; on exit
+;      %1 holds the 8 outputs, rounded, shifted by IDCT_SHIFT and
+;      saturated to 16 bits.
+; Expects: m6 = rounding constant, r4 = tab_idct8_2, r6 = pb_idct8odd
+; (both set by the idct8 caller). Clobbers m4 and m5.
+;------------------------------------------------------------------
+%macro PARTIAL_BUTTERFLY_PROCESS_ROW 1
+    ; even part: only the high qword of the phsubd result is consumed
+    ; by punpckhqdq, so m5's prior contents are irrelevant scratch
+    pshufb      m4, %1, [pb_idct8even]
+    pmaddwd     m4, [tab_idct8_1]
+    phsubd      m5, m4
+    pshufd      m4, m4, 0x4E
+    phaddd      m4, m4
+    punpckhqdq  m4, m5                      ;m4 = dd e[ 0 1 2 3]
+    paddd       m4, m6
+
+    ; odd part
+    pshufb      %1, %1, [r6]
+    pmaddwd     m5, %1, [r4]
+    pmaddwd     %1, [r4 + 16]
+    phaddd      m5, %1                      ; m5 = dd O[0, 1, 2, 3]
+
+    ; outputs: (e + o) >> shift packed with reversed (e - o) >> shift
+    paddd       %1, m4, m5
+    psrad       %1, IDCT_SHIFT
+
+    psubd       m4, m5
+    psrad       m4, IDCT_SHIFT
+    pshufd      m4, m4, 0x1B
+
+    packssdw    %1, m4
+%endmacro
+
+;------------------------------------------------------------------
+; Pass 2 (row transform) of the 8x8 inverse DCT, SSSE3.
+; Transforms 4 rows from the scratch buffer at r5 (16-byte stride)
+; and stores them to r1 using the destination byte stride in r2
+; (r3 = 3 * r2, prepared by the caller).
+;------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal patial_butterfly_inverse_internal_pass2
+    mova        m0, [r5]
+    PARTIAL_BUTTERFLY_PROCESS_ROW m0
+    movu        [r1], m0
+
+    mova        m2, [r5 + 16]
+    PARTIAL_BUTTERFLY_PROCESS_ROW m2
+    movu        [r1 + r2], m2
+
+    mova        m1, [r5 + 32]
+    PARTIAL_BUTTERFLY_PROCESS_ROW m1
+    movu        [r1 + 2 * r2], m1
+
+    mova        m3, [r5 + 48]
+    PARTIAL_BUTTERFLY_PROCESS_ROW m3
+    movu        [r1 + r3], m3
+    ret
+
+;------------------------------------------------------------------
+; void idct8(const int16_t* src, int16_t* dst, intptr_t dstStride)
+; SSSE3 driver: two column-pass calls (low/high 4 columns) into a
+; 64-byte-aligned stack scratch buffer, then two row-pass calls
+; writing the final rows to dst. IDCT_SHIFT / IDCT_ROUND are defined
+; elsewhere in this file (bit-depth dependent).
+;------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal idct8, 3,7,8 ;,0-16*mmsize
+    ; align the stack to 64 bytes; the original rsp is saved just
+    ; above the 16*mmsize scratch area and restored before RET
+    mov         r5, rsp
+    sub         rsp, 16*mmsize + gprsize
+    and         rsp, ~(64-1)
+    mov         [rsp + 16*mmsize], r5
+    mov         r5, rsp
+
+    lea         r4, [tab_idct8_3]
+    lea         r6, [tab_dct4]
+
+    call        patial_butterfly_inverse_internal_pass1
+
+    ; second half of the columns (8 bytes = 4 int16 further in)
+    add         r0, 8
+    add         r5, 8
+
+    call        patial_butterfly_inverse_internal_pass1
+
+    mova        m6, [pd_ %+ IDCT_ROUND]
+    add         r2, r2                      ; element stride -> byte stride
+    lea         r3, [r2 * 3]
+    lea         r4, [tab_idct8_2]
+    lea         r6, [pb_idct8odd]
+    sub         r5, 8
+
+    call        patial_butterfly_inverse_internal_pass2
+
+    ; rows 4..7
+    lea         r1, [r1 + 4 * r2]
+    add         r5, 64
+
+    call        patial_butterfly_inverse_internal_pass2
+
+    ; restore origin stack pointer
+    mov         rsp, [rsp + 16*mmsize]
+    RET
+
+
+;-----------------------------------------------------------------------------
+; void denoise_dct(int16_t* dct, uint32_t* sum, uint16_t* offset, int size)
+;-----------------------------------------------------------------------------
+;------------------------------------------------------------------
+; void denoise_dct(int16_t* dct, uint32_t* sum, uint16_t* offset, int size)
+; SSE4 version, 8 coefficients per iteration (size >> 3 loops).
+; Per coefficient: sum[i] += abs(dct[i]);
+; dct[i] = copysign(saturating_max(abs(dct[i]) - offset[i], 0), dct[i])
+; (psubusw saturates the subtraction at 0; the pcmpgtw/pand pair then
+; keeps only strictly-positive 16-bit results).
+;------------------------------------------------------------------
+INIT_XMM sse4
+cglobal denoise_dct, 4, 4, 6
+    pxor     m5,  m5
+    shr      r3d, 3
+.loop:
+    mova     m0, [r0]
+    pabsw    m1, m0
+
+    ; widen the 8 absolute values to 32 bits and accumulate into sum[]
+    mova     m2, [r1]
+    pmovsxwd m3, m1
+    paddd    m2, m3
+    mova     [r1], m2
+    mova     m2, [r1 + 16]
+    psrldq   m3, m1, 8
+    pmovsxwd m4, m3
+    paddd    m2, m4
+    mova     [r1 + 16], m2
+
+    ; subtract the offset (unsigned saturating) and restore the sign
+    movu     m3, [r2]
+    psubusw  m1, m3
+    pcmpgtw  m4, m1, m5
+    pand     m1, m4
+    psignw   m1, m0
+    mova     [r0], m1
+    add      r0, 16
+    add      r1, 32
+    add      r2, 16
+    dec      r3d
+    jnz .loop
+    RET
+
+;------------------------------------------------------------------
+; void denoise_dct(int16_t* dct, uint32_t* sum, uint16_t* offset, int size)
+; AVX2 version, 16 coefficients per iteration (size >> 4 loops).
+; Same per-coefficient computation as the SSE4 version above.
+;------------------------------------------------------------------
+INIT_YMM avx2
+cglobal denoise_dct, 4, 4, 6
+    pxor     m5,  m5
+    shr      r3d, 4
+.loop:
+    movu     m0, [r0]
+    pabsw    m1, m0
+    ; accumulate the 16 absolute values into sum[] as 32-bit lanes
+    movu     m2, [r1]
+    pmovsxwd m4, xm1
+    paddd    m2, m4
+    movu     [r1], m2
+    vextracti128 xm4, m1, 1
+    movu     m2, [r1 + 32]
+    pmovsxwd m3, xm4
+    paddd    m2, m3
+    movu     [r1 + 32], m2
+    ; subtract the offset (unsigned saturating) and restore the sign
+    movu     m3, [r2]
+    psubusw  m1, m3
+    pcmpgtw  m4, m1, m5
+    pand     m1, m4
+    psignw   m1, m0
+    movu     [r0], m1
+    add      r0, 32
+    add      r1, 64
+    add      r2, 32
+    dec      r3d
+    jnz .loop
+    RET
+
+%if ARCH_X86_64 == 1
+;------------------------------------------------------------------
+; One output row of dct8 pass 1 (AVX2).
+; %1 = byte offset into tab_dct8 (via r6), %2 = store offset in the
+; stack scratch at r5, %3/%4 = registers holding the butterfly
+; sums/differences. Expects m5 = rounding constant; clobbers m0/m2.
+;------------------------------------------------------------------
+%macro DCT8_PASS_1 4
+    vpbroadcastq    m0,                 [r6 + %1]
+    pmaddwd         m2,                 m%3, m0
+    pmaddwd         m0,                 m%4
+    phaddd          m2,                 m0
+    paddd           m2,                 m5
+    psrad           m2,                 DCT_SHIFT
+    packssdw        m2,                 m2
+    vpermq          m2,                 m2, 0x08
+    mova            [r5 + %2],          xm2
+%endmacro
+
+;------------------------------------------------------------------
+; Two output rows of dct8 pass 2 (AVX2).
+; %1/%2 = byte offsets into tab_dct8 (via r6) for the two rows.
+; Reads the pass-1 results in m0..m3, m5 = rounding constant;
+; leaves the two packed rows in m10 (via vpermq 0xD8).
+; Clobbers m4, m6..m9.
+;------------------------------------------------------------------
+%macro DCT8_PASS_2 2
+    vbroadcasti128  m4,                 [r6 + %1]
+    pmaddwd         m6,                 m0, m4
+    pmaddwd         m7,                 m1, m4
+    pmaddwd         m8,                 m2, m4
+    pmaddwd         m9,                 m3, m4
+    phaddd          m6,                 m7
+    phaddd          m8,                 m9
+    phaddd          m6,                 m8
+    paddd           m6,                 m5
+    psrad           m6,                 DCT_SHIFT2
+
+    vbroadcasti128  m4,                 [r6 + %2]
+    pmaddwd         m10,                m0, m4
+    pmaddwd         m7,                 m1, m4
+    pmaddwd         m8,                 m2, m4
+    pmaddwd         m9,                 m3, m4
+    phaddd          m10,                m7
+    phaddd          m8,                 m9
+    phaddd          m10,                m8
+    paddd           m10,                m5
+    psrad           m10,                DCT_SHIFT2
+
+    packssdw        m6,                 m10
+    vpermq          m10,                m6, 0xD8
+
+%endmacro
+
+;------------------------------------------------------------------
+; void dct8(const int16_t* src, int16_t* dst, intptr_t srcStride)
+; AVX2 8x8 forward DCT: pass 1 into an 8*16-byte stack scratch,
+; pass 2 from the scratch to dst. The pass-1 rounding constant must
+; be 1 << (DCT_SHIFT - 1); the 12-bit branch previously loaded pd_16
+; with DCT_SHIFT == 6, which biases the rounding — fixed to pd_32,
+; matching the dct16 (shift 7 / pd_64) and dct32 (shift 8 / pd_128)
+; kernels in this file.
+;------------------------------------------------------------------
+INIT_YMM avx2
+cglobal dct8, 3, 7, 11, 0-8*16
+%if BIT_DEPTH == 12
+    %define         DCT_SHIFT          6
+    vbroadcasti128  m5,                [pd_32]
+%elif BIT_DEPTH == 10
+    %define         DCT_SHIFT          4
+    vbroadcasti128  m5,                [pd_8]
+%elif BIT_DEPTH == 8
+    %define         DCT_SHIFT          2
+    vbroadcasti128  m5,                [pd_2]
+%else
+    %error Unsupported BIT_DEPTH!
+%endif
+%define             DCT_SHIFT2         9
+
+    add             r2d,               r2d
+    lea             r3,                [r2 * 3]
+    lea             r4,                [r0 + r2 * 4]
+    mov             r5,                rsp
+    lea             r6,                [tab_dct8]
+    mova            m6,                [dct8_shuf]
+
+    ;pass1
+    ; rows 0..3 in the low lanes, rows 4..7 in the high lanes
+    mova            xm0,               [r0]
+    vinserti128     m0,                m0, [r4], 1
+    mova            xm1,               [r0 + r2]
+    vinserti128     m1,                m1, [r4 + r2], 1
+    mova            xm2,               [r0 + r2 * 2]
+    vinserti128     m2,                m2, [r4 + r2 * 2], 1
+    mova            xm3,               [r0 + r3]
+    vinserti128     m3,                m3,  [r4 + r3], 1
+
+    punpcklqdq      m4,                m0, m1
+    punpckhqdq      m0,                m1
+    punpcklqdq      m1,                m2, m3
+    punpckhqdq      m2,                m3
+
+    pshufb          m0,                m6
+    pshufb          m2,                m6
+
+    ; butterfly: sums (E) and differences (O)
+    paddw           m3,                m4, m0
+    paddw           m7,                m1, m2
+
+    psubw           m4,                m0
+    psubw           m1,                m2
+
+    DCT8_PASS_1     0 * 16,             0 * 16, 3, 7
+    DCT8_PASS_1     1 * 16,             2 * 16, 4, 1
+    DCT8_PASS_1     2 * 16,             4 * 16, 3, 7
+    DCT8_PASS_1     3 * 16,             6 * 16, 4, 1
+    DCT8_PASS_1     4 * 16,             1 * 16, 3, 7
+    DCT8_PASS_1     5 * 16,             3 * 16, 4, 1
+    DCT8_PASS_1     6 * 16,             5 * 16, 3, 7
+    DCT8_PASS_1     7 * 16,             7 * 16, 4, 1
+
+    ;pass2
+    ; pass-2 rounding: 256 = 1 << (DCT_SHIFT2 - 1)
+    vbroadcasti128  m5,                [pd_256]
+
+    mova            m0,                [r5]
+    mova            m1,                [r5 + 32]
+    mova            m2,                [r5 + 64]
+    mova            m3,                [r5 + 96]
+
+    DCT8_PASS_2     0 * 16, 1 * 16
+    movu            [r1],              m10
+    DCT8_PASS_2     2 * 16, 3 * 16
+    movu            [r1 + 32],         m10
+    DCT8_PASS_2     4 * 16, 5 * 16
+    movu            [r1 + 64],         m10
+    DCT8_PASS_2     6 * 16, 7 * 16
+    movu            [r1 + 96],         m10
+    RET
+
+;------------------------------------------------------------------
+; One even-part output row of dct16 pass 1 (AVX2).
+; %1 = byte offset into tab_dct16_1 (via r7, biased by +8*16),
+; %2 = store offset in the stack scratch at r5.
+; Reads m0/m2 (phaddw/phsubw results), m9 = rounding constant;
+; clobbers m4/m6/m7 and stores one packed xmm row.
+;------------------------------------------------------------------
+%macro DCT16_PASS_1_E 2
+    vpbroadcastq    m7,                [r7 + %1]
+
+    pmaddwd         m4,                m0, m7
+    pmaddwd         m6,                m2, m7
+    phaddd          m4,                m6
+
+    paddd           m4,                m9
+    psrad           m4,                DCT_SHIFT
+
+    packssdw        m4,                m4
+    vpermq          m4,                m4, 0x08
+
+    mova            [r5 + %2],         xm4
+%endmacro
+
+;------------------------------------------------------------------
+; One odd-part output row of dct16 pass 1 (AVX2).
+; %1 = byte offset into tab_dct16_1 (via r7), %2 = store offset in
+; the scratch at r5. Reads the difference rows in m0/m2/m4/m6,
+; m9 = rounding constant; clobbers m7/m10/m11/m12.
+;------------------------------------------------------------------
+%macro DCT16_PASS_1_O 2
+    vbroadcasti128  m7,                [r7 + %1]
+
+    pmaddwd         m10,               m0, m7
+    pmaddwd         m11,               m2, m7
+    phaddd          m10,               m11                 ; [d0 d0 d1 d1 d4 d4 d5 d5]
+
+    pmaddwd         m11,               m4, m7
+    pmaddwd         m12,               m6, m7
+    phaddd          m11,               m12                 ; [d2 d2 d3 d3 d6 d6 d7 d7]
+
+    phaddd          m10,               m11                 ; [d0 d1 d2 d3 d4 d5 d6 d7]
+
+    paddd           m10,               m9
+    psrad           m10,               DCT_SHIFT
+
+    packssdw        m10,               m10                 ; [w0 w1 w2 w3 - - - - w4 w5 w6 w7 - - - -]
+    vpermq          m10,               m10, 0x08
+
+    mova            [r5 + %2],         xm10
+%endmacro
+
+;------------------------------------------------------------------
+; Two output rows of dct16 pass 2 (AVX2).
+; %1/%2 = byte offsets into tab_dct16_1 (r7) and tab_dct16_2 (r8).
+; Reads the eight pass-1 row registers m0..m7, m9 = rounding
+; constant; leaves the packed results in xm15 (first row) and
+; xm14 (second row). Clobbers m8, m10..m14.
+;------------------------------------------------------------------
+%macro DCT16_PASS_2 2
+    vbroadcasti128  m8,                [r7 + %1]
+    vbroadcasti128  m13,               [r8 + %1]
+
+    pmaddwd         m10,               m0, m8
+    pmaddwd         m11,               m1, m13
+    paddd           m10,               m11
+
+    pmaddwd         m11,               m2, m8
+    pmaddwd         m12,               m3, m13
+    paddd           m11,               m12
+    phaddd          m10,               m11
+
+    pmaddwd         m11,               m4, m8
+    pmaddwd         m12,               m5, m13
+    paddd           m11,               m12
+
+    pmaddwd         m12,               m6, m8
+    pmaddwd         m13,               m7, m13
+    paddd           m12,               m13
+    phaddd          m11,               m12
+
+    phaddd          m10,               m11
+    paddd           m10,               m9
+    psrad           m10,               DCT_SHIFT2
+
+
+    vbroadcasti128  m8,                [r7 + %2]
+    vbroadcasti128  m13,               [r8 + %2]
+
+    pmaddwd         m14,               m0, m8
+    pmaddwd         m11,               m1, m13
+    paddd           m14,               m11
+
+    pmaddwd         m11,               m2, m8
+    pmaddwd         m12,               m3, m13
+    paddd           m11,               m12
+    phaddd          m14,               m11
+
+    pmaddwd         m11,               m4, m8
+    pmaddwd         m12,               m5, m13
+    paddd           m11,               m12
+
+    pmaddwd         m12,               m6, m8
+    pmaddwd         m13,               m7, m13
+    paddd           m12,               m13
+    phaddd          m11,               m12
+
+    phaddd          m14,               m11
+    paddd           m14,               m9
+    psrad           m14,               DCT_SHIFT2
+
+    ; pack both rows and split them back out to xm15 / xm14
+    packssdw        m10,               m14
+    vextracti128    xm14,              m10,       1
+    movlhps         xm15,              xm10,      xm14
+    movhlps         xm14,              xm10
+%endmacro
+;------------------------------------------------------------------
+; void dct16(const int16_t* src, int16_t* dst, intptr_t srcStride)
+; AVX2 16x16 forward DCT. Pass 1 runs twice (8 source rows per
+; iteration) into a 16*mmsize stack scratch; pass 2 runs twice
+; (8 output columns per iteration) from the scratch to dst.
+; Rounding constants are 1 << (shift - 1) for each bit depth.
+;------------------------------------------------------------------
+INIT_YMM avx2
+cglobal dct16, 3, 9, 16, 0-16*mmsize
+%if BIT_DEPTH == 12
+    %define         DCT_SHIFT          7
+    vbroadcasti128  m9,                [pd_64]
+%elif BIT_DEPTH == 10
+    %define         DCT_SHIFT          5
+    vbroadcasti128  m9,                [pd_16]
+%elif BIT_DEPTH == 8
+    %define         DCT_SHIFT          3
+    vbroadcasti128  m9,                [pd_4]
+%else
+    %error Unsupported BIT_DEPTH!
+%endif
+%define             DCT_SHIFT2         10
+
+    add             r2d,               r2d
+
+    mova            m13,               [dct16_shuf1]
+    mova            m14,               [dct16_shuf2]
+    ; tables are addressed with signed offsets, hence the +8*16 bias
+    lea             r7,                [tab_dct16_1 + 8 * 16]
+    lea             r8,                [tab_dct16_2 + 8 * 16]
+    lea             r3,                [r2 * 3]
+    mov             r5,                rsp
+    mov             r4d,               2                   ; Each iteration process 8 rows, so 16/8 iterations
+
+.pass1:
+    lea             r6,                [r0 + r2 * 4]
+
+    movu            m2,                [r0]
+    movu            m1,                [r6]
+    vperm2i128      m0,                m2, m1, 0x20        ; [row0lo  row4lo]
+    vperm2i128      m1,                m2, m1, 0x31        ; [row0hi  row4hi]
+
+    movu            m4,                [r0 + r2]
+    movu            m3,                [r6 + r2]
+    vperm2i128      m2,                m4, m3, 0x20        ; [row1lo  row5lo]
+    vperm2i128      m3,                m4, m3, 0x31        ; [row1hi  row5hi]
+
+    movu            m6,                [r0 + r2 * 2]
+    movu            m5,                [r6 + r2 * 2]
+    vperm2i128      m4,                m6, m5, 0x20        ; [row2lo  row6lo]
+    vperm2i128      m5,                m6, m5, 0x31        ; [row2hi  row6hi]
+
+    movu            m8,                [r0 + r3]
+    movu            m7,                [r6 + r3]
+    vperm2i128      m6,                m8, m7, 0x20        ; [row3lo  row7lo]
+    vperm2i128      m7,                m8, m7, 0x31        ; [row3hi  row7hi]
+
+    ; reverse the high halves so the butterfly pairs element i with 15-i
+    pshufb          m1,                m13
+    pshufb          m3,                m13
+    pshufb          m5,                m13
+    pshufb          m7,                m13
+
+    paddw           m8,                m0, m1              ;E
+    psubw           m0,                m1                  ;O
+
+    paddw           m1,                m2, m3              ;E
+    psubw           m2,                m3                  ;O
+
+    paddw           m3,                m4, m5              ;E
+    psubw           m4,                m5                  ;O
+
+    paddw           m5,                m6, m7              ;E
+    psubw           m6,                m7                  ;O
+
+    DCT16_PASS_1_O  -7 * 16,           1 * 32
+    DCT16_PASS_1_O  -5 * 16,           3 * 32
+    DCT16_PASS_1_O  -3 * 16,           1 * 32 + 16
+    DCT16_PASS_1_O  -1 * 16,           3 * 32 + 16
+    DCT16_PASS_1_O  1 * 16,            5 * 32
+    DCT16_PASS_1_O  3 * 16,            7 * 32
+    DCT16_PASS_1_O  5 * 16,            5 * 32 + 16
+    DCT16_PASS_1_O  7 * 16,            7 * 32 + 16
+
+    ; even part: EE (phaddw) rows then EO (phsubw) rows
+    pshufb          m8,                m14
+    pshufb          m1,                m14
+    phaddw          m0,                m8, m1
+
+    pshufb          m3,                m14
+    pshufb          m5,                m14
+    phaddw          m2,                m3, m5
+
+    DCT16_PASS_1_E  -8 * 16,           0 * 32
+    DCT16_PASS_1_E  -4 * 16,           0 * 32 + 16
+    DCT16_PASS_1_E  0 * 16,            4 * 32
+    DCT16_PASS_1_E  4 * 16,            4 * 32 + 16
+
+    phsubw          m0,                m8, m1
+    phsubw          m2,                m3, m5
+
+    DCT16_PASS_1_E  -6 * 16,           2 * 32
+    DCT16_PASS_1_E  -2 * 16,           2 * 32 + 16
+    DCT16_PASS_1_E  2 * 16,            6 * 32
+    DCT16_PASS_1_E  6 * 16,            6 * 32 + 16
+
+    lea             r0,                [r0 + 8 * r2]
+    add             r5,                256
+
+    dec             r4d
+    jnz             .pass1
+
+    mov             r5,                rsp
+    mov             r4d,               2
+    mov             r2d,               32                  ; dst byte stride (16 * int16)
+    lea             r3,                [r2 * 3]
+    ; pass-2 rounding: 512 = 1 << (DCT_SHIFT2 - 1)
+    vbroadcasti128  m9,                [pd_512]
+
+.pass2:
+    mova            m0,                [r5 + 0 * 32]        ; [row0lo  row4lo]
+    mova            m1,                [r5 + 8 * 32]        ; [row0hi  row4hi]
+
+    mova            m2,                [r5 + 1 * 32]        ; [row1lo  row5lo]
+    mova            m3,                [r5 + 9 * 32]        ; [row1hi  row5hi]
+
+    mova            m4,                [r5 + 2 * 32]        ; [row2lo  row6lo]
+    mova            m5,                [r5 + 10 * 32]       ; [row2hi  row6hi]
+
+    mova            m6,                [r5 + 3 * 32]        ; [row3lo  row7lo]
+    mova            m7,                [r5 + 11 * 32]       ; [row3hi  row7hi]
+
+    DCT16_PASS_2    -8 * 16, -7 * 16
+    movu            [r1],              xm15
+    movu            [r1 + r2],         xm14
+
+    DCT16_PASS_2    -6 * 16, -5 * 16
+    movu            [r1 + r2 * 2],     xm15
+    movu            [r1 + r3],         xm14
+
+    lea             r6,                [r1 + r2 * 4]
+    DCT16_PASS_2    -4 * 16, -3 * 16
+    movu            [r6],              xm15
+    movu            [r6 + r2],         xm14
+
+    DCT16_PASS_2    -2 * 16, -1 * 16
+    movu            [r6 + r2 * 2],     xm15
+    movu            [r6 + r3],         xm14
+
+    lea             r6,                [r6 + r2 * 4]
+    DCT16_PASS_2    0 * 16, 1 * 16
+    movu            [r6],              xm15
+    movu            [r6 + r2],         xm14
+
+    DCT16_PASS_2    2 * 16, 3 * 16
+    movu            [r6 + r2 * 2],     xm15
+    movu            [r6 + r3],         xm14
+
+    lea             r6,                [r6 + r2 * 4]
+    DCT16_PASS_2    4 * 16, 5 * 16
+    movu            [r6],              xm15
+    movu            [r6 + r2],         xm14
+
+    DCT16_PASS_2    6 * 16, 7 * 16
+    movu            [r6 + r2 * 2],     xm15
+    movu            [r6 + r3],         xm14
+
+    add             r1,                16
+    add             r5,                128
+
+    dec             r4d
+    jnz             .pass2
+    RET
+
+;------------------------------------------------------------------
+; One output row of dct32 pass 1 (AVX2).
+; %1 = byte offset into tab_dct32_1 (via r7), %2 = store offset in
+; the scratch at r5, %3/%4 = registers with the even-part data.
+; Reads the odd-part rows in m4..m7, m9 = rounding constant;
+; clobbers m8/m10..m14. Stores two qwords (low half at [r5+%2],
+; high lane at [r5+%2+64]).
+;------------------------------------------------------------------
+%macro DCT32_PASS_1 4
+    vbroadcasti128  m8,                [r7 + %1]
+
+    pmaddwd         m11,               m%3, m8
+    pmaddwd         m12,               m%4, m8
+    phaddd          m11,               m12
+
+    vbroadcasti128  m8,                [r7 + %1 + 32]
+    vbroadcasti128  m10,               [r7 + %1 + 48]
+    pmaddwd         m12,               m5, m8
+    pmaddwd         m13,               m6, m10
+    phaddd          m12,               m13
+
+    pmaddwd         m13,               m4, m8
+    pmaddwd         m14,               m7, m10
+    phaddd          m13,               m14
+
+    phaddd          m12,               m13
+
+    phaddd          m11,               m12
+    paddd           m11,               m9
+    psrad           m11,               DCT_SHIFT
+
+    vpermq          m11,               m11, 0xD8
+    packssdw        m11,               m11
+    movq            [r5 + %2],         xm11
+    vextracti128    xm10,              m11, 1
+    movq            [r5 + %2 + 64],    xm10
+%endmacro
+
+;------------------------------------------------------------------
+; One output row of dct32 pass 2 (AVX2).
+; %1 = byte offset into tab_dct32_1 (r7) and tab_dct32_2 (r8).
+; Reads the pass-1 data in m0..m7, m9 (xm9) = rounding constant;
+; leaves the four packed results in the low qword of xm11.
+; Clobbers m8/m10..m14.
+;------------------------------------------------------------------
+%macro DCT32_PASS_2 1
+    mova            m8,                [r7 + %1]
+    mova            m10,               [r8 + %1]
+    pmaddwd         m11,               m0, m8
+    pmaddwd         m12,               m1, m10
+    paddd           m11,               m12
+
+    pmaddwd         m12,               m2, m8
+    pmaddwd         m13,               m3, m10
+    paddd           m12,               m13
+
+    phaddd          m11,               m12
+
+    pmaddwd         m12,               m4, m8
+    pmaddwd         m13,               m5, m10
+    paddd           m12,               m13
+
+    pmaddwd         m13,               m6, m8
+    pmaddwd         m14,               m7, m10
+    paddd           m13,               m14
+
+    phaddd          m12,               m13
+
+    phaddd          m11,               m12
+    ; fold the upper lane into the lower before rounding
+    vextracti128    xm10,              m11, 1
+    paddd           xm11,              xm10
+
+    paddd           xm11,               xm9
+    psrad           xm11,               DCT_SHIFT2
+    packssdw        xm11,               xm11
+
+%endmacro
+
+;------------------------------------------------------------------
+; void dct32(const int16_t* src, int16_t* dst, intptr_t srcStride)
+; AVX2 32x32 forward DCT. Pass 1 runs 8 iterations (4 source rows
+; each) into a 64*mmsize stack scratch; pass 2 runs 8 iterations
+; producing 4 output coefficients for each of the 32 rows per
+; iteration. Rounding constants are 1 << (shift - 1).
+;------------------------------------------------------------------
+INIT_YMM avx2
+cglobal dct32, 3, 9, 16, 0-64*mmsize
+%if BIT_DEPTH == 12
+    %define         DCT_SHIFT          8
+    vpbroadcastq    m9,                [pd_128]
+%elif BIT_DEPTH == 10
+    %define         DCT_SHIFT          6
+    vpbroadcastq    m9,                [pd_32]
+%elif BIT_DEPTH == 8
+    %define         DCT_SHIFT          4
+    vpbroadcastq    m9,                [pd_8]
+%else
+    %error Unsupported BIT_DEPTH!
+%endif
+%define             DCT_SHIFT2         11
+
+    add             r2d,               r2d
+
+    lea             r7,                [tab_dct32_1]
+    lea             r8,                [tab_dct32_2]
+    lea             r3,                [r2 * 3]
+    mov             r5,                rsp
+    mov             r4d,               8
+    mova            m15,               [dct16_shuf1]
+
+.pass1:
+    ; row 0: reverse the upper 16 coefficients and form O (diff) / E (sum)
+    movu            m2,                [r0]
+    movu            m1,                [r0 + 32]
+    pshufb          m1,                m15
+    vpermq          m1,                m1, 0x4E
+    psubw           m7,                m2, m1
+    paddw           m2,                m1
+
+    ; row 2 (same treatment), then the EE/EO split of rows 0 and 2
+    movu            m1,                [r0 + r2 * 2]
+    movu            m0,                [r0 + r2 * 2 + 32]
+    pshufb          m0,                m15
+    vpermq          m0,                m0, 0x4E
+    psubw           m8,                m1, m0
+    paddw           m1,                m0
+    vperm2i128      m0,                m2, m1, 0x20        ; [row0lo  row2lo] for E
+    vperm2i128      m3,                m2, m1, 0x31        ; [row0hi  row2hi] for E
+    pshufb          m3,                m15
+    psubw           m1,                m0, m3
+    paddw           m0,                m3
+
+    vperm2i128      m5,                m7, m8, 0x20        ; [row0lo  row2lo] for O
+    vperm2i128      m6,                m7, m8, 0x31        ; [row0hi  row2hi] for O
+
+
+    ; rows 1 and 3
+    movu            m4,                [r0 + r2]
+    movu            m2,                [r0 + r2 + 32]
+    pshufb          m2,                m15
+    vpermq          m2,                m2, 0x4E
+    psubw           m10,               m4, m2
+    paddw           m4,                m2
+
+    movu            m3,                [r0 + r3]
+    movu            m2,                [r0 + r3 + 32]
+    pshufb          m2,                m15
+    vpermq          m2,                m2, 0x4E
+    psubw           m11,               m3, m2
+    paddw           m3,                m2
+    vperm2i128      m2,                m4, m3, 0x20        ; [row1lo  row3lo] for E
+    vperm2i128      m8,                m4, m3, 0x31        ; [row1hi  row3hi] for E
+    pshufb          m8,                m15
+    psubw           m3,                m2, m8
+    paddw           m2,                m8
+
+    vperm2i128      m4,                m10, m11, 0x20      ; [row1lo  row3lo] for O
+    vperm2i128      m7,                m10, m11, 0x31      ; [row1hi  row3hi] for O
+
+
+    DCT32_PASS_1    0 * 32,            0 * 64, 0, 2
+    DCT32_PASS_1    2 * 32,            2 * 64, 1, 3
+    DCT32_PASS_1    4 * 32,            4 * 64, 0, 2
+    DCT32_PASS_1    6 * 32,            6 * 64, 1, 3
+    DCT32_PASS_1    8 * 32,            8 * 64, 0, 2
+    DCT32_PASS_1    10 * 32,           10 * 64, 1, 3
+    DCT32_PASS_1    12 * 32,           12 * 64, 0, 2
+    DCT32_PASS_1    14 * 32,           14 * 64, 1, 3
+    DCT32_PASS_1    16 * 32,           16 * 64, 0, 2
+    DCT32_PASS_1    18 * 32,           18 * 64, 1, 3
+    DCT32_PASS_1    20 * 32,           20 * 64, 0, 2
+    DCT32_PASS_1    22 * 32,           22 * 64, 1, 3
+    DCT32_PASS_1    24 * 32,           24 * 64, 0, 2
+    DCT32_PASS_1    26 * 32,           26 * 64, 1, 3
+    DCT32_PASS_1    28 * 32,           28 * 64, 0, 2
+    DCT32_PASS_1    30 * 32,           30 * 64, 1, 3
+
+    add             r5,                8
+    lea             r0,                [r0 + r2 * 4]
+
+    dec             r4d
+    jnz             .pass1
+
+    mov             r2d,               64                  ; dst byte stride (32 * int16)
+    lea             r3,                [r2 * 3]
+    mov             r5,                rsp
+    mov             r4d,               8
+    ; pass-2 rounding: 1024 = 1 << (DCT_SHIFT2 - 1)
+    vpbroadcastq    m9,                [pd_1024]
+
+.pass2:
+    mova            m0,                [r5 + 0 * 64]
+    mova            m1,                [r5 + 0 * 64 + 32]
+
+    mova            m2,                [r5 + 1 * 64]
+    mova            m3,                [r5 + 1 * 64 + 32]
+
+    mova            m4,                [r5 + 2 * 64]
+    mova            m5,                [r5 + 2 * 64 + 32]
+
+    mova            m6,                [r5 + 3 * 64]
+    mova            m7,                [r5 + 3 * 64 + 32]
+
+    DCT32_PASS_2    0 * 32
+    movq            [r1],              xm11
+    DCT32_PASS_2    1 * 32
+    movq            [r1 + r2],         xm11
+    DCT32_PASS_2    2 * 32
+    movq            [r1 + r2 * 2],     xm11
+    DCT32_PASS_2    3 * 32
+    movq            [r1 + r3],         xm11
+
+    lea             r6,                [r1 + r2 * 4]
+    DCT32_PASS_2    4 * 32
+    movq            [r6],              xm11
+    DCT32_PASS_2    5 * 32
+    movq            [r6 + r2],         xm11
+    DCT32_PASS_2    6 * 32
+    movq            [r6 + r2 * 2],     xm11
+    DCT32_PASS_2    7 * 32
+    movq            [r6 + r3],         xm11
+
+    lea             r6,                [r6 + r2 * 4]
+    DCT32_PASS_2    8 * 32
+    movq            [r6],              xm11
+    DCT32_PASS_2    9 * 32
+    movq            [r6 + r2],         xm11
+    DCT32_PASS_2    10 * 32
+    movq            [r6 + r2 * 2],     xm11
+    DCT32_PASS_2    11 * 32
+    movq            [r6 + r3],         xm11
+
+    lea             r6,                [r6 + r2 * 4]
+    DCT32_PASS_2    12 * 32
+    movq            [r6],              xm11
+    DCT32_PASS_2    13 * 32
+    movq            [r6 + r2],         xm11
+    DCT32_PASS_2    14 * 32
+    movq            [r6 + r2 * 2],     xm11
+    DCT32_PASS_2    15 * 32
+    movq            [r6 + r3],         xm11
+
+    lea             r6,                [r6 + r2 * 4]
+    DCT32_PASS_2    16 * 32
+    movq            [r6],              xm11
+    DCT32_PASS_2    17 * 32
+    movq            [r6 + r2],         xm11
+    DCT32_PASS_2    18 * 32
+    movq            [r6 + r2 * 2],     xm11
+    DCT32_PASS_2    19 * 32
+    movq            [r6 + r3],         xm11
+
+    lea             r6,                [r6 + r2 * 4]
+    DCT32_PASS_2    20 * 32
+    movq            [r6],              xm11
+    DCT32_PASS_2    21 * 32
+    movq            [r6 + r2],         xm11
+    DCT32_PASS_2    22 * 32
+    movq            [r6 + r2 * 2],     xm11
+    DCT32_PASS_2    23 * 32
+    movq            [r6 + r3],         xm11
+
+    lea             r6,                [r6 + r2 * 4]
+    DCT32_PASS_2    24 * 32
+    movq            [r6],              xm11
+    DCT32_PASS_2    25 * 32
+    movq            [r6 + r2],         xm11
+    DCT32_PASS_2    26 * 32
+    movq            [r6 + r2 * 2],     xm11
+    DCT32_PASS_2    27 * 32
+    movq            [r6 + r3],         xm11
+
+    lea             r6,                [r6 + r2 * 4]
+    DCT32_PASS_2    28 * 32
+    movq            [r6],              xm11
+    DCT32_PASS_2    29 * 32
+    movq            [r6 + r2],         xm11
+    DCT32_PASS_2    30 * 32
+    movq            [r6 + r2 * 2],     xm11
+    DCT32_PASS_2    31 * 32
+    movq            [r6 + r3],         xm11
+
+    add             r5,                256
+    add             r1,                8
+
+    dec             r4d
+    jnz             .pass2
+    RET
+
+;------------------------------------------------------------------
+; One step of idct8 pass 1 (AVX2).
+; %1 = byte offset into the coefficient tables at r5 (even) and r6
+; (odd). Reads the input rows in m0/m1/m2/m4, m11 = rounding
+; constant; produces two packed/permuted result registers m3 and m6.
+; Clobbers m5/m7/m8/m9/m10. Uses IDCT_SHIFT1 (defined elsewhere).
+;------------------------------------------------------------------
+%macro IDCT8_PASS_1 1
+    vpbroadcastd    m7,                [r5 + %1]
+    vpbroadcastd    m10,               [r5 + %1 + 4]
+    pmaddwd         m5,                m4, m7
+    pmaddwd         m6,                m0, m10
+    paddd           m5,                m6
+
+    vpbroadcastd    m7,                [r6 + %1]
+    vpbroadcastd    m10,               [r6 + %1 + 4]
+    pmaddwd         m6,                m1, m7
+    pmaddwd         m3,                m2, m10
+    paddd           m6,                m3
+
+    ; (even + odd) and (even - odd), rounded and shifted
+    paddd           m3,                m5, m6
+    paddd           m3,                m11
+    psrad           m3,                IDCT_SHIFT1
+
+    psubd           m5,                m6
+    paddd           m5,                m11
+    psrad           m5,                IDCT_SHIFT1
+
+    ; second column pair (+32 bytes into the tables)
+    vpbroadcastd    m7,                [r5 + %1 + 32]
+    vpbroadcastd    m10,               [r5 + %1 + 36]
+    pmaddwd         m6,                m4, m7
+    pmaddwd         m8,                m0, m10
+    paddd           m6,                m8
+
+    vpbroadcastd    m7,                [r6 + %1 + 32]
+    vpbroadcastd    m10,               [r6 + %1 + 36]
+    pmaddwd         m8,                m1, m7
+    pmaddwd         m9,                m2, m10
+    paddd           m8,                m9
+
+    paddd           m9,                m6, m8
+    paddd           m9,                m11
+    psrad           m9,                IDCT_SHIFT1
+
+    psubd           m6,                m8
+    paddd           m6,                m11
+    psrad           m6,                IDCT_SHIFT1
+
+    packssdw        m3,                m9
+    vpermq          m3,                m3, 0xD8
+
+    packssdw        m6,                m5
+    vpermq          m6,                m6, 0xD8
+%endmacro
+
+;------------------------------------------------------------------
+; One step of idct8 pass 2 (AVX2).
+; Reads two interleaved rows from m0/m1, even/odd coefficient tables
+; at r5/r6, m12 = rounding constant; produces the packed results in
+; m8 and m9. Clobbers m2/m3/m5/m6/m7. Uses IDCT_SHIFT2 (defined
+; elsewhere).
+;------------------------------------------------------------------
+%macro IDCT8_PASS_2 0
+    punpcklqdq      m2,                m0, m1
+    punpckhqdq      m0,                m1
+
+    ; even part: four pmaddwd rows folded with phaddd, reordered by
+    ; idct8_shuf2, then split into the e[0..3] / e[4..7] halves
+    pmaddwd         m3,                m2, [r5]
+    pmaddwd         m5,                m2, [r5 + 32]
+    pmaddwd         m6,                m2, [r5 + 64]
+    pmaddwd         m7,                m2, [r5 + 96]
+    phaddd          m3,                m5
+    phaddd          m6,                m7
+    pshufb          m3,                [idct8_shuf2]
+    pshufb          m6,                [idct8_shuf2]
+    punpcklqdq      m7,                m3, m6
+    punpckhqdq      m3,                m6
+
+    ; odd part, same structure
+    pmaddwd         m5,                m0, [r6]
+    pmaddwd         m6,                m0, [r6 + 32]
+    pmaddwd         m8,                m0, [r6 + 64]
+    pmaddwd         m9,                m0, [r6 + 96]
+    phaddd          m5,                m6
+    phaddd          m8,                m9
+    pshufb          m5,                [idct8_shuf2]
+    pshufb          m8,                [idct8_shuf2]
+    punpcklqdq      m6,                m5, m8
+    punpckhqdq      m5,                m8
+
+    ; (e + o) >> shift packed with the byte-reversed (e - o) >> shift
+    paddd           m8,                m7, m6
+    paddd           m8,                m12
+    psrad           m8,                IDCT_SHIFT2
+
+    psubd           m7,                m6
+    paddd           m7,                m12
+    psrad           m7,                IDCT_SHIFT2
+
+    pshufb          m7,                [idct8_shuf3]
+    packssdw        m8,                 m7
+
+    paddd           m9,                m3, m5
+    paddd           m9,                m12
+    psrad           m9,                IDCT_SHIFT2
+
+    psubd           m3,                m5
+    paddd           m3,                m12
+    psrad           m3,                IDCT_SHIFT2
+
+    pshufb          m3,                [idct8_shuf3]
+    packssdw        m9,                m3
+%endmacro
+
+INIT_YMM avx2
+cglobal idct8, 3, 7, 13, 0-8*16   ; void idct8(const int16_t* src, int16_t* dst, intptr_t dstStride) — presumably matches the idct16/idct32 prototypes below; confirm
+%if BIT_DEPTH == 12
+    %define         IDCT_SHIFT2        8
+    vpbroadcastd    m12,                [pd_256]     ; pass-2 rounding = 1 << (IDCT_SHIFT2 - 1)
+%elif BIT_DEPTH == 10
+    %define         IDCT_SHIFT2        10
+    vpbroadcastd    m12,                [pd_512]
+%elif BIT_DEPTH == 8
+    %define         IDCT_SHIFT2        12
+    vpbroadcastd    m12,                [pd_2048]
+%else
+    %error Unsupported BIT_DEPTH!
+%endif
+%define             IDCT_SHIFT1         7
+
+    vbroadcasti128  m11,               [pd_64]      ; pass-1 rounding = 1 << (IDCT_SHIFT1 - 1)
+
+    mov             r4,                rsp          ; r4 = stack scratch buffer for the pass-1 result
+    lea             r5,                [avx2_idct8_1]
+    lea             r6,                [avx2_idct8_2]
+
+    ;pass1: vertical transform of the 8x8 coefficient block into the stack buffer
+    mova            m1,                [r0 + 0 * 32]     ; [0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1]
+    mova            m0,                [r0 + 1 * 32]     ; [2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3]
+    vpunpcklwd      m5,      m1,       m0                ; [0 2 0 2 0 2 0 2 1 3 1 3 1 3 1 3]
+    vpunpckhwd      m1,      m0                          ; [0 2 0 2 0 2 0 2 1 3 1 3 1 3 1 3]
+    vinserti128     m4,      m5,       xm1,       1      ; [0 2 0 2 0 2 0 2 0 2 0 2 0 2 0 2]
+    vextracti128    xm2,     m5,       1                 ; [1 3 1 3 1 3 1 3]
+    vinserti128     m1,      m1,       xm2,       0      ; [1 3 1 3 1 3 1 3 1 3 1 3 1 3 1 3]
+
+    mova            m2,                [r0 + 2 * 32]     ; [4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5]
+    mova            m0,                [r0 + 3 * 32]     ; [6 6 6 6 6 6 6 6 7 7 7 7 7 7 7 7]
+    vpunpcklwd      m5,      m2,       m0                ; [4 6 4 6 4 6 4 6 5 7 5 7 5 7 5 7]
+    vpunpckhwd      m2,      m0                          ; [4 6 4 6 4 6 4 6 5 7 5 7 5 7 5 7]
+    vinserti128     m0,      m5,       xm2,       1     ; [4 6 4 6 4 6 4 6 4 6 4 6 4 6 4 6]
+    vextracti128    xm5,     m5,       1                ; [5 7 5 7 5 7 5 7]
+    vinserti128     m2,      m2,       xm5,       0     ; [5 7 5 7 5 7 5 7 5 7 5 7 5 7 5 7]
+
+    mova            m5,                [idct8_shuf1]
+    vpermd          m4,                m5, m4
+    vpermd          m0,                m5, m0
+    vpermd          m1,                m5, m1
+    vpermd          m2,                m5, m2
+
+    IDCT8_PASS_1    0
+    mova            [r4],              m3
+    mova            [r4 + 96],         m6
+
+    IDCT8_PASS_1    64
+    mova            [r4 + 32],         m3
+    mova            [r4 + 64],         m6
+
+    ;pass2: horizontal transform from the stack buffer to dst (r1), 4 rows per iteration
+    add             r2d,               r2d          ; stride in int16 units -> bytes
+    lea             r3,                [r2 * 3]
+
+    mova            m0,                [r4]
+    mova            m1,                [r4 + 32]
+    IDCT8_PASS_2
+
+    vextracti128    xm3,               m8, 1
+    mova            [r1],              xm8
+    mova            [r1 + r2],         xm3
+    vextracti128    xm3,               m9, 1
+    mova            [r1 + r2 * 2],     xm9
+    mova            [r1 + r3],         xm3
+
+    lea             r1,                [r1 + r2 * 4]
+    mova            m0,                [r4 + 64]
+    mova            m1,                [r4 + 96]
+    IDCT8_PASS_2
+
+    vextracti128    xm3,               m8, 1
+    mova            [r1],              xm8
+    mova            [r1 + r2],         xm3
+    vextracti128    xm3,               m9, 1
+    mova            [r1 + r2 * 2],     xm9
+    mova            [r1 + r3],         xm3
+    RET
+
+%macro IDCT_PASS1 2            ; 16x16 IDCT first pass: produces transposed result rows %1 and %2 into the stack buffer at r3
+    vbroadcasti128  m5, [tab_idct16_2 + %1 * 16]
+
+    pmaddwd         m9, m0, m5
+    pmaddwd         m10, m7, m5
+    phaddd          m9, m10
+
+    pmaddwd         m10, m6, m5
+    pmaddwd         m11, m8, m5
+    phaddd          m10, m11
+
+    phaddd          m9, m10                        ; m9 = even-coefficient contribution
+    vbroadcasti128  m5, [tab_idct16_1 + %1 * 16]
+
+    pmaddwd         m10, m1, m5
+    pmaddwd         m11, m3, m5
+    phaddd          m10, m11
+
+    pmaddwd         m11, m4, m5
+    pmaddwd         m12, m2, m5
+    phaddd          m11, m12
+
+    phaddd          m10, m11                       ; m10 = odd-coefficient contribution
+
+    paddd           m11, m9, m10                   ; butterfly: sum branch
+    paddd           m11, m14                       ; + rounding constant (pd_64)
+    psrad           m11, IDCT_SHIFT1
+
+    psubd           m9, m10                        ; butterfly: difference branch
+    paddd           m9, m14
+    psrad           m9, IDCT_SHIFT1
+
+    vbroadcasti128  m5, [tab_idct16_2 + %1 * 16 + 16]
+
+    pmaddwd         m10, m0, m5
+    pmaddwd         m12, m7, m5
+    phaddd          m10, m12
+
+    pmaddwd         m12, m6, m5
+    pmaddwd         m13, m8, m5
+    phaddd          m12, m13
+
+    phaddd          m10, m12
+    vbroadcasti128  m5, [tab_idct16_1 + %1 * 16  + 16]
+
+    pmaddwd         m12, m1, m5
+    pmaddwd         m13, m3, m5
+    phaddd          m12, m13
+
+    pmaddwd         m13, m4, m5
+    pmaddwd         m5, m2
+    phaddd          m13, m5
+
+    phaddd          m12, m13
+
+    paddd           m5, m10, m12
+    paddd           m5, m14
+    psrad           m5, IDCT_SHIFT1
+
+    psubd           m10, m12
+    paddd           m10, m14
+    psrad           m10, IDCT_SHIFT1
+
+    packssdw        m11, m5                        ; saturate 32-bit sums back to int16
+    packssdw        m9, m10
+
+    mova            m10, [idct16_shuff]
+    mova            m5,  [idct16_shuff1]
+
+    vpermd          m12, m10, m11
+    vpermd          m13, m5, m9
+    mova            [r3 + %1 * 16 * 2], xm12       ; store transposed intermediate rows to the stack
+    mova            [r3 + %2 * 16 * 2], xm13
+    vextracti128    [r3 + %2 * 16 * 2 + 32], m13, 1
+    vextracti128    [r3 + %1 * 16 * 2 + 32], m12, 1
+%endmacro
+
+;-------------------------------------------------------
+; void idct16(const int16_t* src, int16_t* dst, intptr_t dstStride)
+;-------------------------------------------------------
+INIT_YMM avx2
+cglobal idct16, 3, 7, 16, 0-16*mmsize   ; two-pass 16x16 inverse DCT; pass 1 to a stack buffer, pass 2 to dst
+%if BIT_DEPTH == 12
+    %define         IDCT_SHIFT2        8
+    vpbroadcastd    m15,                [pd_256]    ; pass-2 rounding = 1 << (IDCT_SHIFT2 - 1)
+%elif BIT_DEPTH == 10
+    %define         IDCT_SHIFT2        10
+    vpbroadcastd    m15,                [pd_512]
+%elif BIT_DEPTH == 8
+    %define         IDCT_SHIFT2        12
+    vpbroadcastd    m15,                [pd_2048]
+%else
+    %error Unsupported BIT_DEPTH!
+%endif
+%define             IDCT_SHIFT1         7
+
+    vbroadcasti128  m14,               [pd_64]     ; pass-1 rounding = 1 << (IDCT_SHIFT1 - 1)
+
+    add             r2d,               r2d         ; stride in int16 units -> bytes
+    mov             r3, rsp
+    mov             r4d, 2                         ; pass 1 handles the block in two half-width iterations
+
+.pass1:
+     movu            xm0, [r0 +  0 * 32]
+     movu            xm1, [r0 +  8 * 32]
+     punpckhqdq      xm2, xm0, xm1
+     punpcklqdq      xm0, xm1
+     vinserti128     m0, m0, xm2, 1
+
+     movu            xm1, [r0 +  1 * 32]
+     movu            xm2, [r0 +  9 * 32]
+     punpckhqdq      xm3, xm1, xm2
+     punpcklqdq      xm1, xm2
+     vinserti128     m1, m1, xm3, 1
+
+     movu            xm2, [r0 + 2  * 32]
+     movu            xm3, [r0 + 10 * 32]
+     punpckhqdq      xm4, xm2, xm3
+     punpcklqdq      xm2, xm3
+     vinserti128     m2, m2, xm4, 1
+
+     movu            xm3, [r0 + 3  * 32]
+     movu            xm4, [r0 + 11 * 32]
+     punpckhqdq      xm5, xm3, xm4
+     punpcklqdq      xm3, xm4
+     vinserti128     m3, m3, xm5, 1
+
+     movu            xm4, [r0 + 4  * 32]
+     movu            xm5, [r0 + 12 * 32]
+     punpckhqdq      xm6, xm4, xm5
+     punpcklqdq      xm4, xm5
+     vinserti128     m4, m4, xm6, 1
+
+     movu            xm5, [r0 + 5  * 32]
+     movu            xm6, [r0 + 13 * 32]
+     punpckhqdq      xm7, xm5, xm6
+     punpcklqdq      xm5, xm6
+     vinserti128     m5, m5, xm7, 1
+
+     movu            xm6, [r0 + 6  * 32]
+     movu            xm7, [r0 + 14 * 32]
+     punpckhqdq      xm8, xm6, xm7
+     punpcklqdq      xm6, xm7
+     vinserti128     m6, m6, xm8, 1
+
+     movu            xm7, [r0 + 7  * 32]
+     movu            xm8, [r0 + 15 * 32]
+     punpckhqdq      xm9, xm7, xm8
+     punpcklqdq      xm7, xm8
+     vinserti128     m7, m7, xm9, 1
+
+    punpckhwd       m8, m0, m2                ;[8 10]
+    punpcklwd       m0, m2                    ;[0 2]
+
+    punpckhwd       m2, m1, m3                ;[9 11]
+    punpcklwd       m1, m3                    ;[1 3]
+
+    punpckhwd       m3, m4, m6                ;[12 14]
+    punpcklwd       m4, m6                    ;[4 6]
+
+    punpckhwd       m6, m5, m7                ;[13 15]
+    punpcklwd       m5, m7                    ;[5 7]
+
+    punpckhdq       m7, m0, m4                ;[02 22 42 62 03 23 43 63 06 26 46 66 07 27 47 67]
+    punpckldq       m0, m4                    ;[00 20 40 60 01 21 41 61 04 24 44 64 05 25 45 65]
+
+    punpckhdq       m4, m8, m3                ;[82 102 122 142 83 103 123 143 86 106 126 146 87 107 127 147]
+    punpckldq       m8, m3                    ;[80 100 120 140 81 101 121 141 84 104 124 144 85 105 125 145]
+
+    punpckhdq       m3, m1, m5                ;[12 32 52 72 13 33 53 73 16 36 56 76 17 37 57 77]
+    punpckldq       m1, m5                    ;[10 30 50 70 11 31 51 71 14 34 54 74 15 35 55 75]
+
+    punpckhdq       m5, m2, m6                ;[92 112 132 152 93 113 133 153 96 116 136 156 97 117 137 157]
+    punpckldq       m2, m6                    ;[90 110 130 150 91 111 131 151 94 114 134 154 95 115 135 155]
+
+    punpckhqdq      m6, m0, m8                ;[01 21 41 61 81 101 121 141 05 25 45 65 85 105 125 145]
+    punpcklqdq      m0, m8                    ;[00 20 40 60 80 100 120 140 04 24 44 64 84 104 124 144]
+
+    punpckhqdq      m8, m7, m4                ;[03 23 43 63 83 103 123 143 07 27 47 67 87 107 127 147]
+    punpcklqdq      m7, m4                    ;[02 22 42 62 82 102 122 142 06 26 46 66 86 106 126 146]
+
+    punpckhqdq      m4, m1, m2                ;[11 31 51 71 91 111 131 151 15 35 55 75 95 115 135 155]
+    punpcklqdq      m1, m2                    ;[10 30 50 70 90 110 130 150 14 34 54 74 94 114 134 154]
+
+    punpckhqdq      m2, m3, m5                ;[13 33 53 73 93 113 133 153 17 37 57 77 97 117 137 157]
+    punpcklqdq      m3, m5                    ;[12 32 52 72 92 112 132 152 16 36 56 76 96 116 136 156]
+
+    IDCT_PASS1      0, 14
+    IDCT_PASS1      2, 12
+    IDCT_PASS1      4, 10
+    IDCT_PASS1      6, 8
+
+    add             r0, 16
+    add             r3, 16
+    dec             r4d
+    jnz             .pass1
+
+    mov             r3, rsp
+    mov             r4d, 8                         ; pass 2: 8 iterations x 2 output rows
+    lea             r5, [tab_idct16_2]
+    lea             r6, [tab_idct16_1]
+
+    vbroadcasti128  m7,  [r5]                      ; preload even-coefficient table rows (m7-m13)
+    vbroadcasti128  m8,  [r5 + 16]
+    vbroadcasti128  m9,  [r5 + 32]
+    vbroadcasti128  m10, [r5 + 48]
+    vbroadcasti128  m11, [r5 + 64]
+    vbroadcasti128  m12, [r5 + 80]
+    vbroadcasti128  m13, [r5 + 96]
+
+.pass2:
+    movu            m1, [r3]
+    vpermq          m0, m1, 0xD8
+
+    pmaddwd         m1, m0, m7
+    pmaddwd         m2, m0, m8
+    phaddd          m1, m2
+
+    pmaddwd         m2, m0, m9
+    pmaddwd         m3, m0, m10
+    phaddd          m2, m3
+
+    phaddd          m1, m2
+
+    pmaddwd         m2, m0, m11
+    pmaddwd         m3, m0, m12
+    phaddd          m2, m3
+
+    vbroadcasti128  m14, [r5 + 112]
+    pmaddwd         m3, m0, m13
+    pmaddwd         m4, m0, m14
+    phaddd          m3, m4
+
+    phaddd          m2, m3
+
+    movu            m3, [r3 + 32]
+    vpermq          m0, m3, 0xD8
+
+    vbroadcasti128  m14, [r6]                      ; odd-coefficient table streamed through m14
+    pmaddwd         m3, m0, m14
+    vbroadcasti128  m14, [r6 + 16]
+    pmaddwd         m4, m0, m14
+    phaddd          m3, m4
+
+    vbroadcasti128  m14, [r6 + 32]
+    pmaddwd         m4, m0, m14
+    vbroadcasti128  m14, [r6 + 48]
+    pmaddwd         m5, m0, m14
+    phaddd          m4, m5
+
+    phaddd          m3, m4
+
+    vbroadcasti128  m14, [r6 + 64]
+    pmaddwd         m4, m0, m14
+    vbroadcasti128  m14, [r6 + 80]
+    pmaddwd         m5, m0, m14
+    phaddd          m4, m5
+
+    vbroadcasti128  m14, [r6 + 96]
+    pmaddwd         m6, m0, m14
+    vbroadcasti128  m14, [r6 + 112]
+    pmaddwd         m0, m14
+    phaddd          m6, m0
+
+    phaddd          m4, m6
+
+    paddd           m5, m1, m3                     ; butterfly: sum branch
+    paddd           m5, m15
+    psrad           m5, IDCT_SHIFT2
+
+    psubd           m1, m3                         ; butterfly: difference branch
+    paddd           m1, m15
+    psrad           m1, IDCT_SHIFT2
+
+    paddd           m6, m2, m4
+    paddd           m6, m15
+    psrad           m6, IDCT_SHIFT2
+
+    psubd           m2, m4
+    paddd           m2, m15
+    psrad           m2, IDCT_SHIFT2
+
+    packssdw        m5, m6
+    packssdw        m1, m2
+    pshufb          m2, m1, [dct16_shuf1]
+
+    mova            [r1], xm5
+    mova            [r1 + 16], xm2
+    vextracti128    [r1 + r2], m5, 1
+    vextracti128    [r1 + r2 + 16], m2, 1
+
+    lea             r1, [r1 + 2 * r2]
+    add             r3, 64
+    dec             r4d
+    jnz             .pass2
+    RET
+
+%macro IDCT32_PASS1 1          ; 32x32 IDCT first pass: computes rows %1 and (31 - %1) and scatters them into the stack buffers at r3/r4
+    vbroadcasti128  m3, [tab_idct32_1 + %1 * 32]
+    vbroadcasti128  m13, [tab_idct32_1 + %1 * 32 + 16]
+    pmaddwd         m9, m4, m3
+    pmaddwd         m10, m8, m13
+    phaddd          m9, m10
+
+    pmaddwd         m10, m2, m3
+    pmaddwd         m11, m1, m13
+    phaddd          m10, m11
+
+    phaddd          m9, m10
+
+    vbroadcasti128  m3, [tab_idct32_1 + (15 - %1) * 32]   ; mirrored table row for the symmetric output row
+    vbroadcasti128  m13, [tab_idct32_1 + (15- %1) * 32 + 16]
+    pmaddwd         m10, m4, m3
+    pmaddwd         m11, m8, m13
+    phaddd          m10, m11
+
+    pmaddwd         m11, m2, m3
+    pmaddwd         m12, m1, m13
+    phaddd          m11, m12
+
+    phaddd          m10, m11
+    phaddd          m9, m10                       ;[row0s0 row2s0 row0s15 row2s15 row1s0 row3s0 row1s15 row3s15]
+
+    vbroadcasti128  m3, [tab_idct32_2 + %1 * 16]
+    pmaddwd         m10, m0, m3
+    pmaddwd         m11, m7, m3
+    phaddd          m10, m11
+    phaddd          m10, m10
+
+    vbroadcasti128  m3, [tab_idct32_3 + %1 * 16]
+    pmaddwd         m11, m5, m3
+    pmaddwd         m12, m6, m3
+    phaddd          m11, m12
+    phaddd          m11, m11
+
+    paddd           m12, m10, m11                 ;[row0a0 row2a0 NIL NIL row1sa0 row3a0 NIL NIL]
+    psubd           m10, m11                      ;[row0a15 row2a15 NIL NIL row1a15 row3a15 NIL NIL]
+
+    punpcklqdq      m12, m10                      ;[row0a0 row2a0 row0a15 row2a15 row1a0 row3a0 row1a15 row3a15]
+    paddd           m10, m9, m12                  ; butterfly: sum branch
+    paddd           m10, m15                      ; + rounding constant (pd_64)
+    psrad           m10, IDCT_SHIFT1
+
+    psubd           m12, m9                       ; butterfly: difference branch
+    paddd           m12, m15
+    psrad           m12, IDCT_SHIFT1
+
+    packssdw        m10, m12                      ; saturate 32-bit sums back to int16
+    vextracti128    xm12, m10, 1
+    movd            [r3 + %1 * 64], xm10
+    movd            [r3 + 32 + %1 * 64], xm12
+    pextrd          [r4 - %1 * 64], xm10, 1
+    pextrd          [r4+ 32 - %1 * 64], xm12, 1
+    pextrd          [r3 + 16 * 64 + %1 *64], xm10, 3
+    pextrd          [r3 + 16 * 64 + 32 + %1 * 64], xm12, 3
+    pextrd          [r4 + 16 * 64 - %1 * 64], xm10, 2
+    pextrd          [r4 + 16 * 64 + 32 - %1 * 64], xm12, 2
+%endmacro
+
+;-------------------------------------------------------
+; void idct32(const int16_t* src, int16_t* dst, intptr_t dstStride)
+;-------------------------------------------------------
+
+; TODO: Reduce PHADDD instruction by PADDD
+
+INIT_YMM avx2
+cglobal idct32, 3, 6, 16, 0-32*64   ; two-pass 32x32 inverse DCT; pass 1 to a 32x32 int16 stack buffer, pass 2 to dst
+
+%define             IDCT_SHIFT1         7
+
+    vbroadcasti128  m15, [pd_64]   ; pass-1 rounding = 1 << (IDCT_SHIFT1 - 1)
+
+    mov             r3, rsp
+    lea             r4, [r3 + 15 * 64]             ; r4 points at the mirrored half of the scatter buffer
+    mov             r5d, 8                         ; pass 1 handles 4 columns per iteration
+
+.pass1:
+    movq            xm0,    [r0 +  2 * 64]
+    movq            xm1,    [r0 + 18 * 64]
+    punpcklqdq      xm0, xm0, xm1
+    movq            xm1,    [r0 +  0 * 64]
+    movq            xm2,    [r0 + 16 * 64]
+    punpcklqdq      xm1, xm1, xm2
+    vinserti128     m0,  m0,  xm1, 1             ;[2 18 0 16]
+
+    movq            xm1,    [r0 + 1 * 64]
+    movq            xm2,    [r0 + 9 * 64]
+    punpcklqdq      xm1, xm1, xm2
+    movq            xm2,    [r0 + 17 * 64]
+    movq            xm3,    [r0 + 25 * 64]
+    punpcklqdq      xm2, xm2, xm3
+    vinserti128     m1,  m1,  xm2, 1             ;[1 9 17 25]
+
+    movq            xm2,    [r0 + 6 * 64]
+    movq            xm3,    [r0 + 22 * 64]
+    punpcklqdq      xm2, xm2, xm3
+    movq            xm3,    [r0 + 4 * 64]
+    movq            xm4,    [r0 + 20 * 64]
+    punpcklqdq      xm3, xm3, xm4
+    vinserti128     m2,  m2,  xm3, 1             ;[6 22 4 20]
+
+    movq            xm3,    [r0 + 3 * 64]
+    movq            xm4,    [r0 + 11 * 64]
+    punpcklqdq      xm3, xm3, xm4
+    movq            xm4,    [r0 + 19 * 64]
+    movq            xm5,    [r0 + 27 * 64]
+    punpcklqdq      xm4, xm4, xm5
+    vinserti128     m3,  m3,  xm4, 1             ;[3 11 19 27]
+
+    movq            xm4,    [r0 + 10 * 64]
+    movq            xm5,    [r0 + 26 * 64]
+    punpcklqdq      xm4, xm4, xm5
+    movq            xm5,    [r0 + 8 * 64]
+    movq            xm6,    [r0 + 24 * 64]
+    punpcklqdq      xm5, xm5, xm6
+    vinserti128     m4,  m4,  xm5, 1             ;[10 26 8 24]
+
+    movq            xm5,    [r0 + 5 * 64]
+    movq            xm6,    [r0 + 13 * 64]
+    punpcklqdq      xm5, xm5, xm6
+    movq            xm6,    [r0 + 21 * 64]
+    movq            xm7,    [r0 + 29 * 64]
+    punpcklqdq      xm6, xm6, xm7
+    vinserti128     m5,  m5,  xm6, 1             ;[5 13 21 29]
+
+    movq            xm6,    [r0 + 14 * 64]
+    movq            xm7,    [r0 + 30 * 64]
+    punpcklqdq      xm6, xm6, xm7
+    movq            xm7,    [r0 + 12 * 64]
+    movq            xm8,    [r0 + 28 * 64]
+    punpcklqdq      xm7, xm7, xm8
+    vinserti128     m6,  m6,  xm7, 1             ;[14 30 12 28]
+
+    movq            xm7,    [r0 + 7 * 64]
+    movq            xm8,    [r0 + 15 * 64]
+    punpcklqdq      xm7, xm7, xm8
+    movq            xm8,    [r0 + 23 * 64]
+    movq            xm9,    [r0 + 31 * 64]
+    punpcklqdq      xm8, xm8, xm9
+    vinserti128     m7,  m7,  xm8, 1             ;[7 15 23 31]
+
+    punpckhwd       m8, m0, m2                  ;[18 22 16 20]
+    punpcklwd       m0, m2                      ;[2 6 0 4]
+
+    punpckhwd       m2, m1, m3                  ;[9 11 25 27]
+    punpcklwd       m1, m3                      ;[1 3 17 19]
+
+    punpckhwd       m3, m4, m6                  ;[26 30 24 28]
+    punpcklwd       m4, m6                      ;[10 14 8 12]
+
+    punpckhwd       m6, m5, m7                  ;[13 15 29 31]
+    punpcklwd       m5, m7                      ;[5 7 21 23]
+
+    punpckhdq       m7, m0, m4                  ;[22 62 102 142 23 63 103 143 02 42 82 122 03 43 83 123]
+    punpckldq       m0, m4                      ;[20 60 100 140 21 61 101 141 00 40 80 120 01 41 81 121]
+
+    punpckhdq       m4, m8, m3                  ;[182 222 262 302 183 223 263 303 162 202 242 282 163 203 243 283]
+    punpckldq       m8, m3                      ;[180 220 260 300 181 221 261 301 160 200 240 280 161 201 241 281]
+
+    punpckhdq       m3, m1, m5                  ;[12 32 52 72 13 33 53 73 172 192 212 232 173 193 213 233]
+    punpckldq       m1, m5                      ;[10 30 50 70 11 31 51 71 170 190 210 230 171 191 211 231]
+
+    punpckhdq       m5, m2, m6                  ;[92 112 132 152 93 113 133 153 252 272 292 312 253 273 293 313]
+    punpckldq       m2, m6                      ;[90 110 130 150 91 111 131 151 250 270 290 310 251 271 291 311]
+
+    punpckhqdq      m6, m0, m8                  ;[21 61 101 141 181 221 261 301 01 41 81 121 161 201 241 281]
+    punpcklqdq      m0, m8                      ;[20 60 100 140 180 220 260 300 00 40 80 120 160 200 240 280]
+
+    punpckhqdq      m8, m7, m4                  ;[23 63 103 143 183 223 263 303 03 43 83 123 163 203 243 283]
+    punpcklqdq      m7, m4                      ;[22 62 102 142 182 222 262 302 02 42 82 122 162 202 242 282]
+
+    punpckhqdq      m4, m1, m2                  ;[11 31 51 71 91 111 131 151 171 191 211 231 251 271 291 311]
+    punpcklqdq      m1, m2                      ;[10 30 50 70 90 110 130 150 170 190 210 230 250 270 290 310]
+
+    punpckhqdq      m2, m3, m5                  ;[13 33 53 73 93 113 133 153 173 193 213 233 253 273 293 313]
+    punpcklqdq      m3, m5                      ;[12 32 52 72 92 112 132 152 172 192 212 232 252 272 292 312]
+
+    vperm2i128      m5, m0, m6, 0x20            ;[20 60 100 140 180 220 260 300 21 61 101 141 181 221 261 301]
+    vperm2i128      m0, m0, m6, 0x31            ;[00 40 80 120 160 200 240 280 01 41 81 121 161 201 241 281]
+
+    vperm2i128      m6, m7, m8, 0x20            ;[22 62 102 142 182 222 262 302 23 63 103 143 183 223 263 303]
+    vperm2i128      m7, m7, m8, 0x31            ;[02 42 82 122 162 202 242 282 03 43 83 123 163 203 243 283]
+
+    vperm2i128      m8, m1, m4, 0x31            ;[170 190 210 230 250 270 290 310 171 191 211 231 251 271 291 311]
+    vperm2i128      m4, m1, m4, 0x20            ;[10 30 50 70 90 110 130 150 11 31 51 71 91 111 131 151]
+
+    vperm2i128      m1, m3, m2, 0x31            ;[172 192 212 232 252 272 292 312 173 193 213 233 253 273 293 313]
+    vperm2i128      m2, m3, m2, 0x20            ;[12 32 52 72 92 112 132 152 13 33 53 73 93 113 133 153]
+
+    IDCT32_PASS1 0
+    IDCT32_PASS1 1
+    IDCT32_PASS1 2
+    IDCT32_PASS1 3
+    IDCT32_PASS1 4
+    IDCT32_PASS1 5
+    IDCT32_PASS1 6
+    IDCT32_PASS1 7
+
+    add             r0, 8
+    add             r3, 4
+    add             r4, 4
+    dec             r5d
+    jnz             .pass1
+
+%if BIT_DEPTH == 12
+    %define         IDCT_SHIFT2        8
+    vpbroadcastd    m15,                [pd_256]   ; pass-2 rounding = 1 << (IDCT_SHIFT2 - 1)
+%elif BIT_DEPTH == 10
+    %define         IDCT_SHIFT2        10
+    vpbroadcastd    m15,                [pd_512]
+%elif BIT_DEPTH == 8
+    %define         IDCT_SHIFT2        12
+    vpbroadcastd    m15,                [pd_2048]
+%else
+    %error Unsupported BIT_DEPTH!
+%endif
+
+    mov             r3, rsp
+    add             r2d, r2d                       ; stride in int16 units -> bytes
+    mov             r4d, 32                        ; pass 2: one output row per iteration
+
+    mova            m7,  [tab_idct32_4]            ; preload the first 8 rows of the pass-2 coefficient table
+    mova            m8,  [tab_idct32_4 + 32]
+    mova            m9,  [tab_idct32_4 + 64]
+    mova            m10, [tab_idct32_4 + 96]
+    mova            m11, [tab_idct32_4 + 128]
+    mova            m12, [tab_idct32_4 + 160]
+    mova            m13, [tab_idct32_4 + 192]
+    mova            m14, [tab_idct32_4 + 224]
+.pass2:
+    movu            m0, [r3]
+    movu            m1, [r3 + 32]
+
+    pmaddwd         m2, m0, m7
+    pmaddwd         m3, m0, m8
+    phaddd          m2, m3
+
+    pmaddwd         m3, m0, m9
+    pmaddwd         m4, m0, m10
+    phaddd          m3, m4
+
+    phaddd          m2, m3
+
+    pmaddwd         m3, m0, m11
+    pmaddwd         m4, m0, m12
+    phaddd          m3, m4
+
+    pmaddwd         m4, m0, m13
+    pmaddwd         m5, m0, m14
+    phaddd          m4, m5
+
+    phaddd          m3, m4
+
+    vperm2i128      m4, m2, m3, 0x31
+    vperm2i128      m2, m2, m3, 0x20
+    paddd           m2, m4
+
+    pmaddwd         m3, m0, [tab_idct32_4 + 256]
+    pmaddwd         m4, m0, [tab_idct32_4 + 288]
+    phaddd          m3, m4
+
+    pmaddwd         m4, m0, [tab_idct32_4 + 320]
+    pmaddwd         m5, m0, [tab_idct32_4 + 352]
+    phaddd          m4, m5
+
+    phaddd          m3, m4
+
+    pmaddwd         m4, m0, [tab_idct32_4 + 384]
+    pmaddwd         m5, m0, [tab_idct32_4 + 416]
+    phaddd          m4, m5
+
+    pmaddwd         m5, m0, [tab_idct32_4 + 448]
+    pmaddwd         m0,     [tab_idct32_4 + 480]
+    phaddd          m5, m0
+
+    phaddd          m4, m5
+
+    vperm2i128      m0, m3, m4, 0x31
+    vperm2i128      m3, m3, m4, 0x20
+    paddd           m3, m0
+
+    pmaddwd         m4, m1, [tab_idct32_1]         ; odd half of the row against tab_idct32_1
+    pmaddwd         m0, m1, [tab_idct32_1 + 32]
+    phaddd          m4, m0
+
+    pmaddwd         m5, m1, [tab_idct32_1 + 64]
+    pmaddwd         m0, m1, [tab_idct32_1 + 96]
+    phaddd          m5, m0
+
+    phaddd          m4, m5
+
+    pmaddwd         m5, m1, [tab_idct32_1 + 128]
+    pmaddwd         m0, m1, [tab_idct32_1 + 160]
+    phaddd          m5, m0
+
+    pmaddwd         m6, m1, [tab_idct32_1 + 192]
+    pmaddwd         m0, m1, [tab_idct32_1 + 224]
+    phaddd          m6, m0
+
+    phaddd          m5, m6
+
+    vperm2i128      m0, m4, m5, 0x31
+    vperm2i128      m4, m4, m5, 0x20
+    paddd           m4, m0
+
+    pmaddwd         m5, m1, [tab_idct32_1 + 256]
+    pmaddwd         m0, m1, [tab_idct32_1 + 288]
+    phaddd          m5, m0
+
+    pmaddwd         m6, m1, [tab_idct32_1 + 320]
+    pmaddwd         m0, m1, [tab_idct32_1 + 352]
+    phaddd          m6, m0
+
+    phaddd          m5, m6
+
+    pmaddwd         m6, m1, [tab_idct32_1 + 384]
+    pmaddwd         m0, m1, [tab_idct32_1 + 416]
+    phaddd          m6, m0
+
+    pmaddwd         m0, m1, [tab_idct32_1 + 448]
+    pmaddwd         m1,     [tab_idct32_1 + 480]
+    phaddd          m0, m1
+
+    phaddd          m6, m0
+
+    vperm2i128      m0, m5, m6, 0x31
+    vperm2i128      m5, m5, m6, 0x20
+    paddd           m5, m0
+
+    paddd           m6, m2, m4                     ; butterfly: sum branch
+    paddd           m6, m15
+    psrad           m6, IDCT_SHIFT2
+
+    psubd           m2, m4                         ; butterfly: difference branch
+    paddd           m2, m15
+    psrad           m2, IDCT_SHIFT2
+
+    paddd           m4, m3, m5
+    paddd           m4, m15
+    psrad           m4, IDCT_SHIFT2
+
+    psubd           m3, m5
+    paddd           m3, m15
+    psrad           m3, IDCT_SHIFT2
+
+    packssdw        m6, m4                         ; saturate 32-bit sums back to int16
+    packssdw        m2, m3
+
+    vpermq          m6, m6, 0xD8
+    vpermq          m2, m2, 0x8D
+    pshufb          m2, [dct16_shuf1]
+
+    mova            [r1], m6
+    mova            [r1 + 32], m2
+
+    add             r1, r2
+    add             r3, 64
+    dec             r4d
+    jnz             .pass2
+    RET
+
+;-------------------------------------------------------
+; void idct4(const int16_t* src, int16_t* dst, intptr_t dstStride)
+;-------------------------------------------------------
+INIT_YMM avx2
+cglobal idct4, 3, 4, 6        ; single-register-file 4x4 inverse DCT: both passes kept entirely in YMM registers (no stack buffer)
+
+%define             IDCT_SHIFT1         7
+%if BIT_DEPTH == 12
+    %define         IDCT_SHIFT2        8
+    vpbroadcastd    m5,                [pd_256]    ; pass-2 rounding = 1 << (IDCT_SHIFT2 - 1)
+%elif BIT_DEPTH == 10
+    %define         IDCT_SHIFT2        10
+    vpbroadcastd    m5,                [pd_512]
+%elif BIT_DEPTH == 8
+    %define         IDCT_SHIFT2        12
+    vpbroadcastd    m5,                [pd_2048]
+%else
+    %error Unsupported BIT_DEPTH!
+%endif
+    vbroadcasti128  m4, [pd_64]                   ; pass-1 rounding = 1 << (IDCT_SHIFT1 - 1)
+
+    add             r2d, r2d                      ; stride in int16 units -> bytes
+    lea             r3, [r2 * 3]
+
+    movu            m0, [r0]                      ;[00 01 02 03 10 11 12 13 20 21 22 23 30 31 32 33]
+
+    pshufb          m0, [idct4_shuf1]             ;[00 02 01 03 10 12 11 13 20 22 21 23 30 32 31 33]
+    vextracti128    xm1, m0, 1                    ;[20 22 21 23 30 32 31 33]
+    punpcklwd       xm2, xm0, xm1                 ;[00 20 02 22 01 21 03 23]
+    punpckhwd       xm0, xm1                      ;[10 30 12 32 11 31 13 33]
+    vinserti128     m2, m2, xm2, 1                ;[00 20 02 22 01 21 03 23 00 20 02 22 01 21 03 23]
+    vinserti128     m0, m0, xm0, 1                ;[10 30 12 32 11 31 13 33 10 30 12 32 11 31 13 33]
+
+    mova            m1, [avx2_idct4_1]
+    mova            m3, [avx2_idct4_1 + 32]
+    pmaddwd         m1, m2
+    pmaddwd         m3, m0
+
+    paddd           m0, m1, m3                    ; pass-1 butterfly: sum branch
+    paddd           m0, m4
+    psrad           m0, IDCT_SHIFT1               ;[00 20 10 30 01 21 11 31]
+
+    psubd           m1, m3                        ; pass-1 butterfly: difference branch
+    paddd           m1, m4
+    psrad           m1, IDCT_SHIFT1               ;[03 23 13 33 02 22 12 32]
+
+    packssdw        m0, m1                        ;[00 20 10 30 03 23 13 33 01 21 11 31 02 22 12 32]
+    vmovshdup       m1, m0                        ;[10 30 10 30 13 33 13 33 11 31 11 31 12 32 12 32]
+    vmovsldup       m0, m0                        ;[00 20 00 20 03 23 03 23 01 21 01 21 02 22 02 22]
+
+    vpbroadcastq    m2, [avx2_idct4_2]
+    vpbroadcastq    m3, [avx2_idct4_2 + 8]
+    pmaddwd         m0, m2
+    pmaddwd         m1, m3
+
+    paddd           m2, m0, m1                    ; pass-2 butterfly: sum branch
+    paddd           m2, m5
+    psrad           m2, IDCT_SHIFT2               ;[00 01 10 11 30 31 20 21]
+
+    psubd           m0, m1                        ; pass-2 butterfly: difference branch
+    paddd           m0, m5
+    psrad           m0, IDCT_SHIFT2               ;[03 02 13 12 33 32 23 22]
+
+    pshufb          m0, [idct4_shuf2]             ;[02 03 12 13 32 33 22 23]
+    punpcklqdq      m1, m2, m0                    ;[00 01 02 03 10 11 12 13]
+    punpckhqdq      m2, m0                        ;[30 31 32 33 20 21 22 23]
+    packssdw        m1, m2                        ;[00 01 02 03 30 31 32 33 10 11 12 13 20 21 22 23]
+    vextracti128    xm0, m1, 1
+
+    movq            [r1], xm1
+    movq            [r1 + r2], xm0
+    movhps          [r1 + 2 * r2], xm0
+    movhps          [r1 + r3], xm1
+    RET
+%endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/dct8.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,45 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Nabajit Deka <nabajit@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_DCT8_H
+#define X265_DCT8_H
+
+FUNCDEF_TU_S2(void, dct, sse2, const int16_t* src, int16_t* dst, intptr_t srcStride);
+FUNCDEF_TU_S2(void, dct, ssse3, const int16_t* src, int16_t* dst, intptr_t srcStride);
+FUNCDEF_TU_S2(void, dct, sse4, const int16_t* src, int16_t* dst, intptr_t srcStride);
+FUNCDEF_TU_S2(void, dct, avx2, const int16_t* src, int16_t* dst, intptr_t srcStride);
+
+FUNCDEF_TU_S2(void, idct, sse2, const int16_t* src, int16_t* dst, intptr_t dstStride);
+FUNCDEF_TU_S2(void, idct, ssse3, const int16_t* src, int16_t* dst, intptr_t dstStride);
+FUNCDEF_TU_S2(void, idct, sse4, const int16_t* src, int16_t* dst, intptr_t dstStride);
+FUNCDEF_TU_S2(void, idct, avx2, const int16_t* src, int16_t* dst, intptr_t dstStride);
+
+void PFX(dst4_ssse3)(const int16_t* src, int16_t* dst, intptr_t srcStride);
+void PFX(dst4_sse2)(const int16_t* src, int16_t* dst, intptr_t srcStride);
+void PFX(idst4_sse2)(const int16_t* src, int16_t* dst, intptr_t srcStride);
+void PFX(dst4_avx2)(const int16_t* src, int16_t* dst, intptr_t srcStride);
+void PFX(idst4_avx2)(const int16_t* src, int16_t* dst, intptr_t srcStride);
+void PFX(denoise_dct_sse4)(int16_t* dct, uint32_t* sum, const uint16_t* offset, int size);
+void PFX(denoise_dct_avx2)(int16_t* dct, uint32_t* sum, const uint16_t* offset, int size);
+
+#endif // ifndef X265_DCT8_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/intrapred.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,93 @@
+/*****************************************************************************
+ * intrapred.h: x86 assembly intra prediction primitive declarations
+ *****************************************************************************
+ * Copyright (C) 2003-2013 x264 project
+ *
+ * Authors: Min Chen <chenm003@163.com> <min.chen@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_INTRAPRED_H
+#define X265_INTRAPRED_H
+
+#define DECL_ANG(bsize, mode, cpu) \
+    void PFX(intra_pred_ang ## bsize ## _ ## mode ## _ ## cpu)(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+
+#define DECL_ANGS(bsize, cpu) \
+    DECL_ANG(bsize, 2, cpu); \
+    DECL_ANG(bsize, 3, cpu); \
+    DECL_ANG(bsize, 4, cpu); \
+    DECL_ANG(bsize, 5, cpu); \
+    DECL_ANG(bsize, 6, cpu); \
+    DECL_ANG(bsize, 7, cpu); \
+    DECL_ANG(bsize, 8, cpu); \
+    DECL_ANG(bsize, 9, cpu); \
+    DECL_ANG(bsize, 10, cpu); \
+    DECL_ANG(bsize, 11, cpu); \
+    DECL_ANG(bsize, 12, cpu); \
+    DECL_ANG(bsize, 13, cpu); \
+    DECL_ANG(bsize, 14, cpu); \
+    DECL_ANG(bsize, 15, cpu); \
+    DECL_ANG(bsize, 16, cpu); \
+    DECL_ANG(bsize, 17, cpu); \
+    DECL_ANG(bsize, 18, cpu); \
+    DECL_ANG(bsize, 19, cpu); \
+    DECL_ANG(bsize, 20, cpu); \
+    DECL_ANG(bsize, 21, cpu); \
+    DECL_ANG(bsize, 22, cpu); \
+    DECL_ANG(bsize, 23, cpu); \
+    DECL_ANG(bsize, 24, cpu); \
+    DECL_ANG(bsize, 25, cpu); \
+    DECL_ANG(bsize, 26, cpu); \
+    DECL_ANG(bsize, 27, cpu); \
+    DECL_ANG(bsize, 28, cpu); \
+    DECL_ANG(bsize, 29, cpu); \
+    DECL_ANG(bsize, 30, cpu); \
+    DECL_ANG(bsize, 31, cpu); \
+    DECL_ANG(bsize, 32, cpu); \
+    DECL_ANG(bsize, 33, cpu); \
+    DECL_ANG(bsize, 34, cpu)
+
+#define DECL_ALL(cpu) \
+    FUNCDEF_TU(void, all_angs_pred, cpu, pixel *dest, pixel *refPix, pixel *filtPix, int bLuma); \
+    FUNCDEF_TU(void, intra_filter, cpu, const pixel *samples, pixel *filtered); \
+    DECL_ANGS(4, cpu); \
+    DECL_ANGS(8, cpu); \
+    DECL_ANGS(16, cpu); \
+    DECL_ANGS(32, cpu)
+
+FUNCDEF_TU_S2(void, intra_pred_dc, sse2, pixel* dst, intptr_t dstStride, const pixel*srcPix, int, int filter);    /* per-TU-size DC prediction entry points */
+FUNCDEF_TU_S2(void, intra_pred_dc, sse4, pixel* dst, intptr_t dstStride, const pixel*srcPix, int, int filter);
+FUNCDEF_TU_S2(void, intra_pred_dc, avx2, pixel* dst, intptr_t dstStride, const pixel*srcPix, int, int filter);
+
+FUNCDEF_TU_S2(void, intra_pred_planar, sse2, pixel* dst, intptr_t dstStride, const pixel*srcPix, int, int filter);    /* per-TU-size planar prediction entry points */
+FUNCDEF_TU_S2(void, intra_pred_planar, sse4, pixel* dst, intptr_t dstStride, const pixel*srcPix, int, int filter);
+FUNCDEF_TU_S2(void, intra_pred_planar, avx2, pixel* dst, intptr_t dstStride, const pixel*srcPix, int, int filter);
+
+DECL_ALL(sse2);    /* all_angs_pred, intra_filter and angular modes 2-34 per CPU level */
+DECL_ALL(ssse3);
+DECL_ALL(sse4);
+DECL_ALL(avx2);
+
+#undef DECL_ALL
+#undef DECL_ANGS
+#undef DECL_ANG
+
+
+#endif // ifndef X265_INTRAPRED_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/intrapred16.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,22071 @@
+;*****************************************************************************
+;* Copyright (C) 2013 x265 project
+;*
+;* Authors: Dnyaneshwar Gorade <dnyaneshwar@multicorewareinc.com>
+;*          Yuvaraj Venkatesh <yuvaraj@multicorewareinc.com>
+;*          Min Chen <chenm003@163.com> <min.chen@multicorewareinc.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32
+
+const ang_table              ; fractional-angle interpolation weights: entry x holds dw pairs (32-x, x), x = 0..31
+%assign x 0
+%rep 32
+    times 4 dw (32-x), x
+%assign x x+1
+%endrep
+
+const ang_table_avx2         ; same weights, 8 pairs per entry to fill a full ymm lane pair
+%assign x 0
+%rep 32
+    times 8 dw (32-x), x
+%assign x x+1
+%endrep
+
+const pw_ang16_12_24,               db  0,  0,  0,  0,  0,  0,  0,  0, 14, 15, 14, 15,  0,  1,  0,  1
+const pw_ang16_13_23,               db  2,  3,  2,  3, 14, 15, 14, 15,  6,  7,  6,  7,  0,  1,  0,  1
+const pw_ang16_14_22,               db  2,  3,  2,  3, 10, 11, 10, 11,  6,  7,  6,  7,  0,  1,  0,  1
+const pw_ang16_15_21,               db 12, 13, 12, 13,  8,  9,  8,  9,  4,  5,  4,  5,  0,  1,  0,  1
+const pw_ang16_16_20,               db  8,  9,  8,  9,  6,  7,  6,  7,  2,  3,  2,  3,  0,  1,  0,  1
+
+const pw_ang32_12_24,               db  0,  1,  0,  1,  2,  3,  2,  3,  4,  5,  4,  5,  6,  7,  6,  7
+const pw_ang32_13_23,               db  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 14, 15,  6,  7,  0,  1
+const pw_ang32_14_22,               db  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 10, 11,  6,  7,  0,  1
+const pw_ang32_15_21,               db  0,  0,  0,  0,  0,  0,  0,  0, 12, 13,  8,  9,  4,  5,  0,  1
+const pw_ang32_16_20,               db  0,  0,  0,  0,  0,  0,  0,  0,  8,  9,  6,  7,  2,  3,  0,  1
+const pw_ang32_17_19_0,             db  0,  0,  0,  0, 12, 13, 10, 11,  8,  9,  6,  7,  2,  3,  0,  1
+
+const shuf_mode_13_23,              db  0,  0, 14, 15,  6,  7,  0,  1,  0,  0,  0,  0,  0,  0,  0,  0
+const shuf_mode_14_22,              db 14, 15, 10, 11,  4,  5,  0,  1,  0,  0,  0,  0,  0,  0,  0,  0
+const shuf_mode_15_21,              db 12, 13,  8,  9,  4,  5,  0,  1,  0,  0,  0,  0,  0,  0,  0,  0
+const shuf_mode_16_20,              db  2,  3,  0,  1, 14, 15, 12, 13,  8,  9,  6,  7,  2,  3,  0,  1
+const shuf_mode_17_19,              db  0,  1, 14, 15, 12, 13, 10, 11,  6,  7,  4,  5,  2,  3,  0,  1
+const shuf_mode32_18,               db 14, 15, 12, 13, 10, 11,  8,  9,  6,  7,  4,  5,  2,  3,  0,  1 ; reverses the 8 words of a lane
+const pw_punpcklwd,                 db  0,  1,  2,  3,  2,  3,  4,  5,  4,  5,  6,  7,  6,  7,  8,  9
+const c_mode32_10_0,                db  0,  1,  0,  1,  0,  1,  0,  1,  0,  1,  0,  1,  0,  1,  0,  1 ; broadcasts word 0
+
+const pw_ang8_12,                   db  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 12, 13,  0,  1
+const pw_ang8_13,                   db  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 14, 15,  8,  9,  0,  1
+const pw_ang8_14,                   db  0,  0,  0,  0,  0,  0,  0,  0, 14, 15, 10, 11,  4,  5,  0,  1
+const pw_ang8_15,                   db  0,  0,  0,  0,  0,  0,  0,  0, 12, 13,  8,  9,  4,  5,  0,  1
+const pw_ang8_16,                   db  0,  0,  0,  0,  0,  0, 12, 13, 10, 11,  6,  7,  4,  5,  0,  1
+const pw_ang8_17,                   db  0,  0, 14, 15, 12, 13, 10, 11,  8,  9,  4,  5,  2,  3,  0,  1
+const pw_swap16,            times 2 db 14, 15, 12, 13, 10, 11,  8,  9,  6,  7,  4,  5,  2,  3,  0,  1 ; reverses the 8 words of a lane
+
+const pw_ang16_13,                  db 14, 15,  8,  9,  0,  1,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0
+const pw_ang16_16,                  db  0,  0,  0,  0,  0,  0, 10, 11,  8,  9,  6,  7,  2,  3,  0,  1
+
+intra_filter4_shuf0:                db  2,  3,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10 ,11, 12, 13
+intra_filter4_shuf1:                db 14, 15,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10 ,11, 12, 13
+intra_filter4_shuf2:        times 2 db  4,  5,  0,  1,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15
+
+;; horizontal planar weights (blkSize - 1 - x), blkSize = 4, pattern repeated twice
+pw_planar4_0:                       dw  3,  2,  1,  0,  3,  2,  1,  0
+
+const planar32_table         ; dd 31, 30, ..., 0
+%assign x 31
+%rep 8
+    dd x, x-1, x-2, x-3
+%assign x x-4
+%endrep
+
+const planar32_table1        ; dd 1, 2, ..., 32
+%assign x 1
+%rep 8
+    dd x, x+1, x+2, x+3
+%assign x x+4
+%endrep
+
+SECTION .text
+
+cextern pb_01
+cextern pw_1
+cextern pw_2
+cextern pw_3
+cextern pw_7
+cextern pw_4
+cextern pw_8
+cextern pw_15
+cextern pw_16
+cextern pw_31
+cextern pw_32
+cextern pd_16
+cextern pd_31
+cextern pd_32
+cextern pw_4096
+cextern pw_pixel_max
+cextern multiL
+cextern multiH
+cextern multiH2
+cextern multiH3
+cextern multi_2Row
+cextern pw_swap
+cextern pb_unpackwq1
+cextern pb_unpackwq2
+cextern pw_planar16_mul
+cextern pw_planar32_mul
+
+;-----------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel* srcPix, int dirMode, int bFilter)
+;-----------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal intra_pred_dc4, 5,6,2
+    movh        m0,             [r2 + 18]          ; left samples  (srcPix + 2*4+1, 16-bit pixels)
+    movh        m1,             [r2 + 2]           ; above samples (srcPix + 1)
+
+    paddw       m0,             m1
+    pshuflw     m1,             m0, 0x4E           ; swap dword halves
+    paddw       m0,             m1
+    pshuflw     m1,             m0, 0xB1           ; swap word pairs
+    paddw       m0,             m1                 ; low words = sumAbove + sumLeft
+
+    test        r4d,            r4d                ; ZF = (bFilter == 0), branched on after the DC store
+
+    paddw       m0,             [pw_4]             ; rounding offset
+    psrlw       m0,             3                  ; dc = (sumAbove + sumLeft + 4) >> 3
+
+    ; store DC 4x4
+    movh        [r0],           m0
+    movh        [r0 + r1 * 2],  m0
+    movh        [r0 + r1 * 4],  m0
+    lea         r5,             [r0 + r1 * 4]
+    movh        [r5 + r1 * 2],  m0
+
+    ; do DC filter
+    jz          .end
+    movh        m1,             m0
+    psllw       m1,             1                  ; m1 = DC * 2
+    paddw       m1,             [pw_2]             ; m1 = DC * 2 + 2
+    movd        r3d,            m1
+    paddw       m0,             m1                 ; m0 = DC * 3 + 2
+    ; filter top
+    movh        m1,             [r2 + 2]
+    paddw       m1,             m0
+    psrlw       m1,             2                  ; (above[x] + DC * 3 + 2) >> 2
+    movh        [r0],           m1             ; overwrite top-left pixel, we will update it later
+
+    ; filter top-left
+    movzx       r3d,            r3w                ; r3w = DC * 2 + 2
+    movzx       r4d, word       [r2 + 18]          ; left[0]
+    add         r3d,            r4d
+    movzx       r4d, word       [r2 + 2]           ; above[0]
+    add         r4d,            r3d
+    shr         r4d,            2                  ; (above[0] + left[0] + DC * 2 + 2) >> 2
+    mov         [r0],           r4w
+
+    ; filter left
+    movu        m1,             [r2 + 20]          ; left[1..]
+    paddw       m1,             m0
+    psrlw       m1,             2
+    movd        r3d,            m1
+    mov         [r0 + r1 * 2],  r3w
+    shr         r3d,            16
+    mov         [r0 + r1 * 4],  r3w
+    pextrw      r3d,            m1, 2
+    mov         [r5 + r1 * 2],  r3w
+.end:
+    RET
+
+;-----------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel* srcPix, int dirMode, int bFilter)
+;-----------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal intra_pred_dc8, 5, 8, 2
+    movu            m0,            [r2 + 34]       ; left samples  (srcPix + 2*8+1, 16-bit pixels)
+    movu            m1,            [r2 + 2]        ; above samples (srcPix + 1)
+
+    paddw           m0,            m1
+    movhlps         m1,            m0
+    paddw           m0,            m1
+    pshufd          m1,            m0, 1
+    paddw           m0,            m1
+    pmaddwd         m0,            [pw_1]          ; finish the horizontal add of the 8+8 samples
+
+    paddw           m0,            [pw_8]          ; rounding offset
+    psrlw           m0,            4              ; sum = sum / 16
+    pshuflw         m0,            m0, 0
+    pshufd          m0,            m0, 0          ; m0 = word [dc_val ...]
+
+    test            r4d,           r4d            ; ZF = (bFilter == 0), branched on below
+
+    ; store DC 8x8
+    lea             r6,            [r1 + r1 * 4]
+    lea             r6,            [r6 + r1]      ; r6 = r1 * 6 -> row 3 (rows are r1 * 2 bytes apart)
+    lea             r5,            [r6 + r1 * 4]  ; r5 = r1 * 10 -> row 5
+    lea             r7,            [r6 + r1 * 8]  ; r7 = r1 * 14 -> row 7
+    movu            [r0],          m0
+    movu            [r0 + r1 * 2], m0
+    movu            [r0 + r1 * 4], m0
+    movu            [r0 + r6],     m0
+    movu            [r0 + r1 * 8], m0
+    movu            [r0 + r5],     m0
+    movu            [r0 + r6 * 2], m0
+    movu            [r0 + r7],     m0
+
+    ; Do DC Filter
+    jz              .end
+    mova            m1,            [pw_2]
+    pmullw          m1,            m0             ; m1 = DC * 2
+    paddw           m1,            [pw_2]         ; m1 = DC * 2 + 2
+    movd            r4d,           m1             ; r4d = DC * 2 + 2
+    paddw           m1,            m0             ; m1 = DC * 3 + 2
+    pshuflw         m1,            m1, 0
+    pshufd          m1,            m1, 0          ; m1 = pixDCx3
+
+    ; filter top
+    movu            m0,            [r2 + 2]
+    paddw           m0,            m1
+    psrlw           m0,            2              ; (above[x] + DC * 3 + 2) >> 2
+    movu            [r0],          m0
+
+    ; filter top-left
+    movzx           r4d,           r4w
+    movzx           r3d, word      [r2 + 34]      ; left[0]
+    add             r4d,           r3d
+    movzx           r3d, word      [r2 + 2]       ; above[0]
+    add             r3d,           r4d
+    shr             r3d,           2              ; (above[0] + left[0] + DC * 2 + 2) >> 2
+    mov             [r0],          r3w
+
+    ; filter left
+    movu            m0,            [r2 + 36]      ; left[1..]
+    paddw           m0,            m1
+    psrlw           m0,            2
+    movh            r3,            m0
+    mov             [r0 + r1 * 2], r3w
+    shr             r3,            16
+    mov             [r0 + r1 * 4], r3w
+    shr             r3,            16
+    mov             [r0 + r6],     r3w
+    shr             r3,            16
+    mov             [r0 + r1 * 8], r3w
+    pshufd          m0,            m0, 0x6E
+    movh            r3,            m0
+    mov             [r0 + r5],     r3w
+    shr             r3,            16
+    mov             [r0 + r6 * 2], r3w
+    shr             r3,            16
+    mov             [r0 + r7],     r3w
+.end:
+    RET
+
+;-------------------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel* srcPix, int dirMode, int bFilter)
+;-------------------------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal intra_pred_dc16, 5, 10, 4
+    lea             r3,                  [r2 + 66]            ; r3 -> left samples (srcPix + 2*16+1, 16-bit pixels)
+    add             r1,                  r1                   ; dstStride in bytes
+    movu            m0,                  [r3]
+    movu            m1,                  [r3 + 16]
+    movu            m2,                  [r2 + 2]             ; above samples (srcPix + 1)
+    movu            m3,                  [r2 + 18]
+
+    paddw           m0,                  m1
+    paddw           m2,                  m3
+    paddw           m0,                  m2
+    HADDUW          m0,                  m1
+    paddd           m0,                  [pd_16]              ; rounding offset
+    psrld           m0,                  5                    ; dc = (sumLeft + sumAbove + 16) >> 5
+
+    movd            r5d,                 m0
+    pshuflw         m0,                  m0, 0 ; m0 = word [dc_val ...]
+    pshufd          m0,                  m0, 0
+
+    test            r4d,                 r4d                  ; ZF = (bFilter == 0), branched on below
+
+    ; store DC 16x16
+    lea             r6,                  [r1 + r1 * 2]        ;index 3
+    lea             r7,                  [r1 + r1 * 4]        ;index 5
+    lea             r8,                  [r6 + r1 * 4]        ;index 7
+    lea             r9,                  [r0 + r8]            ;base + 7
+    movu            [r0],                m0
+    movu            [r0 + 16],           m0
+    movu            [r0 + r1],           m0
+    movu            [r0 + 16 + r1],      m0
+    movu            [r0 + r1 * 2],       m0
+    movu            [r0 + r1 * 2 + 16],  m0
+    movu            [r0 + r6],           m0
+    movu            [r0 + r6 + 16],      m0
+    movu            [r0 + r1 * 4],       m0
+    movu            [r0 + r1 * 4 + 16],  m0
+    movu            [r0 + r7],           m0
+    movu            [r0 + r7 + 16],      m0
+    movu            [r0 + r6 * 2],       m0
+    movu            [r0 + r6 * 2 + 16],  m0
+    movu            [r9],                m0
+    movu            [r9 + 16],           m0
+    movu            [r0 + r1 * 8],       m0
+    movu            [r0 + r1 * 8 + 16],  m0
+    movu            [r9 + r1 * 2],       m0
+    movu            [r9 + r1 * 2 + 16],  m0
+    movu            [r0 + r7 * 2],       m0
+    movu            [r0 + r7 * 2 + 16],  m0
+    movu            [r9 + r1 * 4],       m0
+    movu            [r9 + r1 * 4 + 16],  m0
+    movu            [r0 + r6 * 4],       m0
+    movu            [r0 + r6 * 4 + 16],  m0
+    movu            [r9 + r6 * 2],       m0
+    movu            [r9 + r6 * 2 + 16],  m0
+    movu            [r9 + r8],           m0
+    movu            [r9 + r8 + 16],      m0
+    movu            [r9 + r1 * 8],       m0
+    movu            [r9 + r1 * 8 + 16],  m0
+
+    ; Do DC Filter
+    jz              .end
+    mova            m1,                  [pw_2]
+    pmullw          m1,                  m0                   ; m1 = DC * 2
+    paddw           m1,                  [pw_2]               ; m1 = DC * 2 + 2
+    movd            r4d,                 m1                   ; r4d = DC * 2 + 2
+    paddw           m1,                  m0                   ; m1 = DC * 3 + 2
+
+    ; filter top
+    movu            m2,                  [r2 + 2]
+    paddw           m2,                  m1
+    psrlw           m2,                  2                    ; (above[x] + DC * 3 + 2) >> 2
+    movu            [r0],                m2
+    movu            m3,                  [r2 + 18]
+    paddw           m3,                  m1
+    psrlw           m3,                  2
+    movu            [r0 + 16],           m3
+
+    ; filter top-left
+    movzx           r4d,                 r4w
+    movzx           r5d, word            [r3]                 ; left[0]
+    add             r4d,                 r5d
+    movzx           r5d, word            [r2 + 2]             ; above[0]
+    add             r5d,                 r4d
+    shr             r5d,                 2                    ; (above[0] + left[0] + DC * 2 + 2) >> 2
+    mov             [r0],                r5w
+
+    ; filter left
+    movu            m2,                  [r3 + 2]             ; left[1..]
+    paddw           m2,                  m1
+    psrlw           m2,                  2
+
+    movq            r2,                  m2                   ; srcPix no longer needed; reuse r2
+    pshufd          m2,                  m2, 0xEE
+    mov             [r0 + r1],           r2w
+    shr             r2,                  16
+    mov             [r0 + r1 * 2],       r2w
+    shr             r2,                  16
+    mov             [r0 + r6],           r2w
+    shr             r2,                  16
+    mov             [r0 + r1 * 4],       r2w
+    movq            r2,                  m2
+    mov             [r0 + r7],           r2w
+    shr             r2,                  16
+    mov             [r0 + r6 * 2],       r2w
+    shr             r2,                  16
+    mov             [r9],                r2w
+    shr             r2,                  16
+    mov             [r0 + r1 * 8],       r2w
+
+    movu            m3,                  [r3 + 18]            ; left[9..]
+    paddw           m3,                  m1
+    psrlw           m3,                  2
+
+    movq            r3,                  m3
+    pshufd          m3,                  m3, 0xEE
+    mov             [r9 + r1 * 2],       r3w
+    shr             r3,                  16
+    mov             [r0 + r7 * 2],       r3w
+    shr             r3,                  16
+    mov             [r9 + r1 * 4],       r3w
+    shr             r3,                  16
+    mov             [r0 + r6 * 4],       r3w
+    movq            r3,                  m3
+    mov             [r9 + r6 * 2],       r3w
+    shr             r3,                  16
+    mov             [r9 + r8],           r3w
+    shr             r3,                  16
+    mov             [r9 + r1 * 8],       r3w
+.end:
+    RET
+
+;-------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel* srcPix, int dirMode, int bFilter)
+;-------------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal intra_pred_dc32, 3, 4, 6                    ; no filter path exists for 32x32: dirMode/bFilter args unused
+    lea             r3,                  [r2 + 130]     ;130 = 32*sizeof(pixel)*2 + 1*sizeof(pixel): r3 -> left samples
+    add             r2,                  2              ; r2 -> above samples
+    add             r1,                  r1             ; dstStride in bytes
+    movu            m0,                  [r3]
+    movu            m1,                  [r3 + 16]
+    movu            m2,                  [r3 + 32]
+    movu            m3,                  [r3 + 48]
+    paddw           m0,                  m1
+    paddw           m2,                  m3
+    paddw           m0,                  m2
+    HADDUWD         m0,                  m1             ; partial dword sums of the 32 left samples
+
+    movu            m1,                  [r2]
+    movu            m2,                  [r2 + 16]
+    movu            m3,                  [r2 + 32]
+    movu            m4,                  [r2 + 48]
+    paddw           m1,                  m2
+    paddw           m3,                  m4
+    paddw           m1,                  m3
+    HADDUWD         m1,                  m2             ; partial dword sums of the 32 above samples
+
+    paddd           m0,                  m1
+    HADDD           m0,                  m1
+    paddd           m0,                  [pd_32]     ; sum = sum + 32
+    psrld           m0,                  6           ; sum = sum / 64
+    pshuflw         m0,                  m0, 0
+    pshufd          m0,                  m0, 0
+
+    lea             r2,                 [r1 * 3]
+    ; store DC 32x32
+%assign x 1
+%rep 8
+    movu            [r0 +  0],          m0
+    movu            [r0 + 16],          m0
+    movu            [r0 + 32],          m0
+    movu            [r0 + 48],          m0
+    movu            [r0 + r1 +  0],     m0
+    movu            [r0 + r1 + 16],     m0
+    movu            [r0 + r1 + 32],     m0
+    movu            [r0 + r1 + 48],     m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + 16], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    movu            [r0 + r1 * 2 + 48], m0
+    movu            [r0 + r2 +  0],     m0
+    movu            [r0 + r2 + 16],     m0
+    movu            [r0 + r2 + 32],     m0
+    movu            [r0 + r2 + 48],     m0
+    %if x < 8
+    lea             r0, [r0 + r1 * 4]
+    %endif
+%assign x x + 1
+%endrep
+    RET
+
+;-------------------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel* srcPix, int dirMode, int bFilter)
+;-------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal intra_pred_dc16, 3, 9, 4
+    mov             r3d,                 r4m                ; r3d = bFilter
+    add             r1d,                 r1d                ; dstStride in bytes
+    movu            m0,                  [r2 + 66]          ; left samples  (srcPix + 2*16+1, 16-bit pixels)
+    movu            m2,                  [r2 +  2]          ; above samples (srcPix + 1)
+    paddw           m0,                  m2                 ; dynamic range 13 bits
+
+    vextracti128    xm1,                 m0, 1
+    paddw           xm0,                 xm1                ; dynamic range 14 bits
+    movhlps         xm1,                 xm0
+    paddw           xm0,                 xm1                ; dynamic range 15 bits
+    pmaddwd         xm0,                 [pw_1]
+    phaddd          xm0,                 xm0
+    paddd           xm0,                 [pd_16]            ; rounding offset
+    psrld           xm0,                 5                  ; dc = (sumLeft + sumAbove + 16) >> 5
+    movd            r5d,                 xm0
+    vpbroadcastw    m0,                  xm0
+
+    test            r3d,                 r3d                ; ZF = (bFilter == 0), branched on below
+
+    ; store DC 16x16
+    lea             r6,                  [r1 + r1 * 2]        ; index 3
+    lea             r7,                  [r1 + r1 * 4]        ; index 5
+    lea             r8,                  [r6 + r1 * 4]        ; index 7
+    lea             r4,                  [r0 + r8 * 1]        ; base + 7
+
+    movu            [r0],                m0
+    movu            [r0 + r1],           m0
+    movu            [r0 + r1 * 2],       m0
+    movu            [r0 + r6],           m0
+    movu            [r0 + r1 * 4],       m0
+    movu            [r0 + r7],           m0
+    movu            [r0 + r6 * 2],       m0
+    movu            [r4],                m0
+    movu            [r0 + r1 * 8],       m0
+    movu            [r4 + r1 * 2],       m0
+    movu            [r0 + r7 * 2],       m0
+    movu            [r4 + r1 * 4],       m0
+    movu            [r0 + r6 * 4],       m0
+    movu            [r4 + r6 * 2],       m0
+    movu            [r4 + r8],           m0
+    movu            [r4 + r1 * 8],       m0
+
+    ; Do DC Filter
+    jz              .end
+    mova            m1,                  [pw_2]
+    pmullw          m1,                  m0                 ; m1 = DC * 2
+    paddw           m1,                  [pw_2]             ; m1 = DC * 2 + 2
+    movd            r3d,                 xm1                ; r3d = DC * 2 + 2
+    paddw           m1,                  m0                 ; m1 = DC * 3 + 2
+
+    ; filter top
+    movu            m2,                  [r2 + 2]
+    paddw           m2,                  m1
+    psrlw           m2,                  2                  ; (above[x] + DC * 3 + 2) >> 2
+    movu            [r0],                m2
+
+    ; filter top-left
+    movzx           r3d,                 r3w
+    movzx           r5d, word            [r2 + 66]          ; left[0]
+    add             r3d,                 r5d
+    movzx           r5d, word            [r2 + 2]           ; above[0]
+    add             r5d,                 r3d
+    shr             r5d,                 2                  ; (above[0] + left[0] + DC * 2 + 2) >> 2
+    mov             [r0],                r5w
+
+    ; filter left
+    movu            m2,                  [r2 + 68]          ; left[1..]
+    paddw           m2,                  m1
+    psrlw           m2,                  2
+    vextracti128    xm3,                 m2, 1
+
+    movq            r3,                  xm2
+    pshufd          xm2,                 xm2, 0xEE
+    mov             [r0 + r1],           r3w
+    shr             r3,                  16
+    mov             [r0 + r1 * 2],       r3w
+    shr             r3,                  16
+    mov             [r0 + r6],           r3w
+    shr             r3,                  16
+    mov             [r0 + r1 * 4],       r3w
+    movq            r3,                  xm2
+    mov             [r0 + r7],           r3w
+    shr             r3,                  16
+    mov             [r0 + r6 * 2],       r3w
+    shr             r3,                  16
+    mov             [r4],                r3w
+    shr             r3,                  16
+    mov             [r0 + r1 * 8],       r3w
+
+    movq            r3,                  xm3
+    pshufd          xm3,                 xm3, 0xEE
+    mov             [r4 + r1 * 2],       r3w
+    shr             r3,                  16
+    mov             [r0 + r7 * 2],       r3w
+    shr             r3,                  16
+    mov             [r4 + r1 * 4],       r3w
+    shr             r3,                  16
+    mov             [r0 + r6 * 4],       r3w
+    movq            r3,                  xm3
+    mov             [r4 + r6 * 2],       r3w
+    shr             r3,                  16
+    mov             [r4 + r8],           r3w
+    shr             r3,                  16
+    mov             [r4 + r1 * 8],       r3w
+.end:
+    RET
+
+;---------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
+;---------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal intra_pred_dc32, 3,3,3                         ; no filter path for 32x32: dirMode/bFilter args unused
+    add              r2, 2                             ; r2 -> above samples (srcPix + 1, 16-bit pixels)
+    add             r1d, r1d                           ; dstStride in bytes
+    movu             m0, [r2]
+    movu             m1, [r2 + 32]
+    add              r2, mmsize*4                       ; r2 += 128: r2 -> left samples (srcPix + 2*32+1)
+    paddw            m0, m1                             ; dynamic range 13 bits
+    movu             m1, [r2]
+    movu             m2, [r2 + 32]
+    paddw            m1, m2                             ; dynamic range 13 bits
+    paddw            m0, m1                             ; dynamic range 14 bits
+    vextracti128    xm1, m0, 1
+    paddw           xm0, xm1                            ; dynamic range 15 bits
+    pmaddwd         xm0, [pw_1]
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    phaddd          xm0, xm0
+    paddd           xm0, [pd_32]                        ; sum = sum + 32
+    psrld           xm0, 6                              ; sum = sum / 64
+    vpbroadcastw     m0, xm0
+
+    lea              r2, [r1 * 3]
+    ; store DC 32x32
+    movu            [r0 + r1 * 0 +  0], m0
+    movu            [r0 + r1 * 0 + mmsize], m0
+    movu            [r0 + r1 * 1 +  0], m0
+    movu            [r0 + r1 * 1 + mmsize], m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + mmsize], m0
+    movu            [r0 + r2 * 1 +  0], m0
+    movu            [r0 + r2 * 1 + mmsize], m0
+    lea             r0, [r0 + r1 * 4]
+    movu            [r0 + r1 * 0 +  0], m0
+    movu            [r0 + r1 * 0 + mmsize], m0
+    movu            [r0 + r1 * 1 +  0], m0
+    movu            [r0 + r1 * 1 + mmsize], m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + mmsize], m0
+    movu            [r0 + r2 * 1 +  0], m0
+    movu            [r0 + r2 * 1 + mmsize], m0
+    lea             r0, [r0 + r1 * 4]
+    movu            [r0 + r1 * 0 +  0], m0
+    movu            [r0 + r1 * 0 + mmsize], m0
+    movu            [r0 + r1 * 1 +  0], m0
+    movu            [r0 + r1 * 1 + mmsize], m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + mmsize], m0
+    movu            [r0 + r2 * 1 +  0], m0
+    movu            [r0 + r2 * 1 + mmsize], m0
+    lea             r0, [r0 + r1 * 4]
+    movu            [r0 + r1 * 0 +  0], m0
+    movu            [r0 + r1 * 0 + mmsize], m0
+    movu            [r0 + r1 * 1 +  0], m0
+    movu            [r0 + r1 * 1 + mmsize], m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + mmsize], m0
+    movu            [r0 + r2 * 1 +  0], m0
+    movu            [r0 + r2 * 1 + mmsize], m0
+    lea             r0, [r0 + r1 * 4]
+    movu            [r0 + r1 * 0 +  0], m0
+    movu            [r0 + r1 * 0 + mmsize], m0
+    movu            [r0 + r1 * 1 +  0], m0
+    movu            [r0 + r1 * 1 + mmsize], m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + mmsize], m0
+    movu            [r0 + r2 * 1 +  0], m0
+    movu            [r0 + r2 * 1 + mmsize], m0
+    lea             r0, [r0 + r1 * 4]
+    movu            [r0 + r1 * 0 +  0], m0
+    movu            [r0 + r1 * 0 + mmsize], m0
+    movu            [r0 + r1 * 1 +  0], m0
+    movu            [r0 + r1 * 1 + mmsize], m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + mmsize], m0
+    movu            [r0 + r2 * 1 +  0], m0
+    movu            [r0 + r2 * 1 + mmsize], m0
+    lea             r0, [r0 + r1 * 4]
+    movu            [r0 + r1 * 0 +  0], m0
+    movu            [r0 + r1 * 0 + mmsize], m0
+    movu            [r0 + r1 * 1 +  0], m0
+    movu            [r0 + r1 * 1 + mmsize], m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + mmsize], m0
+    movu            [r0 + r2 * 1 +  0], m0
+    movu            [r0 + r2 * 1 + mmsize], m0
+    lea             r0, [r0 + r1 * 4]
+    movu            [r0 + r1 * 0 +  0], m0
+    movu            [r0 + r1 * 0 + mmsize], m0
+    movu            [r0 + r1 * 1 +  0], m0
+    movu            [r0 + r1 * 1 + mmsize], m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + mmsize], m0
+    movu            [r0 + r2 * 1 +  0], m0
+    movu            [r0 + r2 * 1 + mmsize], m0
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 8x8 planar intra prediction, 16-bit pixels, SSE2.
+; srcPix word layout (per the offsets below): above[] at r2+2, left[] at
+; r2+34, topRight = above[8] at r2+18, bottomLeft = left[8] at r2+50.
+; m3 accumulates per-row: (x+1)*topRight + bottomLeft + 8 + 7*above[x];
+; m4 = bottomLeft - above[x] is added once per emitted row.
INIT_XMM sse2
cglobal intra_pred_planar8, 3,3,5
    movu            m1, [r2 + 2]
    movu            m2, [r2 + 34]
+
    movd            m3, [r2 + 18]           ; topRight   = above[8];
    movd            m4, [r2 + 50]           ; bottomLeft = left[8];
+
+    ; broadcast the two scalars to all 8 word lanes
    pshuflw         m3, m3, 0
    pshuflw         m4, m4, 0
    pshufd          m3, m3, 0               ; v_topRight
    pshufd          m4, m4, 0               ; v_bottomLeft
+
    pmullw          m3, [multiL]            ; (x + 1) * topRight
    pmullw          m0, m1, [pw_7]          ; (blkSize - 1 - y) * above[x]
    paddw           m3, [pw_8]
    paddw           m3, m4
    paddw           m3, m0
    psubw           m4, m1
+
+; Emit one row y: broadcast left[y], weight it by the planar column
+; multipliers, add the row accumulator, shift by log2(2*blkSize) = 4.
%macro INTRA_PRED_PLANAR_8 1
%if (%1 < 4)
    pshuflw         m1, m2, 0x55 * %1
    pshufd          m1, m1, 0
%else
    pshufhw         m1, m2, 0x55 * (%1 - 4)
    pshufd          m1, m1, 0xAA
%endif
    pmullw          m1, [pw_planar16_mul + mmsize]
    paddw           m1, m3
    psraw           m1, 4
    movu            [r0], m1
%if (%1 < 7)                                ; no accumulator/pointer update after last row
    paddw           m3, m4
    lea             r0, [r0 + r1 * 2]       ; r1 is the pixel stride; *2 for 16-bit pixels
%endif
%endmacro
+
    INTRA_PRED_PLANAR_8 0
    INTRA_PRED_PLANAR_8 1
    INTRA_PRED_PLANAR_8 2
    INTRA_PRED_PLANAR_8 3
    INTRA_PRED_PLANAR_8 4
    INTRA_PRED_PLANAR_8 5
    INTRA_PRED_PLANAR_8 6
    INTRA_PRED_PLANAR_8 7
    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 16x16 planar intra prediction, 16-bit pixels, SSE2.
+; above[] at r2+2 (two xmm halves m2/m7), left[] at r2+66/r2+82,
+; topRight = above[16] at r2+34, bottomLeft = left[16] at r2+98.
+; m3/m4 are the row accumulators for columns 0-7 / 8-15;
+; m6/m1 are the per-row increments (bottomLeft - above[x]).
INIT_XMM sse2
cglobal intra_pred_planar16, 3,3,8
    movu            m2, [r2 + 2]
    movu            m7, [r2 + 18]
+
    movd            m3, [r2 + 34]               ; topRight   = above[16]
    movd            m6, [r2 + 98]               ; bottomLeft = left[16]
+
    pshuflw         m3, m3, 0
    pshuflw         m6, m6, 0
    pshufd          m3, m3, 0                   ; v_topRight
    pshufd          m6, m6, 0                   ; v_bottomLeft
+
    pmullw          m4, m3, [multiH]            ; (x + 1) * topRight
    pmullw          m3, [multiL]                ; (x + 1) * topRight
    pmullw          m1, m2, [pw_15]             ; (blkSize - 1 - y) * above[x]
    pmullw          m5, m7, [pw_15]             ; (blkSize - 1 - y) * above[x]
    paddw           m4, [pw_16]                 ; rounding bias = blkSize
    paddw           m3, [pw_16]
    paddw           m4, m6
    paddw           m3, m6
    paddw           m4, m5
    paddw           m3, m1
    psubw           m1, m6, m7                  ; row increment, columns 8-15
    psubw           m6, m2                      ; row increment, columns 0-7
+
+    ; left[] reference pixels for row broadcasts
    movu            m2, [r2 + 66]
    movu            m7, [r2 + 82]
+
+; Emit row y: broadcast left[y] from m2 (y<8) or m7 (y>=8) into m5,
+; weight by the planar column multipliers, add accumulators, >>5.
%macro INTRA_PRED_PLANAR_16 1
%if (%1 < 4)
    pshuflw         m5, m2, 0x55 * %1
    pshufd          m5, m5, 0
%else
%if (%1 < 8)
    pshufhw         m5, m2, 0x55 * (%1 - 4)
    pshufd          m5, m5, 0xAA
%else
%if (%1 < 12)
    pshuflw         m5, m7, 0x55 * (%1 - 8)
    pshufd          m5, m5, 0
%else
    pshufhw         m5, m7, 0x55 * (%1 - 12)
    pshufd          m5, m5, 0xAA
%endif
%endif
%endif
%if (%1 > 0)                                    ; advance accumulators/dst from row 1 on
    paddw           m3, m6
    paddw           m4, m1
    lea             r0, [r0 + r1 * 2]
%endif
    pmullw          m0, m5, [pw_planar16_mul + mmsize]
    pmullw          m5, [pw_planar16_mul]
    paddw           m0, m4
    paddw           m5, m3
    psraw           m5, 5
    psraw           m0, 5
    movu            [r0], m5
    movu            [r0 + 16], m0
%endmacro
+
    INTRA_PRED_PLANAR_16 0
    INTRA_PRED_PLANAR_16 1
    INTRA_PRED_PLANAR_16 2
    INTRA_PRED_PLANAR_16 3
    INTRA_PRED_PLANAR_16 4
    INTRA_PRED_PLANAR_16 5
    INTRA_PRED_PLANAR_16 6
    INTRA_PRED_PLANAR_16 7
    INTRA_PRED_PLANAR_16 8
    INTRA_PRED_PLANAR_16 9
    INTRA_PRED_PLANAR_16 10
    INTRA_PRED_PLANAR_16 11
    INTRA_PRED_PLANAR_16 12
    INTRA_PRED_PLANAR_16 13
    INTRA_PRED_PLANAR_16 14
    INTRA_PRED_PLANAR_16 15
    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 32x32 planar intra prediction, 16-bit pixels, SSE2 (x86-64: uses m8-m15).
+; topRight = above[32] at r2+66, bottomLeft = left[32] at r2+194.
+; m0-m3 hold the row accumulators for the four 8-column slices;
+; m8/m9/m10/m6 hold the matching per-row increments (bottomLeft - above[x]).
INIT_XMM sse2
cglobal intra_pred_planar32, 3,3,16
    movd            m3, [r2 + 66]               ; topRight   = above[32]
+
    pshuflw         m3, m3, 0x00
    pshufd          m3, m3, 0x44
+
    pmullw          m0, m3, [multiL]            ; (x + 1) * topRight
    pmullw          m1, m3, [multiH]            ; (x + 1) * topRight
    pmullw          m2, m3, [multiH2]           ; (x + 1) * topRight
    pmullw          m3, [multiH3]               ; (x + 1) * topRight
+
    movd            m6, [r2 + 194]               ; bottomLeft = left[32]
    pshuflw         m6, m6, 0x00
    pshufd          m6, m6, 0x44
    mova            m5, m6
    paddw           m5, [pw_32]                 ; bottomLeft + rounding bias (blkSize)
+
    paddw           m0, m5
    paddw           m1, m5
    paddw           m2, m5
    paddw           m3, m5
    mova            m8, m6
    mova            m9, m6
    mova            m10, m6
+
+    ; fold in (blkSize-1-y)*above[x] for y = 0 and form the row increments
    mova            m12, [pw_31]
    movu            m4, [r2 + 2]
    psubw           m8, m4
    pmullw          m4, m12
    paddw           m0, m4
+
    movu            m5, [r2 + 18]
    psubw           m9, m5
    pmullw          m5, m12
    paddw           m1, m5
+
    movu            m4, [r2 + 34]
    psubw           m10, m4
    pmullw          m4, m12
    paddw           m2, m4
+
    movu            m5, [r2 + 50]
    psubw           m6, m5
    pmullw          m5, m12
    paddw           m3, m5
+
+    ; column multiplier tables kept in registers for the inner loop
    mova            m12, [pw_planar32_mul]
    mova            m13, [pw_planar32_mul + mmsize]
    mova            m14, [pw_planar16_mul]
    mova            m15, [pw_planar16_mul + mmsize]
    add             r1, r1                      ; stride in bytes (16-bit pixels)
+
+; Emit one full 32-pixel row; %1 holds the broadcast left[y].
%macro PROCESS 1
    pmullw          m5, %1, m12
    pmullw          m11, %1, m13
    paddw           m5, m0
    paddw           m11, m1
    psrlw           m5, 6                       ; >> log2(2*blkSize)
    psrlw           m11, 6
    movu            [r0], m5
    movu            [r0 + 16], m11
+
    pmullw          m5, %1, m14
    pmullw          %1, m15
    paddw           m5, m2
    paddw           %1, m3
    psrlw           m5, 6
    psrlw           %1, 6
    movu            [r0 + 32], m5
    movu            [r0 + 48], %1
%endmacro
+
+; Advance all four accumulators and the destination to the next row.
%macro  INCREMENT 0
    paddw           m2, m10
    paddw           m3, m6
    paddw           m0, m8
    paddw           m1, m9
    add             r0, r1
%endmacro
+
    add             r2, 130             ;130 = 32*sizeof(pixel)*2 + 1*sizeof(pixel)
+; 4 groups of 8 rows; each group reloads 8 left[] pixels into m4 and
+; broadcasts one of them per row.  x+y < 10 skips the final INCREMENT
+; (true for every iteration except x==3, y==7, the last row).
%assign x 0
%rep 4
    movu            m4, [r2]
    add             r2, 16
%assign y 0
%rep 8
    %if y < 4
    pshuflw         m7, m4, 0x55 * y
    pshufd          m7, m7, 0x44
    %else
    pshufhw         m7, m4, 0x55 * (y - 4)
    pshufd          m7, m7, 0xEE
    %endif
        PROCESS m7
    %if x + y < 10
    INCREMENT
    %endif
%assign y y+1
%endrep
%assign x x+1
%endrep
    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 32x32 planar intra prediction, 16-bit pixels, AVX2.
+; Same math as the SSE2 version but a whole 32-pixel row fits in two ymm
+; registers.  r2 is advanced to &above[32] so topRight is [r2] and
+; bottomLeft (left[32]) is [r2 + 128].
INIT_YMM avx2
cglobal intra_pred_planar32, 3,3,8
    movu            m1, [r2 + 2]
    movu            m4, [r2 + 34]
    lea             r2, [r2 + 66]
    vpbroadcastw    m3, [r2]                    ; topRight   = above[32]
    pmullw          m0, m3, [multiL]            ; (x + 1) * topRight
    pmullw          m2, m3, [multiH2]           ; (x + 1) * topRight
    vpbroadcastw    m6, [r2 + 128]              ; bottomLeft = left[32]
    mova            m5, m6
    paddw           m5, [pw_32]                 ; bottomLeft + rounding bias
+
    paddw           m0, m5
    paddw           m2, m5
    mova            m5, m6
+    ; row increments: m3 = bottomLeft - above[0..15], m5 = same for above[16..31]
    psubw           m3, m6, m1
    pmullw          m1, [pw_31]                 ; (blkSize-1-y)*above[x], y = 0
    paddw           m0, m1
    psubw           m5, m4
    pmullw          m4, [pw_31]
    paddw           m2, m4
+
    mova            m6, [pw_planar32_mul]
    mova            m4, [pw_planar16_mul]
    add             r1, r1                      ; byte stride for 16-bit pixels
+
+; Emit one 32-pixel row: broadcast left[y] from [r2 + y*2], weight by the
+; column multipliers, add the accumulators, >> log2(2*blkSize) = 6.
%macro PROCESS_AVX2 1
    vpbroadcastw    m7, [r2 + %1 * 2]
    pmullw          m1, m7, m6
    pmullw          m7, m4
    paddw           m1, m0
    paddw           m7, m2
    psrlw           m1, 6
    psrlw           m7, 6
    movu            [r0], m1
    movu            [r0 + mmsize], m7
%endmacro
+
%macro  INCREMENT_AVX2 0
    paddw           m2, m5
    paddw           m0, m3
    add             r0, r1
%endmacro
+
+    ; step r2 from &above[32] to the start of left[]
    add             r2, mmsize*2
+; 4 groups of 8 rows; the x+y < 10 test only skips the INCREMENT after
+; the very last row (x==3, y==7).
%assign x 0
%rep 4
%assign y 0
%rep 8
    PROCESS_AVX2 y
%if x + y < 10
    INCREMENT_AVX2
%endif
%assign y y+1
%endrep
lea     r2, [r2 + 16]
%assign x x+1
%endrep
    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 16x16 planar intra prediction, 16-bit pixels, AVX2.
+; A whole 16-pixel row fits in one ymm register; two rows are produced per
+; macro invocation.  topRight = above[16] at r2+34, bottomLeft = left[16]
+; at r2+98; r2 is then advanced to the start of left[] (r2+66).
INIT_YMM avx2
cglobal intra_pred_planar16, 3,3,4
    add             r1d, r1d                    ; byte stride for 16-bit pixels
    vpbroadcastw    m3, [r2 + 34]
    vpbroadcastw    m4, [r2 + 98]
    mova            m0, [pw_planar16_mul]
    movu            m2, [r2 + 2]
+
    pmullw          m3, [multiL]                ; (x + 1) * topRight
    pmullw          m1, m2, [pw_15]             ; (blkSize - 1 - y) * above[x]
    paddw           m3, [pw_16]                 ; rounding bias
    paddw           m3, m4
    paddw           m3, m1
    psubw           m4, m2                      ; per-row increment
    add             r2, 66
+
+; Emit rows y and y+1 (%1 = y*2 byte offset into left[]): broadcast
+; left[y]/left[y+1], weight by the column multipliers, add the
+; accumulator (stepped by m4 between rows), >>5, store both rows.
%macro INTRA_PRED_PLANAR16_AVX2 1
    vpbroadcastw    m1, [r2 + %1]
    vpbroadcastw    m2, [r2 + %1 + 2]
+
    pmullw          m1, m0
    pmullw          m2, m0
    paddw           m1, m3
    paddw           m3, m4
    psraw           m1, 5
    paddw           m2, m3
    psraw           m2, 5
    paddw           m3, m4
    movu            [r0], m1
    movu            [r0 + r1], m2
%if %1 <= 24                                    ; no advance after the final pair
    lea             r0, [r0 + r1 * 2]
%endif
%endmacro
    INTRA_PRED_PLANAR16_AVX2 0
    INTRA_PRED_PLANAR16_AVX2 4
    INTRA_PRED_PLANAR16_AVX2 8
    INTRA_PRED_PLANAR16_AVX2 12
    INTRA_PRED_PLANAR16_AVX2 16
    INTRA_PRED_PLANAR16_AVX2 20
    INTRA_PRED_PLANAR16_AVX2 24
    INTRA_PRED_PLANAR16_AVX2 28
%undef INTRA_PRED_PLANAR16_AVX2
    RET
+
+; Transpose the 4x4 block of words held in m1 (rows 0/1) and m3 (rows 2/3)
+; via two rounds of word interleaves; clobbers m0.  Used by the horizontal
+; angular modes so they can share CALC_4x4/STORE_4x4 with vertical ones.
%macro TRANSPOSE_4x4 0
    punpckhwd    m0, m1, m3
    punpcklwd    m1, m3
    punpckhwd    m3, m1, m0
    punpcklwd    m1, m0
%endmacro
+
+; Store the 4x4 result: m1 = rows 0/1, m3 = rows 2/3 (4 words each half).
+; Doubles r1 to a byte stride for 16-bit pixels and then clobbers it
+; with stride*3, so callers must not reuse r1 afterwards.
%macro STORE_4x4 0
    add         r1, r1
    movh        [r0], m1
    movhps      [r0 + r1], m1
    movh        [r0 + r1 * 2], m3
    lea         r1, [r1 * 3]
    movhps      [r0 + r1], m3
%endmacro
+
+; Angular interpolation for 4 rows.  m1-m4 each hold 4 pairs of adjacent
+; reference pixels; %1-%4 are the per-row fraction indices into ang_table
+; (entries presumably hold the {32-frac, frac} word weights -- confirm
+; against the ang_table definition).  Each row computes
+; (pair dot weights + 16) >> 5, then packs rows 0/1 into m1 and 2/3 into m3.
%macro CALC_4x4 4
    mova    m0, [pd_16]
    pmaddwd m1, [ang_table + %1 * 16]
    paddd   m1, m0
    psrld   m1, 5
+
    pmaddwd m2, [ang_table + %2 * 16]
    paddd   m2, m0
    psrld   m2, 5
    packssdw m1, m2
+
    pmaddwd m3, [ang_table + %3 * 16]
    paddd   m3, m0
    psrld   m3, 5
+
    pmaddwd m4, [ang_table + %4 * 16]
    paddd   m4, m0
    psrld   m4, 5
    packssdw m3, m4
%endmacro
+
+;-----------------------------------------------------------------------------------------
+; void intraPredAng4(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;-----------------------------------------------------------------------------------------
+; Angular modes 2 and 34 (pure 45-degree diagonals): the prediction is a
+; plain copy of the reference array shifted by one pixel per row.
+; Mode 34 reads from r2+4 (above refs), every other caller of this entry
+; (mode 2) reads from r2+20 (left refs); cmove picks the pointer.
INIT_XMM sse2
cglobal intra_pred_ang4_2, 3,5,4
    lea         r4,            [r2 + 4]
    add         r2,            20
    cmp         r3m,           byte 34
    cmove       r2,            r4
+
    add         r1,            r1      ; byte stride for 16-bit pixels
    movu        m0,            [r2]
    movh        [r0],          m0
    psrldq      m0,            2       ; shift refs left one pixel per row
    movh        [r0 + r1],     m0
    psrldq      m0,            2
    movh        [r0 + r1 * 2], m0
    lea         r1,            [r1 * 3]
    psrldq      m0,            2
    movh        [r0 + r1],     m0
    RET
+
+; Angular mode 3, 4x4, 16-bit.  Horizontal family: reads left refs at
+; r2+18, builds the four adjacent-pixel pairs, interpolates with
+; fractions 26/20/14/8 and transposes before storing.
cglobal intra_pred_ang4_3, 3,3,5
    movu        m0, [r2 + 18]           ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m0
    psrldq      m0, 2
    punpcklwd   m2, m0                  ;[6 5 5 4 4 3 3 2]
    mova        m3, m0
    psrldq      m0, 2
    punpcklwd   m3, m0                  ;[7 6 6 5 5 4 4 3]
    mova        m4, m0
    psrldq      m0, 2
    punpcklwd   m4, m0                  ;[8 7 7 6 6 5 5 4]
+
    CALC_4x4 26, 20, 14, 8
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 33: mirror of mode 3 in the vertical family -- same
+; fractions 26/20/14/8 but reads the above refs at r2+2 and stores
+; without transposing.
cglobal intra_pred_ang4_33, 3,3,5
    movu        m0, [r2 + 2]            ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m0
    psrldq      m0, 2
    punpcklwd   m2, m0                  ;[6 5 5 4 4 3 3 2]
    mova        m3, m0
    psrldq      m0, 2
    punpcklwd   m3, m0                  ;[7 6 6 5 5 4 4 3]
    mova        m4, m0
    psrldq      m0, 2
    punpcklwd   m4, m0                  ;[8 7 7 6 6 5 5 4]
+
    CALC_4x4 26, 20, 14, 8
+
    STORE_4x4
    RET
+
+; Angular mode 4 (horizontal family, left refs at r2+18), fractions
+; 21/10/31/20.  Rows 1 and 2 use the same reference pair (m3 = m2).
cglobal intra_pred_ang4_4, 3,3,5
    movu        m0, [r2 + 18]           ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m0
    psrldq      m0, 2
    punpcklwd   m2, m0                  ;[6 5 5 4 4 3 3 2]
    mova        m3, m2
    mova        m4, m0
    psrldq      m0, 2
    punpcklwd   m4, m0                  ;[7 6 6 5 5 4 4 3]
+
    CALC_4x4 21, 10, 31, 20
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 6 (horizontal family), fractions 13/26/7/20.
+; Rows 0/1 share one reference pair and rows 2/3 share the next.
cglobal intra_pred_ang4_6, 3,3,5
    movu        m0, [r2 + 18]           ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m1
    mova        m3, m0
    psrldq      m0, 2
    punpcklwd   m3, m0                  ;[6 5 5 4 4 3 3 2]
    mova        m4, m3
+
    CALC_4x4 13, 26, 7, 20
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 7 (horizontal family), fractions 9/18/27/4.
+; Rows 0-2 share the first reference pair; only row 3 advances.
cglobal intra_pred_ang4_7, 3,3,5
    movu        m0, [r2 + 18]           ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m1
    mova        m3, m1
    mova        m4, m0
    psrldq      m0, 2
    punpcklwd   m4, m0                  ;[6 5 5 4 4 3 3 2]
+
    CALC_4x4 9, 18, 27, 4
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 8 (horizontal family), fractions 5/10/15/20.
+; The angle is shallow enough that all four rows share one reference pair.
cglobal intra_pred_ang4_8, 3,3,5
    movu        m0, [r2 + 18]           ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m1
    mova        m3, m1
    mova        m4, m1
+
    CALC_4x4 5, 10, 15, 20
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 9 (horizontal family), fractions 2/4/6/8; all rows share
+; one reference pair.
cglobal intra_pred_ang4_9, 3,3,5
    movu        m0, [r2 + 18]           ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m1
    mova        m3, m1
    mova        m4, m1
+
    CALC_4x4 2, 4, 6, 8
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 10 (pure horizontal): each output row is the left
+; neighbour repeated.  Rows 1-3 are stored immediately; row 0 is stored
+; at .quit so that, when the filter flag (r4m) is set, it is first
+; adjusted by (above[x] - topLeft) >> 1 and clamped to [0, pixel_max].
cglobal intra_pred_ang4_10, 3,3,3
    movh        m0,             [r2 + 18] ;[4 3 2 1]
+
    punpcklwd   m0,             m0      ;[4 4 3 3 2 2 1 1]
    pshufd      m1,             m0, 0xFA
    add         r1d,            r1d     ; byte stride for 16-bit pixels
    pshufd      m0,             m0, 0x50
    movhps      [r0 + r1],      m0
    movh        [r0 + r1 * 2],  m1
    lea         r1d,            [r1 * 3]
    movhps      [r0 + r1],      m1
+
    cmp         r4m,            byte 0
    jz         .quit
+
    ; filter
    movd        m2,             [r2]    ;[7 6 5 4 3 2 1 0]
    pshuflw     m2,             m2, 0x00
    movh        m1,             [r2 + 2]
    psubw       m1,             m2
    psraw       m1,             1
    paddw       m0,             m1
    pxor        m1,             m1      ; clamp to valid pixel range
    pmaxsw      m0,             m1
    pminsw      m0,             [pw_pixel_max]
.quit:
    movh        [r0],           m0
    RET
+
+; Angular mode 11 (horizontal family), fractions 30/28/26/24.  The pair
+; list starts at the top-left pixel ([r2] via the punpcklqdq/psrldq
+; splice), and all rows share it.
cglobal intra_pred_ang4_11, 3,3,5
    movh        m0, [r2 + 18]           ;[x x x 4 3 2 1 0]
    movh        m1, [r2 - 6]
    punpcklqdq  m1, m0
    psrldq      m1, 6
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m2, m1
    mova        m3, m1
    mova        m4, m1
+
    CALC_4x4 30, 28, 26, 24
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 12 (horizontal family), fractions 27/22/17/12; same
+; shared reference pair (including top-left) as mode 11.
cglobal intra_pred_ang4_12, 3,3,5
    movh        m0, [r2 + 18]
    movh        m1, [r2 - 6]
    punpcklqdq  m1, m0
    psrldq      m1, 6
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m2, m1
    mova        m3, m1
    mova        m4, m1
+
    CALC_4x4 27, 22, 17, 12
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 13 (horizontal family), fractions 23/14/5/28.  Negative
+; angle: row 3 needs a projected reference from the other border
+; ([r2 + 6]) spliced in front of the left refs.
cglobal intra_pred_ang4_13, 3,3,5
    movd        m4, [r2 + 6]
    movd        m1, [r2 - 2]
    movh        m0, [r2 + 18]
    punpcklwd   m4, m1
    punpcklqdq  m4, m0
    psrldq      m4, 4
    mova        m1, m4
    psrldq      m1, 2
    punpcklwd   m4, m1                  ;[3 2 2 1 1 0 0 x]
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m2, m1
    mova        m3, m1
+
    CALC_4x4 23, 14, 5, 28
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 14 (horizontal family), fractions 19/6/25/12.  Negative
+; angle: rows 2/3 use the pair list shifted back by one, with the
+; projected reference from [r2 + 2] spliced in.
cglobal intra_pred_ang4_14, 3,3,5
    movd        m4, [r2 + 2]
    movd        m1, [r2 - 2]
    movh        m0, [r2 + 18]
    punpcklwd   m4, m1
    punpcklqdq  m4, m0
    psrldq      m4, 4
    mova        m1, m4
    psrldq      m1, 2
    punpcklwd   m4, m1                  ;[3 2 2 1 1 0 0 x]
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m2, m1
    mova        m3, m4
+
    CALC_4x4 19, 6, 25, 12
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 15 (horizontal family), fractions 15/30/13/28.  Negative
+; angle steep enough to need two projected references (B, C) plus the
+; top-left (A) spliced ahead of the left refs.
cglobal intra_pred_ang4_15, 3,3,5
    movd        m3, [r2]                ;[x x x A]
    movh        m4, [r2 + 4]            ;[x C x B]
    movh        m0, [r2 + 18]           ;[4 3 2 1]
    pshuflw     m4, m4, 0x22            ;[B C B C]
    punpcklqdq  m4, m3                  ;[x x x A B C B C]
    psrldq      m4, 2                   ;[x x x x A B C B]
    punpcklqdq  m4, m0
    psrldq      m4, 2
    mova        m1, m4
    mova        m2, m4
    psrldq      m1, 4
    psrldq      m2, 2
    punpcklwd   m4, m2                  ;[2 1 1 0 0 x x y]
    punpcklwd   m2, m1                  ;[3 2 2 1 1 0 0 x]
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m3, m2
+
    CALC_4x4 15, 30, 13, 28
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 16 (horizontal family), fractions 11/22/1/12, with two
+; projected references (B, C) and top-left (A) shuffled into place ahead
+; of the left refs.
cglobal intra_pred_ang4_16, 3,3,5
    movd        m3, [r2]                ;[x x x A]
    movd        m4, [r2 + 4]            ;[x x C B]
    movh        m0, [r2 + 18]           ;[4 3 2 1]
    punpcklwd   m4, m3                  ;[x C A B]
    pshuflw     m4, m4, 0x4A            ;[A B C C]
    punpcklqdq  m4, m0                  ;[4 3 2 1 A B C C]
    psrldq      m4, 2
    mova        m1, m4
    mova        m2, m4
    psrldq      m1, 4
    psrldq      m2, 2
    punpcklwd   m4, m2                  ;[2 1 1 0 0 x x y]
    punpcklwd   m2, m1                  ;[3 2 2 1 1 0 0 x]
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m3, m2
+
    CALC_4x4 11, 22, 1, 12
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 17 (horizontal family), fractions 6/12/18/24.  Steepest
+; negative horizontal angle: three projected references (B, C, D) plus
+; top-left are spliced ahead of the left refs, and each row's pair list
+; steps back one pixel from the previous row's.
cglobal intra_pred_ang4_17, 3,3,5
    movd        m3, [r2]
    movh        m4, [r2 + 2]            ;[D x C B]
    pshuflw     m4, m4, 0x1F            ;[B C D D]
    punpcklqdq  m4, m3                  ;[x x x A B C D D]
    psrldq      m4, 2                   ;[x x x x A B C D]
    movhps      m4, [r2 + 18]
+
    mova        m3, m4
    psrldq      m3, 2
    punpcklwd   m4, m3
    mova        m2, m3
    psrldq      m2, 2
    punpcklwd   m3, m2
    mova        m1, m2
    psrldq      m1, 2
    punpcklwd   m2, m1
    mova        m0, m1
    psrldq      m0, 2
    punpcklwd   m1, m0
+
    CALC_4x4 6, 12, 18, 24
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 18 (45-degree diagonal through the top-left corner):
+; pure copy, no interpolation.  Builds the reference run
+; [above.. topLeft ..left-reversed] in m0 and writes rows bottom-up,
+; shifting one pixel between rows.  Clobbers r2 as the stride*3 temp.
cglobal intra_pred_ang4_18, 3,3,1
    movh        m0, [r2 + 16]
    pinsrw      m0, [r2], 0             ; insert topLeft
    pshuflw     m0, m0, q0123           ; reverse the left refs
    movhps      m0, [r2 + 2]            ; append the above refs
    add         r1, r1                  ; byte stride for 16-bit pixels
    lea         r2, [r1 * 3]
    movh        [r0 + r2], m0
    psrldq      m0, 2
    movh        [r0 + r1 * 2], m0
    psrldq      m0, 2
    movh        [r0 + r1], m0
    psrldq      m0, 2
    movh        [r0], m0
    RET
+
+; Angular mode 19: vertical-family mirror of mode 17 (same fractions
+; 6/12/18/24) with the roles of above/left swapped and no transpose.
+; NOTE(review): this cglobal line is indented unlike its siblings --
+; harmless to the assembler, but worth normalizing upstream.
    cglobal intra_pred_ang4_19, 3,3,5
    movd        m3, [r2]
    movh        m4, [r2 + 18]           ;[D x C B]
    pshuflw     m4, m4, 0x1F            ;[B C D D]
    punpcklqdq  m4, m3                  ;[x x x A B C D D]
    psrldq      m4, 2                   ;[x x x x A B C D]
    movhps      m4, [r2 + 2]
+
    mova        m3, m4
    psrldq      m3, 2
    punpcklwd   m4, m3
    mova        m2, m3
    psrldq      m2, 2
    punpcklwd   m3, m2
    mova        m1, m2
    psrldq      m1, 2
    punpcklwd   m2, m1
    mova        m0, m1
    psrldq      m0, 2
    punpcklwd   m1, m0
+
    CALC_4x4 6, 12, 18, 24
+
    STORE_4x4
    RET
+
+; Angular mode 20: vertical-family mirror of mode 16 (fractions
+; 11/22/1/12); projected references come from the left border (r2+20)
+; and the main refs from above (r2+2).  No transpose.
cglobal intra_pred_ang4_20, 3,3,5
    movd        m3, [r2]                ;[x x x A]
    movd        m4, [r2 + 20]           ;[x x C B]
    movh        m0, [r2 + 2]            ;[4 3 2 1]
    punpcklwd   m4, m3                  ;[x C A B]
    pshuflw     m4, m4, 0x4A            ;[A B C C]
    punpcklqdq  m4, m0                  ;[4 3 2 1 A B C C]
    psrldq      m4, 2
    mova        m1, m4
    mova        m2, m4
    psrldq      m1, 4
    psrldq      m2, 2
    punpcklwd   m4, m2                  ;[2 1 1 0 0 x x y]
    punpcklwd   m2, m1                  ;[3 2 2 1 1 0 0 x]
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m3, m2
+
    CALC_4x4 11, 22, 1, 12
+
    STORE_4x4
    RET
+
+; Angular mode 21: vertical-family mirror of mode 15 (fractions
+; 15/30/13/28); projected references from the left border (r2+20),
+; main refs from above (r2+2).  No transpose.
cglobal intra_pred_ang4_21, 3,3,5
    movd        m3, [r2]                ;[x x x A]
    movh        m4, [r2 + 20]           ;[x C x B]
    movh        m0, [r2 + 2]            ;[4 3 2 1]
    pshuflw     m4, m4, 0x22            ;[B C B C]
    punpcklqdq  m4, m3                  ;[x x x A B C B C]
    psrldq      m4, 2                   ;[x x x x A B C B]
    punpcklqdq  m4, m0
    psrldq      m4, 2
    mova        m1, m4
    mova        m2, m4
    psrldq      m1, 4
    psrldq      m2, 2
    punpcklwd   m4, m2                  ;[2 1 1 0 0 x x y]
    punpcklwd   m2, m1                  ;[3 2 2 1 1 0 0 x]
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m3, m2
+
    CALC_4x4 15, 30, 13, 28
+
    STORE_4x4
    RET
+
+; Angular mode 22: vertical-family mirror of mode 14 (fractions
+; 19/6/25/12); one projected reference from the left border (r2+18).
cglobal intra_pred_ang4_22, 3,3,5
    movd        m4, [r2 + 18]
    movd        m1, [r2 - 2]
    movh        m0, [r2 + 2]
    punpcklwd   m4, m1
    punpcklqdq  m4, m0
    psrldq      m4, 4
    mova        m1, m4
    psrldq      m1, 2
    punpcklwd   m4, m1                  ;[3 2 2 1 1 0 0 x]
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m2, m1
    mova        m3, m4
+
    CALC_4x4 19, 6, 25, 12
+
    STORE_4x4
    RET
+
+; Angular mode 23: vertical-family mirror of mode 13 (fractions
+; 23/14/5/28); one projected reference from the left border (r2+22).
cglobal intra_pred_ang4_23, 3,3,5
    movd        m4, [r2 + 22]
    movd        m1, [r2 - 2]
    movh        m0, [r2 + 2]
    punpcklwd   m4, m1
    punpcklqdq  m4, m0
    psrldq      m4, 4
    mova        m1, m4
    psrldq      m1, 2
    punpcklwd   m4, m1                  ;[3 2 2 1 1 0 0 x]
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m2, m1
    mova        m3, m1
+
    CALC_4x4 23, 14, 5, 28
+
    STORE_4x4
    RET
+
+; Angular mode 24: vertical-family mirror of mode 12 (fractions
+; 27/22/17/12); all rows share one pair list starting at top-left.
cglobal intra_pred_ang4_24, 3,3,5
    movh        m0, [r2 + 2]
    movh        m1, [r2 - 6]
    punpcklqdq  m1, m0
    psrldq      m1, 6
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m2, m1
    mova        m3, m1
    mova        m4, m1
+
    CALC_4x4 27, 22, 17, 12
+
    STORE_4x4
    RET
+
+; Angular mode 25: vertical-family mirror of mode 11 (fractions
+; 30/28/26/24); all rows share one pair list starting at top-left.
cglobal intra_pred_ang4_25, 3,3,5
    movh        m0, [r2 + 2]            ;[x x x 4 3 2 1 0]
    movh        m1, [r2 - 6]
    punpcklqdq  m1, m0
    psrldq      m1, 6
    punpcklwd   m1, m0                  ;[4 3 3 2 2 1 1 0]
    mova        m2, m1
    mova        m3, m1
    mova        m4, m1
+
    CALC_4x4 30, 28, 26, 24
+
    STORE_4x4
    RET
+
+; Angular mode 26 (pure vertical): every row is a copy of the above
+; refs.  When the filter flag (r4m) is set, the first column is
+; re-derived as above[0] + (left[y] - topLeft) >> 1, clamped to
+; [0, pixel_max], and written word-by-word through a GPR (r2 is dead
+; by then and is reused as that scratch register -- 64-bit only).
cglobal intra_pred_ang4_26, 3,3,3
    movh        m0,             [r2 + 2] ;[8 7 6 5 4 3 2 1]
    add         r1d,            r1d     ; byte stride for 16-bit pixels
    ; store
    movh        [r0],           m0
    movh        [r0 + r1],      m0
    movh        [r0 + r1 * 2],  m0
    lea         r3,             [r1 * 3]
    movh        [r0 + r3],      m0
+
    ; filter
    cmp         r4m,            byte 0
    jz         .quit
+
    pshuflw     m0,             m0, 0x00 ; broadcast above[0]
    movd        m2,             [r2]
    pshuflw     m2,             m2, 0x00 ; broadcast topLeft
    movh        m1,             [r2 + 18]
    psubw       m1,             m2
    psraw       m1,             1
    paddw       m0,             m1
    pxor        m1,             m1      ; clamp to valid pixel range
    pmaxsw      m0,             m1
    pminsw      m0,             [pw_pixel_max]
+
    movh        r2,             m0      ; 4 filtered words -> GPR
    mov         [r0],           r2w
    shr         r2,             16
    mov         [r0 + r1],      r2w
    shr         r2,             16
    mov         [r0 + r1 * 2],  r2w
    shr         r2,             16
    mov         [r0 + r3],      r2w
.quit:
    RET
+
+; Angular mode 27: vertical-family mirror of mode 9 (fractions 2/4/6/8);
+; above refs at r2+2, all rows share one reference pair, no transpose.
cglobal intra_pred_ang4_27, 3,3,5
    movu        m0, [r2 + 2]            ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m1
    mova        m3, m1
    mova        m4, m1
+
    CALC_4x4 2, 4, 6, 8
+
    STORE_4x4
    RET
+
+; Angular mode 28: vertical-family mirror of mode 8 (fractions
+; 5/10/15/20); all rows share one reference pair.
cglobal intra_pred_ang4_28, 3,3,5
+
    movu        m0, [r2 + 2]            ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m1
    mova        m3, m1
    mova        m4, m1
+
    CALC_4x4 5, 10, 15, 20
+
    STORE_4x4
    RET
+
+; Angular mode 29: vertical-family mirror of mode 7 (fractions
+; 9/18/27/4); rows 0-2 share the first pair, row 3 advances.
cglobal intra_pred_ang4_29, 3,3,5
    movu        m0, [r2 + 2]            ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m1
    mova        m3, m1
    mova        m4, m0
    psrldq      m0, 2
    punpcklwd   m4, m0                  ;[6 5 5 4 4 3 3 2]
+
    CALC_4x4 9, 18, 27, 4
+
    STORE_4x4
    RET
+
+; Angular mode 30: vertical-family mirror of mode 6 (fractions
+; 13/26/7/20); rows 0/1 and rows 2/3 share pairs.
cglobal intra_pred_ang4_30, 3,3,5
    movu        m0, [r2 + 2]            ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m1
    mova        m3, m0
    psrldq      m0, 2
    punpcklwd   m3, m0                  ;[6 5 5 4 4 3 3 2]
    mova        m4, m3
+
    CALC_4x4 13, 26, 7, 20
+
    STORE_4x4
    RET
+
+; Angular mode 5 (horizontal family, left refs at r2+18), fractions
+; 17/2/19/4.  Rows 1 and 2 share the same reference pair (m3 = m2).
+; Defined out of numeric order after mode 30 in this file.
cglobal intra_pred_ang4_5, 3,3,5
    movu        m0, [r2 + 18]           ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m0
    psrldq      m0, 2
    punpcklwd   m2, m0                  ;[6 5 5 4 4 3 3 2]
    mova        m3, m2
    mova        m4, m0
    psrldq      m0, 2
    punpcklwd   m4, m0                  ;[7 6 6 5 5 4 4 3]
+
    CALC_4x4 17, 2, 19, 4
+
    TRANSPOSE_4x4
+
    STORE_4x4
    RET
+
+; Angular mode 31: vertical-family mirror of mode 5 (fractions
+; 17/2/19/4); above refs at r2+2, rows 1/2 share a pair, no transpose.
cglobal intra_pred_ang4_31, 3,3,5
    movu        m0, [r2 + 2]            ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m0
    psrldq      m0, 2
    punpcklwd   m2, m0                  ;[6 5 5 4 4 3 3 2]
    mova        m3, m2
    mova        m4, m0
    psrldq      m0, 2
    punpcklwd   m4, m0                  ;[7 6 6 5 5 4 4 3]
+
    CALC_4x4 17, 2, 19, 4
+
    STORE_4x4
    RET
+
+; Angular mode 32: vertical-family mirror of mode 4 (fractions
+; 21/10/31/20); above refs at r2+2, rows 1/2 share a pair, no transpose.
+; NOTE(review): this cglobal line is indented unlike its siblings --
+; harmless to the assembler, but worth normalizing upstream.
    cglobal intra_pred_ang4_32, 3,3,5
    movu        m0, [r2 + 2]            ;[8 7 6 5 4 3 2 1]
    mova        m1, m0
    psrldq      m0, 2
    punpcklwd   m1, m0                  ;[5 4 4 3 3 2 2 1]
    mova        m2, m0
    psrldq      m0, 2
    punpcklwd   m2, m0                  ;[6 5 5 4 4 3 3 2]
    mova        m3, m2
    mova        m4, m0
    psrldq      m0, 2
    punpcklwd   m4, m0                  ;[7 6 6 5 5 4 4 3]
+
    CALC_4x4 21, 10, 31, 20
+
    STORE_4x4
    RET
+
+;-----------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel* above, int, int filter)
+;-----------------------------------------------------------------------------------
+; 4x4 DC intra prediction, 16-bit pixels, SSE4.1.
+; Sums the 4 neighbours at r2+2 and the 4 at r2+18, derives
+; dc = (sum + 4) >> 3 via pmulhrsw with pw_4096, fills the block, then
+; (if the filter arg r4 is non-zero) smooths the first row/column with
+; (ref + 3*dc + 2) >> 2 and the corner with (top + left + 2*dc + 2) >> 2.
+; NOTE: r1 here is used directly as [r0 + r1*2] etc., i.e. the 16-bit
+; stride doubling is folded into the addressing rather than r1 itself.
INIT_XMM sse4
cglobal intra_pred_dc4, 5,6,2
    lea         r3,             [r2 + 18]
    add         r2,             2
+
    movh        m0,             [r3]           ; sumAbove
    movh        m1,             [r2]           ; sumLeft
+
    paddw       m0,             m1
    pshufd      m1,             m0, 1
    paddw       m0,             m1
    phaddw      m0,             m0             ; m0 = sum
+
    test        r4d,            r4d            ; set ZF for the jz below, before flags are needed
+
    pmulhrsw    m0,             [pw_4096]      ; m0 = (sum + 4) / 8
    movd        r4d,            m0             ; r4d = dc_val
    movzx       r4d,            r4w
    pshuflw     m0,             m0, 0          ; m0 = word [dc_val ...]
+
    ; store DC 4x4
    movh        [r0],           m0
    movh        [r0 + r1 * 2],  m0
    movh        [r0 + r1 * 4],  m0
    lea         r5,             [r0 + r1 * 4]
    movh        [r5 + r1 * 2],  m0
+
    ; do DC filter
    jz          .end
    lea         r5d,            [r4d * 2 + 2]  ; r5d = DC * 2 + 2
    add         r4d,            r5d            ; r4d = DC * 3 + 2
    movd        m0,             r4d
    pshuflw     m0,             m0, 0          ; m0 = pixDCx3
+
    ; filter top
    movu        m1,             [r2]
    paddw       m1,             m0
    psrlw       m1,             2
    movh        [r0],           m1             ; overwrite top-left pixel, we will update it later
+
    ; filter top-left
    movzx       r4d, word       [r3]
    add         r5d,            r4d
    movzx       r4d, word       [r2]
    add         r4d,            r5d
    shr         r4d,            2
    mov         [r0],           r4w
+
    ; filter left
    lea         r0,             [r0 + r1 * 2]
    movu        m1,             [r3 + 2]
    paddw       m1,             m0
    psrlw       m1,             2
    movd        r3d,            m1
    mov         [r0],           r3w
    shr         r3d,            16
    mov         [r0 + r1 * 2],  r3w
    pextrw      [r0 + r1 * 4],  m1, 2
.end:
    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 4x4 planar intra prediction, 16-bit pixels, SSE2.
+; above[] at r2+2 (m1), left[] at r2+18 (m2); topRight = above[4] and
+; bottomLeft = left[4] are extracted from word lane 4 of each load.
+; m3 is the row accumulator (+m4 per row), rows are produced by
+; broadcasting left[y] with pshuflw and shifting by log2(2*blkSize) = 3.
INIT_XMM sse2
cglobal intra_pred_planar4, 3,3,5
    movu            m1, [r2 + 2]
    movu            m2, [r2 + 18]
    pshufhw         m3, m1, 0               ; topRight
    pshufd          m3, m3, 0xAA
    pshufhw         m4, m2, 0               ; bottomLeft
    pshufd          m4, m4, 0xAA
+
    pmullw          m3, [multi_2Row]        ; (x + 1) * topRight
    pmullw          m0, m1, [pw_3]          ; (blkSize - 1 - y) * above[x]
+
    paddw           m3, [pw_4]              ; rounding bias = blkSize
    paddw           m3, m4
    paddw           m3, m0
    psubw           m4, m1                  ; per-row increment
+
+    ; row 0: left[0]
    pshuflw         m1, m2, 0
    pmullw          m1, [pw_planar4_0]
    paddw           m1, m3
    paddw           m3, m4
    psraw           m1, 3
    movh            [r0], m1
+
+    ; row 1: left[1]
    pshuflw         m1, m2, 01010101b
    pmullw          m1, [pw_planar4_0]
    paddw           m1, m3
    paddw           m3, m4
    psraw           m1, 3
    movh            [r0 + r1 * 2], m1
    lea             r0, [r0 + 4 * r1]
+
+    ; row 2: left[2]
    pshuflw         m1, m2, 10101010b
    pmullw          m1, [pw_planar4_0]
    paddw           m1, m3
    paddw           m3, m4
    psraw           m1, 3
    movh            [r0], m1
+
+    ; row 3: left[3]
    pshuflw         m1, m2, 11111111b
    pmullw          m1, [pw_planar4_0]
    paddw           m1, m3
    psraw           m1, 3
    movh            [r0 + r1 * 2], m1
    RET
+
+;-----------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel* above, int, int filter)
+;-----------------------------------------------------------------------------------
+; 8x8 DC intra prediction for 16-bit pixels.
+; r2 points at srcPix: above[] at [r2 + 2], left[] at [r2 + 34] (r3).
+; r4d is the filter flag; the DC value is the average of the 8 above + 8 left
+; neighbours. r1 is doubled up front (word stride -> byte stride).
+INIT_XMM sse4
+cglobal intra_pred_dc8, 5, 7, 2
+    lea             r3, [r2 + 34]
+    add             r2,            2
+    add             r1,            r1
+    movu            m0,            [r3]
+    movu            m1,            [r2]
+
+    ; horizontal sum of the 16 neighbour words
+    paddw           m0,            m1
+    movhlps         m1,            m0
+    paddw           m0,            m1
+    phaddw          m0,            m0
+    pmaddwd         m0,            [pw_1]
+
+    movd            r5d,           m0
+    add             r5d,           8
+    shr             r5d,           4              ; sum = sum / 16
+    movd            m1,            r5d
+    pshuflw         m1,            m1, 0          ; m1 = word [dc_val ...]
+    pshufd          m1,            m1, 0
+
+    ; ZF = (filter == 0); the SSE stores and lea below do not touch EFLAGS,
+    ; so the jz further down still tests this result
+    test            r4d,           r4d
+
+    ; store DC 8x8
+    mov             r6,            r0
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    movu            [r0 + r1 * 2], m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0 + r1],     m1
+    movu            [r0 + r1 * 2], m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0 + r1],     m1
+    movu            [r0 + r1 * 2], m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0 + r1],     m1
+
+    ; Do DC Filter (smooth the first row/column against the neighbours)
+    jz              .end
+    lea             r4d,           [r5d * 2 + 2]  ; r4d = DC * 2 + 2
+    add             r5d,           r4d            ; r5d = DC * 3 + 2
+    movd            m1,            r5d
+    pshuflw         m1,            m1, 0          ; m1 = pixDCx3
+    pshufd          m1,            m1, 0
+
+    ; filter top: (above[x] + DC*3 + 2) >> 2
+    movu            m0,            [r2]
+    paddw           m0,            m1
+    psrlw           m0,            2
+    movu            [r6],          m0
+
+    ; filter top-left: (left[0] + above[0] + DC*2 + 2) >> 2, overwrites dst[0]
+    movzx           r5d, word      [r3]
+    add             r4d,           r5d
+    movzx           r5d, word      [r2]
+    add             r5d,           r4d
+    shr             r5d,           2
+    mov             [r6],          r5w
+
+    ; filter left column (rows 1..7): (left[y] + DC*3 + 2) >> 2
+    add             r6,            r1
+    movu            m0,            [r3 + 2]
+    paddw           m0,            m1
+    psrlw           m0,            2
+    pextrw          [r6],          m0, 0
+    pextrw          [r6 + r1],     m0, 1
+    pextrw          [r6 + r1 * 2], m0, 2
+    lea             r6,            [r6 + r1 * 2]
+    pextrw          [r6 + r1],     m0, 3
+    pextrw          [r6 + r1 * 2], m0, 4
+    lea             r6,            [r6 + r1 * 2]
+    pextrw          [r6 + r1],     m0, 5
+    pextrw          [r6 + r1 * 2], m0, 6
+.end:
+    RET
+
+;-------------------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel* srcPix, int, int filter)
+;-------------------------------------------------------------------------------------------------------
+; 16x16 DC intra prediction for 16-bit pixels.
+; r2 points at srcPix: above[] at [r2 + 2], left[] at [r2 + 66] (r3).
+; DC = average of the 16 above + 16 left neighbours; r4d is the filter flag.
+INIT_XMM sse4
+cglobal intra_pred_dc16, 5, 7, 4
+    lea             r3,                  [r2 + 66]
+    add             r2,                  2
+    add             r1,                  r1
+    movu            m0,                  [r3]
+    movu            m1,                  [r3 + 16]
+    movu            m2,                  [r2]
+    movu            m3,                  [r2 + 16]
+
+    paddw           m0,                  m1                     ; dynamic range 13 bits
+    paddw           m2,                  m3
+    paddw           m0,                  m2                     ; dynamic range 14 bits
+    movhlps         m1,                  m0                     ; dynamic range 15 bits
+    paddw           m0,                  m1                     ; dynamic range 16 bits
+    pmaddwd         m0,                  [pw_1]
+    phaddd          m0,                  m0
+
+    movd            r5d,                 m0
+    add             r5d,                 16
+    shr             r5d,                 5     ; sum = sum / 32 (32 neighbour samples)
+    movd            m1,                  r5d
+    pshuflw         m1,                  m1, 0 ; m1 = word [dc_val ...]
+    pshufd          m1,                  m1, 0
+
+    ; ZF = (filter == 0); preserved across the SSE stores below until jz .end
+    test            r4d,                 r4d
+
+    ; store DC 16x16
+    mov             r6,                  r0
+    movu            [r0],                m1
+    movu            [r0 + 16],           m1
+    movu            [r0 + r1],           m1
+    movu            [r0 + 16 + r1],      m1
+    lea             r0,                  [r0 + r1 * 2]
+    movu            [r0],                m1
+    movu            [r0 + 16],           m1
+    movu            [r0 + r1],           m1
+    movu            [r0 + 16 + r1],      m1
+    lea             r0,                  [r0 + r1 * 2]
+    movu            [r0],                m1
+    movu            [r0 + 16],           m1
+    movu            [r0 + r1],           m1
+    movu            [r0 + 16 + r1],      m1
+    lea             r0,                  [r0 + r1 * 2]
+    movu            [r0],                m1
+    movu            [r0 + 16],           m1
+    movu            [r0 + r1],           m1
+    movu            [r0 + 16 + r1],      m1
+    lea             r0,                  [r0 + r1 * 2]
+    movu            [r0],                m1
+    movu            [r0 + 16],           m1
+    movu            [r0 + r1],           m1
+    movu            [r0 + 16 + r1],      m1
+    lea             r0,                  [r0 + r1 * 2]
+    movu            [r0],                m1
+    movu            [r0 + 16],           m1
+    movu            [r0 + r1],           m1
+    movu            [r0 + 16 + r1],      m1
+    lea             r0,                  [r0 + r1 * 2]
+    movu            [r0],                m1
+    movu            [r0 + 16],           m1
+    movu            [r0 + r1],           m1
+    movu            [r0 + 16 + r1],      m1
+    lea             r0,                  [r0 + r1 * 2]
+    movu            [r0],                m1
+    movu            [r0 + 16],           m1
+    movu            [r0 + r1],           m1
+    movu            [r0 + 16 + r1],      m1
+
+    ; Do DC Filter (smooth first row/column)
+    jz              .end
+    lea             r4d,                 [r5d * 2 + 2]  ; r4d = DC * 2 + 2
+    add             r5d,                 r4d            ; r5d = DC * 3 + 2
+    movd            m1,                  r5d
+    pshuflw         m1,                  m1, 0          ; m1 = pixDCx3
+    pshufd          m1,                  m1, 0
+
+    ; filter top: (above[x] + DC*3 + 2) >> 2
+    movu            m2,                  [r2]
+    paddw           m2,                  m1
+    psrlw           m2,                  2
+    movu            [r6],                m2
+    movu            m3,                  [r2 + 16]
+    paddw           m3,                  m1
+    psrlw           m3,                  2
+    movu            [r6 + 16],           m3
+
+    ; filter top-left: (left[0] + above[0] + DC*2 + 2) >> 2, overwrites dst[0]
+    movzx           r5d, word            [r3]
+    add             r4d,                 r5d
+    movzx           r5d, word            [r2]
+    add             r5d,                 r4d
+    shr             r5d,                 2
+    mov             [r6],                r5w
+
+    ; filter left column rows 1..8
+    add             r6,                  r1
+    movu            m2,                  [r3 + 2]
+    paddw           m2,                  m1
+    psrlw           m2,                  2
+
+    pextrw          [r6],                m2, 0
+    pextrw          [r6 + r1],           m2, 1
+    lea             r6,                  [r6 + r1 * 2]
+    pextrw          [r6],                m2, 2
+    pextrw          [r6 + r1],           m2, 3
+    lea             r6,                  [r6 + r1 * 2]
+    pextrw          [r6],                m2, 4
+    pextrw          [r6 + r1],           m2, 5
+    lea             r6,                  [r6 + r1 * 2]
+    pextrw          [r6],                m2, 6
+    pextrw          [r6 + r1],           m2, 7
+
+    ; filter left column rows 9..15
+    lea             r6,                  [r6 + r1 * 2]
+    movu            m3,                  [r3 + 18]
+    paddw           m3,                  m1
+    psrlw           m3,                  2
+
+    pextrw          [r6],                m3, 0
+    pextrw          [r6 + r1],           m3, 1
+    lea             r6,                  [r6 + r1 * 2]
+    pextrw          [r6],                m3, 2
+    pextrw          [r6 + r1],           m3, 3
+    lea             r6,                  [r6 + r1 * 2]
+    pextrw          [r6],                m3, 4
+    pextrw          [r6 + r1],           m3, 5
+    lea             r6,                  [r6 + r1 * 2]
+    pextrw          [r6],                m3, 6
+.end:
+    RET
+
+;-------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel* srcPix, int, int filter)
+;-------------------------------------------------------------------------------------------
+; 32x32 DC intra prediction for 16-bit pixels.
+; r2 points at srcPix: above[] at [r2 + 2], left[] at [r2 + 130] (r3).
+; DC = average of the 32 above + 32 left neighbours.  No DC filter pass for
+; 32x32 blocks (HEVC only filters DC for sizes < 32), hence only 3 args read.
+INIT_XMM sse4
+cglobal intra_pred_dc32, 3, 5, 6
+    lea             r3,                  [r2 + 130]     ;130 = 32*sizeof(pixel)*2 + 1*sizeof(pixel)
+    add             r2,                  2
+    add             r1,                  r1
+    ; sum the 32 left neighbours
+    movu            m0,                  [r3]
+    movu            m1,                  [r3 + 16]
+    movu            m2,                  [r3 + 32]
+    movu            m3,                  [r3 + 48]
+    paddw           m0,                  m1             ; dynamic range 13 bits
+    paddw           m2,                  m3
+    paddw           m0,                  m2             ; dynamic range 14 bits
+    ; sum the 32 above neighbours
+    movu            m1,                  [r2]
+    movu            m3,                  [r2 + 16]
+    movu            m4,                  [r2 + 32]
+    movu            m5,                  [r2 + 48]
+    paddw           m1,                  m3             ; dynamic range 13 bits
+    paddw           m4,                  m5
+    paddw           m1,                  m4             ; dynamic range 14 bits
+    paddw           m0,                  m1             ; dynamic range 15 bits
+    pmaddwd         m0,                  [pw_1]         ; widen to dwords before the final reduce
+    movhlps         m1,                  m0
+    paddd           m0,                  m1
+    phaddd          m0,                  m0
+
+    paddd           m0,                  [pd_32]     ; sum = sum + 32
+    psrld           m0,                  6           ; sum = sum / 64
+    pshuflw         m0,                  m0, 0
+    pshufd          m0,                  m0, 0
+
+    ; fill the 32x32 block, 8 rows per loop iteration, 4 iterations
+    lea             r2,                 [r1 * 3]
+    mov             r3d,                4
+.loop:
+    ; store DC 32x32
+    movu            [r0 +  0],          m0
+    movu            [r0 + 16],          m0
+    movu            [r0 + 32],          m0
+    movu            [r0 + 48],          m0
+    movu            [r0 + r1 +  0],     m0
+    movu            [r0 + r1 + 16],     m0
+    movu            [r0 + r1 + 32],     m0
+    movu            [r0 + r1 + 48],     m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + 16], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    movu            [r0 + r1 * 2 + 48], m0
+    movu            [r0 + r2 +  0],     m0
+    movu            [r0 + r2 + 16],     m0
+    movu            [r0 + r2 + 32],     m0
+    movu            [r0 + r2 + 48],     m0
+    lea             r0, [r0 + r1 * 4]
+    movu            [r0 +  0],          m0
+    movu            [r0 + 16],          m0
+    movu            [r0 + 32],          m0
+    movu            [r0 + 48],          m0
+    movu            [r0 + r1 +  0],     m0
+    movu            [r0 + r1 + 16],     m0
+    movu            [r0 + r1 + 32],     m0
+    movu            [r0 + r1 + 48],     m0
+    movu            [r0 + r1 * 2 +  0], m0
+    movu            [r0 + r1 * 2 + 16], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    movu            [r0 + r1 * 2 + 48], m0
+    movu            [r0 + r2 +  0],     m0
+    movu            [r0 + r2 + 16],     m0
+    movu            [r0 + r2 + 32],     m0
+    movu            [r0 + r2 + 48],     m0
+    lea             r0, [r0 + r1 * 4]
+    dec             r3d
+    jnz            .loop
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 4x4 planar intra prediction for 16-bit pixels (SSE4 variant).
+; Same math as the sse2 version above, but r1 is doubled up front and the
+; left-weight table is loaded once into m0 instead of re-read each row.
+INIT_XMM sse4
+cglobal intra_pred_planar4, 3,3,5
+    add             r1, r1
+    movu            m1, [r2 + 2]
+    movu            m2, [r2 + 18]
+    pshufhw         m3, m1, 0               ; topRight
+    pshufd          m3, m3, 0xAA
+    pshufhw         m4, m2, 0               ; bottomLeft
+    pshufd          m4, m4, 0xAA
+
+    pmullw          m3, [multi_2Row]        ; (x + 1) * topRight
+    pmullw          m0, m1, [pw_3]          ; (blkSize - 1 - y) * above[x]
+
+    ; m3 = row-0 accumulator; m4 = per-row step (bottomLeft - above[x])
+    paddw           m3, [pw_4]
+    paddw           m3, m4
+    paddw           m3, m0
+    psubw           m4, m1
+    mova            m0, [pw_planar4_0]
+
+    ; row 0
+    pshuflw         m1, m2, 0
+    pmullw          m1, m0
+    paddw           m1, m3
+    paddw           m3, m4
+    psraw           m1, 3
+    movh            [r0], m1
+
+    ; row 1
+    pshuflw         m1, m2, 01010101b
+    pmullw          m1, m0
+    paddw           m1, m3
+    paddw           m3, m4
+    psraw           m1, 3
+    movh            [r0 + r1], m1
+    lea             r0, [r0 + 2 * r1]
+
+    ; row 2
+    pshuflw         m1, m2, 10101010b
+    pmullw          m1, m0
+    paddw           m1, m3
+    paddw           m3, m4
+    psraw           m1, 3
+    movh            [r0], m1
+
+    ; row 3
+    pshuflw         m1, m2, 11111111b
+    pmullw          m1, m0
+    paddw           m1, m3
+    paddw           m3, m4
+    psraw           m1, 3
+    movh            [r0 + r1], m1
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 8x8 planar intra prediction for 16-bit pixels.
+; srcPix words: above[] at [r2 + 2] (topRight = above[8] at +18),
+;               left[]  at [r2 + 34] (bottomLeft = left[8] at +50).
+INIT_XMM sse4
+cglobal intra_pred_planar8, 3,3,5
+    add             r1, r1
+    movu            m1, [r2 + 2]
+    movu            m2, [r2 + 34]
+
+    movd            m3, [r2 + 18]           ; topRight   = above[8];
+    movd            m4, [r2 + 50]           ; bottomLeft = left[8];
+
+    pshuflw         m3, m3, 0
+    pshuflw         m4, m4, 0
+    pshufd          m3, m3, 0               ; v_topRight
+    pshufd          m4, m4, 0               ; v_bottomLeft
+
+    pmullw          m3, [multiL]            ; (x + 1) * topRight
+    pmullw          m0, m1, [pw_7]          ; (blkSize - 1 - y) * above[x]
+    ; m3 = row-0 accumulator (rounding + bottomLeft + weighted above)
+    paddw           m3, [pw_8]
+    paddw           m3, m4
+    paddw           m3, m0
+    psubw           m4, m1                  ; m4 = per-row step (bottomLeft - above[x])
+    mova            m0, [pw_planar16_mul + mmsize]  ; left-sample weights (blkSize-1-x)
+
+; Emit one output row %1 (0..7): broadcast left[%1] from m2, weight it,
+; add the running accumulator in m3, shift, store, then step m3 by m4.
+%macro INTRA_PRED_PLANAR8 1
+%if (%1 < 4)
+    pshuflw         m1, m2, 0x55 * %1
+    pshufd          m1, m1, 0
+%else
+    pshufhw         m1, m2, 0x55 * (%1 - 4)
+    pshufd          m1, m1, 0xAA
+%endif
+    pmullw          m1, m0
+    paddw           m1, m3
+    paddw           m3, m4
+    psraw           m1, 4
+    movu            [r0], m1
+    lea             r0, [r0 + r1]
+%endmacro
+
+    INTRA_PRED_PLANAR8 0
+    INTRA_PRED_PLANAR8 1
+    INTRA_PRED_PLANAR8 2
+    INTRA_PRED_PLANAR8 3
+    INTRA_PRED_PLANAR8 4
+    INTRA_PRED_PLANAR8 5
+    INTRA_PRED_PLANAR8 6
+    INTRA_PRED_PLANAR8 7
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 16x16 planar intra prediction for 16-bit pixels.
+; srcPix words: above[0..15] at [r2 + 2]/[r2 + 18] (topRight = above[16] at +34),
+;               left[0..15]  at [r2 + 66]/[r2 + 82] (bottomLeft = left[16] at +98).
+; Two 8-wide accumulators: m3 covers columns 0-7, m4 covers columns 8-15.
+INIT_XMM sse4
+cglobal intra_pred_planar16, 3,3,8
+    add             r1, r1
+    movu            m2, [r2 + 2]
+    movu            m7, [r2 + 18]
+
+    movd            m3, [r2 + 34]               ; topRight   = above[16]
+    movd            m6, [r2 + 98]               ; bottomLeft = left[16]
+
+    pshuflw         m3, m3, 0
+    pshuflw         m6, m6, 0
+    pshufd          m3, m3, 0                   ; v_topRight
+    pshufd          m6, m6, 0                   ; v_bottomLeft
+
+    pmullw          m4, m3, [multiH]            ; (x + 1) * topRight
+    pmullw          m3, [multiL]                ; (x + 1) * topRight
+    pmullw          m1, m2, [pw_15]             ; (blkSize - 1 - y) * above[x]
+    pmullw          m5, m7, [pw_15]             ; (blkSize - 1 - y) * above[x]
+    paddw           m4, [pw_16]
+    paddw           m3, [pw_16]
+    paddw           m4, m6
+    paddw           m3, m6
+    paddw           m4, m5
+    paddw           m3, m1
+    ; per-row steps: m1 for the high half, m6 for the low half
+    psubw           m1, m6, m7
+    psubw           m6, m2
+
+    ; reload m2/m7 with the 16 left samples (broadcast source for each row)
+    movu            m2, [r2 + 66]
+    movu            m7, [r2 + 82]
+
+; Emit one output row %1 (0..15): broadcast left[%1] out of m2 (rows 0-7)
+; or m7 (rows 8-15), weight with pw_planar16_mul, add accumulators, shift,
+; store both halves, then step the accumulators.
+%macro INTRA_PRED_PLANAR16 1
+%if (%1 < 4)
+    pshuflw         m5, m2, 0x55 * %1
+    pshufd          m5, m5, 0
+%else
+%if (%1 < 8)
+    pshufhw         m5, m2, 0x55 * (%1 - 4)
+    pshufd          m5, m5, 0xAA
+%else
+%if (%1 < 12)
+    pshuflw         m5, m7, 0x55 * (%1 - 8)
+    pshufd          m5, m5, 0
+%else
+    pshufhw         m5, m7, 0x55 * (%1 - 12)
+    pshufd          m5, m5, 0xAA
+%endif
+%endif
+%endif
+    pmullw          m0, m5, [pw_planar16_mul + mmsize]
+    pmullw          m5, [pw_planar16_mul]
+    paddw           m0, m4
+    paddw           m5, m3
+    paddw           m3, m6
+    paddw           m4, m1
+    psraw           m5, 5
+    psraw           m0, 5
+    movu            [r0], m5
+    movu            [r0 + 16], m0
+    lea             r0, [r0 + r1]
+%endmacro
+
+    INTRA_PRED_PLANAR16 0
+    INTRA_PRED_PLANAR16 1
+    INTRA_PRED_PLANAR16 2
+    INTRA_PRED_PLANAR16 3
+    INTRA_PRED_PLANAR16 4
+    INTRA_PRED_PLANAR16 5
+    INTRA_PRED_PLANAR16 6
+    INTRA_PRED_PLANAR16 7
+    INTRA_PRED_PLANAR16 8
+    INTRA_PRED_PLANAR16 9
+    INTRA_PRED_PLANAR16 10
+    INTRA_PRED_PLANAR16 11
+    INTRA_PRED_PLANAR16 12
+    INTRA_PRED_PLANAR16 13
+    INTRA_PRED_PLANAR16 14
+    INTRA_PRED_PLANAR16 15
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 32x32 planar intra prediction for 16-bit pixels.
+; Works in 32-bit lanes (pmulld/paddd) because 32x32 weights overflow words.
+; Eight 4-dword column accumulators are needed (m0-m3 live in registers,
+; m12-m15 in registers on x86-64); the per-row step vectors m8-m11/m16-m19
+; are %define-aliased to aligned stack slots where registers run out.
+INIT_XMM sse4
+%if ARCH_X86_64 == 1
+cglobal intra_pred_planar32, 3,7,16
+  ; NOTE: align stack to 64 bytes, so all of local data in same cache line
+  mov               r6, rsp
+  sub               rsp, 4*mmsize
+  and               rsp, ~63
+  ; m16-m19 do not exist on SSE4 hardware: they are spilled step vectors
+  %define           m16 [rsp + 0 * mmsize]
+  %define           m17 [rsp + 1 * mmsize]
+  %define           m18 [rsp + 2 * mmsize]
+  %define           m19 [rsp + 3 * mmsize]
+%else
+cglobal intra_pred_planar32, 3,7,8
+  ; NOTE: align stack to 64 bytes, so all of local data in same cache line
+  mov               r6, rsp
+  sub               rsp, 12*mmsize
+  and               rsp, ~63
+  ; x86-32 has only 8 XMM registers: spill m8-m19 to the stack
+  %define           m8  [rsp + 0  * mmsize]
+  %define           m9  [rsp + 1  * mmsize]
+  %define           m10 [rsp + 2  * mmsize]
+  %define           m11 [rsp + 3  * mmsize]
+  %define           m12 [rsp + 4  * mmsize]
+  %define           m13 [rsp + 5  * mmsize]
+  %define           m14 [rsp + 6  * mmsize]
+  %define           m15 [rsp + 7  * mmsize]
+  %define           m16 [rsp + 8  * mmsize]
+  %define           m17 [rsp + 9  * mmsize]
+  %define           m18 [rsp + 10 * mmsize]
+  %define           m19 [rsp + 11 * mmsize]
+%endif
+    add             r1, r1
+    lea             r5, [planar32_table1]
+
+    movzx           r3d, word [r2 + 66]         ; topRight   = above[32]
+    movd            m7, r3d
+    pshufd          m7, m7, 0                   ; v_topRight
+
+    ; (x + 1) * topRight for all 32 columns, 4 dwords at a time
+    pmulld          m0, m7, [r5 + 0  ]          ; (x + 1) * topRight
+    pmulld          m1, m7, [r5 + 16 ]
+    pmulld          m2, m7, [r5 + 32 ]
+    pmulld          m3, m7, [r5 + 48 ]
+    pmulld          m4, m7, [r5 + 64 ]
+    pmulld          m5, m7, [r5 + 80 ]
+    pmulld          m6, m7, [r5 + 96 ]
+    pmulld          m7, m7, [r5 + 112]
+
+    ; park columns 16-31 in the (possibly stack-backed) m12-m15 slots
+    mova            m12, m4
+    mova            m13, m5
+    mova            m14, m6
+    mova            m15, m7
+
+    movzx           r3d, word [r2 + 194]        ; bottomLeft = left[32]
+    movd            m6, r3d
+    pshufd          m6, m6, 0                   ; v_bottomLeft
+
+    ; add bottomLeft + rounding (32) into every accumulator
+    paddd           m0, m6
+    paddd           m1, m6
+    paddd           m2, m6
+    paddd           m3, m6
+    paddd           m0, [pd_32]
+    paddd           m1, [pd_32]
+    paddd           m2, [pd_32]
+    paddd           m3, [pd_32]
+
+    mova            m4, m12
+    mova            m5, m13
+    paddd           m4, m6
+    paddd           m5, m6
+    paddd           m4, [pd_32]
+    paddd           m5, [pd_32]
+    mova            m12, m4
+    mova            m13, m5
+
+    mova            m4, m14
+    mova            m5, m15
+    paddd           m4, m6
+    paddd           m5, m6
+    paddd           m4, [pd_32]
+    paddd           m5, [pd_32]
+    mova            m14, m4
+    mova            m15, m5
+
+    ; fold (blkSize - 1 - y) * above[x] into each accumulator and record the
+    ; per-row step (bottomLeft - above[x]) into m8-m11/m16-m19
+    ; above[0-3] * (blkSize - 1 - y)
+    pmovzxwd        m4, [r2 + 2]
+    pmulld          m5, m4, [pd_31]
+    paddd           m0, m5
+    psubd           m5, m6, m4
+    mova            m8, m5
+
+    ; above[4-7] * (blkSize - 1 - y)
+    pmovzxwd        m4, [r2 + 10]
+    pmulld          m5, m4, [pd_31]
+    paddd           m1, m5
+    psubd           m5, m6, m4
+    mova            m9, m5
+
+    ; above[8-11] * (blkSize - 1 - y)
+    pmovzxwd        m4, [r2 + 18]
+    pmulld          m5, m4, [pd_31]
+    paddd           m2, m5
+    psubd           m5, m6, m4
+    mova            m10, m5
+
+    ; above[12-15] * (blkSize - 1 - y)
+    pmovzxwd        m4, [r2 + 26]
+    pmulld          m5, m4, [pd_31]
+    paddd           m3, m5
+    psubd           m5, m6, m4
+    mova            m11, m5
+
+    ; above[16-19] * (blkSize - 1 - y)
+    pmovzxwd        m4, [r2 + 34]
+    mova            m7, m12
+    pmulld          m5, m4, [pd_31]
+    paddd           m7, m5
+    mova            m12, m7
+    psubd           m5, m6, m4
+    mova            m16, m5
+
+    ; above[20-23] * (blkSize - 1 - y)
+    pmovzxwd        m4, [r2 + 42]
+    mova            m7, m13
+    pmulld          m5, m4, [pd_31]
+    paddd           m7, m5
+    mova            m13, m7
+    psubd           m5, m6, m4
+    mova            m17, m5
+
+    ; above[24-27] * (blkSize - 1 - y)
+    pmovzxwd        m4, [r2 + 50]
+    mova            m7, m14
+    pmulld          m5, m4, [pd_31]
+    paddd           m7, m5
+    mova            m14, m7
+    psubd           m5, m6, m4
+    mova            m18, m5
+
+    ; above[28-31] * (blkSize - 1 - y)
+    pmovzxwd        m4, [r2 + 58]
+    mova            m7, m15
+    pmulld          m5, m4, [pd_31]
+    paddd           m7, m5
+    mova            m15, m7
+    psubd           m5, m6, m4
+    mova            m19, m5
+
+    add             r2, 130                      ; (2 * blkSize + 1)
+    lea             r5, [planar32_table]
+
+; Emit one output row: broadcast left[y] from [r2], weight it per column
+; (planar32_table), add the accumulators, shift by 6, pack to words and
+; store 64 bytes; then step the accumulators by the (bottomLeft - above[x])
+; vectors for the next row.
+%macro INTRA_PRED_PLANAR32 0
+    movzx           r3d, word [r2]
+    movd            m4, r3d
+    pshufd          m4, m4, 0
+
+    pmulld          m5, m4, [r5]
+    pmulld          m6, m4, [r5 + 16]
+    paddd           m5, m0
+    paddd           m6, m1
+    paddd           m0, m8
+    paddd           m1, m9
+    psrad           m5, 6
+    psrad           m6, 6
+    packusdw        m5, m6
+    movu            [r0], m5
+
+    pmulld          m5, m4, [r5 + 32]
+    pmulld          m6, m4, [r5 + 48]
+    paddd           m5, m2
+    paddd           m6, m3
+    paddd           m2, m10
+    paddd           m3, m11
+    psrad           m5, 6
+    psrad           m6, 6
+    packusdw        m5, m6
+    movu            [r0 + 16], m5
+
+    pmulld          m5, m4, [r5 + 64]
+    pmulld          m6, m4, [r5 + 80]
+    paddd           m5, m12
+    paddd           m6, m13
+    psrad           m5, 6
+    psrad           m6, 6
+    packusdw        m5, m6
+    movu            [r0 + 32], m5
+    ; step the stack-backed accumulators (load/add/store round trip)
+    mova            m5, m12
+    mova            m6, m13
+    paddd           m5, m16
+    paddd           m6, m17
+    mova            m12, m5
+    mova            m13, m6
+
+    pmulld          m5, m4, [r5 + 96]
+    pmulld          m4, [r5 + 112]
+    paddd           m5, m14
+    paddd           m4, m15
+    psrad           m5, 6
+    psrad           m4, 6
+    packusdw        m5, m4
+    movu            [r0 + 48], m5
+    mova            m4, m14
+    mova            m5, m15
+    paddd           m4, m18
+    paddd           m5, m19
+    mova            m14, m4
+    mova            m15, m5
+
+    lea             r0, [r0 + r1]
+    add             r2, 2
+%endmacro
+
+    ; 8 iterations x 4 expansions = 32 rows
+    mov             r4, 8
+.loop:
+    INTRA_PRED_PLANAR32
+    INTRA_PRED_PLANAR32
+    INTRA_PRED_PLANAR32
+    INTRA_PRED_PLANAR32
+    dec             r4
+    jnz             .loop
+    mov             rsp, r6                      ; restore stack saved at entry
+    RET
+
+;-----------------------------------------------------------------------------------------
+; void intraPredAng4(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;-----------------------------------------------------------------------------------------
+; 4x4 angular prediction, modes 2 and 34 (pure +-45 degrees: each row is the
+; reference shifted by one sample, no interpolation needed).
+; Mode 34 reads the above reference at src+4; otherwise (mode 2) the left
+; reference at src+20 -- selected branchlessly with cmove.
+INIT_XMM ssse3
+cglobal intra_pred_ang4_2, 3,5,4
+    lea         r4,            [r2 + 4]
+    add         r2,            20
+    cmp         r3m,           byte 34
+    cmove       r2,            r4
+
+    add         r1,            r1
+    movu        m0,            [r2]
+    movh        [r0],          m0
+    palignr     m1,            m0, 2          ; row 1 = reference >> 1 sample
+    movh        [r0 + r1],     m1
+    palignr     m2,            m0, 4          ; row 2 = reference >> 2 samples
+    movh        [r0 + r1 * 2], m2
+    lea         r1,            [r1 * 3]
+    psrldq      m0,            6              ; row 3 = reference >> 3 samples
+    movh        [r0 + r1],     m0
+    RET
+
+; 4x4 angular prediction, modes 3 and 33.
+; Picks the left (offset 18) or above (offset 2) reference via cmove, builds
+; the four interleaved sample pairs, loads the four per-row interpolation
+; weights, then falls into the shared .do_filter4x4 tail.
+; The .do_filter4x4 label is also the jump target for modes 4-9 below (via
+; mangle); those entry points rely on two things set up here:
+;   - m2-m5 hold the word pairs, m0/m1/m6/m7 the weights
+;   - ZF still reflects the "cmp r3m, byte NN" mode test done at entry:
+;     none of the intervening mov/movu/palignr/punpck/mova/lea/pmaddwd-free
+;     instructions modify EFLAGS, so "jz .store" skips the transpose for
+;     the vertical-family mode of each pair.
+INIT_XMM sse4
+cglobal intra_pred_ang4_3, 3,5,8
+    mov         r4, 2
+    cmp         r3m, byte 33
+    mov         r3, 18
+    cmove       r3, r4
+
+    movu        m0, [r2 + r3]   ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 2       ; [x 8 7 6 5 4 3 2]
+    punpcklwd   m2, m0, m1      ; [5 4 4 3 3 2 2 1]
+    palignr     m5, m0, 4       ; [x x 8 7 6 5 4 3]
+    punpcklwd   m3, m1, m5      ; [6 5 5 4 4 3 3 2]
+    palignr     m1, m0, 6       ; [x x x 8 7 6 5 4]
+    punpcklwd   m4, m5 ,m1      ; [7 6 6 5 5 4 4 3]
+    movhlps     m0, m0          ; [x x x x 8 7 6 5]
+    punpcklwd   m5, m1, m0      ; [8 7 7 6 6 5 5 4]
+
+    lea         r3, [ang_table + 20 * 16]
+    mova        m0, [r3 + 6 * 16]   ; [26]
+    mova        m1, [r3]            ; [20]
+    mova        m6, [r3 - 6 * 16]   ; [14]
+    mova        m7, [r3 - 12 * 16]  ; [ 8]
+    jmp        .do_filter4x4
+
+ALIGN 16
+; shared tail: weighted two-tap filter of each row pair, round, shift,
+; optional transpose (horizontal modes), store
+.do_filter4x4:
+    pmaddwd m2, m0
+    paddd   m2, [pd_16]         ; rounding: + (1 << 4)
+    psrld   m2, 5
+
+    pmaddwd m3, m1
+    paddd   m3, [pd_16]
+    psrld   m3, 5
+    packusdw m2, m3
+
+    pmaddwd m4, m6
+    paddd   m4, [pd_16]
+    psrld   m4, 5
+
+    pmaddwd m5, m7
+    paddd   m5, [pd_16]
+    psrld   m5, 5
+    packusdw m4, m5
+
+    ; ZF carried from the entry-point mode compare (see header comment)
+    jz         .store
+
+    ; transpose 4x4
+    punpckhwd    m0, m2, m4
+    punpcklwd    m2, m4
+    punpckhwd    m4, m2, m0
+    punpcklwd    m2, m0
+
+.store:
+    add         r1, r1
+    movh        [r0], m2
+    movhps      [r0 + r1], m2
+    movh        [r0 + r1 * 2], m4
+    lea         r1, [r1 * 3]
+    movhps      [r0 + r1], m4
+    RET
+
+; 4x4 angular prediction, modes 4 and 32: set up sample pairs and weights,
+; then reuse intra_pred_ang4_3's .do_filter4x4 tail (ZF from the mode compare
+; below survives to its "jz .store", selecting transpose vs no-transpose).
+cglobal intra_pred_ang4_4, 3,5,8
+    mov         r4, 2
+    cmp         r3m, byte 32
+    mov         r3, 18
+    cmove       r3, r4
+
+    movu        m0, [r2 + r3]   ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 2       ; [x 8 7 6 5 4 3 2]
+    punpcklwd   m2, m0, m1      ; [5 4 4 3 3 2 2 1]
+    palignr     m6, m0, 4       ; [x x 8 7 6 5 4 3]
+    punpcklwd   m3, m1, m6      ; [6 5 5 4 4 3 3 2]
+    mova        m4, m3          ; rows 1 and 2 read the same pair
+    palignr     m7, m0, 6       ; [x x x 8 7 6 5 4]
+    punpcklwd   m5, m6, m7      ; [7 6 6 5 5 4 4 3]
+
+    lea         r3, [ang_table + 18 * 16]
+    mova        m0, [r3 +  3 * 16]  ; [21]
+    mova        m1, [r3 -  8 * 16]  ; [10]
+    mova        m6, [r3 + 13 * 16]  ; [31]
+    mova        m7, [r3 +  2 * 16]  ; [20]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular prediction, modes 5 and 31; same scheme as intra_pred_ang4_4,
+; different fractional weights. Jumps into ang4_3's shared filter tail.
+cglobal intra_pred_ang4_5, 3,5,8
+    mov         r4, 2
+    cmp         r3m, byte 31
+    mov         r3, 18
+    cmove       r3, r4
+
+    movu        m0, [r2 + r3]   ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 2       ; [x 8 7 6 5 4 3 2]
+    punpcklwd   m2, m0, m1      ; [5 4 4 3 3 2 2 1]
+    palignr     m6, m0, 4       ; [x x 8 7 6 5 4 3]
+    punpcklwd   m3, m1, m6      ; [6 5 5 4 4 3 3 2]
+    mova        m4, m3          ; rows 1 and 2 read the same pair
+    palignr     m7, m0, 6       ; [x x x 8 7 6 5 4]
+    punpcklwd   m5, m6, m7      ; [7 6 6 5 5 4 4 3]
+
+    lea         r3, [ang_table + 10 * 16]
+    mova        m0, [r3 +  7 * 16]  ; [17]
+    mova        m1, [r3 -  8 * 16]  ; [ 2]
+    mova        m6, [r3 +  9 * 16]  ; [19]
+    mova        m7, [r3 -  6 * 16]  ; [ 4]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular prediction, modes 6 and 30: only two distinct sample pairs are
+; needed (rows 0/1 and rows 2/3 each share one). Uses ang4_3's filter tail.
+cglobal intra_pred_ang4_6, 3,5,8
+    mov         r4, 2
+    cmp         r3m, byte 30
+    mov         r3, 18
+    cmove       r3, r4
+
+    movu        m0, [r2 + r3]   ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 2       ; [x 8 7 6 5 4 3 2]
+    punpcklwd   m2, m0, m1      ; [5 4 4 3 3 2 2 1]
+    mova        m3, m2          ; row 1 shares row 0's pair
+    palignr     m6, m0, 4       ; [x x 8 7 6 5 4 3]
+    punpcklwd   m4, m1, m6      ; [6 5 5 4 4 3 3 2]
+    mova        m5, m4          ; row 3 shares row 2's pair
+
+    lea         r3, [ang_table + 19 * 16]
+    mova        m0, [r3 -  6 * 16]  ; [13]
+    mova        m1, [r3 +  7 * 16]  ; [26]
+    mova        m6, [r3 - 12 * 16]  ; [ 7]
+    mova        m7, [r3 +  1 * 16]  ; [20]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular prediction, modes 7 and 29: rows 0-2 share one sample pair,
+; row 3 uses the next. Uses ang4_3's shared filter tail.
+cglobal intra_pred_ang4_7, 3,5,8
+    mov         r4, 2
+    cmp         r3m, byte 29
+    mov         r3, 18
+    cmove       r3, r4
+
+    movu        m0, [r2 + r3]   ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 2       ; [x 8 7 6 5 4 3 2]
+    punpcklwd   m2, m0, m1      ; [5 4 4 3 3 2 2 1]
+    mova        m3, m2          ; rows 1 and 2 share row 0's pair
+    mova        m4, m2
+    palignr     m6, m0, 4       ; [x x 8 7 6 5 4 3]
+    punpcklwd   m5, m1, m6      ; [6 5 5 4 4 3 3 2]
+
+    lea         r3, [ang_table + 20 * 16]
+    mova        m0, [r3 - 11 * 16]  ; [ 9]
+    mova        m1, [r3 -  2 * 16]  ; [18]
+    mova        m6, [r3 +  7 * 16]  ; [27]
+    mova        m7, [r3 - 16 * 16]  ; [ 4]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular intra prediction, mode 8 (or pair mode 28).
+; Row fractions 5/10/15/20; all four rows use the same source pair (m2..m5).
+cglobal intra_pred_ang4_8, 3,5,8
+    mov         r4, 2
+    cmp         r3m, byte 28
+    mov         r3, 18
+    cmove       r3, r4              ; source offset: +2 for mode 28, +18 otherwise
+
+    movu        m0, [r2 + r3]   ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 2       ; [x 8 7 6 5 4 3 2]
+    punpcklwd   m2, m0, m1      ; [5 4 4 3 3 2 2 1]
+    mova        m3, m2
+    mova        m4, m2
+    mova        m5, m2
+
+    lea         r3, [ang_table + 13 * 16]
+    mova        m0, [r3 -  8 * 16]  ; [ 5]
+    mova        m1, [r3 -  3 * 16]  ; [10]
+    mova        m6, [r3 +  2 * 16]  ; [15]
+    mova        m7, [r3 +  7 * 16]  ; [20]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular intra prediction, mode 9 (or pair mode 27).
+; Row fractions 2/4/6/8; all four rows share one source pair (m2..m5).
+cglobal intra_pred_ang4_9, 3,5,8
+    mov         r4, 2
+    cmp         r3m, byte 27
+    mov         r3, 18
+    cmove       r3, r4              ; source offset: +2 for mode 27, +18 otherwise
+
+    movu        m0, [r2 + r3]   ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 2       ; [x 8 7 6 5 4 3 2]
+    punpcklwd   m2, m0, m1      ; [5 4 4 3 3 2 2 1]
+    mova        m3, m2
+    mova        m4, m2
+    mova        m5, m2
+
+    lea         r3, [ang_table + 4 * 16]
+    mova        m0, [r3 -  2 * 16]  ; [ 2]
+    mova        m1, [r3 -  0 * 16]  ; [ 4]
+    mova        m6, [r3 +  2 * 16]  ; [ 6]
+    mova        m7, [r3 +  4 * 16]  ; [ 8]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular intra prediction, mode 10 (pure horizontal): each output row is
+; one neighbour sample replicated four times.  Rows 1..3 are stored first; row
+; 0 is stored at .quit, optionally gradient-filtered when bFilter (r4m) != 0.
+cglobal intra_pred_ang4_10, 3,3,4
+    movh        m0,             [r2 + 18]           ; [4 3 2 1]
+    pshufb      m2,             m0, [pb_unpackwq2]  ; [4 4 4 4 3 3 3 3]
+    pshufb      m0,             [pb_unpackwq1]      ; [2 2 2 2 1 1 1 1]
+    add         r1,             r1                  ; dstStride in bytes (16-bit pixels)
+    movhlps     m1,             m0                  ; [2 2 2 2]
+    movhlps     m3,             m2                  ; [4 4 4 4]
+    movh        [r0 + r1],      m1
+    movh        [r0 + r1 * 2],  m2
+    lea         r1,             [r1 * 3]
+    movh        [r0 + r1],      m3
+
+    cmp         r4m,            byte 0
+    jz         .quit
+
+    ; filter row 0: add ((opposite-edge sample - corner sample) >> 1), then
+    ; clamp the result to [0, pixel_max]
+    movu        m1,             [r2]                ; [7 6 5 4 3 2 1 0]
+    pshufb      m2,             m1, [pb_unpackwq1]  ; [0 0 0 0]
+    palignr     m1,             m1, 2               ; [4 3 2 1]
+    psubw       m1,             m2
+    psraw       m1,             1
+    paddw       m0,             m1
+    pxor        m1,             m1
+    pmaxsw      m0,             m1
+    pminsw      m0,             [pw_pixel_max]
+.quit:
+    movh        [r0],           m0                  ; row 0 (filtered or plain)
+    RET
+
+; 4x4 angular intra prediction, mode 26 (pure vertical): all four rows are a
+; copy of the reference row at [r2 + 2].  When bFilter (r4m) != 0 the first
+; column is gradient-filtered against the other edge and clamped.
+cglobal intra_pred_ang4_26, 3,4,3
+    movh        m0,             [r2 + 2]            ; [8 7 6 5 4 3 2 1]
+    add         r1,             r1                  ; dstStride in bytes (16-bit pixels)
+    ; store
+    movh        [r0],           m0
+    movh        [r0 + r1],      m0
+    movh        [r0 + r1 * 2],  m0
+    lea         r3,             [r1 * 3]
+    movh        [r0 + r3],      m0
+
+    ; filter first column: add ((edge sample - corner sample) >> 1), clamped
+    cmp         r4m,            byte 0
+    jz         .quit
+
+    pshufb      m0,             [pb_unpackwq1]      ; [2 2 2 2 1 1 1 1]
+    movu        m1,             [r2 + 16]
+    pinsrw      m1,             [r2], 0             ; [7 6 5 4 3 2 1 0]
+    pshufb      m2,             m1, [pb_unpackwq1]  ; [0 0 0 0]
+    palignr     m1,             m1, 2               ; [4 3 2 1]
+    psubw       m1,             m2
+    psraw       m1,             1
+    paddw       m0,             m1
+    pxor        m1,             m1
+    pmaxsw      m0,             m1
+    pminsw      m0,             [pw_pixel_max]
+
+    pextrw      [r0],           m0, 0
+    pextrw      [r0 + r1],      m0, 1
+    pextrw      [r0 + r1 * 2],  m0, 2
+    pextrw      [r0 + r3],      m0, 3
+.quit:
+    RET
+
+; 4x4 angular intra prediction, mode 11 (or pair mode 25).
+; Row fractions 30/28/26/24; all four rows share one source pair.
+cglobal intra_pred_ang4_11, 3,5,8
+    xor         r4, r4
+    cmp         r3m, byte 25
+    mov         r3, 16
+    cmove       r3, r4              ; source offset: +0 for mode 25, +16 otherwise
+
+    movu        m2, [r2 + r3]   ; [x x x 4 3 2 1 0]
+    pinsrw      m2, [r2], 0
+    palignr     m1, m2, 2       ; [x x x x 4 3 2 1]
+    punpcklwd   m2, m1          ; [4 3 3 2 2 1 1 0]
+    mova        m3, m2
+    mova        m4, m2
+    mova        m5, m2
+
+    ; NOTE(review): the coefficient comments previously read [24]/[26]/[28]/[30]
+    ; in reverse order; the addresses load 24+6=30, 24+4=28, 24+2=26, 24+0=24,
+    ; matching the descending fractions of this mode (cf. ang4_6..ang4_9).
+    lea         r3, [ang_table + 24 * 16]
+    mova        m0, [r3 +  6 * 16]  ; [30]
+    mova        m1, [r3 +  4 * 16]  ; [28]
+    mova        m6, [r3 +  2 * 16]  ; [26]
+    mova        m7, [r3 +  0 * 16]  ; [24]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular intra prediction, mode 12 (or pair mode 24).
+; Row fractions 27/22/17/12; all four rows share one source pair.
+cglobal intra_pred_ang4_12, 3,5,8
+    xor         r4, r4
+    cmp         r3m, byte 24
+    mov         r3, 16
+    cmove       r3, r4              ; source offset: +0 for mode 24, +16 otherwise
+
+    movu        m2, [r2 + r3]   ; [x x x 4 3 2 1 0]
+    pinsrw      m2, [r2], 0
+    palignr     m1, m2, 2       ; [x x x x 4 3 2 1]
+    punpcklwd   m2, m1          ; [4 3 3 2 2 1 1 0]
+    mova        m3, m2
+    mova        m4, m2
+    mova        m5, m2
+
+    lea         r3, [ang_table + 20 * 16]
+    mova        m0, [r3 +  7 * 16]  ; [27]
+    mova        m1, [r3 +  2 * 16]  ; [22]
+    mova        m6, [r3 -  3 * 16]  ; [17]
+    mova        m7, [r3 -  8 * 16]  ; [12]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular intra prediction, mode 13 (or pair mode 23), negative angle.
+; Row fractions 23/14/5/28; the 4th row steps back one sample, so m5 is the
+; source pair shifted left with a projected neighbour inserted at word 0.
+cglobal intra_pred_ang4_13, 3,5,8
+    xor         r4, r4
+    cmp         r3m, byte 23
+    mov         r3, 16
+    jz          .next
+    xchg        r3, r4              ; mode 23: r3=0/r4=16, otherwise r3=16/r4=0
+.next:
+    movu        m5, [r2 + r4 - 2]   ; [x x 4 3 2 1 0 x]
+    pinsrw      m5, [r2], 1
+    palignr     m2, m5, 2       ; [x x x 4 3 2 1 0]
+    palignr     m0, m5, 4       ; [x x x x 4 3 2 1]
+    pinsrw      m5, [r2 + r3 + 8], 0
+    punpcklwd   m5, m2          ; [3 2 2 1 1 0 0 x]
+    punpcklwd   m2, m0          ; [4 3 3 2 2 1 1 0]
+    mova        m3, m2
+    mova        m4, m2
+
+    lea         r3, [ang_table + 21 * 16]
+    mova        m0, [r3 +  2 * 16]  ; [23]
+    mova        m1, [r3 -  7 * 16]  ; [14]
+    mova        m6, [r3 - 16 * 16]  ; [ 5]
+    mova        m7, [r3 +  7 * 16]  ; [28]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular intra prediction, mode 14 (or pair mode 22), negative angle.
+; Row fractions 19/6/25/12; rows 2..3 step back one sample (m4 = m5).
+cglobal intra_pred_ang4_14, 3,5,8
+    xor         r4, r4
+    cmp         r3m, byte 22
+    mov         r3, 16
+    jz          .next
+    xchg        r3, r4              ; mode 22: r3=0/r4=16, otherwise r3=16/r4=0
+.next:
+    movu        m5, [r2 + r4 - 2]   ; [x x 4 3 2 1 0 x]
+    pinsrw      m5, [r2], 1
+    palignr     m2, m5, 2       ; [x x x 4 3 2 1 0]
+    palignr     m0, m5, 4       ; [x x x x 4 3 2 1]
+    pinsrw      m5, [r2 + r3 + 4], 0
+    punpcklwd   m5, m2          ; [3 2 2 1 1 0 0 x]
+    punpcklwd   m2, m0          ; [4 3 3 2 2 1 1 0]
+    mova        m3, m2
+    mova        m4, m5
+
+    lea         r3, [ang_table + 19 * 16]
+    mova        m0, [r3 +  0 * 16]  ; [19]
+    mova        m1, [r3 - 13 * 16]  ; [ 6]
+    mova        m6, [r3 +  6 * 16]  ; [25]
+    mova        m7, [r3 -  7 * 16]  ; [12]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular intra prediction, mode 15 (or pair mode 21), negative angle.
+; Row fractions 15/30/13/28; two projected neighbours (x, y) are folded into
+; the shifted source pairs for rows that step back past the corner.
+cglobal intra_pred_ang4_15, 3,5,8
+    xor         r4, r4
+    cmp         r3m, byte 21
+    mov         r3, 16
+    jz          .next
+    xchg        r3, r4              ; mode 21: r3=0/r4=16, otherwise r3=16/r4=0
+.next:
+    movu        m3, [r2 + r4 - 2]   ; [x x 4 3 2 1 0 x]
+    pinsrw      m3, [r2], 1
+    palignr     m2, m3, 2       ; [x x x 4 3 2 1 0]
+    palignr     m0, m3, 4       ; [x x x x 4 3 2 1]
+    pinsrw      m3, [r2 + r3 + 4], 0
+    pslldq      m5, m3, 2       ; [x 4 3 2 1 0 x y]
+    pinsrw      m5, [r2 + r3 + 8], 0
+    punpcklwd   m5, m3          ; [2 1 1 0 0 x x y]
+    punpcklwd   m3, m2          ; [3 2 2 1 1 0 0 x]
+    punpcklwd   m2, m0          ; [4 3 3 2 2 1 1 0]
+    mova        m4, m3
+
+    lea         r3, [ang_table + 23 * 16]
+    mova        m0, [r3 -  8 * 16]  ; [15]
+    mova        m1, [r3 +  7 * 16]  ; [30]
+    mova        m6, [r3 - 10 * 16]  ; [13]
+    mova        m7, [r3 +  5 * 16]  ; [28]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular intra prediction, mode 16 (or pair mode 20), negative angle.
+; Row fractions 11/22/1/12; same shifted-pair construction as ang4_15 but with
+; the second projected neighbour taken from offset +6.
+cglobal intra_pred_ang4_16, 3,5,8
+    xor         r4, r4
+    cmp         r3m, byte 20
+    mov         r3, 16
+    jz          .next
+    xchg        r3, r4              ; mode 20: r3=0/r4=16, otherwise r3=16/r4=0
+.next:
+    movu        m3, [r2 + r4 - 2]   ; [x x 4 3 2 1 0 x]
+    pinsrw      m3, [r2], 1
+    palignr     m2, m3, 2       ; [x x x 4 3 2 1 0]
+    palignr     m0, m3, 4       ; [x x x x 4 3 2 1]
+    pinsrw      m3, [r2 + r3 + 4], 0
+    pslldq      m5, m3, 2       ; [x 4 3 2 1 0 x y]
+    pinsrw      m5, [r2 + r3 + 6], 0
+    punpcklwd   m5, m3          ; [2 1 1 0 0 x x y]
+    punpcklwd   m3, m2          ; [3 2 2 1 1 0 0 x]
+    punpcklwd   m2, m0          ; [4 3 3 2 2 1 1 0]
+    mova        m4, m3
+
+    lea         r3, [ang_table + 19 * 16]
+    mova        m0, [r3 -  8 * 16]  ; [11]
+    mova        m1, [r3 +  3 * 16]  ; [22]
+    mova        m6, [r3 - 18 * 16]  ; [ 1]
+    mova        m7, [r3 -  7 * 16]  ; [12]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular intra prediction, mode 17 (or pair mode 19), steep negative angle.
+; Row fractions 6/12/18/24; every row steps back one sample, so three projected
+; neighbours (x, y, z) are folded in via successive pslldq/pinsrw.
+cglobal intra_pred_ang4_17, 3,5,8
+    xor         r4, r4
+    cmp         r3m, byte 19
+    mov         r3, 16
+    jz          .next
+    xchg        r3, r4              ; mode 19: r3=0/r4=16, otherwise r3=16/r4=0
+.next:
+    movu        m6, [r2 + r4 - 2]   ; [- - 4 3 2 1 0 x]
+    pinsrw      m6, [r2], 1
+    palignr     m2, m6, 2       ; [- - - 4 3 2 1 0]
+    palignr     m1, m6, 4       ; [- - - - 4 3 2 1]
+    mova        m4, m2
+    punpcklwd   m2, m1          ; [4 3 3 2 2 1 1 0]
+
+    pinsrw      m6, [r2 + r3 + 2], 0
+    punpcklwd   m3, m6, m4      ; [3 2 2 1 1 0 0 x]
+
+    pslldq      m4, m6, 2       ; [- 4 3 2 1 0 x y]
+    pinsrw      m4, [r2 + r3 + 4], 0
+    pslldq      m5, m4, 2       ; [4 3 2 1 0 x y z]
+    pinsrw      m5, [r2 + r3 + 8], 0
+    punpcklwd   m5, m4          ; [1 0 0 x x y y z]
+    punpcklwd   m4, m6          ; [2 1 1 0 0 x x y]
+
+    lea         r3, [ang_table + 14 * 16]
+    mova        m0, [r3 -  8 * 16]  ; [ 6]
+    mova        m1, [r3 -  2 * 16]  ; [12]
+    mova        m6, [r3 +  4 * 16]  ; [18]
+    mova        m7, [r3 + 10 * 16]  ; [24]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; 4x4 angular intra prediction, mode 18 (45-degree diagonal through the corner).
+; Builds one 8-word vector: reversed samples from +16 with the corner sample
+; inserted, then the row at +2 in the high half; each output row is that
+; vector shifted right by one more word (bottom row first).
+cglobal intra_pred_ang4_18, 3,3,1
+    movh        m0, [r2 + 16]
+    pinsrw      m0, [r2], 0
+    pshufb      m0, [pw_swap]       ; reverse the four low words
+    movhps      m0, [r2 + 2]
+    add         r1, r1              ; dstStride in bytes (16-bit pixels)
+    lea         r2, [r1 * 3]
+    movh        [r0 + r2], m0
+    psrldq      m0, 2
+    movh        [r0 + r1 * 2], m0
+    psrldq      m0, 2
+    movh        [r0 + r1], m0
+    psrldq      m0, 2
+    movh        [r0], m0
+    RET
+
+;-----------------------------------------------------------------------------------------
+; void intraPredAng8(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;-----------------------------------------------------------------------------------------
+INIT_XMM ssse3
+; 8x8 angular intra prediction, modes 2 and 34 (pure 45-degree copy, no
+; interpolation).  Reads 16 consecutive samples (mode 34 from r2 + 4, mode 2
+; from r2 + 36) and emits each row as the previous row shifted by one sample.
+cglobal intra_pred_ang8_2, 3,5,3
+    lea         r4,            [r2]
+    add         r2,            32
+    cmp         r3m,           byte 34
+    cmove       r2,            r4
+    add         r1,            r1                   ; dstStride in bytes
+    lea         r3,            [r1 * 3]
+    movu        m0,            [r2 + 4]
+    movu        m1,            [r2 + 20]
+    movu        [r0],          m0
+    palignr     m2,            m1, m0, 2
+    movu        [r0 + r1],     m2
+    palignr     m2,            m1, m0, 4
+    movu        [r0 + r1 * 2], m2
+    palignr     m2,            m1, m0, 6
+    movu        [r0 + r3],     m2
+    lea         r0,            [r0 + r1 * 4]
+    palignr     m2,            m1, m0, 8
+    movu        [r0],          m2
+    palignr     m2,            m1, m0, 10
+    movu        [r0 + r1],     m2
+    palignr     m2,            m1, m0, 12
+    movu        [r0 + r1 * 2], m2
+    palignr     m1,            m0, 14
+    movu        [r0 + r3],     m1
+    RET
+
+INIT_XMM sse4
+; 8x8 angular intra prediction, mode 3.  Fractions 26/20/14/8 then 2/28/22/16
+; (ang_table base 14).  Each group of four 8-sample rows is computed as dwords
+; (pmaddwd coeff, +16, >>5), packed to words, 4x4-transposed, and stored as
+; 4-pixel half-rows: first group -> columns 0..3, second group -> columns 4..7.
+cglobal intra_pred_ang8_3, 3,5,8
+    add         r2,        32
+    lea         r3,        [ang_table + 14 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+    punpckhwd   m1,        m4                         ; [x 16 16 15 15 14 14 13]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 12 * 16]             ; [26]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 12 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    pmaddwd     m2,        [r3 + 6 * 16]              ; [20]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m6,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m6,        [r3 + 6 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    palignr     m6,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m6,        [r3]                       ; [14]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m7,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m7,        [r3]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    pmaddwd     m7,        [r3 - 6 * 16]              ; [ 8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m3,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    pmaddwd     m3,        [r3 - 6 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m7,        m3
+
+    ; 4x4 transpose of the four word rows m4/m2/m6/m7
+    punpckhwd   m3,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m7
+    punpcklwd   m6,        m7
+
+    punpckldq   m7,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m3, m2
+    punpckhdq   m3,        m2
+
+    ; store columns 0..3 of all 8 rows (src pointer r2 is reused as a temp)
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m7
+    movhps      [r0 + r1],       m7
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r2,              [r0 + r1 * 4]
+    movh        [r2],            m6
+    movhps      [r2 + r1],       m6
+    movh        [r2 + r1 * 2],   m3
+    movhps      [r2 + r4],       m3
+
+    mova        m4,        m0
+    pmaddwd     m4,        [r3 - 12 * 16]             ; [ 2]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m5
+    pmaddwd     m2,        [r3 - 12 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 14 * 16]             ; [28]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m5
+    pmaddwd     m6,        [r3 + 14 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    palignr     m6,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m6,        [r3 + 8 * 16]              ; [22]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m7,        m1, m5, 4                  ; [14 13 13 12 12 11 11 10]
+    pmaddwd     m7,        [r3 + 8 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m7,        [r3 + 2 * 16]              ; [16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, 8                      ; [15 14 14 13 13 12 12 11]
+    pmaddwd     m1,        [r3 + 2 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    ; transpose and store columns 4..7 of all 8 rows
+    punpckhwd   m3,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m7
+    punpcklwd   m6,        m7
+
+    punpckldq   m7,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m3, m2
+    punpckhdq   m3,        m2
+
+    movh        [r0 + 8],            m7
+    movhps      [r0 + r1 + 8],       m7
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m3
+    movhps      [r0 + r4 + 8],       m3
+    RET
+
+; 8x8 angular intra prediction, mode 4.  Fractions 21/10/31/20 then 9/30/19/8
+; (ang_table base 19).  Same compute/transpose/store scheme as ang8_3:
+; pmaddwd + round(+16) + >>5 per dword, packusdw, 4x4 transpose, half-row stores.
+cglobal intra_pred_ang8_4, 3,6,8
+    add         r2,        32
+    lea         r3,        [ang_table + 19 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 2 * 16]              ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 2 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m6,        m2                         ; keep pair for the [31] row
+    pmaddwd     m2,        [r3 - 9 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 12 * 16]             ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 12 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m7,        [r3 + 1 * 16]              ; [20]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m1,        [r3 + 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    ; 4x4 transpose and store columns 0..3 of all 8 rows
+    punpckhwd   m1,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m7
+    punpcklwd   m6,        m7
+
+    punpckldq   m7,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m1, m2
+    punpckhdq   m1,        m2
+
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m7
+    movhps      [r0 + r1],       m7
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r5,              [r0 + r1 * 4]
+    movh        [r5],            m6
+    movhps      [r5 + r1],       m6
+    movh        [r5 + r1 * 2],   m1
+    movhps      [r5 + r4],       m1
+
+    palignr     m4,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    mova        m2,        m4                         ; keep pair for the [30] row
+    pmaddwd     m4,        [r3 - 10 * 16]             ; [ 9]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m3,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    mova        m6,        m3
+    pmaddwd     m3,        [r3 - 10 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m4,        m3
+
+    pmaddwd     m2,        [r3 + 11 * 16]             ; [30]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    pmaddwd     m6,        [r3 + 11 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    mova        m6,        m0
+    pmaddwd     m6,        [r3]                       ; [19]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m5
+    pmaddwd     m7,        [r3]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    movh        m1,        [r2 + 26]                  ; [16 15 14 13]
+    palignr     m7,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m7,        [r3 - 11 * 16]             ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, 4                      ; [14 13 13 12 12 11 11 10]
+    pmaddwd     m1,        [r3 - 11 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    ; transpose and store columns 4..7 of all 8 rows
+    punpckhwd   m3,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m7
+    punpcklwd   m6,        m7
+
+    punpckldq   m7,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m3, m2
+    punpckhdq   m3,        m2
+
+    movh        [r0 + 8],            m7
+    movhps      [r0 + r1 + 8],       m7
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m3
+    movhps      [r0 + r4 + 8],       m3
+    RET
+
+; 8x8 angular intra prediction, mode 5.  Fractions 17/2/19/4 then 21/6/23/8
+; (ang_table base 13).  Same compute/transpose/store scheme as ang8_3.
+cglobal intra_pred_ang8_5, 3,5,8
+    add         r2,        32
+    lea         r3,        [ang_table + 13 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 4 * 16]              ; [17]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 4 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m6,        m2                         ; keep pair for the [19] row
+    pmaddwd     m2,        [r3 - 11 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 11 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 6 * 16]              ; [19]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 6 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m7,        [r3 - 9 * 16]              ; [4]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m1,        [r3 - 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    ; 4x4 transpose and store columns 0..3 (src pointer r2 reused as a temp)
+    punpckhwd   m1,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m7
+    punpcklwd   m6,        m7
+
+    punpckldq   m7,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m1, m2
+    punpckhdq   m1,        m2
+
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m7
+    movhps      [r0 + r1],       m7
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r2,              [r0 + r1 * 4]
+    movh        [r2],            m6
+    movhps      [r2 + r1],       m6
+    movh        [r2 + r1 * 2],   m1
+    movhps      [r2 + r4],       m1
+
+    palignr     m4,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m4,        [r3 + 8 * 16]              ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m2,        [r3 + 8 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    mova        m6,        m2                         ; keep pair for the [23] row
+    pmaddwd     m2,        [r3 - 7 * 16]              ; [6]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m1,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 10 * 16]             ; [23]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 10 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 - 5 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m5
+    pmaddwd     m1,        [r3 - 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    ; transpose and store columns 4..7 of all 8 rows
+    punpckhwd   m3,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m7
+    punpcklwd   m6,        m7
+
+    punpckldq   m7,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m3, m2
+    punpckhdq   m3,        m2
+
+    movh        [r0 + 8],            m7
+    movhps      [r0 + r1 + 8],       m7
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m3
+    movhps      [r0 + r4 + 8],       m3
+    RET
+
+; 8x8 angular intra prediction, mode 6.  Fractions 13/26/7/20 then 1/14/27/8
+; (ang_table base 14).  Same compute/transpose/store scheme as ang8_3.
+cglobal intra_pred_ang8_6, 3,5,8
+    add         r2,        32
+    lea         r3,        [ang_table + 14 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 1 * 16]              ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 12 * 16]             ; [26]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    palignr     m6,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m7,        m6                         ; keep pair for the [20] row
+    pmaddwd     m6,        [r3 - 7 * 16]              ; [7]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        [r3 - 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m7,        [r3 + 6 * 16]              ; [20]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        [r3 + 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    ; 4x4 transpose and store columns 0..3 (src pointer r2 reused as a temp)
+    punpckhwd   m1,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m7
+    punpcklwd   m6,        m7
+
+    punpckldq   m7,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m1, m2
+    punpckhdq   m1,        m2
+
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m7
+    movhps      [r0 + r1],       m7
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r2,              [r0 + r1 * 4]
+    movh        [r2],            m6
+    movhps      [r2 + r1],       m6
+    movh        [r2 + r1 * 2],   m1
+    movhps      [r2 + r4],       m1
+
+    palignr     m4,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    mova        m6,        m4                         ; keep pair for [14] and [27]
+    pmaddwd     m4,        [r3 - 13 * 16]             ; [1]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    mova        m7,        m2
+    pmaddwd     m2,        [r3 - 13 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    pmaddwd     m2,        m6, [r3]                   ; [14]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    pmaddwd     m1,        m7, [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 13 * 16]             ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 13 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    pmaddwd     m7,        [r3 - 6 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m5,        m0, 12                     ; [12 11 11 10 10 9 9 8]
+    pmaddwd     m5,        [r3 - 6 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m7,        m5
+
+    ; transpose and store columns 4..7 of all 8 rows
+    punpckhwd   m3,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m7
+    punpcklwd   m6,        m7
+
+    punpckldq   m7,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m3, m2
+    punpckhdq   m3,        m2
+
+    movh        [r0 + 8],            m7
+    movhps      [r0 + r1 + 8],       m7
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m3
+    movhps      [r0 + r4 + 8],       m3
+    RET
+
+;-----------------------------------------------------------------------------
+; 8x8 angular intra prediction, mode 7, 16-bit pixels (SSE4).
+; NOTE(review): presumably r0 = dst, r1 = dstStride, r2 = srcPix with this
+; mode's reference samples starting at [r2 + 32] -- confirm against the C
+; prototype.  r1 is doubled because pixels are 2 bytes wide.
+; Each output pixel is the weighted average of two adjacent references:
+; ang_table entries are presumably {32-frac, frac} word pairs (confirm), so
+; pmaddwd on a (p[i], p[i+1]) pair computes (32-frac)*p0 + frac*p1; adding
+; pd_16 and shifting right by 5 rounds the result.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_7, 3,5,8
+    add         r2,        32
+    lea         r3,        [ang_table + 18 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    ; interleave adjacent references into (p[i], p[i+1]) word pairs for pmaddwd
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 9 * 16]              ; [9]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 9 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3]                       ; [18]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 9 * 16]              ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m7,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    pmaddwd     m7,        [r3 - 14 * 16]             ; [4]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        [r3 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    ; 4x8 -> 8x4 transpose of the four result vectors (predicted columns
+    ; become destination rows)
+    punpckhwd   m1,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m7
+    punpcklwd   m6,        m7
+
+    punpckldq   m7,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m1, m2
+    punpckhdq   m1,        m2
+
+    ; store the left half (4 pixels = 8 bytes) of rows 0-7
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m7
+    movhps      [r0 + r1],       m7
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r2,              [r0 + r1 * 4]
+    movh        [r2],            m6
+    movhps      [r2 + r1],       m6
+    movh        [r2 + r1 * 2],   m1
+    movhps      [r2 + r4],       m1
+
+    ; second pass: same recipe for the right half of the block (r2 is no
+    ; longer needed as the source pointer from here on)
+    palignr     m4,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m6,        m4
+    pmaddwd     m4,        [r3 - 5 * 16]              ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m7,        m2
+    pmaddwd     m2,        [r3 - 5 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    pmaddwd     m2,        m6, [r3 + 4 * 16]          ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    pmaddwd     m1,        m7, [r3 + 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 13 * 16]             ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 13 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m7,        [r3 - 10 * 16]             ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m5,        m0, 8                      ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m5,        [r3 - 10 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m7,        m5
+
+    ; transpose and store the right half (columns 4-7) of rows 0-7
+    punpckhwd   m3,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m7
+    punpcklwd   m6,        m7
+
+    punpckldq   m7,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m3, m2
+    punpckhdq   m3,        m2
+
+    movh        [r0 + 8],            m7
+    movhps      [r0 + r1 + 8],       m7
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m3
+    movhps      [r0 + r4 + 8],       m3
+    RET
+
+;-----------------------------------------------------------------------------
+; 8x8 angular intra prediction, mode 8, 16-bit pixels (SSE4).
+; Same interpolation scheme as mode 7: pmaddwd on (p[i], p[i+1]) word pairs
+; against ang_table weights (presumably {32-frac, frac} -- confirm), rounded
+; via +pd_16 and >>5, then a 4x8 -> 8x4 transpose before storing each half.
+; NOTE(review): r0 = dst, r1 = dstStride (doubled for 2-byte pixels),
+; r2 = srcPix with references at [r2 + 32] -- confirm against C prototype.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_8, 3,6,7
+    add         r2,        32
+    lea         r3,        [ang_table + 17 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 4]                   ; [9 8 7 6 5 4 3 2]
+
+    ; pair up adjacent reference pixels for the weighted averages
+    punpcklwd   m3,        m0, m1                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m1                         ; [9 8 8 7 7 6 6 5]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 12 * 16]             ; [5]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 12 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 7 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 2 * 16]              ; [15]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 2 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 + 3 * 16]              ; [20]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    ; transpose the four result vectors into row order
+    punpckhwd   m1,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m5
+    punpcklwd   m6,        m5
+
+    punpckldq   m5,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m1, m2
+    punpckhdq   m1,        m2
+
+    ; store the left half of rows 0-7 (r5 keeps r2 intact: it is still
+    ; needed below for the [r2 + 18] load)
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m5
+    movhps      [r0 + r1],       m5
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r5,              [r0 + r1 * 4]
+    movh        [r5],            m6
+    movhps      [r5 + r1],       m6
+    movh        [r5 + r1 * 2],   m1
+    movhps      [r5 + r4],       m1
+
+    ; second pass: right half of the block
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 8 * 16]              ; [25]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 8 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 13 * 16]             ; [30]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 13 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    movh        m1,        [r2 + 18]                  ; [12 11 10 9]
+
+    palignr     m6,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m5,        m6
+    pmaddwd     m6,        [r3 - 14 * 16]             ; [3]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m1,        m0, 4                      ; [10 9 9 8 8 7 7 6]
+    mova        m3,        m1
+    pmaddwd     m1,        [r3 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m5,        [r3 - 9 * 16]              ; [8]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    pmaddwd     m3,        [r3 - 9 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m5,        m3
+
+    ; transpose and store the right half (columns 4-7) of rows 0-7
+    punpckhwd   m3,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m5
+    punpcklwd   m6,        m5
+
+    punpckldq   m5,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m3, m2
+    punpckhdq   m3,        m2
+
+    movh        [r0 + 8],            m5
+    movhps      [r0 + r1 + 8],       m5
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m3
+    movhps      [r0 + r4 + 8],       m3
+    RET
+
+;-----------------------------------------------------------------------------
+; 8x8 angular intra prediction, mode 9, 16-bit pixels (SSE4).
+; Mode 9 uses even fractions 2,4,...,16, so each destination line blends the
+; same two reference pairs (m3/m0) with a different ang_table weight;
+; +pd_16 / >>5 is the rounding, then a 4x8 -> 8x4 transpose per half.
+; NOTE(review): r0 = dst, r1 = dstStride (doubled for 2-byte pixels),
+; r2 = srcPix with references at [r2 + 32] -- confirm against C prototype.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_9, 3,5,7
+    add         r2,        32
+    lea         r3,        [ang_table + 9 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 4]                   ; [9 8 7 6 5 4 3 2]
+
+    ; pair up adjacent reference pixels for the weighted averages
+    punpcklwd   m3,        m0, m1                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m1                         ; [9 8 8 7 7 6 6 5]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 7 * 16]              ; [2]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 7 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 5 * 16]              ; [4]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 3 * 16]              ; [6]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 - 1 * 16]              ; [8]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    ; transpose the four result vectors into row order
+    punpckhwd   m1,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m5
+    punpcklwd   m6,        m5
+
+    punpckldq   m5,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m1, m2
+    punpckhdq   m1,        m2
+
+    ; store the left half of rows 0-7 (r2 is free to clobber: the source
+    ; pointer is not read again after this point)
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m5
+    movhps      [r0 + r1],       m5
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r2,              [r0 + r1 * 4]
+    movh        [r2],            m6
+    movhps      [r2 + r1],       m6
+    movh        [r2 + r1 * 2],   m1
+    movhps      [r2 + r4],       m1
+
+    ; second pass: fractions 10..16 for the right half of the block
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 1 * 16]              ; [10]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 3 * 16]              ; [12]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 5 * 16]              ; [14]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 + 5 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pmaddwd     m3,        [r3 + 7 * 16]              ; [16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 + 7 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    ; transpose and store the right half (columns 4-7) of rows 0-7
+    punpckhwd   m5,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m3
+    punpcklwd   m6,        m3
+
+    punpckldq   m3,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m5, m2
+    punpckhdq   m5,        m2
+
+    movh        [r0 + 8],            m3
+    movhps      [r0 + r1 + 8],       m3
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m5
+    movhps      [r0 + r4 + 8],       m5
+    RET
+
+;-----------------------------------------------------------------------------
+; 8x8 angular intra prediction, mode 10 (pure horizontal), 16-bit pixels.
+; Each destination row N is the reference pixel N+1 broadcast across the row.
+; Row 0 is stored last (at .quit) because when the 5th argument (r4m,
+; presumably the bFilter flag -- confirm) is non-zero it is first filtered:
+;   row0[x] = clip(ref1 + ((above[x] - topLeft) >> 1))
+; NOTE(review): reference layout appears to be [r2] = top-left,
+; [r2 + 2] = above row, [r2 + 34] = left column -- confirm against caller.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_10, 3,6,3
+    movu        m1,             [r2 + 34]    ; [8 7 6 5 4 3 2 1]
+    pshufb      m0,             m1, [pb_01]  ; [1 1 1 1 1 1 1 1]
+    add         r1,             r1
+    lea         r3,             [r1 * 3]
+
+    ; broadcast reference pixels 2..8 into rows 1..7
+    psrldq      m1,             2
+    pshufb      m2,             m1, [pb_01]  ; [2 2 2 2 2 2 2 2]
+    movu        [r0 + r1],      m2
+    psrldq      m1,             2
+    pshufb      m2,             m1, [pb_01]  ; [3 3 3 3 3 3 3 3]
+    movu        [r0 + r1 * 2],  m2
+    psrldq      m1,             2
+    pshufb      m2,             m1, [pb_01]  ; [4 4 4 4 4 4 4 4]
+    movu        [r0 + r3],      m2
+
+    lea         r5,             [r0 + r1 *4]
+    psrldq      m1,             2
+    pshufb      m2,             m1, [pb_01]  ; [5 5 5 5 5 5 5 5]
+    movu        [r5],           m2
+    psrldq      m1,             2
+    pshufb      m2,             m1, [pb_01]  ; [6 6 6 6 6 6 6 6]
+    movu        [r5 + r1],      m2
+    psrldq      m1,             2
+    pshufb      m2,             m1, [pb_01]  ; [7 7 7 7 7 7 7 7]
+    movu        [r5 + r1 * 2],  m2
+    psrldq      m1,             2
+    pshufb      m2,             m1, [pb_01]  ; [8 8 8 8 8 8 8 8]
+    movu        [r5 + r3],      m2
+
+    cmp         r4m,            byte 0
+    jz         .quit
+
+    ; filter
+
+    movh        m1,             [r2]                ; [3 2 1 0]
+    pshufb      m2,             m1, [pb_01]  ; [0 0 0 0 0 0 0 0]
+    movu        m1,             [r2 + 2]            ; [8 7 6 5 4 3 2 1]
+    psubw       m1,             m2                  ; above[x] - topLeft
+    psraw       m1,             1                   ; arithmetic >>1 keeps the sign
+    paddw       m0,             m1
+    pxor        m1,             m1
+    pmaxsw      m0,             m1                  ; clamp below at 0
+    pminsw      m0,             [pw_pixel_max]      ; clamp above at the bit-depth maximum
+.quit:
+    movu        [r0],           m0                  ; row 0 (filtered or plain broadcast)
+    RET
+
+;-----------------------------------------------------------------------------
+; 8x8 angular intra prediction, mode 11, 16-bit pixels (SSE4).
+; Mode 11 includes the top-left sample in its reference run, hence the pinsrw
+; that splices [r2] (top-left) into word 0 of the [r2 + 32] reference vector.
+; Fractions descend 30,28,...,16; each pmaddwd blends a (p[i], p[i+1]) pair
+; with ang_table weights (presumably {32-frac, frac} -- confirm), rounded by
+; +pd_16 / >>5, then a 4x8 -> 8x4 transpose per half of the block.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_11, 3,5,7
+    lea         r3,        [ang_table + 23 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 32]                       ; [7 6 5 4 3 2 1 0]
+    pinsrw      m0,        [r2], 0                    ; splice in the top-left sample
+    movu        m1,        [r2 + 34]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 7 * 16]              ; [30]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 7 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 5 * 16]              ; [28]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 3 * 16]              ; [26]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 + 1 * 16]              ; [24]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    ; transpose the four result vectors into row order
+    punpckhwd   m1,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m5
+    punpcklwd   m6,        m5
+
+    punpckldq   m5,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m1, m2
+    punpckhdq   m1,        m2
+
+    ; store the left half of rows 0-7 (r2 is free to clobber: the source
+    ; pointer is not read again after this point)
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m5
+    movhps      [r0 + r1],       m5
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r2,              [r0 + r1 * 4]
+    movh        [r2],            m6
+    movhps      [r2 + r1],       m6
+    movh        [r2 + r1 * 2],   m1
+    movhps      [r2 + r4],       m1
+
+    ; second pass: fractions 22..16 for the right half of the block
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 1 * 16]              ; [22]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 3 * 16]              ; [20]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 5 * 16]              ; [18]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 5 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pmaddwd     m3,        [r3 - 7 * 16]              ; [16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 - 7 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    ; transpose and store the right half (columns 4-7) of rows 0-7
+    punpckhwd   m5,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m3
+    punpcklwd   m6,        m3
+
+    punpckldq   m3,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m5, m2
+    punpckhdq   m5,        m2
+
+    movh        [r0 + 8],            m3
+    movhps      [r0 + r1 + 8],       m3
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m5
+    movhps      [r0 + r4 + 8],       m5
+    RET
+
+;-----------------------------------------------------------------------------
+; 8x8 angular intra prediction, mode 12, 16-bit pixels (SSE4).
+; Like mode 11 but with a negative angle: later rows need references
+; projected from the other edge.  Those are fetched by shuffling [r2]
+; through the pw_ang8_12 table and shifting them into the pair vectors with
+; palignr (see the section after the first-half stores).
+; Interpolation is the usual pmaddwd-against-ang_table weights
+; (presumably {32-frac, frac} -- confirm) with +pd_16 / >>5 rounding.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_12, 3,6,7
+    lea         r5,        [ang_table + 16 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 32]                  ; [7 6 5 4 3 2 1 0]
+    pinsrw      m0,        [r2], 0                    ; splice in the top-left sample
+    movu        m1,        [r2 + 34]                  ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 + 11 * 16]             ; [27]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 + 11 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5 + 6 * 16]              ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r5 + 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 + 1 * 16]              ; [17]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r5 + 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r5 - 4 * 16]              ; [12]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r5 - 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    ; transpose the four result vectors into row order
+    punpckhwd   m1,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m5
+    punpcklwd   m6,        m5
+
+    punpckldq   m5,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m1, m2
+    punpckhdq   m1,        m2
+
+    ; store the left half of rows 0-7
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m5
+    movhps      [r0 + r1],       m5
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r3,              [r0 + r1 * 4]
+    movh        [r3],            m6
+    movhps      [r3 + r1],       m6
+    movh        [r3 + r1 * 2],   m1
+    movhps      [r3 + r4],       m1
+
+    ; second pass: right half of the block
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 - 9 * 16]              ; [7]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 - 9 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5 - 14 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r5 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    ; shift the pair vectors back by one sample, pulling in the projected
+    ; reference selected by pw_ang8_12 from the opposite edge
+    palignr     m0,        m3, 12
+    movu        m1,        [r2]
+    pshufb      m1,        [pw_ang8_12]
+    palignr     m3,        m1, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 + 13 * 16]             ; [29]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 + 13 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pmaddwd     m3,        [r5 + 8 * 16]              ; [24]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r5 + 8 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    ; transpose and store the right half (columns 4-7) of rows 0-7
+    punpckhwd   m5,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m3
+    punpcklwd   m6,        m3
+
+    punpckldq   m3,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m5, m2
+    punpckhdq   m5,        m2
+
+    movh        [r0 + 8],            m3
+    movhps      [r0 + r1 + 8],       m3
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m5
+    movhps      [r0 + r4 + 8],       m5
+    RET
+
+;-----------------------------------------------------------------------------
+; 8x8 angular intra prediction, mode 13, 16-bit pixels (SSE4).
+; Steeper negative angle than mode 12: the reference run must be extended
+; twice with samples projected from the opposite edge.  pw_ang8_13 shuffles
+; [r2] into projection order; each palignr-by-12 step shifts the pair
+; vectors back one sample and pulls in the next projected reference
+; (pslldq m1, 2 advances the projection queue between steps).
+; Interpolation: pmaddwd against ang_table weights (presumably
+; {32-frac, frac} -- confirm), rounded via +pd_16 / >>5.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_13, 3,6,8
+    lea         r5,        [ang_table + 14 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 32]                  ; [7 6 5 4 3 2 1 0]
+    pinsrw      m0,        [r2], 0                    ; splice in the top-left sample
+    movu        m1,        [r2 + 34]                  ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 + 9 * 16]              ; [23]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 + 9 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5]                       ; [14]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r5]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 - 9 * 16]              ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r5 - 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    ; first extension: shift in the projected reference from pw_ang8_13
+    palignr     m0,        m3, 12
+    movu        m1,        [r2]
+    pshufb      m1,        [pw_ang8_13]
+    palignr     m3,        m1, 12
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r5 + 14 * 16]             ; [28]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r5 + 14 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m5,        m7
+
+    ; transpose the four result vectors into row order
+    punpckhwd   m7,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m5
+    punpcklwd   m6,        m5
+
+    punpckldq   m5,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m7, m2
+    punpckhdq   m7,        m2
+
+    ; store the left half of rows 0-7 (r2 may be clobbered here: all source
+    ; data needed later already lives in registers)
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m5
+    movhps      [r0 + r1],       m5
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r2,              [r0 + r1 * 4]
+    movh        [r2],            m6
+    movhps      [r2 + r1],       m6
+    movh        [r2 + r1 * 2],   m7
+    movhps      [r2 + r4],       m7
+
+    ; second pass: right half of the block
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 + 5 * 16]              ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 + 5 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5 - 4 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 - 4 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 - 13 * 16]             ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 - 13 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    ; second extension: advance the projection queue and shift in one more
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    pmaddwd     m3,        [r5 + 10 * 16]             ; [24]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r5 + 10 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    ; transpose and store the right half (columns 4-7) of rows 0-7
+    punpckhwd   m5,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m3
+    punpcklwd   m6,        m3
+
+    punpckldq   m3,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m5, m2
+    punpckhdq   m5,        m2
+
+    movh        [r0 + 8],            m3
+    movhps      [r0 + r1 + 8],       m3
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m5
+    movhps      [r0 + r4 + 8],       m5
+    RET
+
+;-----------------------------------------------------------------------------
+; 8x8 angular intra prediction, mode 14, 16-bit pixels (SSE4).
+; Even steeper negative angle than mode 13: three projected references are
+; needed, so the pw_ang8_14-shuffled projection vector (m1) is consumed in
+; three palignr-by-12 steps, with pslldq m1, 2 advancing the queue between
+; them.  Interpolation: pmaddwd against ang_table weights (presumably
+; {32-frac, frac} -- confirm), rounded via +pd_16 / >>5, then a 4x8 -> 8x4
+; transpose per half of the block.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_14, 3,6,8
+    lea         r5,        [ang_table + 18 * 16]
+    add         r1,        r1
+
+    movu        m0,        [r2 + 32]                  ; [7 6 5 4 3 2 1 0]
+    pinsrw      m0,        [r2], 0                    ; splice in the top-left sample
+    movu        m1,        [r2 + 34]                  ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 + 1 * 16]              ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 + 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5 - 12 * 16]             ; [6]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r5 - 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    ; first extension: shift in the projected reference from pw_ang8_14
+    palignr     m0,        m3, 12
+    movu        m1,        [r2]
+    pshufb      m1,        [pw_ang8_14]
+    palignr     m3,        m1, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 + 7 * 16]              ; [25]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 + 7 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r5 - 6 * 16]              ; [12]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r5 - 6 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m5,        m7
+
+    ; transpose the four result vectors into row order
+    punpckhwd   m7,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m5
+    punpcklwd   m6,        m5
+
+    punpckldq   m5,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m7, m2
+    punpckhdq   m7,        m2
+
+    ; store the left half of rows 0-7 (r2 may be clobbered here: all source
+    ; data needed later already lives in registers)
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m5
+    movhps      [r0 + r1],       m5
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r2,              [r0 + r1 * 4]
+    movh        [r2],            m6
+    movhps      [r2 + r1],       m6
+    movh        [r2 + r1 * 2],   m7
+    movhps      [r2 + r4],       m7
+
+    ; second extension before the right half
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 + 13 * 16]             ; [31]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 + 13 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5]                       ; [18]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 - 13 * 16]             ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 - 13 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    ; third extension: last projected reference for the final row
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    pmaddwd     m3,        [r5 + 6 * 16]              ; [24]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r5 + 6 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    ; transpose and store the right half (columns 4-7) of rows 0-7
+    punpckhwd   m5,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m3
+    punpcklwd   m6,        m3
+
+    punpckldq   m3,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m5, m2
+    punpckhdq   m5,        m2
+
+    movh        [r0 + 8],            m3
+    movhps      [r0 + r1 + 8],       m3
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m5
+    movhps      [r0 + r4 + 8],       m5
+    RET
+
+;-----------------------------------------------------------------------------
+; intra_pred_ang8_15(dst, dstStride, refPix) - HEVC intra angular prediction,
+; mode 15, 8x8 block of 16-bit samples (SSE4 path; r1 is doubled below, so it
+; becomes a byte stride for word pixels).
+; Each output sample interpolates two adjacent reference samples with a
+; [32-f, f] weight pair from ang_table via pmaddwd, rounds with +16 and >>5
+; (pd_16 / psrld 5), then packusdw narrows the dwords back to words.  The
+; bracketed numbers (e.g. [15]) are the per-row fractional weights f.
+; NOTE(review): reference layout assumed as in the neighbouring modes --
+; left references at [r2 + 32], above references at [r2]; confirm against
+; the caller's reference-pixel buffer format.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_15, 3,6,8
+    lea         r5,        [ang_table + 20 * 16]      ; bias so [r5 +/- k*16] addresses this mode's weight rows
+    add         r1,        r1                         ; stride in bytes (16-bit pixels)
+
+    movu        m0,        [r2 + 32]                  ; [7 6 5 4 3 2 1 0]
+    pinsrw      m0,        [r2], 0
+    movu        m1,        [r2 + 34]                  ; [8 7 6 5 4 3 2 1]
+
+    ; interleave neighbouring samples into [b a] word pairs for pmaddwd
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 - 5 * 16]              ; [15]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 - 5 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    ; shift one projected side sample (selected by pw_ang8_15) into the low end
+    palignr     m0,        m3, 12
+    movu        m1,        [r2]
+    pshufb      m1,        [pw_ang8_15]
+    palignr     m3,        m1, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5 + 10 * 16]             ; [30]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 + 10 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 - 7 * 16]              ; [13]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 - 7 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r5 + 8 * 16]              ; [28]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r5 + 8 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m5,        m7
+
+    ; transpose the four 8-word vectors (horizontal-class mode: prediction is
+    ; computed column-wise, stored row-wise 4 words at a time via movh/movhps)
+    punpckhwd   m7,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m5
+    punpcklwd   m6,        m5
+
+    punpckldq   m5,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m7, m2
+    punpckhdq   m7,        m2
+
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m5
+    movhps      [r0 + r1],       m5
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r3,              [r0 + r1 * 4]              ; r3 (not r2): r2 is still needed for the final pinsrw
+    movh        [r3],            m6
+    movhps      [r3 + r1],       m6
+    movh        [r3 + r1 * 2],   m7
+    movhps      [r3 + r4],       m7
+
+    ; second group of four columns (weights 11, 26, 9, 24)
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 - 9 * 16]              ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 - 9 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5 + 6 * 16]              ; [26]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 + 6 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 - 11 * 16]             ; [9]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 - 11 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+    pinsrw      m3,        [r2 + 16], 0               ; last projected reference sample
+
+    pmaddwd     m3,        [r5 + 4 * 16]              ; [24]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r5 + 4 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    punpckhwd   m5,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m3
+    punpcklwd   m6,        m3
+
+    punpckldq   m3,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m5, m2
+    punpckhdq   m5,        m2
+
+    ; store the right 4x8 half (byte offset +8 == 4 word pixels)
+    movh        [r0 + 8],            m3
+    movhps      [r0 + r1 + 8],       m3
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m5
+    movhps      [r0 + r4 + 8],       m5
+    RET
+
+;-----------------------------------------------------------------------------
+; intra_pred_ang8_16(dst, dstStride, refPix) - HEVC intra angular prediction,
+; mode 16, 8x8 block of 16-bit samples.  Same scheme as mode 15: pmaddwd with
+; [32-f, f] weight pairs from ang_table (fractions shown in brackets),
+; +16 >>5 rounding, packusdw, then a word-level transpose because this is a
+; horizontal-class mode.  Only the table bias (13*16), the weight offsets and
+; the side-sample shuffle (pw_ang8_16) differ from mode 15.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_16, 3,6,8
+    lea         r5,        [ang_table + 13 * 16]
+    add         r1,        r1                         ; byte stride for word pixels
+
+    movu        m0,        [r2 + 32]                  ; [7 6 5 4 3 2 1 0]
+    pinsrw      m0,        [r2], 0
+    movu        m1,        [r2 + 34]                  ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 - 2 * 16]              ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 - 2 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    ; shift projected side samples (pw_ang8_16 order) into the low end
+    palignr     m0,        m3, 12
+    movu        m1,        [r2]
+    pshufb      m1,        [pw_ang8_16]
+    palignr     m3,        m1, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5 + 9 * 16]              ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 + 9 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 - 12 * 16]             ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 - 12 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r5 - 1 * 16]              ; [12]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r5 - 1 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m5,        m7
+
+    ; transpose columns 0-3 to rows and store the left 4x8 half
+    punpckhwd   m7,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m5
+    punpcklwd   m6,        m5
+
+    punpckldq   m5,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m7, m2
+    punpckhdq   m7,        m2
+
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m5
+    movhps      [r0 + r1],       m5
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r3,              [r0 + r1 * 4]              ; keep r2 intact for the final pinsrw
+    movh        [r3],            m6
+    movhps      [r3 + r1],       m6
+    movh        [r3 + r1 * 2],   m7
+    movhps      [r3 + r4],       m7
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 + 10 * 16]             ; [23]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 + 10 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5 - 11 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 - 11 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5]                       ; [13]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+    pinsrw      m3,        [r2 + 16], 0               ; last projected reference sample
+
+    pmaddwd     m3,        [r5 + 11 * 16]             ; [24]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r5 + 11 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    punpckhwd   m5,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m3
+    punpcklwd   m6,        m3
+
+    punpckldq   m3,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m5, m2
+    punpckhdq   m5,        m2
+
+    ; store the right 4x8 half (+8 bytes == 4 word pixels)
+    movh        [r0 + 8],            m3
+    movhps      [r0 + r1 + 8],       m3
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m5
+    movhps      [r0 + r4 + 8],       m5
+    RET
+
+;-----------------------------------------------------------------------------
+; intra_pred_ang8_17(dst, dstStride, refPix) - HEVC intra angular prediction,
+; mode 17, 8x8 block of 16-bit samples.  Same interpolation scheme as modes
+; 15/16 (pmaddwd weight pairs, +16 >>5, packusdw, final word transpose);
+; mode 17 advances the projected side reference every column, hence the
+; pslldq/palignr shift before every weight step.  Fractions in brackets.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_17, 3,6,8
+    lea         r5,        [ang_table + 17 * 16]
+    add         r1,        r1                         ; byte stride for word pixels
+
+    movu        m0,        [r2 + 32]                  ; [7 6 5 4 3 2 1 0]
+    pinsrw      m0,        [r2], 0
+    movu        m1,        [r2 + 34]                  ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 - 11 * 16]             ; [6]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 - 11 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    ; shift projected side samples (pw_ang8_17 order) into the low end
+    palignr     m0,        m3, 12
+    movu        m1,        [r2]
+    pshufb      m1,        [pw_ang8_17]
+    palignr     m3,        m1, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5 - 5 * 16]              ; [12]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 - 5 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 + 1 * 16]              ; [18]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 + 1 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r5 + 7 * 16]              ; [24]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r5 + 7 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m5,        m7
+
+    ; transpose columns 0-3 to rows and store the left 4x8 half
+    punpckhwd   m7,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m5
+    punpcklwd   m6,        m5
+
+    punpckldq   m5,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m7, m2
+    punpckhdq   m7,        m2
+
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m5
+    movhps      [r0 + r1],       m5
+    movh        [r0 + r1 * 2],   m4
+    movhps      [r0 + r4],       m4
+    lea         r3,              [r0 + r1 * 4]
+    movh        [r3],            m6
+    movhps      [r3 + r1],       m6
+    movh        [r3 + r1 * 2],   m7
+    movhps      [r3 + r4],       m7
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r5 + 13 * 16]             ; [30]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r5 + 13 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r5 - 13 * 16]             ; [4]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 - 13 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r5 - 7 * 16]              ; [10]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r5 - 7 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    pmaddwd     m3,        [r5 - 1 * 16]              ; [16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r5 - 1 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    punpckhwd   m5,        m4, m2
+    punpcklwd   m4,        m2
+    punpckhwd   m2,        m6, m3
+    punpcklwd   m6,        m3
+
+    punpckldq   m3,        m4, m6
+    punpckhdq   m4,        m6
+    punpckldq   m6,        m5, m2
+    punpckhdq   m5,        m2
+
+    ; store the right 4x8 half (+8 bytes == 4 word pixels)
+    movh        [r0 + 8],            m3
+    movhps      [r0 + r1 + 8],       m3
+    movh        [r0 + r1 * 2 + 8],   m4
+    movhps      [r0 + r4 + 8],       m4
+    lea         r0,                  [r0 + r1 * 4]
+    movh        [r0 + 8],            m6
+    movhps      [r0 + r1 + 8],       m6
+    movh        [r0 + r1 * 2 + 8],   m5
+    movhps      [r0 + r4 + 8],       m5
+    RET
+
+;-----------------------------------------------------------------------------
+; intra_pred_ang8_18(dst, dstStride, refPix) - HEVC intra angular prediction,
+; mode 18 (pure 45-degree diagonal), 8x8 block of 16-bit samples.  No
+; interpolation is needed: row k is the reference row shifted right by k
+; samples, pulling in left references (reversed via pw_swap16) one at a time
+; with palignr.  Offsets are in bytes, so "14" = 7 word pixels, etc.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_18, 3,4,3
+    add         r1,              r1                   ; byte stride for word pixels
+    lea         r3,              [r1 * 3]
+    movu        m1,              [r2]                 ; above references
+    movu        m0,              [r2 + 34]            ; left references
+    pshufb      m0,              [pw_swap16]          ; reverse so palignr feeds them in the right order
+    movu        [r0],            m1
+    palignr     m2,              m1, m0, 14
+    movu        [r0 + r1],       m2
+    palignr     m2,              m1, m0, 12
+    movu        [r0 + r1 * 2],   m2
+    palignr     m2,              m1, m0, 10
+    movu        [r0 + r3],       m2
+    lea         r0,              [r0 + r1 * 4]
+    palignr     m2,              m1, m0, 8
+    movu        [r0],            m2
+    palignr     m2,              m1, m0, 6
+    movu        [r0 + r1],       m2
+    palignr     m2,              m1, m0, 4
+    movu        [r0 + r1 * 2],   m2
+    palignr     m1,              m0, 2                ; last row: shift in place, m1 no longer needed
+    movu        [r0 + r3],       m1
+    RET
+
+;-----------------------------------------------------------------------------
+; intra_pred_ang8_19(dst, dstStride, refPix) - HEVC intra angular prediction,
+; mode 19, 8x8 block of 16-bit samples.  Vertical-class mirror of mode 17:
+; the weight sequence (6,12,18,24,30,4,10,16) and side-sample shuffle
+; (pw_ang8_17) are identical, but the main references come from [r2] (above),
+; the projected side samples from [r2 + 32] (left), and each result vector is
+; a full output row stored directly with movu -- no transpose.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_19, 3,5,8
+    lea         r3,        [ang_table + 17 * 16]
+    add         r1,        r1                         ; byte stride for word pixels
+
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 11 * 16]             ; [6]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 11 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    ; shift projected left-reference samples (pw_ang8_17 order) into the low end
+    palignr     m0,        m3, 12
+    movu        m1,        [r2 + 32]
+    pinsrw      m1,        [r2], 0
+    pshufb      m1,        [pw_ang8_17]
+    palignr     m3,        m1, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 5 * 16]              ; [12]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 5 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 1 * 16]              ; [18]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 + 1 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 + 7 * 16]              ; [24]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 + 7 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m5,        m7
+
+    ; rows 0-3
+    lea         r4,              [r1 * 3]
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 13 * 16]             ; [30]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 13 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 13 * 16]             ; [4]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 13 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 7 * 16]              ; [10]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 7 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    pmaddwd     m3,        [r3 - 1 * 16]              ; [16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 - 1 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    ; rows 4-7
+    lea         r0,              [r0 + r1 * 4]
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m3
+    RET
+
+;-----------------------------------------------------------------------------
+; intra_pred_ang8_20(dst, dstStride, refPix) - HEVC intra angular prediction,
+; mode 20, 8x8 block of 16-bit samples.  Vertical-class mirror of mode 16:
+; identical weight sequence and pw_ang8_16 shuffle, but main references are
+; the above row at [r2], projected side samples come from the left array at
+; [r2 + 32] (last one from [r2 + 16 + 32]), and rows are stored directly
+; with movu -- no transpose.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_20, 3,5,8
+    lea         r3,        [ang_table + 13 * 16]
+    add         r1,        r1                         ; byte stride for word pixels
+
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 2 * 16]              ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 2 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    ; shift projected left-reference samples (pw_ang8_16 order) into the low end
+    palignr     m0,        m3, 12
+    movu        m1,        [r2 + 32]
+    pinsrw      m1,        [r2], 0
+    pshufb      m1,        [pw_ang8_16]
+    palignr     m3,        m1, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 9 * 16]              ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 + 9 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 12 * 16]             ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 12 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 - 1 * 16]              ; [12]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 - 1 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m5,        m7
+
+    ; rows 0-3
+    lea         r4,              [r1 * 3]
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 10 * 16]             ; [23]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 10 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 11 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 11 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3]                       ; [13]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+    pinsrw      m3,        [r2 + 16 + 32], 0          ; last projected sample, from the left array
+
+    pmaddwd     m3,        [r3 + 11 * 16]             ; [24]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 + 11 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    ; rows 4-7
+    lea         r0,              [r0 + r1 * 4]
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m3
+    RET
+
+;-----------------------------------------------------------------------------
+; intra_pred_ang8_21(dst, dstStride, refPix) - HEVC intra angular prediction,
+; mode 21, 8x8 block of 16-bit samples.  Vertical-class mirror of mode 15:
+; identical weight sequence and pw_ang8_15 shuffle, but main references come
+; from [r2] (above), projected side samples from [r2 + 32] (left, final one
+; at [r2 + 16 + 32]), and each result is a full row stored with movu.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_21, 3,5,8
+    lea         r3,        [ang_table + 20 * 16]
+    add         r1,        r1                         ; byte stride for word pixels
+
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 5 * 16]              ; [15]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 5 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    ; shift projected left-reference samples (pw_ang8_15 order) into the low end
+    palignr     m0,        m3, 12
+    movu        m1,        [r2 + 32]
+    pinsrw      m1,        [r2], 0
+    pshufb      m1,        [pw_ang8_15]
+    palignr     m3,        m1, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 10 * 16]             ; [30]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 + 10 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 7 * 16]              ; [13]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 7 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 + 8 * 16]              ; [28]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 + 8 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m5,        m7
+
+    ; rows 0-3
+    lea         r4,              [r1 * 3]
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m5
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 9 * 16]              ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 9 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 6 * 16]              ; [26]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 + 6 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 11 * 16]             ; [9]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 11 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+    pinsrw      m3,        [r2 + 16 + 32], 0          ; last projected sample, from the left array
+
+    pmaddwd     m3,        [r3 + 4 * 16]              ; [24]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 + 4 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    ; rows 4-7
+    lea         r0,              [r0 + r1 * 4]
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m3
+    RET
+
+;-----------------------------------------------------------------------------
+; intra_pred_ang8_22(dst, dstStride, refPix) - HEVC intra angular prediction,
+; mode 22, 8x8 block of 16-bit samples.  Vertical-class mirror of mode 14
+; (it reuses the pw_ang8_14 side-sample shuffle): main references from [r2]
+; (above), projected side samples from [r2 + 32] (left), rows stored directly
+; with movu -- no transpose.  Two rows are produced before the first side
+; sample is needed because this angle advances the side pointer every other
+; row.  Fractions in brackets; +16 >>5 rounding via pd_16 / psrld 5.
+;-----------------------------------------------------------------------------
+cglobal intra_pred_ang8_22, 3,5,8
+    lea         r3,        [ang_table + 18 * 16]
+    add         r1,        r1                         ; byte stride for word pixels
+
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 1 * 16]              ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 12 * 16]             ; [6]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    ; shift projected left-reference samples (pw_ang8_14 order) into the low end
+    palignr     m0,        m3, 12
+    movu        m1,        [r2 + 32]
+    pinsrw      m1,        [r2], 0
+    pshufb      m1,        [pw_ang8_14]
+    palignr     m3,        m1, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 7 * 16]              ; [25]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 + 7 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 - 6 * 16]              ; [12]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 - 6 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m5,        m7
+
+    ; rows 0-3
+    lea         r4,              [r1 * 3]
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 13 * 16]             ; [31]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 13 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3]                       ; [18]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 13 * 16]             ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 13 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    pmaddwd     m3,        [r3 + 6 * 16]              ; [24]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 + 6 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    ; rows 4-7
+    lea         r0,              [r0 + r1 * 4]
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m3
+    RET
+
+cglobal intra_pred_ang8_23, 3,5,8                      ; angular mode 23, 8x8 block, 16-bit pixels: r0=dst, r1=stride(px), r2=reference array
+    lea         r3,        [ang_table + 14 * 16]       ; weight-table base; [r3 + k*16] selects the row for fraction 14+k
+    add         r1,        r1                          ; stride: pixels -> bytes (2 bytes/pixel)
+
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 9 * 16]              ; [23] weighted sum of each adjacent ref pair
+    paddd       m4,        [pd_16]                    ; +16 = rounding term for the >>5 below
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 9 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2                         ; pack both dword halves -> 8 output words (one row)
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3]                       ; [14]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 9 * 16]              ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m0,        m3, 12                     ; slide reference window back one sample
+    movu        m1,        [r2 + 32]                  ; left-neighbour samples
+    pinsrw      m1,        [r2], 0                    ; word 0 = top-left sample
+    pshufb      m1,        [pw_ang8_13]               ; NOTE(review): projection order defined by pw_ang8_13 elsewhere
+    palignr     m3,        m1, 12
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 + 14 * 16]             ; [28]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 + 14 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m5,        m7
+
+    lea         r4,              [r1 * 3]             ; r4 = 3*stride
+    movu        [r0],            m4                   ; store rows 0-3
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m5
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 5 * 16]              ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 5 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 4 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 4 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m2,        m5
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 13 * 16]             ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 13 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pslldq      m1,        2                          ; advance projected-left buffer one word
+    palignr     m0,        m3, 12
+    palignr     m3,        m1, 12
+
+    pmaddwd     m3,        [r3 + 10 * 16]             ; [24]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 + 10 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    lea         r0,              [r0 + r1 * 4]        ; advance dst by 4 rows
+    movu        [r0],            m4                   ; store rows 4-7
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m3
+    RET
+
+cglobal intra_pred_ang8_24, 3,5,7                      ; angular mode 24, 8x8 block, 16-bit pixels: r0=dst, r1=stride(px), r2=reference array
+    lea         r3,        [ang_table + 16 * 16]       ; weight-table base; [r3 + k*16] = row for fraction 16+k
+    add         r1,        r1                          ; stride: pixels -> bytes
+
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 11 * 16]             ; [27] weighted pair sum, then round (+16) and >>5
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 11 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2                         ; 8 words = one output row
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 6 * 16]              ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 1 * 16]              ; [17]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 - 4 * 16]              ; [12]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    lea         r4,              [r1 * 3]             ; r4 = 3*stride
+    movu        [r0],            m4                   ; store rows 0-3
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m5
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 9 * 16]              ; [7]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 9 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 14 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    palignr     m0,        m3, 12                     ; slide reference window back one sample
+    movu        m1,        [r2 + 32]                  ; left-neighbour samples
+    pinsrw      m1,        [r2], 0                    ; word 0 = top-left sample
+    pshufb      m1,        [pw_ang8_12]               ; NOTE(review): projection order defined by pw_ang8_12 elsewhere
+    palignr     m3,        m1, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 13 * 16]             ; [29]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 + 13 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pmaddwd     m3,        [r3 + 8 * 16]              ; [24]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 + 8 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    lea         r0,              [r0 + r1 * 4]        ; advance dst by 4 rows
+    movu        [r0],            m4                   ; store rows 4-7
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m3
+    RET
+
+cglobal intra_pred_ang8_25, 3,5,7                      ; angular mode 25, 8x8 block, 16-bit pixels; shallow angle: no reference re-projection needed
+    lea         r3,        [ang_table + 23 * 16]       ; weight-table base; [r3 + k*16] = row for fraction 23+k
+    add         r1,        r1                          ; stride: pixels -> bytes
+
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 7 * 16]              ; [30] weighted pair sum, round (+16), >>5
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 7 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2                         ; 8 words = one output row
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 5 * 16]              ; [28]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 3 * 16]              ; [26]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 + 1 * 16]              ; [24]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    lea         r4,              [r1 * 3]             ; r4 = 3*stride
+    movu        [r0],            m4                   ; store rows 0-3
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m5
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 1 * 16]              ; [22]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 3 * 16]              ; [20]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 5 * 16]              ; [18]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 - 5 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pmaddwd     m3,        [r3 - 7 * 16]              ; [16] last row: registers can be clobbered in place
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 - 7 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    lea         r0,              [r0 + r1 * 4]        ; advance dst by 4 rows
+    movu        [r0],            m4                   ; store rows 4-7
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m3
+    RET
+
+cglobal intra_pred_ang8_26, 3,6,3                      ; angular mode 26 (pure vertical), 8x8 block, 16-bit pixels; r4m = filter flag
+    movu        m0,             [r2 + 2]            ; [8 7 6 5 4 3 2 1]  top neighbours
+    add         r1,             r1                  ; stride: pixels -> bytes
+    lea         r5,             [r1 * 3]            ; r5 = 3*stride
+
+    movu        [r0],           m0                  ; every output row is a copy of the top row
+    movu        [r0 + r1],      m0
+    movu        [r0 + r1 * 2],  m0
+    movu        [r0 + r5],      m0
+
+    lea         r3,             [r0 + r1 *4]        ; r3 = dst row 4
+    movu        [r3],           m0
+    movu        [r3 + r1],      m0
+    movu        [r3 + r1 * 2],  m0
+    movu        [r3 + r5],      m0
+
+    cmp         r4m,            byte 0              ; skip edge filter when flag is zero
+    jz         .quit
+
+    ; filter: first column gets top[0] + ((left[i] - topleft) >> 1), clamped to the pixel range
+    pshufb      m0,             [pb_01]             ; broadcast top[0] to all 8 word lanes
+    pinsrw      m1,             [r2], 0             ; [3 2 1 0]  word 0 = top-left sample
+    pshufb      m2,             m1, [pb_01]         ; [0 0 0 0 0 0 0 0]  broadcast top-left
+    movu        m1,             [r2 + 2 + 32]       ; [8 7 6 5 4 3 2 1]  left neighbours
+    psubw       m1,             m2                  ; left[i] - topleft
+    psraw       m1,             1                   ; arithmetic >>1 (signed delta)
+    paddw       m0,             m1
+    pxor        m1,             m1
+    pmaxsw      m0,             m1                  ; clamp below at 0
+    pminsw      m0,             [pw_pixel_max]      ; clamp above at max pixel value
+    pextrw      [r0],          m0, 0                ; write filtered sample into column 0 of each row
+    pextrw      [r0 + r1],     m0, 1
+    pextrw      [r0 + r1 * 2], m0, 2
+    pextrw      [r0 + r5],     m0, 3
+    pextrw      [r3],          m0, 4
+    pextrw      [r3 + r1],     m0, 5
+    pextrw      [r3 + r1 * 2], m0, 6
+    pextrw      [r3 + r5],     m0, 7
+.quit:
+    RET
+
+cglobal intra_pred_ang8_27, 3,5,7                      ; angular mode 27, 8x8 block, 16-bit pixels; shallow positive angle, fixed reference window
+    lea         r3,        [ang_table + 9 * 16]        ; weight-table base; [r3 + k*16] = row for fraction 9+k
+    add         r1,        r1                          ; stride: pixels -> bytes
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 4]                   ; [9 8 7 6 5 4 3 2]
+
+    punpcklwd   m3,        m0, m1                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m1                         ; [9 8 8 7 7 6 6 5]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 7 * 16]              ; [2] weighted pair sum, round (+16), >>5
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 7 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2                         ; 8 words = one output row
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 5 * 16]              ; [4]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 3 * 16]              ; [6]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 - 1 * 16]              ; [8]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    lea         r4,              [r1 * 3]             ; r4 = 3*stride
+    movu        [r0],            m4                   ; store rows 0-3
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m5
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 1 * 16]              ; [10]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 3 * 16]              ; [12]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 5 * 16]              ; [14]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m5,        m0
+    pmaddwd     m5,        [r3 + 5 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m6,        m5
+
+    pmaddwd     m3,        [r3 + 7 * 16]              ; [16] last row: sources clobbered in place
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 + 7 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    lea         r0,              [r0 + r1 * 4]        ; advance dst by 4 rows
+    movu        [r0],            m4                   ; store rows 4-7
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m3
+    RET
+
+cglobal intra_pred_ang8_28, 3,5,7                      ; angular mode 28, 8x8 block, 16-bit pixels
+    lea         r3,        [ang_table + 17 * 16]       ; weight-table base; [r3 + k*16] = row for fraction 17+k
+    add         r1,        r1                          ; stride: pixels -> bytes
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 4]                   ; [9 8 7 6 5 4 3 2]
+
+    punpcklwd   m3,        m0, m1                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m1                         ; [9 8 8 7 7 6 6 5]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 12 * 16]             ; [5] weighted pair sum, round (+16), >>5
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 12 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2                         ; 8 words = one output row
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 7 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 2 * 16]              ; [15]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 2 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m5,        m3
+    pmaddwd     m5,        [r3 + 3 * 16]              ; [20]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    lea         r4,              [r1 * 3]             ; r4 = 3*stride
+    movu        [r0],            m4                   ; store rows 0-3
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m5
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 8 * 16]              ; [25]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 8 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 14 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    movh        m1,        [r2 + 18]                  ; [12 11 10 9] (movh loads only the low 4 words; only low lanes are used below)
+
+    palignr     m6,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m5,        m6
+    pmaddwd     m6,        [r3 - 14 * 16]             ; [3]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m1,        m0, 4                      ; [10 9 9 8 8 7 7 6]
+    mova        m3,        m1
+    pmaddwd     m1,        [r3 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m5,        [r3 - 9 * 16]              ; [8]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    pmaddwd     m3,        [r3 - 9 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m5,        m3
+
+    lea         r0,              [r0 + r1 * 4]        ; advance dst by 4 rows
+    movu        [r0],            m4                   ; store rows 4-7
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m5
+    RET
+
+cglobal intra_pred_ang8_29, 3,5,8                      ; angular mode 29, 8x8 block, 16-bit pixels
+    lea         r3,        [ang_table + 18 * 16]       ; weight-table base; [r3 + k*16] = row for fraction 18+k
+    add         r1,        r1                          ; stride: pixels -> bytes
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 9 * 16]              ; [9] weighted pair sum, round (+16), >>5
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 9 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2                         ; 8 words = one output row
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3]                       ; [18]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 9 * 16]              ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m7,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]  window advanced one sample
+    pmaddwd     m7,        [r3 - 14 * 16]             ; [4]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        [r3 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r4,              [r1 * 3]             ; r4 = 3*stride
+    movu        [r0],            m4                   ; store rows 0-3
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m7
+
+    palignr     m4,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m6,        m4                         ; keep a copy for the [22]/[31] rows below
+    pmaddwd     m4,        [r3 - 5 * 16]              ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m7,        m2
+    pmaddwd     m2,        [r3 - 5 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    pmaddwd     m2,        m6, [r3 + 4 * 16]          ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    pmaddwd     m1,        m7, [r3 + 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 13 * 16]             ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 13 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]  window advanced two samples
+    pmaddwd     m7,        [r3 - 10 * 16]             ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m5,        m0, 8                      ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m5,        [r3 - 10 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m7,        m5
+
+    lea         r0,              [r0 + r1 * 4]        ; advance dst by 4 rows
+    movu        [r0],            m4                   ; store rows 4-7
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m7
+    RET
+
+cglobal intra_pred_ang8_30, 3,5,8                      ; angular mode 30, 8x8 block, 16-bit pixels
+    lea         r3,        [ang_table + 14 * 16]       ; weight-table base; [r3 + k*16] = row for fraction 14+k
+    add         r1,        r1                          ; stride: pixels -> bytes
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 1 * 16]              ; [13] weighted pair sum, round (+16), >>5
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2                         ; 8 words = one output row
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 12 * 16]             ; [26]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    palignr     m6,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]  window advanced one sample
+    mova        m7,        m6                         ; keep a copy for the [20] row
+    pmaddwd     m6,        [r3 - 7 * 16]              ; [7]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        [r3 - 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m7,        [r3 + 6 * 16]              ; [20]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]  recomputed: m1 was consumed above
+    pmaddwd     m1,        [r3 + 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r4,              [r1 * 3]             ; r4 = 3*stride
+    movu        [r0],            m4                   ; store rows 0-3
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m7
+
+    palignr     m4,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]  window advanced two samples
+    mova        m6,        m4                         ; copy for the [14]/[27] rows
+    pmaddwd     m4,        [r3 - 13 * 16]             ; [1]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    mova        m7,        m2
+    pmaddwd     m2,        [r3 - 13 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    pmaddwd     m2,        m6, [r3]                   ; [14]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    pmaddwd     m1,        m7, [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 13 * 16]             ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 13 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]  window advanced three samples
+    pmaddwd     m7,        [r3 - 6 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m5,        m0, 12                     ; [12 11 11 10 10 9 9 8]
+    pmaddwd     m5,        [r3 - 6 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m7,        m5
+
+    lea         r0,              [r0 + r1 * 4]        ; advance dst by 4 rows
+    movu        [r0],            m4                   ; store rows 4-7
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m7
+    RET
+
+cglobal intra_pred_ang8_31, 3,5,8                      ; angular mode 31, 8x8 block, 16-bit pixels
+    lea         r3,        [ang_table + 13 * 16]       ; weight-table base; [r3 + k*16] = row for fraction 13+k
+    add         r1,        r1                          ; stride: pixels -> bytes
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 4 * 16]              ; [17] weighted pair sum, round (+16), >>5
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 4 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2                         ; 8 words = one output row
+
+    palignr     m2,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]  window advanced one sample
+    mova        m6,        m2                         ; copy for the [19] row
+    pmaddwd     m2,        [r3 - 11 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 11 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 6 * 16]              ; [19]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 6 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]  window advanced two samples
+    pmaddwd     m7,        [r3 - 9 * 16]              ; [4]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m1,        [r3 - 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r4,              [r1 * 3]             ; r4 = 3*stride
+    movu        [r0],            m4                   ; store rows 0-3
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m7
+
+    palignr     m4,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m4,        [r3 + 8 * 16]              ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m2,        [r3 + 8 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]  window advanced three samples
+    mova        m6,        m2                         ; copy for the [23] row
+    pmaddwd     m2,        [r3 - 7 * 16]              ; [6]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m1,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 10 * 16]             ; [23]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 10 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    mova        m7,        m0                         ; window advanced four samples: use the high unpacked halves directly
+    pmaddwd     m7,        [r3 - 5 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m5
+    pmaddwd     m1,        [r3 - 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r0,              [r0 + r1 * 4]        ; advance dst by 4 rows
+    movu        [r0],            m4                   ; store rows 4-7
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m7
+    RET
+
+cglobal intra_pred_ang8_32, 3,5,8                     ; 8x8 angular intra prediction, mode 32, 16-bit samples; r0=dst, r1=dstStride, r2=ref pixels
+    lea         r3,        [ang_table + 19 * 16]      ; anchor into ang_table so per-row coeff offsets stay small
+    add         r1,        r1                         ; stride: pixels -> bytes (2 bytes/sample)
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 2 * 16]              ; [21]
+    paddd       m4,        [pd_16]                    ; +16 = rounding bias for the >>5 below; pattern repeats for every row
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 2 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m6,        m2
+    pmaddwd     m2,        [r3 - 9 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 12 * 16]             ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 12 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m7,        [r3 + 1 * 16]              ; [20]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m1,        [r3 + 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r4,              [r1 * 3]             ; write rows 0-3
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m7
+
+    palignr     m4,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    mova        m2,        m4
+    pmaddwd     m4,        [r3 - 10 * 16]             ; [ 9]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m3,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    mova        m6,        m3
+    pmaddwd     m3,        [r3 - 10 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m4,        m3
+
+    pmaddwd     m2,        [r3 + 11 * 16]             ; [30]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    pmaddwd     m6,        [r3 + 11 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    mova        m6,        m0
+    pmaddwd     m6,        [r3]                       ; [19]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m5
+    pmaddwd     m7,        [r3]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    movh        m1,        [r2 + 26]                  ; [16 15 14 13]
+    palignr     m7,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m7,        [r3 - 11 * 16]             ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, 4                      ; [14 13 13 12 12 11 11 10]
+    pmaddwd     m1,        [r3 - 11 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r0,              [r0 + r1 * 4]        ; write rows 4-7
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m7
+    RET
+
+cglobal intra_pred_ang8_33, 3,5,8                     ; 8x8 angular intra prediction, mode 33, 16-bit samples; r0=dst, r1=dstStride, r2=ref pixels
+    lea         r3,        [ang_table + 14 * 16]      ; anchor into ang_table; offsets below select per-row coefficients
+    add         r1,        r1                         ; stride: pixels -> bytes (2 bytes/sample)
+
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+    punpckhwd   m1,        m4                         ; [x 16 16 15 15 14 14 13]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 12 * 16]             ; [26]
+    paddd       m4,        [pd_16]                    ; +16 = rounding bias for the >>5 below; pattern repeats for every row
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 12 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    pmaddwd     m2,        [r3 + 6 * 16]              ; [20]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m6,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m6,        [r3 + 6 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    palignr     m6,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m6,        [r3]                       ; [14]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m7,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m7,        [r3]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    pmaddwd     m7,        [r3 - 6 * 16]              ; [ 8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m3,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    pmaddwd     m3,        [r3 - 6 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m7,        m3
+
+    lea         r4,              [r1 * 3]             ; write rows 0-3
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m7
+
+    mova        m4,        m0
+    pmaddwd     m4,        [r3 - 12 * 16]             ; [ 2]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m5
+    pmaddwd     m2,        [r3 - 12 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 14 * 16]             ; [28]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m5
+    pmaddwd     m6,        [r3 + 14 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    palignr     m6,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m6,        [r3 + 8 * 16]              ; [22]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m7,        m1, m5, 4                  ; [14 13 13 12 12 11 11 10]
+    pmaddwd     m7,        [r3 + 8 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m7,        [r3 + 2 * 16]              ; [16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, 8                      ; [15 14 14 13 13 12 12 11]
+    pmaddwd     m1,        [r3 + 2 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r0,              [r0 + r1 * 4]        ; write rows 4-7
+    movu        [r0],            m4
+    movu        [r0 + r1],       m2
+    movu        [r0 + r1 * 2],   m6
+    movu        [r0 + r4],       m7
+    RET
+
+%macro TRANSPOSE_STORE 6                              ; %1-%4: 4 rows of 8 words, %5: scratch, %6: byte offset; ZF from caller's "test r6d,r6d" picks the path
+    jnz         .skip%6                               ; r6d != 0: no transpose, store the 4 rows directly at r5
+    punpckhwd   %5,        %1, %2                     ; r6d == 0: 4x8 word transpose via lo/hi unpacks ...
+    punpcklwd   %1,        %2
+    punpckhwd   %2,        %3, %4
+    punpcklwd   %3,        %4
+
+    punpckldq   %4,        %1, %3                     ; ... then dword unpacks complete the transpose
+    punpckhdq   %1,        %3
+    punpckldq   %3,        %5, %2
+    punpckhdq   %5,        %2
+
+    movh        [r0 + %6],            %4              ; scatter 8 half-register rows (4 words each) at column offset %6
+    movhps      [r0 + r1 + %6],       %4
+    movh        [r0 + r1 * 2 + %6],   %1
+    movhps      [r0 + r4 + %6],       %1
+    lea         r5,                   [r0 + r1 * 4]
+    movh        [r5 + %6],            %3
+    movhps      [r5 + r1 + %6],       %3
+    movh        [r5 + r1 * 2 + %6],   %5
+    movhps      [r5 + r4 + %6],       %5
+    jmp         .end%6
+
+.skip%6:                                              ; direct path: 4 full-width row stores at r5
+    movu        [r5],            %1
+    movu        [r5 + r1],       %2
+    movu        [r5 + r1 * 2],   %3
+    movu        [r5 + r4],       %4
+.end%6:
+%endmacro
+
+INIT_XMM sse4                                         ; helpers below are assembled for SSE4.1
+cglobal ang16_mode_3_33                               ; shared 16x16 helper for angle modes 3/33; caller sets r0=dst, r1=stride(bytes), r2=ref, r3=ang_table anchor, r6d=store-mode flag (which value maps to which mode: see call site)
+    test        r6d,       r6d                        ; set ZF for TRANSPOSE_STORE; SSE ops below do not touch EFLAGS
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+    punpckhwd   m1,        m4                         ; [x 16 16 15 15 14 14 13]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 10 * 16]             ; [26]
+    paddd       m4,        [pd_16]                    ; +16 = rounding bias for the >>5 below; pattern repeats for every row
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 10 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    pmaddwd     m2,        [r3 + 4 * 16]              ; [20]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m6,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m6,        [r3 + 4 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    palignr     m6,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m6,        [r3 - 2 * 16]              ; [14]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m7,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m7,        [r3 - 2 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    pmaddwd     m7,        [r3 - 8 * 16]              ; [ 8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m3,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    pmaddwd     m3,        [r3 - 8 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m7,        m3
+
+    mov         r5,        r0                         ; r5 = direct-store base for rows 0-3
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m3, 0
+
+    mova        m4,        m0
+    pmaddwd     m4,        [r3 - 14 * 16]             ; [ 2]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m5
+    pmaddwd     m2,        [r3 - 14 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 12 * 16]             ; [28]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m5
+    pmaddwd     m6,        [r3 + 12 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    palignr     m6,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m6,        [r3 + 6 * 16]              ; [22]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m7,        m1, m5, 4                  ; [14 13 13 12 12 11 11 10]
+    pmaddwd     m7,        [r3 + 6 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m7,        [r3]                       ; [16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, 8                      ; [15 14 14 13 13 12 12 11]
+    pmaddwd     m1,        [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]              ; direct-store base for rows 4-7
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m3, 8
+
+    movu        m1,        [r2 + 26]                  ; [20 19 18 17 16 15 14 13]
+    psrldq      m4,        m1, 2                      ; [x 20 19 18 17 16 15 14]
+
+    punpcklwd   m3,        m1, m4                     ; [17 16 16 15 15 14 14 13]
+    punpckhwd   m1,        m4                         ; [x 20 20 19 19 18 18 17]
+
+    palignr     m4,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    pmaddwd     m4,        [r3 - 6 * 16]              ; [10]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m3, m5, 12                 ; [16 15 15 14 14 13 13 12]
+    pmaddwd     m2,        [r3 - 6 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m5
+    pmaddwd     m2,        [r3 - 12 * 16]             ; [4]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 12 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    mova        m6,        m5
+    pmaddwd     m6,        [r3 + 14 * 16]             ; [30]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m3
+    pmaddwd     m7,        [r3 + 14 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m3, m5, 4                  ; [14 13 13 12 12 11 11 10]
+    pmaddwd     m7,        [r3 + 8 * 16]              ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m0,        m1, m3, 4                  ; [18 17 17 16 16 15 15 14]
+    pmaddwd     m0,        [r3 + 8 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m7,        m0
+
+    lea         r5,        [r5 + r1 * 4]              ; direct-store base for rows 8-11
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m0, 16
+
+    palignr     m4,        m3, m5, 8                  ; [15 14 14 13 13 12 12 11]
+    pmaddwd     m4,        [r3 + 2 * 16]              ; [18]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m1, m3, 8                  ; [19 18 18 17 17 16 16 15]
+    pmaddwd     m2,        [r3 + 2 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m3, m5, 12                 ; [16 15 15 14 14 13 13 12]
+    pmaddwd     m2,        [r3 - 4 * 16]              ; [12]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m6,        m1, m3, 12                 ; [20 19 19 18 18 17 17 16]
+    pmaddwd     m6,        [r3 - 4 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    pinsrw      m1,        [r2 + 42], 7               ; m1 = [21 20 20 19 19 18 18 17]
+    pmaddwd     m3,        [r3 - 10 * 16]             ; [6]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m1,        [r3 - 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m3,        m1
+
+    movu        m7,        [r2 + 28]                  ; [21 20 19 18 17 16 15 14] - last row is a plain reference copy
+
+    lea         r5,        [r5 + r1 * 4]              ; direct-store base for rows 12-15
+
+    TRANSPOSE_STORE m4, m2, m3, m7, m0, 24
+
+    ret
+
+cglobal ang16_mode_4_32                               ; shared 16x16 helper for angle modes 4/32; caller sets r0=dst, r1=stride(bytes), r2=ref, r3=ang_table anchor, r6d=store-mode flag (which value maps to which mode: see call site)
+    test        r6d,       r6d                        ; set ZF for TRANSPOSE_STORE; SSE ops below do not touch EFLAGS
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 3 * 16]              ; [21]
+    paddd       m4,        [pd_16]                    ; +16 = rounding bias for the >>5 below; pattern repeats for every row
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 3 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m6,        m2
+    pmaddwd     m2,        [r3 - 8 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 13 * 16]             ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 13 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m7,        [r3 + 2 * 16]              ; [20]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m1,        [r3 + 2 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0                         ; r5 = direct-store base for rows 0-3
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    palignr     m4,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    mova        m2,        m4
+    pmaddwd     m4,        [r3 - 9 * 16]              ; [9]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m7,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    mova        m6,        m7
+    pmaddwd     m7,        [r3 - 9 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m4,        m7
+
+    pmaddwd     m2,        [r3 + 12 * 16]             ; [30]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    pmaddwd     m6,        [r3 + 12 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    mova        m6,        m0
+    pmaddwd     m6,        [r3 + 1 * 16]              ; [19]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m5
+    pmaddwd     m7,        [r3 + 1 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    movu        m1,        [r2 + 26]                  ; [20 19 18 17 16 15 14 13]
+
+    palignr     m7,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m7,        [r3 - 10 * 16]             ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m3,        m1, m5, 4                  ; [14 13 13 12 12 11 11 10]
+    pmaddwd     m3,        [r3 - 10 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m7,        m3
+
+    lea         r5,        [r0 + r1 * 4]              ; direct-store base for rows 4-7
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m3, 8
+
+    psrldq      m4,        m1, 2                      ; [x 20 19 18 17 16 15 14]
+
+    punpcklwd   m3,        m1, m4                     ; [17 16 16 15 15 14 14 13]
+    punpckhwd   m1,        m4                         ; [x 20 20 19 19 18 18 17]
+
+    palignr     m4,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m4,        [r3 + 11 * 16]             ; [29]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m3, m5, 4                  ; [14 13 13 12 12 11 11 10]
+    pmaddwd     m2,        [r3 + 11 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m2,        [r3]                       ; [18]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m6,        m3, m5, 8                  ; [15 14 14 13 13 12 12 11]
+    pmaddwd     m6,        [r3]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    palignr     m6,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    mova        m7,        m6
+    pmaddwd     m6,        [r3 - 11 * 16]             ; [7]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m0,        m3, m5, 12                 ; [16 15 15 14 14 13 13 12]
+    pmaddwd     m0,        [r3 - 11 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m6,        m0
+
+    pmaddwd     m7,        [r3 + 10 * 16]             ; [28]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m0,        m3, m5, 12                 ; [16 15 15 14 14 13 13 12] - rebuilt: m0 was consumed by the pmaddwd above
+    pmaddwd     m0,        [r3 + 10 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m7,        m0
+
+    lea         r5,        [r5 + r1 * 4]              ; direct-store base for rows 8-11
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m0, 16
+
+    mova        m4,        m5
+    pmaddwd     m4,        [r3 - 1 * 16]              ; [17]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m3, m5, 4                  ; [14 13 13 12 12 11 11 10]
+    mova        m7,        m2
+    pmaddwd     m2,        [r3 - 12 * 16]             ; [6]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m6,        m1, m3, 4                  ; [18 17 17 16 16 15 15 14]
+    mova        m0,        m6
+    pmaddwd     m6,        [r3 - 12 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    pmaddwd     m7,        [r3 + 9 * 16]              ; [27]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    pmaddwd     m0,        [r3 + 9 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m7,        m0
+
+    palignr     m0,        m3, m5, 8                  ; [15 14 14 13 13 12 12 11]
+    pmaddwd     m0,        [r3 - 2 * 16]              ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    palignr     m1,        m3, 8                      ; [19 18 18 17 17 16 16 15]
+    pmaddwd     m1,        [r3 - 2 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m0,        m1
+
+    lea         r5,        [r5 + r1 * 4]              ; direct-store base for rows 12-15
+
+    TRANSPOSE_STORE m4, m2, m7, m0, m3, 24
+
+    ret
+
+cglobal ang16_mode_5_31                               ; shared 16x16 helper for angle modes 5/31; caller sets r0=dst, r1=stride(bytes), r2=ref, r3=ang_table anchor, r6d=store-mode flag (which value maps to which mode: see call site)
+    test        r6d,       r6d                        ; set ZF for TRANSPOSE_STORE; SSE ops below do not touch EFLAGS
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 1 * 16]              ; [17]
+    paddd       m4,        [pd_16]                    ; +16 = rounding bias for the >>5 below; pattern repeats for every row
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m6,        m2
+    pmaddwd     m2,        [r3 - 14 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 3 * 16]              ; [19]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 3 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m7,        [r3 - 12 * 16]             ; [4]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m1,        [r3 - 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0                         ; r5 = direct-store base for rows 0-3
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    palignr     m4,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m4,        [r3 + 5 * 16]              ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m7,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m7,        [r3 + 5 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m4,        m7
+
+    palignr     m2,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    mova        m6,        m2
+    pmaddwd     m2,        [r3 - 10 * 16]             ; [6]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m1,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 7 * 16]              ; [23]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 7 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 - 8 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m3,        m5
+    pmaddwd     m3,        [r3 - 8 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m7,        m3
+
+    lea         r5,        [r0 + r1 * 4]              ; direct-store base for rows 4-7
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m3, 8
+
+    movu        m1,        [r2 + 26]                  ; [20 19 18 17 16 15 14 13]
+    psrldq      m4,        m1, 2                      ; [x 20 19 18 17 16 15 14]
+
+    punpcklwd   m3,        m1, m4                     ; [17 16 16 15 15 14 14 13]
+
+    mova        m4,        m0
+    pmaddwd     m4,        [r3 + 9 * 16]              ; [25]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m5
+    pmaddwd     m2,        [r3 + 9 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m6,        m2
+    pmaddwd     m2,        [r3 - 6 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m7,        m3, m5, 4                  ; [14 13 13 12 12 11 11 10]
+    mova        m1,        m7
+    pmaddwd     m7,        [r3 - 6 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m2,        m7
+
+    pmaddwd     m6,        [r3 + 11 * 16]             ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m1,        [r3 + 11 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m7,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m7,        [r3 - 4 * 16]              ; [12]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m3, m5, 8                  ; [15 14 14 13 13 12 12 11]
+    pmaddwd     m1,        [r3 - 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]              ; direct-store base for rows 8-11
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    palignr     m4,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m4,        [r3 + 13 * 16]             ; [29]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m3, m5, 8                  ; [15 14 14 13 13 12 12 11]
+    pmaddwd     m2,        [r3 + 13 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m2,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    mova        m7,        m2
+    pmaddwd     m2,        [r3 - 2 * 16]              ; [14]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    palignr     m6,        m3, m5, 12                 ; [16 15 15 14 14 13 13 12]
+    mova        m0,        m6
+    pmaddwd     m6,        [r3 - 2 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    pmaddwd     m7,        [r3 + 15 * 16]             ; [31]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    pmaddwd     m0,        [r3 + 15 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m7,        m0
+
+    pmaddwd     m5,        [r3]                       ; [16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    pmaddwd     m3,        [r3]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m5,        m3
+
+    lea         r5,        [r5 + r1 * 4]              ; direct-store base for rows 12-15
+
+    TRANSPOSE_STORE m4, m2, m7, m5, m3, 24
+
+    ret
+
+cglobal ang16_mode_6_30
+    test        r6d,       r6d
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 2 * 16]              ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 2 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 11 * 16]             ; [26]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 11 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    palignr     m6,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m7,        m6
+    pmaddwd     m6,        [r3 - 8 * 16]              ; [7]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        [r3 - 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m7,        [r3 + 5 * 16]              ; [20]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        [r3 + 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    palignr     m4,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    mova        m6,        m4
+    pmaddwd     m4,        [r3 - 14 * 16]             ; [1]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m1,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m6
+    pmaddwd     m2,        [r3 - 1 * 16]              ; [14]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m7
+    pmaddwd     m1,        [r3 - 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 12 * 16]             ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 12 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    pmaddwd     m7,        [r3 - 7 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    pmaddwd     m1,        [r3 - 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    palignr     m4,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    pmaddwd     m4,        [r3 + 6 * 16]              ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m2,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    pmaddwd     m2,        [r3 + 6 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 13 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m7,        m5
+    pmaddwd     m7,        [r3 - 13 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m2,        m7
+
+    mova        m6,        m0
+    pmaddwd     m6,        [r3]                       ; [15]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m5
+    pmaddwd     m1,        [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 + 13 * 16]             ; [28]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m5
+    pmaddwd     m1,        [r3 + 13 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    movh        m3,        [r2 + 26]                  ; [16 15 14 13]
+
+    palignr     m4,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m2,        m4
+    pmaddwd     m4,        [r3 - 6 * 16]              ; [9]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m1,        m3, m5, 4                  ; [14 13 13 12 12 11 11 10]
+    mova        m6,        m1
+    pmaddwd     m1,        [r3 - 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m2,        [r3 + 7 * 16]              ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m6
+    pmaddwd     m1,        [r3 + 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    psrldq      m3,        2
+    palignr     m7,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    mova        m5,        m7
+    pmaddwd     m7,        [r3 - 12 * 16]             ; [3]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m3,        m6, 4                      ; [15 14 14 13 13 12 12 11]
+    mova        m1,        m3
+    pmaddwd     m3,        [r3 - 12 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m7,        m3
+
+    pmaddwd     m5,        [r3 + 1 * 16]              ; [16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    pmaddwd     m1,        [r3 + 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m7, m5, m3, 24
+
+    ret
+
+cglobal ang16_mode_7_29
+    test        r6d,       r6d
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 8 * 16]              ; [9]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 8 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 1 * 16]              ; [18]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 10 * 16]             ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m7,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    pmaddwd     m7,        [r3 - 13 * 16]             ; [4]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        [r3 - 13 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    palignr     m4,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m6,        m4
+    pmaddwd     m4,        [r3 - 4 * 16]              ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3 - 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m6
+    pmaddwd     m2,        [r3 + 5 * 16]              ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m7
+    pmaddwd     m1,        [r3 + 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m6,        [r3 + 14 * 16]             ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m7,        [r3 + 14 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    pmaddwd     m7,        [r3 - 9 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m1,        [r3 - 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    palignr     m4,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    mova        m2,        m4
+    pmaddwd     m4,        [r3]                       ; [17]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m1,        m5, m0, 8                  ; [11 10 10 9 9 8 8 7]
+    mova        m7,        m1
+    pmaddwd     m1,        [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m2,        [r3 + 9 * 16]              ; [26]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    pmaddwd     m7,        [r3 + 9 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m2,        m7
+
+    palignr     m6,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    pmaddwd     m6,        [r3 - 14 * 16]             ; [3]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m1,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    pmaddwd     m1,        [r3 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m7,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    pmaddwd     m7,        [r3 - 5 * 16]             ; [12]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    pmaddwd     m1,        [r3 - 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    palignr     m4,        m0, m3, 12                 ; [8 7 7 6 6 5 5 4]
+    mova        m2,        m4
+    pmaddwd     m4,        [r3 + 4 * 16]              ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m1,        m5, m0, 12                 ; [12 11 11 10 10 9 9 8]
+    mova        m3,        m1
+    pmaddwd     m1,        [r3 + 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m2,        [r3 + 13 * 16]             ; [30]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    pmaddwd     m3,        [r3 + 13 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m2,        m3
+
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 - 10 * 16]             ; [7]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m3,        m5
+    pmaddwd     m3,        [r3 - 10 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m7,        m3
+
+    pmaddwd     m0,        [r3 - 1 * 16]              ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    pmaddwd     m5,        [r3 - 1 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m0,        m5
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m7, m0, m3, 24
+
+    ret
+
+cglobal ang16_mode_8_28
+    test        r6d,       r6d
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m2,        m1, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    psrldq      m4,        m1, 2                      ; [x 16 15 14 13 12 11 10]
+
+    punpcklwd   m3,        m0, m2                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m2                         ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m5,        m1, m4                     ; [13 12 12 11 11 10 10 9]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 10 * 16]             ; [5]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 10 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 5 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3]                       ; [15]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r3 + 5 * 16]              ; [20]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 10 * 16]             ; [25]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 15 * 16]             ; [30]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 15 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    palignr     m6,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    pmaddwd     m6,        [r3 - 12 * 16]             ; [3]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    palignr     m7,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m7,        [r3 - 12 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    palignr     m7,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    pmaddwd     m7,        [r3 - 7 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        [r3 - 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    palignr     m4,        m0, m3, 4                  ; [6 5 5 4 4 3 3 2]
+    mova        m7,        m4
+    pmaddwd     m4,        [r3 - 2 *16]               ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m6,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    mova        m1,        m6
+    pmaddwd     m6,        [r3 - 2 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m4,        m6
+
+    mova        m2,        m7
+    pmaddwd     m2,        [r3 + 3 * 16]              ; [18]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m1
+    pmaddwd     m6,        [r3 + 3 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    mova        m6,        m7
+    pmaddwd     m6,        [r3 + 8 * 16]              ; [23]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    pmaddwd     m1,        [r3 + 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m7,        [r3 + 13 * 16]             ; [28]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    palignr     m1,        m5, m0, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        [r3 + 13 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    palignr     m1,        m0, m3, 8                  ; [7 6 6 5 5 4 4 3]
+    mova        m4,        m1
+    pmaddwd     m4,        [r3 - 14 * 16]             ; [1]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    palignr     m5,        m0, 8                      ; [11 10 10 9 9 8 8 7]
+    mova        m0,        m5
+    pmaddwd     m0,        [r3 - 14 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m4,        m0
+
+    mova        m2,        m1
+    pmaddwd     m2,        [r3 - 9 * 16]              ; [6]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m3,        m5
+    pmaddwd     m3,        [r3 - 9 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m2,        m3
+
+    mova        m7,        m1
+    pmaddwd     m7,        [r3 - 4 * 16]              ; [11]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m3,        m5
+    pmaddwd     m3,        [r3 - 4 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    packusdw    m7,        m3
+
+    pmaddwd     m1,        [r3 + 1 * 16]              ; [16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    pmaddwd     m5,        [r3 + 1 * 16]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m1,        m5
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m7, m1, m3, 24
+
+    ret
+
+cglobal ang16_mode_9_27
+    test        r6d,       r6d
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 4]                   ; [9 8 7 6 5 4 3 2]
+
+    punpcklwd   m3,        m0, m1                     ; [5 4 4 3 3 2 2 1]
+    punpckhwd   m0,        m1                         ; [9 8 8 7 7 6 6 5]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 14 * 16]             ; [2]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 - 14 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 12 * 16]             ; [4]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 10 *16]             ; [6]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r3 - 8 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 6 * 16]              ; [10]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 4 * 16]              ; [12]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 2 * 16]              ; [14]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 - 2 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r3]                       ; [16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 2 *16]               ; [18]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r3 + 2 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m4,        m6
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 4 * 16]              ; [20]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r3 + 4 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 6 * 16]              ; [22]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r3 + 8 * 16]              ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 10 * 16]             ; [26]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 12 * 16]             ; [28]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pmaddwd     m3,        [r3 + 14 * 16]             ; [30]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r3 + 14 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    movu        m7,        [r2 + 4]
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m3, m7, m1, 24
+
+    ret
+
+cglobal ang16_mode_11_25
+    test        r6d,       r6d
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 14 * 16]             ; [30]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r3 + 14 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 12 * 16]             ; [28]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 10 *16]             ; [26]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r3 + 8 * 16]              ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 + 6 * 16]              ; [22]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 + 4 * 16]              ; [20]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 + 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 + 2 * 16]              ; [18]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r3 + 2 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r3]                       ; [16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 2 *16]               ; [14]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r3 - 2 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m4,        m6
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 4 * 16]              ; [12]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r3 - 4 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r3 - 6 * 16]              ; [10]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r3 - 8 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r3 - 10 * 16]             ; [6]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r3 - 12 * 16]             ; [4]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r3 - 14 * 16]             ; [2]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r3 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    movu        m3,        [r2]
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m7, m3, m1, 24
+
+    ret
+
+cglobal ang16_mode_12_24                               ; 16x16 angular intra prediction, one kernel shared by modes 12 and 24
+    test        r3d,       r3d                         ; ZF <- (r3d == 0); NOTE(review): flag presumably consumed inside TRANSPOSE_STORE to pick transposed vs. direct store - confirm in macro
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]  16-bit reference samples
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]  same refs advanced one sample
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]  interleave (cur,next) word pairs for pmaddwd
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 11 * 16]             ; [27]  fraction 27; per bracketed tags [r6 + k*16] is the weight row for fraction 16+k
+    paddd       m4,        [pd_16]                    ; + 16 rounding term of (a*(32-f) + b*f + 16) >> 5
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r6 + 11 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2                         ; pack both dword halves back to 8 words
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 6 * 16]              ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 + 1 *16]              ; [17]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 - 4 * 16]              ; [12]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0                         ; r5 = destination origin
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0              ; emit 4 result rows at offset 0 (m1 used as scratch)
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 - 9 * 16]              ; [7]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 14 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    palignr     m0,        m3, 12                     ; slide the pair window: pull one sample pair in from below (m5 holds preceding refs)
+    palignr     m3,        m5, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 + 13 * 16]             ; [29]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r6 + 13 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 8 * 16]              ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]              ; advance dst 4 rows (r1 presumably the dst stride - confirm at call site)
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 3 *16]               ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 + 3 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m4,        m6
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 2 * 16]              ; [14]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 - 2 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 7 * 16]              ; [9]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 - 12 * 16]             ; [4]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    pslldq      m5,        2                          ; shift extension register so palignr below picks up the next reference word
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 15 * 16]             ; [31]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 15 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 10 * 16]             ; [26]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 5 * 16]              ; [21]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    pmaddwd     m3,        [r6]                       ; [16]  last row: filter in place, m3/m0 are dead afterwards
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r6]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m7, m3, m1, 24
+
+    ret
+
+cglobal ang16_mode_13_23                               ; 16x16 angular intra prediction, modes 13 and 23; same scheme as ang16_mode_12_24 but [r6] is the weight row for fraction 15
+    test        r3d,       r3d                         ; ZF <- (r3d == 0); NOTE(review): presumably read by TRANSPOSE_STORE - confirm in macro
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]  16-bit reference samples
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]  (cur,next) pairs for pmaddwd
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 8 * 16]              ; [23]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r6 + 8 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 1 * 16]              ; [14]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 10 *16]             ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m0,        m3, 12                     ; slide pair window one sample toward the refs held in m5
+    palignr     m3,        m5, 12
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 13 * 16]             ; [28]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 13 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0                         ; r5 = destination origin
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 4 * 16]              ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 5 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 14 * 16]             ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r6 - 14 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pslldq      m5,        2                          ; expose next reference word, then slide the window again
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 9 * 16]              ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]              ; dst += 4 rows (r1 presumably the stride)
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6]                       ; [15]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m4,        m6
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 9 * 16]              ; [6]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 - 9 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 + 14 * 16]             ; [29]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 5 * 16]              ; [20]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 - 4 * 16]              ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 13 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 13 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 10 * 16]             ; [25]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    pmaddwd     m3,        [r6 + 1 * 16]              ; [16]  last row, filtered in place
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r6 + 1 *16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m7, m3, m1, 24
+
+    ret
+
+cglobal ang16_mode_14_22                               ; 16x16 angular intra prediction, modes 14 and 22; [r6] holds the weight row for fraction 18
+    test        r3d,       r3d                         ; ZF <- (r3d == 0); NOTE(review): presumably read by TRANSPOSE_STORE - confirm in macro
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]  16-bit reference samples
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]  (cur,next) pairs for pmaddwd
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 1 * 16]              ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r6 + 1 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 12 * 16]             ; [6]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    palignr     m0,        m3, 12                     ; slide pair window one sample toward the refs held in m5
+    palignr     m3,        m5, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 + 7 * 16]              ; [25]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 7 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 - 6 * 16]              ; [12]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0                         ; r5 = destination origin
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    pslldq      m5,        2                          ; expose next reference word before each window slide (steeper angle: slide every 2-3 rows)
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 13 * 16]             ; [31]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 13 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6]                       ; [18]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 13 * 16]             ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r6 - 13 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 6 * 16]              ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 6 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]              ; dst += 4 rows (r1 presumably the stride)
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 - 7 * 16]              ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 - 7 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m4,        m6
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 12 * 16]             ; [30]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 + 12 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 1 * 16]              ; [17]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 - 14 * 16]             ; [4]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 5 * 16]              ; [23]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 8 * 16]              ; [10]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 11 * 16]             ; [29]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 11 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    pmaddwd     m3,        [r6 - 2 * 16]              ; [16]  last row, filtered in place
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r6 - 2 *16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m7, m3, m1, 24
+
+    ret
+
+cglobal ang16_mode_15_21                               ; 16x16 angular intra prediction, modes 15 and 21; [r6] holds the weight row for fraction 15
+    test        r3d,       r3d                         ; ZF <- (r3d == 0); NOTE(review): presumably read by TRANSPOSE_STORE - confirm in macro
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]  16-bit reference samples
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    palignr     m6,        m0, m5, 2                  ; m6 = refs shifted down one sample (bytes of m0:m5 >> 2); used by the first window slide
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]  (cur,next) pairs for pmaddwd
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6]                       ; [15]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r6]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m0,        m3, 12                     ; first slide uses m6 (pre-shifted copy); later slides use m5 directly
+    palignr     m3,        m6, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 15 * 16]             ; [30]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 15 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 2 * 16]              ; [13]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 2 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 13 * 16]             ; [28]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 13 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0                         ; r5 = destination origin
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 - 4 * 16]              ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pslldq      m5,        2                          ; steeper angle: window slides every 2 rows from here on
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 11 * 16]             ; [26]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 11 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 6 * 16]              ; [9]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r6 - 6 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 9 * 16]              ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]              ; dst += 4 rows (r1 presumably the stride)
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 - 8 * 16]              ; [7]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 - 8 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m4,        m6
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 7 * 16]              ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 + 7 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 10 * 16]             ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 5 * 16]              ; [20]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 5 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 - 12 * 16]             ; [3]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 3 * 16]              ; [18]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 3 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 - 14 * 16]             ; [1]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    pmaddwd     m3,        [r6 + 1 * 16]              ; [16]  last row, filtered in place
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r6 + 1 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m7, m3, m1, 24
+
+    ret
+
+cglobal ang16_mode_16_20                               ; 16x16 angular intra prediction, modes 16 and 20; [r6] holds the weight row for fraction 13
+    test        r4d,       r4d                         ; ZF <- (r4d == 0); NOTE(review): presumably read by TRANSPOSE_STORE - confirm in macro
+    lea         r4,        [r1 * 3]                   ; r4 = 3*stride; lea does not touch flags, so the ZF above survives
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]  16-bit reference samples
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    palignr     m6,        m0, m5, 2                  ; m6 = refs shifted down one sample (bytes of m0:m5 >> 2); used by the first window slide
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]  (cur,next) pairs for pmaddwd
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 - 2 * 16]              ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r6 - 2 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    palignr     m0,        m3, 12                     ; first slide uses m6 (pre-shifted copy); later slides use m5
+    palignr     m3,        m6, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 9 * 16]              ; [22]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 12 * 16]             ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 - 1 * 16]              ; [12]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 1 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0                         ; r5 = destination origin
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    pslldq      m5,        2                          ; near-horizontal angle: window slides almost every row
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 10 * 16]             ; [23]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 11 * 16]             ; [2]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 11 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6]                       ; [13]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r6]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 11 * 16]             ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 11 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]              ; dst += 4 rows (r1 presumably the stride)
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 - 10 * 16]             ; [3]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 - 10 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m4,        m6
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 1 * 16]              ; [14]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 + 1 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 + 12 * 16]             ; [25]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 - 9 * 16]              ; [4]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 9 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 2 * 16]              ; [15]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 2 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m5,        [r3]                       ; m5 exhausted: refill the extension register from the second reference array (r3)
+    pshufb      m5,        [pw_ang8_16]               ; NOTE(review): pw_ang8_16 presumably reorders the words into the layout palignr expects - confirm against the table
+
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 13 * 16]             ; [26]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 13 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 - 8 * 16]              ; [5]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    pmaddwd     m3,        [r6 + 3 * 16]              ; [16]  last row, filtered in place
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r6 + 3 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    lea         r5,        [r5 + r1 * 4]
+
+    TRANSPOSE_STORE m4, m2, m7, m3, m1, 24
+
+    ret
+
+;-----------------------------------------------------------------------------
+; Shared body for 16x16 angular intra prediction, modes 17 and 19 (16-bit).
+; Register contract set up by the intra_pred_ang16_17/19 wrappers below:
+;   r0 = dst base, r1 = dstStride in bytes (already doubled for 16-bit),
+;   r2 = main reference row, r3 = pointer for the pw_ang8_17 reload,
+;   r6 = ang_table bias pointer, m5 = pre-shuffled extra reference pixels,
+;   r4d = 0 for mode 17, 1 for mode 19.
+; NOTE(review): the initial 'test r4d, r4d' only establishes ZF; the following
+; lea does not touch flags, so TRANSPOSE_STORE presumably branches on ZF to
+; pick transposed (mode 17) vs. direct (mode 19) stores - confirm against the
+; macro definition earlier in this file.
+; Each 9-instruction group below interpolates one output row: pmaddwd with a
+; [weight, 32-weight] constant pair from ang_table, +16 and >>5 for rounding
+; (the bracketed number is the fractional position), then packusdw to words.
+;-----------------------------------------------------------------------------
+cglobal ang16_mode_17_19
+    test        r4d,       r4d
+    lea         r4,        [r1 * 3]
+    movu        m0,        [r2]                       ; [7 6 5 4 3 2 1 0]
+    movu        m1,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+
+    palignr     m6,        m0, m5, 2
+
+    punpcklwd   m3,        m0, m1                     ; [4 3 3 2 2 1 1 0]
+    punpckhwd   m0,        m1                         ; [8 7 7 6 6 5 5 4]
+
+    ; row 0
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 - 10 * 16]             ; [6]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m2,        m0
+    pmaddwd     m2,        [r6 - 10 * 16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m4,        m2
+
+    ; shift one reference sample in from m6 for the next row
+    palignr     m0,        m3, 12
+    palignr     m3,        m6, 12
+
+    ; row 1
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 4 * 16]              ; [12]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 2
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 + 2 * 16]              ; [18]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 2 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 3
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 8 * 16]              ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    mov         r5,        r0
+
+    ; emit rows 0-3 (transposed when ZF set - see header note)
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 0
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 4
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 14 * 16]             ; [30]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    ; row 5 reuses the same reference window (no shift between [30] and [4])
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 - 12 * 16]             ; [4]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 12 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 6
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 6 * 16]              ; [10]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m7,        m0
+    pmaddwd     m7,        [r6 - 6 * 16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 7
+    mova        m7,        m3
+    pmaddwd     m7,        [r6]                      ; [16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r0 + r1 * 4]
+
+    ; emit rows 4-7
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 8
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 8
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 + 6 * 16]              ; [22]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 + 6 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m4,        m6
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 9
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 12 * 16]             ; [28]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m6,        m0
+    pmaddwd     m6,        [r6 + 12 * 16]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m2,        m6
+
+    ; row 10 reuses the same window ([28] then [2], no shift between)
+    mova        m6,        m3
+    pmaddwd     m6,        [r6 - 14 * 16]             ; [2]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 14 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    ; m5 is exhausted: reload further reference pixels through r3
+    movu        m5,        [r3]
+    pshufb      m5,        [pw_ang8_17]
+
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 11
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 - 8 * 16]              ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 8 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    lea         r5,        [r5 + r1 * 4]
+
+    ; emit rows 8-11
+    TRANSPOSE_STORE m4, m2, m6, m7, m1, 16
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 12
+    mova        m4,        m3
+    pmaddwd     m4,        [r6 - 2 * 16]              ; [14]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 - 2 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 13
+    mova        m2,        m3
+    pmaddwd     m2,        [r6 + 4 * 16]              ; [20]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 4 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m2,        m1
+
+    pslldq      m5,        2
+    palignr     m0,        m3, 12
+    palignr     m3,        m5, 12
+
+    ; row 14
+    mova        m7,        m3
+    pmaddwd     m7,        [r6 + 10 * 16]             ; [26]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    mova        m1,        m0
+    pmaddwd     m1,        [r6 + 10 * 16]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m7,        m1
+
+    ; row 15 - last row, m3/m0 can be clobbered in place ([0+32] weights)
+    pmaddwd     m3,        [r6 - 16 * 16]
+    paddd       m3,        [pd_16]
+    psrld       m3,        5
+    pmaddwd     m0,        [r6 - 16 * 16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m3,        m0
+
+    lea         r5,        [r5 + r1 * 4]
+
+    ; emit rows 12-15
+    TRANSPOSE_STORE m4, m2, m7, m3, m1, 24
+
+    ret
+
+;------------------------------------------------------------------------------------------
+; void intraPredAng16(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------
+;------------------------------------------------------------------------------------------
+; Mode 2 / mode 34 (pure diagonal): every output row is the reference shifted
+; by one more sample, so no interpolation is needed - just palignr + store.
+; The reference buffer holds the two filtered arrays 64 bytes apart: r2 points
+; at one of them, and for dirMode == 34 (r3m) cmove swaps back to the other.
+; r1 is doubled once because pixels are 16-bit (stride arg is in pixels).
+;------------------------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal intra_pred_ang16_2, 3,5,5
+    lea         r4,                 [r2]
+    add         r2,                 64
+    cmp         r3m,                byte 34
+    cmove       r2,                 r4                  ; mode 34 reads the other array
+    add         r1,                 r1
+    lea         r3,                 [r1 * 3]
+    movu        m0,                 [r2 + 4]
+    movu        m1,                 [r2 + 20]
+    movu        m2,                 [r2 + 36]
+
+    ; rows 0-3: shift amounts 0, 2, 4, 6 bytes (0..3 samples)
+    movu        [r0],               m0
+    movu        [r0 + 16],          m1
+    palignr     m3,                 m1, m0, 2
+    palignr     m4,                 m2, m1, 2
+    movu        [r0 + r1],          m3
+    movu        [r0 + r1 + 16],     m4
+    palignr     m3,                 m1, m0, 4
+    palignr     m4,                 m2, m1, 4
+    movu        [r0 + r1 * 2],      m3
+    movu        [r0 + r1 * 2 + 16], m4
+    palignr     m3,                 m1, m0, 6
+    palignr     m4,                 m2, m1, 6
+    movu        [r0 + r3],          m3
+    movu        [r0 + r3 + 16],     m4
+
+    ; rows 4-7: shifts 8..14
+    lea         r0,                 [r0 + r1 * 4]
+    palignr     m3,                 m1, m0, 8
+    palignr     m4,                 m2, m1, 8
+    movu        [r0],               m3
+    movu        [r0 + 16],          m4
+    palignr     m3,                 m1, m0, 10
+    palignr     m4,                 m2, m1, 10
+    movu        [r0 + r1],          m3
+    movu        [r0 + r1 + 16],     m4
+    palignr     m3,                 m1, m0, 12
+    palignr     m4,                 m2, m1, 12
+    movu        [r0 + r1 * 2],      m3
+    movu        [r0 + r1 * 2 + 16], m4
+    palignr     m3,                 m1, m0, 14
+    palignr     m4,                 m2, m1, 14
+    movu        [r0 + r3],          m3
+    movu        [r0 + r3 + 16],     m4
+
+    ; rows 8-15: window advances by 16 bytes (m1/m2 become the low halves)
+    movu        m0,                 [r2 + 52]
+    lea         r0,                 [r0 + r1 * 4]
+    movu        [r0],               m1
+    movu        [r0 + 16],          m2
+    palignr     m3,                 m2, m1, 2
+    palignr     m4,                 m0, m2, 2
+    movu        [r0 + r1],          m3
+    movu        [r0 + r1 + 16],     m4
+    palignr     m3,                 m2, m1, 4
+    palignr     m4,                 m0, m2, 4
+    movu        [r0 + r1 * 2],      m3
+    movu        [r0 + r1 * 2 + 16], m4
+    palignr     m3,                 m2, m1, 6
+    palignr     m4,                 m0, m2, 6
+    movu        [r0 + r3],          m3
+    movu        [r0 + r3 + 16],     m4
+
+    lea         r0,                 [r0 + r1 * 4]
+    palignr     m3,                 m2, m1, 8
+    palignr     m4,                 m0, m2, 8
+    movu        [r0],               m3
+    movu        [r0 + 16],          m4
+    palignr     m3,                 m2, m1, 10
+    palignr     m4,                 m0, m2, 10
+    movu        [r0 + r1],          m3
+    movu        [r0 + r1 + 16],     m4
+    palignr     m3,                 m2, m1, 12
+    palignr     m4,                 m0, m2, 12
+    movu        [r0 + r1 * 2],      m3
+    movu        [r0 + r1 * 2 + 16], m4
+    palignr     m3,                 m2, m1, 14
+    palignr     m4,                 m0, m2, 14
+    movu        [r0 + r3],          m3
+    movu        [r0 + r3 + 16],     m4
+    RET
+
+; Thin wrappers for the paired angular modes 3/33, 4/32, 5/31. Each sets up:
+;   r3  = ang_table bias pointer (per-mode offset into the weight table),
+;   r6d = 0 for the "horizontal" mode (3/4/5, src += 64 to use the left
+;         reference, output transposed by the shared body) or 1 for the
+;         mirrored "vertical" mode (33/32/31),
+;   r1  = byte stride (doubled: 16-bit pixels), r4 = 3 * stride,
+; then calls the shared ang16_mode_N body twice - once per 16x8 half - moving
+; src by 16 bytes (8 samples) and dst by 8 rows (transposed) or 16 bytes.
+INIT_XMM sse4    
+cglobal intra_pred_ang16_3, 3,7,8
+    add         r2,        64
+    xor         r6d,       r6d
+    lea         r3,        [ang_table + 16 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_3_33
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + r1 * 8]
+
+    call        ang16_mode_3_33
+    RET
+
+cglobal intra_pred_ang16_33, 3,7,8
+    xor         r6d,       r6d
+    inc         r6d
+    lea         r3,        [ang_table + 16 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_3_33
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + 16]
+
+    call        ang16_mode_3_33
+    RET
+
+cglobal intra_pred_ang16_4, 3,7,8
+    add         r2,        64
+    xor         r6d,       r6d
+    lea         r3,        [ang_table + 18 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_4_32
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + r1 * 8]
+
+    call        ang16_mode_4_32
+    RET
+
+cglobal intra_pred_ang16_32, 3,7,8
+    xor         r6d,       r6d
+    inc         r6d
+    lea         r3,        [ang_table + 18 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_4_32
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + 16]
+
+    call        ang16_mode_4_32
+    RET
+
+cglobal intra_pred_ang16_5, 3,7,8
+    add         r2,        64
+    xor         r6d,       r6d
+    lea         r3,        [ang_table + 16 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_5_31
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + r1 * 8]
+
+    call        ang16_mode_5_31
+    RET
+
+cglobal intra_pred_ang16_31, 3,7,8
+    xor         r6d,       r6d
+    inc         r6d
+    lea         r3,        [ang_table + 16 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_5_31
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + 16]
+
+    call        ang16_mode_5_31
+    RET
+
+; Wrappers for modes 6/30, 7/29, 8/28 and 9/27 - same scheme as the 3/33
+; family above: r3 = per-mode ang_table bias, r6d = 0 (transposed/left ref,
+; src += 64) or 1 (direct/top ref), two calls into the shared mode body with
+; src advanced 16 bytes and dst advanced 8 rows or 16 bytes between halves.
+cglobal intra_pred_ang16_6, 3,7,8
+    add         r2,        64
+    xor         r6d,       r6d
+    lea         r3,        [ang_table + 15 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_6_30
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + r1 * 8]
+
+    call        ang16_mode_6_30
+    RET
+
+cglobal intra_pred_ang16_30, 3,7,8
+    xor         r6d,       r6d
+    inc         r6d
+    lea         r3,        [ang_table + 15 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_6_30
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + 16]
+
+    call        ang16_mode_6_30
+    RET
+
+cglobal intra_pred_ang16_7, 3,7,8
+    add         r2,        64
+    xor         r6d,       r6d
+    lea         r3,        [ang_table + 17 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_7_29
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + r1 * 8]
+
+    call        ang16_mode_7_29
+    RET
+
+cglobal intra_pred_ang16_29, 3,7,8
+    xor         r6d,       r6d
+    inc         r6d
+    lea         r3,        [ang_table + 17 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_7_29
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + 16]
+
+    call        ang16_mode_7_29
+    RET
+
+cglobal intra_pred_ang16_8, 3,7,8
+    add         r2,        64
+    xor         r6d,       r6d
+    lea         r3,        [ang_table + 15 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_8_28
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + r1 * 8]
+
+    call        ang16_mode_8_28
+    RET
+
+cglobal intra_pred_ang16_28, 3,7,8
+    xor         r6d,       r6d
+    inc         r6d
+    lea         r3,        [ang_table + 15 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_8_28
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + 16]
+
+    call        ang16_mode_8_28
+    RET
+
+cglobal intra_pred_ang16_9, 3,7,8
+    add         r2,        64
+    xor         r6d,       r6d
+    lea         r3,        [ang_table + 16 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_9_27
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + r1 * 8]
+
+    call        ang16_mode_9_27
+    RET
+
+cglobal intra_pred_ang16_27, 3,7,8
+    xor         r6d,       r6d
+    inc         r6d
+    lea         r3,        [ang_table + 16 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_9_27
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + 16]
+
+    call        ang16_mode_9_27
+    RET
+
+; Modes 11/25 and 12/24 read across the top-left corner, so each wrapper
+; temporarily patches the first word of the second reference array: the
+; original word at [r2 + 64] is parked on the stack (4-byte stack slot from
+; '0-4'), overwritten with the top-left sample from [r2], and restored after
+; both half-block calls (by then r2 has advanced 64+16 or 16 bytes, hence the
+; [r2 - 16] / [r2 + 48] restore addresses).
+; NOTE(review): for 11/25 the flag register is r6d and the table pointer is
+; r3; for 12/24 (and later modes) they swap to r3d / r6 - presumably to match
+; the register expectations of the respective shared bodies; confirm there.
+cglobal intra_pred_ang16_11, 3,7,8, 0-4
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w                        ; save clobbered word
+    mov         [r2 + 64], r6w                        ; patch in top-left sample
+
+    add         r2,        64
+    xor         r6d,       r6d
+    lea         r3,        [ang_table + 16 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_11_25
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + r1 * 8]
+
+    call        ang16_mode_11_25
+
+    mov         r6d,       [rsp]
+    mov         [r2 - 16], r6w                        ; restore original word
+    RET
+
+cglobal intra_pred_ang16_25, 3,7,8
+    xor         r6d,       r6d
+    inc         r6d
+    lea         r3,        [ang_table + 16 * 16]
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_11_25
+
+    lea         r2,        [r2 + 16]
+    lea         r0,        [r0 + 16]
+
+    call        ang16_mode_11_25
+    RET
+
+cglobal intra_pred_ang16_12, 3,7,8, 0-4
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w
+    mov         [r2 + 64], r6w
+
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+    lea         r6,        [ang_table + 16 * 16]
+    movu        m5,        [r2]
+    pshufb      m5,        [pw_ang8_12]               ; pre-shuffle extra refs for the body
+    pinsrw      m5,        [r2 + 26], 5
+    xor         r3d,       r3d
+    add         r2,        64
+
+    call        ang16_mode_12_24
+
+    lea         r0,        [r0 + r1 * 8]
+    movu        m5,        [r2 + 2]
+    lea         r2,        [r2 + 16]
+
+    call        ang16_mode_12_24
+
+    mov         r6d,       [rsp]
+    mov         [r2 - 16], r6w
+    RET
+
+cglobal intra_pred_ang16_24, 3,7,8, 0-4
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w
+    mov         [r2 + 64], r6w
+
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+    lea         r6,        [ang_table + 16 * 16]
+    movu        m5,        [r2 + 64]
+    pshufb      m5,        [pw_ang8_12]
+    pinsrw      m5,        [r2 + 26 + 64], 5
+    xor         r3d,       r3d
+    inc         r3d
+
+    call        ang16_mode_12_24
+
+    lea         r0,        [r0 + 16]
+    movu        m5,        [r2 + 2]
+    lea         r2,        [r2 + 16]
+
+    call        ang16_mode_12_24
+
+    mov         r6d,       [rsp]
+    mov         [r2 + 48], r6w                        ; r2 advanced 16, so this is original +64
+    RET
+
+; Modes 13/23 and 14/22: same save/patch/restore of the word at [r2 + 64] as
+; modes 11/12 above, plus a mode-specific pre-shuffle of the secondary
+; reference pixels into m5 for the shared body (pw_ang16_13 / pw_ang8_13 /
+; pw_ang8_14 are reorder masks declared elsewhere in this file).
+; r3d = 0 selects the transposed (left-reference) variant, 1 the direct one.
+cglobal intra_pred_ang16_13, 3,7,8, 0-4
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w
+    mov         [r2 + 64], r6w
+
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+    lea         r6,        [ang_table + 15 * 16]
+    movu        m5,        [r2]
+    pshufb      m5,        [pw_ang16_13]
+    movu        m6,        [r2 + 14]
+    pshufb      m6,        [pw_ang8_13]
+    pslldq      m6,        2
+    palignr     m5,        m6, 6
+    xor         r3d,       r3d
+    add         r2,        64
+
+    call        ang16_mode_13_23
+
+    lea         r0,        [r0 + r1 * 8]
+    movu        m5,        [r2 + 2]
+    lea         r2,        [r2 + 16]
+
+    call        ang16_mode_13_23
+
+    mov         r6d,       [rsp]
+    mov         [r2 - 16], r6w                        ; restore patched word
+    RET
+
+cglobal intra_pred_ang16_23, 3,7,8, 0-4
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w
+    mov         [r2 + 64], r6w
+
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+    lea         r6,        [ang_table + 15 * 16]
+    movu        m5,        [r2 + 64]
+    pshufb      m5,        [pw_ang16_13]
+    movu        m6,        [r2 + 14 + 64]
+    pshufb      m6,        [pw_ang8_13]
+    pslldq      m6,        2
+    palignr     m5,        m6, 6
+    xor         r3d,       r3d
+    inc         r3d
+
+    call        ang16_mode_13_23
+
+    lea         r0,        [r0 + 16]
+    movu        m5,        [r2 + 2]
+    lea         r2,        [r2 + 16]
+
+    call        ang16_mode_13_23
+
+    mov         r6d,       [rsp]
+    mov         [r2 + 48], r6w
+    RET
+
+cglobal intra_pred_ang16_14, 3,7,8, 0-4
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w
+    mov         [r2 + 64], r6w
+
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+    lea         r6,        [ang_table + 18 * 16]
+    movu        m6,        [r2]
+    pshufb      m6,        [pw_ang8_14]
+    movu        m5,        [r2 + 20]
+    pshufb      m5,        [pw_ang8_14]
+    punpckhqdq  m5,        m6                         ; merge the two shuffled halves
+    xor         r3d,       r3d
+    add         r2,        64
+
+    call        ang16_mode_14_22
+
+    lea         r0,        [r0 + r1 * 8]
+    movu        m5,        [r2 + 2]
+    lea         r2,        [r2 + 16]
+
+    call        ang16_mode_14_22
+
+    mov         r6d,       [rsp]
+    mov         [r2 - 16], r6w
+    RET
+
+cglobal intra_pred_ang16_22, 3,7,8, 0-4
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w
+    mov         [r2 + 64], r6w
+
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+    lea         r6,        [ang_table + 18 * 16]
+    movu        m6,        [r2 + 64]
+    pshufb      m6,        [pw_ang8_14]
+    movu        m5,        [r2 + 20 + 64]
+    pshufb      m5,        [pw_ang8_14]
+    punpckhqdq  m5,        m6
+    xor         r3d,       r3d
+    inc         r3d
+
+    call        ang16_mode_14_22
+
+    lea         r0,        [r0 + 16]
+    movu        m5,        [r2 + 2]
+    lea         r2,        [r2 + 16]
+
+    call        ang16_mode_14_22
+
+    mov         r6d,       [rsp]
+    mov         [r2 + 48], r6w
+    RET
+
+; Modes 15/21 and 16/20: same top-left word patching as above. The 16/20 pair
+; additionally needs a second pointer (r3) into the unpatched reference for
+; in-body reloads, so it reserves mmsize+4 stack bytes: [rsp] holds the saved
+; reference pointer across the call, [rsp + 16] the saved word.
+cglobal intra_pred_ang16_15, 3,7,8, 0-4
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w
+    mov         [r2 + 64], r6w
+
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+    lea         r6,        [ang_table + 15 * 16]
+    movu        m6,        [r2 + 4]
+    pshufb      m6,        [pw_ang8_15]
+    movu        m5,        [r2 + 18]
+    pshufb      m5,        [pw_ang8_15]
+    punpckhqdq  m5,        m6
+    xor         r3d,       r3d
+    add         r2,        64
+
+    call        ang16_mode_15_21
+
+    lea         r0,        [r0 + r1 * 8]
+    movu        m5,        [r2]
+    lea         r2,        [r2 + 16]
+
+    call        ang16_mode_15_21
+
+    mov         r6d,       [rsp]
+    mov         [r2 - 16], r6w                        ; restore patched word
+    RET
+
+cglobal intra_pred_ang16_21, 3,7,8, 0-4
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w
+    mov         [r2 + 64], r6w
+
+    add         r1,        r1
+    lea         r4,        [r1 * 3]
+    lea         r6,        [ang_table + 15 * 16]
+    movu        m6,        [r2 + 4 + 64]
+    pshufb      m6,        [pw_ang8_15]
+    movu        m5,        [r2 + 18 + 64]
+    pshufb      m5,        [pw_ang8_15]
+    punpckhqdq  m5,        m6
+    xor         r3d,       r3d
+    inc         r3d
+
+    call        ang16_mode_15_21
+
+    lea         r0,        [r0 + 16]
+    movu        m5,        [r2]
+    lea         r2,        [r2 + 16]
+
+    call        ang16_mode_15_21
+
+    mov         r6d,       [rsp]
+    mov         [r2 + 48], r6w
+    RET
+
+cglobal intra_pred_ang16_16, 3,7,8,0-(1*mmsize+4)
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp + 16], r5w                       ; saved word lives above the pointer slot
+    mov         [r2 + 64], r6w
+
+    add         r1,        r1
+    lea         r6,        [ang_table + 13 * 16]
+    movu        m6,        [r2 + 4]
+    pshufb      m6,        [pw_ang16_16]
+    movu        m5,        [r2 + 16]
+    pshufb      m5,        [pw_ang16_16]
+    punpckhqdq  m5,        m6
+    mov         [rsp],     r2                         ; keep base pointer for second half
+    lea         r3,        [r2 + 24]
+    add         r2,        64
+    xor         r4,        r4
+
+    call        ang16_mode_16_20
+
+    lea         r0,        [r0 + r1 * 8]
+    mov         r3,        [rsp]
+    movu        m5,        [r2]
+    lea         r2,        [r2 + 16]
+    xor         r4,        r4
+
+    call        ang16_mode_16_20
+
+    mov         r6d,       [rsp + 16]
+    mov         [r2 - 16], r6w
+    RET
+
+cglobal intra_pred_ang16_20, 3,7,8,0-(1*mmsize+4)
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp + 16], r5w
+    mov         [r2 + 64], r6w
+
+    lea         r3,        [r2 + 64]
+    add         r1,        r1
+    lea         r6,        [ang_table + 13 * 16]
+    movu        m6,        [r3 + 4]
+    pshufb      m6,        [pw_ang16_16]
+    movu        m5,        [r3 + 16]
+    pshufb      m5,        [pw_ang16_16]
+    punpckhqdq  m5,        m6
+    mov         [rsp],     r3
+    lea         r3,        [r3 + 24]
+    xor         r4,        r4
+    inc         r4
+
+    call        ang16_mode_16_20
+
+    lea         r0,        [r0 + 16]
+    mov         r3,        [rsp]
+    movu        m5,        [r2]
+    lea         r2,        [r2 + 16]
+    xor         r4,        r4
+    inc         r4
+
+    call        ang16_mode_16_20
+    mov         r6d,       [rsp + 16]
+    mov         [r3],      r6w                        ; r3 = saved pointer to the patched word
+    RET
+
+; Modes 17/19: wrappers around ang16_mode_17_19 (defined above). Same layout
+; as the 16/20 pair: [rsp] = saved reference pointer, [rsp + 16] = saved word
+; at [r2 + 64], which is patched with the top-left sample and restored after
+; both 16x8 halves are produced. r4 = 0 selects mode 17, 1 selects mode 19.
+cglobal intra_pred_ang16_17, 3,7,8,0-(1*mmsize+4)
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp + 16], r5w
+    mov         [r2 + 64], r6w
+
+    add         r1,        r1
+    lea         r6,        [ang_table + 16 * 16]
+    movu        m6,        [r2 + 2]
+    pshufb      m6,        [pw_ang16_16]
+    movu        m5,        [r2 + 12]
+    pshufb      m5,        [pw_ang16_16]
+    punpckhqdq  m5,        m6                         ; pre-shuffled refs consumed as m5 by the body
+    mov         [rsp],     r2
+    lea         r3,        [r2 + 20]
+    add         r2,        64
+    xor         r4,        r4
+
+    call        ang16_mode_17_19
+
+    lea         r0,        [r0 + r1 * 8]
+    mov         r3,        [rsp]
+    movu        m5,        [r2]
+    lea         r2,        [r2 + 16]
+    xor         r4,        r4
+
+    call        ang16_mode_17_19
+
+    mov         r6d,       [rsp + 16]
+    mov         [r2 - 16], r6w                        ; restore patched word
+    RET
+
+cglobal intra_pred_ang16_19, 3,7,8,0-(1*mmsize+4)
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp + 16], r5w
+    mov         [r2 + 64], r6w
+
+    lea         r3,        [r2 + 64]
+    add         r1,        r1
+    lea         r6,        [ang_table + 16 * 16]
+    movu        m6,        [r3 + 2]
+    pshufb      m6,        [pw_ang16_16]
+    movu        m5,        [r3 + 12]
+    pshufb      m5,        [pw_ang16_16]
+    punpckhqdq  m5,        m6
+    mov         [rsp],     r3
+    lea         r3,        [r3 + 20]
+    xor         r4,        r4
+    inc         r4
+
+    call        ang16_mode_17_19
+
+    lea         r0,        [r0 + 16]
+    mov         r3,        [rsp]
+    movu        m5,        [r2]
+    lea         r2,        [r2 + 16]
+    xor         r4,        r4
+    inc         r4
+
+    call        ang16_mode_17_19
+
+    mov         r6d,       [rsp + 16]
+    mov         [r3],      r6w
+    RET
+
+; Mode 18 (pure diagonal through the top-left corner): row k is the top
+; reference shifted right by k samples, with the left reference (byte-swapped
+; per 16-bit sample via pw_swap16, since it must be consumed in reverse order)
+; feeding in from below. No interpolation needed - palignr + store only.
+cglobal intra_pred_ang16_18, 3,5,4
+    add         r1,                  r1
+    lea         r4,                  [r1 * 3]
+    movu        m1,                  [r2]               ; top refs 0..7
+    movu        m3,                  [r2 + 16]          ; top refs 8..15
+    movu        m0,                  [r2 + 2 + 64]      ; left refs, reversed below
+    pshufb      m0,                  [pw_swap16]
+    movu        [r0],                m1
+    movu        [r0 + 16],           m3
+    palignr     m2,                  m1, m0, 14
+    movu        [r0 + r1],           m2
+    palignr     m2,                  m3, m1, 14
+    movu        [r0 + r1 + 16],      m2
+    palignr     m2,                  m1, m0, 12
+    movu        [r0 + r1 * 2],       m2
+    palignr     m2,                  m3, m1, 12
+    movu        [r0 + r1 * 2 + 16],  m2
+    palignr     m2,                  m1, m0, 10
+    movu        [r0 + r4],           m2
+    palignr     m2,                  m3, m1, 10
+    movu        [r0 + r4 + 16],      m2
+
+    ; rows 4-7
+    lea         r0,                  [r0 + r1 * 4]
+    palignr     m2,                  m1, m0, 8
+    movu        [r0],                m2
+    palignr     m2,                  m3, m1, 8
+    movu        [r0 + 16],           m2
+    palignr     m2,                  m1, m0, 6
+    movu        [r0 + r1],           m2
+    palignr     m2,                  m3, m1, 6
+    movu        [r0 + r1 + 16],      m2
+    palignr     m2,                  m1, m0, 4
+    movu        [r0 + r1 * 2],       m2
+    palignr     m2,                  m3, m1, 4
+    movu        [r0 + r1 * 2 + 16],  m2
+    palignr     m2,                  m1, m0, 2
+    movu        [r0 + r4],           m2
+    palignr     m3,                  m1, 2
+    movu        [r0 + r4 + 16],      m3
+
+    ; rows 8-15: window slides into the reversed left reference
+    lea         r0,                  [r0 + r1 * 4]
+    movu        [r0],                m0
+    movu        [r0 + 16],           m1
+    movu        m3,                  [r2 + 18 + 64]
+    pshufb      m3,                  [pw_swap16]
+    palignr     m2,                  m0, m3, 14
+    movu        [r0 + r1],           m2
+    palignr     m2,                  m1, m0, 14
+    movu        [r0 + r1 + 16],      m2
+    palignr     m2,                  m0, m3, 12
+    movu        [r0 + r1 * 2],       m2
+    palignr     m2,                  m1, m0, 12
+    movu        [r0 + r1 * 2 + 16],  m2
+    palignr     m2,                  m0, m3, 10
+    movu        [r0 + r4],           m2
+    palignr     m2,                  m1, m0, 10
+    movu        [r0 + r4 + 16],      m2
+
+    lea         r0,                  [r0 + r1 * 4]
+    palignr     m2,                  m0, m3, 8
+    movu        [r0],                m2
+    palignr     m2,                  m1, m0, 8
+    movu        [r0 + 16],           m2
+    palignr     m2,                  m0, m3, 6
+    movu        [r0 + r1],           m2
+    palignr     m2,                  m1, m0, 6
+    movu        [r0 + r1 + 16],      m2
+    palignr     m2,                  m0, m3, 4
+    movu        [r0 + r1 * 2],       m2
+    palignr     m2,                  m1, m0, 4
+    movu        [r0 + r1 * 2 + 16],  m2
+    palignr     m2,                  m0, m3, 2
+    movu        [r0 + r4],           m2
+    palignr     m1,                  m0, 2
+    movu        [r0 + r4 + 16],      m1
+    RET
+
+cglobal intra_pred_ang16_10, 3,6,4              ; mode 10 (pure horizontal) 16x16, 16-bit pixels: row y = left[y+1] replicated
+    mov         r5d,                    r4m     ; r5d = edge-filter flag (4th stack arg)
+    movu        m1,                     [r2 + 2 + 64]       ; left [8 7 6 5 4 3 2 1] (left refs live at r2+64)
+    movu        m3,                     [r2 + 18 + 64]      ; left [16 15 14 13 12 11 10 9]
+    pshufb      m0,                     m1, [pb_01]  ; row 0 value: [1 1 1 1 1 1 1 1]
+    add         r1,                     r1      ; stride: pixels -> bytes
+    lea         r4,                     [r1 * 3]    ; r4 = 3 * stride
+
+    psrldq      m1,                     2
+    pshufb      m2,                     m1, [pb_01]  ; [2 2 2 2 2 2 2 2]
+    movu        [r0 + r1],              m2
+    movu        [r0 + r1 + 16],         m2
+    psrldq      m1,                     2
+    pshufb      m2,                     m1, [pb_01]  ; [3 3 3 3 3 3 3 3]
+    movu        [r0 + r1 * 2],          m2
+    movu        [r0 + r1 * 2 + 16],     m2
+    psrldq      m1,                     2
+    pshufb      m2,                     m1, [pb_01]  ; [4 4 4 4 4 4 4 4]
+    movu        [r0 + r4],              m2
+    movu        [r0 + r4 + 16],         m2
+
+    lea         r3,                     [r0 + r1 *4]
+    psrldq      m1,                     2
+    pshufb      m2,                     m1, [pb_01]  ; [5 5 5 5 5 5 5 5]
+    movu        [r3],                   m2
+    movu        [r3 + 16],              m2
+    psrldq      m1,                     2
+    pshufb      m2,                     m1, [pb_01]  ; [6 6 6 6 6 6 6 6]
+    movu        [r3 + r1],              m2
+    movu        [r3 + r1 + 16],         m2
+    psrldq      m1,                     2
+    pshufb      m2,                     m1, [pb_01]  ; [7 7 7 7 7 7 7 7]
+    movu        [r3 + r1 * 2],          m2
+    movu        [r3 + r1 * 2 + 16],     m2
+    psrldq      m1,                     2
+    pshufb      m2,                     m1, [pb_01]  ; [8 8 8 8 8 8 8 8]
+    movu        [r3 + r4],              m2
+    movu        [r3 + r4 + 16],         m2
+
+    lea         r3,                     [r3 + r1 *4]
+    pshufb      m2,                     m3, [pb_01]  ; [9 9 9 9 9 9 9 9]
+    movu        [r3],                   m2
+    movu        [r3 + 16],              m2
+    psrldq      m3,                     2
+    pshufb      m2,                     m3, [pb_01]  ; [10 10 10 10 10 10 10 10]
+    movu        [r3 + r1],              m2
+    movu        [r3 + r1 + 16],         m2
+    psrldq      m3,                     2
+    pshufb      m2,                     m3, [pb_01]  ; [11 11 11 11 11 11 11 11]
+    movu        [r3 + r1 * 2],          m2
+    movu        [r3 + r1 * 2 + 16],     m2
+    psrldq      m3,                     2
+    pshufb      m2,                     m3, [pb_01]  ; [12 12 12 12 12 12 12 12]
+    movu        [r3 + r4],              m2
+    movu        [r3 + r4 + 16],         m2
+
+    lea         r3,                     [r3 + r1 *4]
+    psrldq      m3,                     2
+    pshufb      m2,                     m3, [pb_01]  ; [13 13 13 13 13 13 13 13]
+    movu        [r3],                   m2
+    movu        [r3 + 16],              m2
+    psrldq      m3,                     2
+    pshufb      m2,                     m3, [pb_01]  ; [14 14 14 14 14 14 14 14]
+    movu        [r3 + r1],              m2
+    movu        [r3 + r1 + 16],         m2
+    psrldq      m3,                     2
+    pshufb      m2,                     m3, [pb_01]  ; [15 15 15 15 15 15 15 15]
+    movu        [r3 + r1 * 2],          m2
+    movu        [r3 + r1 * 2 + 16],     m2
+    psrldq      m3,                     2
+    pshufb      m2,                     m3, [pb_01]  ; [16 16 16 16 16 16 16 16]
+    movu        [r3 + r4],              m2
+    movu        [r3 + r4 + 16],         m2
+    mova        m3,                     m0      ; unfiltered row 0: both 8-pixel halves = left[1]
+
+    cmp         r5d,                    byte 0
+    jz         .quit                            ; no edge filter requested
+
+    ; filter row 0: left[1] + ((top[x] - corner) >> 1), clipped to pixel range
+    pinsrw      m1,                     [r2], 0             ; corner pixel ref[0] -> word 0
+    pshufb      m2,                     m1, [pb_01]  ; [0 0 0 0 0 0 0 0]
+    movu        m1,                     [r2 + 2]            ; top [8 7 6 5 4 3 2 1]
+    movu        m3,                     [r2 + 18]           ; top [16 15 14 13 12 11 10 9]
+    psubw       m1,                     m2
+    psubw       m3,                     m2
+    psraw       m1,                     1
+    psraw       m3,                     1
+    paddw       m3,                     m0
+    paddw       m0,                     m1
+    pxor        m1,                     m1
+    pmaxsw      m0,                     m1      ; clamp below at 0
+    pminsw      m0,                     [pw_pixel_max]      ; clamp above at max pixel value
+    pmaxsw      m3,                     m1
+    pminsw      m3,                     [pw_pixel_max]
+.quit:
+    movu        [r0],                   m0      ; store row 0, pixels 0-7
+    movu        [r0 + 16],              m3      ; store row 0, pixels 8-15
+    RET
+
+cglobal intra_pred_ang16_26, 3,6,4              ; mode 26 (pure vertical) 16x16, 16-bit pixels: every row = top refs
+    mov         r5d,                r4m         ; r5d = edge-filter flag (4th stack arg)
+    movu        m0,                 [r2 + 2]            ; top [8 7 6 5 4 3 2 1]
+    movu        m3,                 [r2 + 18]           ; top [16 15 14 13 12 11 10 9]
+    add         r1,                 r1          ; stride: pixels -> bytes
+    lea         r4,                 [r1 * 3]    ; r4 = 3 * stride
+
+    movu        [r0],               m0
+    movu        [r0 + 16],          m3
+    movu        [r0 + r1],          m0
+    movu        [r0 + r1 + 16],     m3
+    movu        [r0 + r1 * 2],      m0
+    movu        [r0 + r1 * 2 + 16], m3
+    movu        [r0 + r4],          m0
+    movu        [r0 + r4 + 16],     m3
+
+    lea         r3,                 [r0 + r1 *4]
+    movu        [r3],               m0
+    movu        [r3 + 16],          m3
+    movu        [r3 + r1],          m0
+    movu        [r3 + r1 + 16],     m3
+    movu        [r3 + r1 * 2],      m0
+    movu        [r3 + r1 * 2 + 16], m3
+    movu        [r3 + r4],          m0
+    movu        [r3 + r4 + 16],     m3
+
+    lea         r3,                 [r3 + r1 *4]
+    movu        [r3],               m0
+    movu        [r3 + 16],          m3
+    movu        [r3 + r1],          m0
+    movu        [r3 + r1 + 16],     m3
+    movu        [r3 + r1 * 2],      m0
+    movu        [r3 + r1 * 2 + 16], m3
+    movu        [r3 + r4],          m0
+    movu        [r3 + r4 + 16],     m3
+
+    lea         r3,                 [r3 + r1 *4]    ; r3 = row 12; kept alive for the filter tail below
+    movu        [r3],               m0
+    movu        [r3 + 16],          m3
+    movu        [r3 + r1],          m0
+    movu        [r3 + r1 + 16],     m3
+    movu        [r3 + r1 * 2],      m0
+    movu        [r3 + r1 * 2 + 16], m3
+    movu        [r3 + r4],          m0
+    movu        [r3 + r4 + 16],     m3
+
+    cmp         r5d,                byte 0
+    jz         .quit                            ; no edge filter requested
+
+    ; filter column 0: top[1] + ((left[y] - corner) >> 1), clipped to pixel range
+
+    pshufb      m0,                 [pb_01]     ; broadcast top[1]
+    pinsrw      m1,                 [r2], 0             ; corner pixel ref[0] -> word 0
+    pshufb      m2,                 m1, [pb_01]         ; [0 0 0 0 0 0 0 0]
+    movu        m1,                 [r2 + 2 + 64]       ; left [8 7 6 5 4 3 2 1]
+    movu        m3,                 [r2 + 18 + 64]      ; left [16 15 14 13 12 11 10 9]
+    psubw       m1,                 m2
+    psubw       m3,                 m2
+    psraw       m1,                 1
+    psraw       m3,                 1
+    paddw       m3,                 m0
+    paddw       m0,                 m1
+    pxor        m1,                 m1
+    pmaxsw      m0,                 m1          ; clamp below at 0
+    pminsw      m0,                 [pw_pixel_max]      ; clamp above at max pixel value
+    pmaxsw      m3,                 m1
+    pminsw      m3,                 [pw_pixel_max]
+    pextrw      [r0],               m0, 0       ; rewrite column 0, rows 0-3
+    pextrw      [r0 + r1],          m0, 1
+    pextrw      [r0 + r1 * 2],      m0, 2
+    pextrw      [r0 + r4],          m0, 3
+    lea         r0,                 [r0 + r1 * 4]
+    pextrw      [r0],               m0, 4       ; rows 4-7
+    pextrw      [r0 + r1],          m0, 5
+    pextrw      [r0 + r1 * 2],      m0, 6
+    pextrw      [r0 + r4],          m0, 7
+    lea         r0,                 [r0 + r1 * 4]
+    pextrw      [r0],               m3, 0       ; rows 8-11
+    pextrw      [r0 + r1],          m3, 1
+    pextrw      [r0 + r1 * 2],      m3, 2
+    pextrw      [r0 + r4],          m3, 3
+    pextrw      [r3],               m3, 4       ; rows 12-15 via r3 (still points at row 12)
+    pextrw      [r3 + r1],          m3, 5
+    pextrw      [r3 + r1 * 2],      m3, 6
+    pextrw      [r3 + r4],          m3, 7
+.quit:
+    RET
+
+;-------------------------------------------------------------------------------------------------------
+; avx2 code for intra_pred_ang16 mode 2 to 34 start
+;-------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal intra_pred_ang16_2, 3,5,3               ; modes 2 and 34 (+/-32 angle): row y is ref shifted by y+2 pixels
+    lea         r4,                 [r2]        ; save pointer to the above-ref array
+    add         r2,                 64          ; default source: left-ref array (mode 2)
+    cmp         r3m,                byte 34
+    cmove       r2,                 r4          ; mode 34 reads the above-ref array instead
+    add         r1d,                 r1d        ; stride: pixels -> bytes
+    lea         r3,                 [r1 * 3]    ; r3 = 3 * stride
+    movu        m0,                 [r2 + 4]    ; ref[2..17]
+    movu        m1,                 [r2 + 20]   ; ref[10..25] (= m0 shifted by 8 pixels, for lane-wise palignr)
+
+    movu        [r0],               m0
+    palignr     m2,                 m1, m0, 2   ; each successive row shifts the ref window by one pixel
+    movu        [r0 + r1],          m2
+    palignr     m2,                 m1, m0, 4
+    movu        [r0 + r1 * 2],      m2
+    palignr     m2,                 m1, m0, 6
+    movu        [r0 + r3],          m2
+
+    lea         r0,                 [r0 + r1 * 4]
+    palignr     m2,                 m1, m0, 8
+    movu        [r0],               m2
+    palignr     m2,                 m1, m0, 10
+    movu        [r0 + r1],          m2
+    palignr     m2,                 m1, m0, 12
+    movu        [r0 + r1 * 2],      m2
+    palignr     m2,                 m1, m0, 14
+    movu        [r0 + r3],          m2
+
+    movu        m0,                 [r2 + 36]   ; ref[18..33] for rows 8-15
+    lea         r0,                 [r0 + r1 * 4]
+    movu        [r0],               m1
+    palignr     m2,                 m0, m1, 2
+    movu        [r0 + r1],          m2
+    palignr     m2,                 m0, m1, 4
+    movu        [r0 + r1 * 2],      m2
+    palignr     m2,                 m0, m1, 6
+    movu        [r0 + r3],          m2
+
+    lea         r0,                 [r0 + r1 * 4]
+    palignr     m2,                 m0, m1, 8
+    movu        [r0],               m2
+    palignr     m2,                 m0, m1, 10
+    movu        [r0 + r1],          m2
+    palignr     m2,                 m0, m1, 12
+    movu        [r0 + r1 * 2],      m2
+    palignr     m2,                 m0, m1, 14
+    movu        [r0 + r3],          m2
+    RET
+
+%macro TRANSPOSE_STORE_AVX2 11                  ; %1-%8: row regs, %9/%10: scratch regs, %11: byte offset & label suffix
+    jnz             .skip%11                    ; caller's ZF (from test r6d,r6d): nonzero -> store rows untransposed
+    punpckhwd       m%9,  m%1,  m%2             ; 16x16 word transpose via wd -> dq -> qdq interleave stages
+    punpcklwd       m%1,  m%2
+    punpckhwd       m%2,  m%3,  m%4
+    punpcklwd       m%3,  m%4
+
+    punpckldq       m%4,  m%1,  m%3
+    punpckhdq       m%1,  m%3
+    punpckldq       m%3,  m%9,  m%2
+    punpckhdq       m%9,  m%2
+
+    punpckhwd       m%10, m%5,  m%6
+    punpcklwd       m%5,  m%6
+    punpckhwd       m%6,  m%7,  m%8
+    punpcklwd       m%7,  m%8
+
+    punpckldq       m%8,  m%5,  m%7
+    punpckhdq       m%5,  m%7
+    punpckldq       m%7,  m%10, m%6
+    punpckhdq       m%10, m%6
+
+    punpcklqdq      m%6,  m%4,  m%8
+    punpckhqdq      m%2,  m%4,  m%8
+    punpcklqdq      m%4,  m%1,  m%5
+    punpckhqdq      m%8,  m%1,  m%5
+
+    punpcklqdq      m%1,  m%3,  m%7
+    punpckhqdq      m%5,  m%3,  m%7
+    punpcklqdq      m%3,  m%9,  m%10
+    punpckhqdq      m%7,  m%9,  m%10
+
+    movu            [r0 + r1 * 0 + %11], xm%6   ; low 128-bit lanes -> rows 0-7 (at byte offset %11)
+    movu            [r0 + r1 * 1 + %11], xm%2
+    movu            [r0 + r1 * 2 + %11], xm%4
+    movu            [r0 + r4 * 1 + %11], xm%8
+
+    lea             r5, [r0 + r1 * 4]
+    movu            [r5 + r1 * 0 + %11], xm%1
+    movu            [r5 + r1 * 1 + %11], xm%5
+    movu            [r5 + r1 * 2 + %11], xm%3
+    movu            [r5 + r4 * 1 + %11], xm%7
+
+    lea             r5, [r5 + r1 * 4]
+    vextracti128    [r5 + r1 * 0 + %11], m%6, 1 ; high 128-bit lanes -> rows 8-15
+    vextracti128    [r5 + r1 * 1 + %11], m%2, 1
+    vextracti128    [r5 + r1 * 2 + %11], m%4, 1
+    vextracti128    [r5 + r4 * 1 + %11], m%8, 1
+
+    lea             r5, [r5 + r1 * 4]
+    vextracti128    [r5 + r1 * 0 + %11], m%1, 1
+    vextracti128    [r5 + r1 * 1 + %11], m%5, 1
+    vextracti128    [r5 + r1 * 2 + %11], m%3, 1
+    vextracti128    [r5 + r4 * 1 + %11], m%7, 1
+    jmp             .end%11
+.skip%11:                                       ; direct path: each reg already holds one full 16-pixel row
+    movu            [r0 + r1 * 0], m%1
+    movu            [r0 + r1 * 1], m%2
+    movu            [r0 + r1 * 2], m%3
+    movu            [r0 + r4 * 1], m%4
+
+    lea             r0, [r0 + r1 * 4]
+    movu            [r0 + r1 * 0], m%5
+    movu            [r0 + r1 * 1], m%6
+    movu            [r0 + r1 * 2], m%7
+    movu            [r0 + r4 * 1], m%8
+    lea             r0, [r0 + r1 * 4]           ; advance r0 so a second macro call stores rows 8-15
+.end%11:
+%endmacro
+
+;; angle 16, modes 3 and 33 (shared kernel; caller's r6d chooses store order: zero -> transposed)
+cglobal ang16_mode_3_33
+    test            r6d, r6d                        ; ZF consumed by TRANSPOSE_STORE_AVX2 below
+
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m1, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m1, m4                      ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+    punpckhwd       m1, m4                          ; [25 24 24 23 23 22 22 21 17 16 16 15 15 14 14 13]
+
+    pmaddwd         m4, m3, [r3 + 10 * 32]          ; [26] = fractional weight for this row
+    paddd           m4, [pd_16]                     ; rounding: (sum + 16) >> 5
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 + 10 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m5, m0, m3, 4                   ; [14 13 13 12 12 11 11 10  6  5  5  4  4  3  3  2]
+    pmaddwd         m5, [r3 + 4 * 32]               ; [20]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    palignr         m6, m2, m0, 4                   ; [18 17 17 16 16 15 15 14 10  9  9  8  8  7  7  6]
+    pmaddwd         m6, [r3 + 4 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    palignr         m6, m0, m3, 8                   ; [15 14 14 13 13 12 12 11  7  6  6  5  5  4  4  3]
+    pmaddwd         m6, [r3 - 2 * 32]               ; [14]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    palignr         m7, m2, m0, 8                   ; [19 18 18 17 17 16 16 15 11 10 10  9  9  8  8  7]
+    pmaddwd         m7, [r3 - 2 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m7, m0, m3, 12                  ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+    pmaddwd         m7, [r3 - 8 * 32]               ; [8]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m8, m2, m0, 12                  ; [20 19 19 18 18 17 17 16 12 11 11 10 10  9  9  8]
+    pmaddwd         m8, [r3 - 8 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m0, [r3 - 14 * 32]          ; [2]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m3, m2, [r3 - 14 * 32]          ; [2]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m8, m3
+
+    pmaddwd         m9, m0, [r3 + 12 * 32]          ; [28]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, m2, [r3 + 12 * 32]          ; [28]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    palignr         m10, m2, m0, 4                  ; [18 17 17 16 16 15 15 14 10  9  9  8  8  7  7  6]
+    pmaddwd         m10, [r3 + 6 * 32]              ; [22]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    palignr         m3, m1, m2, 4                   ; [22 21 21 20 20 19 19 18 14 13 13 12 12 11 11 10]
+    pmaddwd         m3, [r3 + 6 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m10, m3
+
+    palignr         m11, m2, m0, 8                  ; [19 18 18 17 17 16 16 15 11 10 10  9  9  8  8  7]
+    pmaddwd         m11, [r3]                       ; [16]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    palignr         m3, m1, m2, 8                   ; [23 22 22 21 21 20 20 19 15 14 14 13 13 12 12 11]
+    pmaddwd         m3, [r3]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m11, m3
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 3, 0
+
+    palignr         m4, m2, m0, 12                  ; [20 19 19 18 18 17 17 16 12 11 11 10 10  9  9  8]
+    pmaddwd         m4, [r3 - 6 * 32]               ; [10]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    palignr         m5, m1, m2, 12                  ; [24 23 23 22 22 21 21 20 16 15 15 14 14 13 13 12]
+    pmaddwd         m5, [r3 - 6 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m2, [r3 - 12 * 32]          ; [4]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m1, [r3 - 12 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    movu            m0, [r2 + 34]                   ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    pmaddwd         m6, m2, [r3 + 14 * 32]          ; [30]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, m1, [r3 + 14 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m3, m0, m0, 2                   ; [ x 32 31 30 29 28 27 26  x 24 23 22 21 20 19 18]
+    punpcklwd       m0, m3                          ; [29 28 28 27 27 26 26 25 21 20 20 19 19 18 18 17]
+
+    palignr         m7, m1, m2, 4
+    pmaddwd         m7, [r3 + 8 * 32]               ; [24]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m8, m0, m1, 4
+    pmaddwd         m8, [r3 + 8 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m8, m1, m2, 8
+    pmaddwd         m8, [r3 + 2 * 32]               ; [18]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    palignr         m9, m0, m1, 8
+    pmaddwd         m9, [r3 + 2 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m9, m1, m2, 12
+    pmaddwd         m9, [r3 - 4 * 32]               ; [12]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    palignr         m3, m0, m1, 12
+    pmaddwd         m3, [r3 - 4 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m1, [r3 - 10 * 32]              ; [6]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    pmaddwd         m0, [r3 - 10 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m1, m0
+
+    movu            m2, [r2 + 28]                   ; row 15: fraction is 0, plain copy of ref[14..29]
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 1, 2, 0, 3, 16
+    ret
+
+;; angle 16, modes 4 and 32 (shared kernel; caller's r6d chooses store order: zero -> transposed)
+cglobal ang16_mode_4_32
+    test            r6d, r6d                        ; ZF consumed by TRANSPOSE_STORE_AVX2 below
+
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m1, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m1, m4                      ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+    punpckhwd       m1, m4                          ; [25 24 24 23 23 22 22 21 17 16 16 15 15 14 14 13]
+
+    pmaddwd         m4, m3, [r3 + 3 * 32]           ; [21] = fractional weight for this row
+    paddd           m4, [pd_16]                     ; rounding: (sum + 16) >> 5
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 + 3 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m6, m0, m3, 4                   ; [14 13 13 12 12 11 11 10  6  5  5  4  4  3  3  2]
+    pmaddwd         m5, m6, [r3 - 8 * 32]           ; [10]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    palignr         m7, m2, m0, 4                   ; [18 17 17 16 16 15 15 14 10  9  9  8  8  7  7  6]
+    pmaddwd         m8, m7, [r3 - 8 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, [r3 + 13 * 32]              ; [31]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, [r3 + 13 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m7, m0, m3, 8                   ; [15 14 14 13 13 12 12 11  7  6  6  5  5  4  4  3]
+    pmaddwd         m7, [r3 + 2 * 32]               ; [20]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m8, m2, m0, 8                   ; [19 18 18 17 17 16 16 15 11 10 10  9  9  8  8  7]
+    pmaddwd         m8, [r3 + 2 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m9, m0, m3, 12
+    pmaddwd         m8, m9, [r3 - 9 * 32]           ; [9]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    palignr         m3, m2, m0, 12
+    pmaddwd         m10, m3, [r3 - 9 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m8, m10
+
+    pmaddwd         m9, [r3 + 12 * 32]              ; [30]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, [r3 + 12 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m10, m0, [r3 + 1 * 32]          ; [19]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m3, m2, [r3 + 1 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m10, m3
+
+    palignr         m11, m2, m0, 4
+    pmaddwd         m11, [r3 - 10 * 32]             ; [8]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    palignr         m3, m1, m2, 4
+    pmaddwd         m3, [r3 - 10 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m11, m3
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 3, 0
+
+    palignr         m4, m2, m0, 4
+    pmaddwd         m4, [r3 + 11 * 32]              ; [29]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    palignr         m5, m1, m2, 4
+    pmaddwd         m5, [r3 + 11  * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m5, m2, m0, 8
+    pmaddwd         m5, [r3]                        ; [18]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    palignr         m6, m1, m2, 8
+    pmaddwd         m6, [r3]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    palignr         m7, m2, m0, 12
+    pmaddwd         m6, m7, [r3 - 11 * 32]          ; [7]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    palignr         m8, m1, m2, 12
+    pmaddwd         m3, m8, [r3 - 11 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m6, m3
+
+    pmaddwd         m7, [r3 + 10 * 32]              ; [28]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, [r3 + 10 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    movu            m0, [r2 + 34]                   ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    pmaddwd         m8, m2, [r3 - 1 * 32]           ; [17]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m1, [r3 - 1 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m3, m0, m0, 2                   ; [ x 32 31 30 29 28 27 26  x 24 23 22 21 20 19 18]
+    punpcklwd       m0, m3                          ; [29 28 28 27 27 26 26 25 21 20 20 19 19 18 18 17]
+
+    palignr         m10, m1, m2, 4
+    pmaddwd         m9, m10, [r3 - 12 * 32]         ; [6]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    palignr         m11, m0, m1, 4
+    pmaddwd         m3, m11, [r3 - 12 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m10, [r3 + 9 * 32]              ; [27]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, [r3 + 9 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m3, m1, m2, 8
+    pmaddwd         m3, [r3 - 2 * 32]               ; [16]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    palignr         m0, m1, 8
+    pmaddwd         m0, [r3 - 2 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 3, 0, 1, 16
+    ret
+
+;; angle 16, modes 5 and 31 (shared kernel; caller's r6d chooses store order: zero -> transposed)
+cglobal ang16_mode_5_31
+    test            r6d, r6d                        ; ZF consumed by TRANSPOSE_STORE_AVX2 below
+
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m1, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m1, m4                      ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+    punpckhwd       m1, m4                          ; [25 24 24 23 23 22 22 21 17 16 16 15 15 14 14 13]
+
+    pmaddwd         m4, m3, [r3 + 1 * 32]           ; [17] = fractional weight for this row
+    paddd           m4, [pd_16]                     ; rounding: (sum + 16) >> 5
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 + 1 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m6, m0, m3, 4
+    pmaddwd         m5, m6, [r3 - 14 * 32]          ; [2]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    palignr         m7, m2, m0, 4
+    pmaddwd         m8, m7, [r3 - 14 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, [r3 + 3 * 32]               ; [19]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, [r3 + 3 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m8, m0, m3, 8
+    pmaddwd         m7, m8, [r3 - 12 * 32]          ; [4]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m9, m2, m0, 8
+    pmaddwd         m10, m9, [r3 - 12 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m7, m10
+
+    pmaddwd         m8, [r3 + 5 * 32]               ; [21]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 + 5 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m10, m0, m3, 12
+    pmaddwd         m9, m10, [r3 - 10 * 32]         ; [6]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    palignr         m11, m2, m0, 12
+    pmaddwd         m3, m11, [r3 - 10 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m10, [r3 + 7 * 32]              ; [23]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, [r3 + 7 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    pmaddwd         m11, m0, [r3 - 8 * 32]          ; [8]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m3, m2, [r3 - 8 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m11, m3
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 3, 0
+
+    pmaddwd         m4, m0, [r3 + 9 * 32]           ; [25]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m2, [r3 + 9  * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m6, m2, m0, 4
+    pmaddwd         m5, m6, [r3 - 6 * 32]           ; [10]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    palignr         m7, m1, m2, 4
+    pmaddwd         m3, m7, [r3 - 6 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m5, m3
+
+    pmaddwd         m6, [r3 + 11 * 32]              ; [27]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, [r3 + 11 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m8, m2, m0, 8
+    pmaddwd         m7, m8, [r3 - 4 * 32]           ; [12]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m9, m1, m2, 8
+    pmaddwd         m3, m9, [r3 - 4 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m7, m3
+
+    pmaddwd         m8, [r3 + 13 * 32]              ; [29]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 + 13 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m10, m2, m0, 12
+    pmaddwd         m9, m10, [r3 - 2 * 32]          ; [14]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    palignr         m11, m1, m2, 12
+    pmaddwd         m3, m11, [r3 - 2 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m10, [r3 + 15 * 32]             ; [31]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, [r3 + 15 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    pmaddwd         m2, [r3]                        ; [16]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    pmaddwd         m1, [r3]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m2, m1
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 2, 0, 1, 16
+    ret
+
+;; angle 16, modes 6 and 30
+cglobal ang16_mode_6_30
+    test            r6d, r6d
+
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m1, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m1, m4                      ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+    punpckhwd       m1, m4                          ; [25 24 24 23 23 22 22 21 17 16 16 15 15 14 14 13]
+
+    pmaddwd         m4, m3, [r3 - 2 * 32]           ; [13]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 2 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 11 * 32]          ; [26]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 + 11 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    palignr         m7, m0, m3, 4
+    pmaddwd         m6, m7, [r3 - 8 * 32]           ; [7]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    palignr         m8, m2, m0, 4
+    pmaddwd         m9, m8, [r3 - 8 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, [r3 + 5 * 32]               ; [20]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, [r3 + 5 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m10, m0, m3, 8
+    pmaddwd         m8, m10, [r3 - 14 * 32]         ; [1]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    palignr         m11, m2, m0, 8
+    pmaddwd         m9, m11, [r3 - 14 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m10, [r3 - 1 * 32]          ; [14]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m12, m11, [r3 - 1 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m9, m12
+
+    pmaddwd         m10, [r3 + 12 * 32]             ; [27]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, [r3 + 12 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m11, m0, m3, 12
+    pmaddwd         m11, [r3 - 7 * 32]              ; [8]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    palignr         m12, m2, m0, 12
+    pmaddwd         m12, [r3 - 7 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    palignr         m4, m0, m3, 12
+    pmaddwd         m4, [r3 + 6 * 32]               ; [21]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    palignr         m5, m2, m0, 12
+    pmaddwd         m5, [r3 + 6  * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m0, [r3 - 13 * 32]          ; [2]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m3, m2, [r3 - 13 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m5, m3
+
+    pmaddwd         m6, m0, [r3]                    ; [15]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, m2, [r3]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    pmaddwd         m7, m0, [r3 + 13 * 32]          ; [28]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m3, m2, [r3 + 13 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m7, m3
+
+    palignr         m9, m2, m0, 4
+    pmaddwd         m8, m9, [r3 - 6 * 32]           ; [9]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    palignr         m3, m1, m2, 4
+    pmaddwd         m10, m3, [r3 - 6 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m8, m10
+
+    pmaddwd         m9, [r3 + 7 * 32]               ; [22]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, [r3 + 7 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    palignr         m11, m2, m0, 8
+    pmaddwd         m10, m11, [r3 - 12 * 32]        ; [3]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    palignr         m3, m1, m2, 8
+    pmaddwd         m12, m3, [r3 - 12 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    pmaddwd         m11, [r3 + 1 * 32]              ; [16]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m3, [r3 + 1 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m11, m3
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 0, 1, 16
+    ret
+
+;; angle 16, modes 7 and 29
+;;---------------------------------------------------------------------------
+;; 16x16 angular intra-prediction helper (16-bit pixels, AVX2).
+;; Register contract (set by caller -- inferred from usage, confirm at call
+;; sites): r2 = reference-sample row, r3 = coefficient table pointer with a
+;; 32-byte row stride, biased so [r3] is the fraction-16 row, r6d = mode
+;; selector whose ZF is consumed by TRANSPOSE_STORE_AVX2 (presumably picks
+;; transposed vs. direct store -- verify against the macro definition).
+;; Each output row: pmaddwd(interleaved neighbour pairs, coeffs), round via
+;; pd_16, >> 5, packusdw back to words.  Bracketed numbers in the inline
+;; comments are the per-row interpolation fractions.
+;;---------------------------------------------------------------------------
+cglobal ang16_mode_7_29
+    test            r6d, r6d                        ; ZF flag for the store macro below
+
+    ; load refs [1..17] and [9..25], interleave into overlapping word pairs
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m2, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m4                          ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+
+    ; rows 0-7: fractions 9, 18, 27, 4, 13, 22, 31, 8
+    pmaddwd         m4, m3, [r3 - 8 * 32]           ; [9]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 8 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 1 * 32]           ; [18]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 + 1 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m3, [r3 + 10 * 32]          ; [27]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m0, [r3 + 10 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    ; palignr by 4 shifts the pair window forward one reference sample
+    palignr         m10, m0, m3, 4
+    pmaddwd         m7, m10, [r3 - 13 * 32]         ; [4]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m11, m2, m0, 4
+    pmaddwd         m8, m11, [r3 - 13 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m10, [r3 - 4 * 32]          ; [13]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m11, [r3 - 4 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m10, [r3 + 5 * 32]          ; [22]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m12, m11, [r3 + 5 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m9, m12
+
+    pmaddwd         m10, [r3 + 14 * 32]             ; [31]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, [r3 + 14 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m11, m0, m3, 8
+    pmaddwd         m11, [r3 - 9 * 32]              ; [8]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    palignr         m12, m2, m0, 8
+    pmaddwd         m12, [r3 - 9 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    ; emit rows 0-7 (transpose or direct, depending on the r6d flag above)
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 0
+
+    ; rows 8-15: fractions 17, 26, 3, 12, 21, 30, 7, 16
+    palignr         m5, m0, m3, 8
+    pmaddwd         m4, m5, [r3]                    ; [17]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    palignr         m6, m2, m0, 8
+    pmaddwd         m7, m6, [r3]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m4, m7
+
+    pmaddwd         m5, [r3 + 9 * 32]               ; [26]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, [r3 + 9 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    palignr         m9, m0, m3, 12
+    pmaddwd         m6, m9, [r3 - 14 * 32]          ; [3]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    palignr         m3, m2, m0, 12
+    pmaddwd         m7, m3, [r3 - 14 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    pmaddwd         m7, m9, [r3 - 5 * 32]           ; [12]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m3, [r3 - 5 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m9, [r3 + 4 * 32]           ; [21]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m10, m3, [r3 + 4 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m8, m10
+
+    pmaddwd         m9, [r3 + 13 * 32]              ; [30]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, [r3 + 13 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m10, m0, [r3 - 10 * 32]         ; [7]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m2, [r3 - 10 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    pmaddwd         m0, [r3 - 1 * 32]               ; [16]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    pmaddwd         m2, [r3 - 1 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m0, m2
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 16
+    ret
+
+;; angle 16, modes 8 and 28
+;;---------------------------------------------------------------------------
+;; 16x16 angular intra-prediction helper (16-bit pixels, AVX2).
+;; Same register contract as its siblings (inferred -- confirm at call
+;; sites): r2 = reference samples, r3 = fraction-coefficient table pointer
+;; (32-byte stride, [r3] = fraction 16), r6d = flag consumed by
+;; TRANSPOSE_STORE_AVX2.  Modes 8/28 advance the reference window slowly, so
+;; six rows reuse the same pair vectors (m3/m0) with fractions stepping by 5.
+;;---------------------------------------------------------------------------
+cglobal ang16_mode_8_28
+    test            r6d, r6d                        ; ZF flag for the store macro below
+
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m2, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m4                          ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+
+    ; rows 0-5 share the m3/m0 window; fractions 5,10,15,20,25,30
+    pmaddwd         m4, m3, [r3 - 10 * 32]           ; [5]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 10 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 - 5 * 32]           ; [10]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 - 5 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m3, [r3]                    ; [15]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m0, [r3]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, m3, [r3 + 5 * 32]           ; [20]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m0, [r3 + 5 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m3, [r3 + 10 * 32]          ; [25]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m0, [r3 + 10 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m3, [r3 + 15 * 32]          ; [30]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m0, [r3 + 15 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    ; advance the window one sample for rows 6-7 (fractions 3, 8)
+    palignr         m11, m0, m3, 4
+    pmaddwd         m10, m11, [r3 - 12 * 32]        ; [3]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    palignr         m1, m2, m0, 4
+    pmaddwd         m12, m1, [r3 - 12 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    pmaddwd         m11, [r3 - 7 * 32]              ; [8]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m1, [r3 - 7 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m11, m1
+
+    ; emit rows 0-7
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 0
+
+    ; rows 8-11 on the shifted-by-one window; fractions 13,18,23,28
+    palignr         m7, m0, m3, 4
+    pmaddwd         m4, m7, [r3 - 2 * 32]           ; [13]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    palignr         m1, m2, m0, 4
+    pmaddwd         m5, m1, [r3 - 2 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m7, [r3 + 3 * 32]           ; [18]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m1, [r3 + 3 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m7, [r3 + 8 * 32]           ; [23]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m1, [r3 + 8 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, [r3 + 13 * 32]              ; [28]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m1, [r3 + 13 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m7, m1
+
+    ; rows 12-15 on the shifted-by-two window; fractions 1,6,11,16
+    palignr         m1, m0, m3, 8
+    pmaddwd         m8, m1, [r3 - 14 * 32]          ; [1]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    palignr         m2, m0, 8
+    pmaddwd         m9, m2, [r3 - 14 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m1, [r3 - 9 * 32]           ; [6]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, m2, [r3 - 9 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m3, m1, [r3 - 4 * 32]           ; [11]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    pmaddwd         m0, m2, [r3 - 4 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+
+    pmaddwd         m1, [r3 + 1 * 32]               ; [16]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    pmaddwd         m2, [r3 + 1 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m1, m2
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 3, 1, 0, 2, 16
+    ret
+
+;; angle 16, modes 9 and 27
+;;---------------------------------------------------------------------------
+;; 16x16 angular intra-prediction helper (16-bit pixels, AVX2).
+;; Same register contract as its siblings (inferred -- confirm at call
+;; sites): r2 = reference samples, r3 = fraction table ([r3] = fraction 16,
+;; 32-byte stride), r6d = flag for TRANSPOSE_STORE_AVX2.  Modes 9/27 are the
+;; shallowest angles: all 16 rows use the same neighbour pair (m3/m0), only
+;; the fraction steps (by 2 each row: 2,4,...,30), so no palignr shifting
+;; is needed at all.
+;;---------------------------------------------------------------------------
+cglobal ang16_mode_9_27
+    test            r6d, r6d                        ; ZF flag for the store macro below
+
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m2, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m4                          ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+
+    ; rows 0-7: fractions 2..16 in steps of 2
+    pmaddwd         m4, m3, [r3 - 14 * 32]          ; [2]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 14 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 - 12 * 32]          ; [4]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 - 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m3, [r3 - 10 * 32]          ; [6]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m0, [r3 - 10 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, m3, [r3 - 8 * 32]           ; [8]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m0, [r3 - 8 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m3, [r3 - 6 * 32]           ; [10]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m0, [r3 - 6 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m3, [r3 - 4 * 32]           ; [12]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m0, [r3 - 4 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m3, [r3 - 2 * 32]          ; [14]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m1, m0, [r3 - 2 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m10, m1
+
+    pmaddwd         m11, m3, [r3]                   ; [16]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m1, m0, [r3]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m11, m1
+
+    ; emit rows 0-7
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 2, 1, 0
+
+    ; rows 8-15: fractions 18..30, then fraction 32 row = plain copy below
+    pmaddwd         m4, m3, [r3 + 2 * 32]           ; [18]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 + 2 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 4 * 32]           ; [20]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m0, [r3 + 4 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m3, [r3 + 6 * 32]           ; [22]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m0, [r3 + 6 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m3, [r3 + 8 * 32]           ; [24]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m1, m0, [r3 + 8 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m7, m1
+
+    pmaddwd         m8, m3, [r3 + 10 * 32]          ; [26]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m0, [r3 + 10 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m3, [r3 + 12 * 32]          ; [28]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m1, m0, [r3 + 12 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m9, m1
+
+    pmaddwd         m3, [r3 + 14 * 32]              ; [30]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    pmaddwd         m0, [r3 + 14 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+
+    ; last row is an exact copy of refs [2..17] (fraction 32 degenerates)
+    movu            m1, [r2 + 4]
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 3, 1, 0, 2, 16
+    ret
+
+;; angle 16, modes 11 and 25
+;;---------------------------------------------------------------------------
+;; 16x16 angular intra-prediction helper (16-bit pixels, AVX2).
+;; Same register contract as its siblings (inferred -- confirm at call
+;; sites): r2 = reference samples, r3 = fraction table ([r3] = fraction 16,
+;; 32-byte stride), r6d = flag for TRANSPOSE_STORE_AVX2.  Modes 11/25 mirror
+;; 9/27: one fixed neighbour pair (built from refs [0..16] rather than
+;; [1..17]) with the fraction DESCENDING by 2 each row (30,28,...,2), and a
+;; final plain-copy row.
+;;---------------------------------------------------------------------------
+cglobal ang16_mode_11_25
+    test            r6d, r6d                        ; ZF flag for the store macro below
+
+    movu            m0, [r2]                        ; [15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu            m1, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    punpcklwd       m3, m0, m1                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    punpckhwd       m0, m1                          ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+
+    ; rows 0-7: fractions 30 down to 16 in steps of 2
+    pmaddwd         m4, m3, [r3 + 14 * 32]          ; [30]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 + 14 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 12 * 32]          ; [28]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 + 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m3, [r3 + 10 * 32]          ; [26]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m0, [r3 + 10 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, m3, [r3 + 8 * 32]           ; [24]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m0, [r3 + 8 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m3, [r3 + 6 * 32]           ; [22]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m0, [r3 + 6 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m3, [r3 + 4 * 32]           ; [20]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m0, [r3 + 4 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m3, [r3 + 2 * 32]          ; [18]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m1, m0, [r3 + 2 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m10, m1
+
+    pmaddwd         m11, m3, [r3]                   ; [16]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m1, m0, [r3]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m11, m1
+
+    ; emit rows 0-7
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 2, 1, 0
+
+    ; rows 8-15: fractions 14 down to 2, then the plain-copy row
+    pmaddwd         m4, m3, [r3 - 2 * 32]           ; [14]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 2 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 - 4 * 32]           ; [12]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m0, [r3 - 4 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m3, [r3 - 6 * 32]           ; [10]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m0, [r3 - 6 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m3, [r3 - 8 * 32]           ; [8]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m1, m0, [r3 - 8 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m7, m1
+
+    pmaddwd         m8, m3, [r3 - 10 * 32]          ; [6]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m0, [r3 - 10 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m3, [r3 - 12 * 32]          ; [4]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m1, m0, [r3 - 12 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m9, m1
+
+    pmaddwd         m3, [r3 - 14 * 32]              ; [2]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    pmaddwd         m0, [r3 - 14 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+
+    ; last row is an exact copy of refs [0..15] (fraction 0 degenerates)
+    movu            m1, [r2]
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 3, 1, 0, 2, 16
+    ret
+
+;; angle 16, modes 12 and 24
+;;---------------------------------------------------------------------------
+;; 16x16 angular intra-prediction helper (16-bit pixels, AVX2).
+;; Same register contract as its siblings (inferred -- confirm at call
+;; sites): r2 = reference samples, r3 = fraction table ([r3] = fraction 16,
+;; 32-byte stride), r6d = flag for TRANSPOSE_STORE_AVX2.
+;; NOTE(review): m1 is READ below (vinserti128) but never written in this
+;; routine -- the caller must preload it with the extra projected reference
+;; samples needed when the window steps backwards.  Confirm at call sites.
+;;---------------------------------------------------------------------------
+cglobal ang16_mode_12_24
+    test            r6d, r6d                        ; ZF flag for the store macro below
+
+    movu            m0, [r2]                        ; [15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu            m4, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    punpcklwd       m3, m0, m4                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    punpckhwd       m2, m0, m4                      ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+
+    ; rows 0-5 on the base window; fractions 27,22,17,12,7,2 (step -5)
+    pmaddwd         m4, m3, [r3 + 11 * 32]          ; [27]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m2, [r3 + 11 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 6 * 32]           ; [22]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m2, [r3 + 6 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m3, [r3 + 1 * 32]           ; [17]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m2, [r3 + 1 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, m3, [r3 - 4 * 32]           ; [12]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m2, [r3 - 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m3, [r3 - 9 * 32]           ; [7]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m2, [r3 - 9 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m3, [r3 - 14 * 32]          ; [2]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m2, [r3 - 14 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m9, m2
+
+    ; rebuild duplicated-sample vectors and splice in the caller-provided
+    ; projected reference (m1) to step the window back one sample
+    punpcklwd       m3, m0, m0                      ; [11 11 10 10  9  9  8  8  3  3  2  2  1  1  0  0]
+    punpckhwd       m0, m0                          ; [15 15 14 14 13 13 12 12  7  7  6  6  5  5  4  4]
+    vinserti128     m1, m1, xm0, 1                  ; [ 7  7  6  6  5  5  4  4  6  6 13 13  x  x  x  x]
+
+    palignr         m2, m3, m1, 14
+    palignr         m13, m0, m3, 14
+
+    ; rows 6-7 on the stepped-back window; fractions 29, 24
+    pmaddwd         m10, m2, [r3 + 13 * 32]         ; [29]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m13, [r3 + 13 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    pmaddwd         m11, m2, [r3 + 8 * 32]          ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m13, [r3 + 8 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m11, m13
+
+    ; emit rows 0-7
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    palignr         m13, m0, m3, 14                 ; m13 was clobbered above; recompute
+
+    ; rows 8-11; fractions 19,14,9,4
+    pmaddwd         m4, m2, [r3 + 3 * 32]           ; [19]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 + 3 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m2, [r3 - 2 * 32]           ; [14]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m13, [r3 - 2 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m2, [r3 - 7 * 32]           ; [9]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m13, [r3 - 7 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m2, [r3 - 12 * 32]          ; [4]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 - 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    ; step the window back once more for rows 12-15
+    palignr         m0, m3, 10
+    palignr         m3, m1, 10
+
+    pmaddwd         m8, m3, [r3 + 15 * 32]          ; [31]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m0, [r3 + 15 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m3, [r3 + 10 * 32]          ; [26]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m1, m0, [r3 + 10 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m9, m1
+
+    pmaddwd         m1, m3, [r3 + 5 * 32]           ; [21]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    pmaddwd         m2, m0, [r3 + 5 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m1, m2
+
+    pmaddwd         m3, [r3]                        ; [16]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    pmaddwd         m0, [r3]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 1, 3, 0, 2, 16
+    ret
+
+;; angle 16, modes 13 and 23
+;;---------------------------------------------------------------------------
+;; 16x16 angular intra-prediction helper (16-bit pixels, AVX2).
+;; Same register contract as its siblings (inferred -- confirm at call
+;; sites): r2 = reference samples, r3 = fraction table ([r3] = fraction 16,
+;; 32-byte stride), r6d = flag for TRANSPOSE_STORE_AVX2.  Steeper than
+;; 12/24, so the reference window steps back every few rows (palignr by
+;; 14, 10, 6, 2 against the caller-preloaded projection in m1).
+;; NOTE(review): m1 is READ below (vinserti128) but never written in this
+;; routine -- the caller must preload the projected reference samples.
+;; Confirm at call sites.
+;;---------------------------------------------------------------------------
+cglobal ang16_mode_13_23
+    test            r6d, r6d                        ; ZF flag for the store macro below
+
+    movu            m0, [r2]                        ; [15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu            m4, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    punpcklwd       m3, m0, m4                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    punpckhwd       m2, m0, m4                      ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+
+    ; rows 0-2 on the base window; fractions 23, 14, 5 (step -9)
+    pmaddwd         m4, m3, [r3 + 7 * 32]           ; [23]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m2, [r3 + 7 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 - 2 * 32]           ; [14]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m2, [r3 - 2 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m3, [r3 - 11 * 32]          ; [5]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m2, [r3 - 11 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m6, m2
+
+    ; rebuild duplicated-sample vectors and splice in the caller-provided
+    ; projected reference (m1) so palignr can step the window backwards
+    punpcklwd       m3, m0, m0                      ; [11 11 10 10  9  9  8  8  3  3  2  2  1  1  0  0]
+    punpckhwd       m0, m0                          ; [15 15 14 14 13 13 12 12  7  7  6  6  5  5  4  4]
+    vinserti128     m1, m1, xm0, 1                  ; [ 7  7  6  6  5  5  4  4  4  4  7  7 11 11 14 14]
+
+    palignr         m2, m3, m1, 14
+    palignr         m13, m0, m3, 14
+
+    ; rows 3-6 on the first stepped-back window; fractions 28,19,10,1
+    pmaddwd         m7, m2, [r3 + 12 * 32]          ; [28]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 + 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m2, [r3 + 3 * 32]           ; [19]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 + 3 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m2, [r3 - 6 * 32]           ; [10]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m13, [r3 - 6 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m2, [r3 - 15 * 32]         ; [1]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m13, [r3 - 15 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    ; second step back for row 7; fraction 24
+    palignr         m2, m3, m1, 10
+    palignr         m13, m0, m3, 10
+
+    pmaddwd         m11, m2, [r3 + 8 * 32]          ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m13, [r3 + 8 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m11, m13
+
+    ; emit rows 0-7
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    palignr         m13, m0, m3, 10                 ; m13 was clobbered above; recompute
+
+    ; rows 8-9; fractions 15, 6
+    pmaddwd         m4, m2, [r3 - 1 * 32]           ; [15]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 - 1 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m2, [r3 - 10 * 32]          ; [6]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m13, [r3 - 10 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    ; third step back for rows 10-13; fractions 29,20,11,2
+    palignr         m2, m3, m1, 6
+    palignr         m13, m0, m3, 6
+
+    pmaddwd         m6, m2, [r3 + 13 * 32]          ; [29]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m13, [r3 + 13 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m2, [r3 + 4 * 32]           ; [20]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 + 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m2, [r3 - 5 * 32]           ; [11]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 - 5 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m2, [r3 - 14 * 32]          ; [2]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m13, [r3 - 14 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m9, m13
+
+    ; final step back for rows 14-15; fractions 25, 16
+    palignr         m0, m3, 2
+    palignr         m3, m1, 2
+
+    pmaddwd         m1, m3, [r3 + 9 * 32]           ; [25]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    pmaddwd         m2, m0, [r3 + 9 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m1, m2
+
+    pmaddwd         m3, [r3]                        ; [16]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    pmaddwd         m0, [r3]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 1, 3, 0, 2, 16
+    ret
+
+;; angle 16, modes 14 and 22
+;; Produces a 16x16 angular intra prediction for the symmetric mode pair
+;; 14/22.  On entry (prepared by the intra_pred_ang16_* wrappers):
+;;   r0 = dst, r1 = stride in bytes, r4 = 3*stride,
+;;   r2 = 16-bit reference samples, r3 = biased ang_table_avx2 pointer,
+;;   r6d = 0/1 selecting which mode of the pair is produced.
+;; Every pmaddwd/paddd/psrld/packusdw group computes one predicted row as
+;; (w0*p0 + w1*p1 + 16) >> 5 over interleaved sample pairs, then repacks to
+;; 16-bit with unsigned saturation.  The ";[n]" notes give the interpolation
+;; fraction selected by each [r3 +/- k*32] coefficient row.
+cglobal ang16_mode_14_22
+    test            r6d, r6d
+    ; ZF now carries the mode flag; the vector instructions below leave
+    ; EFLAGS untouched, so TRANSPOSE_STORE_AVX2 can still branch on it.
+
+    movu            m0, [r2]                        ; [15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu            m4, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    ; interleave adjacent samples so pmaddwd can weight (p[i], p[i+1]) pairs
+    punpcklwd       m3, m0, m4                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    punpckhwd       m2, m0, m4                      ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+
+    pmaddwd         m4, m3, [r3 + 3 * 32]           ; [19]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m2, [r3 + 3 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 - 10 * 32]          ; [6]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m2, [r3 - 10 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m5, m2
+
+    punpcklwd       m3, m0, m0                      ; [11 11 10 10  9  9  8  8  3  3  2  2  1  1  0  0]
+    punpckhwd       m0, m0                          ; [15 15 14 14 13 13 12 12  7  7  6  6  5  5  4  4]
+    ; NOTE(review): the low 128-bit lanes of m1/m14 are inherited from setup
+    ; done before this routine is called - verify against the call sites.
+    vinserti128     m1, m1, xm0, 1                  ; [ 7  7  6  6  5  5  4  4  2  2  5  5  7  7 10 10]
+    vinserti128     m14, m14, xm3, 1                ; [ 3  3  2  2  1  1  0  0 12 12 15 15  x  x  x  x]
+
+    ; palignr steps the interleaved reference window by whole 16-bit samples
+    ; (shift counts are in bytes, so 2 bytes = one sample)
+    palignr         m2, m3, m1, 14
+    palignr         m13, m0, m3, 14
+
+    pmaddwd         m6, m2, [r3 + 9 * 32]           ; [25]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m13, [r3 + 9 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, m2, [r3 - 4 * 32]           ; [12]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 - 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m2, m3, m1, 10                  ; [10  9  9  8  8  7  7  6  2  1  1  0  0  2  2  5]
+    palignr         m13, m0, m3, 10                 ; [14 13 13 12 12 11 11 10  6  5  5  4  4  3  3  2]
+
+    pmaddwd         m8, m2, [r3 + 15 * 32]          ; [31]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 + 15 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m2, [r3 + 2 * 32]           ; [18]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m13, [r3 + 2 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m2, [r3 - 11 * 32]         ; [5]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m13, [r3 - 11 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    palignr         m2, m3, m1, 6                   ; [ 9  8  8  7  7  6  6  5  1  0  0  2  2  5  5  7]
+    palignr         m13, m0, m3, 6                  ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+
+    pmaddwd         m11, m2, [r3 + 8 * 32]          ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m13, [r3 + 8 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m11, m13
+
+    ; flush the first eight predicted rows (m4-m11); m12/m13 are scratch,
+    ; the trailing 0/16 argument presumably selects the destination half -
+    ; confirm against the TRANSPOSE_STORE_AVX2 macro definition.
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    palignr         m13, m0, m3, 6
+
+    pmaddwd         m4, m2, [r3 - 5 * 32]           ; [11]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 - 5 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m2, m0, m3, 2                   ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    palignr         m13, m3, m1, 2                  ; [ 8  7  7  6  6  5  5  4  0  2  2  5  5  7  7 10]
+
+    pmaddwd         m5, m13, [r3 + 14 * 32]         ; [30]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m2, [r3 + 14 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m13, [r3 + 1 * 32]          ; [17]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m2, [r3 + 1 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m13, [r3 - 12 * 32]         ; [4]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m2, [r3 - 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m2, m1, m14, 14                 ; [ 7  6  6  5  5  4  4  3  2  5  5  7  7 10 10 12]
+    palignr         m0, m3, m1, 14                  ; [11 10 10  9  9  8  8  7  3  2  2  1  1  0  0  2]
+
+    pmaddwd         m8, m2, [r3 + 7 * 32]           ; [23]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m0, [r3 + 7 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m2, [r3 - 6 * 32]           ; [10]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m2, m0, [r3 - 6 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m9, m2
+
+    palignr         m3, m1, 10                      ; [10  9  9  8  8  7  7  6  2  1  1  0  0  2  2  5]
+    palignr         m1, m14, 10                     ; [ 6  5  5  4  4  3  3  2  5  7  7 10 10 12 12 15]
+
+    pmaddwd         m2, m1, [r3 + 13 * 32]          ; [29]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    pmaddwd         m0, m3, [r3 + 13 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m2, m0
+
+    pmaddwd         m1, [r3]                        ; [16]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    pmaddwd         m3, [r3]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m1, m3
+    ; second half: flush the remaining eight rows (m4-m9, m2, m1)
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 2, 1, 0, 3, 16
+    ret
+
+;; angle 16, modes 15 and 21
+;; 16x16 angular intra prediction for the symmetric mode pair 15/21.
+;; Same register contract as the other ang16_mode_* bodies: r0/r1/r4 =
+;; dst/stride/3*stride, r2 = 16-bit reference samples, r3 = biased
+;; coefficient-table pointer, r6d = 0/1 mode selector (tested once below;
+;; the SIMD instructions do not modify EFLAGS, so TRANSPOSE_STORE_AVX2 can
+;; branch on that test).  Each pmaddwd/paddd/psrld/packusdw group yields one
+;; row: (w0*p0 + w1*p1 + 16) >> 5, repacked to 16-bit with unsigned
+;; saturation; ";[n]" marks the interpolation fraction of each row.
+cglobal ang16_mode_15_21
+    test            r6d, r6d
+
+    movu            m0, [r2]                        ; [15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu            m4, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    ; interleave adjacent samples into (p[i], p[i+1]) pairs for pmaddwd
+    punpcklwd       m3, m0, m4                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    punpckhwd       m2, m0, m4                      ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+
+    pmaddwd         m4, m3, [r3 - 1 * 32]           ; [15]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m2, [r3 - 1 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    punpcklwd       m3, m0, m0                      ; [11 11 10 10  9  9  8  8  3  3  2  2  1  1  0  0]
+    punpckhwd       m0, m0                          ; [15 15 14 14 13 13 12 12  7  7  6  6  5  5  4  4]
+    ; NOTE(review): low lanes of m1/m14 come from setup performed before the
+    ; call - verify against the call sites.
+    vinserti128     m1, m1, xm0, 1
+    vinserti128     m14, m14, xm3, 1
+
+    ; palignr shifts the interleaved window by whole samples (2 bytes each)
+    palignr         m2, m3, m1, 14
+    palignr         m13, m0, m3, 14
+
+    pmaddwd         m5, m2, [r3 + 14 * 32]          ; [30]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m13, [r3 + 14 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m2, [r3 - 3 * 32]           ; [13]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m13, [r3 - 3 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    palignr         m2, m3, m1, 10
+    palignr         m13, m0, m3, 10
+
+    pmaddwd         m7, m2, [r3 + 12 * 32]          ; [28]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 + 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m2, [r3 - 5 * 32]           ; [11]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 - 5 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m2, m3, m1, 6
+    palignr         m13, m0, m3, 6
+
+    pmaddwd         m9, m2, [r3 + 10 * 32]          ; [26]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m13, [r3 + 10 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m2, [r3 - 7 * 32]          ; [9]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m13, [r3 - 7 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    palignr         m2, m3, m1, 2
+    palignr         m13, m0, m3, 2
+
+    pmaddwd         m11, m2, [r3 + 8 * 32]          ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m13, [r3 + 8 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m11, m13
+
+    ; flush first eight predicted rows (m4-m11); m12/m13 are scratch
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    palignr         m13, m0, m3, 2
+
+    pmaddwd         m4, m2, [r3 - 9 * 32]           ; [7]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 - 9 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m6, m1, m14, 14
+    palignr         m7, m3, m1, 14
+
+    pmaddwd         m5, m6, [r3 + 6 * 32]           ; [22]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m7, [r3 + 6 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, [r3 - 11 * 32]              ; [5]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, [r3 - 11 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m8, m1, m14, 10
+    palignr         m9, m3, m1, 10
+
+    pmaddwd         m7, m8, [r3 + 4 * 32]           ; [20]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m10, m9, [r3 + 4 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m7, m10
+
+    pmaddwd         m8, [r3 - 13 * 32]              ; [3]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 - 13 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m2, m1, m14, 6
+    palignr         m0, m3, m1, 6
+
+    pmaddwd         m9, m2, [r3 + 2 * 32]           ; [18]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m13, m0, [r3 + 2 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m9, m13
+
+    pmaddwd         m2, [r3 - 15 * 32]              ; [1]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    pmaddwd         m0, [r3 - 15 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m2, m0
+
+    palignr         m3, m1, 2
+    palignr         m1, m14, 2
+
+    pmaddwd         m1, [r3]                        ; [16]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    pmaddwd         m3, [r3]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m1, m3
+    ; second half: flush the remaining eight rows
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 2, 1, 0, 3, 16
+    ret
+
+;; angle 16, modes 16 and 20
+;; 16x16 angular intra prediction for the symmetric mode pair 16/20.
+;; Register contract matches the sibling ang16_mode_* bodies: r0/r1/r4 =
+;; dst/stride/3*stride, r2 = 16-bit reference samples, r3 = biased
+;; coefficient-table pointer, r6d = 0/1 mode selector (tested once; the SIMD
+;; code preserves EFLAGS for TRANSPOSE_STORE_AVX2).  Each
+;; pmaddwd/paddd/psrld/packusdw group computes one row as
+;; (w0*p0 + w1*p1 + 16) >> 5 with unsigned-saturating repack to 16-bit;
+;; this steeper angle needs a third caller-prepared lane register (m2).
+cglobal ang16_mode_16_20
+    test            r6d, r6d
+
+    movu            m0, [r2]                        ; [15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu            m4, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    ; interleave adjacent samples into (p[i], p[i+1]) pairs for pmaddwd
+    punpcklwd       m3, m0, m4                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    punpckhwd       m12, m0, m4                     ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+
+    pmaddwd         m4, m3, [r3 - 5 * 32]           ; [11]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m12, [r3 - 5 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    punpcklwd       m3, m0, m0                      ; [11 11 10 10  9  9  8  8  3  3  2  2  1  1  0  0]
+    punpckhwd       m0, m0                          ; [15 15 14 14 13 13 12 12  7  7  6  6  5  5  4  4]
+    ; NOTE(review): low lanes of m1/m14/m2 are inherited from setup done
+    ; before the call - verify against the call sites.
+    vinserti128     m1, m1, xm0, 1                  ; [ 7  7  6  6  5  5  4  4  2  2  3  3  5  5  6  6]
+    vinserti128     m14, m14, xm3, 1                ; [ 3  3  2  2  1  1  0  0  8  8  9  9 11 11 12 12]
+    vinserti128     m2, m2, xm1, 1                  ; [ 2  2  3  3  5  5  6  6 14 14 15 15  x  x  x  x]
+
+    ; palignr steps the interleaved window by whole samples (2 bytes each)
+    palignr         m12, m3, m1, 14
+    palignr         m13, m0, m3, 14
+
+    pmaddwd         m5, m12, [r3 + 6 * 32]          ; [22]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m13, [r3 + 6 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m12, [r3 - 15 * 32]         ; [1]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m13, [r3 - 15 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    palignr         m12, m3, m1, 10
+    palignr         m13, m0, m3, 10
+
+    pmaddwd         m7, m12, [r3 - 4 * 32]          ; [12]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 - 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m12, m3, m1, 6
+    palignr         m13, m0, m3, 6
+
+    pmaddwd         m8, m12, [r3 + 7 * 32]          ; [23]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 + 7 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m12, [r3 - 14 * 32]         ; [2]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m13, [r3 - 14 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    palignr         m12, m3, m1, 2
+    palignr         m13, m0, m3, 2
+
+    pmaddwd         m10, m12, [r3 - 3 * 32]         ; [13]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, m13, [r3 - 3 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m12, m1, m14, 14
+    palignr         m13, m3, m1, 14
+
+    pmaddwd         m11, m12, [r3 + 8 * 32]         ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m13, [r3 + 8 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m11, m13
+
+    ; flush first eight predicted rows (m4-m11)
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 0, 13, 0
+
+    palignr         m13, m3, m1, 14
+
+    pmaddwd         m4, m12, [r3 - 13 * 32]         ; [3]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 - 13 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m6, m1, m14, 10
+    palignr         m7, m3, m1, 10
+
+    pmaddwd         m5, m6, [r3 - 2 * 32]           ; [14]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m7, [r3 - 2 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    palignr         m7, m1, m14, 6
+    palignr         m10, m3, m1, 6
+
+    pmaddwd         m6, m7, [r3 + 9 * 32]           ; [25]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m10, [r3 + 9 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, [r3 - 12 * 32]              ; [4]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m10, [r3 - 12 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m7, m10
+
+    palignr         m8, m1, m14, 2                  ; [ 4  3  3  2  2  1  1  0  6  8  8  9  9 11 11 12]
+    palignr         m9, m3, m1, 2                   ; [ 8  7  7  6  6  5  5  4  0  2  2  3  3  5  5  6]
+
+    pmaddwd         m8, [r3 - 1 * 32]               ; [15]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 - 1 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m12, m14, m2, 14
+    palignr         m0, m1, m14, 14
+
+    pmaddwd         m9, m12, [r3 + 10 * 32]         ; [26]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m13, m0, [r3 + 10 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m9, m13
+
+    pmaddwd         m12, [r3 - 11 * 32]             ; [5]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    pmaddwd         m0, [r3 - 11 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m12, m0
+
+    palignr         m1, m14, 10
+    palignr         m14, m2, 10
+
+    pmaddwd         m14, [r3]                       ; [16]
+    paddd           m14, [pd_16]
+    psrld           m14, 5
+    pmaddwd         m1, [r3]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m14, m1
+    ; second half: flush the remaining eight rows
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 12, 14, 0, 3, 16
+    ret
+
+;; angle 16, modes 17 and 19
+;; 16x16 angular intra prediction for the symmetric mode pair 17/19 - the
+;; steepest pair handled by this family, so like 16/20 it consumes three
+;; caller-prepared lane registers (m1/m14/m2).  Register contract matches
+;; the sibling ang16_mode_* bodies: r0/r1/r4 = dst/stride/3*stride,
+;; r2 = 16-bit reference samples, r3 = biased coefficient-table pointer,
+;; r6d = 0/1 mode selector (tested once; the SIMD code preserves EFLAGS for
+;; TRANSPOSE_STORE_AVX2).  Each pmaddwd/paddd/psrld/packusdw group computes
+;; one row as (w0*p0 + w1*p1 + 16) >> 5 with unsigned-saturating repack.
+cglobal ang16_mode_17_19
+    test            r6d, r6d
+
+    movu            m0, [r2]                        ; [15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu            m4, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    ; interleave adjacent samples into (p[i], p[i+1]) pairs for pmaddwd
+    punpcklwd       m3, m0, m4                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    punpckhwd       m12, m0, m4                     ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+
+    pmaddwd         m4, m3, [r3 - 10 * 32]           ; [6]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m12, [r3 - 10 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    punpcklwd       m3, m0, m0                      ; [11 11 10 10  9  9  8  8  3  3  2  2  1  1  0  0]
+    punpckhwd       m0, m0                          ; [15 15 14 14 13 13 12 12  7  7  6  6  5  5  4  4]
+    ; NOTE(review): low lanes of m1/m14/m2 are inherited from setup done
+    ; before the call - verify against the call sites.
+    vinserti128     m1, m1, xm0, 1                  ; [ 7  7  6  6  5  5  4  4  2  2  3  3  5  5  6  6]
+    vinserti128     m14, m14, xm3, 1                ; [ 3  3  2  2  1  1  0  0  8  8  9  9 11 11 12 12]
+    vinserti128     m2, m2, xm1, 1                  ; [ 2  2  3  3  5  5  6  6 14 14 15 15  x  x  x  x]
+
+    ; palignr steps the interleaved window by whole samples (2 bytes each)
+    palignr         m12, m3, m1, 14
+    palignr         m13, m0, m3, 14
+
+    pmaddwd         m5, m12, [r3 - 4 * 32]          ; [12]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m13, [r3 - 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    palignr         m12, m3, m1, 10
+    palignr         m13, m0, m3, 10
+
+    pmaddwd         m6, m12, [r3 + 2 * 32]          ; [18]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m13, [r3 + 2 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    palignr         m12, m3, m1, 6
+    palignr         m13, m0, m3, 6
+
+    pmaddwd         m7, m12, [r3 + 8 * 32]          ; [24]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 + 8 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m12, m3, m1, 2
+    palignr         m13, m0, m3, 2
+
+    pmaddwd         m8, m12, [r3 + 14 * 32]         ; [30]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 + 14 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m12, [r3 - 12 * 32]         ; [4]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m13, [r3 - 12 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    palignr         m12, m1, m14, 14
+    palignr         m13, m3, m1, 14
+
+    pmaddwd         m10, m12, [r3 - 6 * 32]         ; [10]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, m13, [r3 - 6 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m12, m1, m14, 10
+    palignr         m13, m3, m1, 10
+
+    pmaddwd         m11, m12, [r3]                  ; [16]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m13, [r3]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m11, m13
+
+    ; flush first eight predicted rows (m4-m11)
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 0, 13, 0
+
+    palignr         m12, m1, m14, 6
+    palignr         m13, m3, m1, 6
+
+    pmaddwd         m4, m12, [r3 + 6 * 32]          ; [22]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 + 6 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m12, m1, m14, 2
+    palignr         m13, m3, m1, 2
+
+    pmaddwd         m5, m12, [r3 + 12 * 32]         ; [28]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m13, [r3 + 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m12, [r3 - 14 * 32]         ; [2]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m13, [r3 - 14 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    palignr         m7, m14, m2, 14
+    palignr         m0, m1, m14, 14
+
+    pmaddwd         m7, [r3 - 8 * 32]               ; [8]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m0, [r3 - 8 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m7, m0
+
+    palignr         m8, m14, m2, 10
+    palignr         m9, m1, m14, 10
+
+    pmaddwd         m8, [r3 - 2 * 32]               ; [14]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 - 2 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m9, m14, m2, 6
+    palignr         m13, m1, m14, 6
+
+    pmaddwd         m9, [r3 + 4 * 32]               ; [20]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m13, [r3 + 4 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m9, m13
+
+    palignr         m1, m14, 2
+    palignr         m14, m2, 2
+
+    pmaddwd         m12, m14, [r3 + 10 * 32]        ; [26]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    pmaddwd         m0, m1, [r3 + 10 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m12, m0
+
+    ; fraction 0: still rounded through the same (x*w + 16) >> 5 pipeline
+    pmaddwd         m14, [r3 - 16 * 32]             ; [0]
+    paddd           m14, [pd_16]
+    psrld           m14, 5
+    pmaddwd         m1, [r3 - 16 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m14, m1
+    ; second half: flush the remaining eight rows
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 12, 14, 0, 3, 16
+    ret
+
+;; 16x16 intra prediction, mode 3: thin wrapper that selects the mode-3 path
+;; (r6d = 0) of the shared ang16_mode_3_33 body.
+cglobal intra_pred_ang16_3, 3,7,13
+    add         r2,        64                   ; NOTE(review): +64 presumably selects the second half of the reference buffer - confirm against the caller's layout
+    xor         r6d,       r6d                  ; mode selector = 0 (tested inside the shared body)
+    lea         r3,        [ang_table_avx2 + 16 * 32]   ; table bias so the body's +/-k*32 offsets land on this mode's coefficient rows
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_3_33
+    RET
+
+;; 16x16 intra prediction, mode 33: wrapper selecting the mode-33 path
+;; (r6d = 1) of the shared ang16_mode_3_33 body; reference used at r2
+;; directly (no +64 offset, unlike the mode-3 entry).
+cglobal intra_pred_ang16_33, 3,7,13
+    xor         r6d,       r6d
+    inc         r6d                             ; mode selector = 1
+    lea         r3,        [ang_table_avx2 + 16 * 32]   ; coefficient-table bias for this mode pair
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_3_33
+    RET
+
+;; 16x16 intra prediction, mode 4: wrapper selecting the mode-4 path
+;; (r6d = 0) of the shared ang16_mode_4_32 body.
+cglobal intra_pred_ang16_4, 3,7,13
+    add         r2,        64                   ; NOTE(review): +64 presumably selects the second half of the reference buffer - confirm
+    xor         r6d,       r6d                  ; mode selector = 0
+    lea         r3,        [ang_table_avx2 + 18 * 32]   ; note the 18*32 bias - this pair's coefficient rows differ from 3/33
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_4_32
+    RET
+
+;; 16x16 intra prediction, mode 32: wrapper selecting the mode-32 path
+;; (r6d = 1) of the shared ang16_mode_4_32 body; reference at r2 directly.
+cglobal intra_pred_ang16_32, 3,7,13
+    xor         r6d,       r6d
+    inc         r6d                             ; mode selector = 1
+    lea         r3,        [ang_table_avx2 + 18 * 32]   ; same 18*32 table bias as the mode-4 entry
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_4_32
+    RET
+
+;; 16x16 intra prediction, mode 5: wrapper selecting the mode-5 path
+;; (r6d = 0) of the shared ang16_mode_5_31 body.
+cglobal intra_pred_ang16_5, 3,7,13
+    add         r2,        64                   ; NOTE(review): +64 presumably selects the second half of the reference buffer - confirm
+    xor         r6d,       r6d                  ; mode selector = 0
+    lea         r3,        [ang_table_avx2 + 16 * 32]   ; coefficient-table bias for this mode pair
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_5_31
+    RET
+
+;; 16x16 intra prediction, mode 31: wrapper selecting the mode-31 path
+;; (r6d = 1) of the shared ang16_mode_5_31 body; reference at r2 directly.
+cglobal intra_pred_ang16_31, 3,7,13
+    xor         r6d,       r6d
+    inc         r6d                             ; mode selector = 1
+    lea         r3,        [ang_table_avx2 + 16 * 32]   ; same table bias as the mode-5 entry
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_5_31
+    RET
+
+;; 16x16 intra prediction, mode 6: wrapper selecting the mode-6 path
+;; (r6d = 0) of ang16_mode_6_30.  Reserves 14 XMM/YMM registers (vs 13 for
+;; most siblings) to match that body's register usage.
+cglobal intra_pred_ang16_6, 3,7,14
+    add         r2,        64                   ; NOTE(review): +64 presumably selects the second half of the reference buffer - confirm
+    xor         r6d,       r6d                  ; mode selector = 0
+    lea         r3,        [ang_table_avx2 + 15 * 32]   ; coefficient-table bias for this mode pair
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_6_30
+    RET
+
+;; 16x16 intra prediction, mode 30: wrapper selecting the mode-30 path
+;; (r6d = 1) of ang16_mode_6_30; reference at r2 directly.
+cglobal intra_pred_ang16_30, 3,7,14
+    xor         r6d,       r6d
+    inc         r6d                             ; mode selector = 1
+    lea         r3,        [ang_table_avx2 + 15 * 32]   ; same table bias as the mode-6 entry
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_6_30
+    RET
+
+;; 16x16 intra prediction, mode 7: wrapper selecting the mode-7 path
+;; (r6d = 0) of the shared ang16_mode_7_29 body.
+cglobal intra_pred_ang16_7, 3,7,13
+    add         r2,        64                   ; NOTE(review): +64 presumably selects the second half of the reference buffer - confirm
+    xor         r6d,       r6d                  ; mode selector = 0
+    lea         r3,        [ang_table_avx2 + 17 * 32]   ; coefficient-table bias for this mode pair
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_7_29
+    RET
+
+;; 16x16 intra prediction, mode 29: wrapper selecting the mode-29 path
+;; (r6d = 1) of the shared ang16_mode_7_29 body; reference at r2 directly.
+cglobal intra_pred_ang16_29, 3,7,13
+    xor         r6d,       r6d
+    inc         r6d                             ; mode selector = 1
+    lea         r3,        [ang_table_avx2 + 17 * 32]   ; same table bias as the mode-7 entry
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_7_29
+    RET
+
+;; 16x16 intra prediction, mode 8: wrapper selecting the mode-8 path
+;; (r6d = 0) of the shared ang16_mode_8_28 body.
+cglobal intra_pred_ang16_8, 3,7,13
+    add         r2,        64                   ; NOTE(review): +64 presumably selects the second half of the reference buffer - confirm
+    xor         r6d,       r6d                  ; mode selector = 0
+    lea         r3,        [ang_table_avx2 + 15 * 32]   ; coefficient-table bias for this mode pair
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_8_28
+    RET
+
+;; 16x16 intra prediction, mode 28: wrapper selecting the mode-28 path
+;; (r6d = 1) of the shared ang16_mode_8_28 body; reference at r2 directly.
+cglobal intra_pred_ang16_28, 3,7,13
+    xor         r6d,       r6d
+    inc         r6d                             ; mode selector = 1
+    lea         r3,        [ang_table_avx2 + 15 * 32]   ; same table bias as the mode-8 entry
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_8_28
+    RET
+
+;; 16x16 intra prediction, mode 9: wrapper selecting the mode-9 path
+;; (r6d = 0) of ang16_mode_9_27.  Only 12 XMM/YMM registers reserved - the
+;; near-horizontal/vertical body needs fewer temporaries.
+cglobal intra_pred_ang16_9, 3,7,12
+    add         r2,        64                   ; NOTE(review): +64 presumably selects the second half of the reference buffer - confirm
+    xor         r6d,       r6d                  ; mode selector = 0
+    lea         r3,        [ang_table_avx2 + 16 * 32]   ; coefficient-table bias for this mode pair
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_9_27
+    RET
+
+;; 16x16 intra prediction, mode 27: wrapper selecting the mode-27 path
+;; (r6d = 1) of ang16_mode_9_27; reference at r2 directly.
+cglobal intra_pred_ang16_27, 3,7,12
+    xor         r6d,       r6d
+    inc         r6d                             ; mode selector = 1
+    lea         r3,        [ang_table_avx2 + 16 * 32]   ; same table bias as the mode-9 entry
+    add         r1d,       r1d                  ; sample stride -> byte stride (16-bit pixels)
+    lea         r4,        [r1 * 3]             ; 3*stride for four-rows-at-a-time stores
+
+    call        ang16_mode_9_27
+    RET
+
+;; 16x16 intra prediction, mode 10 (pure horizontal): row k is the single
+;; reference sample at [r2 + 64 + 2 + 2k] broadcast across the whole row.
+;; r4m (the fifth argument) enables an edge filter that adjusts row 0 by
+;; half the difference between the opposite reference edge and the corner
+;; sample before the final store.
+cglobal intra_pred_ang16_10, 3,6,3
+    mov             r5d, r4m                ; filter-enable flag from the stack
+    add             r1d, r1d                ; sample stride -> byte stride (16-bit pixels)
+    lea             r4, [r1 * 3]
+
+    vpbroadcastw    m2, [r2 + 2 + 64]       ; [1...]
+    mova            m0, m2                  ; keep row 0 aside for the optional filter below
+    movu            [r0], m2
+    vpbroadcastw    m1, [r2 + 2 + 64 + 2]   ; [2...]
+    movu            [r0 + r1], m1
+    vpbroadcastw    m2, [r2 + 2 + 64 + 4]   ; [3...]
+    movu            [r0 + r1 * 2], m2
+    vpbroadcastw    m1, [r2 + 2 + 64 + 6]   ; [4...]
+    movu            [r0 + r4], m1
+
+    lea             r3, [r0 + r1 * 4]
+    vpbroadcastw    m2, [r2 + 2 + 64 + 8]   ; [5...]
+    movu            [r3], m2
+    vpbroadcastw    m1, [r2 + 2 + 64 + 10]  ; [6...]
+    movu            [r3 + r1], m1
+    vpbroadcastw    m2, [r2 + 2 + 64 + 12]  ; [7...]
+    movu            [r3 + r1 * 2], m2
+    vpbroadcastw    m1, [r2 + 2 + 64 + 14]  ; [8...]
+    movu            [r3 + r4], m1
+
+    lea             r3, [r3 + r1 *4]
+    vpbroadcastw    m2, [r2 + 2 + 64 + 16]  ; [9...]
+    movu            [r3], m2
+    vpbroadcastw    m1, [r2 + 2 + 64 + 18]  ; [10...]
+    movu            [r3 + r1], m1
+    vpbroadcastw    m2, [r2 + 2 + 64 + 20]  ; [11...]
+    movu            [r3 + r1 * 2], m2
+    vpbroadcastw    m1, [r2 + 2 + 64 + 22]  ; [12...]
+    movu            [r3 + r4], m1
+
+    lea             r3, [r3 + r1 *4]
+    vpbroadcastw    m2, [r2 + 2 + 64 + 24]  ; [13...]
+    movu            [r3], m2
+    vpbroadcastw    m1, [r2 + 2 + 64 + 26]  ; [14...]
+    movu            [r3 + r1], m1
+    vpbroadcastw    m2, [r2 + 2 + 64 + 28]  ; [15...]
+    movu            [r3 + r1 * 2], m2
+    vpbroadcastw    m1, [r2 + 2 + 64 + 30]  ; [16...]
+    movu            [r3 + r4], m1
+
+    cmp             r5d, byte 0
+    jz              .quit
+
+    ; filter
+    ; row0 += (opposite-edge reference - corner sample) >> 1, clamped to
+    ; [0, pw_pixel_max]; psraw keeps the adjustment signed before clamping.
+    vpbroadcastw    m2, [r2]                ; [0 0...]
+    movu            m1, [r2 + 2]            ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    psubw           m1, m2
+    psraw           m1, 1
+    paddw           m0, m1
+    pxor            m1, m1
+    pmaxsw          m0, m1
+    pminsw          m0, [pw_pixel_max]
+.quit:
+    ; re-store row 0: filtered when r5d != 0, otherwise an identical
+    ; (harmless) duplicate of the store done at the top.
+    movu            [r0], m0
+    RET
+
+cglobal intra_pred_ang16_26, 3,6,4                      ; 16x16 angular mode 26 (pure vertical); r5d <- r4m (filter flag)
+    mov         r5d,                r4m
+    movu        m0,                 [r2 + 2]            ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    add         r1d,                r1d                 ; stride in bytes (16-bit samples)
+    lea         r4,                 [r1 * 3]
+
+    movu        [r0],               m0                  ; replicate the above-row into all 16 rows, 4 at a time
+    movu        [r0 + r1],          m0
+    movu        [r0 + r1 * 2],      m0
+    movu        [r0 + r4],          m0
+
+    lea         r3,                 [r0 + r1 *4]
+    movu        [r3],               m0
+    movu        [r3 + r1],          m0
+    movu        [r3 + r1 * 2],      m0
+    movu        [r3 + r4],          m0
+
+    lea         r3,                 [r3 + r1 *4]
+    movu        [r3],               m0
+    movu        [r3 + r1],          m0
+    movu        [r3 + r1 * 2],      m0
+    movu        [r3 + r4],          m0
+
+    lea         r3,                 [r3 + r1 *4]        ; r3 now addresses rows 12-15 (reused after the filter below)
+    movu        [r3],               m0
+    movu        [r3 + r1],          m0
+    movu        [r3 + r1 * 2],      m0
+    movu        [r3 + r4],          m0
+
+    cmp         r5d,                byte 0              ; no edge filter when the flag is clear
+    jz         .quit
+
+    ; filter
+
+    vpbroadcastw m0,                xm0                 ; broadcast first above-row sample
+    vpbroadcastw m2,                [r2]                ; corner reference sample
+    movu        m1,                 [r2 + 2 + 64]       ; left-column reference samples
+    psubw       m1,                 m2
+    psraw       m1,                 1                   ; (left[y] - corner) >> 1
+    paddw       m0,                 m1
+    pxor        m1,                 m1
+    pmaxsw      m0,                 m1                  ; clamp to [0, pixel_max]
+    pminsw      m0,                 [pw_pixel_max]
+    pextrw      [r0],               xm0, 0              ; rewrite column 0 of each row with the filtered value
+    pextrw      [r0 + r1],          xm0, 1
+    pextrw      [r0 + r1 * 2],      xm0, 2
+    pextrw      [r0 + r4],          xm0, 3
+    lea         r0,                 [r0 + r1 * 4]
+    pextrw      [r0],               xm0, 4
+    pextrw      [r0 + r1],          xm0, 5
+    pextrw      [r0 + r1 * 2],      xm0, 6
+    pextrw      [r0 + r4],          xm0, 7
+    lea         r0,                 [r0 + r1 * 4]
+    vpermq      m0,                 m0, 11101110b       ; bring the high 128 bits down for rows 8-15
+    pextrw      [r0],               xm0, 0
+    pextrw      [r0 + r1],          xm0, 1
+    pextrw      [r0 + r1 * 2],      xm0, 2
+    pextrw      [r0 + r4],          xm0, 3
+    pextrw      [r3],               xm0, 4              ; r3 still points at rows 12-15
+    pextrw      [r3 + r1],          xm0, 5
+    pextrw      [r3 + r1 * 2],      xm0, 6
+    pextrw      [r3 + r4],          xm0, 7
+.quit:
+    RET
+
+cglobal intra_pred_ang16_11, 3,7,12, 0-4                ; 16x16 angular mode 11; 4-byte stack slot saves a reference word
+    movzx       r5d,       word [r2 + 64]               ; word at src+64 (start of the second reference half)
+    movzx       r6d,       word [r2]                    ; corner/top-left sample
+    mov         [rsp],     r5w                          ; save it for restoration after the call
+    mov         [r2 + 64], r6w                          ; temporarily overwrite with the corner sample
+
+    add         r2,        64                           ; predict from the second reference half
+    xor         r6d,       r6d                          ; r6d = 0 selects mode 11 in the shared helper
+    lea         r3,        [ang_table_avx2 + 16 * 32]   ; angular coefficient table base
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_11_25
+
+    mov         r6d,       [rsp]                        ; restore the saved reference word
+    mov         [r2],      r6w
+    RET
+
+cglobal intra_pred_ang16_25, 3,7,12                     ; 16x16 angular mode 25: same helper as mode 11
+    xor         r6d,       r6d
+    inc         r6d                                     ; r6d = 1 selects mode 25 in ang16_mode_11_25
+    lea         r3,        [ang_table_avx2 + 16 * 32]   ; angular coefficient table base
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+
+    call        ang16_mode_11_25
+    RET
+
+cglobal intra_pred_ang16_12, 3,7,14, 0-4                ; 16x16 angular mode 12; stack slot saves a reference word
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w                          ; save word at src+64
+    mov         [r2 + 64], r6w                          ; temporarily replace it with the corner sample
+
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 12]            ; [13 12 11 10  9  8  7  6]
+    pshufb      xm1,       [pw_ang16_12_24]     ; [ 6  6 13 13  x  x  x  x]
+    xor         r6d,       r6d                          ; r6d = 0 selects mode 12 in the shared helper
+    add         r2,        64
+
+    call        ang16_mode_12_24
+
+    mov         r6d,       [rsp]                        ; restore the saved reference word
+    mov         [r2], r6w
+    RET
+
+cglobal intra_pred_ang16_24, 3,7,14, 0-4                ; 16x16 angular mode 24: shares helper with mode 12
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 76]            ; [13 12 11 10  9  8  7  6]
+    pshufb      xm1,       [pw_ang16_12_24]     ; [ 6  6 13 13  x  x  x  x]
+    xor         r6d,       r6d
+    inc         r6d                                     ; r6d = 1 selects mode 24
+
+    call        ang16_mode_12_24
+    RET
+
+cglobal intra_pred_ang16_13, 3,7,14, 0-4                ; 16x16 angular mode 13; stack slot saves a reference word
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w                          ; save word at src+64
+    mov         [r2 + 64], r6w                          ; temporarily replace it with the corner sample
+
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 8]             ; [11  x  x  x  7  x  x  4]
+    pinsrw      xm1,       [r2 + 28], 1         ; [11  x  x  x  7  x 14  4]
+    pshufb      xm1,       [pw_ang16_13_23]     ; [ 4  4  7  7 11 11 14 14]
+    xor         r6d,       r6d                          ; r6d = 0 selects mode 13 in the shared helper
+    add         r2,        64
+
+    call        ang16_mode_13_23
+
+    mov         r6d,       [rsp]                        ; restore the saved reference word
+    mov         [r2], r6w
+    RET
+
+cglobal intra_pred_ang16_23, 3,7,14, 0-4                ; 16x16 angular mode 23: shares helper with mode 13
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 72]            ; [11 10  9  8  7  6  5  4]
+    pinsrw      xm1,       [r2 + 92], 1         ; [11  x  x  x  7  x 14  4]
+    pshufb      xm1,       [pw_ang16_13_23]     ; [ 4  4  7  7 11 11 14 14]
+    xor         r6d,       r6d
+    inc         r6d                                     ; r6d = 1 selects mode 23
+
+    call        ang16_mode_13_23
+    RET
+
+cglobal intra_pred_ang16_14, 3,7,15, 0-4                ; 16x16 angular mode 14; stack slot saves a reference word
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w                          ; save word at src+64
+    mov         [r2 + 64], r6w                          ; temporarily replace it with the corner sample
+
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 4]             ; [ x  x  7  x  5  x  x  2]
+    pinsrw      xm1,       [r2 + 20], 1         ; [ x  x  7  x  5  x 10  2]
+    movu        xm14,      [r2 + 24]            ; [ x  x  x  x 15  x  x 12]
+    pshufb      xm14,      [pw_ang16_14_22]     ; [12 12 15 15  x  x  x  x]
+    pshufb      xm1,       [pw_ang16_14_22]     ; [ 2  2  5  5  7  7 10 10]
+    xor         r6d,       r6d                          ; r6d = 0 selects mode 14 in the shared helper
+    add         r2,        64
+
+    call        ang16_mode_14_22
+
+    mov         r6d,       [rsp]                        ; restore the saved reference word
+    mov         [r2], r6w
+    RET
+
+cglobal intra_pred_ang16_22, 3,7,15, 0-4                ; 16x16 angular mode 22: shares helper with mode 14
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 68]            ; [ x  x  7  x  5  x  x  2]
+    pinsrw      xm1,       [r2 + 84], 1         ; [ x  x  7  x  5  x 10  2]
+    movu        xm14,      [r2 + 88]            ; [ x  x  x  x 15  x  x 12]
+    pshufb      xm14,      [pw_ang16_14_22]     ; [12 12 15 15  x  x  x  x]
+    pshufb      xm1,       [pw_ang16_14_22]     ; [ 2  2  5  5  7  7 10 10]
+    xor         r6d,       r6d
+    inc         r6d                                     ; r6d = 1 selects mode 22
+
+    call        ang16_mode_14_22
+    RET
+
+cglobal intra_pred_ang16_15, 3,7,15, 0-4                ; 16x16 angular mode 15; stack slot saves a reference word
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w                          ; save word at src+64
+    mov         [r2 + 64], r6w                          ; temporarily replace it with the corner sample
+
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 4]             ; [ x  8  x  6  x  4  x  2]
+    movu        xm14,      [r2 + 18]            ; [ x 15  x 13  x 11  x  9]
+    pshufb      xm14,      [pw_ang16_15_21]     ; [ 9  9 11 11 13 13 15 15]
+    pshufb      xm1,       [pw_ang16_15_21]     ; [ 2  2  4  4  6  6  8  8]
+    xor         r6d,       r6d                          ; r6d = 0 selects mode 15 in the shared helper
+    add         r2,        64
+
+    call        ang16_mode_15_21
+
+    mov         r6d,       [rsp]                        ; restore the saved reference word
+    mov         [r2], r6w
+    RET
+
+cglobal intra_pred_ang16_21, 3,7,15, 0-4                ; 16x16 angular mode 21: shares helper with mode 15
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 68]            ; [ x  8  x  6  x  4  x  2]
+    movu        xm14,      [r2 + 82]            ; [ x 15  x 13  x 11  x  9]
+    pshufb      xm14,      [pw_ang16_15_21]     ; [ 9  9 11 11 13 13 15 15]
+    pshufb      xm1,       [pw_ang16_15_21]     ; [ 2  2  4  4  6  6  8  8]
+    xor         r6d,       r6d
+    inc         r6d                                     ; r6d = 1 selects mode 21
+
+    call        ang16_mode_15_21
+    RET
+
+cglobal intra_pred_ang16_16, 3,7,15, 0-4                ; 16x16 angular mode 16; stack slot saves a reference word
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w                          ; save word at src+64
+    mov         [r2 + 64], r6w                          ; temporarily replace it with the corner sample
+
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 4]             ; [ x  x  x  6  5  x  3  2]
+    movu        xm14,      [r2 + 16]            ; [ x  x  x 12 11  x  9  8]
+    movu        xm2,       [r2 + 28]            ; [ x  x  x  x  x  x 15 14]
+    pshufb      xm14,      [pw_ang16_16_20]     ; [ 8  8  9  9 11 11 12 12]
+    pshufb      xm1,       [pw_ang16_16_20]     ; [ 2  2  3  3  5  5  6  6]
+    pshufb      xm2,       [pw_ang16_16_20]     ; [14 14 15 15  x  x  x  x]
+    xor         r6d,       r6d                          ; r6d = 0 selects mode 16 in the shared helper
+    add         r2,        64
+
+    call        ang16_mode_16_20
+
+    mov         r6d,       [rsp]                        ; restore the saved reference word
+    mov         [r2], r6w
+    RET
+
+cglobal intra_pred_ang16_20, 3,7,15, 0-4                ; 16x16 angular mode 20: shares helper with mode 16
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 68]            ; [ x  x  x  6  5  x  3  2]
+    movu        xm14,      [r2 + 80]            ; [ x  x  x 12 11  x  9  8]
+    movu        xm2,       [r2 + 92]            ; [ x  x  x  x  x  x 15 14]
+    pshufb      xm14,      [pw_ang16_16_20]     ; [ 8  8  9  9 11 11 12 12]
+    pshufb      xm1,       [pw_ang16_16_20]     ; [ 2  2  3  3  5  5  6  6]
+    pshufb      xm2,       [pw_ang16_16_20]     ; [14 14 15 15  x  x  x  x]
+    xor         r6d,       r6d
+    inc         r6d                                     ; r6d = 1 selects mode 20
+
+    call        ang16_mode_16_20
+    RET
+
+cglobal intra_pred_ang16_17, 3,7,15, 0-4                ; 16x16 angular mode 17; stack slot saves a reference word
+    movzx       r5d,       word [r2 + 64]
+    movzx       r6d,       word [r2]
+    mov         [rsp],     r5w                          ; save word at src+64
+    mov         [r2 + 64], r6w                          ; temporarily replace it with the corner sample
+
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 2]             ; [ x  x  x  6  5  x  3  2]
+    movu        xm14,      [r2 + 12]            ; [ x  x  x 12 11  x  9  8]
+    movu        xm2,       [r2 + 22]            ; [ x  x  x  x  x  x 15 14]
+    pshufb      xm14,      [pw_ang16_16_20]     ; [ 8  8  9  9 11 11 12 12]
+    pshufb      xm1,       [pw_ang16_16_20]     ; [ 2  2  3  3  5  5  6  6]
+    pshufb      xm2,       [pw_ang16_16_20]     ; [14 14 15 15  x  x  x  x]
+    xor         r6d,       r6d                          ; r6d = 0 selects mode 17 in the shared helper
+    add         r2,        64
+
+    call        ang16_mode_17_19
+
+    mov         r6d,       [rsp]                        ; restore the saved reference word
+    mov         [r2], r6w
+    RET
+
+cglobal intra_pred_ang16_19, 3,7,15, 0-4                ; 16x16 angular mode 19: shares helper with mode 17
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    movu        xm1,       [r2 + 66]            ; [ x  x  x  6  5  x  3  2]
+    movu        xm14,      [r2 + 76]            ; [ x  x  x 12 11  x  9  8]
+    movu        xm2,       [r2 + 86]            ; [ x  x  x  x  x  x 15 14]
+    pshufb      xm14,      [pw_ang16_16_20]     ; [ 8  8  9  9 11 11 12 12]
+    pshufb      xm1,       [pw_ang16_16_20]     ; [ 2  2  3  3  5  5  6  6]
+    pshufb      xm2,       [pw_ang16_16_20]     ; [14 14 15 15  x  x  x  x]
+    xor         r6d,       r6d
+    inc         r6d                                     ; r6d = 1 selects mode 19
+
+    call        ang16_mode_17_19
+    RET
+
+cglobal intra_pred_ang16_18, 3,5,4                      ; 16x16 angular mode 18 (diagonal): each row shifts the reference window by one sample
+    add         r1d,                 r1d                ; stride in bytes (16-bit samples)
+    lea         r4,                  [r1 * 3]
+    movu        m1,                  [r2]               ; first reference half (incl. corner sample)
+    movu        m0,                  [r2 + 2 + 64]      ; second reference half
+    pshufb      m0,                  [pw_swap16]        ; reverse sample order
+    mova        m3,                  m0
+    vinserti128 m0,                  m0, xm1, 1         ; combine both halves for the palignr shifts
+    movu        [r0],                m1
+    palignr     m2,                  m1, m0, 14         ; row n+1 = row n shifted by one sample
+    movu        [r0 + r1],           m2
+
+    palignr     m2,                  m1, m0, 12
+    movu        [r0 + r1 * 2],       m2
+    palignr     m2,                  m1, m0, 10
+    movu        [r0 + r4],           m2
+
+    lea         r0,                  [r0 + r1 * 4]
+    palignr     m2,                  m1, m0, 8
+    movu        [r0],                m2
+    palignr     m2,                  m1, m0, 6
+    movu        [r0 + r1],           m2
+    palignr     m2,                  m1, m0, 4
+    movu        [r0 + r1 * 2],       m2
+    palignr     m2,                  m1, m0, 2
+    movu        [r0 + r4],           m2
+
+    lea         r0,                  [r0 + r1 * 4]
+    movu        [r0],                m0
+    vpermq      m3,                  m3, 01001110b      ; swap 128-bit lanes to continue the shift chain
+    palignr     m2,                  m0, m3, 14
+    movu        [r0 + r1],           m2
+    palignr     m2,                  m0, m3, 12
+    movu        [r0 + r1 * 2],       m2
+    palignr     m2,                  m0, m3, 10
+    movu        [r0 + r4],           m2
+    palignr     m2,                  m1, m0, 10         ; NOTE(review): result unused -- m2 is overwritten below before any store
+
+    lea         r0,                  [r0 + r1 * 4]
+    palignr     m2,                  m0, m3, 8
+    movu        [r0],                m2
+    palignr     m2,                  m0, m3, 6
+    movu        [r0 + r1],           m2
+    palignr     m2,                  m0, m3, 4
+    movu        [r0 + r1 * 2],       m2
+    palignr     m2,                  m0, m3, 2
+    movu        [r0 + r4],           m2
+    palignr     m1,                  m0, 2              ; NOTE(review): dead instruction -- m1 is never stored before RET
+    RET
+
+;-------------------------------------------------------------------------------------------------------
+; end of avx2 code for intra_pred_ang16 mode 2 to 34
+;-------------------------------------------------------------------------------------------------------
+
+;-------------------------------------------------------------------------------------------------------
+; avx2 code for intra_pred_ang32 mode 2 to 34 start
+;-------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal intra_pred_ang32_2, 3,5,6                       ; 32x32 angular mode 2 (also serves mode 34 via the cmove below)
+    lea         r4,                 [r2]                ; remember the original reference pointer
+    add         r2,                 128                 ; default: read from the second half of the reference buffer
+    cmp         r3m,                byte 34             ; r3m = dirMode
+    cmove       r2,                 r4                  ; mode 34 reads from the first half instead
+    add         r1d,                 r1d                ; stride in bytes (16-bit samples)
+    lea         r3,                 [r1 * 3]
+    movu        m0,                 [r2 + 4]            ; rows are pure copies of the reference, shifted one sample per row
+    movu        m1,                 [r2 + 20]
+    movu        m3,                 [r2 + 36]
+    movu        m4,                 [r2 + 52]
+
+    movu        [r0],               m0                  ; left and right 16-sample halves of each 32-wide row
+    movu        [r0 + 32],          m3
+    palignr     m2,                 m1, m0, 2
+    palignr     m5,                 m4, m3, 2
+    movu        [r0 + r1],          m2
+    movu        [r0 + r1 + 32],     m5
+    palignr     m2,                 m1, m0, 4
+    palignr     m5,                 m4, m3, 4
+    movu        [r0 + r1 * 2],      m2
+    movu        [r0 + r1 * 2 + 32], m5
+    palignr     m2,                 m1, m0, 6
+    palignr     m5,                 m4, m3, 6
+    movu        [r0 + r3],          m2
+    movu        [r0 + r3 + 32],     m5
+
+    lea         r0,                 [r0 + r1 * 4]
+    palignr     m2,                 m1, m0, 8
+    palignr     m5,                 m4, m3, 8
+    movu        [r0],               m2
+    movu        [r0 + 32],          m5
+    palignr     m2,                 m1, m0, 10
+    palignr     m5,                 m4, m3, 10
+    movu        [r0 + r1],          m2
+    movu        [r0 + r1 + 32],     m5
+    palignr     m2,                 m1, m0, 12
+    palignr     m5,                 m4, m3, 12
+    movu        [r0 + r1 * 2],      m2
+    movu        [r0 + r1 * 2 + 32], m5
+    palignr     m2,                 m1, m0, 14
+    palignr     m5,                 m4, m3, 14
+    movu        [r0 + r3],          m2
+    movu        [r0 + r3 + 32],     m5
+
+    movu        m0,                 [r2 + 36]           ; slide the reference window for rows 8-15
+    movu        m3,                 [r2 + 68]
+    lea         r0,                 [r0 + r1 * 4]
+    movu        [r0],               m1
+    movu        [r0 + 32],          m4
+    palignr     m2,                 m0, m1, 2
+    palignr     m5,                 m3, m4, 2
+    movu        [r0 + r1],          m2
+    movu        [r0 + r1 + 32],     m5
+    palignr     m2,                 m0, m1, 4
+    palignr     m5,                 m3, m4, 4
+    movu        [r0 + r1 * 2],      m2
+    movu        [r0 + r1 * 2 + 32], m5
+    palignr     m2,                 m0, m1, 6
+    palignr     m5,                 m3, m4, 6
+    movu        [r0 + r3],          m2
+    movu        [r0 + r3 + 32],     m5
+
+    lea         r0,                 [r0 + r1 * 4]
+    palignr     m2,                 m0, m1, 8
+    palignr     m5,                 m3, m4, 8
+    movu        [r0],               m2
+    movu        [r0 + 32],          m5
+    palignr     m2,                 m0, m1, 10
+    palignr     m5,                 m3, m4, 10
+    movu        [r0 + r1],          m2
+    movu        [r0 + r1 + 32],     m5
+    palignr     m2,                 m0, m1, 12
+    palignr     m5,                 m3, m4, 12
+    movu        [r0 + r1 * 2],      m2
+    movu        [r0 + r1 * 2 + 32], m5
+    palignr     m2,                 m0, m1, 14
+    palignr     m5,                 m3, m4, 14
+    movu        [r0 + r3],          m2
+    movu        [r0 + r3 + 32],     m5
+
+    lea         r0,                 [r0 + r1 * 4]
+    movu        m1,                 [r2 + 52]           ; slide the reference window for rows 16-23
+    movu        m4,                 [r2 + 84]
+
+    movu        [r0],               m0
+    movu        [r0 + 32],          m3
+    palignr     m2,                 m1, m0, 2
+    palignr     m5,                 m4, m3, 2
+    movu        [r0 + r1],          m2
+    movu        [r0 + r1 + 32],     m5
+    palignr     m2,                 m1, m0, 4
+    palignr     m5,                 m4, m3, 4
+    movu        [r0 + r1 * 2],      m2
+    movu        [r0 + r1 * 2 + 32], m5
+    palignr     m2,                 m1, m0, 6
+    palignr     m5,                 m4, m3, 6
+    movu        [r0 + r3],          m2
+    movu        [r0 + r3 + 32],     m5
+
+    lea         r0,                 [r0 + r1 * 4]
+    palignr     m2,                 m1, m0, 8
+    palignr     m5,                 m4, m3, 8
+    movu        [r0],               m2
+    movu        [r0 + 32],          m5
+    palignr     m2,                 m1, m0, 10
+    palignr     m5,                 m4, m3, 10
+    movu        [r0 + r1],          m2
+    movu        [r0 + r1 + 32],     m5
+    palignr     m2,                 m1, m0, 12
+    palignr     m5,                 m4, m3, 12
+    movu        [r0 + r1 * 2],      m2
+    movu        [r0 + r1 * 2 + 32], m5
+    palignr     m2,                 m1, m0, 14
+    palignr     m5,                 m4, m3, 14
+    movu        [r0 + r3],          m2
+    movu        [r0 + r3 + 32],     m5
+
+    movu        m0,                 [r2 + 68]           ; slide the reference window for rows 24-31
+    movu        m3,                 [r2 + 100]
+    lea         r0,                 [r0 + r1 * 4]
+    movu        [r0],               m1
+    movu        [r0 + 32],          m4
+    palignr     m2,                 m0, m1, 2
+    palignr     m5,                 m3, m4, 2
+    movu        [r0 + r1],          m2
+    movu        [r0 + r1 + 32],     m5
+    palignr     m2,                 m0, m1, 4
+    palignr     m5,                 m3, m4, 4
+    movu        [r0 + r1 * 2],      m2
+    movu        [r0 + r1 * 2 + 32], m5
+    palignr     m2,                 m0, m1, 6
+    palignr     m5,                 m3, m4, 6
+    movu        [r0 + r3],          m2
+    movu        [r0 + r3 + 32],     m5
+
+    lea         r0,                 [r0 + r1 * 4]
+    palignr     m2,                 m0, m1, 8
+    palignr     m5,                 m3, m4, 8
+    movu        [r0],               m2
+    movu        [r0 + 32],          m5
+    palignr     m2,                 m0, m1, 10
+    palignr     m5,                 m3, m4, 10
+    movu        [r0 + r1],          m2
+    movu        [r0 + r1 + 32],     m5
+    palignr     m2,                 m0, m1, 12
+    palignr     m5,                 m3, m4, 12
+    movu        [r0 + r1 * 2],      m2
+    movu        [r0 + r1 * 2 + 32], m5
+    palignr     m2,                 m0, m1, 14
+    palignr     m5,                 m3, m4, 14
+    movu        [r0 + r3],          m2
+    movu        [r0 + r3 + 32],     m5
+    RET
+
+cglobal intra_pred_ang32_3, 3,8,13                      ; 32x32 angular mode 3: four 16-wide helper calls, one per quadrant
+    add         r2,        128                          ; use the second half of the reference buffer
+    xor         r6d,       r6d                          ; r6d = 0 selects mode 3 in ang16_mode_3_33
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r7,        [r0 + 8 * r1]                ; dst + 8 rows, used below to reach the lower quadrants
+
+    call        ang16_mode_3_33
+
+    add         r2,        26                           ; advance the reference window for the right quadrant
+    lea         r0,        [r0 + 32]
+
+    call        ang16_mode_3_33
+
+    add         r2,        6
+    lea         r0,        [r7 + 8 * r1]                ; lower-left quadrant (row 16)
+
+    call        ang16_mode_3_33
+
+    add         r2,        26
+    lea         r0,        [r0 + 32]                    ; lower-right quadrant
+
+    call        ang16_mode_3_33
+    RET
+
+cglobal intra_pred_ang32_33, 3,7,13                     ; 32x32 angular mode 33: same helper as mode 3
+    xor         r6d,       r6d
+    inc         r6d                                     ; r6d = 1 selects mode 33 in ang16_mode_3_33
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r5,        [r0 + 32]                    ; remember dst + 32 bytes for the second half
+
+    call        ang16_mode_3_33
+
+    add         r2,        26                           ; advance the reference window
+
+    call        ang16_mode_3_33
+
+    add         r2,        6
+    mov         r0,        r5                           ; second output half
+
+    call        ang16_mode_3_33
+
+    add         r2,        26
+
+    call        ang16_mode_3_33
+    RET
+
+;; angle 32, modes 4 and 32
+cglobal ang32_mode_4_32                                 ; internal helper (plain call target): 16 columns of 32x32 modes 4/32
+    test            r6d, r6d                            ; sets ZF from the mode flag; presumably consumed by TRANSPOSE_STORE_AVX2 to pick the store layout -- confirm against the macro
+
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m1, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m1, m4                      ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+    punpckhwd       m1, m4                          ; [25 24 24 23 23 22 22 21 17 16 16 15 15 14 14 13]
+
+    pmaddwd         m4, m3, [r3 - 13 * 32]          ; [5]
+    paddd           m4, [pd_16]                     ; rounding: (sum + 16) >> 5
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 13 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 8 * 32]           ; [26]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 + 8 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    palignr         m6, m0, m3, 4                   ; [14 13 13 12 12 11 11 10  6  5  5  4  4  3  3  2]
+    pmaddwd         m6, [r3 - 3 * 32]               ; [15]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    palignr         m7, m2, m0, 4                   ; [18 17 17 16 16 15 15 14 10  9  9  8  8  7  7  6]
+    pmaddwd         m7, [r3 - 3 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m8, m0, m3, 8                   ; [15 14 14 13 13 12 12 11  7  6  6  5  5  4  4  3]
+    pmaddwd         m7, m8, [r3 - 14 * 32]              ; [4]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m9, m2, m0, 8                   ; [19 18 18 17 17 16 16 15 11 10 10  9  9  8  8  7]
+    pmaddwd         m10, m9, [r3 - 14 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m7, m10
+
+    pmaddwd         m8, [r3 + 7 * 32]               ; [25]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 + 7 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m9, m0, m3, 12
+    pmaddwd         m9, [r3 - 4 * 32]               ; [14]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    palignr         m3, m2, m0, 12
+    pmaddwd         m3, [r3 - 4 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m10, m0, [r3 - 15 * 32]         ; [3]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m3, m2, [r3 - 15 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m10, m3
+
+    pmaddwd         m11, m0, [r3 + 6 * 32]          ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m3, m2, [r3 + 6 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m11, m3
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 3, 0
+
+    palignr         m4, m2, m0, 4
+    pmaddwd         m4, [r3 - 5* 32]                ; [13]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    palignr         m5, m1, m2, 4
+    pmaddwd         m5, [r3 - 5  * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m6, m2, m0, 8
+    pmaddwd         m5, m6, [r3 - 16 * 32]          ; [2]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    palignr         m7, m1, m2, 8
+    pmaddwd         m8, m7, [r3 - 16 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, [r3 + 5 * 32]               ; [23]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, [r3 + 5 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m7, m2, m0, 12
+    pmaddwd         m7, [r3 - 6 * 32]               ; [12]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m8, m1, m2, 12
+    pmaddwd         m8, [r3 - 6 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    movu            m0, [r2 + 34]                   ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    pmaddwd         m8, m2, [r3 - 17 * 32]          ; [1]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m1, [r3 - 17 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m3, m0, m0, 2                   ; [ x 32 31 30 29 28 27 26  x 24 23 22 21 20 19 18]
+    punpcklwd       m0, m3                          ; [29 29 28 28 27 27 26 22 21 20 20 19 19 18 18 17]
+
+    pmaddwd         m9, m2, [r3 + 4 * 32]           ; [22]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, m1, [r3 + 4 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    palignr         m10, m1, m2, 4
+    pmaddwd         m10, [r3 - 7 * 32]              ; [11]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    palignr         m11, m0, m1, 4
+    pmaddwd         m11, [r3 - 7 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m3, m1, m2, 8
+    pmaddwd         m3, [r3 - 18 * 32]              ; [0]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    palignr         m0, m1, 8
+    pmaddwd         m0, [r3 - 18 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 3, 0, 1, 16
+    ret
+
+cglobal intra_pred_ang32_4, 3,8,13                      ; 32x32 angular mode 4: 16-wide helpers over the four quadrants
+    add         r2,        128                          ; use the second half of the reference buffer
+    xor         r6d,       r6d                          ; r6d = 0 selects mode 4 in the shared helpers
+    lea         r3,        [ang_table_avx2 + 18 * 32]
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r7,        [r0 + 8 * r1]                ; dst + 8 rows, used below to reach the lower quadrants
+
+    call        ang16_mode_4_32
+
+    add         r2,        22                           ; advance the reference window for the right quadrant
+    lea         r0,        [r0 + 32]
+
+    call        ang32_mode_4_32
+
+    add         r2,        10
+    lea         r0,        [r7 + 8 * r1]                ; lower-left quadrant (row 16)
+
+    call        ang16_mode_4_32
+
+    add         r2,        22
+    lea         r0,        [r0 + 32]                    ; lower-right quadrant
+
+    call        ang32_mode_4_32
+    RET
+
+cglobal intra_pred_ang32_32, 3,7,13                     ; 32x32 angular mode 32: same helpers as mode 4
+    xor         r6d,       r6d
+    inc         r6d                                     ; r6d = 1 selects mode 32 in the shared helpers
+    lea         r3,        [ang_table_avx2 + 18 * 32]
+    add         r1d,       r1d                          ; stride in bytes (16-bit samples)
+    lea         r4,        [r1 * 3]
+    lea         r5,        [r0 + 32]                    ; remember dst + 32 bytes for the second half
+
+    call        ang16_mode_4_32
+
+    add         r2,        22                           ; advance the reference window
+
+    call        ang32_mode_4_32
+
+    add         r2,        10
+    mov         r0,        r5                           ; second output half
+
+    call        ang16_mode_4_32
+
+    add         r2,        22
+
+    call        ang32_mode_4_32
+    RET
+
+;; angle 32, modes 5 and 31
+cglobal ang32_mode_5_31
+    test            r6d, r6d
+
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m1, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m1, m4                      ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+    punpckhwd       m1, m4                          ; [25 24 24 23 23 22 22 21 17 16 16 15 15 14 14 13]
+
+    pmaddwd         m4, m3, [r3 - 15 * 32]          ; [1]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 15 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 2 * 32]           ; [18]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 + 2 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    palignr         m7, m0, m3, 4
+    pmaddwd         m6, m7, [r3 - 13 * 32]          ; [3]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    palignr         m8, m2, m0, 4
+    pmaddwd         m9, m8, [r3 - 13 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, [r3 + 4 * 32]               ; [20]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, [r3 + 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m9, m0, m3, 8
+    pmaddwd         m8, m9, [r3 - 11 * 32]          ; [5]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    palignr         m10, m2, m0, 8
+    pmaddwd         m11, m10, [r3 - 11 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m8, m11
+
+    pmaddwd         m9, [r3 + 6 * 32]               ; [22]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, [r3 + 6 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    palignr         m11, m0, m3, 12
+    pmaddwd         m10, m11, [r3 - 9 * 32]         ; [7]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    palignr         m12, m2, m0, 12
+    pmaddwd         m3, m12, [r3 - 9 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m10, m3
+
+    pmaddwd         m11, [r3 + 8 * 32]              ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m12, [r3 + 8 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 3, 0
+
+    pmaddwd         m4, m0, [r3 - 7 * 32]           ; [9]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m2, [r3 - 7  * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m0, [r3 + 10 * 32]          ; [26]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m3, m2, [r3 + 10 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m5, m3
+
+    palignr         m7, m2, m0, 4
+    pmaddwd         m6, m7, [r3 - 5 * 32]           ; [11]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    palignr         m8, m1, m2, 4
+    pmaddwd         m9, m8, [r3 - 5 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, [r3 + 12 * 32]              ; [28]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, [r3 + 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m9, m2, m0, 8
+    pmaddwd         m8, m9, [r3 - 3 * 32]           ; [13]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    palignr         m3, m1, m2, 8
+    pmaddwd         m10, m3, [r3 - 3 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m8, m10
+
+    pmaddwd         m9, [r3 + 14 * 32]              ; [30]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, [r3 + 14 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    palignr         m10, m2, m0, 12
+    pmaddwd         m10, [r3 - 1 * 32]              ; [15]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    palignr         m11, m1, m2, 12
+    pmaddwd         m11, [r3 - 1 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    pmaddwd         m2, [r3 - 16 * 32]              ; [0]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    pmaddwd         m1, [r3 - 16 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m2, m1
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 2, 0, 1, 16
+    ret
+
+;; 32x32 angular intra prediction, mode 5 (predicts from the left
+;; reference samples; results are written transposed by the helpers).
+;; The 32x32 block is assembled from four 16-wide passes through the
+;; shared ang16/ang32_mode_5_31 helpers, stepping r2 (reference pointer)
+;; and r0 (destination) between calls.
+cglobal intra_pred_ang32_5, 3,8,13
+    add         r2,        128                      ; r2 -> left-neighbour half of the reference buffer (64 x 16-bit)
+    xor         r6d,       r6d                      ; r6d = 0 selects the mode-5 store path inside the helpers
+    lea         r3,        [ang_table_avx2 + 16 * 32] ; r3 -> centre of the packed angle-weight table
+    add         r1d,       r1d                      ; stride in bytes (pixels are 16-bit)
+    lea         r4,        [r1 * 3]                 ; r4 = 3 * stride, used by the store macro
+    lea         r7,        [r0 + 8 * r1]            ; save dst + 8 rows for the second half
+
+    call        ang16_mode_5_31
+
+    add         r2,        18                       ; advance reference pointer for the next 16 rows
+    lea         r0,        [r0 + 32]                ; next 16 columns (16 pixels * 2 bytes)
+
+    call        ang32_mode_5_31
+
+    add         r2,        14                       ; net +32 over both calls: next reference strip
+    lea         r0,        [r7 + 8 * r1]            ; dst + 16 rows
+
+    call        ang16_mode_5_31
+
+    add         r2,        18
+    lea         r0,        [r0 + 32]
+
+    call        ang32_mode_5_31
+    RET
+
+;; 32x32 angular intra prediction, mode 31 (mirror of mode 5: predicts
+;; from the top reference samples, so r2 is NOT advanced to the left half
+;; and r6d = 1 selects the non-transposed store path in the helpers).
+cglobal intra_pred_ang32_31, 3,7,13
+    xor         r6d,       r6d
+    inc         r6d                                 ; r6d = 1: mode-31 store path
+    lea         r3,        [ang_table_avx2 + 16 * 32] ; centre of the angle-weight table
+    add         r1d,       r1d                      ; stride in bytes (16-bit pixels)
+    lea         r4,        [r1 * 3]
+    lea         r5,        [r0 + 32]                ; save dst + 16 columns for the right half
+
+    call        ang16_mode_5_31
+
+    add         r2,        18                       ; advance reference pointer
+
+    call        ang32_mode_5_31
+
+    add         r2,        14                       ; net +32 across the two calls
+    mov         r0,        r5                       ; right 16-column half
+
+    call        ang16_mode_5_31
+
+    add         r2,        18
+
+    call        ang32_mode_5_31
+    RET
+
+;; angle 32, modes 6 and 30
+;; Helper: produces 16 output rows for one 16-wide strip of the 32x32
+;; block.  Each prediction pixel is a two-tap interpolation of adjacent
+;; reference samples: (w0*ref[i] + w1*ref[i+1] + 16) >> 5, where the
+;; packed (w0,w1) word pair comes from ang_table_avx2 and the bracketed
+;; number on each group names the fractional weight.  Rows are flushed
+;; 8 at a time through TRANSPOSE_STORE_AVX2.
+cglobal ang32_mode_6_30
+    test            r6d, r6d                        ; NOTE(review): appears to set ZF for TRANSPOSE_STORE_AVX2
+                                                    ; (mode 6 = transposed vs mode 30 store); the SIMD ops below
+                                                    ; do not touch EFLAGS -- confirm against the macro definition
+
+    ; Load 25 reference samples and interleave neighbouring pairs so a
+    ; single pmaddwd computes w0*ref[i] + w1*ref[i+1] per dword lane.
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m1, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m1, m4                      ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+    punpckhwd       m1, m4                          ; [25 24 24 23 23 22 22 21 17 16 16 15 15 14 14 13]
+
+    ; ---- rows 0-7: fractions 29,10,23,4,17,30,11,24; palignr shifts the
+    ; interleaved pairs to step the integer part of the sample offset.
+    pmaddwd         m4, m3, [r3 + 14 * 32]          ; [29]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 + 14 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m6, m0, m3, 4
+    pmaddwd         m5, m6, [r3 - 5 * 32]           ; [10]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    palignr         m7, m2, m0, 4
+    pmaddwd         m8, m7, [r3 - 5 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, [r3 + 8 * 32]               ; [23]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, [r3 + 8 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m9, m0, m3, 8
+    pmaddwd         m7, m9, [r3 - 11 * 32]          ; [4]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m12, m2, m0, 8
+    pmaddwd         m11, m12, [r3 - 11 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m7, m11
+
+    pmaddwd         m8, m9, [r3 + 2 * 32]           ; [17]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m10, m12, [r3 + 2 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m8, m10
+
+    pmaddwd         m9, [r3 + 15 * 32]              ; [30]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m12, [r3 + 15 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m9, m12
+
+    palignr         m11, m0, m3, 12
+    pmaddwd         m10, m11, [r3 - 4 * 32]         ; [11]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    palignr         m12, m2, m0, 12
+    pmaddwd         m3, m12, [r3 - 4 * 32]          ; m3 (source pairs) is dead from here on, reused as scratch
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m10, m3
+
+    pmaddwd         m11, [r3 + 9 * 32]              ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m12, [r3 + 9 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    ; ---- rows 8-15: fractions 5,18,31,12,25,6,19,0
+    pmaddwd         m4, m0, [r3 - 10 * 32]          ; [5]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m2, [r3 - 10  * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m0, [r3 + 3 * 32]           ; [18]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m3, m2, [r3 + 3 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m5, m3
+
+    pmaddwd         m6, m0, [r3 + 16 * 32]          ; [31]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, m2, [r3 + 16 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m8, m2, m0, 4
+    pmaddwd         m7, m8, [r3 - 3 * 32]           ; [12]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m9, m1, m2, 4
+    pmaddwd         m3, m9, [r3 - 3 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m7, m3
+
+    pmaddwd         m8, [r3 + 10 * 32]              ; [25]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 + 10 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m10, m2, m0, 8
+    pmaddwd         m9, m10, [r3 - 9 * 32]          ; [6]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    palignr         m12, m1, m2, 8
+    pmaddwd         m3, m12, [r3 - 9 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m10, [r3 + 4 * 32]              ; [19]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, [r3 + 4 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    palignr         m11, m2, m0, 12
+    pmaddwd         m11, [r3 - 15 * 32]             ; [0]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    palignr         m3, m1, m2, 12
+    pmaddwd         m3, [r3 - 15 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m11, m3
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 0, 1, 16
+    ret
+
+;; 32x32 angular intra prediction, mode 6 (left reference, transposed
+;; store).  Four 16-wide passes through ang16/ang32_mode_6_30.
+cglobal intra_pred_ang32_6, 3,8,14
+    add         r2,        128                      ; r2 -> left-neighbour half of the reference buffer
+    xor         r6d,       r6d                      ; r6d = 0: mode-6 store path in the helpers
+    lea         r3,        [ang_table_avx2 + 15 * 32] ; table base biased for this angle's weight offsets
+    add         r1d,       r1d                      ; stride in bytes (16-bit pixels)
+    lea         r4,        [r1 * 3]
+    lea         r7,        [r0 + 8 * r1]            ; save dst + 8 rows
+
+    call        ang16_mode_6_30
+
+    add         r2,        12                       ; reference advance between halves
+    lea         r0,        [r0 + 32]                ; next 16 columns
+
+    call        ang32_mode_6_30
+
+    add         r2,        20                       ; net +32 across both calls
+    lea         r0,        [r7 + 8 * r1]            ; dst + 16 rows
+
+    call        ang16_mode_6_30
+
+    add         r2,        12
+    lea         r0,        [r0 + 32]
+
+    call        ang32_mode_6_30
+    RET
+
+;; 32x32 angular intra prediction, mode 30 (mirror of mode 6: top
+;; reference, direct store; r6d = 1 selects that path in the helpers).
+cglobal intra_pred_ang32_30, 3,7,14
+    xor         r6d,       r6d
+    inc         r6d                                 ; r6d = 1: mode-30 store path
+    lea         r3,        [ang_table_avx2 + 15 * 32]
+    add         r1d,       r1d                      ; stride in bytes
+    lea         r4,        [r1 * 3]
+    lea         r5,        [r0 + 32]                ; save dst + 16 columns
+
+    call        ang16_mode_6_30
+
+    add         r2,        12
+
+    call        ang32_mode_6_30
+
+    add         r2,        20                       ; net +32 across the two calls
+    mov         r0,        r5                       ; right half
+
+    call        ang16_mode_6_30
+
+    add         r2,        12
+
+    call        ang32_mode_6_30
+    RET
+
+;; angle 32, modes 7 and 29
+;; Helper: 16 output rows for one 16-wide strip.  Same two-tap scheme as
+;; the other ang32 helpers: (w0*ref[i] + w1*ref[i+1] + 16) >> 5 with the
+;; packed weight pair from ang_table_avx2; bracketed numbers name the
+;; fractional weight.  Mode 7 has a shallower angle, so the same palignr
+;; shift (same integer offset) serves several consecutive fractions.
+cglobal ang32_mode_7_29
+    test            r6d, r6d                        ; NOTE(review): appears to set ZF for TRANSPOSE_STORE_AVX2
+                                                    ; (mode 7 transposed vs mode 29 store) -- confirm in macro
+
+    ; Load reference samples and interleave neighbouring pairs for pmaddwd.
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m1, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m1, m4                      ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+    punpckhwd       m1, m4                          ; [25 24 24 23 23 22 22 21 17 16 16 15 15 14 14 13]
+
+    ; ---- rows 0-7: fractions 25,2,11,20,29,6,15,24
+    pmaddwd         m4, m3, [r3 + 8 * 32]           ; [25]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 + 8 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m8, m0, m3, 4                   ; m8/m9 reused for the next four fractions (same offset)
+    pmaddwd         m5, m8, [r3 - 15 * 32]          ; [2]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    palignr         m9, m2, m0, 4
+    pmaddwd         m10, m9, [r3 - 15 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m5, m10
+
+    pmaddwd         m6, m8, [r3 - 6 * 32]           ; [11]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, m9, [r3 - 6 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    pmaddwd         m7, m8, [r3 + 3 * 32]           ; [20]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m10, m9, [r3 + 3 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m7, m10
+
+    pmaddwd         m8, [r3 + 12 * 32]              ; [29]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 + 12 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m11, m0, m3, 8
+    pmaddwd         m9, m11, [r3 - 11 * 32]         ; [6]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    palignr         m12, m2, m0, 8
+    pmaddwd         m10, m12, [r3 - 11 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m11, [r3 - 2 * 32]         ; [15]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m13, m12, [r3 - 2 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m10, m13
+
+    pmaddwd         m11, [r3 + 7 * 32]              ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m12, [r3 + 7 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    ; ---- rows 8-15: fractions 1,10,19,28,5,14,23,0
+    palignr         m5, m0, m3, 12
+    pmaddwd         m4, m5, [r3 - 16 * 32]          ; [1]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    palignr         m6, m2, m0, 12
+    pmaddwd         m7, m6, [r3 - 16 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m4, m7
+
+    pmaddwd         m5, [r3 - 7 * 32]               ; [10]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, [r3 - 7 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    palignr         m9, m0, m3, 12                  ; recompute: m5/m6 were clobbered as results above
+    pmaddwd         m6, m9, [r3 + 2 * 32]           ; [19]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    palignr         m3, m2, m0, 12                  ; m3 (source pairs) dead from here, reused as scratch
+    pmaddwd         m7, m3, [r3 + 2 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    pmaddwd         m7, m9, [r3 + 11 * 32]          ; [28]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m3, [r3 + 11 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m0, [r3 - 12 * 32]          ; [5]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m10, m2, [r3 - 12 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m8, m10
+
+    pmaddwd         m9, m0, [r3 - 3 * 32]           ; [14]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, m2, [r3 - 3 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m10, m0, [r3 + 6 * 32]          ; [23]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m2, [r3 + 6 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    palignr         m11, m2, m0, 4
+    pmaddwd         m11, [r3 - 17 * 32]             ; [0]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    palignr         m12, m1, m2, 4
+    pmaddwd         m12, [r3 - 17 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 3, 2, 16
+    ret
+
+;; 32x32 angular intra prediction, mode 7 (left reference, transposed
+;; store).  Four 16-wide passes through ang16/ang32_mode_7_29.
+cglobal intra_pred_ang32_7, 3,8,14
+    add         r2,        128                      ; r2 -> left-neighbour half of the reference buffer
+    xor         r6d,       r6d                      ; r6d = 0: mode-7 store path
+    lea         r3,        [ang_table_avx2 + 17 * 32] ; table base biased for this angle
+    add         r1d,       r1d                      ; stride in bytes (16-bit pixels)
+    lea         r4,        [r1 * 3]
+    lea         r7,        [r0 + 8 * r1]            ; save dst + 8 rows
+
+    call        ang16_mode_7_29
+
+    add         r2,        8                        ; shallow angle -> small reference advance
+    lea         r0,        [r0 + 32]                ; next 16 columns
+
+    call        ang32_mode_7_29
+
+    add         r2,        24                       ; net +32 across both calls
+    lea         r0,        [r7 + 8 * r1]            ; dst + 16 rows
+
+    call        ang16_mode_7_29
+
+    add         r2,        8
+    lea         r0,        [r0 + 32]
+
+    call        ang32_mode_7_29
+    RET
+
+;; 32x32 angular intra prediction, mode 29 (mirror of mode 7: top
+;; reference, direct store; r6d = 1 selects that path in the helpers).
+cglobal intra_pred_ang32_29, 3,7,14
+    xor         r6d,       r6d
+    inc         r6d                                 ; r6d = 1: mode-29 store path
+    lea         r3,        [ang_table_avx2 + 17 * 32]
+    add         r1d,       r1d                      ; stride in bytes
+    lea         r4,        [r1 * 3]
+    lea         r5,        [r0 + 32]                ; save dst + 16 columns
+
+    call        ang16_mode_7_29
+
+    add         r2,        8
+
+    call        ang32_mode_7_29
+
+    add         r2,        24                       ; net +32 across the two calls
+    mov         r0,        r5                       ; right half
+
+    call        ang16_mode_7_29
+
+    add         r2,        8
+
+    call        ang32_mode_7_29
+    RET
+
+;; angle 32, modes 8 and 28
+;; Helper: 16 output rows for one 16-wide strip.  Two-tap interpolation
+;; (w0*ref[i] + w1*ref[i+1] + 16) >> 5 with weights from ang_table_avx2;
+;; bracketed numbers name the fractional weight.  Mode 8 is the
+;; shallowest of these angles, so one palignr offset covers many rows;
+;; only the low halves of the second reference load are needed (m1 from
+;; the far load is never used, so m1 doubles as scratch).
+cglobal ang32_mode_8_28
+    test            r6d, r6d                        ; NOTE(review): appears to set ZF for TRANSPOSE_STORE_AVX2
+                                                    ; (mode 8 transposed vs mode 28 store) -- confirm in macro
+
+    ; Load reference samples and interleave neighbouring pairs for pmaddwd.
+    movu            m0, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1, [r2 + 4]                    ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    movu            m2, [r2 + 18]                   ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    movu            m4, [r2 + 20]                   ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m4                          ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+
+    ; ---- rows 0-7: fractions 21,26,31,4,9,14,19,24
+    pmaddwd         m4, m3, [r3 + 6 * 32]           ; [21]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 + 6 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 11 * 32]          ; [26]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 + 11 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m3, [r3 + 16 * 32]          ; [31]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m0, [r3 + 16 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    palignr         m11, m0, m3, 4                  ; m11/m1 serve fractions 4..24 (same integer offset)
+    pmaddwd         m7, m11, [r3 - 11 * 32]         ; [4]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    palignr         m1, m2, m0, 4
+    pmaddwd         m8, m1, [r3 - 11 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m11, [r3 - 6 * 32]          ; [9]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m1, [r3 - 6 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m11, [r3 - 1 * 32]          ; [14]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m1, [r3 - 1 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m11, [r3 + 4 * 32]         ; [19]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m1, [r3 + 4 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    pmaddwd         m11, [r3 + 9 * 32]              ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m1, [r3 + 9 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m11, m1
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 0
+
+    ; ---- rows 8-15: fractions 29,2,7,12,17,22,27,0
+    palignr         m4, m0, m3, 4                   ; recompute offset-4 pairs (m11/m1 were clobbered)
+    pmaddwd         m4, [r3 + 14 * 32]              ; [29]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    palignr         m5, m2, m0, 4
+    pmaddwd         m5, [r3 + 14 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m1, m0, m3, 8                   ; m1/m10 serve fractions 2..27
+    pmaddwd         m5, m1, [r3 - 13 * 32]          ; [2]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    palignr         m10, m2, m0, 8
+    pmaddwd         m6, m10, [r3 - 13 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m1, [r3 - 8 * 32]           ; [7]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m10, [r3 - 8 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m1, [r3 - 3 * 32]           ; [12]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m10, [r3 - 3 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m1, [r3 + 2 * 32]           ; [17]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m10, [r3 + 2 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m1, [r3 + 7 * 32]           ; [22]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m11, m10, [r3 + 7 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m9, m11
+
+    pmaddwd         m1, [r3 + 12 * 32]              ; [27]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    pmaddwd         m10, [r3 + 12 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m1, m10
+
+    palignr         m11, m0, m3, 12
+    pmaddwd         m11, [r3 - 15 * 32]             ; [0]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    palignr         m2, m0, 12                      ; two-operand form: m2 = palignr(m2, m0, 12)
+    pmaddwd         m2, [r3 - 15 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m11, m2
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 1, 11, 0, 2, 16
+    ret
+
+;; 32x32 angular intra prediction, mode 8 (left reference, transposed
+;; store).  Four 16-wide passes through ang16/ang32_mode_8_28.
+cglobal intra_pred_ang32_8, 3,8,13
+    add         r2,        128                      ; r2 -> left-neighbour half of the reference buffer
+    xor         r6d,       r6d                      ; r6d = 0: mode-8 store path
+    lea         r3,        [ang_table_avx2 + 15 * 32]
+    add         r1d,       r1d                      ; stride in bytes (16-bit pixels)
+    lea         r4,        [r1 * 3]
+    lea         r7,        [r0 + 8 * r1]            ; save dst + 8 rows
+
+    call        ang16_mode_8_28
+
+    add         r2,        4                        ; very shallow angle -> tiny reference advance
+    lea         r0,        [r0 + 32]                ; next 16 columns
+
+    call        ang32_mode_8_28
+
+    add         r2,        28                       ; net +32 across both calls
+    lea         r0,        [r7 + 8 * r1]            ; dst + 16 rows
+
+    call        ang16_mode_8_28
+
+    add         r2,        4
+    lea         r0,        [r0 + 32]
+
+    call        ang32_mode_8_28
+    RET
+
+;; 32x32 angular intra prediction, mode 28 (mirror of mode 8: top
+;; reference, direct store; r6d = 1 selects that path in the helpers).
+cglobal intra_pred_ang32_28, 3,7,13
+    xor         r6d,       r6d
+    inc         r6d                                 ; r6d = 1: mode-28 store path
+    lea         r3,        [ang_table_avx2 + 15 * 32]
+    add         r1d,       r1d                      ; stride in bytes
+    lea         r4,        [r1 * 3]
+    lea         r5,        [r0 + 32]                ; save dst + 16 columns
+
+    call        ang16_mode_8_28
+
+    add         r2,        4
+
+    call        ang32_mode_8_28
+
+    add         r2,        28                       ; net +32 across the two calls
+    mov         r0,        r5                       ; right half
+
+    call        ang16_mode_8_28
+
+    add         r2,        4
+
+    call        ang32_mode_8_28
+    RET
+
+;; 32x32 angular intra prediction, mode 9 (left reference, transposed
+;; store).  Mode 9's angle is so shallow that the same 16-row helper
+;; (ang16_mode_9_27) is reused for all four quadrants, shifting the
+;; reference pointer by one sample (2 bytes) between column halves.
+cglobal intra_pred_ang32_9, 3,8,13
+    add         r2,        128                      ; r2 -> left-neighbour half of the reference buffer
+    xor         r6d,       r6d                      ; r6d = 0: mode-9 store path
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    add         r1d,       r1d                      ; stride in bytes (16-bit pixels)
+    lea         r4,        [r1 * 3]
+    lea         r7,        [r0 + 8 * r1]            ; save dst + 8 rows
+
+    call        ang16_mode_9_27
+
+    add         r2,        2                        ; +1 reference sample for the next 16 columns
+    lea         r0,        [r0 + 32]
+
+    call        ang16_mode_9_27
+
+    add         r2,        30                       ; net +32: next 16-row strip
+    lea         r0,        [r7 + 8 * r1]            ; dst + 16 rows
+
+    call        ang16_mode_9_27
+
+    add         r2,        2
+    lea         r0,        [r0 + 32]
+
+    call        ang16_mode_9_27
+    RET
+
+;; 32x32 angular intra prediction, mode 27 (mirror of mode 9: top
+;; reference, direct store; r6d = 1 selects that path in the helper).
+cglobal intra_pred_ang32_27, 3,7,13
+    xor         r6d,       r6d
+    inc         r6d                                 ; r6d = 1: mode-27 store path
+    lea         r3,        [ang_table_avx2 + 16 * 32]
+    add         r1d,       r1d                      ; stride in bytes
+    lea         r4,        [r1 * 3]
+    lea         r5,        [r0 + 32]                ; save dst + 16 columns
+
+    call        ang16_mode_9_27
+
+    add         r2,        2                        ; +1 reference sample for the right half
+
+    call        ang16_mode_9_27
+
+    add         r2,        30                       ; net +32: lower 16 rows
+    mov         r0,        r5
+
+    call        ang16_mode_9_27
+
+    add         r2,        2
+
+    call        ang16_mode_9_27
+    RET
+
+;; 32x32 intra prediction, mode 10 (pure horizontal): every row of the
+;; prediction is one left-neighbour sample replicated across all 32
+;; 16-bit pixels.  vpbroadcastw fills a 32-byte register with one word;
+;; each row needs two 32-byte stores (32 pixels * 2 bytes = 64 bytes).
+;; m0/m1 are ping-ponged so consecutive broadcasts don't serialize.
+cglobal intra_pred_ang32_10, 3,4,2
+    add             r2, mmsize*4                    ; r2 -> left-neighbour half (+128 bytes)
+    add             r1d, r1d                        ; stride in bytes (16-bit pixels)
+    lea             r3, [r1 * 3]
+
+    vpbroadcastw    m0, [r2 + 2]       ; [1...]
+    movu            [r0], m0
+    movu            [r0 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 2]   ; [2...]
+    movu            [r0 + r1], m1
+    movu            [r0 + r1 + 32], m1
+    vpbroadcastw    m0, [r2 + 2 + 4]   ; [3...]
+    movu            [r0 + r1 * 2], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 6]   ; [4...]
+    movu            [r0 + r3], m1
+    movu            [r0 + r3 + 32], m1
+
+    lea             r0, [r0 + r1 * 4]               ; advance dst 4 rows; same pattern for rows 5-32
+    vpbroadcastw    m0, [r2 + 2 + 8]   ; [5...]
+    movu            [r0], m0
+    movu            [r0 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 10]  ; [6...]
+    movu            [r0 + r1], m1
+    movu            [r0 + r1 + 32], m1
+    vpbroadcastw    m0, [r2 + 2 + 12]  ; [7...]
+    movu            [r0 + r1 * 2], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 14]  ; [8...]
+    movu            [r0 + r3], m1
+    movu            [r0 + r3 + 32], m1
+
+    lea             r0, [r0 + r1 *4]
+    vpbroadcastw    m0, [r2 + 2 + 16]  ; [9...]
+    movu            [r0], m0
+    movu            [r0 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 18]  ; [10...]
+    movu            [r0 + r1], m1
+    movu            [r0 + r1 + 32], m1
+    vpbroadcastw    m0, [r2 + 2 + 20]  ; [11...]
+    movu            [r0 + r1 * 2], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 22]  ; [12...]
+    movu            [r0 + r3], m1
+    movu            [r0 + r3 + 32], m1
+
+    lea             r0, [r0 + r1 *4]
+    vpbroadcastw    m0, [r2 + 2 + 24]  ; [13...]
+    movu            [r0], m0
+    movu            [r0 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 26]  ; [14...]
+    movu            [r0 + r1], m1
+    movu            [r0 + r1 + 32], m1
+    vpbroadcastw    m0, [r2 + 2 + 28]  ; [15...]
+    movu            [r0 + r1 * 2], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 30]  ; [16...]
+    movu            [r0 + r3], m1
+    movu            [r0 + r3 + 32], m1
+
+    lea             r0, [r0 + r1 *4]
+    vpbroadcastw    m0, [r2 + 2 + 32]  ; [17...]
+    movu            [r0], m0
+    movu            [r0 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 34]  ; [18...]
+    movu            [r0 + r1], m1
+    movu            [r0 + r1 + 32], m1
+    vpbroadcastw    m0, [r2 + 2 + 36]  ; [19...]
+    movu            [r0 + r1 * 2], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 38]  ; [20...]
+    movu            [r0 + r3], m1
+    movu            [r0 + r3 + 32], m1
+
+    lea             r0, [r0 + r1 *4]
+    vpbroadcastw    m0, [r2 + 2 + 40]  ; [21...]
+    movu            [r0], m0
+    movu            [r0 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 42]  ; [22...]
+    movu            [r0 + r1], m1
+    movu            [r0 + r1 + 32], m1
+    vpbroadcastw    m0, [r2 + 2 + 44]  ; [23...]
+    movu            [r0 + r1 * 2], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 46]  ; [24...]
+    movu            [r0 + r3], m1
+    movu            [r0 + r3 + 32], m1
+
+    lea             r0, [r0 + r1 *4]
+    vpbroadcastw    m0, [r2 + 2 + 48]  ; [25...]
+    movu            [r0], m0
+    movu            [r0 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 50]  ; [26...]
+    movu            [r0 + r1], m1
+    movu            [r0 + r1 + 32], m1
+    vpbroadcastw    m0, [r2 + 2 + 52]  ; [27...]
+    movu            [r0 + r1 * 2], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 54]  ; [28...]
+    movu            [r0 + r3], m1
+    movu            [r0 + r3 + 32], m1
+
+    lea             r0, [r0 + r1 *4]
+    vpbroadcastw    m0, [r2 + 2 + 56]  ; [29...]
+    movu            [r0], m0
+    movu            [r0 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 58]  ; [30...]
+    movu            [r0 + r1], m1
+    movu            [r0 + r1 + 32], m1
+    vpbroadcastw    m0, [r2 + 2 + 60]  ; [31...]
+    movu            [r0 + r1 * 2], m0
+    movu            [r0 + r1 * 2 + 32], m0
+    vpbroadcastw    m1, [r2 + 2 + 62]  ; [32...]
+    movu            [r0 + r3], m1
+    movu            [r0 + r3 + 32], m1
+    RET
+
+;; 32x32 intra prediction, mode 26 (pure vertical): every row of the
+;; prediction is a copy of the 32 top-neighbour samples.  The 64-byte
+;; top row is loaded once into m0/m1 and stored to all 32 rows, four
+;; rows per unrolled group.  r2 is dead after the loads and is reused
+;; to hold 3 * stride.
+cglobal intra_pred_ang32_26, 3,3,2
+    movu        m0,                 [r2 + 2]        ; top neighbours 1-16
+    movu        m1,                 [r2 + 34]       ; top neighbours 17-32
+    add         r1d,                r1d             ; stride in bytes (16-bit pixels)
+    lea         r2,                 [r1 * 3]        ; r2 repurposed: 3 * stride
+
+    movu        [r0],               m0
+    movu        [r0 + 32],          m1
+    movu        [r0 + r1],          m0
+    movu        [r0 + r1 + 32],     m1
+    movu        [r0 + r1 * 2],      m0
+    movu        [r0 + r1 * 2 + 32], m1
+    movu        [r0 + r2],          m0
+    movu        [r0 + r2 + 32],     m1
+
+    lea         r0,                 [r0 + r1 *4]    ; rows 4-7 (same pattern repeats below)
+    movu        [r0],               m0
+    movu        [r0 + 32],          m1
+    movu        [r0 + r1],          m0
+    movu        [r0 + r1 + 32],     m1
+    movu        [r0 + r1 * 2],      m0
+    movu        [r0 + r1 * 2 + 32], m1
+    movu        [r0 + r2],          m0
+    movu        [r0 + r2 + 32],     m1
+
+    lea         r0,                 [r0 + r1 *4]
+    movu        [r0],               m0
+    movu        [r0 + 32],          m1
+    movu        [r0 + r1],          m0
+    movu        [r0 + r1 + 32],     m1
+    movu        [r0 + r1 * 2],      m0
+    movu        [r0 + r1 * 2 + 32], m1
+    movu        [r0 + r2],          m0
+    movu        [r0 + r2 + 32],     m1
+
+    lea         r0,                 [r0 + r1 *4]
+    movu        [r0],               m0
+    movu        [r0 + 32],          m1
+    movu        [r0 + r1],          m0
+    movu        [r0 + r1 + 32],     m1
+    movu        [r0 + r1 * 2],      m0
+    movu        [r0 + r1 * 2 + 32], m1
+    movu        [r0 + r2],          m0
+    movu        [r0 + r2 + 32],     m1
+
+    lea         r0,                 [r0 + r1 *4]
+    movu        [r0],               m0
+    movu        [r0 + 32],          m1
+    movu        [r0 + r1],          m0
+    movu        [r0 + r1 + 32],     m1
+    movu        [r0 + r1 * 2],      m0
+    movu        [r0 + r1 * 2 + 32], m1
+    movu        [r0 + r2],          m0
+    movu        [r0 + r2 + 32],     m1
+
+    lea         r0,                 [r0 + r1 *4]
+    movu        [r0],               m0
+    movu        [r0 + 32],          m1
+    movu        [r0 + r1],          m0
+    movu        [r0 + r1 + 32],     m1
+    movu        [r0 + r1 * 2],      m0
+    movu        [r0 + r1 * 2 + 32], m1
+    movu        [r0 + r2],          m0
+    movu        [r0 + r2 + 32],     m1
+
+    lea         r0,                 [r0 + r1 *4]
+    movu        [r0],               m0
+    movu        [r0 + 32],          m1
+    movu        [r0 + r1],          m0
+    movu        [r0 + r1 + 32],     m1
+    movu        [r0 + r1 * 2],      m0
+    movu        [r0 + r1 * 2 + 32], m1
+    movu        [r0 + r2],          m0
+    movu        [r0 + r2 + 32],     m1
+
+    lea         r0,                 [r0 + r1 *4]
+    movu        [r0],               m0
+    movu        [r0 + 32],          m1
+    movu        [r0 + r1],          m0
+    movu        [r0 + r1 + 32],     m1
+    movu        [r0 + r1 * 2],      m0
+    movu        [r0 + r1 * 2 + 32], m1
+    movu        [r0 + r2],          m0
+    movu        [r0 + r2 + 32],     m1
+    RET
+
+; 32x32 angular intra prediction, mode 11 (10-bit, AVX2).
+; Temporarily patches two words of the reference buffer at r2 (originals are
+; saved on the stack), runs the shared 16-wide helper ang16_mode_11_25 on each
+; of the four 16x16 quadrants, then restores the saved words.  r6d is cleared
+; before the calls; the helper presumably branches on r6d to pick the mode-11
+; vs mode-25 path (the visible ang32 helpers all do `test r6d, r6d`) -- the
+; helper body is outside this hunk, confirm there.
+cglobal intra_pred_ang32_11, 3,8,12, 0-8
+    ; save word [r2+128] and overwrite it with word [r2]
+    movzx       r5d,        word [r2 + 128]  ; [0]
+    movzx       r6d,        word [r2]
+    mov         [rsp],      r5w
+    mov         [r2 + 128], r6w
+
+    ; save word [r2+126] and overwrite it with word [r2+32]
+    movzx       r5d,        word [r2 + 126]  ; [16]
+    movzx       r6d,        word [r2 + 32]
+    mov         [rsp + 4],  r5w
+    mov         [r2 + 126], r6w
+
+    add         r2,         128
+    xor         r6d,        r6d
+    lea         r3,         [ang_table_avx2 + 16 * 32]
+    add         r1d,        r1d                         ; stride in bytes (16-bit pixels)
+    lea         r4,         [r1 * 3]
+    lea         r7,         [r0 + 8 * r1]               ; r7 = row 16 of the destination
+
+    call        ang16_mode_11_25
+
+    sub         r2,         2
+    lea         r0,         [r0 + 32]
+
+    call        ang16_mode_11_25
+
+    add         r2,         34
+    lea         r0,         [r7 + 8 * r1]
+
+    call        ang16_mode_11_25
+
+    sub         r2,         2
+    lea         r0,         [r0 + 32]
+
+    call        ang16_mode_11_25
+
+    ; restore: r2 has moved +158 net, so [r2-30]/[r2-32] are the two
+    ; patched locations (original r2+128 and r2+126) saved above.
+    mov         r6d,        [rsp]
+    mov         [r2 - 30], r6w
+    mov         r6d,       [rsp + 4]
+    mov         [r2 - 32], r6w
+    RET
+
+; 32x32 angular intra prediction, mode 25 (10-bit, AVX2).
+; Mirror of intra_pred_ang32_11: r6d is set to 1 to select the mode-25 path
+; in the shared helper (which presumably tests r6d -- helper body is outside
+; this hunk).  One reference word at [r2-2] is patched for the duration of the
+; four 16x16 quadrant calls and restored afterwards.
+cglobal intra_pred_ang32_25, 3,7,12, 0-4
+    xor         r6d,        r6d
+    inc         r6d                                     ; r6d = 1 -> mode 25
+    lea         r3,         [ang_table_avx2 + 16 * 32]
+    add         r1d,        r1d                         ; stride in bytes (16-bit pixels)
+
+    ; save word [r2-2] and overwrite it with word [r2+160]
+    movzx       r4d,        word [r2 - 2]
+    movzx       r5d,        word [r2 + 160]     ; [16]
+    mov         [rsp],      r4w
+    mov         [r2 - 2],   r5w
+
+    lea         r4,         [r1 * 3]
+    lea         r5,         [r0 + 32]                   ; r5 = right 16-column half
+
+    call        ang16_mode_11_25
+
+    sub         r2,         2
+
+    call        ang16_mode_11_25
+
+    add         r2,         34
+    mov         r0,         r5
+
+    call        ang16_mode_11_25
+
+    sub         r2,         2
+
+    call        ang16_mode_11_25
+
+    ; restore: r2 moved +30 net, so [r2-32] is the original [r2-2].
+    mov         r5d,        [rsp]
+    mov         [r2 - 32],  r5w
+    RET
+
+;; angle 32, modes 12 and 24, row 0 to 15
+; Helper: computes 16 output rows of the 32-wide prediction.  Each output row
+; is (ref * w + (32-w) * ref_next + 16) >> 5, implemented as pmaddwd against a
+; weight row of ang_table_avx2 ([r3 + k*32] selects the fraction noted in the
+; trailing comments), pd_16 rounding add, >>5, then packusdw back to words.
+; `test r6d, r6d` sets flags for the TRANSPOSE_STORE_AVX2 macro: r6d==0
+; (mode 12) presumably takes the transposing store, r6d!=0 (mode 24) the
+; direct row store -- macro body is outside this hunk, confirm there.
+cglobal ang32_mode_12_24_0_15
+    test            r6d, r6d
+
+    movu            m0, [r2]                        ; [15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu            m4, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    punpcklwd       m3, m0, m4                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    punpckhwd       m2, m0, m4                      ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+
+    pmaddwd         m4, m3, [r3 + 11 * 32]          ; [27]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m2, [r3 + 11 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 6 * 32]           ; [22]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m2, [r3 + 6 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m3, [r3 + 1 * 32]           ; [17]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m2, [r3 + 1 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, m3, [r3 - 4 * 32]           ; [12]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m2, [r3 - 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m3, [r3 - 9 * 32]           ; [7]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m2, [r3 - 9 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m3, [r3 - 14 * 32]          ; [2]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m2, [r3 - 14 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m9, m2
+
+    ; Build the extended (negative-index) reference words: gather from
+    ; [r2-8] via pw_ang32_12_24 shuffle and splice ahead of the splatted
+    ; low samples so palignr can slide the window leftwards.
+    movu            xm1, [r2 - 8]
+    pshufb          xm1, [pw_ang32_12_24]
+    punpcklwd       m3, m0, m0                      ; [11 11 10 10  9  9  8  8  3  3  2  2  1  1  0  0]
+    punpckhwd       m0, m0                          ; [15 15 14 14 13 13 12 12  7  7  6  6  5  5  4  4]
+    vinserti128     m1, m1, xm0, 1                  ; [ 7  7  6  6  5  5  4  4  6  6 13 13 19 19 26 26]
+
+    palignr         m2, m3, m1, 14                  ; [11 10 10  9  9  8  8  7  3  2  2  1  1  0  0  6]
+    palignr         m13, m0, m3, 14                 ; [15 14 14 13 13 12 12 11  7  6  6  5  5  4  4  3]
+
+    pmaddwd         m10, m2, [r3 + 13 * 32]         ; [29]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m13, [r3 + 13 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    pmaddwd         m11, m2, [r3 + 8 * 32]          ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m13, [r3 + 8 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m11, m13
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    ; m13 was consumed above; re-derive it for the second half (rows 8-15).
+    palignr         m13, m0, m3, 14
+
+    pmaddwd         m4, m2, [r3 + 3 * 32]           ; [19]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 + 3 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m2, [r3 - 2 * 32]           ; [14]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m13, [r3 - 2 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m2, [r3 - 7 * 32]           ; [9]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m13, [r3 - 7 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m2, [r3 - 12 * 32]          ; [4]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 - 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    ; slide the reference window two samples further left
+    palignr         m0, m3, 10
+    palignr         m3, m1, 10
+
+    pmaddwd         m8, m3, [r3 + 15 * 32]          ; [31]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m0, [r3 + 15 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m3, [r3 + 10 * 32]          ; [26]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m0, [r3 + 10 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m3, [r3 + 5 * 32]           ; [21]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m2, m0, [r3 + 5 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m10, m2
+
+    pmaddwd         m3, [r3]                        ; [16]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    pmaddwd         m0, [r3]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 3, 0, 2, 16
+    ret
+
+;; angle 32, modes 12 and 24, row 16 to 31
+; Continuation helper for rows 16-31.  Same weight/round/shift scheme as
+; ang32_mode_12_24_0_15.  It reuses m1 (the spliced extended-reference
+; vector) left over from a preceding ang32_mode_12_24_0_15 call without
+; rebuilding it -- NOTE(review): verify every caller really calls the 0_15
+; helper first and that nothing clobbers m1 in between.
+cglobal ang32_mode_12_24_16_31
+    test            r6d, r6d
+
+    movu            m0, [r2]                        ; [15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu            m4, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    ; NOTE(review): the two interleaves below are overwritten before any use
+    ; (m3 by the next punpcklwd, m2 by the first palignr), and the m4 load
+    ; above feeds only them -- this looks like dead leftover code copied from
+    ; the 0_15 helper; confirm against upstream before removing.
+    punpcklwd       m3, m0, m4                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    punpckhwd       m2, m0, m4                      ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+
+    punpcklwd       m3, m0, m0                      ; [11 11 10 10  9  9  8  8  3  3  2  2  1  1  0  0]
+    punpckhwd       m0, m0                          ; [15 15 14 14 13 13 12 12  7  7  6  6  5  5  4  4]
+
+    palignr         m2, m3, m1, 10
+    palignr         m13, m0, m3, 10
+
+    pmaddwd         m4, m2, [r3 - 5 * 32]           ; [11]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 - 5 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m2, [r3 - 10 * 32]          ; [6]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m13, [r3 - 10 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m2, [r3 - 15 * 32]          ; [1]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m13, [r3 - 15 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    ; slide the reference window two samples further left
+    palignr         m2, m3, m1, 6
+    palignr         m13, m0, m3, 6
+
+    pmaddwd         m7, m2, [r3 + 12 * 32]          ; [28]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 + 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m2, [r3 + 7 * 32]           ; [23]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 + 7 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m2, [r3 + 2 * 32]           ; [18]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m13, [r3 + 2 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m2, [r3 - 3 * 32]          ; [13]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m13, [r3 - 3 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    pmaddwd         m11, m2, [r3 - 8 * 32]          ; [8]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m13, [r3 - 8 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m11, m13
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    ; m13 was consumed above; re-derive it for the second half (rows 24-31).
+    palignr         m13, m0, m3, 6
+
+    pmaddwd         m4, m2, [r3 - 13 * 32]          ; [3]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 - 13 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m2, m3, m1, 2
+    palignr         m13, m0, m3, 2
+
+    pmaddwd         m5, m2, [r3 + 14 * 32]          ; [30]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m13, [r3 + 14 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m2, [r3 + 9 * 32]           ; [25]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m13, [r3 + 9 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m2, [r3 + 4 * 32]           ; [20]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 + 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m2, [r3 - 1 * 32]           ; [15]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 - 1 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m2, [r3 - 6 * 32]           ; [10]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m13, [r3 - 6 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m2, [r3 - 11 * 32]         ; [5]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m13, [r3 - 11 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    pmaddwd         m2, [r3 - 16 * 32]              ; [0]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    pmaddwd         m13, [r3 - 16 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m2, m13
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 2, 0, 3, 16
+    ret
+
+; 32x32 angular intra prediction, mode 12 (10-bit, AVX2).
+; Saves 16 bytes of the reference buffer at [r2+114] on the stack, patches
+; that span with projected samples (indices 0/6/13/19/26 per the lane
+; comments), runs the two shared row helpers on both 16-column halves, then
+; restores the saved bytes.  r6d = 0 selects the transposing (mode-12) store
+; path in the helpers.
+cglobal intra_pred_ang32_12, 3,8,14, 0-16
+    movu        xm0, [r2 + 114]
+    mova        [rsp], xm0
+
+    add         r1d, r1d                ; stride in bytes (16-bit pixels)
+    lea         r4, [r1 * 3]
+    lea         r3, [ang_table_avx2 + 16 * 32]
+
+    ; NOTE(review): lanes 0-2 of xm1 are never written before the store, so
+    ; three stale words land in [r2+114..119]; presumably those positions are
+    ; never read by the helpers (and the span is restored below) -- confirm.
+    pinsrw      xm1, [r2], 7        ; [0]
+    pinsrw      xm1, [r2 + 12], 6   ; [6]
+    pinsrw      xm1, [r2 + 26], 5   ; [13]
+    pinsrw      xm1, [r2 + 38], 4   ; [19]
+    pinsrw      xm1, [r2 + 52], 3   ; [26]
+    movu        [r2 + 114], xm1
+
+    xor         r6d, r6d
+    add         r2, 128
+    lea         r7, [r0 + 8 * r1]       ; r7 = row 16 of the destination
+
+    call        ang32_mode_12_24_0_15
+
+    lea         r0, [r0 + 32]
+
+    call        ang32_mode_12_24_16_31
+
+    add         r2, 32
+    lea         r0, [r7 + 8 * r1]
+
+    call        ang32_mode_12_24_0_15
+
+    lea         r0, [r0 + 32]
+
+    call        ang32_mode_12_24_16_31
+
+    ; restore: r2 moved +160 net, so [r2-46] is the original [r2+114].
+    mova        xm0, [rsp]
+    movu        [r2 - 46], xm0
+    RET
+
+; 32x32 angular intra prediction, mode 24 (10-bit, AVX2).
+; Mirror of intra_pred_ang32_12: patches [r2-16] with projected samples from
+; the other reference arm, sets r6d = 1 to select the direct-store (mode-24)
+; path in the shared helpers, and restores the saved bytes at the end.
+cglobal intra_pred_ang32_24, 3,7,14, 0-16
+    movu        xm0, [r2 - 16]
+    mova        [rsp], xm0
+
+    add         r1d, r1d                ; stride in bytes (16-bit pixels)
+    lea         r4, [r1 * 3]
+    lea         r3, [ang_table_avx2 + 16 * 32]
+
+    ; NOTE(review): lanes 0-3 of xm1 are never written before the store, so
+    ; stale words land in the low half of [r2-16]; presumably unread by the
+    ; helpers (and restored below) -- confirm.
+    pinsrw      xm1, [r2 + 140], 7   ; [6]
+    pinsrw      xm1, [r2 + 154], 6   ; [13]
+    pinsrw      xm1, [r2 + 166], 5   ; [19]
+    pinsrw      xm1, [r2 + 180], 4   ; [26]
+    movu        [r2 - 16], xm1
+
+    xor         r6d, r6d
+    inc         r6d                  ; r6d = 1 -> mode 24
+    lea         r5, [r0 + 32]        ; r5 = right 16-column half
+
+    call        ang32_mode_12_24_0_15
+
+    call        ang32_mode_12_24_16_31
+
+    add         r2, 32
+    mov         r0, r5
+
+    call        ang32_mode_12_24_0_15
+
+    call        ang32_mode_12_24_16_31
+
+    ; restore: r2 moved +32 net, so [r2-48] is the original [r2-16].
+    mova        xm0, [rsp]
+    movu        [r2 - 48], xm0
+    RET
+
+;; angle 32, modes 13 and 23, row 0 to 15
+; Helper: rows 0-15 for modes 13 (r6d==0) / 23 (r6d!=0), same
+; weight (pmaddwd) + pd_16 rounding + >>5 + packusdw scheme as the 12/24
+; helpers; `test r6d, r6d` sets flags consumed by TRANSPOSE_STORE_AVX2
+; (macro body outside this hunk).
+cglobal ang32_mode_13_23_row_0_15
+    test            r6d, r6d
+
+    movu            m0, [r2]                        ; [15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu            m4, [r2 + 2]                    ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    punpcklwd       m3, m0, m4                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+    punpckhwd       m2, m0, m4                      ; [16 15 15 14 14 13 13 12  8  7  7  6  6  5  5  4]
+
+    pmaddwd         m4, m3, [r3 + 7 * 32]           ; [23]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m2, [r3 + 7 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 - 2 * 32]           ; [14]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m2, [r3 - 2 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m3, [r3 - 11 * 32]          ; [5]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m2, [r3 - 11 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m6, m2
+
+    ; Build the extended (negative-index) reference words from [r2-8] and
+    ; splice them ahead of the splatted low samples for the palignr slides.
+    movu            xm1, [r2 - 8]
+    pshufb          xm1, [pw_ang32_12_24]
+    punpcklwd       m3, m0, m0                      ; [11 11 10 10  9  9  8  8  3  3  2  2  1  1  0  0]
+    punpckhwd       m0, m0                          ; [15 15 14 14 13 13 12 12  7  7  6  6  5  5  4  4]
+    vinserti128     m1, m1, xm0, 1                  ; [ 7  7  6  6  5  5  4  4  4  4  7  7 11 11 14 14]
+
+    palignr         m2, m3, m1, 14
+    palignr         m13, m0, m3, 14
+
+    pmaddwd         m7, m2, [r3 + 12 * 32]          ; [28]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 + 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m2, [r3 + 3 * 32]           ; [19]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 + 3 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m2, [r3 - 6 * 32]           ; [10]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m13, [r3 - 6 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m2, [r3 - 15 * 32]         ; [1]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m13, [r3 - 15 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    ; slide the reference window two samples further left
+    palignr         m2, m3, m1, 10
+    palignr         m13, m0, m3, 10
+
+    pmaddwd         m11, m2, [r3 + 8 * 32]          ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m13, [r3 + 8 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m11, m13
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    ; m13 was consumed above; re-derive it for rows 8-15.
+    palignr         m13, m0, m3, 10
+
+    pmaddwd         m4, m2, [r3 - 1 * 32]           ; [15]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 - 1 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m2, [r3 - 10 * 32]          ; [6]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m13, [r3 - 10 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    palignr         m2, m3, m1, 6
+    palignr         m13, m0, m3, 6
+
+    pmaddwd         m6, m2, [r3 + 13 * 32]          ; [29]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m13, [r3 + 13 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m2, [r3 + 4 * 32]           ; [20]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 + 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m2, [r3 - 5 * 32]           ; [11]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 - 5 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m2, [r3 - 14 * 32]          ; [2]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m13, [r3 - 14 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m9, m13
+
+    palignr         m0, m3, 2
+    palignr         m3, m1, 2
+
+    pmaddwd         m1, m3, [r3 + 9 * 32]           ; [25]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    pmaddwd         m2, m0, [r3 + 9 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m1, m2
+
+    pmaddwd         m3, [r3]                        ; [16]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    pmaddwd         m0, [r3]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 1, 3, 0, 2, 16
+    ret
+
+;; angle 32, modes 13 and 23, row 16 to 31
+; Continuation helper for rows 16-31 of modes 13/23.  Unlike the 12/24
+; pair, this one rebuilds its own extended-reference vector (m1) from
+; [r2-8], so it does not depend on register state from the rows-0-15 call.
+cglobal ang32_mode_13_23_row_16_31
+    test            r6d, r6d
+
+    movu            m0, [r2]                        ; [11 10  9  8  7  6  5  4  3  2  1  0  4  7 11 14]
+    movu            m5, [r2 + 2]                    ; [12 11 10  9  8  7  6  5  4  3  2  1  0  4  7 11]
+
+    punpcklwd       m4, m0, m5                      ; [ 8  7  7  6  6  5  5  4  0  4  4  7  7 11 11 14]
+    punpckhwd       m2, m0, m5                      ; [12 11 11 10 10  9  9  8  4  3  3  2  2  1  1  0]
+
+    pmaddwd         m4, [r3 - 9 * 32]               ; [7]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m2, [r3 - 9 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m4, m2
+
+    ; rebuild the extended (negative-index) reference words
+    movu            xm1, [r2 - 8]
+    pshufb          xm1, [pw_ang32_12_24]           ; [18 18 21 21 25 25 28 28]
+    punpcklwd       m3, m0, m0                      ; [ 7  7  6  6  5  5  4  4  4  4  7  7 11 11 14 14]
+    punpckhwd       m0, m0                          ; [11 11 10 10  9  9  8  8  3  3  2  2  1  1  0  0]
+    vinserti128     m1, m1, xm0, 1                  ; [ 3  3  2  2  1  1  0  0 18 18 21 21 25 25 28 28]
+
+    palignr         m2, m3, m1, 14
+    palignr         m13, m0, m3, 14
+
+    pmaddwd         m5, m2, [r3 + 14 * 32]          ; [30]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m13, [r3 + 14 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m2, [r3 + 5 * 32]           ; [21]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, m13, [r3 + 5 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    pmaddwd         m7, m2, [r3 - 4 * 32]           ; [12]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 - 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    pmaddwd         m8, m2, [r3 - 13 * 32]          ; [3]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m13, [r3 - 13 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    ; slide the reference window two samples further left
+    palignr         m2, m3, m1, 10
+    palignr         m13, m0, m3, 10
+
+    pmaddwd         m9, m2, [r3 + 10 * 32]          ; [26]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m13, [r3 + 10 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m2, [r3 + 1 * 32]          ; [17]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m13, [r3 + 1 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    pmaddwd         m11, m2, [r3 - 8 * 32]          ; [8]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m13, [r3 - 8 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m11, m13
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+    palignr         m2, m3, m1, 6
+    palignr         m13, m0, m3, 6
+
+    pmaddwd         m4, m2, [r3 + 15 * 32]          ; [31]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m13, [r3 + 15 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m2, [r3 + 6 * 32]           ; [22]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m13, [r3 + 6 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m2, [r3 - 3 * 32]           ; [13]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m13, [r3 - 3 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m2, [r3 - 12 * 32]          ; [4]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, m13, [r3 - 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m0, m3, 2
+    palignr         m3, m1, 2
+
+    pmaddwd         m8, m3, [r3 + 11 * 32]          ; [27]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m0, [r3 + 11 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m3, [r3 + 2 * 32]           ; [18]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m0, [r3 + 2 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m1, m3, [r3 - 7 * 32]           ; [9]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    pmaddwd         m2, m0, [r3 - 7 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m1, m2
+
+    pmaddwd         m3, [r3 - 16 * 32]              ; [0]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    pmaddwd         m0, [r3 - 16 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 1, 3, 0, 2, 16
+    ret
+
+; 32x32 angular intra prediction, mode 13 (10-bit, AVX2).
+; Saves a full ymm (32 bytes) of the reference buffer at [r2+112], patches
+; that span with projected samples gathered via pw_ang32_13_23 shuffles,
+; runs the two 13/23 row helpers on both column halves (r6d = 0 -> mode-13
+; transposing store), then restores the saved bytes.
+cglobal intra_pred_ang32_13, 3,8,14, 0-mmsize
+    movu        m0, [r2 + 112]
+    mova        [rsp], m0
+
+    add         r1d, r1d                ; stride in bytes (16-bit pixels)
+    lea         r4, [r1 * 3]
+    lea         r3, [ang_table_avx2 + 16 * 32]
+
+    ; gather the projected left-reference samples into xm2
+    movu        xm1, [r2 + 8]
+    movu        xm2, [r2 + 36]
+    pshufb      xm1, [pw_ang32_13_23]
+    pshufb      xm2, [pw_ang32_13_23]
+    pinsrw      xm1, [r2 + 28], 4
+    pinsrw      xm2, [r2 + 56], 4
+    punpckhqdq  xm2, xm1            ; [ 4  7  8 11 18 21 25 28]
+
+    movzx       r6d, word [r2]
+    mov         [r2 + 128], r6w
+    movu        [r2 + 112], xm2
+
+    xor         r6d, r6d
+    add         r2, 128
+    lea         r7, [r0 + 8 * r1]       ; r7 = row 16 of the destination
+
+    call        ang32_mode_13_23_row_0_15
+
+    sub         r2, 8
+    lea         r0, [r0 + 32]
+
+    call        ang32_mode_13_23_row_16_31
+
+    add         r2, 40
+    lea         r0, [r7 + 8 * r1]
+
+    call        ang32_mode_13_23_row_0_15
+
+    sub         r2, 8
+    lea         r0, [r0 + 32]
+
+    call        ang32_mode_13_23_row_16_31
+
+    ; restore: r2 moved +152 net, so [r2-40] is the original [r2+112].
+    mova        m0, [rsp]
+    movu        [r2 - 40], m0
+    RET
+
+; 32x32 angular intra prediction, mode 23 (10-bit, AVX2).
+; Mirror of intra_pred_ang32_13: saves 16 bytes at [r2-16], patches them with
+; projected samples gathered from the other reference arm, sets r6d = 1 to
+; select the direct-store (mode-23) path in the shared helpers, and restores
+; the saved bytes at the end.
+cglobal intra_pred_ang32_23, 3,7,14, 0-16
+    movu        xm0, [r2 - 16]
+    mova        [rsp], xm0
+
+    add         r1d, r1d                ; stride in bytes (16-bit pixels)
+    lea         r4, [r1 * 3]
+    lea         r3, [ang_table_avx2 + 16 * 32]
+
+    ; gather the projected reference samples into xm2
+    movu        xm1, [r2 + 136]
+    movu        xm2, [r2 + 164]
+    pshufb      xm1, [pw_ang32_13_23]
+    pshufb      xm2, [pw_ang32_13_23]
+    pinsrw      xm1, [r2 + 156], 4
+    pinsrw      xm2, [r2 + 184], 4
+    punpckhqdq  xm2, xm1            ; [ 4  7  8 11 18 21 25 28]
+
+    movu        [r2 - 16], xm2
+
+    xor         r6d, r6d
+    inc         r6d                  ; r6d = 1 -> mode 23
+    lea         r5, [r0 + 32]        ; r5 = right 16-column half
+
+    call        ang32_mode_13_23_row_0_15
+
+    sub         r2, 8
+
+    call        ang32_mode_13_23_row_16_31
+
+    add         r2, 40
+    mov         r0, r5
+
+    call        ang32_mode_13_23_row_0_15
+
+    sub         r2, 8
+
+    call        ang32_mode_13_23_row_16_31
+
+    ; restore: r2 moved +24 net, so [r2-40] is the original [r2-16].
+    mova        xm0, [rsp]
+    movu        [r2 - 40], xm0
+    RET
+
+; Store 16 rows held in 8 ymm registers (%1..%8), either transposed or as-is,
+; branching on the flags set by the caller's `test r6d, r6d`:
+;   ZF set  (r6d == 0): 16x16 word transpose via punpck/palignr ladder, then
+;                       store 16 rows of 16 pixels at column offset %11;
+;   ZF clear (r6d != 0): store the 8 registers directly as 8 full 32-pixel
+;                       rows, starting at row 8 when %11 == 16, else row 0.
+; %9/%10 are scratch registers; %11 (0 or 16) doubles as byte offset and
+; unique label suffix.  Clobbers r5 (transpose path) / r7 (direct path).
+%macro TRANSPOSE_STORE_AVX2_STACK 11
+    jnz             .skip%11
+    punpckhwd       m%9,  m%1,  m%2
+    punpcklwd       m%1,  m%2
+    punpckhwd       m%2,  m%3,  m%4
+    punpcklwd       m%3,  m%4
+
+    punpckldq       m%4,  m%1,  m%3
+    punpckhdq       m%1,  m%3
+    punpckldq       m%3,  m%9,  m%2
+    punpckhdq       m%9,  m%2
+
+    punpckhwd       m%10, m%5,  m%6
+    punpcklwd       m%5,  m%6
+    punpckhwd       m%6,  m%7,  m%8
+    punpcklwd       m%7,  m%8
+
+    punpckldq       m%8,  m%5,  m%7
+    punpckhdq       m%5,  m%7
+    punpckldq       m%7,  m%10, m%6
+    punpckhdq       m%10, m%6
+
+    punpcklqdq      m%6,  m%4,  m%8
+    punpckhqdq      m%2,  m%4,  m%8
+    punpcklqdq      m%4,  m%1,  m%5
+    punpckhqdq      m%8,  m%1,  m%5
+
+    punpcklqdq      m%1,  m%3,  m%7
+    punpckhqdq      m%5,  m%3,  m%7
+    punpcklqdq      m%3,  m%9,  m%10
+    punpckhqdq      m%7,  m%9,  m%10
+
+    movu            [r0 + r1 * 0 + %11], xm%6
+    movu            [r0 + r1 * 1 + %11], xm%2
+    movu            [r0 + r1 * 2 + %11], xm%4
+    movu            [r0 + r4 * 1 + %11], xm%8
+
+    lea             r5, [r0 + r1 * 4]
+    movu            [r5 + r1 * 0 + %11], xm%1
+    movu            [r5 + r1 * 1 + %11], xm%5
+    movu            [r5 + r1 * 2 + %11], xm%3
+    movu            [r5 + r4 * 1 + %11], xm%7
+
+    lea             r5, [r5 + r1 * 4]
+    vextracti128    [r5 + r1 * 0 + %11], m%6, 1
+    vextracti128    [r5 + r1 * 1 + %11], m%2, 1
+    vextracti128    [r5 + r1 * 2 + %11], m%4, 1
+    vextracti128    [r5 + r4 * 1 + %11], m%8, 1
+
+    lea             r5, [r5 + r1 * 4]
+    vextracti128    [r5 + r1 * 0 + %11], m%1, 1
+    vextracti128    [r5 + r1 * 1 + %11], m%5, 1
+    vextracti128    [r5 + r1 * 2 + %11], m%3, 1
+    vextracti128    [r5 + r4 * 1 + %11], m%7, 1
+    jmp             .end%11
+.skip%11:
+%if %11 == 16
+    lea             r7, [r0 + 8 * r1]
+%else
+    lea             r7, [r0]
+%endif
+    movu            [r7 + r1 * 0], m%1
+    movu            [r7 + r1 * 1], m%2
+    movu            [r7 + r1 * 2], m%3
+    movu            [r7 + r4 * 1], m%4
+
+    ; advance four rows (the original had an %if here whose two branches
+    ; were identical -- collapsed to the single instruction)
+    lea             r7, [r7 + r1 * 4]
+    movu            [r7 + r1 * 0], m%5
+    movu            [r7 + r1 * 1], m%6
+    movu            [r7 + r1 * 2], m%7
+    movu            [r7 + r4 * 1], m%8
+.end%11:
+%endmacro
+
+;; angle 32, modes 14 and 22, row 0 to 15
+cglobal ang32_mode_14_22_rows_0_15
+    test            r6d, r6d
+
+    movu            m0, [r2 - 12]
+    movu            m1, [r2 - 10]
+
+    punpcklwd       m3, m0, m1
+    punpckhwd       m0, m1
+
+    movu            m1, [r2 + 4]
+    movu            m4, [r2 + 6]
+    punpcklwd       m2, m1, m4
+    punpckhwd       m1, m4
+
+    pmaddwd         m4, m3, [r3]                    ; [16]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 13 * 32]          ; [29]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 + 13 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    palignr         m7, m0, m3, 4
+    pmaddwd         m6, m7, [r3 - 6 * 32]           ; [10]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    palignr         m8, m2, m0, 4
+    pmaddwd         m9, m8, [r3 - 6 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, [r3 + 7 * 32]               ; [23]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, [r3 + 7 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m10, m0, m3, 8
+    pmaddwd         m8, m10, [r3 - 12 * 32]         ; [4]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    palignr         m12, m2, m0, 8
+    pmaddwd         m9, m12, [r3 - 12 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m10, [r3 + 1 * 32]          ; [17]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m11, m12, [r3 + 1 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m9, m11
+
+    pmaddwd         m10, [r3 + 14 * 32]             ; [30]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, [r3 + 14 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    palignr         m11, m0, m3, 12
+    pmaddwd         m11, [r3 - 5 * 32]              ; [11]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    palignr         m12, m2, m0, 12
+    pmaddwd         m12, [r3 - 5 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    TRANSPOSE_STORE_AVX2_STACK 11, 10, 9, 8, 7, 6, 5, 4, 12, 13, 16
+
+    palignr         m4, m0, m3, 12
+    pmaddwd         m4, [r3 + 8 * 32]               ; [24]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    palignr         m5, m2, m0, 12
+    pmaddwd         m5, [r3 + 8  * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m0, [r3 - 11 * 32]          ; [5]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m3, m2, [r3 - 11 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m5, m3
+
+    pmaddwd         m6, m0, [r3 + 2 * 32]           ; [18]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, m2, [r3 + 2 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    pmaddwd         m7, m0, [r3 + 15 * 32]          ; [31]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m3, m2, [r3 + 15 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m7, m3
+
+    palignr         m9, m2, m0, 4
+    palignr         m10, m1, m2, 4
+    pmaddwd         m8, m9, [r3 - 4 * 32]           ; [12]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m11, m10, [r3 - 4 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m8, m11
+
+    pmaddwd         m9, [r3 + 9 * 32]               ; [25]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, [r3 + 9 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    palignr         m1, m2, 8
+    palignr         m2, m0, 8
+
+    pmaddwd         m10, m2, [r3 - 10 * 32]         ; [6]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m12, m1, [r3 - 10 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m10, m12
+
+    pmaddwd         m2, [r3 + 3 * 32]               ; [19]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    pmaddwd         m1, [r3 + 3 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m2, m1
+    TRANSPOSE_STORE_AVX2_STACK 2, 10, 9, 8, 7, 6, 5, 4, 0, 1, 0
+    ret
+
+;; angle 32, modes 14 and 22, rows 16 to 31
+cglobal ang32_mode_14_22_rows_16_31            ; helper: rows 16-31 of a 32x32 16-bit angular prediction, shared by modes 14 and 22; expects r2 = ref-sample buffer, r3 = ang_table_avx2 + 16*32, r6d = mode flag (0 = mode 14, 1 = mode 22, set by callers)
+    test            r6d, r6d                   ; latch mode flag into EFLAGS; only SIMD ops follow - NOTE(review): ZF presumably consumed inside TRANSPOSE_STORE_AVX2_STACK, confirm against the macro
+
+    movu            m0, [r2 - 24]              ; unaligned ref loads offset by one sample...
+    movu            m1, [r2 - 22]
+
+    punpcklwd       m3, m0, m1                 ; ...interleaved into (ref[i], ref[i+1]) word pairs for pmaddwd
+    punpckhwd       m0, m1
+
+    movu            m1, [r2 - 8]
+    movu            m4, [r2 - 6]
+    punpcklwd       m2, m1, m4
+    punpckhwd       m1, m4
+
+    pmaddwd         m4, m3, [r3 - 16 * 32]          ; [0]  weight-table row index = 16 + offset/32 (value in brackets)
+    paddd           m4, [pd_16]                     ; (a*w0 + b*w1 + 16) >> 5 : rounded 5-bit interpolation
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 16 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 - 3 * 32]           ; [13]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 - 3 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, m3, [r3 + 10 * 32]          ; [26]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m0, [r3 + 10 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    palignr         m8, m0, m3, 4                   ; shift the ref window by one word pair per output row
+    palignr         m9, m2, m0, 4
+    pmaddwd         m7, m8, [r3 - 9 * 32]           ; [7]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m10, m9, [r3 - 9 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m7, m10
+
+    pmaddwd         m8, [r3 + 4 * 32]               ; [20]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 + 4 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m11, m0, m3, 8
+    palignr         m12, m2, m0, 8
+    pmaddwd         m9, m11, [r3 - 15 * 32]         ; [1]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m12, [r3 - 15 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m11, [r3 - 2 * 32]         ; [14]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m13, m12, [r3 - 2 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m10, m13
+
+    pmaddwd         m11, [r3 + 11 * 32]             ; [27]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m12, [r3 + 11 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    TRANSPOSE_STORE_AVX2_STACK 11, 10, 9, 8, 7, 6, 5, 4, 12, 13, 16    ; flush first 8 result rows (store layout depends on the mode flag set above)
+
+    palignr         m5, m0, m3, 12
+    palignr         m6, m2, m0, 12
+    pmaddwd         m4, m5, [r3 - 8 * 32]           ; [8]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m7, m6, [r3 - 8  * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m4, m7
+
+    pmaddwd         m5, [r3 + 5 * 32]               ; [21]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, [r3 + 5 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    pmaddwd         m6, m0, [r3 - 14 * 32]          ; [2]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, m2, [r3 - 14 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    pmaddwd         m7, m0, [r3 - 1 * 32]           ; [15]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m3, m2, [r3 - 1 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m7, m3
+
+    pmaddwd         m8, m0, [r3 + 12 * 32]          ; [28]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m11, m2, [r3 + 12 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m8, m11
+
+    palignr         m10, m2, m0, 4
+    palignr         m11, m1, m2, 4
+
+    pmaddwd         m9, m10, [r3 - 7 * 32]          ; [9]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, m11, [r3 - 7 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m10, [r3 + 6 * 32]              ; [22]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, [r3 + 6 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m1, m2, 8
+    palignr         m2, m0, 8
+
+    pmaddwd         m2, [r3 - 13 * 32]              ; [3]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    pmaddwd         m1, [r3 - 13 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m2, m1
+    TRANSPOSE_STORE_AVX2_STACK 2, 10, 9, 8, 7, 6, 5, 4, 0, 1, 0    ; flush second 8 result rows
+    ret
+
+cglobal intra_pred_ang32_14, 3,8,14        ; 32x32 16-bit intra prediction, angular mode 14; r0 = dst, r1 = dst stride (in pixels, doubled below), r2 = reference-sample array
+    mov         r6, rsp                    ; save caller rsp so the stack can be realigned
+    sub         rsp, 4*mmsize+gprsize
+    and         rsp, ~63                   ; 64-byte align the scratch area
+    mov         [rsp+4*mmsize], r6         ; stash old rsp above the scratch
+
+    movu        m0, [r2 + 128]             ; copy the second half of the reference array...
+    movu        m1, [r2 + 160]
+    movd        xm2, [r2 + 192]
+
+    mova        [rsp + 1*mmsize], m0       ; ...into the aligned scratch ref buffer
+    mova        [rsp + 2*mmsize], m1
+    movd        [rsp + 3*mmsize], xm2
+
+    add         r1d, r1d                   ; stride in bytes (pixels are 16-bit)
+    lea         r4, [r1 * 3]               ; 3*stride for the store macro
+    lea         r3, [ang_table_avx2 + 16 * 32]  ; r3 = centre row of the angular weight table
+
+    movu        xm1, [r2 + 4]              ; gather the projected refs needed below the origin (see bracketed layout)
+    movu        xm2, [r2 + 24]
+    movu        xm3, [r2 + 44]
+    pshufb      xm1, [pw_ang32_14_22]
+    pshufb      xm2, [pw_ang32_14_22]
+    pshufb      xm3, [pw_ang32_14_22]
+    pinsrw      xm1, [r2 + 20], 4
+    pinsrw      xm2, [r2 + 40], 4
+    pinsrw      xm3, [r2 + 60], 4
+
+    punpckhqdq  xm2, xm1            ; [ 2  5  7 10 12 15 17 20]
+    punpckhqdq  xm3, xm3            ; [22 25 27 30 22 25 27 30]
+
+    movzx       r6d, word [r2]             ; first sample of the ref array
+    mov         [rsp + 1*mmsize], r6w
+    movu        [rsp + 16], xm2
+    movq        [rsp + 8], xm3
+
+    xor         r6d, r6d                   ; r6d = 0 -> mode-14 path inside the shared helpers
+    lea         r2, [rsp + 1*mmsize]       ; r2 now points at the rebuilt ref buffer
+    lea         r7, [r0 + 8 * r1]          ; remember dst + 8 rows for the second half
+
+    call        ang32_mode_14_22_rows_0_15 ; four calls cover the 32x32 block in 16x16 quadrants
+
+    lea         r0, [r0 + 32]
+
+    call        ang32_mode_14_22_rows_16_31
+
+    add         r2, 32
+    lea         r0, [r7 + 8 * r1]
+
+    call        ang32_mode_14_22_rows_0_15
+
+    lea         r0, [r0 + 32]
+
+    call        ang32_mode_14_22_rows_16_31
+
+    mov         rsp, [rsp+4*mmsize]        ; restore caller rsp
+    RET
+
+cglobal intra_pred_ang32_22, 3,8,14        ; 32x32 16-bit intra prediction, angular mode 22 (mirror of mode 14, selected via r6d=1); r0 = dst, r1 = stride in pixels, r2 = reference-sample array
+    mov         r6, rsp                    ; save caller rsp so the stack can be realigned
+    sub         rsp, 4*mmsize+gprsize
+    and         rsp, ~63                   ; 64-byte align the scratch area
+    mov         [rsp+4*mmsize], r6         ; stash old rsp above the scratch
+
+    movu        m0, [r2]                   ; copy the first half of the reference array (mode 22 uses the opposite half from mode 14)...
+    movu        m1, [r2 + 32]
+    movd        xm2, [r2 + 64]
+
+    mova        [rsp + 1*mmsize], m0       ; ...into the aligned scratch ref buffer
+    mova        [rsp + 2*mmsize], m1
+    movd        [rsp + 3*mmsize], xm2
+
+    add         r1d, r1d                   ; stride in bytes (pixels are 16-bit)
+    lea         r4, [r1 * 3]               ; 3*stride for the store macro
+    lea         r3, [ang_table_avx2 + 16 * 32]  ; r3 = centre row of the angular weight table
+
+    movu        xm1, [r2 + 132]            ; gather the projected refs from the other half (see bracketed layout)
+    movu        xm2, [r2 + 152]
+    movu        xm3, [r2 + 172]
+    pshufb      xm1, [pw_ang32_14_22]
+    pshufb      xm2, [pw_ang32_14_22]
+    pshufb      xm3, [pw_ang32_14_22]
+    pinsrw      xm1, [r2 + 148], 4
+    pinsrw      xm2, [r2 + 168], 4
+    pinsrw      xm3, [r2 + 188], 4
+
+    punpckhqdq  xm2, xm1            ; [ 2  5  7 10 12 15 17 20]
+    punpckhqdq  xm3, xm3            ; [22 25 27 30 22 25 27 30]
+
+    movu        [rsp + 16], xm2
+    movq        [rsp + 8], xm3
+
+    xor         r6d, r6d
+    inc         r6d                        ; r6d = 1 -> mode-22 path inside the shared helpers
+    lea         r2, [rsp + 1*mmsize]       ; r2 now points at the rebuilt ref buffer
+    lea         r5, [r0 + 32]              ; remember dst + 16 columns for the second half
+
+    call        ang32_mode_14_22_rows_0_15 ; four calls cover the 32x32 block in 16x16 quadrants
+
+    lea         r0, [r0 + 8 * r1]
+    lea         r0, [r0 + 8 * r1]
+
+    call        ang32_mode_14_22_rows_16_31
+
+    add         r2, 32
+    mov         r0, r5
+
+    call        ang32_mode_14_22_rows_0_15
+
+    lea         r0, [r0 + 8 * r1]
+    lea         r0, [r0 + 8 * r1]
+
+    call        ang32_mode_14_22_rows_16_31
+
+    mov         rsp, [rsp+4*mmsize]        ; restore caller rsp
+    RET
+
+;; angle 32, modes 15 and 21, rows 0 to 15
+cglobal ang32_mode_15_21_rows_0_15             ; helper: rows 0-15 of a 32x32 16-bit angular prediction, shared by modes 15 and 21; expects r2 = ref-sample buffer, r3 = ang_table_avx2 + 16*32, r6d = mode flag (0 = mode 15, 1 = mode 21)
+    test            r6d, r6d                   ; latch mode flag into EFLAGS; only SIMD ops follow - NOTE(review): ZF presumably consumed inside TRANSPOSE_STORE_AVX2_STACK, confirm against the macro
+
+    movu            m0, [r2 - 16]              ; unaligned ref loads offset by one sample...
+    movu            m1, [r2 - 14]
+
+    punpcklwd       m3, m0, m1                 ; ...interleaved into (ref[i], ref[i+1]) word pairs for pmaddwd
+    punpckhwd       m0, m1
+
+    movu            m1, [r2]
+    movu            m4, [r2 + 2]
+    punpcklwd       m2, m1, m4
+    punpckhwd       m1, m4
+
+    pmaddwd         m4, m3, [r3]                    ; [16]  weight-table row index = 16 + offset/32 (value in brackets)
+    paddd           m4, [pd_16]                     ; (a*w0 + b*w1 + 16) >> 5 : rounded 5-bit interpolation
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m6, m0, m3, 4                   ; shift the ref window by one word pair per output row
+    palignr         m7, m2, m0, 4
+    pmaddwd         m5, m6, [r3 - 15 * 32]          ; [1]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m7, [r3 - 15 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, [r3 + 2 * 32]               ; [18]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, [r3 + 2 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m8, m0, m3, 8
+    palignr         m9, m2, m0, 8
+    pmaddwd         m7, m8, [r3 - 13 * 32]          ; [3]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m10, m9, [r3 - 13 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m7, m10
+
+    pmaddwd         m8, [r3 + 4 * 32]               ; [20]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 + 4 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m10, m0, m3, 12
+    palignr         m11, m2, m0, 12
+    pmaddwd         m9, m10, [r3 - 11 * 32]         ; [5]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m12, m11, [r3 - 11 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m9, m12
+
+    pmaddwd         m10, [r3 + 6 * 32]              ; [22]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, [r3 + 6 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    pmaddwd         m11, m0, [r3 - 9 * 32]          ; [7]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m12, m2, [r3 - 9 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    TRANSPOSE_STORE_AVX2_STACK 11, 10, 9, 8, 7, 6, 5, 4, 12, 13, 16    ; flush first 8 result rows (store layout depends on the mode flag set above)
+
+    pmaddwd         m4, m0, [r3 + 8 * 32]           ; [24]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m2, [r3 + 8  * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m6, m2, m0, 4
+    palignr         m7, m1, m2, 4
+    pmaddwd         m5, m6, [r3 - 7 * 32]           ; [9]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m3, m7, [r3 - 7 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m5, m3
+
+    pmaddwd         m6, [r3 + 10 * 32]              ; [26]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, [r3 + 10 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m8, m2, m0, 8
+    palignr         m9, m1, m2, 8
+    pmaddwd         m7, m8, [r3 - 5 * 32]           ; [11]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m3, m9, [r3 - 5 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m7, m3
+
+    pmaddwd         m8, [r3 + 12 * 32]              ; [28]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 + 12 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m10, m2, m0, 12
+    palignr         m11, m1, m2, 12
+    pmaddwd         m9, m10, [r3 - 3 * 32]          ; [13]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, m11, [r3 - 3 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    pmaddwd         m10, [r3 + 14 * 32]             ; [30]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, [r3 + 14 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    pmaddwd         m2, [r3 - 1 * 32]               ; [15]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    pmaddwd         m1, [r3 - 1 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m2, m1
+    TRANSPOSE_STORE_AVX2_STACK 2, 10, 9, 8, 7, 6, 5, 4, 0, 1, 0    ; flush second 8 result rows
+    ret
+
+;; angle 32, modes 15 and 21, rows 16 to 31
+cglobal ang32_mode_15_21_rows_16_31            ; helper: rows 16-31 of a 32x32 16-bit angular prediction, shared by modes 15 and 21; expects r2 = ref-sample buffer, r3 = ang_table_avx2 + 16*32, r6d = mode flag (0 = mode 15, 1 = mode 21)
+    test            r6d, r6d                   ; latch mode flag into EFLAGS; only SIMD ops follow - NOTE(review): ZF presumably consumed inside TRANSPOSE_STORE_AVX2_STACK, confirm against the macro
+
+    movu            m0, [r2 - 32]              ; unaligned ref loads offset by one sample...
+    movu            m1, [r2 - 30]
+
+    punpcklwd       m3, m0, m1                 ; ...interleaved into (ref[i], ref[i+1]) word pairs for pmaddwd
+    punpckhwd       m0, m1
+
+    movu            m1, [r2 - 16]
+    movu            m4, [r2 - 14]
+    punpcklwd       m2, m1, m4
+    punpckhwd       m1, m4
+
+    pmaddwd         m4, m3, [r3 - 16 * 32]          ; [0]  weight-table row index = 16 + offset/32 (value in brackets)
+    paddd           m4, [pd_16]                     ; (a*w0 + b*w1 + 16) >> 5 : rounded 5-bit interpolation
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 16 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 1 * 32]           ; [17]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 + 1 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    palignr         m7, m0, m3, 4                   ; shift the ref window by one word pair per output row
+    palignr         m8, m2, m0, 4
+    pmaddwd         m6, m7, [r3 - 14 * 32]          ; [2]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m8, [r3 - 14 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, [r3 + 3 * 32]               ; [19]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, [r3 + 3 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m9, m0, m3, 8
+    palignr         m10, m2, m0, 8
+    pmaddwd         m8, m9, [r3 - 12 * 32]          ; [4]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m11, m10, [r3 - 12 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m8, m11
+
+    pmaddwd         m9, [r3 + 5 * 32]               ; [21]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, [r3 + 5 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    palignr         m11, m0, m3, 12
+    palignr         m12, m2, m0, 12
+    pmaddwd         m10, m11, [r3 - 10 * 32]        ; [6]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m13, m12, [r3 - 10 * 32]
+    paddd           m13, [pd_16]
+    psrld           m13, 5
+    packusdw        m10, m13
+
+    pmaddwd         m11, [r3 + 7 * 32]              ; [23]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m12, [r3 + 7 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    TRANSPOSE_STORE_AVX2_STACK 11, 10, 9, 8, 7, 6, 5, 4, 12, 13, 16    ; flush first 8 result rows (store layout depends on the mode flag set above)
+
+    pmaddwd         m4, m0, [r3 - 8 * 32]           ; [8]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m7, m2, [r3 - 8  * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m4, m7
+
+    pmaddwd         m5, m0, [r3 + 9 * 32]           ; [25]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, m2, [r3 + 9 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    palignr         m7, m2, m0, 4
+    palignr         m8, m1, m2, 4
+    pmaddwd         m6, m7, [r3 - 6 * 32]           ; [10]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m3, m8, [r3 - 6 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m6, m3
+
+    pmaddwd         m7, [r3 + 11 * 32]              ; [27]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, [r3 + 11 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m9, m2, m0, 8
+    palignr         m3, m1, m2, 8
+    pmaddwd         m8, m9, [r3 - 4 * 32]           ; [12]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m11, m3, [r3 - 4 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m8, m11
+
+    pmaddwd         m9, [r3 + 13 * 32]              ; [29]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, [r3 + 13 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    palignr         m1, m2, 12
+    palignr         m2, m0, 12
+    pmaddwd         m10, m2, [r3 - 2 * 32]          ; [14]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, m1, [r3 - 2 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    pmaddwd         m2, [r3 + 15 * 32]              ; [31]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    pmaddwd         m1, [r3 + 15 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m2, m1
+    TRANSPOSE_STORE_AVX2_STACK 2, 10, 9, 8, 7, 6, 5, 4, 0, 1, 0    ; flush second 8 result rows
+    ret
+
+cglobal intra_pred_ang32_15, 3,8,14        ; 32x32 16-bit intra prediction, angular mode 15; r0 = dst, r1 = dst stride (in pixels, doubled below), r2 = reference-sample array
+    mov         r6, rsp                    ; save caller rsp so the stack can be realigned
+    sub         rsp, 4*mmsize+gprsize
+    and         rsp, ~63                   ; 64-byte align the scratch area
+    mov         [rsp+4*mmsize], r6         ; stash old rsp above the scratch
+
+    movu        m0, [r2 + 128]             ; copy the second half of the reference array...
+    movu        m1, [r2 + 160]
+    movd        xm2, [r2 + 192]
+
+    mova        [rsp + 1*mmsize], m0       ; ...into the aligned scratch ref buffer
+    mova        [rsp + 2*mmsize], m1
+    movd        [rsp + 3*mmsize], xm2
+
+    add         r1d, r1d                   ; stride in bytes (pixels are 16-bit)
+    lea         r4, [r1 * 3]               ; 3*stride for the store macro
+    lea         r3, [ang_table_avx2 + 16 * 32]  ; r3 = centre row of the angular weight table
+
+    movu        xm1, [r2 + 4]              ; gather the projected refs via the mode-15/21 shuffle
+    movu        xm2, [r2 + 18]
+    movu        xm3, [r2 + 34]
+    movu        xm4, [r2 + 48]
+    pshufb      xm1, [pw_ang32_15_21]
+    pshufb      xm2, [pw_ang32_15_21]
+    pshufb      xm3, [pw_ang32_15_21]
+    pshufb      xm4, [pw_ang32_15_21]
+
+    punpckhqdq  xm2, xm1
+    punpckhqdq  xm4, xm3
+
+    movzx       r6d, word [r2]             ; first sample of the ref array
+    mov         [rsp + 1*mmsize], r6w
+    movu        [rsp + 16], xm2
+    movu        [rsp], xm4
+
+    xor         r6d, r6d                   ; r6d = 0 -> mode-15 path inside the shared helpers
+    lea         r2, [rsp + 1*mmsize]       ; r2 now points at the rebuilt ref buffer
+    lea         r7, [r0 + 8 * r1]          ; remember dst + 8 rows for the second half
+
+    call        ang32_mode_15_21_rows_0_15 ; four calls cover the 32x32 block in 16x16 quadrants
+
+    lea         r0, [r0 + 32]
+
+    call        ang32_mode_15_21_rows_16_31
+
+    add         r2, 32
+    lea         r0, [r7 + 8 * r1]
+
+    call        ang32_mode_15_21_rows_0_15
+
+    lea         r0, [r0 + 32]
+
+    call        ang32_mode_15_21_rows_16_31
+
+    mov         rsp, [rsp+4*mmsize]        ; restore caller rsp
+    RET
+
+cglobal intra_pred_ang32_21, 3,8,14        ; 32x32 16-bit intra prediction, angular mode 21 (mirror of mode 15, selected via r6d=1); r0 = dst, r1 = stride in pixels, r2 = reference-sample array
+    mov         r6, rsp                    ; save caller rsp so the stack can be realigned
+    sub         rsp, 4*mmsize+gprsize
+    and         rsp, ~63                   ; 64-byte align the scratch area
+    mov         [rsp+4*mmsize], r6         ; stash old rsp above the scratch
+
+    movu        m0, [r2]                   ; copy the first half of the reference array (mode 21 uses the opposite half from mode 15)...
+    movu        m1, [r2 + 32]
+    movd        xm2, [r2 + 64]
+
+    mova        [rsp + 1*mmsize], m0       ; ...into the aligned scratch ref buffer
+    mova        [rsp + 2*mmsize], m1
+    movd        [rsp + 3*mmsize], xm2
+
+    add         r1d, r1d                   ; stride in bytes (pixels are 16-bit)
+    lea         r4, [r1 * 3]               ; 3*stride for the store macro
+    lea         r3, [ang_table_avx2 + 16 * 32]  ; r3 = centre row of the angular weight table
+
+    movu        xm1, [r2 + 132]            ; gather the projected refs from the other half via the mode-15/21 shuffle
+    movu        xm2, [r2 + 146]
+    movu        xm3, [r2 + 162]
+    movu        xm4, [r2 + 176]
+    pshufb      xm1, [pw_ang32_15_21]
+    pshufb      xm2, [pw_ang32_15_21]
+    pshufb      xm3, [pw_ang32_15_21]
+    pshufb      xm4, [pw_ang32_15_21]
+
+    punpckhqdq  xm2, xm1
+    punpckhqdq  xm4, xm3
+
+    movu        [rsp + 16], xm2
+    movu        [rsp], xm4
+
+    xor         r6d, r6d
+    inc         r6d                        ; r6d = 1 -> mode-21 path inside the shared helpers
+    lea         r2, [rsp + 1*mmsize]       ; r2 now points at the rebuilt ref buffer
+    lea         r5, [r0 + 32]              ; remember dst + 16 columns for the second half
+
+    call        ang32_mode_15_21_rows_0_15 ; four calls cover the 32x32 block in 16x16 quadrants
+
+    lea         r0, [r0 + 8 * r1]
+    lea         r0, [r0 + 8 * r1]
+
+    call        ang32_mode_15_21_rows_16_31
+
+    add         r2, 32
+    mov         r0, r5
+
+    call        ang32_mode_15_21_rows_0_15
+
+    lea         r0, [r0 + 8 * r1]
+    lea         r0, [r0 + 8 * r1]
+
+    call        ang32_mode_15_21_rows_16_31
+
+    mov         rsp, [rsp+4*mmsize]        ; restore caller rsp
+    RET
+
+;; angle 32, modes 16 and 20, rows 0 to 15
+cglobal ang32_mode_16_20_rows_0_15             ; helper: rows 0-15 of a 32x32 16-bit angular prediction, shared by modes 16 and 20; expects r2 = ref-sample buffer, r3 = ang_table_avx2 + 16*32, r6d = mode flag (0 = mode 16, 1 = mode 20)
+    test            r6d, r6d                   ; latch mode flag into EFLAGS; only SIMD ops follow - NOTE(review): ZF presumably consumed inside TRANSPOSE_STORE_AVX2_STACK, confirm against the macro
+
+    movu            m0, [r2 - 20]              ; unaligned ref loads offset by one sample, interleaved into word pairs for pmaddwd
+    movu            m1, [r2 - 18]
+
+    punpcklwd       m3, m0, m1
+    punpckhwd       m0, m1
+
+    movu            m1, [r2 - 4]                    ; [ 3  2  0 -1 -2 -3 -4 -5  -6  -7  -8  -9 -10 -11 -12 -13]
+    movu            m4, [r2 - 2]                    ; [ 2  0 -1 -2 -3 -4 -5 -6  -7  -8  -9 -10 -11 -12 -13 -14]
+    punpcklwd       m2, m1, m4                      ; [-3 -2 -4 -3 -5 -4 -6 -5 -11 -10 -12 -11 -13 -12 -14 -13]
+    punpckhwd       m1, m4                          ; [ 2  3  2  0 -1  0 -2 -1  -7  -6  -8  -7  -9  -8 -10  -9]
+
+    pmaddwd         m4, m3, [r3]                    ; [16]  weight-table row index = 16 + offset/32 (value in brackets)
+    paddd           m4, [pd_16]                     ; (a*w0 + b*w1 + 16) >> 5 : rounded 5-bit interpolation
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m6, m0, m3, 4                   ; shift the ref window by one word pair per output row
+    palignr         m7, m2, m0, 4
+    pmaddwd         m5, m6, [r3 - 11 * 32]          ; [5]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m7, [r3 - 11 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    pmaddwd         m6, [r3 + 10 * 32]              ; [26]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m7, [r3 + 10 * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m6, m7
+
+    palignr         m8, m0, m3, 8
+    palignr         m9, m2, m0, 8
+    pmaddwd         m7, m8, [r3 - 1 * 32]           ; [15]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m10, m9, [r3 - 1 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m7, m10
+
+    palignr         m9, m0, m3, 12
+    palignr         m12, m2, m0, 12
+    pmaddwd         m8, m9, [r3 - 12 * 32]          ; [4]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m10, m12, [r3 - 12 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m8, m10
+
+    pmaddwd         m9, [r3 + 9 * 32]               ; [25]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m12, [r3 + 9 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m9, m12
+
+    pmaddwd         m10, m0, [r3 - 2 * 32]          ; [14]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, m2, [r3 - 2 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m11, m2, m0, 4
+    palignr         m12, m1, m2, 4
+    pmaddwd         m11, [r3 - 13 * 32]             ; [3]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m12, [r3 - 13 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    TRANSPOSE_STORE_AVX2_STACK 11, 10, 9, 8, 7, 6, 5, 4, 12, 13, 16    ; flush first 8 result rows (store layout depends on the mode flag set above)
+
+    palignr         m4, m2, m0, 4
+    palignr         m5, m1, m2, 4
+    pmaddwd         m4, [r3 + 8 * 32]               ; [24]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, [r3 + 8  * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m5, m2, m0, 8
+    palignr         m3, m1, m2, 8
+    pmaddwd         m5, [r3 - 3 * 32]               ; [13]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m3, [r3 - 3 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m5, m3
+
+    palignr         m7, m2, m0, 12
+    palignr         m3, m1, m2, 12
+    pmaddwd         m6, m7, [r3 - 14 * 32]          ; [2]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m3, [r3 - 14 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, [r3 + 7 * 32]               ; [23]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m3, [r3 + 7 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m7, m3
+
+    pmaddwd         m8, m2, [r3 - 4 * 32]           ; [12]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m1, [r3 - 4 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    movu            m0, [r2 - 2]                    ; reload further refs: this mode advances past the first window
+    movu            m1, [r2]
+
+    punpcklwd       m3, m0, m1
+    punpckhwd       m0, m1
+
+    movu            m2, [r2 + 14]
+    movu            m1, [r2 + 16]
+    punpcklwd       m2, m1
+
+    pmaddwd         m9, m3, [r3 - 15 * 32]          ; [1]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, m0, [r3 - 15 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    pmaddwd         m10, m3, [r3 + 6 * 32]          ; [22]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, m0, [r3 + 6 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m2, m0, 4
+    palignr         m0, m3, 4
+    pmaddwd         m0, [r3 - 5 * 32]               ; [11]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    pmaddwd         m2, [r3 - 5 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m0, m2
+    TRANSPOSE_STORE_AVX2_STACK 0, 10, 9, 8, 7, 6, 5, 4, 2, 1, 0    ; flush second 8 result rows
+    ret
+
+;; angle 32, modes 16 and 20, rows 16 to 31
+cglobal ang32_mode_16_20_rows_16_31
+    test            r6d, r6d
+
+    movu            m0, [r2 - 40]
+    movu            m1, [r2 - 38]
+
+    punpcklwd       m3, m0, m1
+    punpckhwd       m0, m1
+
+    movu            m1, [r2 - 24]
+    movu            m4, [r2 - 22]
+    punpcklwd       m2, m1, m4
+    punpckhwd       m1, m4
+
+    pmaddwd         m4, m3, [r3 - 16 * 32]          ; [0]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 16 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 5 * 32]           ; [21]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 + 5 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    palignr         m7, m0, m3, 4
+    palignr         m8, m2, m0, 4
+    pmaddwd         m6, m7, [r3 - 6 * 32]           ; [10]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m8, [r3 - 6 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    pmaddwd         m7, [r3 + 15 * 32]              ; [31]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m8, [r3 + 15 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m7, m8
+
+    palignr         m8, m0, m3, 8
+    palignr         m9, m2, m0, 8
+    pmaddwd         m8, [r3 + 4 * 32]               ; [20]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 + 4 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m10, m0, m3, 12
+    palignr         m11, m2, m0, 12
+    pmaddwd         m9, m10, [r3 - 7 * 32]          ; [9]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m12, m11, [r3 - 7 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m9, m12
+
+    pmaddwd         m10, [r3 + 14 * 32]             ; [30]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, [r3 + 14 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    pmaddwd         m11, m0, [r3 + 3 * 32]          ; [19]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m12, m2, [r3 + 3 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    TRANSPOSE_STORE_AVX2_STACK 11, 10, 9, 8, 7, 6, 5, 4, 12, 13, 16
+
+    palignr         m5, m2, m0, 4
+    palignr         m6, m1, m2, 4
+    pmaddwd         m4, m5, [r3 - 8 * 32]           ; [8]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m7, m6, [r3 - 8  * 32]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    packusdw        m4, m7
+
+    pmaddwd         m5, [r3 + 13 * 32]              ; [29]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m6, [r3 + 13 * 32]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    palignr         m6, m2, m0, 8
+    palignr         m3, m1, m2, 8
+    pmaddwd         m6, [r3 + 2 * 32]               ; [18]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m3, [r3 + 2 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m6, m3
+
+    palignr         m8, m2, m0, 12
+    palignr         m9, m1, m2, 12
+    pmaddwd         m7, m8, [r3 - 9 * 32]           ; [7]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m10, m9, [r3 - 9 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m7, m10
+
+    pmaddwd         m8, [r3 + 12 * 32]              ; [28]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, [r3 + 12 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    pmaddwd         m9, m2, [r3 + 1 * 32]           ; [17]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m3, m1, [r3 + 1 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m9, m3
+
+    movu            m0, [r2 - 22]
+    movu            m1, [r2 - 20]
+    punpcklwd       m3, m0, m1
+    punpckhwd       m0, m1
+
+    pmaddwd         m10, m3, [r3 - 10 * 32]         ; [6]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, m0, [r3 - 10 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    pmaddwd         m3, [r3 + 11 * 32]              ; [27]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    pmaddwd         m0, [r3 + 11 * 32]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    packusdw        m3, m0
+    TRANSPOSE_STORE_AVX2_STACK 3, 10, 9, 8, 7, 6, 5, 4, 0, 1, 0
+    ret
+
+; 32x32 angular intra prediction, mode 16 (16-bit pixels, AVX2).
+; Args: r0 = dst, r1 = dst stride (in pixels on entry), r2 = reference array.
+; Builds a 64-byte-aligned scratch copy of the reference samples on the
+; stack, prefixes it with shuffled extension samples, then emits the four
+; 16x16 quadrants via the ang32_mode_16_20_rows_* helpers (defined elsewhere;
+; presumably they interpolate and transpose-store 16 rows each — confirm there).
+cglobal intra_pred_ang32_16, 3,8,14
+    mov         r6, rsp                         ; keep original rsp
+    sub         rsp, 5*mmsize+gprsize
+    and         rsp, ~63                        ; align scratch buffer to 64 bytes
+    mov         [rsp+5*mmsize], r6              ; saved rsp lives just above scratch
+
+    ; copy 32+1 left-reference words (input offsets 128..194) into scratch
+    movu        m0, [r2 + 128]
+    movu        m1, [r2 + 160]
+    movd        xm2, [r2 + 192]
+
+    mova        [rsp + 2*mmsize], m0
+    mova        [rsp + 3*mmsize], m1
+    movd        [rsp + 4*mmsize], xm2
+
+    add         r1d, r1d                        ; stride in bytes (16-bit pixels)
+    lea         r4, [r1 * 3]                    ; 3 * stride, used by store helpers
+    lea         r3, [ang_table_avx2 + 16 * 32]  ; r3 centered: [r3 + n*32] = entry 16+n
+
+    ; gather above-reference words and reorder them with pw_ang32_16_20 to
+    ; form the projected extension placed below the scratch base
+    movu        xm1, [r2 + 4]
+    movu        xm2, [r2 + 16]
+    movu        xm3, [r2 + 28]
+    movu        xm4, [r2 + 40]
+    movu        xm5, [r2 + 52]
+    pshufb      xm1, [pw_ang32_16_20]
+    pshufb      xm2, [pw_ang32_16_20]
+    pshufb      xm3, [pw_ang32_16_20]
+    pshufb      xm4, [pw_ang32_16_20]
+    pshufb      xm5, [pw_ang32_16_20]
+
+    punpckhqdq  xm2, xm1
+    punpckhqdq  xm4, xm3
+    punpckhqdq  xm5, xm5
+
+    movzx       r6d, word [r2]                  ; ref[0] (top-left corner sample)
+    mov         [rsp + 2*mmsize], r6w           ; overwrite first scratch word with it
+    movu        [rsp + 48], xm2
+    movu        [rsp + 32], xm4
+    movq        [rsp + 24], xm5
+
+    xor         r6d, r6d                        ; r6d = 0: mode-16 path in the helpers
+    lea         r2, [rsp + 2*mmsize]            ; r2 now points at the scratch refs
+    lea         r7, [r0 + 8 * r1]               ; remember dst row 8 for quadrant 3
+
+    call        ang32_mode_16_20_rows_0_15
+
+    lea         r0, [r0 + 32]                   ; right half of top 16 rows
+
+    call        ang32_mode_16_20_rows_16_31
+
+    add         r2, 32                          ; advance refs by 16 samples
+    lea         r0, [r7 + 8 * r1]               ; dst row 16
+
+    call        ang32_mode_16_20_rows_0_15
+
+    lea         r0, [r0 + 32]                   ; right half of bottom 16 rows
+
+    call        ang32_mode_16_20_rows_16_31
+
+    mov         rsp, [rsp+5*mmsize]             ; restore caller's stack pointer
+    RET
+
+; 32x32 angular intra prediction, mode 20 (16-bit pixels, AVX2).
+; Mirror of intra_pred_ang32_16: same ang32_mode_16_20_rows_* helpers, but
+; r6d = 1 selects the mode-20 store path inside them (presumably the
+; non-transposed store — confirm in the helper), and the main/extension
+; reference roles are swapped (main refs from input offset 0, extension
+; from offset 130), since mode 20 is the transpose of mode 16.
+cglobal intra_pred_ang32_20, 3,8,14
+    mov         r6, rsp                         ; keep original rsp
+    sub         rsp, 5*mmsize+gprsize
+    and         rsp, ~63                        ; align scratch buffer to 64 bytes
+    mov         [rsp+5*mmsize], r6              ; saved rsp lives just above scratch
+
+    ; copy 32+1 main reference words (input offsets 0..66) into scratch
+    movu        m0, [r2]
+    movu        m1, [r2 + 32]
+    movd        xm2, [r2 + 64]
+
+    mova        [rsp + 2*mmsize], m0
+    mova        [rsp + 3*mmsize], m1
+    movd        [rsp + 4*mmsize], xm2
+
+    add         r1d, r1d                        ; stride in bytes (16-bit pixels)
+    lea         r4, [r1 * 3]                    ; 3 * stride, used by store helpers
+    lea         r3, [ang_table_avx2 + 16 * 32]  ; [r3 + n*32] = coefficient entry 16+n
+
+    ; gather side-reference words (offsets 130..) and reorder for the extension
+    movu        xm1, [r2 + 132]
+    movu        xm2, [r2 + 144]
+    movu        xm3, [r2 + 156]
+    movu        xm4, [r2 + 168]
+    movu        xm5, [r2 + 180]
+    pshufb      xm1, [pw_ang32_16_20]
+    pshufb      xm2, [pw_ang32_16_20]
+    pshufb      xm3, [pw_ang32_16_20]
+    pshufb      xm4, [pw_ang32_16_20]
+    pshufb      xm5, [pw_ang32_16_20]
+
+    punpckhqdq  xm2, xm1
+    punpckhqdq  xm4, xm3
+    punpckhqdq  xm5, xm5
+
+    movu        [rsp + 48], xm2
+    movu        [rsp + 32], xm4
+    movq        [rsp + 24], xm5
+
+    xor         r6d, r6d
+    inc         r6d                             ; r6d = 1: mode-20 path in the helpers
+    lea         r2, [rsp + 2*mmsize]            ; r2 now points at the scratch refs
+    lea         r5, [r0 + 32]                   ; remember right-half dst pointer
+
+    call        ang32_mode_16_20_rows_0_15
+
+    ; advance dst by 16 rows (8*stride twice keeps the displacement encodable)
+    lea         r0, [r0 + 8 * r1]
+    lea         r0, [r0 + 8 * r1]
+
+    call        ang32_mode_16_20_rows_16_31
+
+    add         r2, 32                          ; advance refs by 16 samples
+    mov         r0, r5                          ; right half, top 16 rows
+
+    call        ang32_mode_16_20_rows_0_15
+
+    lea         r0, [r0 + 8 * r1]
+    lea         r0, [r0 + 8 * r1]
+
+    call        ang32_mode_16_20_rows_16_31
+
+    mov         rsp, [rsp+5*mmsize]             ; restore caller's stack pointer
+    RET
+
+;; angle 32, modes 17 and 19, rows 0 to 15
+; Shared row helper: computes 16 interpolated rows of a 16-wide strip.
+; r2 = reference pointer, r3 = ang_table_avx2 + 16*32 (so [r3 + n*32] is the
+; coefficient pair for fraction 16+n; the "[k]" trailing comments name the
+; fraction used). Each row is pmaddwd(ref, coef) + 16 >> 5, then packed to
+; 16 bits. The `test r6d, r6d` result is presumably consumed inside
+; TRANSPOSE_STORE_AVX2_STACK to choose the mode-17 (transposed) vs mode-19
+; (direct) store — confirm in that macro's definition.
+cglobal ang32_mode_17_19_rows_0_15
+    test            r6d, r6d
+
+    ; load refs and interleave adjacent words: m3/m0 = pairs (ref[i],ref[i+1])
+    movu            m0, [r2 - 24]
+    movu            m1, [r2 - 22]
+
+    punpcklwd       m3, m0, m1
+    punpckhwd       m0, m1
+
+    ; second pair block 8 samples further on
+    movu            m1, [r2 - 8]
+    movu            m4, [r2 - 6]
+    punpcklwd       m2, m1, m4
+    punpckhwd       m1, m4
+
+    pmaddwd         m4, m3, [r3 - 16 * 32]              ; [0]
+    paddd           m4, [pd_16]                         ; round
+    psrld           m4, 5
+    pmaddwd         m5, m0, [r3 - 16 * 32]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    pmaddwd         m5, m3, [r3 + 10 * 32]              ; [26]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m8, m0, [r3 + 10 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m5, m8
+
+    ; palignr by 4 bytes = shift the pair window one sample forward
+    palignr         m6, m0, m3, 4
+    palignr         m8, m2, m0, 4
+    pmaddwd         m6, [r3 + 4 * 32]                   ; [20]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, [r3 + 4 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    palignr         m7, m0, m3, 8
+    palignr         m9, m2, m0, 8
+    pmaddwd         m7, [r3 - 2 * 32]                   ; [14]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m9, [r3 - 2 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m7, m9
+
+    palignr         m8, m0, m3, 12
+    palignr         m10, m2, m0, 12
+    pmaddwd         m8, [r3 - 8 * 32]                   ; [8]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m10, [r3 - 8 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m8, m10
+
+    pmaddwd         m9, m0, [r3 - 14 * 32]              ; [2]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m12, m2, [r3 - 14 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m9, m12
+
+    pmaddwd         m10, m0, [r3 + 12 * 32]             ; [28]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, m2, [r3 + 12 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m11, m2, m0, 4
+    palignr         m12, m1, m2, 4
+    pmaddwd         m11, [r3 + 6 * 32]                  ; [22]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m12, [r3 + 6 * 32]
+    paddd           m12, [pd_16]
+    psrld           m12, 5
+    packusdw        m11, m12
+
+    ; first 8 rows done; store (transposed or not, per r6d flags above)
+    TRANSPOSE_STORE_AVX2_STACK 11, 10, 9, 8, 7, 6, 5, 4, 12, 13, 16
+
+    ; rows 8..15
+    palignr         m4, m2, m0, 8
+    palignr         m5, m1, m2, 8
+    pmaddwd         m4, [r3]                            ; [16]
+    paddd           m4, [pd_16]
+    psrld           m4, 5
+    pmaddwd         m5, [r3]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m5, m2, m0, 12
+    palignr         m3, m1, m2, 12
+    pmaddwd         m5, [r3 - 6 * 32]                   ; [10]
+    paddd           m5, [pd_16]
+    psrld           m5, 5
+    pmaddwd         m3, [r3 - 6 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m5, m3
+
+    pmaddwd         m6, m2, [r3 - 12 * 32]              ; [4]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m8, m1, [r3 - 12 * 32]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    packusdw        m6, m8
+
+    pmaddwd         m7, m2, [r3 + 14 * 32]              ; [30]
+    paddd           m7, [pd_16]
+    psrld           m7, 5
+    pmaddwd         m3, m1, [r3 + 14 * 32]
+    paddd           m3, [pd_16]
+    psrld           m3, 5
+    packusdw        m7, m3
+
+    ; reload refs 9 samples further on for the remaining rows
+    movu            m0, [r2 - 6]
+    movu            m1, [r2 - 4]
+
+    punpcklwd       m3, m0, m1
+    punpckhwd       m0, m1
+
+    movu            m2, [r2 + 10]
+    movu            m1, [r2 + 12]
+    punpcklwd       m2, m1
+
+    pmaddwd         m8, m3, [r3 + 8 * 32]               ; [24]
+    paddd           m8, [pd_16]
+    psrld           m8, 5
+    pmaddwd         m9, m0, [r3 + 8 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m8, m9
+
+    palignr         m9, m0, m3, 4
+    palignr         m10, m2, m0, 4
+    pmaddwd         m9, [r3 + 2 * 32]                   ; [18]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    pmaddwd         m10, [r3 + 2 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m9, m10
+
+    palignr         m10, m0, m3, 8
+    palignr         m11, m2, m0, 8
+    pmaddwd         m10, [r3 - 4 * 32]                  ; [12]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    pmaddwd         m11, [r3 - 4 * 32]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    packusdw        m10, m11
+
+    palignr         m2, m0, 12
+    palignr         m0, m3, 12
+    pmaddwd         m0, [r3 - 10 * 32]                  ; [6]
+    paddd           m0, [pd_16]
+    psrld           m0, 5
+    pmaddwd         m2, [r3 - 10 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m0, m2
+    TRANSPOSE_STORE_AVX2_STACK 0, 10, 9, 8, 7, 6, 5, 4, 2, 1, 0
+    ret
+
+; 32x32 angular intra prediction, mode 17 (16-bit pixels, AVX2).
+; Sets up an aligned stack scratch of reference samples plus a projected
+; extension (above refs shuffled via pw_ang32_17_19_0 / shuf_mode_17_19 into
+; overlapping spans below the base), then calls the shared rows_0_15 helper
+; four times — mode 17's angle is steep enough that every 16-row strip uses
+; the same helper, with r2 nudged between calls (-26 / +58 bytes).
+cglobal intra_pred_ang32_17, 3,8,14
+    mov         r6, rsp                         ; keep original rsp
+    sub         rsp, 5*mmsize+gprsize
+    and         rsp, ~63                        ; align scratch buffer to 64 bytes
+    mov         [rsp+5*mmsize], r6              ; saved rsp lives just above scratch
+
+    ; copy 32+1 left-reference words (input offsets 128..194) into scratch
+    movu        m0, [r2 + 128]
+    movu        m1, [r2 + 160]
+    movd        xm2, [r2 + 192]
+
+    mova        [rsp + 2*mmsize], m0
+    mova        [rsp + 3*mmsize], m1
+    movd        [rsp + 4*mmsize], xm2
+
+    add         r1d, r1d                        ; stride in bytes (16-bit pixels)
+    lea         r4, [r1 * 3]                    ; 3 * stride, used by store helpers
+    lea         r3, [ang_table_avx2 + 16 * 32]  ; [r3 + n*32] = coefficient entry 16+n
+
+    ; shuffle above refs into the projected extension (overlapping stores
+    ; 48/36/22/10 stitch the reordered spans together)
+    movu        xm1, [r2 + 2]
+    movu        xm2, [r2 + 18]
+    movu        xm3, [r2 + 34]
+    movu        xm4, [r2 + 50]
+    pshufb      xm1, [pw_ang32_17_19_0]
+    pshufb      xm2, [shuf_mode_17_19]
+    pshufb      xm3, [pw_ang32_17_19_0]
+    pshufb      xm4, [shuf_mode_17_19]
+
+    movzx       r6d, word [r2]                  ; ref[0] (top-left corner sample)
+    mov         [rsp + 2*mmsize], r6w           ; overwrite first scratch word with it
+    movu        [rsp + 48], xm1
+    movu        [rsp + 36], xm2
+    movu        [rsp + 22], xm3
+    movu        [rsp + 10], xm4
+
+    xor         r6d, r6d                        ; r6d = 0: mode-17 path in the helper
+    lea         r2, [rsp + 2*mmsize]            ; r2 now points at the scratch refs
+    lea         r7, [r0 + 8 * r1]               ; remember dst row 8 for quadrant 3
+
+    call        ang32_mode_17_19_rows_0_15
+
+    sub         r2, 26                          ; step refs back 13 samples
+    lea         r0, [r0 + 32]                   ; right half of top 16 rows
+
+    call        ang32_mode_17_19_rows_0_15
+
+    add         r2, 58                          ; net +16 samples from the start
+    lea         r0, [r7 + 8 * r1]               ; dst row 16
+
+    call        ang32_mode_17_19_rows_0_15
+
+    sub         r2, 26
+    lea         r0, [r0 + 32]                   ; right half of bottom 16 rows
+
+    call        ang32_mode_17_19_rows_0_15
+
+    mov         rsp, [rsp+5*mmsize]             ; restore caller's stack pointer
+    RET
+
+; 32x32 angular intra prediction, mode 19 (16-bit pixels, AVX2).
+; Transpose-mirror of mode 17: same scratch layout and rows_0_15 helper, but
+; r6d = 1 selects the mode-19 store path, the main/extension reference roles
+; swap (main refs from input offset 0, extension from offset 130), and dst
+; advances by rows instead of columns between helper calls.
+cglobal intra_pred_ang32_19, 3,8,14
+    mov         r6, rsp                         ; keep original rsp
+    sub         rsp, 5*mmsize+gprsize
+    and         rsp, ~63                        ; align scratch buffer to 64 bytes
+    mov         [rsp+5*mmsize], r6              ; saved rsp lives just above scratch
+
+    ; copy 32+1 main reference words (input offsets 0..66) into scratch
+    movu        m0, [r2]
+    movu        m1, [r2 + 32]
+    movd        xm2, [r2 + 64]
+
+    mova        [rsp + 2*mmsize], m0
+    mova        [rsp + 3*mmsize], m1
+    movd        [rsp + 4*mmsize], xm2
+
+    add         r1d, r1d                        ; stride in bytes (16-bit pixels)
+    lea         r4, [r1 * 3]                    ; 3 * stride, used by store helpers
+    lea         r3, [ang_table_avx2 + 16 * 32]  ; [r3 + n*32] = coefficient entry 16+n
+
+    ; shuffle side refs (offsets 130..) into the projected extension
+    movu        xm1, [r2 + 130]
+    movu        xm2, [r2 + 146]
+    movu        xm3, [r2 + 162]
+    movu        xm4, [r2 + 178]
+    pshufb      xm1, [pw_ang32_17_19_0]
+    pshufb      xm2, [shuf_mode_17_19]
+    pshufb      xm3, [pw_ang32_17_19_0]
+    pshufb      xm4, [shuf_mode_17_19]
+
+    movu        [rsp + 48], xm1
+    movu        [rsp + 36], xm2
+    movu        [rsp + 22], xm3
+    movu        [rsp + 10], xm4
+
+    xor         r6d, r6d
+    inc         r6d                             ; r6d = 1: mode-19 path in the helper
+    lea         r2, [rsp + 2*mmsize]            ; r2 now points at the scratch refs
+    lea         r5, [r0 + 32]                   ; remember right-half dst pointer
+
+    call        ang32_mode_17_19_rows_0_15
+
+    sub         r2, 26                          ; step refs back 13 samples
+    ; advance dst by 16 rows (8*stride twice keeps the displacement encodable)
+    lea         r0, [r0 + 8 * r1]
+    lea         r0, [r0 + 8 * r1]
+
+    call        ang32_mode_17_19_rows_0_15
+
+    add         r2, 58                          ; net +16 samples from the start
+    mov         r0, r5                          ; right half, top 16 rows
+
+    call        ang32_mode_17_19_rows_0_15
+
+    sub         r2, 26
+    lea         r0, [r0 + 8 * r1]
+    lea         r0, [r0 + 8 * r1]
+
+    call        ang32_mode_17_19_rows_0_15
+
+    mov         rsp, [rsp+5*mmsize]             ; restore caller's stack pointer
+    RET
+
+; 32x32 angular intra prediction, mode 18 (pure diagonal, 16-bit pixels, AVX2).
+; Mode 18 needs no interpolation: output row i is the reference array shifted
+; left by i samples. The left refs are reversed (pshufb pw_swap16 presumably
+; reverses the 8 words per 128-bit lane — confirm against the constant — and
+; vpermq 01001110b swaps the lanes, reversing all 16 words) and stored below
+; the top refs on an aligned stack scratch, so each row is a plain palignr
+; of two adjacent 32-byte loads.
+cglobal intra_pred_ang32_18, 3,6,6
+    mov         r4,                 rsp
+    sub         rsp,                4*mmsize+gprsize
+    and         rsp,                ~63                 ; 64-byte-align the scratch
+    mov         [rsp+4*mmsize],     r4                  ; stash caller's rsp
+
+    ; top refs -> scratch[2..3]
+    movu        m0,                 [r2]
+    movu        m1,                 [r2 + 32]
+    mova        [rsp + 2*mmsize],   m0
+    mova        [rsp + 3*mmsize],   m1
+
+    ; left refs (offset 130..) reversed -> scratch[0..1], below the top refs
+    movu        m2,                 [r2 + 130]
+    movu        m3,                 [r2 + 162]
+    pshufb      m2,                 [pw_swap16]
+    pshufb      m3,                 [pw_swap16]
+    vpermq      m2,                 m2, 01001110b       ; swap 128-bit lanes
+    vpermq      m3,                 m3, 01001110b
+    mova        [rsp + 1*mmsize],   m2
+    mova        [rsp + 0*mmsize],   m3
+
+    add         r1d,                r1d                 ; stride in bytes
+    lea         r2,                 [rsp+2*mmsize]      ; r2 = top refs in scratch
+    lea         r4,                 [r1 * 2]
+    lea         r3,                 [r1 * 3]
+    lea         r5,                 [r1 * 4]
+
+    ; rows 0-7: shift window over [r2-16 .. r2+63]
+    movu        m0,                 [r2]
+    movu        m1,                 [r2 + 32]
+    movu        m2,                 [r2 - 16]
+    movu        m3,                 [r2 + 16]
+
+    movu        [r0],               m0
+    movu        [r0 + 32],          m1
+
+    ; each palignr count drops by 2 bytes = shift refs by one more sample/row
+    palignr     m4,                 m0, m2, 14
+    palignr     m5,                 m1, m3, 14
+    movu        [r0 + r1],          m4
+    movu        [r0 + r1 + 32],     m5
+
+    palignr     m4,                 m0, m2, 12
+    palignr     m5,                 m1, m3, 12
+    movu        [r0 + r4],          m4
+    movu        [r0 + r4 + 32],     m5
+
+    palignr     m4,                 m0, m2, 10
+    palignr     m5,                 m1, m3, 10
+    movu        [r0 + r3],          m4
+    movu        [r0 + r3 + 32],     m5
+
+    add         r0,                 r5
+
+    palignr     m4,                 m0, m2, 8
+    palignr     m5,                 m1, m3, 8
+    movu        [r0],               m4
+    movu        [r0 + 32],          m5
+
+    palignr     m4,                 m0, m2, 6
+    palignr     m5,                 m1, m3, 6
+    movu        [r0 + r1],          m4
+    movu        [r0 + r1 + 32],     m5
+
+    palignr     m4,                 m0, m2, 4
+    palignr     m5,                 m1, m3, 4
+    movu        [r0 + r4],          m4
+    movu        [r0 + r4 + 32],     m5
+
+    palignr     m4,                 m0, m2, 2
+    palignr     m5,                 m1, m3, 2
+    movu        [r0 + r3],          m4
+    movu        [r0 + r3 + 32],     m5
+
+    add         r0,                 r5
+
+    ; rows 8-15: window has moved 8 samples into the reversed left refs
+    movu        [r0],               m2
+    movu        [r0 + 32],          m3
+
+    movu        m0,                 [r2 - 32]
+    movu        m1,                 [r2]
+
+    palignr     m4,                 m2, m0, 14
+    palignr     m5,                 m3, m1, 14
+    movu        [r0 + r1],          m4
+    movu        [r0 + r1 + 32],     m5
+
+    palignr     m4,                 m2, m0, 12
+    palignr     m5,                 m3, m1, 12
+    movu        [r0 + r4],          m4
+    movu        [r0 + r4 + 32],     m5
+
+    palignr     m4,                 m2, m0, 10
+    palignr     m5,                 m3, m1, 10
+    movu        [r0 + r3],          m4
+    movu        [r0 + r3 + 32],     m5
+
+    add         r0,                 r5
+
+    palignr     m4,                 m2, m0, 8
+    palignr     m5,                 m3, m1, 8
+    movu        [r0],               m4
+    movu        [r0 + 32],          m5
+
+    palignr     m4,                 m2, m0, 6
+    palignr     m5,                 m3, m1, 6
+    movu        [r0 + r1],          m4
+    movu        [r0 + r1 + 32],     m5
+
+    palignr     m4,                 m2, m0, 4
+    palignr     m5,                 m3, m1, 4
+    movu        [r0 + r4],          m4
+    movu        [r0 + r4 + 32],     m5
+
+    palignr     m4,                 m2, m0, 2
+    palignr     m5,                 m3, m1, 2
+    movu        [r0 + r3],          m4
+    movu        [r0 + r3 + 32],     m5
+
+    add         r0,                 r5
+
+    ; rows 16-23
+    movu        [r0],               m0
+    movu        [r0 + 32],          m1
+
+    movu        m2,                 [r2 - 48]
+    movu        m3,                 [r2 - 16]
+
+    palignr     m4,                 m0, m2, 14
+    palignr     m5,                 m1, m3, 14
+    movu        [r0 + r1],          m4
+    movu        [r0 + r1 + 32],     m5
+
+    palignr     m4,                 m0, m2, 12
+    palignr     m5,                 m1, m3, 12
+    movu        [r0 + r4],          m4
+    movu        [r0 + r4 + 32],     m5
+
+    palignr     m4,                 m0, m2, 10
+    palignr     m5,                 m1, m3, 10
+    movu        [r0 + r3],          m4
+    movu        [r0 + r3 + 32],     m5
+
+    add         r0,                 r5
+
+    palignr     m4,                 m0, m2, 8
+    palignr     m5,                 m1, m3, 8
+    movu        [r0],               m4
+    movu        [r0 + 32],          m5
+
+    palignr     m4,                 m0, m2, 6
+    palignr     m5,                 m1, m3, 6
+    movu        [r0 + r1],          m4
+    movu        [r0 + r1 + 32],     m5
+
+    palignr     m4,                 m0, m2, 4
+    palignr     m5,                 m1, m3, 4
+    movu        [r0 + r4],          m4
+    movu        [r0 + r4 + 32],     m5
+
+    palignr     m4,                 m0, m2, 2
+    palignr     m5,                 m1, m3, 2
+    movu        [r0 + r3],          m4
+    movu        [r0 + r3 + 32],     m5
+
+    add         r0,                 r5
+
+    ; rows 24-31
+    movu        [r0],               m2
+    movu        [r0 + 32],          m3
+
+    movu        m0,                 [r2 - 64]
+    movu        m1,                 [r2 - 32]
+
+    palignr     m4,                 m2, m0, 14
+    palignr     m5,                 m3, m1, 14
+    movu        [r0 + r1],          m4
+    movu        [r0 + r1 + 32],     m5
+
+    palignr     m4,                 m2, m0, 12
+    palignr     m5,                 m3, m1, 12
+    movu        [r0 + r4],          m4
+    movu        [r0 + r4 + 32],     m5
+
+    palignr     m4,                 m2, m0, 10
+    palignr     m5,                 m3, m1, 10
+    movu        [r0 + r3],          m4
+    movu        [r0 + r3 + 32],     m5
+
+    add         r0,                 r5
+
+    palignr     m4,                 m2, m0, 8
+    palignr     m5,                 m3, m1, 8
+    movu        [r0],               m4
+    movu        [r0 + 32],          m5
+
+    palignr     m4,                 m2, m0, 6
+    palignr     m5,                 m3, m1, 6
+    movu        [r0 + r1],          m4
+    movu        [r0 + r1 + 32],     m5
+
+    palignr     m4,                 m2, m0, 4
+    palignr     m5,                 m3, m1, 4
+    movu        [r0 + r4],          m4
+    movu        [r0 + r4 + 32],     m5
+
+    palignr     m4,                 m2, m0, 2
+    palignr     m5,                 m3, m1, 2
+    movu        [r0 + r3],          m4
+    movu        [r0 + r3 + 32],     m5
+
+    mov         rsp,                [rsp+4*mmsize]      ; restore caller's stack pointer
+    RET
+;-------------------------------------------------------------------------------------------------------
+; end of avx2 code for intra_pred_ang32 mode 2 to 34
+;-------------------------------------------------------------------------------------------------------
+
+; Modes 2 and 34 (pure diagonal, no interpolation), 16-byte SSE registers,
+; 16-bit pixels. Emits a 16-row strip of a 32-wide block: row i is the
+; reference array starting at sample i+1, realized as palignr shifts over
+; five overlapping 16-byte loads. Expects r0 = dst, r1 = stride in bytes,
+; r2 = refs; r3 and r4 are presumably 2*stride and 3*stride respectively
+; (set by the caller — confirm at the expansion site).
+%macro MODE_2_34 0
+    movu            m0, [r2 + 4]                ; refs starting at sample 1
+    movu            m1, [r2 + 20]
+    movu            m2, [r2 + 36]
+    movu            m3, [r2 + 52]
+    movu            m4, [r2 + 68]
+    ; row 0: straight copy
+    movu            [r0], m0
+    movu            [r0 + 16], m1
+    movu            [r0 + 32], m2
+    movu            [r0 + 48], m3
+    ; rows 1-3: shift by 1..3 samples (2 bytes each)
+    palignr         m5, m1, m0, 2
+    movu            [r0 + r1], m5
+    palignr         m5, m2, m1, 2
+    movu            [r0 + r1 + 16], m5
+    palignr         m5, m3, m2, 2
+    movu            [r0 + r1 + 32], m5
+    palignr         m5, m4, m3, 2
+    movu            [r0 + r1 + 48], m5
+    palignr         m5, m1, m0, 4
+    movu            [r0 + r3], m5
+    palignr         m5, m2, m1, 4
+    movu            [r0 + r3 + 16], m5
+    palignr         m5, m3, m2, 4
+    movu            [r0 + r3 + 32], m5
+    palignr         m5, m4, m3, 4
+    movu            [r0 + r3 + 48], m5
+    palignr         m5, m1, m0, 6
+    movu            [r0 + r4], m5
+    palignr         m5, m2, m1, 6
+    movu            [r0 + r4 + 16], m5
+    palignr         m5, m3, m2, 6
+    movu            [r0 + r4 + 32], m5
+    palignr         m5, m4, m3, 6
+    movu            [r0 + r4 + 48], m5
+    lea             r0, [r0 + r1 * 4]
+    ; rows 4-7: shift by 4..7 samples
+    palignr         m5, m1, m0, 8
+    movu            [r0], m5
+    palignr         m5, m2, m1, 8
+    movu            [r0 + 16], m5
+    palignr         m5, m3, m2, 8
+    movu            [r0 + 32], m5
+    palignr         m5, m4, m3, 8
+    movu            [r0 + 48], m5
+    palignr         m5, m1, m0, 10
+    movu            [r0 + r1], m5
+    palignr         m5, m2, m1, 10
+    movu            [r0 + r1 + 16], m5
+    palignr         m5, m3, m2, 10
+    movu            [r0 + r1 + 32], m5
+    palignr         m5, m4, m3, 10
+    movu            [r0 + r1 + 48], m5
+    palignr         m5, m1, m0, 12
+    movu            [r0 + r3], m5
+    palignr         m5, m2, m1, 12
+    movu            [r0 + r3 + 16], m5
+    palignr         m5, m3, m2, 12
+    movu            [r0 + r3 + 32], m5
+    palignr         m5, m4, m3, 12
+    movu            [r0 + r3 + 48], m5
+    palignr         m5, m1, m0, 14
+    movu            [r0 + r4], m5
+    palignr         m5, m2, m1, 14
+    movu            [r0 + r4 + 16], m5
+    palignr         m5, m3, m2, 14
+    movu            [r0 + r4 + 32], m5
+    palignr         m5, m4, m3, 14
+    movu            [r0 + r4 + 48], m5
+    lea             r0, [r0 + r1 * 4]
+    ; rows 8-15: window advanced a full register; m1..m4 become the base,
+    ; with one extra load (m0) for the tail
+    movu            m0, [r2 + 84]
+    movu            [r0], m1
+    movu            [r0 + 16], m2
+    movu            [r0 + 32], m3
+    movu            [r0 + 48], m4
+    palignr         m5, m2, m1, 2
+    movu            [r0 + r1], m5
+    palignr         m5, m3, m2, 2
+    movu            [r0 + r1 + 16], m5
+    palignr         m5, m4, m3, 2
+    movu            [r0 + r1 + 32], m5
+    palignr         m5, m0, m4, 2
+    movu            [r0 + r1 + 48], m5
+    palignr         m5, m2, m1, 4
+    movu            [r0 + r3], m5
+    palignr         m5, m3, m2, 4
+    movu            [r0 + r3 + 16], m5
+    palignr         m5, m4, m3, 4
+    movu            [r0 + r3 + 32], m5
+    palignr         m5, m0, m4, 4
+    movu            [r0 + r3 + 48], m5
+    palignr         m5, m2, m1, 6
+    movu            [r0 + r4], m5
+    palignr         m5, m3, m2, 6
+    movu            [r0 + r4 + 16], m5
+    palignr         m5, m4, m3, 6
+    movu            [r0 + r4 + 32], m5
+    palignr         m5, m0, m4, 6
+    movu            [r0 + r4 + 48], m5
+    lea             r0, [r0 + r1 * 4]
+    palignr         m5, m2, m1, 8
+    movu            [r0], m5
+    palignr         m5, m3, m2, 8
+    movu            [r0 + 16], m5
+    palignr         m5, m4, m3, 8
+    movu            [r0 + 32], m5
+    palignr         m5, m0, m4, 8
+    movu            [r0 + 48], m5
+    palignr         m5, m2, m1, 10
+    movu            [r0 + r1], m5
+    palignr         m5, m3, m2, 10
+    movu            [r0 + r1 + 16], m5
+    palignr         m5, m4, m3, 10
+    movu            [r0 + r1 + 32], m5
+    palignr         m5, m0, m4, 10
+    movu            [r0 + r1 + 48], m5
+    palignr         m5, m2, m1, 12
+    movu            [r0 + r3], m5
+    palignr         m5, m3, m2, 12
+    movu            [r0 + r3 + 16], m5
+    palignr         m5, m4, m3, 12
+    movu            [r0 + r3 + 32], m5
+    palignr         m5, m0, m4, 12
+    movu            [r0 + r3 + 48], m5
+    palignr         m5, m2, m1, 14
+    movu            [r0 + r4], m5
+    palignr         m5, m3, m2, 14
+    movu            [r0 + r4 + 16], m5
+    palignr         m5, m4, m3, 14
+    movu            [r0 + r4 + 32], m5
+    palignr         m5, m0, m4, 14
+    movu            [r0 + r4 + 48], m5
+    lea             r0,    [r0 + r1 * 4]        ; leave r0 at the next strip
+%endmacro
+
+; Store helper for the angular BLOCK_16x16 / BLOCK_32x32 kernels.
+; %1 = byte offset added to each store address
+; %2 = 1: transpose the 4x8 words in %3..%6 before storing (vertical modes)
+;      0: store %3..%6 directly as 8x4 halves (horizontal modes)
+; %3..%6 = four registers, each holding two packed 8-word rows
+; Clobbers m0. Uses r0 (dst), r1 (stride), r5 (presumably 3*stride — set by
+; the caller; confirm at the expansion site). The %2==0 path advances r0 by
+; 8 rows; the %2==1 path leaves r0 unchanged.
+%macro TRANSPOSE_STORE_8x8 6
+  %if %2 == 1
+    ; transpose 4x8 and then store, used by angle BLOCK_16x16 and BLOCK_32x32
+    punpckhwd   m0, %3, %4                      ; interleave words of rows 0/1
+    punpcklwd   %3, %4
+    punpckhwd   %4, %3, m0
+    punpcklwd   %3, m0
+
+    punpckhwd   m0, %5, %6                      ; interleave words of rows 2/3
+    punpcklwd   %5, %6
+    punpckhwd   %6, %5, m0
+    punpcklwd   %5, m0
+
+    punpckhqdq  m0, %3, %5                      ; combine 64-bit halves -> columns
+    punpcklqdq  %3, %5
+    punpcklqdq  %5, %4, %6
+    punpckhqdq  %4, %6
+
+    movu        [r0 + %1], %3
+    movu        [r0 + r1 + %1], m0
+    movu        [r0 + r1 * 2 + %1], %5
+    movu        [r0 + r5 + %1], %4
+  %else
+    ; store 8x4, used by angle BLOCK_16x16 and BLOCK_32x32
+    movh        [r0], %3                        ; low/high qwords = successive rows
+    movhps      [r0 + r1], %3
+    movh        [r0 + r1 * 2], %4
+    movhps      [r0 + r5], %4
+    lea         r0, [r0 + r1 * 4]
+    movh        [r0], %5
+    movhps      [r0 + r1], %5
+    movh        [r0 + r1 * 2], %6
+    movhps      [r0 + r5], %6
+    lea         r0, [r0 + r1 * 4]
+  %endif
+%endmacro
+
+%macro MODE_3_33 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m3,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    mova        m7,        m0
+
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5] xmm2
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1] xmm0
+
+    palignr     m1,        m2, m0, 4                  ; [6 5 5 4 4 3 3 2] xmm1
+    pmaddwd     m4,        m0, [r3 + 10 * 16]         ; [26]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m5,        m1, [r3 + 4 * 16]          ; [20]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m4,        m5
+
+    palignr     m5,        m2, m0, 8
+    pmaddwd     m5,        [r3 - 2 * 16]              ; [14]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m2, m0, 12
+    pmaddwd     m6,        [r3 - 8 * 16]              ; [ 8]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m2, [r3 - 14 * 16]         ; [ 2]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m2, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m0,        m3, m2, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        m0, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    psrldq      m2,        m3, 2   ; [x 16 15 14 13 12 11 10]
+    palignr     m2,        m0, 4   ;[11 10 10 9 9 8 8 7]
+
+    pmaddwd     m2,        [r3]                       ; [16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m1,        m2
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    palignr     m0,        m3, m7, 14                 ; [15 14 13 12 11 10 9 8]
+    movu        m3,        [r2 + 32]                  ; [23 22 21 20 19 18 17 16]
+    palignr     m1,        m3, m0, 2                  ; [16 15 14 13 12 11 10 9]
+    punpckhwd   m7,        m0, m1                     ; [16 15 15 14 14 13 13 12]
+    punpcklwd   m0,        m1                         ; [12 11 11 10 10 9 9 8]
+
+    palignr     m5,        m7, m0, 4                  ; [13 12 12 11 11 10 10 9]
+    pmaddwd     m4,        m0, [r3 - 6 * 16]          ; [10]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m5, [r3 - 12 * 16]         ; [04]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 14 * 16]             ; [30]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m7, m0, 8                  ; [14 13 13 12 12 11 11 10]
+    pmaddwd     m6,        [r3 + 8 * 16]              ; [24]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m1,        m7, m0, 12                 ; [15 14 14 13 13 12 12 11]
+    pmaddwd     m6,        m1, [r3 + 2 * 16]          ; [18]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m7, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m2,        m3, m7, 4                  ; [17 16 16 15 15 14 14 13]
+    pmaddwd     m1,        m2, [r3 - 10 * 16]         ; [6]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 28]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    movu        m0,        [r2 + 28]                  ; [35 34 33 32 31 30 29 28]
+    palignr     m1,        m0, 2                      ; [ x 35 34 33 32 31 30 29]
+    punpckhwd   m2,        m0, m1                     ; [ x 35 35 34 34 33 33 32]
+    punpcklwd   m0,        m1                         ; [32 31 31 30 30 29 29 28]
+
+    pmaddwd     m4,        m0, [r3 + 10 * 16]         ; [26]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m1,        m2, m0, 4                  ; [33 32 32 31 31 30 30 29]
+    pmaddwd     m1,        [r3 + 4 * 16]              ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m5,        m2, m0, 8                  ; [34 33 33 32 32 31 31 30]
+    pmaddwd     m5,        [r3 - 2 * 16]              ; [14]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m2, m0, 12                 ; [35 34 34 33 33 32 32 31]
+    pmaddwd     m6,        [r3 - 8 * 16]              ; [ 8]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pinsrw      m2,        [r2 + 44], 7               ; [35 34 34 33 33 32 32 31]
+    pmaddwd     m6,        m2, [r3 - 14 * 16]         ; [ 2]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m2,        [r3 + 12 * 16]             ; [28]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m6,        m2
+
+    movu        m3,        [r2 + 38]                  ; [45 44 43 42 41 40 39 38]
+    palignr     m1,        m3, 2                      ; [ x 45 44 43 42 41 40 39]
+    punpckhwd   m2,        m3, m1                     ; [ x 35 35 34 34 33 33 32]
+    punpcklwd   m3,        m1                         ; [32 31 31 30 30 29 29 28]
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    palignr     m0,        m2, m3, 4
+    pmaddwd     m0,        [r3]                       ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    palignr     m5,        m2, m3, 8
+    pmaddwd     m4,        m5, [r3 - 6 * 16]          ; [10]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m5,        m2, m3, 12
+    pmaddwd     m1,        m5, [r3 - 12 * 16]         ; [04]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 14 * 16]             ; [30]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 46]
+    palignr     m1,        m3, 2
+    punpckhwd   m2,        m3, m1
+    punpcklwd   m3,        m1
+
+    pmaddwd     m6,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m6,        m2, m3, 4
+    pmaddwd     m6,        [r3 + 2 * 16]              ; [18]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    palignr     m1,        m2, m3, 8
+    pmaddwd     m1,        [r3 - 4 * 16]              ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m1,        m2, m3, 12
+    pmaddwd     m1,        [r3 - 10 * 16]             ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 54]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+;-----------------------------------------------------------------------------
+; MODE_4_32 %1
+; Intra angular prediction kernel — presumably HEVC modes 4 and 32 on 16-bit
+; samples (NOTE(review): inferred from the macro name; confirm against the
+; enclosing file and the TRANSPOSE_STORE_8x8 definition, both outside this
+; hunk).
+; r2 = reference sample array (words; [r2 + 2] holds sample 1),
+; r3 = base of the pmaddwd coefficient table; the bracketed value on each
+;      pmaddwd line is the interpolation weight w of its (32-w, w) pair.
+; Every product is rounded with [pd_16], shifted right by 5, packed back to
+; words, and written via four transposed 8x8 stores at word offsets
+; 0/16/32/48; %1 is forwarded to TRANSPOSE_STORE_8x8. Clobbers m0-m7.
+;-----------------------------------------------------------------------------
+%macro MODE_4_32 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m3,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m0, [r3 + 5 * 16]          ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m5,        m2, m0, 4                  ; [6 5 5 4 4 3 3 2]
+    pmaddwd     m1,        m5, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 15 * 16]             ; [31]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m2, m0, 8
+    pmaddwd     m6,        [r3 + 4 * 16]              ; [ 20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m1,        m2, m0, 12
+    pmaddwd     m6,        m1, [r3 - 7 * 16]          ; [ 9]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 14 * 16]             ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m2, [r3 + 3 * 16]          ; [19]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    palignr     m7,        m3, m2, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m0,        m7, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 + 13 * 16]         ; [29]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m0,        [r2 + 34]                  ; [24 23 22 21 20 19 18 17]
+
+    palignr     m2,        m0, m3, 2                  ; [17 16 15 14 13 12 11 10]
+    palignr     m1,        m0, m3, 4                  ; [18 17 16 15 14 13 12 11]
+    punpckhwd   m3,        m2, m1                     ; [18 17 17 16 16 15 15 14]
+    punpcklwd   m2,        m1                         ; [14 13 13 12 12 11 11 10]
+
+    palignr     m1,        m2, m7, 4                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m1,        [r3 +  2 * 16]             ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m5,        m2, m7, 8
+    mova        m6,        m5
+    pmaddwd     m5,        [r3 - 9 * 16]              ; [07]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        [r3 + 12 * 16]             ; [28]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m6,        m2, m7, 12
+    pmaddwd     m6,        [r3 +      16]             ; [17]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m2, [r3 - 10 * 16]         ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m2, [r3 + 11 * 16]         ; [27]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    palignr     m7,        m3, m2, 4
+    pmaddwd     m7,        [r3]                       ; [16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m1,        m7
+    mova        m7,        m0                         ; stash raw [24..17] for the next 8-sample window
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    palignr     m0,        m3, m2, 8
+    pmaddwd     m4,        m0, [r3 - 11 * 16]         ; [5]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m0, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m5,        m3, m2, 12
+    pmaddwd     m5,        [r3 - 16]                  ; [15]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [4]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    pmaddwd     m6,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m0,        [r2 + 50]                  ; [32 31 30 29 28 27 26 25]
+    palignr     m2,        m0, m7, 2                  ; [25 24 23 22 21 20 19 18]
+    palignr     m1,        m0, m7, 4                  ; [26 25 24 23 22 21 20 19]
+    punpckhwd   m7,        m2, m1                     ; [26 25 25 24 24 23 23 22]
+    punpcklwd   m2,        m1                         ; [22 21 21 20 20 19 19 18]
+
+    palignr     m1,        m2, m3, 4
+    pmaddwd     m1,        [r3 - 2 * 16]              ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m1,        m2, m3, 8
+    mova        m0,        m1
+    pmaddwd     m1,        [r3 - 13 * 16]             ; [3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        [r3 + 8 * 16]              ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    palignr     m4,        m2, m3, 12
+    pmaddwd     m4,        [r3 - 3 * 16]              ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m2, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m2, [r3 + 7 * 16]          ; [23]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m7, m2, 4
+    pmaddwd     m6,        [r3 - 4 * 16]              ; [12]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m1,        m7, m2, 8
+    pmaddwd     m6,        m1, [r3 - 15 * 16]         ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 6 * 16]              ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m1,        m7, m2, 12
+    pmaddwd     m1,        [r3 - 5 * 16]              ; [11]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 44]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+;-----------------------------------------------------------------------------
+; MODE_5_31 %1
+; Intra angular prediction kernel — presumably HEVC modes 5 and 31 on 16-bit
+; samples (NOTE(review): inferred from the macro name; confirm against the
+; enclosing file and TRANSPOSE_STORE_8x8, both outside this hunk).
+; r2 = reference sample array (words; [r2 + 2] holds sample 1),
+; r3 = base of the pmaddwd coefficient table; the bracketed value on each
+;      pmaddwd line is the interpolation weight w of its (32-w, w) pair.
+; Each product is rounded with [pd_16], shifted right by 5, packed to words,
+; and emitted in four transposed 8x8 stores at word offsets 0/16/32/48;
+; %1 is forwarded to TRANSPOSE_STORE_8x8. Clobbers m0-m7.
+;-----------------------------------------------------------------------------
+%macro MODE_5_31 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m3,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m0, [r3 + 16]              ; [17]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m1,        m2, m0, 4
+    mova        m5,        m1
+    pmaddwd     m1,        [r3 - 14 * 16]             ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 3 * 16]              ; [19]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m2, m0, 8
+    mova        m1,        m6
+    pmaddwd     m6,        [r3 - 12 * 16]             ; [4]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m1, [r3 + 5 * 16]          ; [21]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    palignr     m1,        m2, m0, 12
+    mova        m7,        m1
+    pmaddwd     m7,        [r3 - 10 * 16]             ; [6]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pmaddwd     m1,        [r3 + 7 * 16]              ; [23]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m7,        m2, [r3 - 8 * 16]          ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m1,        m7
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m2, [r3 + 9 * 16]          ; [25]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m7,        m3, m2, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        m7, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m7, [r3 + 11 * 16]         ; [27]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m0,        [r2 + 34]                  ; [24 23 22 21 20 19 18 17]
+    palignr     m2,        m0, m3, 2                  ; [17 16 15 14 13 12 11 10]
+    palignr     m1,        m0, m3, 4                  ; [18 17 16 15 14 13 12 11]
+    punpckhwd   m3,        m2, m1                     ; [18 17 17 16 16 15 15 14]
+    punpcklwd   m2,        m1                         ; [14 13 13 12 12 11 11 10]
+
+    palignr     m6,        m2, m7, 4
+    pmaddwd     m1,        m6, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    pmaddwd     m6,        [r3 + 13 * 16]             ; [29]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    palignr     m1,        m2, m7, 8
+    mova        m0,        m1
+    pmaddwd     m1,        [r3 - 2 * 16]              ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m0, [r3 + 15 * 16]         ; [31]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    palignr     m0,        m2, m7, 12
+    pmaddwd     m0,        [r3]                       ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m2, [r3 - 15 * 16]         ; [1]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m2, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m1,        m3, m2, 4
+    pmaddwd     m5,        m1, [r3 - 13 * 16]         ; [3]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m1,        [r3 + 4 * 16]              ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    palignr     m1,        m3, m2, 8
+    pmaddwd     m6,        m1, [r3 - 11 * 16]         ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 6 * 16]              ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m7,        m3, m2, 12
+    pmaddwd     m1,        m7, [r3 - 9 * 16]          ; [7]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m7,        [r3 + 8 * 16]              ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m1,        m7
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 7 * 16]          ; [9]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m0,        [r2 + 36]                  ; [25 24 23 22 21 20 19 18]
+    palignr     m1,        m0, 2                      ; [x 25 24 23 22 21 20 19]
+    punpcklwd   m0,        m1                         ; [22 21 21 20 20 19 19 18]
+
+    palignr     m1,        m0, m3, 4
+    pmaddwd     m5,        m1, [r3 - 5 * 16]          ; [11]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m1,        [r3 + 12 * 16]             ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    palignr     m1,        m0, m3, 8
+    pmaddwd     m6,        m1, [r3 - 3 * 16]          ; [13]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 14 * 16]             ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m1,        m0, m3, 12
+    pmaddwd     m1,        [r3 - 16]                  ; [15]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 36]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+;-----------------------------------------------------------------------------
+; MODE_6_30 %1
+; Intra angular prediction kernel — presumably HEVC modes 6 and 30 on 16-bit
+; samples (NOTE(review): inferred from the macro name; confirm against the
+; enclosing file and TRANSPOSE_STORE_8x8, both outside this hunk).
+; r2 = reference sample array (words; [r2 + 2] holds sample 1),
+; r3 = base of the pmaddwd coefficient table; the bracketed value on each
+;      pmaddwd line is the interpolation weight w of its (32-w, w) pair.
+; Each product is rounded with [pd_16], shifted right by 5, packed to words,
+; and emitted in four transposed 8x8 stores at word offsets 0/16/32/48;
+; %1 is forwarded to TRANSPOSE_STORE_8x8. Clobbers m0-m7.
+;-----------------------------------------------------------------------------
+%macro MODE_6_30 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m3,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m0, [r3 - 3 * 16]          ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m0, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m1,        m2, m0, 4
+    pmaddwd     m5,        m1, [r3 - 9 * 16]          ; [7]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m1,        [r3 + 4 * 16]              ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    palignr     m1,        m2, m0, 8
+    pmaddwd     m6,        m1, [r3 - 15 * 16]         ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m7,        m1, [r3 - 2 * 16]          ; [14]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pmaddwd     m1,        [r3 + 11 * 16]             ; [27]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    palignr     m7,        m2, m0, 12
+    pmaddwd     m0,        m7, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 +  5 * 16]         ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m2, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m2, [r3 - 16]              ; [15]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m2, [r3 + 12 * 16]         ; [28]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m7,        m3, m2, 4
+    pmaddwd     m6,        m7, [r3 - 7 * 16]          ; [9]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m7, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m0,        [r2 + 34]                  ; [24 23 22 21 20 19 18 17]
+    palignr     m2,        m0, m3, 2                  ; [17 16 15 14 13 12 11 10]
+    palignr     m1,        m0, m3, 4                  ; [18 17 16 15 14 13 12 11]
+    punpckhwd   m3,        m2, m1                     ; [18 17 17 16 16 15 15 14]
+    punpcklwd   m2,        m1                         ; [14 13 13 12 12 11 11 10]
+
+    palignr     m0,        m2, m7, 4
+    pmaddwd     m1,        m0, [r3 - 13 * 16]         ; [3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        [r3]                       ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    ; same shuffle as the [3]/[16] rows above, recomputed because m0 was
+    ; consumed by the store
+    palignr     m4,        m2, m7, 4
+    pmaddwd     m4,        [r3 +  13 * 16]            ; [29]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m5,        m2, m7, 8
+    pmaddwd     m1,        m5, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 7 * 16]              ; [23]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m1,        m2, m7, 12
+    pmaddwd     m6,        m1, [r3 - 12 * 16]         ; [4]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m1, [r3 + 16]              ; [17]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 14 * 16]             ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m2, [r3 - 5 * 16]          ; [11]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m2, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    palignr     m5,        m3, m2, 4
+    pmaddwd     m4,        m5, [r3 - 11 * 16]         ; [5]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m5, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 15 * 16]             ; [31]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m3, m2, 8
+    pmaddwd     m1,        m6, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    pmaddwd     m6,        [r3 + 9 * 16]              ; [25]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    palignr     m1,        m3, m2, 12
+    pmaddwd     m0,        m1, [r3 - 10 * 16]         ; [6]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m6,        m0
+
+    pmaddwd     m1,        [r3 + 3 * 16]              ; [19]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 28]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+;-----------------------------------------------------------------------------
+; MODE_7_29 %1
+; Intra angular prediction kernel — presumably HEVC modes 7 and 29 on 16-bit
+; samples (NOTE(review): inferred from the macro name; confirm against the
+; enclosing file and TRANSPOSE_STORE_8x8, both outside this hunk).
+; r2 = reference sample array (words; [r2 + 2] holds sample 1),
+; r3 = base of the pmaddwd coefficient table; the bracketed value on each
+;      pmaddwd line is the interpolation weight w of its (32-w, w) pair.
+; Each product is rounded with [pd_16], shifted right by 5, packed to words,
+; and emitted in four transposed 8x8 stores at word offsets 0/16/32/48;
+; %1 is forwarded to TRANSPOSE_STORE_8x8. Clobbers m0-m7.
+;-----------------------------------------------------------------------------
+%macro MODE_7_29 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movd        m3,        [r2 + 18]                  ; low words [10 9] only (movd = 4 bytes); only sample 9 feeds the palignr below, and m3 is rewritten before any other use
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m0, [r3 - 7 * 16]          ; [9]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m0, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m0, [r3 + 11 * 16]         ; [27]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m1,        m2, m0, 4
+    pmaddwd     m6,        m1, [r3 - 12 * 16]         ; [4]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m1, [r3 - 3 * 16]          ; [13]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m7,        m1, [r3 + 6 * 16]          ; [22]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pmaddwd     m1,        [r3 + 15 * 16]             ; [31]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    mova        m3,        m0                         ; keep [5 4 4 3 3 2 2 1] alive across the store
+    palignr     m7,        m2, m0, 8
+    pmaddwd     m0,        m7, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 + 16]              ; [17]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m7, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m1,        m2, m3, 12
+    pmaddwd     m5,        m1, [r3 - 13 * 16]         ; [3]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m1, [r3 - 4 * 16]          ; [12]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m1, [r3 + 5 * 16]          ; [21]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 14 * 16]             ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m2, [r3 - 9 * 16]          ; [7]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m2, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m2, [r3 + 9 * 16]          ; [25]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m7,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m1,        m7, 2                      ; [x 16 15 14 13 12 11 10]
+    punpcklwd   m7,        m1                         ; [13 12 12 11 11 10 10 9]
+
+    palignr     m6,        m7, m2, 4
+    pmaddwd     m1,        m6, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m6, [r3 - 5 * 16]          ; [11]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m6, [r3 + 4 * 16]          ; [20]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        [r3 + 13 * 16]             ; [29]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    palignr     m0,        m7, m2, 8
+    pmaddwd     m1,        m0, [r3 - 10 * 16]         ; [6]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m0, [r3 - 16]              ; [15]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        [r3 + 8 * 16]              ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    palignr     m0,        m7, m2, 12
+    pmaddwd     m4,        m0, [r3 - 15 * 16]         ; [1]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m0, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m0, [r3 + 3 * 16]          ; [19]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        [r3 + 12 * 16]             ; [28]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m7, [r3 - 11 * 16]         ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m0,        m7, [r3 - 2 * 16]          ; [14]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m6,        m0
+
+    pmaddwd     m1,        m7, [r3 + 7 * 16]          ; [23]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 20]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+;-----------------------------------------------------------------------------
+; MODE_8_28 %1
+; Intra angular prediction kernel — presumably HEVC modes 8 and 28 on 16-bit
+; samples (NOTE(review): inferred from the macro name; confirm against the
+; enclosing file and TRANSPOSE_STORE_8x8, both outside this hunk).
+; r2 = reference sample array (words; [r2 + 2] holds sample 1),
+; r3 = base of the pmaddwd coefficient table; the bracketed value on each
+;      pmaddwd line is the interpolation weight w of its (32-w, w) pair.
+; Each product is rounded with [pd_16], shifted right by 5, packed to words,
+; and emitted in four transposed 8x8 stores at word offsets 0/16/32/48;
+; %1 is forwarded to TRANSPOSE_STORE_8x8. Clobbers m0-m7.
+;-----------------------------------------------------------------------------
+%macro MODE_8_28 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movd        m3,        [r2 + 18]                  ; low words [10 9] only (movd = 4 bytes); only sample 9 feeds the palignr below, and m3 is rewritten before any other use
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m0, [r3 - 11 * 16]         ; [5]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m0, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m0, [r3 - 16]              ; [15]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m0, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m0, [r3 + 9 * 16]          ; [25]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m0, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m7,        m2, m0, 4
+    pmaddwd     m1,        m7, [r3 - 13 * 16]         ; [3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    mova        m3,        m0                         ; keep [5 4 4 3 3 2 2 1] alive across the store
+    pmaddwd     m0,        m7, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 - 3 * 16]          ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m7, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m7, [r3 + 7 * 16]          ; [23]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m7, [r3 + 12 * 16]         ; [28]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m7,        m2, m3, 8
+    pmaddwd     m6,        m7, [r3 - 15 * 16]         ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m7, [r3 - 10 * 16]         ; [6]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m7, [r3 - 5 * 16]          ; [11]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m7, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 + 5 * 16]          ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m7, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m7, [r3 + 15 * 16]         ; [31]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m7,        m2, m3, 12
+    pmaddwd     m0,        m7, [r3 - 12 * 16]         ; [4]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m7, [r3 - 7 * 16]          ; [9]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m7, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m7, [r3 + 3 * 16]          ; [19]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m7, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 + 13 * 16]         ; [29]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m2, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m2, [r3 - 9 * 16]          ; [7]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m2, [r3 - 4 * 16]          ; [12]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m2, [r3 + 16]              ; [17]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m0,        m2, [r3 + 6 * 16]          ; [22]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m6,        m0
+
+    pmaddwd     m1,        m2, [r3 + 11 * 16]         ; [27]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 12]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+%macro MODE_9_27 1
+    ; Angular intra prediction for modes 9/27, 16-bit pixels.
+    ; r2 = reference-sample row, r3 = coefficient table pointer biased to
+    ; fraction 16 (each fraction's two pmaddwd weights occupy 16 bytes,
+    ; so [r3 + (f-16)*16] selects fraction f — see the "[f]" comments).
+    ; Each row: pmaddwd blends two neighbouring reference words,
+    ; paddd [pd_16] + psrld 5 rounds ((a*w0 + b*w1 + 16) >> 5), and
+    ; packusdw packs two dword rows to unsigned words before
+    ; TRANSPOSE_STORE_8x8 emits an 8x8 tile (dest offsets 0/16/32/48).
+    ; NOTE(review): looks like x265-style HEVC intrapred16 — confirm.
+    movu        m3,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    palignr     m1,        m3, 2                      ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m3, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m3,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m3, [r3 - 14 * 16]         ; [2]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [4]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 10 * 16]         ; [6]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 8 * 16]          ; [8]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    ; fraction 0 row: no interpolation — copy reference sample directly
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 4]                   ; [00]
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    palignr     m7,        m2, m3, 4
+    pmaddwd     m4,        m7, [r3 - 14 * 16]         ; [2]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m7, [r3 - 12 * 16]         ; [4]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m7, [r3 - 10 * 16]         ; [6]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m7, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m7, [r3 - 6 * 16]          ; [10]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m7, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m7, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m7, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 + 2 * 16]          ; [18]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m7, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m7, [r3 + 6 * 16]          ; [22]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m7, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m7, [r3 + 10 * 16]         ; [26]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m0,        m7, [r3 + 12 * 16]         ; [28]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m6,        m0
+
+    pmaddwd     m7,        [r3 + 14 * 16]             ; [30]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m7,        m7
+    movhps      m7,        [r2 + 6]                   ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m7
+%endmacro
+
+%macro MODE_11_25 1
+    ; Angular intra prediction for modes 11/25, 16-bit pixels.
+    ; Negative-ish angle: fractions descend 30,28,...,2 and the source
+    ; window slides left ([r2 + 2] then [r2]).  pw_punpcklwd duplicates
+    ; adjacent words into (a,b) pairs for pmaddwd; each result is
+    ; rounded via paddd [pd_16] + psrld 5, i.e. (a*w0 + b*w1 + 16) >> 5.
+    ; TRANSPOSE_STORE_8x8 writes four 8x8 tiles at offsets 0/16/32/48.
+    ; NOTE(review): looks like x265-style HEVC intrapred16 — confirm.
+    movu        m3,        [r2 + 2]                   ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        [pw_punpcklwd]             ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 8 * 16]          ; [8]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 10 * 16]         ; [6]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [4]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    ; fraction 0 row: copy reference sample directly (no interpolation)
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 2]                   ; [00]
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2]                       ; [6 5 4 3 2 1 0 16]
+    pshufb      m3,        [pw_punpcklwd]             ; [3 2 2 1 1 0 0 16]
+
+    pmaddwd     m4,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 8 * 16]          ; [8]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 10 * 16]         ; [6]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [4]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+%macro MODE_12_24 1
+    ; Angular intra prediction for modes 12/24, 16-bit pixels.
+    ; m2 must hold the word-duplicating pshufb mask, set up by the
+    ; caller (see the "[4 3 3 2 2 1 1 0]" annotation).  The reference
+    ; window slides down by one word ([r2+8] -> [r2]) every fifth
+    ; fraction as the accumulated angle crosses a sample boundary.
+    ; Rounding is (a*w0 + b*w1 + 16) >> 5 via paddd [pd_16] / psrld 5;
+    ; TRANSPOSE_STORE_8x8 emits four 8x8 tiles at offsets 0/16/32/48.
+    ; NOTE(review): looks like x265-style HEVC intrapred16 — confirm.
+    movu        m3,        [r2 + 8]                   ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 + 11 * 16]         ; [27]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 16]              ; [17]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 9 * 16]          ; [7]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 13 * 16]         ; [29]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 + 3 * 16]          ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 7 * 16]          ; [9]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 12 * 16]         ; [4]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 15 * 16]         ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 + 5 * 16]          ; [21]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 5 * 16]          ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 10 * 16]         ; [6]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 15 * 16]         ; [1]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m3, [r3 + 7 * 16]          ; [23]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 3 * 16]          ; [13]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 13 * 16]         ; [3]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 16]              ; [15]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 11 * 16]         ; [5]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    ; fraction 0 row: copy reference sample directly (no interpolation)
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+%macro MODE_13_23 1
+    movu        m3,        [r2 + 16]                  ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 + 7 * 16]          ; [23]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 11 * 16]         ; [05]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 14]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 + 3 * 16]          ; [19]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 15 * 16]         ; [01]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 12]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 16]              ; [15]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 10]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 13 * 16]         ; [29]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 5 * 16]          ; [11]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 8]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 9 * 16]          ; [07]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 5 * 16]          ; [21]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m3, [r3 - 13 * 16]         ; [03]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 + 16]              ; [17]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 15 * 16]         ; [31]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 3 * 16]          ; [13]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 11 * 16]         ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 7 * 16]          ; [09]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+%macro MODE_14_22 1
+    movu        m3,        [r2 + 24]                  ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 + 3 * 16]          ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 22]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 20]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 15 * 16]         ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 11 * 16]         ; [05]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 18]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 5 * 16]          ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 16]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 16]              ; [17]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 14]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 7 * 16]          ; [23]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 12]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 13 * 16]         ; [29]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 13 * 16]         ; [03]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 10]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 7 * 16]          ; [09]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 8]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m3, [r3 - 16]              ; [15]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 5 * 16]          ; [21]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 11 * 16]         ; [27]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 15 * 16]         ; [01]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 9 * 16]          ; [07]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 3 * 16]          ; [13]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+%macro MODE_15_21 1
+    movu        m3,        [r2 + 32]                  ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 - 16]              ; [15]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 30]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 3 * 16]          ; [13]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 28]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 5 * 16]          ; [11]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 26]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 7 * 16]          ; [09]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 24]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 9 * 16]          ; [07]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 22]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 11 * 16]         ; [05]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 20]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 13 * 16]         ; [03]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 18]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 15 * 16]         ; [01]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 16]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 14]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 15 * 16]         ; [31]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 12]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 13 * 16]         ; [29]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    movu        m3,        [r2 + 10]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 11 * 16]         ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 8]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 7 * 16]          ; [23]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 5 * 16]          ; [21]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 3 * 16]          ; [19]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 16]              ; [17]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+; MODE_16_20 %1: one pass of 16-bit intra angular prediction shared by
+; modes 16 and 20; %1 is forwarded unchanged to TRANSPOSE_STORE_8x8.
+; Expects (set up by the caller):
+;   r2 -> reference samples (walked downward from +40 to +0),
+;   r3 -> centre of ang_table; weight rows live at [r3 + k * 16] and the
+;         trailing ";[NN]" comment on each pmaddwd names the fraction used,
+;   m2 = shuffle mask pairing adjacent words ([4 3 3 2 2 1 1 0]).
+; Per result row: load refs (movu), pair neighbours (pshufb m2), weighted
+; sum (pmaddwd), round (+pd_16), >>5, then packusdw merges two dword rows
+; into words.  Four packed rows at a time are transposed out to dst at
+; offsets 0/16/32/48; the final "[00]" row copies the reference directly
+; via movhps (fraction 0 needs no filtering).
+%macro MODE_16_20 1
+    movu        m3,        [r2 + 40]                  ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 - 5 * 16]          ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 38]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 15 * 16]         ; [01]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 36]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 34]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 7 * 16]          ; [23]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 32]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 3 * 16]          ; [13]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 30]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    ; m3 still holds the [r2 + 30] refs from above - reused here
+    pmaddwd     m4,        m3, [r3 - 13 * 16]         ; [03]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 28]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 26]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 24]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 16]              ; [15]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 22]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 11 * 16]         ; [05]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 20]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 18]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 11 * 16]         ; [27]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 16]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 16]              ; [17]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 14]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m3, [r3 - 9 * 16]          ; [07]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 12]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 10]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 13 * 16]         ; [29]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 8]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 3 * 16]          ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 7 * 16]          ; [09]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 15 * 16]         ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 5 * 16]          ; [21]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    ; fraction-0 row: reference words are copied verbatim, no filter
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+; MODE_17_19 %1: one pass of 16-bit intra angular prediction shared by
+; modes 17 and 19; %1 is forwarded unchanged to TRANSPOSE_STORE_8x8.
+; Same building blocks as MODE_16_20 (pshufb m2 word-pairing, pmaddwd
+; against ang_table rows at [r3 + k * 16], +pd_16 rounding, >>5,
+; packusdw, 8x8 transpose-stores at dst offsets 0/16/32/48) but with the
+; sample/weight schedule of modes 17/19: the reference pointer walks
+; down from [r2 + 50], and two fraction-0 rows ("[00]") copy reference
+; words directly via movhps instead of filtering.
+%macro MODE_17_19 1
+    movu        m3,        [r2 + 50]                  ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 48]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 46]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 44]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 42]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 40]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 38]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 36]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 34]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 32]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 30]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 28]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 26]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    ; fraction-0 row: copy reference words from [r2 + 26] unfiltered
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 26]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 24]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 22]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 20]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 18]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    movu        m3,        [r2 + 16]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 14]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 12]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 10]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 8]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    ; fraction-0 row: copy reference words from [r2] unfiltered
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+
+;------------------------------------------------------------------------------------------
+; void intraPredAng32(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------
+; 32x32 intra angular prediction, modes 2 and 34 (SSSE3).
+; r0 = dst, r1 = stride (doubled below: pixels appear to be 2 bytes wide
+; throughout this file), r2 = reference buffer; r3m = dirMode.
+; Mode 34 keeps the first reference array, every other caller (mode 2)
+; uses the array at r2 + 128 - selected branch-free with cmove.
+INIT_XMM ssse3
+cglobal intra_pred_ang32_2, 3,6,6
+    lea             r4, [r2]
+    add             r2, 128
+    cmp             r3m, byte 34
+    cmove           r2, r4                 ; mode 34: fall back to first array
+
+    add             r1, r1                 ; stride in bytes
+    lea             r3, [r1 * 2]
+    lea             r4, [r1 * 3]
+    mov             r5, 2                  ; two passes, source advances 32 bytes each
+
+.loop:
+    MODE_2_34
+    add             r2, 32
+    dec             r5
+    jnz             .loop
+    RET
+
+; 32x32 intra angular prediction, mode 3 (SSE4, 16-bit pixels).
+; r0 = dst, r1 = stride (doubled to bytes), r2 + 128 = reference array,
+; r3 -> centre of ang_table.  8 passes of MODE_3_33: each advances the
+; source by 8 bytes (4 samples) and dst by 4 rows.
+INIT_XMM sse4
+cglobal intra_pred_ang32_3, 3,6,8
+    add         r2, 128
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_3_33 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, mode 4 (SSE4, 16-bit pixels).
+; Same driver shape as intra_pred_ang32_3; only the per-pass macro
+; (MODE_4_32) differs.
+INIT_XMM sse4
+cglobal intra_pred_ang32_4, 3,6,8
+    add         r2, 128
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_4_32 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, mode 5 (SSE4, 16-bit pixels).
+; Same driver shape as intra_pred_ang32_3; per-pass macro MODE_5_31.
+INIT_XMM sse4
+cglobal intra_pred_ang32_5, 3,6,8
+    add         r2, 128
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_5_31 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, mode 6 (SSE4, 16-bit pixels).
+; Same driver shape as intra_pred_ang32_3; per-pass macro MODE_6_30.
+INIT_XMM sse4
+cglobal intra_pred_ang32_6, 3,6,8
+    add         r2, 128
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_6_30 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, mode 7 (SSE4, 16-bit pixels).
+; Same driver shape as intra_pred_ang32_3; per-pass macro MODE_7_29.
+INIT_XMM sse4
+cglobal intra_pred_ang32_7, 3,6,8
+    add         r2, 128
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_7_29 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, mode 8 (SSE4, 16-bit pixels).
+; Same driver shape as intra_pred_ang32_3; per-pass macro MODE_8_28.
+INIT_XMM sse4
+cglobal intra_pred_ang32_8, 3,6,8
+    add         r2, 128
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_8_28 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, mode 9 (SSE4, 16-bit pixels).
+; Same driver shape as intra_pred_ang32_3; per-pass macro MODE_9_27.
+INIT_XMM sse4
+cglobal intra_pred_ang32_9, 3,6,8
+    add         r2, 128
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_9_27 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, mode 10 (pure horizontal, SSE4).
+; Each output row is a single reference sample broadcast across the full
+; 32-pixel (64-byte) row: pshufb with c_mode32_10_0 splats one word of
+; m0, and the same 16-byte result is stored to all four quarters of the
+; row.  One movu of 8 samples feeds 8 rows (palignr walks the words);
+; 4 loop iterations cover all 32 rows.
+INIT_XMM sse4
+cglobal intra_pred_ang32_10, 3,7,8
+    add         r2, 128                ; second reference array
+    mov         r6d, 4
+    add         r1, r1                 ; stride in bytes
+    lea         r5, [r1 * 3]
+    lea         r4, [r1 * 2]
+    lea         r3, [r1 * 4]
+    mova        m7, [c_mode32_10_0]    ; word-0 broadcast mask
+
+.loop:
+    movu        m0, [r2 + 2]           ; next 8 reference samples
+    pshufb      m1, m0, m7
+    movu        [r0], m1
+    movu        [r0 + 16], m1
+    movu        [r0 + 32], m1
+    movu        [r0 + 48], m1
+
+    palignr     m1, m0, 2              ; shift next sample into word 0
+    pshufb      m1, m7
+    movu        [r0 + r1], m1
+    movu        [r0 + r1 + 16], m1
+    movu        [r0 + r1 + 32], m1
+    movu        [r0 + r1 + 48], m1
+
+    palignr     m1, m0, 4
+    pshufb      m1, m7
+    movu        [r0 + r4], m1
+    movu        [r0 + r4 + 16], m1
+    movu        [r0 + r4 + 32], m1
+    movu        [r0 + r4 + 48], m1
+
+    palignr     m1, m0, 6
+    pshufb      m1, m7
+    movu        [r0 + r5], m1
+    movu        [r0 + r5 + 16], m1
+    movu        [r0 + r5 + 32], m1
+    movu        [r0 + r5 + 48], m1
+
+    add         r0, r3
+
+    palignr     m1, m0, 8
+    pshufb      m1, m7
+    movu        [r0], m1
+    movu        [r0 + 16], m1
+    movu        [r0 + 32], m1
+    movu        [r0 + 48], m1
+
+    palignr     m1, m0, 10
+    pshufb      m1, m7
+    movu        [r0 + r1], m1
+    movu        [r0 + r1 + 16], m1
+    movu        [r0 + r1 + 32], m1
+    movu        [r0 + r1 + 48], m1
+
+    palignr     m1, m0, 12
+    pshufb      m1, m7
+    movu        [r0 + r4], m1
+    movu        [r0 + r4 + 16], m1
+    movu        [r0 + r4 + 32], m1
+    movu        [r0 + r4 + 48], m1
+
+    palignr     m1, m0, 14
+    pshufb      m1, m7
+    movu        [r0 + r5], m1
+    movu        [r0 + r5 + 16], m1
+    movu        [r0 + r5 + 32], m1
+    movu        [r0 + r5 + 48], m1
+
+    add         r0, r3
+    add         r2, 16
+    dec         r6d
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, mode 11 (SSE4, 16-bit pixels).
+; Builds an adjusted reference array on the stack, then runs the shared
+; MODE_11_25 loop over it:
+;   - the 64-byte second array (r2 + 128) is copied to rsp + 2, with the
+;     top-left sample ([r3], word 0 of the first array) patched in via
+;     pinsrw;
+;   - one extra sample from the first array ([r3 + 32]) is placed at
+;     [rsp] below the copy (presumably the inverse-angle projected
+;     neighbour - NOTE(review): confirm against the C reference);
+;   - [r2 + 64] extends the top end at [rsp + 66].
+INIT_XMM sse4
+cglobal intra_pred_ang32_11, 3,6,7,0-(4*mmsize+4)
+    mov      r3, r2mp                  ; r3 -> original (first) array
+    add      r2, 128                   ; r2 -> second array
+    movu     m0, [r2 + 0*mmsize]
+    pinsrw   m0, [r3], 0               ; patch in the top-left sample
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 0*mmsize + 2], m0
+    movu     [rsp + 1*mmsize + 2], m1
+    movu     [rsp + 2*mmsize + 2], m2
+    movu     [rsp + 3*mmsize + 2], m3
+    mov      r4w, [r3+32]
+    mov      [rsp], r4w
+    mov      r4w, [r2+64]
+    mov      [rsp+66], r4w
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp                   ; predict from the stack copy
+    add      r1, r1
+    lea      r5, [r1 * 3]
+
+.loop:
+    MODE_11_25 1
+    lea      r0, [r0 + r1 * 4 ]
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+; 32x32 intra angular prediction, mode 12 (SSE4, 16-bit pixels).
+; Like mode 11, but the main array sits 8 bytes up the stack and four
+; samples gathered from the first array (words at [r3+12], [r3+26],
+; [r3+38], [r3+52]) are prepended in reverse order below it
+; (presumably the inverse-angle projected neighbours - NOTE(review):
+; confirm against the C reference).  m2 = pw_punpcklwd word-pair mask
+; consumed by MODE_12_24.
+INIT_XMM sse4
+cglobal intra_pred_ang32_12, 3,6,7,0-(4*mmsize+10)
+    mov      r3, r2mp
+    add      r2, 128
+    movu     m0, [r2 + 0*mmsize]
+    pinsrw   m0, [r3], 0               ; patch in the top-left sample
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 0*mmsize + 8], m0
+    movu     [rsp + 1*mmsize + 8], m1
+    movu     [rsp + 2*mmsize + 8], m2
+    movu     [rsp + 3*mmsize + 8], m3
+
+    mov      r4w, [r2+64]
+    mov      [rsp+72], r4w             ; extend the top end
+    mov      r4w, [r3+12]
+    mov      [rsp+6], r4w
+    mov      r4w, [r3+26]
+    mov      [rsp+4], r4w
+    mov      r4w, [r3+38]
+    mov      [rsp+2], r4w
+    mov      r4w, [r3+52]
+    mov      [rsp], r4w
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_12_24 1
+    lea      r0, [r0 + r1 * 4 ]
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+; 32x32 intra angular prediction, mode 13 (SSE4, 16-bit pixels).
+; Main array is stored 16 bytes up the stack; the 16 bytes below it are
+; filled with samples gathered from the first array via shuf_mode_13_23,
+; then two words ([r3+28] at rsp+8, [r3+56] at rsp) deliberately
+; overwrite the low word of each movh store - the overlap is intended
+; (NOTE(review): matches the reference gather pattern; verify against
+; the C fallback if this table ever changes).
+INIT_XMM sse4
+cglobal intra_pred_ang32_13, 3,6,7,0-(5*mmsize+2)
+    mov      r3, r2mp
+    add      r2, 128
+    movu     m0, [r2 + 0*mmsize]
+    pinsrw   m0, [r3], 0               ; patch in the top-left sample
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 1*mmsize], m0
+    movu     [rsp + 2*mmsize], m1
+    movu     [rsp + 3*mmsize], m2
+    movu     [rsp + 4*mmsize], m3
+
+    mov      r4w, [r2+64]
+    mov      [rsp+80], r4w             ; extend the top end
+    movu     m0, [r3 + 8]
+    movu     m1, [r3 + 36]
+    pshufb   m0, [shuf_mode_13_23]
+    pshufb   m1, [shuf_mode_13_23]
+    movh     [rsp + 8], m0
+    movh     [rsp], m1
+    mov      r4w, [r3+28]
+    mov      [rsp+8], r4w              ; overwrites low word of m0 store (intended)
+    mov      r4w, [r3+56]
+    mov      [rsp], r4w                ; overwrites low word of m1 store (intended)
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_13_23 1
+    lea      r0, [r0 + r1 * 4 ]
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+; 32x32 intra angular prediction, mode 14 (SSE4, 16-bit pixels).
+; Main array is stored 24 bytes up the stack (1*mmsize + 8); below it,
+; samples from the first array are reversed in groups of 8 bytes via
+; shuf_mode_14_22 plus one word fix-up ([r3+4] at rsp+22).  Note the
+; last movh lands at [rsp - 2]: its top word overlaps [rsp] inside the
+; reserved area while the bottom word falls in the stack slack below
+; the frame (intended by the original layout).
+INIT_XMM sse4
+cglobal intra_pred_ang32_14, 3,6,7,0-(5*mmsize+10)
+    mov      r3, r2mp
+    add      r2, 128
+    movu     m0, [r2 + 0*mmsize]
+    pinsrw   m0, [r3], 0               ; patch in the top-left sample
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 1*mmsize + 8], m0
+    movu     [rsp + 2*mmsize + 8], m1
+    movu     [rsp + 3*mmsize + 8], m2
+    movu     [rsp + 4*mmsize + 8], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 88], r4w           ; extend the top end
+    mov      r4w, [r3+4]
+    mov      [rsp+22], r4w
+    movu     m0, [r3 + 10]
+    movu     m1, [r3 + 30]
+    movu     m2, [r3 + 50]
+    pshufb   m0, [shuf_mode_14_22]
+    pshufb   m1, [shuf_mode_14_22]
+    pshufb   m2, [shuf_mode_14_22]
+    movh     [rsp + 14], m0
+    movh     [rsp + 6], m1
+    movh     [rsp - 2], m2
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_14_22 1
+    lea      r0, [r0 + r1 * 4 ]
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+; 32x32 intra angular prediction, mode 15 (SSE4, 16-bit pixels).
+; Main array is stored 32 bytes (2*mmsize) up the stack; the 32 bytes
+; below are filled with four 8-byte groups of first-array samples
+; reordered through shuf_mode_15_21, then 8 passes of MODE_15_21 run
+; over the stack copy.
+INIT_XMM sse4
+cglobal intra_pred_ang32_15, 3,6,7,0-(6*mmsize+2)
+    mov      r3, r2mp
+    add      r2, 128
+    movu     m0, [r2 + 0*mmsize]
+    pinsrw   m0, [r3], 0               ; patch in the top-left sample
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 2*mmsize], m0
+    movu     [rsp + 3*mmsize], m1
+    movu     [rsp + 4*mmsize], m2
+    movu     [rsp + 5*mmsize], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 96], r4w           ; extend the top end
+    movu     m0, [r3 + 4]
+    movu     m1, [r3 + 18]
+    movu     m2, [r3 + 34]
+    movu     m3, [r3 + 48]
+    pshufb   m0, [shuf_mode_15_21]
+    pshufb   m1, [shuf_mode_15_21]
+    pshufb   m2, [shuf_mode_15_21]
+    pshufb   m3, [shuf_mode_15_21]
+    movh     [rsp + 24], m0
+    movh     [rsp + 16], m1
+    movh     [rsp + 8], m2
+    movh     [rsp], m3
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_15_21 1
+    lea      r0, [r0 + r1 * 4 ]
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+; 32x32 intra angular prediction, mode 16 (SSE4, 16-bit pixels).
+; Main array is stored 40 bytes (2*mmsize + 8) up the stack; below it,
+; first-array samples reordered via shuf_mode_16_20 are laid down in
+; three overlapping 16-byte stores, and the final movd at [rsp]
+; deliberately overwrites the low dword of the m2 store (intended
+; layering - NOTE(review): verify against the C fallback if
+; shuf_mode_16_20 ever changes).  8 passes of MODE_16_20 follow.
+INIT_XMM sse4
+cglobal intra_pred_ang32_16, 3,6,7,0-(6*mmsize+10)
+    mov      r3, r2mp
+    add      r2, 128
+    movu     m0, [r2 + 0*mmsize]
+    pinsrw   m0, [r3], 0               ; patch in the top-left sample
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 2*mmsize + 8], m0
+    movu     [rsp + 3*mmsize + 8], m1
+    movu     [rsp + 4*mmsize + 8], m2
+    movu     [rsp + 5*mmsize + 8], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 104], r4w          ; extend the top end
+    movu     m0, [r3 + 4]
+    movu     m1, [r3 + 22]
+    movu     m2, [r3 + 40]
+    movd     m3, [r3 + 58]
+    pshufb   m0, [shuf_mode_16_20]
+    pshufb   m1, [shuf_mode_16_20]
+    pshufb   m2, [shuf_mode_16_20]
+    pshufb   m3, [shuf_mode_16_20]
+    movu     [rsp + 24], m0
+    movu     [rsp + 12], m1
+    movu     [rsp], m2
+    movd     [rsp], m3                 ; overwrites low dword of m2 store (intended)
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_16_20 1
+    lea      r0, [r0 + r1 * 4 ]
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+; 32x32 intra angular prediction, mode 17 (SSE4, 16-bit pixels).
+; Main array is stored 50 bytes (3*mmsize + 2) up the stack; below it,
+; first-array samples reordered via shuf_mode_17_19 (plus one dword via
+; shuf_mode_16_20 and three word fix-ups) fill the projected area.  The
+; stores overlap on purpose: later, smaller stores patch individual
+; words of earlier 16-byte stores, and the m2 store at [rsp - 4] spills
+; its bottom dword into the slack below the frame (intended layout -
+; NOTE(review): verify against the C fallback if the shuffle tables
+; ever change).  8 passes of MODE_17_19 follow.
+INIT_XMM sse4
+cglobal intra_pred_ang32_17, 3,6,7,0-(7*mmsize+4)
+    mov      r3, r2mp
+    add      r2, 128
+    movu     m0, [r2 + 0*mmsize]
+    pinsrw   m0, [r3], 0               ; patch in the top-left sample
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 3*mmsize + 2], m0
+    movu     [rsp + 4*mmsize + 2], m1
+    movu     [rsp + 5*mmsize + 2], m2
+    movu     [rsp + 6*mmsize + 2], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 114], r4w          ; extend the top end
+    movu     m0, [r3 + 8]
+    movu     m1, [r3 + 30]
+    movu     m2, [r3 + 50]
+    movd     m3, [r3 + 2]
+    pshufb   m0, [shuf_mode_17_19]
+    pshufb   m1, [shuf_mode_17_19]
+    pshufb   m2, [shuf_mode_17_19]
+    pshufb   m3, [shuf_mode_16_20]
+    movd     [rsp + 46], m3
+    movu     [rsp + 30], m0
+    movu     [rsp + 12], m1
+    movu     [rsp - 4], m2
+    mov      r4w, [r3 + 24]
+    mov      [rsp + 30], r4w           ; patch low word of m0 store (intended)
+    mov      r4w, [r3 + 28]
+    mov      [rsp + 28], r4w
+    mov      r4w, [r3 + 46]
+    mov      [rsp + 12], r4w           ; patch low word of m1 store (intended)
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_17_19 1
+    lea      r0, [r0 + r1 * 4 ]
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_18, 3,7,8
+    mov      r3, r2mp
+    add      r2, 128
+    movu        m0, [r3]               ; [7 6 5 4 3 2 1 0]
+    movu        m1, [r3 + 16]          ; [15 14 13 12 11 10 9 8]
+    movu        m2, [r3 + 32]          ; [23 22 21 20 19 18 17 16]
+    movu        m3, [r3 + 48]          ; [31 30 29 28 27 26 25 24]
+    movu        m4, [r2 + 2]           ; [8 7 6 5 4 3 2 1]
+    movu        m5, [r2 + 18]          ; [16 15 14 13 12 11 10 9]
+
+    add         r1, r1
+    lea         r6, [r1 * 2]
+    lea         r3, [r1 * 3]
+    lea         r4, [r1 * 4]
+
+    movu        [r0], m0
+    movu        [r0 + 16], m1
+    movu        [r0 + 32], m2
+    movu        [r0 + 48], m3
+
+    pshufb      m4, [shuf_mode32_18]   ; [1 2 3 4 5 6 7 8]
+    pshufb      m5, [shuf_mode32_18]   ; [9 10 11 12 13 14 15 16]
+
+    palignr     m6, m0, m4, 14
+    movu        [r0 + r1], m6
+    palignr     m6, m1, m0, 14
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m2, m1, 14
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m3, m2, 14
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m0, m4, 12
+    movu        [r0 + r6], m6
+    palignr     m6, m1, m0, 12
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m2, m1, 12
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m3, m2, 12
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m0, m4, 10
+    movu        [r0 + r3], m6
+    palignr     m6, m1, m0, 10
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m2, m1, 10
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m3, m2, 10
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    palignr     m6, m0, m4, 8
+    movu        [r0], m6
+    palignr     m6, m1, m0, 8
+    movu        [r0 + 16], m6
+    palignr     m6, m2, m1, 8
+    movu        [r0 + 32], m6
+    palignr     m6, m3, m2, 8
+    movu        [r0 + 48], m6
+
+    palignr     m6, m0, m4, 6
+    movu        [r0 + r1], m6
+    palignr     m6, m1, m0, 6
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m2, m1, 6
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m3, m2, 6
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m0, m4, 4
+    movu        [r0 + r6], m6
+    palignr     m6, m1, m0, 4
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m2, m1, 4
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m3, m2, 4
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m0, m4, 2
+    movu        [r0 + r3], m6
+    palignr     m6, m1, m0, 2
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m2, m1, 2
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m3, m2, 2
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    movu        [r0], m4
+    movu        [r0 + 16], m0
+    movu        [r0 + 32], m1
+    movu        [r0 + 48], m2
+
+    palignr     m6, m4, m5, 14
+    movu        [r0 + r1], m6
+    palignr     m6, m0, m4, 14
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m1, m0, 14
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m2, m1, 14
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m4, m5, 12
+    movu        [r0 + r6], m6
+    palignr     m6, m0, m4, 12
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m1, m0, 12
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m2, m1, 12
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m4, m5, 10
+    movu        [r0 + r3], m6
+    palignr     m6, m0, m4, 10
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m1, m0, 10
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m2, m1, 10
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    palignr     m6, m4, m5, 8
+    movu        [r0], m6
+    palignr     m6, m0, m4, 8
+    movu        [r0 + 16], m6
+    palignr     m6, m1, m0, 8
+    movu        [r0 + 32], m6
+    palignr     m6, m2, m1, 8
+    movu        [r0 + 48], m6
+
+    palignr     m6, m4, m5, 6
+    movu        [r0 + r1], m6
+    palignr     m6, m0, m4, 6
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m1, m0, 6
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m2, m1, 6
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m4, m5, 4
+    movu        [r0 + r6], m6
+    palignr     m6, m0, m4, 4
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m1, m0, 4
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m2, m1, 4
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m4, m5, 2
+    movu        [r0 + r3], m6
+    palignr     m6, m0, m4, 2
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m1, m0, 2
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m2, m1, 2
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    movu        m2, [r2 + 34]
+    movu        m3, [r2 + 50]
+    pshufb      m2, [shuf_mode32_18]
+    pshufb      m3, [shuf_mode32_18]
+
+    movu        [r0], m5
+    movu        [r0 + 16], m4
+    movu        [r0 + 32], m0
+    movu        [r0 + 48], m1
+
+    palignr     m6, m5, m2, 14
+    movu        [r0 + r1], m6
+    palignr     m6, m4, m5, 14
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m0, m4, 14
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m1, m0, 14
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m5, m2, 12
+    movu        [r0 + r6], m6
+    palignr     m6, m4, m5, 12
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m0, m4, 12
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m1, m0, 12
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m5, m2, 10
+    movu        [r0 + r3], m6
+    palignr     m6, m4, m5, 10
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m0, m4, 10
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m1, m0, 10
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    palignr     m6, m5, m2, 8
+    movu        [r0], m6
+    palignr     m6, m4, m5, 8
+    movu        [r0 + 16], m6
+    palignr     m6, m0, m4, 8
+    movu        [r0 + 32], m6
+    palignr     m6, m1, m0, 8
+    movu        [r0 + 48], m6
+
+    palignr     m6, m5, m2, 6
+    movu        [r0 + r1], m6
+    palignr     m6, m4, m5, 6
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m0, m4, 6
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m1, m0, 6
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m5, m2, 4
+    movu        [r0 + r6], m6
+    palignr     m6, m4, m5, 4
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m0, m4, 4
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m1, m0, 4
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m5, m2, 2
+    movu        [r0 + r3], m6
+    palignr     m6, m4, m5, 2
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m0, m4, 2
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m1, m0, 2
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    movu        [r0], m2
+    movu        [r0 + 16], m5
+    movu        [r0 + 32], m4
+    movu        [r0 + 48], m0
+
+    palignr     m6, m2, m3, 14
+    movu        [r0 + r1], m6
+    palignr     m6, m5, m2, 14
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m4, m5, 14
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m0, m4, 14
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m2, m3, 12
+    movu        [r0 + r6], m6
+    palignr     m6, m5, m2, 12
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m4, m5, 12
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m0, m4, 12
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m2, m3, 10
+    movu        [r0 + r3], m6
+    palignr     m6, m5, m2, 10
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m4, m5, 10
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m0, m4, 10
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    palignr     m6, m2, m3, 8
+    movu        [r0], m6
+    palignr     m6, m5, m2, 8
+    movu        [r0 + 16], m6
+    palignr     m6, m4, m5, 8
+    movu        [r0 + 32], m6
+    palignr     m6, m0, m4, 8
+    movu        [r0 + 48], m6
+
+    palignr     m6, m2, m3, 6
+    movu        [r0 + r1], m6
+    palignr     m6, m5, m2, 6
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m4, m5, 6
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m0, m4, 6
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m2, m3, 4
+    movu        [r0 + r6], m6
+    palignr     m6, m5, m2, 4
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m4, m5, 4
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m0, m4, 4
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m2, m3, 2
+    movu        [r0 + r3], m6
+    palignr     m6, m5, m2, 2
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m4, m5, 2
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m0, m4, 2
+    movu        [r0 + r3 + 48], m6
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_19, 3,7,7,0-(7*mmsize+4)    ; 32x32 angular mode 19: r0=dst, r1=dstStride, r2=reference samples; stack scratch holds a rearranged reference row
+    lea      r3, [r2 + 128]                          ; r3 -> left-neighbour half of the reference buffer
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 3*mmsize + 2], m0                ; copy 64 bytes of "above" references to the top of the scratch buffer
+    movu     [rsp + 4*mmsize + 2], m1
+    movu     [rsp + 5*mmsize + 2], m2
+    movu     [rsp + 6*mmsize + 2], m3
+
+    mov      r4w, [r2 + 64]                          ; last above sample
+    mov      [rsp + 114], r4w
+    movu     m0, [r3 + 8]                            ; gather selected left samples for projection onto the main row
+    movu     m1, [r3 + 30]
+    movu     m2, [r3 + 50]
+    movd     m3, [r3 + 2]
+    pshufb   m0, [shuf_mode_17_19]                   ; reorder/reverse the gathered samples
+    pshufb   m1, [shuf_mode_17_19]
+    pshufb   m2, [shuf_mode_17_19]
+    pshufb   m3, [shuf_mode_16_20]
+    movd     [rsp + 46], m3
+    movu     [rsp + 30], m0
+    movu     [rsp + 12], m1
+    movu     [rsp - 4], m2                           ; NOTE(review): low 4 bytes land below rsp; r2=rsp below so they are never read back, but confirm this is safe on targets without a red zone
+    mov      r4w, [r3 + 24]                          ; patch individual samples the vector shuffles could not place
+    mov      [rsp + 30], r4w
+    mov      r4w, [r3 + 28]
+    mov      [rsp + 28], r4w
+    mov      r4w, [r3 + 46]
+    mov      [rsp + 12], r4w
+
+    lea      r3, [ang_table + 16 * 16]               ; r3 -> interpolation weight table for this angle
+    mov      r4d, 8                                  ; 8 strips of 8 bytes = the full 32-column width (presumably 16-bit samples, given "add r1, r1" -- TODO confirm HIGH_BIT_DEPTH build)
+    mov      r2, rsp                                 ; r2 now walks the rearranged reference buffer
+    add      r1, r1                                  ; stride in bytes
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+    mov      r6, r0                                  ; r6 remembers the start of the current column strip
+
+.loop:
+    MODE_17_19 0                                     ; render one column strip of the 32x32 block
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_20, 3,7,7,0-(6*mmsize+10)   ; 32x32 angular mode 20: same template as intra_pred_ang32_19, different sample projection
+    lea      r3, [r2 + 128]                          ; r3 -> left-neighbour half of the reference buffer
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 2*mmsize + 8], m0                ; copy the 64-byte "above" row into the scratch buffer
+    movu     [rsp + 3*mmsize + 8], m1
+    movu     [rsp + 4*mmsize + 8], m2
+    movu     [rsp + 5*mmsize + 8], m3
+
+    mov      r4w, [r2 + 64]                          ; last above sample
+    mov      [rsp + 104], r4w
+    movu     m0, [r3 + 4]                            ; gather selected left samples
+    movu     m1, [r3 + 22]
+    movu     m2, [r3 + 40]
+    movd     m3, [r3 + 58]
+    pshufb   m0, [shuf_mode_16_20]
+    pshufb   m1, [shuf_mode_16_20]
+    pshufb   m2, [shuf_mode_16_20]
+    pshufb   m3, [shuf_mode_16_20]
+    movu     [rsp + 24], m0                          ; stores deliberately overlap; a later store overrides the overlapped bytes
+    movu     [rsp + 12], m1
+    movu     [rsp], m2
+    movd     [rsp], m3                               ; NOTE(review): this overwrites the 4 low bytes just stored from m2 -- verify against upstream that the offset is intended (mode 19 places its movd at a distinct offset)
+
+    lea      r3, [ang_table + 16 * 16]               ; r3 -> interpolation weight table
+    mov      r4d, 8                                  ; 8 column strips
+    mov      r2, rsp
+    add      r1, r1                                  ; stride in bytes
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+    mov      r6, r0
+
+.loop:
+    MODE_16_20 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_21, 3,7,7,0-(6*mmsize+2)    ; 32x32 angular mode 21: same template as intra_pred_ang32_19, fewer projected left samples
+    lea      r3, [r2 + 128]                          ; r3 -> left-neighbour half of the reference buffer
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 2*mmsize], m0                    ; copy the 64-byte "above" row into the scratch buffer
+    movu     [rsp + 3*mmsize], m1
+    movu     [rsp + 4*mmsize], m2
+    movu     [rsp + 5*mmsize], m3
+
+    mov      r4w, [r2 + 64]                          ; last above sample
+    mov      [rsp + 96], r4w
+    movu     m0, [r3 + 4]                            ; gather selected left samples
+    movu     m1, [r3 + 18]
+    movu     m2, [r3 + 34]
+    movu     m3, [r3 + 48]
+    pshufb   m0, [shuf_mode_15_21]
+    pshufb   m1, [shuf_mode_15_21]
+    pshufb   m2, [shuf_mode_15_21]
+    pshufb   m3, [shuf_mode_15_21]
+    movh     [rsp + 24], m0                          ; 8-byte stores, packed back-to-back below the above-row copy
+    movh     [rsp + 16], m1
+    movh     [rsp + 8], m2
+    movh     [rsp], m3
+
+    lea      r3, [ang_table + 16 * 16]               ; r3 -> interpolation weight table
+    mov      r4d, 8                                  ; 8 column strips
+    mov      r2, rsp
+    add      r1, r1                                  ; stride in bytes
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+    mov      r6, r0
+
+.loop:
+    MODE_15_21 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_22, 3,7,7,0-(5*mmsize+10)   ; 32x32 angular mode 22: same template as intra_pred_ang32_19
+    lea      r3, [r2 + 128]                          ; r3 -> left-neighbour half of the reference buffer
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 1*mmsize + 8], m0                ; copy the 64-byte "above" row into the scratch buffer
+    movu     [rsp + 2*mmsize + 8], m1
+    movu     [rsp + 3*mmsize + 8], m2
+    movu     [rsp + 4*mmsize + 8], m3
+
+    mov      r4w, [r2 + 64]                          ; last above sample
+    mov      [rsp + 88], r4w
+    mov      r4w, [r3+4]                             ; single projected left sample
+    mov      [rsp+22], r4w
+    movu     m0, [r3 + 10]                           ; gather remaining projected left samples
+    movu     m1, [r3 + 30]
+    movu     m2, [r3 + 50]
+    pshufb   m0, [shuf_mode_14_22]
+    pshufb   m1, [shuf_mode_14_22]
+    pshufb   m2, [shuf_mode_14_22]
+    movh     [rsp + 14], m0
+    movh     [rsp + 6], m1
+    movh     [rsp - 2], m2                           ; NOTE(review): low 2 bytes land below rsp and are never read back (r2=rsp below) -- confirm safe without a red zone
+
+    lea      r3, [ang_table + 16 * 16]               ; r3 -> interpolation weight table
+    mov      r4d, 8                                  ; 8 column strips
+    mov      r2, rsp
+    add      r1, r1                                  ; stride in bytes
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+    mov      r6, r0
+
+.loop:
+    MODE_14_22 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_23, 3,7,7,0-(5*mmsize+2)    ; 32x32 angular mode 23: same template as intra_pred_ang32_19
+    lea      r3, [r2 + 128]                          ; r3 -> left-neighbour half of the reference buffer
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 1*mmsize], m0                    ; copy the 64-byte "above" row into the scratch buffer
+    movu     [rsp + 2*mmsize], m1
+    movu     [rsp + 3*mmsize], m2
+    movu     [rsp + 4*mmsize], m3
+
+    mov      r4w, [r2+64]                            ; last above sample
+    mov      [rsp+80], r4w
+    movu     m0, [r3 + 8]                            ; gather projected left samples
+    movu     m1, [r3 + 36]
+    pshufb   m0, [shuf_mode_13_23]
+    pshufb   m1, [shuf_mode_13_23]
+    movh     [rsp + 8], m0
+    movh     [rsp], m1
+    mov      r4w, [r3+28]                            ; word patches overwrite the low bytes of the movh stores above (intentional)
+    mov      [rsp+8], r4w
+    mov      r4w, [r3+56]
+    mov      [rsp], r4w
+
+    lea      r3, [ang_table + 16 * 16]               ; r3 -> interpolation weight table
+    mov      r4d, 8                                  ; 8 column strips
+    mov      r2, rsp
+    add      r1, r1                                  ; stride in bytes
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+    mov      r6, r0
+
+.loop:
+    MODE_13_23 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_24, 3,7,7,0-(4*mmsize+10)   ; 32x32 angular mode 24: only four individual left samples are projected
+    lea      r3, [r2 + 128]                          ; r3 -> left-neighbour half of the reference buffer
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+
+    movu     [rsp + 0*mmsize + 8], m0                ; copy the 64-byte "above" row into the scratch buffer
+    movu     [rsp + 1*mmsize + 8], m1
+    movu     [rsp + 2*mmsize + 8], m2
+    movu     [rsp + 3*mmsize + 8], m3
+
+    mov      r4w, [r2+64]                            ; last above sample
+    mov      [rsp+72], r4w
+    mov      r4w, [r3+12]                            ; four projected left samples, stored word by word
+    mov      [rsp+6], r4w
+    mov      r4w, [r3+26]
+    mov      [rsp+4], r4w
+    mov      r4w, [r3+38]
+    mov      [rsp+2], r4w
+    mov      r4w, [r3+52]
+    mov      [rsp], r4w
+
+    lea      r3, [ang_table + 16 * 16]               ; r3 -> interpolation weight table
+    mov      r4d, 8                                  ; 8 column strips
+    mov      r2, rsp
+    add      r1, r1                                  ; stride in bytes
+    lea      r5, [r1 * 3]
+    mov     r6, r0
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_12_24 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_25, 3,7,7,0-(4*mmsize+4)    ; 32x32 angular mode 25: only one left sample is projected below the above row
+    lea      r3, [r2 + 128]                          ; r3 -> left-neighbour half of the reference buffer
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 0*mmsize + 2], m0                ; copy the 64-byte "above" row, leaving a 2-byte slot in front
+    movu     [rsp + 1*mmsize + 2], m1
+    movu     [rsp + 2*mmsize + 2], m2
+    movu     [rsp + 3*mmsize + 2], m3
+    mov      r4w, [r3+32]                            ; the single projected left sample goes into the front slot
+    mov      [rsp], r4w
+    mov      r4w, [r2+64]                            ; last above sample
+    mov      [rsp+66], r4w
+
+    lea      r3, [ang_table + 16 * 16]               ; r3 -> interpolation weight table
+    mov      r4d, 8                                  ; 8 column strips
+    mov      r2, rsp
+    add      r1, r1                                  ; stride in bytes
+    lea      r5, [r1 * 3]
+    mov      r6, r0
+
+.loop:
+    MODE_11_25 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_26, 3,7,4                   ; 32x32 angular mode 26 (pure vertical): replicate the 32 "above" samples into every row
+    mov         r6d, 4                               ; 4 iterations x 8 rows each = 32 rows
+    add         r1, r1                               ; stride in bytes (presumably 16-bit samples -- see the other ang32 routines)
+    lea         r3, [r1 * 2]
+    lea         r4, [r1 * 3]
+    lea         r5, [r1 * 4]
+    ; (review) removed dead "mova m4, [c_mode32_10_0]": m4 was never referenced in this function
+
+    movu        m0, [r2 + 2 ]                        ; the 32 above-row reference samples (64 bytes)
+    movu        m1, [r2 + 18]
+    movu        m2, [r2 + 34]
+    movu        m3, [r2 + 50]
+
+.loop:
+    movu        [r0], m0                             ; write the same row at stride offsets 0, 1, 2, 3 ...
+    movu        [r0 + 16], m1
+    movu        [r0 + 32], m2
+    movu        [r0 + 48], m3
+
+    movu        [r0 + r1], m0
+    movu        [r0 + r1 + 16], m1
+    movu        [r0 + r1 + 32], m2
+    movu        [r0 + r1 + 48], m3
+
+    movu        [r0 + r3], m0
+    movu        [r0 + r3 + 16], m1
+    movu        [r0 + r3 + 32], m2
+    movu        [r0 + r3 + 48], m3
+
+    movu        [r0 + r4], m0
+    movu        [r0 + r4 + 16], m1
+    movu        [r0 + r4 + 32], m2
+    movu        [r0 + r4 + 48], m3
+
+    add         r0, r5                               ; ... then advance four rows and repeat
+
+    movu        [r0], m0
+    movu        [r0 + 16], m1
+    movu        [r0 + 32], m2
+    movu        [r0 + 48], m3
+
+    movu        [r0 + r1], m0
+    movu        [r0 + r1 + 16], m1
+    movu        [r0 + r1 + 32], m2
+    movu        [r0 + r1 + 48], m3
+
+    movu        [r0 + r3], m0
+    movu        [r0 + r3 + 16], m1
+    movu        [r0 + r3 + 32], m2
+    movu        [r0 + r3 + 48], m3
+
+    movu        [r0 + r4], m0
+    movu        [r0 + r4 + 16], m1
+    movu        [r0 + r4 + 32], m2
+    movu        [r0 + r4 + 48], m3
+
+    add         r0, r5
+    dec         r6d
+    jnz         .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_27, 3,7,8                   ; 32x32 angular mode 27: thin driver around the MODE_9_27 macro
+    lea    r3, [ang_table + 16 * 16]                 ; r3 -> fractional interpolation weight table
+    add    r1, r1                                    ; stride in bytes
+    lea    r5, [r1 * 3]
+    mov    r6, r0                                    ; r6 tracks the current column strip in dst
+    mov    r4d, 8                                    ; 8 strips of 8 bytes cover the 32-column width
+
+.loop:
+    MODE_9_27 0                                      ; render one column strip
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8                                     ; advance the reference pointer by one strip
+    dec    r4
+    jnz    .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_28, 3,7,8                   ; 32x32 angular mode 28: same driver shape as intra_pred_ang32_27, different macro
+    lea    r3, [ang_table + 16 * 16]                 ; r3 -> fractional interpolation weight table
+    add    r1, r1                                    ; stride in bytes
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8                                    ; 8 column strips
+
+.loop:
+    MODE_8_28 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_29, 3,7,8                   ; 32x32 angular mode 29: same driver shape as intra_pred_ang32_27
+    lea    r3, [ang_table + 16 * 16]                 ; r3 -> fractional interpolation weight table
+    add    r1, r1                                    ; stride in bytes
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8                                    ; 8 column strips
+
+.loop:
+    MODE_7_29 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_30, 3,7,8                   ; 32x32 angular mode 30: same driver shape as intra_pred_ang32_27
+    lea    r3, [ang_table + 16 * 16]                 ; r3 -> fractional interpolation weight table
+    add    r1, r1                                    ; stride in bytes
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8                                    ; 8 column strips
+
+.loop:
+    MODE_6_30 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_31, 3,7,8                   ; 32x32 angular mode 31: same driver shape as intra_pred_ang32_27
+    lea    r3, [ang_table + 16 * 16]                 ; r3 -> fractional interpolation weight table
+    add    r1, r1                                    ; stride in bytes
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8                                    ; 8 column strips
+
+.loop:
+    MODE_5_31 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_32, 3,7,8                   ; 32x32 angular mode 32: same driver shape as intra_pred_ang32_27
+    lea    r3, [ang_table + 16 * 16]                 ; r3 -> fractional interpolation weight table
+    add    r1, r1                                    ; stride in bytes
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8                                    ; 8 column strips
+
+.loop:
+    MODE_4_32 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang32_33, 3,7,8                   ; 32x32 angular mode 33: same driver shape as intra_pred_ang32_27
+    lea    r3, [ang_table + 16 * 16]                 ; r3 -> fractional interpolation weight table
+    add    r1, r1                                    ; stride in bytes
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8                                    ; 8 column strips
+.loop:
+    MODE_3_33 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+;-----------------------------------------------------------------------------------
+; void intra_filter_NxN(const pixel* references, pixel* filtered)
+;-----------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_filter_4x4, 2,4,5                      ; [1 2 1]/4 smoothing of the 4x4 reference array: r0=src refs, r1=dst refs; first/last samples kept unfiltered
+    mov             r2w, word [r0 + 16]             ; topLast
+    mov             r3w, word [r0 + 32]             ; LeftLast
+
+    ; filtering top
+    movu            m0, [r0 +  0]
+    movu            m1, [r0 + 16]
+    movu            m2, [r0 + 32]
+
+    pshufb          m4, m0, [intra_filter4_shuf0]   ; [6 5 4 3 2 1 0 1] samples[i - 1]
+    palignr         m3, m1, m0, 4
+    pshufb          m3, [intra_filter4_shuf1]       ; [8 7 6 5 4 3 2 9] samples[i + 1]
+
+    psllw           m0, 1                           ; out[i] = (in[i-1] + 2*in[i] + in[i+1] + 2) >> 2
+    paddw           m4, m3
+    paddw           m0, m4
+    paddw           m0, [pw_2]                      ; rounding constant
+    psrlw           m0, 2
+
+    ; filtering left
+    palignr         m4, m1, m1, 14                  ; rotate to line up samples[i - 1]
+    pinsrw          m4, [r0], 1                     ; neighbour of the first left sample is the corner sample
+    palignr         m3, m2, m1, 4
+    pshufb          m3, [intra_filter4_shuf1]
+
+    psllw           m1, 1
+    paddw           m4, m3
+    paddw           m1, m4
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    movu            [r1], m0
+    movu            [r1 + 16], m1
+    mov             [r1 + 16], r2w                  ; topLast
+    mov             [r1 + 32], r3w                  ; LeftLast
+    RET
+
+INIT_XMM sse4
+cglobal intra_filter_8x8, 2,4,6                      ; [1 2 1]/4 smoothing of the 8x8 reference array; topLast/LeftLast restored unfiltered at the end
+    mov             r2w, word [r0 + 32]             ; topLast
+    mov             r3w, word [r0 + 64]             ; LeftLast
+
+    ; filtering top
+    movu            m0, [r0]
+    movu            m1, [r0 + 16]
+    movu            m2, [r0 + 32]
+
+    pshufb          m4, m0, [intra_filter4_shuf0]   ; samples[i - 1] (corner handled by the shuffle)
+    palignr         m5, m1, m0, 2                   ; samples[i + 1]
+    pinsrw          m5, [r0 + 34], 0                ; neighbour of the corner sample is the first left sample
+
+    palignr         m3, m1, m0, 14                  ; samples[i - 1] for the second lane
+    psllw           m0, 1                           ; out[i] = (in[i-1] + 2*in[i] + in[i+1] + 2) >> 2
+    paddw           m4, m5
+    paddw           m0, m4
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+
+    palignr         m4, m2, m1, 2
+    psllw           m1, 1
+    paddw           m4, m3
+    paddw           m1, m4
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+    movu            [r1], m0
+    movu            [r1 + 16], m1
+
+    ; filtering left
+    movu            m1, [r0 + 48]
+    movu            m0, [r0 + 64]
+
+    palignr         m4, m2, m2, 14                  ; samples[i - 1]
+    pinsrw          m4, [r0], 1                     ; neighbour of the first left sample is the corner sample
+    palignr         m5, m1, m2, 2                   ; samples[i + 1]
+
+    palignr         m3, m1, m2, 14
+    palignr         m0, m1, 2                       ; two-operand form: m0 = align(m0, m1, 2)
+
+    psllw           m2, 1
+    paddw           m4, m5
+    paddw           m2, m4
+    paddw           m2, [pw_2]
+    psrlw           m2, 2
+
+    psllw           m1, 1
+    paddw           m0, m3
+    paddw           m1, m0
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    movu            [r1 + 32], m2
+    movu            [r1 + 48], m1
+    mov             [r1 + 32], r2w                  ; topLast
+    mov             [r1 + 64], r3w                  ; LeftLast
+    RET
+
+INIT_XMM sse4
+cglobal intra_filter_16x16, 2,4,6                    ; [1 2 1]/4 smoothing of the 16x16 reference array, 16 samples per pass; endpoints restored unfiltered
+    mov             r2w, word [r0 +  64]            ; topLast
+    mov             r3w, word [r0 + 128]            ; LeftLast
+
+    ; filtering top
+    movu            m0, [r0]
+    movu            m1, [r0 + 16]
+    movu            m2, [r0 + 32]
+
+    pshufb          m4, m0, [intra_filter4_shuf0]   ; samples[i - 1] (corner handled by the shuffle)
+    palignr         m5, m1, m0, 2                   ; samples[i + 1]
+    pinsrw          m5, [r0 + 66], 0                ; neighbour of the corner sample is the first left sample
+
+    palignr         m3, m1, m0, 14
+    psllw           m0, 1                           ; out[i] = (in[i-1] + 2*in[i] + in[i+1] + 2) >> 2
+    paddw           m4, m5
+    paddw           m0, m4
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+
+    palignr         m4, m2, m1, 2
+    psllw           m5, m1, 1
+    paddw           m4, m3
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    movu            [r1], m0
+    movu            [r1 + 16], m5
+
+    movu            m0, [r0 + 48]
+    movu            m5, [r0 + 64]
+
+    palignr         m3, m2, m1, 14
+    palignr         m4, m0, m2, 2
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m0, m2, 14
+    palignr         m4, m5, m0, 2
+
+    psllw           m0, 1
+    paddw           m4, m3
+    paddw           m0, m4
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+    movu            [r1 + 32], m1
+    movu            [r1 + 48], m0
+
+    ; filtering left
+    movu            m1, [r0 + 80]
+    movu            m2, [r0 + 96]
+
+    palignr         m4, m5, m5, 14                  ; samples[i - 1]
+    pinsrw          m4, [r0], 1                     ; neighbour of the first left sample is the corner sample
+    palignr         m0, m1, m5, 2                   ; samples[i + 1]
+
+    psllw           m3, m5, 1
+    paddw           m4, m0
+    paddw           m3, m4
+    paddw           m3, [pw_2]
+    psrlw           m3, 2
+
+    palignr         m0, m1, m5, 14
+    palignr         m4, m2, m1, 2
+
+    psllw           m5, m1, 1
+    paddw           m4, m0
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    movu            [r1 + 64], m3
+    movu            [r1 + 80], m5
+
+    movu            m5, [r0 + 112]
+    movu            m0, [r0 + 128]
+
+    palignr         m3, m2, m1, 14
+    palignr         m4, m5, m2, 2
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m5, m2, 14
+    palignr         m4, m0, m5, 2
+
+    psllw           m5, 1
+    paddw           m4, m3
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    movu            [r1 +  96], m1
+    movu            [r1 + 112], m5
+
+    mov             [r1 +  64], r2w                 ; topLast
+    mov             [r1 + 128], r3w                 ; LeftLast
+    RET
+
+INIT_XMM sse4
+cglobal intra_filter_32x32, 2,4,6                    ; [1 2 1]/4 smoothing of the 32x32 reference array, 16 samples per pass; endpoints restored unfiltered
+    mov             r2w, word [r0 + 128]            ; topLast
+    mov             r3w, word [r0 + 256]            ; LeftLast
+
+    ; filtering top
+    ; 0 to 15
+    movu            m0, [r0 +  0]
+    movu            m1, [r0 + 16]
+    movu            m2, [r0 + 32]
+
+    pshufb          m4, m0, [intra_filter4_shuf0]   ; samples[i - 1] (corner handled by the shuffle)
+    palignr         m5, m1, m0, 2                   ; samples[i + 1]
+    pinsrw          m5, [r0 + 130], 0               ; neighbour of the corner sample is the first left sample
+
+    palignr         m3, m1, m0, 14
+    psllw           m0, 1                           ; out[i] = (in[i-1] + 2*in[i] + in[i+1] + 2) >> 2
+    paddw           m4, m5
+    paddw           m0, m4
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+
+    palignr         m4, m2, m1, 2
+    psllw           m5, m1, 1
+    paddw           m4, m3
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    movu            [r1], m0
+    movu            [r1 + 16], m5
+
+    ; 16 to 31
+    movu            m0, [r0 + 48]
+    movu            m5, [r0 + 64]
+
+    palignr         m3, m2, m1, 14
+    palignr         m4, m0, m2, 2
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m0, m2, 14
+    palignr         m4, m5, m0, 2
+
+    psllw           m2, m0, 1
+    paddw           m4, m3
+    paddw           m2, m4
+    paddw           m2, [pw_2]
+    psrlw           m2, 2
+    movu            [r1 + 32], m1
+    movu            [r1 + 48], m2
+
+    ; 32 to 47
+    movu            m1, [r0 + 80]
+    movu            m2, [r0 + 96]
+
+    palignr         m3, m5, m0, 14
+    palignr         m4, m1, m5, 2
+
+    psllw           m0, m5, 1
+    paddw           m3, m4
+    paddw           m0, m3
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+
+    palignr         m3, m1, m5, 14
+    palignr         m4, m2, m1, 2
+
+    psllw           m5, m1, 1
+    paddw           m4, m3
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    movu            [r1 + 64], m0
+    movu            [r1 + 80], m5
+
+    ; 48 to 63
+    movu            m0, [r0 + 112]
+    movu            m5, [r0 + 128]
+
+    palignr         m3, m2, m1, 14
+    palignr         m4, m0, m2, 2
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m0, m2, 14
+    palignr         m4, m5, m0, 2
+
+    psllw           m0, 1
+    paddw           m4, m3
+    paddw           m0, m4
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+    movu            [r1 +  96], m1
+    movu            [r1 + 112], m0
+
+    ; filtering left
+    ; 64 to 79
+    movu            m1, [r0 + 144]
+    movu            m2, [r0 + 160]
+
+    palignr         m4, m5, m5, 14                  ; samples[i - 1]
+    pinsrw          m4, [r0], 1                     ; neighbour of the first left sample is the corner sample
+    palignr         m0, m1, m5, 2                   ; samples[i + 1]
+
+    psllw           m3, m5, 1
+    paddw           m4, m0
+    paddw           m3, m4
+    paddw           m3, [pw_2]
+    psrlw           m3, 2
+
+    palignr         m0, m1, m5, 14
+    palignr         m4, m2, m1, 2
+
+    psllw           m5, m1, 1
+    paddw           m4, m0
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    movu            [r1 + 128], m3
+    movu            [r1 + 144], m5
+
+    ; 80 to 95
+    movu            m5, [r0 + 176]
+    movu            m0, [r0 + 192]
+
+    palignr         m3, m2, m1, 14
+    palignr         m4, m5, m2, 2
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m5, m2, 14
+    palignr         m4, m0, m5, 2
+
+    psllw           m2, m5, 1
+    paddw           m4, m3
+    paddw           m2, m4
+    paddw           m2, [pw_2]
+    psrlw           m2, 2
+    movu            [r1 + 160], m1
+    movu            [r1 + 176], m2
+
+    ; 96 to 111
+    movu            m1, [r0 + 208]
+    movu            m2, [r0 + 224]
+
+    palignr         m3, m0, m5, 14
+    palignr         m4, m1, m0, 2
+
+    psllw           m5, m0, 1
+    paddw           m3, m4
+    paddw           m5, m3
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+
+    palignr         m3, m1, m0, 14
+    palignr         m4, m2, m1, 2
+
+    psllw           m0, m1, 1
+    paddw           m4, m3
+    paddw           m0, m4
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+    movu            [r1 + 192], m5
+    movu            [r1 + 208], m0
+
+    ; 112 to 127
+    movu            m5, [r0 + 240]
+    movu            m0, [r0 + 256]
+
+    palignr         m3, m2, m1, 14
+    palignr         m4, m5, m2, 2
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m5, m2, 14
+    palignr         m4, m0, m5, 2
+
+    psllw           m5, 1
+    paddw           m4, m3
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    movu            [r1 + 224], m1
+    movu            [r1 + 240], m5
+
+    mov             [r1 + 128], r2w                 ; topLast
+    mov             [r1 + 256], r3w                 ; LeftLast
+    RET
+
+INIT_YMM avx2
+cglobal intra_filter_4x4, 2,4,4                      ; AVX2 variant: top and left lanes filtered together in one 256-bit pass
+    mov             r2w, word [r0 + 16]         ; topLast
+    mov             r3w, word [r0 + 32]         ; LeftLast
+
+    ; filtering top
+    movu            m0, [r0]
+    vpbroadcastw    m2, xm0                     ; broadcast the corner sample to every word
+    movu            m1, [r0 + 16]
+
+    palignr         m3, m0, m2, 14              ; [6 5 4 3 2 1 0 0] [14 13 12 11 10 9 8 0]
+    pshufb          m3, [intra_filter4_shuf2]   ; [6 5 4 3 2 1 0 1] [14 13 12 11 10 9 0 9] samples[i - 1]
+    palignr         m1, m0, 4                   ; [9 8 7 6 5 4 3 2]
+    palignr         m1, m1, 14                  ; [9 8 7 6 5 4 3 2]  NOTE(review): comment duplicated from the line above; the per-lane rotate by 14 must change the layout -- verify the documented order
+
+    psllw           m0, 1                       ; out[i] = (in[i-1] + 2*in[i] + in[i+1] + 2) >> 2
+    paddw           m3, m1
+    paddw           m0, m3
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+
+    movu            [r1], m0
+    mov             [r1 + 16], r2w              ; topLast
+    mov             [r1 + 32], r3w              ; LeftLast
+    RET
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/intrapred8.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,22682 @@
+;*****************************************************************************
+;* Copyright (C) 2013 x265 project
+;*
+;* Authors: Min Chen <chenm003@163.com> <min.chen@multicorewareinc.com>
+;*          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32
+
+const intra_pred_shuff_0_8,     times 2 db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
+                                        db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
+
+intra_pred_shuff_15_0:   times 2 db 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+intra_filter4_shuf0:  times 2 db  2,  3,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13  ; word-pair pshufb mask: selects words [1 0 1 2 3 4 5 6]
+intra_filter4_shuf1:  times 2 db 14, 15,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13  ; word-pair pshufb mask: selects words [7 0 1 2 3 4 5 6]
+intra_filter4_shuf2:  times 2 db  4,  5,  0,  1,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15  ; word-pair pshufb mask: selects words [2 0 2 3 4 5 6 7]; presumably builds samples[i-1] for an intra_filter_4x4 routine later in this file -- confirm against its user
+
+pb_0_8        times 8 db  0,  8
+pb_unpackbw1  times 2 db  1,  8,  2,  8,  3,  8,  4,  8
+pb_swap8:     times 2 db  7,  6,  5,  4,  3,  2,  1,  0
+c_trans_4x4           db  0,  4,  8, 12,  1,  5,  9, 13,  2,  6, 10, 14,  3,  7, 11, 15
+const tab_S1,         db 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0,  0,  0,  0
+const tab_S2,         db 0, 1, 3, 5, 7, 9, 11, 13, 0, 0, 0, 0, 0, 0, 0, 0
+const tab_Si,         db  0,  1,  2,  3,  4,  5,  6,  7,  0,  1,  2,  3,  4,  5,  6,  7
+pb_fact0:             db  0,  2,  4,  6,  8, 10, 12, 14,  0,  0,  0,  0,  0,  0,  0,  0
+c_mode32_12_0:        db  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 13,  7,  0
+c_mode32_13_0:        db  3,  6, 10, 13,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0
+c_mode32_13_shuf:     db  0,  0,  0,  0,  0,  0,  0,  0,  7,  6,  5,  4,  3,  2,  1,  0
+c_mode32_14_shuf:     db 15, 14, 13,  0,  2,  3,  4,  5,  6,  7, 10, 11, 12, 13, 14, 15
+c_mode32_14_0:        db 15, 12, 10,  7,  5,  2,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0
+c_mode32_15_0:        db 15, 13, 11,  9,  8,  6,  4,  2,  0,  0,  0,  0,  0,  0,  0,  0
+c_mode32_16_0:        db 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0,  0,  0,  0,  0,  0
+c_mode32_17_0:        db 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0,  0,  0,  0
+c_mode32_18_0:        db 15, 14, 13, 12, 11, 10,  9,  8,  7,  6,  5,  4,  3,  2,  1,  0
+c_shuf8_0:            db  0,  1,  1,  2,  2,  3,  3,  4,  4,  5,  5,  6,  6,  7,  7,  8
+c_deinterval8:        db  0,  8,  1,  9,  2, 10,  3, 11,  4, 12,  5, 13,  6, 14,  7, 15
+pb_unpackbq:          db  0,  0,  0,  0,  0,  0,  0,  0,  1,  1,  1,  1,  1,  1,  1,  1
+c_mode16_12:          db 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 6
+c_mode16_13:          db 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 11, 7, 4
+c_mode16_14:          db 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 12, 10, 7, 5, 2
+c_mode16_15:          db  0,  0,  0,  0,  0,  0,  0,  0, 15, 13, 11,  9,  8,  6,  4,  2
+c_mode16_16:          db  8,  6,  5,  3,  2,  0, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2
+c_mode16_17:          db  4,  2,  1,  0, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1
+c_mode16_18:          db 0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
+
+ALIGN 32
+c_ang8_src1_9_2_10:   db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
+c_ang8_26_20:         db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+c_ang8_src3_11_4_12:  db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
+c_ang8_14_8:          db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+c_ang8_src5_13_5_13:  db 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
+c_ang8_2_28:          db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+c_ang8_src6_14_7_15:  db 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14
+c_ang8_22_16:         db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+c_ang8_21_10       :  db 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+c_ang8_src2_10_3_11:  db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
+c_ang8_31_20:         db 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+c_ang8_src4_12_4_12:  times 2 db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
+c_ang8_9_30:          db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+c_ang8_src5_13_6_14:  db 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13
+c_ang8_19_8:          db 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+
+c_ang8_17_2:          db 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
+c_ang8_19_4:          db 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+c_ang8_21_6:          db 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+c_ang8_23_8:          db 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8 ; angular weight pairs; dropped stray trailing comma (no sibling c_ang8 table has one) -- emitted bytes unchanged
+c_ang8_src4_12_5_13:  db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
+
+c_ang8_13_26:         db 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26
+c_ang8_7_20:          db 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+c_ang8_1_14:          db 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
+c_ang8_27_8:          db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+c_ang8_src2_10_2_10:  db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
+c_ang8_src3_11_3_11:  db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
+
+c_ang8_31_8:          db 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+c_ang8_13_22:         db 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
+c_ang8_27_4:          db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+c_ang8_9_18:          db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+
+c_ang8_5_10:          db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+c_ang8_15_20:         db 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+c_ang8_25_30:         db 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+c_ang8_3_8:           db 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+
+c_ang8_mode_27:       db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                      db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+                      db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                      db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+c_ang8_mode_25:       db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+                      db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+                      db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+                      db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+c_ang8_mode_24:       db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
+                      db 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                      db 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
+                      db 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+
+ALIGN 32
+c_ang16_mode_25:      db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+                      db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+                      db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+                      db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+                      db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                      db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+                      db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                      db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
+
+ALIGN 32
+c_ang16_mode_11:      db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
+                      db 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                      db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+                      db 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+                      db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+                      db 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                      db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
+                      db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
+
+
+ALIGN 32
+c_ang16_mode_12:      db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19
+                      db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
+                      db 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9
+                      db 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                      db 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31
+                      db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26
+                      db 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21
+                      db  8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+
+ALIGN 32
+c_ang16_mode_13:      db 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15
+                      db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+                      db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29
+                      db 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+                      db 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11
+                      db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
+                      db 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25
+                      db 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+ALIGN 32
+c_ang16_mode_28:      db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+                      db 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+                      db 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+                      db 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+                      db 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+                      db 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+                      db 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+                      db 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+ALIGN 32
+c_ang16_mode_9:       db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+                      db 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+                      db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
+                      db 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+                      db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26
+                      db 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+                      db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+                      db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
+
+ALIGN 32
+c_ang16_mode_27:      db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                      db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+                      db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                      db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+                      db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+                      db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+                      db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+                      db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+                      db 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
+
+ALIGN 32
+intra_pred_shuff_0_15: db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 15
+
+ALIGN 32
+c_ang16_mode_29:     db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9,  14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+                     db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27
+                     db 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13
+                     db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31
+                     db 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17
+                     db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26
+                     db 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                     db 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+                     db 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+ALIGN 32
+c_ang16_mode_30:      db 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26
+                      db 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+                      db 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
+                      db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27
+                      db 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21
+                      db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15
+                      db 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+                      db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
+                      db 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+ALIGN 32
+c_ang16_mode_31:      db 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17
+                      db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19
+                      db 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21
+                      db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6,  9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23
+                      db 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8,  7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25
+                      db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27
+                      db 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29
+                      db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31
+                      db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+ALIGN 32
+c_ang16_mode_24:     db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
+                     db 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                     db 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
+                     db 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+                     db 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
+                     db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                     db 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26
+                     db 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+ALIGN 32
+c_ang16_mode_23:     db 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
+                     db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5
+                     db 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19
+                     db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1
+                     db 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15
+                     db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+                     db 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+                     db 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
+                     db 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+ALIGN 32
+c_ang16_mode_22:     db 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+                     db 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                     db 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+                     db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5
+                     db 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11
+                     db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17
+                     db 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                     db 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+                     db 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
+ALIGN 32
+intra_pred_shuff_0_4:    times 4 db 0, 1, 1, 2, 2, 3, 3, 4
+intra_pred4_shuff1:      db 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 1, 2, 2, 3, 3, 4, 4, 5
+intra_pred4_shuff2:      db 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 1, 2, 2, 3, 3, 4, 4, 5, 1, 2, 2, 3, 3, 4, 4, 5
+intra_pred4_shuff31:     db 0, 1, 1, 2, 2, 3, 3, 4, 1, 2, 2, 3, 3, 4, 4, 5, 1, 2, 2, 3, 3, 4, 4, 5, 2, 3, 3, 4, 4, 5, 5, 6
+intra_pred4_shuff33:     db 0, 1, 1, 2, 2, 3, 3, 4, 1, 2, 2, 3, 3, 4, 4, 5, 2, 3, 3, 4, 4, 5, 5, 6, 3, 4, 4, 5, 5, 6, 6, 7
+intra_pred4_shuff3:      db 8, 9, 9, 10, 10, 11, 11, 12, 9, 10, 10, 11, 11, 12, 12, 13, 10, 11, 11, 12, 12, 13, 13, 14, 11, 12, 12, 13, 13, 14, 14, 15
+intra_pred4_shuff4:      db 9, 10, 10, 11, 11, 12, 12, 13, 10, 11, 11, 12, 12, 13, 13, 14, 10, 11, 11, 12, 12, 13, 13, 14, 11, 12, 12, 13, 13, 14, 14, 15
+intra_pred4_shuff5:      db 9, 10, 10, 11, 11, 12, 12, 13, 10, 11, 11, 12, 12, 13, 13, 14, 10, 11, 11, 12, 12, 13, 13, 14, 11, 12, 12, 13, 13, 14, 14, 15
+intra_pred4_shuff6:      db 9, 10, 10, 11, 11, 12, 12, 13, 9, 10, 10, 11, 11, 12, 12, 13, 10, 11, 11, 12, 12, 13, 13, 14, 10, 11, 11, 12, 12, 13, 13, 14
+intra_pred4_shuff7:      db 9, 10, 10, 11, 11, 12, 12, 13, 9, 10, 10, 11, 11, 12, 12, 13, 9, 10, 10, 11, 11, 12, 12, 13, 10, 11, 11, 12, 12, 13, 13, 14
+intra_pred4_shuff9:      db 9, 10, 10, 11, 11, 12, 12, 13, 9, 10, 10, 11, 11, 12, 12, 13, 9, 10, 10, 11, 11, 12, 12, 13, 9, 10, 10, 11, 11, 12, 12, 13
+intra_pred4_shuff12:     db 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12,0, 9, 9, 10, 10, 11, 11, 12
+intra_pred4_shuff13:     db 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12, 4, 0, 0, 9, 9, 10, 10, 11
+intra_pred4_shuff14:     db 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12, 2, 0, 0, 9, 9, 10, 10, 11, 2, 0, 0, 9, 9, 10, 10, 11
+intra_pred4_shuff15:     db 0, 9, 9, 10, 10, 11, 11, 12, 2, 0, 0, 9, 9, 10, 10, 11, 2, 0, 0, 9, 9, 10, 10, 11, 4, 2, 2, 0, 0, 9, 9, 10
+intra_pred4_shuff16:     db 0, 9, 9, 10, 10, 11, 11, 12, 2, 0, 0, 9, 9, 10, 10, 11, 2, 0, 0, 9, 9, 10, 10, 11, 3, 2, 2, 0, 0, 9, 9, 10
+intra_pred4_shuff17:     db 0, 9, 9, 10, 10, 11, 11, 12, 1, 0, 0, 9, 9, 10, 10, 11, 2, 1, 1, 0, 0, 9, 9, 10, 4, 2, 2, 1, 1, 0, 0, 9
+intra_pred4_shuff19:     db 0, 1, 1, 2, 2, 3, 3, 4, 9, 0, 0, 1, 1, 2, 2, 3, 10, 9, 9, 0, 0, 1, 1, 2, 12, 10, 10, 9, 9, 0, 0, 1
+intra_pred4_shuff20:     db 0, 1, 1, 2, 2, 3, 3, 4, 10, 0, 0, 1, 1, 2, 2, 3, 10, 0, 0, 1, 1, 2, 2, 3, 11, 10, 10, 0, 0, 1, 1, 2
+intra_pred4_shuff21:     db 0, 1, 1, 2, 2, 3, 3, 4, 10, 0, 0, 1, 1, 2, 2, 3, 10, 0, 0, 1, 1, 2, 2, 3, 12, 10, 10, 0, 0, 1, 1, 2
+intra_pred4_shuff22:     db 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 10, 0, 0, 1, 1, 2, 2, 3, 10, 0, 0, 1, 1, 2, 2, 3
+intra_pred4_shuff23:     db 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 12, 0, 0, 1, 1, 2, 2, 3
+
+c_ang4_mode_27:          db 30, 2, 30, 2, 30, 2, 30, 2, 28, 4, 28, 4, 28, 4, 28, 4, 26, 6, 26, 6, 26, 6, 26, 6, 24, 8, 24, 8, 24, 8, 24, 8
+c_ang4_mode_28:          db 27, 5, 27, 5, 27, 5, 27, 5, 22, 10, 22, 10, 22, 10, 22, 10, 17, 15, 17, 15, 17, 15, 17, 15, 12, 20, 12, 20, 12, 20, 12, 20
+c_ang4_mode_29:          db 23, 9, 23, 9, 23, 9, 23, 9, 14, 18, 14, 18, 14, 18, 14, 18, 5, 27, 5, 27, 5, 27, 5, 27, 28, 4, 28, 4, 28, 4, 28, 4
+c_ang4_mode_30:          db 19, 13, 19, 13, 19, 13, 19, 13, 6, 26, 6, 26, 6, 26, 6, 26, 25, 7, 25, 7, 25, 7, 25, 7, 12, 20, 12, 20, 12, 20, 12, 20
+c_ang4_mode_31:          db 15, 17, 15, 17, 15, 17, 15, 17, 30, 2, 30, 2, 30, 2, 30, 2, 13, 19, 13, 19, 13, 19, 13, 19, 28, 4, 28, 4, 28, 4, 28, 4
+c_ang4_mode_32:          db 11, 21, 11, 21, 11, 21, 11, 21, 22, 10, 22, 10, 22, 10, 22, 10, 1, 31, 1, 31, 1, 31, 1, 31, 12, 20, 12, 20, 12, 20, 12, 20
+c_ang4_mode_33:          db 6, 26, 6, 26, 6, 26, 6, 26, 12, 20, 12, 20, 12, 20, 12, 20, 18, 14, 18, 14, 18, 14, 18, 14, 24, 8, 24, 8, 24, 8, 24, 8
+c_ang4_mode_5:           db 15, 17, 15, 17, 15, 17, 15, 17, 30, 2, 30, 2, 30, 2, 30, 2, 13, 19, 13, 19, 13, 19, 13, 19, 28, 4, 28, 4, 28, 4, 28, 4
+c_ang4_mode_6:           db 19, 13, 19, 13, 19, 13, 19, 13, 6, 26, 6, 26, 6, 26, 6, 26, 25, 7, 25, 7, 25, 7, 25, 7, 12, 20, 12, 20, 12, 20, 12, 20
+c_ang4_mode_7:           db 23, 9, 23, 9, 23, 9, 23, 9, 14, 18, 14, 18, 14, 18, 14, 18, 5, 27, 5, 27, 5, 27, 5, 27, 28, 4, 28, 4, 28, 4, 28, 4
+c_ang4_mode_8:           db 27, 5, 27, 5, 27, 5, 27, 5, 22, 10, 22, 10, 22, 10, 22, 10, 17, 15, 17, 15, 17, 15, 17, 15, 12, 20, 12, 20, 12, 20, 12, 20
+c_ang4_mode_9:           db 30, 2, 30, 2, 30, 2, 30, 2, 28, 4, 28, 4, 28, 4, 28, 4, 26, 6, 26, 6, 26, 6, 26, 6, 24, 8, 24, 8, 24, 8, 24, 8
+c_ang4_mode_11:          db 2, 30, 2, 30, 2, 30, 2, 30, 4, 28, 4, 28, 4, 28, 4, 28, 6, 26, 6, 26, 6, 26, 6, 26, 8, 24, 8, 24, 8, 24, 8, 24
+c_ang4_mode_12:          db 5, 27, 5, 27, 5, 27, 5, 27, 10, 22, 10, 22, 10, 22, 10, 22, 15, 17, 15, 17, 15, 17, 15, 17, 20, 12, 20, 12, 20, 12, 20, 12
+c_ang4_mode_13:          db 9, 23, 9, 23, 9, 23, 9, 23, 18, 14, 18, 14, 18, 14, 18, 14, 27, 5, 27, 5, 27, 5, 27, 5, 4, 28, 4, 28, 4, 28, 4, 28
+c_ang4_mode_14:          db 13, 19, 13, 19, 13, 19, 13, 19, 26, 6, 26, 6, 26, 6, 26, 6, 7, 25, 7, 25, 7, 25, 7, 25, 20, 12, 20, 12, 20, 12, 20, 12
+c_ang4_mode_15:          db 17, 15, 17, 15, 17, 15, 17, 15, 2, 30, 2, 30, 2, 30, 2, 30, 19, 13, 19, 13, 19, 13, 19, 13, 4, 28, 4, 28, 4, 28, 4, 28 ; angular weight pairs; dropped stray 33rd value so the table is 32 bytes like every sibling and matches its mirror c_ang4_mode_21 exactly
+c_ang4_mode_16:          db 21, 11, 21, 11, 21, 11, 21, 11, 10, 22, 10, 22, 10, 22, 10, 22, 31, 1, 31, 1, 31, 1, 31, 1, 20, 12, 20, 12, 20, 12, 20, 12
+c_ang4_mode_17:          db 26, 6, 26, 6, 26, 6, 26, 6, 20, 12, 20, 12, 20, 12, 20, 12, 14, 18, 14, 18, 14, 18, 14, 18, 8, 24, 8, 24, 8, 24, 8, 24
+c_ang4_mode_19:          db 26, 6, 26, 6, 26, 6, 26, 6, 20, 12, 20, 12, 20, 12, 20, 12, 14, 18, 14, 18, 14, 18, 14, 18, 8, 24, 8, 24, 8, 24, 8, 24
+c_ang4_mode_20:          db 21, 11, 21, 11, 21, 11, 21, 11, 10, 22, 10, 22, 10, 22, 10, 22, 31, 1, 31, 1, 31, 1, 31, 1, 20, 12, 20, 12, 20, 12, 20, 12
+c_ang4_mode_21:          db 17, 15, 17, 15, 17, 15, 17, 15, 2, 30, 2, 30, 2, 30, 2, 30, 19, 13, 19, 13, 19, 13, 19, 13, 4, 28, 4, 28, 4, 28, 4, 28
+c_ang4_mode_22:          db 13, 19, 13, 19, 13, 19, 13, 19, 26, 6, 26, 6, 26, 6, 26, 6, 7, 25, 7, 25, 7, 25, 7, 25, 20, 12, 20, 12, 20, 12, 20, 12
+c_ang4_mode_23:          db 9, 23, 9, 23, 9, 23, 9, 23, 18, 14, 18, 14, 18, 14, 18, 14, 27, 5, 27, 5, 27, 5, 27, 5, 4, 28, 4, 28, 4, 28, 4, 28
+c_ang4_mode_24:          db 5, 27, 5, 27, 5, 27, 5, 27, 10, 22, 10, 22, 10, 22, 10, 22, 15, 17, 15, 17, 15, 17, 15, 17, 20, 12, 20, 12, 20, 12, 20, 12
+c_ang4_mode_25:          db 2, 30, 2, 30, 2, 30, 2, 30, 4, 28, 4, 28, 4, 28, 4, 28, 6, 26, 6, 26, 6, 26, 6, 26, 8, 24, 8, 24, 8, 24, 8, 24
+
+ALIGN 32
+;; (blkSize - 1 - x)
+pw_planar4_0:         dw 3,  2,  1,  0,  3,  2,  1,  0
+ALIGN 32
+;; 8x8 angular-mode weight tables: interleaved (32-frac, frac) byte pairs,
+;; four 32-byte rows per mode.  Consumers are outside this view -- presumably
+;; pmaddubsw-style filtering kernels; confirm against the mode_13/14/15 code.
+c_ang8_mode_13:       db 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
+                      db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+                      db 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+                      db 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+
+ALIGN 32
+c_ang8_mode_14:       db 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+                      db 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                      db 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+                      db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+
+ALIGN 32
+c_ang8_mode_15:       db 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+                      db 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+                      db 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26
+                      db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+
+;; reference-sample gather order for mode 16 (byte shuffle indices)
+const c_ang8_mode_16,       db 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 10, 12, 13, 15, 0, 0
+
+const intra_pred8_shuff16,  db 0, 1, 1, 2, 3, 3, 4, 5
+                            db 1, 2, 2, 3, 4, 4, 5, 6
+                            db 2, 3, 3, 4, 5, 5, 6, 7
+                            db 3, 4, 4, 5, 6, 6, 7, 8
+                            db 4, 5, 5, 6, 7, 7, 8, 9
+
+const angHor8_tab_16,       db (32-11), 11, (32-22), 22, (32-1 ),  1, (32-12), 12, (32-23), 23, (32- 2),  2, (32-13), 13, (32-24), 24
+
+const c_ang8_mode_20,       db 15, 13, 12, 10, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0
+
+; NOTE: this big table improves speed by ~10%; if a broadcast instruction that
+; works on the high 128 bits becomes available in future, we can remove the table
+const angHor8_tab_20,       times 8 db (32-24), 24
+                            times 8 db (32-13), 13
+                            times 8 db (32- 2),  2
+                            times 8 db (32-23), 23
+                            times 8 db (32-12), 12
+                            times 8 db (32- 1),  1
+                            times 8 db (32-22), 22
+                            times 8 db (32-11), 11
+
+;; 16x16 angular prediction constants, per mode:
+;;   ang16_shuf_mode*  - byte shuffle indices selecting reference-sample pairs
+;;   angHor_tab_*      - interleaved (32-frac, frac) horizontal weight pairs
+const ang16_shuf_mode9,    times 8 db 0, 1
+                           times 8 db 1, 2
+
+const angHor_tab_9,  db (32-2), 2, (32-4), 4, (32-6), 6, (32-8), 8, (32-10), 10, (32-12), 12, (32-14), 14, (32-16), 16
+                     db (32-18), 18, (32-20), 20, (32-22), 22, (32-24),  24, (32-26),  26, (32-28), 28, (32-30), 30, (32-32), 32
+
+const angHor_tab_11, db (32-30), 30, (32-28), 28, (32-26), 26, (32-24), 24, (32-22), 22, (32-20), 20, (32-18), 18, (32-16), 16
+                     db (32-14), 14, (32-12), 12, (32-10), 10, (32- 8),  8, (32- 6),  6, (32- 4),  4, (32- 2),  2, (32- 0),  0
+
+const ang16_shuf_mode12,   db 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 2, 3, 2, 3
+                           db 1, 2, 1, 2, 1, 2, 1, 2, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 1, 2, 1, 2, 1, 2, 1, 2
+
+const angHor_tab_12, db (32-27), 27, (32-22), 22, (32-17), 17, (32-12), 12, (32-7), 7, (32-2), 2, (32-29), 29, (32-24), 24
+                     db (32-19), 19, (32-14), 14, (32-9), 9, (32-4), 4, (32-31), 31, (32-26),  26, (32-21), 21, (32-16), 16
+
+;; modes 13..17: third row holds the inverse-angle left-column sample picks
+const ang16_shuf_mode13,   db 4, 5, 4, 5, 4, 5, 3, 4, 3, 4, 3, 4, 3, 4, 2, 3, 5, 6, 5, 6, 5, 6, 4, 5, 4, 5, 4, 5, 4, 5, 3, 4
+                           db 2, 3, 2, 3, 1, 2, 1, 2, 1, 2, 1, 2, 0, 1, 0, 1, 3, 4, 3, 4, 2, 3, 2, 3, 2, 3, 2, 3, 1, 2, 1, 2
+                           db 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 11, 7, 4, 0, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 11, 7, 4, 0
+
+const angHor_tab_13, db (32-23), 23, (32-14), 14, (32-5), 5, (32-28), 28, (32-19), 19, (32-10), 10, (32-1), 1, (32-24), 24
+                     db (32-15), 15, (32-6), 6, (32-29), 29, (32-20), 20, (32-11), 11, (32-2), 2, (32-25), 25, (32-16), 16
+
+const ang16_shuf_mode14,   db 6, 7, 6, 7, 5, 6, 5, 6, 4, 5, 4, 5, 4, 5, 3, 4, 7, 8, 7, 8, 6, 7, 6, 7, 5, 6, 5, 6, 5, 6, 4, 5
+                           db 3, 4, 2, 3, 2, 3, 2, 3, 1, 2, 1, 2, 0, 1, 0, 1, 4, 5, 3, 4, 3, 4, 3, 4, 2, 3, 2, 3, 1, 2, 1, 2
+                           db 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 12, 10, 7, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 12, 10, 7, 5, 2, 0
+
+const angHor_tab_14, db (32-19), 19, (32-6), 6, (32-25), 25, (32-12), 12, (32-31), 31, (32-18), 18, (32-5), 5, (32-24), 24
+                     db (32-11), 11, (32-30), 30, (32-17), 17, (32-4), 4, (32-23), 23, (32-10), 10, (32-29), 29, (32-16), 16
+
+const ang16_shuf_mode15,   db 8, 9, 7, 8, 7, 8, 6, 7, 6, 7, 5, 6, 5, 6, 4, 5, 9, 10, 8, 9, 8, 9, 7, 8, 7, 8, 6, 7, 6, 7, 5, 6
+                           db 4, 5, 3, 4, 3, 4, 2, 3, 2, 3, 1, 2, 1, 2, 0, 1, 5, 6, 4, 5, 4, 5, 3, 4, 3, 4, 2, 3, 2, 3, 1, 2
+                           db 0, 0, 0, 0, 0, 0, 0, 15, 13, 11, 9, 8, 6, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 15, 13, 11, 9, 8, 6, 4, 2, 0
+
+const angHor_tab_15, db (32-15), 15, (32-30), 30, (32-13), 13, (32-28), 28, (32-11), 11, (32-26), 26, (32-9), 9, (32-24), 24
+                     db (32-7), 7, (32-22), 22, (32-5), 5, (32-20), 20, (32-3), 3, (32-18), 18, (32-1), 1, (32- 16), 16
+
+const ang16_shuf_mode16,   db 10, 11, 9, 10, 9, 10, 8, 9, 7, 8, 7, 8, 6, 7, 5, 6, 11, 12, 10, 11, 10, 11, 9, 10, 8, 9, 8, 9, 7, 8, 6, 7
+                           db 5, 6, 4, 5, 3, 4, 3, 4, 2, 3, 1, 2, 1, 2, 0, 1, 6, 7, 5, 6, 4, 5, 4, 5, 3, 4, 2, 3, 2, 3, 1, 2
+                           db 0 ,0, 0, 0, 0, 15, 14, 12 , 11, 9, 8, 6, 5, 3, 2, 0, 0, 0, 0, 0, 0, 15, 14, 12, 11, 9, 8, 6, 5, 3, 2, 0
+
+const angHor_tab_16, db (32-11), 11, (32-22), 22, (32-1), 1, (32-12), 12, (32-23), 23, (32-2), 2, (32-13), 13, (32-24), 24
+                     db (32-3), 3, (32-14), 14, (32-25), 25, (32-4), 4, (32-15), 15, (32-26), 26, (32-5), 5, (32-16), 16
+
+const ang16_shuf_mode17,   db 12, 13, 11, 12, 10, 11, 9, 10, 8, 9, 8, 9, 7, 8, 6, 7, 13, 14, 12, 13, 11, 12, 10, 11, 9, 10, 9, 10, 8, 9, 7, 8
+                           db 5, 6, 4, 5, 4, 5, 3, 4, 2, 3, 1, 2, 0, 1, 0, 1, 6, 7, 5, 6, 5, 6, 4, 5, 3, 4, 2, 3, 1, 2, 1, 2
+                           db 0, 0, 0, 15, 14, 12, 11, 10, 9, 7, 6, 5, 4, 2, 1, 0, 0, 0, 0, 15, 14, 12, 11, 10, 9, 7, 6, 5, 4, 2, 1, 0
+
+const angHor_tab_17, db (32- 6),  6, (32-12), 12, (32-18), 18, (32-24), 24, (32-30), 30, (32- 4),  4, (32-10), 10, (32-16), 16
+                     db (32-22), 22, (32-28), 28, (32- 2),  2, (32- 8),  8, (32-14), 14, (32-20), 20, (32-26), 26, (32- 0),  0
+
+; Intrapred_angle32x32, modes 1 to 33 constants:
+;   ang32_fact_mode* - interleaved (32-frac, frac) byte weight pairs
+;   ang32_shuf_mode* - byte shuffle indices for reference-sample gathering
+; (dd rows are dword permutation indices -- presumably for vpermd; confirm
+; against the 32x32 kernels, which are outside this view)
+const ang32_shuf_mode9,         times 8 db 0, 1
+                                times 8 db 1, 2
+
+const ang32_shuf_mode11,        times 8 db 1, 2
+                                times 8 db 0, 1
+
+const ang32_fact_mode12,        db (32-27), 27, (32-22), 22, (32-17), 17, (32-12), 12, (32- 7),  7, (32- 2),  2, (32-29), 29, (32-24), 24
+                                db (32-11), 11, (32- 6),  6, (32- 1),  1, (32-28), 28, (32-23), 23, (32-18), 18, (32-13), 13, (32- 8),  8
+                                db (32-19), 19, (32-14), 14, (32- 9),  9, (32- 4),  4, (32-31), 31, (32-26), 26, (32-21), 21, (32-16), 16
+                                db (32- 3),  3, (32-30), 30, (32-25), 25, (32-20), 20, (32-15), 15, (32-10), 10, (32- 5),  5, (32- 0),  0
+const ang32_shuf_mode12,        db  4,  5,  4,  5,  4,  5,  4,  5,  4,  5,  4,  5,  3,  4,  3,  4,  2,  3,  2,  3,  2,  3,  1,  2,  1,  2,  1,  2,  1,  2,  1,  2
+                                db  3,  4,  3,  4,  3,  4,  3,  4,  2,  3,  2,  3,  2,  3,  2,  3,  1,  2,  0,  1,  0,  1,  0,  1,  0,  1,  0,  1,  0,  1,  0,  1
+const ang32_shuf_mode24,        db  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 13, 13,  6,  6,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 10, 10,  3,  3
+                                dd  0,  0,  7,  3,  0,  0,  7,  3
+
+const ang32_fact_mode13,        db (32-23), 23, (32-14), 14, (32- 5),  5, (32-28), 28, (32-19), 19, (32-10), 10, (32- 1),  1, (32-24), 24
+                                db (32- 7),  7, (32-30), 30, (32-21), 21, (32-12), 12, (32- 3),  3, (32-26), 26, (32-17), 17, (32- 8),  8
+                                db (32-15), 15, (32- 6),  6, (32-29), 29, (32-20), 20, (32-11), 11, (32- 2),  2, (32-25), 25, (32-16), 16
+                                db (32-31), 31, (32-22), 22, (32-13), 13, (32- 4),  4, (32-27), 27, (32-18), 18, (32- 9),  9, (32- 0),  0
+const ang32_shuf_mode13,        db 14, 15, 14, 15, 14, 15, 13, 14, 13, 14, 13, 14, 13, 14, 12, 13, 10, 11,  9, 10,  9, 10,  9, 10,  9, 10,  8,  9,  8,  9,  8,  9
+                                db 12, 13, 12, 13, 11, 12, 11, 12, 11, 12, 11, 12, 10, 11, 10, 11,  7,  8,  7,  8,  7,  8,  7,  8,  6,  7,  6,  7,  6,  7,  6,  7
+                                db  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 14, 11,  7,  4,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 12,  9,  5,  2
+const ang32_shuf_mode23,        db  0,  0,  0,  0,  0,  0,  0,  0, 14, 14, 11, 11,  7,  7,  4,  4,  0,  0,  0,  0,  0,  0,  0,  0, 12, 12,  9,  9,  5,  5,  2,  2
+
+const ang32_fact_mode14,        db (32-19), 19, (32- 6),  6, (32-25), 25, (32-12), 12, (32-31), 31, (32-18), 18, (32- 5),  5, (32-24), 24
+                                db (32- 3),  3, (32-22), 22, (32- 9),  9, (32-28), 28, (32-15), 15, (32- 2),  2, (32-21), 21, (32- 8),  8
+                                db (32-11), 11, (32-30), 30, (32-17), 17, (32- 4),  4, (32-23), 23, (32-10), 10, (32-29), 29, (32-16), 16
+                                db (32-27), 27, (32-14), 14, (32- 1),  1, (32-20), 20, (32- 7),  7, (32-26), 26, (32-13), 13, (32- 0),  0
+const ang32_shuf_mode14,        db 14, 15, 14, 15, 13, 14, 13, 14, 12, 13, 12, 13, 12, 13, 11, 12,  8,  9,  7,  8,  7,  8,  6,  7,  6,  7,  6,  7,  5,  6,  5,  6
+                                db 11, 12, 10, 11, 10, 11, 10, 11,  9, 10,  9, 10,  8,  9,  8,  9,  4,  5,  4,  5,  4,  5,  3,  4,  3,  4,  2,  3,  2,  3,  2,  3
+                                db  0,  0,  0,  0,  0,  0,  0,  0, 15, 12, 10,  7,  5,  2,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 14, 11,  9,  6,  4,  1
+const ang32_shuf_mode22,        db  0,  0, 15, 15, 13, 13, 10, 10,  8,  8,  5,  5,  3,  3,  0,  0,  0,  0,  0,  0,  0,  0,  0, 12, 12,  9,  9,  7,  7,  4,  4,  2
+
+const ang32_fact_mode15,        db (32-15), 15, (32-30), 30, (32-13), 13, (32-28), 28, (32-11), 11, (32-26), 26, (32- 9),  9, (32-24), 24
+                                db (32-31), 31, (32-14), 14, (32-29), 29, (32-12), 12, (32-27), 27, (32-10), 10, (32-25), 25, (32- 8),  8
+                                db (32- 7),  7, (32-22), 22, (32- 5),  5, (32-20), 20, (32- 3),  3, (32-18), 18, (32- 1),  1, (32-16), 16
+                                db (32-23), 23, (32- 6),  6, (32-21), 21, (32- 4),  4, (32-19), 19, (32- 2),  2, (32-17), 17, (32- 0),  0
+const ang32_shuf_mode15,        db 14, 15, 13, 14, 13, 14, 12, 13, 12, 13, 11, 12, 11, 12, 10, 11,  5,  6,  5,  6,  4,  5,  4,  5,  3,  4,  3,  4,  2,  3,  2,  3
+                                db 12, 13, 11, 12, 11, 12, 10, 11, 10, 11,  9, 10,  9, 10,  8,  9,  3,  4,  3,  4,  2,  3,  2,  3,  1,  2,  1,  2,  0,  1,  0,  1
+                                db  0,  0,  0,  0,  0,  0,  0,  0, 15, 13, 11,  9,  8,  6,  4,  2,  0,  0,  0,  0,  0,  0,  0,  0, 14, 12, 10,  8,  7,  5,  3,  1
+const ang32_shuf_mode21,        db 15, 15, 13, 13, 11, 11,  9,  9,  8,  8,  6,  6,  4,  4,  2,  2, 14, 14, 12, 12, 10, 10,  8,  8,  7,  7,  5,  5,  3,  3,  1,  1
+
+const ang32_fact_mode16,        db (32-11), 11, (32-22), 22, (32- 1),  1, (32-12), 12, (32-23), 23, (32- 2),  2, (32-13), 13, (32-24), 24
+                                db (32- 3),  3, (32-14), 14, (32-25), 25, (32- 4),  4, (32-15), 15, (32-26), 26, (32- 5),  5, (32-16), 16
+                                db (32-27), 27, (32- 6),  6, (32-17), 17, (32-28), 28, (32- 7),  7, (32-18), 18, (32-29), 29, (32- 8),  8
+                                db (32-19), 19, (32-30), 30, (32- 9),  9, (32-20), 20, (32-31), 31, (32-10), 10, (32-21), 21, (32- 0),  0
+const ang32_shuf_mode16,        db 14, 15, 13, 14, 13, 14, 12, 13, 11, 12, 11, 12, 10, 11,  9, 10,  9, 10,  8,  9,  7,  8,  7,  8,  6,  7,  5,  6,  5,  6,  4,  5
+                                db 14, 15, 14, 15, 13, 14, 12, 13, 12, 13, 11, 12, 10, 11, 10, 11,  9, 10,  8,  9,  8,  9,  7,  8,  6,  7,  6,  7,  5,  6,  5,  6
+                                db  0,  0,  0,  0, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0,  0,  0,  0,  0,  0,  0,  0, 14, 13, 11, 10,  8,  7,  5,  4,  2,  1
+                                dd  7,  1,  2,  3,  7,  1,  2,  3
+const ang32_shuf_mode20,        db 12, 11,  9,  8,  6,  5,  3,  2,  0,  0,  0,  0,  0,  0, 14, 15,  8,  7,  5,  4,  2,  1,  0,  0, 14, 13, 13, 11, 11, 10, 10,  8
+                                db  2,  2,  3,  3,  4,  4,  5,  5,  6,  6,  7,  7,  8,  8,  9,  9,  2,  2,  3,  3,  4,  4,  5,  5,  6,  6,  7,  7,  1,  1,  0,  0
+
+const ang32_fact_mode17,        db (32- 6),  6, (32-12), 12, (32-18), 18, (32-24), 24, (32-30), 30, (32- 4),  4, (32-10), 10, (32-16), 16
+                                db (32-22), 22, (32-28), 28, (32- 2),  2, (32- 8),  8, (32-14), 14, (32-20), 20, (32-26), 26, (32- 0),  0
+const ang32_shuf_mode17,        db 14, 15, 13, 14, 12, 13, 11, 12, 10, 11, 10, 11,  9, 10,  8,  9,  7,  8,  6,  7,  6,  7,  5,  6,  4,  5,  3,  4,  2,  3,  2,  3
+                                db  0,  0,  0,  0, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0,  0,  0, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0
+const ang32_shuf_mode19,        db  8,  8,  9,  9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15,  8,  8,  9,  9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15
+                                dd  0,  0,  2,  3,  0,  0,  7,  1
+                                dd  0,  0,  5,  6,  0,  0,  0,  0
+
+;; Generic angular weight tables, generated at assembly time.  Row x
+;; (x = 0..31) holds the byte pair (32-x, x) replicated 8 times -- one
+;; 16-byte XMM row per fraction value.
+const ang_table
+%assign x 0
+%rep 32
+    times 8 db (32-x), x
+%assign x x+1
+%endrep
+
+;; Same as ang_table but replicated 16 times (one 32-byte YMM row per fraction).
+const ang_table_avx2
+%assign x 0
+%rep 32
+    times 16 db (32-x), x
+%assign x x+1
+%endrep
+
+;; Word-sized variant: row x holds four (32-x, x) word pairs.
+const pw_ang_table
+%assign x 0
+%rep 32
+    times 4 dw (32-x), x
+%assign x x+1
+%endrep
+
+SECTION .text
+cextern pb_1
+cextern pw_2
+cextern pw_3
+cextern pw_4
+cextern pw_7
+cextern pw_8
+cextern pw_16
+cextern pw_15
+cextern pw_31
+cextern pw_32
+cextern pw_257
+cextern pw_512
+cextern pw_1024
+cextern pw_4096
+cextern pw_00ff
+cextern pb_unpackbd1
+cextern multiL
+cextern multiH
+cextern multiH2
+cextern multiH3
+cextern multi_2Row
+cextern trans8_shuf
+cextern pw_planar16_mul
+cextern pw_planar32_mul
+
+;---------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
+;---------------------------------------------------------------------------------------------
+; 4x4 DC intra prediction.
+; r0 = dst, r1 = dstStride, r2 = srcPix, r3d = dirMode (unused), r4d = bFilter.
+; srcPix layout (after "inc r2"): above[] at [r2], left[] at [r2 + 8].
+INIT_XMM sse2
+cglobal intra_pred_dc4, 5,5,3
+    inc         r2                  ; skip the top-left corner sample
+    pxor        m0, m0
+    movu        m1, [r2]
+    pshufd      m1, m1, 0xF8        ; pack above[0..3] (dword 0) and left[0..3] (dword 2) into the low qword
+    psadbw      m1, m0              ; m1 = sum
+
+    test        r4d, r4d            ; ZF = (bFilter == 0); none of the stores/SSE ops below touch flags
+
+    paddw       m1, [pw_4]
+    psraw       m1, 3               ; dc_val = (sum + 4) >> 3
+    movd        r4d, m1             ; r4d = dc_val
+    pmullw      m1, [pw_257]        ; dc * 0x0101: replicate the dc byte into both halves of the word
+    pshuflw     m1, m1, 0x00
+
+    ; store DC 4x4
+    lea         r3, [r1 * 3]
+    movd        [r0], m1
+    movd        [r0 + r1], m1
+    movd        [r0 + r1 * 2], m1
+    movd        [r0 + r3], m1
+
+    ; do DC filter (HEVC boundary smoothing, luma 4x4 only)
+    jz         .end                 ; bFilter == 0 -> done
+    lea         r3d, [r4d * 2 + 2]  ; r3d = DC * 2 + 2
+    add         r4d, r3d            ; r4d = DC * 3 + 2
+    movd        m1, r4d
+    pshuflw     m1, m1, 0           ; m1 = pixDCx3
+
+    ; filter top row: dst[x] = (above[x] + 3*DC + 2) >> 2
+    movd        m2, [r2]
+    punpcklbw   m2, m0
+    paddw       m2, m1
+    psraw       m2, 2
+    packuswb    m2, m2
+    movd        [r0], m2            ; overwrite top-left pixel, we will update it later
+
+    ; filter top-left: dst[0] = (above[0] + left[0] + 2*DC + 2) >> 2
+    movzx       r4d, byte [r2 + 8]  ; left[0]
+    add         r3d, r4d
+    movzx       r4d, byte [r2]      ; above[0]
+    add         r3d, r4d
+    shr         r3d, 2
+    mov         [r0], r3b
+
+    ; filter left column, rows 1..3: dst[y] = (left[y] + 3*DC + 2) >> 2
+    add         r0, r1
+    movq        m2, [r2 + 9]
+    punpcklbw   m2, m0
+    paddw       m2, m1
+    psraw       m2, 2
+    packuswb    m2, m2
+%if ARCH_X86_64
+    movq        r4, m2              ; scatter three filtered bytes down the left column
+    mov         [r0], r4b
+    shr         r4, 8
+    mov         [r0 + r1], r4b
+    shr         r4, 8
+    mov         [r0 + r1 * 2], r4b
+%else
+    movd        r2d, m2             ; 32-bit: r2 (srcPix) is dead now, reuse it
+    mov         [r0], r2b
+    shr         r2, 8
+    mov         [r0 + r1], r2b
+    shr         r2, 8
+    mov         [r0 + r1 * 2], r2b
+%endif
+.end:
+    RET
+
+;---------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
+;---------------------------------------------------------------------------------------------
+; 8x8 DC intra prediction.
+; r0 = dst, r1 = dstStride, r2 = srcPix, r3d = dirMode (unused), r4d = bFilter.
+; srcPix layout: above[] starts at srcPix[1], left[] at srcPix[17].
+INIT_XMM sse2
+cglobal intra_pred_dc8, 5, 7, 3
+    pxor            m0,            m0
+    movh            m1,            [r2 + 1]       ; above[0..7]
+    movh            m2,            [r2 + 17]      ; left[0..7]
+    punpcklqdq      m1,            m2
+    psadbw          m1,            m0             ; two per-qword partial sums
+    pshufd          m2,            m1, 2
+    paddw           m1,            m2             ; low word = sum of all 16 boundary pixels
+
+    paddw           m1,            [pw_8]
+    psraw           m1,            4              ; dc_val = (sum + 8) >> 4
+    pmullw          m1,            [pw_257]       ; replicate the dc byte into both halves of each word
+    pshuflw         m1,            m1, 0x00       ; m1 = byte [dc_val ...]
+
+    test            r4d,           r4d            ; ZF = (bFilter == 0); flags preserved until "jz .end"
+
+    ; store DC 8x8
+    lea             r6,            [r1 + r1 * 2]
+    lea             r5,            [r6 + r1 * 2]
+    movh            [r0],          m1
+    movh            [r0 + r1],     m1
+    movh            [r0 + r1 * 2], m1
+    movh            [r0 + r6],     m1
+    movh            [r0 + r1 * 4], m1
+    movh            [r0 + r5],     m1
+    movh            [r0 + r6 * 2], m1
+    lea             r5,            [r5 + r1 * 2]
+    movh            [r0 + r5],     m1
+
+    ; Do DC Filter
+    jz              .end                          ; bFilter == 0 -> done
+    psrlw           m1,            8              ; recover dc_val in each word
+    movq            m2,            [pw_2]
+    pmullw          m2,            m1
+    paddw           m2,            [pw_2]
+    movd            r4d,           m2             ; r4d = DC * 2 + 2 (only the low word is used later)
+    paddw           m1,            m2             ; m1 = DC * 3 + 2
+    pshufd          m1,            m1, 0
+
+    ; filter top row: dst[x] = (above[x] + 3*DC + 2) >> 2
+    movq            m2,            [r2 + 1]
+    punpcklbw       m2,            m0
+    paddw           m2,            m1
+    psraw           m2,            2              ; (above[x] + 3*DC + 2) >> 2
+    packuswb        m2,            m2
+    movh            [r0],          m2
+
+    ; filter top-left: dst[0] = (above[0] + left[0] + 2*DC + 2) >> 2
+    movzx           r3d, byte      [r2 + 17]      ; left[0]
+    add             r4d,           r3d
+    movzx           r3d, byte      [r2 + 1]       ; above[0]
+    add             r3d,           r4d
+    shr             r3d,           2
+    mov             [r0],          r3b
+
+    ; filter left column, rows 1..7: dst[y] = (left[y] + 3*DC + 2) >> 2
+    movq            m2,            [r2 + 18]
+    punpcklbw       m2,            m0
+    paddw           m2,            m1
+    psraw           m2,            2
+    packuswb        m2,            m2
+    movd            r2d,           m2             ; r2 (srcPix) is dead now, reuse it
+    lea             r0,            [r0 + r1]
+    lea             r5,            [r6 + r1 * 2]
+    mov             [r0],          r2b
+    shr             r2,            8
+    mov             [r0 + r1],     r2b
+    shr             r2,            8
+    mov             [r0 + r1 * 2], r2b
+    shr             r2,            8
+    mov             [r0 + r6],     r2b
+    pshufd          m2,            m2, 0x01       ; bring filtered bytes 4..6 into the low dword
+    movd            r2d,           m2
+    mov             [r0 + r1 * 4], r2b
+    shr             r2,            8
+    mov             [r0 + r5],     r2b
+    shr             r2,            8
+    mov             [r0 + r6 * 2], r2b
+
+.end:
+    RET
+
+;--------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
+;--------------------------------------------------------------------------------------------
+; 16x16 DC intra prediction.
+; r0 = dst, r1 = dstStride, r2 = srcPix, r3d = dirMode (unused), r4d = bFilter.
+; srcPix layout: above[] starts at srcPix[1], left[] at srcPix[33].
+; x86-64 keeps four stride multiples in r6-r9; x86-32 walks r0/r6 forward instead.
+INIT_XMM sse2
+%if ARCH_X86_64
+cglobal intra_pred_dc16, 5, 10, 4
+%else
+cglobal intra_pred_dc16, 5, 7, 4
+%endif
+    pxor            m0,            m0
+    movu            m1,            [r2 + 1]       ; above[0..15]
+    movu            m2,            [r2 + 33]      ; left[0..15]
+    psadbw          m1,            m0
+    psadbw          m2,            m0
+    paddw           m1,            m2
+    pshufd          m2,            m1, 2
+    paddw           m1,            m2             ; low word = sum of all 32 boundary pixels
+
+    paddw           m1,            [pw_16]
+    psraw           m1,            5              ; dc_val = (sum + 16) >> 5
+    pmullw          m1,            [pw_257]       ; replicate the dc byte into both halves of each word
+    pshuflw         m1,            m1, 0x00       ; m1 = byte [dc_val ...]
+    pshufd          m1,            m1, 0x00
+
+
+    test            r4d,           r4d            ; ZF = (bFilter == 0); flags preserved until "jz .end"
+
+    ; store DC 16x16
+%if ARCH_X86_64
+    lea             r6,            [r1 + r1 * 2]        ;index 3
+    lea             r7,            [r1 + r1 * 4]        ;index 5
+    lea             r8,            [r6 + r1 * 4]        ;index 7
+    lea             r9,            [r0 + r8]            ;base + 7
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    movu            [r0 + r1 * 2], m1
+    movu            [r0 + r6],     m1
+    movu            [r0 + r1 * 4], m1
+    movu            [r0 + r7],     m1
+    movu            [r0 + r6 * 2], m1
+    movu            [r0 + r8],     m1
+    movu            [r0 + r1 * 8], m1
+    movu            [r9 + r1 * 2], m1
+    movu            [r0 + r7 * 2], m1
+    movu            [r9 + r1 * 4], m1
+    movu            [r0 + r6 * 4], m1
+    movu            [r9 + r6 * 2], m1
+    movu            [r0 + r8 * 2], m1
+    movu            [r9 + r1 * 8], m1
+%else ;32 bit
+    mov             r6,            r0                   ; keep original dst for the filter phase
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+%endif
+    ; Do DC Filter
+    jz              .end                          ; bFilter == 0 -> done
+    psrlw           m1,            8              ; recover dc_val in each word
+    mova            m2,            [pw_2]
+    pmullw          m2,            m1
+    paddw           m2,            [pw_2]
+    movd            r4d,           m2             ; r4d low word = DC * 2 + 2
+    paddw           m1,            m2             ; m1 = DC * 3 + 2
+
+    ; filter top row: dst[x] = (above[x] + 3*DC + 2) >> 2, in two 8-pixel halves
+    movh            m2,            [r2 + 1]
+    punpcklbw       m2,            m0
+    paddw           m2,            m1
+    psraw           m2,            2
+    packuswb        m2,            m2
+    movh            m3,            [r2 + 9]
+    punpcklbw       m3,            m0
+    paddw           m3,            m1
+    psraw           m3,            2
+    packuswb        m3,            m3
+
+    ; filter top-left: dst[0] = (above[0] + left[0] + 2*DC + 2) >> 2
+    movzx           r5d, byte      [r2 + 33]      ; left[0]
+    add             r4d,           r5d
+    movzx           r3d, byte      [r2 + 1]       ; above[0]
+    add             r3d,           r4d
+    shr             r3d,           2
+
+%if ARCH_X86_64
+    movh            [r0],          m2
+    movh            [r0 + 8],      m3
+    mov             [r0],          r3b
+%else ;32 bit
+    movh            [r6],          m2
+    movh            [r6 + 8],      m3
+    mov             [r6],          r3b
+    add             r6,            r1
+%endif
+
+    ; filter left column, rows 1..15: dst[y] = (left[y] + 3*DC + 2) >> 2
+    movh            m2,            [r2 + 34]
+    punpcklbw       m2,            m0
+    paddw           m2,            m1
+    psraw           m2,            2
+    packuswb        m2,            m2
+
+    movh            m3,            [r2 + 42]
+    punpcklbw       m3,            m0
+    paddw           m3,            m1
+    psraw           m3,            2
+    packuswb        m3,            m3
+%if ARCH_X86_64
+    movh            r3,            m2             ; scatter 8 filtered bytes from the GPR
+    mov             [r0 + r1],     r3b
+    shr             r3,            8
+    mov             [r0 + r1 * 2], r3b
+    shr             r3,            8
+    mov             [r0 + r6],     r3b
+    shr             r3,            8
+    mov             [r0 + r1 * 4], r3b
+    shr             r3,            8
+    mov             [r0 + r7],     r3b
+    shr             r3,            8
+    mov             [r0 + r6 * 2], r3b
+    shr             r3,            8
+    mov             [r0 + r8],     r3b
+    shr             r3,            8
+    mov             [r0 + r1 * 8], r3b
+    movh            r3,            m3             ; next 8 filtered bytes
+    mov             [r9 + r1 * 2], r3b
+    shr             r3,            8
+    mov             [r0 + r7 * 2], r3b
+    shr             r3,            8
+    mov             [r9 + r1 * 4], r3b
+    shr             r3,            8
+    mov             [r0 + r6 * 4], r3b
+    shr             r3,            8
+    mov             [r9 + r6 * 2], r3b
+    shr             r3,            8
+    mov             [r0 + r8 * 2], r3b
+    shr             r3,            8
+    mov             [r9 + r1 * 8], r3b
+%else ;32 bit
+    movd            r2d,            m2            ; r2 (srcPix) is dead now, reuse it; 4 bytes at a time
+    pshufd          m2,            m2, 0x01
+    mov             [r6],          r2b
+    shr             r2,            8
+    mov             [r6 + r1],     r2b
+    shr             r2,            8
+    mov             [r6 + r1 * 2], r2b
+    lea             r6,            [r6 + r1 * 2]
+    shr             r2,            8
+    mov             [r6 + r1],     r2b
+    movd            r2d,           m2
+    mov             [r6 + r1 * 2], r2b
+    lea             r6,            [r6 + r1 * 2]
+    shr             r2,            8
+    mov             [r6 + r1],     r2b
+    shr             r2,            8
+    mov             [r6 + r1 * 2], r2b
+    lea             r6,            [r6 + r1 * 2]
+    shr             r2,            8
+    mov             [r6 + r1],     r2b
+    movd            r2d,            m3
+    pshufd          m3,             m3, 0x01
+    mov             [r6 + r1 * 2], r2b
+    lea             r6,            [r6 + r1 * 2]
+    shr             r2,            8
+    mov             [r6 + r1],     r2b
+    shr             r2,            8
+    mov             [r6 + r1 * 2], r2b
+    lea             r6,            [r6 + r1 * 2]
+    shr             r2,            8
+    mov             [r6 + r1],     r2b
+    movd            r2d,           m3
+    mov             [r6 + r1 * 2], r2b
+    lea             r6,            [r6 + r1 * 2]
+    shr             r2,            8
+    mov             [r6 + r1],     r2b
+    shr             r2,            8
+    mov             [r6 + r1 * 2], r2b
+%endif
+.end:
+    RET
+
+;---------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
+;---------------------------------------------------------------------------------------------
+; 32x32 DC intra prediction.  dirMode and bFilter are never read (no DC
+; boundary filtering at this block size), hence only 3 GPRs are requested.
+; srcPix layout: above[] starts at srcPix[1], left[] at srcPix[65].
+INIT_XMM sse2
+cglobal intra_pred_dc32, 3, 3, 5
+    pxor            m0,            m0
+    movu            m1,            [r2 + 1]       ; above[0..15]
+    movu            m2,            [r2 + 17]      ; above[16..31]
+    movu            m3,            [r2 + 65]      ; left[0..15]
+    movu            m4,            [r2 + 81]      ; left[16..31]
+    psadbw          m1,            m0
+    psadbw          m2,            m0
+    psadbw          m3,            m0
+    psadbw          m4,            m0
+    paddw           m1,            m2
+    paddw           m3,            m4
+    paddw           m1,            m3
+    pshufd          m2,            m1, 2
+    paddw           m1,            m2             ; low word = sum of all 64 boundary pixels
+
+    paddw           m1,            [pw_32]
+    psraw           m1,            6              ; dc_val = (sum + 32) >> 6
+    pmullw          m1,            [pw_257]       ; replicate the dc byte into both halves of each word
+    pshuflw         m1,            m1, 0x00       ; m1 = byte [dc_val ...]
+    pshufd          m1,            m1, 0x00
+
+%assign x 0
+%rep 16
+    ; store DC 32x32: two full 32-byte rows per iteration
+    movu            [r0],               m1
+    movu            [r0 + r1],          m1
+    movu            [r0 + 16],          m1
+    movu            [r0 + r1 + 16],     m1
+; NOTE(review): x runs 0..15 so this %if is always true; the final lea is a
+; harmless extra pointer advance
+%if x < 16
+    lea             r0,            [r0 + 2 * r1]
+%endif
+%assign x x+1
+%endrep
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 4x4 planar intra prediction:
+;   dst[y][x] = ((3-x)*left[y] + (x+1)*topRight + (3-y)*above[x] + (y+1)*bottomLeft + 4) >> 3
+; srcPix layout: above[] at srcPix[1] (topRight = above[4]),
+;                left[]  at srcPix[9] (bottomLeft = left[4]).
+INIT_XMM sse2
+cglobal intra_pred_planar4, 3,3,5
+    pxor            m0, m0
+    movh            m1, [r2 + 1]            ; above[0..7] -> words
+    punpcklbw       m1, m0
+    movh            m2, [r2 + 9]            ; left[0..7] -> words
+    punpcklbw       m2, m0
+    pshufhw         m3, m1, 0               ; topRight
+    pshufd          m3, m3, 0xAA
+    pshufhw         m4, m2, 0               ; bottomLeft
+    pshufd          m4, m4, 0xAA
+    pmullw          m3, [multi_2Row]        ; (x + 1) * topRight
+    pmullw          m0, m1, [pw_3]          ; (blkSize - 1 - y) * above[x]
+    paddw           m3, [pw_4]
+    paddw           m3, m4
+    paddw           m3, m0                  ; m3 = row-0 accumulator (every term except (3-x)*left[y])
+    psubw           m4, m1                  ; per-row delta = bottomLeft - above[x]
+
+    ; row 0
+    pshuflw         m1, m2, 0               ; broadcast left[0]
+    pmullw          m1, [pw_planar4_0]      ; (3 - x) * left[y]
+    paddw           m1, m3
+    paddw           m3, m4                  ; advance accumulator to the next row
+    psraw           m1, 3
+    packuswb        m1, m1
+    movd            [r0], m1
+
+    ; row 1
+    pshuflw         m1, m2, 01010101b       ; broadcast left[1]
+    pmullw          m1, [pw_planar4_0]
+    paddw           m1, m3
+    paddw           m3, m4
+    psraw           m1, 3
+    packuswb        m1, m1
+    movd            [r0 + r1], m1
+    lea             r0, [r0 + 2 * r1]
+
+    ; row 2
+    pshuflw         m1, m2, 10101010b       ; broadcast left[2]
+    pmullw          m1, [pw_planar4_0]
+    paddw           m1, m3
+    paddw           m3, m4
+    psraw           m1, 3
+    packuswb        m1, m1
+    movd            [r0], m1
+
+    ; row 3
+    pshuflw         m1, m2, 11111111b       ; broadcast left[3]
+    pmullw          m1, [pw_planar4_0]
+    paddw           m1, m3
+    psraw           m1, 3
+    packuswb        m1, m1
+    movd            [r0 + r1], m1
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 8x8 planar prediction. Same scheme as the 4x4 version: m3 carries the running
+; row base, m4 the per-row increment; the INTRA_PRED_PLANAR_8 macro emits one row
+; per invocation, broadcasting left[y] from m2 (low half for rows 0-3, high half
+; for rows 4-7).
INIT_XMM sse2
+cglobal intra_pred_planar8, 3,3,6
+    pxor            m0, m0
+    movh            m1, [r2 + 1]            ; above[0..7], widened below
+    punpcklbw       m1, m0
+    movh            m2, [r2 + 17]           ; left[0..7], widened below
+    punpcklbw       m2, m0
+
+    movd            m3, [r2 + 9]            ; topRight   = above[8];
+    movd            m4, [r2 + 25]           ; bottomLeft = left[8];
+
+    pand            m3, [pw_00ff]           ; isolate the byte
+    pand            m4, [pw_00ff]
+    pshuflw         m3, m3, 0x00            ; broadcast to all 8 words
+    pshuflw         m4, m4, 0x00
+    pshufd          m3, m3, 0x44
+    pshufd          m4, m4, 0x44
+    pmullw          m3, [multiL]            ; (x + 1) * topRight
+    pmullw          m0, m1, [pw_7]          ; (blkSize - 1 - y) * above[x]
+    paddw           m3, [pw_8]              ; rounding bias
+    paddw           m3, m4
+    paddw           m3, m0                  ; m3 = row-0 base
+    psubw           m4, m1                  ; m4 = per-row delta (bottomLeft - above[x])
+
+; emit one 8-pixel row; %1 = row index (0-7). Rows 0-3 take left[y] from the low
+; half of m2, rows 4-7 from the high half. Skips base/pointer advance on the last row.
+%macro INTRA_PRED_PLANAR_8 1
+%if (%1 < 4)
+    pshuflw         m5, m2, 0x55 * %1
+    pshufd          m5, m5, 0
+%else
+    pshufhw         m5, m2, 0x55 * (%1 - 4)
+    pshufd          m5, m5, 0xAA
+%endif
+    pmullw          m5, [pw_planar16_mul + mmsize]
+    paddw           m5, m3
+    psraw           m5, 4
+    packuswb        m5, m5
+    movh            [r0], m5
+%if (%1 < 7)
+    paddw           m3, m4
+    lea             r0, [r0 + r1]
+%endif
+%endmacro
+
+    INTRA_PRED_PLANAR_8 0
+    INTRA_PRED_PLANAR_8 1
+    INTRA_PRED_PLANAR_8 2
+    INTRA_PRED_PLANAR_8 3
+    INTRA_PRED_PLANAR_8 4
+    INTRA_PRED_PLANAR_8 5
+    INTRA_PRED_PLANAR_8 6
+    INTRA_PRED_PLANAR_8 7
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 16x16 planar prediction. A 16-pixel row needs two word vectors: m3/m1 track the
+; base/delta for pixels 0-7, m4 for pixels 8-15 (delta in m1 after the psubw pair).
+; The INTRA_PRED_PLANAR_16 macro selects left[y] out of m2 (rows 0-7) or m7
+; (rows 8-15) and emits one full row.
INIT_XMM sse2
+cglobal intra_pred_planar16, 3,5,8
+    pxor            m0, m0
+    movh            m2, [r2 + 1]                ; above[0..7]
+    punpcklbw       m2, m0
+    movh            m7, [r2 + 9]                ; above[8..15]
+    punpcklbw       m7, m0
+
+    movd            m3, [r2 + 17]               ; topRight   = above[16]
+    movd            m6, [r2 + 49]               ; bottomLeft = left[16]
+    pand            m3, [pw_00ff]
+    pand            m6, [pw_00ff]
+    pshuflw         m3, m3, 0x00
+    pshuflw         m6, m6, 0x00
+    pshufd          m3, m3, 0x44                ; v_topRight
+    pshufd          m6, m6, 0x44                ; v_bottomLeft
+    pmullw          m4, m3, [multiH]            ; (x + 1) * topRight
+    pmullw          m3, [multiL]                ; (x + 1) * topRight
+    pmullw          m1, m2, [pw_15]             ; (blkSize - 1 - y) * above[x]
+    pmullw          m5, m7, [pw_15]             ; (blkSize - 1 - y) * above[x]
+    paddw           m4, [pw_16]                 ; rounding bias for both halves
+    paddw           m3, [pw_16]
+    paddw           m4, m6
+    paddw           m3, m6
+    paddw           m4, m5                      ; m4 = row-0 base, pixels 8-15
+    paddw           m3, m1                      ; m3 = row-0 base, pixels 0-7
+    psubw           m1, m6, m7                  ; m1 = per-row delta, pixels 8-15
+    psubw           m6, m2                      ; m6 = per-row delta, pixels 0-7
+
+    movh            m2, [r2 + 33]               ; left[0..7]
+    punpcklbw       m2, m0
+    movh            m7, [r2 + 41]               ; left[8..15]
+    punpcklbw       m7, m0
+
+; emit one 16-pixel row; %1 = row index (0-15). The nested %if ladder broadcasts
+; left[%1] from the proper quarter of m2/m7; rows > 0 first advance the bases.
+%macro INTRA_PRED_PLANAR_16 1
+%if (%1 < 4)
+    pshuflw         m5, m2, 0x55 * %1
+    pshufd          m5, m5, 0
+%else
+%if (%1 < 8)
+    pshufhw         m5, m2, 0x55 * (%1 - 4)
+    pshufd          m5, m5, 0xAA
+%else
+%if (%1 < 12)
+    pshuflw         m5, m7, 0x55 * (%1 - 8)
+    pshufd          m5, m5, 0
+%else
+    pshufhw         m5, m7, 0x55 * (%1 - 12)
+    pshufd          m5, m5, 0xAA
+%endif
+%endif
+%endif
+%if (%1 > 0)
+    paddw           m3, m6
+    paddw           m4, m1
+    lea             r0, [r0 + r1]
+%endif
+    pmullw          m0, m5, [pw_planar16_mul + mmsize]
+    pmullw          m5, [pw_planar16_mul]
+    paddw           m0, m4
+    paddw           m5, m3
+    psraw           m5, 5
+    psraw           m0, 5
+    packuswb        m5, m0
+    movu            [r0], m5
+%endmacro
+
+    INTRA_PRED_PLANAR_16 0
+    INTRA_PRED_PLANAR_16 1
+    INTRA_PRED_PLANAR_16 2
+    INTRA_PRED_PLANAR_16 3
+    INTRA_PRED_PLANAR_16 4
+    INTRA_PRED_PLANAR_16 5
+    INTRA_PRED_PLANAR_16 6
+    INTRA_PRED_PLANAR_16 7
+    INTRA_PRED_PLANAR_16 8
+    INTRA_PRED_PLANAR_16 9
+    INTRA_PRED_PLANAR_16 10
+    INTRA_PRED_PLANAR_16 11
+    INTRA_PRED_PLANAR_16 12
+    INTRA_PRED_PLANAR_16 13
+    INTRA_PRED_PLANAR_16 14
+    INTRA_PRED_PLANAR_16 15
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; 32x32 planar prediction. A row is four word vectors (m0-m3 hold the running
+; bases). x86-64 keeps the four per-row deltas in m8-m11 and the multiplier
+; tables in m12-m15; the 32-bit build spills the deltas to a 4*mmsize stack
+; scratch area instead. PROCESS emits one 32-pixel row, INCREMENT advances the
+; bases and dst pointer. The final row skips INCREMENT (x + y < 10 guard: it is
+; false only for the last macro expansion, x=3/y=7 or the mirrored pair).
INIT_XMM sse2
+%if ARCH_X86_64 == 1
+cglobal intra_pred_planar32, 3,3,16
+    movd            m3, [r2 + 33]               ; topRight   = above[32]
+
+    pxor            m7, m7
+    pand            m3, [pw_00ff]
+    pshuflw         m3, m3, 0x00
+    pshufd          m3, m3, 0x44                ; broadcast topRight to 8 words
+
+    pmullw          m0, m3, [multiL]            ; (x + 1) * topRight
+    pmullw          m1, m3, [multiH]            ; (x + 1) * topRight
+    pmullw          m2, m3, [multiH2]           ; (x + 1) * topRight
+    pmullw          m3, [multiH3]               ; (x + 1) * topRight
+
+    movd            m11, [r2 + 97]               ; bottomLeft = left[32]
+    pand            m11, [pw_00ff]
+    pshuflw         m11, m11, 0x00
+    pshufd          m11, m11, 0x44
+    mova            m5,  m11
+    paddw           m5,  [pw_32]                ; bottomLeft + rounding bias
+
+    paddw           m0, m5
+    paddw           m1, m5
+    paddw           m2, m5
+    paddw           m3, m5
+    mova            m8, m11
+    mova            m9, m11
+    mova            m10, m11
+    mova            m12, [pw_31]                ; (blkSize - 1) weight for row 0
+    movh            m4, [r2 + 1]                ; above[0..7]
+    punpcklbw       m4, m7
+    psubw           m8, m4                      ; delta = bottomLeft - above[x]
+    pmullw          m4, m12
+    paddw           m0, m4
+
+    movh            m4, [r2 + 9]                ; above[8..15]
+    punpcklbw       m4, m7
+    psubw           m9, m4
+    pmullw          m4, m12
+    paddw           m1, m4
+
+    movh            m4, [r2 + 17]               ; above[16..23]
+    punpcklbw       m4, m7
+    psubw           m10, m4
+    pmullw          m4, m12
+    paddw           m2, m4
+
+    movh            m4, [r2 + 25]               ; above[24..31]
+    punpcklbw       m4, m7
+    psubw           m11, m4
+    pmullw          m4, m12
+    paddw           m3, m4
+    mova            m12, [pw_planar32_mul]      ; (32 - x) weights, cached in regs
+    mova            m13, [pw_planar32_mul + mmsize]
+    mova            m14, [pw_planar16_mul]
+    mova            m15, [pw_planar16_mul + mmsize]
+; emit one 32-pixel row; %1 = broadcast left[y] (clobbered)
+%macro PROCESS 1
+    pmullw          m5, %1, m12
+    pmullw          m6, %1, m13
+    paddw           m5, m0
+    paddw           m6, m1
+    psraw           m5, 6
+    psraw           m6, 6
+    packuswb        m5, m6
+    movu            [r0], m5
+
+    pmullw          m5, %1, m14
+    pmullw          %1, m15
+    paddw           m5, m2
+    pmullw          %1, m3
+    psraw           m5, 6
+    psraw           %1, 6
+    packuswb        m5, %1
+    movu            [r0 + 16], m5
+%endmacro
+
+; advance all four row bases and the destination pointer
+%macro INCREMENT 0
+    paddw           m2, m10
+    paddw           m3, m11
+    paddw           m0, m8
+    paddw           m1, m9
+    add             r0, r1
+%endmacro
+
+%assign x 0
+%rep 4
+    pxor            m7, m7
+    movq            m4, [r2 + 65 + x * 8]       ; left[8x .. 8x+7]
+    punpcklbw       m4, m7
+%assign y 0
+%rep 8
+    %if y < 4
+    pshuflw         m7, m4, 0x55 * y
+    pshufd          m7, m7, 0x44
+    %else
+    pshufhw         m7, m4, 0x55 * (y - 4)
+    pshufd          m7, m7, 0xEE
+    %endif
+    PROCESS m7
+    %if x + y < 10
+    INCREMENT
+    %endif
+%assign y y+1
+%endrep
+%assign x x+1
+%endrep
+    RET
+
+%else ;end ARCH_X86_64, start ARCH_X86_32
+cglobal intra_pred_planar32, 3,3,8,0-(4*mmsize)
+    movd            m3, [r2 + 33]               ; topRight   = above[32]
+
+    pxor            m7, m7
+    pand            m3, [pw_00ff]
+    pshuflw         m3, m3, 0x00
+    pshufd          m3, m3, 0x44
+
+    pmullw          m0, m3, [multiL]            ; (x + 1) * topRight
+    pmullw          m1, m3, [multiH]            ; (x + 1) * topRight
+    pmullw          m2, m3, [multiH2]           ; (x + 1) * topRight
+    pmullw          m3, [multiH3]               ; (x + 1) * topRight
+
+    movd            m6, [r2 + 97]               ; bottomLeft = left[32]
+    pand            m6, [pw_00ff]
+    pshuflw         m6, m6, 0x00
+    pshufd          m6, m6, 0x44
+    mova            m5, m6
+    paddw           m5, [pw_32]                 ; bottomLeft + rounding bias
+
+    paddw           m0, m5
+    paddw           m1, m5
+    paddw           m2, m5
+    paddw           m3, m5
+
+    movh            m4, [r2 + 1]                ; above[0..7]
+    punpcklbw       m4, m7
+    psubw           m5, m6, m4                  ; delta spilled to stack (only 8 xmm regs)
+    mova            [rsp + 0 * mmsize], m5
+    pmullw          m4, [pw_31]
+    paddw           m0, m4
+    movh            m4, [r2 + 9]                ; above[8..15]
+    punpcklbw       m4, m7
+    psubw           m5, m6, m4
+    mova            [rsp + 1 * mmsize], m5
+    pmullw          m4, [pw_31]
+    paddw           m1, m4
+    movh            m4, [r2 + 17]               ; above[16..23]
+    punpcklbw       m4, m7
+    psubw           m5, m6, m4
+    mova            [rsp + 2 * mmsize], m5
+    pmullw          m4, [pw_31]
+    paddw           m2, m4
+    movh            m4, [r2 + 25]               ; above[24..31]
+    punpcklbw       m4, m7
+    psubw           m5, m6, m4
+    mova            [rsp + 3 * mmsize], m5
+    pmullw          m4, [pw_31]
+    paddw           m3, m4
+; emit one 32-pixel row; %1 = broadcast left[y] (clobbered). Multiplier tables
+; are re-read from memory here since no registers are free on x86-32.
+%macro PROCESS 1
+    pmullw          m5, %1, [pw_planar32_mul]
+    pmullw          m6, %1, [pw_planar32_mul + mmsize]
+    paddw           m5, m0
+    paddw           m6, m1
+    psraw           m5, 6
+    psraw           m6, 6
+    packuswb        m5, m6
+    movu            [r0], m5
+    pmullw          m5, %1, [pw_planar16_mul]
+    pmullw          %1, [pw_planar16_mul + mmsize]
+    paddw           m5, m2
+    paddw           %1, m3
+    psraw           m5, 6
+    psraw           %1, 6
+    packuswb        m5, %1
+    movu            [r0 + 16], m5
+%endmacro
+
+; advance all four row bases (deltas live on the stack) and the dst pointer
+%macro INCREMENT 0
+    paddw           m0, [rsp + 0 * mmsize]
+    paddw           m1, [rsp + 1 * mmsize]
+    paddw           m2, [rsp + 2 * mmsize]
+    paddw           m3, [rsp + 3 * mmsize]
+    add             r0, r1
+%endmacro
+
+%assign y 0
+%rep 4
+    pxor            m7, m7
+    movq            m4, [r2 + 65 + y * 8]       ; left[8y .. 8y+7]
+    punpcklbw       m4, m7
+%assign x 0
+%rep 8
+    %if x < 4
+    pshuflw         m7, m4, 0x55 * x
+    pshufd          m7, m7, 0x44
+    %else
+    pshufhw         m7, m4, 0x55 * (x - 4)
+    pshufd          m7, m7, 0xEE
+    %endif
+
+    PROCESS m7
+    %if x + y < 10
+    INCREMENT
+    %endif
+%assign x x+1
+%endrep
+%assign y y+1
+%endrep
+    RET
+
+%endif ; end ARCH_X86_32
+
+; Scatter the 16 packed bytes of m0 as four 4-pixel rows at [r0] with stride r1.
+; NOTE: clobbers r1 (r1 *= 3 for the last row), so callers must RET right after.
+%macro STORE_4x4 0
+    movd        [r0], m0
+    psrldq      m0, 4
+    movd        [r0 + r1], m0
+    psrldq      m0, 4
+    movd        [r0 + r1 * 2], m0
+    lea         r1, [r1 * 3]
+    psrldq      m0, 4
+    movd        [r0 + r1], m0
+%endmacro
+
+; Transpose a 4x4 word matrix held as rows 0/1 in m0 and rows 2/3 in m2, then
+; pack to bytes in m0 ready for STORE_4x4. Used by the ang4 modes that predict
+; from the left reference (the 0xD8 shuffles interleave even/odd lanes).
+%macro TRANSPOSE_4x4 0
+    pshufd      m0, m0, 0xD8
+    pshufd      m1, m2, 0xD8
+    pshuflw     m0, m0, 0xD8
+    pshuflw     m1, m1, 0xD8
+    pshufhw     m0, m0, 0xD8
+    pshufhw     m1, m1, 0xD8
+    mova        m2, m0
+    punpckldq   m0, m1
+    punpckhdq   m2, m1
+    packuswb    m0, m2
+%endmacro
+
+;-----------------------------------------------------------------------------------------
+; void intraPredAng4(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;-----------------------------------------------------------------------------------------
+; Modes 2 and 34 (pure 45-degree diagonals): no interpolation, each row is the
+; reference shifted by one pixel. Mode 34 reads src+2 (above), mode 2 reads
+; src+10 (left) — selected branch-free via cmove on dirMode.
INIT_XMM sse2
+cglobal intra_pred_ang4_2, 3,5,1
+    lea         r4, [r2 + 2]
+    add         r2, 10
+    cmp         r3m, byte 34
+    cmove       r2, r4                  ; r2 = src+2 if mode 34, else src+10
+
+    movh        m0, [r2]
+    movd        [r0], m0
+    psrldq      m0, 1
+    movd        [r0 + r1], m0
+    psrldq      m0, 1
+    movd        [r0 + r1 * 2], m0
+    lea         r1, [r1 * 3]
+    psrldq      m0, 1
+    movd        [r0 + r1], m0
+    RET
+
INIT_XMM sse2
+; Mode 3 angular 4x4 prediction: pair adjacent reference pixels (punpcklbw with
+; itself + byte shifts), weight each row with pw_ang_table entries 26/20/14/8
+; via pmaddwd, round (+16, >>5), then transpose and store (left-reference mode).
+cglobal intra_pred_ang4_3, 3,3,5
+    movh        m3, [r2 + 9]   ; [8 7 6 5 4 3 2 1]
+    punpcklbw   m3, m3
+    psrldq      m3, 1
+    movh        m0, m3                  ;[x x x x x x x x 5 4 4 3 3 2 2 1]
+    psrldq      m3, 2
+    movh        m1, m3                  ;[x x x x x x x x 6 5 5 4 4 3 3 2]
+    psrldq      m3, 2
+    movh        m2, m3                  ;[x x x x x x x x 7 6 6 5 5 4 4 3]
+    psrldq      m3, 2                   ;[x x x x x x x x 8 7 7 6 6 5 5 4]
+
+    pxor        m4, m4
+    punpcklbw   m1, m4
+    pmaddwd     m1, [pw_ang_table + 20 * 16]
+    punpcklbw   m0, m4
+    pmaddwd     m0, [pw_ang_table + 26 * 16]
+    packssdw    m0, m1
+    paddw       m0, [pw_16]             ; rounding bias
+    psraw       m0, 5
+    punpcklbw   m3, m4
+    pmaddwd     m3, [pw_ang_table + 8 * 16]
+    punpcklbw   m2, m4
+    pmaddwd     m2, [pw_ang_table + 14 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 4 angular 4x4 prediction: weights 21/10 and 31/20 from pw_ang_table
+; (m2 is reused for two rows), rounded (+16, >>5), transposed and stored.
+cglobal intra_pred_ang4_4, 3,3,5
+    movh        m1, [r2 + 9]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m1, m1
+    psrldq      m1, 1
+    movh        m0, m1                  ;[x x x x x x x x 5 4 4 3 3 2 2 1]
+    psrldq      m1, 2
+    movh        m2, m1                  ;[x x x x x x x x 6 5 5 4 4 3 3 2]
+    psrldq      m1, 2                   ;[x x x x x x x x 7 6 6 5 5 4 4 3]
+
+    pxor        m4, m4
+    punpcklbw   m2, m4
+    mova        m3, m2
+    pmaddwd     m3, [pw_ang_table + 10 * 16]
+    punpcklbw   m0, m4
+    pmaddwd     m0, [pw_ang_table + 21 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m1, m4
+    pmaddwd     m1, [pw_ang_table + 20 * 16]
+    pmaddwd     m2, [pw_ang_table + 31 * 16]
+    packssdw    m2, m1
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 5 angular 4x4 prediction: weights 17/2 and 19/4 from pw_ang_table,
+; rounded (+16, >>5), transposed and stored.
+cglobal intra_pred_ang4_5, 3,3,5
+    movh        m3, [r2 + 9]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m3, m3
+    psrldq      m3, 1
+    mova        m0, m3                  ;[x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    psrldq      m3, 2
+    mova        m2, m3                  ;[x x x x x x x x 6 5 5 4 4 3 3 2]
+    psrldq      m3, 2                   ;[x x x x x x x x 7 6 6 5 5 4 4 3]
+
+    pxor        m1, m1
+    punpcklbw   m2, m1
+    mova        m4, m2
+    pmaddwd     m4, [pw_ang_table + 2 * 16]
+    punpcklbw   m0, m1
+    pmaddwd     m0, [pw_ang_table + 17 * 16]
+    packssdw    m0, m4
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m3, m1
+    pmaddwd     m3, [pw_ang_table + 4 * 16]
+    pmaddwd     m2, [pw_ang_table + 19 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 6 angular 4x4 prediction: only two distinct source rows are needed, each
+; weighted twice (13/26 and 7/20 from pw_ang_table), rounded, transposed, stored.
+cglobal intra_pred_ang4_6, 3,3,4
+    movh        m2, [r2 + 9]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m2, m2
+    psrldq      m2, 1
+    movh        m0, m2                  ;[x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    psrldq      m2, 2                   ;[x x x 8 8 7 7 6 6 5 5 4 4 3 3 2]
+
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m3, m0
+    pmaddwd     m3, [pw_ang_table + 26 * 16]
+    pmaddwd     m0, [pw_ang_table + 13 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m2, m1
+    mova        m3, m2
+    pmaddwd     m3, [pw_ang_table + 20 * 16]
+    pmaddwd     m2, [pw_ang_table + 7 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 7 angular 4x4 prediction: weights 9/18/27 applied to the same source row
+; plus 4 on the shifted row (pw_ang_table), rounded, transposed, stored.
+cglobal intra_pred_ang4_7, 3,3,5
+    movh        m3, [r2 + 9]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m3, m3
+    psrldq      m3, 1
+    movh        m0, m3                  ;[x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    psrldq      m3, 2                   ;[x x x x x x x x 6 5 5 4 4 3 3 2]
+
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m4, m0
+    mova        m2, m0
+    pmaddwd     m4, [pw_ang_table + 18 * 16]
+    pmaddwd     m0, [pw_ang_table + 9 * 16]
+    packssdw    m0, m4
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m3, m1
+    pmaddwd     m3, [pw_ang_table + 4 * 16]
+    pmaddwd     m2, [pw_ang_table + 27 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 8 angular 4x4 prediction: shallow angle, all four rows use the same
+; source pixels with weights 5/10/15/20 (pw_ang_table), rounded, transposed.
+cglobal intra_pred_ang4_8, 3,3,5
+    movh        m0, [r2 + 9]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m0, m0
+    psrldq      m0, 1                   ;[x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m2, m0
+    mova        m3, m0
+    mova        m4, m2
+    pmaddwd     m3, [pw_ang_table + 10 * 16]
+    pmaddwd     m0, [pw_ang_table + 5 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    pmaddwd     m4, [pw_ang_table + 20 * 16]
+    pmaddwd     m2, [pw_ang_table + 15 * 16]
+    packssdw    m2, m4
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 9 angular 4x4 prediction: shallowest non-horizontal angle, weights
+; 2/4/6/8 on one source row (pw_ang_table), rounded, transposed, stored.
+cglobal intra_pred_ang4_9, 3,3,5
+    movh        m0, [r2 + 9]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m0, m0
+    psrldq      m0, 1                   ;[x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m2, m0
+    mova        m3, m0
+    mova        m4, m2
+    pmaddwd     m3, [pw_ang_table + 4 * 16]
+    pmaddwd     m0, [pw_ang_table + 2 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    pmaddwd     m4, [pw_ang_table + 8 * 16]
+    pmaddwd     m2, [pw_ang_table + 6 * 16]
+    packssdw    m2, m4
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 10 (pure horizontal): each row is left[y] replicated. When bFilter (r4m)
+; is set, row 0 is additionally filtered toward the above row:
+; dst[0][x] += (above[x] - topLeft) >> 1, clamped by the unsigned pack.
+cglobal intra_pred_ang4_10, 3,5,4
+    movd        m0, [r2 + 9]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m0, m0
+    punpcklwd   m0, m0                  ; replicate each left pixel 4x
+    pshufd      m1, m0, 1
+    movhlps     m2, m0
+    pshufd      m3, m0, 3
+    movd        [r0 + r1], m1
+    movd        [r0 + r1 * 2], m2
+    lea         r1, [r1 * 3]
+    movd        [r0 + r1], m3
+    cmp         r4m, byte 0
+    jz          .quit
+
+    ; filter
+    pxor        m3, m3
+    punpcklbw   m0, m3
+    movh        m1, [r2]                ;[4 3 2 1 0]
+    punpcklbw   m1, m3
+    pshuflw     m2, m1, 0x00            ; broadcast topLeft
+    psrldq      m1, 2
+    psubw       m1, m2                  ; above[x] - topLeft
+    psraw       m1, 1
+    paddw       m0, m1
+    packuswb    m0, m0                  ; clamp to [0,255]
+
+.quit:
+    movd        [r0], m0                ; row 0 (possibly filtered)
+    RET
+
+; Mode 11 angular 4x4 prediction: first mode past horizontal, so the reference
+; includes the top-left pixel A (pulled from [r2 - 7] and spliced ahead of the
+; left pixels). Weights 30/28/26/24 (pw_ang_table), rounded, transposed, stored.
+cglobal intra_pred_ang4_11, 3,3,5
+    movd        m1, [r2 + 9]            ;[4 3 2 1]
+    movh        m0, [r2 - 7]            ;[A x x x x x x x]
+    punpcklbw   m1, m1                  ;[4 4 3 3 2 2 1 1]
+    punpcklqdq  m0, m1                  ;[4 4 3 3 2 2 1 1 A x x x x x x x]]
+    psrldq      m0, 7                   ;[x x x x x x x x 4 3 3 2 2 1 1 A]
+
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m2, m0
+    mova        m3, m0
+    mova        m4, m2
+    pmaddwd     m3, [pw_ang_table + 28 * 16]
+    pmaddwd     m0, [pw_ang_table + 30 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    pmaddwd     m4, [pw_ang_table + 24 * 16]
+    pmaddwd     m2, [pw_ang_table + 26 * 16]
+    packssdw    m2, m4
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 12 angular 4x4 prediction: reference is top-left A plus left pixels,
+; weights 27/22/17/12 (pw_ang_table), rounded (+16, >>5), transposed, stored.
+; (Fixed stray indentation on 'cglobal' — every sibling predictor declares it
+; at column 0; keeps function entry points grep-able and consistent.)
+cglobal intra_pred_ang4_12, 3,3,5
+    movd        m1, [r2 + 9]            ;[4 3 2 1]
+    movh        m0, [r2 - 7]            ;[A x x x x x x x]
+    punpcklbw   m1, m1                  ;[4 4 3 3 2 2 1 1]
+    punpcklqdq  m0, m1                  ;[4 4 3 3 2 2 1 1 A x x x x x x x]
+    psrldq      m0, 7                   ;[x x x x x x x x 4 3 3 2 2 1 1 A]
+
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m2, m0
+    mova        m3, m0
+    mova        m4, m2
+    pmaddwd     m3, [pw_ang_table + 22 * 16]
+    pmaddwd     m0, [pw_ang_table + 27 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    pmaddwd     m4, [pw_ang_table + 12 * 16]
+    pmaddwd     m2, [pw_ang_table + 17 * 16]
+    packssdw    m2, m4
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 24 angular 4x4 prediction: vertical-side mirror of mode 12 — same
+; weights 27/22/17/12 but reading the above pixels ([r2 + 1]) and storing rows
+; directly (no transpose). Kept next to mode 12 for symmetry despite the
+; numeric ordering of the surrounding functions.
+; (Fixed stray indentation on 'cglobal' — siblings declare it at column 0.)
+cglobal intra_pred_ang4_24, 3,3,5
+    movd        m1, [r2 + 1]            ;[4 3 2 1]
+    movh        m0, [r2 - 7]            ;[A x x x x x x x]
+    punpcklbw   m1, m1                  ;[4 4 3 3 2 2 1 1]
+    punpcklqdq  m0, m1                  ;[4 4 3 3 2 2 1 1 A x x x x x x x]
+    psrldq      m0, 7                   ;[x x x x x x x x 4 3 3 2 2 1 1 A]
+
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m2, m0
+    mova        m3, m0
+    mova        m4, m2
+    pmaddwd     m3, [pw_ang_table + 22 * 16]
+    pmaddwd     m0, [pw_ang_table + 27 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    pmaddwd     m4, [pw_ang_table + 12 * 16]
+    pmaddwd     m2, [pw_ang_table + 17 * 16]
+    packssdw    m2, m4
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; Mode 13 angular 4x4 prediction: reference needs two projected above pixels
+; (A at [r2 - 1], B at [r2 + 3]) spliced before the left pixels. Weights
+; 23/14 and 5/28 (pw_ang_table), rounded, transposed, stored.
+cglobal intra_pred_ang4_13, 3,3,5
+    movd        m1, [r2 - 1]            ;[x x A x]
+    movd        m2, [r2 + 9]           ;[4 3 2 1]
+    movd        m0, [r2 + 3]            ;[x x B x]
+    punpcklbw   m0, m1                  ;[x x x x A B x x]
+    punpckldq   m0, m2                  ;[4 3 2 1 A B x x]
+    psrldq      m0, 2                   ;[x x 4 3 2 1 A B]
+    punpcklbw   m0, m0
+    psrldq      m0, 1
+    movh        m3, m0                  ;[x x x x x 4 4 3 3 2 2 1 1 A A B]
+    psrldq      m0, 2                   ;[x x x x x x x 4 4 3 3 2 2 1 1 A]
+
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m4, m0
+    mova        m2, m0
+    pmaddwd     m4, [pw_ang_table + 14 * 16]
+    pmaddwd     m0, [pw_ang_table + 23 * 16]
+    packssdw    m0, m4
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m3, m1
+    pmaddwd     m3, [pw_ang_table + 28 * 16]
+    pmaddwd     m2, [pw_ang_table + 5 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 14 angular 4x4 prediction: projected above pixels A ([r2 - 1]) and
+; B ([r2 + 1]) spliced before the left pixels. Weights 19/6 and 25/12
+; (pw_ang_table), rounded, transposed, stored.
+cglobal intra_pred_ang4_14, 3,3,4
+    movd        m1, [r2 - 1]            ;[x x A x]
+    movd        m0, [r2 + 1]            ;[x x B x]
+    punpcklbw   m0, m1                  ;[A B x x]
+    movd        m1, [r2 + 9]            ;[4 3 2 1]
+    punpckldq   m0, m1                  ;[4 3 2 1 A B x x]
+    psrldq      m0, 2                   ;[x x 4 3 2 1 A B]
+    punpcklbw   m0, m0                  ;[x x x x 4 4 3 3 2 2 1 1 A A B B]
+    psrldq      m0, 1
+    movh        m2, m0                  ;[x x x x x 4 4 3 3 2 2 1 1 A A B]
+    psrldq      m0, 2                   ;[x x x x x x x 4 4 3 3 2 2 1 1 A]
+
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m3, m0
+    pmaddwd     m3, [pw_ang_table + 6 * 16]
+    pmaddwd     m0, [pw_ang_table + 19 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m2, m1
+    mova        m3, m2
+    pmaddwd     m3, [pw_ang_table + 12 * 16]
+    pmaddwd     m2, [pw_ang_table + 25 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 15 angular 4x4 prediction: three projected above pixels A/B/C
+; ([r2], [r2+2], [r2+3]) spliced before the left pixels. Weights 15/30 and
+; 13/28 (pw_ang_table), rounded, transposed, stored.
+cglobal intra_pred_ang4_15, 3,3,5
+    movd        m0, [r2]                ;[x x x A]
+    movd        m1, [r2 + 2]            ;[x x x B]
+    punpcklbw   m1, m0                  ;[x x A B]
+    movd        m0, [r2 + 3]            ;[x x C x]
+    punpcklwd   m0, m1                  ;[A B C x]
+    movd        m1, [r2 + 9]            ;[4 3 2 1]
+    punpckldq   m0, m1                  ;[4 3 2 1 A B C x]
+    psrldq      m0, 1                   ;[x 4 3 2 1 A B C]
+    punpcklbw   m0, m0                  ;[x x 4 4 3 3 2 2 1 1 A A B B C C]
+    psrldq      m0, 1
+    movh        m1, m0                  ;[x x x 4 4 3 3 2 2 1 1 A A B B C]
+    psrldq      m0, 2
+    movh        m2, m0                  ;[x x x x x 4 4 3 3 2 2 1 1 A A B]
+    psrldq      m0, 2                   ;[x x x x x x x 4 4 3 3 2 2 1 1 A]
+
+    pxor        m4, m4
+    punpcklbw   m2, m4
+    mova        m3, m2
+    pmaddwd     m3, [pw_ang_table + 30 * 16]
+    punpcklbw   m0, m4
+    pmaddwd     m0, [pw_ang_table + 15 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m1, m4
+    pmaddwd     m1, [pw_ang_table + 28 * 16]
+    pmaddwd     m2, [pw_ang_table + 13 * 16]
+    packssdw    m2, m1
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 16 angular 4x4 prediction: projected pixels A/B/C spliced before the
+; left pixels; weights 11/22 and 1/12 (pw_ang_table), rounded, transposed.
+; NOTE(review): B and C both load from [r2 + 2] — the inverse-angle projection
+; apparently maps two rows to the same reference pixel here; the horizontal
+; mirror (mode 20 below) does the same with [r2 + 10], so this looks
+; intentional, but confirm against the C reference implementation.
+cglobal intra_pred_ang4_16, 3,3,5
+    movd        m2, [r2]                ;[x x x A]
+    movd        m1, [r2 + 2]            ;[x x x B]
+    punpcklbw   m1, m2                  ;[x x A B]
+    movd        m0, [r2 + 2]            ;[x x C x]
+    punpcklwd   m0, m1                  ;[A B C x]
+    movd        m1, [r2 + 9]            ;[4 3 2 1]
+    punpckldq   m0, m1                  ;[4 3 2 1 A B C x]
+    psrldq      m0, 1                   ;[x 4 3 2 1 A B C]
+    punpcklbw   m0, m0                  ;[x x 4 4 3 3 2 2 1 1 A A B B C C]
+    psrldq      m0, 1
+    movh        m1, m0                  ;[x x x 4 4 3 3 2 2 1 1 A A B B C]
+    psrldq      m0, 2
+    movh        m2, m0                  ;[x x x x x 4 4 3 3 2 2 1 1 A A B]
+    psrldq      m0, 2                   ;[x x x x x x x 4 4 3 3 2 2 1 1 A]
+
+    pxor        m4, m4
+    punpcklbw   m2, m4
+    mova        m3, m2
+    pmaddwd     m3, [pw_ang_table + 22 * 16]
+    punpcklbw   m0, m4
+    pmaddwd     m0, [pw_ang_table + 11 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m1, m4
+    pmaddwd     m1, [pw_ang_table + 12 * 16]
+    pmaddwd     m2, [pw_ang_table + 1 * 16]
+    packssdw    m2, m1
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 17 angular 4x4 prediction: steepest left mode — four projected above
+; pixels A/B/C/D ([r2], +1, +2, +4) spliced before the left pixels. Weights
+; 6/12 and 18/24 (pw_ang_table), rounded, transposed, stored.
+cglobal intra_pred_ang4_17, 3,3,5
+    movd        m2, [r2]                ;[x x x A]
+    movd        m3, [r2 + 1]            ;[x x x B]
+    movd        m4, [r2 + 2]            ;[x x x C]
+    movd        m0, [r2 + 4]            ;[x x x D]
+    punpcklbw   m3, m2                  ;[x x A B]
+    punpcklbw   m0, m4                  ;[x x C D]
+    punpcklwd   m0, m3                  ;[A B C D]
+    movd        m1, [r2 + 9]            ;[4 3 2 1]
+    punpckldq   m0, m1                  ;[4 3 2 1 A B C D]
+    punpcklbw   m0, m0                  ;[4 4 3 3 2 2 1 1 A A B B C C D D]
+    psrldq      m0, 1
+    movh        m1, m0                  ;[x 4 4 3 3 2 2 1 1 A A B B C C D]
+    psrldq      m0, 2
+    movh        m2, m0                  ;[x x x 4 4 3 3 2 2 1 1 A A B B C]
+    psrldq      m0, 2
+    movh        m3, m0                  ;[x x x x x 4 4 3 3 2 2 1 1 A A B]
+    psrldq      m0, 2                   ;[x x x x x x x 4 4 3 3 2 2 1 1 A]
+
+    pxor        m4, m4
+    punpcklbw   m3, m4
+    pmaddwd     m3, [pw_ang_table + 12 * 16]
+    punpcklbw   m0, m4
+    pmaddwd     m0, [pw_ang_table + 6 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m1, m4
+    pmaddwd     m1, [pw_ang_table + 24 * 16]
+    punpcklbw   m2, m4
+    pmaddwd     m2, [pw_ang_table + 18 * 16]
+    packssdw    m2, m1
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+
+    TRANSPOSE_4x4
+
+    STORE_4x4
+    RET
+
+; Mode 18 (pure 45-degree through the corner): each row is the previous row
+; shifted by one, drawing from a reference built as reversed-left + top-left
+; + above. The GPR shuffle (mov/mov byte/bswap) reverses left[1..3] and
+; injects the corner pixel in one 32-bit register before combining with above.
+cglobal intra_pred_ang4_18, 3,4,2
+    mov         r3d, [r2 + 8]           ; left[0..3] raw dword
+    mov         r3b, byte [r2]          ; overwrite low byte with top-left
+    bswap       r3d                     ; reverse byte order
+    movd        m0, r3d
+
+    movd        m1, [r2 + 1]            ; above[0..3]
+    punpckldq   m0, m1
+    lea         r3, [r1 * 3]
+    movd        [r0 + r3], m0           ; rows written bottom-up
+    psrldq      m0, 1
+    movd        [r0 + r1 * 2], m0
+    psrldq      m0, 1
+    movd        [r0 + r1], m0
+    psrldq      m0, 1
+    movd        [r0], m0
+    RET
+
+; Mode 19 angular 4x4 prediction: vertical-side mirror of mode 17 — same
+; weights 6/12/18/24 but projected pixels come from the left array
+; ([r2 + 9/10/12]) and the main reference from above ([r2 + 1]); rows are
+; packed and stored directly (no transpose).
+cglobal intra_pred_ang4_19, 3,3,5
+    movd        m2, [r2]                ;[x x x A]
+    movd        m3, [r2 + 9]            ;[x x x B]
+    movd        m4, [r2 + 10]           ;[x x x C]
+    movd        m0, [r2 + 12]           ;[x x x D]
+    punpcklbw   m3, m2                  ;[x x A B]
+    punpcklbw   m0, m4                  ;[x x C D]
+    punpcklwd   m0, m3                  ;[A B C D]
+    movd        m1, [r2 + 1]            ;[4 3 2 1]
+    punpckldq   m0, m1                  ;[4 3 2 1 A B C D]
+    punpcklbw   m0, m0                  ;[4 4 3 3 2 2 1 1 A A B B C C D D]
+    psrldq      m0, 1
+    movh        m1, m0                  ;[x 4 4 3 3 2 2 1 1 A A B B C C D]
+    psrldq      m0, 2
+    movh        m2, m0                  ;[x x x 4 4 3 3 2 2 1 1 A A B B C]
+    psrldq      m0, 2
+    movh        m3, m0                  ;[x x x x x 4 4 3 3 2 2 1 1 A A B]
+    psrldq      m0, 2                   ;[x x x x x x x 4 4 3 3 2 2 1 1 A]
+
+    pxor        m4, m4
+    punpcklbw   m3, m4
+    pmaddwd     m3, [pw_ang_table + 12 * 16]
+    punpcklbw   m0, m4
+    pmaddwd     m0, [pw_ang_table + 6 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m1, m4
+    pmaddwd     m1, [pw_ang_table + 24 * 16]
+    punpcklbw   m2, m4
+    pmaddwd     m2, [pw_ang_table + 18 * 16]
+    packssdw    m2, m1
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; Mode 20 angular 4x4 prediction: vertical-side mirror of mode 16 — same
+; weights 11/22 and 1/12, projected pixels B and C both from [r2 + 10] (the
+; left array; matches mode 16's duplicated [r2 + 2] load), main reference from
+; above; rows stored directly (no transpose).
+cglobal intra_pred_ang4_20, 3,3,5
+    movd        m2, [r2]                ;[x x x A]
+    movd        m1, [r2 + 10]           ;[x x x B]
+    punpcklbw   m1, m2                  ;[x x A B]
+    movd        m0, [r2 + 10]           ;[x x C x]
+    punpcklwd   m0, m1                  ;[A B C x]
+    movd        m1, [r2 + 1]            ;[4 3 2 1]
+    punpckldq   m0, m1                  ;[4 3 2 1 A B C x]
+    psrldq      m0, 1                   ;[x 4 3 2 1 A B C]
+    punpcklbw   m0, m0                  ;[x x 4 4 3 3 2 2 1 1 A A B B C C]
+    psrldq      m0, 1
+    movh        m1, m0                  ;[x x x 4 4 3 3 2 2 1 1 A A B B C]
+    psrldq      m0, 2
+    movh        m2, m0                  ;[x x x x x 4 4 3 3 2 2 1 1 A A B]
+    psrldq      m0, 2                   ;[x x x x x x x 4 4 3 3 2 2 1 1 A]
+
+    pxor        m4, m4
+    punpcklbw   m2, m4
+    mova        m3, m2
+    pmaddwd     m3, [pw_ang_table + 22 * 16]
+    punpcklbw   m0, m4
+    pmaddwd     m0, [pw_ang_table + 11 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m1, m4
+    pmaddwd     m1, [pw_ang_table + 12 * 16]
+    pmaddwd     m2, [pw_ang_table + 1 * 16]
+    packssdw    m2, m1
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; Mode 21 angular 4x4 prediction: vertical-side mirror of mode 15 — same
+; weights 15/30 and 13/28, projected pixels B/C from the left array
+; ([r2 + 10/11]), main reference from above; rows stored directly.
+cglobal intra_pred_ang4_21, 3,3,5
+    movd        m0, [r2]                ;[x x x A]
+    movd        m1, [r2 + 10]           ;[x x x B]
+    punpcklbw   m1, m0                  ;[x x A B]
+    movd        m0, [r2 + 11]           ;[x x C x]
+    punpcklwd   m0, m1                  ;[A B C x]
+    movd        m1, [r2 + 1]            ;[4 3 2 1]
+    punpckldq   m0, m1                  ;[4 3 2 1 A B C x]
+    psrldq      m0, 1                   ;[x 4 3 2 1 A B C]
+    punpcklbw   m0, m0                  ;[x x 4 4 3 3 2 2 1 1 A A B B C C]
+    psrldq      m0, 1
+    movh        m1, m0                  ;[x x x 4 4 3 3 2 2 1 1 A A B B C]
+    psrldq      m0, 2
+    movh        m2, m0                  ;[x x x x x 4 4 3 3 2 2 1 1 A A B]
+    psrldq      m0, 2                   ;[x x x x x x x 4 4 3 3 2 2 1 1 A]
+
+    pxor        m4, m4
+    punpcklbw   m2, m4
+    mova        m3, m2
+    pmaddwd     m3, [pw_ang_table + 30 * 16]
+    punpcklbw   m0, m4
+    pmaddwd     m0, [pw_ang_table + 15 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m1, m4
+    pmaddwd     m1, [pw_ang_table + 28 * 16]
+    pmaddwd     m2, [pw_ang_table + 13 * 16]
+    packssdw    m2, m1
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 22 (negative angle).
+; r0 = dst, r1 = dstStride, r2 = srcPix ([0] top-left, [1..8] above, [9..16] left).
+; Only one left sample is projected: B = srcPix[10] (byte 1 of the dword loaded
+; at r2 + 9); A = top-left = srcPix[0] (byte 1 of the dword at r2 - 1).
+cglobal intra_pred_ang4_22, 3,3,4
+    movd        m1, [r2 - 1]            ;[x x A x]
+    movd        m0, [r2 + 9]            ;[x x B x]
+    punpcklbw   m0, m1                  ;[A B x x]
+    movd        m1, [r2 + 1]            ;[4 3 2 1]
+    punpckldq   m0, m1                  ;[4 3 2 1 A B x x]
+    psrldq      m0, 2                   ;[x x 4 3 2 1 A B]
+    punpcklbw   m0, m0                  ;[x x x x 4 4 3 3 2 2 1 1 A A B B]
+    psrldq      m0, 1
+    movh        m2, m0                  ;[x x x x x 4 4 3 3 2 2 1 1 A A B]
+    psrldq      m0, 2                   ;[x x x x x x x 4 4 3 3 2 2 1 1 A]
+
+    ; ((32-f)*ref[i] + f*ref[i+1] + 16) >> 5; weight pairs from pw_ang_table
+    ; (defined elsewhere); fractions 19,6,25,12 match mode 22
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m3, m0
+    pmaddwd     m3, [pw_ang_table + 6 * 16]
+    pmaddwd     m0, [pw_ang_table + 19 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m2, m1
+    mova        m3, m2
+    pmaddwd     m3, [pw_ang_table + 12 * 16]
+    pmaddwd     m2, [pw_ang_table + 25 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 23 (negative angle).
+; r0 = dst, r1 = dstStride, r2 = srcPix ([0] top-left, [1..8] above, [9..16] left).
+; One projected left sample: B = srcPix[12] (byte 1 of the dword at r2 + 11).
+cglobal intra_pred_ang4_23, 3,3,5
+    movd        m1, [r2 - 1]            ;[x x A x]
+    movd        m2, [r2 + 1]            ;[4 3 2 1]
+    movd        m0, [r2 + 11]           ;[x x B x]
+    punpcklbw   m0, m1                  ;[x x x x A B x x]
+    punpckldq   m0, m2                  ;[4 3 2 1 A B x x]
+    psrldq      m0, 2                   ;[x x 4 3 2 1 A B]
+    punpcklbw   m0, m0
+    psrldq      m0, 1
+    mova        m3, m0                  ;[x x x x x 4 4 3 3 2 2 1 1 A A B]
+    psrldq      m0, 2                   ;[x x x x x x x 4 4 3 3 2 2 1 1 A]
+
+    ; ((32-f)*ref[i] + f*ref[i+1] + 16) >> 5; weight pairs from pw_ang_table
+    ; (defined elsewhere); fractions 23,14,5,28 match mode 23
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m4, m0
+    mova        m2, m0
+    pmaddwd     m4, [pw_ang_table + 14 * 16]
+    pmaddwd     m0, [pw_ang_table + 23 * 16]
+    packssdw    m0, m4
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m3, m1
+    pmaddwd     m3, [pw_ang_table + 28 * 16]
+    pmaddwd     m2, [pw_ang_table + 5 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 25 (shallowest negative angle).
+; r0 = dst, r1 = dstStride, r2 = srcPix ([0] top-left, [1..8] above, [9..16] left).
+; Only the top-left sample A = srcPix[0] extends the reference row; the movh at
+; r2 - 7 places A in byte position 7 so the psrldq below aligns it before '1'.
+; All four rows interpolate over the same reference bytes with different weights.
+cglobal intra_pred_ang4_25, 3,3,5
+    movd        m1, [r2 + 1]            ;[4 3 2 1]
+    movh        m0, [r2 - 7]            ;[A x x x x x x x]
+    punpcklbw   m1, m1                  ;[4 4 3 3 2 2 1 1]
+    punpcklqdq  m0, m1                  ;[4 4 3 3 2 2 1 1 A x x x x x x x]
+    psrldq      m0, 7                   ;[x x x x x x x x 4 3 3 2 2 1 1 A]
+
+    ; ((32-f)*ref[i] + f*ref[i+1] + 16) >> 5; weight pairs from pw_ang_table
+    ; (defined elsewhere); fractions 30,28,26,24 match mode 25
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m2, m0
+    mova        m3, m0
+    mova        m4, m2
+    pmaddwd     m3, [pw_ang_table + 28 * 16]
+    pmaddwd     m0, [pw_ang_table + 30 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    pmaddwd     m4, [pw_ang_table + 24 * 16]
+    pmaddwd     m2, [pw_ang_table + 26 * 16]
+    packssdw    m2, m4
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 26 (pure vertical).
+; r0 = dst, r1 = dstStride, r2 = srcPix; r4m (5th stack arg) = bFilter.
+; Copies the above row srcPix[1..4] to all four rows; when bFilter != 0 the
+; first column is additionally smoothed with (left[y] - topLeft) >> 1.
+cglobal intra_pred_ang4_26, 3,4,4
+    movd        m0, [r2 + 1]            ;[8 7 6 5 4 3 2 1]
+
+    ; store
+    movd        [r0], m0
+    movd        [r0 + r1], m0
+    movd        [r0 + r1 * 2], m0
+    lea         r3, [r1 * 3]
+    movd        [r0 + r3], m0
+
+    ; filter
+    cmp         r4m, byte 0
+    jz         .quit
+
+    pxor        m3, m3
+    punpcklbw   m0, m3
+    pshuflw     m0, m0, 0x00            ; broadcast above[0]
+    movd        m2, [r2]
+    punpcklbw   m2, m3
+    pshuflw     m2, m2, 0x00            ; broadcast top-left
+    movd        m1, [r2 + 9]            ; left column samples
+    punpcklbw   m1, m3
+    psubw       m1, m2
+    psraw       m1, 1
+    paddw       m0, m1                  ; above[0] + ((left[y] - topLeft) >> 1)
+    packuswb    m0, m0
+
+    ; r2 (source pointer) is no longer needed -- reuse it to spill the four
+    ; filtered bytes into column 0, one row at a time
+    movd        r2, m0
+    mov         [r0], r2b
+    shr         r2, 8
+    mov         [r0 + r1], r2b
+    shr         r2, 8
+    mov         [r0 + r1 * 2], r2b
+    shr         r2, 8
+    mov         [r0 + r3], r2b
+
+.quit:
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 27 (shallow positive angle).
+; r0 = dst, r1 = dstStride, r2 = srcPix; only the above row srcPix[1..8] is
+; needed. All rows share the same reference pairs, weights differ per row.
+cglobal intra_pred_ang4_27, 3,3,5
+    movh        m0, [r2 + 1]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m0, m0
+    psrldq      m0, 1                   ;[x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    ; ((32-f)*ref[i] + f*ref[i+1] + 16) >> 5; weight pairs from pw_ang_table
+    ; (defined elsewhere); fractions 2,4,6,8 match mode 27
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m2, m0
+    mova        m3, m0
+    mova        m4, m2
+    pmaddwd     m3, [pw_ang_table + 4 * 16]
+    pmaddwd     m0, [pw_ang_table + 2 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    pmaddwd     m4, [pw_ang_table + 8 * 16]
+    pmaddwd     m2, [pw_ang_table + 6 * 16]
+    packssdw    m2, m4
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 28 (positive angle).
+; r0 = dst, r1 = dstStride, r2 = srcPix; uses only the above row srcPix[1..8].
+cglobal intra_pred_ang4_28, 3,3,5
+    movh        m0, [r2 + 1]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m0, m0
+    psrldq      m0, 1                   ;[x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    ; ((32-f)*ref[i] + f*ref[i+1] + 16) >> 5; weight pairs from pw_ang_table
+    ; (defined elsewhere); fractions 5,10,15,20 match mode 28
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m2, m0
+    mova        m3, m0
+    mova        m4, m2
+    pmaddwd     m3, [pw_ang_table + 10 * 16]
+    pmaddwd     m0, [pw_ang_table + 5 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    pmaddwd     m4, [pw_ang_table + 20 * 16]
+    pmaddwd     m2, [pw_ang_table + 15 * 16]
+    packssdw    m2, m4
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 29 (positive angle).
+; r0 = dst, r1 = dstStride, r2 = srcPix; the last row's integer offset advances
+; by one, so it uses the shifted copy in m3 while the first three rows use m0.
+cglobal intra_pred_ang4_29, 3,3,5
+    movh        m3, [r2 + 1]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m3, m3
+    psrldq      m3, 1
+    movh        m0, m3                  ;[x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    psrldq      m3, 2                   ;[x x x x x x x x 6 5 5 4 4 3 3 2]
+
+    ; ((32-f)*ref[i] + f*ref[i+1] + 16) >> 5; weight pairs from pw_ang_table
+    ; (defined elsewhere); fractions 9,18,27,4 match mode 29
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m4, m0
+    mova        m2, m0
+    pmaddwd     m4, [pw_ang_table + 18 * 16]
+    pmaddwd     m0, [pw_ang_table + 9 * 16]
+    packssdw    m0, m4
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m3, m1
+    pmaddwd     m3, [pw_ang_table + 4 * 16]
+    pmaddwd     m2, [pw_ang_table + 27 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 30 (positive angle).
+; r0 = dst, r1 = dstStride, r2 = srcPix; rows 0-1 read from m0, rows 2-3 from
+; the copy shifted one reference sample (m2).
+cglobal intra_pred_ang4_30, 3,3,4
+    movh        m2, [r2 + 1]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m2, m2
+    psrldq      m2, 1
+    movh        m0, m2                  ;[x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    psrldq      m2, 2                   ;[x x x 8 8 7 7 6 6 5 5 4 4 3 3 2]
+
+    ; ((32-f)*ref[i] + f*ref[i+1] + 16) >> 5; weight pairs from pw_ang_table
+    ; (defined elsewhere); fractions 13,26,7,20 match mode 30
+    pxor        m1, m1
+    punpcklbw   m0, m1
+    mova        m3, m0
+    pmaddwd     m3, [pw_ang_table + 26 * 16]
+    pmaddwd     m0, [pw_ang_table + 13 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m2, m1
+    mova        m3, m2
+    pmaddwd     m3, [pw_ang_table + 20 * 16]
+    pmaddwd     m2, [pw_ang_table + 7 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 31 (positive angle).
+; r0 = dst, r1 = dstStride, r2 = srcPix; three shifted views of the above row
+; (m0, m2, m3) cover the per-row integer offsets 0, 1, 2.
+cglobal intra_pred_ang4_31, 3,3,5
+    movh        m3, [r2 + 1]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m3, m3
+    psrldq      m3, 1
+    mova        m0, m3                  ;[x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    psrldq      m3, 2
+    mova        m2, m3                  ;[x x x x x x x x 6 5 5 4 4 3 3 2]
+    psrldq      m3, 2                   ;[x x x x x x x x 7 6 6 5 5 4 4 3]
+
+    ; ((32-f)*ref[i] + f*ref[i+1] + 16) >> 5; weight pairs from pw_ang_table
+    ; (defined elsewhere); fractions 17,2,19,4 match mode 31
+    pxor        m1, m1
+    punpcklbw   m2, m1
+    mova        m4, m2
+    pmaddwd     m4, [pw_ang_table + 2 * 16]
+    punpcklbw   m0, m1
+    pmaddwd     m0, [pw_ang_table + 17 * 16]
+    packssdw    m0, m4
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m3, m1
+    pmaddwd     m3, [pw_ang_table + 4 * 16]
+    pmaddwd     m2, [pw_ang_table + 19 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 32 (positive angle).
+; r0 = dst, r1 = dstStride, r2 = srcPix; shifted views m0, m2, m1 cover the
+; per-row integer offsets 0, 1, 2 of the above reference row.
+cglobal intra_pred_ang4_32, 3,3,5
+    movh        m1, [r2 + 1]            ;[8 7 6 5 4 3 2 1]
+    punpcklbw   m1, m1
+    psrldq      m1, 1
+    movh        m0, m1                  ;[x x x x x x x x 5 4 4 3 3 2 2 1]
+    psrldq      m1, 2
+    movh        m2, m1                  ;[x x x x x x x x 6 5 5 4 4 3 3 2]
+    psrldq      m1, 2                   ;[x x x x x x x x 7 6 6 5 5 4 4 3]
+
+    ; ((32-f)*ref[i] + f*ref[i+1] + 16) >> 5; weight pairs from pw_ang_table
+    ; (defined elsewhere); fractions 21,10,31,20 match mode 32
+    pxor        m4, m4
+    punpcklbw   m2, m4
+    mova        m3, m2
+    pmaddwd     m3, [pw_ang_table + 10 * 16]
+    punpcklbw   m0, m4
+    pmaddwd     m0, [pw_ang_table + 21 * 16]
+    packssdw    m0, m3
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m1, m4
+    pmaddwd     m1, [pw_ang_table + 20 * 16]
+    pmaddwd     m2, [pw_ang_table + 31 * 16]
+    packssdw    m2, m1
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+; 4x4 angular intra prediction, HEVC mode 33 (steepest positive angle).
+; r0 = dst, r1 = dstStride, r2 = srcPix; each row advances one reference
+; sample, so four shifted views (m0, m1, m2, m3) are prepared.
+cglobal intra_pred_ang4_33, 3,3,5
+    movh        m3, [r2 + 1]   ; [8 7 6 5 4 3 2 1]
+    punpcklbw   m3, m3
+    psrldq      m3, 1
+    movh        m0, m3                  ;[x x x x x x x x 5 4 4 3 3 2 2 1]
+    psrldq      m3, 2
+    movh        m1, m3                  ;[x x x x x x x x 6 5 5 4 4 3 3 2]
+    psrldq      m3, 2
+    movh        m2, m3                  ;[x x x x x x x x 7 6 6 5 5 4 4 3]
+    psrldq      m3, 2                   ;[x x x x x x x x 8 7 7 6 6 5 5 4]
+
+    ; ((32-f)*ref[i] + f*ref[i+1] + 16) >> 5; weight pairs from pw_ang_table
+    ; (defined elsewhere); fractions 26,20,14,8 match mode 33
+    pxor        m4, m4
+    punpcklbw   m1, m4
+    pmaddwd     m1, [pw_ang_table + 20 * 16]
+    punpcklbw   m0, m4
+    pmaddwd     m0, [pw_ang_table + 26 * 16]
+    packssdw    m0, m1
+    paddw       m0, [pw_16]
+    psraw       m0, 5
+    punpcklbw   m3, m4
+    pmaddwd     m3, [pw_ang_table + 8 * 16]
+    punpcklbw   m2, m4
+    pmaddwd     m2, [pw_ang_table + 14 * 16]
+    packssdw    m2, m3
+    paddw       m2, [pw_16]
+    psraw       m2, 5
+    packuswb    m0, m2
+
+    STORE_4x4
+    RET
+
+;---------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
+;---------------------------------------------------------------------------------------------
+INIT_XMM sse4
+; 4x4 DC prediction: dc = (sum(above[0..3]) + sum(left[0..3]) + 4) >> 3, filled
+; into the whole block; when bFilter (r4d) != 0 the first row/column are then
+; smoothed toward the neighbours per the HEVC DC filter.
+cglobal intra_pred_dc4, 5,5,3
+    inc         r2                      ; r2 -> above[0] (left[0] is now at r2+8)
+    pxor        m0, m0
+    movd        m1, [r2]
+    movd        m2, [r2 + 8]
+    punpckldq   m1, m2
+    psadbw      m1, m0              ; m1 = sum
+
+    ; set ZF from bFilter now; none of the SIMD/lea/movd ops below writes
+    ; EFLAGS, so the jz further down still sees this result
+    test        r4d, r4d
+
+    pmulhrsw    m1, [pw_4096]       ; m1 = (sum + 4) / 8
+    movd        r4d, m1             ; r4d = dc_val
+    pshufb      m1, m0              ; m1 = byte [dc_val ...]
+
+    ; store DC 4x4
+    lea         r3, [r1 * 3]
+    movd        [r0], m1
+    movd        [r0 + r1], m1
+    movd        [r0 + r1 * 2], m1
+    movd        [r0 + r3], m1
+
+    ; do DC filter
+    jz         .end
+    lea         r3d, [r4d * 2 + 2]  ; r3d = DC * 2 + 2
+    add         r4d, r3d            ; r4d = DC * 3 + 2
+    movd        m1, r4d
+    pshuflw     m1, m1, 0           ; m1 = pixDCx3
+    pshufd      m1, m1, 0
+
+    ; filter top: (above[x] + 3*DC + 2) >> 2 for row 0, and the same for
+    ; left[1..3] packed in the upper dword for column 0
+    movd        m2, [r2]
+    movd        m0, [r2 + 9]
+    punpckldq   m2, m0
+    pmovzxbw    m2, m2
+    paddw       m2, m1
+    psraw       m2, 2
+    packuswb    m2, m2
+    movd        [r0], m2            ; overwrite top-left pixel, we will update it later
+
+    ; filter top-left: (above[0] + left[0] + 2*DC + 2) >> 2
+    movzx       r4d, byte [r2 + 8]
+    add         r3d, r4d
+    movzx       r4d, byte [r2]
+    add         r3d, r4d
+    shr         r3d, 2
+    mov         [r0], r3b
+
+    ; filter left (rows 1..3 of column 0, from bytes 4..6 of m2)
+    add         r0, r1
+    pextrb      [r0], m2, 4
+    pextrb      [r0 + r1], m2, 5
+    pextrb      [r0 + r1 * 2], m2, 6
+
+.end:
+    RET
+
+;---------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
+;---------------------------------------------------------------------------------------------
+INIT_XMM sse4
+; 8x8 DC prediction: dc = (sum(above[0..7]) + sum(left[0..7]) + 8) >> 4, with
+; the optional HEVC DC edge filter when bFilter (r4d) != 0.
+cglobal intra_pred_dc8, 5, 7, 3
+    lea             r3, [r2 + 17]        ; r3 -> left[0] (srcPix layout: [0] TL, [1..16] above, [17..32] left)
+    inc             r2                   ; r2 -> above[0]
+    pxor            m0,            m0
+    movh            m1,            [r2]
+    movh            m2,            [r3]
+    punpcklqdq      m1,            m2
+    psadbw          m1,            m0
+    pshufd          m2,            m1, 2 ; add the two 64-bit psadbw partial sums
+    paddw           m1,            m2
+
+    movd            r5d,           m1
+    add             r5d,           8
+    shr             r5d,           4     ; sum = sum / 16
+    movd            m1,            r5d
+    pshufb          m1,            m0    ; m1 = byte [dc_val ...]
+
+    ; set ZF from bFilter; the stores below leave EFLAGS intact for the jz
+    test            r4d,           r4d
+
+    ; store DC 8x8
+    mov             r6,            r0    ; keep original dst for the filter pass
+    movh            [r0],          m1
+    movh            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movh            [r0],          m1
+    movh            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movh            [r0],          m1
+    movh            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movh            [r0],          m1
+    movh            [r0 + r1],     m1
+
+    ; Do DC Filter
+    jz              .end
+    lea             r4d,           [r5d * 2 + 2]  ; r4d = DC * 2 + 2
+    add             r5d,           r4d            ; r5d = DC * 3 + 2
+    movd            m1,            r5d
+    pshuflw         m1,            m1, 0          ; m1 = pixDCx3
+    pshufd          m1,            m1, 0
+
+    ; filter top: (above[x] + 3*DC + 2) >> 2
+    pmovzxbw        m2,            [r2]
+    paddw           m2,            m1
+    psraw           m2,            2
+    packuswb        m2,            m2
+    movh            [r6],          m2
+
+    ; filter top-left: (above[0] + left[0] + 2*DC + 2) >> 2
+    movzx           r5d, byte      [r3]
+    add             r4d,           r5d
+    movzx           r3d, byte      [r2]
+    add             r3d,           r4d
+    shr             r3d,           2
+    mov             [r6],          r3b
+
+    ; filter left column, rows 1..7; the last three pextrb are issued out of
+    ; row order (rows 5, 7, then 6 via r1*3) but hit the correct addresses
+    add             r6,            r1
+    pmovzxbw        m2,            [r2 + 17]
+    paddw           m2,            m1
+    psraw           m2,            2
+    packuswb        m2,            m2
+    pextrb          [r6],          m2, 0
+    pextrb          [r6 + r1],     m2, 1
+    pextrb          [r6 + 2 * r1], m2, 2
+    lea             r6,            [r6 + r1 * 2]
+    pextrb          [r6 + r1],     m2, 3
+    pextrb          [r6 + r1 * 2], m2, 4
+    pextrb          [r6 + r1 * 4], m2, 6
+    lea             r1,            [r1 * 3]
+    pextrb          [r6 + r1],     m2, 5
+
+.end:
+    RET
+
+;--------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
+;--------------------------------------------------------------------------------------------
+INIT_XMM sse4
+; 16x16 DC prediction: dc = (sum(above[0..15]) + sum(left[0..15]) + 16) >> 5,
+; with the optional HEVC DC edge filter when bFilter (r4d) != 0.
+cglobal intra_pred_dc16, 5, 7, 4
+    lea             r3, [r2 + 33]        ; r3 -> left[0] ([0] TL, [1..32] above, [33..64] left)
+    inc             r2                   ; r2 -> above[0]
+    pxor            m0,            m0
+    movu            m1,            [r2]
+    movu            m2,            [r3]
+    psadbw          m1,            m0
+    psadbw          m2,            m0
+    paddw           m1,            m2
+    pshufd          m2,            m1, 2 ; add the two 64-bit psadbw partial sums
+    paddw           m1,            m2
+
+    movd            r5d,           m1
+    add             r5d,           16
+    shr             r5d,           5     ; sum = sum / 32
+    movd            m1,            r5d
+    pshufb          m1,            m0    ; m1 = byte [dc_val ...]
+
+    ; set ZF from bFilter; the stores below leave EFLAGS intact for the jz
+    test            r4d,           r4d
+
+    ; store DC 16x16
+    mov             r6,            r0    ; keep original dst for the filter pass
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    lea             r0,            [r0 + r1 * 2]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+
+    ; Do DC Filter
+    jz              .end
+    lea             r4d,           [r5d * 2 + 2]  ; r4d = DC * 2 + 2
+    add             r5d,           r4d            ; r5d = DC * 3 + 2
+    movd            m1,            r5d
+    pshuflw         m1,            m1, 0          ; m1 = pixDCx3
+    pshufd          m1,            m1, 0
+
+    ; filter top: (above[x] + 3*DC + 2) >> 2, in two 8-pixel halves
+    pmovzxbw        m2,            [r2]
+    paddw           m2,            m1
+    psraw           m2,            2
+    packuswb        m2,            m2
+    movh            [r6],          m2
+    pmovzxbw        m3,            [r2 + 8]
+    paddw           m3,            m1
+    psraw           m3,            2
+    packuswb        m3,            m3
+    movh            [r6 + 8],      m3
+
+    ; filter top-left: (above[0] + left[0] + 2*DC + 2) >> 2
+    movzx           r5d, byte      [r3]
+    add             r4d,           r5d
+    movzx           r3d, byte      [r2]
+    add             r3d,           r4d
+    shr             r3d,           2
+    mov             [r6],          r3b
+
+    ; filter left column, rows 1..15, in two 8-pixel halves (m2 then m3)
+    add             r6,            r1
+    pmovzxbw        m2,            [r2 + 33]
+    paddw           m2,            m1
+    psraw           m2,            2
+    packuswb        m2,            m2
+    pextrb          [r6],          m2, 0
+    pextrb          [r6 + r1],     m2, 1
+    pextrb          [r6 + r1 * 2], m2, 2
+    lea             r6,            [r6 + r1 * 2]
+    pextrb          [r6 + r1],     m2, 3
+    pextrb          [r6 + r1 * 2], m2, 4
+    lea             r6,            [r6 + r1 * 2]
+    pextrb          [r6 + r1],     m2, 5
+    pextrb          [r6 + r1 * 2], m2, 6
+    lea             r6,            [r6 + r1 * 2]
+    pextrb          [r6 + r1],     m2, 7
+
+    pmovzxbw        m3,            [r2 + 41]
+    paddw           m3,            m1
+    psraw           m3,            2
+    packuswb        m3,            m3
+    pextrb          [r6 + r1 * 2], m3, 0
+    lea             r6,            [r6 + r1 * 2]
+    pextrb          [r6 + r1],     m3, 1
+    pextrb          [r6 + r1 * 2], m3, 2
+    lea             r6,            [r6 + r1 * 2]
+    pextrb          [r6 + r1],     m3, 3
+    pextrb          [r6 + r1 * 2], m3, 4
+    lea             r6,            [r6 + r1 * 2]
+    pextrb          [r6 + r1],     m3, 5
+    pextrb          [r6 + r1 * 2], m3, 6
+
+.end:
+    RET
+
+;---------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
+;---------------------------------------------------------------------------------------------
+INIT_XMM sse4
+; 32x32 DC prediction: dc = (sum(above[0..31]) + sum(left[0..31]) + 32) >> 6.
+; No bFilter path: presumably the caller never requests DC filtering at 32x32
+; (per HEVC, filtering applies only to blocks < 32) -- note only 3 args used.
+cglobal intra_pred_dc32, 3, 5, 5
+    lea             r3, [r2 + 65]        ; r3 -> left[0] ([0] TL, [1..64] above, [65..128] left)
+    inc             r2                   ; r2 -> above[0]
+    pxor            m0,            m0
+    movu            m1,            [r2]
+    movu            m2,            [r2 + 16]
+    movu            m3,            [r3]
+    movu            m4,            [r3 + 16]
+    psadbw          m1,            m0
+    psadbw          m2,            m0
+    psadbw          m3,            m0
+    psadbw          m4,            m0
+    paddw           m1,            m2
+    paddw           m3,            m4
+    paddw           m1,            m3
+    pshufd          m2,            m1, 2 ; add the two 64-bit psadbw partial sums
+    paddw           m1,            m2
+
+    movd            r4d,           m1
+    add             r4d,           32
+    shr             r4d,           6     ; sum = sum / 64
+    movd            m1,            r4d
+    pshufb          m1,            m0    ; m1 = byte [dc_val ...]
+
+    ; fill 32 rows; each iteration writes a 32-byte row as two 16-byte halves,
+    ; the %rep covers rows 0..15 then 16..31
+%rep 2
+    ; store DC 16x16
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    movu            [r0 + 16],     m1
+    movu            [r0 + r1 + 16],m1
+    lea             r0,            [r0 + 2 * r1]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    movu            [r0 + 16],     m1
+    movu            [r0 + r1 + 16],m1
+    lea             r0,            [r0 + 2 * r1]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    movu            [r0 + 16],     m1
+    movu            [r0 + r1 + 16],m1
+    lea             r0,            [r0 + 2 * r1]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    movu            [r0 + 16],     m1
+    movu            [r0 + r1 + 16],m1
+    lea             r0,            [r0 + 2 * r1]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    movu            [r0 + 16],     m1
+    movu            [r0 + r1 + 16],m1
+    lea             r0,            [r0 + 2 * r1]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    movu            [r0 + 16],     m1
+    movu            [r0 + r1 + 16],m1
+    lea             r0,            [r0 + 2 * r1]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    movu            [r0 + 16],     m1
+    movu            [r0 + r1 + 16],m1
+    lea             r0,            [r0 + 2 * r1]
+    movu            [r0],          m1
+    movu            [r0 + r1],     m1
+    movu            [r0 + 16],     m1
+    movu            [r0 + r1 + 16],m1
+    lea             r0,            [r0 + 2 * r1]
+%endrep
+
+    RET
+
+;---------------------------------------------------------------------------------------------
+; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
+;---------------------------------------------------------------------------------------------
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+; 32x32 DC prediction, AVX2: same result as the SSE4 version above but each
+; 32-byte row is a single ymm store; x86-64 only.
+cglobal intra_pred_dc32, 3, 4, 3
+    lea             r3, [r1 * 3]
+    pxor            m0, m0
+    movu            m1, [r2 + 1]    ; above[0..31]
+    movu            m2, [r2 + 65]   ; left[0..31]
+    psadbw          m1, m0
+    psadbw          m2, m0
+    paddw           m1, m2
+    vextracti128    xm2, m1, 1      ; fold the four 64-bit psadbw partial sums
+    paddw           m1, m2
+    pshufd          m2, m1, 2
+    paddw           m1, m2
+
+    pmulhrsw        m1, [pw_512]    ; sum = (sum + 32) / 64
+    vpbroadcastb    m1, xm1         ; m1 = byte [dc_val ...]
+
+    ; fill the 32 rows, four rows per group
+    movu            [r0 + r1 * 0], m1
+    movu            [r0 + r1 * 1], m1
+    movu            [r0 + r1 * 2], m1
+    movu            [r0 + r3 * 1], m1
+    lea             r0, [r0 + 4 * r1]
+    movu            [r0 + r1 * 0], m1
+    movu            [r0 + r1 * 1], m1
+    movu            [r0 + r1 * 2], m1
+    movu            [r0 + r3 * 1], m1
+    lea             r0, [r0 + 4 * r1]
+    movu            [r0 + r1 * 0], m1
+    movu            [r0 + r1 * 1], m1
+    movu            [r0 + r1 * 2], m1
+    movu            [r0 + r3 * 1], m1
+    lea             r0, [r0 + 4 * r1]
+    movu            [r0 + r1 * 0], m1
+    movu            [r0 + r1 * 1], m1
+    movu            [r0 + r1 * 2], m1
+    movu            [r0 + r3 * 1], m1
+    lea             r0, [r0 + 4 * r1]
+    movu            [r0 + r1 * 0], m1
+    movu            [r0 + r1 * 1], m1
+    movu            [r0 + r1 * 2], m1
+    movu            [r0 + r3 * 1], m1
+    lea             r0, [r0 + 4 * r1]
+    movu            [r0 + r1 * 0], m1
+    movu            [r0 + r1 * 1], m1
+    movu            [r0 + r1 * 2], m1
+    movu            [r0 + r3 * 1], m1
+    lea             r0, [r0 + 4 * r1]
+    movu            [r0 + r1 * 0], m1
+    movu            [r0 + r1 * 1], m1
+    movu            [r0 + r1 * 2], m1
+    movu            [r0 + r3 * 1], m1
+    lea             r0, [r0 + 4 * r1]
+    movu            [r0 + r1 * 0], m1
+    movu            [r0 + r1 * 1], m1
+    movu            [r0 + r1 * 2], m1
+    movu            [r0 + r3 * 1], m1
+    RET
+%endif ;; ARCH_X86_64 == 1
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+INIT_XMM sse4
+; 4x4 planar prediction:
+;   dst[y][x] = (ver + hor) >> 3 where the vertical/horizontal ramps are kept
+;   in a running accumulator: m3 = current row's partial sum (seeded with
+;   (x+1)*topRight + bottomLeft + 3*above[x] + 4), m4 = per-row increment
+;   (bottomLeft - above[x]). Each row only adds (blkSize-1-x)*left[y] on top.
+; multi_2Row / pw_planar4_0 are weight tables defined elsewhere -- presumably
+; {1..4} repeated and {3,2,1,0} respectively; verify against their definitions.
+cglobal intra_pred_planar4, 3,3,7
+    pmovzxbw        m1, [r2 + 1]            ; above[0..7] as words (above[4] = topRight)
+    pmovzxbw        m2, [r2 + 9]            ; left[0..7] as words (left[4] = bottomLeft)
+    pshufhw         m3, m1, 0               ; topRight
+    pshufd          m3, m3, 0xAA
+    pshufhw         m4, m2, 0               ; bottomLeft
+    pshufd          m4, m4, 0xAA
+    pmullw          m3, [multi_2Row]        ; (x + 1) * topRight
+    pmullw          m0, m1, [pw_3]          ; (blkSize - 1 - y) * above[x]
+    mova            m6, [pw_planar4_0]
+    paddw           m3, [pw_4]
+    paddw           m3, m4
+    paddw           m3, m0
+    psubw           m4, m1                  ; row increment = bottomLeft - above[x]
+
+    ; row 0: broadcast left[0], weight by (blkSize-1-x), add accumulator, >> 3
+    pshuflw         m5, m2, 0
+    pmullw          m5, m6
+    paddw           m5, m3
+    paddw           m3, m4                  ; advance accumulator to the next row
+    psraw           m5, 3
+    packuswb        m5, m5
+    movd            [r0], m5
+
+    pshuflw         m5, m2, 01010101b       ; row 1: left[1]
+    pmullw          m5, m6
+    paddw           m5, m3
+    paddw           m3, m4
+    psraw           m5, 3
+    packuswb        m5, m5
+    movd            [r0 + r1], m5
+    lea             r0, [r0 + 2 * r1]
+
+    pshuflw         m5, m2, 10101010b       ; row 2: left[2]
+    pmullw          m5, m6
+    paddw           m5, m3
+    paddw           m3, m4
+    psraw           m5, 3
+    packuswb        m5, m5
+    movd            [r0], m5
+
+    pshuflw         m5, m2, 11111111b       ; row 3: left[3]
+    pmullw          m5, m6
+    paddw           m5, m3
+    paddw           m3, m4
+    psraw           m5, 3
+    packuswb        m5, m5
+    movd            [r0 + r1], m5
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+INIT_XMM sse4
+; 8x8 planar prediction; same accumulator scheme as intra_pred_planar4:
+; m3 = row partial sum, m4 = per-row increment (bottomLeft - above[x]),
+; each row adds (blkSize-1-x)*left[y] and shifts by 4.
+cglobal intra_pred_planar8, 3,3,7
+    pmovzxbw        m1, [r2 + 1]            ; above[0..7]
+    pmovzxbw        m2, [r2 + 17]           ; left[0..7]
+
+    movd            m3, [r2 + 9]            ; topRight   = above[8];
+    movd            m4, [r2 + 25]           ; bottomLeft = left[8];
+
+    pxor            m0, m0
+    pshufb          m3, m0
+    pshufb          m4, m0
+    punpcklbw       m3, m0                  ; v_topRight
+    punpcklbw       m4, m0                  ; v_bottomLeft
+    pmullw          m3, [multiL]            ; (x + 1) * topRight
+    pmullw          m0, m1, [pw_7]          ; (blkSize - 1 - y) * above[x]
+    ; second half of the planar16 weight table -- presumably the
+    ; (blkSize-1-x) weights {7..0}; verify against pw_planar16_mul's definition
+    mova            m6, [pw_planar16_mul + mmsize]
+    paddw           m3, [pw_8]
+    paddw           m3, m4
+    paddw           m3, m0
+    psubw           m4, m1                  ; row increment = bottomLeft - above[x]
+
+; emit one row %1 (0..7): broadcast left[%1], weight, add accumulator, >> 4
+%macro INTRA_PRED_PLANAR8 1
+%if (%1 < 4)
+    pshuflw         m5, m2, 0x55 * %1
+    pshufd          m5, m5, 0
+%else
+    pshufhw         m5, m2, 0x55 * (%1 - 4)
+    pshufd          m5, m5, 0xAA
+%endif
+    pmullw          m5, m6
+    paddw           m5, m3
+    paddw           m3, m4                  ; advance accumulator
+    psraw           m5, 4
+    packuswb        m5, m5
+    movh            [r0], m5
+    lea             r0, [r0 + r1]
+%endmacro
+
+    INTRA_PRED_PLANAR8 0
+    INTRA_PRED_PLANAR8 1
+    INTRA_PRED_PLANAR8 2
+    INTRA_PRED_PLANAR8 3
+    INTRA_PRED_PLANAR8 4
+    INTRA_PRED_PLANAR8 5
+    INTRA_PRED_PLANAR8 6
+    INTRA_PRED_PLANAR8 7
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+INIT_XMM sse4
+; 16x16 planar prediction; the 16-wide row is split into a low half
+; (accumulator m3, increment m6) and a high half (accumulator m4,
+; increment m1), each row adds (blkSize-1-x)*left[y] and shifts by 5.
+cglobal intra_pred_planar16, 3,3,8
+    pmovzxbw        m2, [r2 + 1]                ; above[0..7]
+    pmovzxbw        m7, [r2 + 9]                ; above[8..15]
+
+    movd            m3, [r2 + 17]               ; topRight   = above[16]
+    movd            m6, [r2 + 49]               ; bottomLeft = left[16]
+
+    pxor            m0, m0
+    pshufb          m3, m0
+    pshufb          m6, m0
+    punpcklbw       m3, m0                      ; v_topRight
+    punpcklbw       m6, m0                      ; v_bottomLeft
+    pmullw          m4, m3, [multiH]            ; (x + 1) * topRight
+    pmullw          m3, [multiL]                ; (x + 1) * topRight
+    pmullw          m1, m2, [pw_15]             ; (blkSize - 1 - y) * above[x]
+    pmullw          m5, m7, [pw_15]             ; (blkSize - 1 - y) * above[x]
+    paddw           m4, [pw_16]
+    paddw           m3, [pw_16]
+    paddw           m4, m6
+    paddw           m3, m6
+    paddw           m4, m5
+    paddw           m3, m1
+    psubw           m1, m6, m7                  ; high-half increment = bottomLeft - above[8..15]
+    psubw           m6, m2                      ; low-half  increment = bottomLeft - above[0..7]
+
+    pmovzxbw        m2, [r2 + 33]               ; left[0..7]
+    pmovzxbw        m7, [r2 + 41]               ; left[8..15]
+
+; emit one row %1 (0..15): broadcast left[%1] from m2/m7, weight both halves
+; with pw_planar16_mul (presumably the (blkSize-1-x) weights; defined
+; elsewhere), add the accumulators, >> 5, advance the accumulators
+%macro INTRA_PRED_PLANAR16 1
+%if (%1 < 4)
+    pshuflw         m5, m2, 0x55 * %1
+    pshufd          m5, m5, 0
+%else
+%if (%1 < 8)
+    pshufhw         m5, m2, 0x55 * (%1 - 4)
+    pshufd          m5, m5, 0xAA
+%else
+%if (%1 < 12)
+    pshuflw         m5, m7, 0x55 * (%1 - 8)
+    pshufd          m5, m5, 0
+%else
+    pshufhw         m5, m7, 0x55 * (%1 - 12)
+    pshufd          m5, m5, 0xAA
+%endif
+%endif
+%endif
+    pmullw          m0, m5, [pw_planar16_mul + mmsize]
+    pmullw          m5, [pw_planar16_mul]
+    paddw           m0, m4
+    paddw           m5, m3
+    paddw           m3, m6
+    paddw           m4, m1
+    psraw           m5, 5
+    psraw           m0, 5
+    packuswb        m5, m0
+    movu            [r0], m5
+    lea             r0, [r0 + r1]
+%endmacro
+
+    INTRA_PRED_PLANAR16 0
+    INTRA_PRED_PLANAR16 1
+    INTRA_PRED_PLANAR16 2
+    INTRA_PRED_PLANAR16 3
+    INTRA_PRED_PLANAR16 4
+    INTRA_PRED_PLANAR16 5
+    INTRA_PRED_PLANAR16 6
+    INTRA_PRED_PLANAR16 7
+    INTRA_PRED_PLANAR16 8
+    INTRA_PRED_PLANAR16 9
+    INTRA_PRED_PLANAR16 10
+    INTRA_PRED_PLANAR16 11
+    INTRA_PRED_PLANAR16 12
+    INTRA_PRED_PLANAR16 13
+    INTRA_PRED_PLANAR16 14
+    INTRA_PRED_PLANAR16 15
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+INIT_YMM avx2
+; 16x16 planar prediction, AVX2: whole 16-pixel row in one ymm of words;
+; two rows per macro invocation -- the broadcast word at [r2 + %1] carries
+; left[y] in its low byte (masked with pw_00ff) and left[y+1] in its high
+; byte (extracted with psrlw 8). Accumulator m3 / increment m4 as in the
+; SSE4 version above.
+cglobal intra_pred_planar16, 3,3,6
+    vpbroadcastw    m3, [r2 + 17]
+    mova            m5, [pw_00ff]
+    vpbroadcastw    m4, [r2 + 49]
+    mova            m0, [pw_planar16_mul]
+    pmovzxbw        m2, [r2 + 1]
+    pand            m3, m5                      ; v_topRight
+    pand            m4, m5                      ; v_bottomLeft
+
+    pmullw          m3, [multiL]                ; (x + 1) * topRight
+    pmullw          m1, m2, [pw_15]             ; (blkSize - 1 - y) * above[x]
+    paddw           m3, [pw_16]
+    paddw           m3, m4
+    paddw           m3, m1
+    psubw           m4, m2                      ; row increment = bottomLeft - above[x]
+    add             r2, 33                      ; r2 -> left[0]
+
+%macro INTRA_PRED_PLANAR16_AVX2 1
+    vpbroadcastw    m1, [r2 + %1]               ; left[%1] (low byte), left[%1+1] (high byte)
+    vpsrlw          m2, m1, 8                   ; left[%1+1]
+    pand            m1, m5                      ; left[%1]
+
+    pmullw          m1, m0
+    pmullw          m2, m0
+    paddw           m1, m3
+    paddw           m3, m4                      ; advance to row %1+1
+    psraw           m1, 5
+    paddw           m2, m3
+    psraw           m2, 5
+    paddw           m3, m4                      ; advance to row %1+2
+    packuswb        m1, m2
+    vpermq          m1, m1, 11011000b           ; undo packuswb's lane interleave
+    movu            [r0], xm1
+    vextracti128    [r0 + r1], m1, 1
+    lea             r0, [r0 + r1 * 2]
+%endmacro
+    INTRA_PRED_PLANAR16_AVX2 0
+    INTRA_PRED_PLANAR16_AVX2 2
+    INTRA_PRED_PLANAR16_AVX2 4
+    INTRA_PRED_PLANAR16_AVX2 6
+    INTRA_PRED_PLANAR16_AVX2 8
+    INTRA_PRED_PLANAR16_AVX2 10
+    INTRA_PRED_PLANAR16_AVX2 12
+    INTRA_PRED_PLANAR16_AVX2 14
+%undef INTRA_PRED_PLANAR16_AVX2
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; SSE4 32x32 planar prediction. Four 8-word lanes (m0..m3) hold the per-column
+; accumulators; m8..m11 hold the matching per-row increments (bottomLeft -
+; above[x]). x86-32 has only 8 XMM registers, so m8..m11 are redirected to a
+; 4 * mmsize stack area via %define.
+INIT_XMM sse4
+%if ARCH_X86_64 == 1
+cglobal intra_pred_planar32, 3,4,12
+%else
+cglobal intra_pred_planar32, 3,4,8,0-(4*mmsize)
+  %define           m8  [rsp + 0 * mmsize]
+  %define           m9  [rsp + 1 * mmsize]
+  %define           m10 [rsp + 2 * mmsize]
+  %define           m11 [rsp + 3 * mmsize]
+%endif
+    movd            m3, [r2 + 33]               ; topRight   = above[32]
+
+    pxor            m7, m7
+    pshufb          m3, m7                      ; broadcast low byte
+    punpcklbw       m3, m7                      ; v_topRight
+
+    pmullw          m0, m3, [multiL]            ; (x + 1) * topRight
+    pmullw          m1, m3, [multiH]            ; (x + 1) * topRight
+    pmullw          m2, m3, [multiH2]           ; (x + 1) * topRight
+    pmullw          m3, [multiH3]               ; (x + 1) * topRight
+
+    movd            m6, [r2 + 97]               ; bottomLeft = left[32]
+    pshufb          m6, m7
+    punpcklbw       m6, m7                      ; v_bottomLeft
+
+    paddw           m0, m6
+    paddw           m1, m6
+    paddw           m2, m6
+    paddw           m3, m6
+    paddw           m0, [pw_32]                 ; rounding term (1 << (shift - 1))
+    paddw           m1, [pw_32]
+    paddw           m2, [pw_32]
+    paddw           m3, [pw_32]
+    pmovzxbw        m4, [r2 + 1]                ; above[0..7]
+    pmullw          m5, m4, [pw_31]             ; (blkSize - 1 - y) * above[x]
+    paddw           m0, m5
+    psubw           m5, m6, m4                  ; row increment, columns 0..7
+    mova            m8, m5
+    pmovzxbw        m4, [r2 + 9]                ; above[8..15]
+    pmullw          m5, m4, [pw_31]
+    paddw           m1, m5
+    psubw           m5, m6, m4
+    mova            m9, m5
+    pmovzxbw        m4, [r2 + 17]               ; above[16..23]
+    pmullw          m5, m4, [pw_31]
+    paddw           m2, m5
+    psubw           m5, m6, m4
+    mova            m10, m5
+    pmovzxbw        m4, [r2 + 25]               ; above[24..31]
+    pmullw          m5, m4, [pw_31]
+    paddw           m3, m5
+    psubw           m5, m6, m4
+    mova            m11, m5
+    add             r2, 65                      ; (2 * blkSize + 1)
+
+; Emit one 32-pixel row from m0..m3, then advance the accumulators by m8..m11.
+%macro INTRA_PRED_PLANAR32 0
+    movd            m4, [r2]                    ; left[y]
+    pshufb          m4, m7
+    punpcklbw       m4, m7
+    pmullw          m5, m4, [pw_planar32_mul]
+    pmullw          m6, m4, [pw_planar32_mul + mmsize]
+    paddw           m5, m0
+    paddw           m6, m1
+    paddw           m0, m8
+    paddw           m1, m9
+    psraw           m5, 6                       ; >> (log2(blkSize) + 1)
+    psraw           m6, 6
+    packuswb        m5, m6
+    movu            [r0], m5
+    pmullw          m5, m4, [pw_planar16_mul]
+    pmullw          m4, [pw_planar16_mul + mmsize]
+    paddw           m5, m2
+    paddw           m4, m3
+    paddw           m2, m10
+    paddw           m3, m11
+    psraw           m5, 6
+    psraw           m4, 6
+    packuswb        m5, m4
+    movu            [r0 + 16], m5
+
+    lea             r0, [r0 + r1]
+    inc             r2
+%endmacro
+
+    mov             r3, 4                       ; 4 iterations x 8 unrolled rows = 32 rows
+.loop:
+    INTRA_PRED_PLANAR32
+    INTRA_PRED_PLANAR32
+    INTRA_PRED_PLANAR32
+    INTRA_PRED_PLANAR32
+    INTRA_PRED_PLANAR32
+    INTRA_PRED_PLANAR32
+    INTRA_PRED_PLANAR32
+    INTRA_PRED_PLANAR32
+    dec             r3
+    jnz             .loop
+    RET
+
+;---------------------------------------------------------------------------------------
+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel*srcPix, int, int filter)
+;---------------------------------------------------------------------------------------
+; AVX2 32x32 planar prediction (x86-64 only: uses m8..m10). As in the 16x16
+; AVX2 kernel, each macro invocation produces two rows by splitting the
+; broadcast left-sample word into its low and high bytes.
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal intra_pred_planar32, 3,4,11
+    mova            m6, [pw_00ff]               ; low-byte mask
+    vpbroadcastw    m3, [r2 + 33]               ; topRight   = above[32]
+    vpbroadcastw    m2, [r2 + 97]               ; bottomLeft = left[32]
+    pand            m3, m6
+    pand            m2, m6
+
+    pmullw          m0, m3, [multiL]            ; (x + 1) * topRight
+    pmullw          m3, [multiH2]               ; (x + 1) * topRight
+
+    paddw           m0, m2
+    paddw           m3, m2
+    paddw           m0, [pw_32]                 ; rounding term (1 << (shift - 1))
+    paddw           m3, [pw_32]
+
+    pmovzxbw        m4, [r2 + 1]                ; above[0..15]
+    pmovzxbw        m1, [r2 + 17]               ; above[16..31]
+    pmullw          m5, m4, [pw_31]             ; (blkSize - 1 - y) * above[x]
+    paddw           m0, m5
+    psubw           m5, m2, m4                  ; row increment, columns 0..15
+    psubw           m2, m1                      ; row increment, columns 16..31
+    pmullw          m1, [pw_31]
+    paddw           m3, m1
+    mova            m1, m5
+
+    add             r2, 65                      ; (2 * blkSize + 1)
+    mova            m9, [pw_planar32_mul]
+    mova            m10, [pw_planar16_mul]
+
+; Two rows per invocation: m4 = left[y] (low bytes), m7 = left[y + 1].
+%macro INTRA_PRED_PLANAR32_AVX2 0
+    vpbroadcastw    m4, [r2]
+    vpsrlw          m7, m4, 8                   ; left[y + 1]
+    pand            m4, m6                      ; left[y]
+
+    pmullw          m5, m4, m9
+    pmullw          m4, m4, m10
+    paddw           m5, m0
+    paddw           m4, m3
+    paddw           m0, m1                      ; advance accumulators to row y + 1
+    paddw           m3, m2
+    psraw           m5, 6                       ; >> (log2(blkSize) + 1)
+    psraw           m4, 6
+    packuswb        m5, m4
+    pmullw          m8, m7, m9
+    pmullw          m7, m7, m10
+    vpermq          m5, m5, 11011000b           ; undo cross-lane interleave from pack
+    paddw           m8, m0
+    paddw           m7, m3
+    paddw           m0, m1                      ; advance accumulators to row y + 2
+    paddw           m3, m2
+    psraw           m8, 6
+    psraw           m7, 6
+    packuswb        m8, m7
+    add             r2, 2
+    vpermq          m8, m8, 11011000b
+
+    movu            [r0], m5
+    movu            [r0 + r1], m8
+    lea             r0, [r0 + r1 * 2]
+%endmacro
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+    INTRA_PRED_PLANAR32_AVX2
+%undef INTRA_PRED_PLANAR32_AVX2
+    RET
+%endif ;; ARCH_X86_64 == 1
+
+;-----------------------------------------------------------------------------------------
+; void intraPredAng4(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;-----------------------------------------------------------------------------------------
+; Modes 2 and 34 are exact 45-degree copies: each row is 4 consecutive
+; reference bytes, shifted one further per row. r2 + 2 selects the above
+; reference (mode 34), r2 + 10 the left reference (mode 2).
+INIT_XMM ssse3
+cglobal intra_pred_ang4_2, 3,5,3
+    lea         r4, [r2 + 2]
+    add         r2, 10
+    cmp         r3m, byte 34
+    cmove       r2, r4
+
+    movh        m0, [r2]
+    movd        [r0], m0
+    palignr     m1, m0, 1
+    movd        [r0 + r1], m1
+    palignr     m2, m0, 2
+    movd        [r0 + r1 * 2], m2
+    lea         r1, [r1 * 3]
+    psrldq      m0, 3
+    movd        [r0 + r1], m0
+    RET
+
+; Mode 3 / 33. Builds interleaved sample pairs and filters them with the
+; ang_table coefficients [26, 20, 14, 8]; falls through to .do_filter4x4,
+; the shared tail used by all the fractional-angle intra_pred_ang4_* kernels.
+INIT_XMM sse4
+cglobal intra_pred_ang4_3, 3,5,5
+    mov         r4, 1
+    cmp         r3m, byte 33    ; ZF also steers .do_filter4x4 (no transpose for mode 33)
+    mov         r3, 9
+    cmove       r3, r4          ; reference offset: above (1) or left (9)
+
+    movh        m0, [r2 + r3]   ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 1       ; [x 8 7 6 5 4 3 2]
+    punpcklbw   m0, m1          ; [x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m1, m0, 2       ; [x x x x x x x x 6 5 5 4 4 3 3 2]
+    palignr     m2, m0, 4       ; [x x x x x x x x 7 6 6 5 5 4 4 3]
+    palignr     m3, m0, 6       ; [x x x x x x x x 8 7 7 6 6 5 5 4]
+    punpcklqdq  m0, m1
+    punpcklqdq  m2, m3
+
+    lea         r3, [ang_table + 20 * 16]
+    movh        m3, [r3 + 6 * 16]   ; [26]
+    movhps      m3, [r3]            ; [20]
+    movh        m4, [r3 - 6 * 16]   ; [14]
+    movhps      m4, [r3 - 12 * 16]  ; [ 8]
+    jmp        .do_filter4x4
+
+    ; NOTE: share path, input is m0=[1 0], m2=[3 2], m3,m4=coef, flag_z=no_transpose
+ALIGN 16
+.do_filter4x4:
+    mova        m1, [pw_1024]   ; pmulhrsw by 1024 = rounded >> 5
+
+    pmaddubsw   m0, m3
+    pmulhrsw    m0, m1
+    pmaddubsw   m2, m4
+    pmulhrsw    m2, m1
+    packuswb    m0, m2
+
+    ; NOTE: mode 33 doesn't reorder. UNSAFE: relies on ZF surviving from the caller's
+    ; cmp, but no instruction on this path affects the eflags register before the jz.
+    jz         .store
+
+    ; transpose 4x4
+    pshufb      m0, [c_trans_4x4]
+
+.store:
+    ; TODO: use pextrd here after intrinsic ssse3 removed
+    movd        [r0], m0
+    pextrd      [r0 + r1], m0, 1
+    pextrd      [r0 + r1 * 2], m0, 2
+    lea         r1, [r1 * 3]
+    pextrd      [r0 + r1], m0, 3
+    RET
+
+; Mode 4 / 32: coefficients [21, 10, 31, 20]; tail-calls the shared
+; .do_filter4x4 in intra_pred_ang4_3 (ZF from the cmp selects transpose).
+cglobal intra_pred_ang4_4, 3,5,5
+    xor         r4, r4
+    inc         r4
+    cmp         r3m, byte 32
+    mov         r3, 9
+    cmove       r3, r4          ; reference offset: above (1) or left (9)
+
+    movh        m0, [r2 + r3]    ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 1       ; [x 8 7 6 5 4 3 2]
+    punpcklbw   m0, m1          ; [x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m1, m0, 2       ; [x x x x x x x x 6 5 5 4 4 3 3 2]
+    palignr     m3, m0, 4       ; [x x x x x x x x 7 6 6 5 5 4 4 3]
+    punpcklqdq  m0, m1
+    punpcklqdq  m2, m1, m3
+
+    lea         r3, [ang_table + 18 * 16]
+    movh        m3, [r3 +  3 * 16]  ; [21]
+    movhps      m3, [r3 -  8 * 16]  ; [10]
+    movh        m4, [r3 + 13 * 16]  ; [31]
+    movhps      m4, [r3 +  2 * 16]  ; [20]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 5 / 31: coefficients [17, 2, 19, 4]; tail-calls the shared .do_filter4x4.
+cglobal intra_pred_ang4_5, 3,5,5
+    xor         r4, r4
+    inc         r4
+    cmp         r3m, byte 31
+    mov         r3, 9
+    cmove       r3, r4          ; reference offset: above (1) or left (9)
+
+    movh        m0, [r2 + r3]    ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 1       ; [x 8 7 6 5 4 3 2]
+    punpcklbw   m0, m1          ; [x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m1, m0, 2       ; [x x x x x x x x 6 5 5 4 4 3 3 2]
+    palignr     m3, m0, 4       ; [x x x x x x x x 7 6 6 5 5 4 4 3]
+    punpcklqdq  m0, m1
+    punpcklqdq  m2, m1, m3
+
+    lea         r3, [ang_table + 10 * 16]
+    movh        m3, [r3 +  7 * 16]  ; [17]
+    movhps      m3, [r3 -  8 * 16]  ; [ 2]
+    movh        m4, [r3 +  9 * 16]  ; [19]
+    movhps      m4, [r3 -  6 * 16]  ; [ 4]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 6 / 30: coefficients [13, 26, 7, 20]; tail-calls the shared .do_filter4x4.
+cglobal intra_pred_ang4_6, 3,5,5
+    xor         r4, r4
+    inc         r4
+    cmp         r3m, byte 30
+    mov         r3, 9
+    cmove       r3, r4          ; reference offset: above (1) or left (9)
+
+    movh        m0, [r2 + r3]    ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 1       ; [x 8 7 6 5 4 3 2]
+    punpcklbw   m0, m1          ; [x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m2, m0, 2       ; [x x x x x x x x 6 5 5 4 4 3 3 2]
+    punpcklqdq  m0, m0
+    punpcklqdq  m2, m2
+
+    lea         r3, [ang_table + 19 * 16]
+    movh        m3, [r3 -  6 * 16]  ; [13]
+    movhps      m3, [r3 +  7 * 16]  ; [26]
+    movh        m4, [r3 - 12 * 16]  ; [ 7]
+    movhps      m4, [r3 +  1 * 16]  ; [20]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 7 / 29: coefficients [9, 18, 27, 4]; tail-calls the shared .do_filter4x4.
+cglobal intra_pred_ang4_7, 3,5,5
+    xor         r4, r4
+    inc         r4
+    cmp         r3m, byte 29
+    mov         r3, 9
+    cmove       r3, r4          ; reference offset: above (1) or left (9)
+
+    movh        m0, [r2 + r3]    ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 1       ; [x 8 7 6 5 4 3 2]
+    punpcklbw   m0, m1          ; [x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m3, m0, 2       ; [x x x x x x x x 6 5 5 4 4 3 3 2]
+    punpcklqdq  m2, m0, m3
+    punpcklqdq  m0, m0
+
+    lea         r3, [ang_table + 20 * 16]
+    movh        m3, [r3 - 11 * 16]  ; [ 9]
+    movhps      m3, [r3 -  2 * 16]  ; [18]
+    movh        m4, [r3 +  7 * 16]  ; [27]
+    movhps      m4, [r3 - 16 * 16]  ; [ 4]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 8 / 28: coefficients [5, 10, 15, 20]; all rows use the same sample pair
+; (the angle advances less than one whole sample across 4 rows).
+cglobal intra_pred_ang4_8, 3,5,5
+    xor         r4, r4
+    inc         r4
+    cmp         r3m, byte 28
+    mov         r3, 9
+    cmove       r3, r4          ; reference offset: above (1) or left (9)
+
+    movh        m0, [r2 + r3]    ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 1       ; [x 8 7 6 5 4 3 2]
+    punpcklbw   m0, m1          ; [x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    punpcklqdq  m0, m0
+    mova        m2, m0
+
+    lea         r3, [ang_table + 13 * 16]
+    movh        m3, [r3 -  8 * 16]  ; [ 5]
+    movhps      m3, [r3 -  3 * 16]  ; [10]
+    movh        m4, [r3 +  2 * 16]  ; [15]
+    movhps      m4, [r3 +  7 * 16]  ; [20]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 9 / 27: coefficients [2, 4, 6, 8]; all rows use the same sample pair.
+cglobal intra_pred_ang4_9, 3,5,5
+    xor         r4, r4
+    inc         r4
+    cmp         r3m, byte 27
+    mov         r3, 9
+    cmove       r3, r4          ; reference offset: above (1) or left (9)
+
+    movh        m0, [r2 + r3]    ; [8 7 6 5 4 3 2 1]
+    palignr     m1, m0, 1       ; [x 8 7 6 5 4 3 2]
+    punpcklbw   m0, m1          ; [x 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    punpcklqdq  m0, m0
+    mova        m2, m0
+
+    lea         r3, [ang_table + 4 * 16]
+    movh        m3, [r3 -  2 * 16]  ; [ 2]
+    movhps      m3, [r3 -  0 * 16]  ; [ 4]
+    movh        m4, [r3 +  2 * 16]  ; [ 6]
+    movhps      m4, [r3 +  4 * 16]  ; [ 8]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 10 (pure horizontal): each row replicates one left-column sample. When
+; bFilter (r4m) is set, the first row is additionally smoothed by adding
+; (above[x] - topLeft) >> 1 before it is stored.
+cglobal intra_pred_ang4_10, 3,3,4
+    movd        m0, [r2 + 9]            ; left[0..3]
+    pshufb      m0, [pb_unpackbd1]      ; replicate each left sample across a dword
+    pshufd      m1, m0, 1               ; row 1
+    movhlps     m2, m0                  ; row 2
+    pshufd      m3, m0, 3               ; row 3
+    movd        [r0 + r1], m1
+    movd        [r0 + r1 * 2], m2
+    lea         r1, [r1 * 3]
+    movd        [r0 + r1], m3
+    cmp         r4m, byte 0
+    jz          .quit
+
+    ; filter
+    pmovzxbw    m0, m0                  ; [-1 -1 -1 -1]
+    movh        m1, [r2]                ; [4 3 2 1 0]
+    pshufb      m2, m1, [pb_0_8]        ; [0 0 0 0]
+    pshufb      m1, [pb_unpackbw1]      ; [4 3 2 1]
+    psubw       m1, m2
+    psraw       m1, 1
+    paddw       m0, m1
+    packuswb    m0, m0
+.quit:
+    movd        [r0], m0                ; row 0 (filtered, or plain copy if no filter)
+    RET
+
+; Mode 26 (pure vertical): every row copies above[0..3]. When bFilter (r4m) is
+; set, the first column is re-written with above[0] + ((left[y] - topLeft) >> 1),
+; stored byte by byte.
+INIT_XMM sse4
+cglobal intra_pred_ang4_26, 3,4,3
+    movd        m0, [r2 + 1]            ; above[0..3]
+
+    ; store
+    movd        [r0], m0
+    movd        [r0 + r1], m0
+    movd        [r0 + r1 * 2], m0
+    lea         r3, [r1 * 3]
+    movd        [r0 + r3], m0
+
+    ; filter
+    cmp         r4m, byte 0
+    jz         .quit
+
+    pshufb      m0, [pb_0_8]            ; [ 1  1  1  1]
+    movh        m1, [r2 + 8]                ; [-4 -3 -2 -1 0]
+    pinsrb      m1, [r2], 0             ; insert topLeft as element 0
+    pshufb      m2, m1, [pb_0_8]        ; [0 0 0 0]
+    pshufb      m1, [pb_unpackbw1]      ; [-4 -3 -2 -1]
+    psubw       m1, m2
+    psraw       m1, 1
+    paddw       m0, m1
+    packuswb    m0, m0
+
+    pextrb      [r0], m0, 0
+    pextrb      [r0 + r1], m0, 1
+    pextrb      [r0 + r1 * 2], m0, 2
+    pextrb      [r0 + r3], m0, 3
+.quit:
+    RET
+
+; Mode 11 / 25 (shallow negative angle): reference starts at topLeft (pinsrb
+; splices it in); coefficients [24, 26, 28, 30]; shared .do_filter4x4 tail.
+cglobal intra_pred_ang4_11, 3,5,5
+    xor         r4, r4
+    cmp         r3m, byte 25
+    mov         r3, 8
+    cmove       r3, r4          ; reference offset: above (0) or left (8)
+
+    movh        m0, [r2 + r3]        ; [x x x 4 3 2 1 0]
+    pinsrb      m0, [r2], 0          ; element 0 = topLeft
+    palignr     m1, m0, 1       ; [x x x x 4 3 2 1]
+    punpcklbw   m0, m1          ; [x x x x x x x x 4 3 3 2 2 1 1 0]
+    punpcklqdq  m0, m0
+    mova        m2, m0
+
+    lea         r3, [ang_table + 24 * 16]
+
+    movh        m3, [r3 +  6 * 16]  ; [24]
+    movhps      m3, [r3 +  4 * 16]  ; [26]
+    movh        m4, [r3 +  2 * 16]  ; [28]
+    movhps      m4, [r3 +  0 * 16]  ; [30]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 12 / 24: coefficients [27, 22, 17, 12]; shared .do_filter4x4 tail.
+cglobal intra_pred_ang4_12, 3,5,5
+    xor         r4, r4
+    cmp         r3m, byte 24
+    mov         r3, 8
+    cmove       r3, r4          ; reference offset: above (0) or left (8)
+
+    movh        m0, [r2 + r3]        ; [x x x 4 3 2 1 0]
+    pinsrb      m0, [r2], 0          ; element 0 = topLeft
+    palignr     m1, m0, 1       ; [x x x x 4 3 2 1]
+    punpcklbw   m0, m1          ; [x x x x x x x x 4 3 3 2 2 1 1 0]
+    punpcklqdq  m0, m0
+    mova        m2, m0
+
+    lea         r3, [ang_table + 20 * 16]
+    movh        m3, [r3 +  7 * 16]  ; [27]
+    movhps      m3, [r3 +  2 * 16]  ; [22]
+    movh        m4, [r3 -  3 * 16]  ; [17]
+    movhps      m4, [r3 -  8 * 16]  ; [12]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 13 / 23 (steeper negative angle): one sample is projected from the
+; opposite reference array ([r2 + r3 + 4]); coefficients [23, 14, 5, 28].
+cglobal intra_pred_ang4_13, 4,5,5
+    xor         r4, r4
+    cmp         r3m, byte 23
+    mov         r3, 8
+    jz          .next
+    xchg        r3, r4          ; swap so r4 indexes the main, r3 the opposite array
+.next:
+    movh        m1, [r2 + r4 - 1]    ; [x x 4 3 2 1 0 x]
+    pinsrb      m1, [r2], 1          ; element 1 = topLeft
+    palignr     m0, m1, 1       ; [x x x 4 3 2 1 0]
+    palignr     m2, m1, 2       ; [x x x x 4 3 2 1]
+    pinsrb      m1, [r2 + r3 + 4], 0 ; projected sample from the other reference
+    punpcklbw   m1, m0          ; [3 2 2 1 1 0 0 x]
+    punpcklbw   m0, m2          ; [4 3 3 2 2 1 1 0]
+    punpcklqdq  m2, m0, m1
+    punpcklqdq  m0, m0
+
+    lea         r3, [ang_table + 21 * 16]
+    movh        m3, [r3 +  2 * 16]  ; [23]
+    movhps      m3, [r3 -  7 * 16]  ; [14]
+    movh        m4, [r3 - 16 * 16]  ; [ 5]
+    movhps      m4, [r3 +  7 * 16]  ; [28]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 14 / 22: one projected sample from the opposite reference
+; ([r2 + r3 + 2]); coefficients [19, 6, 25, 12].
+cglobal intra_pred_ang4_14, 4,5,5
+    xor         r4, r4
+    cmp         r3m, byte 22
+    mov         r3, 8
+    jz          .next
+    xchg        r3, r4          ; swap so r4 indexes the main, r3 the opposite array
+.next:
+    movh        m2, [r2 + r4 - 1]    ; [x x 4 3 2 1 0 x]
+    pinsrb      m2, [r2], 1          ; element 1 = topLeft
+    palignr     m0, m2, 1       ; [x x x 4 3 2 1 0]
+    palignr     m1, m2, 2       ; [x x x x 4 3 2 1]
+    pinsrb      m2, [r2 + r3 + 2], 0 ; projected sample from the other reference
+    punpcklbw   m2, m0          ; [3 2 2 1 1 0 0 x]
+    punpcklbw   m0, m1          ; [4 3 3 2 2 1 1 0]
+    punpcklqdq  m0, m0
+    punpcklqdq  m2, m2
+
+    lea         r3, [ang_table + 19 * 16]
+    movh        m3, [r3 +  0 * 16]  ; [19]
+    movhps      m3, [r3 - 13 * 16]  ; [ 6]
+    movh        m4, [r3 +  6 * 16]  ; [25]
+    movhps      m4, [r3 -  7 * 16]  ; [12]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 15 / 21: two projected samples from the opposite reference
+; ([r2 + r3 + 2] and [r2 + r3 + 4]); coefficients [15, 30, 13, 28].
+cglobal intra_pred_ang4_15, 4,5,5
+    xor         r4, r4
+    cmp         r3m, byte 21
+    mov         r3, 8
+    jz          .next
+    xchg        r3, r4          ; swap so r4 indexes the main, r3 the opposite array
+.next:
+    movh        m2, [r2 + r4 - 1]    ; [x x 4 3 2 1 0 x]
+    pinsrb      m2, [r2], 1          ; element 1 = topLeft
+    palignr     m0, m2, 1       ; [x x x 4 3 2 1 0]
+    palignr     m1, m2, 2       ; [x x x x 4 3 2 1]
+    pinsrb      m2, [r2 + r3 + 2], 0 ; first projected sample
+    pslldq      m3, m2, 1       ; [x 4 3 2 1 0 x y]
+    pinsrb      m3, [r2 + r3 + 4], 0 ; second projected sample
+    punpcklbw   m4, m3, m2      ; [2 1 1 0 0 x x y]
+    punpcklbw   m2, m0          ; [3 2 2 1 1 0 0 x]
+    punpcklbw   m0, m1          ; [4 3 3 2 2 1 1 0]
+    punpcklqdq  m0, m2
+    punpcklqdq  m2, m4
+
+    lea         r3, [ang_table + 23 * 16]
+    movh        m3, [r3 -  8 * 16]  ; [15]
+    movhps      m3, [r3 +  7 * 16]  ; [30]
+    movh        m4, [r3 - 10 * 16]  ; [13]
+    movhps      m4, [r3 +  5 * 16]  ; [28]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 16 / 20: two projected samples from the opposite reference
+; ([r2 + r3 + 2] and [r2 + r3 + 3]); coefficients [11, 22, 1, 12].
+cglobal intra_pred_ang4_16, 3,5,5
+    xor         r4, r4
+    cmp         r3m, byte 20
+    mov         r3, 8
+    jz          .next
+    xchg        r3, r4          ; swap so r4 indexes the main, r3 the opposite array
+.next:
+    movh        m2, [r2 + r4 - 1]    ; [x x 4 3 2 1 0 x]
+    pinsrb      m2, [r2], 1          ; element 1 = topLeft
+    palignr     m0, m2, 1       ; [x x x 4 3 2 1 0]
+    palignr     m1, m2, 2       ; [x x x x 4 3 2 1]
+    pinsrb      m2, [r2 + r3 + 2], 0 ; first projected sample
+    pslldq      m3, m2, 1       ; [x 4 3 2 1 0 x y]
+    pinsrb      m3, [r2 + r3 + 3], 0 ; second projected sample
+    punpcklbw   m4, m3, m2      ; [2 1 1 0 0 x x y]
+    punpcklbw   m2, m0          ; [3 2 2 1 1 0 0 x]
+    punpcklbw   m0, m1          ; [4 3 3 2 2 1 1 0]
+    punpcklqdq  m0, m2
+    punpcklqdq  m2, m4
+
+    lea         r3, [ang_table + 19 * 16]
+    movh        m3, [r3 -  8 * 16]  ; [11]
+    movhps      m3, [r3 +  3 * 16]  ; [22]
+    movh        m4, [r3 - 18 * 16]  ; [ 1]
+    movhps      m4, [r3 -  7 * 16]  ; [12]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 17 / 19 (near-diagonal negative): three projected samples from the
+; opposite reference; coefficients [6, 12, 18, 24].
+cglobal intra_pred_ang4_17, 3,5,5
+    xor         r4, r4
+    cmp         r3m, byte 19
+    mov         r3, 8
+    jz          .next
+    xchg        r3, r4          ; swap so r4 indexes the main, r3 the opposite array
+.next:
+    movh        m3, [r2 + r4 - 1]    ; [- - 4 3 2 1 0 x]
+    pinsrb      m3, [r2], 1          ; element 1 = topLeft
+    palignr     m0, m3, 1       ; [- - - 4 3 2 1 0]
+    palignr     m1, m3, 2       ; [- - - - 4 3 2 1]
+    mova        m4, m0
+    punpcklbw   m0, m1          ; [4 3 3 2 2 1 1 0]
+    pinsrb      m3, [r2 + r3 + 1], 0 ; first projected sample
+    punpcklbw   m1, m3, m4      ; [3 2 2 1 1 0 0 x]
+    punpcklqdq  m0, m1
+
+    pslldq      m2, m3, 1       ; [- 4 3 2 1 0 x y]
+    pinsrb      m2, [r2 + r3 + 2], 0 ; second projected sample
+    pslldq      m1, m2, 1       ; [4 3 2 1 0 x y z]
+    pinsrb      m1, [r2 + r3 + 4], 0 ; third projected sample
+    punpcklbw   m1, m2          ; [1 0 0 x x y y z]
+    punpcklbw   m2, m3          ; [2 1 1 0 0 x x y]
+    punpcklqdq  m2, m1
+
+    lea         r3, [ang_table + 14 * 16]
+    movh        m3, [r3 -  8 * 16]  ; [ 6]
+    movhps      m3, [r3 -  2 * 16]  ; [12]
+    movh        m4, [r3 +  4 * 16]  ; [18]
+    movhps      m4, [r3 + 10 * 16]  ; [24]
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang4_3 %+ SUFFIX %+ .do_filter4x4)
+
+; Mode 18 (exact diagonal): rows read from a combined reference built as
+; [above | topLeft | reversed left]. The byte just before the left array is
+; temporarily overwritten with topLeft so one dword load + bswap yields the
+; reversed left part; the clobbered source bytes are restored before RET
+; (a word write covers the single byte actually modified).
+cglobal intra_pred_ang4_18, 3,5,1
+    mov         r4d, [r2 + 8]           ; save source bytes about to be clobbered
+    mov         r3b, byte [r2]          ; topLeft
+    mov         [r2 + 8], r3b
+    mov         r3d, [r2 + 8]
+    bswap       r3d                     ; reverse {topLeft, left[0..2]} into raster order
+    movd        m0, r3d
+
+    pinsrd      m0, [r2 + 1], 1     ; [- 3 2 1 0 -1 -2 -3]
+    lea         r3, [r1 * 3]
+    movd        [r0 + r3], m0
+    psrldq      m0, 1
+    movd        [r0 + r1 * 2], m0
+    psrldq      m0, 1
+    movd        [r0 + r1], m0
+    psrldq      m0, 1
+    movd        [r0], m0
+    mov         [r2 + 8], r4w           ; restore the clobbered source bytes
+    RET
+
+;-----------------------------------------------------------------------------------------
+; void intraPredAng8(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;-----------------------------------------------------------------------------------------
+; Modes 2 and 34 for 8x8: exact 45-degree copy, each row one reference byte
+; further. r2 + 2 selects the above reference (mode 34), r2 + 18 the left.
+INIT_XMM ssse3
+cglobal intra_pred_ang8_2, 3,5,2
+    lea         r4,             [r2 + 2]
+    add         r2,             18
+    cmp         r3m,            byte 34
+    cmove       r2,             r4
+    movu        m0,             [r2]
+    lea         r4,             [r1 * 3]
+
+    movh        [r0],           m0
+    palignr     m1,             m0, 1
+    movh        [r0 + r1],      m1
+    palignr     m1,             m0, 2
+    movh        [r0 + r1 * 2],  m1
+    palignr     m1,             m0, 3
+    movh        [r0 + r4],      m1
+    palignr     m1,             m0, 4
+    lea         r0,             [r0 + r1 * 4]
+    movh        [r0],           m1
+    palignr     m1,             m0, 5
+    movh        [r0 + r1],      m1
+    palignr     m1,             m0, 6
+    movh        [r0 + r1 * 2],  m1
+    palignr     m1,             m0, 7
+    movh        [r0 + r4],      m1
+    RET
+
+; Mode 3 / 33 for 8x8. Filters the unpacked reference with per-row ang_table
+; coefficients [26, 20, 14, 8, 2, 28, 22, 16], then falls through to
+; .transpose8x8 -- the shared tail used by intra_pred_ang8_4/5/6 as well.
+INIT_XMM sse4
+cglobal intra_pred_ang8_3, 3,5,8
+    lea         r4,        [r2 + 1]
+    add         r2,        17
+    cmp         r3m,       byte 33                 ; ZF steers .transpose8x8 (no transpose for mode 33)
+    cmove       r2,        r4                      ; reference: above (mode 33) or left
+    lea         r3,        [ang_table + 22 * 16]
+    lea         r4,        [ang_table +  8 * 16]
+    mova        m3,        [pw_1024]               ; pmulhrsw by 1024 = rounded >> 5
+
+    movu        m0,        [r2]                       ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m0, 1                      ; [x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+
+    punpckhbw   m2,        m0, m1                     ; [x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m0,        m1                         ; [9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m1,        m2, m0, 2                  ; [10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2]
+
+    pmaddubsw   m4,        m0, [r3 + 4 * 16]          ; [26]
+    pmulhrsw    m4,        m3
+    pmaddubsw   m1,        [r3 - 2 * 16]              ; [20]
+    pmulhrsw    m1,        m3
+    packuswb    m4,        m1
+
+    palignr     m5,        m2, m0, 4                  ; [11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3]
+
+    pmaddubsw   m5,        [r3 - 8 * 16]              ; [14]
+    pmulhrsw    m5,        m3
+
+    palignr     m6,        m2, m0, 6                  ; [12 11 11 10 10 9 9 8 8 7 7 6 6 5 5 4]
+
+    pmaddubsw   m6,        [r4]                       ; [ 8]
+    pmulhrsw    m6,        m3
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m0, 8                  ; [13 12 12 11 11 10 10 9 9 8 8 7 7 6 6 5]
+
+    pmaddubsw   m6,        m1, [r4 - 6 * 16]          ; [ 2]
+    pmulhrsw    m6,        m3
+
+    pmaddubsw   m1,        [r3 + 6 * 16]              ; [28]
+    pmulhrsw    m1,        m3
+    packuswb    m6,        m1
+
+    palignr     m1,        m2, m0, 10                 ; [14 13 13 12 12 11 11 10 10 9 9 8 8 7 7 6]
+
+    pmaddubsw   m1,        [r3]                       ; [22]
+    pmulhrsw    m1,        m3
+
+    palignr     m2,        m0, 12                     ; [15 14 14 13 13 12 12 11 11 10 10 9 9 8 8 7]
+
+    pmaddubsw   m2,        [r3 - 6 * 16]              ; [16]
+    pmulhrsw    m2,        m3
+    packuswb    m1,        m2
+    jmp         .transpose8x8
+
+; Shared tail: rows in m4, m5, m6, m1. ZF (from the caller's mode cmp) set
+; means columns are already rows -- skip the transpose and just store.
+ALIGN 16
+.transpose8x8:
+    jz         .store
+
+    ; transpose 8x8
+    punpckhbw   m0,        m4, m5
+    punpcklbw   m4,        m5
+    punpckhbw   m2,        m4, m0
+    punpcklbw   m4,        m0
+
+    punpckhbw   m0,        m6, m1
+    punpcklbw   m6,        m1
+    punpckhbw   m1,        m6, m0
+    punpcklbw   m6,        m0
+
+    punpckhdq   m5,        m4, m6
+    punpckldq   m4,        m6
+    punpckldq   m6,        m2, m1
+    punpckhdq   m2,        m1
+    mova        m1,        m2
+
+.store:
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m4
+    movhps      [r0 + r1],       m4
+    movh        [r0 + r1 * 2],   m5
+    movhps      [r0 + r4],       m5
+    add         r0,              r4
+    movh        [r0 + r1],       m6
+    movhps      [r0 + r1 * 2],   m6
+    movh        [r0 + r4],       m1
+    movhps      [r0 + r1 * 4],   m1
+    RET
+
+; Mode 4 / 32 for 8x8: coefficients [21, 10, 31, 20, 9, 30, 19, 8]; tail-calls
+; the shared .transpose8x8 in intra_pred_ang8_3.
+cglobal intra_pred_ang8_4, 3,5,8
+    lea         r4,        [r2 + 1]
+    add         r2,        17
+    cmp         r3m,       byte 32
+    cmove       r2,        r4                      ; reference: above (mode 32) or left
+    lea         r3,        [ang_table + 24 * 16]
+    lea         r4,        [ang_table + 10 * 16]
+    mova        m3,        [pw_1024]
+
+    movu        m0,        [r2]                       ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m0, 1                      ; [x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+
+    punpckhbw   m2,        m0, m1                     ; [x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m0,        m1                         ; [9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m1,        m2, m0, 2                  ; [10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2]
+    mova        m5,        m1
+
+    pmaddubsw   m4,        m0, [r3 - 3 * 16]          ; [21]
+    pmulhrsw    m4,        m3
+    pmaddubsw   m1,        [r4]                       ; [10]
+    pmulhrsw    m1,        m3
+    packuswb    m4,        m1
+
+    pmaddubsw   m5,        [r3 + 7 * 16]              ; [31]
+    pmulhrsw    m5,        m3
+
+    palignr     m6,        m2, m0, 4                  ; [11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3]
+
+    pmaddubsw   m6,        [r3 - 4 * 16]              ; [ 20]
+    pmulhrsw    m6,        m3
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m0, 6                  ; [12 11 11 10 10 9 9 8 8 7 7 6 6 5 5 4]
+
+    pmaddubsw   m6,        m1, [r4 - 1 * 16]          ; [ 9]
+    pmulhrsw    m6,        m3
+
+    pmaddubsw   m1,        [r3 + 6 * 16]              ; [30]
+    pmulhrsw    m1,        m3
+    packuswb    m6,        m1
+
+    palignr     m1,        m2, m0, 8                  ; [13 12 12 11 11 10 10 9 9 8 8 7 7 6 6 5]
+
+    pmaddubsw   m1,        [r3 - 5 * 16]              ; [19]
+    pmulhrsw    m1,        m3
+
+    palignr     m2,        m0, 10                     ; [14 13 13 12 12 11 11 10 10 9 9 8 8 7 7 6]
+
+    pmaddubsw   m2,        [r4 - 2 * 16]              ; [8]
+    pmulhrsw    m2,        m3
+    packuswb    m1,        m2
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; Mode 5 / 31 for 8x8: coefficients [17, 2, 19, 4, 21, 6, 23, 8]; tail-calls
+; the shared .transpose8x8 in intra_pred_ang8_3.
+cglobal intra_pred_ang8_5, 3,5,8
+    lea         r4,        [r2 + 1]
+    add         r2,        17
+    cmp         r3m,       byte 31
+    cmove       r2,        r4                      ; reference: above (mode 31) or left
+    lea         r3,        [ang_table + 17 * 16]
+    lea         r4,        [ang_table +  2 * 16]
+    mova        m3,        [pw_1024]
+
+    movu        m0,        [r2]                       ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m0, 1                      ; [x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+
+    punpckhbw   m2,        m0, m1                     ; [x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m0,        m1                         ; [9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m1,        m2, m0, 2                  ; [10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2]
+    mova        m5,        m1
+
+    pmaddubsw   m4,        m0, [r3]                   ; [17]
+    pmulhrsw    m4,        m3
+    pmaddubsw   m1,        [r4]                       ; [2]
+    pmulhrsw    m1,        m3
+    packuswb    m4,        m1
+
+    pmaddubsw   m5,        [r3 + 2 * 16]              ; [19]
+    pmulhrsw    m5,        m3
+
+    palignr     m6,        m2, m0, 4                  ; [11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3]
+    mova        m1,        m6
+
+    pmaddubsw   m1,        [r4 + 2 * 16]              ; [4]
+    pmulhrsw    m1,        m3
+    packuswb    m5,        m1
+
+    pmaddubsw   m6,        [r3 + 4 * 16]              ; [21]
+    pmulhrsw    m6,        m3
+
+    palignr     m1,        m2, m0, 6                  ; [12 11 11 10 10 9 9 8 8 7 7 6 6 5 5 4]
+
+    mova        m7,        m1
+    pmaddubsw   m7,        [r4 + 4 * 16]              ; [6]
+    pmulhrsw    m7,        m3
+    packuswb    m6,        m7
+
+    pmaddubsw   m1,        [r3 + 6 * 16]              ; [23]
+    pmulhrsw    m1,        m3
+
+    palignr     m2,        m0, 8                      ; [13 12 12 11 11 10 10 9 9 8 8 7 7 6 6 5]
+
+    pmaddubsw   m2,        [r4 + 6 * 16]              ; [8]
+    pmulhrsw    m2,        m3
+    packuswb    m1,        m2
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; Mode 6 / 30 for 8x8: coefficients [13, 26, 7, 20, 1, 14, 27, 8]; tail-calls
+; the shared .transpose8x8 in intra_pred_ang8_3.
+cglobal intra_pred_ang8_6, 3,5,8
+    lea         r4,        [r2 + 1]
+    add         r2,        17
+    cmp         r3m,       byte 30
+    cmove       r2,        r4                      ; reference: above (mode 30) or left
+    lea         r3,        [ang_table + 20 * 16]
+    lea         r4,        [ang_table +  8 * 16]
+    mova        m7,        [pw_1024]
+
+    movu        m0,        [r2]                       ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m0, 1                      ; [x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+
+    punpckhbw   m2,        m0, m1                     ; [x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m0,        m1                         ; [9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    mova        m1,        m0
+
+    pmaddubsw   m4,        m0, [r3 - 7 * 16]          ; [13]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 6 * 16]              ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m6,        m2, m0, 2                  ; [10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2]
+
+    pmaddubsw   m5,        m6, [r4 - 1 * 16]          ; [7]
+    pmulhrsw    m5,        m7
+
+    pmaddubsw   m6,        [r3]                       ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m0, 4                  ; [11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3]
+
+    pmaddubsw   m6,        m1, [r4 - 7 * 16]          ; [1]
+    pmulhrsw    m6,        m7
+
+    mova        m3,        m1
+    pmaddubsw   m3,        [r3 - 6 * 16]              ; [14]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+
+    pmaddubsw   m1,        [r3 + 7 * 16]              ; [27]
+    pmulhrsw    m1,        m7
+
+    palignr     m2,        m0, 6                      ; [12 11 11 10 10 9 9 8 8 7 7 6 6 5 5 4]
+
+    pmaddubsw   m2,        [r4]                       ; [8]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_7(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 angular intra prediction for the mirrored mode pair 7/29 (SSE4, 8bpp).
+; dirMode==29 selects src+1 as the reference, otherwise src+17.  Prediction:
+; pmaddubsw with ang_table coefficient rows (bracketed numbers) + pmulhrsw
+; pw_1024 (rounded >>5) on byte-interleaved reference pairs.  Results are
+; column-major; jumps into intra_pred_ang8_3's .transpose8x8 tail to
+; transpose and store.
+cglobal intra_pred_ang8_7, 3,5,8
+    lea         r4,        [r2 + 1]
+    add         r2,        17
+    cmp         r3m,       byte 29
+    cmove       r2,        r4
+    lea         r3,        [ang_table + 24 * 16]
+    lea         r4,        [ang_table +  6 * 16]
+    mova        m7,        [pw_1024]
+
+    movu        m0,        [r2]                       ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m0, 1                      ; [x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+
+    punpckhbw   m2,        m0, m1                     ; [x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m0,        m1                         ; [9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pmaddubsw   m4,        m0, [r4 + 3 * 16]          ; [9]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m0, [r3 - 6 * 16]          ; [18]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+
+    pmaddubsw   m5,        m0, [r3 + 3 * 16]          ; [27]
+    pmulhrsw    m5,        m7
+
+    palignr     m1,        m2, m0, 2                  ; [10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2]
+
+    pmaddubsw   m6,        m1, [r4 - 2 * 16]          ; [4]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m1, [r4 + 7 * 16]          ; [13]
+    pmulhrsw    m6,        m7
+
+    mova        m3,        m1
+    pmaddubsw   m3,        [r3 - 2 * 16]              ; [22]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+
+    pmaddubsw   m1,        [r3 + 7 * 16]              ; [31]
+    pmulhrsw    m1,        m7
+
+    palignr     m2,        m0, 4                      ; [11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3]
+
+    pmaddubsw   m2,        [r4 + 2 * 16]              ; [8]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_8(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 angular intra prediction for the mirrored mode pair 8/28 (SSE4, 8bpp).
+; dirMode==28 selects src+1, otherwise src+17.  This shallow angle only needs
+; two source alignments (m0 and m2 = m0 shifted by one pixel pair): six lines
+; come from m0 with increasing ang_table fractions [5..30] and two from m2
+; ([3],[8]).  Column-major results go through intra_pred_ang8_3's
+; .transpose8x8 tail.
+cglobal intra_pred_ang8_8, 3,5,8
+    lea         r4,        [r2 + 1]
+    add         r2,        17
+    cmp         r3m,       byte 28
+    cmove       r2,        r4
+    lea         r3,        [ang_table + 23 * 16]
+    lea         r4,        [ang_table +  8 * 16]
+    mova        m7,        [pw_1024]
+
+    movu        m0,        [r2]                       ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m0, 1                      ; [x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+
+    punpckhbw   m2,        m0, m1                     ; [x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m0,        m1                         ; [9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m2,        m0, 2                      ; [10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2]
+
+    pmaddubsw   m4,        m0, [r4 - 3 * 16]          ; [5]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m0, [r4 + 2 * 16]          ; [10]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+
+    pmaddubsw   m5,        m0, [r3 - 8 * 16]          ; [15]
+    pmulhrsw    m5,        m7
+
+    pmaddubsw   m6,        m0, [r3 - 3 * 16]          ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m0, [r3 + 2 * 16]          ; [25]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m0,        [r3 + 7 * 16]              ; [30]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m2, [r4 - 5 * 16]          ; [3]
+    pmulhrsw    m1,        m7
+
+    pmaddubsw   m2,        [r4]                       ; [8]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_9(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 angular intra prediction for the mirrored mode pair 9/27 (SSE4, 8bpp).
+; dirMode==27 selects src+1, otherwise src+17.  The angle is so shallow that
+; all eight lines use the same interleaved source m0, only the ang_table
+; fraction advances in steps of 2 ([2]..[16]); a single table base (r3) with
+; signed offsets covers all of them.  Column-major results are transposed and
+; stored by intra_pred_ang8_3's .transpose8x8 tail.
+cglobal intra_pred_ang8_9, 3,5,8
+    lea         r4,        [r2 + 1]
+    add         r2,        17
+    cmp         r3m,       byte 27
+    cmove       r2,        r4
+    lea         r3,        [ang_table + 10 * 16]
+    mova        m7,        [pw_1024]
+
+    movu        m0,        [r2]                       ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m0, 1                      ; [x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+
+    punpcklbw   m0,        m1                         ; [9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pmaddubsw   m4,        m0, [r3 - 8 * 16]          ; [2]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m0, [r3 - 6 * 16]          ; [4]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+
+    pmaddubsw   m5,        m0, [r3 - 4 * 16]          ; [6]
+    pmulhrsw    m5,        m7
+
+    pmaddubsw   m6,        m0, [r3 - 2 * 16]          ; [8]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m0, [r3]                   ; [10]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m2,        m0, [r3 + 2 * 16]          ; [12]
+    pmulhrsw    m2,        m7
+    packuswb    m6,        m2
+
+    pmaddubsw   m1,        m0, [r3 + 4 * 16]          ; [14]
+    pmulhrsw    m1,        m7
+
+    pmaddubsw   m0,        [r3 + 6 * 16]              ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_10(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 pure-horizontal prediction (mode 10): each row is one left-neighbour
+; pixel (src+17..) broadcast across the row via pshufb pb_unpackbq.  Rows 1-7
+; are stored immediately; row 0 is stored last at .quit so that, when bFilter
+; (r4m) is non-zero, it can first be adjusted with half the difference between
+; the top reference row and the top-left pixel (psubw/psraw 1/paddw sequence),
+; with packuswb clamping to [0,255].
+cglobal intra_pred_ang8_10, 3,6,5
+    movh        m0,        [r2 + 17]
+    mova        m4,        [pb_unpackbq]
+    palignr     m1,        m0, 2
+    pshufb      m1,        m4
+    palignr     m2,        m0, 4
+    pshufb      m2,        m4
+    palignr     m3,        m0, 6
+    pshufb      m3,        m4
+    pshufb      m0,        m4
+
+    lea         r5,             [r1 * 3]
+    movhps      [r0 + r1],      m0
+    movh        [r0 + r1 * 2],  m1
+    movhps      [r0 + r5],      m1
+    lea         r3,             [r0 + r1 * 4]
+    movh        [r3],           m2
+    movhps      [r3 + r1],      m2
+    movh        [r3 + r1 * 2],  m3
+    movhps      [r3 + r5],      m3
+
+; filter
+    cmp         r4m, byte 0
+    jz         .quit
+
+    pmovzxbw    m0,        m0
+    movu        m1,        [r2]
+    palignr     m2,        m1, 1
+    pshufb      m1,        m4
+    pmovzxbw    m1,        m1
+    pmovzxbw    m2,        m2
+    psubw       m2,        m1
+    psraw       m2,        1
+    paddw       m0,        m2
+    paddw       m0,        m2
+    packuswb    m0,        m0
+
+.quit:
+    movh        [r0],      m0
+    RET
+
+; void intra_pred_ang8_26(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 pure-vertical prediction (mode 26): the top reference row (src+1) is
+; copied unchanged into all eight destination rows.  When bFilter (r4m) is
+; non-zero, the first column is then corrected per row with half the
+; difference between the left neighbours (src+17..) and the top-left pixel,
+; written back one byte at a time with pextrb.  Note the filter path reuses
+; r5 (= 3*stride) and r3 (= dst + 4*stride) computed in the copy section
+; above, so the eight pextrb stores cover rows 0-7, column 0.
+cglobal intra_pred_ang8_26, 3,6,3
+    movu        m2,             [r2]
+    palignr     m0,             m2, 1
+    lea         r5,             [r1 * 3]
+    movh        [r0],           m0
+    movh        [r0 + r1],      m0
+    movh        [r0 + r1 * 2],  m0
+    movh        [r0 + r5],      m0
+    lea         r3,             [r0 + r1 * 4]
+    movh        [r3],           m0
+    movh        [r3 + r1],      m0
+    movh        [r3 + r1 * 2],  m0
+    movh        [r3 + r5],      m0
+
+; filter
+    cmp         r4m, byte 0
+    jz         .quit
+
+    pshufb      m2,        [pb_unpackbq]
+    movhlps     m1,        m2
+    pmovzxbw    m2,        m2
+    movu        m0,        [r2 + 17]
+    pmovzxbw    m1,        m1
+    pmovzxbw    m0,        m0
+    psubw       m0,        m2
+    psraw       m0,        1
+    paddw       m1,        m0
+    packuswb    m1,        m1
+    pextrb      [r0],          m1, 0
+    pextrb      [r0 + r1],     m1, 1
+    pextrb      [r0 + r1 * 2], m1, 2
+    pextrb      [r0 + r5],     m1, 3
+    pextrb      [r3],          m1, 4
+    pextrb      [r3 + r1],     m1, 5
+    pextrb      [r3 + r1 * 2], m1, 6
+    pextrb      [r3 + r5],     m1, 7
+.quit:
+    RET
+
+; void intra_pred_ang8_11(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 angular intra prediction for the mirrored mode pair 11/25 (SSE4, 8bpp).
+; Offset selection differs from modes <10: r3 becomes 0 for dirMode==25 and
+; 16 otherwise, and the top-left pixel src[0] is spliced in at position 0
+; (pinsrb).  All eight lines share the interleaved source m0; only the
+; ang_table fraction decreases in steps of 2 ([30]..[16]).  Column-major
+; results are transposed/stored by intra_pred_ang8_3's .transpose8x8 tail.
+cglobal intra_pred_ang8_11, 3,5,8
+    xor         r4,        r4
+    cmp         r3m,       byte 25
+    mov         r3,        16
+    cmove       r3,        r4
+
+    movu        m0,        [r2 + r3]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m0,        [r2], 0
+    palignr     m1,        m0, 1                      ; [x 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+
+    punpcklbw   m0,        m1                         ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    lea         r3,        [ang_table + 23 * 16]
+    mova        m7,        [pw_1024]
+
+    pmaddubsw   m4,        m0, [r3 + 7 * 16]          ; [30]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m0, [r3 + 5 * 16]          ; [28]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+
+    pmaddubsw   m5,        m0, [r3 + 3 * 16]          ; [26]
+    pmulhrsw    m5,        m7
+
+    pmaddubsw   m6,        m0, [r3 + 1 * 16]          ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m0, [r3 - 1 * 16]          ; [22]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m2,        m0, [r3 - 3 * 16]          ; [20]
+    pmulhrsw    m2,        m7
+    packuswb    m6,        m2
+
+    pmaddubsw   m1,        m0, [r3 - 5 * 16]          ; [18]
+    pmulhrsw    m1,        m7
+
+    pmaddubsw   m0,        [r3 - 7 * 16]              ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_12(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 angular intra prediction for the mirrored mode pair 12/24 (SSE4, 8bpp).
+; r4/r3 are swapped unless dirMode==24, choosing which reference array is the
+; primary source and which supplies the projected pixel (pinsrb of
+; src[r3 + 6], labelled 'a' in the lane comments) for this negative-angle
+; mode.  Two interleaved alignments (m0, m2) feed pmaddubsw/pmulhrsw as in
+; the other modes; r4 is re-based mid-function to reach the low ang_table
+; rows ([2]..[12]).  Ends in intra_pred_ang8_3's .transpose8x8 tail.
+cglobal intra_pred_ang8_12, 3,5,8
+    xor         r4,        r4
+    cmp         r3m,       byte 24
+    mov         r3,        16
+    jz          .next
+    xchg        r3,        r4
+.next:
+
+    movu        m1,        [r2 + r4]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m1,        [r2], 0
+    pslldq      m0,        m1, 1                      ; [14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 a]
+    pinsrb      m0,        [r2 + r3 + 6], 0
+
+    lea         r4,        [ang_table + 22 * 16]
+    mova        m7,        [pw_1024]
+
+    punpckhbw   m2,        m0, m1                     ; [15 14 14 13 13 12 12 11 11 10 10 9 9 8 8 7]
+    punpcklbw   m0,        m1                         ; [7 6 6 5 5 4 4 3 3 2 2 1 1 0 0 a]
+    palignr     m2,        m0, 2                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        m2, [r4 + 5 * 16]          ; [27]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m2, [r4]                   ; [22]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+
+    pmaddubsw   m1,        m0, [r4 + 7 * 16]          ; [29]
+    pmulhrsw    m1,        m7
+
+    pmaddubsw   m0,        [r4 + 2 * 16]              ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    pmaddubsw   m5,        m2, [r4 - 5 * 16]          ; [17]
+    pmulhrsw    m5,        m7
+
+    lea         r4,        [ang_table + 7 * 16]
+    pmaddubsw   m6,        m2, [r4 + 5 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m2, [r4]                   ; [7]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m2,        [r4 - 5 * 16]              ; [2]
+    pmulhrsw    m2,        m7
+    packuswb    m6,        m2
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_13(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 angular intra prediction for the mirrored mode pair 13/23 (SSE4, 8bpp).
+; r4/r3 swap selects the primary reference array (r4 offset) and the side
+; array (r3 offset) that provides the two projected pixels spliced in at
+; src[r3+4] and src[r3+7] ('a' and 'b' in the lane comments).  Three
+; alignments of the interleaved reference (m0, m1, m5) feed the usual
+; pmaddubsw/pmulhrsw(pw_1024) interpolation; r4 is re-based mid-function to
+; reach the low ang_table rows.  Ends in intra_pred_ang8_3's .transpose8x8.
+cglobal intra_pred_ang8_13, 4,5,8
+    xor         r4,        r4
+    cmp         r3m,       byte 23
+    mov         r3,        16
+    jz          .next
+    xchg        r3,        r4
+.next:
+
+    movu        m1,        [r2 +  r4]                 ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m1,        [r2], 0
+    pslldq      m1,        1                          ; [14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 a]
+    pinsrb      m1,        [r2 + r3 + 4], 0
+    pslldq      m0,        m1, 1                      ; [13 12 11 10 9 8 7 6 5 4 3 2 1 0 a b]
+    pinsrb      m0,        [r2 + r3 + 7], 0
+    punpckhbw   m5,        m0, m1                     ; [14 13 13 12 12 11 11 10 10 9 9 8 8 7 7 6]
+    punpcklbw   m0,        m1                         ; [6 5 5 4 4 3 3 2 2 1 1 0 0 a a b]
+    palignr     m1,        m5, m0, 2                  ; [7 6 6 5 5 4 4 3 3 2 2 1 1 0 0 a]
+    palignr     m5,        m0, 4                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    lea         r4,        [ang_table + 24 * 16]
+    mova        m7,        [pw_1024]
+
+    pmaddubsw   m4,        m5, [r4 - 1 * 16]          ; [23]
+    pmulhrsw    m4,        m7
+
+    pmaddubsw   m6,        m1, [r4 + 4 * 16]          ; [28]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m0,        [r4]                       ; [24]
+    pmulhrsw    m0,        m7
+
+    lea         r4,        [ang_table + 13 * 16]
+    pmaddubsw   m3,        m5, [r4 + 1 * 16]          ; [14]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+
+    pmaddubsw   m5,        [r4 - 8 * 16]              ; [5]
+    pmulhrsw    m5,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m1, [r4 + 6 * 16]          ; [19]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m2,        m1, [r4 - 3 * 16]          ; [10]
+    pmulhrsw    m2,        m7
+    packuswb    m6,        m2
+
+    pmaddubsw   m1,        [r4 - 12 * 16]             ; [1]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m0
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_14(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 angular intra prediction for the mirrored mode pair 14/22 (SSE4, 8bpp).
+; r4/r3 swap selects the primary reference; three projected pixels from the
+; side array (src[r3+2], src[r3+5], src[r3+7] -> 'a','b','c' in the lane
+; comments) are spliced in with pinsrb for this steeper negative angle.
+; Four alignments (m0, m1, m6, m2) feed the pmaddubsw/pmulhrsw(pw_1024)
+; interpolation; pw_1024 weight register is m3 here (m7 is not used).
+; Ends in intra_pred_ang8_3's .transpose8x8 tail.
+cglobal intra_pred_ang8_14, 4,5,8
+    xor         r4,        r4
+    cmp         r3m,       byte 22
+    mov         r3,        16
+    jz          .next
+    xchg        r3,        r4
+.next:
+
+    movu        m1,        [r2 + r4 - 2]              ; [13 12 11 10 9 8 7 6 5 4 3 2 1 0 a b]
+    pinsrb      m1,        [r2], 2
+    pinsrb      m1,        [r2 + r3 + 2], 1
+    pinsrb      m1,        [r2 + r3 + 5], 0
+    pslldq      m0,        m1, 1                      ; [12 11 10 9 8 7 6 5 4 3 2 1 0 a b c]
+    pinsrb      m0,        [r2 + r3 + 7], 0
+    punpckhbw   m2,        m0, m1                     ; [13 12 12 11 11 10 10 9 9 8 8 7 7 6 6 5]
+    punpcklbw   m0,        m1                         ; [5 4 4 3 3 2 2 1 1 0 0 a a b b c]
+    palignr     m1,        m2, m0, 2                  ; [6 5 5 4 4 3 3 2 2 1 1 0 0 a a b]
+    palignr     m6,        m2, m0, 4                  ; [7 6 6 5 5 4 4 3 3 2 2 1 1 0 0 a]
+    palignr     m2,        m0, 6                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    lea         r4,        [ang_table + 24 * 16]
+    mova        m3,        [pw_1024]
+
+    pmaddubsw   m4,        m2, [r4 - 5 * 16]          ; [19]
+    pmulhrsw    m4,        m3
+
+    pmaddubsw   m0,        [r4]                       ; [24]
+    pmulhrsw    m0,        m3
+
+    pmaddubsw   m5,        m6, [r4 + 1 * 16]          ; [25]
+    pmulhrsw    m5,        m3
+
+    lea         r4,        [ang_table + 12 * 16]
+    pmaddubsw   m6,        [r4]                       ; [12]
+    pmulhrsw    m6,        m3
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m1, [r4 + 19 * 16]         ; [31]
+    pmulhrsw    m6,        m3
+
+    pmaddubsw   m2,        [r4 - 6 * 16]              ; [6]
+    pmulhrsw    m2,        m3
+    packuswb    m4,        m2
+
+    pmaddubsw   m2,        m1, [r4 + 6 * 16]          ; [18]
+    pmulhrsw    m2,        m3
+    packuswb    m6,        m2
+
+    pmaddubsw   m1,        [r4 - 7 * 16]              ; [5]
+    pmulhrsw    m1,        m3
+    packuswb    m1,        m0
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_15(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 angular intra prediction for the mirrored mode pair 15/21 (SSE4, 8bpp).
+; The side reference array is reordered via the c_mode16_15 shuffle mask
+; (defined elsewhere in the file) and merged with the primary array by
+; palignr, yielding projected pixels 'a'..'d' in the lane comments.  Five
+; alignments (m0, m1, m6, m5, m4) feed pmaddubsw/pmulhrsw(pw_1024, held in
+; m3); r4 is re-based mid-function for the low ang_table rows.  Ends in
+; intra_pred_ang8_3's .transpose8x8 tail.
+cglobal intra_pred_ang8_15, 4,5,8
+    xor         r4,        r4
+    cmp         r3m,       byte 21
+    mov         r3,        16
+    jz          .next
+    xchg        r3,        r4
+.next:
+
+    movu        m1,        [r2 + r4]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m1,        [r2], 0
+    movu        m2,        [r2 + r3]
+    pshufb      m2,        [c_mode16_15]
+    palignr     m1,        m2, 13                     ; [12 11 10 9 8 7 6 5 4 3 2 1 0 a b c]
+    pslldq      m0,        m1, 1                      ; [11 10 9 8 7 6 5 4 3 2 1 0 a b c d]
+    pinsrb      m0,        [r2 + r3 + 8], 0
+    punpckhbw   m4,        m0, m1                     ; [12 11 11 10 10 9 9 8 8 7 7 6 6 5 5 4]
+    punpcklbw   m0,        m1                         ; [4 3 3 2 2 1 1 0 0 a a b b c c d]
+    palignr     m1,        m4, m0, 2                  ; [5 4 4 3 3 2 2 1 1 0 0 a a b b c]
+    palignr     m6,        m4, m0, 4                  ; [6 5 5 4 4 3 3 2 2 1 1 0 0 a a b]
+    palignr     m5,        m4, m0, 6                  ; [7 6 6 5 5 4 4 3 3 2 2 1 1 0 0 a]
+    palignr     m4,        m0, 8                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    lea         r4,        [ang_table + 23 * 16]
+    mova        m3,        [pw_1024]
+
+    pmaddubsw   m4,        [r4 - 8 * 16]              ; [15]
+    pmulhrsw    m4,        m3
+
+    pmaddubsw   m2,        m5, [r4 + 7 * 16]          ; [30]
+    pmulhrsw    m2,        m3
+    packuswb    m4,        m2
+
+    pmaddubsw   m5,        [r4 - 10 * 16]             ; [13]
+    pmulhrsw    m5,        m3
+
+    pmaddubsw   m2,        m6, [r4 + 5 * 16]          ; [28]
+    pmulhrsw    m2,        m3
+    packuswb    m5,        m2
+
+    pmaddubsw   m2,        m1, [r4 + 3 * 16]          ; [26]
+    pmulhrsw    m2,        m3
+
+    pmaddubsw   m0,        [r4 + 1 * 16]              ; [24]
+    pmulhrsw    m0,        m3
+
+    lea         r4,        [ang_table + 11 * 16]
+    pmaddubsw   m6,        [r4]                       ; [11]
+    pmulhrsw    m6,        m3
+    packuswb    m6,        m2
+
+    pmaddubsw   m1,        [r4 - 2 * 16]              ; [9]
+    pmulhrsw    m1,        m3
+    packuswb    m1,        m0
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_16(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 angular intra prediction for the mirrored mode pair 16/20 (SSE4, 8bpp).
+; Same construction as mode 15 but with the steeper c_mode16_16 projection
+; mask and one more projected pixel ('a'..'e' in the lane comments).  Six
+; alignments (m0, m1, m6, m2, m5, m4) feed pmaddubsw/pmulhrsw(pw_1024 in m7);
+; r4 is re-based mid-function to cover the low ang_table rows.  Ends in
+; intra_pred_ang8_3's .transpose8x8 tail.
+cglobal intra_pred_ang8_16, 4,5,8
+    xor         r4,        r4
+    cmp         r3m,       byte 20
+    mov         r3,        16
+    jz          .next
+    xchg        r3,        r4
+.next:
+
+    movu        m1,        [r2 + r4]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m1,        [r2], 0
+    movu        m2,        [r2 + r3]
+    pshufb      m2,        [c_mode16_16]
+    palignr     m1,        m2, 12                     ; [11 10 9 8 7 6 5 4 3 2 1 0 a b c d]
+    pslldq      m0,        m1, 1                      ; [10 9 8 7 6 5 4 3 2 1 0 a b c d e]
+    pinsrb      m0,        [r2 + r3 + 8], 0
+    punpckhbw   m4,        m0, m1                     ; [11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3]
+    punpcklbw   m0,        m1                         ; [3 2 2 1 1 0 0 a a b b c c d d e]
+    palignr     m1,        m4, m0, 2                  ; [4 3 3 2 2 1 1 0 0 a a b b c c d]
+    palignr     m6,        m4, m0, 4                  ; [5 4 4 3 3 2 2 1 1 0 0 a a b b c]
+    palignr     m2,        m4, m0, 6                  ; [6 5 5 4 4 3 3 2 2 1 1 0 0 a a b]
+    palignr     m5,        m4, m0, 8                  ; [7 6 6 5 5 4 4 3 3 2 2 1 1 0 0 a]
+    palignr     m4,        m0, 10                     ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    lea         r4,        [ang_table + 22 * 16]
+    mova        m7,        [pw_1024]
+
+    pmaddubsw   m3,        m5, [r4]                   ; [22]
+    pmulhrsw    m3,        m7
+
+    pmaddubsw   m0,        [r4 + 2 * 16]              ; [24]
+    pmulhrsw    m0,        m7
+
+    lea         r4,        [ang_table + 9 * 16]
+
+    pmaddubsw   m4,        [r4 + 2 * 16]              ; [11]
+    pmulhrsw    m4,        m7
+    packuswb    m4,        m3
+
+    pmaddubsw   m2,        [r4 + 3 * 16]              ; [12]
+    pmulhrsw    m2,        m7
+
+    pmaddubsw   m5,        [r4 - 8 * 16]              ; [1]
+    pmulhrsw    m5,        m7
+    packuswb    m5,        m2
+
+    mova        m2,        m6
+    pmaddubsw   m6,        [r4 + 14 * 16]             ; [23]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m2,        [r4 -  7 * 16]             ; [2]
+    pmulhrsw    m2,        m7
+    packuswb    m6,        m2
+
+    pmaddubsw   m1,        [r4 + 4 * 16]              ; [13]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m0
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_17(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 angular intra prediction for the mirrored mode pair 17/19 (SSE4, 8bpp).
+; The steepest negative angle handled here: the side array is reordered with
+; c_mode16_17 and merged so six projected pixels ('a'..'f' in the lane
+; comments) precede the primary reference.  A single ang_table base (r4,
+; centred at row 17) with signed offsets covers all eight fractions; source
+; alignments are taken from m0/m1 as needed between multiplies.  Ends in
+; intra_pred_ang8_3's .transpose8x8 tail.
+cglobal intra_pred_ang8_17, 4,5,8
+    xor         r4,        r4
+    cmp         r3m,       byte 19
+    mov         r3,        16
+    jz          .next
+    xchg        r3,        r4
+.next:
+
+    movu        m2,        [r2 + r4]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m2,        [r2], 0
+    movu        m1,        [r2 + r3]
+    pshufb      m1,        [c_mode16_17]
+    palignr     m2,        m1, 11                     ; [10 9 8 7 6 5 4 3 2 1 0 a b c d e]
+    pslldq      m0,        m2, 1                      ; [9 8 7 6 5 4 3 2 1 0 a b c d e f]
+    pinsrb      m0,        [r2 + r3 + 7], 0
+    punpckhbw   m1,        m0, m2                     ; [10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2]
+    punpcklbw   m0,        m2                         ; [2 1 1 0 0 a a b b c c d d e e f]
+
+    palignr     m5,        m1, m0, 8                  ; [6 5 5 4 4 3 3 2 2 1 1 0 0 a a b]
+    palignr     m2,        m1, m0, 10                 ; [7 6 6 5 5 4 4 3 3 2 2 1 1 0 0 a]
+    palignr     m4,        m1, m0, 12                 ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    lea         r4,        [ang_table + 17 * 16]
+    mova        m3,        [pw_1024]
+
+    pmaddubsw   m2,        [r4 - 5 * 16]              ; [12]
+    pmulhrsw    m2,        m3
+
+    pmaddubsw   m4,        [r4 - 11 * 16]             ; [6]
+    pmulhrsw    m4,        m3
+    packuswb    m4,        m2
+
+    pmaddubsw   m5,        [r4 + 1 * 16]              ; [18]
+    pmulhrsw    m5,        m3
+
+    palignr     m2,        m1, m0, 6                  ; [5 4 4 3 3 2 2 1 1 0 0 a a b b c]
+    pmaddubsw   m2,        [r4 + 7 * 16]              ; [24]
+    pmulhrsw    m2,        m3
+    packuswb    m5,        m2
+
+    palignr     m6,        m1, m0, 4                  ; [4 3 3 2 2 1 1 0 0 a a b b c c d]
+    mova        m2,        m6
+    pmaddubsw   m6,        [r4 + 13 * 16]             ; [30]
+    pmulhrsw    m6,        m3
+
+    pmaddubsw   m2,        [r4 - 13 * 16]             ; [4]
+    pmulhrsw    m2,        m3
+    packuswb    m6,        m2
+
+    palignr     m1,        m0, 2                      ; [3 2 2 1 1 0 0 a a b b c c d d e]
+    pmaddubsw   m1,        [r4 - 7 * 16]              ; [10]
+    pmulhrsw    m1,        m3
+
+    pmaddubsw   m0,        [r4 - 1 * 16]              ; [16]
+    pmulhrsw    m0,        m3
+    packuswb    m1,        m0
+    jmp         mangle(private_prefix %+ _ %+ intra_pred_ang8_3 %+ SUFFIX %+ .transpose8x8)
+
+; void intra_pred_ang8_18(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 8x8 diagonal prediction (mode 18): builds one 16-byte vector holding the
+; reversed side reference (src+16.. via pb_swap8, with src[0] spliced in)
+; in the low half and the top reference (src+1..) in the high half, then
+; emits the eight rows bottom-up, shifting the vector right one byte per
+; row (psrldq) so each row is the previous one displaced by one pixel.
+cglobal intra_pred_ang8_18, 4,4,1
+    movu        m0, [r2 + 16]
+    pinsrb      m0, [r2], 0
+    pshufb      m0, [pb_swap8]
+    movhps      m0, [r2 + 1]
+    lea         r2, [r0 + r1 * 4]
+    lea         r3, [r1 * 3]
+    movh        [r2 + r3], m0
+    psrldq      m0, 1
+    movh        [r2 + r1 * 2], m0
+    psrldq      m0, 1
+    movh        [r2 + r1], m0
+    psrldq      m0, 1
+    movh        [r2], m0
+    psrldq      m0, 1
+    movh        [r0 + r3], m0
+    psrldq      m0, 1
+    movh        [r0 + r1 * 2], m0
+    psrldq      m0, 1
+    movh        [r0 + r1], m0
+    psrldq      m0, 1
+    movh        [r0], m0
+    RET
+
+; TRANSPOSE_STORE_8x8 col, transpose, r3, r4, r5, r6
+; Writes an 8x8 tile held in four xmm registers (%3..%6, two 8-pixel lines
+; each, packed low/high).  m0 is clobbered as scratch.
+;   %2 == 1: transpose the tile first (producers emit column-major data) and
+;            store at byte column offset %1*8, rows addressed via r0/r6 with
+;            strides r1 and r5 (= 3*stride) set up by the caller.
+;   %2 != 1: store the lines as-is at r0, advancing r0 by four rows twice.
+; NOTE(review): the punpckhbw for the %5/%6 pair reads m1 directly instead of
+; %6; every call site in this file passes m1 as %6, so the result is
+; identical — confirm before invoking with a different register as %6.
+%macro TRANSPOSE_STORE_8x8 6
+  %if %2 == 1
+    ; transpose 8x8 and then store, used by angle BLOCK_16x16 and BLOCK_32x32
+    punpckhbw   m0,        %3, %4
+    punpcklbw   %3,        %4
+    punpckhbw   %4,        %3, m0
+    punpcklbw   %3,        m0
+
+    punpckhbw   m0,        %5, m1
+    punpcklbw   %5,        %6
+    punpckhbw   %6,        %5, m0
+    punpcklbw   %5,        m0
+
+    punpckhdq   m0,        %3, %5
+    punpckldq   %3,        %5
+    punpckldq   %5,        %4, %6
+    punpckhdq   %4,        %6
+
+    movh        [r0 +       + %1 * 8], %3
+    movhps      [r0 +  r1   + %1 * 8], %3
+    movh        [r0 +  r1*2 + %1 * 8], m0
+    movhps      [r0 +  r5   + %1 * 8], m0
+    movh        [r6         + %1 * 8], %5
+    movhps      [r6 +  r1   + %1 * 8], %5
+    movh        [r6 +  r1*2 + %1 * 8], %4
+    movhps      [r6 +  r5   + %1 * 8], %4
+  %else
+    ; store 8x8, used by angle BLOCK_16x16 and BLOCK_32x32
+    movh        [r0         ], %3
+    movhps      [r0 + r1    ], %3
+    movh        [r0 + r1 * 2], %4
+    movhps      [r0 + r5    ], %4
+    lea         r0, [r0 + r1 * 4]
+    movh        [r0         ], %5
+    movhps      [r0 + r1    ], %5
+    movh        [r0 + r1 * 2], %6
+    movhps      [r0 + r5    ], %6
+    lea         r0, [r0 + r1 * 4]
+  %endif
+%endmacro
+
+;------------------------------------------------------------------------------------------
+; void intraPredAng16(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------
+; void intra_pred_ang16_2(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 16x16 prediction for the fully diagonal mode pair 2/34 (SSSE3, 8bpp).
+; No interpolation is needed: row k is simply the reference array shifted by
+; k pixels.  dirMode==34 reads from src+2 (top), otherwise src+34 (left).
+; Two loads (m0, m1) cover the 31 reference pixels; each row is produced by
+; palignr with an increasing shift and stored with movu.
+INIT_XMM ssse3
+cglobal intra_pred_ang16_2, 3,5,3
+    lea             r4, [r2 + 2]
+    add             r2, 34
+    cmp             r3m, byte 34
+    cmove           r2, r4
+    movu            m0, [r2]
+    movu            m1, [r2 + 16]
+    movu            [r0], m0
+    palignr         m2, m1, m0, 1
+    movu            [r0 + r1], m2
+    lea             r0, [r0 + r1 * 2]
+    palignr         m2, m1, m0, 2
+    movu            [r0], m2
+    palignr         m2, m1, m0, 3
+    movu            [r0 + r1], m2
+    lea             r0, [r0 + r1 * 2]
+    palignr         m2, m1, m0, 4
+    movu            [r0], m2
+    palignr         m2, m1, m0, 5
+    movu            [r0 + r1], m2
+    lea             r0, [r0 + r1 * 2]
+    palignr         m2, m1, m0, 6
+    movu            [r0], m2
+    palignr         m2, m1, m0, 7
+    movu            [r0 + r1], m2
+    lea             r0, [r0 + r1 * 2]
+    palignr         m2, m1, m0, 8
+    movu            [r0], m2
+    palignr         m2, m1, m0, 9
+    movu            [r0 + r1], m2
+    lea             r0, [r0 + r1 * 2]
+    palignr         m2, m1, m0, 10
+    movu            [r0], m2
+    palignr         m2, m1, m0, 11
+    movu            [r0 + r1], m2
+    lea             r0, [r0 + r1 * 2]
+    palignr         m2, m1, m0, 12
+    movu            [r0], m2
+    palignr         m2, m1, m0, 13
+    movu            [r0 + r1], m2
+    lea             r0, [r0 + r1 * 2]
+    palignr         m2, m1, m0, 14
+    movu            [r0], m2
+    palignr         m2, m1, m0, 15
+    movu            [r0 + r1], m2
+    RET
+
+; void intra_pred_ang16_3(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 16x16 angular intra prediction, mode 3 (SSE4, 8bpp).  Reads the left
+; reference array (r2 += 32) and loops twice (r4d), each pass producing two
+; 8x8 column-major tiles that TRANSPOSE_STORE_8x8 (transpose=1) transposes
+; into rows at column offsets 0 and 8.  Interpolation is the standard
+; pmaddubsw(ang_table row)/pmulhrsw(pw_1024) rounded >>5; the final line of
+; each pass has fraction 0, so movhps loads the raw pixels [00] directly.
+; r5 = 3*stride and r6 = 4*stride row bases are maintained for the store
+; macro; after each pass r0/r6 advance 8 rows and r2 advances 8 pixels.
+INIT_XMM sse4
+cglobal intra_pred_ang16_3, 3,7,8
+    add         r2,        32
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m0,        [r2 + 1]
+    palignr     m1,        m0, 1
+
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+
+    pmaddubsw   m4,        m0, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 4 * 16]              ; [20]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m5,        m2, m0, 4
+
+    pmaddubsw   m5,        [r3 - 2 * 16]              ; [14]
+    pmulhrsw    m5,        m7
+
+    palignr     m6,        m2, m0, 6
+
+    pmaddubsw   m6,        [r3 - 8 * 16]              ; [ 8]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m0, 8
+
+    pmaddubsw   m6,        m1, [r3 - 14 * 16]         ; [ 2]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m1,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    palignr     m1,        m2, m0, 10
+
+    pmaddubsw   m1,        [r3 + 6 * 16]              ; [22]
+    pmulhrsw    m1,        m7
+
+    palignr     m2,        m0, 12
+
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    movu        m0,        [r2 + 8]
+    palignr     m1,        m0, 1
+
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m5,        m2, m0, 2
+
+    pmaddubsw   m4,        m0, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m5, [r3 - 12 * 16]         ; [04]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    pmaddubsw   m5,        [r3 + 14 * 16]             ; [30]
+    pmulhrsw    m5,        m7
+
+    palignr     m6,        m2, m0, 4
+
+    pmaddubsw   m6,        [r3 + 8 * 16]              ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m0, 6
+
+    pmaddubsw   m6,        m1, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m6,        m7
+
+    palignr     m1,        m2, m0, 8
+
+    pmaddubsw   m1,        [r3 - 4 * 16]              ; [12]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    palignr     m1,        m2, m0, 10
+
+    pmaddubsw   m1,        [r3 - 10 * 16]             ; [06]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+
+    movhps      m1,        [r2 + 14]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+; void intra_pred_ang16_33(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+; 16x16 angular intra prediction, mode 33 (SSE4, 8bpp) — the mirror of mode 3.
+; Uses the same ang_table fractions and interleave/pmaddubsw/pmulhrsw pattern,
+; but reads the top reference array (no r2 offset) and stores rows directly:
+; no transpose, so the first tile goes through TRANSPOSE_STORE_8x8 with
+; transpose=0 and the second half is stored inline (its last row [00] is the
+; raw pixels loaded with movh).  Loops twice (r4d); after each pass the
+; destination moves 8 columns right (r0 = r6 + 8) and r2 advances 8 pixels.
+INIT_XMM sse4
+cglobal intra_pred_ang16_33, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m0,        [r2 + 1]
+    palignr     m1,        m0, 1
+
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+
+    pmaddubsw   m4,        m0, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 4 * 16]              ; [20]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m5,        m2, m0, 4
+
+    pmaddubsw   m5,        [r3 - 2 * 16]              ; [14]
+    pmulhrsw    m5,        m7
+
+    palignr     m6,        m2, m0, 6
+
+    pmaddubsw   m6,        [r3 - 8 * 16]              ; [ 8]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m0, 8
+
+    pmaddubsw   m6,        m1, [r3 - 14 * 16]         ; [ 2]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m1,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    palignr     m1,        m2, m0, 10
+
+    pmaddubsw   m1,        [r3 + 6 * 16]              ; [22]
+    pmulhrsw    m1,        m7
+
+    palignr     m2,        m0, 12
+
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    movu        m0,        [r2 + 8]
+    palignr     m1,        m0, 1
+
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m5,        m2, m0, 2
+
+    pmaddubsw   m4,        m0, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m5, [r3 - 12 * 16]         ; [04]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    pmaddubsw   m5,        [r3 + 14 * 16]             ; [30]
+    pmulhrsw    m5,        m7
+
+    palignr     m6,        m2, m0, 4
+
+    pmaddubsw   m6,        [r3 + 8 * 16]              ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m0, 6
+
+    pmaddubsw   m6,        m1, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m6,        m7
+
+    palignr     m1,        m2, m0, 8
+
+    pmaddubsw   m1,        [r3 - 4 * 16]              ; [12]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    palignr     m1,        m2, m0, 10
+
+    pmaddubsw   m1,        [r3 - 10 * 16]             ; [06]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+
+    movh        m2,        [r2 + 14]                  ; [00]
+
+    movh        [r0         ], m4
+    movhps      [r0 + r1    ], m4
+    movh        [r0 + r1 * 2], m5
+    movhps      [r0 + r5    ], m5
+    lea         r0, [r0 + r1 * 4]
+    movh        [r0         ], m6
+    movhps      [r0 + r1    ], m6
+    movh        [r0 + r1 * 2], m1
+    movh        [r0 + r5    ], m2
+
+    lea         r0,        [r6 + 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 4 (SSE4).
+; Register roles inferred from usage -- confirm against the C prototype:
+;   r0 = dst, r1 = dst stride, r2 = reference samples, r5 = 3*stride,
+;   r6 = running dst row base.  'add r2, 32' skips the first 32 reference
+;   bytes (presumably to the second half of the reference array -- verify
+;   against the reference-buffer layout used by the callers).
+; r3 points at the centre row of ang_table; the ';[w]' comments give the
+; interpolation weight per table row, and pmulhrsw/pw_1024 is the rounded
+; >>5 of the pmaddubsw sums.  r4d = 2 loop passes; TRANSPOSE_STORE_8x8
+; (second arg 1 here) is defined earlier in the file, not in view.
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_4, 3,7,8
+    add         r2,        32
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m0,        [r2 + 1]
+    palignr     m1,        m0, 1
+
+    ; build (p[i], p[i+1]) byte pairs; palignr steps the pair window
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+    mova        m5,        m1
+
+    pmaddubsw   m4,        m0, [r3 + 5 * 16]          ; [21]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 - 6 * 16]              ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    pmaddubsw   m5,        [r3 + 15 * 16]             ; [31]
+    pmulhrsw    m5,        m7
+
+    palignr     m6,        m2, m0, 4
+
+    pmaddubsw   m6,        [r3 + 4 * 16]              ; [ 20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m0, 6
+
+    pmaddubsw   m6,        m1, [r3 - 7 * 16]          ; [ 9]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m1,        [r3 + 14 * 16]             ; [30]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    palignr     m1,        m2, m0, 8
+
+    pmaddubsw   m1,        [r3 + 3 * 16]              ; [19]
+    pmulhrsw    m1,        m7
+
+    palignr     m2,        m0, 10
+
+    pmaddubsw   m3,        m2, [r3 - 8 * 16]          ; [8]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m2, [r3 + 13 * 16]         ; [29]
+    pmulhrsw    m4,        m7
+
+    ; second half: restart the pair window 5 samples further along
+    movu        m0,        [r2 + 6]
+    palignr     m1,        m0, 1
+
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+
+    pmaddubsw   m1,        [r3 +  2 * 16]             ; [18]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m5,        m2, m0, 4
+    mova        m6,        m5
+
+    pmaddubsw   m5,        [r3 - 9 * 16]              ; [07]
+    pmulhrsw    m5,        m7
+
+    pmaddubsw   m6,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m6,        m2, m0, 6
+
+    pmaddubsw   m6,        [r3 +      16]             ; [17]
+    pmulhrsw    m6,        m7
+
+    palignr     m1,        m2, m0, 8
+    palignr     m2,        m0, 10
+
+    pmaddubsw   m3,        m1, [r3 - 10 * 16]         ; [06]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+
+    pmaddubsw   m1,        [r3 + 11 * 16]             ; [27]
+    pmulhrsw    m1,        m7
+
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    ; advance dst down 8 rows (via r6) and rerun for the second half
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 32 (SSE4).
+; Same interpolation arithmetic and weight sequence as intra_pred_ang16_4
+; above, but: no +32 reference offset, TRANSPOSE_STORE_8x8 is invoked with
+; second arg 0 instead of 1 (store-variant selector -- macro defined
+; earlier in the file, not in view), and the per-pass dst advance is
+; '+8' column-wise via r6 rather than 8 rows down.  Register roles
+; inferred from usage: r0 = dst, r1 = stride, r2 = reference samples.
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_32, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m0,        [r2 + 1]
+    palignr     m1,        m0, 1
+
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+    mova        m5,        m1
+
+
+    pmaddubsw   m4,        m0, [r3 + 5 * 16]          ; [21]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 - 6 * 16]              ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    pmaddubsw   m5,        [r3 + 15 * 16]             ; [31]
+    pmulhrsw    m5,        m7
+
+    palignr     m6,        m2, m0, 4
+
+    pmaddubsw   m6,        [r3 + 4 * 16]              ; [ 20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m0, 6
+
+    pmaddubsw   m6,        m1, [r3 - 7 * 16]          ; [ 9]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m1,        [r3 + 14 * 16]             ; [30]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    palignr     m1,        m2, m0, 8
+
+    pmaddubsw   m1,        [r3 + 3 * 16]              ; [19]
+    pmulhrsw    m1,        m7
+
+    palignr     m2,        m0, 10
+
+    pmaddubsw   m3,        m2, [r3 - 8 * 16]          ; [8]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m2, [r3 + 13 * 16]         ; [29]
+    pmulhrsw    m4,        m7
+
+    movu        m0,        [r2 + 6]
+    palignr     m1,        m0, 1
+
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+
+    pmaddubsw   m1,        [r3 +  2 * 16]             ; [18]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m5,        m2, m0, 4
+    mova        m6,        m5
+
+    pmaddubsw   m5,        [r3 - 9 * 16]              ; [07]
+    pmulhrsw    m5,        m7
+
+    pmaddubsw   m6,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m6,        m2, m0, 6
+
+    pmaddubsw   m6,        [r3 +      16]             ; [17]
+    pmulhrsw    m6,        m7
+
+    palignr     m1,        m2, m0, 8
+    palignr     m2,        m0, 10
+
+    pmaddubsw   m3,        m1, [r3 - 10 * 16]         ; [06]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+
+    pmaddubsw   m1,        [r3 + 11 * 16]             ; [27]
+    pmulhrsw    m1,        m7
+
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    ; advance to the right 8-column half and rerun
+    lea         r0,        [r6 + 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 5 (SSE4).
+; Register roles inferred from usage -- confirm against the C prototype:
+;   r0 = dst, r1 = dst stride, r2 = reference samples (+32 presumably skips
+;   to the second half of the reference array -- verify against callers).
+; r3 points at the centre row of ang_table; the ';[w]' comments give the
+; interpolation weight per table row and pmulhrsw/pw_1024 is the rounded
+; >>5 of the pmaddubsw sums.  r4d = 2 loop passes; results go out through
+; TRANSPOSE_STORE_8x8 (second arg 1; macro defined earlier, not in view).
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_5, 3,7,8
+    add         r2,        32
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m3,        [r2 + 1]                   ;[16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 2]                   ;[17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpckhbw   m2,        m3, m1                     ;[17 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m3,        m1                         ;[9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    palignr     m5,        m2, m3, 2
+
+    pmaddubsw   m4,        m3, [r3 +      16]         ; [17]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m5, [r3 - 14 * 16]         ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m6,        m2, m3, 4
+
+    pmaddubsw   m5,        [r3 + 3 * 16]              ; [19]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m1,        m6, [r3 - 12 * 16]         ; [4]
+    pmulhrsw    m1,        m7
+    packuswb    m5,        m1
+
+    palignr     m1,        m2, m3, 6
+
+    pmaddubsw   m6,        [r3 + 5 * 16]              ; [21]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m1, [r3 - 10 * 16]         ; [6]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m0,        m2, m3, 8
+
+    pmaddubsw   m1,        [r3 + 7 * 16]              ; [23]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        [r3 - 8 * 16]              ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    palignr     m4,        m2, m3, 8
+    palignr     m5,        m2, m3, 10
+
+    pmaddubsw   m4,        [r3 + 9 * 16]              ; [25]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m5, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m6,        m2, m3, 12
+
+    pmaddubsw   m5,        [r3 + 11 * 16]             ; [27]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m1,        m6, [r3 - 4 * 16]          ; [12]
+    pmulhrsw    m1,        m7
+    packuswb    m5,        m1
+
+    palignr     m1,        m2, m3, 14
+
+    pmaddubsw   m6,        [r3 + 13 * 16]             ; [29]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m1, [r3 - 2 * 16]          ; [14]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        [r3 + 15 * 16]             ; [31]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    ; advance dst down 8 rows (via r6) and rerun for the second half
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 31 (SSE4).
+; Same interpolation arithmetic and weight sequence as intra_pred_ang16_5
+; above, but: no +32 reference offset, TRANSPOSE_STORE_8x8 second arg is 0
+; instead of 1 (store-variant selector -- macro defined earlier, not in
+; view), and the per-pass dst advance is '+8' column-wise via r6 rather
+; than 8 rows down.  Register roles inferred from usage: r0 = dst,
+; r1 = stride, r2 = reference samples.
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_31, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m3,        [r2 + 1]                   ;[16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    movu        m1,        [r2 + 2]                   ;[17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpckhbw   m2,        m3, m1                     ;[17 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m3,        m1                         ;[9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    palignr     m5,        m2, m3, 2
+
+    pmaddubsw   m4,        m3, [r3 +      16]         ; [17]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m5, [r3 - 14 * 16]         ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m6,        m2, m3, 4
+
+    pmaddubsw   m5,        [r3 + 3 * 16]              ; [19]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m1,        m6, [r3 - 12 * 16]         ; [4]
+    pmulhrsw    m1,        m7
+    packuswb    m5,        m1
+
+    palignr     m1,        m2, m3, 6
+
+    pmaddubsw   m6,        [r3 + 5 * 16]              ; [21]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m1, [r3 - 10 * 16]         ; [6]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m0,        m2, m3, 8
+
+    pmaddubsw   m1,        [r3 + 7 * 16]              ; [23]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        [r3 - 8 * 16]              ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    palignr     m4,        m2, m3, 8
+    palignr     m5,        m2, m3, 10
+
+    pmaddubsw   m4,        [r3 + 9 * 16]              ; [25]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m5, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m6,        m2, m3, 12
+
+    pmaddubsw   m5,        [r3 + 11 * 16]             ; [27]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m1,        m6, [r3 - 4 * 16]          ; [12]
+    pmulhrsw    m1,        m7
+    packuswb    m5,        m1
+
+    palignr     m1,        m2, m3, 14
+
+    pmaddubsw   m6,        [r3 + 13 * 16]             ; [29]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m1, [r3 - 2 * 16]          ; [14]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        [r3 + 15 * 16]             ; [31]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    ; advance to the right 8-column half and rerun
+    lea         r0,        [r6 + 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 6 (SSE4).
+; Register roles inferred from usage -- confirm against the C prototype:
+;   r0 = dst, r1 = dst stride, r2 = reference samples (+32 presumably skips
+;   to the second half of the reference array -- verify against callers).
+; r3 points at the centre row of ang_table; the ';[w]' comments give the
+; interpolation weight per table row and pmulhrsw/pw_1024 is the rounded
+; >>5 of the pmaddubsw sums.  r4d = 2 loop passes; results go out through
+; TRANSPOSE_STORE_8x8 (second arg 1; macro defined earlier, not in view).
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_6, 3,7,8
+    add         r2,        32
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m3,        [r2 + 1]                   ;[16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m3, 1                      ;[x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpckhbw   m2,        m3, m1                     ;[x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m3,        m1                         ;[9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pmaddubsw   m4,        m3, [r3 - 3 * 16]          ; [13]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m3, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m6,        m2, m3, 2
+
+    pmaddubsw   m5,        m6, [r3 - 9 * 16]          ; [7]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        [r3 + 4 * 16]              ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m3, 4
+
+    pmaddubsw   m6,        m1, [r3 - 15 * 16]         ; [1]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m1, [r3 - 2 * 16]          ; [14]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m0,        m2, m3, 6
+
+    pmaddubsw   m1,        [r3 + 11 * 16]             ; [27]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        [r3 - 8 * 16]              ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    palignr     m4,        m2, m3, 6
+    palignr     m6,        m2, m3, 8
+
+    pmaddubsw   m4,        [r3 +  5 * 16]             ; [21]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m6, [r3 - 14 * 16]         ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    pmaddubsw   m5,        m6, [r3 - 16]              ; [15]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m0,        m2, m3, 10
+
+    pmaddubsw   m6,        m0, [r3 - 7 * 16]          ; [9]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        [r3 + 6 * 16]              ; [22]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m2,        m3, 12
+
+    pmaddubsw   m1,        m2, [r3 - 13 * 16]         ; [3]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    ; advance dst down 8 rows (via r6) and rerun for the second half
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 30 (SSE4).
+; Same interpolation arithmetic and weight sequence as intra_pred_ang16_6
+; above, but: no +32 reference offset, TRANSPOSE_STORE_8x8 second arg is 0
+; instead of 1 (store-variant selector -- macro defined earlier, not in
+; view), and the per-pass dst advance is '+8' column-wise via r6 rather
+; than 8 rows down.  Register roles inferred from usage: r0 = dst,
+; r1 = stride, r2 = reference samples.
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_30, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m3,        [r2 + 1]                   ;[16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m3, 1                      ;[x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpckhbw   m2,        m3, m1                     ;[x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m3,        m1                         ;[9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pmaddubsw   m4,        m3, [r3 - 3 * 16]          ; [13]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m3, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m6,        m2, m3, 2
+
+    pmaddubsw   m5,        m6, [r3 - 9 * 16]          ; [7]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        [r3 + 4 * 16]              ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m1,        m2, m3, 4
+
+    pmaddubsw   m6,        m1, [r3 - 15 * 16]         ; [1]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m1, [r3 - 2 * 16]          ; [14]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m0,        m2, m3, 6
+
+    pmaddubsw   m1,        [r3 + 11 * 16]             ; [27]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        [r3 - 8 * 16]              ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    palignr     m4,        m2, m3, 6
+    palignr     m6,        m2, m3, 8
+
+    pmaddubsw   m4,        [r3 +  5 * 16]             ; [21]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m6, [r3 - 14 * 16]         ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    pmaddubsw   m5,        m6, [r3 - 16]              ; [15]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m0,        m2, m3, 10
+
+    pmaddubsw   m6,        m0, [r3 - 7 * 16]          ; [9]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        [r3 + 6 * 16]              ; [22]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m2,        m3, 12
+
+    pmaddubsw   m1,        m2, [r3 - 13 * 16]         ; [3]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    ; advance to the right 8-column half and rerun
+    lea         r0,        [r6 + 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 7 (SSE4).
+; Register roles inferred from usage -- confirm against the C prototype:
+;   r0 = dst, r1 = dst stride, r2 = reference samples (+32 presumably skips
+;   to the second half of the reference array -- verify against callers).
+; r3 points at the centre row of ang_table; the ';[w]' comments give the
+; interpolation weight per table row and pmulhrsw/pw_1024 is the rounded
+; >>5 of the pmaddubsw sums.  r4d = 2 loop passes; results go out through
+; TRANSPOSE_STORE_8x8 (second arg 1; macro defined earlier, not in view).
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_7, 3,7,8
+    add         r2,        32
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]       ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m3,        [r2 + 1]                   ;[16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m3, 1                      ;[x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpckhbw   m2,        m3, m1                     ;[x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m3,        m1                         ;[9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pmaddubsw   m4,        m3, [r3 - 7 * 16]          ; [9]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m0,        m3, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m0,        m7
+    packuswb    m4,        m0
+
+    palignr     m1,        m2, m3, 2
+
+    pmaddubsw   m5,        m3, [r3 + 11 * 16]         ; [27]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m1, [r3 - 12 * 16]         ; [4]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m1, [r3 - 3 * 16]          ; [13]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m1, [r3 + 6 * 16]          ; [22]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m0,        m2, m3, 4
+
+    pmaddubsw   m1,        [r3 + 15 * 16]             ; [31]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        [r3 - 8 * 16]              ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    palignr     m1,        m2, m3, 4
+
+    pmaddubsw   m4,        m1, [r3 + 16]              ; [17]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 10 * 16]             ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m0,        m2, m3, 6
+
+    pmaddubsw   m5,        m0, [r3 - 13 * 16]         ; [03]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r3 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m0, [r3 + 5 * 16]          ; [21]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        [r3 + 14 * 16]             ; [30]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m2,        m3, 8
+
+    pmaddubsw   m1,        m2, [r3 - 9 * 16]          ; [07]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    ; advance dst down 8 rows (via r6) and rerun for the second half
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 29 (SSE4).
+; Same interpolation arithmetic and weight sequence as intra_pred_ang16_7
+; above, but: no +32 reference offset, TRANSPOSE_STORE_8x8 second arg is 0
+; instead of 1 (store-variant selector -- macro defined earlier, not in
+; view), and the per-pass dst advance is '+8' column-wise via r6 rather
+; than 8 rows down.  Register roles inferred from usage: r0 = dst,
+; r1 = stride, r2 = reference samples.
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_29, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m3,        [r2 + 1]                   ;[16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m3, 1                      ;[x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpckhbw   m2,        m3, m1                     ;[x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m3,        m1                         ;[9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pmaddubsw   m4,        m3, [r3 - 7 * 16]          ; [9]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m0,        m3, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m0,        m7
+    packuswb    m4,        m0
+
+    palignr     m1,        m2, m3, 2
+
+    pmaddubsw   m5,        m3, [r3 + 11 * 16]         ; [27]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m1, [r3 - 12 * 16]         ; [4]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m1, [r3 - 3 * 16]          ; [13]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m1, [r3 + 6 * 16]          ; [22]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m0,        m2, m3, 4
+
+    pmaddubsw   m1,        [r3 + 15 * 16]             ; [31]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        [r3 - 8 * 16]              ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    palignr     m1,        m2, m3, 4
+
+    pmaddubsw   m4,        m1, [r3 + 16]              ; [17]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 10 * 16]             ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    palignr     m0,        m2, m3, 6
+
+    pmaddubsw   m5,        m0, [r3 - 13 * 16]         ; [03]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r3 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m0, [r3 + 5 * 16]          ; [21]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        [r3 + 14 * 16]             ; [30]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m2,        m3, 8
+
+    pmaddubsw   m1,        m2, [r3 - 9 * 16]          ; [07]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    ; advance to the right 8-column half and rerun
+    lea         r0,        [r6 + 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 8 (SSE4).
+; Register roles inferred from usage -- confirm against the C prototype:
+;   r0 = dst, r1 = dst stride, r2 = reference samples (+32 presumably skips
+;   to the second half of the reference array -- verify against callers).
+; Shallow angle: the sample-pair window advances only every few outputs,
+; so many pmaddubsw results reuse the same pair vector (m1, then m2/m3)
+; with different ';[w]' ang_table weight rows; pmulhrsw/pw_1024 is the
+; rounded >>5.  r4d = 2 loop passes; TRANSPOSE_STORE_8x8 (second arg 1;
+; macro defined earlier, not in view) performs the stores.
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_8, 3,7,8
+    add         r2,        32
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m1,        [r2 + 1]                   ;[16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m3,        m1, 1                      ;[x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpckhbw   m0,        m1, m3                     ;[x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m1,        m3                         ;[9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pmaddubsw   m4,        m1, [r3 - 11 * 16]         ; [5]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m2,        m1, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m2,        m7
+    packuswb    m4,        m2
+
+    pmaddubsw   m5,        m1, [r3 - 1 * 16]          ; [15]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m1, [r3 + 4 * 16]          ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m1, [r3 + 9 * 16]          ; [25]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m2,        m1, [r3 + 14 * 16]         ; [30]
+    pmulhrsw    m2,        m7
+    packuswb    m6,        m2
+
+    palignr     m2,        m0, m1, 2
+    palignr     m3,        m0, m1, 4
+
+    pmaddubsw   m1,        m2, [r3 - 13 * 16]         ; [3]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m2, [r3 - 8 * 16]          ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m2, [r3 - 3 * 16]          ; [13]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m2, [r3 + 7 * 16]          ; [23]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m2,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m2,        m7
+    packuswb    m5,        m2
+
+    pmaddubsw   m6,        m3, [r3 - 15 * 16]         ; [01]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r3 - 10 * 16]         ; [06]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r3 - 5 * 16]          ; [11]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r3]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    ; advance dst down 8 rows (via r6) and rerun for the second half
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 28 (SSE4).
+; Same interpolation arithmetic and weight sequence as intra_pred_ang16_8
+; above, but: no +32 reference offset, TRANSPOSE_STORE_8x8 second arg is 0
+; instead of 1 (store-variant selector -- macro defined earlier, not in
+; view), and the per-pass dst advance is '+8' column-wise via r6 rather
+; than 8 rows down.  Register roles inferred from usage: r0 = dst,
+; r1 = stride, r2 = reference samples.
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_28, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m1,        [r2 + 1]                   ;[16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m3,        m1, 1                      ;[x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpckhbw   m0,        m1, m3                     ;[x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m1,        m3                         ;[9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pmaddubsw   m4,        m1, [r3 - 11 * 16]         ; [5]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m2,        m1, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m2,        m7
+    packuswb    m4,        m2
+
+    pmaddubsw   m5,        m1, [r3 - 1 * 16]          ; [15]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m1, [r3 + 4 * 16]          ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m1, [r3 + 9 * 16]          ; [25]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m2,        m1, [r3 + 14 * 16]         ; [30]
+    pmulhrsw    m2,        m7
+    packuswb    m6,        m2
+
+    palignr     m2,        m0, m1, 2
+    palignr     m3,        m0, m1, 4
+
+    pmaddubsw   m1,        m2, [r3 - 13 * 16]         ; [3]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m2, [r3 - 8 * 16]          ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m2, [r3 - 3 * 16]          ; [13]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m2, [r3 + 7 * 16]          ; [23]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m2,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m2,        m7
+    packuswb    m5,        m2
+
+    pmaddubsw   m6,        m3, [r3 - 15 * 16]         ; [01]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r3 - 10 * 16]         ; [06]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r3 - 5 * 16]          ; [11]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r3]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    ; advance to the right 8-column half and rerun
+    lea         r0,        [r6 + 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; 16x16 angular intra prediction, mode 9 (SSE4).
+; Register roles inferred from usage -- confirm against the C prototype:
+;   r0 = dst, r1 = dst stride, r2 = reference samples (+32 presumably skips
+;   to the second half of the reference array -- verify against callers).
+; Near-horizontal angle: every output row uses the SAME sample-pair vector
+; m2, only the ang_table weight (';[w]' comments, step 2 from [2] to [30])
+; changes; the final row is the unweighted reference itself ([00], taken
+; from m3 via punpcklqdq).  pmulhrsw/pw_1024 is the rounded >>5.
+; r4d = 2 loop passes; TRANSPOSE_STORE_8x8 (second arg 1; macro defined
+; earlier, not in view) performs the stores.
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang16_9, 3,7,8
+    add         r2,        32
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m2,        [r2 + 1]                   ;[16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m3,        m2, 1                      ;[x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpcklbw   m2,        m3                         ;[9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pmaddubsw   m4,        m2, [r3 - 14 * 16]         ; [2]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m0,        m2, [r3 - 12 * 16]         ; [4]
+    pmulhrsw    m0,        m7
+    packuswb    m4,        m0
+
+    pmaddubsw   m5,        m2, [r3 - 10 * 16]         ; [6]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r3 - 8 * 16]          ; [8]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m2, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m2, [r3 - 4 * 16]          ; [12]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m2, [r3 - 2 * 16]          ; [14]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m2, [r3]                   ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m2, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r3 + 4 * 16]          ; [20]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m2, [r3 + 6 * 16]          ; [22]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r3 + 8 * 16]          ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m2, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r3 + 12 * 16]         ; [28]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m2, [r3 + 14 * 16]         ; [30]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+
+    ; pair the [30] row with the raw shifted reference (weight 0) in m3
+    punpcklqdq  m1,        m3                         ; [00]
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    ; advance dst down 8 rows (via r6) and rerun for the second half
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+; NOTE(review): 16x16 angular intra prediction, mode 27 (SSE4) — the vertical
+; counterpart of mode 9 (same weight schedule), so the first 8 rows are written
+; transposed and the last 8 rows are stored directly with movh/movhps.
+; r0 = dst, r1 = dst stride, r2 = neighbour pixels (top refs at r2 + 1).
+; pmulhrsw with pw_1024 implements the (sum + 16) >> 5 rounding shift.
+INIT_XMM sse4
+cglobal intra_pred_ang16_27, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m3,        [r2 + 1]                   ;[16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m2,        m3, 1                      ;[x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpcklbw   m3,        m2                         ;[9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+
+    pmaddubsw   m4,        m3, [r3 - 14 * 16]         ; [2]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m0,        m3, [r3 - 12 * 16]         ; [4]
+    pmulhrsw    m0,        m7
+    packuswb    m4,        m0
+
+    pmaddubsw   m5,        m3, [r3 - 10 * 16]         ; [6]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r3 - 8 * 16]          ; [8]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r3 - 4 * 16]          ; [12]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r3 - 2 * 16]          ; [14]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m3, [r3]                   ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r3 + 4 * 16]          ; [20]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r3 + 6 * 16]          ; [22]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r3 + 8 * 16]          ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r3 + 12 * 16]         ; [28]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r3 + 14 * 16]         ; [30]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+
+; direct (non-transposed) stores of rows 8..15; the final row stores m2, the
+; unweighted shifted reference — presumably the zero-fraction row (TODO confirm)
+    movh        [r0         ], m4
+    movhps      [r0 + r1    ], m4
+    movh        [r0 + r1 * 2], m5
+    movhps      [r0 + r5    ], m5
+    lea         r0, [r0 + r1 * 4]
+    movh        [r0         ], m6
+    movhps      [r0 + r1    ], m6
+    movh        [r0 + r1 * 2], m1
+    movh        [r0 + r5    ], m2
+
+    lea         r0,        [r6 + 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+; NOTE(review): 16x16 intra prediction, mode 10 (pure horizontal, SSE4).
+; r0 = dst, r1 = dst stride, r2 = neighbours (left refs at r2 + 1 + 32),
+; r4 = filter flag (5th argument). Each left-neighbour byte is broadcast
+; across its row with pshufb against a zeroed mask (m7). Row 0 is written
+; last, at .quit: either the plain broadcast of the first left neighbour, or
+; — when r4 != 0 — that value smoothed with the top row:
+; dst(0,x) = clip(left[0] + ((top[x] - topLeft) >> 1)), done in 16-bit lanes.
+INIT_XMM sse4
+cglobal intra_pred_ang16_10, 5,6,8
+    lea         r5,        [r1 * 3]
+    pxor        m7,        m7
+
+    movu        m0,        [r2 + 1 + 32]
+    palignr     m1,        m0, 1
+    pshufb      m1,        m7
+    palignr     m2,        m0, 2
+    pshufb      m2,        m7
+    palignr     m3,        m0, 3
+    pshufb      m3,        m7
+    palignr     m4,        m0, 4
+    pshufb      m4,        m7
+    palignr     m5,        m0, 5
+    pshufb      m5,        m7
+    palignr     m6,        m0, 6
+    pshufb      m6,        m7
+
+    movu        [r0 + r1],      m1
+    movu        [r0 + r1 * 2],  m2
+    movu        [r0 + r5],      m3
+    lea         r3,             [r0 + r1 * 4]
+    movu        [r3],           m4
+    movu        [r3 + r1],      m5
+    movu        [r3 + r1 * 2],  m6
+
+    palignr     m1,        m0, 7
+    pshufb      m1,        m7
+    movhlps     m2,        m0                         ; byte 8 (palignr imm 8 equivalent)
+    pshufb      m2,        m7
+    palignr     m3,        m0, 9
+    pshufb      m3,        m7
+    palignr     m4,        m0, 10
+    pshufb      m4,        m7
+    palignr     m5,        m0, 11
+    pshufb      m5,        m7
+    palignr     m6,        m0, 12
+    pshufb      m6,        m7
+
+    movu        [r3 + r5],      m1
+    lea         r3,             [r3 + r1 * 4]
+    movu        [r3],           m2
+    movu        [r3 + r1],      m3
+    movu        [r3 + r1 * 2],  m4
+    movu        [r3 + r5],      m5
+    lea         r3,             [r3 + r1 * 4]
+    movu        [r3],           m6
+
+    palignr     m1,        m0, 13
+    pshufb      m1,        m7
+    palignr     m2,        m0, 14
+    pshufb      m2,        m7
+    palignr     m3,        m0, 15
+    pshufb      m3,        m7
+    pshufb      m0,        m7                         ; m0 = broadcast of left[0], row 0 default
+
+    movu        [r3 + r1],      m1
+    movu        [r3 + r1 * 2],  m2
+    movu        [r3 + r5],      m3
+
+; filter
+    cmp         r4w, byte 0
+    jz         .quit
+    pmovzxbw    m0,        m0
+    mova        m1,        m0
+    movu        m2,        [r2]                       ; top-left reference
+    movu        m3,        [r2 + 1]                   ; top row
+
+    pshufb      m2,        m7
+    pmovzxbw    m2,        m2
+    movhlps     m4,        m3
+    pmovzxbw    m3,        m3
+    pmovzxbw    m4,        m4
+    psubw       m3,        m2
+    psubw       m4,        m2
+    psraw       m3,        1
+    psraw       m4,        1
+    paddw       m0,        m3
+    paddw       m1,        m4
+    packuswb    m0,        m1                         ; clamp back to bytes
+.quit:
+    movu        [r0],      m0
+    RET
+
+; NOTE(review): 16x16 intra prediction, mode 26 (pure vertical, SSE4).
+; r0 = dst, r1 = dst stride, r2 = neighbours (top row at r2 + 1, left refs at
+; r2 + 32). The top row is copied to all 16 output rows. When the 5th argument
+; (bfilter) is non-zero, the left column is then smoothed:
+; dst(y,0) = clip(top[0] + ((left[y] - topLeft) >> 1)), computed in 16-bit
+; lanes and written back one byte per row with pextrb.
+; On x86-64 the flag lives in r7w; on x86-32 it is spilled to 4 bytes of stack.
+INIT_XMM sse4
+%if ARCH_X86_64 == 1
+cglobal intra_pred_ang16_26, 3,8,5
+    mov     r7, r4mp
+    %define bfilter r7w
+%else
+cglobal intra_pred_ang16_26, 5,7,5,0-4
+    %define bfilter dword[rsp]
+    mov     bfilter, r4
+%endif
+    movu        m0,             [r2 + 1]
+
+    lea         r4,             [r1 * 3]
+    lea         r3,             [r0 + r1 * 4]
+    lea         r5,             [r3 + r1 * 4]
+    lea         r6,             [r5 + r1 * 4]
+
+    movu        [r0],           m0
+    movu        [r0 + r1],      m0
+    movu        [r0 + r1 * 2],  m0
+    movu        [r0 + r4],      m0
+    movu        [r3],           m0
+    movu        [r3 + r1],      m0
+    movu        [r3 + r1 * 2],  m0
+    movu        [r3 + r4],      m0
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+
+    movu        [r6],           m0
+    movu        [r6 + r1],      m0
+    movu        [r6 + r1 * 2],  m0
+    movu        [r6 + r4],      m0
+
+; filter
+    cmp         bfilter, byte 0
+    jz         .quit
+
+    pxor        m4,        m4
+    pshufb      m0,        m4                         ; broadcast top[0]
+    pmovzxbw    m0,        m0
+    mova        m1,        m0
+    movu        m2,        [r2 + 32]
+    pinsrb      m2,        [r2], 0                    ; byte 0 = top-left reference
+    movu        m3,        [r2 + 1 + 32]              ; left column
+
+    pshufb      m2,        m4
+    pmovzxbw    m2,        m2
+    movhlps     m4,        m3
+    pmovzxbw    m3,        m3
+    pmovzxbw    m4,        m4
+    psubw       m3,        m2
+    psubw       m4,        m2
+    psraw       m3,        1
+    psraw       m4,        1
+    paddw       m0,        m3
+    paddw       m1,        m4
+    packuswb    m0,        m1                         ; clamp back to bytes
+
+    pextrb      [r0],           m0, 0
+    pextrb      [r0 + r1],      m0, 1
+    pextrb      [r0 + r1 * 2],  m0, 2
+    pextrb      [r0 + r4],      m0, 3
+    pextrb      [r3],           m0, 4
+    pextrb      [r3 + r1],      m0, 5
+    pextrb      [r3 + r1 * 2],  m0, 6
+    pextrb      [r3 + r4],      m0, 7
+    pextrb      [r5],           m0, 8
+    pextrb      [r5 + r1],      m0, 9
+    pextrb      [r5 + r1 * 2],  m0, 10
+    pextrb      [r5 + r4],      m0, 11
+    pextrb      [r6],           m0, 12
+    pextrb      [r6 + r1],      m0, 13
+    pextrb      [r6 + r1 * 2],  m0, 14
+    pextrb      [r6 + r4],      m0, 15
+.quit:
+    RET
+
+; NOTE(review): 16x16 angular intra prediction, mode 11 (SSE4) — weight
+; schedule descends [30],[28],...,[02],[00]. Uses the left references at
+; r2 + 32 (with the top-left pixel patched in via pinsrb), produces two
+; transposed 8x16 halves (fully unrolled, no loop). The zero-fraction row is
+; the unfiltered reference saved in m2 (punpcklqdq). pmulhrsw with pw_1024
+; implements the (sum + 16) >> 5 rounding shift.
+INIT_XMM sse4
+cglobal intra_pred_ang16_11, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+    movu        m3,        [r2 + 32]              ;[15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m3,        [r2], 0
+    mova        m2,        m3                     ; keep raw refs for the [00] row
+    palignr     m1,        m3, 1                  ;[15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    punpcklbw   m3,        m1                     ;[8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        m3, [r3 + 14 * 16]         ; [30]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m0,        m3, [r3 + 12 * 16]         ; [28]
+    pmulhrsw    m0,        m7
+    packuswb    m4,        m0
+
+    pmaddubsw   m5,        m3, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r3 + 8 * 16]          ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r3 + 6 * 16]          ; [22]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r3 + 4 * 16]          ; [20]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m3, [r3]                   ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r3 - 2 * 16]          ; [14]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r3 - 4 * 16]          ; [12]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r3 - 8 * 16]          ; [08]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r3 - 10 * 16]         ; [06]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r3 - 12 * 16]         ; [04]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r3 - 14 * 16]         ; [02]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    punpcklqdq  m1,        m2                         ;[00]
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+
+; second half: same schedule on the next 8 left references (r2 + 40)
+    movu        m3,        [r2 + 40]              ;[15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    mova        m2,        m3
+    palignr     m1,        m3, 1                  ;[15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    punpcklbw   m3,        m1                     ;[8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        m3, [r3 + 14 * 16]         ; [30]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m0,        m3, [r3 + 12 * 16]         ; [28]
+    pmulhrsw    m0,        m7
+    packuswb    m4,        m0
+
+    pmaddubsw   m5,        m3, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r3 + 8 * 16]          ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r3 + 6 * 16]          ; [22]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r3 + 4 * 16]          ; [20]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m3, [r3]                   ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r3 - 2 * 16]          ; [14]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r3 - 4 * 16]          ; [12]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r3 - 8 * 16]          ; [08]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r3 - 10 * 16]         ; [06]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r3 - 12 * 16]         ; [04]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r3 - 14 * 16]         ; [02]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    punpcklqdq  m1,        m2                         ;[00]
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+    RET
+
+; NOTE(review): 16x16 angular intra prediction, mode 25 (SSE4) — vertical
+; counterpart of mode 11: same descending weight schedule [30]..[02],[00],
+; reading the top references at r2. First 8 rows per pass go through the
+; transposed store; the last 8 are stored directly, with the final row taken
+; from the unweighted reference copy in m2 (zero-fraction row).
+; pmulhrsw with pw_1024 implements the (sum + 16) >> 5 rounding shift.
+INIT_XMM sse4
+cglobal intra_pred_ang16_25, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       2
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+.loop:
+    movu        m3,        [r2]                   ;[15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    mova        m2,        m3                     ; keep raw refs for the [00] row
+    palignr     m1,        m3, 1                  ;[15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    punpcklbw   m3,        m1                     ;[8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        m3, [r3 + 14 * 16]         ; [30]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m0,        m3, [r3 + 12 * 16]         ; [28]
+    pmulhrsw    m0,        m7
+    packuswb    m4,        m0
+
+    pmaddubsw   m5,        m3, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r3 + 8 * 16]          ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r3 + 6 * 16]          ; [22]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r3 + 4 * 16]          ; [20]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m3, [r3]                   ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r3 - 2 * 16]          ; [14]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r3 - 4 * 16]          ; [12]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r3 - 8 * 16]          ; [08]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r3 - 10 * 16]         ; [06]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r3 - 12 * 16]         ; [04]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r3 - 14 * 16]         ; [02]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+
+; direct stores of rows 8..15; final row is the raw reference copy (m2)
+    movh        [r0         ], m4
+    movhps      [r0 + r1    ], m4
+    movh        [r0 + r1 * 2], m5
+    movhps      [r0 + r5    ], m5
+    lea         r0, [r0 + r1 * 4]
+    movh        [r0         ], m6
+    movhps      [r0 + r1    ], m6
+    movh        [r0 + r1 * 2], m1
+    movh        [r0 + r5    ], m2
+
+    lea         r0,        [r6 + 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+; NOTE(review): 16x16 angular intra prediction, mode 12 (SSE4). A negative-
+; angle mode: the left references (r2 + 32, top-left patched in) are extended
+; with projected top-row pixels, pre-gathered into m2 through the c_mode16_12
+; shuffle mask, and shifted in one byte at a time (palignr/pslldq) as the
+; intercept crosses each reference boundary. Weights per output row follow the
+; bracketed schedule ([27],[22],[17],... etc.); pmulhrsw with pw_1024 is the
+; (sum + 16) >> 5 rounding shift. Output is written as transposed 8x8 tiles.
+INIT_XMM sse4
+cglobal intra_pred_ang16_12, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+    movu        m3,        [r2 + 32]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m3,        [r2], 0
+    punpckhbw   m0,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2]
+    pshufb      m2,        [c_mode16_12]              ; projected top refs for the negative side
+
+    palignr     m0,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        m0, [r4 + 11 * 16]         ; [27]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m0, [r4 + 6 * 16]          ; [22]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    pmaddubsw   m5,        m0, [r4 + 1 * 16]          ; [17]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r4 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m0, [r4 - 9 * 16]          ; [7]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        [r4 - 14 * 16]             ; [2]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m3,        m2, 15                     ; shift one projected ref in
+
+    pmaddubsw   m1,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 2 * 16]          ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 7 * 16]          ; [09]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m3,       m2, 14                      ; next projected ref
+
+    pmaddubsw   m6,        m3, [r4 + 15 * 16]         ; [31]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 + 5 * 16]          ; [21]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+
+; second half: references 8..16 of the left column
+    movu        m1,        [r2 + 1 + 32]              ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 x x x x x x x]
+
+    pmaddubsw   m4,        m3, [r4 + 11 * 16]         ; [27]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 + 6 * 16]          ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 + 1 * 16]          ; [17]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 9 * 16]          ; [7]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 14 * 16]         ; [2]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 2 * 16]          ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 7 * 16]          ; [09]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1                           ; advance the projected-ref queue
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 15 * 16]         ; [31]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 + 5 * 16]          ; [21]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+    RET
+
+; NOTE(review): 16x16 angular intra prediction, mode 24 (SSE4) — vertical
+; counterpart of mode 12: identical weight schedule, but reads the top
+; references at r2 (projecting left refs from r2 + 32 via c_mode16_12) and
+; stores non-transposed halves (TRANSPOSE_STORE_8x8 with last flag 0).
+; pmulhrsw with pw_1024 implements the (sum + 16) >> 5 rounding shift.
+INIT_XMM sse4
+cglobal intra_pred_ang16_24, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+    movu        m3,        [r2]                       ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    punpckhbw   m0,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2 + 32]
+    pshufb      m2,        [c_mode16_12]              ; projected left refs for the negative side
+
+    palignr     m0,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        m0, [r4 + 11 * 16]         ; [27]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m0, [r4 + 6 * 16]          ; [22]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+
+    pmaddubsw   m5,        m0, [r4 + 1 * 16]          ; [17]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r4 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m0, [r4 - 9 * 16]          ; [7]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        [r4 - 14 * 16]             ; [2]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m3,        m2, 15                     ; shift one projected ref in
+
+    pmaddubsw   m1,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 2 * 16]          ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 7 * 16]          ; [09]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m3,       m2, 14                      ; next projected ref
+
+    pmaddubsw   m6,        m3, [r4 + 15 * 16]         ; [31]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 + 5 * 16]          ; [21]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    lea         r0,        [r6 + 8]
+
+; second half: references 8..16 of the top row
+    movu        m1,        [r2 + 1]                   ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 x x x x x x x]
+
+    pmaddubsw   m4,        m3, [r4 + 11 * 16]         ; [27]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 + 6 * 16]          ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 + 1 * 16]          ; [17]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 9 * 16]          ; [7]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 14 * 16]         ; [2]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 2 * 16]          ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 7 * 16]          ; [09]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1                           ; advance the projected-ref queue
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 15 * 16]         ; [31]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 + 5 * 16]          ; [21]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+    RET
+
+; NOTE(review): 16x16 angular intra prediction, mode 13 (SSE4). Steeper
+; negative angle than mode 12: the reference window advances roughly every
+; 2-3 rows, so projected top refs (gathered into m2 through c_mode16_13) are
+; shifted in more frequently (palignr 15 / palignr 14 after pslldq). Weight
+; schedule per row: [23],[14],[05],[28],[19],[10],[01],[24],... Output is
+; written as transposed 8x8 tiles; pmulhrsw with pw_1024 is the
+; (sum + 16) >> 5 rounding shift.
+INIT_XMM sse4
+cglobal intra_pred_ang16_13, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+    movu        m3,        [r2 + 32]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m3,        [r2], 0
+    punpckhbw   m5,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2]
+    pshufb      m2,        [c_mode16_13]              ; projected top refs for the negative side
+
+    palignr     m5,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        m5, [r4 + 7 * 16]          ; [23]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m0,        m5, [r4 - 2 * 16]          ; [14]
+    pmulhrsw    m0,        m7
+    packuswb    m4,        m0
+
+    pmaddubsw   m5,        [r4 - 11 * 16]             ; [05]
+    pmulhrsw    m5,        m7
+
+    palignr     m3,        m2, 15                     ; shift one projected ref in
+
+    pmaddubsw   m6,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 6 * 16]          ; [10]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m1,        m7
+
+    palignr     m3,       m2, 14                      ; next projected ref
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 16]              ; [15]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 10 * 16]         ; [06]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pslldq      m2,       1                           ; advance the projected-ref queue
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 + 4  * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 9 * 16]          ; [25]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+
+; second half: references 8..16 of the left column, same schedule
+    movu        m1,        [r2 + 1 + 32]              ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 x x x x x x x]
+
+    pmaddubsw   m4,        m3, [r4 + 7 * 16]          ; [23]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 2 * 16]          ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m5,        m7
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 6 * 16]          ; [10]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 16]              ; [15]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 10 * 16]         ; [06]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 + 4  * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 9 * 16]          ; [25]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+    RET
+
+INIT_XMM sse4
+; 16x16 angular intra prediction, mode 23 (SSE4 path).
+; cglobal args/regs: 4 arguments, 7 GPRs, 8 XMM registers.
+; NOTE(review): roles inferred from usage in this hunk -- r0 = dst,
+; r1 = dst stride (r5 is set to 3*stride), r2 = reference samples
+; ([r2] main array, [r2 + 32] second array reindexed through the
+; c_mode16_13 shuffle, which mode 23 shares with its mirror mode 13).
+; Confirm against the x265 intra-pred calling convention.
+cglobal intra_pred_ang16_23, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]      ; r4 -> center of the per-fraction filter table
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0                         ; remember dst base for the second half
+    mova        m7,        [pw_1024]                  ; pmulhrsw rounding/scale constant
+
+    movu        m3,        [r2]                       ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    punpckhbw   m5,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2 + 32]
+    pshufb      m2,        [c_mode16_13]
+
+    palignr     m5,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        m5, [r4 + 7 * 16]          ; [23]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m0,        m5, [r4 - 2 * 16]          ; [14]
+    pmulhrsw    m0,        m7
+    packuswb    m4,        m0
+
+    pmaddubsw   m5,        [r4 - 11 * 16]             ; [05]
+    pmulhrsw    m5,        m7
+
+    palignr     m3,        m2, 15
+
+    pmaddubsw   m6,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 6 * 16]          ; [10]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m1,        m7
+
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    ; flush first 8x8 batch (TRANSPOSE_STORE_8x8 is defined elsewhere in this file)
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 16]              ; [15]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 10 * 16]         ; [06]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 + 4  * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 9 * 16]          ; [25]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    ; second half: dst moves 8 pixels right, source window starts at ref byte 1
+    lea         r0,        [r6 + 8]
+
+    movu        m1,        [r2 + 1]                   ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 x x x x x x x x]
+
+    pmaddubsw   m4,        m3, [r4 + 7 * 16]          ; [23]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 2 * 16]          ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m5,        m7
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 6 * 16]          ; [10]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 16]              ; [15]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 10 * 16]         ; [06]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 + 4  * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 9 * 16]          ; [25]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+    RET
+
+INIT_XMM sse4
+; 16x16 angular intra prediction, mode 14 (SSE4 path).
+; cglobal args/regs: 4 arguments, 7 GPRs, 8 XMM registers.
+; NOTE(review): roles inferred from usage -- r0 = dst, r1 = dst stride
+; (r5 = 3*stride, r6 = dst + 4*stride), r2 = reference samples:
+; the main array is read from [r2 + 32] (with [r2] patched into lane 0
+; via pinsrb) and [r2] is reindexed through the c_mode16_14 shuffle.
+; Confirm against the x265 intra-pred calling convention.
+cglobal intra_pred_ang16_14, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]      ; r4 -> center of the per-fraction filter table
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]                  ; pmulhrsw rounding/scale constant
+
+    movu        m3,        [r2 + 32]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m3,        [r2], 0
+    punpckhbw   m5,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2]
+    pshufb      m2,        [c_mode16_14]
+
+    palignr     m5,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        m5, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        [r4 - 10 * 16]             ; [06]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    palignr     m3,        m2, 15
+
+    pmaddubsw   m5,        m3, [r4 + 9 * 16]          ; [25]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 15 * 16]         ; [31]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 + 2 * 16]          ; [18]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    ; flush first 8x8 batch (TRANSPOSE_STORE_8x8 is defined elsewhere in this file)
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 + 16]              ; [17]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 7 * 16]          ; [23]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 - 6  * 16]         ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    ; second half: dst pointers advance to rows 8..15, source window starts at ref byte 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+
+    movu        m1,        [r2 + 1 + 32]              ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 x x x x x x x x]
+
+    pmaddubsw   m4,        m3, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 10 * 16]         ; [06]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 9 * 16]          ; [25]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 15 * 16]         ; [31]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 + 2 * 16]          ; [18]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 + 16]              ; [17]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 7 * 16]          ; [23]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 - 6  * 16]         ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+    RET
+
+INIT_XMM sse4
+; 16x16 angular intra prediction, mode 22 (SSE4 path).
+; Mirror of mode 14: the same fraction sequence ([19],[06],[25],...)
+; and the same c_mode16_14 shuffle table are used, but the main ref
+; array is read from [r2] and the secondary from [r2 + 32], and the
+; store macro is invoked with its second argument 0 instead of 1.
+; NOTE(review): register roles inferred from usage -- r0 = dst,
+; r1 = dst stride, r2 = reference samples -- confirm against the
+; x265 intra-pred calling convention.
+cglobal intra_pred_ang16_22, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]      ; r4 -> center of the per-fraction filter table
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0                         ; remember dst base for the second half
+    mova        m7,        [pw_1024]                  ; pmulhrsw rounding/scale constant
+
+    movu        m3,        [r2]                       ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    punpckhbw   m5,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2 + 32]
+    pshufb      m2,        [c_mode16_14]
+
+    palignr     m5,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        m5, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        [r4 - 10 * 16]             ; [06]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    palignr     m3,        m2, 15
+
+    pmaddubsw   m5,        m3, [r4 + 9 * 16]          ; [25]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 15 * 16]         ; [31]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 + 2 * 16]          ; [18]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    ; flush first 8x8 batch (TRANSPOSE_STORE_8x8 is defined elsewhere in this file)
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 + 16]              ; [17]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 7 * 16]          ; [23]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 - 6  * 16]         ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    ; second half: dst moves 8 pixels right, source window starts at ref byte 1
+    lea         r0,        [r6 + 8]
+
+    movu        m1,        [r2 + 1]                   ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 x x x x x x x x]
+
+    pmaddubsw   m4,        m3, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 10 * 16]         ; [06]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 9 * 16]          ; [25]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 15 * 16]         ; [31]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 + 2 * 16]          ; [18]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 + 16]              ; [17]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 7 * 16]          ; [23]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m3, [r4 - 6  * 16]         ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+    RET
+
+INIT_XMM sse4
+; 16x16 angular intra prediction, mode 15 (SSE4 path).
+; cglobal args/regs: 4 arguments, 7 GPRs, 8 XMM registers.
+; NOTE(review): roles inferred from usage -- r0 = dst, r1 = dst stride
+; (r5 = 3*stride, r6 = dst + 4*stride), r2 = reference samples: main
+; array at [r2 + 32] (lane 0 patched from [r2] via pinsrb), secondary
+; array at [r2] reindexed through the c_mode16_15 shuffle. Confirm
+; against the x265 intra-pred calling convention.
+cglobal intra_pred_ang16_15, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]      ; r4 -> center of the per-fraction filter table
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]                  ; pmulhrsw rounding/scale constant
+
+    movu        m3,        [r2 + 32]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m3,        [r2], 0
+    punpckhbw   m4,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2]
+    pshufb      m2,        [c_mode16_15]
+
+    palignr     m4,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        [r4 - 16]                  ; [15]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 15
+
+    pmaddubsw   m5,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 3 * 16]          ; [13]
+    pmulhrsw    m5,        m7
+
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 7 * 16]          ; [09]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    ; flush first 8x8 batch (TRANSPOSE_STORE_8x8 is defined elsewhere in this file)
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 9  * 16]         ; [07]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 4  * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 13 * 16]         ; [03]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 2  * 16]         ; [18]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    ; second half: dst pointers advance to rows 8..15, source window starts at ref byte 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+
+    movu        m1,        [r2 + 1 + 32]              ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 0 0 0 0 0 0 0 15L]
+
+    pmaddubsw   m4,        m3, [r4 - 16]              ; [15]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 3 * 16]          ; [13]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 -  5 * 16]         ; [11]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 7  * 16]         ; [09]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 9  * 16]         ; [07]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 +  4 * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 13 * 16]         ; [03]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 2  * 16]         ; [18]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+    RET
+
+INIT_XMM sse4
+; 16x16 angular intra prediction, mode 21 (SSE4 path).
+; Mirror of mode 15: the same fraction sequence ([15],[30],[13],...)
+; and the same c_mode16_15 shuffle table are used, but the arrays are
+; swapped (main at [r2], secondary at [r2 + 32] with lane 0 patched
+; from [r2] via pinsrb) and the store macro is invoked with its
+; second argument 0 instead of 1.
+; NOTE(review): register roles inferred from usage -- r0 = dst,
+; r1 = dst stride, r2 = reference samples -- confirm against the
+; x265 intra-pred calling convention.
+cglobal intra_pred_ang16_21, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]      ; r4 -> center of the per-fraction filter table
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0                         ; remember dst base for the second half
+    mova        m7,        [pw_1024]                  ; pmulhrsw rounding/scale constant
+
+    movu        m3,        [r2]                       ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    punpckhbw   m4,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2 + 32]
+    pinsrb      m2,        [r2], 0
+    pshufb      m2,        [c_mode16_15]
+
+    palignr     m4,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        [r4 - 16]                  ; [15]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 15
+
+    pmaddubsw   m5,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 3 * 16]          ; [13]
+    pmulhrsw    m5,        m7
+
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 7 * 16]          ; [09]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    ; flush first 8x8 batch (TRANSPOSE_STORE_8x8 is defined elsewhere in this file)
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 9  * 16]         ; [07]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 4  * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 13 * 16]         ; [03]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 2  * 16]         ; [18]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    ; second half: dst moves 8 pixels right, source window starts at ref byte 1
+    lea         r0,        [r6 + 8]
+
+    movu        m1,        [r2 + 1]                   ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 0 0 0 0 0 0 0 15L]
+
+    pmaddubsw   m4,        m3, [r4 - 16]              ; [15]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 3 * 16]          ; [13]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 -  5 * 16]         ; [11]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pmaddubsw   m1,        m3, [r4 - 7  * 16]         ; [09]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 9  * 16]         ; [07]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 +  4 * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pmaddubsw   m6,        m3, [r4 - 13 * 16]         ; [03]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 2  * 16]         ; [18]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+    RET
+
+INIT_XMM sse4
+cglobal intra_pred_ang16_16, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+    movu        m3,        [r2 + 32]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m3,        [r2], 0
+    punpckhbw   m4,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2]
+    pshufb      m2,        [c_mode16_16]              ; [2, 3, 5, 6, 8, 9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8]
+    palignr     m4,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        [r4 - 5  * 16]             ; [11]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 15
+
+    pmaddubsw   m5,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m5,        m7
+
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 - 4  * 16]         ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1                           ; [3, 5, 6, 8, 9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 7  * 16]         ; [23]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pslldq      m2,       1                           ; [5, 6, 8, 9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 - 3  * 16]         ; [13]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1                           ; [6, 8, 9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 13 * 16]         ; [03]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1                           ; [8, 9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 - 2  * 16]         ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pslldq      m2,       1                           ; [9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 9  * 16]         ; [25]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1                           ; [11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 - 16]              ; [15]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1                           ; [12, 14, 15, 0, 2, 3, 5, 6, 8, x, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1                           ; [14, 15, 0, 2, 3, 5, 6, 8, x, x, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+
+    movu        m1,        [r2 + 1 + 32]              ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    palignr     m2,        m2, 6                      ; [x, x, x, x, x, x, 14, 15, 0, 2, 3, 5, 6, 8, x, x]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 0, 2, 3, 5, 6, 8, x, x]
+
+    pmaddubsw   m4,        m3, [r4 - 5  * 16]         ; [11]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 - 4  * 16]         ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 7  * 16]         ; [23]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m0,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 - 3  * 16]         ; [13]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 13 * 16]         ; [03]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 - 2  * 16]         ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 +  9 * 16]         ; [25]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 - 16]              ; [15]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+    RET
+
+INIT_XMM sse4
+; Angular intra prediction, mode 20, 16x16 block (SSE4.1).
+; r0 = dst, r1 = dst stride, r2 = reference pixels (second reference row at
+; r2 + 32, reordered through the c_mode16_16 shuffle constant).
+; Each output row is a two-tap interpolation of neighbouring reference
+; samples: pmaddubsw with a coefficient pair from ang_table, then pmulhrsw
+; with pw_1024 for the rounding shift.  The trailing ';[NN]' comments give
+; the fractional weight selected from ang_table for that row.
+; Rows are emitted through TRANSPOSE_STORE_8x8 with transpose flag 0
+; (direct row store), two 8x8 sub-blocks per 8x16 stripe.
+cglobal intra_pred_ang16_20, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+    movu        m3,        [r2]                       ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    punpckhbw   m4,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2 + 32]
+    pinsrb      m2,        [r2], 0
+    pshufb      m2,        [c_mode16_16]              ; [2, 3, 5, 6, 8, 9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8]
+    palignr     m4,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        [r4 - 5  * 16]             ; [11]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 15
+
+    pmaddubsw   m5,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m5,        m7
+
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 - 4  * 16]         ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1                           ; [3, 5, 6, 8, 9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 7  * 16]         ; [23]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pslldq      m2,       1                           ; [5, 6, 8, 9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 - 3  * 16]         ; [13]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1                           ; [6, 8, 9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 13 * 16]         ; [03]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1                           ; [8, 9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 - 2  * 16]         ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pslldq      m2,       1                           ; [9, 11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 9  * 16]         ; [25]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1                           ; [11, 12, 14, 15, 0, 2, 3, 5, 6, 8, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 - 16]              ; [15]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1                           ; [12, 14, 15, 0, 2, 3, 5, 6, 8, x, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1                           ; [14, 15, 0, 2, 3, 5, 6, 8, x, x, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    ; advance to the right-hand 8x16 half of the block
+    lea         r0,        [r6 + 8]
+
+    movu        m1,        [r2 + 1]                   ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    palignr     m2,        m2, 6                      ; [x, x, x, x, x, x, 14, 15, 0, 2, 3, 5, 6, 8, x, x]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 0, 2, 3, 5, 6, 8, x, x]
+
+    pmaddubsw   m4,        m3, [r4 - 5  * 16]         ; [11]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 15 * 16]         ; [01]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 - 4  * 16]         ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 7  * 16]         ; [23]
+    pmulhrsw    m6,        m7
+
+    pmaddubsw   m0,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 - 3  * 16]         ; [13]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pmaddubsw   m4,        m3, [r4 - 13 * 16]         ; [03]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 - 2  * 16]         ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 +  9 * 16]         ; [25]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 - 16]              ; [15]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pmaddubsw   m1,        m3, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m3,        [r4]                       ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+    RET
+
+INIT_XMM sse4
+; Angular intra prediction, mode 17, 16x16 block (SSE4.1).
+; r0 = dst, r1 = dst stride, r2 = reference pixels (main references at
+; r2 + 32, side references at r2 reordered through the c_mode16_17
+; shuffle constant).
+; Per row: two-tap pmaddubsw with an ang_table coefficient pair, then
+; pmulhrsw with pw_1024 as the rounding shift; the ';[NN]' comments give
+; the fractional weight for that row.  Output goes through
+; TRANSPOSE_STORE_8x8 with transpose flag 1, so the computed 8x8 tiles
+; are transposed on store; the output pointer is advanced by rows
+; (r1 * 4 / r1 * 8) between the two 16x8 halves.
+cglobal intra_pred_ang16_17, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+    movu        m3,        [r2 + 32]                  ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    pinsrb      m3,        [r2], 0
+    punpckhbw   m4,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2]
+    pshufb      m2,        [c_mode16_17]              ; [1, 2, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4]
+    palignr     m4,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        [r4 - 10 * 16]             ; [06]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 15
+
+    pmaddubsw   m5,        m3, [r4 -  4 * 16]         ; [12]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 2  * 16]         ; [18]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1                           ; [2, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, x]
+    pinsrb      m2,       [r2 + 5], 0                 ; [2, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 8  * 16]         ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1                           ; [4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pslldq      m2,       1                           ; [5, 6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 - 6  * 16]         ; [10]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1                           ; [6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4]                   ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pslldq      m2,       1                           ; [7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m4,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1                           ; [9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1                           ; [10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 -  8 * 16]         ; [08]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1                           ; [11, 12, 14, 15, 0, 1, 2, 4, 5, x, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 - 2  * 16]         ; [14]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1                           ; [12, 14, 15, 0, 1, 2, 4, 5, x, x, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 4  * 16]         ; [20]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1                           ; [14, 15, 0, 1, 2, 4, 5, x, x, x, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4 - 16 * 16]             ; [00]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+
+    ; move the output pointers down 8 rows for the second 16x8 half
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+
+    movu        m1,        [r2 + 1 + 32]              ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    palignr     m2,        m2, 6                      ; [x, x, x, x, x, x, 14, 15, 0, 1, 2, 4, 5, x, x, x]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 0, 1, 2, 4, 5, x, x, x]
+
+    pmaddubsw   m4,        m3, [r4 - 10 * 16]         ; [06]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m5,        m3, [r4 - 4  * 16]         ; [12]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 2  * 16]         ; [18]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 8  * 16]         ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 - 6  * 16]         ; [10]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4]                   ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 1, m4, m5, m6, m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m4,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 -  8 * 16]         ; [08]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 -  2 * 16]         ; [14]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 4  * 16]         ; [20]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4 - 16 * 16]             ; [00]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 1, m4, m5, m6, m1
+    RET
+
+INIT_XMM sse4
+; Angular intra prediction, mode 19, 16x16 block (SSE4.1).
+; Mirror of intra_pred_ang16_17: the same coefficient schedule, but the
+; main references are taken from r2 and the side references from r2 + 32
+; (reordered through the c_mode16_17 shuffle constant), and the result is
+; stored untransposed (TRANSPOSE_STORE_8x8 flag 0), advancing by 8 columns
+; (r6 + 8) between the two 8x16 halves instead of by rows.
+; Per row: pmaddubsw with an ang_table coefficient pair + pmulhrsw with
+; pw_1024; the ';[NN]' comments give the fractional weight for that row.
+cglobal intra_pred_ang16_19, 4,7,8
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+    movu        m3,        [r2]                       ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    punpckhbw   m4,        m3, m3                     ; [15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8]
+    punpcklbw   m3,        m3                         ; [7 7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    movu        m2,        [r2 + 32]
+    pinsrb      m2,        [r2], 0
+    pshufb      m2,        [c_mode16_17]              ; [1, 2, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4]
+    palignr     m4,        m3, 1                      ; [8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0]
+
+    pmaddubsw   m4,        [r4 - 10 * 16]             ; [06]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 15
+
+    pmaddubsw   m5,        m3, [r4 -  4 * 16]         ; [12]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 2  * 16]         ; [18]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1                           ; [2, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, x]
+    pinsrb      m2,       [r2 + 5 + 32], 0            ; [2, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 8  * 16]         ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1                           ; [4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pslldq      m2,       1                           ; [5, 6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 - 6  * 16]         ; [10]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1                           ; [6, 7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4]                   ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pslldq      m2,       1                           ; [7, 9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m4,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1                           ; [9, 10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1                           ; [10, 11, 12, 14, 15, 0, 1, 2, 4, 5, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 -  8 * 16]         ; [08]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1                           ; [11, 12, 14, 15, 0, 1, 2, 4, 5, x, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 - 2  * 16]         ; [14]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1                           ; [12, 14, 15, 0, 1, 2, 4, 5, x, x, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 4  * 16]         ; [20]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1                           ; [14, 15, 0, 1, 2, 4, 5, x, x, x, x, x, x, x, x, x]
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4 - 16 * 16]             ; [00]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+
+    ; advance to the right-hand 8x16 half of the block
+    lea         r0,        [r6 + 8]
+
+    movu        m1,        [r2 + 1]                   ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    pslldq      m3,        m1, 1                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 x]
+    punpckhbw   m3,        m1                         ; [16 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8]
+    palignr     m2,        m2, 6                      ; [x, x, x, x, x, x, 14, 15, 0, 1, 2, 4, 5, x, x, x]
+    movlhps     m2,        m1                         ; [8 7 6 5 4 3 2 1 0, 1, 2, 4, 5, x, x, x]
+
+    pmaddubsw   m4,        m3, [r4 - 10 * 16]         ; [06]
+    pmulhrsw    m4,        m7
+
+    palignr     m3,        m2, 14
+
+    pmaddubsw   m5,        m3, [r4 - 4  * 16]         ; [12]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 2  * 16]         ; [18]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 8  * 16]         ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m3, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 - 6  * 16]         ; [10]
+    pmulhrsw    m1,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m0,        m3, [r4]                   ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, 0, m4, m5, m6, m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m4,        m3, [r4 + 6  * 16]         ; [22]
+    pmulhrsw    m4,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m5,        m3, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+
+    pmaddubsw   m5,        m3, [r4 - 14 * 16]         ; [02]
+    pmulhrsw    m5,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 -  8 * 16]         ; [08]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m6,        m3, [r4 -  2 * 16]         ; [14]
+    pmulhrsw    m6,        m7
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 4  * 16]         ; [20]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+
+    pslldq      m2,       1
+    palignr     m3,       m2, 14
+
+    pmaddubsw   m1,        m3, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        [r4 - 16 * 16]             ; [00]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, 0, m4, m5, m6, m1
+    RET
+
+INIT_XMM sse4
+; Angular intra prediction, mode 18 (pure diagonal), 16x16 block.
+; No interpolation is required for this mode: every row equals the row
+; above shifted by one reference sample, so the block is generated with
+; palignr alone, merging the two reference rows (r2 and r2 + 32, the
+; latter reordered through the c_mode16_18 shuffle constant).
+; r0 = dst, r1 = dst stride, r2 = reference pixels.
+cglobal intra_pred_ang16_18, 4,5,3
+    movu        m0,         [r2]
+    movu        m1,         [r2 + 32]
+    mova        m2,         [c_mode16_18]
+    pshufb      m1,         m2
+
+    ; r2 is dead as a pointer from here on and is reused as 2 * stride
+    lea         r2,         [r1 * 2]
+    lea         r3,         [r1 * 3]
+    lea         r4,         [r1 * 4]
+    movu        [r0],       m0
+    palignr     m2,         m0, m1, 15
+    movu        [r0 + r1],  m2
+    palignr     m2,         m0, m1, 14
+    movu        [r0 + r2],  m2
+    palignr     m2,         m0, m1, 13
+    movu        [r0 + r3],  m2
+    lea         r0,         [r0 + r4]
+    palignr     m2,         m0, m1, 12
+    movu        [r0],       m2
+    palignr     m2,         m0, m1, 11
+    movu        [r0 + r1],  m2
+    palignr     m2,         m0, m1, 10
+    movu        [r0 + r2],  m2
+    palignr     m2,         m0, m1, 9
+    movu        [r0 + r3],  m2
+    lea         r0,         [r0 + r4]
+    palignr     m2,         m0, m1, 8
+    movu        [r0],       m2
+    palignr     m2,         m0, m1, 7
+    movu        [r0 + r1],  m2
+    palignr     m2,         m0, m1, 6
+    movu        [r0 + r2],  m2
+    palignr     m2,         m0, m1, 5
+    movu        [r0 + r3],  m2
+    lea         r0,         [r0 + r4]
+    palignr     m2,         m0, m1, 4
+    movu        [r0],       m2
+    palignr     m2,         m0, m1, 3
+    movu        [r0 + r1],  m2
+    palignr     m2,         m0, m1, 2
+    movu        [r0 + r2],  m2
+    ; last row: shift in place, m0 is no longer needed afterwards
+    palignr     m0,         m1, 1
+    movu        [r0 + r3],  m0
+    RET
+
+; Process Intra32x32, input 8x8 in [m0, m1, m2, m3, m4, m5, m6, m7], output 8x8
+; Parameters:
+;   %1  col       - 8-pixel column index; store addresses add %1 * 8
+;   %2  transpose - 1 = transpose the 8x8 result before storing, 0 = row store
+;   %3..%10       - ang_table coefficient offsets (in 16-byte rows) for the
+;                   8 result rows; an offset of 0 skips the filtering step
+;                   (pshufb [r3] / pmaddubsw [r4 + off*16] / pmulhrsw pw_1024)
+;                   and passes the register content through instead
+; Register use: r3 = shuffle mask, r4 = ang_table base, r0/r6 = dst rows,
+; r1 = stride, r5 = 3 * stride; m1 is reloaded with pw_1024 mid-way once its
+; input value has been consumed.
+; NOTE(review): in the %3/%5/%7/%9 == 0 paths the even registers (m0/m2/m4/m6)
+; are used as-is with no pmovzxbw, unlike the odd registers -- presumably the
+; caller pre-widens them to words in that case; confirm against call sites.
+%macro PROC32_8x8 10  ; col4, transpose[0/1] c0, c1, c2, c3, c4, c5, c6, c7
+  %if %3 == 0
+  %else
+    pshufb      m0, [r3]
+    pmaddubsw   m0, [r4 + %3 * 16]
+    pmulhrsw    m0, [pw_1024]
+  %endif
+  %if %4 == 0
+    pmovzxbw    m1, m1
+  %else
+    pshufb      m1, [r3]
+    pmaddubsw   m1, [r4 + %4 * 16]
+    pmulhrsw    m1, [pw_1024]
+  %endif
+  %if %3 == 0
+    packuswb    m1, m1
+    movlhps     m0, m1
+  %else
+    packuswb    m0, m1
+  %endif
+    mova        m1, [pw_1024]
+  %if %5 == 0
+  %else
+    pshufb      m2, [r3]
+    pmaddubsw   m2, [r4 + %5 * 16]
+    pmulhrsw    m2, m1
+  %endif
+  %if %6 == 0
+    pmovzxbw    m3, m3
+  %else
+    pshufb      m3, [r3]
+    pmaddubsw   m3, [r4 + %6 * 16]
+    pmulhrsw    m3, m1
+  %endif
+  %if %5 == 0
+    packuswb    m3, m3
+    movlhps     m2, m3
+  %else
+    packuswb    m2, m3
+  %endif
+  %if %7 == 0
+  %else
+    pshufb      m4, [r3]
+    pmaddubsw   m4, [r4 + %7 * 16]
+    pmulhrsw    m4, m1
+  %endif
+  %if %8 == 0
+    pmovzxbw    m5, m5
+  %else
+    pshufb      m5, [r3]
+    pmaddubsw   m5, [r4 + %8 * 16]
+    pmulhrsw    m5, m1
+  %endif
+  %if %7 == 0
+    packuswb    m5, m5
+    movlhps     m4, m5
+  %else
+    packuswb    m4, m5
+  %endif
+  %if %9 == 0
+  %else
+    pshufb      m6, [r3]
+    pmaddubsw   m6, [r4 + %9 * 16]
+    pmulhrsw    m6, m1
+  %endif
+  %if %10 == 0
+    pmovzxbw    m7, m7
+  %else
+    pshufb      m7, [r3]
+    pmaddubsw   m7, [r4 + %10 * 16]
+    pmulhrsw    m7, m1
+  %endif
+  %if %9 == 0
+    packuswb    m7, m7
+    movlhps     m6, m7
+  %else
+    packuswb    m6, m7
+  %endif
+
+  %if %2 == 1
+    ; transpose the four packed row pairs into 8 output columns
+    punpckhbw   m1,        m0, m2
+    punpcklbw   m0,        m2
+    punpckhbw   m3,        m0, m1
+    punpcklbw   m0,        m1
+
+    punpckhbw   m1,        m4, m6
+    punpcklbw   m4,        m6
+    punpckhbw   m6,        m4, m1
+    punpcklbw   m4,        m1
+
+    punpckhdq   m2,        m0, m4
+    punpckldq   m0,        m4
+    punpckldq   m4,        m3, m6
+    punpckhdq   m3,        m6
+
+    movh        [r0 +       + %1 * 8], m0
+    movhps      [r0 +  r1   + %1 * 8], m0
+    movh        [r0 +  r1*2 + %1 * 8], m2
+    movhps      [r0 +  r5   + %1 * 8], m2
+    movh        [r6         + %1 * 8], m4
+    movhps      [r6 +  r1   + %1 * 8], m4
+    movh        [r6 +  r1*2 + %1 * 8], m3
+    movhps      [r6 +  r5   + %1 * 8], m3
+  %else
+    ; untransposed path: store 8 rows directly, advancing r0 half-way
+    movh        [r0         ], m0
+    movhps      [r0 + r1    ], m0
+    movh        [r0 + r1 * 2], m2
+    movhps      [r0 + r5    ], m2
+    lea         r0, [r0 + r1 * 4]
+    movh        [r0         ], m4
+    movhps      [r0 + r1    ], m4
+    movh        [r0 + r1 * 2], m6
+    movhps      [r0 + r5    ], m6
+  %endif
+%endmacro
+
+%macro MODE_3_33 1
+    movu        m0,        [r2 + 1]                   ; [16 15 14 13 12 11 10 9  8 7 6 5 4 3 2 1]
+    palignr     m1,        m0, 1                      ; [ x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+    punpckhbw   m2,        m0, m1                     ; [x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m0,        m1                         ; [9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m1,        m2, m0, 2                  ; [10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2]
+    pmaddubsw   m4,        m0, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 4 * 16]              ; [20]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    palignr     m5,        m2, m0, 4
+    pmaddubsw   m5,        [r3 - 2 * 16]              ; [14]
+    pmulhrsw    m5,        m7
+    palignr     m6,        m2, m0, 6
+    pmaddubsw   m6,        [r3 - 8 * 16]              ; [ 8]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    palignr     m1,        m2, m0, 8
+    pmaddubsw   m6,        m1, [r3 - 14 * 16]         ; [ 2]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    palignr     m1,        m2, m0, 10
+    pmaddubsw   m1,        [r3 + 6 * 16]              ; [22]
+    pmulhrsw    m1,        m7
+    palignr     m2,        m0, 12
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    movu        m0,        [r2 + 8]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m5,        m2, m0, 2
+    pmaddubsw   m4,        m0, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m5, [r3 - 12 * 16]         ; [04]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    pmaddubsw   m5,        [r3 + 14 * 16]             ; [30]
+    pmulhrsw    m5,        m7
+    palignr     m6,        m2, m0, 4
+    pmaddubsw   m6,        [r3 + 8 * 16]              ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    palignr     m1,        m2, m0, 6
+    pmaddubsw   m6,        m1, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m6,        m7
+    palignr     m1,        m2, m0, 8
+    pmaddubsw   m1,        [r3 - 4 * 16]              ; [12]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    palignr     m1,        m2, m0, 10
+    pmaddubsw   m1,        [r3 - 10 * 16]             ; [06]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    movhps      m1,        [r2 + 14]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+
+    movu        m0,        [r2 + 14]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+    pmaddubsw   m4,        m0, [r3 + 10 * 16]         ; [26]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 4 * 16]              ; [20]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    palignr     m5,        m2, m0, 4
+    pmaddubsw   m5,        [r3 - 2 * 16]              ; [14]
+    pmulhrsw    m5,        m7
+    palignr     m6,        m2, m0, 6
+    pmaddubsw   m6,        [r3 - 8 * 16]              ; [ 8]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    palignr     m1,        m2, m0, 8
+    pmaddubsw   m6,        m1, [r3 - 14 * 16]         ; [ 2]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    palignr     m1,        m2, m0, 10
+    pmaddubsw   m1,        [r3 + 6 * 16]              ; [22]
+    pmulhrsw    m1,        m7
+    palignr     m2,        m0, 12
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+
+    movu        m0,        [r2 + 21]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m5,        m2, m0, 2
+    pmaddubsw   m4,        m0, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        m5, [r3 - 12 * 16]         ; [04]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    pmaddubsw   m5,        [r3 + 14 * 16]             ; [30]
+    pmulhrsw    m5,        m7
+    palignr     m6,        m2, m0, 4
+    pmaddubsw   m6,        [r3 + 8 * 16]              ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    palignr     m1,        m2, m0, 6
+    pmaddubsw   m6,        m1, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m6,        m7
+    palignr     m1,        m2, m0, 8
+    pmaddubsw   m1,        [r3 - 4 * 16]              ; [12]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    palignr     m1,        m2, m0, 10
+    pmaddubsw   m1,        [r3 - 10 * 16]             ; [06]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    movhps      m1,        [r2 + 27]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
+%macro MODE_4_32 1
+; Angular intra prediction kernel — presumably HEVC angular modes 4 and 32
+; (%1 selects the variant and is forwarded to TRANSPOSE_STORE_8x8; TODO
+; confirm against the caller). Produces a 32-row strip as four 8x8 tiles.
+; Per tile: load reference bytes from [r2 + off], interleave each byte with
+; its neighbour (palignr by 1 + punpcklbw/punpckhbw) so pmaddubsw can blend
+; adjacent pixels with the weight rows at [r3 +/- k*16] (bracketed numbers
+; are the fractional weights, i.e. 16 + k), round with pmulhrsw against the
+; constant in m7, pack to bytes, then transpose-store. NOTE(review): r2, r3
+; and m7 are set up by the enclosing function, not visible here.
+    ; ---- tile 0: reference window starts at [r2 + 1]
+    movu        m0,        [r2 + 1]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+    mova        m5,        m1
+    pmaddubsw   m4,        m0, [r3 + 5 * 16]          ; [21]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 - 6 * 16]              ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    pmaddubsw   m5,        [r3 + 15 * 16]             ; [31]
+    pmulhrsw    m5,        m7
+    palignr     m6,        m2, m0, 4
+    pmaddubsw   m6,        [r3 + 4 * 16]              ; [ 20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    palignr     m1,        m2, m0, 6
+    pmaddubsw   m6,        m1, [r3 - 7 * 16]          ; [ 9]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        [r3 + 14 * 16]             ; [30]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    palignr     m1,        m2, m0, 8
+    pmaddubsw   m1,        [r3 + 3 * 16]              ; [19]
+    pmulhrsw    m1,        m7
+    palignr     m2,        m0, 10
+    pmaddubsw   m3,        m2, [r3 - 8 * 16]          ; [8]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    ; ---- tile 1: first row reuses m2 (offset 10 pair) from tile 0, then
+    ; the window advances to [r2 + 6]
+    pmaddubsw   m4,        m2, [r3 + 13 * 16]         ; [29]
+    pmulhrsw    m4,        m7
+    movu        m0,        [r2 + 6]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+    pmaddubsw   m1,        [r3 +  2 * 16]             ; [18]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    palignr     m5,        m2, m0, 4
+    mova        m6,        m5
+    pmaddubsw   m5,        [r3 - 9 * 16]              ; [07]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    palignr     m6,        m2, m0, 6
+    pmaddubsw   m6,        [r3 +      16]             ; [17]
+    pmulhrsw    m6,        m7
+    palignr     m1,        m2, m0, 8
+    pmaddubsw   m3,        m1, [r3 - 10 * 16]         ; [06]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    pmaddubsw   m1,        [r3 + 11 * 16]             ; [27]
+    pmulhrsw    m1,        m7
+    palignr     m2,        m0, 10
+    pmaddubsw   m2,        [r3]                       ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+
+    ; ---- tile 2: window advances to [r2 + 12]
+    movu        m0,        [r2 + 12]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    mova        m1,        m0
+    pmaddubsw   m4,        m0, [r3 - 11 * 16]         ; [5]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 10 * 16]             ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    palignr     m5,        m2, m0, 2
+    pmaddubsw   m5,        [r3 - 16]                  ; [15]
+    pmulhrsw    m5,        m7
+    palignr     m6,        m2, m0, 4
+    mova        m1,        m6
+    pmaddubsw   m1,        [r3 - 12 * 16]             ; [4]
+    pmulhrsw    m1,        m7
+    packuswb    m5,        m1
+    pmaddubsw   m6,        [r3 + 9 * 16]              ; [25]
+    pmulhrsw    m6,        m7
+    palignr     m1,        m2, m0, 6
+    pmaddubsw   m1,        [r3 - 2 * 16]              ; [14]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    palignr     m1,        m2, m0, 8
+    mova        m2,        m1
+    pmaddubsw   m1,        [r3 - 13 * 16]             ; [3]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m2,        [r3 + 8 * 16]              ; [24]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+
+    ; ---- tile 3: window advances to [r2 + 17]; final row has weight 0 so
+    ; the raw reference bytes at [r2 + 22] are copied in via movhps
+    movu        m0,        [r2 + 17]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    pmaddubsw   m4,        m0, [r3 - 3 * 16]          ; [13]
+    pmulhrsw    m4,        m7
+    palignr     m5,        m2, m0, 2
+    pmaddubsw   m1,        m5, [r3 - 14 * 16]         ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    pmaddubsw   m5,        [r3 + 7 * 16]              ; [23]
+    pmulhrsw    m5,        m7
+    palignr     m6,        m2, m0, 4
+    pmaddubsw   m6,        [r3 - 4 * 16]              ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    palignr     m6,        m2, m0, 6
+    mova        m1,        m6
+    pmaddubsw   m6,        [r3 - 15 * 16]             ; [1]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        [r3 + 6 * 16]              ; [22]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    palignr     m1,        m2, m0, 8
+    pmaddubsw   m1,        [r3 - 5 * 16]              ; [11]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    movhps      m1,        [r2 + 22]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
<br>
+%macro MODE_5_31 1
+; Angular intra prediction kernel — presumably HEVC angular modes 5 and 31
+; (%1 forwarded to TRANSPOSE_STORE_8x8; TODO confirm against the caller).
+; Same scheme as the sibling MODE_* macros: four 8x8 tiles, each built from
+; interleaved reference-byte pairs (palignr by 1 + punpck*bw) weighted by
+; the coefficient rows at [r3 +/- k*16] (bracketed values = 16 + k), rounded
+; via pmulhrsw with m7 and packed to bytes. The shallower angle here
+; advances the source window more slowly (+1, +5, +10, +14).
+    ; ---- tile 0: reference window [r2 + 1]
+    movu        m0,        [r2 + 1]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+    mova        m5,        m1
+    pmaddubsw   m4,        m0, [r3 +      16]          ; [17]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 - 14 * 16]              ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    pmaddubsw   m5,        [r3 + 3 * 16]               ; [19]
+    pmulhrsw    m5,        m7
+    palignr     m6,        m2, m0, 4
+    mova        m1,        m6
+    pmaddubsw   m6,        [r3 - 12 * 16]              ; [4]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m1, [r3 + 5 * 16]               ; [21]
+    pmulhrsw    m6,        m7
+    palignr     m1,        m2, m0, 6
+    mova        m3,        m1
+    pmaddubsw   m3,        [r3 - 10 * 16]              ; [6]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    pmaddubsw   m1,        [r3 + 7 * 16]               ; [23]
+    pmulhrsw    m1,        m7
+    palignr     m2,        m0, 8
+    pmaddubsw   m2,        [r3 - 8 * 16]               ; [8]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    ; ---- tile 1: window advances to [r2 + 5]
+    movu        m0,        [r2 + 5]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m1,        m2, m0, 2
+    mova        m5,        m1
+    pmaddubsw   m4,        m0, [r3 + 9 * 16]           ; [25]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 - 6 * 16]               ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    pmaddubsw   m5,        [r3 + 11 * 16]              ; [27]
+    pmulhrsw    m5,        m7
+    palignr     m6,        m2, m0, 4
+    mova        m1,        m6
+    pmaddubsw   m6,        [r3 - 4 * 16]               ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m1, [r3 + 13 * 16]          ; [29]
+    pmulhrsw    m6,        m7
+    palignr     m1,        m2, m0, 6
+    mova        m3,        m1
+    pmaddubsw   m3,        [r3 - 2 * 16]               ; [14]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    pmaddubsw   m1,        [r3 + 15 * 16]              ; [31]
+    pmulhrsw    m1,        m7
+    palignr     m2,        m0, 8
+    pmaddubsw   m2,        [r3]                        ; [16]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+
+    ; ---- tile 2: window advances to [r2 + 10]
+    movu        m0,        [r2 + 10]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    mova        m1,        m0
+    pmaddubsw   m4,        m0, [r3 - 15 * 16]          ; [1]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 2 * 16]               ; [18]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    palignr     m5,        m2, m0, 2
+    mova        m1,        m5
+    pmaddubsw   m5,        [r3 - 13 * 16]              ; [3]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m1,        [r3 + 4 * 16]               ; [20]
+    pmulhrsw    m1,        m7
+    packuswb    m5,        m1
+    palignr     m1,        m2, m0, 4
+    pmaddubsw   m6,        m1, [r3 - 11 * 16]          ; [5]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        [r3 + 6 * 16]               ; [22]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    palignr     m2,        m0, 6
+    pmaddubsw   m1,        m2, [r3 - 9 * 16]           ; [7]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m2,        [r3 + 8 * 16]               ; [24]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+
+    ; ---- tile 3: window advances to [r2 + 14]; zero-weight row copies the
+    ; raw reference bytes at [r2 + 18] via movhps
+    movu        m0,        [r2 + 14]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    mova        m1,        m0
+    pmaddubsw   m4,        m0, [r3 - 7 * 16]           ; [9]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 10 * 16]              ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    palignr     m5,        m2, m0, 2
+    mova        m1,        m5
+    pmaddubsw   m5,        [r3 - 5 * 16]               ; [11]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m1,        [r3 + 12 * 16]              ; [28]
+    pmulhrsw    m1,        m7
+    packuswb    m5,        m1
+    palignr     m1,        m2, m0, 4
+    pmaddubsw   m6,        m1, [r3 - 3 * 16]           ; [13]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        [r3 + 14 * 16]              ; [30]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    palignr     m2,        m0, 6
+    pmaddubsw   m1,        m2, [r3 - 16]               ; [15]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    movhps      m1,        [r2 + 18]                   ; [00]
+
+    TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
+%macro MODE_6_30 1
+; Angular intra prediction kernel — presumably HEVC angular modes 6 and 30
+; (%1 forwarded to TRANSPOSE_STORE_8x8; TODO confirm against the caller).
+; Four 8x8 tiles built from interleaved reference-byte pairs weighted with
+; the rows at [r3 +/- k*16] (bracketed value = fractional weight 16 + k),
+; rounded via pmulhrsw with m7 and byte-packed. The source window advances
+; +1, +5, +7, +11 — slower than the steeper MODE_4/5 variants.
+    ; ---- tile 0: reference window [r2 + 1]
+    movu        m0,        [r2 + 1]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    mova        m1,        m0
+    pmaddubsw   m4,        m0, [r3 - 3 * 16]          ; [13]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m1,        [r3 + 10 * 16]             ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    palignr     m6,        m2, m0, 2
+    pmaddubsw   m5,        m6, [r3 - 9 * 16]          ; [7]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        [r3 + 4 * 16]              ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    palignr     m1,        m2, m0, 4
+    pmaddubsw   m6,        m1, [r3 - 15 * 16]         ; [1]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m3,        m1, [r3 - 2 * 16]          ; [14]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    pmaddubsw   m1,        [r3 + 11 * 16]             ; [27]
+    pmulhrsw    m1,        m7
+    palignr     m2,        m0, 6
+    pmaddubsw   m3,        m2, [r3 - 8 * 16]          ; [8]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    ; ---- tile 1: first row reuses m2 (offset 6 pair) from tile 0, then
+    ; the window advances to [r2 + 5]
+    pmaddubsw   m4,        m2, [r3 +  5 * 16]         ; [21]
+    pmulhrsw    m4,        m7
+    movu        m0,        [r2 + 5]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    mova        m6,        m0
+    pmaddubsw   m1,        m6, [r3 - 14 * 16]         ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    pmaddubsw   m5,        m6, [r3 - 16]              ; [15]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        [r3 + 12 * 16]             ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    palignr     m3,        m2, m0, 2
+    pmaddubsw   m6,        m3, [r3 - 7 * 16]          ; [9]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m3,        [r3 + 6 * 16]              ; [22]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    palignr     m2,        m0, 4
+    pmaddubsw   m1,        m2, [r3 - 13 * 16]         ; [3]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m2, [r3]                   ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+
+    ; ---- tile 2: first row reuses m2 (offset 4 pair) from tile 1, then
+    ; the window advances to [r2 + 7]
+    pmaddubsw   m4,        m2, [r3 +  13 * 16]        ; [29]
+    pmulhrsw    m4,        m7
+    movu        m0,        [r2 + 7]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m5,        m2, m0, 2
+    pmaddubsw   m1,        m5, [r3 - 6 * 16]          ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    pmaddubsw   m5,        [r3 + 7 * 16]              ; [23]
+    pmulhrsw    m5,        m7
+    palignr     m1,        m2, m0, 4
+    pmaddubsw   m6,        m1, [r3 - 12 * 16]         ; [4]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m1, [r3 + 16]              ; [17]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        [r3 + 14 * 16]             ; [30]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    palignr     m2,        m2, m0, 6
+    pmaddubsw   m1,        m2, [r3 - 5 * 16]          ; [11]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m2,        m2, [r3 + 8 * 16]          ; [24]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+
+    ; ---- tile 3: window advances to [r2 + 11]; zero-weight row copies the
+    ; raw reference bytes at [r2 + 14] via movhps
+    movu        m0,        [r2 + 11]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    mova        m5,        m0
+    pmaddubsw   m4,        m0, [r3 - 11 * 16]         ; [5]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m5, [r3 + 2 * 16]          ; [18]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        [r3 + 15 * 16]             ; [31]
+    pmulhrsw    m5,        m7
+    palignr     m6,        m2, m0, 2
+    pmaddubsw   m1,        m6, [r3 - 4 * 16]          ; [12]
+    pmulhrsw    m1,        m7
+    packuswb    m5,        m1
+    pmaddubsw   m6,        [r3 + 9 * 16]              ; [25]
+    pmulhrsw    m6,        m7
+    palignr     m1,        m2, m0, 4
+    pmaddubsw   m2,        m1, [r3 - 10 * 16]         ; [6]
+    pmulhrsw    m2,        m7
+    packuswb    m6,        m2
+    pmaddubsw   m1,        [r3 + 3 * 16]              ; [19]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    movhps      m1,        [r2 + 14]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
+%macro MODE_7_29 1
+; Angular intra prediction kernel — presumably HEVC angular modes 7 and 29
+; (%1 forwarded to TRANSPOSE_STORE_8x8; TODO confirm against the caller).
+; Four 8x8 tiles; each row blends adjacent reference bytes (interleaved via
+; palignr by 1 + punpck*bw) with a weight row at [r3 +/- k*16] (bracketed
+; value = 16 + k), rounds via pmulhrsw with m7 and packs to bytes. The
+; shallow angle needs only small window steps: +1, +4, +6, +8.
+    ; ---- tile 0: reference window [r2 + 1]
+    movu        m0,        [r2 + 1]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    mova        m5,        m0
+    pmaddubsw   m4,        m0, [r3 - 7 * 16]         ; [9]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m5, [r3 + 2 * 16]         ; [18]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        [r3 + 11 * 16]            ; [27]
+    pmulhrsw    m5,        m7
+    palignr     m1,        m2, m0, 2
+    palignr     m2,        m0, 4
+    pmaddubsw   m6,        m1, [r3 - 12 * 16]        ; [4]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m1, [r3 - 3 * 16]         ; [13]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m1, [r3 + 6 * 16]         ; [22]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+    pmaddubsw   m1,        [r3 + 15 * 16]            ; [31]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m2, [r3 - 8 * 16]         ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    ; ---- tile 1: first rows reuse m2 (offset 4 pair) from tile 0, then
+    ; the window advances to [r2 + 4]
+    pmaddubsw   m4,        m2, [r3 + 16]             ; [17]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m2,        [r3 + 10 * 16]            ; [26]
+    pmulhrsw    m2,        m7
+    packuswb    m4,        m2
+    movu        m0,        [r2 + 4]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m2,        m0, 2
+    pmaddubsw   m5,        m0, [r3 - 13 * 16]        ; [03]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r3 - 4 * 16]         ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m0, [r3 + 5 * 16]         ; [21]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        [r3 + 14 * 16]            ; [30]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+    pmaddubsw   m1,        m2, [r3 - 9 * 16]         ; [07]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m2, [r3]                  ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+
+    ; ---- tile 2: first row reuses m2 from tile 1, then the window
+    ; advances to [r2 + 6]
+    pmaddubsw   m4,        m2, [r3 + 9 * 16]         ; [25]
+    pmulhrsw    m4,        m7
+    movu        m0,        [r2 + 6]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m2,        m0, 2
+    pmaddubsw   m1,        m0, [r3 - 14 * 16]        ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    pmaddubsw   m5,        m0, [r3 - 5 * 16]         ; [11]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r3 + 4 * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m0, [r3 + 13 * 16]        ; [29]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r3 - 10 * 16]        ; [6]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m2, [r3 - 16]             ; [15]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m2,        m2, [r3 + 8 * 16]         ; [24]
+    pmulhrsw    m2,        m7
+    packuswb    m1,        m2
+
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+
+    ; ---- tile 3: window advances to [r2 + 8]; zero-weight row copies the
+    ; raw reference bytes at [r2 + 10] via movhps
+    movu        m0,        [r2 + 8]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    pmaddubsw   m4,        m0, [r3 - 15 * 16]        ; [1]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m0, [r3 - 6 * 16]         ; [10]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m0, [r3 + 3 * 16]         ; [19]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r3 + 12 * 16]        ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    palignr     m2,        m0, 2
+    pmaddubsw   m6,        m2, [r3 - 11 * 16]        ; [5]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m2, [r3 - 2 * 16]         ; [14]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+    pmaddubsw   m1,        m2, [r3 + 7 * 16]         ; [23]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    movhps      m1,        [r2 + 10]                 ; [0]
+
+    TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
+%macro MODE_8_28 1
+; Angular intra prediction kernel — presumably HEVC angular modes 8 and 28
+; (%1 forwarded to TRANSPOSE_STORE_8x8; TODO confirm against the caller).
+; Four 8x8 tiles; adjacent reference bytes are interleaved once per window
+; and then reused for many rows since this near-vertical/horizontal angle
+; steps the fraction by 5/32 per row: weights advance 5,10,15,... while the
+; window moves only +1, +3, +4, +5. Weight rows live at [r3 +/- k*16]
+; (bracketed value = 16 + k); pmulhrsw/m7 rounds, packuswb packs to bytes.
+    ; ---- tile 0: reference window [r2 + 1]
+    movu        m0,        [r2 + 1]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m2,        m0, 2
+    pmaddubsw   m4,        m0, [r3 - 11 * 16]     ; [5]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m0, [r3 - 6 * 16]      ; [10]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m0, [r3 - 1 * 16]      ; [15]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r3 + 4 * 16]      ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m0, [r3 + 9 * 16]      ; [25]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        [r3 + 14 * 16]         ; [30]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+    pmaddubsw   m1,        m2, [r3 - 13 * 16]     ; [3]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m2, [r3 - 8 * 16]      ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    ; ---- tile 1: keeps using the offset-2 pair in m2, then the window
+    ; advances to [r2 + 3]
+    pmaddubsw   m4,        m2, [r3 - 3 * 16]      ; [13]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r3 + 2 * 16]      ; [18]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m2, [r3 + 7 * 16]      ; [23]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m2,        [r3 + 12 * 16]         ; [28]
+    pmulhrsw    m2,        m7
+    packuswb    m5,        m2
+    movu        m0,        [r2 + 3]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    pmaddubsw   m6,        m0, [r3 - 15 * 16]     ; [01]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m0, [r3 - 10 * 16]     ; [06]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m0, [r3 - 5 * 16]      ; [11]
+    pmulhrsw    m1,        m7
+    mova        m2,        m0
+    pmaddubsw   m0,        [r3]                   ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+
+    ; ---- tile 2: continues with m2 (saved offset-0 pair), then the window
+    ; advances to [r2 + 4]
+    pmaddubsw   m4,        m2, [r3 + 5 * 16]      ; [21]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r3 + 10 * 16]     ; [26]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m2, [r3 + 15 * 16]     ; [31]
+    pmulhrsw    m5,        m7
+    movu        m0,        [r2 + 4]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    pmaddubsw   m2,        m0, [r3 - 12 * 16]     ; [4]
+    pmulhrsw    m2,        m7
+    packuswb    m5,        m2
+    pmaddubsw   m6,        m0, [r3 - 7 * 16]      ; [9]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m0, [r3 - 2 * 16]      ; [14]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m0, [r3 + 3 * 16]      ; [19]
+    pmulhrsw    m1,        m7
+    mova        m2,        m0
+    pmaddubsw   m0,        [r3 + 8 * 16]          ; [24]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+
+    ; ---- tile 3: window advances to [r2 + 5]; zero-weight row copies the
+    ; raw reference bytes at [r2 + 6] via movhps
+    pmaddubsw   m4,        m2, [r3 + 13 * 16]     ; [29]
+    pmulhrsw    m4,        m7
+    movu        m0,        [r2 + 5]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    pmaddubsw   m1,        m0, [r3 - 14 * 16]     ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m4,        m1
+    pmaddubsw   m5,        m0, [r3 - 9 * 16]      ; [7]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r3 - 4 * 16]      ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m0, [r3 + 16]          ; [17]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m0, [r3 + 6 * 16]      ; [22]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m0, [r3 + 11 * 16]         ; [27]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    movhps      m1,        [r2 + 6]               ; [00]
+
+    TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
+%macro MODE_9_27 1
+; Angular intra prediction kernel — presumably HEVC angular modes 9 and 27
+; (%1 forwarded to TRANSPOSE_STORE_8x8; TODO confirm against the caller).
+; Simplest of the angular family: the fraction advances by 2/32 per row, so
+; one interleaved byte-pair register (m2) serves a whole tile and only the
+; even weight rows [2]..[30] at [r3 +/- k*16] are used. pmulhrsw/m7 rounds,
+; packuswb packs; the window moves just one byte between tile pairs.
+    ; ---- tile 0: reference window [r2 + 1], weights [2]..[16]
+    movu        m2,        [r2 + 1]
+    palignr     m1,        m2, 1
+    punpckhbw   m0,        m2, m1
+    punpcklbw   m2,        m1
+    pmaddubsw   m4,        m2, [r3 - 14 * 16]   ; [2]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m2, [r3 - 12 * 16]   ; [4]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m2, [r3 - 10 * 16]   ; [6]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r3 - 8 * 16]    ; [8]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r3 - 6 * 16]    ; [10]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m3,        m2, [r3 - 4 * 16]    ; [12]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    pmaddubsw   m1,        m2, [r3 - 2 * 16]    ; [14]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m2, [r3]             ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    ; ---- tile 1: same pair register, weights [18]..[30]; the zero-weight
+    ; row copies raw reference bytes at [r2 + 2] via movhps
+    pmaddubsw   m4,        m2, [r3 + 2 * 16]    ; [18]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r3 + 4 * 16]    ; [20]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m2, [r3 + 6 * 16]    ; [22]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r3 + 8 * 16]    ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r3 + 10 * 16]   ; [26]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r3 + 12 * 16]   ; [28]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m2, [r3 + 14 * 16]   ; [30]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    movhps      m1,        [r2 + 2]             ; [00]
+
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+
+    ; ---- tile 2: window advances one byte to [r2 + 2] (only the low
+    ; interleave is needed), weights [2]..[16] again
+    movu        m2,        [r2 + 2]
+    palignr     m1,        m2, 1
+    punpcklbw   m2,        m1
+    pmaddubsw   m4,        m2, [r3 - 14 * 16]   ; [2]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m2, [r3 - 12 * 16]   ; [4]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m2, [r3 - 10 * 16]   ; [6]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r3 - 8 * 16]    ; [8]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r3 - 6 * 16]    ; [10]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m2, [r3 - 4 * 16]    ; [12]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+    pmaddubsw   m1,        m2, [r3 - 2 * 16]    ; [14]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m2, [r3]             ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+
+    ; ---- tile 3: same window [r2 + 2], weights [18]..[30]; zero-weight
+    ; row copies raw reference bytes at [r2 + 3]
+    movu        m2,        [r2 + 2]
+    palignr     m1,        m2, 1
+    punpcklbw   m2,        m1
+    pmaddubsw   m4,        m2, [r3 + 2 * 16]    ; [18]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r3 + 4 * 16]    ; [20]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m2, [r3 + 6 * 16]    ; [22]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r3 + 8 * 16]    ; [24]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r3 + 10 * 16]   ; [26]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r3 + 12 * 16]   ; [28]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m2, [r3 + 14 * 16]   ; [30]
+    pmulhrsw    m1,        m7
+    packuswb    m1,        m1
+    movhps      m1,        [r2 + 3]             ; [00]
+
+     TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
+; MODE_12_24 %1
+; Emit one 8-column x 32-row strip of the 8-bit angular-mode 12/24
+; predictor.  Reference bytes are read in a sliding window around [r2];
+; each output row is a two-tap interpolation done with pmaddubsw against
+; a 16-byte coefficient row of ang_table (r4 points at the weight-16
+; row, so [r4 + k * 16] selects weight 16+k — the bracketed number in
+; each trailing comment), rounded via pmulhrsw with m7 (must hold
+; pw_1024 on entry).  Rows are emitted as four transposed 8x8 tiles via
+; TRANSPOSE_STORE_8x8; %1 is passed through to select its store variant.
+; NOTE(review): clobbers m0-m6; r2/r4 must be set up by the caller.
+%macro MODE_12_24 1
+    movu        m2,        [r2]
+    palignr     m1,        m2, 1
+    punpckhbw   m0,        m2, m1
+    punpcklbw   m2,        m1
+    palignr     m0,        m2, 2
+    pmaddubsw   m4,        m0, [r4 + 11 * 16]         ; [27]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m0, [r4 + 6 * 16]          ; [22]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m0, [r4 + 16]              ; [17]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r4 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m0, [r4 - 9 * 16]          ; [7]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m3,        m0, [r4 - 14 * 16]         ; [2]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    pmaddubsw   m1,        m2, [r4 + 13 * 16]         ; [29]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m2, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m2, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r4 - 2 * 16]          ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m2, [r4 - 7 * 16]          ; [09]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    ; next row group: reference window steps back 2 bytes
+    movu        m0,        [r2 - 2]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m2,        m0, 2
+    pmaddubsw   m6,        m2, [r4 + 15 * 16]         ; [31]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m2, [r4 + 5 * 16]          ; [21]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m2, [r4]                   ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m2, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m2, [r4 - 10 * 16]         ; [06]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m2, [r4 - 15 * 16]         ; [1]
+    pmulhrsw    m5,        m7
+    ; next row group: window steps back 3 bytes from the strip base
+    movu        m0,        [r2 - 3]
+    palignr     m1,        m0, 1
+    punpckhbw   m2,        m0, m1
+    punpcklbw   m0,        m1
+    palignr     m2,        m0, 2
+    pmaddubsw   m6,        m2, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r4 + 7 * 16]          ; [23]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m3,        m2, [r4 + 2 * 16]          ; [18]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    pmaddubsw   m1,        m2, [r4 - 3 * 16]          ; [13]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m2, [r4 - 8 * 16]          ; [8]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m2, [r4 - 13 * 16]         ; [3]
+    pmulhrsw    m4,        m7
+    ; final row group: window steps back 4 bytes
+    movu        m2,        [r2 - 4]
+    palignr     m1,        m2, 1
+    punpckhbw   m0,        m2, m1
+    punpcklbw   m2,        m1
+    palignr     m0,        m2, 2
+    pmaddubsw   m5,        m0, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m0, [r4 + 9 * 16]          ; [25]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m0, [r4 + 4 * 16]          ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m0, [r4 - 16]              ; [15]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m0, [r4 - 6 * 16]          ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m0, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m1,        m7
+    ; last row is at an integer reference position (weight 32 = copy):
+    ; NOTE(review): pb_fact0 shuffle + pmovzxbw extracts those source
+    ; bytes directly — confirm against the pb_fact0 table definition.
+    movu        m2,        [pb_fact0]
+    pshufb      m0,        m2
+    pmovzxbw    m0,        m0
+    packuswb    m1,        m0
+    TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
+;------------------------------------------------------------------------------------------
+; void intraPredAng32(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------
+INIT_XMM ssse3
+; 32x32 angular prediction, mode 2 (also services mode 34 — see the
+; cmove below).  Both modes copy the reference pixels along a pure 45
+; degree diagonal, so output row n is just the reference array shifted
+; by one extra byte per row: three 16-byte loads cover rows 0..15 and a
+; fourth load extends coverage for rows 16..31, with palignr doing the
+; per-row byte shifts.  No interpolation is needed.
+; Args: r0 = dst, r1 = dstStride, r2 = src refs, r3m = dirMode.
+; NOTE(review): left references appear to live at src+64 and above
+; references at src+0 (mode 34 selects the latter) — confirm against
+; the caller's reference-array layout.
+cglobal intra_pred_ang32_2, 3,5,4
+    lea             r4, [r2]
+    add             r2, 64
+    cmp             r3m, byte 34
+    cmove           r2, r4
+    movu            m0, [r2 + 2]
+    movu            m1, [r2 + 18]
+    movu            m3, [r2 + 34]
+
+    lea             r3, [r1 * 3]
+
+    ; rows 0..3: shifts 0..3 of (m0|m1) and (m1|m3)
+    movu            [r0], m0
+    movu            [r0 + 16], m1
+    palignr         m2, m1, m0, 1
+    movu            [r0 + r1], m2
+    palignr         m2, m3, m1, 1
+    movu            [r0 + r1 + 16], m2
+    palignr         m2, m1, m0, 2
+    movu            [r0 + r1 * 2], m2
+    palignr         m2, m3, m1, 2
+    movu            [r0 + r1 * 2 + 16], m2
+    palignr         m2, m1, m0, 3
+    movu            [r0 + r3], m2
+    palignr         m2, m3, m1, 3
+    movu            [r0 + r3 + 16], m2
+
+    lea             r0, [r0 + r1 * 4]
+
+    ; rows 4..7
+    palignr         m2, m1, m0, 4
+    movu            [r0], m2
+    palignr         m2, m3, m1, 4
+    movu            [r0 + 16], m2
+    palignr         m2, m1, m0, 5
+    movu            [r0 + r1], m2
+    palignr         m2, m3, m1, 5
+    movu            [r0 + r1 + 16], m2
+    palignr         m2, m1, m0, 6
+    movu            [r0 + r1 * 2], m2
+    palignr         m2, m3, m1, 6
+    movu            [r0 + r1 * 2 + 16], m2
+    palignr         m2, m1, m0, 7
+    movu            [r0 + r3], m2
+    palignr         m2, m3, m1, 7
+    movu            [r0 + r3 + 16], m2
+
+    lea             r0, [r0 + r1 * 4]
+
+    ; rows 8..11
+    palignr         m2, m1, m0, 8
+    movu            [r0], m2
+    palignr         m2, m3, m1, 8
+    movu            [r0 + 16], m2
+    palignr         m2, m1, m0, 9
+    movu            [r0 + r1], m2
+    palignr         m2, m3, m1, 9
+    movu            [r0 + r1 + 16], m2
+    palignr         m2, m1, m0, 10
+    movu            [r0 + r1 * 2], m2
+    palignr         m2, m3, m1, 10
+    movu            [r0 + r1 * 2 + 16], m2
+    palignr         m2, m1, m0, 11
+    movu            [r0 + r3], m2
+    palignr         m2, m3, m1, 11
+    movu            [r0 + r3 + 16], m2
+
+    lea             r0, [r0 + r1 * 4]
+
+    ; rows 12..15
+    palignr         m2, m1, m0, 12
+    movu            [r0], m2
+    palignr         m2, m3, m1, 12
+    movu            [r0 + 16], m2
+    palignr         m2, m1, m0, 13
+    movu            [r0 + r1], m2
+    palignr         m2, m3, m1, 13
+    movu            [r0 + r1 + 16], m2
+    palignr         m2, m1, m0, 14
+    movu            [r0 + r1 * 2], m2
+    palignr         m2, m3, m1, 14
+    movu            [r0 + r1 * 2 + 16], m2
+    palignr         m2, m1, m0, 15
+    movu            [r0 + r3], m2
+    palignr         m2, m3, m1, 15
+    movu            [r0 + r3 + 16], m2
+
+    lea             r0, [r0 + r1 * 4]
+
+    ; rows 16..19: reference window advances 16 bytes (m1|m3|m0-new)
+    movu            [r0], m1
+    movu            m0, [r2 + 50]
+    movu            [r0 + 16], m3
+    palignr         m2, m3, m1, 1
+    movu            [r0 + r1], m2
+    palignr         m2, m0, m3, 1
+    movu            [r0 + r1 + 16], m2
+    palignr         m2, m3, m1, 2
+    movu            [r0 + r1 * 2], m2
+    palignr         m2, m0, m3, 2
+    movu            [r0 + r1 * 2 + 16], m2
+    palignr         m2, m3, m1, 3
+    movu            [r0 + r3], m2
+    palignr         m2, m0, m3, 3
+    movu            [r0 + r3 + 16], m2
+
+    lea             r0, [r0 + r1 * 4]
+
+    ; rows 20..23
+    palignr         m2, m3, m1, 4
+    movu            [r0], m2
+    palignr         m2, m0, m3, 4
+    movu            [r0 + 16], m2
+    palignr         m2, m3, m1, 5
+    movu            [r0 + r1], m2
+    palignr         m2, m0, m3, 5
+    movu            [r0 + r1 + 16], m2
+    palignr         m2, m3, m1, 6
+    movu            [r0 + r1 * 2], m2
+    palignr         m2, m0, m3, 6
+    movu            [r0 + r1 * 2 + 16], m2
+    palignr         m2, m3, m1, 7
+    movu            [r0 + r3], m2
+    palignr         m2, m0, m3, 7
+    movu            [r0 + r3 + 16], m2
+
+    lea             r0, [r0 + r1 * 4]
+
+    ; rows 24..27
+    palignr         m2, m3, m1, 8
+    movu            [r0], m2
+    palignr         m2, m0, m3, 8
+    movu            [r0 + 16], m2
+    palignr         m2, m3, m1, 9
+    movu            [r0 + r1], m2
+    palignr         m2, m0, m3, 9
+    movu            [r0 + r1 + 16], m2
+    palignr         m2, m3, m1, 10
+    movu            [r0 + r1 * 2], m2
+    palignr         m2, m0, m3, 10
+    movu            [r0 + r1 * 2 + 16], m2
+    palignr         m2, m3, m1, 11
+    movu            [r0 + r3], m2
+    palignr         m2, m0, m3, 11
+    movu            [r0 + r3 + 16], m2
+
+    lea             r0, [r0 + r1 * 4]
+
+    ; rows 28..31
+    palignr         m2, m3, m1, 12
+    movu            [r0], m2
+    palignr         m2, m0, m3, 12
+    movu            [r0 + 16], m2
+    palignr         m2, m3, m1, 13
+    movu            [r0 + r1], m2
+    palignr         m2, m0, m3, 13
+    movu            [r0 + r1 + 16], m2
+    palignr         m2, m3, m1, 14
+    movu            [r0 + r1 * 2], m2
+    palignr         m2, m0, m3, 14
+    movu            [r0 + r1 * 2 + 16], m2
+    palignr         m2, m3, m1, 15
+    movu            [r0 + r3], m2
+    palignr         m2, m0, m3, 15
+    movu            [r0 + r3 + 16], m2
+    RET
+
+INIT_XMM sse4
+; 32x32 angular prediction, mode 3: four passes of the MODE_3_33 macro,
+; one per 8-column strip (r2 advances 8 reference bytes per pass, r0/r6
+; step down 16 rows).  Left references start at src+64; r5 = 3*stride
+; and r6 = 4*stride feed the store macros; m7 holds pw_1024 rounding.
+cglobal intra_pred_ang32_3, 3,7,8
+    add         r2,        64
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       4
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+.loop:
+    MODE_3_33 1
+    lea         r0, [r6 + r1 * 4]
+    lea         r6, [r6 + r1 * 8]
+    add         r2, 8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 angular prediction, mode 4: same driver shape as mode 3 —
+; four MODE_4_32 passes over successive 8-column strips, r2 stepping 8
+; reference bytes and r0/r6 stepping 16 output rows per pass.
+cglobal intra_pred_ang32_4, 3,7,8
+    add         r2,        64
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       4
+    lea         r5,        [r1 * 3]                    ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]               ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+.loop:
+    MODE_4_32 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 angular prediction, mode 5: four MODE_5_31 passes over
+; successive 8-column strips (same driver layout as modes 3/4).
+cglobal intra_pred_ang32_5, 3,7,8
+    add         r2,        64
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       4
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+.loop:
+    MODE_5_31 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 angular prediction, mode 6: four MODE_6_30 passes over
+; successive 8-column strips (same driver layout as modes 3-5).
+cglobal intra_pred_ang32_6, 3,7,8
+    add         r2,        64
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       4
+    lea         r5,        [r1 * 3]                  ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]             ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+.loop:
+    MODE_6_30 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 angular prediction, mode 7: four MODE_7_29 passes over
+; successive 8-column strips (same driver layout as modes 3-6).
+cglobal intra_pred_ang32_7, 3,7,8
+    add         r2,        64
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       4
+    lea         r5,        [r1 * 3]               ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]          ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+.loop:
+    MODE_7_29 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 angular prediction, mode 8: four MODE_8_28 passes over
+; successive 8-column strips (same driver layout as modes 3-7).
+cglobal intra_pred_ang32_8, 3,7,8
+    add         r2,        64
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       4
+    lea         r5,        [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]       ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+.loop:
+    MODE_8_28 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 angular prediction, mode 9: four MODE_9_27 passes over
+; successive 8-column strips (same driver layout as modes 3-8).
+cglobal intra_pred_ang32_9, 3,7,8
+    add         r2,        64
+    lea         r3,        [ang_table + 16 * 16]
+    mov         r4d,       4
+    lea         r5,        [r1 * 3]         ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]    ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+.loop:
+    MODE_9_27 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 angular prediction, mode 10 (pure horizontal): every output row
+; is a broadcast of one left-reference byte.  The loop runs twice
+; (r6 = 2), covering 16 rows per iteration; within an iteration each
+; reference byte is isolated with palignr and splatted with pshufb
+; against the zeroed m7, then stored to both 16-byte halves of the row.
+; When bFilter (5th arg, saved in r3d) is non-zero, the first row of
+; each iteration is additionally filtered toward the above reference
+; using the saved top-left/top bytes kept in stack slots m8/m9.
+; NOTE(review): reads [r2] / [r2 + 1] before r2 is biased, and
+; [r2 + 1 + 64] inside the loop — assumes above refs at src+0 and left
+; refs at src+64; confirm against the caller's reference layout.
+cglobal intra_pred_ang32_10, 5,7,8,0-(2*mmsize)
+%define m8 [rsp + 0 * mmsize]
+%define m9 [rsp + 1 * mmsize]
+    pxor        m7, m7
+    mov         r6, 2
+    movu        m0, [r2]
+    movu        m1, [r2 + 1]
+    mova        m8, m0
+    mova        m9, m1
+    mov         r3d, r4d
+    lea         r4, [r1 * 3]
+
+.loop:
+    ; broadcast left refs 1..6 of this 16-row group
+    movu        m0, [r2 + 1 + 64]
+    palignr     m1, m0, 1
+    pshufb      m1, m7
+    palignr     m2, m0, 2
+    pshufb      m2, m7
+    palignr     m3, m0, 3
+    pshufb      m3, m7
+    palignr     m4, m0, 4
+    pshufb      m4, m7
+    palignr     m5, m0, 5
+    pshufb      m5, m7
+    palignr     m6, m0, 6
+    pshufb      m6, m7
+
+    movu        [r0 + r1], m1
+    movu        [r0 + r1 + 16], m1
+    movu        [r0 + r1 * 2], m2
+    movu        [r0 + r1 * 2 + 16], m2
+    movu        [r0 + r4], m3
+    movu        [r0 + r4 + 16], m3
+    lea         r5, [r0 + r1 * 4]
+    movu        [r5], m4
+    movu        [r5 + 16], m4
+    movu        [r5 + r1], m5
+    movu        [r5 + r1 + 16], m5
+    movu        [r5 + r1 * 2], m6
+    movu        [r5 + r1 * 2 + 16], m6
+
+    ; broadcast left refs 7..12 (movhlps stands in for palignr ..., 8)
+    palignr     m1, m0, 7
+    pshufb      m1, m7
+    movhlps     m2, m0
+    pshufb      m2, m7
+    palignr     m3, m0, 9
+    pshufb      m3, m7
+    palignr     m4, m0, 10
+    pshufb      m4, m7
+    palignr     m5, m0, 11
+    pshufb      m5, m7
+    palignr     m6, m0, 12
+    pshufb      m6, m7
+
+    movu        [r5 + r4], m1
+    movu        [r5 + r4 + 16], m1
+    lea         r5, [r5 + r1 * 4]
+    movu        [r5], m2
+    movu        [r5 + 16], m2
+    movu        [r5 + r1], m3
+    movu        [r5 + r1 + 16], m3
+    movu        [r5 + r1 * 2], m4
+    movu        [r5 + r1 * 2 + 16], m4
+    movu        [r5 + r4], m5
+    movu        [r5 + r4 + 16], m5
+    lea         r5, [r5 + r1 * 4]
+    movu        [r5], m6
+    movu        [r5 + 16], m6
+
+    ; broadcast left refs 13..15; m0 becomes the splat of ref 0 for the
+    ; first row of the group (stored at .quit, optionally filtered)
+    palignr     m1, m0, 13
+    pshufb      m1, m7
+    palignr     m2, m0, 14
+    pshufb      m2, m7
+    palignr     m3, m0, 15
+    pshufb      m3, m7
+    pshufb      m0, m7
+
+    movu        [r5 + r1], m1
+    movu        [r5 + r1 + 16], m1
+    movu        [r5 + r1 * 2], m2
+    movu        [r5 + r1 * 2 + 16], m2
+    movu        [r5 + r4], m3
+    movu        [r5 + r4 + 16], m3
+
+; filter
+    ; bFilter: first row = clip(left + ((above[x] - topLeft) >> 1));
+    ; m8 = saved bytes around top-left, m9 = saved above row.
+    cmp         r3d, byte 0
+    jz         .quit
+    movhlps     m1, m0
+    pmovzxbw    m0, m0
+    mova        m1, m0
+    movu        m2, m8
+    movu        m3, m9
+
+    pshufb      m2, m7
+    pmovzxbw    m2, m2
+    movhlps     m4, m3
+    pmovzxbw    m3, m3
+    pmovzxbw    m4, m4
+    psubw       m3, m2
+    psubw       m4, m2
+    psraw       m3, 1
+    psraw       m4, 1
+    paddw       m0, m3
+    paddw       m1, m4
+    packuswb    m0, m1
+
+.quit:
+    movu        [r0], m0
+    movu        [r0 + 16], m0
+    dec         r6
+    lea         r0, [r5 + r1 * 4]
+    lea         r2, [r2 + 16]
+    jnz         .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 angular prediction, mode 11.  First copies the needed reference
+; pixels into a 64-byte-aligned scratch area on the stack so every read
+; in the hot loop hits the same cache line, then runs PROC32_8x8 four
+; times per strip with the mode-11 weight sequences (30,28,...,16 and
+; 14,12,...,0), sliding the scratch pointer back one byte for rows
+; 16..31.  The loop counter is kept in the spare scratch byte
+; [rsp + 63]; the caller's rsp is preserved in [rsp + 64].
+; NOTE(review): m5/m6 are pre-loaded with pw_1024 / c_deinterval8 but
+; are overwritten each iteration before PROC32_8x8 — presumably the
+; macro reloads them internally; confirm against PROC32_8x8.
+cglobal intra_pred_ang32_11, 4,7,8
+    ; NOTE: alignment stack to 64 bytes, so all of local data in same cache line
+    mov         r6, rsp
+    sub         rsp, 64+gprsize
+    and         rsp, ~63
+    mov         [rsp+64], r6
+
+    ; collect reference pixel
+    movu        m0, [r2 + 16]
+    pxor        m1, m1
+    pshufb      m0, m1                   ; [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
+    mova        [rsp], m0
+    movu        m0, [r2 + 64]
+    pinsrb      m0, [r2], 0
+    movu        m1, [r2 + 16 + 64]
+    movu        m2, [r2 + 32 + 64]
+    movu        [rsp + 1], m0
+    movu        [rsp + 1 + 16], m1
+    movu        [rsp + 1 + 32], m2
+    mov         [rsp + 63], byte 4
+
+    ; filter
+    lea         r2, [rsp + 1]            ; r2 -> [0]
+    lea         r3, [c_shuf8_0]          ; r3 -> shuffle8
+    lea         r4, [ang_table]          ; r4 -> ang_table
+    lea         r5, [r1 * 3]             ; r5 -> 3 * stride
+    lea         r6, [r0 + r1 * 4]        ; r6 -> 4 * stride
+    mova        m5, [pw_1024]            ; m5 -> 1024
+    mova        m6, [c_deinterval8]      ; m6 -> c_deinterval8
+
+.loop:
+    ; Row[0 - 7]
+    movu        m7, [r2]
+    mova        m0, m7
+    mova        m1, m7
+    mova        m2, m7
+    mova        m3, m7
+    mova        m4, m7
+    mova        m5, m7
+    mova        m6, m7
+    PROC32_8x8  0, 1, 30,28,26,24,22,20,18,16
+
+    ; Row[8 - 15]
+    movu        m7, [r2]
+    mova        m0, m7
+    mova        m1, m7
+    mova        m2, m7
+    mova        m3, m7
+    mova        m4, m7
+    mova        m5, m7
+    mova        m6, m7
+    PROC32_8x8  1, 1, 14,12,10,8,6,4,2,0
+
+    ; Row[16 - 23]
+    movu        m7, [r2 - 1]
+    mova        m0, m7
+    mova        m1, m7
+    mova        m2, m7
+    mova        m3, m7
+    mova        m4, m7
+    mova        m5, m7
+    mova        m6, m7
+    PROC32_8x8  2, 1, 30,28,26,24,22,20,18,16
+
+    ; Row[24 - 31]
+    movu        m7, [r2 - 1]
+    mova        m0, m7
+    mova        m1, m7
+    mova        m2, m7
+    mova        m3, m7
+    mova        m4, m7
+    mova        m5, m7
+    mova        m6, m7
+    PROC32_8x8  3, 1, 14,12,10,8,6,4,2,0
+
+    lea         r0, [r6 + r1 * 4]
+    lea         r6, [r6 + r1 * 8]
+    add         r2, 8
+    dec         byte [rsp + 63]
+    jnz        .loop
+    mov         rsp, [rsp+64]
+    RET
+
+; MODE_12_24_ROW0 <store?>
+; First 8-column strip of angular mode 12/24.  Unlike MODE_12_24, rows
+; near the block edge need reference pixels projected from the other
+; side: those are gathered with the c_mode32_12_0 shuffle (plus a
+; pinsrb) into the stack-backed `above` register and spliced in front
+; of the main reference row with palignr/pslldq as the window slides.
+; Same interpolation scheme as MODE_12_24: pmaddubsw against ang_table
+; rows addressed relative to r4 (the weight-16 row), pmulhrsw with m7
+; (pw_1024), four transposed 8x8 tiles via TRANSPOSE_STORE_8x8.
+; %1 = 1 additionally patches byte 0 with the top-left sample [r3].
+%macro MODE_12_24_ROW0 1
+    movu        m0,        [r3 + 6]
+    pshufb      m0,        [c_mode32_12_0]
+    pinsrb      m0,        [r3 + 26], 12
+    mova        above,     m0
+    movu        m2,        [r2]
+  %if %1 == 1
+    pinsrb      m2,        [r3], 0
+  %endif
+    palignr     m1,        m2, 1
+    punpcklbw   m2,        m1
+    pmaddubsw   m4,        m2, [r4 + 11 * 16]         ; [27]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m2, [r4 + 6 * 16]          ; [22]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m2, [r4 + 16]              ; [17]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 - 4 * 16]          ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r4 - 9 * 16]          ; [7]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m3,        m2, [r4 - 14 * 16]         ; [2]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    movu        m1,        [r2]                       ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+  %if %1 == 1
+    pinsrb      m1,        [r3], 0
+  %endif
+    palignr     m2,        m1, above, 15              ; [14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 a]
+    punpcklbw   m2,        m1                         ; [7 6 6 5 5 4 4 3 3 2 2 1 1 0 0 a]
+    pmaddubsw   m1,        m2, [r4 + 13 * 16]             ; [29]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m2, [r4 + 8 * 16]          ; [24]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m2, [r4 + 3 * 16]          ; [19]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r4 - 2 * 16]          ; [14]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m2, [r4 - 7 * 16]          ; [09]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 - 12 * 16]         ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    ; splice one more projected byte from `above` in front of the row
+    palignr     m2,        above, 14                  ;[6 5 5 4 4 3 3 2 2 1 1 0 0 a a b]
+    pmaddubsw   m6,        m2, [r4 + 15 * 16]         ; [31]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r4 + 10 * 16]         ; [26]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m2, [r4 + 5 * 16]          ; [21]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m2, [r4]                   ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m2, [r4 - 5 * 16]          ; [11]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m2, [r4 - 10 * 16]         ; [06]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m2, [r4 - 15 * 16]         ; [1]
+    pmulhrsw    m5,        m7
+    ; shift `above` to feed the next projected byte
+    pslldq      m1,        above, 1
+    palignr     m2,        m1, 14
+    pmaddubsw   m6,        m2, [r4 + 12 * 16]         ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r4 + 7 * 16]          ; [23]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m3,        m2, [r4 + 2 * 16]          ; [18]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    pmaddubsw   m1,        m2, [r4 - 3 * 16]          ; [13]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m2, [r4 - 8 * 16]          ; [8]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m2, [r4 - 13 * 16]         ; [3]
+    pmulhrsw    m4,        m7
+    pslldq      m1,        above, 2
+    palignr     m2,        m1, 14
+    pmaddubsw   m5,        m2, [r4 + 14 * 16]         ; [30]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m2, [r4 + 9 * 16]          ; [25]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 + 4 * 16]          ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r4 - 16]              ; [15]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r4 - 6 * 16]          ; [10]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m2, [r4 - 11 * 16]         ; [05]
+    pmulhrsw    m1,        m7
+    ; final integer-position row via the pb_fact0 shuffle (pure copy)
+    movu        m0,        [pb_fact0]
+    pshufb      m2,        m0
+    pmovzxbw    m2,        m2
+    packuswb    m1,        m2
+    TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
+INIT_XMM sse4
+; 32x32 angular prediction, mode 12.  The first 8-column strip needs
+; projected above-edge pixels, so it goes through MODE_12_24_ROW0 (with
+; r3 kept at the unbiased reference base and `above` spilled to the
+; stack slot); the remaining three strips use the plain MODE_12_24
+; macro.  Note r2 advances 7 after the first strip, then 8 per strip —
+; ROW0's reference indexing is off by one relative to the others.
+cglobal intra_pred_ang32_12, 3,7,8,0-(1*mmsize)
+  %define above    [rsp + 0 * mmsize]
+    mov         r3,        r2
+    add         r2,        64
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]                   ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]              ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+    MODE_12_24_ROW0 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        7
+    mov         r3,        3
+.loop:
+    MODE_12_24 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r3
+    jnz         .loop
+    RET
+
+; MODE_13_23_ROW0 <store?>
+; First 8-column strip of angular mode 13/23.  The projected reference
+; pixels are gathered from two windows ([r3 + 1] and [r3 + 15]) with
+; the c_mode32_13_0 / c_mode32_13_shuf shuffles into the stack-backed
+; `above` register, then spliced in front of the main reference row
+; with palignr/pslldq as the interpolation window slides.  Same
+; arithmetic as the other angular macros: pmaddubsw against ang_table
+; rows relative to r4 (the weight-16 row), pmulhrsw with m7 (pw_1024),
+; output as four transposed 8x8 tiles via TRANSPOSE_STORE_8x8.
+; %1 = 1 patches byte 0 with the top-left sample [r3].
+%macro MODE_13_23_ROW0 1
+    movu        m0,        [r3 + 1]
+    movu        m1,        [r3 + 15]
+    pshufb      m0,        [c_mode32_13_0]
+    pshufb      m1,        [c_mode32_13_0]
+    punpckldq   m0,        m1
+    pshufb      m0,        [c_mode32_13_shuf]
+    mova        above,     m0
+    movu        m2,        [r2]
+  %if (%1 == 1)
+    pinsrb      m2,        [r3], 0
+  %endif
+    palignr     m1,        m2, 1
+    punpcklbw   m2,        m1
+    pmaddubsw   m4,        m2, [r4 + 7 * 16]         ; [23]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m2, [r4 - 2 * 16]         ; [14]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m2, [r4 - 11 * 16]        ; [5]
+    pmulhrsw    m5,        m7
+    movu        m1,        [r2]                      ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+  %if (%1 == 1)
+    pinsrb      m1,        [r3], 0
+  %endif
+    palignr     m2,        m1, above, 15             ; [14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 a]
+    punpcklbw   m2,        m1                        ; [7 6 6 5 5 4 4 3 3 2 2 1 1 0 0]
+    pmaddubsw   m6,        m2, [r4 + 12 * 16]        ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r4 + 3 * 16]         ; [19]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m0,        m2, [r4 - 6 * 16]         ; [10]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+    pmaddubsw   m1,        m2, [r4 - 15 * 16]        ; [1]
+    pmulhrsw    m1,        m7
+    ; splice the next projected byte from `above`
+    palignr     m2,        above, 14
+    pmaddubsw   m3,        m2, [r4 + 8 * 16]         ; [24]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m2, [r4 - 16]             ; [15]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r4 - 10 * 16]        ; [6]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pslldq      m0,        above, 1
+    palignr     m2,        m0, 14
+    pmaddubsw   m5,        m2, [r4 + 13 * 16]        ; [29]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 + 4 * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r4 - 5 * 16]         ; [11]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r4 - 14 * 16]        ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pslldq      m0,        1
+    palignr     m2,        m0, 14
+    pmaddubsw   m1,        m2, [r4 + 9 * 16]         ; [25]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m2, [r4]                  ; [16]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m2, [r4 - 9 * 16]         ; [7]
+    pmulhrsw    m4,        m7
+    pslldq      m0,        above, 3
+    palignr     m2,        m0, 14
+    pmaddubsw   m3,        m2, [r4 + 14 * 16]        ; [30]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m2, [r4 + 5 * 16]         ; [21]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 - 4 * 16]         ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r4 - 13 * 16]        ; [3]
+    pmulhrsw    m6,        m7
+    pslldq      m0,        1
+    palignr     m2,        m0, 14
+    pmaddubsw   m0,        m2, [r4 + 10 * 16]        ; [26]
+    pmulhrsw    m0,        m7
+    packuswb    m6,        m0
+    pmaddubsw   m1,        m2, [r4 + 16]             ; [17]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m0,        m2, [r4 - 8 * 16]         ; [8]
+    pmulhrsw    m0,        m7
+    packuswb    m1,        m0
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+    pslldq      m0,        above, 5
+    palignr     m2,        m0, 14
+    pmaddubsw   m4,        m2, [r4 + 15 * 16]        ; [31]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r4 + 6 * 16]         ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m2, [r4 - 3 * 16]         ; [13]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 - 12 * 16]        ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pslldq      m0,        1
+    palignr     m2,        m0, 14
+    pmaddubsw   m6,        m2, [r4 + 11 * 16]        ; [27]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r4 + 2 * 16]         ; [18]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m2, [r4 - 7 * 16]         ; [09]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m2, [r4 - 16 * 16]        ; [00]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
+; MODE_13_23 <store?>, <row0?>
+; One 8-column x 32-row strip of the angular mode 13/23 predictor for
+; strips past the first (no projected `above` pixels needed — the
+; reference window simply slides back 2 bytes per row group, as the
+; [r2 - 2/-4/-6/-7] loads show).  Same interpolation scheme as the
+; other angular macros: pmaddubsw against ang_table rows relative to
+; r4 (the weight-16 row), pmulhrsw with m7 (pw_1024), four transposed
+; 8x8 tiles via TRANSPOSE_STORE_8x8.
+; %1 is forwarded to TRANSPOSE_STORE_8x8; when both %1 and %2 are 1
+; the final window's byte 0 is patched with the top-left sample [r3].
+%macro MODE_13_23 2
+    movu        m2,        [r2]                      ; [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+    palignr     m1,        m2, 1                     ; [x ,15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
+    punpckhbw   m0,        m2, m1                    ; [x, 15, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 10, 9, 9, 8]
+    punpcklbw   m2,        m1                        ; [8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0]
+    palignr     m0,        m2, 2                     ; [9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1]
+    pmaddubsw   m4,        m0, [r4 + 7 * 16]         ; [23]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m0, [r4 - 2 * 16]         ; [14]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m0, [r4 - 11 * 16]        ; [05]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 + 12 * 16]        ; [28]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r4 + 3 * 16]         ; [19]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m3,        m2, [r4 - 6 * 16]         ; [10]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    pmaddubsw   m1,        m2, [r4 - 15 * 16]        ; [1]
+    pmulhrsw    m1,        m7
+    ; window slides back 2 bytes for the next row group
+    movu        m2,        [r2 - 2]                  ; [14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1]
+    palignr     m3,        m2, 1                     ; [x, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+    punpckhbw   m0,        m2, m3
+    punpcklbw   m2,        m3
+    palignr     m0,        m2, 2
+    pmaddubsw   m3,        m0, [r4 + 8 * 16]         ; [24]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    mova        m3,        m0
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m3, [r4 - 16]             ; [15]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m3, [r4 - 10 * 16]        ; [6]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m2, [r4 + 13 * 16]        ; [29]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 + 4 * 16]         ; [20]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r4 - 5 * 16]         ; [11]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r4 - 14 * 16]        ; [2]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    movu        m2,        [r2 - 4]                  ; [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+    palignr     m1,        m2, 1                     ; [x ,15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
+    punpckhbw   m0,        m2, m1                    ; [x, 15, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 10, 9, 9, 8]
+    punpcklbw   m2,        m1                        ; [8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0]
+    palignr     m0,        m2, 2                     ; [9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1]
+    pmaddubsw   m1,        m0, [r4 + 9 * 16]         ; [25]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m0, [r4]                  ; [16]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    mova        m3,        m0
+    TRANSPOSE_STORE_8x8 1, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m3, [r4 - 9 * 16]         ; [7]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m3,        m2, [r4 + 14 * 16]        ; [30]
+    pmulhrsw    m3,        m7
+    packuswb    m4,        m3
+    pmaddubsw   m5,        m2, [r4 + 5 * 16]         ; [21]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 - 4 * 16]         ; [12]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    pmaddubsw   m6,        m2, [r4 - 13 * 16]        ; [3]
+    pmulhrsw    m6,        m7
+    movu        m2,        [r2 - 6]                  ; [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+    palignr     m1,        m2, 1                     ; [x ,15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
+    punpckhbw   m0,        m2, m1                    ; [x, 15, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 10, 9, 9, 8]
+    punpcklbw   m2,        m1                        ; [8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0]
+    palignr     m0,        m2, 2                     ; [9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1]
+    pmaddubsw   m3,        m0, [r4 + 10 * 16]        ; [26]
+    pmulhrsw    m3,        m7
+    packuswb    m6,        m3
+    pmaddubsw   m1,        m0, [r4 + 16]             ; [17]
+    pmulhrsw    m1,        m7
+    pmaddubsw   m3,        m0, [r4 - 8 * 16]         ; [8]
+    pmulhrsw    m3,        m7
+    packuswb    m1,        m3
+    TRANSPOSE_STORE_8x8 2, %1, m4, m5, m6, m1
+    pmaddubsw   m4,        m2, [r4 + 15 * 16]        ; [31]
+    pmulhrsw    m4,        m7
+    pmaddubsw   m5,        m2, [r4 + 6 * 16]         ; [22]
+    pmulhrsw    m5,        m7
+    packuswb    m4,        m5
+    pmaddubsw   m5,        m2, [r4 - 3 * 16]         ; [13]
+    pmulhrsw    m5,        m7
+    pmaddubsw   m6,        m2, [r4 - 12 * 16]        ; [04]
+    pmulhrsw    m6,        m7
+    packuswb    m5,        m6
+    movu        m2,        [r2 - 7]                  ; [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+  %if ((%1 & %2) == 1)
+    pinsrb      m2,        [r3], 0
+  %endif
+    palignr     m1,        m2, 1                     ; [x ,15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
+    punpcklbw   m2,        m1                        ; [8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0]
+    pmaddubsw   m6,        m2, [r4 + 11 * 16]        ; [27]
+    pmulhrsw    m6,        m7
+    pmaddubsw   m1,        m2, [r4 + 2 * 16]         ; [18]
+    pmulhrsw    m1,        m7
+    packuswb    m6,        m1
+    pmaddubsw   m1,        m2, [r4 - 7 * 16]         ; [09]
+    pmulhrsw    m1,        m7
+    ; final integer-position row via the pb_fact0 shuffle (pure copy)
+    movu        m0,        [pb_fact0]
+    pshufb      m2,        m0
+    pmovzxbw    m2,        m2
+    packuswb    m1,        m2
+    TRANSPOSE_STORE_8x8 3, %1, m4, m5, m6, m1
+%endmacro
+
+; 32x32 intra angular prediction, HEVC mode 13 (SSE4 path).
+; NOTE(review): presumably r0 = dst, r1 = dst stride, r2 = reference pixel
+; buffer with a second ref set at [r2 + 64] -- confirm against the caller.
+; The heavy lifting is done by the MODE_13_23_ROW0 / MODE_13_23 macros
+; (defined earlier in this file); this body only sequences them down the
+; block, stepping r2 through the references and r0/r6 down the rows.
+INIT_XMM sse4
+cglobal intra_pred_ang32_13, 3,7,8,0-(1*mmsize)
+%define above [rsp + 0 * mmsize]
+    mov         r3,        r2
+    add         r2,        64
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]                  ; r5 -> 3 * stride
+    lea         r6,        [r0 + r1 * 4]             ; r6 -> 4 * stride
+    mova        m7,        [pw_1024]
+
+; first 8 output rows need special reference handling (ROW0 variant)
+    MODE_13_23_ROW0 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        7
+
+    MODE_13_23 1, 1
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    mov         r3,        2
+; remaining two 8-row groups share the same macro expansion
+.loop:
+    MODE_13_23 1, 0
+    lea         r0,        [r6 + r1 * 4]
+    lea         r6,        [r6 + r1 * 8]
+    add         r2,        8
+    dec         r3
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 14 (SSE4 path).
+; Gathers the projected reference pixels into a 64-byte-aligned stack
+; scratch area (so all local data sits in one cache line, per the note
+; below), then emits the block in four passes of PROC32_8x8, 8 rows each.
+; [rsp + 63] holds the pass counter (initialised to 4, decremented per pass).
+INIT_XMM sse4
+cglobal intra_pred_ang32_14, 3,7,8
+    ; NOTE: alignment stack to 64 bytes, so all of local data in same cache line
+    mov         r6, rsp
+    sub         rsp, 64+gprsize
+    and         rsp, ~63
+    mov         [rsp+64], r6                ; saved original rsp, restored before RET
+
+    ; collect reference pixel
+    movu        m0, [r2]
+    movu        m1, [r2 + 15]
+    pshufb      m0, [c_mode32_14_0]      ; [x x x x x x x x x 0 2 5 7 10 12 15]
+    pshufb      m1, [c_mode32_14_0]      ; [x x x x x x x x x 15 17 20 22 25 27 30]
+    pslldq      m1, 10                   ; [17 20 22 25 27 30 x x x x x x x x x x x]
+    palignr     m0, m1, 10               ; [x x x 0 2 5 7 10 12 15 17 20 22 25 27 30]
+    mova        [rsp], m0
+    movu        m0, [r2 + 1 + 64]
+    movu        m1, [r2 + 1 + 16 + 64]
+    movu        [rsp + 13], m0
+    movu        [rsp + 13 + 16], m1
+    mov         [rsp + 63], byte 4          ; 4 iterations of .loop below
+
+    ; filter
+    lea         r2, [rsp + 13]           ; r2 -> [0]
+    lea         r3, [c_shuf8_0]          ; r3 -> shuffle8
+    lea         r4, [ang_table]          ; r4 -> ang_table
+    lea         r5, [r1 * 3]             ; r5 -> 3 * stride
+    lea         r6, [r0 + r1 * 4]        ; r6 -> 4 * stride
+    mova        m5, [pw_1024]            ; m5 -> 1024
+    mova        m6, [c_deinterval8]      ; m6 -> c_deinterval8
+
+.loop:
+    ; Row[0 - 7]
+    ; m0..m6 are byte-shifted views of the reference window; the trailing
+    ; immediates of PROC32_8x8 are the per-row interpolation fractions.
+    movu        m7, [r2 - 4]
+    palignr     m0, m7, 3
+    mova        m1, m0
+    palignr     m2, m7, 2
+    mova        m3, m2
+    palignr     m4, m7, 1
+    mova        m5, m4
+    mova        m6, m4
+    PROC32_8x8  0, 1, 19,6,25,12,31,18,5,24
+
+    ; Row[8 - 15]
+    movu        m7, [r2 - 7]
+    palignr     m0, m7, 3
+    palignr     m1, m7, 2
+    mova        m2, m1
+    mova        m3, m1
+    palignr     m4, m7, 1
+    mova        m5, m4
+    mova        m6, m7
+    PROC32_8x8  1, 1, 11,30,17,4,23,10,29,16
+
+    ; Row[16 - 23]
+    movu        m7, [r2 - 10]
+    palignr     m0, m7, 3
+    palignr     m1, m7, 2
+    mova        m2, m1
+    palignr     m3, m7, 1
+    mova        m4, m3
+    mova        m5, m3
+    mova        m6, m7
+    PROC32_8x8  2, 1, 3,22,9,28,15,2,21,8
+
+    ; Row[24 - 31]
+    movu        m7, [r2 - 13]
+    palignr     m0, m7, 2
+    mova        m1, m0
+    mova        m2, m0
+    palignr     m3, m7, 1
+    mova        m4, m3
+    mova        m5, m7
+    mova        m6, m7
+    PROC32_8x8  3, 1, 27,14,1,20,7,26,13,0
+
+    lea         r0, [r6 + r1 * 4]
+    lea         r6, [r6 + r1 * 8]
+    add         r2, 8
+    dec         byte [rsp + 63]
+    jnz        .loop
+    mov         rsp, [rsp+64]
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 15 (SSE4 path).
+; Same scheme as mode 14 above: projected references are packed into a
+; 64-byte-aligned stack scratch, then four PROC32_8x8 passes (8 rows each)
+; produce the block; [rsp + 63] is the pass counter.
+INIT_XMM sse4
+cglobal intra_pred_ang32_15, 4,7,8
+    ; NOTE: alignment stack to 64 bytes, so all of local data in same cache line
+    mov         r6, rsp
+    sub         rsp, 64+gprsize
+    and         rsp, ~63
+    mov         [rsp+64], r6                ; saved original rsp, restored before RET
+
+    ; collect reference pixel
+    movu        m0, [r2]
+    movu        m1, [r2 + 15]
+    pshufb      m0, [c_mode32_15_0]      ; [x x x x x x x 0 2 4 6 8 9 11 13 15]
+    pshufb      m1, [c_mode32_15_0]      ; [x x x x x x x 15 17 19 21 23 24 26 28 30]
+    mova        [rsp], m1
+    movu        [rsp + 8], m0
+    movu        m0, [r2 + 1 + 64]
+    movu        m1, [r2 + 1 + 16 + 64]
+    movu        [rsp + 17], m0
+    movu        [rsp + 17 + 16], m1
+    mov         [rsp + 63], byte 4          ; 4 iterations of .loop below
+
+    ; filter
+    lea         r2, [rsp + 17]           ; r2 -> [0]
+    lea         r3, [c_shuf8_0]          ; r3 -> shuffle8
+    lea         r4, [ang_table]          ; r4 -> ang_table
+    lea         r5, [r1 * 3]             ; r5 -> 3 * stride
+    lea         r6, [r0 + r1 * 4]        ; r6 -> 4 * stride
+    mova        m5, [pw_1024]            ; m5 -> 1024
+    mova        m6, [c_deinterval8]      ; m6 -> c_deinterval8
+
+.loop:
+    ; Row[0 - 7]
+    movu        m7, [r2 - 5]
+    palignr     m0, m7, 4
+    palignr     m1, m7, 3
+    mova        m2, m1
+    palignr     m3, m7, 2
+    mova        m4, m3
+    palignr     m5, m7, 1
+    mova        m6, m5
+    PROC32_8x8  0, 1, 15,30,13,28,11,26,9,24
+
+    ; Row[8 - 15]
+    movu        m7, [r2 - 9]
+    palignr     m0, m7, 4
+    palignr     m1, m7, 3
+    mova        m2, m1
+    palignr     m3, m7, 2
+    mova        m4, m3
+    palignr     m5, m7, 1
+    mova        m6, m5
+    PROC32_8x8  1, 1, 7,22,5,20,3,18,1,16
+
+    ; Row[16 - 23]
+    movu        m7, [r2 - 13]
+    palignr     m0, m7, 3
+    mova        m1, m0
+    palignr     m2, m7, 2
+    mova        m3, m2
+    palignr     m4, m7, 1
+    mova        m5, m4
+    mova        m6, m7
+    PROC32_8x8  2, 1, 31,14,29,12,27,10,25,8
+
+    ; Row[24 - 31]
+    movu        m7, [r2 - 17]
+    palignr     m0, m7, 3
+    mova        m1, m0
+    palignr     m2, m7, 2
+    mova        m3, m2
+    palignr     m4, m7, 1
+    mova        m5, m4
+    mova        m6, m7
+    PROC32_8x8  3, 1, 23,6,21,4,19,2,17,0
+
+    lea         r0, [r6 + r1 * 4]
+    lea         r6, [r6 + r1 * 8]
+    add         r2, 8
+    dec         byte [rsp + 63]
+    jnz        .loop
+    mov         rsp, [rsp+64]
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 16 (SSE4 path).
+; Same scheme as modes 14/15: pack projected references into aligned
+; stack scratch, then four PROC32_8x8 passes of 8 rows; [rsp + 63] is
+; the pass counter.
+INIT_XMM sse4
+cglobal intra_pred_ang32_16, 4,7,8
+    ; NOTE: alignment stack to 64 bytes, so all of local data in same cache line
+    mov         r6, rsp
+    sub         rsp, 64+gprsize
+    and         rsp, ~63
+    mov         [rsp+64], r6                ; saved original rsp, restored before RET
+
+    ; collect reference pixel
+    movu        m0, [r2]
+    movu        m1, [r2 + 15]
+    pshufb      m0, [c_mode32_16_0]      ; [x x x x x 0 2 3 5 6 8 9 11 12 14 15]
+    pshufb      m1, [c_mode32_16_0]      ; [x x x x x 15 17 18 20 21 23 24 26 27 29 30]
+    mova        [rsp], m1
+    movu        [rsp + 10], m0
+    movu        m0, [r2 + 1 + 64]
+    movu        m1, [r2 + 1 + 16 + 64]
+    movu        [rsp + 21], m0
+    movu        [rsp + 21 + 16], m1
+    mov         [rsp + 63], byte 4          ; 4 iterations of .loop below
+
+    ; filter
+    lea         r2, [rsp + 21]           ; r2 -> [0]
+    lea         r3, [c_shuf8_0]          ; r3 -> shuffle8
+    lea         r4, [ang_table]          ; r4 -> ang_table
+    lea         r5, [r1 * 3]             ; r5 -> 3 * stride
+    lea         r6, [r0 + r1 * 4]        ; r6 -> 4 * stride
+    mova        m5, [pw_1024]            ; m5 -> 1024
+    mova        m6, [c_deinterval8]      ; m6 -> c_deinterval8
+
+.loop:
+    ; Row[0 - 7]
+    movu        m7, [r2 - 6]
+    palignr     m0, m7, 5
+    palignr     m1, m7, 4
+    mova        m2, m1
+    palignr     m3, m7, 3
+    palignr     m4, m7, 2
+    mova        m5, m4
+    palignr     m6, m7, 1
+    PROC32_8x8  0, 1, 11,22,1,12,23,2,13,24
+
+    ; Row[8 - 15]
+    movu        m7, [r2 - 11]
+    palignr     m0, m7, 5
+    palignr     m1, m7, 4
+    palignr     m2, m7, 3
+    mova        m3, m2
+    palignr     m4, m7, 2
+    palignr     m5, m7, 1
+    mova        m6, m5
+    PROC32_8x8  1, 1, 3,14,25,4,15,26,5,16
+
+    ; Row[16 - 23]
+    movu        m7, [r2 - 16]
+    palignr     m0, m7, 4
+    mova        m1, m0
+    palignr     m2, m7, 3
+    palignr     m3, m7, 2
+    mova        m4, m3
+    palignr     m5, m7, 1
+    mova        m6, m7
+    PROC32_8x8  2, 1, 27,6,17,28,7,18,29,8
+
+    ; Row[24 - 31]
+    movu        m7, [r2 - 21]
+    palignr     m0, m7, 4
+    palignr     m1, m7, 3
+    mova        m2, m1
+    palignr     m3, m7, 2
+    palignr     m4, m7, 1
+    mova        m5, m4
+    mova        m6, m7
+    PROC32_8x8  3, 1, 19,30,9,20,31,10,21,0
+
+    lea         r0, [r6 + r1 * 4]
+    lea         r6, [r6 + r1 * 8]
+    add         r2, 8
+    dec         byte [rsp + 63]
+    jnz        .loop
+    mov         rsp, [rsp+64]
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 17 (SSE4 path).
+; Same scratch-buffer scheme as modes 14-16. NOTE(review): references are
+; stored at [rsp + 26] but r2 is set to [rsp + 25] ("r2 -> [0]") -- this
+; one-byte offset appears intentional (mode-17 projection), mirrored in
+; mode 19 below; verify against the upstream x265 source if touched.
+INIT_XMM sse4
+cglobal intra_pred_ang32_17, 4,7,8
+    ; NOTE: alignment stack to 64 bytes, so all of local data in same cache line
+    mov         r6, rsp
+    sub         rsp, 64+gprsize
+    and         rsp, ~63
+    mov         [rsp+64], r6                ; saved original rsp, restored before RET
+
+    ; collect reference pixel
+    movu        m0, [r2]
+    movu        m1, [r2 + 16]
+    pshufb      m0, [c_mode32_17_0]
+    pshufb      m1, [c_mode32_17_0]
+    mova        [rsp     ], m1
+    movu        [rsp + 13], m0
+    movu        m0, [r2 + 1 + 64]
+    movu        m1, [r2 + 1 + 16 + 64]
+    movu        [rsp + 26], m0
+    movu        [rsp + 26 + 16], m1
+    mov         [rsp + 63], byte 4          ; 4 iterations of .loop below
+
+    ; filter
+    lea         r2, [rsp + 25]          ; r2 -> [0]
+    lea         r3, [c_shuf8_0]         ; r3 -> shuffle8
+    lea         r4, [ang_table]         ; r4 -> ang_table
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r0 + r1 * 4]       ; r6 -> 4 * stride
+    mova        m5, [pw_1024]           ; m5 -> 1024
+    mova        m6, [c_deinterval8]     ; m6 -> c_deinterval8
+
+.loop:
+    ; Row[0 - 7]
+    movu        m7, [r2 - 6]
+    palignr     m0, m7, 6
+    palignr     m1, m7, 5
+    palignr     m2, m7, 4
+    palignr     m3, m7, 3
+    palignr     m4, m7, 2
+    mova        m5, m4
+    palignr     m6, m7, 1
+    PROC32_8x8  0, 1, 6,12,18,24,30,4,10,16
+
+    ; Row[7 - 15]
+    movu        m7, [r2 - 12]
+    palignr     m0, m7, 5
+    palignr     m1, m7, 4
+    mova        m2, m1
+    palignr     m3, m7, 3
+    palignr     m4, m7, 2
+    palignr     m5, m7, 1
+    mova        m6, m7
+    PROC32_8x8  1, 1, 22,28,2,8,14,20,26,0
+
+    ; Row[16 - 23]
+    movu        m7, [r2 - 19]
+    palignr     m0, m7, 6
+    palignr     m1, m7, 5
+    palignr     m2, m7, 4
+    palignr     m3, m7, 3
+    palignr     m4, m7, 2
+    mova        m5, m4
+    palignr     m6, m7, 1
+    PROC32_8x8  2, 1, 6,12,18,24,30,4,10,16
+
+    ; Row[24 - 31]
+    movu        m7, [r2 - 25]
+    palignr     m0, m7, 5
+    palignr     m1, m7, 4
+    mova        m2, m1
+    palignr     m3, m7, 3
+    palignr     m4, m7, 2
+    palignr     m5, m7, 1
+    mova        m6, m7
+    PROC32_8x8  3, 1, 22,28,2,8,14,20,26,0
+
+    lea         r0, [r6 + r1 * 4]
+    lea         r6, [r6 + r1 * 8]
+    add         r2, 8
+    dec         byte [rsp + 63]
+    jnz        .loop
+    mov         rsp, [rsp+64]
+
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 18 (pure diagonal), AVX2 path.
+; Mode 18 needs no interpolation: every output row is a byte-shifted copy
+; of the reference row, so the whole block is produced with palignr + movu
+; only (no pmaddubsw/pmulhrsw as in the fractional-angle modes above).
+INIT_YMM avx2
+cglobal intra_pred_ang32_18, 4, 4, 3
+    movu           m0, [r2]
+    movu           xm1, [r2 + 1 + 64]
+    pshufb         xm1, [intra_pred_shuff_15_0]
+    mova           xm2, xm0
+    vinserti128    m1, m1, xm2, 1          ; m1 = reversed side refs | first ref row
+
+    lea            r3, [r1 * 3]            ; r3 -> 3 * stride
+
+    ; rows 0..3: shift m1/m0 pair right by 0,15,14,13 bytes
+    movu           [r0], m0
+    palignr        m2, m0, m1, 15
+    movu           [r0 + r1], m2
+    palignr        m2, m0, m1, 14
+    movu           [r0 + r1 * 2], m2
+    palignr        m2, m0, m1, 13
+    movu           [r0 + r3], m2
+
+    lea            r0, [r0 + r1 * 4]
+    palignr        m2, m0, m1, 12
+    movu           [r0], m2
+    palignr        m2, m0, m1, 11
+    movu           [r0 + r1], m2
+    palignr        m2, m0, m1, 10
+    movu           [r0 + r1 * 2], m2
+    palignr        m2, m0, m1, 9
+    movu           [r0 + r3], m2
+
+    lea            r0, [r0 + r1 * 4]
+    palignr        m2, m0, m1, 8
+    movu           [r0], m2
+    palignr        m2, m0, m1, 7
+    movu           [r0 + r1], m2
+    palignr        m2, m0, m1, 6
+    movu           [r0 + r1 * 2], m2
+    palignr        m2, m0, m1, 5
+    movu           [r0 + r3], m2
+
+    lea            r0, [r0 + r1 * 4]
+    palignr        m2, m0, m1, 4
+    movu           [r0], m2
+    palignr        m2, m0, m1, 3
+    movu           [r0 + r1], m2
+    palignr        m2, m0, m1, 2
+    movu           [r0 + r1 * 2], m2
+    palignr        m2, m0, m1, 1
+    movu           [r0 + r3], m2
+
+    lea            r0, [r0 + r1 * 4]
+    movu           [r0], m1
+
+    ; second half: bring in the next 16 side references and keep shifting
+    movu           xm0, [r2 + 64 + 17]
+    pshufb         xm0, [intra_pred_shuff_15_0]
+    vinserti128    m0, m0, xm1, 1
+
+    palignr        m2, m1, m0, 15
+    movu           [r0 + r1], m2
+    palignr        m2, m1, m0, 14
+    movu           [r0 + r1 * 2], m2
+    palignr        m2, m1, m0, 13
+    movu           [r0 + r3], m2
+
+    lea            r0, [r0 + r1 * 4]
+    palignr        m2, m1, m0, 12
+    movu           [r0], m2
+    palignr        m2, m1, m0, 11
+    movu           [r0 + r1], m2
+    palignr        m2, m1, m0, 10
+    movu           [r0 + r1 * 2], m2
+    palignr        m2, m1, m0, 9
+    movu           [r0 + r3], m2
+
+    lea            r0, [r0 + r1 * 4]
+    palignr        m2, m1, m0, 8
+    movu           [r0], m2
+    palignr        m2, m1, m0, 7
+    movu           [r0 + r1], m2
+    palignr        m2, m1, m0,6
+    movu           [r0 + r1 * 2], m2
+    palignr        m2, m1, m0, 5
+    movu           [r0 + r3], m2
+
+    lea            r0, [r0 + r1 * 4]
+    palignr        m2, m1, m0, 4
+    movu           [r0], m2
+    palignr        m2, m1, m0, 3
+    movu           [r0 + r1], m2
+    palignr        m2, m1, m0,2
+    movu           [r0 + r1 * 2], m2
+    palignr        m2, m1, m0, 1
+    movu           [r0 + r3], m2
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 18 (pure diagonal), SSE4 path.
+; As in the AVX2 version above, every row is a byte-shifted copy of the
+; reference rows; each 32-pixel output row is written as two 16-byte
+; halves ([r0] and [r0 + 16]) built with palignr from the m0..m3 refs.
+INIT_XMM sse4
+cglobal intra_pred_ang32_18, 4,5,5
+    movu        m0, [r2]               ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+    movu        m1, [r2 + 16]          ; [31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16]
+    movu        m2, [r2 + 1 + 64]      ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    movu        m3, [r2 + 17 + 64]     ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+
+    lea         r2, [r1 * 2]           ; r2 -> 2 * stride (refs fully loaded, reuse reg)
+    lea         r3, [r1 * 3]           ; r3 -> 3 * stride
+    lea         r4, [r1 * 4]           ; r4 -> 4 * stride
+
+    movu        [r0], m0
+    movu        [r0 + 16], m1
+
+    pshufb      m2, [c_mode32_18_0]    ; [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    pshufb      m3, [c_mode32_18_0]    ; [17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32]
+
+    palignr     m4, m0, m2, 15
+    movu        [r0 + r1], m4
+    palignr     m4, m1, m0, 15
+    movu        [r0 + r1 + 16], m4
+    palignr     m4, m0, m2, 14
+    movu        [r0 + r2], m4
+    palignr     m4, m1, m0, 14
+    movu        [r0 + r2 + 16], m4
+    palignr     m4, m0, m2, 13
+    movu        [r0 + r3], m4
+    palignr     m4, m1, m0, 13
+    movu        [r0 + r3 + 16], m4
+
+    lea         r0, [r0 + r4]
+
+    palignr     m4, m0, m2, 12
+    movu        [r0], m4
+    palignr     m4, m1, m0, 12
+    movu        [r0 + 16], m4
+    palignr     m4, m0, m2, 11
+    movu        [r0 + r1], m4
+    palignr     m4, m1, m0, 11
+    movu        [r0 + r1 + 16], m4
+    palignr     m4, m0, m2, 10
+    movu        [r0 + r2], m4
+    palignr     m4, m1, m0, 10
+    movu        [r0 + r2 + 16], m4
+    palignr     m4, m0, m2, 9
+    movu        [r0 + r3], m4
+    palignr     m4, m1, m0, 9
+    movu        [r0 + r3 + 16], m4
+
+    lea         r0, [r0 + r4]
+
+    palignr     m4, m0, m2, 8
+    movu        [r0], m4
+    palignr     m4, m1, m0, 8
+    movu        [r0 + 16], m4
+    palignr     m4, m0, m2, 7
+    movu        [r0 + r1], m4
+    palignr     m4, m1, m0, 7
+    movu        [r0 + r1 + 16], m4
+    palignr     m4, m0, m2, 6
+    movu        [r0 + r2], m4
+    palignr     m4, m1, m0, 6
+    movu        [r0 + r2 + 16], m4
+    palignr     m4, m0, m2, 5
+    movu        [r0 + r3], m4
+    palignr     m4, m1, m0, 5
+    movu        [r0 + r3 + 16], m4
+
+    lea         r0, [r0 + r4]
+
+    palignr     m4, m0, m2, 4
+    movu        [r0], m4
+    palignr     m4, m1, m0, 4
+    movu        [r0 + 16], m4
+    palignr     m4, m0, m2, 3
+    movu        [r0 + r1], m4
+    palignr     m4, m1, m0, 3
+    movu        [r0 + r1 + 16], m4
+    palignr     m4, m0, m2, 2
+    movu        [r0 + r2], m4
+    palignr     m4, m1, m0, 2
+    movu        [r0 + r2 + 16], m4
+    palignr     m4, m0, m2, 1
+    movu        [r0 + r3], m4
+    palignr     m4, m1, m0, 1
+    movu        [r0 + r3 + 16], m4
+
+    lea         r0, [r0 + r4]
+
+    ; rows 16..31: shift window moves from (m0,m2) to (m2,m3)
+    movu        [r0], m2
+    movu        [r0 + 16], m0
+    palignr     m4, m2, m3, 15
+    movu        [r0 + r1], m4
+    palignr     m4, m0, m2, 15
+    movu        [r0 + r1 + 16], m4
+    palignr     m4, m2, m3, 14
+    movu        [r0 + r2], m4
+    palignr     m4, m0, m2, 14
+    movu        [r0 + r2 + 16], m4
+    palignr     m4, m2, m3, 13
+    movu        [r0 + r3], m4
+    palignr     m4, m0, m2, 13
+    movu        [r0 + r3 + 16], m4
+
+    lea         r0, [r0 + r4]
+
+    palignr     m4, m2, m3, 12
+    movu        [r0], m4
+    palignr     m4, m0, m2, 12
+    movu        [r0 + 16], m4
+    palignr     m4, m2, m3, 11
+    movu        [r0 + r1], m4
+    palignr     m4, m0, m2, 11
+    movu        [r0 + r1 + 16], m4
+    palignr     m4, m2, m3, 10
+    movu        [r0 + r2], m4
+    palignr     m4, m0, m2, 10
+    movu        [r0 + r2 + 16], m4
+    palignr     m4, m2, m3, 9
+    movu        [r0 + r3], m4
+    palignr     m4, m0, m2, 9
+    movu        [r0 + r3 + 16], m4
+
+    lea         r0, [r0 + r4]
+
+    palignr     m4, m2, m3, 8
+    movu        [r0], m4
+    palignr     m4, m0, m2, 8
+    movu        [r0 + 16], m4
+    palignr     m4, m2, m3, 7
+    movu        [r0 + r1], m4
+    palignr     m4, m0, m2, 7
+    movu        [r0 + r1 + 16], m4
+    palignr     m4, m2, m3, 6
+    movu        [r0 + r2], m4
+    palignr     m4, m0, m2, 6
+    movu        [r0 + r2 + 16], m4
+    palignr     m4, m2, m3, 5
+    movu        [r0 + r3], m4
+    palignr     m4, m0, m2, 5
+    movu        [r0 + r3 + 16], m4
+
+    lea         r0, [r0 + r4]
+
+    palignr     m4, m2, m3, 4
+    movu        [r0], m4
+    palignr     m4, m0, m2, 4
+    movu        [r0 + 16], m4
+    palignr     m4, m2, m3, 3
+    movu        [r0 + r1], m4
+    palignr     m4, m0, m2, 3
+    movu        [r0 + r1 + 16], m4
+    palignr     m4, m2, m3, 2
+    movu        [r0 + r2], m4
+    palignr     m4, m0, m2, 2
+    movu        [r0 + r2 + 16], m4
+    palignr     m4, m2, m3, 1
+    movu        [r0 + r3], m4
+    palignr     m4, m0, m2, 1
+    movu        [r0 + r3 + 16], m4
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 19 (SSE4 path).
+; Mirror of mode 17: same projection tables (c_mode32_17_0) but sources the
+; side references from [r2 + 64] and the main row from [r2 + 1], and walks
+; the destination column-wise (r6 tracks the column base; PROC32_8x8 is
+; invoked with its transpose flag = 0). [rsp + 63] is the 4-pass counter.
+; NOTE(review): stores refs at [rsp + 26], sets r2 = rsp + 25 -- same
+; intentional one-byte offset as mode 17 above.
+INIT_XMM sse4
+cglobal intra_pred_ang32_19, 4,7,8
+    ; NOTE: alignment stack to 64 bytes, so all of local data in same cache line
+    mov         r6, rsp
+    sub         rsp, 64+gprsize
+    and         rsp, ~63
+    mov         [rsp+64], r6                ; saved original rsp, restored before RET
+
+    ; collect reference pixel
+    movu        m0, [r2 + 64]
+    pinsrb      m0, [r2], 0
+    movu        m1, [r2 + 16 + 64]
+    pshufb      m0, [c_mode32_17_0]
+    pshufb      m1, [c_mode32_17_0]
+    mova        [rsp     ], m1
+    movu        [rsp + 13], m0
+    movu        m0, [r2 + 1]
+    movu        m1, [r2 + 1 + 16]
+    movu        [rsp + 26], m0
+    movu        [rsp + 26 + 16], m1
+    mov         [rsp + 63], byte 4          ; 4 iterations of .loop below
+
+    ; filter
+    lea         r2, [rsp + 25]          ; r2 -> [0]
+    lea         r3, [c_shuf8_0]         ; r3 -> shuffle8
+    lea         r4, [ang_table]         ; r4 -> ang_table
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r0]                ; r6 -> r0
+    mova        m5, [pw_1024]           ; m5 -> 1024
+    mova        m6, [c_deinterval8]     ; m6 -> c_deinterval8
+
+.loop:
+    ; Row[0 - 7]
+    movu        m7, [r2 - 6]
+    palignr     m0, m7, 6
+    palignr     m1, m7, 5
+    palignr     m2, m7, 4
+    palignr     m3, m7, 3
+    palignr     m4, m7, 2
+    mova        m5, m4
+    palignr     m6, m7, 1
+    PROC32_8x8  0, 0, 6,12,18,24,30,4,10,16
+
+    ; Row[7 - 15]
+    movu        m7, [r2 - 12]
+    palignr     m0, m7, 5
+    palignr     m1, m7, 4
+    mova        m2, m1
+    palignr     m3, m7, 3
+    palignr     m4, m7, 2
+    palignr     m5, m7, 1
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  1, 0, 22,28,2,8,14,20,26,0
+
+    ; Row[16 - 23]
+    movu        m7, [r2 - 19]
+    palignr     m0, m7, 6
+    palignr     m1, m7, 5
+    palignr     m2, m7, 4
+    palignr     m3, m7, 3
+    palignr     m4, m7, 2
+    mova        m5, m4
+    palignr     m6, m7, 1
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  2, 0, 6,12,18,24,30,4,10,16
+
+    ; Row[24 - 31]
+    movu        m7, [r2 - 25]
+    palignr     m0, m7, 5
+    palignr     m1, m7, 4
+    mova        m2, m1
+    palignr     m3, m7, 3
+    palignr     m4, m7, 2
+    palignr     m5, m7, 1
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  3, 0, 22,28,2,8,14,20,26,0
+
+    add         r6, 8                   ; advance to next 8-pixel column group
+    mov         r0, r6
+    add         r2, 8
+    dec         byte [rsp + 63]
+    jnz        .loop
+    mov         rsp, [rsp+64]
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 20 (SSE4 path).
+; Mirror of mode 16 (same c_mode32_16_0 projection table) with the two
+; reference sets swapped ([r2 + 64] vs [r2]) and column-wise output
+; (PROC32_8x8 transpose flag = 0, r6 tracks the column base).
+INIT_XMM sse4
+cglobal intra_pred_ang32_20, 4,7,8
+    ; NOTE: alignment stack to 64 bytes, so all of local data in same cache line
+    mov         r6, rsp
+    sub         rsp, 64+gprsize
+    and         rsp, ~63
+    mov         [rsp+64], r6                ; saved original rsp, restored before RET
+
+    ; collect reference pixel
+    movu        m0, [r2 + 64]
+    pinsrb      m0, [r2], 0
+    movu        m1, [r2 + 15 + 64]
+    pshufb      m0, [c_mode32_16_0]      ; [x x x x x 0 2 3 5 6 8 9 11 12 14 15]
+    pshufb      m1, [c_mode32_16_0]      ; [x x x x x 15 17 18 20 21 23 24 26 27 29 30]
+    mova        [rsp], m1
+    movu        [rsp + 10], m0
+    movu        m0, [r2 + 1]
+    movu        m1, [r2 + 1 + 16]
+    movu        [rsp + 21], m0
+    movu        [rsp + 21 + 16], m1
+    mov         [rsp + 63], byte 4          ; 4 iterations of .loop below
+
+    ; filter
+    lea         r2, [rsp + 21]           ; r2 -> [0]
+    lea         r3, [c_shuf8_0]          ; r3 -> shuffle8
+    lea         r4, [ang_table]          ; r4 -> ang_table
+    lea         r5, [r1 * 3]             ; r5 -> 3 * stride
+    lea         r6, [r0]                 ; r6 -> r0
+    mova        m5, [pw_1024]            ; m5 -> 1024
+    mova        m6, [c_deinterval8]      ; m6 -> c_deinterval8
+
+.loop:
+    ; Row[0 - 7]
+    movu        m7, [r2 - 6]
+    palignr     m0, m7, 5
+    palignr     m1, m7, 4
+    mova        m2, m1
+    palignr     m3, m7, 3
+    palignr     m4, m7, 2
+    mova        m5, m4
+    palignr     m6, m7, 1
+    PROC32_8x8  0, 0, 11,22,1,12,23,2,13,24
+
+    ; Row[8 - 15]
+    movu        m7, [r2 - 11]
+    palignr     m0, m7, 5
+    palignr     m1, m7, 4
+    palignr     m2, m7, 3
+    mova        m3, m2
+    palignr     m4, m7, 2
+    palignr     m5, m7, 1
+    mova        m6, m5
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  1, 0, 3,14,25,4,15,26,5,16
+
+    ; Row[16 - 23]
+    movu        m7, [r2 - 16]
+    palignr     m0, m7, 4
+    mova        m1, m0
+    palignr     m2, m7, 3
+    palignr     m3, m7, 2
+    mova        m4, m3
+    palignr     m5, m7, 1
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  2, 0, 27,6,17,28,7,18,29,8
+
+    ; Row[24 - 31]
+    movu        m7, [r2 - 21]
+    palignr     m0, m7, 4
+    palignr     m1, m7, 3
+    mova        m2, m1
+    palignr     m3, m7, 2
+    palignr     m4, m7, 1
+    mova        m5, m4
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  3, 0, 19,30,9,20,31,10,21,0
+
+    add         r6, 8                    ; advance to next 8-pixel column group
+    mov         r0, r6
+    add         r2, 8
+    dec         byte [rsp + 63]
+    jnz        .loop
+    mov         rsp, [rsp+64]
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 21 (SSE4 path).
+; Mirror of mode 15 (same c_mode32_15_0 projection table) with the two
+; reference sets swapped and column-wise output (PROC32_8x8 transpose
+; flag = 0, r6 tracks the column base).
+INIT_XMM sse4
+cglobal intra_pred_ang32_21, 4,7,8
+    ; NOTE: alignment stack to 64 bytes, so all of local data in same cache line
+    mov         r6, rsp
+    sub         rsp, 64+gprsize
+    and         rsp, ~63
+    mov         [rsp+64], r6                ; saved original rsp, restored before RET
+
+    ; collect reference pixel
+    movu        m0, [r2 + 64]
+    pinsrb      m0, [r2], 0
+    movu        m1, [r2 + 15 + 64]
+    pshufb      m0, [c_mode32_15_0]      ; [x x x x x x x 0 2 4 6 8 9 11 13 15]
+    pshufb      m1, [c_mode32_15_0]      ; [x x x x x x x 15 17 19 21 23 24 26 28 30]
+    mova        [rsp], m1
+    movu        [rsp + 8], m0
+    movu        m0, [r2 + 1]
+    movu        m1, [r2 + 1 + 16]
+    movu        [rsp + 17], m0
+    movu        [rsp + 17 + 16], m1
+    mov         [rsp + 63], byte 4          ; 4 iterations of .loop below
+
+    ; filter
+    lea         r2, [rsp + 17]           ; r2 -> [0]
+    lea         r3, [c_shuf8_0]          ; r3 -> shuffle8
+    lea         r4, [ang_table]          ; r4 -> ang_table
+    lea         r5, [r1 * 3]             ; r5 -> 3 * stride
+    lea         r6, [r0]                 ; r6 -> r0
+    mova        m5, [pw_1024]            ; m5 -> 1024
+    mova        m6, [c_deinterval8]      ; m6 -> c_deinterval8
+
+.loop:
+    ; Row[0 - 7]
+    movu        m7, [r2 - 5]
+    palignr     m0, m7, 4
+    palignr     m1, m7, 3
+    mova        m2, m1
+    palignr     m3, m7, 2
+    mova        m4, m3
+    palignr     m5, m7, 1
+    mova        m6, m5
+    PROC32_8x8  0, 0, 15,30,13,28,11,26,9,24
+
+    ; Row[8 - 15]
+    movu        m7, [r2 - 9]
+    palignr     m0, m7, 4
+    palignr     m1, m7, 3
+    mova        m2, m1
+    palignr     m3, m7, 2
+    mova        m4, m3
+    palignr     m5, m7, 1
+    mova        m6, m5
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  1, 0, 7,22,5,20,3,18,1,16
+
+    ; Row[16 - 23]
+    movu        m7, [r2 - 13]
+    palignr     m0, m7, 3
+    mova        m1, m0
+    palignr     m2, m7, 2
+    mova        m3, m2
+    palignr     m4, m7, 1
+    mova        m5, m4
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  2, 0, 31,14,29,12,27,10,25,8
+
+    ; Row[24 - 31]
+    movu        m7, [r2 - 17]
+    palignr     m0, m7, 3
+    mova        m1, m0
+    palignr     m2, m7, 2
+    mova        m3, m2
+    palignr     m4, m7, 1
+    mova        m5, m4
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  3, 0, 23,6,21,4,19,2,17,0
+
+    add         r6, 8                    ; advance to next 8-pixel column group
+    mov         r0, r6
+    add         r2, 8
+    dec         byte [rsp + 63]
+    jnz        .loop
+    mov         rsp, [rsp+64]
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 22 (SSE4 path).
+; Mirror of mode 14 (same c_mode32_14_0 projection table) with the two
+; reference sets swapped and column-wise output (PROC32_8x8 transpose
+; flag = 0, r6 tracks the column base).
+INIT_XMM sse4
+cglobal intra_pred_ang32_22, 4,7,8
+    ; NOTE: alignment stack to 64 bytes, so all of local data in same cache line
+    mov         r6, rsp
+    sub         rsp, 64+gprsize
+    and         rsp, ~63
+    mov         [rsp+64], r6                ; saved original rsp, restored before RET
+
+    ; collect reference pixel
+    movu        m0, [r2 + 64]
+    pinsrb      m0, [r2], 0
+    movu        m1, [r2 + 15 + 64]
+    pshufb      m0, [c_mode32_14_0]      ; [x x x x x x x x x 0 2 5 7 10 12 15]
+    pshufb      m1, [c_mode32_14_0]      ; [x x x x x x x x x 15 17 20 22 25 27 30]
+    pslldq      m1, 10                   ; [17 20 22 25 27 30 x x x x x x x x x x x]
+    palignr     m0, m1, 10               ; [x x x 0 2 5 7 10 12 15 17 20 22 25 27 30]
+    mova        [rsp], m0
+    movu        m0, [r2 + 1]
+    movu        m1, [r2 + 1 + 16]
+    movu        [rsp + 13], m0
+    movu        [rsp + 13 + 16], m1
+    mov         [rsp + 63], byte 4          ; 4 iterations of .loop below
+
+    ; filter
+    lea         r2, [rsp + 13]           ; r2 -> [0]
+    lea         r3, [c_shuf8_0]          ; r3 -> shuffle8
+    lea         r4, [ang_table]          ; r4 -> ang_table
+    lea         r5, [r1 * 3]             ; r5 -> 3 * stride
+    lea         r6, [r0]                 ; r6 -> r0
+    mova        m5, [pw_1024]            ; m5 -> 1024
+    mova        m6, [c_deinterval8]      ; m6 -> c_deinterval8
+
+.loop:
+    ; Row[0 - 7]
+    movu        m7, [r2 - 4]
+    palignr     m0, m7, 3
+    mova        m1, m0
+    palignr     m2, m7, 2
+    mova        m3, m2
+    palignr     m4, m7, 1
+    mova        m5, m4
+    mova        m6, m4
+    PROC32_8x8  0, 0, 19,6,25,12,31,18,5,24
+
+    ; Row[8 - 15]
+    movu        m7, [r2 - 7]
+    palignr     m0, m7, 3
+    palignr     m1, m7, 2
+    mova        m2, m1
+    mova        m3, m1
+    palignr     m4, m7, 1
+    mova        m5, m4
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  1, 0, 11,30,17,4,23,10,29,16
+
+    ; Row[16 - 23]
+    movu        m7, [r2 - 10]
+    palignr     m0, m7, 3
+    palignr     m1, m7, 2
+    mova        m2, m1
+    palignr     m3, m7, 1
+    mova        m4, m3
+    mova        m5, m3
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  2, 0, 3,22,9,28,15,2,21,8
+
+    ; Row[24 - 31]
+    movu        m7, [r2 - 13]
+    palignr     m0, m7, 2
+    mova        m1, m0
+    mova        m2, m0
+    palignr     m3, m7, 1
+    mova        m4, m3
+    mova        m5, m7
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  3, 0, 27,14,1,20,7,26,13,0
+
+    add         r6, 8                    ; advance to next 8-pixel column group
+    mov         r0, r6
+    add         r2, 8
+    dec         byte [rsp + 63]
+    jnz        .loop
+    mov         rsp, [rsp+64]
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 23 (SSE4 path).
+; Companion of mode 13: same MODE_13_23_ROW0 / MODE_13_23 macros, invoked
+; with flag 0 so output advances by 8-pixel column groups (r6 tracks the
+; column base) instead of row groups.
+INIT_XMM sse4
+cglobal intra_pred_ang32_23, 4,7,8,0-(1*mmsize)
+%define above [rsp + 0 * mmsize]
+    lea         r3,        [r2 + 64]
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]            ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+    MODE_13_23_ROW0 0
+    add         r6,        8
+    mov         r0,        r6
+    add         r2,        7
+    mov         r3,        3                   ; 3 remaining 8-column groups
+.loop:
+    MODE_13_23 0, 0
+    add         r6,        8
+    mov         r0,        r6
+    add         r2,        8
+    dec         r3
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 24 (SSE4 path).
+; Same driver shape as mode 23 above but using the MODE_12_24_ROW0 /
+; MODE_12_24 macros; output advances by 8-pixel column groups via r6.
+INIT_XMM sse4
+cglobal intra_pred_ang32_24, 4,7,8,0-(1*mmsize)
+  %define above    [rsp + 0 * mmsize]
+    lea         r3,        [r2 + 64]
+    lea         r4,        [ang_table + 16 * 16]
+    lea         r5,        [r1 * 3]            ; r5 -> 3 * stride
+    mov         r6,        r0
+    mova        m7,        [pw_1024]
+
+    MODE_12_24_ROW0 0
+    add         r6,        8
+    mov         r0,        r6
+    add         r2,        7
+    mov         r3,        3                   ; 3 remaining 8-column groups
+.loop:
+    MODE_12_24 0
+    add         r6,        8
+    mov         r0,        r6
+    add         r2,        8
+    dec         r3
+    jnz         .loop
+    RET
+
+; 32x32 intra angular prediction, HEVC mode 25 (SSE4 path).
+; Near-vertical angle: no projected side references are needed, so each
+; PROC32_8x8 pass feeds the SAME source row (m0..m6 all copies of m7)
+; with only the per-row fraction changing. The first scratch byte is a
+; broadcast of [r2 + 16 + 64] to pad index -1.
+INIT_XMM sse4
+cglobal intra_pred_ang32_25, 4,7,8
+    ; NOTE: alignment stack to 64 bytes, so all of local data in same cache line
+    mov         r6, rsp
+    sub         rsp, 64+gprsize
+    and         rsp, ~63
+    mov         [rsp+64], r6                ; saved original rsp, restored before RET
+
+    ; collect reference pixel
+    movu        m0, [r2 + 16 + 64]
+    pxor        m1, m1
+    pshufb      m0, m1                   ; [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
+    mova        [rsp], m0
+    movu        m0, [r2]
+    movu        m1, [r2 + 16]
+    movu        m2, [r2 + 32]
+    movu        [rsp + 1], m0
+    movu        [rsp + 1 + 16], m1
+    movu        [rsp + 1 + 32], m2
+    mov         [rsp + 63], byte 4          ; 4 iterations of .loop below
+
+    ; filter
+    lea         r2, [rsp + 1]            ; r2 -> [0]
+    lea         r3, [c_shuf8_0]          ; r3 -> shuffle8
+    lea         r4, [ang_table]          ; r4 -> ang_table
+    lea         r5, [r1 * 3]             ; r5 -> 3 * stride
+    lea         r6, [r0]                 ; r6 -> r0
+    mova        m5, [pw_1024]            ; m5 -> 1024
+    mova        m6, [c_deinterval8]      ; m6 -> c_deinterval8
+
+.loop:
+    ; Row[0 - 7]
+    movu        m7, [r2]
+    mova        m0, m7
+    mova        m1, m7
+    mova        m2, m7
+    mova        m3, m7
+    mova        m4, m7
+    mova        m5, m7
+    mova        m6, m7
+    PROC32_8x8  0, 0, 30,28,26,24,22,20,18,16
+
+    ; Row[8 - 15]
+    movu        m7, [r2]
+    mova        m0, m7
+    mova        m1, m7
+    mova        m2, m7
+    mova        m3, m7
+    mova        m4, m7
+    mova        m5, m7
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  1, 0, 14,12,10,8,6,4,2,0
+
+    ; Row[16 - 23]
+    movu        m7, [r2 - 1]
+    mova        m0, m7
+    mova        m1, m7
+    mova        m2, m7
+    mova        m3, m7
+    mova        m4, m7
+    mova        m5, m7
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  2, 0, 30,28,26,24,22,20,18,16
+
+    ; Row[24 - 31]
+    movu        m7, [r2 - 1]
+    mova        m0, m7
+    mova        m1, m7
+    mova        m2, m7
+    mova        m3, m7
+    mova        m4, m7
+    mova        m5, m7
+    mova        m6, m7
+    lea         r0, [r0 + r1 * 4]
+    PROC32_8x8  3, 0, 14,12,10,8,6,4,2,0
+
+    add         r6, 8                    ; advance to next 8-pixel column group
+    mov         r0, r6
+    add         r2, 8
+    dec         byte [rsp + 63]
+    jnz        .loop
+    mov         rsp, [rsp+64]
+    RET
+
+INIT_XMM sse4
+; 32x32 intra angular prediction, mode 26 (pure vertical copy).
+; Two outer iterations (r6 = 2) each copy 16 reference bytes [r2+1..r2+16]
+; into all 32 rows of a 16-wide half of the block.  When the 5th argument
+; (saved into r3d below) is non-zero, the first destination column is
+; additionally filtered from the left-neighbour pixels kept in the m8/m9
+; stack slots.
+cglobal intra_pred_ang32_26, 5,7,7,0-(2*mmsize)
+%define m8 [rsp + 0 * mmsize]
+%define m9 [rsp + 1 * mmsize]
+    mov         r6,             2            ; two 16-wide halves
+    movu        m0,             [r2 + 64]
+    pinsrb      m0,             [r2], 0
+    movu        m1,             [r2 + 1 + 64]
+    mova        m8,             m0           ; stash left-neighbour data for the filter
+    mova        m9,             m1
+    mov         r3d,            r4d          ; save 5th arg (filter flag) before r4 is reused
+    lea         r4,             [r1 * 3]     ; r4 -> 3 * stride from here on
+
+.loop:
+    movu        m0,             [r2 + 1]
+
+    movu        [r0],           m0
+    movu        [r0 + r1],      m0
+    movu        [r0 + r1 * 2],  m0
+    movu        [r0 + r4],      m0
+    lea         r5,             [r0 + r1 * 4]
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+    lea         r5,             [r5 + r1 * 4]
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+    lea         r5,             [r5 + r1 * 4]
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+    ; NOTE(review): this lea rewinds r5 to row 4 instead of continuing from
+    ; row 16, so rows 4..15 are stored a second time with identical data by
+    ; the groups below.  Harmless (same bytes) but redundant — confirm
+    ; against upstream x265 before changing.
+    lea         r5,             [r0 + r1 * 4]
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+    lea         r5,             [r5 + r1 * 4]
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+    lea         r5,             [r5 + r1 * 4]
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+    lea         r5,             [r5 + r1 * 4]
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+    lea         r5,             [r5 + r1 * 4]
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+    lea         r5,             [r5 + r1 * 4]
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+    lea         r5,             [r5 + r1 * 4]
+    movu        [r5],           m0
+    movu        [r5 + r1],      m0
+    movu        [r5 + r1 * 2],  m0
+    movu        [r5 + r4],      m0
+
+; filter
+    cmp         r3d, byte 0                  ; skip filtering when flag is 0
+    jz         .quit
+
+    ; Filter the first column: dst[y][0] += (left[y] - topLeft) >> 1,
+    ; computed in 16-bit lanes then packed back to bytes.
+    pxor        m4,        m4
+    pshufb      m0,        m4                ; broadcast first prediction byte
+    pmovzxbw    m0,        m0
+    mova        m1,        m0
+    movu        m2,        m8                ; reload stashed neighbour data
+    movu        m3,        m9
+
+    pshufb      m2,        m4                ; broadcast top-left byte
+    pmovzxbw    m2,        m2
+    movhlps     m4,        m3
+    pmovzxbw    m3,        m3
+    pmovzxbw    m4,        m4
+    psubw       m3,        m2
+    psubw       m4,        m2
+    psraw       m3,        1
+    psraw       m4,        1
+    paddw       m0,        m3
+    paddw       m1,        m4
+    packuswb    m0,        m1
+
+    ; scatter the 16 filtered bytes down the first column
+    pextrb      [r0],           m0, 0
+    pextrb      [r0 + r1],      m0, 1
+    pextrb      [r0 + r1 * 2],  m0, 2
+    pextrb      [r0 + r4],      m0, 3
+    lea         r5,             [r0 + r1 * 4]
+    pextrb      [r5],           m0, 4
+    pextrb      [r5 + r1],      m0, 5
+    pextrb      [r5 + r1 * 2],  m0, 6
+    pextrb      [r5 + r4],      m0, 7
+    lea         r5,             [r5 + r1 * 4]
+    pextrb      [r5],           m0, 8
+    pextrb      [r5 + r1],      m0, 9
+    pextrb      [r5 + r1 * 2],  m0, 10
+    pextrb      [r5 + r4],      m0, 11
+    lea         r5,             [r5 + r1 * 4]
+    pextrb      [r5],           m0, 12
+    pextrb      [r5 + r1],      m0, 13
+    pextrb      [r5 + r1 * 2],  m0, 14
+    pextrb      [r5 + r4],      m0, 15
+
+.quit:
+    lea         r2, [r2 + 16]                ; advance to the right 16-wide half
+    add         r0, 16
+    dec         r6d
+    jnz         .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 intra angular prediction, mode 27.  Four 8-column groups; per-group
+; work is done by the MODE_9_27 macro (defined earlier in the file).
+cglobal intra_pred_ang32_27, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]   ; r3 -> centre of angle-weight table
+    mov         r4d,       4                       ; group counter (4 x 8 columns)
+    lea         r5,        [r1 * 3]                ; r5 -> 3 * stride
+    mov         r6,        r0                      ; r6 keeps start of current group
+    mova        m7,        [pw_1024]               ; rounding constant (pmulhrsw inside macro)
+.loop:
+    MODE_9_27 0
+    add         r6,        8                       ; advance to next 8-column group
+    mov         r0,        r6                      ; rewind dst pointer
+    add         r2,        8                       ; advance reference pointer
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 intra angular prediction, mode 28.  Same structure as mode 27 above;
+; per-group work is done by the MODE_8_28 macro (defined earlier in the file).
+cglobal intra_pred_ang32_28, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]   ; r3 -> centre of angle-weight table
+    mov         r4d,       4                       ; group counter (4 x 8 columns)
+    lea         r5,        [r1 * 3]                ; r5 -> 3 * stride
+    mov         r6,        r0                      ; r6 keeps start of current group
+    mova        m7,        [pw_1024]               ; rounding constant (pmulhrsw inside macro)
+.loop:
+    MODE_8_28 0
+    add         r6,        8
+    mov         r0,        r6
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 intra angular prediction, mode 29.  Same structure as mode 27 above;
+; per-group work is done by the MODE_7_29 macro (defined earlier in the file).
+cglobal intra_pred_ang32_29, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]   ; r3 -> centre of angle-weight table
+    mov         r4d,       4                       ; group counter (4 x 8 columns)
+    lea         r5,        [r1 * 3]                ; r5 -> 3 * stride
+    mov         r6,        r0                      ; r6 keeps start of current group
+    mova        m7,        [pw_1024]               ; rounding constant (pmulhrsw inside macro)
+.loop:
+    MODE_7_29 0
+    add         r6,        8
+    mov         r0,        r6
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 intra angular prediction, mode 30.  Same structure as mode 27 above;
+; per-group work is done by the MODE_6_30 macro (defined earlier in the file).
+cglobal intra_pred_ang32_30, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]   ; r3 -> centre of angle-weight table
+    mov         r4d,       4                       ; group counter (4 x 8 columns)
+    lea         r5,        [r1 * 3]                ; r5 -> 3 * stride
+    mov         r6,        r0                      ; r6 keeps start of current group
+    mova        m7,        [pw_1024]               ; rounding constant (pmulhrsw inside macro)
+.loop:
+    MODE_6_30 0
+    add         r6,        8
+    mov         r0,        r6
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 intra angular prediction, mode 31.  Same structure as mode 27 above;
+; per-group work is done by the MODE_5_31 macro (defined earlier in the file).
+cglobal intra_pred_ang32_31, 3,7,8
+    lea         r3,        [ang_table + 16 * 16]   ; r3 -> centre of angle-weight table
+    mov         r4d,       4                       ; group counter (4 x 8 columns)
+    lea         r5,        [r1 * 3]                ; r5 -> 3 * stride
+    mov         r6,        r0                      ; r6 keeps start of current group
+    mova        m7,        [pw_1024]               ; rounding constant (pmulhrsw inside macro)
+.loop:
+    MODE_5_31 0
+    add         r6,        8
+    mov         r0,        r6
+    add         r2,        8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 intra angular prediction, mode 32.  Same structure as mode 27 above;
+; per-group work is done by the MODE_4_32 macro (defined earlier in the file).
+cglobal intra_pred_ang32_32, 3,7,8
+    lea         r3,     [ang_table + 16 * 16]   ; r3 -> centre of angle-weight table
+    mov         r4d,    4                       ; group counter (4 x 8 columns)
+    lea         r5,     [r1 * 3]                ; r5 -> 3 * stride
+    mov         r6,     r0                      ; r6 keeps start of current group
+    mova        m7,     [pw_1024]               ; rounding constant (pmulhrsw inside macro)
+.loop:
+    MODE_4_32 0
+    add         r6,      8
+    mov         r0,     r6
+    add         r2,     8
+    dec         r4
+    jnz        .loop
+    RET
+
+INIT_XMM sse4
+; 32x32 intra angular prediction, mode 33.  Same structure as mode 27 above;
+; per-group work is done by the MODE_3_33 macro (defined earlier in the file).
+cglobal intra_pred_ang32_33, 3,7,8
+    lea         r3,    [ang_table + 16 * 16]   ; r3 -> centre of angle-weight table
+    mov         r4d,   4                       ; group counter (4 x 8 columns)
+    lea         r5,    [r1 * 3]                ; r5 -> 3 * stride
+    mov         r6,    r0                      ; r6 keeps start of current group
+    mova        m7,    [pw_1024]               ; rounding constant (pmulhrsw inside macro)
+.loop:
+    MODE_3_33 0
+    add         r6,    8
+    mov         r0,    r6
+    add         r2,    8
+    dec         r4
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------------------
+; start of intra_pred_ang32 angular modes avx2 asm
+;-----------------------------------------------------------------------------------------
+
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+
+; register mapping :
+; %1-%8 - output registers
+; %9    - temp register
+; %10   - for label naming
+; Store eight YMM rows (%1-%8) to the destination, either transposed
+; (8x32 -> 32x8) or directly, depending on ZF at entry.
+; The leading jnz consumes the flags set by the caller's `test r7d, r7d`
+; (performed in the ang32_mode_* helpers below); the SSE/AVX instructions
+; executed between that test and this macro do not modify EFLAGS, so the
+; flags survive across the call.
+;   ZF set   (r7d == 0): transpose via the punpck* cascade and store the
+;                        result as 32 rows of 8 bytes (movq/movhps halves,
+;                        then vpermq to reach the upper YMM lanes).
+;   ZF clear (r7d != 0): store the eight registers unchanged as 8 rows of 32.
+; Uses r0 (dst, advanced by r6 = 4*stride per 4 rows), r1 (stride),
+; r5 (3*stride), r4 (saved dst base, transposed path only).
+%macro TRANSPOSE_32x8_AVX2 10
+    jnz         .skip%10
+
+    ; transpose 8x32 to 32x8 and then store
+    punpcklbw   m%9, m%1, m%2
+    punpckhbw   m%1, m%2
+    punpcklbw   m%2, m%3, m%4
+    punpckhbw   m%3, m%4
+    punpcklbw   m%4, m%5, m%6
+    punpckhbw   m%5, m%6
+    punpcklbw   m%6, m%7, m%8
+    punpckhbw   m%7, m%8
+
+    punpcklwd   m%8, m%9, m%2
+    punpckhwd   m%9, m%2
+    punpcklwd   m%2, m%4, m%6
+    punpckhwd   m%4, m%6
+    punpcklwd   m%6, m%1, m%3
+    punpckhwd   m%1, m%3
+    punpcklwd   m%3, m%5, m%7
+    punpckhwd   m%5, m%7
+
+    punpckldq   m%7, m%8, m%2
+    punpckhdq   m%8, m%2
+    punpckldq   m%2, m%6, m%3
+    punpckhdq   m%6, m%3
+    punpckldq   m%3, m%9, m%4
+    punpckhdq   m%9, m%4
+    punpckldq   m%4, m%1, m%5
+    punpckhdq   m%1, m%5
+
+    ; lower 128-bit lanes: 16 rows of 8 bytes
+    movq        [r0 + r1 * 0], xm%7
+    movhps      [r0 + r1 * 1], xm%7
+    movq        [r0 + r1 * 2], xm%8
+    movhps      [r0 + r5 * 1], xm%8
+
+    lea         r0, [r0 + r6]
+
+    movq        [r0 + r1 * 0], xm%3
+    movhps      [r0 + r1 * 1], xm%3
+    movq        [r0 + r1 * 2], xm%9
+    movhps      [r0 + r5 * 1], xm%9
+
+    lea         r0, [r0 + r6]
+
+    movq        [r0 + r1 * 0], xm%2
+    movhps      [r0 + r1 * 1], xm%2
+    movq        [r0 + r1 * 2], xm%6
+    movhps      [r0 + r5 * 1], xm%6
+
+    lea         r0, [r0 + r6]
+
+    movq        [r0 + r1 * 0], xm%4
+    movhps      [r0 + r1 * 1], xm%4
+    movq        [r0 + r1 * 2], xm%1
+    movhps      [r0 + r5 * 1], xm%1
+
+    lea         r0, [r0 + r6]
+
+    ; swap in the upper 128-bit lanes for the next 16 rows
+    vpermq      m%8, m%8, 00001110b
+    vpermq      m%7, m%7, 00001110b
+    vpermq      m%6, m%6, 00001110b
+    vpermq      m%3, m%3, 00001110b
+    vpermq      m%9, m%9, 00001110b
+    vpermq      m%2, m%2, 00001110b
+    vpermq      m%4, m%4, 00001110b
+    vpermq      m%1, m%1, 00001110b
+
+    movq        [r0 + r1 * 0], xm%7
+    movhps      [r0 + r1 * 1], xm%7
+    movq        [r0 + r1 * 2], xm%8
+    movhps      [r0 + r5 * 1], xm%8
+
+    lea         r0, [r0 + r6]
+
+    movq        [r0 + r1 * 0], xm%3
+    movhps      [r0 + r1 * 1], xm%3
+    movq        [r0 + r1 * 2], xm%9
+    movhps      [r0 + r5 * 1], xm%9
+
+    lea         r0, [r0 + r6]
+
+    movq        [r0 + r1 * 0], xm%2
+    movhps      [r0 + r1 * 1], xm%2
+    movq        [r0 + r1 * 2], xm%6
+    movhps      [r0 + r5 * 1], xm%6
+
+    lea         r0, [r0 + r6]
+
+    movq        [r0 + r1 * 0], xm%4
+    movhps      [r0 + r1 * 1], xm%4
+    movq        [r0 + r1 * 2], xm%1
+    movhps      [r0 + r5 * 1], xm%1
+
+    lea         r0, [r4 + 8]            ; transposed path: next 8-column group
+    jmp         .end%10
+.skip%10:
+    ; direct path: the eight registers are already the output rows
+    movu        [r0 + r1 * 0], m%1
+    movu        [r0 + r1 * 1], m%2
+    movu        [r0 + r1 * 2], m%3
+    movu        [r0 + r5 * 1], m%4
+
+    lea         r0, [r0 + r6]
+
+    movu        [r0 + r1 * 0], m%5
+    movu        [r0 + r1 * 1], m%6
+    movu        [r0 + r1 * 2], m%7
+    movu        [r0 + r5 * 1], m%8
+
+    lea         r0, [r0 + r6]
+.end%10:
+%endmacro
+
+; Shared helper for AVX2 modes 3 and 33: computes 16 output lines from the
+; reference pixels at r2, in two batches of 8, each batch interleaving
+; pmaddubsw (angle weight from the table at r3) + pmulhrsw (round by m7 =
+; pw_1024) + packuswb, then handing the 8 results to TRANSPOSE_32x8_AVX2.
+; The `test r7d, r7d` only sets flags; they are consumed by the jnz inside
+; the TRANSPOSE macro (r7d == 0 -> transposed store for mode 3,
+; r7d != 0 -> direct store for mode 33).
+cglobal ang32_mode_3_33_row_0_15
+    test        r7d,      r7d
+    ; rows 0 to 7
+    movu        m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    ; build byte-pair operands for pmaddubsw (neighbouring reference pixels)
+    punpckhbw   m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 + 10 * 32]  ; [26]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 + 10 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    ; each subsequent line shifts the reference window by one pixel (palignr)
+    palignr     m5,         m2, m0, 2
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m5,         [r3 + 4 * 32]       ; [20]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m1,         [r3 + 4 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m5,         m1
+
+    palignr     m6,         m2, m0, 4
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m6,         [r3 - 2 * 32]       ; [14]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         [r3 - 2 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    palignr     m8,         m2, m0, 6
+    palignr     m1,         m3, m2, 6
+    pmaddubsw   m8,         [r3 - 8 * 32]       ; [8]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         [r3 - 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    palignr     m10,        m2, m0, 8
+    palignr     m11,        m3, m2, 8
+    pmaddubsw   m9,         m10, [r3 - 14 * 32] ; [2]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         m11, [r3 - 14 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    pmaddubsw   m10,        [r3 + 12 * 32]      ; [28]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m11,        [r3 + 12 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m10,        m11
+
+    palignr     m11,        m2, m0, 10
+    palignr     m1,         m3, m2, 10
+    pmaddubsw   m11,        [r3 + 6 * 32]       ; [22]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 + 6 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m12,        m2, m0, 12
+    palignr     m1,         m3, m2, 12
+    pmaddubsw   m12,        [r3]                ; [16]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0
+
+    ; rows 8 to 15
+    palignr     m4,         m2, m0, 14
+    palignr     m1,         m3, m2, 14
+    pmaddubsw   m4,         [r3 - 6 * 32]       ; [10]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         [r3 - 6 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    pmaddubsw   m5,         m2, [r3 - 12 * 32]  ; [4]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m1,         m3, [r3 - 12 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m5,         m1
+
+    pmaddubsw   m6,         m2, [r3 + 14 * 32]  ; [30]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         m3, [r3 + 14 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    ; extend the reference window for the deeper shifts below
+    movu        m0,         [r2 + 25]
+    movu        m1,         [r2 + 26]
+    punpcklbw   m0,         m1
+
+    palignr     m8,         m3, m2, 2
+    palignr     m1,         m0, m3, 2
+    pmaddubsw   m8,         [r3 + 8 * 32]       ; [24]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         [r3 + 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    palignr     m9,         m3, m2, 4
+    palignr     m1,         m0, m3, 4
+    pmaddubsw   m9,         [r3 + 2 * 32]       ; [18]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         [r3 + 2 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    palignr     m10,        m3, m2, 6
+    palignr     m1,         m0, m3, 6
+    pmaddubsw   m10,        [r3 - 4 * 32]       ; [12]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         [r3 - 4 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    palignr     m11,        m3, m2, 8
+    palignr     m1,         m0, m3, 8
+    pmaddubsw   m11,        [r3 - 10 * 32]      ; [6]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 - 10 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    movu        m12,        [r2 + 14]           ; last line: fraction 0, plain pixel copy
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 8
+    ret
+
+INIT_YMM avx2
+; 32x32 intra angular prediction, mode 3 (AVX2).
+; r7d = 0 selects the transposed store path inside TRANSPOSE_32x8_AVX2
+; (mode 3 reads the left reference, at offset 64, and must transpose).
+; The helper is called twice: columns 0-15, then columns 16-31 with the
+; reference pointer advanced by 13 bytes.
+cglobal intra_pred_ang32_3, 3,8,13
+    add         r2, 64                  ; use the left-neighbour half of the reference buffer
+    lea         r3, [ang_table_avx2 + 32 * 16]
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; rounding constant for pmulhrsw
+    mov         r4, r0                  ; save dst base for the transpose path
+    xor         r7d, r7d                ; r7d = 0 -> transpose
+
+    call ang32_mode_3_33_row_0_15
+
+    add         r4, 16
+    mov         r0, r4
+    add         r2, 13
+
+    call ang32_mode_3_33_row_0_15
+    RET
+
+INIT_YMM avx2
+; 32x32 intra angular prediction, mode 33 (AVX2).
+; Shares the helper with mode 3; r7d = 1 selects the direct (non-transposed)
+; store path inside TRANSPOSE_32x8_AVX2, and the top reference (no +64
+; offset) is used.  Second call produces rows 16-31 (r0 is left where the
+; helper's stores advanced it).
+cglobal intra_pred_ang32_33, 3,8,13
+    lea         r3, [ang_table_avx2 + 32 * 16]
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; rounding constant for pmulhrsw
+    xor         r7d, r7d
+    inc         r7d                     ; r7d = 1 -> direct store
+
+    call ang32_mode_3_33_row_0_15
+
+    add         r2, 13                  ; advance reference for rows 16-31
+
+    call ang32_mode_3_33_row_0_15
+    RET
+
+; Shared helper for AVX2 modes 4 and 32, lines 0-15.  Same scheme as
+; ang32_mode_3_33_row_0_15 above: two batches of 8 lines, each line formed
+; by pmaddubsw with a mode-4 angle weight (bracketed constants), pmulhrsw
+; rounding by m7 = pw_1024 and packuswb, then stored via
+; TRANSPOSE_32x8_AVX2.  Flags from `test r7d, r7d` select transposed
+; (r7d == 0, mode 4) or direct (r7d != 0, mode 32) storing inside the macro.
+cglobal ang32_mode_4_32_row_0_15
+    test        r7d,        r7d
+    ; rows 0 to 7
+    movu        m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    ; build byte-pair operands for pmaddubsw (neighbouring reference pixels)
+    punpckhbw   m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 + 5 * 32]   ; [21]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 + 5 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    palignr     m6,         m2, m0, 2
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m5,         m6, [r3 - 6 * 32]   ; [10]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m1, [r3 - 6 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    pmaddubsw   m6,         [r3 + 15 * 32]      ; [31]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         [r3 + 15 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    palignr     m8,         m2, m0, 4
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m8,         [r3 + 4 * 32]       ; [20]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         [r3 + 4 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    palignr     m10,        m2, m0, 6
+    palignr     m11,        m3, m2, 6
+    pmaddubsw   m9,         m10, [r3 - 7 * 32]  ; [9]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         m11, [r3 - 7 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    pmaddubsw   m10,        [r3 + 14 * 32]      ; [30]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m11,        [r3 + 14 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m10,        m11
+
+    palignr     m11,        m2, m0, 8
+    palignr     m1,         m3, m2, 8
+    pmaddubsw   m11,        [r3 + 3 * 32]       ; [19]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 + 3 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m12,        m2, m0, 10
+    palignr     m1,         m3, m2, 10
+    pmaddubsw   m12,        [r3 - 8 * 32]       ; [8]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3 - 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0
+
+    ; rows 8 to 15
+    palignr     m4,         m2, m0, 10
+    palignr     m1,         m3, m2, 10
+    pmaddubsw   m4,         [r3 + 13 * 32]      ; [29]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         [r3 + 13 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    palignr     m5,         m2, m0, 12
+    palignr     m1,         m3, m2, 12
+    pmaddubsw   m5,         [r3 + 2 * 32]       ; [18]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m1,         [r3 + 2 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m5,         m1
+
+    palignr     m8,         m2, m0, 14
+    palignr     m1,         m3, m2, 14
+    pmaddubsw   m6,         m8, [r3 - 9 * 32]   ; [7]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m1, [r3 - 9 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         [r3 + 12 * 32]      ; [28]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         [r3 + 12 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    pmaddubsw   m9,         m2, [r3 + 1 * 32]   ; [17]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         m3, [r3 + 1 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    ; extend the reference window for the deeper shifts below
+    movu        m0,         [r2 + 25]
+    movu        m1,         [r2 + 26]
+    punpcklbw   m0,         m1
+
+    palignr     m11,        m3, m2, 2
+    palignr     m1,         m0, m3, 2
+    pmaddubsw   m10,        m11, [r3 - 10 * 32] ; [6]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m12,        m1, [r3 - 10 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m10,        m12
+
+    pmaddubsw   m11,        [r3 + 11 * 32]      ; [27]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 + 11 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m0,         m3, 4               ; two-operand palignr: m0 = align(m0, m3, 4)
+    palignr     m3,         m2, 4
+    pmaddubsw   m3,         [r3]                ; [16]
+    pmulhrsw    m3,         m7
+    pmaddubsw   m0,         [r3]
+    pmulhrsw    m0,         m7
+    packuswb    m3,         m0
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 3, 0, 8
+    ret
+
+; Shared helper for AVX2 modes 4 and 32, lines 16-31.  Same structure as
+; ang32_mode_4_32_row_0_15 above but with the second half of the mode-4
+; fractional weights (caller advances r2 by 11 before this call).  Flags
+; from `test r7d, r7d` again select transposed (r7d == 0) vs direct
+; (r7d != 0) storing inside TRANSPOSE_32x8_AVX2.
+cglobal ang32_mode_4_32_row_16_31
+    test        r7d,      r7d
+    ; rows 0 to 7
+    movu        m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    punpckhbw   m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 - 11 * 32]  ; [5]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 - 11 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    pmaddubsw   m5,         m0, [r3 + 10 * 32]  ; [26]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m1,         m2, [r3 + 10 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m5,         m1
+
+    palignr     m6,         m2, m0, 2
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m6,         [r3 - 1 * 32]       ; [15]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         [r3 - 1 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    palignr     m9,         m2, m0, 4
+    palignr     m10,        m3, m2, 4
+    pmaddubsw   m8,         m9, [r3 - 12 * 32]  ; [4]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         m10, [r3 - 12 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    pmaddubsw   m9,         [r3 + 9 * 32]       ; [25]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m10,        [r3 + 9 * 32]
+    pmulhrsw    m10,        m7
+    packuswb    m9,         m10
+
+    palignr     m10,         m2, m0, 6
+    palignr     m11,        m3, m2, 6
+    pmaddubsw   m10,        [r3 - 2 * 32]       ; [14]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m11,        [r3 - 2 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m10,        m11
+
+    palignr     m12,        m2, m0, 8
+    palignr     m1,         m3, m2, 8
+    pmaddubsw   m11,        m12, [r3 - 13 * 32] ; [3]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 - 13 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m1,         m3, m2, 8           ; recompute: m1 was consumed above
+    pmaddubsw   m12,        [r3 + 8 * 32]       ; [24]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3 + 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0
+
+    ; rows 8 to 15
+    palignr     m4,         m2, m0, 10
+    palignr     m1,         m3, m2, 10
+    pmaddubsw   m4,         [r3 - 3 * 32]       ; [13]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         [r3 - 3 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    palignr     m6,         m2, m0, 12
+    palignr     m8,         m3, m2, 12
+    pmaddubsw   m5,         m6, [r3 - 14 * 32]  ; [2]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m1,         m8, [r3 - 14 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m5,         m1
+
+    pmaddubsw   m6,         [r3 + 7 * 32]       ; [23]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m8,         [r3 + 7 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m6,         m8
+
+    palignr     m8,         m2, m0, 14
+    palignr     m1,         m3, m2, 14
+    pmaddubsw   m8,         [r3 - 4 * 32]       ; [12]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         [r3 - 4 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    pmaddubsw   m9,         m2, [r3 - 15 * 32]  ; [1]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         m3, [r3 - 15 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    pmaddubsw   m10,        m2, [r3 + 6 * 32]   ; [22]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         m3, [r3 + 6 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    ; extend the reference window for the deeper shifts below
+    movu        m0,         [r2 + 25]
+    movu        m1,         [r2 + 26]
+    punpcklbw   m0,         m1
+
+    palignr     m11,        m3, m2, 2
+    palignr     m1,         m0, m3, 2
+    pmaddubsw   m11,        [r3 - 5 * 32]       ; [11]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 - 5 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    movu        m12,        [r2 + 11]           ; last line: fraction 0, plain pixel copy
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 8
+    ret
+
+INIT_YMM avx2
+; 32x32 intra angular prediction, mode 4 (AVX2).
+; r7d = 0 selects the transposed store path inside TRANSPOSE_32x8_AVX2
+; (mode 4 reads the left reference, at offset 64, and must transpose).
+; Lines 0-15 and 16-31 use separate helpers; the reference pointer is
+; advanced by 11 bytes between them.
+cglobal intra_pred_ang32_4, 3,8,13
+    add         r2, 64                  ; use the left-neighbour half of the reference buffer
+    lea         r3, [ang_table_avx2 + 32 * 16]
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; rounding constant for pmulhrsw
+    mov         r4, r0                  ; save dst base for the transpose path
+    xor         r7d, r7d                ; r7d = 0 -> transpose
+
+    call ang32_mode_4_32_row_0_15
+
+    add         r4, 16
+    mov         r0, r4
+    add         r2, 11
+
+    call ang32_mode_4_32_row_16_31
+    RET
+
+INIT_YMM avx2
+; 32x32 intra angular prediction, mode 32 (AVX2).
+; Shares the helpers with mode 4; r7d = 1 selects the direct
+; (non-transposed) store path inside TRANSPOSE_32x8_AVX2, and the top
+; reference (no +64 offset) is used.
+cglobal intra_pred_ang32_32, 3,8,13
+    lea         r3, [ang_table_avx2 + 32 * 16]
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; rounding constant for pmulhrsw
+    xor         r7d, r7d
+    inc         r7d                     ; r7d = 1 -> direct store
+
+    call ang32_mode_4_32_row_0_15
+
+    add         r2, 11                  ; advance reference for rows 16-31
+
+    call ang32_mode_4_32_row_16_31
+    RET
+
+cglobal ang32_mode_5_31_row_0_15
+    test        r7d,        r7d
+    ; rows 0 to 7
+    movu        m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    punpckhbw   m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 + 1 * 32]   ; [17]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 + 1 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    palignr     m6,         m2, m0, 2
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m5,         m6, [r3 - 14 * 32]  ; [2]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m1, [r3 - 14 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    pmaddubsw   m6,         [r3 + 3 * 32]       ; [19]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         [r3 + 3 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    palignr     m9,         m2, m0, 4
+    palignr     m10,        m3, m2, 4
+    pmaddubsw   m8,         m9, [r3 - 12 * 32]  ; [4]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         m10, [r3 - 12 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    pmaddubsw   m9,         [r3 + 5 * 32]       ; [21]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m10,        [r3 + 5 * 32]
+    pmulhrsw    m10,        m7
+    packuswb    m9,         m10
+
+    palignr     m11,        m2, m0, 6
+    palignr     m12,        m3, m2, 6
+    pmaddubsw   m10,        m11, [r3 - 10 * 32] ; [6]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         m12, [r3 - 10 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    pmaddubsw   m11,        [r3 + 7 * 32]       ; [23]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m12,        [r3 + 7 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m11,        m12
+
+    palignr     m12,        m2, m0, 8
+    palignr     m1,         m3, m2, 8
+    pmaddubsw   m12,        [r3 - 8 * 32]       ; [8]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3 - 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0
+
+    ; rows 8 to 15
+    palignr     m4,         m2, m0, 8
+    palignr     m1,         m3, m2, 8
+    pmaddubsw   m4,         [r3 + 9 * 32]       ; [25]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         [r3 + 9 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    palignr     m6,         m2, m0, 10
+    palignr     m1,         m3, m2, 10
+    pmaddubsw   m5,         m6, [r3 - 6 * 32]   ; [10]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m1, [r3 - 6 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    pmaddubsw   m6,         [r3 + 11 * 32]      ; [27]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         [r3 + 11 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    palignr     m9,         m2, m0, 12
+    palignr     m1,         m3, m2, 12
+    pmaddubsw   m8,         m9, [r3 - 4 * 32]   ; [12]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m10,        m1, [r3 - 4 * 32]
+    pmulhrsw    m10,        m7
+    packuswb    m8,         m10
+
+    pmaddubsw   m9,         [r3 + 13 * 32]      ; [29]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         [r3 + 13 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    palignr     m11,        m2, m0, 14
+    palignr     m1,         m3, m2, 14
+    pmaddubsw   m10,        m11, [r3 - 2 * 32]  ; [14]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m12,        m1, [r3 - 2 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m10,        m12
+
+    pmaddubsw   m11,        [r3 + 15 * 32]      ; [31]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 + 15 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    pmaddubsw   m2,         [r3]                ; [16]
+    pmulhrsw    m2,         m7
+    pmaddubsw   m3,         [r3]
+    pmulhrsw    m3,         m7
+    packuswb    m2,         m3
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 2, 0, 8
+    ret
+
+cglobal ang32_mode_5_31_row_16_31
+    test        r7d,        r7d                     ; ZF = (r7d == 0); no SIMD op below touches EFLAGS, so TRANSPOSE_32x8_AVX2 can still branch on it
+    ; relative rows 0 to 7 (output rows 16 to 23 of the 32x32 block)
+    movu        m0,         [r2 +  1]               ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]               ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]               ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]               ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    punpckhbw   m2,         m0, m1                  ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                      ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                      ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 - 15 * 32]      ; [1]  bracket = fraction index into the weight table; pmulhrsw by pw_1024 is a rounded >> 5
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 - 15 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    pmaddubsw   m5,         m0, [r3 + 2 * 32]       ; [18]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m2, [r3 + 2 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    palignr     m8,         m2, m0, 2
+    palignr     m9,         m3, m2, 2
+    pmaddubsw   m6,         m8, [r3 - 13 * 32]      ; [3]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         m9, [r3 - 13 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    pmaddubsw   m8,         [r3 + 4 * 32]           ; [20]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m9,         [r3 + 4 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m8,         m9
+
+    palignr     m10,        m2, m0, 4
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m9,         m10, [r3 - 11 * 32]     ; [5]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m11,        m1, [r3 - 11 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m9,         m11
+
+    pmaddubsw   m10,        [r3 + 6 * 32] ; [22]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         [r3 + 6 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    palignr     m12,        m2, m0, 6
+    palignr     m1,         m3, m2, 6
+    pmaddubsw   m11,        m12, [r3 - 9 * 32]      ; [7]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 - 9 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m1,         m3, m2, 6               ; re-derive the offset-6 shift: m1 was clobbered by the [7] row above
+    pmaddubsw   m12,        [r3 + 8 * 32]           ; [24]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3 + 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0    ; store 8 finished rows; branches on the ZF set by 'test r7d, r7d' (macro defined elsewhere - confirm polarity)
+
+    ; relative rows 8 to 15 (output rows 24 to 31)
+    palignr     m5,         m2, m0, 8
+    palignr     m8,         m3, m2, 8
+    pmaddubsw   m4,         m5, [r3 - 7 * 32]       ; [9]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m8, [r3 - 7 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    pmaddubsw   m5,         [r3 + 10 * 32]          ; [26]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         [r3 + 10 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    palignr     m8,         m2, m0, 10
+    palignr     m9,         m3, m2, 10
+    pmaddubsw   m6,         m8, [r3 - 5 * 32]       ; [11]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         m9, [r3 - 5 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    pmaddubsw   m8,         [r3 + 12 * 32]          ; [28]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m9,         [r3 + 12 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m8,         m9
+
+    palignr     m10,        m2, m0, 12
+    palignr     m11,        m3, m2, 12
+    pmaddubsw   m9,         m10, [r3 - 3 * 32]      ; [13]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         m11, [r3 - 3 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    pmaddubsw   m10,        [r3 + 14 * 32]          ; [30]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m11,        [r3 + 14 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m10,        m11
+
+    palignr     m11,        m2, m0, 14
+    palignr     m1,         m3, m2, 14
+    pmaddubsw   m11,        [r3 - 1 * 32]           ; [15]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 - 1 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    movu        m2,         [r2 + 9]                ; fraction-0 row: raw reference pixels, cf. the '[0]' loads in the mode 6/30 and 7/29 routines
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 2, 0, 8     ; store the last 8 rows; again branches on the preserved ZF
+    ret
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_5, 3,8,13
+    add         r2, 64                  ; NOTE(review): selects the second half of the reference buffer (left refs for this horizontal-family mode) - confirm buffer layout against caller
+    lea         r3, [ang_table_avx2 + 32 * 16]      ; r3 -> middle of the angle weight table; rows address it as [r3 +/- k*32]
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; pmulhrsw by 1024 == rounded >> 5 (weights in the table sum to 32)
+    mov         r4, r0                  ; remember dst so the second half can be written 16 columns further right
+    xor         r7d, r7d                ; r7d = 0: mode-5 path; the row routines test r7d and the store macro branches on the result - confirm polarity in TRANSPOSE_32x8_AVX2
+
+    call ang32_mode_5_31_row_0_15
+
+    add         r4, 16                  ; rows 16-31 land 16 columns to the right (transposed store)
+    mov         r0, r4
+    add         r2, 9                   ; step refs for rows 16-31; presumably floor(16 * mode-5 angle / 32) plus base offset - TODO verify against the scalar reference
+
+    call ang32_mode_5_31_row_16_31
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_31, 3,8,13
+    lea         r3, [ang_table_avx2 + 32 * 16]      ; r3 -> middle of the angle weight table (no 'add r2, 64': mode 31 reads the first half of the refs)
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; pmulhrsw by 1024 == rounded >> 5
+    xor         r7d, r7d
+    inc         r7d                     ; r7d = 1: mode-31 path; shares the row routines with mode 5, store macro picks the non-mode-5 behaviour - confirm polarity
+
+    call ang32_mode_5_31_row_0_15
+
+    add         r2, 9                   ; same reference step as the mode-5 wrapper - the two modes mirror each other
+
+    call ang32_mode_5_31_row_16_31
+    RET
+
+cglobal ang32_mode_6_30_row_0_15
+    test        r7d,        r7d                 ; ZF = (r7d == 0); SIMD below leaves EFLAGS intact for the TRANSPOSE macros
+    ; rows 0 to 7
+    movu        m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    punpckhbw   m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 - 3 * 32]   ; [13] bracket = fraction index; each row is pmaddubsw(weights) then rounded >> 5 via pmulhrsw/pw_1024
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 - 3 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    pmaddubsw   m5,         m0, [r3 + 10 * 32]  ; [26]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m2, [r3 + 10 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    palignr     m8,         m2, m0, 2
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m6,         m8, [r3 - 9 * 32]   ; [7]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m1, [r3 - 9 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         [r3 + 4 * 32]       ; [20]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         [r3 + 4 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    palignr     m11,        m2, m0, 4
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m9,         m11, [r3 - 15 * 32] ; [1]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m12,        m1, [r3 - 15 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m9,         m12
+
+    pmaddubsw   m10,        m11, [r3 - 2 * 32]  ; [14]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m12,        m1, [r3 - 2 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m10,        m12
+
+    pmaddubsw   m11,        [r3 + 11 * 32]      ; [27]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 + 11 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m12,        m2, m0, 6
+    palignr     m1,         m3, m2, 6
+    pmaddubsw   m12,        [r3 - 8 * 32]       ; [8]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3 - 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0    ; store 8 rows; branches on the ZF set by 'test r7d, r7d' (macro defined elsewhere)
+
+    ; rows 8 to 15
+    palignr     m4,         m2, m0, 6           ; re-derive the offset-6 shifts (consumed in-place by the [8] row above)
+    palignr     m1,         m3, m2, 6
+    pmaddubsw   m4,         [r3 + 5 * 32]       ; [21]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         [r3 + 5 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    palignr     m8,         m2, m0, 8
+    palignr     m1,         m3, m2, 8
+    pmaddubsw   m5,         m8, [r3 - 14 * 32]  ; [2]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m9,         m1, [r3 - 14 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m5,         m9
+
+    pmaddubsw   m6,         m8, [r3 - 1 * 32]   ; [15]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m1, [r3 - 1 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         [r3 + 12 * 32]      ; [28]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         [r3 + 12 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    palignr     m10,        m2, m0, 10
+    palignr     m1,         m3, m2, 10
+    pmaddubsw   m9,         m10, [r3 - 7 * 32]  ; [9]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m11,        m1, [r3 - 7 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m9,         m11
+
+    pmaddubsw   m10,        [r3 + 6 * 32]       ; [22]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         m1, [r3 + 6 * 32]   ; 3-operand form with dst == src1; equivalent to 'pmaddubsw m1, [mem]' used elsewhere
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    palignr     m3,         m2, 12              ; shift m3/m2 in place - the unshifted refs are no longer needed past this point
+    palignr     m2,         m0, 12
+    pmaddubsw   m11,        m2, [r3 - 13 * 32]  ; [3]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         m3, [r3 - 13 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    pmaddubsw   m2,         [r3]                ; [16]
+    pmulhrsw    m2,         m7
+    pmaddubsw   m3,         [r3]
+    pmulhrsw    m3,         m7
+    packuswb    m2,         m3
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 2, 0, 8     ; store the last 8 rows of this half; ZF still valid
+    ret
+
+cglobal ang32_mode_6_30_row_16_31
+    test        r7d,        r7d                 ; ZF = (r7d == 0); SIMD below leaves EFLAGS intact for the TRANSPOSE macros
+    ; relative rows 0 to 7 (output rows 16 to 23)
+    movu        m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    punpckhbw   m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 + 13 * 32]  ; [29] bracket = fraction index; weighted sum then rounded >> 5 via pmulhrsw/pw_1024
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 + 13 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    palignr     m6,         m2, m0, 2
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m5,         m6, [r3 - 6 * 32]   ; [10]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m1, [r3 - 6 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    pmaddubsw   m6,         [r3 + 7 * 32]       ; [23]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         [r3 + 7 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    palignr     m10,        m2, m0, 4
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m8,         m10, [r3 - 12 * 32] ; [4]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m11,        m1, [r3 - 12 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m8,         m11
+
+    pmaddubsw   m9,         m10, [r3 + 1 * 32]  ; [17]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m11,        m1, [r3 + 1 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m9,         m11
+
+    pmaddubsw   m10,        [r3 + 14 * 32]      ; [30]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         [r3 + 14 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    palignr     m12,        m2, m0, 6
+    palignr     m1,         m3, m2, 6
+    pmaddubsw   m11,        m12, [r3 - 5 * 32]  ; [11]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 - 5 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m1,         m3, m2, 6           ; re-derive the offset-6 shift: m1 was clobbered by the [11] row above
+    pmaddubsw   m12,        [r3 + 8 * 32]       ; [24]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3 + 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0    ; store 8 rows; branches on the ZF set by 'test r7d, r7d' (macro defined elsewhere)
+
+    ; relative rows 8 to 15 (output rows 24 to 31)
+    palignr     m6,         m2, m0, 8
+    palignr     m1,         m3, m2, 8
+    pmaddubsw   m4,         m6, [r3 - 11 * 32]  ; [5]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m8,         m1, [r3 - 11 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m4,         m8
+
+    pmaddubsw   m5,         m6, [r3 + 2 * 32]   ; [18]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m9,         m1, [r3 + 2 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m5,         m9
+
+    pmaddubsw   m6,         [r3 + 15 * 32]      ; [31]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         [r3 + 15 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    palignr     m9,         m2, m0, 10
+    palignr     m1,         m3, m2, 10
+    pmaddubsw   m8,         m9, [r3 - 4 * 32]   ; [12]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m10,        m1, [r3 - 4 * 32]
+    pmulhrsw    m10,        m7
+    packuswb    m8,         m10
+
+    pmaddubsw   m9,         [r3 + 9 * 32]       ; [25]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         [r3 + 9 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    palignr     m3,         m2, 12              ; shift m3/m2 in place - the unshifted refs are no longer needed past this point
+    palignr     m2,         m0, 12
+    pmaddubsw   m10,        m2, [r3 - 10 * 32]  ; [6]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         m3, [r3 - 10 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    pmaddubsw   m2,         [r3 + 3 * 32]        ; [19]
+    pmulhrsw    m2,         m7
+    pmaddubsw   m3,         [r3 + 3 * 32]
+    pmulhrsw    m3,         m7
+    packuswb    m2,         m3
+
+    movu        m3,         [r2 + 8]             ; [0] fraction-0 row: raw reference pixels, no interpolation needed
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 2, 3, 0, 8      ; store the last 8 rows; ZF still valid
+    ret
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_6, 3,8,13
+    add         r2, 64                  ; NOTE(review): selects the second half of the reference buffer (left refs) - confirm buffer layout against caller
+    lea         r3, [ang_table_avx2 + 32 * 16]      ; r3 -> middle of the angle weight table; rows address it as [r3 +/- k*32]
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; pmulhrsw by 1024 == rounded >> 5
+    mov         r4, r0                  ; remember dst so the second half can be written 16 columns further right
+    xor         r7d, r7d                ; r7d = 0: mode-6 path; store macro branches on this - confirm polarity in TRANSPOSE_32x8_AVX2
+
+    call ang32_mode_6_30_row_0_15
+
+    add         r4, 16                  ; rows 16-31 land 16 columns to the right (transposed store)
+    mov         r0, r4
+    add         r2, 6                   ; step refs for rows 16-31; presumably floor(16 * mode-6 angle / 32) - TODO verify against the scalar reference
+
+    call ang32_mode_6_30_row_16_31
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_30, 3,8,13
+    lea         r3, [ang_table_avx2 + 32 * 16]      ; r3 -> middle of the angle weight table (no 'add r2, 64': mode 30 reads the first half of the refs)
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; pmulhrsw by 1024 == rounded >> 5
+    xor         r7d, r7d
+    inc         r7d                     ; r7d = 1: mode-30 path; shares row routines with mode 6, store macro picks the non-mode-6 behaviour - confirm polarity
+
+    call ang32_mode_6_30_row_0_15
+
+    add         r2, 6                   ; same reference step as the mode-6 wrapper - the two modes mirror each other
+
+    call ang32_mode_6_30_row_16_31
+    RET
+
+cglobal ang32_mode_7_29_row_0_15
+    test        r7d,        r7d                 ; ZF = (r7d == 0); SIMD below leaves EFLAGS intact for the TRANSPOSE macros
+    ; rows 0 to 7
+    movu        m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    punpckhbw   m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 - 7 * 32]   ; [9] bracket = fraction index; weighted sum then rounded >> 5 via pmulhrsw/pw_1024
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 - 7 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    pmaddubsw   m5,         m0, [r3 + 2 * 32]   ; [18]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m2, [r3 + 2 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    pmaddubsw   m6,         m0, [r3 + 11 * 32]  ; [27]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m2, [r3 + 11 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    palignr     m11,        m2, m0, 2
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m8,         m11, [r3 - 12 * 32] ; [4]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m12,        m1, [r3 - 12 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m8,         m12
+
+    pmaddubsw   m9,         m11, [r3 - 3 * 32]  ; [13]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m12,        m1, [r3 - 3 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m9,         m12
+
+    pmaddubsw   m10,        m11, [r3 + 6 * 32]  ; [22]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m12,        m1, [r3 + 6 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m10,        m12
+
+    pmaddubsw   m11,        [r3 + 15 * 32]      ; [31]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 + 15 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m12,        m2, m0, 4
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m12,        [r3 - 8 * 32]       ; [8]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3 - 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0    ; store 8 rows; branches on the ZF set by 'test r7d, r7d' (macro defined elsewhere)
+
+    ; rows 8 to 15
+    palignr     m5,         m2, m0, 4           ; re-derive the offset-4 shifts (consumed in-place by the [8] row above)
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m4,         m5, [r3 + 1 * 32]   ; [17]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m8,         m1, [r3 + 1 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m4,         m8
+
+    pmaddubsw   m5,         [r3 + 10 * 32]      ; [26]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m1,         [r3 + 10 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m5,         m1
+
+    palignr     m10,        m2, m0, 6
+    palignr     m1,         m3, m2, 6
+    pmaddubsw   m6,         m10, [r3 - 13 * 32] ; [3]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m1, [r3 - 13 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         m10, [r3 - 4 * 32]  ; [12]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m11,        m1, [r3 - 4 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m8,         m11
+
+    pmaddubsw   m9,         m10, [r3 + 5 * 32]  ; [21]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m11,        m1, [r3 + 5 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m9,         m11
+
+    pmaddubsw   m10,        [r3 + 14 * 32]      ; [30]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         [r3 + 14 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    palignr     m3,         m2, 8               ; shift m3/m2 in place - the unshifted refs are no longer needed past this point
+    palignr     m2,         m0, 8
+    pmaddubsw   m11,        m2, [r3 - 9 * 32]   ; [7]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         m3, [r3 - 9 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    pmaddubsw   m2,         [r3]                ; [16]
+    pmulhrsw    m2,         m7
+    pmaddubsw   m3,         [r3]
+    pmulhrsw    m3,         m7
+    packuswb    m2,         m3
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 2, 0, 8     ; store the last 8 rows of this half; ZF still valid
+    ret
+
+cglobal ang32_mode_7_29_row_16_31
+    test        r7d,        r7d                 ; ZF = (r7d == 0); SIMD below leaves EFLAGS intact for the TRANSPOSE macros
+    ; relative rows 0 to 7 (output rows 16 to 23)
+    movu        m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    punpckhbw   m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 + 9 * 32]   ; [25] bracket = fraction index; weighted sum then rounded >> 5 via pmulhrsw/pw_1024
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 + 9 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    palignr     m9,         m2, m0, 2
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m5,         m9, [r3 - 14 * 32]  ; [2]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m1, [r3 - 14 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    pmaddubsw   m6,         m9, [r3 - 5 * 32]   ; [11]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m10,        m1, [r3 - 5 * 32]
+    pmulhrsw    m10,        m7
+    packuswb    m6,         m10
+
+    pmaddubsw   m8,         m9, [r3 + 4 * 32]   ; [20]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m10,        m1, [r3 + 4 * 32]
+    pmulhrsw    m10,        m7
+    packuswb    m8,         m10
+
+    pmaddubsw   m9,         [r3 + 13 * 32]      ; [29]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         [r3 + 13 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    palignr     m12,        m2, m0, 4
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m10,        m12, [r3 - 10 * 32] ; [6]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m11,        m1, [r3 - 10 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m10,        m11
+
+    pmaddubsw   m11,        m12, [r3 - 1 * 32]  ; [15]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 - 1 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m1,         m3, m2, 4           ; re-derive the offset-4 shift: m1 was clobbered by the [15] row above
+    pmaddubsw   m12,        [r3 + 8 * 32]       ; [24]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3 + 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0    ; store 8 rows; branches on the ZF set by 'test r7d, r7d' (macro defined elsewhere)
+
+    ; relative rows 8 to 15 (output rows 24 to 31)
+    palignr     m8,         m2, m0, 6
+    palignr     m1,         m3, m2, 6
+    pmaddubsw   m4,         m8, [r3 - 15 * 32]  ; [1]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m9,         m1, [r3 - 15 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m4,         m9
+
+    pmaddubsw   m5,         m8, [r3 - 6 * 32]   ; [10]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m9,         m1, [r3 - 6 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m5,         m9
+
+    pmaddubsw   m6,         m8, [r3 + 3 * 32]   ; [19]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m1, [r3 + 3 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         [r3 + 12 * 32]      ; [28]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         [r3 + 12 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    palignr     m3,         m2, 8               ; shift m3/m2 in place - the unshifted refs are no longer needed past this point
+    palignr     m2,         m0, 8
+    pmaddubsw   m9,         m2, [r3 - 11 * 32]  ; [5]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         m3, [r3 - 11 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    pmaddubsw   m10,        m2, [r3 - 2 * 32]   ; [14]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         m3, [r3 - 2 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    pmaddubsw   m2,        [r3 + 7 * 32]        ; [23]
+    pmulhrsw    m2,        m7
+    pmaddubsw   m3,        [r3 + 7 * 32]
+    pmulhrsw    m3,        m7
+    packuswb    m2,        m3
+
+    movu        m1,         [r2 + 6]            ; [0] fraction-0 row: raw reference pixels, no interpolation needed
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 2, 1, 0, 8      ; store the last 8 rows; ZF still valid
+    ret
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_7, 3,8,13
+    add         r2, 64                  ; NOTE(review): selects the second half of the reference buffer (left refs) - confirm buffer layout against caller
+    lea         r3, [ang_table_avx2 + 32 * 16]      ; r3 -> middle of the angle weight table; rows address it as [r3 +/- k*32]
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; pmulhrsw by 1024 == rounded >> 5
+    mov         r4, r0                  ; remember dst so the second half can be written 16 columns further right
+    xor         r7d, r7d                ; r7d = 0: mode-7 path; store macro branches on this - confirm polarity in TRANSPOSE_32x8_AVX2
+
+    call ang32_mode_7_29_row_0_15
+
+    add         r4, 16                  ; rows 16-31 land 16 columns to the right (transposed store)
+    mov         r0, r4
+    add         r2, 4                   ; step refs for rows 16-31; presumably floor(16 * mode-7 angle / 32) - TODO verify against the scalar reference
+
+    call ang32_mode_7_29_row_16_31
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_29, 3,8,13
+    lea         r3, [ang_table_avx2 + 32 * 16]      ; r3 -> middle of the angle weight table (no 'add r2, 64': mode 29 reads the first half of the refs)
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; pmulhrsw by 1024 == rounded >> 5
+    xor         r7d, r7d
+    inc         r7d                     ; r7d = 1: mode-29 path; shares row routines with mode 7, store macro picks the non-mode-7 behaviour - confirm polarity
+
+    call ang32_mode_7_29_row_0_15
+
+    add         r2, 4                   ; same reference step as the mode-7 wrapper - the two modes mirror each other
+
+    call ang32_mode_7_29_row_16_31
+    RET
+
+cglobal ang32_mode_8_28_avx2
+    test        r7d,        r7d
+    ; rows 0 to 7
+    movu        m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    punpckhbw   m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 - 11 * 32]  ; [5]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 - 11 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    pmaddubsw   m5,         m0, [r3 - 6 * 32]   ; [10]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m2, [r3 - 6 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    pmaddubsw   m6,         m0, [r3 - 1 * 32]   ; [15]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m2, [r3 - 1 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         m0, [r3 + 4 * 32]   ; [20]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m12,        m2, [r3 + 4 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m8,         m12
+
+    pmaddubsw   m9,         m0, [r3 + 9 * 32]   ; [25]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m12,        m2, [r3 + 9 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m9,         m12
+
+    pmaddubsw   m10,        m0, [r3 + 14 * 32]  ; [30]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m12,        m2, [r3 + 14 * 32]
+    pmulhrsw    m12,        m7
+    packuswb    m10,        m12
+
+    palignr     m12,        m2, m0, 2
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m11,        m12, [r3 - 13 * 32] ; [3]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 - 13 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m12,        [r3 - 8 * 32]       ; [8]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3 - 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0
+
+    ; rows 8 to 15
+
+    palignr     m8,         m2, m0, 2
+    palignr     m1,         m3, m2, 2
+    pmaddubsw   m4,         m8, [r3 - 3 * 32]   ; [13]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m9,         m1, [r3 - 3 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m4,         m9
+
+    pmaddubsw   m5,         m8, [r3 + 2 * 32]   ; [18]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m9,         m1, [r3 + 2 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m5,         m9
+
+    pmaddubsw   m6,         m8, [r3 + 7 * 32]   ; [23]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m1, [r3 + 7 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         [r3 + 12 * 32]      ; [28]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         [r3 + 12 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    palignr     m12,        m2, m0, 4
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m9,         m12, [r3 - 15 * 32] ; [1]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m11,        m1, [r3 - 15 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m9,         m11
+
+    pmaddubsw   m10,        m12, [r3 - 10 * 32] ; [6]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m11,        m1, [r3 - 10 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m10,        m11
+
+    pmaddubsw   m11,        m12, [r3 - 5 * 32]  ; [11]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 - 5 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m12,        [r3]                ; [16]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 8
+
+    ; rows 16 to 23
+
+    jnz         .doNotAdjustBufferPtr
+    lea         r4,         [r4 + mmsize/2]
+    mov         r0,         r4
+.doNotAdjustBufferPtr:
+
+    palignr     m6,         m2, m0, 4
+    palignr     m1,         m3, m2, 4
+    pmaddubsw   m4,         m6, [r3 + 5 * 32]   ; [21]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m8,         m1, [r3 + 5 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m4,         m8
+
+    pmaddubsw   m5,         m6, [r3 + 10 * 32]  ; [26]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m1, [r3 + 10 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    pmaddubsw   m6,         [r3 + 15 * 32]      ; [31]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m1,         [r3 + 15 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m6,         m1
+
+    palignr     m12,        m2, m0, 6
+    palignr     m1,         m3, m2, 6
+    pmaddubsw   m8,         m12, [r3 - 12 * 32] ; [4]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m11,        m1, [r3 - 12 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m8,         m11
+
+    pmaddubsw   m9,         m12, [r3 - 7 * 32]  ; [9]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m11,        m1, [r3 - 7 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m9,         m11
+
+    pmaddubsw   m10,        m12, [r3 - 2 * 32]  ; [14]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m11,        m1, [r3 - 2 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m10,        m11
+
+    pmaddubsw   m11,        m12, [r3 + 3 * 32]  ; [19]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         [r3 + 3 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m1,         m3, m2, 6
+    pmaddubsw   m12,        [r3 + 8 * 32]       ; [24]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         [r3 + 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 16
+
+    ; rows 24 to 31
+    palignr     m4,         m2, m0, 6
+    palignr     m1,         m3, m2, 6
+    pmaddubsw   m4,         [r3 + 13 * 32]      ; [29]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         [r3 + 13 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    palignr     m3,         m2, 8
+    palignr     m2,         m0, 8
+    pmaddubsw   m5,         m2, [r3 - 14 * 32]  ; [2]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m9,         m3, [r3 - 14 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m5,         m9
+
+    pmaddubsw   m6,         m2, [r3 - 9 * 32]   ; [7]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m3, [r3 - 9 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         m2, [r3 - 4 * 32]   ; [12]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         m3, [r3 - 4 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    pmaddubsw   m9,         m2, [r3 + 1 * 32]   ; [17]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m11,        m3, [r3 + 1 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m9,         m11
+
+    pmaddubsw   m10,        m2, [r3 + 6 * 32]   ; [22]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         m3, [r3 + 6 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    pmaddubsw   m2,         [r3 + 11 * 32]      ; [27]
+    pmulhrsw    m2,         m7
+    pmaddubsw   m3,         [r3 + 11 * 32]
+    pmulhrsw    m3,         m7
+    packuswb    m2,         m3
+
+    movu        m3,         [r2 + 6]            ; [0]
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 2, 3, 0, 24
+    ret
+
+; 32x32 intra angular prediction, mode 8 (near-horizontal angle).
+; Thin wrapper: loads the constants expected by the shared worker
+; ang32_mode_8_28_avx2 and selects the mode-8 path via r7d == 0
+; (the worker tests the ZF left by the xor below).
+INIT_YMM avx2
+cglobal intra_pred_ang32_8, 3,8,13
+    add         r2, 64                  ; mode 8 reads the left-neighbour reference array at r2 + 64
+    lea         r3, [ang_table_avx2 + 32 * 16]  ; r3 -> midpoint of coefficient table (indexed +/- n*32)
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; pmulhrsw constant: >>5 with rounding
+    mov         r4, r0                  ; save dst; worker advances its output pointer through r4
+    xor         r7d, r7d                ; r7d = 0 / ZF = 1 -> mode-8 (transposing) path in worker
+
+    call ang32_mode_8_28_avx2
+    RET
+
+; 32x32 intra angular prediction, mode 28 (mirror of mode 8, vertical side).
+; Same shared worker as mode 8; r7d == 1 (ZF = 0) selects the direct,
+; non-transposing store path. Note: no "add r2, 64" here - mode 28 reads
+; the top reference pixels at r2 directly.
+INIT_YMM avx2
+cglobal intra_pred_ang32_28, 3,8,13
+    lea         r3, [ang_table_avx2 + 32 * 16]  ; r3 -> midpoint of coefficient table
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]           ; pmulhrsw constant: >>5 with rounding
+    xor         r7d, r7d
+    inc         r7d                     ; r7d = 1 / ZF = 0 -> mode-28 path in worker
+
+    call ang32_mode_8_28_avx2
+    RET
+
+; 32x32 intra angular prediction, mode 9 (near-horizontal).
+; Horizontal modes are generated directly in destination order: every output
+; row shifts the reference window one byte further (palignr count = row),
+; pairs adjacent reference pixels (pshufb with ang32_shuf_mode9), applies the
+; per-column fraction weights from angHor_tab_9 (pmaddubsw), then divides by
+; 32 with rounding (pmulhrsw against pw_1024) and packs back to bytes.
+; NOTE(review): exact contents of angHor_tab_9 / ang32_shuf_mode9 are defined
+; elsewhere in this file - comments below assume the usual pair-and-weight
+; layout; confirm against the table definitions.
+INIT_YMM avx2
+cglobal intra_pred_ang32_9, 3,5,8
+    vbroadcasti128      m0, [angHor_tab_9]              ; weights, low 16 columns
+    vbroadcasti128      m1, [angHor_tab_9 + mmsize/2]   ; weights, high 16 columns
+    mova                m2, [pw_1024]
+    mova                m7, [ang32_shuf_mode9]
+    lea                 r3, [r1 * 3]
+
+    vbroadcasti128      m3, [r2 + mmsize*2 +  1]        ; left ref pixels 1..16 in both lanes
+    vbroadcasti128      m6, [r2 + mmsize*2 + 17]        ; left ref pixels 17..32 in both lanes
+
+    ; row 0: pair pixels, weight, round-shift, pack - this 7-op stanza repeats
+    ; once per row with an increasing palignr shift.
+    pshufb              m5, m3, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 1
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 2
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1*2], m4
+
+    palignr             m5, m6, m3, 3
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 4
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 5
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 6
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1*2], m4
+
+    palignr             m5, m6, m3, 7
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 8
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 9
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 10
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1*2], m4
+
+    palignr             m5, m6, m3, 11
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 12
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 13
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 14
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1*2], m4
+
+    palignr             m5, m6, m3, 15
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    vbroadcasti128      m3, [r2 + mmsize*2 + 33]        ; ref pixels 33..48 for rows 16..31
+
+    pshufb              m5, m6, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 1
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 2
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1*2], m4
+
+    palignr             m5, m3, m6, 3
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 4
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 5
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 6
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1*2], m4
+
+    palignr             m5, m3, m6, 7
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 8
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 9
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 10
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1*2], m4
+
+    palignr             m5, m3, m6, 11
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 12
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 13
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 14
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1*2], m4
+
+    palignr             m5, m3, m6, 15
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+    RET
+
+; 32x32 intra angular prediction, mode 27 (near-vertical, +2 angle step).
+; Vertical modes write output rows directly: row r interpolates adjacent top
+; reference pixels with fraction (2*(r+1)) & 31 (the bracketed [n] comments)
+; taken from the coefficient table at r3, dividing by 32 with rounding via
+; pmulhrsw/pw_1024. The reference window advances one pixel after row 15
+; (zero-fraction rows are plain copies of the reference).
+cglobal intra_pred_ang32_27, 3,5,6
+    lea                 r3, [ang_table_avx2 + 32 * 16]  ; r3 -> midpoint of coefficient table
+    lea                 r4, [r1 * 3]            ; r4 -> 3 * stride
+    mova                m5, [pw_1024]
+
+    ; rows 0 to 7
+    movu                m0, [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu                m1, [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu                m3, [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu                m4, [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    punpckhbw           m2, m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw           m0, m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw           m3, m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw           m4, m0, [r3 - 14 * 32]  ; [2]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 - 12 * 32]  ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 - 10 * 32]  ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m0, [r3 - 8 * 32]   ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m0, [r3 - 6 * 32]   ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 - 4 * 32]   ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 - 2 * 32]   ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m0, [r3]            ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 8 to 15
+    pmaddubsw           m4, m0, [r3 + 2 * 32]   ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 + 4 * 32]   ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 + 6 * 32]   ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m0, [r3 + 8 * 32]   ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m0, [r3 + 10 * 32]  ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 + 12 * 32]  ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 + 14 * 32]  ; [30]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m3, m2, 2               ; shift window: rows 16+ start one ref pixel later
+    palignr             m2, m0, 2
+    movu                m1, [r2 + 2]            ; [0]  row 15: zero fraction -> plain copy of ref
+    movu                [r0 + r4], m1
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 16 to 23
+    pmaddubsw           m4, m2, [r3 - 14 * 32]  ; [2]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 - 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m2, [r3 - 12 * 32]  ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m2, [r3 - 10 * 32]  ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m2, [r3 - 8 * 32]   ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m2, [r3 - 6 * 32]   ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m2, [r3 - 4 * 32]   ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1],  m4
+
+    pmaddubsw           m4, m2, [r3 - 2 * 32]   ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m2, [r3]            ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0,         [r0 + r1 * 4]
+
+    ; rows 24 to 31
+    pmaddubsw           m4, m2, [r3 + 2 * 32]   ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m2, [r3 + 4 * 32]   ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1],  m4
+
+    pmaddubsw           m4, m2, [r3 + 6 * 32]   ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m2, [r3 + 8 * 32]   ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4],  m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m2, [r3 + 10 * 32]  ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m2, [r3 + 12 * 32]  ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1],  m4
+
+    pmaddubsw           m2, [r3 + 14 * 32]      ; [30]
+    pmulhrsw            m2, m5
+    pmaddubsw           m3, [r3 + 14 * 32]
+    pmulhrsw            m3, m5
+    packuswb            m2, m3
+    movu                [r0 + r1*2], m2
+
+    movu                m1, [r2 + 3]            ; [0]  last row: zero fraction -> plain copy of ref
+    movu                [r0 + r4], m1
+    RET
+
+; 32x32 intra angular prediction, mode 10 (pure horizontal).
+; Output row r is left-neighbour pixel r replicated across the whole row:
+; m0 holds a running all-(r) byte index (incremented by pb_1 each row) and
+; pshufb broadcasts that byte of the 16-pixel reference chunk in m2. The
+; chunk is swapped for the next 16 reference pixels at the halfway point.
+cglobal intra_pred_ang32_10, 5,5,4
+    pxor                m0, m0                  ; broadcast index = 0
+    mova                m1, [pb_1]              ; per-row index increment
+    lea                 r4, [r1 * 3]
+
+    vbroadcasti128      m2, [r2 + mmsize*2 + 1] ; left ref pixels for rows 0..15
+
+    pshufb              m3, m2, m0
+    movu                [r0], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1 * 2], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r4], m3
+
+    lea                 r0, [r0 + r1 * 4]
+
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1 * 2], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r4], m3
+
+    lea                 r0, [r0 + r1 * 4]
+
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1 * 2], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r4], m3
+
+    lea                 r0, [r0 + r1 * 4]
+
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1 * 2], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r4], m3
+
+    lea                 r0, [r0 + r1 * 4]
+    pxor                m0, m0                  ; restart index for rows 16..31
+    vbroadcasti128      m2, [r2 + mmsize*2 + mmsize/2 + 1]  ; left ref pixels for rows 16..31
+
+    pshufb              m3, m2, m0
+    movu                [r0], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1 * 2], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r4], m3
+
+    lea                 r0, [r0 + r1 * 4]
+
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1 * 2], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r4], m3
+
+    lea                 r0, [r0 + r1 * 4]
+
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1 * 2], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r4], m3
+
+    lea                 r0, [r0 + r1 * 4]
+
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r1 * 2], m3
+    paddb               m0, m1
+    pshufb              m3, m2, m0
+    movu                [r0 + r4], m3
+    RET
+
+cglobal intra_pred_ang32_11, 3,4,8
+    vbroadcasti128      m0, [angHor_tab_11]
+    vbroadcasti128      m1, [angHor_tab_11 + mmsize/2]
+    mova                m2, [pw_1024]
+    mova                m7, [ang32_shuf_mode11]
+    lea                 r3, [r1 * 3]
+
+    ; prepare for [16 0 -1 -2 ...]
+    movu               xm3, [r2 + mmsize*2 -  1]
+    vbroadcasti128      m6, [r2 + mmsize*2 + 15]
+
+    pinsrb             xm3, [r2 +  0], 1
+    pinsrb             xm3, [r2 + 16], 0
+    vinserti128         m3, m3, xm3, 1          ; [16  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 16  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14]
+
+    pshufb              m5, m3, m7              ; [ 0  1  0  1  0  1  0  1  0  1  0  1  0  1  0  1 16  0 16  0 16  0 16  0 16  0 16  0 16  0 16  0]
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 1
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 2
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m6, m3, 3
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 4
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 5
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 6
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m6, m3, 7
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 8
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 9
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 10
+    pshufb              m5, m7
+
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m6, m3, 11
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 12
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 13
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 14
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m6, m3, 15
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    mova                m3, m6
+    vbroadcasti128      m6, [r2 + mmsize*2 + 15 + 16]
+    pshufb              m5, m3, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 1
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 2
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m6, m3, 3
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 4
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 5
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 6
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m6, m3, 7
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 8
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 9
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 10
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m6, m3, 11
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 12
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m6, m3, 13
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m6, m3, 14
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m6, m3, 15
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+    RET
+
+cglobal intra_pred_ang32_25, 3,5,7                  ; 3 args, 5 GPRs, 7 vector regs; NOTE(review): args appear to be dst(r0), dstStride(r1), refPix(r2) per x265 intra_pred ABI -- confirm
+    lea                 r3, [ang_table_avx2 + 32 * 16]  ; r3 = &ang_table[16]; rows below index r3 + (frac-16)*32
+    lea                 r4, [r1 * 3]                ; r4 = 3 * stride, for the 4th row of each group
+    mova                m5, [pw_1024]               ; pmulhrsw(x, 1024) == (x + 16) >> 5: rounded 5-bit shift of the pmaddubsw sums
+
+    ; rows 0 to 7
+    movu                m0, [r2 +  0]               ; [31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0]
+    movu                m1, [r2 +  1]               ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+
+    pinsrb              xm3, [r2], 15               ; NOTE(review): xm3 is otherwise undefined here; only bytes 14..15 survive into m3's low lane
+    pinsrb              xm3, [r2 + mmsize*2 + 16], 14   ; splice in the left-reference sample used once the window shifts past ref[0]
+
+    punpckhbw           m2, m0, m1                  ; [32 31 31 30 30 29 29 28 28 27 27 26 26 25 25 24 16 15 15 14 14 13 13 12 12 11 11 10 10  9  9  8]
+    punpcklbw           m0, m1                      ; [24 23 23 22 22 21 21 20 20 19 19 18 18 17 17 16  8  7  7  6  6  5  5  4  4  3  3  2  2  1  1  0]
+    vinserti128         m3, m3, xm2, 1              ; [16 15 15 14 14 13 13 12 12 11 11 10 10  9  9  8  0 16  x  x  x  x  x  x  x  x  x  x  x  x  x  x]
+
+    pmaddubsw           m4, m0, [r3 + 14 * 32]      ; [30]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 + 12 * 32]      ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 + 10 * 32]      ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m0, [r3 + 8 * 32]       ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]           ; advance dst by 4 rows
+
+    pmaddubsw           m4, m0, [r3 + 6 * 32]       ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 + 4 * 32]       ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 + 2 * 32]       ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m0, [r3]                ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 8 to 15
+    pmaddubsw           m4, m0, [r3 - 2 * 32]       ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 - 4 * 32]       ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 - 6 * 32]       ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m0, [r3 - 8 * 32]       ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m0, [r3 - 10 * 32]      ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 - 12 * 32]      ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 - 14 * 32]      ; [2]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    movu                m1, [r2]                    ; [0]
+    movu                [r0 + r4],  m1              ; fraction 0: row is a straight copy of the reference
+
+    lea                 r0, [r0 + r1 * 4]
+    palignr             m2, m0, 14                  ; NOTE(review): per-lane byte shift by 14 = 7 interleaved (lo,hi) pairs; re-bases the window for rows 16..31
+    palignr             m0, m3, 14                  ; low half pulls the spliced samples prepared in m3 above
+
+    ; rows 16 to 23
+    pmaddubsw           m4, m0, [r3 + 14 * 32]      ; [30]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 + 12 * 32]      ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 + 10 * 32]      ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m0, [r3 + 8 * 32]       ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m0, [r3 + 6 * 32]       ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 + 4 * 32]       ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 + 2 * 32]       ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m0, [r3]                ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 24 to 31
+    pmaddubsw           m4, m0, [r3 - 2 * 32]       ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 - 4 * 32]       ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 - 6 * 32]       ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    pmaddubsw           m4, m0, [r3 - 8 * 32]       ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m0, [r3 - 10 * 32]      ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 - 12 * 32]      ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m0, [r3 - 14 * 32]          ; [2]  (last use of m0/m2: multiply in place, no copy needed)
+    pmulhrsw            m0, m5
+    pmaddubsw           m2, [r3 - 14 * 32]
+    pmulhrsw            m2, m5
+    packuswb            m0, m2
+    movu                [r0 + r1*2], m0
+
+    movu                m1, [r2 + 1]                ; [0]
+    palignr             m1, m3, 14                  ; fraction 0 again, but on the shifted window
+    movu                [r0 + r4], m1
+    RET
+
+cglobal intra_pred_ang32_12, 3,4,9                  ; 3 args, 4 GPRs, 9 vector regs; NOTE(review): args appear to be dst(r0), dstStride(r1), refPix(r2) per x265 intra_pred ABI -- confirm
+    movu                m0, [ang32_fact_mode12]     ; interleaved (32-frac, frac) coefficient pairs, low half of each row
+    movu                m1, [ang32_fact_mode12 + mmsize]    ; coefficient pairs, high half
+    mova                m2, [pw_1024]               ; pmulhrsw(x, 1024) == (x + 16) >> 5: rounded 5-bit shift of the pmaddubsw sums
+    mova                m7, [ang32_shuf_mode12]     ; shuffle mask pairing each dst byte with its two source refs (low half)
+    mova                m8, [ang32_shuf_mode12 + mmsize]    ; shuffle mask, high half
+    lea                 r3, [r1 * 3]                ; r3 = 3 * stride, for the 4th row of each group
+
+    ; prepare for [26, 19, 13,  6,  0, -1, -2....]
+
+    movu               xm4, [r2 + mmsize*2 - 4]
+    vbroadcasti128      m6, [r2 + mmsize*2 + 12]    ; next 16 ref bytes, replicated to both lanes so palignr can slide across them
+
+    pinsrb             xm4, [r2 +  0], 4            ; overwrite the low 5 bytes with the projected left refs listed above
+    pinsrb             xm4, [r2 +  6], 3
+    pinsrb             xm4, [r2 + 13], 2
+    pinsrb             xm4, [r2 + 19], 1
+    pinsrb             xm4, [r2 + 26], 0
+    vinserti128         m3, m4, xm4, 1      ; [26, 19, 13,  6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 26, 19, 13,  6,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11]
+
+    pshufb              m4, m3, m7          ; [ 0,  1,  0,  1,  0,  1,  0,  1,  0,  1,  0,  1,  6,  0,  6,  0, 13,  6, 13,  6, 13,  6, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13]
+    pshufb              m5, m3, m8          ; [ 6,  0,  6,  0,  6,  0,  6,  0, 13,  6, 13,  6, 13,  6, 13,  6, 19, 13, 16, 19, 16, 19, 16, 19, 16, 19, 16, 19, 16, 19, 16, 19]
+    pmaddubsw           m4, m0              ; weighted two-tap blend, low 16 dst bytes
+    pmaddubsw           m5, m1              ; weighted two-tap blend, high 16 dst bytes
+    pmulhrsw            m4, m2              ; rounded >> 5
+    pmulhrsw            m5, m2
+    packuswb            m4, m5              ; saturate back to 32 bytes
+    movu                [r0], m4
+
+    palignr             m4, m6, m3, 1       ; slide the source window one ref byte per output row
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m4, m6, m3, 2
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m6, m3, 3
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]           ; advance dst by 4 rows
+
+    palignr             m4, m6, m3, 4
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m4, m6, m3, 5
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m4, m6, m3, 6
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m6, m3, 7
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m6, m3, 8
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m4, m6, m3, 9
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m4, m6, m3, 10
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m6, m3, 11
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m6, m3, 12
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m4, m6, m3, 13
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m4, m6, m3, 14
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m6, m3, 15
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+    mova                m3, m6                      ; roll the window: the 16 bytes just consumed become the base
+    vbroadcasti128      m6, [r2 + mmsize*2 + 12 + 16]   ; and load the next 16 ref bytes for rows 16..31
+
+    pshufb              m4, m3, m7                  ; shift 0 of the new window (row 16)
+    pshufb              m5, m3, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m4, m6, m3, 1
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m4, m6, m3, 2
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m6, m3, 3
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m6, m3, 4
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m4, m6, m3, 5
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m4, m6, m3, 6
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m6, m3, 7
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m6, m3, 8
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m4, m6, m3, 9
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m4, m6, m3, 10
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m6, m3, 11
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m6, m3, 12
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m4, m6, m3, 13
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m4, m6, m3, 14
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m6, m3, 15
+    pshufb              m5, m4, m8
+    pshufb              m4, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+    RET
+
+cglobal intra_pred_ang32_24, 3,5,8
+    lea                 r3, [ang_table_avx2 + 32 * 16]
+    lea                 r4, [r1 * 3]
+    mova                m5, [pw_1024]
+
+    ; rows 0 to 7
+    movu                m0, [r2 + 0]
+    movu                m1, [r2 + 1]
+    punpckhbw           m2, m0, m1
+    punpcklbw           m0, m1
+
+    movu                m4, [r2 + mmsize*2]
+    pshufb              m4, [ang32_shuf_mode24]
+    mova                m3, [ang32_shuf_mode24 + mmsize]
+    vpermd              m4, m3, m4                      ; [6  6 13 13 19 19 26 26 x x x...]
+    palignr             m3, m0, m4, 1
+    vinserti128         m3, m3, xm2, 1
+
+    pmaddubsw           m4, m0, [r3 + 11 * 32]          ; [27]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 11 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 + 6 * 32]           ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 + 1 * 32]           ; [17]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 1 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m0, [r3 - 4 * 32]           ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m0, [r3 - 9 * 32]           ; [7]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 9 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 - 14 * 32]          ; [2]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m0, m3, 14
+    palignr             m7, m2, m0, 14
+
+    pmaddubsw           m4, m6, [r3 + 13 * 32]          ; [29]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 13 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 + 8 * 32]           ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 8 to 15
+    pmaddubsw           m4, m6, [r3 + 3 * 32]           ; [19]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 3 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 2 * 32]           ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 7 * 32]           ; [9]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 7 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 12 * 32]          ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m6, m0, m3, 12
+    palignr             m7, m2, m0, 12
+
+    pmaddubsw           m4, m6, [r3 + 15 * 32]          ; [31]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 15 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 + 10 * 32]          ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 + 5 * 32]           ; [21]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 5 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    pmaddubsw           m4, m6, [r3]                    ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 16 to 23
+    pmaddubsw           m4, m6, [r3 - 5 * 32]           ; [11]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 5 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 10 * 32]          ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 15 * 32]          ; [1]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 15 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 10
+    palignr             m7, m2, m0, 10
+
+    pmaddubsw           m4, m6, [r3 + 12 * 32]          ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m6, [r3 + 7 * 32]           ; [23]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 7 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 + 2 * 32]           ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 3 * 32]           ; [13]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 3 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 8 * 32]           ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 24 to 31
+    pmaddubsw           m4, m6, [r3 - 13 * 32]          ; [3]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 13 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 8
+    palignr             m7, m2, m0, 8
+
+    pmaddubsw           m4, m6, [r3 + 14 * 32]          ; [30]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 + 9 * 32]           ; [25]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 9 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    pmaddubsw           m4, m6, [r3 + 4 * 32]           ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m6, [r3 - 1 * 32]           ; [15]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 1 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 6 * 32]           ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 11 * 32]          ; [5]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 11 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pand                m6, [pw_00ff]
+    pand                m7, [pw_00ff]
+    packuswb            m6, m7
+    movu                [r0 + r4], m6
+    RET
+
+cglobal intra_pred_ang32_13, 3,4,9                      ; 32x32 angular intra prediction, mode 13 (AVX2); r0=dst, r1=stride, r2=ref (from usage below)
+    movu                m0, [ang32_fact_mode13]         ; pmaddubsw weight pairs, low 16 output pixels
+    movu                m1, [ang32_fact_mode13 + mmsize] ; pmaddubsw weight pairs, high 16 output pixels
+    mova                m2, [pw_1024]                   ; pmulhrsw by 1024 == round(x >> 5)
+    mova                m7, [ang32_shuf_mode13]         ; pshufb gather masks for the source window
+    mova                m8, [ang32_shuf_mode13 + mmsize]
+    lea                 r3, [r1 * 3]                    ; r3 = 3 * stride
+
+    ; prepare for [28, 25, 21, 18, 14, 11,  7,  4,  0, -1, -2....]
+
+    movu                m6, [r2]
+    pshufb              m6, [ang32_shuf_mode13 + mmsize*2]
+    mova                m3, [ang32_shuf_mode24 + mmsize*1]
+    vpermd              m6, m3, m6
+    palignr             m6, m6, 1
+    vbroadcasti128      m3, [r2 + mmsize*2 + 1]         ; first 16 reference samples, broadcast to both lanes
+
+    palignr             m5, m3, m6, 1                   ; row 0
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 2                   ; row 1
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 3                   ; row 2
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 4                   ; row 3
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 5                   ; row 4
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 6                   ; row 5
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 7                   ; row 6
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 8                   ; row 7
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 9                   ; row 8
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 10                  ; row 9
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 11                  ; row 10
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 12                  ; row 11
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 13                  ; row 12
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 14                  ; row 13
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 15                  ; row 14
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    pshufb              m4, m3, m7                      ; row 15: window fully aligned with m3
+    pshufb              m5, m3, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    mova                m6, m3
+    vbroadcasti128      m3, [r2 + mmsize*2 + 17]        ; advance to the next 16 reference samples (rows 16-31)
+    palignr             m5, m3, m6, 1                   ; row 16
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 2                   ; row 17
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 3                   ; row 18
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 4                   ; row 19
+    pshufb              m4, m5, m7
+    pshufb              m5, m5, m8                      ; 3-operand spelling; same effect as "pshufb m5, m8" used elsewhere
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 5                   ; row 20
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 6                   ; row 21
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 7                   ; row 22
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 8                   ; row 23
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 9                   ; row 24
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 10                  ; row 25
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 11                  ; row 26
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 12                  ; row 27
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 13                  ; row 28
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 14                  ; row 29
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 15                  ; row 30
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    pshufb              m4, m3, m7                      ; row 31: window fully aligned with m3
+    pshufb              m5, m3, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+    RET
+
+cglobal intra_pred_ang32_23, 3,5,8                      ; 32x32 angular intra prediction, mode 23 (AVX2); r0=dst, r1=stride, r2=ref (from usage below)
+    lea                 r3, [ang_table_avx2 + 32 * 16]  ; r3 -> entry 16; signed +/-k*32 offsets below select entries 1..31
+    lea                 r4, [r1 * 3]                    ; r4 = 3 * stride
+    mova                m5, [pw_1024]                   ; pmulhrsw by 1024 == round(x >> 5)
+
+    ; rows 0 to 7
+    movu                m0, [r2 + 0]
+    movu                m1, [r2 + 1]
+    punpckhbw           m2, m0, m1                      ; interleave (ref[i], ref[i+1]) pairs, high half
+    punpcklbw           m0, m1                          ; interleave (ref[i], ref[i+1]) pairs, low half
+
+    movu                m4, [r2 + mmsize*2]
+    pshufb              m4, [ang32_shuf_mode23]
+    vpermq              m4, m4, q1313
+    palignr             m3, m0, m4, 1
+    vinserti128         m3, m3, xm2, 1                  ; m3 extends the pair window to the left for the palignr steps below
+
+    pmaddubsw           m4, m0, [r3 + 7 * 32]           ; [23]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 7 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 - 2 * 32]           ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m0, [r3 - 11 * 32]          ; [5]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 11 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 14                  ; step pair window back 1 sample (2 bytes)
+    palignr             m7, m2, m0, 14
+
+    pmaddubsw           m4, m6, [r3 + 12 * 32]          ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m6, [r3 + 3 * 32]           ; [19]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 3 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 6 * 32]           ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 15 * 32]          ; [1]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 15 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 12                  ; step pair window back 2 samples
+    palignr             m7, m2, m0, 12
+
+    pmaddubsw           m4, m6, [r3 + 8 * 32]           ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 8 to 15
+    pmaddubsw           m4, m6, [r3 - 1 * 32]           ; [15]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 1 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 10 * 32]          ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m0, m3, 10                  ; step pair window back 3 samples
+    palignr             m7, m2, m0, 10
+
+    pmaddubsw           m4, m6, [r3 + 13 * 32]          ; [29]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 13 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 + 4 * 32]           ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m6, [r3 - 5 * 32]           ; [11]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 5 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 14 * 32]          ; [2]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m0, m3, 8                   ; step pair window back 4 samples
+    palignr             m7, m2, m0, 8
+
+    pmaddubsw           m4, m6, [r3 + 9 * 32]           ; [25]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 9 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    pmaddubsw           m4, m6, [r3]                    ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 16 to 23
+    pmaddubsw           m4, m6, [r3 - 9 * 32]           ; [7]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 9 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 6                   ; step pair window back 5 samples
+    palignr             m7, m2, m0, 6
+
+    pmaddubsw           m4, m6, [r3 + 14 * 32]          ; [30]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 + 5 * 32]           ; [21]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 5 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 4 * 32]           ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m6, [r3 - 13 * 32]          ; [3]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 13 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 4                   ; step pair window back 6 samples
+    palignr             m7, m2, m0, 4
+    pmaddubsw           m4, m6, [r3 + 10 * 32]          ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 + 1 * 32]           ; [17]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 1 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 8 * 32]           ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 24 to 31
+    palignr             m6, m0, m3, 2                   ; step pair window back 7 samples
+    palignr             m7, m2, m0, 2
+    pmaddubsw           m4, m6, [r3 + 15 * 32]          ; [31]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 15 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 + 6 * 32]           ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 3 * 32]           ; [13]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 3 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    pmaddubsw           m4, m6, [r3 - 12 * 32]          ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m3, [r3 + 11 * 32]          ; [27]  (window fully shifted: use m3/m0 directly)
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 + 11 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m3, [r3 + 2 * 32]           ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m3, [r3 - 7 * 32]           ; [9]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 - 7 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m3
+    RET
+
+cglobal intra_pred_ang32_14, 3,4,9
+    movu                m0, [ang32_fact_mode14]
+    movu                m1, [ang32_fact_mode14 + mmsize]
+    mova                m2, [pw_1024]
+    mova                m7, [ang32_shuf_mode14]
+    mova                m8, [ang32_shuf_mode14 + mmsize]
+    lea                 r3, [r1 * 3]
+
+    ; prepare for [30, 27, 25, 22, 20, 17, 15, 12, 10, 7, 5, 2, 0, -1, -2...]
+
+    movu                m6, [r2]
+    pshufb              m6, [ang32_shuf_mode14 + mmsize*2]
+    vpermq              m6, m6, 01110111b
+    pslldq              m6, m6, 1
+    vbroadcasti128      m3, [r2 + mmsize*2 + 1]
+
+    palignr             m5, m3, m6, 1
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 2
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 3
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 4
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 5
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 6
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 7
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 8
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 9
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 10
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 11
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 12
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 13
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 14
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 15
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    pshufb              m4, m3, m7
+    pshufb              m5, m3, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    mova                m6, m3
+    vbroadcasti128      m3, [r2 + mmsize*2 + 17]
+    palignr             m5, m3, m6, 1
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 2
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 3
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 4
+    pshufb              m4, m5, m7
+    pshufb              m5, m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 5
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 6
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 7
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 8
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 9
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 10
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 11
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m5, m3, m6, 12
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m3, m6, 13
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4
+
+    palignr             m5, m3, m6, 14
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 15
+    pshufb              m4, m5, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4
+
+    pshufb              m4, m3, m7
+    pshufb              m5, m3, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4
+    RET
+
+cglobal intra_pred_ang32_22, 3,5,9              ; NOTE(review): 32x32 angular intra prediction, mode 22 (AVX2); r0=dst, r1=dst stride, r2=reference samples -- confirm arg layout against callers
+    lea                 r3, [ang_table_avx2 + 32 * 16]  ; r3 -> middle of weight table; [r3 + k*32] selects the fraction noted as ";[n]" below (n = 16 + k)
+    lea                 r4, [r1 * 3]                    ; r4 = 3 * stride (4th row of each group)
+    mova                m5, [pw_1024]                   ; pmulhrsw by 1024 == (x + 16) >> 5: rounds the pmaddubsw weighted sums
+
+    ; rows 0 to 7
+    movu                m0, [r2 + 0]
+    movu                m1, [r2 + 1]
+    punpckhbw           m2, m0, m1                      ; m0/m2 = interleaved byte pairs ref[x],ref[x+1], ready for pmaddubsw
+    punpcklbw           m0, m1
+
+    movu                m4, [r2 + mmsize*2 + 2]
+    pshufb              m4, [ang32_shuf_mode22]         ; gather the mode-22 projected samples from the second reference array
+    vextracti128        xm8, m4, 1
+
+    palignr             m3, m0, m4, 2
+    palignr             m3, m8, 15
+    vinserti128         m3, m3, xm2, 1                  ; m3/m8: extended pair windows; later palignr(mX, m3/m8, k) steps back through them row by row
+    vinserti128         m8, m8, xm0, 1
+
+    pmaddubsw           m4, m0, [r3 + 3 * 32]           ; [19]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 + 3 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m0, [r3 - 10 * 32]          ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m0, m3, 14
+    palignr             m7, m2, m0, 14
+
+    pmaddubsw           m4, m6, [r3 + 9 * 32]           ; [25]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 9 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 4 * 32]           ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m6, m0, m3, 12
+    palignr             m7, m2, m0, 12
+
+    pmaddubsw           m4, m6, [r3 + 15 * 32]          ; [31]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 15 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 + 2 * 32]           ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 11 * 32]          ; [5]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 11 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 10
+    palignr             m7, m2, m0, 10
+
+    pmaddubsw           m4, m6, [r3 + 8 * 32]           ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 8 to 15
+    pmaddubsw           m4, m6, [r3 - 5 * 32]           ; [11]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 5 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 8
+    palignr             m7, m2, m0, 8
+
+    pmaddubsw           m4, m6, [r3 + 14 * 32]          ; [30]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 + 1 * 32]           ; [17]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 1 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 12 * 32]          ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m6, m0, m3, 6
+    palignr             m7, m2, m0, 6
+
+    pmaddubsw           m4, m6, [r3 + 7 * 32]           ; [23]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 7 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 6 * 32]           ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m0, m3, 4
+    palignr             m7, m2, m0, 4
+
+    pmaddubsw           m4, m6, [r3 + 13 * 32]          ; [29]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 13 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    pmaddubsw           m4, m6, [r3]                    ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 16 to 23
+    pmaddubsw           m4, m6, [r3 - 13 * 32]          ; [3]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 13 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 2
+    palignr             m7, m2, m0, 2
+
+    pmaddubsw           m4, m6, [r3 + 6 * 32]           ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 7 * 32]           ; [9]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 7 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m3, [r3 + 12 * 32]          ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m3, [r3 - 1 * 32]           ; [15]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 - 1 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m3, [r3 - 14 * 32]          ; [2]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 - 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m3, m8, 14
+    palignr             m7, m0, m3, 14
+
+    pmaddubsw           m4, m6, [r3 + 5 * 32]           ; [21]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 5 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 8 * 32]           ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 24 to 31
+    palignr             m6, m3, m8, 6
+    palignr             m7, m0, m3, 12
+    pmaddubsw           m4, m6, [r3 + 11 * 32]          ; [27]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 11 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 2 * 32]           ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 15 * 32]          ; [1]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 15 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m6, m3, m8, 10
+    palignr             m7, m0, m3, 10
+    pmaddubsw           m4, m6, [r3 + 4 * 32]           ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m6, [r3 - 9 * 32]           ; [7]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 9 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m0, m3, 8                       ; slide both pair windows back 8 bytes for the final rows
+    palignr             m3, m8, 8
+    pmaddubsw           m4, m3, [r3 + 10 * 32]          ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m3, [r3 - 3 * 32]           ; [13]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 - 3 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pand                m3, [pw_00ff]                   ; fraction-0 row: keep the low byte of each pair (plain reference copy)
+    pand                m0, [pw_00ff]
+    packuswb            m3, m0
+    movu                [r0 + r4], m3
+    RET
+
+cglobal intra_pred_ang32_15, 3,4,9              ; NOTE(review): 32x32 angular intra prediction, mode 15 (AVX2); r0=dst, r1=dst stride, r2=reference samples -- confirm arg layout against callers
+    movu                m0, [ang32_fact_mode15]         ; per-pixel interpolation weight pairs for pmaddubsw (table defined elsewhere in this file)
+    movu                m1, [ang32_fact_mode15 + mmsize]
+    mova                m2, [pw_1024]                   ; pmulhrsw by 1024 == (x + 16) >> 5: rounds the pmaddubsw weighted sums
+    mova                m7, [ang32_shuf_mode15]         ; byte-gather masks applied to each shifted reference window
+    mova                m8, [ang32_shuf_mode15 + mmsize]
+    lea                 r3, [r1 * 3]                    ; r3 = 3 * stride (4th row of each group)
+
+    ; prepare for [30, 28, 26, 24, 23, 21, 19, 17, 15, 13, 11,  9,  8,  6,  4,  2,  0, -1, -2...]
+
+    movu                m6, [r2]
+    pshufb              m6, [ang32_shuf_mode15 + mmsize*2]
+    vpermq              m6, m6, 01110111b               ; m6 = projected-reference run described above
+
+    movu                xm3, [r2 + mmsize*2]            ; second reference array
+    pinsrb              xm3, [r2], 0                    ; overwrite its first byte with ref[0]
+    vpermq              m3, m3, 01000100b               ; replicate the low 16 bytes into both 128-bit lanes
+
+    palignr             m4, m3, m6, 2                   ; each row below: two shifted windows -> pshufb gather -> weighted 2-tap blend -> pack
+    pshufb              m4, m7
+    pshufb              m5, m6, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4                        ; row 0
+
+    palignr             m4, m3, m6, 3
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 1
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4                   ; row 1
+
+    palignr             m4, m3, m6, 4
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 2
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4               ; row 2
+
+    palignr             m4, m3, m6, 5
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 3
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4                   ; row 3
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 6
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 4
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4                        ; row 4
+
+    palignr             m4, m3, m6, 7
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 5
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4                   ; row 5
+
+    palignr             m4, m3, m6, 8
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 6
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4               ; row 6
+
+    palignr             m4, m3, m6, 9
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4                   ; row 7
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 10
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 8
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4                        ; row 8
+
+    palignr             m4, m3, m6, 11
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 9
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4                   ; row 9
+
+    palignr             m4, m3, m6, 12
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 10
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4               ; row 10
+
+    palignr             m4, m3, m6, 13
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 11
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4                   ; row 11
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 14
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 12
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4                        ; row 12
+
+    palignr             m4, m3, m6, 15
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 13
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4                   ; row 13
+
+    pshufb              m4, m3, m7
+    palignr             m5, m3, m6, 14
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4               ; row 14
+
+    palignr             m5, m3, m6, 15
+    mova                m6, m3
+    vbroadcasti128      m3, [r2 + mmsize*2 + 16]        ; advance the reference window by 16 bytes for rows 15..30
+
+    palignr             m4, m3, m6, 1
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4                   ; row 15
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 2
+    pshufb              m4, m7
+    pshufb              m5, m6, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4                        ; row 16
+
+    palignr             m4, m3, m6, 3
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 1
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4                   ; row 17
+
+    palignr             m4, m3, m6, 4
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 2
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4               ; row 18
+
+    palignr             m4, m3, m6, 5
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 3
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4                   ; row 19
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 6
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 4
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4                        ; row 20
+
+    palignr             m4, m3, m6, 7
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 5
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4                   ; row 21
+
+    palignr             m4, m3, m6, 8
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 6
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4               ; row 22
+
+    palignr             m4, m3, m6, 9
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4                   ; row 23
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 10
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 8
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4                        ; row 24
+
+    palignr             m4, m3, m6, 11
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 9
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4                   ; row 25
+
+    palignr             m4, m3, m6, 12
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 10
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4               ; row 26
+
+    palignr             m4, m3, m6, 13
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 11
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4                   ; row 27
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 14
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 12
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], m4                        ; row 28
+
+    palignr             m4, m3, m6, 15
+    pshufb              m4, m7
+    palignr             m5, m3, m6, 13
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1], m4                   ; row 29
+
+    pshufb              m4, m3, m7
+    palignr             m5, m3, m6, 14
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], m4               ; row 30
+
+    palignr             m5, m3, m6, 15
+    vbroadcasti128      m6, [r2 + mmsize*2 + 32]        ; final 16 reference bytes, needed only for the last row
+
+    palignr             m4, m6, m3, 1
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r3], m4                   ; row 31
+    RET
+
+cglobal intra_pred_ang32_21, 3,5,9
+    lea                 r3, [ang_table_avx2 + 32 * 16]
+    lea                 r4, [r1 * 3]
+    mova                m5, [pw_1024]
+
+    ; rows 0 to 7
+    movu                m0, [r2 + 0]
+    movu                m1, [r2 + 1]
+    punpckhbw           m2, m0, m1
+    punpcklbw           m0, m1
+
+    movu                m4, [r2 + mmsize*2]
+    pshufb              m4, [ang32_shuf_mode21]
+    vextracti128        xm6, m4, 1
+
+    palignr             m3, m0, m4, 1
+    palignr             m8, m3, m6, 1
+    vinserti128         m3, m3, xm2, 1
+    vinserti128         m8, m8, xm0, 1
+
+    pmaddubsw           m4, m0, [r3 - 1 * 32]           ; [15]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 1 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 14
+    palignr             m7, m2, m0, 14
+    pmaddubsw           m4, m6, [r3 + 14 * 32]          ; [30]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 3 * 32]           ; [13]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 3 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 12
+    palignr             m7, m2, m0, 12
+    pmaddubsw           m4, m6, [r3 + 12 * 32]          ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m6, [r3 - 5 * 32]           ; [11]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 5 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 10
+    palignr             m7, m2, m0, 10
+    pmaddubsw           m4, m6, [r3 + 10 * 32]          ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 7 * 32]           ; [9]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 7 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 8
+    palignr             m7, m2, m0, 8
+
+    pmaddubsw           m4, m6, [r3 + 8 * 32]           ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 8 to 15
+    pmaddubsw           m4, m6, [r3 - 9 * 32]           ; [7]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 9 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 6
+    palignr             m7, m2, m0, 6
+    pmaddubsw           m4, m6, [r3 + 6 * 32]           ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 11 * 32]          ; [5]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 11 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 4
+    palignr             m7, m2, m0, 4
+    pmaddubsw           m4, m6, [r3 + 4 * 32]           ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m6, [r3 - 13 * 32]          ; [3]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 13 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 2
+    palignr             m7, m2, m0, 2
+    pmaddubsw           m4, m6, [r3 + 2 * 32]           ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 15 * 32]          ; [1]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 15 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    pmaddubsw           m4, m3, [r3]                    ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 16 to 23
+    palignr             m6, m3, m8, 14
+    palignr             m7, m0, m3, 14
+    pmaddubsw           m4, m6, [r3 + 15 * 32]          ; [31]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 15 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 2 * 32]           ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m3, m8, 12
+    palignr             m7, m0, m3, 12
+    pmaddubsw           m4, m6, [r3 + 13 * 32]          ; [29]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 13 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 4 * 32]           ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m6, m3, m8, 10
+    palignr             m7, m0, m3, 10
+    pmaddubsw           m4, m6, [r3 + 11 * 32]          ; [27]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 11 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 6 * 32]           ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m3, m8, 8
+    palignr             m7, m0, m3, 8
+    pmaddubsw           m4, m6, [r3 + 9 * 32]           ; [25]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 9 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 8 * 32]           ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 24 to 31
+    palignr             m6, m3, m8, 6
+    palignr             m7, m0, m3, 6
+    pmaddubsw           m4, m6, [r3 + 7 * 32]           ; [23]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 7 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 10 * 32]          ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m3, m8, 4
+    palignr             m7, m0, m3, 4
+    pmaddubsw           m4, m6, [r3 + 5 * 32]           ; [21]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 5 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    pmaddubsw           m4, m6, [r3 - 12 * 32]          ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m6, m3, m8, 2
+    palignr             m7, m0, m3, 2
+    pmaddubsw           m4, m6, [r3 + 3 * 32]           ; [19]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 3 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 14 * 32]          ; [2]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m8, [r3 + 1 * 32]           ; [17]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 + 1 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pand                m8, [pw_00ff]
+    pand                m3, [pw_00ff]
+    packuswb            m8, m3
+    movu                [r0 + r4], m8
+    RET
+
+cglobal intra_pred_ang32_16, 3,4,10                            ; angular mode 16, 32x32, 8-bit: r0=dst, r1=dst stride, r2=reference pixels; r3 scratch; 10 ymm regs
+    movu                m0, [ang32_fact_mode16]                ; interleaved (32-frac, frac) byte weight pairs, first output group
+    movu                m1, [ang32_fact_mode16 + mmsize]       ; weight pairs, second output group
+    mova                m2, [pw_1024]                          ; pmulhrsw by 1024 == rounded arithmetic >> 5 of the pmaddubsw sums
+    mova                m7, [ang32_shuf_mode16]                ; pshufb control gathering adjacent sample pairs, first group
+    mova                m8, [ang32_shuf_mode16 + mmsize]       ; pshufb control, second group
+    lea                 r3, [r1 * 3]                           ; r3 = 3*stride; rows written in batches of 4 (r0, +r1, +2*r1, +r3)
+
+    ; prepare for [30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 14, 12, 11,  9,  8,  6,  5,  3,  2,  0, -1, -2...]
+
+    movu                m6, [r2]                               ; first 32 reference bytes
+    pshufb              m6, [ang32_shuf_mode16 + mmsize*2]     ; select the projected reference samples listed in the comment above
+    mova                m9, m6
+    mova                m3, [ang32_shuf_mode16 + mmsize*3]     ; dword permute control for the cross-lane reorder below
+    vpermd              m6, m3, m6                             ; cross-lane reorder of the selected samples
+    vpermq              m9, m9, q3232                          ; broadcast upper 128 bits of the selection into both lanes
+    pslldq              m9, 4
+    palignr             m6, m9, 15
+    pslldq              m9, 1                                  ; m9 = extra left-extension bytes consumed by the largest palignr shifts
+
+    vbroadcasti128      m3, [r2 + mmsize*2 + 1]                ; 16 bytes at r2+65 in both lanes — presumably the second reference array; verify against caller layout
+
+    ; each 11-instruction group below produces one 32-pixel row:
+    ; palignr slides the reference window, pshufb pairs neighbours,
+    ; pmaddubsw applies (32-f)*a + f*b, pmulhrsw rounds >> 5,
+    ; packuswb + vpermq q3120 restore byte order across lanes
+    palignr             m4, m3, m6, 1
+    palignr             m5, m6, m9, 6
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4                               ; row 0
+
+    palignr             m4, m3, m6, 2
+    palignr             m5, m6, m9, 7
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4                          ; row 1
+
+    palignr             m4, m3, m6, 3
+    palignr             m5, m6, m9, 8
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4                      ; row 2
+
+    palignr             m4, m3, m6, 4
+    palignr             m5, m6, m9, 9
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4                          ; row 3
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 5
+    palignr             m5, m6, m9, 10
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4                               ; row 4
+
+    palignr             m4, m3, m6, 6
+    palignr             m5, m6, m9, 11
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4                          ; row 5
+
+    palignr             m4, m3, m6, 7
+    palignr             m5, m6, m9, 12
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4                      ; row 6
+
+    palignr             m4, m3, m6, 8
+    palignr             m5, m6, m9, 13
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4                          ; row 7
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 9
+    palignr             m5, m6, m9, 14
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4                               ; row 8
+
+    palignr             m4, m3, m6, 10
+    palignr             m5, m6, m9, 15
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4                          ; row 9
+
+    palignr             m4, m3, m6, 11
+    pshufb              m4, m7
+    pshufb              m5, m6, m8                             ; shift 16 for m5 == m6 unshifted; m9 no longer needed
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4                      ; row 10
+
+    palignr             m4, m3, m6, 12
+    palignr             m5, m3, m6, 1                          ; m5 window now advances into m3/m6
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4                          ; row 11
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 13
+    palignr             m5, m3, m6, 2
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4                               ; row 12
+
+    palignr             m4, m3, m6, 14
+    palignr             m5, m3, m6, 3
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4                          ; row 13
+
+    palignr             m4, m3, m6, 15
+    palignr             m5, m3, m6, 4
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4                      ; row 14
+
+    palignr             m5, m3, m6, 5
+    pshufb              m4, m3, m7                             ; shift 16 for m4 == m3 unshifted
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4                          ; row 15
+
+    lea                 r0, [r0 + r1 * 4]
+
+    vbroadcasti128      m9, [r2 + mmsize*2 + 17]               ; next 16 reference bytes (r2+81) for rows 16..31
+
+    palignr             m4, m9, m3, 1
+    palignr             m5, m3, m6, 6
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4                               ; row 16
+
+    palignr             m4, m9, m3, 2
+    palignr             m5, m3, m6, 7
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4                          ; row 17
+
+    palignr             m4, m9, m3, 3
+    palignr             m5, m3, m6, 8
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4                      ; row 18
+
+    palignr             m4, m9, m3, 4
+    palignr             m5, m3, m6, 9
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4                          ; row 19
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m9, m3, 5
+    palignr             m5, m3, m6, 10
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4                               ; row 20
+
+    palignr             m4, m9, m3, 6
+    palignr             m5, m3, m6, 11
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4                          ; row 21
+
+    palignr             m4, m9, m3, 7
+    palignr             m5, m3, m6, 12
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4                      ; row 22
+
+    palignr             m4, m9, m3, 8
+    palignr             m5, m3, m6, 13
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4                          ; row 23
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m9, m3, 9
+    palignr             m5, m3, m6, 14
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4                               ; row 24
+
+    palignr             m4, m9, m3, 10
+    palignr             m5, m3, m6, 15
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4                          ; row 25
+
+    palignr             m4, m9, m3, 11
+    pshufb              m4, m7
+    pshufb              m5, m3, m8                             ; shift 16 for m5 == m3 unshifted; m6 no longer needed
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4                      ; row 26
+
+    palignr             m4, m9, m3, 12
+    palignr             m5, m9, m3, 1                          ; m5 window now advances into m9/m3
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4                          ; row 27
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m9, m3, 13
+    palignr             m5, m9, m3, 2
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4                               ; row 28
+
+    palignr             m4, m9, m3, 14
+    palignr             m5, m9, m3, 3
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4                          ; row 29
+
+    palignr             m4, m9, m3, 15
+    palignr             m5, m9, m3, 4
+    pshufb              m4, m7
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4                      ; row 30
+
+    palignr             m5, m9, m3, 5
+    pshufb              m4, m9, m7                             ; shift 16 for m4 == m9 unshifted
+    pshufb              m5, m8
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4                          ; row 31
+    RET
+
+cglobal intra_pred_ang32_20, 3,5,10                            ; angular mode 20, 32x32, 8-bit: r0=dst, r1=dst stride, r2=reference pixels; r3/r4 scratch; 10 ymm regs
+    lea                 r3, [ang_table_avx2 + 32 * 16]         ; r3 = weight table biased so [r3 + k*32] holds fraction 16+k (see ; [N] notes)
+    lea                 r4, [r1 * 3]                           ; r4 = 3*stride; rows written in batches of 4
+    mova                m5, [pw_1024]                          ; pmulhrsw by 1024 == rounded arithmetic >> 5 of the pmaddubsw sums
+
+    ; rows 0 to 7
+    movu                m0, [r2 + 0]
+    movu                m1, [r2 + 1]
+    punpckhbw           m2, m0, m1                             ; interleave each reference byte with its right neighbour (high half)
+    punpcklbw           m0, m1                                 ; ... (low half); pairs feed pmaddubsw as (a, b) for (32-f)*a + f*b
+
+    movu                m4, [r2 + mmsize*2]                    ; 32 bytes at r2+64 — presumably the second reference array; verify against caller layout
+    pshufb              m4, [ang32_shuf_mode20]                ; select the projected reference samples for this mode
+    mova                m9, m4
+    vpermq              m9, m9, q3333
+    mova                m7, m4
+    vpermq              m7, m7, q1111
+    palignr             m4, m7, 14
+    pshufb              m4, [ang32_shuf_mode20 + mmsize*1]
+
+    vextracti128       xm6, m4, 1
+    palignr             m3, m0, m4, 1
+    palignr             m8, m3, m6, 1
+    vinserti128         m3, m3, xm2, 1                         ; m3/m8/m9: progressively left-extended pair windows for later row batches
+    vinserti128         m8, m8, xm0, 1
+    vinserti128         m9, m9, xm3, 1
+
+    pmaddubsw           m4, m0, [r3 - 5 * 32]           ; [11]
+    pmulhrsw            m4, m5                                 ; rounded >> 5
+    pmaddubsw           m1, m2, [r3 - 5 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 14                         ; step the pair window back two bytes per two rows
+    palignr             m7, m2, m0, 14
+    pmaddubsw           m4, m6, [r3 + 6 * 32]           ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 15 * 32]          ; [1]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 15 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 12
+    palignr             m7, m2, m0, 12
+    pmaddubsw           m4, m6, [r3 - 4 * 32]           ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m6, m0, m3, 10
+    palignr             m7, m2, m0, 10
+    pmaddubsw           m4, m6, [r3 + 7 * 32]           ; [23]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 7 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 14 * 32]          ; [2]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m0, m3, 8
+    palignr             m7, m2, m0, 8
+    pmaddubsw           m4, m6, [r3 - 3 * 32]           ; [13]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 3 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 6
+    palignr             m7, m2, m0, 6
+    pmaddubsw           m4, m6, [r3 + 8 * 32]           ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 8 to 15
+    pmaddubsw           m4, m6, [r3 - 13 * 32]          ; [3]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 13 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 4
+    palignr             m7, m2, m0, 4
+    pmaddubsw           m4, m6, [r3 - 2 * 32]           ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m0, m3, 2
+    palignr             m7, m2, m0, 2
+    pmaddubsw           m4, m6, [r3 + 9 * 32]           ; [25]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 9 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 12 * 32]          ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m3, [r3 - 1 * 32]           ; [15]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 - 1 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m3, m8, 14                         ; windows now slide through the left-extended pairs in m8/m3
+    palignr             m7, m0, m3, 14
+    pmaddubsw           m4, m6, [r3 + 10 * 32]          ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 11 * 32]          ; [5]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 11 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m6, m3, m8, 12
+    palignr             m7, m0, m3, 12
+    pmaddubsw           m4, m6, [r3]                    ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 16 to 23
+    palignr             m6, m3, m8, 10
+    palignr             m7, m0, m3, 10
+    pmaddubsw           m4, m6, [r3 + 11 * 32]          ; [27]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 11 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 10 * 32]          ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m3, m8, 8
+    palignr             m7, m0, m3, 8
+    pmaddubsw           m4, m6, [r3 + 1 * 32]           ; [17]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 1 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m3, m8, 6
+    palignr             m7, m0, m3, 6
+    pmaddubsw           m4, m6, [r3 + 12 * 32]          ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    pmaddubsw           m4, m6, [r3 - 9 * 32]           ; [7]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 9 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m3, m8, 4
+    palignr             m7, m0, m3, 4
+    pmaddubsw           m4, m6, [r3 + 2 * 32]           ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m3, m8, 2
+    palignr             m7, m0, m3, 2
+    pmaddubsw           m4, m6, [r3 + 13 * 32]          ; [29]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 13 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m6, [r3 - 8 * 32]           ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 24 to 31
+    pmaddubsw           m4, m8, [r3 + 3 * 32]           ; [19]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 + 3 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m8, m9, 14                         ; final batch slides through m9/m8, the widest left extension
+    palignr             m7, m3, m8, 14
+    pmaddubsw           m4, m6, [r3 + 14 * 32]          ; [30]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 7 * 32]           ; [9]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 7 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m6, m8, m9, 12
+    palignr             m7, m3, m8, 12
+    pmaddubsw           m4, m6, [r3 + 4 * 32]           ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m6, m8, m9, 10
+    palignr             m7, m3, m8, 10
+    pmaddubsw           m4, m6, [r3 + 15 * 32]          ; [31]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 15 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 6 * 32]           ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m8, m9, 8
+    palignr             m7, m3, m8, 8
+    pmaddubsw           m4, m6, [r3 + 5 * 32]           ; [21]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 5 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pand                m6, [pw_00ff]                          ; last row has fraction 0: mask out the neighbour byte of each pair...
+    pand                m7, [pw_00ff]
+    packuswb            m6, m7                                 ; ...and repack, yielding a straight copy of the reference samples
+    movu                [r0 + r4], m6
+    RET
+
+cglobal intra_pred_ang32_17, 3,4,8
+    movu                m0, [ang32_fact_mode17]
+    mova                m2, [pw_1024]
+    mova                m7, [ang32_shuf_mode17]
+    lea                 r3, [r1 * 3]
+
+    ; prepare for [31, 30, 28, 27, 26, 25, 23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 10,  9,  7,  6,  5,  4,  2,  1,  0, -1, -2...]
+
+    movu                m6, [r2]
+    pshufb              m6, [ang32_shuf_mode17 + mmsize]
+    mova                m1, m6
+    mova                m3, [ang32_shuf_mode16 + mmsize*3]
+    vpermd              m6, m3, m6
+    vpermq              m1, m1, q3232
+    pslldq              m1, 4
+
+    movu               xm4, [r2 + mmsize*2]
+    pinsrb             xm4, [r2], 0
+    vinserti128         m3, m4, xm4, 1
+
+    palignr             m4, m3, m6, 2
+    palignr             m5, m6, m1, 5
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4
+
+    palignr             m4, m3, m6, 3
+    palignr             m5, m6, m1, 6
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4
+
+    palignr             m4, m3, m6, 4
+    palignr             m5, m6, m1, 7
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m3, m6, 5
+    palignr             m5, m6, m1, 8
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 6
+    palignr             m5, m6, m1, 9
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4
+
+    palignr             m4, m3, m6, 7
+    palignr             m5, m6, m1, 10
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4
+
+    palignr             m4, m3, m6, 8
+    palignr             m5, m6, m1, 11
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m3, m6, 9
+    palignr             m5, m6, m1, 12
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 10
+    palignr             m5, m6, m1, 13
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4
+
+    palignr             m4, m3, m6, 11
+    palignr             m5, m6, m1, 14
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4
+
+    palignr             m4, m3, m6, 12
+    palignr             m5, m6, m1, 15
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m3, m6, 13
+    pshufb              m4, m7
+    pshufb              m5, m6, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m3, m6, 14
+    palignr             m5, m3, m6, 1
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4
+
+    palignr             m4, m3, m6, 15
+    palignr             m5, m3, m6, 2
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4
+
+    palignr             m5, m3, m6, 3
+    pshufb              m4, m3, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4
+
+    vbroadcasti128      m1, [r2 + mmsize*2 + 16]
+    palignr             m4, m1, m3, 1
+    palignr             m5, m3, m6, 4
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m1, m3, 2
+    palignr             m5, m3, m6, 5
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4
+
+    palignr             m4, m1, m3, 3
+    palignr             m5, m3, m6, 6
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4
+
+    palignr             m4, m1, m3, 4
+    palignr             m5, m3, m6, 7
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m1, m3, 5
+    palignr             m5, m3, m6, 8
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m1, m3, 6
+    palignr             m5, m3, m6, 9
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4
+
+    palignr             m4, m1, m3, 7
+    palignr             m5, m3, m6, 10
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4
+
+    palignr             m4, m1, m3, 8
+    palignr             m5, m3, m6, 11
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m1, m3, 9
+    palignr             m5, m3, m6, 12
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m1, m3, 10
+    palignr             m5, m3, m6, 13
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4
+
+    palignr             m4, m1, m3, 11
+    palignr             m5, m3, m6, 14
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4
+
+    palignr             m4, m1, m3, 12
+    palignr             m5, m3, m6, 15
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m1, m3, 13
+    pshufb              m4, m7
+    pshufb              m5, m3, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m4, m1, m3, 14
+    palignr             m5, m1, m3, 1
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0], m4
+
+    palignr             m4, m1, m3, 15
+    palignr             m5, m1, m3, 2
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1], m4
+
+    vbroadcasti128      m6, [r2 + mmsize*2 + mmsize]
+    palignr             m5, m1, m3, 3
+    pshufb              m4, m1, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r1 * 2], m4
+
+    palignr             m4, m6, m1, 1
+    palignr             m5, m1, m3, 4
+    pshufb              m4, m7
+    pshufb              m5, m7
+    pmaddubsw           m4, m0
+    pmaddubsw           m5, m0
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    vpermq              m4, m4, q3120
+    movu                [r0 + r3], m4
+    RET
+
+cglobal intra_pred_ang32_19, 3,5,10
+    ; 32x32 intra angular prediction, mode 19 (a negative-angle mode: the
+    ; reference is extended by projecting samples from the second reference
+    ; array; see the ang32_shuf_mode17/19 shuffle tables).
+    ; r0 = dst, r1 = dstStride, r2 = reference samples,
+    ; r3 = weight-table base (ang_table_avx2, centered on fraction 16),
+    ; r4 = 3 * dstStride.
+    lea                 r3, [ang_table_avx2 + 32 * 16]
+    lea                 r4, [r1 * 3]
+    ; m5 = pw_1024: together with pmulhrsw this rounds each 2-tap
+    ; pmaddubsw sum (weights add to 32) as (sum + 16) >> 5.
+    mova                m5, [pw_1024]
+
+    ; rows 0 to 7
+    ; Interleave neighbouring reference bytes src[i],src[i+1] into pairs
+    ; so pmaddubsw can apply the (32-f, f) filter per pixel.
+    movu                m0, [r2 + 0]
+    movu                m1, [r2 + 1]
+    punpckhbw           m2, m0, m1
+    punpcklbw           m0, m1
+
+    ; Build the projected (inverse-angle) reference extension in m4/m9.
+    ; NOTE(review): the exact projection offsets are encoded in the
+    ; ang32_shuf_mode17/ang32_shuf_mode19 data tables - confirm there.
+    movu                m4, [r2 + mmsize*2]
+    pshufb              m4, [ang32_shuf_mode17 + mmsize*1]
+    mova                m3, [ang32_shuf_mode19 + mmsize*1]
+    mova                m6, [ang32_shuf_mode19 + mmsize*2]
+    mova                m9, m4
+    vpermd              m4, m3, m4
+    vpermd              m9, m6, m9
+    pshufb              m4, [ang32_shuf_mode19]
+    pshufb              m9, [ang32_shuf_mode19]
+
+    ; m3/m8/m9 hold successively earlier windows of the extended reference
+    ; relative to m0/m2; the palignr shifts below slide over them.
+    ; NOTE(review): window layout follows from the shuffles above - verify.
+    vextracti128       xm6, m4, 1
+    palignr             m3, m0, m4, 1
+    palignr             m8, m3, m6, 1
+    palignr             m7, m8, m9, 1
+    vinserti128         m3, m3, xm2, 1
+    vinserti128         m8, m8, xm0, 1
+    vinserti128         m9, m7, xm3, 1
+
+    ; Each 6-instruction group below emits one 32-pixel row: two halves
+    ; are filtered with the same fractional weight (shown in brackets),
+    ; rounded, then packed back to bytes.
+    pmaddubsw           m4, m0, [r3 - 10 * 32]          ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m2, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m0, m3, 14
+    palignr             m7, m2, m0, 14
+    pmaddubsw           m4, m6, [r3 - 4 * 32]           ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m0, m3, 12
+    palignr             m7, m2, m0, 12
+    pmaddubsw           m4, m6, [r3 + 2 * 32]           ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 10
+    palignr             m7, m2, m0, 10
+    pmaddubsw           m4, m6, [r3 + 8 * 32]           ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m6, m0, m3, 8
+    palignr             m7, m2, m0, 8
+    pmaddubsw           m4, m6, [r3 + 14 * 32]          ; [30]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    ; same source window (m6/m7) reused for the next fraction
+    pmaddubsw           m4, m6, [r3 - 12 * 32]          ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m0, m3, 6
+    palignr             m7, m2, m0, 6
+    pmaddubsw           m4, m6, [r3 - 6 * 32]           ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m0, m3, 4
+    palignr             m7, m2, m0, 4
+    pmaddubsw           m4, m6, [r3]                    ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 8 to 15
+    palignr             m6, m0, m3, 2
+    palignr             m7, m2, m0, 2
+    pmaddubsw           m4, m6, [r3 + 6 * 32]           ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m3, [r3 + 12 * 32]          ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m3, [r3 - 14 * 32]          ; [2]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m0, [r3 - 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m3, m8, 14
+    palignr             m7, m0, m3, 14
+    pmaddubsw           m4, m6, [r3 - 8 * 32]           ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m6, m3, m8, 12
+    palignr             m7, m0, m3, 12
+    pmaddubsw           m4, m6, [r3 - 2 * 32]           ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m3, m8, 10
+    palignr             m7, m0, m3, 10
+    pmaddubsw           m4, m6, [r3 + 4 * 32]           ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m3, m8, 8
+    palignr             m7, m0, m3, 8
+    pmaddubsw           m4, m6, [r3 + 10 * 32]          ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    ; fraction 0: row is an exact copy of the integer reference samples
+    ; (low byte of each interleaved pair), so mask + pack instead of filter.
+    pand                m6, [pw_00ff]
+    pand                m7, [pw_00ff]
+    packuswb            m6, m7
+    movu                [r0 + r4], m6
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 16 to 23
+    palignr             m6, m3, m8, 6
+    palignr             m7, m0, m3, 6
+    pmaddubsw           m4, m6, [r3 - 10 * 32]          ; [6]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m3, m8, 4
+    palignr             m7, m0, m3, 4
+    pmaddubsw           m4, m6, [r3 - 4 * 32]           ; [12]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m3, m8, 2
+    palignr             m7, m0, m3, 2
+    pmaddubsw           m4, m6, [r3 + 2 * 32]           ; [18]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    pmaddubsw           m4, m8, [r3 + 8 * 32]           ; [24]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m3, [r3 + 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m6, m8, m9, 14
+    palignr             m7, m3, m8, 14
+    pmaddubsw           m4, m6, [r3 + 14 * 32]          ; [30]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m6, [r3 - 12 * 32]          ; [4]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m8, m9, 12
+    palignr             m7, m3, m8, 12
+    pmaddubsw           m4, m6, [r3 - 6 * 32]           ; [10]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m8, m9, 10
+    palignr             m7, m3, m8, 10
+    pmaddubsw           m4, m6, [r3]                    ; [16]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; rows 24 to 31
+    palignr             m6, m8, m9, 8
+    palignr             m7, m3, m8, 8
+    pmaddubsw           m4, m6, [r3 + 6 * 32]           ; [22]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 6 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    palignr             m6, m8, m9, 6
+    palignr             m7, m3, m8, 6
+    pmaddubsw           m4, m6, [r3 + 12 * 32]          ; [28]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 12 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    pmaddubsw           m4, m6, [r3 - 14 * 32]          ; [2]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 14 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1*2], m4
+
+    palignr             m6, m8, m9, 4
+    palignr             m7, m3, m8, 4
+    pmaddubsw           m4, m6, [r3 - 8 * 32]           ; [8]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 8 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r4], m4
+
+    lea                 r0, [r0 + r1 * 4]
+
+    ; Extend the reference one more step for the final rows: broadcast
+    ; byte 31 of the side reference and splice it in ahead of m9.
+    vpbroadcastb        m0, [r2 + mmsize*2 + 31]
+    palignr             m1, m9, m0, 1
+    vinserti128         m0, m1, xm8, 1
+
+    palignr             m6, m8, m9, 2
+    palignr             m7, m3, m8, 2
+    pmaddubsw           m4, m6, [r3 - 2 * 32]           ; [14]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 - 2 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0], m4
+
+    pmaddubsw           m4, m9, [r3 + 4 * 32]           ; [20]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m8, [r3 + 4 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1], m4
+
+    palignr             m6, m9, m0, 14
+    palignr             m7, m8, m9, 14
+    pmaddubsw           m4, m6, [r3 + 10 * 32]          ; [26]
+    pmulhrsw            m4, m5
+    pmaddubsw           m1, m7, [r3 + 10 * 32]
+    pmulhrsw            m1, m5
+    packuswb            m4, m1
+    movu                [r0 + r1 * 2], m4
+
+    ; fraction 0 again: direct copy of the reference bytes for the last row
+    pand                m6, [pw_00ff]
+    pand                m7, [pw_00ff]
+    packuswb            m6, m7
+    movu                [r0 + r4], m6
+    RET
+
+%endif  ; ARCH_X86_64
+;-----------------------------------------------------------------------------------------
+; end of intra_pred_ang32 angular modes avx2 asm
+;-----------------------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------------------
+; void intraPredAng8(pixel* dst, intptr_t dstStride, pixel* src, int dirMode, int bFilter)
+;-----------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal intra_pred_ang8_3, 3,4,5
+    ; 8x8 intra angular prediction, mode 3 (horizontal family).
+    ; r0 = dst, r1 = dstStride, r2 = reference array; the left reference
+    ; row is read at r2 + 17.
+    ; m3 = pw_1024: with pmulhrsw, rounds each pmaddubsw 2-tap sum
+    ; (weights add to 32) as (sum + 16) >> 5.
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 17]
+
+    ; Gather the 2-tap sample pairs for two output rows per register;
+    ; the shuffle-table names encode which reference indices each row uses.
+    pshufb            m1, m0, [c_ang8_src1_9_2_10]
+    pshufb            m2, m0, [c_ang8_src3_11_4_12]
+    pshufb            m4, m0, [c_ang8_src5_13_5_13]
+    pshufb            m0,     [c_ang8_src6_14_7_15]
+
+    ; Weight tables c_ang8_<fA>_<fB> hold (32-f, f) byte pairs for the
+    ; two rows packed in each register.
+    pmaddubsw         m1, [c_ang8_26_20]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_14_8]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_2_28]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_22_16]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose via lane swap + unpack + trans8_shuf dword permute
+    ; (horizontal modes are computed transposed, so swap before storing).
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    ; Store 8 rows of 8 bytes; r3 = 3 * stride.
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_33, 3,4,5
+    ; 8x8 intra angular prediction, mode 33 (vertical counterpart of
+    ; mode 3): reads the top reference at r2 + 1 and uses the same
+    ; shuffle/weight tables as intra_pred_ang8_3, but stores the rows
+    ; directly - no transpose is needed for vertical modes.
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 1]
+
+    pshufb            m1, m0, [c_ang8_src1_9_2_10]
+    pshufb            m2, m0, [c_ang8_src3_11_4_12]
+    pshufb            m4, m0, [c_ang8_src5_13_5_13]
+    pshufb            m0,     [c_ang8_src6_14_7_15]
+
+    pmaddubsw         m1, [c_ang8_26_20]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_14_8]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_2_28]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_22_16]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; Rows alternate between the low and high 128-bit lanes of m1/m4.
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_4, 3,4,5
+    ; 8x8 intra angular prediction, mode 4 (horizontal family).
+    ; Left reference read at r2 + 17; result is computed transposed and
+    ; swapped back before storing (same pattern as intra_pred_ang8_3,
+    ; different source-index and weight tables).
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 17]
+
+    ; 2-tap sample pairs for two rows per register (indices in table names)
+    pshufb            m1, m0, [c_ang8_src1_9_2_10]
+    pshufb            m2, m0, [c_ang8_src2_10_3_11]
+    pshufb            m4, m0, [c_ang8_src4_12_4_12]
+    pshufb            m0,     [c_ang8_src5_13_6_14]
+
+    ; (32-f, f) weights; pmulhrsw with pw_1024 rounds as (sum + 16) >> 5
+    pmaddubsw         m1, [c_ang8_21_10]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_31_20]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_9_30]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_19_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose before the store
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_32, 3,4,5
+    ; 8x8 intra angular prediction, mode 32 (vertical counterpart of
+    ; mode 4): top reference at r2 + 1, same tables as intra_pred_ang8_4,
+    ; rows stored directly without transposing.
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 1]
+
+    pshufb            m1, m0, [c_ang8_src1_9_2_10]
+    pshufb            m2, m0, [c_ang8_src2_10_3_11]
+    pshufb            m4, m0, [c_ang8_src4_12_4_12]
+    pshufb            m0,     [c_ang8_src5_13_6_14]
+
+    pmaddubsw         m1, [c_ang8_21_10]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_31_20]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_9_30]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_19_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; Rows alternate between the low and high 128-bit lanes of m1/m4.
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_5, 3, 4, 5
+    ; 8x8 intra angular prediction, mode 5 (horizontal family).
+    ; Left reference at r2 + 17; computed transposed, swapped before store.
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 17]
+
+    ; 2-tap sample pairs for two rows per register (indices in table names)
+    pshufb            m1, m0, [c_ang8_src1_9_2_10]
+    pshufb            m2, m0, [c_ang8_src2_10_3_11]
+    pshufb            m4, m0, [c_ang8_src3_11_4_12]
+    pshufb            m0,     [c_ang8_src4_12_5_13]
+
+    ; (32-f, f) weights; pw_1024 + pmulhrsw rounds as (sum + 16) >> 5
+    pmaddubsw         m1, [c_ang8_17_2]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_19_4]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_21_6]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_23_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose before the store
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_31, 3, 4, 5
+    ; 8x8 intra angular prediction, mode 31 (vertical counterpart of
+    ; mode 5): top reference at r2 + 1, same tables as intra_pred_ang8_5,
+    ; rows stored directly without transposing.
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 1]
+
+    pshufb            m1, m0, [c_ang8_src1_9_2_10]
+    pshufb            m2, m0, [c_ang8_src2_10_3_11]
+    pshufb            m4, m0, [c_ang8_src3_11_4_12]
+    pshufb            m0,     [c_ang8_src4_12_5_13]
+
+    pmaddubsw         m1, [c_ang8_17_2]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_19_4]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_21_6]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_23_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; Rows alternate between the low and high 128-bit lanes of m1/m4.
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_6, 3, 4, 5
+    ; 8x8 intra angular prediction, mode 6 (horizontal family).
+    ; Left reference at r2 + 17; computed transposed, swapped before store.
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 17]
+
+    ; 2-tap sample pairs; the shallow angle reuses overlapping windows
+    ; (e.g. src3_11 appears twice in the tables below)
+    pshufb            m1, m0, [intra_pred_shuff_0_8]
+    pshufb            m2, m0, [c_ang8_src2_10_2_10]
+    pshufb            m4, m0, [c_ang8_src3_11_3_11]
+    pshufb            m0,     [c_ang8_src3_11_4_12]
+
+    ; (32-f, f) weights; pw_1024 + pmulhrsw rounds as (sum + 16) >> 5
+    pmaddubsw         m1, [c_ang8_13_26]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_7_20]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_1_14]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_27_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose before the store
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_30, 3, 4, 5
+    ; 8x8 intra angular prediction, mode 30 (vertical counterpart of
+    ; mode 6): top reference at r2 + 1, same tables as intra_pred_ang8_6,
+    ; rows stored directly without transposing.
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 1]
+
+    pshufb            m1, m0, [intra_pred_shuff_0_8]
+    pshufb            m2, m0, [c_ang8_src2_10_2_10]
+    pshufb            m4, m0, [c_ang8_src3_11_3_11]
+    pshufb            m0,     [c_ang8_src3_11_4_12]
+
+    pmaddubsw         m1, [c_ang8_13_26]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_7_20]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_1_14]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_27_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; Rows alternate between the low and high 128-bit lanes of m1/m4.
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_9, 3, 5, 5
+    ; 8x8 intra angular prediction, mode 9 (horizontal family, shallow
+    ; angle): every row reads the same source pairs (intra_pred_shuff_0_8),
+    ; only the fractional weights change, so the four weight rows live in
+    ; one table (c_ang8_mode_27, shared with the mirror mode 27) indexed
+    ; off r4. Left reference at r2 + 17; result transposed before store.
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 17]
+
+    pshufb            m0, [intra_pred_shuff_0_8]
+
+    lea               r4, [c_ang8_mode_27]
+    pmaddubsw         m1, m0, [r4]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, m0, [r4 + mmsize]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [r4 + 3 * mmsize]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose before the store
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_27, 3, 5, 5
+    ; 8x8 intra angular prediction, mode 27 (vertical counterpart of
+    ; mode 9): top reference at r2 + 1, same c_ang8_mode_27 weight table,
+    ; rows stored directly without transposing.
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 1]
+
+    pshufb            m0, [intra_pred_shuff_0_8]
+
+    lea               r4, [c_ang8_mode_27]
+    pmaddubsw         m1, m0, [r4]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, m0, [r4 + mmsize]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [r4 + 3 * mmsize]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; Rows alternate between the low and high 128-bit lanes of m1/m4.
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+; intra_pred_ang8_25: 8x8 intra angular prediction, mode 25, AVX2.
+; Same structure as intra_pred_ang8_27, but the reference window starts at
+; [r2] (includes the corner pixel, consistent with this mode's small
+; negative angle) and the fraction table is c_ang8_mode_25.
+; r0 = dst, r1 = dst stride, r2 = neighbour-pixel buffer.
+cglobal intra_pred_ang8_25, 3, 5, 5
+    mova              m3, [pw_1024]   ; rounding constant for pmulhrsw (~(x+16)>>5)
+    vbroadcasti128    m0, [r2]        ; refs incl. corner, replicated to both lanes
+
+    pshufb            m0, [intra_pred_shuff_0_8]  ; pair adjacent refs for pmaddubsw
+
+    lea               r4, [c_ang8_mode_25]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; even rows in low lanes, odd rows in high lanes (packuswb interleave)
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+
+INIT_YMM avx2
+; intra_pred_ang8_7: 8x8 intra angular prediction, mode 7, AVX2.
+; Horizontal-family mode: refs are taken from [r2 + 17] and the filtered
+; block is transposed before the store.  Unlike the table-walk kernels,
+; this mode needs a sliding source window, so each two-row pair gets its
+; own source shuffle (c_ang8_src*) matched with its own fraction pair
+; (c_ang8_9_18 = fractions 9/18, c_ang8_27_4 = 27/4, etc.).
+; r0 = dst, r1 = dst stride, r2 = neighbour-pixel buffer.
+cglobal intra_pred_ang8_7, 3, 4, 5
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 17]
+
+    pshufb            m1, m0, [intra_pred_shuff_0_8]   ; window for rows 0-1
+    pshufb            m2, m0, [c_ang8_src1_9_2_10]     ; window for rows 2-3
+    pshufb            m4, m0, [c_ang8_src2_10_2_10]    ; window for rows 4-5
+    pshufb            m0,     [c_ang8_src2_10_3_11]    ; window for rows 6-7
+
+    pmaddubsw         m1, [c_ang8_9_18]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_27_4]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_13_22]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_31_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose (lane regroup, byte/word interleave, dword permute)
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+; intra_pred_ang8_29: 8x8 intra angular prediction, mode 29, AVX2.
+; Vertical-family twin of mode 7: identical source shuffles and fraction
+; tables, but refs start at [r2 + 1] (above neighbours) and the result is
+; stored directly, without the transpose step.
+; r0 = dst, r1 = dst stride, r2 = neighbour-pixel buffer.
+cglobal intra_pred_ang8_29, 3, 4, 5
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 1]
+
+    pshufb            m1, m0, [intra_pred_shuff_0_8]   ; window for rows 0-1
+    pshufb            m2, m0, [c_ang8_src1_9_2_10]     ; window for rows 2-3
+    pshufb            m4, m0, [c_ang8_src2_10_2_10]    ; window for rows 4-5
+    pshufb            m0,     [c_ang8_src2_10_3_11]    ; window for rows 6-7
+
+    pmaddubsw         m1, [c_ang8_9_18]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_27_4]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_13_22]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_31_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; even rows in low lanes, odd rows in high lanes (packuswb interleave)
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+
+INIT_YMM avx2
+; intra_pred_ang8_8: 8x8 intra angular prediction, mode 8, AVX2.
+; Horizontal-family mode (refs at [r2 + 17], result transposed).  Rows 0-5
+; all read the same unshifted source window; only rows 6-7 advance the
+; window (c_ang8_src2_10_2_10).  Fraction pairs: 5/10, 15/20, 25/30, 3/8.
+; r0 = dst, r1 = dst stride, r2 = neighbour-pixel buffer.
+cglobal intra_pred_ang8_8, 3, 4, 6
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 17]
+    mova              m5, [intra_pred_shuff_0_8]
+
+    ; NOTE(review): m1, m2 and m4 receive identical shuffle results here; two
+    ; of these pshufbs could be plain copies of the first.  Harmless as-is.
+    pshufb            m1, m0, m5
+    pshufb            m2, m0, m5
+    pshufb            m4, m0, m5
+    pshufb            m0,     [c_ang8_src2_10_2_10]    ; shifted window, rows 6-7
+
+    pmaddubsw         m1, [c_ang8_5_10]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_15_20]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_25_30]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_3_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose before storing
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+; intra_pred_ang8_28: 8x8 intra angular prediction, mode 28, AVX2.
+; Vertical-family twin of mode 8: same shuffles and fraction tables, but
+; refs start at [r2 + 1] and the output is stored without a transpose.
+; r0 = dst, r1 = dst stride, r2 = neighbour-pixel buffer.
+cglobal intra_pred_ang8_28, 3, 4, 6
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 1]
+    mova              m5, [intra_pred_shuff_0_8]
+
+    ; NOTE(review): m1, m2 and m4 get identical shuffle results (same as the
+    ; mode-8 kernel); two of these could be register copies.
+    pshufb            m1, m0, m5
+    pshufb            m2, m0, m5
+    pshufb            m4, m0, m5
+    pshufb            m0,     [c_ang8_src2_10_2_10]    ; shifted window, rows 6-7
+
+    pmaddubsw         m1, [c_ang8_5_10]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_15_20]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_25_30]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_3_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; even rows in low lanes, odd rows in high lanes (packuswb interleave)
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+
+INIT_YMM avx2
+; intra_pred_ang8_11: 8x8 intra angular prediction, mode 11, AVX2.
+; Horizontal-family mode sharing the mode-25 fraction table.  The reference
+; vector is the second half of the buffer ([r2 + 16]) with its first byte
+; replaced by the corner pixel [r2], then the standard filter + 8x8
+; transpose pipeline runs.  r0 = dst, r1 = dst stride, r2 = refs.
+cglobal intra_pred_ang8_11, 3, 5, 5
+    mova              m3, [pw_1024]
+    movu              xm1, [r2 + 16]
+    pinsrb            xm1, [r2], 0                ; overlay corner pixel at position 0
+    pshufb            xm1, [intra_pred_shuff_0_8] ; pair adjacent refs for pmaddubsw
+    vinserti128       m0, m1, xm1, 1              ; duplicate into both 128-bit lanes
+
+    lea               r4, [c_ang8_mode_25]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose before storing
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+; intra_pred_ang8_15: 8x8 intra angular prediction, mode 15, AVX2.
+; Strong-negative-angle horizontal mode: the reference window shifts by one
+; pixel every two rows, pulling projected above-row pixels ([r2 + 2], +4,
+; +6, +8) into the front of xm5.  For each row pair the previous window
+; (xm0) and the freshly shifted one (xm5) occupy the low/high lanes of m0.
+; Result is transposed before the store.  r0 = dst, r1 = stride, r2 = refs.
+cglobal intra_pred_ang8_15, 3, 6, 6
+    mova              m3, [pw_1024]
+    movu              xm5, [r2 + 16]
+    pinsrb            xm5, [r2], 0                ; corner pixel at position 0
+    lea               r5, [intra_pred_shuff_0_8]
+    mova              xm0, xm5                    ; keep unshifted window for row 0
+    pslldq            xm5, 1                      ; shift window by one ref
+    pinsrb            xm5, [r2 + 2], 0            ; prepend next projected pixel
+    vinserti128       m0, m0, xm5, 1              ; lanes = {row0 window, row1 window}
+    pshufb            m0, [r5]
+
+    lea               r4, [c_ang8_mode_15]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    mova              xm0, xm5
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 4], 0
+    vinserti128       m0, m0, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    mova              xm0, xm5
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 6], 0
+    vinserti128       m0, m0, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    mova              xm0, xm5
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 8], 0
+    vinserti128       m0, m0, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose before storing
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+; intra_pred_ang8_16: 8x8 intra angular prediction, mode 16, AVX2.
+; Horizontal mode computed without a transpose: the destination pointer is
+; moved to the last row and the stride negated, so rows are emitted
+; bottom-up.  A combined reference vector (left refs + projected above
+; refs, ordered by c_ang8_mode_16) is consumed four rows at a time; psrldq
+; advances the window between the two half-blocks.  angHor8_tab_16 holds
+; the per-column interpolation weights.  r0 = dst, r1 = stride, r2 = refs.
+cglobal intra_pred_ang8_16, 3,4,7
+    lea                 r0, [r0 + r1 * 8]
+    sub                 r0, r1                          ; r0 -> last row
+    neg                 r1                              ; walk upwards
+    lea                 r3, [r1 * 3]
+    vbroadcasti128      m0, [angHor8_tab_16]            ; m0 = factor
+    mova                m1, [intra_pred8_shuff16]       ; m1 = 4 of Row shuffle
+    movu                m2, [intra_pred8_shuff16 + 8]   ; m2 = 4 of Row shuffle
+
+    ; prepare reference pixel
+    movq                xm3, [r2 + 16 + 1]              ; m3 = [-1 -2 -3 -4 -5 -6 -7 -8 x x x x x x x x]
+    movhps              xm3, [r2 + 2]                   ; m3 = [-1 -2 -3 -4 -5 -6 -7 -8 2 3 x 5 6 x 8 x]
+    pslldq              xm3, 1
+    pinsrb              xm3, [r2], 0                    ; m3 = [ 0 -1 -2 -3 -4 -5 -6 -7 -8 2 3 x 5 6 x 8]
+    pshufb              xm3, [c_ang8_mode_16]
+    vinserti128         m3, m3, xm3, 1                  ; m3 = [-8 -7 -6 -5 -4 -3 -2 -1  0 2 3 5 6 8]
+
+    ; process 4 rows
+    pshufb              m4, m3, m1
+    pshufb              m5, m3, m2
+    psrldq              m3, 4                           ; advance ref window for next 4 rows
+    punpcklbw           m6, m5, m4
+    punpckhbw           m5, m4
+    pmaddubsw           m6, m0
+    pmulhrsw            m6, [pw_1024]                   ; round to (x+16)>>5
+    pmaddubsw           m5, m0
+    pmulhrsw            m5, [pw_1024]
+    packuswb            m6, m5
+    vextracti128        xm5, m6, 1
+    movq                [r0], xm6
+    movhps              [r0 + r1], xm6
+    movq                [r0 + r1 * 2], xm5
+    movhps              [r0 + r3], xm5
+
+    ; process 4 rows
+    lea                 r0, [r0 + r1 * 4]
+    pshufb              m4, m3, m1
+    pshufb              m5, m3, m2
+    punpcklbw           m6, m5, m4
+    punpckhbw           m5, m4
+    pmaddubsw           m6, m0
+    pmulhrsw            m6, [pw_1024]
+    pmaddubsw           m5, m0
+    pmulhrsw            m5, [pw_1024]
+    packuswb            m6, m5
+    vextracti128        xm5, m6, 1
+    movq                [r0], xm6
+    movhps              [r0 + r1], xm6
+    movq                [r0 + r1 * 2], xm5
+    movhps              [r0 + r3], xm5
+    RET
+
+; intra_pred_ang8_20: 8x8 intra angular prediction, mode 20, AVX2.
+; Two alternative implementations are kept; "%if 1" selects the first
+; (bottom-up horizontal kernel in the style of mode 16).  The %else branch
+; is the older serial-shift kernel and is currently compiled out.
+%if 1
+INIT_YMM avx2
+; Variant 1: destination walked bottom-up (stride negated).  The reference
+; vector mixes above refs [r2 + 1..8] with projected left refs from
+; [r2 + 16 + 2], reordered by c_ang8_mode_20; psrldq slides the window and
+; angHor8_tab_20 supplies per-row-pair weights.
+cglobal intra_pred_ang8_20, 3,5,6
+    lea                 r0, [r0 + r1 * 8]
+    sub                 r0, r1                          ; r0 -> last row
+    neg                 r1                              ; walk upwards
+    lea                 r3, [angHor8_tab_20]
+    lea                 r4, [r1 * 3]
+    movu                m5, [intra_pred_shuff_0_8 + 16]
+
+    ; prepare reference pixel
+    movq                xm1, [r2 + 1]                   ; m3 = [ 1  2  3  4  5  6  7  8  x  x x  x  x  x  x  x]
+    movhps              xm1, [r2 + 16 + 2]              ; m3 = [ 1  2  3  4  5  6  7  8 -2 -3 x -5 -6  x -8  x]
+    palignr             xm1, xm1, [r2 - 15], 15         ; m3 = [ 0  1  2  3  4  5  6  7  8 -2 -3 x -5 -6  x -8]
+    pshufb              xm1, [c_ang8_mode_20]
+    vinserti128         m1, m1, xm1, 1
+
+    ; process 4 rows
+    pshufb              m3, m1, m5
+    psrldq              m1, 2                           ; advance ref window
+    pmaddubsw           m3, [r3 + 0 * 16]
+    pmulhrsw            m3, [pw_1024]                   ; round to (x+16)>>5
+
+    pshufb              m4, m1, [intra_pred_shuff_0_8]
+    psrldq              m1, 1
+    pmaddubsw           m4, [r3 + 2 * 16]
+    pmulhrsw            m4, [pw_1024]
+
+    packuswb            m3, m4
+    vextracti128        xm4, m3, 1
+    movq                [r0], xm3
+    movq                [r0 + r1], xm4
+    movhps              [r0 + r1 * 2], xm3
+    movhps              [r0 + r4], xm4
+
+    ; process 4 rows
+    lea                 r0, [r0 + r1 * 4]
+    pshufb              m3, m1, m5
+    psrldq              m1, 1
+    pmaddubsw           m3, [r3 + 4 * 16]
+    pmulhrsw            m3, [pw_1024]
+
+    pshufb              m4, m1, m5
+    pmaddubsw           m4, [r3 + 6 * 16]
+    pmulhrsw            m4, [pw_1024]
+
+    packuswb            m3, m4
+    vextracti128        xm4, m3, 1
+    movq                [r0], xm3
+    movq                [r0 + r1], xm4
+    movhps              [r0 + r1 * 2], xm3
+    movhps              [r0 + r4], xm4
+    RET
+
+%else
+INIT_YMM avx2
+; Variant 2 (disabled): serial-shift kernel; pulls projected left refs
+; ([r2 + 2/3/5/6/8 + 16]) into the window one at a time, two rows per
+; pmaddubsw against c_ang8_mode_20, stored top-down without transpose.
+cglobal intra_pred_ang8_20, 3, 6, 6
+    mova              m3, [pw_1024]
+    movu              xm5, [r2]
+    lea               r5, [intra_pred_shuff_0_8]
+    mova              xm0, xm5
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 2 + 16], 0
+    vinserti128       m0, m0, xm5, 1
+    pshufb            m0, [r5]
+
+    lea               r4, [c_ang8_mode_20]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    mova              xm0, xm5
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 3 + 16], 0
+    vinserti128       m0, m0, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 5 + 16], 0
+    ; NOTE(review): this step builds m0 from m5/xm5 only (no preceding
+    ; "mova xm0, xm5"), unlike the other steps -- verify against variant 1
+    ; if this branch is ever re-enabled.
+    vinserti128       m0, m5, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 6 + 16], 0
+    mova              xm0, xm5
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 8 + 16], 0
+    vinserti128       m0, m0, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+%endif
+
+INIT_YMM avx2
+; intra_pred_ang8_21: 8x8 intra angular prediction, mode 21, AVX2.
+; Vertical-family twin of mode 15 (shares c_ang8_mode_15 fractions): the
+; base window comes from the above refs at [r2], and projected left refs
+; ([r2 + 2/4/6/8 + 16]) are shifted in one per row pair.  Stored top-down
+; without transpose.  r0 = dst, r1 = dst stride, r2 = refs.
+cglobal intra_pred_ang8_21, 3, 6, 6
+    mova              m3, [pw_1024]
+    movu              xm5, [r2]
+    lea               r5, [intra_pred_shuff_0_8]
+    mova              xm0, xm5                    ; keep unshifted window for row 0
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 2 + 16], 0       ; prepend projected left ref
+    vinserti128       m0, m0, xm5, 1              ; lanes = consecutive row windows
+    pshufb            m0, [r5]
+
+    lea               r4, [c_ang8_mode_15]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    mova              xm0, xm5
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 4 + 16], 0
+    vinserti128       m0, m0, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    mova              xm0, xm5
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 6 + 16], 0
+    vinserti128       m0, m0, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    mova              xm0, xm5
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 8 + 16], 0
+    vinserti128       m0, m0, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; even rows in low lanes, odd rows in high lanes (packuswb interleave)
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+; intra_pred_ang8_22: 8x8 intra angular prediction, mode 22, AVX2.
+; Vertical-family twin of mode 14 (shares c_ang8_mode_14 fractions): base
+; window is the above refs at [r2]; projected left refs [r2 + 2/5/7 + 16]
+; are shifted in every second pmaddubsw step (the window advances less
+; often than in mode 21).  Stored top-down without transpose.
+; r0 = dst, r1 = dst stride, r2 = refs.
+cglobal intra_pred_ang8_22, 3, 6, 6
+    mova              m3, [pw_1024]
+    movu              xm5, [r2]
+    lea               r5, [intra_pred_shuff_0_8]
+    vinserti128       m0, m5, xm5, 1              ; same window in both lanes
+    pshufb            m0, [r5]
+
+    lea               r4, [c_ang8_mode_14]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 2 + 16], 0       ; shift in projected left ref
+    vinserti128       m0, m5, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 5 + 16], 0
+    vinserti128       m0, m5, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 7 + 16], 0
+    pshufb            xm5, [r5]
+    vinserti128       m0, m0, xm5, 1              ; reuse prior low lane, new high lane
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; even rows in low lanes, odd rows in high lanes (packuswb interleave)
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+; intra_pred_ang8_14: 8x8 intra angular prediction, mode 14, AVX2.
+; Horizontal-family mode: window is the left refs at [r2 + 16] with the
+; corner pixel [r2] overlaid at position 0; projected above refs
+; ([r2 + 2/5/7]) are shifted in as the rows advance.  Uses the
+; c_ang8_mode_14 fraction table and transposes the 8x8 result.
+; r0 = dst, r1 = dst stride, r2 = refs.
+cglobal intra_pred_ang8_14, 3, 6, 6
+    mova              m3, [pw_1024]
+    movu              xm5, [r2 + 16]
+    pinsrb            xm5, [r2], 0                ; corner pixel at position 0
+    lea               r5, [intra_pred_shuff_0_8]
+    vinserti128       m0, m5, xm5, 1              ; same window in both lanes
+    pshufb            m0, [r5]
+
+    lea               r4, [c_ang8_mode_14]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 2], 0            ; shift in projected above ref
+    vinserti128       m0, m5, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 5], 0
+    vinserti128       m0, m5, xm5, 1
+    pshufb            m0, [r5]
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 7], 0
+    pshufb            xm5, [r5]
+    vinserti128       m0, m0, xm5, 1              ; reuse prior low lane, new high lane
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose before storing
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+; intra_pred_ang8_13: 8x8 intra angular prediction, mode 13, AVX2.
+; Horizontal-family mode using c_ang8_mode_13.  Window = left refs at
+; [r2 + 16] with corner [r2] overlaid; projected above refs [r2 + 4] and
+; [r2 + 7] are shifted in.  Note the lane juggling: the once-shifted
+; window xm4 is placed in the high lane for rows 2-3 and then also in the
+; low lane for rows 4-5.  Result is transposed before the store.
+; r0 = dst, r1 = dst stride, r2 = refs.
+cglobal intra_pred_ang8_13, 3, 6, 6
+    mova              m3, [pw_1024]
+    movu              xm5, [r2 + 16]
+    pinsrb            xm5, [r2], 0                ; corner pixel at position 0
+    lea               r5, [intra_pred_shuff_0_8]
+    vinserti128       m0, m5, xm5, 1              ; same window in both lanes
+    pshufb            m0, [r5]
+
+    lea               r4, [c_ang8_mode_13]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 4], 0            ; shift in projected above ref
+    pshufb            xm4, xm5, [r5]
+    vinserti128       m0, m0, xm4, 1              ; shifted window into high lane
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    vinserti128       m0, m0, xm4, 0              ; shifted window into low lane too
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 7], 0
+    pshufb            xm5, [r5]
+    vinserti128       m0, m0, xm5, 1
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose before storing
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+
+INIT_YMM avx2
+; intra_pred_ang8_23: 8x8 intra angular prediction, mode 23, AVX2.
+; Vertical-family twin of mode 13 (same c_ang8_mode_13 fractions and lane
+; juggling), but the base window is the above refs at [r2] and the
+; projected refs come from the left half ([r2 + 4/7 + 16]).  Stored
+; top-down without transpose.  r0 = dst, r1 = dst stride, r2 = refs.
+cglobal intra_pred_ang8_23, 3, 6, 6
+    mova              m3, [pw_1024]
+    movu              xm5, [r2]
+    lea               r5, [intra_pred_shuff_0_8]
+    vinserti128       m0, m5, xm5, 1              ; same window in both lanes
+    pshufb            m0, [r5]
+
+    lea               r4, [c_ang8_mode_13]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 4 + 16], 0       ; shift in projected left ref
+    pshufb            xm4, xm5, [r5]
+    vinserti128       m0, m0, xm4, 1              ; shifted window into high lane
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    vinserti128       m0, m0, xm4, 0              ; shifted window into low lane too
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    pslldq            xm5, 1
+    pinsrb            xm5, [r2 + 7 + 16], 0
+    pshufb            xm5, [r5]
+    vinserti128       m0, m0, xm5, 1
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; even rows in low lanes, odd rows in high lanes (packuswb interleave)
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+; intra_pred_ang8_12: 8x8 intra angular prediction, mode 12, AVX2.
+; Horizontal-family mode sharing the mode-24 fraction table
+; (c_ang8_mode_24).  Rows 0-5 use the left refs ([r2 + 16], corner [r2]
+; overlaid); for rows 6-7 the window is shifted by a word and seeded with
+; the projected ref [r2 + 6] plus the corner [r2 + 0].  The 8x8 result is
+; transposed before storing.  r0 = dst, r1 = dst stride, r2 = refs.
+cglobal intra_pred_ang8_12, 3, 5, 5
+    mova              m3, [pw_1024]
+    movu              xm1, [r2 + 16]
+    pinsrb            xm1, [r2], 0                ; corner pixel at position 0
+    pshufb            xm1, [intra_pred_shuff_0_8] ; pair adjacent refs for pmaddubsw
+    vinserti128       m0, m1, xm1, 1              ; duplicate into both lanes
+
+    lea               r4, [c_ang8_mode_24]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    pslldq            xm0, 2                      ; advance window one ref pair
+    pinsrb            xm0, [r2 + 6], 0            ; projected above ref
+    pinsrb            xm0, [r2 + 0], 1            ; corner pixel
+    vinserti128       m0, m0, xm0, 1
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; 8x8 byte transpose before storing
+    vperm2i128        m2, m1, m4, 00100000b
+    vperm2i128        m1, m1, m4, 00110001b
+    punpcklbw         m4, m2, m1
+    punpckhbw         m2, m1
+    punpcklwd         m1, m4, m2
+    punpckhwd         m4, m2
+    mova              m0, [trans8_shuf]
+    vpermd            m1, m0, m1
+    vpermd            m4, m0, m4
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    movhps            [r0 + r1], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    movhps            [r0 + r1], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + 2 * r1], xm2
+    movhps            [r0 + r3], xm2
+    RET
+
+INIT_YMM avx2
+; intra_pred_ang8_24: 8x8 intra angular prediction, mode 24, AVX2.
+; Vertical-family twin of mode 12: same c_ang8_mode_24 fractions, but the
+; window starts at the above refs [r2], rows 6-7 pull the projected left
+; ref [r2 + 16 + 6], and the output is stored top-down without transpose.
+; r0 = dst, r1 = dst stride, r2 = refs.
+cglobal intra_pred_ang8_24, 3, 5, 5
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2]        ; above refs incl. corner, both lanes
+
+    pshufb            m0, [intra_pred_shuff_0_8]  ; pair adjacent refs for pmaddubsw
+
+    lea               r4, [c_ang8_mode_24]
+    pmaddubsw         m1, m0, [r4]                ; rows 0-1
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, m0, [r4 + mmsize]       ; rows 2-3
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, m0, [r4 + 2 * mmsize]   ; rows 4-5
+    pmulhrsw          m4, m3
+    pslldq            xm0, 2                      ; advance window one ref pair
+    pinsrb            xm0, [r2 + 16 + 6], 0       ; projected left ref
+    pinsrb            xm0, [r2 + 0], 1            ; corner pixel
+    vinserti128       m0, m0, xm0, 1
+    pmaddubsw         m0, [r4 + 3 * mmsize]       ; rows 6-7
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    ; even rows in low lanes, odd rows in high lanes (packuswb interleave)
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
+
+; Filter one pair of 16-pixel rows for the 16x16 angular kernels:
+; weights come from coefficient row %3 of the table at r4; the two source
+; vectors are expected in m1/m2 and the rounding constant in m0.
+; Row results are stored to addresses %1 and %2.  Clobbers m3/m4.
+%macro INTRA_PRED_ANG16_MC0 3
+    pmaddubsw         m3, m1, [r4 + %3 * mmsize]
+    pmulhrsw          m3, m0
+    pmaddubsw         m4, m2, [r4 + %3 * mmsize]
+    pmulhrsw          m4, m0
+    packuswb          m3, m4
+    movu              [%1], xm3
+    vextracti128      xm4, m3, 1
+    movu              [%2], xm4
+%endmacro
+
+; Emit four consecutive 16-pixel rows (coefficient rows %1 and %1+1) at
+; r0 / r0+r1 / r0+2*r1 / r0+r3, via two INTRA_PRED_ANG16_MC0 expansions.
+%macro INTRA_PRED_ANG16_MC1 1
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, %1
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, (%1 + 1)
+%endmacro
+
+; Load the two 16-byte reference windows at offsets %1 and %1+8 from r2,
+; broadcast each to both lanes and pre-shuffle with m5 ready for
+; pmaddubsw.  Results land in m1 (low window) and m2 (high window).
+%macro INTRA_PRED_ANG16_MC2 1
+    vbroadcasti128    m1, [r2 + %1]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + (%1 + 8)]
+    pshufb            m2, m5
+%endmacro
+
+; Produce a single 16-pixel row: merge the low lanes of m1/m2, filter with
+; coefficient row %2, round against m0, pack and store the row at %1.
+; The vpermq fixes the lane order left by the in-lane packuswb.
+%macro INTRA_PRED_ANG16_MC3 2
+    vperm2i128        m1, m1, m2, 00100000b
+    pmaddubsw         m3, m1, [r4 + (%2 * mmsize)]
+    pmulhrsw          m3, m0
+    packuswb          m3, m3
+    vpermq            m3, m3, 11011000b
+    movu              [%1], xm3
+%endmacro
+
+; Produce two 16-pixel rows: filter the merged m1/m2 window with
+; coefficient row %3 into m4, pack with the row already in m3, restore the
+; lane order, and store the two rows at %1 and %2.
+%macro INTRA_PRED_ANG16_MC4 3
+    vperm2i128        m1, m1, m2, 00100000b
+    pmaddubsw         m4, m1, [r4 + (%3 * mmsize)]
+    pmulhrsw          m4, m0
+    packuswb          m3, m4
+    vpermq            m3, m3, 11011000b
+    movu              [%1], xm3
+    vextracti128      xm3, m3, 1
+    movu              [%2], xm3
+%endmacro
+
+%if ARCH_X86_64 == 1
+; Transpose a 16x16 byte block held in m0-m7 and store the 16 rows at r0
+; with stride r1 (r3 must hold 3*r1).  x86-64 only: uses m8 as scratch.
+; Standard SIMD transpose ladder: interleave bytes -> words -> dwords,
+; then vpermq 0xD8 fixes the lane crossing in each result register.
+; Advances r0 by 12 rows as a side effect; clobbers m0-m8.
+%macro INTRA_PRED_TRANS_STORE_16x16 0
+    ; stage 1: byte interleave of row pairs
+    punpcklbw    m8, m0, m1
+    punpckhbw    m0, m1
+
+    punpcklbw    m1, m2, m3
+    punpckhbw    m2, m3
+
+    punpcklbw    m3, m4, m5
+    punpckhbw    m4, m5
+
+    punpcklbw    m5, m6, m7
+    punpckhbw    m6, m7
+
+    ; stage 2: word interleave
+    punpcklwd    m7, m8, m1
+    punpckhwd    m8, m1
+
+    punpcklwd    m1, m3, m5
+    punpckhwd    m3, m5
+
+    punpcklwd    m5, m0, m2
+    punpckhwd    m0, m2
+
+    punpcklwd    m2, m4, m6
+    punpckhwd    m4, m6
+
+    ; stage 3: dword interleave
+    punpckldq    m6, m7, m1
+    punpckhdq    m7, m1
+
+    punpckldq    m1, m8, m3
+    punpckhdq    m8, m3
+
+    punpckldq    m3, m5, m2
+    punpckhdq    m5, m2
+
+    punpckldq    m2, m0, m4
+    punpckhdq    m0, m4
+
+    ; fix 128-bit lane order left by the in-lane unpacks
+    vpermq       m6, m6, 0xD8
+    vpermq       m7, m7, 0xD8
+    vpermq       m1, m1, 0xD8
+    vpermq       m8, m8, 0xD8
+    vpermq       m3, m3, 0xD8
+    vpermq       m5, m5, 0xD8
+    vpermq       m2, m2, 0xD8
+    vpermq       m0, m0, 0xD8
+
+    ; store 16 rows, two per register (low xmm / high lane)
+    movu            [r0], xm6
+    vextracti128    xm4, m6, 1
+    movu            [r0 + r1], xm4
+
+    movu            [r0 + 2 * r1], xm7
+    vextracti128    xm4, m7, 1
+    movu            [r0 + r3], xm4
+
+    lea             r0, [r0 + 4 * r1]
+
+    movu            [r0], xm1
+    vextracti128    xm4, m1, 1
+    movu            [r0 + r1], xm4
+
+    movu            [r0 + 2 * r1], xm8
+    vextracti128    xm4, m8, 1
+    movu            [r0 + r3], xm4
+
+    lea             r0, [r0 + 4 * r1]
+
+    movu            [r0], xm3
+    vextracti128    xm4, m3, 1
+    movu            [r0 + r1], xm4
+
+    movu            [r0 + 2 * r1], xm5
+    vextracti128    xm4, m5, 1
+    movu            [r0 + r3], xm4
+
+    lea             r0, [r0 + 4 * r1]
+
+    movu            [r0], xm2
+    vextracti128    xm4, m2, 1
+    movu            [r0 + r1], xm4
+
+    movu            [r0 + 2 * r1], xm0
+    vextracti128    xm4, m0, 1
+    movu            [r0 + r3], xm4
+%endmacro
+
+; Compute one packed pair of filtered 16-pixel rows into register %1:
+; applies coefficient row %3 (table base in r4) to the pre-shuffled source
+; vectors m9 and m10, rounds with m11, and packs both halves into %1.
+; %2 is used as scratch for the second half.
+%macro INTRA_PRED_ANG16_CAL_ROW 3
+    pmaddubsw         %1, m9, [r4 + (%3 * mmsize)]
+    pmulhrsw          %1, m11
+    pmaddubsw         %2, m10, [r4 + (%3 * mmsize)]
+    pmulhrsw          %2, m11
+    packuswb          %1, %2
+%endmacro
+
+
+; 16x16 angular intra prediction, mode 12 (horizontal family), 8-bit, AVX2.
+; r0 = dst, r1 = dst stride, r2 = reference sample buffer (second half of the
+; reference row is read at r2 + mmsize + ...); r3 = 3*stride.
+; Per row pair: pshufb gathers the two source pixels per output pixel
+; (masks ang16_shuf_mode12), pmaddubsw applies the (32-frac, frac) byte
+; weights from angHor_tab_12, and pmulhrsw by pw_1024 is the rounded >>5
+; of the HEVC angular filter.  Each following pair slides the reference
+; window 2 bytes via palignr against m6.
+INIT_YMM avx2
+cglobal intra_pred_ang16_12, 3,4,9
+    vbroadcasti128    m0, [angHor_tab_12]
+    vbroadcasti128    m1, [angHor_tab_12 + mmsize/2]
+    mova              m2, [pw_1024]
+    mova              m7, [ang16_shuf_mode12]
+    mova              m8, [ang16_shuf_mode12 + mmsize]
+    lea               r3, [r1 * 3]
+
+    ; Build the mode-12 reference row: above samples from r2 + mmsize - 2 with
+    ; projected left-column samples (byte offsets 13, 6, 0 from r2) inserted
+    ; into the low bytes.  NOTE(review): offsets follow the mode-12 inverse
+    ; angle -- confirm against the C reference implementation.
+    movu              xm4, [r2 + mmsize - 2]
+    pinsrb            xm4, [r2 +  0], 2
+    pinsrb            xm4, [r2 +  6], 1
+    pinsrb            xm4, [r2 + 13], 0
+    vbroadcasti128    m6, [r2 + mmsize + 14]
+    vinserti128       m3, m4, xm4, 1
+
+    ; rows 0-1
+    pshufb            m4, m3, m7
+    pshufb            m5, m3, m8
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    ; rows 2-15: advance the reference window by 2 bytes per row pair
+    palignr           m5, m6, m3, 2
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 4
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 6
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 8
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 10
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 12
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 14
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    RET
+
+; 16x16 angular intra prediction, mode 13 (horizontal family), 8-bit, AVX2.
+; r0 = dst, r1 = dst stride, r2 = reference sample buffer; r3 = 3*stride.
+; Same row-pair pipeline as mode 12 (pshufb gather, pmaddubsw weights from
+; angHor_tab_13, pmulhrsw/pw_1024 rounded >>5), but the projected left
+; samples are picked by ang16_shuf_mode13 + 2*mmsize and prepended with
+; palignr ..., 11.
+INIT_YMM avx2
+cglobal intra_pred_ang16_13, 3,4,9
+    vbroadcasti128    m0, [angHor_tab_13]
+    vbroadcasti128    m1, [angHor_tab_13 + mmsize/2]
+    mova              m2, [pw_1024]
+    mova              m7, [ang16_shuf_mode13]
+    mova              m8, [ang16_shuf_mode13 + mmsize]
+    lea               r3, [r1 * 3]
+
+    ; Merge the shuffled left-column projections (m4) ahead of the above row
+    ; (m3).  NOTE(review): shift 11 / base r2+mmsize+12 come from the mode-13
+    ; inverse angle -- confirm against the C reference.
+    vbroadcasti128    m3, [r2 + mmsize + 1]
+    vbroadcasti128    m4, [r2]
+    pshufb            m4, [ang16_shuf_mode13 + mmsize * 2]
+
+    palignr           m3, m4, 11
+    vbroadcasti128    m6, [r2 + mmsize + 12]
+
+    ; rows 0-1
+    pshufb            m4, m3, m7
+    pshufb            m5, m3, m8
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    ; rows 2-15: advance the reference window by 2 bytes per row pair
+    palignr           m5, m6, m3, 2
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 4
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 6
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 8
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 10
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 12
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 14
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    RET
+
+; 16x16 angular intra prediction, mode 14 (horizontal family), 8-bit, AVX2.
+; r0 = dst, r1 = dst stride, r2 = reference sample buffer; r3 = 3*stride.
+; Identical pipeline to modes 12/13 but with angHor_tab_14 weights,
+; ang16_shuf_mode14 gathers, and left projections prepended with
+; palignr ..., 9 (next window at r2 + mmsize + 10).
+INIT_YMM avx2
+cglobal intra_pred_ang16_14, 3,4,9
+    vbroadcasti128    m0, [angHor_tab_14]
+    vbroadcasti128    m1, [angHor_tab_14 + mmsize/2]
+    mova              m2, [pw_1024]
+    mova              m7, [ang16_shuf_mode14]
+    mova              m8, [ang16_shuf_mode14 + mmsize]
+    lea               r3, [r1 * 3]
+
+    ; Merge shuffled left-column projections ahead of the above row.
+    vbroadcasti128    m3, [r2 + mmsize + 1]
+    vbroadcasti128    m4, [r2]
+    pshufb            m4, [ang16_shuf_mode14 + mmsize * 2]
+    palignr           m3, m4, 9
+    vbroadcasti128    m6, [r2 + mmsize + 10]
+
+    ; rows 0-1
+    pshufb            m4, m3, m7
+    pshufb            m5, m3, m8
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    ; rows 2-15: advance the reference window by 2 bytes per row pair
+    palignr           m5, m6, m3, 2
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 4
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 6
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 8
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 10
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 12
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 14
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    RET
+
+; 16x16 angular intra prediction, mode 15 (horizontal family), 8-bit, AVX2.
+; r0 = dst, r1 = dst stride, r2 = reference sample buffer; r3 = 3*stride.
+; Same pipeline as modes 12-14 with angHor_tab_15 / ang16_shuf_mode15;
+; left projections prepended with palignr ..., 7 (next window at
+; r2 + mmsize + 8).
+INIT_YMM avx2
+cglobal intra_pred_ang16_15, 3,4,9
+    vbroadcasti128    m0, [angHor_tab_15]
+    vbroadcasti128    m1, [angHor_tab_15 + mmsize/2]
+    mova              m2, [pw_1024]
+    mova              m7, [ang16_shuf_mode15]
+    mova              m8, [ang16_shuf_mode15 + mmsize]
+    lea               r3, [r1 * 3]
+
+    ; Merge shuffled left-column projections ahead of the above row.
+    vbroadcasti128    m3, [r2 + mmsize + 1]
+    vbroadcasti128    m4, [r2]
+    pshufb            m4, [ang16_shuf_mode15 + mmsize * 2]
+    palignr           m3, m3, m4, 7
+    vbroadcasti128    m6, [r2 + mmsize + 8]
+
+    ; rows 0-1
+    pshufb            m4, m3, m7
+    pshufb            m5, m3, m8
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    ; rows 2-15: advance the reference window by 2 bytes per row pair
+    palignr           m5, m6, m3, 2
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 4
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 6
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 8
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 10
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 12
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 14
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    RET
+
+; 16x16 angular intra prediction, mode 16 (horizontal family), 8-bit, AVX2.
+; r0 = dst, r1 = dst stride, r2 = reference sample buffer; r3 = 3*stride.
+; Same pipeline as modes 12-15 with angHor_tab_16 / ang16_shuf_mode16;
+; left projections prepended with palignr ..., 5 (next window at
+; r2 + mmsize + 6).
+INIT_YMM avx2
+cglobal intra_pred_ang16_16, 3,4,9
+    vbroadcasti128    m0, [angHor_tab_16]
+    vbroadcasti128    m1, [angHor_tab_16 + mmsize/2]
+    mova              m2, [pw_1024]
+    mova              m7, [ang16_shuf_mode16]
+    mova              m8, [ang16_shuf_mode16 + mmsize]
+    lea               r3, [r1 * 3]
+
+    ; Merge shuffled left-column projections ahead of the above row.
+    vbroadcasti128    m3, [r2 + mmsize + 1]
+    vbroadcasti128    m4, [r2]
+    pshufb            m4, [ang16_shuf_mode16 + mmsize * 2]
+    palignr           m3, m4, 5
+    vbroadcasti128    m6, [r2 + mmsize + 6]
+
+    ; rows 0-1
+    pshufb            m4, m3, m7
+    pshufb            m5, m3, m8
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    ; rows 2-15: advance the reference window by 2 bytes per row pair
+    palignr           m5, m6, m3, 2
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 4
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 6
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 8
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 10
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 12
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 14
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    RET
+
+; 16x16 angular intra prediction, mode 17 (horizontal family), 8-bit, AVX2.
+; r0 = dst, r1 = dst stride, r2 = reference sample buffer; r3 = 3*stride.
+; Same pipeline as modes 12-16 with angHor_tab_17 / ang16_shuf_mode17;
+; left projections prepended with palignr ..., 3 (next window at
+; r2 + mmsize + 4).
+INIT_YMM avx2
+cglobal intra_pred_ang16_17, 3,4,9
+    vbroadcasti128    m0, [angHor_tab_17]
+    vbroadcasti128    m1, [angHor_tab_17 + mmsize/2]
+    mova              m2, [pw_1024]
+    mova              m7, [ang16_shuf_mode17]
+    mova              m8, [ang16_shuf_mode17 + mmsize]
+    lea               r3, [r1 * 3]
+
+    ; Merge shuffled left-column projections ahead of the above row.
+    vbroadcasti128    m3, [r2 + mmsize + 1]
+    vbroadcasti128    m4, [r2]
+    pshufb            m4, [ang16_shuf_mode17 + mmsize * 2]
+    palignr           m3, m4, 3
+    vbroadcasti128    m6, [r2 + mmsize + 4]
+
+    ; rows 0-1
+    pshufb            m4, m3, m7
+    pshufb            m5, m3, m8
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    ; rows 2-15: advance the reference window by 2 bytes per row pair
+    palignr           m5, m6, m3, 2
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 4
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 6
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 8
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 10
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    lea               r0, [r0 + r1 * 4]
+
+    palignr           m5, m6, m3, 12
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0], xm4
+    vextracti128      [r0 + r1], m4, 1
+
+    palignr           m5, m6, m3, 14
+    pshufb            m4, m5, m7
+    pshufb            m5, m8
+
+    pmaddubsw         m4, m0
+    pmaddubsw         m5, m1
+    pmulhrsw          m4, m2
+    pmulhrsw          m5, m2
+    packuswb          m4, m5
+    movu              [r0 + r1 * 2], xm4
+    vextracti128      [r0 + r3], m4, 1
+    RET
+
+; 16x16 angular intra prediction, mode 11 (near-horizontal), 8-bit, AVX2.
+; r0 = dst, r1 = dst stride, r2 = reference sample buffer; r3 = 3*stride.
+; Mode 11 needs no projected left samples beyond element 0, so a single
+; shuffle mask is enough (reuses ang32_shuf_mode9).  Weights come from
+; angHor_tab_11; pmulhrsw by pw_1024 is the rounded >>5.
+INIT_YMM avx2
+cglobal intra_pred_ang16_11, 3,4,8
+    vbroadcasti128      m0, [angHor_tab_11]
+    vbroadcasti128      m1, [angHor_tab_11 + mmsize/2]
+    mova                m2, [pw_1024]
+    mova                m7, [ang32_shuf_mode9]
+    lea                 r3, [r1 * 3]
+
+    ; prepare for [0 -1 -2...]
+
+    ; Reference row from r2 + mmsize with the corner sample [r2] patched in.
+    movu               xm3, [r2 + mmsize]
+    pinsrb             xm3, [r2], 0
+    vbroadcasti128      m6, [r2 + mmsize + 16]
+    vinserti128         m3, m3, xm3, 1
+
+    ; rows 0-1
+    pshufb              m5, m3, m7              ; [ 0  1  0  1  0  1  0  1  0  1  0  1  0  1  0  1  1  2  1  2  1  2  1  2  1  2  1  2  1  2  1  2]
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], xm4
+    vextracti128        [r0 + r1], m4, 1
+
+    ; rows 2-15: advance the reference window by 2 bytes per row pair
+    palignr             m5, m6, m3, 2
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], xm4
+    vextracti128        [r0 + r3], m4, 1
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 4
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], xm4
+    vextracti128        [r0 + r1], m4, 1
+
+    palignr             m5, m6, m3, 6
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], xm4
+    vextracti128        [r0 + r3], m4, 1
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 8
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], xm4
+    vextracti128        [r0 + r1], m4, 1
+
+    palignr             m5, m6, m3, 10
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], xm4
+    vextracti128        [r0 + r3], m4, 1
+
+    lea                 r0, [r0 + r1 * 4]
+
+    palignr             m5, m6, m3, 12
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0], xm4
+    vextracti128        [r0 + r1], m4, 1
+
+    palignr             m5, m6, m3, 14
+    pshufb              m5, m7
+    pmaddubsw           m4, m5, m0
+    pmaddubsw           m5, m1
+    pmulhrsw            m4, m2
+    pmulhrsw            m5, m2
+    packuswb            m4, m5
+    movu                [r0 + r1 * 2], xm4
+    vextracti128        [r0 + r3], m4, 1
+    RET
+
+
+; transpose 8x32 to 16x16, used for intra_ang16x16 avx2 asm
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+; TRANSPOSE_STORE_8x32 %1..%8 = data registers, %9..%12 = scratch registers.
+; Stores eight YMM rows as a 16x16 pixel block; the carry flag (set by the
+; caller with clc/stc before the call) selects the path:
+;   CF=0: byte-transpose the 8x32 register block with the punpck ladder
+;         below, then store -- used by horizontal-family modes.
+;   CF=1 (.skip): store rows directly, only fixing 128-bit lane order with
+;         vpermq q3120 -- used by vertical-family modes.
+; Callers provide r0 = dst, r1 = stride, r5 = 3*stride, r6 = 4*stride.
+%macro TRANSPOSE_STORE_8x32 12
+    jc             .skip
+
+    ; interleave bytes, then words, then dwords to transpose 8 rows
+    punpcklbw       m%9, m%1, m%2
+    punpckhbw       m%1, m%2
+    punpcklbw       m%10, m%3, m%4
+    punpckhbw       m%3, m%4
+
+    punpcklwd       m%11, m%9, m%10
+    punpckhwd       m%9, m%10
+    punpcklwd       m%10, m%1, m%3
+    punpckhwd       m%1, m%3
+
+    punpckldq       m%12, m%11, m%10
+    punpckhdq       m%11, m%10
+    punpckldq       m%10, m%9, m%1
+    punpckhdq       m%9, m%1
+
+    punpcklbw       m%1, m%5, m%6
+    punpckhbw       m%5, m%6
+    punpcklbw       m%2, m%7, m%8
+    punpckhbw       m%7, m%8
+
+    punpcklwd       m%3, m%1, m%2
+    punpckhwd       m%1, m%2
+    punpcklwd       m%4, m%5, m%7
+    punpckhwd       m%5, m%7
+
+    punpckldq       m%2, m%3, m%4
+    punpckhdq       m%3, m%4
+    punpckldq       m%4, m%1, m%5
+    punpckhdq       m%1, m%5
+
+    punpckldq       m%5, m%12, m%2
+    punpckhdq       m%6, m%12, m%2
+    punpckldq       m%7, m%10, m%4
+    punpckhdq       m%8, m%10, m%4
+
+    punpckldq       m%2, m%11, m%3
+    punpckhdq       m%11, m%11, m%3
+    punpckldq       m%4, m%9, m%1
+    punpckhdq       m%9, m%9, m%1
+
+    ; 16 rows of 16 bytes: low lanes first, then the high lanes
+    movu            [r0 + r1 * 0], xm%5
+    movu            [r0 + r1 * 1], xm%6
+    movu            [r0 + r1 * 2], xm%2
+    movu            [r0 + r5 * 1], xm%11
+
+    add             r0, r6
+
+    movu            [r0 + r1 * 0], xm%7
+    movu            [r0 + r1 * 1], xm%8
+    movu            [r0 + r1 * 2], xm%4
+    movu            [r0 + r5 * 1], xm%9
+
+    add             r0, r6
+
+    vextracti128    [r0 + r1 * 0], m%5, 1
+    vextracti128    [r0 + r1 * 1], m%6, 1
+    vextracti128    [r0 + r1 * 2], m%2, 1
+    vextracti128    [r0 + r5 * 1], m%11, 1
+
+    add             r0, r6
+
+    vextracti128    [r0 + r1 * 0], m%7, 1
+    vextracti128    [r0 + r1 * 1], m%8, 1
+    vextracti128    [r0 + r1 * 2], m%4, 1
+    vextracti128    [r0 + r5 * 1], m%9, 1
+    jmp             .end
+
+.skip:
+    ; direct store path: restore lane order only, no transpose
+    vpermq          m%1, m%1, q3120
+    vpermq          m%2, m%2, q3120
+    vpermq          m%3, m%3, q3120
+    vpermq          m%4, m%4, q3120
+    vpermq          m%5, m%5, q3120
+    vpermq          m%6, m%6, q3120
+    vpermq          m%7, m%7, q3120
+    vpermq          m%8, m%8, q3120
+
+    movu            [r0 + r1 * 0], xm%1
+    movu            [r0 + r1 * 1], xm%2
+    movu            [r0 + r1 * 2], xm%3
+    movu            [r0 + r5 * 1], xm%4
+
+    add             r0, r6
+
+    movu            [r0 + r1 * 0], xm%5
+    movu            [r0 + r1 * 1], xm%6
+    movu            [r0 + r1 * 2], xm%7
+    movu            [r0 + r5 * 1], xm%8
+
+    add             r0, r6
+
+    vextracti128    [r0 + r1 * 0], m%1, 1
+    vextracti128    [r0 + r1 * 1], m%2, 1
+    vextracti128    [r0 + r1 * 2], m%3, 1
+    vextracti128    [r0 + r5 * 1], m%4, 1
+
+    add             r0, r6
+
+    vextracti128    [r0 + r1 * 0], m%5, 1
+    vextracti128    [r0 + r1 * 1], m%6, 1
+    vextracti128    [r0 + r1 * 2], m%7, 1
+    vextracti128    [r0 + r5 * 1], m%8, 1
+.end:
+%endmacro
+
+; Shared worker for angular modes 3 (CF=0, transposed store) and 33 (CF=1,
+; direct store); see TRANSPOSE_STORE_8x32.  The cglobal wrappers set:
+;   r2 = reference samples, r3 = ang_table_avx2 + 16*32, m7 = pw_1024,
+;   r5 = 3*stride, r6 = 4*stride, and the carry flag.
+; Bracketed numbers in the comments below are the fractional weights ([26],
+; [20], ...) selected from the angle table; pmulhrsw by pw_1024 rounds >>5.
+cglobal ang16_mode_3_33
+    ; rows 0 to 7
+    movu            m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    ; interleave adjacent reference pairs so pmaddubsw can filter them
+    punpckhbw       m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw       m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vextracti128    xm1,        m0, 1
+    vperm2i128      m0,         m0, m2, 0x20        ; [17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vperm2i128      m2,         m2, m1, 0x20        ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+
+    pmaddubsw       m4,         m0, [r3 + 10 * 32]  ; [26]
+    pmulhrsw        m4,         m7
+
+    palignr         m5,         m2, m0, 2
+    pmaddubsw       m5,         [r3 + 4 * 32]       ; [20]
+    pmulhrsw        m5,         m7
+
+    palignr         m6,         m2, m0, 4
+    palignr         m8,         m2, m0, 6
+    pmaddubsw       m6,         [r3 - 2 * 32]       ; [14]
+    pmulhrsw        m6,         m7
+    pmaddubsw       m8,         [r3 - 8 * 32]       ; [8]
+    pmulhrsw        m8,         m7
+
+    palignr         m10,        m2, m0, 8
+    pmaddubsw       m9,         m10, [r3 - 14 * 32] ; [2]
+    pmulhrsw        m9,         m7
+    pmaddubsw       m10,        [r3 + 12 * 32]      ; [28]
+    pmulhrsw        m10,        m7
+
+    palignr         m11,        m2, m0, 10
+    palignr         m12,        m2, m0, 12
+    pmaddubsw       m11,        [r3 + 6 * 32]       ; [22]
+    pmulhrsw        m11,        m7
+    pmaddubsw       m12,        [r3]                ; [16]
+    pmulhrsw        m12,        m7
+
+    ; rows 8 to 15
+    palignr         m3,         m2, m0, 14
+    palignr         m1,         m1, m2, 14
+    pmaddubsw       m3,         [r3 - 6 * 32]       ; [10]
+    pmulhrsw        m3,         m7
+    packuswb        m4,         m3
+
+    pmaddubsw       m3,         m2, [r3 - 12 * 32]  ; [4]
+    pmulhrsw        m3,         m7
+    packuswb        m5,         m3
+
+    pmaddubsw       m3,         m2, [r3 + 14 * 32]  ; [30]
+    pmulhrsw        m3,         m7
+    packuswb        m6,         m3
+
+    ; extend the interleaved reference with samples 25.. for the last rows
+    movu            xm0,        [r2 + 25]
+    movu            xm1,        [r2 + 26]
+    punpcklbw       m0,         m1
+    mova            m1,         m2
+    vinserti128     m1,         m1, xm0, 0
+    vpermq          m1,         m1, 01001110b
+
+    palignr         m3,         m1, m2, 2
+    pmaddubsw       m3,         [r3 + 8 * 32]       ; [24]
+    pmulhrsw        m3,         m7
+    packuswb        m8,         m3
+
+    palignr         m3,         m1, m2, 4
+    pmaddubsw       m3,         [r3 + 2 * 32]       ; [18]
+    pmulhrsw        m3,         m7
+    packuswb        m9,         m3
+
+    palignr         m3,         m1, m2, 6
+    pmaddubsw       m3,         [r3 - 4 * 32]       ; [12]
+    pmulhrsw        m3,         m7
+    packuswb        m10,        m3
+
+    palignr         m3,         m1, m2, 8
+    pmaddubsw       m3,         [r3 - 10 * 32]      ; [6]
+    pmulhrsw        m3,         m7
+    packuswb        m11,        m3
+
+    ; last row uses weight 32: pure copy of samples at r2+14, no filtering
+    pmovzxbw        m1,         [r2 + 14]
+    packuswb        m12,        m1
+
+    TRANSPOSE_STORE_8x32 4, 5, 6, 8, 9, 10, 11, 12, 0, 1, 2, 3
+    ret
+
+; 16x16 angular intra prediction, mode 3 (horizontal family), 8-bit, AVX2.
+; r0 = dst, r1 = dst stride, r2 = reference sample buffer.
+; `add r2, 32` selects the second half of the reference buffer (presumably
+; the left-neighbour samples -- confirm against the buffer layout), and
+; clc (CF=0) makes the shared worker store its output transposed.
+INIT_YMM avx2
+cglobal intra_pred_ang16_3, 3, 7, 13
+    add             r2, 32
+    lea             r3, [ang_table_avx2 + 16 * 32]
+    lea             r5, [r1 * 3]       ; r5 -> 3 * stride
+    lea             r6, [r1 * 4]       ; r6 -> 4 * stride
+    mova            m7, [pw_1024]
+    clc
+
+    call ang16_mode_3_33
+    RET
+
+; 16x16 angular intra prediction, mode 33 (vertical family), 8-bit, AVX2.
+; Same worker as mode 3, but r2 stays at the first half of the reference
+; buffer and stc (CF=1) selects the direct (non-transposed) store path.
+INIT_YMM avx2
+cglobal intra_pred_ang16_33, 3, 7, 13
+    lea             r3, [ang_table_avx2 + 16 * 32]
+    lea             r5, [r1 * 3]       ; r5 -> 3 * stride
+    lea             r6, [r1 * 4]       ; r6 -> 4 * stride
+    mova            m7, [pw_1024]
+    stc
+
+    call ang16_mode_3_33
+    RET
+
+; Shared worker for angular modes 4 (CF=0, transposed store) and 32 (CF=1,
+; direct store).  Same register contract as ang16_mode_3_33: r2 = reference
+; samples, r3 = ang_table_avx2 + 16*32, m7 = pw_1024, r5 = 3*stride,
+; r6 = 4*stride, carry flag set by the wrapper.  Bracketed numbers are the
+; fractional weights selected from the angle table.
+cglobal ang16_mode_4_32
+    ; rows 0 to 7
+    movu            m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    ; interleave adjacent reference pairs so pmaddubsw can filter them
+    punpckhbw       m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw       m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vextracti128    xm1,        m0, 1
+    vperm2i128      m0,         m0, m2, 0x20        ; [17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vperm2i128      m2,         m2, m1, 0x20        ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+
+    pmaddubsw       m4,         m0, [r3 + 5 * 32]   ; [21]
+    pmulhrsw        m4,         m7
+
+    palignr         m1,         m2, m0, 2
+    pmaddubsw       m5,         m1, [r3 - 6 * 32]   ; [10]
+    pmulhrsw        m5,         m7
+
+    palignr         m8,         m2, m0, 4
+    pmaddubsw       m6,         m1, [r3 + 15 * 32]  ; [31]
+    pmulhrsw        m6,         m7
+    pmaddubsw       m8,         [r3 + 4 * 32]       ; [20]
+    pmulhrsw        m8,         m7
+
+    palignr         m10,        m2, m0, 6
+    pmaddubsw       m9,         m10, [r3 - 7 * 32]  ; [9]
+    pmulhrsw        m9,         m7
+    pmaddubsw       m10,        [r3 + 14 * 32]      ; [30]
+    pmulhrsw        m10,        m7
+
+    palignr         m11,        m2, m0, 8
+    palignr         m1,         m2, m0, 10
+    pmaddubsw       m11,        [r3 + 3 * 32]       ; [19]
+    pmulhrsw        m11,        m7
+    pmaddubsw       m12,        m1, [r3 - 8 * 32]   ; [8]
+    pmulhrsw        m12,        m7
+
+    ; rows 8 to 15
+    pmaddubsw       m3,         m1, [r3 + 13 * 32]  ; [29]
+    pmulhrsw        m3,         m7
+    packuswb        m4,         m3
+
+    palignr         m3,         m2, m0, 12
+    pmaddubsw       m3,         m3, [r3 + 2 * 32]   ; [18]
+    pmulhrsw        m3,         m7
+    packuswb        m5,         m3
+
+    palignr         m1,         m2, m0, 14
+    pmaddubsw       m3,         m1, [r3 - 9 * 32]   ; [7]
+    pmulhrsw        m3,         m7
+    packuswb        m6,         m3
+
+    pmaddubsw       m3,         m1, [r3 + 12 * 32]  ; [28]
+    pmulhrsw        m3,         m7
+    packuswb        m8,         m3
+
+    palignr         m3,         m2, m0, 16
+    pmaddubsw       m3,         [r3 + 1 * 32]       ; [17]
+    pmulhrsw        m3,         m7
+    packuswb        m9,         m3
+
+    ; extend the interleaved reference with samples 25.. for the last rows
+    movu            xm0,        [r2 + 25]
+    movu            xm1,        [r2 + 26]
+    punpcklbw       m0,         m1
+    mova            m1,         m2
+    vinserti128     m1,         m1, xm0, 0
+    vpermq          m1,         m1, 01001110b
+
+    palignr         m0,         m1, m2, 2
+    pmaddubsw       m3,         m0, [r3 - 10 * 32]  ; [6]
+    pmulhrsw        m3,         m7
+    packuswb        m10,        m3
+
+    pmaddubsw       m3,         m0, [r3 + 11 * 32]  ; [27]
+    pmulhrsw        m3,         m7
+    packuswb        m11,        m3
+
+    palignr         m1,         m1, m2, 4
+    pmaddubsw       m1,         [r3]                ; [16]
+    pmulhrsw        m1,         m7
+    packuswb        m12,        m1
+
+    TRANSPOSE_STORE_8x32 4, 5, 6, 8, 9, 10, 11, 12, 0, 1, 2, 3
+    ret
+
+; 16x16 intra angular prediction, mode 4 (AVX2 build of the shared worker).
+; Thin wrapper: positions the register arguments and dispatches to the
+; combined ang16_mode_4_32 routine. CF is used as a mode selector for the
+; callee: clc here (mode 4) vs. stc in intra_pred_ang16_32 below (mode 32).
+INIT_YMM avx2
+cglobal intra_pred_ang16_4, 3, 7, 13
+    add             r2, 32             ; skip to the second half of the reference buffer (presumably the left neighbours -- TODO confirm against caller layout)
+    lea             r3, [ang_table_avx2 + 16 * 32]  ; r3 -> centre of the angular weight table; worker indexes it with +/- offsets
+    lea             r5, [r1 * 3]       ; r5 -> 3 * stride
+    lea             r6, [r1 * 4]       ; r6 -> 4 * stride
+    mova            m7, [pw_1024]      ; rounding constant for pmulhrsw
+    clc                                ; CF = 0 -> mode-4 path inside ang16_mode_4_32
+
+    call ang16_mode_4_32
+    RET
+
+; 16x16 intra angular prediction, mode 32 (AVX2).
+; Same shared worker as intra_pred_ang16_4, but with STC so the callee takes
+; the mode-32 path, and without the +32 reference offset (mode 32 reads the
+; first half of the reference buffer, presumably the top neighbours).
+INIT_YMM avx2
+cglobal intra_pred_ang16_32, 3, 7, 13
+    lea             r3, [ang_table_avx2 + 16 * 32]  ; centre of the angular weight table
+    lea             r5, [r1 * 3]       ; r5 -> 3 * stride
+    lea             r6, [r1 * 4]       ; r6 -> 4 * stride
+    mova            m7, [pw_1024]      ; rounding constant for pmulhrsw
+    stc                                ; CF = 1 -> mode-32 path inside ang16_mode_4_32
+
+    call ang16_mode_4_32
+    RET
+
+; ang16_mode_5: worker for 16x16 angular prediction, mode 5.
+; Inputs (set up by the intra_pred_ang16_5 wrapper): r2 = reference samples,
+; r3 = ang_table_avx2 + 16*32 (table centre), m7 = pw_1024.
+; Loads 32 reference bytes at offsets +1/+2, interleaves adjacent pairs so
+; each word holds (ref[i], ref[i+1]), then for each output row multiplies by
+; that row's (32-frac, frac) weight pair from the table (the ; [N] comments
+; give the fractional weight), rounds via pmulhrsw, packs to bytes and
+; transposes/stores through TRANSPOSE_STORE_8x32.
+cglobal ang16_mode_5
+    ; rows 0 to 7
+    movu            m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    ; interleave and fix up the cross-lane ordering so m0/m2 hold
+    ; consecutive (ref[i], ref[i+1]) byte pairs
+    punpckhbw       m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw       m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vextracti128    xm1,        m0, 1
+    vperm2i128      m0,         m0, m2, 0x20        ; [17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vperm2i128      m2,         m2, m1, 0x20        ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+
+    pmaddubsw       m4,         m0, [r3 + 1 * 32]   ; [17]
+    pmulhrsw        m4,         m7
+
+    palignr         m1,         m2, m0, 2
+    pmaddubsw       m5,         m1, [r3 - 14 * 32]  ; [2]
+    pmulhrsw        m5,         m7
+
+    palignr         m3,         m2, m0, 4
+    pmaddubsw       m6,         m1, [r3 + 3 * 32]   ; [19]
+    pmulhrsw        m6,         m7
+    pmaddubsw       m8,         m3, [r3 - 12 * 32]  ; [4]
+    pmulhrsw        m8,         m7
+    pmaddubsw       m9,         m3, [r3 + 5 * 32]   ; [21]
+    pmulhrsw        m9,         m7
+
+    palignr         m3,         m2, m0, 6
+    pmaddubsw       m10,        m3, [r3 - 10 * 32]  ; [6]
+    pmulhrsw        m10,        m7
+
+    palignr         m1,         m2, m0, 8
+    pmaddubsw       m11,        m3, [r3 + 7 * 32]   ; [23]
+    pmulhrsw        m11,        m7
+    pmaddubsw       m12,        m1, [r3 - 8 * 32]   ; [8]
+    pmulhrsw        m12,        m7
+
+    ; rows 8 to 15
+    pmaddubsw       m3,         m1, [r3 + 9 * 32]   ; [25]
+    pmulhrsw        m3,         m7
+    packuswb        m4,         m3
+
+    palignr         m1,         m2, m0, 10
+    pmaddubsw       m3,         m1, [r3 - 6 * 32]   ; [10]
+    pmulhrsw        m3,         m7
+    packuswb        m5,         m3
+
+    pmaddubsw       m3,         m1, [r3 + 11 * 32]  ; [27]
+    pmulhrsw        m3,         m7
+    packuswb        m6,         m3
+
+    palignr         m1,         m2, m0, 12
+    pmaddubsw       m3,         m1, [r3 - 4 * 32]   ; [12]
+    pmulhrsw        m3,         m7
+    packuswb        m8,         m3
+
+    pmaddubsw       m3,         m1, [r3 + 13 * 32]  ; [29]
+    pmulhrsw        m3,         m7
+    packuswb        m9,         m3
+
+    palignr         m1,         m2, m0, 14
+    pmaddubsw       m3,         m1, [r3 - 2 * 32]   ; [14]
+    pmulhrsw        m3,         m7
+    packuswb        m10,        m3
+
+    pmaddubsw       m3,         m1, [r3 + 15 * 32]  ; [31]
+    pmulhrsw        m3,         m7
+    packuswb        m11,        m3
+
+    palignr         m1,         m2, m0, 16
+    pmaddubsw       m1,         [r3]                ; [16]
+    pmulhrsw        m1,         m7
+    packuswb        m12,        m1
+
+    TRANSPOSE_STORE_8x32 4, 5, 6, 8, 9, 10, 11, 12, 0, 1, 2, 3
+    ret
+
+; 16x16 intra angular prediction, mode 5 (AVX2) -- wrapper for ang16_mode_5.
+; Register setup mirrors intra_pred_ang16_4; clc presumably feeds a CF-based
+; mode switch inside the shared store macro (not visible here -- TODO confirm).
+INIT_YMM avx2
+cglobal intra_pred_ang16_5, 3, 7, 13
+    add             r2, 32             ; second half of the reference buffer
+    lea             r3, [ang_table_avx2 + 16 * 32]  ; centre of angular weight table
+    lea             r5, [r1 * 3]       ; r5 -> 3 * stride
+    lea             r6, [r1 * 4]       ; r6 -> 4 * stride
+    mova            m7, [pw_1024]      ; rounding constant for pmulhrsw
+    clc
+
+    call ang16_mode_5
+    RET
+
+; ang16_mode_6: worker for 16x16 angular prediction, mode 6.
+; Same structure as ang16_mode_5 (interleaved reference pairs, per-row
+; pmaddubsw with table weights, pmulhrsw rounding, packed + transposed
+; store); only the weight offsets and palignr shifts differ per mode.
+cglobal ang16_mode_6
+    ; rows 0 to 7
+    movu            m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpckhbw       m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw       m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vextracti128    xm1,        m0, 1
+    vperm2i128      m0,         m0, m2, 0x20        ; [17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vperm2i128      m2,         m2, m1, 0x20        ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+
+    pmaddubsw       m4,         m0, [r3 - 3 * 32]   ; [13]
+    pmulhrsw        m4,         m7
+
+    pmaddubsw       m5,         m0, [r3 + 10 * 32]  ; [26]
+    pmulhrsw        m5,         m7
+
+    palignr         m3,         m2, m0, 2
+    pmaddubsw       m6,         m3, [r3 - 9 * 32]   ; [7]
+    pmulhrsw        m6,         m7
+    pmaddubsw       m8,         m3, [r3 + 4 * 32]   ; [20]
+    pmulhrsw        m8,         m7
+
+    palignr         m3,         m2, m0, 4
+    pmaddubsw       m9,         m3, [r3 - 15 * 32]  ; [1]
+    pmulhrsw        m9,         m7
+
+    pmaddubsw       m10,        m3, [r3 - 2 * 32]   ; [14]
+    pmulhrsw        m10,        m7
+
+    pmaddubsw       m11,        m3, [r3 + 11 * 32]  ; [27]
+    pmulhrsw        m11,        m7
+
+    palignr         m1,         m2, m0, 6
+    pmaddubsw       m12,        m1, [r3 - 8 * 32]   ; [8]
+    pmulhrsw        m12,        m7
+
+    ; rows 8 to 15
+    pmaddubsw       m3,         m1, [r3 + 5 * 32]   ; [21]
+    pmulhrsw        m3,         m7
+    packuswb        m4,         m3
+
+    palignr         m1,         m2, m0, 8
+    pmaddubsw       m3,         m1, [r3 - 14 * 32]  ; [2]
+    pmulhrsw        m3,         m7
+    packuswb        m5,         m3
+
+    pmaddubsw       m3,         m1, [r3 - 1 * 32]   ; [15]
+    pmulhrsw        m3,         m7
+    packuswb        m6,         m3
+
+    pmaddubsw       m3,         m1, [r3 + 12 * 32]  ; [28]
+    pmulhrsw        m3,         m7
+    packuswb        m8,         m3
+
+    palignr         m1,         m2, m0, 10
+    pmaddubsw       m3,         m1, [r3 - 7 * 32]   ; [9]
+    pmulhrsw        m3,         m7
+    packuswb        m9,         m3
+
+    pmaddubsw       m3,         m1, [r3 + 6 * 32]   ; [22]
+    pmulhrsw        m3,         m7
+    packuswb        m10,        m3
+
+    palignr         m1,         m2, m0, 12
+    pmaddubsw       m3,         m1, [r3 - 13 * 32]  ; [3]
+    pmulhrsw        m3,         m7
+    packuswb        m11,        m3
+
+    pmaddubsw       m1,         [r3]                ; [16]
+    pmulhrsw        m1,         m7
+    packuswb        m12,        m1
+
+    TRANSPOSE_STORE_8x32 4, 5, 6, 8, 9, 10, 11, 12, 0, 1, 2, 3
+    ret
+
+; 16x16 intra angular prediction, mode 6 (AVX2) -- wrapper for ang16_mode_6.
+; Same register setup as the other ang16 wrappers above.
+INIT_YMM avx2
+cglobal intra_pred_ang16_6, 3, 7, 13
+    add             r2, 32             ; second half of the reference buffer
+    lea             r3, [ang_table_avx2 + 16 * 32]  ; centre of angular weight table
+    lea             r5, [r1 * 3]       ; r5 -> 3 * stride
+    lea             r6, [r1 * 4]       ; r6 -> 4 * stride
+    mova            m7, [pw_1024]      ; rounding constant for pmulhrsw
+    clc
+
+    call ang16_mode_6
+    RET
+
+; ang16_mode_7: worker for 16x16 angular prediction, mode 7.
+; Identical skeleton to ang16_mode_5/6; mode 7's shallower angle means the
+; reference window advances (palignr) only every few rows while the
+; fractional weights (bracketed constants) step by 9 per row mod 32.
+cglobal ang16_mode_7
+    ; rows 0 to 7
+    movu            m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpckhbw       m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw       m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vextracti128    xm1,        m0, 1
+    vperm2i128      m0,         m0, m2, 0x20        ; [17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vperm2i128      m2,         m2, m1, 0x20        ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+
+    pmaddubsw       m4,         m0, [r3 - 7 * 32]   ; [9]
+    pmulhrsw        m4,         m7
+
+    pmaddubsw       m5,         m0, [r3 + 2 * 32]   ; [18]
+    pmulhrsw        m5,         m7
+    pmaddubsw       m6,         m0, [r3 + 11 * 32]  ; [27]
+    pmulhrsw        m6,         m7
+
+    palignr         m3,         m2, m0, 2
+    pmaddubsw       m8,         m3, [r3 - 12 * 32]  ; [4]
+    pmulhrsw        m8,         m7
+
+    pmaddubsw       m9,         m3, [r3 - 3 * 32]   ; [13]
+    pmulhrsw        m9,         m7
+
+    pmaddubsw       m10,        m3, [r3 + 6 * 32]   ; [22]
+    pmulhrsw        m10,        m7
+
+    pmaddubsw       m11,        m3, [r3 + 15 * 32]  ; [31]
+    pmulhrsw        m11,        m7
+
+    palignr         m1,         m2, m0, 4
+    pmaddubsw       m12,        m1, [r3 - 8 * 32]   ; [8]
+    pmulhrsw        m12,        m7
+
+    ; rows 8 to 15
+    pmaddubsw       m3,         m1, [r3 + 1 * 32]   ; [17]
+    pmulhrsw        m3,         m7
+    packuswb        m4,         m3
+
+    pmaddubsw       m3,         m1, [r3 + 10 * 32]  ; [26]
+    pmulhrsw        m3,         m7
+    packuswb        m5,         m3
+
+    palignr         m1,         m2, m0, 6
+    pmaddubsw       m3,         m1, [r3 - 13 * 32]  ; [3]
+    pmulhrsw        m3,         m7
+    packuswb        m6,         m3
+
+    pmaddubsw       m3,         m1, [r3 - 4 * 32]   ; [12]
+    pmulhrsw        m3,         m7
+    packuswb        m8,         m3
+
+    pmaddubsw       m3,         m1, [r3 + 5 * 32]   ; [21]
+    pmulhrsw        m3,         m7
+    packuswb        m9,         m3
+
+    pmaddubsw       m3,         m1, [r3 + 14 * 32]  ; [30]
+    pmulhrsw        m3,         m7
+    packuswb        m10,        m3
+
+    palignr         m1,         m2, m0, 8
+    pmaddubsw       m3,         m1, [r3 - 9 * 32]   ; [7]
+    pmulhrsw        m3,         m7
+    packuswb        m11,        m3
+
+    pmaddubsw       m1,         [r3]                ; [16]
+    pmulhrsw        m1,         m7
+    packuswb        m12,        m1
+
+    TRANSPOSE_STORE_8x32 4, 5, 6, 8, 9, 10, 11, 12, 0, 1, 2, 3
+    ret
+
+; 16x16 intra angular prediction, mode 7 (AVX2) -- wrapper for ang16_mode_7.
+INIT_YMM avx2
+cglobal intra_pred_ang16_7, 3, 7, 13
+    add             r2, 32             ; second half of the reference buffer
+    lea             r3, [ang_table_avx2 + 16 * 32]  ; centre of angular weight table
+    lea             r5, [r1 * 3]       ; r5 -> 3 * stride
+    lea             r6, [r1 * 4]       ; r6 -> 4 * stride
+    mova            m7, [pw_1024]      ; rounding constant for pmulhrsw
+    clc
+
+    call ang16_mode_7
+    RET
+
+; ang16_mode_8: worker for 16x16 angular prediction, mode 8.
+; Shallowest diagonal of this group: the reference window shifts (palignr)
+; only twice across all 16 rows, weights stepping by 5 per row.
+; Same pipeline as ang16_mode_5/6/7.
+cglobal ang16_mode_8
+    ; rows 0 to 7
+    movu            m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu            m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpckhbw       m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw       m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vextracti128    xm1,        m0, 1
+    vperm2i128      m0,         m0, m2, 0x20        ; [17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    vperm2i128      m2,         m2, m1, 0x20        ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+
+    pmaddubsw       m4,         m0, [r3 - 11 * 32]  ; [5]
+    pmulhrsw        m4,         m7
+    pmaddubsw       m5,         m0, [r3 - 6 * 32]   ; [10]
+    pmulhrsw        m5,         m7
+
+    pmaddubsw       m6,         m0, [r3 - 1 * 32]   ; [15]
+    pmulhrsw        m6,         m7
+    pmaddubsw       m8,         m0, [r3 + 4 * 32]   ; [20]
+    pmulhrsw        m8,         m7
+    pmaddubsw       m9,         m0, [r3 + 9 * 32]   ; [25]
+    pmulhrsw        m9,         m7
+
+    pmaddubsw       m10,        m0, [r3 + 14 * 32]  ; [30]
+    pmulhrsw        m10,        m7
+    palignr         m1,         m2, m0, 2
+    pmaddubsw       m11,        m1, [r3 - 13 * 32]  ; [3]
+    pmulhrsw        m11,        m7
+    pmaddubsw       m12,        m1, [r3 - 8 * 32]   ; [8]
+    pmulhrsw        m12,        m7
+
+    ; rows 8 to 15
+    pmaddubsw       m3,         m1, [r3 - 3 * 32]   ; [13]
+    pmulhrsw        m3,         m7
+    packuswb        m4,         m3
+    pmaddubsw       m3,         m1, [r3 + 2 * 32]   ; [18]
+    pmulhrsw        m3,         m7
+    packuswb        m5,         m3
+
+    pmaddubsw       m3,         m1, [r3 + 7 * 32]   ; [23]
+    pmulhrsw        m3,         m7
+    packuswb        m6,         m3
+    pmaddubsw       m3,         m1, [r3 + 12 * 32]  ; [28]
+    pmulhrsw        m3,         m7
+    packuswb        m8,         m3
+
+    palignr         m1,         m2, m0, 4
+    pmaddubsw       m3,         m1, [r3 - 15 * 32]  ; [1]
+    pmulhrsw        m3,         m7
+    packuswb        m9,         m3
+    pmaddubsw       m3,         m1, [r3 - 10 * 32]  ; [6]
+    pmulhrsw        m3,         m7
+    packuswb        m10,        m3
+
+    pmaddubsw       m3,         m1, [r3 - 5 * 32]   ; [11]
+    pmulhrsw        m3,         m7
+    packuswb        m11,        m3
+    pmaddubsw       m1,         [r3]                ; [16]
+    pmulhrsw        m1,         m7
+    packuswb        m12,        m1
+
+    TRANSPOSE_STORE_8x32 4, 5, 6, 8, 9, 10, 11, 12, 0, 1, 2, 3
+    ret
+
+; 16x16 intra angular prediction, mode 8 (AVX2) -- wrapper for ang16_mode_8.
+INIT_YMM avx2
+cglobal intra_pred_ang16_8, 3, 7, 13
+    add             r2, 32             ; second half of the reference buffer
+    lea             r3, [ang_table_avx2 + 16 * 32]  ; centre of angular weight table
+    lea             r5, [r1 * 3]       ; r5 -> 3 * stride
+    lea             r6, [r1 * 4]       ; r6 -> 4 * stride
+    mova            m7, [pw_1024]      ; rounding constant for pmulhrsw
+    clc
+
+    call ang16_mode_8
+    RET
+%endif  ; ARCH_X86_64
+
+; 16x16 intra angular prediction, mode 9 (AVX2), self-contained.
+; Uses a fixed per-row weight pair (angHor_tab_9) and the ang16_shuf_mode9
+; byte shuffle; each iteration slides the reference window 2 bytes further
+; (palignr of m6:m3), filters, and emits two output rows (low/high lane).
+INIT_YMM avx2
+cglobal intra_pred_ang16_9, 3,4,8
+    vbroadcasti128  m0, [angHor_tab_9]              ; weights, first 8 columns
+    vbroadcasti128  m1, [angHor_tab_9 + mmsize/2]   ; weights, last 8 columns
+    mova            m2, [pw_1024]                   ; rounding constant
+    lea             r3, [r1 * 3]
+    mova            m7, [ang16_shuf_mode9]          ; reference-byte shuffle pattern
+
+    vbroadcasti128  m6, [r2 + mmsize + 17]          ; upper reference window
+    vbroadcasti128  m3, [r2 + mmsize + 1]           ; lower reference window
+
+    ; rows 0-1
+    pshufb          m5, m3, m7
+    pmaddubsw       m4, m5, m0
+    pmaddubsw       m5, m1
+    pmulhrsw        m4, m2
+    pmulhrsw        m5, m2
+    packuswb        m4, m5
+    movu            [r0], xm4
+    vextracti128    [r0 + r1], m4, 1
+
+    ; rows 2-3: slide the window 2 reference bytes
+    palignr         m5, m6, m3, 2
+    pshufb          m5, m7
+    pmaddubsw       m4, m5, m0
+    pmaddubsw       m5, m1
+    pmulhrsw        m4, m2
+    pmulhrsw        m5, m2
+    packuswb        m4, m5
+    movu            [r0 + r1 * 2], xm4
+    vextracti128    [r0 + r3], m4, 1
+
+    lea             r0, [r0 + r1 * 4]
+
+    ; rows 4-5
+    palignr         m5, m6, m3, 4
+    pshufb          m5, m7
+    pmaddubsw       m4, m5, m0
+    pmaddubsw       m5, m1
+    pmulhrsw        m4, m2
+    pmulhrsw        m5, m2
+    packuswb        m4, m5
+    movu            [r0], xm4
+    vextracti128    [r0 + r1], m4, 1
+
+    ; rows 6-7
+    palignr         m5, m6, m3, 6
+    pshufb          m5, m7
+    pmaddubsw       m4, m5, m0
+    pmaddubsw       m5, m1
+    pmulhrsw        m4, m2
+    pmulhrsw        m5, m2
+    packuswb        m4, m5
+    movu            [r0 + r1 * 2], xm4
+    vextracti128    [r0 + r3], m4, 1
+
+    lea             r0, [r0 + r1 * 4]
+
+    ; rows 8-9
+    palignr         m5, m6, m3, 8
+    pshufb          m5, m7
+    pmaddubsw       m4, m5, m0
+    pmaddubsw       m5, m1
+    pmulhrsw        m4, m2
+    pmulhrsw        m5, m2
+    packuswb        m4, m5
+    movu            [r0], xm4
+    vextracti128    [r0 + r1], m4, 1
+
+    ; rows 10-11
+    palignr         m5, m6, m3, 10
+    pshufb          m5, m7
+    pmaddubsw       m4, m5, m0
+    pmaddubsw       m5, m1
+    pmulhrsw        m4, m2
+    pmulhrsw        m5, m2
+    packuswb        m4, m5
+    movu            [r0 + r1 * 2], xm4
+    vextracti128    [r0 + r3], m4, 1
+
+    lea             r0, [r0 + r1 * 4]
+
+    ; rows 12-13
+    palignr         m5, m6, m3, 12
+    pshufb          m5, m7
+    pmaddubsw       m4, m5, m0
+    pmaddubsw       m5, m1
+    pmulhrsw        m4, m2
+    pmulhrsw        m5, m2
+    packuswb        m4, m5
+    movu            [r0], xm4
+    vextracti128    [r0 + r1], m4, 1
+
+    ; rows 14-15
+    palignr         m5, m6, m3, 14
+    pshufb          m5, m7
+    pmaddubsw       m4, m5, m0
+    pmaddubsw       m5, m1
+    pmulhrsw        m4, m2
+    pmulhrsw        m5, m2
+    packuswb        m4, m5
+    movu            [r0 + r1 * 2], xm4
+    vextracti128    [r0 + r3], m4, 1
+    RET
+%endif
+
+; 16x16 intra angular prediction, mode 25 (AVX2).
+; Builds shuffled 8-sample windows from [r2] and [r2+8], then produces the
+; 16 rows in four batches of 4 via the INTRA_PRED_ANG16_MC1 macro, using the
+; coefficient rows of c_ang16_mode_25 selected by the macro argument and the
+; mid-way r4 bump.
+INIT_YMM avx2
+cglobal intra_pred_ang16_25, 3, 5, 5
+    mova              m0, [pw_1024]     ; rounding constant used by the MC macros
+
+    vbroadcasti128    m1, [r2]
+    pshufb            m1, [intra_pred_shuff_0_8]
+    vbroadcasti128    m2, [r2 + 8]
+    pshufb            m2, [intra_pred_shuff_0_8]
+
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_25] ; coefficient table cursor
+
+    INTRA_PRED_ANG16_MC1 0
+
+    lea    r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC1 2
+
+    add           r4, 4 * mmsize        ; advance to the second half of the table
+
+    lea    r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC1 0
+
+    lea    r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC1 2
+    RET
+
+; 16x16 intra angular prediction, mode 28 (AVX2).
+; Driven entirely by the INTRA_PRED_ANG16_MC* macros (defined earlier in the
+; file): MC2 reloads/shuffles the reference window at a new offset, MC0/MC1
+; compute and store rows using c_ang16_mode_28 coefficients at r4.
+INIT_YMM avx2
+cglobal intra_pred_ang16_28, 3, 5, 6
+    mova              m0, [pw_1024]     ; rounding constant
+    mova              m5, [intra_pred_shuff_0_8]  ; shared shuffle pattern for MC2
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_28]       ; coefficient table cursor
+
+    INTRA_PRED_ANG16_MC2 1
+    INTRA_PRED_ANG16_MC1 0
+
+    lea               r0, [r0 + 4 * r1]
+
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 2
+
+    INTRA_PRED_ANG16_MC2 2
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 3
+
+    lea               r0, [r0 + 4 * r1]
+    add               r4, 4 * mmsize    ; second half of the coefficient table
+
+    INTRA_PRED_ANG16_MC1 0
+    INTRA_PRED_ANG16_MC2 3
+
+    lea               r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC1 2
+    RET
+
+; 16x16 intra angular prediction, mode 27 (AVX2).
+; First 14 rows via INTRA_PRED_ANG16_MC1/MC0 on the shuffled windows from
+; [r2+1]/[r2+9]; the last two rows are computed inline because they need the
+; wider intra_pred_shuff_0_15 window starting at [r2+2].
+INIT_YMM avx2
+cglobal intra_pred_ang16_27, 3, 5, 5
+    mova              m0, [pw_1024]     ; rounding constant
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_27] ; coefficient table cursor
+
+    vbroadcasti128    m1, [r2 + 1]
+    pshufb            m1, [intra_pred_shuff_0_8]
+    vbroadcasti128    m2, [r2 + 9]
+    pshufb            m2, [intra_pred_shuff_0_8]
+
+    INTRA_PRED_ANG16_MC1 0
+
+    lea               r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC1 2
+
+    lea               r0, [r0 + 4 * r1]
+    add               r4, 4 * mmsize    ; second half of the coefficient table
+    INTRA_PRED_ANG16_MC1 0
+
+    lea               r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 2
+
+    ; rows 14-15, computed inline (need the shifted 15-byte window)
+    vperm2i128        m1, m1, m2, 00100000b
+    pmaddubsw         m3, m1, [r4 + 3 * mmsize]
+    pmulhrsw          m3, m0
+    vbroadcasti128    m2, [r2 + 2]
+    pshufb            m2, [intra_pred_shuff_0_15]
+    pmaddubsw         m2, [r4 + 4 * mmsize]
+    pmulhrsw          m2, m0
+    packuswb          m3, m2
+    vpermq            m3, m3, 11011000b
+    movu              [r0 + 2 * r1], xm3
+    vextracti128      xm4, m3, 1
+    movu              [r0 + r3], xm4
+    RET
+
+; 16x16 intra angular prediction, mode 29 (AVX2).
+; Pure macro sequence: MC2 N advances the reference window, MC0/MC3 compute
+; and store rows with c_ang16_mode_29 coefficients; r4 is bumped twice to
+; walk the coefficient table.
+INIT_YMM avx2
+cglobal intra_pred_ang16_29, 3, 5, 5
+    mova              m0, [pw_1024]     ; rounding constant
+    mova              m5, [intra_pred_shuff_0_8]  ; shuffle used by MC2
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_29]       ; coefficient table cursor
+
+    INTRA_PRED_ANG16_MC2 1
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 0
+    INTRA_PRED_ANG16_MC3 r0 + 2 * r1, 1
+
+    INTRA_PRED_ANG16_MC2 2
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 2
+
+    lea               r0, [r0 + r1 * 4]
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 3
+
+    INTRA_PRED_ANG16_MC2 3
+    add               r4, 4 * mmsize    ; next block of coefficients
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 0
+    lea               r0, [r0 + r1 * 4]
+    INTRA_PRED_ANG16_MC3 r0 + r1, 1
+
+    INTRA_PRED_ANG16_MC2 4
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 2
+    lea               r0, [r0 + r1 * 4]
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 3
+
+    add               r4, 4 * mmsize    ; final block of coefficients
+
+    INTRA_PRED_ANG16_MC2 5
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 0
+    RET
+
+; 16x16 intra angular prediction, mode 30 (AVX2).
+; Same macro-driven scheme as mode 29, with mode 30's steeper angle needing
+; a window advance (MC2) before nearly every pair of rows.
+INIT_YMM avx2
+cglobal intra_pred_ang16_30, 3, 5, 6
+    mova              m0, [pw_1024]     ; rounding constant
+    mova              m5, [intra_pred_shuff_0_8]  ; shuffle used by MC2
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_30]       ; coefficient table cursor
+
+    INTRA_PRED_ANG16_MC2 1
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 0
+
+    INTRA_PRED_ANG16_MC2 2
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 1
+
+    INTRA_PRED_ANG16_MC2 3
+    lea               r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 2
+    INTRA_PRED_ANG16_MC3 r0 + 2 * r1, 3
+
+    INTRA_PRED_ANG16_MC2 4
+    add               r4, 4 * mmsize    ; second block of coefficients
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 0
+
+    INTRA_PRED_ANG16_MC2 5
+    lea               r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 1
+    INTRA_PRED_ANG16_MC3 r0 + r3 , 2
+
+    INTRA_PRED_ANG16_MC2 6
+    lea               r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 3
+
+    INTRA_PRED_ANG16_MC2 7
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 4
+    RET
+
+; 16x16 intra angular prediction, mode 31 (AVX2).
+; Steep angle: the reference window advances (MC2) before every pair of
+; rows; coefficients come from c_ang16_mode_31, advanced once at r4.
+INIT_YMM avx2
+cglobal intra_pred_ang16_31, 3, 5, 6
+    mova              m0, [pw_1024]     ; rounding constant
+    mova              m5, [intra_pred_shuff_0_8]  ; shuffle used by MC2
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_31]       ; coefficient table cursor
+
+    INTRA_PRED_ANG16_MC2 1
+    INTRA_PRED_ANG16_MC3 r0, 0
+
+    INTRA_PRED_ANG16_MC2 2
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 1
+
+    INTRA_PRED_ANG16_MC2 3
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 2
+
+    INTRA_PRED_ANG16_MC2 4
+    lea               r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 3
+
+    INTRA_PRED_ANG16_MC2 5
+    add               r4, 4 * mmsize    ; second block of coefficients
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 0
+
+    INTRA_PRED_ANG16_MC2 6
+    lea               r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 1
+
+    INTRA_PRED_ANG16_MC2 7
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 2
+
+    INTRA_PRED_ANG16_MC2 8
+    lea               r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 3
+
+    INTRA_PRED_ANG16_MC2 9
+    INTRA_PRED_ANG16_MC3 r0 + r3, 4
+    RET
+
+; 16x16 intra angular prediction, mode 24 (AVX2).
+; Negative-angle mode: partway through, the reference window is rebuilt
+; inline with pinsrb to splice in projected samples from the other
+; reference array ([r2+38], [r2+45] -- presumably the left neighbours'
+; projections, TODO confirm offsets against the reference-buffer layout).
+INIT_YMM avx2
+cglobal intra_pred_ang16_24, 3, 5, 6
+    mova              m0, [pw_1024]     ; rounding constant
+    mova              m5, [intra_pred_shuff_0_8]  ; shared shuffle pattern
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_24]       ; coefficient table cursor
+
+    INTRA_PRED_ANG16_MC2 0
+    INTRA_PRED_ANG16_MC1 0
+
+    lea                  r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 2
+
+    ; rebuild window one sample back, splicing in the projected sample
+    movu              xm1, [r2 - 1]
+    pinsrb            xm1, [r2 + 38], 0
+    vinserti128       m1, m1, xm1, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 7]
+    pshufb            m2, m5
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 3
+
+    lea               r0, [r0 + 4 * r1]
+    add               r4, 4 * mmsize    ; second block of coefficients
+
+    INTRA_PRED_ANG16_MC1 0
+
+    ; rebuild window two samples back with two projected samples
+    movu              xm1, [r2 - 2]
+    pinsrb            xm1, [r2 + 45], 0
+    pinsrb            xm1, [r2 + 38], 1
+    vinserti128       m1, m1, xm1, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 6]
+    pshufb            m2, m5
+
+    lea               r0, [r0 + 4 * r1]
+
+    INTRA_PRED_ANG16_MC1 2
+    RET
+
+; INTRA_PRED_ANG16_MC5 <proj_offset>, <ref_offset>
+; Rebuilds the m1/m2 reference windows for negative-angle modes:
+; shifts the running window xm6 up one byte, inserts the projected sample
+; [r2 + %1] at position 0, duplicates it into both lanes (shuffled by m5),
+; and reloads m2 from the straight reference at [r2 + %2].
+; Clobbers m1, m2, xm6; expects m5 = intra_pred_shuff_0_8.
+%macro INTRA_PRED_ANG16_MC5 2
+    pslldq            xm6,  xm6, 1
+    pinsrb            xm6, [r2 + %1], 0
+    vinserti128       m1, m6, xm6, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + %2]
+    pshufb            m2, m5
+%endmacro
+
+; 16x16 intra angular prediction, mode 23 (AVX2).
+; Negative-angle mode: the first window splice is done inline (seeding xm6),
+; then INTRA_PRED_ANG16_MC5 keeps extending the window with projected
+; samples at [r2+39/43/46] while MC0/MC3 emit rows from c_ang16_mode_23.
+INIT_YMM avx2
+cglobal intra_pred_ang16_23, 3, 5, 7
+    mova              m0, [pw_1024]     ; rounding constant
+    mova              m5, [intra_pred_shuff_0_8]  ; shared shuffle pattern
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_23]       ; coefficient table cursor
+
+    INTRA_PRED_ANG16_MC2 0
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 0
+    INTRA_PRED_ANG16_MC3 r0 + 2 * r1, 1
+
+    ; seed the sliding window xm6 with the first projected sample
+    movu              xm6, [r2 - 1]
+    pinsrb            xm6, [r2 + 36], 0
+    vinserti128       m1, m6, xm6, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 7]
+    pshufb            m2, m5
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 2
+
+    lea                  r0, [r0 + 4 * r1]
+
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 3
+
+    add               r4, 4 * mmsize    ; next block of coefficients
+
+    INTRA_PRED_ANG16_MC5 39, 6
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 0
+
+    lea                  r0, [r0 + 4 * r1]
+
+    INTRA_PRED_ANG16_MC3 r0 + r1, 1
+    INTRA_PRED_ANG16_MC5 43, 5
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 2
+
+    lea                  r0, [r0 + 4 * r1]
+
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 3
+
+    add               r4, 4 * mmsize    ; final block of coefficients
+
+    INTRA_PRED_ANG16_MC5 46, 4
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 0
+    RET
+
+; 16x16 intra angular prediction, mode 22 (AVX2).
+; Like mode 23 but with a steeper negative angle, so MC5 splices in
+; projected samples ([r2+37/39/42/44/47]) more frequently.
+INIT_YMM avx2
+cglobal intra_pred_ang16_22, 3, 5, 7
+    mova              m0, [pw_1024]     ; rounding constant
+    mova              m5, [intra_pred_shuff_0_8]  ; shared shuffle pattern
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_22]       ; coefficient table cursor
+
+    INTRA_PRED_ANG16_MC2 0
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 0
+
+    ; seed the sliding window xm6 with the first projected sample
+    movu              xm6, [r2 - 1]
+    pinsrb            xm6, [r2 + 34], 0
+    vinserti128       m1, m6, xm6, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 7]
+    pshufb            m2, m5
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 1
+
+    lea               r0, [r0 + 4 * r1]
+
+    INTRA_PRED_ANG16_MC5 37, 6
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 2
+    INTRA_PRED_ANG16_MC3 r0 + 2 * r1, 3
+
+    add               r4, 4 * mmsize    ; second block of coefficients
+
+    INTRA_PRED_ANG16_MC5 39, 5
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 0
+
+    lea               r0, [r0 + 4 * r1]
+
+    INTRA_PRED_ANG16_MC5 42, 4
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 1
+    INTRA_PRED_ANG16_MC3 r0 + r3, 2
+
+    lea               r0, [r0 + 4 * r1]
+
+    INTRA_PRED_ANG16_MC5 44, 3
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 3
+    INTRA_PRED_ANG16_MC5 47, 2
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 4
+    RET
+
+; INTRA_PRED_ANG32_ALIGNR_STORE <shift>
+; Advances r0 by four rows, then stores four 32-byte rows taken from the
+; m1:m0 byte pair shifted by %1 .. %1+3 (pure copy, no filtering).
+; Clobbers m2; expects r3 = 3 * stride.
+%macro INTRA_PRED_ANG32_ALIGNR_STORE 1
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, %1
+    movu    [r0], m2
+    palignr m2, m1, m0, (%1 + 1)
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, (%1 + 2)
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, (%1 + 3)
+    movu    [r0 + r3], m2
+%endmacro
+
+; 32x32 intra angular prediction, mode 34 (AVX2).
+; Pure 45-degree copy mode: every output row is the reference shifted one
+; byte further, so the whole block is movu + palignr stores with a mid-way
+; reload of the reference window at [r2+19]/[r2+35] (no arithmetic needed).
+INIT_YMM avx2
+cglobal intra_pred_ang32_34, 3, 4,3
+    lea     r3, [3 * r1]
+
+    movu    m0, [r2 + 2]
+    movu    m1, [r2 + 18]
+    movu    [r0], m0
+    palignr m2, m1, m0, 1
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 2
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 3
+    movu    [r0 + r3], m2
+
+    INTRA_PRED_ANG32_ALIGNR_STORE  4
+    INTRA_PRED_ANG32_ALIGNR_STORE  8
+    INTRA_PRED_ANG32_ALIGNR_STORE 12
+
+    ; rows 16+: shift window past the first 32-byte load
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 16
+    movu    [r0], m2
+    movu    m0, [r2 + 19]
+    movu    [r0 + r1], m0
+    movu    m1, [r2 + 35]
+    palignr m2, m1, m0, 1
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 2
+    movu    [r0 + r3], m2
+
+    INTRA_PRED_ANG32_ALIGNR_STORE  3
+    INTRA_PRED_ANG32_ALIGNR_STORE  7
+    INTRA_PRED_ANG32_ALIGNR_STORE 11
+    RET
+
+; 32x32 intra angular prediction, mode 2 (AVX2).
+; Mirror of mode 34: identical shifted-copy scheme but reading from the
+; other half of the reference buffer (r2 + 64 -- presumably the left
+; neighbours, TODO confirm buffer layout).
+INIT_YMM avx2
+cglobal intra_pred_ang32_2, 3, 4,3
+    lea     r3, [3 * r1]
+
+    movu    m0, [r2 + 64 + 2]
+    movu    m1, [r2 + 64 + 18]
+    movu    [r0], m0
+    palignr m2, m1, m0, 1
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 2
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 3
+    movu    [r0 + r3], m2
+
+    INTRA_PRED_ANG32_ALIGNR_STORE  4
+    INTRA_PRED_ANG32_ALIGNR_STORE  8
+    INTRA_PRED_ANG32_ALIGNR_STORE 12
+
+    ; rows 16+: shift window past the first 32-byte load
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 16
+    movu    [r0], m2
+    movu    m0, [r2 + 64 + 19]
+    movu    [r0 + r1], m0
+    movu    m1, [r2 + 64 + 35]
+    palignr m2, m1, m0, 1
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 2
+    movu    [r0 + r3], m2
+
+    INTRA_PRED_ANG32_ALIGNR_STORE  3
+    INTRA_PRED_ANG32_ALIGNR_STORE  7
+    INTRA_PRED_ANG32_ALIGNR_STORE 11
+    RET
+
+; INTRA_PRED_ANG32_STORE
+; Advances r0 by four rows and replicates the row held in m0 into each of
+; them. Expects r3 = 3 * stride.
+%macro INTRA_PRED_ANG32_STORE 0
+    lea    r0, [r0 + 4 * r1]
+    movu   [r0],          m0
+    movu   [r0 + r1],     m0
+    movu   [r0 + r1 * 2], m0
+    movu   [r0 + r3],     m0
+%endmacro
+
+; 32x32 intra angular prediction, mode 26 (pure vertical, AVX2).
+; Copies the 32-byte reference row at [r2+1] into all 32 output rows.
+; Note: no edge filtering of the first column here -- presumably handled
+; by the caller or disabled for 32x32 (TODO confirm).
+INIT_YMM avx2
+cglobal intra_pred_ang32_26, 3, 4, 1
+    lea    r3,            [3 * r1]
+    movu    m0,           [r2 + 1]
+    movu   [r0],          m0
+    movu   [r0 + r1],     m0
+    movu   [r0 + r1 * 2], m0
+    movu   [r0 + r3],     m0
+
+    INTRA_PRED_ANG32_STORE
+    INTRA_PRED_ANG32_STORE
+    INTRA_PRED_ANG32_STORE
+    INTRA_PRED_ANG32_STORE
+    INTRA_PRED_ANG32_STORE
+    INTRA_PRED_ANG32_STORE
+    INTRA_PRED_ANG32_STORE
+    RET
+
+; INTRA_PRED_STORE_4x4
+; Stores a 4x4 block held in m0 (one dword per row: rows 0-1 in the low
+; lane, rows 2-3 in the high lane). Advances r0 by two rows; clobbers m0.
+%macro INTRA_PRED_STORE_4x4 0
+    movd              [r0], xm0
+    pextrd            [r0 + r1], xm0, 1
+    vextracti128      xm0, m0, 1
+    lea               r0, [r0 + 2 * r1]
+    movd              [r0], xm0
+    pextrd            [r0 + r1], xm0, 1
+%endmacro
+
+; INTRA_PRED_TRANS_STORE_4x4
+; Transposes the 4x4 block in m0 via vpermq + the c_trans_4x4 byte shuffle,
+; then stores one dword per row. Used by the horizontal-class ang4 modes,
+; which compute the block column-wise. Advances r0 by two rows; clobbers m0.
+%macro INTRA_PRED_TRANS_STORE_4x4 0
+    vpermq            m0, m0, 00001000b
+    pshufb            m0, [c_trans_4x4]
+
+    ;store
+    movd              [r0], xm0
+    pextrd            [r0 + r1], xm0, 1
+    lea               r0, [r0 + 2 * r1]
+    pextrd            [r0], xm0, 2
+    pextrd            [r0 + r1], xm0, 3
+%endmacro
+
+; 4x4 intra angular prediction, mode 27 (AVX2): broadcast refs from [r2+1],
+; shuffle into per-row sample pairs, weight with c_ang4_mode_27, round, pack,
+; and store row-wise.
+INIT_YMM avx2
+cglobal intra_pred_ang4_27, 3, 3, 1
+    vbroadcasti128    m0, [r2 + 1]
+    pshufb            m0, [intra_pred_shuff_0_4]
+    pmaddubsw         m0, [c_ang4_mode_27]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+; 4x4 intra angular prediction, mode 28 (AVX2): same pipeline as mode 27
+; with the c_ang4_mode_28 weight table.
+INIT_YMM avx2
+cglobal intra_pred_ang4_28, 3, 3, 1
+    vbroadcasti128    m0, [r2 + 1]
+    pshufb            m0, [intra_pred_shuff_0_4]
+    pmaddubsw         m0, [c_ang4_mode_28]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+; 4x4 intra angular prediction, mode 29 (AVX2): uses the mode-specific
+; intra_pred4_shuff1 sample arrangement with c_ang4_mode_29 weights.
+INIT_YMM avx2
+cglobal intra_pred_ang4_29, 3, 3, 1
+    vbroadcasti128    m0, [r2 + 1]
+    pshufb            m0, [intra_pred4_shuff1]
+    pmaddubsw         m0, [c_ang4_mode_29]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_30, 3, 3, 1
+    vbroadcasti128    m0, [r2 + 1]
+    pshufb            m0, [intra_pred4_shuff2]
+    pmaddubsw         m0, [c_ang4_mode_30]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_31, 3, 3, 1
+    vbroadcasti128    m0, [r2 + 1]
+    pshufb            m0, [intra_pred4_shuff31]
+    pmaddubsw         m0, [c_ang4_mode_31]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_32, 3, 3, 1
+    vbroadcasti128    m0, [r2 + 1]
+    pshufb            m0, [intra_pred4_shuff31]
+    pmaddubsw         m0, [c_ang4_mode_32]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_33, 3, 3, 1
+    vbroadcasti128    m0, [r2 + 1]
+    pshufb            m0, [intra_pred4_shuff33]
+    pmaddubsw         m0, [c_ang4_mode_33]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_3, 3, 3, 1          ; 4x4 angular modes 3..17 (horizontal family): predicted transposed, then the store macro transposes back
+    vbroadcasti128    m0, [r2 + 1]
+    pshufb            m0, [intra_pred4_shuff3]
+    pmaddubsw         m0, [c_ang4_mode_33]  ; mode 3 mirrors mode 33, so the mode-33 coefficients are reused
+    pmulhrsw          m0, [pw_1024]         ; rounding shift: (x + 16) >> 5
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_4, 3, 3, 1          ; mode 4 mirrors mode 32 (reuses its coefficient table)
+    vbroadcasti128    m0, [r2]              ; horizontal modes load from [r2] (left references included)
+    pshufb            m0, [intra_pred4_shuff5]
+    pmaddubsw         m0, [c_ang4_mode_32]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_5, 3, 3, 1          ; mode 5: shares the mode-4 shuffle, own coefficients
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff5]
+    pmaddubsw         m0, [c_ang4_mode_5]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_6, 3, 3, 1          ; mode 6
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff6]
+    pmaddubsw         m0, [c_ang4_mode_6]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_7, 3, 3, 1          ; mode 7
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff7]
+    pmaddubsw         m0, [c_ang4_mode_7]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_8, 3, 3, 1          ; mode 8: shares the mode-9 shuffle
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff9]
+    pmaddubsw         m0, [c_ang4_mode_8]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_9, 3, 3, 1          ; mode 9
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff9]
+    pmaddubsw         m0, [c_ang4_mode_9]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_11, 3, 3, 1         ; mode 11 (mode 10, pure horizontal, is handled elsewhere); shares the mode-12 shuffle
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff12]
+    pmaddubsw         m0, [c_ang4_mode_11]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_12, 3, 3, 1         ; mode 12
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff12]
+    pmaddubsw         m0, [c_ang4_mode_12]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_13, 3, 3, 1         ; mode 13
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff13]
+    pmaddubsw         m0, [c_ang4_mode_13]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_14, 3, 3, 1         ; mode 14
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff14]
+    pmaddubsw         m0, [c_ang4_mode_14]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_15, 3, 3, 1         ; mode 15
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff15]
+    pmaddubsw         m0, [c_ang4_mode_15]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_16, 3, 3, 1         ; mode 16
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff16]
+    pmaddubsw         m0, [c_ang4_mode_16]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_17, 3, 3, 1         ; mode 17 (shallowest of the transposed family)
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff17]
+    pmaddubsw         m0, [c_ang4_mode_17]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_TRANS_STORE_4x4
+    RET
+
<br>
+INIT_YMM avx2
+cglobal intra_pred_ang4_19, 3, 3, 1         ; 4x4 angular modes 19..25 (vertical family, negative angle): direct row store, no transpose
+    vbroadcasti128    m0, [r2]              ; load from [r2] so the shuffle can reach projected left references
+    pshufb            m0, [intra_pred4_shuff19]
+    pmaddubsw         m0, [c_ang4_mode_19]  ; (32-frac)*s[i] + frac*s[i+1]
+    pmulhrsw          m0, [pw_1024]         ; rounding shift: (x + 16) >> 5
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_20, 3, 3, 1         ; mode 20
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff20]
+    pmaddubsw         m0, [c_ang4_mode_20]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_21, 3, 3, 1         ; mode 21
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff21]
+    pmaddubsw         m0, [c_ang4_mode_21]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_22, 3, 3, 1         ; mode 22
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff22]
+    pmaddubsw         m0, [c_ang4_mode_22]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_23, 3, 3, 1         ; mode 23
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred4_shuff23]
+    pmaddubsw         m0, [c_ang4_mode_23]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_24, 3, 3, 1         ; mode 24: shallow angle, plain 0..4 shuffle suffices
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred_shuff_0_4]
+    pmaddubsw         m0, [c_ang4_mode_24]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang4_25, 3, 3, 1         ; mode 25 (closest to pure vertical of this group)
+    vbroadcasti128    m0, [r2]
+    pshufb            m0, [intra_pred_shuff_0_4]
+    pmaddubsw         m0, [c_ang4_mode_25]
+    pmulhrsw          m0, [pw_1024]
+    packuswb          m0, m0
+
+    INTRA_PRED_STORE_4x4
+    RET
+
+;-----------------------------------------------------------------------------------
+; void intra_filter_NxN(const pixel* references, pixel* filtered)
+;-----------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_filter_4x4, 2,4,5             ; 3-tap [1 2 1]/4 smoothing of the 4x4 reference samples; r0 = in, r1 = out
+    mov             r2b, byte [r0 +  8]             ; topLast
+    mov             r3b, byte [r0 + 16]             ; LeftLast
+
+    ; filtering top
+    pmovzxbw        m0, [r0 +  0]           ; widen bytes to words so the [1 2 1] sum cannot overflow
+    pmovzxbw        m1, [r0 +  8]
+    pmovzxbw        m2, [r0 + 16]
+
+    pshufb          m4, m0, [intra_filter4_shuf0]   ; [6 5 4 3 2 1 0 1] samples[i - 1]
+    palignr         m3, m1, m0, 4
+    pshufb          m3, [intra_filter4_shuf1]       ; [8 7 6 5 4 3 2 9] samples[i + 1]
+
+    psllw           m0, 1                   ; center sample * 2
+    paddw           m4, m3
+    paddw           m0, m4
+    paddw           m0, [pw_2]              ; +2 for rounding
+    psrlw           m0, 2                   ; /4
+
+    ; filtering left
+    palignr         m4, m1, m1, 14                  ; [14 13 12 11 10 9 8 15] samples[i - 1]
+    pinsrb          m4, [r0], 2                     ; [14 13 12 11 10 9 0 15] samples[i + 1]
+    palignr         m3, m2, m1, 4
+    pshufb          m3, [intra_filter4_shuf1]
+
+    psllw           m1, 1
+    paddw           m4, m3
+    paddw           m1, m4
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+    packuswb        m0, m1                  ; pack filtered top + left back to bytes
+
+    movu            [r1], m0
+    mov             [r1 +  8], r2b                  ; topLast
+    mov             [r1 + 16], r3b                  ; LeftLast
+    RET
+
+INIT_XMM sse4
+cglobal intra_filter_8x8, 2,4,6             ; [1 2 1]/4 smoothing for 8x8 references; boundary samples (topLast/LeftLast) pass through unfiltered
+    mov             r2b, byte [r0 + 16]             ; topLast
+    mov             r3b, byte [r0 + 32]             ; LeftLast
+
+    ; filtering top
+    pmovzxbw        m0, [r0 +  0]           ; widen to 16-bit words for the overflow-free sum
+    pmovzxbw        m1, [r0 +  8]
+    pmovzxbw        m2, [r0 + 16]
+
+    pshufb          m4, m0, [intra_filter4_shuf0]   ; [6 5 4 3 2 1 0 1] samples[i - 1]
+    palignr         m5, m1, m0, 2
+    pinsrb          m5, [r0 + 17], 0                ; [8 7 6 5 4 3 2 9] samples[i + 1]
+
+    palignr         m3, m1, m0, 14          ; samples[i - 1] for the second 8 words
+    psllw           m0, 1
+    paddw           m4, m5
+    paddw           m0, m4
+    paddw           m0, [pw_2]              ; round
+    psrlw           m0, 2                   ; /4
+
+    palignr         m4, m2, m1, 2           ; samples[i + 1] for the second 8 words
+    psllw           m1, 1
+    paddw           m4, m3
+    paddw           m1, m4
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    packuswb        m0, m1
+    movu            [r1], m0
+
+    ; filtering left
+    pmovzxbw        m1, [r0 + 24]
+    pmovzxbw        m0, [r0 + 32]
+
+    palignr         m4, m2, m2, 14          ; samples[i - 1]
+    pinsrb          m4, [r0], 2             ; corner pixel participates as the first left neighbour
+    palignr         m5, m1, m2, 2           ; samples[i + 1]
+
+    palignr         m3, m1, m2, 14
+    palignr         m0, m1, 2
+
+    psllw           m2, 1
+    paddw           m4, m5
+    paddw           m2, m4
+    paddw           m2, [pw_2]
+    psrlw           m2, 2
+
+    psllw           m1, 1
+    paddw           m0, m3
+    paddw           m1, m0
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    packuswb        m2, m1
+    movu            [r1 + 16], m2
+    mov             [r1 + 16], r2b                  ; topLast
+    mov             [r1 + 32], r3b                  ; LeftLast
+    RET
+
+INIT_XMM sse4
+cglobal intra_filter_16x16, 2,4,6           ; [1 2 1]/4 smoothing for 16x16 references, processed in 8-word slices
+    mov             r2b, byte [r0 + 32]             ; topLast
+    mov             r3b, byte [r0 + 64]             ; LeftLast
+
+    ; filtering top
+    pmovzxbw        m0, [r0 +  0]           ; widen to 16-bit words
+    pmovzxbw        m1, [r0 +  8]
+    pmovzxbw        m2, [r0 + 16]
+
+    pshufb          m4, m0, [intra_filter4_shuf0]   ; [6 5 4 3 2 1 0 1] samples[i - 1]
+    palignr         m5, m1, m0, 2
+    pinsrb          m5, [r0 + 33], 0                ; [8 7 6 5 4 3 2 9] samples[i + 1]
+
+    palignr         m3, m1, m0, 14
+    psllw           m0, 1                   ; 2 * center
+    paddw           m4, m5
+    paddw           m0, m4
+    paddw           m0, [pw_2]              ; round
+    psrlw           m0, 2                   ; /4
+
+    palignr         m4, m2, m1, 2
+    psllw           m5, m1, 1
+    paddw           m4, m3
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    packuswb        m0, m5
+    movu            [r1], m0                ; filtered top samples 0..15
+
+    pmovzxbw        m0, [r0 + 24]
+    pmovzxbw        m5, [r0 + 32]
+
+    palignr         m3, m2, m1, 14          ; samples[i - 1]
+    palignr         m4, m0, m2, 2           ; samples[i + 1]
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m0, m2, 14
+    palignr         m4, m5, m0, 2
+
+    psllw           m0, 1
+    paddw           m4, m3
+    paddw           m0, m4
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+    packuswb        m1, m0
+    movu            [r1 + 16], m1           ; filtered top samples 16..31
+
+    ; filtering left
+    pmovzxbw        m1, [r0 + 40]
+    pmovzxbw        m2, [r0 + 48]
+
+    palignr         m4, m5, m5, 14          ; samples[i - 1]
+    pinsrb          m4, [r0], 2             ; corner pixel as first left neighbour
+    palignr         m0, m1, m5, 2           ; samples[i + 1]
+
+    psllw           m3, m5, 1
+    paddw           m4, m0
+    paddw           m3, m4
+    paddw           m3, [pw_2]
+    psrlw           m3, 2
+
+    palignr         m0, m1, m5, 14
+    palignr         m4, m2, m1, 2
+
+    psllw           m5, m1, 1
+    paddw           m4, m0
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    packuswb        m3, m5
+    movu            [r1 + 32], m3           ; filtered left samples 0..15
+
+    pmovzxbw        m5, [r0 + 56]
+    pmovzxbw        m0, [r0 + 64]
+
+    palignr         m3, m2, m1, 14
+    palignr         m4, m5, m2, 2
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m5, m2, 14
+    palignr         m4, m0, m5, 2
+
+    psllw           m5, 1
+    paddw           m4, m3
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    packuswb        m1, m5
+    movu            [r1 + 48], m1           ; filtered left samples 16..31
+
+    mov             [r1 + 32], r2b                  ; topLast
+    mov             [r1 + 64], r3b                  ; LeftLast
+    RET
+
+INIT_XMM sse4
+cglobal intra_filter_32x32, 2,4,6           ; [1 2 1]/4 smoothing for 32x32 references; same slice pattern as 16x16, eight 16-sample slices
+    mov             r2b, byte [r0 +  64]            ; topLast
+    mov             r3b, byte [r0 + 128]            ; LeftLast
+
+    ; filtering top
+    ; 0 to 15
+    pmovzxbw        m0, [r0 +  0]           ; widen to words for overflow-free [1 2 1] sums
+    pmovzxbw        m1, [r0 +  8]
+    pmovzxbw        m2, [r0 + 16]
+
+    pshufb          m4, m0, [intra_filter4_shuf0]   ; [6 5 4 3 2 1 0 1] samples[i - 1]
+    palignr         m5, m1, m0, 2
+    pinsrb          m5, [r0 + 65], 0                ; [8 7 6 5 4 3 2 9] samples[i + 1]
+
+    palignr         m3, m1, m0, 14
+    psllw           m0, 1
+    paddw           m4, m5
+    paddw           m0, m4
+    paddw           m0, [pw_2]              ; round
+    psrlw           m0, 2                   ; /4
+
+    palignr         m4, m2, m1, 2
+    psllw           m5, m1, 1
+    paddw           m4, m3
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    packuswb        m0, m5
+    movu            [r1], m0
+
+    ; 16 to 31
+    pmovzxbw        m0, [r0 + 24]
+    pmovzxbw        m5, [r0 + 32]
+
+    palignr         m3, m2, m1, 14          ; samples[i - 1]
+    palignr         m4, m0, m2, 2           ; samples[i + 1]
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m0, m2, 14
+    palignr         m4, m5, m0, 2
+
+    psllw           m2, m0, 1
+    paddw           m4, m3
+    paddw           m2, m4
+    paddw           m2, [pw_2]
+    psrlw           m2, 2
+    packuswb        m1, m2
+    movu            [r1 + 16], m1
+
+    ; 32 to 47
+    pmovzxbw        m1, [r0 + 40]
+    pmovzxbw        m2, [r0 + 48]
+
+    palignr         m3, m5, m0, 14
+    palignr         m4, m1, m5, 2
+
+    psllw           m0, m5, 1
+    paddw           m3, m4
+    paddw           m0, m3
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+
+    palignr         m3, m1, m5, 14
+    palignr         m4, m2, m1, 2
+
+    psllw           m5, m1, 1
+    paddw           m4, m3
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    packuswb        m0, m5
+    movu            [r1 + 32], m0
+
+    ; 48 to 63
+    pmovzxbw        m0, [r0 + 56]
+    pmovzxbw        m5, [r0 + 64]
+
+    palignr         m3, m2, m1, 14
+    palignr         m4, m0, m2, 2
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m0, m2, 14
+    palignr         m4, m5, m0, 2
+
+    psllw           m0, 1
+    paddw           m4, m3
+    paddw           m0, m4
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+    packuswb        m1, m0
+    movu            [r1 + 48], m1
+
+    ; filtering left
+    ; 64 to 79
+    pmovzxbw        m1, [r0 + 72]
+    pmovzxbw        m2, [r0 + 80]
+
+    palignr         m4, m5, m5, 14
+    pinsrb          m4, [r0], 2             ; corner pixel as first left neighbour
+    palignr         m0, m1, m5, 2
+
+    psllw           m3, m5, 1
+    paddw           m4, m0
+    paddw           m3, m4
+    paddw           m3, [pw_2]
+    psrlw           m3, 2
+
+    palignr         m0, m1, m5, 14
+    palignr         m4, m2, m1, 2
+
+    psllw           m5, m1, 1
+    paddw           m4, m0
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    packuswb        m3, m5
+    movu            [r1 + 64], m3
+
+    ; 80 to 95
+    pmovzxbw        m5, [r0 + 88]
+    pmovzxbw        m0, [r0 + 96]
+
+    palignr         m3, m2, m1, 14
+    palignr         m4, m5, m2, 2
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m5, m2, 14
+    palignr         m4, m0, m5, 2
+
+    psllw           m2, m5, 1
+    paddw           m4, m3
+    paddw           m2, m4
+    paddw           m2, [pw_2]
+    psrlw           m2, 2
+    packuswb        m1, m2
+    movu            [r1 + 80], m1
+
+    ; 96 to 111
+    pmovzxbw        m1, [r0 + 104]
+    pmovzxbw        m2, [r0 + 112]
+
+    palignr         m3, m0, m5, 14
+    palignr         m4, m1, m0, 2
+
+    psllw           m5, m0, 1
+    paddw           m3, m4
+    paddw           m5, m3
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+
+    palignr         m3, m1, m0, 14
+    palignr         m4, m2, m1, 2
+
+    psllw           m0, m1, 1
+    paddw           m4, m3
+    paddw           m0, m4
+    paddw           m0, [pw_2]
+    psrlw           m0, 2
+    packuswb        m5, m0
+    movu            [r1 + 96], m5
+
+    ; 112 to 127
+    pmovzxbw        m5, [r0 + 120]
+    pmovzxbw        m0, [r0 + 128]
+
+    palignr         m3, m2, m1, 14
+    palignr         m4, m5, m2, 2
+
+    psllw           m1, m2, 1
+    paddw           m3, m4
+    paddw           m1, m3
+    paddw           m1, [pw_2]
+    psrlw           m1, 2
+
+    palignr         m3, m5, m2, 14
+    palignr         m4, m0, m5, 2
+
+    psllw           m5, 1
+    paddw           m4, m3
+    paddw           m5, m4
+    paddw           m5, [pw_2]
+    psrlw           m5, 2
+    packuswb        m1, m5
+    movu            [r1 + 112], m1
+
+    mov             [r1 +  64], r2b                 ; topLast
+    mov             [r1 + 128], r3b                 ; LeftLast
+    RET
+
+INIT_YMM avx2
+cglobal intra_filter_4x4, 2,4,4             ; AVX2 variant: top and left halves filtered together in one 256-bit register
+    mov             r2b, byte [r0 +  8]         ; topLast
+    mov             r3b, byte [r0 + 16]         ; LeftLast
+
+    ; filtering top
+    pmovzxbw        m0, [r0]                ; widen all 16 reference bytes to words across both lanes
+    vpbroadcastw    m2, xm0                 ; broadcast sample 0 to seed the i-1 alignment
+    pmovzxbw        m1, [r0 + 8]
+
+    palignr         m3, m0, m2, 14              ; [6 5 4 3 2 1 0 0] [14 13 12 11 10 9 8 0]
+    pshufb          m3, [intra_filter4_shuf2]   ; [6 5 4 3 2 1 0 1] [14 13 12 11 10 9 0 9] samples[i - 1]
+    palignr         m1, m0, 4                   ; [9 8 7 6 5 4 3 2]
+    palignr         m1, m1, 14                  ; [9 8 7 6 5 4 3 2]
+
+    psllw           m0, 1                   ; 2 * center
+    paddw           m3, m1                  ; (i-1) + (i+1)
+    paddw           m0, m3
+    paddw           m0, [pw_2]              ; round
+    psrlw           m0, 2                   ; /4
+
+    packuswb        m0, m0
+    vpermq          m0, m0, 10001000b       ; collect both packed lane halves into the low 128 bits
+
+    movu            [r1], xm0
+    mov             [r1 +  8], r2b              ; topLast
+    mov             [r1 + 16], r3b              ; LeftLast
+    RET
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/intrapred8_allangs.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,24166 @@
+;*****************************************************************************
+;* Copyright (C) 2013 x265 project
+;*
+;* Authors: Min Chen <chenm003@163.com> <min.chen@multicorewareinc.com>
+;*          Praveen Tiwari <praveen@multicorewareinc.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32
+
+all_ang4_shuff: db 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6, 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6  ; per-mode pshufb tables: one 32-byte row per angular mode, selecting the reference-sample pairs each of the 4 output rows interpolates between
+                db 0, 1, 1, 2, 2, 3, 3, 4, 1, 2, 2, 3, 3, 4, 4, 5, 2, 3, 3, 4, 4, 5, 5, 6, 3, 4, 4, 5, 5, 6, 6, 7
+                db 0, 1, 1, 2, 2, 3, 3, 4, 1, 2, 2, 3, 3, 4, 4, 5, 1, 2, 2, 3, 3, 4, 4, 5, 2, 3, 3, 4, 4, 5, 5, 6
+                db 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 1, 2, 2, 3, 3, 4, 4, 5, 1, 2, 2, 3, 3, 4, 4, 5
+                db 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 1, 2, 2, 3, 3, 4, 4, 5
+                db 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4
+                db 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+                db 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12  ; indices >= 9 reach into the left-reference half of the loaded block
+                db 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12, 4, 0, 0, 9, 9, 10, 10, 11
+                db 0, 9, 9, 10, 10, 11, 11, 12, 0, 9, 9, 10, 10, 11, 11, 12, 2, 0, 0, 9, 9, 10, 10, 11, 2, 0, 0, 9, 9, 10, 10, 11
+                db 0, 9, 9, 10, 10, 11, 11, 12, 2, 0, 0, 9, 9, 10, 10, 11, 2, 0, 0, 9, 9, 10, 10, 11, 4, 2, 2, 0, 0, 9, 9, 10
+                db 0, 9, 9, 10, 10, 11, 11, 12, 2, 0, 0, 9, 9, 10, 10, 11, 2, 0, 0, 9, 9, 10, 10, 11, 3, 2, 2, 0, 0, 9, 9, 10
+                db 0, 9, 9, 10, 10, 11, 11, 12, 1, 0, 0, 9, 9, 10, 10, 11, 2, 1, 1, 0, 0, 9, 9, 10, 4, 2, 2, 1, 1, 0, 0, 9
+                db 0, 1, 2, 3, 9, 0, 1, 2, 10, 9, 0, 1, 11, 10, 9, 0, 0, 1, 2, 3, 9, 0, 1, 2, 10, 9, 0, 1, 11, 10, 9, 0
+                db 0, 1, 1, 2, 2, 3, 3, 4, 9, 0, 0, 1, 1, 2, 2, 3, 10, 9, 9, 0, 0, 1, 1, 2, 12, 10, 10, 9, 9, 0, 0, 1
+                db 0, 1, 1, 2, 2, 3, 3, 4, 10, 0, 0, 1, 1, 2, 2, 3, 10, 0, 0, 1, 1, 2, 2, 3, 11, 10, 10, 0, 0, 1, 1, 2
+                db 0, 1, 1, 2, 2, 3, 3, 4, 10, 0, 0, 1, 1, 2, 2, 3, 10, 0, 0, 1, 1, 2, 2, 3, 12, 10, 10, 0, 0, 1, 1, 2
+                db 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 10, 0, 0, 1, 1, 2, 2, 3, 10, 0, 0, 1, 1, 2, 2, 3
+                db 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 12, 0, 0, 1, 1, 2, 2, 3
+                db 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4, 0, 1, 1, 2, 2, 3, 3, 4
+                db 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4
+                db 1, 2, 2, 3, 3, 4, 4, 5, 1, 2, 2, 3, 3, 4, 4, 5, 1, 2, 2, 3, 3, 4, 4, 5, 1, 2, 2, 3, 3, 4, 4, 5
+                db 1, 2, 2, 3, 3, 4, 4, 5, 1, 2, 2, 3, 3, 4, 4, 5, 1, 2, 2, 3, 3, 4, 4, 5, 2, 3, 3, 4, 4, 5, 5, 6
+                db 1, 2, 2, 3, 3, 4, 4, 5, 1, 2, 2, 3, 3, 4, 4, 5, 2, 3, 3, 4, 4, 5, 5, 6, 2, 3, 3, 4, 4, 5, 5, 6
+                db 1, 2, 2, 3, 3, 4, 4, 5, 2, 3, 3, 4, 4, 5, 5, 6, 2, 3, 3, 4, 4, 5, 5, 6, 3, 4, 4, 5, 5, 6, 6, 7
+                db 1, 2, 2, 3, 3, 4, 4, 5, 2, 3, 3, 4, 4, 5, 5, 6, 3, 4, 4, 5, 5, 6, 6, 7, 4, 5, 5, 6, 6, 7, 7, 8
+                db 2, 3, 4, 5, 3, 4, 5, 6, 4, 5, 6, 7, 5, 6, 7, 8, 2, 3, 4, 5, 3, 4, 5, 6, 4, 5, 6, 7, 5, 6, 7, 8
+
+all_ang4: db 6, 26, 6, 26, 6, 26, 6, 26, 12, 20, 12, 20, 12, 20, 12, 20, 18, 14, 18, 14, 18, 14, 18, 14, 24, 8, 24, 8, 24, 8, 24, 8  ; pmaddubsw coefficient pairs (32-frac, frac) per mode, one row per mode, 8 pairs per output row; pairs sum to 32
+          db 11, 21, 11, 21, 11, 21, 11, 21, 22, 10, 22, 10, 22, 10, 22, 10, 1, 31, 1, 31, 1, 31, 1, 31, 12, 20, 12, 20, 12, 20, 12, 20
+          db 15, 17, 15, 17, 15, 17, 15, 17, 30, 2, 30, 2, 30, 2, 30, 2, 13, 19, 13, 19, 13, 19, 13, 19, 28, 4, 28, 4, 28, 4, 28, 4
+          db 19, 13, 19, 13, 19, 13, 19, 13, 6, 26, 6, 26, 6, 26, 6, 26, 25, 7, 25, 7, 25, 7, 25, 7, 12, 20, 12, 20, 12, 20, 12, 20
+          db 23, 9, 23, 9, 23, 9, 23, 9, 14, 18, 14, 18, 14, 18, 14, 18, 5, 27, 5, 27, 5, 27, 5, 27, 28, 4, 28, 4, 28, 4, 28, 4
+          db 27, 5, 27, 5, 27, 5, 27, 5, 22, 10, 22, 10, 22, 10, 22, 10, 17, 15, 17, 15, 17, 15, 17, 15, 12, 20, 12, 20, 12, 20, 12, 20
+          db 30, 2, 30, 2, 30, 2, 30, 2, 28, 4, 28, 4, 28, 4, 28, 4, 26, 6, 26, 6, 26, 6, 26, 6, 24, 8, 24, 8, 24, 8, 24, 8
+          db 2, 30, 2, 30, 2, 30, 2, 30, 4, 28, 4, 28, 4, 28, 4, 28, 6, 26, 6, 26, 6, 26, 6, 26, 8, 24, 8, 24, 8, 24, 8, 24
+          db 5, 27, 5, 27, 5, 27, 5, 27, 10, 22, 10, 22, 10, 22, 10, 22, 15, 17, 15, 17, 15, 17, 15, 17, 20, 12, 20, 12, 20, 12, 20, 12
+          db 9, 23, 9, 23, 9, 23, 9, 23, 18, 14, 18, 14, 18, 14, 18, 14, 27, 5, 27, 5, 27, 5, 27, 5, 4, 28, 4, 28, 4, 28, 4, 28
+          db 13, 19, 13, 19, 13, 19, 13, 19, 26, 6, 26, 6, 26, 6, 26, 6, 7, 25, 7, 25, 7, 25, 7, 25, 20, 12, 20, 12, 20, 12, 20, 12
+          db 17, 15, 17, 15, 17, 15, 17, 15, 2, 30, 2, 30, 2, 30, 2, 30, 19, 13, 19, 13, 19, 13, 19, 13, 4, 28, 4, 28, 4, 28, 4, 28
+          db 21, 11, 21, 11, 21, 11, 21, 11, 10, 22, 10, 22, 10, 22, 10, 22, 31, 1, 31, 1, 31, 1, 31, 1, 20, 12, 20, 12, 20, 12, 20, 12
+          db 26, 6, 26, 6, 26, 6, 26, 6, 20, 12, 20, 12, 20, 12, 20, 12, 14, 18, 14, 18, 14, 18, 14, 18, 8, 24, 8, 24, 8, 24, 8, 24
+          db 26, 6, 26, 6, 26, 6, 26, 6, 20, 12, 20, 12, 20, 12, 20, 12, 14, 18, 14, 18, 14, 18, 14, 18, 8, 24, 8, 24, 8, 24, 8, 24  ; second half mirrors the first (opposite-direction modes share fractions)
+          db 21, 11, 21, 11, 21, 11, 21, 11, 10, 22, 10, 22, 10, 22, 10, 22, 31, 1, 31, 1, 31, 1, 31, 1, 20, 12, 20, 12, 20, 12, 20, 12
+          db 17, 15, 17, 15, 17, 15, 17, 15, 2, 30, 2, 30, 2, 30, 2, 30, 19, 13, 19, 13, 19, 13, 19, 13, 4, 28, 4, 28, 4, 28, 4, 28
+          db 13, 19, 13, 19, 13, 19, 13, 19, 26, 6, 26, 6, 26, 6, 26, 6, 7, 25, 7, 25, 7, 25, 7, 25, 20, 12, 20, 12, 20, 12, 20, 12
+          db 9, 23, 9, 23, 9, 23, 9, 23, 18, 14, 18, 14, 18, 14, 18, 14, 27, 5, 27, 5, 27, 5, 27, 5, 4, 28, 4, 28, 4, 28, 4, 28
+          db 5, 27, 5, 27, 5, 27, 5, 27, 10, 22, 10, 22, 10, 22, 10, 22, 15, 17, 15, 17, 15, 17, 15, 17, 20, 12, 20, 12, 20, 12, 20, 12
+          db 2, 30, 2, 30, 2, 30, 2, 30, 4, 28, 4, 28, 4, 28, 4, 28, 6, 26, 6, 26, 6, 26, 6, 26, 8, 24, 8, 24, 8, 24, 8, 24
+          db 30, 2, 30, 2, 30, 2, 30, 2, 28, 4, 28, 4, 28, 4, 28, 4, 26, 6, 26, 6, 26, 6, 26, 6, 24, 8, 24, 8, 24, 8, 24, 8
+          db 27, 5, 27, 5, 27, 5, 27, 5, 22, 10, 22, 10, 22, 10, 22, 10, 17, 15, 17, 15, 17, 15, 17, 15, 12, 20, 12, 20, 12, 20, 12, 20
+          db 23, 9, 23, 9, 23, 9, 23, 9, 14, 18, 14, 18, 14, 18, 14, 18, 5, 27, 5, 27, 5, 27, 5, 27, 28, 4, 28, 4, 28, 4, 28, 4
+          db 19, 13, 19, 13, 19, 13, 19, 13, 6, 26, 6, 26, 6, 26, 6, 26, 25, 7, 25, 7, 25, 7, 25, 7, 12, 20, 12, 20, 12, 20, 12, 20
+          db 15, 17, 15, 17, 15, 17, 15, 17, 30, 2, 30, 2, 30, 2, 30, 2, 13, 19, 13, 19, 13, 19, 13, 19, 28, 4, 28, 4, 28, 4, 28, 4
+          db 11, 21, 11, 21, 11, 21, 11, 21, 22, 10, 22, 10, 22, 10, 22, 10, 1, 31, 1, 31, 1, 31, 1, 31, 12, 20, 12, 20, 12, 20, 12, 20
+          db 6, 26, 6, 26, 6, 26, 6, 26, 12, 20, 12, 20, 12, 20, 12, 20, 18, 14, 18, 14, 18, 14, 18, 14, 24, 8, 24, 8, 24, 8, 24, 8
+
+
+SECTION .text
+
+; global constant
+cextern pw_1024
+
+; common constant with intrapred8.asm
+cextern ang_table
+cextern pw_ang_table
+cextern tab_S1
+cextern tab_S2
+cextern tab_Si
+cextern pw_16
+cextern pb_000000000000000F
+cextern pb_0000000000000F0F
+cextern pw_FFFFFFFFFFFFFFF0
+
+
+;-----------------------------------------------------------------------------
+; void all_angs_pred_4x4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal all_angs_pred_4x4, 4, 4, 8
+
+; mode 2
+
+movh      m0,         [r1 + 10]
+movd      [r0],       m0
+
+palignr   m1,         m0,      1
+movd      [r0 + 4],   m1
+
+palignr   m1,         m0,      2
+movd      [r0 + 8],   m1
+
+palignr   m1,         m0,      3
+movd      [r0 + 12],  m1
+
+; mode 3
+
+mova          m2,        [pw_1024]
+
+pslldq        m1,        m0,         1
+pinsrb        m1,        [r1 + 9],   0
+punpcklbw     m1,        m0
+
+lea           r3,        [ang_table]
+
+pmaddubsw     m6,        m1,        [r3 + 26 * 16]
+pmulhrsw      m6,        m2
+packuswb      m6,        m6
+movd          [r0 + 16], m6
+
+palignr       m0,        m1,        2
+
+mova          m7,        [r3 + 20 * 16]
+
+pmaddubsw     m3,        m0,        m7
+pmulhrsw      m3,        m2
+packuswb      m3,        m3
+movd          [r0 + 20], m3
+
+; mode 6 [row 3]
+movd          [r0 + 76], m3
+
+palignr       m3,        m1,       4
+
+pmaddubsw     m4,        m3,        [r3 + 14 * 16]
+pmulhrsw      m4,        m2
+packuswb      m4,        m4
+movd          [r0 + 24], m4
+
+palignr       m4,        m1,        6
+
+pmaddubsw     m4,        [r3 + 8 * 16]
+pmulhrsw      m4,        m2
+packuswb      m4,        m4
+movd          [r0 + 28], m4
+
+; mode 4
+
+pmaddubsw     m5,        m1,        [r3 + 21 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 32], m5
+
+pmaddubsw     m5,        m0,        [r3 + 10 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 36], m5
+
+pmaddubsw     m5,        m0,        [r3 + 31 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 40], m5
+
+pmaddubsw     m4,        m3,        m7
+pmulhrsw      m4,        m2
+packuswb      m4,        m4
+movd          [r0 + 44], m4
+
+; mode 5
+
+pmaddubsw     m5,        m1,        [r3 + 17 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 48], m5
+
+pmaddubsw     m5,        m0,        [r3 + 2 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 52], m5
+
+pmaddubsw     m5,        m0,        [r3 + 19 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 56], m5
+
+pmaddubsw     m4,        m3,        [r3 + 4 * 16]
+pmulhrsw      m4,        m2
+packuswb      m4,        m4
+movd          [r0 + 60], m4
+
+; mode 6
+
+pmaddubsw     m5,        m1,        [r3 + 13 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 64], m5
+
+movd          [r0 + 68], m6
+
+pmaddubsw     m5,        m0,        [r3 + 7 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 72], m5
+
+; mode 7
+
+pmaddubsw     m5,        m1,        [r3 + 9 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 80], m5
+
+pmaddubsw     m5,        m1,        [r3 + 18 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 84], m5
+
+pmaddubsw     m5,        m1,        [r3 + 27 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 88], m5
+
+pmaddubsw     m5,        m0,        [r3 + 4 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 92], m5
+
+; mode 8
+
+pmaddubsw     m5,        m1,        [r3 + 5 * 16]
+pmulhrsw      m5,        m2
+packuswb      m5,        m5
+movd          [r0 + 96], m5
+
+pmaddubsw     m5,         m1,       [r3 + 10 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 100], m5
+
+pmaddubsw     m5,         m1,        [r3 + 15 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 104], m5
+
+pmaddubsw     m5,         m1,        [r3 + 20 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 108], m5
+
+; mode 9
+
+pmaddubsw     m5,         m1,        [r3 + 2 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 112], m5
+
+pmaddubsw     m5,         m1,        [r3 + 4 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 116], m5
+
+pmaddubsw     m5,         m1,        [r3 + 6 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 120], m5
+
+pmaddubsw     m5,         m1,        [r3 + 8 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 124], m5
+
+; mode 10
+
+movd         m3,         [r1 + 9]
+pshufd       m4,         m3,        0
+movu         [r0 + 128], m4
+
+pxor         m5,         m5
+movd         m7,         [r1 + 1]
+pshufd       m4,         m7,        0
+punpcklbw    m4,         m5
+
+pinsrb       m7,         [r1],      0
+pshufb       m6,         m7,        m5
+punpcklbw    m6,         m5
+
+psubw        m4,         m6
+psraw        m4,         1
+
+pshufb       m6,         m3,       m5
+punpcklbw    m6,         m5
+
+paddw        m4,         m6
+packuswb     m4,         m5
+
+pextrb       [r0 + 128],  m4,    0
+pextrb       [r0 + 132],  m4,    1
+pextrb       [r0 + 136],  m4,    2
+pextrb       [r0 + 140],  m4,    3
+
+; mode 11
+
+pslldq        m1,        m1,         2
+pinsrb        m1,        [r1],       0
+pinsrb        m1,        [r1 + 9],   1
+
+pmaddubsw     m3,         m1,        [r3 + 30 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 144], m3
+
+pmaddubsw     m3,         m1,        [r3 + 28 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 148], m3
+
+pmaddubsw     m3,         m1,        [r3 + 26 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 152], m3
+
+pmaddubsw     m3,         m1,        [r3 + 24 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 156], m3
+
+; mode 12
+
+pmaddubsw     m3,         m1,        [r3 + 27 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 160], m3
+
+pmaddubsw     m3,         m1,        [r3 + 22 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 164], m3
+
+pmaddubsw     m3,         m1,        [r3 + 17 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 168], m3
+
+pmaddubsw     m3,         m1,        [r3 + 12 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 172], m3
+
+; mode 13
+
+pmaddubsw     m3,         m1,        [r3 + 23 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 176], m3
+
+pmaddubsw     m3,         m1,        [r3 + 14 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 180], m3
+
+pmaddubsw     m3,         m1,        [r3 + 5 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 184], m3
+
+pslldq        m5,         m1,        2
+pinsrb        m5,         [r1 + 0],  1
+pinsrb        m5,         [r1 + 4],  0
+
+pmaddubsw     m4,         m5,        [r3 + 28 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 188], m4
+
+; mode 14
+
+pmaddubsw     m4,         m1,        [r3 + 19 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 192], m4
+
+pmaddubsw     m7,         m1,        [r3 + 6 * 16]
+pmulhrsw      m7,         m2
+packuswb      m7,         m7
+movd          [r0 + 196], m7
+
+pinsrb        m5,         [r1 + 2],  0
+
+pmaddubsw     m4,         m5,        [r3 + 25 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 200], m4
+
+pmaddubsw     m4,         m5,        [r3 + 12 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 204], m4
+
+; mode 15
+
+pmaddubsw     m4,         m1,        [r3 + 15 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 208], m4
+
+pmaddubsw     m4,         m5,        [r3 + 30 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 212], m4
+
+pmaddubsw     m4,         m5,        [r3 + 13 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 216], m4
+
+pslldq        m4,         m5,         2
+pinsrb        m4,         [r1 + 2],   1
+pinsrb        m4,         [r1 + 4],   0
+
+pmaddubsw     m6,         m4,         [r3 + 28 * 16]
+pmulhrsw      m6,         m2
+packuswb      m6,         m6
+movd          [r0 + 220], m6
+
+; mode 16
+
+pmaddubsw     m6,         m1,        [r3 + 11 * 16]
+pmulhrsw      m6,         m2
+packuswb      m6,         m6
+movd          [r0 + 224], m6
+
+pmaddubsw     m6,         m5,        [r3 + 22 * 16]
+pmulhrsw      m6,         m2
+packuswb      m6,         m6
+movd          [r0 + 228], m6
+
+pmaddubsw     m6,         m5,        [r3 + 1 * 16]
+pmulhrsw      m6,         m2
+packuswb      m6,         m6
+movd          [r0 + 232], m6
+
+pinsrb        m4,         [r1 + 3],  0
+
+pmaddubsw     m4,         [r3 + 12 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 236], m4
+
+; mode 17
+
+movd          [r0 + 240],  m7
+
+pslldq        m1,         2
+pinsrb        m1,         [r1 + 1],  0
+pinsrb        m1,         [r1 + 0],  1
+
+pmaddubsw     m3,         m1,        [r3 + 12 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 244], m3
+
+pslldq        m1,         2
+pinsrb        m1,         [r1 + 1],  1
+pinsrb        m1,         [r1 + 2],  0
+
+pmaddubsw     m3,         m1,        [r3 + 18 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 248], m3
+
+pslldq        m1,         2
+pinsrb        m1,         [r1 + 2],  1
+pinsrb        m1,         [r1 + 4],  0
+
+pmaddubsw     m1,         [r3 + 24 * 16]
+pmulhrsw      m1,         m2
+packuswb      m1,         m1
+movd          [r0 + 252], m1
+
+; mode 18
+
+movh          m1,         [r1]
+movd          [r0 + 256], m1
+
+pslldq        m3,         m1,         1
+pinsrb        m3,         [r1 + 9],   0
+movd          [r0 + 260], m3
+
+pslldq        m4,         m3,         1
+pinsrb        m4,         [r1 + 10],  0
+movd          [r0 + 264], m4
+
+pslldq        m4,         1
+pinsrb        m4,         [r1 + 11],  0
+movd          [r0 + 268], m4
+
+; mode 19
+
+palignr       m3,         m1,        1
+punpcklbw     m1,         m3
+
+pmaddubsw     m7,         m1,        [r3 + 6 * 16]
+pmulhrsw      m7,         m2
+packuswb      m7,         m7
+movd          [r0 + 272], m7
+
+pslldq        m3,         m1,         2
+pinsrb        m3,         [r1],       1
+pinsrb        m3,         [r1 + 9],   0
+
+pmaddubsw     m4,         m3,         [r3 + 12 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 276], m4
+
+pslldq        m4,         m3,         2
+pinsrb        m4,         [r1 + 9],   1
+pinsrb        m4,         [r1 + 10],  0
+
+pmaddubsw     m5,         m4,         [r3 + 18 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 280], m5
+
+pslldq        m4,         2
+pinsrb        m4,         [r1 + 10],  1
+pinsrb        m4,         [r1 + 12],  0
+
+pmaddubsw     m4,         [r3 + 24 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 284], m4
+
+; mode 20
+
+pmaddubsw     m4,         m1,        [r3 + 11 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 288], m4
+
+pinsrb        m3,         [r1 + 10],  0
+
+pmaddubsw     m4,         m3,        [r3 + 22 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 292], m4
+
+pmaddubsw     m4,         m3,        [r3 + 1 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 296], m4
+
+pslldq        m6,         m3,        2
+pinsrb        m6,         [r1 + 10], 1
+pinsrb        m6,         [r1 + 11], 0
+
+pmaddubsw     m5,         m6,        [r3 + 12 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 300], m5
+
+; mode 21
+
+pmaddubsw     m4,         m1,        [r3 + 15 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 304], m4
+
+pmaddubsw     m4,         m3,        [r3 + 30 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 308], m4
+
+pmaddubsw     m4,         m3,        [r3 + 13 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 312], m4
+
+pinsrb        m6,         [r1 + 12],   0
+
+pmaddubsw     m6,         [r3 + 28 * 16]
+pmulhrsw      m6,         m2
+packuswb      m6,         m6
+movd          [r0 + 316], m6
+
+; mode 22
+
+pmaddubsw     m4,         m1,         [r3 + 19 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 320], m4
+
+movd          [r0 + 324], m7
+
+pmaddubsw     m4,         m3,        [r3 + 25 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 328], m4
+
+pmaddubsw     m4,         m3,         [r3 + 12 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 332], m4
+
+; mode 23
+
+pmaddubsw     m4,         m1,         [r3 + 23 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 336], m4
+
+pmaddubsw     m4,         m1,         [r3 + 14 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 340], m4
+
+pmaddubsw     m4,         m1,         [r3 + 5 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 344], m4
+
+pinsrb         m3,        [r1 + 12],   0
+
+pmaddubsw     m3,         [r3 + 28 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 348], m3
+
+; mode 24
+
+pmaddubsw     m3,         m1,         [r3 + 27 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 352], m3
+
+pmaddubsw     m3,         m1,         [r3 + 22 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 356], m3
+
+pmaddubsw     m3,         m1,         [r3 + 17 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 360], m3
+
+pmaddubsw     m3,         m1,         [r3 + 12 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 364], m3
+
+; mode 25
+
+pmaddubsw     m3,         m1,         [r3 + 30 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 368], m3
+
+pmaddubsw     m3,         m1,         [r3 + 28 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 372], m3
+
+pmaddubsw     m3,         m1,         [r3 + 26 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 376], m3
+
+pmaddubsw     m1,         [r3 + 24 * 16]
+pmulhrsw      m1,         m2
+packuswb      m1,         m1
+movd          [r0 + 380], m1
+
+; mode 26
+
+movh         m1,         [r1 + 1]
+pshufd       m3,         m1,        0
+movu         [r0 + 384], m3
+
+pxor         m4,         m4
+movd         m5,         [r1 + 9]
+pshufd       m5,         m5,        0
+punpcklbw    m5,         m4
+
+pinsrb       m6,         [r1],      0
+pshufb       m6,         m4
+punpcklbw    m6,         m4
+
+psubw        m5,         m6
+psraw        m5,         1
+
+pshufb       m6,         m1,        m4
+punpcklbw    m6,         m4
+
+paddw        m5,         m6
+packuswb     m5,         m4
+
+pextrb       [r0 + 384], m5,    0
+pextrb       [r0 + 388], m5,    1
+pextrb       [r0 + 392], m5,    2
+pextrb       [r0 + 396], m5,    3
+
+; mode 27
+
+palignr       m3,         m1,     1
+punpcklbw     m1,         m3
+
+pmaddubsw     m3,         m1,     [r3 + 2 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 400], m3
+
+pmaddubsw     m3,         m1,     [r3 + 4 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 404], m3
+
+pmaddubsw     m3,         m1,     [r3 + 6 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 408], m3
+
+pmaddubsw     m3,         m1,     [r3 + 8 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 412], m3
+
+; mode 28
+
+pmaddubsw     m3,         m1,     [r3 + 5 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 416], m3
+
+pmaddubsw     m3,         m1,     [r3 + 10 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 420], m3
+
+pmaddubsw     m3,         m1,     [r3 + 15 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 424], m3
+
+pmaddubsw     m3,         m1,     [r3 + 20 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 428], m3
+
+; mode 29
+
+pmaddubsw     m3,         m1,     [r3 + 9 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 432], m3
+
+pmaddubsw     m3,         m1,     [r3 + 18 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 436], m3
+
+pmaddubsw     m3,         m1,     [r3 + 27 * 16]
+pmulhrsw      m3,         m2
+packuswb      m3,         m3
+movd          [r0 + 440], m3
+
+palignr       m3,         m1,     2
+
+pmaddubsw     m4,         m3,     [r3 + 4 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 444], m4
+
+; mode 30
+
+pmaddubsw     m4,         m1,     [r3 + 13 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 448], m4
+
+pmaddubsw     m7,         m1,     [r3 + 26 * 16]
+pmulhrsw      m7,         m2
+packuswb      m7,         m7
+movd          [r0 + 452], m7
+
+pmaddubsw     m5,         m3,     [r3 + 7 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 456], m5
+
+pmaddubsw     m6,         m3,     [r3 + 20 * 16]
+pmulhrsw      m6,         m2
+packuswb      m6,         m6
+movd          [r0 + 460], m6
+
+; mode 31
+
+pmaddubsw     m4,         m1,     [r3 + 17 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 464], m4
+
+pmaddubsw     m5,         m3,     [r3 + 2 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 468], m5
+
+pmaddubsw     m5,         m3,     [r3 + 19 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 472], m5
+
+palignr       m4,         m3,     2
+
+pmaddubsw     m5,         m4,     [r3 + 4 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 476], m5
+
+; mode 32
+
+pmaddubsw     m5,         m1,     [r3 + 21 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 480], m5
+
+pmaddubsw     m5,         m3,     [r3 + 10 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 484], m5
+
+pmaddubsw     m5,         m3,     [r3 + 31 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 488], m5
+
+pmaddubsw     m5,         m4,     [r3 + 20 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 492], m5
+
+; mode 33
+
+movd          [r0 + 496], m7
+
+movd          [r0 + 500], m6
+
+pmaddubsw     m5,         m4,         [r3 + 14 * 16]
+pmulhrsw      m5,         m2
+packuswb      m5,         m5
+movd          [r0 + 504], m5
+
+psrldq        m4,         2
+
+pmaddubsw     m4,         [r3 + 8 * 16]
+pmulhrsw      m4,         m2
+packuswb      m4,         m4
+movd          [r0 + 508], m4
+
+; mode 34
+
+movh      m7,             [r1 + 2]
+movd      [r0 + 512],     m7
+
+psrldq    m7,      1
+movd      [r0 + 516],     m7
+
+psrldq    m7,      1
+movd      [r0 + 520],     m7
+
+psrldq    m7,      1
+movd      [r0 + 524],     m7
+
+RET
+
+;------------------------------------------------------------------------------
+; void all_angs_pred_8x8(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma)
+;------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal all_angs_pred_8x8, 3,4,8
+    ; mode 2
+
+    movu         m0,          [r2 + 18]
+    palignr      m1,          m0,          1
+    punpcklqdq   m2,          m0,          m1
+    movu         [r0],        m2
+
+    palignr      m1,          m0,          2
+    palignr      m2,          m0,          3
+    punpcklqdq   m1,          m2
+    movu         [r0 + 16],   m1
+
+    palignr      m1,          m0,          4
+    palignr      m2,          m0,          5
+    punpcklqdq   m1,          m2
+    movu         [r0 + 32],   m1
+
+    palignr      m1,          m0,          6
+    palignr      m2,          m0,          7
+    punpcklqdq   m1,          m2
+    movu         [r0 + 48],   m1
+
+    ; mode 3 [row 0, 1]
+
+    mova          m7,         [pw_1024]
+    lea           r3,         [ang_table]
+
+    movu          m0,         [r1 + 17]
+
+    palignr       m1,         m0,               1
+    palignr       m2,         m0,               2
+
+    punpcklbw     m3,         m0,               m1
+    pmaddubsw     m4,         m3,               [r3 + 26 * 16]
+    pmulhrsw      m4,         m7
+
+    punpcklbw     m1,         m2
+    pmaddubsw     m5,         m1,               [r3 + 20 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+
+    movu          [r0 + 64],  m4
+
+    ; mode 6 [row 1]
+
+    movh          [r0 + 264], m4
+
+    ; mode 6 [row 3]
+
+    movhps        [r0 + 280], m4
+
+    ; mode 4 [row 0, 1]
+
+    pmaddubsw     m4,         m3,               [r3 + 21 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m1,               [r3 + 10 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 128], m4
+
+    ; mode 5 [row 0, 1]
+
+    pmaddubsw     m4,         m3,               [r3 + 17 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m1,               [r3 + 2 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 192], m4
+
+    ; mode 6 [row 0]
+
+    pmaddubsw     m4,         m3,               [r3 + 13 * 16]
+    pmulhrsw      m4,         m7
+
+    pxor          m5,         m5
+
+    packuswb      m4,         m5
+    movh          [r0 + 256], m4
+
+    ; mode 7 [row 0, 1]
+
+    pmaddubsw     m4,         m3,               [r3 + 9 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m3,               [r3 + 18 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 320], m4
+
+    ; mode 8 [row 0, 1]
+
+    pmaddubsw     m4,         m3,               [r3 + 5 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m3,               [r3 + 10 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 384], m4
+
+    ; mode 8 [row 2, 3]
+
+    pmaddubsw     m4,         m3,               [r3 + 15 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m3,               [r3 + 20 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 400], m4
+
+    ; mode 8 [row 4, 5]
+
+    pmaddubsw     m4,         m3,               [r3 + 25 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m3,               [r3 + 30 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 416], m4
+
+    ; mode 8 [row 6, 7]
+
+    pmaddubsw     m4,         m1,               [r3 + 3 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m1,               [r3 + 8 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 432], m4
+
+    ; mode 9 [row 0, 1]
+
+    pmaddubsw     m4,         m3,               [r3 + 2 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m3,               [r3 + 4 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 448], m4
+
+    ; mode 9 [row 2, 3]
+
+    pmaddubsw     m4,         m3,               [r3 + 6 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m3,               [r3 + 8 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 464], m4
+
+    ; mode 9 [row 4, 5]
+
+    pmaddubsw     m4,         m3,               [r3 + 10 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m3,               [r3 + 12 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 480], m4
+
+    ; mode 9 [row 6, 7]
+
+    pmaddubsw     m4,         m3,               [r3 + 14 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m3,               [r3 + 16 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 496], m4
+
+    ; mode 7 [row 2, 3]
+
+    pmaddubsw     m4,         m3,               [r3 + 27 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m1,               [r3 + 4 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 336], m4
+
+    ; mode 7 [row 4, 5]
+
+    pmaddubsw     m4,         m1,               [r3 + 13 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m1,               [r3 + 22 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 352], m4
+
+    ; mode 6 [row 2]
+
+    pmaddubsw     m4,         m1,               [r3 + 7 * 16]
+    pmulhrsw      m4,         m7
+
+    pxor           m5,         m5
+
+    packuswb      m4,         m5
+    movh          [r0 + 272], m4
+
+    ; mode 3 [row 2, 3]
+
+    palignr       m1,         m0,               3
+    palignr       m3,         m0,               4
+
+    punpcklbw     m2,         m1
+    pmaddubsw     m5,         m2,               [r3 + 14 * 16]
+    pmulhrsw      m5,         m7
+
+    punpcklbw     m1,         m3
+    pmaddubsw     m6,         m1,               [r3 + 8 * 16]
+    pmulhrsw      m6,         m7
+
+    packuswb      m5,         m6
+    movu          [r0 + 80],  m5
+
+    ; mode 6 [row 7]
+
+    movhps        [r0 + 312], m5
+
+    ; mode 6 [row 5]
+
+    movh          [r0 + 296], m5
+
+    ; mode 4 [calculate and store row 4, 5]
+
+    pmaddubsw     m4,         m1,               [r3 + 9 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m1,               [r3 + 30 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 160], m4
+
+    ; mode 5 [row 4, 5]
+
+    pmaddubsw     m4,         m2,               [r3 + 21 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m1,               [r3 + 6 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 224], m4
+
+    ; mode 6 [row 4, 5]
+
+    pmaddubsw     m5,         m2,               [r3 + 1 * 16]
+    pmulhrsw      m5,         m7
+
+    pxor           m6,        m6
+
+    packuswb      m5,         m6
+    movh          [r0 + 288], m5
+
+    ; mode 6 [row 6, 7]
+
+    pmaddubsw     m5,         m2,               [r3 + 27 * 16]
+    pmulhrsw      m5,         m7
+
+    pxor          m6,         m6
+
+    packuswb      m5,         m6
+    movh          [r0 + 304], m5
+
+    ; mode 5 [calculate row 6]
+
+    pmaddubsw     m6,         m1,               [r3 + 23 * 16]
+    pmulhrsw      m6,         m7
+
+    ; mode 3 [row 4, 5]
+
+    palignr       m1,         m0,               5
+
+    punpcklbw     m3,         m1
+    pmaddubsw     m4,         m3,               [r3 + 2 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m3,               [r3 + 28 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 96],  m4
+
+    ; mode 4 [calculate row 7]
+
+    pmaddubsw     m5,         m3,               [r3 + 19 * 16]
+    pmulhrsw      m5,         m7
+
+    ; mode 5 [calculate row 6]
+
+    pmaddubsw     m4,         m3,               [r3 + 8 * 16]
+    pmulhrsw      m4,         m7
+
+    packuswb      m6,         m4
+    movu          [r0 + 240], m6
+
+    ; mode 3 [row 6, 7]
+
+    palignr       m2,         m0,               6
+    palignr       m3,         m0,               7
+
+    punpcklbw     m1,         m2
+    pmaddubsw     m4,         m1,               [r3 + 22 * 16]
+    pmulhrsw      m4,         m7
+
+    punpcklbw     m2,         m3
+    pmaddubsw     m2,         [r3 + 16 * 16]
+    pmulhrsw      m2,         m7
+
+    packuswb      m4,         m2
+    movu          [r0 + 112], m4
+
+    ; mode 4 [calculate row 7]
+
+    pmaddubsw     m2,         m1,               [r3 + 8 * 16]
+    pmulhrsw      m2,         m7
+
+    ; mode 4 [store row 6 and 7]
+
+    packuswb      m5,         m2
+    movu          [r0 + 176], m5
+
+    ; mode 4 [row 2, 3]
+
+    palignr       m1,         m0,               1
+    palignr       m2,         m0,               2
+    palignr       m3,         m0,               3
+
+    punpcklbw     m1,         m2
+    pmaddubsw     m4,         m1,               [r3 + 31 * 16]
+    pmulhrsw      m4,         m7
+
+    punpcklbw     m2,         m3
+    pmaddubsw     m5,         m2,               [r3 + 20 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 144], m4
+
+    ; mode 5 [row 2, 3]
+
+    pmaddubsw     m4,         m1,               [r3 + 19 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m2,               [r3 + 4 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 208], m4
+
+    ; mode 7 [row 6, 7]
+
+    pmaddubsw     m4,         m1,               [r3 + 31 * 16]
+    pmulhrsw      m4,         m7
+
+    pmaddubsw     m5,         m2,               [r3 + 8 * 16]
+    pmulhrsw      m5,         m7
+
+    packuswb      m4,         m5
+    movu          [r0 + 368], m4
+
+    ; mode 10
+
+    pshufb       m1,          m0,          [tab_Si]
+    movu         [r0 + 512],  m1
+    movu         [r0 + 528],  m1
+    movu         [r0 + 544],  m1
+    movu         [r0 + 560],  m1
+
+    pxor         m0,          m0
+
+    pshufb       m1,          m1,          m0
+    punpcklbw    m1,          m0
+
+    movu         m2,          [r1]
+
+    pshufb       m3,          m2,          m0
+    punpcklbw    m3,          m0
+
+    psrldq       m4,          m2,          1
+    punpcklbw    m4,          m0
+
+    movu         m2,          [r1 + 9]
+    punpcklbw    m2,          m0
+
+    psubw        m4,          m3
+    psubw        m2,          m3
+
+    psraw        m4,          1
+    psraw        m2,          1
+
+    paddw        m4,          m1
+    paddw        m2,          m1
+
+    packuswb     m4,          m2
+
+    pextrb       [r0 + 512],  m4,          0
+    pextrb       [r0 + 520],  m4,          1
+    pextrb       [r0 + 528],  m4,          2
+    pextrb       [r0 + 536],  m4,          3
+    pextrb       [r0 + 544],  m4,          4
+    pextrb       [r0 + 552],  m4,          5
+    pextrb       [r0 + 560],  m4,          6
+    pextrb       [r0 + 568],  m4,          7
+
+    ; mode 11 [row 0, 1]
+
+    movu         m0,         [r1 + 16]
+    pinsrb       m0,         [r1], 0
+    palignr      m1,         m0,          1
+    punpcklbw    m2,         m0,          m1
+
+    pmaddubsw    m3,         m2,          [r3 + 30 * 16]
+    pmulhrsw     m3,         m7
+
+    pmaddubsw    m4,         m2,          [r3 + 28 * 16]
+    pmulhrsw     m4,         m7
+
+    packuswb     m3,         m4
+    movu         [r0 + 576], m3
+
+    ; mode 11 [row 2, 3]
+
+    pmaddubsw    m3,         m2,          [r3 + 26 * 16]
+    pmulhrsw     m3,         m7
+
+    pmaddubsw    m4,         m2,          [r3 + 24 * 16]
+    pmulhrsw     m4,         m7
+
+    packuswb     m3,         m4
+    movu         [r0 + 592], m3
+
+    ; mode 11 [row 4, 5]
+
+    pmaddubsw    m3,         m2,          [r3 + 22 * 16]
+    pmulhrsw     m3,         m7
+
+    pmaddubsw    m4,         m2,          [r3 + 20 * 16]
+    pmulhrsw     m4,         m7
+
+    packuswb     m5,         m3,         m4
+    movu         [r0 + 608], m5
+
+    ; mode 12 [row 0, 1]
+
+    pmaddubsw    m4,         m2,          [r3 + 27 * 16]
+    pmulhrsw     m4,         m7
+
+    packuswb     m4,         m3
+    movu         [r0 + 640], m4
+
+    ; mode 11 [row 6, 7]
+
+    pmaddubsw    m3,         m2,          [r3 + 18 * 16]
+    pmulhrsw     m3,         m7
+
+    pmaddubsw    m4,         m2,          [r3 + 16 * 16]
+    pmulhrsw     m4,         m7
+
+    packuswb     m3,         m4
+    movu         [r0 + 624], m3
+
+    ; mode 12 [row 2, 3]
+
+    pmaddubsw    m3,         m2,          [r3 + 17 * 16]
+    pmulhrsw     m3,         m7
+
+    pmaddubsw    m4,         m2,          [r3 + 12 * 16]
+    pmulhrsw     m4,         m7
+
+    packuswb     m3,         m4
+    movu         [r0 + 656], m3
+
+    ; mode 12 [row 4, 5]
+
+    pmaddubsw    m3,         m2,          [r3 + 7 * 16]
+    pmulhrsw     m3,         m7
+
+    pmaddubsw    m4,         m2,          [r3 + 2 * 16]
+    pmulhrsw     m4,         m7
+
+    packuswb     m3,         m4
+    movu         [r0 + 672], m3
+
+    ; mode 12 [row 6, 7]
+
+    pslldq       m3,         m2,          2
+    pinsrb       m3,         [r1 + 0],    1
+    pinsrb       m3,         [r1 + 6],    0
+
+    pmaddubsw    m4,         m3,          [r3 + 29 * 16]
+    pmulhrsw     m4,         m7
+
+    pmaddubsw    m5,         m3,          [r3 + 24 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 688], m4
+
+    ; mode 13 [row 0, 1]
+
+    pmaddubsw    m4,         m2,          [r3 + 23 * 16]
+    pmulhrsw     m4,         m7
+
+    pmaddubsw    m5,         m2,          [r3 + 14 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 704], m4
+
+    ; mode 13 [row 2, 3]
+
+    pmaddubsw    m4,         m2,          [r3 + 5 * 16]
+    pmulhrsw     m4,         m7
+
+    pinsrb       m3,         [r1 + 4],    0
+    pmaddubsw    m5,         m3,          [r3 + 28 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 720], m4
+
+    ; mode 13 [row 4, 5]
+
+    pmaddubsw    m4,         m3,          [r3 + 19 * 16]
+    pmulhrsw     m4,         m7
+
+    pmaddubsw    m5,         m3,          [r3 + 10 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 736], m4
+
+    ; mode 13 [row 6, 7]
+
+    pmaddubsw    m4,         m3,          [r3 + 1 * 16]
+    pmulhrsw     m4,         m7
+
+    pslldq       m5,         m3,          2
+    pinsrb       m5,         [r1 + 4],    1
+    pinsrb       m5,         [r1 + 7],    0
+
+    pmaddubsw    m5,         [r3 + 24 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 752], m4
+
+    ; mode 14 [row 0, 1]
+
+    pmaddubsw    m4,         m2,          [r3 + 19 * 16]
+    pmulhrsw     m4,         m7
+
+    pmaddubsw    m5,         m2,          [r3 + 6 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 768], m4
+
+    ; mode 14 [row 2, 3]
+
+    pinsrb       m3,         [r1 + 2],    0
+
+    pmaddubsw    m4,         m3,          [r3 + 25 * 16]
+    pmulhrsw     m4,         m7
+
+    pmaddubsw    m5,         m3,          [r3 + 12 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 784], m4
+
+    ; mode 14 [row 4, 5]
+
+    pslldq       m1,         m3,          2
+    pinsrb       m1,         [r1 + 2],    1
+    pinsrb       m1,         [r1 + 5],    0
+
+    pmaddubsw    m4,         m1,          [r3 + 31 * 16]
+    pmulhrsw     m4,         m7
+
+    pmaddubsw    m5,         m1,          [r3 + 18 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 800], m4
+
+    ; mode 14 [row 6, 7]
+
+    pmaddubsw    m4,         m1,          [r3 + 5 * 16]
+    pmulhrsw     m4,         m7
+
+    pslldq       m1,         2
+    pinsrb       m1,         [r1 + 5],    1
+    pinsrb       m1,         [r1 + 7],    0
+
+    pmaddubsw    m5,         m1,          [r3 + 24 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 816], m4
+
+    ; mode 15 [row 0, 1]
+
+    pmaddubsw    m4,         m2,          [r3 + 15 * 16]
+    pmulhrsw     m4,         m7
+
+    pmaddubsw    m5,         m3,          [r3 + 30 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 832], m4
+
+    ; mode 15 [row 2, 3]
+
+    pmaddubsw    m4,         m3,          [r3 + 13 * 16]
+    pmulhrsw     m4,         m7
+
+    pslldq       m1,         m3,          2
+    pinsrb       m1,         [r1 + 2],    1
+    pinsrb       m1,         [r1 + 4],    0
+
+    pmaddubsw    m5,         m1,          [r3 + 28 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 848], m4
+
+    ; mode 15 [row 4, 5]
+
+    pmaddubsw    m4,         m1,          [r3 + 11 * 16]
+    pmulhrsw     m4,         m7
+
+    pslldq       m1,         2
+    pinsrb       m1,         [r1 + 4],    1
+    pinsrb       m1,         [r1 + 6],    0
+
+    pmaddubsw    m5,         m1,          [r3 + 26 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 864], m4
+
+    ; mode 15 [row 6, 7]
+
+    pmaddubsw    m4,         m1,          [r3 + 9 * 16]
+    pmulhrsw     m4,         m7
+
+    pslldq       m1,         2
+    pinsrb       m1,         [r1 + 6],    1
+    pinsrb       m1,         [r1 + 8],    0
+
+    pmaddubsw    m1,          [r3 + 24 * 16]
+    pmulhrsw     m1,         m7
+
+    packuswb     m4,         m1
+    movu         [r0 + 880], m4
+
+    ; mode 16 [row 0, 1]
+
+    pmaddubsw    m4,         m2,          [r3 + 11 * 16]
+    pmulhrsw     m4,         m7
+
+    pmaddubsw    m5,         m3,          [r3 + 22 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 896], m4
+
+    ; mode 16 [row 2, 3]
+
+    pmaddubsw    m4,         m3,          [r3 + 1 * 16]
+    pmulhrsw     m4,         m7
+
+    pslldq       m3,         2
+    pinsrb       m3,         [r1 + 2],    1
+    pinsrb       m3,         [r1 + 3],    0
+
+    pmaddubsw    m5,         m3,          [r3 + 12 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 912], m4
+
+    ; mode 16 [row 4, 5]
+
+    pslldq       m3,         2
+    pinsrb       m3,         [r1 + 3],    1
+    pinsrb       m3,         [r1 + 5],    0
+
+    pmaddubsw    m4,         m3,          [r3 + 23 * 16]
+    pmulhrsw     m4,         m7
+
+    pmaddubsw    m5,         m3,          [r3 + 2 * 16]
+    pmulhrsw     m5,         m7
+
+    packuswb     m4,         m5
+    movu         [r0 + 928], m4
+
+    ; mode 16 [row 6, 7]
+
+    pslldq       m3,         2
+    pinsrb       m3,         [r1 + 5],    1
+    pinsrb       m3,         [r1 + 6],    0
+
+    pmaddubsw    m4,         m3,          [r3 + 13 * 16]
+    pmulhrsw     m4,         m7
+
+    pslldq       m3,         2
+    pinsrb       m3,         [r1 + 6],    1
+    pinsrb       m3,         [r1 + 8],    0
+
+    pmaddubsw    m3,         [r3 + 24 * 16]
+    pmulhrsw     m3,         m7
+
+    packuswb     m4,         m3
+    movu         [r0 + 944], m4
+
+    ; mode 17 [row 0, 1]
+
+    pmaddubsw    m4,         m2,          [r3 + 6 * 16]
+    pmulhrsw     m4,         m7
+
+    pslldq       m2,         2
+    pinsrb       m2,         [r1 + 0],    1
+    pinsrb       m2,         [r1 + 1],    0
+
+    pmaddubsw    m3,         m2,          [r3 + 12 * 16]
+    pmulhrsw     m3,         m7
+
+    packuswb     m4,         m3
+    movu         [r0 + 960], m4
+
+    ; mode 17 [row 2, 3]
+
+    pslldq       m2,         2
+    pinsrb       m2,         [r1 + 1],    1
+    pinsrb       m2,         [r1 + 2],    0
+
+    pmaddubsw    m4,         m2,          [r3 + 18 * 16]
+    pmulhrsw     m4,         m7
+
+    pslldq       m2,         2
+    pinsrb       m2,         [r1 + 2],    1
+    pinsrb       m2,         [r1 + 4],    0
+
+    pmaddubsw    m3,         m2,          [r3 + 24 * 16]
+    pmulhrsw     m3,         m7
+
+    packuswb     m4,         m3
+    movu         [r0 + 976], m4
+
+    ; mode 17 [row 4, 5]
+
+    pslldq       m2,         2
+    pinsrb       m2,         [r1 + 4],    1
+    pinsrb       m2,         [r1 + 5],    0
+
+    pmaddubsw    m4,         m2,          [r3 + 30 * 16]
+    pmulhrsw     m4,         m7
+
+    pmaddubsw    m3,         m2,          [r3 + 4 * 16]
+    pmulhrsw     m3,         m7
+
+    packuswb     m4,         m3
+    movu         [r0 + 992], m4
+
+    ; mode 17 [row 6, 7]
+
+    pslldq       m2,          2
+    pinsrb       m2,          [r1 + 5],    1
+    pinsrb       m2,          [r1 + 6],    0
+
+    pmaddubsw    m4,          m2,          [r3 + 10 * 16]
+    pmulhrsw     m4,          m7
+
+    pslldq       m2,          2
+    pinsrb       m2,          [r1 + 6],    1
+    pinsrb       m2,          [r1 + 7],    0
+
+    pmaddubsw    m3,          m2,          [r3 + 16 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m4,          m3
+    movu         [r0 + 1008], m4
+
+    ; mode 18 [row 0, 1, 2, 3, 4, 5, 6, 7]
+
+    movh          m1,          [r2]
+
+    pslldq        m2,          m1,         1
+    pinsrb        m2,          [r2 + 1 + 16],   0
+    punpcklqdq    m1,          m2
+    movu          [r0 + 1024], m1
+
+    pslldq        m2,          1
+    pinsrb        m2,          [r2 + 2 + 16],   0
+
+    pslldq        m0,          m2,          1
+    pinsrb        m0,          [r2 + 3 + 16],   0
+    punpcklqdq    m2,          m0
+    movu          [r0 + 1040], m2
+
+    pslldq        m0,          1
+    pinsrb        m0,          [r2 + 4 + 16],   0
+
+    pslldq        m2,          m0,              1
+    pinsrb        m2,          [r2 + 5 + 16],   0
+    punpcklqdq    m0,          m2
+    movu          [r0 + 1056], m0
+
+    pslldq        m2,          1
+    pinsrb        m2,          [r2 + 6 + 16],   0
+
+    pslldq        m0,           m2,             1
+    pinsrb        m0,          [r2 + 7 + 16],   0
+    punpcklqdq    m2,          m0
+    movu          [r0 + 1072], m2
+
+    ; mode 19 [row 0, 1]
+
+    movu         m0,          [r1]
+    palignr      m1,          m0,          1
+    punpcklbw    m0,          m1
+
+    pmaddubsw    m1,          m0,          [r3 + 6 * 16]
+    pmulhrsw     m1,          m7
+
+    pslldq       m2,          m0,          2
+    pinsrb       m2,          [r1],        1
+    pinsrb       m2,          [r1 + 1 + 16], 0
+
+    pmaddubsw    m3,          m2,          [r3 + 12 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m1,          m3
+    movu         [r0 + 1088], m1
+
+    ; mode 19 [row 2, 3]
+
+    pslldq       m2,          2
+    pinsrb       m2,          [r1 + 1 + 16], 1
+    pinsrb       m2,          [r1 + 2 + 16], 0
+
+    pmaddubsw    m4,          m2,          [r3 + 18 * 16]
+    pmulhrsw     m4,          m7
+
+    pslldq       m2,          2
+    pinsrb       m2,          [r1 + 2 + 16],    1
+    pinsrb       m2,          [r1 + 4 + 16],    0
+
+    pmaddubsw    m5,          m2,          [r3 + 24 * 16]
+    pmulhrsw     m5,          m7
+
+    packuswb     m4,          m5
+    movu         [r0 + 1104], m4
+
+    ; mode 19 [row 4, 5]
+
+    pslldq       m2,          2
+    pinsrb       m2,          [r1 + 4 + 16], 1
+    pinsrb       m2,          [r1 + 5 + 16], 0
+
+    pmaddubsw    m4,          m2,          [r3 + 30 * 16]
+    pmulhrsw     m4,          m7
+
+    pmaddubsw    m5,          m2,          [r3 + 4 * 16]
+    pmulhrsw     m5,          m7
+
+    packuswb     m4,          m5
+    movu         [r0 + 1120], m4
+
+    ; mode 19 [row 6, 7]
+
+    pslldq       m2,          2
+    pinsrb       m2,          [r1 + 5 + 16], 1
+    pinsrb       m2,          [r1 + 6 + 16], 0
+
+    pmaddubsw    m4,          m2,          [r3 + 10 * 16]
+    pmulhrsw     m4,          m7
+
+    pslldq       m2,          2
+    pinsrb       m2,          [r1 + 6 + 16], 1
+    pinsrb       m2,          [r1 + 7 + 16], 0
+
+    pmaddubsw    m2,          [r3 + 16 * 16]
+    pmulhrsw     m2,          m7
+
+    packuswb     m4,          m2
+    movu         [r0 + 1136], m4
+
+    ; mode 20 [row 0, 1]
+
+    pmaddubsw    m3,          m0,          [r3 + 11 * 16]
+    pmulhrsw     m3,          m7
+
+    pslldq       m1,          m0,          2
+    pinsrb       m1,          [r1 + 0],    1
+    pinsrb       m1,          [r1 + 2 + 16], 0
+
+    pmaddubsw    m4,          m1,          [r3 + 22 * 16]
+    pmulhrsw     m4,          m7
+
+    packuswb     m3,          m4
+    movu         [r0 + 1152], m3
+
+    ; mode 20 [row 2, 3]
+
+    pmaddubsw    m3,          m1,          [r3 + 1 * 16]
+    pmulhrsw     m3,          m7
+
+    pslldq       m2,          m1,          2
+    pinsrb       m2,          [r1 + 2 + 16], 1
+    pinsrb       m2,          [r1 + 3 + 16], 0
+
+    pmaddubsw    m4,          m2,          [r3 + 12 * 16]
+    pmulhrsw     m4,          m7
+
+    packuswb     m3,          m4
+    movu         [r0 + 1168], m3
+
+    ; mode 20 [row 4, 5]
+
+    pslldq       m2,          2
+    pinsrb       m2,          [r1 + 3 + 16], 1
+    pinsrb       m2,          [r1 + 5 + 16], 0
+
+    pmaddubsw    m3,          m2,          [r3 + 23 * 16]
+    pmulhrsw     m3,          m7
+
+    pmaddubsw    m4,          m2,          [r3 + 2 * 16]
+    pmulhrsw     m4,          m7
+
+    packuswb     m3,          m4
+    movu         [r0 + 1184], m3
+
+    ; mode 20 [row 6, 7]
+
+    pslldq       m2,          2
+    pinsrb       m2,          [r1 + 5 + 16], 1
+    pinsrb       m2,          [r1 + 6 + 16], 0
+
+    pmaddubsw    m3,          m2,          [r3 + 13 * 16]
+    pmulhrsw     m3,          m7
+
+    pslldq       m2,          2
+    pinsrb       m2,          [r1 + 6 + 16], 1
+    pinsrb       m2,          [r1 + 8 + 16], 0
+
+    pmaddubsw    m4,          m2,          [r3 + 24 * 16]
+    pmulhrsw     m4,          m7
+
+    packuswb     m3,          m4
+    movu         [r0 + 1200], m3
+
+    ; mode 21 [row 0, 1]
+
+    pmaddubsw    m2,          m0,          [r3 + 15 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m1,          [r3 + 30 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1216], m2
+
+    ; mode 21 [row 2, 3]
+
+    pmaddubsw    m2,          m1,          [r3 + 13 * 16]
+    pmulhrsw     m2,          m7
+
+    pslldq       m3,          m1,          2
+    pinsrb       m3,          [r1 + 2 + 16], 1
+    pinsrb       m3,          [r1 + 4 + 16], 0
+
+    pmaddubsw    m4,          m3,          [r3 + 28 * 16]
+    pmulhrsw     m4,          m7
+
+    packuswb     m2,          m4
+    movu         [r0 + 1232], m2
+
+    ; mode 21 [row 4, 5]
+
+    pmaddubsw    m2,          m3,          [r3 + 11 * 16]
+    pmulhrsw     m2,          m7
+
+    pslldq       m3,          2
+    pinsrb       m3,          [r1 + 4 + 16], 1
+    pinsrb       m3,          [r1 + 6 + 16], 0
+
+    pmaddubsw    m4,          m3,          [r3 + 26 * 16]
+    pmulhrsw     m4,          m7
+
+    packuswb     m2,          m4
+    movu         [r0 + 1248], m2
+
+    ; mode 21 [row 6, 7]
+
+    pmaddubsw    m2,          m3,          [r3 + 9 * 16]
+    pmulhrsw     m2,          m7
+
+    pslldq       m3,          2
+    pinsrb       m3,          [r1 + 6 + 16], 1
+    pinsrb       m3,          [r1 + 8 + 16], 0
+
+    pmaddubsw    m4,          m3,          [r3 + 24 * 16]
+    pmulhrsw     m4,          m7
+
+    packuswb     m2,          m4
+    movu         [r0 + 1264], m2
+
+    ; mode 22 [row 0, 1]
+
+    pmaddubsw    m2,          m0,          [r3 + 19 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m4,          m0,          [r3 + 6 * 16]
+    pmulhrsw     m4,          m7
+
+    packuswb     m2,          m4
+    movu         [r0 + 1280], m2
+
+    ; mode 22 [row 2, 3]
+
+    pmaddubsw    m2,          m1,          [r3 + 25 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m1,          [r3 + 12 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1296], m2
+
+    ; mode 22 [row 4, 5]
+
+    pslldq       m1,          2
+    pinsrb       m1,          [r1 + 5 + 16], 0
+    pinsrb       m1,          [r1 + 2 + 16], 1
+
+    pmaddubsw    m2,          m1,          [r3 + 31 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m1,          [r3 + 18 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1312], m2
+
+    ; mode 22 [row 6, 7]
+
+    pmaddubsw    m2,          m1,          [r3 + 5 * 16]
+    pmulhrsw     m2,          m7
+
+    pslldq       m1,          2
+    pinsrb       m1,          [r1 + 5 + 16], 1
+    pinsrb       m1,          [r1 + 7 + 16], 0
+
+    pmaddubsw    m1,          [r3 + 24 * 16]
+    pmulhrsw     m1,          m7
+
+    packuswb     m2,          m1
+    movu         [r0 + 1328], m2
+
+    ; mode 23 [row 0, 1]
+
+    pmaddubsw    m2,          m0,          [r3 + 23 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m0,          [r3 + 14 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1344], m2
+
+    ; mode 23 [row 2, 3]
+
+    pmaddubsw    m2,          m0,          [r3 + 5 * 16]
+    pmulhrsw     m2,          m7
+
+    pslldq       m1,          m0,          2
+    pinsrb       m1,          [r1], 1
+    pinsrb       m1,          [r1 + 4 + 16], 0
+
+    pmaddubsw    m3,          m1,          [r3 + 28 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1360], m2
+
+    ; mode 23 [row 4, 5]
+
+    pmaddubsw    m2,          m1,          [r3 + 19 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m1,          [r3 + 10 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1376], m2
+
+    ; mode 23 [row 6, 7]
+
+    pmaddubsw    m2,          m1,          [r3 + 1 * 16]
+    pmulhrsw     m2,          m7
+
+    pslldq       m3,          m1,          2
+    pinsrb       m3,          [r1 + 4 + 16], 1
+    pinsrb       m3,          [r1 + 7 + 16], 0
+
+    pmaddubsw    m3,          [r3 + 24 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1392], m2
+
+    ; mode 24 [row 0, 1]
+
+    pmaddubsw    m2,          m0,          [r3 + 27 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m5,          m0,          [r3 + 22 * 16]
+    pmulhrsw     m5,          m7
+
+    packuswb     m2,          m5
+    movu         [r0 + 1408], m2
+
+    ; mode 24 [row 2, 3]
+
+    pmaddubsw    m2,          m0,          [r3 + 17 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m0,          [r3 + 12 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1424], m2
+
+    ; mode 24 [row 4, 5]
+
+    pmaddubsw    m2,          m0,          [r3 + 7 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m0,          [r3 + 2 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1440], m2
+
+    ; mode 24 [row 6, 7]
+
+    pinsrb       m1,          [r1 + 6 + 16], 0
+
+    pmaddubsw    m2,          m1,          [r3 + 29 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m1,          [r3 + 24 * 16]
+    pmulhrsw     m1,          m7
+
+    packuswb     m2,          m1
+    movu         [r0 + 1456], m2
+
+    ; mode 25 [row 0, 1]
+
+    pmaddubsw    m2,          m0,          [r3 + 30 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m1,          m0,          [r3 + 28 * 16]
+    pmulhrsw     m1,          m7
+
+    packuswb     m2,          m1
+    movu         [r0 + 1472], m2
+
+    ; mode 25 [row 2, 3]
+
+    pmaddubsw    m2,          m0,          [r3 + 26 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m1,          m0,          [r3 + 24 * 16]
+    pmulhrsw     m1,          m7
+
+    packuswb     m2,          m1
+    movu         [r0 + 1488], m2
+
+    ; mode 25 [row 4, 5]
+
+    pmaddubsw    m1,          m0,          [r3 + 20 * 16]
+    pmulhrsw     m1,          m7
+
+    packuswb     m5,          m1
+    movu         [r0 + 1504], m5
+
+    ; mode 25 [row 6, 7]
+
+    pmaddubsw    m2,          m0,          [r3 + 18 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m1,          m0,          [r3 + 16 * 16]
+    pmulhrsw     m1,          m7
+
+    packuswb     m2,          m1
+    movu         [r0 + 1520], m2
+
+    ; mode 26
+
+    movu         m0,          [r1 + 1]
+
+    pshufb       m1,          m0,          [tab_Si]
+    movu         [r0 + 1536], m1
+    movu         [r0 + 1552], m1
+    movu         [r0 + 1568], m1
+    movu         [r0 + 1584], m1
+
+    pxor         m5,          m5
+
+    pshufb       m1,          m1,          m5
+    punpcklbw    m1,          m5
+
+    movu         m2,          [r1 + 16]
+    pinsrb       m2,          [r1], 0
+
+    pshufb       m3,          m2,          m5
+    punpcklbw    m3,          m5
+
+    psrldq       m4,          m2,          1
+    punpcklbw    m4,          m5
+
+    movu         m2,          [r1 + 9 + 16]
+    punpcklbw    m2,          m5
+
+    psubw        m4,          m3
+    psubw        m2,          m3
+
+    psraw        m4,          1
+    psraw        m2,          1
+
+    paddw        m4,          m1
+    paddw        m2,          m1
+
+    packuswb     m4,          m2
+
+    pextrb       [r0 + 1536], m4,          0
+    pextrb       [r0 + 1544], m4,          1
+    pextrb       [r0 + 1552], m4,          2
+    pextrb       [r0 + 1560], m4,          3
+    pextrb       [r0 + 1568], m4,          4
+    pextrb       [r0 + 1576], m4,          5
+    pextrb       [r0 + 1584], m4,          6
+    pextrb       [r0 + 1592], m4,          7
+
+    ; mode 27 [row 0, 1]
+
+    palignr      m6,          m0,          1
+    punpcklbw    m4,          m0,          m6
+
+    pmaddubsw    m1,          m4,          [r3 + 2 * 16]
+    pmulhrsw     m1,          m7
+
+    pmaddubsw    m2,          m4,          [r3 + 4 * 16]
+    pmulhrsw     m2,          m7
+
+    packuswb     m1,          m2
+    movu         [r0 + 1600], m1
+
+    ; mode 27 [row 2, 3]
+
+    pmaddubsw    m1,          m4,          [r3 + 6 * 16]
+    pmulhrsw     m1,          m7
+
+    pmaddubsw    m2,          m4,          [r3 + 8 * 16]
+    pmulhrsw     m2,          m7
+
+    packuswb     m1,          m2
+    movu         [r0 + 1616], m1
+
+    ; mode 27 [row 4, 5]
+
+    pmaddubsw    m3,          m4,          [r3 + 10 * 16]
+    pmulhrsw     m3,          m7
+
+    pmaddubsw    m2,          m4,          [r3 + 12 * 16]
+    pmulhrsw     m2,          m7
+
+    packuswb     m1,          m3,          m2
+    movu         [r0 + 1632], m1
+
+    ; mode 27 [row 6, 7]
+
+    pmaddubsw    m1,          m4,          [r3 + 14 * 16]
+    pmulhrsw     m1,          m7
+
+    pmaddubsw    m2,          m4,          [r3 + 16 * 16]
+    pmulhrsw     m2,          m7
+
+    packuswb     m1,          m2
+    movu         [r0 + 1648], m1
+
+    ; mode 28 [row 0, 1]
+
+    pmaddubsw    m1,          m4,          [r3 + 5 * 16]
+    pmulhrsw     m1,          m7
+
+    packuswb     m1,          m3
+    movu         [r0 + 1664], m1
+
+    ; mode 28 [row 2, 3]
+
+    pmaddubsw    m1,          m4,          [r3 + 15 * 16]
+    pmulhrsw     m1,          m7
+
+    pmaddubsw    m2,          m4,          [r3 + 20 * 16]
+    pmulhrsw     m2,          m7
+
+    packuswb     m1,          m2
+    movu         [r0 + 1680], m1
+
+    ; mode 28 [row 4, 5]
+
+    pmaddubsw    m1,          m4,          [r3 + 25 * 16]
+    pmulhrsw     m1,          m7
+
+    pmaddubsw    m2,          m4,          [r3 + 30 * 16]
+    pmulhrsw     m2,          m7
+
+    packuswb     m1,          m2
+    movu         [r0 + 1696], m1
+
+    ; mode 28 [row 6, 7]
+
+    palignr      m1,          m0,          2
+    punpcklbw    m5,          m6,          m1
+
+    pmaddubsw    m2,          m5,          [r3 + 3 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m5,          [r3 + 8 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1712], m2
+
+    ; mode 29 [row 0, 1]
+
+    pmaddubsw    m2,          m4,          [r3 + 9 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m4,          [r3 + 18 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1728], m2
+
+    ; mode 29 [row 2, 3]
+
+    pmaddubsw    m2,          m4,          [r3 + 27 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m5,          [r3 + 4 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1744], m2
+
+    ; mode 29 [row 4, 5]
+
+    pmaddubsw    m2,          m5,          [r3 + 13 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m5,          [r3 + 22 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1760], m2
+
+    ; mode 29 [row 6, 7]
+
+    pmaddubsw    m2,          m5,          [r3 + 31 * 16]
+    pmulhrsw     m2,          m7
+
+    palignr      m6,          m0,          3
+    punpcklbw    m1,          m6
+
+    pmaddubsw    m3,          m1,          [r3 + 8 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1776], m2
+
+    ; mode 32 [row 2]
+
+    movh         [r0 + 1936], m2
+
+    ; mode 30 [row 0, 1]
+
+    pmaddubsw    m2,          m4,          [r3 + 13 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m4,          [r3 + 26 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1792], m2
+
+    ; mode 30 [row 2, 3]
+
+    pmaddubsw    m2,          m5,          [r3 + 7 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m5,          [r3 + 20 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1808], m2
+
+    ; mode 33 [row 1]
+
+    movhps       [r0 + 1992], m2
+
+    ; mode 30 [row 4, 5]
+
+    pmaddubsw    m2,          m1,          [r3 + 1 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m1,          [r3 + 14 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1824], m2
+
+    ; mode 33 [row 2]
+
+    movhps       [r0 + 2000], m2
+
+    ; mode 30 [row 6, 7]
+
+    pmaddubsw    m2,          m1,          [r3 + 27 * 16]
+    pmulhrsw     m2,          m7
+
+    psrldq       m0,          4
+    punpcklbw    m6,          m0
+
+    pmaddubsw    m3,          m6,          [r3 + 8 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1840], m2
+
+    ; mode 33 [row 3]
+
+    movhps       [r0 + 2008], m2
+
+    ; mode 31 [row 0, 1]
+
+    pmaddubsw    m2,          m4,          [r3 + 17 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m5,          [r3 + 2 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1856], m2
+
+    ; mode 31 [row 2, 3]
+
+    pmaddubsw    m2,          m5,          [r3 + 19 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m1,          [r3 + 4 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1872], m2
+
+    ; mode 31 [row 4, 5]
+
+    pmaddubsw    m2,          m1,          [r3 + 21 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m6,          [r3 + 6 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1888], m2
+
+    ; mode 31 [row 6, 7]
+
+    pmaddubsw    m2,          m6,          [r3 + 23 * 16]
+    pmulhrsw     m2,          m7
+
+    movu         m3,          [r1 + 6]
+    punpcklbw    m0,          m3
+
+    pmaddubsw    m3,          m0,          [r3 + 8 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1904], m2
+
+    ; mode 32 [row 0, 1]
+
+    pmaddubsw    m2,          m4,          [r3 + 21 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m5,          [r3 + 10 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1920], m2
+
+    ; mode 32 [row 3]
+
+    pmaddubsw    m2,          m1,          [r3 + 20 * 16]
+    pmulhrsw     m2,          m7
+
+    pxor         m3,          m3
+
+    packuswb     m2,          m3
+    movh         [r0 + 1944], m2
+
+    ; mode 32 [row 4, 5]
+
+    pmaddubsw    m2,          m6,          [r3 + 9 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m6,          [r3 + 30 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1952], m2
+
+    ; mode 33 [row 4, 5]
+
+    pmaddubsw    m2,          m0,          [r3 + 2 * 16]
+    pmulhrsw     m2,          m7
+
+    pmaddubsw    m3,          m0,          [r3 + 28 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 2016], m2
+
+    ; mode 32 [row 6]
+
+    pmaddubsw    m2,          m0,          [r3 + 19 * 16]
+    pmulhrsw     m2,          m7
+
+    ; mode 32 [row 7]
+
+    movu         m0,          [r1 + 6]
+    palignr      m3,          m0,          1
+    punpcklbw    m0,          m3
+
+    pmaddubsw    m3,          m0,          [r3 + 8 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 1968], m2
+
+    ; mode 33 [row 6, 7]
+
+    pmaddubsw    m2,          m0,          [r3 + 22 * 16]
+    pmulhrsw     m2,          m7
+
+    movu         m0,          [r1 + 7]
+    palignr      m3,          m0,          1
+    punpcklbw    m0,          m3
+
+    pmaddubsw    m3,          m0,          [r3 + 16 * 16]
+    pmulhrsw     m3,          m7
+
+    packuswb     m2,          m3
+    movu         [r0 + 2032], m2
+
+    ; mode 33 [row 0]
+
+    pmaddubsw    m2,          m4,          [r3 + 26 * 16]
+    pmulhrsw     m2,          m7
+
+    pxor         m3,          m3
+
+    packuswb     m2,          m3
+    movh         [r0 + 1984], m2
+
+    ; mode 34 [row 0, 1, 2, 3, 4, 5, 6, 7]
+
+    movu         m0,          [r2 + 2]
+    palignr      m1,          m0,          1
+    punpcklqdq   m2,          m0,          m1
+    movu         [r0 + 2048], m2
+
+    palignr      m1,          m0,          2
+    palignr      m2,          m0,          3
+    punpcklqdq   m1,          m2
+    movu         [r0 + 2064], m1
+
+    palignr      m1,          m0,          4
+    palignr      m2,          m0,          5
+    punpcklqdq   m1,          m2
+    movu         [r0 + 2080], m1
+
+    palignr      m1,          m0,          6
+    palignr      m2,          m0,          7
+    punpcklqdq   m1,          m2
+    movu         [r0 + 2096], m1
+RET
+
+;--------------------------------------------------------------------------------
+; void all_angs_pred_16x16(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma)
+;--------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal all_angs_pred_16x16, 3,4,8
+    ; mode 2
+
+    movu      m0,               [r2 + 2 + 32]
+    movu      [r0 + 0 * 16],    m0
+
+    movu      m1,               m0
+
+    movu      m6,              [r2 + 18 + 32]
+    palignr   m5,              m6,             m0,    1
+    movu     [r0 + 1 * 16],    m5
+
+    movu      m4,               m5
+
+    palignr   m5,              m6,             m0,    2
+    movu      [r0 + 2 * 16],   m5
+    palignr   m5,              m6,             m0,    3
+    movu      [r0 + 3 * 16],   m5
+    palignr   m5,              m6,             m0,    4
+    movu      [r0 + 4 * 16],   m5
+    palignr   m5,              m6,             m0,    5
+    movu      [r0 + 5 * 16],   m5
+    palignr   m5,              m6,             m0,    6
+    movu      [r0 + 6 * 16],   m5
+    palignr   m5,              m6,             m0,    7
+    movu      [r0 + 7 * 16],   m5
+
+    movu      m7,               m5
+
+    palignr   m5,              m6,             m0,    8
+    movu      [r0 + 8 * 16],   m5
+
+    movu      m2,              m5
+
+    palignr   m5,              m6,             m0,    9
+    movu      [r0 + 9 * 16],   m5
+
+    palignr   m3,              m6,             m0,    10
+    movu      [r0 + 10 * 16],  m3
+    palignr   m3,              m6,             m0,    11
+    movu      [r0 + 11 * 16],  m3
+    palignr   m3,              m6,             m0,    12
+    movu      [r0 + 12 * 16],  m3
+
+    ; mode 3  [row 15]
+    movu      [r0 + (3-2)*16*16 + 15 * 16], m3
+
+    palignr   m3,              m6,             m0,    13
+    movu      [r0 + 13 * 16],   m3
+    palignr   m3,              m6,             m0,    14
+    movu      [r0 + 14 * 16],   m3
+    palignr   m3,              m6,             m0,    15
+    movu      [r0 + 15 * 16],   m3
+
+    ; mode 3 [row 0]
+    lea           r3,    [ang_table]
+    movu          m3,    [pw_1024]
+    movu          m0,    [r2 + 1 + 32]
+    punpcklbw     m0,    m1
+
+    ; mode 17 [row 8 - second half]
+    pmaddubsw     m1,                   m0,    [r3 + 22 * 16]
+    pmulhrsw      m1,                   m3
+    packuswb      m1,                   m1
+    movh          [r0 + 248 * 16 + 8],  m1
+    ; mode 17 [row 8 - second half] end
+
+    pmaddubsw     m1,    m0,        [r3 + 26 * 16]
+    pmulhrsw      m1,    m3
+    punpcklbw     m7,    m2
+    pmaddubsw     m2,    m7,        [r3 + 26 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 16 * 16],   m1
+
+    ;mode 6 [row 1]
+    movu          [r0 + 65 * 16],   m1
+
+    ; mode 4 [row 0]
+    pmaddubsw     m1,             m0,         [r3 + 21 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m2,             m7,         [r3 + 21 * 16]
+    pmulhrsw      m2,             m3
+    packuswb      m1,             m2
+    movu          [r0 + 32 * 16], m1
+
+    ; mode 5 [row 0]
+    pmaddubsw     m1,             m0,         [r3 + 17 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m2,             m7,         [r3 + 17 * 16]
+    pmulhrsw      m2,             m3
+    packuswb      m1,             m2
+    movu          [r0 + 48 * 16], m1
+
+    ; mode 6 [row 0]
+    pmaddubsw     m1,             m0,         [r3 + 13 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m2,             m7,         [r3 + 13 * 16]
+    pmulhrsw      m2,             m3
+    packuswb      m1,             m2
+    movu          [r0 + 64 * 16], m1
+
+    ; mode 7 [row 0]
+    pmaddubsw     m1,             m0,        [r3 + 9 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m2,             m7,        [r3 + 9 * 16]
+    pmulhrsw      m2,             m3
+    packuswb      m1,             m2
+    movu          [r0 + 80 * 16], m1
+
+    ; mode 7 [row 1]
+    pmaddubsw     m1,             m0,         [r3 + 18 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m2,             m7,         [r3 + 18 * 16]
+    pmulhrsw      m2,             m3
+    packuswb      m1,             m2
+    movu          [r0 + 81 * 16], m1
+
+    ; mode 7 [row 2]
+    pmaddubsw     m1,             m0,         [r3 + 27 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m2,             m7,         [r3 + 27 * 16]
+    pmulhrsw      m2,             m3
+    packuswb      m1,             m2
+    movu          [r0 + 82 * 16], m1
+
+    ; mode 8 [row 0]
+    pmaddubsw     m1,             m0,        [r3 + 5 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m2,             m7,        [r3 + 5 * 16]
+    pmulhrsw      m2,             m3
+    packuswb      m1,             m2
+    movu          [r0 + 96 * 16], m1
+
+    ; mode 8 [row 1]
+    pmaddubsw     m1,             m0,         [r3 + 10 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m2,             m7,         [r3 + 10 * 16]
+    pmulhrsw      m2,             m3
+    packuswb      m1,             m2
+    movu          [r0 + 97 * 16], m1
+
+    ; mode 8 [row 2]
+    pmaddubsw     m1,             m0,         [r3 + 15 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m2,             m7,         [r3 + 15 * 16]
+    pmulhrsw      m2,             m3
+    packuswb      m1,             m2
+    movu          [r0 + 98 * 16], m1
+
+    ; mode 8 [row 3]
+    pmaddubsw     m1,             m0,         [r3 + 20 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m2,             m7,         [r3 + 20 * 16]
+    pmulhrsw      m2,             m3
+    packuswb      m1,             m2
+    movu          [r0 + 99 * 16], m1
+
+    ; mode 8 [row 4]
+    pmaddubsw     m1,              m0,         [r3 + 25 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m2,              m7,         [r3 + 25 * 16]
+    pmulhrsw      m2,              m3
+    packuswb      m1,              m2
+    movu          [r0 + 100 * 16], m1
+
+    ; mode 8 [row 5]
+    pmaddubsw     m1,              m0,         [r3 + 30 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m2,              m7,         [r3 + 30 * 16]
+    pmulhrsw      m2,              m3
+    packuswb      m1,              m2
+    movu          [r0 + 101 * 16], m1
+
+    ; mode 15 [row 13 - second half]
+    pmaddubsw     m1,                  m0,     [r3 + 18 * 16]
+    pmulhrsw      m1,                  m3
+    packuswb      m1,                  m1
+    movh          [r0 + 221 * 16 + 8], m1
+    ; mode 15 [row 13 - second half] end
+
+    ; mode 15 [row 14 - second half]
+    pmaddubsw     m1,                  m0,     [r3 + 1 * 16]
+    pmulhrsw      m1,                  m3
+    packuswb      m1,                  m1
+    movh          [r0 + 222 * 16 + 8], m1
+    ; mode 15 [row 14 - second half] end
+
+    ; mode 16 [row 10 - second half]
+    pmaddubsw     m1,                  m0,    [r3 + 25 * 16]
+    pmulhrsw      m1,                  m3
+    packuswb      m1,                  m1
+    movh          [r0 + 234 * 16 + 8], m1
+    ; mode 16 [row 10 - second half] end
+
+    ; mode 16 [row 11 - second half]
+    pmaddubsw     m1,                  m0,    [r3 + 4 * 16]
+    pmulhrsw      m1,                  m3
+    packuswb      m1,                  m1
+    movh          [r0 + 235 * 16 + 8], m1
+    ; mode 16 [row 11 - second half] end
+
+    ; mode 3 [row 1]
+    movu          m6,    [r3 + 20 * 16]
+    movu          m0,    [r2 + 2 + 32]
+    punpcklbw     m0,    m4
+
+    ; mode 17 [row 7 - second half]
+    pmaddubsw     m1,     m0,          [r3 + 16 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                  m1
+    movh          [r0 + 247 * 16 + 8], m1
+
+    ; mode 17 [row 7 - second half] end
+    pmaddubsw     m1,             m0,          m6
+    pmulhrsw      m1,             m3
+    movu          m2,             [r2 + 10 + 32]
+    punpcklbw     m2,             m5
+    pmaddubsw     m4,             m2,          m6
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 17 * 16], m1
+
+    ;mode 6 [row 3]
+    movu          [r0 + 67 * 16], m1
+
+    ; mode 4 row [row 1]
+    pmaddubsw     m1,             m0,         [r3 + 10 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 10 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 33 * 16], m1
+
+    ; mode 4 row [row 2]
+    pmaddubsw     m1,             m0,         [r3 + 31 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 31 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 34 * 16], m1
+
+    ; mode 7 [row 6]
+    movu          [r0 + 86 * 16], m1
+
+    ; mode 5 row [row 1]
+    pmaddubsw     m1,             m0,        [r3 + 2 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,        [r3 + 2 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 49 * 16], m1
+
+    ; mode 5 row [row 2]
+    pmaddubsw     m1,             m0,         [r3 + 19 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 19 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 50 * 16], m1
+
+    ; mode 6 [row 2]
+    pmaddubsw     m1,             m0,        [r3 + 7 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,        [r3 + 7 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 66 * 16], m1
+
+    ; mode 7 [row 3]
+    pmaddubsw     m1,             m0,        [r3 + 4 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,        [r3 + 4 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 83 * 16], m1
+
+    ; mode 7 [row 4]
+    pmaddubsw     m1,             m0,         [r3 + 13 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 13 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 84 * 16], m1
+
+    ; mode 8 [row 8]
+    movu          [r0 + 104 * 16], m1
+
+    ; mode 7 [row 5]
+    pmaddubsw     m1,             m0,         [r3 + 22 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 22 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 85 * 16], m1
+
+    ; mode 8 [row 6]
+    pmaddubsw     m1,              m0,      [r3 + 3 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m4,              m2,      [r3 + 3 * 16]
+    pmulhrsw      m4,              m3
+    packuswb      m1,              m4
+    movu          [r0 + 102 * 16], m1
+
+    ; mode 8 [row 7]
+    pmaddubsw     m1,              m0,        [r3 + 8 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m4,              m2,        [r3 + 8 * 16]
+    pmulhrsw      m4,              m3
+    packuswb      m1,              m4
+    movu          [r0 + 103 * 16], m1
+
+    ; mode 8 [row 9]
+    pmaddubsw     m1,              m0,         [r3 + 18 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m4,              m2,         [r3 + 18 * 16]
+    pmulhrsw      m4,              m3
+    packuswb      m1,              m4
+    movu          [r0 + 105 * 16], m1
+
+    ; mode 8 [row 10]
+    pmaddubsw     m1,              m0,         [r3 + 23 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m4,              m2,         [r3 + 23 * 16]
+    pmulhrsw      m4,              m3
+    packuswb      m1,              m4
+    movu          [r0 + 106 * 16], m1
+
+    ; mode 8 [row 11]
+    pmaddubsw     m1,              m0,         [r3 + 28 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m4,              m2,         [r3 + 28 * 16]
+    pmulhrsw      m4,              m3
+    packuswb      m1,              m4
+    movu          [r0 + 107 * 16], m1
+
+    ; mode 3 [row 2]
+    movu          m0,    [r2 + 3 + 32]
+    movd          m1,    [r2 + 19 + 32]
+    palignr       m1,    m0,          1
+    punpcklbw     m0,    m1
+
+    ; mode 17 [row 6 - second half]
+    pmaddubsw     m1,                  m0,     [r3 + 10 * 16]
+    pmulhrsw      m1,                  m3
+    packuswb      m1,                  m1
+    movh          [r0 + 246 * 16 + 8], m1
+    ; mode 17 [row 6 - second half] end
+
+    pmaddubsw     m1,             m0,          [r3 + 14 * 16]
+    pmulhrsw      m1,             m3
+    movu          m2,             [r2 + 11 + 32]
+    movd          m4,             [r2 + 27 + 32]
+    palignr       m4,             m2,          1
+    punpcklbw     m2,             m4
+    pmaddubsw     m4,             m2,          [r3 + 14 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 18 * 16], m1
+
+    ; mode 6 [row 5]
+    movu          [r0 + 69 * 16], m1
+
+    ; mode 4 row [row 3]
+    pmaddubsw     m1,             m0,         [r3 + 20 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 20 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 35 * 16], m1
+
+    ; mode 5 row [row 3]
+    pmaddubsw     m1,             m0,        [r3 + 4 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,        [r3 + 4 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 51 * 16], m1
+
+    ; mode 5 row [row 4]
+    pmaddubsw     m1,             m0,         [r3 + 21 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 21 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 52 * 16], m1
+
+    ; mode 6 [row 4]
+    pmaddubsw     m1,             m0,        [r3 + 1 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,        [r3 + 1 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 68 * 16], m1
+
+    ; mode 6 [row 6]
+    pmaddubsw     m1,             m0,      [r3 + 27 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,      [r3 + 27 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 70 * 16], m1
+
+    ; mode 7 [row 7]
+    pmaddubsw     m1,             m0,        [r3 + 8 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,        [r3 + 8 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 87 * 16], m1
+
+    ; mode 7 [row 8]
+    pmaddubsw     m1,             m0,         [r3 + 17 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 17 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 88 * 16], m1
+
+    ; mode 7 [row 9]
+    pmaddubsw     m1,             m0,       [r3 + 26 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,       [r3 + 26 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 89 * 16], m1
+
+    ; mode 8 [row 12]
+    pmaddubsw     m1,              m0,        [r3 + 1 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m4,              m2,        [r3 + 1 * 16]
+    pmulhrsw      m4,              m3
+    packuswb      m1,              m4
+    movu          [r0 + 108 * 16], m1
+
+    ; mode 8 [row 13]
+    pmaddubsw     m1,              m0,      [r3 + 6 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m4,              m2,      [r3 + 6 * 16]
+    pmulhrsw      m4,              m3
+    packuswb      m1,              m4
+    movu          [r0 + 109 * 16], m1
+
+    ; mode 8 [row 14]
+    pmaddubsw     m1,              m0,         [r3 + 11 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m4,              m2,         [r3 + 11 * 16]
+    pmulhrsw      m4,              m3
+    packuswb      m1,              m4
+    movu          [r0 + 110 * 16], m1
+
+    ; mode 8 [row 15]
+    pmaddubsw     m1,              m0,         [r3 + 16 * 16]
+    pmulhrsw      m1,              m3
+    pmaddubsw     m4,              m2,         [r3 + 16 * 16]
+    pmulhrsw      m4,              m3
+    packuswb      m1,              m4
+    movu          [r0 + 111 * 16], m1
+
+    ; mode 3 [row 3]
+    movu          m0,              [r2 + 4 + 32]
+    movd          m1,              [r2 + 20 + 32]
+    palignr       m1,              m0,          1
+    punpcklbw     m0,              m1
+
+    ; mode 17 [row 4 - second half]
+    pmaddubsw     m1,                  m0,    [r3 + 30 * 16]
+    pmulhrsw      m1,                  m3
+    packuswb      m1,                  m1
+    movh          [r0 + 244 * 16 + 8], m1
+    ; mode 17 [row 4 - second half] end
+
+    ; mode 17 [row 5 - second half]
+    pmaddubsw     m1,                  m0,    [r3 + 4 * 16]
+    pmulhrsw      m1,                  m3
+    packuswb      m1,                  m1
+    movh          [r0 + 245 * 16 + 8], m1
+    ; mode 17 [row 5 - second half] end
+
+    pmaddubsw     m1,             m0,          [r3 + 8 * 16]
+    pmulhrsw      m1,             m3
+    movu          m2,             [r2 + 12 + 32]
+    movd          m4,             [r2 + 28 + 32]
+    palignr       m4,             m2,          1
+    punpcklbw     m2,             m4
+    pmaddubsw     m4,             m2,          [r3 + 8 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 19 * 16], m1
+
+    ; mode 6 [row 7]
+    movu          [r0 + 71 * 16], m1
+
+    ; mode 4 row [row 4]
+    pmaddubsw     m1,             m0,        [r3 + 9 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,        [r3 + 9 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 36 * 16], m1
+
+    ; mode 4 row [row 5]
+    pmaddubsw     m1,             m0,        [r3 + 30 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 30 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 37 * 16], m1
+
+    ; mode 7 row [row 13]
+    movu          [r0 + 93 * 16], m1
+
+    ; mode 5 row [row 5]
+    pmaddubsw     m1,             m0,        [r3 + 6 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,        [r3 + 6 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 53 * 16], m1
+
+    ; mode 5 row [row 6]
+    pmaddubsw     m1,             m0,         [r3 + 23 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 23 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 54 * 16], m1
+
+    ; mode 6 [row 8]
+    pmaddubsw     m1,             m0,         [r3 + 21 * 16]
+    pmulhrsw      m1,             m3
+    pmaddubsw     m4,             m2,         [r3 + 21 * 16]
+    pmulhrsw      m4,             m3
+    packuswb      m1,             m4
+    movu          [r0 + 72 * 16], m1
+
+    ; mode 7 [row 12]
+    movu          [r0 + 92 * 16], m1
+
+    ; mode 7 [row 10]
+    pmaddubsw     m1,    m0,      [r3 + 3 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 3 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 90 * 16], m1
+
+    ; mode 7 [row 11]
+    pmaddubsw     m1,    m0,      [r3 + 12 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 91 * 16], m1
+
+    ; mode 3 [row 4]
+    movu          m0,    [r2 + 5 + 32]
+    movd          m1,    [r2 + 20 + 32]
+    palignr       m1,    m0,         1
+    punpcklbw     m0,    m1
+
+    ; mode 17 [row 3 - second half]
+    pmaddubsw     m1,     m0,           [r3 + 24 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 243 * 16 + 8],  m1
+
+    ; mode 17 [row 3 - second half] end
+    pmaddubsw     m1,    m0,          [r3 + 2 * 16]
+    pmulhrsw      m1,    m3
+    movu          m2,    [r2 + 13 + 32]
+    movd          m4,    [r2 + 29 + 32]
+    palignr       m4,    m2,          1
+    punpcklbw     m2,    m4
+    pmaddubsw     m4,    m2,          [r3 + 2 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 20 * 16], m1
+
+    ;mode 6 [row 9]
+    movu          [r0 + 73 * 16], m1
+
+    ; mode 4 row [row 6]
+    movu          m6,    [r3 + 19 * 16]
+    pmaddubsw     m1,    m0,      m6
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      m6
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 38 * 16], m1
+
+    ; mode 3 [row 5]
+    pmaddubsw     m1,    m0,      [r3 + 28 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 28 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 21 * 16], m1
+
+    ;mode 6 [row 11]
+    movu          [r0 + 75 * 16], m1
+
+    ; mode 5 row [row 7]
+    pmaddubsw     m1,    m0,      [r3 + 8 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 8 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 55 * 16], m1
+
+    ; mode 5 row [row 8]
+    pmaddubsw     m1,    m0,      [r3 + 25 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 25 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 56 * 16], m1
+
+    ; mode 6 [row 10]
+    pmaddubsw     m1,    m0,      [r3 + 15 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 15 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 74 * 16], m1
+
+    ; mode 7 [row 14]
+    pmaddubsw     m1,    m0,      [r3 + 7 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 7 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 94 * 16], m1
+
+    ; mode 7 [row 15]
+    pmaddubsw     m1,    m0,      [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 95 * 16], m1
+
+    ; mode 3 [row 6]
+    movu          m0,    [r2 + 6 + 32]
+    movd          m1,    [r2 + 22 + 32]
+    palignr       m1,    m0,          1
+    punpcklbw     m0,    m1
+
+    ; mode 17 [row 2 - second half]
+    pmaddubsw     m1,     m0,          [r3 + 18 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 242 * 16 + 8],  m1
+    ; mode 17 [row 2 - second half] end
+
+    pmaddubsw     m1,    m0,          [r3 + 22 * 16]
+    pmulhrsw      m1,    m3
+    movu          m2,    [r2 + 14 + 32]
+    movd          m4,    [r2 + 30 + 32]
+    palignr       m4,    m2,          1
+    punpcklbw     m2,    m4
+    pmaddubsw     m4,    m2,          [r3 + 22 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 22 * 16], m1
+
+    ; mode 6 [row 13]
+    movu          [r0 + 77 * 16], m1
+
+    ; mode 4 row [row 7]
+    pmaddubsw     m1,    m0,      [r3 + 8 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 8 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 39 * 16], m1
+
+    ; mode 4 row [row 8]
+    pmaddubsw     m1,    m0,       [r3 + 29 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,       [r3 + 29 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 40 * 16], m1
+
+    ; mode 5 row [row 9]
+    pmaddubsw     m1,    m0,      [r3 + 10 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 10 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 57 * 16], m1
+
+    ; mode 5 row [row 10]
+    pmaddubsw     m1,    m0,      [r3 + 27 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 27 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 58 * 16], m1
+
+    ; mode 6 [row 12]
+    pmaddubsw     m1,    m0,      [r3 + 9 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 9 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 76 * 16], m1
+
+    ; mode 3 [row 7]
+    movu          m0,    [r2 + 7 + 32]
+    movd          m1,    [r2 + 27 + 32]
+    palignr       m1,    m0,          1
+    punpcklbw     m0,    m1
+
+    ; mode 17 [row 1 - second half]
+    pmaddubsw     m1,     m0,           [r3 + 12 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 241 * 16 + 8],  m1
+    ; mode 17 [row 1 - second half] end
+
+    pmaddubsw     m1,    m0,          [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    movu          m2,    [r2 + 15 + 32]
+    movd          m4,    [r2 + 25 + 32]
+    palignr       m4,    m2,          1
+    punpcklbw     m2,    m4
+    pmaddubsw     m4,    m2,          [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 23 * 16], m1
+
+    ; mode 6 [row 15]
+    movu          [r0 + 79 * 16], m1
+
+    ; mode 4 row [row 9]
+    pmaddubsw     m1,    m0,      [r3 + 18 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 18 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 41 * 16], m1
+
+    ; mode 5 row [row 11]
+    pmaddubsw     m1,    m0,      [r3 + 12 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 59 * 16], m1
+
+    ; mode 5 row [row 12]
+    pmaddubsw     m1,    m0,      [r3 + 29 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 29 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 60 * 16], m1
+
+    ; mode 6 [row 14]
+    pmaddubsw     m1,    m0,      [r3 + 3 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 3 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 78 * 16], m1
+
+    ; mode 3 [row 8]
+    movu          m0,    [r2 + 8 + 32]
+    movd          m1,    [r2 + 24 + 32]
+    palignr       m1,    m0,          1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,          [r3 + 10 * 16]
+    pmulhrsw      m1,    m3
+    movu          m2,    [r2 + 16 + 32]
+    psrldq        m4,    m2,         1
+    pinsrb        m4,    [r2 + 32],  15
+    punpcklbw     m2,    m4
+    pmaddubsw     m4,    m2,          [r3 + 10 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 24 * 16], m1
+
+    ; mode 4 row [row 10]
+    pmaddubsw     m1,    m0,      [r3 + 7 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 7 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 42 * 16], m1
+
+    ; mode 4 row [row 11]
+    pmaddubsw     m1,    m0,      [r3 + 28 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 28 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 43 * 16], m1
+
+    ; mode 5 row [row 13]
+    pmaddubsw     m1,    m0,      [r3 + 14 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 14 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 61 * 16], m1
+
+    ; mode 5 row [row 14]
+    pmaddubsw     m1,    m0,      [r3 + 31 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 31 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 62 * 16], m1
+
+    ; mode 3 [row 9]
+    movu          m0,    [r2 +  9 + 32]
+    movd          m1,    [r2 + 16 + 32]
+    palignr       m1,    m0,         1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,         [r3 + 4 * 16]
+    pmulhrsw      m1,    m3
+    movu          m2,    [r2 + 17 + 32]
+    movd          m4,    [r2 + 33 + 32]
+    palignr       m4,    m2,         1
+    punpcklbw     m2,    m4
+    pmaddubsw     m4,    m2,         [r3 + 4 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 25 * 16], m1
+
+    ; mode 4 row [row 12]
+    pmaddubsw     m1,    m0,      [r3 + 17 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 17 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 44 * 16], m1
+
+    ; mode 3 [row 10]
+    pmaddubsw     m1,    m0,          [r3 + 30 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,          [r3 + 30 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 26 * 16], m1
+
+    ; mode 5 row [row 15]
+    pmaddubsw     m1,    m0,      [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 63 * 16], m1
+
+    ; mode 3 [row 11]
+    movu          m0,    [r2 + 10 + 32]
+    movd          m1,    [r2 + 26 + 32]
+    palignr       m1,    m0,          1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,          [r3 + 24 * 16]
+    pmulhrsw      m1,    m3
+    movu          m2,    [r2 + 18 + 32]
+    movd          m4,    [r2 + 34 + 32]
+    palignr       m4,    m2,         1
+    punpcklbw     m2,    m4
+    pmaddubsw     m4,    m2,         [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,                 m4
+    movu          [r0 + 27 * 16],     m1
+
+    ; mode 4 row [row 13]
+    pmaddubsw     m1,    m0,      [r3 + 6 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 6 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 45 * 16], m1
+
+    ; mode 4 row [row 14]
+    pmaddubsw     m1,    m0,      [r3 + 27 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 27 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 46 * 16], m1
+
+    ; mode 3 [row 12]
+    movu          m0,    [r2 + 11 + 32]
+    movd          m1,    [r2 + 27 + 32]
+    palignr       m1,    m0,          1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,          [r3 + 18 * 16]
+    pmulhrsw      m1,    m3
+    movu          m2,    [r2 + 19 + 32]
+    movd          m4,    [r2 + 35 + 32]
+    palignr       m4,    m2,          1
+    punpcklbw     m2,    m4
+    pmaddubsw     m4,    m2,          [r3 + 18 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 28 * 16], m1
+
+    ; mode 4 row [row 15]
+    pmaddubsw     m1,    m0,      [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m2,      [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 47 * 16], m1
+
+    ; mode 3 [row 13]
+    movu          m0,    [r2 + 12 + 32]
+    movd          m1,    [r2 + 28 + 32]
+    palignr       m1,    m0,          1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,          [r3 + 12 * 16]
+    pmulhrsw      m1,    m3
+    movu          m2,    [r2 + 20 + 32]
+    movd          m4,    [r2 + 36 + 32]
+    palignr       m4,    m2,          1
+    punpcklbw     m2,    m4
+    pmaddubsw     m4,    m2,          [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,             m4
+    movu          [r0 + 29 * 16], m1
+
+    ; mode 3 [row 14]
+    movu          m0,    [r2 + 13 + 32]
+    movd          m1,    [r2 + 29 + 32]
+    palignr       m1,    m0,         1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,         [r3 + 6 * 16]
+    pmulhrsw      m1,    m3
+    movu          m2,    [r2 + 21 + 32]
+    movd          m4,    [r2 + 37 + 32]
+    palignr       m4,    m2,         1
+    punpcklbw     m2,    m4
+    pmaddubsw     m4,    m2,         [r3 + 6 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,                m4
+    movu          [r0 + 30 * 16],    m1
+
+    ; mode 9
+    movu          m0,    [r1 + 1 + 32]
+    movd          m1,    [r1 + 17 + 32]
+    palignr       m1,    m0,         1
+
+    ; mode 9 [row 15]
+    movu          [r0 + 127 * 16],  m1
+
+    ; mode 9 [row 0]
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        [r3 + 2 * 16]
+    pmulhrsw      m1,    m3
+    movu          m7,    [r1 +  9 + 32]
+    movd          m4,    [r2 + 25 + 32]
+    palignr       m2,    m7,        1
+    punpcklbw     m7,    m2
+    pmaddubsw     m2,    m7,        [r3 + 2 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 112 * 16],  m1
+
+    ; mode 9 [row 1]
+    pmaddubsw     m1,    m0,        [r3 + 4 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 4 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 113 * 16],  m1
+
+    ; mode 9 [row 2]
+    pmaddubsw     m1,    m0,        [r3 + 6 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 6 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 114 * 16],  m1
+
+    ; mode 9 [row 3]
+    pmaddubsw     m1,    m0,        [r3 + 8 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 8 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 115 * 16],  m1
+
+    ; mode 9 [row 4]
+    pmaddubsw     m1,    m0,        [r3 + 10 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 10 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 116 * 16],  m1
+
+    ; mode 9 [row 5]
+    pmaddubsw     m1,    m0,        [r3 + 12 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 12 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 117 * 16],  m1
+
+    ; mode 9 [row 6]
+    pmaddubsw     m1,    m0,        [r3 + 14 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 14 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 118 * 16],  m1
+
+    ; mode 9 [row 7]
+    pmaddubsw     m1,    m0,        [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 16 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 119 * 16],  m1
+
+    ; mode 9 [row 8]
+    pmaddubsw     m1,    m0,        [r3 + 18 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 18 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 120 * 16],  m1
+
+    ; mode 9 [row 9]
+    pmaddubsw     m1,    m0,        [r3 + 20 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 20 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 121 * 16],  m1
+
+    ; mode 9 [row 10]
+    pmaddubsw     m1,    m0,        [r3 + 22 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 22 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 122 * 16],  m1
+
+    ; mode 9 [row 11]
+    pmaddubsw     m1,    m0,        [r3 + 24 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 24 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 123 * 16],  m1
+
+    ; mode 9 [row 12]
+    pmaddubsw     m1,    m0,        [r3 + 26 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 26 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 124 * 16],  m1
+
+    ; mode 9 [row 13]
+    pmaddubsw     m1,    m0,         [r3 + 28 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,         [r3 + 28 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 125 * 16],  m1
+
+    ; mode 9 [row 14]
+    pmaddubsw     m1,    m0,        [r3 + 30 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 30 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 126 * 16],  m1
+
+    ; mode 10
+    movu         m1,               [r1 + 1 + 32]
+    movu         [r0 + 128 * 16],  m1
+    movu         [r0 + 129 * 16],  m1
+    movu         [r0 + 130 * 16],  m1
+    movu         [r0 + 131 * 16],  m1
+    movu         [r0 + 132 * 16],  m1
+    movu         [r0 + 133 * 16],  m1
+    movu         [r0 + 134 * 16],  m1
+    movu         [r0 + 135 * 16],  m1
+    movu         [r0 + 136 * 16],  m1
+    movu         [r0 + 137 * 16],  m1
+    movu         [r0 + 138 * 16],  m1
+    movu         [r0 + 139 * 16],  m1
+    movu         [r0 + 140 * 16],  m1
+    movu         [r0 + 141 * 16],  m1
+    movu         [r0 + 142 * 16],  m1
+    movu         [r0 + 143 * 16],  m1
+
+    pxor         m0,          m0
+    pshufb       m1,          m1,         m0
+    punpcklbw    m1,          m0
+    pinsrb       m2,          [r1], 0
+    pshufb       m2,          m2,         m0
+    punpcklbw    m2,          m0
+    movu         m4,          [r1 + 1]
+    punpcklbw    m5,          m4,         m0
+    punpckhbw    m4,          m0
+    psubw        m5,          m2
+    psubw        m4,          m2
+    psraw        m5,          1
+    psraw        m4,          1
+    paddw        m5,          m1
+    paddw        m4,          m1
+    packuswb     m5,          m4
+
+    pextrb       [r0 + 128 * 16],  m5,          0
+    pextrb       [r0 + 129 * 16],  m5,          1
+    pextrb       [r0 + 130 * 16],  m5,          2
+    pextrb       [r0 + 131 * 16],  m5,          3
+    pextrb       [r0 + 132 * 16],  m5,          4
+    pextrb       [r0 + 133 * 16],  m5,          5
+    pextrb       [r0 + 134 * 16],  m5,          6
+    pextrb       [r0 + 135 * 16],  m5,          7
+    pextrb       [r0 + 136 * 16],  m5,          8
+    pextrb       [r0 + 137 * 16],  m5,          9
+    pextrb       [r0 + 138 * 16],  m5,          10
+    pextrb       [r0 + 139 * 16],  m5,          11
+    pextrb       [r0 + 140 * 16],  m5,          12
+    pextrb       [r0 + 141 * 16],  m5,          13
+    pextrb       [r0 + 142 * 16],  m5,          14
+    pextrb       [r0 + 143 * 16],  m5,          15
+
+    ; mode 11
+    movu          m0,               [r1 + 32]
+    pinsrb        m0,               [r1], 0
+
+    ; mode 11 [row 15]
+    movu          [r0 + 159 * 16],  m0
+
+    ; mode 11 [row 0]
+    movu          m1,    [r1 + 1 + 32]
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        [r3 + 30 * 16]
+    pmulhrsw      m1,    m3
+    movu          m7,    [r1 + 8 + 32]
+    movu          m2,    [r1 + 9 + 32]
+    punpcklbw     m7,    m2
+    pmaddubsw     m2,    m7,        [r3 + 30 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 144 * 16],  m1
+
+    ; mode 11 [row 1]
+    pmaddubsw     m1,    m0,        [r3 + 28 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 28 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 145 * 16],  m1
+
+    ; mode 11 [row 2]
+    pmaddubsw     m1,    m0,        [r3 + 26 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 26 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 146 * 16],  m1
+
+    ; mode 11 [row 3]
+    pmaddubsw     m1,    m0,         [r3 + 24 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 24 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 147 * 16],  m1
+
+    ; mode 11 [row 4]
+    pmaddubsw     m1,    m0,        [r3 + 22 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 22 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 148 * 16],  m1
+
+    ; mode 11 [row 5]
+    pmaddubsw     m1,    m0,        [r3 + 20 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 20 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 149 * 16],  m1
+
+    ; mode 11 [row 6]
+    pmaddubsw     m1,    m0,        [r3 + 18 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 18 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 150 * 16],  m1
+
+    ; mode 11 [row 7]
+    pmaddubsw     m1,    m0,        [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 16 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 151 * 16],  m1
+
+    ; mode 11 [row 8]
+    pmaddubsw     m1,    m0,        [r3 + 14 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 14 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 152 * 16],  m1
+
+    ; mode 11 [row 9]
+    pmaddubsw     m1,    m0,        [r3 + 12 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 12 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 153 * 16],  m1
+
+    ; mode 11 [row 10]
+    pmaddubsw     m1,    m0,        [r3 + 10 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 10 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 154 * 16],  m1
+
+    ; mode 11 [row 11]
+    pmaddubsw     m1,    m0,        [r3 + 8 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 8 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 155 * 16],  m1
+
+    ; mode 11 [row 12]
+    pmaddubsw     m1,    m0,        [r3 + 6 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 6 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 156 * 16],  m1
+
+    ; mode 11 [row 13]
+    pmaddubsw     m1,    m0,        [r3 + 4 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 4 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 157 * 16],  m1
+
+    ; mode 11 [row 14]
+    pmaddubsw     m1,    m0,        [r3 + 2 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 2 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 158 * 16],  m1
+
+    ; mode 12 [row 0]
+    movu          m0,    [r2 + 32]
+    pinsrb        m0,    [r2], 0
+    movu          m1,    [r2 + 1 + 32]
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        [r3 + 27 * 16]
+    pmulhrsw      m1,    m3
+    movu          m7,    [r2 + 8 + 32]
+    movd          m2,    [r2 + 24 + 32]
+    palignr       m2,    m7,        1
+    punpcklbw     m7,    m2
+    pmaddubsw     m2,    m7,        [r3 + 27 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 160 * 16],  m1
+
+    ; mode 12 [row 1]
+    pmaddubsw     m1,    m0,        [r3 + 22 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 22 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 161 * 16],  m1
+
+    ; mode 12 [row 2]
+    pmaddubsw     m1,    m0,        [r3 + 17 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 17 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 162 * 16],  m1
+
+    ; mode 12 [row 3]
+    pmaddubsw     m1,    m0,        [r3 + 12 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 12 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 163 * 16],  m1
+
+    ; mode 12 [row 4]
+    pmaddubsw     m1,    m0,        [r3 + 7 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 7 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 164 * 16],  m1
+
+    ; mode 12 [row 5]
+    pmaddubsw     m1,    m0,        [r3 + 2 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 2 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 165 * 16],  m1
+
+    ; mode 13 [row 0]
+    pmaddubsw     m1,    m0,        [r3 + 23 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 23 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 176 * 16],  m1
+
+    ; mode 13 [row 1]
+    pmaddubsw     m1,    m0,        [r3 + 14 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 14 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 177 * 16],  m1
+
+    ; mode 13 [row 2]
+    pmaddubsw     m1,    m0,        [r3 + 5 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 5 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 178 * 16],  m1
+
+    ; mode 14 [row 0]
+    pmaddubsw     m1,    m0,        [r3 + 19 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 19 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 192 * 16],  m1
+
+    ; mode 14 [row 1]
+    pmaddubsw     m1,    m0,        [r3 + 6 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 6 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 193 * 16],  m1
+
+    ; mode 17 [row 0]
+    movu          [r0 + 240 * 16],  m1
+
+    ; mode 15 [row 0]
+    pmaddubsw     m1,    m0,        [r3 + 15 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 15 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 208 * 16],  m1
+
+    ; mode 15 [row 15 - second half]
+    pmaddubsw     m1,    m0,           [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                  m1
+    movh          [r0 + 223 * 16 + 8], m1
+    ; mode 15 [row 15 - second half] end
+
+    ; mode 16 [row 0]
+    pmaddubsw     m1,    m0,        [r3 + 11 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m2,    m7,        [r3 + 11 * 16]
+    pmulhrsw      m2,    m3
+    packuswb      m1,               m2
+    movu          [r0 + 224 * 16],  m1
+
+    ; mode 17 [row 9 - second half]
+    pmaddubsw     m1,     m0,          [r3 + 28 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 249 * 16 + 8],  m1
+    ; mode 17 [row 9 - second half] end
+
+    ; mode 17 [row 10 - second half]
+    pmaddubsw     m1,     m0,          [r3 + 2 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 250 * 16 + 8],  m1
+    ; mode 17 [row 10 - second half] end
+
+    ; mode 17 [row 1 - first half]
+    pslldq        m6,     m0,          2
+    pinsrb        m6,     [r2],        1
+    pinsrb        m6,     [r2 + 1],    0
+    pmaddubsw     m1,     m6,          [r3 + 12 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,               m1
+    movh          [r0 + 241 * 16],  m1
+
+    ; mode 17 [row 11 - second half]
+    pmaddubsw     m1,     m6,          [r3 + 8 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 251 * 16 + 8],  m1
+    ; mode 17 [row 11 - second half] end
+
+    ; mode 17 [row 2 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 + 1],    1
+    pinsrb        m6,     [r2 + 2],    0
+    pmaddubsw     m1,     m6,          [r3 + 18 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                  m1
+    movh          [r0 + 242 * 16],     m1
+
+    ; mode 17 [row 12 - second half]
+    pmaddubsw     m1,     m6,           [r3 + 14 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 252 * 16 + 8],  m1
+    ; mode 17 [row 12 - second half] end
+
+    ; mode 17 [row 3 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 + 2],    1
+    pinsrb        m6,     [r2 + 4],    0
+    pmaddubsw     m1,     m6,          [r3 + 24 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,               m1
+    movh          [r0 + 243 * 16],  m1
+
+    ; mode 17 [row 13 - second half]
+    pmaddubsw     m1,     m6,           [r3 + 20 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 253 * 16 + 8],  m1
+
+    ; mode 17 [row 4 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 + 4],    1
+    pinsrb        m6,     [r2 + 5],    0
+    pmaddubsw     m1,     m6,          [r3 + 30 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                  m1
+    movh          [r0 + 244 * 16],     m1
+
+    ; mode 17 [row 5 - first half]
+    pmaddubsw     m1,     m6,          [r3 + 4 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,               m1
+    movh          [r0 + 245 * 16],  m1
+
+    ; mode 17 [row 14 - second half]
+    pmaddubsw     m1,     m6,          [r3 + 26 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                  m1
+    movh          [r0 + 254 * 16 + 8], m1
+    ; mode 17 [row 14 - second half] end
+
+    ; mode 17 [row 6 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 + 5],    1
+    pinsrb        m6,     [r2 + 6],    0
+    pmaddubsw     m1,     m6,          [r3 + 10 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                  m1
+    movh          [r0 + 246 * 16],     m1
+
+    ; mode 17 [row 7 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 + 6],    1
+    pinsrb        m6,     [r2 + 7],    0
+    pmaddubsw     m1,     m6,          [r3 + 16 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                  m1
+    movh          [r0 + 247 * 16],     m1
+
+    ; mode 17 [row 8 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 + 7],    1
+    pinsrb        m6,     [r2 + 9],    0
+    pmaddubsw     m1,     m6,          [r3 + 22 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                  m1
+    movh          [r0 + 248 * 16],     m1
+
+    ; mode 17 [row 9 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 +  9],    1
+    pinsrb        m6,     [r2 + 10],    0
+    pmaddubsw     m1,     m6,           [r3 + 28 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 249 * 16],      m1
+
+    ; mode 17 [row 10 - first half]
+    pmaddubsw     m1,     m6,          [r3 + 2 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                  m1
+    movh          [r0 + 250 * 16],     m1
+
+    ; mode 17 [row 11 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 + 10],    1
+    pinsrb        m6,     [r2 + 11],    0
+    pmaddubsw     m1,     m6,           [r3 + 8 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 251 * 16],      m1
+
+    ; mode 17 [row 12 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 + 11],    1
+    pinsrb        m6,     [r2 + 12],    0
+    pmaddubsw     m1,     m6,           [r3 + 14 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 252 * 16],      m1
+
+    ; mode 17 [row 13 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 + 12],    1
+    pinsrb        m6,     [r2 + 14],    0
+    pmaddubsw     m1,     m6,           [r3 + 20 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 253 * 16],      m1
+
+    ; mode 17 [row 14 - first half]
+    pslldq        m6,     2
+    pinsrb        m6,     [r2 + 14],    1
+    pinsrb        m6,     [r2 + 15],    0
+    pmaddubsw     m1,     m6,           [r3 + 26 * 16]
+    pmulhrsw      m1,     m3
+    packuswb      m1,                   m1
+    movh          [r0 + 254 * 16],      m1
+
+    ; mode 16 [row 12 - second half]
+    pmaddubsw     m1,    m0,            [r3 + 15 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                   m1
+    movh          [r0 + 236 * 16 + 8],  m1
+    ; mode 16 [row 12 - second half] end
+
+    ; mode 12 [row 6]
+    pslldq        m2,    m0,            2
+    pinsrb        m2,    [r2], 1
+    pinsrb        m2,    [r2 + 6],      0
+    pmaddubsw     m1,    m2,            [r3 + 29 * 16]
+    pmulhrsw      m1,    m3
+    movu          m0,    [r2 + 7 + 32]
+    psrldq        m4,    m0,            1
+    punpcklbw     m0,    m4
+    pmaddubsw     m4,    m0,            [r3 + 29 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,                   m4
+    movu          [r0 + 166 * 16],      m1
+
+    ; mode 12 [row 7]
+    pmaddubsw     m1,    m2,        [r3 + 24 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 167 * 16],  m1
+
+    ; mode 12 [row 8]
+    pmaddubsw     m1,    m2,        [r3 + 19 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 19 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 168 * 16],  m1
+
+    ; mode 12 [row 9]
+    pmaddubsw     m1,    m2,        [r3 + 14 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 14 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 169 * 16],  m1
+
+    ; mode 12 [row 10]
+    pmaddubsw     m1,    m2,        [r3 + 9 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 9 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 170 * 16],  m1
+
+    ; mode 12 [row 11]
+    pmaddubsw     m1,    m2,        [r3 + 4 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 4 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,    m4
+    movu          [r0 + 171 * 16],  m1
+
+    ; mode 13 [row 3]
+    pinsrb        m7,    m2,        [r2 +  4],   0
+    pmaddubsw     m1,    m7,        [r3 + 28 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 28 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 179 * 16],  m1
+
+    ; mode 13 [row 4]
+    pmaddubsw     m1,    m7,        [r3 + 19 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 19 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 180 * 16],  m1
+
+    ; mode 13 [row 5]
+    pmaddubsw     m1,    m7,        [r3 + 10 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 10 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 181 * 16],  m1
+
+    ; mode 13 [row 6]
+    pmaddubsw     m1,    m7,        [r3 + 1 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 1 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 182 * 16],  m1
+
+    ; mode 14 [row 2]
+    pinsrb        m5,    m7,        [r2 +  2],   0
+    pmaddubsw     m1,    m5,        [r3 + 25 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 25 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 194 * 16],  m1
+
+    ; mode 14 [row 3]
+    pmaddubsw     m1,    m5,        [r3 + 12 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 195 * 16],  m1
+
+    ; mode 15 [row 1]
+    pmaddubsw     m1,    m5,        [r3 + 30 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 30 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 209 * 16],  m1
+
+    ; mode 15 [row 2]
+    pmaddubsw     m1,    m5,        [r3 + 13 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 13 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 210 * 16],  m1
+
+    ; mode 16 [row 1]
+    pmaddubsw     m1,    m5,        [r3 + 22 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 22 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 225 * 16],  m1
+
+    ; mode 16 [row 2]
+    pmaddubsw     m1,    m5,        [r3 + 1 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m4,    m0,        [r3 + 1 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m1,               m4
+    movu          [r0 + 226 * 16],  m1
+
+    ; mode 16 [row 13 - second half]
+    pmaddubsw     m1,    m5,           [r3 + 26 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                  m1
+    movh          [r0 + 237 * 16 + 8], m1
+    ; mode 16 [row 13 - second half] end
+
+    ; mode 16 [row 14 - second half]
+    pmaddubsw     m1,    m5,           [r3 + 5 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                  m1
+    movh          [r0 + 238 * 16 + 8], m1
+    ; mode 16 [row 14 - second half] end
+
+    ; mode 16 [row 3]
+    pslldq        m6,    m5,         2
+    pinsrb        m6,    [r2 + 2],   1
+    pinsrb        m6,    [r2 + 3],   0
+    pmaddubsw     m1,    m6,         [r3 + 12 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 227 * 16],   m1
+
+    ; mode 16 [row 15 - second half]
+    pmaddubsw     m1,    m6,          [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                  m1
+    movh          [r0 + 239 * 16 + 8], m1
+    ; mode 16 [row 15 - second half] end
+
+    ; mode 16 [row 4- first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 3],   1
+    pinsrb        m6,    [r2 + 5],   0
+    pmaddubsw     m1,    m6,         [r3 + 23 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 228 * 16],   m1
+
+    ; mode 16 [row 5- first half]
+    pmaddubsw     m1,    m6,        [r3 + 2 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,               m1
+    movh          [r0 + 229 * 16],  m1
+
+    ; mode 16 [row 6- first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 5],   1
+    pinsrb        m6,    [r2 + 6],   0
+    pmaddubsw     m1,    m6,         [r3 + 13 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 230 * 16],   m1
+
+    ; mode 16 [row 7- first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 6],   1
+    pinsrb        m6,    [r2 + 8],   0
+    pmaddubsw     m1,    m6,         [r3 + 24 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 231 * 16],   m1
+
+    ; mode 16 [row 8- first half]
+    pmaddubsw     m1,    m6,        [r3 + 3 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,               m1
+    movh          [r0 + 232 * 16],  m1
+    ; mode 16 [row 8 - first half] end
+
+    ; mode 16 [row 9- first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 8],   1
+    pinsrb        m6,    [r2 + 9],   0
+    pmaddubsw     m1,    m6,        [r3 + 14 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,               m1
+    movh          [r0 + 233 * 16],  m1
+
+    ; mode 16 [row 10 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 +  9], 1
+    pinsrb        m6,    [r2 + 11], 0
+    pmaddubsw     m1,    m6,        [r3 + 25 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,               m1
+    movh          [r0 + 234 * 16],  m1
+
+    ; mode 16 [row 11 - first half]
+    pmaddubsw     m1,    m6,        [r3 + 4 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,               m1
+    movh          [r0 + 235 * 16],  m1
+
+    ; mode 16 [row 12 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 11], 1
+    pinsrb        m6,    [r2 + 12], 0
+    pmaddubsw     m1,    m6,        [r3 + 15 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,               m1
+    movh          [r0 + 236 * 16],  m1
+
+    ; mode 16 [row 13 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 12],   1
+    pinsrb        m6,    [r2 + 14],   0
+    pmaddubsw     m1,    m6,        [r3 + 26 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,               m1
+    movh          [r0 + 237 * 16],  m1
+
+    ; mode 16 [row 14 - first half]
+    pmaddubsw     m1,    m6,        [r3 + 5 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,               m1
+    movh          [r0 + 238 * 16],  m1
+
+    ; mode 16 [row 15 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 14],   1
+    pinsrb        m6,    [r2 + 15],   0
+    pmaddubsw     m1,    m6,          [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,               m1
+    movh          [r0 + 239 * 16],  m1
+
+    ; mode 14 [row 4]
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 2],   1
+    pinsrb        m5,    [r2 + 5],   0
+    movu          m4,    [r2 + 6 + 32]
+    psrldq        m0,    m4,         1
+    punpcklbw     m4,    m0
+
+    ; mode 16 [row 3 - second half]
+    pmaddubsw     m1,    m4,        [r3 + 12 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                  m1
+    movh          [r0 + 227 * 16 + 8], m1
+    ; mode 16 [row 3 - second half] end
+
+    pmaddubsw     m1,    m5,        [r3 + 31 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m0,    m4,        [r3 + 31 * 16]
+    pmulhrsw      m0,    m3
+    packuswb      m1,               m0
+    movu          [r0 + 196 * 16],  m1
+
+    ; mode 14 [row 5]
+    pmaddubsw     m1,    m5,        [r3 + 18 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m0,    m4,        [r3 + 18 * 16]
+    pmulhrsw      m0,    m3
+    packuswb      m1,               m0
+    movu          [r0 + 197 * 16],  m1
+
+    ; mode 14 [row 6]
+    pmaddubsw     m1,    m5,         [r3 + 5 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m0,    m4,         [r3 + 5 * 16]
+    pmulhrsw      m0,    m3
+    packuswb      m1,               m0
+    movu          [r0 + 198 * 16],  m1
+
+    ; mode 15 [row 3]
+    movu          m6,    m5
+    pinsrb        m6,    [r2 + 4],   0
+    pmaddubsw     m1,    m6,         [r3 + 28 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m0,    m4,         [r3 + 28 * 16]
+    pmulhrsw      m0,    m3
+    packuswb      m1,                m0
+    movu          [r0 + 211 * 16],   m1
+
+    ; mode 15 [row 4]
+    pmaddubsw     m1,    m6,         [r3 + 11 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m0,    m4,         [r3 + 11 * 16]
+    pmulhrsw      m0,    m3
+    packuswb      m1,                m0
+    movu          [r0 + 212 * 16],   m1
+
+    ; mode 15 [row 5 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 4],   1
+    pinsrb        m6,    [r2 + 6],   0
+    pmaddubsw     m1,    m6,         [r3 + 26 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 213 * 16],   m1
+
+    ; mode 15 [row 6 - first half]
+    pmaddubsw     m1,    m6,         [r3 + 9 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 214 * 16],   m1
+
+    ; mode 15 [row 7 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 6],   1
+    pinsrb        m6,    [r2 + 8],   0
+    pmaddubsw     m1,    m6,         [r3 + 24 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 215 * 16],   m1
+
+    ; mode 15 [row 8 - first half]
+    pmaddubsw     m1,    m6,         [r3 + 7 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 216 * 16],   m1
+
+    ; mode 15 [row 9 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 8],   1
+    pinsrb        m6,    [r2 + 9],   0
+    pmaddubsw     m1,    m6,         [r3 + 22 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 217 * 16],   m1
+
+    ; mode 15 [row 10 - first half]
+    pmaddubsw     m1,    m6,         [r3 + 5 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 218 * 16],   m1
+
+    ; mode 15 [row 11 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 +  9],   1
+    pinsrb        m6,    [r2 + 11],   0
+    pmaddubsw     m1,    m6,         [r3 + 20 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 219 * 16],   m1
+
+    ; mode 15 [row 12 - first half]
+    pmaddubsw     m1,    m6,         [r3 + 3 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 220 * 16],   m1
+
+    ; mode 15 [row 13 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 11],   1
+    pinsrb        m6,    [r2 + 13],   0
+    pmaddubsw     m1,    m6,         [r3 + 18 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 221 * 16],   m1
+
+    ; mode 15 [row 14 - first half]
+    pmaddubsw     m1,    m6,         [r3 + 1 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 222 * 16],   m1
+
+    ; mode 15 [row 15 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 13],   1
+    pinsrb        m6,    [r2 + 15],   0
+    pmaddubsw     m1,    m6,         [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                m1
+    movh          [r0 + 223 * 16],   m1
+
+    ; mode 14 [row 7]
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 5],   1
+    pinsrb        m5,    [r2 + 7],   0
+    movu          m0,    [r2 + 5 + 32]
+    psrldq        m6,    m0,          1
+    punpcklbw     m0,    m6
+
+    ; mode 15 [row 5 - second half]
+    pmaddubsw     m1,    m0,           [r3 + 26 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                  m1
+    movh          [r0 + 213 * 16 + 8], m1
+    ; mode 15 [row 5 - second half] end
+
+    ; mode 15 [row 6 - second half]
+    pmaddubsw     m1,    m0,           [r3 + 9 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                  m1
+    movh          [r0 + 214 * 16 + 8], m1
+    ; mode 15 [row 6 - second half] end
+
+    ; mode 16 [row 4 - second half]
+    pmaddubsw     m1,    m0,        [r3 + 23 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                  m1
+    movh          [r0 + 228 * 16 + 8], m1
+    ; mode 16 [row 4 - second half] end
+
+    ; mode 16 [row 5 - second half]
+    pmaddubsw     m1,    m0,        [r3 + 2 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                  m1
+    movh          [r0 + 229 * 16 + 8], m1
+    ; mode 16 [row 5 - second half] end
+
+    pmaddubsw     m1,    m5,        [r3 + 24 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m6,    m0,        [r3 + 24 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m1,               m6
+    movu          [r0 + 199 * 16],  m1
+
+    ; mode 14 [row 8]
+    pmaddubsw     m1,    m5,        [r3 + 11 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m6,    m0,        [r3 + 11 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m1,               m6
+    movu          [r0 + 200 * 16],  m1
+
+    ; mode 14 [row 9]
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 7],    1
+    pinsrb        m5,    [r2 + 10],   0
+    movu          m0,    [r2 + 4 + 32]
+    psrldq        m6,    m0,          1
+    punpcklbw     m0,    m6
+
+    ; mode 15 [row 7 - second half]
+    pmaddubsw     m1,    m0,             [r3 + 24 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                    m1
+    movh          [r0 + 215 * 16 + 8],   m1
+    ; mode 15 [row 7 - second half] end
+
+    ; mode 15 [row 8 - second half]
+    pmaddubsw     m1,    m0,             [r3 + 7 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                    m1
+    movh          [r0 + 216 * 16 + 8],   m1
+    ; mode 15 [row 8 - second half] end
+
+    ; mode 16 [row 6 - second half]
+    pmaddubsw     m1,    m0,             [r3 + 13 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                    m1
+    movh          [r0 + 230 * 16 + 8],   m1
+    ; mode 16 [row 6 - second half] end
+
+    ; mode 14 [row 9]
+    pmaddubsw     m1,    m5,        [r3 + 30 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m6,    m0,        [r3 + 30 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m1,               m6
+    movu          [r0 + 201 * 16],  m1
+
+    ; mode 14 [row 10]
+    pmaddubsw     m1,    m5,        [r3 + 17 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m6,    m0,        [r3 + 17 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m1,               m6
+    movu          [r0 + 202 * 16],  m1
+
+    ; mode 14 [row 11]
+    pmaddubsw     m1,    m5,        [r3 + 4 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m6,    m0,        [r3 + 4 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m1,               m6
+    movu          [r0 + 203 * 16],  m1
+
+    ; mode 14 [row 12]
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 10],   1
+    pinsrb        m5,    [r2 + 12],   0
+    movu          m0,    [r2 + 3 + 32]
+    psrldq        m6,    m0,          1
+    punpcklbw     m0,    m6
+
+    ; mode 15 [row 9 - second half]
+    pmaddubsw     m1,    m0,             [r3 + 22 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                    m1
+    movh          [r0 + 217 * 16 + 8],   m1
+    ; mode 15 [row 9 - second half] end
+
+    ; mode 15 [row 10 - second half]
+    pmaddubsw     m1,    m0,             [r3 + 5 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                    m1
+    movh          [r0 + 218 * 16 + 8],   m1
+    ; mode 15 [row 10 - second half] end
+
+    ; mode 16 [row 7 - second half]
+    pmaddubsw     m1,    m0,             [r3 + 24 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                    m1
+    movh          [r0 + 231 * 16 + 8],   m1
+    ; mode 16 [row 7 - second half] end
+
+    ; mode 16 [row 8 - second half]
+    pmaddubsw     m1,    m0,             [r3 + 3 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                    m1
+    movh          [r0 + 232 * 16 + 8],   m1
+    ; mode 16 [row 8 - second half] end
+
+    pmaddubsw     m1,    m5,          [r3 + 23 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m6,    m0,          [r3 + 23 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m1,                 m6
+    movu          [r0 + 204 * 16],    m1
+
+    ; mode 14 [row 13]
+    pmaddubsw     m1,    m5,          [r3 + 10 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m6,    m0,          [r3 + 10 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m1,                 m6
+    movu          [r0 + 205 * 16],    m1
+
+    ; mode 14 [row 14]
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 12],   1
+    pinsrb        m5,    [r2 + 15],   0
+    movu          m0,    [r2 + 2 + 32]
+    psrldq        m6,    m0,          1
+    punpcklbw     m0,    m6
+
+    ; mode 15 [row 11 - second half]
+    pmaddubsw     m1,    m0,             [r3 + 20 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                    m1
+    movh          [r0 + 219 * 16 + 8],   m1
+    ; mode 15 [row 11 - second half] end
+
+    ; mode 15 [row 12 - second half]
+    pmaddubsw     m1,    m0,             [r3 + 3 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                    m1
+    movh          [r0 + 220 * 16 + 8],   m1
+    ; mode 15 [row 12 - second half] end
+
+    ; mode 16 [row 9 - second half]
+    pmaddubsw     m1,    m0,             [r3 + 14 * 16]
+    pmulhrsw      m1,    m3
+    packuswb      m1,                    m1
+    movh          [r0 + 233 * 16 + 8],   m1
+    ; mode 16 [row 9 - second half] end
+
+    pmaddubsw     m1,    m5,          [r3 + 29 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m6,    m0,          [r3 + 29 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m1,                 m6
+    movu          [r0 + 206 * 16],    m1
+
+    ; mode 14 [row 15]
+    pmaddubsw     m1,    m5,          [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m6,    m0,          [r3 + 16 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m1,                 m6
+    movu          [r0 + 207 * 16],    m1
+
+    ; mode 12 [row 12]
+    pslldq        m0,    m2,          2
+    pinsrb        m0,    [r2 +  6],   1
+    pinsrb        m0,    [r2 + 13],   0
+    pmaddubsw     m1,    m0,          [r3 + 31 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 31 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 172 * 16],    m1
+
+    ; mode 12 [row 13]
+    pmaddubsw     m1,    m0,          [r3 + 26 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 26 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 173 * 16],    m1
+
+    ; mode 12 [row 14]
+    pmaddubsw     m1,    m0,          [r3 + 21 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 21 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 174 * 16],    m1
+
+    ; mode 12 [row 15]
+    pmaddubsw     m1,    m0,          [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 16 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 175 * 16],    m1
+
+    ; mode 13 [row 7]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 +  4],   1
+    pinsrb        m7,    [r2 +  7],   0
+    pmaddubsw     m1,    m7,          [r3 + 24 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 24 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 183 * 16],    m1
+
+    ; mode 13 [row 8]
+    pmaddubsw     m1,    m7,          [r3 + 15 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 15 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 184 * 16],    m1
+
+    ; mode 13 [row 9]
+    pmaddubsw     m1,    m7,          [r3 + 6 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 6 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 185 * 16],    m1
+
+    ; mode 13 [row 10]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 +  7],   1
+    pinsrb        m7,    [r2 + 11],   0
+    pmaddubsw     m1,    m7,          [r3 + 29 * 16]
+    pmulhrsw      m1,    m3
+    movu          m4,    [r2 + 5 + 32]
+    psrldq        m5,    m4,         1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,          [r3 + 29 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,    m5
+    movu          [r0 + 186 * 16],    m1
+
+    ; mode 13 [row 11]
+    pmaddubsw     m1,    m7,          [r3 + 20 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 20 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 187 * 16],    m1
+
+    ; mode 13 [row 12]
+    pmaddubsw     m1,    m7,          [r3 + 11 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 11 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 188 * 16],    m1
+
+    ; mode 13 [row 13]
+    pmaddubsw     m1,    m7,          [r3 + 2 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 2 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 189 * 16],    m1
+
+    ; mode 13 [row 14]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 11],   1
+    pinsrb        m7,    [r2 + 14],   0
+    pmaddubsw     m1,    m7,          [r3 + 25 * 16]
+    pmulhrsw      m1,    m3
+    movu          m4,    [r2 + 4 + 32]
+    psrldq        m5,    m4,          1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,          [r3 + 25 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 190 * 16],    m1
+
+    ; mode 13 [row 15]
+    pmaddubsw     m1,    m7,          [r3 + 16 * 16]
+    pmulhrsw      m1,    m3
+    pmaddubsw     m5,    m4,          [r3 + 16 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m1,                 m5
+    movu          [r0 + 191 * 16],    m1
+
+    ; mode 17 [row 15]
+    movu         m0,                   [r2]
+    pshufb       m1,                   m0,       [tab_S1]
+    movu         [r0 + 255 * 16],      m1
+    movu         m2,                   [r2 + 32]
+    pinsrb       m2,                   [r2], 0
+    movd         [r0 + 255 * 16 + 12], m2
+
+    ; mode 18 [row 0]
+    movu         [r0 + 256 * 16],      m0
+
+    ; mode 18 [row 1]
+    pslldq        m4,              m0,         1
+    pinsrb        m4,              [r2 + 1 + 32],   0
+    movu          [r0 + 257 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 2 + 32],   0
+    movu          [r0 + 258 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 3 + 32],   0
+    movu          [r0 + 259 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 4 + 32],   0
+    movu          [r0 + 260 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 5 + 32],   0
+    movu          [r0 + 261 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 6 + 32],   0
+    movu          [r0 + 262 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 7 + 32],   0
+    movu          [r0 + 263 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 8 + 32],   0
+    movu          [r0 + 264 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 9 + 32],   0
+    movu          [r0 + 265 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 10 + 32],   0
+    movu          [r0 + 266 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 11 + 32],   0
+    movu          [r0 + 267 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 12 + 32],   0
+    movu          [r0 + 268 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 13 + 32],   0
+    movu          [r0 + 269 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 14 + 32],   0
+    movu          [r0 + 270 * 16], m4
+    pslldq        m4,              1
+    pinsrb        m4,              [r2 + 15 + 32],   0
+    movu          [r0 + 271 * 16], m4
+
+    ; mode 19 [row 0]
+    psrldq        m2,    m0,           1
+    punpcklbw     m0,    m2
+    movu          m5,    [r2 + 8]
+    psrldq        m6,    m5,           1
+    punpcklbw     m5,    m6
+    pmaddubsw     m4,    m0,           [r3 + 6 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 6 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 272 * 16],     m4
+
+    ; mode 20 [row 0]
+    pmaddubsw     m4,    m0,           [r3 + 11 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 11 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 288 * 16],     m4
+
+    ; mode 21 [row 0]
+    pmaddubsw     m4,    m0,            [r3 + 15 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,            [r3 + 15 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 304 * 16],     m4
+
+    ; mode 22 [row 0]
+    pmaddubsw     m4,    m0,           [r3 + 19 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 19 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 320 * 16],     m4
+
+    ; mode 22 [row 1]
+    pmaddubsw     m4,    m0,           [r3 + 6 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 6 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 321 * 16],     m4
+
+    ; mode 23 [row 0]
+    pmaddubsw     m4,    m0,           [r3 + 23 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 23 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 336 * 16],     m4
+
+    ; mode 23 [row 1]
+    pmaddubsw     m4,    m0,           [r3 + 14 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 14 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 337 * 16],     m4
+
+    ; mode 23 [row 2]
+    pmaddubsw     m4,    m0,           [r3 + 5 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 5 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 338 * 16],     m4
+
+    ; mode 24 [row 0]
+    pmaddubsw     m4,    m0,           [r3 + 27 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 27 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 352 * 16],     m4
+
+    ; mode 24 [row 1]
+    pmaddubsw     m4,    m0,            [r3 + 22 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,            [r3 + 22 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 353 * 16],     m4
+
+    ; mode 24 [row 2]
+    pmaddubsw     m4,    m0,           [r3 + 17 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 17 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 354 * 16],     m4
+
+    ; mode 24 [row 3]
+    pmaddubsw     m4,    m0,           [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 12 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 355 * 16],     m4
+
+    ; mode 24 [row 4]
+    pmaddubsw     m4,    m0,            [r3 + 7 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,            [r3 + 7 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 356 * 16],     m4
+
+    ; mode 24 [row 5]
+    pmaddubsw     m4,    m0,           [r3 + 2 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,           [r3 + 2 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                  m6
+    movu          [r0 + 357 * 16],     m4
+
+    ; mode 24 [row 6 - first half]
+    pslldq        m7,    m0,    2
+    pinsrb        m7,    [r2 + 0],     1
+    pinsrb        m7,    [r2 + 6 + 32],     0
+    pmaddubsw     m4,    m7,           [r3 + 29 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 358 * 16],     m4
+
+    ; mode 24 [row 7 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 359 * 16],     m4
+
+    ; mode 24 [row 8 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 19 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 360 * 16],     m4
+
+    ; mode 24 [row 9 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 14 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 361 * 16],     m4
+
+    ; mode 24 [row 10 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 9 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 362 * 16],     m4
+
+    ; mode 24 [row 11 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 4 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 363 * 16],     m4
+
+    ; mode 24 [row 12 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 +  6 + 32],    1
+    pinsrb        m7,    [r2 + 13 + 32],    0
+    pmaddubsw     m4,    m7,           [r3 + 31 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 364 * 16],     m4
+
+    ; mode 24 [row 13 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 26 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 365 * 16],     m4
+
+    ; mode 24 [row 14 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 21 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 366 * 16],     m4
+
+    ; mode 24 [row 15 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 367 * 16],     m4
+
+    ; mode 23 [row 3 - first half]
+    pslldq        m7,    m0,    2
+    pinsrb        m7,    [r2 + 0],     1
+    pinsrb        m7,    [r2 + 4 + 32],     0
+    pmaddubsw     m4,    m7,           [r3 + 28 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 339 * 16],     m4
+
+    ; mode 23 [row 4 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 19 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 340 * 16],     m4
+
+    ; mode 23 [row 5 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 10 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 341 * 16],     m4
+
+    ; mode 23 [row 6 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 1 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 342 * 16],     m4
+
+    ; mode 23 [row 7 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 4 + 32],     1
+    pinsrb        m7,    [r2 + 7 + 32],     0
+    pmaddubsw     m4,    m7,            [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 343 * 16],     m4
+
+    ; mode 23 [row 8 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 15 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 344 * 16],     m4
+
+    ; mode 23 [row 9 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 6 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 345 * 16],     m4
+
+    ; mode 23 [row 10 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 +  7 + 32],    1
+    pinsrb        m7,    [r2 + 11 + 32],    0
+    pmaddubsw     m4,    m7,           [r3 + 29 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 346 * 16],     m4
+
+    ; mode 23 [row 11 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 20 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 347 * 16],     m4
+
+    ; mode 23 [row 12 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 11 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 348 * 16],     m4
+
+    ; mode 23 [row 13 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 2 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 349 * 16],     m4
+
+    ; mode 23 [row 14 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 11 + 32],   1
+    pinsrb        m7,    [r2 + 14 + 32],   0
+    pmaddubsw     m4,    m7,           [r3 + 25 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 350 * 16],     m4
+
+    ; mode 23 [row 15 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 351 * 16],     m4
+
+    ; mode 21 [row 15 - second half]
+    pmaddubsw     m4,    m0,         [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 319 * 16 + 8], m4
+    ; mode 21 [row 15 - second half] end
+
+    ; mode 20 [row 1 - first half]
+    pslldq        m7,    m0,    2
+    pinsrb        m7,    [r2 + 0],   1
+    pinsrb        m7,    [r2 + 2 + 32],   0
+    pmaddubsw     m4,    m7,           [r3 + 22 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 289 * 16],     m4
+
+    ; mode 20 [row 2 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 1 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 290 * 16],     m4
+
+    ; mode 21 [row 1 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 30 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 305 * 16],     m4
+
+    ; mode 21 [row 2 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 13 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 306 * 16],     m4
+
+    ; mode 22 [row 2 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 25 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 322 * 16],     m4
+
+    ; mode 22 [row 3 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 323 * 16],     m4
+
+    ; mode 22 [row 4 - first half]
+    pslldq        m1,    m7,    2
+    pinsrb        m1,    [r2 + 2 + 32],     1
+    pinsrb        m1,    [r2 + 5 + 32],     0
+    pmaddubsw     m4,    m1,           [r3 + 31 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 324 * 16],     m4
+
+    ; mode 22 [row 5 - first half]
+    pmaddubsw     m4,    m1,           [r3 + 18 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 325 * 16],     m4
+
+    ; mode 22 [row 6 - first half]
+    pmaddubsw     m4,    m1,           [r3 + 5 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 326 * 16],     m4
+
+    ; mode 22 [row 7 - first half]
+    pslldq        m1,    2
+    pinsrb        m1,    [r2 + 5 + 32],     1
+    pinsrb        m1,    [r2 + 7 + 32],     0
+    pmaddubsw     m4,    m1,           [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 327 * 16],     m4
+
+    ; mode 22 [row 8 - first half]
+    pmaddubsw     m4,    m1,           [r3 + 11 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 328 * 16],     m4
+
+    ; mode 22 [row 9 - first half]
+    pslldq        m1,    2
+    pinsrb        m1,    [r2 +  7 + 32],    1
+    pinsrb        m1,    [r2 + 10 + 32],    0
+    pmaddubsw     m4,    m1,           [r3 + 30 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 329 * 16],     m4
+
+    ; mode 22 [row 10 - first half]
+    pmaddubsw     m4,    m1,           [r3 + 17 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 330 * 16],     m4
+
+    ; mode 22 [row 11 - first half]
+    pmaddubsw     m4,    m1,           [r3 + 4 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 331 * 16],     m4
+
+    ; mode 22 [row 12 - first half]
+    pslldq        m1,    2
+    pinsrb        m1,    [r2 + 10 + 32],    1
+    pinsrb        m1,    [r2 + 12 + 32],    0
+    pmaddubsw     m4,    m1,           [r3 + 23 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 332 * 16],     m4
+
+    ; mode 22 [row 13 - first half]
+    pmaddubsw     m4,    m1,           [r3 + 10 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 333 * 16],     m4
+
+    ; mode 22 [row 14 - first half]
+    pslldq        m1,    2
+    pinsrb        m1,    [r2 + 12 + 32],   1
+    pinsrb        m1,    [r2 + 15 + 32],   0
+    pmaddubsw     m4,    m1,          [r3 + 29 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 334 * 16],     m4
+
+    ; mode 22 [row 15 - first half]
+    pmaddubsw     m4,    m1,           [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 335 * 16],     m4
+
+    ; mode 21 [row 3 - first half]
+    pslldq        m6,    m7,    2
+    pinsrb        m6,    [r2 + 2 + 32],     1
+    pinsrb        m6,    [r2 + 4 + 32],     0
+    pmaddubsw     m4,    m6,           [r3 + 28 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 307 * 16],     m4
+
+    ; mode 21 [row 4 - first half]
+    pmaddubsw     m4,    m6,            [r3 + 11 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 308 * 16],     m4
+
+    ; mode 21 [row 5 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 4 + 32],     1
+    pinsrb        m6,    [r2 + 6 + 32],     0
+    pmaddubsw     m4,    m6,           [r3 + 26 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 309 * 16],     m4
+
+    ; mode 21 [row 6 - first half]
+    pmaddubsw     m4,    m6,           [r3 + 9 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 310 * 16],     m4
+
+    ; mode 21 [row 7 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 6 + 32],     1
+    pinsrb        m6,    [r2 + 8 + 32],     0
+    pmaddubsw     m4,    m6,           [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 311 * 16],     m4
+
+    ; mode 21 [row 8 - first half]
+    pmaddubsw     m4,    m6,           [r3 + 7 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 312 * 16],     m4
+
+    ; mode 21 [row 9 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 8 + 32],     1
+    pinsrb        m6,    [r2 + 9 + 32],     0
+    pmaddubsw     m4,    m6,            [r3 + 22 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 313 * 16],     m4
+
+    ; mode 21 [row 10 - first half]
+    pmaddubsw     m4,    m6,            [r3 + 5 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 314 * 16],     m4
+
+    ; mode 21 [row 11 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 +  9 + 32],    1
+    pinsrb        m6,    [r2 + 11 + 32],    0
+    pmaddubsw     m4,    m6,           [r3 + 20 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 315 * 16],     m4
+
+    ; mode 21 [row 12 - first half]
+    pmaddubsw     m4,    m6,           [r3 + 3 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 316 * 16],     m4
+
+    ; mode 21 [row 13 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 11 + 32],    1
+    pinsrb        m6,    [r2 + 13 + 32],    0
+    pmaddubsw     m4,    m6,           [r3 + 18 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 317 * 16],     m4
+
+    ; mode 21 [row 14 - first half]
+    pmaddubsw     m4,    m6,           [r3 + 1 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 318 * 16],     m4
+
+    ; mode 21 [row 15 - first half]
+    pslldq        m6,    2
+    pinsrb        m6,    [r2 + 32 + 13],    1
+    pinsrb        m6,    [r2 + 32 + 15],    0
+    pmaddubsw     m4,    m6,           [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 319 * 16],     m4
+
+    ; mode 20 [row 13 - second half]
+    pmaddubsw     m4,    m7,           [r3 + 26 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 301 * 16 + 8], m4
+    ; mode 20 [row 13 - second half] end
+
+    ; mode 20 [row 14 - second half]
+    pmaddubsw     m4,    m7,           [r3 + 5 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 302 * 16 + 8], m4
+    ; mode 20 [row 14 - second half]
+
+    ; mode 20 [row 3 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 32 + 2],    1
+    pinsrb        m7,    [r2 + 32 + 3],    0
+    pmaddubsw     m4,    m7,           [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 291 * 16],     m4
+
+    ; mode 20 [row 15 - second half]
+    pmaddubsw     m4,    m7,           [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 303 * 16 + 8], m4
+    ; mode 20 [row 15 - second half]
+
+    ; mode 20 [row 4 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 32 + 3],     1
+    pinsrb        m7,    [r2 + 32 + 5],     0
+    pmaddubsw     m4,    m7,           [r3 + 23 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 292 * 16],     m4
+
+    ; mode 20 [row 5 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 2 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 293 * 16],     m4
+
+    ; mode 20 [row 6 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 32 + 5],     1
+    pinsrb        m7,    [r2 + 32 + 6],     0
+    pmaddubsw     m4,    m7,           [r3 + 13 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 294 * 16],     m4
+
+    ; mode 20 [row 7 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 32 + 6],   1
+    pinsrb        m7,    [r2 + 32 + 8],   0
+    pmaddubsw     m4,    m7,           [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 295 * 16],     m4
+
+    ; mode 20 [row 8 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 3 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 296 * 16],     m4
+
+    ; mode 20 [row 9 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 32 + 8],   1
+    pinsrb        m7,    [r2 + 32 + 9],   0
+    pmaddubsw     m4,    m7,           [r3 + 14 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 297 * 16],     m4
+
+    ; mode 20 [row 10 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 32 +  9],   1
+    pinsrb        m7,    [r2 + 32 + 11],   0
+    pmaddubsw     m4,    m7,           [r3 + 25 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 298 * 16],     m4
+
+    ; mode 20 [row 11 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 4 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 299 * 16],     m4
+
+    ; mode 20 [row 12 - first half]
+    movu          m1,    [r3 + 15 * 16]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 32 + 11],   1
+    pinsrb        m7,    [r2 + 32 + 12],   0
+    pmaddubsw     m4,    m7,           [r3 + 15 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 300 * 16],     m4
+
+    ; mode 20 [row 13 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 32 + 12],   1
+    pinsrb        m7,    [r2 + 32 + 14],   0
+    pmaddubsw     m4,    m7,           [r3 + 26 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 301 * 16],     m4
+
+    ; mode 20 [row 14 - first half]
+    pmaddubsw     m4,    m7,           [r3 + 5 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 302 * 16],     m4
+
+    ; mode 20 [row 15 - first half]
+    pslldq        m7,    2
+    pinsrb        m7,    [r2 + 32 + 14],    1
+    pinsrb        m7,    [r2 + 32 + 15],    0
+    pmaddubsw     m4,    m7,           [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 303 * 16],     m4
+
+    ; mode 19 [row 1]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2],            1
+    pinsrb        m0,    [r2 + 32 + 1],   0
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 8],   1
+    pinsrb        m5,    [r2 + 7],   0
+
+    ; mode 20 [row 1 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 22 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 289 * 16 + 8], m4
+    ; mode 20 [row 1 - second half] end
+
+    ; mode 20 [row 2 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 1 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 290 * 16 + 8], m4
+    ; mode 20 [row 2 - second half] end
+
+    ; mode 21 [row 2 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 30 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 305 * 16 + 8], m4
+    ; mode 21 [row 2 - second half] end
+
+    ; mode 21 [row 3 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 13 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 306 * 16 + 8], m4
+    ; mode 21 [row 3 - second half] end
+
+    ; mode 21 [row 4 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 11 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 307 * 16 + 8], m4
+    ; mode 21 [row 4 - second half] end
+
+    ; mode 22 [row 2 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 25 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 322 * 16 + 8], m4
+    ; mode 22 [row 2 - second half] end
+
+    ; mode 22 [row 3 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 323 * 16 + 8], m4
+    ; mode 22 [row 3 - second half] end
+
+    ; mode 23 [row 3 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 28 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 339 * 16 + 8], m4
+    ; mode 23 [row 3 - second half] end
+
+    ; mode 23 [row 4 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 19 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 340 * 16 + 8], m4
+    ; mode 23 [row 4 - second half] end
+
+    ; mode 23 [row 5 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 10 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 341 * 16 + 8], m4
+    ; mode 23 [row 5 - second half] end
+
+    ; mode 23 [row 6 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 1 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 342 * 16 + 8], m4
+    ; mode 23 [row 6 - second half] end
+
+    ; mode 24 [row 6 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 29 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 358 * 16 + 8], m4
+    ; mode 24 [row 6 - second half] end
+
+    ; mode 24 [row 7 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 359 * 16 + 8], m4
+    ; mode 24 [row 7 - second half] end
+
+    ; mode 24 [row 8 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 19 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 360 * 16 + 8], m4
+    ; mode 24 [row 8 - second half] end
+
+    ; mode 24 [row 9 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 14 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 361 * 16 + 8], m4
+    ; mode 24 [row 9 - second half] end
+
+    ; mode 24 [row 10 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 9 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 362 * 16 + 8], m4
+    ; mode 24 [row 10 - second half] end
+
+    ; mode 24 [row 11 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 4 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 363 * 16 + 8], m4
+    ; mode 24 [row 11 - second half] end
+
+    pmaddubsw     m4,    m0,         [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,         [r3 + 12 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 273 * 16],   m4
+
+    ; mode 19 [row 2]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 1],   1
+    pinsrb        m0,    [r2 + 32 + 2],   0
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 7],   1
+    pinsrb        m5,    [r2 + 6],   0
+
+    ; mode 20 [row 3 - second half]
+    pmaddubsw     m4,    m5,            [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                   m4
+    movh          [r0 + 291 * 16 + 8], m4
+    ; mode 20 [row 3 - second half] end
+
+    ; mode 21 [row 3 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 28 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 307 * 16 + 8], m4
+    ; mode 21 [row 3 - second half] end
+
+    ; mode 21 [row 4 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 11 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 308 * 16 + 8], m4
+    ; mode 21 [row 4 - second half] end
+
+    ; mode 22 [row 4 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 31 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 324 * 16 + 8], m4
+    ; mode 22 [row 4 - second half] end
+
+    ; mode 22 [row 5 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 18 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 325 * 16 + 8], m4
+    ; mode 22 [row 5 - second half] end
+
+    ; mode 22 [row 6 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 5 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 326 * 16 + 8], m4
+    ; mode 22 [row 6 - second half] end
+
+    ; mode 23 [row 7 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 343 * 16 + 8], m4
+    ; mode 23 [row 7 - second half] end
+
+    ; mode 23 [row 8 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 15 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 344 * 16 + 8], m4
+    ; mode 23 [row 8 - second half] end
+
+    ; mode 23 [row 9 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 6 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 345 * 16 + 8], m4
+    ; mode 23 [row 9 - second half] end
+
+    ; mode 24 [row 12 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 31 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 364 * 16 + 8], m4
+    ; mode 24 [row 12 - second half] end
+
+    ; mode 24 [row 13 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 26 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 365 * 16 + 8], m4
+    ; mode 24 [row 13 - second half] end
+
+    ; mode 24 [row 14 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 21 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 366 * 16 + 8], m4
+    ; mode 24 [row 14 - second half] end
+
+    ; mode 24 [row 15 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 367 * 16 + 8], m4
+    ; mode 24 [row 15 - second half] end
+
+    pmaddubsw     m4,    m0,         [r3 + 18 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,         [r3 + 18 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 274 * 16],   m4
+
+    ; mode 19 [row 3]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 2],   1
+    pinsrb        m0,    [r2 + 32 + 4],   0
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 6],   1
+    pinsrb        m5,    [r2 + 5],   0
+
+    ; mode 20 [row 4 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 23 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 292 * 16 + 8], m4
+    ; mode 20 [row 4 - second half] end
+
+    ; mode 20 [row 5 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 2 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 293 * 16 + 8], m4
+    ; mode 20 [row 5 - second half] end
+
+    ; mode 21 [row 5 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 26 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 309 * 16 + 8], m4
+    ; mode 21 [row 5 - second half] end
+
+    ; mode 21 [row 6 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 9 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 310 * 16 + 8], m4
+    ; mode 21 [row 6 - second half] end
+
+    ; mode 22 [row 7 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 327 * 16 + 8], m4
+    ; mode 22 [row 7 - second half] end
+
+    ; mode 22 [row 8 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 11 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 328 * 16 + 8], m4
+    ; mode 22 [row 8 - second half] end
+
+    ; mode 23 [row 10 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 29 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 346 * 16 + 8], m4
+    ; mode 23 [row 10 - second half] end
+
+    ; mode 23 [row 11 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 20 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 347 * 16 + 8], m4
+    ; mode 23 [row 11 - second half] end
+
+    ; mode 23 [row 12 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 11 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 348 * 16 + 8], m4
+    ; mode 23 [row 12 - second half] end
+
+    ; mode 23 [row 13 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 2 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 349 * 16 + 8], m4
+    ; mode 23 [row 13 - second half] end
+
+    pmaddubsw     m4,    m0,         [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,         [r3 + 24 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 275 * 16],   m4
+
+    ; mode 19 [row 4]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 4],   1
+    pinsrb        m0,    [r2 + 32 + 5],   0
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 5],   1
+    pinsrb        m5,    [r2 + 4],   0
+
+    ; mode 20 [row 6 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 13 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 294 * 16 + 8], m4
+    ; mode 20 [row 6 - second half] end
+
+    ; mode 21 [row 7 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 311 * 16 + 8], m4
+    ; mode 21 [row 7 - second half] end
+
+    ; mode 21 [row 8 - second half]
+    pmaddubsw     m4,    m5,          [r3 + 7 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 312 * 16 + 8], m4
+    ; mode 21 [row 8 - second half] end
+
+    ; mode 22 [row 9 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 30 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 329 * 16 + 8], m4
+    ; mode 22 [row 9 - second half] end
+
+    ; mode 22 [row 10 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 17 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 330 * 16 + 8], m4
+    ; mode 22 [row 10 - second half] end
+
+    ; mode 22 [row 11 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 4 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 331 * 16 + 8], m4
+    ; mode 22 [row 11 - second half] end
+
+    ; mode 23 [row 14 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 25 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 350 * 16 + 8], m4
+    ; mode 23 [row 14 - second half] end
+
+    ; mode 23 [row 15 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 351 * 16 + 8], m4
+
+    ; mode 23 [row 15 - second half] end
+    pmaddubsw     m4,    m0,         [r3 + 30 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,         [r3 + 30 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 276 * 16],   m4
+
+    ; mode 19 [row 5]
+    pmaddubsw     m4,    m0,         [r3 + 4 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,         [r3 + 4 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 277 * 16],   m4
+
+    ; mode 19 [row 6]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 5],   1
+    pinsrb        m0,    [r2 + 32 + 6],   0
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 4],   1
+    pinsrb        m5,    [r2 + 3],   0
+
+    ; mode 20 [row 7 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 295 * 16 + 8], m4
+    ; mode 20 [row 7 - second half] end
+
+    ; mode 20 [row 8 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 3 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 296 * 16 + 8], m4
+    ; mode 20 [row 8 - second half] end
+
+    ; mode 21 [row 9 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 22 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 313 * 16 + 8], m4
+    ; mode 21 [row 9 - second half] end
+
+    ; mode 21 [row 10 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 5 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 314 * 16 + 8], m4
+    ; mode 21 [row 10 - second half] end
+
+    ; mode 22 [row 12 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 23 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 332 * 16 + 8], m4
+    ; mode 22 [row 12 - second half] end
+
+    ; mode 22 [row 13 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 10 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 333 * 16 + 8], m4
+    ; mode 22 [row 13 - second half] end
+
+    pmaddubsw     m4,    m0,          [r3 + 10 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,          [r3 + 10 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 278 * 16],   m4
+
+    ; mode 19 [row 7]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 6],   1
+    pinsrb        m0,    [r2 + 32 + 7],   0
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 3],   1
+    pinsrb        m5,    [r2 + 2],   0
+
+    ; mode 20 [row 9 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 14 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 297 * 16 + 8], m4
+    ; mode 20 [row 9 - second half]
+
+    ; mode 21 [row 11 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 20 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 315 * 16 + 8], m4
+    ; mode 21 [row 11 - second half] end
+
+    ; mode 21 [row 12 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 3 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 316 * 16 + 8], m4
+    ; mode 21 [row 12 - second half] end
+
+    ; mode 22 [row 14 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 29 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 334 * 16 + 8], m4
+    ; mode 22 [row 14 - second half] end
+
+    ; mode 22 [row 15 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 335 * 16 + 8], m4
+    ; mode 22 [row 15 - second half] end
+
+    pmaddubsw     m4,    m0,         [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,         [r3 + 16 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 279 * 16],   m4
+
+    ; mode 19 [row 8]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 7],   1
+    pinsrb        m0,    [r2 + 32 + 9],   0
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 2],   1
+    pinsrb        m5,    [r2 + 1],   0
+
+    ; mode 20 [row 10 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 25 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 298 * 16 + 8], m4
+    ; mode 20 [row 10 - second half] end
+
+    ; mode 20 [row 11 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 4 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 299 * 16 + 8], m4
+    ; mode 20 [row 11 - second half] end
+
+    ; mode 21 [row 13 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 18 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 317 * 16 + 8], m4
+    ; mode 21 [row 13 - second half] end
+
+    ; mode 21 [row 14 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 1 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 318 * 16 + 8], m4
+    ; mode 21 [row 14 - second half] end
+
+    pmaddubsw     m4,    m0,         [r3 + 22 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,         [r3 + 22 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 280 * 16],   m4
+
+    ; mode 19 [row 9]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 9],   1
+    pinsrb        m0,    [r2 + 32 + 10],   0
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 1],   1
+    pinsrb        m5,    [r2 + 0],   0
+
+    ; mode 20 [row 12 - second half]
+    pmaddubsw     m4,    m5,           [r3 + 15 * 16]
+    pmulhrsw      m4,    m3
+    packuswb      m4,                  m4
+    movh          [r0 + 300 * 16 + 8], m4
+
+    ; mode 20 [row 12 - second half] end
+    pmaddubsw     m4,    m0,          [r3 + 28 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,          [r3 + 28 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 281 * 16],   m4
+
+    ; mode 19 [row 10]
+    pmaddubsw     m4,    m0,         [r3 + 2 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,         [r3 + 2 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 282 * 16],   m4
+
+    ; mode 19 [row 11]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 10],   1
+    pinsrb        m0,    [r2 + 32 + 11],   0
+    pmaddubsw     m4,    m0,         [r3 + 8 * 16]
+    pmulhrsw      m4,    m3
+    pslldq        m5,    2
+    pinsrb        m5,    [r2],            1
+    pinsrb        m5,    [r2 + 32 + 1],   0
+    pmaddubsw     m6,    m5,         [r3 + 8 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 283 * 16],   m4
+
+    ; mode 19 [row 12]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 11],   1
+    pinsrb        m0,    [r2 + 32 + 12],   0
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 32 + 1],   1
+    pinsrb        m5,    [r2 + 32 + 2],   0
+    pmaddubsw     m4,    m0,         [r3 + 14 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m6,    m5,         [r3 + 14 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 284 * 16],   m4
+
+    ; mode 19 [row 13]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 12],   1
+    pinsrb        m0,    [r2 + 32 + 14],   0
+    pmaddubsw     m4,    m0,         [r3 + 20 * 16]
+    pmulhrsw      m4,    m3
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 32 + 2],   1
+    pinsrb        m5,    [r2 + 32 + 4],   0
+    pmaddubsw     m6,    m5,         [r3 + 20 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 285 * 16],   m4
+
+    ; mode 19 [row 14]
+    pslldq        m0,    2
+    pinsrb        m0,    [r2 + 32 + 14],   1
+    pinsrb        m0,    [r2 + 32 + 15],   0
+    pmaddubsw     m4,    m0,         [r3 + 26 * 16]
+    pmulhrsw      m4,    m3
+    pslldq        m5,    2
+    pinsrb        m5,    [r2 + 32 + 4],   1
+    pinsrb        m5,    [r2 + 32 + 5],   0
+    pmaddubsw     m6,    m5,         [r3 + 26 * 16]
+    pmulhrsw      m6,    m3
+    packuswb      m4,                m6
+    movu          [r0 + 286 * 16],   m4
+
+    ; mode 19 [row 15]
+    movu         m0,                   [r2 + 32]
+    pshufb       m0,                   [tab_S1]
+    movu         [r0 + 287 * 16],      m0
+    movd         m1,                   [r2]
+    movd         [r0 + 287 * 16 + 12], m1
+
+    ; mode 25
+    movu          m1,    [r1]
+
+    ; mode 26 [all rows]
+    psrldq        m6,    m1,         1
+    pinsrb        m6,    [r1 + 16], 15
+    movu          m7,    m6
+    movu         [r0 + 384 * 16],   m6
+    movu         [r0 + 385 * 16],   m6
+    movu         [r0 + 386 * 16],   m6
+    movu         [r0 + 387 * 16],   m6
+    movu         [r0 + 388 * 16],   m6
+    movu         [r0 + 389 * 16],   m6
+    movu         [r0 + 390 * 16],   m6
+    movu         [r0 + 391 * 16],   m6
+    movu         [r0 + 392 * 16],   m6
+    movu         [r0 + 393 * 16],   m6
+    movu         [r0 + 394 * 16],   m6
+    movu         [r0 + 395 * 16],   m6
+    movu         [r0 + 396 * 16],   m6
+    movu         [r0 + 397 * 16],   m6
+    movu         [r0 + 398 * 16],   m6
+    movu         [r0 + 399 * 16],   m6
+
+    pxor         m0,          m0
+    pshufb       m6,          m6,         m0
+    punpcklbw    m6,          m0
+    pinsrb       m2,          [r1], 0
+    pshufb       m2,          m2,         m0
+    punpcklbw    m2,          m0
+    movu         m4,          [r1 + 1 + 32]
+    punpcklbw    m5,          m4,         m0
+    punpckhbw    m4,          m0
+    psubw        m5,          m2
+    psubw        m4,          m2
+    psraw        m5,          1
+    psraw        m4,          1
+    paddw        m5,          m6
+    paddw        m4,          m6
+    packuswb     m5,          m4
+
+    pextrb       [r0 + 384 * 16],  m5,          0
+    pextrb       [r0 + 385 * 16],  m5,          1
+    pextrb       [r0 + 386 * 16],  m5,          2
+    pextrb       [r0 + 387 * 16],  m5,          3
+    pextrb       [r0 + 388 * 16],  m5,          4
+    pextrb       [r0 + 389 * 16],  m5,          5
+    pextrb       [r0 + 390 * 16],  m5,          6
+    pextrb       [r0 + 391 * 16],  m5,          7
+    pextrb       [r0 + 392 * 16],  m5,          8
+    pextrb       [r0 + 393 * 16],  m5,          9
+    pextrb       [r0 + 394 * 16],  m5,          10
+    pextrb       [r0 + 395 * 16],  m5,          11
+    pextrb       [r0 + 396 * 16],  m5,          12
+    pextrb       [r0 + 397 * 16],  m5,          13
+    pextrb       [r0 + 398 * 16],  m5,          14
+    pextrb       [r0 + 399 * 16],  m5,          15
+
+    ; mode 25 [row 15]
+    movu          [r0 + 383 * 16],     m1
+
+    ; mode 25 [row 0]
+    psrldq        m2,    m1,           1
+    punpcklbw     m1,    m2
+    movu          m2,    [r1 + 8]
+    psrldq        m4,    m2,           1
+    punpcklbw     m2,    m4
+    pmaddubsw     m4,    m1,           [r3 + 30 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,           [r3 + 30 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 368 * 16],     m4
+
+    ; mode 25 [row 1]
+    pmaddubsw     m4,    m1,            [r3 + 28 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,            [r3 + 28 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 369 * 16],     m4
+
+    ; mode 25 [row 2]
+    pmaddubsw     m4,    m1,            [r3 + 26 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,            [r3 + 26 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 370 * 16],     m4
+
+    ; mode 25 [row 3]
+    pmaddubsw     m4,    m1,            [r3 + 24 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,            [r3 + 24 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 371 * 16],     m4
+
+    ; mode 25 [row 4]
+    pmaddubsw     m4,    m1,           [r3 + 22 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,           [r3 + 22 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 372 * 16],     m4
+
+    ; mode 25 [row 5]
+    pmaddubsw     m4,    m1,           [r3 + 20 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,           [r3 + 20 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 373 * 16],     m4
+
+    ; mode 25 [row 6]
+    pmaddubsw     m4,    m1,            [r3 + 18 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,            [r3 + 18 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 374 * 16],     m4
+
+    ; mode 25 [row 7]
+    pmaddubsw     m4,    m1,            [r3 + 16 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,            [r3 + 16 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 375 * 16],     m4
+
+    ; mode 25 [row 8]
+    pmaddubsw     m4,    m1,           [r3 + 14 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,           [r3 + 14 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 376 * 16],     m4
+
+    ; mode 25 [row 9]
+    pmaddubsw     m4,    m1,            [r3 + 12 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,            [r3 + 12 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 377 * 16],     m4
+
+    ; mode 25 [row 10]
+    pmaddubsw     m4,    m1,           [r3 + 10 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,           [r3 + 10 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 378 * 16],     m4
+
+    ; mode 25 [row 11]
+    pmaddubsw     m4,    m1,             [r3 + 8 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,             [r3 + 8 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 379 * 16],     m4
+
+    ; mode 25 [row 12]
+    pmaddubsw     m4,    m1,            [r3 + 6 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,            [r3 + 6 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 380 * 16],     m4
+
+    ; mode 25 [row 13]
+    pmaddubsw     m4,    m1,           [r3 + 4 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,           [r3 + 4 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 381 * 16],     m4
+
+    ; mode 25 [row 14]
+    pmaddubsw     m4,    m1,           [r3 + 2 * 16]
+    pmulhrsw      m4,    m3
+    pmaddubsw     m5,    m2,           [r3 + 2 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m4,                  m5
+    movu          [r0 + 382 * 16],     m4
+
+    ; mode 27 [row 15] (shifted reference stored as-is; the last row needs no interpolation)
+    psrldq        m6,    m7,           1
+    punpcklbw     m7,    m6    ; interleave ref with its +1 shift for the pmaddubsw rows below
+    pinsrb        m6,    [r1 + 17],    15    ; NOTE(review): reads r1, unlike the r2 loads in modes 28-33 - confirm intended reference array
+    movu          [r0 + 415 * 16],     m6
+
+    ; mode 27 [row 0]
+    movu          m4,    [r1 + 9]
+    psrldq        m5,    m4,           1
+    punpcklbw     m4,    m5
+    pmaddubsw     m6,    m7,           [r3 + 2 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 2 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 400 * 16],     m6
+
+    ; mode 27 [row 1]
+    pmaddubsw     m6,    m7,           [r3 + 4 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 4 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 401 * 16],     m6
+
+    ; mode 27 [row 2]
+    pmaddubsw     m6,    m7,           [r3 + 6 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 6 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 402 * 16],     m6
+
+    ; mode 27 [row 3]
+    pmaddubsw     m6,    m7,           [r3 + 8 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 8 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 403 * 16],     m6
+
+    ; mode 27 [row 4]
+    pmaddubsw     m6,    m7,           [r3 + 10 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 10 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 404 * 16],     m6
+
+    ; mode 27 [row 5]
+    pmaddubsw     m6,    m7,           [r3 + 12 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 12 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 405 * 16],     m6
+
+    ; mode 27 [row 6]
+    pmaddubsw     m6,    m7,           [r3 + 14 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 14 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 406 * 16],     m6
+
+    ; mode 27 [row 7]
+    pmaddubsw     m6,    m7,           [r3 + 16 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 16 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 407 * 16],     m6
+
+    ; mode 27 [row 8]
+    pmaddubsw     m6,    m7,            [r3 + 18 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,            [r3 + 18 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 408 * 16],     m6
+
+    ; mode 27 [row 9]
+    pmaddubsw     m6,    m7,           [r3 + 20 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 20 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 409 * 16],     m6
+
+    ; mode 27 [row 10]
+    pmaddubsw     m6,    m7,           [r3 + 22 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 22 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 410 * 16],     m6
+
+    ; mode 27 [row 11]
+    pmaddubsw     m6,    m7,           [r3 + 24 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 24 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 411 * 16],     m6
+
+    ; mode 27 [row 12]
+    pmaddubsw     m6,    m7,           [r3 + 26 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 26 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 412 * 16],     m6
+
+    ; mode 27 [row 13]
+    pmaddubsw     m6,    m7,           [r3 + 28 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 28 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 413 * 16],     m6
+
+    ; mode 27 [row 14]
+    pmaddubsw     m6,    m7,           [r3 + 30 * 16]
+    pmulhrsw      m6,    m3
+    pmaddubsw     m5,    m4,           [r3 + 30 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m6,                  m5
+    movu          [r0 + 414 * 16],     m6
+
+    ; mode 28 [row 0]
+    movu          m1,    [r2 + 1]
+    psrldq        m2,    m1,           1
+    punpcklbw     m1,    m2
+    movu          m4,    [r2 + 9]
+    psrldq        m5,    m4,           1
+    punpcklbw     m4,    m5
+    pmaddubsw     m2,    m1,           [r3 + 5 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 5 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 416 * 16],     m2
+
+    ; mode 28 [row 0] - NOTE(review): exact duplicate of the stanza above (same 5*16 coefficient, same store to [r0 + 416 * 16]); redundant recomputation
+    pmaddubsw     m2,    m1,            [r3 + 5 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,            [r3 + 5 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 416 * 16],     m2
+
+    ; mode 28 [row 1]
+    pmaddubsw     m2,    m1,           [r3 + 10 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 10 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 417 * 16],     m2
+
+    ; mode 28 [row 2]
+    pmaddubsw     m2,    m1,            [r3 + 15 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,            [r3 + 15 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 418 * 16],     m2
+
+    ; mode 28 [row 3]
+    pmaddubsw     m2,    m1,           [r3 + 20 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 20 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 419 * 16],     m2
+
+    ; mode 28 [row 4]
+    pmaddubsw     m2,    m1,           [r3 + 25 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 25 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 420 * 16],     m2
+
+    ; mode 28 [row 5]
+    pmaddubsw     m2,    m1,           [r3 + 30 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 30 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 421 * 16],     m2
+
+    ; mode 29 [row 0]
+    pmaddubsw     m2,    m1,           [r3 + 9 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 9 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 432 * 16],     m2
+
+    ; mode 29 [row 1]
+    pmaddubsw     m2,    m1,           [r3 + 18 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 18 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 433 * 16],     m2
+
+    ; mode 29 [row 2]
+    pmaddubsw     m2,    m1,           [r3 + 27 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 27 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 434 * 16],     m2
+
+    ; mode 30 [row 0]
+    pmaddubsw     m2,    m1,           [r3 + 13 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 13 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 448 * 16],     m2
+
+    ; mode 30 [row 1]
+    pmaddubsw     m2,    m1,           [r3 + 26 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 26 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 449 * 16],     m2
+
+    ; mode 33 [row 0]
+    movu     [r0 + 496 * 16],     m2
+
+    ; mode 31 [row 0]
+    pmaddubsw     m2,    m1,           [r3 + 17 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 17 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 464 * 16],     m2
+
+    ; mode 32 [row 0]
+    pmaddubsw     m2,    m1,           [r3 + 21 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 21 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                  m5
+    movu          [r0 + 480 * 16],     m2
+
+    ; mode 28 [row 6]
+    movd          m7,    [r2 + 9]
+    palignr       m7,    m1,          2
+    pmaddubsw     m2,    m7,          [r3 + 3 * 16]
+    pmulhrsw      m2,    m3
+    movd          m6,    [r2 + 17]
+    palignr       m6,    m4,         2
+    pmaddubsw     m5,    m6,          [r3 + 3 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,    m5
+    movu          [r0 + 422 * 16],   m2
+
+    ; mode 28 [row 7]
+    pmaddubsw     m2,    m7,         [r3 + 8 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 8 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 423 * 16],   m2
+
+    ; mode 28 [row 8]
+    pmaddubsw     m2,    m7,         [r3 + 13 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 13 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 424 * 16],   m2
+
+    ; mode 28 [row 9]
+    pmaddubsw     m2,    m7,         [r3 + 18 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 18 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 425 * 16],   m2
+
+    ; mode 28 [row 10]
+    pmaddubsw     m2,    m7,         [r3 + 23 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 23 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 426 * 16],   m2
+
+    ; mode 29 [row 3]
+    pmaddubsw     m2,    m7,         [r3 + 4 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 4 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 435 * 16],   m2
+
+    ; mode 29 [row 4]
+    pmaddubsw     m2,    m7,         [r3 + 13 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 13 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 436 * 16],   m2
+
+    ; mode 29 [row 5]
+    pmaddubsw     m2,    m7,         [r3 + 22 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 22 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 437 * 16],   m2
+
+    ; mode 29 [row 6]
+    pmaddubsw     m2,    m7,         [r3 + 31 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 31 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 438 * 16],   m2
+
+    ; mode 32 [row 2]
+    movu          [r0 + 482 * 16],   m2
+
+    ; mode 30 [row 2]
+    pmaddubsw     m2,    m7,         [r3 + 7 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 7 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 450 * 16],   m2
+
+    ; mode 30 [row 3]
+    pmaddubsw     m2,    m7,         [r3 + 20 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 20 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 451 * 16],   m2
+
+    ; mode 33 [row 1]
+    movu          [r0 + 497 * 16],   m2
+
+    ; mode 31 [row 1]
+    pmaddubsw     m2,    m7,         [r3 + 2 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 2 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 465 * 16],   m2
+
+    ; mode 31 [row 2]
+    pmaddubsw     m2,    m7,         [r3 + 19 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 19 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 466 * 16],   m2
+
+    ; mode 32 [row 1]
+    pmaddubsw     m2,    m7,         [r3 + 10 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 10 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 481 * 16],   m2
+
+    ; mode 28 [row 11]
+    pmaddubsw     m2,    m7,         [r3 + 28 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 28 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 427 * 16],   m2
+
+    ; mode 28 [row 12]
+    movd          m1,    [r2 + 10]
+    palignr       m1,    m7,        2
+    pmaddubsw     m2,    m1,         [r3 + 1 * 16]
+    pmulhrsw      m2,    m3
+    movd          m4,    [r2 + 18]
+    palignr       m4,    m6,        2
+    pmaddubsw     m5,    m4,         [r3 + 1 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 428 * 16],   m2
+
+    ; mode 30 [row 4]
+    movu          [r0 + 452 * 16],   m2
+
+    ; mode 28 [row 13]
+    pmaddubsw     m2,    m1,         [r3 + 6 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 6 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 429 * 16],   m2
+
+    ; mode 28 [row 14]
+    pmaddubsw     m2,    m1,         [r3 + 11 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 11 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 430 * 16],   m2
+
+    ; mode 28 [row 15]
+    pmaddubsw     m2,    m1,           [r3 + 16 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,           [r3 + 16 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 431 * 16],   m2
+
+    ; mode 29 [row 7]
+    pmaddubsw     m2,    m1,         [r3 + 8 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 8 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 439 * 16],   m2
+
+    ; mode 29 [row 8]
+    pmaddubsw     m2,    m1,         [r3 + 17 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 17 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 440 * 16],   m2
+
+    ; mode 29 [row 9]
+    pmaddubsw     m2,    m1,          [r3 + 26 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,          [r3 + 26 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 441 * 16],   m2
+
+    ; mode 30 [row 5]
+    pmaddubsw     m2,    m1,         [r3 + 14 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 14 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 453 * 16],   m2
+
+    ; mode 33 [row 2]
+    movu          [r0 + 498 * 16],   m2
+
+    ; mode 30 [row 6]
+    pmaddubsw     m2,    m1,         [r3 + 27 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 27 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 454 * 16],   m2
+
+    ; mode 31 [row 3]
+    pmaddubsw     m2,    m1,         [r3 + 4 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 4 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 467 * 16],   m2
+
+    ; mode 31 [row 4]
+    pmaddubsw     m2,    m1,         [r3 + 21 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 21 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 468 * 16],   m2
+
+    ; mode 32 [row 3]
+    pmaddubsw     m2,    m1,         [r3 + 20 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 20 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 483 * 16],   m2
+
+    ; mode 29 [row 10]
+    movd          m7,     [r2 + 11]
+    palignr       m7,     m1,        2
+    pmaddubsw     m2,    m7,         [r3 + 3 * 16]
+    pmulhrsw      m2,    m3
+    movd          m6,     [r2 + 19]
+    palignr       m6,     m4,        2
+    pmaddubsw     m5,    m6,         [r3 + 3 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 442 * 16],   m2
+
+    ; mode 29 [row 11]
+    pmaddubsw     m2,    m7,         [r3 + 12 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 12 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 443 * 16],   m2
+
+    ; mode 29 [row 12]
+    pmaddubsw     m2,    m7,         [r3 + 21 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 21 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 444 * 16],   m2
+
+    ; mode 30 [row 8]
+    movu          [r0 + 456 * 16],   m2
+
+    ; mode 29 [row 13]
+    pmaddubsw     m2,    m7,         [r3 + 30 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 30 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 445 * 16],   m2
+
+    ; mode 32 [row 5]
+    movu          [r0 + 485 * 16],   m2
+
+    ; mode 30 [row 7]
+    pmaddubsw     m2,    m7,         [r3 + 8 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 8 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 455 * 16],   m2
+
+    ; mode 33 [row 3]
+    movu          [r0 + 499 * 16],   m2
+
+    ; mode 31 [row 5]
+    pmaddubsw     m2,    m7,         [r3 + 6 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 6 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 469 * 16],   m2
+
+    ; mode 31 [row 6]
+    pmaddubsw     m2,    m7,         [r3 + 23 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 23 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 470 * 16],   m2
+
+    ; mode 32 [row 4]
+    pmaddubsw     m2,    m7,          [r3 + 9 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,          [r3 + 9 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 484 * 16],   m2
+
+    movu          m1,        m7    ; NOTE(review): dead - m1 is reloaded from [r2 + 12] just below
+    movu          m4,        m6    ; NOTE(review): dead - m4 is reloaded from [r2 + 20] just below
+
+    ; mode 29 [row 14]
+    movu          m1,    [r2 + 12]    ; NOTE(review): 16-byte movu where sibling stanzas use 4-byte movd - confirm no over-read
+    palignr       m1,    m7,         2
+    pmaddubsw     m2,    m1,         [r3 + 7 * 16]
+    pmulhrsw      m2,    m3
+    movd          m4,     [r2 + 20]
+    palignr       m4,     m6,        2
+    pmaddubsw     m5,    m4,         [r3 + 7 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 446 * 16],   m2
+
+    ; mode 29 [row 15]
+    pmaddubsw     m2,    m1,         [r3 + 16 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 16 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 447 * 16],   m2
+
+    ; mode 30 [row 9]
+    pmaddubsw     m2,    m1,         [r3 + 2 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 2 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 457 * 16],   m2
+
+    ; mode 33 [row 4]
+    movu          [r0 + 500 * 16],   m2
+
+    ; mode 30 [row 10]
+    pmaddubsw     m2,    m1,         [r3 + 15 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 15 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 458 * 16],   m2
+
+    ; mode 30 [row 11]
+    pmaddubsw     m2,    m1,          [r3 + 28 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,          [r3 + 28 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 459 * 16],   m2
+
+    ; mode 33 [row 5]
+    movu          [r0 + 501 * 16],   m2
+
+    ; mode 31 [row 7]
+    pmaddubsw     m2,    m1,         [r3 + 8 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 8 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 471 * 16],   m2
+
+    ; mode 31 [row 8]
+    pmaddubsw     m2,    m1,         [r3 + 25 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 25 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 472 * 16],   m2
+
+    ; mode 32 [row 6]
+    pmaddubsw     m2,    m1,         [r3 + 19 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 19 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 486 * 16],   m2
+
+    ; mode 30 [row 12]
+    movd          m7,    [r2 + 13]
+    palignr       m7,    m1,         2
+    pmaddubsw     m2,    m7,         [r3 + 9 * 16]
+    pmulhrsw      m2,    m3
+    movd          m6,    [r2 + 21]
+    palignr       m6,    m4,         2
+    pmaddubsw     m5,    m6,         [r3 + 9 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 460 * 16],   m2
+
+    ; mode 30 [row 13]
+    pmaddubsw     m2,    m7,          [r3 + 22 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,          [r3 + 22 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 461 * 16],   m2
+
+    ; mode 33 [row 6]
+    movu          [r0 + 502 * 16],   m2
+
+    ; mode 31 [row 9]
+    pmaddubsw     m2,    m7,          [r3 + 10 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,          [r3 + 10 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 473 * 16],   m2
+
+    ; mode 31 [row 10]
+    pmaddubsw     m2,    m7,         [r3 + 27 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 27 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 474 * 16],   m2
+
+    ; mode 32 [row 7]
+    pmaddubsw     m2,    m7,         [r3 + 8 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 8 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 487 * 16],   m2
+
+    ; mode 32 [row 8]
+    pmaddubsw     m2,    m7,         [r3 + 29 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 29 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 488 * 16],   m2
+
+
+    movu          m1,                m7    ; NOTE(review): dead - m1 is reloaded from [r2 + 14] just below
+    movu          m4,                m6    ; NOTE(review): dead - m4 is reloaded from [r2 + 22] just below
+
+    ; mode 30 [row 14]
+    movd          m1,    [r2 + 14]
+    palignr       m1,    m7,        2
+    pmaddubsw     m2,    m1,        [r3 + 3 * 16]
+    pmulhrsw      m2,    m3
+    movd          m4,    [r2 + 22]
+    palignr       m4,    m6,        2
+    pmaddubsw     m5,    m4,        [r3 + 3 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,               m5
+    movu          [r0 + 462 * 16],  m2
+
+    ; mode 30 [row 15]
+    pmaddubsw     m2,    m1,         [r3 + 16 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 16 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 463 * 16],   m2
+
+    ; mode 33 [row 7]
+    movu          [r0 + 503 * 16],   m2
+
+    ; mode 31 [row 11]
+    pmaddubsw     m2,    m1,          [r3 + 12 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,          [r3 + 12 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 475 * 16],   m2
+
+    ; mode 31 [row 12]
+    pmaddubsw     m2,    m1,         [r3 + 29 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 29 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 476 * 16],   m2
+
+    ; mode 32 [row 9]
+    pmaddubsw     m2,    m1,         [r3 + 18 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 18 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 489 * 16],   m2
+
+    ; mode 31 [row 13]
+    movd          m7,    [r2 + 15]
+    palignr       m7,    m1,         2
+    pmaddubsw     m2,    m7,         [r3 + 14 * 16]
+    pmulhrsw      m2,    m3
+    movd          m6,    [r2 + 23]
+    palignr       m6,    m4,         2
+    pmaddubsw     m5,    m6,         [r3 + 14 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 477 * 16],   m2
+
+    ; mode 31 [row 14]
+    pmaddubsw     m2,    m7,         [r3 + 31 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 31 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 478 * 16],   m2
+
+    ; mode 32 [row 10]
+    pmaddubsw     m2,    m7,         [r3 + 7 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 7 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 490 * 16],   m2
+
+    ; mode 32 [row 11]
+    pmaddubsw     m2,    m7,         [r3 + 28 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 28 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 491 * 16],   m2
+
+    ; mode 33 [row 8]
+    pmaddubsw     m2,    m7,         [r3 + 10 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 10 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 504 * 16],   m2
+
+    ; mode 31 [row 15]
+    movd          m1,    [r2 + 16]
+    palignr       m1,    m7,         2
+    pmaddubsw     m2,    m1,          [r3 + 16 * 16]
+    pmulhrsw      m2,    m3
+    movd          m4,    [r2 + 24]
+    palignr       m4,    m6,         2
+    pmaddubsw     m5,    m4,          [r3 + 16 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 479 * 16],   m2
+
+    ; mode 32 [row 12]
+    pmaddubsw     m2,    m1,          [r3 + 17 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,          [r3 + 17 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 492 * 16],   m2
+
+    ; mode 33 [row 9]
+    pmaddubsw     m2,    m1,         [r3 + 4 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 4 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 505 * 16],   m2
+
+    ; mode 33 [row 10]
+    pmaddubsw     m2,    m1,          [r3 + 30 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,          [r3 + 30 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 506 * 16],   m2
+
+    ; mode 33 [row 9] - NOTE(review): mislabelled "[row 10]" in the original; this repeats the row 9 stanza above (4*16 coefficient, store to [r0 + 505 * 16]) and is redundant
+    pmaddubsw     m2,    m1,          [r3 + 4 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,          [r3 + 4 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 505 * 16],   m2
+
+    ; mode 32 [row 13]
+    movd          m7,    [r2 + 17]
+    palignr       m7,    m1,         2
+    pmaddubsw     m2,    m7,         [r3 + 6 * 16]
+    pmulhrsw      m2,    m3
+
+    movd          m6,    [r2 + 25]
+    palignr       m6,    m4,         2
+    pmaddubsw     m5,    m6,         [r3 + 6 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 493 * 16],   m2
+
+    ; mode 32 [row 14]
+    pmaddubsw     m2,    m7,         [r3 + 27 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 27 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 494 * 16],   m2
+
+    ; mode 33 [row 11]
+    pmaddubsw     m2,    m7,         [r3 + 24 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m6,         [r3 + 24 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 507 * 16],   m2
+
+    ; mode 32 [row 15]
+    movd          m1,    [r2 + 18]
+    palignr       m1,    m7,         2
+    pmaddubsw     m2,    m1,         [r3 + 16 * 16]
+    pmulhrsw      m2,    m3
+    psrldq        m4,    2    ; NOTE(review): dead - m4 is overwritten by the movd three lines below
+    pinsrb        m4,    [r2 + 26],  14    ; NOTE(review): dead (see above)
+    pinsrb        m4,    [r2 + 27],  15    ; NOTE(review): dead (see above)
+    movd          m4,    [r2 + 26]
+    palignr       m4,    m6,         2
+    pmaddubsw     m5,    m4,         [r3 + 16 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 495 * 16],   m2
+
+    ; mode 33 [row 12]
+    pmaddubsw     m2,    m1,         [r3 + 18 * 16]
+    pmulhrsw      m2,    m3
+    pmaddubsw     m5,    m4,         [r3 + 18 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 508 * 16],   m2
+
+    ; mode 33 [row 13]
+    movd          m7,    [r2 + 19]
+    palignr       m7,    m1,         2
+    pmaddubsw     m2,    m7,         [r3 + 12 * 16]
+    pmulhrsw      m2,    m3
+    movd          m6,    [r2 + 27]
+    palignr       m6,    m4,         2
+    pmaddubsw     m5,    m6,         [r3 + 12 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 509 * 16],   m2
+
+    ; mode 33 [row 14]
+    movd          m1,    [r2 + 20]
+    palignr       m1,    m7,         2
+    pmaddubsw     m2,    m1,         [r3 + 6 * 16]
+    pmulhrsw      m2,    m3
+    movd          m4,    [r2 + 28]
+    palignr       m4,    m6,         2
+    pmaddubsw     m5,    m4,         [r3 + 6 * 16]
+    pmulhrsw      m5,    m3
+    packuswb      m2,                m5
+    movu          [r0 + 510 * 16],   m2
+
+    ; mode 34 [row 0]
+    movu          m1,                [r2 + 2]
+    movu          [r0 + 512 * 16],   m1
+    movu          m2,                [r2 + 18]
+    palignr       m3,                m2,     m1,    1
+    movu          [r0 + 513 * 16],   m3
+    palignr       m3,                m2,     m1,    2
+    movu          [r0 + 514 * 16],   m3
+    palignr       m3,                m2,     m1,    3
+    movu          [r0 + 515 * 16],   m3
+    palignr       m3,                m2,     m1,    4
+    movu          [r0 + 516 * 16],   m3
+    palignr       m3,                m2,     m1,    5
+    movu          [r0 + 517 * 16],   m3
+    palignr       m3,                m2,     m1,    6
+    movu          [r0 + 518 * 16],   m3
+    palignr       m3,                m2,     m1,    7
+    movu          [r0 + 519 * 16],   m3
+    palignr       m3,                m2,     m1,    8
+    movu          [r0 + 520 * 16],   m3
+    palignr       m3,                m2,     m1,    9
+    movu          [r0 + 521 * 16],   m3
+    palignr       m3,                m2,     m1,   10
+    movu          [r0 + 522 * 16],   m3
+    palignr       m3,                m2,     m1,   11
+    movu          [r0 + 523 * 16],   m3
+    palignr       m3,                m2,     m1,   12
+    movu          [r0 + 524 * 16],   m3
+
+    ; mode 33 [row 15]
+    movu          [r0 + 511 * 16],   m3
+
+    ; mode 34
+    palignr       m3,                m2,     m1,   13
+    movu          [r0 + 525 * 16],   m3
+    palignr       m3,                m2,     m1,   14
+    movu          [r0 + 526 * 16],   m3
+    palignr       m3,                m2,     m1,   15
+    movu          [r0 + 527 * 16],   m3
+    RET
+
+;--------------------------------------------------------------------------------
+; void all_angs_pred_32x32(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma)
+;--------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal all_angs_pred_32x32, 3,7,8, 0-4
+    mov        r6d, [r1 + 64]
+    mov        r3d, [r1]
+    mov        [rsp], r6d
+    mov        [r1 + 64], r3b
+    mov        r3d, [r2]
+    mov        r6d, [r2 + 64]
+    mov        [r2 + 64], r3b
+
+    lea        r3, [r2]
+    lea        r4, [r2 + 64]
+    lea        r2, [r1 + 64]
+
+    ;mode 2[row 0]
+    movu       m0,              [r4 + 2]
+    movu       [r0 + 0 * 16],   m0
+    movu       m1,              [r4 + 18]
+    movu       [r0 + 1 * 16],   m1
+
+    ;mode 9 [row 15]
+    movu       [r0 + 478 * 16],   m0
+    movu       [r0 + 479 * 16],   m1
+
+    ;mode 2[row 1]
+    movu       m2,              [r4 + 34]
+    palignr    m3,              m1,       m0,    1
+    movu       [r0 + 2 * 16],   m3
+    palignr    m4,              m2,       m1,    1
+    movu       [r0 + 3 * 16],   m4
+
+    ; mode 9 [row 31]
+    movu       [r0 + 510 * 16], m3
+    movu       [r0 + 511 * 16], m4
+
+    ;mode 2[row 17]
+    movu       [r0 + 34 * 16],  m4
+    movu       m5,              [r4 + 35]
+    movu       [r0 + 35 * 16],  m5
+
+    ;mode 2[row 2]
+    palignr    m3,              m1,       m0,    2
+    movu       [r0 + 4 * 16],   m3
+    palignr    m4,              m2,       m1,    2
+    movu       [r0 + 5 * 16],   m4
+
+    ;mode 2[row 18]
+    movu       [r0 + 36 * 16],  m4
+    movu       m6,              [r4 + 51]
+    palignr    m7,              m6,       m5,    1
+    movu       [r0 + 37 * 16],  m7
+
+    ;mode 2[row 3]
+    palignr    m3,              m1,       m0,    3
+    movu       [r0 + 6 * 16],   m3
+    palignr    m4,              m2,       m1,    3
+    movu       [r0 + 7 * 16],   m4
+
+    ;mode 2[row 19]
+    movu       [r0 + 38 * 16],  m4
+    palignr    m7,              m6,       m5,    2
+    movu       [r0 + 39 * 16],  m7
+
+    ;mode 2[row 4]
+    palignr    m3,              m1,       m0,    4
+    movu       [r0 + 8 * 16],   m3
+    palignr    m4,              m2,       m1,    4
+    movu       [r0 + 9 * 16],   m4
+
+    ; mode 8 [row 31]
+    movu       [r0 + 446 * 16],   m3
+    movu       [r0 + 447 * 16],   m4
+
+    ;mode 2[row 20]
+    movu       [r0 + 40 * 16],  m4
+    palignr    m7,              m6,       m5,    3
+    movu       [r0 + 41 * 16],  m7
+
+    ; mode 4 [row 31]
+    movu       [r0 + 190 * 16],  m4
+    movu       [r0 + 191 * 16],  m7
+
+    ;mode 2[row 5]
+    palignr    m3,              m1,       m0,    5
+    movu       [r0 + 10 * 16],  m3
+    palignr    m4,              m2,       m1,    5
+    movu       [r0 + 11 * 16],  m4
+
+    ;mode 2[row 21]
+    movu       [r0 + 42 * 16],  m4
+    palignr    m7,              m6,       m5,    4
+    movu       [r0 + 43 * 16],  m7
+
+    ;mode 2[row 6]
+    palignr    m3,              m1,       m0,    6
+    movu       [r0 + 12 * 16],  m3
+    palignr    m4,              m2,       m1,    6
+    movu       [r0 + 13 * 16],  m4
+
+    ;mode 2[row 22]
+    movu       [r0 + 44 * 16],  m4
+    palignr    m7,              m6,       m5,    5
+    movu       [r0 + 45 * 16],  m7
+
+    ;mode 2[row 7]
+    palignr    m3,              m1,       m0,    7
+    movu       [r0 + 14 * 16],  m3
+    palignr    m4,              m2,       m1,    7
+    movu       [r0 + 15 * 16],  m4
+
+    ;mode 2[row 23]
+    movu       [r0 + 46 * 16],  m4
+    palignr    m7,              m6,       m5,    6
+    movu       [r0 + 47 * 16],  m7
+
+    ;mode 2[row 8]
+    palignr    m3,              m1,       m0,    8
+    movu       [r0 + 16 * 16],  m3
+    palignr    m4,              m2,       m1,    8
+    movu       [r0 + 17 * 16],  m4
+
+    ;mode 7[row 31]
+    movu       [r0 + 382 * 16],  m3
+    movu       [r0 + 383 * 16],  m4
+
+    ;mode 2[row 24]
+    movu       [r0 + 48 * 16],  m4
+    palignr    m7,              m6,       m5,    7
+    movu       [r0 + 49 * 16],  m7
+
+    ;mode 2[row 9]
+    palignr    m3,              m1,       m0,    9
+    movu       [r0 + 18 * 16],  m3
+    palignr    m4,              m2,       m1,    9
+    movu       [r0 + 19 * 16],  m4
+
+    ;mode 2[row 25]
+    movu       [r0 + 50 * 16],  m4
+    palignr    m7,              m6,       m5,    8
+    movu       [r0 + 51 * 16],  m7
+
+    ; mode 3 [row 31]
+    movu       [r0 + 126 * 16],  m4
+    movu       [r0 + 127 * 16],  m7
+
+    ;mode 2[row 10]
+    palignr    m3,              m1,       m0,   10
+    movu       [r0 + 20 * 16],  m3
+    palignr    m4,              m2,       m1,   10
+    movu       [r0 + 21 * 16],  m4
+
+    ;mode 2[row 26]
+    movu       [r0 + 52 * 16],  m4
+    palignr    m7,              m6,       m5,    9
+    movu       [r0 + 53 * 16],  m7
+
+    ;mode 2[row 11]
+    palignr    m3,              m1,       m0,   11
+    movu       [r0 + 22 * 16],  m3
+    palignr    m4,              m2,       m1,   11
+    movu       [r0 + 23 * 16],  m4
+
+    ;mode 2[row 27]
+    movu       [r0 + 54 * 16],  m4
+    palignr    m7,              m6,       m5,   10
+    movu       [r0 + 55 * 16],  m7
+
+    ;mode 2[row 12]
+    palignr    m3,              m1,       m0,   12
+    movu       [r0 + 24 * 16],  m3
+    palignr    m4,              m2,       m1,   12
+    movu       [r0 + 25 * 16],  m4
+
+    ; mode 6 [row 31]
+    movu       [r0 + 318 * 16],  m3
+    movu       [r0 + 319 * 16],  m4
+
+    ; mode 3 [row 15]
+    movu       [r0 + 94 * 16],  m3
+    movu       [r0 + 95 * 16],  m4
+
+    ;mode 2[row 28]
+    movu       [r0 + 56 * 16],  m4
+    palignr    m7,              m6,       m5,   11
+    movu       [r0 + 57 * 16],  m7
+
+    ;mode 2[row 13]
+    palignr    m3,              m1,       m0,   13
+    movu       [r0 + 26 * 16],  m3
+    palignr    m4,              m2,       m1,   13
+    movu       [r0 + 27 * 16],  m4
+
+    ;mode 2[row 29]
+    movu       [r0 + 58 * 16],  m4
+    palignr    m7,              m6,       m5,   12
+    movu       [r0 + 59 * 16],  m7
+
+    ;mode 2[row 14]
+    palignr    m3,              m1,       m0,   14
+    movu       [r0 + 28 * 16],  m3
+    palignr    m4,              m2,       m1,   14
+    movu       [r0 + 29 * 16],  m4
+
+    ;mode 2[row 30]
+    movu       [r0 + 60 * 16],  m4
+    palignr    m7,              m6,       m5,   13
+    movu       [r0 + 61 * 16],  m7
+
+    ;mode 2[row 15]
+    palignr    m3,              m1,       m0,   15
+    movu       [r0 + 30 * 16],  m3
+    palignr    m4,              m2,       m1,   15
+    movu       [r0 + 31 * 16],  m4
+
+    ;mode 2[row 31]
+    movu       [r0 + 62 * 16],  m4
+    palignr    m7,              m6,       m5,   14
+    movu       [r0 + 63 * 16],  m7
+
+    ;mode 2[row 16]
+    movu       [r0 + 32 * 16],  m1
+    movu       [r0 + 33 * 16],  m2
+
+    ; mode 5[row 31]
+    movu       [r0 + 254 * 16],  m1
+    movu       [r0 + 255 * 16],  m2
+
+    ; mode 3 [row 0]
+    lea           r5,    [ang_table]
+    movu          m6,    [r5 + 26 * 16]
+    movu          m7,    [pw_1024  ]
+    movu          m1,    [r4 + 1   ]
+    punpcklbw     m1,    m0
+    pmaddubsw     m0,    m1,        m6
+    pmulhrsw      m0,    m7
+    movu          m2,    [r4 +  9]
+    movd          m3,    [r4 + 10]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m0,    m3
+    movu          [r0 + 64 * 16],   m0
+
+    ; mode 6 [row 1 - first half]
+    movu          [r0 + 258 * 16],  m0
+
+    ; mode 9 [row 12 - first half]
+    movu          [r0 + 472 * 16],  m0
+
+    movu          m0,    [r4 + 17]
+    movd          m3,    [r4 + 18]
+    palignr       m3,    m0,        1
+    punpcklbw     m0,    m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 25]
+    movd          m5,    [r4 + 26]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 65 * 16],   m3
+
+    ; mode 6 [row 1 - second half]
+    movu          [r0 + 259 * 16],  m3
+
+    ; mode 9 [row 12 - second half]
+    movu          [r0 + 473 * 16],  m3
+
+    ; mode 4 [row 0]
+    movu          m6,    [r5 + 21 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 128 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 129 * 16],  m3
+
+    ; mode 5 [row 0]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 192 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 193 * 16],  m3
+
+    ; mode 6 [row 0]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 256 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 257 * 16],  m3
+
+    ; mode 7 [row 0]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 320 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 321 * 16],  m3
+
+    ; mode 7 [row 1]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 322 * 16],  m3
+
+    ; mode 9 [row 8 - first half]
+    movu          [r0 + 464 * 16],  m3
+
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 323 * 16],  m3
+
+    ; mode 9 [row 8 - second half]
+    movu          [r0 + 465 * 16],  m3
+
+    ; mode 7 [row 2]
+    movu          m6,    [r5 + 27 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 324 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 325 * 16],  m3
+
+    ; mode 8 [row 0]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 384 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 385 * 16],  m3
+
+    ; mode 8 [row 1]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 386 * 16],  m3
+
+    ; mode 9 [row 4 - first half]
+    movu          [r0 + 456 * 16],  m3
+
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 387 * 16],  m3
+
+    ; mode 9 [row 4 - second half]
+    movu          [r0 + 457 * 16],  m3
+
+    ; mode 8 [row 2]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 388 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 389 * 16],  m3
+
+    ; mode 8 [row 3]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 390 * 16],  m3
+
+    ; mode 9 [row 9 - first half]
+    movu          [r0 + 466 * 16],  m3
+
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 391 * 16],  m3
+
+    ; mode 9 [row 9 - second half]
+    movu          [r0 + 467 * 16],  m3
+
+    ; mode 8 [row 4]
+    movu          m6,    [r5 + 25 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 392 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 393 * 16],  m3
+
+    ; mode 8 [row 5]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 394 * 16],  m3
+
+    ; mode 9 [row 14 - first half]
+    movu          [r0 + 476 * 16],  m3
+
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 395 * 16],  m3
+
+    ; mode 9 [row 14 - second half]
+    movu          [r0 + 477 * 16],  m3
+
+    ; mode 9 [row 0]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 448 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 449 * 16],  m3
+
+    ; mode 9 [row 1]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 450 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 451 * 16],  m3
+
+    ; mode 9 [row 2]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 452 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 453 * 16],  m3
+
+    ; mode 9 [row 3]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 454 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 455 * 16],  m3
+
+    ; mode 9 [row 5]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 458 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 459 * 16],  m3
+
+    ; mode 9 [row 6]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 460 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 461 * 16],  m3
+
+    ; mode 9 [row 7]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 462 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 463 * 16],  m3
+
+    ; mode 9 [row 10]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 468 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 469 * 16],  m3
+
+    ; mode 9 [row 11]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 470 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 471 * 16],  m3
+
+    ; mode 9 [row 13]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 474 * 16],  m3
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 475 * 16],  m3
+
+    ; mode 3 [row 1]
+    movu          m6,    [r5 + 20 * 16]
+    movu          m0,    [r4 + 2]
+    movd          m1,    [r4 + 3]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 10]
+    movd          m3,    [r4 + 11]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 66 * 16],   m1
+
+    ; mode 6 [row 3 - first half]
+    movu          [r0 + 262 * 16],  m1
+
+    ; mode 9 [row 25 - first half]
+    movu          [r0 + 498 * 16],  m1
+
+    movu          m1,    [r4 + 18]
+    movd          m3,    [r4 + 19]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 26]
+    movd          m5,    [r4 + 27]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 67 * 16],   m3
+
+    ; mode 6 [row 3 - second half]
+    movu          [r0 + 263 * 16],  m3
+
+    ; mode 9 [row 25 - second half]
+    movu          [r0 + 499 * 16],  m3
+
+    ; mode 4 [row 1]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 130 * 16],  m3
+
+    ; mode 9 [row 20 - first half]
+    movu          [r0 + 488 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 131 * 16],  m3
+
+    ; mode 9 [row 20 - second half]
+    movu          [r0 + 489 * 16],  m3
+
+    ; mode 4 [row 2]
+    movu          m6,    [r5 + 31 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 132 * 16],  m3
+
+    ; mode 7 [row 6 - first half]
+    movu          [r0 + 332 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 133 * 16],  m3
+
+    ; mode 7 [row 6 - second half]
+    movu          [r0 + 333 * 16],  m3
+
+    ; mode 5 [row 1]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 194 * 16],  m3
+
+    ; mode 5 [row 1 - first half]
+    movu          [r0 + 480 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 195 * 16],  m3
+
+    ; mode 5 [row 1 - second half]
+    movu          [r0 + 481 * 16],  m3
+
+    ; mode 5 [row 2]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 196 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 197 * 16],  m3
+
+    ; mode 6 [row 2]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 260 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 261 * 16],  m3
+
+    ; mode 7 [row 3]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 326 * 16],  m3
+
+    ; mode 9 [row 17 - first half]
+    movu          [r0 + 482 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 327 * 16],  m3
+
+    ; mode 9 [row 17 - second half]
+    movu          [r0 + 483 * 16],  m3
+
+    ; mode 7 [row 4]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 328 * 16],  m3
+
+    ; mode 8 [row 8 - first half]
+    movu          [r0 + 400 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 329 * 16],  m3
+
+    ; mode 8 [row 8 - second half]
+    movu          [r0 + 401 * 16],  m3
+
+    ; mode 7 [row 5]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 330 * 16],  m3
+
+    ; mode 9 [row 26 - first half]
+    movu          [r0 + 500 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 331 * 16],  m3
+
+    ; mode 9 [row 26 - second half]
+    movu          [r0 + 501 * 16],  m3
+
+    ; mode 8 [row 6]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 396 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 397 * 16],  m3
+
+    ; mode 9 [row 18]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 484 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 485 * 16],  m3
+
+    ; mode 9 [row 21]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 490 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 491 * 16],  m3
+
+    ; mode 9 [row 22]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 492 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 493 * 16],  m3
+
+    ; mode 9 [row 23]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 494 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 495 * 16],  m3
+
+    ; mode 9 [row 27]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 502 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 503 * 16],  m3
+
+    ; mode 9 [row 28]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 504 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 505 * 16],  m3
+
+    ; mode 9 [row 30]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 508 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 509 * 16],  m3
+
+    ; mode 8 [row 7]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 398 * 16],  m3
+
+    ; mode 9 [row 19 - first half]
+    movu          [r0 + 486 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 399 * 16],  m3
+
+    ; mode 9 [row 19 - second half]
+    movu          [r0 + 487 * 16],  m3
+
+    ; mode 8 [row 9]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 402 * 16],  m3
+
+    ; mode 9 [row 24 - first half]
+    movu          [r0 + 496 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 403 * 16],  m3
+
+    ; mode 9 [row 24 - second half]
+    movu          [r0 + 497 * 16],  m3
+
+    ; mode 8 [row 10]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 404 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 405 * 16],  m3
+
+    ; mode 8 [row 11]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 406 * 16],  m3
+
+    ; mode 9 [row 29 - first half]
+    movu          [r0 + 506 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 407 * 16],  m3
+
+    ; mode 9 [row 29 - second half]
+    movu          [r0 + 507 * 16],  m3
+
+    ; mode 3 [row 2]
+    movu          m6,    [r5 + 14 * 16]
+    movu          m0,    [r4 + 3]
+    movd          m1,    [r4 + 4]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 11]
+    movd          m3,    [r4 + 12]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 68 * 16],   m1
+
+    ; mode 6 [row 5 - first half]
+    movu          [r0 + 266 * 16],  m1
+
+    movu          m1,    [r4 + 19]
+    movd          m3,    [r4 + 20]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 27]
+    movd          m5,    [r4 + 28]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 69 * 16],   m3
+
+    ; mode 6 [row 5 - second half]
+    movu          [r0 + 267 * 16],  m3
+
+    ; mode 4 [row 3]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 134 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 135 * 16],  m3
+
+    ; mode 5 [row 3]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 198 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 199 * 16],  m3
+
+    ; mode 5 [row 4]
+    movu          m6,    [r5 + 21 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 200 * 16],  m3
+
+    ; mode 8 [row 16 - first half]
+    movu          [r0 + 416 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 201 * 16],  m3
+
+    ; mode 8 [row 16 - second half]
+    movu          [r0 + 417 * 16],  m3
+
+    ; mode 6 [row 4]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 264 * 16],  m3
+
+    ; mode 8 [row 12 - first half]
+    movu          [r0 + 408 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 265 * 16],  m3
+
+    ; mode 8 [row 12 - second half]
+    movu          [r0 + 409 * 16],  m3
+
+    ; mode 6 [row 6]
+    movu          m6,    [r5 + 27 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 268 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 269 * 16],  m3
+
+    ; mode 7 [row 7]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 334 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 335 * 16],  m3
+
+    ; mode 7 [row 8]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 336 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 337 * 16],  m3
+
+    ; mode 7 [row 9]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 338 * 16],  m3
+
+    ; mode 8 [row 17 - first half]
+    movu          [r0 + 418 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 339 * 16],  m3
+
+    ; mode 8 [row 17 - second half]
+    movu          [r0 + 419 * 16],  m3
+
+    ; mode 8 [row 13]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 410 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 411 * 16],  m3
+
+    ; mode 8 [row 14]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 412 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 413 * 16],  m3
+
+    ; mode 8 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 414 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 415 * 16],  m3
+
+    ; mode 8 [row 18]
+    movu          m6,    [r5 + 31 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 420 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 421 * 16],  m3
+
+    ; mode 3 [row 3]
+    movu          m6,    [r5 + 8 * 16]
+    movu          m0,    [r4 + 4]
+    movd          m1,    [r4 + 5]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 12]
+    movd          m3,    [r4 + 13]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 70 * 16],   m1
+
+    ; mode 6 [row 7 - first half]
+    movu          [r0 + 270 * 16],  m1
+
+    movu          m1,    [r4 + 20]
+    movd          m3,    [r4 + 21]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 28]
+    movd          m5,    [r4 + 29]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 71 * 16],   m3
+
+    ; mode 6 [row 7 - second half]
+    movu          [r0 + 271 * 16],  m3
+
+    ; mode 4 [row 4]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 136 * 16],  m3
+
+    ; mode 8 [row 20 - first half]
+    movu          [r0 + 424 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 137 * 16],  m3
+
+    ; mode 8 [row 20 - second half]
+    movu          [r0 + 425 * 16],  m3
+
+    ; mode 4 [row 5]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 138 * 16],  m3
+
+    ; mode 7 [row 13 - first half]
+    movu          [r0 + 346 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 139 * 16],  m3
+
+    ; mode 7 [row 13 - second half]
+    movu          [r0 + 347 * 16],  m3
+
+    ; mode 5 [row 5]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 202 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 203 * 16],  m3
+
+    ; mode 5 [row 6]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 204 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 205 * 16],  m3
+
+    ; mode 6 [row 8]
+    movu          m6,    [r5 + 21 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 272 * 16],  m3
+
+    ; mode 7 [row 12 - first half]
+    movu          [r0 + 344 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 273 * 16],  m3
+
+    ; mode 7 [row 12 - second half]
+    movu          [r0 + 345 * 16],  m3
+
+    ; mode 7 [row 10]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 340 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 341 * 16],  m3
+
+    ; mode 7 [row 11]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 342 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 343 * 16],  m3
+
+    ; mode 8 [row 19]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 422 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 423 * 16],  m3
+
+    ; mode 8 [row 21]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 426 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 427 * 16],  m3
+
+    ; mode 8 [row 22]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 428 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 429 * 16],  m3
+
+    ; mode 8 [row 23]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 430 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 431 * 16],  m3
+
+    ; mode 8 [row 24]
+    movu          m6,    [r5 + 29 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 432 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 433 * 16],  m3
+
+    ; mode 3 [row 4]
+    movu          m6,    [r5 + 2 * 16]
+    movu          m0,    [r4 + 5]
+    movd          m1,    [r4 + 6]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 13]
+    movd          m3,    [r4 + 14]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 72 * 16],   m1
+
+    ; mode 6 [row 9 - first half]
+    movu          [r0 + 274 * 16],  m1
+
+    ; mode 8 [row 25 - first half]
+    movu          [r0 + 434 * 16],  m1
+
+    movu          m1,    [r4 + 21]
+    movd          m3,    [r4 + 22]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 29]
+    movd          m5,    [r4 + 30]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 73 * 16],   m3
+
+    ; mode 6 [row 9 - second half]
+    movu          [r0 + 275 * 16],  m3
+
+    ; mode 8 [row 25 - second half]
+    movu          [r0 + 435 * 16],  m3
+
+    ; mode 3 [row 5]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 74 * 16],   m3
+
+    ; mode 6 [row 11 - first half]
+    movu          [r0 + 278 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 75 * 16],   m3
+
+    ; mode 6 [row 11 - second half]
+    movu          [r0 + 279 * 16],  m3
+
+    ; mode 4 [row 6]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 140 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 141 * 16],  m3
+
+    ; mode 5 [row 7]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 206 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 207 * 16],  m3
+
+    ; mode 5 [row 8]
+    movu          m6,    [r5 + 25 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 208 * 16],  m3
+
+    ; mode 7 [row 16 - first half]
+    movu          [r0 + 352 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 209 * 16],  m3
+
+    ; mode 7 [row 16 - second half]
+    movu          [r0 + 353 * 16],  m3
+
+    ; mode 6 [row 10]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 276 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 277 * 16],  m3
+
+    ; mode 7 [row 14]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 348 * 16],  m3
+
+    ; mode 8 [row 26 - first half]
+    movu          [r0 + 436 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 349 * 16],  m3
+
+    ; mode 8 [row 26 - second half]
+    movu          [r0 + 437 * 16],  m3
+
+    ; mode 7 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 350 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 351 * 16],  m3
+
+    ; mode 8 [row 27]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 438 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 439 * 16],  m3
+
+    ; mode 8 [row 28]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 440 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 441 * 16],  m3
+
+    ; mode 8 [row 29]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 442 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 443 * 16],  m3
+
+    ; mode 8 [row 30]
+    movu          m6,    [r5 + 27 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 444 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 445 * 16],  m3
+
+    ; mode 3 [row 6]
+    movu          m6,    [r5 + 22 * 16]
+    movu          m0,    [r4 + 6]
+    movd          m1,    [r4 + 7]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 14]
+    movd          m3,    [r4 + 15]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 76 * 16],   m1
+
+    ; mode 6 [row 13 - first half]
+    movu          [r0 + 282 * 16],  m1
+
+    movu          m1,    [r4 + 22]
+    movd          m3,    [r4 + 23]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 30]
+    movd          m5,    [r4 + 31]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 77 * 16],   m3
+
+    ; mode 6 [row 13 - second half]
+    movu          [r0 + 283 * 16],  m3
+
+    ; mode 4 [row 7]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 142 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 143 * 16],  m3
+
+    ; mode 4 [row 8]
+    movu          m6,    [r5 + 29 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 144 * 16],  m3
+
+    ; mode 7 [row 20 - first half]
+    movu          [r0 + 360 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 145 * 16],  m3
+
+    ; mode 7 [row 20 - second half]
+    movu          [r0 + 361 * 16],  m3
+
+    ; mode 5 [row 9]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 210 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 211 * 16],  m3
+
+    ; mode 5 [row 10]
+    movu          m6,    [r5 + 27 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 212 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 213 * 16],  m3
+
+    ; mode 7 [row 17]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 354 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 355 * 16],  m3
+
+    ; mode 7 [row 18]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 356 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 357 * 16],  m3
+
+    ; mode 7 [row 19]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 358 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 359 * 16],  m3
+
+    ; mode 6 [row 12]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 280 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 281 * 16],  m3
+
+    ; mode 3 [row 7]
+    movu          m6,    [r5 + 16 * 16]
+    movu          m0,    [r4 + 7]
+    movd          m1,    [r4 + 8]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 15]
+    movd          m3,    [r4 + 16]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 78 * 16],   m1
+
+    ; mode 6 [row 15 - first half]
+    movu          [r0 + 286 * 16],  m1
+
+    movu          m1,    [r4 + 23]
+    movd          m3,    [r4 + 24]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 31]
+    movd          m5,    [r4 + 32]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 79 * 16],   m3
+
+    ; mode 6 [row 15 - second half]
+    movu          [r0 + 287 * 16],  m3
+
+    ; mode 4 [row 9]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 146 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 147 * 16],  m3
+
+    ; mode 5 [row 11]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 214 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 215 * 16],  m3
+
+    ; mode 5 [row 12]
+    movu          m6,    [r5 + 29 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 216 * 16],  m3
+
+    ; mode 6 [row 16 - first half]
+    movu          [r0 + 288 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 217 * 16],  m3
+
+    ; mode 6 [row 16 - second half]
+    movu          [r0 + 289 * 16],  m3
+
+    ; mode 6 [row 14]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 284 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 285 * 16],  m3
+
+    ; mode 7 [row 21]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 362 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 363 * 16],  m3
+
+    ; mode 7 [row 22]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 364 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 365 * 16],  m3
+
+    ; mode 7 [row 23]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 366 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 367 * 16],  m3
+
+    ; mode 3 [row 8]
+    movu          m6,    [r5 + 10 * 16]
+    movu          m0,    [r4 + 8]
+    movd          m1,    [r4 + 9]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 16]
+    movd          m3,    [r4 + 17]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 80 * 16],   m1
+
+    ; mode 6 [row 17 - first half] (slot 290 = 2*17 + 256; label fixed, was mislabelled mode 7 row 25)
+    movu          [r0 + 290 * 16],  m1
+
+    ; mode 7 [row 25 - first half] (slot 370 = 2*25 + 320; label fixed, was mislabelled mode 6 row 17)
+    movu          [r0 + 370 * 16],  m1
+
+    movu          m1,    [r4 + 24]
+    movd          m3,    [r4 + 25]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 32]
+    movd          m5,    [r4 + 33]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 81 * 16],   m3
+
+    ; mode 6 [row 17 - second half] (slot 291; label fixed, was mislabelled mode 7 row 25)
+    movu          [r0 + 291 * 16],  m3
+
+    ; mode 7 [row 25 - second half] (slot 371; label fixed, was mislabelled mode 6 row 17)
+    movu          [r0 + 371 * 16],  m3
+
+    ; mode 4 [row 10]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 148 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 149 * 16],  m3
+
+    ; mode 4 [row 11]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 150 * 16],  m3
+
+    ; mode 7 [row 27 - first half]
+    movu          [r0 + 374 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 151 * 16],  m3
+
+    ; mode 7 [row 27 - second half]
+    movu          [r0 + 375 * 16],  m3
+
+    ; mode 5 [row 13]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 218 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 219 * 16],  m3
+
+    ; mode 5 [row 14]
+    movu          m6,    [r5 + 31 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 220 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 221 * 16],  m3
+
+    ; mode 6 [row 18]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 292 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 293 * 16],  m3
+
+    ; mode 7 [row 24]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 368 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 369 * 16],  m3
+
+    ; mode 7 [row 26]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 372 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 373 * 16],  m3
+
+    ; mode 3 [row 9]
+    movu          m6,    [r5 + 4 * 16]
+    movu          m0,    [r4 + 9]
+    movd          m1,    [r4 + 10]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 17]
+    movd          m3,    [r4 + 18]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 82 * 16],   m1
+
+    ; mode 6 [row 19 - first half]
+    movu          [r0 + 294 * 16],  m1
+
+    movu          m1,    [r4 + 25]
+    movd          m3,    [r4 + 26]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 33]
+    movd          m5,    [r4 + 34]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 83 * 16],   m3
+
+    ; mode 6 [row 19 - second half]
+    movu          [r0 + 295 * 16],  m3
+
+    ; mode 4 [row 12]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 152 * 16],  m3
+
+    ; mode 6 [row 20 - first half] (slot 296 = 2*20 + 256; label fixed, was mislabelled mode 4 row 12)
+    movu          [r0 + 296 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 153 * 16],  m3
+
+    ; mode 6 [row 20 - second half] (slot 297; label fixed, was mislabelled mode 4 row 12)
+    movu          [r0 + 297 * 16],  m3
+
+    ; mode 3 [row 10]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 84 * 16],   m3
+
+    ; mode 6 [row 21 - first half]
+    movu          [r0 + 298 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 85 * 16],   m3
+
+    ; mode 6 [row 21 - second half]
+    movu          [r0 + 299 * 16],  m3
+
+    ; mode 5 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 222 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 223 * 16],  m3
+
+    ; mode 7 [row 28]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 376 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 377 * 16],  m3
+
+    ; mode 7 [row 29]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 378 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 379 * 16],  m3
+
+    ; mode 7 [row 30]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 380 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 381 * 16],  m3
+
+    ; mode 3 [row 11]
+    movu          m6,    [r5 + 24 * 16]
+    movu          m0,    [r4 + 10]
+    movd          m1,    [r4 + 11]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 18]
+    movd          m3,    [r4 + 19]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 86 * 16],   m1
+
+    ; mode 6 [row 23 - first half]
+    movu          [r0 + 302 * 16],  m1
+
+    movu          m1,    [r4 + 26]
+    movd          m3,    [r4 + 27]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 34]
+    movd          m5,    [r4 + 35]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 87 * 16],   m3
+
+    ; mode 6 [row 23 - second half]
+    movu          [r0 + 303 * 16],  m3
+
+    ; mode 4 [row 13]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 154 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 155 * 16],  m3
+
+    ; mode 4 [row 14]
+    movu          m6,    [r5 + 27 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 156 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 157 * 16],  m3
+
+    ; mode 5 [row 16]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 224 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 225 * 16],  m3
+
+    ; mode 5 [row 17]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 226 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 227 * 16],  m3
+
+    ; mode 6 [row 22]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 300 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 301 * 16],  m3
+
+    ; mode 3 [row 12]
+    movu          m6,    [r5 + 18 * 16]
+    movu          m0,    [r4 + 11]
+    movd          m1,    [r4 + 12]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 19]
+    movd          m3,    [r4 + 20]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 88 * 16],   m1
+
+    ; mode 6 [row 25 - first half]
+    movu          [r0 + 306 * 16],  m1
+
+    movu          m1,    [r4 + 27]
+    movd          m3,    [r4 + 28]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 35]
+    movd          m5,    [r4 + 36]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 89 * 16],   m3
+
+    ; mode 6 [row 25 - second half]
+    movu          [r0 + 307 * 16],  m3
+
+    ; mode 4 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 158 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 159 * 16],   m3
+
+    ; mode 5 [row 18]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 228 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 229 * 16],  m3
+
+    ; mode 5 [row 19]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 230 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 231 * 16],  m3
+
+    ; mode 6 [row 24]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 304 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 305 * 16],  m3
+
+    ; mode 6 [row 26]
+    movu          m6,    [r5 + 31 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 308 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 309 * 16],  m3
+
+    ; mode 3 [row 13]
+    movu          m6,    [r5 + 12 * 16]
+    movu          m0,    [r4 + 12]
+    movd          m1,    [r4 + 13]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 20]
+    movd          m3,    [r4 + 21]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 90 * 16],   m1
+
+    movu          m1,    [r4 + 28]
+    movd          m3,    [r4 + 29]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 36]
+    movd          m5,    [r4 + 37]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 91 * 16],   m3
+
+    ; mode 4 [row 16]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 160 * 16],  m3
+
+    ; mode 5 [row 20 - first half]
+    movu          [r0 + 232 * 16],  m3
+
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 161 * 16],  m3
+
+    ; mode 5 [row 20 - second half]
+    movu          [r0 + 233 * 16],  m3
+
+    ; mode 4 [row 17]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 162 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 163 * 16],  m3
+
+    ; mode 5 [row 21]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 234 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 235 * 16],  m3
+
+    ; mode 6 [row 27]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 310 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 311 * 16],  m3
+
+    ; mode 6 [row 28]
+    movu          m6,    [r5 + 25 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 312 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 313 * 16],  m3
+
+    ; mode 3 [row 14]
+    movu          m6,    [r5 + 6 * 16]
+    movu          m0,    [r4 + 13]
+    movd          m1,    [r4 + 14]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 21]
+    movd          m3,    [r4 + 22]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 92 * 16],   m1
+
+    ; mode 6 [row 29 - first half]
+    movu          [r0 + 314 * 16],  m1
+
+    movu          m1,    [r4 + 29]
+    movd          m3,    [r4 + 30]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 37]
+    movd          m5,    [r4 + 38]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 93 * 16],   m3
+
+    ; mode 6 [row 29 - second half]
+    movu          [r0 + 315 * 16],  m3
+
+    ; mode 4 [row 18]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 164 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 165 * 16],  m3
+
+    ; mode 5 [row 22]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 236 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 237 * 16],  m3
+
+    ; mode 5 [row 23]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 238 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 239 * 16],  m3
+
+    ; mode 6 [row 30]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 316 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 317 * 16],  m3
+
+    ; mode 3 [row 16]
+    movu          m6,    [r5 + 26 * 16]
+    movu          m0,    [r4 + 14]
+    movd          m1,    [r4 + 15]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 22]
+    movd          m3,    [r4 + 23]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 96 * 16],   m1
+
+    ; mode 5 [row 25 - first half]
+    movu          [r0 + 242 * 16],  m1
+
+    movu          m1,    [r4 + 30]
+    movd          m3,    [r4 + 31]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 38]
+    movd          m5,    [r4 + 39]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 97 * 16],   m3
+
+    ; mode 5 [row 25 - second half]
+    movu          [r0 + 243 * 16],  m3
+
+    ; mode 4 [row 19]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 166 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 167 * 16],  m3
+
+    ; mode 4 [row 20]
+    movu          m6,    [r5 + 25 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 168 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 169 * 16],  m3
+
+    ; mode 5 [row 24]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 240 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 241 * 16],  m3
+
+    ; mode 3 [row 17]
+    movu          m6,    [r5 + 20 * 16]
+    movu          m0,    [r4 + 15]
+    movd          m1,    [r4 + 16]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 23]
+    movd          m3,    [r4 + 24]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 98 * 16],   m1
+
+    movu          m1,    [r4 + 31]
+    movd          m3,    [r4 + 32]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 39]
+    movd          m5,    [r4 + 40]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 99 * 16],   m3
+
+    ; mode 4 [row 21]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 170 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 171 * 16],  m3
+
+    ; mode 5 [row 26]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 244 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 245 * 16],  m3
+
+    ; mode 5 [row 27]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 246 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 247 * 16],  m3
+
+    ; mode 3 [row 18]
+    movu          m6,    [r5 + 14 * 16]
+    movu          m0,    [r4 + 16]
+    movd          m1,    [r4 + 17]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 24]
+    movd          m3,    [r4 + 25]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 100 * 16],  m1
+
+    movu          m1,    [r4 + 32]
+    movd          m3,    [r4 + 33]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 40]
+    movd          m5,    [r4 + 41]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 101 * 16],  m3
+
+    ; mode 4 [row 22]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 172 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 173 * 16],  m3
+
+    ; mode 4 [row 23]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 174 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 175 * 16],  m3
+
+    ; mode 5 [row 28]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 248 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 249 * 16],  m3
+
+    ; mode 5 [row 29]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 250 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 251 * 16],  m3
+
+    ; mode 3 [row 19]
+    movu          m6,    [r5 + 8 * 16]
+    movu          m0,    [r4 + 17]
+    movd          m1,    [r4 + 18]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 25]
+    movd          m3,    [r4 + 26]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 102 * 16],  m1
+
+    movu          m1,    [r4 + 33]
+    movd          m3,    [r4 + 34]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 41]
+    movd          m5,    [r4 + 42]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 103 * 16],  m3
+
+    ; mode 4 [row 24]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 176 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 177 * 16],  m3
+
+    ; mode 5 [row 30]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 252 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 253 * 16],  m3
+
+    ; mode 3 [row 20]
+    movu          m6,    [r5 + 2 * 16]
+    movu          m0,    [r4 + 18]
+    movd          m1,    [r4 + 19]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 26]
+    movd          m3,    [r4 + 27]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 104 * 16],  m1
+
+    movu          m1,    [r4 + 34]
+    movd          m3,    [r4 + 35]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 42]
+    movd          m5,    [r4 + 43]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 105 * 16],  m3
+
+    ; mode 4 [row 25]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 178 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 179 * 16],   m3
+
+    ; mode 4 [row 26]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 180 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 181 * 16],  m3
+
+    ; mode 3 [row 21]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 106 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 107 * 16],  m3
+
+    ; mode 3 [row 22]
+    movu          m6,    [r5 + 22 * 16]
+    movu          m0,    [r4 + 19]
+    movd          m1,    [r4 + 20]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 27]
+    movd          m3,    [r4 + 28]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 108 * 16],  m1
+
+    movu          m1,    [r4 + 35]
+    movd          m3,    [r4 + 36]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 43]
+    movd          m5,    [r4 + 44]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 109 * 16],  m3
+
+    ; mode 4 [row 27]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 182 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 183 * 16],  m3
+
+    ; mode 3 [row 23]
+    movu          m6,    [r5 + 16 * 16]
+    movu          m0,    [r4 + 20]
+    movd          m1,    [r4 + 21]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 28]
+    movd          m3,    [r4 + 29]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 110 * 16],  m1
+
+    movu          m1,    [r4 + 36]
+    movd          m3,    [r4 + 37]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 44]
+    movd          m5,    [r4 + 45]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 111 * 16],  m3
+
+    ; mode 4 [row 28]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 184 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 185 * 16],  m3
+
+    ; mode 4 [row 29]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 186 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 187 * 16],  m3
+
+    ; mode 3 [row 24]
+    movu          m6,    [r5 + 10 * 16]
+    movu          m0,    [r4 + 21]
+    movd          m1,    [r4 + 22]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 29]
+    movd          m3,    [r4 + 30]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 112 * 16],  m1
+
+    movu          m1,    [r4 + 37]
+    movd          m3,    [r4 + 38]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 45]
+    movd          m5,    [r4 + 46]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 113 * 16],  m3
+
+    ; mode 4 [row 30]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 188 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 189 * 16],  m3
+
+    ; mode 3 [row 25]
+    movu          m6,    [r5 + 4 * 16]
+    movu          m0,    [r4 + 22]
+    movd          m1,    [r4 + 23]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 30]
+    movd          m3,    [r4 + 31]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 114 * 16],  m1
+
+    movu          m1,    [r4 + 38]
+    movd          m3,    [r4 + 39]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 46]
+    movd          m5,    [r4 + 47]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 115 * 16],  m3
+
+    ; mode 3 [row 26]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m0,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 116 * 16],  m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 117 * 16],  m3
+
+    ; mode 3 [row 27]
+    movu          m6,    [r5 + 24 * 16]
+    movu          m0,    [r4 + 23]
+    movd          m1,    [r4 + 24]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 31]
+    movd          m3,    [r4 + 32]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 118 * 16],  m1
+
+    movu          m1,    [r4 + 39]
+    movd          m3,    [r4 + 40]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 47]
+    movd          m5,    [r4 + 48]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 119 * 16],  m3
+
+    ; mode 3 [row 28]
+    movu          m6,    [r5 + 18 * 16]
+    movu          m0,    [r4 + 24]
+    movd          m1,    [r4 + 25]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 32]
+    movd          m3,    [r4 + 33]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 120 * 16],  m1
+
+    movu          m1,    [r4 + 40]
+    movd          m3,    [r4 + 41]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 48]
+    movd          m5,    [r4 + 49]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 121 * 16],  m3
+
+    ; mode 3 [row 29]
+    movu          m6,    [r5 + 12 * 16]
+    movu          m0,    [r4 + 25]
+    movd          m1,    [r4 + 26]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 33]
+    movd          m3,    [r4 + 34]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 122 * 16],  m1
+
+    movu          m1,    [r4 + 41]
+    movd          m3,    [r4 + 42]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 49]
+    movd          m5,    [r4 + 50]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 123 * 16],  m3
+
+    ; mode 3 [row 30]
+    movu          m6,    [r5 + 6 * 16]
+    movu          m0,    [r4 + 26]
+    movd          m1,    [r4 + 27]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 34]
+    movd          m3,    [r4 + 35]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 124 * 16],  m1
+
+    movu          m1,    [r4 + 42]
+    movd          m3,    [r4 + 43]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 50]
+    movd          m5,    [r4 + 51]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 125 * 16],  m3
+
+    ; mode 10
+    movu                      m1,  [r2 +  1]
+    movu                      m2,  [r2 + 17]
+    movu         [r0 + 512 * 16],  m1
+    movu         [r0 + 513 * 16],  m2
+    movu         [r0 + 514 * 16],  m1
+    movu         [r0 + 515 * 16],  m2
+    movu         [r0 + 516 * 16],  m1
+    movu         [r0 + 517 * 16],  m2
+    movu         [r0 + 518 * 16],  m1
+    movu         [r0 + 519 * 16],  m2
+    movu         [r0 + 520 * 16],  m1
+    movu         [r0 + 521 * 16],  m2
+    movu         [r0 + 522 * 16],  m1
+    movu         [r0 + 523 * 16],  m2
+    movu         [r0 + 524 * 16],  m1
+    movu         [r0 + 525 * 16],  m2
+    movu         [r0 + 526 * 16],  m1
+    movu         [r0 + 527 * 16],  m2
+
+    movu         [r0 + 528 * 16],  m1
+    movu         [r0 + 529 * 16],  m2
+    movu         [r0 + 530 * 16],  m1
+    movu         [r0 + 531 * 16],  m2
+    movu         [r0 + 532 * 16],  m1
+    movu         [r0 + 533 * 16],  m2
+    movu         [r0 + 534 * 16],  m1
+    movu         [r0 + 535 * 16],  m2
+    movu         [r0 + 536 * 16],  m1
+    movu         [r0 + 537 * 16],  m2
+    movu         [r0 + 538 * 16],  m1
+    movu         [r0 + 539 * 16],  m2
+    movu         [r0 + 540 * 16],  m1
+    movu         [r0 + 541 * 16],  m2
+    movu         [r0 + 542 * 16],  m1
+    movu         [r0 + 543 * 16],  m2
+
+    movu         [r0 + 544 * 16],  m1
+    movu         [r0 + 545 * 16],  m2
+    movu         [r0 + 546 * 16],  m1
+    movu         [r0 + 547 * 16],  m2
+    movu         [r0 + 548 * 16],  m1
+    movu         [r0 + 549 * 16],  m2
+    movu         [r0 + 550 * 16],  m1
+    movu         [r0 + 551 * 16],  m2
+    movu         [r0 + 552 * 16],  m1
+    movu         [r0 + 553 * 16],  m2
+    movu         [r0 + 554 * 16],  m1
+    movu         [r0 + 555 * 16],  m2
+    movu         [r0 + 556 * 16],  m1
+    movu         [r0 + 557 * 16],  m2
+    movu         [r0 + 558 * 16],  m1
+    movu         [r0 + 559 * 16],  m2
+
+    movu         [r0 + 560 * 16],  m1
+    movu         [r0 + 561 * 16],  m2
+    movu         [r0 + 562 * 16],  m1
+    movu         [r0 + 563 * 16],  m2
+    movu         [r0 + 564 * 16],  m1
+    movu         [r0 + 565 * 16],  m2
+    movu         [r0 + 566 * 16],  m1
+    movu         [r0 + 567 * 16],  m2
+    movu         [r0 + 568 * 16],  m1
+    movu         [r0 + 569 * 16],  m2
+    movu         [r0 + 570 * 16],  m1
+    movu         [r0 + 571 * 16],  m2
+    movu         [r0 + 572 * 16],  m1
+    movu         [r0 + 573 * 16],  m2
+    movu         [r0 + 574 * 16],  m1
+    movu         [r0 + 575 * 16],  m2
+
+    ; mode 11 [row 0]
+    movu          m0,    [r4]
+
+    ; mode 11 [row 15 - first half]
+    movu          [r0 + 606 * 16],  m0
+
+    movu          [r0 + 606 * 16],  m0
+
+    ; mode 12 [row 31]
+    pslldq        m6,    m0,          4
+    pinsrb        m6,    [r3 + 26],   0
+    pinsrb        m6,    [r3 + 19],   1
+    pinsrb        m6,    [r3 + 13],   2
+    pinsrb        m6,    [r3 +  6],   3
+    movu          [r0 + 702 * 16],    m6
+    movu          m6,                 [r4 + 12]
+    movu          [r0 + 703 * 16],    m6
+
+    ; mode 11 [row 31]
+    pslldq        m6,               m0,           1
+    pinsrb        m6,               [r3 + 16],    0
+    movu          [r0 + 638 * 16],  m6
+    movu          m6,               [r4 + 15]
+    movu          [r0 + 639 * 16],  m6
+
+    movd          m1,               [r4 + 1]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,        [r5 + 30 * 16]
+    pmulhrsw      m1,    m7
+    movu          m2,    [r4 + 8]
+    movd          m3,    [r4 + 9]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,        [r5 + 30 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m1,               m3
+    movu          [r0 + 576 * 16],  m1
+
+    movu          m1,    [r4 + 16]
+
+    ; mode 11 [row 15 - second half]
+    movu          [r0 + 607 * 16],  m1
+
+    movd          m3,    [r4 + 17]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,        [r5 + 30 * 16]
+    pmulhrsw      m3,    m7
+    movu          m4,    [r4 + 24]
+    movd          m5,    [r4 + 25]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,        [r5 + 30 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 577 * 16],  m3
+
+    ; mode 11 [row 1]
+    pmaddubsw     m3,    m0,        [r5 + 28 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 28 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 578 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 28 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 28 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 579 * 16],  m3
+
+    ; mode 11 [row 2]
+    pmaddubsw     m3,    m0,        [r5 + 26 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 26 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 580 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 26 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 26 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 581 * 16],  m3
+
+    ; mode 11 [row 3]
+    pmaddubsw     m3,    m0,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 24 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 582 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 24 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 583 * 16],  m3
+
+    ; mode 11 [row 4]
+    pmaddubsw     m3,    m0,        [r5 + 22 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 22 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 584 * 16],  m3
+
+    ; mode 12 [row 1 - first half]
+    movu          [r0 + 642 * 16],  m3
+
+    pmaddubsw     m3,    m1,        [r5 + 22 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 22 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 585 * 16],  m3
+
+    ; mode 12 [row 1 - second half]
+    movu          [r0 + 643 * 16],  m3
+
+    ; mode 11 [row 5]
+    pmaddubsw     m3,    m0,        [r5 + 20 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 20 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 586 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 20 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 20 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 587 * 16],  m3
+
+    ; mode 11 [row 6]
+    pmaddubsw     m3,    m0,        [r5 + 18 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 18 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 588 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 18 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 18 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 589 * 16],  m3
+
+    ; mode 11 [row 7]
+    pmaddubsw     m3,    m0,        [r5 + 16 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 16 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 590 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 16 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 16 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 591 * 16],  m3
+
+    ; mode 11 [row 8]
+    pmaddubsw     m3,    m0,        [r5 + 14 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 14 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 592 * 16],  m3
+
+    ; mode 13 [row 1 - first half]
+    movu          [r0 + 706 * 16],  m3
+
+    pmaddubsw     m3,    m1,        [r5 + 14 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 14 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 593 * 16],  m3
+
+    ; mode 13 [row 1 - second half]
+    movu          [r0 + 707 * 16],  m3
+
+    ; mode 11 [row 9]
+    pmaddubsw     m3,    m0,        [r5 + 12 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 12 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 594 * 16],  m3
+
+    ; mode 12 [row 3 - first half]
+    movu          [r0 + 646 * 16],  m3
+
+    pmaddubsw     m3,    m1,        [r5 + 12 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 12 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 595 * 16],  m3
+
+    ; mode 12 [row 3 - second half]
+    movu          [r0 + 647 * 16],  m3
+
+    ; mode 11 [row 10]
+    pmaddubsw     m3,    m0,        [r5 + 10 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 10 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 596 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 10 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 10 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 597 * 16],  m3
+
+    ; mode 11 [row 11]
+    pmaddubsw     m3,    m0,        [r5 + 8 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 8 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 598 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 8 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 8 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 599 * 16],  m3
+
+    ; mode 11 [row 12]
+    pmaddubsw     m3,    m0,        [r5 + 6 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 6 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 600 * 16],  m3
+
+    ; mode 14 [row 1 - first half]
+    movu          [r0 + 770 * 16],  m3
+
+    pmaddubsw     m3,    m1,        [r5 + 6 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 6 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 601 * 16],  m3
+
+    ; mode 14 [row 1 - second half]
+    movu          [r0 + 771 * 16],  m3
+
+    ; mode 11 [row 13]
+    pmaddubsw     m3,    m0,        [r5 + 4 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 4 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 602 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 4 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 4 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 603 * 16],  m3
+
+    ; mode 11 [row 14]
+    pmaddubsw     m3,    m0,        [r5 + 2 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 2 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 604 * 16],  m3
+
+    ; mode 13 [row 5 - first half]
+    movu          [r0 + 650 * 16],  m3
+
+    pmaddubsw     m3,    m1,        [r5 + 2 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 2 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 605 * 16],  m3
+
+    ; mode 13 [row 5 - second half]
+    movu          [r0 + 651 * 16],  m3
+
+    ; mode 12 [row 0]
+    pmaddubsw     m3,    m0,        [r5 + 27 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 27 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 640 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 27 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 27 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 641 * 16],  m3
+
+    ; mode 12 [row 2]
+    pmaddubsw     m3,    m0,        [r5 + 17 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 17 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 644 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 17 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 17 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 645 * 16],  m3
+
+    ; mode 12 [row 4]
+    pmaddubsw     m3,    m0,        [r5 + 7 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 7 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 648 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 7 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 7 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 649 * 16],  m3
+
+    ; mode 13 [row 0]
+    pmaddubsw     m3,    m0,        [r5 + 23 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 23 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 704 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 23 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 23 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 705 * 16],  m3
+
+    ; mode 13 [row 2]
+    pmaddubsw     m3,    m0,        [r5 + 5 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 5 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 708 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 5 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 5 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 709 * 16],  m3
+
+    ; mode 14 [row 0]
+    pmaddubsw     m3,    m0,        [r5 + 19 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 19 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 768 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 19 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 19 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 769 * 16],  m3
+
+    ; mode 15 [row 0]
+    pmaddubsw     m3,    m0,        [r5 + 15 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 15 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 832 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 15 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 15 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 833 * 16],  m3
+
+    ; mode 11 [row 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 +  0],   1
+    pinsrb        m0,    [r3 + 16],   0
+    pmaddubsw     m3,    m0,        [r5 + 30 * 16]
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 8],   1
+    pinsrb        m2,    [r4 + 7],   0
+    pmaddubsw     m5,    m2,        [r5 + 30 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 608 * 16],  m3
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 16],   1
+    pinsrb        m1,    [r4 + 15],   0
+    pmaddubsw     m3,    m1,        [r5 + 30 * 16]
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrb        m4,    [r4 + 24],   1
+    pinsrb        m4,    [r4 + 23],   0
+    pmaddubsw     m5,    m4,        [r5 + 30 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 609 * 16],  m3
+
+    ; mode 11 [row 17]
+    pmaddubsw     m3,    m0,        [r5 + 28 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 28 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 610 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 28 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 28 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 611 * 16],  m3
+
+    ; mode 11 [row 18]
+    pmaddubsw     m3,    m0,        [r5 + 26 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 26 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 612 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 26 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 26 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 613 * 16],  m3
+
+    ; mode 11 [row 19]
+    pmaddubsw     m3,    m0,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 24 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 614 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 24 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 615 * 16],  m3
+
+    ; mode 11 [row 20]
+    pmaddubsw     m3,    m0,        [r5 + 22 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 22 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 616 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 22 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 22 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 617 * 16],  m3
+
+    ; mode 11 [row 21]
+    pmaddubsw     m3,    m0,        [r5 + 20 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 20 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 618 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 20 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 20 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 619 * 16],  m3
+
+    ; mode 11 [row 22]
+    pmaddubsw     m3,    m0,        [r5 + 18 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 18 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 620 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 18 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 18 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 621 * 16],  m3
+
+    ; mode 11 [row 23]
+    pmaddubsw     m3,    m0,        [r5 + 16 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 16 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 622 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 16 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 16 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 623 * 16],  m3
+
+    ; mode 11 [row 24]
+    pmaddubsw     m3,    m0,        [r5 + 14 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 14 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 624 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 14 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 14 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 625 * 16],  m3
+
+    ; mode 11 [row 25]
+    pmaddubsw     m3,    m0,        [r5 + 12 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 12 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 626 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 12 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 12 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 627 * 16],  m3
+
+    ; mode 11 [row 26]
+    pmaddubsw     m3,    m0,        [r5 + 10 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 10 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 628 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 10 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 10 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 629 * 16],  m3
+
+    ; mode 11 [row 27]
+    pmaddubsw     m3,    m0,        [r5 + 8 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 8 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 630 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 8 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 8 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 631 * 16],  m3
+
+    ; mode 11 [row 28]
+    pmaddubsw     m3,    m0,        [r5 + 6 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 6 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 632 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 6 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 6 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 633 * 16],  m3
+
+    ; mode 11 [row 29]
+    pmaddubsw     m3,    m0,        [r5 + 4 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 4 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 634 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 4 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 4 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 635 * 16],  m3
+
+    ; mode 11 [row 30]
+    pmaddubsw     m3,    m0,        [r5 + 2 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 2 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 636 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 2 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 2 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,               m5
+    movu          [r0 + 637 * 16],  m3
+
+    ; mode 12 [row 6]
+    pinsrb        m0,    [r3 + 6],  0
+    pmaddubsw     m3,    m0,        [r5 + 29 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 29 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 652 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 29 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 29 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 653 * 16],  m3
+
+    ; mode 12 [row 7]
+    pmaddubsw     m3,    m0,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 24 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 654 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 24 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 655 * 16],  m3
+
+    ; mode 12 [row 8]
+    pmaddubsw     m3,    m0,        [r5 + 19 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 19 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 656 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 19 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 19 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 657 * 16],  m3
+
+    ; mode 12 [row 9]
+    pmaddubsw     m3,    m0,        [r5 + 14 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 14 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 658 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 14 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 14 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 659 * 16],  m3
+
+    ; mode 12 [row 10]
+    pmaddubsw     m3,    m0,        [r5 + 9 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 9 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 660 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 9 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 9 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 661 * 16],  m3
+
+    ; mode 12 [row 11]
+    pmaddubsw     m3,    m0,        [r5 + 4 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 4 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 662 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 4 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 4 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 663 * 16],  m3
+
+    ; mode 13 [row 3]
+    movu          m6,    m0
+    pinsrb        m6,    [r3 + 4],  0
+    pmaddubsw     m3,    m6,        [r5 + 28 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 28 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 710 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 28 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 28 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 711 * 16],  m3
+
+    ; mode 13 [row 4]
+    pmaddubsw     m3,    m6,        [r5 + 19 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 19 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 712 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 19 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 19 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 713 * 16],  m3
+
+    ; mode 13 [row 5]
+    pmaddubsw     m3,    m6,        [r5 + 10 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 10 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 714 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 10 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 10 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 715 * 16],  m3
+
+    ; mode 13 [row 6]
+    pmaddubsw     m3,    m6,        [r5 + 1 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 1 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 716 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 1 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 1 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 717 * 16],  m3
+
+    ; mode 14 [row 2]
+    movu          m6,    m0
+    pinsrb        m6,    [r4 +  0],  1
+    pinsrb        m6,    [r3 +  2],  0
+    pmaddubsw     m3,    m6,         [r5 + 25 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,         [r5 + 25 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 772 * 16],   m3
+    pmaddubsw     m3,    m1,         [r5 + 25 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,         [r5 + 25 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 773 * 16],   m3
+
+    ; mode 14 [row 3]
+    pmaddubsw     m3,    m6,         [r5 + 12 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,         [r5 + 12 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 774 * 16],   m3
+    pmaddubsw     m3,    m1,         [r5 + 12 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,         [r5 + 12 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 775 * 16],   m3
+
+    ; mode 15 [row 1]
+    pmaddubsw     m3,    m6,        [r5 + 30 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 30 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 834 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 30 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 30 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 835 * 16],  m3
+
+    ; mode 15 [row 2]
+    pmaddubsw     m3,    m6,        [r5 + 13 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 13 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 836 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 13 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 13 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 837 * 16],  m3
+
+    ; mode 15 [row 3]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 +  2], 1
+    pinsrb        m6,    [r3 +  4], 0
+    pmaddubsw     m3,    m6,        [r5 + 28 * 16]
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 7],  1
+    pinsrb        m2,    [r4 + 6],  0
+    pmaddubsw     m5,    m2,        [r5 + 28 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 838 * 16],  m3
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 15], 1
+    pinsrb        m1,    [r4 + 14], 0
+    pmaddubsw     m3,    m1,        [r5 + 28 * 16]
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrb        m4,    [r4 + 23], 1
+    pinsrb        m4,    [r4 + 22], 0
+    pmaddubsw     m5,    m4,        [r5 + 28 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 839 * 16],  m3
+
+    ; mode 15 [row 4]
+    pmaddubsw     m3,    m6,        [r5 + 11 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 11 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 840 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 11 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 11 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 841 * 16],  m3
+
+    ; mode 15 [row 5, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 +  4], 1
+    pinsrb        m6,    [r3 +  6], 0
+    pmaddubsw     m3,    m6,        [r5 + 26 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 842 * 16],  m3
+
+    ; mode 15 [row 6, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 9 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 844 * 16],  m3
+
+    ; mode 15 [row 7, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 +  6], 1
+    pinsrb        m6,    [r3 +  8], 0
+    pmaddubsw     m3,    m6,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 846 * 16],  m3
+
+    ; mode 15 [row 8, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 7 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 848 * 16],  m3
+
+    ; mode 15 [row 9, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 +  8], 1
+    pinsrb        m6,    [r3 +  9], 0
+    pmaddubsw     m3,    m6,        [r5 + 22 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 850 * 16],  m3
+
+    ; mode 15 [row 10, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 5 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 852 * 16],  m3
+
+    ; mode 15 [row 11, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 +  9], 1
+    pinsrb        m6,    [r3 + 11], 0
+    pmaddubsw     m3,    m6,        [r5 + 20 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 854 * 16],  m3
+
+    ; mode 15 [row 12, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 3 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 856 * 16],  m3
+
+    ; mode 15 [row 13, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 11], 1
+    pinsrb        m6,    [r3 + 13], 0
+    pmaddubsw     m3,    m6,        [r5 + 18 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 858 * 16],  m3
+
+    ; mode 15 [row 14, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 1 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 860 * 16],  m3
+
+    ; mode 15 [row 15, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 13], 1
+    pinsrb        m6,    [r3 + 15], 0
+    pmaddubsw     m3,    m6,        [r5 + 16 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 862 * 16],  m3
+
+    ; mode 15 [row 16, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 15], 1
+    pinsrb        m6,    [r3 + 17], 0
+    pmaddubsw     m3,    m6,        [r5 + 31 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 864 * 16],  m3
+
+    ; mode 15 [row 17, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 14 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 866 * 16],  m3
+
+    ; mode 15 [row 18, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 17], 1
+    pinsrb        m6,    [r3 + 19], 0
+    pmaddubsw     m3,    m6,        [r5 + 29 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 868 * 16],  m3
+
+    ; mode 15 [row 19, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 12 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 870 * 16],  m3
+
+    ; mode 15 [row 20, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 19], 1
+    pinsrb        m6,    [r3 + 21], 0
+    pmaddubsw     m3,    m6,        [r5 + 27 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 872 * 16],  m3
+
+    ; mode 15 [row 21, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 10 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 874 * 16],  m3
+
+    ; mode 15 [row 22, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 21], 1
+    pinsrb        m6,    [r3 + 23], 0
+    pmaddubsw     m3,    m6,        [r5 + 25 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 876 * 16],  m3
+
+    ; mode 15 [row 23, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 8 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 878 * 16],  m3
+
+    ; mode 15 [row 24, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 23], 1
+    pinsrb        m6,    [r3 + 24], 0
+    pmaddubsw     m3,    m6,        [r5 + 23 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 880 * 16],  m3
+
+    ; mode 15 [row 25, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 6 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 882 * 16],  m3
+
+    ; mode 15 [row 26, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 24], 1
+    pinsrb        m6,    [r3 + 26], 0
+    pmaddubsw     m3,    m6,        [r5 + 21 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 884 * 16],  m3
+
+    ; mode 15 [row 27, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 4 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 886 * 16],  m3
+
+    ; mode 15 [row 28, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 26], 1
+    pinsrb        m6,    [r3 + 28], 0
+    pmaddubsw     m3,    m6,        [r5 + 19 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 888 * 16],  m3
+
+    ; mode 15 [row 29, 0-7]
+    pmaddubsw     m3,    m6,        [r5 + 2 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 890 * 16],  m3
+
+    ; mode 15 [row 30, 0-7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 28], 1
+    pinsrb        m6,    [r3 + 30], 0
+    pmaddubsw     m3,    m6,        [r5 + 17 * 16]
+    pmulhrsw      m3,    m7
+    packuswb      m3,    m3
+    movh          [r0 + 892 * 16],  m3
+
+    ; mode 15 [row 31, 0-7]
+    pshufb        m3,    m6,           [tab_S2]
+    movh          [r0 + 894 * 16],     m3
+
+    ; mode 12 [row 12]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 +  6], 1
+    pinsrb        m0,    [r3 + 13], 0
+    pmaddubsw     m3,    m0,        [r5 + 31 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 31 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 664 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 31 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 31 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 665 * 16],  m3
+
+    ; mode 12 [row 13]
+    pmaddubsw     m3,    m0,        [r5 + 26 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 26 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 666 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 26 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 26 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 667 * 16],  m3
+
+    ; mode 12 [row 14]
+    pmaddubsw     m3,    m0,        [r5 + 21 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 21 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 668 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 21 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 21 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 669 * 16],  m3
+
+    ; mode 12 [row 15]
+    pmaddubsw     m3,    m0,        [r5 + 16 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 16 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 670 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 16 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 16 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 671 * 16],  m3
+
+    ; mode 12 [row 16]
+    pmaddubsw     m3,    m0,        [r5 + 11 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 11 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 672 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 11 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 11 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 673 * 16],  m3
+
+    ; mode 12 [row 17]
+    pmaddubsw     m3,    m0,        [r5 + 6 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 6 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 674 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 6 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 6 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 675 * 16],  m3
+
+    ; mode 12 [row 18]
+    pmaddubsw     m3,    m0,        [r5 + 1 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 1 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 676 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 1 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 1 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 677 * 16],  m3
+
+    ; mode 13 [row 7]
+    movu          m6,    m0
+    pinsrb        m6,    [r3 + 4],  2
+    pinsrb        m6,    [r3 + 4],  1
+    pinsrb        m6,    [r3 + 7],  0
+    pmaddubsw     m3,    m6,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 24 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 718 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 24 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 719 * 16],  m3
+
+    ; mode 13 [row 8]
+    pmaddubsw     m3,    m6,        [r5 + 15 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 15 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 720 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 15 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 15 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 721 * 16],  m3
+
+    ; mode 13 [row 9]
+    pmaddubsw     m3,    m6,        [r5 + 6 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 6 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 722 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 6 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 6 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 723 * 16],  m3
+
+    ; mode 14 [row 4]
+    pinsrb        m6,    [r3 + 2],  2
+    pinsrb        m6,    [r3 + 2],  1
+    pinsrb        m6,    [r3 + 5],  0
+    pmaddubsw     m3,    m6,        [r5 + 31 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 31 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 776 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 31 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 31 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 777 * 16],  m3
+
+    ; mode 14 [row 5]
+    pmaddubsw     m3,    m6,        [r5 + 18 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 18 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 778 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 18 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 18 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 779 * 16],  m3
+
+    ; mode 14 [row 6]
+    pmaddubsw     m3,    m6,        [r5 + 5 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 5 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 780 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 5 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 5 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 781 * 16],  m3
+
+    ; mode 14 [row 7]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 5], 1
+    pinsrb        m6,    [r3 + 7], 0
+    pmaddubsw     m3,    m6,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw         m2,    [r4 + 5], 0
+    pmaddubsw     m5,    m2,        [r5 + 24 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 782 * 16],  m3
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 13], 0
+    pmaddubsw     m3,    m1,        [r5 + 24 * 16]
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 21], 0
+    pmaddubsw     m5,    m4,        [r5 + 24 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 783 * 16],  m3
+
+    ; mode 14 [row 8]
+    pmaddubsw     m3,    m6,        [r5 + 11 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 11 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 784 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 11 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 11 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 785 * 16],  m3
+
+    ; mode 15 [row 5, 8-31]
+    pmaddubsw     m5,    m2,            [r5 + 26 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m5,    m5
+    movh          [r0 + 842 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 26 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            [r5 + 26 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 843 * 16],      m3
+
+    ; mode 15 [row 6, 8-31]
+    pmaddubsw     m5,    m2,            [r5 + 9 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m5,    m5
+    movh          [r0 + 844 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 9 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            [r5 + 9 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 845 * 16],      m3
+
+    ; mode 12 [row 19]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 13], 1
+    pinsrb        m0,    [r3 + 19], 0
+    pmaddubsw     m3,    m0,        [r5 + 28 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 28 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 678 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 28 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 28 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 679 * 16],  m3
+
+    ; mode 12 [row 20]
+    pmaddubsw     m3,    m0,        [r5 + 23 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 23 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 680 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 23 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 23 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 681 * 16],  m3
+
+    ; mode 12 [row 21]
+    pmaddubsw     m3,    m0,        [r5 + 18 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 18 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 682 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 18 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 18 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 683 * 16],  m3
+
+    ; mode 12 [row 22]
+    pmaddubsw     m3,    m0,        [r5 + 13 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 13 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 684 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 13 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 13 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 685 * 16],  m3
+
+    ; mode 12 [row 23]
+    pmaddubsw     m3,    m0,        [r5 + 8 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 8 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 686 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 8 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 8 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 687 * 16],  m3
+
+    ; mode 12 [row 24]
+    pmaddubsw     m3,    m0,        [r5 + 3 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,        [r5 + 3 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 688 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 3 * 16]
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,        [r5 + 3 * 16]
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 689 * 16],  m3
+
+    ; mode 13 [row 10]
+    movu          m7,    m6
+    movu          m6,    m0
+    pinsrb        m6,    [r3 + 4],  4
+    pinsrb        m6,    [r3 + 4],  3
+    pinsrb        m6,    [r3 + 7],  2
+    pinsrb        m6,    [r3 + 7],  1
+    pinsrb        m6,    [r3 + 11], 0
+    pmaddubsw     m3,    m6,        [r5 + 29 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 29 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 724 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 29 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 29 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 725 * 16],  m3
+
+    ; mode 13 [row 11]
+    pmaddubsw     m3,    m6,        [r5 + 20 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 20 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 726 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 20 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 20 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 727 * 16],  m3
+
+    ; mode 13 [row 12]
+    pmaddubsw     m3,    m6,        [r5 + 11 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 11 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 728 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 11 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 11 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 729 * 16],  m3
+
+    ; mode 13 [row 13]
+    pmaddubsw     m3,    m6,        [r5 + 2 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 2 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 730 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 2 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 2 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 731 * 16],  m3
+
+    ; mode 14 [row 9]
+    pslldq        m7,    2
+    pinsrb        m7,    [r3 +  7], 1
+    pinsrb        m7,    [r3 + 10], 0
+    pmaddubsw     m3,    m7,        [r5 + 30 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m2,    2
+    pinsrw        m2,     [r4 + 4],  0
+    pmaddubsw     m5,    m2,        [r5 + 30 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 786 * 16],  m3
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 12], 0
+    pmaddubsw     m3,    m1,        [r5 + 30 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrb        m4,    [r4 + 21], 1
+    pinsrb        m4,    [r4 + 20], 0
+    pmaddubsw     m5,    m4,        [r5 + 30 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 787 * 16],  m3
+
+    ; mode 14 [row 10]
+    pmaddubsw     m3,    m7,        [r5 + 17 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 17 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 788 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 17 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 17 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 789 * 16],  m3
+
+    ; mode 14 [row 11]
+    pmaddubsw     m3,    m7,        [r5 + 4 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 4 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 790 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 4 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 4 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 791 * 16],  m3
+
+    movu          m6,    [pw_1024]
+
+    ; mode 15 [row 7, 8-31]
+    pmaddubsw     m5,    m2,            [r5 + 24 * 16]
+    pmulhrsw      m5,    m6
+    packuswb      m5,    m5
+    movh          [r0 + 846 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 24 * 16]
+    pmulhrsw      m3,    m6
+    pmaddubsw     m5,    m4,            [r5 + 24 * 16]
+    pmulhrsw      m5,    m6
+    packuswb      m3,    m5
+    movu          [r0 + 847 * 16],      m3
+
+    ; mode 15 [row 8, 8-31]
+    pmaddubsw     m5,    m2,            [r5 + 7 * 16]
+    pmulhrsw      m5,    m6
+    packuswb      m5,    m5
+    movh          [r0 + 848 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 7 * 16]
+    pmulhrsw      m3,    m6
+    pmaddubsw     m5,    m4,            [r5 + 7 * 16]
+    pmulhrsw      m5,    m6
+    packuswb      m3,    m5
+    movu          [r0 + 849 * 16],      m3
+
+    ; mode 12 [row 25]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 19], 1
+    pinsrb        m0,    [r3 + 26], 0
+    pmaddubsw     m3,    m0,        [r5 + 30 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 30 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 690 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 30 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 30 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 691 * 16],  m3
+
+    ; mode 12 [row 26]
+    pmaddubsw     m3,    m0,        [r5 + 25 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 25 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 692 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 25 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 25 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 693 * 16],  m3
+
+    ; mode 12 [row 27]
+    pmaddubsw     m3,    m0,        [r5 + 20 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 20 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 694 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 20 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 20 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 695 * 16],  m3
+
+    ; mode 12 [row 28]
+    pmaddubsw     m3,    m0,        [r5 + 15 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 15 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 696 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 15 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 15 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 697 * 16],  m3
+
+    ; mode 12 [row 29]
+    pmaddubsw     m3,    m0,        [r5 + 10 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 10 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 698 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 10 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 10 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 699 * 16],  m3
+
+    ; mode 12 [row 30]
+    pmaddubsw     m3,    m0,        [r5 + 5 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 5 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 700 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 5 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 5 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 701 * 16],  m3
+
+    ; mode 13 [row 14]
+    movu          m6,    m0
+    pinsrb        m6,    [r3 +  4], 6
+    pinsrb        m6,    [r3 +  4], 5
+    pinsrb        m6,    [r3 +  7], 4
+    pinsrb        m6,    [r3 +  7], 3
+    pinsrb        m6,    [r3 + 11], 2
+    pinsrb        m6,    [r3 + 11], 1
+    pinsrb        m6,    [r3 + 14], 0
+    pmaddubsw     m3,    m6,        [r5 + 25 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 25 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 732 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 25 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 25 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 733 * 16],  m3
+
+    ; mode 13 [row 15]
+    pmaddubsw     m3,    m6,        [r5 + 16 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 16 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 734 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 16 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 16 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 735 * 16],  m3
+
+    ; mode 13 [row 16]
+    pmaddubsw     m3,    m6,        [r5 + 7 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 7 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 736 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 7 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 7 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 737 * 16],  m3
+
+    ; mode 13 [row 17]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 14],  1
+    pinsrb        m6,    [r3 + 18],  0
+    pmaddubsw     m3,    m6,         [r5 + 30 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m2,    2
+    pinsrw        m2,     [r4 + 3],  0
+    pmaddubsw     m5,    m2,        [r5 + 30 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 738 * 16],  m3
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 11], 0
+    pmaddubsw     m3,    m1,        [r5 + 30 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 19], 0
+    pmaddubsw     m5,    m4,        [r5 + 30 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,               m5
+    movu          [r0 + 739 * 16],  m3
+
+    ; mode 13 [row 18]
+    pmaddubsw     m3,    m6,        [r5 + 21 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 21 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 740 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 21 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 21 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 741 * 16],  m3
+
+    ; mode 13 [row 19]
+    pmaddubsw     m3,    m6,        [r5 + 12 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 12 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 742 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 12 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 12 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 743 * 16],  m3
+
+    ; mode 13 [row 20]
+    pmaddubsw     m3,    m6,        [r5 + 3 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 3 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 744 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 3 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 3 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 745 * 16],  m3
+
+    ; mode 14 [row 12]
+    pslldq        m7,    2
+    pinsrb        m7,    [r3 + 10], 1
+    pinsrb        m7,    [r3 + 12], 0
+    pmaddubsw     m3,    m7,        [r5 + 23 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 23 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 792 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 23 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 23 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 793 * 16],  m3
+
+    ; mode 14 [row 13]
+    pmaddubsw     m3,    m7,        [r5 + 10 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 10 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 794 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 10 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 10 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 795 * 16],  m3
+
+    ; mode 15 [row 9]
+    pmaddubsw     m5,    m2,            [r5 + 22 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movu          [r0 + 850 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 22 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,            [r5 + 22 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 851 * 16],      m3
+
+    ; mode 15 [row 10]
+    pmaddubsw     m5,    m2,            [r5 + 5 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movu          [r0 + 852 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 5 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,            [r5 + 5 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 853 * 16],      m3
+
+    ; mode 13 [row 21]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 18],  1
+    pinsrb        m6,    [r3 + 21],  0
+    pmaddubsw     m3,    m6,         [r5 + 26 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 2],  0
+    pmaddubsw     m5,    m2,        [r5 + 26 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 746 * 16],  m3
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 10], 0
+    pmaddubsw     m3,    m1,        [r5 + 26 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 18], 0
+    pmaddubsw     m5,    m4,        [r5 + 26 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 747 * 16],  m3
+
+    ; mode 13 [row 22]
+    pmaddubsw     m3,    m6,        [r5 + 17 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 17 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 748 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 17 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 17 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 749 * 16],  m3
+
+    ; mode 13 [row 23]
+    pmaddubsw     m3,    m6,        [r5 + 8 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 8 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 750 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 8 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 8 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 751 * 16],  m3
+
+    ; mode 14 [row 14]
+    pslldq        m7,    2
+    pinsrb        m7,    [r3 + 12], 1
+    pinsrb        m7,    [r3 + 15], 0
+    pmaddubsw     m3,    m7,        [r5 + 29 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 29 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 796 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 29 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 29 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 797 * 16],  m3
+
+    ; mode 14 [row 15]
+    pmaddubsw     m3,    m7,        [r5 + 16 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 16 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 798 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 16 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 16 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 799 * 16],  m3
+
+    ; mode 14 [row 16]
+    pmaddubsw     m3,    m7,        [r5 + 3 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 3 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 800 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 3 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 3 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 801 * 16],  m3
+
+    ; mode 15 [row 11]
+    pmaddubsw     m5,    m2,            [r5 + 20 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 854 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 20 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,            [r5 + 20 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 855 * 16],      m3
+
+    ; mode 15 [row 12]
+    pmaddubsw     m5,    m2,            [r5 + 3 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 856 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 3 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,            [r5 + 3 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 857 * 16],      m3
+
+    ; mode 13 [row 24]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 21],  1
+    pinsrb        m6,    [r3 + 25],  0
+    pmaddubsw     m3,    m6,         [r5 + 31 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 1],  0
+    pmaddubsw     m5,    m2,        [r5 + 31 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 752 * 16],  m3
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 9],  0
+    pmaddubsw     m3,    m1,        [r5 + 31 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 17], 0
+    pmaddubsw     m5,    m4,        [r5 + 31 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 753 * 16],  m3
+
+    ; mode 13 [row 25]
+    pmaddubsw     m3,    m6,        [r5 + 22 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 22 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 754 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 22 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 22 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 755 * 16],  m3
+
+    ; mode 13 [row 26]
+    pmaddubsw     m3,    m6,        [r5 + 13 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 13 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 756 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 13 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 13 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 757 * 16],  m3
+
+    ; mode 13 [row 27]
+    pmaddubsw     m3,    m6,        [r5 + 4 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 4 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 758 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 4 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 4 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 759 * 16],  m3
+
+    ; mode 14 [row 17]
+    pslldq        m7,    2
+    pinsrb        m7,    [r3 + 15], 1
+    pinsrb        m7,    [r3 + 17], 0
+    pmaddubsw     m3,    m7,        [r5 + 22 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 22 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 802 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 22 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 22 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 803 * 16],  m3
+
+    ; mode 14 [row 18]
+    pmaddubsw     m3,    m7,        [r5 + 9 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 9 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 804 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 9 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 9 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 805 * 16],  m3
+
+    ; mode 15 [row 13]
+    pmaddubsw     m5,    m2,           [r5 + 18 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 858 * 16 + 8], m5
+    pmaddubsw     m3,    m1,           [r5 + 18 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,           [r5 + 18 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 859 * 16],     m3
+
+    ; mode 15 [row 14]
+    pmaddubsw     m5,    m2,           [r5 + 1 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 860 * 16 + 8], m5
+    pmaddubsw     m3,    m1,           [r5 + 1 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,           [r5 + 1 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 861 * 16],     m3
+
+    ; mode 13 [row 28]
+    pslldq        m6,    2
+    pinsrb        m6,    [r3 + 25],  1
+    pinsrb        m6,    [r3 + 28],  0
+    pmaddubsw     m3,    m6,         [r5 + 27 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 0],  0
+    pmaddubsw     m5,    m2,         [r5 + 27 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 760 * 16],   m3
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 8],  0
+    pmaddubsw     m3,    m1,         [r5 + 27 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 16],  0
+    pmaddubsw     m5,    m4,         [r5 + 27 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 761 * 16],   m3
+
+    ; mode 13 [row 29]
+    pmaddubsw     m3,    m6,         [r5 + 18 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,         [r5 + 18 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 762 * 16],   m3
+    pmaddubsw     m3,    m1,         [r5 + 18 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,         [r5 + 18 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 763 * 16],   m3
+
+    ; mode 13 [row 30]
+    pmaddubsw     m3,    m6,         [r5 + 9 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,         [r5 + 9 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 764 * 16],   m3
+    pmaddubsw     m3,    m1,         [r5 + 9 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,         [r5 + 9 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 765 * 16],   m3
+
+    ; mode 14 [row 19]
+    pslldq        m7,    2
+    pinsrb        m7,    [r3 + 17], 1
+    pinsrb        m7,    [r3 + 20], 0
+    pmaddubsw     m3,    m7,        [r5 + 28 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 28 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 806 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 28 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 28 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 807 * 16],  m3
+
+    ; mode 14 [row 20]
+    pmaddubsw     m3,    m7,        [r5 + 15 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 15 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 808 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 15 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 15 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 809 * 16],  m3
+
+    ; mode 14 [row 21]
+    pmaddubsw     m3,    m7,        [r5 + 2 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 2 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 810 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 2 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 2 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 811 * 16],  m3
+
+    ; mode 15 [row 15]
+    pmaddubsw     m5,    m2,            [r5 + 16 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 862 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 16 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,            [r5 + 16 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 863 * 16],      m3
+
+    ; mode 14 [row 22]
+    pslldq        m7,    2
+    pinsrb        m7,    [r3 + 20],  1
+    pinsrb        m7,    [r3 + 22],  0
+    pmaddubsw     m3,    m7,         [r5 + 21 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 0],  1
+    pinsrb        m2,    [r3 + 2],  0
+    pmaddubsw     m5,    m2,        [r5 + 21 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 812 * 16],  m3
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 7],  0
+    pmaddubsw     m3,    m1,        [r5 + 21 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 15],  0
+    pmaddubsw     m5,    m4,        [r5 + 21 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 813 * 16],  m3
+
+    ; mode 14 [row 23]
+    pmaddubsw     m3,    m7,        [r5 + 8 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 8 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 814 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 8 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 8 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 815 * 16],  m3
+
+    ; mode 15 [row 16]
+    pmaddubsw     m5,    m2,            [r5 + 31 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 864 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 31 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,            [r5 + 31 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 865 * 16],  m3
+
+    ; mode 15 [row 17]
+    pmaddubsw     m5,    m2,            [r5 + 14 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 866 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 14 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,            [r5 + 14 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 867 * 16],      m3
+
+    ; mode 14 [row 24]
+    pslldq        m7,    2
+    pinsrb        m7,    [r3 + 22],  1
+    pinsrb        m7,    [r3 + 25],  0
+    pmaddubsw     m3,    m7,         [r5 + 27 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 2],  1
+    pinsrb        m2,    [r3 + 5],  0
+    pmaddubsw     m5,    m2,        [r5 + 27 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 816 * 16],  m3
+    pslldq        m1,    2
+    pinsrw        m1,     [r4 + 6],  0
+    pmaddubsw     m3,    m1,        [r5 + 27 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 14],  0
+    pmaddubsw     m5,    m4,        [r5 + 27 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 817 * 16],  m3
+
+    ; mode 14 [row 25]
+    pmaddubsw     m3,    m7,        [r5 + 14 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 14 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 818 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 14 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 14 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 819 * 16],  m3
+
+    ; mode 14 [row 26]
+    pmaddubsw     m3,    m7,        [r5 + 1 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 1 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 820 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 1 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 1 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 821 * 16],  m3
+
+    ; mode 15 [row 18]
+    pinsrb        m2,    [r3 + 4],      0
+    pmaddubsw     m5,    m2,            [r5 + 29 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 868 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 29 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,            [r5 + 29 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 869 * 16],      m3
+
+    ; mode 15 [row 19]
+    pmaddubsw     m5,    m2,            [r5 + 12 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 870 * 16 + 8],  m5
+    pmaddubsw     m3,    m1,            [r5 + 12 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,            [r5 + 12 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 871 * 16],      m3
+
+    ; mode 15 [row 20 - 8 to 15]
+    pslldq        m3,     m2,           2
+    pinsrb        m3,    [r3 + 4],      1
+    pinsrb        m3,    [r3 + 6],      0
+    pmaddubsw     m5,    m3,            [r5 + 27 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 872 * 16 + 8],  m5
+
+    ; mode 15 [row 21 - 8 to 15]
+    pmaddubsw     m5,    m3,            [r5 + 10 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 874 * 16 + 8],  m5
+
+    ; mode 15 [row 22 - 8 to 15]
+    pslldq        m3,    2
+    pinsrb        m3,    [r3 + 6],      1
+    pinsrb        m3,    [r3 + 8],      0
+    pmaddubsw     m5,    m3,            [r5 + 25 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 876 * 16 + 8],  m5
+
+    ; mode 15 [row 23 - 8 to 15]
+    pmaddubsw     m5,    m3,            [r5 + 8 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 878 * 16 + 8],  m5
+
+    ; mode 15 [row 24 - 8 to 15]
+    pslldq        m3,    2
+    pinsrb        m3,    [r3 + 8],      1
+    pinsrb        m3,    [r3 + 9],      0
+    pmaddubsw     m5,    m3,            [r5 + 23 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 880 * 16 + 8],  m5
+
+    ; mode 15 [row 25 - 8 to 15]
+    pmaddubsw     m5,    m3,            [r5 + 6 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 882 * 16 + 8],  m5
+
+    ; mode 15 [row 26 - 8 to 15]
+    pslldq        m3,    2
+    pinsrb        m3,    [r3 +  9],      1
+    pinsrb        m3,    [r3 + 11],      0
+    pmaddubsw     m5,    m3,             [r5 + 21 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 884 * 16 + 8],   m5
+
+    ; mode 15 [row 27 - 8 to 15]
+    pmaddubsw     m5,    m3,             [r5 + 4 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 886 * 16 + 8],   m5
+
+    ; mode 15 [row 28 - 8 to 15]
+    pslldq        m3,    2
+    pinsrb        m3,    [r3 + 11],      1
+    pinsrb        m3,    [r3 + 13],      0
+    pmaddubsw     m5,    m3,             [r5 + 19 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 888 * 16 + 8],   m5
+
+    ; mode 15 [row 29 - 8 to 15]
+    pmaddubsw     m5,    m3,             [r5 + 2 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 890 * 16 + 8],   m5
+
+    ; mode 15 [row 30 - 8 to 15]
+    pslldq        m3,    2
+    pinsrb        m3,    [r3 + 13],      1
+    pinsrb        m3,    [r3 + 15],      0
+    pmaddubsw     m5,    m3,             [r5 + 17 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m5,    m5
+    movh          [r0 + 892 * 16 + 8],   m5
+
+    ; mode 15 [row 31, 8 to 15]
+    pshufb        m5,    m3,           [tab_S2]
+    movh          [r0 + 894 * 16 + 8],     m5
+
+    ; mode 14 [row 27]
+    pinsrb        m2,    [r3 + 5],      0
+    pslldq        m7,    2
+    pinsrb        m7,    [r3 + 25],  1
+    pinsrb        m7,    [r3 + 27],  0
+    pmaddubsw     m3,    m7,         [r5 + 20 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 5],  1
+    pinsrb        m2,    [r3 + 7],  0
+    pmaddubsw     m5,    m2,        [r5 + 20 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 822 * 16],  m3
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 5],  0
+    pmaddubsw     m3,    m1,        [r5 + 20 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 13],  0
+    pmaddubsw     m5,    m4,        [r5 + 20 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 823 * 16],  m3
+
+    ; mode 15 [row 20 - 16 to 31]
+    pmaddubsw     m3,    m1,        [r5 + 27 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 27 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 873 * 16],  m3
+
+    ; mode 15 [row 21 - 16 to 31]
+    pmaddubsw     m3,    m1,        [r5 + 10 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 10 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 875 * 16],  m3
+
+    ; mode 14 [row 28]
+    pmaddubsw     m3,    m7,        [r5 + 7 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,        [r5 + 7 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 824 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 7 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 7 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 825 * 16],  m3
+
+    ; mode 14 [row 29]
+    pslldq        m7,    2
+    pinsrb        m7,    [r3 + 27],  1
+    pinsrb        m7,    [r3 + 30],  0
+    pmaddubsw     m3,    m7,         [r5 + 26 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 +  7],  1
+    pinsrb        m2,    [r3 + 10],  0
+    pmaddubsw     m5,    m2,         [r5 + 26 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 826 * 16],  m3
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 4],  0
+    pmaddubsw     m3,    m1,        [r5 + 26 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 12], 0
+    pmaddubsw     m5,    m4,        [r5 + 26 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 827 * 16],  m3
+
+    ; mode 14 [row 30]
+    pmaddubsw     m3,    m7,         [r5 + 13 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m2,         [r5 + 13 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 828 * 16],  m3
+    pmaddubsw     m3,    m1,        [r5 + 13 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 13 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 829 * 16],  m3
+
+    ; mode 15 [row 22]
+    pmaddubsw     m3,    m1,        [r5 + 25 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 25 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 877 * 16],  m3
+
+    ; mode 15 [row 23]
+    pmaddubsw     m3,    m1,        [r5 + 8 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 8 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 879 * 16],  m3
+
+    ; mode 14 [row 31]
+    pshufb        m3,    m7,           [tab_S2]
+    movh          [r0 + 830 * 16],     m3
+    pshufb        m3,    m2,           [tab_S2]
+    movh          [r0 + 830 * 16 + 8], m3
+    pshufb        m3,    m1,           [tab_S2]
+    movh          [r0 + 831 * 16],     m3
+    pshufb        m3,    m4,           [tab_S2]
+    movh          [r0 + 831 * 16 + 8], m3
+
+    ; mode 13 [row 31]
+    pshufb        m0,    m6,           [tab_S2]
+    movh          [r0 + 766 * 16],     m0
+    movh          m0,                  [r4]
+    movh          [r0 + 766 * 16 + 8], m0
+    movu          m0,                  [r4 + 8]
+    movu          [r0 + 767 * 16],     m0
+
+    ; mode 15 [row 24]
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 3], 0
+    pmaddubsw     m3,    m1,        [r5 + 23 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 11], 0
+    pmaddubsw     m5,    m4,        [r5 + 23 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 881 * 16],  m3
+
+    ; mode 15 [row 25]
+    pmaddubsw     m3,    m1,        [r5 + 6 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 6 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 883 * 16],  m3
+
+    ; mode 15 [row 26]
+    pslldq        m1,    2
+    pinsrw        m1,     [r4 + 2], 0
+    pmaddubsw     m3,    m1,        [r5 + 21 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 10], 0
+    pmaddubsw     m5,    m4,        [r5 + 21 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 885 * 16],  m3
+
+    ; mode 15 [row 27]
+    pmaddubsw     m3,    m1,        [r5 + 4 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 4 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 887 * 16],  m3
+
+    ; mode 15 [row 28]
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 1],  0
+    pmaddubsw     m3,    m1,        [r5 + 19 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 9],  0
+    pmaddubsw     m5,    m4,        [r5 + 19 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 889 * 16],  m3
+
+    ; mode 15 [row 29]
+    pmaddubsw     m3,    m1,        [r5 + 2 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pmaddubsw     m5,    m4,        [r5 + 2 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 891 * 16],  m3
+
+    ; mode 15 [row 30]
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 0],  0
+    pmaddubsw     m3,    m1,        [r5 + 17 * 16]
+    pmulhrsw      m3,    [pw_1024]
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 8], 0
+    pmaddubsw     m5,    m4,        [r5 + 17 * 16]
+    pmulhrsw      m5,    [pw_1024]
+    packuswb      m3,    m5
+    movu          [r0 + 893 * 16],  m3
+
+    ; mode 15 [row 31]
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 895 * 16],      m5
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 895 * 16 + 8],  m5
+
+    ; mode 16 [row 0]
+    movu          m6,    [r5 + 11 * 16]
+    movu          m7,    [pw_1024]
+    movh          m0,    [r4     ]
+    movh          m1,    [r4 + 1 ]
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,            m6
+    pmulhrsw      m1,    m7
+    movh          m2,    [r4 +  8]
+    movh          m3,    [r4 +  9]
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,            m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,    m3
+    movu          [r0 + 896 * 16],      m1
+
+    movh          m1,    [r4 + 16]
+    movh          m3,    [r4 + 17]
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movh          m4,    [r4 + 24]
+    movh          m5,    [r4 + 25]
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 897 * 16],      m3
+
+    ; mode16 [row 1]
+    movu          m6,    [r5 + 22 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4],          1
+    pinsrb        m0,    [r3 + 2],      0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 7],  0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 898 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,     [r4 + 15],    0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 23],     0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 899 * 16],      m3
+
+    ; mode16 [row 2]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 900 * 16],      m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 901 * 16],      m3
+
+    ; mode16 [row 3]
+    movu          m6,    [r5 + 12 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 2],      1
+    pinsrb        m0,    [r3 + 3],      0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,     [r4 + 6],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 902 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,     [r4 + 14],    0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 22],     0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 903 * 16],      m3
+
+    ; mode16 [row 4]
+    movu          m6,    [r5 + 23 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 3],      1
+    pinsrb        m0,    [r3 + 5],      0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 904 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 13],     0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 21],     0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 905 * 16],      m3
+
+    ; mode16 [row 5]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 906 * 16],      m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 907 * 16],      m3
+
+    ; mode16 [row 6]
+    movu          m6,    [r5 + 13 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 5],      1
+    pinsrb        m0,    [r3 + 6],      0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 5],      1
+    pinsrb        m2,    [r4 + 4],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 908 * 16],      m3
+    pslldq        m1,    2
+    pinsrw        m1,     [r4 + 12],    0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,     [r4 + 20],    0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 909 * 16],      m3
+
+    ; mode16 [row 7]
+    movu          m6,    [r5 + 24 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 6],      1
+    pinsrb        m0,    [r3 + 8],      0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,     [r4 + 3],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 910 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,     [r4 + 11],    0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,     [r4 + 19],    0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 911 * 16],      m3
+
+    ; mode16 [row 8]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 912 * 16],      m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 913 * 16],      m3
+
+    ; mode16 [row 9]
+    movu          m6,    [r5 + 14 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 8],      1
+    pinsrb        m0,    [r3 + 9],      0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,     [r4 + 2],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 914 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 10],     0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 18],     0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 915 * 16],      m3
+
+    ; mode16 [row 10]
+    movu          m6,    [r5 + 25 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 +  9],      1
+    pinsrb        m0,    [r3 + 11],      0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,     [r4 + 1],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 916 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 9],      0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrb        m4,    [r4 + 18],     1
+    pinsrb        m4,    [r4 + 17],     0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 917 * 16],      m3
+
+    ; mode16 [row 11]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 918 * 16],      m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 919 * 16],      m3
+
+    ; mode16 [row 12]
+    movu          m6,    [r5 + 15 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 11],     1
+    pinsrb        m0,    [r3 + 12],     0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 0],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 920 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 8],    0
+    pmaddubsw     m3,    m1,           m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 16],   0
+    pmaddubsw     m5,    m4,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 921 * 16],     m3
+
+    ; mode16 [row 13]
+    movu          m6,    [r5 + 26 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 12],     1
+    pinsrb        m0,    [r3 + 14],     0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 0],      1
+    pinsrb        m2,    [r3 + 2],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 922 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,     [r4 + 7],    0
+    pmaddubsw     m3,    m1,           m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,     [r4 + 15],   0
+    pmaddubsw     m5,    m4,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 923 * 16],     m3
+
+    ; mode16 [row 14]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 924 * 16],      m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 925 * 16],      m3
+
+    ; mode16 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 14],     1
+    pinsrb        m0,    [r3 + 15],     0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 2],      1
+    pinsrb        m2,    [r3 + 3],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 926 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 6],      0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 14],     0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 927 * 16],      m3
+
+    ; mode16 [row 16]
+    movu          m6,    [r5 + 27 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 15],     1
+    pinsrb        m0,    [r3 + 17],     0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 3],      1
+    pinsrb        m2,    [r3 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 928 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 5],      0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 13],     0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 929 * 16],      m3
+
+    ; mode16 [row 17]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 930 * 16],      m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 931 * 16],      m3
+
+    ; mode16 [row 18]
+    movu          m6,    [r5 + 17 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 17],     1
+    pinsrb        m0,    [r3 + 18],     0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 5],      1
+    pinsrb        m2,    [r3 + 6],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 932 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 4],      0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 12],     0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 933 * 16],      m3
+
+    ; mode16 [row 19]
+    movu          m6,    [r5 + 28 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 18],     1
+    pinsrb        m0,    [r3 + 20],     0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 6],      1
+    pinsrb        m2,    [r3 + 8],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 934 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 3],      0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 11],     0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 935 * 16],      m3
+
+    ; mode16 [row 20]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 936 * 16],      m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 937 * 16],      m3
+
+    ; mode16 [row 21]
+    movu          m6,    [r5 + 18 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 20],     1
+    pinsrb        m0,    [r3 + 21],     0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 8],      1
+    pinsrb        m2,    [r3 + 9],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 938 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 2],      0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 10],     0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 939 * 16],      m3
+
+    ; mode16 [row 22]
+    movu          m6,    [r5 + 29 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 21],     1
+    pinsrb        m0,    [r3 + 23],     0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 +  9],     1
+    pinsrb        m2,    [r3 + 11],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 940 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 1],      0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 9],      0
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 941 * 16],      m3
+
+    ; mode16 [row 23]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 942 * 16],      m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 943 * 16],      m3
+
+    ; mode16 [row 24]
+    movu          m6,    [r5 + 19 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 23],     1
+    pinsrb        m0,    [r3 + 24],     0
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 11],     1
+    pinsrb        m2,    [r3 + 12],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 944 * 16],      m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 0],      0
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 8],     0
+    pmaddubsw     m5,    m4,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 945 * 16],     m3
+
+    ; mode16 [row 25]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 24],    1
+    pinsrb        m0,    [r3 + 26],    0
+    pmaddubsw     m3,    m0,           m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 12],    1
+    pinsrb        m2,    [r3 + 14],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 946 * 16],     m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 0],     1
+    pinsrb        m1,    [r3 + 2],     0
+    pmaddubsw     m3,    m1,           m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 7],     0
+    pmaddubsw     m5,    m4,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 947 * 16],     m3
+
+    ; mode16 [row 26]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m3,    m0,           m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 948 * 16],     m3
+
+    pmaddubsw     m3,    m1,           m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 949 * 16],     m3
+
+    ; mode16 [row 27]
+    movu          m6,    [r5 + 20 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 26],    1
+    pinsrb        m0,    [r3 + 27],    0
+    pmaddubsw     m3,    m0,           m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 14],    1
+    pinsrb        m2,    [r3 + 15],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 950 * 16],     m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 2],     1
+    pinsrb        m1,    [r3 + 3],     0
+    pmaddubsw     m3,    m1,           m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 6],     0
+    pmaddubsw     m5,    m4,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 951 * 16],     m3
+
+    ; mode16 [row 28]
+    movu          m6,    [r5 + 31 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 27],    1
+    pinsrb        m0,    [r3 + 29],    0
+    pmaddubsw     m3,    m0,           m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 15],    1
+    pinsrb        m2,    [r3 + 17],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 952 * 16],     m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 3],     1
+    pinsrb        m1,    [r3 + 5],     0
+    pmaddubsw     m3,    m1,           m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 5],     0
+    pmaddubsw     m5,    m4,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 953 * 16],     m3
+
+    ; mode16 [row 29]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m3,    m0,           m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 954 * 16],     m3
+
+    pmaddubsw     m3,    m1,           m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 955 * 16],     m3
+
+    ; mode16 [row 30]
+    movu          m6,    [r5 + 21 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 29],    1
+    pinsrb        m0,    [r3 + 30],    0
+    pmaddubsw     m3,    m0,           m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 17],    1
+    pinsrb        m2,    [r3 + 18],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 956 * 16],     m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 5],     1
+    pinsrb        m1,    [r3 + 6],     0
+    pmaddubsw     m3,    m1,           m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 4],     0
+    pmaddubsw     m5,    m4,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 957 * 16],     m3
+
+    ; mode16 [row 31]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 958 * 16],      m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 958 * 16 + 8],  m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 959 * 16],      m5
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 959 * 16 + 8],  m5
+
+    ; mode 17 [row 0]
+    movu          m6,    [r5 + 6 * 16]
+    movu          m7,    [pw_1024]
+    movh          m0,    [r4     ]
+    movh          m1,    [r4 + 1 ]
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,            m6
+    pmulhrsw      m1,    m7
+    movh          m2,    [r4 +  8]
+    movh          m3,    [r4 +  9]
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,            m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,    m3
+    movu          [r0 + 960 * 16],      m1
+
+    movh          m1,    [r4 + 16]
+    movh          m3,    [r4 + 17]
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movh          m4,    [r4 + 24]
+    movh          m5,    [r4 + 25]
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 961 * 16],      m3
+
+    ; mode17 [row 1]
+    movu          m6,    [r5 + 12 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 0],    1
+    pinsrb        m0,    [r3 + 1],    0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 7],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 962 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 15],   0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 23],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 963 * 16],    m3
+
+    ; mode17 [row 2]
+    movu          m6,    [r5 + 18 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 1],    1
+    pinsrb        m0,    [r3 + 2],    0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 6],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 964 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 14],   0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 22],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 965 * 16],    m3
+
+    ; mode17 [row 3]
+    movu          m6,    [r5 + 24 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 2],    1
+    pinsrb        m0,    [r3 + 4],    0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 5],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 966 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 13],   0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 21],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 967 * 16],    m3
+
+    ; mode17 [row 4]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 4],    1
+    pinsrb        m0,    [r3 + 5],    0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 4],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 968 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 12],   0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 20],  0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 969 * 16],    m3
+
+    ; mode17 [row 5]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 970 * 16],    m3
+
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 971 * 16],    m3
+
+    ; mode17 [row 6]
+    movu          m6,    [r5 + 10 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 5],    1
+    pinsrb        m0,    [r3 + 6],    0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 3],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 972 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 11],   0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 19],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 973 * 16],    m3
+
+    ; mode17 [row 7]
+    movu          m6,    [r5 + 16 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 6],    1
+    pinsrb        m0,    [r3 + 7],    0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 2],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 974 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 10],   0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 18],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 975 * 16],    m3
+
+    ; mode17 [row 8]
+    movu          m6,    [r5 + 22 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 7],    1
+    pinsrb        m0,    [r3 + 9],    0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 1],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 976 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 9],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 17],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 977 * 16],    m3
+
+    ; mode17 [row 9]
+    movu          m6,    [r5 + 28 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 +  9],    1
+    pinsrb        m0,    [r3 + 10],    0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r4 + 0],     0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 978 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 8],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 16],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 979 * 16],    m3
+
+    ; mode17 [row 10]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 980 * 16],    m3
+
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 981 * 16],    m3
+
+    ; mode17 [row 11]
+    movu          m6,    [r5 + 8 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 10],   1
+    pinsrb        m0,    [r3 + 11],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 0],    1
+    pinsrb        m2,    [r3 + 1],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 982 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 7],     0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 15],    0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 983 * 16],    m3
+
+    ; mode17 [row 12]
+    movu          m6,    [r5 + 14 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 11],   1
+    pinsrb        m0,    [r3 + 12],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 1],    1
+    pinsrb        m2,    [r3 + 2],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 984 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 6],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 14],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 985 * 16],    m3
+
+    ; mode17 [row 13]
+    movu          m6,    [r5 + 20 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 12],   1
+    pinsrb        m0,    [r3 + 14],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 2],    1
+    pinsrb        m2,    [r3 + 4],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 986 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 5],   0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 13],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 987 * 16],    m3
+
+    ; mode17 [row 14]
+    movu          m6,    [r5 + 26 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 14],   1
+    pinsrb        m0,    [r3 + 15],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 4],    1
+    pinsrb        m2,    [r3 + 5],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 988 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 4],   0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 12],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 989 * 16],    m3
+
+    ; mode17 [row 15]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 990 * 16],      m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 990 * 16 + 8],  m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 991 * 16],      m5
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 991 * 16 + 8],  m5
+
+    ; mode17 [row 16]
+    movu          m6,    [r5 + 6 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 15],   1
+    pinsrb        m0,    [r3 + 16],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 5],    1
+    pinsrb        m2,    [r3 + 6],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 992 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 3],     0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 11],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 993 * 16],    m3
+
+    ; mode17 [row 17]
+    movu          m6,    [r5 + 12 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 16],   1
+    pinsrb        m0,    [r3 + 17],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 6],    1
+    pinsrb        m2,    [r3 + 7],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 994 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,     [r4 + 2],   0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 10],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 995 * 16],    m3
+
+    ; mode17 [row 18]
+    movu          m6,    [r5 + 18 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 17],   1
+    pinsrb        m0,    [r3 + 18],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 7],    1
+    pinsrb        m2,    [r3 + 9],    0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 996 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 1],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 9],    0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 997 * 16],    m3
+
+    ; mode17 [row 19]
+    movu          m6,    [r5 + 24 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 18],   1
+    pinsrb        m0,    [r3 + 20],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 +  9],   1
+    pinsrb        m2,    [r3 + 10],   0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 998 * 16],    m3
+
+    pslldq        m1,    2
+    pinsrw        m1,    [r4 + 0],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 8],    0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 999 * 16],    m3
+
+    ; mode17 [row 20]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 20],   1
+    pinsrb        m0,    [r3 + 21],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 10],   1
+    pinsrb        m2,    [r3 + 11],   0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1000 * 16],   m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 0],    1
+    pinsrb        m1,    [r3 + 1],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    ;pinsrb        m4,    [r4 + 8],   1
+    ;pinsrb        m4,    [r4 + 7],   0
+    pinsrw        m4,     [r4 + 7],  0
+    pmaddubsw     m5,    m4,         m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1001 * 16],  m3
+
+    ; mode17 [row 21]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1002 * 16],   m3
+
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,         m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1003 * 16],  m3
+
+    ; mode17 [row 22]
+    movu          m6,    [r5 + 10 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 21],   1
+    pinsrb        m0,    [r3 + 22],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 11],   1
+    pinsrb        m2,    [r3 + 12],   0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1004 * 16],   m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 1],    1
+    pinsrb        m1,    [r3 + 2],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 6],   0
+    pmaddubsw     m5,    m4,         m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1005 * 16],  m3
+
+    ; mode17 [row 23]
+    movu          m6,    [r5 + 16 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 22],   1
+    pinsrb        m0,    [r3 + 23],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 12],   1
+    pinsrb        m2,    [r3 + 14],   0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1006 * 16],   m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 2],    1
+    pinsrb        m1,    [r3 + 4],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 5],   0
+    pmaddubsw     m5,    m4,         m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1007 * 16],  m3
+
+    ; mode17 [row 24]
+    movu          m6,    [r5 + 22 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 23],   1
+    pinsrb        m0,    [r3 + 25],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 14],   1
+    pinsrb        m2,    [r3 + 15],   0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1008 * 16],   m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 4],    1
+    pinsrb        m1,    [r3 + 5],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 4],   0
+    pmaddubsw     m5,    m4,         m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1009 * 16],  m3
+
+    ; mode17 [row 25]
+    movu          m6,    [r5 + 28 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 25],   1
+    pinsrb        m0,    [r3 + 26],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 15],   1
+    pinsrb        m2,    [r3 + 16],   0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1010 * 16],   m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 5],    1
+    pinsrb        m1,    [r3 + 6],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,     [r4 + 3],   0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1011 * 16],   m3
+
+    ; mode17 [row 26]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1012 * 16],   m3
+
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1013 * 16],   m3
+
+    ; mode17 [row 27]
+    movu          m6,    [r5 + 8 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 26],   1
+    pinsrb        m0,    [r3 + 27],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 16],   1
+    pinsrb        m2,    [r3 + 17],   0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1014 * 16],   m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 6],    1
+    pinsrb        m1,    [r3 + 7],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 2],    0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1015 * 16],   m3
+
+    ; mode17 [row 28]
+    movu          m6,    [r5 + 14 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 27],   1
+    pinsrb        m0,    [r3 + 28],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 17],   1
+    pinsrb        m2,    [r3 + 18],   0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1016 * 16],   m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 7],    1
+    pinsrb        m1,    [r3 + 9],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 1],    0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1017 * 16],   m3
+
+    ; mode17 [row 29]
+    movu          m6,    [r5 + 20 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 28],   1
+    pinsrb        m0,    [r3 + 30],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 18],   1
+    pinsrb        m2,    [r3 + 20],   0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1018 * 16],   m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 +  9],    1
+    pinsrb        m1,    [r3 + 10],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrw        m4,    [r4 + 0],    0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1019 * 16],   m3
+
+    ; mode17 [row 30]
+    movu          m6,    [r5 + 26 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r3 + 30],   1
+    pinsrb        m0,    [r3 + 31],   0
+    pmaddubsw     m3,    m0,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 20],   1
+    pinsrb        m2,    [r3 + 21],   0
+    pmaddubsw     m5,    m2,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1020 * 16],   m3
+
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 10],    1
+    pinsrb        m1,    [r3 + 11],    0
+    pmaddubsw     m3,    m1,          m6
+    pmulhrsw      m3,    m7
+    pslldq        m4,    2
+    pinsrb        m4,    [r4 + 0],    1
+    pinsrb        m4,    [r3 + 1],    0
+    pmaddubsw     m5,    m4,          m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1021 * 16],   m3
+
+    ; mode17 [row 31]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1022 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1022 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1023 * 16],     m5
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 1023 * 16 + 8], m5
+
+    ;mode 18[row 0]
+    movu          m0,                   [r3]
+    movu          [r0 + 1024 * 16],     m0
+    movu          m1,                   [r3 + 16]
+    movu          [r0 + 1025 * 16],     m1
+
+    ;mode 18[row 1]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 1],     0
+    movu          [r0 + 1026 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 15],    0
+    movu          [r0 + 1027 * 16],     m1
+
+    ;mode 18[row 2]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 2],     0
+    movu          [r0 + 1028 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 14],    0
+    movu          [r0 + 1029 * 16],     m1
+
+    ;mode 18[row 3]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 3],     0
+    movu          [r0 + 1030 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 13],    0
+    movu          [r0 + 1031 * 16],     m1
+
+    ;mode 18[row 4]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 4],     0
+    movu          [r0 + 1032 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 12],    0
+    movu          [r0 + 1033 * 16],     m1
+
+    ;mode 18[row 5]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 5],     0
+    movu          [r0 + 1034 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 11],    0
+    movu          [r0 + 1035 * 16],     m1
+
+    ;mode 18[row 6]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 6],     0
+    movu          [r0 + 1036 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 10],    0
+    movu          [r0 + 1037 * 16],     m1
+
+    ;mode 18[row 7]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 7],     0
+    movu          [r0 + 1038 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 9],     0
+    movu          [r0 + 1039 * 16],     m1
+
+    ;mode 18[row 8]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 8],     0
+    movu          [r0 + 1040 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 8],     0
+    movu          [r0 + 1041 * 16],     m1
+
+    ;mode 18[row 9]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 +  9],    0
+    movu          [r0 + 1042 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 7],     0
+    movu          [r0 + 1043 * 16],     m1
+
+    ;mode 18[row 10]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 10],    0
+    movu          [r0 + 1044 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 6],     0
+    movu          [r0 + 1045 * 16],     m1
+
+    ;mode 18[row 11]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 11],    0
+    movu          [r0 + 1046 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 5],     0
+    movu          [r0 + 1047 * 16],     m1
+
+    ;mode 18[row 12]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 12],    0
+    movu          [r0 + 1048 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 4],     0
+    movu          [r0 + 1049 * 16],     m1
+
+    ;mode 18[row 13]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 13],    0
+    movu          [r0 + 1050 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 3],     0
+    movu          [r0 + 1051 * 16],     m1
+
+    ;mode 18[row 14]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 14],    0
+    movu          [r0 + 1052 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 2],     0
+    movu          [r0 + 1053 * 16],     m1
+
+    ;mode 18[row 15]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 15],    0
+    movu          [r0 + 1054 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 1],     0
+    movu          [r0 + 1055 * 16],     m1
+
+    ;mode 18[row 16]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 16],    0
+    movu          [r0 + 1056 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r3 + 0],     0
+    movu          [r0 + 1057 * 16],     m1
+
+    ;mode 18[row 17]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 17],    0
+    movu          [r0 + 1058 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 1],     0
+    movu          [r0 + 1059 * 16],     m1
+
+    ;mode 18[row 18]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 18],    0
+    movu          [r0 + 1060 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 2],     0
+    movu          [r0 + 1061 * 16],     m1
+
+    ;mode 18[row 19]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 19],    0
+    movu          [r0 + 1062 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 3],     0
+    movu          [r0 + 1063 * 16],     m1
+
+    ;mode 18[row 20]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 20],    0
+    movu          [r0 + 1064 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 4],     0
+    movu          [r0 + 1065 * 16],     m1
+
+    ;mode 18[row 21]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 21],    0
+    movu          [r0 + 1066 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 5],     0
+    movu          [r0 + 1067 * 16],     m1
+
+    ;mode 18[row 22]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 22],    0
+    movu          [r0 + 1068 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 6],     0
+    movu          [r0 + 1069 * 16],     m1
+
+    ;mode 18[row 23]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 23],    0
+    movu          [r0 + 1070 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 7],     0
+    movu          [r0 + 1071 * 16],     m1
+
+    ;mode 18[row 24]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 24],    0
+    movu          [r0 + 1072 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 8],     0
+    movu          [r0 + 1073 * 16],     m1
+
+    ;mode 18[row 25]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 25],    0
+    movu          [r0 + 1074 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 9],     0
+    movu          [r0 + 1075 * 16],     m1
+
+    ;mode 18[row 26]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 26],    0
+    movu          [r0 + 1076 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 10],    0
+    movu          [r0 + 1077 * 16],     m1
+
+    ;mode 18[row 27]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 27],    0
+    movu          [r0 + 1078 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 11],    0
+    movu          [r0 + 1079 * 16],     m1
+
+    ;mode 18[row 28]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 28],    0
+    movu          [r0 + 1080 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 12],    0
+    movu          [r0 + 1081 * 16],     m1
+
+    ;mode 18[row 29]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 29],    0
+    movu          [r0 + 1082 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 13],    0
+    movu          [r0 + 1083 * 16],     m1
+
+    ;mode 18[row 30]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 30],    0
+    movu          [r0 + 1084 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 14],    0
+    movu          [r0 + 1085 * 16],     m1
+
+    ;mode 18[row 31]
+    pslldq        m0,                   1
+    pinsrb        m0,                   [r4 + 31],    0
+    movu          [r0 + 1086 * 16],     m0
+    pslldq        m1,                   1
+    pinsrb        m1,                   [r4 + 15],    0
+    movu          [r0 + 1087 * 16],     m1
+
+    ; mode 19 [row 0]
+    movu          m6,    [r5 + 6 * 16]
+    movu          m0,    [r3         ]
+    movu          m1,    [r3 + 1     ]
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,            m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r3 + 8]
+    movu          m3,    [r3 + 9]
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,            m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,    m3
+    movu          [r0 + 1088 * 16],     m1
+
+    movu          m1,    [r3 + 16]
+    movu          m3,    [r3 + 17]
+    punpcklbw     m1,    m3
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    movu          m3,    [r3 + 24]
+    movu          m5,    [r3 + 25]
+    punpcklbw     m3,    m5
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1089 * 16],     m4
+
+    ; mode 19 [row 1]
+    movu          m6,    [r5 + 12 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 0],      1
+    pinsrb        m0,    [r4 + 1],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 7],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1090 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 15],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 23],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1091 * 16],     m4
+
+    ; mode 19 [row 2]
+    movu          m6,    [r5 + 18 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 1],      1
+    pinsrb        m0,    [r4 + 2],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 6],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1092 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 14],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 22],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1093 * 16],     m4
+
+    ; mode 19 [row 3]
+    movu          m6,    [r5 + 24 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 2],      1
+    pinsrb        m0,    [r4 + 4],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1094 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 13],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 21],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1095 * 16],     m4
+
+    ; mode 19 [row 4]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 4],      1
+    pinsrb        m0,    [r4 + 5],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 4],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1096 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 12],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 20],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1097 * 16],     m4
+
+    ; mode 19 [row 5]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1098 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1099 * 16],     m4
+
+    ; mode 19 [row 6]
+    movu          m6,    [r5 + 10 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 5],      1
+    pinsrb        m0,    [r4 + 6],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 3],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1100 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 11],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 19],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1101 * 16],    m4
+
+    ; mode 19 [row 7]
+    movu          m6,    [r5 + 16 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 6],      1
+    pinsrb        m0,    [r4 + 7],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 2],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1102 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 10],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 18],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1103 * 16],    m4
+
+    ; mode 19 [row 8]
+    movu          m6,    [r5 + 22 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 7],      1
+    pinsrb        m0,    [r4 + 9],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 1],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1104 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 9],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 17],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1105 * 16],    m4
+
+    ; mode 19 [row 9]
+    movu          m6,    [r5 + 28 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 +  9],     1
+    pinsrb        m0,    [r4 + 10],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 0],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1106 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 8],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 16],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1107 * 16],    m4
+
+    ; mode 19 [row 10]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1108 * 16],    m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1109 * 16],    m4
+
+    ; mode 19 [row 11]
+    movu          m6,    [r5 + 8 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 10],     1
+    pinsrb        m0,    [r4 + 11],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 0],      1
+    pinsrb        m2,    [r4 + 1],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1110 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 7],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 15],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1111 * 16],    m4
+
+    ; mode 19 [row 12]
+    movu          m6,    [r5 + 14 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 11],     1
+    pinsrb        m0,    [r4 + 12],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 1],      1
+    pinsrb        m2,    [r4 + 2],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1112 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 6],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 14],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1113 * 16],    m4
+
+    ; mode 19 [row 13]
+    movu          m6,    [r5 + 20 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 12],     1
+    pinsrb        m0,    [r4 + 14],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 2],      1
+    pinsrb        m2,    [r4 + 4],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1114 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 5],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 13],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1115 * 16],    m4
+
+    ; mode 19 [row 14]
+    movu          m6,    [r5 + 26 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 14],     1
+    pinsrb        m0,    [r4 + 15],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 4],      1
+    pinsrb        m2,    [r4 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1116 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 4],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 12],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1117 * 16],    m4
+
+    ; mode19 [row 15]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1118 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1118 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1119 * 16],     m5
+    pshufb        m5,    m3,            [tab_S2]
+    movh          [r0 + 1119 * 16 + 8], m5
+
+    ; mode 19 [row 16]
+    movu          m6,    [r5 + 6 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 15],     1
+    pinsrb        m0,    [r4 + 16],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 5],      1
+    pinsrb        m2,    [r4 + 6],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1120 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 3],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 11],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1121 * 16],    m4
+
+    ; mode 19 [row 17]
+    movu          m6,    [r5 + 12 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 16],     1
+    pinsrb        m0,    [r4 + 17],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 6],      1
+    pinsrb        m2,    [r4 + 7],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1122 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 2],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 10],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1123 * 16],    m4
+
+    ; mode 19 [row 18]
+    movu          m6,    [r5 + 18 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 17],     1
+    pinsrb        m0,    [r4 + 18],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 7],      1
+    pinsrb        m2,    [r4 + 9],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1124 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 1],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 +  9],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1125 * 16],    m4
+
+    ; mode 19 [row 19]
+    movu          m6,    [r5 + 24 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 18],     1
+    pinsrb        m0,    [r4 + 20],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 +  9],     1
+    pinsrb        m2,    [r4 + 10],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1126 * 16],    m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 0],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 8],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1127 * 16],    m4
+
+    ; mode 19 [row 20]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 20],     1
+    pinsrb        m0,    [r4 + 21],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 10],     1
+    pinsrb        m2,    [r4 + 11],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1128 * 16],    m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 0],      1
+    pinsrb        m1,    [r4 + 1],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrb        m3,    [r3 + 8],     1
+    pinsrb        m3,    [r3 + 7],     0
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1129 * 16],   m4
+
+    ; mode 19 [row 21]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m4,    m0,           m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1130 * 16],   m4
+    pmaddubsw     m4,    m1,           m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1131 * 16],   m4
+
+    ; mode 19 [row 22]
+    movu          m6,    [r5 + 10 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 21],    1
+    pinsrb        m0,    [r4 + 22],    0
+    pmaddubsw     m4,    m0,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 11],    1
+    pinsrb        m2,    [r4 + 12],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1132 * 16],   m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 1],     1
+    pinsrb        m1,    [r4 + 2],     0
+    pmaddubsw     m4,    m1,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 6],      0
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1133 * 16],   m4
+
+    ; mode 19 [row 23]
+    movu          m6,    [r5 + 16 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 22],    1
+    pinsrb        m0,    [r4 + 23],    0
+    pmaddubsw     m4,    m0,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 12],    1
+    pinsrb        m2,    [r4 + 14],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1134 * 16],   m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 2],     1
+    pinsrb        m1,    [r4 + 4],     0
+    pmaddubsw     m4,    m1,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 5],      0
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1135 * 16],   m4
+
+    ; mode 19 [row 24]
+    movu          m6,    [r5 + 22 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 23],    1
+    pinsrb        m0,    [r4 + 25],    0
+    pmaddubsw     m4,    m0,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 14],    1
+    pinsrb        m2,    [r4 + 15],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1136 * 16],   m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 4],     1
+    pinsrb        m1,    [r4 + 5],     0
+    pmaddubsw     m4,    m1,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 4],      0
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1137 * 16],   m4
+
+    ; mode 19 [row 25]
+    movu          m6,    [r5 + 28 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 25],    1
+    pinsrb        m0,    [r4 + 26],    0
+    pmaddubsw     m4,    m0,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 15],    1
+    pinsrb        m2,    [r4 + 16],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1138 * 16],   m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 5],     1
+    pinsrb        m1,    [r4 + 6],     0
+    pmaddubsw     m4,    m1,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 3],      0
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1139 * 16],   m4
+
+    ; mode 19 [row 26]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m4,    m0,           m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1140 * 16],   m4
+    pmaddubsw     m4,    m1,           m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1141 * 16],   m4
+
+    ; mode 19 [row 27]
+    movu          m6,    [r5 + 8 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 26],    1
+    pinsrb        m0,    [r4 + 27],    0
+    pmaddubsw     m4,    m0,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 16],    1
+    pinsrb        m2,    [r4 + 17],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1142 * 16],   m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 6],     1
+    pinsrb        m1,    [r4 + 7],     0
+    pmaddubsw     m4,    m1,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 2],      0
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1143 * 16],   m4
+
+    ; mode 19 [row 28]
+    movu          m6,    [r5 + 14 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 27],    1
+    pinsrb        m0,    [r4 + 28],    0
+    pmaddubsw     m4,    m0,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 17],    1
+    pinsrb        m2,    [r4 + 18],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1144 * 16],   m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 7],     1
+    pinsrb        m1,    [r4 + 9],     0
+    pmaddubsw     m4,    m1,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 1],      0
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1145 * 16],   m4
+
+    ; mode 19 [row 29]
+    movu          m6,    [r5 + 20 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 28],    1
+    pinsrb        m0,    [r4 + 30],    0
+    pmaddubsw     m4,    m0,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 18],    1
+    pinsrb        m2,    [r4 + 20],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1146 * 16],   m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 +  9],    1
+    pinsrb        m1,    [r4 + 10],    0
+    pmaddubsw     m4,    m1,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 0],      0
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1147 * 16],   m4
+
+    ; mode 19 [row 30]
+    movu          m6,    [r5 + 26 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 30],    1
+    pinsrb        m0,    [r4 + 31],    0
+    pmaddubsw     m4,    m0,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 20],    1
+    pinsrb        m2,    [r4 + 21],    0
+    pmaddubsw     m5,    m2,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1148 * 16],   m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 10],    1
+    pinsrb        m1,    [r4 + 11],    0
+    pmaddubsw     m4,    m1,           m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrb        m3,    [r4 + 0],     1
+    pinsrb        m3,    [r4 + 1],     0
+    pmaddubsw     m5,    m3,           m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 +  1149 * 16],   m4
+
+    ; mode 19 [row 31]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1150 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1150 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1151 * 16],     m5
+    pshufb        m5,    m3,            [tab_S2]
+    movh          [r0 + 1151 * 16 + 8], m5
+
+    ; mode 20 [row 0]
+    movu          m6,    [r5 + 11 * 16]
+    movu          m0,    [r3         ]
+    movu          m1,    [r3 + 1     ]
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,            m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r3 + 8]
+    movu          m3,    [r3 + 9]
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,            m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,    m3
+    movu          [r0 + 1152 * 16],     m1
+
+    movu          m1,    [r3 + 16]
+    movu          m3,    [r3 + 17]
+    punpcklbw     m1,    m3
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    movu          m3,    [r3 + 24]
+    movu          m5,    [r3 + 25]
+    punpcklbw     m3,    m5
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1153 * 16],     m4
+
+    ; mode 20 [row 1]
+    movu          m6,    [r5 + 22 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 0],      1
+    pinsrb        m0,    [r4 + 2],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 7],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1154 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 15],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 23],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1155 * 16],     m4
+
+    ; mode 20 [row 2]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1156 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1157 * 16],     m4
+
+    ; mode 20 [row 3]
+    movu          m6,    [r5 + 12 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 2],      1
+    pinsrb        m0,    [r4 + 3],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 6],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1158 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 14],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 22],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1159 * 16],     m4
+
+    ; mode 20 [row 4]
+    movu          m6,    [r5 + 23 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 3],      1
+    pinsrb        m0,    [r4 + 5],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1160 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 13],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 21],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1161 * 16],     m4
+
+    ; mode 20 [row 5]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1162 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1163 * 16],     m4
+
+    ; mode 20 [row 6]
+    movu          m6,    [r5 + 13 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 5],      1
+    pinsrb        m0,    [r4 + 6],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 4],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1164 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 12],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 20],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1165 * 16],     m4
+
+    ; mode 20 [row 7]
+    movu          m6,    [r5 + 24 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 6],      1
+    pinsrb        m0,    [r4 + 8],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 3],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1166 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 11],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 19],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1167 * 16],     m4
+
+    ; mode 20 [row 8]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1168 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1169 * 16],     m4
+
+    ; mode 20 [row 9]
+    movu          m6,    [r5 + 14 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 8],      1
+    pinsrb        m0,    [r4 + 9],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 3],      1
+    pinsrb        m2,    [r3 + 2],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1170 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 10],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 18],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1171 * 16],     m4
+
+    ; mode 20 [row 10]
+    movu          m6,    [r5 + 25 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 +  9],      1
+    pinsrb        m0,    [r4 + 11],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 1],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1172 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 9],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 17],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1173 * 16],     m4
+
+    ; mode 20 [row 11]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1174 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1175 * 16],     m4
+
+    ; mode 20 [row 12]
+    movu          m6,    [r5 + 15 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 11],     1
+    pinsrb        m0,    [r4 + 12],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r3 + 1],      1
+    pinsrb        m2,    [r3 + 0],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1176 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 8],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 16],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1177 * 16],     m4
+
+    ; mode 20 [row 13]
+    movu          m6,    [r5 + 26 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 12],     1
+    pinsrb        m0,    [r4 + 14],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 0],      1
+    pinsrb        m2,    [r4 + 2],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1178 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 7],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 15],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1179 * 16],     m4
+
+    ; mode 20 [row 14]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1180 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1181 * 16],     m4
+
+    ; mode 20 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 14],     1
+    pinsrb        m0,    [r4 + 15],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 2],      1
+    pinsrb        m2,    [r4 + 3],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1182 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 6],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 14],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1183 * 16],     m4
+
+    ; mode 20 [row 16]
+    movu          m6,    [r5 + 27 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 15],     1
+    pinsrb        m0,    [r4 + 17],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 3],      1
+    pinsrb        m2,    [r4 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1184 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 5],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 13],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1185 * 16],     m4
+
+    ; mode 20 [row 17]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1186 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1187 * 16],     m4
+
+    ; mode 20 [row 18]
+    movu          m6,    [r5 + 17 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 17],     1
+    pinsrb        m0,    [r4 + 18],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 5],      1
+    pinsrb        m2,    [r4 + 6],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1188 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 4],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 12],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1189 * 16],     m4
+
+    ; mode 20 [row 19]
+    movu          m6,    [r5 + 28 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 18],     1
+    pinsrb        m0,    [r4 + 20],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 6],      1
+    pinsrb        m2,    [r4 + 8],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1190 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 3],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 11],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1191 * 16],     m4
+
+    ; mode 20 [row 20]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1192 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1193 * 16],     m4
+
+    ; mode 20 [row 21]
+    movu          m6,    [r5 + 18 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 20],     1
+    pinsrb        m0,    [r4 + 21],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 8],      1
+    pinsrb        m2,    [r4 + 9],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1194 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 2],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 10],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1195 * 16],     m4
+
+    ; mode 20 [row 22]
+    movu          m6,    [r5 + 29 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 21],     1
+    pinsrb        m0,    [r4 + 23],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 +  9],      1
+    pinsrb        m2,    [r4 + 11],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1196 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 1],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 9],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1197 * 16],     m4
+
+    ; mode 20 [row 23]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1198 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1199 * 16],     m4
+
+    ; mode 20 [row 24]
+    movu          m6,    [r5 + 19 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 23],     1
+    pinsrb        m0,    [r4 + 24],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 11],      1
+    pinsrb        m2,    [r4 + 12],      0
+    pmaddubsw     m5,    m2,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1200 * 16],      m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 0],      0
+    pmaddubsw     m4,    m1,             m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 8],       0
+    pmaddubsw     m5,    m3,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1201 * 16],      m4
+
+    ; mode 20 [row 25]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 24],      1
+    pinsrb        m0,    [r4 + 26],      0
+    pmaddubsw     m4,    m0,             m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 12],      1
+    pinsrb        m2,    [r4 + 14],      0
+    pmaddubsw     m5,    m2,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1202 * 16],      m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 0],       1
+    pinsrb        m1,    [r4 + 2],       0
+    pmaddubsw     m4,    m1,             m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 7],      0
+    pmaddubsw     m5,    m3,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1203 * 16],      m4
+
+    ; mode 20 [row 26]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m4,    m0,             m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1204 * 16],      m4
+    pmaddubsw     m4,    m1,             m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1205 * 16],      m4
+
+    ; mode 20 [row 27]
+    movu          m6,    [r5 + 20 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 26],      1
+    pinsrb        m0,    [r4 + 27],      0
+    pmaddubsw     m4,    m0,             m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 14],      1
+    pinsrb        m2,    [r4 + 15],      0
+    pmaddubsw     m5,    m2,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1206 * 16],      m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 2],       1
+    pinsrb        m1,    [r4 + 3],       0
+    pmaddubsw     m4,    m1,             m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 6],      0
+    pmaddubsw     m5,    m3,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1207 * 16],      m4
+
+    ; mode 20 [row 28]
+    movu          m6,    [r5 + 31 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 27],      1
+    pinsrb        m0,    [r4 + 29],      0
+    pmaddubsw     m4,    m0,             m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 15],      1
+    pinsrb        m2,    [r4 + 17],      0
+    pmaddubsw     m5,    m2,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1208 * 16],      m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 3],       1
+    pinsrb        m1,    [r4 + 5],       0
+    pmaddubsw     m4,    m1,             m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 5],      0
+    pmaddubsw     m5,    m3,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1209 * 16],      m4
+
+    ; mode 20 [row 29]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m4,    m0,             m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1210 * 16],      m4
+    pmaddubsw     m4,    m1,             m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1211 * 16],      m4
+
+    ; mode 20 [row 30]
+    movu          m6,    [r5 + 21 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 29],      1
+    pinsrb        m0,    [r4 + 30],      0
+    pmaddubsw     m4,    m0,             m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 17],      1
+    pinsrb        m2,    [r4 + 18],      0
+    pmaddubsw     m5,    m2,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1212 * 16],      m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r4 + 5],       1
+    pinsrb        m1,    [r4 + 6],       0
+    pmaddubsw     m4,    m1,             m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 4],      0
+    pmaddubsw     m5,    m3,             m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1213 * 16],      m4
+
+    ; mode 20 [row 31]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1214 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1214 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1215 * 16],     m5
+    pshufb        m5,    m3,            [tab_S2]
+    movh          [r0 + 1215 * 16 + 8], m5
+
+    ; mode 21 [row 0]
+    movu          m6,    [r5 + 15 * 16]
+    movu          m0,    [r3         ]
+    movu          m1,    [r3 + 1     ]
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,            m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r3 + 8]
+    movu          m3,    [r3 + 9]
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,            m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,    m3
+    movu          [r0 + 1216 * 16],     m1
+
+    movu          m1,    [r3 + 16]
+    movu          m3,    [r3 + 17]
+    punpcklbw     m1,    m3
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    movu          m3,    [r3 + 24]
+    movu          m5,    [r3 + 25]
+    punpcklbw     m3,    m5
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1217 * 16],     m4
+
+    ; mode 21 [row 1]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 0],      1
+    pinsrb        m0,    [r4 + 2],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 7],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1218 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 15],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 23],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1219 * 16],     m4
+
+    ; mode 21 [row 2]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1220 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1221 * 16],     m4
+
+    ; mode 21 [row 3]
+    movu          m6,    [r5 + 28 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 2],      1
+    pinsrb        m0,    [r4 + 4],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 6],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1222 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 14],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 22],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1223 * 16],     m4
+
+    ; mode 21 [row 4]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1224 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1225 * 16],     m4
+
+    ; mode 21 [row 5]
+    movu          m6,    [r5 + 26 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 4],      1
+    pinsrb        m0,    [r4 + 6],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1226 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 13],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 21],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1227 * 16],     m4
+
+    ; mode 21 [row 6]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1228 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1229 * 16],     m4
+
+    ; mode 21 [row 7]
+    movu          m6,    [r5 + 24 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 6],      1
+    pinsrb        m0,    [r4 + 8],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 4],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1230 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 12],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 20],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1231 * 16],     m4
+
+    ; mode 21 [row 8]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1232 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1233 * 16],     m4
+
+    ; mode 21 [row 9]
+    movu          m6,    [r5 + 22 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 8],      1
+    pinsrb        m0,    [r4 + 9],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 3],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1234 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 11],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 19],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1235 * 16],     m4
+
+    ; mode 21 [row 10]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1236 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1237 * 16],     m4
+
+    ; mode 21 [row 11]
+    movu          m6,    [r5 + 20 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 +  9],     1
+    pinsrb        m0,    [r4 + 11],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 2],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1238 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 10],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 18],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1239 * 16],     m4
+
+    ; mode 21 [row 12]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1240 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1241 * 16],     m4
+
+    ; mode 21 [row 13]
+    movu          m6,    [r5 + 18 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 11],     1
+    pinsrb        m0,    [r4 + 13],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 1],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1242 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 9],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 17],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1243 * 16],     m4
+
+    ; mode 21 [row 14]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1244 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1245 * 16],     m4
+
+    ; mode 21 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 13],     1
+    pinsrb        m0,    [r4 + 15],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 0],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1246 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 8],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 16],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1247 * 16],     m4
+
+    ; mode 21 [row 16]
+    movu          m6,    [r5 + 31 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 15],     1
+    pinsrb        m0,    [r4 + 17],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 0],      1
+    pinsrb        m2,    [r4 + 2],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1248 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 7],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 15],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1249 * 16],     m4
+
+    ; mode 21 [row 17]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1250 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1251 * 16],     m4
+
+    ; mode 21 [row 18]
+    movu          m6,    [r5 + 29 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 17],     1
+    pinsrb        m0,    [r4 + 19],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 2],      1
+    pinsrb        m2,    [r4 + 4],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1252 * 16],     m4
+    pslldq        m1,    2
+    pinsrb        m1,    [r3 + 7],      1
+    pinsrb        m1,    [r3 + 6],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrb        m3,    [r3 + 15],     1
+    pinsrb        m3,    [r3 + 14],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1253 * 16],     m4
+
+    ; mode 21 [row 19]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1254 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1255 * 16],     m4
+
+    ; mode 21 [row 20]
+    movu          m6,    [r5 + 27 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 19],     1
+    pinsrb        m0,    [r4 + 21],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 4],      1
+    pinsrb        m2,    [r4 + 6],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1256 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 5],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 13],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1257 * 16],     m4
+
+    ; mode 21 [row 21]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1258 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1259 * 16],     m4
+
+    ; mode 21 [row 22]
+    movu          m6,    [r5 + 25 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 21],     1
+    pinsrb        m0,    [r4 + 23],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 6],      1
+    pinsrb        m2,    [r4 + 8],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1260 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 4],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 12],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1261 * 16],     m4
+
+    ; mode 21 [row 23]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1262 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1263 * 16],     m4
+
+    ; mode 21 [row 24]
+    movu          m6,    [r5 + 23 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 23],     1
+    pinsrb        m0,    [r4 + 24],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 8],      1
+    pinsrb        m2,    [r4 + 9],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1264 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 3],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 11],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1265 * 16],     m4
+
+    ; mode 21 [row 25]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1266 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1267 * 16],     m4
+
+    ; mode 21 [row 26]
+    movu          m6,    [r5 + 21 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 24],     1
+    pinsrb        m0,    [r4 + 26],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 +  9],      1
+    pinsrb        m2,    [r4 + 11],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1268 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 2],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 10],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1269 * 16],     m4
+
+    ; mode 21 [row 27]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1270 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1271 * 16],     m4
+
+    ; mode 21 [row 28]
+    movu          m6,    [r5 + 19 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 26],     1
+    pinsrb        m0,    [r4 + 28],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 11],      1
+    pinsrb        m2,    [r4 + 13],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1272 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 1],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 9],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1273 * 16],     m4
+
+    ; mode 21 [row 29]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1274 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1275 * 16],     m4
+
+    ; mode 21 [row 30]
+    movu          m6,    [r5 + 17 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 28],     1
+    pinsrb        m0,    [r4 + 30],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 13],     1
+    pinsrb        m2,    [r4 + 15],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1276 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 0],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 8],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1277 * 16],     m4
+
+    ; mode21 [row 31]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1278 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1278 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1279 * 16],     m5
+    pshufb        m5,    m3,            [tab_S2]
+    movh          [r0 + 1279 * 16 + 8], m5
+
+    ; mode 22 [row 0]
+    movu          m6,    [r5 + 19 * 16]
+    movu          m0,    [r3          ]
+    movu          m1,    [r3 + 1      ]
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,            m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r3 + 8]
+    movu          m3,    [r3 + 9]
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,            m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,    m3
+    movu          [r0 + 1280 * 16],     m1
+
+    movu          m1,    [r3 + 16]
+    movu          m3,    [r3 + 17]
+    punpcklbw     m1,    m3
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    movu          m3,    [r3 + 24]
+    movu          m5,    [r3 + 25]
+    punpcklbw     m3,    m5
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1281 * 16],     m4
+
+    ; mode 22 [row 1]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1282 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1283 * 16],     m4
+
+    ; mode 22 [row 2]
+    movu          m6,    [r5 + 25 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 0],      1
+    pinsrb        m0,    [r4 + 2],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 7],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1284 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 15],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 23],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1285 * 16],     m4
+
+    ; mode 22 [row 3]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1286 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1287 * 16],     m4
+
+    ; mode 22 [row 4]
+    movu          m6,    [r5 + 31 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 2],      1
+    pinsrb        m0,    [r4 + 5],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 6],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1288 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 14],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 22],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1289 * 16],     m4
+
+    ; mode 22 [row 5]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1290 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1291 * 16],     m4
+
+    ; mode 22 [row 6]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1292 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1293 * 16],     m4
+
+    ; mode 22 [row 7]
+    movu          m6,    [r5 + 24 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 5],      1
+    pinsrb        m0,    [r4 + 7],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1294 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 13],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 21],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1295 * 16],     m4
+
+    ; mode 22 [row 8]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1296 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1297 * 16],     m4
+
+    ; mode 22 [row 9]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 +  7],      1
+    pinsrb        m0,    [r4 + 10],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 4],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1298 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 12],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 20],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1299 * 16],     m4
+
+    ; mode 22 [row 10]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1300 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1301 * 16],     m4
+
+    ; mode 22 [row 11]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1302 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1303 * 16],     m4
+
+    ; mode 22 [row 12]
+    movu          m6,    [r5 + 23 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 10],     1
+    pinsrb        m0,    [r4 + 12],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 3],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1304 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 11],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 19],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1305 * 16],     m4
+
+    ; mode 22 [row 13]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1306 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1307 * 16],     m4
+
+    ; mode 22 [row 14]
+    movu          m6,    [r5 + 29 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 12],     1
+    pinsrb        m0,    [r4 + 15],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 2],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1308 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 10],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 18],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1309 * 16],     m4
+
+    ; mode 22 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1310 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1311 * 16],     m4
+
+    ; mode 22 [row 16]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1312 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1313 * 16],     m4
+
+    ; mode 22 [row 17]
+    movu          m6,    [r5 + 22 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 15],     1
+    pinsrb        m0,    [r4 + 17],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 1],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1314 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 9],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 17],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1315 * 16],     m4
+
+    ; mode 22 [row 18]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1316 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1317 * 16],     m4
+
+    ; mode 22 [row 19]
+    movu          m6,    [r5 + 28 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 17],     1
+    pinsrb        m0,    [r4 + 20],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 0],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1318 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 8],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 16],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1319 * 16],     m4
+
+    ; mode 22 [row 20]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1320 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1321 * 16],     m4
+
+    ; mode 22 [row 21]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1322 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1323 * 16],     m4
+
+    ; mode 22 [row 22]
+    movu          m6,    [r5 + 21 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 20],     1
+    pinsrb        m0,    [r4 + 22],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 0],      1
+    pinsrb        m2,    [r4 + 2],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1324 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 7],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 15],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1325 * 16],     m4
+
+    ; mode 22 [row 23]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1326 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1327 * 16],     m4
+
+    ; mode 22 [row 24]
+    movu          m6,    [r5 + 27 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 22],     1
+    pinsrb        m0,    [r4 + 25],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 2],      1
+    pinsrb        m2,    [r4 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1328 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 6],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 14],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1329 * 16],     m4
+
+    ; mode 22 [row 25]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1330 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1331 * 16],     m4
+
+    ; mode 22 [row 26]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1332 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1333 * 16],     m4
+
+    ; mode 22 [row 27]
+    movu          m6,    [r5 + 20 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 25],     1
+    pinsrb        m0,    [r4 + 27],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 + 5],      1
+    pinsrb        m2,    [r4 + 7],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1334 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 +  5],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 13],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1335 * 16],     m4
+
+    ; mode 22 [row 28]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1336 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1337 * 16],     m4
+
+    ; mode 22 [row 29]
+    movu          m6,    [r5 + 26 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 27],     1
+    pinsrb        m0,    [r4 + 30],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrb        m2,    [r4 +  7],     1
+    pinsrb        m2,    [r4 + 10],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1338 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 +  4],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 12],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1339 * 16],     m4
+
+    ; mode 22 [row 30]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1340 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1341 * 16],     m4
+
+    ; mode 22 [row 31]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1342 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1342 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1343 * 16],     m5
+    pshufb        m5,    m3,            [tab_S2]
+    movh          [r0 + 1343 * 16 + 8], m5
+
+    ; mode 23 [row 0]
+    movu          m6,    [r5 + 23 * 16]
+    movu          m0,    [r3          ]
+    movu          m1,    [r3 + 1      ]
+    punpcklbw     m0,    m1
+    pmaddubsw     m1,    m0,            m6
+    pmulhrsw      m1,    m7
+    movu          m2,    [r3 + 8]
+    movu          m3,    [r3 + 9]
+    punpcklbw     m2,    m3
+    pmaddubsw     m3,    m2,            m6
+    pmulhrsw      m3,    m7
+    packuswb      m1,    m3
+    movu          [r0 + 1344 * 16],     m1
+
+    movu          m1,    [r3 + 16]
+    movu          m3,    [r3 + 17]
+    punpcklbw     m1,    m3
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    movu          m3,    [r3 + 24]
+    movu          m5,    [r3 + 25]
+    punpcklbw     m3,    m5
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1345 * 16],     m4
+
+    ; mode 23 [row 1]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1346 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1347 * 16],     m4
+
+    ; mode 23 [row 2]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1348 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1349 * 16],     m4
+
+    ; mode 23 [row 3]
+    movu          m6,    [r5 + 28 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 0],      1
+    pinsrb        m0,    [r4 + 4],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 7],     0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1350 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 15],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 23],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1351 * 16],     m4
+
+    ; mode 23 [row 4]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1352 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1353 * 16],     m4
+
+    ; mode 23 [row 5]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1354 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1355 * 16],     m4
+
+    ; mode 23 [row 6]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1356 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1357 * 16],     m4
+
+    ; mode 23 [row 7]
+    movu          m6,    [r5 + 24 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 4],      1
+    pinsrb        m0,    [r4 + 7],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 6],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1358 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 14],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 22],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1359 * 16],     m4
+
+    ; mode 23 [row 8]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1360 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1361 * 16],     m4
+
+    ; mode 23 [row 9]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1362 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1363 * 16],     m4
+
+    ; mode 23 [row 10]
+    movu          m6,    [r5 + 29 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 +  7],      1
+    pinsrb        m0,    [r4 + 11],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1364 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 13],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 21],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1365 * 16],     m4
+
+    ; mode 23 [row 11]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1366 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1367 * 16],     m4
+
+    ; mode 23 [row 12]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1368 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1369 * 16],     m4
+
+    ; mode 23 [row 13]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1370 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1371 * 16],     m4
+
+    ; mode 23 [row 14]
+    movu          m6,    [r5 + 25 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 11],      1
+    pinsrb        m0,    [r4 + 14],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 4],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1372 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 12],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 20],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1373 * 16],     m4
+
+    ; mode 23 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1374 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1375 * 16],     m4
+
+    ; mode 23 [row 16]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1376 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1377 * 16],     m4
+
+    ; mode 23 [row 17]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 14],      1
+    pinsrb        m0,    [r4 + 18],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 3],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1378 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 11],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 19],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1379 * 16],     m4
+
+    ; mode 23 [row 18]
+    movu          m6,    [r5 + 21 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1380 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1381 * 16],     m4
+
+    ; mode 23 [row 19]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1382 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1383 * 16],     m4
+
+    ; mode 23 [row 20]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1384 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1385 * 16],     m4
+
+    ; mode 23 [row 21]
+    movu          m6,    [r5 + 26 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 18],      1
+    pinsrb        m0,    [r4 + 21],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 2],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1386 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 10],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 18],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1387 * 16],     m4
+
+    ; mode 23 [row 22]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1388 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1389 * 16],     m4
+
+    ; mode 23 [row 23]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1390 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1391 * 16],     m4
+
+    ; mode 23 [row 24]
+    movu          m6,    [r5 + 31 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 21],      1
+    pinsrb        m0,    [r4 + 25],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 1],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1392 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 9],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 17],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1393 * 16],     m4
+
+    ; mode 23 [row 25]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1394 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1395 * 16],     m4
+
+    ; mode 23 [row 26]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1396 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1397 * 16],     m4
+
+    ; mode 23 [row 27]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1398 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1399 * 16],     m4
+
+    ; mode 23 [row 28]
+    movu          m6,    [r5 + 27 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 25],      1
+    pinsrb        m0,    [r4 + 28],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 0],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1400 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 8],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 16],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1401 * 16],     m4
+
+    ; mode 23 [row 29]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1402 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1403 * 16],     m4
+
+    ; mode 23 [row 30]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1404 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1405 * 16],     m4
+
+    ; mode 23 [row 31]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1406 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1406 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1407 * 16],     m5
+    pshufb        m5,    m3,            [tab_S2]
+    movh          [r0 + 1407 * 16 + 8], m5
+
+    ; mode 24 [row 0]
+    movu          m6,    [r5 + 27 * 16]
+    movu          m0,    [r3          ]
+    movu          m1,    [r3 + 1      ]
+    punpcklbw     m0,    m1
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    movu          m2,    [r3 + 8]
+    movu          m3,    [r3 + 9]
+    punpcklbw     m2,    m3
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1408 * 16],     m4
+
+    movu          m1,    [r3 + 16]
+    movu          m3,    [r3 + 17]
+    punpcklbw     m1,    m3
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    movu          m3,    [r3 + 24]
+    movu          m5,    [r3 + 25]
+    punpcklbw     m3,    m5
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1409 * 16],     m4
+
+    ; mode 24 [row 1]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1410 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1411 * 16],     m4
+
+    ; mode 24 [row 2]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1412 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1413 * 16],     m4
+
+    ; mode 24 [row 3]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1414 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1415 * 16],     m4
+
+    ; mode 24 [row 4]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1416 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1417 * 16],     m4
+
+    ; mode 24 [row 5]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1418 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1419 * 16],     m4
+
+    ; mode 24 [row 6]
+    movu          m6,    [r5 + 29 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 0],      1
+    pinsrb        m0,    [r4 + 6],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 7],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1420 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 15],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 23],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1421 * 16],     m4
+
+    ; mode 24 [row 7]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1422 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1423 * 16],     m4
+
+    ; mode 24 [row 8]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1424 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1425 * 16],     m4
+
+    ; mode 24 [row 9]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1426 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1427 * 16],     m4
+
+    ; mode 24 [row 10]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1428 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1429 * 16],     m4
+
+    ; mode 24 [row 11]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1430 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1431 * 16],     m4
+
+    ; mode 24 [row 12]
+    movu          m6,    [r5 + 31 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 +  6],      1
+    pinsrb        m0,    [r4 + 13],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 6],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1432 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 14],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 22],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1433 * 16],     m4
+
+    ; mode 24 [row 13]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1434 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1435 * 16],     m4
+
+    ; mode 24 [row 14]
+    movu          m6,    [r5 + 21 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1436 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1437 * 16],     m4
+
+    ; mode 24 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1438 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1439 * 16],     m4
+
+    ; mode 24 [row 16]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1440 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1441 * 16],     m4
+
+    ; mode 24 [row 17]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1442 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1443 * 16],     m4
+
+    ; mode 24 [row 18]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1444 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1445 * 16],     m4
+
+    ; mode 24 [row 19]
+    movu          m6,    [r5 + 28 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 13],      1
+    pinsrb        m0,    [r4 + 19],      0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 5],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1446 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 13],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 21],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1447 * 16],     m4
+
+    ; mode 24 [row 20]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1448 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1449 * 16],     m4
+
+    ; mode 24 [row 21]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1450 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1451 * 16],     m4
+
+    ; mode 24 [row 22]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1452 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1453 * 16],     m4
+
+    ; mode 24 [row 23]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1454 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1455 * 16],     m4
+
+    ; mode 24 [row 24]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1456 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1457 * 16],     m4
+
+    ; mode 24 [row 25]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 19],     1
+    pinsrb        m0,    [r4 + 26],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 4],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1458 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 12],      0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 20],      0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1459 * 16],     m4
+
+    ; mode 24 [row 26]
+    movu          m6,    [r5 + 25 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1460 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1461 * 16],     m4
+
+    ; mode 24 [row 27]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1462 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1463 * 16],     m4
+
+    ; mode 24 [row 28]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1464 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1465 * 16],     m4
+
+    ; mode 24 [row 29]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1466 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1467 * 16],     m4
+
+    ; mode 24 [row 30]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1468 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1469 * 16],     m4
+
+    ; mode 24 [row 31]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1470 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1470 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1471 * 16],     m5
+    pshufb        m5,    m3,            [tab_S2]
+    movh          [r0 + 1471 * 16 + 8], m5
+
+    ; mode 25 [row 0]
+    movu          m6,    [r5 + 30 * 16]
+    movu          m0,    [r3          ]
+    movu          m1,    [r3 + 1      ]
+    punpcklbw     m0,    m1
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    movu          m2,    [r3 + 8]
+    movu          m3,    [r3 + 9]
+    punpcklbw     m2,    m3
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1472 * 16],     m4
+
+    movu          m1,    [r3 + 16]
+    movu          m3,    [r3 + 17]
+    punpcklbw     m1,    m3
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    movu          m3,    [r3 + 24]
+    movu          m5,    [r3 + 25]
+    punpcklbw     m3,    m5
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1473 * 16],     m4
+
+    ; mode 25 [row 1]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1474 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1475 * 16],     m4
+
+    ; mode 25 [row 2]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1476 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1477 * 16],     m4
+
+    ; mode 25 [row 3]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1478 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1479 * 16],     m4
+
+    ; mode 25 [row 4]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1480 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1481 * 16],     m4
+
+    ; mode 25 [row 5]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1482 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1483 * 16],     m4
+
+    ; mode 25 [row 6]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1484 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1485 * 16],     m4
+
+    ; mode 25 [row 7]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1486 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1487 * 16],     m4
+
+    ; mode 25 [row 8]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1488 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1489 * 16],     m4
+
+    ; mode 25 [row 9]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1490 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1491 * 16],     m4
+
+    ; mode 25 [row 10]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1492 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1493 * 16],     m4
+
+    ; mode 25 [row 11]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1494 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1495 * 16],     m4
+
+    ; mode 25 [row 12]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1496 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1497 * 16],     m4
+
+    ; mode 25 [row 13]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1498 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1499 * 16],     m4
+
+    ; mode 25 [row 14]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1500 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1501 * 16],     m4
+
+    ; mode 25 [row 15]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1502 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1502 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1503 * 16],     m5
+    pshufb        m5,    m3,            [tab_S2]
+    movh          [r0 + 1503 * 16 + 8], m5
+
+    ; mode 25 [row 16]
+    movu          m6,    [r5 + 30 * 16]
+    pslldq        m0,    2
+    pinsrb        m0,    [r4 + 0],      1
+    pinsrb        m0,    [r4 + 16],     0
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m2,    2
+    pinsrw        m2,    [r3 + 7],      0
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1504 * 16],     m4
+    pslldq        m1,    2
+    pinsrw        m1,    [r3 + 15],     0
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pslldq        m3,    2
+    pinsrw        m3,    [r3 + 23],     0
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1505 * 16],     m4
+
+    ; mode 25 [row 17]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1506 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1507 * 16],     m4
+
+    ; mode 25 [row 18]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1508 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1509 * 16],     m4
+
+    ; mode 25 [row 19]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1510 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1511 * 16],     m4
+
+    ; mode 25 [row 20]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1512 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1513 * 16],     m4
+
+    ; mode 25 [row 21]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1514 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1515 * 16],     m4
+
+    ; mode 25 [row 22]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1516 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1517 * 16],     m4
+
+    ; mode 25 [row 23]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1518 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1519 * 16],     m4
+
+    ; mode 25 [row 24]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1520 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1521 * 16],     m4
+
+    ; mode 25 [row 25]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1522 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1523 * 16],     m4
+
+    ; mode 25 [row 26]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1524 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1525 * 16],     m4
+
+    ; mode 25 [row 27]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1526 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1527 * 16],     m4
+
+    ; mode 25 [row 28]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1528 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1529 * 16],     m4
+
+    ; mode 25 [row 29]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1530 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1531 * 16],     m4
+
+    ; mode 25 [row 30]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1532 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1533 * 16],     m4
+
+    ; mode 25 [row 31]
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1534 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1534 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1535 * 16],     m5
+    pshufb        m5,    m3,            [tab_S2]
+    movh          [r0 + 1535 * 16 + 8], m5
+
+    ; mode 26
+    movu                      m1,   [r1 +  1]
+    movu                      m2,   [r1 + 17]
+    movu         [r0 + 1536 * 16],  m1
+    movu         [r0 + 1537 * 16],  m2
+    movu         [r0 + 1538 * 16],  m1
+    movu         [r0 + 1539 * 16],  m2
+    movu         [r0 + 1540 * 16],  m1
+    movu         [r0 + 1541 * 16],  m2
+    movu         [r0 + 1542 * 16],  m1
+    movu         [r0 + 1543 * 16],  m2
+    movu         [r0 + 1544 * 16],  m1
+    movu         [r0 + 1545 * 16],  m2
+    movu         [r0 + 1546 * 16],  m1
+    movu         [r0 + 1547 * 16],  m2
+    movu         [r0 + 1548 * 16],  m1
+    movu         [r0 + 1549 * 16],  m2
+    movu         [r0 + 1550 * 16],  m1
+    movu         [r0 + 1551 * 16],  m2
+
+    movu         [r0 + 1552 * 16],  m1
+    movu         [r0 + 1553 * 16],  m2
+    movu         [r0 + 1554 * 16],  m1
+    movu         [r0 + 1555 * 16],  m2
+    movu         [r0 + 1556 * 16],  m1
+    movu         [r0 + 1557 * 16],  m2
+    movu         [r0 + 1558 * 16],  m1
+    movu         [r0 + 1559 * 16],  m2
+    movu         [r0 + 1560 * 16],  m1
+    movu         [r0 + 1561 * 16],  m2
+    movu         [r0 + 1562 * 16],  m1
+    movu         [r0 + 1563 * 16],  m2
+    movu         [r0 + 1564 * 16],  m1
+    movu         [r0 + 1565 * 16],  m2
+    movu         [r0 + 1566 * 16],  m1
+    movu         [r0 + 1567 * 16],  m2
+
+    movu         [r0 + 1568 * 16],  m1
+    movu         [r0 + 1569 * 16],  m2
+    movu         [r0 + 1570 * 16],  m1
+    movu         [r0 + 1571 * 16],  m2
+    movu         [r0 + 1572 * 16],  m1
+    movu         [r0 + 1573 * 16],  m2
+    movu         [r0 + 1574 * 16],  m1
+    movu         [r0 + 1575 * 16],  m2
+    movu         [r0 + 1576 * 16],  m1
+    movu         [r0 + 1577 * 16],  m2
+    movu         [r0 + 1578 * 16],  m1
+    movu         [r0 + 1579 * 16],  m2
+    movu         [r0 + 1580 * 16],  m1
+    movu         [r0 + 1581 * 16],  m2
+    movu         [r0 + 1582 * 16],  m1
+    movu         [r0 + 1583 * 16],  m2
+
+    movu         [r0 + 1584 * 16],  m1
+    movu         [r0 + 1585 * 16],  m2
+    movu         [r0 + 1586 * 16],  m1
+    movu         [r0 + 1587 * 16],  m2
+    movu         [r0 + 1588 * 16],  m1
+    movu         [r0 + 1589 * 16],  m2
+    movu         [r0 + 1590 * 16],  m1
+    movu         [r0 + 1591 * 16],  m2
+    movu         [r0 + 1592 * 16],  m1
+    movu         [r0 + 1593 * 16],  m2
+    movu         [r0 + 1594 * 16],  m1
+    movu         [r0 + 1595 * 16],  m2
+    movu         [r0 + 1596 * 16],  m1
+    movu         [r0 + 1597 * 16],  m2
+    movu         [r0 + 1598 * 16],  m1
+    movu         [r0 + 1599 * 16],  m2
+
+    ; mode 27 [row 0]
+    movu          m6,    [r5 + 2 * 16]
+    movu          m0,    [r3 + 1     ]
+    movu          m1,    [r3 + 2     ]
+    punpcklbw     m0,    m1
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    movu          m2,    [r3 +  9]
+    movu          m3,    [r3 + 10]
+    punpcklbw     m2,    m3
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1600 * 16],     m4
+
+    movu          m1,    [r3 + 17]
+    movu          m3,    [r3 + 18]
+    punpcklbw     m1,    m3
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    movu          m3,    [r3 + 25]
+    movu          m5,    [r3 + 26]
+    punpcklbw     m3,    m5
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1601 * 16],     m4
+
+    ; mode 27 [row 1]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1602 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1603 * 16],     m4
+
+    ; mode 27 [row 2]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1604 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1605 * 16],     m4
+
+    ; mode 27 [row 3]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1606 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1607 * 16],     m4
+
+    ; mode 27 [row 4]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1608 * 16],     m4
+
+    ; mode 28 [row 1 - first half]
+    movu          [r0 + 1666 * 16],     m4
+
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1609 * 16],     m4
+
+    ; mode 28 [row 1 - second half]
+    movu          [r0 + 1667 * 16],     m4
+
+    ; mode 27 [row 5]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1610 * 16],     m4
+
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1611 * 16],     m4
+
+    ; mode 27 [row 6]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1612 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1613 * 16],     m4
+
+    ; mode 27 [row 7]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1614 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1615 * 16],     m4
+
+    ; mode 27 [row 8]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1616 * 16],     m4
+
+    ; mode 29 [row 1 - first half]
+    movu          [r0 + 1730 * 16],     m4
+
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1617 * 16],     m4
+
+    ; mode 29 [row 1 - second half]
+    movu          [r0 + 1731 * 16],     m4
+
+    ; mode 27 [row 9]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1618 * 16],     m4
+
+    ; mode 28 [row 3 - first half]
+    movu          [r0 + 1670 * 16],     m4
+
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1619 * 16],     m4
+
+    ; mode 28 [row 3 - second half]
+    movu          [r0 + 1671 * 16],     m4
+
+    ; mode 27 [row 10]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1620 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1621 * 16],     m4
+
+    ; mode 27 [row 11]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1622 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1623 * 16],     m4
+
+    ; mode 27 [row 12]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1624 * 16],     m4
+
+    ; mode 30 [row 1 - first half]
+    movu          [r0 + 1794 * 16],     m4
+
+    ; mode 33 [row 0 - first half]
+    movu          [r0 + 1984 * 16],     m4
+
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1625 * 16],     m4
+
+    ; mode 30 [row 1 - second half]
+    movu          [r0 + 1795 * 16],     m4
+
+    ; mode 33 [row 0 - second half]
+    movu          [r0 + 1985 * 16],     m4
+
+    ; mode 27 [row 13]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1626 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1627 * 16],     m4
+
+    ; mode 27 [row 14]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1628 * 16],     m4
+
+    ; mode 28 [row 5 - first half]
+    movu          [r0 + 1674 * 16],     m4
+
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1629 * 16],     m4
+
+    ; mode 28 [row 5 - second half]
+    movu          [r0 + 1675 * 16],     m4
+
+    ; mode 28 [row 0]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1664 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1665 * 16],     m4
+
+    ; mode 28 [row 2]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1668 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1669 * 16],     m4
+
+    ; mode 28 [row 4]
+    movu          m6,    [r5 + 25 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1672 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1673 * 16],     m4
+
+    ; mode 30 [row 0]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1792 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1793 * 16],     m4
+
+    ; mode 29 [row 0]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1728 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1729 * 16],     m4
+
+    ; mode 29 [row 2]
+    movu          m6,    [r5 + 27 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1732 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1733 * 16],     m4
+
+    ; mode 31 [row 0]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1856 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1857 * 16],     m4
+
+    ; mode 32 [row 0]
+    movu          m6,    [r5 + 21 * 16]
+    pmaddubsw     m4,    m0,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1920 * 16],     m4
+    pmaddubsw     m4,    m1,            m6
+    pmulhrsw      m4,    m7
+    pmaddubsw     m5,    m3,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m4,    m5
+    movu          [r0 + 1921 * 16],     m4
+
+    ; mode 27 [row 15]
+    movu          m0,    [r3 + 2]
+    movd          m1,    [r3 + 3]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    movu          m2,    [r3 + 10]
+    movd          m3,    [r3 + 11]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    movu          m1,    [r3 + 18]
+    movd          m3,    [r3 + 19]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    movu          m4,    [r3 + 26]
+    movd          m5,    [r3 + 27]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1630 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1630 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1631 * 16],     m5
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 1631 * 16 + 8], m5
+
+    ; mode 27 [row 16]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1632 * 16],     m3
+
+    ; mode 31 [row 1 - first half]
+    movu          [r0 + 1858 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1633 * 16],     m3
+
+    ; mode 31 [row 1 - second half]
+    movu          [r0 + 1859 * 16],     m3
+
+    ; mode 27 [row 17]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1634 * 16],     m3
+
+    ; mode 29 [row 3 - first half]
+    movu          [r0 + 1734 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1635 * 16],     m3
+
+    ; mode 29 [row 3 - second half]
+    movu          [r0 + 1735 * 16],     m3
+
+    ; mode 27 [row 18]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1636 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1637 * 16],     m3
+
+    ; mode 27 [row 19]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1638 * 16],     m3
+
+    ; mode 28 [row 7 - first half]
+    movu          [r0 + 1678 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1639 * 16],     m3
+
+    ; mode 28 [row 7 - second half]
+    movu          [r0 + 1679 * 16],     m3
+
+    ; mode 27 [row 20]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1640 * 16],     m3
+
+    ; mode 32 [row 1 - first half]
+    movu          [r0 + 1922 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1641 * 16],     m3
+
+    ; mode 32 [row 1 - second half]
+    movu          [r0 + 1923 * 16],     m3
+
+    ; mode 27 [row 21]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1642 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1643 * 16],     m3
+
+    ; mode 27 [row 22]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1644 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1645 * 16],     m3
+
+    ; mode 27 [row 23]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1646 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1647 * 16],     m3
+
+    ; mode 27 [row 24]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1648 * 16],     m3
+
+    ; mode 28 [row 9 - first half]
+    movu          [r0 + 1682 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1649 * 16],     m3
+
+    ; mode 28 [row 9 - second half]
+    movu          [r0 + 1683 * 16],     m3
+
+    ; mode 27 [row 25]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1650 * 16],     m3
+
+    ; mode 30 [row 3 - first half]
+    movu          [r0 + 1798 * 16],     m3
+
+    ; mode 33 [row 1 - first half]
+    movu          [r0 + 1986 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1651 * 16],     m3
+
+    ; mode 30 [row 3 - second half]
+    movu          [r0 + 1799 * 16],     m3
+
+    ; mode 33 [row 1 - second half]
+    movu          [r0 + 1987 * 16],     m3
+
+    ; mode 27 [row 26]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1652 * 16],     m3
+
+    ; mode 29 [row 5 - first half]
+    movu          [r0 + 1738 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1653 * 16],     m3
+
+    ; mode 29 [row 5 - second half]
+    movu          [r0 + 1739 * 16],     m3
+
+    ; mode 27 [row 27]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1654 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1655 * 16],     m3
+
+    ; mode 27 [row 28]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1656 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1657 * 16],     m3
+
+    ; mode 27 [row 29]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1658 * 16],     m3
+
+    ; mode 28 [row 11 - first half]
+    movu          [r0 + 1686 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1659 * 16],     m3
+
+    ; mode 28 [row 11 - second half]
+    movu          [r0 + 1687 * 16],     m3
+
+    ; mode 27 [row 30]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1660 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1661 * 16],     m3
+
+    ; mode 28 [row 6]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1676 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1677 * 16],     m3
+
+    ; mode 28 [row 8]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1680 * 16],     m3
+
+    ; mode 29 [row 4 - first half]
+    movu          [r0 + 1736 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1681 * 16],     m3
+
+    ; mode 29 [row 4 - second half]
+    movu          [r0 + 1737 * 16],     m3
+
+    ; mode 28 [row 10]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1684 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1685 * 16],     m3
+
+    ; mode 29 [row 6]
+    movu          m6,    [r5 + 31 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1740 * 16],     m3
+
+    ; mode 32 [row 2 - first half]
+    movu          [r0 + 1924 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1741 * 16],     m3
+
+    ; mode 32 [row 2 - second half]
+    movu          [r0 + 1925 * 16],     m3
+
+    ; mode 30 [row 2]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1796 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1797 * 16],     m3
+
+    ; mode 31 [row 2]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1860 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1861 * 16],     m3
+
+    ; mode 27 [row 31]
+    movu          m0,    [r3 + 3]
+    movd          m1,    [r3 + 4]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    movu          m2,    [r3 + 11]
+    movd          m3,    [r3 + 12]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    movu          m1,    [r3 + 19]
+    movd          m3,    [r3 + 20]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    movu          m4,    [r3 + 27]
+    movd          m5,    [r3 + 28]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1662 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1662 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1663 * 16],     m5
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 1663 * 16 + 8], m5
+
+    ; mode 28 [row 12]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1688 * 16],     m3
+
+    ; mode 30 [row 4 - first half]
+    movu          [r0 + 1800 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1689 * 16],     m3
+
+    ; mode 30 [row 4 - second half]
+    movu          [r0 + 1801 * 16],     m3
+
+    ; mode 28 [row 13]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1690 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1691 * 16],     m3
+
+    ; mode 28 [row 14]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1692 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1693 * 16],     m3
+
+    ; mode 28 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1694 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1695 * 16],     m3
+
+    ; mode 28 [row 16]
+    movu          m6,    [r5 + 21 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1696 * 16],     m3
+
+    ; mode 31 [row 4 - first half]
+    movu          [r0 + 1864 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1697 * 16],     m3
+
+    ; mode 31 [row 4 - second half]
+    movu          [r0 + 1865 * 16],     m3
+
+    ; mode 28 [row 17]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1698 * 16],     m3
+
+    ; mode 29 [row 9 - first half]
+    movu          [r0 + 1746 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1699 * 16],     m3
+
+    ; mode 29 [row 9 - second half]
+    movu          [r0 + 1747 * 16],     m3
+
+    ; mode 28 [row 18]
+    movu          m6,    [r5 + 31 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1700 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1701 * 16],     m3
+
+    ; mode 29 [row 7]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1742 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1743 * 16],     m3
+
+    ; mode 29 [row 8]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1744 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1745 * 16],     m3
+
+    ; mode 30 [row 5]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1802 * 16],     m3
+
+    ; mode 33 [row 2 - first half]
+    movu          [r0 + 1988 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1803 * 16],     m3
+
+    ; mode 33 [row 2 - second half]
+    movu          [r0 + 1989 * 16],     m3
+
+    ; mode 30 [row 6]
+    movu          m6,    [r5 + 27 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1804 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1805 * 16],     m3
+
+    ; mode 31 [row 3]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1862 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1863 * 16],     m3
+
+    ; mode 32 [row 3]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1926 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1927 * 16],     m3
+
+    ; mode 28 [row 19]
+    movu          m6,    [r5 + 4 * 16]
+    movu          m0,    [r3 + 4]
+    movd          m1,    [r3 + 5]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 12]
+    movd          m4,    [r3 + 13]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1702 * 16],     m3
+
+    movu          m1,    [r3 + 20]
+    movd          m3,    [r3 + 21]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 28]
+    movd          m5,    [r3 + 29]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1703 * 16],     m3
+
+    ; mode 28 [row 20]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1704 * 16],     m3
+
+    ; mode 32 [row 4 - first half]
+    movu          [r0 + 1928 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1705 * 16],     m3
+
+    ; mode 32 [row 4 - second half]
+    movu          [r0 + 1929 * 16],     m3
+
+    ; mode 28 [row 21]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1706 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1707 * 16],     m3
+
+    ; mode 28 [row 22]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1708 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1709 * 16],     m3
+
+    ; mode 28 [row 23]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1710 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1711 * 16],     m3
+
+    ; mode 28 [row 24]
+    movu          m6,    [r5 + 29 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1712 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1713 * 16],     m3
+
+    ; mode 29 [row 10]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1748 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1749 * 16],     m3
+
+    ; mode 29 [row 11]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1750 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1751 * 16],     m3
+
+    ; mode 29 [row 12]
+    movu          m6,    [r5 + 21 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1752 * 16],     m3
+
+    ; mode 30 [row 8 -first half]
+    movu          [r0 + 1808 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1753 * 16],     m3
+
+    ; mode 30 [row 8 -second half]
+    movu          [r0 + 1809 * 16],     m3
+
+    ; mode 29 [row 13]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1754 * 16],     m3
+
+    ; mode 32 [row 5 - first half]
+    movu          [r0 + 1930 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1755 * 16],     m3
+
+    ; mode 32 [row 5 - second half]
+    movu          [r0 + 1931 * 16],     m3
+
+    ; mode 30 [row 7]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1806 * 16],     m3
+
+    ; mode 33 [row 3 - first half]
+    movu          [r0 + 1990 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1807 * 16],     m3
+
+    ; mode 33 [row 3 - second half]
+    movu          [r0 + 1991 * 16],     m3
+
+    ; mode 31 [row 5]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1866 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1867 * 16],     m3
+
+    ; mode 31 [row 6]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1868 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1869 * 16],     m3
+
+    ; mode 28 [row 25]
+    movu          m6,    [r5 + 2 * 16]
+    movu          m0,    [r3 + 5]
+    movd          m1,    [r3 + 6]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 13]
+    movd          m4,    [r3 + 14]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1714 * 16],     m3
+
+    movu          m1,    [r3 + 21]
+    movd          m3,    [r3 + 22]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 29]
+    movd          m5,    [r3 + 30]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1715 * 16],     m3
+
+    ; mode 28 [row 26]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1716 * 16],     m3
+
+    ; mode 29 [row 14 - first half]
+    movu          [r0 + 1756 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1717 * 16],     m3
+
+    ; mode 29 [row 14 - second half]
+    movu          [r0 + 1757 * 16],     m3
+
+    ; mode 28 [row 27]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1718 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1719 * 16],     m3
+
+    ; mode 28 [row 28]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1720 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1721 * 16],     m3
+
+    ; mode 28 [row 29]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1722 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1723 * 16],     m3
+
+    ; mode 28 [row 30]
+    movu          m6,    [r5 + 27 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1724 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1725 * 16],     m3
+
+    ; mode 29 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1758 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1759 * 16],     m3
+
+    ; mode 29 [row 16]
+    movu          m6,    [r5 + 25 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1760 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1761 * 16],     m3
+
+    ; mode 30 [row 9]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1810 * 16],     m3
+
+    ; mode 33 [row 4 - first half]
+    movu          [r0 + 1992 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1811 * 16],     m3
+
+    ; mode 33 [row 4 - second half]
+    movu          [r0 + 1993 * 16],     m3
+
+    ; mode 30 [row 10]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1812 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1813 * 16],     m3
+
+    ; mode 31 [row 7]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1870 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1871 * 16],     m3
+
+    ; mode 31 [row 8]
+    movu          m6,    [r5 + 25 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1872 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1873 * 16],     m3
+
+    ; mode 32 [row 6]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1932 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1933 * 16],     m3
+
+    ; mode 30 [row 11]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1814 * 16],     m3
+
+    ; mode 33 [row 5 - first half]
+    movu          [r0 + 1994 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1815 * 16],     m3
+
+    ; mode 33 [row 5 - second half]
+    movu          [r0 + 1995 * 16],     m3
+
+    ; mode 28 [row 31]
+    movu          m0,    [r3 + 6]
+    movd          m1,    [r3 + 7]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    movu          m2,    [r3 + 14]
+    movd          m3,    [r3 + 15]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    movu          m1,    [r3 + 22]
+    movd          m3,    [r3 + 23]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    movu          m4,    [r3 + 30]
+    movd          m5,    [r3 + 31]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1726 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1726 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1727 * 16],     m5
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 1727 * 16 + 8], m5
+
+    ; mode 29 [row 17]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1762 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1763 * 16],     m3
+
+    ; mode 29 [row 18]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1764 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1765 * 16],     m3
+
+    ; mode 29 [row 19]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1766 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1767 * 16],     m3
+
+    ; mode 29 [row 20]
+    movu          m6,    [r5 + 29 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1768 * 16],     m3
+
+    ; mode 32 [row 8 - first half]
+    movu          [r0 + 1936 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1769 * 16],     m3
+
+    ; mode 32 [row 8 - second half]
+    movu          [r0 + 1937 * 16],     m3
+
+    ; mode 30 [row 12]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1816 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1817 * 16],     m3
+
+    ; mode 30 [row 13]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1818 * 16],     m3
+
+    ; mode 33 [row 6 - first half]
+    movu          [r0 + 1996 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1819 * 16],     m3
+
+    ; mode 33 [row 6 - second half]
+    movu          [r0 + 1997 * 16],     m3
+
+    ; mode 31 [row 9]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1874 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1875 * 16],     m3
+
+    ; mode 31 [row 10]
+    movu          m6,    [r5 + 27 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1876 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1877 * 16],     m3
+
+    ; mode 32 [row 7]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1934 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1935 * 16],     m3
+
+    ; mode 29 [row 21]
+    movu          m6,    [r5 + 6 * 16]
+    movu          m0,    [r3 + 7]
+    movd          m1,    [r3 + 8]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 15]
+    movd          m4,    [r3 + 16]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1770 * 16],     m3
+
+    movu          m1,    [r3 + 23]
+    movd          m3,    [r3 + 24]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 31]
+    movd          m5,    [r3 + 32]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1771 * 16],     m3
+
+    ; mode 29 [row 22]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1772 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1773 * 16],     m3
+
+    ; mode 29 [row 23]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1774 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1775 * 16],     m3
+
+    ; mode 30 [row 14]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1820 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1821 * 16],     m3
+
+    ; mode 30 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1822 * 16],     m3
+
+    ; mode 33 [row 7 - first half]
+    movu          [r0 + 1998 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1823 * 16],     m3
+
+    ; mode 33 [row 7 - second half]
+    movu          [r0 + 1999 * 16],     m3
+
+    ; mode 30 [row 16]
+    movu          m6,    [r5 + 29 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1824 * 16],     m3
+
+    ; mode 31 [row 12 - first half]
+    movu          [r0 + 1880 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1825 * 16],     m3
+
+    ; mode 31 [row 12 - second half]
+    movu          [r0 + 1881 * 16],     m3
+
+    ; mode 31 [row 11]
+    movu          m6,    [r5 + 12 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1878 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1879 * 16],     m3
+
+    ; mode 32 [row 9]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1938 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1939 * 16],     m3
+
+    ; mode 29 [row 24]
+    movu          m6,    [r5 + 1 * 16]
+    movu          m0,    [r3 + 8]
+    movd          m1,    [r3 + 9]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 16]
+    movd          m4,    [r3 + 17]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1776 * 16],     m3
+
+    movu          m1,    [r3 + 24]
+    movd          m3,    [r3 + 25]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 32]
+    movd          m5,    [r3 + 33]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1777 * 16],     m3
+
+    ; mode 29 [row 25]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1778 * 16],     m3
+
+    ; mode 30 [row 17 - first half]
+    movu          [r0 + 1826 * 16],     m3
+
+    ; mode 33 [row 8 - first half]
+    movu          [r0 + 2000 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1779 * 16],     m3
+
+    ; mode 30 [row 17 - second half]
+    movu          [r0 + 1827 * 16],     m3
+
+    ; mode 33 [row 8 - second half]
+    movu          [r0 + 2001 * 16],     m3
+
+    ; mode 29 [row 26]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1780 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1781 * 16],     m3
+
+    ; mode 29 [row 27]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1782 * 16],     m3
+
+    ; mode 32 [row 11 - first half]
+    movu          [r0 + 1942 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1783 * 16],     m3
+
+    ; mode 32 [row 11 - second half]
+    movu          [r0 + 1943 * 16],     m3
+
+    ; mode 30 [row 18]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1828 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1829 * 16],     m3
+
+    ; mode 31 [row 13]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1882 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1883 * 16],     m3
+
+    ; mode 31 [row 14]
+    movu          m6,    [r5 + 31 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1884 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1885 * 16],     m3
+
+    ; mode 32 [row 10]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1940 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1941 * 16],     m3
+
+    ; mode 29 [row 28]
+    movu          m6,    [r5 + 5 * 16]
+    movu          m0,    [r3 +  9]
+    movd          m1,    [r3 + 10]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 17]
+    movd          m4,    [r3 + 18]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1784 * 16],     m3
+
+    movu          m1,    [r3 + 25]
+    movd          m3,    [r3 + 26]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 33]
+    movd          m5,    [r3 + 34]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1785 * 16],     m3
+
+    ; mode 29 [row 29]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1786 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1787 * 16],     m3
+
+    ; mode 29 [row 30]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1788 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1789 * 16],     m3
+
+    ; mode 30 [row 19]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1830 * 16],     m3
+
+    ; mode 33 [row 9 - first half]
+    movu          [r0 + 2002 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1831 * 16],     m3
+
+    ; mode 33 [row 9 - second half]
+    movu          [r0 + 2003 * 16],     m3
+
+    ; mode 30 [row 20]
+    movu          m6,    [r5 + 17 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1832 * 16],     m3
+
+    ; mode 32 [row 12 - first half]
+    movu          [r0 + 1944 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1833 * 16],     m3
+
+    ; mode 32 [row 12 - second half]
+    movu          [r0 + 1945 * 16],     m3
+
+    ; mode 30 [row 21]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1834 * 16],     m3
+
+    ; mode 33 [row 10 - first half]
+    movu          [r0 + 2004 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1835 * 16],     m3
+
+    ; mode 33 [row 10 - second half]
+    movu          [r0 + 2005 * 16],     m3
+
+    ; mode 31 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1886 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1887 * 16],     m3
+
+    ; mode 29 [row 31]
+    movu          m0,    [r3 + 10]
+    movd          m1,    [r3 + 11]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    movu          m2,    [r3 + 18]
+    movd          m3,    [r3 + 19]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    movu          m1,    [r3 + 26]
+    movd          m3,    [r3 + 27]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    movu          m4,    [r3 + 34]
+    movd          m5,    [r3 + 35]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1790 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1790 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1791 * 16],     m5
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 1791 * 16 + 8], m5
+
+    ; mode 30 [row 22]
+    movu          m6,    [r5 + 11 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1836 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1837 * 16],     m3
+
+    ; mode 30 [row 23]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1838 * 16],     m3
+
+    ; mode 33 [row 11 - first half]
+    movu          [r0 + 2006 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1839 * 16],     m3
+
+    ; mode 33 [row 11 - second half]
+    movu          [r0 + 2007 * 16],     m3
+
+    ; mode 31 [row 16]
+    movu          m6,    [r5 + 1 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1888 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1889 * 16],     m3
+
+    ; mode 31 [row 17]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1890 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1891 * 16],     m3
+
+    ; mode 32 [row 13]
+    movu          m6,    [r5 + 6 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1946 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1947 * 16],     m3
+
+    ; mode 32 [row 14]
+    movu          m6,    [r5 + 27 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1948 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1949 * 16],     m3
+
+    ; mode 30 [row 24]
+    movu          m6,    [r5 + 5 * 16]
+    movu          m0,    [r3 + 11]
+    movd          m1,    [r3 + 12]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 19]
+    movd          m4,    [r3 + 20]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1840 * 16],     m3
+
+    movu          m1,    [r3 + 27]
+    movd          m3,    [r3 + 28]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 35]
+    movd          m5,    [r3 + 36]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1841 * 16],     m3
+
+    ; mode 30 [row 25]
+    movu          m6,    [r5 + 18 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1842 * 16],     m3
+
+    ; mode 33 [row 12 - first half]
+    movu          [r0 + 2008 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1843 * 16],     m3
+
+    ; mode 33 [row 12 - second half]
+    movu          [r0 + 2009 * 16],     m3
+
+    ; mode 30 [row 26]
+    movu          m6,    [r5 + 31 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1844 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1845 * 16],     m3
+
+    ; mode 31 [row 18]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1892 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1893 * 16],     m3
+
+    ; mode 31 [row 19]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1894 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1895 * 16],     m3
+
+    ; mode 32 [row 15]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1950 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1951 * 16],     m3
+
+    ; mode 30 [row 27]
+    movu          m6,    [r5 + 12 * 16]
+    movu          m0,    [r3 + 12]
+    movd          m1,    [r3 + 13]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 20]
+    movd          m4,    [r3 + 21]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1846 * 16],     m3
+
+    ; mode 33 [row 13 - first half]
+    movu          [r0 + 2010 * 16],     m3
+
+    movu          m1,    [r3 + 28]
+    movd          m3,    [r3 + 29]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 36]
+    movd          m5,    [r3 + 37]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1847 * 16],     m3
+
+    ; mode 33 [row 13 - second half]
+    movu          [r0 + 2011 * 16],     m3
+
+    ; mode 30 [row 28]
+    movu          m6,    [r5 + 25 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1848 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1849 * 16],     m3
+
+    ; mode 31 [row 20]
+    movu          m6,    [r5 + 5 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1896 * 16],     m3
+
+    ; mode 32 [row 16 - first half]
+    movu          [r0 + 1952 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1897 * 16],     m3
+
+    ; mode 32 [row 16 - second half]
+    movu          [r0 + 1953 * 16],     m3
+
+    ; mode 31 [row 21]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1898 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1899 * 16],     m3
+
+    ; mode 32 [row 17]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1954 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1955 * 16],     m3
+
+    ; mode 30 [row 29]
+    movu          m6,    [r5 + 6 * 16]
+    movu          m0,    [r3 + 13]
+    movd          m1,    [r3 + 14]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 21]
+    movd          m4,    [r3 + 22]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1850 * 16],     m3
+
+    ; mode 33 [row 14 - first half]
+    movu          [r0 + 2012 * 16],     m3
+
+    movu          m1,    [r3 + 29]
+    movd          m3,    [r3 + 30]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 37]
+    movd          m5,    [r3 + 38]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1851 * 16],     m3
+
+    ; mode 33 [row 14 - second half]
+    movu          [r0 + 2013 * 16],     m3
+
+    ; mode 30 [row 30]
+    movu          m6,    [r5 + 19 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1852 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1853 * 16],     m3
+
+    ; mode 31 [row 22]
+    movu          m6,    [r5 + 7 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1900 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1901 * 16],     m3
+
+    ; mode 31 [row 23]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1902 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1903 * 16],     m3
+
+    ; mode 32 [row 18]
+    movu          m6,    [r5 + 15 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1956 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1957 * 16],     m3
+
+    ; mode 30 [row 31]
+    movu          m0,    [r3 + 14]
+    movd          m1,    [r3 + 15]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    movu          m2,    [r3 + 22]
+    movd          m3,    [r3 + 23]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    movu          m1,    [r3 + 30]
+    movd          m3,    [r3 + 31]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    movu          m4,    [r3 + 38]
+    movd          m5,    [r3 + 39]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1854 * 16],     m5
+
+    ; mode 33 [row 15 - first eight]
+    movh          [r0 + 2014 * 16],     m5
+
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1854 * 16 + 8], m5
+
+    ; mode 33 [row 15 - second eight]
+    movh          [r0 + 2014 * 16 + 8],     m5
+
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1855 * 16],     m5
+
+    ; mode 33 [row 15 - third eight]
+    movh          [r0 + 2015 * 16],     m5
+
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 1855 * 16 + 8], m5
+
+    ; mode 33 [row 15 - fourth eight]
+    movh          [r0 + 2015 * 16 + 8], m5
+
+    ; mode 31 [row 24]
+    movu          m6,    [r5 + 9 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1904 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1905 * 16],     m3
+
+    ; mode 31 [row 25]
+    movu          m6,    [r5 + 26 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1906 * 16],     m3
+
+    ; mode 33 [row 16 - first half]
+    movu          [r0 + 2016 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1907 * 16],     m3
+
+    ; mode 33 [row 16 - second half]
+    movu          [r0 + 2017 * 16],     m3
+
+    ; mode 32 [row 19]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1958 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1959 * 16],     m3
+
+    ; mode 32 [row 20]
+    movu          m6,    [r5 + 25 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1960 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1961 * 16],     m3
+
+    ; mode 31 [row 26]
+    movu          m6,    [r5 + 11 * 16]
+    movu          m0,    [r3 + 15]
+    movd          m1,    [r3 + 16]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 23]
+    movd          m4,    [r3 + 24]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1908 * 16],     m3
+
+    movu          m1,    [r3 + 31]
+    movd          m3,    [r3 + 32]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 39]
+    movd          m5,    [r3 + 40]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1909 * 16],     m3
+
+    ; mode 31 [row 27]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1910 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1911 * 16],     m3
+
+    ; mode 32 [row 21]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1962 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1963 * 16],     m3
+
+    ; mode 33 [row 17]
+    movu          m6,    [r5 + 20 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2018 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2019 * 16],     m3
+
+    ; mode 31 [row 28]
+    movu          m6,    [r5 + 13 * 16]
+    movu          m0,    [r3 + 16]
+    movd          m1,    [r3 + 17]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 24]
+    movd          m4,    [r3 + 25]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1912 * 16],     m3
+
+    movu          m1,    [r3 + 32]
+    movd          m3,    [r3 + 33]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 40]
+    movd          m5,    [r3 + 41]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1913 * 16],     m3
+
+    ; mode 31 [row 29]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1914 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1915 * 16],     m3
+
+    ; mode 32 [row 22]
+    movu          m6,    [r5 + 3 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1964 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1965 * 16],     m3
+
+    ; mode 32 [row 23]
+    movu          m6,    [r5 + 24 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1966 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1967 * 16],     m3
+
+    ; mode 33 [row 18]
+    movu          m6,    [r5 + 14 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2020 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2021 * 16],     m3
+
+    ; mode 31 [row 30]
+    movu          m6,    [r5 + 15 * 16]
+    movu          m0,    [r3 + 17]
+    movd          m1,    [r3 + 18]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 25]
+    movd          m4,    [r3 + 26]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1916 * 16],     m3
+
+    movu          m1,    [r3 + 33]
+    movd          m3,    [r3 + 34]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 41]
+    movd          m5,    [r3 + 42]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1917 * 16],     m3
+
+    ; mode 32 [row 24]
+    movu          m6,    [r5 + 13 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1968 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1969 * 16],     m3
+
+    ; mode 33 [row 19]
+    movu          m6,    [r5 + 8 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2022 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2023 * 16],     m3
+
+    ; mode 31 [row 31]
+    movu          m0,    [r3 + 18]
+    movd          m1,    [r3 + 19]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    movu          m2,    [r3 + 26]
+    movd          m3,    [r3 + 27]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    movu          m1,    [r3 + 34]
+    movd          m3,    [r3 + 35]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    movu          m4,    [r3 + 42]
+    movd          m5,    [r3 + 43]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1918 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1918 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1919 * 16],     m5
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 1919 * 16 + 8], m5
+
+    ; mode 32 [row 25]
+    movu          m6,    [r5 + 2 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1970 * 16],     m3
+
+    ; mode 33 [row 20 - first half]
+    movu          [r0 + 2024 * 16],     m3
+
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1971 * 16],     m3
+
+    ; mode 33 [row 20 - second half]
+    movu          [r0 + 2025 * 16],     m3
+
+    ; mode 32 [row 26]
+    movu          m6,    [r5 + 23 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1972 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1973 * 16],     m3
+
+    ; mode 33 [row 21]
+    movu          m6,    [r5 + 28 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2026 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2027 * 16],     m3
+
+    ; mode 32 [row 27]
+    movu          m6,    [r5 + 12 * 16]
+    movu          m0,    [r3 + 19]
+    movd          m1,    [r3 + 20]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 27]
+    movd          m4,    [r3 + 28]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1974 * 16],     m3
+
+    movu          m1,    [r3 + 35]
+    movd          m3,    [r3 + 36]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 43]
+    movd          m5,    [r3 + 44]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1975 * 16],     m3
+
+    ; mode 33 [row 22]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2028 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2029 * 16],     m3
+
+    ; mode 32 [row 28]
+    movu          m6,    [r5 + 1 * 16]
+    movu          m0,    [r3 + 20]
+    movd          m1,    [r3 + 21]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 28]
+    movd          m4,    [r3 + 29]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1976 * 16],     m3
+
+    movu          m1,    [r3 + 36]
+    movd          m3,    [r3 + 37]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 44]
+    movd          m5,    [r3 + 45]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1977 * 16],     m3
+
+    ; mode 32 [row 29]
+    movu          m6,    [r5 + 22 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1978 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1979 * 16],     m3
+
+    ; mode 33 [row 23]
+    movu          m6,    [r5 + 16 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2030 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2031 * 16],     m3
+
+    ; mode 32 [row 30]
+    movu          m6,    [r5 + 11 * 16]
+    movu          m0,    [r3 + 21]
+    movd          m1,    [r3 + 22]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 29]
+    movd          m4,    [r3 + 30]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1980 * 16],     m3
+
+    movu          m1,    [r3 + 37]
+    movd          m3,    [r3 + 38]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 45]
+    movd          m5,    [r3 + 46]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 1981 * 16],     m3
+
+    ; mode 33 [row 24]
+    movu          m6,    [r5 + 10 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2032 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2033 * 16],     m3
+
+    ; mode 32 [row 31]
+    movu          m0,    [r3 + 22]
+    movd          m1,    [r3 + 23]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    movu          m2,    [r3 + 30]
+    movd          m3,    [r3 + 31]
+    palignr       m3,    m2,        1
+    punpcklbw     m2,    m3
+    movu          m1,    [r3 + 38]
+    movd          m3,    [r3 + 39]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    movu          m4,    [r3 + 46]
+    movd          m5,    [r3 + 47]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+
+    pshufb        m5,    m0,            [tab_S2]
+    movh          [r0 + 1982 * 16],     m5
+    pshufb        m5,    m2,            [tab_S2]
+    movh          [r0 + 1982 * 16 + 8], m5
+    pshufb        m5,    m1,            [tab_S2]
+    movh          [r0 + 1983 * 16],     m5
+    pshufb        m5,    m4,            [tab_S2]
+    movh          [r0 + 1983 * 16 + 8], m5
+
+    ; mode 33 [row 25]
+    movu          m6,    [r5 + 4 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2034 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2035 * 16],     m3
+
+    ; mode 33 [row 26]
+    movu          m6,    [r5 + 30 * 16]
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2036 * 16],     m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2037 * 16],     m3
+
+    ; mode 33 [row 27]
+    movu          m6,    [r5 + 24 * 16]
+    movu          m0,    [r3 + 23]
+    movd          m1,    [r3 + 24]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 31]
+    movd          m4,    [r3 + 32]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2038 * 16],     m3
+
+    movu          m1,    [r3 + 39]
+    movd          m3,    [r3 + 40]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 47]
+    movd          m5,    [r3 + 48]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2039 * 16],     m3
+
+    ; mode 33 [row 28]
+    movu          m6,    [r5 + 18 * 16]
+    movu          m0,    [r3 + 24]
+    movd          m1,    [r3 + 25]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 32]
+    movd          m4,    [r3 + 33]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2040 * 16],     m3
+
+    movu          m1,    [r3 + 40]
+    movd          m3,    [r3 + 41]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 48]
+    movd          m5,    [r3 + 49]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2041 * 16],     m3
+
+    ; mode 33 [row 29]
+    movu          m6,    [r5 + 12 * 16]
+    movu          m0,    [r3 + 25]
+    movd          m1,    [r3 + 26]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 33]
+    movd          m4,    [r3 + 34]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2042 * 16],     m3
+
+    movu          m1,    [r3 + 41]
+    movd          m3,    [r3 + 42]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 49]
+    movd          m5,    [r3 + 50]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2043 * 16],     m3
+
+    ; mode 33 [row 30]
+    movu          m6,    [r5 + 6 * 16]
+    movu          m0,    [r3 + 26]
+    movd          m1,    [r3 + 27]
+    palignr       m1,    m0,        1
+    punpcklbw     m0,    m1
+    pmaddubsw     m3,    m0,            m6
+    pmulhrsw      m3,    m7
+    movu          m2,    [r3 + 34]
+    movd          m4,    [r3 + 35]
+    palignr       m4,    m2,        1
+    punpcklbw     m2,    m4
+    pmaddubsw     m5,    m2,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2044 * 16],     m3
+
+    movu          m1,    [r3 + 42]
+    movd          m3,    [r3 + 43]
+    palignr       m3,    m1,        1
+    punpcklbw     m1,    m3
+    pmaddubsw     m3,    m1,            m6
+    pmulhrsw      m3,    m7
+    movu          m4,    [r3 + 50]
+    movd          m5,    [r3 + 51]
+    palignr       m5,    m4,        1
+    punpcklbw     m4,    m5
+    pmaddubsw     m5,    m4,            m6
+    pmulhrsw      m5,    m7
+    packuswb      m3,    m5
+    movu          [r0 + 2045 * 16],     m3
+
+    ; mode 33 [row 31]
+    movu          m5,                   [r3 + 27]
+    movu          [r0 + 2046 * 16],     m5
+    movu          m5,                   [r3 + 43]
+    movu          [r0 + 2047 * 16],     m5
+
+    ;mode 34 [row 0]
+    movu       m0,              [r3 + 2]
+    movu       [r0 + 2048 * 16],   m0
+    movu       m1,              [r3 + 18]
+    movu       [r0 + 2049 * 16],   m1
+
+    ;mode 34 [row 1]
+    movu       m2,              [r3 + 34]
+    palignr    m3,              m1,       m0,    1
+    movu       [r0 + 2050 * 16],   m3
+    palignr    m4,              m2,       m1,    1
+    movu       [r0 + 2051 * 16],   m4
+
+    ;mode 34 [row 2]
+    palignr    m3,              m1,       m0,    2
+    movu       [r0 + 2052 * 16],   m3
+    palignr    m4,              m2,       m1,    2
+    movu       [r0 + 2053 * 16],   m4
+
+    ;mode 34 [row 3]
+    palignr    m3,              m1,       m0,    3
+    movu       [r0 + 2054 * 16],   m3
+    palignr    m4,              m2,       m1,    3
+    movu       [r0 + 2055 * 16],   m4
+
+    ;mode 34 [row 4]
+    palignr    m3,              m1,       m0,    4
+    movu       [r0 + 2056 * 16],   m3
+    palignr    m4,              m2,       m1,    4
+    movu       [r0 + 2057 * 16],   m4
+
+    ;mode 34 [row 5]
+    palignr    m3,              m1,       m0,    5
+    movu       [r0 + 2058 * 16],   m3
+    palignr    m4,              m2,       m1,    5
+    movu       [r0 + 2059 * 16],   m4
+
+    ;mode 34 [row 6]
+    palignr    m3,              m1,       m0,    6
+    movu       [r0 + 2060 * 16],   m3
+    palignr    m4,              m2,       m1,    6
+    movu       [r0 + 2061 * 16],   m4
+
+    ;mode 34 [row 7]
+    palignr    m3,              m1,       m0,    7
+    movu       [r0 + 2062 * 16],   m3
+    palignr    m4,              m2,       m1,    7
+    movu       [r0 + 2063 * 16],   m4
+
+    ;mode 34 [row 8]
+    palignr    m3,              m1,       m0,    8
+    movu       [r0 + 2064 * 16],   m3
+    palignr    m4,              m2,       m1,    8
+    movu       [r0 + 2065 * 16],   m4
+
+    ;mode 34 [row 9]
+    palignr    m3,              m1,       m0,    9
+    movu       [r0 + 2066 * 16],   m3
+    palignr    m4,              m2,       m1,    9
+    movu       [r0 + 2067 * 16],   m4
+
+    ;mode 34 [row 10]
+    palignr    m3,              m1,       m0,    10
+    movu       [r0 + 2068 * 16],   m3
+    palignr    m4,              m2,       m1,    10
+    movu       [r0 + 2069 * 16],   m4
+
+    ;mode 34 [row 11]
+    palignr    m3,              m1,       m0,    11
+    movu       [r0 + 2070 * 16],   m3
+    palignr    m4,              m2,       m1,    11
+    movu       [r0 + 2071 * 16],   m4
+
+    ;mode 34 [row 12]
+    palignr    m3,              m1,       m0,    12
+    movu       [r0 + 2072 * 16],   m3
+    palignr    m4,              m2,       m1,    12
+    movu       [r0 + 2073 * 16],   m4
+
+    ;mode 34 [row 13]
+    palignr    m3,              m1,       m0,    13
+    movu       [r0 + 2074 * 16],   m3
+    palignr    m4,              m2,       m1,    13
+    movu       [r0 + 2075 * 16],   m4
+
+    ;mode 34 [row 14]
+    palignr    m3,              m1,       m0,    14
+    movu       [r0 + 2076 * 16],   m3
+    palignr    m4,              m2,       m1,    14
+    movu       [r0 + 2077 * 16],   m4
+
+    ;mode 34 [row 15]
+    palignr    m3,              m1,       m0,    15
+    movu       [r0 + 2078 * 16],   m3
+    palignr    m4,              m2,       m1,    15
+    movu       [r0 + 2079 * 16],   m4
+
+    ;mode 34 [row 16]
+    palignr    m3,              m1,       m0,    16
+    movu       [r0 + 2080 * 16],   m3
+    palignr    m4,              m2,       m1,    16
+    movu       [r0 + 2081 * 16],   m4
+
+    ;mode 34 [row 17]
+    movu       m0,                [r3 + 19]
+    movu       [r0 + 2082 * 16],   m0
+    movu       m1,                [r3 + 35]
+    movu       [r0 + 2083 * 16],   m1
+
+    mov        r2d, r6d
+    mov        [r4], r2b
+    mov        r2d, [rsp]
+    mov        [r1 + 64], r2b
+
+    ;mode 34 [row 18]
+    movu       m2,              [r3 + 51]
+    palignr    m3,              m1,       m0,    1
+    movu       [r0 + 2084 * 16],   m3
+    palignr    m4,              m2,       m1,    1
+    movu       [r0 + 2085 * 16],   m4
+
+    ;mode 34 [row 19]
+    palignr    m3,              m1,       m0,    2
+    movu       [r0 + 2086 * 16],   m3
+    palignr    m4,              m2,       m1,    2
+    movu       [r0 + 2087 * 16],   m4
+
+    ;mode 34 [row 20]
+    palignr    m3,              m1,       m0,    3
+    movu       [r0 + 2088 * 16],   m3
+    palignr    m4,              m2,       m1,    3
+    movu       [r0 + 2089 * 16],   m4
+
+    ;mode 34 [row 21]
+    palignr    m3,              m1,       m0,    4
+    movu       [r0 + 2090 * 16],   m3
+    palignr    m4,              m2,       m1,    4
+    movu       [r0 + 2091 * 16],   m4
+
+    ;mode 34 [row 22]
+    palignr    m3,              m1,       m0,    5
+    movu       [r0 + 2092 * 16],   m3
+    palignr    m4,              m2,       m1,    5
+    movu       [r0 + 2093 * 16],   m4
+
+    ;mode 34 [row 23]
+    palignr    m3,              m1,       m0,    6
+    movu       [r0 + 2094 * 16],   m3
+    palignr    m4,              m2,       m1,    6
+    movu       [r0 + 2095 * 16],   m4
+
+    ;mode 34 [row 24]
+    palignr    m3,              m1,       m0,    7
+    movu       [r0 + 2096 * 16],   m3
+    palignr    m4,              m2,       m1,    7
+    movu       [r0 + 2097 * 16],   m4
+
+    ;mode 34 [row 25]
+    palignr    m3,              m1,       m0,    8
+    movu       [r0 + 2098 * 16],   m3
+    palignr    m4,              m2,       m1,    8
+    movu       [r0 + 2099 * 16],   m4
+
+    ;mode 34 [row 26]
+    palignr    m3,              m1,       m0,    9
+    movu       [r0 + 2100 * 16],   m3
+    palignr    m4,              m2,       m1,    9
+    movu       [r0 + 2101 * 16],   m4
+
+    ;mode 34 [row 27]
+    palignr    m3,              m1,       m0,    10
+    movu       [r0 + 2102 * 16],   m3
+    palignr    m4,              m2,       m1,    10
+    movu       [r0 + 2103 * 16],   m4
+
+    ;mode 34 [row 28]
+    palignr    m3,              m1,       m0,    11
+    movu       [r0 + 2104 * 16],   m3
+    palignr    m4,              m2,       m1,    11
+    movu       [r0 + 2105 * 16],   m4
+
+    ;mode 34 [row 29]
+    palignr    m3,              m1,       m0,    12
+    movu       [r0 + 2106 * 16],   m3
+    palignr    m4,              m2,       m1,    12
+    movu       [r0 + 2107 * 16],   m4
+
+    ;mode 34 [row 30]
+    palignr    m3,              m1,       m0,    13
+    movu       [r0 + 2108 * 16],   m3
+    palignr    m4,              m2,       m1,    13
+    movu       [r0 + 2109 * 16],   m4
+
+    ;mode 34 [row 31]
+    palignr    m3,              m1,       m0,    14
+    movu       [r0 + 2110 * 16],   m3
+    palignr    m4,              m2,       m1,    14
+    movu       [r0 + 2111 * 16],   m4
+    RET
+
+
+;-----------------------------------------------------------------------------
+; void all_angs_pred_4x4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal all_angs_pred_4x4, 4, 4, 6
+
+    mova           m5, [pw_1024]
+    lea            r2, [all_ang4]
+    lea            r3, [all_ang4_shuff]
+
+; mode 2
+
+    vbroadcasti128 m0, [r1 + 9]
+    mova           xm1, xm0
+    psrldq         xm1, 1
+    pshufb         xm1, [r3]
+    movu           [r0], xm1
+
+; mode 3
+
+    pshufb         m1, m0, [r3 + 1 * mmsize]
+    pmaddubsw      m1, [r2]
+    pmulhrsw       m1, m5
+
+; mode 4
+
+    pshufb         m2, m0, [r3 + 2 * mmsize]
+    pmaddubsw      m2, [r2 + 1 * mmsize]
+    pmulhrsw       m2, m5
+    packuswb       m1, m2
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (3 - 2) * 16], m1
+
+; mode 5
+
+    pshufb         m1, m0, [r3 + 2 * mmsize]
+    pmaddubsw      m1, [r2 + 2 * mmsize]
+    pmulhrsw       m1, m5
+
+; mode 6
+
+    pshufb         m2, m0, [r3 + 3 * mmsize]
+    pmaddubsw      m2, [r2 + 3 * mmsize]
+    pmulhrsw       m2, m5
+    packuswb       m1, m2
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (5 - 2) * 16], m1
+
+    add            r3, 4 * mmsize
+    add            r2, 4 * mmsize
+
+; mode 7
+
+    pshufb         m1, m0, [r3 + 0 * mmsize]
+    pmaddubsw      m1, [r2 + 0 * mmsize]
+    pmulhrsw       m1, m5
+
+; mode 8
+
+    pshufb         m2, m0, [r3 + 1 * mmsize]
+    pmaddubsw      m2, [r2 + 1 * mmsize]
+    pmulhrsw       m2, m5
+    packuswb       m1, m2
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (7 - 2) * 16], m1
+
+; mode 9
+
+    pshufb         m1, m0, [r3 + 1 * mmsize]
+    pmaddubsw      m1, [r2 + 2 * mmsize]
+    pmulhrsw       m1, m5
+    packuswb       m1, m1
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (9 - 2) * 16], xm1
+
+; mode 10
+
+    pshufb         xm1, xm0, [r3 + 2 * mmsize]
+    movu           [r0 + (10 - 2) * 16], xm1
+
+    pxor           xm1, xm1
+    movd           xm2, [r1 + 1]
+    pshufd         xm3, xm2, 0
+    punpcklbw      xm3, xm1
+    pinsrb         xm2, [r1], 0
+    pshufb         xm4, xm2, xm1
+    punpcklbw      xm4, xm1
+    psubw          xm3, xm4
+    psraw          xm3, 1
+    pshufb         xm4, xm0, xm1
+    punpcklbw      xm4, xm1
+    paddw          xm3, xm4
+    packuswb       xm3, xm1
+
+    pextrb         [r0 + 128], xm3, 0
+    pextrb         [r0 + 132], xm3, 1
+    pextrb         [r0 + 136], xm3, 2
+    pextrb         [r0 + 140], xm3, 3
+
+; mode 11
+
+    vbroadcasti128 m0, [r1]
+    pshufb         m1, m0, [r3 + 3 * mmsize]
+    pmaddubsw      m1, [r2 + 3 * mmsize]
+    pmulhrsw       m1, m5
+
+; mode 12
+
+    add            r2, 4 * mmsize
+
+    pshufb         m2, m0, [r3 + 3 * mmsize]
+    pmaddubsw      m2, [r2 + 0 * mmsize]
+    pmulhrsw       m2, m5
+    packuswb       m1, m2
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (11 - 2) * 16], m1
+
+; mode 13
+
+    add            r3, 4 * mmsize
+
+    pshufb         m1, m0, [r3 + 0 * mmsize]
+    pmaddubsw      m1, [r2 + 1 * mmsize]
+    pmulhrsw       m1, m5
+
+; mode 14
+
+    pshufb         m2, m0, [r3 + 1 * mmsize]
+    pmaddubsw      m2, [r2 + 2 * mmsize]
+    pmulhrsw       m2, m5
+    packuswb       m1, m2
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (13 - 2) * 16], m1
+
+; mode 15
+
+    pshufb         m1, m0, [r3 + 2 * mmsize]
+    pmaddubsw      m1, [r2 + 3 * mmsize]
+    pmulhrsw       m1, m5
+
+; mode 16
+
+    add            r2, 4 * mmsize
+
+    pshufb         m2, m0, [r3 + 3 * mmsize]
+    pmaddubsw      m2, [r2 + 0 * mmsize]
+    pmulhrsw       m2, m5
+    packuswb       m1, m2
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (15 - 2) * 16], m1
+
+; mode 17
+
+    add            r3, 4 * mmsize
+
+    pshufb         m1, m0, [r3 + 0 * mmsize]
+    pmaddubsw      m1, [r2 + 1 * mmsize]
+    pmulhrsw       m1, m5
+    packuswb       m1, m1
+    vpermq         m1, m1, 11011000b
+
+; mode 18
+
+    pshufb         m2, m0, [r3 + 1 * mmsize]
+    vinserti128    m1, m1, xm2, 1
+    movu           [r0 + (17 - 2) * 16], m1
+
+; mode 19
+
+    pshufb         m1, m0, [r3 + 2 * mmsize]
+    pmaddubsw      m1, [r2 + 2 * mmsize]
+    pmulhrsw       m1, m5
+
+; mode 20
+
+    pshufb         m2, m0, [r3 + 3 * mmsize]
+    pmaddubsw      m2, [r2 + 3 * mmsize]
+    pmulhrsw       m2, m5
+    packuswb       m1, m2
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (19 - 2) * 16], m1
+
+; mode 21
+
+    add            r2, 4 * mmsize
+    add            r3, 4 * mmsize
+
+    pshufb         m1, m0, [r3 + 0 * mmsize]
+    pmaddubsw      m1, [r2 + 0 * mmsize]
+    pmulhrsw       m1, m5
+
+; mode 22
+
+    pshufb         m2, m0, [r3 + 1 * mmsize]
+    pmaddubsw      m2, [r2 + 1 * mmsize]
+    pmulhrsw       m2, m5
+    packuswb       m1, m2
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (21 - 2) * 16], m1
+
+; mode 23
+
+    pshufb         m1, m0, [r3 + 2 * mmsize]
+    pmaddubsw      m1, [r2 + 2 * mmsize]
+    pmulhrsw       m1, m5
+
+; mode 24
+
+    pshufb         m2, m0, [r3 + 3 * mmsize]
+    pmaddubsw      m2, [r2 + 3 * mmsize]
+    pmulhrsw       m2, m5
+    packuswb       m1, m2
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (23 - 2) * 16], m1
+
+; mode 25
+
+    add            r2, 4 * mmsize
+
+    pshufb         m1, m0, [r3 + 3 * mmsize]
+    pmaddubsw      m1, [r2 + 0 * mmsize]
+    pmulhrsw       m1, m5
+    packuswb       m1, m1
+    vpermq         m1, m1, 11011000b
+    movu           [r0 + (25 - 2) * 16], xm1
+
+; mode 26
+
+    add            r3, 4 * mmsize
+
+    pshufb         xm1, xm0, [r3 + 0 * mmsize]
+    movu           [r0 + (26 - 2) * 16], xm1
+
+    pxor           xm1, xm1
+    movd           xm2, [r1 + 9]
+    pshufd         xm3, xm2, 0
+    punpcklbw      xm3, xm1
+    pinsrb         xm4, [r1 + 0], 0
+    pshufb         xm4, xm1
+    punpcklbw      xm4, xm1
+    psubw          xm3, xm4
+    psraw          xm3, 1
+    psrldq         xm2, xm0, 1
+    pshufb         xm2, xm1
+    punpcklbw      xm2, xm1
+    paddw          xm3, xm2
+    packuswb       xm3, xm1
+
+    pextrb       [r0 + 384], xm3, 0
+    pextrb       [r0 + 388], xm3, 1
+    pextrb       [r0 + 392], xm3, 2
+    pextrb       [r0 + 396], xm3, 3
+
+; mode 27
+
+    pshufb        m1, m0, [r3 + 1 * mmsize]
+    pmaddubsw     m1, [r2 + 1 * mmsize]
+    pmulhrsw      m1, m5
+
+; mode 28
+
+    pshufb        m2, m0, [r3 + 1 * mmsize]
+    pmaddubsw     m2, [r2 + 2 * mmsize]
+    pmulhrsw      m2, m5
+    packuswb      m1, m2
+    vpermq        m1, m1, 11011000b
+    movu          [r0 + (27 - 2) * 16], m1
+
+; mode 29
+
+    pshufb        m1, m0, [r3 + 2 * mmsize]
+    pmaddubsw     m1, [r2 + 3 * mmsize]
+    pmulhrsw      m1, m5
+
+; mode 30
+
+    add           r2, 4 * mmsize
+
+    pshufb        m2, m0, [r3 + 3 * mmsize]
+    pmaddubsw     m2, [r2 + 0 * mmsize]
+    pmulhrsw      m2, m5
+    packuswb      m1, m2
+    vpermq        m1, m1, 11011000b
+    movu          [r0 + (29 - 2) * 16], m1
+
+; mode 31
+
+    add           r3, 4 * mmsize
+
+    pshufb        m1, m0, [r3 + 0 * mmsize]
+    pmaddubsw     m1, [r2 + 1 * mmsize]
+    pmulhrsw      m1, m5
+
+; mode 32
+
+    pshufb        m2, m0, [r3 + 0 * mmsize]
+    pmaddubsw     m2, [r2 + 2 * mmsize]
+    pmulhrsw      m2, m5
+    packuswb      m1, m2
+    vpermq        m1, m1, 11011000b
+    movu          [r0 + (31 - 2) * 16], m1
+
+; mode 33
+
+    pshufb        m1, m0, [r3 + 1 * mmsize]
+    pmaddubsw     m1, [r2 + 3 * mmsize]
+    pmulhrsw      m1, m5
+    packuswb      m1, m2
+    vpermq        m1, m1, 11011000b
+
+; mode 34
+
+    pshufb        m0, [r3 + 2 * mmsize]
+    vinserti128   m1, m1, xm0, 1
+    movu          [r0 + (33 - 2) * 16], m1
+    RET
+
+;-----------------------------------------------------------------------------
+; void all_angs_pred_4x4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal all_angs_pred_4x4, 4, 4, 8
+
+; mode 2
+
+    movh        m6,             [r1 + 9]
+    mova        m2,             m6
+    psrldq      m2,             1
+    movd        [r0],           m2              ;byte[A, B, C, D]
+    psrldq      m2,             1
+    movd        [r0 + 4],       m2              ;byte[B, C, D, E]
+    psrldq      m2,             1
+    movd        [r0 + 8],       m2              ;byte[C, D, E, F]
+    psrldq      m2,             1
+    movd        [r0 + 12],      m2              ;byte[D, E, F, G]
+
+; mode 10/26
+
+    pxor        m7,             m7
+    pshufd      m5,             m6,        0
+    mova        [r0 + 128],     m5              ;mode 10 byte[9, A, B, C, 9, A, B, C, 9, A, B, C, 9, A, B, C]
+
+    movd        m4,             [r1 + 1]
+    pshufd      m4,             m4,        0
+    mova        [r0 + 384],     m4              ;mode 26 byte[1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
+
+    movd        m1,             [r1]
+    punpcklbw   m1,             m7
+    pshuflw     m1,             m1,     0x00
+    punpcklqdq  m1,             m1              ;m1 = byte[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+
+    punpckldq   m4,             m5
+    punpcklbw   m4,             m7              ;m4 = word[1, 2, 3, 4, 9, A, B, C]
+    pshuflw     m2,             m4,     0x00
+    pshufhw     m2,             m2,     0x00    ;m2 = word[1, 1, 1, 1, 9, 9, 9, 9]
+
+    psubw       m4,             m1
+    psraw       m4,             1
+
+    pshufd      m2,             m2,     q1032   ;m2 = word[9, 9, 9, 9, 1, 1, 1, 1]
+    paddw       m4,             m2
+    packuswb    m4,             m4
+
+%if ARCH_X86_64
+    movq        r2,             m4
+
+    mov         [r0 + 128],     r2b              ;mode 10
+    shr         r2,             8
+    mov         [r0 + 132],     r2b
+    shr         r2,             8
+    mov         [r0 + 136],     r2b
+    shr         r2,             8
+    mov         [r0 + 140],     r2b
+    shr         r2,             8
+    mov         [r0 + 384],     r2b              ;mode 26
+    shr         r2d,            8
+    mov         [r0 + 388],     r2b
+    shr         r2d,            8
+    mov         [r0 + 392],     r2b
+    shr         r2d,            8
+    mov         [r0 + 396],     r2b
+
+%else
+    movd        r2d,             m4
+
+    mov         [r0 + 128],     r2b              ;mode 10
+    shr         r2d,             8
+    mov         [r0 + 132],     r2b
+    shr         r2d,             8
+    mov         [r0 + 136],     r2b
+    shr         r2d,             8
+    mov         [r0 + 140],     r2b
+
+    psrldq      m4,             4
+    movd        r2d,            m4
+
+    mov         [r0 + 384],     r2b              ;mode 26
+    shr         r2d,            8
+    mov         [r0 + 388],     r2b
+    shr         r2d,            8
+    mov         [r0 + 392],     r2b
+    shr         r2d,            8
+    mov         [r0 + 396],     r2b
+%endif
+
+; mode 3
+
+    mova        m2,             [pw_16]
+    lea         r3,             [pw_ang_table + 7 * 16]
+    lea         r2,             [pw_ang_table + 23 * 16]
+    punpcklbw   m6,             m6
+    psrldq      m6,             1
+    movh        m1,             m6
+    psrldq      m6,             2
+    movh        m0,             m6
+    psrldq      m6,             2
+    movh        m3,             m6
+    psrldq      m6,             2
+    punpcklbw   m1,             m7              ;m1 = word[9, A, A, B, B, C, C, D]
+    punpcklbw   m0,             m7              ;m0 = word[A, B, B, C, C, D, D, E]
+    punpcklbw   m3,             m7              ;m3 = word[B, C, C, D, D, E, E, F]
+    punpcklbw   m6,             m7              ;m6 = word[C, D, D, E, E, F, F, G]
+
+    mova        m7,             [r2 - 3 * 16]
+
+    pmaddwd     m5,             m1,     [r2 + 3 * 16]
+    pmaddwd     m4,             m0,     m7
+
+    packssdw    m5,             m4
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    pmaddwd     m4,             m3,     [r3 + 7 * 16]
+    pmaddwd     m6,             [r3 + 1 * 16]
+
+    packssdw    m4,             m6
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m5,             m4
+    mova        [r0 + 16],      m5
+    movd        [r0 + 68],      m5              ;mode 6 row 1
+    psrldq      m5,             4
+    movd        [r0 + 76],      m5              ;mode 6 row 3
+
+; mode 4
+
+    pmaddwd     m4,             m0,     [r2 + 8 * 16]
+    pmaddwd     m6,             m3,     m7
+
+    packssdw    m4,             m6
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    pmaddwd     m5,             m1,     [r2 - 2 * 16]
+    pmaddwd     m6,             m0,     [r3 + 3 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    packuswb    m5,             m4
+    mova        [r0 + 32],      m5
+
+; mode 5
+
+    pmaddwd     m5,             m1,     [r2 - 6 * 16]
+    pmaddwd     m6,             m0,     [r3 - 5 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    pmaddwd     m4,             m0,     [r2 - 4 * 16]
+    pmaddwd     m3,             [r3 - 3 * 16]
+
+    packssdw    m4,             m3
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m5,             m4
+    mova        [r0 + 48],      m5
+
+; mode 6
+
+    pmaddwd     m5,             m1,     [r3 + 6 * 16]
+    pmaddwd     m6,             m0,     [r3 + 0 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    packuswb    m5,             m6
+    movd        [r0 + 64],      m5
+    psrldq      m5,             4
+    movd        [r0 + 72],      m5
+
+; mode 7
+
+    pmaddwd     m5,             m1,     [r3 + 2 * 16]
+    pmaddwd     m6,             m1,     [r2 - 5 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    mova        m3,             [r2 + 4 * 16]
+    pmaddwd     m4,             m1,     m3
+    pmaddwd     m0,             [r3 - 3 * 16]
+
+    packssdw    m4,             m0
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m5,             m4
+    mova        [r0 + 80],      m5
+
+; mode 8
+
+    mova        m0,             [r3 - 2 * 16]
+    pmaddwd     m5,             m1,     m0
+    pmaddwd     m6,             m1,     [r3 + 3 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    pmaddwd     m4,             m1,     [r3 + 8 * 16]
+    pmaddwd     m7,             m1
+
+    packssdw    m4,             m7
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m5,             m4
+    mova        [r0 + 96],      m5
+
+; mode 9
+
+    pmaddwd     m5,             m1,     [r3 - 5 * 16]
+    pmaddwd     m6,             m1,     [r3 - 3 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    pmaddwd     m4,             m1,     [r3 - 1 * 16]
+    pmaddwd     m6,             m1,     [r3 + 1 * 16]
+
+    packssdw    m4,             m6
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m5,             m4
+    mova        [r0 + 112],     m5
+
+; mode 11
+
+    movd        m5,             [r1]
+    punpcklwd   m5,             m1
+    pand        m5,             [pb_0000000000000F0F]
+    pslldq      m1,             4
+    por         m1,             m5              ;m1 = word[0, 9, 9, A, A, B, B, C]
+
+    pmaddwd     m5,             m1,     [r2 + 7 * 16]
+    pmaddwd     m6,             m1,     [r2 + 5 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    pmaddwd     m4,             m1,     [r2 + 3 * 16]
+    pmaddwd     m6,             m1,     [r2 + 1 * 16]
+
+    packssdw    m4,             m6
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m5,             m4
+    mova        [r0 + 144],     m5
+
+; mode 12
+
+    pmaddwd     m3,             m1
+    pmaddwd     m6,             m1,     [r2 - 1 * 16]
+
+    packssdw    m3,             m6
+    paddw       m3,             m2
+    psraw       m3,             5
+
+    pmaddwd     m4,             m1,     [r2 - 6 * 16]
+    pmaddwd     m6,             m1,     [r3 + 5 * 16]
+
+    packssdw    m4,             m6
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m3,             m4
+    mova        [r0 + 160],     m3
+
+; mode 13
+
+    mova        m3,             m1
+    movd        m7,             [r1 + 4]
+    punpcklwd   m7,             m1
+    pand        m7,             [pb_0000000000000F0F]
+    pslldq      m3,             4
+    por         m3,             m7              ;m3 = word[4, 0, 0, 9, 9, A, A, B]
+
+    pmaddwd     m5,             m1,     [r2 + 0 * 16]
+    pmaddwd     m6,             m1,     [r3 + 7 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    pmaddwd     m4,             m1,     m0
+    pmaddwd     m6,             m3,     [r2 + 5 * 16]
+
+    packssdw    m4,             m6
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m5,             m4
+    mova        [r0 + 176],     m5
+
+; mode 14
+
+    pmaddwd     m5,             m1,     [r2 - 4 * 16]
+    pmaddwd     m6,             m1,     [r3 - 1 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    movd        m6,             [r1 + 2]
+    pand        m3,             [pw_FFFFFFFFFFFFFFF0]
+    pand        m6,             [pb_000000000000000F]
+    por         m3,             m6              ;m3 = word[2, 0, 0, 9, 9, A, A, B]
+
+    pmaddwd     m4,             m3,     [r2 + 2 * 16]
+    pmaddwd     m6,             m3,     [r3 + 5 * 16]
+
+    packssdw    m4,             m6
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m5,             m4
+    mova        [r0 + 192],     m5
+    psrldq      m5,             4
+    movd        [r0 + 240],     m5              ;mode 17 row 0
+
+; mode 15
+
+    pmaddwd     m5,             m1,     [r3 + 8 * 16]
+    pmaddwd     m6,             m3,     [r2 + 7 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    pmaddwd     m6,             m3,     [r3 + 6 * 16]
+
+    mova        m0,             m3
+    punpcklwd   m7,             m3
+    pslldq      m0,             4
+    pand        m7,             [pb_0000000000000F0F]
+    por         m0,             m7              ;m0 = word[4, 2, 2, 0, 0, 9, 9, A]
+
+    pmaddwd     m4,             m0,     [r2 + 5 * 16]
+
+    packssdw    m6,             m4
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    packuswb    m5,             m6
+    mova        [r0 + 208],     m5
+
+; mode 16
+
+    pmaddwd     m5,             m1,     [r3 + 4 * 16]
+    pmaddwd     m6,             m3,     [r2 - 1 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    pmaddwd     m3,             [r3 - 6 * 16]
+
+    movd        m6,             [r1 + 3]
+    pand        m0,             [pw_FFFFFFFFFFFFFFF0]
+    pand        m6,             [pb_000000000000000F]
+    por         m0,             m6              ;m0 = word[3, 2, 2, 0, 0, 9, 9, A]
+
+    pmaddwd     m0,             [r3 + 5 * 16]
+    packssdw    m3,             m0
+    paddw       m3,             m2
+    psraw       m3,             5
+
+    packuswb    m5,             m3
+    mova        [r0 + 224],     m5
+
+; mode 17
+
+    movd        m4,             [r1 + 1]
+    punpcklwd   m4,             m1
+    pand        m4,             [pb_0000000000000F0F]
+    pslldq      m1,             4
+    por         m1,             m4              ;m1 = word[1, 0, 0, 9, 9, A, A, B]
+
+    pmaddwd     m6,             m1,     [r3 + 5 * 16]
+
+    packssdw    m6,             m6
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    movd        m5,             [r1 + 2]
+    punpcklwd   m5,             m1
+    pand        m5,             [pb_0000000000000F0F]
+    pslldq      m1,             4
+    por         m1,             m5              ;m1 = word[2, 1, 1, 0, 0, 9, 9, A]
+
+    pmaddwd     m4,             m1,     [r2 - 5 * 16]
+
+    punpcklwd   m7,             m1
+    pand        m7,             [pb_0000000000000F0F]
+    pslldq      m1,             4
+    por         m1,             m7              ;m1 = word[4, 2, 2, 1, 1, 0, 0, 9]
+
+    pmaddwd     m1,             [r2 + 1 * 16]
+    packssdw    m4,             m1
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m6,             m4
+    movd        [r0 + 244],     m6
+    psrldq      m6,             8
+    movh        [r0 + 248],     m6
+
+; mode 18
+
+    movh        m1,             [r1]
+    movd        [r0 + 256],     m1              ;byte[0, 1, 2, 3]
+
+    movh        m3,             [r1 + 2]
+    punpcklqdq  m3,             m1
+    psrldq      m3,             7
+    movd        [r0 + 260],     m3              ;byte[2, 1, 0, 9]
+
+    movh        m4,             [r1 + 3]
+    punpcklqdq  m4,             m3
+    psrldq      m4,             7
+    movd        [r0 + 264],     m4              ;byte[1, 0, 9, A]
+
+    movh        m0,             [r1 + 4]
+    punpcklqdq  m0,             m4
+    psrldq      m0,             7
+    movd        [r0 + 268],     m0              ;byte[0, 9, A, B]
+
+; mode 19
+
+    pxor        m7,             m7
+    punpcklbw   m4,             m3
+    punpcklbw   m3,             m1
+    punpcklbw   m1,             m1
+    punpcklbw   m4,             m7              ;m4 = word[A, 9, 9, 0, 0, 1, 1, 2]
+    punpcklbw   m3,             m7              ;m3 = word[9, 0, 0, 1, 1, 2, 2, 3]
+    psrldq      m1,             1
+    punpcklbw   m1,             m7              ;m1 = word[0, 1, 1, 2, 2, 3, 3, 4]
+
+    pmaddwd     m6,             m1,     [r3 - 1 * 16]
+    pmaddwd     m7,             m3,     [r3 + 5 * 16]
+
+    packssdw    m6,             m7
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    pmaddwd     m5,             m4,     [r2 - 5 * 16]
+
+    movd        m7,             [r1 + 12]
+    punpcklwd   m7,             m4
+    pand        m7,             [pb_0000000000000F0F]
+    pslldq      m4,             4
+    por         m4,             m7              ;m4 = word[C, A, A, 9, 9, 0, 0, 1]
+
+    pmaddwd     m4,             [r2 + 1 * 16]
+    packssdw    m5,             m4
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    packuswb    m6,             m5
+    mova        [r0 + 272],     m6
+    movd        [r0 + 324],     m6              ;mode 22 row 1
+
+; mode 20
+
+    pmaddwd     m5,             m1,     [r3 + 4 * 16]
+
+    movd        m4,             [r1 + 10]
+    pand        m3,             [pw_FFFFFFFFFFFFFFF0]
+    pand        m4,             [pb_000000000000000F]
+    por         m3,             m4              ;m3 = word[A, 0, 0, 1, 1, 2, 2, 3]
+
+    pmaddwd     m6,             m3,     [r2 - 1 * 16]
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    pmaddwd     m4,             m3,     [r3 - 6 * 16]
+
+    punpcklwd   m0,             m3
+    pand        m0,             [pb_0000000000000F0F]
+    mova        m6,             m3
+    pslldq      m6,             4
+    por         m0,             m6              ;m0 = word[B, A, A, 0, 0, 1, 1, 2]
+
+    pmaddwd     m6,             m0,     [r3 + 5 * 16]
+
+    packssdw    m4,             m6
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    packuswb    m5,             m4
+    mova        [r0 + 288],     m5
+
+; mode 21
+
+    pmaddwd     m4,             m1,     [r3 + 8 * 16]
+    pmaddwd     m6,             m3,     [r2 + 7 * 16]
+
+    packssdw    m4,             m6
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    pmaddwd     m5,             m3,     [r3 + 6 * 16]
+
+    pand        m0,             [pw_FFFFFFFFFFFFFFF0]
+    pand        m7,             [pb_000000000000000F]
+    por         m0,             m7              ;m0 = word[C, A, A, 0, 0, 1, 1, 2]
+
+    pmaddwd     m0,             [r2 + 5 * 16]
+    packssdw    m5,             m0
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    packuswb    m4,             m5
+    mova        [r0 + 304],     m4
+
+; mode 22
+
+    pmaddwd     m4,             m1,     [r2 - 4 * 16]
+    packssdw    m4,             m4
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    mova        m0,             [r3 + 5 * 16]
+    pmaddwd     m5,             m3,     [r2 + 2 * 16]
+    pmaddwd     m6,             m3,     m0
+
+    packssdw    m5,             m6
+    paddw       m5,             m2
+    psraw       m5,             5
+
+    packuswb    m4,             m5
+    movd        [r0 + 320],     m4
+    psrldq      m4,             8
+    movh        [r0 + 328],     m4
+
+; mode 23
+
+    pmaddwd     m4,             m1,     [r2 + 0 * 16]
+    pmaddwd     m5,             m1,     [r3 + 7 * 16]
+
+    packssdw    m4,             m5
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    pmaddwd     m6,             m1,     [r3 - 2 * 16]
+
+    pand        m3,             [pw_FFFFFFFFFFFFFFF0]
+    por         m3,             m7              ;m3 = word[C, 0, 0, 1, 1, 2, 2, 3]
+
+    pmaddwd     m3,             [r2 + 5 * 16]
+    packssdw    m6,             m3
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    packuswb    m4,             m6
+    mova        [r0 + 336],     m4
+
+; mode 24
+
+    pmaddwd     m4,             m1,     [r2 + 4 * 16]
+    pmaddwd     m5,             m1,     [r2 - 1 * 16]
+
+    packssdw    m4,             m5
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    pmaddwd     m6,             m1,     [r2 - 6 * 16]
+    pmaddwd     m0,             m1
+
+    packssdw    m6,             m0
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    packuswb    m4,             m6
+    mova        [r0 + 352],     m4
+
+; mode 25
+
+    pmaddwd     m4,             m1,     [r2 + 7 * 16]
+    pmaddwd     m5,             m1,     [r2 + 5 * 16]
+
+    packssdw    m4,             m5
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    pmaddwd     m6,             m1,     [r2 + 3 * 16]
+    pmaddwd     m1,             [r2 + 1 * 16]
+
+    packssdw    m6,             m1
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    packuswb    m4,             m6
+    mova        [r0 + 368],     m4
+
+; mode 27
+
+    movh        m0,             [r1 + 1]
+    pxor        m7,             m7
+    punpcklbw   m0,             m0
+    psrldq      m0,             1
+    movh        m1,             m0
+    psrldq      m0,             2
+    movh        m3,             m0
+    psrldq      m0,             2
+    punpcklbw   m1,             m7              ;m1 = word[1, 2, 2, 3, 3, 4, 4, 5]
+    punpcklbw   m3,             m7              ;m3 = word[2, 3, 3, 4, 4, 5, 5, 6]
+    punpcklbw   m0,             m7              ;m0 = word[3, 4, 4, 5, 5, 6, 6, 7]
+
+    mova        m7,             [r3 - 3 * 16]
+
+    pmaddwd     m4,             m1,     [r3 - 5 * 16]
+    pmaddwd     m5,             m1,     m7
+
+    packssdw    m4,             m5
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    pmaddwd     m6,             m1,     [r3 - 1 * 16]
+    pmaddwd     m5,             m1,     [r3 + 1 * 16]
+
+    packssdw    m6,             m5
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    packuswb    m4,             m6
+    mova        [r0 + 400],     m4
+
+; mode 28
+
+    pmaddwd     m4,             m1,     [r3 - 2 * 16]
+    pmaddwd     m5,             m1,     [r3 + 3 * 16]
+
+    packssdw    m4,             m5
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    pmaddwd     m6,             m1,     [r3 + 8 * 16]
+    pmaddwd     m5,             m1,     [r2 - 3 * 16]
+
+    packssdw    m6,             m5
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    packuswb    m4,             m6
+    mova        [r0 + 416],     m4
+
+; mode 29
+
+    pmaddwd     m4,             m1,     [r3 + 2 * 16]
+    pmaddwd     m6,             m1,     [r2 - 5 * 16]
+
+    packssdw    m4,             m6
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    pmaddwd     m6,             m1,     [r2 + 4 * 16]
+    pmaddwd     m5,             m3,     m7
+
+    packssdw    m6,             m5
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    packuswb    m4,             m6
+    mova        [r0 + 432],     m4
+
+; mode 30
+
+    pmaddwd     m4,             m1,     [r3 + 6 * 16]
+    pmaddwd     m5,             m1,     [r2 + 3 * 16]
+
+    packssdw    m4,             m5
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    pmaddwd     m6,             m3,     [r3 + 0 * 16]
+    pmaddwd     m5,             m3,     [r2 - 3 * 16]
+
+    packssdw    m6,             m5
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    packuswb    m4,             m6
+    mova        [r0 + 448],     m4
+    psrldq      m4,             4
+    movh        [r0 + 496],     m4              ;mode 33 row 0
+    psrldq      m4,             8
+    movd        [r0 + 500],     m4              ;mode 33 row 1
+
+; mode 31
+
+    pmaddwd     m4,             m1,     [r2 - 6 * 16]
+    pmaddwd     m5,             m3,     [r3 - 5 * 16]
+
+    packssdw    m4,             m5
+    paddw       m4,             m2
+    psraw       m4,             5
+
+    pmaddwd     m6,             m3,     [r2 - 4 * 16]
+    pmaddwd     m7,             m0
+
+    packssdw    m6,             m7
+    paddw       m6,             m2
+    psraw       m6,             5
+
+    packuswb    m4,             m6
+    mova        [r0 + 464],     m4
+
+; mode 32
+
+    pmaddwd     m1,             [r2 - 2 * 16]
+    pmaddwd     m5,             m3,     [r3 + 3 * 16]
+
+    packssdw    m1,             m5
+    paddw       m1,             m2
+    psraw       m1,             5
+
+    pmaddwd     m3,             [r2 + 8 * 16]
+    pmaddwd     m5,             m0,     [r2 - 3 * 16]
+    packssdw    m3,             m5
+    paddw       m3,             m2
+    psraw       m3,             5
+
+    packuswb    m1,             m3
+    mova        [r0 + 480],     m1
+
+; mode 33
+
+    pmaddwd     m0,             [r3 + 7 * 16]
+    pxor        m7,             m7
+    movh        m4,             [r1 + 4]
+    punpcklbw   m4,             m4
+    psrldq      m4,             1
+    punpcklbw   m4,             m7
+
+    pmaddwd     m4,             [r3 + 1 * 16]
+
+    packssdw    m0,             m4
+    paddw       m0,             m2
+    psraw       m0,             5
+
+    packuswb    m0,             m0
+    movh        [r0 + 504],     m0
+
+; mode 34
+
+    movh        m7,             [r1 + 2]
+    movd        [r0 + 512],     m7              ;byte[2, 3, 4, 5]
+
+    psrldq      m7,             1
+    movd        [r0 + 516],     m7              ;byte[3, 4, 5, 6]
+
+    psrldq      m7,             1
+    movd        [r0 + 520],     m7              ;byte[4, 5, 6, 7]
+
+    psrldq      m7,             1
+    movd        [r0 + 524],     m7              ;byte[5, 6, 7, 8]
+
+RET
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/ipfilter16.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,13007 @@
+;*****************************************************************************
+;* Copyright (C) 2013 x265 project
+;*
+;* Authors: Nabajit Deka <nabajit@multicorewareinc.com>
+;*          Murugan Vairavel <murugan@multicorewareinc.com>
+;*          Min Chen <chenm003@163.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+
+%define INTERP_OFFSET_PP        pd_32
+%define INTERP_SHIFT_PP         6
+
+%if BIT_DEPTH == 10
+    %define INTERP_SHIFT_PS         2
+    %define INTERP_OFFSET_PS        pd_n32768
+    %define INTERP_SHIFT_SP         10
+    %define INTERP_OFFSET_SP        pd_524800
+%elif BIT_DEPTH == 12
+    %define INTERP_SHIFT_PS         4
+    %define INTERP_OFFSET_PS        pd_n131072
+    %define INTERP_SHIFT_SP         8
+    %define INTERP_OFFSET_SP        pd_524416
+%else
+    %error Unsupport bit depth!
+%endif
+
+
+SECTION_RODATA 32
+
+tab_c_32:         times 8 dd 32               ; dword rounding constant 32
+tab_c_524800:     times 4 dd 524800           ; SP-path rounding offset (10-bit; see INTERP_OFFSET_SP)
+tab_c_n8192:      times 8 dw -8192            ; word constant -8192
+pd_524800:        times 8 dd 524800           ; dword 524800 (referenced via INTERP_OFFSET_SP at BIT_DEPTH==10)
+
+tab_Tm16:         db 0, 1, 2, 3, 4,  5,  6, 7, 2, 3, 4,  5, 6, 7, 8, 9    ; byte shuffle pattern (overlapping pixel pairs)
+
+; 4-tap chroma interpolation filter taps, one row per fractional position 0-7
+tab_ChromaCoeff:  dw  0, 64,  0,  0
+                  dw -2, 58, 10, -2
+                  dw -4, 54, 16, -2
+                  dw -6, 46, 28, -4
+                  dw -4, 36, 36, -4
+                  dw -4, 28, 46, -6
+                  dw -2, 16, 54, -4
+                  dw -2, 10, 58, -2
+
+; chroma taps with coefficient pairs repeated 8x, for vertical filtering via pmaddwd
+const tab_ChromaCoeffV,  times 8 dw 0, 64
+                         times 8 dw 0, 0
+
+                         times 8 dw -2, 58
+                         times 8 dw 10, -2
+
+                         times 8 dw -4, 54
+                         times 8 dw 16, -2
+
+                         times 8 dw -6, 46
+                         times 8 dw 28, -4
+
+                         times 8 dw -4, 36
+                         times 8 dw 36, -4
+
+                         times 8 dw -4, 28
+                         times 8 dw 46, -6
+
+                         times 8 dw -2, 16
+                         times 8 dw 54, -4
+
+                         times 8 dw -2, 10
+                         times 8 dw 58, -2
+
+; same layout as tab_ChromaCoeffV (local, non-exported symbol)
+tab_ChromaCoeffVer: times 8 dw 0, 64
+                    times 8 dw 0, 0
+
+                    times 8 dw -2, 58
+                    times 8 dw 10, -2
+
+                    times 8 dw -4, 54
+                    times 8 dw 16, -2
+
+                    times 8 dw -6, 46
+                    times 8 dw 28, -4
+
+                    times 8 dw -4, 36
+                    times 8 dw 36, -4
+
+                    times 8 dw -4, 28
+                    times 8 dw 46, -6
+
+                    times 8 dw -2, 16
+                    times 8 dw 54, -4
+
+                    times 8 dw -2, 10
+                    times 8 dw 58, -2
+
+; 8-tap luma interpolation filter taps, one row (16 bytes) per fractional position 0-3
+tab_LumaCoeff:    dw   0, 0,  0,  64,  0,   0,  0,  0
+                  dw  -1, 4, -10, 58,  17, -5,  1,  0
+                  dw  -1, 4, -11, 40,  40, -11, 4, -1
+                  dw   0, 1, -5,  17,  58, -10, 4, -1
+
+; luma taps with coefficient pairs repeated 4x, for vertical filtering via pmaddwd
+tab_LumaCoeffV:   times 4 dw 0, 0
+                  times 4 dw 0, 64
+                  times 4 dw 0, 0
+                  times 4 dw 0, 0
+
+                  times 4 dw -1, 4
+                  times 4 dw -10, 58
+                  times 4 dw 17, -5
+                  times 4 dw 1, 0
+
+                  times 4 dw -1, 4
+                  times 4 dw -11, 40
+                  times 4 dw 40, -11
+                  times 4 dw 4, -1
+
+                  times 4 dw 0, 1
+                  times 4 dw -5, 17
+                  times 4 dw 58, -10
+                  times 4 dw 4, -1
+ALIGN 32
+; luma taps with coefficient pairs repeated 8x (32-byte rows)
+tab_LumaCoeffVer: times 8 dw 0, 0
+                  times 8 dw 0, 64
+                  times 8 dw 0, 0
+                  times 8 dw 0, 0
+
+                  times 8 dw -1, 4
+                  times 8 dw -10, 58
+                  times 8 dw 17, -5
+                  times 8 dw 1, 0
+
+                  times 8 dw -1, 4
+                  times 8 dw -11, 40
+                  times 8 dw 40, -11
+                  times 8 dw 4, -1
+
+                  times 8 dw 0, 1
+                  times 8 dw -5, 17
+                  times 8 dw 58, -10
+                  times 8 dw 4, -1
+
+const interp8_hps_shuf,     dd 0, 4, 1, 5, 2, 6, 3, 7                      ; dword interleave permutation
+
+const interp8_hpp_shuf,     db 0, 1, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 7, 8, 9          ; byte shuffle: overlapping word pairs
+                            db 4, 5, 6, 7, 8, 9, 10, 11, 6, 7, 8, 9, 10, 11, 12, 13
+
+const pb_shuf,  db 0, 1, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 7, 8, 9                      ; same pattern as interp8_hpp_shuf
+                db 4, 5, 6, 7, 8, 9, 10, 11, 6, 7, 8, 9, 10, 11, 12, 13
+
+
+SECTION .text
+cextern pd_8
+cextern pd_32
+cextern pw_pixel_max
+cextern pd_524416
+cextern pd_n32768
+cextern pd_n131072
+cextern pw_2000
+cextern idct8_shuf2
+
+%macro FILTER_LUMA_HOR_4_sse2 1 ; 8-tap horizontal luma filter, 4 outputs; %1 = src byte offset; in: m0 = taps, m1 = round offset; out: m4 = 4 dword sums
+    movu        m4,     [r0 + %1]       ; m4 = src[0-7]
+    movu        m5,     [r0 + %1 + 2]   ; m5 = src[1-8]
+    pmaddwd     m4,     m0              ; multiply by taps; pairs of products summed into dwords
+    pmaddwd     m5,     m0
+    pshufd      m2,     m4,     q2301   ; swap adjacent dwords for a horizontal add
+    paddd       m4,     m2
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m4,     m4,     q3120   ; move the two valid half-sums into the low qword
+    pshufd      m5,     m5,     q3120
+    punpcklqdq  m4,     m5              ; m4 = half-sums for output pixels 0 and 1
+
+    movu        m5,     [r0 + %1 + 4]   ; m5 = src[2-9]
+    movu        m3,     [r0 + %1 + 6]   ; m3 = src[3-10]
+    pmaddwd     m5,     m0
+    pmaddwd     m3,     m0
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m2,     m3,     q2301
+    paddd       m3,     m2
+    pshufd      m5,     m5,     q3120
+    pshufd      m3,     m3,     q3120
+    punpcklqdq  m5,     m3              ; m5 = half-sums for output pixels 2 and 3
+
+    pshufd      m2,     m4,     q2301   ; final reduction: add the two 4-tap halves of each pixel
+    paddd       m4,     m2
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m4,     m4,     q3120
+    pshufd      m5,     m5,     q3120
+    punpcklqdq  m4,     m5              ; m4 = full 8-tap dword sums for pixels 0-3
+    paddd       m4,     m1              ; add rounding offset
+%endmacro
+
+%macro FILTER_LUMA_HOR_8_sse2 1 ; 8-tap horizontal luma filter, 8 outputs; %1 = src byte offset; in: m0 = taps, m1 = round offset; out: m4 = pixels 0-3, m5 = pixels 4-7
+    movu        m4,     [r0 + %1]       ; m4 = src[0-7]
+    movu        m5,     [r0 + %1 + 2]   ; m5 = src[1-8]
+    pmaddwd     m4,     m0              ; multiply by taps; pairs of products summed into dwords
+    pmaddwd     m5,     m0
+    pshufd      m2,     m4,     q2301   ; swap adjacent dwords for a horizontal add
+    paddd       m4,     m2
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m4,     m4,     q3120   ; move the two valid half-sums into the low qword
+    pshufd      m5,     m5,     q3120
+    punpcklqdq  m4,     m5              ; m4 = half-sums for output pixels 0 and 1
+
+    movu        m5,     [r0 + %1 + 4]   ; m5 = src[2-9]
+    movu        m3,     [r0 + %1 + 6]   ; m3 = src[3-10]
+    pmaddwd     m5,     m0
+    pmaddwd     m3,     m0
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m2,     m3,     q2301
+    paddd       m3,     m2
+    pshufd      m5,     m5,     q3120
+    pshufd      m3,     m3,     q3120
+    punpcklqdq  m5,     m3              ; m5 = half-sums for output pixels 2 and 3
+
+    pshufd      m2,     m4,     q2301   ; reduce: add the two 4-tap halves of each pixel
+    paddd       m4,     m2
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m4,     m4,     q3120
+    pshufd      m5,     m5,     q3120
+    punpcklqdq  m4,     m5              ; m4 = full 8-tap dword sums for pixels 0-3
+    paddd       m4,     m1              ; add rounding offset
+
+    movu        m5,     [r0 + %1 + 8]   ; m5 = src[4-11]
+    movu        m6,     [r0 + %1 + 10]  ; m6 = src[5-12]
+    pmaddwd     m5,     m0
+    pmaddwd     m6,     m0
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m2,     m6,     q2301
+    paddd       m6,     m2
+    pshufd      m5,     m5,     q3120
+    pshufd      m6,     m6,     q3120
+    punpcklqdq  m5,     m6              ; m5 = half-sums for output pixels 4 and 5
+
+    movu        m6,     [r0 + %1 + 12]  ; m6 = src[6-13]
+    movu        m3,     [r0 + %1 + 14]  ; m3 = src[7-14]
+    pmaddwd     m6,     m0
+    pmaddwd     m3,     m0
+    pshufd      m2,     m6,     q2301
+    paddd       m6,     m2
+    pshufd      m2,     m3,     q2301
+    paddd       m3,     m2
+    pshufd      m6,     m6,     q3120
+    pshufd      m3,     m3,     q3120
+    punpcklqdq  m6,     m3              ; m6 = half-sums for output pixels 6 and 7
+
+    pshufd      m2,     m5,     q2301   ; reduce second group of four pixels
+    paddd       m5,     m2
+    pshufd      m2,     m6,     q2301
+    paddd       m6,     m2
+    pshufd      m5,     m5,     q3120
+    pshufd      m6,     m6,     q3120
+    punpcklqdq  m5,     m6              ; m5 = full 8-tap dword sums for pixels 4-7
+    paddd       m5,     m1              ; add rounding offset
+%endmacro
+
+;------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_p%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;------------------------------------------------------------------------------------------------------------
+%macro FILTER_HOR_LUMA_sse2 3 ; %1 = width, %2 = height, %3 = pp (pixel->pixel) or ps (pixel->short)
+INIT_XMM sse2
+cglobal interp_8tap_horiz_%3_%1x%2, 4, 7, 8
+    mov         r4d,    r4m             ; r4d = coeffIdx
+    sub         r0,     6               ; back up 3 samples (6 bytes) for the 8-tap window
+    shl         r4d,    4               ; coeffIdx * 16 = byte offset of coefficient row
+    add         r1d,    r1d             ; srcStride: samples -> bytes (16-bit pixels)
+    add         r3d,    r3d             ; dstStride: samples -> bytes
+
+%ifdef PIC
+    lea         r6,     [tab_LumaCoeff]
+    mova        m0,     [r6 + r4]       ; m0 = 8 filter taps for this coeffIdx
+%else
+    mova        m0,     [tab_LumaCoeff + r4]
+%endif
+
+%ifidn %3, pp
+    mova        m1,     [pd_32]         ; pp: round offset 32, paired with >> 6 below
+    pxor        m7,     m7              ; clip floor = 0
+%else
+    mova        m1,     [INTERP_OFFSET_PS] ; ps: bit-depth-dependent offset
+%endif
+
+    mov         r4d,    %2              ; row counter = height
+%ifidn %3, ps
+    cmp         r5m,    byte 0          ; isRowExt?
+    je          .loopH
+    lea         r6,     [r1 + 2 * r1]
+    sub         r0,     r6              ; start 3 rows above the block
+    add         r4d,    7               ; and produce height + 7 rows
+%endif
+
+.loopH:
+%assign x 0
+%rep %1/8                               ; process the row in 8-pixel groups
+    FILTER_LUMA_HOR_8_sse2 x
+
+%ifidn %3, pp
+    psrad       m4,     6               ; (sum + 32) >> 6
+    psrad       m5,     6
+    packssdw    m4,     m5
+    CLIPW       m4,     m7,     [pw_pixel_max] ; clamp to valid pixel range
+%else
+  %if BIT_DEPTH == 10
+    psrad       m4,     2               ; >> INTERP_SHIFT_PS
+    psrad       m5,     2
+  %elif BIT_DEPTH == 12
+    psrad       m4,     4
+    psrad       m5,     4
+  %endif
+    packssdw    m4,     m5
+%endif
+
+    movu        [r2 + x], m4
+%assign x x+16
+%endrep
+
+%rep (%1 % 8)/4                         ; remaining 4-pixel group (widths 4 and 12)
+    FILTER_LUMA_HOR_4_sse2 x
+
+%ifidn %3, pp
+    psrad       m4,     6
+    packssdw    m4,     m4
+    CLIPW       m4,     m7,     [pw_pixel_max]
+%else
+  %if BIT_DEPTH == 10
+    psrad       m4,     2
+  %elif BIT_DEPTH == 12
+    psrad       m4,     4
+  %endif
+    packssdw    m4,     m4
+%endif
+
+    movh        [r2 + x], m4
+%endrep
+
+    add         r0,     r1              ; advance one source row
+    add         r2,     r3              ; advance one destination row
+
+    dec         r4d
+    jnz         .loopH
+    RET
+
+%endmacro
+
+;------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;------------------------------------------------------------------------------------------------------------
+    FILTER_HOR_LUMA_sse2 4, 4, pp
+    FILTER_HOR_LUMA_sse2 4, 8, pp
+    FILTER_HOR_LUMA_sse2 4, 16, pp
+    FILTER_HOR_LUMA_sse2 8, 4, pp
+    FILTER_HOR_LUMA_sse2 8, 8, pp
+    FILTER_HOR_LUMA_sse2 8, 16, pp
+    FILTER_HOR_LUMA_sse2 8, 32, pp
+    FILTER_HOR_LUMA_sse2 12, 16, pp
+    FILTER_HOR_LUMA_sse2 16, 4, pp
+    FILTER_HOR_LUMA_sse2 16, 8, pp
+    FILTER_HOR_LUMA_sse2 16, 12, pp
+    FILTER_HOR_LUMA_sse2 16, 16, pp
+    FILTER_HOR_LUMA_sse2 16, 32, pp
+    FILTER_HOR_LUMA_sse2 16, 64, pp
+    FILTER_HOR_LUMA_sse2 24, 32, pp
+    FILTER_HOR_LUMA_sse2 32, 8, pp
+    FILTER_HOR_LUMA_sse2 32, 16, pp
+    FILTER_HOR_LUMA_sse2 32, 24, pp
+    FILTER_HOR_LUMA_sse2 32, 32, pp
+    FILTER_HOR_LUMA_sse2 32, 64, pp
+    FILTER_HOR_LUMA_sse2 48, 64, pp
+    FILTER_HOR_LUMA_sse2 64, 16, pp
+    FILTER_HOR_LUMA_sse2 64, 32, pp
+    FILTER_HOR_LUMA_sse2 64, 48, pp
+    FILTER_HOR_LUMA_sse2 64, 64, pp
+
+;---------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;---------------------------------------------------------------------------------------------------------------------------
+    FILTER_HOR_LUMA_sse2 4, 4, ps
+    FILTER_HOR_LUMA_sse2 4, 8, ps
+    FILTER_HOR_LUMA_sse2 4, 16, ps
+    FILTER_HOR_LUMA_sse2 8, 4, ps
+    FILTER_HOR_LUMA_sse2 8, 8, ps
+    FILTER_HOR_LUMA_sse2 8, 16, ps
+    FILTER_HOR_LUMA_sse2 8, 32, ps
+    FILTER_HOR_LUMA_sse2 12, 16, ps
+    FILTER_HOR_LUMA_sse2 16, 4, ps
+    FILTER_HOR_LUMA_sse2 16, 8, ps
+    FILTER_HOR_LUMA_sse2 16, 12, ps
+    FILTER_HOR_LUMA_sse2 16, 16, ps
+    FILTER_HOR_LUMA_sse2 16, 32, ps
+    FILTER_HOR_LUMA_sse2 16, 64, ps
+    FILTER_HOR_LUMA_sse2 24, 32, ps
+    FILTER_HOR_LUMA_sse2 32, 8, ps
+    FILTER_HOR_LUMA_sse2 32, 16, ps
+    FILTER_HOR_LUMA_sse2 32, 24, ps
+    FILTER_HOR_LUMA_sse2 32, 32, ps
+    FILTER_HOR_LUMA_sse2 32, 64, ps
+    FILTER_HOR_LUMA_sse2 48, 64, ps
+    FILTER_HOR_LUMA_sse2 64, 16, ps
+    FILTER_HOR_LUMA_sse2 64, 32, ps
+    FILTER_HOR_LUMA_sse2 64, 48, ps
+    FILTER_HOR_LUMA_sse2 64, 64, ps
+
+; Core of the 8-tap vertical luma filter: produce four output rows of a
+; 4-pixel-wide column.  Reads 11 consecutive source rows (16-bit samples)
+; starting at r0 with stride r1, multiply-accumulates against the four
+; 16-byte interleaved coefficient pairs at [r6 + 0..3*16], and leaves the
+; unrounded 32-bit row sums in m0-m3 (Row1..Row4).
+; Side effects: r0 is advanced by 8 rows; m4-m6 are clobbered.
+%macro PROCESS_LUMA_VER_W4_4R_sse2 0
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+    pmaddwd    m0, [r6 + 0 *16]                ;m0=[0+1]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m1, m4                          ;m1=[1 2]
+    pmaddwd    m1, [r6 + 0 *16]                ;m1=[1+2]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[2 3]
+    pmaddwd    m2, m4, [r6 + 0 *16]            ;m2=[2+3]  Row3
+    pmaddwd    m4, [r6 + 1 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[3 4]
+    pmaddwd    m3, m5, [r6 + 0 *16]            ;m3=[3+4]  Row4
+    pmaddwd    m5, [r6 + 1 * 16]
+    paddd      m1, m5                          ;m1 = [1+2+3+4]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[4 5]
+    pmaddwd    m6, m4, [r6 + 1 * 16]
+    paddd      m2, m6                          ;m2=[2+3+4+5]  Row3
+    pmaddwd    m4, [r6 + 2 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3+4+5]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[5 6]
+    pmaddwd    m6, m5, [r6 + 1 * 16]
+    paddd      m3, m6                          ;m3=[3+4+5+6]  Row4
+    pmaddwd    m5, [r6 + 2 * 16]
+    paddd      m1, m5                          ;m1=[1+2+3+4+5+6]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[6 7]
+    pmaddwd    m6, m4, [r6 + 2 * 16]
+    paddd      m2, m6                          ;m2=[2+3+4+5+6+7]  Row3
+    pmaddwd    m4, [r6 + 3 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3+4+5+6+7]  Row1 end
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[7 8]
+    pmaddwd    m6, m5, [r6 + 2 * 16]
+    paddd      m3, m6                          ;m3=[3+4+5+6+7+8]  Row4
+    pmaddwd    m5, [r6 + 3 * 16]
+    paddd      m1, m5                          ;m1=[1+2+3+4+5+6+7+8]  Row2 end
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[8 9]
+    pmaddwd    m4, [r6 + 3 * 16]
+    paddd      m2, m4                          ;m2=[2+3+4+5+6+7+8+9]  Row3 end
+
+    movq       m4, [r0 + 2 * r1]
+    punpcklwd  m5, m4                          ;m5=[9 10]
+    pmaddwd    m5, [r6 + 3 * 16]
+    paddd      m3, m5                          ;m3=[3+4+5+6+7+8+9+10]  Row4 end
+%endmacro
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%1_%2x%3(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+; %1 = pp/ps variant, %2 = width, %3 = height (must be a multiple of 4).
+; Processes the block in 4-row bands, each band in 4-pixel-wide columns.
+%macro FILTER_VER_LUMA_sse2 3
+INIT_XMM sse2
+cglobal interp_8tap_vert_%1_%2x%3, 5, 7, 8
+
+    ; strides arrive in pixels; samples are 16-bit, so convert to bytes
+    add       r1d, r1d
+    add       r3d, r3d
+    ; back the source pointer up 3 rows (r5 = 3 * srcStride) for the 8 taps
+    lea       r5, [r1 + 2 * r1]
+    sub       r0, r5
+    ; each coefficient set in tab_LumaCoeffV is 64 bytes (4 x 16)
+    shl       r4d, 6
+
+%ifdef PIC
+    lea       r5, [tab_LumaCoeffV]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_LumaCoeffV + r4]
+%endif
+
+; m7 = rounding offset, SHIFT = post-accumulate downshift.
+; NOTE(review): in the ps path SHIFT is only defined for BIT_DEPTH 10 or 12;
+; other depths would leave SHIFT undefined here -- confirm build constraints.
+%ifidn %1,pp
+    mova      m7, [INTERP_OFFSET_PP]
+%define SHIFT 6
+%elifidn %1,ps
+    mova      m7, [INTERP_OFFSET_PS]
+  %if BIT_DEPTH == 10
+    %define SHIFT 2
+  %elif BIT_DEPTH == 12
+    %define SHIFT 4
+  %endif
+%endif
+
+    mov         r4d, %3/4
+.loopH:
+%assign x 0
+%rep %2/4
+    ; four rows of one 4-pixel column -> 32-bit sums in m0-m3
+    PROCESS_LUMA_VER_W4_4R_sse2
+
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+
+    psrad     m0, SHIFT
+    psrad     m1, SHIFT
+    psrad     m2, SHIFT
+    psrad     m3, SHIFT
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+
+%ifidn %1,pp
+    ; pp output is clamped to the valid pixel range
+    pxor      m1, m1
+    CLIPW2    m0, m2, m1, [pw_pixel_max]
+%endif
+
+    movh      [r2 + x], m0
+    movhps    [r2 + r3 + x], m0
+    lea       r5, [r2 + 2 * r3]
+    movh      [r5 + x], m2
+    movhps    [r5 + r3 + x], m2
+
+    ; undo the 8-row advance of the helper and step 4 pixels (8 bytes) right
+    lea       r5, [8 * r1 - 2 * 4]
+    sub       r0, r5
+%assign x x+8
+%endrep
+
+    ; next band: down 4 rows, rewind the %2 columns consumed above
+    lea       r0, [r0 + 4 * r1 - 2 * %2]
+    lea       r2, [r2 + 4 * r3]
+
+    dec         r4d
+    jnz       .loopH
+
+    RET
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_%2x%3(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+; Instantiate the SSE2 8-tap vertical luma filter (pp) for all luma sizes.
+    FILTER_VER_LUMA_sse2 pp, 4, 4
+    FILTER_VER_LUMA_sse2 pp, 8, 8
+    FILTER_VER_LUMA_sse2 pp, 8, 4
+    FILTER_VER_LUMA_sse2 pp, 4, 8
+    FILTER_VER_LUMA_sse2 pp, 16, 16
+    FILTER_VER_LUMA_sse2 pp, 16, 8
+    FILTER_VER_LUMA_sse2 pp, 8, 16
+    FILTER_VER_LUMA_sse2 pp, 16, 12
+    FILTER_VER_LUMA_sse2 pp, 12, 16
+    FILTER_VER_LUMA_sse2 pp, 16, 4
+    FILTER_VER_LUMA_sse2 pp, 4, 16
+    FILTER_VER_LUMA_sse2 pp, 32, 32
+    FILTER_VER_LUMA_sse2 pp, 32, 16
+    FILTER_VER_LUMA_sse2 pp, 16, 32
+    FILTER_VER_LUMA_sse2 pp, 32, 24
+    FILTER_VER_LUMA_sse2 pp, 24, 32
+    FILTER_VER_LUMA_sse2 pp, 32, 8
+    FILTER_VER_LUMA_sse2 pp, 8, 32
+    FILTER_VER_LUMA_sse2 pp, 64, 64
+    FILTER_VER_LUMA_sse2 pp, 64, 32
+    FILTER_VER_LUMA_sse2 pp, 32, 64
+    FILTER_VER_LUMA_sse2 pp, 64, 48
+    FILTER_VER_LUMA_sse2 pp, 48, 64
+    FILTER_VER_LUMA_sse2 pp, 64, 16
+    FILTER_VER_LUMA_sse2 pp, 16, 64
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_%2x%3(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+; Instantiate the SSE2 8-tap vertical luma filter (ps) for all luma sizes.
+    FILTER_VER_LUMA_sse2 ps, 4, 4
+    FILTER_VER_LUMA_sse2 ps, 8, 8
+    FILTER_VER_LUMA_sse2 ps, 8, 4
+    FILTER_VER_LUMA_sse2 ps, 4, 8
+    FILTER_VER_LUMA_sse2 ps, 16, 16
+    FILTER_VER_LUMA_sse2 ps, 16, 8
+    FILTER_VER_LUMA_sse2 ps, 8, 16
+    FILTER_VER_LUMA_sse2 ps, 16, 12
+    FILTER_VER_LUMA_sse2 ps, 12, 16
+    FILTER_VER_LUMA_sse2 ps, 16, 4
+    FILTER_VER_LUMA_sse2 ps, 4, 16
+    FILTER_VER_LUMA_sse2 ps, 32, 32
+    FILTER_VER_LUMA_sse2 ps, 32, 16
+    FILTER_VER_LUMA_sse2 ps, 16, 32
+    FILTER_VER_LUMA_sse2 ps, 32, 24
+    FILTER_VER_LUMA_sse2 ps, 24, 32
+    FILTER_VER_LUMA_sse2 ps, 32, 8
+    FILTER_VER_LUMA_sse2 ps, 8, 32
+    FILTER_VER_LUMA_sse2 ps, 64, 64
+    FILTER_VER_LUMA_sse2 ps, 64, 32
+    FILTER_VER_LUMA_sse2 ps, 32, 64
+    FILTER_VER_LUMA_sse2 ps, 64, 48
+    FILTER_VER_LUMA_sse2 ps, 48, 64
+    FILTER_VER_LUMA_sse2 ps, 64, 16
+    FILTER_VER_LUMA_sse2 ps, 16, 64
+
+; 4-tap horizontal chroma filter: 2 output pixels x 4 rows at byte offset %1.
+; Expects m0 = duplicated coefficients, m1 = rounding offset,
+; r4 = 3*srcStride, r5 = 3*dstStride (set by the caller).
+; %2 selects pp (shift 6 + clip with m7=0 / m6=pw_pixel_max) or ps output.
+%macro FILTERH_W2_4_sse3 2
+    movh        m3,     [r0 + %1]
+    movhps      m3,     [r0 + %1 + 2]
+    pmaddwd     m3,     m0
+    movh        m4,     [r0 + r1 + %1]
+    movhps      m4,     [r0 + r1 + %1 + 2]
+    pmaddwd     m4,     m0
+    ; horizontal add of the pmaddwd pairs via shuffle + add
+    pshufd      m2,     m3,     q2301
+    paddd       m3,     m2
+    pshufd      m2,     m4,     q2301
+    paddd       m4,     m2
+    pshufd      m3,     m3,     q3120
+    pshufd      m4,     m4,     q3120
+    punpcklqdq  m3,     m4
+    paddd       m3,     m1
+    movh        m5,     [r0 + 2 * r1 + %1]
+    movhps      m5,     [r0 + 2 * r1 + %1 + 2]
+    pmaddwd     m5,     m0
+    movh        m4,     [r0 + r4 + %1]
+    movhps      m4,     [r0 + r4 + %1 + 2]
+    pmaddwd     m4,     m0
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m2,     m4,     q2301
+    paddd       m4,     m2
+    pshufd      m5,     m5,     q3120
+    pshufd      m4,     m4,     q3120
+    punpcklqdq  m5,     m4
+    paddd       m5,     m1
+%ifidn %2, pp
+    psrad       m3,     6
+    psrad       m5,     6
+    packssdw    m3,     m5
+    CLIPW       m3,     m7,     m6
+%else
+    psrad       m3,     INTERP_SHIFT_PS
+    psrad       m5,     INTERP_SHIFT_PS
+    packssdw    m3,     m5
+%endif
+    ; scatter one dword (2 pixels) per destination row
+    movd        [r2 + %1], m3
+    psrldq      m3,     4
+    movd        [r2 + r3 + %1], m3
+    psrldq      m3,     4
+    movd        [r2 + r3 * 2 + %1], m3
+    psrldq      m3,     4
+    movd        [r2 + r5 + %1], m3
+%endmacro
+
+; 4-tap horizontal chroma filter: 2 output pixels x 3 rows at byte offset %1.
+; Used only by the ps row-extension prolog, so it always shifts by
+; INTERP_SHIFT_PS and never clips.  Same register contract as FILTERH_W2_4.
+%macro FILTERH_W2_3_sse3 1
+    movh        m3,     [r0 + %1]
+    movhps      m3,     [r0 + %1 + 2]
+    pmaddwd     m3,     m0
+    movh        m4,     [r0 + r1 + %1]
+    movhps      m4,     [r0 + r1 + %1 + 2]
+    pmaddwd     m4,     m0
+    pshufd      m2,     m3,     q2301
+    paddd       m3,     m2
+    pshufd      m2,     m4,     q2301
+    paddd       m4,     m2
+    pshufd      m3,     m3,     q3120
+    pshufd      m4,     m4,     q3120
+    punpcklqdq  m3,     m4
+    paddd       m3,     m1
+
+    movh        m5,     [r0 + 2 * r1 + %1]
+    movhps      m5,     [r0 + 2 * r1 + %1 + 2]
+    pmaddwd     m5,     m0
+
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m5,     m5,     q3120
+    paddd       m5,     m1
+
+    psrad       m3,     INTERP_SHIFT_PS
+    psrad       m5,     INTERP_SHIFT_PS
+    packssdw    m3,     m5
+
+    movd        [r2 + %1], m3
+    psrldq      m3,     4
+    movd        [r2 + r3 + %1], m3
+    psrldq      m3,     4
+    movd        [r2 + r3 * 2 + %1], m3
+%endmacro
+
+; 4-tap horizontal chroma filter: 4 output pixels x 2 rows at byte offset %1.
+; %2 selects pp (shift 6 + clip with m7/m6) or ps (INTERP_SHIFT_PS) output.
+; Same register contract as FILTERH_W2_4.
+%macro FILTERH_W4_2_sse3 2
+    movh        m3,     [r0 + %1]
+    movhps      m3,     [r0 + %1 + 2]
+    pmaddwd     m3,     m0
+    movh        m4,     [r0 + %1 + 4]
+    movhps      m4,     [r0 + %1 + 6]
+    pmaddwd     m4,     m0
+    pshufd      m2,     m3,     q2301
+    paddd       m3,     m2
+    pshufd      m2,     m4,     q2301
+    paddd       m4,     m2
+    pshufd      m3,     m3,     q3120
+    pshufd      m4,     m4,     q3120
+    punpcklqdq  m3,     m4
+    paddd       m3,     m1
+
+    movh        m5,     [r0 + r1 + %1]
+    movhps      m5,     [r0 + r1 + %1 + 2]
+    pmaddwd     m5,     m0
+    movh        m4,     [r0 + r1 + %1 + 4]
+    movhps      m4,     [r0 + r1 + %1 + 6]
+    pmaddwd     m4,     m0
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m2,     m4,     q2301
+    paddd       m4,     m2
+    pshufd      m5,     m5,     q3120
+    pshufd      m4,     m4,     q3120
+    punpcklqdq  m5,     m4
+    paddd       m5,     m1
+%ifidn %2, pp
+    psrad       m3,     6
+    psrad       m5,     6
+    packssdw    m3,     m5
+    CLIPW       m3,     m7,     m6
+%else
+    psrad       m3,     INTERP_SHIFT_PS
+    psrad       m5,     INTERP_SHIFT_PS
+    packssdw    m3,     m5
+%endif
+    movh        [r2 + %1], m3
+    movhps      [r2 + r3 + %1], m3
+%endmacro
+
+; 4-tap horizontal chroma filter: 4 output pixels x 1 row, reading source
+; row 2 ([r0 + 2*r1]) and writing destination row 2.  Used only by the ps
+; row-extension prolog (always INTERP_SHIFT_PS, no clipping).
+%macro FILTERH_W4_1_sse3 1
+    movh        m3,     [r0 + 2 * r1 + %1]
+    movhps      m3,     [r0 + 2 * r1 + %1 + 2]
+    pmaddwd     m3,     m0
+    movh        m4,     [r0 + 2 * r1 + %1 + 4]
+    movhps      m4,     [r0 + 2 * r1 + %1 + 6]
+    pmaddwd     m4,     m0
+    pshufd      m2,     m3,     q2301
+    paddd       m3,     m2
+    pshufd      m2,     m4,     q2301
+    paddd       m4,     m2
+    pshufd      m3,     m3,     q3120
+    pshufd      m4,     m4,     q3120
+    punpcklqdq  m3,     m4
+    paddd       m3,     m1
+
+    psrad       m3,     INTERP_SHIFT_PS
+    packssdw    m3,     m3
+    movh        [r2 + r3 * 2 + %1], m3
+%endmacro
+
+; 4-tap horizontal chroma filter: 8 output pixels x 1 row at byte offset %1.
+; %2 selects pp (shift 6 + clip with m7/m6) or ps (INTERP_SHIFT_PS) output.
+; Same register contract as FILTERH_W2_4.
+%macro FILTERH_W8_1_sse3 2
+    movh        m3,     [r0 + %1]
+    movhps      m3,     [r0 + %1 + 2]
+    pmaddwd     m3,     m0
+    movh        m4,     [r0 + %1 + 4]
+    movhps      m4,     [r0 + %1 + 6]
+    pmaddwd     m4,     m0
+    pshufd      m2,     m3,     q2301
+    paddd       m3,     m2
+    pshufd      m2,     m4,     q2301
+    paddd       m4,     m2
+    pshufd      m3,     m3,     q3120
+    pshufd      m4,     m4,     q3120
+    punpcklqdq  m3,     m4
+    paddd       m3,     m1
+
+    movh        m5,     [r0 + %1 + 8]
+    movhps      m5,     [r0 + %1 + 10]
+    pmaddwd     m5,     m0
+    movh        m4,     [r0 + %1 + 12]
+    movhps      m4,     [r0 + %1 + 14]
+    pmaddwd     m4,     m0
+    pshufd      m2,     m5,     q2301
+    paddd       m5,     m2
+    pshufd      m2,     m4,     q2301
+    paddd       m4,     m2
+    pshufd      m5,     m5,     q3120
+    pshufd      m4,     m4,     q3120
+    punpcklqdq  m5,     m4
+    paddd       m5,     m1
+%ifidn %2, pp
+    psrad       m3,     6
+    psrad       m5,     6
+    packssdw    m3,     m5
+    CLIPW       m3,     m7,     m6
+%else
+    psrad       m3,     INTERP_SHIFT_PS
+    psrad       m5,     INTERP_SHIFT_PS
+    packssdw    m3,     m5
+%endif
+    movdqu      [r2 + %1], m3
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; %1 = width, %2 = height, %3 = pp/ps.  Dispatches to the FILTERH_* helpers
+; per width; rows per pass: width 2 -> 4 rows, width <= 6 -> 2 rows, else 1.
+%macro FILTER_HOR_CHROMA_sse3 3
+INIT_XMM sse3
+cglobal interp_4tap_horiz_%3_%1x%2, 4, 7, 8
+    ; strides to bytes (16-bit samples); back src up 1 pixel for the 4 taps
+    add         r3,     r3
+    add         r1,     r1
+    sub         r0,     2
+    ; coeffIdx doubled so that r4*4 indexes 8-byte coefficient sets
+    mov         r4d,    r4m
+    add         r4d,    r4d
+
+%ifdef PIC
+    lea         r6,     [tab_ChromaCoeff]
+    movddup     m0,     [r6 + r4 * 4]
+%else
+    movddup     m0,     [tab_ChromaCoeff + r4 * 4]
+%endif
+
+%ifidn %3, ps
+    mova        m1,     [INTERP_OFFSET_PS]
+    ; isRowExt != 0: also filter the rows above the block that a following
+    ; vertical pass will need; otherwise jump straight to the main loop
+    cmp         r5m,    byte 0
+%if %1 <= 6
+    lea         r4,     [r1 * 3]
+    lea         r5,     [r3 * 3]
+%endif
+    je          .skip
+    sub         r0,     r1
+; narrow widths emit 3 extension rows in one helper pass; wider widths loop
+%if %1 <= 6
+%assign y 1
+%else
+%assign y 3
+%endif
+%assign z 0
+%rep y
+%assign x 0
+%rep %1/8
+    FILTERH_W8_1_sse3 x, %3
+%assign x x+16
+%endrep
+%if %1 == 4 || (%1 == 6 && z == 0) || (%1 == 12 && z == 0)
+    FILTERH_W4_2_sse3 x, %3
+    FILTERH_W4_1_sse3 x
+%assign x x+8
+%endif
+%if %1 == 2 || (%1 == 6 && z == 0)
+    FILTERH_W2_3_sse3 x
+%endif
+%if %1 <= 6
+    lea         r0,     [r0 + r4]
+    lea         r2,     [r2 + r5]
+%else
+    lea         r0,     [r0 + r1]
+    lea         r2,     [r2 + r3]
+%endif
+%assign z z+1
+%endrep
+.skip:
+%elifidn %3, pp
+    ; pp clipping constants: m7 = 0 (min), m6 = max pixel, m1 = rounding
+    pxor        m7,     m7
+    mova        m6,     [pw_pixel_max]
+    mova        m1,     [tab_c_32]
+%if %1 == 2 || %1 == 6
+    lea         r4,     [r1 * 3]
+    lea         r5,     [r3 * 3]
+%endif
+%endif
+
+; main loop: y = number of passes over the block
+%if %1 == 2
+%assign y %2/4
+%elif %1 <= 6
+%assign y %2/2
+%else
+%assign y %2
+%endif
+%assign z 0
+%rep y
+%assign x 0
+%rep %1/8
+    FILTERH_W8_1_sse3 x, %3
+%assign x x+16
+%endrep
+%if %1 == 4 || %1 == 6 || (%1 == 12 && (z % 2) == 0)
+    FILTERH_W4_2_sse3 x, %3
+%assign x x+8
+%endif
+%if %1 == 2 || (%1 == 6 && (z % 2) == 0)
+    FILTERH_W2_4_sse3 x, %3
+%endif
+%assign z z+1
+%if z < y
+%if %1 == 2
+    lea         r0,     [r0 + 4 * r1]
+    lea         r2,     [r2 + 4 * r3]
+%elif %1 <= 6
+    lea         r0,     [r0 + 2 * r1]
+    lea         r2,     [r2 + 2 * r3]
+%else
+    lea         r0,     [r0 + r1]
+    lea         r2,     [r2 + r3]
+%endif
+%endif ;z < y
+%endrep
+
+    RET
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; Instantiate the SSE3 4-tap horizontal chroma filter (pp) for all sizes.
+
+FILTER_HOR_CHROMA_sse3 2, 4, pp
+FILTER_HOR_CHROMA_sse3 2, 8, pp
+FILTER_HOR_CHROMA_sse3 2, 16, pp
+FILTER_HOR_CHROMA_sse3 4, 2, pp
+FILTER_HOR_CHROMA_sse3 4, 4, pp
+FILTER_HOR_CHROMA_sse3 4, 8, pp
+FILTER_HOR_CHROMA_sse3 4, 16, pp
+FILTER_HOR_CHROMA_sse3 4, 32, pp
+FILTER_HOR_CHROMA_sse3 6, 8, pp
+FILTER_HOR_CHROMA_sse3 6, 16, pp
+FILTER_HOR_CHROMA_sse3 8, 2, pp
+FILTER_HOR_CHROMA_sse3 8, 4, pp
+FILTER_HOR_CHROMA_sse3 8, 6, pp
+FILTER_HOR_CHROMA_sse3 8, 8, pp
+FILTER_HOR_CHROMA_sse3 8, 12, pp
+FILTER_HOR_CHROMA_sse3 8, 16, pp
+FILTER_HOR_CHROMA_sse3 8, 32, pp
+FILTER_HOR_CHROMA_sse3 8, 64, pp
+FILTER_HOR_CHROMA_sse3 12, 16, pp
+FILTER_HOR_CHROMA_sse3 12, 32, pp
+FILTER_HOR_CHROMA_sse3 16, 4, pp
+FILTER_HOR_CHROMA_sse3 16, 8, pp
+FILTER_HOR_CHROMA_sse3 16, 12, pp
+FILTER_HOR_CHROMA_sse3 16, 16, pp
+FILTER_HOR_CHROMA_sse3 16, 24, pp
+FILTER_HOR_CHROMA_sse3 16, 32, pp
+FILTER_HOR_CHROMA_sse3 16, 64, pp
+FILTER_HOR_CHROMA_sse3 24, 32, pp
+FILTER_HOR_CHROMA_sse3 24, 64, pp
+FILTER_HOR_CHROMA_sse3 32, 8, pp
+FILTER_HOR_CHROMA_sse3 32, 16, pp
+FILTER_HOR_CHROMA_sse3 32, 24, pp
+FILTER_HOR_CHROMA_sse3 32, 32, pp
+FILTER_HOR_CHROMA_sse3 32, 48, pp
+FILTER_HOR_CHROMA_sse3 32, 64, pp
+FILTER_HOR_CHROMA_sse3 48, 64, pp
+FILTER_HOR_CHROMA_sse3 64, 16, pp
+FILTER_HOR_CHROMA_sse3 64, 32, pp
+FILTER_HOR_CHROMA_sse3 64, 48, pp
+FILTER_HOR_CHROMA_sse3 64, 64, pp
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; Instantiate the SSE3 4-tap horizontal chroma filter (ps) for all sizes.
+
+FILTER_HOR_CHROMA_sse3 2, 4, ps
+FILTER_HOR_CHROMA_sse3 2, 8, ps
+FILTER_HOR_CHROMA_sse3 2, 16, ps
+FILTER_HOR_CHROMA_sse3 4, 2, ps
+FILTER_HOR_CHROMA_sse3 4, 4, ps
+FILTER_HOR_CHROMA_sse3 4, 8, ps
+FILTER_HOR_CHROMA_sse3 4, 16, ps
+FILTER_HOR_CHROMA_sse3 4, 32, ps
+FILTER_HOR_CHROMA_sse3 6, 8, ps
+FILTER_HOR_CHROMA_sse3 6, 16, ps
+FILTER_HOR_CHROMA_sse3 8, 2, ps
+FILTER_HOR_CHROMA_sse3 8, 4, ps
+FILTER_HOR_CHROMA_sse3 8, 6, ps
+FILTER_HOR_CHROMA_sse3 8, 8, ps
+FILTER_HOR_CHROMA_sse3 8, 12, ps
+FILTER_HOR_CHROMA_sse3 8, 16, ps
+FILTER_HOR_CHROMA_sse3 8, 32, ps
+FILTER_HOR_CHROMA_sse3 8, 64, ps
+FILTER_HOR_CHROMA_sse3 12, 16, ps
+FILTER_HOR_CHROMA_sse3 12, 32, ps
+FILTER_HOR_CHROMA_sse3 16, 4, ps
+FILTER_HOR_CHROMA_sse3 16, 8, ps
+FILTER_HOR_CHROMA_sse3 16, 12, ps
+FILTER_HOR_CHROMA_sse3 16, 16, ps
+FILTER_HOR_CHROMA_sse3 16, 24, ps
+FILTER_HOR_CHROMA_sse3 16, 32, ps
+FILTER_HOR_CHROMA_sse3 16, 64, ps
+FILTER_HOR_CHROMA_sse3 24, 32, ps
+FILTER_HOR_CHROMA_sse3 24, 64, ps
+FILTER_HOR_CHROMA_sse3 32, 8, ps
+FILTER_HOR_CHROMA_sse3 32, 16, ps
+FILTER_HOR_CHROMA_sse3 32, 24, ps
+FILTER_HOR_CHROMA_sse3 32, 32, ps
+FILTER_HOR_CHROMA_sse3 32, 48, ps
+FILTER_HOR_CHROMA_sse3 32, 64, ps
+FILTER_HOR_CHROMA_sse3 48, 64, ps
+FILTER_HOR_CHROMA_sse3 64, 16, ps
+FILTER_HOR_CHROMA_sse3 64, 32, ps
+FILTER_HOR_CHROMA_sse3 64, 48, ps
+FILTER_HOR_CHROMA_sse3 64, 64, ps
+
+; Pixel-to-short conversion, 2 pixels x 4 rows at byte offset %1:
+; out = (pix << (14 - BIT_DEPTH)) - m1, where the caller preloads
+; m1 = [pw_2000] and sets r4 = 3*srcStride, r5 = 3*dstStride.
+%macro FILTER_P2S_2_4_sse2 1
+    movd        m0,     [r0 + %1]
+    movd        m2,     [r0 + r1 * 2 + %1]
+    movhps      m0,     [r0 + r1 + %1]
+    movhps      m2,     [r0 + r4 + %1]
+    psllw       m0,     (14 - BIT_DEPTH)
+    psllw       m2,     (14 - BIT_DEPTH)
+    psubw       m0,     m1
+    psubw       m2,     m1
+
+    movd        [r2 + r3 * 0 + %1], m0
+    movd        [r2 + r3 * 2 + %1], m2
+    movhlps     m0,     m0
+    movhlps     m2,     m2
+    movd        [r2 + r3 * 1 + %1], m0
+    movd        [r2 + r5 + %1], m2
+%endmacro
+
+; Pixel-to-short conversion, 4 pixels x 4 rows at byte offset %1.
+; Same contract as FILTER_P2S_2_4_sse2 (m1 = pw_2000, r4/r5 = 3*strides).
+%macro FILTER_P2S_4_4_sse2 1
+    movh        m0,     [r0 + %1]
+    movhps      m0,     [r0 + r1 + %1]
+    psllw       m0,     (14 - BIT_DEPTH)
+    psubw       m0,     m1
+    movh        [r2 + r3 * 0 + %1], m0
+    movhps      [r2 + r3 * 1 + %1], m0
+
+    movh        m2,     [r0 + r1 * 2 + %1]
+    movhps      m2,     [r0 + r4 + %1]
+    psllw       m2,     (14 - BIT_DEPTH)
+    psubw       m2,     m1
+    movh        [r2 + r3 * 2 + %1], m2
+    movhps      [r2 + r5 + %1], m2
+%endmacro
+
+; Pixel-to-short conversion for the whole 4x2 block.  The caller does NOT
+; double the strides for this path, so [r0 + r1*2] / [r2 + r3*2] address
+; row 1 in byte terms; the offset is loaded directly from [pw_2000].
+%macro FILTER_P2S_4_2_sse2 0
+    movh        m0,     [r0]
+    movhps      m0,     [r0 + r1 * 2]
+    psllw       m0,     (14 - BIT_DEPTH)
+    psubw       m0,     [pw_2000]
+    movh        [r2 + r3 * 0], m0
+    movhps      [r2 + r3 * 2], m0
+%endmacro
+
+; Pixel-to-short conversion, 8 pixels x 4 rows at byte offset %1.
+; Same contract as FILTER_P2S_2_4_sse2 (m1 = pw_2000, r4/r5 = 3*strides).
+%macro FILTER_P2S_8_4_sse2 1
+    movu        m0,     [r0 + %1]
+    movu        m2,     [r0 + r1 + %1]
+    psllw       m0,     (14 - BIT_DEPTH)
+    psllw       m2,     (14 - BIT_DEPTH)
+    psubw       m0,     m1
+    psubw       m2,     m1
+    movu        [r2 + r3 * 0 + %1], m0
+    movu        [r2 + r3 * 1 + %1], m2
+
+    movu        m3,     [r0 + r1 * 2 + %1]
+    movu        m4,     [r0 + r4 + %1]
+    psllw       m3,     (14 - BIT_DEPTH)
+    psllw       m4,     (14 - BIT_DEPTH)
+    psubw       m3,     m1
+    psubw       m4,     m1
+    movu        [r2 + r3 * 2 + %1], m3
+    movu        [r2 + r5 + %1], m4
+%endmacro
+
+; Pixel-to-short conversion, 8 pixels x 2 rows at byte offset %1
+; (m1 = pw_2000, strides already in bytes).
+%macro FILTER_P2S_8_2_sse2 1
+    movu        m0,     [r0 + %1]
+    movu        m2,     [r0 + r1 + %1]
+    psllw       m0,     (14 - BIT_DEPTH)
+    psllw       m2,     (14 - BIT_DEPTH)
+    psubw       m0,     m1
+    psubw       m2,     m1
+    movu        [r2 + r3 * 0 + %1], m0
+    movu        [r2 + r3 * 1 + %1], m2
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+; Copy a %1 x %2 block of 16-bit pixels into the int16 intermediate domain:
+; out = (pix << (14 - BIT_DEPTH)) - pw_2000.  Width is decomposed into
+; 8 + 4 + 2 pixel column groups; rows are handled 4 at a time.
+%macro FILTER_PIX_TO_SHORT_sse2 2
+INIT_XMM sse2
+cglobal filterPixelToShort_%1x%2, 4, 6, 3
+%if %2 == 2
+; height-2 special cases (note: the 4x2 path keeps strides in pixel units)
+%if %1 == 4
+    FILTER_P2S_4_2_sse2
+%elif %1 == 8
+    add        r1d, r1d
+    add        r3d, r3d
+    mova       m1, [pw_2000]
+    FILTER_P2S_8_2_sse2 0
+%endif
+%else
+    ; strides to bytes; m1 = DCT-domain offset; r4/r5 = 3*strides
+    add        r1d, r1d
+    add        r3d, r3d
+    mova       m1, [pw_2000]
+    lea        r4, [r1 * 3]
+    lea        r5, [r3 * 3]
+%assign y 1
+%rep %2/4
+%assign x 0
+%rep %1/8
+    FILTER_P2S_8_4_sse2 x
+%if %2 == 6
+    ; height 6 (8x6): finish the last two rows within this single pass
+    lea         r0,     [r0 + 4 * r1]
+    lea         r2,     [r2 + 4 * r3]
+    FILTER_P2S_8_2_sse2 x
+%endif
+%assign x x+16
+%endrep
+%rep (%1 % 8)/4
+    FILTER_P2S_4_4_sse2 x
+%assign x x+8
+%endrep
+%rep (%1 % 4)/2
+    FILTER_P2S_2_4_sse2 x
+%endrep
+%if y < %2/4
+    lea         r0,     [r0 + 4 * r1]
+    lea         r2,     [r2 + 4 * r3]
+%assign y y+1
+%endif
+%endrep
+%endif
+RET
+%endmacro
+
+; Instantiate filterPixelToShort for all luma and chroma block sizes.
+    FILTER_PIX_TO_SHORT_sse2 2, 4
+    FILTER_PIX_TO_SHORT_sse2 2, 8
+    FILTER_PIX_TO_SHORT_sse2 2, 16
+    FILTER_PIX_TO_SHORT_sse2 4, 2
+    FILTER_PIX_TO_SHORT_sse2 4, 4
+    FILTER_PIX_TO_SHORT_sse2 4, 8
+    FILTER_PIX_TO_SHORT_sse2 4, 16
+    FILTER_PIX_TO_SHORT_sse2 4, 32
+    FILTER_PIX_TO_SHORT_sse2 6, 8
+    FILTER_PIX_TO_SHORT_sse2 6, 16
+    FILTER_PIX_TO_SHORT_sse2 8, 2
+    FILTER_PIX_TO_SHORT_sse2 8, 4
+    FILTER_PIX_TO_SHORT_sse2 8, 6
+    FILTER_PIX_TO_SHORT_sse2 8, 8
+    FILTER_PIX_TO_SHORT_sse2 8, 12
+    FILTER_PIX_TO_SHORT_sse2 8, 16
+    FILTER_PIX_TO_SHORT_sse2 8, 32
+    FILTER_PIX_TO_SHORT_sse2 8, 64
+    FILTER_PIX_TO_SHORT_sse2 12, 16
+    FILTER_PIX_TO_SHORT_sse2 12, 32
+    FILTER_PIX_TO_SHORT_sse2 16, 4
+    FILTER_PIX_TO_SHORT_sse2 16, 8
+    FILTER_PIX_TO_SHORT_sse2 16, 12
+    FILTER_PIX_TO_SHORT_sse2 16, 16
+    FILTER_PIX_TO_SHORT_sse2 16, 24
+    FILTER_PIX_TO_SHORT_sse2 16, 32
+    FILTER_PIX_TO_SHORT_sse2 16, 64
+    FILTER_PIX_TO_SHORT_sse2 24, 32
+    FILTER_PIX_TO_SHORT_sse2 24, 64
+    FILTER_PIX_TO_SHORT_sse2 32, 8
+    FILTER_PIX_TO_SHORT_sse2 32, 16
+    FILTER_PIX_TO_SHORT_sse2 32, 24
+    FILTER_PIX_TO_SHORT_sse2 32, 32
+    FILTER_PIX_TO_SHORT_sse2 32, 48
+    FILTER_PIX_TO_SHORT_sse2 32, 64
+    FILTER_PIX_TO_SHORT_sse2 48, 64
+    FILTER_PIX_TO_SHORT_sse2 64, 16
+    FILTER_PIX_TO_SHORT_sse2 64, 32
+    FILTER_PIX_TO_SHORT_sse2 64, 48
+    FILTER_PIX_TO_SHORT_sse2 64, 64
+
+;------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_4x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;------------------------------------------------------------------------------------------------------------
+; SSE4 8-tap horizontal luma filter for width-4 blocks; %1 = 4, %2 = height,
+; %3 = pp/ps.  Produces 4 output pixels per row iteration.
+%macro FILTER_HOR_LUMA_W4 3
+INIT_XMM sse4
+cglobal interp_8tap_horiz_%3_%1x%2, 4, 7, 8
+    mov         r4d, r4m
+    ; back src up 3 pixels (6 bytes, 16-bit samples) for the 8 taps
+    sub         r0, 6
+    ; each coefficient set in tab_LumaCoeff is 16 bytes
+    shl         r4d, 4
+    add         r1, r1
+    add         r3, r3
+
+%ifdef PIC
+    lea         r6, [tab_LumaCoeff]
+    mova        m0, [r6 + r4]
+%else
+    mova        m0, [tab_LumaCoeff + r4]
+%endif
+
+%ifidn %3, pp
+    ; rounding 32 then shift 6; clip against [m6=0, m7=pw_pixel_max]
+    mova        m1, [pd_32]
+    pxor        m6, m6
+    mova        m7, [pw_pixel_max]
+%else
+    mova        m1, [INTERP_OFFSET_PS]
+%endif
+
+    mov         r4d, %2
+%ifidn %3, ps
+    ; isRowExt: start 3 rows above the block and filter 7 extra rows
+    cmp         r5m, byte 0
+    je          .loopH
+    lea         r6, [r1 + 2 * r1]
+    sub         r0, r6
+    add         r4d, 7
+%endif
+
+.loopH:
+    movu        m2, [r0]                     ; m2 = src[0-7]
+    movu        m3, [r0 + 16]                ; m3 = src[8-15]
+
+    pmaddwd     m4, m2, m0
+    palignr     m5, m3, m2, 2                ; m5 = src[1-8]
+    pmaddwd     m5, m0
+    phaddd      m4, m5
+
+    palignr     m5, m3, m2, 4                ; m5 = src[2-9]
+    pmaddwd     m5, m0
+    palignr     m3, m2, 6                    ; m3 = src[3-10]
+    pmaddwd     m3, m0
+    phaddd      m5, m3
+
+    phaddd      m4, m5
+    paddd       m4, m1
+%ifidn %3, pp
+    ; NOTE(review): shift is the literal 6 here while the W12 variant uses
+    ; INTERP_SHIFT_PP -- presumably equivalent; confirm against the header.
+    psrad       m4, 6
+    packusdw    m4, m4
+    CLIPW       m4, m6, m7
+%else
+    psrad       m4, INTERP_SHIFT_PS
+    packssdw    m4, m4
+%endif
+
+    movh        [r2], m4
+
+    add         r0, r1
+    add         r2, r3
+
+    dec         r4d
+    jnz         .loopH
+    RET
+%endmacro
+
+;------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_4x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W4 4, 4, pp
+FILTER_HOR_LUMA_W4 4, 8, pp
+FILTER_HOR_LUMA_W4 4, 16, pp
+
+;---------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_4x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;---------------------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W4 4, 4, ps
+FILTER_HOR_LUMA_W4 4, 8, ps
+FILTER_HOR_LUMA_W4 4, 16, ps
+
+;------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;------------------------------------------------------------------------------------------------------------
+; SSE4 8-tap horizontal luma filter for width-8 blocks; %1 = 8, %2 = height,
+; %3 = pp/ps.  Produces 8 output pixels per row iteration.
+%macro FILTER_HOR_LUMA_W8 3
+INIT_XMM sse4
+cglobal interp_8tap_horiz_%3_%1x%2, 4, 7, 8
+
+    ; strides to bytes; back src up 3 pixels; index 16-byte coefficient set
+    add         r1, r1
+    add         r3, r3
+    mov         r4d, r4m
+    sub         r0, 6
+    shl         r4d, 4
+
+%ifdef PIC
+    lea         r6, [tab_LumaCoeff]
+    mova        m0, [r6 + r4]
+%else
+    mova        m0, [tab_LumaCoeff + r4]
+%endif
+
+%ifidn %3, pp
+    mova        m1, [pd_32]
+    pxor        m7, m7
+%else
+    mova        m1, [INTERP_OFFSET_PS]
+%endif
+
+    mov         r4d, %2
+%ifidn %3, ps
+    ; isRowExt: start 3 rows above the block and filter 7 extra rows
+    cmp         r5m, byte 0
+    je         .loopH
+    lea         r6, [r1 + 2 * r1]
+    sub         r0, r6
+    add         r4d, 7
+%endif
+
+.loopH:
+    movu        m2, [r0]                     ; m2 = src[0-7]
+    movu        m3, [r0 + 16]                ; m3 = src[8-15]
+
+    pmaddwd     m4, m2, m0
+    palignr     m5, m3, m2, 2                ; m5 = src[1-8]
+    pmaddwd     m5, m0
+    phaddd      m4, m5
+
+    palignr     m5, m3, m2, 4                ; m5 = src[2-9]
+    pmaddwd     m5, m0
+    palignr     m6, m3, m2, 6                ; m6 = src[3-10]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+    phaddd      m4, m5
+    paddd       m4, m1
+
+    palignr     m5, m3, m2, 8                ; m5 = src[4-11]
+    pmaddwd     m5, m0
+    palignr     m6, m3, m2, 10               ; m6 = src[5-12]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+
+    palignr     m6, m3, m2, 12               ; m6 = src[6-13]
+    pmaddwd     m6, m0
+    palignr     m3, m2, 14                   ; m3 = src[7-14]
+    pmaddwd     m3, m0
+    phaddd      m6, m3
+    phaddd      m5, m6
+    paddd       m5, m1
+%ifidn %3, pp
+    psrad       m4, 6
+    psrad       m5, 6
+    packusdw    m4, m5
+    CLIPW       m4, m7, [pw_pixel_max]
+%else
+    psrad       m4, INTERP_SHIFT_PS
+    psrad       m5, INTERP_SHIFT_PS
+    packssdw    m4, m5
+%endif
+
+    movu        [r2], m4
+
+    add         r0, r1
+    add         r2, r3
+
+    dec         r4d
+    jnz        .loopH
+    RET
+%endmacro
+
+;------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_8x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W8 8, 4, pp
+FILTER_HOR_LUMA_W8 8, 8, pp
+FILTER_HOR_LUMA_W8 8, 16, pp
+FILTER_HOR_LUMA_W8 8, 32, pp
+
+;---------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_8x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;---------------------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W8 8, 4, ps
+FILTER_HOR_LUMA_W8 8, 8, ps
+FILTER_HOR_LUMA_W8 8, 16, ps
+FILTER_HOR_LUMA_W8 8, 32, ps
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+; SSE4 8-tap horizontal luma filter for width-12 blocks; per row it emits
+; 8 pixels (first half) then 4 pixels (second half).
+%macro FILTER_HOR_LUMA_W12 3
+INIT_XMM sse4
+cglobal interp_8tap_horiz_%3_%1x%2, 4, 7, 8
+
+    ; strides to bytes; back src up 3 pixels; index 16-byte coefficient set
+    add         r1, r1
+    add         r3, r3
+    mov         r4d, r4m
+    sub         r0, 6
+    shl         r4d, 4
+
+%ifdef PIC
+    lea         r6, [tab_LumaCoeff]
+    mova        m0, [r6 + r4]
+%else
+    mova        m0, [tab_LumaCoeff + r4]
+%endif
+%ifidn %3, pp
+    mova        m1, [INTERP_OFFSET_PP]
+%else
+    mova        m1, [INTERP_OFFSET_PS]
+%endif
+
+    mov         r4d, %2
+%ifidn %3, ps
+    ; isRowExt: start 3 rows above the block and filter 7 extra rows
+    cmp         r5m, byte 0
+    je          .loopH
+    lea         r6, [r1 + 2 * r1]
+    sub         r0, r6
+    add         r4d, 7
+%endif
+
+.loopH:
+    movu        m2, [r0]                     ; m2 = src[0-7]
+    movu        m3, [r0 + 16]                ; m3 = src[8-15]
+
+    pmaddwd     m4, m2, m0
+    palignr     m5, m3, m2, 2                ; m5 = src[1-8]
+    pmaddwd     m5, m0
+    phaddd      m4, m5
+
+    palignr     m5, m3, m2, 4                ; m5 = src[2-9]
+    pmaddwd     m5, m0
+    palignr     m6, m3, m2, 6                ; m6 = src[3-10]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+    phaddd      m4, m5
+    paddd       m4, m1
+
+    palignr     m5, m3, m2, 8                ; m5 = src[4-11]
+    pmaddwd     m5, m0
+    palignr     m6, m3, m2, 10               ; m6 = src[5-12]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+
+    palignr     m6, m3, m2, 12               ; m6 = src[6-13]
+    pmaddwd     m6, m0
+    palignr     m7, m3, m2, 14               ; m7 = src[7-14]
+    pmaddwd     m7, m0
+    phaddd      m6, m7
+    phaddd      m5, m6
+    paddd       m5, m1
+%ifidn %3, pp
+    psrad       m4, INTERP_SHIFT_PP
+    psrad       m5, INTERP_SHIFT_PP
+    packusdw    m4, m5
+    pxor        m5, m5
+    CLIPW       m4, m5, [pw_pixel_max]
+%else
+    psrad       m4, INTERP_SHIFT_PS
+    psrad       m5, INTERP_SHIFT_PS
+    packssdw    m4, m5
+%endif
+
+    movu        [r2], m4
+
+    ; second half of the row: pixels 8-11
+    movu        m2, [r0 + 32]                ; m2 = src[16-23]
+
+    pmaddwd     m4, m3, m0                   ; m3 = src[8-15]
+    palignr     m5, m2, m3, 2                ; m5 = src[9-16]
+    pmaddwd     m5, m0
+    phaddd      m4, m5
+
+    palignr     m5, m2, m3, 4                ; m5 = src[10-17]
+    pmaddwd     m5, m0
+    palignr     m2, m3, 6                    ; m2 = src[11-18]
+    pmaddwd     m2, m0
+    phaddd      m5, m2
+    phaddd      m4, m5
+    paddd       m4, m1
+%ifidn %3, pp
+    psrad       m4, INTERP_SHIFT_PP
+    packusdw    m4, m4
+    pxor        m5, m5
+    CLIPW       m4, m5, [pw_pixel_max]
+%else
+    psrad       m4, INTERP_SHIFT_PS
+    packssdw    m4, m4
+%endif
+
+    movh        [r2 + 16], m4
+
+    add         r0, r1
+    add         r2, r3
+
+    dec         r4d
+    jnz         .loopH
+    RET
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_12x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W12 12, 16, pp
+
+;----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_12x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;----------------------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W12 12, 16, ps
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+%macro FILTER_HOR_LUMA_W16 3
+INIT_XMM sse4
+cglobal interp_8tap_horiz_%3_%1x%2, 4, 7, 8
+
+    add         r1, r1                       ; 16-bit pixels: srcStride in bytes
+    add         r3, r3                       ; dstStride in bytes
+    mov         r4d, r4m
+    sub         r0, 6                        ; step back 3 pixels (8-tap filter history)
+    shl         r4d, 4                       ; coeffIdx * 16 bytes per coefficient row
+
+%ifdef PIC
+    lea         r6, [tab_LumaCoeff]
+    mova        m0, [r6 + r4]
+%else
+    mova        m0, [tab_LumaCoeff + r4]
+%endif
+
+%ifidn %3, pp
+    mova        m1, [pd_32]
+%else
+    mova        m1, [INTERP_OFFSET_PS]
+%endif
+
+    mov         r4d, %2
+%ifidn %3, ps
+    cmp         r5m, byte 0                  ; isRowExt?
+    je          .loopH
+    lea         r6, [r1 + 2 * r1]
+    sub         r0, r6                       ; start 3 rows earlier ...
+    add         r4d, 7                       ; ... and emit 7 extra rows
+%endif
+
+.loopH:
+%assign x 0
+%rep %1 / 16
+    movu        m2, [r0 + x]                 ; m2 = src[0-7]
+    movu        m3, [r0 + 16 + x]            ; m3 = src[8-15]
+
+    pmaddwd     m4, m2, m0
+    palignr     m5, m3, m2, 2                ; m5 = src[1-8]
+    pmaddwd     m5, m0
+    phaddd      m4, m5
+
+    palignr     m5, m3, m2, 4                ; m5 = src[2-9]
+    pmaddwd     m5, m0
+    palignr     m6, m3, m2, 6                ; m6 = src[3-10]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+    phaddd      m4, m5
+    paddd       m4, m1
+
+    palignr     m5, m3, m2, 8                ; m5 = src[4-11]
+    pmaddwd     m5, m0
+    palignr     m6, m3, m2, 10               ; m6 = src[5-12]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+
+    palignr     m6, m3, m2, 12               ; m6 = src[6-13]
+    pmaddwd     m6, m0
+    palignr     m7, m3, m2, 14               ; m7 = src[7-14]
+    pmaddwd     m7, m0
+    phaddd      m6, m7
+    phaddd      m5, m6
+    paddd       m5, m1
+%ifidn %3, pp
+    psrad       m4, INTERP_SHIFT_PP
+    psrad       m5, INTERP_SHIFT_PP
+    packusdw    m4, m5
+    pxor        m5, m5
+    CLIPW       m4, m5, [pw_pixel_max]
+%else
+    psrad       m4, INTERP_SHIFT_PS
+    psrad       m5, INTERP_SHIFT_PS
+    packssdw    m4, m5
+%endif
+    movu        [r2 + x], m4
+
+    movu        m2, [r0 + 32 + x]            ; m2 = src[16-23]
+
+    pmaddwd     m4, m3, m0                   ; m3 = src[8-15]
+    palignr     m5, m2, m3, 2                ; m5 = src[9-16]
+    pmaddwd     m5, m0
+    phaddd      m4, m5
+
+    palignr     m5, m2, m3, 4                ; m5 = src[10-17]
+    pmaddwd     m5, m0
+    palignr     m6, m2, m3, 6                ; m6 = src[11-18]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+    phaddd      m4, m5
+    paddd       m4, m1
+
+    palignr     m5, m2, m3, 8                ; m5 = src[12-19]
+    pmaddwd     m5, m0
+    palignr     m6, m2, m3, 10               ; m6 = src[13-20]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+
+    palignr     m6, m2, m3, 12               ; m6 = src[14-21]
+    pmaddwd     m6, m0
+    palignr     m2, m3, 14                   ; m2 = src[15-22]
+    pmaddwd     m2, m0
+    phaddd      m6, m2
+    phaddd      m5, m6
+    paddd       m5, m1
+%ifidn %3, pp
+    psrad       m4, INTERP_SHIFT_PP
+    psrad       m5, INTERP_SHIFT_PP
+    packusdw    m4, m5
+    pxor        m5, m5
+    CLIPW       m4, m5, [pw_pixel_max]
+%else
+    psrad       m4, INTERP_SHIFT_PS
+    psrad       m5, INTERP_SHIFT_PS
+    packssdw    m4, m5
+%endif
+    movu        [r2 + 16 + x], m4
+
+%assign x x+32
+%endrep
+
+    add         r0, r1
+    add         r2, r3
+
+    dec         r4d
+    jnz         .loopH
+    RET
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_16x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W16 16, 4, pp
+FILTER_HOR_LUMA_W16 16, 8, pp
+FILTER_HOR_LUMA_W16 16, 12, pp
+FILTER_HOR_LUMA_W16 16, 16, pp
+FILTER_HOR_LUMA_W16 16, 32, pp
+FILTER_HOR_LUMA_W16 16, 64, pp
+
+;----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_16x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;----------------------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W16 16, 4, ps
+FILTER_HOR_LUMA_W16 16, 8, ps
+FILTER_HOR_LUMA_W16 16, 12, ps
+FILTER_HOR_LUMA_W16 16, 16, ps
+FILTER_HOR_LUMA_W16 16, 32, ps
+FILTER_HOR_LUMA_W16 16, 64, ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_32x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W16 32, 8, pp
+FILTER_HOR_LUMA_W16 32, 16, pp
+FILTER_HOR_LUMA_W16 32, 24, pp
+FILTER_HOR_LUMA_W16 32, 32, pp
+FILTER_HOR_LUMA_W16 32, 64, pp
+
+;----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_32x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;----------------------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W16 32, 8, ps
+FILTER_HOR_LUMA_W16 32, 16, ps
+FILTER_HOR_LUMA_W16 32, 24, ps
+FILTER_HOR_LUMA_W16 32, 32, ps
+FILTER_HOR_LUMA_W16 32, 64, ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_48x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W16 48, 64, pp
+
+;----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_48x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;----------------------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W16 48, 64, ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_64x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W16 64, 16, pp
+FILTER_HOR_LUMA_W16 64, 32, pp
+FILTER_HOR_LUMA_W16 64, 48, pp
+FILTER_HOR_LUMA_W16 64, 64, pp
+
+;----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_64x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;----------------------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W16 64, 16, ps
+FILTER_HOR_LUMA_W16 64, 32, ps
+FILTER_HOR_LUMA_W16 64, 48, ps
+FILTER_HOR_LUMA_W16 64, 64, ps
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+%macro FILTER_HOR_LUMA_W24 3
+INIT_XMM sse4
+cglobal interp_8tap_horiz_%3_%1x%2, 4, 7, 8
+
+    add         r1, r1                       ; 16-bit pixels: srcStride in bytes
+    add         r3, r3                       ; dstStride in bytes
+    mov         r4d, r4m
+    sub         r0, 6                        ; step back 3 pixels (8-tap filter history)
+    shl         r4d, 4                       ; coeffIdx * 16 bytes per coefficient row
+
+%ifdef PIC
+    lea         r6, [tab_LumaCoeff]
+    mova        m0, [r6 + r4]
+%else
+    mova        m0, [tab_LumaCoeff + r4]
+%endif
+%ifidn %3, pp
+    mova        m1, [pd_32]
+%else
+    mova        m1, [INTERP_OFFSET_PS]
+%endif
+
+    mov         r4d, %2
+%ifidn %3, ps
+    cmp         r5m, byte 0                  ; isRowExt?
+    je          .loopH
+    lea         r6, [r1 + 2 * r1]
+    sub         r0, r6                       ; start 3 rows earlier ...
+    add         r4d, 7                       ; ... and emit 7 extra rows
+%endif
+
+.loopH:
+    movu        m2, [r0]                     ; m2 = src[0-7]
+    movu        m3, [r0 + 16]                ; m3 = src[8-15]
+
+    pmaddwd     m4, m2, m0
+    palignr     m5, m3, m2, 2                ; m5 = src[1-8]
+    pmaddwd     m5, m0
+    phaddd      m4, m5
+
+    palignr     m5, m3, m2, 4                ; m5 = src[2-9]
+    pmaddwd     m5, m0
+    palignr     m6, m3, m2, 6                ; m6 = src[3-10]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+    phaddd      m4, m5
+    paddd       m4, m1
+
+    palignr     m5, m3, m2, 8                ; m5 = src[4-11]
+    pmaddwd     m5, m0
+    palignr     m6, m3, m2, 10               ; m6 = src[5-12]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+
+    palignr     m6, m3, m2, 12               ; m6 = src[6-13]
+    pmaddwd     m6, m0
+    palignr     m7, m3, m2, 14               ; m7 = src[7-14]
+    pmaddwd     m7, m0
+    phaddd      m6, m7
+    phaddd      m5, m6
+    paddd       m5, m1
+%ifidn %3, pp
+    psrad       m4, INTERP_SHIFT_PP
+    psrad       m5, INTERP_SHIFT_PP
+    packusdw    m4, m5
+    pxor        m5, m5
+    CLIPW       m4, m5, [pw_pixel_max]
+%else
+    psrad       m4, INTERP_SHIFT_PS
+    psrad       m5, INTERP_SHIFT_PS
+    packssdw    m4, m5
+%endif
+    movu        [r2], m4
+
+    movu        m2, [r0 + 32]                ; m2 = src[16-23]
+
+    pmaddwd     m4, m3, m0                   ; m3 = src[8-15]
+    palignr     m5, m2, m3, 2                ; m5 = src[9-16]
+    pmaddwd     m5, m0
+    phaddd      m4, m5
+
+    palignr     m5, m2, m3, 4                ; m5 = src[10-17]
+    pmaddwd     m5, m0
+    palignr     m6, m2, m3, 6                ; m6 = src[11-18]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+    phaddd      m4, m5
+    paddd       m4, m1
+
+    palignr     m5, m2, m3, 8                ; m5 = src[12-19]
+    pmaddwd     m5, m0
+    palignr     m6, m2, m3, 10               ; m6 = src[13-20]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+
+    palignr     m6, m2, m3, 12               ; m6 = src[14-21]
+    pmaddwd     m6, m0
+    palignr     m7, m2, m3, 14               ; m7 = src[15-22]
+    pmaddwd     m7, m0
+    phaddd      m6, m7
+    phaddd      m5, m6
+    paddd       m5, m1
+%ifidn %3, pp
+    psrad       m4, INTERP_SHIFT_PP
+    psrad       m5, INTERP_SHIFT_PP
+    packusdw    m4, m5
+    pxor        m5, m5
+    CLIPW       m4, m5, [pw_pixel_max]
+%else
+    psrad       m4, INTERP_SHIFT_PS
+    psrad       m5, INTERP_SHIFT_PS
+    packssdw    m4, m5
+%endif
+    movu        [r2 + 16], m4
+
+    movu        m3, [r0 + 48]                ; m3 = src[24-31]
+
+    pmaddwd     m4, m2, m0                   ; m2 = src[16-23]
+    palignr     m5, m3, m2, 2                ; m5 = src[17-24]
+    pmaddwd     m5, m0
+    phaddd      m4, m5
+
+    palignr     m5, m3, m2, 4                ; m5 = src[18-25]
+    pmaddwd     m5, m0
+    palignr     m6, m3, m2, 6                ; m6 = src[19-26]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+    phaddd      m4, m5
+    paddd       m4, m1
+
+    palignr     m5, m3, m2, 8                ; m5 = src[20-27]
+    pmaddwd     m5, m0
+    palignr     m6, m3, m2, 10               ; m6 = src[21-28]
+    pmaddwd     m6, m0
+    phaddd      m5, m6
+
+    palignr     m6, m3, m2, 12               ; m6 = src[22-29]
+    pmaddwd     m6, m0
+    palignr     m7, m3, m2, 14               ; m7 = src[23-30]
+    pmaddwd     m7, m0
+    phaddd      m6, m7
+    phaddd      m5, m6
+    paddd       m5, m1
+%ifidn %3, pp
+    psrad       m4, INTERP_SHIFT_PP
+    psrad       m5, INTERP_SHIFT_PP
+    packusdw    m4, m5
+    pxor        m5, m5
+    CLIPW       m4, m5, [pw_pixel_max]
+%else
+    psrad       m4, INTERP_SHIFT_PS
+    psrad       m5, INTERP_SHIFT_PS
+    packssdw    m4, m5
+%endif
+    movu        [r2 + 32], m4
+
+    add         r0, r1
+    add         r2, r3
+
+    dec         r4d
+    jnz         .loopH
+    RET
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_24x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W24 24, 32, pp
+
+;----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_24x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;----------------------------------------------------------------------------------------------------------------------------
+FILTER_HOR_LUMA_W24 24, 32, ps
+
+; Filter two consecutive rows of a 2-pixel-wide column (%1 = pp or ps).
+; NOTE(review): expects caller to preload m0 = coefficients (pmaddwd), m1 =
+; rounding offset, m2 = pshufb control; for pp also m6/m7 = clip bounds — confirm at call sites.
+%macro FILTER_W2_2 1
+    movu        m3,         [r0]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + r1]        ; second row
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    packusdw    m3,         m3
+    CLIPW       m3,         m7,    m6
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    packssdw    m3,         m3
+%endif
+    movd        [r2],       m3               ; 2 pixels, row 0
+    pextrd      [r2 + r3],  m3, 1            ; 2 pixels, row 1
+%endmacro
+
+; Filter two consecutive rows of a 4-pixel-wide column (%1 = pp or ps).
+; NOTE(review): same caller-set register contract as FILTER_W2_2 (m0 coeffs,
+; m1 offset, m2 shuffle control, m6/m7 clip bounds for pp) — confirm at call sites.
+%macro FILTER_W4_2 1
+    movu        m3,         [r0]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 4]         ; pixels 2-3 of row 0
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + r1]        ; row 1
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + r1 + 4]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m7,    m6
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2],       m3               ; 4 pixels, row 0
+    movhps      [r2 + r3],  m3               ; 4 pixels, row 1
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+%macro FILTER_HOR_LUMA_W4_avx2 1
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_4x%1, 4,7,7
+    add              r1d, r1d                ; 16-bit pixels: strides in bytes
+    add              r3d, r3d
+    sub              r0, 6                   ; step back 3 pixels (8-tap filter history)
+    mov              r4d, r4m
+    shl              r4d, 4                  ; coeffIdx * 16 bytes per coefficient row
+%ifdef PIC
+    lea              r5, [tab_LumaCoeff]
+    vpbroadcastq     m0, [r5 + r4]
+    vpbroadcastq     m1, [r5 + r4 + 8]
+%else
+    vpbroadcastq     m0, [tab_LumaCoeff + r4]
+    vpbroadcastq     m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    lea              r6, [pw_pixel_max]
+    mova             m3, [interp8_hpp_shuf]
+    mova             m6, [pd_32]
+    pxor             m2, m2
+
+    ; register map
+    ; m0 , m1 interpolate coeff
+
+    mov              r4d, %1/2               ; two rows per iteration
+
+.loop:
+    vbroadcasti128   m4, [r0]
+    vbroadcasti128   m5, [r0 + 8]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    phaddd           m4, m4
+    vpermq           m4, m4, q3120
+    paddd            m4, m6
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [r6]
+    movq             [r2], xm4
+
+    vbroadcasti128   m4, [r0 + r1]
+    vbroadcasti128   m5, [r0 + r1 + 8]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    phaddd           m4, m4
+    vpermq           m4, m4, q3120
+    paddd            m4, m6
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [r6]
+    movq             [r2 + r3], xm4
+
+    lea              r2, [r2 + 2 * r3]
+    lea              r0, [r0 + 2 * r1]
+    dec              r4d
+    jnz              .loop
+    RET
+%endmacro
+FILTER_HOR_LUMA_W4_avx2 4
+FILTER_HOR_LUMA_W4_avx2 8
+FILTER_HOR_LUMA_W4_avx2 16
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+%macro FILTER_HOR_LUMA_W8 1
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_8x%1, 4,6,8
+    add              r1d, r1d                ; 16-bit pixels: strides in bytes
+    add              r3d, r3d
+    sub              r0, 6                   ; step back 3 pixels (8-tap filter history)
+    mov              r4d, r4m
+    shl              r4d, 4                  ; coeffIdx * 16 bytes per coefficient row
+%ifdef PIC
+    lea              r5, [tab_LumaCoeff]
+    vpbroadcastq     m0, [r5 + r4]
+    vpbroadcastq     m1, [r5 + r4 + 8]
+%else
+    vpbroadcastq     m0, [tab_LumaCoeff + r4]
+    vpbroadcastq     m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova             m3, [interp8_hpp_shuf]
+    mova             m7, [pd_32]
+    pxor             m2, m2
+
+    ; register map
+    ; m0 , m1 interpolate coeff
+
+    mov              r4d, %1/2               ; two rows per iteration
+
+.loop:
+    vbroadcasti128   m4, [r0]
+    vbroadcasti128   m5, [r0 + 8]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 8]
+    vbroadcasti128   m6, [r0 + 16]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2], xm4
+
+    vbroadcasti128   m4, [r0 + r1]
+    vbroadcasti128   m5, [r0 + r1 + 8]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + r1 + 8]
+    vbroadcasti128   m6, [r0 + r1 + 16]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2 + r3], xm4
+
+    lea              r2, [r2 + 2 * r3]
+    lea              r0, [r0 + 2 * r1]
+    dec              r4d
+    jnz              .loop
+    RET
+%endmacro
+FILTER_HOR_LUMA_W8 4
+FILTER_HOR_LUMA_W8 8
+FILTER_HOR_LUMA_W8 16
+FILTER_HOR_LUMA_W8 32
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+%macro FILTER_HOR_LUMA_W16 1
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_16x%1, 4,6,8
+    add              r1d, r1d                ; 16-bit pixels: strides in bytes
+    add              r3d, r3d
+    sub              r0, 6                   ; step back 3 pixels (8-tap filter history)
+    mov              r4d, r4m
+    shl              r4d, 4                  ; coeffIdx * 16 bytes per coefficient row
+%ifdef PIC
+    lea              r5, [tab_LumaCoeff]
+    vpbroadcastq     m0, [r5 + r4]
+    vpbroadcastq     m1, [r5 + r4 + 8]
+%else
+    vpbroadcastq     m0, [tab_LumaCoeff + r4]
+    vpbroadcastq     m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova             m3, [interp8_hpp_shuf]
+    mova             m7, [pd_32]
+    pxor             m2, m2
+
+    ; register map
+    ; m0 , m1 interpolate coeff
+
+    mov              r4d, %1                 ; one row per iteration
+
+.loop:
+    vbroadcasti128   m4, [r0]
+    vbroadcasti128   m5, [r0 + 8]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 8]
+    vbroadcasti128   m6, [r0 + 16]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2], xm4               ; pixels 0-7
+
+    vbroadcasti128   m4, [r0 + 16]
+    vbroadcasti128   m5, [r0 + 24]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 24]
+    vbroadcasti128   m6, [r0 + 32]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2 + 16], xm4          ; pixels 8-15
+
+    add              r2, r3
+    add              r0, r1
+    dec              r4d
+    jnz              .loop
+    RET
+%endmacro
+FILTER_HOR_LUMA_W16 4
+FILTER_HOR_LUMA_W16 8
+FILTER_HOR_LUMA_W16 12
+FILTER_HOR_LUMA_W16 16
+FILTER_HOR_LUMA_W16 32
+FILTER_HOR_LUMA_W16 64
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+%macro FILTER_HOR_LUMA_W32 2
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_%1x%2, 4,6,8
+    add              r1d, r1d                ; 16-bit pixels: strides in bytes
+    add              r3d, r3d
+    sub              r0, 6                   ; step back 3 pixels (8-tap filter history)
+    mov              r4d, r4m
+    shl              r4d, 4                  ; coeffIdx * 16 bytes per coefficient row
+%ifdef PIC
+    lea              r5, [tab_LumaCoeff]
+    vpbroadcastq     m0, [r5 + r4]
+    vpbroadcastq     m1, [r5 + r4 + 8]
+%else
+    vpbroadcastq     m0, [tab_LumaCoeff + r4]
+    vpbroadcastq     m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova             m3, [interp8_hpp_shuf]
+    mova             m7, [pd_32]
+    pxor             m2, m2
+
+    ; register map
+    ; m0 , m1 interpolate coeff
+
+    mov              r4d, %2                 ; one row per iteration
+
+.loop:
+%assign x 0
+%rep %1/16
+    vbroadcasti128   m4, [r0 + x]
+    vbroadcasti128   m5, [r0 + 8 + x]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 8 + x]
+    vbroadcasti128   m6, [r0 + 16 + x]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2 + x], xm4
+
+    vbroadcasti128   m4, [r0 + 16 + x]
+    vbroadcasti128   m5, [r0 + 24 + x]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 24 + x]
+    vbroadcasti128   m6, [r0 + 32 + x]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2 + 16 + x], xm4
+
+%assign x x+32
+%endrep
+
+    add              r2, r3
+    add              r0, r1
+    dec              r4d
+    jnz              .loop
+    RET
+%endmacro
+FILTER_HOR_LUMA_W32 32, 8
+FILTER_HOR_LUMA_W32 32, 16
+FILTER_HOR_LUMA_W32 32, 24
+FILTER_HOR_LUMA_W32 32, 32
+FILTER_HOR_LUMA_W32 32, 64
+FILTER_HOR_LUMA_W32 64, 16
+FILTER_HOR_LUMA_W32 64, 32
+FILTER_HOR_LUMA_W32 64, 48
+FILTER_HOR_LUMA_W32 64, 64
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_12x16, 4,6,8
+    add              r1d, r1d                ; 16-bit pixels: strides in bytes
+    add              r3d, r3d
+    sub              r0, 6                   ; step back 3 pixels (8-tap filter history)
+    mov              r4d, r4m
+    shl              r4d, 4                  ; coeffIdx * 16 bytes per coefficient row
+%ifdef PIC
+    lea              r5, [tab_LumaCoeff]
+    vpbroadcastq     m0, [r5 + r4]
+    vpbroadcastq     m1, [r5 + r4 + 8]
+%else
+    vpbroadcastq     m0, [tab_LumaCoeff + r4]
+    vpbroadcastq     m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova             m3, [interp8_hpp_shuf]
+    mova             m7, [pd_32]
+    pxor             m2, m2
+
+    ; register map
+    ; m0 , m1 interpolate coeff
+
+    mov              r4d, 16                 ; row count
+
+.loop:
+    vbroadcasti128   m4, [r0]
+    vbroadcasti128   m5, [r0 + 8]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 8]
+    vbroadcasti128   m6, [r0 + 16]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2], xm4               ; pixels 0-7
+
+    vbroadcasti128   m4, [r0 + 16]
+    vbroadcasti128   m5, [r0 + 24]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 24]
+    vbroadcasti128   m6, [r0 + 32]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movq             [r2 + 16], xm4          ; pixels 8-11 only (width 12)
+
+    add              r2, r3
+    add              r0, r1
+    dec              r4d
+    jnz              .loop
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_24x32, 4,6,8
+    add              r1d, r1d                ; 16-bit pixels: strides in bytes
+    add              r3d, r3d
+    sub              r0, 6                   ; step back 3 pixels (8-tap filter history)
+    mov              r4d, r4m
+    shl              r4d, 4                  ; coeffIdx * 16 bytes per coefficient row
+%ifdef PIC
+    lea              r5, [tab_LumaCoeff]
+    vpbroadcastq     m0, [r5 + r4]
+    vpbroadcastq     m1, [r5 + r4 + 8]
+%else
+    vpbroadcastq     m0, [tab_LumaCoeff + r4]
+    vpbroadcastq     m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova             m3, [interp8_hpp_shuf]
+    mova             m7, [pd_32]
+    pxor             m2, m2
+
+    ; register map
+    ; m0 , m1 interpolate coeff
+
+    mov              r4d, 32                 ; row count
+
+.loop:
+    vbroadcasti128   m4, [r0]
+    vbroadcasti128   m5, [r0 + 8]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 8]
+    vbroadcasti128   m6, [r0 + 16]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2], xm4               ; pixels 0-7
+
+    vbroadcasti128   m4, [r0 + 16]
+    vbroadcasti128   m5, [r0 + 24]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 24]
+    vbroadcasti128   m6, [r0 + 32]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2 + 16], xm4          ; pixels 8-15
+
+    vbroadcasti128   m4, [r0 + 32]
+    vbroadcasti128   m5, [r0 + 40]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 40]
+    vbroadcasti128   m6, [r0 + 48]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2 + 32], xm4          ; pixels 16-23
+
+    add              r2, r3
+    add              r0, r1
+    dec              r4d
+    jnz              .loop
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; 8-tap luma horizontal interpolation, pixel-to-pixel, 48x64 block (16-bit samples, AVX2).
+; Each loop iteration produces one row of 48 pixels as two unrolled 24-pixel
+; halves (%rep 2, x = 0 and 48 bytes = 24 samples), each in three 8-pixel groups.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_48x64, 4,6,8
+    add              r1d, r1d                   ; strides in samples -> bytes
+    add              r3d, r3d
+    sub              r0, 6                      ; step back 3 samples for the 8-tap kernel
+    mov              r4d, r4m
+    shl              r4d, 4                     ; coeffIdx * 16 bytes into tab_LumaCoeff
+%ifdef PIC
+    lea              r5, [tab_LumaCoeff]
+    vpbroadcastq     m0, [r5 + r4]              ; m0 = taps 0-3, m1 = taps 4-7
+    vpbroadcastq     m1, [r5 + r4 + 8]
+%else
+    vpbroadcastq     m0, [tab_LumaCoeff + r4]
+    vpbroadcastq     m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova             m3, [interp8_hpp_shuf]
+    mova             m7, [pd_32]                ; rounding offset added before the INTERP_SHIFT_PP shift
+    pxor             m2, m2                     ; zero: lower clip bound for CLIPW
+
+    ; register map
+    ; m0 , m1 interpolate coeff
+
+    mov              r4d, 64                    ; row counter
+
+.loop:
+%assign x 0
+%rep 2
+    ; output pixels x/2 .. x/2+7
+    vbroadcasti128   m4, [r0 + x]
+    vbroadcasti128   m5, [r0 + 8 + x]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 8 + x]
+    vbroadcasti128   m6, [r0 + 16 + x]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2 + x], xm4
+
+    ; next 8 pixels
+    vbroadcasti128   m4, [r0 + 16 + x]
+    vbroadcasti128   m5, [r0 + 24 + x]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 24 + x]
+    vbroadcasti128   m6, [r0 + 32 + x]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2 + 16 + x], xm4
+
+    ; next 8 pixels
+    vbroadcasti128   m4, [r0 + 32 + x]
+    vbroadcasti128   m5, [r0 + 40 + x]
+    pshufb           m4, m3
+    pshufb           m5, m3
+
+    pmaddwd          m4, m0
+    pmaddwd          m5, m1
+    paddd            m4, m5
+
+    vbroadcasti128   m5, [r0 + 40 + x]
+    vbroadcasti128   m6, [r0 + 48 + x]
+    pshufb           m5, m3
+    pshufb           m6, m3
+
+    pmaddwd          m5, m0
+    pmaddwd          m6, m1
+    paddd            m5, m6
+
+    phaddd           m4, m5
+    vpermq           m4, m4, q3120
+    paddd            m4, m7
+    psrad            m4, INTERP_SHIFT_PP
+
+    packusdw         m4, m4
+    vpermq           m4, m4, q2020
+    CLIPW            m4, m2, [pw_pixel_max]
+    movu             [r2 + 32 + x], xm4
+
+%assign x x+48
+%endrep
+
+    add              r2, r3
+    add              r0, r1
+    dec              r4d
+    jnz              .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; 4-tap chroma horizontal filter for narrow blocks (widths 2 and 4), SSE4.
+; Macro parameters:
+;   %1 = width, %2 = height, %3 = pp or ps,
+;   %4 = GPR count, %5 = XMM count for cglobal,
+;   %6 = GPR index used for the PIC coefficient-table pointer.
+; Row work is delegated to the FILTER_W2_2 / FILTER_W4_2 helpers (defined
+; elsewhere in this file), each of which handles two rows.
+;-----------------------------------------------------------------------------
+%macro FILTER_CHROMA_H 6
+INIT_XMM sse4
+cglobal interp_4tap_horiz_%3_%1x%2, 4, %4, %5
+
+    add         r3,       r3
+    add         r1,       r1
+    sub         r0,       2
+    mov         r4d,      r4m
+    add         r4d,      r4d
+
+%ifdef PIC
+    lea         r%6,      [tab_ChromaCoeff]
+    movh        m0,       [r%6 + r4 * 4]      ; coeffIdx * 8: one 4-tap set is 8 bytes
+%else
+    movh        m0,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    punpcklqdq  m0,       m0                  ; duplicate the 4 taps into both qwords
+    mova        m2,       [tab_Tm16]
+
+%ifidn %3, ps
+    ; When the 5th stack argument is nonzero, emit 3 extra leading rows
+    ; starting one row above src (one row inline here + two via FILTER_W%1_2).
+    ; NOTE(review): presumably the row-extension margin for a following
+    ; vertical pass — confirm flag semantics against callers.
+    mova        m1,       [INTERP_OFFSET_PS]
+    cmp         r5m, byte 0
+    je          .skip
+    sub         r0,       r1
+    movu        m3,       [r0]
+    pshufb      m3,       m3, m2
+    pmaddwd     m3,       m0
+
+  %if %1 == 4
+    movu        m4,       [r0 + 4]
+    pshufb      m4,       m4, m2
+    pmaddwd     m4,       m0
+    phaddd      m3,       m4
+  %else
+    phaddd      m3,       m3
+  %endif
+
+    paddd       m3,       m1
+    psrad       m3,       INTERP_SHIFT_PS
+    packssdw    m3,       m3
+
+  %if %1 == 2
+    movd        [r2],     m3                  ; 2 pixels = 4 bytes
+  %else
+    movh        [r2],     m3                  ; 4 pixels = 8 bytes
+  %endif
+
+    add         r0,       r1
+    add         r2,       r3
+    FILTER_W%1_2 %3
+    lea         r0,       [r0 + 2 * r1]
+    lea         r2,       [r2 + 2 * r3]
+
+.skip:
+
+%else     ;%ifidn %3, ps
+    pxor        m7,       m7
+    mova        m6,       [pw_pixel_max]
+    mova        m1,       [tab_c_32]
+%endif    ;%ifidn %3, ps
+
+    FILTER_W%1_2 %3
+
+%rep (%2/2) - 1
+    lea         r0,       [r0 + 2 * r1]
+    lea         r2,       [r2 + 2 * r3]
+    FILTER_W%1_2 %3
+%endrep
+    RET
+%endmacro
+
+FILTER_CHROMA_H 2, 4, pp, 6, 8, 5
+FILTER_CHROMA_H 2, 8, pp, 6, 8, 5
+FILTER_CHROMA_H 4, 2, pp, 6, 8, 5
+FILTER_CHROMA_H 4, 4, pp, 6, 8, 5
+FILTER_CHROMA_H 4, 8, pp, 6, 8, 5
+FILTER_CHROMA_H 4, 16, pp, 6, 8, 5
+
+FILTER_CHROMA_H 2, 4, ps, 7, 5, 6
+FILTER_CHROMA_H 2, 8, ps, 7, 5, 6
+FILTER_CHROMA_H 4, 2, ps, 7, 6, 6
+FILTER_CHROMA_H 4, 4, ps, 7, 6, 6
+FILTER_CHROMA_H 4, 8, ps, 7, 6, 6
+FILTER_CHROMA_H 4, 16, ps, 7, 6, 6
+
+FILTER_CHROMA_H 2, 16, pp, 6, 8, 5
+FILTER_CHROMA_H 4, 32, pp, 6, 8, 5
+FILTER_CHROMA_H 2, 16, ps, 7, 5, 6
+FILTER_CHROMA_H 4, 32, ps, 7, 6, 6
+
+
+; Filter one row of 6 chroma output pixels (%1 = pp or ps).
+; Expects: m0 = 4-tap coeffs duplicated in both qwords, m1 = rounding offset,
+; m2 = shuffle mask; pp additionally needs m6 = lower / m7 = upper clip bound.
+; Reads samples at [r0..], writes 6 results at [r2] (movh + pextrd);
+; clobbers m3-m4.
+%macro FILTER_W6_1 1
+    movu        m3,         [r0]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 4]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4              ; m3 = sums for pixels 0-3
+    paddd       m3,         m1
+
+    movu        m4,         [r0 + 8]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m4,         m4              ; m4 = sums for pixels 4-5 (duplicated)
+    paddd       m4,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m4,         INTERP_SHIFT_PP
+    packusdw    m3,         m4
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m4,         INTERP_SHIFT_PS
+    packssdw    m3,         m4
+%endif
+    movh        [r2],       m3              ; pixels 0-3
+    pextrd      [r2 + 8],   m3, 2           ; pixels 4-5
+%endmacro
+
+; One-row worker called per row by IPFILTER_CHROMA; plain near `ret`.
+cglobal chroma_filter_pp_6x1_internal
+    FILTER_W6_1 pp
+    ret
+
+cglobal chroma_filter_ps_6x1_internal
+    FILTER_W6_1 ps
+    ret
+
+; Filter one row of 8 chroma output pixels (%1 = pp or ps).
+; Expects: m0 = 4-tap coeffs duplicated in both qwords, m1 = rounding offset,
+; m2 = shuffle mask; pp additionally needs m6 = lower / m7 = upper clip bound.
+; Reads [r0..], writes 8 results at [r2]; clobbers m3-m5.
+%macro FILTER_W8_1 1
+    movu        m3,         [r0]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 4]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4              ; m3 = sums for pixels 0-3
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 8]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 12]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4              ; m5 = sums for pixels 4-7
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2],       m3
+    movhps      [r2 + 8],   m3
+%endmacro
+
+; One-row worker called per row by IPFILTER_CHROMA; plain near `ret`.
+cglobal chroma_filter_pp_8x1_internal
+    FILTER_W8_1 pp
+    ret
+
+cglobal chroma_filter_ps_8x1_internal
+    FILTER_W8_1 ps
+    ret
+
+; Filter one row of 12 chroma output pixels (%1 = pp or ps):
+; pixels 0-7 like FILTER_W8_1, then pixels 8-11 as one extra 4-wide group.
+; Same register contract as FILTER_W8_1 (m0/m1/m2 in, m6/m7 for pp clipping);
+; clobbers m3-m5.
+%macro FILTER_W12_1 1
+    movu        m3,         [r0]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 4]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 8]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 12]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2],       m3
+    movhps      [r2 + 8],   m3
+
+    ; output pixels 8-11
+    movu        m3,         [r0 + 16]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 20]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    packusdw    m3,         m3
+    CLIPW       m3,         m6, m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    packssdw    m3,         m3
+%endif
+    movh        [r2 + 16],  m3
+%endmacro
+
+; One-row worker called per row by IPFILTER_CHROMA; plain near `ret`.
+cglobal chroma_filter_pp_12x1_internal
+    FILTER_W12_1 pp
+    ret
+
+cglobal chroma_filter_ps_12x1_internal
+    FILTER_W12_1 ps
+    ret
+
+; Filter one row of 16 chroma output pixels (%1 = pp or ps): two 8-pixel
+; groups. Same register contract as FILTER_W8_1 (m0/m1/m2 in, m6/m7 for pp
+; clipping); clobbers m3-m5.
+%macro FILTER_W16_1 1
+    ; output pixels 0-7
+    movu        m3,         [r0]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 4]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 8]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 12]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2],       m3
+    movhps      [r2 + 8],   m3
+
+    ; output pixels 8-15
+    movu        m3,         [r0 + 16]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 20]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 24]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 28]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2 + 16],  m3
+    movhps      [r2 + 24],  m3
+%endmacro
+
+; One-row worker called per row by IPFILTER_CHROMA; plain near `ret`.
+cglobal chroma_filter_pp_16x1_internal
+    FILTER_W16_1 pp
+    ret
+
+cglobal chroma_filter_ps_16x1_internal
+    FILTER_W16_1 ps
+    ret
+
+; Filter one row of 24 chroma output pixels (%1 = pp or ps): three 8-pixel
+; groups. Same register contract as FILTER_W8_1 (m0/m1/m2 in, m6/m7 for pp
+; clipping); clobbers m3-m5.
+%macro FILTER_W24_1 1
+    ; output pixels 0-7
+    movu        m3,         [r0]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 4]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 8]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 12]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2],       m3
+    movhps      [r2 + 8],   m3
+
+    ; output pixels 8-15
+    movu        m3,         [r0 + 16]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 20]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 24]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 28]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2 + 16],  m3
+    movhps      [r2 + 24],  m3
+
+    ; output pixels 16-23
+    movu        m3,         [r0 + 32]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 36]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 40]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 44]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2 + 32],  m3
+    movhps      [r2 + 40],  m3
+%endmacro
+
+; One-row worker called per row by IPFILTER_CHROMA; plain near `ret`.
+cglobal chroma_filter_pp_24x1_internal
+    FILTER_W24_1 pp
+    ret
+
+cglobal chroma_filter_ps_24x1_internal
+    FILTER_W24_1 ps
+    ret
+
+; Filter one row of 32 chroma output pixels (%1 = pp or ps): four 8-pixel
+; groups. Same register contract as FILTER_W8_1 (m0/m1/m2 in, m6/m7 for pp
+; clipping); clobbers m3-m5.
+%macro FILTER_W32_1 1
+    ; output pixels 0-7
+    movu        m3,         [r0]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 4]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 8]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 12]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2],       m3
+    movhps      [r2 + 8],   m3
+
+    ; output pixels 8-15
+    movu        m3,         [r0 + 16]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 20]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 24]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 28]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2 + 16],  m3
+    movhps      [r2 + 24],  m3
+
+    ; output pixels 16-23
+    movu        m3,         [r0 + 32]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 36]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 40]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 44]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2 + 32],  m3
+    movhps      [r2 + 40],  m3
+
+    ; output pixels 24-31
+    movu        m3,         [r0 + 48]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + 52]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + 56]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + 60]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2 + 48],  m3
+    movhps      [r2 + 56],  m3
+%endmacro
+
+; One-row worker called per row by IPFILTER_CHROMA; plain near `ret`.
+cglobal chroma_filter_pp_32x1_internal
+    FILTER_W32_1 pp
+    ret
+
+cglobal chroma_filter_ps_32x1_internal
+    FILTER_W32_1 ps
+    ret
+
+; Filter one 8-pixel group at byte offset %2 (%1 = pp or ps). Same register
+; contract as FILTER_W8_1; used to compose the wide (48/64) row filters below.
+%macro FILTER_W8o_1 2
+    movu        m3,         [r0 + %2]
+    pshufb      m3,         m3, m2
+    pmaddwd     m3,         m0
+    movu        m4,         [r0 + %2 + 4]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m3,         m4
+    paddd       m3,         m1
+
+    movu        m5,         [r0 + %2 + 8]
+    pshufb      m5,         m5, m2
+    pmaddwd     m5,         m0
+    movu        m4,         [r0 + %2 + 12]
+    pshufb      m4,         m4, m2
+    pmaddwd     m4,         m0
+    phaddd      m5,         m4
+    paddd       m5,         m1
+%ifidn %1, pp
+    psrad       m3,         INTERP_SHIFT_PP
+    psrad       m5,         INTERP_SHIFT_PP
+    packusdw    m3,         m5
+    CLIPW       m3,         m6,    m7
+%else
+    psrad       m3,         INTERP_SHIFT_PS
+    psrad       m5,         INTERP_SHIFT_PS
+    packssdw    m3,         m5
+%endif
+    movh        [r2 + %2],       m3
+    movhps      [r2 + %2 + 8],   m3
+%endmacro
+
+; One row of 48 output pixels = six 8-pixel groups (byte offsets 0..80).
+%macro FILTER_W48_1 1
+    FILTER_W8o_1 %1, 0
+    FILTER_W8o_1 %1, 16
+    FILTER_W8o_1 %1, 32
+    FILTER_W8o_1 %1, 48
+    FILTER_W8o_1 %1, 64
+    FILTER_W8o_1 %1, 80
+%endmacro
+
+; One-row worker called per row by IPFILTER_CHROMA; plain near `ret`.
+cglobal chroma_filter_pp_48x1_internal
+    FILTER_W48_1 pp
+    ret
+
+cglobal chroma_filter_ps_48x1_internal
+    FILTER_W48_1 ps
+    ret
+
+; One row of 64 output pixels = eight 8-pixel groups (byte offsets 0..112).
+%macro FILTER_W64_1 1
+    FILTER_W8o_1 %1, 0
+    FILTER_W8o_1 %1, 16
+    FILTER_W8o_1 %1, 32
+    FILTER_W8o_1 %1, 48
+    FILTER_W8o_1 %1, 64
+    FILTER_W8o_1 %1, 80
+    FILTER_W8o_1 %1, 96
+    FILTER_W8o_1 %1, 112
+%endmacro
+
+cglobal chroma_filter_pp_64x1_internal
+    FILTER_W64_1 pp
+    ret
+
+cglobal chroma_filter_ps_64x1_internal
+    FILTER_W64_1 ps
+    ret
+
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; 4-tap chroma horizontal filter for widths >= 6, SSE4. One call to the
+; chroma_filter_%3_%1x1_internal worker per output row.
+; Macro parameters:
+;   %1 = width, %2 = height, %3 = pp or ps,
+;   %4 = GPR index used for the PIC coefficient-table pointer,
+;   %5 = GPR count, %6 = XMM count for cglobal.
+;-----------------------------------------------------------------------------
+
+INIT_XMM sse4
+%macro IPFILTER_CHROMA 6
+cglobal interp_4tap_horiz_%3_%1x%2, 4, %5, %6
+
+    add         r3,        r3
+    add         r1,        r1
+    sub         r0,         2
+    mov         r4d,        r4m
+    add         r4d,        r4d
+
+%ifdef PIC
+    lea         r%4,       [tab_ChromaCoeff]
+    movh        m0,       [r%4 + r4 * 4]      ; coeffIdx * 8: one 4-tap set is 8 bytes
+%else
+    movh        m0,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    punpcklqdq  m0,       m0                  ; duplicate taps into both qwords
+    mova        m2,       [tab_Tm16]
+
+%ifidn %3, ps
+    ; When the 5th stack argument is nonzero, emit 3 extra leading rows
+    ; starting one row above src. NOTE(review): presumably the row-extension
+    ; margin for a following vertical pass — confirm against callers.
+    mova        m1,       [INTERP_OFFSET_PS]
+    cmp         r5m, byte 0
+    je          .skip
+    sub         r0, r1
+    call chroma_filter_%3_%1x1_internal
+    add         r0, r1
+    add         r2, r3
+    call chroma_filter_%3_%1x1_internal
+    add         r0, r1
+    add         r2, r3
+    call chroma_filter_%3_%1x1_internal
+    add         r0, r1
+    add         r2, r3
+.skip:
+%else
+    mova        m1,         [tab_c_32]        ; pp rounding offset
+    pxor        m6,         m6                ; lower clip bound
+    mova        m7,         [pw_pixel_max]    ; upper clip bound
+%endif
+
+    call chroma_filter_%3_%1x1_internal
+%rep %2 - 1
+    add         r0,       r1
+    add         r2,       r3
+    call chroma_filter_%3_%1x1_internal
+%endrep
+RET
+%endmacro
+IPFILTER_CHROMA 6, 8, pp, 5, 6, 8
+IPFILTER_CHROMA 8, 2, pp, 5, 6, 8
+IPFILTER_CHROMA 8, 4, pp, 5, 6, 8
+IPFILTER_CHROMA 8, 6, pp, 5, 6, 8
+IPFILTER_CHROMA 8, 8, pp, 5, 6, 8
+IPFILTER_CHROMA 8, 16, pp, 5, 6, 8
+IPFILTER_CHROMA 8, 32, pp, 5, 6, 8
+IPFILTER_CHROMA 12, 16, pp, 5, 6, 8
+IPFILTER_CHROMA 16, 4, pp, 5, 6, 8
+IPFILTER_CHROMA 16, 8, pp, 5, 6, 8
+IPFILTER_CHROMA 16, 12, pp, 5, 6, 8
+IPFILTER_CHROMA 16, 16, pp, 5, 6, 8
+IPFILTER_CHROMA 16, 32, pp, 5, 6, 8
+IPFILTER_CHROMA 24, 32, pp, 5, 6, 8
+IPFILTER_CHROMA 32, 8, pp, 5, 6, 8
+IPFILTER_CHROMA 32, 16, pp, 5, 6, 8
+IPFILTER_CHROMA 32, 24, pp, 5, 6, 8
+IPFILTER_CHROMA 32, 32, pp, 5, 6, 8
+
+IPFILTER_CHROMA 6, 8, ps, 6, 7, 6
+IPFILTER_CHROMA 8, 2, ps, 6, 7, 6
+IPFILTER_CHROMA 8, 4, ps, 6, 7, 6
+IPFILTER_CHROMA 8, 6, ps, 6, 7, 6
+IPFILTER_CHROMA 8, 8, ps, 6, 7, 6
+IPFILTER_CHROMA 8, 16, ps, 6, 7, 6
+IPFILTER_CHROMA 8, 32, ps, 6, 7, 6
+IPFILTER_CHROMA 12, 16, ps, 6, 7, 6
+IPFILTER_CHROMA 16, 4, ps, 6, 7, 6
+IPFILTER_CHROMA 16, 8, ps, 6, 7, 6
+IPFILTER_CHROMA 16, 12, ps, 6, 7, 6
+IPFILTER_CHROMA 16, 16, ps, 6, 7, 6
+IPFILTER_CHROMA 16, 32, ps, 6, 7, 6
+IPFILTER_CHROMA 24, 32, ps, 6, 7, 6
+IPFILTER_CHROMA 32, 8, ps, 6, 7, 6
+IPFILTER_CHROMA 32, 16, ps, 6, 7, 6
+IPFILTER_CHROMA 32, 24, ps, 6, 7, 6
+IPFILTER_CHROMA 32, 32, ps, 6, 7, 6
+
+IPFILTER_CHROMA 6, 16, pp, 5, 6, 8
+IPFILTER_CHROMA 8, 12, pp, 5, 6, 8
+IPFILTER_CHROMA 8, 64, pp, 5, 6, 8
+IPFILTER_CHROMA 12, 32, pp, 5, 6, 8
+IPFILTER_CHROMA 16, 24, pp, 5, 6, 8
+IPFILTER_CHROMA 16, 64, pp, 5, 6, 8
+IPFILTER_CHROMA 24, 64, pp, 5, 6, 8
+IPFILTER_CHROMA 32, 48, pp, 5, 6, 8
+IPFILTER_CHROMA 32, 64, pp, 5, 6, 8
+IPFILTER_CHROMA 6, 16, ps, 6, 7, 6
+IPFILTER_CHROMA 8, 12, ps, 6, 7, 6
+IPFILTER_CHROMA 8, 64, ps, 6, 7, 6
+IPFILTER_CHROMA 12, 32, ps, 6, 7, 6
+IPFILTER_CHROMA 16, 24, ps, 6, 7, 6
+IPFILTER_CHROMA 16, 64, ps, 6, 7, 6
+IPFILTER_CHROMA 24, 64, ps, 6, 7, 6
+IPFILTER_CHROMA 32, 48, ps, 6, 7, 6
+IPFILTER_CHROMA 32, 64, ps, 6, 7, 6
+
+IPFILTER_CHROMA 48, 64, pp, 5, 6, 8
+IPFILTER_CHROMA 64, 48, pp, 5, 6, 8
+IPFILTER_CHROMA 64, 64, pp, 5, 6, 8
+IPFILTER_CHROMA 64, 32, pp, 5, 6, 8
+IPFILTER_CHROMA 64, 16, pp, 5, 6, 8
+IPFILTER_CHROMA 48, 64, ps, 6, 7, 6
+IPFILTER_CHROMA 64, 48, ps, 6, 7, 6
+IPFILTER_CHROMA 64, 64, ps, 6, 7, 6
+IPFILTER_CHROMA 64, 32, ps, 6, 7, 6
+IPFILTER_CHROMA 64, 16, ps, 6, 7, 6
+
+; Accumulate four rows of a width-4 4-tap vertical filter (no shift/pack here).
+; Inputs : r0 = source (16-bit samples), r1 = source byte stride,
+;          r6 = coefficient table: taps 0-1 at [r6], taps 2-3 at [r6 + 32].
+; Outputs: m0-m3 = 32-bit filtered sums for rows 1-4 (unrounded, unshifted).
+; Side effects: advances r0 by four rows; clobbers m4-m5.
+%macro PROCESS_CHROMA_SP_W4_4R 0
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+    pmaddwd    m0, [r6 + 0 *32]                ;m0=[0+1]         Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m1, m4                          ;m1=[1 2]
+    pmaddwd    m1, [r6 + 0 *32]                ;m1=[1+2]         Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[2 3]
+    pmaddwd    m2, m4, [r6 + 0 *32]            ;m2=[2+3]         Row3
+    pmaddwd    m4, [r6 + 1 * 32]
+    paddd      m0, m4                          ;m0=[0+1+2+3]     Row1 done
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[3 4]
+    pmaddwd    m3, m5, [r6 + 0 *32]            ;m3=[3+4]         Row4
+    pmaddwd    m5, [r6 + 1 * 32]
+    paddd      m1, m5                          ;m1 = [1+2+3+4]   Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[4 5]
+    pmaddwd    m4, [r6 + 1 * 32]
+    paddd      m2, m4                          ;m2=[2+3+4+5]     Row3
+
+    movq       m4, [r0 + 2 * r1]
+    punpcklwd  m5, m4                          ;m5=[5 6]
+    pmaddwd    m5, [r6 + 1 * 32]
+    paddd      m3, m5                          ;m3=[3+4+5+6]     Row4
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; 4-tap chroma horizontal interpolation, pixel-to-pixel, 6xN blocks
+; (16-bit samples, AVX2). %1 = height; two rows are produced per loop
+; iteration, 6 pixels per row stored as movq (4) + pextrd (2).
+; (Header previously said int16_t *dst; pp output is clipped pixels.)
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+%macro IPFILTER_CHROMA_avx2_6xN 1
+cglobal interp_4tap_horiz_pp_6x%1, 5,6,8
+    add             r1d, r1d                    ; strides in samples -> bytes
+    add             r3d, r3d
+    sub             r0, 2                       ; step back 1 sample for the 4-tap kernel
+    mov             r4d, r4m
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]           ; 4 taps (8 bytes) broadcast to both lanes
+%else
+    vpbroadcastq    m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova            m1, [interp8_hpp_shuf]
+    vpbroadcastd    m2, [pd_32]                 ; rounding offset for the INTERP_SHIFT_PP shift
+    pxor            m5, m5                      ; lower clip bound
+    mova            m6, [idct8_shuf2]
+    mova            m7, [pw_pixel_max]          ; upper clip bound
+
+    mov             r4d, %1/2                   ; loop count: two rows per iteration
+.loop:
+    vbroadcasti128  m3, [r0]
+    vbroadcasti128  m4, [r0 + 8]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, INTERP_SHIFT_PP           ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3, q2020
+    pshufb          xm3, xm6                      ; m3 = WORD[7 6 5 4 3 2 1 0]
+    CLIPW           xm3, xm5, xm7
+    movq            [r2], xm3                     ; pixels 0-3
+    pextrd          [r2 + 8], xm3, 2              ; pixels 4-5
+
+    vbroadcasti128  m3, [r0 + r1]
+    vbroadcasti128  m4, [r0 + r1 + 8]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, INTERP_SHIFT_PP           ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3, q2020
+    pshufb          xm3, xm6                      ; m3 = WORD[7 6 5 4 3 2 1 0]
+    CLIPW           xm3, xm5, xm7
+    movq            [r2 + r3], xm3
+    pextrd          [r2 + r3 + 8], xm3, 2
+
+    lea             r0, [r0 + r1 * 2]
+    lea             r2, [r2 + r3 * 2]
+    dec             r4d
+    jnz             .loop
+    RET
+%endmacro
+IPFILTER_CHROMA_avx2_6xN 8
+IPFILTER_CHROMA_avx2_6xN 16
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; 4-tap chroma horizontal interpolation, pixel-to-pixel, 8x2 block
+; (16-bit samples, AVX2); both rows fully unrolled.
+; (Header previously said int16_t *dst; pp output is clipped pixels.)
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_8x2, 5,6,8
+    add             r1d, r1d                    ; strides in samples -> bytes
+    add             r3d, r3d
+    sub             r0, 2                       ; step back 1 sample for the 4-tap kernel
+    mov             r4d, r4m
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]           ; 4 taps (8 bytes) broadcast to both lanes
+%else
+    vpbroadcastq    m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova            m1, [interp8_hpp_shuf]
+    vpbroadcastd    m2, [pd_32]                 ; rounding offset for the INTERP_SHIFT_PP shift
+    pxor            m5, m5                      ; lower clip bound
+    mova            m6, [idct8_shuf2]
+    mova            m7, [pw_pixel_max]          ; upper clip bound
+
+    ; row 0
+    vbroadcasti128  m3, [r0]
+    vbroadcasti128  m4, [r0 + 8]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, INTERP_SHIFT_PP          ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3,q2020
+    pshufb          xm3, xm6                     ; m3 = WORD[7 6 5 4 3 2 1 0]
+    CLIPW           xm3, xm5, xm7
+    movu            [r2], xm3
+
+    ; row 1
+    vbroadcasti128  m3, [r0 + r1]
+    vbroadcasti128  m4, [r0 + r1 + 8]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, INTERP_SHIFT_PP           ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3,q2020
+    pshufb          xm3, xm6                      ; m3 = WORD[7 6 5 4 3 2 1 0]
+    CLIPW           xm3, xm5, xm7
+    movu            [r2 + r3], xm3
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; 4-tap chroma horizontal interpolation, pixel-to-pixel, 8x4 block
+; (16-bit samples, AVX2); two rows per %rep iteration, four rows total.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_8x4, 5,6,8
+    add             r1d, r1d                    ; strides in samples -> bytes
+    add             r3d, r3d
+    sub             r0, 2                       ; step back 1 sample for the 4-tap kernel
+    mov             r4d, r4m
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]           ; 4 taps (8 bytes) broadcast to both lanes
+%else
+    vpbroadcastq    m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova            m1, [interp8_hpp_shuf]
+    vpbroadcastd    m2, [pd_32]                 ; rounding offset for the INTERP_SHIFT_PP shift
+    pxor            m5, m5                      ; lower clip bound
+    mova            m6, [idct8_shuf2]
+    mova            m7, [pw_pixel_max]          ; upper clip bound
+
+%rep 2
+    vbroadcasti128  m3, [r0]
+    vbroadcasti128  m4, [r0 + 8]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    ; use INTERP_SHIFT_PP rather than a hard-coded 6, matching the sibling
+    ; 8x2/8xN routines and the pd_32 offset (1 << (INTERP_SHIFT_PP - 1)) above
+    psrad           m3, INTERP_SHIFT_PP         ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3,q2020
+    pshufb          xm3, xm6                    ; m3 = WORD[7 6 5 4 3 2 1 0]
+    CLIPW           xm3, xm5, xm7
+    movu            [r2], xm3
+
+    vbroadcasti128  m3, [r0 + r1]
+    vbroadcasti128  m4, [r0 + r1 + 8]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, INTERP_SHIFT_PP         ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3,q2020
+    pshufb          xm3, xm6                    ; m3 = WORD[7 6 5 4 3 2 1 0]
+    CLIPW           xm3, xm5, xm7
+    movu            [r2 + r3], xm3
+
+    lea             r0, [r0 + r1 * 2]
+    lea             r2, [r2 + r3 * 2]
+%endrep
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+%macro IPFILTER_CHROMA_avx2_8xN 1
+cglobal interp_4tap_horiz_pp_8x%1, 5,6,8
+    add             r1d, r1d
+    add             r3d, r3d
+    sub             r0, 2
+    mov             r4d, r4m
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova            m1, [interp8_hpp_shuf]
+    vpbroadcastd    m2, [pd_32]
+    pxor            m5, m5
+    mova            m6, [idct8_shuf2]
+    mova            m7, [pw_pixel_max]
+
+    mov             r4d, %1/2
+.loop:
+    vbroadcasti128  m3, [r0]
+    vbroadcasti128  m4, [r0 + 8]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, 6                         ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3, q2020
+    pshufb          xm3, xm6                      ; m3 = WORD[7 6 5 4 3 2 1 0]
+    CLIPW           xm3, xm5, xm7
+    movu            [r2], xm3
+
+    vbroadcasti128  m3, [r0 + r1]
+    vbroadcasti128  m4, [r0 + r1 + 8]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, 6                         ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3, q2020
+    pshufb          xm3, xm6                      ; m3 = WORD[7 6 5 4 3 2 1 0]
+    CLIPW           xm3, xm5, xm7
+    movu            [r2 + r3], xm3
+
+    lea             r0, [r0 + r1 * 2]
+    lea             r2, [r2 + r3 * 2]
+    dec             r4d
+    jnz             .loop
+    RET
+%endmacro
+IPFILTER_CHROMA_avx2_8xN 6
+IPFILTER_CHROMA_avx2_8xN 8
+IPFILTER_CHROMA_avx2_8xN 12
+IPFILTER_CHROMA_avx2_8xN 16
+IPFILTER_CHROMA_avx2_8xN 32
+IPFILTER_CHROMA_avx2_8xN 64
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+%macro IPFILTER_CHROMA_avx2_16xN 1
+%if ARCH_X86_64
+cglobal interp_4tap_horiz_pp_16x%1, 5,6,9
+    add             r1d, r1d
+    add             r3d, r3d
+    sub             r0, 2
+    mov             r4d, r4m
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova            m1, [interp8_hpp_shuf]
+    vpbroadcastd    m2, [pd_32]
+    pxor            m5, m5
+    mova            m6, [idct8_shuf2]
+    mova            m7, [pw_pixel_max]
+
+    mov             r4d, %1
+.loop:
+    vbroadcasti128  m3, [r0]
+    vbroadcasti128  m4, [r0 + 8]
+
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, 6                       ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3, q2020
+    pshufb          xm3, xm6                     ; m3 = WORD[7 6 5 4 3 2 1 0]
+
+    vbroadcasti128  m4, [r0 + 16]
+    vbroadcasti128  m8, [r0 + 24]
+
+    pshufb          m4, m1
+    pshufb          m8, m1
+
+    pmaddwd         m4, m0
+    pmaddwd         m8, m0
+    phaddd          m4, m8
+    paddd           m4, m2
+    psrad           m4, 6                       ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m4, m4
+    vpermq          m4, m4, q2020
+    pshufb          xm4, xm6                    ; m3 = WORD[7 6 5 4 3 2 1 0]
+    vinserti128     m3, m3, xm4, 1
+    CLIPW           m3, m5, m7
+    movu            [r2], m3
+
+    add             r0, r1
+    add             r2, r3
+    dec             r4d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+IPFILTER_CHROMA_avx2_16xN 4
+IPFILTER_CHROMA_avx2_16xN 8
+IPFILTER_CHROMA_avx2_16xN 12
+IPFILTER_CHROMA_avx2_16xN 16
+IPFILTER_CHROMA_avx2_16xN 24
+IPFILTER_CHROMA_avx2_16xN 32
+IPFILTER_CHROMA_avx2_16xN 64
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+%macro IPFILTER_CHROMA_avx2_32xN 1
+%if ARCH_X86_64
+cglobal interp_4tap_horiz_pp_32x%1, 5,6,9
+    add             r1d, r1d
+    add             r3d, r3d
+    sub             r0, 2
+    mov             r4d, r4m
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova            m1, [interp8_hpp_shuf]
+    vpbroadcastd    m2, [pd_32]
+    pxor            m5, m5
+    mova            m6, [idct8_shuf2]
+    mova            m7, [pw_pixel_max]
+
+    mov             r6d, %1
+.loop:
+%assign x 0
+%rep 2
+    vbroadcasti128  m3, [r0 + x]
+    vbroadcasti128  m4, [r0 + 8 + x]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, 6                       ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3, q2020
+    pshufb          xm3, xm6                     ; m3 = WORD[7 6 5 4 3 2 1 0]
+
+    vbroadcasti128  m4, [r0 + 16 + x]
+    vbroadcasti128  m8, [r0 + 24 + x]
+    pshufb          m4, m1
+    pshufb          m8, m1
+
+    pmaddwd         m4, m0
+    pmaddwd         m8, m0
+    phaddd          m4, m8
+    paddd           m4, m2
+    psrad           m4, 6                       ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m4, m4
+    vpermq          m4, m4, q2020
+    pshufb          xm4, xm6                    ; m3 = WORD[7 6 5 4 3 2 1 0]
+    vinserti128     m3, m3, xm4, 1
+    CLIPW           m3, m5, m7
+    movu            [r2 + x], m3
+    %assign x x+32
+    %endrep
+
+    add             r0, r1
+    add             r2, r3
+    dec             r6d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+IPFILTER_CHROMA_avx2_32xN 8
+IPFILTER_CHROMA_avx2_32xN 16
+IPFILTER_CHROMA_avx2_32xN 24
+IPFILTER_CHROMA_avx2_32xN 32
+IPFILTER_CHROMA_avx2_32xN 48
+IPFILTER_CHROMA_avx2_32xN 64
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+%macro IPFILTER_CHROMA_avx2_12xN 1
+%if ARCH_X86_64
+cglobal interp_4tap_horiz_pp_12x%1, 5,6,8
+    add             r1d, r1d
+    add             r3d, r3d
+    sub             r0, 2
+    mov             r4d, r4m
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova            m1, [interp8_hpp_shuf]
+    vpbroadcastd    m2, [pd_32]
+    pxor            m5, m5
+    mova            m6, [idct8_shuf2]
+    mova            m7, [pw_pixel_max]
+
+    mov             r4d, %1
+.loop:
+    vbroadcasti128  m3, [r0]
+    vbroadcasti128  m4, [r0 + 8]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, 6                       ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3, q2020
+    pshufb          xm3, xm6                     ; m3 = WORD[7 6 5 4 3 2 1 0]
+    CLIPW           xm3, xm5, xm7
+    movu            [r2], xm3
+
+    vbroadcasti128  m3, [r0 + 16]
+    vbroadcasti128  m4, [r0 + 24]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, 6                       ; m3 = DWORD[7 6 3 2 5 4 1 0]
+
+    packusdw        m3, m3
+    vpermq          m3, m3, q2020
+    pshufb          xm3, xm6                    ; m3 = WORD[7 6 5 4 3 2 1 0]
+    CLIPW           xm3, xm5, xm7
+    movq            [r2 + 16], xm3
+
+    add             r0, r1
+    add             r2, r3
+    dec             r4d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+IPFILTER_CHROMA_avx2_12xN 16
+IPFILTER_CHROMA_avx2_12xN 32
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+%macro IPFILTER_CHROMA_avx2_24xN 1
+%if ARCH_X86_64
+cglobal interp_4tap_horiz_pp_24x%1, 5,6,9
+    add             r1d, r1d
+    add             r3d, r3d
+    sub             r0, 2
+    mov             r4d, r4m
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova            m1, [interp8_hpp_shuf]
+    vpbroadcastd    m2, [pd_32]
+    pxor            m5, m5
+    mova            m6, [idct8_shuf2]
+    mova            m7, [pw_pixel_max]
+
+    mov             r4d, %1
+.loop:
+    vbroadcasti128  m3, [r0]
+    vbroadcasti128  m4, [r0 + 8]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, 6
+
+    vbroadcasti128  m4, [r0 + 16]
+    vbroadcasti128  m8, [r0 + 24]
+    pshufb          m4, m1
+    pshufb          m8, m1
+
+    pmaddwd         m4, m0
+    pmaddwd         m8, m0
+    phaddd          m4, m8
+    paddd           m4, m2
+    psrad           m4, 6
+
+    packusdw        m3, m4
+    vpermq          m3, m3, q3120
+    pshufb          m3, m6
+    CLIPW           m3, m5, m7
+    movu            [r2], m3
+
+    vbroadcasti128  m3, [r0 + 32]
+    vbroadcasti128  m4, [r0 + 40]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, 6
+
+    packusdw        m3, m3
+    vpermq          m3, m3, q2020
+    pshufb          xm3, xm6
+    CLIPW           xm3, xm5, xm7
+    movu            [r2 + 32], xm3
+
+    add             r0, r1
+    add             r2, r3
+    dec             r4d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+IPFILTER_CHROMA_avx2_24xN 32
+IPFILTER_CHROMA_avx2_24xN 64
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+%macro IPFILTER_CHROMA_avx2_64xN 1
+%if ARCH_X86_64
+cglobal interp_4tap_horiz_pp_64x%1, 5,6,9
+    add             r1d, r1d
+    add             r3d, r3d
+    sub             r0, 2
+    mov             r4d, r4m
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova            m1, [interp8_hpp_shuf]
+    vpbroadcastd    m2, [pd_32]
+    pxor            m5, m5
+    mova            m6, [idct8_shuf2]
+    mova            m7, [pw_pixel_max]
+
+    mov             r6d, %1
+.loop:
+%assign x 0
+%rep 4
+    vbroadcasti128  m3, [r0 + x]
+    vbroadcasti128  m4, [r0 + 8 + x]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, 6
+
+    vbroadcasti128  m4, [r0 + 16 + x]
+    vbroadcasti128  m8, [r0 + 24 + x]
+    pshufb          m4, m1
+    pshufb          m8, m1
+
+    pmaddwd         m4, m0
+    pmaddwd         m8, m0
+    phaddd          m4, m8
+    paddd           m4, m2
+    psrad           m4, 6
+
+    packusdw        m3, m4
+    vpermq          m3, m3, q3120
+    pshufb          m3, m6
+    CLIPW           m3, m5, m7
+    movu            [r2 + x], m3
+    %assign x x+32
+    %endrep
+
+    add             r0, r1
+    add             r2, r3
+    dec             r6d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+IPFILTER_CHROMA_avx2_64xN 16
+IPFILTER_CHROMA_avx2_64xN 32
+IPFILTER_CHROMA_avx2_64xN 48
+IPFILTER_CHROMA_avx2_64xN 64
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+%if ARCH_X86_64
+cglobal interp_4tap_horiz_pp_48x64, 5,6,9
+    add             r1d, r1d
+    add             r3d, r3d
+    sub             r0, 2
+    mov             r4d, r4m
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova            m1, [interp8_hpp_shuf]
+    vpbroadcastd    m2, [pd_32]
+    pxor            m5, m5
+    mova            m6, [idct8_shuf2]
+    mova            m7, [pw_pixel_max]
+
+    mov             r4d, 64
+.loop:
+%assign x 0
+%rep 3
+    vbroadcasti128  m3, [r0 + x]
+    vbroadcasti128  m4, [r0 + 8 + x]
+    pshufb          m3, m1
+    pshufb          m4, m1
+
+    pmaddwd         m3, m0
+    pmaddwd         m4, m0
+    phaddd          m3, m4
+    paddd           m3, m2
+    psrad           m3, 6
+
+    vbroadcasti128  m4, [r0 + 16 + x]
+    vbroadcasti128  m8, [r0 + 24 + x]
+    pshufb          m4, m1
+    pshufb          m8, m1
+
+    pmaddwd         m4, m0
+    pmaddwd         m8, m0
+    phaddd          m4, m8
+    paddd           m4, m2
+    psrad           m4, 6
+
+    packusdw        m3, m4
+    vpermq          m3, m3, q3120
+    pshufb          m3, m6
+    CLIPW           m3, m5, m7
+    movu            [r2 + x], m3
+%assign x x+32
+%endrep
+
+    add             r0, r1
+    add             r2, r3
+    dec             r4d
+    jnz             .loop
+    RET
+%endif
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_%3_%1x%2(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_SS 4
+INIT_XMM sse2
+cglobal interp_4tap_vert_%3_%1x%2, 5, 7, %4 ,0-gprsize
+
+    add       r1d, r1d
+    add       r3d, r3d
+    sub       r0, r1
+    shl       r4d, 6
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_ChromaCoeffV + r4]
+%endif
+
+    mov       dword [rsp], %2/4
+
+%ifnidn %3, ss
+    %ifnidn %3, ps
+        mova      m7, [pw_pixel_max]
+        %ifidn %3, pp
+            mova      m6, [INTERP_OFFSET_PP]
+        %else
+            mova      m6, [INTERP_OFFSET_SP]
+        %endif
+    %else
+        mova      m6, [INTERP_OFFSET_PS]
+    %endif
+%endif
+
+.loopH:
+    mov       r4d, (%1/4)
+.loopW:
+    PROCESS_CHROMA_SP_W4_4R
+
+%ifidn %3, ss
+    psrad     m0, 6
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%elifidn %3, ps
+    paddd     m0, m6
+    paddd     m1, m6
+    paddd     m2, m6
+    paddd     m3, m6
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    psrad     m3, INTERP_SHIFT_PS
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%else
+    paddd     m0, m6
+    paddd     m1, m6
+    paddd     m2, m6
+    paddd     m3, m6
+    %ifidn %3, pp
+        psrad     m0, INTERP_SHIFT_PP
+        psrad     m1, INTERP_SHIFT_PP
+        psrad     m2, INTERP_SHIFT_PP
+        psrad     m3, INTERP_SHIFT_PP
+    %else
+        psrad     m0, INTERP_SHIFT_SP
+        psrad     m1, INTERP_SHIFT_SP
+        psrad     m2, INTERP_SHIFT_SP
+        psrad     m3, INTERP_SHIFT_SP
+    %endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    pxor      m5, m5
+    CLIPW2    m0, m2, m5, m7
+%endif
+
+    movh      [r2], m0
+    movhps    [r2 + r3], m0
+    lea       r5, [r2 + 2 * r3]
+    movh      [r5], m2
+    movhps    [r5 + r3], m2
+
+    lea       r5, [4 * r1 - 2 * 4]
+    sub       r0, r5
+    add       r2, 2 * 4
+
+    dec       r4d
+    jnz       .loopW
+
+    lea       r0, [r0 + 4 * r1 - 2 * %1]
+    lea       r2, [r2 + 4 * r3 - 2 * %1]
+
+    dec       dword [rsp]
+    jnz       .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_SS 4, 4, ss, 6
+    FILTER_VER_CHROMA_SS 4, 8, ss, 6
+    FILTER_VER_CHROMA_SS 16, 16, ss, 6
+    FILTER_VER_CHROMA_SS 16, 8, ss, 6
+    FILTER_VER_CHROMA_SS 16, 12, ss, 6
+    FILTER_VER_CHROMA_SS 12, 16, ss, 6
+    FILTER_VER_CHROMA_SS 16, 4, ss, 6
+    FILTER_VER_CHROMA_SS 4, 16, ss, 6
+    FILTER_VER_CHROMA_SS 32, 32, ss, 6
+    FILTER_VER_CHROMA_SS 32, 16, ss, 6
+    FILTER_VER_CHROMA_SS 16, 32, ss, 6
+    FILTER_VER_CHROMA_SS 32, 24, ss, 6
+    FILTER_VER_CHROMA_SS 24, 32, ss, 6
+    FILTER_VER_CHROMA_SS 32, 8, ss, 6
+
+    FILTER_VER_CHROMA_SS 4, 4, ps, 7
+    FILTER_VER_CHROMA_SS 4, 8, ps, 7
+    FILTER_VER_CHROMA_SS 16, 16, ps, 7
+    FILTER_VER_CHROMA_SS 16, 8, ps, 7
+    FILTER_VER_CHROMA_SS 16, 12, ps, 7
+    FILTER_VER_CHROMA_SS 12, 16, ps, 7
+    FILTER_VER_CHROMA_SS 16, 4, ps, 7
+    FILTER_VER_CHROMA_SS 4, 16, ps, 7
+    FILTER_VER_CHROMA_SS 32, 32, ps, 7
+    FILTER_VER_CHROMA_SS 32, 16, ps, 7
+    FILTER_VER_CHROMA_SS 16, 32, ps, 7
+    FILTER_VER_CHROMA_SS 32, 24, ps, 7
+    FILTER_VER_CHROMA_SS 24, 32, ps, 7
+    FILTER_VER_CHROMA_SS 32, 8, ps, 7
+
+    FILTER_VER_CHROMA_SS 4, 4, sp, 8
+    FILTER_VER_CHROMA_SS 4, 8, sp, 8
+    FILTER_VER_CHROMA_SS 16, 16, sp, 8
+    FILTER_VER_CHROMA_SS 16, 8, sp, 8
+    FILTER_VER_CHROMA_SS 16, 12, sp, 8
+    FILTER_VER_CHROMA_SS 12, 16, sp, 8
+    FILTER_VER_CHROMA_SS 16, 4, sp, 8
+    FILTER_VER_CHROMA_SS 4, 16, sp, 8
+    FILTER_VER_CHROMA_SS 32, 32, sp, 8
+    FILTER_VER_CHROMA_SS 32, 16, sp, 8
+    FILTER_VER_CHROMA_SS 16, 32, sp, 8
+    FILTER_VER_CHROMA_SS 32, 24, sp, 8
+    FILTER_VER_CHROMA_SS 24, 32, sp, 8
+    FILTER_VER_CHROMA_SS 32, 8, sp, 8
+
+    FILTER_VER_CHROMA_SS 4, 4, pp, 8
+    FILTER_VER_CHROMA_SS 4, 8, pp, 8
+    FILTER_VER_CHROMA_SS 16, 16, pp, 8
+    FILTER_VER_CHROMA_SS 16, 8, pp, 8
+    FILTER_VER_CHROMA_SS 16, 12, pp, 8
+    FILTER_VER_CHROMA_SS 12, 16, pp, 8
+    FILTER_VER_CHROMA_SS 16, 4, pp, 8
+    FILTER_VER_CHROMA_SS 4, 16, pp, 8
+    FILTER_VER_CHROMA_SS 32, 32, pp, 8
+    FILTER_VER_CHROMA_SS 32, 16, pp, 8
+    FILTER_VER_CHROMA_SS 16, 32, pp, 8
+    FILTER_VER_CHROMA_SS 32, 24, pp, 8
+    FILTER_VER_CHROMA_SS 24, 32, pp, 8
+    FILTER_VER_CHROMA_SS 32, 8, pp, 8
+
+
+    FILTER_VER_CHROMA_SS 16, 24, ss, 6
+    FILTER_VER_CHROMA_SS 12, 32, ss, 6
+    FILTER_VER_CHROMA_SS 4, 32, ss, 6
+    FILTER_VER_CHROMA_SS 32, 64, ss, 6
+    FILTER_VER_CHROMA_SS 16, 64, ss, 6
+    FILTER_VER_CHROMA_SS 32, 48, ss, 6
+    FILTER_VER_CHROMA_SS 24, 64, ss, 6
+
+    FILTER_VER_CHROMA_SS 16, 24, ps, 7
+    FILTER_VER_CHROMA_SS 12, 32, ps, 7
+    FILTER_VER_CHROMA_SS 4, 32, ps, 7
+    FILTER_VER_CHROMA_SS 32, 64, ps, 7
+    FILTER_VER_CHROMA_SS 16, 64, ps, 7
+    FILTER_VER_CHROMA_SS 32, 48, ps, 7
+    FILTER_VER_CHROMA_SS 24, 64, ps, 7
+
+    FILTER_VER_CHROMA_SS 16, 24, sp, 8
+    FILTER_VER_CHROMA_SS 12, 32, sp, 8
+    FILTER_VER_CHROMA_SS 4, 32, sp, 8
+    FILTER_VER_CHROMA_SS 32, 64, sp, 8
+    FILTER_VER_CHROMA_SS 16, 64, sp, 8
+    FILTER_VER_CHROMA_SS 32, 48, sp, 8
+    FILTER_VER_CHROMA_SS 24, 64, sp, 8
+
+    FILTER_VER_CHROMA_SS 16, 24, pp, 8
+    FILTER_VER_CHROMA_SS 12, 32, pp, 8
+    FILTER_VER_CHROMA_SS 4, 32, pp, 8
+    FILTER_VER_CHROMA_SS 32, 64, pp, 8
+    FILTER_VER_CHROMA_SS 16, 64, pp, 8
+    FILTER_VER_CHROMA_SS 32, 48, pp, 8
+    FILTER_VER_CHROMA_SS 24, 64, pp, 8
+
+
+    FILTER_VER_CHROMA_SS 48, 64, ss, 6
+    FILTER_VER_CHROMA_SS 64, 48, ss, 6
+    FILTER_VER_CHROMA_SS 64, 64, ss, 6
+    FILTER_VER_CHROMA_SS 64, 32, ss, 6
+    FILTER_VER_CHROMA_SS 64, 16, ss, 6
+
+    FILTER_VER_CHROMA_SS 48, 64, ps, 7
+    FILTER_VER_CHROMA_SS 64, 48, ps, 7
+    FILTER_VER_CHROMA_SS 64, 64, ps, 7
+    FILTER_VER_CHROMA_SS 64, 32, ps, 7
+    FILTER_VER_CHROMA_SS 64, 16, ps, 7
+
+    FILTER_VER_CHROMA_SS 48, 64, sp, 8
+    FILTER_VER_CHROMA_SS 64, 48, sp, 8
+    FILTER_VER_CHROMA_SS 64, 64, sp, 8
+    FILTER_VER_CHROMA_SS 64, 32, sp, 8
+    FILTER_VER_CHROMA_SS 64, 16, sp, 8
+
+    FILTER_VER_CHROMA_SS 48, 64, pp, 8
+    FILTER_VER_CHROMA_SS 64, 48, pp, 8
+    FILTER_VER_CHROMA_SS 64, 64, pp, 8
+    FILTER_VER_CHROMA_SS 64, 32, pp, 8
+    FILTER_VER_CHROMA_SS 64, 16, pp, 8
+
+
+; Vertical 4-tap kernel for a 2-pixel-wide column over 4 rows.
+; %1 = coefficient table base register; on exit m0 holds rows 1-2 sums and
+; m2 holds rows 3-4 sums (unrounded, unshifted dwords). Advances r0 by 4 rows.
+%macro PROCESS_CHROMA_SP_W2_4R 1
+    movd       m0, [r0]
+    movd       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+
+    lea        r0, [r0 + 2 * r1]
+    movd       m2, [r0]
+    punpcklwd  m1, m2                          ;m1=[1 2]
+    punpcklqdq m0, m1                          ;m0=[0 1 1 2]
+    pmaddwd    m0, [%1 + 0 *32]                ;m0=[0+1 1+2] Row 1-2
+
+    movd       m1, [r0 + r1]
+    punpcklwd  m2, m1                          ;m2=[2 3]
+
+    lea        r0, [r0 + 2 * r1]
+    movd       m3, [r0]
+    punpcklwd  m1, m3                          ;m1=[3 4]
+    punpcklqdq m2, m1                          ;m2=[2 3 3 4]
+
+    pmaddwd    m4, m2, [%1 + 1 * 32]           ;m4=[2+3 3+4] Row 1-2
+    pmaddwd    m2, [%1 + 0 * 32]               ;m2=[2+3 3+4] Row 3-4
+    paddd      m0, m4                          ;m0=[0+1+2+3 1+2+3+4] Row 1-2
+
+    movd       m1, [r0 + r1]
+    punpcklwd  m3, m1                          ;m3=[4 5]
+
+    movd       m4, [r0 + 2 * r1]
+    punpcklwd  m1, m4                          ;m1=[5 6]
+    punpcklqdq m3, m1                          ;m3=[4 5 5 6]
+    pmaddwd    m3, [%1 + 1 * 32]               ;m3=[4+5 5+6] Row 3-4
+    paddd      m2, m3                          ;m2=[2+3+4+5 3+4+5+6] Row 3-4
+%endmacro
+
+;---------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vertical_%2_2x%1(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W2 3
+INIT_XMM sse4
+cglobal interp_4tap_vert_%2_2x%1, 5, 6, %3
+
+    add       r1d, r1d
+    add       r3d, r3d
+    sub       r0, r1
+    shl       r4d, 6
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+
+    mov       r4d, (%1/4)
+%ifnidn %2, ss
+    %ifnidn %2, ps
+        pxor      m7, m7
+        mova      m6, [pw_pixel_max]
+        %ifidn %2, pp
+            mova      m5, [INTERP_OFFSET_PP]
+        %else
+            mova      m5, [INTERP_OFFSET_SP]
+        %endif
+    %else
+        mova      m5, [INTERP_OFFSET_PS]
+    %endif
+%endif
+
+.loopH:
+    PROCESS_CHROMA_SP_W2_4R r5
+%ifidn %2, ss
+    psrad     m0, 6
+    psrad     m2, 6
+    packssdw  m0, m2
+%elifidn %2, ps
+    paddd     m0, m5
+    paddd     m2, m5
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    packssdw  m0, m2
+%else
+    paddd     m0, m5
+    paddd     m2, m5
+    %ifidn %2, pp
+        psrad     m0, INTERP_SHIFT_PP
+        psrad     m2, INTERP_SHIFT_PP
+    %else
+        psrad     m0, INTERP_SHIFT_SP
+        psrad     m2, INTERP_SHIFT_SP
+    %endif
+    packusdw  m0, m2
+    CLIPW     m0, m7,    m6
+%endif
+
+    movd      [r2], m0
+    pextrd    [r2 + r3], m0, 1
+    lea       r2, [r2 + 2 * r3]
+    pextrd    [r2], m0, 2
+    pextrd    [r2 + r3], m0, 3
+
+    lea       r2, [r2 + 2 * r3]
+
+    dec       r4d
+    jnz       .loopH
+    RET
+%endmacro
+
+FILTER_VER_CHROMA_W2 4, ss, 5
+FILTER_VER_CHROMA_W2 8, ss, 5
+
+FILTER_VER_CHROMA_W2 4, pp, 8
+FILTER_VER_CHROMA_W2 8, pp, 8
+
+FILTER_VER_CHROMA_W2 4, ps, 6
+FILTER_VER_CHROMA_W2 8, ps, 6
+
+FILTER_VER_CHROMA_W2 4, sp, 8
+FILTER_VER_CHROMA_W2 8, sp, 8
+
+FILTER_VER_CHROMA_W2 16, ss, 5
+FILTER_VER_CHROMA_W2 16, pp, 8
+FILTER_VER_CHROMA_W2 16, ps, 6
+FILTER_VER_CHROMA_W2 16, sp, 8
+
+
+;---------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_%1_4x2(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W4 3
+INIT_XMM sse4
+cglobal interp_4tap_vert_%2_4x%1, 5, 6, %3
+    add        r1d, r1d
+    add        r3d, r3d
+    sub        r0, r1
+    shl        r4d, 6
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeffV]
+    lea        r5, [r5 + r4]
+%else
+    lea        r5, [tab_ChromaCoeffV + r4]
+%endif
+
+%ifnidn %2, 2
+    mov        r4d, %1/2
+%endif
+
+%ifnidn %2, ss
+    %ifnidn %2, ps
+        pxor      m6, m6
+        mova      m5, [pw_pixel_max]
+        %ifidn %2, pp
+            mova      m4, [INTERP_OFFSET_PP]
+        %else
+            mova      m4, [INTERP_OFFSET_SP]
+        %endif
+    %else
+        mova      m4, [INTERP_OFFSET_PS]
+    %endif
+%endif
+
+%ifnidn %2, 2
+.loop:
+%endif
+
+    movh       m0, [r0]
+    movh       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+    pmaddwd    m0, [r5 + 0 *32]                ;m0=[0+1]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movh       m2, [r0]
+    punpcklwd  m1, m2                          ;m1=[1 2]
+    pmaddwd    m1, [r5 + 0 *32]                ;m1=[1+2]  Row2
+
+    movh       m3, [r0 + r1]
+    punpcklwd  m2, m3                          ;m4=[2 3]
+    pmaddwd    m2, [r5 + 1 * 32]
+    paddd      m0, m2                          ;m0=[0+1+2+3]  Row1 done
+
+    movh       m2, [r0 + 2 * r1]
+    punpcklwd  m3, m2                          ;m5=[3 4]
+    pmaddwd    m3, [r5 + 1 * 32]
+    paddd      m1, m3                          ;m1=[1+2+3+4]  Row2 done
+
+%ifidn %2, ss
+    psrad     m0, 6
+    psrad     m1, 6
+    packssdw  m0, m1
+%elifidn %2, ps
+    paddd     m0, m4
+    paddd     m1, m4
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    packssdw  m0, m1
+%else
+    paddd     m0, m4
+    paddd     m1, m4
+    %ifidn %2, pp
+        psrad     m0, INTERP_SHIFT_PP
+        psrad     m1, INTERP_SHIFT_PP
+    %else
+        psrad     m0, INTERP_SHIFT_SP
+        psrad     m1, INTERP_SHIFT_SP
+    %endif
+    packusdw  m0, m1
+    CLIPW     m0, m6,    m5
+%endif
+
+    movh       [r2], m0
+    movhps     [r2 + r3], m0
+
+%ifnidn %2, 2
+    lea        r2, [r2 + r3 * 2]
+    dec        r4d
+    jnz        .loop
+%endif
+    RET
+%endmacro
+
+FILTER_VER_CHROMA_W4 2, ss, 4
+FILTER_VER_CHROMA_W4 2, pp, 7
+FILTER_VER_CHROMA_W4 2, ps, 5
+FILTER_VER_CHROMA_W4 2, sp, 7
+
+FILTER_VER_CHROMA_W4 4, ss, 4
+FILTER_VER_CHROMA_W4 4, pp, 7
+FILTER_VER_CHROMA_W4 4, ps, 5
+FILTER_VER_CHROMA_W4 4, sp, 7
+
+;-------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vertical_%1_6x8(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W6 3
+INIT_XMM sse4
+cglobal interp_4tap_vert_%2_6x%1, 5, 7, %3
+    add       r1d, r1d
+    add       r3d, r3d
+    sub       r0, r1
+    shl       r4d, 6
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_ChromaCoeffV + r4]
+%endif
+
+    mov       r4d, %1/4
+
+%ifnidn %2, ss
+    %ifnidn %2, ps
+        mova      m7, [pw_pixel_max]
+        %ifidn %2, pp
+            mova      m6, [INTERP_OFFSET_PP]
+        %else
+            mova      m6, [INTERP_OFFSET_SP]
+        %endif
+    %else
+        mova      m6, [INTERP_OFFSET_PS]
+    %endif
+%endif
+
+.loopH:
+    PROCESS_CHROMA_SP_W4_4R
+
+%ifidn %2, ss
+    psrad     m0, 6
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%elifidn %2, ps
+    paddd     m0, m6
+    paddd     m1, m6
+    paddd     m2, m6
+    paddd     m3, m6
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    psrad     m3, INTERP_SHIFT_PS
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%else
+    paddd     m0, m6
+    paddd     m1, m6
+    paddd     m2, m6
+    paddd     m3, m6
+    %ifidn %2, pp
+        psrad     m0, INTERP_SHIFT_PP
+        psrad     m1, INTERP_SHIFT_PP
+        psrad     m2, INTERP_SHIFT_PP
+        psrad     m3, INTERP_SHIFT_PP
+    %else
+        psrad     m0, INTERP_SHIFT_SP
+        psrad     m1, INTERP_SHIFT_SP
+        psrad     m2, INTERP_SHIFT_SP
+        psrad     m3, INTERP_SHIFT_SP
+    %endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    pxor      m5, m5
+    CLIPW2    m0, m2, m5, m7
+%endif
+
+    movh      [r2], m0
+    movhps    [r2 + r3], m0
+    lea       r5, [r2 + 2 * r3]
+    movh      [r5], m2
+    movhps    [r5 + r3], m2
+
+    lea       r5, [4 * r1 - 2 * 4]
+    sub       r0, r5
+    add       r2, 2 * 4
+
+    PROCESS_CHROMA_SP_W2_4R r6
+
+%ifidn %2, ss
+    psrad     m0, 6
+    psrad     m2, 6
+    packssdw  m0, m2
+%elifidn %2, ps
+    paddd     m0, m6
+    paddd     m2, m6
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    packssdw  m0, m2
+%else
+    paddd     m0, m6
+    paddd     m2, m6
+    %ifidn %2, pp
+        psrad     m0, INTERP_SHIFT_PP
+        psrad     m2, INTERP_SHIFT_PP
+    %else
+        psrad     m0, INTERP_SHIFT_SP
+        psrad     m2, INTERP_SHIFT_SP
+    %endif
+    packusdw  m0, m2
+    CLIPW     m0, m5,    m7
+%endif
+
+    movd      [r2], m0
+    pextrd    [r2 + r3], m0, 1
+    lea       r2, [r2 + 2 * r3]
+    pextrd    [r2], m0, 2
+    pextrd    [r2 + r3], m0, 3
+
+    sub       r0, 2 * 4
+    lea       r2, [r2 + 2 * r3 - 2 * 4]
+
+    dec       r4d
+    jnz       .loopH
+    RET
+%endmacro
+
+FILTER_VER_CHROMA_W6 8, ss, 6        ; instantiate 6-wide vertical chroma filters: args = height, mode (ss/ps/sp/pp), XMM register count
+FILTER_VER_CHROMA_W6 8, ps, 7
+FILTER_VER_CHROMA_W6 8, sp, 8
+FILTER_VER_CHROMA_W6 8, pp, 8
+
+FILTER_VER_CHROMA_W6 16, ss, 6
+FILTER_VER_CHROMA_W6 16, ps, 7
+FILTER_VER_CHROMA_W6 16, sp, 8
+FILTER_VER_CHROMA_W6 16, pp, 8
+
+%macro PROCESS_CHROMA_SP_W8_2R 0     ; 4-tap vertical filter core: two 8-wide rows -> 32-bit sums in m0-m3 (r0 advanced by 2 rows)
+    movu       m1, [r0]
+    movu       m3, [r0 + r1]
+    punpcklwd  m0, m1, m3
+    pmaddwd    m0, [r5 + 0 * 32]                ;m0 = [0l+1l]  Row1l
+    punpckhwd  m1, m3
+    pmaddwd    m1, [r5 + 0 * 32]                ;m1 = [0h+1h]  Row1h
+
+    movu       m4, [r0 + 2 * r1]
+    punpcklwd  m2, m3, m4
+    pmaddwd    m2, [r5 + 0 * 32]                ;m2 = [1l+2l]  Row2l
+    punpckhwd  m3, m4
+    pmaddwd    m3, [r5 + 0 * 32]                ;m3 = [1h+2h]  Row2h
+
+    lea        r0, [r0 + 2 * r1]                ; advance src pointer by two rows
+    movu       m5, [r0 + r1]
+    punpcklwd  m6, m4, m5
+    pmaddwd    m6, [r5 + 1 * 32]                ;m6 = [2l+3l]  Row1l
+    paddd      m0, m6                           ;m0 = [0l+1l+2l+3l]  Row1l sum
+    punpckhwd  m4, m5
+    pmaddwd    m4, [r5 + 1 * 32]                ;m4 = [2h+3h]  Row1h
+    paddd      m1, m4                           ;m1 = [0h+1h+2h+3h]  Row1h sum
+
+    movu       m4, [r0 + 2 * r1]
+    punpcklwd  m6, m5, m4
+    pmaddwd    m6, [r5 + 1 * 32]                ;m6 = [3l+4l]  Row2l
+    paddd      m2, m6                           ;m2 = [1l+2l+3l+4l]  Row2l sum
+    punpckhwd  m5, m4
+    pmaddwd    m5, [r5 + 1 * 32]                ;m5 = [3h+4h]  Row2h
+    paddd      m3, m5                           ;m3 = [1h+2h+3h+4h]  Row2h sum
+%endmacro
+
+;----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_%3_%1x%2(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W8 4        ; %1 = width, %2 = height, %3 = mode (ss/ps/sp/pp), %4 = XMM register count
+INIT_XMM sse2
+cglobal interp_4tap_vert_%3_%1x%2, 5, 6, %4
+
+    add       r1d, r1d               ; strides in 16-bit units -> bytes
+    add       r3d, r3d
+    sub       r0, r1                 ; step back one row (filter taps -1..+2)
+    shl       r4d, 6                 ; each tab_ChromaCoeffV entry is 64 bytes
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+
+    mov       r4d, %2/2              ; loop processes 2 rows per iteration
+
+%ifidn %3, pp
+    mova      m7, [INTERP_OFFSET_PP] ; rounding offset for the selected mode
+%elifidn %3, sp
+    mova      m7, [INTERP_OFFSET_SP]
+%elifidn %3, ps
+    mova      m7, [INTERP_OFFSET_PS]
+%endif
+
+.loopH:
+    PROCESS_CHROMA_SP_W8_2R          ; m0-m3 = two rows of 32-bit filtered sums; r0 advanced inside
+
+%ifidn %3, ss
+    psrad     m0, 6                  ; ss: plain shift, no rounding or clipping
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%elifidn %3, ps
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    psrad     m3, INTERP_SHIFT_PS
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%else
+    paddd     m0, m7                 ; pp/sp: round, shift, then clip to pixel range
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    %ifidn %3, pp
+        psrad     m0, INTERP_SHIFT_PP
+        psrad     m1, INTERP_SHIFT_PP
+        psrad     m2, INTERP_SHIFT_PP
+        psrad     m3, INTERP_SHIFT_PP
+    %else
+        psrad     m0, INTERP_SHIFT_SP
+        psrad     m1, INTERP_SHIFT_SP
+        psrad     m2, INTERP_SHIFT_SP
+        psrad     m3, INTERP_SHIFT_SP
+    %endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    pxor      m5, m5
+    mova      m6, [pw_pixel_max]
+    CLIPW2    m0, m2, m5, m6         ; clamp both rows to [0, pixel_max]
+%endif
+
+    movu      [r2], m0               ; store two 8-sample output rows
+    movu      [r2 + r3], m2
+
+    lea       r2, [r2 + 2 * r3]
+
+    dec       r4d
+    jnz       .loopH
+    RET
+%endmacro
+
+FILTER_VER_CHROMA_W8 8, 2, ss, 7     ; instantiate all 8-wide variants: width, height, mode, XMM count
+FILTER_VER_CHROMA_W8 8, 4, ss, 7
+FILTER_VER_CHROMA_W8 8, 6, ss, 7
+FILTER_VER_CHROMA_W8 8, 8, ss, 7
+FILTER_VER_CHROMA_W8 8, 16, ss, 7
+FILTER_VER_CHROMA_W8 8, 32, ss, 7
+
+FILTER_VER_CHROMA_W8 8, 2, sp, 8
+FILTER_VER_CHROMA_W8 8, 4, sp, 8
+FILTER_VER_CHROMA_W8 8, 6, sp, 8
+FILTER_VER_CHROMA_W8 8, 8, sp, 8
+FILTER_VER_CHROMA_W8 8, 16, sp, 8
+FILTER_VER_CHROMA_W8 8, 32, sp, 8
+
+FILTER_VER_CHROMA_W8 8, 2, ps, 8
+FILTER_VER_CHROMA_W8 8, 4, ps, 8
+FILTER_VER_CHROMA_W8 8, 6, ps, 8
+FILTER_VER_CHROMA_W8 8, 8, ps, 8
+FILTER_VER_CHROMA_W8 8, 16, ps, 8
+FILTER_VER_CHROMA_W8 8, 32, ps, 8
+
+FILTER_VER_CHROMA_W8 8, 2, pp, 8
+FILTER_VER_CHROMA_W8 8, 4, pp, 8
+FILTER_VER_CHROMA_W8 8, 6, pp, 8
+FILTER_VER_CHROMA_W8 8, 8, pp, 8
+FILTER_VER_CHROMA_W8 8, 16, pp, 8
+FILTER_VER_CHROMA_W8 8, 32, pp, 8
+
+FILTER_VER_CHROMA_W8 8, 12, ss, 7
+FILTER_VER_CHROMA_W8 8, 64, ss, 7
+FILTER_VER_CHROMA_W8 8, 12, sp, 8
+FILTER_VER_CHROMA_W8 8, 64, sp, 8
+FILTER_VER_CHROMA_W8 8, 12, ps, 8
+FILTER_VER_CHROMA_W8 8, 64, ps, 8
+FILTER_VER_CHROMA_W8 8, 12, pp, 8
+FILTER_VER_CHROMA_W8 8, 64, pp, 8
+
+%macro PROCESS_CHROMA_VERT_W16_2R 0  ; 4-tap vertical filter core: two 16-sample (YMM) rows -> 32-bit sums in m0-m3
+    movu       m1, [r0]
+    movu       m3, [r0 + r1]
+    punpcklwd  m0, m1, m3
+    pmaddwd    m0, [r5 + 0 * 32]                ; rows 0+1 * coeff pair 0, low halves
+    punpckhwd  m1, m3
+    pmaddwd    m1, [r5 + 0 * 32]                ; rows 0+1 * coeff pair 0, high halves
+
+    movu       m4, [r0 + 2 * r1]
+    punpcklwd  m2, m3, m4
+    pmaddwd    m2, [r5 + 0 * 32]                ; rows 1+2, low halves
+    punpckhwd  m3, m4
+    pmaddwd    m3, [r5 + 0 * 32]                ; rows 1+2, high halves
+
+    lea        r0, [r0 + 2 * r1]                ; advance src by two rows (caller loop relies on this)
+    movu       m5, [r0 + r1]
+    punpcklwd  m6, m4, m5
+    pmaddwd    m6, [r5 + 1 * 32]                ; rows 2+3 * coeff pair 1
+    paddd      m0, m6                           ; first output row, low halves complete
+    punpckhwd  m4, m5
+    pmaddwd    m4, [r5 + 1 * 32]
+    paddd      m1, m4                           ; first output row, high halves complete
+
+    movu       m4, [r0 + 2 * r1]
+    punpcklwd  m6, m5, m4
+    pmaddwd    m6, [r5 + 1 * 32]
+    paddd      m2, m6                           ; second output row, low halves complete
+    punpckhwd  m5, m4
+    pmaddwd    m5, [r5 + 1 * 32]
+    paddd      m3, m5                           ; second output row, high halves complete
+%endmacro
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_AVX2_6xN 2  ; %1 = height, %2 = mode (pp/ps/ss/sp); AVX2, x86-64 only (uses m8/m9)
+INIT_YMM avx2
+%if ARCH_X86_64
+cglobal interp_4tap_vert_%2_6x%1, 4, 7, 10
+    mov             r4d, r4m         ; coeffIdx (5th arg from stack)
+    add             r1d, r1d         ; strides in 16-bit units -> bytes
+    add             r3d, r3d
+    shl             r4d, 6           ; 64 bytes per coefficient-table entry
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffV + r4]
+%endif
+
+    sub             r0, r1           ; step back one row
+    mov             r6d, %1/4        ; 4 output rows per loop iteration
+
+%ifidn %2,pp
+    vbroadcasti128  m8, [INTERP_OFFSET_PP]
+%elifidn %2, sp
+    mova            m8, [INTERP_OFFSET_SP]  ; NOTE(review): sp uses mova while pp/ps use vbroadcasti128 -- presumably INTERP_OFFSET_SP is a full 32-byte constant; confirm against the constants table
+%else
+    vbroadcasti128  m8, [INTERP_OFFSET_PS]
+%endif
+
+.loopH:
+    movu            xm0, [r0]        ; interleave successive rows and accumulate 4-tap sums in m0-m3
+    movu            xm1, [r0 + r1]
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+
+    movu            xm2, [r0 + r1 * 2]
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+
+    lea             r4, [r1 * 3]
+    movu            xm3, [r0 + r4]
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m4
+
+    lea             r0, [r0 + r1 * 4]   ; advance src by four rows
+    movu            xm4, [r0]
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    pmaddwd         m3, [r5]
+    paddd           m1, m5
+
+    movu            xm5, [r0 + r1]
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    pmaddwd         m4, [r5]
+    paddd           m2, m6
+
+    movu            xm6, [r0 + r1 * 2]
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m7
+    lea             r4, [r3 * 3]     ; r4 now = 3 * dstStride for the stores below
+%ifidn %2,ss
+    psrad           m0, 6            ; ss: plain shift, no rounding or clipping
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+%else
+    paddd           m0, m8           ; round with the mode-specific offset, then shift
+    paddd           m1, m8
+    paddd           m2, m8
+    paddd           m3, m8
+%ifidn %2,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+    psrad           m2, INTERP_SHIFT_PP
+    psrad           m3, INTERP_SHIFT_PP
+%elifidn %2, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+    psrad           m2, INTERP_SHIFT_SP
+    psrad           m3, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+    psrad           m2, INTERP_SHIFT_PS
+    psrad           m3, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m0, m1
+    packssdw        m2, m3
+    vpermq          m0, m0, q3120    ; undo lane interleave from vinserti128
+    vpermq          m2, m2, q3120
+    pxor            m5, m5
+    mova            m9, [pw_pixel_max]
+%ifidn %2,pp
+    CLIPW           m0, m5, m9       ; pixel output: clamp to [0, pixel_max]
+    CLIPW           m2, m5, m9
+%elifidn %2, sp
+    CLIPW           m0, m5, m9
+    CLIPW           m2, m5, m9
+%endif
+
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movq            [r2], xm0        ; each row stores 6 samples: 8 bytes + one 4-byte pextrd
+    pextrd          [r2 + 8], xm0, 2
+    movq            [r2 + r3], xm1
+    pextrd          [r2 + r3 + 8], xm1, 2
+    movq            [r2 + r3 * 2], xm2
+    pextrd          [r2 + r3 * 2 + 8], xm2, 2
+    movq            [r2 + r4], xm3
+    pextrd          [r2 + r4 + 8], xm3, 2
+
+    lea             r2, [r2 + r3 * 4]
+    dec r6d
+    jnz .loopH
+    RET
+%endif
+%endmacro
+FILTER_VER_CHROMA_AVX2_6xN 8, pp     ; instantiate 6-wide AVX2 variants: height, mode
+FILTER_VER_CHROMA_AVX2_6xN 8, ps
+FILTER_VER_CHROMA_AVX2_6xN 8, ss
+FILTER_VER_CHROMA_AVX2_6xN 8, sp
+FILTER_VER_CHROMA_AVX2_6xN 16, pp
+FILTER_VER_CHROMA_AVX2_6xN 16, ps
+FILTER_VER_CHROMA_AVX2_6xN 16, ss
+FILTER_VER_CHROMA_AVX2_6xN 16, sp
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W16_16xN_avx2 3   ; %1 = height, %2 = mode (pp/ps/ss/sp), %3 = XMM/YMM register count
+INIT_YMM avx2
+cglobal interp_4tap_vert_%2_16x%1, 5, 6, %3
+    add       r1d, r1d               ; strides in 16-bit units -> bytes
+    add       r3d, r3d
+    sub       r0, r1                 ; step back one row
+    shl       r4d, 6                 ; 64 bytes per coefficient-table entry
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+
+    mov       r4d, %1/2              ; 2 rows per loop iteration
+
+%ifidn %2, pp
+    mova      m7, [INTERP_OFFSET_PP] ; rounding offset for the selected mode
+%elifidn %2, sp
+    mova      m7, [INTERP_OFFSET_SP]
+%elifidn %2, ps
+    mova      m7, [INTERP_OFFSET_PS]
+%endif
+
+.loopH:
+    PROCESS_CHROMA_VERT_W16_2R       ; m0-m3 = two 16-wide rows of 32-bit sums; r0 advanced inside
+%ifidn %2, ss
+    psrad     m0, 6                  ; ss: plain shift, no rounding or clipping
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%elifidn %2, ps
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    psrad     m3, INTERP_SHIFT_PS
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%else
+    paddd     m0, m7                 ; pp/sp: round, shift, then clip to pixel range
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+ %ifidn %2, pp
+    psrad     m0, INTERP_SHIFT_PP
+    psrad     m1, INTERP_SHIFT_PP
+    psrad     m2, INTERP_SHIFT_PP
+    psrad     m3, INTERP_SHIFT_PP
+%else
+    psrad     m0, INTERP_SHIFT_SP
+    psrad     m1, INTERP_SHIFT_SP
+    psrad     m2, INTERP_SHIFT_SP
+    psrad     m3, INTERP_SHIFT_SP
+%endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    pxor      m5, m5
+    CLIPW2    m0, m2, m5, [pw_pixel_max]
+%endif
+
+    movu      [r2], m0               ; store two 16-sample output rows
+    movu      [r2 + r3], m2
+    lea       r2, [r2 + 2 * r3]
+    dec       r4d
+    jnz       .loopH
+    RET
+%endmacro
+    FILTER_VER_CHROMA_W16_16xN_avx2 4, pp, 8    ; instantiate 16-wide variants: height, mode, register count
+    FILTER_VER_CHROMA_W16_16xN_avx2 8, pp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 12, pp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 24, pp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 16, pp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 32, pp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 64, pp, 8
+
+    FILTER_VER_CHROMA_W16_16xN_avx2 4, ps, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 8, ps, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 12, ps, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 24, ps, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 16, ps, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 32, ps, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 64, ps, 8
+
+    FILTER_VER_CHROMA_W16_16xN_avx2 4, ss, 7
+    FILTER_VER_CHROMA_W16_16xN_avx2 8, ss, 7
+    FILTER_VER_CHROMA_W16_16xN_avx2 12, ss, 7
+    FILTER_VER_CHROMA_W16_16xN_avx2 24, ss, 7
+    FILTER_VER_CHROMA_W16_16xN_avx2 16, ss, 7
+    FILTER_VER_CHROMA_W16_16xN_avx2 32, ss, 7
+    FILTER_VER_CHROMA_W16_16xN_avx2 64, ss, 7
+
+    FILTER_VER_CHROMA_W16_16xN_avx2 4, sp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 8, sp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 12, sp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 24, sp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 16, sp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 32, sp, 8
+    FILTER_VER_CHROMA_W16_16xN_avx2 64, sp, 8
+
+%macro PROCESS_CHROMA_VERT_W32_2R 0  ; 4-tap vertical core: two 32-sample rows -> sums in m0-m3 (low 16) and m8-m11 (high 16); r0 NOT advanced (caller loop does it), r6 clobbered
+    movu       m1, [r0]
+    movu       m3, [r0 + r1]
+    punpcklwd  m0, m1, m3
+    pmaddwd    m0, [r5 + 0 * mmsize]
+    punpckhwd  m1, m3
+    pmaddwd    m1, [r5 + 0 * mmsize]
+
+    movu       m9, [r0 + mmsize]     ; second 16-sample half of each row
+    movu       m11, [r0 + r1 + mmsize]
+    punpcklwd  m8, m9, m11
+    pmaddwd    m8, [r5 + 0 * mmsize]
+    punpckhwd  m9, m11
+    pmaddwd    m9, [r5 + 0 * mmsize]
+
+    movu       m4, [r0 + 2 * r1]
+    punpcklwd  m2, m3, m4
+    pmaddwd    m2, [r5 + 0 * mmsize]
+    punpckhwd  m3, m4
+    pmaddwd    m3, [r5 + 0 * mmsize]
+
+    movu       m12, [r0 + 2 * r1 + mmsize]
+    punpcklwd  m10, m11, m12
+    pmaddwd    m10, [r5 + 0 * mmsize]
+    punpckhwd  m11, m12
+    pmaddwd    m11, [r5 + 0 * mmsize]
+
+    lea        r6, [r0 + 2 * r1]     ; use r6 as row cursor so r0 stays put
+    movu       m5, [r6 + r1]
+    punpcklwd  m6, m4, m5
+    pmaddwd    m6, [r5 + 1 * mmsize]
+    paddd      m0, m6
+    punpckhwd  m4, m5
+    pmaddwd    m4, [r5 + 1 * mmsize]
+    paddd      m1, m4
+
+    movu       m13, [r6 + r1 + mmsize]
+    punpcklwd  m14, m12, m13
+    pmaddwd    m14, [r5 + 1 * mmsize]
+    paddd      m8, m14
+    punpckhwd  m12, m13
+    pmaddwd    m12, [r5 + 1 * mmsize]
+    paddd      m9, m12
+
+    movu       m4, [r6 + 2 * r1]
+    punpcklwd  m6, m5, m4
+    pmaddwd    m6, [r5 + 1 * mmsize]
+    paddd      m2, m6
+    punpckhwd  m5, m4
+    pmaddwd    m5, [r5 + 1 * mmsize]
+    paddd      m3, m5
+
+    movu       m12, [r6 + 2 * r1 + mmsize]
+    punpcklwd  m14, m13, m12
+    pmaddwd    m14, [r5 + 1 * mmsize]
+    paddd      m10, m14
+    punpckhwd  m13, m12
+    pmaddwd    m13, [r5 + 1 * mmsize]
+    paddd      m11, m13
+%endmacro
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W16_32xN_avx2 3   ; %1 = height, %2 = mode, %3 = register count; x86-64 only (m8-m14)
+INIT_YMM avx2
+%if ARCH_X86_64
+cglobal interp_4tap_vert_%2_32x%1, 5, 7, %3
+    add       r1d, r1d               ; strides in 16-bit units -> bytes
+    add       r3d, r3d
+    sub       r0, r1                 ; step back one row
+    shl       r4d, 6                 ; 64 bytes per coefficient-table entry
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+    mov       r4d, %1/2              ; 2 rows per loop iteration
+
+%ifidn %2, pp
+    mova      m7, [INTERP_OFFSET_PP] ; rounding offset for the selected mode
+%elifidn %2, sp
+    mova      m7, [INTERP_OFFSET_SP]
+%elifidn %2, ps
+    mova      m7, [INTERP_OFFSET_PS]
+%endif
+
+.loopH:
+    PROCESS_CHROMA_VERT_W32_2R       ; m0-m3 = low 16 samples, m8-m11 = high 16; r0 untouched
+%ifidn %2, ss
+    psrad     m0, 6                  ; ss: plain shift, no rounding or clipping
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    psrad     m8, 6
+    psrad     m9, 6
+    psrad     m10, 6
+    psrad     m11, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+    packssdw  m8, m9
+    packssdw  m10, m11
+%elifidn %2, ps
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    psrad     m3, INTERP_SHIFT_PS
+    paddd     m8, m7
+    paddd     m9, m7
+    paddd     m10, m7
+    paddd     m11, m7
+    psrad     m8, INTERP_SHIFT_PS
+    psrad     m9, INTERP_SHIFT_PS
+    psrad     m10, INTERP_SHIFT_PS
+    psrad     m11, INTERP_SHIFT_PS
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+    packssdw  m8, m9
+    packssdw  m10, m11
+%else
+    paddd     m0, m7                 ; pp/sp: round, shift, then clip to pixel range
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    paddd     m8, m7
+    paddd     m9, m7
+    paddd     m10, m7
+    paddd     m11, m7
+ %ifidn %2, pp
+    psrad     m0, INTERP_SHIFT_PP
+    psrad     m1, INTERP_SHIFT_PP
+    psrad     m2, INTERP_SHIFT_PP
+    psrad     m3, INTERP_SHIFT_PP
+    psrad     m8, INTERP_SHIFT_PP
+    psrad     m9, INTERP_SHIFT_PP
+    psrad     m10, INTERP_SHIFT_PP
+    psrad     m11, INTERP_SHIFT_PP
+%else
+    psrad     m0, INTERP_SHIFT_SP
+    psrad     m1, INTERP_SHIFT_SP
+    psrad     m2, INTERP_SHIFT_SP
+    psrad     m3, INTERP_SHIFT_SP
+    psrad     m8, INTERP_SHIFT_SP
+    psrad     m9, INTERP_SHIFT_SP
+    psrad     m10, INTERP_SHIFT_SP
+    psrad     m11, INTERP_SHIFT_SP
+%endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    packssdw  m8, m9
+    packssdw  m10, m11
+    pxor      m5, m5
+    CLIPW2    m0, m2, m5, [pw_pixel_max]
+    CLIPW2    m8, m10, m5, [pw_pixel_max]
+%endif
+
+    movu      [r2], m0               ; store two 32-sample rows (two YMM halves each)
+    movu      [r2 + r3], m2
+    movu      [r2 + mmsize], m8
+    movu      [r2 + r3 + mmsize], m10
+    lea       r2, [r2 + 2 * r3]
+    lea       r0, [r0 + 2 * r1]      ; advance src here (core macro leaves r0 unchanged)
+    dec       r4d
+    jnz       .loopH
+    RET
+%endif
+%endmacro
+    FILTER_VER_CHROMA_W16_32xN_avx2 8, pp, 15  ; instantiate 32-wide variants: height, mode, register count
+    FILTER_VER_CHROMA_W16_32xN_avx2 16, pp, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 24, pp, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 32, pp, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 48, pp, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 64, pp, 15
+
+    FILTER_VER_CHROMA_W16_32xN_avx2 8, ps, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 16, ps, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 24, ps, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 32, ps, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 48, ps, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 64, ps, 15
+
+    FILTER_VER_CHROMA_W16_32xN_avx2 8, ss, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 16, ss, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 24, ss, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 32, ss, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 48, ss, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 64, ss, 15
+
+    FILTER_VER_CHROMA_W16_32xN_avx2 8, sp, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 16, sp, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 24, sp, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 32, sp, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 48, sp, 15
+    FILTER_VER_CHROMA_W16_32xN_avx2 64, sp, 15
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W16_64xN_avx2 3   ; %1 = height, %2 = mode, %3 = register count; 64-wide = 4 x 16-sample strips per row pair
+INIT_YMM avx2
+cglobal interp_4tap_vert_%2_64x%1, 5, 7, %3
+    add       r1d, r1d               ; strides in 16-bit units -> bytes
+    add       r3d, r3d
+    sub       r0, r1                 ; step back one row
+    shl       r4d, 6                 ; 64 bytes per coefficient-table entry
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+    mov       r4d, %1/2              ; 2 rows per loop iteration
+
+%ifidn %2, pp
+    mova      m7, [INTERP_OFFSET_PP] ; rounding offset for the selected mode
+%elifidn %2, sp
+    mova      m7, [INTERP_OFFSET_SP]
+%elifidn %2, ps
+    mova      m7, [INTERP_OFFSET_PS]
+%endif
+
+.loopH:
+%assign x 0
+%rep 4
+    movu       m1, [r0 + x]
+    movu       m3, [r0 + r1 + x]
+    movu       m5, [r5 + 0 * mmsize] ; m5 = first coefficient pair; reused as a src row further down
+    punpcklwd  m0, m1, m3
+    pmaddwd    m0, m5
+    punpckhwd  m1, m3
+    pmaddwd    m1, m5
+
+    movu       m4, [r0 + 2 * r1 + x]
+    punpcklwd  m2, m3, m4
+    pmaddwd    m2, m5
+    punpckhwd  m3, m4
+    pmaddwd    m3, m5
+
+    lea        r6, [r0 + 2 * r1]     ; r6 = row cursor; r0 advanced only at loop end
+    movu       m5, [r6 + r1 + x]
+    punpcklwd  m6, m4, m5
+    pmaddwd    m6, [r5 + 1 * mmsize]
+    paddd      m0, m6
+    punpckhwd  m4, m5
+    pmaddwd    m4, [r5 + 1 * mmsize]
+    paddd      m1, m4
+
+    movu       m4, [r6 + 2 * r1 + x]
+    punpcklwd  m6, m5, m4
+    pmaddwd    m6, [r5 + 1 * mmsize]
+    paddd      m2, m6
+    punpckhwd  m5, m4
+    pmaddwd    m5, [r5 + 1 * mmsize]
+    paddd      m3, m5
+
+%ifidn %2, ss
+    psrad     m0, 6                  ; ss: plain shift, no rounding or clipping
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%elifidn %2, ps
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    psrad     m3, INTERP_SHIFT_PS
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%else
+    paddd     m0, m7                 ; pp/sp: round, shift, then clip to pixel range
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+%ifidn %2, pp
+    psrad     m0, INTERP_SHIFT_PP
+    psrad     m1, INTERP_SHIFT_PP
+    psrad     m2, INTERP_SHIFT_PP
+    psrad     m3, INTERP_SHIFT_PP
+%else
+    psrad     m0, INTERP_SHIFT_SP
+    psrad     m1, INTERP_SHIFT_SP
+    psrad     m2, INTERP_SHIFT_SP
+    psrad     m3, INTERP_SHIFT_SP
+%endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    pxor      m5, m5
+    CLIPW2    m0, m2, m5, [pw_pixel_max]
+%endif
+
+    movu      [r2 + x], m0
+    movu      [r2 + r3 + x], m2
+%assign x x+mmsize
+%endrep
+
+    lea       r2, [r2 + 2 * r3]
+    lea       r0, [r0 + 2 * r1]
+    dec       r4d
+    jnz       .loopH
+    RET
+%endmacro
+    FILTER_VER_CHROMA_W16_64xN_avx2 16, ss, 7  ; instantiate 64-wide variants: height, mode, register count
+    FILTER_VER_CHROMA_W16_64xN_avx2 32, ss, 7
+    FILTER_VER_CHROMA_W16_64xN_avx2 48, ss, 7
+    FILTER_VER_CHROMA_W16_64xN_avx2 64, ss, 7
+    FILTER_VER_CHROMA_W16_64xN_avx2 16, sp, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 32, sp, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 48, sp, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 64, sp, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 16, ps, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 32, ps, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 48, ps, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 64, ps, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 16, pp, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 32, pp, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 48, pp, 8
+    FILTER_VER_CHROMA_W16_64xN_avx2 64, pp, 8
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W16_12xN_avx2 3   ; %1 = height, %2 = mode, %3 = register count; computes 16 wide, stores only 12
+INIT_YMM avx2
+cglobal interp_4tap_vert_%2_12x%1, 5, 8, %3
+    add       r1d, r1d               ; strides in 16-bit units -> bytes
+    add       r3d, r3d
+    sub       r0, r1                 ; step back one row
+    shl       r4d, 6                 ; 64 bytes per coefficient-table entry
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+    mov       r4d, %1/2              ; 2 rows per loop iteration
+
+%ifidn %2, pp
+    mova      m7, [INTERP_OFFSET_PP] ; rounding offset for the selected mode
+%elifidn %2, sp
+    mova      m7, [INTERP_OFFSET_SP]
+%elifidn %2, ps
+    mova      m7, [INTERP_OFFSET_PS]
+%endif
+
+.loopH:
+    PROCESS_CHROMA_VERT_W16_2R       ; m0-m3 = two 16-wide rows of 32-bit sums; r0 advanced inside
+%ifidn %2, ss
+    psrad     m0, 6                  ; ss: plain shift, no rounding or clipping
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%elifidn %2, ps
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    psrad     m3, INTERP_SHIFT_PS
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%else
+    paddd     m0, m7                 ; pp/sp: round, shift, then clip to pixel range
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+ %ifidn %2, pp
+    psrad     m0, INTERP_SHIFT_PP
+    psrad     m1, INTERP_SHIFT_PP
+    psrad     m2, INTERP_SHIFT_PP
+    psrad     m3, INTERP_SHIFT_PP
+%else
+    psrad     m0, INTERP_SHIFT_SP
+    psrad     m1, INTERP_SHIFT_SP
+    psrad     m2, INTERP_SHIFT_SP
+    psrad     m3, INTERP_SHIFT_SP
+%endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    pxor      m5, m5
+    CLIPW2    m0, m2, m5, [pw_pixel_max]
+%endif
+
+    movu      [r2], xm0              ; per row: 8 samples from the low lane ...
+    movu      [r2 + r3], xm2
+    vextracti128 xm0, m0, 1
+    vextracti128 xm2, m2, 1
+    movq      [r2 + 16], xm0         ; ... plus 4 samples from the high lane = 12 total
+    movq      [r2 + r3 + 16], xm2
+    lea       r2, [r2 + 2 * r3]
+    dec       r4d
+    jnz       .loopH
+    RET
+%endmacro
+    FILTER_VER_CHROMA_W16_12xN_avx2 16, ss, 7  ; instantiate 12-wide variants: height, mode, register count
+    FILTER_VER_CHROMA_W16_12xN_avx2 16, sp, 8
+    FILTER_VER_CHROMA_W16_12xN_avx2 16, ps, 8
+    FILTER_VER_CHROMA_W16_12xN_avx2 16, pp, 8
+    FILTER_VER_CHROMA_W16_12xN_avx2 32, ss, 7
+    FILTER_VER_CHROMA_W16_12xN_avx2 32, sp, 8
+    FILTER_VER_CHROMA_W16_12xN_avx2 32, ps, 8
+    FILTER_VER_CHROMA_W16_12xN_avx2 32, pp, 8
+
+%macro PROCESS_CHROMA_VERT_W24_2R 0  ; 4-tap vertical core: two 24-sample rows -> 16 samples in YMM m0-m3 plus 8 samples in XMM m8-m11; r0 NOT advanced, r6 clobbered
+    movu       m1, [r0]
+    movu       m3, [r0 + r1]
+    punpcklwd  m0, m1, m3
+    pmaddwd    m0, [r5 + 0 * mmsize]
+    punpckhwd  m1, m3
+    pmaddwd    m1, [r5 + 0 * mmsize]
+
+    movu       xm9, [r0 + mmsize]    ; trailing 8 samples handled in XMM width
+    movu       xm11, [r0 + r1 + mmsize]
+    punpcklwd  xm8, xm9, xm11
+    pmaddwd    xm8, [r5 + 0 * mmsize]
+    punpckhwd  xm9, xm11
+    pmaddwd    xm9, [r5 + 0 * mmsize]
+
+    movu       m4, [r0 + 2 * r1]
+    punpcklwd  m2, m3, m4
+    pmaddwd    m2, [r5 + 0 * mmsize]
+    punpckhwd  m3, m4
+    pmaddwd    m3, [r5 + 0 * mmsize]
+
+    movu       xm12, [r0 + 2 * r1 + mmsize]
+    punpcklwd  xm10, xm11, xm12
+    pmaddwd    xm10, [r5 + 0 * mmsize]
+    punpckhwd  xm11, xm12
+    pmaddwd    xm11, [r5 + 0 * mmsize]
+
+    lea        r6, [r0 + 2 * r1]     ; use r6 as row cursor so r0 stays put
+    movu       m5, [r6 + r1]
+    punpcklwd  m6, m4, m5
+    pmaddwd    m6, [r5 + 1 * mmsize]
+    paddd      m0, m6
+    punpckhwd  m4, m5
+    pmaddwd    m4, [r5 + 1 * mmsize]
+    paddd      m1, m4
+
+    movu       xm13, [r6 + r1 + mmsize]
+    punpcklwd  xm14, xm12, xm13
+    pmaddwd    xm14, [r5 + 1 * mmsize]
+    paddd      xm8, xm14
+    punpckhwd  xm12, xm13
+    pmaddwd    xm12, [r5 + 1 * mmsize]
+    paddd      xm9, xm12
+
+    movu       m4, [r6 + 2 * r1]
+    punpcklwd  m6, m5, m4
+    pmaddwd    m6, [r5 + 1 * mmsize]
+    paddd      m2, m6
+    punpckhwd  m5, m4
+    pmaddwd    m5, [r5 + 1 * mmsize]
+    paddd      m3, m5
+
+    movu       xm12, [r6 + 2 * r1 + mmsize]
+    punpcklwd  xm14, xm13, xm12
+    pmaddwd    xm14, [r5 + 1 * mmsize]
+    paddd      xm10, xm14
+    punpckhwd  xm13, xm12
+    pmaddwd    xm13, [r5 + 1 * mmsize]
+    paddd      xm11, xm13
+%endmacro
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W16_24xN_avx2 3   ; %1 = height, %2 = mode, %3 = register count; x86-64 only (m8-m14)
+INIT_YMM avx2
+%if ARCH_X86_64
+cglobal interp_4tap_vert_%2_24x%1, 5, 7, %3
+    add       r1d, r1d               ; strides in 16-bit units -> bytes
+    add       r3d, r3d
+    sub       r0, r1                 ; step back one row
+    shl       r4d, 6                 ; 64 bytes per coefficient-table entry
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+    mov       r4d, %1/2              ; 2 rows per loop iteration
+
+%ifidn %2, pp
+    mova      m7, [INTERP_OFFSET_PP] ; rounding offset for the selected mode
+%elifidn %2, sp
+    mova      m7, [INTERP_OFFSET_SP]
+%elifidn %2, ps
+    mova      m7, [INTERP_OFFSET_PS]
+%endif
+
+.loopH:
+    PROCESS_CHROMA_VERT_W24_2R       ; m0-m3 = 16-wide part, m8-m11 = 8-wide part; r0 untouched
+%ifidn %2, ss
+    psrad     m0, 6                  ; ss: plain shift, no rounding or clipping
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    psrad     m8, 6
+    psrad     m9, 6
+    psrad     m10, 6
+    psrad     m11, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+    packssdw  m8, m9
+    packssdw  m10, m11
+%elifidn %2, ps
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    psrad     m3, INTERP_SHIFT_PS
+    paddd     m8, m7
+    paddd     m9, m7
+    paddd     m10, m7
+    paddd     m11, m7
+    psrad     m8, INTERP_SHIFT_PS
+    psrad     m9, INTERP_SHIFT_PS
+    psrad     m10, INTERP_SHIFT_PS
+    psrad     m11, INTERP_SHIFT_PS
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+    packssdw  m8, m9
+    packssdw  m10, m11
+%else
+    paddd     m0, m7                 ; pp/sp: round, shift, then clip to pixel range
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    paddd     m8, m7
+    paddd     m9, m7
+    paddd     m10, m7
+    paddd     m11, m7
+ %ifidn %2, pp
+    psrad     m0, INTERP_SHIFT_PP
+    psrad     m1, INTERP_SHIFT_PP
+    psrad     m2, INTERP_SHIFT_PP
+    psrad     m3, INTERP_SHIFT_PP
+    psrad     m8, INTERP_SHIFT_PP
+    psrad     m9, INTERP_SHIFT_PP
+    psrad     m10, INTERP_SHIFT_PP
+    psrad     m11, INTERP_SHIFT_PP
+%else
+    psrad     m0, INTERP_SHIFT_SP
+    psrad     m1, INTERP_SHIFT_SP
+    psrad     m2, INTERP_SHIFT_SP
+    psrad     m3, INTERP_SHIFT_SP
+    psrad     m8, INTERP_SHIFT_SP
+    psrad     m9, INTERP_SHIFT_SP
+    psrad     m10, INTERP_SHIFT_SP
+    psrad     m11, INTERP_SHIFT_SP
+%endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    packssdw  m8, m9
+    packssdw  m10, m11
+    pxor      m5, m5
+    CLIPW2    m0, m2, m5, [pw_pixel_max]
+    CLIPW2    m8, m10, m5, [pw_pixel_max]
+%endif
+
+    movu      [r2], m0               ; 16 samples (YMM) + 8 samples (XMM) = 24-wide row
+    movu      [r2 + r3], m2
+    movu      [r2 + mmsize], xm8
+    movu      [r2 + r3 + mmsize], xm10
+    lea       r2, [r2 + 2 * r3]
+    lea       r0, [r0 + 2 * r1]      ; advance src here (core macro leaves r0 unchanged)
+    dec       r4d
+    jnz       .loopH
+    RET
+%endif
+%endmacro
+    FILTER_VER_CHROMA_W16_24xN_avx2 32, ss, 15 ; instantiate 24-wide variants: height, mode, register count
+    FILTER_VER_CHROMA_W16_24xN_avx2 32, sp, 15
+    FILTER_VER_CHROMA_W16_24xN_avx2 32, ps, 15
+    FILTER_VER_CHROMA_W16_24xN_avx2 32, pp, 15
+    FILTER_VER_CHROMA_W16_24xN_avx2 64, ss, 15
+    FILTER_VER_CHROMA_W16_24xN_avx2 64, sp, 15
+    FILTER_VER_CHROMA_W16_24xN_avx2 64, ps, 15
+    FILTER_VER_CHROMA_W16_24xN_avx2 64, pp, 15
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W16_48x64_avx2 2
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_48x64, 5, 7, %2
+    add       r1d, r1d
+    add       r3d, r3d
+    sub       r0, r1
+    shl       r4d, 6
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+    mov       r4d, 32
+
+%ifidn %1, pp
+    mova      m7, [INTERP_OFFSET_PP]
+%elifidn %1, sp
+    mova      m7, [INTERP_OFFSET_SP]
+%elifidn %1, ps
+    mova      m7, [INTERP_OFFSET_PS]
+%endif
+
+.loopH:
+%assign x 0
+%rep 3
+    movu       m1, [r0 + x]
+    movu       m3, [r0 + r1 + x]
+    movu       m5, [r5 + 0 * mmsize]
+    punpcklwd  m0, m1, m3
+    pmaddwd    m0, m5
+    punpckhwd  m1, m3
+    pmaddwd    m1, m5
+
+    movu       m4, [r0 + 2 * r1 + x]
+    punpcklwd  m2, m3, m4
+    pmaddwd    m2, m5
+    punpckhwd  m3, m4
+    pmaddwd    m3, m5
+
+    lea        r6, [r0 + 2 * r1]
+    movu       m5, [r6 + r1 + x]
+    punpcklwd  m6, m4, m5
+    pmaddwd    m6, [r5 + 1 * mmsize]
+    paddd      m0, m6
+    punpckhwd  m4, m5
+    pmaddwd    m4, [r5 + 1 * mmsize]
+    paddd      m1, m4
+
+    movu       m4, [r6 + 2 * r1 + x]
+    punpcklwd  m6, m5, m4
+    pmaddwd    m6, [r5 + 1 * mmsize]
+    paddd      m2, m6
+    punpckhwd  m5, m4
+    pmaddwd    m5, [r5 + 1 * mmsize]
+    paddd      m3, m5
+
+%ifidn %1, ss
+    psrad     m0, 6
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%elifidn %1, ps
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    psrad     m3, INTERP_SHIFT_PS
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%else
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+%ifidn %1, pp
+    psrad     m0, INTERP_SHIFT_PP
+    psrad     m1, INTERP_SHIFT_PP
+    psrad     m2, INTERP_SHIFT_PP
+    psrad     m3, INTERP_SHIFT_PP
+%else
+    psrad     m0, INTERP_SHIFT_SP
+    psrad     m1, INTERP_SHIFT_SP
+    psrad     m2, INTERP_SHIFT_SP
+    psrad     m3, INTERP_SHIFT_SP
+%endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    pxor      m5, m5
+    CLIPW2    m0, m2, m5, [pw_pixel_max]
+%endif
+
+    movu      [r2 + x], m0
+    movu      [r2 + r3 + x], m2
+%assign x x+mmsize
+%endrep
+
+    lea       r2, [r2 + 2 * r3]
+    lea       r0, [r0 + 2 * r1]
+    dec       r4d
+    jnz       .loopH
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_W16_48x64_avx2 pp, 8
+    FILTER_VER_CHROMA_W16_48x64_avx2 ps, 8
+    FILTER_VER_CHROMA_W16_48x64_avx2 ss, 7
+    FILTER_VER_CHROMA_W16_48x64_avx2 sp, 8
+
+; chroma_p2s: convert a chroma pixel block to the "shifted pixel" domain:
+;   out = (in << (14 - BIT_DEPTH)) - 8192
+; NOTE(review): args look like (src, srcStride, dst, width, height) with the
+; destination using a fixed FENC_STRIDE/2 stride -- confirm against the C
+; prototype.  Two source rows are converted per .loopH iteration.
+INIT_XMM sse2
+cglobal chroma_p2s, 3, 7, 3
+    ; load width and height
+    mov         r3d, r3m
+    mov         r4d, r4m
+    add         r1, r1                       ; srcStride in bytes (16-bit pixels)
+
+    ; load constant
+    mova        m2, [tab_c_n8192]            ; -8192 in every word lane
+
+.loopH:
+
+    xor         r5d, r5d                     ; r5 = column counter (in pixels)
+.loopW:
+    lea         r6, [r0 + r5 * 2]
+
+    movu        m0, [r6]                     ; 8 pixels of row 0
+    psllw       m0, (14 - BIT_DEPTH)
+    paddw       m0, m2
+
+    movu        m1, [r6 + r1]                ; 8 pixels of row 1
+    psllw       m1, (14 - BIT_DEPTH)
+    paddw       m1, m2
+
+    add         r5d, 8
+    cmp         r5d, r3d
+    ; NOTE: movu/lea/pshufd below do not write EFLAGS, so the jg/je/jz
+    ; branches still test the cmp/test results computed above them.
+    lea         r6, [r2 + r5 * 2]            ; dst points one 8-pixel group PAST the data
+    jg          .width4                      ; overshot the row: handle 1-7 pixel tail
+    movu        [r6 + FENC_STRIDE / 2 * 0 - 16], m0
+    movu        [r6 + FENC_STRIDE / 2 * 2 - 16], m1
+    je          .nextH                       ; exactly at the row end
+    jmp         .loopW
+
+.width4:
+    test        r3d, 4                       ; 4-pixel tail?
+    jz          .width2
+    test        r3d, 2                       ; pre-test the 2-pixel tail for the jz below
+    movh        [r6 + FENC_STRIDE / 2 * 0 - 16], m0
+    movh        [r6 + FENC_STRIDE / 2 * 2 - 16], m1
+    lea         r6, [r6 + 8]
+    pshufd      m0, m0, 2                    ; shift next 2 pixels into the low dword
+    pshufd      m1, m1, 2
+    jz          .nextH
+
+.width2:
+    movd        [r6 + FENC_STRIDE / 2 * 0 - 16], m0
+    movd        [r6 + FENC_STRIDE / 2 * 2 - 16], m1
+
+.nextH:
+    lea         r0, [r0 + r1 * 2]            ; advance src two rows
+    add         r2, FENC_STRIDE / 2 * 4      ; advance dst two (int16) rows
+
+    sub         r4d, 2
+    jnz         .loopH
+    RET
+
+; Compute four consecutive output rows of a 4-wide vertical 8-tap luma filter.
+; In:  r0 = src (top tap row), r1 = srcStride in bytes,
+;      r6 = coefficient table: four 16-byte entries of interleaved tap pairs.
+; Out: m0-m3 = 32-bit filtered sums for output rows 1-4 (no round/shift/pack).
+; Side effects visible below: m4-m6 are clobbered and r0 is advanced 8 rows.
+%macro PROCESS_LUMA_VER_W4_4R 0
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+    pmaddwd    m0, [r6 + 0 *16]                ;m0=[0+1]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m1, m4                          ;m1=[1 2]
+    pmaddwd    m1, [r6 + 0 *16]                ;m1=[1+2]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[2 3]
+    pmaddwd    m2, m4, [r6 + 0 *16]            ;m2=[2+3]  Row3
+    pmaddwd    m4, [r6 + 1 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[3 4]
+    pmaddwd    m3, m5, [r6 + 0 *16]            ;m3=[3+4]  Row4
+    pmaddwd    m5, [r6 + 1 * 16]
+    paddd      m1, m5                          ;m1 = [1+2+3+4]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[4 5]
+    pmaddwd    m6, m4, [r6 + 1 * 16]
+    paddd      m2, m6                          ;m2=[2+3+4+5]  Row3
+    pmaddwd    m4, [r6 + 2 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3+4+5]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[5 6]
+    pmaddwd    m6, m5, [r6 + 1 * 16]
+    paddd      m3, m6                          ;m3=[3+4+5+6]  Row4
+    pmaddwd    m5, [r6 + 2 * 16]
+    paddd      m1, m5                          ;m1=[1+2+3+4+5+6]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[6 7]
+    pmaddwd    m6, m4, [r6 + 2 * 16]
+    paddd      m2, m6                          ;m2=[2+3+4+5+6+7]  Row3
+    pmaddwd    m4, [r6 + 3 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3+4+5+6+7]  Row1 end
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[7 8]
+    pmaddwd    m6, m5, [r6 + 2 * 16]
+    paddd      m3, m6                          ;m3=[3+4+5+6+7+8]  Row4
+    pmaddwd    m5, [r6 + 3 * 16]
+    paddd      m1, m5                          ;m1=[1+2+3+4+5+6+7+8]  Row2 end
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[8 9]
+    pmaddwd    m4, [r6 + 3 * 16]
+    paddd      m2, m4                          ;m2=[2+3+4+5+6+7+8+9]  Row3 end
+
+    movq       m4, [r0 + 2 * r1]
+    punpcklwd  m5, m4                          ;m5=[9 10]
+    pmaddwd    m5, [r6 + 3 * 16]
+    paddd      m3, m5                          ;m3=[3+4+5+6+7+8+9+10]  Row4 end
+%endmacro
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_LUMA_PP 2
+INIT_XMM sse4
+cglobal interp_8tap_vert_pp_%1x%2, 5, 7, 8 ,0-gprsize
+
+    add       r1d, r1d
+    add       r3d, r3d
+    lea       r5, [r1 + 2 * r1]
+    sub       r0, r5
+    shl       r4d, 6
+
+%ifdef PIC
+    lea       r5, [tab_LumaCoeffV]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_LumaCoeffV + r4]
+%endif
+
+    mova      m7, [INTERP_OFFSET_PP]
+
+    mov       dword [rsp], %2/4
+.loopH:
+    mov       r4d, (%1/4)
+.loopW:
+    PROCESS_LUMA_VER_W4_4R
+
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+
+    psrad     m0, INTERP_SHIFT_PP
+    psrad     m1, INTERP_SHIFT_PP
+    psrad     m2, INTERP_SHIFT_PP
+    psrad     m3, INTERP_SHIFT_PP
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+
+    pxor      m1, m1
+    CLIPW2    m0, m2, m1, [pw_pixel_max]
+
+    movh      [r2], m0
+    movhps    [r2 + r3], m0
+    lea       r5, [r2 + 2 * r3]
+    movh      [r5], m2
+    movhps    [r5 + r3], m2
+
+    lea       r5, [8 * r1 - 2 * 4]
+    sub       r0, r5
+    add       r2, 2 * 4
+
+    dec       r4d
+    jnz       .loopW
+
+    lea       r0, [r0 + 4 * r1 - 2 * %1]
+    lea       r2, [r2 + 4 * r3 - 2 * %1]
+
+    dec       dword [rsp]
+    jnz       .loopH
+    RET
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_PP 4, 4
+    FILTER_VER_LUMA_PP 8, 8
+    FILTER_VER_LUMA_PP 8, 4
+    FILTER_VER_LUMA_PP 4, 8
+    FILTER_VER_LUMA_PP 16, 16
+    FILTER_VER_LUMA_PP 16, 8
+    FILTER_VER_LUMA_PP 8, 16
+    FILTER_VER_LUMA_PP 16, 12
+    FILTER_VER_LUMA_PP 12, 16
+    FILTER_VER_LUMA_PP 16, 4
+    FILTER_VER_LUMA_PP 4, 16
+    FILTER_VER_LUMA_PP 32, 32
+    FILTER_VER_LUMA_PP 32, 16
+    FILTER_VER_LUMA_PP 16, 32
+    FILTER_VER_LUMA_PP 32, 24
+    FILTER_VER_LUMA_PP 24, 32
+    FILTER_VER_LUMA_PP 32, 8
+    FILTER_VER_LUMA_PP 8, 32
+    FILTER_VER_LUMA_PP 64, 64
+    FILTER_VER_LUMA_PP 64, 32
+    FILTER_VER_LUMA_PP 32, 64
+    FILTER_VER_LUMA_PP 64, 48
+    FILTER_VER_LUMA_PP 48, 64
+    FILTER_VER_LUMA_PP 64, 16
+    FILTER_VER_LUMA_PP 16, 64
+
+; AVX2 vertical luma 8-tap filter for a 4x4 block.
+; %1 = variant (pp/ps/sp/ss).  Row pairs are interleaved into ymm halves so
+; each pmaddwd covers two output rows at once; the four coeff-pair entries
+; live at r5 + {0,1,2,3}*mmsize.
+%macro FILTER_VER_LUMA_AVX2_4x4 1
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_4x4, 4, 6, 7
+    mov             r4d, r4m
+    add             r1d, r1d                        ; strides in bytes
+    add             r3d, r3d
+    shl             r4d, 7                          ; coeffIdx * 128 into coeff table
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                          ; taps span rows -3..+4
+
+; variant-specific rounding offset in m6 (ss needs none and skips this)
+%ifidn %1,pp
+    vbroadcasti128  m6, [pd_32]
+%elifidn %1, sp
+    mova            m6, [pd_524800]
+%else
+    vbroadcasti128  m6, [INTERP_OFFSET_PS]
+%endif
+
+    movq            xm0, [r0]
+    movq            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m5, m4, [r5 + 2 * mmsize]
+    pmaddwd         m4, [r5 + 1 * mmsize]
+    paddd           m0, m5
+    paddd           m2, m4
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm1, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [8 7 7 6]
+    pmaddwd         m5, m1, [r5 + 3 * mmsize]
+    pmaddwd         m1, [r5 + 2 * mmsize]
+    paddd           m0, m5                          ; rows 0/1 complete (all 8 taps)
+    paddd           m2, m1
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + 2 * r1]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [A 9 9 8]
+    pmaddwd         m4, [r5 + 3 * mmsize]
+    paddd           m2, m4                          ; rows 2/3 complete
+
+; round/shift per variant
+%ifidn %1,ss
+    psrad           m0, 6
+    psrad           m2, 6
+%else
+    paddd           m0, m6
+    paddd           m2, m6
+%ifidn %1,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m2, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m2, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m2, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m0, m2
+    pxor            m1, m1
+; only pixel-domain outputs (pp/sp) get clamped
+%ifidn %1,pp
+    CLIPW           m0, m1, [pw_pixel_max]
+%elifidn %1, sp
+    CLIPW           m0, m1, [pw_pixel_max]
+%endif
+
+    vextracti128    xm2, m0, 1
+    lea             r4, [r3 * 3]
+    movq            [r2], xm0                       ; rows interleaved: 0/2 in xm0, 1/3 in xm2
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r4], xm2
+    RET
+%endmacro
+
+FILTER_VER_LUMA_AVX2_4x4 pp
+FILTER_VER_LUMA_AVX2_4x4 ps
+FILTER_VER_LUMA_AVX2_4x4 sp
+FILTER_VER_LUMA_AVX2_4x4 ss
+
+; AVX2 vertical luma 8-tap filter for an 8x8 block (x86-64 only: needs m8-m11).
+; %1 = variant (pp/ps/sp/ss).  Each ymm holds one row-pair interleave
+; (low halves in lane 0, high halves in lane 1); rows 0-7 are accumulated
+; incrementally as the 15 required source rows stream in, with the first
+; four output rows stored before the last source rows are loaded.
+%macro FILTER_VER_LUMA_AVX2_8x8 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_8x8, 4, 6, 12
+    mov             r4d, r4m
+    add             r1d, r1d                        ; strides in bytes
+    add             r3d, r3d
+    shl             r4d, 7                          ; coeffIdx * 128
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                          ; taps span rows -3..+4
+
+; rounding offset per variant (ss skips it)
+%ifidn %1,pp
+    vbroadcasti128  m11, [pd_32]
+%elifidn %1, sp
+    mova            m11, [pd_524800]
+%else
+    vbroadcasti128  m11, [INTERP_OFFSET_PS]
+%endif
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m4
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    pmaddwd         m3, [r5]
+    paddd           m1, m5
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 2 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m7
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 3 * mmsize]
+    paddd           m0, m8                          ; output row 0 complete
+    pmaddwd         m8, m6, [r5 + 2 * mmsize]
+    paddd           m2, m8
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    pmaddwd         m6, [r5]
+    paddd           m4, m8
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 3 * mmsize]
+    paddd           m1, m9                          ; output row 1 complete
+    pmaddwd         m9, m7, [r5 + 2 * mmsize]
+    paddd           m3, m9
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    pmaddwd         m7, [r5]
+    paddd           m5, m9
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m2, m10                         ; output row 2 complete
+    pmaddwd         m10, m8, [r5 + 2 * mmsize]
+    pmaddwd         m8, [r5 + 1 * mmsize]
+    paddd           m4, m10
+    paddd           m6, m8
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm8, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm8, 1
+    pmaddwd         m8, m9, [r5 + 3 * mmsize]
+    paddd           m3, m8                          ; output row 3 complete
+    pmaddwd         m8, m9, [r5 + 2 * mmsize]
+    pmaddwd         m9, [r5 + 1 * mmsize]
+    paddd           m5, m8
+    paddd           m7, m9
+    movu            xm8, [r0 + r4]                  ; m8 = row 11
+    punpckhwd       xm9, xm10, xm8
+    punpcklwd       xm10, xm8
+    vinserti128     m10, m10, xm9, 1
+    pmaddwd         m9, m10, [r5 + 3 * mmsize]
+    pmaddwd         m10, [r5 + 2 * mmsize]
+    paddd           m4, m9                          ; output row 4 complete
+    paddd           m6, m10
+
+    lea             r4, [r3 * 3]
+; finalize and store output rows 0-3
+%ifidn %1,ss
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+%else
+    paddd           m0, m11
+    paddd           m1, m11
+    paddd           m2, m11
+    paddd           m3, m11
+%ifidn %1,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+    psrad           m2, INTERP_SHIFT_PP
+    psrad           m3, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+    psrad           m2, INTERP_SHIFT_SP
+    psrad           m3, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+    psrad           m2, INTERP_SHIFT_PS
+    psrad           m3, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m0, m1
+    packssdw        m2, m3
+    vpermq          m0, m0, 11011000b               ; restore row order after pack
+    vpermq          m2, m2, 11011000b
+    pxor            m10, m10
+    mova            m9, [pw_pixel_max]
+%ifidn %1,pp
+    CLIPW           m0, m10, m9
+    CLIPW           m2, m10, m9
+%elifidn %1, sp
+    CLIPW           m0, m10, m9
+    CLIPW           m2, m10, m9
+%endif
+
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+
+; feed the last three source rows into output rows 5-7
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 12
+    punpckhwd       xm3, xm8, xm2
+    punpcklwd       xm8, xm2
+    vinserti128     m8, m8, xm3, 1
+    pmaddwd         m3, m8, [r5 + 3 * mmsize]
+    pmaddwd         m8, [r5 + 2 * mmsize]
+    paddd           m5, m3                          ; output row 5 complete
+    paddd           m7, m8
+    movu            xm3, [r0 + r1]                  ; m3 = row 13
+    punpckhwd       xm0, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm0, 1
+    pmaddwd         m2, [r5 + 3 * mmsize]
+    paddd           m6, m2                          ; output row 6 complete
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm1, xm3, xm0
+    punpcklwd       xm3, xm0
+    vinserti128     m3, m3, xm1, 1
+    pmaddwd         m3, [r5 + 3 * mmsize]
+    paddd           m7, m3                          ; output row 7 complete
+
+; finalize and store output rows 4-7
+%ifidn %1,ss
+    psrad           m4, 6
+    psrad           m5, 6
+    psrad           m6, 6
+    psrad           m7, 6
+%else
+    paddd           m4, m11
+    paddd           m5, m11
+    paddd           m6, m11
+    paddd           m7, m11
+%ifidn %1,pp
+    psrad           m4, INTERP_SHIFT_PP
+    psrad           m5, INTERP_SHIFT_PP
+    psrad           m6, INTERP_SHIFT_PP
+    psrad           m7, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m4, INTERP_SHIFT_SP
+    psrad           m5, INTERP_SHIFT_SP
+    psrad           m6, INTERP_SHIFT_SP
+    psrad           m7, INTERP_SHIFT_SP
+%else
+    psrad           m4, INTERP_SHIFT_PS
+    psrad           m5, INTERP_SHIFT_PS
+    psrad           m6, INTERP_SHIFT_PS
+    psrad           m7, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m4, m5
+    packssdw        m6, m7
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+%ifidn %1,pp
+    CLIPW           m4, m10, m9
+    CLIPW           m6, m10, m9
+%elifidn %1, sp
+    CLIPW           m4, m10, m9
+    CLIPW           m6, m10, m9
+%endif
+    vextracti128    xm5, m4, 1
+    vextracti128    xm7, m6, 1
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r4], xm7
+    RET
+%endif
+%endmacro
+
+FILTER_VER_LUMA_AVX2_8x8 pp
+FILTER_VER_LUMA_AVX2_8x8 ps
+FILTER_VER_LUMA_AVX2_8x8 sp
+FILTER_VER_LUMA_AVX2_8x8 ss
+
+; Produce a 16-row x 8-col tile of the vertical luma 8-tap filter (AVX2).
+; In:  r0 = src, r1 = srcStride (bytes), r4 = 3*r1, r5 = coeff table,
+;      r2 = dst, r3 = dstStride (bytes), r6 = 3*r3,
+;      m14 = rounding offset (unused by the ss variant).
+; %1 = variant (pp/ps/sp/ss).  Uses r7/r8 as roaming src/dst pointers and
+; m0-m13 as row interleaves / accumulators; results are stored in three
+; batches (rows 0-5, rows 6-7, rows 8-15) as accumulators complete.
+%macro PROCESS_LUMA_AVX2_W8_16R 1
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r7, [r0 + r1 * 4]
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 2 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+    movu            xm7, [r7 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 3 * mmsize]
+    paddd           m0, m8                          ; output row 0 complete
+    pmaddwd         m8, m6, [r5 + 2 * mmsize]
+    paddd           m2, m8
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm8, [r7]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 3 * mmsize]
+    paddd           m1, m9                          ; output row 1 complete
+    pmaddwd         m9, m7, [r5 + 2 * mmsize]
+    paddd           m3, m9
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    paddd           m5, m9
+    pmaddwd         m7, [r5]
+    movu            xm9, [r7 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m2, m10                         ; output row 2 complete
+    pmaddwd         m10, m8, [r5 + 2 * mmsize]
+    paddd           m4, m10
+    pmaddwd         m10, m8, [r5 + 1 * mmsize]
+    paddd           m6, m10
+    pmaddwd         m8, [r5]
+    movu            xm10, [r7 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm11, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddwd         m11, m9, [r5 + 3 * mmsize]
+    paddd           m3, m11                         ; output row 3 complete
+    pmaddwd         m11, m9, [r5 + 2 * mmsize]
+    paddd           m5, m11
+    pmaddwd         m11, m9, [r5 + 1 * mmsize]
+    paddd           m7, m11
+    pmaddwd         m9, [r5]
+    movu            xm11, [r7 + r4]                 ; m11 = row 11
+    punpckhwd       xm12, xm10, xm11
+    punpcklwd       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddwd         m12, m10, [r5 + 3 * mmsize]
+    paddd           m4, m12                         ; output row 4 complete
+    pmaddwd         m12, m10, [r5 + 2 * mmsize]
+    paddd           m6, m12
+    pmaddwd         m12, m10, [r5 + 1 * mmsize]
+    paddd           m8, m12
+    pmaddwd         m10, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm12, [r7]                      ; m12 = row 12
+    punpckhwd       xm13, xm11, xm12
+    punpcklwd       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddwd         m13, m11, [r5 + 3 * mmsize]
+    paddd           m5, m13                         ; output row 5 complete
+    pmaddwd         m13, m11, [r5 + 2 * mmsize]
+    paddd           m7, m13
+    pmaddwd         m13, m11, [r5 + 1 * mmsize]
+    paddd           m9, m13
+    pmaddwd         m11, [r5]
+
+; finalize output rows 0-5 per variant
+%ifidn %1,ss
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+    psrad           m4, 6
+    psrad           m5, 6
+%else
+    paddd           m0, m14
+    paddd           m1, m14
+    paddd           m2, m14
+    paddd           m3, m14
+    paddd           m4, m14
+    paddd           m5, m14
+%ifidn %1,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+    psrad           m2, INTERP_SHIFT_PP
+    psrad           m3, INTERP_SHIFT_PP
+    psrad           m4, INTERP_SHIFT_PP
+    psrad           m5, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+    psrad           m2, INTERP_SHIFT_SP
+    psrad           m3, INTERP_SHIFT_SP
+    psrad           m4, INTERP_SHIFT_SP
+    psrad           m5, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+    psrad           m2, INTERP_SHIFT_PS
+    psrad           m3, INTERP_SHIFT_PS
+    psrad           m4, INTERP_SHIFT_PS
+    psrad           m5, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m0, m1
+    packssdw        m2, m3
+    packssdw        m4, m5
+    vpermq          m0, m0, 11011000b               ; restore row order after pack
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    pxor            m5, m5
+    mova            m3, [pw_pixel_max]
+%ifidn %1,pp
+    CLIPW           m0, m5, m3
+    CLIPW           m2, m5, m3
+    CLIPW           m4, m5, m3
+%elifidn %1, sp
+    CLIPW           m0, m5, m3
+    CLIPW           m2, m5, m3
+    CLIPW           m4, m5, m3
+%endif
+
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm0                       ; store rows 0-5
+    movu            [r2 + r3], xm1
+    vextracti128    xm1, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm1
+    lea             r8, [r2 + r3 * 4]
+    vextracti128    xm1, m4, 1
+    movu            [r8], xm4
+    movu            [r8 + r3], xm1
+
+    movu            xm13, [r7 + r1]                 ; m13 = row 13
+    punpckhwd       xm0, xm12, xm13
+    punpcklwd       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddwd         m0, m12, [r5 + 3 * mmsize]
+    paddd           m6, m0                          ; output row 6 complete
+    pmaddwd         m0, m12, [r5 + 2 * mmsize]
+    paddd           m8, m0
+    pmaddwd         m0, m12, [r5 + 1 * mmsize]
+    paddd           m10, m0
+    pmaddwd         m12, [r5]
+    movu            xm0, [r7 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm1, xm13, xm0
+    punpcklwd       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddwd         m1, m13, [r5 + 3 * mmsize]
+    paddd           m7, m1                          ; output row 7 complete
+    pmaddwd         m1, m13, [r5 + 2 * mmsize]
+    paddd           m9, m1
+    pmaddwd         m1, m13, [r5 + 1 * mmsize]
+    paddd           m11, m1
+    pmaddwd         m13, [r5]
+
+; finalize output rows 6-7
+%ifidn %1,ss
+    psrad           m6, 6
+    psrad           m7, 6
+%else
+    paddd           m6, m14
+    paddd           m7, m14
+%ifidn %1,pp
+    psrad           m6, INTERP_SHIFT_PP
+    psrad           m7, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m6, INTERP_SHIFT_SP
+    psrad           m7, INTERP_SHIFT_SP
+%else
+    psrad           m6, INTERP_SHIFT_PS
+    psrad           m7, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m6, m7
+    vpermq          m6, m6, 11011000b
+%ifidn %1,pp
+    CLIPW           m6, m5, m3
+%elifidn %1, sp
+    CLIPW           m6, m5, m3
+%endif
+    vextracti128    xm7, m6, 1
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm7
+
+    movu            xm1, [r7 + r4]                  ; m1 = row 15
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m2, m0, [r5 + 3 * mmsize]
+    paddd           m8, m2                          ; output row 8 complete
+    pmaddwd         m2, m0, [r5 + 2 * mmsize]
+    paddd           m10, m2
+    pmaddwd         m2, m0, [r5 + 1 * mmsize]
+    paddd           m12, m2
+    pmaddwd         m0, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm2, [r7]                       ; m2 = row 16
+    punpckhwd       xm6, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm6, 1
+    pmaddwd         m6, m1, [r5 + 3 * mmsize]
+    paddd           m9, m6                          ; output row 9 complete
+    pmaddwd         m6, m1, [r5 + 2 * mmsize]
+    paddd           m11, m6
+    pmaddwd         m6, m1, [r5 + 1 * mmsize]
+    paddd           m13, m6
+    pmaddwd         m1, [r5]
+    movu            xm6, [r7 + r1]                  ; m6 = row 17
+    punpckhwd       xm4, xm2, xm6
+    punpcklwd       xm2, xm6
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 3 * mmsize]
+    paddd           m10, m4                         ; output row 10 complete
+    pmaddwd         m4, m2, [r5 + 2 * mmsize]
+    paddd           m12, m4
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m0, m2
+    movu            xm4, [r7 + r1 * 2]              ; m4 = row 18
+    punpckhwd       xm2, xm6, xm4
+    punpcklwd       xm6, xm4
+    vinserti128     m6, m6, xm2, 1
+    pmaddwd         m2, m6, [r5 + 3 * mmsize]
+    paddd           m11, m2                         ; output row 11 complete
+    pmaddwd         m2, m6, [r5 + 2 * mmsize]
+    paddd           m13, m2
+    pmaddwd         m6, [r5 + 1 * mmsize]
+    paddd           m1, m6
+    movu            xm2, [r7 + r4]                  ; m2 = row 19
+    punpckhwd       xm6, xm4, xm2
+    punpcklwd       xm4, xm2
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 3 * mmsize]
+    paddd           m12, m6                         ; output row 12 complete
+    pmaddwd         m4, [r5 + 2 * mmsize]
+    paddd           m0, m4
+    lea             r7, [r7 + r1 * 4]
+    movu            xm6, [r7]                       ; m6 = row 20
+    punpckhwd       xm7, xm2, xm6
+    punpcklwd       xm2, xm6
+    vinserti128     m2, m2, xm7, 1
+    pmaddwd         m7, m2, [r5 + 3 * mmsize]
+    paddd           m13, m7                         ; output row 13 complete
+    pmaddwd         m2, [r5 + 2 * mmsize]
+    paddd           m1, m2
+    movu            xm7, [r7 + r1]                  ; m7 = row 21
+    punpckhwd       xm2, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm2, 1
+    pmaddwd         m6, [r5 + 3 * mmsize]
+    paddd           m0, m6                          ; output row 14 complete
+    movu            xm2, [r7 + r1 * 2]              ; m2 = row 22
+    punpckhwd       xm6, xm7, xm2
+    punpcklwd       xm7, xm2
+    vinserti128     m7, m7, xm6, 1
+    pmaddwd         m7, [r5 + 3 * mmsize]
+    paddd           m1, m7                          ; output row 15 complete
+
+; finalize output rows 8-15
+%ifidn %1,ss
+    psrad           m8, 6
+    psrad           m9, 6
+    psrad           m10, 6
+    psrad           m11, 6
+    psrad           m12, 6
+    psrad           m13, 6
+    psrad           m0, 6
+    psrad           m1, 6
+%else
+    paddd           m8, m14
+    paddd           m9, m14
+    paddd           m10, m14
+    paddd           m11, m14
+    paddd           m12, m14
+    paddd           m13, m14
+    paddd           m0, m14
+    paddd           m1, m14
+%ifidn %1,pp
+    psrad           m8, INTERP_SHIFT_PP
+    psrad           m9, INTERP_SHIFT_PP
+    psrad           m10, INTERP_SHIFT_PP
+    psrad           m11, INTERP_SHIFT_PP
+    psrad           m12, INTERP_SHIFT_PP
+    psrad           m13, INTERP_SHIFT_PP
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m8, INTERP_SHIFT_SP
+    psrad           m9, INTERP_SHIFT_SP
+    psrad           m10, INTERP_SHIFT_SP
+    psrad           m11, INTERP_SHIFT_SP
+    psrad           m12, INTERP_SHIFT_SP
+    psrad           m13, INTERP_SHIFT_SP
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+%else
+    psrad           m8, INTERP_SHIFT_PS
+    psrad           m9, INTERP_SHIFT_PS
+    psrad           m10, INTERP_SHIFT_PS
+    psrad           m11, INTERP_SHIFT_PS
+    psrad           m12, INTERP_SHIFT_PS
+    psrad           m13, INTERP_SHIFT_PS
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m8, m9
+    packssdw        m10, m11
+    packssdw        m12, m13
+    packssdw        m0, m1
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+    vpermq          m12, m12, 11011000b
+    vpermq          m0, m0, 11011000b
+%ifidn %1,pp
+    CLIPW           m8, m5, m3
+    CLIPW           m10, m5, m3
+    CLIPW           m12, m5, m3
+    CLIPW           m0, m5, m3
+%elifidn %1, sp
+    CLIPW           m8, m5, m3
+    CLIPW           m10, m5, m3
+    CLIPW           m12, m5, m3
+    CLIPW           m0, m5, m3
+%endif
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm13, m12, 1
+    vextracti128    xm1, m0, 1
+    lea             r8, [r8 + r3 * 4]
+    movu            [r8], xm8                       ; store rows 8-15
+    movu            [r8 + r3], xm9
+    movu            [r8 + r3 * 2], xm10
+    movu            [r8 + r6], xm11
+    lea             r8, [r8 + r3 * 4]
+    movu            [r8], xm12
+    movu            [r8 + r3], xm13
+    movu            [r8 + r3 * 2], xm0
+    movu            [r8 + r6], xm1
+%endmacro
+
+; Emit interp_8tap_vert_%1_%2x16 (AVX2, 16-bit pixels, x86-64 only).
+; %1 = variant (pp/ps/sp/ss), %2 = block width (a multiple of 8).
+; Presumably r0 = src, r1 = srcStride, r2 = dst, r3 = dstStride and
+; r4m = vertical coefficient index (x265 convention) -- confirm against
+; the C prototype.  Height is fixed at 16 rows, done in one pass of the
+; 16-row helper per 8-wide column.
+%macro FILTER_VER_LUMA_AVX2_Nx16 2
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_%2x16, 4, 10, 15
+    mov             r4d, r4m
+    shl             r4d, 7                          ; 128 bytes per filter: 4 ymm coefficient rows (helper reads up to [r5 + 3*mmsize])
+    add             r1d, r1d                        ; strides are in samples; double for 2-byte pixels
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                          ; rewind src 3 rows for the 8-tap history
+%ifidn %1,pp
+    vbroadcasti128  m14, [pd_32]                    ; rounding offset per variant (none for ss)
+%elifidn %1, sp
+    mova            m14, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m14, [INTERP_OFFSET_PS]
+%endif
+    lea             r6, [r3 * 3]
+    mov             r9d, %2 / 8                     ; one iteration per 8-wide column
+.loopW:
+    PROCESS_LUMA_AVX2_W8_16R %1
+    add             r2, 16                          ; advance 8 pixels = 16 bytes
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+; Instantiate the %2x16 vertical luma filters for widths 16/32/64,
+; all four variants (pp/ps/sp/ss).
+FILTER_VER_LUMA_AVX2_Nx16 pp, 16
+FILTER_VER_LUMA_AVX2_Nx16 pp, 32
+FILTER_VER_LUMA_AVX2_Nx16 pp, 64
+FILTER_VER_LUMA_AVX2_Nx16 ps, 16
+FILTER_VER_LUMA_AVX2_Nx16 ps, 32
+FILTER_VER_LUMA_AVX2_Nx16 ps, 64
+FILTER_VER_LUMA_AVX2_Nx16 sp, 16
+FILTER_VER_LUMA_AVX2_Nx16 sp, 32
+FILTER_VER_LUMA_AVX2_Nx16 sp, 64
+FILTER_VER_LUMA_AVX2_Nx16 ss, 16
+FILTER_VER_LUMA_AVX2_Nx16 ss, 32
+FILTER_VER_LUMA_AVX2_Nx16 ss, 64
+
+; Emit interp_8tap_vert_%3_%1x%2 (AVX2, 16-bit pixels, x86-64 only).
+; %1 = width (multiple of 8), %2 = height (multiple of 16), %3 = variant.
+; Tiles the block as (%1/8)-wide columns x (%2/16)-row bands; the inner
+; 16-row helper is expected to leave r7 pointing into the last source
+; rows it read and r8 at the last destination rows it wrote (both are
+; set inside PROCESS_LUMA_AVX2_W8_16R -- confirm there), which the .loopH
+; epilogue uses to rewind to the start of the next band.
+%macro FILTER_VER_LUMA_AVX2_NxN 3
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%3_%1x%2, 4, 12, 15
+    mov             r4d, r4m
+    shl             r4d, 7                          ; 128 bytes of coefficients per filter index
+    add             r1d, r1d                        ; sample strides -> byte strides (16-bit pixels)
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                          ; start 3 rows above for filter history
+
+%ifidn %3,pp
+    vbroadcasti128  m14, [pd_32]
+%elifidn %3, sp
+    mova            m14, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m14, [INTERP_OFFSET_PS]
+%endif
+
+    lea             r6, [r3 * 3]
+    lea             r11, [r1 * 4]
+    mov             r9d, %2 / 16                    ; row bands of 16
+.loopH:
+    mov             r10d, %1 / 8                    ; 8-wide columns per band
+.loopW:
+    PROCESS_LUMA_AVX2_W8_16R %3
+    add             r2, 16
+    add             r0, 16
+    dec             r10d
+    jnz             .loopW
+    sub             r7, r11                         ; back up 4 rows: net source advance = 16 rows
+    lea             r0, [r7 - 2 * %1 + 16]          ; 2*%1 bytes = full row width; +16 undoes the last column step
+    lea             r2, [r8 + r3 * 4 - 2 * %1 + 16]
+    dec             r9d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+; Instantiate the tiled NxN vertical luma filters (heights that are
+; multiples of 16, excluding the Nx16 cases handled above) for all
+; four variants.
+FILTER_VER_LUMA_AVX2_NxN 16, 32, pp
+FILTER_VER_LUMA_AVX2_NxN 16, 64, pp
+FILTER_VER_LUMA_AVX2_NxN 24, 32, pp
+FILTER_VER_LUMA_AVX2_NxN 32, 32, pp
+FILTER_VER_LUMA_AVX2_NxN 32, 64, pp
+FILTER_VER_LUMA_AVX2_NxN 48, 64, pp
+FILTER_VER_LUMA_AVX2_NxN 64, 32, pp
+FILTER_VER_LUMA_AVX2_NxN 64, 48, pp
+FILTER_VER_LUMA_AVX2_NxN 64, 64, pp
+FILTER_VER_LUMA_AVX2_NxN 16, 32, ps
+FILTER_VER_LUMA_AVX2_NxN 16, 64, ps
+FILTER_VER_LUMA_AVX2_NxN 24, 32, ps
+FILTER_VER_LUMA_AVX2_NxN 32, 32, ps
+FILTER_VER_LUMA_AVX2_NxN 32, 64, ps
+FILTER_VER_LUMA_AVX2_NxN 48, 64, ps
+FILTER_VER_LUMA_AVX2_NxN 64, 32, ps
+FILTER_VER_LUMA_AVX2_NxN 64, 48, ps
+FILTER_VER_LUMA_AVX2_NxN 64, 64, ps
+FILTER_VER_LUMA_AVX2_NxN 16, 32, sp
+FILTER_VER_LUMA_AVX2_NxN 16, 64, sp
+FILTER_VER_LUMA_AVX2_NxN 24, 32, sp
+FILTER_VER_LUMA_AVX2_NxN 32, 32, sp
+FILTER_VER_LUMA_AVX2_NxN 32, 64, sp
+FILTER_VER_LUMA_AVX2_NxN 48, 64, sp
+FILTER_VER_LUMA_AVX2_NxN 64, 32, sp
+FILTER_VER_LUMA_AVX2_NxN 64, 48, sp
+FILTER_VER_LUMA_AVX2_NxN 64, 64, sp
+FILTER_VER_LUMA_AVX2_NxN 16, 32, ss
+FILTER_VER_LUMA_AVX2_NxN 16, 64, ss
+FILTER_VER_LUMA_AVX2_NxN 24, 32, ss
+FILTER_VER_LUMA_AVX2_NxN 32, 32, ss
+FILTER_VER_LUMA_AVX2_NxN 32, 64, ss
+FILTER_VER_LUMA_AVX2_NxN 48, 64, ss
+FILTER_VER_LUMA_AVX2_NxN 64, 32, ss
+FILTER_VER_LUMA_AVX2_NxN 64, 48, ss
+FILTER_VER_LUMA_AVX2_NxN 64, 64, ss
+
+; Emit interp_8tap_vert_%1_8x%2 (AVX2, 16-bit pixels, x86-64 only).
+; %1 = variant (pp/ps/sp/ss), %2 = height (16 or 32).  Fully unrolled
+; 16-rows-per-iteration body for an 8-wide column: each iteration reads
+; source rows 0..22 (16 outputs + 7 taps of history shared between
+; neighbours) and rotates partial sums through m0..m13, pairing each
+; new row with its predecessor (punpckl/hwd + vinserti128) so one
+; pmaddwd consumes two taps at once.  m14 holds the rounding offset
+; for the non-ss variants.
+%macro FILTER_VER_LUMA_AVX2_8xN 2
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_8x%2, 4, 9, 15
+    mov             r4d, r4m
+    shl             r4d, 7                          ; 128 bytes of coefficients per filter index
+    add             r1d, r1d                        ; sample strides -> byte strides
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                          ; rewind 3 rows of filter history
+%ifidn %1,pp
+    vbroadcasti128  m14, [pd_32]
+%elifidn %1, sp
+    mova            m14, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m14, [INTERP_OFFSET_PS]
+%endif
+    lea             r6, [r3 * 3]
+    lea             r7, [r1 * 4]
+    mov             r8d, %2 / 16                    ; 16 output rows per .loopH pass
+.loopH:
+    ; ---- rows 0-12: build partial sums for output rows 0..11 ----
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 2 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 3 * mmsize]
+    paddd           m0, m8
+    pmaddwd         m8, m6, [r5 + 2 * mmsize]
+    paddd           m2, m8
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 3 * mmsize]
+    paddd           m1, m9
+    pmaddwd         m9, m7, [r5 + 2 * mmsize]
+    paddd           m3, m9
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    paddd           m5, m9
+    pmaddwd         m7, [r5]
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m2, m10
+    pmaddwd         m10, m8, [r5 + 2 * mmsize]
+    paddd           m4, m10
+    pmaddwd         m10, m8, [r5 + 1 * mmsize]
+    paddd           m6, m10
+    pmaddwd         m8, [r5]
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm11, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddwd         m11, m9, [r5 + 3 * mmsize]
+    paddd           m3, m11
+    pmaddwd         m11, m9, [r5 + 2 * mmsize]
+    paddd           m5, m11
+    pmaddwd         m11, m9, [r5 + 1 * mmsize]
+    paddd           m7, m11
+    pmaddwd         m9, [r5]
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhwd       xm12, xm10, xm11
+    punpcklwd       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddwd         m12, m10, [r5 + 3 * mmsize]
+    paddd           m4, m12
+    pmaddwd         m12, m10, [r5 + 2 * mmsize]
+    paddd           m6, m12
+    pmaddwd         m12, m10, [r5 + 1 * mmsize]
+    paddd           m8, m12
+    pmaddwd         m10, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm12, [r0]                      ; m12 = row 12
+    punpckhwd       xm13, xm11, xm12
+    punpcklwd       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddwd         m13, m11, [r5 + 3 * mmsize]
+    paddd           m5, m13
+    pmaddwd         m13, m11, [r5 + 2 * mmsize]
+    paddd           m7, m13
+    pmaddwd         m13, m11, [r5 + 1 * mmsize]
+    paddd           m9, m13
+    pmaddwd         m11, [r5]
+
+    ; ---- output rows 0-5 complete: round/shift per variant ----
+%ifidn %1,ss
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+    psrad           m4, 6
+    psrad           m5, 6
+%else
+    paddd           m0, m14
+    paddd           m1, m14
+    paddd           m2, m14
+    paddd           m3, m14
+    paddd           m4, m14
+    paddd           m5, m14
+%ifidn %1,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+    psrad           m2, INTERP_SHIFT_PP
+    psrad           m3, INTERP_SHIFT_PP
+    psrad           m4, INTERP_SHIFT_PP
+    psrad           m5, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+    psrad           m2, INTERP_SHIFT_SP
+    psrad           m3, INTERP_SHIFT_SP
+    psrad           m4, INTERP_SHIFT_SP
+    psrad           m5, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+    psrad           m2, INTERP_SHIFT_PS
+    psrad           m3, INTERP_SHIFT_PS
+    psrad           m4, INTERP_SHIFT_PS
+    psrad           m5, INTERP_SHIFT_PS
+%endif
+%endif
+
+    ; ---- pack dword->word, fix lane order, clip (pp/sp), store rows 0-5 ----
+    packssdw        m0, m1
+    packssdw        m2, m3
+    packssdw        m4, m5
+    vpermq          m0, m0, 11011000b               ; undo packssdw's per-lane interleave
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    pxor            m5, m5
+    mova            m3, [pw_pixel_max]              ; NOTE: m3/m5 double as clip bounds from here on
+%ifidn %1,pp
+    CLIPW           m0, m5, m3
+    CLIPW           m2, m5, m3
+    CLIPW           m4, m5, m3
+%elifidn %1, sp
+    CLIPW           m0, m5, m3
+    CLIPW           m2, m5, m3
+    CLIPW           m4, m5, m3
+%endif
+
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    vextracti128    xm1, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm1
+    lea             r2, [r2 + r3 * 4]
+    vextracti128    xm1, m4, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm1
+
+    ; ---- rows 13-14: finish output rows 6-7, start rows 8-13 ----
+    movu            xm13, [r0 + r1]                 ; m13 = row 13
+    punpckhwd       xm0, xm12, xm13
+    punpcklwd       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddwd         m0, m12, [r5 + 3 * mmsize]
+    paddd           m6, m0
+    pmaddwd         m0, m12, [r5 + 2 * mmsize]
+    paddd           m8, m0
+    pmaddwd         m0, m12, [r5 + 1 * mmsize]
+    paddd           m10, m0
+    pmaddwd         m12, [r5]
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm1, xm13, xm0
+    punpcklwd       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddwd         m1, m13, [r5 + 3 * mmsize]
+    paddd           m7, m1
+    pmaddwd         m1, m13, [r5 + 2 * mmsize]
+    paddd           m9, m1
+    pmaddwd         m1, m13, [r5 + 1 * mmsize]
+    paddd           m11, m1
+    pmaddwd         m13, [r5]
+
+%ifidn %1,ss
+    psrad           m6, 6
+    psrad           m7, 6
+%else
+    paddd           m6, m14
+    paddd           m7, m14
+%ifidn %1,pp
+    psrad           m6, INTERP_SHIFT_PP
+    psrad           m7, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m6, INTERP_SHIFT_SP
+    psrad           m7, INTERP_SHIFT_SP
+%else
+    psrad           m6, INTERP_SHIFT_PS
+    psrad           m7, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m6, m7
+    vpermq          m6, m6, 11011000b
+%ifidn %1,pp
+    CLIPW           m6, m5, m3
+%elifidn %1, sp
+    CLIPW           m6, m5, m3
+%endif
+    vextracti128    xm7, m6, 1
+    movu            [r2 + r3 * 2], xm6              ; store output rows 6-7
+    movu            [r2 + r6], xm7
+
+    ; ---- rows 15-22: complete output rows 8-15 ----
+    movu            xm1, [r0 + r4]                  ; m1 = row 15
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m2, m0, [r5 + 3 * mmsize]
+    paddd           m8, m2
+    pmaddwd         m2, m0, [r5 + 2 * mmsize]
+    paddd           m10, m2
+    pmaddwd         m2, m0, [r5 + 1 * mmsize]
+    paddd           m12, m2
+    pmaddwd         m0, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 16
+    punpckhwd       xm6, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm6, 1
+    pmaddwd         m6, m1, [r5 + 3 * mmsize]
+    paddd           m9, m6
+    pmaddwd         m6, m1, [r5 + 2 * mmsize]
+    paddd           m11, m6
+    pmaddwd         m6, m1, [r5 + 1 * mmsize]
+    paddd           m13, m6
+    pmaddwd         m1, [r5]
+    movu            xm6, [r0 + r1]                  ; m6 = row 17
+    punpckhwd       xm4, xm2, xm6
+    punpcklwd       xm2, xm6
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 3 * mmsize]
+    paddd           m10, m4
+    pmaddwd         m4, m2, [r5 + 2 * mmsize]
+    paddd           m12, m4
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m0, m2
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhwd       xm2, xm6, xm4
+    punpcklwd       xm6, xm4
+    vinserti128     m6, m6, xm2, 1
+    pmaddwd         m2, m6, [r5 + 3 * mmsize]
+    paddd           m11, m2
+    pmaddwd         m2, m6, [r5 + 2 * mmsize]
+    paddd           m13, m2
+    pmaddwd         m6, [r5 + 1 * mmsize]
+    paddd           m1, m6
+    movu            xm2, [r0 + r4]                  ; m2 = row 19
+    punpckhwd       xm6, xm4, xm2
+    punpcklwd       xm4, xm2
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 3 * mmsize]
+    paddd           m12, m6
+    pmaddwd         m4, [r5 + 2 * mmsize]
+    paddd           m0, m4
+    lea             r0, [r0 + r1 * 4]
+    movu            xm6, [r0]                       ; m6 = row 20
+    punpckhwd       xm7, xm2, xm6
+    punpcklwd       xm2, xm6
+    vinserti128     m2, m2, xm7, 1
+    pmaddwd         m7, m2, [r5 + 3 * mmsize]
+    paddd           m13, m7
+    pmaddwd         m2, [r5 + 2 * mmsize]
+    paddd           m1, m2
+    movu            xm7, [r0 + r1]                  ; m7 = row 21
+    punpckhwd       xm2, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm2, 1
+    pmaddwd         m6, [r5 + 3 * mmsize]
+    paddd           m0, m6
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 22
+    punpckhwd       xm6, xm7, xm2
+    punpcklwd       xm7, xm2
+    vinserti128     m7, m7, xm6, 1
+    pmaddwd         m7, [r5 + 3 * mmsize]
+    paddd           m1, m7
+
+    ; ---- output rows 8-15 complete: round/shift per variant ----
+%ifidn %1,ss
+    psrad           m8, 6
+    psrad           m9, 6
+    psrad           m10, 6
+    psrad           m11, 6
+    psrad           m12, 6
+    psrad           m13, 6
+    psrad           m0, 6
+    psrad           m1, 6
+%else
+    paddd           m8, m14
+    paddd           m9, m14
+    paddd           m10, m14
+    paddd           m11, m14
+    paddd           m12, m14
+    paddd           m13, m14
+    paddd           m0, m14
+    paddd           m1, m14
+%ifidn %1,pp
+    psrad           m8, INTERP_SHIFT_PP
+    psrad           m9, INTERP_SHIFT_PP
+    psrad           m10, INTERP_SHIFT_PP
+    psrad           m11, INTERP_SHIFT_PP
+    psrad           m12, INTERP_SHIFT_PP
+    psrad           m13, INTERP_SHIFT_PP
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m8, INTERP_SHIFT_SP
+    psrad           m9, INTERP_SHIFT_SP
+    psrad           m10, INTERP_SHIFT_SP
+    psrad           m11, INTERP_SHIFT_SP
+    psrad           m12, INTERP_SHIFT_SP
+    psrad           m13, INTERP_SHIFT_SP
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+%else
+    psrad           m8, INTERP_SHIFT_PS
+    psrad           m9, INTERP_SHIFT_PS
+    psrad           m10, INTERP_SHIFT_PS
+    psrad           m11, INTERP_SHIFT_PS
+    psrad           m12, INTERP_SHIFT_PS
+    psrad           m13, INTERP_SHIFT_PS
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+%endif
+%endif
+
+    ; ---- pack, permute, clip (pp/sp) and store output rows 8-15 ----
+    packssdw        m8, m9
+    packssdw        m10, m11
+    packssdw        m12, m13
+    packssdw        m0, m1
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+    vpermq          m12, m12, 11011000b
+    vpermq          m0, m0, 11011000b
+%ifidn %1,pp
+    CLIPW           m8, m5, m3
+    CLIPW           m10, m5, m3
+    CLIPW           m12, m5, m3
+    CLIPW           m0, m5, m3
+%elifidn %1, sp
+    CLIPW           m8, m5, m3
+    CLIPW           m10, m5, m3
+    CLIPW           m12, m5, m3
+    CLIPW           m0, m5, m3
+%endif
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm13, m12, 1
+    vextracti128    xm1, m0, 1
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm8
+    movu            [r2 + r3], xm9
+    movu            [r2 + r3 * 2], xm10
+    movu            [r2 + r6], xm11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm12
+    movu            [r2 + r3], xm13
+    movu            [r2 + r3 * 2], xm0
+    movu            [r2 + r6], xm1
+    lea             r2, [r2 + r3 * 4]
+    sub             r0, r7                          ; r0 advanced 20 rows above; -4 leaves net +16 = rows produced
+    dec             r8d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+; Instantiate the 8x16 and 8x32 vertical luma filters, all four variants.
+FILTER_VER_LUMA_AVX2_8xN pp, 16
+FILTER_VER_LUMA_AVX2_8xN pp, 32
+FILTER_VER_LUMA_AVX2_8xN ps, 16
+FILTER_VER_LUMA_AVX2_8xN ps, 32
+FILTER_VER_LUMA_AVX2_8xN sp, 16
+FILTER_VER_LUMA_AVX2_8xN sp, 32
+FILTER_VER_LUMA_AVX2_8xN ss, 16
+FILTER_VER_LUMA_AVX2_8xN ss, 32
+
+; Filter one 8-wide column, 8 output rows, 8-tap vertical (16-bit pixels).
+; %1 = variant (pp/ps/sp/ss).  Caller contract (see FILTER_VER_LUMA_AVX2_Nx8):
+;   r0 = src (3 rows above the first output row), r1 = src byte stride,
+;   r2 = dst, r3 = dst byte stride, r4 = r1*3, r5 = coefficient base,
+;   r6 = r3*3, m11 = rounding offset (unused for ss), m12 = clip max
+;   (pw_pixel_max, used by pp/sp).
+; Clobbers m0-m10 and r7/r8; r0/r2 are NOT advanced.  On exit r7 points
+; 12 source rows below r0 and r8 at dst row 4 (r2 + r3*4).
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r7, [r0 + r1 * 4]               ; walk src via r7 so r0 stays at the column top
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 2 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+    movu            xm7, [r7 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 3 * mmsize]
+    paddd           m0, m8
+    pmaddwd         m8, m6, [r5 + 2 * mmsize]
+    paddd           m2, m8
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm8, [r7]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 3 * mmsize]
+    paddd           m1, m9
+    pmaddwd         m9, m7, [r5 + 2 * mmsize]
+    paddd           m3, m9
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    paddd           m5, m9
+    pmaddwd         m7, [r5]
+    movu            xm9, [r7 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m2, m10
+    pmaddwd         m10, m8, [r5 + 2 * mmsize]
+    paddd           m4, m10
+    pmaddwd         m8, [r5 + 1 * mmsize]
+    paddd           m6, m8
+    movu            xm10, [r7 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm8, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm8, 1
+    pmaddwd         m8, m9, [r5 + 3 * mmsize]
+    paddd           m3, m8
+    pmaddwd         m8, m9, [r5 + 2 * mmsize]
+    paddd           m5, m8
+    pmaddwd         m9, [r5 + 1 * mmsize]
+    paddd           m7, m9
+    movu            xm8, [r7 + r4]                  ; m8 = row 11
+    punpckhwd       xm9, xm10, xm8
+    punpcklwd       xm10, xm8
+    vinserti128     m10, m10, xm9, 1
+    pmaddwd         m9, m10, [r5 + 3 * mmsize]
+    paddd           m4, m9
+    pmaddwd         m10, [r5 + 2 * mmsize]
+    paddd           m6, m10
+    lea             r7, [r7 + r1 * 4]
+    movu            xm9, [r7]                       ; m9 = row 12
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m5, m10
+    pmaddwd         m8, [r5 + 2 * mmsize]
+    paddd           m7, m8
+
+    ; ---- output rows 0-5 complete: round/shift per variant ----
+%ifidn %1,ss
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+    psrad           m4, 6
+    psrad           m5, 6
+%else
+    paddd           m0, m11
+    paddd           m1, m11
+    paddd           m2, m11
+    paddd           m3, m11
+    paddd           m4, m11
+    paddd           m5, m11
+%ifidn %1,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+    psrad           m2, INTERP_SHIFT_PP
+    psrad           m3, INTERP_SHIFT_PP
+    psrad           m4, INTERP_SHIFT_PP
+    psrad           m5, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+    psrad           m2, INTERP_SHIFT_SP
+    psrad           m3, INTERP_SHIFT_SP
+    psrad           m4, INTERP_SHIFT_SP
+    psrad           m5, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+    psrad           m2, INTERP_SHIFT_PS
+    psrad           m3, INTERP_SHIFT_PS
+    psrad           m4, INTERP_SHIFT_PS
+    psrad           m5, INTERP_SHIFT_PS
+%endif
+%endif
+
+    ; ---- pack, permute, clip (pp/sp), store output rows 0-5 ----
+    packssdw        m0, m1
+    packssdw        m2, m3
+    packssdw        m4, m5
+    vpermq          m0, m0, 11011000b               ; undo packssdw's per-lane interleave
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    pxor            m8, m8
+%ifidn %1,pp
+    CLIPW           m0, m8, m12
+    CLIPW           m2, m8, m12
+    CLIPW           m4, m8, m12
+%elifidn %1, sp
+    CLIPW           m0, m8, m12
+    CLIPW           m2, m8, m12
+    CLIPW           m4, m8, m12
+%endif
+
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    lea             r8, [r2 + r3 * 4]
+    movu            [r8], xm4
+    movu            [r8 + r3], xm5
+
+    ; ---- rows 13-14: finish output rows 6-7 ----
+    movu            xm10, [r7 + r1]                 ; m10 = row 13
+    punpckhwd       xm0, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm0, 1
+    pmaddwd         m9, [r5 + 3 * mmsize]
+    paddd           m6, m9
+    movu            xm0, [r7 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm1, xm10, xm0
+    punpcklwd       xm10, xm0
+    vinserti128     m10, m10, xm1, 1
+    pmaddwd         m10, [r5 + 3 * mmsize]
+    paddd           m7, m10
+
+%ifidn %1,ss
+    psrad           m6, 6
+    psrad           m7, 6
+%else
+    paddd           m6, m11
+    paddd           m7, m11
+%ifidn %1,pp
+    psrad           m6, INTERP_SHIFT_PP
+    psrad           m7, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m6, INTERP_SHIFT_SP
+    psrad           m7, INTERP_SHIFT_SP
+%else
+    psrad           m6, INTERP_SHIFT_PS
+    psrad           m7, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m6, m7
+    vpermq          m6, m6, 11011000b
+%ifidn %1,pp
+    CLIPW           m6, m8, m12
+%elifidn %1, sp
+    CLIPW           m6, m8, m12
+%endif
+    vextracti128    xm7, m6, 1
+    movu            [r8 + r3 * 2], xm6              ; store output rows 6-7
+    movu            [r8 + r6], xm7
+%endmacro
+
+; Emit interp_8tap_vert_%1_%2x8 (AVX2, 16-bit pixels, x86-64 only).
+; %1 = variant, %2 = width (multiple of 8); height fixed at 8.
+; Sets up the constants the 8-row helper expects: m11 = rounding offset
+; (unused by ss), m12 = pw_pixel_max clip bound, then walks 8-wide
+; columns across the block.
+%macro FILTER_VER_LUMA_AVX2_Nx8 2
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_%2x8, 4, 10, 13
+    mov             r4d, r4m
+    shl             r4d, 7                          ; 128 bytes of coefficients per filter index
+    add             r1d, r1d                        ; sample strides -> byte strides
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                          ; rewind 3 rows of filter history
+%ifidn %1,pp
+    vbroadcasti128  m11, [pd_32]
+%elifidn %1, sp
+    mova            m11, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m11, [INTERP_OFFSET_PS]
+%endif
+    mova            m12, [pw_pixel_max]
+    lea             r6, [r3 * 3]
+    mov             r9d, %2 / 8                     ; one iteration per 8-wide column
+.loopW:
+    PROCESS_LUMA_AVX2_W8_8R %1
+    add             r2, 16
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+; Instantiate the 32x8 and 16x8 vertical luma filters, all four variants.
+FILTER_VER_LUMA_AVX2_Nx8 pp, 32
+FILTER_VER_LUMA_AVX2_Nx8 pp, 16
+FILTER_VER_LUMA_AVX2_Nx8 ps, 32
+FILTER_VER_LUMA_AVX2_Nx8 ps, 16
+FILTER_VER_LUMA_AVX2_Nx8 sp, 32
+FILTER_VER_LUMA_AVX2_Nx8 sp, 16
+FILTER_VER_LUMA_AVX2_Nx8 ss, 32
+FILTER_VER_LUMA_AVX2_Nx8 ss, 16
+
+; Emit interp_8tap_vert_%1_32x24 (AVX2, 16-bit pixels, x86-64 only).
+; Splits the 24-row height into a 16-row band (four 8-wide columns via
+; the 16-row helper) followed by an 8-row band (via the 8-row helper).
+; The 16-row helper is expected to leave r7/r8 pointing into the last
+; source/dest rows it touched; -48 = 2*32 - 16 bytes rewinds a full
+; 32-pixel row minus the final column step (same formula as the NxN
+; macro with %1 = 32).
+%macro FILTER_VER_LUMA_AVX2_32x24 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_32x24, 4, 10, 15
+    mov             r4d, r4m
+    shl             r4d, 7                          ; 128 bytes of coefficients per filter index
+    add             r1d, r1d                        ; sample strides -> byte strides
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                          ; rewind 3 rows of filter history
+%ifidn %1,pp
+    vbroadcasti128  m14, [pd_32]
+%elifidn %1, sp
+    mova            m14, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m14, [INTERP_OFFSET_PS]
+%endif
+    lea             r6, [r3 * 3]
+    mov             r9d, 4                          ; 32/8 columns, 16-row band
+.loopW:
+    PROCESS_LUMA_AVX2_W8_16R %1
+    add             r2, 16
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    lea             r9, [r1 * 4]
+    sub             r7, r9                          ; net source advance for the band = 16 rows
+    lea             r0, [r7 - 48]                   ; back to column 0 of the next band
+    lea             r2, [r8 + r3 * 4 - 48]
+    mova            m11, m14                        ; re-seed 8-row helper constants: offset ...
+    mova            m12, m3                         ; ... and clip max; m3 is assumed to still hold
+                                                    ; pw_pixel_max left by the 16-row helper -- confirm there
+    mov             r9d, 4                          ; 32/8 columns, final 8-row band
+.loop:
+    PROCESS_LUMA_AVX2_W8_8R %1
+    add             r2, 16
+    add             r0, 16
+    dec             r9d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+
+; Instantiate the 32x24 vertical luma filter, all four variants.
+FILTER_VER_LUMA_AVX2_32x24 pp
+FILTER_VER_LUMA_AVX2_32x24 ps
+FILTER_VER_LUMA_AVX2_32x24 sp
+FILTER_VER_LUMA_AVX2_32x24 ss
+
+; Filter one 8-wide column, 4 output rows, 8-tap vertical (16-bit pixels).
+; %1 = variant (pp/ps/sp/ss).  Expects r0 = src (3 rows above the first
+; output row), r1 = src byte stride, r4 = r1*3, r5 = coefficient base,
+; m7 = rounding offset for the non-ss variants.  Results are left in
+; registers for the caller to store: xm0/xm1/xm2/xm3 = output rows
+; 0/1/2/3.  Advances r0 by 8 rows; clobbers m4-m6.
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m4, [r5 + 1 * mmsize]
+    paddd           m2, m4
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm4, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm4, 1
+    pmaddwd         m4, m5, [r5 + 2 * mmsize]
+    paddd           m1, m4
+    pmaddwd         m5, [r5 + 1 * mmsize]
+    paddd           m3, m5
+    movu            xm4, [r0 + r4]                  ; m4 = row 7
+    punpckhwd       xm5, xm6, xm4
+    punpcklwd       xm6, xm4
+    vinserti128     m6, m6, xm5, 1
+    pmaddwd         m5, m6, [r5 + 3 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m6, [r5 + 2 * mmsize]
+    paddd           m2, m6
+    lea             r0, [r0 + r1 * 4]
+    movu            xm5, [r0]                       ; m5 = row 8
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 3 * mmsize]
+    paddd           m1, m6
+    pmaddwd         m4, [r5 + 2 * mmsize]
+    paddd           m3, m4
+    movu            xm6, [r0 + r1]                  ; m6 = row 9
+    punpckhwd       xm4, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm4, 1
+    pmaddwd         m5, [r5 + 3 * mmsize]
+    paddd           m2, m5
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 10
+    punpckhwd       xm5, xm6, xm4
+    punpcklwd       xm6, xm4
+    vinserti128     m6, m6, xm5, 1
+    pmaddwd         m6, [r5 + 3 * mmsize]
+    paddd           m3, m6
+
+    ; ---- all 4 output rows complete: round/shift per variant ----
+%ifidn %1,ss
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+%else
+    paddd           m0, m7
+    paddd           m1, m7
+    paddd           m2, m7
+    paddd           m3, m7
+%ifidn %1,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+    psrad           m2, INTERP_SHIFT_PP
+    psrad           m3, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+    psrad           m2, INTERP_SHIFT_SP
+    psrad           m3, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+    psrad           m2, INTERP_SHIFT_PS
+    psrad           m3, INTERP_SHIFT_PS
+%endif
+%endif
+
+    ; ---- pack, permute, clip (pp/sp); rows land in xm0..xm3 ----
+    packssdw        m0, m1
+    packssdw        m2, m3
+    vpermq          m0, m0, 11011000b               ; undo packssdw's per-lane interleave
+    vpermq          m2, m2, 11011000b
+    pxor            m4, m4
+%ifidn %1,pp
+    CLIPW           m0, m4, [pw_pixel_max]
+    CLIPW           m2, m4, [pw_pixel_max]
+%elifidn %1, sp
+    CLIPW           m0, m4, [pw_pixel_max]
+    CLIPW           m2, m4, [pw_pixel_max]
+%endif
+
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+%endmacro
+
+%macro FILTER_VER_LUMA_AVX2_16x4 1             ; 8-tap vertical luma interp, 16x4 block; %1 = pp/ps/sp/ss variant
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_16x4, 4, 7, 8, 0-gprsize
+    mov             r4d, r4m                    ; r4 = coeffIdx
+    shl             r4d, 7                      ; coeffIdx * 128: one coefficient set = 4 ymm rows of 32 bytes
+    add             r1d, r1d                    ; strides given in 16-bit samples -> bytes
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above dst row 0 (8-tap window)
+%ifidn %1,pp
+    vbroadcasti128  m7, [pd_32]                 ; m7 = rounding offset for the chosen variant
+%elifidn %1, sp
+    mova            m7, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m7, [INTERP_OFFSET_PS]
+%endif
+    mov             dword [rsp], 2              ; two 8-wide column halves cover width 16
+.loopW:
+    PROCESS_LUMA_AVX2_W8_4R %1                  ; 4 output rows land in xm0..xm3
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    lea             r6, [r3 * 3]
+    movu            [r2 + r6], xm3
+    add             r2, 16
+    lea             r6, [8 * r1 - 16]
+    sub             r0, r6                      ; rewind the 8 src rows consumed, step right 8 samples (16 bytes)
+    dec             dword [rsp]
+    jnz             .loopW
+    RET
+%endmacro
+
+FILTER_VER_LUMA_AVX2_16x4 pp                   ; instantiate pp/ps/sp/ss variants
+FILTER_VER_LUMA_AVX2_16x4 ps
+FILTER_VER_LUMA_AVX2_16x4 sp
+FILTER_VER_LUMA_AVX2_16x4 ss
+
+%macro FILTER_VER_LUMA_AVX2_8x4 1              ; 8-tap vertical luma interp, 8x4 block; %1 = pp/ps/sp/ss variant
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_8x4, 4, 6, 8
+    mov             r4d, r4m                    ; r4 = coeffIdx
+    shl             r4d, 7                      ; coeffIdx * 128 (4 ymm coefficient rows)
+    add             r1d, r1d                    ; sample strides -> byte strides
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above dst row 0
+%ifidn %1,pp
+    vbroadcasti128  m7, [pd_32]                 ; m7 = rounding offset for the chosen variant
+%elifidn %1, sp
+    mova            m7, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m7, [INTERP_OFFSET_PS]
+%endif
+
+    PROCESS_LUMA_AVX2_W8_4R %1                  ; single 8-wide pass: 4 rows in xm0..xm3
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    lea             r4, [r3 * 3]
+    movu            [r2 + r4], xm3
+    RET
+%endmacro
+
+FILTER_VER_LUMA_AVX2_8x4 pp                    ; instantiate pp/ps/sp/ss variants
+FILTER_VER_LUMA_AVX2_8x4 ps
+FILTER_VER_LUMA_AVX2_8x4 sp
+FILTER_VER_LUMA_AVX2_8x4 ss
+
+%macro FILTER_VER_LUMA_AVX2_16x12 1            ; 8-tap vertical luma interp, 16x12 block; %1 = pp/ps/sp/ss
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; uses m0..m14 and r7..r9: x86-64 only
+cglobal interp_8tap_vert_%1_16x12, 4, 10, 15
+    mov             r4d, r4m                    ; r4 = coeffIdx
+    shl             r4d, 7                      ; coeffIdx * 128 (4 ymm coefficient rows)
+    add             r1d, r1d                    ; sample strides -> byte strides
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above dst row 0
+%ifidn %1,pp
+    vbroadcasti128  m14, [pd_32]                ; m14 = rounding offset for the chosen variant
+%elifidn %1, sp
+    mova            m14, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m14, [INTERP_OFFSET_PS]
+%endif
+    mova            m13, [pw_pixel_max]         ; clamp bound for pixel-output variants
+    pxor            m12, m12                    ; zero, lower clamp bound
+    lea             r6, [r3 * 3]
+    mov             r9d, 2                      ; two 8-wide column halves cover width 16
+.loopW:                                        ; each iteration filters an 8x12 column strip
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r7, [r0 + r1 * 4]           ; r7 walks the source rows so r0 stays at the strip top
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 2 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+    movu            xm7, [r7 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 3 * mmsize]
+    paddd           m0, m8
+    pmaddwd         m8, m6, [r5 + 2 * mmsize]
+    paddd           m2, m8
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm8, [r7]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 3 * mmsize]
+    paddd           m1, m9
+    pmaddwd         m9, m7, [r5 + 2 * mmsize]
+    paddd           m3, m9
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    paddd           m5, m9
+    pmaddwd         m7, [r5]
+    movu            xm9, [r7 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m2, m10
+    pmaddwd         m10, m8, [r5 + 2 * mmsize]
+    paddd           m4, m10
+    pmaddwd         m10, m8, [r5 + 1 * mmsize]
+    paddd           m6, m10
+    pmaddwd         m8, [r5]
+    movu            xm10, [r7 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm11, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddwd         m11, m9, [r5 + 3 * mmsize]
+    paddd           m3, m11
+    pmaddwd         m11, m9, [r5 + 2 * mmsize]
+    paddd           m5, m11
+    pmaddwd         m11, m9, [r5 + 1 * mmsize]
+    paddd           m7, m11
+    pmaddwd         m9, [r5]
+
+%ifidn %1,ss                                   ; finish output rows 0-3: shift (+round for non-ss)
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+%else
+    paddd           m0, m14
+    paddd           m1, m14
+    paddd           m2, m14
+    paddd           m3, m14
+%ifidn %1,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+    psrad           m2, INTERP_SHIFT_PP
+    psrad           m3, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+    psrad           m2, INTERP_SHIFT_SP
+    psrad           m3, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+    psrad           m2, INTERP_SHIFT_PS
+    psrad           m3, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m0, m1
+    packssdw        m2, m3
+    vpermq          m0, m0, 11011000b          ; undo lane interleave from vinserti128
+    vpermq          m2, m2, 11011000b
+%ifidn %1,pp
+    CLIPW           m0, m12, m13               ; clamp to [0, pw_pixel_max] for pixel output
+    CLIPW           m2, m12, m13
+%elifidn %1, sp
+    CLIPW           m0, m12, m13
+    CLIPW           m2, m12, m13
+%endif
+
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0                  ; store output rows 0-3
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+
+    movu            xm11, [r7 + r4]                 ; m11 = row 11
+    punpckhwd       xm0, xm10, xm11
+    punpcklwd       xm10, xm11
+    vinserti128     m10, m10, xm0, 1
+    pmaddwd         m0, m10, [r5 + 3 * mmsize]
+    paddd           m4, m0
+    pmaddwd         m0, m10, [r5 + 2 * mmsize]
+    paddd           m6, m0
+    pmaddwd         m0, m10, [r5 + 1 * mmsize]
+    paddd           m8, m0
+    pmaddwd         m10, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm0, [r7]                      ; m0 = row 12
+    punpckhwd       xm1, xm11, xm0
+    punpcklwd       xm11, xm0
+    vinserti128     m11, m11, xm1, 1
+    pmaddwd         m1, m11, [r5 + 3 * mmsize]
+    paddd           m5, m1
+    pmaddwd         m1, m11, [r5 + 2 * mmsize]
+    paddd           m7, m1
+    pmaddwd         m1, m11, [r5 + 1 * mmsize]
+    paddd           m9, m1
+    pmaddwd         m11, [r5]
+    movu            xm2, [r7 + r1]                 ; m2 = row 13
+    punpckhwd       xm1, xm0, xm2
+    punpcklwd       xm0, xm2
+    vinserti128     m0, m0, xm1, 1
+    pmaddwd         m1, m0, [r5 + 3 * mmsize]
+    paddd           m6, m1
+    pmaddwd         m1, m0, [r5 + 2 * mmsize]
+    paddd           m8, m1
+    pmaddwd         m0, [r5 + 1 * mmsize]
+    paddd           m10, m0
+    movu            xm0, [r7 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm1, xm2, xm0
+    punpcklwd       xm2, xm0
+    vinserti128     m2, m2, xm1, 1
+    pmaddwd         m1, m2, [r5 + 3 * mmsize]
+    paddd           m7, m1
+    pmaddwd         m1, m2, [r5 + 2 * mmsize]
+    paddd           m9, m1
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m11, m2
+
+%ifidn %1,ss                                   ; finish output rows 4-7
+    psrad           m4, 6
+    psrad           m5, 6
+    psrad           m6, 6
+    psrad           m7, 6
+%else
+    paddd           m4, m14
+    paddd           m5, m14
+    paddd           m6, m14
+    paddd           m7, m14
+%ifidn %1,pp
+    psrad           m4, INTERP_SHIFT_PP
+    psrad           m5, INTERP_SHIFT_PP
+    psrad           m6, INTERP_SHIFT_PP
+    psrad           m7, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m4, INTERP_SHIFT_SP
+    psrad           m5, INTERP_SHIFT_SP
+    psrad           m6, INTERP_SHIFT_SP
+    psrad           m7, INTERP_SHIFT_SP
+%else
+    psrad           m4, INTERP_SHIFT_PS
+    psrad           m5, INTERP_SHIFT_PS
+    psrad           m6, INTERP_SHIFT_PS
+    psrad           m7, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m4, m5
+    packssdw        m6, m7
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+%ifidn %1,pp
+    CLIPW           m4, m12, m13
+    CLIPW           m6, m12, m13
+%elifidn %1, sp
+    CLIPW           m4, m12, m13
+    CLIPW           m6, m12, m13
+%endif
+    lea             r8, [r2 + r3 * 4]          ; r8 -> dst rows 4-7
+    vextracti128    xm1, m4, 1
+    vextracti128    xm7, m6, 1
+    movu            [r8], xm4
+    movu            [r8 + r3], xm1
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm7
+
+    movu            xm1, [r7 + r4]                  ; m1 = row 15
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m2, m0, [r5 + 3 * mmsize]
+    paddd           m8, m2
+    pmaddwd         m0, [r5 + 2 * mmsize]
+    paddd           m10, m0
+    lea             r7, [r7 + r1 * 4]
+    movu            xm2, [r7]                       ; m2 = row 16
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m3, m1, [r5 + 3 * mmsize]
+    paddd           m9, m3
+    pmaddwd         m1, [r5 + 2 * mmsize]
+    paddd           m11, m1
+    movu            xm3, [r7 + r1]                  ; m3 = row 17
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m2, [r5 + 3 * mmsize]
+    paddd           m10, m2
+    movu            xm4, [r7 + r1 * 2]              ; m4 = row 18
+    punpckhwd       xm2, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm2, 1
+    pmaddwd         m3, [r5 + 3 * mmsize]
+    paddd           m11, m3
+
+%ifidn %1,ss                                   ; finish output rows 8-11
+    psrad           m8, 6
+    psrad           m9, 6
+    psrad           m10, 6
+    psrad           m11, 6
+%else
+    paddd           m8, m14
+    paddd           m9, m14
+    paddd           m10, m14
+    paddd           m11, m14
+%ifidn %1,pp
+    psrad           m8, INTERP_SHIFT_PP
+    psrad           m9, INTERP_SHIFT_PP
+    psrad           m10, INTERP_SHIFT_PP
+    psrad           m11, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m8, INTERP_SHIFT_SP
+    psrad           m9, INTERP_SHIFT_SP
+    psrad           m10, INTERP_SHIFT_SP
+    psrad           m11, INTERP_SHIFT_SP
+%else
+    psrad           m8, INTERP_SHIFT_PS
+    psrad           m9, INTERP_SHIFT_PS
+    psrad           m10, INTERP_SHIFT_PS
+    psrad           m11, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m8, m9
+    packssdw        m10, m11
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+%ifidn %1,pp
+    CLIPW           m8, m12, m13
+    CLIPW           m10, m12, m13
+%elifidn %1, sp
+    CLIPW           m8, m12, m13
+    CLIPW           m10, m12, m13
+%endif
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    lea             r8, [r8 + r3 * 4]          ; r8 -> dst rows 8-11
+    movu            [r8], xm8
+    movu            [r8 + r3], xm9
+    movu            [r8 + r3 * 2], xm10
+    movu            [r8 + r6], xm11
+    add             r2, 16                     ; advance both pointers 8 samples right for next strip
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+FILTER_VER_LUMA_AVX2_16x12 pp                  ; instantiate pp/ps/sp/ss variants (x86-64 only, see macro)
+FILTER_VER_LUMA_AVX2_16x12 ps
+FILTER_VER_LUMA_AVX2_16x12 sp
+FILTER_VER_LUMA_AVX2_16x12 ss
+
+%macro FILTER_VER_LUMA_AVX2_4x8 1              ; 8-tap vertical luma interp, 4x8 block; %1 = pp/ps/sp/ss
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_4x8, 4, 7, 8
+    mov             r4d, r4m                    ; r4 = coeffIdx
+    shl             r4d, 7                      ; coeffIdx * 128 (4 ymm coefficient rows)
+    add             r1d, r1d                    ; sample strides -> byte strides
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above dst row 0
+
+%ifidn %1,pp
+    vbroadcasti128  m7, [pd_32]                 ; m7 = rounding offset for the chosen variant
+%elifidn %1, sp
+    mova            m7, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m7, [INTERP_OFFSET_PS]
+%endif
+    lea             r6, [r3 * 3]
+
+    movq            xm0, [r0]                   ; each ymm packs two adjacent row-pairs (see lane comments)
+    movq            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m5, m4, [r5 + 2 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m5, m4, [r5 + 1 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m4, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm1, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm6, [r0]
+    punpcklwd       xm3, xm6
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [8 7 7 6]
+    pmaddwd         m5, m1, [r5 + 3 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m5, m1, [r5 + 2 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m5, m1, [r5 + 1 * mmsize]
+    paddd           m4, m5
+    pmaddwd         m1, [r5]
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm6, xm3
+    movq            xm5, [r0 + 2 * r1]
+    punpcklwd       xm3, xm5
+    vinserti128     m6, m6, xm3, 1                  ; m6 = [A 9 9 8]
+    pmaddwd         m3, m6, [r5 + 3 * mmsize]
+    paddd           m2, m3
+    pmaddwd         m3, m6, [r5 + 2 * mmsize]
+    paddd           m4, m3
+    pmaddwd         m6, [r5 + 1 * mmsize]
+    paddd           m1, m6
+
+%ifidn %1,ss                                   ; finish output rows 0-3 (m0 = rows 0/2, m2 = rows 1/3)
+    psrad           m0, 6
+    psrad           m2, 6
+%else
+    paddd           m0, m7
+    paddd           m2, m7
+%ifidn %1,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m2, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m2, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m2, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m0, m2
+    pxor            m6, m6
+    mova            m3, [pw_pixel_max]
+%ifidn %1,pp
+    CLIPW           m0, m6, m3                 ; clamp to [0, pw_pixel_max] for pixel output
+%elifidn %1, sp
+    CLIPW           m0, m6, m3
+%endif
+
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0                  ; store output rows 0-3 (4 samples each)
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm2
+
+    movq            xm2, [r0 + r4]
+    punpcklwd       xm5, xm2
+    lea             r0, [r0 + 4 * r1]
+    movq            xm0, [r0]
+    punpcklwd       xm2, xm0
+    vinserti128     m5, m5, xm2, 1                  ; m5 = [C B B A]
+    pmaddwd         m2, m5, [r5 + 3 * mmsize]
+    paddd           m4, m2
+    pmaddwd         m5, [r5 + 2 * mmsize]
+    paddd           m1, m5
+    movq            xm2, [r0 + r1]
+    punpcklwd       xm0, xm2
+    movq            xm5, [r0 + 2 * r1]
+    punpcklwd       xm2, xm5
+    vinserti128     m0, m0, xm2, 1                  ; m0 = [E D D C]
+    pmaddwd         m0, [r5 + 3 * mmsize]
+    paddd           m1, m0
+
+%ifidn %1,ss                                   ; finish output rows 4-7
+    psrad           m4, 6
+    psrad           m1, 6
+%else
+    paddd           m4, m7
+    paddd           m1, m7
+%ifidn %1,pp
+    psrad           m4, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m4, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+%else
+    psrad           m4, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m4, m1
+%ifidn %1,pp
+    CLIPW           m4, m6, m3
+%elifidn %1, sp
+    CLIPW           m4, m6, m3
+%endif
+
+    vextracti128    xm1, m4, 1
+    lea             r2, [r2 + r3 * 4]          ; dst -> rows 4-7
+    movq            [r2], xm4
+    movq            [r2 + r3], xm1
+    movhps          [r2 + r3 * 2], xm4
+    movhps          [r2 + r6], xm1
+    RET
+%endmacro
+
+FILTER_VER_LUMA_AVX2_4x8 pp                    ; instantiate pp/ps/sp/ss variants
+FILTER_VER_LUMA_AVX2_4x8 ps
+FILTER_VER_LUMA_AVX2_4x8 sp
+FILTER_VER_LUMA_AVX2_4x8 ss
+
+%macro PROCESS_LUMA_AVX2_W4_16R 1              ; filter+store a 4-wide, 16-row column; caller sets r0-r6 and m7 (rounding offset); advances r0/r2
+    movq            xm0, [r0]
+    movq            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m5, m4, [r5 + 2 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m5, m4, [r5 + 1 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m4, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm1, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm6, [r0]
+    punpcklwd       xm3, xm6
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [8 7 7 6]
+    pmaddwd         m5, m1, [r5 + 3 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m5, m1, [r5 + 2 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m5, m1, [r5 + 1 * mmsize]
+    paddd           m4, m5
+    pmaddwd         m1, [r5]
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm6, xm3
+    movq            xm5, [r0 + 2 * r1]
+    punpcklwd       xm3, xm5
+    vinserti128     m6, m6, xm3, 1                  ; m6 = [10 9 9 8]
+    pmaddwd         m3, m6, [r5 + 3 * mmsize]
+    paddd           m2, m3
+    pmaddwd         m3, m6, [r5 + 2 * mmsize]
+    paddd           m4, m3
+    pmaddwd         m3, m6, [r5 + 1 * mmsize]
+    paddd           m1, m3
+    pmaddwd         m6, [r5]
+
+%ifidn %1,ss                                   ; finish output rows 0-3 (m0 = rows 0/2, m2 = rows 1/3)
+    psrad           m0, 6
+    psrad           m2, 6
+%else
+    paddd           m0, m7
+    paddd           m2, m7
+%ifidn %1,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m2, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m2, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m2, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m0, m2
+    pxor            m3, m3
+%ifidn %1,pp
+    CLIPW           m0, m3, [pw_pixel_max]     ; clamp to [0, pw_pixel_max] for pixel output
+%elifidn %1, sp
+    CLIPW           m0, m3, [pw_pixel_max]
+%endif
+
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0                  ; store output rows 0-3
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm2
+
+    movq            xm2, [r0 + r4]
+    punpcklwd       xm5, xm2
+    lea             r0, [r0 + 4 * r1]
+    movq            xm0, [r0]
+    punpcklwd       xm2, xm0
+    vinserti128     m5, m5, xm2, 1                  ; m5 = [12 11 11 10]
+    pmaddwd         m2, m5, [r5 + 3 * mmsize]
+    paddd           m4, m2
+    pmaddwd         m2, m5, [r5 + 2 * mmsize]
+    paddd           m1, m2
+    pmaddwd         m2, m5, [r5 + 1 * mmsize]
+    paddd           m6, m2
+    pmaddwd         m5, [r5]
+    movq            xm2, [r0 + r1]
+    punpcklwd       xm0, xm2
+    movq            xm3, [r0 + 2 * r1]
+    punpcklwd       xm2, xm3
+    vinserti128     m0, m0, xm2, 1                  ; m0 = [14 13 13 12]
+    pmaddwd         m2, m0, [r5 + 3 * mmsize]
+    paddd           m1, m2
+    pmaddwd         m2, m0, [r5 + 2 * mmsize]
+    paddd           m6, m2
+    pmaddwd         m2, m0, [r5 + 1 * mmsize]
+    paddd           m5, m2
+    pmaddwd         m0, [r5]
+
+%ifidn %1,ss                                   ; finish output rows 4-7
+    psrad           m4, 6
+    psrad           m1, 6
+%else
+    paddd           m4, m7
+    paddd           m1, m7
+%ifidn %1,pp
+    psrad           m4, INTERP_SHIFT_PP
+    psrad           m1, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m4, INTERP_SHIFT_SP
+    psrad           m1, INTERP_SHIFT_SP
+%else
+    psrad           m4, INTERP_SHIFT_PS
+    psrad           m1, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m4, m1
+    pxor            m2, m2
+%ifidn %1,pp
+    CLIPW           m4, m2, [pw_pixel_max]
+%elifidn %1, sp
+    CLIPW           m4, m2, [pw_pixel_max]
+%endif
+
+    vextracti128    xm1, m4, 1
+    lea             r2, [r2 + r3 * 4]          ; dst -> rows 4-7
+    movq            [r2], xm4
+    movq            [r2 + r3], xm1
+    movhps          [r2 + r3 * 2], xm4
+    movhps          [r2 + r6], xm1
+
+    movq            xm4, [r0 + r4]
+    punpcklwd       xm3, xm4
+    lea             r0, [r0 + 4 * r1]
+    movq            xm1, [r0]
+    punpcklwd       xm4, xm1
+    vinserti128     m3, m3, xm4, 1                  ; m3 = [16 15 15 14]
+    pmaddwd         m4, m3, [r5 + 3 * mmsize]
+    paddd           m6, m4
+    pmaddwd         m4, m3, [r5 + 2 * mmsize]
+    paddd           m5, m4
+    pmaddwd         m4, m3, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m3, [r5]
+    movq            xm4, [r0 + r1]
+    punpcklwd       xm1, xm4
+    movq            xm2, [r0 + 2 * r1]
+    punpcklwd       xm4, xm2
+    vinserti128     m1, m1, xm4, 1                  ; m1 = [18 17 17 16]
+    pmaddwd         m4, m1, [r5 + 3 * mmsize]
+    paddd           m5, m4
+    pmaddwd         m4, m1, [r5 + 2 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m1, [r5 + 1 * mmsize]
+    paddd           m3, m1
+
+%ifidn %1,ss                                   ; finish output rows 8-11
+    psrad           m6, 6
+    psrad           m5, 6
+%else
+    paddd           m6, m7
+    paddd           m5, m7
+%ifidn %1,pp
+    psrad           m6, INTERP_SHIFT_PP
+    psrad           m5, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m6, INTERP_SHIFT_SP
+    psrad           m5, INTERP_SHIFT_SP
+%else
+    psrad           m6, INTERP_SHIFT_PS
+    psrad           m5, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m6, m5
+    pxor            m1, m1
+%ifidn %1,pp
+    CLIPW           m6, m1, [pw_pixel_max]
+%elifidn %1, sp
+    CLIPW           m6, m1, [pw_pixel_max]
+%endif
+
+    vextracti128    xm5, m6, 1
+    lea             r2, [r2 + r3 * 4]          ; dst -> rows 8-11
+    movq            [r2], xm6
+    movq            [r2 + r3], xm5
+    movhps          [r2 + r3 * 2], xm6
+    movhps          [r2 + r6], xm5
+
+    movq            xm4, [r0 + r4]
+    punpcklwd       xm2, xm4
+    lea             r0, [r0 + 4 * r1]
+    movq            xm6, [r0]
+    punpcklwd       xm4, xm6
+    vinserti128     m2, m2, xm4, 1                  ; m2 = [20 19 19 18]
+    pmaddwd         m4, m2, [r5 + 3 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5 + 2 * mmsize]
+    paddd           m3, m2
+    movq            xm4, [r0 + r1]
+    punpcklwd       xm6, xm4
+    movq            xm2, [r0 + 2 * r1]
+    punpcklwd       xm4, xm2
+    vinserti128     m6, m6, xm4, 1                  ; m6 = [22 21 21 20]
+    pmaddwd         m6, [r5 + 3 * mmsize]
+    paddd           m3, m6
+
+%ifidn %1,ss                                   ; finish output rows 12-15
+    psrad           m0, 6
+    psrad           m3, 6
+%else
+    paddd           m0, m7
+    paddd           m3, m7
+%ifidn %1,pp
+    psrad           m0, INTERP_SHIFT_PP
+    psrad           m3, INTERP_SHIFT_PP
+%elifidn %1, sp
+    psrad           m0, INTERP_SHIFT_SP
+    psrad           m3, INTERP_SHIFT_SP
+%else
+    psrad           m0, INTERP_SHIFT_PS
+    psrad           m3, INTERP_SHIFT_PS
+%endif
+%endif
+
+    packssdw        m0, m3
+%ifidn %1,pp
+    CLIPW           m0, m1, [pw_pixel_max]
+%elifidn %1, sp
+    CLIPW           m0, m1, [pw_pixel_max]
+%endif
+
+    vextracti128    xm3, m0, 1
+    lea             r2, [r2 + r3 * 4]          ; dst -> rows 12-15
+    movq            [r2], xm0
+    movq            [r2 + r3], xm3
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm3
+%endmacro
+
+%macro FILTER_VER_LUMA_AVX2_4x16 1             ; 8-tap vertical luma interp, 4x16 block; %1 = pp/ps/sp/ss
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_4x16, 4, 7, 8
+    mov             r4d, r4m                    ; r4 = coeffIdx
+    shl             r4d, 7                      ; coeffIdx * 128 (4 ymm coefficient rows)
+    add             r1d, r1d                    ; sample strides -> byte strides
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above dst row 0
+%ifidn %1,pp
+    vbroadcasti128  m7, [pd_32]                 ; m7 = rounding offset expected by the W4 helper
+%elifidn %1, sp
+    mova            m7, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m7, [INTERP_OFFSET_PS]
+%endif
+    lea             r6, [r3 * 3]
+    PROCESS_LUMA_AVX2_W4_16R %1                 ; helper filters and stores all 16 rows
+    RET
+%endmacro
+
+FILTER_VER_LUMA_AVX2_4x16 pp                   ; instantiate pp/ps/sp/ss variants
+FILTER_VER_LUMA_AVX2_4x16 ps
+FILTER_VER_LUMA_AVX2_4x16 sp
+FILTER_VER_LUMA_AVX2_4x16 ss
+
+%macro FILTER_VER_LUMA_AVX2_12x16 1            ; 8-tap vertical luma interp, 12x16 block = one 8-wide + one 4-wide pass
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; needs 15 ymm regs: x86-64 only
+cglobal interp_8tap_vert_%1_12x16, 4, 9, 15
+    mov             r4d, r4m                    ; r4 = coeffIdx
+    shl             r4d, 7                      ; coeffIdx * 128 (4 ymm coefficient rows)
+    add             r1d, r1d                    ; sample strides -> byte strides
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above dst row 0
+%ifidn %1,pp
+    vbroadcasti128  m14, [pd_32]                ; rounding offset in m14 (presumably where W8_16R expects it — defined elsewhere)
+%elifidn %1, sp
+    mova            m14, [INTERP_OFFSET_SP]
+%else
+    vbroadcasti128  m14, [INTERP_OFFSET_PS]
+%endif
+    lea             r6, [r3 * 3]
+    PROCESS_LUMA_AVX2_W8_16R %1                 ; left 8x16 strip
+    add             r2, 16                      ; step right 8 samples (16 bytes)
+    add             r0, 16
+    mova            m7, m14                     ; W4 helper expects the rounding offset in m7
+    PROCESS_LUMA_AVX2_W4_16R %1                 ; right 4x16 strip
+    RET
+%endif
+%endmacro
+
+FILTER_VER_LUMA_AVX2_12x16 pp                  ; instantiate pp/ps/sp/ss variants (x86-64 only, see macro)
+FILTER_VER_LUMA_AVX2_12x16 ps
+FILTER_VER_LUMA_AVX2_12x16 sp
+FILTER_VER_LUMA_AVX2_12x16 ss
+
+;---------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_%1x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_LUMA_PS 2                    ; SSE4 8-tap vertical luma, pixel->short; %1 x %2 = block width x height
+INIT_XMM sse4
+cglobal interp_8tap_vert_ps_%1x%2, 5, 7, 8 ,0-gprsize
+
+    add       r1d, r1d                          ; sample strides -> byte strides
+    add       r3d, r3d
+    lea       r5, [r1 + 2 * r1]                 ; 3 * srcStride
+    sub       r0, r5                            ; start 3 rows above dst row 0
+    shl       r4d, 6                            ; coeffIdx * 64: one SSE coefficient set = 4 xmm rows of 16 bytes
+
+%ifdef PIC
+    lea       r5, [tab_LumaCoeffV]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_LumaCoeffV + r4]
+%endif
+
+    mova      m7, [INTERP_OFFSET_PS]            ; rounding offset for PS output
+
+    mov       dword [rsp], %2/4                 ; row-group counter: 4 rows per iteration
+.loopH:
+    mov       r4d, (%1/4)                       ; column counter: 4 samples per iteration
+.loopW:
+    PROCESS_LUMA_VER_W4_4R                      ; 4x4 patch of 32-bit sums in m0..m3
+
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+
+    psrad     m0, INTERP_SHIFT_PS
+    psrad     m1, INTERP_SHIFT_PS
+    psrad     m2, INTERP_SHIFT_PS
+    psrad     m3, INTERP_SHIFT_PS
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+
+    movh      [r2], m0
+    movhps    [r2 + r3], m0
+    lea       r5, [r2 + 2 * r3]
+    movh      [r5], m2
+    movhps    [r5 + r3], m2
+
+    lea       r5, [8 * r1 - 2 * 4]
+    sub       r0, r5                            ; rewind 8 src rows, advance 4 samples (8 bytes)
+    add       r2, 2 * 4
+
+    dec       r4d
+    jnz       .loopW
+
+    lea       r0, [r0 + 4 * r1 - 2 * %1]        ; next row group: down 4 rows, back to column 0
+    lea       r2, [r2 + 4 * r3 - 2 * %1]
+
+    dec       dword [rsp]
+    jnz       .loopH
+    RET
+%endmacro
+
+;---------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_%1x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_PS 4, 4                    ; instantiate every supported luma block size
+    FILTER_VER_LUMA_PS 8, 8
+    FILTER_VER_LUMA_PS 8, 4
+    FILTER_VER_LUMA_PS 4, 8
+    FILTER_VER_LUMA_PS 16, 16
+    FILTER_VER_LUMA_PS 16, 8
+    FILTER_VER_LUMA_PS 8, 16
+    FILTER_VER_LUMA_PS 16, 12
+    FILTER_VER_LUMA_PS 12, 16
+    FILTER_VER_LUMA_PS 16, 4
+    FILTER_VER_LUMA_PS 4, 16
+    FILTER_VER_LUMA_PS 32, 32
+    FILTER_VER_LUMA_PS 32, 16
+    FILTER_VER_LUMA_PS 16, 32
+    FILTER_VER_LUMA_PS 32, 24
+    FILTER_VER_LUMA_PS 24, 32
+    FILTER_VER_LUMA_PS 32, 8
+    FILTER_VER_LUMA_PS 8, 32
+    FILTER_VER_LUMA_PS 64, 64
+    FILTER_VER_LUMA_PS 64, 32
+    FILTER_VER_LUMA_PS 32, 64
+    FILTER_VER_LUMA_PS 64, 48
+    FILTER_VER_LUMA_PS 48, 64
+    FILTER_VER_LUMA_PS 64, 16
+    FILTER_VER_LUMA_PS 16, 64
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_sp_%1x%2(int16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_LUMA_SP 2                     ; %1 = block width, %2 = block height
+INIT_XMM sse4
+cglobal interp_8tap_vert_sp_%1x%2, 5, 7, 8 ,0-gprsize  ; one stack word reserved for the row counter
+
+    add       r1d, r1d                          ; srcStride: int16 units -> bytes
+    add       r3d, r3d                          ; dstStride: pixels -> bytes (16-bit samples)
+    lea       r5, [r1 + 2 * r1]                 ; r5 = 3 * srcStride
+    sub       r0, r5                            ; rewind src 3 rows (window for the 8-tap filter)
+    shl       r4d, 6                            ; coeffIdx * 64 = byte offset into the coeff table
+
+%ifdef PIC
+    lea       r5, [tab_LumaCoeffV]              ; PIC build: materialize the table base first
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_LumaCoeffV + r4]         ; r6 -> selected 8-tap vertical coefficient row
+%endif
+
+    mova      m7, [INTERP_OFFSET_SP]            ; rounding offset for the short->pixel path
+
+    mov       dword [rsp], %2/4                 ; outer loop count: height / 4
+.loopH:
+    mov       r4d, (%1/4)                       ; inner loop count: width / 4
+.loopW:
+    PROCESS_LUMA_VER_W4_4R                      ; 4 cols x 4 rows of 32-bit filter sums in m0..m3 (per macro name)
+
+    paddd     m0, m7                            ; add rounding offset
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+
+    psrad     m0, INTERP_SHIFT_SP               ; scale back down
+    psrad     m1, INTERP_SHIFT_SP
+    psrad     m2, INTERP_SHIFT_SP
+    psrad     m3, INTERP_SHIFT_SP
+
+    packssdw  m0, m1                            ; 32-bit -> saturated 16-bit
+    packssdw  m2, m3
+
+    pxor      m1, m1                            ; zero for the lower clamp bound
+    CLIPW2    m0, m2, m1, [pw_pixel_max]        ; clamp to the valid pixel range [0, pixel_max]
+
+    movh      [r2], m0                          ; store 4 rows of 4 pixels
+    movhps    [r2 + r3], m0
+    lea       r5, [r2 + 2 * r3]
+    movh      [r5], m2
+    movhps    [r5 + r3], m2
+
+    lea       r5, [8 * r1 - 2 * 4]              ; undo the 8-row source advance, step 4 columns right
+    sub       r0, r5
+    add       r2, 2 * 4
+
+    dec       r4d
+    jnz       .loopW
+
+    lea       r0, [r0 + 4 * r1 - 2 * %1]        ; next 4-row strip, back to the first column
+    lea       r2, [r2 + 4 * r3 - 2 * %1]
+
+    dec       dword [rsp]
+    jnz       .loopH
+    RET
+%endmacro
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_sp_%1x%2(int16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_SP 4, 4
+    FILTER_VER_LUMA_SP 8, 8
+    FILTER_VER_LUMA_SP 8, 4
+    FILTER_VER_LUMA_SP 4, 8
+    FILTER_VER_LUMA_SP 16, 16
+    FILTER_VER_LUMA_SP 16, 8
+    FILTER_VER_LUMA_SP 8, 16
+    FILTER_VER_LUMA_SP 16, 12
+    FILTER_VER_LUMA_SP 12, 16
+    FILTER_VER_LUMA_SP 16, 4
+    FILTER_VER_LUMA_SP 4, 16
+    FILTER_VER_LUMA_SP 32, 32
+    FILTER_VER_LUMA_SP 32, 16
+    FILTER_VER_LUMA_SP 16, 32
+    FILTER_VER_LUMA_SP 32, 24
+    FILTER_VER_LUMA_SP 24, 32
+    FILTER_VER_LUMA_SP 32, 8
+    FILTER_VER_LUMA_SP 8, 32
+    FILTER_VER_LUMA_SP 64, 64
+    FILTER_VER_LUMA_SP 64, 32
+    FILTER_VER_LUMA_SP 32, 64
+    FILTER_VER_LUMA_SP 64, 48
+    FILTER_VER_LUMA_SP 48, 64
+    FILTER_VER_LUMA_SP 64, 16
+    FILTER_VER_LUMA_SP 16, 64
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ss_%1x%2(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_LUMA_SS 2                     ; %1 = block width, %2 = block height
+INIT_XMM sse2
+cglobal interp_8tap_vert_ss_%1x%2, 5, 7, 7 ,0-gprsize  ; one stack word reserved for the row counter
+
+    add        r1d, r1d                         ; srcStride: int16 units -> bytes
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r5, [3 * r1]                     ; r5 = 3 * srcStride
+    sub        r0, r5                           ; rewind src 3 rows (window for the 8-tap filter)
+    shl        r4d, 6                           ; coeffIdx * 64 = byte offset into the coeff table
+
+%ifdef PIC
+    lea        r5, [tab_LumaCoeffV]             ; PIC build: materialize the table base first
+    lea        r6, [r5 + r4]
+%else
+    lea        r6, [tab_LumaCoeffV + r4]        ; r6 -> selected 8-tap vertical coefficient row
+%endif
+
+    mov        dword [rsp], %2/4                ; outer loop count: height / 4
+.loopH:
+    mov        r4d, (%1/4)                      ; inner loop count: width / 4
+.loopW:
+    PROCESS_LUMA_VER_W4_4R                      ; 4 cols x 4 rows of 32-bit filter sums in m0..m3 (per macro name)
+
+    psrad      m0, 6                            ; short->short path: plain >>6, no rounding offset
+    psrad      m1, 6
+    packssdw   m0, m1                           ; 32-bit -> saturated 16-bit
+    movlps     [r2], m0                         ; store rows 0 and 1
+    movhps     [r2 + r3], m0
+
+    psrad      m2, 6
+    psrad      m3, 6
+    packssdw   m2, m3
+    movlps     [r2 + 2 * r3], m2                ; store rows 2 and 3
+    lea        r5, [3 * r3]
+    movhps     [r2 + r5], m2
+
+    lea        r5, [8 * r1 - 2 * 4]             ; undo the 8-row source advance, step 4 columns right
+    sub        r0, r5
+    add        r2, 2 * 4
+
+    dec        r4d
+    jnz        .loopW
+
+    lea        r0, [r0 + 4 * r1 - 2 * %1]       ; next 4-row strip, back to the first column
+    lea        r2, [r2 + 4 * r3 - 2 * %1]
+
+    dec        dword [rsp]
+    jnz        .loopH
+    RET
+%endmacro
+
+    FILTER_VER_LUMA_SS 4, 4
+    FILTER_VER_LUMA_SS 8, 8
+    FILTER_VER_LUMA_SS 8, 4
+    FILTER_VER_LUMA_SS 4, 8
+    FILTER_VER_LUMA_SS 16, 16
+    FILTER_VER_LUMA_SS 16, 8
+    FILTER_VER_LUMA_SS 8, 16
+    FILTER_VER_LUMA_SS 16, 12
+    FILTER_VER_LUMA_SS 12, 16
+    FILTER_VER_LUMA_SS 16, 4
+    FILTER_VER_LUMA_SS 4, 16
+    FILTER_VER_LUMA_SS 32, 32
+    FILTER_VER_LUMA_SS 32, 16
+    FILTER_VER_LUMA_SS 16, 32
+    FILTER_VER_LUMA_SS 32, 24
+    FILTER_VER_LUMA_SS 24, 32
+    FILTER_VER_LUMA_SS 32, 8
+    FILTER_VER_LUMA_SS 8, 32
+    FILTER_VER_LUMA_SS 64, 64
+    FILTER_VER_LUMA_SS 64, 32
+    FILTER_VER_LUMA_SS 32, 64
+    FILTER_VER_LUMA_SS 64, 48
+    FILTER_VER_LUMA_SS 48, 64
+    FILTER_VER_LUMA_SS 64, 16
+    FILTER_VER_LUMA_SS 16, 64
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_2xN 1                              ; %1 = block height (2-wide blocks)
+INIT_XMM sse4
+cglobal filterPixelToShort_2x%1, 3, 6, 2
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r1 * 3]                     ; r4 = 3 * srcStride
+    lea        r5, [r3 * 3]                     ; r5 = 3 * dstStride
+
+    ; load constant
+    mova       m1, [pw_2000]                    ; offset subtracted after the shift
+
+%rep %1/4                                       ; fully unrolled: 4 rows per repetition
+    movd       m0, [r0]                         ; pack rows 0 and 1 (2 pixels each) into m0
+    movhps     m0, [r0 + r1]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psubw      m0, m1
+
+    movd       [r2 + r3 * 0], m0
+    pextrd     [r2 + r3 * 1], m0, 2
+
+    movd       m0, [r0 + r1 * 2]                ; rows 2 and 3
+    movhps     m0, [r0 + r4]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m1
+
+    movd       [r2 + r3 * 2], m0
+    pextrd     [r2 + r5], m0, 2
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+%endrep
+    RET
+%endmacro
+P2S_H_2xN 4
+P2S_H_2xN 8
+P2S_H_2xN 16
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_4xN 1                              ; %1 = block height (4-wide blocks)
+INIT_XMM ssse3
+cglobal filterPixelToShort_4x%1, 3, 6, 2
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load constant
+    mova       m1, [pw_2000]                    ; offset subtracted after the shift
+
+%rep %1/4                                       ; fully unrolled: 4 rows per repetition
+    movh       m0, [r0]                         ; rows 0 and 1 (4 pixels each) in one register
+    movhps     m0, [r0 + r1]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psubw      m0, m1
+    movh       [r2 + r3 * 0], m0
+    movhps     [r2 + r3 * 1], m0
+
+    movh       m0, [r0 + r1 * 2]                ; rows 2 and 3
+    movhps     m0, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m1
+    movh       [r2 + r3 * 2], m0
+    movhps     [r2 + r4], m0
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+%endrep
+    RET
+%endmacro
+P2S_H_4xN 4
+P2S_H_4xN 8
+P2S_H_4xN 16
+P2S_H_4xN 32
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal filterPixelToShort_4x2, 3, 4, 1        ; 4x2 special case: straight-line, no loop
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+
+    movh       m0, [r0]                         ; both rows (4 pixels each) in one register
+    movhps     m0, [r0 + r1]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psubw      m0, [pw_2000]
+    movh       [r2 + r3 * 0], m0
+    movhps     [r2 + r3 * 1], m0
+    RET
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_6xN 1                              ; %1 = block height (6-wide blocks)
+INIT_XMM sse4
+cglobal filterPixelToShort_6x%1, 3, 7, 3
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load height
+    mov        r6d, %1/4                        ; 4 rows processed per loop iteration
+
+    ; load constant
+    mova       m2, [pw_2000]                    ; offset subtracted after the shift
+
+.loop:                                          ; colon added (silences NASM orphan-label warning)
+    movu       m0, [r0]                         ; rows 0 and 1
+    movu       m1, [r0 + r1]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movh       [r2 + r3 * 0], m0                ; 4 + 2 values per row for the 6-wide block
+    pextrd     [r2 + r3 * 0 + 8], m0, 2
+    movh       [r2 + r3 * 1], m1
+    pextrd     [r2 + r3 * 1 + 8], m1, 2
+
+    movu       m0, [r0 + r1 * 2]                ; rows 2 and 3
+    movu       m1, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movh       [r2 + r3 * 2], m0
+    pextrd     [r2 + r3 * 2 + 8], m0, 2
+    movh       [r2 + r4], m1
+    pextrd     [r2 + r4 + 8], m1, 2
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_6xN 8
+P2S_H_6xN 16
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_8xN 1                              ; %1 = block height (8-wide blocks)
+INIT_XMM ssse3
+cglobal filterPixelToShort_8x%1, 3, 7, 2
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load height
+    mov        r6d, %1/4                        ; 4 rows processed per loop iteration
+
+    ; load constant
+    mova       m1, [pw_2000]                    ; offset subtracted after the shift
+
+.loop:                                          ; colon added (silences NASM orphan-label warning)
+    movu       m0, [r0]                         ; one full 8-pixel row per register
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psubw      m0, m1
+    movu       [r2 + r3 * 0], m0
+
+    movu       m0, [r0 + r1]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m1
+    movu       [r2 + r3 * 1], m0
+
+    movu       m0, [r0 + r1 * 2]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m1
+    movu       [r2 + r3 * 2], m0
+
+    movu       m0, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m1
+    movu       [r2 + r4], m0
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_8xN 8
+P2S_H_8xN 4
+P2S_H_8xN 16
+P2S_H_8xN 32
+P2S_H_8xN 12
+P2S_H_8xN 64
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal filterPixelToShort_8x2, 3, 4, 2        ; 8x2 special case: straight-line, no loop
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+
+    movu       m0, [r0]                         ; row 0
+    movu       m1, [r0 + r1]                    ; row 1
+
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, [pw_2000]
+    psubw      m1, [pw_2000]
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+    RET
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal filterPixelToShort_8x6, 3, 7, 4        ; 8x6 special case: fully unrolled, no loop
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r1 * 3]                     ; r4 = 3 * srcStride
+    lea        r5, [r1 * 5]                     ; r5 = 5 * srcStride
+    lea        r6, [r3 * 3]                     ; r6 = 3 * dstStride
+
+    ; load constant
+    mova       m3, [pw_2000]                    ; offset subtracted after the shift
+
+    movu       m0, [r0]                         ; rows 0..2
+    movu       m1, [r0 + r1]
+    movu       m2, [r0 + r1 * 2]
+
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psubw      m0, m3
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m1, m3
+    psllw      m2, (14 - BIT_DEPTH)
+    psubw      m2, m3
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+    movu       [r2 + r3 * 2], m2
+
+    movu       m0, [r0 + r4]                    ; rows 3..5
+    movu       m1, [r0 + r1 * 4]
+    movu       m2, [r0 + r5 ]
+
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m3
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m1, m3
+    psllw      m2, (14 - BIT_DEPTH)
+    psubw      m2, m3
+
+    movu       [r2 + r6], m0
+    movu       [r2 + r3 * 4], m1
+    lea        r2, [r2 + r3 * 4]                ; no free register for 5*dstStride; rebase instead
+    movu       [r2 + r3], m2
+    RET
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_16xN 1                             ; %1 = block height (16-wide blocks, 2 xmm chunks per row)
+INIT_XMM ssse3
+cglobal filterPixelToShort_16x%1, 3, 7, 3
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load height
+    mov        r6d, %1/4                        ; 4 rows processed per loop iteration
+
+    ; load constant
+    mova       m2, [pw_2000]                    ; offset subtracted after the shift
+
+.loop:                                          ; colon added (silences NASM orphan-label warning)
+    movu       m0, [r0]                         ; columns 0..7, rows 0 and 1
+    movu       m1, [r0 + r1]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psubw      m0, m2
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+
+    movu       m0, [r0 + r1 * 2]                ; columns 0..7, rows 2 and 3
+    movu       m1, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 2], m0
+    movu       [r2 + r4], m1
+
+    movu       m0, [r0 + 16]                    ; columns 8..15, rows 0 and 1
+    movu       m1, [r0 + r1 + 16]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 0 + 16], m0
+    movu       [r2 + r3 * 1 + 16], m1
+
+    movu       m0, [r0 + r1 * 2 + 16]           ; columns 8..15, rows 2 and 3
+    movu       m1, [r0 + r5 + 16]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 2 + 16], m0
+    movu       [r2 + r4 + 16], m1
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_16xN 16
+P2S_H_16xN 4
+P2S_H_16xN 8
+P2S_H_16xN 12
+P2S_H_16xN 32
+P2S_H_16xN 64
+P2S_H_16xN 24
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_16xN_avx2 1                        ; %1 = block height (one ymm covers a 16-pixel row)
+INIT_YMM avx2
+cglobal filterPixelToShort_16x%1, 3, 7, 3
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load height
+    mov        r6d, %1/4                        ; 4 rows processed per loop iteration
+
+    ; load constant
+    mova       m2, [pw_2000]                    ; offset subtracted after the shift
+
+.loop:                                          ; colon added (silences NASM orphan-label warning)
+    movu       m0, [r0]                         ; rows 0 and 1, full width each
+    movu       m1, [r0 + r1]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psubw      m0, m2
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+
+    movu       m0, [r0 + r1 * 2]                ; rows 2 and 3
+    movu       m1, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 2], m0
+    movu       [r2 + r4], m1
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_16xN_avx2 16
+P2S_H_16xN_avx2 4
+P2S_H_16xN_avx2 8
+P2S_H_16xN_avx2 12
+P2S_H_16xN_avx2 32
+P2S_H_16xN_avx2 64
+P2S_H_16xN_avx2 24
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_32xN 1                             ; %1 = block height (32-wide blocks, 4 xmm chunks per row)
+INIT_XMM ssse3
+cglobal filterPixelToShort_32x%1, 3, 7, 5
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load height
+    mov        r6d, %1/4                        ; 4 rows processed per loop iteration
+
+    ; load constant
+    mova       m4, [pw_2000]                    ; offset subtracted after the shift
+
+.loop:                                          ; colon added (silences NASM orphan-label warning)
+    movu       m0, [r0]                         ; byte columns 0..15, rows 0..3
+    movu       m1, [r0 + r1]
+    movu       m2, [r0 + r1 * 2]
+    movu       m3, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+    movu       [r2 + r3 * 2], m2
+    movu       [r2 + r4], m3
+
+    movu       m0, [r0 + 16]                    ; byte columns 16..31
+    movu       m1, [r0 + r1 + 16]
+    movu       m2, [r0 + r1 * 2 + 16]
+    movu       m3, [r0 + r5 + 16]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 16], m0
+    movu       [r2 + r3 * 1 + 16], m1
+    movu       [r2 + r3 * 2 + 16], m2
+    movu       [r2 + r4 + 16], m3
+
+    movu       m0, [r0 + 32]                    ; byte columns 32..47
+    movu       m1, [r0 + r1 + 32]
+    movu       m2, [r0 + r1 * 2 + 32]
+    movu       m3, [r0 + r5 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 32], m0
+    movu       [r2 + r3 * 1 + 32], m1
+    movu       [r2 + r3 * 2 + 32], m2
+    movu       [r2 + r4 + 32], m3
+
+    movu       m0, [r0 + 48]                    ; byte columns 48..63
+    movu       m1, [r0 + r1 + 48]
+    movu       m2, [r0 + r1 * 2 + 48]
+    movu       m3, [r0 + r5 + 48]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 48], m0
+    movu       [r2 + r3 * 1 + 48], m1
+    movu       [r2 + r3 * 2 + 48], m2
+    movu       [r2 + r4 + 48], m3
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_32xN 32
+P2S_H_32xN 8
+P2S_H_32xN 16
+P2S_H_32xN 24
+P2S_H_32xN 64
+P2S_H_32xN 48
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_32xN_avx2 1                        ; %1 = block height (32-wide blocks, 2 ymm chunks per row)
+INIT_YMM avx2
+cglobal filterPixelToShort_32x%1, 3, 7, 3
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load height
+    mov        r6d, %1/4                        ; 4 rows processed per loop iteration
+
+    ; load constant
+    mova       m2, [pw_2000]                    ; offset subtracted after the shift
+
+.loop:                                          ; colon added (silences NASM orphan-label warning)
+    movu       m0, [r0]                         ; byte columns 0..31, rows 0 and 1
+    movu       m1, [r0 + r1]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psubw      m0, m2
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+
+    movu       m0, [r0 + r1 * 2]                ; byte columns 0..31, rows 2 and 3
+    movu       m1, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 2], m0
+    movu       [r2 + r4], m1
+
+    movu       m0, [r0 + 32]                    ; byte columns 32..63, rows 0 and 1
+    movu       m1, [r0 + r1 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 0 + 32], m0
+    movu       [r2 + r3 * 1 + 32], m1
+
+    movu       m0, [r0 + r1 * 2 + 32]           ; byte columns 32..63, rows 2 and 3
+    movu       m1, [r0 + r5 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 2 + 32], m0
+    movu       [r2 + r4 + 32], m1
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_32xN_avx2 32
+P2S_H_32xN_avx2 8
+P2S_H_32xN_avx2 16
+P2S_H_32xN_avx2 24
+P2S_H_32xN_avx2 64
+P2S_H_32xN_avx2 48
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_64xN 1                             ; %1 = block height (64-wide blocks, 8 xmm chunks per row)
+INIT_XMM ssse3
+cglobal filterPixelToShort_64x%1, 3, 7, 5
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load height
+    mov        r6d, %1/4                        ; 4 rows processed per loop iteration
+
+    ; load constant
+    mova       m4, [pw_2000]                    ; offset subtracted after the shift
+
+.loop:                                          ; colon added (silences NASM orphan-label warning)
+    movu       m0, [r0]                         ; byte columns 0..15, rows 0..3
+    movu       m1, [r0 + r1]
+    movu       m2, [r0 + r1 * 2]
+    movu       m3, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+    movu       [r2 + r3 * 2], m2
+    movu       [r2 + r4], m3
+
+    movu       m0, [r0 + 16]                    ; byte columns 16..31
+    movu       m1, [r0 + r1 + 16]
+    movu       m2, [r0 + r1 * 2 + 16]
+    movu       m3, [r0 + r5 + 16]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 16], m0
+    movu       [r2 + r3 * 1 + 16], m1
+    movu       [r2 + r3 * 2 + 16], m2
+    movu       [r2 + r4 + 16], m3
+
+    movu       m0, [r0 + 32]                    ; byte columns 32..47
+    movu       m1, [r0 + r1 + 32]
+    movu       m2, [r0 + r1 * 2 + 32]
+    movu       m3, [r0 + r5 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 32], m0
+    movu       [r2 + r3 * 1 + 32], m1
+    movu       [r2 + r3 * 2 + 32], m2
+    movu       [r2 + r4 + 32], m3
+
+    movu       m0, [r0 + 48]                    ; byte columns 48..63
+    movu       m1, [r0 + r1 + 48]
+    movu       m2, [r0 + r1 * 2 + 48]
+    movu       m3, [r0 + r5 + 48]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 48], m0
+    movu       [r2 + r3 * 1 + 48], m1
+    movu       [r2 + r3 * 2 + 48], m2
+    movu       [r2 + r4 + 48], m3
+
+    movu       m0, [r0 + 64]                    ; byte columns 64..79
+    movu       m1, [r0 + r1 + 64]
+    movu       m2, [r0 + r1 * 2 + 64]
+    movu       m3, [r0 + r5 + 64]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 64], m0
+    movu       [r2 + r3 * 1 + 64], m1
+    movu       [r2 + r3 * 2 + 64], m2
+    movu       [r2 + r4 + 64], m3
+
+    movu       m0, [r0 + 80]                    ; byte columns 80..95
+    movu       m1, [r0 + r1 + 80]
+    movu       m2, [r0 + r1 * 2 + 80]
+    movu       m3, [r0 + r5 + 80]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 80], m0
+    movu       [r2 + r3 * 1 + 80], m1
+    movu       [r2 + r3 * 2 + 80], m2
+    movu       [r2 + r4 + 80], m3
+
+    movu       m0, [r0 + 96]                    ; byte columns 96..111
+    movu       m1, [r0 + r1 + 96]
+    movu       m2, [r0 + r1 * 2 + 96]
+    movu       m3, [r0 + r5 + 96]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 96], m0
+    movu       [r2 + r3 * 1 + 96], m1
+    movu       [r2 + r3 * 2 + 96], m2
+    movu       [r2 + r4 + 96], m3
+
+    movu       m0, [r0 + 112]                   ; byte columns 112..127
+    movu       m1, [r0 + r1 + 112]
+    movu       m2, [r0 + r1 * 2 + 112]
+    movu       m3, [r0 + r5 + 112]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 112], m0
+    movu       [r2 + r3 * 1 + 112], m1
+    movu       [r2 + r3 * 2 + 112], m2
+    movu       [r2 + r4 + 112], m3
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_64xN 64
+P2S_H_64xN 16
+P2S_H_64xN 32
+P2S_H_64xN 48
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_64xN_avx2 1                        ; %1 = block height (64-wide blocks, 4 ymm chunks per row)
+INIT_YMM avx2
+cglobal filterPixelToShort_64x%1, 3, 7, 3
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load height
+    mov        r6d, %1/4                        ; 4 rows processed per loop iteration
+
+    ; load constant
+    mova       m2, [pw_2000]                    ; offset subtracted after the shift
+
+.loop:                                          ; colon added (silences NASM orphan-label warning)
+    movu       m0, [r0]                         ; byte columns 0..31, rows 0 and 1
+    movu       m1, [r0 + r1]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+
+    movu       m0, [r0 + r1 * 2]                ; byte columns 0..31, rows 2 and 3
+    movu       m1, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 2], m0
+    movu       [r2 + r4], m1
+
+    movu       m0, [r0 + 32]                    ; byte columns 32..63
+    movu       m1, [r0 + r1 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 0 + 32], m0
+    movu       [r2 + r3 * 1 + 32], m1
+
+    movu       m0, [r0 + r1 * 2 + 32]
+    movu       m1, [r0 + r5 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 2 + 32], m0
+    movu       [r2 + r4 + 32], m1
+
+    movu       m0, [r0 + 64]                    ; byte columns 64..95
+    movu       m1, [r0 + r1 + 64]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 0 + 64], m0
+    movu       [r2 + r3 * 1 + 64], m1
+
+    movu       m0, [r0 + r1 * 2 + 64]
+    movu       m1, [r0 + r5 + 64]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 2 + 64], m0
+    movu       [r2 + r4 + 64], m1
+
+    movu       m0, [r0 + 96]                    ; byte columns 96..127
+    movu       m1, [r0 + r1 + 96]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 0 + 96], m0
+    movu       [r2 + r3 * 1 + 96], m1
+
+    movu       m0, [r0 + r1 * 2 + 96]
+    movu       m1, [r0 + r5 + 96]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 2 + 96], m0
+    movu       [r2 + r4 + 96], m1
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_64xN_avx2 64
+P2S_H_64xN_avx2 16
+P2S_H_64xN_avx2 32
+P2S_H_64xN_avx2 48
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_24xN 1                             ; %1 = block height (24-wide blocks, 3 xmm chunks per row)
+INIT_XMM ssse3
+cglobal filterPixelToShort_24x%1, 3, 7, 5
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load height
+    mov        r6d, %1/4                        ; 4 rows processed per loop iteration
+
+    ; load constant
+    mova       m4, [pw_2000]                    ; offset subtracted after the shift
+
+.loop:                                          ; colon added (silences NASM orphan-label warning)
+    movu       m0, [r0]                         ; byte columns 0..15, rows 0..3
+    movu       m1, [r0 + r1]
+    movu       m2, [r0 + r1 * 2]
+    movu       m3, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+    movu       [r2 + r3 * 2], m2
+    movu       [r2 + r4], m3
+
+    movu       m0, [r0 + 16]                    ; byte columns 16..31
+    movu       m1, [r0 + r1 + 16]
+    movu       m2, [r0 + r1 * 2 + 16]
+    movu       m3, [r0 + r5 + 16]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 16], m0
+    movu       [r2 + r3 * 1 + 16], m1
+    movu       [r2 + r3 * 2 + 16], m2
+    movu       [r2 + r4 + 16], m3
+
+    movu       m0, [r0 + 32]                    ; byte columns 32..47
+    movu       m1, [r0 + r1 + 32]
+    movu       m2, [r0 + r1 * 2 + 32]
+    movu       m3, [r0 + r5 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 32], m0
+    movu       [r2 + r3 * 1 + 32], m1
+    movu       [r2 + r3 * 2 + 32], m2
+    movu       [r2 + r4 + 32], m3
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_24xN 32
+P2S_H_24xN 64
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_24xN_avx2 1                        ; %1 = block height (24-wide: one ymm + one xmm store per row)
+INIT_YMM avx2
+cglobal filterPixelToShort_24x%1, 3, 7, 3
+    add        r1d, r1d                         ; srcStride: pixels -> bytes (16-bit samples)
+    mov        r3d, r3m
+    add        r3d, r3d                         ; dstStride: int16 units -> bytes
+    lea        r4, [r3 * 3]                     ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3]                     ; r5 = 3 * srcStride
+
+    ; load height
+    mov        r6d, %1/4                        ; 4 rows processed per loop iteration
+
+    ; load constant
+    mova       m2, [pw_2000]                    ; offset subtracted after the shift
+
+.loop:                                          ; colon added (silences NASM orphan-label warning)
+    movu       m0, [r0]                         ; row 0: first 16 + last 8 pixels
+    movu       m1, [r0 + 32]
+    psllw      m0, (14 - BIT_DEPTH)             ; dst = (src << (14 - BIT_DEPTH)) - pw_2000
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 0 + 32], xm1          ; only the low 128 bits: 8 remaining pixels
+
+    movu       m0, [r0 + r1]                    ; row 1
+    movu       m1, [r0 + r1 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+    movu       [r2 + r3 * 1], m0
+    movu       [r2 + r3 * 1 + 32], xm1
+
+    movu       m0, [r0 + r1 * 2]                ; row 2
+    movu       m1, [r0 + r1 * 2 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+    movu       [r2 + r3 * 2], m0
+    movu       [r2 + r3 * 2 + 32], xm1
+
+    movu       m0, [r0 + r5]                    ; row 3
+    movu       m1, [r0 + r5 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+    movu       [r2 + r4], m0
+    movu       [r2 + r4 + 32], xm1
+
+    lea        r0, [r0 + r1 * 4]                ; advance 4 source rows
+    lea        r2, [r2 + r3 * 4]                ; advance 4 destination rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_24xN_avx2 32
+P2S_H_24xN_avx2 64
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_12xN 1 ; emits filterPixelToShort_12x%1 (SSSE3, 16-bit pixels): dst = (src << (14 - BIT_DEPTH)) - 0x2000
+INIT_XMM ssse3
+cglobal filterPixelToShort_12x%1, 3, 7, 3
+    add        r1d, r1d ; srcStride: sample units -> byte units (samples are 16-bit)
+    mov        r3d, r3m
+    add        r3d, r3d ; dstStride: element units -> byte units
+    lea        r4, [r3 * 3] ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3] ; r5 = 3 * srcStride
+
+    ; load height (4 rows per loop iteration)
+    mov        r6d, %1/4
+
+    ; load constant
+    mova       m2, [pw_2000] ; bias subtracted from every shifted sample
+
+.loop: ; fix: label previously had no colon (orphan-label warning; inconsistent with .loop0: elsewhere)
+    movu       m0, [r0] ; rows 0 and 1, samples 0..7
+    movu       m1, [r0 + r1]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+
+    movu       m0, [r0 + r1 * 2] ; rows 2 and 3, samples 0..7
+    movu       m1, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psubw      m0, m2
+    psubw      m1, m2
+
+    movu       [r2 + r3 * 2], m0
+    movu       [r2 + r4], m1
+
+    movh       m0, [r0 + 16] ; samples 8..11 of rows 0/1 packed into one register
+    movhps     m0, [r0 + r1 + 16]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m2
+
+    movh       [r2 + r3 * 0 + 16], m0
+    movhps     [r2 + r3 * 1 + 16], m0
+
+    movh       m0, [r0 + r1 * 2 + 16] ; samples 8..11 of rows 2/3
+    movhps     m0, [r0 + r5 + 16]
+    psllw      m0, (14 - BIT_DEPTH)
+    psubw      m0, m2
+
+    movh       [r2 + r3 * 2 + 16], m0
+    movhps     [r2 + r4 + 16], m0
+
+    lea        r0, [r0 + r1 * 4] ; advance src by 4 rows
+    lea        r2, [r2 + r3 * 4] ; advance dst by 4 rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+%endmacro
+P2S_H_12xN 16
+P2S_H_12xN 32
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal filterPixelToShort_48x64, 3, 7, 5 ; 48-wide: six 8-sample xmm chunks per row, 4 rows per iteration
+    add        r1d, r1d ; srcStride: sample units -> byte units (samples are 16-bit)
+    mov        r3d, r3m
+    add        r3d, r3d ; dstStride: element units -> byte units
+    lea        r4, [r3 * 3] ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3] ; r5 = 3 * srcStride
+
+    ; load height (16 iterations x 4 rows = 64)
+    mov        r6d, 16
+
+    ; load constant
+    mova       m4, [pw_2000] ; bias subtracted from every shifted sample
+
+.loop: ; fix: label previously had no colon (orphan-label warning; inconsistent with .loop0: elsewhere)
+    movu       m0, [r0] ; columns 0..7 of rows 0..3
+    movu       m1, [r0 + r1]
+    movu       m2, [r0 + r1 * 2]
+    movu       m3, [r0 + r5]
+    psllw      m0, (14 - BIT_DEPTH) ; dst = (src << (14 - BIT_DEPTH)) - 0x2000
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 1], m1
+    movu       [r2 + r3 * 2], m2
+    movu       [r2 + r4], m3
+
+    movu       m0, [r0 + 16] ; columns 8..15
+    movu       m1, [r0 + r1 + 16]
+    movu       m2, [r0 + r1 * 2 + 16]
+    movu       m3, [r0 + r5 + 16]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 16], m0
+    movu       [r2 + r3 * 1 + 16], m1
+    movu       [r2 + r3 * 2 + 16], m2
+    movu       [r2 + r4 + 16], m3
+
+    movu       m0, [r0 + 32] ; columns 16..23
+    movu       m1, [r0 + r1 + 32]
+    movu       m2, [r0 + r1 * 2 + 32]
+    movu       m3, [r0 + r5 + 32]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 32], m0
+    movu       [r2 + r3 * 1 + 32], m1
+    movu       [r2 + r3 * 2 + 32], m2
+    movu       [r2 + r4 + 32], m3
+
+    movu       m0, [r0 + 48] ; columns 24..31
+    movu       m1, [r0 + r1 + 48]
+    movu       m2, [r0 + r1 * 2 + 48]
+    movu       m3, [r0 + r5 + 48]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 48], m0
+    movu       [r2 + r3 * 1 + 48], m1
+    movu       [r2 + r3 * 2 + 48], m2
+    movu       [r2 + r4 + 48], m3
+
+    movu       m0, [r0 + 64] ; columns 32..39
+    movu       m1, [r0 + r1 + 64]
+    movu       m2, [r0 + r1 * 2 + 64]
+    movu       m3, [r0 + r5 + 64]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 64], m0
+    movu       [r2 + r3 * 1 + 64], m1
+    movu       [r2 + r3 * 2 + 64], m2
+    movu       [r2 + r4 + 64], m3
+
+    movu       m0, [r0 + 80] ; columns 40..47
+    movu       m1, [r0 + r1 + 80]
+    movu       m2, [r0 + r1 * 2 + 80]
+    movu       m3, [r0 + r5 + 80]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psllw      m3, (14 - BIT_DEPTH)
+    psubw      m0, m4
+    psubw      m1, m4
+    psubw      m2, m4
+    psubw      m3, m4
+
+    movu       [r2 + r3 * 0 + 80], m0
+    movu       [r2 + r3 * 1 + 80], m1
+    movu       [r2 + r3 * 2 + 80], m2
+    movu       [r2 + r4 + 80], m3
+
+    lea        r0, [r0 + r1 * 4] ; advance src by 4 rows
+    lea        r2, [r2 + r3 * 4] ; advance dst by 4 rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal filterPixelToShort_48x64, 3, 7, 4 ; 48-wide: three 16-sample ymm chunks per row, 4 rows per iteration
+    add        r1d, r1d ; srcStride: sample units -> byte units (samples are 16-bit)
+    mov        r3d, r3m
+    add        r3d, r3d ; dstStride: element units -> byte units
+    lea        r4, [r3 * 3] ; r4 = 3 * dstStride
+    lea        r5, [r1 * 3] ; r5 = 3 * srcStride
+
+    ; load height (16 iterations x 4 rows = 64)
+    mov        r6d, 16
+
+    ; load constant
+    mova       m3, [pw_2000] ; bias subtracted from every shifted sample
+
+.loop: ; fix: label previously had no colon (orphan-label warning; inconsistent with .loop0: elsewhere)
+    movu       m0, [r0] ; row 0: columns 0..15, 16..31, 32..47
+    movu       m1, [r0 + 32]
+    movu       m2, [r0 + 64]
+    psllw      m0, (14 - BIT_DEPTH) ; dst = (src << (14 - BIT_DEPTH)) - 0x2000
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psubw      m0, m3
+    psubw      m1, m3
+    psubw      m2, m3
+    movu       [r2 + r3 * 0], m0
+    movu       [r2 + r3 * 0 + 32], m1
+    movu       [r2 + r3 * 0 + 64], m2
+
+    movu       m0, [r0 + r1] ; row 1
+    movu       m1, [r0 + r1 + 32]
+    movu       m2, [r0 + r1 + 64]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psubw      m0, m3
+    psubw      m1, m3
+    psubw      m2, m3
+    movu       [r2 + r3 * 1], m0
+    movu       [r2 + r3 * 1 + 32], m1
+    movu       [r2 + r3 * 1 + 64], m2
+
+    movu       m0, [r0 + r1 * 2] ; row 2
+    movu       m1, [r0 + r1 * 2 + 32]
+    movu       m2, [r0 + r1 * 2 + 64]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psubw      m0, m3
+    psubw      m1, m3
+    psubw      m2, m3
+    movu       [r2 + r3 * 2], m0
+    movu       [r2 + r3 * 2 + 32], m1
+    movu       [r2 + r3 * 2 + 64], m2
+
+    movu       m0, [r0 + r5] ; row 3 (3 * srcStride)
+    movu       m1, [r0 + r5 + 32]
+    movu       m2, [r0 + r5 + 64]
+    psllw      m0, (14 - BIT_DEPTH)
+    psllw      m1, (14 - BIT_DEPTH)
+    psllw      m2, (14 - BIT_DEPTH)
+    psubw      m0, m3
+    psubw      m1, m3
+    psubw      m2, m3
+    movu       [r2 + r4], m0 ; 3 * dstStride
+    movu       [r2 + r4 + 32], m1
+    movu       [r2 + r4 + 64], m2
+
+    lea        r0, [r0 + r1 * 4] ; advance src by 4 rows
+    lea        r2, [r2 + r3 * 4] ; advance dst by 4 rows
+
+    dec        r6d
+    jnz        .loop
+    RET
+
+
+;-----------------------------------------------------------------------------------------------------------------------------
+;void interp_horiz_ps_c(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+
+%macro IPFILTER_LUMA_PS_4xN_AVX2 1 ; emits interp_8tap_horiz_ps_4x%1 (AVX2, 16-bit pixels), x86-64 only
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_horiz_ps_4x%1, 6,8,7
+    mov                         r5d,               r5m ; r5d = isRowExt
+    mov                         r4d,               r4m ; r4d = coeffIdx
+    add                         r1d,               r1d ; srcStride in bytes (16-bit samples)
+    add                         r3d,               r3d ; dstStride in bytes
+
+%ifdef PIC
+    lea                         r6,                [tab_LumaCoeff]
+    lea                         r4,                [r4 * 8]
+    vbroadcasti128              m0,                [r6 + r4 * 2] ; 8-tap coeff row for coeffIdx (16 bytes broadcast)
+%else
+    lea                         r4,                [r4 * 8]
+    vbroadcasti128              m0,                [tab_LumaCoeff + r4 * 2]
+%endif
+
+    vbroadcasti128              m2,                [INTERP_OFFSET_PS]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - pw_2000
+
+    sub                         r0,                6 ; step src back 3 samples (half of 8-tap support)
+    test                        r5d,               r5d ; isRowExt?
+    mov                         r7d,               %1                                    ; loop count variable - height
+    jz                         .preloop
+    lea                         r6,                [r1 * 3]                              ; r6 = (N / 2 - 1) * srcStride
+    sub                         r0,                r6                                    ; r0(src) - 3 * srcStride
+    add                         r7d,               6                                     ;7 - 1(since last row not in loop)                            ; need extra 7 rows, just set a specially flag here, blkheight += N - 1  (7 - 3 = 4 ; since the last three rows not in loop)
+
+.preloop:
+    lea                         r6,                [r3 * 3] ; NOTE(review): r6 = 3*dstStride is never read below -- appears vestigial; confirm against upstream
+.loop: ; fix: label previously had no colon (orphan-label warning)
+    ; Row 0
+    movu                        xm3,                [r0]                                 ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    movu                        xm4,                [r0 + 2]                             ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    vinserti128                 m3,                 m3,                xm4,       1
+    movu                        xm4,                [r0 + 4]
+    movu                        xm5,                [r0 + 6]
+    vinserti128                 m4,                 m4,                xm5,       1
+    pmaddwd                     m3,                m0
+    pmaddwd                     m4,                m0
+    phaddd                      m3,                m4                                    ; DWORD [R1D R1C R0D R0C R1B R1A R0B R0A]
+
+    ; Row 1
+    movu                        xm4,                [r0 + r1]                            ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    movu                        xm5,                [r0 + r1 + 2]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    vinserti128                 m4,                 m4,                xm5,       1
+    movu                        xm5,                [r0 + r1 + 4]
+    movu                        xm6,                [r0 + r1 + 6]
+    vinserti128                 m5,                 m5,                xm6,       1
+    pmaddwd                     m4,                m0
+    pmaddwd                     m5,                m0
+    phaddd                      m4,                m5                                     ; DWORD [R3D R3C R2D R2C R3B R3A R2B R2A]
+    phaddd                      m3,                m4                                     ; all rows and col completed.
+
+    mova                        m5,                [interp8_hps_shuf]
+    vpermd                      m3,                m5,                  m3 ; reorder lanes so row 0 / row 1 results are contiguous
+    paddd                       m3,                m2 ; add INTERP_OFFSET_PS
+    vextracti128                xm4,               m3,                  1
+    psrad                       xm3,               2 ; >> 2 then saturating pack to int16
+    psrad                       xm4,               2
+    packssdw                    xm3,               xm3
+    packssdw                    xm4,               xm4
+
+    movq                        [r2],              xm3                                   ;row 0
+    movq                        [r2 + r3],         xm4                                   ;row 1
+    lea                         r0,                [r0 + r1 * 2]                         ; first loop src ->5th row(i.e 4)
+    lea                         r2,                [r2 + r3 * 2]                         ; first loop dst ->5th row(i.e 4)
+
+    sub                         r7d,               2
+    jg                          .loop
+    test                        r5d,               r5d ; isRowExt leaves one final odd row to emit
+    jz                          .end
+
+    ; Row 10
+    movu                        xm3,                [r0]
+    movu                        xm4,                [r0 + 2]
+    vinserti128                 m3,                 m3,                 xm4,      1
+    movu                        xm4,                [r0 + 4]
+    movu                        xm5,                [r0 + 6]
+    vinserti128                 m4,                 m4,                 xm5,      1
+    pmaddwd                     m3,                m0
+    pmaddwd                     m4,                m0
+    phaddd                      m3,                m4
+
+    ; Row11
+    phaddd                      m3,                m4                                    ; all rows and col completed. NOTE(review): reuses m4 (no row-11 load); only row 10 is stored below, so the duplicated lanes look unused -- confirm
+
+    mova                        m5,                [interp8_hps_shuf]
+    vpermd                      m3,                m5,                  m3
+    paddd                       m3,                m2
+    vextracti128                xm4,               m3,                  1
+    psrad                       xm3,               2
+    psrad                       xm4,               2
+    packssdw                    xm3,               xm3
+    packssdw                    xm4,               xm4
+
+    movq                        [r2],              xm3                                   ;row 0
+.end: ; fix: label previously had no colon (orphan-label warning)
+    RET
+%endif
+%endmacro
+
+    IPFILTER_LUMA_PS_4xN_AVX2 4
+    IPFILTER_LUMA_PS_4xN_AVX2 8
+    IPFILTER_LUMA_PS_4xN_AVX2 16
+
+%macro IPFILTER_LUMA_PS_8xN_AVX2 1 ; emits interp_8tap_horiz_ps_8x%1 (AVX2, 16-bit pixels), x86-64 only
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_horiz_ps_8x%1, 4, 6, 8
+    add                 r1d, r1d ; srcStride in bytes (16-bit samples)
+    add                 r3d, r3d ; dstStride in bytes
+    mov                 r4d, r4m ; coeffIdx
+    mov                 r5d, r5m ; isRowExt
+    shl                 r4d, 4 ; coeffIdx * 16 bytes (8 x int16 coefficients per filter)
+%ifdef PIC
+    lea                 r6, [tab_LumaCoeff]
+    vpbroadcastq        m0, [r6 + r4] ; low 4 taps
+    vpbroadcastq        m1, [r6 + r4 + 8] ; high 4 taps
+%else
+    vpbroadcastq        m0, [tab_LumaCoeff + r4]
+    vpbroadcastq        m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova                m3, [pb_shuf]
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]
+
+    ; register map
+    ; m0 , m1 interpolate coeff ; m2 = INTERP_OFFSET_PS, m3 = pb_shuf byte-shuffle
+
+    sub                 r0, 6 ; step src back 3 samples (half of 8-tap support)
+    test                r5d, r5d ; isRowExt: start 3 rows earlier and run height+7 rows
+    mov                 r4d, %1
+    jz                  .loop0
+    lea                 r6, [r1*3]
+    sub                 r0, r6
+    add                 r4d, 7
+
+.loop0: ; one output row (8 shorts) per iteration
+    vbroadcasti128      m4, [r0]
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m7, m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m7, m1
+    paddd               m4, m7
+
+    vbroadcasti128      m6, [r0 + 16]
+    pshufb              m5, m3
+    pshufb              m6, m3
+    pmaddwd             m5, m0
+    pmaddwd             m6, m1
+    paddd               m5, m6
+
+    phaddd              m4, m5
+    vpermq              m4, m4, q3120 ; undo lane interleave from phaddd
+    paddd               m4, m2 ; add INTERP_OFFSET_PS
+    vextracti128        xm5,m4, 1
+    psrad               xm4, 2 ; >> 2 then saturating pack to int16
+    psrad               xm5, 2
+    packssdw            xm4, xm5
+
+    movu                [r2], xm4
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+%endmacro
+
+    IPFILTER_LUMA_PS_8xN_AVX2 4
+    IPFILTER_LUMA_PS_8xN_AVX2 8
+    IPFILTER_LUMA_PS_8xN_AVX2 16
+    IPFILTER_LUMA_PS_8xN_AVX2 32
+
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_horiz_ps_24x32, 4, 6, 8 ; 8-tap horizontal PS filter, 24x32 block (AVX2, 16-bit pixels)
+    add                 r1d, r1d ; srcStride in bytes (16-bit samples)
+    add                 r3d, r3d ; dstStride in bytes
+    mov                 r4d, r4m ; coeffIdx
+    mov                 r5d, r5m ; isRowExt
+    shl                 r4d, 4 ; coeffIdx * 16 bytes (8 x int16 coefficients per filter)
+%ifdef PIC
+    lea                 r6, [tab_LumaCoeff]
+    vpbroadcastq        m0, [r6 + r4] ; low 4 taps
+    vpbroadcastq        m1, [r6 + r4 + 8] ; high 4 taps
+%else
+    vpbroadcastq        m0, [tab_LumaCoeff + r4]
+    vpbroadcastq        m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova                m3, [pb_shuf]
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]
+
+    ; register map
+    ; m0 , m1 interpolate coeff ; m2 = INTERP_OFFSET_PS, m3 = pb_shuf byte-shuffle
+
+    sub                 r0, 6 ; step src back 3 samples (half of 8-tap support)
+    test                r5d, r5d ; isRowExt: start 3 rows earlier and run height+7 rows
+    mov                 r4d, 32
+    jz                  .loop0
+    lea                 r6, [r1*3]
+    sub                 r0, r6
+    add                 r4d, 7
+
+.loop0: ; one 24-wide output row per iteration, in three 8-sample chunks
+%assign x 0
+%rep 24/8
+    vbroadcasti128      m4, [r0 + x]
+    vbroadcasti128      m5, [r0 + 8 + x]
+    pshufb              m4, m3
+    pshufb              m7, m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m7, m1
+    paddd               m4, m7
+
+    vbroadcasti128      m6, [r0 + 16 + x]
+    pshufb              m5, m3
+    pshufb              m6, m3
+    pmaddwd             m5, m0
+    pmaddwd             m6, m1
+    paddd               m5, m6
+
+    phaddd              m4, m5
+    vpermq              m4, m4, q3120 ; undo lane interleave from phaddd
+    paddd               m4, m2 ; add INTERP_OFFSET_PS
+    vextracti128        xm5,m4, 1
+    psrad               xm4, 2 ; >> 2 then saturating pack to int16
+    psrad               xm5, 2
+    packssdw            xm4, xm5
+
+    movu                [r2 + x], xm4
+    %assign x x+16
+    %endrep
+
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+
+
+%macro IPFILTER_LUMA_PS_32_64_AVX2 2 ; emits interp_8tap_horiz_ps_%1x%2 for widths that are multiples of 32 (plus 48), x86-64 only
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_horiz_ps_%1x%2, 4, 6, 8
+
+    add                 r1d, r1d ; srcStride in bytes (16-bit samples)
+    add                 r3d, r3d ; dstStride in bytes
+    mov                 r4d, r4m ; coeffIdx
+    mov                 r5d, r5m ; isRowExt
+    shl                 r4d, 4 ; coeffIdx * 16 bytes (8 x int16 coefficients per filter)
+%ifdef PIC
+    lea                 r6, [tab_LumaCoeff]
+    vpbroadcastq        m0, [r6 + r4] ; low 4 taps
+    vpbroadcastq        m1, [r6 + r4 + 8] ; high 4 taps
+%else
+    vpbroadcastq        m0, [tab_LumaCoeff + r4]
+    vpbroadcastq        m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova                m3, [pb_shuf]
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]
+
+    ; register map
+    ; m0 , m1 interpolate coeff ; m2 = INTERP_OFFSET_PS, m3 = pb_shuf byte-shuffle
+
+    sub                 r0, 6 ; step src back 3 samples (half of 8-tap support)
+    test                r5d, r5d ; isRowExt: start 3 rows earlier and run height+7 rows
+    mov                 r4d, %2
+    jz                  .loop0
+    lea                 r6, [r1*3]
+    sub                 r0, r6
+    add                 r4d, 7
+
+.loop0: ; one output row per iteration; each %rep emits 16 output samples
+%assign x 0
+%rep %1/16
+    vbroadcasti128      m4, [r0 + x] ; first 8 outputs of this 16-wide chunk
+    vbroadcasti128      m5, [r0 + 8 + x]
+    pshufb              m4, m3
+    pshufb              m7, m5, m3
+
+    pmaddwd             m4, m0
+    pmaddwd             m7, m1
+    paddd               m4, m7
+
+    vbroadcasti128      m6, [r0 + 16 + x]
+    pshufb              m5, m3
+    pshufb              m7, m6, m3
+
+    pmaddwd             m5, m0
+    pmaddwd             m7, m1
+    paddd               m5, m7
+
+    phaddd              m4, m5
+    vpermq              m4, m4, q3120 ; undo lane interleave from phaddd
+    paddd               m4, m2 ; add INTERP_OFFSET_PS
+    vextracti128        xm5,m4, 1
+    psrad               xm4, 2 ; >> 2 then saturating pack to int16
+    psrad               xm5, 2
+    packssdw            xm4, xm5
+
+    movu                [r2 + x], xm4
+
+    vbroadcasti128      m5, [r0 + 24 + x] ; second 8 outputs of this chunk
+    pshufb              m6, m3
+    pshufb              m7, m5, m3
+
+    pmaddwd             m6, m0
+    pmaddwd             m7, m1
+    paddd               m6, m7
+
+    vbroadcasti128      m7, [r0 + 32 + x]
+    pshufb              m5, m3
+    pshufb              m7, m3
+
+    pmaddwd             m5, m0
+    pmaddwd             m7, m1
+    paddd               m5, m7
+
+    phaddd              m6, m5
+    vpermq              m6, m6, q3120
+    paddd               m6, m2
+    vextracti128        xm5,m6, 1
+    psrad               xm6, 2
+    psrad               xm5, 2
+    packssdw            xm6, xm5
+
+    movu                [r2 + 16 + x], xm6
+    %assign x x+32
+    %endrep
+
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+%endmacro
+
+    IPFILTER_LUMA_PS_32_64_AVX2 32, 8
+    IPFILTER_LUMA_PS_32_64_AVX2 32, 16
+    IPFILTER_LUMA_PS_32_64_AVX2 32, 24
+    IPFILTER_LUMA_PS_32_64_AVX2 32, 32
+    IPFILTER_LUMA_PS_32_64_AVX2 32, 64
+
+    IPFILTER_LUMA_PS_32_64_AVX2 64, 16
+    IPFILTER_LUMA_PS_32_64_AVX2 64, 32
+    IPFILTER_LUMA_PS_32_64_AVX2 64, 48
+    IPFILTER_LUMA_PS_32_64_AVX2 64, 64
+
+    IPFILTER_LUMA_PS_32_64_AVX2 48, 64
+
+%macro IPFILTER_LUMA_PS_16xN_AVX2 1 ; emits interp_8tap_horiz_ps_16x%1 (AVX2, 16-bit pixels), x86-64 only
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_horiz_ps_16x%1, 4, 6, 8
+
+    add                 r1d, r1d ; srcStride in bytes (16-bit samples)
+    add                 r3d, r3d ; dstStride in bytes
+    mov                 r4d, r4m ; coeffIdx
+    mov                 r5d, r5m ; isRowExt
+    shl                 r4d, 4 ; coeffIdx * 16 bytes (8 x int16 coefficients per filter)
+%ifdef PIC
+    lea                 r6, [tab_LumaCoeff]
+    vpbroadcastq        m0, [r6 + r4] ; low 4 taps
+    vpbroadcastq        m1, [r6 + r4 + 8] ; high 4 taps
+%else
+    vpbroadcastq        m0, [tab_LumaCoeff + r4]
+    vpbroadcastq        m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova                m3, [pb_shuf]
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]
+
+    ; register map
+    ; m0 , m1 interpolate coeff ; m2 = INTERP_OFFSET_PS, m3 = pb_shuf byte-shuffle
+
+    sub                 r0, 6 ; step src back 3 samples (half of 8-tap support)
+    test                r5d, r5d ; isRowExt: start 3 rows earlier and run height+7 rows
+    mov                 r4d, %1
+    jz                  .loop0
+    lea                 r6, [r1*3]
+    sub                 r0, r6
+    add                 r4d, 7
+
+.loop0: ; one 16-wide output row per iteration, in two 8-sample halves
+    vbroadcasti128      m4, [r0] ; outputs 0..7
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m7, m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m7, m1
+    paddd               m4, m7
+
+    vbroadcasti128      m6, [r0 + 16]
+    pshufb              m5, m3
+    pshufb              m7, m6, m3
+    pmaddwd             m5, m0
+    pmaddwd             m7, m1
+    paddd               m5, m7
+
+    phaddd              m4, m5
+    vpermq              m4, m4, q3120 ; undo lane interleave from phaddd
+    paddd               m4, m2 ; add INTERP_OFFSET_PS
+    vextracti128        xm5, m4, 1
+    psrad               xm4, 2 ; >> 2 then saturating pack to int16
+    psrad               xm5, 2
+    packssdw            xm4, xm5
+    movu                [r2], xm4
+
+    vbroadcasti128      m5, [r0 + 24] ; outputs 8..15
+    pshufb              m6, m3
+    pshufb              m7, m5, m3
+    pmaddwd             m6, m0
+    pmaddwd             m7, m1
+    paddd               m6, m7
+
+    vbroadcasti128      m7, [r0 + 32]
+    pshufb              m5, m3
+    pshufb              m7, m3
+    pmaddwd             m5, m0
+    pmaddwd             m7, m1
+    paddd               m5, m7
+
+    phaddd              m6, m5
+    vpermq              m6, m6, q3120
+    paddd               m6, m2
+    vextracti128        xm5,m6, 1
+    psrad               xm6, 2
+    psrad               xm5, 2
+    packssdw            xm6, xm5
+    movu                [r2 + 16], xm6
+
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+%endmacro
+
+    IPFILTER_LUMA_PS_16xN_AVX2 4
+    IPFILTER_LUMA_PS_16xN_AVX2 8
+    IPFILTER_LUMA_PS_16xN_AVX2 12
+    IPFILTER_LUMA_PS_16xN_AVX2 16
+    IPFILTER_LUMA_PS_16xN_AVX2 32
+    IPFILTER_LUMA_PS_16xN_AVX2 64
+
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_horiz_ps_12x16, 4, 6, 8 ; 8-tap horizontal PS filter, 12x16 block (AVX2, 16-bit pixels)
+    add                 r1d, r1d ; srcStride in bytes (16-bit samples)
+    add                 r3d, r3d ; dstStride in bytes
+    mov                 r4d, r4m ; coeffIdx
+    mov                 r5d, r5m ; isRowExt
+    shl                 r4d, 4 ; coeffIdx * 16 bytes (8 x int16 coefficients per filter)
+%ifdef PIC
+    lea                 r6, [tab_LumaCoeff]
+    vpbroadcastq        m0, [r6 + r4] ; low 4 taps
+    vpbroadcastq        m1, [r6 + r4 + 8] ; high 4 taps
+%else
+    vpbroadcastq        m0, [tab_LumaCoeff + r4]
+    vpbroadcastq        m1, [tab_LumaCoeff + r4 + 8]
+%endif
+    mova                m3, [pb_shuf]
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]
+
+    ; register map
+    ; m0 , m1 interpolate coeff ; m2 = INTERP_OFFSET_PS, m3 = pb_shuf byte-shuffle
+
+    sub                 r0, 6 ; step src back 3 samples (half of 8-tap support)
+    test                r5d, r5d ; isRowExt: start 3 rows earlier and run height+7 rows
+    mov                 r4d, 16
+    jz                  .loop0
+    lea                 r6, [r1*3]
+    sub                 r0, r6
+    add                 r4d, 7
+
+.loop0: ; one 12-wide output row per iteration: 8 samples, then 4
+    vbroadcasti128      m4, [r0] ; outputs 0..7
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m7, m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m7, m1
+    paddd               m4, m7
+
+    vbroadcasti128      m6, [r0 + 16]
+    pshufb              m5, m3
+    pshufb              m7, m6, m3
+    pmaddwd             m5, m0
+    pmaddwd             m7, m1
+    paddd               m5, m7
+
+    phaddd              m4, m5
+    vpermq              m4, m4, q3120 ; undo lane interleave from phaddd
+    paddd               m4, m2 ; add INTERP_OFFSET_PS
+    vextracti128        xm5,m4, 1
+    psrad               xm4, 2 ; >> 2 then saturating pack to int16
+    psrad               xm5, 2
+    packssdw            xm4, xm5
+    movu                [r2], xm4
+
+    vbroadcasti128      m5, [r0 + 24] ; outputs 8..11 (4 samples, stored via movq)
+    pshufb              m6, m3
+    pshufb              m5, m3
+    pmaddwd             m6, m0
+    pmaddwd             m5, m1
+    paddd               m6, m5
+
+    phaddd              m6, m6
+    vpermq              m6, m6, q3120
+    paddd               xm6, xm2
+    psrad               xm6, 2
+    packssdw            xm6, xm6
+    movq                [r2 + 16], xm6
+
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+
+%macro IPFILTER_CHROMA_PS_8xN_AVX2 1 ; emits interp_4tap_horiz_ps_8x%1 (AVX2, 16-bit pixels), x86-64 only
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_horiz_ps_8x%1, 4, 7, 6
+    add                 r1d, r1d ; srcStride in bytes (16-bit samples)
+    add                 r3d, r3d ; dstStride in bytes
+    mov                 r4d, r4m ; coeffIdx
+    mov                 r5d, r5m ; isRowExt
+
+%ifdef PIC
+    lea                 r6, [tab_ChromaCoeff]
+    vpbroadcastq        m0, [r6 + r4 * 8] ; 4-tap coeff set (8 bytes broadcast)
+%else
+    vpbroadcastq        m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova                m3, [pb_shuf]
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]
+
+    ; register map
+    ; m0 , m1 interpolate coeff ; m2 = INTERP_OFFSET_PS, m3 = pb_shuf byte-shuffle
+
+    sub                 r0, 2 ; step src back 1 sample (half of 4-tap support)
+    test                r5d, r5d ; isRowExt: start 1 row earlier and run height+3 rows
+    mov                 r4d, %1
+    jz                  .loop0
+    sub                 r0, r1
+    add                 r4d, 3
+
+.loop0: ; one 8-wide output row per iteration
+    vbroadcasti128      m4, [r0]
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2 ; add INTERP_OFFSET_PS
+    vpermq              m4, m4, q3120 ; undo lane interleave from phaddd
+    psrad               m4, 2 ; >> 2 then saturating pack to int16
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2], xm4
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+%endmacro
+
+    IPFILTER_CHROMA_PS_8xN_AVX2 4
+    IPFILTER_CHROMA_PS_8xN_AVX2 8
+    IPFILTER_CHROMA_PS_8xN_AVX2 16
+    IPFILTER_CHROMA_PS_8xN_AVX2 32
+    IPFILTER_CHROMA_PS_8xN_AVX2 6
+    IPFILTER_CHROMA_PS_8xN_AVX2 2
+    IPFILTER_CHROMA_PS_8xN_AVX2 12
+    IPFILTER_CHROMA_PS_8xN_AVX2 64
+
+%macro IPFILTER_CHROMA_PS_16xN_AVX2 1 ; emits interp_4tap_horiz_ps_16x%1 (AVX2, 16-bit pixels), x86-64 only
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_horiz_ps_16x%1, 4, 7, 6
+    add                 r1d, r1d ; srcStride in bytes (16-bit samples)
+    add                 r3d, r3d ; dstStride in bytes
+    mov                 r4d, r4m ; coeffIdx
+    mov                 r5d, r5m ; isRowExt
+
+%ifdef PIC
+    lea                 r6, [tab_ChromaCoeff]
+    vpbroadcastq        m0, [r6 + r4 * 8] ; 4-tap coeff set (8 bytes broadcast)
+%else
+    vpbroadcastq        m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova                m3, [pb_shuf]
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]
+
+    ; register map
+    ; m0 , m1 interpolate coeff ; m2 = INTERP_OFFSET_PS, m3 = pb_shuf byte-shuffle
+
+    sub                 r0, 2 ; step src back 1 sample (half of 4-tap support)
+    test                r5d, r5d ; isRowExt: start 1 row earlier and run height+3 rows
+    mov                 r4d, %1
+    jz                  .loop0
+    sub                 r0, r1
+    add                 r4d, 3
+
+.loop0: ; one 16-wide output row per iteration, in two 8-sample halves
+    vbroadcasti128      m4, [r0] ; outputs 0..7
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2 ; add INTERP_OFFSET_PS
+    vpermq              m4, m4, q3120 ; undo lane interleave from phaddd
+    psrad               m4, 2 ; >> 2 then saturating pack to int16
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2], xm4
+
+    vbroadcasti128      m4, [r0 + 16] ; outputs 8..15
+    vbroadcasti128      m5, [r0 + 24]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 16], xm4
+
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+%endmacro
+
+IPFILTER_CHROMA_PS_16xN_AVX2 16
+IPFILTER_CHROMA_PS_16xN_AVX2 8
+IPFILTER_CHROMA_PS_16xN_AVX2 32
+IPFILTER_CHROMA_PS_16xN_AVX2 12
+IPFILTER_CHROMA_PS_16xN_AVX2 4
+IPFILTER_CHROMA_PS_16xN_AVX2 64
+IPFILTER_CHROMA_PS_16xN_AVX2 24
+
+%macro IPFILTER_CHROMA_PS_24xN_AVX2 1 ; emits interp_4tap_horiz_ps_24x%1 (AVX2, 16-bit pixels), x86-64 only
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_horiz_ps_24x%1, 4, 7, 6
+    add                 r1d, r1d ; srcStride in bytes (16-bit samples)
+    add                 r3d, r3d ; dstStride in bytes
+    mov                 r4d, r4m ; coeffIdx
+    mov                 r5d, r5m ; isRowExt
+
+%ifdef PIC
+    lea                 r6, [tab_ChromaCoeff]
+    vpbroadcastq        m0, [r6 + r4 * 8] ; 4-tap coeff set (8 bytes broadcast)
+%else
+    vpbroadcastq        m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova                m3, [pb_shuf]
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]
+
+    ; register map
+    ; m0 , m1 interpolate coeff ; m2 = INTERP_OFFSET_PS, m3 = pb_shuf byte-shuffle
+
+    sub                 r0, 2 ; step src back 1 sample (half of 4-tap support)
+    test                r5d, r5d ; isRowExt: start 1 row earlier and run height+3 rows
+    mov                 r4d, %1
+    jz                  .loop0
+    sub                 r0, r1
+    add                 r4d, 3
+
+.loop0: ; one 24-wide output row per iteration, in three 8-sample chunks
+    vbroadcasti128      m4, [r0] ; outputs 0..7
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2 ; add INTERP_OFFSET_PS
+    vpermq              m4, m4, q3120 ; undo lane interleave from phaddd
+    psrad               m4, 2 ; >> 2 then saturating pack to int16
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2], xm4
+
+    vbroadcasti128      m4, [r0 + 16] ; outputs 8..15
+    vbroadcasti128      m5, [r0 + 24]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 16], xm4
+
+    vbroadcasti128      m4, [r0 + 32] ; outputs 16..23
+    vbroadcasti128      m5, [r0 + 40]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 32], xm4
+
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+%endmacro
+
+IPFILTER_CHROMA_PS_24xN_AVX2 32
+IPFILTER_CHROMA_PS_24xN_AVX2 64
+
+%macro IPFILTER_CHROMA_PS_12xN_AVX2 1  ; %1 = block height; emits interp_4tap_horiz_ps_12x%1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_horiz_ps_12x%1, 4, 7, 6  ; horizontal 4-tap chroma filter, 12 samples/row, 16-bit 'ps' output
+    add                 r1d, r1d  ; sample strides -> byte strides (16-bit samples)
+    add                 r3d, r3d
+    mov                 r4d, r4m  ; coefficient index
+    mov                 r5d, r5m  ; row-extension flag
+
+%ifdef PIC
+    lea                 r6, [tab_ChromaCoeff]
+    vpbroadcastq        m0, [r6 + r4 * 8]  ; four 16-bit taps broadcast to all lanes
+%else
+    vpbroadcastq        m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova                m3, [pb_shuf]  ; pairing shuffle for pmaddwd
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]  ; PS rounding offset
+
+    ; register map
+    ; m0 - interpolation coefficients (m1 unused); m2 - PS rounding offset; m3 - shuffle mask
+
+    sub                 r0, 2  ; 4-tap window starts one sample left of the output position
+    test                r5d, r5d
+    mov                 r4d, %1  ; row counter
+    jz                  .loop0
+    sub                 r0, r1  ; extended mode: one row earlier, 3 extra rows
+    add                 r4d, 3
+
+.loop0:  ; per row: 8 outputs via the full two-load group, then 4 more via a single load
+    vbroadcasti128      m4, [r0]  ; outputs 0..7
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120  ; undo phaddd lane interleave
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2], xm4
+
+    vbroadcasti128      m4, [r0 + 16]  ; outputs 8..11 only, hence the narrower movq store below
+    pshufb              m4, m3
+    pmaddwd             m4, m0
+    phaddd              m4, m4
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movq                [r2 + 16], xm4  ; store 4 x int16 (8 bytes), completing the 12-wide row
+
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+%endmacro
+
+IPFILTER_CHROMA_PS_12xN_AVX2 16
+IPFILTER_CHROMA_PS_12xN_AVX2 32
+
+%macro IPFILTER_CHROMA_PS_32xN_AVX2 1  ; %1 = block height; emits interp_4tap_horiz_ps_32x%1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_horiz_ps_32x%1, 4, 7, 6  ; horizontal 4-tap chroma filter, 32 samples/row, 16-bit 'ps' output
+    add                 r1d, r1d  ; sample strides -> byte strides (16-bit samples)
+    add                 r3d, r3d
+    mov                 r4d, r4m  ; coefficient index
+    mov                 r5d, r5m  ; row-extension flag
+
+%ifdef PIC
+    lea                 r6, [tab_ChromaCoeff]
+    vpbroadcastq        m0, [r6 + r4 * 8]  ; four 16-bit taps broadcast to all lanes
+%else
+    vpbroadcastq        m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova                m3, [pb_shuf]  ; pairing shuffle for pmaddwd
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]  ; PS rounding offset
+
+    ; register map
+    ; m0 - interpolation coefficients (m1 unused); m2 - PS rounding offset; m3 - shuffle mask
+
+    sub                 r0, 2  ; 4-tap window starts one sample left of the output position
+    test                r5d, r5d
+    mov                 r4d, %1  ; row counter
+    jz                  .loop0
+    sub                 r0, r1  ; extended mode: one row earlier, 3 extra rows
+    add                 r4d, 3
+
+.loop0:  ; per row: 4 identical groups of 8 outputs at source offsets 0/16/32/48 bytes
+    vbroadcasti128      m4, [r0]  ; outputs 0..7
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120  ; undo phaddd lane interleave
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2], xm4
+
+    vbroadcasti128      m4, [r0 + 16]  ; outputs 8..15
+    vbroadcasti128      m5, [r0 + 24]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 16], xm4
+
+    vbroadcasti128      m4, [r0 + 32]  ; outputs 16..23
+    vbroadcasti128      m5, [r0 + 40]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 32], xm4
+
+    vbroadcasti128      m4, [r0 + 48]  ; outputs 24..31
+    vbroadcasti128      m5, [r0 + 56]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 48], xm4
+
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+%endmacro
+
+IPFILTER_CHROMA_PS_32xN_AVX2 32
+IPFILTER_CHROMA_PS_32xN_AVX2 16
+IPFILTER_CHROMA_PS_32xN_AVX2 24
+IPFILTER_CHROMA_PS_32xN_AVX2 8
+IPFILTER_CHROMA_PS_32xN_AVX2 64
+IPFILTER_CHROMA_PS_32xN_AVX2 48
+
+
+%macro IPFILTER_CHROMA_PS_64xN_AVX2 1  ; %1 = block height; emits interp_4tap_horiz_ps_64x%1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_horiz_ps_64x%1, 4, 7, 6  ; horizontal 4-tap chroma filter, 64 samples/row, 16-bit 'ps' output
+    add                 r1d, r1d  ; sample strides -> byte strides (16-bit samples)
+    add                 r3d, r3d
+    mov                 r4d, r4m  ; coefficient index
+    mov                 r5d, r5m  ; row-extension flag
+
+%ifdef PIC
+    lea                 r6, [tab_ChromaCoeff]
+    vpbroadcastq        m0, [r6 + r4 * 8]  ; four 16-bit taps broadcast to all lanes
+%else
+    vpbroadcastq        m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova                m3, [pb_shuf]  ; pairing shuffle for pmaddwd
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]  ; PS rounding offset
+
+    ; register map
+    ; m0 - interpolation coefficients (m1 unused); m2 - PS rounding offset; m3 - shuffle mask
+
+    sub                 r0, 2  ; 4-tap window starts one sample left of the output position
+    test                r5d, r5d
+    mov                 r4d, %1  ; row counter
+    jz                  .loop0
+    sub                 r0, r1  ; extended mode: one row earlier, 3 extra rows
+    add                 r4d, 3
+
+.loop0:  ; per row: 8 identical groups of 8 outputs at source byte offsets 0,16,...,112
+    vbroadcasti128      m4, [r0]  ; outputs 0..7
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120  ; undo phaddd lane interleave
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2], xm4
+
+    vbroadcasti128      m4, [r0 + 16]  ; outputs 8..15
+    vbroadcasti128      m5, [r0 + 24]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 16], xm4
+
+    vbroadcasti128      m4, [r0 + 32]  ; outputs 16..23
+    vbroadcasti128      m5, [r0 + 40]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 32], xm4
+
+    vbroadcasti128      m4, [r0 + 48]  ; outputs 24..31
+    vbroadcasti128      m5, [r0 + 56]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 48], xm4
+
+    vbroadcasti128      m4, [r0 + 64]  ; outputs 32..39
+    vbroadcasti128      m5, [r0 + 72]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 64], xm4
+
+    vbroadcasti128      m4, [r0 + 80]  ; outputs 40..47
+    vbroadcasti128      m5, [r0 + 88]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 80], xm4
+
+    vbroadcasti128      m4, [r0 + 96]  ; outputs 48..55
+    vbroadcasti128      m5, [r0 + 104]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 96], xm4
+
+    vbroadcasti128      m4, [r0 + 112]  ; outputs 56..63
+    vbroadcasti128      m5, [r0 + 120]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 112], xm4
+
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+%endmacro
+
+IPFILTER_CHROMA_PS_64xN_AVX2 64
+IPFILTER_CHROMA_PS_64xN_AVX2 48
+IPFILTER_CHROMA_PS_64xN_AVX2 32
+IPFILTER_CHROMA_PS_64xN_AVX2 16
+
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_horiz_ps_48x64, 4, 7, 6  ; horizontal 4-tap chroma filter, 48x64 block (non-macro: only one height), 16-bit 'ps' output
+    add                 r1d, r1d  ; sample strides -> byte strides (16-bit samples)
+    add                 r3d, r3d
+    mov                 r4d, r4m  ; coefficient index
+    mov                 r5d, r5m  ; row-extension flag
+
+%ifdef PIC
+    lea                 r6, [tab_ChromaCoeff]
+    vpbroadcastq        m0, [r6 + r4 * 8]  ; four 16-bit taps broadcast to all lanes
+%else
+    vpbroadcastq        m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova                m3, [pb_shuf]  ; pairing shuffle for pmaddwd
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]  ; PS rounding offset
+
+    ; register map
+    ; m0 - interpolation coefficients (m1 unused); m2 - PS rounding offset; m3 - shuffle mask
+
+    sub                 r0, 2  ; 4-tap window starts one sample left of the output position
+    test                r5d, r5d
+    mov                 r4d, 64  ; row counter (fixed height)
+    jz                  .loop0
+    sub                 r0, r1  ; extended mode: one row earlier, 3 extra rows
+    add                 r4d, 3
+
+.loop0:  ; per row: 6 identical groups of 8 outputs at source byte offsets 0,16,...,80
+    vbroadcasti128      m4, [r0]  ; outputs 0..7
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120  ; undo phaddd lane interleave
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2], xm4
+
+    vbroadcasti128      m4, [r0 + 16]  ; outputs 8..15
+    vbroadcasti128      m5, [r0 + 24]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 16], xm4
+
+    vbroadcasti128      m4, [r0 + 32]  ; outputs 16..23
+    vbroadcasti128      m5, [r0 + 40]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 32], xm4
+
+    vbroadcasti128      m4, [r0 + 48]  ; outputs 24..31
+    vbroadcasti128      m5, [r0 + 56]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 48], xm4
+
+    vbroadcasti128      m4, [r0 + 64]  ; outputs 32..39
+    vbroadcasti128      m5, [r0 + 72]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 64], xm4
+
+    vbroadcasti128      m4, [r0 + 80]  ; outputs 40..47
+    vbroadcasti128      m5, [r0 + 88]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movu                [r2 + 80], xm4
+
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+
+
+%macro IPFILTER_CHROMA_PS_6xN_AVX2 1  ; %1 = block height; emits interp_4tap_horiz_ps_6x%1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_horiz_ps_6x%1, 4, 7, 6  ; horizontal 4-tap chroma filter, 6 samples/row, 16-bit 'ps' output
+    add                 r1d, r1d  ; sample strides -> byte strides (16-bit samples)
+    add                 r3d, r3d
+    mov                 r4d, r4m  ; coefficient index
+    mov                 r5d, r5m  ; row-extension flag
+
+%ifdef PIC
+    lea                 r6, [tab_ChromaCoeff]
+    vpbroadcastq        m0, [r6 + r4 * 8]  ; four 16-bit taps broadcast to all lanes
+%else
+    vpbroadcastq        m0, [tab_ChromaCoeff + r4 * 8]
+%endif
+    mova                m3, [pb_shuf]  ; pairing shuffle for pmaddwd
+    vbroadcasti128      m2, [INTERP_OFFSET_PS]  ; PS rounding offset
+
+    ; register map
+    ; m0 - interpolation coefficients (m1 unused); m2 - PS rounding offset; m3 - shuffle mask
+
+    sub                 r0, 2  ; 4-tap window starts one sample left of the output position
+    test                r5d, r5d
+    mov                 r4d, %1  ; row counter
+    jz                  .loop0
+    sub                 r0, r1  ; extended mode: one row earlier, 3 extra rows
+    add                 r4d, 3
+
+.loop0:  ; computes 8 results per row but stores only the 6 needed
+    vbroadcasti128      m4, [r0]
+    vbroadcasti128      m5, [r0 + 8]
+    pshufb              m4, m3
+    pshufb              m5, m3
+    pmaddwd             m4, m0
+    pmaddwd             m5, m0
+    phaddd              m4, m5
+    paddd               m4, m2
+    vpermq              m4, m4, q3120  ; undo phaddd lane interleave
+    psrad               m4, 2
+    vextracti128        xm5, m4, 1
+    packssdw            xm4, xm5
+    movq                [r2], xm4  ; samples 0..3 (8 bytes)
+    pextrd              [r2 + 8], xm4, 2  ; samples 4..5 (dword at byte offset 8)
+    add                 r2, r3
+    add                 r0, r1
+    dec                 r4d
+    jnz                 .loop0
+    RET
+%endif
+%endmacro
+
+    IPFILTER_CHROMA_PS_6xN_AVX2 8
+    IPFILTER_CHROMA_PS_6xN_AVX2 16
+
+%macro FILTER_VER_CHROMA_AVX2_8xN 2  ; vertical 4-tap chroma filter, 8 wide: %1 = variant (pp/ps/sp/ss), %2 = height (multiple of 16)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_vert_%1_8x%2, 4, 9, 15  ; (src, srcStride, dst, dstStride, coeffIdx); 16-bit samples
+    mov             r4d, r4m
+    shl             r4d, 6  ; each tab_ChromaCoeffVer entry is 64 bytes (two 32-byte halves: taps 0/1 and taps 2/3)
+    add             r1d, r1d  ; sample strides -> byte strides
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer]  ; PIC-safe table address
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]  ; r4 = 3 * srcStride
+    sub             r0, r1  ; 4-tap window starts one row above the output row
+%ifidn %1,pp
+    vbroadcasti128  m14, [pd_32]  ; pp: round for >>6
+%elifidn %1, sp
+    mova            m14, [pd_524800]  ; sp: round for >>10 (includes the intermediate-offset compensation)
+%else
+    vbroadcasti128  m14, [INTERP_OFFSET_PS]  ; ps: round for >>2 (ss adds no offset; m14 unused there)
+%endif
+    lea             r6, [r3 * 3]  ; r6 = 3 * dstStride
+    lea             r7, [r1 * 4]  ; NOTE(review): r7 appears unused below -- r0 advances via lea r0,[r0+r1*4]
+    mov             r8d, %2 / 16  ; outer counter: 16 output rows per iteration
+.loopH:
+    ; rows are pairwise interleaved (punpcklwd/punpckhwd) so pmaddwd combines two adjacent rows per tap-pair;
+    ; each output row n accumulates [r5] * rows(n,n+1) + [r5+mmsize] * rows(n+2,n+3)
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]       ; second tap pair for output row 0
+    paddd           m0, m4
+    pmaddwd         m2, [r5]                        ; first tap pair for output row 2
+
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    paddd           m5, m9
+    pmaddwd         m7, [r5]
+
+
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 1 * mmsize]
+    paddd           m6, m10
+    pmaddwd         m8, [r5]
+
+
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm11, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddwd         m11, m9, [r5 + 1 * mmsize]
+    paddd           m7, m11
+    pmaddwd         m9, [r5]
+
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhwd       xm12, xm10, xm11
+    punpcklwd       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddwd         m12, m10, [r5 + 1 * mmsize]
+    paddd           m8, m12
+    pmaddwd         m10, [r5]
+
+    lea             r0, [r0 + r1 * 4]
+    movu            xm12, [r0]                      ; m12 = row 12
+    punpckhwd       xm13, xm11, xm12
+    punpcklwd       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddwd         m13, m11, [r5 + 1 * mmsize]
+    paddd           m9, m13
+    pmaddwd         m11, [r5]
+
+%ifidn %1,ss
+    psrad           m0, 6  ; ss: no rounding offset, plain >>6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+    psrad           m4, 6
+    psrad           m5, 6
+%else
+    paddd           m0, m14  ; add variant-specific rounding offset (set in prologue)
+    paddd           m1, m14
+    paddd           m2, m14
+    paddd           m3, m14
+    paddd           m4, m14
+    paddd           m5, m14
+%ifidn %1,pp
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+    psrad           m4, 6
+    psrad           m5, 6
+%elifidn %1, sp
+    psrad           m0, 10
+    psrad           m1, 10
+    psrad           m2, 10
+    psrad           m3, 10
+    psrad           m4, 10
+    psrad           m5, 10
+%else
+    psrad           m0, 2  ; ps
+    psrad           m1, 2
+    psrad           m2, 2
+    psrad           m3, 2
+    psrad           m4, 2
+    psrad           m5, 2
+%endif
+%endif
+
+    packssdw        m0, m1  ; rows 0..5 -> int16 and store
+    packssdw        m2, m3
+    packssdw        m4, m5
+    vpermq          m0, m0, q3120  ; restore row order after in-lane pack
+    vpermq          m2, m2, q3120
+    vpermq          m4, m4, q3120
+    pxor            m5, m5  ; m5 = 0 (clip low bound, kept live for the rest of the iteration)
+    mova            m3, [pw_pixel_max]  ; clip high bound
+%ifidn %1,pp
+    CLIPW           m0, m5, m3  ; clamp only when output is pixels (pp/sp)
+    CLIPW           m2, m5, m3
+    CLIPW           m4, m5, m3
+%elifidn %1, sp
+    CLIPW           m0, m5, m3
+    CLIPW           m2, m5, m3
+    CLIPW           m4, m5, m3
+%endif
+
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    vextracti128    xm1, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm1
+    lea             r2, [r2 + r3 * 4]
+    vextracti128    xm1, m4, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm1
+
+    movu            xm13, [r0 + r1]                 ; m13 = row 13
+    punpckhwd       xm0, xm12, xm13
+    punpcklwd       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddwd         m0, m12, [r5 + 1 * mmsize]
+    paddd           m10, m0
+    pmaddwd         m12, [r5]
+
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm1, xm13, xm0
+    punpcklwd       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddwd         m1, m13, [r5 + 1 * mmsize]
+    paddd           m11, m1
+    pmaddwd         m13, [r5]
+
+%ifidn %1,ss
+    psrad           m6, 6
+    psrad           m7, 6
+%else
+    paddd           m6, m14
+    paddd           m7, m14
+%ifidn %1,pp
+    psrad           m6, 6
+    psrad           m7, 6
+%elifidn %1, sp
+    psrad           m6, 10
+    psrad           m7, 10
+%else
+    psrad           m6, 2
+    psrad           m7, 2
+%endif
+%endif
+
+    packssdw        m6, m7  ; rows 6..7 -> int16 and store
+    vpermq          m6, m6, q3120
+%ifidn %1,pp
+    CLIPW           m6, m5, m3
+%elifidn %1, sp
+    CLIPW           m6, m5, m3
+%endif
+    vextracti128    xm7, m6, 1
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r6], xm7
+
+    movu            xm1, [r0 + r4]                  ; m1 = row 15
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m2, m0, [r5 + 1 * mmsize]
+    paddd           m12, m2
+    pmaddwd         m0, [r5]
+
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 16
+    punpckhwd       xm6, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm6, 1
+    pmaddwd         m6, m1, [r5 + 1 * mmsize]
+    paddd           m13, m6
+    pmaddwd         m1, [r5]
+
+    movu            xm6, [r0 + r1]                  ; m6 = row 17 (lookahead rows feed outputs 14..15 only)
+    punpckhwd       xm4, xm2, xm6
+    punpcklwd       xm2, xm6
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m0, m2
+
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhwd       xm2, xm6, xm4
+    punpcklwd       xm6, xm4
+    vinserti128     m6, m6, xm2, 1
+    pmaddwd         m6, [r5 + 1 * mmsize]
+    paddd           m1, m6
+
+%ifidn %1,ss
+    psrad           m8, 6
+    psrad           m9, 6
+    psrad           m10, 6
+    psrad           m11, 6
+    psrad           m12, 6
+    psrad           m13, 6
+    psrad           m0, 6
+    psrad           m1, 6
+%else
+    paddd           m8, m14
+    paddd           m9, m14
+    paddd           m10, m14
+    paddd           m11, m14
+    paddd           m12, m14
+    paddd           m13, m14
+    paddd           m0, m14
+    paddd           m1, m14
+%ifidn %1,pp
+    psrad           m8, 6
+    psrad           m9, 6
+    psrad           m10, 6
+    psrad           m11, 6
+    psrad           m12, 6
+    psrad           m13, 6
+    psrad           m0, 6
+    psrad           m1, 6
+%elifidn %1, sp
+    psrad           m8, 10
+    psrad           m9, 10
+    psrad           m10, 10
+    psrad           m11, 10
+    psrad           m12, 10
+    psrad           m13, 10
+    psrad           m0, 10
+    psrad           m1, 10
+%else
+    psrad           m8, 2
+    psrad           m9, 2
+    psrad           m10, 2
+    psrad           m11, 2
+    psrad           m12, 2
+    psrad           m13, 2
+    psrad           m0, 2
+    psrad           m1, 2
+%endif
+%endif
+
+    packssdw        m8, m9  ; rows 8..15 -> int16 and store
+    packssdw        m10, m11
+    packssdw        m12, m13
+    packssdw        m0, m1
+    vpermq          m8, m8, q3120
+    vpermq          m10, m10, q3120
+    vpermq          m12, m12, q3120
+    vpermq          m0, m0, q3120
+%ifidn %1,pp
+    CLIPW           m8, m5, m3
+    CLIPW           m10, m5, m3
+    CLIPW           m12, m5, m3
+    CLIPW           m0, m5, m3
+%elifidn %1, sp
+    CLIPW           m8, m5, m3
+    CLIPW           m10, m5, m3
+    CLIPW           m12, m5, m3
+    CLIPW           m0, m5, m3
+%endif
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm13, m12, 1
+    vextracti128    xm1, m0, 1
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm8
+    movu            [r2 + r3], xm9
+    movu            [r2 + r3 * 2], xm10
+    movu            [r2 + r6], xm11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm12
+    movu            [r2 + r3], xm13
+    movu            [r2 + r3 * 2], xm0
+    movu            [r2 + r6], xm1
+    lea             r2, [r2 + r3 * 4]
+    dec             r8d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+FILTER_VER_CHROMA_AVX2_8xN pp, 16
+FILTER_VER_CHROMA_AVX2_8xN ps, 16
+FILTER_VER_CHROMA_AVX2_8xN ss, 16
+FILTER_VER_CHROMA_AVX2_8xN sp, 16
+FILTER_VER_CHROMA_AVX2_8xN pp, 32
+FILTER_VER_CHROMA_AVX2_8xN ps, 32
+FILTER_VER_CHROMA_AVX2_8xN sp, 32
+FILTER_VER_CHROMA_AVX2_8xN ss, 32
+FILTER_VER_CHROMA_AVX2_8xN pp, 64
+FILTER_VER_CHROMA_AVX2_8xN ps, 64
+FILTER_VER_CHROMA_AVX2_8xN sp, 64
+FILTER_VER_CHROMA_AVX2_8xN ss, 64
+
+%macro PROCESS_CHROMA_AVX2_8x2 3  ; compute two 8-wide vertical 4-tap rows; %1 = variant (clip skipped for ss), %2 = clip flag, %3 = shift
+    ; expects: r0 = src - srcStride, r1 = srcStride (bytes), r4 = 3*srcStride, r5 -> coeffs, m7 = rounding offset
+    ; leaves results in xm0 (row 0) and xm1 (row 1); caller stores them
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]                        ; taps 0/1 applied to rows 0,1
+
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m2, m2, [r5 + 1 * mmsize]       ; taps 2/3 applied to rows 2,3
+    paddd           m0, m2
+
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m3, m3, [r5 + 1 * mmsize]
+    paddd           m1, m3
+
+%ifnidn %1,ss
+    paddd           m0, m7  ; add variant rounding offset (ss uses none)
+    paddd           m1, m7
+%endif
+    psrad           m0, %3
+    psrad           m1, %3
+
+    packssdw        m0, m1
+    vpermq          m0, m0, q3120  ; restore row order after in-lane pack
+    pxor            m4, m4
+
+%if %2
+    CLIPW           m0, m4, [pw_pixel_max]  ; clamp to pixel range when output is pixels
+%endif
+    vextracti128    xm1, m0, 1  ; xm1 = second output row
+%endmacro
+
+
+%macro FILTER_VER_CHROMA_AVX2_8x2 3  ; emits interp_4tap_vert_%1_8x2; %1 = variant, %2 = clip flag, %3 = final shift
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x2, 4, 6, 8  ; vertical 4-tap chroma filter, 8x2 block, 16-bit samples
+    mov             r4d, r4m
+    shl             r4d, 6  ; 64 bytes per coefficient entry (two 32-byte tap pairs)
+    add             r1d, r1d  ; sample strides -> byte strides
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]  ; 3 * srcStride, used by PROCESS_CHROMA_AVX2_8x2
+    sub             r0, r1  ; 4-tap window starts one row above the output row
+%ifidn %1,pp
+    vbroadcasti128  m7, [pd_32]  ; pp: round for >>6
+%elifidn %1, sp
+    mova            m7, [pd_524800]  ; sp: round for >>10
+%else
+    vbroadcasti128  m7, [INTERP_OFFSET_PS]  ; ps: round for >>2 (ss skips the add)
+%endif
+
+    PROCESS_CHROMA_AVX2_8x2 %1, %2, %3  ; results in xm0 / xm1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    RET
+%endmacro
+
+FILTER_VER_CHROMA_AVX2_8x2 pp, 1, 6
+FILTER_VER_CHROMA_AVX2_8x2 ps, 0, 2
+FILTER_VER_CHROMA_AVX2_8x2 sp, 1, 10
+FILTER_VER_CHROMA_AVX2_8x2 ss, 0, 6
+
+%macro FILTER_VER_CHROMA_AVX2_4x2 3  ; emits interp_4tap_vert_%1_4x2; %1 = variant, %2 = clip flag, %3 = final shift
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x2, 4, 6, 7  ; vertical 4-tap chroma filter, 4x2 block, 16-bit samples
+    mov             r4d, r4m
+    add             r1d, r1d  ; sample strides -> byte strides
+    add             r3d, r3d
+    shl             r4d, 6  ; 64 bytes per coefficient entry
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]  ; 3 * srcStride
+    sub             r0, r1  ; window starts one row above the output row
+
+%ifidn %1,pp
+    vbroadcasti128  m6, [pd_32]  ; pp: round for >>6
+%elifidn %1, sp
+    mova            m6, [pd_524800]  ; sp: round for >>10
+%else
+    vbroadcasti128  m6, [INTERP_OFFSET_PS]  ; ps: round for >>2 (ss skips the add)
+%endif
+
+    movq            xm0, [r0]                       ; row 0
+    movq            xm1, [r0 + r1]                  ; row 1
+    punpcklwd       xm0, xm1
+
+    movq            xm2, [r0 + r1 * 2]              ; row 2
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]                        ; taps 0/1 on row pairs (0,1) and (1,2)
+
+    movq            xm3, [r0 + r4]                  ; row 3
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]                       ; row 4
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]       ; taps 2/3 on row pairs (2,3) and (3,4)
+    paddd           m0, m5
+
+%ifnidn %1, ss
+    paddd           m0, m6  ; variant rounding offset
+%endif
+    psrad           m0, %3
+    packssdw        m0, m0
+    pxor            m1, m1
+
+%if %2
+    CLIPW           m0, m1, [pw_pixel_max]  ; clamp when output is pixels
+%endif
+
+    vextracti128    xm2, m0, 1
+    lea             r4, [r3 * 3]  ; NOTE(review): r4 is not used after this -- only two rows are stored; looks like dead code, confirm
+    movq            [r2], xm0
+    movq            [r2 + r3], xm2
+    RET
+%endmacro
+
+FILTER_VER_CHROMA_AVX2_4x2 pp, 1, 6
+FILTER_VER_CHROMA_AVX2_4x2 ps, 0, 2
+FILTER_VER_CHROMA_AVX2_4x2 sp, 1, 10
+FILTER_VER_CHROMA_AVX2_4x2 ss, 0, 6
+
+%macro FILTER_VER_CHROMA_AVX2_4x4 3  ; emits interp_4tap_vert_%1_4x4; %1 = variant, %2 = clip flag, %3 = final shift
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x4, 4, 6, 7  ; vertical 4-tap chroma filter, 4x4 block, 16-bit samples
+    mov             r4d, r4m
+    add             r1d, r1d  ; sample strides -> byte strides
+    add             r3d, r3d
+    shl             r4d, 6  ; 64 bytes per coefficient entry
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]  ; 3 * srcStride
+    sub             r0, r1  ; window starts one row above the output row
+
+%ifidn %1,pp
+   vbroadcasti128  m6, [pd_32]  ; pp: round for >>6
+%elifidn %1, sp
+    mova            m6, [pd_524800]  ; sp: round for >>10
+%else
+    vbroadcasti128  m6, [INTERP_OFFSET_PS]  ; ps: round for >>2 (ss skips the add)
+%endif
+    movq            xm0, [r0]                       ; row 0
+    movq            xm1, [r0 + r1]                  ; row 1
+    punpcklwd       xm0, xm1
+
+    movq            xm2, [r0 + r1 * 2]              ; row 2
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]                        ; taps 0/1 for output rows 0,1
+
+    movq            xm3, [r0 + r4]                  ; row 3
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]                       ; row 4
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]       ; taps 2/3 for output rows 0,1
+    pmaddwd         m2, [r5]                        ; taps 0/1 for output rows 2,3
+    paddd           m0, m5
+
+    movq            xm3, [r0 + r1]                  ; row 5
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]              ; row 6
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m4, [r5 + 1 * mmsize]           ; taps 2/3 for output rows 2,3
+    paddd           m2, m4
+
+%ifnidn %1,ss
+    paddd           m0, m6  ; variant rounding offset
+    paddd           m2, m6
+%endif
+    psrad           m0, %3
+    psrad           m2, %3
+
+    packssdw        m0, m2  ; four output rows packed to int16
+    pxor            m1, m1
+%if %2
+    CLIPW           m0, m1, [pw_pixel_max]  ; clamp when output is pixels
+%endif
+
+    vextracti128    xm2, m0, 1
+    lea             r4, [r3 * 3]  ; r4 = 3 * dstStride for the fourth row store
+    movq            [r2], xm0
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r4], xm2
+    RET
+%endmacro
+
+FILTER_VER_CHROMA_AVX2_4x4 pp, 1, 6
+FILTER_VER_CHROMA_AVX2_4x4 ps, 0, 2
+FILTER_VER_CHROMA_AVX2_4x4 sp, 1, 10
+FILTER_VER_CHROMA_AVX2_4x4 ss, 0, 6
+
+
+%macro FILTER_VER_CHROMA_AVX2_4x8 3 ; 4x8 vertical 4-tap chroma filter; %1 = pp/ps/sp/ss, %2 = clip output to [0, pixel_max], %3 = right-shift after filter
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x8, 4, 7, 8
+    mov             r4d, r4m                        ; r4d = coefficient index
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+    add             r1d, r1d                        ; strides in bytes (16-bit samples)
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1                          ; start one row above: taps span rows -1..+2
+
+%ifidn %1,pp
+    vbroadcasti128  m7, [pd_32]                     ; rounding offset for shift-by-6
+%elifidn %1, sp
+    mova            m7, [pd_524800]                 ; NOTE(review): combined rounding/intermediate offset for sp — confirm against pd_524800 definition
+%else
+    vbroadcasti128  m7, [INTERP_OFFSET_PS]
+%endif
+    lea             r6, [r3 * 3]
+
+    movq            xm0, [r0]                       ; row 0
+    movq            xm1, [r0 + r1]                  ; row 1
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]              ; row 2
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+
+    movq            xm3, [r0 + r4]                  ; row 3
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]                       ; row 4
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+
+    movq            xm3, [r0 + r1]                  ; row 5
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]              ; row 6
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m5, m4, [r5 + 1 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m4, [r5]
+
+    movq            xm3, [r0 + r4]                  ; row 7
+    punpcklwd       xm1, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm6, [r0]                       ; row 8
+    punpcklwd       xm3, xm6
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [8 7 7 6]
+    pmaddwd         m5, m1, [r5 + 1 * mmsize]
+    paddd           m4, m5
+    pmaddwd         m1, [r5]
+
+    movq            xm3, [r0 + r1]                  ; row 9
+    punpcklwd       xm6, xm3
+    movq            xm5, [r0 + 2 * r1]              ; row 10
+    punpcklwd       xm3, xm5
+    vinserti128     m6, m6, xm3, 1                  ; m6 = [10 9 9 8]
+    pmaddwd         m6, [r5 + 1 * mmsize]
+    paddd           m1, m6
+%ifnidn %1,ss                                       ; ss variant applies no offset before shifting
+    paddd           m0, m7
+    paddd           m2, m7
+%endif
+    psrad           m0, %3
+    psrad           m2, %3
+    packssdw        m0, m2
+    pxor            m6, m6
+    mova            m3, [pw_pixel_max]
+%if %2
+    CLIPW           m0, m6, m3
+%endif
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm2
+%ifnidn %1,ss
+    paddd           m4, m7
+    paddd           m1, m7
+%endif
+    psrad           m4, %3
+    psrad           m1, %3
+    packssdw        m4, m1
+%if %2
+    CLIPW           m4, m6, m3
+%endif
+    vextracti128    xm1, m4, 1
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm4
+    movq            [r2 + r3], xm1
+    movhps          [r2 + r3 * 2], xm4
+    movhps          [r2 + r6], xm1
+    RET
+%endmacro
+
+; Instantiate 4x8 variants: args are (type, clip-to-pixel-range, right-shift)
+FILTER_VER_CHROMA_AVX2_4x8 pp, 1, 6
+FILTER_VER_CHROMA_AVX2_4x8 ps, 0, 2
+FILTER_VER_CHROMA_AVX2_4x8 sp, 1, 10
+
+%macro PROCESS_LUMA_AVX2_W4_16R_4TAP 3 ; emit 16 rows of a width-4 4-tap vertical filter; %1 = pp/ps/sp/ss, %2 = clip, %3 = shift; caller must set r4 = 3*r1, r5 = coeff ptr, r6 = 3*r3, m7 = rounding/offset constant
+    movq            xm0, [r0]                       ; row 0
+    movq            xm1, [r0 + r1]                  ; row 1
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]              ; row 2
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+    movq            xm3, [r0 + r4]                  ; row 3
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]                       ; row 4
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+    movq            xm3, [r0 + r1]                  ; row 5
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]              ; row 6
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m5, m4, [r5 + 1 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m4, [r5]
+    movq            xm3, [r0 + r4]                  ; row 7
+    punpcklwd       xm1, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm6, [r0]                       ; row 8
+    punpcklwd       xm3, xm6
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [8 7 7 6]
+    pmaddwd         m5, m1, [r5 + 1 * mmsize]
+    paddd           m4, m5
+    pmaddwd         m1, [r5]
+    movq            xm3, [r0 + r1]                  ; row 9
+    punpcklwd       xm6, xm3
+    movq            xm5, [r0 + 2 * r1]              ; row 10
+    punpcklwd       xm3, xm5
+    vinserti128     m6, m6, xm3, 1                  ; m6 = [10 9 9 8]
+    pmaddwd         m3, m6, [r5 + 1 * mmsize]
+    paddd           m1, m3
+    pmaddwd         m6, [r5]
+%ifnidn %1,ss                                       ; ss variant applies no offset before shifting
+    paddd           m0, m7
+    paddd           m2, m7
+%endif
+    psrad           m0, %3
+    psrad           m2, %3
+    packssdw        m0, m2
+    pxor            m3, m3
+%if %2
+    CLIPW           m0, m3, [pw_pixel_max]
+%endif
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm2
+    movq            xm2, [r0 + r4]                  ; row 11
+    punpcklwd       xm5, xm2
+    lea             r0, [r0 + 4 * r1]
+    movq            xm0, [r0]                       ; row 12
+    punpcklwd       xm2, xm0
+    vinserti128     m5, m5, xm2, 1                  ; m5 = [12 11 11 10]
+    pmaddwd         m2, m5, [r5 + 1 * mmsize]
+    paddd           m6, m2
+    pmaddwd         m5, [r5]
+    movq            xm2, [r0 + r1]                  ; row 13
+    punpcklwd       xm0, xm2
+    movq            xm3, [r0 + 2 * r1]              ; row 14
+    punpcklwd       xm2, xm3
+    vinserti128     m0, m0, xm2, 1                  ; m0 = [14 13 13 12]
+    pmaddwd         m2, m0, [r5 + 1 * mmsize]
+    paddd           m5, m2
+    pmaddwd         m0, [r5]
+%ifnidn %1,ss
+    paddd           m4, m7
+    paddd           m1, m7
+%endif
+    psrad           m4, %3
+    psrad           m1, %3
+    packssdw        m4, m1
+    pxor            m2, m2
+%if %2
+    CLIPW           m4, m2, [pw_pixel_max]
+%endif
+
+    vextracti128    xm1, m4, 1
+    lea             r2, [r2 + r3 * 4]               ; advance dst to rows 4..7
+    movq            [r2], xm4
+    movq            [r2 + r3], xm1
+    movhps          [r2 + r3 * 2], xm4
+    movhps          [r2 + r6], xm1
+    movq            xm4, [r0 + r4]                  ; row 15
+    punpcklwd       xm3, xm4
+    lea             r0, [r0 + 4 * r1]
+    movq            xm1, [r0]                       ; row 16
+    punpcklwd       xm4, xm1
+    vinserti128     m3, m3, xm4, 1                  ; m3 = [16 15 15 14]
+    pmaddwd         m4, m3, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m3, [r5]
+    movq            xm4, [r0 + r1]                  ; row 17
+    punpcklwd       xm1, xm4
+    movq            xm2, [r0 + 2 * r1]              ; row 18
+    punpcklwd       xm4, xm2
+    vinserti128     m1, m1, xm4, 1                  ; m1 = [18 17 17 16]
+    pmaddwd         m1, [r5 + 1 * mmsize]
+    paddd           m3, m1
+
+%ifnidn %1,ss
+    paddd           m6, m7
+    paddd           m5, m7
+%endif
+    psrad           m6, %3
+    psrad           m5, %3
+    packssdw        m6, m5
+    pxor            m1, m1
+%if %2
+    CLIPW           m6, m1, [pw_pixel_max]
+%endif
+    vextracti128    xm5, m6, 1
+    lea             r2, [r2 + r3 * 4]               ; advance dst to rows 8..11
+    movq            [r2], xm6
+    movq            [r2 + r3], xm5
+    movhps          [r2 + r3 * 2], xm6
+    movhps          [r2 + r6], xm5
+%ifnidn %1,ss
+    paddd           m0, m7
+    paddd           m3, m7
+%endif
+    psrad           m0, %3
+    psrad           m3, %3
+    packssdw        m0, m3
+%if %2
+    CLIPW           m0, m1, [pw_pixel_max]
+%endif
+    vextracti128    xm3, m0, 1
+    lea             r2, [r2 + r3 * 4]               ; advance dst to rows 12..15; caller advances past them
+    movq            [r2], xm0
+    movq            [r2 + r3], xm3
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm3
+%endmacro
+
+
+%macro FILTER_VER_CHROMA_AVX2_4xN 4 ; %1 = pp/ps/sp/ss, %2 = height (multiple of 16), %3 = clip, %4 = shift
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x%2, 4, 8, 8
+    mov             r4d, r4m
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+    add             r1d, r1d                        ; strides in bytes (16-bit samples)
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+    mov             r7d, %2 / 16                    ; iterations: 16 rows per pass
+%ifidn %1,pp
+    vbroadcasti128  m7, [pd_32]
+%elifidn %1, sp
+    mova            m7, [pd_524800]
+%else
+    vbroadcasti128  m7, [INTERP_OFFSET_PS]
+%endif
+    lea             r6, [r3 * 3]
+.loopH:
+    PROCESS_LUMA_AVX2_W4_16R_4TAP %1, %3, %4
+    lea             r2, [r2 + r3 * 4]               ; skip the final 4 rows stored inside the macro
+    dec             r7d
+    jnz             .loopH
+    RET
+%endmacro
+
+FILTER_VER_CHROMA_AVX2_4xN pp, 16, 1, 6             ; args: (type, height, clip, shift)
+FILTER_VER_CHROMA_AVX2_4xN ps, 16, 0, 2
+FILTER_VER_CHROMA_AVX2_4xN sp, 16, 1, 10
+FILTER_VER_CHROMA_AVX2_4xN ss, 16, 0, 6
+FILTER_VER_CHROMA_AVX2_4xN pp, 32, 1, 6
+FILTER_VER_CHROMA_AVX2_4xN ps, 32, 0, 2
+FILTER_VER_CHROMA_AVX2_4xN sp, 32, 1, 10
+FILTER_VER_CHROMA_AVX2_4xN ss, 32, 0, 6
+
+%macro FILTER_VER_CHROMA_AVX2_8x8 3 ; %1 = pp/ps/sp/ss, %2 = clip, %3 = shift (x86-64 only: needs 12 ymm regs)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_vert_%1_8x8, 4, 6, 12
+    mov             r4d, r4m
+    add             r1d, r1d                        ; strides in bytes (16-bit samples)
+    add             r3d, r3d
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+
+%ifidn %1,pp
+    vbroadcasti128  m11, [pd_32]                    ; rounding offset for shift-by-6
+%elifidn %1, sp
+    mova            m11, [pd_524800]
+%else
+    vbroadcasti128  m11, [INTERP_OFFSET_PS]
+%endif
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m4                          ; res row0 done(0,1,2,3)
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    pmaddwd         m3, [r5]
+    paddd           m1, m5                          ;res row1 done(1, 2, 3, 4)
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    pmaddwd         m4, [r5]
+    paddd           m2, m6                          ;res row2 done(2,3,4,5)
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m7                          ;res row3 done(3,4,5,6)
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    pmaddwd         m6, [r5]
+    paddd           m4, m8                          ;res row4 done(4,5,6,7)
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    pmaddwd         m7, [r5]
+    paddd           m5, m9                          ;res row5 done(5,6,7,8)
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m8, [r5 + 1 * mmsize]
+    paddd           m6, m8                          ;res row6 done(6,7,8,9)
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm8, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm8, 1
+    pmaddwd         m9, [r5 + 1 * mmsize]
+    paddd           m7, m9                          ;res row7 done 7,8,9,10
+    lea             r4, [r3 * 3]
+%ifnidn %1,ss                                       ; ss variant applies no offset before shifting
+    paddd           m0, m11
+    paddd           m1, m11
+    paddd           m2, m11
+    paddd           m3, m11
+%endif
+    psrad           m0, %3
+    psrad           m1, %3
+    psrad           m2, %3
+    psrad           m3, %3
+    packssdw        m0, m1
+    packssdw        m2, m3
+    vpermq          m0, m0, q3120
+    vpermq          m2, m2, q3120
+    pxor            m1, m1
+    mova            m3, [pw_pixel_max]
+%if %2
+    CLIPW           m0, m1, m3
+    CLIPW           m2, m1, m3
+%endif
+    vextracti128    xm9, m0, 1
+    vextracti128    xm8, m2, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm9
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm8
+%ifnidn %1,ss
+    paddd           m4, m11
+    paddd           m5, m11
+    paddd           m6, m11
+    paddd           m7, m11
+%endif
+    psrad           m4, %3
+    psrad           m5, %3
+    psrad           m6, %3
+    psrad           m7, %3
+    packssdw        m4, m5
+    packssdw        m6, m7
+    vpermq          m4, m4, q3120
+    vpermq          m6, m6, q3120
+%if %2
+    CLIPW           m4, m1, m3
+    CLIPW           m6, m1, m3
+%endif
+    vextracti128    xm5, m4, 1
+    vextracti128    xm7, m6, 1
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r4], xm7
+    RET
+%endif
+%endmacro
+
+FILTER_VER_CHROMA_AVX2_8x8 pp, 1, 6                 ; args: (type, clip, shift)
+FILTER_VER_CHROMA_AVX2_8x8 ps, 0, 2
+FILTER_VER_CHROMA_AVX2_8x8 sp, 1, 10
+FILTER_VER_CHROMA_AVX2_8x8 ss, 0, 6
+
+%macro FILTER_VER_CHROMA_AVX2_8x6 3 ; %1 = pp/ps/sp/ss, %2 = clip, %3 = shift (x86-64 only: needs 12 ymm regs)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_vert_%1_8x6, 4, 6, 12
+    mov             r4d, r4m
+    add             r1d, r1d                        ; strides in bytes (16-bit samples)
+    add             r3d, r3d
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+
+%ifidn %1,pp
+    vbroadcasti128  m11, [pd_32]                    ; rounding offset for shift-by-6
+%elifidn %1, sp
+    mova            m11, [pd_524800]
+%else
+    vbroadcasti128  m11, [INTERP_OFFSET_PS]
+%endif
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m4                          ; r0 done(0,1,2,3)
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    pmaddwd         m3, [r5]
+    paddd           m1, m5                          ;r1 done(1, 2, 3, 4)
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    pmaddwd         m4, [r5]
+    paddd           m2, m6                          ;r2 done(2,3,4,5)
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m7                          ;r3 done(3,4,5,6)
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8                          ;r4 done(4,5,6,7)
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m7, m7, [r5 + 1 * mmsize]
+    paddd           m5, m7                          ;r5 done(5,6,7,8)
+    lea             r4, [r3 * 3]
+%ifnidn %1,ss                                       ; ss variant applies no offset before shifting
+    paddd           m0, m11
+    paddd           m1, m11
+    paddd           m2, m11
+    paddd           m3, m11
+%endif
+    psrad           m0, %3
+    psrad           m1, %3
+    psrad           m2, %3
+    psrad           m3, %3
+    packssdw        m0, m1
+    packssdw        m2, m3
+    vpermq          m0, m0, q3120
+    vpermq          m2, m2, q3120
+    pxor            m10, m10
+    mova            m9, [pw_pixel_max]
+%if %2
+    CLIPW           m0, m10, m9
+    CLIPW           m2, m10, m9
+%endif
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+%ifnidn %1,ss
+    paddd           m4, m11
+    paddd           m5, m11
+%endif
+    psrad           m4, %3
+    psrad           m5, %3
+    packssdw        m4, m5
+    vpermq          m4, m4, q3120                   ; q3120 == 11011000b, matching the other stores
+%if %2
+    CLIPW           m4, m10, m9
+%endif
+    vextracti128    xm5, m4, 1
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+    RET
+%endif
+%endmacro
+
+FILTER_VER_CHROMA_AVX2_8x6 pp, 1, 6                 ; args: (type, clip, shift)
+FILTER_VER_CHROMA_AVX2_8x6 ps, 0, 2
+FILTER_VER_CHROMA_AVX2_8x6 sp, 1, 10
+FILTER_VER_CHROMA_AVX2_8x6 ss, 0, 6
+
+%macro PROCESS_CHROMA_AVX2 3 ; filter 4 rows of width-8; results left in xm0..xm3; %1 = pp/ps/sp/ss, %2 = clip, %3 = shift; caller must set r4 = 3*r1, r5 = coeff ptr, m7 = rounding/offset constant
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m4, [r5 + 1 * mmsize]
+    paddd           m2, m4
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm4, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm4, 1
+    pmaddwd         m5, [r5 + 1 * mmsize]
+    paddd           m3, m5
+%ifnidn %1,ss                                       ; ss variant applies no offset before shifting
+    paddd           m0, m7
+    paddd           m1, m7
+    paddd           m2, m7
+    paddd           m3, m7
+%endif
+    psrad           m0, %3
+    psrad           m1, %3
+    psrad           m2, %3
+    psrad           m3, %3
+    packssdw        m0, m1
+    packssdw        m2, m3
+    vpermq          m0, m0, q3120
+    vpermq          m2, m2, q3120
+    pxor            m4, m4
+%if %2
+    CLIPW           m0, m4, [pw_pixel_max]
+    CLIPW           m2, m4, [pw_pixel_max]
+%endif
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+%endmacro
+
+
+%macro FILTER_VER_CHROMA_AVX2_8x4 3 ; %1 = pp/ps/sp/ss, %2 = clip, %3 = shift
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x4, 4, 6, 8
+    mov             r4d, r4m
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+    add             r1d, r1d                        ; strides in bytes (16-bit samples)
+    add             r3d, r3d
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer + r4]
+%endif
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,pp
+    vbroadcasti128  m7, [pd_32]
+%elifidn %1, sp
+    mova            m7, [pd_524800]
+%else
+    vbroadcasti128  m7, [INTERP_OFFSET_PS]
+%endif
+    PROCESS_CHROMA_AVX2 %1, %2, %3                  ; rows land in xm0..xm3
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    lea             r4, [r3 * 3]
+    movu            [r2 + r4], xm3
+    RET
+%endmacro
+
+FILTER_VER_CHROMA_AVX2_8x4 pp, 1, 6                 ; args: (type, clip, shift)
+FILTER_VER_CHROMA_AVX2_8x4 ps, 0, 2
+FILTER_VER_CHROMA_AVX2_8x4 sp, 1, 10
+FILTER_VER_CHROMA_AVX2_8x4 ss, 0, 6
+
+%macro FILTER_VER_CHROMA_AVX2_8x12 3 ; %1 = pp/ps/sp/ss, %2 = clip, %3 = shift (x86-64 only: needs 15 ymm regs)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_vert_%1_8x12, 4, 7, 15
+    mov             r4d, r4m
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+    add             r1d, r1d                        ; strides in bytes (16-bit samples)
+    add             r3d, r3d
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,pp
+    vbroadcasti128  m14, [pd_32]                    ; rounding offset for shift-by-6
+%elifidn %1, sp
+    mova            m14, [pd_524800]
+%else
+    vbroadcasti128  m14, [INTERP_OFFSET_PS]
+%endif
+    lea             r6, [r3 * 3]
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    paddd           m5, m9
+    pmaddwd         m7, [r5]
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 1 * mmsize]
+    paddd           m6, m10
+    pmaddwd         m8, [r5]
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm11, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddwd         m11, m9, [r5 + 1 * mmsize]
+    paddd           m7, m11
+    pmaddwd         m9, [r5]
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhwd       xm12, xm10, xm11
+    punpcklwd       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddwd         m12, m10, [r5 + 1 * mmsize]
+    paddd           m8, m12
+    pmaddwd         m10, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm12, [r0]                      ; m12 = row 12
+    punpckhwd       xm13, xm11, xm12
+    punpcklwd       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddwd         m13, m11, [r5 + 1 * mmsize]
+    paddd           m9, m13
+    pmaddwd         m11, [r5]
+%ifnidn %1,ss                                       ; ss variant applies no offset before shifting
+    paddd           m0, m14
+    paddd           m1, m14
+    paddd           m2, m14
+    paddd           m3, m14
+    paddd           m4, m14
+    paddd           m5, m14
+%endif
+    psrad           m0, %3
+    psrad           m1, %3
+    psrad           m2, %3
+    psrad           m3, %3
+    psrad           m4, %3
+    psrad           m5, %3
+    packssdw        m0, m1
+    packssdw        m2, m3
+    packssdw        m4, m5
+    vpermq          m0, m0, q3120
+    vpermq          m2, m2, q3120
+    vpermq          m4, m4, q3120
+    pxor            m5, m5
+    mova            m3, [pw_pixel_max]
+%if %2
+    CLIPW           m0, m5, m3
+    CLIPW           m2, m5, m3
+    CLIPW           m4, m5, m3
+%endif
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    vextracti128    xm1, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm1
+    lea             r2, [r2 + r3 * 4]
+    vextracti128    xm1, m4, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm1
+    movu            xm13, [r0 + r1]                 ; m13 = row 13
+    punpckhwd       xm0, xm12, xm13
+    punpcklwd       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddwd         m12, m12, [r5 + 1 * mmsize]
+    paddd           m10, m12
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm1, xm13, xm0
+    punpcklwd       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddwd         m13, m13, [r5 + 1 * mmsize]
+    paddd           m11, m13
+%ifnidn %1,ss
+    paddd           m6, m14
+    paddd           m7, m14
+    paddd           m8, m14
+    paddd           m9, m14
+    paddd           m10, m14
+    paddd           m11, m14
+%endif
+    psrad           m6, %3
+    psrad           m7, %3
+    psrad           m8, %3
+    psrad           m9, %3
+    psrad           m10, %3
+    psrad           m11, %3
+    packssdw        m6, m7
+    packssdw        m8, m9
+    packssdw        m10, m11
+    vpermq          m6, m6, q3120
+    vpermq          m8, m8, q3120
+    vpermq          m10, m10, q3120
+%if %2
+    CLIPW           m6, m5, m3
+    CLIPW           m8, m5, m3
+    CLIPW           m10, m5, m3
+%endif
+    vextracti128    xm7, m6, 1
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r6], xm7
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm8
+    movu            [r2 + r3], xm9
+    movu            [r2 + r3 * 2], xm10
+    movu            [r2 + r6], xm11
+    RET
+%endif
+%endmacro
+
+FILTER_VER_CHROMA_AVX2_8x12 pp, 1, 6                ; args: (type, clip, shift)
+FILTER_VER_CHROMA_AVX2_8x12 ps, 0, 2
+FILTER_VER_CHROMA_AVX2_8x12 sp, 1, 10
+FILTER_VER_CHROMA_AVX2_8x12 ss, 0, 6
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/ipfilter8.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,27826 @@
+;*****************************************************************************
+;* Copyright (C) 2013 x265 project
+;*
+;* Authors: Min Chen <chenm003@163.com>
+;*          Nabajit Deka <nabajit@multicorewareinc.com>
+;*          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32
+const tab_Tm,    db 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 ; sliding 4-byte source windows; presumably pshufb control masks for 4-tap horizontal filtering (used elsewhere in this file)
+                 db 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10
+                 db 8, 9,10,11, 9,10,11,12,10,11,12,13,11,12,13, 14
+
+const interp4_vpp_shuf, times 2 db 0, 4, 1, 5, 2, 6, 3, 7, 8, 12, 9, 13, 10, 14, 11, 15 ; byte interleave pattern, repeated per 128-bit lane (usage elsewhere in this file)
+
+const interp_vert_shuf, times 2 db 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 8, 7, 9 ; pairs adjacent rows' bytes for vertical filtering (usage elsewhere in this file)
+                        times 2 db 4, 6, 5, 7, 6, 8, 7, 9, 8, 10, 9, 11, 10, 12, 11, 13
+
+const interp4_vpp_shuf1, dd 0, 1, 1, 2, 2, 3, 3, 4 ; dword-index shuffle controls (usage elsewhere in this file)
+                         dd 2, 3, 3, 4, 4, 5, 5, 6
+
+const pb_8tap_hps_0, times 2 db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 ; overlapping byte-pair windows for the 8-tap horizontal ps path (usage elsewhere in this file)
+                     times 2 db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9,10
+                     times 2 db 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9,10,10,11,11,12
+                     times 2 db 6, 7, 7, 8, 8, 9, 9,10,10,11,11,12,12,13,13,14
+
+const tab_Lm,    db 0, 1, 2, 3, 4,  5,  6,  7,  1, 2, 3, 4,  5,  6,  7,  8 ; sliding 8-byte source windows; presumably pshufb masks for 8-tap horizontal filtering (used elsewhere in this file)
+                 db 2, 3, 4, 5, 6,  7,  8,  9,  3, 4, 5, 6,  7,  8,  9,  10
+                 db 4, 5, 6, 7, 8,  9,  10, 11, 5, 6, 7, 8,  9,  10, 11, 12
+                 db 6, 7, 8, 9, 10, 11, 12, 13, 7, 8, 9, 10, 11, 12, 13, 14
+
+const tab_Vm,    db 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 ; broadcast byte pairs 0-1 / 2-3 (usage elsewhere in this file)
+                 db 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3
+
+const tab_Cm,    db 0, 2, 1, 3, 0, 2, 1, 3, 0, 2, 1, 3, 0, 2, 1, 3 ; repeating 0,2,1,3 byte-index pattern (usage elsewhere in this file)
+
+const pd_526336, times 8 dd 8192*64+2048 ; 526336 = 0x2000*64 + 2048; presumably undoes the pw_2000 bias and rounds in 16-bit intermediate paths - used elsewhere in this file
+
+const tab_ChromaCoeff, db  0, 64,  0,  0 ; 4-tap chroma filter taps (bytes), one 4-byte row per 1/8-pel phase 0-7
+                       db -2, 58, 10, -2
+                       db -4, 54, 16, -2
+                       db -6, 46, 28, -4
+                       db -4, 36, 36, -4
+                       db -4, 28, 46, -6
+                       db -2, 16, 54, -4
+                       db -2, 10, 58, -2
+
+const tabw_ChromaCoeff, dw  0, 64,  0,  0 ; same taps as words; the SSE2/SSE3 kernels below index this as coeffIdx * 8 bytes (one movddup'd row)
+                        dw -2, 58, 10, -2
+                        dw -4, 54, 16, -2
+                        dw -6, 46, 28, -4
+                        dw -4, 36, 36, -4
+                        dw -4, 28, 46, -6
+                        dw -2, 16, 54, -4
+                        dw -2, 10, 58, -2
+
+const tab_ChromaCoeff_V, times 8 db 0, 64 ; tap pairs (c0,c1) then (c2,c3) broadcast across 16 bytes, per phase - vertical-filter layout
+                         times 8 db 0,  0
+
+                         times 8 db -2, 58
+                         times 8 db 10, -2
+
+                         times 8 db -4, 54
+                         times 8 db 16, -2
+
+                         times 8 db -6, 46
+                         times 8 db 28, -4
+
+                         times 8 db -4, 36
+                         times 8 db 36, -4
+
+                         times 8 db -4, 28
+                         times 8 db 46, -6
+
+                         times 8 db -2, 16
+                         times 8 db 54, -4
+
+                         times 8 db -2, 10
+                         times 8 db 58, -2
+
+const tab_ChromaCoeffV, times 4 dw 0, 64 ; word tap pairs broadcast across 16 bytes (128-bit vertical layout)
+                        times 4 dw 0, 0
+
+                        times 4 dw -2, 58
+                        times 4 dw 10, -2
+
+                        times 4 dw -4, 54
+                        times 4 dw 16, -2
+
+                        times 4 dw -6, 46
+                        times 4 dw 28, -4
+
+                        times 4 dw -4, 36
+                        times 4 dw 36, -4
+
+                        times 4 dw -4, 28
+                        times 4 dw 46, -6
+
+                        times 4 dw -2, 16
+                        times 4 dw 54, -4
+
+                        times 4 dw -2, 10
+                        times 4 dw 58, -2
+
+const pw_ChromaCoeffV,  times 8 dw 0, 64 ; word tap pairs broadcast across 32 bytes (256-bit vertical layout)
+                        times 8 dw 0, 0
+
+                        times 8 dw -2, 58
+                        times 8 dw 10, -2
+
+                        times 8 dw -4, 54
+                        times 8 dw 16, -2
+
+                        times 8 dw -6, 46
+                        times 8 dw 28, -4
+
+                        times 8 dw -4, 36
+                        times 8 dw 36, -4
+
+                        times 8 dw -4, 28
+                        times 8 dw 46, -6
+
+                        times 8 dw -2, 16
+                        times 8 dw 54, -4
+
+                        times 8 dw -2, 10
+                        times 8 dw 58, -2
+
+const tab_LumaCoeff,   db   0, 0,  0,  64,  0,   0,  0,  0 ; 8-tap luma filter taps (bytes), one 8-byte row per 1/4-pel phase 0-3
+                       db  -1, 4, -10, 58,  17, -5,  1,  0
+                       db  -1, 4, -11, 40,  40, -11, 4, -1
+                       db   0, 1, -5,  17,  58, -10, 4, -1
+
+const tabw_LumaCoeff,  dw   0, 0,  0,  64,  0,   0,  0,  0 ; same taps as words; IPFILTER_LUMA_sse2 below indexes this as (coeffIdx*2) * 8 = coeffIdx * 16 bytes
+                       dw  -1, 4, -10, 58,  17, -5,  1,  0
+                       dw  -1, 4, -11, 40,  40, -11, 4, -1
+                       dw   0, 1, -5,  17,  58, -10, 4, -1
+
+const tab_LumaCoeffV,   times 4 dw 0, 0 ; word tap pairs (c0,c1)(c2,c3)(c4,c5)(c6,c7) broadcast across 16 bytes, per phase - vertical layout
+                        times 4 dw 0, 64
+                        times 4 dw 0, 0
+                        times 4 dw 0, 0
+
+                        times 4 dw -1, 4
+                        times 4 dw -10, 58
+                        times 4 dw 17, -5
+                        times 4 dw 1, 0
+
+                        times 4 dw -1, 4
+                        times 4 dw -11, 40
+                        times 4 dw 40, -11
+                        times 4 dw 4, -1
+
+                        times 4 dw 0, 1
+                        times 4 dw -5, 17
+                        times 4 dw 58, -10
+                        times 4 dw 4, -1
+
+const pw_LumaCoeffVer,  times 8 dw 0, 0 ; word tap pairs broadcast across 32 bytes (256-bit vertical layout)
+                        times 8 dw 0, 64
+                        times 8 dw 0, 0
+                        times 8 dw 0, 0
+
+                        times 8 dw -1, 4
+                        times 8 dw -10, 58
+                        times 8 dw 17, -5
+                        times 8 dw 1, 0
+
+                        times 8 dw -1, 4
+                        times 8 dw -11, 40
+                        times 8 dw 40, -11
+                        times 8 dw 4, -1
+
+                        times 8 dw 0, 1
+                        times 8 dw -5, 17
+                        times 8 dw 58, -10
+                        times 8 dw 4, -1
+
+const pb_LumaCoeffVer,  times 16 db 0, 0 ; byte tap pairs broadcast across 32 bytes (256-bit vertical layout)
+                        times 16 db 0, 64
+                        times 16 db 0, 0
+                        times 16 db 0, 0
+
+                        times 16 db -1, 4
+                        times 16 db -10, 58
+                        times 16 db 17, -5
+                        times 16 db 1, 0
+
+                        times 16 db -1, 4
+                        times 16 db -11, 40
+                        times 16 db 40, -11
+                        times 16 db 4, -1
+
+                        times 16 db 0, 1
+                        times 16 db -5, 17
+                        times 16 db 58, -10
+                        times 16 db 4, -1
+
+const tab_LumaCoeffVer, times 8 db 0, 0 ; byte tap pairs broadcast across 16 bytes (128-bit vertical layout)
+                        times 8 db 0, 64
+                        times 8 db 0, 0
+                        times 8 db 0, 0
+
+                        times 8 db -1, 4
+                        times 8 db -10, 58
+                        times 8 db 17, -5
+                        times 8 db 1, 0
+
+                        times 8 db -1, 4
+                        times 8 db -11, 40
+                        times 8 db 40, -11
+                        times 8 db 4, -1
+
+                        times 8 db 0, 1
+                        times 8 db -5, 17
+                        times 8 db 58, -10
+                        times 8 db 4, -1
+
+const tab_LumaCoeffVer_32,  times 16 db 0, 0 ; byte tap pairs broadcast across 32 bytes (256-bit vertical layout)
+                            times 16 db 0, 64
+                            times 16 db 0, 0
+                            times 16 db 0, 0
+
+                            times 16 db -1, 4
+                            times 16 db -10, 58
+                            times 16 db 17, -5
+                            times 16 db 1, 0
+
+                            times 16 db -1, 4
+                            times 16 db -11, 40
+                            times 16 db 40, -11
+                            times 16 db 4, -1
+
+                            times 16 db 0, 1
+                            times 16 db -5, 17
+                            times 16 db 58, -10
+                            times 16 db 4, -1
+
+const tab_ChromaCoeffVer_32,    times 16 db 0, 64 ; 4-tap chroma byte tap pairs broadcast across 32 bytes (256-bit vertical layout)
+                                times 16 db 0, 0
+
+                                times 16 db -2, 58
+                                times 16 db 10, -2
+
+                                times 16 db -4, 54
+                                times 16 db 16, -2
+
+                                times 16 db -6, 46
+                                times 16 db 28, -4
+
+                                times 16 db -4, 36
+                                times 16 db 36, -4
+
+                                times 16 db -4, 28
+                                times 16 db 46, -6
+
+                                times 16 db -2, 16
+                                times 16 db 54, -4
+
+                                times 16 db -2, 10
+                                times 16 db 58, -2
+
+const tab_c_64_n64, times 8 db 64, -64 ; +64/-64 byte pairs (usage elsewhere in this file)
+
+const interp4_shuf, times 2 db 0, 1, 8, 9, 4, 5, 12, 13, 2, 3, 10, 11, 6, 7, 14, 15 ; word-gathering shuffle mask (usage elsewhere in this file)
+
+const interp4_horiz_shuf1,  db 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 ; sliding 4-byte windows for horizontal 4-tap filtering (usage elsewhere in this file)
+                            db 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
+
+const interp4_hpp_shuf,     times 2 db 0, 1, 2, 3, 1, 2, 3, 4, 8, 9, 10, 11, 9, 10, 11, 12 ; per-lane sliding windows (usage elsewhere in this file)
+
+const interp8_hps_shuf,     dd 0, 4, 1, 5, 2, 6, 3, 7 ; cross-lane dword permute for 8-tap hps (usage elsewhere in this file)
+
+ALIGN 32
+interp4_hps_shuf: times 2 db 0, 1, 2, 3, 1, 2, 3, 4, 8, 9, 10, 11, 9, 10, 11, 12 ; local (non-const) variant of interp4_hpp_shuf, 32-byte aligned
+
+SECTION .text
+
+cextern pb_128
+cextern pw_1
+cextern pw_32
+cextern pw_512
+cextern pw_2000
+cextern pw_8192
+
+%macro FILTER_H4_w2_2_sse2 0                    ; 4-tap horizontal pp, width 2, two rows at once; expects m4 = word tap row, m5 = [pw_32]; clobbers m0-m3 and r4
+    pxor        m3, m3
+    movd        m0, [srcq - 1]                  ; row 0: taps start one pixel left of the output position
+    movd        m2, [srcq]
+    punpckldq   m0, m2
+    punpcklbw   m0, m3                          ; zero-extend bytes to words
+    movd        m1, [srcq + srcstrideq - 1]     ; row 1
+    movd        m2, [srcq + srcstrideq]
+    punpckldq   m1, m2
+    punpcklbw   m1, m3
+    pmaddwd     m0, m4                          ; two-tap partial sums per dword lane
+    pmaddwd     m1, m4
+    packssdw    m0, m1
+    pshuflw     m1, m0, q2301                   ; swap adjacent words so the paddw completes each 4-tap sum
+    pshufhw     m1, m1, q2301
+    paddw       m0, m1
+    psrld       m0, 16
+    packssdw    m0, m0
+    paddw       m0, m5                          ; + 32 rounding, then >> 6
+    psraw       m0, 6
+    packuswb    m0, m0                          ; saturate to unsigned bytes
+    movd        r4, m0
+    mov         [dstq], r4w                     ; 2 pixels, row 0
+    shr         r4, 16
+    mov         [dstq + dststrideq], r4w        ; 2 pixels, row 1
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_2xN(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_H4_W2xN_sse3 1                    ; %1 = block height N (even); fully unrolled row loop
+INIT_XMM sse3
+cglobal interp_4tap_horiz_pp_2x%1, 4, 6, 6, src, srcstride, dst, dststride
+    mov         r4d,    r4m                     ; coeffIdx (fractional phase 0-7)
+    mova        m5,     [pw_32]                 ; rounding constant for the >> 6 normalization
+
+%ifdef PIC
+    lea         r5,     [tabw_ChromaCoeff]
+    movddup     m4,     [r5 + r4 * 8]           ; one 4 x dw tap row, duplicated into both qwords
+%else
+    movddup     m4,     [tabw_ChromaCoeff + r4 * 8]
+%endif
+
+%assign x 1
+%rep %1/2                                       ; two rows per iteration
+    FILTER_H4_w2_2_sse2
+%if x < %1/2
+    lea         srcq,   [srcq + srcstrideq * 2]
+    lea         dstq,   [dstq + dststrideq * 2]
+%endif
+%assign x x+1
+%endrep
+
+    RET
+
+%endmacro
+
+    FILTER_H4_W2xN_sse3 4
+    FILTER_H4_W2xN_sse3 8
+    FILTER_H4_W2xN_sse3 16
+
+%macro FILTER_H4_w4_2_sse2 0                    ; 4-tap horizontal pp, width 4, two rows at once; expects m4 = word tap row, m7 = [pw_32]; clobbers m0-m3, m5, m6
+    pxor        m5, m5
+    movd        m0, [srcq - 1]                  ; row 0, output pixels 0-1
+    movd        m6, [srcq]
+    punpckldq   m0, m6
+    punpcklbw   m0, m5                          ; zero-extend bytes to words
+    movd        m1, [srcq + 1]                  ; row 0, output pixels 2-3
+    movd        m6, [srcq + 2]
+    punpckldq   m1, m6
+    punpcklbw   m1, m5
+    movd        m2, [srcq + srcstrideq - 1]     ; row 1, output pixels 0-1
+    movd        m6, [srcq + srcstrideq]
+    punpckldq   m2, m6
+    punpcklbw   m2, m5
+    movd        m3, [srcq + srcstrideq + 1]     ; row 1, output pixels 2-3
+    movd        m6, [srcq + srcstrideq + 2]
+    punpckldq   m3, m6
+    punpcklbw   m3, m5
+    pmaddwd     m0, m4
+    pmaddwd     m1, m4
+    pmaddwd     m2, m4
+    pmaddwd     m3, m4
+    packssdw    m0, m1
+    packssdw    m2, m3
+    pshuflw     m1, m0, q2301                   ; adjacent-word swap + paddw completes each 4-tap sum
+    pshufhw     m1, m1, q2301
+    pshuflw     m3, m2, q2301
+    pshufhw     m3, m3, q2301
+    paddw       m0, m1
+    paddw       m2, m3
+    psrld       m0, 16
+    psrld       m2, 16
+    packssdw    m0, m2
+    paddw       m0, m7                          ; + 32 rounding, then >> 6
+    psraw       m0, 6
+    packuswb    m0, m2                          ; high half packs garbage from m2, but only the low 8 bytes are stored below
+    movd        [dstq], m0                      ; 4 pixels, row 0
+    psrldq      m0, 4
+    movd        [dstq + dststrideq], m0         ; 4 pixels, row 1
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_4xN(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_H4_W4xN_sse3 1                    ; %1 = block height N (even); fully unrolled row loop
+INIT_XMM sse3
+cglobal interp_4tap_horiz_pp_4x%1, 4, 6, 8, src, srcstride, dst, dststride
+    mov         r4d,    r4m                     ; coeffIdx (fractional phase 0-7)
+    mova        m7,     [pw_32]                 ; rounding constant for the >> 6 normalization
+
+%ifdef PIC
+    lea         r5,     [tabw_ChromaCoeff]
+    movddup     m4,     [r5 + r4 * 8]           ; one 4 x dw tap row, duplicated into both qwords
+%else
+    movddup     m4,     [tabw_ChromaCoeff + r4 * 8]
+%endif
+
+%assign x 1
+%rep %1/2                                       ; two rows per iteration
+    FILTER_H4_w4_2_sse2
+%if x < %1/2
+    lea         srcq,   [srcq + srcstrideq * 2]
+    lea         dstq,   [dstq + dststrideq * 2]
+%endif
+%assign x x+1
+%endrep
+
+    RET
+
+%endmacro
+
+    FILTER_H4_W4xN_sse3 2
+    FILTER_H4_W4xN_sse3 4
+    FILTER_H4_W4xN_sse3 8
+    FILTER_H4_W4xN_sse3 16
+    FILTER_H4_W4xN_sse3 32
+
+%macro FILTER_H4_w6_sse2 0                      ; 4-tap horizontal pp, width 6, one row; expects m6 = word tap row, m7 = [pw_32]; clobbers m0-m5 and r4
+    pxor        m4, m4
+    movh        m0, [srcq - 1]
+    movh        m5, [srcq]
+    punpckldq   m0, m5
+    movhlps     m2, m0                          ; high qword feeds the last two outputs (stored at dstq + 4)
+    punpcklbw   m0, m4                          ; zero-extend bytes to words
+    punpcklbw   m2, m4
+    movd        m1, [srcq + 1]
+    movd        m5, [srcq + 2]
+    punpckldq   m1, m5
+    punpcklbw   m1, m4
+    pmaddwd     m0, m6
+    pmaddwd     m1, m6
+    pmaddwd     m2, m6
+    packssdw    m0, m1
+    packssdw    m2, m2
+    pshuflw     m1, m0, q2301                   ; adjacent-word swap + paddw completes each 4-tap sum
+    pshufhw     m1, m1, q2301
+    pshuflw     m3, m2, q2301
+    paddw       m0, m1
+    paddw       m2, m3
+    psrld       m0, 16
+    psrld       m2, 16
+    packssdw    m0, m2
+    paddw       m0, m7                          ; + 32 rounding, then >> 6
+    psraw       m0, 6
+    packuswb    m0, m0
+    movd        [dstq], m0                      ; first 4 pixels
+    pextrw      r4d, m0, 2
+    mov         [dstq + 4], r4w                 ; remaining 2 pixels
+%endmacro
+
+%macro FILH4W8_sse2 1                           ; 4-tap horizontal pp for one 8-pixel chunk at column offset %1; expects m4 = zero, m6 = word tap row, m7 = [pw_32]
+    movh        m0, [srcq - 1 + %1]
+    movh        m5, [srcq + %1]
+    punpckldq   m0, m5
+    movhlps     m2, m0                          ; split into low/high 4-output groups
+    punpcklbw   m0, m4                          ; zero-extend bytes to words
+    punpcklbw   m2, m4
+    movh        m1, [srcq + 1 + %1]
+    movh        m5, [srcq + 2 + %1]
+    punpckldq   m1, m5
+    movhlps     m3, m1
+    punpcklbw   m1, m4
+    punpcklbw   m3, m4
+    pmaddwd     m0, m6
+    pmaddwd     m1, m6
+    pmaddwd     m2, m6
+    pmaddwd     m3, m6
+    packssdw    m0, m1
+    packssdw    m2, m3
+    pshuflw     m1, m0, q2301                   ; adjacent-word swap + paddw completes each 4-tap sum
+    pshufhw     m1, m1, q2301
+    pshuflw     m3, m2, q2301
+    pshufhw     m3, m3, q2301
+    paddw       m0, m1
+    paddw       m2, m3
+    psrld       m0, 16
+    psrld       m2, 16
+    packssdw    m0, m2
+    paddw       m0, m7                          ; + 32 rounding, then >> 6
+    psraw       m0, 6
+    packuswb    m0, m0
+    movh        [dstq + %1], m0                 ; 8 output pixels
+%endmacro
+
+%macro FILTER_H4_w8_sse2 0                      ; one row, width 8
+    FILH4W8_sse2 0
+%endmacro
+
+%macro FILTER_H4_w12_sse2 0                     ; one row, width 12: an 8-pixel chunk plus an inline 4-pixel tail
+    FILH4W8_sse2 0
+    movd        m1, [srcq - 1 + 8]              ; tail: pixels 8-11
+    movd        m3, [srcq + 8]
+    punpckldq   m1, m3
+    punpcklbw   m1, m4
+    movd        m2, [srcq + 1 + 8]
+    movd        m3, [srcq + 2 + 8]
+    punpckldq   m2, m3
+    punpcklbw   m2, m4
+    pmaddwd     m1, m6
+    pmaddwd     m2, m6
+    packssdw    m1, m2
+    pshuflw     m2, m1, q2301                   ; adjacent-word swap + paddw completes each 4-tap sum
+    pshufhw     m2, m2, q2301
+    paddw       m1, m2
+    psrld       m1, 16
+    packssdw    m1, m1
+    paddw       m1, m7                          ; + 32 rounding, then >> 6
+    psraw       m1, 6
+    packuswb    m1, m1
+    movd        [dstq + 8], m1
+%endmacro
+
+%macro FILTER_H4_w16_sse2 0                     ; one row, width 16
+    FILH4W8_sse2 0
+    FILH4W8_sse2 8
+%endmacro
+
+%macro FILTER_H4_w24_sse2 0                     ; one row, width 24
+    FILH4W8_sse2 0
+    FILH4W8_sse2 8
+    FILH4W8_sse2 16
+%endmacro
+
+%macro FILTER_H4_w32_sse2 0                     ; one row, width 32
+    FILH4W8_sse2 0
+    FILH4W8_sse2 8
+    FILH4W8_sse2 16
+    FILH4W8_sse2 24
+%endmacro
+
+%macro FILTER_H4_w48_sse2 0                     ; one row, width 48
+    FILH4W8_sse2 0
+    FILH4W8_sse2 8
+    FILH4W8_sse2 16
+    FILH4W8_sse2 24
+    FILH4W8_sse2 32
+    FILH4W8_sse2 40
+%endmacro
+
+%macro FILTER_H4_w64_sse2 0                     ; one row, width 64
+    FILH4W8_sse2 0
+    FILH4W8_sse2 8
+    FILH4W8_sse2 16
+    FILH4W8_sse2 24
+    FILH4W8_sse2 32
+    FILH4W8_sse2 40
+    FILH4W8_sse2 48
+    FILH4W8_sse2 56
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_sse3 2                   ; %1 = width, %2 = height; row loop around FILTER_H4_w%1_sse2
+INIT_XMM sse3
+cglobal interp_4tap_horiz_pp_%1x%2, 4, 6, 8, src, srcstride, dst, dststride
+    mov         r4d,        r4m                 ; coeffIdx (fractional phase 0-7)
+    mova        m7,         [pw_32]             ; rounding constant for the >> 6 normalization
+    pxor        m4,         m4                  ; zero register for byte->word unpacks
+
+%ifdef PIC
+    lea         r5,          [tabw_ChromaCoeff]
+    movddup     m6,       [r5 + r4 * 8]         ; one 4 x dw tap row, duplicated into both qwords
+%else
+    movddup     m6,       [tabw_ChromaCoeff + r4 * 8]
+%endif
+
+%assign x 1
+%rep %2                                         ; fully unrolled over the %2 rows
+    FILTER_H4_w%1_sse2
+%if x < %2
+    add         srcq,        srcstrideq
+    add         dstq,        dststrideq
+%endif
+%assign x x+1
+%endrep
+
+    RET
+
+%endmacro
+
+    IPFILTER_CHROMA_sse3 6,   8
+    IPFILTER_CHROMA_sse3 8,   2
+    IPFILTER_CHROMA_sse3 8,   4
+    IPFILTER_CHROMA_sse3 8,   6
+    IPFILTER_CHROMA_sse3 8,   8
+    IPFILTER_CHROMA_sse3 8,  16
+    IPFILTER_CHROMA_sse3 8,  32
+    IPFILTER_CHROMA_sse3 12, 16
+
+    IPFILTER_CHROMA_sse3 6,  16
+    IPFILTER_CHROMA_sse3 8,  12
+    IPFILTER_CHROMA_sse3 8,  64
+    IPFILTER_CHROMA_sse3 12, 32
+
+    IPFILTER_CHROMA_sse3 16,  4
+    IPFILTER_CHROMA_sse3 16,  8
+    IPFILTER_CHROMA_sse3 16, 12
+    IPFILTER_CHROMA_sse3 16, 16
+    IPFILTER_CHROMA_sse3 16, 32
+    IPFILTER_CHROMA_sse3 32,  8
+    IPFILTER_CHROMA_sse3 32, 16
+    IPFILTER_CHROMA_sse3 32, 24
+    IPFILTER_CHROMA_sse3 24, 32
+    IPFILTER_CHROMA_sse3 32, 32
+
+    IPFILTER_CHROMA_sse3 16, 24
+    IPFILTER_CHROMA_sse3 16, 64
+    IPFILTER_CHROMA_sse3 32, 48
+    IPFILTER_CHROMA_sse3 24, 64
+    IPFILTER_CHROMA_sse3 32, 64
+
+    IPFILTER_CHROMA_sse3 64, 64
+    IPFILTER_CHROMA_sse3 64, 32
+    IPFILTER_CHROMA_sse3 64, 48
+    IPFILTER_CHROMA_sse3 48, 64
+    IPFILTER_CHROMA_sse3 64, 16
+
+%macro FILTER_2 2                               ; 4-tap horizontal ps, 2 outputs: src column offset %1 -> dst byte offset %2; expects m0 = zero, m1 = word tap row, m2 = [pw_2000]
+    movd        m3,     [srcq + %1]
+    movd        m4,     [srcq + 1 + %1]
+    punpckldq   m3,     m4
+    punpcklbw   m3,     m0                      ; zero-extend bytes to words
+    pmaddwd     m3,     m1
+    packssdw    m3,     m3
+    pshuflw     m4,     m3, q2301               ; adjacent-word swap + paddw completes each 4-tap sum
+    paddw       m3,     m4
+    psrldq      m3,     2
+    psubw       m3,     m2                      ; ps output keeps 16-bit precision: no rounding, bias by -0x2000
+    movd        [dstq + %2], m3                 ; 2 int16 results
+%endmacro
+
+%macro FILTER_4 2                               ; 4-tap horizontal ps, 4 outputs: src column offset %1 -> dst byte offset %2; expects m0 = zero, m1 = word tap row, m2 = [pw_2000]
+    movd        m3,     [srcq + %1]
+    movd        m4,     [srcq + 1 + %1]
+    punpckldq   m3,     m4
+    punpcklbw   m3,     m0                      ; zero-extend bytes to words
+    pmaddwd     m3,     m1
+    movd        m4,     [srcq + 2 + %1]
+    movd        m5,     [srcq + 3 + %1]
+    punpckldq   m4,     m5
+    punpcklbw   m4,     m0
+    pmaddwd     m4,     m1
+    packssdw    m3,     m4
+    pshuflw     m4,     m3, q2301               ; adjacent-word swap + paddw completes each 4-tap sum
+    pshufhw     m4,     m4, q2301
+    paddw       m3,     m4
+    psrldq      m3,     2
+    pshufd      m3,     m3,     q3120           ; compact the four results into the low qword
+    psubw       m3,     m2                      ; ps output: no rounding, bias by -0x2000
+    movh        [dstq + %2], m3                 ; 4 int16 results
+%endmacro
+
+%macro FILTER_4TAP_HPS_sse3 2                   ; interp_4tap_horiz_ps_%1x%2: %1 = width, %2 = height; emits 16-bit intermediates; r5m = isRowExt
+INIT_XMM sse3
+cglobal interp_4tap_horiz_ps_%1x%2, 4, 7, 6, src, srcstride, dst, dststride
+    mov         r4d,    r4m                     ; coeffIdx (fractional phase 0-7)
+    add         dststrided, dststrided          ; dst stride is given in elements; each output is 2 bytes
+    mova        m2,     [pw_2000]               ; ps bias subtracted from every result
+    pxor        m0,     m0                      ; zero register for byte->word unpacks
+
+%ifdef PIC
+    lea         r6,     [tabw_ChromaCoeff]
+    movddup     m1,     [r6 + r4 * 8]           ; one 4 x dw tap row, duplicated into both qwords
+%else
+    movddup     m1,     [tabw_ChromaCoeff + r4 * 8]
+%endif
+
+    mov        r4d,     %2                      ; row counter
+    cmp        r5m,     byte 0                  ; isRowExt?
+    je         .loopH
+    sub        srcq,    srcstrideq              ; row extension: start one row above and emit height + 3 rows
+    add        r4d,     3
+
+.loopH:
+%assign x -1                                    ; taps start one pixel left of each output position
+%assign y 0
+%rep %1/4
+    FILTER_4 x,y
+%assign x x+4
+%assign y y+8
+%endrep
+%rep (%1 % 4)/2                                 ; 2-pixel tail for widths 2 and 6
+    FILTER_2 x,y
+%endrep
+    add         srcq,   srcstrideq
+    add         dstq,   dststrideq
+
+    dec         r4d
+    jnz         .loopH
+    RET
+
+%endmacro
+
+    FILTER_4TAP_HPS_sse3 2, 4
+    FILTER_4TAP_HPS_sse3 2, 8
+    FILTER_4TAP_HPS_sse3 2, 16
+    FILTER_4TAP_HPS_sse3 4, 2
+    FILTER_4TAP_HPS_sse3 4, 4
+    FILTER_4TAP_HPS_sse3 4, 8
+    FILTER_4TAP_HPS_sse3 4, 16
+    FILTER_4TAP_HPS_sse3 4, 32
+    FILTER_4TAP_HPS_sse3 6, 8
+    FILTER_4TAP_HPS_sse3 6, 16
+    FILTER_4TAP_HPS_sse3 8, 2
+    FILTER_4TAP_HPS_sse3 8, 4
+    FILTER_4TAP_HPS_sse3 8, 6
+    FILTER_4TAP_HPS_sse3 8, 8
+    FILTER_4TAP_HPS_sse3 8, 12
+    FILTER_4TAP_HPS_sse3 8, 16
+    FILTER_4TAP_HPS_sse3 8, 32
+    FILTER_4TAP_HPS_sse3 8, 64
+    FILTER_4TAP_HPS_sse3 12, 16
+    FILTER_4TAP_HPS_sse3 12, 32
+    FILTER_4TAP_HPS_sse3 16, 4
+    FILTER_4TAP_HPS_sse3 16, 8
+    FILTER_4TAP_HPS_sse3 16, 12
+    FILTER_4TAP_HPS_sse3 16, 16
+    FILTER_4TAP_HPS_sse3 16, 24
+    FILTER_4TAP_HPS_sse3 16, 32
+    FILTER_4TAP_HPS_sse3 16, 64
+    FILTER_4TAP_HPS_sse3 24, 32
+    FILTER_4TAP_HPS_sse3 24, 64
+    FILTER_4TAP_HPS_sse3 32,  8
+    FILTER_4TAP_HPS_sse3 32, 16
+    FILTER_4TAP_HPS_sse3 32, 24
+    FILTER_4TAP_HPS_sse3 32, 32
+    FILTER_4TAP_HPS_sse3 32, 48
+    FILTER_4TAP_HPS_sse3 32, 64
+    FILTER_4TAP_HPS_sse3 48, 64
+    FILTER_4TAP_HPS_sse3 64, 16
+    FILTER_4TAP_HPS_sse3 64, 32
+    FILTER_4TAP_HPS_sse3 64, 48
+    FILTER_4TAP_HPS_sse3 64, 64
+
+%macro FILTER_H8_W8_sse2 0                      ; 8-tap horizontal luma at column offset 'x' (an %assign from the caller); expects m3 = 8 x dw taps, m6 = zero; leaves 8 unnormalized word results in m1; clobbers m0, m4, m5, m7
+    movh        m1, [r0 + x - 3]                ; taps start 3 pixels left of each output position
+    movh        m4, [r0 + x - 2]
+    punpcklbw   m1, m6                          ; zero-extend bytes to words
+    punpcklbw   m4, m6
+    movh        m5, [r0 + x - 1]
+    movh        m0, [r0 + x]
+    punpcklbw   m5, m6
+    punpcklbw   m0, m6
+    pmaddwd     m1, m3                          ; two-tap partial sums per dword lane
+    pmaddwd     m4, m3
+    pmaddwd     m5, m3
+    pmaddwd     m0, m3
+    packssdw    m1, m4
+    packssdw    m5, m0
+    pshuflw     m4, m1, q2301                   ; pairwise add: fold adjacent partial sums
+    pshufhw     m4, m4, q2301
+    pshuflw     m0, m5, q2301
+    pshufhw     m0, m0, q2301
+    paddw       m1, m4
+    paddw       m5, m0
+    psrldq      m1, 2
+    psrldq      m5, 2
+    pshufd      m1, m1, q3120
+    pshufd      m5, m5, q3120
+    punpcklqdq  m1, m5                          ; m1 = sums for source offsets -3..0 (outputs 0-7)
+    movh        m7, [r0 + x + 1]
+    movh        m4, [r0 + x + 2]
+    punpcklbw   m7, m6
+    punpcklbw   m4, m6
+    movh        m5, [r0 + x + 3]
+    movh        m0, [r0 + x + 4]
+    punpcklbw   m5, m6
+    punpcklbw   m0, m6
+    pmaddwd     m7, m3
+    pmaddwd     m4, m3
+    pmaddwd     m5, m3
+    pmaddwd     m0, m3
+    packssdw    m7, m4
+    packssdw    m5, m0
+    pshuflw     m4, m7, q2301
+    pshufhw     m4, m4, q2301
+    pshuflw     m0, m5, q2301
+    pshufhw     m0, m0, q2301
+    paddw       m7, m4
+    paddw       m5, m0
+    psrldq      m7, 2
+    psrldq      m5, 2
+    pshufd      m7, m7, q3120
+    pshufd      m5, m5, q3120
+    punpcklqdq  m7, m5                          ; m7 = sums for source offsets +1..+4
+    pshuflw     m4, m1, q2301                   ; final fold: combine both halves into complete 8-tap sums
+    pshufhw     m4, m4, q2301
+    pshuflw     m0, m7, q2301
+    pshufhw     m0, m0, q2301
+    paddw       m1, m4
+    paddw       m7, m0
+    psrldq      m1, 2
+    psrldq      m7, 2
+    pshufd      m1, m1, q3120
+    pshufd      m7, m7, q3120
+    punpcklqdq  m1, m7                          ; m1 = 8 complete 8-tap sums (words)
+%endmacro
+
+%macro FILTER_H8_W4_sse2 0                      ; 8-tap horizontal luma, 4-pixel tail at column offset 'x'; expects m3 = 8 x dw taps, m6 = zero; leaves 4 unnormalized word results in the low qword of m1; clobbers m0, m4, m5
+    movh        m1, [r0 + x - 3]                ; taps start 3 pixels left of each output position
+    movh        m0, [r0 + x - 2]
+    punpcklbw   m1, m6                          ; zero-extend bytes to words
+    punpcklbw   m0, m6
+    movh        m4, [r0 + x - 1]
+    movh        m5, [r0 + x]
+    punpcklbw   m4, m6
+    punpcklbw   m5, m6
+    pmaddwd     m1, m3                          ; two-tap partial sums per dword lane
+    pmaddwd     m0, m3
+    pmaddwd     m4, m3
+    pmaddwd     m5, m3
+    packssdw    m1, m0
+    packssdw    m4, m5
+    pshuflw     m0, m1, q2301                   ; pairwise add: fold adjacent partial sums
+    pshufhw     m0, m0, q2301
+    pshuflw     m5, m4, q2301
+    pshufhw     m5, m5, q2301
+    paddw       m1, m0
+    paddw       m4, m5
+    psrldq      m1, 2
+    psrldq      m4, 2
+    pshufd      m1, m1, q3120
+    pshufd      m4, m4, q3120
+    punpcklqdq  m1, m4
+    pshuflw     m0, m1, q2301                   ; final fold into complete 8-tap sums
+    pshufhw     m0, m0, q2301
+    paddw       m1, m0
+    psrldq      m1, 2
+    pshufd      m1, m1, q3120                   ; 4 results compacted into the low qword
+%endmacro
+
+;----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;----------------------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_LUMA_sse2 3                     ; %1 = width, %2 = height, %3 = pp (pixel output) or ps (int16 output)
+INIT_XMM sse2
+cglobal interp_8tap_horiz_%3_%1x%2, 4,6,8
+    mov       r4d, r4m                          ; coeffIdx (quarter-pel phase 0-3)
+    add       r4d, r4d                          ; *2: tabw_LumaCoeff rows are 16 bytes, addressed below as r4 * 8
+    pxor      m6, m6                            ; zero register for byte->word unpacks
+
+%ifidn %3, ps
+    add       r3d, r3d                          ; ps writes 2 bytes per output
+    cmp       r5m, byte 0                       ; isRowExt; the flags survive until the je below (lea/movu/mov/mova do not touch flags)
+%endif
+
+%ifdef PIC
+    lea       r5, [tabw_LumaCoeff]
+    movu      m3, [r5 + r4 * 8]                 ; full 8 x dw tap row
+%else
+    movu      m3, [tabw_LumaCoeff + r4 * 8]
+%endif
+
+    mov       r4d, %2                           ; row counter
+
+%ifidn %3, pp
+    mova      m2, [pw_32]                       ; rounding constant for the >> 6 normalization
+%else
+    mova      m2, [pw_2000]                     ; ps bias
+    je        .loopH                            ; isRowExt == 0: plain height
+    lea       r5, [r1 + 2 * r1]
+    sub       r0, r5                            ; row extension: start 3 rows above and emit height + 7 rows
+    add       r4d, 7
+%endif
+
+.loopH:
+%assign x 0
+%rep %1 / 8
+    FILTER_H8_W8_sse2                           ; 8 unnormalized word sums in m1
+  %ifidn %3, pp
+    paddw     m1, m2                            ; + 32, >> 6, saturate to bytes
+    psraw     m1, 6
+    packuswb  m1, m1
+    movh      [r2 + x], m1
+  %else
+    psubw     m1, m2                            ; keep 16-bit precision, bias by -0x2000
+    movu      [r2 + 2 * x], m1
+  %endif
+%assign x x+8
+%endrep
+
+%rep (%1 % 8) / 4
+    FILTER_H8_W4_sse2                           ; 4-pixel tail for widths 4 and 12
+  %ifidn %3, pp
+    paddw     m1, m2
+    psraw     m1, 6
+    packuswb  m1, m1
+    movd      [r2 + x], m1
+  %else
+    psubw     m1, m2
+    movh      [r2 + 2 * x], m1
+  %endif
+%endrep
+
+    add       r0, r1
+    add       r2, r3
+
+    dec       r4d
+    jnz       .loopH
+    RET
+
+%endmacro
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+    IPFILTER_LUMA_sse2 4, 4, pp
+    IPFILTER_LUMA_sse2 4, 8, pp
+    IPFILTER_LUMA_sse2 8, 4, pp
+    IPFILTER_LUMA_sse2 8, 8, pp
+    IPFILTER_LUMA_sse2 16, 16, pp
+    IPFILTER_LUMA_sse2 16, 8, pp
+    IPFILTER_LUMA_sse2 8, 16, pp
+    IPFILTER_LUMA_sse2 16, 12, pp
+    IPFILTER_LUMA_sse2 12, 16, pp
+    IPFILTER_LUMA_sse2 16, 4, pp
+    IPFILTER_LUMA_sse2 4, 16, pp
+    IPFILTER_LUMA_sse2 32, 32, pp
+    IPFILTER_LUMA_sse2 32, 16, pp
+    IPFILTER_LUMA_sse2 16, 32, pp
+    IPFILTER_LUMA_sse2 32, 24, pp
+    IPFILTER_LUMA_sse2 24, 32, pp
+    IPFILTER_LUMA_sse2 32, 8, pp
+    IPFILTER_LUMA_sse2 8, 32, pp
+    IPFILTER_LUMA_sse2 64, 64, pp
+    IPFILTER_LUMA_sse2 64, 32, pp
+    IPFILTER_LUMA_sse2 32, 64, pp
+    IPFILTER_LUMA_sse2 64, 48, pp
+    IPFILTER_LUMA_sse2 48, 64, pp
+    IPFILTER_LUMA_sse2 64, 16, pp
+    IPFILTER_LUMA_sse2 16, 64, pp
+
+;----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;----------------------------------------------------------------------------------------------------------------------------
+    IPFILTER_LUMA_sse2 4, 4, ps
+    IPFILTER_LUMA_sse2 8, 8, ps
+    IPFILTER_LUMA_sse2 8, 4, ps
+    IPFILTER_LUMA_sse2 4, 8, ps
+    IPFILTER_LUMA_sse2 16, 16, ps
+    IPFILTER_LUMA_sse2 16, 8, ps
+    IPFILTER_LUMA_sse2 8, 16, ps
+    IPFILTER_LUMA_sse2 16, 12, ps
+    IPFILTER_LUMA_sse2 12, 16, ps
+    IPFILTER_LUMA_sse2 16, 4, ps
+    IPFILTER_LUMA_sse2 4, 16, ps
+    IPFILTER_LUMA_sse2 32, 32, ps
+    IPFILTER_LUMA_sse2 32, 16, ps
+    IPFILTER_LUMA_sse2 16, 32, ps
+    IPFILTER_LUMA_sse2 32, 24, ps
+    IPFILTER_LUMA_sse2 24, 32, ps
+    IPFILTER_LUMA_sse2 32, 8, ps
+    IPFILTER_LUMA_sse2 8, 32, ps
+    IPFILTER_LUMA_sse2 64, 64, ps
+    IPFILTER_LUMA_sse2 64, 32, ps
+    IPFILTER_LUMA_sse2 32, 64, ps
+    IPFILTER_LUMA_sse2 64, 48, ps
+    IPFILTER_LUMA_sse2 48, 64, ps
+    IPFILTER_LUMA_sse2 64, 16, ps
+    IPFILTER_LUMA_sse2 16, 64, ps
+
+%macro PROCESS_LUMA_W4_4R_sse2 0                ; 8-tap vertical luma, width 4, 4 rows; expects m0 = zero, r6 -> interleaved tap pairs (4 steps x 32 bytes); results: m2 = rows 1-2, m3 = rows 3-4 (unnormalized words); advances r0 by 8*r1; uses m4-m8 (xmm8 implies x86-64)
+    movd        m2,     [r0]
+    movd        m7,     [r0 + r1]
+    punpcklbw   m2,     m7                      ; m2=[0 1]
+
+    lea         r0,     [r0 + 2 * r1]
+    movd        m3,     [r0]
+    punpcklbw   m7,     m3                      ; m7=[1 2]
+    punpcklbw   m2,     m0                      ; widen interleaved row pairs to words for pmaddwd
+    punpcklbw   m7,     m0
+    pmaddwd     m2,     [r6 + 0 * 32]           ; taps (c0,c1)
+    pmaddwd     m7,     [r6 + 0 * 32]
+    packssdw    m2,     m7                      ; m2=[0+1 1+2]
+
+    movd        m7,     [r0 + r1]
+    punpcklbw   m3,     m7                      ; m3=[2 3]
+    lea         r0,     [r0 + 2 * r1]
+    movd        m5,     [r0]
+    punpcklbw   m7,     m5                      ; m7=[3 4]
+    punpcklbw   m3,     m0
+    punpcklbw   m7,     m0
+    pmaddwd     m4,     m3,     [r6 + 1 * 32]   ; taps (c2,c3)
+    pmaddwd     m6,     m7,     [r6 + 1 * 32]
+    packssdw    m4,     m6                      ; m4=[2+3 3+4]
+    paddw       m2,     m4                      ; m2=[0+1+2+3 1+2+3+4]                   Row1-2
+    pmaddwd     m3,     [r6 + 0 * 32]           ; same row pairs feed rows 3-4 with taps (c0,c1)
+    pmaddwd     m7,     [r6 + 0 * 32]
+    packssdw    m3,     m7                      ; m3=[2+3 3+4]                           Row3-4
+
+    movd        m7,     [r0 + r1]
+    punpcklbw   m5,     m7                      ; m5=[4 5]
+    lea         r0,     [r0 + 2 * r1]
+    movd        m4,     [r0]
+    punpcklbw   m7,     m4                      ; m7=[5 6]
+    punpcklbw   m5,     m0
+    punpcklbw   m7,     m0
+    pmaddwd     m6,     m5,     [r6 + 2 * 32]   ; taps (c4,c5)
+    pmaddwd     m8,     m7,     [r6 + 2 * 32]
+    packssdw    m6,     m8                      ; m6=[4+5 5+6]
+    paddw       m2,     m6                      ; m2=[0+1+2+3+4+5 1+2+3+4+5+6]           Row1-2
+    pmaddwd     m5,     [r6 + 1 * 32]
+    pmaddwd     m7,     [r6 + 1 * 32]
+    packssdw    m5,     m7                      ; m5=[4+5 5+6]
+    paddw       m3,     m5                      ; m3=[2+3+4+5 3+4+5+6]                   Row3-4
+
+    movd        m7,     [r0 + r1]
+    punpcklbw   m4,     m7                      ; m4=[6 7]
+    lea         r0,     [r0 + 2 * r1]
+    movd        m5,     [r0]
+    punpcklbw   m7,     m5                      ; m7=[7 8]
+    punpcklbw   m4,     m0
+    punpcklbw   m7,     m0
+    pmaddwd     m6,     m4,     [r6 + 3 * 32]   ; taps (c6,c7)
+    pmaddwd     m8,     m7,     [r6 + 3 * 32]
+    packssdw    m6,     m8                      ; m7=[6+7 7+8]
+    paddw       m2,     m6                      ; m2=[0+1+2+3+4+5+6+7 1+2+3+4+5+6+7+8]   Row1-2 end
+    pmaddwd     m4,     [r6 + 2 * 32]
+    pmaddwd     m7,     [r6 + 2 * 32]
+    packssdw    m4,     m7                      ; m4=[6+7 7+8]
+    paddw       m3,     m4                      ; m3=[2+3+4+5+6+7 3+4+5+6+7+8]           Row3-4
+
+    movd        m7,     [r0 + r1]
+    punpcklbw   m5,     m7                      ; m5=[8 9]
+    movd        m4,     [r0 + 2 * r1]
+    punpcklbw   m7,     m4                      ; m7=[9 10]
+    punpcklbw   m5,     m0
+    punpcklbw   m7,     m0
+    pmaddwd     m5,     [r6 + 3 * 32]           ; last tap pair for rows 3-4
+    pmaddwd     m7,     [r6 + 3 * 32]
+    packssdw    m5,     m7                      ; m5=[8+9 9+10]
+    paddw       m3,     m5                      ; m3=[2+3+4+5+6+7+8+9 3+4+5+6+7+8+9+10]  Row3-4 end
+%endmacro
+
+; Helper: vertically filter one 8-pixel-wide column over 4 output rows with the
+; 8-tap luma filter whose coefficients sit at r6 as four 32-byte word-pair
+; entries ([r6 + k*32], k = 0..3, one entry per source-row pair).
+; In:  r0 = src (already rewound by 3 rows by the caller), r1 = srcStride,
+;      m0 = zero (for byte->word widening via punpck).
+; Out: m2..m5 = output rows 1..4 as signed 16-bit filtered sums, before any
+;      rounding/offset (the caller applies pw_32>>6 or -pw_2000).
+; Clobbers m6..m10 and advances r0 by 8 source rows (four lea r0,[r0+2*r1]).
+; NOTE(review): uses m8..m10, so callers must be built for x86_64 (all
+; instantiations below are gated on ARCH_X86_64) -- confirm.
+%macro PROCESS_LUMA_W8_4R_sse2 0
+    movq        m7,     [r0]
+    movq        m6,     [r0 + r1]
+    punpcklbw   m7,     m6
+    punpcklbw   m2,     m7,     m0
+    punpckhbw   m7,     m0
+    pmaddwd     m2,     [r6 + 0 * 32]
+    pmaddwd     m7,     [r6 + 0 * 32]
+    packssdw    m2,     m7                      ; m2=[0+1]               Row1
+
+    lea         r0,     [r0 + 2 * r1]
+    movq        m7,     [r0]
+    punpcklbw   m6,     m7
+    punpcklbw   m3,     m6,     m0
+    punpckhbw   m6,     m0
+    pmaddwd     m3,     [r6 + 0 * 32]
+    pmaddwd     m6,     [r6 + 0 * 32]
+    packssdw    m3,     m6                      ; m3=[1+2]               Row2
+
+    movq        m6,     [r0 + r1]
+    punpcklbw   m7,     m6
+    punpckhbw   m8,     m7,     m0
+    punpcklbw   m7,     m0
+    pmaddwd     m4,     m7,     [r6 + 0 * 32]
+    pmaddwd     m9,     m8,     [r6 + 0 * 32]
+    packssdw    m4,     m9                      ; m4=[2+3]               Row3
+    pmaddwd     m7,     [r6 + 1 * 32]
+    pmaddwd     m8,     [r6 + 1 * 32]
+    packssdw    m7,     m8
+    paddw       m2,     m7                      ; m2=[0+1+2+3]           Row1
+
+    lea         r0,     [r0 + 2 * r1]
+    movq        m10,    [r0]
+    punpcklbw   m6,     m10
+    punpckhbw   m8,     m6,     m0
+    punpcklbw   m6,     m0
+    pmaddwd     m5,     m6,     [r6 + 0 * 32]
+    pmaddwd     m9,     m8,     [r6 + 0 * 32]
+    packssdw    m5,     m9                      ; m5=[3+4]               Row4
+    pmaddwd     m6,     [r6 + 1 * 32]
+    pmaddwd     m8,     [r6 + 1 * 32]
+    packssdw    m6,     m8
+    paddw       m3,     m6                      ; m3 = [1+2+3+4]         Row2
+
+    movq        m6,     [r0 + r1]
+    punpcklbw   m10,    m6
+    punpckhbw   m8,     m10,    m0
+    punpcklbw   m10,    m0
+    pmaddwd     m7,     m10,    [r6 + 1 * 32]
+    pmaddwd     m9,     m8,     [r6 + 1 * 32]
+    packssdw    m7,     m9
+    pmaddwd     m10,    [r6 + 2 * 32]
+    pmaddwd     m8,     [r6 + 2 * 32]
+    packssdw    m10,    m8
+    paddw       m2,     m10                     ; m2=[0+1+2+3+4+5]       Row1
+    paddw       m4,     m7                      ; m4=[2+3+4+5]           Row3
+
+    lea         r0,     [r0 + 2 * r1]
+    movq        m10,    [r0]
+    punpcklbw   m6,     m10
+    punpckhbw   m8,     m6,     m0
+    punpcklbw   m6,     m0
+    pmaddwd     m7,     m6,     [r6 + 1 * 32]
+    pmaddwd     m9,     m8,     [r6 + 1 * 32]
+    packssdw    m7,     m9
+    pmaddwd     m6,     [r6 + 2 * 32]
+    pmaddwd     m8,     [r6 + 2 * 32]
+    packssdw    m6,     m8
+    paddw       m3,     m6                      ; m3=[1+2+3+4+5+6]       Row2
+    paddw       m5,     m7                      ; m5=[3+4+5+6]           Row4
+
+    movq        m6,     [r0 + r1]
+    punpcklbw   m10,    m6
+    punpckhbw   m8,     m10,    m0
+    punpcklbw   m10,    m0
+    pmaddwd     m7,     m10,    [r6 + 2 * 32]
+    pmaddwd     m9,     m8,     [r6 + 2 * 32]
+    packssdw    m7,     m9
+    pmaddwd     m10,    [r6 + 3 * 32]
+    pmaddwd     m8,     [r6 + 3 * 32]
+    packssdw    m10,    m8
+    paddw       m2,     m10                     ; m2=[0+1+2+3+4+5+6+7]   Row1 end
+    paddw       m4,     m7                      ; m4=[2+3+4+5+6+7]       Row3
+
+    lea         r0,     [r0 + 2 * r1]
+    movq        m10,    [r0]
+    punpcklbw   m6,     m10
+    punpckhbw   m8,     m6,     m0
+    punpcklbw   m6,     m0
+    pmaddwd     m7,     m6,     [r6 + 2 * 32]
+    pmaddwd     m9,     m8,     [r6 + 2 * 32]
+    packssdw    m7,     m9
+    pmaddwd     m6,     [r6 + 3 * 32]
+    pmaddwd     m8,     [r6 + 3 * 32]
+    packssdw    m6,     m8
+    paddw       m3,     m6                      ; m3=[1+2+3+4+5+6+7+8]   Row2 end
+    paddw       m5,     m7                      ; m5=[3+4+5+6+7+8]       Row4
+
+    ; Rows 9 and 10 only contribute to output rows 3 and 4, so no more
+    ; accumulation into m2/m3 from here on.
+    movq        m6,     [r0 + r1]
+    punpcklbw   m10,    m6
+    punpckhbw   m8,     m10,     m0
+    punpcklbw   m10,    m0
+    pmaddwd     m8,     [r6 + 3 * 32]
+    pmaddwd     m10,    [r6 + 3 * 32]
+    packssdw    m10,    m8
+    paddw       m4,     m10                     ; m4=[2+3+4+5+6+7+8+9]   Row3 end
+
+    movq        m10,    [r0 + 2 * r1]
+    punpcklbw   m6,     m10
+    punpckhbw   m8,     m6,     m0
+    punpcklbw   m6,     m0
+    pmaddwd     m8,     [r6 + 3 * 32]
+    pmaddwd     m6,     [r6 + 3 * 32]
+    packssdw    m6,     m8
+    paddw       m5,     m6                      ; m5=[3+4+5+6+7+8+9+10]  Row4 end
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;
+; 8-tap vertical luma interpolation, SSE2.  %1 = width, %2 = height,
+; %3 = pp (pixel output: +32 >> 6, clamp to bytes) or ps (short output:
+; subtract pw_2000 offset, store 16-bit, dst stride doubled).
+; Processes the block in 4-row strips: full 8-wide columns via
+; PROCESS_LUMA_W8_4R_sse2, plus one 4-wide column (width 12) via
+; PROCESS_LUMA_W4_4R_sse2.
+;-------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_LUMA_sse2 3
+INIT_XMM sse2
+cglobal interp_8tap_vert_%3_%1x%2, 5, 8, 11
+    lea         r5,     [3 * r1]
+    sub         r0,     r5                      ; rewind src by 3 rows (filter taps -3..+4)
+    shl         r4d,    7                       ; coeffIdx * 128: each filter = 4 entries * 32 bytes
+
+%ifdef PIC
+    lea         r6,     [pw_LumaCoeffVer]
+    add         r6,     r4
+%else
+    lea         r6,     [pw_LumaCoeffVer + r4]
+%endif
+
+%ifidn %3,pp
+    mova        m1,     [pw_32]                 ; pp rounding constant ((x+32)>>6)
+%else
+    mova        m1,     [pw_2000]               ; ps intermediate offset
+    add         r3d,    r3d                     ; ps writes 16-bit pixels: double dst stride
+%endif
+
+    mov         r4d,    %2/4                    ; strip counter: 4 output rows per iteration
+    lea         r5,     [3 * r3]
+    pxor        m0,     m0                      ; zero, for byte->word widening in the helpers
+
+.loopH:
+%assign x 0
+%rep (%1 / 8)
+    PROCESS_LUMA_W8_4R_sse2
+
+%ifidn %3,pp
+    paddw       m2,     m1
+    paddw       m3,     m1
+    paddw       m4,     m1
+    paddw       m5,     m1
+    psraw       m2,     6
+    psraw       m3,     6
+    psraw       m4,     6
+    psraw       m5,     6
+
+    packuswb    m2,     m3
+    packuswb    m4,     m5
+
+    movh        [r2 + x], m2
+    movhps      [r2 + r3 + x], m2
+    movh        [r2 + 2 * r3 + x], m4
+    movhps      [r2 + r5 + x], m4
+%else
+    psubw       m2,     m1
+    psubw       m3,     m1
+    psubw       m4,     m1
+    psubw       m5,     m1
+
+    movu        [r2 + (2*x)], m2
+    movu        [r2 + r3 + (2*x)], m3
+    movu        [r2 + 2 * r3 + (2*x)], m4
+    movu        [r2 + r5 + (2*x)], m5
+%endif
+%assign x x+8
+%if %1 > 8
+    ; The W8 helper advanced r0 by 8 rows; step back up and right 8 pixels
+    ; to the next 8-wide column of the same strip.
+    lea         r7,     [8 * r1 - 8]
+    sub         r0,     r7
+%endif
+%endrep
+
+%rep (%1 % 8)/4
+    PROCESS_LUMA_W4_4R_sse2
+
+%ifidn %3,pp
+    paddw       m2,     m1
+    psraw       m2,     6
+    paddw       m3,     m1
+    psraw       m3,     6
+
+    packuswb    m2,     m3
+
+    movd        [r2 + x], m2
+    psrldq      m2,     4
+    movd        [r2 + r3 + x], m2
+    psrldq      m2,     4
+    movd        [r2 + 2 * r3 + x], m2
+    psrldq      m2,     4
+    movd        [r2 + r5 + x], m2
+%else
+    psubw       m2,     m1
+    psubw       m3,     m1
+
+    movh        [r2 + (2*x)], m2
+    movhps      [r2 + r3 + (2*x)], m2
+    movh        [r2 + 2 * r3 + (2*x)], m3
+    movhps      [r2 + r5 + (2*x)], m3
+%endif
+%endrep
+
+    ; Advance dst 4 rows; re-aim src at the start of the next 4-row strip,
+    ; compensating for how far the helpers walked r0 down and right.
+    lea         r2,     [r2 + 4 * r3]
+%if %1 <= 8
+    lea         r7,     [4 * r1]
+    sub         r0,     r7
+%elif %1 == 12
+    lea         r7,     [4 * r1 + 8]
+    sub         r0,     r7
+%else
+    lea         r0,     [r0 + 4 * r1 - %1]
+%endif
+
+    dec         r4d
+    jnz         .loopH
+
+    RET
+
+%endmacro
+
+; Instantiate pp and ps variants for every HEVC luma block size.
+; x86_64 only: the W8 helper uses xmm8..xmm10.
+%if ARCH_X86_64
+    FILTER_VER_LUMA_sse2 4, 4, pp
+    FILTER_VER_LUMA_sse2 4, 8, pp
+    FILTER_VER_LUMA_sse2 4, 16, pp
+    FILTER_VER_LUMA_sse2 8, 4, pp
+    FILTER_VER_LUMA_sse2 8, 8, pp
+    FILTER_VER_LUMA_sse2 8, 16, pp
+    FILTER_VER_LUMA_sse2 8, 32, pp
+    FILTER_VER_LUMA_sse2 12, 16, pp
+    FILTER_VER_LUMA_sse2 16, 4, pp
+    FILTER_VER_LUMA_sse2 16, 8, pp
+    FILTER_VER_LUMA_sse2 16, 12, pp
+    FILTER_VER_LUMA_sse2 16, 16, pp
+    FILTER_VER_LUMA_sse2 16, 32, pp
+    FILTER_VER_LUMA_sse2 16, 64, pp
+    FILTER_VER_LUMA_sse2 24, 32, pp
+    FILTER_VER_LUMA_sse2 32, 8, pp
+    FILTER_VER_LUMA_sse2 32, 16, pp
+    FILTER_VER_LUMA_sse2 32, 24, pp
+    FILTER_VER_LUMA_sse2 32, 32, pp
+    FILTER_VER_LUMA_sse2 32, 64, pp
+    FILTER_VER_LUMA_sse2 48, 64, pp
+    FILTER_VER_LUMA_sse2 64, 16, pp
+    FILTER_VER_LUMA_sse2 64, 32, pp
+    FILTER_VER_LUMA_sse2 64, 48, pp
+    FILTER_VER_LUMA_sse2 64, 64, pp
+
+    FILTER_VER_LUMA_sse2 4, 4, ps
+    FILTER_VER_LUMA_sse2 4, 8, ps
+    FILTER_VER_LUMA_sse2 4, 16, ps
+    FILTER_VER_LUMA_sse2 8, 4, ps
+    FILTER_VER_LUMA_sse2 8, 8, ps
+    FILTER_VER_LUMA_sse2 8, 16, ps
+    FILTER_VER_LUMA_sse2 8, 32, ps
+    FILTER_VER_LUMA_sse2 12, 16, ps
+    FILTER_VER_LUMA_sse2 16, 4, ps
+    FILTER_VER_LUMA_sse2 16, 8, ps
+    FILTER_VER_LUMA_sse2 16, 12, ps
+    FILTER_VER_LUMA_sse2 16, 16, ps
+    FILTER_VER_LUMA_sse2 16, 32, ps
+    FILTER_VER_LUMA_sse2 16, 64, ps
+    FILTER_VER_LUMA_sse2 24, 32, ps
+    FILTER_VER_LUMA_sse2 32, 8, ps
+    FILTER_VER_LUMA_sse2 32, 16, ps
+    FILTER_VER_LUMA_sse2 32, 24, ps
+    FILTER_VER_LUMA_sse2 32, 32, ps
+    FILTER_VER_LUMA_sse2 32, 64, ps
+    FILTER_VER_LUMA_sse2 48, 64, ps
+    FILTER_VER_LUMA_sse2 64, 16, ps
+    FILTER_VER_LUMA_sse2 64, 32, ps
+    FILTER_VER_LUMA_sse2 64, 48, ps
+    FILTER_VER_LUMA_sse2 64, 64, ps
+%endif
+
+; Zero-extend the low 8 packed bytes of %1 into 8 words (despite the name,
+; this widens bytes to words, not words to dwords).
+; On x86_64 it unpacks against m8, which the caller MUST have zeroed
+; (the W2/W4 chroma kernels below do pxor m8, m8).  On 32-bit, where no
+; spare register is available, it self-unpacks then shifts the duplicated
+; byte out of the high half of each word.
+%macro  WORD_TO_DOUBLE 1
+%if ARCH_X86_64
+    punpcklbw   %1,     m8
+%else
+    punpcklbw   %1,     %1
+    psrlw       %1,     8
+%endif
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_%1_2x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;
+; 4-tap vertical chroma filter for 2-wide blocks, SSE2.  %1 = pp/ps,
+; %2 = height (multiple of 4; 4 rows produced per %rep iteration).
+; Uses the word coefficient table tabw_ChromaCoeff (8 bytes per coeffIdx,
+; broadcast to both qwords of m0).
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W2_H4_sse2 2
+INIT_XMM sse2
+%if ARCH_X86_64
+cglobal interp_4tap_vert_%1_2x%2, 4, 6, 9
+    pxor        m8,        m8                   ; zero for WORD_TO_DOUBLE
+%else
+cglobal interp_4tap_vert_%1_2x%2, 4, 6, 8
+%endif
+    mov         r4d,       r4m
+    sub         r0,        r1                   ; rewind src 1 row (taps -1..+2)
+
+%ifidn %1,pp
+    mova        m1,        [pw_32]
+%elifidn %1,ps
+    mova        m1,        [pw_2000]
+    add         r3d,       r3d                  ; 16-bit output: double dst stride
+%endif
+
+%ifdef PIC
+    lea         r5,        [tabw_ChromaCoeff]
+    movh        m0,        [r5 + r4 * 8]
+%else
+    movh        m0,        [tabw_ChromaCoeff + r4 * 8]
+%endif
+
+    punpcklqdq  m0,        m0
+    lea         r5,        [3 * r1]
+
+%assign x 1
+%rep %2/4
+    movd        m2,        [r0]
+    movd        m3,        [r0 + r1]
+    movd        m4,        [r0 + 2 * r1]
+    movd        m5,        [r0 + r5]
+
+    ; Interleave the 4 taps of each output pixel so pmaddwd pairs them,
+    ; then the q2301 shuffle + paddw sums the two dword partials per pixel.
+    punpcklbw   m2,        m3
+    punpcklbw   m6,        m4,        m5
+    punpcklwd   m2,        m6
+
+    WORD_TO_DOUBLE         m2
+    pmaddwd     m2,        m0
+
+    lea         r0,        [r0 + 4 * r1]
+    movd        m6,        [r0]
+
+    punpcklbw   m3,        m4
+    punpcklbw   m7,        m5,        m6
+    punpcklwd   m3,        m7
+
+    WORD_TO_DOUBLE         m3
+    pmaddwd     m3,        m0
+
+    packssdw    m2,        m3
+    pshuflw     m3,        m2,          q2301
+    pshufhw     m3,        m3,          q2301
+    paddw       m2,        m3
+
+    movd        m7,        [r0 + r1]
+
+    punpcklbw   m4,        m5
+    punpcklbw   m3,        m6,        m7
+    punpcklwd   m4,        m3
+
+    WORD_TO_DOUBLE         m4
+    pmaddwd     m4,        m0
+
+    movd        m3,        [r0 + 2 * r1]
+
+    punpcklbw   m5,        m6
+    punpcklbw   m7,        m3
+    punpcklwd   m5,        m7
+
+    WORD_TO_DOUBLE         m5
+    pmaddwd     m5,        m0
+
+    packssdw    m4,        m5
+    pshuflw     m5,        m4,          q2301
+    pshufhw     m5,        m5,          q2301
+    paddw       m4,        m5
+
+%ifidn %1,pp
+    psrld       m2,        16                   ; keep the summed word of each dword
+    psrld       m4,        16
+    packssdw    m2,        m4
+    paddw       m2,        m1
+    psraw       m2,        6
+    packuswb    m2,        m2
+
+    ; Store 2 bytes per row; 64-bit can shift one 8-byte GPR through all
+    ; 4 rows, 32-bit needs a second movd for rows 3-4.
+%if ARCH_X86_64
+    movq        r4,        m2
+    mov         [r2],      r4w
+    shr         r4,        16
+    mov         [r2 + r3], r4w
+    lea         r2,        [r2 + 2 * r3]
+    shr         r4,        16
+    mov         [r2],      r4w
+    shr         r4,        16
+    mov         [r2 + r3], r4w
+%else
+    movd        r4,        m2
+    mov         [r2],      r4w
+    shr         r4,        16
+    mov         [r2 + r3], r4w
+    lea         r2,        [r2 + 2 * r3]
+    psrldq      m2,        4
+    movd        r4,        m2
+    mov         [r2],      r4w
+    shr         r4,        16
+    mov         [r2 + r3], r4w
+%endif
+%elifidn %1,ps
+    psrldq      m2,        2
+    psrldq      m4,        2
+    pshufd      m2,        m2, q3120
+    pshufd      m4,        m4, q3120
+    psubw       m4,        m1
+    psubw       m2,        m1
+
+    movd        [r2],      m2
+    psrldq      m2,        4
+    movd        [r2 + r3], m2
+    lea         r2,        [r2 + 2 * r3]
+    movd        [r2],      m4
+    psrldq      m4,        4
+    movd        [r2 + r3], m4
+%endif
+
+%if x < %2/4
+    lea         r2,        [r2 + 2 * r3]
+%endif
+%assign x x+1
+%endrep
+    RET
+
+%endmacro
+
+; 2-wide chroma vertical filters, all supported heights.
+    FILTER_V4_W2_H4_sse2 pp, 4
+    FILTER_V4_W2_H4_sse2 pp, 8
+    FILTER_V4_W2_H4_sse2 pp, 16
+
+    FILTER_V4_W2_H4_sse2 ps, 4
+    FILTER_V4_W2_H4_sse2 ps, 8
+    FILTER_V4_W2_H4_sse2 ps, 16
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_%1_4x2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;
+; 4-tap vertical chroma filter, fixed 4x2 block, SSE2.  %1 = pp/ps.
+; Fully unrolled: both output rows are computed in one pass with no loop.
+;-----------------------------------------------------------------------------
+%macro  FILTER_V2_W4_H4_sse2 1
+INIT_XMM sse2
+cglobal interp_4tap_vert_%1_4x2, 4, 6, 8
+    mov         r4d,       r4m
+    sub         r0,        r1                   ; rewind src 1 row (taps -1..+2)
+    pxor        m7,        m7                   ; zero for byte->word widening
+
+%ifdef PIC
+    lea         r5,        [tabw_ChromaCoeff]
+    movh        m0,        [r5 + r4 * 8]
+%else
+    movh        m0,        [tabw_ChromaCoeff + r4 * 8]
+%endif
+
+    lea         r5,        [r0 + 2 * r1]
+    punpcklqdq  m0,        m0
+    movd        m2,        [r0]
+    movd        m3,        [r0 + r1]
+    movd        m4,        [r5]
+    movd        m5,        [r5 + r1]
+
+    ; Row 1: interleave taps, widen, multiply-accumulate.
+    punpcklbw   m2,        m3
+    punpcklbw   m1,        m4,        m5
+    punpcklwd   m2,        m1
+
+    movhlps     m6,        m2
+    punpcklbw   m2,        m7
+    punpcklbw   m6,        m7
+    pmaddwd     m2,        m0
+    pmaddwd     m6,        m0
+    packssdw    m2,        m6
+
+    movd        m1,        [r0 + 4 * r1]
+
+    ; Row 2.
+    punpcklbw   m3,        m4
+    punpcklbw   m5,        m1
+    punpcklwd   m3,        m5
+
+    movhlps     m6,        m3
+    punpcklbw   m3,        m7
+    punpcklbw   m6,        m7
+    pmaddwd     m3,        m0
+    pmaddwd     m6,        m0
+    packssdw    m3,        m6
+
+    ; Horizontal add of the two dword partials per pixel (q2301 swap + add).
+    pshuflw     m4,        m2,        q2301
+    pshufhw     m4,        m4,        q2301
+    paddw       m2,        m4
+    pshuflw     m5,        m3,        q2301
+    pshufhw     m5,        m5,        q2301
+    paddw       m3,        m5
+
+%ifidn %1, pp
+    psrld       m2,        16
+    psrld       m3,        16
+    packssdw    m2,        m3
+
+    paddw       m2,        [pw_32]
+    psraw       m2,        6
+    packuswb    m2,        m2
+
+    movd        [r2],      m2
+    psrldq      m2,        4
+    movd        [r2 + r3], m2
+%elifidn %1, ps
+    psrldq      m2,        2
+    psrldq      m3,        2
+    pshufd      m2,        m2, q3120
+    pshufd      m3,        m3, q3120
+    punpcklqdq  m2, m3
+
+    add         r3d,       r3d                  ; 16-bit output: double dst stride
+    psubw       m2,        [pw_2000]
+    movh        [r2],      m2
+    movhps      [r2 + r3], m2
+%endif
+    RET
+
+%endmacro
+
+    FILTER_V2_W4_H4_sse2 pp
+    FILTER_V2_W4_H4_sse2 ps
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_%1_4x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;
+; 4-tap vertical chroma filter for 4-wide blocks, SSE2.  %1 = pp/ps,
+; %2 = height (multiple of 4; 4 rows per %rep iteration).
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W4_H4_sse2 2
+INIT_XMM sse2
+%if ARCH_X86_64
+cglobal interp_4tap_vert_%1_4x%2, 4, 6, 9
+    pxor        m8,        m8                   ; zero for WORD_TO_DOUBLE
+%else
+cglobal interp_4tap_vert_%1_4x%2, 4, 6, 8
+%endif
+
+    mov         r4d,       r4m
+    sub         r0,        r1                   ; rewind src 1 row (taps -1..+2)
+
+%ifdef PIC
+    lea         r5,        [tabw_ChromaCoeff]
+    movh        m0,        [r5 + r4 * 8]
+%else
+    movh        m0,        [tabw_ChromaCoeff + r4 * 8]
+%endif
+
+%ifidn %1,pp
+    mova        m1,        [pw_32]
+%elifidn %1,ps
+    add         r3d,       r3d                  ; 16-bit output: double dst stride
+    mova        m1,        [pw_2000]
+%endif
+
+    lea         r5,        [3 * r1]
+    lea         r4,        [3 * r3]
+    punpcklqdq  m0,        m0
+
+%assign x 1
+%rep %2/4
+    movd        m2,        [r0]
+    movd        m3,        [r0 + r1]
+    movd        m4,        [r0 + 2 * r1]
+    movd        m5,        [r0 + r5]
+
+    ; Rows 1-2: interleave the 4 taps per pixel, widen, pmaddwd, then sum the
+    ; two dword partials per pixel via the q2301 swap + paddw below.
+    punpcklbw   m2,        m3
+    punpcklbw   m6,        m4,        m5
+    punpcklwd   m2,        m6
+
+    movhlps     m6,        m2
+    WORD_TO_DOUBLE         m2
+    WORD_TO_DOUBLE         m6
+    pmaddwd     m2,        m0
+    pmaddwd     m6,        m0
+    packssdw    m2,        m6
+
+    lea         r0,        [r0 + 4 * r1]
+    movd        m6,        [r0]
+
+    punpcklbw   m3,        m4
+    punpcklbw   m7,        m5,        m6
+    punpcklwd   m3,        m7
+
+    movhlps     m7,        m3
+    WORD_TO_DOUBLE         m3
+    WORD_TO_DOUBLE         m7
+    pmaddwd     m3,        m0
+    pmaddwd     m7,        m0
+    packssdw    m3,        m7
+
+    pshuflw     m7,        m2,        q2301
+    pshufhw     m7,        m7,        q2301
+    paddw       m2,        m7
+    pshuflw     m7,        m3,        q2301
+    pshufhw     m7,        m7,        q2301
+    paddw       m3,        m7
+
+%ifidn %1,pp
+    psrld       m2,        16
+    psrld       m3,        16
+    packssdw    m2,        m3
+    paddw       m2,        m1
+    psraw       m2,        6                    ; pp stores rows 1-2 after rows 3-4 are packed in below
+%elifidn %1,ps
+    psrldq      m2,        2
+    psrldq      m3,        2
+    pshufd      m2,        m2, q3120
+    pshufd      m3,        m3, q3120
+    punpcklqdq  m2,        m3
+
+    psubw       m2,        m1
+    movh        [r2],      m2
+    movhps      [r2 + r3], m2
+%endif
+
+    movd        m7,        [r0 + r1]
+
+    ; Rows 3-4.
+    punpcklbw   m4,        m5
+    punpcklbw   m3,        m6,        m7
+    punpcklwd   m4,        m3
+
+    movhlps     m3,        m4
+    WORD_TO_DOUBLE         m4
+    WORD_TO_DOUBLE         m3
+    pmaddwd     m4,        m0
+    pmaddwd     m3,        m0
+    packssdw    m4,        m3
+
+    movd        m3,        [r0 + 2 * r1]
+
+    punpcklbw   m5,        m6
+    punpcklbw   m7,        m3
+    punpcklwd   m5,        m7
+
+    movhlps     m3,        m5
+    WORD_TO_DOUBLE         m5
+    WORD_TO_DOUBLE         m3
+    pmaddwd     m5,        m0
+    pmaddwd     m3,        m0
+    packssdw    m5,        m3
+
+    pshuflw     m7,        m4,        q2301
+    pshufhw     m7,        m7,        q2301
+    paddw       m4,        m7
+    pshuflw     m7,        m5,        q2301
+    pshufhw     m7,        m7,        q2301
+    paddw       m5,        m7
+
+%ifidn %1,pp
+    psrld       m4,        16
+    psrld       m5,        16
+    packssdw    m4,        m5
+
+    paddw       m4,        m1
+    psraw       m4,        6
+    packuswb    m2,        m4
+
+    movd        [r2],      m2
+    psrldq      m2,        4
+    movd        [r2 + r3], m2
+    psrldq      m2,        4
+    movd        [r2 + 2 * r3],      m2
+    psrldq      m2,        4
+    movd        [r2 + r4], m2
+%elifidn %1,ps
+    psrldq      m4,        2
+    psrldq      m5,        2
+    pshufd      m4,        m4, q3120
+    pshufd      m5,        m5, q3120
+    punpcklqdq  m4,        m5
+    psubw       m4,        m1
+    movh        [r2 + 2 * r3],      m4
+    movhps      [r2 + r4], m4
+%endif
+
+%if x < %2/4
+    lea         r2,        [r2 + 4 * r3]
+%endif
+
+%assign x x+1
+%endrep
+    RET
+
+%endmacro
+
+; 4-wide chroma vertical filters, all supported heights.
+    FILTER_V4_W4_H4_sse2 pp, 4
+    FILTER_V4_W4_H4_sse2 pp, 8
+    FILTER_V4_W4_H4_sse2 pp, 16
+    FILTER_V4_W4_H4_sse2 pp, 32
+
+    FILTER_V4_W4_H4_sse2 ps, 4
+    FILTER_V4_W4_H4_sse2 ps, 8
+    FILTER_V4_W4_H4_sse2 ps, 16
+    FILTER_V4_W4_H4_sse2 ps, 32
+
+;-----------------------------------------------------------------------------
+;void interp_4tap_vert_%1_6x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;
+; 4-tap vertical chroma filter for 6-wide blocks, SSE2.  %1 = pp/ps,
+; %2 = height (multiple of 4).  Computes 8 pixels per row and stores only 6
+; (movd + pextrw for pp, movh + movd for ps).  Uses the dword coefficient
+; table tab_ChromaCoeffV (32 bytes per coeffIdx: two 16-byte tap-pair
+; entries in m6/m5).  x86_64 only (uses m8/m9).
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W6_H4_sse2 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_%1_6x%2, 4, 7, 10
+    mov         r4d,       r4m
+    sub         r0,        r1                   ; rewind src 1 row (taps -1..+2)
+    shl         r4d,       5                    ; coeffIdx * 32 bytes
+    pxor        m9,        m9                   ; zero for byte->word widening
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeffV]
+    mova        m6,        [r5 + r4]
+    mova        m5,        [r5 + r4 + 16]
+%else
+    mova        m6,        [tab_ChromaCoeffV + r4]
+    mova        m5,        [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+%ifidn %1,pp
+    mova        m4,        [pw_32]
+%elifidn %1,ps
+    mova        m4,        [pw_2000]
+    add         r3d,       r3d                  ; 16-bit output: double dst stride
+%endif
+    lea         r5,        [3 * r1]
+
+%assign x 1
+%rep %2/4
+    movq        m0,        [r0]
+    movq        m1,        [r0 + r1]
+    movq        m2,        [r0 + 2 * r1]
+    movq        m3,        [r0 + r5]
+
+    ; m0/m1/m2 hold row-pair interleaves for output rows 1/2/3.
+    punpcklbw   m0,        m1
+    punpcklbw   m1,        m2
+    punpcklbw   m2,        m3
+
+    ; Output row 1 = taps(m6) on rows 0-1 + taps(m5) on rows 2-3.
+    movhlps     m7,        m0
+    punpcklbw   m0,        m9
+    punpcklbw   m7,        m9
+    pmaddwd     m0,        m6
+    pmaddwd     m7,        m6
+    packssdw    m0,        m7
+
+    movhlps     m8,        m2
+    movq        m7,        m2
+    punpcklbw   m8,        m9
+    punpcklbw   m7,        m9
+    pmaddwd     m8,        m5
+    pmaddwd     m7,        m5
+    packssdw    m7,        m8
+
+    paddw       m0,        m7
+
+%ifidn %1,pp
+    paddw       m0,        m4
+    psraw       m0,        6
+    packuswb    m0,        m0
+
+    movd        [r2],      m0                   ; pixels 0-3
+    pextrw      r6d,       m0,        2
+    mov         [r2 + 4],  r6w                  ; pixels 4-5
+%elifidn %1,ps
+    psubw       m0,        m4
+    movh        [r2],      m0
+    pshufd      m0,        m0,        2
+    movd        [r2 + 8],  m0
+%endif
+
+    lea         r0,        [r0 + 4 * r1]
+
+    ; Output row 2.
+    movq        m0,        [r0]
+    punpcklbw   m3,        m0
+
+    movhlps     m8,        m1
+    punpcklbw   m1,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m1,        m6
+    pmaddwd     m8,        m6
+    packssdw    m1,        m8
+
+    movhlps     m8,        m3
+    movq        m7,        m3
+    punpcklbw   m8,        m9
+    punpcklbw   m7,        m9
+    pmaddwd     m8,        m5
+    pmaddwd     m7,        m5
+    packssdw    m7,        m8
+
+    paddw       m1,        m7
+
+%ifidn %1,pp
+    paddw       m1,        m4
+    psraw       m1,        6
+    packuswb    m1,        m1
+
+    movd        [r2 + r3], m1
+    pextrw      r6d,       m1,        2
+    mov         [r2 + r3 + 4], r6w
+%elifidn %1,ps
+    psubw       m1,        m4
+    movh        [r2 + r3], m1
+    pshufd      m1,        m1,        2
+    movd        [r2 + r3 + 8],  m1
+%endif
+
+    ; Output row 3.
+    movq        m1,        [r0 + r1]
+    punpcklbw   m7,        m0,        m1
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m6
+    pmaddwd     m8,        m6
+    packssdw    m2,        m8
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m5
+    pmaddwd     m8,        m5
+    packssdw    m7,        m8
+
+    paddw       m2,        m7
+    lea         r2,        [r2 + 2 * r3]
+
+%ifidn %1,pp
+    paddw       m2,        m4
+    psraw       m2,        6
+    packuswb    m2,        m2
+    movd        [r2],      m2
+    pextrw      r6d,       m2,    2
+    mov         [r2 + 4],  r6w
+%elifidn %1,ps
+    psubw       m2,        m4
+    movh        [r2],      m2
+    pshufd      m2,        m2,        2
+    movd        [r2 + 8],  m2
+%endif
+
+    ; Output row 4.
+    movq        m2,        [r0 + 2 * r1]
+    punpcklbw   m1,        m2
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m6
+    pmaddwd     m8,        m6
+    packssdw    m3,        m8
+
+    movhlps     m8,        m1
+    punpcklbw   m1,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m1,        m5
+    pmaddwd     m8,        m5
+    packssdw    m1,        m8
+
+    paddw       m3,        m1
+
+%ifidn %1,pp
+    paddw       m3,        m4
+    psraw       m3,        6
+    packuswb    m3,        m3
+
+    movd        [r2 + r3], m3
+    pextrw      r6d,       m3,    2
+    mov         [r2 + r3 + 4], r6w
+%elifidn %1,ps
+    psubw       m3,        m4
+    movh        [r2 + r3], m3
+    pshufd      m3,        m3,        2
+    movd        [r2 + r3 + 8],  m3
+%endif
+
+%if x < %2/4
+    lea         r2,        [r2 + 2 * r3]
+%endif
+
+%assign x x+1
+%endrep
+    RET
+
+%endmacro
+
+; 6-wide chroma vertical filters; x86_64 only (macro uses xmm8/xmm9).
+%if ARCH_X86_64
+    FILTER_V4_W6_H4_sse2 pp, 8
+    FILTER_V4_W6_H4_sse2 pp, 16
+    FILTER_V4_W6_H4_sse2 ps, 8
+    FILTER_V4_W6_H4_sse2 ps, 16
+%endif
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_%1_8x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;
+; 4-tap vertical chroma filter for small 8-wide blocks, SSE2.
+; %1 = pp/ps, %2 = row count, restricted to 2, 4 or 6 (fully unrolled with
+; early RET after each pair of rows; any other value is a %error).
+; Coefficients come from tab_ChromaCoeffV (32 bytes per coeffIdx; tap pair
+; 0-1 in m6, tap pair 2-3 in m5).  x86_64 only (uses m8..m11).
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W8_sse2 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_%1_8x%2, 4, 7, 12
+    mov         r4d,       r4m
+    sub         r0,        r1                   ; rewind src 1 row (taps -1..+2)
+    shl         r4d,       5                    ; coeffIdx * 32 bytes
+    pxor        m9,        m9                   ; zero for byte->word widening
+
+%ifidn %1,pp
+    mova        m4,        [pw_32]
+%elifidn %1,ps
+    mova        m4,        [pw_2000]
+    add         r3d,       r3d                  ; 16-bit output: double dst stride
+%endif
+
+%ifdef PIC
+    lea         r6,        [tab_ChromaCoeffV]
+    mova        m6,        [r6 + r4]
+    mova        m5,        [r6 + r4 + 16]
+%else
+    mova        m6,        [tab_ChromaCoeffV + r4]
+    mova        m5,        [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+    movq        m0,        [r0]
+    movq        m1,        [r0 + r1]
+    movq        m2,        [r0 + 2 * r1]
+    lea         r5,        [r0 + 2 * r1]
+    movq        m3,        [r5 + r1]
+
+    ; Output row 1 = taps(m6) on src rows 0-1 + taps(m5) on src rows 2-3.
+    punpcklbw   m0,        m1
+    punpcklbw   m7,        m2,          m3
+
+    movhlps     m8,        m0
+    punpcklbw   m0,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m0,        m6
+    pmaddwd     m8,        m6
+    packssdw    m0,        m8
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m5
+    pmaddwd     m8,        m5
+    packssdw    m7,        m8
+
+    paddw       m0,        m7
+
+%ifidn %1,pp
+    paddw       m0,        m4
+    psraw       m0,        6                    ; row 1 held in m0 until packed with row 2
+%elifidn %1,ps
+    psubw       m0,        m4
+    movu        [r2],      m0
+%endif
+
+    ; Output row 2.
+    movq        m11,        [r0 + 4 * r1]
+
+    punpcklbw   m1,        m2
+    punpcklbw   m7,        m3,        m11
+
+    movhlps     m8,        m1
+    punpcklbw   m1,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m1,        m6
+    pmaddwd     m8,        m6
+    packssdw    m1,        m8
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m5
+    pmaddwd     m8,        m5
+    packssdw    m7,        m8
+
+    paddw       m1,        m7
+
+%ifidn %1,pp
+    paddw       m1,        m4
+    psraw       m1,        6
+    packuswb    m1,        m0                   ; low half = row 2, high half = row 1
+
+    movhps      [r2],      m1
+    movh        [r2 + r3], m1
+%elifidn %1,ps
+    psubw       m1,        m4
+    movu        [r2 + r3], m1
+%endif
+%if %2 == 2     ;end of 8x2
+    RET
+
+%else
+    ; Output row 3.
+    lea         r6,        [r0 + 4 * r1]
+    movq        m1,        [r6 + r1]
+
+    punpcklbw   m2,        m3
+    punpcklbw   m7,        m11,        m1
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m6
+    pmaddwd     m8,        m6
+    packssdw    m2,        m8
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m5
+    pmaddwd     m8,        m5
+    packssdw    m7,        m8
+
+    paddw       m2,        m7
+
+%ifidn %1,pp
+    paddw       m2,        m4
+    psraw       m2,        6
+%elifidn %1,ps
+    psubw       m2,        m4
+    movu        [r2 + 2 * r3], m2
+%endif
+
+    ; Output row 4.
+    movq        m10,        [r6 + 2 * r1]
+
+    punpcklbw   m3,        m11
+    punpcklbw   m7,        m1,        m10
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m6
+    pmaddwd     m8,        m6
+    packssdw    m3,        m8
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m5
+    pmaddwd     m8,        m5
+    packssdw    m7,        m8
+
+    paddw       m3,        m7
+    lea         r5,        [r2 + 2 * r3]
+
+%ifidn %1,pp
+    paddw       m3,        m4
+    psraw       m3,        6
+    packuswb    m3,        m2                   ; low half = row 4, high half = row 3
+
+    movhps      [r2 + 2 * r3], m3
+    movh        [r5 + r3], m3
+%elifidn %1,ps
+    psubw       m3,        m4
+    movu        [r5 + r3], m3
+%endif
+%if %2 == 4     ;end of 8x4
+    RET
+
+%else
+    ; Output row 5.
+    lea         r6,        [r6 + 2 * r1]
+    movq        m3,        [r6 + r1]
+
+    punpcklbw   m11,        m1
+    punpcklbw   m7,        m10,        m3
+
+    movhlps     m8,        m11
+    punpcklbw   m11,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m11,        m6
+    pmaddwd     m8,        m6
+    packssdw    m11,        m8
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m5
+    pmaddwd     m8,        m5
+    packssdw    m7,        m8
+
+    paddw       m11,       m7
+
+%ifidn %1, pp
+    paddw       m11,       m4
+    psraw       m11,       6
+%elifidn %1,ps
+    psubw       m11,       m4
+    movu        [r2 + 4 * r3], m11
+%endif
+
+    ; Output row 6.
+    movq        m7,        [r0 + 8 * r1]
+
+    punpcklbw   m1,        m10
+    punpcklbw   m3,        m7
+
+    movhlps     m8,        m1
+    punpcklbw   m1,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m1,        m6
+    pmaddwd     m8,        m6
+    packssdw    m1,        m8
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m5
+    pmaddwd     m8,        m5
+    packssdw    m3,        m8
+
+    paddw       m1,        m3
+    lea         r5,        [r2 + 4 * r3]
+
+%ifidn %1,pp
+    paddw       m1,        m4
+    psraw       m1,        6
+    packuswb    m1,        m11                  ; low half = row 6, high half = row 5
+
+    movhps      [r2 + 4 * r3], m1
+    movh        [r5 + r3], m1
+%elifidn %1,ps
+    psubw       m1,        m4
+    movu        [r5 + r3], m1
+%endif
+%if %2 == 6
+    RET
+
+%else
+  %error INVALID macro argument, only 2, 4 or 6!
+%endif
+%endif
+%endif
+%endmacro
+
+; Small 8-wide chroma vertical filters; x86_64 only (macro uses xmm8..xmm11).
+%if ARCH_X86_64
+    FILTER_V4_W8_sse2 pp, 2
+    FILTER_V4_W8_sse2 pp, 4
+    FILTER_V4_W8_sse2 pp, 6
+    FILTER_V4_W8_sse2 ps, 2
+    FILTER_V4_W8_sse2 ps, 4
+    FILTER_V4_W8_sse2 ps, 6
+%endif
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_%1_8x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; 8-wide vertical 4-tap chroma interpolation.
+;   %1 = pp (round, >>6, clamp to pixel) or ps (16-bit intermediate minus pw_2000)
+;   %2 = block height, multiple of 4; each %rep iteration emits 4 output rows
+; Register roles: m6/m5 = interleaved coefficient pairs (taps 0-1 / taps 2-3),
+; m9 = zero (for byte->word widening), m4 = pp rounding constant or ps offset.
+%macro FILTER_V4_W8_H8_H16_H32_sse2 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_%1_8x%2, 4, 6, 11
+    mov         r4d,       r4m
+    sub         r0,        r1                      ; start one row above: 4 taps read rows -1..+2
+    shl         r4d,       5                       ; coeffIdx * 32 (two 16-byte coefficient rows per index)
+    pxor        m9,        m9
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeffV]
+    mova        m6,        [r5 + r4]
+    mova        m5,        [r5 + r4 + 16]
+%else
+    ; Fix: use tab_ChromaCoeffV (vertical table), matching the PIC branch above
+    ; and every sibling macro; tab_ChromaCoeff is a different (horizontal) table.
+    mova        m6,        [tab_ChromaCoeffV + r4]
+    mova        m5,        [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+%ifidn %1,pp
+    mova        m4,        [pw_32]
+%elifidn %1,ps
+    mova        m4,        [pw_2000]
+    add         r3d,       r3d                     ; ps writes int16: double the dst stride in bytes
+%endif
+
+    lea         r5,        [r1 * 3]
+
+%assign x 1
+%rep %2/4
+    movq        m0,        [r0]
+    movq        m1,        [r0 + r1]
+    movq        m2,        [r0 + 2 * r1]
+    movq        m3,        [r0 + r5]
+
+    punpcklbw   m0,        m1
+    punpcklbw   m1,        m2
+    punpcklbw   m2,        m3
+
+    movhlps     m7,        m0
+    punpcklbw   m0,        m9
+    punpcklbw   m7,        m9
+    pmaddwd     m0,        m6
+    pmaddwd     m7,        m6
+    packssdw    m0,        m7
+
+    movhlps     m8,        m2
+    movq        m7,        m2
+    punpcklbw   m8,        m9
+    punpcklbw   m7,        m9
+    pmaddwd     m8,        m5
+    pmaddwd     m7,        m5
+    packssdw    m7,        m8
+
+    paddw       m0,        m7
+
+%ifidn %1,pp
+    paddw       m0,        m4
+    psraw       m0,        6
+%elifidn %1,ps
+    psubw       m0,        m4
+    movu        [r2],      m0
+%endif
+
+    lea         r0,        [r0 + 4 * r1]
+    movq        m10,       [r0]
+    punpcklbw   m3,        m10
+
+    movhlps     m8,        m1
+    punpcklbw   m1,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m1,        m6
+    pmaddwd     m8,        m6
+    packssdw    m1,        m8
+
+    movhlps     m8,        m3
+    movq        m7,        m3
+    punpcklbw   m8,        m9
+    punpcklbw   m7,        m9
+    pmaddwd     m8,        m5
+    pmaddwd     m7,        m5
+    packssdw    m7,        m8
+
+    paddw       m1,        m7
+
+%ifidn %1,pp
+    paddw       m1,        m4
+    psraw       m1,        6
+
+    packuswb    m0,        m1
+    movh        [r2],      m0
+    movhps      [r2 + r3], m0
+%elifidn %1,ps
+    psubw       m1,        m4
+    movu        [r2 + r3], m1
+%endif
+
+    movq        m1,        [r0 + r1]
+    punpcklbw   m10,       m1
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m6
+    pmaddwd     m8,        m6
+    packssdw    m2,        m8
+
+    movhlps     m8,        m10
+    punpcklbw   m10,       m9
+    punpcklbw   m8,        m9
+    pmaddwd     m10,       m5
+    pmaddwd     m8,        m5
+    packssdw    m10,       m8
+
+    paddw       m2,        m10
+    lea         r2,        [r2 + 2 * r3]
+
+%ifidn %1,pp
+    paddw       m2,        m4
+    psraw       m2,        6
+%elifidn %1,ps
+    psubw       m2,        m4
+    movu        [r2],      m2
+%endif
+
+    movq        m7,        [r0 + 2 * r1]
+    punpcklbw   m1,        m7
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m6
+    pmaddwd     m8,        m6
+    packssdw    m3,        m8
+
+    movhlps     m8,        m1
+    punpcklbw   m1,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m1,        m5
+    pmaddwd     m8,        m5
+    packssdw    m1,        m8
+
+    paddw       m3,        m1
+
+%ifidn %1,pp
+    paddw       m3,        m4
+    psraw       m3,        6
+
+    packuswb    m2,        m3
+    movh        [r2],      m2
+    movhps      [r2 + r3], m2
+%elifidn %1,ps
+    psubw       m3,        m4
+    movu        [r2 + r3], m3
+%endif
+
+%if x < %2/4
+    lea         r2,        [r2 + 2 * r3]
+%endif
+%assign x x+1
+; Fix: x was never incremented, so the guard above could never skip the final
+; (dead) stride bump; siblings W12/W16/W24 all advance x each iteration.
+%endrep
+    RET
+%endmacro
+
+; Instantiations of the looping 8-wide vertical filter for all supported
+; 8xN partition heights (x86-64 only: needs xmm8-xmm10).
+%if ARCH_X86_64
+    FILTER_V4_W8_H8_H16_H32_sse2 pp,  8
+    FILTER_V4_W8_H8_H16_H32_sse2 pp, 16
+    FILTER_V4_W8_H8_H16_H32_sse2 pp, 32
+
+    FILTER_V4_W8_H8_H16_H32_sse2 pp, 12
+    FILTER_V4_W8_H8_H16_H32_sse2 pp, 64
+
+    FILTER_V4_W8_H8_H16_H32_sse2 ps,  8
+    FILTER_V4_W8_H8_H16_H32_sse2 ps, 16
+    FILTER_V4_W8_H8_H16_H32_sse2 ps, 32
+
+    FILTER_V4_W8_H8_H16_H32_sse2 ps, 12
+    FILTER_V4_W8_H8_H16_H32_sse2 ps, 64
+%endif
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_%1_12x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; 12-wide vertical 4-tap chroma interpolation; two output rows per %rep
+; iteration.  %1 = pp/ps, %2 = height.  m1/m0 = coefficient pairs, m9 = zero,
+; m6 = rounding constant (pp) or offset (ps).  16 source bytes are loaded per
+; row; only the low 12 results are stored.
+%macro FILTER_V4_W12_H2_sse2 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_%1_12x%2, 4, 6, 11
+    mov         r4d,       r4m
+    sub         r0,        r1
+    shl         r4d,       5
+    pxor        m9,        m9
+
+%ifidn %1,pp
+    mova        m6,        [pw_32]
+%elifidn %1,ps
+    mova        m6,        [pw_2000]
+    add         r3d,       r3d
+%endif
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeffV]
+    mova        m1,        [r5 + r4]
+    mova        m0,        [r5 + r4 + 16]
+%else
+    mova        m1,        [tab_ChromaCoeffV + r4]
+    mova        m0,        [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+%assign x 1
+%rep %2/2
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m1
+    pmaddwd     m8,        m1
+    packssdw    m2,        m8
+
+    lea         r0,        [r0 + 2 * r1]
+    movu        m5,        [r0]
+    movu        m7,        [r0 + r1]
+
+    punpcklbw   m10,       m5,        m7
+    movhlps     m8,        m10
+    punpcklbw   m10,       m9
+    punpcklbw   m8,        m9
+    pmaddwd     m10,       m0
+    pmaddwd     m8,        m0
+    packssdw    m10,       m8
+
+    paddw       m4,        m10
+
+    punpckhbw   m10,       m5,        m7
+    movhlps     m8,        m10
+    punpcklbw   m10,       m9
+    punpcklbw   m8,        m9
+    pmaddwd     m10,       m0
+    pmaddwd     m8,        m0
+    packssdw    m10,       m8
+
+    paddw       m2,        m10
+
+%ifidn %1,pp
+    paddw       m4,        m6
+    psraw       m4,        6
+    paddw       m2,        m6
+    psraw       m2,        6
+
+    packuswb    m4,        m2
+    movh        [r2],      m4
+    psrldq      m4,        8
+    movd        [r2 + 8],  m4
+%elifidn %1,ps
+    psubw       m4,        m6
+    psubw       m2,        m6
+    movu        [r2],      m4
+    movh        [r2 + 16], m2
+%endif
+
+    punpcklbw   m4,        m3,        m5
+    punpckhbw   m3,        m5
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    ; Fix: take the high half of m3 (still byte-packed source pairs), not m4,
+    ; which already holds packed filter results here -- matches the parallel
+    ; stanza in FILTER_V4_W16_H2_sse2.  Stored output is unchanged (the
+    ; affected high lanes are beyond column 12 and never written).
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m1
+    pmaddwd     m8,        m1
+    packssdw    m3,        m8
+
+    movu        m5,        [r0 + 2 * r1]
+    punpcklbw   m2,        m7,        m5
+    punpckhbw   m7,        m5
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m0
+    pmaddwd     m8,        m0
+    packssdw    m2,        m8
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m0
+    pmaddwd     m8,        m0
+    packssdw    m7,        m8
+
+    paddw       m4,        m2
+    paddw       m3,        m7
+
+%ifidn %1,pp
+    paddw       m4,        m6
+    psraw       m4,        6
+    paddw       m3,        m6
+    psraw       m3,        6
+
+    packuswb    m4,        m3
+    movh        [r2 + r3], m4
+    psrldq      m4,        8
+    movd        [r2 + r3 + 8], m4
+%elifidn %1,ps
+    psubw       m4,        m6
+    psubw       m3,        m6
+    movu        [r2 + r3], m4
+    movh        [r2 + r3 + 16], m3
+%endif
+
+%if x < %2/2
+    lea         r2,        [r2 + 2 * r3]
+%endif
+%assign x x+1
+%endrep
+    RET
+
+%endmacro
+
+; Instantiate the 12-wide vertical chroma filters, grouped by height
+; (x86-64 only: the macro uses xmm8-xmm10).
+%if ARCH_X86_64
+    FILTER_V4_W12_H2_sse2 pp, 16
+    FILTER_V4_W12_H2_sse2 ps, 16
+    FILTER_V4_W12_H2_sse2 pp, 32
+    FILTER_V4_W12_H2_sse2 ps, 32
+%endif
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_%1_16x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; 16-wide vertical 4-tap chroma interpolation; two output rows per %rep
+; iteration.  %1 = pp (round >>6, clamp) or ps (int16 intermediate - pw_2000),
+; %2 = height.  m1/m0 = interleaved coefficient pairs (taps 0-1 / 2-3),
+; m9 = zero for byte->word widening, m6 = rounding constant / ps offset.
+%macro FILTER_V4_W16_H2_sse2 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_%1_16x%2, 4, 6, 11
+    mov         r4d,       r4m
+    sub         r0,        r1                      ; point src one row above the first output row
+    shl         r4d,       5                       ; coeffIdx * 32: two 16-byte coefficient rows per index
+    pxor        m9,        m9
+
+%ifidn %1,pp
+    mova        m6,        [pw_32]
+%elifidn %1,ps
+    mova        m6,        [pw_2000]
+    add         r3d,       r3d                     ; int16 output: double dst stride in bytes
+%endif
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeffV]
+    mova        m1,        [r5 + r4]
+    mova        m0,        [r5 + r4 + 16]
+%else
+    mova        m1,        [tab_ChromaCoeffV + r4]
+    mova        m0,        [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+%assign x 1
+%rep %2/2
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m1
+    pmaddwd     m8,        m1
+    packssdw    m2,        m8
+
+    lea         r0,        [r0 + 2 * r1]
+    movu        m5,        [r0]
+    movu        m10,       [r0 + r1]
+
+    punpckhbw   m7,        m5,        m10
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m0
+    pmaddwd     m8,        m0
+    packssdw    m7,        m8
+    paddw       m2,        m7
+
+    punpcklbw   m7,        m5,        m10
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m0
+    pmaddwd     m8,        m0
+    packssdw    m7,        m8
+    paddw       m4,        m7
+
+%ifidn %1,pp
+    paddw       m4,        m6
+    psraw       m4,        6
+    paddw       m2,        m6
+    psraw       m2,        6
+
+    packuswb    m4,        m2
+    movu        [r2],      m4
+%elifidn %1,ps
+    psubw       m4,        m6
+    psubw       m2,        m6
+    movu        [r2],      m4
+    movu        [r2 + 16], m2
+%endif
+
+    ; Second output row: same filter over the source window shifted one row.
+    punpcklbw   m4,        m3,        m5
+    punpckhbw   m3,        m5
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m1
+    pmaddwd     m8,        m1
+    packssdw    m3,        m8
+
+    movu        m5,        [r0 + 2 * r1]
+
+    punpcklbw   m2,        m10,       m5
+    punpckhbw   m10,       m5
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m0
+    pmaddwd     m8,        m0
+    packssdw    m2,        m8
+
+    movhlps     m8,        m10
+    punpcklbw   m10,       m9
+    punpcklbw   m8,        m9
+    pmaddwd     m10,       m0
+    pmaddwd     m8,        m0
+    packssdw    m10,       m8
+
+    paddw       m4,        m2
+    paddw       m3,        m10
+
+%ifidn %1,pp
+    paddw       m4,        m6
+    psraw       m4,        6
+    paddw       m3,        m6
+    psraw       m3,        6
+
+    packuswb    m4,        m3
+    movu        [r2 + r3], m4
+%elifidn %1,ps
+    psubw       m4,        m6
+    psubw       m3,        m6
+    movu        [r2 + r3], m4
+    movu        [r2 + r3 + 16], m3
+%endif
+
+%if x < %2/2
+    lea         r2,        [r2 + 2 * r3]
+%endif
+%assign x x+1
+%endrep
+    RET
+
+%endmacro
+
+; Instantiations of the 16-wide vertical filter for all supported 16xN
+; partition heights (x86-64 only: needs xmm8-xmm10).
+%if ARCH_X86_64
+    FILTER_V4_W16_H2_sse2 pp, 4
+    FILTER_V4_W16_H2_sse2 pp, 8
+    FILTER_V4_W16_H2_sse2 pp, 12
+    FILTER_V4_W16_H2_sse2 pp, 16
+    FILTER_V4_W16_H2_sse2 pp, 32
+
+    FILTER_V4_W16_H2_sse2 pp, 24
+    FILTER_V4_W16_H2_sse2 pp, 64
+
+    FILTER_V4_W16_H2_sse2 ps, 4
+    FILTER_V4_W16_H2_sse2 ps, 8
+    FILTER_V4_W16_H2_sse2 ps, 12
+    FILTER_V4_W16_H2_sse2 ps, 16
+    FILTER_V4_W16_H2_sse2 ps, 32
+
+    FILTER_V4_W16_H2_sse2 ps, 24
+    FILTER_V4_W16_H2_sse2 ps, 64
+%endif
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_%1_24x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; 24-wide vertical 4-tap chroma interpolation: a full 16-wide pass over
+; columns 0-15 plus an 8-wide pass over columns 16-23, two rows per %rep
+; iteration.  %1 = pp/ps, %2 = height.  m1/m0 = coefficient pairs, m9 = zero,
+; m6 = pp rounding constant or ps offset.
+%macro FILTER_V4_W24_sse2 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_%1_24x%2, 4, 6, 11
+    mov         r4d,       r4m
+    sub         r0,        r1
+    shl         r4d,       5
+    pxor        m9,        m9
+
+%ifidn %1,pp
+    mova        m6,        [pw_32]
+%elifidn %1,ps
+    mova        m6,        [pw_2000]
+    add         r3d,       r3d
+%endif
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeffV]
+    mova        m1,        [r5 + r4]
+    mova        m0,        [r5 + r4 + 16]
+%else
+    mova        m1,        [tab_ChromaCoeffV + r4]
+    mova        m0,        [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+%assign x 1
+%rep %2/2
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m1
+    pmaddwd     m8,        m1
+    packssdw    m2,        m8
+
+    lea         r5,        [r0 + 2 * r1]           ; r5 = row +2; kept so r0 can rewind at iteration end
+    movu        m5,        [r5]
+    movu        m10,       [r5 + r1]
+    punpcklbw   m7,        m5,        m10
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m0
+    pmaddwd     m8,        m0
+    packssdw    m7,        m8
+    paddw       m4,        m7
+
+    punpckhbw   m7,        m5,        m10
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m0
+    pmaddwd     m8,        m0
+    packssdw    m7,        m8
+
+    paddw       m2,        m7
+
+%ifidn %1,pp
+    paddw       m4,        m6
+    psraw       m4,        6
+    paddw       m2,        m6
+    psraw       m2,        6
+
+    packuswb    m4,        m2
+    movu        [r2],      m4
+%elifidn %1,ps
+    psubw       m4,        m6
+    psubw       m2,        m6
+    movu        [r2],      m4
+    movu        [r2 + 16], m2
+%endif
+
+    punpcklbw   m4,        m3,        m5
+    punpckhbw   m3,        m5
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m1
+    pmaddwd     m8,        m1
+    packssdw    m3,        m8
+
+    movu        m2,        [r5 + 2 * r1]
+
+    punpcklbw   m5,        m10,        m2
+    punpckhbw   m10,       m2
+
+    movhlps     m8,        m5
+    punpcklbw   m5,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m5,        m0
+    pmaddwd     m8,        m0
+    packssdw    m5,        m8
+
+    movhlps     m8,        m10
+    punpcklbw   m10,       m9
+    punpcklbw   m8,        m9
+    pmaddwd     m10,       m0
+    pmaddwd     m8,        m0
+    packssdw    m10,       m8
+
+    paddw       m4,        m5
+    paddw       m3,        m10
+
+%ifidn %1,pp
+    paddw       m4,        m6
+    psraw       m4,        6
+    paddw       m3,        m6
+    psraw       m3,        6
+
+    packuswb    m4,        m3
+    movu        [r2 + r3], m4
+%elifidn %1,ps
+    psubw       m4,        m6
+    psubw       m3,        m6
+    movu        [r2 + r3], m4
+    movu        [r2 + r3 + 16], m3
+%endif
+
+    ; Remaining 8 columns (16-23), both output rows.
+    movq        m2,        [r0 + 16]
+    movq        m3,        [r0 + r1 + 16]
+    movq        m4,        [r5 + 16]
+    movq        m5,        [r5 + r1 + 16]
+
+    punpcklbw   m2,        m3
+    punpcklbw   m4,        m5
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m0
+    pmaddwd     m8,        m0
+    packssdw    m4,        m8
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m1
+    pmaddwd     m8,        m1
+    packssdw    m2,        m8
+
+    paddw       m2,        m4
+
+%ifidn %1,pp
+    paddw       m2,        m6
+    psraw       m2,        6
+%elifidn %1,ps
+    psubw       m2,        m6
+    movu        [r2 + 32], m2
+%endif
+
+    movq        m3,        [r0 + r1 + 16]
+    movq        m4,        [r5 + 16]
+    movq        m5,        [r5 + r1 + 16]
+    movq        m7,        [r5 + 2 * r1 + 16]
+
+    punpcklbw   m3,        m4
+    punpcklbw   m5,        m7
+
+    movhlps     m8,        m5
+    punpcklbw   m5,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m5,        m0
+    pmaddwd     m8,        m0
+    packssdw    m5,        m8
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m1
+    pmaddwd     m8,        m1
+    packssdw    m3,        m8
+
+    paddw       m3,        m5
+
+%ifidn %1,pp
+    paddw       m3,        m6
+    psraw       m3,        6
+
+    packuswb    m2,        m3
+    movh        [r2 + 16], m2
+    movhps      [r2 + r3 + 16], m2
+%elifidn %1,ps
+    psubw       m3,        m6
+    movu        [r2 + r3 + 32], m3
+%endif
+
+%if x < %2/2
+    mov         r0,        r5                      ; advance src two rows (r5 was r0 + 2*srcStride)
+    lea         r2,        [r2 + 2 * r3]
+%endif
+%assign x x+1
+%endrep
+    RET
+
+%endmacro
+
+; Instantiate the 24-wide vertical chroma filters, grouped by height
+; (x86-64 only: the macro uses xmm8-xmm10).
+%if ARCH_X86_64
+    FILTER_V4_W24_sse2 pp, 32
+    FILTER_V4_W24_sse2 ps, 32
+    FILTER_V4_W24_sse2 pp, 64
+    FILTER_V4_W24_sse2 ps, 64
+%endif
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_%1_32x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; 32-wide vertical 4-tap chroma interpolation: two 16-wide passes per row,
+; one output row per loop iteration (runtime loop on r4d, not %rep, so only
+; 10 XMM registers are needed).  %1 = pp/ps, %2 = height.
+%macro FILTER_V4_W32_sse2 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_%1_32x%2, 4, 6, 10
+    mov         r4d,       r4m
+    sub         r0,        r1
+    shl         r4d,       5
+    pxor        m9,        m9
+
+%ifidn %1,pp
+    mova        m6,        [pw_32]
+%elifidn %1,ps
+    mova        m6,        [pw_2000]
+    add         r3d,       r3d
+%endif
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeffV]
+    mova        m1,        [r5 + r4]
+    mova        m0,        [r5 + r4 + 16]
+%else
+    mova        m1,        [tab_ChromaCoeffV + r4]
+    mova        m0,        [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+    mov         r4d,       %2                      ; row counter
+
+.loop:
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m1
+    pmaddwd     m8,        m1
+    packssdw    m2,        m8
+
+    lea         r5,        [r0 + 2 * r1]
+    movu        m3,        [r5]
+    movu        m5,        [r5 + r1]
+
+    punpcklbw   m7,        m3,        m5
+    punpckhbw   m3,        m5
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m0
+    pmaddwd     m8,        m0
+    packssdw    m7,        m8
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m0
+    pmaddwd     m8,        m0
+    packssdw    m3,        m8
+
+    paddw       m4,        m7
+    paddw       m2,        m3
+
+%ifidn %1,pp
+    paddw       m4,        m6
+    psraw       m4,        6
+    paddw       m2,        m6
+    psraw       m2,        6
+
+    packuswb    m4,        m2
+    movu        [r2],      m4
+%elifidn %1,ps
+    psubw       m4,        m6
+    psubw       m2,        m6
+    movu        [r2],      m4
+    movu        [r2 + 16], m2
+%endif
+
+    ; Columns 16-31 of the same output row.
+    movu        m2,        [r0 + 16]
+    movu        m3,        [r0 + r1 + 16]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m1
+    pmaddwd     m8,        m1
+    packssdw    m2,        m8
+
+    movu        m3,        [r5 + 16]
+    movu        m5,        [r5 + r1 + 16]
+
+    punpcklbw   m7,        m3,        m5
+    punpckhbw   m3,        m5
+
+    movhlps     m8,        m7
+    punpcklbw   m7,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m7,        m0
+    pmaddwd     m8,        m0
+    packssdw    m7,        m8
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m0
+    pmaddwd     m8,        m0
+    packssdw    m3,        m8
+
+    paddw       m4,        m7
+    paddw       m2,        m3
+
+%ifidn %1,pp
+    paddw       m4,        m6
+    psraw       m4,        6
+    paddw       m2,        m6
+    psraw       m2,        6
+
+    packuswb    m4,        m2
+    movu        [r2 + 16], m4
+%elifidn %1,ps
+    psubw       m4,        m6
+    psubw       m2,        m6
+    movu        [r2 + 32], m4
+    movu        [r2 + 48], m2
+%endif
+
+    lea         r0,        [r0 + r1]
+    lea         r2,        [r2 + r3]
+    dec         r4
+    jnz        .loop
+    RET
+
+%endmacro
+
+; Instantiations of the 32-wide vertical filter for all supported 32xN
+; partition heights (x86-64 only).
+%if ARCH_X86_64
+    FILTER_V4_W32_sse2 pp, 8
+    FILTER_V4_W32_sse2 pp, 16
+    FILTER_V4_W32_sse2 pp, 24
+    FILTER_V4_W32_sse2 pp, 32
+
+    FILTER_V4_W32_sse2 pp, 48
+    FILTER_V4_W32_sse2 pp, 64
+
+    FILTER_V4_W32_sse2 ps, 8
+    FILTER_V4_W32_sse2 ps, 16
+    FILTER_V4_W32_sse2 ps, 24
+    FILTER_V4_W32_sse2 ps, 32
+
+    FILTER_V4_W32_sse2 ps, 48
+    FILTER_V4_W32_sse2 ps, 64
+%endif
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_%1_%2x%3(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; Generic vertical 4-tap chroma filter for widths that are multiples of 16:
+; outer loop (.loop) over row pairs, inner loop (.loopW) over 16-column tiles.
+;   %1 = pp/ps, %2 = width (multiple of 16), %3 = height
+; m1/m0 = coefficient pairs, m9 = zero, m7 = pp rounding constant / ps offset.
+%macro FILTER_V4_W16n_H2_sse2 3
+INIT_XMM sse2
+cglobal interp_4tap_vert_%1_%2x%3, 4, 7, 11
+    mov         r4d,       r4m
+    sub         r0,        r1
+    shl         r4d,       5
+    pxor        m9,        m9
+
+%ifidn %1,pp
+    mova        m7,        [pw_32]
+%elifidn %1,ps
+    mova        m7,        [pw_2000]
+    add         r3d,       r3d
+%endif
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeffV]
+    mova        m1,        [r5 + r4]
+    mova        m0,        [r5 + r4 + 16]
+%else
+    mova        m1,        [tab_ChromaCoeffV + r4]
+    mova        m0,        [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+    mov         r4d,       %3/2                    ; outer loop count: two rows per iteration
+
+.loop:
+
+    mov         r6d,       %2/16                   ; inner loop count: 16 columns per iteration
+
+.loopW:
+
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m1
+    pmaddwd     m8,        m1
+    packssdw    m2,        m8
+
+    lea         r5,        [r0 + 2 * r1]
+    movu        m5,        [r5]
+    movu        m6,        [r5 + r1]
+
+    punpckhbw   m10,        m5,        m6
+    movhlps     m8,        m10
+    punpcklbw   m10,       m9
+    punpcklbw   m8,        m9
+    pmaddwd     m10,       m0
+    pmaddwd     m8,        m0
+    packssdw    m10,       m8
+    paddw       m2,        m10
+
+    punpcklbw   m10,        m5,        m6
+    movhlps     m8,        m10
+    punpcklbw   m10,       m9
+    punpcklbw   m8,        m9
+    pmaddwd     m10,       m0
+    pmaddwd     m8,        m0
+    packssdw    m10,       m8
+    paddw       m4,        m10
+
+%ifidn %1,pp
+    paddw       m4,        m7
+    psraw       m4,        6
+    paddw       m2,        m7
+    psraw       m2,        6
+
+    packuswb    m4,        m2
+    movu        [r2],      m4
+%elifidn %1,ps
+    psubw       m4,        m7
+    psubw       m2,        m7
+    movu        [r2],      m4
+    movu        [r2 + 16], m2
+%endif
+
+    punpcklbw   m4,        m3,        m5
+    punpckhbw   m3,        m5
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m1
+    pmaddwd     m8,        m1
+    packssdw    m3,        m8
+
+    movu        m5,        [r5 + 2 * r1]
+
+    punpcklbw   m2,        m6,        m5
+    punpckhbw   m6,        m5
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m0
+    pmaddwd     m8,        m0
+    packssdw    m2,        m8
+
+    movhlps     m8,        m6
+    punpcklbw   m6,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m6,        m0
+    pmaddwd     m8,        m0
+    packssdw    m6,        m8
+
+    paddw       m4,        m2
+    paddw       m3,        m6
+
+%ifidn %1,pp
+    paddw       m4,        m7
+    psraw       m4,        6
+    paddw       m3,        m7
+    psraw       m3,        6
+
+    packuswb    m4,        m3
+    movu        [r2 + r3], m4
+    add         r2,        16
+%elifidn %1,ps
+    psubw       m4,        m7
+    psubw       m3,        m7
+    movu        [r2 + r3], m4
+    movu        [r2 + r3 + 16], m3
+    add         r2,        32
+%endif
+
+    add         r0,        16
+    dec         r6d
+    jnz         .loopW
+
+    ; Rewind the column offset (%2 bytes of src, %2 or 2*%2 bytes of dst
+    ; depending on element size) and step down two rows.
+    lea         r0,        [r0 + r1 * 2 - %2]
+
+%ifidn %1,pp
+    lea         r2,        [r2 + r3 * 2 - %2]
+%elifidn %1,ps
+    lea         r2,        [r2 + r3 * 2 - (%2 * 2)]
+%endif
+
+    dec         r4d
+    jnz        .loop
+    RET
+
+%endmacro
+
+; Instantiations of the generic 16n-wide vertical filter for the 48- and
+; 64-wide partition sizes (x86-64 only).
+%if ARCH_X86_64
+    FILTER_V4_W16n_H2_sse2 pp, 64, 64
+    FILTER_V4_W16n_H2_sse2 pp, 64, 32
+    FILTER_V4_W16n_H2_sse2 pp, 64, 48
+    FILTER_V4_W16n_H2_sse2 pp, 48, 64
+    FILTER_V4_W16n_H2_sse2 pp, 64, 16
+    FILTER_V4_W16n_H2_sse2 ps, 64, 64
+    FILTER_V4_W16n_H2_sse2 ps, 64, 32
+    FILTER_V4_W16n_H2_sse2 ps, 64, 48
+    FILTER_V4_W16n_H2_sse2 ps, 48, 64
+    FILTER_V4_W16n_H2_sse2 ps, 64, 16
+%endif
+
+; pixel->short conversion, 2 columns x 4 rows, at column offset %1.
+; Gathers the four 2-pixel rows into one register, widens bytes to words
+; (m0 = zero), scales by 64 (<<6) and subtracts the m1 offset (pw_8192).
+; Expects r4 = 3*srcStride, r5 = 3*dstStride(bytes) set by the caller.
+%macro FILTER_P2S_2_4_sse2 1
+    movd        m2,     [r0 + %1]
+    movd        m3,     [r0 + r1 + %1]
+    punpcklwd   m2,     m3
+    movd        m3,     [r0 + r1 * 2 + %1]
+    movd        m4,     [r0 + r4 + %1]
+    punpcklwd   m3,     m4
+    punpckldq   m2,     m3
+    punpcklbw   m2,     m0
+    psllw       m2,     6
+    psubw       m2,     m1
+
+    movd        [r2 + r3 * 0 + %1 * 2], m2
+    psrldq      m2,     4                          ; shift next row's pair into the low dword
+    movd        [r2 + r3 * 1 + %1 * 2], m2
+    psrldq      m2,     4
+    movd        [r2 + r3 * 2 + %1 * 2], m2
+    psrldq      m2,     4
+    movd        [r2 + r5 + %1 * 2], m2
+%endmacro
+
+; pixel->short conversion, 4 columns x 4 rows, at column offset %1.
+; Two rows per register (m2 = rows 0/1, m4 = rows 2/3); widen, <<6, minus m1.
+; Expects r4 = 3*srcStride, r5 = 3*dstStride(bytes), m0 = zero, m1 = pw_8192.
+%macro FILTER_P2S_4_4_sse2 1
+    movd        m2,     [r0 + %1]
+    movd        m3,     [r0 + r1 + %1]
+    movd        m4,     [r0 + r1 * 2 + %1]
+    movd        m5,     [r0 + r4 + %1]
+    punpckldq   m2,     m3
+    punpcklbw   m2,     m0
+    punpckldq   m4,     m5
+    punpcklbw   m4,     m0
+    psllw       m2,     6
+    psllw       m4,     6
+    psubw       m2,     m1
+    psubw       m4,     m1
+    movh        [r2 + r3 * 0 + %1 * 2], m2
+    movh        [r2 + r3 * 2 + %1 * 2], m4
+    movhps      [r2 + r3 * 1 + %1 * 2], m2
+    movhps      [r2 + r5 + %1 * 2], m4
+%endmacro
+
+; pixel->short conversion, 4 columns x 2 rows, column offset 0.
+; Self-contained variant: loads pw_8192 directly and addresses the dst with
+; r3 * 2 because the caller has NOT doubled r3 for this path (see the
+; %2 == 2 branch of FILTER_PIX_TO_SHORT_sse2).  m0 must be zero.
+%macro FILTER_P2S_4_2_sse2 0
+    movd        m2,     [r0]
+    movd        m3,     [r0 + r1]
+    punpckldq   m2,     m3
+    punpcklbw   m2,     m0
+    psllw       m2,     6
+    psubw       m2,     [pw_8192]
+    movh        [r2],   m2
+    movhps      [r2 + r3 * 2], m2
+%endmacro
+
+; pixel->short conversion, 8 columns x 4 rows, at column offset %1.
+; One row per register; widen bytes to words, <<6, subtract m1 (pw_8192).
+; Expects r4 = 3*srcStride, r5 = 3*dstStride(bytes), m0 = zero.
+%macro FILTER_P2S_8_4_sse2 1
+    movh        m2,     [r0 + %1]
+    movh        m3,     [r0 + r1 + %1]
+    movh        m4,     [r0 + r1 * 2 + %1]
+    movh        m5,     [r0 + r4 + %1]
+    punpcklbw   m2,     m0
+    punpcklbw   m3,     m0
+    punpcklbw   m5,     m0
+    punpcklbw   m4,     m0
+    psllw       m2,     6
+    psllw       m3,     6
+    psllw       m5,     6
+    psllw       m4,     6
+    psubw       m2,     m1
+    psubw       m3,     m1
+    psubw       m4,     m1
+    psubw       m5,     m1
+    movu        [r2 + r3 * 0 + %1 * 2], m2
+    movu        [r2 + r3 * 1 + %1 * 2], m3
+    movu        [r2 + r3 * 2 + %1 * 2], m4
+    movu        [r2 + r5 + %1 * 2], m5
+%endmacro
+
+; pixel->short conversion, 8 columns x 2 rows, at column offset %1.
+; Same scaling as FILTER_P2S_8_4_sse2 (m0 = zero, m1 = pw_8192, r3 doubled).
+%macro FILTER_P2S_8_2_sse2 1
+    movh        m2,     [r0 + %1]
+    movh        m3,     [r0 + r1 + %1]
+    punpcklbw   m2,     m0
+    punpcklbw   m3,     m0
+    psllw       m2,     6
+    psllw       m3,     6
+    psubw       m2,     m1
+    psubw       m3,     m1
+    movu        [r2 + r3 * 0 + %1 * 2], m2
+    movu        [r2 + r3 * 1 + %1 * 2], m3
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+; Converts a %1 x %2 block of 8-bit pixels to the int16 intermediate format
+; ((pix << 6) - 8192), dispatching to the FILTER_P2S_* helpers above:
+; four rows at a time, tiled 8/4/2 columns wide.  The height-2 blocks and the
+; 8x6 odd height are special-cased.  m0 = zero throughout.
+%macro FILTER_PIX_TO_SHORT_sse2 2
+INIT_XMM sse2
+cglobal filterPixelToShort_%1x%2, 4, 6, 6
+    pxor        m0,     m0
+%if %2 == 2
+%if %1 == 4
+    FILTER_P2S_4_2_sse2                            ; handles its own offset/stride (r3 not doubled)
+%elif %1 == 8
+    add        r3d, r3d
+    mova       m1, [pw_8192]
+    FILTER_P2S_8_2_sse2 0
+%endif
+%else
+    add        r3d, r3d                            ; dst is int16: stride in bytes
+    mova       m1, [pw_8192]
+    lea        r4, [r1 * 3]
+    lea        r5, [r3 * 3]
+%assign y 1
+%rep %2/4
+%assign x 0
+%rep %1/8
+    FILTER_P2S_8_4_sse2 x
+%if %2 == 6
+    lea         r0,     [r0 + 4 * r1]              ; 8x6 only: do the 2 leftover rows here
+    lea         r2,     [r2 + 4 * r3]
+    FILTER_P2S_8_2_sse2 x
+%endif
+%assign x x+8
+%endrep
+%rep (%1 % 8)/4
+    FILTER_P2S_4_4_sse2 x
+%assign x x+4
+%endrep
+%rep (%1 % 4)/2
+    FILTER_P2S_2_4_sse2 x
+%endrep
+%if y < %2/4
+    lea         r0,     [r0 + 4 * r1]
+    lea         r2,     [r2 + 4 * r3]
+%assign y y+1
+%endif
+%endrep
+%endif
+RET
+%endmacro
+
+; Instantiate filterPixelToShort_WxH for every supported partition size.
+    FILTER_PIX_TO_SHORT_sse2 2, 4
+    FILTER_PIX_TO_SHORT_sse2 2, 8
+    FILTER_PIX_TO_SHORT_sse2 2, 16
+    FILTER_PIX_TO_SHORT_sse2 4, 2
+    FILTER_PIX_TO_SHORT_sse2 4, 4
+    FILTER_PIX_TO_SHORT_sse2 4, 8
+    FILTER_PIX_TO_SHORT_sse2 4, 16
+    FILTER_PIX_TO_SHORT_sse2 4, 32
+    FILTER_PIX_TO_SHORT_sse2 6, 8
+    FILTER_PIX_TO_SHORT_sse2 6, 16
+    FILTER_PIX_TO_SHORT_sse2 8, 2
+    FILTER_PIX_TO_SHORT_sse2 8, 4
+    FILTER_PIX_TO_SHORT_sse2 8, 6
+    FILTER_PIX_TO_SHORT_sse2 8, 8
+    FILTER_PIX_TO_SHORT_sse2 8, 12
+    FILTER_PIX_TO_SHORT_sse2 8, 16
+    FILTER_PIX_TO_SHORT_sse2 8, 32
+    FILTER_PIX_TO_SHORT_sse2 8, 64
+    FILTER_PIX_TO_SHORT_sse2 12, 16
+    FILTER_PIX_TO_SHORT_sse2 12, 32
+    FILTER_PIX_TO_SHORT_sse2 16, 4
+    FILTER_PIX_TO_SHORT_sse2 16, 8
+    FILTER_PIX_TO_SHORT_sse2 16, 12
+    FILTER_PIX_TO_SHORT_sse2 16, 16
+    FILTER_PIX_TO_SHORT_sse2 16, 24
+    FILTER_PIX_TO_SHORT_sse2 16, 32
+    FILTER_PIX_TO_SHORT_sse2 16, 64
+    FILTER_PIX_TO_SHORT_sse2 24, 32
+    FILTER_PIX_TO_SHORT_sse2 24, 64
+    FILTER_PIX_TO_SHORT_sse2 32, 8
+    FILTER_PIX_TO_SHORT_sse2 32, 16
+    FILTER_PIX_TO_SHORT_sse2 32, 24
+    FILTER_PIX_TO_SHORT_sse2 32, 32
+    FILTER_PIX_TO_SHORT_sse2 32, 48
+    FILTER_PIX_TO_SHORT_sse2 32, 64
+    FILTER_PIX_TO_SHORT_sse2 48, 64
+    FILTER_PIX_TO_SHORT_sse2 64, 16
+    FILTER_PIX_TO_SHORT_sse2 64, 32
+    FILTER_PIX_TO_SHORT_sse2 64, 48
+    FILTER_PIX_TO_SHORT_sse2 64, 64
+
+; 4-tap horizontal chroma filter: 2 output pixels in each of 2 rows.
+; %1/%2 = scratch regs, %3 = rounding constant (pw_512 for pmulhrsw).
+; Expects coef2 (taps) and Tm0 (shuffle table) preloaded; clobbers r4.
+%macro FILTER_H4_w2_2 3
+    movh        %2, [srcq - 1]
+    pshufb      %2, %2, Tm0
+    movh        %1, [srcq + srcstrideq - 1]
+    pshufb      %1, %1, Tm0
+    punpcklqdq  %2, %1                  ; pack both rows into one register
+    pmaddubsw   %2, coef2
+    phaddw      %2, %2
+    pmulhrsw    %2, %3                  ; round and shift back to pixel range
+    packuswb    %2, %2
+    movd        r4, %2
+    mov         [dstq], r4w             ; low word = row 0
+    shr         r4, 16
+    mov         [dstq + dststrideq], r4w ; next word = row 1
+%endmacro
+
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_2x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_horiz_pp_2x4, 4, 6, 5, src, srcstride, dst, dststride
+%define coef2       m4
+%define Tm0         m3
+%define t2          m2
+%define t1          m1
+%define t0          m0
+
+    mov         r4d,        r4m         ; r4d = coeffIdx
+
+%ifdef PIC
+    lea         r5,          [tab_ChromaCoeff]
+    movd        coef2,       [r5 + r4 * 4]
+%else
+    movd        coef2,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd      coef2,       coef2,      0   ; broadcast the 4 taps
+    mova        t2,          [pw_512]
+    mova        Tm0,         [tab_Tm]
+
+%rep 2                                  ; 2 rows per iteration -> 4 rows total
+    FILTER_H4_w2_2   t0, t1, t2
+    lea         srcq,       [srcq + srcstrideq * 2]
+    lea         dstq,       [dstq + dststrideq * 2]
+%endrep
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_2x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_horiz_pp_2x8, 4, 6, 5, src, srcstride, dst, dststride
+%define coef2       m4
+%define Tm0         m3
+%define t2          m2
+%define t1          m1
+%define t0          m0
+
+    mov         r4d,        r4m         ; r4d = coeffIdx
+
+%ifdef PIC
+    lea         r5,          [tab_ChromaCoeff]
+    movd        coef2,       [r5 + r4 * 4]
+%else
+    movd        coef2,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd      coef2,       coef2,      0   ; broadcast the 4 taps
+    mova        t2,          [pw_512]
+    mova        Tm0,         [tab_Tm]
+
+%rep 4                                  ; 2 rows per iteration -> 8 rows total
+    FILTER_H4_w2_2   t0, t1, t2
+    lea         srcq,       [srcq + srcstrideq * 2]
+    lea         dstq,       [dstq + dststrideq * 2]
+%endrep
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_2x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_horiz_pp_2x16, 4, 6, 5, src, srcstride, dst, dststride
+%define coef2       m4
+%define Tm0         m3
+%define t2          m2
+%define t1          m1
+%define t0          m0
+
+    mov         r4d,        r4m         ; r4d = coeffIdx
+
+%ifdef PIC
+    lea         r5,          [tab_ChromaCoeff]
+    movd        coef2,       [r5 + r4 * 4]
+%else
+    movd        coef2,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd      coef2,       coef2,      0   ; broadcast the 4 taps
+    mova        t2,          [pw_512]
+    mova        Tm0,         [tab_Tm]
+
+    mov         r5d,        16/2        ; run-time loop (2 rows per pass)
+
+.loop:
+    FILTER_H4_w2_2   t0, t1, t2
+    lea         srcq,       [srcq + srcstrideq * 2]
+    lea         dstq,       [dstq + dststrideq * 2]
+    dec         r5d
+    jnz         .loop
+
+    RET
+
+; 4-tap horizontal chroma filter: 4 output pixels in each of 2 rows.
+; %1/%2 = scratch regs, %3 = rounding constant (pw_512).
+%macro FILTER_H4_w4_2 3
+    movh        %2, [srcq - 1]
+    pshufb      %2, %2, Tm0
+    pmaddubsw   %2, coef2
+    movh        %1, [srcq + srcstrideq - 1]
+    pshufb      %1, %1, Tm0
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1                  ; row 0 in low qword, row 1 in high
+    pmulhrsw    %2, %3
+    packuswb    %2, %2
+    movd        [dstq], %2
+    palignr     %2, %2, 4               ; rotate row 1 bytes into position
+    movd        [dstq + dststrideq], %2
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_4x2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_horiz_pp_4x2, 4, 6, 5, src, srcstride, dst, dststride
+%define coef2       m4
+%define Tm0         m3
+%define t2          m2
+%define t1          m1
+%define t0          m0
+
+    mov         r4d,        r4m         ; r4d = coeffIdx
+
+%ifdef PIC
+    lea         r5,          [tab_ChromaCoeff]
+    movd        coef2,       [r5 + r4 * 4]
+%else
+    movd        coef2,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd      coef2,       coef2,      0   ; broadcast the 4 taps
+    mova        t2,          [pw_512]
+    mova        Tm0,         [tab_Tm]
+
+    FILTER_H4_w4_2   t0, t1, t2         ; single 2-row pass
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_4x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_horiz_pp_4x4, 4, 6, 5, src, srcstride, dst, dststride
+%define coef2       m4
+%define Tm0         m3
+%define t2          m2
+%define t1          m1
+%define t0          m0
+
+    mov         r4d,        r4m         ; r4d = coeffIdx
+
+%ifdef PIC
+    lea         r5,          [tab_ChromaCoeff]
+    movd        coef2,       [r5 + r4 * 4]
+%else
+    movd        coef2,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd      coef2,       coef2,      0   ; broadcast the 4 taps
+    mova        t2,          [pw_512]
+    mova        Tm0,         [tab_Tm]
+
+%rep 2                                  ; 2 rows per iteration -> 4 rows total
+    FILTER_H4_w4_2   t0, t1, t2
+    lea         srcq,       [srcq + srcstrideq * 2]
+    lea         dstq,       [dstq + dststrideq * 2]
+%endrep
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_4x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_horiz_pp_4x8, 4, 6, 5, src, srcstride, dst, dststride
+%define coef2       m4
+%define Tm0         m3
+%define t2          m2
+%define t1          m1
+%define t0          m0
+
+    mov         r4d,        r4m         ; r4d = coeffIdx
+
+%ifdef PIC
+    lea         r5,          [tab_ChromaCoeff]
+    movd        coef2,       [r5 + r4 * 4]
+%else
+    movd        coef2,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd      coef2,       coef2,      0   ; broadcast the 4 taps
+    mova        t2,          [pw_512]
+    mova        Tm0,         [tab_Tm]
+
+%rep 4                                  ; 2 rows per iteration -> 8 rows total
+    FILTER_H4_w4_2   t0, t1, t2
+    lea         srcq,       [srcq + srcstrideq * 2]
+    lea         dstq,       [dstq + dststrideq * 2]
+%endrep
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_4x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_horiz_pp_4x16, 4, 6, 5, src, srcstride, dst, dststride
+%define coef2       m4
+%define Tm0         m3
+%define t2          m2
+%define t1          m1
+%define t0          m0
+
+    mov         r4d,        r4m         ; r4d = coeffIdx
+
+%ifdef PIC
+    lea         r5,          [tab_ChromaCoeff]
+    movd        coef2,       [r5 + r4 * 4]
+%else
+    movd        coef2,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd      coef2,       coef2,      0   ; broadcast the 4 taps
+    mova        t2,          [pw_512]
+    mova        Tm0,         [tab_Tm]
+
+%rep 8                                  ; 2 rows per iteration -> 16 rows total
+    FILTER_H4_w4_2   t0, t1, t2
+    lea         srcq,       [srcq + srcstrideq * 2]
+    lea         dstq,       [dstq + dststrideq * 2]
+%endrep
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_4x32(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_horiz_pp_4x32, 4, 6, 5, src, srcstride, dst, dststride
+%define coef2       m4
+%define Tm0         m3
+%define t2          m2
+%define t1          m1
+%define t0          m0
+
+    mov         r4d,        r4m         ; r4d = coeffIdx
+
+%ifdef PIC
+    lea         r5,          [tab_ChromaCoeff]
+    movd        coef2,       [r5 + r4 * 4]
+%else
+    movd        coef2,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd      coef2,       coef2,      0   ; broadcast the 4 taps
+    mova        t2,          [pw_512]
+    mova        Tm0,         [tab_Tm]
+
+    mov         r5d,        32/2        ; run-time loop (2 rows per pass)
+
+.loop:
+    FILTER_H4_w4_2   t0, t1, t2
+    lea         srcq,       [srcq + srcstrideq * 2]
+    lea         dstq,       [dstq + dststrideq * 2]
+    dec         r5d
+    jnz         .loop
+
+    RET
+
+ALIGN 32
+const interp_4tap_8x8_horiz_shuf,   dd 0, 4, 1, 5, 2, 6, 3, 7   ; dword interleave pattern for 8x8 horizontal kernels
+
+
+; 4-tap horizontal chroma filter, one row of 6 pixels (4-byte + 2-byte store).
+%macro FILTER_H4_w6 3
+    movu        %1, [srcq - 1]
+    pshufb      %2, %1, Tm0             ; pixels 0-3
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1             ; pixels 4-7
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1
+    pmulhrsw    %2, %3
+    packuswb    %2, %2
+    movd        [dstq],      %2         ; bytes 0-3
+    pextrw      [dstq + 4], %2, 2       ; bytes 4-5
+%endmacro
+
+; 4-tap horizontal chroma filter, one row of 8 pixels.
+%macro FILTER_H4_w8 3
+    movu        %1, [srcq - 1]
+    pshufb      %2, %1, Tm0             ; pixels 0-3
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1             ; pixels 4-7
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1
+    pmulhrsw    %2, %3
+    packuswb    %2, %2
+    movh        [dstq],      %2
+%endmacro
+
+; 4-tap horizontal chroma filter, one row of 12 pixels (8 + 4).
+%macro FILTER_H4_w12 3
+    movu        %1, [srcq - 1]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1                  ; pixels 0-7
+    pmulhrsw    %2, %3
+    movu        %1, [srcq - 1 + 8]
+    pshufb      %1, %1, Tm0
+    pmaddubsw   %1, coef2
+    phaddw      %1, %1                  ; pixels 8-11
+    pmulhrsw    %1, %3
+    packuswb    %2, %1
+    movh        [dstq],      %2
+    pextrd      [dstq + 8], %2, 2       ; last 4 bytes
+%endmacro
+
+; 4-tap horizontal chroma filter, one row of 16 pixels (two 8-pixel halves).
+%macro FILTER_H4_w16 4
+    movu        %1, [srcq - 1]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1                  ; pixels 0-7
+    movu        %1, [srcq - 1 + 8]
+    pshufb      %4, %1, Tm0
+    pmaddubsw   %4, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %4, %1                  ; pixels 8-15
+    pmulhrsw    %2, %3
+    pmulhrsw    %4, %3
+    packuswb    %2, %4
+    movu        [dstq],      %2
+%endmacro
+
+; 4-tap horizontal chroma filter, one row of 24 pixels (16 + 8).
+%macro FILTER_H4_w24 4
+    movu        %1, [srcq - 1]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1                  ; pixels 0-7
+    movu        %1, [srcq - 1 + 8]
+    pshufb      %4, %1, Tm0
+    pmaddubsw   %4, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %4, %1                  ; pixels 8-15
+    pmulhrsw    %2, %3
+    pmulhrsw    %4, %3
+    packuswb    %2, %4
+    movu        [dstq],          %2
+    movu        %1, [srcq - 1 + 16]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1                  ; pixels 16-23
+    pmulhrsw    %2, %3
+    packuswb    %2, %2
+    movh        [dstq + 16],     %2
+%endmacro
+
+; 4-tap horizontal chroma filter, one row of 32 pixels (two 16-pixel stores).
+%macro FILTER_H4_w32 4
+    movu        %1, [srcq - 1]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1                  ; pixels 0-7
+    movu        %1, [srcq - 1 + 8]
+    pshufb      %4, %1, Tm0
+    pmaddubsw   %4, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %4, %1                  ; pixels 8-15
+    pmulhrsw    %2, %3
+    pmulhrsw    %4, %3
+    packuswb    %2, %4
+    movu        [dstq],      %2
+    movu        %1, [srcq - 1 + 16]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1                  ; pixels 16-23
+    movu        %1, [srcq - 1 + 24]
+    pshufb      %4, %1, Tm0
+    pmaddubsw   %4, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %4, %1                  ; pixels 24-31
+    pmulhrsw    %2, %3
+    pmulhrsw    %4, %3
+    packuswb    %2, %4
+    movu        [dstq + 16],      %2
+%endmacro
+
+; 4-tap horizontal chroma filter, 16 pixels at byte offset %5 within the row;
+; building block for the 48- and 64-wide kernels below.
+%macro FILTER_H4_w16o 5
+    movu        %1, [srcq + %5 - 1]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1                  ; pixels %5+0 .. %5+7
+    movu        %1, [srcq + %5 - 1 + 8]
+    pshufb      %4, %1, Tm0
+    pmaddubsw   %4, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %4, %1                  ; pixels %5+8 .. %5+15
+    pmulhrsw    %2, %3
+    pmulhrsw    %4, %3
+    packuswb    %2, %4
+    movu        [dstq + %5],      %2
+%endmacro
+
+; One row of 48 pixels = three 16-pixel segments.
+%macro FILTER_H4_w48 4
+    FILTER_H4_w16o %1, %2, %3, %4, 0
+    FILTER_H4_w16o %1, %2, %3, %4, 16
+    FILTER_H4_w16o %1, %2, %3, %4, 32
+%endmacro
+
+; One row of 64 pixels = four 16-pixel segments.
+%macro FILTER_H4_w64 4
+    FILTER_H4_w16o %1, %2, %3, %4, 0
+    FILTER_H4_w16o %1, %2, %3, %4, 16
+    FILTER_H4_w16o %1, %2, %3, %4, 32
+    FILTER_H4_w16o %1, %2, %3, %4, 48
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; Generic row loop for widths whose FILTER_H4_w%1 helper takes 3 args
+; (needs only two scratch regs); r5d counts rows.
+;-----------------------------------------------------------------------------
+%macro IPFILTER_CHROMA 2
+INIT_XMM sse4
+cglobal interp_4tap_horiz_pp_%1x%2, 4, 6, 6, src, srcstride, dst, dststride
+%define coef2       m5
+%define Tm0         m4
+%define Tm1         m3
+%define t2          m2
+%define t1          m1
+%define t0          m0
+
+    mov         r4d,        r4m         ; r4d = coeffIdx
+
+%ifdef PIC
+    lea         r5,          [tab_ChromaCoeff]
+    movd        coef2,       [r5 + r4 * 4]
+%else
+    movd        coef2,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mov           r5d,       %2         ; r5d = row count
+
+    pshufd      coef2,       coef2,      0   ; broadcast the 4 taps
+    mova        t2,          [pw_512]
+    mova        Tm0,         [tab_Tm]
+    mova        Tm1,         [tab_Tm + 16]
+
+.loop:
+    FILTER_H4_w%1   t0, t1, t2          ; one full row per iteration
+    add         srcq,        srcstrideq
+    add         dstq,        dststrideq
+
+    dec         r5d
+    jnz        .loop
+
+    RET
+%endmacro
+
+
+; Instantiations for block widths 6, 8 and 12.
+    IPFILTER_CHROMA 6,   8
+    IPFILTER_CHROMA 8,   2
+    IPFILTER_CHROMA 8,   4
+    IPFILTER_CHROMA 8,   6
+    IPFILTER_CHROMA 8,   8
+    IPFILTER_CHROMA 8,  16
+    IPFILTER_CHROMA 8,  32
+    IPFILTER_CHROMA 12, 16
+
+    IPFILTER_CHROMA 6,  16
+    IPFILTER_CHROMA 8,  12
+    IPFILTER_CHROMA 8,  64
+    IPFILTER_CHROMA 12, 32
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; Variant of IPFILTER_CHROMA for wide blocks whose FILTER_H4_w%1 helper takes
+; 4 args (needs a third scratch register, t3).
+;-----------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_W 2
+INIT_XMM sse4
+cglobal interp_4tap_horiz_pp_%1x%2, 4, 6, 7, src, srcstride, dst, dststride
+%define coef2       m6
+%define Tm0         m5
+%define Tm1         m4
+%define t3          m3
+%define t2          m2
+%define t1          m1
+%define t0          m0
+
+    mov         r4d,         r4m        ; r4d = coeffIdx
+
+%ifdef PIC
+    lea         r5,          [tab_ChromaCoeff]
+    movd        coef2,       [r5 + r4 * 4]
+%else
+    movd        coef2,       [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mov         r5d,          %2        ; r5d = row count
+
+    pshufd      coef2,       coef2,      0   ; broadcast the 4 taps
+    mova        t2,          [pw_512]
+    mova        Tm0,         [tab_Tm]
+    mova        Tm1,         [tab_Tm + 16]
+
+.loop:
+    FILTER_H4_w%1   t0, t1, t2, t3      ; one full row per iteration
+    add         srcq,        srcstrideq
+    add         dstq,        dststrideq
+
+    dec         r5d
+    jnz        .loop
+
+    RET
+%endmacro
+
+; Instantiations for block widths 16 through 64.
+    IPFILTER_CHROMA_W 16,  4
+    IPFILTER_CHROMA_W 16,  8
+    IPFILTER_CHROMA_W 16, 12
+    IPFILTER_CHROMA_W 16, 16
+    IPFILTER_CHROMA_W 16, 32
+    IPFILTER_CHROMA_W 32,  8
+    IPFILTER_CHROMA_W 32, 16
+    IPFILTER_CHROMA_W 32, 24
+    IPFILTER_CHROMA_W 24, 32
+    IPFILTER_CHROMA_W 32, 32
+
+    IPFILTER_CHROMA_W 16, 24
+    IPFILTER_CHROMA_W 16, 64
+    IPFILTER_CHROMA_W 32, 48
+    IPFILTER_CHROMA_W 24, 64
+    IPFILTER_CHROMA_W 32, 64
+
+    IPFILTER_CHROMA_W 64, 64
+    IPFILTER_CHROMA_W 64, 32
+    IPFILTER_CHROMA_W 64, 48
+    IPFILTER_CHROMA_W 48, 64
+    IPFILTER_CHROMA_W 64, 16
+
+
+; 8-tap horizontal luma filter, 8 pixels of one row.
+; With 8 args it also rounds (%6), packs and stores to %8; with 7 args the
+; caller post-processes the 16-bit result left in %2 (ps path).
+%macro FILTER_H8_W8 7-8   ; t0, t1, t2, t3, coef, c512, src, dst
+    movu        %1, %7
+    pshufb      %2, %1, [tab_Lm +  0]
+    pmaddubsw   %2, %5
+    pshufb      %3, %1, [tab_Lm + 16]
+    pmaddubsw   %3, %5
+    phaddw      %2, %3                  ; pixels 0-1 and 2-3 partial sums
+    pshufb      %4, %1, [tab_Lm + 32]
+    pmaddubsw   %4, %5
+    pshufb      %1, %1, [tab_Lm + 48]
+    pmaddubsw   %1, %5
+    phaddw      %4, %1                  ; pixels 4-5 and 6-7 partial sums
+    phaddw      %2, %4                  ; full 8 filtered words in %2
+  %if %0 == 8
+    pmulhrsw    %2, %6                  ; round back to pixel range
+    packuswb    %2, %2
+    movh        %8, %2
+  %endif
+%endmacro
+
+; 8-tap horizontal luma filter, 4 pixels of one row at column offset r5.
+; Leaves 16-bit results in %2; uses m3 = taps and clobbers m7.
+%macro FILTER_H8_W4 2
+    movu        %1, [r0 - 3 + r5]
+    pshufb      %2, %1, [tab_Lm]
+    pmaddubsw   %2, m3
+    pshufb      m7, %1, [tab_Lm + 16]
+    pmaddubsw   m7, m3
+    phaddw      %2, m7
+    phaddw      %2, %2                  ; 4 filtered words in low qword
+%endmacro
+
+;----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+; %3 selects the variant: pp rounds back to pixels (pw_512); ps stores the
+; 16-bit intermediate minus the pw_2000 offset.
+;----------------------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_LUMA 3
+INIT_XMM sse4
+cglobal interp_8tap_horiz_%3_%1x%2, 4,7,8
+
+    mov       r4d, r4m                  ; r4d = coeffIdx
+
+%ifdef PIC
+    lea       r6, [tab_LumaCoeff]
+    movh      m3, [r6 + r4 * 8]
+%else
+    movh      m3, [tab_LumaCoeff + r4 * 8]
+%endif
+    punpcklqdq  m3, m3                  ; duplicate the 8 taps into both qwords
+
+%ifidn %3, pp
+    mova      m2, [pw_512]              ; pp: rounding constant for pmulhrsw
+%else
+    mova      m2, [pw_2000]             ; ps: offset subtracted before store
+%endif
+
+    mov       r4d, %2
+%ifidn %3, ps
+    add       r3, r3                    ; dst stride in int16_t -> bytes
+    cmp       r5m, byte 0
+    je        .loopH
+    lea       r6, [r1 + 2 * r1]
+    sub       r0, r6                    ; isRowExt: back up 3 rows ...
+    add       r4d, 7                    ; ... and produce height+7 rows
+%endif
+
+.loopH:
+    xor       r5, r5                    ; r5 = column byte offset within the row
+%rep %1 / 8
+  %ifidn %3, pp
+    FILTER_H8_W8  m0, m1, m4, m5, m3, m2, [r0 - 3 + r5], [r2 + r5]
+  %else
+    FILTER_H8_W8  m0, m1, m4, m5, m3, UNUSED, [r0 - 3 + r5]
+    psubw     m1, m2
+    movu      [r2 + 2 * r5], m1
+  %endif
+    add       r5, 8
+%endrep
+
+%rep (%1 % 8) / 4
+    FILTER_H8_W4  m0, m1
+  %ifidn %3, pp
+    pmulhrsw  m1, m2
+    packuswb  m1, m1
+    movd      [r2 + r5], m1
+  %else
+    psubw     m1, m2
+    movh      [r2 + 2 * r5], m1
+  %endif
+%endrep
+
+    add       r0, r1
+    add       r2, r3
+
+    dec       r4d
+    jnz       .loopH
+    RET
+%endmacro
+
+
+; AVX2 8-tap horizontal luma filter for a full 4x4 block, fully unrolled.
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_4x4, 4,6,6
+    mov             r4d, r4m            ; r4d = coeffIdx
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_LumaCoeff + r4 * 8]
+%endif
+
+    mova            m1, [tab_Lm]
+    vpbroadcastd    m2, [pw_1]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    sub             r0, 3               ; filter window starts 3 pixels left
+    ; Row 0-1
+    vbroadcasti128  m3, [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m3, m1
+    pmaddubsw       m3, m0
+    pmaddwd         m3, m2
+    vbroadcasti128  m4, [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m4, m1
+    pmaddubsw       m4, m0
+    pmaddwd         m4, m2
+    phaddd          m3, m4                          ; DWORD [R1D R1C R0D R0C R1B R1A R0B R0A]
+
+    ; Row 2-3
+    lea             r0, [r0 + r1 * 2]
+    vbroadcasti128  m4, [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m4, m1
+    pmaddubsw       m4, m0
+    pmaddwd         m4, m2
+    vbroadcasti128  m5, [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m5, m1
+    pmaddubsw       m5, m0
+    pmaddwd         m5, m2
+    phaddd          m4, m5                          ; DWORD [R3D R3C R2D R2C R3B R3A R2B R2A]
+
+    packssdw        m3, m4                          ; WORD [R3D R3C R2D R2C R1D R1C R0D R0C R3B R3A R2B R2A R1B R1A R0B R0A]
+    pmulhrsw        m3, [pw_512]
+    vextracti128    xm4, m3, 1
+    packuswb        xm3, xm4                        ; BYTE [R3D R3C R2D R2C R1D R1C R0D R0C R3B R3A R2B R2A R1B R1A R0B R0A]
+    pshufb          xm3, [interp4_shuf]             ; [row3 row1 row2 row0]
+
+    lea             r0, [r3 * 3]        ; r0 reused as 3 * dstStride
+    movd            [r2], xm3
+    pextrd          [r2+r3], xm3, 2
+    pextrd          [r2+r3*2], xm3, 1
+    pextrd          [r2+r0], xm3, 3
+    RET
+
+; AVX2 8-tap horizontal luma filter, 4 x %1 block; unrolled 8 rows per %rep
+; iteration.  x86-64 only: it uses m8, unavailable in 32-bit mode.
+%macro FILTER_HORIZ_LUMA_AVX2_4xN 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_horiz_pp_4x%1, 4, 6, 9
+    mov             r4d, r4m            ; r4d = coeffIdx
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_LumaCoeff + r4 * 8]
+%endif
+
+    mova            m1, [tab_Lm]
+    mova            m2, [pw_1]
+    mova            m7, [interp8_hps_shuf]
+    mova            m8, [pw_512]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    ; m7 - output permutation, m8 - rounding constant
+    lea             r4, [r1 * 3]        ; r4 = 3 * srcStride (coeffIdx no longer needed)
+    lea             r5, [r3 * 3]        ; r5 = 3 * dstStride
+    sub             r0, 3
+%rep %1 / 8
+    ; Row 0-1
+    vbroadcasti128  m3, [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m3, m1
+    pmaddubsw       m3, m0
+    pmaddwd         m3, m2
+    vbroadcasti128  m4, [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m4, m1
+    pmaddubsw       m4, m0
+    pmaddwd         m4, m2
+    phaddd          m3, m4                          ; DWORD [R1D R1C R0D R0C R1B R1A R0B R0A]
+
+    ; Row 2-3
+    vbroadcasti128  m4, [r0 + r1 * 2]               ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m4, m1
+    pmaddubsw       m4, m0
+    pmaddwd         m4, m2
+    vbroadcasti128  m5, [r0 + r4]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m5, m1
+    pmaddubsw       m5, m0
+    pmaddwd         m5, m2
+    phaddd          m4, m5                          ; DWORD [R3D R3C R2D R2C R3B R3A R2B R2A]
+
+    packssdw        m3, m4                          ; WORD [R3D R3C R2D R2C R1D R1C R0D R0C R3B R3A R2B R2A R1B R1A R0B R0A]
+    lea             r0, [r0 + r1 * 4]
+    ; Row 4-5
+    vbroadcasti128  m5, [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m5, m1
+    pmaddubsw       m5, m0
+    pmaddwd         m5, m2
+    vbroadcasti128  m4, [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m4, m1
+    pmaddubsw       m4, m0
+    pmaddwd         m4, m2
+    phaddd          m5, m4                          ; DWORD [R5D R5C R4D R4C R5B R5A R4B R4A]
+
+    ; Row 6-7
+    vbroadcasti128  m4, [r0 + r1 * 2]               ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m4, m1
+    pmaddubsw       m4, m0
+    pmaddwd         m4, m2
+    vbroadcasti128  m6, [r0 + r4]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m6, m1
+    pmaddubsw       m6, m0
+    pmaddwd         m6, m2
+    phaddd          m4, m6                          ; DWORD [R7D R7C R6D R6C R7B R7A R6B R6A]
+
+    packssdw        m5, m4                          ; WORD [R7D R7C R6D R6C R5D R5C R4D R4C R7B R7A R6B R6A R5B R5A R4B R4A]
+    vpermd          m3, m7, m3                      ; reorder rows before rounding
+    vpermd          m5, m7, m5
+    pmulhrsw        m3, m8
+    pmulhrsw        m5, m8
+    packuswb        m3, m5
+    vextracti128    xm5, m3, 1
+
+    movd            [r2], xm3
+    pextrd          [r2 + r3], xm3, 1
+    movd            [r2 + r3 * 2], xm5
+    pextrd          [r2 + r5], xm5, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm3, 2
+    pextrd          [r2 + r3], xm3, 3
+    pextrd          [r2 + r3 * 2], xm5, 2
+    pextrd          [r2 + r5], xm5, 3
+    lea             r0, [r0 + r1 * 4]
+    lea             r2, [r2 + r3 * 4]
+%endrep
+    RET
+%endif
+%endmacro
+
+    FILTER_HORIZ_LUMA_AVX2_4xN 8       ; interp_8tap_horiz_pp_4x8 (x86-64 only)
+    FILTER_HORIZ_LUMA_AVX2_4xN 16      ; interp_8tap_horiz_pp_4x16 (x86-64 only)
+
+; AVX2 8-tap horizontal luma filter for a full 8x4 block, fully unrolled.
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_8x4, 4, 6, 7
+    mov             r4d, r4m            ; r4d = coeffIdx
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_LumaCoeff + r4 * 8]
+%endif
+
+    mova            m1, [tab_Lm]
+    mova            m2, [tab_Lm + 32]
+
+    ; register map
+    ; m0     - interpolate coeff
+    ; m1, m2 - shuffle order table
+
+    sub             r0, 3
+    lea             r5, [r1 * 3]        ; r5 = 3 * srcStride
+    lea             r4, [r3 * 3]        ; r4 = 3 * dstStride (coeffIdx no longer needed)
+
+    ; Row 0
+    vbroadcasti128  m3, [r0]                        ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m4, m3, m2
+    pshufb          m3, m1
+    pmaddubsw       m3, m0
+    pmaddubsw       m4, m0
+    phaddw          m3, m4
+    ; Row 1
+    vbroadcasti128  m4, [r0 + r1]                   ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m5, m4, m2
+    pshufb          m4, m1
+    pmaddubsw       m4, m0
+    pmaddubsw       m5, m0
+    phaddw          m4, m5
+
+    phaddw          m3, m4                          ; WORD [R1H R1G R1D R1C R0H R0G R0D R0C R1F R1E R1B R1A R0F R0E R0B R0A]
+    pmulhrsw        m3, [pw_512]
+
+    ; Row 2
+    vbroadcasti128  m4, [r0 + r1 * 2]               ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m5, m4, m2
+    pshufb          m4, m1
+    pmaddubsw       m4, m0
+    pmaddubsw       m5, m0
+    phaddw          m4, m5
+    ; Row 3
+    vbroadcasti128  m5, [r0 + r5]                   ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m6, m5, m2
+    pshufb          m5, m1
+    pmaddubsw       m5, m0
+    pmaddubsw       m6, m0
+    phaddw          m5, m6
+
+    phaddw          m4, m5                          ; WORD [R3H R3G R3D R3C R2H R2G R2D R2C R3F R3E R3B R3A R2F R2E R2B R2A]
+    pmulhrsw        m4, [pw_512]
+
+    packuswb        m3, m4
+    vextracti128    xm4, m3, 1
+    punpcklwd       xm5, xm3, xm4       ; interleave to recover row order
+
+    movq            [r2], xm5
+    movhps          [r2 + r3], xm5
+
+    punpckhwd       xm5, xm3, xm4
+    movq            [r2 + r3 * 2], xm5
+    movhps          [r2 + r4], xm5
+    RET
+
+; AVX2 8-tap horizontal luma filter, 8 x %2 block; 4 rows per loop iteration.
+%macro IPFILTER_LUMA_AVX2_8xN 2
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_%1x%2, 4, 7, 7
+    mov             r4d, r4m            ; r4d = coeffIdx
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeff]
+    vpbroadcastq    m0, [r5 + r4 * 8]
+%else
+    vpbroadcastq    m0, [tab_LumaCoeff + r4 * 8]
+%endif
+
+    mova            m1, [tab_Lm]
+    mova            m2, [tab_Lm + 32]
+
+    ; register map
+    ; m0     - interpolate coeff
+    ; m1, m2 - shuffle order table
+
+    sub             r0, 3
+    lea             r5, [r1 * 3]        ; r5 = 3 * srcStride
+    lea             r6, [r3 * 3]        ; r6 = 3 * dstStride
+    mov             r4d, %2 / 4         ; r4d = 4-row group counter
+.loop:
+    ; Row 0
+    vbroadcasti128  m3, [r0]                        ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m4, m3, m2
+    pshufb          m3, m1
+    pmaddubsw       m3, m0
+    pmaddubsw       m4, m0
+    phaddw          m3, m4
+    ; Row 1
+    vbroadcasti128  m4, [r0 + r1]                   ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m5, m4, m2
+    pshufb          m4, m1
+    pmaddubsw       m4, m0
+    pmaddubsw       m5, m0
+    phaddw          m4, m5
+
+    phaddw          m3, m4                          ; WORD [R1H R1G R1D R1C R0H R0G R0D R0C R1F R1E R1B R1A R0F R0E R0B R0A]
+    pmulhrsw        m3, [pw_512]
+
+    ; Row 2
+    vbroadcasti128  m4, [r0 + r1 * 2]               ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m5, m4, m2
+    pshufb          m4, m1
+    pmaddubsw       m4, m0
+    pmaddubsw       m5, m0
+    phaddw          m4, m5
+    ; Row 3
+    vbroadcasti128  m5, [r0 + r5]                   ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb          m6, m5, m2
+    pshufb          m5, m1
+    pmaddubsw       m5, m0
+    pmaddubsw       m6, m0
+    phaddw          m5, m6
+
+    phaddw          m4, m5                          ; WORD [R3H R3G R3D R3C R2H R2G R2D R2C R3F R3E R3B R3A R2F R2E R2B R2A]
+    pmulhrsw        m4, [pw_512]
+
+    packuswb        m3, m4
+    vextracti128    xm4, m3, 1
+    punpcklwd       xm5, xm3, xm4       ; interleave to recover row order
+
+    movq            [r2], xm5
+    movhps          [r2 + r3], xm5
+
+    punpckhwd       xm5, xm3, xm4
+    movq            [r2 + r3 * 2], xm5
+    movhps          [r2 + r6], xm5
+
+    lea             r0, [r0 + r1 * 4]
+    lea             r2, [r2 + r3 * 4]
+    dec             r4d
+    jnz             .loop
+    RET
+%endmacro
+
+    IPFILTER_LUMA_AVX2_8xN 8, 8        ; interp_8tap_horiz_pp_8x8
+    IPFILTER_LUMA_AVX2_8xN 8, 16       ; interp_8tap_horiz_pp_8x16
+    IPFILTER_LUMA_AVX2_8xN 8, 32       ; interp_8tap_horiz_pp_8x32
+
+; AVX2 8-tap horizontal luma filter: 16 output pixels per row, 2 rows per
+; loop iteration (the taps are split into two dword halves, m0/m1).
+%macro IPFILTER_LUMA_AVX2 2
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_%1x%2, 4,6,8
+    sub               r0,        3
+    mov               r4d,       r4m    ; r4d = coeffIdx
+%ifdef PIC
+    lea               r5,        [tab_LumaCoeff]
+    vpbroadcastd      m0,        [r5 + r4 * 8]
+    vpbroadcastd      m1,        [r5 + r4 * 8 + 4]
+%else
+    vpbroadcastd      m0,         [tab_LumaCoeff + r4 * 8]
+    vpbroadcastd      m1,         [tab_LumaCoeff + r4 * 8 + 4]
+%endif
+    movu              m3,         [tab_Tm + 16]
+    vpbroadcastd      m7,         [pw_1]
+
+    ; register map
+    ; m0 , m1 - interpolate coeff (low / high 4 taps)
+    ; m3 - shuffle order table ([tab_Tm] is read directly from memory)
+    ; m7 - pw_1
+
+    mov               r4d,        %2/2  ; r4d = 2-row pair counter
+.loop:
+    ; Row 0
+    vbroadcasti128    m4,         [r0]                        ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,         m4,     m3
+    pshufb            m4,         [tab_Tm]
+    pmaddubsw         m4,         m0
+    pmaddubsw         m5,         m1
+    paddw             m4,         m5
+    pmaddwd           m4,         m7
+    vbroadcasti128    m5,         [r0 + 8]                    ; second 8 elements in Row0
+    pshufb            m6,         m5,     m3
+    pshufb            m5,         [tab_Tm]
+    pmaddubsw         m5,         m0
+    pmaddubsw         m6,         m1
+    paddw             m5,         m6
+    pmaddwd           m5,         m7
+    packssdw          m4,         m5                          ; [17 16 15 14 07 06 05 04 13 12 11 10 03 02 01 00]
+    pmulhrsw          m4,         [pw_512]
+    ; Row 1
+    vbroadcasti128    m2,         [r0 + r1]                        ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,         m2,     m3
+    pshufb            m2,         [tab_Tm]
+    pmaddubsw         m2,         m0
+    pmaddubsw         m5,         m1
+    paddw             m2,         m5
+    pmaddwd           m2,         m7
+    vbroadcasti128    m5,         [r0 + r1 + 8]                    ; second 8 elements in Row1
+    pshufb            m6,         m5,     m3
+    pshufb            m5,         [tab_Tm]
+    pmaddubsw         m5,         m0
+    pmaddubsw         m6,         m1
+    paddw             m5,         m6
+    pmaddwd           m5,         m7
+    packssdw          m2,         m5                          ; [17 16 15 14 07 06 05 04 13 12 11 10 03 02 01 00]
+    pmulhrsw          m2,         [pw_512]
+    packuswb          m4,         m2
+    vpermq            m4,         m4,     11011000b           ; gather row 0 / row 1 lanes
+    vextracti128      xm5,        m4,     1
+    pshufd            xm4,        xm4,    11011000b
+    pshufd            xm5,        xm5,    11011000b
+    movu              [r2],       xm4
+    movu              [r2+r3],    xm5
+    lea               r0,         [r0 + r1 * 2]
+    lea               r2,         [r2 + r3 * 2]
+    dec               r4d
+    jnz              .loop
+    RET
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 luma 8-tap horizontal filter for 32-wide blocks: one full 32-pixel row per loop iteration,
+; produced as two 16-pixel halves. %1 = width (32), %2 = height.
+;-------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_LUMA_32x_avx2 2
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_%1x%2, 4,6,8
+    sub               r0,         3                             ; 8-tap window starts 3 pixels left of x
+    mov               r4d,        r4m
+%ifdef PIC
+    lea               r5,         [tab_LumaCoeff]
+    vpbroadcastd      m0,         [r5 + r4 * 8]
+    vpbroadcastd      m1,         [r5 + r4 * 8 + 4]
+%else
+    vpbroadcastd      m0,         [tab_LumaCoeff + r4 * 8]
+    vpbroadcastd      m1,         [tab_LumaCoeff + r4 * 8 + 4]
+%endif
+    movu              m3,         [tab_Tm + 16]
+    vpbroadcastd      m7,         [pw_1]
+
+    ; register map
+    ; m0 , m1 - interpolate coeff (first / second 4 taps)
+    ; m3      - shuffle order table (tab_Tm + 16)
+    ; m7 - pw_1
+
+    mov               r4d,        %2                            ; row counter
+.loop:
+    ; Row 0
+    vbroadcasti128    m4,         [r0]                        ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,         m4,     m3
+    pshufb            m4,         [tab_Tm]
+    pmaddubsw         m4,         m0
+    pmaddubsw         m5,         m1
+    paddw             m4,         m5
+    pmaddwd           m4,         m7
+    vbroadcasti128    m5,         [r0 + 8]                    ; pixels 8..23 of the row
+    pshufb            m6,         m5,     m3
+    pshufb            m5,         [tab_Tm]
+    pmaddubsw         m5,         m0
+    pmaddubsw         m6,         m1
+    paddw             m5,         m6
+    pmaddwd           m5,         m7
+    packssdw          m4,         m5                          ; [17 16 15 14 07 06 05 04 13 12 11 10 03 02 01 00]
+    pmulhrsw          m4,         [pw_512]
+    vbroadcasti128    m2,         [r0 + 16]                   ; pixels 16..31 of the row
+    pshufb            m5,         m2,     m3
+    pshufb            m2,         [tab_Tm]
+    pmaddubsw         m2,         m0
+    pmaddubsw         m5,         m1
+    paddw             m2,         m5
+    pmaddwd           m2,         m7
+    vbroadcasti128    m5,         [r0 + 24]
+    pshufb            m6,         m5,     m3
+    pshufb            m5,         [tab_Tm]
+    pmaddubsw         m5,         m0
+    pmaddubsw         m6,         m1
+    paddw             m5,         m6
+    pmaddwd           m5,         m7
+    packssdw          m2,         m5
+    pmulhrsw          m2,         [pw_512]
+    packuswb          m4,         m2
+    vpermq            m4,         m4,     11011000b           ; undo cross-lane interleave from the packs
+    vextracti128      xm5,        m4,     1
+    pshufd            xm4,        xm4,    11011000b
+    pshufd            xm5,        xm5,    11011000b
+    movu              [r2],       xm4
+    movu              [r2 + 16],  xm5
+    lea               r0,         [r0 + r1]
+    lea               r2,         [r2 + r3]
+    dec               r4d
+    jnz               .loop
+    RET
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 luma 8-tap horizontal filter for 64-wide blocks: one full 64-pixel row per loop iteration,
+; produced as four 16-pixel groups. %1 = width (64), %2 = height.
+;-------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_LUMA_64x_avx2 2
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_%1x%2, 4,6,8
+    sub               r0,    3                                 ; 8-tap window starts 3 pixels left of x
+    mov               r4d,   r4m
+%ifdef PIC
+    lea               r5,        [tab_LumaCoeff]
+    vpbroadcastd      m0,        [r5 + r4 * 8]
+    vpbroadcastd      m1,        [r5 + r4 * 8 + 4]
+%else
+    vpbroadcastd      m0,        [tab_LumaCoeff + r4 * 8]
+    vpbroadcastd      m1,        [tab_LumaCoeff + r4 * 8 + 4]
+%endif
+    movu              m3,        [tab_Tm + 16]
+    vpbroadcastd      m7,        [pw_1]
+
+    ; register map
+    ; m0 , m1 - interpolate coeff (first / second 4 taps)
+    ; m3      - shuffle order table (tab_Tm + 16)
+    ; m7 - pw_1
+
+    mov               r4d,   %2                                ; row counter
+.loop:
+    ; Row 0, pixels 0..31
+    vbroadcasti128    m4,        [r0]                        ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,        m4,    m3
+    pshufb            m4,        [tab_Tm]
+    pmaddubsw         m4,        m0
+    pmaddubsw         m5,        m1
+    paddw             m4,        m5
+    pmaddwd           m4,        m7
+    vbroadcasti128    m5,        [r0 + 8]
+    pshufb            m6,        m5,    m3
+    pshufb            m5,        [tab_Tm]
+    pmaddubsw         m5,        m0
+    pmaddubsw         m6,        m1
+    paddw             m5,        m6
+    pmaddwd           m5,        m7
+    packssdw          m4,        m5                          ; [17 16 15 14 07 06 05 04 13 12 11 10 03 02 01 00]
+    pmulhrsw          m4,        [pw_512]
+    vbroadcasti128    m2,        [r0 + 16]
+    pshufb            m5,        m2,    m3
+    pshufb            m2,        [tab_Tm]
+    pmaddubsw         m2,        m0
+    pmaddubsw         m5,        m1
+    paddw             m2,        m5
+    pmaddwd           m2,        m7
+    vbroadcasti128    m5,        [r0 + 24]
+    pshufb            m6,        m5,    m3
+    pshufb            m5,        [tab_Tm]
+    pmaddubsw         m5,        m0
+    pmaddubsw         m6,        m1
+    paddw             m5,        m6
+    pmaddwd           m5,        m7
+    packssdw          m2,        m5
+    pmulhrsw          m2,        [pw_512]
+    packuswb          m4,        m2
+    vpermq            m4,        m4,    11011000b             ; undo cross-lane interleave from the packs
+    vextracti128      xm5,       m4,    1
+    pshufd            xm4,       xm4,   11011000b
+    pshufd            xm5,       xm5,   11011000b
+    movu              [r2],      xm4
+    movu              [r2 + 16], xm5
+
+    ; Row 0, pixels 32..63 (same pipeline as above, shifted by 32)
+    vbroadcasti128    m4,        [r0 + 32]
+    pshufb            m5,        m4,    m3
+    pshufb            m4,        [tab_Tm]
+    pmaddubsw         m4,        m0
+    pmaddubsw         m5,        m1
+    paddw             m4,        m5
+    pmaddwd           m4,        m7
+    vbroadcasti128    m5,        [r0 + 40]
+    pshufb            m6,        m5,    m3
+    pshufb            m5,        [tab_Tm]
+    pmaddubsw         m5,        m0
+    pmaddubsw         m6,        m1
+    paddw             m5,        m6
+    pmaddwd           m5,        m7
+    packssdw          m4,        m5
+    pmulhrsw          m4,        [pw_512]
+    vbroadcasti128    m2,        [r0 + 48]
+    pshufb            m5,        m2,    m3
+    pshufb            m2,        [tab_Tm]
+    pmaddubsw         m2,        m0
+    pmaddubsw         m5,        m1
+    paddw             m2,        m5
+    pmaddwd           m2,        m7
+    vbroadcasti128    m5,        [r0 + 56]
+    pshufb            m6,        m5,    m3
+    pshufb            m5,        [tab_Tm]
+    pmaddubsw         m5,        m0
+    pmaddubsw         m6,        m1
+    paddw             m5,        m6
+    pmaddwd           m5,        m7
+    packssdw          m2,        m5
+    pmulhrsw          m2,        [pw_512]
+    packuswb          m4,        m2
+    vpermq            m4,        m4,    11011000b
+    vextracti128      xm5,       m4,    1
+    pshufd            xm4,       xm4,   11011000b
+    pshufd            xm5,       xm5,   11011000b
+    movu              [r2 +32],  xm4
+    movu              [r2 + 48], xm5
+
+    lea               r0,        [r0 + r1]
+    lea               r2,        [r2 + r3]
+    dec               r4d
+    jnz               .loop
+    RET
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_48x64(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 luma 8-tap horizontal filter, 48 pixels per row: two 16-pixel groups plus one final 16-pixel group
+; that only needs a single pack/store.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_48x64, 4,6,8
+    sub               r0,         3                             ; 8-tap window starts 3 pixels left of x
+    mov               r4d,        r4m
+%ifdef PIC
+    lea               r5,         [tab_LumaCoeff]
+    vpbroadcastd      m0,         [r5 + r4 * 8]
+    vpbroadcastd      m1,         [r5 + r4 * 8 + 4]
+%else
+    vpbroadcastd      m0,         [tab_LumaCoeff + r4 * 8]
+    vpbroadcastd      m1,         [tab_LumaCoeff + r4 * 8 + 4]
+%endif
+    movu              m3,         [tab_Tm + 16]
+    vpbroadcastd      m7,         [pw_1]
+
+    ; register map
+    ; m0 , m1 - interpolate coeff (first / second 4 taps)
+    ; m3      - shuffle order table (tab_Tm + 16)
+    ; m7 - pw_1
+
+    mov               r4d,        64                            ; row counter (height)
+.loop:
+    ; Row 0, pixels 0..31
+    vbroadcasti128    m4,         [r0]                        ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,         m4,     m3
+    pshufb            m4,         [tab_Tm]
+    pmaddubsw         m4,         m0
+    pmaddubsw         m5,         m1
+    paddw             m4,         m5
+    pmaddwd           m4,         m7
+    vbroadcasti128    m5,         [r0 + 8]
+    pshufb            m6,         m5,     m3
+    pshufb            m5,         [tab_Tm]
+    pmaddubsw         m5,         m0
+    pmaddubsw         m6,         m1
+    paddw             m5,         m6
+    pmaddwd           m5,         m7
+    packssdw          m4,         m5                          ; [17 16 15 14 07 06 05 04 13 12 11 10 03 02 01 00]
+    pmulhrsw          m4,         [pw_512]
+
+    vbroadcasti128    m2,         [r0 + 16]
+    pshufb            m5,         m2,     m3
+    pshufb            m2,         [tab_Tm]
+    pmaddubsw         m2,         m0
+    pmaddubsw         m5,         m1
+    paddw             m2,         m5
+    pmaddwd           m2,         m7
+    vbroadcasti128    m5,         [r0 + 24]
+    pshufb            m6,         m5,     m3
+    pshufb            m5,         [tab_Tm]
+    pmaddubsw         m5,         m0
+    pmaddubsw         m6,         m1
+    paddw             m5,         m6
+    pmaddwd           m5,         m7
+    packssdw          m2,         m5
+    pmulhrsw          m2,         [pw_512]
+    packuswb          m4,         m2
+    vpermq            m4,         m4,     11011000b           ; undo cross-lane interleave from the packs
+    vextracti128      xm5,        m4,     1
+    pshufd            xm4,        xm4,    11011000b
+    pshufd            xm5,        xm5,    11011000b
+    movu              [r2],       xm4
+    movu              [r2 + 16],  xm5
+
+    ; Row 0, pixels 32..47 (last 16: pack against itself, single store)
+    vbroadcasti128    m4,         [r0 + 32]
+    pshufb            m5,         m4,     m3
+    pshufb            m4,         [tab_Tm]
+    pmaddubsw         m4,         m0
+    pmaddubsw         m5,         m1
+    paddw             m4,         m5
+    pmaddwd           m4,         m7
+    vbroadcasti128    m5,         [r0 + 40]
+    pshufb            m6,         m5,     m3
+    pshufb            m5,         [tab_Tm]
+    pmaddubsw         m5,         m0
+    pmaddubsw         m6,         m1
+    paddw             m5,         m6
+    pmaddwd           m5,         m7
+    packssdw          m4,         m5
+    pmulhrsw          m4,         [pw_512]
+    packuswb          m4,         m4
+    vpermq            m4,         m4,     11011000b
+    pshufd            xm4,        xm4,    11011000b
+    movu              [r2 + 32],  xm4
+
+    lea               r0,         [r0 + r1]
+    lea               r2,         [r2 + r3]
+    dec               r4d
+    jnz               .loop
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_4x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 chroma 4-tap horizontal filter, 4x4: rows 0-1 in one ymm, rows 2-3 in another.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_4x4, 4,6,6
+    mov             r4d, r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vpbroadcastd      m2,           [pw_1]
+    vbroadcasti128    m1,           [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec                r0                                       ; 4-tap window starts 1 pixel left of x
+
+    ; Row 0-1
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    vinserti128       m3,           m3,      [r0 + r1],     1
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 2-3
+    lea               r0,           [r0 + r1 * 2]
+    vbroadcasti128    m4,           [r0]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    vinserti128       m4,           m4,      [r0 + r1],     1
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    pmulhrsw          m3,           [pw_512]
+    vextracti128      xm4,          m3,     1
+    packuswb          xm3,          xm4
+
+    ; rows land lane-interleaved, hence the 0,2,1,3 store order below
+    lea               r0,           [r3 * 3]
+    movd              [r2],         xm3
+    pextrd            [r2+r3],      xm3,     2
+    pextrd            [r2+r3*2],    xm3,     1
+    pextrd            [r2+r0],      xm3,     3
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_2x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 chroma 4-tap horizontal filter, 2x4: all four rows gathered into one ymm register.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_2x4, 4, 6, 3
+    mov               r4d,           r4m
+
+%ifdef PIC
+    lea               r5,            [tab_ChromaCoeff]
+    vpbroadcastd      m0,            [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,            [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    dec               r0                                        ; 4-tap window starts 1 pixel left of x
+    lea               r4,            [r1 * 3]
+    movq              xm1,           [r0]
+    movhps            xm1,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m1,            m1,          xm2,          1
+    pshufb            m1,            [interp4_hpp_shuf]
+    pmaddubsw         m1,            m0
+    pmaddwd           m1,            [pw_1]
+    vextracti128      xm2,           m1,          1
+    packssdw          xm1,           xm2
+    pmulhrsw          xm1,           [pw_512]
+    packuswb          xm1,           xm1
+
+    lea               r4,            [r3 * 3]
+    pextrw            [r2],          xm1,         0
+    pextrw            [r2 + r3],     xm1,         1
+    pextrw            [r2 + r3 * 2], xm1,         2
+    pextrw            [r2 + r4],     xm1,         3
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_2x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 chroma 4-tap horizontal filter, 2x8: rows 0-3 in m1, rows 4-7 in m3, one shared pack/store pass.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_2x8, 4, 6, 6
+    mov               r4d,           r4m
+
+%ifdef PIC
+    lea               r5,            [tab_ChromaCoeff]
+    vpbroadcastd      m0,            [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,            [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m4,            [interp4_hpp_shuf]
+    mova              m5,            [pw_1]
+    dec               r0                                        ; 4-tap window starts 1 pixel left of x
+    lea               r4,            [r1 * 3]
+    movq              xm1,           [r0]
+    movhps            xm1,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m1,            m1,          xm2,          1
+    lea               r0,            [r0 + r1 * 4]
+    movq              xm3,           [r0]
+    movhps            xm3,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m3,            m3,          xm2,          1
+
+    pshufb            m1,            m4
+    pshufb            m3,            m4
+    pmaddubsw         m1,            m0
+    pmaddubsw         m3,            m0
+    pmaddwd           m1,            m5
+    pmaddwd           m3,            m5
+    packssdw          m1,            m3
+    pmulhrsw          m1,            [pw_512]
+    vextracti128      xm2,           m1,          1
+    packuswb          xm1,           xm2
+
+    ; rows land interleaved across lanes, hence the non-sequential word indices below
+    lea               r4,            [r3 * 3]
+    pextrw            [r2],          xm1,         0
+    pextrw            [r2 + r3],     xm1,         1
+    pextrw            [r2 + r3 * 2], xm1,         4
+    pextrw            [r2 + r4],     xm1,         5
+    lea               r2,            [r2 + r3 * 4]
+    pextrw            [r2],          xm1,         2
+    pextrw            [r2 + r3],     xm1,         3
+    pextrw            [r2 + r3 * 2], xm1,         6
+    pextrw            [r2 + r4],     xm1,         7
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_32x32(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 chroma 4-tap horizontal filter, 32x32: one full 32-pixel row per loop iteration.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_32x32, 4,6,7
+    mov             r4d, r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m1,           [interp4_horiz_shuf1]
+    vpbroadcastd      m2,           [pw_1]
+    mova              m6,           [pw_512]
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    ; m6 - pw_512 rounding constant
+
+    dec               r0                                        ; 4-tap window starts 1 pixel left of x
+    mov               r4d,          32                          ; row counter
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 4]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           m6
+
+    vbroadcasti128    m4,           [r0 + 16]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    vbroadcasti128    m5,           [r0 + 20]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           m6
+
+    packuswb          m3,           m4
+    vpermq            m3,           m3,      11011000b          ; undo cross-lane interleave from the packs
+
+    movu              [r2],         m3
+    lea               r2,           [r2 + r3]
+    lea               r0,           [r0 + r1]
+    dec               r4d
+    jnz               .loop
+    RET
+
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_16x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 chroma 4-tap horizontal filter, 16x16: two rows per loop iteration (8 iterations).
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_16x16, 4, 6, 7
+    mov               r4d,          r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m6,           [pw_512]
+    mova              m1,           [interp4_horiz_shuf1]
+    vpbroadcastd      m2,           [pw_1]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    ; m6 - pw_512 rounding constant
+
+    dec               r0                                        ; 4-tap window starts 1 pixel left of x
+    mov               r4d,          8                           ; loop count: 16 rows, 2 per iteration
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 4]                    ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           m6
+
+    ; Row 1
+    vbroadcasti128    m4,           [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    vbroadcasti128    m5,           [r0 + r1 + 4]               ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           m6
+
+    packuswb          m3,           m4
+    vpermq            m3,           m3,      11011000b          ; undo cross-lane interleave from the packs
+
+    vextracti128      xm4,          m3,       1
+    movu              [r2],         xm3
+    movu              [r2 + r3],    xm4
+    lea               r2,           [r2 + r3 * 2]
+    lea               r0,           [r0 + r1 * 2]
+    dec               r4d
+    jnz               .loop
+    RET
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+    ; instantiate the generic luma 8-tap pp kernels for narrow block sizes
+    IPFILTER_LUMA 4, 4, pp
+    IPFILTER_LUMA 4, 8, pp
+    IPFILTER_LUMA 12, 16, pp
+    IPFILTER_LUMA 4, 16, pp
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_8x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 chroma 4-tap horizontal filter, 8x8: four rows per loop iteration (2 iterations).
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_8x8, 4,6,6
+    mov               r4d,    r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    movu              m1,           [tab_Tm]
+    vpbroadcastd      m2,           [pw_1]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    sub               r0,           1                           ; 4-tap window starts 1 pixel left of x
+    mov               r4d,          2                           ; loop count: 8 rows, 4 per iteration
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 1
+    vbroadcasti128    m4,           [r0 + r1]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           [pw_512]
+    lea               r0,           [r0 + r1 * 2]
+
+    ; Row 2
+    vbroadcasti128    m4,           [r0 ]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    ; Row 3
+    vbroadcasti128    m5,           [r0 + r1]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           [pw_512]
+
+    packuswb          m3,           m4
+    mova              m5,           [interp_4tap_8x8_horiz_shuf]
+    vpermd            m3,           m5,     m3                  ; restore row order after the packs
+    vextracti128      xm4,          m3,     1
+    movq              [r2],         xm3
+    movhps            [r2 + r3],    xm3
+    lea               r2,           [r2 + r3 * 2]
+    movq              [r2],         xm4
+    movhps            [r2 + r3],    xm4
+    lea               r2,           [r2 + r3 * 2]
+    lea               r0,           [r0 + r1*2]
+    dec               r4d
+    jnz               .loop
+    RET
+
+    ; instantiate the AVX2 luma 8-tap pp kernels for 16-, 32- and 64-wide block sizes
+    IPFILTER_LUMA_AVX2 16, 4
+    IPFILTER_LUMA_AVX2 16, 8
+    IPFILTER_LUMA_AVX2 16, 12
+    IPFILTER_LUMA_AVX2 16, 16
+    IPFILTER_LUMA_AVX2 16, 32
+    IPFILTER_LUMA_AVX2 16, 64
+
+    IPFILTER_LUMA_32x_avx2 32 , 8
+    IPFILTER_LUMA_32x_avx2 32 , 16
+    IPFILTER_LUMA_32x_avx2 32 , 24
+    IPFILTER_LUMA_32x_avx2 32 , 32
+    IPFILTER_LUMA_32x_avx2 32 , 64
+
+    IPFILTER_LUMA_64x_avx2 64 , 64
+    IPFILTER_LUMA_64x_avx2 64 , 48
+    IPFILTER_LUMA_64x_avx2 64 , 32
+    IPFILTER_LUMA_64x_avx2 64 , 16
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_8x2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 chroma 4-tap horizontal filter, 8x2: both rows handled straight-line, no loop.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_8x2, 4, 6, 5
+    mov               r4d,          r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m1,           [tab_Tm]
+    mova              m2,           [pw_1]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec               r0                                        ; 4-tap window starts 1 pixel left of x
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 1
+    vbroadcasti128    m4,           [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           [pw_512]
+    vextracti128      xm4,          m3,          1
+    packuswb          xm3,          xm4
+    pshufd            xm3,          xm3,         11011000b      ; restore row order after the packs
+    movq              [r2],         xm3
+    movhps            [r2 + r3],    xm3
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_8x6(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 chroma 4-tap horizontal filter, 8x6: rows 0-3 in one pass, rows 4-5 in a shorter second pass.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_8x6, 4, 6, 7
+    mov               r4d,           r4m
+
+%ifdef PIC
+    lea               r5,            [tab_ChromaCoeff]
+    vpbroadcastd      m0,            [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,            [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m1,            [tab_Tm]
+    mova              m2,            [pw_1]
+    mova              m6,            [pw_512]
+    lea               r4,            [r1 * 3]                    ; r4 = 3 * srcStride
+    lea               r5,            [r3 * 3]                    ; r5 = 3 * dstStride
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    ; m6 - pw_512 rounding constant
+
+    dec               r0                                         ; 4-tap window starts 1 pixel left of x
+    ; Row 0
+    vbroadcasti128    m3,            [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,            m1
+    pmaddubsw         m3,            m0
+    pmaddwd           m3,            m2
+
+    ; Row 1
+    vbroadcasti128    m4,            [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,            m1
+    pmaddubsw         m4,            m0
+    pmaddwd           m4,            m2
+    packssdw          m3,            m4
+    pmulhrsw          m3,            m6
+
+    ; Row 2
+    vbroadcasti128    m4,            [r0 + r1 * 2]               ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,            m1
+    pmaddubsw         m4,            m0
+    pmaddwd           m4,            m2
+
+    ; Row 3
+    vbroadcasti128    m5,            [r0 + r4]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,            m1
+    pmaddubsw         m5,            m0
+    pmaddwd           m5,            m2
+    packssdw          m4,            m5
+    pmulhrsw          m4,            m6
+
+    packuswb          m3,            m4
+    mova              m5,            [interp8_hps_shuf]
+    vpermd            m3,            m5,          m3             ; restore row order after the packs
+    vextracti128      xm4,           m3,          1
+    movq              [r2],          xm3
+    movhps            [r2 + r3],     xm3
+    movq              [r2 + r3 * 2], xm4
+    movhps            [r2 + r5],     xm4
+    lea               r2,            [r2 + r3 * 4]
+    lea               r0,            [r0 + r1 * 4]
+    ; Row 4
+    vbroadcasti128    m3,            [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,            m1
+    pmaddubsw         m3,            m0
+    pmaddwd           m3,            m2
+
+    ; Row 5
+    vbroadcasti128    m4,            [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,            m1
+    pmaddubsw         m4,            m0
+    pmaddwd           m4,            m2
+    packssdw          m3,            m4
+    pmulhrsw          m3,            m6
+    vextracti128      xm4,           m3,          1
+    packuswb          xm3,           xm4
+    pshufd            xm3,           xm3,         11011000b      ; restore row order after the packs
+    movq              [r2],          xm3
+    movhps            [r2 + r3],     xm3
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_6x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 chroma 4-tap horizontal filter, 6x8: four rows per %rep pass; each row stores 4 bytes (movd/pextrd)
+; plus the remaining 2 bytes via pextrw.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_6x8, 4, 6, 7
+    mov               r4d,               r4m
+
+%ifdef PIC
+    lea               r5,                [tab_ChromaCoeff]
+    vpbroadcastd      m0,                [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,                [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m1,                [tab_Tm]
+    mova              m2,                [pw_1]
+    mova              m6,                [pw_512]
+    lea               r4,                [r1 * 3]                ; r4 = 3 * srcStride
+    lea               r5,                [r3 * 3]                ; r5 = 3 * dstStride
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    ; m6 - pw_512 rounding constant
+
+    dec               r0                                         ; 4-tap window starts 1 pixel left of x
+%rep 2
+    ; Row 0
+    vbroadcasti128    m3,                [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,                m1
+    pmaddubsw         m3,                m0
+    pmaddwd           m3,                m2
+
+    ; Row 1
+    vbroadcasti128    m4,                [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,                m1
+    pmaddubsw         m4,                m0
+    pmaddwd           m4,                m2
+    packssdw          m3,                m4
+    pmulhrsw          m3,                m6
+
+    ; Row 2
+    vbroadcasti128    m4,                [r0 + r1 * 2]               ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,                m1
+    pmaddubsw         m4,                m0
+    pmaddwd           m4,                m2
+
+    ; Row 3
+    vbroadcasti128    m5,                [r0 + r4]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,                m1
+    pmaddubsw         m5,                m0
+    pmaddwd           m5,                m2
+    packssdw          m4,                m5
+    pmulhrsw          m4,                m6
+
+    packuswb          m3,                m4
+    vextracti128      xm4,               m3,          1
+    movd              [r2],              xm3
+    pextrw            [r2 + 4],          xm4,         0
+    pextrd            [r2 + r3],         xm3,         1
+    pextrw            [r2 + r3 + 4],     xm4,         2
+    pextrd            [r2 + r3 * 2],     xm3,         2
+    pextrw            [r2 + r3 * 2 + 4], xm4,         4
+    pextrd            [r2 + r5],         xm3,         3
+    pextrw            [r2 + r5 + 4],     xm4,         6
+    lea               r2,                [r2 + r3 * 4]
+    lea               r0,                [r0 + r1 * 4]
+%endrep
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_64xN(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;
+; AVX2 chroma 4-tap horizontal pixel-to-short filter, 64 pixels per row in four 16-pixel groups.
+; %1 = height. When isRowExt is non-zero, filtering starts one row above src and 3 extra rows are
+; produced (row extension for a following vertical pass).
+;-----------------------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_HPS_64xN 1
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_64x%1, 4,7,6
+    mov             r4d, r4m
+    mov             r5d, r5m
+    add             r3d, r3d                                  ; dst is int16_t: stride in bytes = 2 * dstStride
+
+%ifdef PIC
+    lea               r6,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r6 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128     m2,           [pw_1]
+    vbroadcasti128     m5,           [pw_2000]
+    mova               m1,           [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    ; m5 - pw_2000 (offset subtracted from the 16-bit results)
+    mov                r6d,         %1
+    dec                r0                                     ; 4-tap window starts 1 pixel left of x
+    test               r5d,         r5d
+    je                 .loop
+    sub                r0,          r1                        ; isRowExt: start one row above ...
+    add                r6d,         3                         ; ... and emit 3 extra rows
+
+.loop:
+    ; Row 0, pixels 0..15
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 8]                    ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           m5
+    vpermq            m3,           m3,          11011000b     ; undo cross-lane interleave from packssdw
+    movu              [r2],         m3
+
+    ; Row 0, pixels 16..31
+    vbroadcasti128    m3,           [r0 + 16]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 24]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           m5
+    vpermq            m3,           m3,          11011000b
+    movu              [r2 + 32],    m3
+
+    ; Row 0, pixels 32..47
+    vbroadcasti128    m3,           [r0 + 32]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 40]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           m5
+    vpermq            m3,           m3,          11011000b
+    movu              [r2 + 64],    m3
+
+    ; Row 0, pixels 48..63
+    vbroadcasti128    m3,           [r0 + 48]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 56]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           m5
+    vpermq            m3,           m3,          11011000b
+    movu              [r2 + 96],    m3
+
+    add                r2,           r3
+    add                r0,           r1
+    dec                r6d
+    jnz                .loop
+    RET
+%endmacro
+
+   IPFILTER_CHROMA_HPS_64xN 64
+   IPFILTER_CHROMA_HPS_64xN 32
+   IPFILTER_CHROMA_HPS_64xN 48
+   IPFILTER_CHROMA_HPS_64xN 16
+
+;-----------------------------------------------------------------------------------------------------------------------------
+;void interp_horiz_ps_c(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+
+; AVX2 horizontal 8-tap luma filter, width 4, height %1, 16-bit "ps" output.
+; The main loop emits 4 rows per iteration; a tail section handles the last
+; 3 rows that exist only in the isRowExt (height + 7) case.
+%macro IPFILTER_LUMA_PS_4xN_AVX2 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_horiz_ps_4x%1, 6,7,6
+    mov                         r5d,               r5m                          ; isRowExt
+    mov                         r4d,               r4m                          ; coeffIdx
+%ifdef PIC
+    lea                         r6,                [tab_LumaCoeff]
+    vpbroadcastq                m0,                [r6 + r4 * 8]
+%else
+    vpbroadcastq                m0,                [tab_LumaCoeff + r4 * 8]
+%endif
+    mova                        m1,                [tab_Lm]
+    add                         r3d,               r3d                          ; dst stride in bytes (int16_t)
+    vbroadcasti128              m2,                [pw_2000]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - pw_2000
+
+    sub                         r0,                3                            ; src -= (N/2 - 1) columns
+    test                        r5d,               r5d
+    mov                         r5d,               %1                           ; loop count variable - height
+    jz                         .preloop
+    lea                         r6,                [r1 * 3]                     ; r6 = (N / 2 - 1) * srcStride
+    sub                         r0,                r6                           ; r0(src) - 3 * srcStride
+    add                         r5d,               7                            ; blkheight += N - 1 (4 extra rows in main loop, last 3 in tail)
+
+.preloop:
+    lea                         r6,                [r3 * 3]
+.loop:
+    ; Row 0-1
+    vbroadcasti128              m3,                [r0]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m3,                m1                           ; shuffled based on the col order tab_Lm
+    pmaddubsw                   m3,                m0
+    vbroadcasti128              m4,                [r0 + r1]                    ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m1
+    pmaddubsw                   m4,                m0
+    phaddw                      m3,                m4                           ; DWORD [R1D R1C R0D R0C R1B R1A R0B R0A]
+
+    ; Row 2-3
+    lea                         r0,                [r0 + r1 * 2]                ;3rd row(i.e 2nd row)
+    vbroadcasti128              m4,                [r0]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m1
+    pmaddubsw                   m4,                m0
+    vbroadcasti128              m5,                [r0 + r1]                    ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m5,                m1
+    pmaddubsw                   m5,                m0
+    phaddw                      m4,                m5                           ; DWORD [R3D R3C R2D R2C R3B R3A R2B R2A]
+    phaddw                      m3,                m4                           ; all rows and col completed.
+
+    mova                        m5,                [interp8_hps_shuf]
+    vpermd                      m3,                m5,               m3
+    psubw                       m3,                m2
+
+    vextracti128                xm4,               m3,               1
+    movq                        [r2],              xm3                          ;row 0
+    movhps                      [r2 + r3],         xm3                          ;row 1
+    movq                        [r2 + r3 * 2],     xm4                          ;row 2
+    movhps                      [r2 + r6],         xm4                          ;row 3
+
+    lea                         r0,                [r0 + r1 * 2]                ; first loop src ->5th row(i.e 4)
+    lea                         r2,                [r2 + r3 * 4]                ; first loop dst ->5th row(i.e 4)
+    sub                         r5d,               4
+    jz                         .end
+    cmp                         r5d,               4
+    jge                        .loop
+
+    ; tail: last 3 rows of the isRowExt case
+    ; Row 8-9
+    vbroadcasti128              m3,                [r0]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m3,                m1
+    pmaddubsw                   m3,                m0
+    vbroadcasti128              m4,                [r0 + r1]                    ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m1
+    pmaddubsw                   m4,                m0
+    phaddw                      m3,                m4                           ; DWORD [R1D R1C R0D R0C R1B R1A R0B R0A]
+
+    ; Row 10
+    vbroadcasti128              m4,                [r0 + r1 * 2]                ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m1
+    pmaddubsw                   m4,                m0
+    phaddw                      m4,                m4                           ; DWORD [R3D R3C R2D R2C R3B R3A R2B R2A]
+    phaddw                      m3,                m4
+
+    vpermd                      m3,                m5,            m3            ; m5 not clobbered above
+    psubw                       m3,                m2
+
+    vextracti128                xm4,               m3,            1
+    movq                        [r2],              xm3
+    movhps                      [r2 + r3],         xm3
+    movq                        [r2 + r3 * 2],     xm4
+.end:
+    RET
+%endif
+%endmacro
+
+    IPFILTER_LUMA_PS_4xN_AVX2 4
+    IPFILTER_LUMA_PS_4xN_AVX2 8
+    IPFILTER_LUMA_PS_4xN_AVX2 16
+
+; AVX2 horizontal 8-tap luma filter, width 8, height %1, 16-bit "ps" output.
+; Uses the 128-byte-per-index pb_LumaCoeffVer coefficient layout and four
+; pshufb patterns (pb_8tap_hps_0) to form the 8 taps without phaddw.
+%macro IPFILTER_LUMA_PS_8xN_AVX2 1
+; TODO: verify and enable on X86 mode
+%if ARCH_X86_64 == 1
+; void filter_hps(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+INIT_YMM avx2
+cglobal interp_8tap_horiz_ps_8x%1, 4,7,6
+    mov                         r5d,        r5m                             ; isRowExt
+    mov                         r4d,        r4m                             ; coeffIdx
+    shl                         r4d,        7                               ; * 128: stride of pb_LumaCoeffVer entries
+%ifdef PIC
+    lea                         r6,         [pb_LumaCoeffVer]
+    add                         r6,         r4
+%else
+    lea                         r6,         [pb_LumaCoeffVer + r4]
+%endif
+    add                         r3d,        r3d                             ; dst stride in bytes (int16_t)
+    vpbroadcastd                m0,         [pw_2000]
+    sub                         r0,         3                               ; src -= (N/2 - 1) columns
+    lea                         r4,         [pb_8tap_hps_0]
+    vbroadcasti128              m5,         [r4 + 0 * mmsize]
+
+    ; check row count extend for interpolateHV
+    test                        r5d,        r5d
+    mov                         r5d,        %1
+    jz                         .enter_loop
+    lea                         r4,         [r1 * 3]                        ; r4 = (N / 2 - 1) * srcStride
+    sub                         r0,         r4                              ; r0(src) - 3 * srcStride
+    add                         r5d,        8-1-2                           ; blkheight += N - 1, minus the 2 rows per iteration handled by the odd tail
+
+.enter_loop:
+    lea                         r4,         [pb_8tap_hps_0]
+
+    ; ***** register map *****
+    ; m0 - pw_2000
+    ; r4 - base pointer of shuffle order table
+    ; r5 - count of loop
+    ; r6 - point to LumaCoeff
+.loop:
+
+    ; Row 0-1
+    movu                        xm1,        [r0]
+    movu                        xm2,        [r0 + r1]
+    vinserti128                 m1,         m1,         xm2, 1
+    pshufb                      m2,         m1,         m5                  ; [0 1 1 2 2 3 3 4 ...]
+    pshufb                      m3,         m1,         [r4 + 1 * mmsize]   ; [2 3 3 4 4 5 5 6 ...]
+    pshufb                      m4,         m1,         [r4 + 2 * mmsize]   ; [4 5 5 6 6 7 7 8 ...]
+    pshufb                      m1,         m1,         [r4 + 3 * mmsize]   ; [6 7 7 8 8 9 9 A ...]
+    pmaddubsw                   m2,         [r6 + 0 * mmsize]
+    pmaddubsw                   m3,         [r6 + 1 * mmsize]
+    pmaddubsw                   m4,         [r6 + 2 * mmsize]
+    pmaddubsw                   m1,         [r6 + 3 * mmsize]
+    paddw                       m2,         m3
+    paddw                       m1,         m4
+    paddw                       m1,         m2
+    psubw                       m1,         m0
+
+    vextracti128                xm2,        m1,         1
+    movu                        [r2],       xm1                             ; row 0
+    movu                        [r2 + r3],  xm2                             ; row 1
+
+    lea                         r0,         [r0 + r1 * 2]                   ; advance two rows
+    lea                         r2,         [r2 + r3 * 2]
+    sub                         r5d,        2
+    jg                         .loop
+    jz                         .end
+
+    ; last row (odd row count in the isRowExt case)
+    movu                        xm1,        [r0]
+    pshufb                      xm2,        xm1,         xm5                ; [0 1 1 2 2 3 3 4 ...]
+    pshufb                      xm3,        xm1,         [r4 + 1 * mmsize]  ; [2 3 3 4 4 5 5 6 ...]
+    pshufb                      xm4,        xm1,         [r4 + 2 * mmsize]  ; [4 5 5 6 6 7 7 8 ...]
+    pshufb                      xm1,        xm1,         [r4 + 3 * mmsize]  ; [6 7 7 8 8 9 9 A ...]
+    pmaddubsw                   xm2,        [r6 + 0 * mmsize]
+    pmaddubsw                   xm3,        [r6 + 1 * mmsize]
+    pmaddubsw                   xm4,        [r6 + 2 * mmsize]
+    pmaddubsw                   xm1,        [r6 + 3 * mmsize]
+    paddw                       xm2,        xm3
+    paddw                       xm1,        xm4
+    paddw                       xm1,        xm2
+    psubw                       xm1,        xm0
+    movu                        [r2],       xm1                          ;row 0
+.end:
+    RET
+%endif
+%endmacro ; IPFILTER_LUMA_PS_8xN_AVX2
+
+    IPFILTER_LUMA_PS_8xN_AVX2  4
+    IPFILTER_LUMA_PS_8xN_AVX2  8
+    IPFILTER_LUMA_PS_8xN_AVX2 16
+    IPFILTER_LUMA_PS_8xN_AVX2 32
+
+
+; AVX2 horizontal 8-tap luma filter, width 16, height %2, 16-bit "ps" output.
+; One 16-pixel row per iteration; isRowExt extends the block by N - 1 = 7 rows.
+%macro IPFILTER_LUMA_PS_16x_AVX2 2
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_horiz_ps_%1x%2, 6, 10, 7
+    mov                         r5d,               r5m                          ; isRowExt
+    mov                         r4d,               r4m                          ; coeffIdx
+%ifdef PIC
+    lea                         r6,                [tab_LumaCoeff]
+    vpbroadcastq                m0,                [r6 + r4 * 8]
+%else
+    vpbroadcastq                m0,                [tab_LumaCoeff + r4 * 8]
+%endif
+    mova                        m6,                [tab_Lm + 32]
+    mova                        m1,                [tab_Lm]
+    mov                         r9,                %2                           ;height
+    add                         r3d,               r3d                          ; dst stride in bytes (int16_t)
+    vbroadcasti128              m2,                [pw_2000]
+
+    ; register map
+    ; m0      - interpolate coeff
+    ; m1 , m6 - shuffle order table
+    ; m2      - pw_2000
+
+    xor                         r7,                r7                          ; NOTE(review): r7 is never read below; dead store kept for bit-identical output
+    sub                         r0,                3                           ; src -= (N/2 - 1) columns
+    test                        r5d,               r5d
+    jz                          .label
+    lea                         r8,                [r1 * 3]                     ; r8 = (N / 2 - 1) * srcStride
+    sub                         r0,                r8                           ; r0(src)-r8
+    add                         r9,                7                            ; blkheight += N - 1; all rows handled by the single loop
+
+.label:
+    ; Row 0
+    vbroadcasti128              m3,                [r0]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 4 to 7)
+    pshufb                      m3,                m1                           ; shuffled based on the col order tab_Lm row 0 (col 0 to 3)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    phaddw                      m3,                m4                           ; DWORD [R1D R1C R0D R0C R1B R1A R0B R0A]
+
+    vbroadcasti128              m4,                [r0 + 8]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m5,                m4,            m6            ;row 1 (col 4 to 7)
+    pshufb                      m4,                m1                           ;row 1 (col 0 to 3)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    phaddw                      m4,                m5                           ; DWORD [R3D R3C R2D R2C R3B R3A R2B R2A]
+    phaddw                      m3,                m4                           ; all rows and col completed.
+
+    mova                        m5,                [interp8_hps_shuf]
+    vpermd                      m3,                m5,               m3
+    psubw                       m3,                m2
+
+    movu                        [r2],              m3                          ;row 0
+
+    lea                         r0,                [r0 + r1]                ; next source row
+    lea                         r2,                [r2 + r3]                ; next destination row
+    dec                         r9d
+    jnz                         .label
+
+    RET
+%endif
+%endmacro
+
+
+    IPFILTER_LUMA_PS_16x_AVX2 16 , 16
+    IPFILTER_LUMA_PS_16x_AVX2 16 , 8
+    IPFILTER_LUMA_PS_16x_AVX2 16 , 12
+    IPFILTER_LUMA_PS_16x_AVX2 16 , 4
+    IPFILTER_LUMA_PS_16x_AVX2 16 , 32
+    IPFILTER_LUMA_PS_16x_AVX2 16 , 64
+
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+; SSE4 horizontal 8-tap luma filter, pixel-to-pixel ("pp") output, for any
+; width that is a multiple of 8 (%1 = width, %2 = height). The width loop is
+; unrolled at assembly time via %rep, 8 pixels per chunk; pmulhrsw with
+; pw_512 performs the round-and-shift back to pixel range before packing.
+%macro IPFILTER_LUMA_PP_W8 2
+INIT_XMM sse4
+cglobal interp_8tap_horiz_pp_%1x%2, 4,6,7
+    mov         r4d, r4m
+
+%ifdef PIC
+    lea         r5, [tab_LumaCoeff]
+    movh        m3, [r5 + r4 * 8]
+%else
+    movh        m3, [tab_LumaCoeff + r4 * 8]
+%endif
+    pshufd      m0, m3, 0                       ; m0 = coeff-L
+    pshufd      m1, m3, 0x55                    ; m1 = coeff-H
+    lea         r5, [tab_Tm]                    ; r5 = shuffle
+    mova        m2, [pw_512]                    ; m2 = 512
+
+    ; r4d = row counter (height)
+    mov         r4d, %2
+.loopH:
+; compile-time unrolled loop over 8-pixel chunks of one row
+%assign x 0
+%rep %1 / 8
+    movu        m3, [r0 - 3 + x]                ; m3 = [F E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb      m4, m3, [r5 + 0*16]             ; m4 = [6 5 4 3 5 4 3 2 4 3 2 1 3 2 1 0]
+    pshufb      m5, m3, [r5 + 1*16]             ; m5 = [A 9 8 7 9 8 7 6 8 7 6 5 7 6 5 4]
+    pshufb          m3, [r5 + 2*16]             ; m3 = [E D C B D C B A C B A 9 B A 9 8]
+    pmaddubsw   m4, m0
+    pmaddubsw   m6, m5, m1
+    pmaddubsw   m5, m0
+    pmaddubsw   m3, m1
+    paddw       m4, m6
+    paddw       m5, m3
+    phaddw      m4, m5
+    ; round, shift back to pixel range, saturate to bytes
+    pmulhrsw    m4, m2
+    packuswb    m4, m4
+    movh        [r2 + x], m4
+%assign x x+8
+%endrep
+
+    add       r0, r1
+    add       r2, r3
+
+    dec       r4d
+    jnz      .loopH
+    RET
+%endmacro
+
+    IPFILTER_LUMA_PP_W8      8,  4
+    IPFILTER_LUMA_PP_W8      8,  8
+    IPFILTER_LUMA_PP_W8      8, 16
+    IPFILTER_LUMA_PP_W8      8, 32
+    IPFILTER_LUMA_PP_W8     16,  4
+    IPFILTER_LUMA_PP_W8     16,  8
+    IPFILTER_LUMA_PP_W8     16, 12
+    IPFILTER_LUMA_PP_W8     16, 16
+    IPFILTER_LUMA_PP_W8     16, 32
+    IPFILTER_LUMA_PP_W8     16, 64
+    IPFILTER_LUMA_PP_W8     24, 32
+    IPFILTER_LUMA_PP_W8     32,  8
+    IPFILTER_LUMA_PP_W8     32, 16
+    IPFILTER_LUMA_PP_W8     32, 24
+    IPFILTER_LUMA_PP_W8     32, 32
+    IPFILTER_LUMA_PP_W8     32, 64
+    IPFILTER_LUMA_PP_W8     48, 64
+    IPFILTER_LUMA_PP_W8     64, 16
+    IPFILTER_LUMA_PP_W8     64, 32
+    IPFILTER_LUMA_PP_W8     64, 48
+    IPFILTER_LUMA_PP_W8     64, 64
+
+;----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;----------------------------------------------------------------------------------------------------------------------------
+; Instantiate the generic IPFILTER_LUMA macro (defined earlier in this file,
+; not visible here) in its 16-bit "ps" output variant for every HEVC luma
+; partition size.
+    IPFILTER_LUMA 4, 4, ps
+    IPFILTER_LUMA 8, 8, ps
+    IPFILTER_LUMA 8, 4, ps
+    IPFILTER_LUMA 4, 8, ps
+    IPFILTER_LUMA 16, 16, ps
+    IPFILTER_LUMA 16, 8, ps
+    IPFILTER_LUMA 8, 16, ps
+    IPFILTER_LUMA 16, 12, ps
+    IPFILTER_LUMA 12, 16, ps
+    IPFILTER_LUMA 16, 4, ps
+    IPFILTER_LUMA 4, 16, ps
+    IPFILTER_LUMA 32, 32, ps
+    IPFILTER_LUMA 32, 16, ps
+    IPFILTER_LUMA 16, 32, ps
+    IPFILTER_LUMA 32, 24, ps
+    IPFILTER_LUMA 24, 32, ps
+    IPFILTER_LUMA 32, 8, ps
+    IPFILTER_LUMA 8, 32, ps
+    IPFILTER_LUMA 64, 64, ps
+    IPFILTER_LUMA 64, 32, ps
+    IPFILTER_LUMA 32, 64, ps
+    IPFILTER_LUMA 64, 48, ps
+    IPFILTER_LUMA 48, 64, ps
+    IPFILTER_LUMA 64, 16, ps
+    IPFILTER_LUMA 16, 64, ps
+
+;-----------------------------------------------------------------------------
+; Interpolate HV
+;-----------------------------------------------------------------------------
+; Begin the vertical 8-tap pass of the HV filter: load three consecutive
+; 16-byte rows of 16-bit intermediate data from [r0], interleave adjacent
+; row pairs and multiply-accumulate them with the first coefficient pair
+; from [r5]. Produces four 32-bit accumulators: low/high halves for output
+; rows 0 (%3/%5) and 1 (%4/%1).
+%macro FILTER_HV8_START 7 ; (t0, t1, t2, t3, t4, off_src, off_coeff) -> (t3, t5), (t4, t1), [2]
+    mova        %5, [r0 +  (%6 + 0) * 16]       ; row (off_src + 0)
+    mova        %1, [r0 +  (%6 + 1) * 16]       ; row (off_src + 1)
+    mova        %2, [r0 +  (%6 + 2) * 16]       ; row (off_src + 2)
+    punpcklwd   %3, %5, %1
+    punpckhwd   %5, %1
+    pmaddwd     %3, [r5 + (%7) * 16]   ; R3 = L[0+1] -- Row 0
+    pmaddwd     %5, [r5 + (%7) * 16]   ; R0 = H[0+1]
+    punpcklwd   %4, %1, %2
+    punpckhwd   %1, %2
+    pmaddwd     %4, [r5 + (%7) * 16]   ; R4 = L[1+2] -- Row 1
+    pmaddwd     %1, [r5 + (%7) * 16]   ; R1 = H[1+2]
+%endmacro ; FILTER_HV8_START
+
+; Continue the vertical 8-tap accumulation started by FILTER_HV8_START:
+; load the next two intermediate rows, interleave them with the previous
+; row (%2), multiply by the coefficient pair at [r5 + off_coeff*16] and add
+; into the four running accumulators (%3..%6).
+%macro FILTER_HV8_MID 10 ; (Row3, prevRow, sum0L, sum1L, sum0H, sum1H, t6, t7, off_src, off_coeff) -> [6]
+    mova        %8, [r0 +  (%9 + 0) * 16]       ; row (off_src + 0)
+    mova        %1, [r0 +  (%9 + 1) * 16]       ; row (off_src + 1)
+    punpcklwd   %7, %2, %8
+    punpckhwd   %2, %8
+    pmaddwd     %7, [r5 + %10 * 16]
+    pmaddwd     %2, [r5 + %10 * 16]
+    paddd       %3, %7              ; R3 = L[0+1+2+3] -- Row 0
+    paddd       %5, %2              ; R0 = H[0+1+2+3]
+    punpcklwd   %7, %8, %1
+    punpckhwd   %8, %1
+    pmaddwd     %7, [r5 + %10 * 16]
+    pmaddwd     %8, [r5 + %10 * 16]
+    paddd       %4, %7              ; R4 = L[1+2+3+4] -- Row 1
+    paddd       %6, %8              ; R1 = H[1+2+3+4]
+%endmacro ; FILTER_HV8_MID
+
+; Round and Saturate
+; Finish the vertical pass: bias, arithmetic-shift right by 12 and pack the
+; four 32-bit accumulators down to bytes (two 8-pixel output rows in %1).
+; NOTE(review): 526336 = (0x2000 * 64) + (1 << 11) — presumably this undoes
+; the pw_2000 bias applied in the horizontal pass (scaled by the coefficient
+; sum 64) plus the rounding constant for the >>12; verify against the C
+; reference filter.
+%macro FILTER_HV8_END 4 ; output in [1, 3]
+    paddd       %1, [pd_526336]
+    paddd       %2, [pd_526336]
+    paddd       %3, [pd_526336]
+    paddd       %4, [pd_526336]
+    psrad       %1, 12
+    psrad       %2, 12
+    psrad       %3, 12
+    psrad       %4, 12
+    packssdw    %1, %2
+    packssdw    %3, %4
+
+    ; TODO: is merge better? I think this way is short dependency link
+    packuswb    %1, %3
+%endmacro ; FILTER_HV8_END
+
+;-----------------------------------------------------------------------------
+; void interp_8tap_hv_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int idxX, int idxY)
+;-----------------------------------------------------------------------------
+; SSSE3 combined horizontal + vertical 8-tap luma filter, 8x8 block.
+; Phase H filters 8 + 7 = 15 rows into a 15*16-byte stack buffer of 16-bit
+; samples (biased by -pw_2000); phase V runs the FILTER_HV8_* macros over
+; that buffer, two output rows per iteration.
+INIT_XMM ssse3
+cglobal interp_8tap_hv_pp_8x8, 4, 7, 8, 0-15*16
+%define coef        m7
+%define stk_buf     rsp
+
+    mov         r4d,        r4m                 ; idxX (horizontal coeff index)
+    mov         r5d,        r5m                 ; idxY (vertical coeff index)
+
+%ifdef PIC
+    lea         r6,         [tab_LumaCoeff]
+    movh        coef,       [r6 + r4 * 8]
+%else
+    movh        coef,       [tab_LumaCoeff + r4 * 8]
+%endif
+    punpcklqdq  coef,       coef
+
+    ; move to row -3
+    lea         r6,         [r1 + r1 * 2]
+    sub         r0,         r6
+
+    xor         r6,         r6
+    mov         r4,         rsp
+
+; ---- phase H: 15 rows into the stack buffer ----
+.loopH:
+    FILTER_H8_W8 m0, m1, m2, m3, coef, [pw_512], [r0 - 3]
+    psubw       m1,         [pw_2000]           ; bias to signed range for phase V
+    mova        [r4],       m1
+
+    add         r0,         r1
+    add         r4,         16
+    inc         r6
+    cmp         r6,         8+7                 ; height + taps - 1 rows
+    jnz         .loopH
+
+    ; ready to phase V
+    ; Here all of mN is free
+
+    ; load coeff table
+    shl         r5,         6                   ; * 64: stride of tab_LumaCoeffV entries
+    lea         r6,         [tab_LumaCoeffV]
+    lea         r5,         [r5 + r6]
+
+    ; load intermedia buffer
+    mov         r0,         stk_buf
+
+    ; register mapping
+    ; r0 - src
+    ; r5 - coeff
+    ; r6 - loop_i
+
+    ; let's go
+    xor         r6,         r6
+
+    ; TODO: this loop have more than 70 instructions, I think it is more than Intel loop decode cache
+.loopV:
+
+    FILTER_HV8_START    m1, m2, m3, m4, m0,             0, 0
+    FILTER_HV8_MID      m6, m2, m3, m4, m0, m1, m7, m5, 3, 1
+    FILTER_HV8_MID      m5, m6, m3, m4, m0, m1, m7, m2, 5, 2
+    FILTER_HV8_MID      m6, m5, m3, m4, m0, m1, m7, m2, 7, 3
+    FILTER_HV8_END      m3, m0, m4, m1
+
+    movh        [r2],       m3                  ; output row 2*i
+    movhps      [r2 + r3],  m3                  ; output row 2*i + 1
+
+    lea         r0,         [r0 + 16 * 2]       ; advance 2 rows in the stack buffer
+    lea         r2,         [r2 + r3 * 2]
+
+    inc         r6
+    cmp         r6,         8/2                 ; 4 iterations x 2 rows = 8 rows
+    jnz         .loopV
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_8tap_hv_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int idxX, int idxY)
+;-----------------------------------------------------------------------------
+; SSE3 variant of the combined HV 8x8 filter. Same two-phase structure as
+; the SSSE3 version above, but phase H uses the word-coefficient
+; FILTER_H8_W8_sse2 macro (no pmaddubsw) and both phases are fully unrolled
+; with %rep instead of runtime loops.
+INIT_XMM sse3
+cglobal interp_8tap_hv_pp_8x8, 4, 7, 8, 0-15*16
+    mov         r4d,        r4m                 ; idxX
+    mov         r5d,        r5m                 ; idxY
+    add         r4d,        r4d                 ; word table: index * 2
+    pxor        m6,         m6
+
+%ifdef PIC
+    lea         r6,         [tabw_LumaCoeff]
+    mova        m3,         [r6 + r4 * 8]
+%else
+    mova        m3,         [tabw_LumaCoeff + r4 * 8]
+%endif
+
+    ; move to row -3
+    lea         r6,         [r1 + r1 * 2]
+    sub         r0,         r6
+
+    mov         r4,         rsp
+
+; ---- phase H: 15 rows into the stack buffer, unrolled ----
+%assign x 0     ;needed for FILTER_H8_W8_sse2 macro
+%assign y 1
+%rep 15
+    FILTER_H8_W8_sse2
+    psubw       m1,         [pw_2000]           ; bias to signed range for phase V
+    mova        [r4],       m1
+
+%if y < 15
+    add         r0,         r1
+    add         r4,         16
+%endif
+%assign y y+1
+%endrep
+
+    ; ready to phase V
+    ; Here all of mN is free
+
+    ; load coeff table
+    shl         r5,         6                   ; * 64: stride of tab_LumaCoeffV entries
+    lea         r6,         [tab_LumaCoeffV]
+    lea         r5,         [r5 + r6]
+
+    ; load intermedia buffer
+    mov         r0,         rsp
+
+    ; register mapping
+    ; r0 - src
+    ; r5 - coeff
+
+    ; let's go
+; ---- phase V: 4 unrolled iterations, 2 output rows each ----
+%assign y 1
+%rep 4
+    FILTER_HV8_START    m1, m2, m3, m4, m0,             0, 0
+    FILTER_HV8_MID      m6, m2, m3, m4, m0, m1, m7, m5, 3, 1
+    FILTER_HV8_MID      m5, m6, m3, m4, m0, m1, m7, m2, 5, 2
+    FILTER_HV8_MID      m6, m5, m3, m4, m0, m1, m7, m2, 7, 3
+    FILTER_HV8_END      m3, m0, m4, m1
+
+    movh        [r2],       m3
+    movhps      [r2 + r3],  m3
+
+%if y < 4
+    lea         r0,         [r0 + 16 * 2]
+    lea         r2,         [r2 + r3 * 2]
+%endif
+%assign y y+1
+%endrep
+    RET
+
+;-----------------------------------------------------------------------------
+;void interp_4tap_vert_pp_2x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; SSE4 vertical 4-tap chroma filter, 2x4 block, pixel output. Loads the 7
+; source rows needed (4 output rows + 3 taps of context), interleaves each
+; 4-row window into byte pairs for pmaddubsw, then rounds with pw_512 and
+; stores the four 2-pixel rows with pextrw.
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_2x4, 4, 6, 8
+
+    mov         r4d,       r4m
+    sub         r0,        r1                   ; move src up one row (first tap)
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]
+    movd        m0,        [r5 + r4 * 4]
+%else
+    movd        m0,        [tab_ChromaCoeff + r4 * 4]
+%endif
+    lea         r4,        [r1 * 3]
+    lea         r5,        [r0 + 4 * r1]
+    pshufb      m0,        [tab_Cm]             ; replicate the 4 taps per lane
+    mova        m1,        [pw_512]             ; rounding constant for pmulhrsw
+
+    ; rows -1, 0, 1, 2 of the source
+    movd        m2,        [r0]
+    movd        m3,        [r0 + r1]
+    movd        m4,        [r0 + 2 * r1]
+    movd        m5,        [r0 + r4]
+
+    ; output rows 0 and 1
+    punpcklbw   m2,        m3
+    punpcklbw   m6,        m4,        m5
+    punpcklbw   m2,        m6
+
+    pmaddubsw   m2,        m0
+
+    movd        m6,        [r5]
+
+    punpcklbw   m3,        m4
+    punpcklbw   m7,        m5,        m6
+    punpcklbw   m3,        m7
+
+    pmaddubsw   m3,        m0
+
+    phaddw      m2,        m3
+
+    pmulhrsw    m2,        m1
+
+    movd        m7,        [r5 + r1]
+
+    ; output rows 2 and 3
+    punpcklbw   m4,        m5
+    punpcklbw   m3,        m6,        m7
+    punpcklbw   m4,        m3
+
+    pmaddubsw   m4,        m0
+
+    movd        m3,        [r5 + 2 * r1]
+
+    punpcklbw   m5,        m6
+    punpcklbw   m7,        m3
+    punpcklbw   m5,        m7
+
+    pmaddubsw   m5,        m0
+
+    phaddw      m4,        m5
+
+    pmulhrsw    m4,        m1
+    packuswb    m2,        m4
+
+    ; store 4 rows of 2 pixels each
+    pextrw      [r2],      m2, 0
+    pextrw      [r2 + r3], m2, 2
+    lea         r2,        [r2 + 2 * r3]
+    pextrw      [r2],      m2, 4
+    pextrw      [r2 + r3], m2, 6
+
+    RET
+
+; AVX2 vertical 4-tap chroma filter, 2x4 block. %1 selects the output mode:
+; "pp" rounds to pixels, "ps" writes biased 16-bit intermediates. Gathers the
+; 7 needed 2-pixel rows into xm1 with pinsrw, shuffles them into the two
+; pmaddubsw input layouts, and reduces the two ymm lanes with a final paddw.
+%macro FILTER_VER_CHROMA_AVX2_2x4 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_2x4, 4, 6, 2
+    mov             r4d, r4m
+    shl             r4d, 5                      ; * 32: stride of tab_ChromaCoeff_V entries
+    sub             r0, r1                      ; move src up one row (first tap)
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff_V]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeff_V + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+
+    ; pack rows -1..5 (2 pixels each) into the word lanes of xm1
+    pinsrw          xm1, [r0], 0
+    pinsrw          xm1, [r0 + r1], 1
+    pinsrw          xm1, [r0 + r1 * 2], 2
+    pinsrw          xm1, [r0 + r4], 3
+    lea             r0, [r0 + r1 * 4]
+    pinsrw          xm1, [r0], 4
+    pinsrw          xm1, [r0 + r1], 5
+    pinsrw          xm1, [r0 + r1 * 2], 6
+
+    pshufb          xm0, xm1, [interp_vert_shuf]
+    pshufb          xm1, [interp_vert_shuf + 32]
+    vinserti128     m0, m0, xm1, 1
+    pmaddubsw       m0, [r5]
+    vextracti128    xm1, m0, 1
+    paddw           xm0, xm1                    ; combine the two tap pairs
+%ifidn %1,pp
+    ; pixel output: round with pw_512 and pack to bytes
+    pmulhrsw        xm0, [pw_512]
+    packuswb        xm0, xm0
+    lea             r4, [r3 * 3]
+    pextrw          [r2], xm0, 0
+    pextrw          [r2 + r3], xm0, 1
+    pextrw          [r2 + r3 * 2], xm0, 2
+    pextrw          [r2 + r4], xm0, 3
+%else
+    ; "ps" output: 16-bit samples biased by -pw_2000, stride in bytes
+    add             r3d, r3d
+    lea             r4, [r3 * 3]
+    psubw           xm0, [pw_2000]
+    movd            [r2], xm0
+    pextrd          [r2 + r3], xm0, 1
+    pextrd          [r2 + r3 * 2], xm0, 2
+    pextrd          [r2 + r4], xm0, 3
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_2x4 pp
+    FILTER_VER_CHROMA_AVX2_2x4 ps
+
+; AVX2 vertical 4-tap chroma filter, 2x8 block. Same pp/ps scheme as the
+; 2x4 variant above, but gathers 11 source rows across both ymm lanes and
+; uses the 64-byte-stride tab_ChromaCoeffVer_32 coefficient layout
+; (two pmaddubsw, one per tap pair).
+%macro FILTER_VER_CHROMA_AVX2_2x8 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_2x8, 4, 6, 2
+    mov             r4d, r4m
+    shl             r4d, 6                      ; * 64: stride of tab_ChromaCoeffVer_32 entries
+    sub             r0, r1                      ; move src up one row (first tap)
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+
+    ; pack rows -1..6 into xm1, rows 3..9 into xm0, merge into m1
+    pinsrw          xm1, [r0], 0
+    pinsrw          xm1, [r0 + r1], 1
+    pinsrw          xm1, [r0 + r1 * 2], 2
+    pinsrw          xm1, [r0 + r4], 3
+    lea             r0, [r0 + r1 * 4]
+    pinsrw          xm1, [r0], 4
+    pinsrw          xm1, [r0 + r1], 5
+    pinsrw          xm1, [r0 + r1 * 2], 6
+    pinsrw          xm1, [r0 + r4], 7
+    movhlps         xm0, xm1
+    lea             r0, [r0 + r1 * 4]
+    pinsrw          xm0, [r0], 4
+    pinsrw          xm0, [r0 + r1], 5
+    pinsrw          xm0, [r0 + r1 * 2], 6
+    vinserti128     m1, m1, xm0, 1
+
+    pshufb          m0, m1, [interp_vert_shuf]
+    pshufb          m1, [interp_vert_shuf + 32]
+    pmaddubsw       m0, [r5]
+    pmaddubsw       m1, [r5 + 1 * mmsize]
+    paddw           m0, m1                      ; combine the two tap pairs
+%ifidn %1,pp
+    ; pixel output: round with pw_512 and pack to bytes
+    pmulhrsw        m0, [pw_512]
+    vextracti128    xm1, m0, 1
+    packuswb        xm0, xm1
+    lea             r4, [r3 * 3]
+    pextrw          [r2], xm0, 0
+    pextrw          [r2 + r3], xm0, 1
+    pextrw          [r2 + r3 * 2], xm0, 2
+    pextrw          [r2 + r4], xm0, 3
+    lea             r2, [r2 + r3 * 4]
+    pextrw          [r2], xm0, 4
+    pextrw          [r2 + r3], xm0, 5
+    pextrw          [r2 + r3 * 2], xm0, 6
+    pextrw          [r2 + r4], xm0, 7
+%else
+    ; "ps" output: 16-bit samples biased by -pw_2000, stride in bytes
+    add             r3d, r3d
+    lea             r4, [r3 * 3]
+    psubw           m0, [pw_2000]
+    vextracti128    xm1, m0, 1
+    movd            [r2], xm0
+    pextrd          [r2 + r3], xm0, 1
+    pextrd          [r2 + r3 * 2], xm0, 2
+    pextrd          [r2 + r4], xm0, 3
+    lea             r2, [r2 + r3 * 4]
+    movd            [r2], xm1
+    pextrd          [r2 + r3], xm1, 1
+    pextrd          [r2 + r3 * 2], xm1, 2
+    pextrd          [r2 + r4], xm1, 3
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_2x8 pp
+    FILTER_VER_CHROMA_AVX2_2x8 ps
+
+; 4-tap vertical chroma interpolation of a 2x16 block (AVX2).
+; %1 selects the output variant:
+;   pp - round with pmulhrsw/[pw_512], pack to 8-bit pixels
+;   ps - keep 16-bit intermediates biased down by [pw_2000]; dst stride is
+;        doubled (add r3d, r3d) because the destination holds int16_t
+; Args: r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx.
+; coeffIdx is scaled by 64 (two 32-byte coefficient rows per filter index).
+%macro FILTER_VER_CHROMA_AVX2_2x16 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_2x16, 4, 6, 3
+    mov             r4d, r4m
+    shl             r4d, 6
+    sub             r0,  r1
+
+%ifdef PIC
+    lea             r5,  [tab_ChromaCoeffVer_32]
+    add             r5,  r4
+%else
+    lea             r5,  [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4,  [r1 * 3]
+
+; Gather sixteen 2-pixel rows as words; rows 0-7 into xm1, rows 8-11 into
+; the high half of xm0 so punpckhqdq can splice them together.
+    movd            xm1, [r0]
+    pinsrw          xm1, [r0 + r1], 1
+    pinsrw          xm1, [r0 + r1 * 2], 2
+    pinsrw          xm1, [r0 + r4], 3
+    lea             r0,  [r0 + r1 * 4]
+    pinsrw          xm1, [r0], 4
+    pinsrw          xm1, [r0 + r1], 5
+    pinsrw          xm1, [r0 + r1 * 2], 6
+    pinsrw          xm1, [r0 + r4], 7
+    lea             r0,  [r0 + r1 * 4]
+    pinsrw          xm0, [r0], 4
+    pinsrw          xm0, [r0 + r1], 5
+    pinsrw          xm0, [r0 + r1 * 2], 6
+    pinsrw          xm0, [r0 + r4], 7
+    punpckhqdq      xm0, xm1, xm0
+    vinserti128     m1,  m1,  xm0,  1
+
+; Shuffle neighbouring rows side by side and apply the two 4-tap
+; coefficient pairs; m2 = first 8 output rows (16-bit).
+    pshufb          m2,  m1,  [interp_vert_shuf]
+    pshufb          m1,  [interp_vert_shuf + 32]
+    pmaddubsw       m2,  [r5]
+    pmaddubsw       m1,  [r5 + 1 * mmsize]
+    paddw           m2,  m1
+
+    lea             r0,  [r0 + r1 * 4]
+    pinsrw          xm1, [r0], 4
+    pinsrw          xm1, [r0 + r1], 5
+    pinsrw          xm1, [r0 + r1 * 2], 6
+    pinsrw          xm1, [r0 + r4], 7
+    punpckhqdq      xm1, xm0, xm1
+    lea             r0,  [r0 + r1 * 4]
+    pinsrw          xm0, [r0], 4
+    pinsrw          xm0, [r0 + r1], 5
+    pinsrw          xm0, [r0 + r1 * 2], 6
+    punpckhqdq      xm0, xm1, xm0
+    vinserti128     m1,  m1,  xm0,  1
+
+; Same filtering for the second 8 output rows -> m0.
+    pshufb          m0,  m1,  [interp_vert_shuf]
+    pshufb          m1,  [interp_vert_shuf + 32]
+    pmaddubsw       m0,  [r5]
+    pmaddubsw       m1,  [r5 + 1 * mmsize]
+    paddw           m0,  m1
+%ifidn %1,pp
+    mova            m1,  [pw_512]
+    pmulhrsw        m2,  m1
+    pmulhrsw        m0,  m1
+    packuswb        m2,  m0
+; Store 16 rows of 2 pixels each (one word per row).
+    lea             r4,  [r3 * 3]
+    pextrw          [r2], xm2, 0
+    pextrw          [r2 + r3], xm2, 1
+    pextrw          [r2 + r3 * 2], xm2, 2
+    pextrw          [r2 + r4], xm2, 3
+    vextracti128    xm0, m2, 1
+    lea             r2,  [r2 + r3 * 4]
+    pextrw          [r2], xm0, 0
+    pextrw          [r2 + r3], xm0, 1
+    pextrw          [r2 + r3 * 2], xm0, 2
+    pextrw          [r2 + r4], xm0, 3
+    lea             r2,  [r2 + r3 * 4]
+    pextrw          [r2], xm2, 4
+    pextrw          [r2 + r3], xm2, 5
+    pextrw          [r2 + r3 * 2], xm2, 6
+    pextrw          [r2 + r4], xm2, 7
+    lea             r2,  [r2 + r3 * 4]
+    pextrw          [r2], xm0, 4
+    pextrw          [r2 + r3], xm0, 5
+    pextrw          [r2 + r3 * 2], xm0, 6
+    pextrw          [r2 + r4], xm0, 7
+%else
+    add             r3d, r3d
+    lea             r4,  [r3 * 3]
+    vbroadcasti128  m1,  [pw_2000]
+    psubw           m2,  m1
+    psubw           m0,  m1
+; Store 16 rows of 2 int16_t each (one dword per row).
+    vextracti128    xm1, m2, 1
+    movd            [r2], xm2
+    pextrd          [r2 + r3], xm2, 1
+    pextrd          [r2 + r3 * 2], xm2, 2
+    pextrd          [r2 + r4], xm2, 3
+    lea             r2, [r2 + r3 * 4]
+    movd            [r2], xm1
+    pextrd          [r2 + r3], xm1, 1
+    pextrd          [r2 + r3 * 2], xm1, 2
+    pextrd          [r2 + r4], xm1, 3
+    vextracti128    xm1, m0, 1
+    lea             r2,  [r2 + r3 * 4]
+    movd            [r2], xm0
+    pextrd          [r2 + r3], xm0, 1
+    pextrd          [r2 + r3 * 2], xm0, 2
+    pextrd          [r2 + r4], xm0, 3
+    lea             r2,  [r2 + r3 * 4]
+    movd            [r2], xm1
+    pextrd          [r2 + r3], xm1, 1
+    pextrd          [r2 + r3 * 2], xm1, 2
+    pextrd          [r2 + r4], xm1, 3
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_2x16 pp
+    FILTER_VER_CHROMA_AVX2_2x16 ps
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_2x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W2_H4 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_2x%2, 4, 6, 8
+
+    mov         r4d,       r4m
+    sub         r0,        r1
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]
+    movd        m0,        [r5 + r4 * 4]
+%else
+    movd        m0,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m0,        [tab_Cm]
+
+    mova        m1,        [pw_512]
+
+    mov         r4d,       %2
+    lea         r5,        [3 * r1]
+
+.loop:
+    movd        m2,        [r0]
+    movd        m3,        [r0 + r1]
+    movd        m4,        [r0 + 2 * r1]
+    movd        m5,        [r0 + r5]
+
+    punpcklbw   m2,        m3
+    punpcklbw   m6,        m4,        m5
+    punpcklbw   m2,        m6
+
+    pmaddubsw   m2,        m0
+
+    lea         r0,        [r0 + 4 * r1]
+    movd        m6,        [r0]
+
+    punpcklbw   m3,        m4
+    punpcklbw   m7,        m5,        m6
+    punpcklbw   m3,        m7
+
+    pmaddubsw   m3,        m0
+
+    phaddw      m2,        m3
+
+    pmulhrsw    m2,        m1
+
+    movd        m7,        [r0 + r1]
+
+    punpcklbw   m4,        m5
+    punpcklbw   m3,        m6,        m7
+    punpcklbw   m4,        m3
+
+    pmaddubsw   m4,        m0
+
+    movd        m3,        [r0 + 2 * r1]
+
+    punpcklbw   m5,        m6
+    punpcklbw   m7,        m3
+    punpcklbw   m5,        m7
+
+    pmaddubsw   m5,        m0
+
+    phaddw      m4,        m5
+
+    pmulhrsw    m4,        m1
+    packuswb    m2,        m4
+
+    pextrw      [r2],      m2, 0
+    pextrw      [r2 + r3], m2, 2
+    lea         r2,        [r2 + 2 * r3]
+    pextrw      [r2],      m2, 4
+    pextrw      [r2 + r3], m2, 6
+
+    lea         r2,        [r2 + 2 * r3]
+
+    sub         r4,        4
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V4_W2_H4 2, 8
+
+    FILTER_V4_W2_H4 2, 16
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_4x2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_4x2, 4, 6, 6
+
+    mov         r4d,       r4m
+    sub         r0,        r1
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]
+    movd        m0,        [r5 + r4 * 4]
+%else
+    movd        m0,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m0,        [tab_Cm]
+    lea         r5,        [r0 + 2 * r1]
+
+    movd        m2,        [r0]
+    movd        m3,        [r0 + r1]
+    movd        m4,        [r5]
+    movd        m5,        [r5 + r1]
+
+    punpcklbw   m2,        m3
+    punpcklbw   m1,        m4,        m5
+    punpcklbw   m2,        m1
+
+    pmaddubsw   m2,        m0
+
+    movd        m1,        [r0 + 4 * r1]
+
+    punpcklbw   m3,        m4
+    punpcklbw   m5,        m1
+    punpcklbw   m3,        m5
+
+    pmaddubsw   m3,        m0
+
+    phaddw      m2,        m3
+
+    pmulhrsw    m2,        [pw_512]
+    packuswb    m2,        m2
+    movd        [r2],      m2
+    pextrd      [r2 + r3], m2,  1
+
+    RET
+
+; AVX2 4-tap vertical chroma filter, 4x2 block.
+; %1 = pp (rounded 8-bit output) or ps (16-bit output minus pw_2000,
+; doubled dst stride). coeffIdx is scaled by 32 (one 32-byte table row).
+%macro FILTER_VER_CHROMA_AVX2_4x2 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x2, 4, 6, 4
+    mov             r4d, r4m
+    shl             r4d, 5
+    sub             r0, r1
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeff_V]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeff_V + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+
+; Interleave the five source rows pairwise into one ymm register.
+    movd            xm1, [r0]
+    movd            xm2, [r0 + r1]
+    punpcklbw       xm1, xm2
+    movd            xm3, [r0 + r1 * 2]
+    punpcklbw       xm2, xm3
+    movlhps         xm1, xm2
+    movd            xm0, [r0 + r4]
+    punpcklbw       xm3, xm0
+    movd            xm2, [r0 + r1 * 4]
+    punpcklbw       xm0, xm2
+    movlhps         xm3, xm0
+    vinserti128     m1, m1, xm3, 1                          ; m1 = row[x x x 4 3 2 1 0]
+
+; One madd over the whole ymm, then fold the two lanes to finish the taps.
+    pmaddubsw       m1, [r5]
+    vextracti128    xm3, m1, 1
+    paddw           xm1, xm3
+%ifidn %1,pp
+    pmulhrsw        xm1, [pw_512]
+    packuswb        xm1, xm1
+    movd            [r2], xm1
+    pextrd          [r2 + r3], xm1, 1
+%else
+    add             r3d, r3d
+    psubw           xm1, [pw_2000]
+    movq            [r2], xm1
+    movhps          [r2 + r3], xm1
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_4x2 pp
+    FILTER_VER_CHROMA_AVX2_4x2 ps
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_4x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_4x4, 4, 6, 8
+
+    mov         r4d,       r4m
+    sub         r0,        r1
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]
+    movd        m0,        [r5 + r4 * 4]
+%else
+    movd        m0,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m0,        [tab_Cm]
+    mova        m1,        [pw_512]
+    lea         r5,        [r0 + 4 * r1]
+    lea         r4,        [r1 * 3]
+
+    movd        m2,        [r0]
+    movd        m3,        [r0 + r1]
+    movd        m4,        [r0 + 2 * r1]
+    movd        m5,        [r0 + r4]
+
+    punpcklbw   m2,        m3
+    punpcklbw   m6,        m4,        m5
+    punpcklbw   m2,        m6
+
+    pmaddubsw   m2,        m0
+
+    movd        m6,        [r5]
+
+    punpcklbw   m3,        m4
+    punpcklbw   m7,        m5,        m6
+    punpcklbw   m3,        m7
+
+    pmaddubsw   m3,        m0
+
+    phaddw      m2,        m3
+
+    pmulhrsw    m2,        m1
+
+    movd        m7,        [r5 + r1]
+
+    punpcklbw   m4,        m5
+    punpcklbw   m3,        m6,        m7
+    punpcklbw   m4,        m3
+
+    pmaddubsw   m4,        m0
+
+    movd        m3,        [r5 + 2 * r1]
+
+    punpcklbw   m5,        m6
+    punpcklbw   m7,        m3
+    punpcklbw   m5,        m7
+
+    pmaddubsw   m5,        m0
+
+    phaddw      m4,        m5
+
+    pmulhrsw    m4,        m1
+
+    packuswb    m2,        m4
+    movd        [r2],      m2
+    pextrd      [r2 + r3], m2, 1
+    lea         r2,        [r2 + 2 * r3]
+    pextrd      [r2],      m2, 2
+    pextrd      [r2 + r3], m2, 3
+    RET
+; AVX2 4-tap vertical chroma filter, 4x4 block. %1 = pp or ps (see the
+; 4x2 variant above for the output conventions). coeffIdx scaled by 64
+; because each filter entry in tab_ChromaCoeffVer_32 is two 32-byte rows.
+%macro FILTER_VER_CHROMA_AVX2_4x4 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x4, 4, 6, 3
+    mov             r4d, r4m
+    shl             r4d, 6
+    sub             r0, r1
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+
+; Pack rows 0..6 into one ymm, then vpermd duplicates/pairs the rows so a
+; single pshufb+pmaddubsw per coefficient pair covers all four outputs.
+    movd            xm1, [r0]
+    pinsrd          xm1, [r0 + r1], 1
+    pinsrd          xm1, [r0 + r1 * 2], 2
+    pinsrd          xm1, [r0 + r4], 3                       ; m1 = row[3 2 1 0]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm2, [r0]
+    pinsrd          xm2, [r0 + r1], 1
+    pinsrd          xm2, [r0 + r1 * 2], 2                   ; m2 = row[x 6 5 4]
+    vinserti128     m1, m1, xm2, 1                          ; m1 = row[x 6 5 4 3 2 1 0]
+    mova            m2, [interp4_vpp_shuf1]
+    vpermd          m0, m2, m1                              ; m0 = row[4 3 3 2 2 1 1 0]
+    mova            m2, [interp4_vpp_shuf1 + mmsize]
+    vpermd          m1, m2, m1                              ; m1 = row[6 5 5 4 4 3 3 2]
+
+    mova            m2, [interp4_vpp_shuf]
+    pshufb          m0, m0, m2
+    pshufb          m1, m1, m2
+    pmaddubsw       m0, [r5]
+    pmaddubsw       m1, [r5 + mmsize]
+    paddw           m0, m1                                  ; m0 = WORD ROW[3 2 1 0]
+%ifidn %1,pp
+    pmulhrsw        m0, [pw_512]
+    vextracti128    xm1, m0, 1
+    packuswb        xm0, xm1
+    lea             r5, [r3 * 3]
+    movd            [r2], xm0
+    pextrd          [r2 + r3], xm0, 1
+    pextrd          [r2 + r3 * 2], xm0, 2
+    pextrd          [r2 + r5], xm0, 3
+%else
+    add             r3d, r3d
+    psubw           m0, [pw_2000]
+    vextracti128    xm1, m0, 1
+    lea             r5, [r3 * 3]
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm1
+    movhps          [r2 + r5], xm1
+%endif
+    RET
+%endmacro
+    FILTER_VER_CHROMA_AVX2_4x4 pp
+    FILTER_VER_CHROMA_AVX2_4x4 ps
+
+; AVX2 4-tap vertical chroma filter, 4x8 block. Same scheme as the 4x4
+; variant but with a second row-group (m4/m2) covering output rows 4-7.
+; %1 = pp (rounded pixels) or ps (16-bit minus pw_2000, doubled stride).
+%macro FILTER_VER_CHROMA_AVX2_4x8 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x8, 4, 6, 5
+    mov             r4d, r4m
+    shl             r4d, 6
+    sub             r0, r1
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+
+    movd            xm1, [r0]
+    pinsrd          xm1, [r0 + r1], 1
+    pinsrd          xm1, [r0 + r1 * 2], 2
+    pinsrd          xm1, [r0 + r4], 3                       ; m1 = row[3 2 1 0]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm2, [r0]
+    pinsrd          xm2, [r0 + r1], 1
+    pinsrd          xm2, [r0 + r1 * 2], 2
+    pinsrd          xm2, [r0 + r4], 3                       ; m2 = row[7 6 5 4]
+    vinserti128     m1, m1, xm2, 1                          ; m1 = row[7 6 5 4 3 2 1 0]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm3, [r0]
+    pinsrd          xm3, [r0 + r1], 1
+    pinsrd          xm3, [r0 + r1 * 2], 2                   ; m3 = row[x 10 9 8]
+    vinserti128     m2, m2, xm3, 1                          ; m2 = row[x 10 9 8 7 6 5 4]
+    mova            m3, [interp4_vpp_shuf1]
+    vpermd          m0, m3, m1                              ; m0 = row[4 3 3 2 2 1 1 0]
+    vpermd          m4, m3, m2                              ; m4 = row[8 7 7 6 6 5 5 4]
+    mova            m3, [interp4_vpp_shuf1 + mmsize]
+    vpermd          m1, m3, m1                              ; m1 = row[6 5 5 4 4 3 3 2]
+    vpermd          m2, m3, m2                              ; m2 = row[10 9 9 8 8 7 7 6]
+
+    mova            m3, [interp4_vpp_shuf]
+    pshufb          m0, m0, m3
+    pshufb          m1, m1, m3
+    pshufb          m2, m2, m3
+    pshufb          m4, m4, m3
+    pmaddubsw       m0, [r5]
+    pmaddubsw       m4, [r5]
+    pmaddubsw       m1, [r5 + mmsize]
+    pmaddubsw       m2, [r5 + mmsize]
+    paddw           m0, m1                                  ; m0 = WORD ROW[3 2 1 0]
+    paddw           m4, m2                                  ; m4 = WORD ROW[7 6 5 4]
+%ifidn %1,pp
+    pmulhrsw        m0, [pw_512]
+    pmulhrsw        m4, [pw_512]
+    packuswb        m0, m4
+    vextracti128    xm1, m0, 1
+    lea             r5, [r3 * 3]
+; Note the interleaved store order: packuswb put rows [4 5 0 1]/[6 7 2 3]
+; across lanes, hence dwords alternate between xm0 and xm1.
+    movd            [r2], xm0
+    pextrd          [r2 + r3], xm0, 1
+    movd            [r2 + r3 * 2], xm1
+    pextrd          [r2 + r5], xm1, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm0, 2
+    pextrd          [r2 + r3], xm0, 3
+    pextrd          [r2 + r3 * 2], xm1, 2
+    pextrd          [r2 + r5], xm1, 3
+%else
+    add             r3d, r3d
+    psubw           m0, [pw_2000]
+    psubw           m4, [pw_2000]
+    vextracti128    xm1, m0, 1
+    vextracti128    xm2, m4, 1
+    lea             r5, [r3 * 3]
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm1
+    movhps          [r2 + r5], xm1
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm4
+    movhps          [r2 + r3], xm4
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r5], xm2
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_4x8 pp
+    FILTER_VER_CHROMA_AVX2_4x8 ps
+
+; AVX2 4-tap vertical chroma filter for 4xN (N = 16 or 32) blocks.
+; x86-64 only: uses m8-m11, which need 16 SIMD registers. Each %rep
+; iteration filters 16 output rows in four 4-row groups (m0/m6/m7/m8).
+; %1 = pp or ps output variant, %2 = block height.
+%macro FILTER_VER_CHROMA_AVX2_4xN 2
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x%2, 4, 6, 12
+    mov             r4d, r4m
+    shl             r4d, 6
+    sub             r0, r1
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+; Hoist the coefficient rows and the rounding/bias constant out of the loop.
+    mova            m10, [r5]
+    mova            m11, [r5 + mmsize]
+%ifidn %1,pp
+    mova            m9, [pw_512]
+%else
+    add             r3d, r3d
+    mova            m9, [pw_2000]
+%endif
+    lea             r5, [r3 * 3]
+%rep %2 / 16
+    movd            xm1, [r0]
+    pinsrd          xm1, [r0 + r1], 1
+    pinsrd          xm1, [r0 + r1 * 2], 2
+    pinsrd          xm1, [r0 + r4], 3                       ; m1 = row[3 2 1 0]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm2, [r0]
+    pinsrd          xm2, [r0 + r1], 1
+    pinsrd          xm2, [r0 + r1 * 2], 2
+    pinsrd          xm2, [r0 + r4], 3                       ; m2 = row[7 6 5 4]
+    vinserti128     m1, m1, xm2, 1                          ; m1 = row[7 6 5 4 3 2 1 0]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm3, [r0]
+    pinsrd          xm3, [r0 + r1], 1
+    pinsrd          xm3, [r0 + r1 * 2], 2
+    pinsrd          xm3, [r0 + r4], 3                       ; m3 = row[11 10 9 8]
+    vinserti128     m2, m2, xm3, 1                          ; m2 = row[11 10 9 8 7 6 5 4]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm4, [r0]
+    pinsrd          xm4, [r0 + r1], 1
+    pinsrd          xm4, [r0 + r1 * 2], 2
+    pinsrd          xm4, [r0 + r4], 3                       ; m4 = row[15 14 13 12]
+    vinserti128     m3, m3, xm4, 1                          ; m3 = row[15 14 13 12 11 10 9 8]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm5, [r0]
+    pinsrd          xm5, [r0 + r1], 1
+    pinsrd          xm5, [r0 + r1 * 2], 2                   ; m5 = row[x 18 17 16]
+    vinserti128     m4, m4, xm5, 1                          ; m4 = row[x 18 17 16 15 14 13 12]
+    mova            m5, [interp4_vpp_shuf1]
+    vpermd          m0, m5, m1                              ; m0 = row[4 3 3 2 2 1 1 0]
+    vpermd          m6, m5, m2                              ; m6 = row[8 7 7 6 6 5 5 4]
+    vpermd          m7, m5, m3                              ; m7 = row[12 11 11 10 10 9 9 8]
+    vpermd          m8, m5, m4                              ; m8 = row[16 15 15 14 14 13 13 12]
+    mova            m5, [interp4_vpp_shuf1 + mmsize]
+    vpermd          m1, m5, m1                              ; m1 = row[6 5 5 4 4 3 3 2]
+    vpermd          m2, m5, m2                              ; m2 = row[10 9 9 8 8 7 7 6]
+    vpermd          m3, m5, m3                              ; m3 = row[14 13 13 12 12 11 11 10]
+    vpermd          m4, m5, m4                              ; m4 = row[18 17 17 16 16 15 15 14]
+
+    mova            m5, [interp4_vpp_shuf]
+    pshufb          m0, m0, m5
+    pshufb          m1, m1, m5
+    pshufb          m2, m2, m5
+    pshufb          m4, m4, m5
+    pshufb          m3, m3, m5
+    pshufb          m6, m6, m5
+    pshufb          m7, m7, m5
+    pshufb          m8, m8, m5
+    pmaddubsw       m0, m10
+    pmaddubsw       m6, m10
+    pmaddubsw       m7, m10
+    pmaddubsw       m8, m10
+    pmaddubsw       m1, m11
+    pmaddubsw       m2, m11
+    pmaddubsw       m3, m11
+    pmaddubsw       m4, m11
+    paddw           m0, m1                                  ; m0 = WORD ROW[3 2 1 0]
+    paddw           m6, m2                                  ; m6 = WORD ROW[7 6 5 4]
+    paddw           m7, m3                                  ; m7 = WORD ROW[11 10 9 8]
+    paddw           m8, m4                                  ; m8 = WORD ROW[15 14 13 12]
+%ifidn %1,pp
+    pmulhrsw        m0, m9
+    pmulhrsw        m6, m9
+    pmulhrsw        m7, m9
+    pmulhrsw        m8, m9
+    packuswb        m0, m6
+    packuswb        m7, m8
+    vextracti128    xm1, m0, 1
+    vextracti128    xm2, m7, 1
+    movd            [r2], xm0
+    pextrd          [r2 + r3], xm0, 1
+    movd            [r2 + r3 * 2], xm1
+    pextrd          [r2 + r5], xm1, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm0, 2
+    pextrd          [r2 + r3], xm0, 3
+    pextrd          [r2 + r3 * 2], xm1, 2
+    pextrd          [r2 + r5], xm1, 3
+    lea             r2, [r2 + r3 * 4]
+    movd            [r2], xm7
+    pextrd          [r2 + r3], xm7, 1
+    movd            [r2 + r3 * 2], xm2
+    pextrd          [r2 + r5], xm2, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm7, 2
+    pextrd          [r2 + r3], xm7, 3
+    pextrd          [r2 + r3 * 2], xm2, 2
+    pextrd          [r2 + r5], xm2, 3
+%else
+    psubw           m0, m9
+    psubw           m6, m9
+    psubw           m7, m9
+    psubw           m8, m9
+    vextracti128    xm1, m0, 1
+    vextracti128    xm2, m6, 1
+    vextracti128    xm3, m7, 1
+    vextracti128    xm4, m8, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm1
+    movhps          [r2 + r5], xm1
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm6
+    movhps          [r2 + r3], xm6
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r5], xm2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm7
+    movhps          [r2 + r3], xm7
+    movq            [r2 + r3 * 2], xm3
+    movhps          [r2 + r5], xm3
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm8
+    movhps          [r2 + r3], xm8
+    movq            [r2 + r3 * 2], xm4
+    movhps          [r2 + r5], xm4
+%endif
+    lea             r2, [r2 + r3 * 4]
+%endrep
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_4xN pp, 16
+    FILTER_VER_CHROMA_AVX2_4xN ps, 16
+    FILTER_VER_CHROMA_AVX2_4xN pp, 32
+    FILTER_VER_CHROMA_AVX2_4xN ps, 32
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W4_H4 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_%1x%2, 4, 6, 8
+
+    mov         r4d,       r4m
+    sub         r0,        r1
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]
+    movd        m0,        [r5 + r4 * 4]
+%else
+    movd        m0,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m0,        [tab_Cm]
+
+    mova        m1,        [pw_512]
+
+    mov         r4d,       %2
+
+    lea         r5,        [3 * r1]
+
+.loop:
+    movd        m2,        [r0]
+    movd        m3,        [r0 + r1]
+    movd        m4,        [r0 + 2 * r1]
+    movd        m5,        [r0 + r5]
+
+    punpcklbw   m2,        m3
+    punpcklbw   m6,        m4,        m5
+    punpcklbw   m2,        m6
+
+    pmaddubsw   m2,        m0
+
+    lea         r0,        [r0 + 4 * r1]
+    movd        m6,        [r0]
+
+    punpcklbw   m3,        m4
+    punpcklbw   m7,        m5,        m6
+    punpcklbw   m3,        m7
+
+    pmaddubsw   m3,        m0
+
+    phaddw      m2,        m3
+
+    pmulhrsw    m2,        m1
+
+    movd        m7,        [r0 + r1]
+
+    punpcklbw   m4,        m5
+    punpcklbw   m3,        m6,        m7
+    punpcklbw   m4,        m3
+
+    pmaddubsw   m4,        m0
+
+    movd        m3,        [r0 + 2 * r1]
+
+    punpcklbw   m5,        m6
+    punpcklbw   m7,        m3
+    punpcklbw   m5,        m7
+
+    pmaddubsw   m5,        m0
+
+    phaddw      m4,        m5
+
+    pmulhrsw    m4,        m1
+    packuswb    m2,        m4
+    movd        [r2],      m2
+    pextrd      [r2 + r3], m2,  1
+    lea         r2,        [r2 + 2 * r3]
+    pextrd      [r2],      m2, 2
+    pextrd      [r2 + r3], m2, 3
+
+    lea         r2,        [r2 + 2 * r3]
+
+    sub         r4,        4
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V4_W4_H4 4,  8
+    FILTER_V4_W4_H4 4, 16
+
+    FILTER_V4_W4_H4 4, 32
+
+; One 8-wide 4-tap row step: combine rows in m1/m2 and m3/m0 with the
+; coefficient pairs in m6/m5, round via m4 (pw_512), pack to bytes in m1.
+; Register roles are fixed by the FILTER_V4_W8 family below.
+%macro FILTER_V4_W8_H2 0
+    punpcklbw   m1,        m2
+    punpcklbw   m7,        m3,        m0
+
+    pmaddubsw   m1,        m6
+    pmaddubsw   m7,        m5
+
+    paddw       m1,        m7
+
+    pmulhrsw    m1,        m4
+    packuswb    m1,        m1
+%endmacro
+
+; Same row step as FILTER_V4_W8_H2 with the register window rotated by one:
+; rows m2/m3 and m0/m1, result packed in m2.
+%macro FILTER_V4_W8_H3 0
+    punpcklbw   m2,        m3
+    punpcklbw   m7,        m0,        m1
+
+    pmaddubsw   m2,        m6
+    pmaddubsw   m7,        m5
+
+    paddw       m2,        m7
+
+    pmulhrsw    m2,        m4
+    packuswb    m2,        m2
+%endmacro
+
+; Row step with the window rotated again: rows m3/m0 and m1/m2, result in m3.
+%macro FILTER_V4_W8_H4 0
+    punpcklbw   m3,        m0
+    punpcklbw   m7,        m1,        m2
+
+    pmaddubsw   m3,        m6
+    pmaddubsw   m7,        m5
+
+    paddw       m3,        m7
+
+    pmulhrsw    m3,        m4
+    packuswb    m3,        m3
+%endmacro
+
+; Row step with the window rotated once more: rows m0/m1 and m2/m3,
+; result in m0 — completing the 4-register rotation cycle.
+%macro FILTER_V4_W8_H5 0
+    punpcklbw   m0,        m1
+    punpcklbw   m7,        m2,        m3
+
+    pmaddubsw   m0,        m6
+    pmaddubsw   m7,        m5
+
+    paddw       m0,        m7
+
+    pmulhrsw    m0,        m4
+    packuswb    m0,        m0
+%endmacro
+
+; 8x2 body: FILTER_V4_W8 produces row 0; load the 5th source row and run
+; one more window step (H2) for row 1.
+%macro FILTER_V4_W8_8x2 2
+    FILTER_V4_W8 %1, %2
+    movq        m0,        [r0 + 4 * r1]
+
+    FILTER_V4_W8_H2
+
+    movh        [r2 + r3], m1
+%endmacro
+
+; 8x4 body: extend 8x2 with two more window steps (H3, H4) for rows 2-3.
+%macro FILTER_V4_W8_8x4 2
+    FILTER_V4_W8_8x2 %1, %2
+;8x3
+    lea         r6,        [r0 + 4 * r1]
+    movq        m1,        [r6 + r1]
+
+    FILTER_V4_W8_H3
+
+    movh        [r2 + 2 * r3], m2
+
+;8x4
+    movq        m2,        [r6 + 2 * r1]
+
+    FILTER_V4_W8_H4
+
+    lea         r5,        [r2 + 2 * r3]
+    movh        [r5 + r3], m3
+%endmacro
+
+; 8x6 body: extend 8x4 with two more window steps (H5, H2) for rows 4-5.
+%macro FILTER_V4_W8_8x6 2
+    FILTER_V4_W8_8x4 %1, %2
+;8x5
+    lea         r6,        [r6 + 2 * r1]
+    movq        m3,        [r6 + r1]
+
+    FILTER_V4_W8_H5
+
+    movh        [r2 + 4 * r3], m0
+
+;8x6
+    movq        m0,        [r0 + 8 * r1]
+
+    FILTER_V4_W8_H2
+
+    lea         r5,        [r2 + 4 * r3]
+    movh        [r5 + r3], m1
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W8 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_%1x%2, 4, 7, 8
+
+    mov         r4d,       r4m
+
+    sub         r0,        r1
+    movq        m0,        [r0]
+    movq        m1,        [r0 + r1]
+    movq        m2,        [r0 + 2 * r1]
+    lea         r5,        [r0 + 2 * r1]
+    movq        m3,        [r5 + r1]
+
+    punpcklbw   m0,        m1
+    punpcklbw   m4,        m2,          m3
+
+%ifdef PIC
+    lea         r6,        [tab_ChromaCoeff]
+    movd        m5,        [r6 + r4 * 4]
+%else
+    movd        m5,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m6,        m5,       [tab_Vm]
+    pmaddubsw   m0,        m6
+
+    pshufb      m5,        [tab_Vm + 16]
+    pmaddubsw   m4,        m5
+
+    paddw       m0,        m4
+
+    mova        m4,        [pw_512]
+
+    pmulhrsw    m0,        m4
+    packuswb    m0,        m0
+    movh        [r2],      m0
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_8x2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+    FILTER_V4_W8_8x2 8, 2
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_8x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+    FILTER_V4_W8_8x4 8, 4
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_8x6(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+    FILTER_V4_W8_8x6 8, 6
+
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ps_4x2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_4x2, 4, 6, 6
+
+    mov         r4d, r4m
+    sub         r0, r1
+    add         r3d, r3d
+
+%ifdef PIC
+    lea         r5, [tab_ChromaCoeff]
+    movd        m0, [r5 + r4 * 4]
+%else
+    movd        m0, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m0, [tab_Cm]
+
+    movd        m2, [r0]
+    movd        m3, [r0 + r1]
+    lea         r5, [r0 + 2 * r1]
+    movd        m4, [r5]
+    movd        m5, [r5 + r1]
+
+    punpcklbw   m2, m3
+    punpcklbw   m1, m4, m5
+    punpcklbw   m2, m1
+
+    pmaddubsw   m2, m0
+
+    movd        m1, [r0 + 4 * r1]
+
+    punpcklbw   m3, m4
+    punpcklbw   m5, m1
+    punpcklbw   m3, m5
+
+    pmaddubsw   m3, m0
+
+    phaddw      m2, m3
+
+    psubw       m2, [pw_2000]
+    movh        [r2], m2
+    movhps      [r2 + r3], m2
+
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ps_4x4(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_4x4, 4, 6, 7
+
+    mov        r4d, r4m
+    sub        r0, r1
+    add        r3d, r3d
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m0, [r5 + r4 * 4]
+%else
+    movd       m0, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m0, [tab_Cm]
+
+    lea        r4, [r1 * 3]
+    lea        r5, [r0 + 4 * r1]
+
+    movd       m2, [r0]
+    movd       m3, [r0 + r1]
+    movd       m4, [r0 + 2 * r1]
+    movd       m5, [r0 + r4]
+
+    punpcklbw  m2, m3
+    punpcklbw  m6, m4, m5
+    punpcklbw  m2, m6
+
+    pmaddubsw  m2, m0
+
+    movd       m6, [r5]
+
+    punpcklbw  m3, m4
+    punpcklbw  m1, m5, m6
+    punpcklbw  m3, m1
+
+    pmaddubsw  m3, m0
+
+    phaddw     m2, m3
+
+    mova       m1, [pw_2000]
+
+    psubw      m2, m1
+    movh       [r2], m2
+    movhps     [r2 + r3], m2
+
+    movd       m2, [r5 + r1]
+
+    punpcklbw  m4, m5
+    punpcklbw  m3, m6, m2
+    punpcklbw  m4, m3
+
+    pmaddubsw  m4, m0
+
+    movd       m3, [r5 + 2 * r1]
+
+    punpcklbw  m5, m6
+    punpcklbw  m2, m3
+    punpcklbw  m5, m2
+
+    pmaddubsw  m5, m0
+
+    phaddw     m4, m5
+
+    psubw      m4, m1
+    lea        r2, [r2 + 2 * r3]
+    movh       [r2], m4
+    movhps     [r2 + r3], m4
+
+    RET
+
+;---------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ps_%1x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------
+%macro FILTER_V_PS_W4_H4 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_%1x%2, 4, 6, 8
+
+    mov        r4d, r4m
+    sub        r0, r1
+    add        r3d, r3d
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m0, [r5 + r4 * 4]
+%else
+    movd       m0, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m0, [tab_Cm]
+
+    mova       m1, [pw_2000]
+
+    mov        r4d, %2/4
+    lea        r5, [3 * r1]
+
+.loop:
+    movd       m2, [r0]
+    movd       m3, [r0 + r1]
+    movd       m4, [r0 + 2 * r1]
+    movd       m5, [r0 + r5]
+
+    punpcklbw  m2, m3
+    punpcklbw  m6, m4, m5
+    punpcklbw  m2, m6
+
+    pmaddubsw  m2, m0
+
+    lea        r0, [r0 + 4 * r1]
+    movd       m6, [r0]
+
+    punpcklbw  m3, m4
+    punpcklbw  m7, m5, m6
+    punpcklbw  m3, m7
+
+    pmaddubsw  m3, m0
+
+    phaddw     m2, m3
+
+    psubw      m2, m1
+    movh       [r2], m2
+    movhps     [r2 + r3], m2
+
+    movd       m2, [r0 + r1]
+
+    punpcklbw  m4, m5
+    punpcklbw  m3, m6, m2
+    punpcklbw  m4, m3
+
+    pmaddubsw  m4, m0
+
+    movd       m3, [r0 + 2 * r1]
+
+    punpcklbw  m5, m6
+    punpcklbw  m2, m3
+    punpcklbw  m5, m2
+
+    pmaddubsw  m5, m0
+
+    phaddw     m4, m5
+
+    psubw      m4, m1
+    lea        r2, [r2 + 2 * r3]
+    movh       [r2], m4
+    movhps     [r2 + r3], m4
+
+    lea        r2, [r2 + 2 * r3]
+
+    dec        r4d
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V_PS_W4_H4 4, 8
+    FILTER_V_PS_W4_H4 4, 16
+
+    FILTER_V_PS_W4_H4 4, 32
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ps_8x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+; 8-wide "ps" vertical 4-tap filter producing 2 output rows per iteration,
+; so %2 only needs to be even — used for the odd heights 2/4/6/12/64.
+%macro FILTER_V_PS_W8_H8_H16_H2 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_%1x%2, 4, 6, 7
+
+    mov        r4d, r4m                 ; r4d = coeffIdx
+    sub        r0, r1                   ; rewind src one row
+    add        r3d, r3d                 ; int16_t dst: stride -> bytes
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m5, [r5 + r4 * 4]
+%else
+    movd       m5, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m6, m5, [tab_Vm]         ; m6 = tap pair 0/1, m5 = tap pair 2/3
+    pshufb     m5, [tab_Vm + 16]
+    mova       m4, [pw_2000]            ; "ps" output bias
+
+    mov        r4d, %2/2                ; 2 rows per iteration
+    lea        r5, [3 * r1]             ; r5 = 3 * srcStride
+
+.loopH:
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    movq       m2, [r0 + 2 * r1]
+    movq       m3, [r0 + r5]
+
+    punpcklbw  m0, m1
+    punpcklbw  m1, m2
+    punpcklbw  m2, m3
+
+    pmaddubsw  m0, m6
+    pmaddubsw  m2, m5
+
+    paddw      m0, m2                   ; row 0 = taps01 + taps23
+
+    psubw      m0, m4                   ; subtract bias
+    movu       [r2], m0
+
+    movq       m0, [r0 + 4 * r1]
+
+    punpcklbw  m3, m0
+
+    pmaddubsw  m1, m6
+    pmaddubsw  m3, m5
+
+    paddw      m1, m3                   ; row 1
+    psubw      m1, m4
+
+    movu       [r2 + r3], m1
+
+    lea        r0, [r0 + 2 * r1]
+    lea        r2, [r2 + 2 * r3]
+
+    dec        r4d
+    jnz       .loopH
+
+    RET
+%endmacro
+
+    FILTER_V_PS_W8_H8_H16_H2 8, 2
+    FILTER_V_PS_W8_H8_H16_H2 8, 4
+    FILTER_V_PS_W8_H8_H16_H2 8, 6
+
+    FILTER_V_PS_W8_H8_H16_H2 8, 12
+    FILTER_V_PS_W8_H8_H16_H2 8, 64
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ps_8x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+; 8-wide "ps" vertical 4-tap filter, unrolled to 4 output rows per iteration
+; (%2 must be a multiple of 4; used for heights 8/16/32).  Intermediate rows
+; are reused between outputs: each punpcklbw pair feeds two adjacent rows.
+%macro FILTER_V_PS_W8_H8_H16_H32 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_%1x%2, 4, 6, 8
+
+    mov        r4d, r4m                 ; r4d = coeffIdx
+    sub        r0, r1                   ; rewind src one row
+    add        r3d, r3d                 ; int16_t dst: stride -> bytes
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m5, [r5 + r4 * 4]
+%else
+    movd       m5, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m6, m5, [tab_Vm]         ; m6 = tap pair 0/1, m5 = tap pair 2/3
+    pshufb     m5, [tab_Vm + 16]
+    mova       m4, [pw_2000]            ; "ps" output bias
+
+    mov        r4d, %2/4                ; 4 rows per iteration
+    lea        r5, [3 * r1]
+
+.loop:
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    movq       m2, [r0 + 2 * r1]
+    movq       m3, [r0 + r5]
+
+    punpcklbw  m0, m1
+    punpcklbw  m1, m2
+    punpcklbw  m2, m3
+
+    pmaddubsw  m0, m6
+    pmaddubsw  m7, m2, m5
+
+    paddw      m0, m7                   ; row 0
+
+    psubw       m0, m4
+    movu       [r2], m0
+
+    lea        r0, [r0 + 4 * r1]
+    movq       m0, [r0]
+
+    punpcklbw  m3, m0
+
+    pmaddubsw  m1, m6
+    pmaddubsw  m7, m3, m5
+
+    paddw      m1, m7                   ; row 1
+
+    psubw      m1, m4
+    movu       [r2 + r3], m1
+
+    movq       m1, [r0 + r1]
+
+    punpcklbw  m0, m1
+
+    pmaddubsw  m2, m6
+    pmaddubsw  m0, m5
+
+    paddw      m2, m0                   ; row 2
+
+    psubw      m2, m4
+    lea        r2, [r2 + 2 * r3]
+    movu       [r2], m2
+
+    movq       m2, [r0 + 2 * r1]
+
+    punpcklbw  m1, m2
+
+    pmaddubsw  m3, m6
+    pmaddubsw  m1, m5
+
+    paddw      m3, m1                   ; row 3
+    psubw      m3, m4
+
+    movu       [r2 + r3], m3
+
+    lea        r2, [r2 + 2 * r3]
+
+    dec        r4d
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V_PS_W8_H8_H16_H32 8,  8
+    FILTER_V_PS_W8_H8_H16_H32 8, 16
+    FILTER_V_PS_W8_H8_H16_H32 8, 32
+
+;------------------------------------------------------------------------------------------------------------
+;void interp_4tap_vert_ps_6x8(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;------------------------------------------------------------------------------------------------------------
+; 6-wide "ps" vertical 4-tap filter: computes 8 lanes like the W8 kernel but
+; stores only 6 int16 per row (movh = 4 shorts, then pshufd+movd for shorts
+; 4-5).  4 output rows per iteration; %2 must be a multiple of 4.
+%macro FILTER_V_PS_W6 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_6x%2, 4, 6, 8
+
+    mov        r4d, r4m                 ; r4d = coeffIdx
+    sub        r0, r1                   ; rewind src one row
+    add        r3d, r3d                 ; int16_t dst: stride -> bytes
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m5, [r5 + r4 * 4]
+%else
+    movd       m5, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m6, m5, [tab_Vm]         ; m6 = tap pair 0/1, m5 = tap pair 2/3
+    pshufb     m5, [tab_Vm + 16]
+    mova       m4, [pw_2000]            ; "ps" output bias
+    lea        r5, [3 * r1]
+    mov        r4d, %2/4                ; 4 rows per iteration
+
+.loop:
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    movq       m2, [r0 + 2 * r1]
+    movq       m3, [r0 + r5]
+
+    punpcklbw  m0, m1
+    punpcklbw  m1, m2
+    punpcklbw  m2, m3
+
+    pmaddubsw  m0, m6
+    pmaddubsw  m7, m2, m5
+
+    paddw      m0, m7
+    psubw      m0, m4
+
+    movh       [r2], m0                 ; shorts 0-3
+    pshufd     m0, m0, 2                ; move shorts 4-5 into low dword
+    movd       [r2 + 8], m0             ; shorts 4-5
+
+    lea        r0, [r0 + 4 * r1]
+    movq       m0, [r0]
+    punpcklbw  m3, m0
+
+    pmaddubsw  m1, m6
+    pmaddubsw  m7, m3, m5
+
+    paddw      m1, m7
+    psubw      m1, m4
+
+    movh       [r2 + r3], m1
+    pshufd     m1, m1, 2
+    movd       [r2 + r3 + 8], m1
+
+    movq       m1, [r0 + r1]
+    punpcklbw  m0, m1
+
+    pmaddubsw  m2, m6
+    pmaddubsw  m0, m5
+
+    paddw      m2, m0
+    psubw      m2, m4
+
+    lea        r2,[r2 + 2 * r3]
+    movh       [r2], m2
+    pshufd     m2, m2, 2
+    movd       [r2 + 8], m2
+
+    movq       m2,[r0 + 2 * r1]
+    punpcklbw  m1, m2
+
+    pmaddubsw  m3, m6
+    pmaddubsw  m1, m5
+
+    paddw      m3, m1
+    psubw      m3, m4
+
+    movh       [r2 + r3], m3
+    pshufd     m3, m3, 2
+    movd       [r2 + r3 + 8], m3
+
+    lea        r2, [r2 + 2 * r3]
+
+    dec        r4d
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V_PS_W6 6, 8
+    FILTER_V_PS_W6 6, 16
+
+;---------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ps_12x16(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------
+; 12-wide "ps" vertical 4-tap filter: loads 16 source pixels per row and
+; filters both 8-lane halves, but stores only 12 int16 per row (movu = 8
+; shorts + movh = 4 shorts).  2 output rows per iteration; %2 must be even.
+%macro FILTER_V_PS_W12 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_12x%2, 4, 6, 8
+
+    mov        r4d, r4m                 ; r4d = coeffIdx
+    sub        r0, r1                   ; rewind src one row
+    add        r3d, r3d                 ; int16_t dst: stride -> bytes
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m0, [r5 + r4 * 4]
+%else
+    movd       m0, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m1, m0, [tab_Vm]         ; m1 = tap pair 0/1, m0 = tap pair 2/3
+    pshufb     m0, [tab_Vm + 16]
+
+    mov        r4d, %2/2                ; 2 rows per iteration
+
+.loop:
+    movu       m2, [r0]
+    movu       m3, [r0 + r1]
+
+    punpcklbw  m4, m2, m3               ; low 8 columns
+    punpckhbw  m2, m3                   ; high 8 columns (only 4 are stored)
+
+    pmaddubsw  m4, m1
+    pmaddubsw  m2, m1
+
+    lea        r0, [r0 + 2 * r1]
+    movu       m5, [r0]
+    movu       m7, [r0 + r1]
+
+    punpcklbw  m6, m5, m7
+    pmaddubsw  m6, m0
+    paddw      m4, m6
+
+    punpckhbw  m6, m5, m7
+    pmaddubsw  m6, m0
+    paddw      m2, m6
+
+    mova       m6, [pw_2000]            ; "ps" output bias
+
+    psubw      m4, m6
+    psubw      m2, m6
+
+    movu       [r2], m4                 ; shorts 0-7
+    movh       [r2 + 16], m2            ; shorts 8-11
+
+    punpcklbw  m4, m3, m5
+    punpckhbw  m3, m5
+
+    pmaddubsw  m4, m1
+    pmaddubsw  m3, m1
+
+    movu       m2, [r0 + 2 * r1]
+
+    punpcklbw  m5, m7, m2
+    punpckhbw  m7, m2
+
+    pmaddubsw  m5, m0
+    pmaddubsw  m7, m0
+
+    paddw      m4, m5
+    paddw      m3, m7
+
+    psubw      m4, m6
+    psubw      m3, m6
+
+    movu       [r2 + r3], m4
+    movh       [r2 + r3 + 16], m3
+
+    lea        r2, [r2 + 2 * r3]
+
+    dec        r4d
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V_PS_W12 12, 16
+    FILTER_V_PS_W12 12, 32
+
+;---------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ps_16x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------
+; 16-wide "ps" vertical 4-tap filter: both 8-lane halves of each 16-pixel row
+; are filtered and stored (2 x 16 bytes of int16 per output row).  2 output
+; rows per iteration; %2 must be even.
+%macro FILTER_V_PS_W16 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_%1x%2, 4, 6, 8
+
+    mov        r4d, r4m                 ; r4d = coeffIdx
+    sub        r0, r1                   ; rewind src one row
+    add        r3d, r3d                 ; int16_t dst: stride -> bytes
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m0, [r5 + r4 * 4]
+%else
+    movd       m0, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m1, m0, [tab_Vm]         ; m1 = tap pair 0/1, m0 = tap pair 2/3
+    pshufb     m0, [tab_Vm + 16]
+    mov        r4d, %2/2                ; 2 rows per iteration
+
+.loop:
+    movu       m2, [r0]
+    movu       m3, [r0 + r1]
+
+    punpcklbw  m4, m2, m3               ; low 8 columns
+    punpckhbw  m2, m3                   ; high 8 columns
+
+    pmaddubsw  m4, m1
+    pmaddubsw  m2, m1
+
+    lea        r0, [r0 + 2 * r1]
+    movu       m5, [r0]
+    movu       m7, [r0 + r1]
+
+    punpcklbw  m6, m5, m7
+    pmaddubsw  m6, m0
+    paddw      m4, m6
+
+    punpckhbw  m6, m5, m7
+    pmaddubsw  m6, m0
+    paddw      m2, m6
+
+    mova       m6, [pw_2000]            ; "ps" output bias
+
+    psubw      m4, m6
+    psubw      m2, m6
+
+    movu       [r2], m4
+    movu       [r2 + 16], m2
+
+    punpcklbw  m4, m3, m5
+    punpckhbw  m3, m5
+
+    pmaddubsw  m4, m1
+    pmaddubsw  m3, m1
+
+    movu       m5, [r0 + 2 * r1]
+
+    punpcklbw  m2, m7, m5
+    punpckhbw  m7, m5
+
+    pmaddubsw  m2, m0
+    pmaddubsw  m7, m0
+
+    paddw      m4, m2
+    paddw      m3, m7
+
+    psubw      m4, m6
+    psubw      m3, m6
+
+    movu       [r2 + r3], m4
+    movu       [r2 + r3 + 16], m3
+
+    lea        r2, [r2 + 2 * r3]
+
+    dec        r4d
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V_PS_W16 16,  4
+    FILTER_V_PS_W16 16,  8
+    FILTER_V_PS_W16 16, 12
+    FILTER_V_PS_W16 16, 16
+    FILTER_V_PS_W16 16, 32
+
+    FILTER_V_PS_W16 16, 24
+    FILTER_V_PS_W16 16, 64
+
+;--------------------------------------------------------------------------------------------------------------
+;void interp_4tap_vert_ps_24x32(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+; 24-wide "ps" vertical 4-tap filter: the first 16 columns are handled like
+; the W16 kernel, then columns 16-23 with 8-byte loads.  r5 doubles as a
+; pointer to src+2*srcStride inside the loop.  2 output rows per iteration.
+%macro FILTER_V4_PS_W24 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_24x%2, 4, 6, 8
+
+    mov        r4d, r4m                 ; r4d = coeffIdx
+    sub        r0, r1                   ; rewind src one row
+    add        r3d, r3d                 ; int16_t dst: stride -> bytes
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m0, [r5 + r4 * 4]
+%else
+    movd       m0, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m1, m0, [tab_Vm]         ; m1 = tap pair 0/1, m0 = tap pair 2/3
+    pshufb     m0, [tab_Vm + 16]
+
+    mov        r4d, %2/2                ; 2 rows per iteration
+
+.loop:
+    movu       m2, [r0]
+    movu       m3, [r0 + r1]
+
+    punpcklbw  m4, m2, m3
+    punpckhbw  m2, m3
+
+    pmaddubsw  m4, m1
+    pmaddubsw  m2, m1
+
+    lea        r5, [r0 + 2 * r1]        ; r5 -> row +2 (also next iteration's src)
+
+    movu       m5, [r5]
+    movu       m7, [r5 + r1]
+
+    punpcklbw  m6, m5, m7
+    pmaddubsw  m6, m0
+    paddw      m4, m6
+
+    punpckhbw  m6, m5, m7
+    pmaddubsw  m6, m0
+    paddw      m2, m6
+
+    mova       m6, [pw_2000]            ; "ps" output bias
+
+    psubw      m4, m6
+    psubw      m2, m6
+
+    movu       [r2], m4                 ; columns 0-15, row 0
+    movu       [r2 + 16], m2
+
+    punpcklbw  m4, m3, m5
+    punpckhbw  m3, m5
+
+    pmaddubsw  m4, m1
+    pmaddubsw  m3, m1
+
+    movu       m2, [r5 + 2 * r1]
+
+    punpcklbw  m5, m7, m2
+    punpckhbw  m7, m2
+
+    pmaddubsw  m5, m0
+    pmaddubsw  m7, m0
+
+    paddw      m4, m5
+    paddw      m3, m7
+
+    psubw      m4, m6
+    psubw      m3, m6
+
+    movu       [r2 + r3], m4            ; columns 0-15, row 1
+    movu       [r2 + r3 + 16], m3
+
+    movq       m2, [r0 + 16]            ; columns 16-23
+    movq       m3, [r0 + r1 + 16]
+    movq       m4, [r5 + 16]
+    movq       m5, [r5 + r1 + 16]
+
+    punpcklbw  m2, m3
+    punpcklbw  m7, m4, m5
+
+    pmaddubsw  m2, m1
+    pmaddubsw  m7, m0
+
+    paddw      m2, m7
+    psubw      m2, m6
+
+    movu       [r2 + 32], m2            ; columns 16-23, row 0
+
+    movq       m2, [r5 + 2 * r1 + 16]
+
+    punpcklbw  m3, m4
+    punpcklbw  m5, m2
+
+    pmaddubsw  m3, m1
+    pmaddubsw  m5, m0
+
+    paddw      m3, m5
+    psubw      m3,  m6
+
+    movu       [r2 + r3 + 32], m3       ; columns 16-23, row 1
+
+    mov        r0, r5                   ; advance src by 2 rows
+    lea        r2, [r2 + 2 * r3]
+
+    dec        r4d
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V4_PS_W24 24, 32
+
+    FILTER_V4_PS_W24 24, 64
+
+;---------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ps_32x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------
+; 32-wide "ps" vertical 4-tap filter: one output row per iteration, processed
+; as two independent 16-column halves.  r5 points at src+2*srcStride, and the
+; loop advances src/dst by a single row (r4d counts all %2 rows).
+%macro FILTER_V_PS_W32 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_%1x%2, 4, 6, 8
+
+    mov        r4d, r4m                 ; r4d = coeffIdx
+    sub        r0, r1                   ; rewind src one row
+    add        r3d, r3d                 ; int16_t dst: stride -> bytes
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m0, [r5 + r4 * 4]
+%else
+    movd       m0, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m1, m0, [tab_Vm]         ; m1 = tap pair 0/1, m0 = tap pair 2/3
+    pshufb     m0, [tab_Vm + 16]
+
+    mova       m7, [pw_2000]            ; "ps" output bias
+
+    mov        r4d, %2                  ; 1 row per iteration
+
+.loop:
+    movu       m2, [r0]
+    movu       m3, [r0 + r1]
+
+    punpcklbw  m4, m2, m3
+    punpckhbw  m2, m3
+
+    pmaddubsw  m4, m1
+    pmaddubsw  m2, m1
+
+    lea        r5, [r0 + 2 * r1]
+    movu       m3, [r5]
+    movu       m5, [r5 + r1]
+
+    punpcklbw  m6, m3, m5
+    punpckhbw  m3, m5
+
+    pmaddubsw  m6, m0
+    pmaddubsw  m3, m0
+
+    paddw      m4, m6
+    paddw      m2, m3
+
+    psubw      m4, m7
+    psubw      m2, m7
+
+    movu       [r2], m4                 ; columns 0-15
+    movu       [r2 + 16], m2
+
+    movu       m2, [r0 + 16]
+    movu       m3, [r0 + r1 + 16]
+
+    punpcklbw  m4, m2, m3
+    punpckhbw  m2, m3
+
+    pmaddubsw  m4, m1
+    pmaddubsw  m2, m1
+
+    movu       m3, [r5 + 16]
+    movu       m5, [r5 + r1 + 16]
+
+    punpcklbw  m6, m3, m5
+    punpckhbw  m3, m5
+
+    pmaddubsw  m6, m0
+    pmaddubsw  m3, m0
+
+    paddw      m4, m6
+    paddw      m2, m3
+
+    psubw      m4, m7
+    psubw      m2, m7
+
+    movu       [r2 + 32], m4            ; columns 16-31
+    movu       [r2 + 48], m2
+
+    lea        r0, [r0 + r1]
+    lea        r2, [r2 + r3]
+
+    dec        r4d
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V_PS_W32 32,  8
+    FILTER_V_PS_W32 32, 16
+    FILTER_V_PS_W32 32, 24
+    FILTER_V_PS_W32 32, 32
+
+    FILTER_V_PS_W32 32, 48
+    FILTER_V_PS_W32 32, 64
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+; 8-wide "pp" vertical 4-tap filter (pixel in, pixel out): rounds with
+; pmulhrsw against pw_512 and packs back to bytes, unlike the "ps" kernels
+; above which subtract pw_2000.  4 output rows per iteration (r4 -= 4).
+%macro FILTER_V4_W8_H8_H16_H32 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_%1x%2, 4, 6, 8
+
+    mov         r4d,       r4m          ; r4d = coeffIdx
+    sub         r0,        r1           ; rewind src one row
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]
+    movd        m5,        [r5 + r4 * 4]
+%else
+    movd        m5,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m6,        m5,       [tab_Vm]   ; m6 = tap pair 0/1
+    pshufb      m5,        [tab_Vm + 16]        ; m5 = tap pair 2/3
+    mova        m4,        [pw_512]             ; pmulhrsw rounding constant
+    lea         r5,        [r1 * 3]
+
+    mov         r4d,       %2           ; row counter (4 rows consumed per pass)
+
+.loop:
+    movq        m0,        [r0]
+    movq        m1,        [r0 + r1]
+    movq        m2,        [r0 + 2 * r1]
+    movq        m3,        [r0 + r5]
+
+    punpcklbw   m0,        m1
+    punpcklbw   m1,        m2
+    punpcklbw   m2,        m3
+
+    pmaddubsw   m0,        m6
+    pmaddubsw   m7,        m2, m5
+
+    paddw       m0,        m7           ; row 0
+
+    pmulhrsw    m0,        m4           ; round
+    packuswb    m0,        m0           ; clip to bytes
+    movh        [r2],      m0
+
+    lea         r0,        [r0 + 4 * r1]
+    movq        m0,        [r0]
+
+    punpcklbw   m3,        m0
+
+    pmaddubsw   m1,        m6
+    pmaddubsw   m7,        m3, m5
+
+    paddw       m1,        m7           ; row 1
+
+    pmulhrsw    m1,        m4
+    packuswb    m1,        m1
+    movh        [r2 + r3], m1
+
+    movq        m1,        [r0 + r1]
+
+    punpcklbw   m0,        m1
+
+    pmaddubsw   m2,        m6
+    pmaddubsw   m0,        m5
+
+    paddw       m2,        m0           ; row 2
+
+    pmulhrsw    m2,        m4
+
+    movq        m7,        [r0 + 2 * r1]
+    punpcklbw   m1,        m7
+
+    pmaddubsw   m3,        m6
+    pmaddubsw   m1,        m5
+
+    paddw       m3,        m1           ; row 3
+
+    pmulhrsw    m3,        m4
+    packuswb    m2,        m3           ; rows 2+3 packed together
+
+    lea         r2,        [r2 + 2 * r3]
+    movh        [r2],      m2
+    movhps      [r2 + r3], m2
+
+    lea         r2,        [r2 + 2 * r3]
+
+    sub         r4,         4
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V4_W8_H8_H16_H32 8,  8
+    FILTER_V4_W8_H8_H16_H32 8, 16
+    FILTER_V4_W8_H8_H16_H32 8, 32
+
+    FILTER_V4_W8_H8_H16_H32 8, 12
+    FILTER_V4_W8_H8_H16_H32 8, 64
+
+; Shared AVX2 body: filters eight 8-wide output rows vertically with 4 taps.
+; Expects on entry: r0 = src - srcStride, r1 = srcStride, r4 = 3*srcStride,
+; r5 -> 32-byte tap pairs (taps 0/1 at [r5], taps 2/3 at [r5 + mmsize]).
+; On exit the unrounded word results are left in m5/m2/m1/m4 (two output
+; rows per ymm; see inline lane maps), for the caller to round ("pp") or
+; bias-subtract ("ps").  r0 is advanced by 8 source rows.
+%macro PROCESS_CHROMA_AVX2_W8_8R 0
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2                        ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    movq            xm3, [r0 + r1 * 2]              ; m3 = row 2
+    punpcklbw       xm2, xm3                        ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
+    vinserti128     m5, m1, xm2, 1                  ; m5 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    pmaddubsw       m5, [r5]
+    movq            xm4, [r0 + r4]                  ; m4 = row 3
+    punpcklbw       xm3, xm4                        ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    lea             r0, [r0 + r1 * 4]
+    movq            xm1, [r0]                       ; m1 = row 4
+    punpcklbw       xm4, xm1                        ; m4 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
+    vinserti128     m2, m3, xm4, 1                  ; m2 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    pmaddubsw       m0, m2, [r5 + 1 * mmsize]
+    paddw           m5, m0
+    pmaddubsw       m2, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 5
+    punpcklbw       xm1, xm3                        ; m1 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    movq            xm4, [r0 + r1 * 2]              ; m4 = row 6
+    punpcklbw       xm3, xm4                        ; m3 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50] - [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    pmaddubsw       m0, m1, [r5 + 1 * mmsize]
+    paddw           m2, m0
+    pmaddubsw       m1, [r5]
+    movq            xm3, [r0 + r4]                  ; m3 = row 7
+    punpcklbw       xm4, xm3                        ; m4 = [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+    lea             r0, [r0 + r1 * 4]
+    movq            xm0, [r0]                       ; m0 = row 8
+    punpcklbw       xm3, xm0                        ; m3 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70]
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70] - [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+    pmaddubsw       m3, m4, [r5 + 1 * mmsize]
+    paddw           m1, m3
+    pmaddubsw       m4, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 9
+    punpcklbw       xm0, xm3                        ; m0 = [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+    movq            xm6, [r0 + r1 * 2]              ; m6 = row 10
+    punpcklbw       xm3, xm6                        ; m3 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90]
+    vinserti128     m0, m0, xm3, 1                  ; m0 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90] - [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+    pmaddubsw       m0, [r5 + 1 * mmsize]
+    paddw           m4, m0
+%endmacro
+
+; AVX2 8x8 vertical 4-tap chroma filter.  %1 selects the output flavour:
+; "pp" rounds via pw_512/pmulhrsw and packs to bytes, "ps" subtracts the
+; pw_2000 bias and stores 16-bit words.  The filtering itself is done by
+; PROCESS_CHROMA_AVX2_W8_8R, which leaves word rows 0-7 in m5/m2/m1/m4.
+%macro FILTER_VER_CHROMA_AVX2_8x8 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x8, 4, 6, 7
+    mov             r4d, r4m                        ; r4d = coeffIdx
+    shl             r4d, 6                          ; 64 bytes of taps per index
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1                          ; rewind src one row
+    PROCESS_CHROMA_AVX2_W8_8R
+%ifidn %1,pp
+    lea             r4, [r3 * 3]
+    mova            m3, [pw_512]
+    pmulhrsw        m5, m3                          ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m3                          ; m2 = word: row 2, row 3
+    pmulhrsw        m1, m3                          ; m1 = word: row 4, row 5
+    pmulhrsw        m4, m3                          ; m4 = word: row 6, row 7
+    packuswb        m5, m2
+    packuswb        m1, m4
+    vextracti128    xm2, m5, 1
+    vextracti128    xm4, m1, 1
+    movq            [r2], xm5
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm5
+    movhps          [r2 + r4], xm2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm1
+    movq            [r2 + r3], xm4
+    movhps          [r2 + r3 * 2], xm1
+    movhps          [r2 + r4], xm4
+%else
+    add             r3d, r3d                        ; int16_t dst: stride -> bytes
+    vbroadcasti128  m3, [pw_2000]
+    lea             r4, [r3 * 3]
+    psubw           m5, m3                          ; m5 = word: row 0, row 1
+    psubw           m2, m3                          ; m2 = word: row 2, row 3
+    psubw           m1, m3                          ; m1 = word: row 4, row 5
+    psubw           m4, m3                          ; m4 = word: row 6, row 7
+    vextracti128    xm6, m5, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm0, m1, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm6
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm1
+    movu            [r2 + r3], xm0
+    movu            [r2 + r3 * 2], xm4
+    vextracti128    xm4, m4, 1
+    movu            [r2 + r4], xm4
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_8x8 pp
+    FILTER_VER_CHROMA_AVX2_8x8 ps
+
+; AVX2 8x6 vertical 4-tap chroma filter (%1 = pp/ps, see 8x8 variant above).
+; The 6 output rows are computed inline: word rows 0-5 end up in m5/m2/m1
+; (two rows per ymm), then are rounded+packed ("pp") or biased ("ps").
+%macro FILTER_VER_CHROMA_AVX2_8x6 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x6, 4, 6, 6
+    mov             r4d, r4m                        ; r4d = coeffIdx
+    shl             r4d, 6                          ; 64 bytes of taps per index
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1                          ; rewind src one row
+
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2                        ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    movq            xm3, [r0 + r1 * 2]              ; m3 = row 2
+    punpcklbw       xm2, xm3                        ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
+    vinserti128     m5, m1, xm2, 1                  ; m5 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    pmaddubsw       m5, [r5]
+    movq            xm4, [r0 + r4]                  ; m4 = row 3
+    punpcklbw       xm3, xm4                        ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    lea             r0, [r0 + r1 * 4]
+    movq            xm1, [r0]                       ; m1 = row 4
+    punpcklbw       xm4, xm1                        ; m4 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
+    vinserti128     m2, m3, xm4, 1                  ; m2 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    pmaddubsw       m0, m2, [r5 + 1 * mmsize]
+    paddw           m5, m0
+    pmaddubsw       m2, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 5
+    punpcklbw       xm1, xm3                        ; m1 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    movq            xm4, [r0 + r1 * 2]              ; m4 = row 6
+    punpcklbw       xm3, xm4                        ; m3 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50] - [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    pmaddubsw       m0, m1, [r5 + 1 * mmsize]
+    paddw           m2, m0
+    pmaddubsw       m1, [r5]
+    movq            xm3, [r0 + r4]                  ; m3 = row 7
+    punpcklbw       xm4, xm3                        ; m4 = [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+    lea             r0, [r0 + r1 * 4]
+    movq            xm0, [r0]                       ; m0 = row 8
+    punpcklbw       xm3, xm0                        ; m3 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70]
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70] - [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+    pmaddubsw       m4, [r5 + 1 * mmsize]
+    paddw           m1, m4
+%ifidn %1,pp
+    lea             r4, [r3 * 3]
+    mova            m3, [pw_512]
+    pmulhrsw        m5, m3                          ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m3                          ; m2 = word: row 2, row 3
+    pmulhrsw        m1, m3                          ; m1 = word: row 4, row 5
+    packuswb        m5, m2
+    packuswb        m1, m1
+    vextracti128    xm2, m5, 1
+    vextracti128    xm4, m1, 1
+    movq            [r2], xm5
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm5
+    movhps          [r2 + r4], xm2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm1
+    movq            [r2 + r3], xm4
+%else
+    add             r3d, r3d                        ; int16_t dst: stride -> bytes
+    mova            m3, [pw_2000]
+    lea             r4, [r3 * 3]
+    psubw           m5, m3                          ; m5 = word: row 0, row 1
+    psubw           m2, m3                          ; m2 = word: row 2, row 3
+    psubw           m1, m3                          ; m1 = word: row 4, row 5
+    vextracti128    xm4, m5, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm0, m1, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm4
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm1
+    movu            [r2 + r3], xm0
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_8x6 pp
+    FILTER_VER_CHROMA_AVX2_8x6 ps
+
+; Shared AVX2 body: filters sixteen 8-wide output rows vertically with 4 taps
+; and stores them as it goes (rows 0-7, then rows 8-15 after advancing r2).
+; %1 = pp/ps.  Expects on entry: r0 = src - srcStride, r1 = srcStride,
+; r4 = 3*srcStride, r5 -> tap pairs, r6 = 3*dstStride, and m7 preloaded by
+; the caller with pw_512 ("pp" rounding) or pw_2000 ("ps" bias).
+%macro PROCESS_CHROMA_AVX2_W8_16R 1
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2
+    movq            xm3, [r0 + r1 * 2]              ; m3 = row 2
+    punpcklbw       xm2, xm3
+    vinserti128     m5, m1, xm2, 1
+    pmaddubsw       m5, [r5]
+    movq            xm4, [r0 + r4]                  ; m4 = row 3
+    punpcklbw       xm3, xm4
+    lea             r0, [r0 + r1 * 4]
+    movq            xm1, [r0]                       ; m1 = row 4
+    punpcklbw       xm4, xm1
+    vinserti128     m2, m3, xm4, 1
+    pmaddubsw       m0, m2, [r5 + 1 * mmsize]
+    paddw           m5, m0
+    pmaddubsw       m2, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 5
+    punpcklbw       xm1, xm3
+    movq            xm4, [r0 + r1 * 2]              ; m4 = row 6
+    punpcklbw       xm3, xm4
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m0, m1, [r5 + 1 * mmsize]
+    paddw           m2, m0
+    pmaddubsw       m1, [r5]
+    movq            xm3, [r0 + r4]                  ; m3 = row 7
+    punpcklbw       xm4, xm3
+    lea             r0, [r0 + r1 * 4]
+    movq            xm0, [r0]                       ; m0 = row 8
+    punpcklbw       xm3, xm0
+    vinserti128     m4, m4, xm3, 1
+    pmaddubsw       m3, m4, [r5 + 1 * mmsize]
+    paddw           m1, m3
+    pmaddubsw       m4, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 9
+    punpcklbw       xm0, xm3
+    movq            xm6, [r0 + r1 * 2]              ; m6 = row 10
+    punpcklbw       xm3, xm6
+    vinserti128     m0, m0, xm3, 1
+    pmaddubsw       m3, m0, [r5 + 1 * mmsize]
+    paddw           m4, m3
+    pmaddubsw       m0, [r5]
+%ifidn %1,pp
+    pmulhrsw        m5, m7                          ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m7                          ; m2 = word: row 2, row 3
+    pmulhrsw        m1, m7                          ; m1 = word: row 4, row 5
+    pmulhrsw        m4, m7                          ; m4 = word: row 6, row 7
+    packuswb        m5, m2
+    packuswb        m1, m4
+    vextracti128    xm2, m5, 1
+    vextracti128    xm4, m1, 1
+    movq            [r2], xm5
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm5
+    movhps          [r2 + r6], xm2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm1
+    movq            [r2 + r3], xm4
+    movhps          [r2 + r3 * 2], xm1
+    movhps          [r2 + r6], xm4
+%else
+    psubw           m5, m7                          ; m5 = word: row 0, row 1
+    psubw           m2, m7                          ; m2 = word: row 2, row 3
+    psubw           m1, m7                          ; m1 = word: row 4, row 5
+    psubw           m4, m7                          ; m4 = word: row 6, row 7
+    vextracti128    xm3, m5, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm3
+    vextracti128    xm3, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    lea             r2, [r2 + r3 * 4]
+    vextracti128    xm5, m1, 1
+    vextracti128    xm3, m4, 1
+    movu            [r2], xm1
+    movu            [r2 + r3], xm5
+    movu            [r2 + r3 * 2], xm4
+    movu            [r2 + r6], xm3
+%endif
+    movq            xm3, [r0 + r4]                  ; m3 = row 11
+    punpcklbw       xm6, xm3
+    lea             r0, [r0 + r1 * 4]
+    movq            xm5, [r0]                       ; m5 = row 12
+    punpcklbw       xm3, xm5
+    vinserti128     m6, m6, xm3, 1
+    pmaddubsw       m3, m6, [r5 + 1 * mmsize]
+    paddw           m0, m3
+    pmaddubsw       m6, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 13
+    punpcklbw       xm5, xm3
+    movq            xm2, [r0 + r1 * 2]              ; m2 = row 14
+    punpcklbw       xm3, xm2
+    vinserti128     m5, m5, xm3, 1
+    pmaddubsw       m3, m5, [r5 + 1 * mmsize]
+    paddw           m6, m3
+    pmaddubsw       m5, [r5]
+    movq            xm3, [r0 + r4]                  ; m3 = row 15
+    punpcklbw       xm2, xm3
+    lea             r0, [r0 + r1 * 4]
+    movq            xm1, [r0]                       ; m1 = row 16
+    punpcklbw       xm3, xm1
+    vinserti128     m2, m2, xm3, 1
+    pmaddubsw       m3, m2, [r5 + 1 * mmsize]
+    paddw           m5, m3
+    pmaddubsw       m2, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 17
+    punpcklbw       xm1, xm3
+    movq            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpcklbw       xm3, xm4
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5 + 1 * mmsize]
+    paddw           m2, m1
+    lea             r2, [r2 + r3 * 4]
+%ifidn %1,pp
+    pmulhrsw        m0, m7                          ; m0 = word: row 8, row 9
+    pmulhrsw        m6, m7                          ; m6 = word: row 10, row 11
+    pmulhrsw        m5, m7                          ; m5 = word: row 12, row 13
+    pmulhrsw        m2, m7                          ; m2 = word: row 14, row 15
+    packuswb        m0, m6
+    packuswb        m5, m2
+    vextracti128    xm6, m0, 1
+    vextracti128    xm2, m5, 1
+    movq            [r2], xm0
+    movq            [r2 + r3], xm6
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm6
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm5
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm5
+    movhps          [r2 + r6], xm2
+%else
+    psubw           m0, m7                          ; m0 = word: row 8, row 9
+    psubw           m6, m7                          ; m6 = word: row 10, row 11
+    psubw           m5, m7                          ; m5 = word: row 12, row 13
+    psubw           m2, m7                          ; m2 = word: row 14, row 15
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m6, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r6], xm3
+    lea             r2, [r2 + r3 * 4]
+    vextracti128    xm1, m5, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%endif
+%endmacro
+
+%macro FILTER_VER_CHROMA_AVX2_8x16 1           ; AVX2 4-tap vertical chroma filter, 8x16 block; %1 = pp (pixel out) or ps (int16 out)
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x16, 4, 7, 8      ; r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx
+    mov             r4d, r4m
+    shl             r4d, 6                          ; r4 = coeffIdx * 64 (one coeff entry = two 32-byte ymm rows)
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]     ; PIC build: materialize table base, then add offset
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]                    ; r4 = 3 * srcStride
+    sub             r0, r1                          ; back up one row: 4-tap filter reads rows -1..+2
+%ifidn %1,pp
+    mova            m7, [pw_512]                    ; pp: pmulhrsw by 512 == round-and-shift-right by 6
+%else
+    add             r3d, r3d                        ; ps: destination is int16, double the byte stride
+    mova            m7, [pw_2000]                   ; ps: bias subtracted from the 16-bit intermediate
+%endif
+    lea             r6, [r3 * 3]                    ; r6 = 3 * dstStride
+    PROCESS_CHROMA_AVX2_W8_16R %1                   ; filter and store all 16 rows
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_8x16 pp
+    FILTER_VER_CHROMA_AVX2_8x16 ps
+
+%macro FILTER_VER_CHROMA_AVX2_8x12 1           ; AVX2 4-tap vertical chroma filter, 8x12 block; %1 = pp (pixel out) or ps (int16 out)
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x12, 4, 7, 8      ; r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx
+    mov             r4d, r4m
+    shl             r4d, 6                          ; r4 = coeffIdx * 64 (one coeff entry = two ymm rows)
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]     ; PIC build: materialize table base, then add offset
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]                    ; r4 = 3 * srcStride
+    sub             r0, r1                          ; back up one row: 4-tap filter reads rows -1..+2
+%ifidn %1, pp
+    mova            m7, [pw_512]                    ; pp: pmulhrsw rounding constant (>> 6 with rounding)
+%else
+    add             r3d, r3d                        ; ps: int16 destination, double the byte stride
+    mova            m7, [pw_2000]                   ; ps: bias subtracted from the intermediate
+%endif
+    lea             r6, [r3 * 3]                    ; r6 = 3 * dstStride
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2                        ; interleave adjacent rows for pmaddubsw tap pairs
+    movq            xm3, [r0 + r1 * 2]              ; m3 = row 2
+    punpcklbw       xm2, xm3
+    vinserti128     m5, m1, xm2, 1                  ; ymm lanes hold two consecutive row pairs
+    pmaddubsw       m5, [r5]                        ; first two taps for output rows 0,1
+    movq            xm4, [r0 + r4]                  ; m4 = row 3
+    punpcklbw       xm3, xm4
+    lea             r0, [r0 + r1 * 4]
+    movq            xm1, [r0]                       ; m1 = row 4
+    punpcklbw       xm4, xm1
+    vinserti128     m2, m3, xm4, 1
+    pmaddubsw       m0, m2, [r5 + 1 * mmsize]       ; last two taps for rows 0,1
+    paddw           m5, m0                          ; m5 = full sum, output rows 0,1
+    pmaddubsw       m2, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 5
+    punpcklbw       xm1, xm3
+    movq            xm4, [r0 + r1 * 2]              ; m4 = row 6
+    punpcklbw       xm3, xm4
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m0, m1, [r5 + 1 * mmsize]
+    paddw           m2, m0                          ; m2 = output rows 2,3
+    pmaddubsw       m1, [r5]
+    movq            xm3, [r0 + r4]                  ; m3 = row 7
+    punpcklbw       xm4, xm3
+    lea             r0, [r0 + r1 * 4]
+    movq            xm0, [r0]                       ; m0 = row 8
+    punpcklbw       xm3, xm0
+    vinserti128     m4, m4, xm3, 1
+    pmaddubsw       m3, m4, [r5 + 1 * mmsize]
+    paddw           m1, m3                          ; m1 = output rows 4,5
+    pmaddubsw       m4, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 9
+    punpcklbw       xm0, xm3
+    movq            xm6, [r0 + r1 * 2]              ; m6 = row 10
+    punpcklbw       xm3, xm6
+    vinserti128     m0, m0, xm3, 1
+    pmaddubsw       m3, m0, [r5 + 1 * mmsize]
+    paddw           m4, m3                          ; m4 = output rows 6,7
+    pmaddubsw       m0, [r5]
+%ifidn %1, pp                                   ; pp: round, pack to bytes and store rows 0-7
+    pmulhrsw        m5, m7                          ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m7                          ; m2 = word: row 2, row 3
+    pmulhrsw        m1, m7                          ; m1 = word: row 4, row 5
+    pmulhrsw        m4, m7                          ; m4 = word: row 6, row 7
+    packuswb        m5, m2
+    packuswb        m1, m4
+    vextracti128    xm2, m5, 1
+    vextracti128    xm4, m1, 1
+    movq            [r2], xm5
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm5
+    movhps          [r2 + r6], xm2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm1
+    movq            [r2 + r3], xm4
+    movhps          [r2 + r3 * 2], xm1
+    movhps          [r2 + r6], xm4
+%else                                           ; ps: subtract bias and store 16-bit rows 0-7
+    psubw           m5, m7                          ; m5 = word: row 0, row 1
+    psubw           m2, m7                          ; m2 = word: row 2, row 3
+    psubw           m1, m7                          ; m1 = word: row 4, row 5
+    psubw           m4, m7                          ; m4 = word: row 6, row 7
+    vextracti128    xm3, m5, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm3
+    vextracti128    xm3, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    lea             r2, [r2 + r3 * 4]
+    vextracti128    xm5, m1, 1
+    vextracti128    xm3, m4, 1
+    movu            [r2], xm1
+    movu            [r2 + r3], xm5
+    movu            [r2 + r3 * 2], xm4
+    movu            [r2 + r6], xm3
+%endif
+    movq            xm3, [r0 + r4]                  ; m3 = row 11
+    punpcklbw       xm6, xm3
+    lea             r0, [r0 + r1 * 4]
+    movq            xm5, [r0]                       ; m5 = row 12
+    punpcklbw       xm3, xm5
+    vinserti128     m6, m6, xm3, 1
+    pmaddubsw       m3, m6, [r5 + 1 * mmsize]
+    paddw           m0, m3                          ; m0 = output rows 8,9
+    pmaddubsw       m6, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 13
+    punpcklbw       xm5, xm3
+    movq            xm2, [r0 + r1 * 2]              ; m2 = row 14
+    punpcklbw       xm3, xm2
+    vinserti128     m5, m5, xm3, 1
+    pmaddubsw       m3, m5, [r5 + 1 * mmsize]
+    paddw           m6, m3                          ; m6 = output rows 10,11
+    lea             r2, [r2 + r3 * 4]
+%ifidn %1, pp                                   ; pp: round, pack and store final rows 8-11
+    pmulhrsw        m0, m7                          ; m0 = word: row 8, row 9
+    pmulhrsw        m6, m7                          ; m6 = word: row 10, row 11
+    packuswb        m0, m6
+    vextracti128    xm6, m0, 1
+    movq            [r2], xm0
+    movq            [r2 + r3], xm6
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm6
+%else                                           ; ps: subtract bias, store 16-bit rows 8-11
+    psubw           m0, m7                          ; m0 = word: row 8, row 9
+    psubw           m6, m7                          ; m6 = word: row 10, row 11
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m6, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r6], xm3
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_8x12 pp
+    FILTER_VER_CHROMA_AVX2_8x12 ps
+
+%macro FILTER_VER_CHROMA_AVX2_8xN 2            ; AVX2 4-tap vertical chroma filter, 8x%2 block (%2 multiple of 16); %1 = pp/ps
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x%2, 4, 7, 8      ; r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx
+    mov             r4d, r4m
+    shl             r4d, 6                          ; r4 = coeffIdx * 64 (one coeff entry = two ymm rows)
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]     ; PIC build: materialize table base, then add offset
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]                    ; r4 = 3 * srcStride
+    sub             r0, r1                          ; back up one row: 4-tap filter reads rows -1..+2
+%ifidn %1,pp
+    mova            m7, [pw_512]                    ; pp: pmulhrsw rounding constant (>> 6 with rounding)
+%else
+    add             r3d, r3d                        ; ps: int16 destination, double the byte stride
+    mova            m7, [pw_2000]                   ; ps: bias subtracted from the intermediate
+%endif
+    lea             r6, [r3 * 3]                    ; r6 = 3 * dstStride
+%rep %2 / 16                                    ; unrolled: one 16-row body per iteration
+    PROCESS_CHROMA_AVX2_W8_16R %1
+    lea             r2, [r2 + r3 * 4]               ; advance dst to the next 16-row chunk
+%endrep
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_8xN pp, 32
+    FILTER_VER_CHROMA_AVX2_8xN ps, 32
+    FILTER_VER_CHROMA_AVX2_8xN pp, 64
+    FILTER_VER_CHROMA_AVX2_8xN ps, 64
+
+%macro PROCESS_CHROMA_AVX2_W8_4R 0             ; compute 4 output rows of an 8-wide vertical 4-tap filter; results left in m0 (rows 0,1) and m2 (rows 2,3) as words
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2                        ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    movq            xm3, [r0 + r1 * 2]              ; m3 = row 2
+    punpcklbw       xm2, xm3                        ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
+    vinserti128     m0, m1, xm2, 1                  ; m0 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    pmaddubsw       m0, [r5]                        ; first two taps for output rows 0,1
+    movq            xm4, [r0 + r4]                  ; m4 = row 3
+    punpcklbw       xm3, xm4                        ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    lea             r0, [r0 + r1 * 4]
+    movq            xm1, [r0]                       ; m1 = row 4
+    punpcklbw       xm4, xm1                        ; m4 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
+    vinserti128     m2, m3, xm4, 1                  ; m2 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    pmaddubsw       m4, m2, [r5 + 1 * mmsize]       ; last two taps for rows 0,1
+    paddw           m0, m4                          ; m0 = full sum, output rows 0,1
+    pmaddubsw       m2, [r5]                        ; first two taps for rows 2,3
+    movq            xm3, [r0 + r1]                  ; m3 = row 5
+    punpcklbw       xm1, xm3                        ; m1 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    movq            xm4, [r0 + r1 * 2]              ; m4 = row 6
+    punpcklbw       xm3, xm4                        ; m3 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50] - [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    pmaddubsw       m1, [r5 + 1 * mmsize]           ; last two taps for rows 2,3
+    paddw           m2, m1                          ; m2 = full sum, output rows 2,3
+%endmacro
+
+%macro FILTER_VER_CHROMA_AVX2_8x4 1            ; AVX2 4-tap vertical chroma filter, 8x4 block; %1 = pp (pixel out) or ps (int16 out)
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x4, 4, 6, 5       ; r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx
+    mov             r4d, r4m
+    shl             r4d, 6                          ; r4 = coeffIdx * 64 (one coeff entry = two ymm rows)
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]     ; PIC build: materialize table base, then add offset
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]                    ; r4 = 3 * srcStride
+    sub             r0, r1                          ; back up one row: 4-tap filter reads rows -1..+2
+    PROCESS_CHROMA_AVX2_W8_4R                       ; m0 = word rows 0,1; m2 = word rows 2,3
+%ifidn %1,pp
+    lea             r4, [r3 * 3]                    ; r4 = 3 * dstStride (r4 is free after the filter body)
+    mova            m3, [pw_512]                    ; pp: pmulhrsw rounding constant (>> 6 with rounding)
+    pmulhrsw        m0, m3                          ; m0 = word: row 0, row 1
+    pmulhrsw        m2, m3                          ; m2 = word: row 2, row 3
+    packuswb        m0, m2
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r4], xm2
+%else
+    add             r3d, r3d                        ; ps: int16 destination, double the byte stride
+    vbroadcasti128  m3, [pw_2000]                   ; ps: bias subtracted from the intermediate
+    lea             r4, [r3 * 3]
+    psubw           m0, m3                          ; m0 = word: row 0, row 1
+    psubw           m2, m3                          ; m2 = word: row 2, row 3
+    vextracti128    xm1, m0, 1
+    vextracti128    xm4, m2, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm4
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_8x4 pp
+    FILTER_VER_CHROMA_AVX2_8x4 ps
+
+%macro FILTER_VER_CHROMA_AVX2_8x2 1            ; AVX2 4-tap vertical chroma filter, 8x2 block; %1 = pp (pixel out) or ps (int16 out)
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x2, 4, 6, 4       ; r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx
+    mov             r4d, r4m
+    shl             r4d, 6                          ; r4 = coeffIdx * 64 (one coeff entry = two ymm rows)
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]     ; PIC build: materialize table base, then add offset
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]                    ; r4 = 3 * srcStride
+    sub             r0, r1                          ; back up one row: 4-tap filter reads rows -1..+2
+
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2                        ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    movq            xm3, [r0 + r1 * 2]              ; m3 = row 2
+    punpcklbw       xm2, xm3                        ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
+    vinserti128     m1, m1, xm2, 1                  ; m1 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    pmaddubsw       m1, [r5]                        ; first two taps for output rows 0,1
+    movq            xm2, [r0 + r4]                  ; m2 = row 3
+    punpcklbw       xm3, xm2                        ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    movq            xm0, [r0 + r1 * 4]              ; m0 = row 4
+    punpcklbw       xm2, xm0                        ; m2 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
+    vinserti128     m3, m3, xm2, 1                  ; m3 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    pmaddubsw       m3, [r5 + 1 * mmsize]           ; last two taps for rows 0,1
+    paddw           m1, m3                          ; m1 = full sum, output rows 0,1
+%ifidn %1,pp
+    pmulhrsw        m1, [pw_512]                    ; m1 = word: row 0, row 1
+    packuswb        m1, m1
+    vextracti128    xm0, m1, 1
+    movq            [r2], xm1
+    movq            [r2 + r3], xm0
+%else
+    add             r3d, r3d                        ; ps: int16 destination, double the byte stride
+    psubw           m1, [pw_2000]                   ; m1 = word: row 0, row 1
+    vextracti128    xm0, m1, 1
+    movu            [r2], xm1
+    movu            [r2 + r3], xm0
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_8x2 pp
+    FILTER_VER_CHROMA_AVX2_8x2 ps
+
+%macro FILTER_VER_CHROMA_AVX2_6x8 1            ; AVX2 4-tap vertical chroma filter, 6x8 block; computes 8 wide, stores only 6 columns; %1 = pp/ps
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_6x8, 4, 6, 7       ; r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx
+    mov             r4d, r4m
+    shl             r4d, 6                          ; r4 = coeffIdx * 64 (one coeff entry = two ymm rows)
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]     ; PIC build: materialize table base, then add offset
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]                    ; r4 = 3 * srcStride
+    sub             r0, r1                          ; back up one row: 4-tap filter reads rows -1..+2
+    PROCESS_CHROMA_AVX2_W8_8R                       ; 8-wide filter body; word rows left in m5,m2,m1,m4
+%ifidn %1,pp
+    lea             r4, [r3 * 3]                    ; r4 = 3 * dstStride
+    mova            m3, [pw_512]                    ; pp: pmulhrsw rounding constant (>> 6 with rounding)
+    pmulhrsw        m5, m3                          ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m3                          ; m2 = word: row 2, row 3
+    pmulhrsw        m1, m3                          ; m1 = word: row 4, row 5
+    pmulhrsw        m4, m3                          ; m4 = word: row 6, row 7
+    packuswb        m5, m2
+    packuswb        m1, m4
+    vextracti128    xm2, m5, 1
+    vextracti128    xm4, m1, 1
+    movd            [r2], xm5                       ; each row stored as dword + word = 6 pixels
+    pextrw          [r2 + 4], xm5, 2
+    movd            [r2 + r3], xm2
+    pextrw          [r2 + r3 + 4], xm2, 2
+    pextrd          [r2 + r3 * 2], xm5, 2
+    pextrw          [r2 + r3 * 2 + 4], xm5, 6
+    pextrd          [r2 + r4], xm2, 2
+    pextrw          [r2 + r4 + 4], xm2, 6
+    lea             r2, [r2 + r3 * 4]
+    movd            [r2], xm1
+    pextrw          [r2 + 4], xm1, 2
+    movd            [r2 + r3], xm4
+    pextrw          [r2 + r3 + 4], xm4, 2
+    pextrd          [r2 + r3 * 2], xm1, 2
+    pextrw          [r2 + r3 * 2 + 4], xm1, 6
+    pextrd          [r2 + r4], xm4, 2
+    pextrw          [r2 + r4 + 4], xm4, 6
+%else
+    add             r3d, r3d                        ; ps: int16 destination, double the byte stride
+    vbroadcasti128  m3, [pw_2000]                   ; ps: bias subtracted from the intermediate
+    lea             r4, [r3 * 3]
+    psubw           m5, m3                          ; m5 = word: row 0, row 1
+    psubw           m2, m3                          ; m2 = word: row 2, row 3
+    psubw           m1, m3                          ; m1 = word: row 4, row 5
+    psubw           m4, m3                          ; m4 = word: row 6, row 7
+    vextracti128    xm6, m5, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm0, m1, 1
+    movq            [r2], xm5                       ; each row stored as qword + dword = 6 int16
+    pextrd          [r2 + 8], xm5, 2
+    movq            [r2 + r3], xm6
+    pextrd          [r2 + r3 + 8], xm6, 2
+    movq            [r2 + r3 * 2], xm2
+    pextrd          [r2 + r3 * 2 + 8], xm2, 2
+    movq            [r2 + r4], xm3
+    pextrd          [r2 + r4 + 8], xm3, 2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm1
+    pextrd          [r2 + 8], xm1, 2
+    movq            [r2 + r3], xm0
+    pextrd          [r2 + r3 + 8], xm0, 2
+    movq            [r2 + r3 * 2], xm4
+    pextrd          [r2 + r3 * 2 + 8], xm4, 2
+    vextracti128    xm4, m4, 1
+    movq            [r2 + r4], xm4
+    pextrd          [r2 + r4 + 8], xm4, 2
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_6x8 pp
+    FILTER_VER_CHROMA_AVX2_6x8 ps
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_6x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W6_H4 2                       ; SSE4 4-tap vertical chroma pp filter, 6x%2 block, 4 rows per loop iteration
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_6x%2, 4, 6, 8      ; r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx
+
+    mov         r4d,       r4m
+    sub         r0,        r1                       ; back up one row: 4-tap filter reads rows -1..+2
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]        ; PIC build: materialize table base, then index
+    movd        m5,        [r5 + r4 * 4]
+%else
+    movd        m5,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m6,        m5,       [tab_Vm]       ; m6 = broadcast first tap pair
+    pshufb      m5,        [tab_Vm + 16]            ; m5 = broadcast second tap pair
+    mova        m4,        [pw_512]                 ; pmulhrsw rounding constant (>> 6 with rounding)
+
+    mov         r4d,       %2                       ; r4 = remaining row count
+    lea         r5,        [3 * r1]                 ; r5 = 3 * srcStride
+
+.loop:
+    movq        m0,        [r0]
+    movq        m1,        [r0 + r1]
+    movq        m2,        [r0 + 2 * r1]
+    movq        m3,        [r0 + r5]
+
+    punpcklbw   m0,        m1                       ; interleave adjacent rows for pmaddubsw tap pairs
+    punpcklbw   m1,        m2
+    punpcklbw   m2,        m3
+
+    pmaddubsw   m0,        m6
+    pmaddubsw   m7,        m2, m5
+
+    paddw       m0,        m7                       ; output row 0 of this iteration
+
+    pmulhrsw    m0,        m4
+    packuswb    m0,        m0
+    movd        [r2],      m0                       ; store 4 + 2 = 6 pixels
+    pextrw      [r2 + 4],  m0,    2
+
+    lea         r0,        [r0 + 4 * r1]
+
+    movq        m0,        [r0]
+    punpcklbw   m3,        m0
+
+    pmaddubsw   m1,        m6
+    pmaddubsw   m7,        m3, m5
+
+    paddw       m1,        m7                       ; output row 1
+
+    pmulhrsw    m1,        m4
+    packuswb    m1,        m1
+    movd        [r2 + r3],      m1
+    pextrw      [r2 + r3 + 4],  m1,    2
+
+    movq        m1,        [r0 + r1]
+    punpcklbw   m7,        m0,        m1
+
+    pmaddubsw   m2,        m6
+    pmaddubsw   m7,        m5
+
+    paddw       m2,        m7                       ; output row 2
+
+    pmulhrsw    m2,        m4
+    packuswb    m2,        m2
+    lea         r2,        [r2 + 2 * r3]
+    movd        [r2],      m2
+    pextrw      [r2 + 4],  m2,    2
+
+    movq        m2,        [r0 + 2 * r1]
+    punpcklbw   m1,        m2
+
+    pmaddubsw   m3,        m6
+    pmaddubsw   m1,        m5
+
+    paddw       m3,        m1                       ; output row 3
+
+    pmulhrsw    m3,        m4
+    packuswb    m3,        m3
+
+    movd        [r2 + r3],        m3
+    pextrw      [r2 + r3 + 4],    m3,    2
+
+    lea         r2,        [r2 + 2 * r3]
+
+    sub         r4,         4                       ; 4 rows done per iteration
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V4_W6_H4 6, 8
+
+    FILTER_V4_W6_H4 6, 16
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_12x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W12_H2 2                      ; SSE4 4-tap vertical chroma pp filter, 12x%2 block, 2 rows per loop iteration
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_12x%2, 4, 6, 8     ; r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx
+
+    mov         r4d,       r4m
+    sub         r0,        r1                       ; back up one row: 4-tap filter reads rows -1..+2
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]        ; PIC build: materialize table base, then index
+    movd        m0,        [r5 + r4 * 4]
+%else
+    movd        m0,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m1,        m0,       [tab_Vm]       ; m1 = broadcast first tap pair
+    pshufb      m0,        [tab_Vm + 16]            ; m0 = broadcast second tap pair
+
+    mov         r4d,       %2                       ; r4 = remaining row count
+
+.loop:
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3            ; low/high 8 columns interleaved separately
+    punpckhbw   m2,        m3
+
+    pmaddubsw   m4,        m1
+    pmaddubsw   m2,        m1
+
+    lea         r0,        [r0 + 2 * r1]
+    movu        m5,        [r0]
+    movu        m7,        [r0 + r1]
+
+    punpcklbw   m6,        m5,        m7
+    pmaddubsw   m6,        m0
+    paddw       m4,        m6                       ; low half, output row 0
+
+    punpckhbw   m6,        m5,        m7
+    pmaddubsw   m6,        m0
+    paddw       m2,        m6                       ; high half, output row 0
+
+    mova        m6,        [pw_512]                 ; pmulhrsw rounding constant (>> 6 with rounding)
+
+    pmulhrsw    m4,        m6
+    pmulhrsw    m2,        m6
+
+    packuswb    m4,        m2
+
+    movh         [r2],     m4                       ; store 8 + 4 = 12 pixels
+    pextrd       [r2 + 8], m4,  2
+
+    punpcklbw   m4,        m3,        m5
+    punpckhbw   m3,        m5
+
+    pmaddubsw   m4,        m1
+    pmaddubsw   m3,        m1
+
+    movu        m5,        [r0 + 2 * r1]
+
+    punpcklbw   m2,        m7,        m5
+    punpckhbw   m7,        m5
+
+    pmaddubsw   m2,        m0
+    pmaddubsw   m7,        m0
+
+    paddw       m4,        m2                       ; low half, output row 1
+    paddw       m3,        m7                       ; high half, output row 1
+
+    pmulhrsw    m4,        m6
+    pmulhrsw    m3,        m6
+
+    packuswb    m4,        m3
+
+    movh        [r2 + r3],      m4
+    pextrd      [r2 + r3 + 8],  m4,  2
+
+    lea         r2,        [r2 + 2 * r3]
+
+    sub         r4,        2                        ; 2 rows done per iteration
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V4_W12_H2 12, 16
+
+    FILTER_V4_W12_H2 12, 32
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_16x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W16_H2 2                      ; SSE4 4-tap vertical chroma pp filter, 16x%2 block, 2 rows per loop iteration
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_16x%2, 4, 6, 8     ; r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx
+
+    mov         r4d,       r4m
+    sub         r0,        r1                       ; back up one row: 4-tap filter reads rows -1..+2
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]        ; PIC build: materialize table base, then index
+    movd        m0,        [r5 + r4 * 4]
+%else
+    movd        m0,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m1,        m0,       [tab_Vm]       ; m1 = broadcast first tap pair
+    pshufb      m0,        [tab_Vm + 16]            ; m0 = broadcast second tap pair
+
+    mov         r4d,       %2/2                     ; r4 = loop count (2 rows per pass)
+
+.loop:
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3            ; low/high 8 columns interleaved separately
+    punpckhbw   m2,        m3
+
+    pmaddubsw   m4,        m1
+    pmaddubsw   m2,        m1
+
+    lea         r0,        [r0 + 2 * r1]
+    movu        m5,        [r0]
+    movu        m6,        [r0 + r1]
+
+    punpckhbw   m7,        m5,        m6
+    pmaddubsw   m7,        m0
+    paddw       m2,        m7                       ; high half, output row 0
+
+    punpcklbw   m7,        m5,        m6
+    pmaddubsw   m7,        m0
+    paddw       m4,        m7                       ; low half, output row 0
+
+    mova        m7,        [pw_512]                 ; pmulhrsw rounding constant (>> 6 with rounding)
+
+    pmulhrsw    m4,        m7
+    pmulhrsw    m2,        m7
+
+    packuswb    m4,        m2
+
+    movu        [r2],      m4                       ; store full 16-pixel row 0
+
+    punpcklbw   m4,        m3,        m5
+    punpckhbw   m3,        m5
+
+    pmaddubsw   m4,        m1
+    pmaddubsw   m3,        m1
+
+    movu        m5,        [r0 + 2 * r1]
+
+    punpcklbw   m2,        m6,        m5
+    punpckhbw   m6,        m5
+
+    pmaddubsw   m2,        m0
+    pmaddubsw   m6,        m0
+
+    paddw       m4,        m2                       ; low half, output row 1
+    paddw       m3,        m6                       ; high half, output row 1
+
+    pmulhrsw    m4,        m7
+    pmulhrsw    m3,        m7
+
+    packuswb    m4,        m3
+
+    movu        [r2 + r3],      m4                  ; store full 16-pixel row 1
+
+    lea         r2,        [r2 + 2 * r3]
+
+    dec         r4d
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V4_W16_H2 16,  4
+    FILTER_V4_W16_H2 16,  8
+    FILTER_V4_W16_H2 16, 12
+    FILTER_V4_W16_H2 16, 16
+    FILTER_V4_W16_H2 16, 32
+
+    FILTER_V4_W16_H2 16, 24
+    FILTER_V4_W16_H2 16, 64
+
+%macro FILTER_VER_CHROMA_AVX2_16x16 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_vert_%1_16x16, 4, 6, 15
+    mov             r4d, r4m
+    shl             r4d, 6
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    mova            m12, [r5]
+    mova            m13, [r5 + mmsize]
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,pp
+    mova            m14, [pw_512]
+%else
+    add             r3d, r3d
+    vbroadcasti128  m14, [pw_2000]
+%endif
+    lea             r5, [r3 * 3]
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, m12
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, m12
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, m13
+    paddw           m0, m4
+    pmaddubsw       m2, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, m13
+    paddw           m1, m5
+    pmaddubsw       m3, m12
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, m13
+    paddw           m2, m6
+    pmaddubsw       m4, m12
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, m13
+    paddw           m3, m7
+    pmaddubsw       m5, m12
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhbw       xm8, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddubsw       m8, m6, m13
+    paddw           m4, m8
+    pmaddubsw       m6, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhbw       xm9, xm7, xm8
+    punpcklbw       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddubsw       m9, m7, m13
+    paddw           m5, m9
+    pmaddubsw       m7, m12
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m10, m8, m13
+    paddw           m6, m10
+    pmaddubsw       m8, m12
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m11, m9, m13
+    paddw           m7, m11
+    pmaddubsw       m9, m12
+
+%ifidn %1,pp
+    pmulhrsw        m0, m14                         ; m0 = word: row 0
+    pmulhrsw        m1, m14                         ; m1 = word: row 1
+    pmulhrsw        m2, m14                         ; m2 = word: row 2
+    pmulhrsw        m3, m14                         ; m3 = word: row 3
+    pmulhrsw        m4, m14                         ; m4 = word: row 4
+    pmulhrsw        m5, m14                         ; m5 = word: row 5
+    pmulhrsw        m6, m14                         ; m6 = word: row 6
+    pmulhrsw        m7, m14                         ; m7 = word: row 7
+    packuswb        m0, m1
+    packuswb        m2, m3
+    packuswb        m4, m5
+    packuswb        m6, m7
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    vextracti128    xm7, m6, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r5], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r5], xm7
+%else
+    psubw           m0, m14                         ; m0 = word: row 0
+    psubw           m1, m14                         ; m1 = word: row 1
+    psubw           m2, m14                         ; m2 = word: row 2
+    psubw           m3, m14                         ; m3 = word: row 3
+    psubw           m4, m14                         ; m4 = word: row 4
+    psubw           m5, m14                         ; m5 = word: row 5
+    psubw           m6, m14                         ; m6 = word: row 6
+    psubw           m7, m14                         ; m7 = word: row 7
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r5], m3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m4
+    movu            [r2 + r3], m5
+    movu            [r2 + r3 * 2], m6
+    movu            [r2 + r5], m7
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhbw       xm6, xm10, xm11
+    punpcklbw       xm10, xm11
+    vinserti128     m10, m10, xm6, 1
+    pmaddubsw       m6, m10, m13
+    paddw           m8, m6
+    pmaddubsw       m10, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm6, [r0]                       ; m6 = row 12
+    punpckhbw       xm7, xm11, xm6
+    punpcklbw       xm11, xm6
+    vinserti128     m11, m11, xm7, 1
+    pmaddubsw       m7, m11, m13
+    paddw           m9, m7
+    pmaddubsw       m11, m12
+
+    movu            xm7, [r0 + r1]                  ; m7 = row 13
+    punpckhbw       xm0, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm0, 1
+    pmaddubsw       m0, m6, m13
+    paddw           m10, m0
+    pmaddubsw       m6, m12
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhbw       xm1, xm7, xm0
+    punpcklbw       xm7, xm0
+    vinserti128     m7, m7, xm1, 1
+    pmaddubsw       m1, m7, m13
+    paddw           m11, m1
+    pmaddubsw       m7, m12
+    movu            xm1, [r0 + r4]                  ; m1 = row 15
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m2, m0, m13
+    paddw           m6, m2
+    pmaddubsw       m0, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 16
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m3, m1, m13
+    paddw           m7, m3
+    pmaddubsw       m1, m12
+    movu            xm3, [r0 + r1]                  ; m3 = row 17
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m2, m13
+    paddw           m0, m2
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m3, m13
+    paddw           m1, m3
+
+%ifidn %1,pp
+    pmulhrsw        m8, m14                         ; m8 = word: row 8
+    pmulhrsw        m9, m14                         ; m9 = word: row 9
+    pmulhrsw        m10, m14                        ; m10 = word: row 10
+    pmulhrsw        m11, m14                        ; m11 = word: row 11
+    pmulhrsw        m6, m14                         ; m6 = word: row 12
+    pmulhrsw        m7, m14                         ; m7 = word: row 13
+    pmulhrsw        m0, m14                         ; m0 = word: row 14
+    pmulhrsw        m1, m14                         ; m1 = word: row 15
+    packuswb        m8, m9
+    packuswb        m10, m11
+    packuswb        m6, m7
+    packuswb        m0, m1
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+    vpermq          m6, m6, 11011000b
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm7, m6, 1
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm8
+    movu            [r2 + r3], xm9
+    movu            [r2 + r3 * 2], xm10
+    movu            [r2 + r5], xm11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm6
+    movu            [r2 + r3], xm7
+    movu            [r2 + r3 * 2], xm0
+    movu            [r2 + r5], xm1
+%else
+    psubw           m8, m14                         ; m8 = word: row 8
+    psubw           m9, m14                         ; m9 = word: row 9
+    psubw           m10, m14                        ; m10 = word: row 10
+    psubw           m11, m14                        ; m11 = word: row 11
+    psubw           m6, m14                         ; m6 = word: row 12
+    psubw           m7, m14                         ; m7 = word: row 13
+    psubw           m0, m14                         ; m0 = word: row 14
+    psubw           m1, m14                         ; m1 = word: row 15
+    movu            [r2], m8
+    movu            [r2 + r3], m9
+    movu            [r2 + r3 * 2], m10
+    movu            [r2 + r5], m11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m6
+    movu            [r2 + r3], m7
+    movu            [r2 + r3 * 2], m0
+    movu            [r2 + r5], m1
+%endif
+    RET
+%endif
+%endmacro
+
+    ; Instantiate the 16x16 vertical chroma kernel for both output variants:
+    ; pp = pixel output (rounded/packed), ps = 16-bit intermediate output.
+    FILTER_VER_CHROMA_AVX2_16x16 pp
+    FILTER_VER_CHROMA_AVX2_16x16 ps
+;-----------------------------------------------------------------------------
+; 4-tap vertical chroma interpolation, 16x8 block, AVX2.
+; %1 selects the output variant:
+;   pp - rounds with pw_512 via pmulhrsw and packs to unsigned bytes
+;   ps - doubles the dst stride, subtracts the pw_2000 offset and stores
+;        16-bit intermediate words
+; NOTE(review): register roles (r0/r1 = src/srcStride, r2/r3 = dst/dstStride,
+; r4m = coefficient index) follow the convention of the neighbouring kernels
+; in this file - confirm against the C prototype.
+;-----------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_AVX2_16x8 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_16x8, 4, 7, 7
+    mov             r4d, r4m
+    ; each filter index occupies 2*mmsize (64) bytes in tab_ChromaCoeffVer_32
+    shl             r4d, 6
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    ; r4 = 3 * srcStride; start one row above the block (the 4-tap window
+    ; covers source rows -1..+2 relative to each output row)
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,pp
+    mova            m6, [pw_512]
+%else
+    add             r3d, r3d
+    mova            m6, [pw_2000]
+%endif
+    lea             r6, [r3 * 3]
+
+    ; rows 0-3: interleave adjacent source rows byte-wise (lo half in the low
+    ; lane, hi half in the high lane) and accumulate the two coefficient pairs
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + mmsize]
+    paddw           m0, m4
+    pmaddubsw       m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + mmsize]
+    paddw           m1, m5
+    pmaddubsw       m3, [r5]
+%ifidn %1,pp
+    pmulhrsw        m0, m6                          ; m0 = word: row 0
+    pmulhrsw        m1, m6                          ; m1 = word: row 1
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+%else
+    psubw           m0, m6                          ; m0 = word: row 0
+    psubw           m1, m6                          ; m1 = word: row 1
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+%endif
+
+    movu            xm0, [r0 + r1]                  ; m0 = row 5
+    punpckhbw       xm1, xm4, xm0
+    punpcklbw       xm4, xm0
+    vinserti128     m4, m4, xm1, 1
+    pmaddubsw       m1, m4, [r5 + mmsize]
+    paddw           m2, m1
+    pmaddubsw       m4, [r5]
+    movu            xm1, [r0 + r1 * 2]              ; m1 = row 6
+    punpckhbw       xm5, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm5, 1
+    pmaddubsw       m5, m0, [r5 + mmsize]
+    paddw           m3, m5
+    pmaddubsw       m0, [r5]
+%ifidn %1,pp
+    pmulhrsw        m2, m6                          ; m2 = word: row 2
+    pmulhrsw        m3, m6                          ; m3 = word: row 3
+    packuswb        m2, m3
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm3, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%else
+    psubw           m2, m6                          ; m2 = word: row 2
+    psubw           m3, m6                          ; m3 = word: row 3
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r6], m3
+%endif
+
+    movu            xm2, [r0 + r4]                  ; m2 = row 7
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m3, m1, [r5 + mmsize]
+    paddw           m4, m3
+    pmaddubsw       m1, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm3, [r0]                       ; m3 = row 8
+    punpckhbw       xm5, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm5, 1
+    pmaddubsw       m5, m2, [r5 + mmsize]
+    paddw           m0, m5
+    pmaddubsw       m2, [r5]
+    lea             r2, [r2 + r3 * 4]
+%ifidn %1,pp
+    pmulhrsw        m4, m6                          ; m4 = word: row 4
+    pmulhrsw        m0, m6                          ; m0 = word: row 5
+    packuswb        m4, m0
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm0, m4, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm0
+%else
+    psubw           m4, m6                          ; m4 = word: row 4
+    psubw           m0, m6                          ; m0 = word: row 5
+    movu            [r2], m4
+    movu            [r2 + r3], m0
+%endif
+
+    ; rows 9-10: final taps for output rows 6 and 7 (no further first-tap
+    ; products are needed, so only the [r5 + mmsize] coefficients are applied)
+    movu            xm5, [r0 + r1]                  ; m5 = row 9
+    punpckhbw       xm4, xm3, xm5
+    punpcklbw       xm3, xm5
+    vinserti128     m3, m3, xm4, 1
+    pmaddubsw       m3, [r5 + mmsize]
+    paddw           m1, m3
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 10
+    punpckhbw       xm0, xm5, xm4
+    punpcklbw       xm5, xm4
+    vinserti128     m5, m5, xm0, 1
+    pmaddubsw       m5, [r5 + mmsize]
+    paddw           m2, m5
+%ifidn %1,pp
+    pmulhrsw        m1, m6                          ; m1 = word: row 6
+    pmulhrsw        m2, m6                          ; m2 = word: row 7
+    packuswb        m1, m2
+    vpermq          m1, m1, 11011000b
+    vextracti128    xm2, m1, 1
+    movu            [r2 + r3 * 2], xm1
+    movu            [r2 + r6], xm2
+%else
+    psubw           m1, m6                          ; m1 = word: row 6
+    psubw           m2, m6                          ; m2 = word: row 7
+    movu            [r2 + r3 * 2], m1
+    movu            [r2 + r6], m2
+%endif
+    RET
+%endmacro
+
+    ; Instantiate the 16x8 vertical chroma kernel for both output variants.
+    FILTER_VER_CHROMA_AVX2_16x8 pp
+    FILTER_VER_CHROMA_AVX2_16x8 ps
+
+;-----------------------------------------------------------------------------
+; 4-tap vertical chroma interpolation, 16x12 block, AVX2, x86-64 only
+; (uses xmm8/xmm9 to keep both coefficient rows resident in registers).
+; %1 selects the output variant: pp = rounded/packed pixel output,
+; ps = 16-bit intermediate output (dst stride doubled, pw_2000 subtracted).
+; Unlike the 16x8 kernel this one pairs two source rows per 256-bit register
+; via vinserti128/vperm2i128 instead of widening each row separately.
+;-----------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_AVX2_16x12 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_vert_%1_16x12, 4, 6, 10
+    mov             r4d, r4m
+    ; each filter index occupies 2*mmsize (64) bytes in tab_ChromaCoeffVer_32
+    shl             r4d, 6
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    ; m8/m9 = first/second coefficient pair, loaded once for the whole block
+    mova            m8, [r5]
+    mova            m9, [r5 + mmsize]
+    lea             r4, [r1 * 3]
+    ; start one row above the block (4-tap window spans rows -1..+2)
+    sub             r0, r1
+%ifidn %1,pp
+    mova            m7, [pw_512]
+%else
+    add             r3d, r3d
+    vbroadcasti128  m7, [pw_2000]
+%endif
+    lea             r5, [r3 * 3]
+
+    movu            xm0, [r0]
+    vinserti128     m0, m0, [r0 + r1 * 2], 1
+    movu            xm1, [r0 + r1]
+    vinserti128     m1, m1, [r0 + r4], 1
+
+    punpcklbw       m2, m0, m1
+    punpckhbw       m3, m0, m1
+    vperm2i128      m4, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    pmaddubsw       m4, m8
+    pmaddubsw       m3, m2, m9
+    paddw           m4, m3
+    pmaddubsw       m2, m8
+
+    vextracti128    xm0, m0, 1
+    lea             r0, [r0 + r1 * 4]
+    vinserti128     m0, m0, [r0], 1
+
+    punpcklbw       m5, m1, m0
+    punpckhbw       m3, m1, m0
+    vperm2i128      m6, m5, m3, 0x20
+    vperm2i128      m5, m5, m3, 0x31
+    pmaddubsw       m6, m8
+    pmaddubsw       m3, m5, m9
+    paddw           m6, m3
+    pmaddubsw       m5, m8
+%ifidn %1,pp
+    pmulhrsw        m4, m7                         ; m4 = word: row 0
+    pmulhrsw        m6, m7                         ; m6 = word: row 1
+    packuswb        m4, m6
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm6, m4, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm6
+%else
+    psubw           m4, m7                         ; m4 = word: row 0
+    psubw           m6, m7                         ; m6 = word: row 1
+    movu            [r2], m4
+    movu            [r2 + r3], m6
+%endif
+
+    movu            xm4, [r0 + r1 * 2]
+    vinserti128     m4, m4, [r0 + r1], 1
+    vextracti128    xm1, m4, 1
+    vinserti128     m0, m0, xm1, 0
+
+    punpcklbw       m6, m0, m4
+    punpckhbw       m1, m0, m4
+    vperm2i128      m0, m6, m1, 0x20
+    vperm2i128      m6, m6, m1, 0x31
+    pmaddubsw       m1, m0, m9
+    paddw           m5, m1
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m6, m9
+    paddw           m2, m1
+    pmaddubsw       m6, m8
+
+%ifidn %1,pp
+    pmulhrsw        m2, m7                         ; m2 = word: row 2
+    pmulhrsw        m5, m7                         ; m5 = word: row 3
+    packuswb        m2, m5
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm5, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r5], xm5
+%else
+    psubw           m2, m7                         ; m2 = word: row 2
+    psubw           m5, m7                         ; m5 = word: row 3
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r5], m5
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm1, [r0 + r4]
+    lea             r0, [r0 + r1 * 4]
+    vinserti128     m1, m1, [r0], 1
+    vinserti128     m4, m4, xm1, 1
+
+    punpcklbw       m2, m4, m1
+    punpckhbw       m5, m4, m1
+    vperm2i128      m3, m2, m5, 0x20
+    vperm2i128      m2, m2, m5, 0x31
+    pmaddubsw       m5, m3, m9
+    paddw           m6, m5
+    pmaddubsw       m3, m8
+    pmaddubsw       m5, m2, m9
+    paddw           m0, m5
+    pmaddubsw       m2, m8
+
+%ifidn %1,pp
+    pmulhrsw        m6, m7                         ; m6 = word: row 4
+    pmulhrsw        m0, m7                         ; m0 = word: row 5
+    packuswb        m6, m0
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm0, m6, 1
+    movu            [r2], xm6
+    movu            [r2 + r3], xm0
+%else
+    psubw           m6, m7                         ; m6 = word: row 4
+    psubw           m0, m7                         ; m0 = word: row 5
+    movu            [r2], m6
+    movu            [r2 + r3], m0
+%endif
+
+    movu            xm6, [r0 + r1 * 2]
+    vinserti128     m6, m6, [r0 + r1], 1
+    vextracti128    xm0, m6, 1
+    vinserti128     m1, m1, xm0, 0
+
+    punpcklbw       m4, m1, m6
+    punpckhbw       m5, m1, m6
+    vperm2i128      m0, m4, m5, 0x20
+    vperm2i128      m5, m4, m5, 0x31
+    pmaddubsw       m4, m0, m9
+    paddw           m2, m4
+    pmaddubsw       m0, m8
+    pmaddubsw       m4, m5, m9
+    paddw           m3, m4
+    pmaddubsw       m5, m8
+
+%ifidn %1,pp
+    pmulhrsw        m3, m7                         ; m3 = word: row 6
+    pmulhrsw        m2, m7                         ; m2 = word: row 7
+    packuswb        m3, m2
+    vpermq          m3, m3, 11011000b
+    vextracti128    xm2, m3, 1
+    movu            [r2 + r3 * 2], xm3
+    movu            [r2 + r5], xm2
+%else
+    psubw           m3, m7                         ; m3 = word: row 6
+    psubw           m2, m7                         ; m2 = word: row 7
+    movu            [r2 + r3 * 2], m3
+    movu            [r2 + r5], m2
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm3, [r0 + r4]
+    lea             r0, [r0 + r1 * 4]
+    vinserti128     m3, m3, [r0], 1
+    vinserti128     m6, m6, xm3, 1
+
+    punpcklbw       m2, m6, m3
+    punpckhbw       m1, m6, m3
+    vperm2i128      m4, m2, m1, 0x20
+    vperm2i128      m2, m2, m1, 0x31
+    pmaddubsw       m1, m4, m9
+    paddw           m5, m1
+    pmaddubsw       m4, m8
+    pmaddubsw       m1, m2, m9
+    paddw           m0, m1
+    pmaddubsw       m2, m8
+
+%ifidn %1,pp
+    pmulhrsw        m5, m7                         ; m5 = word: row 8
+    pmulhrsw        m0, m7                         ; m0 = word: row 9
+    packuswb        m5, m0
+    vpermq          m5, m5, 11011000b
+    vextracti128    xm0, m5, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm0
+%else
+    psubw           m5, m7                         ; m5 = word: row 8
+    psubw           m0, m7                         ; m0 = word: row 9
+    movu            [r2], m5
+    movu            [r2 + r3], m0
+%endif
+
+    ; final two source rows: only the second coefficient pair (m9) remains
+    ; to be applied to the last pair of accumulators
+    movu            xm5, [r0 + r1 * 2]
+    vinserti128     m5, m5, [r0 + r1], 1
+    vextracti128    xm0, m5, 1
+    vinserti128     m3, m3, xm0, 0
+
+    punpcklbw       m1, m3, m5
+    punpckhbw       m0, m3, m5
+    vperm2i128      m6, m1, m0, 0x20
+    vperm2i128      m0, m1, m0, 0x31
+    pmaddubsw       m1, m6, m9
+    paddw           m2, m1
+    pmaddubsw       m1, m0, m9
+    paddw           m4, m1
+
+%ifidn %1,pp
+    pmulhrsw        m4, m7                         ; m4 = word: row 10
+    pmulhrsw        m2, m7                         ; m2 = word: row 11
+    packuswb        m4, m2
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm2, m4, 1
+    movu            [r2 + r3 * 2], xm4
+    movu            [r2 + r5], xm2
+%else
+    psubw           m4, m7                         ; m4 = word: row 10
+    psubw           m2, m7                         ; m2 = word: row 11
+    movu            [r2 + r3 * 2], m4
+    movu            [r2 + r5], m2
+%endif
+    RET
+%endif
+%endmacro
+
+    ; Instantiate the 16x12 vertical chroma kernel for both output variants.
+    FILTER_VER_CHROMA_AVX2_16x12 pp
+    FILTER_VER_CHROMA_AVX2_16x12 ps
+
+;-----------------------------------------------------------------------------
+; 4-tap vertical chroma interpolation, 16x%2 block (%2 a multiple of 16),
+; AVX2, x86-64 only (needs the extra r7 GPR for the row-loop counter).
+; %1 selects the output variant: pp = rounded/packed pixel output,
+; ps = 16-bit intermediate output (dst stride doubled, pw_2000 subtracted).
+; The body processes 16 output rows per .loopH iteration, pairing two source
+; rows per 256-bit register, and runs %2/16 iterations.
+;-----------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_AVX2_16xN 2
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_16x%2, 4, 8, 8
+    mov             r4d, r4m
+    ; each filter index occupies 2*mmsize (64) bytes in tab_ChromaCoeffVer_32
+    shl             r4d, 6
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    ; start one row above the block (4-tap window spans rows -1..+2)
+    sub             r0, r1
+%ifidn %1,pp
+    mova            m7, [pw_512]
+%else
+    add             r3d, r3d
+    mova            m7, [pw_2000]
+%endif
+    lea             r6, [r3 * 3]
+    mov             r7d, %2 / 16
+.loopH:
+    movu            xm0, [r0]
+    vinserti128     m0, m0, [r0 + r1 * 2], 1
+    movu            xm1, [r0 + r1]
+    vinserti128     m1, m1, [r0 + r4], 1
+
+    punpcklbw       m2, m0, m1
+    punpckhbw       m3, m0, m1
+    vperm2i128      m4, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    pmaddubsw       m4, [r5]
+    pmaddubsw       m3, m2, [r5 + mmsize]
+    paddw           m4, m3
+    pmaddubsw       m2, [r5]
+
+    vextracti128    xm0, m0, 1
+    lea             r0, [r0 + r1 * 4]
+    vinserti128     m0, m0, [r0], 1
+
+    punpcklbw       m5, m1, m0
+    punpckhbw       m3, m1, m0
+    vperm2i128      m6, m5, m3, 0x20
+    vperm2i128      m5, m5, m3, 0x31
+    pmaddubsw       m6, [r5]
+    pmaddubsw       m3, m5, [r5 + mmsize]
+    paddw           m6, m3
+    pmaddubsw       m5, [r5]
+%ifidn %1,pp
+    pmulhrsw        m4, m7                         ; m4 = word: row 0
+    pmulhrsw        m6, m7                         ; m6 = word: row 1
+    packuswb        m4, m6
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm6, m4, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm6
+%else
+    psubw           m4, m7                         ; m4 = word: row 0
+    psubw           m6, m7                         ; m6 = word: row 1
+    movu            [r2], m4
+    movu            [r2 + r3], m6
+%endif
+
+    movu            xm4, [r0 + r1 * 2]
+    vinserti128     m4, m4, [r0 + r1], 1
+    vextracti128    xm1, m4, 1
+    vinserti128     m0, m0, xm1, 0
+
+    punpcklbw       m6, m0, m4
+    punpckhbw       m1, m0, m4
+    vperm2i128      m0, m6, m1, 0x20
+    vperm2i128      m6, m6, m1, 0x31
+    pmaddubsw       m1, m0, [r5 + mmsize]
+    paddw           m5, m1
+    pmaddubsw       m0, [r5]
+    pmaddubsw       m1, m6, [r5 + mmsize]
+    paddw           m2, m1
+    pmaddubsw       m6, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m2, m7                         ; m2 = word: row 2
+    pmulhrsw        m5, m7                         ; m5 = word: row 3
+    packuswb        m2, m5
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm5, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm5
+%else
+    psubw           m2, m7                         ; m2 = word: row 2
+    psubw           m5, m7                         ; m5 = word: row 3
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r6], m5
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm1, [r0 + r4]
+    lea             r0, [r0 + r1 * 4]
+    vinserti128     m1, m1, [r0], 1
+    vinserti128     m4, m4, xm1, 1
+
+    punpcklbw       m2, m4, m1
+    punpckhbw       m5, m4, m1
+    vperm2i128      m3, m2, m5, 0x20
+    vperm2i128      m2, m2, m5, 0x31
+    pmaddubsw       m5, m3, [r5 + mmsize]
+    paddw           m6, m5
+    pmaddubsw       m3, [r5]
+    pmaddubsw       m5, m2, [r5 + mmsize]
+    paddw           m0, m5
+    pmaddubsw       m2, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m6, m7                         ; m6 = word: row 4
+    pmulhrsw        m0, m7                         ; m0 = word: row 5
+    packuswb        m6, m0
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm0, m6, 1
+    movu            [r2], xm6
+    movu            [r2 + r3], xm0
+%else
+    psubw           m6, m7                         ; m6 = word: row 4
+    psubw           m0, m7                         ; m0 = word: row 5
+    movu            [r2], m6
+    movu            [r2 + r3], m0
+%endif
+
+    movu            xm6, [r0 + r1 * 2]
+    vinserti128     m6, m6, [r0 + r1], 1
+    vextracti128    xm0, m6, 1
+    vinserti128     m1, m1, xm0, 0
+
+    punpcklbw       m4, m1, m6
+    punpckhbw       m5, m1, m6
+    vperm2i128      m0, m4, m5, 0x20
+    vperm2i128      m5, m4, m5, 0x31
+    pmaddubsw       m4, m0, [r5 + mmsize]
+    paddw           m2, m4
+    pmaddubsw       m0, [r5]
+    pmaddubsw       m4, m5, [r5 + mmsize]
+    paddw           m3, m4
+    pmaddubsw       m5, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m3, m7                         ; m3 = word: row 6
+    pmulhrsw        m2, m7                         ; m2 = word: row 7
+    packuswb        m3, m2
+    vpermq          m3, m3, 11011000b
+    vextracti128    xm2, m3, 1
+    movu            [r2 + r3 * 2], xm3
+    movu            [r2 + r6], xm2
+%else
+    psubw           m3, m7                         ; m3 = word: row 6
+    psubw           m2, m7                         ; m2 = word: row 7
+    movu            [r2 + r3 * 2], m3
+    movu            [r2 + r6], m2
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm3, [r0 + r4]
+    lea             r0, [r0 + r1 * 4]
+    vinserti128     m3, m3, [r0], 1
+    vinserti128     m6, m6, xm3, 1
+
+    punpcklbw       m2, m6, m3
+    punpckhbw       m1, m6, m3
+    vperm2i128      m4, m2, m1, 0x20
+    vperm2i128      m2, m2, m1, 0x31
+    pmaddubsw       m1, m4, [r5 + mmsize]
+    paddw           m5, m1
+    pmaddubsw       m4, [r5]
+    pmaddubsw       m1, m2, [r5 + mmsize]
+    paddw           m0, m1
+    pmaddubsw       m2, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m5, m7                         ; m5 = word: row 8
+    pmulhrsw        m0, m7                         ; m0 = word: row 9
+    packuswb        m5, m0
+    vpermq          m5, m5, 11011000b
+    vextracti128    xm0, m5, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm0
+%else
+    psubw           m5, m7                         ; m5 = word: row 8
+    psubw           m0, m7                         ; m0 = word: row 9
+    movu            [r2], m5
+    movu            [r2 + r3], m0
+%endif
+
+    movu            xm5, [r0 + r1 * 2]
+    vinserti128     m5, m5, [r0 + r1], 1
+    vextracti128    xm0, m5, 1
+    vinserti128     m3, m3, xm0, 0
+
+    punpcklbw       m1, m3, m5
+    punpckhbw       m0, m3, m5
+    vperm2i128      m6, m1, m0, 0x20
+    vperm2i128      m0, m1, m0, 0x31
+    pmaddubsw       m1, m6, [r5 + mmsize]
+    paddw           m2, m1
+    pmaddubsw       m6, [r5]
+    pmaddubsw       m1, m0, [r5 + mmsize]
+    paddw           m4, m1
+    pmaddubsw       m0, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m4, m7                         ; m4 = word: row 10
+    pmulhrsw        m2, m7                         ; m2 = word: row 11
+    packuswb        m4, m2
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm2, m4, 1
+    movu            [r2 + r3 * 2], xm4
+    movu            [r2 + r6], xm2
+%else
+    psubw           m4, m7                         ; m4 = word: row 10
+    psubw           m2, m7                         ; m2 = word: row 11
+    movu            [r2 + r3 * 2], m4
+    movu            [r2 + r6], m2
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm3, [r0 + r4]
+    lea             r0, [r0 + r1 * 4]
+    vinserti128     m3, m3, [r0], 1
+    vinserti128     m5, m5, xm3, 1
+
+    punpcklbw       m2, m5, m3
+    punpckhbw       m1, m5, m3
+    vperm2i128      m4, m2, m1, 0x20
+    vperm2i128      m2, m2, m1, 0x31
+    pmaddubsw       m1, m4, [r5 + mmsize]
+    paddw           m0, m1
+    pmaddubsw       m4, [r5]
+    pmaddubsw       m1, m2, [r5 + mmsize]
+    paddw           m6, m1
+    pmaddubsw       m2, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m0, m7                         ; m0 = word: row 12
+    pmulhrsw        m6, m7                         ; m6 = word: row 13
+    packuswb        m0, m6
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm6, m0, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm6
+%else
+    psubw           m0, m7                         ; m0 = word: row 12
+    psubw           m6, m7                         ; m6 = word: row 13
+    movu            [r2], m0
+    movu            [r2 + r3], m6
+%endif
+
+    ; last two source rows of this iteration: only the second coefficient
+    ; pair remains to be applied to the final accumulators (rows 14/15)
+    movu            xm5, [r0 + r1 * 2]
+    vinserti128     m5, m5, [r0 + r1], 1
+    vextracti128    xm0, m5, 1
+    vinserti128     m3, m3, xm0, 0
+
+    punpcklbw       m1, m3, m5
+    punpckhbw       m0, m3, m5
+    vperm2i128      m6, m1, m0, 0x20
+    vperm2i128      m0, m1, m0, 0x31
+    pmaddubsw       m6, [r5 + mmsize]
+    paddw           m2, m6
+    pmaddubsw       m0, [r5 + mmsize]
+    paddw           m4, m0
+
+%ifidn %1,pp
+    pmulhrsw        m4, m7                         ; m4 = word: row 14
+    pmulhrsw        m2, m7                         ; m2 = word: row 15
+    packuswb        m4, m2
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm2, m4, 1
+    movu            [r2 + r3 * 2], xm4
+    movu            [r2 + r6], xm2
+%else
+    psubw           m4, m7                         ; m4 = word: row 14
+    psubw           m2, m7                         ; m2 = word: row 15
+    movu            [r2 + r3 * 2], m4
+    movu            [r2 + r6], m2
+%endif
+    lea             r2, [r2 + r3 * 4]
+    dec             r7d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    ; Instantiate the looped 16xN kernel for the 16x32 and 16x64 block sizes,
+    ; each in both output variants (pp and ps).
+    FILTER_VER_CHROMA_AVX2_16xN pp, 32
+    FILTER_VER_CHROMA_AVX2_16xN ps, 32
+    FILTER_VER_CHROMA_AVX2_16xN pp, 64
+    FILTER_VER_CHROMA_AVX2_16xN ps, 64
+
+%macro FILTER_VER_CHROMA_AVX2_16x24 1
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_16x24, 4, 6, 15
+    mov             r4d, r4m
+    shl             r4d, 6
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    mova            m12, [r5]
+    mova            m13, [r5 + mmsize]
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,pp
+    mova            m14, [pw_512]
+%else
+    add             r3d, r3d
+    vbroadcasti128  m14, [pw_2000]
+%endif
+    lea             r5, [r3 * 3]
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, m12
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, m12
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, m13
+    paddw           m0, m4
+    pmaddubsw       m2, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, m13
+    paddw           m1, m5
+    pmaddubsw       m3, m12
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, m13
+    paddw           m2, m6
+    pmaddubsw       m4, m12
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, m13
+    paddw           m3, m7
+    pmaddubsw       m5, m12
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhbw       xm8, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddubsw       m8, m6, m13
+    paddw           m4, m8
+    pmaddubsw       m6, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhbw       xm9, xm7, xm8
+    punpcklbw       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddubsw       m9, m7, m13
+    paddw           m5, m9
+    pmaddubsw       m7, m12
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m10, m8, m13
+    paddw           m6, m10
+    pmaddubsw       m8, m12
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m11, m9, m13
+    paddw           m7, m11
+    pmaddubsw       m9, m12
+
+%ifidn %1,pp
+    pmulhrsw        m0, m14                         ; m0 = word: row 0
+    pmulhrsw        m1, m14                         ; m1 = word: row 1
+    pmulhrsw        m2, m14                         ; m2 = word: row 2
+    pmulhrsw        m3, m14                         ; m3 = word: row 3
+    pmulhrsw        m4, m14                         ; m4 = word: row 4
+    pmulhrsw        m5, m14                         ; m5 = word: row 5
+    pmulhrsw        m6, m14                         ; m6 = word: row 6
+    pmulhrsw        m7, m14                         ; m7 = word: row 7
+    packuswb        m0, m1
+    packuswb        m2, m3
+    packuswb        m4, m5
+    packuswb        m6, m7
+    vpermq          m0, m0, q3120
+    vpermq          m2, m2, q3120
+    vpermq          m4, m4, q3120
+    vpermq          m6, m6, q3120
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    vextracti128    xm7, m6, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r5], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r5], xm7
+%else
+    psubw           m0, m14                         ; m0 = word: row 0
+    psubw           m1, m14                         ; m1 = word: row 1
+    psubw           m2, m14                         ; m2 = word: row 2
+    psubw           m3, m14                         ; m3 = word: row 3
+    psubw           m4, m14                         ; m4 = word: row 4
+    psubw           m5, m14                         ; m5 = word: row 5
+    psubw           m6, m14                         ; m6 = word: row 6
+    psubw           m7, m14                         ; m7 = word: row 7
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r5], m3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m4
+    movu            [r2 + r3], m5
+    movu            [r2 + r3 * 2], m6
+    movu            [r2 + r5], m7
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhbw       xm6, xm10, xm11
+    punpcklbw       xm10, xm11
+    vinserti128     m10, m10, xm6, 1
+    pmaddubsw       m6, m10, m13
+    paddw           m8, m6
+    pmaddubsw       m10, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm6, [r0]                       ; m6 = row 12
+    punpckhbw       xm7, xm11, xm6
+    punpcklbw       xm11, xm6
+    vinserti128     m11, m11, xm7, 1
+    pmaddubsw       m7, m11, m13
+    paddw           m9, m7
+    pmaddubsw       m11, m12
+
+    movu            xm7, [r0 + r1]                  ; m7 = row 13
+    punpckhbw       xm0, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm0, 1
+    pmaddubsw       m0, m6, m13
+    paddw           m10, m0
+    pmaddubsw       m6, m12
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhbw       xm1, xm7, xm0
+    punpcklbw       xm7, xm0
+    vinserti128     m7, m7, xm1, 1
+    pmaddubsw       m1, m7, m13
+    paddw           m11, m1
+    pmaddubsw       m7, m12
+    movu            xm1, [r0 + r4]                  ; m1 = row 15
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m2, m0, m13
+    paddw           m6, m2
+    pmaddubsw       m0, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 16
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m3, m1, m13
+    paddw           m7, m3
+    pmaddubsw       m1, m12
+    movu            xm3, [r0 + r1]                  ; m3 = row 17
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, m13
+    paddw           m0, m4
+    pmaddubsw       m2, m12
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, m13
+    paddw           m1, m5
+    pmaddubsw       m3, m12
+
+%ifidn %1,pp
+    pmulhrsw        m8, m14                         ; m8 = word: row 8
+    pmulhrsw        m9, m14                         ; m9 = word: row 9
+    pmulhrsw        m10, m14                        ; m10 = word: row 10
+    pmulhrsw        m11, m14                        ; m11 = word: row 11
+    pmulhrsw        m6, m14                         ; m6 = word: row 12
+    pmulhrsw        m7, m14                         ; m7 = word: row 13
+    pmulhrsw        m0, m14                         ; m0 = word: row 14
+    pmulhrsw        m1, m14                         ; m1 = word: row 15
+    packuswb        m8, m9
+    packuswb        m10, m11
+    packuswb        m6, m7
+    packuswb        m0, m1
+    vpermq          m8, m8, q3120
+    vpermq          m10, m10, q3120
+    vpermq          m6, m6, q3120
+    vpermq          m0, m0, q3120
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm7, m6, 1
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm8
+    movu            [r2 + r3], xm9
+    movu            [r2 + r3 * 2], xm10
+    movu            [r2 + r5], xm11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm6
+    movu            [r2 + r3], xm7
+    movu            [r2 + r3 * 2], xm0
+    movu            [r2 + r5], xm1
+%else
+    psubw           m8, m14                         ; m8 = word: row 8
+    psubw           m9, m14                         ; m9 = word: row 9
+    psubw           m10, m14                        ; m10 = word: row 10
+    psubw           m11, m14                        ; m11 = word: row 11
+    psubw           m6, m14                         ; m6 = word: row 12
+    psubw           m7, m14                         ; m7 = word: row 13
+    psubw           m0, m14                         ; m0 = word: row 14
+    psubw           m1, m14                         ; m1 = word: row 15
+    movu            [r2], m8
+    movu            [r2 + r3], m9
+    movu            [r2 + r3 * 2], m10
+    movu            [r2 + r5], m11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m6
+    movu            [r2 + r3], m7
+    movu            [r2 + r3 * 2], m0
+    movu            [r2 + r5], m1
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm5, [r0 + r4]                  ; m5 = row 19
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, m13
+    paddw           m2, m6
+    pmaddubsw       m4, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm6, [r0]                       ; m6 = row 20
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, m13
+    paddw           m3, m7
+    pmaddubsw       m5, m12
+    movu            xm7, [r0 + r1]                  ; m7 = row 21
+    punpckhbw       xm0, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm0, 1
+    pmaddubsw       m0, m6, m13
+    paddw           m4, m0
+    pmaddubsw       m6, m12
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 22
+    punpckhbw       xm1, xm7, xm0
+    punpcklbw       xm7, xm0
+    vinserti128     m7, m7, xm1, 1
+    pmaddubsw       m1, m7, m13
+    paddw           m5, m1
+    pmaddubsw       m7, m12
+    movu            xm1, [r0 + r4]                  ; m1 = row 23
+    punpckhbw       xm8, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm8, 1
+    pmaddubsw       m8, m0, m13
+    paddw           m6, m8
+    pmaddubsw       m0, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 24
+    punpckhbw       xm9, xm1, xm8
+    punpcklbw       xm1, xm8
+    vinserti128     m1, m1, xm9, 1
+    pmaddubsw       m9, m1, m13
+    paddw           m7, m9
+    pmaddubsw       m1, m12
+    movu            xm9, [r0 + r1]                  ; m9 = row 25
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m8, m13
+    paddw           m0, m8
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 26
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m9, m13
+    paddw           m1, m9
+
+%ifidn %1,pp
+    pmulhrsw        m2, m14                         ; m2 = word: row 16
+    pmulhrsw        m3, m14                         ; m3 = word: row 17
+    pmulhrsw        m4, m14                         ; m4 = word: row 18
+    pmulhrsw        m5, m14                         ; m5 = word: row 19
+    pmulhrsw        m6, m14                         ; m6 = word: row 20
+    pmulhrsw        m7, m14                         ; m7 = word: row 21
+    pmulhrsw        m0, m14                         ; m0 = word: row 22
+    pmulhrsw        m1, m14                         ; m1 = word: row 23
+    packuswb        m2, m3
+    packuswb        m4, m5
+    packuswb        m6, m7
+    packuswb        m0, m1
+    vpermq          m2, m2, q3120
+    vpermq          m4, m4, q3120
+    vpermq          m6, m6, q3120
+    vpermq          m0, m0, q3120
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    vextracti128    xm7, m6, 1
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm2
+    movu            [r2 + r3], xm3
+    movu            [r2 + r3 * 2], xm4
+    movu            [r2 + r5], xm5
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm6
+    movu            [r2 + r3], xm7
+    movu            [r2 + r3 * 2], xm0
+    movu            [r2 + r5], xm1
+%else
+    psubw           m2, m14                         ; m2 = word: row 16
+    psubw           m3, m14                         ; m3 = word: row 17
+    psubw           m4, m14                         ; m4 = word: row 18
+    psubw           m5, m14                         ; m5 = word: row 19
+    psubw           m6, m14                         ; m6 = word: row 20
+    psubw           m7, m14                         ; m7 = word: row 21
+    psubw           m0, m14                         ; m0 = word: row 22
+    psubw           m1, m14                         ; m1 = word: row 23
+    movu            [r2], m2
+    movu            [r2 + r3], m3
+    movu            [r2 + r3 * 2], m4
+    movu            [r2 + r5], m5
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m6
+    movu            [r2 + r3], m7
+    movu            [r2 + r3 * 2], m0
+    movu            [r2 + r5], m1
+%endif
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_16x24 pp    ; instantiate 16x24 vertical chroma filter, pixel output (rounded, clamped to bytes)
+    FILTER_VER_CHROMA_AVX2_16x24 ps    ; instantiate 16x24 variant with 16-bit intermediate output (pw_2000 offset subtracted)
+
+%macro FILTER_VER_CHROMA_AVX2_24x32 1          ; 4-tap vertical chroma interpolation, 24x32 block; %1 = pp (byte output) or ps (16-bit output)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_vert_%1_24x32, 4, 9, 10    ; args: r0=src, r1=srcStride, r2=dst, r3=dstStride; r4m = coeffIdx
+    mov             r4d, r4m                    ; coefficient index (5th argument)
+    shl             r4d, 6                      ; * 64: each filter entry is two 32-byte rows in tab_ChromaCoeffVer_32
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    mova            m8, [r5]                    ; taps for rows 0/1 (broadcast byte pairs)
+    mova            m9, [r5 + mmsize]           ; taps for rows 2/3
+    lea             r4, [r1 * 3]                ; r4 = 3 * srcStride
+    sub             r0, r1                      ; back up one row: 4-tap filter reads rows -1..+2
+%ifidn %1,pp
+    mova            m7, [pw_512]                ; pp: pmulhrsw rounding constant (>> 6 with round)
+%else
+    add             r3d, r3d                    ; ps: dst stride in bytes for 16-bit output
+    vbroadcasti128  m7, [pw_2000]               ; ps: DC offset subtracted from intermediate values
+%endif
+    lea             r6, [r3 * 3]                ; r6 = 3 * dstStride
+    mov             r5d, 2                      ; two vertical halves of 16 rows each (32 rows total)
+.loopH:
+    movu            xm0, [r0]
+    vinserti128     m0, m0, [r0 + r1 * 2], 1
+    movu            xm1, [r0 + r1]
+    vinserti128     m1, m1, [r0 + r4], 1
+
+    punpcklbw       m2, m0, m1
+    punpckhbw       m3, m0, m1
+    vperm2i128      m4, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    pmaddubsw       m4, m8
+    pmaddubsw       m3, m2, m9
+    paddw           m4, m3
+    pmaddubsw       m2, m8
+
+    vextracti128    xm0, m0, 1
+    lea             r7, [r0 + r1 * 4]           ; r7 walks the source rows for this half
+    vinserti128     m0, m0, [r7], 1
+
+    punpcklbw       m5, m1, m0
+    punpckhbw       m3, m1, m0
+    vperm2i128      m6, m5, m3, 0x20
+    vperm2i128      m5, m5, m3, 0x31
+    pmaddubsw       m6, m8
+    pmaddubsw       m3, m5, m9
+    paddw           m6, m3
+    pmaddubsw       m5, m8
+%ifidn %1,pp
+    pmulhrsw        m4, m7                         ; m4 = word: row 0
+    pmulhrsw        m6, m7                         ; m6 = word: row 1
+    packuswb        m4, m6
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm6, m4, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm6
+%else
+    psubw           m4, m7                         ; m4 = word: row 0
+    psubw           m6, m7                         ; m6 = word: row 1
+    movu            [r2], m4
+    movu            [r2 + r3], m6
+%endif
+
+    movu            xm4, [r7 + r1 * 2]
+    vinserti128     m4, m4, [r7 + r1], 1
+    vextracti128    xm1, m4, 1
+    vinserti128     m0, m0, xm1, 0
+
+    punpcklbw       m6, m0, m4
+    punpckhbw       m1, m0, m4
+    vperm2i128      m0, m6, m1, 0x20
+    vperm2i128      m6, m6, m1, 0x31
+    pmaddubsw       m1, m0, m9
+    paddw           m5, m1
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m6, m9
+    paddw           m2, m1
+    pmaddubsw       m6, m8
+
+%ifidn %1,pp
+    pmulhrsw        m2, m7                         ; m2 = word: row 2
+    pmulhrsw        m5, m7                         ; m5 = word: row 3
+    packuswb        m2, m5
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm5, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm5
+%else
+    psubw           m2, m7                         ; m2 = word: row 2
+    psubw           m5, m7                         ; m5 = word: row 3
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r6], m5
+%endif
+    lea             r8, [r2 + r3 * 4]           ; r8 walks the destination rows for this half
+
+    movu            xm1, [r7 + r4]
+    lea             r7, [r7 + r1 * 4]
+    vinserti128     m1, m1, [r7], 1
+    vinserti128     m4, m4, xm1, 1
+
+    punpcklbw       m2, m4, m1
+    punpckhbw       m5, m4, m1
+    vperm2i128      m3, m2, m5, 0x20
+    vperm2i128      m2, m2, m5, 0x31
+    pmaddubsw       m5, m3, m9
+    paddw           m6, m5
+    pmaddubsw       m3, m8
+    pmaddubsw       m5, m2, m9
+    paddw           m0, m5
+    pmaddubsw       m2, m8
+
+%ifidn %1,pp
+    pmulhrsw        m6, m7                         ; m6 = word: row 4
+    pmulhrsw        m0, m7                         ; m0 = word: row 5
+    packuswb        m6, m0
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm0, m6, 1
+    movu            [r8], xm6
+    movu            [r8 + r3], xm0
+%else
+    psubw           m6, m7                         ; m6 = word: row 4
+    psubw           m0, m7                         ; m0 = word: row 5
+    movu            [r8], m6
+    movu            [r8 + r3], m0
+%endif
+
+    movu            xm6, [r7 + r1 * 2]
+    vinserti128     m6, m6, [r7 + r1], 1
+    vextracti128    xm0, m6, 1
+    vinserti128     m1, m1, xm0, 0
+
+    punpcklbw       m4, m1, m6
+    punpckhbw       m5, m1, m6
+    vperm2i128      m0, m4, m5, 0x20
+    vperm2i128      m5, m4, m5, 0x31
+    pmaddubsw       m4, m0, m9
+    paddw           m2, m4
+    pmaddubsw       m0, m8
+    pmaddubsw       m4, m5, m9
+    paddw           m3, m4
+    pmaddubsw       m5, m8
+
+%ifidn %1,pp
+    pmulhrsw        m3, m7                         ; m3 = word: row 6
+    pmulhrsw        m2, m7                         ; m2 = word: row 7
+    packuswb        m3, m2
+    vpermq          m3, m3, 11011000b
+    vextracti128    xm2, m3, 1
+    movu            [r8 + r3 * 2], xm3
+    movu            [r8 + r6], xm2
+%else
+    psubw           m3, m7                         ; m3 = word: row 6
+    psubw           m2, m7                         ; m2 = word: row 7
+    movu            [r8 + r3 * 2], m3
+    movu            [r8 + r6], m2
+%endif
+    lea             r8, [r8 + r3 * 4]
+
+    movu            xm3, [r7 + r4]
+    lea             r7, [r7 + r1 * 4]
+    vinserti128     m3, m3, [r7], 1
+    vinserti128     m6, m6, xm3, 1
+
+    punpcklbw       m2, m6, m3
+    punpckhbw       m1, m6, m3
+    vperm2i128      m4, m2, m1, 0x20
+    vperm2i128      m2, m2, m1, 0x31
+    pmaddubsw       m1, m4, m9
+    paddw           m5, m1
+    pmaddubsw       m4, m8
+    pmaddubsw       m1, m2, m9
+    paddw           m0, m1
+    pmaddubsw       m2, m8
+
+%ifidn %1,pp
+    pmulhrsw        m5, m7                         ; m5 = word: row 8
+    pmulhrsw        m0, m7                         ; m0 = word: row 9
+    packuswb        m5, m0
+    vpermq          m5, m5, 11011000b
+    vextracti128    xm0, m5, 1
+    movu            [r8], xm5
+    movu            [r8 + r3], xm0
+%else
+    psubw           m5, m7                         ; m5 = word: row 8
+    psubw           m0, m7                         ; m0 = word: row 9
+    movu            [r8], m5
+    movu            [r8 + r3], m0
+%endif
+
+    movu            xm5, [r7 + r1 * 2]
+    vinserti128     m5, m5, [r7 + r1], 1
+    vextracti128    xm0, m5, 1
+    vinserti128     m3, m3, xm0, 0
+
+    punpcklbw       m1, m3, m5
+    punpckhbw       m0, m3, m5
+    vperm2i128      m6, m1, m0, 0x20
+    vperm2i128      m0, m1, m0, 0x31
+    pmaddubsw       m1, m6, m9
+    paddw           m2, m1
+    pmaddubsw       m6, m8
+    pmaddubsw       m1, m0, m9
+    paddw           m4, m1
+    pmaddubsw       m0, m8
+
+%ifidn %1,pp
+    pmulhrsw        m4, m7                         ; m4 = word: row 10
+    pmulhrsw        m2, m7                         ; m2 = word: row 11
+    packuswb        m4, m2
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm2, m4, 1
+    movu            [r8 + r3 * 2], xm4
+    movu            [r8 + r6], xm2
+%else
+    psubw           m4, m7                         ; m4 = word: row 10
+    psubw           m2, m7                         ; m2 = word: row 11
+    movu            [r8 + r3 * 2], m4
+    movu            [r8 + r6], m2
+%endif
+    lea             r8, [r8 + r3 * 4]
+
+    movu            xm3, [r7 + r4]
+    lea             r7, [r7 + r1 * 4]
+    vinserti128     m3, m3, [r7], 1
+    vinserti128     m5, m5, xm3, 1
+
+    punpcklbw       m2, m5, m3
+    punpckhbw       m1, m5, m3
+    vperm2i128      m4, m2, m1, 0x20
+    vperm2i128      m2, m2, m1, 0x31
+    pmaddubsw       m1, m4, m9
+    paddw           m0, m1
+    pmaddubsw       m4, m8
+    pmaddubsw       m1, m2, m9
+    paddw           m6, m1
+    pmaddubsw       m2, m8
+
+%ifidn %1,pp
+    pmulhrsw        m0, m7                         ; m0 = word: row 12
+    pmulhrsw        m6, m7                         ; m6 = word: row 13
+    packuswb        m0, m6
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm6, m0, 1
+    movu            [r8], xm0
+    movu            [r8 + r3], xm6
+%else
+    psubw           m0, m7                         ; m0 = word: row 12
+    psubw           m6, m7                         ; m6 = word: row 13
+    movu            [r8], m0
+    movu            [r8 + r3], m6
+%endif
+
+    movu            xm5, [r7 + r1 * 2]
+    vinserti128     m5, m5, [r7 + r1], 1
+    vextracti128    xm0, m5, 1
+    vinserti128     m3, m3, xm0, 0
+
+    punpcklbw       m1, m3, m5
+    punpckhbw       m0, m3, m5
+    vperm2i128      m6, m1, m0, 0x20
+    vperm2i128      m0, m1, m0, 0x31
+    pmaddubsw       m6, m9
+    paddw           m2, m6
+    pmaddubsw       m0, m9
+    paddw           m4, m0
+
+%ifidn %1,pp
+    pmulhrsw        m4, m7                         ; m4 = word: row 14
+    pmulhrsw        m2, m7                         ; m2 = word: row 15
+    packuswb        m4, m2
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm2, m4, 1
+    movu            [r8 + r3 * 2], xm4
+    movu            [r8 + r6], xm2
+    add             r2, 16                      ; pp: advance dst to the rightmost 8-pixel strip (byte output)
+%else
+    psubw           m4, m7                         ; m4 = word: row 14
+    psubw           m2, m7                         ; m2 = word: row 15
+    movu            [r8 + r3 * 2], m4
+    movu            [r8 + r6], m2
+    add             r2, 32                      ; ps: same column offset, but 16-bit samples
+%endif
+    add             r0, 16                      ; src: move to columns 16..23; movq below loads 8 bytes per row
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2
+    movq            xm3, [r0 + r1 * 2]              ; m3 = row 2
+    punpcklbw       xm2, xm3
+    vinserti128     m5, m1, xm2, 1
+    pmaddubsw       m5, m8
+    movq            xm4, [r0 + r4]                  ; m4 = row 3
+    punpcklbw       xm3, xm4
+    lea             r7, [r0 + r1 * 4]
+    movq            xm1, [r7]                       ; m1 = row 4
+    punpcklbw       xm4, xm1
+    vinserti128     m2, m3, xm4, 1
+    pmaddubsw       m0, m2, m9
+    paddw           m5, m0
+    pmaddubsw       m2, m8
+    movq            xm3, [r7 + r1]                  ; m3 = row 5
+    punpcklbw       xm1, xm3
+    movq            xm4, [r7 + r1 * 2]              ; m4 = row 6
+    punpcklbw       xm3, xm4
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m0, m1, m9
+    paddw           m2, m0
+    pmaddubsw       m1, m8
+    movq            xm3, [r7 + r4]                  ; m3 = row 7
+    punpcklbw       xm4, xm3
+    lea             r7, [r7 + r1 * 4]
+    movq            xm0, [r7]                       ; m0 = row 8
+    punpcklbw       xm3, xm0
+    vinserti128     m4, m4, xm3, 1
+    pmaddubsw       m3, m4, m9
+    paddw           m1, m3
+    pmaddubsw       m4, m8
+    movq            xm3, [r7 + r1]                  ; m3 = row 9
+    punpcklbw       xm0, xm3
+    movq            xm6, [r7 + r1 * 2]              ; m6 = row 10
+    punpcklbw       xm3, xm6
+    vinserti128     m0, m0, xm3, 1
+    pmaddubsw       m3, m0, m9
+    paddw           m4, m3
+    pmaddubsw       m0, m8
+
+%ifidn %1,pp
+    pmulhrsw        m5, m7                          ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m7                          ; m2 = word: row 2, row 3
+    pmulhrsw        m1, m7                          ; m1 = word: row 4, row 5
+    pmulhrsw        m4, m7                          ; m4 = word: row 6, row 7
+    packuswb        m5, m2
+    packuswb        m1, m4
+    vextracti128    xm2, m5, 1
+    vextracti128    xm4, m1, 1
+    movq            [r2], xm5
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm5
+    movhps          [r2 + r6], xm2
+    lea             r8, [r2 + r3 * 4]
+    movq            [r8], xm1
+    movq            [r8 + r3], xm4
+    movhps          [r8 + r3 * 2], xm1
+    movhps          [r8 + r6], xm4
+%else
+    psubw           m5, m7                          ; m5 = word: row 0, row 1
+    psubw           m2, m7                          ; m2 = word: row 2, row 3
+    psubw           m1, m7                          ; m1 = word: row 4, row 5
+    psubw           m4, m7                          ; m4 = word: row 6, row 7
+    vextracti128    xm3, m5, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm3
+    vextracti128    xm3, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    vextracti128    xm3, m1, 1
+    lea             r8, [r2 + r3 * 4]
+    movu            [r8], xm1
+    movu            [r8 + r3], xm3
+    vextracti128    xm3, m4, 1
+    movu            [r8 + r3 * 2], xm4
+    movu            [r8 + r6], xm3
+%endif
+    lea             r8, [r8 + r3 * 4]
+
+    movq            xm3, [r7 + r4]                  ; m3 = row 11
+    punpcklbw       xm6, xm3
+    lea             r7, [r7 + r1 * 4]
+    movq            xm5, [r7]                       ; m5 = row 12
+    punpcklbw       xm3, xm5
+    vinserti128     m6, m6, xm3, 1
+    pmaddubsw       m3, m6, m9
+    paddw           m0, m3
+    pmaddubsw       m6, m8
+    movq            xm3, [r7 + r1]                  ; m3 = row 13
+    punpcklbw       xm5, xm3
+    movq            xm2, [r7 + r1 * 2]              ; m2 = row 14
+    punpcklbw       xm3, xm2
+    vinserti128     m5, m5, xm3, 1
+    pmaddubsw       m3, m5, m9
+    paddw           m6, m3
+    pmaddubsw       m5, m8
+    movq            xm3, [r7 + r4]                  ; m3 = row 15
+    punpcklbw       xm2, xm3
+    lea             r7, [r7 + r1 * 4]
+    movq            xm1, [r7]                       ; m1 = row 16
+    punpcklbw       xm3, xm1
+    vinserti128     m2, m2, xm3, 1
+    pmaddubsw       m3, m2, m9
+    paddw           m5, m3
+    pmaddubsw       m2, m8
+    movq            xm3, [r7 + r1]                  ; m3 = row 17
+    punpcklbw       xm1, xm3
+    movq            xm4, [r7 + r1 * 2]              ; m4 = row 18
+    punpcklbw       xm3, xm4
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m3, m1, m9
+    paddw           m2, m3
+%ifidn %1,pp
+    pmulhrsw        m0, m7                          ; m0 = word: row 8, row 9
+    pmulhrsw        m6, m7                          ; m6 = word: row 10, row 11
+    pmulhrsw        m5, m7                          ; m5 = word: row 12, row 13
+    pmulhrsw        m2, m7                          ; m2 = word: row 14, row 15
+    packuswb        m0, m6
+    packuswb        m5, m2
+    vextracti128    xm6, m0, 1
+    vextracti128    xm2, m5, 1
+    movq            [r8], xm0
+    movq            [r8 + r3], xm6
+    movhps          [r8 + r3 * 2], xm0
+    movhps          [r8 + r6], xm6
+    lea             r8, [r8 + r3 * 4]
+    movq            [r8], xm5
+    movq            [r8 + r3], xm2
+    movhps          [r8 + r3 * 2], xm5
+    movhps          [r8 + r6], xm2
+    lea             r2, [r8 + r3 * 4 - 16]      ; pp: next half, dst rewound to column 0 (undo the +16)
+%else
+    psubw           m0, m7                          ; m0 = word: row 8, row 9
+    psubw           m6, m7                          ; m6 = word: row 10, row 11
+    psubw           m5, m7                          ; m5 = word: row 12, row 13
+    psubw           m2, m7                          ; m2 = word: row 14, row 15
+    vextracti128    xm3, m0, 1
+    movu            [r8], xm0
+    movu            [r8 + r3], xm3
+    vextracti128    xm3, m6, 1
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm3
+    vextracti128    xm3, m5, 1
+    lea             r8, [r8 + r3 * 4]
+    movu            [r8], xm5
+    movu            [r8 + r3], xm3
+    vextracti128    xm3, m2, 1
+    movu            [r8 + r3 * 2], xm2
+    movu            [r8 + r6], xm3
+    lea             r2, [r8 + r3 * 4 - 32]      ; ps: next half, dst rewound to column 0 (undo the +32)
+%endif
+    lea             r0, [r7 - 16]               ; src rewound to column 0 for the next 16-row half
+    dec             r5d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_24x32 pp    ; byte-output instantiation
+    FILTER_VER_CHROMA_AVX2_24x32 ps    ; 16-bit intermediate-output instantiation
+
+%macro FILTER_VER_CHROMA_AVX2_24x64 1          ; 4-tap vertical chroma interpolation, 24x64 block; %1 = pp (byte output) or ps (16-bit output)
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_24x64, 4, 7, 13    ; args: r0=src, r1=srcStride, r2=dst, r3=dstStride; r4m = coeffIdx
+    mov             r4d, r4m                    ; coefficient index (5th argument)
+    shl             r4d, 6                      ; * 64: each filter entry is two 32-byte rows in tab_ChromaCoeffVer_32
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    mova            m10, [r5]                   ; taps for rows 0/1
+    mova            m11, [r5 + mmsize]          ; taps for rows 2/3
+    lea             r4, [r1 * 3]                ; r4 = 3 * srcStride
+    sub             r0, r1                      ; back up one row: 4-tap filter reads rows -1..+2
+%ifidn %1,pp
+    mova            m12, [pw_512]               ; pp: pmulhrsw rounding constant
+%else
+    add             r3d, r3d                    ; ps: dst stride in bytes for 16-bit output
+    vbroadcasti128  m12, [pw_2000]              ; ps: DC offset subtracted from intermediate values
+%endif
+    lea             r5, [r3 * 3]                ; r5 = 3 * dstStride
+    mov             r6d, 16                     ; 16 iterations x 4 rows = 64 rows
+.loopH:
+    movu            m0, [r0]                        ; m0 = row 0
+    movu            m1, [r0 + r1]                   ; m1 = row 1
+    punpcklbw       m2, m0, m1
+    punpckhbw       m3, m0, m1
+    pmaddubsw       m2, m10
+    pmaddubsw       m3, m10
+    movu            m0, [r0 + r1 * 2]               ; m0 = row 2
+    punpcklbw       m4, m1, m0
+    punpckhbw       m5, m1, m0
+    pmaddubsw       m4, m10
+    pmaddubsw       m5, m10
+    movu            m1, [r0 + r4]                   ; m1 = row 3
+    punpcklbw       m6, m0, m1
+    punpckhbw       m7, m0, m1
+    pmaddubsw       m8, m6, m11
+    pmaddubsw       m9, m7, m11
+    pmaddubsw       m6, m10
+    pmaddubsw       m7, m10
+    paddw           m2, m8
+    paddw           m3, m9
+%ifidn %1,pp
+    pmulhrsw        m2, m12
+    pmulhrsw        m3, m12
+    packuswb        m2, m3
+    movu            [r2], xm2                   ; store left 16 pixels of the 24-wide row
+    vextracti128    xm2, m2, 1
+    movq            [r2 + 16], xm2              ; store remaining 8 pixels
+%else
+    psubw           m2, m12
+    psubw           m3, m12
+    vperm2i128      m0, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    movu            [r2], m0
+    movu            [r2 + mmsize], xm2
+%endif
+    lea             r0, [r0 + r1 * 4]
+    movu            m0, [r0]                        ; m0 = row 4
+    punpcklbw       m2, m1, m0
+    punpckhbw       m3, m1, m0
+    pmaddubsw       m8, m2, m11
+    pmaddubsw       m9, m3, m11
+    pmaddubsw       m2, m10
+    pmaddubsw       m3, m10
+    paddw           m4, m8
+    paddw           m5, m9
+%ifidn %1,pp
+    pmulhrsw        m4, m12
+    pmulhrsw        m5, m12
+    packuswb        m4, m5
+    movu            [r2 + r3], xm4
+    vextracti128    xm4, m4, 1
+    movq            [r2 + r3 + 16], xm4
+%else
+    psubw           m4, m12
+    psubw           m5, m12
+    vperm2i128      m1, m4, m5, 0x20
+    vperm2i128      m4, m4, m5, 0x31
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 + mmsize], xm4
+%endif
+
+    movu            m1, [r0 + r1]                   ; m1 = row 5
+    punpcklbw       m4, m0, m1
+    punpckhbw       m5, m0, m1
+    pmaddubsw       m4, m11
+    pmaddubsw       m5, m11
+    paddw           m6, m4
+    paddw           m7, m5
+%ifidn %1,pp
+    pmulhrsw        m6, m12
+    pmulhrsw        m7, m12
+    packuswb        m6, m7
+    movu            [r2 + r3 * 2], xm6
+    vextracti128    xm6, m6, 1
+    movq            [r2 + r3 * 2 + 16], xm6
+%else
+    psubw           m6, m12
+    psubw           m7, m12
+    vperm2i128      m0, m6, m7, 0x20
+    vperm2i128      m6, m6, m7, 0x31
+    movu            [r2 + r3 * 2], m0
+    movu            [r2 + r3 * 2 + mmsize], xm6
+%endif
+
+    movu            m0, [r0 + r1 * 2]               ; m0 = row 6
+    punpcklbw       m6, m1, m0
+    punpckhbw       m7, m1, m0
+    pmaddubsw       m6, m11
+    pmaddubsw       m7, m11
+    paddw           m2, m6
+    paddw           m3, m7
+%ifidn %1,pp
+    pmulhrsw        m2, m12
+    pmulhrsw        m3, m12
+    packuswb        m2, m3
+    movu            [r2 + r5], xm2
+    vextracti128    xm2, m2, 1
+    movq            [r2 + r5 + 16], xm2
+%else
+    psubw           m2, m12
+    psubw           m3, m12
+    vperm2i128      m0, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    movu            [r2 + r5], m0
+    movu            [r2 + r5 + mmsize], xm2
+%endif
+    lea             r2, [r2 + r3 * 4]           ; advance dst by four output rows
+    dec             r6d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_24x64 pp    ; byte-output instantiation
+    FILTER_VER_CHROMA_AVX2_24x64 ps    ; 16-bit intermediate-output instantiation
+
+%macro FILTER_VER_CHROMA_AVX2_16x4 1           ; 4-tap vertical chroma interpolation, 16x4 block; %1 = pp (byte output) or ps (16-bit output)
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_16x4, 4, 6, 8      ; args: r0=src, r1=srcStride, r2=dst, r3=dstStride; r4m = coeffIdx
+    mov             r4d, r4m                    ; coefficient index (5th argument)
+    shl             r4d, 6                      ; * 64: each filter entry is two 32-byte rows in tab_ChromaCoeffVer_32
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]                ; r4 = 3 * srcStride
+    sub             r0, r1                      ; back up one row: 4-tap filter reads rows -1..+2
+%ifidn %1,pp
+    mova            m7, [pw_512]                ; pp: pmulhrsw rounding constant
+%else
+    add             r3d, r3d                    ; ps: dst stride in bytes for 16-bit output
+    mova            m7, [pw_2000]               ; ps: DC offset subtracted from intermediate values
+%endif
+
+    movu            xm0, [r0]
+    vinserti128     m0, m0, [r0 + r1 * 2], 1
+    movu            xm1, [r0 + r1]
+    vinserti128     m1, m1, [r0 + r4], 1
+
+    punpcklbw       m2, m0, m1
+    punpckhbw       m3, m0, m1
+    vperm2i128      m4, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    pmaddubsw       m4, [r5]
+    pmaddubsw       m3, m2, [r5 + mmsize]
+    paddw           m4, m3
+    pmaddubsw       m2, [r5]
+
+    vextracti128    xm0, m0, 1
+    lea             r0, [r0 + r1 * 4]
+    vinserti128     m0, m0, [r0], 1
+
+    punpcklbw       m5, m1, m0
+    punpckhbw       m3, m1, m0
+    vperm2i128      m6, m5, m3, 0x20
+    vperm2i128      m5, m5, m3, 0x31
+    pmaddubsw       m6, [r5]
+    pmaddubsw       m3, m5, [r5 + mmsize]
+    paddw           m6, m3
+    pmaddubsw       m5, [r5]
+%ifidn %1,pp
+    pmulhrsw        m4, m7                          ; m4 = word: row 0
+    pmulhrsw        m6, m7                          ; m6 = word: row 1
+    packuswb        m4, m6
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm6, m4, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm6
+%else
+    psubw           m4, m7                          ; m4 = word: row 0
+    psubw           m6, m7                          ; m6 = word: row 1
+    movu            [r2], m4
+    movu            [r2 + r3], m6
+%endif
+    lea             r2, [r2 + r3 * 2]           ; advance dst to rows 2/3
+
+    movu            xm4, [r0 + r1 * 2]
+    vinserti128     m4, m4, [r0 + r1], 1
+    vextracti128    xm1, m4, 1
+    vinserti128     m0, m0, xm1, 0
+
+    punpcklbw       m6, m0, m4
+    punpckhbw       m1, m0, m4
+    vperm2i128      m0, m6, m1, 0x20
+    vperm2i128      m6, m6, m1, 0x31
+    pmaddubsw       m0, [r5 + mmsize]
+    paddw           m5, m0
+    pmaddubsw       m6, [r5 + mmsize]
+    paddw           m2, m6
+
+%ifidn %1,pp
+    pmulhrsw        m2, m7                          ; m2 = word: row 2
+    pmulhrsw        m5, m7                          ; m5 = word: row 3
+    packuswb        m2, m5
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm5, m2, 1
+    movu            [r2], xm2
+    movu            [r2 + r3], xm5
+%else
+    psubw           m2, m7                          ; m2 = word: row 2
+    psubw           m5, m7                          ; m5 = word: row 3
+    movu            [r2], m2
+    movu            [r2 + r3], m5
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_16x4 pp    ; byte-output instantiation
+    FILTER_VER_CHROMA_AVX2_16x4 ps    ; 16-bit intermediate-output instantiation
+
+%macro FILTER_VER_CHROMA_AVX2_12xN 2
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_12x%2, 4, 7, 8
+    mov             r4d, r4m
+    shl             r4d, 6
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,pp
+    mova            m7, [pw_512]
+%else
+    add             r3d, r3d
+    vbroadcasti128  m7, [pw_2000]
+%endif
+    lea             r6, [r3 * 3]
+%rep %2 / 16
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 1 * mmsize]
+    paddw           m0, m4
+    pmaddubsw       m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 1 * mmsize]
+    paddw           m1, m5
+    pmaddubsw       m3, [r5]
+%ifidn %1,pp
+    pmulhrsw        m0, m7                          ; m0 = word: row 0
+    pmulhrsw        m1, m7                          ; m1 = word: row 1
+    packuswb        m0, m1
+    vextracti128    xm1, m0, 1
+    movq            [r2], xm0
+    movd            [r2 + 8], xm1
+    movhps          [r2 + r3], xm0
+    pextrd          [r2 + r3 + 8], xm1, 2
+%else
+    psubw           m0, m7                          ; m0 = word: row 0
+    psubw           m1, m7                          ; m1 = word: row 1
+    movu            [r2], xm0
+    vextracti128    xm0, m0, 1
+    movq            [r2 + 16], xm0
+    movu            [r2 + r3], xm1
+    vextracti128    xm1, m1, 1
+    movq            [r2 + r3 + 16], xm1
+%endif
+
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 1 * mmsize]
+    paddw           m2, m6
+    pmaddubsw       m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm0, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm0, 1
+    pmaddubsw       m0, m5, [r5 + 1 * mmsize]
+    paddw           m3, m0
+    pmaddubsw       m5, [r5]
+%ifidn %1,pp
+    pmulhrsw        m2, m7                          ; m2 = word: row 2
+    pmulhrsw        m3, m7                          ; m3 = word: row 3
+    packuswb        m2, m3
+    vextracti128    xm3, m2, 1
+    movq            [r2 + r3 * 2], xm2
+    movd            [r2 + r3 * 2 + 8], xm3
+    movhps          [r2 + r6], xm2
+    pextrd          [r2 + r6 + 8], xm3, 2
+%else
+    psubw           m2, m7                          ; m2 = word: row 2
+    psubw           m3, m7                          ; m3 = word: row 3
+    movu            [r2 + r3 * 2], xm2
+    vextracti128    xm2, m2, 1
+    movq            [r2 + r3 * 2 + 16], xm2
+    movu            [r2 + r6], xm3
+    vextracti128    xm3, m3, 1
+    movq            [r2 + r6 + 16], xm3
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm0, [r0 + r4]                  ; m0 = row 7
+    punpckhbw       xm3, xm6, xm0
+    punpcklbw       xm6, xm0
+    vinserti128     m6, m6, xm3, 1
+    pmaddubsw       m3, m6, [r5 + 1 * mmsize]
+    paddw           m4, m3
+    pmaddubsw       m6, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm3, [r0]                       ; m3 = row 8
+    punpckhbw       xm1, xm0, xm3
+    punpcklbw       xm0, xm3
+    vinserti128     m0, m0, xm1, 1
+    pmaddubsw       m1, m0, [r5 + 1 * mmsize]
+    paddw           m5, m1
+    pmaddubsw       m0, [r5]
+%ifidn %1,pp
+    pmulhrsw        m4, m7                          ; m4 = word: row 4
+    pmulhrsw        m5, m7                          ; m5 = word: row 5
+    packuswb        m4, m5
+    vextracti128    xm5, m4, 1
+    movq            [r2], xm4
+    movd            [r2 + 8], xm5
+    movhps          [r2 + r3], xm4
+    pextrd          [r2 + r3 + 8], xm5, 2
+%else
+    psubw           m4, m7                          ; m4 = word: row 4
+    psubw           m5, m7                          ; m5 = word: row 5
+    movu            [r2], xm4
+    vextracti128    xm4, m4, 1
+    movq            [r2 + 16], xm4
+    movu            [r2 + r3], xm5
+    vextracti128    xm5, m5, 1
+    movq            [r2 + r3 + 16], xm5
+%endif
+
+    movu            xm1, [r0 + r1]                  ; m1 = row 9
+    punpckhbw       xm2, xm3, xm1
+    punpcklbw       xm3, xm1
+    vinserti128     m3, m3, xm2, 1
+    pmaddubsw       m2, m3, [r5 + 1 * mmsize]
+    paddw           m6, m2
+    pmaddubsw       m3, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 10
+    punpckhbw       xm4, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm4, 1
+    pmaddubsw       m4, m1, [r5 + 1 * mmsize]
+    paddw           m0, m4
+    pmaddubsw       m1, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m6, m7                          ; m6 = word: row 6
+    pmulhrsw        m0, m7                          ; m0 = word: row 7
+    packuswb        m6, m0
+    vextracti128    xm0, m6, 1
+    movq            [r2 + r3 * 2], xm6
+    movd            [r2 + r3 * 2 + 8], xm0
+    movhps          [r2 + r6], xm6
+    pextrd          [r2 + r6 + 8], xm0, 2
+%else
+    psubw           m6, m7                          ; m6 = word: row 6
+    psubw           m0, m7                          ; m0 = word: row 7
+    movu            [r2 + r3 * 2], xm6
+    vextracti128    xm6, m6, 1
+    movq            [r2 + r3 * 2 + 16], xm6
+    movu            [r2 + r6], xm0
+    vextracti128    xm0, m0, 1
+    movq            [r2 + r6 + 16], xm0
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm4, [r0 + r4]                  ; m4 = row 11
+    punpckhbw       xm6, xm2, xm4
+    punpcklbw       xm2, xm4
+    vinserti128     m2, m2, xm6, 1
+    pmaddubsw       m6, m2, [r5 + 1 * mmsize]
+    paddw           m3, m6
+    pmaddubsw       m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm6, [r0]                       ; m6 = row 12
+    punpckhbw       xm0, xm4, xm6
+    punpcklbw       xm4, xm6
+    vinserti128     m4, m4, xm0, 1
+    pmaddubsw       m0, m4, [r5 + 1 * mmsize]
+    paddw           m1, m0
+    pmaddubsw       m4, [r5]
+%ifidn %1,pp
+    pmulhrsw        m3, m7                          ; m3 = word: row 8
+    pmulhrsw        m1, m7                          ; m1 = word: row 9
+    packuswb        m3, m1
+    vextracti128    xm1, m3, 1
+    movq            [r2], xm3
+    movd            [r2 + 8], xm1
+    movhps          [r2 + r3], xm3
+    pextrd          [r2 + r3 + 8], xm1, 2
+%else
+    psubw           m3, m7                          ; m3 = word: row 8
+    psubw           m1, m7                          ; m1 = word: row 9
+    movu            [r2], xm3
+    vextracti128    xm3, m3, 1
+    movq            [r2 + 16], xm3
+    movu            [r2 + r3], xm1
+    vextracti128    xm1, m1, 1
+    movq            [r2 + r3 + 16], xm1
+%endif
+
+    movu            xm0, [r0 + r1]                  ; m0 = row 13
+    punpckhbw       xm1, xm6, xm0
+    punpcklbw       xm6, xm0
+    vinserti128     m6, m6, xm1, 1
+    pmaddubsw       m1, m6, [r5 + 1 * mmsize]
+    paddw           m2, m1
+    pmaddubsw       m6, [r5]
+    movu            xm1, [r0 + r1 * 2]              ; m1 = row 14
+    punpckhbw       xm5, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm5, 1
+    pmaddubsw       m5, m0, [r5 + 1 * mmsize]
+    paddw           m4, m5
+    pmaddubsw       m0, [r5]
+%ifidn %1,pp
+    pmulhrsw        m2, m7                          ; m2 = word: row 10
+    pmulhrsw        m4, m7                          ; m4 = word: row 11
+    packuswb        m2, m4
+    vextracti128    xm4, m2, 1
+    movq            [r2 + r3 * 2], xm2
+    movd            [r2 + r3 * 2 + 8], xm4
+    movhps          [r2 + r6], xm2
+    pextrd          [r2 + r6 + 8], xm4, 2
+%else
+    psubw           m2, m7                          ; m2 = word: row 10
+    psubw           m4, m7                          ; m4 = word: row 11
+    movu            [r2 + r3 * 2], xm2
+    vextracti128    xm2, m2, 1
+    movq            [r2 + r3 * 2 + 16], xm2
+    movu            [r2 + r6], xm4
+    vextracti128    xm4, m4, 1
+    movq            [r2 + r6 + 16], xm4
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm5, [r0 + r4]                  ; m5 = row 15
+    punpckhbw       xm2, xm1, xm5
+    punpcklbw       xm1, xm5
+    vinserti128     m1, m1, xm2, 1
+    pmaddubsw       m2, m1, [r5 + 1 * mmsize]
+    paddw           m6, m2
+    pmaddubsw       m1, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 16
+    punpckhbw       xm3, xm5, xm2
+    punpcklbw       xm5, xm2
+    vinserti128     m5, m5, xm3, 1
+    pmaddubsw       m3, m5, [r5 + 1 * mmsize]
+    paddw           m0, m3
+    pmaddubsw       m5, [r5]
+    movu            xm3, [r0 + r1]                  ; m3 = row 17
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m2, [r5 + 1 * mmsize]
+    paddw           m1, m2
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhbw       xm2, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm2, 1
+    pmaddubsw       m3, [r5 + 1 * mmsize]
+    paddw           m5, m3
+
+%ifidn %1,pp
+    pmulhrsw        m6, m7                          ; m6 = word: row 12
+    pmulhrsw        m0, m7                          ; m0 = word: row 13
+    pmulhrsw        m1, m7                          ; m1 = word: row 14
+    pmulhrsw        m5, m7                          ; m5 = word: row 15
+    packuswb        m6, m0
+    packuswb        m1, m5
+    vextracti128    xm0, m6, 1
+    vextracti128    xm5, m1, 1
+    movq            [r2], xm6
+    movd            [r2 + 8], xm0
+    movhps          [r2 + r3], xm6
+    pextrd          [r2 + r3 + 8], xm0, 2
+    movq            [r2 + r3 * 2], xm1
+    movd            [r2 + r3 * 2 + 8], xm5
+    movhps          [r2 + r6], xm1
+    pextrd          [r2 + r6 + 8], xm5, 2
+%else
+    psubw           m6, m7                          ; m6 = word: row 12
+    psubw           m0, m7                          ; m0 = word: row 13
+    psubw           m1, m7                          ; m1 = word: row 14
+    psubw           m5, m7                          ; m5 = word: row 15
+    movu            [r2], xm6
+    vextracti128    xm6, m6, 1
+    movq            [r2 + 16], xm6
+    movu            [r2 + r3], xm0
+    vextracti128    xm0, m0, 1
+    movq            [r2 + r3 + 16], xm0
+    movu            [r2 + r3 * 2], xm1
+    vextracti128    xm1, m1, 1
+    movq            [r2 + r3 * 2 + 16], xm1
+    movu            [r2 + r6], xm5
+    vextracti128    xm5, m5, 1
+    movq            [r2 + r6 + 16], xm5
+%endif
+    lea             r2, [r2 + r3 * 4]
+%endrep
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_12xN pp, 16
+    FILTER_VER_CHROMA_AVX2_12xN ps, 16
+    FILTER_VER_CHROMA_AVX2_12xN pp, 32
+    FILTER_VER_CHROMA_AVX2_12xN ps, 32
+
+;-----------------------------------------------------------------------------
+;void interp_4tap_vert_pp_24x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W24 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_24x%2, 4, 6, 8
+
+    mov         r4d,       r4m
+    sub         r0,        r1
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]
+    movd        m0,        [r5 + r4 * 4]
+%else
+    movd        m0,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m1,        m0,       [tab_Vm]
+    pshufb      m0,        [tab_Vm + 16]
+
+    mov         r4d,       %2
+
+.loop:
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    pmaddubsw   m4,        m1
+    pmaddubsw   m2,        m1
+
+    lea         r5,        [r0 + 2 * r1]
+    movu        m5,        [r5]
+    movu        m7,        [r5 + r1]
+
+    punpcklbw   m6,        m5,        m7
+    pmaddubsw   m6,        m0
+    paddw       m4,        m6
+
+    punpckhbw   m6,        m5,        m7
+    pmaddubsw   m6,        m0
+    paddw       m2,        m6
+
+    mova        m6,        [pw_512]
+
+    pmulhrsw    m4,        m6
+    pmulhrsw    m2,        m6
+
+    packuswb    m4,        m2
+
+    movu        [r2],      m4
+
+    punpcklbw   m4,        m3,        m5
+    punpckhbw   m3,        m5
+
+    pmaddubsw   m4,        m1
+    pmaddubsw   m3,        m1
+
+    movu        m2,        [r5 + 2 * r1]
+
+    punpcklbw   m5,        m7,        m2
+    punpckhbw   m7,        m2
+
+    pmaddubsw   m5,        m0
+    pmaddubsw   m7,        m0
+
+    paddw       m4,        m5
+    paddw       m3,        m7
+
+    pmulhrsw    m4,        m6
+    pmulhrsw    m3,        m6
+
+    packuswb    m4,        m3
+
+    movu        [r2 + r3],      m4
+
+    movq        m2,        [r0 + 16]
+    movq        m3,        [r0 + r1 + 16]
+    movq        m4,        [r5 + 16]
+    movq        m5,        [r5 + r1 + 16]
+
+    punpcklbw   m2,        m3
+    punpcklbw   m4,        m5
+
+    pmaddubsw   m2,        m1
+    pmaddubsw   m4,        m0
+
+    paddw       m2,        m4
+
+    pmulhrsw    m2,        m6
+
+    movq        m3,        [r0 + r1 + 16]
+    movq        m4,        [r5 + 16]
+    movq        m5,        [r5 + r1 + 16]
+    movq        m7,        [r5 + 2 * r1 + 16]
+
+    punpcklbw   m3,        m4
+    punpcklbw   m5,        m7
+
+    pmaddubsw   m3,        m1
+    pmaddubsw   m5,        m0
+
+    paddw       m3,        m5
+
+    pmulhrsw    m3,        m6
+    packuswb    m2,        m3
+
+    movh        [r2 + 16], m2
+    movhps      [r2 + r3 + 16], m2
+
+    mov         r0,        r5
+    lea         r2,        [r2 + 2 * r3]
+
+    sub         r4,        2
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V4_W24 24, 32
+
+    FILTER_V4_W24 24, 64
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_32x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W32 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_%1x%2, 4, 6, 8
+
+    mov         r4d,       r4m
+    sub         r0,        r1
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]
+    movd        m0,        [r5 + r4 * 4]
+%else
+    movd        m0,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m1,        m0,       [tab_Vm]
+    pshufb      m0,        [tab_Vm + 16]
+
+    mova        m7,        [pw_512]
+
+    mov         r4d,       %2
+
+.loop:
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    pmaddubsw   m4,        m1
+    pmaddubsw   m2,        m1
+
+    lea         r5,        [r0 + 2 * r1]
+    movu        m3,        [r5]
+    movu        m5,        [r5 + r1]
+
+    punpcklbw   m6,        m3,        m5
+    punpckhbw   m3,        m5
+
+    pmaddubsw   m6,        m0
+    pmaddubsw   m3,        m0
+
+    paddw       m4,        m6
+    paddw       m2,        m3
+
+    pmulhrsw    m4,        m7
+    pmulhrsw    m2,        m7
+
+    packuswb    m4,        m2
+
+    movu        [r2],      m4
+
+    movu        m2,        [r0 + 16]
+    movu        m3,        [r0 + r1 + 16]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    pmaddubsw   m4,        m1
+    pmaddubsw   m2,        m1
+
+    movu        m3,        [r5 + 16]
+    movu        m5,        [r5 + r1 + 16]
+
+    punpcklbw   m6,        m3,        m5
+    punpckhbw   m3,        m5
+
+    pmaddubsw   m6,        m0
+    pmaddubsw   m3,        m0
+
+    paddw       m4,        m6
+    paddw       m2,        m3
+
+    pmulhrsw    m4,        m7
+    pmulhrsw    m2,        m7
+
+    packuswb    m4,        m2
+
+    movu        [r2 + 16], m4
+
+    lea         r0,        [r0 + r1]
+    lea         r2,        [r2 + r3]
+
+    dec         r4
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V4_W32 32,  8
+    FILTER_V4_W32 32, 16
+    FILTER_V4_W32 32, 24
+    FILTER_V4_W32 32, 32
+
+    FILTER_V4_W32 32, 48
+    FILTER_V4_W32 32, 64
+
+%macro FILTER_VER_CHROMA_AVX2_32xN 2
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_32x%2, 4, 7, 13
+    mov             r4d, r4m
+    shl             r4d, 6
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    mova            m10, [r5]
+    mova            m11, [r5 + mmsize]
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,pp
+    mova            m12, [pw_512]
+%else
+    add             r3d, r3d
+    vbroadcasti128  m12, [pw_2000]
+%endif
+    lea             r5, [r3 * 3]
+    mov             r6d, %2 / 4
+.loopW:
+    movu            m0, [r0]                        ; m0 = row 0
+    movu            m1, [r0 + r1]                   ; m1 = row 1
+    punpcklbw       m2, m0, m1
+    punpckhbw       m3, m0, m1
+    pmaddubsw       m2, m10
+    pmaddubsw       m3, m10
+    movu            m0, [r0 + r1 * 2]               ; m0 = row 2
+    punpcklbw       m4, m1, m0
+    punpckhbw       m5, m1, m0
+    pmaddubsw       m4, m10
+    pmaddubsw       m5, m10
+    movu            m1, [r0 + r4]                   ; m1 = row 3
+    punpcklbw       m6, m0, m1
+    punpckhbw       m7, m0, m1
+    pmaddubsw       m8, m6, m11
+    pmaddubsw       m9, m7, m11
+    pmaddubsw       m6, m10
+    pmaddubsw       m7, m10
+    paddw           m2, m8
+    paddw           m3, m9
+%ifidn %1,pp
+    pmulhrsw        m2, m12
+    pmulhrsw        m3, m12
+    packuswb        m2, m3
+    movu            [r2], m2
+%else
+    psubw           m2, m12
+    psubw           m3, m12
+    vperm2i128      m0, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    movu            [r2], m0
+    movu            [r2 + mmsize], m2
+%endif
+    lea             r0, [r0 + r1 * 4]
+    movu            m0, [r0]                        ; m0 = row 4
+    punpcklbw       m2, m1, m0
+    punpckhbw       m3, m1, m0
+    pmaddubsw       m8, m2, m11
+    pmaddubsw       m9, m3, m11
+    pmaddubsw       m2, m10
+    pmaddubsw       m3, m10
+    paddw           m4, m8
+    paddw           m5, m9
+%ifidn %1,pp
+    pmulhrsw        m4, m12
+    pmulhrsw        m5, m12
+    packuswb        m4, m5
+    movu            [r2 + r3], m4
+%else
+    psubw           m4, m12
+    psubw           m5, m12
+    vperm2i128      m1, m4, m5, 0x20
+    vperm2i128      m4, m4, m5, 0x31
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 + mmsize], m4
+%endif
+
+    movu            m1, [r0 + r1]                   ; m1 = row 5
+    punpcklbw       m4, m0, m1
+    punpckhbw       m5, m0, m1
+    pmaddubsw       m4, m11
+    pmaddubsw       m5, m11
+    paddw           m6, m4
+    paddw           m7, m5
+%ifidn %1,pp
+    pmulhrsw        m6, m12
+    pmulhrsw        m7, m12
+    packuswb        m6, m7
+    movu            [r2 + r3 * 2], m6
+%else
+    psubw           m6, m12
+    psubw           m7, m12
+    vperm2i128      m0, m6, m7, 0x20
+    vperm2i128      m6, m6, m7, 0x31
+    movu            [r2 + r3 * 2], m0
+    movu            [r2 + r3 * 2 + mmsize], m6
+%endif
+
+    movu            m0, [r0 + r1 * 2]               ; m0 = row 6
+    punpcklbw       m6, m1, m0
+    punpckhbw       m7, m1, m0
+    pmaddubsw       m6, m11
+    pmaddubsw       m7, m11
+    paddw           m2, m6
+    paddw           m3, m7
+%ifidn %1,pp
+    pmulhrsw        m2, m12
+    pmulhrsw        m3, m12
+    packuswb        m2, m3
+    movu            [r2 + r5], m2
+%else
+    psubw           m2, m12
+    psubw           m3, m12
+    vperm2i128      m0, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    movu            [r2 + r5], m0
+    movu            [r2 + r5 + mmsize], m2
+%endif
+    lea             r2, [r2 + r3 * 4]
+    dec             r6d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_32xN pp, 64
+    FILTER_VER_CHROMA_AVX2_32xN pp, 48
+    FILTER_VER_CHROMA_AVX2_32xN pp, 32
+    FILTER_VER_CHROMA_AVX2_32xN pp, 24
+    FILTER_VER_CHROMA_AVX2_32xN pp, 16
+    FILTER_VER_CHROMA_AVX2_32xN pp, 8
+    FILTER_VER_CHROMA_AVX2_32xN ps, 64
+    FILTER_VER_CHROMA_AVX2_32xN ps, 48
+    FILTER_VER_CHROMA_AVX2_32xN ps, 32
+    FILTER_VER_CHROMA_AVX2_32xN ps, 24
+    FILTER_VER_CHROMA_AVX2_32xN ps, 16
+    FILTER_VER_CHROMA_AVX2_32xN ps, 8
+
+%macro FILTER_VER_CHROMA_AVX2_48x64 1
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_48x64, 4, 8, 13
+    mov             r4d, r4m
+    shl             r4d, 6
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    mova            m10, [r5]
+    mova            m11, [r5 + mmsize]
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,pp
+    mova            m12, [pw_512]
+%else
+    add             r3d, r3d
+    vbroadcasti128  m12, [pw_2000]
+%endif
+    lea             r5, [r3 * 3]
+    lea             r7, [r1 * 4]
+    mov             r6d, 16
+.loopH:
+    movu            m0, [r0]                        ; m0 = row 0
+    movu            m1, [r0 + r1]                   ; m1 = row 1
+    punpcklbw       m2, m0, m1
+    punpckhbw       m3, m0, m1
+    pmaddubsw       m2, m10
+    pmaddubsw       m3, m10
+    movu            m0, [r0 + r1 * 2]               ; m0 = row 2
+    punpcklbw       m4, m1, m0
+    punpckhbw       m5, m1, m0
+    pmaddubsw       m4, m10
+    pmaddubsw       m5, m10
+    movu            m1, [r0 + r4]                   ; m1 = row 3
+    punpcklbw       m6, m0, m1
+    punpckhbw       m7, m0, m1
+    pmaddubsw       m8, m6, m11
+    pmaddubsw       m9, m7, m11
+    pmaddubsw       m6, m10
+    pmaddubsw       m7, m10
+    paddw           m2, m8
+    paddw           m3, m9
+%ifidn %1,pp
+    pmulhrsw        m2, m12
+    pmulhrsw        m3, m12
+    packuswb        m2, m3
+    movu            [r2], m2
+%else
+    psubw           m2, m12
+    psubw           m3, m12
+    vperm2i128      m0, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    movu            [r2], m0
+    movu            [r2 + mmsize], m2
+%endif
+    lea             r0, [r0 + r1 * 4]
+    movu            m0, [r0]                        ; m0 = row 4
+    punpcklbw       m2, m1, m0
+    punpckhbw       m3, m1, m0
+    pmaddubsw       m8, m2, m11
+    pmaddubsw       m9, m3, m11
+    pmaddubsw       m2, m10
+    pmaddubsw       m3, m10
+    paddw           m4, m8
+    paddw           m5, m9
+%ifidn %1,pp
+    pmulhrsw        m4, m12
+    pmulhrsw        m5, m12
+    packuswb        m4, m5
+    movu            [r2 + r3], m4
+%else
+    psubw           m4, m12
+    psubw           m5, m12
+    vperm2i128      m1, m4, m5, 0x20
+    vperm2i128      m4, m4, m5, 0x31
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 + mmsize], m4
+%endif
+
+    movu            m1, [r0 + r1]                   ; m1 = row 5
+    punpcklbw       m4, m0, m1
+    punpckhbw       m5, m0, m1
+    pmaddubsw       m4, m11
+    pmaddubsw       m5, m11
+    paddw           m6, m4
+    paddw           m7, m5
+%ifidn %1,pp
+    pmulhrsw        m6, m12
+    pmulhrsw        m7, m12
+    packuswb        m6, m7
+    movu            [r2 + r3 * 2], m6
+%else
+    psubw           m6, m12
+    psubw           m7, m12
+    vperm2i128      m0, m6, m7, 0x20
+    vperm2i128      m6, m6, m7, 0x31
+    movu            [r2 + r3 * 2], m0
+    movu            [r2 + r3 * 2 + mmsize], m6
+%endif
+
+    movu            m0, [r0 + r1 * 2]               ; m0 = row 6
+    punpcklbw       m6, m1, m0
+    punpckhbw       m7, m1, m0
+    pmaddubsw       m6, m11
+    pmaddubsw       m7, m11
+    paddw           m2, m6
+    paddw           m3, m7
+%ifidn %1,pp
+    pmulhrsw        m2, m12
+    pmulhrsw        m3, m12
+    packuswb        m2, m3
+    movu            [r2 + r5], m2
+    add             r2, 32
+%else
+    psubw           m2, m12
+    psubw           m3, m12
+    vperm2i128      m0, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    movu            [r2 + r5], m0
+    movu            [r2 + r5 + mmsize], m2
+    add             r2, 64
+%endif
+    sub             r0, r7
+
+    movu            xm0, [r0 + 32]                  ; m0 = row 0
+    movu            xm1, [r0 + r1 + 32]             ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, m10
+    movu            xm2, [r0 + r1 * 2 + 32]         ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, m10
+    movu            xm3, [r0 + r4 + 32]             ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, m11
+    paddw           m0, m4
+    pmaddubsw       m2, m10
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0 + 32]                  ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, m11
+    paddw           m1, m5
+    pmaddubsw       m3, m10
+    movu            xm5, [r0 + r1 + 32]             ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m4, m11
+    paddw           m2, m4
+    movu            xm6, [r0 + r1 * 2 + 32]         ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m5, m11
+    paddw           m3, m5
+%ifidn %1,pp
+    pmulhrsw        m0, m12                         ; m0 = word: row 0
+    pmulhrsw        m1, m12                         ; m1 = word: row 1
+    pmulhrsw        m2, m12                         ; m2 = word: row 2
+    pmulhrsw        m3, m12                         ; m3 = word: row 3
+    packuswb        m0, m1
+    packuswb        m2, m3
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r5], xm3
+    lea             r2, [r2 + r3 * 4 - 32]
+%else
+    psubw           m0, m12                         ; m0 = word: row 0
+    psubw           m1, m12                         ; m1 = word: row 1
+    psubw           m2, m12                         ; m2 = word: row 2
+    psubw           m3, m12                         ; m3 = word: row 3
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r5], m3
+    lea             r2, [r2 + r3 * 4 - 64]
+%endif
+    dec             r6d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_48x64 pp
+    FILTER_VER_CHROMA_AVX2_48x64 ps
+
+%macro FILTER_VER_CHROMA_AVX2_64xN 2
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_64x%2, 4, 8, 13
+    mov             r4d, r4m
+    shl             r4d, 6
+
+%ifdef PIC
+    lea             r5, [tab_ChromaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_ChromaCoeffVer_32 + r4]
+%endif
+
+    mova            m10, [r5]
+    mova            m11, [r5 + mmsize]
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,pp
+    mova            m12, [pw_512]
+%else
+    add             r3d, r3d
+    vbroadcasti128  m12, [pw_2000]
+%endif
+    lea             r5, [r3 * 3]
+    lea             r7, [r1 * 4]
+    mov             r6d, %2 / 4
+.loopH:
+%assign x 0
+%rep 2
+    movu            m0, [r0 + x]                    ; m0 = row 0
+    movu            m1, [r0 + r1 + x]               ; m1 = row 1
+    punpcklbw       m2, m0, m1
+    punpckhbw       m3, m0, m1
+    pmaddubsw       m2, m10
+    pmaddubsw       m3, m10
+    movu            m0, [r0 + r1 * 2 + x]           ; m0 = row 2
+    punpcklbw       m4, m1, m0
+    punpckhbw       m5, m1, m0
+    pmaddubsw       m4, m10
+    pmaddubsw       m5, m10
+    movu            m1, [r0 + r4 + x]               ; m1 = row 3
+    punpcklbw       m6, m0, m1
+    punpckhbw       m7, m0, m1
+    pmaddubsw       m8, m6, m11
+    pmaddubsw       m9, m7, m11
+    pmaddubsw       m6, m10
+    pmaddubsw       m7, m10
+    paddw           m2, m8
+    paddw           m3, m9
+%ifidn %1,pp
+    pmulhrsw        m2, m12
+    pmulhrsw        m3, m12
+    packuswb        m2, m3
+    movu            [r2], m2
+%else
+    psubw           m2, m12
+    psubw           m3, m12
+    vperm2i128      m0, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    movu            [r2], m0
+    movu            [r2 + mmsize], m2
+%endif
+    lea             r0, [r0 + r1 * 4]
+    movu            m0, [r0 + x]                    ; m0 = row 4
+    punpcklbw       m2, m1, m0
+    punpckhbw       m3, m1, m0
+    pmaddubsw       m8, m2, m11
+    pmaddubsw       m9, m3, m11
+    pmaddubsw       m2, m10
+    pmaddubsw       m3, m10
+    paddw           m4, m8
+    paddw           m5, m9
+%ifidn %1,pp
+    pmulhrsw        m4, m12
+    pmulhrsw        m5, m12
+    packuswb        m4, m5
+    movu            [r2 + r3], m4
+%else
+    psubw           m4, m12
+    psubw           m5, m12
+    vperm2i128      m1, m4, m5, 0x20
+    vperm2i128      m4, m4, m5, 0x31
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 + mmsize], m4
+%endif
+
+    movu            m1, [r0 + r1 + x]               ; m1 = row 5
+    punpcklbw       m4, m0, m1
+    punpckhbw       m5, m0, m1
+    pmaddubsw       m4, m11
+    pmaddubsw       m5, m11
+    paddw           m6, m4
+    paddw           m7, m5
+%ifidn %1,pp
+    pmulhrsw        m6, m12
+    pmulhrsw        m7, m12
+    packuswb        m6, m7
+    movu            [r2 + r3 * 2], m6
+%else
+    psubw           m6, m12
+    psubw           m7, m12
+    vperm2i128      m0, m6, m7, 0x20
+    vperm2i128      m6, m6, m7, 0x31
+    movu            [r2 + r3 * 2], m0
+    movu            [r2 + r3 * 2 + mmsize], m6
+%endif
+
+    movu            m0, [r0 + r1 * 2 + x]           ; m0 = row 6
+    punpcklbw       m6, m1, m0
+    punpckhbw       m7, m1, m0
+    pmaddubsw       m6, m11
+    pmaddubsw       m7, m11
+    paddw           m2, m6
+    paddw           m3, m7
+%ifidn %1,pp
+    pmulhrsw        m2, m12
+    pmulhrsw        m3, m12
+    packuswb        m2, m3
+    movu            [r2 + r5], m2
+    add             r2, 32
+%else
+    psubw           m2, m12
+    psubw           m3, m12
+    vperm2i128      m0, m2, m3, 0x20
+    vperm2i128      m2, m2, m3, 0x31
+    movu            [r2 + r5], m0
+    movu            [r2 + r5 + mmsize], m2
+    add             r2, 64
+%endif
+    sub             r0, r7
+%assign x x+32
+%endrep
+%ifidn %1,pp
+    lea             r2, [r2 + r3 * 4 - 64]
+%else
+    lea             r2, [r2 + r3 * 4 - 128]
+%endif
+    add             r0, r7
+    dec             r6d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_AVX2_64xN pp, 64
+    FILTER_VER_CHROMA_AVX2_64xN pp, 48
+    FILTER_VER_CHROMA_AVX2_64xN pp, 32
+    FILTER_VER_CHROMA_AVX2_64xN pp, 16
+    FILTER_VER_CHROMA_AVX2_64xN ps, 64
+    FILTER_VER_CHROMA_AVX2_64xN ps, 48
+    FILTER_VER_CHROMA_AVX2_64xN ps, 32
+    FILTER_VER_CHROMA_AVX2_64xN ps, 16
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W16n_H2 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_pp_%1x%2, 4, 7, 8
+
+    mov         r4d,       r4m
+    sub         r0,        r1
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeff]
+    movd        m0,        [r5 + r4 * 4]
+%else
+    movd        m0,        [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m1,        m0,       [tab_Vm]
+    pshufb      m0,        [tab_Vm + 16]
+
+    mov         r4d,       %2/2
+
+.loop:
+
+    mov         r6d,       %1/16
+
+.loopW:
+
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    pmaddubsw   m4,        m1
+    pmaddubsw   m2,        m1
+
+    lea         r5,        [r0 + 2 * r1]
+    movu        m5,        [r5]
+    movu        m6,        [r5 + r1]
+
+    punpckhbw   m7,        m5,        m6
+    pmaddubsw   m7,        m0
+    paddw       m2,        m7
+
+    punpcklbw   m7,        m5,        m6
+    pmaddubsw   m7,        m0
+    paddw       m4,        m7
+
+    mova        m7,        [pw_512]
+
+    pmulhrsw    m4,        m7
+    pmulhrsw    m2,        m7
+
+    packuswb    m4,        m2
+
+    movu        [r2],      m4
+
+    punpcklbw   m4,        m3,        m5
+    punpckhbw   m3,        m5
+
+    pmaddubsw   m4,        m1
+    pmaddubsw   m3,        m1
+
+    movu        m5,        [r5 + 2 * r1]
+
+    punpcklbw   m2,        m6,        m5
+    punpckhbw   m6,        m5
+
+    pmaddubsw   m2,        m0
+    pmaddubsw   m6,        m0
+
+    paddw       m4,        m2
+    paddw       m3,        m6
+
+    pmulhrsw    m4,        m7
+    pmulhrsw    m3,        m7
+
+    packuswb    m4,        m3
+
+    movu        [r2 + r3],      m4
+
+    add         r0,        16
+    add         r2,        16
+    dec         r6d
+    jnz         .loopW
+
+    lea         r0,        [r0 + r1 * 2 - %1]
+    lea         r2,        [r2 + r3 * 2 - %1]
+
+    dec         r4d
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V4_W16n_H2 64, 64
+    FILTER_V4_W16n_H2 64, 32
+    FILTER_V4_W16n_H2 64, 48
+    FILTER_V4_W16n_H2 48, 64
+    FILTER_V4_W16n_H2 64, 16
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_2xN 1    ; %1 = block height; emits filterPixelToShort_2x%1 (pixel -> pre-shifted int16_t copy)
+INIT_XMM sse4
+cglobal filterPixelToShort_2x%1, 3, 4, 3
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride is in int16_t units; convert to bytes
+
+    ; load constant
+    mova        m1, [pb_128]
+    mova        m2, [tab_c_64_n64]
+
+%rep %1/2    ; fully unrolled: two rows per repetition
+    movd        m0, [r0]
+    pinsrd      m0, [r0 + r1], 1
+    punpcklbw   m0, m1
+    pmaddubsw   m0, m2    ; with tab_c_64_n64 = {64,-64}: pix*64 - 128*64 = (pix << 6) - 8192, matching the pw_2000 AVX2 path
+
+    movd        [r2 + r3 * 0], m0
+    pextrd      [r2 + r3 * 1], m0, 2
+
+    lea         r0, [r0 + r1 * 2]
+    lea         r2, [r2 + r3 * 2]
+%endrep
+    RET
+%endmacro
+    P2S_H_2xN 4
+    P2S_H_2xN 8
+    P2S_H_2xN 16
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_4xN 1    ; %1 = block height; emits filterPixelToShort_4x%1
+INIT_XMM sse4
+cglobal filterPixelToShort_4x%1, 3, 6, 4
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r4, [r3 * 3]
+    lea         r5, [r1 * 3]
+
+    ; load constant
+    mova        m2, [pb_128]
+    mova        m3, [tab_c_64_n64]
+
+%assign x 0
+%rep %1/4    ; fully unrolled: four rows per repetition
+    movd        m0, [r0]
+    pinsrd      m0, [r0 + r1], 1
+    punpcklbw   m0, m2
+    pmaddubsw   m0, m3    ; (pix << 6) - 8192
+
+    movd        m1, [r0 + r1 * 2]
+    pinsrd      m1, [r0 + r5], 1
+    punpcklbw   m1, m2
+    pmaddubsw   m1, m3
+
+    movq        [r2 + r3 * 0], m0
+    movq        [r2 + r3 * 2], m1
+    movhps      [r2 + r3 * 1], m0
+    movhps      [r2 + r4], m1
+%assign x x+1
+%if (x != %1/4)    ; no pointer advance needed after the last row group
+    lea         r0, [r0 + r1 * 4]
+    lea         r2, [r2 + r3 * 4]
+%endif
+%endrep
+    RET
+%endmacro
+    P2S_H_4xN 4
+    P2S_H_4xN 8
+    P2S_H_4xN 16
+    P2S_H_4xN 32
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_6xN 1    ; %1 = block height; emits filterPixelToShort_6x%1
+INIT_XMM sse4
+cglobal filterPixelToShort_6x%1, 3, 7, 6
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r4, [r1 * 3]
+    lea         r5, [r3 * 3]
+
+    ; load height
+    mov         r6d, %1/4    ; four rows per loop pass
+
+    ; load constant
+    mova        m4, [pb_128]
+    mova        m5, [tab_c_64_n64]
+
+.loop:
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5    ; (pix << 6) - 8192
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r4]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movh        [r2 + r3 * 0], m0
+    pextrd      [r2 + r3 * 0 + 8], m0, 2    ; words 4-5 complete the 6-pixel row
+    movh        [r2 + r3 * 1], m1
+    pextrd      [r2 + r3 * 1 + 8], m1, 2
+    movh        [r2 + r3 * 2], m2
+    pextrd      [r2 + r3 * 2 + 8], m2, 2
+    movh        [r2 + r5], m3
+    pextrd      [r2 + r5 + 8], m3, 2
+
+    lea         r0, [r0 + r1 * 4]
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+    P2S_H_6xN 8
+    P2S_H_6xN 16
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_8xN 1    ; %1 = block height; emits filterPixelToShort_8x%1
+INIT_XMM ssse3
+cglobal filterPixelToShort_8x%1, 3, 7, 6
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r5, [r1 * 3]
+    lea         r6, [r3 * 3]
+
+    ; load height
+    mov         r4d, %1/4
+
+    ; load constant
+    mova        m4, [pb_128]
+    mova        m5, [tab_c_64_n64]
+
+.loop:    ; fix: label previously lacked the trailing colon (orphan-label warning; all sibling macros use ".loop:")
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5    ; (pix << 6) - 8192
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0], m0
+    movu        [r2 + r3 * 1], m1
+    movu        [r2 + r3 * 2], m2
+    movu        [r2 + r6], m3
+
+    lea         r0, [r0 + r1 * 4]
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r4d
+    jnz         .loop
+    RET
+%endmacro
+    P2S_H_8xN 8
+    P2S_H_8xN 4
+    P2S_H_8xN 16
+    P2S_H_8xN 32
+    P2S_H_8xN 12
+    P2S_H_8xN 64
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal filterPixelToShort_8x6, 3, 7, 5    ; fully unrolled 8x6 variant (height 6 does not fit the x4 loop macros)
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r4, [r1 * 3]
+    lea         r5, [r1 * 5]
+    lea         r6, [r3 * 3]
+
+    ; load constant
+    mova        m3, [pb_128]
+    mova        m4, [tab_c_64_n64]
+
+    movh        m0, [r0]
+    punpcklbw   m0, m3
+    pmaddubsw   m0, m4    ; (pix << 6) - 8192
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m3
+    pmaddubsw   m1, m4
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m3
+    pmaddubsw   m2, m4
+
+    movu        [r2 + r3 * 0], m0
+    movu        [r2 + r3 * 1], m1
+    movu        [r2 + r3 * 2], m2
+
+    movh        m0, [r0 + r4]
+    punpcklbw   m0, m3
+    pmaddubsw   m0, m4
+
+    movh        m1, [r0 + r1 * 4]
+    punpcklbw   m1, m3
+    pmaddubsw   m1, m4
+
+    movh        m2, [r0 + r5]
+    punpcklbw   m2, m3
+    pmaddubsw   m2, m4
+
+    movu        [r2 + r6 ], m0
+    movu        [r2 + r3 * 4], m1
+    lea         r2, [r2 + r3 * 4]    ; rebase dst so row 5 is addressable as r2 + r3
+    movu        [r2 + r3], m2
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_16xN 1    ; %1 = block height; emits filterPixelToShort_16x%1 (two 8-pixel strips per row)
+INIT_XMM ssse3
+cglobal filterPixelToShort_16x%1, 3, 7, 6
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r4, [r3 * 3]
+    lea         r5, [r1 * 3]
+
+    ; load height
+    mov         r6d, %1/4
+
+    ; load constant
+    mova        m4, [pb_128]
+    mova        m5, [tab_c_64_n64]
+
+.loop:
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5    ; (pix << 6) - 8192
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0], m0
+    movu        [r2 + r3 * 1], m1
+    movu        [r2 + r3 * 2], m2
+    movu        [r2 + r4], m3
+
+    lea         r0, [r0 + 8]    ; second 8-pixel strip of the same four rows
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 16], m0
+    movu        [r2 + r3 * 1 + 16], m1
+    movu        [r2 + r3 * 2 + 16], m2
+    movu        [r2 + r4 + 16], m3
+
+    lea         r0, [r0 + r1 * 4 - 8]    ; undo the +8 strip offset, advance four rows
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+    P2S_H_16xN 16
+    P2S_H_16xN 4
+    P2S_H_16xN 8
+    P2S_H_16xN 12
+    P2S_H_16xN 32
+    P2S_H_16xN 64
+    P2S_H_16xN 24
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_32xN 1    ; %1 = block height; emits filterPixelToShort_32x%1 (four 8-pixel strips per row)
+INIT_XMM ssse3
+cglobal filterPixelToShort_32x%1, 3, 7, 6
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r4, [r3 * 3]
+    lea         r5, [r1 * 3]
+
+    ; load height
+    mov         r6d, %1/4
+
+    ; load constant
+    mova        m4, [pb_128]
+    mova        m5, [tab_c_64_n64]
+
+.loop:
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5    ; (pix << 6) - 8192
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0], m0
+    movu        [r2 + r3 * 1], m1
+    movu        [r2 + r3 * 2], m2
+    movu        [r2 + r4], m3
+
+    lea         r0, [r0 + 8]    ; strip 2
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 16], m0
+    movu        [r2 + r3 * 1 + 16], m1
+    movu        [r2 + r3 * 2 + 16], m2
+    movu        [r2 + r4 + 16], m3
+
+    lea         r0, [r0 + 8]    ; strip 3
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 32], m0
+    movu        [r2 + r3 * 1 + 32], m1
+    movu        [r2 + r3 * 2 + 32], m2
+    movu        [r2 + r4 + 32], m3
+
+    lea         r0, [r0 + 8]    ; strip 4
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 48], m0
+    movu        [r2 + r3 * 1 + 48], m1
+    movu        [r2 + r3 * 2 + 48], m2
+    movu        [r2 + r4 + 48], m3
+
+    lea         r0, [r0 + r1 * 4 - 24]    ; undo the three +8 strip offsets, advance four rows
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+    P2S_H_32xN 32
+    P2S_H_32xN 8
+    P2S_H_32xN 16
+    P2S_H_32xN 24
+    P2S_H_32xN 64
+    P2S_H_32xN 48
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_32xN_avx2 1    ; %1 = block height; AVX2 filterPixelToShort_32x%1, two 16-pixel halves per row
+INIT_YMM avx2
+cglobal filterPixelToShort_32x%1, 3, 7, 3
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r5, [r1 * 3]
+    lea         r6, [r3 * 3]
+
+    ; load height
+    mov         r4d, %1/4
+
+    ; load constant
+    vpbroadcastd m2, [pw_2000]
+
+.loop:
+    pmovzxbw    m0, [r0 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + 1 * mmsize/2]
+    psllw       m0, 6    ; pix << 6
+    psllw       m1, 6
+    psubw       m0, m2    ; - 0x2000, same scaling as the SSE tab_c_64_n64 path
+    psubw       m1, m2
+    movu        [r2 + 0 * mmsize], m0
+    movu        [r2 + 1 * mmsize], m1
+
+    pmovzxbw    m0, [r0 + r1 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + r1 + 1 * mmsize/2]
+    psllw       m0, 6
+    psllw       m1, 6
+    psubw       m0, m2
+    psubw       m1, m2
+    movu        [r2 + r3 + 0 * mmsize], m0
+    movu        [r2 + r3 + 1 * mmsize], m1
+
+    pmovzxbw    m0, [r0 + r1 * 2 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + r1 * 2 + 1 * mmsize/2]
+    psllw       m0, 6
+    psllw       m1, 6
+    psubw       m0, m2
+    psubw       m1, m2
+    movu        [r2 + r3 * 2 + 0 * mmsize], m0
+    movu        [r2 + r3 * 2 + 1 * mmsize], m1
+
+    pmovzxbw    m0, [r0 + r5 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + r5 + 1 * mmsize/2]
+    psllw       m0, 6
+    psllw       m1, 6
+    psubw       m0, m2
+    psubw       m1, m2
+    movu        [r2 + r6 + 0 * mmsize], m0
+    movu        [r2 + r6 + 1 * mmsize], m1
+
+    lea         r0, [r0 + r1 * 4]
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r4d
+    jnz        .loop
+    RET
+%endmacro
+    P2S_H_32xN_avx2 32
+    P2S_H_32xN_avx2 8
+    P2S_H_32xN_avx2 16
+    P2S_H_32xN_avx2 24
+    P2S_H_32xN_avx2 64
+    P2S_H_32xN_avx2 48
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_64xN 1    ; %1 = block height; emits filterPixelToShort_64x%1 (eight 8-pixel strips per row)
+INIT_XMM ssse3
+cglobal filterPixelToShort_64x%1, 3, 7, 6
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r4, [r3 * 3]
+    lea         r5, [r1 * 3]
+
+    ; load height
+    mov         r6d, %1/4
+
+    ; load constant
+    mova        m4, [pb_128]
+    mova        m5, [tab_c_64_n64]
+
+.loop:
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5    ; (pix << 6) - 8192
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0], m0
+    movu        [r2 + r3 * 1], m1
+    movu        [r2 + r3 * 2], m2
+    movu        [r2 + r4], m3
+
+    lea         r0, [r0 + 8]    ; strip 2
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 16], m0
+    movu        [r2 + r3 * 1 + 16], m1
+    movu        [r2 + r3 * 2 + 16], m2
+    movu        [r2 + r4 + 16], m3
+
+    lea         r0, [r0 + 8]    ; strip 3
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 32], m0
+    movu        [r2 + r3 * 1 + 32], m1
+    movu        [r2 + r3 * 2 + 32], m2
+    movu        [r2 + r4 + 32], m3
+
+    lea         r0, [r0 + 8]    ; strip 4
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 48], m0
+    movu        [r2 + r3 * 1 + 48], m1
+    movu        [r2 + r3 * 2 + 48], m2
+    movu        [r2 + r4 + 48], m3
+
+    lea         r0, [r0 + 8]    ; strip 5
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 64], m0
+    movu        [r2 + r3 * 1 + 64], m1
+    movu        [r2 + r3 * 2 + 64], m2
+    movu        [r2 + r4 + 64], m3
+
+    lea         r0, [r0 + 8]    ; strip 6
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 80], m0
+    movu        [r2 + r3 * 1 + 80], m1
+    movu        [r2 + r3 * 2 + 80], m2
+    movu        [r2 + r4 + 80], m3
+
+    lea         r0, [r0 + 8]    ; strip 7
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 96], m0
+    movu        [r2 + r3 * 1 + 96], m1
+    movu        [r2 + r3 * 2 + 96], m2
+    movu        [r2 + r4 + 96], m3
+
+    lea         r0, [r0 + 8]    ; strip 8
+
+    movh        m0, [r0]
+    punpcklbw   m0, m4
+    pmaddubsw   m0, m5
+
+    movh        m1, [r0 + r1]
+    punpcklbw   m1, m4
+    pmaddubsw   m1, m5
+
+    movh        m2, [r0 + r1 * 2]
+    punpcklbw   m2, m4
+    pmaddubsw   m2, m5
+
+    movh        m3, [r0 + r5]
+    punpcklbw   m3, m4
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0 + 112], m0
+    movu        [r2 + r3 * 1 + 112], m1
+    movu        [r2 + r3 * 2 + 112], m2
+    movu        [r2 + r4 + 112], m3
+
+    lea         r0, [r0 + r1 * 4 - 56]    ; undo the seven +8 strip offsets, advance four rows
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+    P2S_H_64xN 64
+    P2S_H_64xN 16
+    P2S_H_64xN 32
+    P2S_H_64xN 48
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_64xN_avx2 1    ; %1 = block height; AVX2 filterPixelToShort_64x%1, four 16-pixel quarters per row
+INIT_YMM avx2
+cglobal filterPixelToShort_64x%1, 3, 7, 5
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r5, [r1 * 3]
+    lea         r6, [r3 * 3]
+
+    ; load height
+    mov         r4d, %1/4
+
+    ; load constant
+    vpbroadcastd m4, [pw_2000]
+
+.loop:
+    pmovzxbw    m0, [r0 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + 1 * mmsize/2]
+    pmovzxbw    m2, [r0 + 2 * mmsize/2]
+    pmovzxbw    m3, [r0 + 3 * mmsize/2]
+    psllw       m0, 6    ; (pix << 6) - 0x2000 below
+    psllw       m1, 6
+    psllw       m2, 6
+    psllw       m3, 6
+    psubw       m0, m4
+    psubw       m1, m4
+    psubw       m2, m4
+    psubw       m3, m4
+
+    movu        [r2 + 0 * mmsize], m0
+    movu        [r2 + 1 * mmsize], m1
+    movu        [r2 + 2 * mmsize], m2
+    movu        [r2 + 3 * mmsize], m3
+
+    pmovzxbw    m0, [r0 + r1 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + r1 + 1 * mmsize/2]
+    pmovzxbw    m2, [r0 + r1 + 2 * mmsize/2]
+    pmovzxbw    m3, [r0 + r1 + 3 * mmsize/2]
+    psllw       m0, 6
+    psllw       m1, 6
+    psllw       m2, 6
+    psllw       m3, 6
+    psubw       m0, m4
+    psubw       m1, m4
+    psubw       m2, m4
+    psubw       m3, m4
+
+    movu        [r2 + r3 + 0 * mmsize], m0
+    movu        [r2 + r3 + 1 * mmsize], m1
+    movu        [r2 + r3 + 2 * mmsize], m2
+    movu        [r2 + r3 + 3 * mmsize], m3
+
+    pmovzxbw    m0, [r0 + r1 * 2 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + r1 * 2 + 1 * mmsize/2]
+    pmovzxbw    m2, [r0 + r1 * 2 + 2 * mmsize/2]
+    pmovzxbw    m3, [r0 + r1 * 2 + 3 * mmsize/2]
+    psllw       m0, 6
+    psllw       m1, 6
+    psllw       m2, 6
+    psllw       m3, 6
+    psubw       m0, m4
+    psubw       m1, m4
+    psubw       m2, m4
+    psubw       m3, m4
+
+    movu        [r2 + r3 * 2 + 0 * mmsize], m0
+    movu        [r2 + r3 * 2 + 1 * mmsize], m1
+    movu        [r2 + r3 * 2 + 2 * mmsize], m2
+    movu        [r2 + r3 * 2 + 3 * mmsize], m3
+
+    pmovzxbw    m0, [r0 + r5 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + r5 + 1 * mmsize/2]
+    pmovzxbw    m2, [r0 + r5 + 2 * mmsize/2]
+    pmovzxbw    m3, [r0 + r5 + 3 * mmsize/2]
+    psllw       m0, 6
+    psllw       m1, 6
+    psllw       m2, 6
+    psllw       m3, 6
+    psubw       m0, m4
+    psubw       m1, m4
+    psubw       m2, m4
+    psubw       m3, m4
+
+    movu        [r2 + r6 + 0 * mmsize], m0
+    movu        [r2 + r6 + 1 * mmsize], m1
+    movu        [r2 + r6 + 2 * mmsize], m2
+    movu        [r2 + r6 + 3 * mmsize], m3
+
+    lea         r0, [r0 + r1 * 4]
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r4d
+    jnz        .loop
+    RET
+%endmacro
+    P2S_H_64xN_avx2 64
+    P2S_H_64xN_avx2 16
+    P2S_H_64xN_avx2 32
+    P2S_H_64xN_avx2 48
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_12xN 1    ; %1 = block height; emits filterPixelToShort_12x%1 (16-wide load, 12 pixels stored)
+INIT_XMM ssse3
+cglobal filterPixelToShort_12x%1, 3, 7, 6
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r4, [r1 * 3]
+    lea         r6, [r3 * 3]
+    mov         r5d, %1/4    ; four rows per loop pass
+
+    ; load constant
+    mova        m4, [pb_128]
+    mova        m5, [tab_c_64_n64]
+
+.loop:
+    movu        m0, [r0]
+    punpcklbw   m1, m0, m4
+    punpckhbw   m0, m4
+    pmaddubsw   m0, m5    ; (pix << 6) - 8192
+    pmaddubsw   m1, m5
+
+    movu        m2, [r0 + r1]
+    punpcklbw   m3, m2, m4
+    punpckhbw   m2, m4
+    pmaddubsw   m2, m5
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 0], m1
+    movu        [r2 + r3 * 1], m3
+
+    movh        [r2 + r3 * 0 + 16], m0    ; only 4 of the high 8 words are kept (width 12)
+    movh        [r2 + r3 * 1 + 16], m2
+
+    movu        m0, [r0 + r1 * 2]
+    punpcklbw   m1, m0, m4
+    punpckhbw   m0, m4
+    pmaddubsw   m0, m5
+    pmaddubsw   m1, m5
+
+    movu        m2, [r0 + r4]
+    punpcklbw   m3, m2, m4
+    punpckhbw   m2, m4
+    pmaddubsw   m2, m5
+    pmaddubsw   m3, m5
+
+    movu        [r2 + r3 * 2], m1
+    movu        [r2 + r6], m3
+
+    movh        [r2 + r3 * 2 + 16], m0
+    movh        [r2 + r6 + 16], m2
+
+    lea         r0, [r0 + r1 * 4]
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r5d
+    jnz         .loop
+    RET
+%endmacro
+    P2S_H_12xN 16
+    P2S_H_12xN 32
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_24xN 1    ; %1 = block height; emits filterPixelToShort_24x%1 (16 + 8 pixels per row)
+INIT_XMM ssse3
+cglobal filterPixelToShort_24x%1, 3, 7, 5
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r4, [r1 * 3]
+    lea         r5, [r3 * 3]
+    mov         r6d, %1/4    ; four rows per loop pass
+
+    ; load constant
+    mova        m3, [pb_128]
+    mova        m4, [tab_c_64_n64]
+
+.loop:
+    movu        m0, [r0]
+    punpcklbw   m1, m0, m3
+    punpckhbw   m0, m3
+    pmaddubsw   m0, m4    ; (pix << 6) - 8192
+    pmaddubsw   m1, m4
+
+    movu        m2, [r0 + 16]    ; trailing 8 pixels of the 24-wide row
+    punpcklbw   m2, m3
+    pmaddubsw   m2, m4
+
+    movu        [r2 +  r3 * 0], m1
+    movu        [r2 +  r3 * 0 + 16], m0
+    movu        [r2 +  r3 * 0 + 32], m2
+
+    movu        m0, [r0 + r1]
+    punpcklbw   m1, m0, m3
+    punpckhbw   m0, m3
+    pmaddubsw   m0, m4
+    pmaddubsw   m1, m4
+
+    movu        m2, [r0 + r1 + 16]
+    punpcklbw   m2, m3
+    pmaddubsw   m2, m4
+
+    movu        [r2 +  r3 * 1], m1
+    movu        [r2 +  r3 * 1 + 16], m0
+    movu        [r2 +  r3 * 1 + 32], m2
+
+    movu        m0, [r0 + r1 * 2]
+    punpcklbw   m1, m0, m3
+    punpckhbw   m0, m3
+    pmaddubsw   m0, m4
+    pmaddubsw   m1, m4
+
+    movu        m2, [r0 + r1 * 2 + 16]
+    punpcklbw   m2, m3
+    pmaddubsw   m2, m4
+
+    movu        [r2 +  r3 * 2], m1
+    movu        [r2 +  r3 * 2 + 16], m0
+    movu        [r2 +  r3 * 2 + 32], m2
+
+    movu        m0, [r0 + r4]
+    punpcklbw   m1, m0, m3
+    punpckhbw   m0, m3
+    pmaddubsw   m0, m4
+    pmaddubsw   m1, m4
+
+    movu        m2, [r0 + r4 + 16]
+    punpcklbw   m2, m3
+    pmaddubsw   m2, m4
+    movu        [r2 +  r5], m1
+    movu        [r2 +  r5 + 16], m0
+    movu        [r2 +  r5 + 32], m2
+
+    lea         r0, [r0 + r1 * 4]
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+    P2S_H_24xN 32
+    P2S_H_24xN 64
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_24xN_avx2 1    ; %1 = block height; AVX2 filterPixelToShort_24x%1 (16 pixels via shift, 8 via madd)
+INIT_YMM avx2
+cglobal filterPixelToShort_24x%1, 3, 7, 4
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r4, [r1 * 3]
+    lea         r5, [r3 * 3]
+    mov         r6d, %1/4
+
+    ; load constant
+    vpbroadcastd m1, [pw_2000]
+    vpbroadcastd m2, [pb_128]
+    vpbroadcastd m3, [tab_c_64_n64]    ; 4-byte broadcast; presumably the table repeats {64,-64} -- verify against its definition
+
+.loop:
+    pmovzxbw    m0, [r0]
+    psllw       m0, 6
+    psubw       m0, m1    ; (pix << 6) - 0x2000
+    movu        [r2], m0
+
+    movu        m0, [r0 + mmsize/2]    ; trailing 8 pixels, handled with the SSE-style madd
+    punpcklbw   m0, m2
+    pmaddubsw   m0, m3
+    movu        [r2 +  r3 * 0 + mmsize], xm0
+
+    pmovzxbw    m0, [r0 + r1]
+    psllw       m0, 6
+    psubw       m0, m1
+    movu        [r2 + r3], m0
+
+    movu        m0, [r0 + r1 + mmsize/2]
+    punpcklbw   m0, m2
+    pmaddubsw   m0, m3
+    movu        [r2 +  r3 * 1 + mmsize], xm0
+
+    pmovzxbw    m0, [r0 + r1 * 2]
+    psllw       m0, 6
+    psubw       m0, m1
+    movu        [r2 + r3 * 2], m0
+
+    movu        m0, [r0 + r1 * 2 + mmsize/2]
+    punpcklbw   m0, m2
+    pmaddubsw   m0, m3
+    movu        [r2 +  r3 * 2 + mmsize], xm0
+
+    pmovzxbw    m0, [r0 + r4]
+    psllw       m0, 6
+    psubw       m0, m1
+    movu        [r2 + r5], m0
+
+    movu        m0, [r0 + r4 + mmsize/2]
+    punpcklbw   m0, m2
+    pmaddubsw   m0, m3
+    movu        [r2 + r5 + mmsize], xm0
+
+    lea         r0, [r0 + r1 * 4]
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+    P2S_H_24xN_avx2 32
+    P2S_H_24xN_avx2 64
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal filterPixelToShort_48x64, 3, 7, 4    ; fixed 48x64 block: three 16-pixel strips per row, four rows per pass
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r4, [r1 * 3]
+    lea         r5, [r3 * 3]
+    mov         r6d, 16    ; 64 rows / 4 per pass
+
+    ; load constant
+    mova        m2, [pb_128]
+    mova        m3, [tab_c_64_n64]
+
+.loop:
+    movu        m0, [r0]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3    ; (pix << 6) - 8192
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r3 * 0], m1
+    movu        [r2 +  r3 * 0 + 16], m0
+
+    movu        m0, [r0 + 16]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r3 * 0 + 32], m1
+    movu        [r2 +  r3 * 0 + 48], m0
+
+    movu        m0, [r0 + 32]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r3 * 0 + 64], m1
+    movu        [r2 +  r3 * 0 + 80], m0
+
+    movu        m0, [r0 + r1]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r3 * 1], m1
+    movu        [r2 +  r3 * 1 + 16], m0
+
+    movu        m0, [r0 + r1 + 16]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r3 * 1 + 32], m1
+    movu        [r2 +  r3 * 1 + 48], m0
+
+    movu        m0, [r0 + r1 + 32]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r3 * 1 + 64], m1
+    movu        [r2 +  r3 * 1 + 80], m0
+
+    movu        m0, [r0 + r1 * 2]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r3 * 2], m1
+    movu        [r2 +  r3 * 2 + 16], m0
+
+    movu        m0, [r0 + r1 * 2 + 16]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r3 * 2 + 32], m1
+    movu        [r2 +  r3 * 2 + 48], m0
+
+    movu        m0, [r0 + r1 * 2 + 32]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r3 * 2 + 64], m1
+    movu        [r2 +  r3 * 2 + 80], m0
+
+    movu        m0, [r0 + r4]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r5], m1
+    movu        [r2 +  r5 + 16], m0
+
+    movu        m0, [r0 + r4 + 16]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r5 + 32], m1
+    movu        [r2 +  r5 + 48], m0
+
+    movu        m0, [r0 + r4 + 32]
+    punpcklbw   m1, m0, m2
+    punpckhbw   m0, m2
+    pmaddubsw   m0, m3
+    pmaddubsw   m1, m3
+
+    movu        [r2 +  r5 + 64], m1
+    movu        [r2 +  r5 + 80], m0
+
+    lea         r0, [r0 + r1 * 4]
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal filterPixelToShort_48x64, 3,7,4    ; AVX2 48x64: three 16-pixel thirds per row, four rows per pass
+    mov         r3d, r3m
+    add         r3d, r3d    ; dstStride in bytes
+    lea         r5, [r1 * 3]
+    lea         r6, [r3 * 3]
+
+    ; load height
+    mov         r4d, 64/4
+
+    ; load constant
+    vpbroadcastd m3, [pw_2000]
+
+    ; just unroll(1) because it is best choice for 48x64
+.loop:
+    pmovzxbw    m0, [r0 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + 1 * mmsize/2]
+    pmovzxbw    m2, [r0 + 2 * mmsize/2]
+    psllw       m0, 6    ; (pix << 6) - 0x2000 below
+    psllw       m1, 6
+    psllw       m2, 6
+    psubw       m0, m3
+    psubw       m1, m3
+    psubw       m2, m3
+    movu        [r2 + 0 * mmsize], m0
+    movu        [r2 + 1 * mmsize], m1
+    movu        [r2 + 2 * mmsize], m2
+
+    pmovzxbw    m0, [r0 + r1 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + r1 + 1 * mmsize/2]
+    pmovzxbw    m2, [r0 + r1 + 2 * mmsize/2]
+    psllw       m0, 6
+    psllw       m1, 6
+    psllw       m2, 6
+    psubw       m0, m3
+    psubw       m1, m3
+    psubw       m2, m3
+    movu        [r2 + r3 + 0 * mmsize], m0
+    movu        [r2 + r3 + 1 * mmsize], m1
+    movu        [r2 + r3 + 2 * mmsize], m2
+
+    pmovzxbw    m0, [r0 + r1 * 2 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + r1 * 2 + 1 * mmsize/2]
+    pmovzxbw    m2, [r0 + r1 * 2 + 2 * mmsize/2]
+    psllw       m0, 6
+    psllw       m1, 6
+    psllw       m2, 6
+    psubw       m0, m3
+    psubw       m1, m3
+    psubw       m2, m3
+    movu        [r2 + r3 * 2 + 0 * mmsize], m0
+    movu        [r2 + r3 * 2 + 1 * mmsize], m1
+    movu        [r2 + r3 * 2 + 2 * mmsize], m2
+
+    pmovzxbw    m0, [r0 + r5 + 0 * mmsize/2]
+    pmovzxbw    m1, [r0 + r5 + 1 * mmsize/2]
+    pmovzxbw    m2, [r0 + r5 + 2 * mmsize/2]
+    psllw       m0, 6
+    psllw       m1, 6
+    psllw       m2, 6
+    psubw       m0, m3
+    psubw       m1, m3
+    psubw       m2, m3
+    movu        [r2 + r6 + 0 * mmsize], m0
+    movu        [r2 + r6 + 1 * mmsize], m1
+    movu        [r2 + r6 + 2 * mmsize], m2
+
+    lea         r0, [r0 + r1 * 4]
+    lea         r2, [r2 + r3 * 4]
+
+    dec         r4d
+    jnz        .loop
+    RET
+
+
+%macro PROCESS_LUMA_W4_4R 0    ; 8-tap luma vertical filter core, width 4, 4 rows; results in m4 (rows 1-2) and m5 (rows 3-4); coefficients at [r6]
+    movd        m0, [r0]
+    movd        m1, [r0 + r1]
+    punpcklbw   m2, m0, m1                     ; m2=[0 1]
+
+    lea         r0, [r0 + 2 * r1]
+    movd        m0, [r0]
+    punpcklbw   m1, m0                         ; m1=[1 2]
+    punpcklqdq  m2, m1                         ; m2=[0 1 1 2]
+    pmaddubsw   m4, m2, [r6 + 0 * 16]          ; m4=[0+1 1+2]
+
+    movd        m1, [r0 + r1]
+    punpcklbw   m5, m0, m1                     ; m5=[2 3]
+    lea         r0, [r0 + 2 * r1]
+    movd        m0, [r0]
+    punpcklbw   m1, m0                         ; m1=[3 4]
+    punpcklqdq  m5, m1                         ; m5=[2 3 3 4]
+    pmaddubsw   m2, m5, [r6 + 1 * 16]          ; m2=[2+3 3+4]
+    paddw       m4, m2                         ; m4=[0+1+2+3 1+2+3+4]                   Row1-2
+    pmaddubsw   m5, [r6 + 0 * 16]              ; m5=[2+3 3+4]                           Row3-4
+
+    movd        m1, [r0 + r1]
+    punpcklbw   m2, m0, m1                     ; m2=[4 5]
+    lea         r0, [r0 + 2 * r1]
+    movd        m0, [r0]
+    punpcklbw   m1, m0                         ; m1=[5 6]
+    punpcklqdq  m2, m1                         ; m2=[4 5 5 6]
+    pmaddubsw   m1, m2, [r6 + 2 * 16]          ; m1=[4+5 5+6]
+    paddw       m4, m1                         ; m4=[0+1+2+3+4+5 1+2+3+4+5+6]           Row1-2
+    pmaddubsw   m2, [r6 + 1 * 16]              ; m2=[4+5 5+6]
+    paddw       m5, m2                         ; m5=[2+3+4+5 3+4+5+6]                   Row3-4
+
+    movd        m1, [r0 + r1]
+    punpcklbw   m2, m0, m1                     ; m2=[6 7]
+    lea         r0, [r0 + 2 * r1]
+    movd        m0, [r0]
+    punpcklbw   m1, m0                         ; m1=[7 8]
+    punpcklqdq  m2, m1                         ; m2=[6 7 7 8]
+    pmaddubsw   m1, m2, [r6 + 3 * 16]          ; m1=[6+7 7+8]
+    paddw       m4, m1                         ; m4=[0+1+2+3+4+5+6+7 1+2+3+4+5+6+7+8]   Row1-2 end
+    pmaddubsw   m2, [r6 + 2 * 16]              ; m2=[6+7 7+8]
+    paddw       m5, m2                         ; m5=[2+3+4+5+6+7 3+4+5+6+7+8]           Row3-4
+
+    movd        m1, [r0 + r1]
+    punpcklbw   m2, m0, m1                     ; m2=[8 9]
+    movd        m0, [r0 + 2 * r1]
+    punpcklbw   m1, m0                         ; m1=[9 10]
+    punpcklqdq  m2, m1                         ; m2=[8 9 9 10]
+    pmaddubsw   m2, [r6 + 3 * 16]              ; m2=[8+9 9+10]
+    paddw       m5, m2                         ; m5=[2+3+4+5+6+7+8+9 3+4+5+6+7+8+9+10]  Row3-4 end
+%endmacro
+
+%macro PROCESS_LUMA_W8_4R 0                    ; 8-tap vertical luma filter, 8 pixels wide x 4 rows: results in m7/m6/m5/m4 (Row1..Row4); caller provides r0=src, r1=srcStride, r6=coeff table; advances r0 by 8 rows
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    punpcklbw  m0, m1
+    pmaddubsw  m7, m0, [r6 + 0 *16]            ;m7=[0+1]               Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m0, [r0]
+    punpcklbw  m1, m0
+    pmaddubsw  m6, m1, [r6 + 0 *16]            ;m6=[1+2]               Row2
+
+    movq       m1, [r0 + r1]
+    punpcklbw  m0, m1
+    pmaddubsw  m5, m0, [r6 + 0 *16]            ;m5=[2+3]               Row3
+    pmaddubsw  m0, [r6 + 1 * 16]
+    paddw      m7, m0                          ;m7=[0+1+2+3]           Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m0, [r0]
+    punpcklbw  m1, m0
+    pmaddubsw  m4, m1, [r6 + 0 *16]            ;m4=[3+4]               Row4
+    pmaddubsw  m1, [r6 + 1 * 16]
+    paddw      m6, m1                          ;m6 = [1+2+3+4]         Row2
+
+    movq       m1, [r0 + r1]
+    punpcklbw  m0, m1
+    pmaddubsw  m2, m0, [r6 + 1 * 16]
+    pmaddubsw  m0, [r6 + 2 * 16]
+    paddw      m7, m0                          ;m7=[0+1+2+3+4+5]       Row1
+    paddw      m5, m2                          ;m5=[2+3+4+5]           Row3
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m0, [r0]
+    punpcklbw  m1, m0
+    pmaddubsw  m2, m1, [r6 + 1 * 16]
+    pmaddubsw  m1, [r6 + 2 * 16]
+    paddw      m6, m1                          ;m6=[1+2+3+4+5+6]       Row2
+    paddw      m4, m2                          ;m4=[3+4+5+6]           Row4
+
+    movq       m1, [r0 + r1]
+    punpcklbw  m0, m1
+    pmaddubsw  m2, m0, [r6 + 2 * 16]
+    pmaddubsw  m0, [r6 + 3 * 16]
+    paddw      m7, m0                          ;m7=[0+1+2+3+4+5+6+7]   Row1 end
+    paddw      m5, m2                          ;m5=[2+3+4+5+6+7]       Row3
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m0, [r0]
+    punpcklbw  m1, m0
+    pmaddubsw  m2, m1, [r6 + 2 * 16]
+    pmaddubsw  m1, [r6 + 3 * 16]
+    paddw      m6, m1                          ;m6=[1+2+3+4+5+6+7+8]   Row2 end
+    paddw      m4, m2                          ;m4=[3+4+5+6+7+8]       Row4
+
+    movq       m1, [r0 + r1]
+    punpcklbw  m0, m1
+    pmaddubsw  m0, [r6 + 3 * 16]
+    paddw      m5, m0                          ;m5=[2+3+4+5+6+7+8+9]   Row3 end
+
+    movq       m0, [r0 + 2 * r1]
+    punpcklbw  m1, m0
+    pmaddubsw  m1, [r6 + 3 * 16]
+    paddw      m4, m1                          ;m4=[3+4+5+6+7+8+9+10]  Row4 end
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%3_4x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_LUMA_4xN 3
+INIT_XMM sse4
+cglobal interp_8tap_vert_%3_%1x%2, 5, 7, 6
+    lea       r5, [3 * r1]
+    sub       r0, r5                           ; step back 3 rows: the 8-tap filter needs 3 rows of context above
+    shl       r4d, 6                           ; coeffIdx * 64 = byte offset into tab_LumaCoeffVer (4 x 16B per filter)
+%ifidn %3,ps
+    add       r3d, r3d                         ; ps output is 16-bit, so double the dst stride
+%endif
+
+%ifdef PIC
+    lea       r5, [tab_LumaCoeffVer]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_LumaCoeffVer + r4]
+%endif
+
+%ifidn %3,pp
+    mova      m3, [pw_512]                     ; pp: pmulhrsw factor (round + shift right by 6)
+%else
+    mova      m3, [pw_2000]                    ; ps: offset subtracted from the 16-bit intermediates
+%endif
+
+    mov       r4d, %2/4                        ; process 4 output rows per iteration
+    lea       r5, [4 * r1]
+
+.loopH:
+    PROCESS_LUMA_W4_4R
+
+%ifidn %3,pp
+    pmulhrsw  m4, m3
+    pmulhrsw  m5, m3
+
+    packuswb  m4, m5
+
+    movd      [r2], m4
+    pextrd    [r2 + r3], m4, 1
+    lea       r2, [r2 + 2 * r3]
+    pextrd    [r2], m4, 2
+    pextrd    [r2 + r3], m4, 3
+%else
+    psubw     m4, m3
+    psubw     m5, m3
+
+    movlps    [r2], m4
+    movhps    [r2 + r3], m4
+    lea       r2, [r2 + 2 * r3]
+    movlps    [r2], m5
+    movhps    [r2 + r3], m5
+%endif
+
+    sub       r0, r5                           ; macro advanced src by 8 rows; rewind 4 for a net +4 per iteration
+    lea       r2, [r2 + 2 * r3]
+
+    dec       r4d
+    jnz       .loopH
+
+    RET
+%endmacro
+
+
+INIT_YMM avx2
+cglobal interp_8tap_vert_pp_4x4, 4,6,8
+    mov             r4d, r4m
+    lea             r5, [r1 * 3]
+    sub             r0, r5                          ; step back 3 rows of context for the 8-tap filter
+
+    ; TODO: VPGATHERDD
+    movd            xm1, [r0]                       ; m1 = row0
+    movd            xm2, [r0 + r1]                  ; m2 = row1
+    punpcklbw       xm1, xm2                        ; m1 = [13 03 12 02 11 01 10 00]
+
+    movd            xm3, [r0 + r1 * 2]              ; m3 = row2
+    punpcklbw       xm2, xm3                        ; m2 = [23 13 22 12 21 11 20 10]
+    movd            xm4, [r0 + r5]
+    punpcklbw       xm3, xm4                        ; m3 = [33 23 32 22 31 21 30 20]
+    punpcklwd       xm1, xm3                        ; m1 = [33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00]
+
+    lea             r0, [r0 + r1 * 4]
+    movd            xm5, [r0]                       ; m5 = row4
+    punpcklbw       xm4, xm5                        ; m4 = [43 33 42 32 41 31 40 30]
+    punpcklwd       xm2, xm4                        ; m2 = [43 33 23 13 42 32 22 12 41 31 21 11 40 30 20 10]
+    vinserti128     m1, m1, xm2, 1                  ; m1 = [43 33 23 13 42 32 22 12 41 31 21 11 40 30 20 10] - [33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00]
+    movd            xm2, [r0 + r1]                  ; m2 = row5
+    punpcklbw       xm5, xm2                        ; m5 = [53 43 52 42 51 41 50 40]
+    punpcklwd       xm3, xm5                        ; m3 = [53 43 33 23 52 42 32 22 51 41 31 21 50 40 30 20]
+    movd            xm6, [r0 + r1 * 2]              ; m6 = row6
+    punpcklbw       xm2, xm6                        ; m2 = [63 53 62 52 61 51 60 50]
+    punpcklwd       xm4, xm2                        ; m4 = [63 53 43 33 62 52 42 32 61 51 41 31 60 50 40 30]
+    vinserti128     m3, m3, xm4, 1                  ; m3 = [63 53 43 33 62 52 42 32 61 51 41 31 60 50 40 30] - [53 43 33 23 52 42 32 22 51 41 31 21 50 40 30 20]
+    movd            xm4, [r0 + r5]                  ; m4 = row7
+    punpcklbw       xm6, xm4                        ; m6 = [73 63 72 62 71 61 70 60]
+    punpcklwd       xm5, xm6                        ; m5 = [73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40]
+
+    lea             r0, [r0 + r1 * 4]
+    movd            xm7, [r0]                       ; m7 = row8
+    punpcklbw       xm4, xm7                        ; m4 = [83 73 82 72 81 71 80 70]
+    punpcklwd       xm2, xm4                        ; m2 = [83 73 63 53 82 72 62 52 81 71 61 51 80 70 60 50]
+    vinserti128     m5, m5, xm2, 1                  ; m5 = [83 73 63 53 82 72 62 52 81 71 61 51 80 70 60 50] - [73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40]
+    movd            xm2, [r0 + r1]                  ; m2 = row9
+    punpcklbw       xm7, xm2                        ; m7 = [93 83 92 82 91 81 90 80]
+    punpcklwd       xm6, xm7                        ; m6 = [93 83 73 63 92 82 72 62 91 81 71 61 90 80 70 60]
+    movd            xm7, [r0 + r1 * 2]              ; m7 = rowA
+    punpcklbw       xm2, xm7                        ; m2 = [A3 93 A2 92 A1 91 A0 90]
+    punpcklwd       xm4, xm2                        ; m4 = [A3 93 83 73 A2 92 82 72 A1 91 81 71 A0 90 80 70]
+    vinserti128     m6, m6, xm4, 1                  ; m6 = [A3 93 83 73 A2 92 82 72 A1 91 81 71 A0 90 80 70] - [93 83 73 63 92 82 72 62 91 81 71 61 90 80 70 60]
+
+    ; load filter coeff
+%ifdef PIC
+    lea             r5, [tab_LumaCoeff]
+    vpbroadcastd    m0, [r5 + r4 * 8 + 0]
+    vpbroadcastd    m2, [r5 + r4 * 8 + 4]
+%else
+    vpbroadcastd    m0, [tab_LumaCoeff + r4 * 8 + 0]
+    vpbroadcastd    m2, [tab_LumaCoeff + r4 * 8 + 4]
+%endif
+
+    pmaddubsw       m1, m0
+    pmaddubsw       m3, m0
+    pmaddubsw       m5, m2
+    pmaddubsw       m6, m2
+    vbroadcasti128  m0, [pw_1]
+    pmaddwd         m1, m0                          ; horizontally sum word pairs into dwords
+    pmaddwd         m3, m0
+    pmaddwd         m5, m0
+    pmaddwd         m6, m0
+    paddd           m1, m5                          ; m1 = DQWORD ROW[1 0]
+    paddd           m3, m6                          ; m3 = DQWORD ROW[3 2]
+    packssdw        m1, m3                          ; m1 =  QWORD ROW[3 1 2 0]
+
+    ; TODO: does it overflow?
+    pmulhrsw        m1, [pw_512]                    ; round and shift right by 6
+    vextracti128    xm2, m1, 1
+    packuswb        xm1, xm2                        ; m1 =  DWORD ROW[3 1 2 0]
+    movd            [r2], xm1                       ; dwords are in [3 1 2 0] order, hence the swapped extract indices
+    pextrd          [r2 + r3], xm1, 2
+    pextrd          [r2 + r3 * 2], xm1, 1
+    lea             r4, [r3 * 3]
+    pextrd          [r2 + r4], xm1, 3
+    RET
+
+INIT_YMM avx2
+cglobal interp_8tap_vert_ps_4x4, 4, 6, 5
+    mov             r4d, r4m
+    shl             r4d, 7                                  ; coeffIdx * 128 = byte offset into tab_LumaCoeffVer_32 (4 x 32B per filter)
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                                  ; step back 3 rows of context for the 8-tap filter
+
+    add             r3d, r3d                                ; ps output is 16-bit, so double the dst stride
+
+    movd            xm1, [r0]
+    pinsrd          xm1, [r0 + r1], 1
+    pinsrd          xm1, [r0 + r1 * 2], 2
+    pinsrd          xm1, [r0 + r4], 3                       ; m1 = row[3 2 1 0]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm2, [r0]
+    pinsrd          xm2, [r0 + r1], 1
+    pinsrd          xm2, [r0 + r1 * 2], 2
+    pinsrd          xm2, [r0 + r4], 3                       ; m2 = row[7 6 5 4]
+    vinserti128     m1, m1, xm2, 1                          ; m1 = row[7 6 5 4 3 2 1 0]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm3, [r0]
+    pinsrd          xm3, [r0 + r1], 1
+    pinsrd          xm3, [r0 + r1 * 2], 2                   ; m3 = row[x 10 9 8]
+    vinserti128     m2, m2, xm3, 1                          ; m2 = row[x 10 9 8 7 6 5 4]
+    mova            m3, [interp4_vpp_shuf1]
+    vpermd          m0, m3, m1                              ; m0 = row[4 3 3 2 2 1 1 0]
+    vpermd          m4, m3, m2                              ; m4 = row[8 7 7 6 6 5 5 4]
+    mova            m3, [interp4_vpp_shuf1 + mmsize]
+    vpermd          m1, m3, m1                              ; m1 = row[6 5 5 4 4 3 3 2]
+    vpermd          m2, m3, m2                              ; m2 = row[10 9 9 8 8 7 7 6]
+
+    mova            m3, [interp4_vpp_shuf]
+    pshufb          m0, m0, m3
+    pshufb          m1, m1, m3
+    pshufb          m4, m4, m3
+    pshufb          m2, m2, m3
+    pmaddubsw       m0, [r5]
+    pmaddubsw       m1, [r5 + mmsize]
+    pmaddubsw       m4, [r5 + 2 * mmsize]
+    pmaddubsw       m2, [r5 + 3 * mmsize]
+    paddw           m0, m1
+    paddw           m0, m4
+    paddw           m0, m2                                  ; m0 = WORD ROW[3 2 1 0]
+
+    psubw           m0, [pw_2000]                           ; ps: subtract the intermediate offset
+    vextracti128    xm2, m0, 1
+    lea             r5, [r3 * 3]
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r5], xm2
+    RET
+
+%macro FILTER_VER_LUMA_AVX2_4xN 3              ; AVX2 8-tap vertical luma filter, 4 wide x %2 rows; x86-64 only (uses m8/m9 and r8)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%3_%1x%2, 4, 9, 10
+    mov             r4d, r4m
+    shl             r4d, 7                                  ; coeffIdx * 128 = byte offset into tab_LumaCoeffVer_32
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                                  ; step back 3 rows of context for the 8-tap filter
+    lea             r6, [r1 * 4]
+%ifidn %3,pp
+    mova            m6, [pw_512]                            ; pp: pmulhrsw factor (round + shift right by 6)
+%else
+    add             r3d, r3d                                ; ps output is 16-bit, so double the dst stride
+    vbroadcasti128  m6, [pw_2000]                           ; ps: offset subtracted from the intermediates
+%endif
+    lea             r8, [r3 * 3]
+    mova            m5, [interp4_vpp_shuf]
+    mova            m0, [interp4_vpp_shuf1]
+    mova            m7, [interp4_vpp_shuf1 + mmsize]
+    mov             r7d, %2 / 8                             ; process 8 output rows per iteration
+.loop:
+    movd            xm1, [r0]
+    pinsrd          xm1, [r0 + r1], 1
+    pinsrd          xm1, [r0 + r1 * 2], 2
+    pinsrd          xm1, [r0 + r4], 3                       ; m1 = row[3 2 1 0]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm2, [r0]
+    pinsrd          xm2, [r0 + r1], 1
+    pinsrd          xm2, [r0 + r1 * 2], 2
+    pinsrd          xm2, [r0 + r4], 3                       ; m2 = row[7 6 5 4]
+    vinserti128     m1, m1, xm2, 1                          ; m1 = row[7 6 5 4 3 2 1 0]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm3, [r0]
+    pinsrd          xm3, [r0 + r1], 1
+    pinsrd          xm3, [r0 + r1 * 2], 2
+    pinsrd          xm3, [r0 + r4], 3                       ; m3 = row[11 10 9 8]
+    vinserti128     m2, m2, xm3, 1                          ; m2 = row[11 10 9 8 7 6 5 4]
+    lea             r0, [r0 + r1 * 4]
+    movd            xm4, [r0]
+    pinsrd          xm4, [r0 + r1], 1
+    pinsrd          xm4, [r0 + r1 * 2], 2                   ; m4 = row[x 14 13 12]
+    vinserti128     m3, m3, xm4, 1                          ; m3 = row[x 14 13 12 11 10 9 8]
+    vpermd          m8, m0, m1                              ; m8 = row[4 3 3 2 2 1 1 0]
+    vpermd          m4, m0, m2                              ; m4 = row[8 7 7 6 6 5 5 4]
+    vpermd          m1, m7, m1                              ; m1 = row[6 5 5 4 4 3 3 2]
+    vpermd          m2, m7, m2                              ; m2 = row[10 9 9 8 8 7 7 6]
+    vpermd          m9, m0, m3                              ; m9 = row[12 11 11 10 10 9 9 8]
+    vpermd          m3, m7, m3                              ; m3 = row[14 13 13 12 12 11 11 10]
+
+    pshufb          m8, m8, m5
+    pshufb          m1, m1, m5
+    pshufb          m4, m4, m5
+    pshufb          m9, m9, m5
+    pshufb          m2, m2, m5
+    pshufb          m3, m3, m5
+    pmaddubsw       m8, [r5]
+    pmaddubsw       m1, [r5 + mmsize]
+    pmaddubsw       m9, [r5 + 2 * mmsize]
+    pmaddubsw       m3, [r5 + 3 * mmsize]
+    paddw           m8, m1
+    paddw           m9, m3
+    pmaddubsw       m1, m4, [r5 + 2 * mmsize]
+    pmaddubsw       m3, m2, [r5 + 3 * mmsize]
+    pmaddubsw       m4, [r5]
+    pmaddubsw       m2, [r5 + mmsize]
+    paddw           m3, m1
+    paddw           m2, m4
+    paddw           m8, m3                                  ; m8 = WORD ROW[3 2 1 0]
+    paddw           m9, m2                                  ; m9 = WORD ROW[7 6 5 4]
+
+%ifidn %3,pp
+    pmulhrsw        m8, m6
+    pmulhrsw        m9, m6
+    packuswb        m8, m9
+    vextracti128    xm1, m8, 1
+    movd            [r2], xm8
+    pextrd          [r2 + r3], xm8, 1
+    movd            [r2 + r3 * 2], xm1
+    pextrd          [r2 + r8], xm1, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm8, 2
+    pextrd          [r2 + r3], xm8, 3
+    pextrd          [r2 + r3 * 2], xm1, 2
+    pextrd          [r2 + r8], xm1, 3
+%else
+    psubw           m8, m6
+    psubw           m9, m6
+    vextracti128    xm1, m8, 1
+    vextracti128    xm2, m9, 1
+    movq            [r2], xm8
+    movhps          [r2 + r3], xm8
+    movq            [r2 + r3 * 2], xm1
+    movhps          [r2 + r8], xm1
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm9
+    movhps          [r2 + r3], xm9
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r8], xm2
+%endif
+    lea             r2, [r2 + r3 * 4]
+    sub             r0, r6                                  ; src advanced 12 rows in the loop; rewind 4 for a net +8
+    dec             r7d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_4x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_4xN 4, 4, pp
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_4x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_4xN 4, 8, pp
+    FILTER_VER_LUMA_AVX2_4xN 4, 8, pp          ; AVX2 variant (macro emits code only when ARCH_X86_64)
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_4x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_4xN 4, 16, pp
+    FILTER_VER_LUMA_AVX2_4xN 4, 16, pp         ; AVX2 variant (macro emits code only when ARCH_X86_64)
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_4x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_4xN 4, 4, ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_4x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_4xN 4, 8, ps
+    FILTER_VER_LUMA_AVX2_4xN 4, 8, ps          ; AVX2 variant (macro emits code only when ARCH_X86_64)
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_4x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_4xN 4, 16, ps
+    FILTER_VER_LUMA_AVX2_4xN 4, 16, ps         ; AVX2 variant (macro emits code only when ARCH_X86_64)
+
+%macro PROCESS_LUMA_AVX2_W8_8R 0               ; AVX2 8-tap vertical, 8 wide x 8 rows: word results in m5(rows 0-1), m2(2-3), m1(4-5), m4(6-7); caller sets r0=src, r1=srcStride, r4=3*srcStride, r5=coeff table
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2                        ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    movq            xm3, [r0 + r1 * 2]              ; m3 = row 2
+    punpcklbw       xm2, xm3                        ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
+    vinserti128     m5, m1, xm2, 1                  ; m5 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    pmaddubsw       m5, [r5]
+    movq            xm4, [r0 + r4]                  ; m4 = row 3
+    punpcklbw       xm3, xm4                        ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    lea             r0, [r0 + r1 * 4]
+    movq            xm1, [r0]                       ; m1 = row 4
+    punpcklbw       xm4, xm1                        ; m4 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
+    vinserti128     m2, m3, xm4, 1                  ; m2 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    pmaddubsw       m0, m2, [r5 + 1 * mmsize]
+    paddw           m5, m0
+    pmaddubsw       m2, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 5
+    punpcklbw       xm1, xm3                        ; m1 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    movq            xm4, [r0 + r1 * 2]              ; m4 = row 6
+    punpcklbw       xm3, xm4                        ; m3 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50] - [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    pmaddubsw       m3, m1, [r5 + 2 * mmsize]
+    paddw           m5, m3
+    pmaddubsw       m0, m1, [r5 + 1 * mmsize]
+    paddw           m2, m0
+    pmaddubsw       m1, [r5]
+    movq            xm3, [r0 + r4]                  ; m3 = row 7
+    punpcklbw       xm4, xm3                        ; m4 = [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+    lea             r0, [r0 + r1 * 4]
+    movq            xm0, [r0]                       ; m0 = row 8
+    punpcklbw       xm3, xm0                        ; m3 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70]
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70] - [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+    pmaddubsw       m3, m4, [r5 + 3 * mmsize]
+    paddw           m5, m3
+    pmaddubsw       m3, m4, [r5 + 2 * mmsize]
+    paddw           m2, m3
+    pmaddubsw       m3, m4, [r5 + 1 * mmsize]
+    paddw           m1, m3
+    pmaddubsw       m4, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 9
+    punpcklbw       xm0, xm3                        ; m0 = [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+    movq            xm6, [r0 + r1 * 2]              ; m6 = row 10
+    punpcklbw       xm3, xm6                        ; m3 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90]
+    vinserti128     m0, m0, xm3, 1                  ; m0 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90] - [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+    pmaddubsw       m3, m0, [r5 + 3 * mmsize]
+    paddw           m2, m3
+    pmaddubsw       m3, m0, [r5 + 2 * mmsize]
+    paddw           m1, m3
+    pmaddubsw       m0, [r5 + 1 * mmsize]
+    paddw           m4, m0
+
+    movq            xm3, [r0 + r4]                  ; m3 = row 11
+    punpcklbw       xm6, xm3                        ; m6 = [B7 A7 B6 A6 B5 A5 B4 A4 B3 A3 B2 A2 B1 A1 B0 A0]
+    lea             r0, [r0 + r1 * 4]
+    movq            xm0, [r0]                       ; m0 = row 12
+    punpcklbw       xm3, xm0                        ; m3 = [C7 B7 C6 B6 C5 B5 C4 B4 C3 B3 C2 B2 C1 B1 C0 B0]
+    vinserti128     m6, m6, xm3, 1                  ; m6 = [C7 B7 C6 B6 C5 B5 C4 B4 C3 B3 C2 B2 C1 B1 C0 B0] - [B7 A7 B6 A6 B5 A5 B4 A4 B3 A3 B2 A2 B1 A1 B0 A0]
+    pmaddubsw       m3, m6, [r5 + 3 * mmsize]
+    paddw           m1, m3
+    pmaddubsw       m6, [r5 + 2 * mmsize]
+    paddw           m4, m6
+    movq            xm3, [r0 + r1]                  ; m3 = row 13
+    punpcklbw       xm0, xm3                        ; m0 = [D7 C7 D6 C6 D5 C5 D4 C4 D3 C3 D2 C2 D1 C1 D0 C0]
+    movq            xm6, [r0 + r1 * 2]              ; m6 = row 14
+    punpcklbw       xm3, xm6                        ; m3 = [E7 D7 E6 D6 E5 D5 E4 D4 E3 D3 E2 D2 E1 D1 E0 D0]
+    vinserti128     m0, m0, xm3, 1                  ; m0 = [E7 D7 E6 D6 E5 D5 E4 D4 E3 D3 E2 D2 E1 D1 E0 D0] - [D7 C7 D6 C6 D5 C5 D4 C4 D3 C3 D2 C2 D1 C1 D0 C0]
+    pmaddubsw       m0, [r5 + 3 * mmsize]
+    paddw           m4, m0
+%endmacro
+
+%macro PROCESS_LUMA_AVX2_W8_4R 0               ; AVX2 8-tap vertical, 8 wide x 4 rows: word results in m5(rows 0-1) and m2(rows 2-3); caller sets r0=src, r1=srcStride, r4=3*srcStride, r5=coeff table
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2                        ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    movq            xm3, [r0 + r1 * 2]              ; m3 = row 2
+    punpcklbw       xm2, xm3                        ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
+    vinserti128     m5, m1, xm2, 1                  ; m5 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    pmaddubsw       m5, [r5]
+    movq            xm4, [r0 + r4]                  ; m4 = row 3
+    punpcklbw       xm3, xm4                        ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    lea             r0, [r0 + r1 * 4]
+    movq            xm1, [r0]                       ; m1 = row 4
+    punpcklbw       xm4, xm1                        ; m4 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
+    vinserti128     m2, m3, xm4, 1                  ; m2 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    pmaddubsw       m0, m2, [r5 + 1 * mmsize]
+    paddw           m5, m0
+    pmaddubsw       m2, [r5]
+    movq            xm3, [r0 + r1]                  ; m3 = row 5
+    punpcklbw       xm1, xm3                        ; m1 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    movq            xm4, [r0 + r1 * 2]              ; m4 = row 6
+    punpcklbw       xm3, xm4                        ; m3 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50] - [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    pmaddubsw       m3, m1, [r5 + 2 * mmsize]
+    paddw           m5, m3
+    pmaddubsw       m0, m1, [r5 + 1 * mmsize]
+    paddw           m2, m0
+    movq            xm3, [r0 + r4]                  ; m3 = row 7
+    punpcklbw       xm4, xm3                        ; m4 = [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+    lea             r0, [r0 + r1 * 4]
+    movq            xm0, [r0]                       ; m0 = row 8
+    punpcklbw       xm3, xm0                        ; m3 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70]
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70] - [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+    pmaddubsw       m3, m4, [r5 + 3 * mmsize]
+    paddw           m5, m3
+    pmaddubsw       m3, m4, [r5 + 2 * mmsize]
+    paddw           m2, m3
+    movq            xm3, [r0 + r1]                  ; m3 = row 9
+    punpcklbw       xm0, xm3                        ; m0 = [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+    movq            xm6, [r0 + r1 * 2]              ; m6 = row 10
+    punpcklbw       xm3, xm6                        ; m3 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90]
+    vinserti128     m0, m0, xm3, 1                  ; m0 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90] - [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+    pmaddubsw       m3, m0, [r5 + 3 * mmsize]
+    paddw           m2, m3
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%3_8x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_LUMA_8xN 3
+INIT_XMM sse4
+cglobal interp_8tap_vert_%3_%1x%2, 5, 7, 8
+    lea       r5, [3 * r1]
+    sub       r0, r5                           ; step back 3 rows: the 8-tap filter needs 3 rows of context above
+    shl       r4d, 6                           ; coeffIdx * 64 = byte offset into tab_LumaCoeffVer (4 x 16B per filter)
+
+%ifidn %3,ps
+    add       r3d, r3d                         ; ps output is 16-bit, so double the dst stride
+%endif
+
+%ifdef PIC
+    lea       r5, [tab_LumaCoeffVer]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_LumaCoeffVer + r4]
+%endif
+
+ %ifidn %3,pp
+    mova      m3, [pw_512]                     ; pp: pmulhrsw factor (round + shift right by 6)
+%else
+    mova      m3, [pw_2000]                    ; ps: offset subtracted from the 16-bit intermediates
+%endif
+
+    mov       r4d, %2/4                        ; process 4 output rows per iteration
+    lea       r5, [4 * r1]
+
+.loopH:
+    PROCESS_LUMA_W8_4R
+
+%ifidn %3,pp
+    pmulhrsw  m7, m3
+    pmulhrsw  m6, m3
+    pmulhrsw  m5, m3
+    pmulhrsw  m4, m3
+
+    packuswb  m7, m6
+    packuswb  m5, m4
+
+    movlps    [r2], m7
+    movhps    [r2 + r3], m7
+    lea       r2, [r2 + 2 * r3]
+    movlps    [r2], m5
+    movhps    [r2 + r3], m5
+%else
+    psubw     m7, m3
+    psubw     m6, m3
+    psubw     m5, m3
+    psubw     m4, m3
+
+    movu      [r2], m7
+    movu      [r2 + r3], m6
+    lea       r2, [r2 + 2 * r3]
+    movu      [r2], m5
+    movu      [r2 + r3], m4
+%endif
+
+    sub       r0, r5                           ; macro advanced src by 8 rows; rewind 4 for a net +4 per iteration
+    lea       r2, [r2 + 2 * r3]
+
+    dec       r4d
+    jnz       .loopH
+
+    RET
+%endmacro
+
+%macro FILTER_VER_LUMA_AVX2_8xN 3              ; AVX2 8-tap vertical luma filter, 8 wide x %2 rows, 8 rows per iteration
+INIT_YMM avx2
+cglobal interp_8tap_vert_%3_%1x%2, 4, 7, 8, 0-gprsize      ; reserve one stack slot for the row-group counter
+    mov             r4d, r4m
+    shl             r4d, 7                                  ; coeffIdx * 128 = byte offset into tab_LumaCoeffVer_32
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+    lea             r4, [r1 * 3]
+    sub             r0, r4                                  ; step back 3 rows of context for the 8-tap filter
+    lea             r6, [r1 * 4]
+%ifidn %3,pp
+    mova            m7, [pw_512]                            ; pp: pmulhrsw factor (round + shift right by 6)
+%else
+    add             r3d, r3d                                ; ps output is 16-bit, so double the dst stride
+    vbroadcasti128  m7, [pw_2000]                           ; ps: offset subtracted from the intermediates
+%endif
+    mov             word [rsp], %2 / 8                      ; loop counter kept on the stack: no spare GPR with only 7 requested
+
+.loop:
+    PROCESS_LUMA_AVX2_W8_8R
+%ifidn %3,pp
+    pmulhrsw        m5, m7                          ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m7                          ; m2 = word: row 2, row 3
+    pmulhrsw        m1, m7                          ; m1 = word: row 4, row 5
+    pmulhrsw        m4, m7                          ; m4 = word: row 6, row 7
+    packuswb        m5, m2
+    packuswb        m1, m4
+    vextracti128    xm2, m5, 1
+    vextracti128    xm4, m1, 1
+    movq            [r2], xm5
+    movq            [r2 + r3], xm2
+    lea             r2, [r2 + r3 * 2]
+    movhps          [r2], xm5
+    movhps          [r2 + r3], xm2
+    lea             r2, [r2 + r3 * 2]
+    movq            [r2], xm1
+    movq            [r2 + r3], xm4
+    lea             r2, [r2 + r3 * 2]
+    movhps          [r2], xm1
+    movhps          [r2 + r3], xm4
+%else
+    psubw           m5, m7                          ; m5 = word: row 0, row 1
+    psubw           m2, m7                          ; m2 = word: row 2, row 3
+    psubw           m1, m7                          ; m1 = word: row 4, row 5
+    psubw           m4, m7                          ; m4 = word: row 6, row 7
+    vextracti128    xm6, m5, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm0, m1, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm6
+    lea             r2, [r2 + r3 * 2]
+    movu            [r2], xm2
+    movu            [r2 + r3], xm3
+    lea             r2, [r2 + r3 * 2]
+    movu            [r2], xm1
+    movu            [r2 + r3], xm0
+    lea             r2, [r2 + r3 * 2]
+    movu            [r2], xm4
+    vextracti128    xm4, m4, 1
+    movu            [r2 + r3], xm4
+%endif
+    lea             r2, [r2 + r3 * 2]
+    sub             r0, r6                          ; macro advanced src by 12 rows; rewind 4 for a net +8 per iteration
+    dec             word [rsp]
+    jnz             .loop
+    RET
+%endmacro
+
+%macro FILTER_VER_LUMA_AVX2_8x8 1              ; AVX2 8-tap vertical luma filter, single 8x8 block (no loop)
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_8x8, 4, 6, 7
+    mov             r4d, r4m
+    shl             r4d, 7                                  ; coeffIdx * 128 = byte offset into tab_LumaCoeffVer_32
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                                  ; step back 3 rows of context for the 8-tap filter
+    PROCESS_LUMA_AVX2_W8_8R
+%ifidn %1,pp
+    mova            m3, [pw_512]                            ; constants loaded after the macro: it uses m0-m6 as scratch
+%else
+    add             r3d, r3d                                ; ps output is 16-bit, so double the dst stride
+    vbroadcasti128  m3, [pw_2000]
+%endif
+    lea             r4, [r3 * 3]
+%ifidn %1,pp
+    pmulhrsw        m5, m3                          ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m3                          ; m2 = word: row 2, row 3
+    pmulhrsw        m1, m3                          ; m1 = word: row 4, row 5
+    pmulhrsw        m4, m3                          ; m4 = word: row 6, row 7
+    packuswb        m5, m2
+    packuswb        m1, m4
+    vextracti128    xm2, m5, 1
+    vextracti128    xm4, m1, 1
+    movq            [r2], xm5
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm5
+    movhps          [r2 + r4], xm2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm1
+    movq            [r2 + r3], xm4
+    movhps          [r2 + r3 * 2], xm1
+    movhps          [r2 + r4], xm4
+%else
+    psubw           m5, m3                          ; m5 = word: row 0, row 1
+    psubw           m2, m3                          ; m2 = word: row 2, row 3
+    psubw           m1, m3                          ; m1 = word: row 4, row 5
+    psubw           m4, m3                          ; m4 = word: row 6, row 7
+    vextracti128    xm6, m5, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm0, m1, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm6
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm1
+    movu            [r2 + r3], xm0
+    movu            [r2 + r3 * 2], xm4
+    vextracti128    xm4, m4, 1
+    movu            [r2 + r4], xm4
+%endif
+    RET
+%endmacro
+
+%macro FILTER_VER_LUMA_AVX2_8x4 1              ; %1 = pp (pixel out) or ps (short out)
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_8x4, 4, 6, 7
+    mov             r4d, r4m                        ; r4d = coeffIdx (5th arg, from stack)
+    shl             r4d, 7                          ; coeffIdx * 128: one filter = 4 x 32-byte coeff rows
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]       ; RIP-relative table base, then add offset
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]                    ; r4 = 3 * srcStride
+    sub             r0, r4                          ; 8-tap filter reads 3 rows above the block
+    PROCESS_LUMA_AVX2_W8_4R                         ; filter 8x4 -> word rows in m5, m2
+%ifidn %1,pp
+    mova            m3, [pw_512]                    ; pmulhrsw by 512 == round-shift (x + 32) >> 6
+%else
+    add             r3d, r3d                        ; ps stores 16-bit samples: dstStride in bytes
+    vbroadcasti128  m3, [pw_2000]                   ; offset subtracted to bias intermediate values
+%endif
+    lea             r4, [r3 * 3]                    ; r4 = 3 * dstStride
+%ifidn %1,pp
+    pmulhrsw        m5, m3                          ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m3                          ; m2 = word: row 2, row 3
+    packuswb        m5, m2
+    vextracti128    xm2, m5, 1
+    movq            [r2], xm5                       ; 8 bytes per row
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm5
+    movhps          [r2 + r4], xm2
+%else
+    psubw           m5, m3                          ; m5 = word: row 0, row 1
+    psubw           m2, m3                          ; m2 = word: row 2, row 3
+    movu            [r2], xm5                       ; 8 words (16 bytes) per row
+    vextracti128    xm5, m5, 1
+    movu            [r2 + r3], xm5
+    movu            [r2 + r3 * 2], xm2
+    vextracti128    xm2, m2, 1
+    movu            [r2 + r4], xm2
+%endif
+    RET
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_8x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_8xN 8, 4, pp                   ; SSE4 variant
+    FILTER_VER_LUMA_AVX2_8x4 pp                    ; AVX2 variant
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_8x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_8xN 8, 8, pp                   ; SSE4 variant
+    FILTER_VER_LUMA_AVX2_8x8 pp                    ; AVX2 variant
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_8x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_8xN 8, 16, pp                  ; SSE4 variant
+    FILTER_VER_LUMA_AVX2_8xN 8, 16, pp             ; AVX2 variant (looping 8xN form)
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_8x32(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_8xN 8, 32, pp                  ; SSE4 variant
+    FILTER_VER_LUMA_AVX2_8xN 8, 32, pp             ; AVX2 variant (looping 8xN form)
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_8x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_8xN 8, 4, ps                   ; SSE4 variant
+    FILTER_VER_LUMA_AVX2_8x4 ps                    ; AVX2 variant
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_8x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_8xN 8, 8, ps                   ; SSE4 variant
+    FILTER_VER_LUMA_AVX2_8x8 ps                    ; AVX2 variant
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_8x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_8xN 8, 16, ps                  ; SSE4 variant
+    FILTER_VER_LUMA_AVX2_8xN 8, 16, ps             ; AVX2 variant (looping 8xN form)
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_8x32(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_8xN 8, 32, ps                  ; SSE4 variant
+    FILTER_VER_LUMA_AVX2_8xN 8, 32, ps             ; AVX2 variant (looping 8xN form)
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%3_12x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_LUMA_12xN 3                  ; %1 = width (12), %2 = height, %3 = pp/ps
+INIT_XMM sse4
+cglobal interp_8tap_vert_%3_%1x%2, 5, 7, 8
+    lea       r5, [3 * r1]                     ; r5 = 3 * srcStride
+    sub       r0, r5                           ; 8-tap filter reads 3 rows above the block
+    shl       r4d, 6                           ; coeffIdx * 64: one filter = 4 x 16-byte coeff rows
+%ifidn %3,ps
+    add       r3d, r3d                         ; ps stores 16-bit samples: dstStride in bytes
+%endif
+
+%ifdef PIC
+    lea       r5, [tab_LumaCoeffVer]           ; RIP-relative table base, then add offset
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_LumaCoeffVer + r4]
+%endif
+
+ %ifidn %3,pp
+    mova      m3, [pw_512]                     ; pmulhrsw by 512 == round-shift (x + 32) >> 6
+%else
+    mova      m3, [pw_2000]                    ; offset subtracted to bias intermediate values
+%endif
+
+    mov       r4d, %2/4                        ; loop count: 4 rows per iteration
+
+.loopH:
+    PROCESS_LUMA_W8_4R                         ; left 8 columns, 4 rows -> words in m7, m6, m5, m4
+
+%ifidn %3,pp
+    pmulhrsw  m7, m3
+    pmulhrsw  m6, m3
+    pmulhrsw  m5, m3
+    pmulhrsw  m4, m3
+
+    packuswb  m7, m6
+    packuswb  m5, m4
+
+    movlps    [r2], m7
+    movhps    [r2 + r3], m7
+    lea       r5, [r2 + 2 * r3]
+    movlps    [r5], m5
+    movhps    [r5 + r3], m5
+%else
+    psubw     m7, m3
+    psubw     m6, m3
+    psubw     m5, m3
+    psubw     m4, m3
+
+    movu      [r2], m7
+    movu      [r2 + r3], m6
+    lea       r5, [r2 + 2 * r3]
+    movu      [r5], m5
+    movu      [r5 + r3], m4
+%endif
+
+    lea       r5, [8 * r1 - 8]                 ; rewind src 8 rows, advance 8 source columns
+    sub       r0, r5
+%ifidn %3,pp
+    add       r2, 8                            ; dst -> column 8 (1 byte per pixel)
+%else
+    add       r2, 16                           ; dst -> column 8 (2 bytes per pixel)
+%endif
+
+    PROCESS_LUMA_W4_4R                         ; right 4 columns, 4 rows -> words in m4, m5
+
+%ifidn %3,pp
+    pmulhrsw  m4, m3
+    pmulhrsw  m5, m3
+
+    packuswb  m4, m5
+
+    movd      [r2], m4                         ; 4 bytes per row
+    pextrd    [r2 + r3], m4, 1
+    lea       r5, [r2 + 2 * r3]
+    pextrd    [r5], m4, 2
+    pextrd    [r5 + r3], m4, 3
+%else
+    psubw     m4, m3
+
+    movlps    [r2], m4                         ; 4 words (8 bytes) per row
+    movhps    [r2 + r3], m4
+    lea       r5, [r2 + 2 * r3]
+    movlps    [r5], m5
+    movhps    [r5 + r3], m5
+%endif
+
+    lea       r5, [4 * r1 + 8]                 ; back to column 0 of the next 4-row band
+    sub       r0, r5
+%ifidn %3,pp
+    lea       r2, [r2 + 4 * r3 - 8]            ; dst: down 4 rows, back 8 pixel columns
+%else
+    lea       r2, [r2 + 4 * r3 - 16]           ; dst: down 4 rows, back 8 word columns
+%endif
+
+    dec       r4d
+    jnz       .loopH
+
+    RET
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_pp_12x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_12xN 12, 16, pp            ; SSE4 variant
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ps_12x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_12xN 12, 16, ps            ; SSE4 variant
+
+%macro FILTER_VER_LUMA_AVX2_12x16 1            ; %1 = pp (pixel out) or ps (short out)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; needs 15 SIMD registers -> 64-bit only
+cglobal interp_8tap_vert_%1_12x16, 4, 7, 15
+    mov             r4d, r4m                        ; r4d = coeffIdx (5th arg, from stack)
+    shl             r4d, 7                          ; coeffIdx * 128: one filter = 4 x 32-byte coeff rows
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]       ; RIP-relative table base, then add offset
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]                    ; r4 = 3 * srcStride
+    sub             r0, r4                          ; 8-tap filter reads 3 rows above the block
+%ifidn %1,pp
+    mova            m14, [pw_512]                   ; pmulhrsw by 512 == round-shift (x + 32) >> 6
+%else
+    add             r3d, r3d                        ; ps stores 16-bit samples: dstStride in bytes
+    vbroadcasti128  m14, [pw_2000]                  ; offset subtracted to bias intermediate values
+%endif
+    lea             r6, [r3 * 3]                    ; r6 = 3 * dstStride
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1                  ; low lane = cols 0-7, high lane = cols 8-15
+    pmaddubsw       m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 1 * mmsize]
+    paddw           m0, m4
+    pmaddubsw       m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 1 * mmsize]
+    paddw           m1, m5
+    pmaddubsw       m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 2 * mmsize]
+    paddw           m0, m6
+    pmaddubsw       m6, m4, [r5 + 1 * mmsize]
+    paddw           m2, m6
+    pmaddubsw       m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 2 * mmsize]
+    paddw           m1, m7
+    pmaddubsw       m7, m5, [r5 + 1 * mmsize]
+    paddw           m3, m7
+    pmaddubsw       m5, [r5]
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhbw       xm8, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddubsw       m8, m6, [r5 + 3 * mmsize]
+    paddw           m0, m8
+    pmaddubsw       m8, m6, [r5 + 2 * mmsize]
+    paddw           m2, m8
+    pmaddubsw       m8, m6, [r5 + 1 * mmsize]
+    paddw           m4, m8
+    pmaddubsw       m6, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhbw       xm9, xm7, xm8
+    punpcklbw       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddubsw       m9, m7, [r5 + 3 * mmsize]
+    paddw           m1, m9
+    pmaddubsw       m9, m7, [r5 + 2 * mmsize]
+    paddw           m3, m9
+    pmaddubsw       m9, m7, [r5 + 1 * mmsize]
+    paddw           m5, m9
+    pmaddubsw       m7, [r5]
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m10, m8, [r5 + 3 * mmsize]
+    paddw           m2, m10
+    pmaddubsw       m10, m8, [r5 + 2 * mmsize]
+    paddw           m4, m10
+    pmaddubsw       m10, m8, [r5 + 1 * mmsize]
+    paddw           m6, m10
+    pmaddubsw       m8, [r5]
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m11, m9, [r5 + 3 * mmsize]
+    paddw           m3, m11
+    pmaddubsw       m11, m9, [r5 + 2 * mmsize]
+    paddw           m5, m11
+    pmaddubsw       m11, m9, [r5 + 1 * mmsize]
+    paddw           m7, m11
+    pmaddubsw       m9, [r5]
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhbw       xm12, xm10, xm11
+    punpcklbw       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddubsw       m12, m10, [r5 + 3 * mmsize]
+    paddw           m4, m12
+    pmaddubsw       m12, m10, [r5 + 2 * mmsize]
+    paddw           m6, m12
+    pmaddubsw       m12, m10, [r5 + 1 * mmsize]
+    paddw           m8, m12
+    pmaddubsw       m10, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm12, [r0]                      ; m12 = row 12
+    punpckhbw       xm13, xm11, xm12
+    punpcklbw       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddubsw       m13, m11, [r5 + 3 * mmsize]
+    paddw           m5, m13
+    pmaddubsw       m13, m11, [r5 + 2 * mmsize]
+    paddw           m7, m13
+    pmaddubsw       m13, m11, [r5 + 1 * mmsize]
+    paddw           m9, m13
+    pmaddubsw       m11, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m0, m14                         ; m0 = word: row 0
+    pmulhrsw        m1, m14                         ; m1 = word: row 1
+    pmulhrsw        m2, m14                         ; m2 = word: row 2
+    pmulhrsw        m3, m14                         ; m3 = word: row 3
+    pmulhrsw        m4, m14                         ; m4 = word: row 4
+    pmulhrsw        m5, m14                         ; m5 = word: row 5
+    packuswb        m0, m1
+    packuswb        m2, m3
+    packuswb        m4, m5
+    vpermq          m0, m0, 11011000b               ; undo cross-lane interleave from packuswb
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    movq            [r2], xm0                       ; store 12 bytes per row (8 + 4)
+    pextrd          [r2 + 8], xm0, 2
+    movq            [r2 + r3], xm1
+    pextrd          [r2 + r3 + 8], xm1, 2
+    movq            [r2 + r3 * 2], xm2
+    pextrd          [r2 + r3 * 2 + 8], xm2, 2
+    movq            [r2 + r6], xm3
+    pextrd          [r2 + r6 + 8], xm3, 2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm4
+    pextrd          [r2 + 8], xm4, 2
+    movq            [r2 + r3], xm5
+    pextrd          [r2 + r3 + 8], xm5, 2
+%else
+    psubw           m0, m14                         ; m0 = word: row 0
+    psubw           m1, m14                         ; m1 = word: row 1
+    psubw           m2, m14                         ; m2 = word: row 2
+    psubw           m3, m14                         ; m3 = word: row 3
+    psubw           m4, m14                         ; m4 = word: row 4
+    psubw           m5, m14                         ; m5 = word: row 5
+    movu            [r2], xm0                       ; store 12 words per row (16 + 8 bytes)
+    vextracti128    xm0, m0, 1
+    movq            [r2 + 16], xm0
+    movu            [r2 + r3], xm1
+    vextracti128    xm1, m1, 1
+    movq            [r2 + r3 + 16], xm1
+    movu            [r2 + r3 * 2], xm2
+    vextracti128    xm2, m2, 1
+    movq            [r2 + r3 * 2 + 16], xm2
+    movu            [r2 + r6], xm3
+    vextracti128    xm3, m3, 1
+    movq            [r2 + r6 + 16], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    vextracti128    xm4, m4, 1
+    movq            [r2 + 16], xm4
+    movu            [r2 + r3], xm5
+    vextracti128    xm5, m5, 1
+    movq            [r2 + r3 + 16], xm5
+%endif
+
+    movu            xm13, [r0 + r1]                 ; m13 = row 13
+    punpckhbw       xm0, xm12, xm13
+    punpcklbw       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddubsw       m0, m12, [r5 + 3 * mmsize]
+    paddw           m6, m0
+    pmaddubsw       m0, m12, [r5 + 2 * mmsize]
+    paddw           m8, m0
+    pmaddubsw       m0, m12, [r5 + 1 * mmsize]
+    paddw           m10, m0
+    pmaddubsw       m12, [r5]
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhbw       xm1, xm13, xm0
+    punpcklbw       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddubsw       m1, m13, [r5 + 3 * mmsize]
+    paddw           m7, m1
+    pmaddubsw       m1, m13, [r5 + 2 * mmsize]
+    paddw           m9, m1
+    pmaddubsw       m1, m13, [r5 + 1 * mmsize]
+    paddw           m11, m1
+    pmaddubsw       m13, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m6, m14                         ; m6 = word: row 6
+    pmulhrsw        m7, m14                         ; m7 = word: row 7
+    packuswb        m6, m7
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m6, 1
+    movq            [r2 + r3 * 2], xm6
+    pextrd          [r2 + r3 * 2 + 8], xm6, 2
+    movq            [r2 + r6], xm7
+    pextrd          [r2 + r6 + 8], xm7, 2
+%else
+    psubw           m6, m14                         ; m6 = word: row 6
+    psubw           m7, m14                         ; m7 = word: row 7
+    movu            [r2 + r3 * 2], xm6
+    vextracti128    xm6, m6, 1
+    movq            [r2 + r3 * 2 + 16], xm6
+    movu            [r2 + r6], xm7
+    vextracti128    xm7, m7, 1
+    movq            [r2 + r6 + 16], xm7
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm1, [r0 + r4]                  ; m1 = row 15
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m2, m0, [r5 + 3 * mmsize]
+    paddw           m8, m2
+    pmaddubsw       m2, m0, [r5 + 2 * mmsize]
+    paddw           m10, m2
+    pmaddubsw       m2, m0, [r5 + 1 * mmsize]
+    paddw           m12, m2
+    pmaddubsw       m0, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 16
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m3, m1, [r5 + 3 * mmsize]
+    paddw           m9, m3
+    pmaddubsw       m3, m1, [r5 + 2 * mmsize]
+    paddw           m11, m3
+    pmaddubsw       m3, m1, [r5 + 1 * mmsize]
+    paddw           m13, m3
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r1]                  ; m3 = row 17
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 3 * mmsize]
+    paddw           m10, m4
+    pmaddubsw       m4, m2, [r5 + 2 * mmsize]
+    paddw           m12, m4
+    pmaddubsw       m2, [r5 + 1 * mmsize]
+    paddw           m0, m2
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 3 * mmsize]
+    paddw           m11, m5
+    pmaddubsw       m5, m3, [r5 + 2 * mmsize]
+    paddw           m13, m5
+    pmaddubsw       m3, [r5 + 1 * mmsize]
+    paddw           m1, m3
+    movu            xm5, [r0 + r4]                  ; m5 = row 19
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 3 * mmsize]
+    paddw           m12, m6
+    pmaddubsw       m4, [r5 + 2 * mmsize]
+    paddw           m0, m4
+    lea             r0, [r0 + r1 * 4]
+    movu            xm6, [r0]                       ; m6 = row 20
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 3 * mmsize]
+    paddw           m13, m7
+    pmaddubsw       m5, [r5 + 2 * mmsize]
+    paddw           m1, m5
+    movu            xm7, [r0 + r1]                  ; m7 = row 21
+    punpckhbw       xm2, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm2, 1
+    pmaddubsw       m6, [r5 + 3 * mmsize]
+    paddw           m0, m6
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 22
+    punpckhbw       xm3, xm7, xm2
+    punpcklbw       xm7, xm2
+    vinserti128     m7, m7, xm3, 1
+    pmaddubsw       m7, [r5 + 3 * mmsize]
+    paddw           m1, m7
+
+%ifidn %1,pp
+    pmulhrsw        m8, m14                         ; m8 = word: row 8
+    pmulhrsw        m9, m14                         ; m9 = word: row 9
+    pmulhrsw        m10, m14                        ; m10 = word: row 10
+    pmulhrsw        m11, m14                        ; m11 = word: row 11
+    pmulhrsw        m12, m14                        ; m12 = word: row 12
+    pmulhrsw        m13, m14                        ; m13 = word: row 13
+    pmulhrsw        m0, m14                         ; m0 = word: row 14
+    pmulhrsw        m1, m14                         ; m1 = word: row 15
+    packuswb        m8, m9
+    packuswb        m10, m11
+    packuswb        m12, m13
+    packuswb        m0, m1
+    vpermq          m8, m8, 11011000b               ; undo cross-lane interleave from packuswb
+    vpermq          m10, m10, 11011000b
+    vpermq          m12, m12, 11011000b
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm13, m12, 1
+    vextracti128    xm1, m0, 1
+    movq            [r2], xm8                       ; store 12 bytes per row (8 + 4)
+    pextrd          [r2 + 8], xm8, 2
+    movq            [r2 + r3], xm9
+    pextrd          [r2 + r3 + 8], xm9, 2
+    movq            [r2 + r3 * 2], xm10
+    pextrd          [r2 + r3 * 2 + 8], xm10, 2
+    movq            [r2 + r6], xm11
+    pextrd          [r2 + r6 + 8], xm11, 2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm12
+    pextrd          [r2 + 8], xm12, 2
+    movq            [r2 + r3], xm13
+    pextrd          [r2 + r3 + 8], xm13, 2
+    movq            [r2 + r3 * 2], xm0
+    pextrd          [r2 + r3 * 2 + 8], xm0, 2
+    movq            [r2 + r6], xm1
+    pextrd          [r2 + r6 + 8], xm1, 2
+%else
+    psubw           m8, m14                         ; m8 = word: row 8
+    psubw           m9, m14                         ; m9 = word: row 9
+    psubw           m10, m14                        ; m10 = word: row 10
+    psubw           m11, m14                        ; m11 = word: row 11
+    psubw           m12, m14                        ; m12 = word: row 12
+    psubw           m13, m14                        ; m13 = word: row 13
+    psubw           m0, m14                         ; m0 = word: row 14
+    psubw           m1, m14                         ; m1 = word: row 15
+    movu            [r2], xm8                       ; store 12 words per row (16 + 8 bytes)
+    vextracti128    xm8, m8, 1
+    movq            [r2 + 16], xm8
+    movu            [r2 + r3], xm9
+    vextracti128    xm9, m9, 1
+    movq            [r2 + r3 + 16], xm9
+    movu            [r2 + r3 * 2], xm10
+    vextracti128    xm10, m10, 1
+    movq            [r2 + r3 * 2 + 16], xm10
+    movu            [r2 + r6], xm11
+    vextracti128    xm11, m11, 1
+    movq            [r2 + r6 + 16], xm11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm12
+    vextracti128    xm12, m12, 1
+    movq            [r2 + 16], xm12
+    movu            [r2 + r3], xm13
+    vextracti128    xm13, m13, 1
+    movq            [r2 + r3 + 16], xm13
+    movu            [r2 + r3 * 2], xm0
+    vextracti128    xm0, m0, 1
+    movq            [r2 + r3 * 2 + 16], xm0
+    movu            [r2 + r6], xm1
+    vextracti128    xm1, m1, 1
+    movq            [r2 + r6 + 16], xm1
+%endif
+    RET
+%endif                                          ; ARCH_X86_64
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_12x16 pp              ; AVX2 variant
+    FILTER_VER_LUMA_AVX2_12x16 ps              ; AVX2 variant
+
+%macro FILTER_VER_LUMA_AVX2_16x16 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_16x16, 4, 7, 15
+    mov             r4d, r4m
+    shl             r4d, 7
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4
+%ifidn %1,pp
+    mova            m14, [pw_512]
+%else
+    add             r3d, r3d
+    vbroadcasti128  m14, [pw_2000]
+%endif
+    lea             r6, [r3 * 3]
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 1 * mmsize]
+    paddw           m0, m4
+    pmaddubsw       m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 1 * mmsize]
+    paddw           m1, m5
+    pmaddubsw       m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 2 * mmsize]
+    paddw           m0, m6
+    pmaddubsw       m6, m4, [r5 + 1 * mmsize]
+    paddw           m2, m6
+    pmaddubsw       m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 2 * mmsize]
+    paddw           m1, m7
+    pmaddubsw       m7, m5, [r5 + 1 * mmsize]
+    paddw           m3, m7
+    pmaddubsw       m5, [r5]
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhbw       xm8, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddubsw       m8, m6, [r5 + 3 * mmsize]
+    paddw           m0, m8
+    pmaddubsw       m8, m6, [r5 + 2 * mmsize]
+    paddw           m2, m8
+    pmaddubsw       m8, m6, [r5 + 1 * mmsize]
+    paddw           m4, m8
+    pmaddubsw       m6, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhbw       xm9, xm7, xm8
+    punpcklbw       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddubsw       m9, m7, [r5 + 3 * mmsize]
+    paddw           m1, m9
+    pmaddubsw       m9, m7, [r5 + 2 * mmsize]
+    paddw           m3, m9
+    pmaddubsw       m9, m7, [r5 + 1 * mmsize]
+    paddw           m5, m9
+    pmaddubsw       m7, [r5]
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m10, m8, [r5 + 3 * mmsize]
+    paddw           m2, m10
+    pmaddubsw       m10, m8, [r5 + 2 * mmsize]
+    paddw           m4, m10
+    pmaddubsw       m10, m8, [r5 + 1 * mmsize]
+    paddw           m6, m10
+    pmaddubsw       m8, [r5]
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m11, m9, [r5 + 3 * mmsize]
+    paddw           m3, m11
+    pmaddubsw       m11, m9, [r5 + 2 * mmsize]
+    paddw           m5, m11
+    pmaddubsw       m11, m9, [r5 + 1 * mmsize]
+    paddw           m7, m11
+    pmaddubsw       m9, [r5]
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhbw       xm12, xm10, xm11
+    punpcklbw       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddubsw       m12, m10, [r5 + 3 * mmsize]
+    paddw           m4, m12
+    pmaddubsw       m12, m10, [r5 + 2 * mmsize]
+    paddw           m6, m12
+    pmaddubsw       m12, m10, [r5 + 1 * mmsize]
+    paddw           m8, m12
+    pmaddubsw       m10, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm12, [r0]                      ; m12 = row 12
+    punpckhbw       xm13, xm11, xm12
+    punpcklbw       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddubsw       m13, m11, [r5 + 3 * mmsize]
+    paddw           m5, m13
+    pmaddubsw       m13, m11, [r5 + 2 * mmsize]
+    paddw           m7, m13
+    pmaddubsw       m13, m11, [r5 + 1 * mmsize]
+    paddw           m9, m13
+    pmaddubsw       m11, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m0, m14                         ; m0 = word: row 0
+    pmulhrsw        m1, m14                         ; m1 = word: row 1
+    pmulhrsw        m2, m14                         ; m2 = word: row 2
+    pmulhrsw        m3, m14                         ; m3 = word: row 3
+    pmulhrsw        m4, m14                         ; m4 = word: row 4
+    pmulhrsw        m5, m14                         ; m5 = word: row 5
+    packuswb        m0, m1
+    packuswb        m2, m3
+    packuswb        m4, m5
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+%else
+    psubw           m0, m14                         ; m0 = word: row 0
+    psubw           m1, m14                         ; m1 = word: row 1
+    psubw           m2, m14                         ; m2 = word: row 2
+    psubw           m3, m14                         ; m3 = word: row 3
+    psubw           m4, m14                         ; m4 = word: row 4
+    psubw           m5, m14                         ; m5 = word: row 5
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r6], m3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m4
+    movu            [r2 + r3], m5
+%endif
+
+    movu            xm13, [r0 + r1]                 ; m13 = row 13
+    punpckhbw       xm0, xm12, xm13
+    punpcklbw       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddubsw       m0, m12, [r5 + 3 * mmsize]
+    paddw           m6, m0
+    pmaddubsw       m0, m12, [r5 + 2 * mmsize]
+    paddw           m8, m0
+    pmaddubsw       m0, m12, [r5 + 1 * mmsize]
+    paddw           m10, m0
+    pmaddubsw       m12, [r5]
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhbw       xm1, xm13, xm0
+    punpcklbw       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddubsw       m1, m13, [r5 + 3 * mmsize]
+    paddw           m7, m1
+    pmaddubsw       m1, m13, [r5 + 2 * mmsize]
+    paddw           m9, m1
+    pmaddubsw       m1, m13, [r5 + 1 * mmsize]
+    paddw           m11, m1
+    pmaddubsw       m13, [r5]
+
+%ifidn %1,pp
+    pmulhrsw        m6, m14                         ; m6 = word: row 6
+    pmulhrsw        m7, m14                         ; m7 = word: row 7
+    packuswb        m6, m7
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m6, 1
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r6], xm7
+%else
+    psubw           m6, m14                         ; m6 = word: row 6
+    psubw           m7, m14                         ; m7 = word: row 7
+    movu            [r2 + r3 * 2], m6
+    movu            [r2 + r6], m7
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm1, [r0 + r4]                  ; m1 = row 15
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m2, m0, [r5 + 3 * mmsize]
+    paddw           m8, m2
+    pmaddubsw       m2, m0, [r5 + 2 * mmsize]
+    paddw           m10, m2
+    pmaddubsw       m2, m0, [r5 + 1 * mmsize]
+    paddw           m12, m2
+    pmaddubsw       m0, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 16
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m3, m1, [r5 + 3 * mmsize]
+    paddw           m9, m3
+    pmaddubsw       m3, m1, [r5 + 2 * mmsize]
+    paddw           m11, m3
+    pmaddubsw       m3, m1, [r5 + 1 * mmsize]
+    paddw           m13, m3
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r1]                  ; m3 = row 17
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 3 * mmsize]
+    paddw           m10, m4
+    pmaddubsw       m4, m2, [r5 + 2 * mmsize]
+    paddw           m12, m4
+    pmaddubsw       m2, [r5 + 1 * mmsize]
+    paddw           m0, m2
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 3 * mmsize]
+    paddw           m11, m5
+    pmaddubsw       m5, m3, [r5 + 2 * mmsize]
+    paddw           m13, m5
+    pmaddubsw       m3, [r5 + 1 * mmsize]
+    paddw           m1, m3
+    movu            xm5, [r0 + r4]                  ; m5 = row 19
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 3 * mmsize]
+    paddw           m12, m6
+    pmaddubsw       m4, [r5 + 2 * mmsize]
+    paddw           m0, m4
+    lea             r0, [r0 + r1 * 4]
+    movu            xm6, [r0]                       ; m6 = row 20
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 3 * mmsize]
+    paddw           m13, m7
+    pmaddubsw       m5, [r5 + 2 * mmsize]
+    paddw           m1, m5
+    movu            xm7, [r0 + r1]                  ; m7 = row 21
+    punpckhbw       xm2, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm2, 1
+    pmaddubsw       m6, [r5 + 3 * mmsize]
+    paddw           m0, m6
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 22
+    punpckhbw       xm3, xm7, xm2
+    punpcklbw       xm7, xm2
+    vinserti128     m7, m7, xm3, 1
+    pmaddubsw       m7, [r5 + 3 * mmsize]
+    paddw           m1, m7
+
+%ifidn %1,pp
+    pmulhrsw        m8, m14                         ; m8 = word: row 8
+    pmulhrsw        m9, m14                         ; m9 = word: row 9
+    pmulhrsw        m10, m14                        ; m10 = word: row 10
+    pmulhrsw        m11, m14                        ; m11 = word: row 11
+    pmulhrsw        m12, m14                        ; m12 = word: row 12
+    pmulhrsw        m13, m14                        ; m13 = word: row 13
+    pmulhrsw        m0, m14                         ; m0 = word: row 14
+    pmulhrsw        m1, m14                         ; m1 = word: row 15
+    packuswb        m8, m9
+    packuswb        m10, m11
+    packuswb        m12, m13
+    packuswb        m0, m1
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+    vpermq          m12, m12, 11011000b
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm13, m12, 1
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm8
+    movu            [r2 + r3], xm9
+    movu            [r2 + r3 * 2], xm10
+    movu            [r2 + r6], xm11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm12
+    movu            [r2 + r3], xm13
+    movu            [r2 + r3 * 2], xm0
+    movu            [r2 + r6], xm1
+%else
+    psubw           m8, m14                         ; m8 = word: row 8
+    psubw           m9, m14                         ; m9 = word: row 9
+    psubw           m10, m14                        ; m10 = word: row 10
+    psubw           m11, m14                        ; m11 = word: row 11
+    psubw           m12, m14                        ; m12 = word: row 12
+    psubw           m13, m14                        ; m13 = word: row 13
+    psubw           m0, m14                         ; m0 = word: row 14
+    psubw           m1, m14                         ; m1 = word: row 15
+    movu            [r2], m8
+    movu            [r2 + r3], m9
+    movu            [r2 + r3 * 2], m10
+    movu            [r2 + r6], m11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m12
+    movu            [r2 + r3], m13
+    movu            [r2 + r3 * 2], m0
+    movu            [r2 + r6], m1
+%endif
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_16x16 pp
+    FILTER_VER_LUMA_AVX2_16x16 ps
+
+%macro FILTER_VER_LUMA_AVX2_16x12 1            ; %1 = output mode: "pp" (rounded 8-bit pixels) or "ps" (offset 16-bit intermediates)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; needs registers m0..m14 -> 64-bit only
+cglobal interp_8tap_vert_%1_16x12, 4, 7, 15    ; r0=src, r1=srcStride, r2=dst, r3=dstStride; coeffIdx in r4m
+    mov             r4d, r4m                   ; r4d = coeffIdx (5th argument, read from stack)
+    shl             r4d, 7                     ; coeffIdx * 128: one filter = 4 coeff-pair rows x 32 bytes
+
+%ifdef PIC                                     ; position-independent build: materialize table base first
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]               ; r4 = 3 * srcStride
+    sub             r0, r4                     ; rewind src 3 rows: 8-tap filter reads 3 rows above the output row
+%ifidn %1,pp
+    mova            m14, [pw_512]              ; pp: pmulhrsw multiplier (word 512)
+%else
+    add             r3d, r3d                   ; ps: dst stride in bytes for 16-bit output
+    vbroadcasti128  m14, [pw_2000]             ; ps: offset subtracted from every result word
+%endif
+    lea             r6, [r3 * 3]               ; r6 = 3 * dstStride
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, [r5]                   ; coeff pairs: [r5]=taps 0,1 (presumably; later offsets = later tap pairs)
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 1 * mmsize]  ; taps 2,3
+    paddw           m0, m4
+    pmaddubsw       m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 1 * mmsize]
+    paddw           m1, m5
+    pmaddubsw       m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 2 * mmsize]  ; taps 4,5
+    paddw           m0, m6
+    pmaddubsw       m6, m4, [r5 + 1 * mmsize]
+    paddw           m2, m6
+    pmaddubsw       m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 2 * mmsize]
+    paddw           m1, m7
+    pmaddubsw       m7, m5, [r5 + 1 * mmsize]
+    paddw           m3, m7
+    pmaddubsw       m5, [r5]
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhbw       xm8, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddubsw       m8, m6, [r5 + 3 * mmsize]  ; taps 6,7 -> row 0 accumulation complete after this add
+    paddw           m0, m8
+    pmaddubsw       m8, m6, [r5 + 2 * mmsize]
+    paddw           m2, m8
+    pmaddubsw       m8, m6, [r5 + 1 * mmsize]
+    paddw           m4, m8
+    pmaddubsw       m6, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhbw       xm9, xm7, xm8
+    punpcklbw       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddubsw       m9, m7, [r5 + 3 * mmsize]
+    paddw           m1, m9
+    pmaddubsw       m9, m7, [r5 + 2 * mmsize]
+    paddw           m3, m9
+    pmaddubsw       m9, m7, [r5 + 1 * mmsize]
+    paddw           m5, m9
+    pmaddubsw       m7, [r5]
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m10, m8, [r5 + 3 * mmsize]
+    paddw           m2, m10
+    pmaddubsw       m10, m8, [r5 + 2 * mmsize]
+    paddw           m4, m10
+    pmaddubsw       m10, m8, [r5 + 1 * mmsize]
+    paddw           m6, m10
+    pmaddubsw       m8, [r5]
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m11, m9, [r5 + 3 * mmsize]
+    paddw           m3, m11
+    pmaddubsw       m11, m9, [r5 + 2 * mmsize]
+    paddw           m5, m11
+    pmaddubsw       m11, m9, [r5 + 1 * mmsize]
+    paddw           m7, m11
+    pmaddubsw       m9, [r5]
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhbw       xm12, xm10, xm11
+    punpcklbw       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddubsw       m12, m10, [r5 + 3 * mmsize]
+    paddw           m4, m12
+    pmaddubsw       m12, m10, [r5 + 2 * mmsize]
+    paddw           m6, m12
+    pmaddubsw       m12, m10, [r5 + 1 * mmsize]
+    paddw           m8, m12
+    pmaddubsw       m10, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm12, [r0]                      ; m12 = row 12
+    punpckhbw       xm13, xm11, xm12
+    punpcklbw       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddubsw       m13, m11, [r5 + 3 * mmsize]
+    paddw           m5, m13
+    pmaddubsw       m13, m11, [r5 + 2 * mmsize]
+    paddw           m7, m13
+    pmaddubsw       m13, m11, [r5 + 1 * mmsize]
+    paddw           m9, m13
+    pmaddubsw       m11, [r5]
+
+%ifidn %1,pp                                   ; pp: round, pack to bytes, store output rows 0-5
+    pmulhrsw        m0, m14                         ; m0 = word: row 0
+    pmulhrsw        m1, m14                         ; m1 = word: row 1
+    pmulhrsw        m2, m14                         ; m2 = word: row 2
+    pmulhrsw        m3, m14                         ; m3 = word: row 3
+    pmulhrsw        m4, m14                         ; m4 = word: row 4
+    pmulhrsw        m5, m14                         ; m5 = word: row 5
+    packuswb        m0, m1
+    packuswb        m2, m3
+    packuswb        m4, m5
+    vpermq          m0, m0, 11011000b          ; restore lane order after in-lane packuswb
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+%else                                          ; ps: subtract offset, store 16-bit rows 0-5
+    psubw           m0, m14                         ; m0 = word: row 0
+    psubw           m1, m14                         ; m1 = word: row 1
+    psubw           m2, m14                         ; m2 = word: row 2
+    psubw           m3, m14                         ; m3 = word: row 3
+    psubw           m4, m14                         ; m4 = word: row 4
+    psubw           m5, m14                         ; m5 = word: row 5
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r6], m3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m4
+    movu            [r2 + r3], m5
+%endif
+
+    movu            xm13, [r0 + r1]                 ; m13 = row 13
+    punpckhbw       xm0, xm12, xm13
+    punpcklbw       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddubsw       m0, m12, [r5 + 3 * mmsize]
+    paddw           m6, m0
+    pmaddubsw       m0, m12, [r5 + 2 * mmsize]
+    paddw           m8, m0
+    pmaddubsw       m0, m12, [r5 + 1 * mmsize]
+    paddw           m10, m0
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhbw       xm1, xm13, xm0
+    punpcklbw       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddubsw       m1, m13, [r5 + 3 * mmsize]
+    paddw           m7, m1
+    pmaddubsw       m1, m13, [r5 + 2 * mmsize]
+    paddw           m9, m1
+    pmaddubsw       m1, m13, [r5 + 1 * mmsize]
+    paddw           m11, m1
+
+%ifidn %1,pp                                   ; rows 6-7 finished: finalize and store
+    pmulhrsw        m6, m14                         ; m6 = word: row 6
+    pmulhrsw        m7, m14                         ; m7 = word: row 7
+    packuswb        m6, m7
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m6, 1
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r6], xm7
+%else
+    psubw           m6, m14                         ; m6 = word: row 6
+    psubw           m7, m14                         ; m7 = word: row 7
+    movu            [r2 + r3 * 2], m6
+    movu            [r2 + r6], m7
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm1, [r0 + r4]                  ; m1 = row 15
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m2, m0, [r5 + 3 * mmsize]
+    paddw           m8, m2
+    pmaddubsw       m2, m0, [r5 + 2 * mmsize]
+    paddw           m10, m2
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 16
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m3, m1, [r5 + 3 * mmsize]
+    paddw           m9, m3
+    pmaddubsw       m3, m1, [r5 + 2 * mmsize]
+    paddw           m11, m3
+    movu            xm3, [r0 + r1]                  ; m3 = row 17
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 3 * mmsize]
+    paddw           m10, m4
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 3 * mmsize]
+    paddw           m11, m5
+
+%ifidn %1,pp                                   ; final output rows 8-11
+    pmulhrsw        m8, m14                         ; m8 = word: row 8
+    pmulhrsw        m9, m14                         ; m9 = word: row 9
+    pmulhrsw        m10, m14                        ; m10 = word: row 10
+    pmulhrsw        m11, m14                        ; m11 = word: row 11
+    packuswb        m8, m9
+    packuswb        m10, m11
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    movu            [r2], xm8
+    movu            [r2 + r3], xm9
+    movu            [r2 + r3 * 2], xm10
+    movu            [r2 + r6], xm11
+%else
+    psubw           m8, m14                         ; m8 = word: row 8
+    psubw           m9, m14                         ; m9 = word: row 9
+    psubw           m10, m14                        ; m10 = word: row 10
+    psubw           m11, m14                        ; m11 = word: row 11
+    movu            [r2], m8
+    movu            [r2 + r3], m9
+    movu            [r2 + r3 * 2], m10
+    movu            [r2 + r6], m11
+%endif
+    RET
+%endif                                         ; ARCH_X86_64
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_16x12 pp
+    FILTER_VER_LUMA_AVX2_16x12 ps
+
+%macro FILTER_VER_LUMA_AVX2_16x8 1             ; %1 = output mode: "pp" (rounded 8-bit pixels) or "ps" (offset 16-bit intermediates)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; needs registers m0..m14 -> 64-bit only
+cglobal interp_8tap_vert_%1_16x8, 4, 6, 15     ; r0=src, r1=srcStride, r2=dst, r3=dstStride; coeffIdx in r4m
+    mov             r4d, r4m                   ; r4d = coeffIdx (5th argument, read from stack)
+    shl             r4d, 7                     ; coeffIdx * 128: one filter = 4 coeff-pair rows x 32 bytes
+%ifdef PIC                                     ; position-independent build: materialize table base first
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+    lea             r4, [r1 * 3]               ; r4 = 3 * srcStride
+    sub             r0, r4                     ; rewind src 3 rows: 8-tap filter reads 3 rows above the output row
+%ifidn %1,pp
+    mova            m14, [pw_512]              ; pp: pmulhrsw multiplier (word 512)
+%else
+    add             r3d, r3d                   ; ps: dst stride in bytes for 16-bit output
+    vbroadcasti128  m14, [pw_2000]             ; ps: offset subtracted from every result word
+%endif
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, [r5]                   ; coeff pairs: [r5]=taps 0,1 (presumably; later offsets = later tap pairs)
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 1 * mmsize]  ; taps 2,3
+    paddw           m0, m4
+    pmaddubsw       m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 1 * mmsize]
+    paddw           m1, m5
+    pmaddubsw       m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 2 * mmsize]  ; taps 4,5
+    paddw           m0, m6
+    pmaddubsw       m6, m4, [r5 + 1 * mmsize]
+    paddw           m2, m6
+    pmaddubsw       m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 2 * mmsize]
+    paddw           m1, m7
+    pmaddubsw       m7, m5, [r5 + 1 * mmsize]
+    paddw           m3, m7
+    pmaddubsw       m5, [r5]
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhbw       xm8, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddubsw       m8, m6, [r5 + 3 * mmsize]  ; taps 6,7 -> row 0 accumulation complete after this add
+    paddw           m0, m8
+    pmaddubsw       m8, m6, [r5 + 2 * mmsize]
+    paddw           m2, m8
+    pmaddubsw       m8, m6, [r5 + 1 * mmsize]
+    paddw           m4, m8
+    pmaddubsw       m6, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhbw       xm9, xm7, xm8
+    punpcklbw       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddubsw       m9, m7, [r5 + 3 * mmsize]
+    paddw           m1, m9
+    pmaddubsw       m9, m7, [r5 + 2 * mmsize]
+    paddw           m3, m9
+    pmaddubsw       m9, m7, [r5 + 1 * mmsize]
+    paddw           m5, m9
+    pmaddubsw       m7, [r5]
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m10, m8, [r5 + 3 * mmsize]
+    paddw           m2, m10
+    pmaddubsw       m10, m8, [r5 + 2 * mmsize]
+    paddw           m4, m10
+    pmaddubsw       m10, m8, [r5 + 1 * mmsize]
+    paddw           m6, m10
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m11, m9, [r5 + 3 * mmsize]
+    paddw           m3, m11
+    pmaddubsw       m11, m9, [r5 + 2 * mmsize]
+    paddw           m5, m11
+    pmaddubsw       m11, m9, [r5 + 1 * mmsize]
+    paddw           m7, m11
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhbw       xm12, xm10, xm11
+    punpcklbw       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddubsw       m12, m10, [r5 + 3 * mmsize]
+    paddw           m4, m12
+    pmaddubsw       m12, m10, [r5 + 2 * mmsize]
+    paddw           m6, m12
+    lea             r0, [r0 + r1 * 4]
+    movu            xm12, [r0]                      ; m12 = row 12
+    punpckhbw       xm13, xm11, xm12
+    punpcklbw       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddubsw       m13, m11, [r5 + 3 * mmsize]
+    paddw           m5, m13
+    pmaddubsw       m13, m11, [r5 + 2 * mmsize]
+    paddw           m7, m13
+    lea             r4, [r3 * 3]               ; r4 repurposed: 3 * dstStride (srcStride*3 no longer needed)
+%ifidn %1,pp                                   ; pp: round, pack to bytes, store output rows 0-5
+    pmulhrsw        m0, m14                         ; m0 = word: row 0
+    pmulhrsw        m1, m14                         ; m1 = word: row 1
+    pmulhrsw        m2, m14                         ; m2 = word: row 2
+    pmulhrsw        m3, m14                         ; m3 = word: row 3
+    pmulhrsw        m4, m14                         ; m4 = word: row 4
+    pmulhrsw        m5, m14                         ; m5 = word: row 5
+    packuswb        m0, m1
+    packuswb        m2, m3
+    packuswb        m4, m5
+    vpermq          m0, m0, 11011000b          ; restore lane order after in-lane packuswb
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+%else                                          ; ps: subtract offset, store 16-bit rows 0-5
+    psubw           m0, m14                         ; m0 = word: row 0
+    psubw           m1, m14                         ; m1 = word: row 1
+    psubw           m2, m14                         ; m2 = word: row 2
+    psubw           m3, m14                         ; m3 = word: row 3
+    psubw           m4, m14                         ; m4 = word: row 4
+    psubw           m5, m14                         ; m5 = word: row 5
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r4], m3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m4
+    movu            [r2 + r3], m5
+%endif
+    movu            xm13, [r0 + r1]                 ; m13 = row 13
+    punpckhbw       xm0, xm12, xm13
+    punpcklbw       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddubsw       m0, m12, [r5 + 3 * mmsize]
+    paddw           m6, m0
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhbw       xm1, xm13, xm0
+    punpcklbw       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddubsw       m1, m13, [r5 + 3 * mmsize]
+    paddw           m7, m1
+%ifidn %1,pp                                   ; final output rows 6-7
+    pmulhrsw        m6, m14                         ; m6 = word: row 6
+    pmulhrsw        m7, m14                         ; m7 = word: row 7
+    packuswb        m6, m7
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m6, 1
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r4], xm7
+%else
+    psubw           m6, m14                         ; m6 = word: row 6
+    psubw           m7, m14                         ; m7 = word: row 7
+    movu            [r2 + r3 * 2], m6
+    movu            [r2 + r4], m7
+%endif
+    RET
+%endif                                         ; ARCH_X86_64
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_16x8 pp
+    FILTER_VER_LUMA_AVX2_16x8 ps
+
+%macro FILTER_VER_LUMA_AVX2_16x4 1             ; %1 = output mode: "pp" (rounded 8-bit pixels) or "ps" (offset 16-bit intermediates)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; needs registers m0..m12 -> 64-bit only
+cglobal interp_8tap_vert_%1_16x4, 4, 6, 13     ; r0=src, r1=srcStride, r2=dst, r3=dstStride; coeffIdx in r4m
+    mov             r4d, r4m                   ; r4d = coeffIdx (5th argument, read from stack)
+    shl             r4d, 7                     ; coeffIdx * 128: one filter = 4 coeff-pair rows x 32 bytes
+%ifdef PIC                                     ; position-independent build: materialize table base first
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+    lea             r4, [r1 * 3]               ; r4 = 3 * srcStride
+    sub             r0, r4                     ; rewind src 3 rows: 8-tap filter reads 3 rows above the output row
+%ifidn %1,pp
+    mova            m12, [pw_512]              ; pp: pmulhrsw multiplier (word 512)
+%else
+    add             r3d, r3d                   ; ps: dst stride in bytes for 16-bit output
+    vbroadcasti128  m12, [pw_2000]             ; ps: offset subtracted from every result word
+%endif
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, [r5]                   ; coeff pairs: [r5]=taps 0,1 (presumably; later offsets = later tap pairs)
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 1 * mmsize]  ; taps 2,3
+    paddw           m0, m4
+    pmaddubsw       m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 1 * mmsize]
+    paddw           m1, m5
+    pmaddubsw       m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 2 * mmsize]  ; taps 4,5
+    paddw           m0, m6
+    pmaddubsw       m6, m4, [r5 + 1 * mmsize]
+    paddw           m2, m6
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 2 * mmsize]
+    paddw           m1, m7
+    pmaddubsw       m7, m5, [r5 + 1 * mmsize]
+    paddw           m3, m7
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhbw       xm8, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddubsw       m8, m6, [r5 + 3 * mmsize]  ; taps 6,7 -> row 0 accumulation complete after this add
+    paddw           m0, m8
+    pmaddubsw       m8, m6, [r5 + 2 * mmsize]
+    paddw           m2, m8
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhbw       xm9, xm7, xm8
+    punpcklbw       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddubsw       m9, m7, [r5 + 3 * mmsize]
+    paddw           m1, m9
+    pmaddubsw       m9, m7, [r5 + 2 * mmsize]
+    paddw           m3, m9
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m10, m8, [r5 + 3 * mmsize]
+    paddw           m2, m10
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m11, m9, [r5 + 3 * mmsize]
+    paddw           m3, m11
+%ifidn %1,pp                                   ; pp: round, pack to bytes, store output rows 0-3
+    pmulhrsw        m0, m12                         ; m0 = word: row 0
+    pmulhrsw        m1, m12                         ; m1 = word: row 1
+    pmulhrsw        m2, m12                         ; m2 = word: row 2
+    pmulhrsw        m3, m12                         ; m3 = word: row 3
+    packuswb        m0, m1
+    packuswb        m2, m3
+    vpermq          m0, m0, 11011000b          ; restore lane order after in-lane packuswb
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    lea             r4, [r3 * 3]               ; r4 repurposed: 3 * dstStride
+    movu            [r2 + r4], xm3
+%else                                          ; ps: subtract offset, store 16-bit rows 0-3
+    psubw           m0, m12                         ; m0 = word: row 0
+    psubw           m1, m12                         ; m1 = word: row 1
+    psubw           m2, m12                         ; m2 = word: row 2
+    psubw           m3, m12                         ; m3 = word: row 3
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 * 2], m2
+    lea             r4, [r3 * 3]               ; r4 repurposed: 3 * dstStride
+    movu            [r2 + r4], m3
+%endif
+    RET
+%endif                                         ; ARCH_X86_64
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_16x4 pp
+    FILTER_VER_LUMA_AVX2_16x4 ps
+%macro FILTER_VER_LUMA_AVX2_16xN 3
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%3_%1x%2, 4, 9, 15
+    mov             r4d, r4m
+    shl             r4d, 7
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4
+%ifidn %3,ps
+    add             r3d, r3d
+    vbroadcasti128  m14, [pw_2000]
+%else
+    mova            m14, [pw_512]
+%endif
+    lea             r6, [r3 * 3]
+    lea             r7, [r1 * 4]
+    mov             r8d, %2 / 16
+
+.loop:
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 1 * mmsize]
+    paddw           m0, m4
+    pmaddubsw       m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 1 * mmsize]
+    paddw           m1, m5
+    pmaddubsw       m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 2 * mmsize]
+    paddw           m0, m6
+    pmaddubsw       m6, m4, [r5 + 1 * mmsize]
+    paddw           m2, m6
+    pmaddubsw       m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 2 * mmsize]
+    paddw           m1, m7
+    pmaddubsw       m7, m5, [r5 + 1 * mmsize]
+    paddw           m3, m7
+    pmaddubsw       m5, [r5]
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhbw       xm8, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddubsw       m8, m6, [r5 + 3 * mmsize]
+    paddw           m0, m8
+    pmaddubsw       m8, m6, [r5 + 2 * mmsize]
+    paddw           m2, m8
+    pmaddubsw       m8, m6, [r5 + 1 * mmsize]
+    paddw           m4, m8
+    pmaddubsw       m6, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhbw       xm9, xm7, xm8
+    punpcklbw       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddubsw       m9, m7, [r5 + 3 * mmsize]
+    paddw           m1, m9
+    pmaddubsw       m9, m7, [r5 + 2 * mmsize]
+    paddw           m3, m9
+    pmaddubsw       m9, m7, [r5 + 1 * mmsize]
+    paddw           m5, m9
+    pmaddubsw       m7, [r5]
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m10, m8, [r5 + 3 * mmsize]
+    paddw           m2, m10
+    pmaddubsw       m10, m8, [r5 + 2 * mmsize]
+    paddw           m4, m10
+    pmaddubsw       m10, m8, [r5 + 1 * mmsize]
+    paddw           m6, m10
+    pmaddubsw       m8, [r5]
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m11, m9, [r5 + 3 * mmsize]
+    paddw           m3, m11
+    pmaddubsw       m11, m9, [r5 + 2 * mmsize]
+    paddw           m5, m11
+    pmaddubsw       m11, m9, [r5 + 1 * mmsize]
+    paddw           m7, m11
+    pmaddubsw       m9, [r5]
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhbw       xm12, xm10, xm11
+    punpcklbw       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddubsw       m12, m10, [r5 + 3 * mmsize]
+    paddw           m4, m12
+    pmaddubsw       m12, m10, [r5 + 2 * mmsize]
+    paddw           m6, m12
+    pmaddubsw       m12, m10, [r5 + 1 * mmsize]
+    paddw           m8, m12
+    pmaddubsw       m10, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm12, [r0]                      ; m12 = row 12
+    punpckhbw       xm13, xm11, xm12
+    punpcklbw       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddubsw       m13, m11, [r5 + 3 * mmsize]
+    paddw           m5, m13
+    pmaddubsw       m13, m11, [r5 + 2 * mmsize]
+    paddw           m7, m13
+    pmaddubsw       m13, m11, [r5 + 1 * mmsize]
+    paddw           m9, m13
+    pmaddubsw       m11, [r5]
+
+%ifidn %3,pp
+    pmulhrsw        m0, m14                         ; m0 = word: row 0
+    pmulhrsw        m1, m14                         ; m1 = word: row 1
+    pmulhrsw        m2, m14                         ; m2 = word: row 2
+    pmulhrsw        m3, m14                         ; m3 = word: row 3
+    pmulhrsw        m4, m14                         ; m4 = word: row 4
+    pmulhrsw        m5, m14                         ; m5 = word: row 5
+    packuswb        m0, m1
+    packuswb        m2, m3
+    packuswb        m4, m5
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+%else
+    psubw           m0, m14                         ; m0 = word: row 0
+    psubw           m1, m14                         ; m1 = word: row 1
+    psubw           m2, m14                         ; m2 = word: row 2
+    psubw           m3, m14                         ; m3 = word: row 3
+    psubw           m4, m14                         ; m4 = word: row 4
+    psubw           m5, m14                         ; m5 = word: row 5
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r6], m3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m4
+    movu            [r2 + r3], m5
+%endif
+
+    movu            xm13, [r0 + r1]                 ; m13 = row 13
+    punpckhbw       xm0, xm12, xm13
+    punpcklbw       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddubsw       m0, m12, [r5 + 3 * mmsize]
+    paddw           m6, m0
+    pmaddubsw       m0, m12, [r5 + 2 * mmsize]
+    paddw           m8, m0
+    pmaddubsw       m0, m12, [r5 + 1 * mmsize]
+    paddw           m10, m0
+    pmaddubsw       m12, [r5]
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhbw       xm1, xm13, xm0
+    punpcklbw       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddubsw       m1, m13, [r5 + 3 * mmsize]
+    paddw           m7, m1
+    pmaddubsw       m1, m13, [r5 + 2 * mmsize]
+    paddw           m9, m1
+    pmaddubsw       m1, m13, [r5 + 1 * mmsize]
+    paddw           m11, m1
+    pmaddubsw       m13, [r5]
+
+%ifidn %3,pp
+    pmulhrsw        m6, m14                         ; m6 = word: row 6
+    pmulhrsw        m7, m14                         ; m7 = word: row 7
+    packuswb        m6, m7
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m6, 1
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r6], xm7
+%else
+    psubw           m6, m14                         ; m6 = word: row 6
+    psubw           m7, m14                         ; m7 = word: row 7
+    movu            [r2 + r3 * 2], m6
+    movu            [r2 + r6], m7
+%endif
+
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm1, [r0 + r4]                  ; m1 = row 15
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m2, m0, [r5 + 3 * mmsize]
+    paddw           m8, m2
+    pmaddubsw       m2, m0, [r5 + 2 * mmsize]
+    paddw           m10, m2
+    pmaddubsw       m2, m0, [r5 + 1 * mmsize]
+    paddw           m12, m2
+    pmaddubsw       m0, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 16
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m3, m1, [r5 + 3 * mmsize]
+    paddw           m9, m3
+    pmaddubsw       m3, m1, [r5 + 2 * mmsize]
+    paddw           m11, m3
+    pmaddubsw       m3, m1, [r5 + 1 * mmsize]
+    paddw           m13, m3
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r1]                  ; m3 = row 17
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 3 * mmsize]
+    paddw           m10, m4
+    pmaddubsw       m4, m2, [r5 + 2 * mmsize]
+    paddw           m12, m4
+    pmaddubsw       m2, [r5 + 1 * mmsize]
+    paddw           m0, m2
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 3 * mmsize]
+    paddw           m11, m5
+    pmaddubsw       m5, m3, [r5 + 2 * mmsize]
+    paddw           m13, m5
+    pmaddubsw       m3, [r5 + 1 * mmsize]
+    paddw           m1, m3
+    movu            xm5, [r0 + r4]                  ; m5 = row 19
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 3 * mmsize]
+    paddw           m12, m6
+    pmaddubsw       m4, [r5 + 2 * mmsize]
+    paddw           m0, m4
+    lea             r0, [r0 + r1 * 4]
+    movu            xm6, [r0]                       ; m6 = row 20
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 3 * mmsize]
+    paddw           m13, m7
+    pmaddubsw       m5, [r5 + 2 * mmsize]
+    paddw           m1, m5
+    movu            xm7, [r0 + r1]                  ; m7 = row 21
+    punpckhbw       xm2, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm2, 1
+    pmaddubsw       m6, [r5 + 3 * mmsize]
+    paddw           m0, m6
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 22
+    punpckhbw       xm3, xm7, xm2
+    punpcklbw       xm7, xm2
+    vinserti128     m7, m7, xm3, 1
+    pmaddubsw       m7, [r5 + 3 * mmsize]
+    paddw           m1, m7
+
+%ifidn %3,pp
+    pmulhrsw        m8, m14                         ; m8 = word: row 8
+    pmulhrsw        m9, m14                         ; m9 = word: row 9
+    pmulhrsw        m10, m14                        ; m10 = word: row 10
+    pmulhrsw        m11, m14                        ; m11 = word: row 11
+    pmulhrsw        m12, m14                        ; m12 = word: row 12
+    pmulhrsw        m13, m14                        ; m13 = word: row 13
+    pmulhrsw        m0, m14                         ; m0 = word: row 14
+    pmulhrsw        m1, m14                         ; m1 = word: row 15
+    packuswb        m8, m9
+    packuswb        m10, m11
+    packuswb        m12, m13
+    packuswb        m0, m1
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+    vpermq          m12, m12, 11011000b
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm13, m12, 1
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm8
+    movu            [r2 + r3], xm9
+    movu            [r2 + r3 * 2], xm10
+    movu            [r2 + r6], xm11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm12
+    movu            [r2 + r3], xm13
+    movu            [r2 + r3 * 2], xm0
+    movu            [r2 + r6], xm1
+%else
+    psubw           m8, m14                         ; m8 = word: row 8
+    psubw           m9, m14                         ; m9 = word: row 9
+    psubw           m10, m14                        ; m10 = word: row 10
+    psubw           m11, m14                        ; m11 = word: row 11
+    psubw           m12, m14                        ; m12 = word: row 12
+    psubw           m13, m14                        ; m13 = word: row 13
+    psubw           m0, m14                         ; m0 = word: row 14
+    psubw           m1, m14                         ; m1 = word: row 15
+    movu            [r2], m8
+    movu            [r2 + r3], m9
+    movu            [r2 + r3 * 2], m10
+    movu            [r2 + r6], m11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], m12
+    movu            [r2 + r3], m13
+    movu            [r2 + r3 * 2], m0
+    movu            [r2 + r6], m1
+%endif
+
+    lea             r2, [r2 + r3 * 4]
+    sub             r0, r7
+    dec             r8d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_16xN 16, 32, pp    ; 16-wide, 32-row, pixel-to-pixel variant
+    FILTER_VER_LUMA_AVX2_16xN 16, 64, pp    ; 16-wide, 64-row, pixel-to-pixel variant
+    FILTER_VER_LUMA_AVX2_16xN 16, 32, ps    ; 16-wide, 32-row, pixel-to-short variant
+    FILTER_VER_LUMA_AVX2_16xN 16, 64, ps    ; 16-wide, 64-row, pixel-to-short variant
+
+%macro PROCESS_LUMA_AVX2_W16_16R 1      ; 8-tap vertical luma filter: 16 output rows of a 16-wide column; %1 = pp/ps; coeff pairs at [r5 + k*mmsize]; r0/r2 preserved (r7/r8 walk rows)
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 1 * mmsize]
+    paddw           m0, m4
+    pmaddubsw       m2, [r5]
+    lea             r7, [r0 + r1 * 4]               ; r7 = source row cursor; leaves r0 untouched for the caller
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 1 * mmsize]
+    paddw           m1, m5
+    pmaddubsw       m3, [r5]
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 2 * mmsize]
+    paddw           m0, m6
+    pmaddubsw       m6, m4, [r5 + 1 * mmsize]
+    paddw           m2, m6
+    pmaddubsw       m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 2 * mmsize]
+    paddw           m1, m7
+    pmaddubsw       m7, m5, [r5 + 1 * mmsize]
+    paddw           m3, m7
+    pmaddubsw       m5, [r5]
+    movu            xm7, [r7 + r4]                  ; m7 = row 7
+    punpckhbw       xm8, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddubsw       m8, m6, [r5 + 3 * mmsize]
+    paddw           m0, m8
+    pmaddubsw       m8, m6, [r5 + 2 * mmsize]
+    paddw           m2, m8
+    pmaddubsw       m8, m6, [r5 + 1 * mmsize]
+    paddw           m4, m8
+    pmaddubsw       m6, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm8, [r7]                       ; m8 = row 8
+    punpckhbw       xm9, xm7, xm8
+    punpcklbw       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddubsw       m9, m7, [r5 + 3 * mmsize]
+    paddw           m1, m9
+    pmaddubsw       m9, m7, [r5 + 2 * mmsize]
+    paddw           m3, m9
+    pmaddubsw       m9, m7, [r5 + 1 * mmsize]
+    paddw           m5, m9
+    pmaddubsw       m7, [r5]
+    movu            xm9, [r7 + r1]                  ; m9 = row 9
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m10, m8, [r5 + 3 * mmsize]
+    paddw           m2, m10
+    pmaddubsw       m10, m8, [r5 + 2 * mmsize]
+    paddw           m4, m10
+    pmaddubsw       m10, m8, [r5 + 1 * mmsize]
+    paddw           m6, m10
+    pmaddubsw       m8, [r5]
+    movu            xm10, [r7 + r1 * 2]             ; m10 = row 10
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m11, m9, [r5 + 3 * mmsize]
+    paddw           m3, m11
+    pmaddubsw       m11, m9, [r5 + 2 * mmsize]
+    paddw           m5, m11
+    pmaddubsw       m11, m9, [r5 + 1 * mmsize]
+    paddw           m7, m11
+    pmaddubsw       m9, [r5]
+    movu            xm11, [r7 + r4]                 ; m11 = row 11
+    punpckhbw       xm12, xm10, xm11
+    punpcklbw       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddubsw       m12, m10, [r5 + 3 * mmsize]
+    paddw           m4, m12
+    pmaddubsw       m12, m10, [r5 + 2 * mmsize]
+    paddw           m6, m12
+    pmaddubsw       m12, m10, [r5 + 1 * mmsize]
+    paddw           m8, m12
+    pmaddubsw       m10, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm12, [r7]                      ; m12 = row 12
+    punpckhbw       xm13, xm11, xm12
+    punpcklbw       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddubsw       m13, m11, [r5 + 3 * mmsize]
+    paddw           m5, m13
+    pmaddubsw       m13, m11, [r5 + 2 * mmsize]
+    paddw           m7, m13
+    pmaddubsw       m13, m11, [r5 + 1 * mmsize]
+    paddw           m9, m13
+    pmaddubsw       m11, [r5]
+
+%ifidn %1,pp                                        ; pp: round via pmulhrsw (m14 = pw_512, set by caller), pack to bytes, store rows 0-5
+    pmulhrsw        m0, m14                         ; m0 = word: row 0
+    pmulhrsw        m1, m14                         ; m1 = word: row 1
+    pmulhrsw        m2, m14                         ; m2 = word: row 2
+    pmulhrsw        m3, m14                         ; m3 = word: row 3
+    pmulhrsw        m4, m14                         ; m4 = word: row 4
+    pmulhrsw        m5, m14                         ; m5 = word: row 5
+    packuswb        m0, m1
+    packuswb        m2, m3
+    packuswb        m4, m5
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    lea             r8, [r2 + r3 * 4]               ; r8 = dst row cursor; leaves r2 untouched for the caller
+    movu            [r8], xm4
+    movu            [r8 + r3], xm5
+%else                                               ; ps: subtract offset (m14 = pw_2000, set by caller), store 16-bit words
+    psubw           m0, m14                         ; m0 = word: row 0
+    psubw           m1, m14                         ; m1 = word: row 1
+    psubw           m2, m14                         ; m2 = word: row 2
+    psubw           m3, m14                         ; m3 = word: row 3
+    psubw           m4, m14                         ; m4 = word: row 4
+    psubw           m5, m14                         ; m5 = word: row 5
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r6], m3
+    lea             r8, [r2 + r3 * 4]
+    movu            [r8], m4
+    movu            [r8 + r3], m5
+%endif
+
+    movu            xm13, [r7 + r1]                 ; m13 = row 13
+    punpckhbw       xm0, xm12, xm13
+    punpcklbw       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddubsw       m0, m12, [r5 + 3 * mmsize]
+    paddw           m6, m0
+    pmaddubsw       m0, m12, [r5 + 2 * mmsize]
+    paddw           m8, m0
+    pmaddubsw       m0, m12, [r5 + 1 * mmsize]
+    paddw           m10, m0
+    pmaddubsw       m12, [r5]
+    movu            xm0, [r7 + r1 * 2]              ; m0 = row 14
+    punpckhbw       xm1, xm13, xm0
+    punpcklbw       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddubsw       m1, m13, [r5 + 3 * mmsize]
+    paddw           m7, m1
+    pmaddubsw       m1, m13, [r5 + 2 * mmsize]
+    paddw           m9, m1
+    pmaddubsw       m1, m13, [r5 + 1 * mmsize]
+    paddw           m11, m1
+    pmaddubsw       m13, [r5]
+
+%ifidn %1,pp                                        ; pp: round, pack and store rows 6-7
+    pmulhrsw        m6, m14                         ; m6 = word: row 6
+    pmulhrsw        m7, m14                         ; m7 = word: row 7
+    packuswb        m6, m7
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m6, 1
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm7
+%else                                               ; ps: offset and store rows 6-7 as words
+    psubw           m6, m14                         ; m6 = word: row 6
+    psubw           m7, m14                         ; m7 = word: row 7
+    movu            [r8 + r3 * 2], m6
+    movu            [r8 + r6], m7
+%endif
+
+    lea             r8, [r8 + r3 * 4]
+
+    movu            xm1, [r7 + r4]                  ; m1 = row 15
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m2, m0, [r5 + 3 * mmsize]
+    paddw           m8, m2
+    pmaddubsw       m2, m0, [r5 + 2 * mmsize]
+    paddw           m10, m2
+    pmaddubsw       m2, m0, [r5 + 1 * mmsize]
+    paddw           m12, m2
+    pmaddubsw       m0, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm2, [r7]                       ; m2 = row 16
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m3, m1, [r5 + 3 * mmsize]
+    paddw           m9, m3
+    pmaddubsw       m3, m1, [r5 + 2 * mmsize]
+    paddw           m11, m3
+    pmaddubsw       m3, m1, [r5 + 1 * mmsize]
+    paddw           m13, m3
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r7 + r1]                  ; m3 = row 17
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 3 * mmsize]
+    paddw           m10, m4
+    pmaddubsw       m4, m2, [r5 + 2 * mmsize]
+    paddw           m12, m4
+    pmaddubsw       m2, [r5 + 1 * mmsize]
+    paddw           m0, m2
+    movu            xm4, [r7 + r1 * 2]              ; m4 = row 18
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 3 * mmsize]
+    paddw           m11, m5
+    pmaddubsw       m5, m3, [r5 + 2 * mmsize]
+    paddw           m13, m5
+    pmaddubsw       m3, [r5 + 1 * mmsize]
+    paddw           m1, m3
+    movu            xm5, [r7 + r4]                  ; m5 = row 19
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 3 * mmsize]
+    paddw           m12, m6
+    pmaddubsw       m4, [r5 + 2 * mmsize]
+    paddw           m0, m4
+    lea             r7, [r7 + r1 * 4]
+    movu            xm6, [r7]                       ; m6 = row 20
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 3 * mmsize]
+    paddw           m13, m7
+    pmaddubsw       m5, [r5 + 2 * mmsize]
+    paddw           m1, m5
+    movu            xm7, [r7 + r1]                  ; m7 = row 21
+    punpckhbw       xm2, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm2, 1
+    pmaddubsw       m6, [r5 + 3 * mmsize]
+    paddw           m0, m6
+    movu            xm2, [r7 + r1 * 2]              ; m2 = row 22
+    punpckhbw       xm3, xm7, xm2
+    punpcklbw       xm7, xm2
+    vinserti128     m7, m7, xm3, 1
+    pmaddubsw       m7, [r5 + 3 * mmsize]
+    paddw           m1, m7
+
+%ifidn %1,pp                                        ; pp: round, pack and store rows 8-15
+    pmulhrsw        m8, m14                         ; m8 = word: row 8
+    pmulhrsw        m9, m14                         ; m9 = word: row 9
+    pmulhrsw        m10, m14                        ; m10 = word: row 10
+    pmulhrsw        m11, m14                        ; m11 = word: row 11
+    pmulhrsw        m12, m14                        ; m12 = word: row 12
+    pmulhrsw        m13, m14                        ; m13 = word: row 13
+    pmulhrsw        m0, m14                         ; m0 = word: row 14
+    pmulhrsw        m1, m14                         ; m1 = word: row 15
+    packuswb        m8, m9
+    packuswb        m10, m11
+    packuswb        m12, m13
+    packuswb        m0, m1
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+    vpermq          m12, m12, 11011000b
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm13, m12, 1
+    vextracti128    xm1, m0, 1
+    movu            [r8], xm8
+    movu            [r8 + r3], xm9
+    movu            [r8 + r3 * 2], xm10
+    movu            [r8 + r6], xm11
+    lea             r8, [r8 + r3 * 4]
+    movu            [r8], xm12
+    movu            [r8 + r3], xm13
+    movu            [r8 + r3 * 2], xm0
+    movu            [r8 + r6], xm1
+%else                                               ; ps: offset and store rows 8-15 as words
+    psubw           m8, m14                         ; m8 = word: row 8
+    psubw           m9, m14                         ; m9 = word: row 9
+    psubw           m10, m14                        ; m10 = word: row 10
+    psubw           m11, m14                        ; m11 = word: row 11
+    psubw           m12, m14                        ; m12 = word: row 12
+    psubw           m13, m14                        ; m13 = word: row 13
+    psubw           m0, m14                         ; m0 = word: row 14
+    psubw           m1, m14                         ; m1 = word: row 15
+    movu            [r8], m8
+    movu            [r8 + r3], m9
+    movu            [r8 + r3 * 2], m10
+    movu            [r8 + r6], m11
+    lea             r8, [r8 + r3 * 4]
+    movu            [r8], m12
+    movu            [r8 + r3], m13
+    movu            [r8 + r3 * 2], m0
+    movu            [r8 + r6], m1
+%endif
+%endmacro
+
+%macro PROCESS_LUMA_AVX2_W16_8R 1       ; 8-tap vertical luma filter: 8 output rows of a 16-wide column; %1 = pp/ps; same layout as W16_16R but [r5] products for rows past the block are skipped
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhbw       xm2, xm0, xm1
+    punpcklbw       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddubsw       m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhbw       xm3, xm1, xm2
+    punpcklbw       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhbw       xm4, xm2, xm3
+    punpcklbw       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddubsw       m4, m2, [r5 + 1 * mmsize]
+    paddw           m0, m4
+    pmaddubsw       m2, [r5]
+    lea             r7, [r0 + r1 * 4]               ; r7 = source row cursor; leaves r0 untouched for the caller
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhbw       xm5, xm3, xm4
+    punpcklbw       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddubsw       m5, m3, [r5 + 1 * mmsize]
+    paddw           m1, m5
+    pmaddubsw       m3, [r5]
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhbw       xm6, xm4, xm5
+    punpcklbw       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddubsw       m6, m4, [r5 + 2 * mmsize]
+    paddw           m0, m6
+    pmaddubsw       m6, m4, [r5 + 1 * mmsize]
+    paddw           m2, m6
+    pmaddubsw       m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhbw       xm7, xm5, xm6
+    punpcklbw       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddubsw       m7, m5, [r5 + 2 * mmsize]
+    paddw           m1, m7
+    pmaddubsw       m7, m5, [r5 + 1 * mmsize]
+    paddw           m3, m7
+    pmaddubsw       m5, [r5]
+    movu            xm7, [r7 + r4]                  ; m7 = row 7
+    punpckhbw       xm8, xm6, xm7
+    punpcklbw       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddubsw       m8, m6, [r5 + 3 * mmsize]
+    paddw           m0, m8
+    pmaddubsw       m8, m6, [r5 + 2 * mmsize]
+    paddw           m2, m8
+    pmaddubsw       m8, m6, [r5 + 1 * mmsize]
+    paddw           m4, m8
+    pmaddubsw       m6, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm8, [r7]                       ; m8 = row 8
+    punpckhbw       xm9, xm7, xm8
+    punpcklbw       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddubsw       m9, m7, [r5 + 3 * mmsize]
+    paddw           m1, m9
+    pmaddubsw       m9, m7, [r5 + 2 * mmsize]
+    paddw           m3, m9
+    pmaddubsw       m9, m7, [r5 + 1 * mmsize]
+    paddw           m5, m9
+    pmaddubsw       m7, [r5]
+    movu            xm9, [r7 + r1]                  ; m9 = row 9
+    punpckhbw       xm10, xm8, xm9
+    punpcklbw       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddubsw       m10, m8, [r5 + 3 * mmsize]
+    paddw           m2, m10
+    pmaddubsw       m10, m8, [r5 + 2 * mmsize]
+    paddw           m4, m10
+    pmaddubsw       m10, m8, [r5 + 1 * mmsize]
+    paddw           m6, m10
+    movu            xm10, [r7 + r1 * 2]             ; m10 = row 10
+    punpckhbw       xm11, xm9, xm10
+    punpcklbw       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddubsw       m11, m9, [r5 + 3 * mmsize]
+    paddw           m3, m11
+    pmaddubsw       m11, m9, [r5 + 2 * mmsize]
+    paddw           m5, m11
+    pmaddubsw       m11, m9, [r5 + 1 * mmsize]
+    paddw           m7, m11
+    movu            xm11, [r7 + r4]                 ; m11 = row 11
+    punpckhbw       xm12, xm10, xm11
+    punpcklbw       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddubsw       m12, m10, [r5 + 3 * mmsize]
+    paddw           m4, m12
+    pmaddubsw       m12, m10, [r5 + 2 * mmsize]
+    paddw           m6, m12
+    lea             r7, [r7 + r1 * 4]
+    movu            xm12, [r7]                      ; m12 = row 12
+    punpckhbw       xm13, xm11, xm12
+    punpcklbw       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddubsw       m13, m11, [r5 + 3 * mmsize]
+    paddw           m5, m13
+    pmaddubsw       m13, m11, [r5 + 2 * mmsize]
+    paddw           m7, m13
+
+%ifidn %1,pp                                        ; pp: round via pmulhrsw (m14 = pw_512, set by caller), pack to bytes, store rows 0-5
+    pmulhrsw        m0, m14                         ; m0 = word: row 0
+    pmulhrsw        m1, m14                         ; m1 = word: row 1
+    pmulhrsw        m2, m14                         ; m2 = word: row 2
+    pmulhrsw        m3, m14                         ; m3 = word: row 3
+    pmulhrsw        m4, m14                         ; m4 = word: row 4
+    pmulhrsw        m5, m14                         ; m5 = word: row 5
+    packuswb        m0, m1
+    packuswb        m2, m3
+    packuswb        m4, m5
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    lea             r8, [r2 + r3 * 4]               ; r8 = dst row cursor; leaves r2 untouched for the caller
+    movu            [r8], xm4
+    movu            [r8 + r3], xm5
+%else                                               ; ps: subtract offset (m14 = pw_2000, set by caller), store 16-bit words
+    psubw           m0, m14                         ; m0 = word: row 0
+    psubw           m1, m14                         ; m1 = word: row 1
+    psubw           m2, m14                         ; m2 = word: row 2
+    psubw           m3, m14                         ; m3 = word: row 3
+    psubw           m4, m14                         ; m4 = word: row 4
+    psubw           m5, m14                         ; m5 = word: row 5
+    movu            [r2], m0
+    movu            [r2 + r3], m1
+    movu            [r2 + r3 * 2], m2
+    movu            [r2 + r6], m3
+    lea             r8, [r2 + r3 * 4]
+    movu            [r8], m4
+    movu            [r8 + r3], m5
+%endif
+
+    movu            xm13, [r7 + r1]                 ; m13 = row 13
+    punpckhbw       xm0, xm12, xm13
+    punpcklbw       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddubsw       m0, m12, [r5 + 3 * mmsize]
+    paddw           m6, m0
+    movu            xm0, [r7 + r1 * 2]              ; m0 = row 14
+    punpckhbw       xm1, xm13, xm0
+    punpcklbw       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddubsw       m1, m13, [r5 + 3 * mmsize]
+    paddw           m7, m1
+
+%ifidn %1,pp                                        ; pp: round, pack and store rows 6-7
+    pmulhrsw        m6, m14                         ; m6 = word: row 6
+    pmulhrsw        m7, m14                         ; m7 = word: row 7
+    packuswb        m6, m7
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m6, 1
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm7
+%else                                               ; ps: offset and store rows 6-7 as words
+    psubw           m6, m14                         ; m6 = word: row 6
+    psubw           m7, m14                         ; m7 = word: row 7
+    movu            [r8 + r3 * 2], m6
+    movu            [r8 + r6], m7
+%endif
+%endmacro
+
+%macro FILTER_VER_LUMA_AVX2_24x32 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_24x32, 4, 11, 15
+    mov             r4d, r4m
+    shl             r4d, 7
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+    lea             r4, [r1 * 3]
+    sub             r0, r4
+%ifidn %1,ps
+    add             r3d, r3d
+    vbroadcasti128  m14, [pw_2000]
+%else
+    mova            m14, [pw_512]
+%endif
+    lea             r6, [r3 * 3]
+    lea             r10, [r1 * 4]
+    mov             r9d, 2
+.loopH:
+    PROCESS_LUMA_AVX2_W16_16R %1
+%ifidn %1,pp
+    add             r2, 16
+%else
+    add             r2, 32
+%endif
+    add             r0, 16
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2
+    movq            xm3, [r0 + r1 * 2]              ; m3 = row 2
+    punpcklbw       xm2, xm3
+    vinserti128     m5, m1, xm2, 1
+    pmaddubsw       m5, [r5]
+    movq            xm4, [r0 + r4]                  ; m4 = row 3
+    punpcklbw       xm3, xm4
+    lea             r7, [r0 + r1 * 4]
+    movq            xm1, [r7]                       ; m1 = row 4
+    punpcklbw       xm4, xm1
+    vinserti128     m2, m3, xm4, 1
+    pmaddubsw       m0, m2, [r5 + 1 * mmsize]
+    paddw           m5, m0
+    pmaddubsw       m2, [r5]
+    movq            xm3, [r7 + r1]                  ; m3 = row 5
+    punpcklbw       xm1, xm3
+    movq            xm4, [r7 + r1 * 2]              ; m4 = row 6
+    punpcklbw       xm3, xm4
+    vinserti128     m1, m1, xm3, 1
+    pmaddubsw       m3, m1, [r5 + 2 * mmsize]
+    paddw           m5, m3
+    pmaddubsw       m0, m1, [r5 + 1 * mmsize]
+    paddw           m2, m0
+    pmaddubsw       m1, [r5]
+    movq            xm3, [r7 + r4]                  ; m3 = row 7
+    punpcklbw       xm4, xm3
+    lea             r7, [r7 + r1 * 4]
+    movq            xm0, [r7]                       ; m0 = row 8
+    punpcklbw       xm3, xm0
+    vinserti128     m4, m4, xm3, 1
+    pmaddubsw       m3, m4, [r5 + 3 * mmsize]
+    paddw           m5, m3
+    pmaddubsw       m3, m4, [r5 + 2 * mmsize]
+    paddw           m2, m3
+    pmaddubsw       m3, m4, [r5 + 1 * mmsize]
+    paddw           m1, m3
+    pmaddubsw       m4, [r5]
+    movq            xm3, [r7 + r1]                  ; m3 = row 9
+    punpcklbw       xm0, xm3
+    movq            xm6, [r7 + r1 * 2]              ; m6 = row 10
+    punpcklbw       xm3, xm6
+    vinserti128     m0, m0, xm3, 1
+    pmaddubsw       m3, m0, [r5 + 3 * mmsize]
+    paddw           m2, m3
+    pmaddubsw       m3, m0, [r5 + 2 * mmsize]
+    paddw           m1, m3
+    pmaddubsw       m3, m0, [r5 + 1 * mmsize]
+    paddw           m4, m3
+    pmaddubsw       m0, [r5]
+
+    movq            xm3, [r7 + r4]                  ; m3 = row 11
+    punpcklbw       xm6, xm3
+    lea             r7, [r7 + r1 * 4]
+    movq            xm7, [r7]                       ; m7 = row 12
+    punpcklbw       xm3, xm7
+    vinserti128     m6, m6, xm3, 1
+    pmaddubsw       m3, m6, [r5 + 3 * mmsize]
+    paddw           m1, m3
+    pmaddubsw       m3, m6, [r5 + 2 * mmsize]
+    paddw           m4, m3
+    pmaddubsw       m3, m6, [r5 + 1 * mmsize]
+    paddw           m0, m3
+    pmaddubsw       m6, [r5]
+    movq            xm3, [r7 + r1]                  ; m3 = row 13
+    punpcklbw       xm7, xm3
+    movq            xm8, [r7 + r1 * 2]              ; m8 = row 14
+    punpcklbw       xm3, xm8
+    vinserti128     m7, m7, xm3, 1
+    pmaddubsw       m3, m7, [r5 + 3 * mmsize]
+    paddw           m4, m3
+    pmaddubsw       m3, m7, [r5 + 2 * mmsize]
+    paddw           m0, m3
+    pmaddubsw       m3, m7, [r5 + 1 * mmsize]
+    paddw           m6, m3
+    pmaddubsw       m7, [r5]
+    movq            xm3, [r7 + r4]                  ; m3 = row 15
+    punpcklbw       xm8, xm3
+    lea             r7, [r7 + r1 * 4]
+    movq            xm9, [r7]                       ; m9 = row 16
+    punpcklbw       xm3, xm9
+    vinserti128     m8, m8, xm3, 1
+    pmaddubsw       m3, m8, [r5 + 3 * mmsize]
+    paddw           m0, m3
+    pmaddubsw       m3, m8, [r5 + 2 * mmsize]
+    paddw           m6, m3
+    pmaddubsw       m3, m8, [r5 + 1 * mmsize]
+    paddw           m7, m3
+    pmaddubsw       m8, [r5]
+    movq            xm3, [r7 + r1]                  ; m3 = row 17
+    punpcklbw       xm9, xm3
+    movq            xm10, [r7 + r1 * 2]             ; m10 = row 18
+    punpcklbw       xm3, xm10
+    vinserti128     m9, m9, xm3, 1
+    pmaddubsw       m3, m9, [r5 + 3 * mmsize]
+    paddw           m6, m3
+    pmaddubsw       m3, m9, [r5 + 2 * mmsize]
+    paddw           m7, m3
+    pmaddubsw       m3, m9, [r5 + 1 * mmsize]
+    paddw           m8, m3
+    movq            xm3, [r7 + r4]                  ; m3 = row 19
+    punpcklbw       xm10, xm3
+    lea             r7, [r7 + r1 * 4]
+    movq            xm9, [r7]                       ; m9 = row 20
+    punpcklbw       xm3, xm9
+    vinserti128     m10, m10, xm3, 1
+    pmaddubsw       m3, m10, [r5 + 3 * mmsize]
+    paddw           m7, m3
+    pmaddubsw       m3, m10, [r5 + 2 * mmsize]
+    paddw           m8, m3
+    movq            xm3, [r7 + r1]                  ; m3 = row 21
+    punpcklbw       xm9, xm3
+    movq            xm10, [r7 + r1 * 2]             ; m10 = row 22
+    punpcklbw       xm3, xm10
+    vinserti128     m9, m9, xm3, 1
+    pmaddubsw       m3, m9, [r5 + 3 * mmsize]
+    paddw           m8, m3
+%ifidn %1,pp
+    pmulhrsw        m5, m14                         ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m14                         ; m2 = word: row 2, row 3
+    pmulhrsw        m1, m14                         ; m1 = word: row 4, row 5
+    pmulhrsw        m4, m14                         ; m4 = word: row 6, row 7
+    pmulhrsw        m0, m14                         ; m0 = word: row 8, row 9
+    pmulhrsw        m6, m14                         ; m6 = word: row 10, row 11
+    pmulhrsw        m7, m14                         ; m7 = word: row 12, row 13
+    pmulhrsw        m8, m14                         ; m8 = word: row 14, row 15
+    packuswb        m5, m2
+    packuswb        m1, m4
+    packuswb        m0, m6
+    packuswb        m7, m8
+    vextracti128    xm2, m5, 1
+    vextracti128    xm4, m1, 1
+    vextracti128    xm6, m0, 1
+    vextracti128    xm8, m7, 1
+    movq            [r2], xm5
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm5
+    movhps          [r2 + r6], xm2
+    lea             r8, [r2 + r3 * 4]
+    movq            [r8], xm1
+    movq            [r8 + r3], xm4
+    movhps          [r8 + r3 * 2], xm1
+    movhps          [r8 + r6], xm4
+    lea             r8, [r8 + r3 * 4]
+    movq            [r8], xm0
+    movq            [r8 + r3], xm6
+    movhps          [r8 + r3 * 2], xm0
+    movhps          [r8 + r6], xm6
+    lea             r8, [r8 + r3 * 4]
+    movq            [r8], xm7
+    movq            [r8 + r3], xm8
+    movhps          [r8 + r3 * 2], xm7
+    movhps          [r8 + r6], xm8
+%else
+    psubw           m5, m14                         ; m5 = word: row 0, row 1
+    psubw           m2, m14                         ; m2 = word: row 2, row 3
+    psubw           m1, m14                         ; m1 = word: row 4, row 5
+    psubw           m4, m14                         ; m4 = word: row 6, row 7
+    psubw           m0, m14                         ; m0 = word: row 8, row 9
+    psubw           m6, m14                         ; m6 = word: row 10, row 11
+    psubw           m7, m14                         ; m7 = word: row 12, row 13
+    psubw           m8, m14                         ; m8 = word: row 14, row 15
+    vextracti128    xm3, m5, 1
+    movu            [r2], xm5
+    movu            [r2 + r3], xm3
+    vextracti128    xm3, m2, 1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    vextracti128    xm3, m1, 1
+    lea             r8, [r2 + r3 * 4]
+    movu            [r8], xm1
+    movu            [r8 + r3], xm3
+    vextracti128    xm3, m4, 1
+    movu            [r8 + r3 * 2], xm4
+    movu            [r8 + r6], xm3
+    vextracti128    xm3, m0, 1
+    lea             r8, [r8 + r3 * 4]
+    movu            [r8], xm0
+    movu            [r8 + r3], xm3
+    vextracti128    xm3, m6, 1
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm3
+    vextracti128    xm3, m7, 1
+    lea             r8, [r8 + r3 * 4]
+    movu            [r8], xm7
+    movu            [r8 + r3], xm3
+    vextracti128    xm3, m8, 1
+    movu            [r8 + r3 * 2], xm8
+    movu            [r8 + r6], xm3
+%endif
+    sub             r7, r10
+    lea             r0, [r7 - 16]
+%ifidn %1,pp
+    lea             r2, [r8 + r3 * 4 - 16]
+%else
+    lea             r2, [r8 + r3 * 4 - 32]
+%endif
+    dec             r9d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+; Instantiate the 24x32 AVX2 vertical luma filter for both output variants:
+; pp = pixel output (rounded/clipped), ps = intermediate int16 output.
+    FILTER_VER_LUMA_AVX2_24x32 pp
+    FILTER_VER_LUMA_AVX2_24x32 ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 vertical 8-tap luma interpolation for 32xN blocks (x86-64 only).
+; %1 = width (32), %2 = height, %3 = pp (pixel out) or ps (int16 out).
+; The block is processed as 16-wide columns, 16 rows at a time, by
+; PROCESS_LUMA_AVX2_W16_16R, which (as seen elsewhere in this file) leaves
+; r7 as the advanced source cursor and r8 as the last dst row-group base.
+%macro FILTER_VER_LUMA_AVX2_32xN 3
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%3_%1x%2, 4, 12, 15
+    mov             r4d, r4m
+    shl             r4d, 7                      ; coeffIdx * 128: one 4*mmsize coefficient set per index
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; 8-tap reads 3 rows above the current row
+%ifidn %3,ps
+    add             r3d, r3d                    ; ps writes int16: double the dst stride in bytes
+    vbroadcasti128  m14, [pw_2000]              ; offset subtracted from ps results
+%else
+    mova            m14, [pw_512]               ; pmulhrsw rounding factor for pp
+%endif
+    lea             r6, [r3 * 3]
+    lea             r11, [r1 * 4]
+    mov             r9d, %2 / 16                ; row-group count
+.loopH:
+    mov             r10d, %1 / 16               ; 16-wide column count
+.loopW:
+    PROCESS_LUMA_AVX2_W16_16R %3
+%ifidn %3,pp
+    add             r2, 16
+%else
+    add             r2, 32                      ; 16 int16 samples = 32 bytes
+%endif
+    add             r0, 16
+    dec             r10d
+    jnz             .loopW
+    sub             r7, r11                     ; rewind src over the 4-row overshoot, then back to column 0
+    lea             r0, [r7 - 16]
+%ifidn %3,pp
+    lea             r2, [r8 + r3 * 4 - 16]
+%else
+    lea             r2, [r8 + r3 * 4 - 32]
+%endif
+    dec             r9d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_32xN 32, 32, pp
+    FILTER_VER_LUMA_AVX2_32xN 32, 64, pp
+    FILTER_VER_LUMA_AVX2_32xN 32, 32, ps
+    FILTER_VER_LUMA_AVX2_32xN 32, 64, ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%1_32x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 vertical 8-tap luma filter, fixed 32x16 block (x86-64 only).
+; Height equals one 16-row pass, so only a column loop (two 16-wide halves)
+; is needed; %1 selects pp (pixel out) or ps (int16 out).
+%macro FILTER_VER_LUMA_AVX2_32x16 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_32x16, 4, 10, 15
+    mov             r4d, r4m
+    shl             r4d, 7                      ; coeffIdx * 128-byte coefficient set
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above for the 8-tap window
+%ifidn %1,ps
+    add             r3d, r3d                    ; int16 output doubles dst stride
+    vbroadcasti128  m14, [pw_2000]
+%else
+    mova            m14, [pw_512]
+%endif
+    lea             r6, [r3 * 3]
+    mov             r9d, 2                      ; two 16-wide columns
+.loopW:
+    PROCESS_LUMA_AVX2_W16_16R %1
+%ifidn %1,pp
+    add             r2, 16
+%else
+    add             r2, 32
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_32x16 pp
+    FILTER_VER_LUMA_AVX2_32x16 ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%1_32x24(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 vertical 8-tap luma filter, fixed 32x24 block (x86-64 only).
+; Handles rows 0-15 with two 16-row column passes, then rows 16-23 with two
+; 8-row column passes (PROCESS_LUMA_AVX2_W16_8R).
+%macro FILTER_VER_LUMA_AVX2_32x24 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_32x24, 4, 10, 15
+    mov             r4d, r4m
+    shl             r4d, 7                      ; coeffIdx * 128-byte coefficient set
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; 8-tap window starts 3 rows above
+%ifidn %1,ps
+    add             r3d, r3d                    ; int16 output doubles dst stride
+%endif
+    lea             r6, [r3 * 3]
+%ifidn %1,pp
+    mova            m14, [pw_512]               ; pmulhrsw rounding factor
+%else
+    vbroadcasti128  m14, [pw_2000]              ; ps offset
+%endif
+    mov             r9d, 2                      ; two 16-wide columns, 16 rows
+.loopW:
+    PROCESS_LUMA_AVX2_W16_16R %1
+%ifidn %1,pp
+    add             r2, 16
+%else
+    add             r2, 32
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    lea             r9, [r1 * 4]
+    sub             r7, r9                      ; rewind src cursor, return to column 0
+    lea             r0, [r7 - 16]
+%ifidn %1,pp
+    lea             r2, [r8 + r3 * 4 - 16]
+%else
+    lea             r2, [r8 + r3 * 4 - 32]
+%endif
+    mov             r9d, 2                      ; two 16-wide columns, remaining 8 rows
+.loop:
+    PROCESS_LUMA_AVX2_W16_8R %1
+%ifidn %1,pp
+    add             r2, 16
+%else
+    add             r2, 32
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_32x24 pp
+    FILTER_VER_LUMA_AVX2_32x24 ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%1_32x8(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 vertical 8-tap luma filter, fixed 32x8 block (x86-64 only).
+; One 8-row pass per 16-wide column (PROCESS_LUMA_AVX2_W16_8R), two columns.
+%macro FILTER_VER_LUMA_AVX2_32x8 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_32x8, 4, 10, 15
+    mov             r4d, r4m
+    shl             r4d, 7                      ; coeffIdx * 128-byte coefficient set
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; 8-tap window starts 3 rows above
+%ifidn %1,ps
+    add             r3d, r3d                    ; int16 output doubles dst stride
+%endif
+    lea             r6, [r3 * 3]
+%ifidn %1,pp
+    mova            m14, [pw_512]
+%else
+    vbroadcasti128  m14, [pw_2000]
+%endif
+    mov             r9d, 2                      ; two 16-wide columns
+.loopW:
+    PROCESS_LUMA_AVX2_W16_8R %1
+%ifidn %1,pp
+    add             r2, 16
+%else
+    add             r2, 32
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_32x8 pp
+    FILTER_VER_LUMA_AVX2_32x8 ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%1_48x64(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 vertical 8-tap luma filter, fixed 48x64 block (x86-64 only).
+; Three 16-wide columns per row-group, four 16-row groups.
+%macro FILTER_VER_LUMA_AVX2_48x64 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_48x64, 4, 12, 15
+    mov             r4d, r4m
+    shl             r4d, 7                      ; coeffIdx * 128-byte coefficient set
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; 8-tap window starts 3 rows above
+
+%ifidn %1,ps
+    add             r3d, r3d                    ; int16 output doubles dst stride
+%endif
+
+    lea             r6, [r3 * 3]
+    lea             r11, [r1 * 4]
+
+%ifidn %1,pp
+    mova            m14, [pw_512]
+%else
+    vbroadcasti128  m14, [pw_2000]
+%endif
+
+    mov             r9d, 4                      ; 64 rows / 16
+.loopH:
+    mov             r10d, 3                     ; 48 cols / 16
+.loopW:
+    PROCESS_LUMA_AVX2_W16_16R %1
+%ifidn %1,pp
+    add             r2, 16
+%else
+    add             r2, 32
+%endif
+    add             r0, 16
+    dec             r10d
+    jnz             .loopW
+    sub             r7, r11                     ; rewind src cursor; -32 undoes the two column advances
+    lea             r0, [r7 - 32]
+%ifidn %1,pp
+    lea             r2, [r8 + r3 * 4 - 32]
+%else
+    lea             r2, [r8 + r3 * 4 - 64]
+%endif
+    dec             r9d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_48x64 pp
+    FILTER_VER_LUMA_AVX2_48x64 ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 vertical 8-tap luma filter for 64xN blocks (x86-64 only).
+; %1 = width (64), %2 = height, %3 = pp/ps. Four 16-wide columns per
+; 16-row group; the -48 / -96 fixups undo the three in-loop column advances.
+%macro FILTER_VER_LUMA_AVX2_64xN 3
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%3_%1x%2, 4, 12, 15
+    mov             r4d, r4m
+    shl             r4d, 7                      ; coeffIdx * 128-byte coefficient set
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; 8-tap window starts 3 rows above
+
+%ifidn %3,ps
+    add             r3d, r3d                    ; int16 output doubles dst stride
+%endif
+
+    lea             r6, [r3 * 3]
+    lea             r11, [r1 * 4]
+
+%ifidn %3,pp
+    mova            m14, [pw_512]
+%else
+    vbroadcasti128  m14, [pw_2000]
+%endif
+
+    mov             r9d, %2 / 16                ; row-group count
+.loopH:
+    mov             r10d, %1 / 16               ; column count (4 for width 64)
+.loopW:
+    PROCESS_LUMA_AVX2_W16_16R %3
+%ifidn %3,pp
+    add             r2, 16
+%else
+    add             r2, 32
+%endif
+    add             r0, 16
+    dec             r10d
+    jnz             .loopW
+    sub             r7, r11                     ; rewind src cursor, back to column 0
+    lea             r0, [r7 - 48]
+%ifidn %3,pp
+    lea             r2, [r8 + r3 * 4 - 48]
+%else
+    lea             r2, [r8 + r3 * 4 - 96]
+%endif
+    dec             r9d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_64xN 64, 32, pp
+    FILTER_VER_LUMA_AVX2_64xN 64, 48, pp
+    FILTER_VER_LUMA_AVX2_64xN 64, 64, pp
+    FILTER_VER_LUMA_AVX2_64xN 64, 32, ps
+    FILTER_VER_LUMA_AVX2_64xN 64, 48, ps
+    FILTER_VER_LUMA_AVX2_64xN 64, 64, ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%1_64x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; AVX2 vertical 8-tap luma filter, fixed 64x16 block (x86-64 only).
+; Height equals one 16-row pass, so only the column loop (4 columns) is needed.
+%macro FILTER_VER_LUMA_AVX2_64x16 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_64x16, 4, 10, 15
+    mov             r4d, r4m
+    shl             r4d, 7                      ; coeffIdx * 128-byte coefficient set
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; 8-tap window starts 3 rows above
+
+%ifidn %1,ps
+    add             r3d, r3d                    ; int16 output doubles dst stride
+%endif
+
+    lea             r6, [r3 * 3]
+
+%ifidn %1,pp
+    mova            m14, [pw_512]
+%else
+    vbroadcasti128  m14, [pw_2000]
+%endif
+
+    mov             r9d, 4                      ; four 16-wide columns
+.loopW:
+    PROCESS_LUMA_AVX2_W16_16R %1
+%ifidn %1,pp
+    add             r2, 16
+%else
+    add             r2, 32
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_64x16 pp
+    FILTER_VER_LUMA_AVX2_64x16 ps
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+; SSE4 vertical 8-tap luma filter, generic %1 x %2 block, %3 = pp/ps.
+; Works in 8-wide, 4-row tiles via PROCESS_LUMA_W8_4R (defined elsewhere;
+; it is expected to produce rows in m7/m6/m5/m4 and advance r0).
+; The row-group counter lives in the reserved stack slot [rsp] (0-gprsize)
+; because the function only keeps 7 GPRs.
+%macro FILTER_VER_LUMA 3
+INIT_XMM sse4
+cglobal interp_8tap_vert_%3_%1x%2, 5, 7, 8 ,0-gprsize
+    lea       r5, [3 * r1]
+    sub       r0, r5                            ; 8-tap window starts 3 rows above
+    shl       r4d, 6                            ; coeffIdx * 64 bytes (4 x 16-byte coeff rows)
+%ifidn %3,ps
+    add       r3d, r3d                          ; int16 output doubles dst stride
+%endif
+
+%ifdef PIC
+    lea       r5, [tab_LumaCoeffVer]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_LumaCoeffVer + r4]
+%endif
+
+%ifidn %3,pp
+    mova      m3, [pw_512]                      ; pmulhrsw rounding factor
+%else
+    mova      m3, [pw_2000]                     ; offset subtracted from ps results
+%endif
+    mov       dword [rsp], %2/4                 ; row-group (4-row) counter
+
+.loopH:
+    mov       r4d, (%1/8)                       ; 8-wide column counter
+.loopW:
+    PROCESS_LUMA_W8_4R
+%ifidn %3,pp
+    pmulhrsw  m7, m3
+    pmulhrsw  m6, m3
+    pmulhrsw  m5, m3
+    pmulhrsw  m4, m3
+
+    packuswb  m7, m6
+    packuswb  m5, m4
+
+    movlps    [r2], m7
+    movhps    [r2 + r3], m7
+    lea       r5, [r2 + 2 * r3]
+    movlps    [r5], m5
+    movhps    [r5 + r3], m5
+%else
+    psubw     m7, m3
+    psubw     m6, m3
+    psubw     m5, m3
+    psubw     m4, m3
+
+    movu      [r2], m7
+    movu      [r2 + r3], m6
+    lea       r5, [r2 + 2 * r3]
+    movu      [r5], m5
+    movu      [r5 + r3], m4
+%endif
+
+    ; undo the 8-row source advance from PROCESS_LUMA_W8_4R, step 8 cols right
+    lea       r5, [8 * r1 - 8]
+    sub       r0, r5
+%ifidn %3,pp
+    add       r2, 8
+%else
+    add       r2, 16
+%endif
+    dec       r4d
+    jnz       .loopW
+
+    ; next 4-row group: down 4 rows, back to column 0
+    lea       r0, [r0 + 4 * r1 - %1]
+%ifidn %3,pp
+    lea       r2, [r2 + 4 * r3 - %1]
+%else
+    lea       r2, [r2 + 4 * r3 - 2 * %1]
+%endif
+
+    dec       dword [rsp]
+    jnz       .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_LUMA 16, 4, pp
+    FILTER_VER_LUMA 16, 8, pp
+    FILTER_VER_LUMA 16, 12, pp
+    FILTER_VER_LUMA 16, 16, pp
+    FILTER_VER_LUMA 16, 32, pp
+    FILTER_VER_LUMA 16, 64, pp
+    FILTER_VER_LUMA 24, 32, pp
+    FILTER_VER_LUMA 32, 8, pp
+    FILTER_VER_LUMA 32, 16, pp
+    FILTER_VER_LUMA 32, 24, pp
+    FILTER_VER_LUMA 32, 32, pp
+    FILTER_VER_LUMA 32, 64, pp
+    FILTER_VER_LUMA 48, 64, pp
+    FILTER_VER_LUMA 64, 16, pp
+    FILTER_VER_LUMA 64, 32, pp
+    FILTER_VER_LUMA 64, 48, pp
+    FILTER_VER_LUMA 64, 64, pp
+
+    FILTER_VER_LUMA 16, 4, ps
+    FILTER_VER_LUMA 16, 8, ps
+    FILTER_VER_LUMA 16, 12, ps
+    FILTER_VER_LUMA 16, 16, ps
+    FILTER_VER_LUMA 16, 32, ps
+    FILTER_VER_LUMA 16, 64, ps
+    FILTER_VER_LUMA 24, 32, ps
+    FILTER_VER_LUMA 32, 8, ps
+    FILTER_VER_LUMA 32, 16, ps
+    FILTER_VER_LUMA 32, 24, ps
+    FILTER_VER_LUMA 32, 32, ps
+    FILTER_VER_LUMA 32, 64, ps
+    FILTER_VER_LUMA 48, 64, ps
+    FILTER_VER_LUMA 64, 16, ps
+    FILTER_VER_LUMA 64, 32, ps
+    FILTER_VER_LUMA 64, 48, ps
+    FILTER_VER_LUMA 64, 64, ps
+
+; Compute 4 rows x 4 cols of the vertical 8-tap filter on int16 input
+; (short->pixel path). Input: r0 = src (int16, stride r1 in bytes),
+; r6 = coefficient table (4 x 16-byte rows of packed word pairs).
+; Output: m0-m3 = 32-bit row sums for rows 1-4, un-rounded/un-shifted.
+; Rows are paired with punpcklwd and combined via pmaddwd so each dword
+; accumulates coeff[i]*row[i] + coeff[i+1]*row[i+1].
+; Side effect: r0 advances by 8 rows (four 2*r1 steps); caller compensates.
+%macro PROCESS_LUMA_SP_W4_4R 0
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+    pmaddwd    m0, [r6 + 0 *16]                ;m0=[0+1]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m1, m4                          ;m1=[1 2]
+    pmaddwd    m1, [r6 + 0 *16]                ;m1=[1+2]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[2 3]
+    pmaddwd    m2, m4, [r6 + 0 *16]            ;m2=[2+3]  Row3
+    pmaddwd    m4, [r6 + 1 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[3 4]
+    pmaddwd    m3, m5, [r6 + 0 *16]            ;m3=[3+4]  Row4
+    pmaddwd    m5, [r6 + 1 * 16]
+    paddd      m1, m5                          ;m1 = [1+2+3+4]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[4 5]
+    pmaddwd    m6, m4, [r6 + 1 * 16]
+    paddd      m2, m6                          ;m2=[2+3+4+5]  Row3
+    pmaddwd    m4, [r6 + 2 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3+4+5]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[5 6]
+    pmaddwd    m6, m5, [r6 + 1 * 16]
+    paddd      m3, m6                          ;m3=[3+4+5+6]  Row4
+    pmaddwd    m5, [r6 + 2 * 16]
+    paddd      m1, m5                          ;m1=[1+2+3+4+5+6]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[6 7]
+    pmaddwd    m6, m4, [r6 + 2 * 16]
+    paddd      m2, m6                          ;m2=[2+3+4+5+6+7]  Row3
+    pmaddwd    m4, [r6 + 3 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3+4+5+6+7]  Row1 end
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[7 8]
+    pmaddwd    m6, m5, [r6 + 2 * 16]
+    paddd      m3, m6                          ;m3=[3+4+5+6+7+8]  Row4
+    pmaddwd    m5, [r6 + 3 * 16]
+    paddd      m1, m5                          ;m1=[1+2+3+4+5+6+7+8]  Row2 end
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[8 9]
+    pmaddwd    m4, [r6 + 3 * 16]
+    paddd      m2, m4                          ;m2=[2+3+4+5+6+7+8+9]  Row3 end
+
+    movq       m4, [r0 + 2 * r1]
+    punpcklwd  m5, m4                          ;m5=[9 10]
+    pmaddwd    m5, [r6 + 3 * 16]
+    paddd      m3, m5                          ;m3=[3+4+5+6+7+8+9+10]  Row4 end
+%endmacro
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_sp_%1x%2(int16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+; SSE4 short->pixel vertical 8-tap luma filter, %1 x %2 block.
+; Accumulates in 32 bits via PROCESS_LUMA_SP_W4_4R, then rounds (+pd_526336),
+; shifts >>12, and packs/clamps to bytes. Row-group counter kept on the stack.
+%macro FILTER_VER_LUMA_SP 2
+INIT_XMM sse4
+cglobal interp_8tap_vert_sp_%1x%2, 5, 7, 8 ,0-gprsize
+
+    add       r1d, r1d                          ; int16 source: stride in bytes = 2 * srcStride
+    lea       r5, [r1 + 2 * r1]
+    sub       r0, r5                            ; 8-tap window starts 3 rows above
+    shl       r4d, 6                            ; coeffIdx * 64 bytes (4 x 16-byte coeff rows)
+
+%ifdef PIC
+    lea       r5, [tab_LumaCoeffV]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_LumaCoeffV + r4]
+%endif
+
+    mova      m7, [pd_526336]                   ; rounding offset for the >>12 shift
+
+    mov       dword [rsp], %2/4                 ; 4-row group counter
+.loopH:
+    mov       r4d, (%1/4)                       ; 4-wide column counter
+.loopW:
+    PROCESS_LUMA_SP_W4_4R
+
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+
+    psrad     m0, 12
+    psrad     m1, 12
+    psrad     m2, 12
+    psrad     m3, 12
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+
+    packuswb  m0, m2                            ; clamp to [0,255], rows 1-4 in one register
+
+    movd      [r2], m0
+    pextrd    [r2 + r3], m0, 1
+    lea       r5, [r2 + 2 * r3]
+    pextrd    [r5], m0, 2
+    pextrd    [r5 + r3], m0, 3
+
+    ; undo the 8-row source advance, step 4 int16 cols (8 bytes) right
+    lea       r5, [8 * r1 - 2 * 4]
+    sub       r0, r5
+    add       r2, 4
+
+    dec       r4d
+    jnz       .loopW
+
+    lea       r0, [r0 + 4 * r1 - 2 * %1]
+    lea       r2, [r2 + 4 * r3 - %1]
+
+    dec       dword [rsp]
+    jnz       .loopH
+
+    RET
+%endmacro
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_sp_%1x%2(int16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+    FILTER_VER_LUMA_SP 4, 4
+    FILTER_VER_LUMA_SP 8, 8
+    FILTER_VER_LUMA_SP 8, 4
+    FILTER_VER_LUMA_SP 4, 8
+    FILTER_VER_LUMA_SP 16, 16
+    FILTER_VER_LUMA_SP 16, 8
+    FILTER_VER_LUMA_SP 8, 16
+    FILTER_VER_LUMA_SP 16, 12
+    FILTER_VER_LUMA_SP 12, 16
+    FILTER_VER_LUMA_SP 16, 4
+    FILTER_VER_LUMA_SP 4, 16
+    FILTER_VER_LUMA_SP 32, 32
+    FILTER_VER_LUMA_SP 32, 16
+    FILTER_VER_LUMA_SP 16, 32
+    FILTER_VER_LUMA_SP 32, 24
+    FILTER_VER_LUMA_SP 24, 32
+    FILTER_VER_LUMA_SP 32, 8
+    FILTER_VER_LUMA_SP 8, 32
+    FILTER_VER_LUMA_SP 64, 64
+    FILTER_VER_LUMA_SP 64, 32
+    FILTER_VER_LUMA_SP 32, 64
+    FILTER_VER_LUMA_SP 64, 48
+    FILTER_VER_LUMA_SP 48, 64
+    FILTER_VER_LUMA_SP 64, 16
+    FILTER_VER_LUMA_SP 16, 64
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+; Convert a 4x2 pixel block to int16 "short" form: via punpcklbw with pb_128
+; and pmaddubsw with tab_c_64_n64 each byte is mapped to a scaled/offset word
+; (the standard p2s transform used throughout this file).
+INIT_XMM sse4
+cglobal filterPixelToShort_4x2, 3, 4, 3
+    mov         r3d, r3m
+    add         r3d, r3d                        ; int16 dst: stride in bytes
+
+    ; load constant
+    mova        m1, [pb_128]
+    mova        m2, [tab_c_64_n64]
+
+    movd        m0, [r0]
+    pinsrd      m0, [r0 + r1], 1                ; both 4-pixel rows in one register
+    punpcklbw   m0, m1
+    pmaddubsw   m0, m2
+
+    movq        [r2 + r3 * 0], m0
+    movhps      [r2 + r3 * 1], m0
+
+    RET
+
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int16_t dstStride)
+;-----------------------------------------------------------------------------
+; Convert an 8x2 pixel block to int16 "short" form, one row per iteration,
+; using the same pb_128 / tab_c_64_n64 transform as the 4x2 variant.
+INIT_XMM ssse3
+cglobal filterPixelToShort_8x2, 3, 4, 3
+    mov         r3d, r3m
+    add         r3d, r3d                        ; int16 dst: stride in bytes
+
+    ; load constant
+    mova        m1, [pb_128]
+    mova        m2, [tab_c_64_n64]
+
+    movh        m0, [r0]
+    punpcklbw   m0, m1
+    pmaddubsw   m0, m2
+    movu        [r2 + r3 * 0], m0
+
+    movh        m0, [r0 + r1]
+    punpcklbw   m0, m1
+    pmaddubsw   m0, m2
+    movu        [r2 + r3 * 1], m0
+
+    RET
+
+; Compute 4 rows x 4 cols of the vertical 4-tap chroma filter on int16 input.
+; Input: r0 = src (int16, stride r1 bytes), r6 = coeff table (2 x 16 bytes).
+; Output: m0-m3 = un-rounded 32-bit sums for rows 1-4.
+; Side effect: r0 advances by 4 rows (two 2*r1 steps); caller compensates.
+%macro PROCESS_CHROMA_SP_W4_4R 0
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+    pmaddwd    m0, [r6 + 0 *16]                ;m0=[0+1]         Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m1, m4                          ;m1=[1 2]
+    pmaddwd    m1, [r6 + 0 *16]                ;m1=[1+2]         Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[2 3]
+    pmaddwd    m2, m4, [r6 + 0 *16]            ;m2=[2+3]         Row3
+    pmaddwd    m4, [r6 + 1 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3]     Row1 done
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[3 4]
+    pmaddwd    m3, m5, [r6 + 0 *16]            ;m3=[3+4]         Row4
+    pmaddwd    m5, [r6 + 1 * 16]
+    paddd      m1, m5                          ;m1 = [1+2+3+4]   Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[4 5]
+    pmaddwd    m4, [r6 + 1 * 16]
+    paddd      m2, m4                          ;m2=[2+3+4+5]     Row3
+
+    movq       m4, [r0 + 2 * r1]
+    punpcklwd  m5, m4                          ;m5=[5 6]
+    pmaddwd    m5, [r6 + 1 * 16]
+    paddd      m3, m5                          ;m3=[3+4+5+6]     Row4
+%endmacro
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_sp_%1x%2(int16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+; SSE4 short->pixel vertical 4-tap chroma filter, %1 x %2 block.
+; Rounds with pd_526336, shifts >>12, clamps to bytes; same tiling scheme as
+; FILTER_VER_LUMA_SP but with a 1-row lead-in (4-tap) instead of 3.
+%macro FILTER_VER_CHROMA_SP 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_sp_%1x%2, 5, 7, 7 ,0-gprsize
+
+    add       r1d, r1d                          ; int16 source: stride in bytes
+    sub       r0, r1                            ; 4-tap window starts 1 row above
+    shl       r4d, 5                            ; coeffIdx * 32 bytes (2 x 16-byte coeff rows)
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_ChromaCoeffV + r4]
+%endif
+
+    mova      m6, [pd_526336]                   ; rounding offset for the >>12 shift
+
+    mov       dword [rsp], %2/4                 ; 4-row group counter
+
+.loopH:
+    mov       r4d, (%1/4)                       ; 4-wide column counter
+.loopW:
+    PROCESS_CHROMA_SP_W4_4R
+
+    paddd     m0, m6
+    paddd     m1, m6
+    paddd     m2, m6
+    paddd     m3, m6
+
+    psrad     m0, 12
+    psrad     m1, 12
+    psrad     m2, 12
+    psrad     m3, 12
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+
+    packuswb  m0, m2                            ; clamp to [0,255], rows 1-4
+
+    movd      [r2], m0
+    pextrd    [r2 + r3], m0, 1
+    lea       r5, [r2 + 2 * r3]
+    pextrd    [r5], m0, 2
+    pextrd    [r5 + r3], m0, 3
+
+    ; undo the 4-row source advance, step 4 int16 cols (8 bytes) right
+    lea       r5, [4 * r1 - 2 * 4]
+    sub       r0, r5
+    add       r2, 4
+
+    dec       r4d
+    jnz       .loopW
+
+    lea       r0, [r0 + 4 * r1 - 2 * %1]
+    lea       r2, [r2 + 4 * r3 - %1]
+
+    dec       dword [rsp]
+    jnz       .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_SP 4, 4
+    FILTER_VER_CHROMA_SP 4, 8
+    FILTER_VER_CHROMA_SP 16, 16
+    FILTER_VER_CHROMA_SP 16, 8
+    FILTER_VER_CHROMA_SP 16, 12
+    FILTER_VER_CHROMA_SP 12, 16
+    FILTER_VER_CHROMA_SP 16, 4
+    FILTER_VER_CHROMA_SP 4, 16
+    FILTER_VER_CHROMA_SP 32, 32
+    FILTER_VER_CHROMA_SP 32, 16
+    FILTER_VER_CHROMA_SP 16, 32
+    FILTER_VER_CHROMA_SP 32, 24
+    FILTER_VER_CHROMA_SP 24, 32
+    FILTER_VER_CHROMA_SP 32, 8
+
+    FILTER_VER_CHROMA_SP 16, 24
+    FILTER_VER_CHROMA_SP 16, 64
+    FILTER_VER_CHROMA_SP 12, 32
+    FILTER_VER_CHROMA_SP 4, 32
+    FILTER_VER_CHROMA_SP 32, 64
+    FILTER_VER_CHROMA_SP 32, 48
+    FILTER_VER_CHROMA_SP 24, 64
+
+    FILTER_VER_CHROMA_SP 64, 64
+    FILTER_VER_CHROMA_SP 64, 32
+    FILTER_VER_CHROMA_SP 64, 48
+    FILTER_VER_CHROMA_SP 48, 64
+    FILTER_VER_CHROMA_SP 64, 16
+
+
+; Compute 4 rows x 2 cols of the vertical 4-tap chroma filter on int16 input.
+; %1 = coefficient table base register/address (2 x 16-byte coeff rows).
+; Two row-pairs are packed into one xmm (punpcklqdq) so each pmaddwd covers
+; two output rows at once. Output: m0 = rows 1-2 sums, m2 = rows 3-4 sums
+; (un-rounded 32-bit). Side effect: r0 advances by 4 rows; caller compensates.
+%macro PROCESS_CHROMA_SP_W2_4R 1
+    movd       m0, [r0]
+    movd       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+
+    lea        r0, [r0 + 2 * r1]
+    movd       m2, [r0]
+    punpcklwd  m1, m2                          ;m1=[1 2]
+    punpcklqdq m0, m1                          ;m0=[0 1 1 2]
+    pmaddwd    m0, [%1 + 0 *16]                ;m0=[0+1 1+2] Row 1-2
+
+    movd       m1, [r0 + r1]
+    punpcklwd  m2, m1                          ;m2=[2 3]
+
+    lea        r0, [r0 + 2 * r1]
+    movd       m3, [r0]
+    punpcklwd  m1, m3                          ;m2=[3 4]
+    punpcklqdq m2, m1                          ;m2=[2 3 3 4]
+
+    pmaddwd    m4, m2, [%1 + 1 * 16]           ;m4=[2+3 3+4] Row 1-2
+    pmaddwd    m2, [%1 + 0 * 16]               ;m2=[2+3 3+4] Row 3-4
+    paddd      m0, m4                          ;m0=[0+1+2+3 1+2+3+4] Row 1-2
+
+    movd       m1, [r0 + r1]
+    punpcklwd  m3, m1                          ;m3=[4 5]
+
+    movd       m4, [r0 + 2 * r1]
+    punpcklwd  m1, m4                          ;m1=[5 6]
+    punpcklqdq m3, m1                          ;m2=[4 5 5 6]
+    pmaddwd    m3, [%1 + 1 * 16]               ;m3=[4+5 5+6] Row 3-4
+    paddd      m2, m3                          ;m2=[2+3+4+5 3+4+5+6] Row 3-4
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vertical_sp_%1x%2(int16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------------
+; SSE4 short->pixel vertical 4-tap chroma filter for 2-wide blocks (2x4,
+; 2x8, 2x16): 4 rows per iteration via PROCESS_CHROMA_SP_W2_4R, stored with
+; pextrw word-by-word.
+%macro FILTER_VER_CHROMA_SP_W2_4R 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_sp_%1x%2, 5, 6, 6
+
+    add       r1d, r1d                          ; int16 source: stride in bytes
+    sub       r0, r1                            ; 4-tap window starts 1 row above
+    shl       r4d, 5                            ; coeffIdx * 32-byte coefficient set
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+
+    mova      m5, [pd_526336]                   ; rounding offset for the >>12 shift
+
+    mov       r4d, (%2/4)                       ; 4-row group counter
+
+.loopH:
+    PROCESS_CHROMA_SP_W2_4R r5
+
+    paddd     m0, m5
+    paddd     m2, m5
+
+    psrad     m0, 12
+    psrad     m2, 12
+
+    packssdw  m0, m2
+    packuswb  m0, m0                            ; clamp to bytes; rows 1-4 in low words
+
+    pextrw    [r2], m0, 0
+    pextrw    [r2 + r3], m0, 1
+    lea       r2, [r2 + 2 * r3]
+    pextrw    [r2], m0, 2
+    pextrw    [r2 + r3], m0, 3
+
+    lea       r2, [r2 + 2 * r3]
+
+    dec       r4d
+    jnz       .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_SP_W2_4R 2, 4
+    FILTER_VER_CHROMA_SP_W2_4R 2, 8
+
+    FILTER_VER_CHROMA_SP_W2_4R 2, 16
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_sp_4x2(int16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+; Fully unrolled short->pixel vertical 4-tap chroma filter for a 4x2 block:
+; two output rows, each the 4-tap sum of rows -1..+2, rounded (+pd_526336),
+; shifted >>12 and clamped to bytes.
+INIT_XMM sse4
+cglobal interp_4tap_vert_sp_4x2, 5, 6, 5
+
+    add        r1d, r1d                         ; int16 source: stride in bytes
+    sub        r0, r1                           ; 4-tap window starts 1 row above
+    shl        r4d, 5                           ; coeffIdx * 32-byte coefficient set
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeffV]
+    lea        r5, [r5 + r4]
+%else
+    lea        r5, [tab_ChromaCoeffV + r4]
+%endif
+
+    mova       m4, [pd_526336]                  ; rounding offset for the >>12 shift
+
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+    pmaddwd    m0, [r5 + 0 *16]                ;m0=[0+1]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m2, [r0]
+    punpcklwd  m1, m2                          ;m1=[1 2]
+    pmaddwd    m1, [r5 + 0 *16]                ;m1=[1+2]  Row2
+
+    movq       m3, [r0 + r1]
+    punpcklwd  m2, m3                          ;m4=[2 3]
+    pmaddwd    m2, [r5 + 1 * 16]
+    paddd      m0, m2                          ;m0=[0+1+2+3]  Row1 done
+    paddd      m0, m4
+    psrad      m0, 12
+
+    movq       m2, [r0 + 2 * r1]
+    punpcklwd  m3, m2                          ;m5=[3 4]
+    pmaddwd    m3, [r5 + 1 * 16]
+    paddd      m1, m3                          ;m1 = [1+2+3+4]  Row2 done
+    paddd      m1, m4
+    psrad      m1, 12
+
+    packssdw   m0, m1
+    packuswb   m0, m0                           ; clamp both rows to [0,255]
+
+    movd       [r2], m0
+    pextrd     [r2 + r3], m0, 1
+
+    RET
+
+;-------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vertical_sp_6x%2(int16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------------
+; SSE4 short->pixel vertical 4-tap chroma filter for 6-wide blocks: each
+; 4-row group is handled as a 4-wide tile (PROCESS_CHROMA_SP_W4_4R) followed
+; by a 2-wide tile (PROCESS_CHROMA_SP_W2_4R) for the remaining columns.
+%macro FILTER_VER_CHROMA_SP_W6_H4 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_sp_6x%2, 5, 7, 7
+
+    add       r1d, r1d                          ; int16 source: stride in bytes
+    sub       r0, r1                            ; 4-tap window starts 1 row above
+    shl       r4d, 5                            ; coeffIdx * 32-byte coefficient set
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_ChromaCoeffV + r4]
+%endif
+
+    mova      m6, [pd_526336]                   ; rounding offset for the >>12 shift
+
+    mov       r4d, %2/4                         ; 4-row group counter
+
+.loopH:
+    PROCESS_CHROMA_SP_W4_4R
+
+    paddd     m0, m6
+    paddd     m1, m6
+    paddd     m2, m6
+    paddd     m3, m6
+
+    psrad     m0, 12
+    psrad     m1, 12
+    psrad     m2, 12
+    psrad     m3, 12
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+
+    packuswb  m0, m2
+
+    movd      [r2], m0
+    pextrd    [r2 + r3], m0, 1
+    lea       r5, [r2 + 2 * r3]
+    pextrd    [r5], m0, 2
+    pextrd    [r5 + r3], m0, 3
+
+    ; rewind the 4-row source advance, move to columns 4-5
+    lea       r5, [4 * r1 - 2 * 4]
+    sub       r0, r5
+    add       r2, 4
+
+    PROCESS_CHROMA_SP_W2_4R r6
+
+    paddd     m0, m6
+    paddd     m2, m6
+
+    psrad     m0, 12
+    psrad     m2, 12
+
+    packssdw  m0, m2
+    packuswb  m0, m0
+
+    pextrw    [r2], m0, 0
+    pextrw    [r2 + r3], m0, 1
+    lea       r2, [r2 + 2 * r3]
+    pextrw    [r2], m0, 2
+    pextrw    [r2 + r3], m0, 3
+
+    ; back to column 0 for the next 4-row group (src already 4 rows down)
+    sub       r0, 2 * 4
+    lea       r2, [r2 + 2 * r3 - 4]
+
+    dec       r4d
+    jnz       .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_SP_W6_H4 6, 8
+
+    FILTER_VER_CHROMA_SP_W6_H4 6, 16
+
+; Filter 8 columns x 2 rows of 16-bit intermediates vertically with the 4-tap
+; coeffs at [r5]; leaves Row1 sums in m0/m1 (lo/hi) and Row2 sums in m2/m3.
+; Advances r0 by two rows.  Clobbers m0-m6.
+%macro PROCESS_CHROMA_SP_W8_2R 0
+    movu       m1, [r0]
+    movu       m3, [r0 + r1]
+    punpcklwd  m0, m1, m3
+    pmaddwd    m0, [r5 + 0 * 16]                ;m0 = [0l+1l]  Row1l
+    punpckhwd  m1, m3
+    pmaddwd    m1, [r5 + 0 * 16]                ;m1 = [0h+1h]  Row1h
+
+    movu       m4, [r0 + 2 * r1]
+    punpcklwd  m2, m3, m4
+    pmaddwd    m2, [r5 + 0 * 16]                ;m2 = [1l+2l]  Row2l
+    punpckhwd  m3, m4
+    pmaddwd    m3, [r5 + 0 * 16]                ;m3 = [1h+2h]  Row2h
+
+    lea        r0, [r0 + 2 * r1]
+    movu       m5, [r0 + r1]
+    punpcklwd  m6, m4, m5
+    pmaddwd    m6, [r5 + 1 * 16]                ;m6 = [2l+3l]  Row1l
+    paddd      m0, m6                           ;m0 = [0l+1l+2l+3l]  Row1l sum
+    punpckhwd  m4, m5
+    pmaddwd    m4, [r5 + 1 * 16]                ;m4 = [2h+3h]  Row1h
+    paddd      m1, m4                           ;m1 = [0h+1h+2h+3h]  Row1h sum
+
+    movu       m4, [r0 + 2 * r1]
+    punpcklwd  m6, m5, m4
+    pmaddwd    m6, [r5 + 1 * 16]                ;m6 = [3l+4l]  Row2l
+    paddd      m2, m6                           ;m2 = [1l+2l+3l+4l]  Row2l sum
+    punpckhwd  m5, m4
+    pmaddwd    m5, [r5 + 1 * 16]                ;m5 = [3h+4h]  Row2h
+    paddd      m3, m5                           ;m3 = [1h+2h+3h+4h]  Row2h sum
+%endmacro
+
+;--------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_sp_8x%2(int16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;--------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_SP_W8_H2 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_sp_%1x%2, 5, 6, 8
+
+    add       r1d, r1d                         ; srcStride in int16_t -> bytes
+    sub       r0, r1                           ; one row of top context
+    shl       r4d, 5                           ; coeffIdx * 32
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+
+    mova      m7, [pd_526336]                  ; rounding offset for the >>12 (sp path)
+
+    mov       r4d, %2/2                        ; 2 rows per iteration
+.loopH:
+    PROCESS_CHROMA_SP_W8_2R
+
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+
+    psrad     m0, 12
+    psrad     m1, 12
+    psrad     m2, 12
+    psrad     m3, 12
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+
+    packuswb  m0, m2                           ; clamp both rows to bytes
+
+    movlps    [r2], m0
+    movhps    [r2 + r3], m0
+
+    lea       r2, [r2 + 2 * r3]
+
+    dec r4d
+    jnz .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_SP_W8_H2 8, 2
+    FILTER_VER_CHROMA_SP_W8_H2 8, 4
+    FILTER_VER_CHROMA_SP_W8_H2 8, 6
+    FILTER_VER_CHROMA_SP_W8_H2 8, 8
+    FILTER_VER_CHROMA_SP_W8_H2 8, 16
+    FILTER_VER_CHROMA_SP_W8_H2 8, 32
+
+    FILTER_VER_CHROMA_SP_W8_H2 8, 12
+    FILTER_VER_CHROMA_SP_W8_H2 8, 64
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_2x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+%macro FILTER_HORIZ_CHROMA_2xN 2
+INIT_XMM sse4
+cglobal interp_4tap_horiz_ps_%1x%2, 4, 7, 4, src, srcstride, dst, dststride
+%define coef2  m3
+%define Tm0    m2
+%define t1     m1
+%define t0     m0
+
+    dec        srcq
+    mov        r4d, r4m
+    add        dststrided, dststrided
+
+%ifdef PIC
+    lea        r6, [tab_ChromaCoeff]
+    movd       coef2, [r6 + r4 * 4]
+%else
+    movd       coef2, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd     coef2, coef2, 0
+    mova       t1, [pw_2000]
+    mova       Tm0, [tab_Tm]
+
+    mov        r4d, %2
+    cmp        r5m, byte 0
+    je         .loopH
+    sub        srcq, srcstrideq
+    add        r4d, 3
+
+.loopH:
+    movh       t0, [srcq]
+    pshufb     t0, t0, Tm0
+    pmaddubsw  t0, coef2
+    phaddw     t0, t0
+    psubw      t0, t1
+    movd       [dstq], t0
+
+    lea        srcq, [srcq + srcstrideq]
+    lea        dstq, [dstq + dststrideq]
+
+    dec        r4d
+    jnz        .loopH
+
+    RET
+%endmacro
+
+    FILTER_HORIZ_CHROMA_2xN 2, 4
+    FILTER_HORIZ_CHROMA_2xN 2, 8
+
+    FILTER_HORIZ_CHROMA_2xN 2, 16
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_4x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+%macro FILTER_HORIZ_CHROMA_4xN 2
+INIT_XMM sse4
+cglobal interp_4tap_horiz_ps_%1x%2, 4, 7, 4, src, srcstride, dst, dststride
+%define coef2  m3
+%define Tm0    m2
+%define t1     m1
+%define t0     m0
+
+    dec        srcq
+    mov        r4d, r4m
+    add        dststrided, dststrided
+
+%ifdef PIC
+    lea        r6, [tab_ChromaCoeff]
+    movd       coef2, [r6 + r4 * 4]
+%else
+    movd       coef2, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd     coef2, coef2, 0
+    mova       t1, [pw_2000]
+    mova       Tm0, [tab_Tm]
+
+    mov        r4d, %2
+    cmp        r5m, byte 0
+    je         .loopH
+    sub        srcq, srcstrideq
+    add        r4d, 3
+
+.loopH:
+    movh       t0, [srcq]
+    pshufb     t0, t0, Tm0
+    pmaddubsw  t0, coef2
+    phaddw     t0, t0
+    psubw      t0, t1
+    movlps     [dstq], t0
+
+    lea        srcq, [srcq + srcstrideq]
+    lea        dstq, [dstq + dststrideq]
+
+    dec        r4d
+    jnz        .loopH
+    RET
+%endmacro
+
+    FILTER_HORIZ_CHROMA_4xN 4, 2
+    FILTER_HORIZ_CHROMA_4xN 4, 4
+    FILTER_HORIZ_CHROMA_4xN 4, 8
+    FILTER_HORIZ_CHROMA_4xN 4, 16
+
+    FILTER_HORIZ_CHROMA_4xN 4, 32
+
+; One row of 6-wide horizontal filtering: %1/%2 = temp regs, %3 = pw_2000 bias.
+; Stores 6 int16 results (8 + 4 bytes) at [dstq].
+%macro PROCESS_CHROMA_W6 3
+    movu       %1, [srcq]
+    pshufb     %2, %1, Tm0
+    pmaddubsw  %2, coef2
+    pshufb     %1, %1, Tm1
+    pmaddubsw  %1, coef2
+    phaddw     %2, %1
+    psubw      %2, %3
+    movh       [dstq], %2
+    pshufd     %2, %2, 2                       ; move word 4..5 down for the 4-byte tail store
+    movd       [dstq + 8], %2
+%endmacro
+
+; One row of 12-wide horizontal filtering: 8 results from the first window,
+; 4 more from [srcq + 8].  %1/%2 = temp regs, %3 = pw_2000 bias.
+%macro PROCESS_CHROMA_W12 3
+    movu       %1, [srcq]
+    pshufb     %2, %1, Tm0
+    pmaddubsw  %2, coef2
+    pshufb     %1, %1, Tm1
+    pmaddubsw  %1, coef2
+    phaddw     %2, %1
+    psubw      %2, %3
+    movu       [dstq], %2
+    movu       %1, [srcq + 8]
+    pshufb     %1, %1, Tm0
+    pmaddubsw  %1, coef2
+    phaddw     %1, %1
+    psubw      %1, %3
+    movh       [dstq + 16], %1
+%endmacro
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_6x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+; Driver shared by the 6-wide and 12-wide cases; dispatches to PROCESS_CHROMA_W%1.
+;-----------------------------------------------------------------------------------------------------------------------------
+%macro FILTER_HORIZ_CHROMA 2
+INIT_XMM sse4
+cglobal interp_4tap_horiz_ps_%1x%2, 4, 7, 6, src, srcstride, dst, dststride
+%define coef2    m5
+%define Tm0      m4
+%define Tm1      m3
+%define t2       m2
+%define t1       m1
+%define t0       m0
+
+    dec     srcq                               ; point at leftmost tap (src - 1)
+    mov     r4d, r4m
+    add     dststrided, dststrided             ; dstStride in int16_t -> bytes
+
+%ifdef PIC
+    lea     r6, [tab_ChromaCoeff]
+    movd    coef2, [r6 + r4 * 4]
+%else
+    movd    coef2, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd  coef2, coef2, 0
+    mova    t2, [pw_2000]
+    mova    Tm0, [tab_Tm]
+    mova    Tm1, [tab_Tm + 16]
+
+    mov     r4d, %2
+    cmp     r5m, byte 0
+    je      .loopH
+    sub     srcq, srcstrideq                   ; isRowExt: one row earlier, 3 extra rows
+    add     r4d, 3
+
+.loopH:
+    PROCESS_CHROMA_W%1  t0, t1, t2
+    add     srcq, srcstrideq
+    add     dstq, dststrideq
+
+    dec     r4d
+    jnz     .loopH
+
+    RET
+%endmacro
+
+    FILTER_HORIZ_CHROMA 6, 8
+    FILTER_HORIZ_CHROMA 12, 16
+
+    FILTER_HORIZ_CHROMA 6, 16
+    FILTER_HORIZ_CHROMA 12, 32
+
+; One row of 8-wide horizontal filtering: %1/%2 = temp regs, %3 = pw_2000 bias.
+%macro PROCESS_CHROMA_W8 3
+    movu        %1, [srcq]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1
+    psubw       %2, %3
+    movu        [dstq], %2
+%endmacro
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_8x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+%macro FILTER_HORIZ_CHROMA_8xN 2
+INIT_XMM sse4
+cglobal interp_4tap_horiz_ps_%1x%2, 4, 7, 6, src, srcstride, dst, dststride
+%define coef2    m5
+%define Tm0      m4
+%define Tm1      m3
+%define t2       m2
+%define t1       m1
+%define t0       m0
+
+    dec     srcq                               ; point at leftmost tap (src - 1)
+    mov     r4d, r4m
+    add     dststrided, dststrided             ; dstStride in int16_t -> bytes
+
+%ifdef PIC
+    lea     r6, [tab_ChromaCoeff]
+    movd    coef2, [r6 + r4 * 4]
+%else
+    movd    coef2, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd  coef2, coef2, 0
+    mova    t2, [pw_2000]
+    mova    Tm0, [tab_Tm]
+    mova    Tm1, [tab_Tm + 16]
+
+    mov     r4d, %2
+    cmp     r5m, byte 0
+    je      .loopH
+    sub     srcq, srcstrideq                   ; isRowExt: one row earlier, 3 extra rows
+    add     r4d, 3
+
+.loopH:
+    PROCESS_CHROMA_W8  t0, t1, t2
+    add     srcq, srcstrideq
+    add     dstq, dststrideq
+
+    dec     r4d
+    jnz     .loopH
+
+    RET
+%endmacro
+
+    FILTER_HORIZ_CHROMA_8xN 8, 2
+    FILTER_HORIZ_CHROMA_8xN 8, 4
+    FILTER_HORIZ_CHROMA_8xN 8, 6
+    FILTER_HORIZ_CHROMA_8xN 8, 8
+    FILTER_HORIZ_CHROMA_8xN 8, 16
+    FILTER_HORIZ_CHROMA_8xN 8, 32
+
+    FILTER_HORIZ_CHROMA_8xN 8, 12
+    FILTER_HORIZ_CHROMA_8xN 8, 64
+
+; One row of 16-wide horizontal filtering: two 8-wide windows ([srcq], [srcq+8]).
+; %1/%2/%4 = temp regs, %3 = pw_2000 bias.
+%macro PROCESS_CHROMA_W16 4
+    movu        %1, [srcq]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1
+    movu        %1, [srcq + 8]
+    pshufb      %4, %1, Tm0
+    pmaddubsw   %4, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %4, %1
+    psubw       %2, %3
+    psubw       %4, %3
+    movu        [dstq], %2
+    movu        [dstq + 16], %4
+%endmacro
+
+; One row of 24-wide horizontal filtering: 16-wide block plus an 8-wide tail.
+%macro PROCESS_CHROMA_W24 4
+    movu        %1, [srcq]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1
+    movu        %1, [srcq + 8]
+    pshufb      %4, %1, Tm0
+    pmaddubsw   %4, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %4, %1
+    psubw       %2, %3
+    psubw       %4, %3
+    movu        [dstq], %2
+    movu        [dstq + 16], %4
+    movu        %1, [srcq + 16]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1
+    psubw       %2, %3
+    movu        [dstq + 32], %2
+%endmacro
+
+; One row of 32-wide horizontal filtering: four 8-wide windows.
+%macro PROCESS_CHROMA_W32 4
+    movu        %1, [srcq]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1
+    movu        %1, [srcq + 8]
+    pshufb      %4, %1, Tm0
+    pmaddubsw   %4, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %4, %1
+    psubw       %2, %3
+    psubw       %4, %3
+    movu        [dstq], %2
+    movu        [dstq + 16], %4
+    movu        %1, [srcq + 16]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1
+    movu        %1, [srcq + 24]
+    pshufb      %4, %1, Tm0
+    pmaddubsw   %4, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %4, %1
+    psubw       %2, %3
+    psubw       %4, %3
+    movu        [dstq + 32], %2
+    movu        [dstq + 48], %4
+%endmacro
+
+; 16-wide block at a source byte offset %5 (dst offset is %5*2 since dst is int16).
+%macro PROCESS_CHROMA_W16o 5
+    movu        %1, [srcq + %5]
+    pshufb      %2, %1, Tm0
+    pmaddubsw   %2, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %2, %1
+    movu        %1, [srcq + %5 + 8]
+    pshufb      %4, %1, Tm0
+    pmaddubsw   %4, coef2
+    pshufb      %1, %1, Tm1
+    pmaddubsw   %1, coef2
+    phaddw      %4, %1
+    psubw       %2, %3
+    psubw       %4, %3
+    movu        [dstq + %5 * 2], %2
+    movu        [dstq + %5 * 2 + 16], %4
+%endmacro
+
+; 48-wide row: three 16-wide blocks.
+%macro PROCESS_CHROMA_W48 4
+    PROCESS_CHROMA_W16o %1, %2, %3, %4, 0
+    PROCESS_CHROMA_W16o %1, %2, %3, %4, 16
+    PROCESS_CHROMA_W16o %1, %2, %3, %4, 32
+%endmacro
+
+; 64-wide row: four 16-wide blocks.
+%macro PROCESS_CHROMA_W64 4
+    PROCESS_CHROMA_W16o %1, %2, %3, %4, 0
+    PROCESS_CHROMA_W16o %1, %2, %3, %4, 16
+    PROCESS_CHROMA_W16o %1, %2, %3, %4, 32
+    PROCESS_CHROMA_W16o %1, %2, %3, %4, 48
+%endmacro
+
+;------------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_%1x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;------------------------------------------------------------------------------------------------------------------------------
+%macro FILTER_HORIZ_CHROMA_WxN 2
+INIT_XMM sse4
+cglobal interp_4tap_horiz_ps_%1x%2, 4, 7, 7, src, srcstride, dst, dststride
+%define coef2    m6
+%define Tm0      m5
+%define Tm1      m4
+%define t3       m3
+%define t2       m2
+%define t1       m1
+%define t0       m0
+
+    dec     srcq
+    mov     r4d, r4m
+    add     dststrided, dststrided
+
+%ifdef PIC
+    lea     r6, [tab_ChromaCoeff]
+    movd    coef2, [r6 + r4 * 4]
+%else
+    movd    coef2, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufd  coef2, coef2, 0
+    mova    t2, [pw_2000]
+    mova    Tm0, [tab_Tm]
+    mova    Tm1, [tab_Tm + 16]
+
+    mov     r4d, %2
+    cmp     r5m, byte 0
+    je      .loopH
+    sub     srcq, srcstrideq
+    add     r4d, 3
+
+.loopH:
+    PROCESS_CHROMA_W%1   t0, t1, t2, t3
+    add     srcq, srcstrideq
+    add     dstq, dststrideq
+
+    dec     r4d
+    jnz     .loopH
+
+    RET
+%endmacro
+
+    FILTER_HORIZ_CHROMA_WxN 16, 4
+    FILTER_HORIZ_CHROMA_WxN 16, 8
+    FILTER_HORIZ_CHROMA_WxN 16, 12
+    FILTER_HORIZ_CHROMA_WxN 16, 16
+    FILTER_HORIZ_CHROMA_WxN 16, 32
+    FILTER_HORIZ_CHROMA_WxN 24, 32
+    FILTER_HORIZ_CHROMA_WxN 32,  8
+    FILTER_HORIZ_CHROMA_WxN 32, 16
+    FILTER_HORIZ_CHROMA_WxN 32, 24
+    FILTER_HORIZ_CHROMA_WxN 32, 32
+
+    FILTER_HORIZ_CHROMA_WxN 16, 24
+    FILTER_HORIZ_CHROMA_WxN 16, 64
+    FILTER_HORIZ_CHROMA_WxN 24, 64
+    FILTER_HORIZ_CHROMA_WxN 32, 48
+    FILTER_HORIZ_CHROMA_WxN 32, 64
+
+    FILTER_HORIZ_CHROMA_WxN 64, 64
+    FILTER_HORIZ_CHROMA_WxN 64, 32
+    FILTER_HORIZ_CHROMA_WxN 64, 48
+    FILTER_HORIZ_CHROMA_WxN 48, 64
+    FILTER_HORIZ_CHROMA_WxN 64, 16
+
+
+;---------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ps_%1x%2(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------
+%macro FILTER_V_PS_W16n 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_%1x%2, 4, 7, 8
+
+    mov        r4d, r4m
+    sub        r0, r1
+    add        r3d, r3d
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m0, [r5 + r4 * 4]
+%else
+    movd       m0, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m1, m0, [tab_Vm]
+    pshufb     m0, [tab_Vm + 16]
+    mov        r4d, %2/2
+
+.loop:
+
+    mov         r6d,       %1/16
+
+.loopW:
+
+    movu       m2, [r0]
+    movu       m3, [r0 + r1]
+
+    punpcklbw  m4, m2, m3
+    punpckhbw  m2, m3
+
+    pmaddubsw  m4, m1
+    pmaddubsw  m2, m1
+
+    lea        r5, [r0 + 2 * r1]
+    movu       m5, [r5]
+    movu       m7, [r5 + r1]
+
+    punpcklbw  m6, m5, m7
+    pmaddubsw  m6, m0
+    paddw      m4, m6
+
+    punpckhbw  m6, m5, m7
+    pmaddubsw  m6, m0
+    paddw      m2, m6
+
+    mova       m6, [pw_2000]
+
+    psubw      m4, m6
+    psubw      m2, m6
+
+    movu       [r2], m4
+    movu       [r2 + 16], m2
+
+    punpcklbw  m4, m3, m5
+    punpckhbw  m3, m5
+
+    pmaddubsw  m4, m1
+    pmaddubsw  m3, m1
+
+    movu       m5, [r5 + 2 * r1]
+
+    punpcklbw  m2, m7, m5
+    punpckhbw  m7, m5
+
+    pmaddubsw  m2, m0
+    pmaddubsw  m7, m0
+
+    paddw      m4, m2
+    paddw      m3, m7
+
+    psubw      m4, m6
+    psubw      m3, m6
+
+    movu       [r2 + r3], m4
+    movu       [r2 + r3 + 16], m3
+
+    add         r0,        16
+    add         r2,        32
+    dec         r6d
+    jnz         .loopW
+
+    lea         r0,        [r0 + r1 * 2 - %1]
+    lea         r2,        [r2 + r3 * 2 - %1 * 2]
+
+    dec        r4d
+    jnz        .loop
+    RET
+%endmacro
+
+    FILTER_V_PS_W16n 64, 64
+    FILTER_V_PS_W16n 64, 32
+    FILTER_V_PS_W16n 64, 48
+    FILTER_V_PS_W16n 48, 64
+    FILTER_V_PS_W16n 64, 16
+
+
+;------------------------------------------------------------------------------------------------------------
+;void interp_4tap_vert_ps_2x4(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_2x4, 4, 6, 7
+
+    mov         r4d, r4m
+    sub         r0, r1
+    add         r3d, r3d
+
+%ifdef PIC
+    lea         r5, [tab_ChromaCoeff]
+    movd        m0, [r5 + r4 * 4]
+%else
+    movd        m0, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb      m0, [tab_Cm]
+
+    lea         r5, [3 * r1]
+
+    movd        m2, [r0]
+    movd        m3, [r0 + r1]
+    movd        m4, [r0 + 2 * r1]
+    movd        m5, [r0 + r5]
+
+    punpcklbw   m2, m3
+    punpcklbw   m6, m4, m5
+    punpcklbw   m2, m6
+
+    pmaddubsw   m2, m0
+
+    lea         r0, [r0 + 4 * r1]
+    movd        m6, [r0]
+
+    punpcklbw   m3, m4
+    punpcklbw   m1, m5, m6
+    punpcklbw   m3, m1
+
+    pmaddubsw   m3, m0
+    phaddw      m2, m3
+
+    mova        m1, [pw_2000]
+
+    psubw       m2, m1
+
+    movd        [r2], m2
+    pextrd      [r2 + r3], m2, 2
+
+    movd        m2, [r0 + r1]
+
+    punpcklbw   m4, m5
+    punpcklbw   m3, m6, m2
+    punpcklbw   m4, m3
+
+    pmaddubsw   m4, m0
+
+    movd        m3, [r0 + 2 * r1]
+
+    punpcklbw   m5, m6
+    punpcklbw   m2, m3
+    punpcklbw   m5, m2
+
+    pmaddubsw   m5, m0
+    phaddw      m4, m5
+    psubw       m4, m1
+
+    lea         r2, [r2 + 2 * r3]
+    movd        [r2], m4
+    pextrd      [r2 + r3], m4, 2
+
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ps_2x8(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+%macro FILTER_V_PS_W2 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ps_2x%2, 4, 6, 8
+
+    mov        r4d, r4m
+    sub        r0, r1
+    add        r3d, r3d
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeff]
+    movd       m0, [r5 + r4 * 4]
+%else
+    movd       m0, [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    pshufb     m0, [tab_Cm]
+
+    mova       m1, [pw_2000]
+    lea        r5, [3 * r1]
+    mov        r4d, %2/4
+.loop:
+    movd       m2, [r0]
+    movd       m3, [r0 + r1]
+    movd       m4, [r0 + 2 * r1]
+    movd       m5, [r0 + r5]
+
+    punpcklbw  m2, m3
+    punpcklbw  m6, m4, m5
+    punpcklbw  m2, m6
+
+    pmaddubsw  m2, m0
+
+    lea        r0, [r0 + 4 * r1]
+    movd       m6, [r0]
+
+    punpcklbw  m3, m4
+    punpcklbw  m7, m5, m6
+    punpcklbw  m3, m7
+
+    pmaddubsw  m3, m0
+
+    phaddw     m2, m3
+    psubw      m2, m1
+
+
+    movd       [r2], m2
+    pshufd     m2, m2, 2
+    movd       [r2 + r3], m2
+
+    movd       m2, [r0 + r1]
+
+    punpcklbw  m4, m5
+    punpcklbw  m3, m6, m2
+    punpcklbw  m4, m3
+
+    pmaddubsw  m4, m0
+
+    movd       m3, [r0 + 2 * r1]
+
+    punpcklbw  m5, m6
+    punpcklbw  m2, m3
+    punpcklbw  m5, m2
+
+    pmaddubsw  m5, m0
+
+    phaddw     m4, m5
+
+    psubw      m4, m1
+
+    lea        r2, [r2 + 2 * r3]
+    movd       [r2], m4
+    pshufd     m4 , m4 ,2
+    movd       [r2 + r3], m4
+
+    lea        r2, [r2 + 2 * r3]
+
+    dec        r4d
+    jnz        .loop
+
+    RET
+%endmacro
+
+    FILTER_V_PS_W2 2, 8
+
+    FILTER_V_PS_W2 2, 16
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ss_%1x%2(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_SS 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_ss_%1x%2, 5, 7, 6 ,0-gprsize
+
+    add       r1d, r1d
+    add       r3d, r3d
+    sub       r0, r1
+    shl       r4d, 5
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r6, [r5 + r4]
+%else
+    lea       r6, [tab_ChromaCoeffV + r4]
+%endif
+
+    mov       dword [rsp], %2/4
+
+.loopH:
+    mov       r4d, (%1/4)
+.loopW:
+    PROCESS_CHROMA_SP_W4_4R
+
+    psrad     m0, 6
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+
+    movlps    [r2], m0
+    movhps    [r2 + r3], m0
+    lea       r5, [r2 + 2 * r3]
+    movlps    [r5], m2
+    movhps    [r5 + r3], m2
+
+    lea       r5, [4 * r1 - 2 * 4]
+    sub       r0, r5
+    add       r2, 2 * 4
+
+    dec       r4d
+    jnz       .loopW
+
+    lea       r0, [r0 + 4 * r1 - 2 * %1]
+    lea       r2, [r2 + 4 * r3 - 2 * %1]
+
+    dec       dword [rsp]
+    jnz       .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_SS 4, 4
+    FILTER_VER_CHROMA_SS 4, 8
+    FILTER_VER_CHROMA_SS 16, 16
+    FILTER_VER_CHROMA_SS 16, 8
+    FILTER_VER_CHROMA_SS 16, 12
+    FILTER_VER_CHROMA_SS 12, 16
+    FILTER_VER_CHROMA_SS 16, 4
+    FILTER_VER_CHROMA_SS 4, 16
+    FILTER_VER_CHROMA_SS 32, 32
+    FILTER_VER_CHROMA_SS 32, 16
+    FILTER_VER_CHROMA_SS 16, 32
+    FILTER_VER_CHROMA_SS 32, 24
+    FILTER_VER_CHROMA_SS 24, 32
+    FILTER_VER_CHROMA_SS 32, 8
+
+    FILTER_VER_CHROMA_SS 16, 24
+    FILTER_VER_CHROMA_SS 12, 32
+    FILTER_VER_CHROMA_SS 4, 32
+    FILTER_VER_CHROMA_SS 32, 64
+    FILTER_VER_CHROMA_SS 16, 64
+    FILTER_VER_CHROMA_SS 32, 48
+    FILTER_VER_CHROMA_SS 24, 64
+
+    FILTER_VER_CHROMA_SS 64, 64
+    FILTER_VER_CHROMA_SS 64, 32
+    FILTER_VER_CHROMA_SS 64, 48
+    FILTER_VER_CHROMA_SS 48, 64
+    FILTER_VER_CHROMA_SS 64, 16
+
+; AVX2 vertical 4-tap chroma filter, 4x4 block, 16-bit input.
+; %1 selects the output path: "sp" (round + >>12, clamp to pixel) or
+; "ss" (>>6, store int16).
+%macro FILTER_VER_CHROMA_S_AVX2_4x4 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x4, 4, 6, 7
+    mov             r4d, r4m
+    add             r1d, r1d                   ; srcStride in int16_t -> bytes
+    shl             r4d, 6                     ; coeffIdx * 64: two 32-byte coeff rows
+    sub             r0, r1                     ; one row of top context
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+%ifidn %1,sp
+    mova            m6, [pd_526336]            ; sp rounding offset for the >>12
+%else
+    add             r3d, r3d                   ; ss output is int16: dstStride -> bytes
+%endif
+
+    movq            xm0, [r0]
+    movq            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m4, [r5 + 1 * mmsize]
+    paddd           m2, m4
+
+%ifidn %1,sp
+    paddd           m0, m6
+    paddd           m2, m6
+    psrad           m0, 12
+    psrad           m2, 12
+%else
+    psrad           m0, 6
+    psrad           m2, 6
+%endif
+    packssdw        m0, m2
+    vextracti128    xm2, m0, 1
+    lea             r4, [r3 * 3]
+
+%ifidn %1,sp
+    packuswb        xm0, xm2                   ; clamp to bytes; rows interleaved across lanes
+    movd            [r2], xm0
+    pextrd          [r2 + r3], xm0, 2
+    pextrd          [r2 + r3 * 2], xm0, 1
+    pextrd          [r2 + r4], xm0, 3
+%else
+    movq            [r2], xm0                  ; int16 output, one quadword per row
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r4], xm2
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_4x4 sp
+    FILTER_VER_CHROMA_S_AVX2_4x4 ss
+
+; AVX2 vertical 4-tap chroma filter, 4x8 block, 16-bit input.
+; %1 = "sp" (round + >>12, clamp to pixel) or "ss" (>>6, store int16).
+%macro FILTER_VER_CHROMA_S_AVX2_4x8 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x8, 4, 6, 8
+    mov             r4d, r4m
+    shl             r4d, 6                     ; coeffIdx * 64
+    add             r1d, r1d                   ; srcStride in int16_t -> bytes
+    sub             r0, r1                     ; one row of top context
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+%ifidn %1,sp
+    mova            m7, [pd_526336]            ; sp rounding offset for the >>12
+%else
+    add             r3d, r3d                   ; ss output is int16: dstStride -> bytes
+%endif
+
+    movq            xm0, [r0]
+    movq            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m5, m4, [r5 + 1 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m4, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm1, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm6, [r0]
+    punpcklwd       xm3, xm6
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [8 7 7 6]
+    pmaddwd         m5, m1, [r5 + 1 * mmsize]
+    paddd           m4, m5
+    pmaddwd         m1, [r5]
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm6, xm3
+    movq            xm5, [r0 + 2 * r1]
+    punpcklwd       xm3, xm5
+    vinserti128     m6, m6, xm3, 1                  ; m6 = [A 9 9 8]
+    pmaddwd         m6, [r5 + 1 * mmsize]
+    paddd           m1, m6
+    lea             r4, [r3 * 3]
+
+%ifidn %1,sp
+    paddd           m0, m7
+    paddd           m2, m7
+    paddd           m4, m7
+    paddd           m1, m7
+    psrad           m0, 12
+    psrad           m2, 12
+    psrad           m4, 12
+    psrad           m1, 12
+%else
+    psrad           m0, 6
+    psrad           m2, 6
+    psrad           m4, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m2
+    packssdw        m4, m1
+%ifidn %1,sp
+    packuswb        m0, m4                     ; clamp all 8 rows to bytes
+    vextracti128    xm2, m0, 1
+    movd            [r2], xm0
+    movd            [r2 + r3], xm2
+    pextrd          [r2 + r3 * 2], xm0, 1
+    pextrd          [r2 + r4], xm2, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm0, 2
+    pextrd          [r2 + r3], xm2, 2
+    pextrd          [r2 + r3 * 2], xm0, 3
+    pextrd          [r2 + r4], xm2, 3
+%else
+    vextracti128    xm2, m0, 1
+    vextracti128    xm1, m4, 1
+    movq            [r2], xm0                  ; int16 output, one quadword per row
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r4], xm2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm4
+    movq            [r2 + r3], xm1
+    movhps          [r2 + r3 * 2], xm4
+    movhps          [r2 + r4], xm1
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_4x8 sp
+    FILTER_VER_CHROMA_S_AVX2_4x8 ss
+
+%macro PROCESS_CHROMA_AVX2_W4_16R 1
+    movq            xm0, [r0]
+    movq            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m5, m4, [r5 + 1 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m4, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm1, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm6, [r0]
+    punpcklwd       xm3, xm6
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [8 7 7 6]
+    pmaddwd         m5, m1, [r5 + 1 * mmsize]
+    paddd           m4, m5
+    pmaddwd         m1, [r5]
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm6, xm3
+    movq            xm5, [r0 + 2 * r1]
+    punpcklwd       xm3, xm5
+    vinserti128     m6, m6, xm3, 1                  ; m6 = [10 9 9 8]
+    pmaddwd         m3, m6, [r5 + 1 * mmsize]
+    paddd           m1, m3
+    pmaddwd         m6, [r5]
+
+%ifidn %1,sp
+    paddd           m0, m7
+    paddd           m2, m7
+    paddd           m4, m7
+    paddd           m1, m7
+    psrad           m4, 12
+    psrad           m1, 12
+    psrad           m0, 12
+    psrad           m2, 12
+%else
+    psrad           m0, 6
+    psrad           m2, 6
+    psrad           m4, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m2
+    packssdw        m4, m1
+%ifidn %1,sp
+    packuswb        m0, m4
+    vextracti128    xm4, m0, 1
+    movd            [r2], xm0
+    movd            [r2 + r3], xm4
+    pextrd          [r2 + r3 * 2], xm0, 1
+    pextrd          [r2 + r6], xm4, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm0, 2
+    pextrd          [r2 + r3], xm4, 2
+    pextrd          [r2 + r3 * 2], xm0, 3
+    pextrd          [r2 + r6], xm4, 3
+%else
+    vextracti128    xm2, m0, 1
+    vextracti128    xm1, m4, 1
+    movq            [r2], xm0
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm4
+    movq            [r2 + r3], xm1
+    movhps          [r2 + r3 * 2], xm4
+    movhps          [r2 + r6], xm1
+%endif
+
+    movq            xm2, [r0 + r4]
+    punpcklwd       xm5, xm2
+    lea             r0, [r0 + 4 * r1]
+    movq            xm0, [r0]
+    punpcklwd       xm2, xm0
+    vinserti128     m5, m5, xm2, 1                  ; m5 = [12 11 11 10]
+    pmaddwd         m2, m5, [r5 + 1 * mmsize]
+    paddd           m6, m2
+    pmaddwd         m5, [r5]
+    movq            xm2, [r0 + r1]
+    punpcklwd       xm0, xm2
+    movq            xm3, [r0 + 2 * r1]
+    punpcklwd       xm2, xm3
+    vinserti128     m0, m0, xm2, 1                  ; m0 = [14 13 13 12]
+    pmaddwd         m2, m0, [r5 + 1 * mmsize]
+    paddd           m5, m2
+    pmaddwd         m0, [r5]
+    movq            xm4, [r0 + r4]
+    punpcklwd       xm3, xm4
+    lea             r0, [r0 + 4 * r1]
+    movq            xm1, [r0]
+    punpcklwd       xm4, xm1
+    vinserti128     m3, m3, xm4, 1                  ; m3 = [16 15 15 14]
+    pmaddwd         m4, m3, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m3, [r5]
+    movq            xm4, [r0 + r1]
+    punpcklwd       xm1, xm4
+    movq            xm2, [r0 + 2 * r1]
+    punpcklwd       xm4, xm2
+    vinserti128     m1, m1, xm4, 1                  ; m1 = [18 17 17 16]
+    pmaddwd         m1, [r5 + 1 * mmsize]
+    paddd           m3, m1
+
+%ifidn %1,sp
+    paddd           m6, m7
+    paddd           m5, m7
+    paddd           m0, m7
+    paddd           m3, m7
+    psrad           m6, 12
+    psrad           m5, 12
+    psrad           m0, 12
+    psrad           m3, 12
+%else
+    psrad           m6, 6
+    psrad           m5, 6
+    psrad           m0, 6
+    psrad           m3, 6
+%endif
+    packssdw        m6, m5
+    packssdw        m0, m3
+    lea             r2, [r2 + r3 * 4]
+
+%ifidn %1,sp
+    packuswb        m6, m0
+    vextracti128    xm0, m6, 1
+    movd            [r2], xm6
+    movd            [r2 + r3], xm0
+    pextrd          [r2 + r3 * 2], xm6, 1
+    pextrd          [r2 + r6], xm0, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm6, 2
+    pextrd          [r2 + r3], xm0, 2
+    pextrd          [r2 + r3 * 2], xm6, 3
+    pextrd          [r2 + r6], xm0, 3
+%else
+    vextracti128    xm5, m6, 1
+    vextracti128    xm3, m0, 1
+    movq            [r2], xm6
+    movq            [r2 + r3], xm5
+    movhps          [r2 + r3 * 2], xm6
+    movhps          [r2 + r6], xm5
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm0
+    movq            [r2 + r3], xm3
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm3
+%endif
+%endmacro
+
+%macro FILTER_VER_CHROMA_S_AVX2_4x16 1         ; %1 = sp (round>>12, pack to 8-bit) or ss (>>6, keep 16-bit)
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x16, 4, 7, 8      ; args: r0=src, r1=srcStride, r2=dst, r3=dstStride, r4m=coeffIdx
+    mov             r4d, r4m
+    shl             r4d, 6                      ; coeffIdx * 64: each coeff set is 2 * mmsize = 64 bytes ([r5], [r5+mmsize])
+    add             r1d, r1d                    ; 16-bit source samples: stride elements -> bytes
+    sub             r0, r1                      ; back up one row so the 4-tap window is centered
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]                ; r4 = 3 * srcStride (row 3 offset)
+%ifidn %1,sp
+    mova            m7, [pd_526336]             ; 526336 = (8192 << 6) + 2048: round/offset added before psrad 12
+%else
+    add             r3d, r3d                    ; ss: 16-bit output, dst stride elements -> bytes
+%endif
+    lea             r6, [r3 * 3]                ; r6 = 3 * dstStride
+    PROCESS_CHROMA_AVX2_W4_16R %1
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_4x16 sp
+    FILTER_VER_CHROMA_S_AVX2_4x16 ss
+
+%macro FILTER_VER_CHROMA_S_AVX2_4x32 1         ; 4x32 = two 4x16 passes; %1 = sp or ss
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x32, 4, 7, 8
+    mov             r4d, r4m
+    shl             r4d, 6                      ; coeffIdx * 64 byte offset into pw_ChromaCoeffV
+    add             r1d, r1d                    ; 16-bit source: stride -> bytes
+    sub             r0, r1                      ; back up one row for the 4-tap window
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+%ifidn %1,sp
+    mova            m7, [pd_526336]             ; rounding constant for the sp >>12 path
+%else
+    add             r3d, r3d                    ; ss: 16-bit output stride in bytes
+%endif
+    lea             r6, [r3 * 3]
+%rep 2
+    PROCESS_CHROMA_AVX2_W4_16R %1
+    lea             r2, [r2 + r3 * 4]           ; advance dst past the 16 rows just written
+%endrep
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_4x32 sp
+    FILTER_VER_CHROMA_S_AVX2_4x32 ss
+
+%macro FILTER_VER_CHROMA_S_AVX2_4x2 1          ; 4-wide, 2-row vertical 4-tap filter; %1 = sp or ss
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_4x2, 4, 6, 6
+    mov             r4d, r4m
+    shl             r4d, 6                      ; coeffIdx * 64 byte offset into pw_ChromaCoeffV
+    add             r1d, r1d                    ; 16-bit source: stride -> bytes
+    sub             r0, r1                      ; back up one row for the 4-tap window
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+%ifidn %1,sp
+    mova            m5, [pd_526336]             ; rounding constant for the sp >>12 path
+%else
+    add             r3d, r3d                    ; ss: 16-bit output stride in bytes
+%endif
+    movq            xm0, [r0]                   ; rows 0..4: interleave adjacent rows for pmaddwd
+    movq            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]                    ; taps 0/1
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    movq            xm4, [r0 + 4 * r1]
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m2, [r5 + 1 * mmsize]       ; taps 2/3
+    paddd           m0, m2
+%ifidn %1,sp
+    paddd           m0, m5
+    psrad           m0, 12
+%else
+    psrad           m0, 6
+%endif
+    vextracti128    xm1, m0, 1
+    packssdw        xm0, xm1                    ; dwords -> signed words
+%ifidn %1,sp
+    packuswb        xm0, xm0                    ; clamp to unsigned bytes
+    movd            [r2], xm0
+    pextrd          [r2 + r3], xm0, 1
+%else
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_4x2 sp
+    FILTER_VER_CHROMA_S_AVX2_4x2 ss
+
+%macro FILTER_VER_CHROMA_S_AVX2_2x4 1          ; 2-wide, 4-row vertical 4-tap filter; %1 = sp or ss
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_2x4, 4, 6, 6
+    mov             r4d, r4m
+    shl             r4d, 6                      ; coeffIdx * 64 byte offset into pw_ChromaCoeffV
+    add             r1d, r1d                    ; 16-bit source: stride -> bytes
+    sub             r0, r1                      ; back up one row for the 4-tap window
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+%ifidn %1,sp
+    mova            m5, [pd_526336]             ; rounding constant for the sp >>12 path
+%else
+    add             r3d, r3d                    ; ss: 16-bit output stride in bytes
+%endif
+    movd            xm0, [r0]                   ; rows 0..6: interleave row pairs for pmaddwd
+    movd            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movd            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    punpcklqdq      xm0, xm1                        ; m0 = [2 1 1 0]
+    movd            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movd            xm4, [r0]
+    punpcklwd       xm3, xm4
+    punpcklqdq      xm2, xm3                        ; m2 = [4 3 3 2]
+    vinserti128     m0, m0, xm2, 1                  ; m0 = [4 3 3 2 2 1 1 0]
+    movd            xm1, [r0 + r1]
+    punpcklwd       xm4, xm1
+    movd            xm3, [r0 + r1 * 2]
+    punpcklwd       xm1, xm3
+    punpcklqdq      xm4, xm1                        ; m4 = [6 5 5 4]
+    vinserti128     m2, m2, xm4, 1                  ; m2 = [6 5 5 4 4 3 3 2]
+    pmaddwd         m0, [r5]                    ; taps 0/1
+    pmaddwd         m2, [r5 + 1 * mmsize]       ; taps 2/3
+    paddd           m0, m2
+%ifidn %1,sp
+    paddd           m0, m5
+    psrad           m0, 12
+%else
+    psrad           m0, 6
+%endif
+    vextracti128    xm1, m0, 1
+    packssdw        xm0, xm1                    ; dwords -> signed words
+    lea             r4, [r3 * 3]                ; r4 reused: 3 * dstStride
+%ifidn %1,sp
+    packuswb        xm0, xm0                    ; clamp to unsigned bytes, store 2 bytes/row
+    pextrw          [r2], xm0, 0
+    pextrw          [r2 + r3], xm0, 1
+    pextrw          [r2 + 2 * r3], xm0, 2
+    pextrw          [r2 + r4], xm0, 3
+%else
+    movd            [r2], xm0                   ; ss: store 2 words/row
+    pextrd          [r2 + r3], xm0, 1
+    pextrd          [r2 + 2 * r3], xm0, 2
+    pextrd          [r2 + r4], xm0, 3
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_2x4 sp
+    FILTER_VER_CHROMA_S_AVX2_2x4 ss
+
+%macro FILTER_VER_CHROMA_S_AVX2_8x8 1          ; 8-wide, 8-row vertical 4-tap filter; %1 = sp or ss
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x8, 4, 6, 8
+    mov             r4d, r4m
+    shl             r4d, 6                      ; coeffIdx * 64 byte offset into pw_ChromaCoeffV
+    add             r1d, r1d                    ; 16-bit source: stride -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]                ; r4 = 3 * srcStride
+    sub             r0, r1                      ; back up one row for the 4-tap window
+%ifidn %1,sp
+    mova            m7, [pd_526336]             ; rounding constant for the sp >>12 path
+%else
+    add             r3d, r3d                    ; ss: 16-bit output stride in bytes
+%endif
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1              ; interleaved rows 0/1, low+high halves in one ymm
+    pmaddwd         m0, [r5]                    ; taps 0/1
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]   ; taps 2/3 for output row 0
+    pmaddwd         m2, [r5]
+    paddd           m0, m4
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    pmaddwd         m3, [r5]
+    paddd           m1, m5
+%ifidn %1,sp
+    paddd           m0, m7
+    paddd           m1, m7
+    psrad           m0, 12
+    psrad           m1, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m1                      ; output rows 0/1 as words
+
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm1, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m1
+%ifidn %1,sp
+    paddd           m2, m7
+    paddd           m3, m7
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m2, m3                      ; output rows 2/3 as words
+
+    movu            xm1, [r0 + r4]                  ; m1 = row 7
+    punpckhwd       xm3, xm6, xm1
+    punpcklwd       xm6, xm1
+    vinserti128     m6, m6, xm3, 1
+    pmaddwd         m3, m6, [r5 + 1 * mmsize]
+    pmaddwd         m6, [r5]
+    paddd           m4, m3
+
+    lea             r4, [r3 * 3]                ; r4 reused: 3 * dstStride
+%ifidn %1,sp
+    packuswb        m0, m2                      ; clamp rows 0-3 to bytes
+    mova            m3, [interp8_hps_shuf]
+    vpermd          m0, m3, m0                  ; undo lane interleave before store
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r4], xm2
+%else
+    vpermq          m0, m0, 11011000b           ; undo lane interleave before store
+    vpermq          m2, m2, 11011000b
+    movu            [r2], xm0
+    vextracti128    xm0, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2 + r3], xm0
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+%endif
+    lea             r2, [r2 + r3 * 4]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm0, [r0]                       ; m0 = row 8
+    punpckhwd       xm2, xm1, xm0
+    punpcklwd       xm1, xm0
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m2, m1, [r5 + 1 * mmsize]
+    pmaddwd         m1, [r5]
+    paddd           m5, m2
+%ifidn %1,sp
+    paddd           m4, m7
+    paddd           m5, m7
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m4, m5                      ; output rows 4/5 as words
+
+    movu            xm2, [r0 + r1]                  ; m2 = row 9
+    punpckhwd       xm5, xm0, xm2
+    punpcklwd       xm0, xm2
+    vinserti128     m0, m0, xm5, 1
+    pmaddwd         m0, [r5 + 1 * mmsize]
+    paddd           m6, m0
+    movu            xm5, [r0 + r1 * 2]              ; m5 = row 10
+    punpckhwd       xm0, xm2, xm5
+    punpcklwd       xm2, xm5
+    vinserti128     m2, m2, xm0, 1
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m1, m2
+
+%ifidn %1,sp
+    paddd           m6, m7
+    paddd           m1, m7
+    psrad           m6, 12
+    psrad           m1, 12
+%else
+    psrad           m6, 6
+    psrad           m1, 6
+%endif
+    packssdw        m6, m1                      ; output rows 6/7 as words
+%ifidn %1,sp
+    packuswb        m4, m6
+    vpermd          m4, m3, m4                  ; m3 still holds interp8_hps_shuf from above
+    vextracti128    xm6, m4, 1
+    movq            [r2], xm4
+    movhps          [r2 + r3], xm4
+    movq            [r2 + r3 * 2], xm6
+    movhps          [r2 + r4], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm5, m4, 1
+    vextracti128    xm1, m6, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r4], xm1
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_8x8 sp
+    FILTER_VER_CHROMA_S_AVX2_8x8 ss
+
+%macro PROCESS_CHROMA_S_AVX2_W8_16R 1          ; filter one 8-wide, 16-row tile; expects r5=coeffs, r4=3*r1, r6=3*r3, m9=round const (sp); clobbers r7 (src rows) and r8 (dst rows)
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1              ; interleaved row pair, low+high halves in one ymm
+    pmaddwd         m0, [r5]                    ; taps 0/1
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]   ; taps 2/3
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r7, [r0 + r1 * 4]           ; r7 walks the source rows; r0 is left untouched
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+%ifidn %1,sp
+    paddd           m0, m9
+    paddd           m1, m9
+    paddd           m2, m9
+    paddd           m3, m9
+    psrad           m0, 12
+    psrad           m1, 12
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m0, m1
+    packssdw        m2, m3
+%ifidn %1,sp
+    packuswb        m0, m2                      ; rows 0-3 clamped to bytes
+    mova            m3, [interp8_hps_shuf]      ; m3 keeps this shuffle mask for all later vpermd
+    vpermd          m0, m3, m0
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%endif
+
+    movu            xm7, [r7 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm8, [r7]                       ; m8 = row 8
+    punpckhwd       xm0, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm0, 1
+    pmaddwd         m0, m7, [r5 + 1 * mmsize]
+    paddd           m5, m0
+    pmaddwd         m7, [r5]
+    movu            xm0, [r7 + r1]                  ; m0 = row 9
+    punpckhwd       xm1, xm8, xm0
+    punpcklwd       xm8, xm0
+    vinserti128     m8, m8, xm1, 1
+    pmaddwd         m1, m8, [r5 + 1 * mmsize]
+    paddd           m6, m1
+    pmaddwd         m8, [r5]
+    movu            xm1, [r7 + r1 * 2]              ; m1 = row 10
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m2, m0, [r5 + 1 * mmsize]
+    paddd           m7, m2
+    pmaddwd         m0, [r5]
+%ifidn %1,sp
+    paddd           m4, m9
+    paddd           m5, m9
+    psrad           m4, 12
+    psrad           m5, 12
+    paddd           m6, m9
+    paddd           m7, m9
+    psrad           m6, 12
+    psrad           m7, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+    psrad           m6, 6
+    psrad           m7, 6
+%endif
+    packssdw        m4, m5
+    packssdw        m6, m7
+    lea             r8, [r2 + r3 * 4]           ; r8 walks the destination rows; r2 is left untouched
+%ifidn %1,sp
+    packuswb        m4, m6                      ; rows 4-7 clamped to bytes
+    vpermd          m4, m3, m4
+    vextracti128    xm6, m4, 1
+    movq            [r8], xm4
+    movhps          [r8 + r3], xm4
+    movq            [r8 + r3 * 2], xm6
+    movhps          [r8 + r6], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm5, m4, 1
+    vextracti128    xm7, m6, 1
+    movu            [r8], xm4
+    movu            [r8 + r3], xm5
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm7
+%endif
+
+    movu            xm2, [r7 + r4]                  ; m2 = row 11
+    punpckhwd       xm4, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm4, 1
+    pmaddwd         m4, m1, [r5 + 1 * mmsize]
+    paddd           m8, m4
+    pmaddwd         m1, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm4, [r7]                       ; m4 = row 12
+    punpckhwd       xm5, xm2, xm4
+    punpcklwd       xm2, xm4
+    vinserti128     m2, m2, xm5, 1
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m2, [r5]
+    movu            xm5, [r7 + r1]                  ; m5 = row 13
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m1, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 14
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m2, m7
+    pmaddwd         m5, [r5]
+%ifidn %1,sp
+    paddd           m8, m9
+    paddd           m0, m9
+    paddd           m1, m9
+    paddd           m2, m9
+    psrad           m8, 12
+    psrad           m0, 12
+    psrad           m1, 12
+    psrad           m2, 12
+%else
+    psrad           m8, 6
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+%endif
+    packssdw        m8, m0
+    packssdw        m1, m2
+    lea             r8, [r8 + r3 * 4]
+%ifidn %1,sp
+    packuswb        m8, m1                      ; rows 8-11 clamped to bytes
+    vpermd          m8, m3, m8
+    vextracti128    xm1, m8, 1
+    movq            [r8], xm8
+    movhps          [r8 + r3], xm8
+    movq            [r8 + r3 * 2], xm1
+    movhps          [r8 + r6], xm1
+%else
+    vpermq          m8, m8, 11011000b
+    vpermq          m1, m1, 11011000b
+    vextracti128    xm0, m8, 1
+    vextracti128    xm2, m1, 1
+    movu            [r8], xm8
+    movu            [r8 + r3], xm0
+    movu            [r8 + r3 * 2], xm1
+    movu            [r8 + r6], xm2
+%endif
+    lea             r8, [r8 + r3 * 4]
+
+    movu            xm7, [r7 + r4]                  ; m7 = row 15
+    punpckhwd       xm2, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm2, 1
+    pmaddwd         m2, m6, [r5 + 1 * mmsize]
+    paddd           m4, m2
+    pmaddwd         m6, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm2, [r7]                       ; m2 = row 16
+    punpckhwd       xm1, xm7, xm2
+    punpcklwd       xm7, xm2
+    vinserti128     m7, m7, xm1, 1
+    pmaddwd         m1, m7, [r5 + 1 * mmsize]
+    paddd           m5, m1
+    pmaddwd         m7, [r5]
+    movu            xm1, [r7 + r1]                  ; m1 = row 17
+    punpckhwd       xm0, xm2, xm1
+    punpcklwd       xm2, xm1
+    vinserti128     m2, m2, xm0, 1
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m6, m2
+    movu            xm0, [r7 + r1 * 2]              ; m0 = row 18
+    punpckhwd       xm2, xm1, xm0
+    punpcklwd       xm1, xm0
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m1, [r5 + 1 * mmsize]
+    paddd           m7, m1
+
+%ifidn %1,sp
+    paddd           m4, m9
+    paddd           m5, m9
+    paddd           m6, m9
+    paddd           m7, m9
+    psrad           m4, 12
+    psrad           m5, 12
+    psrad           m6, 12
+    psrad           m7, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+    psrad           m6, 6
+    psrad           m7, 6
+%endif
+    packssdw        m4, m5
+    packssdw        m6, m7
+%ifidn %1,sp
+    packuswb        m4, m6                      ; rows 12-15 clamped to bytes
+    vpermd          m4, m3, m4
+    vextracti128    xm6, m4, 1
+    movq            [r8], xm4
+    movhps          [r8 + r3], xm4
+    movq            [r8 + r3 * 2], xm6
+    movhps          [r8 + r6], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm5, m4, 1
+    vextracti128    xm7, m6, 1
+    movu            [r8], xm4
+    movu            [r8 + r3], xm5
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm7
+%endif
+%endmacro                                       ; on exit r7/r8 point at the last processed src/dst rows (the NxN caller relies on this)
+
+%macro FILTER_VER_CHROMA_S_AVX2_Nx16 2         ; %1 = sp/ss, %2 = width; loops W8_16R over %2/8 column tiles
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; W8_16R needs r7-r9 and m9 -> 64-bit only
+cglobal interp_4tap_vert_%1_%2x16, 4, 10, 10
+    mov             r4d, r4m
+    shl             r4d, 6                      ; coeffIdx * 64 byte offset into pw_ChromaCoeffV
+    add             r1d, r1d                    ; 16-bit source: stride -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+    lea             r4, [r1 * 3]
+    sub             r0, r1                      ; back up one row for the 4-tap window
+%ifidn %1,sp
+    mova            m9, [pd_526336]             ; rounding constant for the sp >>12 path
+%else
+    add             r3d, r3d                    ; ss: 16-bit output stride in bytes
+%endif
+    lea             r6, [r3 * 3]
+    mov             r9d, %2 / 8                 ; number of 8-wide column tiles
+.loopW:
+    PROCESS_CHROMA_S_AVX2_W8_16R %1
+%ifidn %1,sp
+    add             r2, 8                       ; sp: 8 one-byte output pixels per tile
+%else
+    add             r2, 16                      ; ss: 8 two-byte output pixels per tile
+%endif
+    add             r0, 16                      ; advance src by 8 words
+    dec             r9d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_Nx16 sp, 16
+    FILTER_VER_CHROMA_S_AVX2_Nx16 sp, 32
+    FILTER_VER_CHROMA_S_AVX2_Nx16 sp, 64
+    FILTER_VER_CHROMA_S_AVX2_Nx16 ss, 16
+    FILTER_VER_CHROMA_S_AVX2_Nx16 ss, 32
+    FILTER_VER_CHROMA_S_AVX2_Nx16 ss, 64
+
+%macro FILTER_VER_CHROMA_S_AVX2_NxN 3          ; %1 = width, %2 = height, %3 = sp/ss; tiles W8_16R over %1/8 x %2/16
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; W8_16R needs r7-r10 and m9 -> 64-bit only
+cglobal interp_4tap_vert_%3_%1x%2, 4, 11, 10
+    mov             r4d, r4m
+    shl             r4d, 6                      ; coeffIdx * 64 byte offset into pw_ChromaCoeffV
+    add             r1d, r1d                    ; 16-bit source: stride -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+    lea             r4, [r1 * 3]
+    sub             r0, r1                      ; back up one row for the 4-tap window
+%ifidn %3,sp
+    mova            m9, [pd_526336]             ; rounding constant for the sp >>12 path
+%else
+    add             r3d, r3d                    ; ss: 16-bit output stride in bytes
+%endif
+    lea             r6, [r3 * 3]
+    mov             r9d, %2 / 16                ; row-band counter
+.loopH:
+    mov             r10d, %1 / 8                ; column-tile counter
+.loopW:
+    PROCESS_CHROMA_S_AVX2_W8_16R %3
+%ifidn %3,sp
+    add             r2, 8                       ; sp: 8 one-byte output pixels per tile
+%else
+    add             r2, 16                      ; ss: 8 two-byte output pixels per tile
+%endif
+    add             r0, 16                      ; advance src by 8 words
+    dec             r10d
+    jnz             .loopW
+    lea             r0, [r7 - 2 * %1 + 16]      ; r7 was left on the last tile's rows by W8_16R; rewind %1 words
+%ifidn %3,sp
+    lea             r2, [r8 + r3 * 4 - %1 + 8]  ; likewise rewind dst from r8 (byte pixels)
+%else
+    lea             r2, [r8 + r3 * 4 - 2 * %1 + 16] ; likewise rewind dst from r8 (word pixels)
+%endif
+    dec             r9d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_NxN 16, 32, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 24, 32, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 32, 32, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 16, 32, ss
+    FILTER_VER_CHROMA_S_AVX2_NxN 24, 32, ss
+    FILTER_VER_CHROMA_S_AVX2_NxN 32, 32, ss
+    FILTER_VER_CHROMA_S_AVX2_NxN 16, 64, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 24, 64, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 32, 64, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 32, 48, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 32, 48, ss
+    FILTER_VER_CHROMA_S_AVX2_NxN 16, 64, ss
+    FILTER_VER_CHROMA_S_AVX2_NxN 24, 64, ss
+    FILTER_VER_CHROMA_S_AVX2_NxN 32, 64, ss
+    FILTER_VER_CHROMA_S_AVX2_NxN 64, 64, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 64, 32, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 64, 48, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 48, 64, sp
+    FILTER_VER_CHROMA_S_AVX2_NxN 64, 64, ss
+    FILTER_VER_CHROMA_S_AVX2_NxN 64, 32, ss
+    FILTER_VER_CHROMA_S_AVX2_NxN 64, 48, ss
+    FILTER_VER_CHROMA_S_AVX2_NxN 48, 64, ss
+
+%macro PROCESS_CHROMA_S_AVX2_W8_4R 1           ; filter 8-wide, 4 rows; leaves results in m0/m2 (sp: xm2=hi lanes) or xm0-xm3 (ss); expects r5=coeffs, r4=3*r1, m7=round const (sp); advances r0
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1              ; interleaved row pair, low+high halves in one ymm
+    pmaddwd         m0, [r5]                    ; taps 0/1
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]   ; taps 2/3
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m4, [r5 + 1 * mmsize]
+    paddd           m2, m4
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm4, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm4, 1
+    pmaddwd         m5, [r5 + 1 * mmsize]
+    paddd           m3, m5
+%ifidn %1,sp
+    paddd           m0, m7
+    paddd           m1, m7
+    paddd           m2, m7
+    paddd           m3, m7
+    psrad           m0, 12
+    psrad           m1, 12
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m0, m1
+    packssdw        m2, m3
+%ifidn %1,sp
+    packuswb        m0, m2                      ; clamp to bytes; caller stores from xm0/xm2
+    mova            m3, [interp8_hps_shuf]
+    vpermd          m0, m3, m0
+    vextracti128    xm2, m0, 1
+%else
+    vpermq          m0, m0, 11011000b           ; caller stores xm0-xm3
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+%endif
+%endmacro
+
+%macro FILTER_VER_CHROMA_S_AVX2_8x4 1          ; 8-wide, 4-row vertical 4-tap filter; %1 = sp or ss
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x4, 4, 6, 8
+    mov             r4d, r4m
+    shl             r4d, 6                      ; coeffIdx * 64 byte offset into pw_ChromaCoeffV
+    add             r1d, r1d                    ; 16-bit source: stride -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1                      ; back up one row for the 4-tap window
+%ifidn %1,sp
+    mova            m7, [pd_526336]             ; rounding constant for the sp >>12 path
+%else
+    add             r3d, r3d                    ; ss: 16-bit output stride in bytes
+%endif
+
+    PROCESS_CHROMA_S_AVX2_W8_4R %1
+    lea             r4, [r3 * 3]                ; r4 reused: 3 * dstStride
+%ifidn %1,sp
+    movq            [r2], xm0                   ; 8 bytes per output row
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r4], xm2
+%else
+    movu            [r2], xm0                   ; 8 words per output row
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_8x4 sp
+    FILTER_VER_CHROMA_S_AVX2_8x4 ss
+
+%macro FILTER_VER_CHROMA_S_AVX2_12x16 1        ; 12x16 = one 8-wide tile + one 4-wide tile; %1 = sp or ss
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; W8_16R needs r7/r8 and m9 -> 64-bit only
+cglobal interp_4tap_vert_%1_12x16, 4, 9, 10
+    mov             r4d, r4m
+    shl             r4d, 6                      ; coeffIdx * 64 byte offset into pw_ChromaCoeffV
+    add             r1d, r1d                    ; 16-bit source: stride -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1                      ; back up one row for the 4-tap window
+%ifidn %1,sp
+    mova            m9, [pd_526336]             ; rounding constant for the sp >>12 path
+%else
+    add             r3d, r3d                    ; ss: 16-bit output stride in bytes
+%endif
+    lea             r6, [r3 * 3]
+    PROCESS_CHROMA_S_AVX2_W8_16R %1             ; columns 0-7
+%ifidn %1,sp
+    add             r2, 8                       ; sp: skip 8 byte pixels
+%else
+    add             r2, 16                      ; ss: skip 8 word pixels
+%endif
+    add             r0, 16                      ; advance src by 8 words
+    mova            m7, m9                      ; W4_16R expects the round constant in m7
+    PROCESS_CHROMA_AVX2_W4_16R %1               ; columns 8-11
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_12x16 sp
+    FILTER_VER_CHROMA_S_AVX2_12x16 ss
+
+%macro FILTER_VER_CHROMA_S_AVX2_12x32 1        ; 12x32 = two vertical passes of (8-wide + 4-wide) tiles; %1 = sp or ss
+%if ARCH_X86_64 == 1                           ; W8_16R needs r7/r8 and m9 -> 64-bit only
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_12x32, 4, 9, 10
+    mov             r4d, r4m
+    shl             r4d, 6                      ; coeffIdx * 64 byte offset into pw_ChromaCoeffV
+    add             r1d, r1d                    ; 16-bit source: stride -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1                      ; back up one row for the 4-tap window
+%ifidn %1, sp
+    mova            m9, [pd_526336]             ; rounding constant for the sp >>12 path
+%else
+    add             r3d, r3d                    ; ss: 16-bit output stride in bytes
+%endif
+    lea             r6, [r3 * 3]
+%rep 2
+    PROCESS_CHROMA_S_AVX2_W8_16R %1             ; columns 0-7
+%ifidn %1, sp
+    add             r2, 8
+%else
+    add             r2, 16
+%endif
+    add             r0, 16                      ; advance src by 8 words
+    mova            m7, m9                      ; W4_16R expects the round constant in m7
+    PROCESS_CHROMA_AVX2_W4_16R %1               ; columns 8-11
+    sub             r0, 16                      ; rewind src to column 0
+%ifidn %1, sp
+    lea             r2, [r2 + r3 * 4 - 8]       ; next 16-row band, back to column 0 (byte pixels)
+%else
+    lea             r2, [r2 + r3 * 4 - 16]      ; next 16-row band, back to column 0 (word pixels)
+%endif
+%endrep
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_12x32 sp
+    FILTER_VER_CHROMA_S_AVX2_12x32 ss
+
+;-----------------------------------------------------------------------------
+; FILTER_VER_CHROMA_S_AVX2_16x12 %1={sp|ss}
+; AVX2 4-tap vertical chroma filter for a 16x12 block of 16-bit intermediate
+; samples (srcStride is doubled to a byte stride below).
+;   sp: adds the pd_526336 rounding constant, arithmetic-shifts >>12 and
+;       packs to unsigned bytes (8-byte stores per row).
+;   ss: arithmetic-shifts >>6 and stores signed 16-bit results
+;       (dstStride doubled, 16-byte stores per row).
+; The 16-wide block is processed as two 8-column strips (%rep 2) of 12 rows;
+; x86-64 only (r7/r8 used as roaming row pointers).
+; NOTE(review): argument layout (src, srcStride, dst, dstStride, coeffIdx in
+; r4m) inferred from register usage - confirm against the C prototype.
+;-----------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_S_AVX2_16x12 1
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_vert_%1_16x12, 4, 9, 9
+    mov             r4d, r4m
+    ; r4 = coeffIdx * 64: byte offset of this filter's coefficient set
+    shl             r4d, 6
+    ; source samples are 16-bit: convert element stride to byte stride
+    add             r1d, r1d
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    ; r4 = 3 * srcStride; back src up one row (4-tap window starts at row -1)
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,sp
+    ; rounding constant for the sp path (applied before the >>12 shift)
+    mova            m8, [pd_526336]
+%else
+    ; ss output is 16-bit: convert dst element stride to byte stride
+    add             r3d, r3d
+%endif
+    lea             r6, [r3 * 3]
+%rep 2
+    ; ---- rows 0..3: build interleaved row pairs, multiply-accumulate ----
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r7, [r0 + r1 * 4]
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+%ifidn %1,sp
+    paddd           m0, m8
+    paddd           m1, m8
+    psrad           m0, 12
+    psrad           m1, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m1
+
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm1, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m1
+%ifidn %1,sp
+    paddd           m2, m8
+    paddd           m3, m8
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m2, m3
+    ; ---- store output rows 0..3 ----
+%ifidn %1,sp
+    packuswb        m0, m2
+    mova            m3, [interp8_hps_shuf]
+    vpermd          m0, m3, m0
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    movu            [r2], xm0
+    vextracti128    xm0, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2 + r3], xm0
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%endif
+    ; r8 = dst row 4 (r2 itself is preserved for the next column strip)
+    lea             r8, [r2 + r3 * 4]
+
+    movu            xm1, [r7 + r4]                  ; m1 = row 7
+    punpckhwd       xm0, xm6, xm1
+    punpcklwd       xm6, xm1
+    vinserti128     m6, m6, xm0, 1
+    pmaddwd         m0, m6, [r5 + 1 * mmsize]
+    pmaddwd         m6, [r5]
+    paddd           m4, m0
+    lea             r7, [r7 + r1 * 4]
+    movu            xm0, [r7]                       ; m0 = row 8
+    punpckhwd       xm2, xm1, xm0
+    punpcklwd       xm1, xm0
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m2, m1, [r5 + 1 * mmsize]
+    pmaddwd         m1, [r5]
+    paddd           m5, m2
+%ifidn %1,sp
+    paddd           m4, m8
+    paddd           m5, m8
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m4, m5
+
+    movu            xm2, [r7 + r1]                  ; m2 = row 9
+    punpckhwd       xm5, xm0, xm2
+    punpcklwd       xm0, xm2
+    vinserti128     m0, m0, xm5, 1
+    pmaddwd         m5, m0, [r5 + 1 * mmsize]
+    paddd           m6, m5
+    pmaddwd         m0, [r5]
+    movu            xm5, [r7 + r1 * 2]              ; m5 = row 10
+    punpckhwd       xm7, xm2, xm5
+    punpcklwd       xm2, xm5
+    vinserti128     m2, m2, xm7, 1
+    pmaddwd         m7, m2, [r5 + 1 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m2, [r5]
+
+%ifidn %1,sp
+    paddd           m6, m8
+    paddd           m1, m8
+    psrad           m6, 12
+    psrad           m1, 12
+%else
+    psrad           m6, 6
+    psrad           m1, 6
+%endif
+    packssdw        m6, m1
+    ; ---- store output rows 4..7 (m3 still holds interp8_hps_shuf in sp) ----
+%ifidn %1,sp
+    packuswb        m4, m6
+    vpermd          m4, m3, m4
+    vextracti128    xm6, m4, 1
+    movq            [r8], xm4
+    movhps          [r8 + r3], xm4
+    movq            [r8 + r3 * 2], xm6
+    movhps          [r8 + r6], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m4, 1
+    vextracti128    xm1, m6, 1
+    movu            [r8], xm4
+    movu            [r8 + r3], xm7
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm1
+%endif
+    lea             r8, [r8 + r3 * 4]
+
+    movu            xm7, [r7 + r4]                  ; m7 = row 11
+    punpckhwd       xm1, xm5, xm7
+    punpcklwd       xm5, xm7
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    paddd           m0, m1
+    pmaddwd         m5, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm1, [r7]                       ; m1 = row 12
+    punpckhwd       xm4, xm7, xm1
+    punpcklwd       xm7, xm1
+    vinserti128     m7, m7, xm4, 1
+    pmaddwd         m4, m7, [r5 + 1 * mmsize]
+    paddd           m2, m4
+    pmaddwd         m7, [r5]
+%ifidn %1,sp
+    paddd           m0, m8
+    paddd           m2, m8
+    psrad           m0, 12
+    psrad           m2, 12
+%else
+    psrad           m0, 6
+    psrad           m2, 6
+%endif
+    packssdw        m0, m2
+
+    ; rows 13/14 only feed the tail taps; no [r5] first-tap product needed
+    movu            xm4, [r7 + r1]                  ; m4 = row 13
+    punpckhwd       xm2, xm1, xm4
+    punpcklwd       xm1, xm4
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m1, [r5 + 1 * mmsize]
+    paddd           m5, m1
+    movu            xm2, [r7 + r1 * 2]              ; m2 = row 14
+    punpckhwd       xm6, xm4, xm2
+    punpcklwd       xm4, xm2
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m4, [r5 + 1 * mmsize]
+    paddd           m7, m4
+%ifidn %1,sp
+    paddd           m5, m8
+    paddd           m7, m8
+    psrad           m5, 12
+    psrad           m7, 12
+%else
+    psrad           m5, 6
+    psrad           m7, 6
+%endif
+    packssdw        m5, m7
+    ; ---- store output rows 8..11, then step to the next 8-column strip ----
+%ifidn %1,sp
+    packuswb        m0, m5
+    vpermd          m0, m3, m0
+    vextracti128    xm5, m0, 1
+    movq            [r8], xm0
+    movhps          [r8 + r3], xm0
+    movq            [r8 + r3 * 2], xm5
+    movhps          [r8 + r6], xm5
+    add             r2, 8
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m5, m5, 11011000b
+    vextracti128    xm7, m0, 1
+    vextracti128    xm6, m5, 1
+    movu            [r8], xm0
+    movu            [r8 + r3], xm7
+    movu            [r8 + r3 * 2], xm5
+    movu            [r8 + r6], xm6
+    add             r2, 16
+%endif
+    add             r0, 16
+%endrep
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_16x12 sp
+    FILTER_VER_CHROMA_S_AVX2_16x12 ss
+
+;-----------------------------------------------------------------------------
+; FILTER_VER_CHROMA_S_AVX2_8x12 %1={sp|ss}
+; AVX2 4-tap vertical chroma filter for an 8x12 block of 16-bit intermediate
+; samples.  Same sp/ss semantics as the other _S_ variants: sp rounds with
+; pd_526336, shifts >>12 and packs to bytes; ss shifts >>6 and stores int16.
+; Single 8-column strip; r0 (src) and r2 (dst) are advanced in place as the
+; 12 rows are produced, so no extra row pointers are needed (4,7,9 cglobal).
+; x86-64 only.  NOTE(review): argument layout inferred from register usage -
+; confirm against the C prototype.
+;-----------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_S_AVX2_8x12 1
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x12, 4, 7, 9
+    mov             r4d, r4m
+    ; r4 = coeffIdx * 64: byte offset of this filter's coefficient set
+    shl             r4d, 6
+    ; 16-bit source: element stride -> byte stride
+    add             r1d, r1d
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    ; r4 = 3 * srcStride; start one row above (4-tap window)
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,sp
+    ; rounding constant for the sp >>12 path
+    mova            m8, [pd_526336]
+%else
+    ; ss output is 16-bit: dst element stride -> byte stride
+    add             r3d, r3d
+%endif
+    lea             r6, [r3 * 3]
+    ; ---- rows 0..3 ----
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+%ifidn %1,sp
+    paddd           m0, m8
+    paddd           m1, m8
+    psrad           m0, 12
+    psrad           m1, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m1
+
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm1, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m1
+%ifidn %1,sp
+    paddd           m2, m8
+    paddd           m3, m8
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m2, m3
+    ; ---- store output rows 0..3 ----
+%ifidn %1,sp
+    packuswb        m0, m2
+    mova            m3, [interp8_hps_shuf]
+    vpermd          m0, m3, m0
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    movu            [r2], xm0
+    vextracti128    xm0, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2 + r3], xm0
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm1, [r0 + r4]                  ; m1 = row 7
+    punpckhwd       xm0, xm6, xm1
+    punpcklwd       xm6, xm1
+    vinserti128     m6, m6, xm0, 1
+    pmaddwd         m0, m6, [r5 + 1 * mmsize]
+    pmaddwd         m6, [r5]
+    paddd           m4, m0
+    lea             r0, [r0 + r1 * 4]
+    movu            xm0, [r0]                       ; m0 = row 8
+    punpckhwd       xm2, xm1, xm0
+    punpcklwd       xm1, xm0
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m2, m1, [r5 + 1 * mmsize]
+    pmaddwd         m1, [r5]
+    paddd           m5, m2
+%ifidn %1,sp
+    paddd           m4, m8
+    paddd           m5, m8
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m4, m5
+
+    movu            xm2, [r0 + r1]                  ; m2 = row 9
+    punpckhwd       xm5, xm0, xm2
+    punpcklwd       xm0, xm2
+    vinserti128     m0, m0, xm5, 1
+    pmaddwd         m5, m0, [r5 + 1 * mmsize]
+    paddd           m6, m5
+    pmaddwd         m0, [r5]
+    movu            xm5, [r0 + r1 * 2]              ; m5 = row 10
+    punpckhwd       xm7, xm2, xm5
+    punpcklwd       xm2, xm5
+    vinserti128     m2, m2, xm7, 1
+    pmaddwd         m7, m2, [r5 + 1 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m2, [r5]
+
+%ifidn %1,sp
+    paddd           m6, m8
+    paddd           m1, m8
+    psrad           m6, 12
+    psrad           m1, 12
+%else
+    psrad           m6, 6
+    psrad           m1, 6
+%endif
+    packssdw        m6, m1
+    ; ---- store output rows 4..7 (m3 keeps interp8_hps_shuf in sp) ----
+%ifidn %1,sp
+    packuswb        m4, m6
+    vpermd          m4, m3, m4
+    vextracti128    xm6, m4, 1
+    movq            [r2], xm4
+    movhps          [r2 + r3], xm4
+    movq            [r2 + r3 * 2], xm6
+    movhps          [r2 + r6], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m4, 1
+    vextracti128    xm1, m6, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm7
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r6], xm1
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm7, [r0 + r4]                  ; m7 = row 11
+    punpckhwd       xm1, xm5, xm7
+    punpcklwd       xm5, xm7
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    paddd           m0, m1
+    pmaddwd         m5, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm1, [r0]                       ; m1 = row 12
+    punpckhwd       xm4, xm7, xm1
+    punpcklwd       xm7, xm1
+    vinserti128     m7, m7, xm4, 1
+    pmaddwd         m4, m7, [r5 + 1 * mmsize]
+    paddd           m2, m4
+    pmaddwd         m7, [r5]
+%ifidn %1,sp
+    paddd           m0, m8
+    paddd           m2, m8
+    psrad           m0, 12
+    psrad           m2, 12
+%else
+    psrad           m0, 6
+    psrad           m2, 6
+%endif
+    packssdw        m0, m2
+
+    ; rows 13/14 only feed the tail taps; no first-tap product needed
+    movu            xm4, [r0 + r1]                  ; m4 = row 13
+    punpckhwd       xm2, xm1, xm4
+    punpcklwd       xm1, xm4
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m1, [r5 + 1 * mmsize]
+    paddd           m5, m1
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 14
+    punpckhwd       xm6, xm4, xm2
+    punpcklwd       xm4, xm2
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m4, [r5 + 1 * mmsize]
+    paddd           m7, m4
+%ifidn %1,sp
+    paddd           m5, m8
+    paddd           m7, m8
+    psrad           m5, 12
+    psrad           m7, 12
+%else
+    psrad           m5, 6
+    psrad           m7, 6
+%endif
+    packssdw        m5, m7
+    ; ---- store output rows 8..11 ----
+%ifidn %1,sp
+    packuswb        m0, m5
+    vpermd          m0, m3, m0
+    vextracti128    xm5, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm5
+    movhps          [r2 + r6], xm5
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m5, m5, 11011000b
+    vextracti128    xm7, m0, 1
+    vextracti128    xm6, m5, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm7
+    movu            [r2 + r3 * 2], xm5
+    movu            [r2 + r6], xm6
+%endif
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_8x12 sp
+    FILTER_VER_CHROMA_S_AVX2_8x12 ss
+
+;-----------------------------------------------------------------------------
+; FILTER_VER_CHROMA_S_AVX2_16x4 %1={sp|ss}
+; AVX2 4-tap vertical chroma filter for a 16x4 block of 16-bit intermediates.
+; Delegates the per-strip work to PROCESS_CHROMA_S_AVX2_W8_4R (defined
+; elsewhere) and runs it twice for the two 8-column strips.
+; NOTE(review): the W8_4R helper is assumed to leave the 4 result rows in
+; m0..m3 (sp: packed pairs in m0/m2) and to advance r0 past the strip -
+; confirm against its definition, which is outside this hunk.
+;-----------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_S_AVX2_16x4 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_16x4, 4, 7, 8
+    mov             r4d, r4m
+    ; r4 = coeffIdx * 64: byte offset of this filter's coefficient set
+    shl             r4d, 6
+    ; 16-bit source: element stride -> byte stride
+    add             r1d, r1d
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    ; r4 = 3 * srcStride; start one row above (4-tap window)
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,sp
+    ; rounding constant for the sp >>12 path
+    mova            m7, [pd_526336]
+%else
+    ; ss output is 16-bit: dst element stride -> byte stride
+    add             r3d, r3d
+%endif
+%rep 2
+    PROCESS_CHROMA_S_AVX2_W8_4R %1
+    lea             r6, [r3 * 3]
+%ifidn %1,sp
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+    add             r2, 8
+%else
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    add             r2, 16
+%endif
+    ; rewind src by 4 rows and step 8 columns (16 bytes) right
+    lea             r6, [4 * r1 - 16]
+    sub             r0, r6
+%endrep
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_16x4 sp
+    FILTER_VER_CHROMA_S_AVX2_16x4 ss
+
+;-----------------------------------------------------------------------------
+; PROCESS_CHROMA_S_AVX2_W8_8R %1={sp|ss}
+; Filters one 8-column x 8-row strip of 16-bit intermediates with a 4-tap
+; vertical filter and stores all 8 output rows.
+; Implicit register contract (set up by the calling cglobal):
+;   r0 = src (already backed up one row), r1 = src byte stride, r4 = 3*r1,
+;   r5 = coefficient pointer (taps 0/1 at [r5], taps 2/3 at [r5+mmsize]),
+;   r2 = dst, r3 = dst byte stride, r6 = 3*r3,
+;   m7 = pd_526336 rounding constant (sp variant only).
+; Clobbers r7/r8 as roaming src/dst row pointers (callers must be x86-64
+; and reserve >= 9 GPRs); r0/r2 themselves are left unchanged.
+; sp path also loads interp8_hps_shuf into m3 and keeps it there for the
+; second store group.
+;-----------------------------------------------------------------------------
+%macro PROCESS_CHROMA_S_AVX2_W8_8R 1
+    ; ---- rows 0..3 ----
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r7, [r0 + r1 * 4]
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+%ifidn %1,sp
+    paddd           m0, m7
+    paddd           m1, m7
+    psrad           m0, 12
+    psrad           m1, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m1
+
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm1, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m1
+%ifidn %1,sp
+    paddd           m2, m7
+    paddd           m3, m7
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m2, m3
+    ; ---- store output rows 0..3 ----
+%ifidn %1,sp
+    packuswb        m0, m2
+    mova            m3, [interp8_hps_shuf]
+    vpermd          m0, m3, m0
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    movu            [r2], xm0
+    vextracti128    xm0, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2 + r3], xm0
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%endif
+    ; r8 = dst row 4; r2 stays at the strip origin for the caller
+    lea             r8, [r2 + r3 * 4]
+
+    movu            xm1, [r7 + r4]                  ; m1 = row 7
+    punpckhwd       xm0, xm6, xm1
+    punpcklwd       xm6, xm1
+    vinserti128     m6, m6, xm0, 1
+    pmaddwd         m0, m6, [r5 + 1 * mmsize]
+    pmaddwd         m6, [r5]
+    paddd           m4, m0
+    lea             r7, [r7 + r1 * 4]
+    movu            xm0, [r7]                       ; m0 = row 8
+    punpckhwd       xm2, xm1, xm0
+    punpcklwd       xm1, xm0
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m2, m1, [r5 + 1 * mmsize]
+    pmaddwd         m1, [r5]
+    paddd           m5, m2
+%ifidn %1,sp
+    paddd           m4, m7
+    paddd           m5, m7
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m4, m5
+
+    ; rows 9/10 only feed the tail taps of output rows 6/7
+    movu            xm2, [r7 + r1]                  ; m2 = row 9
+    punpckhwd       xm5, xm0, xm2
+    punpcklwd       xm0, xm2
+    vinserti128     m0, m0, xm5, 1
+    pmaddwd         m0, [r5 + 1 * mmsize]
+    paddd           m6, m0
+    movu            xm5, [r7 + r1 * 2]              ; m5 = row 10
+    punpckhwd       xm0, xm2, xm5
+    punpcklwd       xm2, xm5
+    vinserti128     m2, m2, xm0, 1
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m1, m2
+
+%ifidn %1,sp
+    paddd           m6, m7
+    paddd           m1, m7
+    psrad           m6, 12
+    psrad           m1, 12
+%else
+    psrad           m6, 6
+    psrad           m1, 6
+%endif
+    packssdw        m6, m1
+    ; ---- store output rows 4..7 (m3 keeps interp8_hps_shuf in sp) ----
+%ifidn %1,sp
+    packuswb        m4, m6
+    vpermd          m4, m3, m4
+    vextracti128    xm6, m4, 1
+    movq            [r8], xm4
+    movhps          [r8 + r3], xm4
+    movq            [r8 + r3 * 2], xm6
+    movhps          [r8 + r6], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m4, 1
+    vextracti128    xm1, m6, 1
+    movu            [r8], xm4
+    movu            [r8 + r3], xm7
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm1
+%endif
+%endmacro
+
+;-----------------------------------------------------------------------------
+; FILTER_VER_CHROMA_S_AVX2_Nx8 %1={sp|ss}, %2=width (16 or 32)
+; AVX2 4-tap vertical chroma filter for a %2 x 8 block of 16-bit
+; intermediates.  Iterates PROCESS_CHROMA_S_AVX2_W8_8R over %2/8 strips of
+; 8 columns, stepping src by 16 bytes and dst by 8 (sp, byte output) or
+; 16 (ss, int16 output) bytes per strip.  x86-64 only (the W8_8R helper
+; clobbers r7/r8).
+;-----------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_S_AVX2_Nx8 2
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_vert_%1_%2x8, 4, 9, 8
+    mov             r4d, r4m
+    ; r4 = coeffIdx * 64: byte offset of this filter's coefficient set
+    shl             r4d, 6
+    ; 16-bit source: element stride -> byte stride
+    add             r1d, r1d
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    ; r4 = 3 * srcStride; start one row above (4-tap window)
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,sp
+    ; rounding constant consumed by the helper's sp path
+    mova            m7, [pd_526336]
+%else
+    ; ss output is 16-bit: dst element stride -> byte stride
+    add             r3d, r3d
+%endif
+    lea             r6, [r3 * 3]
+%rep %2 / 8
+    PROCESS_CHROMA_S_AVX2_W8_8R %1
+%ifidn %1,sp
+    add             r2, 8
+%else
+    add             r2, 16
+%endif
+    add             r0, 16
+%endrep
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_Nx8 sp, 32
+    FILTER_VER_CHROMA_S_AVX2_Nx8 sp, 16
+    FILTER_VER_CHROMA_S_AVX2_Nx8 ss, 32
+    FILTER_VER_CHROMA_S_AVX2_Nx8 ss, 16
+
+;-----------------------------------------------------------------------------
+; FILTER_VER_CHROMA_S_AVX2_8x2 %1={sp|ss}
+; AVX2 4-tap vertical chroma filter for an 8x2 block of 16-bit intermediates.
+; Reads 4 source rows (rows -1..2 after the r0 adjustment) and emits 2 output
+; rows; sp rounds with pd_526336, shifts >>12 and packs to bytes, ss shifts
+; >>6 and stores int16.  Small enough to need only 6 GPRs / 6 vector regs,
+; so it is built for both 32- and 64-bit.
+;-----------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_S_AVX2_8x2 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x2, 4, 6, 6
+    mov             r4d, r4m
+    ; r4 = coeffIdx * 64: byte offset of this filter's coefficient set
+    shl             r4d, 6
+    ; 16-bit source: element stride -> byte stride
+    add             r1d, r1d
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    ; r4 = 3 * srcStride; start one row above (4-tap window)
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,sp
+    ; rounding constant for the sp >>12 path
+    mova            m5, [pd_526336]
+%else
+    ; ss output is 16-bit: dst element stride -> byte stride
+    add             r3d, r3d
+%endif
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m0, m2
+    movu            xm4, [r0 + r1 * 4]              ; m4 = row 4
+    punpckhwd       xm2, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm2, 1
+    pmaddwd         m3, [r5 + 1 * mmsize]
+    paddd           m1, m3
+%ifidn %1,sp
+    paddd           m0, m5
+    paddd           m1, m5
+    psrad           m0, 12
+    psrad           m1, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m1
+    ; ---- store the 2 output rows ----
+%ifidn %1,sp
+    vextracti128    xm1, m0, 1
+    packuswb        xm0, xm1
+    pshufd          xm0, xm0, 11011000b
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+%else
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm1, m0, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_8x2 sp
+    FILTER_VER_CHROMA_S_AVX2_8x2 ss
+
+;-----------------------------------------------------------------------------
+; FILTER_VER_CHROMA_S_AVX2_8x6 %1={sp|ss}
+; AVX2 4-tap vertical chroma filter for an 8x6 block of 16-bit intermediates.
+; Reads 8 source rows (rows -1..6 after the r0 adjustment) and emits 6 output
+; rows; sp rounds with pd_526336, shifts >>12 and packs to bytes, ss shifts
+; >>6 and stores int16.  All stores happen at the end; r4 is reused as
+; 3*dstStride once the source rows are consumed.
+;-----------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_S_AVX2_8x6 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_8x6, 4, 6, 8
+    mov             r4d, r4m
+    ; r4 = coeffIdx * 64: byte offset of this filter's coefficient set
+    shl             r4d, 6
+    ; 16-bit source: element stride -> byte stride
+    add             r1d, r1d
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    ; r4 = 3 * srcStride; start one row above (4-tap window)
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,sp
+    ; rounding constant for the sp >>12 path
+    mova            m7, [pd_526336]
+%else
+    ; ss output is 16-bit: dst element stride -> byte stride
+    add             r3d, r3d
+%endif
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m4
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    pmaddwd         m3, [r5]
+    paddd           m1, m5
+%ifidn %1,sp
+    paddd           m0, m7
+    paddd           m1, m7
+    psrad           m0, 12
+    psrad           m1, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m1
+
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm1, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m1
+%ifidn %1,sp
+    paddd           m2, m7
+    paddd           m3, m7
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m2, m3
+
+    ; rows 7/8 only feed the tail taps of output rows 4/5
+    movu            xm1, [r0 + r4]                  ; m1 = row 7
+    punpckhwd       xm3, xm6, xm1
+    punpcklwd       xm6, xm1
+    vinserti128     m6, m6, xm3, 1
+    pmaddwd         m6, [r5 + 1 * mmsize]
+    paddd           m4, m6
+    movu            xm6, [r0 + r1 * 4]              ; m6 = row 8
+    punpckhwd       xm3, xm1, xm6
+    punpcklwd       xm1, xm6
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5 + 1 * mmsize]
+    paddd           m5, m1
+%ifidn %1,sp
+    paddd           m4, m7
+    paddd           m5, m7
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m4, m5
+    ; src rows exhausted: r4 now repurposed as 3 * dstStride
+    lea             r4, [r3 * 3]
+    ; ---- store the 6 output rows ----
+%ifidn %1,sp
+    packuswb        m0, m2
+    mova            m3, [interp8_hps_shuf]
+    vpermd          m0, m3, m0
+    vextracti128    xm2, m0, 1
+    vextracti128    xm5, m4, 1
+    packuswb        xm4, xm5
+    pshufd          xm4, xm4, 11011000b
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r4], xm2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm4
+    movhps          [r2 + r3], xm4
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vpermq          m4, m4, 11011000b
+    movu            [r2], xm0
+    vextracti128    xm0, m0, 1
+    vextracti128    xm3, m2, 1
+    vextracti128    xm5, m4, 1
+    movu            [r2 + r3], xm0
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_8x6 sp
+    FILTER_VER_CHROMA_S_AVX2_8x6 ss
+
+%macro FILTER_VER_CHROMA_S_AVX2_8xN 2
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_4tap_vert_%1_8x%2, 4, 7, 9
+    mov             r4d, r4m
+    shl             r4d, 6
+    add             r1d, r1d
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1
+%ifidn %1,sp
+    mova            m8, [pd_526336]
+%else
+    add             r3d, r3d
+%endif
+    lea             r6, [r3 * 3]
+%rep %2 / 16
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+%ifidn %1,sp
+    paddd           m0, m8
+    paddd           m1, m8
+    psrad           m0, 12
+    psrad           m1, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m1
+
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm1, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m1
+%ifidn %1,sp
+    paddd           m2, m8
+    paddd           m3, m8
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m2, m3
+%ifidn %1,sp
+    packuswb        m0, m2
+    mova            m3, [interp8_hps_shuf]
+    vpermd          m0, m3, m0
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    movu            [r2], xm0
+    vextracti128    xm0, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2 + r3], xm0
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm1, [r0 + r4]                  ; m1 = row 7
+    punpckhwd       xm0, xm6, xm1
+    punpcklwd       xm6, xm1
+    vinserti128     m6, m6, xm0, 1
+    pmaddwd         m0, m6, [r5 + 1 * mmsize]
+    pmaddwd         m6, [r5]
+    paddd           m4, m0
+    lea             r0, [r0 + r1 * 4]
+    movu            xm0, [r0]                       ; m0 = row 8
+    punpckhwd       xm2, xm1, xm0
+    punpcklwd       xm1, xm0
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m2, m1, [r5 + 1 * mmsize]
+    pmaddwd         m1, [r5]
+    paddd           m5, m2
+%ifidn %1,sp
+    paddd           m4, m8
+    paddd           m5, m8
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m4, m5
+
+    movu            xm2, [r0 + r1]                  ; m2 = row 9
+    punpckhwd       xm5, xm0, xm2
+    punpcklwd       xm0, xm2
+    vinserti128     m0, m0, xm5, 1
+    pmaddwd         m5, m0, [r5 + 1 * mmsize]
+    paddd           m6, m5
+    pmaddwd         m0, [r5]
+    movu            xm5, [r0 + r1 * 2]              ; m5 = row 10
+    punpckhwd       xm7, xm2, xm5
+    punpcklwd       xm2, xm5
+    vinserti128     m2, m2, xm7, 1
+    pmaddwd         m7, m2, [r5 + 1 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m2, [r5]
+
+%ifidn %1,sp
+    paddd           m6, m8
+    paddd           m1, m8
+    psrad           m6, 12
+    psrad           m1, 12
+%else
+    psrad           m6, 6
+    psrad           m1, 6
+%endif
+    packssdw        m6, m1
+%ifidn %1,sp
+    packuswb        m4, m6
+    vpermd          m4, m3, m4
+    vextracti128    xm6, m4, 1
+    movq            [r2], xm4
+    movhps          [r2 + r3], xm4
+    movq            [r2 + r3 * 2], xm6
+    movhps          [r2 + r6], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm7, m4, 1
+    vextracti128    xm1, m6, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm7
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r6], xm1
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm7, [r0 + r4]                  ; m7 = row 11
+    punpckhwd       xm1, xm5, xm7
+    punpcklwd       xm5, xm7
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    paddd           m0, m1
+    pmaddwd         m5, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm1, [r0]                       ; m1 = row 12
+    punpckhwd       xm4, xm7, xm1
+    punpcklwd       xm7, xm1
+    vinserti128     m7, m7, xm4, 1
+    pmaddwd         m4, m7, [r5 + 1 * mmsize]
+    paddd           m2, m4
+    pmaddwd         m7, [r5]
+%ifidn %1,sp
+    paddd           m0, m8
+    paddd           m2, m8
+    psrad           m0, 12
+    psrad           m2, 12
+%else
+    psrad           m0, 6
+    psrad           m2, 6
+%endif
+    packssdw        m0, m2
+
+    movu            xm4, [r0 + r1]                  ; m4 = row 13
+    punpckhwd       xm2, xm1, xm4
+    punpcklwd       xm1, xm4
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m2, m1, [r5 + 1 * mmsize]
+    paddd           m5, m2
+    pmaddwd         m1, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 14
+    punpckhwd       xm6, xm4, xm2
+    punpcklwd       xm4, xm2
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m7, m6
+    pmaddwd         m4, [r5]
+%ifidn %1,sp
+    paddd           m5, m8
+    paddd           m7, m8
+    psrad           m5, 12
+    psrad           m7, 12
+%else
+    psrad           m5, 6
+    psrad           m7, 6
+%endif
+    packssdw        m5, m7
+%ifidn %1,sp
+    packuswb        m0, m5
+    vpermd          m0, m3, m0
+    vextracti128    xm5, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm5
+    movhps          [r2 + r6], xm5
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m5, m5, 11011000b
+    vextracti128    xm7, m0, 1
+    vextracti128    xm6, m5, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm7
+    movu            [r2 + r3 * 2], xm5
+    movu            [r2 + r6], xm6
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm6, [r0 + r4]                  ; m6 = row 15
+    punpckhwd       xm5, xm2, xm6
+    punpcklwd       xm2, xm6
+    vinserti128     m2, m2, xm5, 1
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm0, [r0]                       ; m0 = row 16
+    punpckhwd       xm5, xm6, xm0
+    punpcklwd       xm6, xm0
+    vinserti128     m6, m6, xm5, 1
+    pmaddwd         m5, m6, [r5 + 1 * mmsize]
+    paddd           m4, m5
+    pmaddwd         m6, [r5]
+%ifidn %1,sp
+    paddd           m1, m8
+    paddd           m4, m8
+    psrad           m1, 12
+    psrad           m4, 12
+%else
+    psrad           m1, 6
+    psrad           m4, 6
+%endif
+    packssdw        m1, m4
+
+    movu            xm5, [r0 + r1]                  ; m5 = row 17
+    punpckhwd       xm4, xm0, xm5
+    punpcklwd       xm0, xm5
+    vinserti128     m0, m0, xm4, 1
+    pmaddwd         m0, [r5 + 1 * mmsize]
+    paddd           m2, m0
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhwd       xm0, xm5, xm4
+    punpcklwd       xm5, xm4
+    vinserti128     m5, m5, xm0, 1
+    pmaddwd         m5, [r5 + 1 * mmsize]
+    paddd           m6, m5
+%ifidn %1,sp
+    paddd           m2, m8
+    paddd           m6, m8
+    psrad           m2, 12
+    psrad           m6, 12
+%else
+    psrad           m2, 6
+    psrad           m6, 6
+%endif
+    packssdw        m2, m6
+%ifidn %1,sp
+    packuswb        m1, m2
+    vpermd          m1, m3, m1
+    vextracti128    xm2, m1, 1
+    movq            [r2], xm1
+    movhps          [r2 + r3], xm1
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+%else
+    vpermq          m1, m1, 11011000b
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm6, m1, 1
+    vextracti128    xm4, m2, 1
+    movu            [r2], xm1
+    movu            [r2 + r3], xm6
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm4
+%endif
+    lea             r2, [r2 + r3 * 4]
+%endrep
+    RET
+%endif
+%endmacro
+
+; Instantiate the 8-wide vertical chroma filters for heights 16/32/64.
+; sp = 16-bit intermediate -> 8-bit pixel (adds rounding const, >> 12, packs
+; to bytes); ss = 16-bit -> 16-bit (>> 6 only), as selected by %ifidn above.
+    FILTER_VER_CHROMA_S_AVX2_8xN sp, 16
+    FILTER_VER_CHROMA_S_AVX2_8xN sp, 32
+    FILTER_VER_CHROMA_S_AVX2_8xN sp, 64
+    FILTER_VER_CHROMA_S_AVX2_8xN ss, 16
+    FILTER_VER_CHROMA_S_AVX2_8xN ss, 32
+    FILTER_VER_CHROMA_S_AVX2_8xN ss, 64
+
+; Vertical 4-tap chroma interpolation for %2 x 24 blocks (AVX2, x86-64 only).
+; %1 = sp (16-bit intermediate -> 8-bit pixel) or ss (16-bit -> 16-bit).
+; %2 = block width (instantiated below for 16 and 32).
+; The 24 rows are covered by one 16-row pass plus one 8-row pass, each
+; walking the width in 8-sample columns.
+%macro FILTER_VER_CHROMA_S_AVX2_Nx24 2
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_%2x24, 4, 10, 10
+    mov             r4d, r4m                        ; r4d = coeffIdx
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+    add             r1d, r1d                        ; srcStride in bytes (16-bit samples)
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]                    ; r4 = 3 * srcStride
+    sub             r0, r1                          ; back up one row for the 4-tap window
+%ifidn %1,sp
+    mova            m9, [pd_526336]                 ; rounding offset applied before >> 12 (sp only)
+%else
+    add             r3d, r3d                        ; ss: dstStride in bytes (16-bit output)
+%endif
+    lea             r6, [r3 * 3]
+    mov             r9d, %2 / 8                     ; column counter for first (16-row) pass
+.loopW:
+    PROCESS_CHROMA_S_AVX2_W8_16R %1
+%ifidn %1,sp
+    add             r2, 8
+%else
+    add             r2, 16
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+; NOTE(review): r7/r8 are never written in this body; they look like the
+; src/dst base pointers saved inside PROCESS_CHROMA_S_AVX2_W8_16R (defined
+; elsewhere) -- confirm against that macro before touching register usage.
+%ifidn %1,sp
+    lea             r2, [r8 + r3 * 4 - %2 + 8]
+%else
+    lea             r2, [r8 + r3 * 4 - 2 * %2 + 16]
+%endif
+    lea             r0, [r7 - 2 * %2 + 16]
+    mova            m7, m9                          ; W8_8R presumably expects the round const in m7 -- confirm
+    mov             r9d, %2 / 8                     ; column counter for second (8-row) pass
+.loop:
+    PROCESS_CHROMA_S_AVX2_W8_8R %1
+%ifidn %1,sp
+    add             r2, 8
+%else
+    add             r2, 16
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_Nx24 sp, 32
+    FILTER_VER_CHROMA_S_AVX2_Nx24 sp, 16
+    FILTER_VER_CHROMA_S_AVX2_Nx24 ss, 32
+    FILTER_VER_CHROMA_S_AVX2_Nx24 ss, 16
+
+; Vertical 4-tap chroma interpolation for 2x8 blocks (AVX2).
+; %1 = sp (round, >> 12, pack to bytes) or ss (>> 6, 16-bit output).
+; 2-sample rows are interleaved in pairs (punpcklwd) and multiply-accumulated
+; against the two coefficient word-pairs at [r5] and [r5 + mmsize].
+%macro FILTER_VER_CHROMA_S_AVX2_2x8 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_2x8, 4, 6, 7
+    mov             r4d, r4m                        ; r4d = coeffIdx
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+    add             r1d, r1d                        ; srcStride in bytes
+    sub             r0, r1                          ; back up one row for the 4-tap window
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+%ifidn %1,sp
+    mova            m6, [pd_526336]                 ; rounding offset for the sp path
+%else
+    add             r3d, r3d                        ; ss: dstStride in bytes
+%endif
+    movd            xm0, [r0]
+    movd            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movd            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    punpcklqdq      xm0, xm1                        ; m0 = [2 1 1 0]
+    movd            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movd            xm4, [r0]
+    punpcklwd       xm3, xm4
+    punpcklqdq      xm2, xm3                        ; m2 = [4 3 3 2]
+    vinserti128     m0, m0, xm2, 1                  ; m0 = [4 3 3 2 2 1 1 0]
+    movd            xm1, [r0 + r1]
+    punpcklwd       xm4, xm1
+    movd            xm3, [r0 + r1 * 2]
+    punpcklwd       xm1, xm3
+    punpcklqdq      xm4, xm1                        ; m4 = [6 5 5 4]
+    vinserti128     m2, m2, xm4, 1                  ; m2 = [6 5 5 4 4 3 3 2]
+    pmaddwd         m0, [r5]
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m0, m2                          ; rows 0-3 filtered (8 output dwords)
+    movd            xm1, [r0 + r4]
+    punpcklwd       xm3, xm1
+    lea             r0, [r0 + 4 * r1]
+    movd            xm2, [r0]
+    punpcklwd       xm1, xm2
+    punpcklqdq      xm3, xm1                        ; m3 = [8 7 7 6]
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [8 7 7 6 6 5 5 4]
+    movd            xm1, [r0 + r1]
+    punpcklwd       xm2, xm1
+    movd            xm5, [r0 + r1 * 2]
+    punpcklwd       xm1, xm5
+    punpcklqdq      xm2, xm1                        ; m2 = [10 9 9 8]
+    vinserti128     m3, m3, xm2, 1                  ; m3 = [10 9 9 8 8 7 7 6]
+    pmaddwd         m4, [r5]
+    pmaddwd         m3, [r5 + 1 * mmsize]
+    paddd           m4, m3                          ; rows 4-7 filtered
+%ifidn %1,sp
+    paddd           m0, m6
+    paddd           m4, m6
+    psrad           m0, 12
+    psrad           m4, 12
+%else
+    psrad           m0, 6
+    psrad           m4, 6
+%endif
+    packssdw        m0, m4
+    vextracti128    xm4, m0, 1
+    lea             r4, [r3 * 3]                    ; r4 repurposed: 3 * dstStride
+%ifidn %1,sp
+    packuswb        xm0, xm4
+    pextrw          [r2], xm0, 0
+    pextrw          [r2 + r3], xm0, 1
+    pextrw          [r2 + 2 * r3], xm0, 4
+    pextrw          [r2 + r4], xm0, 5
+    lea             r2, [r2 + r3 * 4]
+    pextrw          [r2], xm0, 2
+    pextrw          [r2 + r3], xm0, 3
+    pextrw          [r2 + 2 * r3], xm0, 6
+    pextrw          [r2 + r4], xm0, 7
+%else
+    movd            [r2], xm0
+    pextrd          [r2 + r3], xm0, 1
+    movd            [r2 + 2 * r3], xm4
+    pextrd          [r2 + r4], xm4, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm0, 2
+    pextrd          [r2 + r3], xm0, 3
+    pextrd          [r2 + 2 * r3], xm4, 2
+    pextrd          [r2 + r4], xm4, 3
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_2x8 sp
+    FILTER_VER_CHROMA_S_AVX2_2x8 ss
+
+; Vertical 4-tap chroma interpolation for 2x16 blocks (AVX2, x86-64 only:
+; needs 9 vector registers).  %1 = sp or ss; same row-pair interleave /
+; pmaddwd scheme as the 2x8 kernel, unrolled over all 16 output rows.
+%macro FILTER_VER_CHROMA_S_AVX2_2x16 1
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_2x16, 4, 6, 9
+    mov             r4d, r4m                        ; r4d = coeffIdx
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+    add             r1d, r1d                        ; srcStride in bytes
+    sub             r0, r1                          ; back up one row for the 4-tap window
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+%ifidn %1,sp
+    mova            m6, [pd_526336]                 ; rounding offset for the sp path
+%else
+    add             r3d, r3d                        ; ss: dstStride in bytes
+%endif
+    movd            xm0, [r0]
+    movd            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movd            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    punpcklqdq      xm0, xm1                        ; m0 = [2 1 1 0]
+    movd            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movd            xm4, [r0]
+    punpcklwd       xm3, xm4
+    punpcklqdq      xm2, xm3                        ; m2 = [4 3 3 2]
+    vinserti128     m0, m0, xm2, 1                  ; m0 = [4 3 3 2 2 1 1 0]
+    movd            xm1, [r0 + r1]
+    punpcklwd       xm4, xm1
+    movd            xm3, [r0 + r1 * 2]
+    punpcklwd       xm1, xm3
+    punpcklqdq      xm4, xm1                        ; m4 = [6 5 5 4]
+    vinserti128     m2, m2, xm4, 1                  ; m2 = [6 5 5 4 4 3 3 2]
+    pmaddwd         m0, [r5]
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m0, m2                          ; rows 0-3 filtered
+    movd            xm1, [r0 + r4]
+    punpcklwd       xm3, xm1
+    lea             r0, [r0 + 4 * r1]
+    movd            xm2, [r0]
+    punpcklwd       xm1, xm2
+    punpcklqdq      xm3, xm1                        ; m3 = [8 7 7 6]
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [8 7 7 6 6 5 5 4]
+    movd            xm1, [r0 + r1]
+    punpcklwd       xm2, xm1
+    movd            xm5, [r0 + r1 * 2]
+    punpcklwd       xm1, xm5
+    punpcklqdq      xm2, xm1                        ; m2 = [10 9 9 8]
+    vinserti128     m3, m3, xm2, 1                  ; m3 = [10 9 9 8 8 7 7 6]
+    pmaddwd         m4, [r5]
+    pmaddwd         m3, [r5 + 1 * mmsize]
+    paddd           m4, m3                          ; rows 4-7 filtered
+    movd            xm1, [r0 + r4]
+    punpcklwd       xm5, xm1
+    lea             r0, [r0 + 4 * r1]
+    movd            xm3, [r0]
+    punpcklwd       xm1, xm3
+    punpcklqdq      xm5, xm1                        ; m5 = [12 11 11 10]
+    vinserti128     m2, m2, xm5, 1                  ; m2 = [12 11 11 10 10 9 9 8]
+    movd            xm1, [r0 + r1]
+    punpcklwd       xm3, xm1
+    movd            xm7, [r0 + r1 * 2]
+    punpcklwd       xm1, xm7
+    punpcklqdq      xm3, xm1                        ; m3 = [14 13 13 12]
+    vinserti128     m5, m5, xm3, 1                  ; m5 = [14 13 13 12 12 11 11 10]
+    pmaddwd         m2, [r5]
+    pmaddwd         m5, [r5 + 1 * mmsize]
+    paddd           m2, m5                          ; rows 8-11 filtered
+    movd            xm5, [r0 + r4]
+    punpcklwd       xm7, xm5
+    lea             r0, [r0 + 4 * r1]
+    movd            xm1, [r0]
+    punpcklwd       xm5, xm1
+    punpcklqdq      xm7, xm5                        ; m7 = [16 15 15 14]
+    vinserti128     m3, m3, xm7, 1                  ; m3 = [16 15 15 14 14 13 13 12]
+    movd            xm5, [r0 + r1]
+    punpcklwd       xm1, xm5
+    movd            xm8, [r0 + r1 * 2]
+    punpcklwd       xm5, xm8
+    punpcklqdq      xm1, xm5                        ; m1 = [18 17 17 16]
+    vinserti128     m7, m7, xm1, 1                  ; m7 = [18 17 17 16 16 15 15 14]
+    pmaddwd         m3, [r5]
+    pmaddwd         m7, [r5 + 1 * mmsize]
+    paddd           m3, m7                          ; rows 12-15 filtered
+%ifidn %1,sp
+    paddd           m0, m6
+    paddd           m4, m6
+    paddd           m2, m6
+    paddd           m3, m6
+    psrad           m0, 12
+    psrad           m4, 12
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m0, 6
+    psrad           m4, 6
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m0, m4
+    packssdw        m2, m3
+    lea             r4, [r3 * 3]                    ; r4 repurposed: 3 * dstStride
+%ifidn %1,sp
+    packuswb        m0, m2
+    vextracti128    xm2, m0, 1
+    pextrw          [r2], xm0, 0
+    pextrw          [r2 + r3], xm0, 1
+    pextrw          [r2 + 2 * r3], xm2, 0
+    pextrw          [r2 + r4], xm2, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrw          [r2], xm0, 2
+    pextrw          [r2 + r3], xm0, 3
+    pextrw          [r2 + 2 * r3], xm2, 2
+    pextrw          [r2 + r4], xm2, 3
+    lea             r2, [r2 + r3 * 4]
+    pextrw          [r2], xm0, 4
+    pextrw          [r2 + r3], xm0, 5
+    pextrw          [r2 + 2 * r3], xm2, 4
+    pextrw          [r2 + r4], xm2, 5
+    lea             r2, [r2 + r3 * 4]
+    pextrw          [r2], xm0, 6
+    pextrw          [r2 + r3], xm0, 7
+    pextrw          [r2 + 2 * r3], xm2, 6
+    pextrw          [r2 + r4], xm2, 7
+%else
+    vextracti128    xm4, m0, 1
+    vextracti128    xm3, m2, 1
+    movd            [r2], xm0
+    pextrd          [r2 + r3], xm0, 1
+    movd            [r2 + 2 * r3], xm4
+    pextrd          [r2 + r4], xm4, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm0, 2
+    pextrd          [r2 + r3], xm0, 3
+    pextrd          [r2 + 2 * r3], xm4, 2
+    pextrd          [r2 + r4], xm4, 3
+    lea             r2, [r2 + r3 * 4]
+    movd            [r2], xm2
+    pextrd          [r2 + r3], xm2, 1
+    movd            [r2 + 2 * r3], xm3
+    pextrd          [r2 + r4], xm3, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm2, 2
+    pextrd          [r2 + r3], xm2, 3
+    pextrd          [r2 + 2 * r3], xm3, 2
+    pextrd          [r2 + r4], xm3, 3
+%endif
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_2x16 sp
+    FILTER_VER_CHROMA_S_AVX2_2x16 ss
+
+; Vertical 4-tap chroma interpolation for 6x8 blocks (AVX2).
+; %1 = sp or ss.  Rows are loaded 8 samples wide, interleaved as row pairs
+; (punpcklwd/punpckhwd into a ymm) and pmaddwd'd against [r5]/[r5+mmsize];
+; stores write 4 + 2 samples per row to honour the 6-wide block.
+%macro FILTER_VER_CHROMA_S_AVX2_6x8 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_6x8, 4, 6, 8
+    mov             r4d, r4m                        ; r4d = coeffIdx
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+    add             r1d, r1d                        ; srcStride in bytes
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1                          ; back up one row for the 4-tap window
+%ifidn %1,sp
+    mova            m7, [pd_526336]                 ; rounding offset for the sp path
+%else
+    add             r3d, r3d                        ; ss: dstStride in bytes
+%endif
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m4
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    pmaddwd         m3, [r5]
+    paddd           m1, m5
+%ifidn %1,sp
+    paddd           m0, m7
+    paddd           m1, m7
+    psrad           m0, 12
+    psrad           m1, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m1
+
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm1, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m1
+%ifidn %1,sp
+    paddd           m2, m7
+    paddd           m3, m7
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m2, m3
+
+    movu            xm1, [r0 + r4]                  ; m1 = row 7
+    punpckhwd       xm3, xm6, xm1
+    punpcklwd       xm6, xm1
+    vinserti128     m6, m6, xm3, 1
+    pmaddwd         m3, m6, [r5 + 1 * mmsize]
+    pmaddwd         m6, [r5]
+    paddd           m4, m3
+
+    lea             r4, [r3 * 3]                    ; r4 repurposed: 3 * dstStride
+%ifidn %1,sp
+    ; rows 0-3: 4 bytes via movd/pextrd plus 2 bytes via pextrw (6-wide)
+    packuswb        m0, m2
+    vextracti128    xm2, m0, 1
+    movd            [r2], xm0
+    pextrw          [r2 + 4], xm2, 0
+    pextrd          [r2 + r3], xm0, 1
+    pextrw          [r2 + r3 + 4], xm2, 2
+    pextrd          [r2 + r3 * 2], xm0, 2
+    pextrw          [r2 + r3 * 2 + 4], xm2, 4
+    pextrd          [r2 + r4], xm0, 3
+    pextrw          [r2 + r4 + 4], xm2, 6
+%else
+    ; rows 0-3: 8 bytes via movq/movhps plus 4 bytes via movd/pextrd
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r4], xm2
+    vextracti128    xm0, m0, 1
+    vextracti128    xm3, m2, 1
+    movd            [r2 + 8], xm0
+    pextrd          [r2 + r3 + 8], xm0, 2
+    movd            [r2 + r3 * 2 + 8], xm3
+    pextrd          [r2 + r4 + 8], xm3, 2
+%endif
+    lea             r2, [r2 + r3 * 4]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm0, [r0]                       ; m0 = row 8
+    punpckhwd       xm2, xm1, xm0
+    punpcklwd       xm1, xm0
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m2, m1, [r5 + 1 * mmsize]
+    pmaddwd         m1, [r5]
+    paddd           m5, m2
+%ifidn %1,sp
+    paddd           m4, m7
+    paddd           m5, m7
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m4, m5
+
+    movu            xm2, [r0 + r1]                  ; m2 = row 9
+    punpckhwd       xm5, xm0, xm2
+    punpcklwd       xm0, xm2
+    vinserti128     m0, m0, xm5, 1
+    pmaddwd         m0, [r5 + 1 * mmsize]
+    paddd           m6, m0
+    movu            xm5, [r0 + r1 * 2]              ; m5 = row 10
+    punpckhwd       xm0, xm2, xm5
+    punpcklwd       xm2, xm5
+    vinserti128     m2, m2, xm0, 1
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m1, m2
+
+%ifidn %1,sp
+    paddd           m6, m7
+    paddd           m1, m7
+    psrad           m6, 12
+    psrad           m1, 12
+%else
+    psrad           m6, 6
+    psrad           m1, 6
+%endif
+    packssdw        m6, m1
+%ifidn %1,sp
+    packuswb        m4, m6
+    vextracti128    xm6, m4, 1
+    movd            [r2], xm4
+    pextrw          [r2 + 4], xm6, 0
+    pextrd          [r2 + r3], xm4, 1
+    pextrw          [r2 + r3 + 4], xm6, 2
+    pextrd          [r2 + r3 * 2], xm4, 2
+    pextrw          [r2 + r3 * 2 + 4], xm6, 4
+    pextrd          [r2 + r4], xm4, 3
+    pextrw          [r2 + r4 + 4], xm6, 6
+%else
+    movq            [r2], xm4
+    movhps          [r2 + r3], xm4
+    movq            [r2 + r3 * 2], xm6
+    movhps          [r2 + r4], xm6
+    vextracti128    xm5, m4, 1
+    vextracti128    xm1, m6, 1
+    movd            [r2 + 8], xm5
+    pextrd          [r2 + r3 + 8], xm5, 2
+    movd            [r2 + r3 * 2 + 8], xm1
+    pextrd          [r2 + r4 + 8], xm1, 2
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_6x8 sp
+    FILTER_VER_CHROMA_S_AVX2_6x8 ss
+
+; Vertical 4-tap chroma interpolation for 6x16 blocks (AVX2, x86-64 only:
+; needs 9 vector registers).  %1 = sp or ss.  Same row-pair interleave /
+; pmaddwd scheme as the 6x8 kernel, fully unrolled over 16 output rows with
+; a 4+2 sample store pattern per row.
+%macro FILTER_VER_CHROMA_S_AVX2_6x16 1
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal interp_4tap_vert_%1_6x16, 4, 7, 9
+    mov             r4d, r4m                        ; r4d = coeffIdx
+    shl             r4d, 6                          ; 64 bytes per coefficient set
+    add             r1d, r1d                        ; srcStride in bytes
+
+%ifdef PIC
+    lea             r5, [pw_ChromaCoeffV]
+    add             r5, r4
+%else
+    lea             r5, [pw_ChromaCoeffV + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r1                          ; back up one row for the 4-tap window
+%ifidn %1,sp
+    mova            m8, [pd_526336]                 ; rounding offset for the sp path
+%else
+    add             r3d, r3d                        ; ss: dstStride in bytes
+%endif
+    lea             r6, [r3 * 3]                    ; r6 = 3 * dstStride
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+%ifidn %1,sp
+    paddd           m0, m8
+    paddd           m1, m8
+    psrad           m0, 12
+    psrad           m1, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+%endif
+    packssdw        m0, m1
+
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm1, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m1
+%ifidn %1,sp
+    paddd           m2, m8
+    paddd           m3, m8
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m2, m3
+%ifidn %1,sp
+    ; store rows 0-3: 4 bytes (movd/pextrd) + 2 bytes (pextrw) per row
+    packuswb        m0, m2
+    vextracti128    xm2, m0, 1
+    movd            [r2], xm0
+    pextrw          [r2 + 4], xm2, 0
+    pextrd          [r2 + r3], xm0, 1
+    pextrw          [r2 + r3 + 4], xm2, 2
+    pextrd          [r2 + r3 * 2], xm0, 2
+    pextrw          [r2 + r3 * 2 + 4], xm2, 4
+    pextrd          [r2 + r6], xm0, 3
+    pextrw          [r2 + r6 + 4], xm2, 6
+%else
+    ; store rows 0-3: 8 bytes (movq/movhps) + 4 bytes (movd/pextrd) per row
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+    vextracti128    xm0, m0, 1
+    vextracti128    xm3, m2, 1
+    movd            [r2 + 8], xm0
+    pextrd          [r2 + r3 + 8], xm0, 2
+    movd            [r2 + r3 * 2 + 8], xm3
+    pextrd          [r2 + r6 + 8], xm3, 2
+%endif
+    lea             r2, [r2 + r3 * 4]
+    movu            xm1, [r0 + r4]                  ; m1 = row 7
+    punpckhwd       xm0, xm6, xm1
+    punpcklwd       xm6, xm1
+    vinserti128     m6, m6, xm0, 1
+    pmaddwd         m0, m6, [r5 + 1 * mmsize]
+    pmaddwd         m6, [r5]
+    paddd           m4, m0
+    lea             r0, [r0 + r1 * 4]
+    movu            xm0, [r0]                       ; m0 = row 8
+    punpckhwd       xm2, xm1, xm0
+    punpcklwd       xm1, xm0
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m2, m1, [r5 + 1 * mmsize]
+    pmaddwd         m1, [r5]
+    paddd           m5, m2
+%ifidn %1,sp
+    paddd           m4, m8
+    paddd           m5, m8
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m4, m5
+
+    movu            xm2, [r0 + r1]                  ; m2 = row 9
+    punpckhwd       xm5, xm0, xm2
+    punpcklwd       xm0, xm2
+    vinserti128     m0, m0, xm5, 1
+    pmaddwd         m5, m0, [r5 + 1 * mmsize]
+    paddd           m6, m5
+    pmaddwd         m0, [r5]
+    movu            xm5, [r0 + r1 * 2]              ; m5 = row 10
+    punpckhwd       xm7, xm2, xm5
+    punpcklwd       xm2, xm5
+    vinserti128     m2, m2, xm7, 1
+    pmaddwd         m7, m2, [r5 + 1 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m2, [r5]
+
+%ifidn %1,sp
+    paddd           m6, m8
+    paddd           m1, m8
+    psrad           m6, 12
+    psrad           m1, 12
+%else
+    psrad           m6, 6
+    psrad           m1, 6
+%endif
+    packssdw        m6, m1
+%ifidn %1,sp
+    ; store rows 4-7
+    packuswb        m4, m6
+    vextracti128    xm6, m4, 1
+    movd            [r2], xm4
+    pextrw          [r2 + 4], xm6, 0
+    pextrd          [r2 + r3], xm4, 1
+    pextrw          [r2 + r3 + 4], xm6, 2
+    pextrd          [r2 + r3 * 2], xm4, 2
+    pextrw          [r2 + r3 * 2 + 4], xm6, 4
+    pextrd          [r2 + r6], xm4, 3
+    pextrw          [r2 + r6 + 4], xm6, 6
+%else
+    movq            [r2], xm4
+    movhps          [r2 + r3], xm4
+    movq            [r2 + r3 * 2], xm6
+    movhps          [r2 + r6], xm6
+    vextracti128    xm4, m4, 1
+    vextracti128    xm1, m6, 1
+    movd            [r2 + 8], xm4
+    pextrd          [r2 + r3 + 8], xm4, 2
+    movd            [r2 + r3 * 2 + 8], xm1
+    pextrd          [r2 + r6 + 8], xm1, 2
+%endif
+    lea             r2, [r2 + r3 * 4]
+    movu            xm7, [r0 + r4]                  ; m7 = row 11
+    punpckhwd       xm1, xm5, xm7
+    punpcklwd       xm5, xm7
+    vinserti128     m5, m5, xm1, 1
+    pmaddwd         m1, m5, [r5 + 1 * mmsize]
+    paddd           m0, m1
+    pmaddwd         m5, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm1, [r0]                       ; m1 = row 12
+    punpckhwd       xm4, xm7, xm1
+    punpcklwd       xm7, xm1
+    vinserti128     m7, m7, xm4, 1
+    pmaddwd         m4, m7, [r5 + 1 * mmsize]
+    paddd           m2, m4
+    pmaddwd         m7, [r5]
+%ifidn %1,sp
+    paddd           m0, m8
+    paddd           m2, m8
+    psrad           m0, 12
+    psrad           m2, 12
+%else
+    psrad           m0, 6
+    psrad           m2, 6
+%endif
+    packssdw        m0, m2
+
+    movu            xm4, [r0 + r1]                  ; m4 = row 13
+    punpckhwd       xm2, xm1, xm4
+    punpcklwd       xm1, xm4
+    vinserti128     m1, m1, xm2, 1
+    pmaddwd         m2, m1, [r5 + 1 * mmsize]
+    paddd           m5, m2
+    pmaddwd         m1, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 14
+    punpckhwd       xm6, xm4, xm2
+    punpcklwd       xm4, xm2
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m7, m6
+    pmaddwd         m4, [r5]
+%ifidn %1,sp
+    paddd           m5, m8
+    paddd           m7, m8
+    psrad           m5, 12
+    psrad           m7, 12
+%else
+    psrad           m5, 6
+    psrad           m7, 6
+%endif
+    packssdw        m5, m7
+%ifidn %1,sp
+    ; store rows 8-11
+    packuswb        m0, m5
+    vextracti128    xm5, m0, 1
+    movd            [r2], xm0
+    pextrw          [r2 + 4], xm5, 0
+    pextrd          [r2 + r3], xm0, 1
+    pextrw          [r2 + r3 + 4], xm5, 2
+    pextrd          [r2 + r3 * 2], xm0, 2
+    pextrw          [r2 + r3 * 2 + 4], xm5, 4
+    pextrd          [r2 + r6], xm0, 3
+    pextrw          [r2 + r6 + 4], xm5, 6
+%else
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm5
+    movhps          [r2 + r6], xm5
+    vextracti128    xm0, m0, 1
+    vextracti128    xm7, m5, 1
+    movd            [r2 + 8], xm0
+    pextrd          [r2 + r3 + 8], xm0, 2
+    movd            [r2 + r3 * 2 + 8], xm7
+    pextrd          [r2 + r6 + 8], xm7, 2
+%endif
+    lea             r2, [r2 + r3 * 4]
+
+    movu            xm6, [r0 + r4]                  ; m6 = row 15
+    punpckhwd       xm5, xm2, xm6
+    punpcklwd       xm2, xm6
+    vinserti128     m2, m2, xm5, 1
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm0, [r0]                       ; m0 = row 16
+    punpckhwd       xm5, xm6, xm0
+    punpcklwd       xm6, xm0
+    vinserti128     m6, m6, xm5, 1
+    pmaddwd         m5, m6, [r5 + 1 * mmsize]
+    paddd           m4, m5
+    pmaddwd         m6, [r5]
+%ifidn %1,sp
+    paddd           m1, m8
+    paddd           m4, m8
+    psrad           m1, 12
+    psrad           m4, 12
+%else
+    psrad           m1, 6
+    psrad           m4, 6
+%endif
+    packssdw        m1, m4
+
+    movu            xm5, [r0 + r1]                  ; m5 = row 17
+    punpckhwd       xm4, xm0, xm5
+    punpcklwd       xm0, xm5
+    vinserti128     m0, m0, xm4, 1
+    pmaddwd         m0, [r5 + 1 * mmsize]
+    paddd           m2, m0
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhwd       xm0, xm5, xm4
+    punpcklwd       xm5, xm4
+    vinserti128     m5, m5, xm0, 1
+    pmaddwd         m5, [r5 + 1 * mmsize]
+    paddd           m6, m5
+%ifidn %1,sp
+    paddd           m2, m8
+    paddd           m6, m8
+    psrad           m2, 12
+    psrad           m6, 12
+%else
+    psrad           m2, 6
+    psrad           m6, 6
+%endif
+    packssdw        m2, m6
+%ifidn %1,sp
+    ; store rows 12-15
+    packuswb        m1, m2
+    vextracti128    xm2, m1, 1
+    movd            [r2], xm1
+    pextrw          [r2 + 4], xm2, 0
+    pextrd          [r2 + r3], xm1, 1
+    pextrw          [r2 + r3 + 4], xm2, 2
+    pextrd          [r2 + r3 * 2], xm1, 2
+    pextrw          [r2 + r3 * 2 + 4], xm2, 4
+    pextrd          [r2 + r6], xm1, 3
+    pextrw          [r2 + r6 + 4], xm2, 6
+%else
+    movq            [r2], xm1
+    movhps          [r2 + r3], xm1
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+    vextracti128    xm4, m1, 1
+    vextracti128    xm6, m2, 1
+    movd            [r2 + 8], xm4
+    pextrd          [r2 + r3 + 8], xm4, 2
+    movd            [r2 + r3 * 2 + 8], xm6
+    pextrd          [r2 + r6 + 8], xm6, 2
+%endif
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_CHROMA_S_AVX2_6x16 sp
+    FILTER_VER_CHROMA_S_AVX2_6x16 ss
+
+;---------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vertical_ss_%1x%2(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_SS_W2_4R 2
+INIT_XMM sse4
+cglobal interp_4tap_vert_ss_%1x%2, 5, 6, 5
+
+    add       r1d, r1d
+    add       r3d, r3d
+    sub       r0, r1
+    shl       r4d, 5
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+
+    mov       r4d, (%2/4)
+
+.loopH:
+    PROCESS_CHROMA_SP_W2_4R r5
+
+    psrad     m0, 6
+    psrad     m2, 6
+
+    packssdw  m0, m2
+
+    movd      [r2], m0
+    pextrd    [r2 + r3], m0, 1
+    lea       r2, [r2 + 2 * r3]
+    pextrd    [r2], m0, 2
+    pextrd    [r2 + r3], m0, 3
+
+    lea       r2, [r2 + 2 * r3]
+
+    dec       r4d
+    jnz       .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_SS_W2_4R 2, 4
+    FILTER_VER_CHROMA_SS_W2_4R 2, 8
+
+    FILTER_VER_CHROMA_SS_W2_4R 2, 16
+
+;---------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ss_4x2(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;---------------------------------------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal interp_4tap_vert_ss_4x2, 5, 6, 4      ; 4-tap vertical ss filter, fully unrolled 4x2 block
+
+    add        r1d, r1d                        ; strides in bytes (int16_t samples)
+    add        r3d, r3d
+    sub        r0, r1                          ; back up one source row for the 4-tap window
+    shl        r4d, 5                          ; coeffIdx * 32
+
+%ifdef PIC
+    lea        r5, [tab_ChromaCoeffV]
+    lea        r5, [r5 + r4]
+%else
+    lea        r5, [tab_ChromaCoeffV + r4]
+%endif
+
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+    pmaddwd    m0, [r5 + 0 *16]                ;m0=[0+1]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m2, [r0]
+    punpcklwd  m1, m2                          ;m1=[1 2]
+    pmaddwd    m1, [r5 + 0 *16]                ;m1=[1+2]  Row2
+
+    movq       m3, [r0 + r1]
+    punpcklwd  m2, m3                          ;m2=[2 3]
+    pmaddwd    m2, [r5 + 1 * 16]
+    paddd      m0, m2                          ;m0=[0+1+2+3]  Row1 done
+    psrad      m0, 6                           ; ss: shift only, no rounding
+
+    movq       m2, [r0 + 2 * r1]
+    punpcklwd  m3, m2                          ;m3=[3 4]
+    pmaddwd    m3, [r5 + 1 * 16]
+    paddd      m1, m3                          ;m1=[1+2+3+4]  Row2 done
+    psrad      m1, 6
+
+    packssdw   m0, m1
+
+    movlps     [r2], m0                        ; row 0: 4 x int16_t
+    movhps     [r2 + r3], m0                   ; row 1
+
+    RET
+
+;-------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vertical_ss_6x8(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_SS_W6_H4 2            ; width 6 handled as a 4-wide strip + a 2-wide strip per 4 rows
+INIT_XMM sse4
+cglobal interp_4tap_vert_ss_6x%2, 5, 7, 6
+
+    add       r1d, r1d                         ; strides in bytes (int16_t samples)
+    add       r3d, r3d
+    sub       r0, r1                           ; back up one source row
+    shl       r4d, 5                           ; coeffIdx * 32
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r6, [r5 + r4]                    ; coeff pointer kept in r6 (r5 is scratch below)
+%else
+    lea       r6, [tab_ChromaCoeffV + r4]
+%endif
+
+    mov       r4d, %2/4                        ; 4 rows per iteration
+
+.loopH:
+    PROCESS_CHROMA_SP_W4_4R                    ; left 4 columns, 4 rows -> m0..m3
+
+    psrad     m0, 6                            ; ss: shift only
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+
+    movlps    [r2], m0
+    movhps    [r2 + r3], m0
+    lea       r5, [r2 + 2 * r3]
+    movlps    [r5], m2
+    movhps    [r5 + r3], m2
+
+    lea       r5, [4 * r1 - 2 * 4]             ; rewind src 4 rows, advance 4 columns (8 bytes)
+    sub       r0, r5
+    add       r2, 2 * 4
+
+    PROCESS_CHROMA_SP_W2_4R r6                 ; right 2 columns, same 4 rows
+
+    psrad     m0, 6
+    psrad     m2, 6
+
+    packssdw  m0, m2
+
+    movd      [r2], m0
+    pextrd    [r2 + r3], m0, 1
+    lea       r2, [r2 + 2 * r3]
+    pextrd    [r2], m0, 2
+    pextrd    [r2 + r3], m0, 3
+
+    sub       r0, 2 * 4                        ; undo the column advance on src
+    lea       r2, [r2 + 2 * r3 - 2 * 4]        ; next 4-row strip, back to column 0
+
+    dec       r4d
+    jnz       .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_SS_W6_H4 6, 8
+
+    FILTER_VER_CHROMA_SS_W6_H4 6, 16
+
+
+;----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert_ss_8x%2(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_SS_W8_H2 2            ; %1 = width (8), %2 = height; 8 columns x 2 rows per pass
+INIT_XMM sse2
+cglobal interp_4tap_vert_ss_%1x%2, 5, 6, 7
+
+    add       r1d, r1d                         ; strides in bytes (int16_t samples)
+    add       r3d, r3d
+    sub       r0, r1                           ; back up one source row
+    shl       r4d, 5                           ; coeffIdx * 32
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+
+    mov       r4d, %2/2                        ; 2 rows per iteration
+.loopH:
+    PROCESS_CHROMA_SP_W8_2R                    ; row sums: m0/m1 = row 0 lo/hi, m2/m3 = row 1 lo/hi
+
+    psrad     m0, 6                            ; ss: shift only
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+
+    movu      [r2], m0                         ; 8 x int16_t per row
+    movu      [r2 + r3], m2
+
+    lea       r2, [r2 + 2 * r3]
+
+    dec       r4d
+    jnz       .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_CHROMA_SS_W8_H2 8, 2
+    FILTER_VER_CHROMA_SS_W8_H2 8, 4
+    FILTER_VER_CHROMA_SS_W8_H2 8, 6
+    FILTER_VER_CHROMA_SS_W8_H2 8, 8
+    FILTER_VER_CHROMA_SS_W8_H2 8, 16
+    FILTER_VER_CHROMA_SS_W8_H2 8, 32
+
+    FILTER_VER_CHROMA_SS_W8_H2 8, 12
+    FILTER_VER_CHROMA_SS_W8_H2 8, 64
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert_ss_%1x%2(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_LUMA_SS 2                    ; %1 = width, %2 = height; 8-tap vertical ss, 4x4 tiles
+INIT_XMM sse2
+cglobal interp_8tap_vert_ss_%1x%2, 5, 7, 7 ,0-gprsize
+
+    add        r1d, r1d                        ; strides in bytes (int16_t samples)
+    add        r3d, r3d
+    lea        r5, [3 * r1]
+    sub        r0, r5                          ; start 3 rows above (8-tap window spans rows -3..+4)
+    shl        r4d, 6                          ; coeffIdx * 64 = byte offset of one 8-tap coefficient set
+
+%ifdef PIC
+    lea        r5, [tab_LumaCoeffV]
+    lea        r6, [r5 + r4]
+%else
+    lea        r6, [tab_LumaCoeffV + r4]
+%endif
+
+    mov        dword [rsp], %2/4               ; row-tile counter kept on stack (out of registers)
+.loopH:
+    mov        r4d, (%1/4)                     ; column-tile counter: 4 columns per inner iteration
+.loopW:
+    movq       m0, [r0]
+    movq       m1, [r0 + r1]
+    punpcklwd  m0, m1                          ;m0=[0 1]
+    pmaddwd    m0, [r6 + 0 *16]                ;m0=[0+1]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m1, m4                          ;m1=[1 2]
+    pmaddwd    m1, [r6 + 0 *16]                ;m1=[1+2]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[2 3]
+    pmaddwd    m2, m4, [r6 + 0 *16]            ;m2=[2+3]  Row3
+    pmaddwd    m4, [r6 + 1 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[3 4]
+    pmaddwd    m3, m5, [r6 + 0 *16]            ;m3=[3+4]  Row4
+    pmaddwd    m5, [r6 + 1 * 16]
+    paddd      m1, m5                          ;m1 = [1+2+3+4]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[4 5]
+    pmaddwd    m6, m4, [r6 + 1 * 16]
+    paddd      m2, m6                          ;m2=[2+3+4+5]  Row3
+    pmaddwd    m4, [r6 + 2 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3+4+5]  Row1
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[5 6]
+    pmaddwd    m6, m5, [r6 + 1 * 16]
+    paddd      m3, m6                          ;m3=[3+4+5+6]  Row4
+    pmaddwd    m5, [r6 + 2 * 16]
+    paddd      m1, m5                          ;m1=[1+2+3+4+5+6]  Row2
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[6 7]
+    pmaddwd    m6, m4, [r6 + 2 * 16]
+    paddd      m2, m6                          ;m2=[2+3+4+5+6+7]  Row3
+    pmaddwd    m4, [r6 + 3 * 16]
+    paddd      m0, m4                          ;m0=[0+1+2+3+4+5+6+7]  Row1 end
+    psrad      m0, 6                           ; ss: shift only, no rounding
+
+    lea        r0, [r0 + 2 * r1]
+    movq       m4, [r0]
+    punpcklwd  m5, m4                          ;m5=[7 8]
+    pmaddwd    m6, m5, [r6 + 2 * 16]
+    paddd      m3, m6                          ;m3=[3+4+5+6+7+8]  Row4
+    pmaddwd    m5, [r6 + 3 * 16]
+    paddd      m1, m5                          ;m1=[1+2+3+4+5+6+7+8]  Row2 end
+    psrad      m1, 6
+
+    packssdw   m0, m1
+
+    movlps     [r2], m0
+    movhps     [r2 + r3], m0
+
+    movq       m5, [r0 + r1]
+    punpcklwd  m4, m5                          ;m4=[8 9]
+    pmaddwd    m4, [r6 + 3 * 16]
+    paddd      m2, m4                          ;m2=[2+3+4+5+6+7+8+9]  Row3 end
+    psrad      m2, 6
+
+    movq       m4, [r0 + 2 * r1]
+    punpcklwd  m5, m4                          ;m5=[9 10]
+    pmaddwd    m5, [r6 + 3 * 16]
+    paddd      m3, m5                          ;m3=[3+4+5+6+7+8+9+10]  Row4 end
+    psrad      m3, 6
+
+    packssdw   m2, m3
+
+    movlps     [r2 + 2 * r3], m2
+    lea        r5, [3 * r3]
+    movhps     [r2 + r5], m2
+
+    lea        r5, [8 * r1 - 2 * 4]            ; rewind src 8 rows, advance 4 columns (8 bytes)
+    sub        r0, r5
+    add        r2, 2 * 4
+
+    dec        r4d
+    jnz        .loopW
+
+    lea        r0, [r0 + 4 * r1 - 2 * %1]      ; next 4-row tile, back to column 0
+    lea        r2, [r2 + 4 * r3 - 2 * %1]
+
+    dec        dword [rsp]
+    jnz        .loopH
+
+    RET
+%endmacro
+
+    FILTER_VER_LUMA_SS 4, 4
+    FILTER_VER_LUMA_SS 8, 8
+    FILTER_VER_LUMA_SS 8, 4
+    FILTER_VER_LUMA_SS 4, 8
+    FILTER_VER_LUMA_SS 16, 16
+    FILTER_VER_LUMA_SS 16, 8
+    FILTER_VER_LUMA_SS 8, 16
+    FILTER_VER_LUMA_SS 16, 12
+    FILTER_VER_LUMA_SS 12, 16
+    FILTER_VER_LUMA_SS 16, 4
+    FILTER_VER_LUMA_SS 4, 16
+    FILTER_VER_LUMA_SS 32, 32
+    FILTER_VER_LUMA_SS 32, 16
+    FILTER_VER_LUMA_SS 16, 32
+    FILTER_VER_LUMA_SS 32, 24
+    FILTER_VER_LUMA_SS 24, 32
+    FILTER_VER_LUMA_SS 32, 8
+    FILTER_VER_LUMA_SS 8, 32
+    FILTER_VER_LUMA_SS 64, 64
+    FILTER_VER_LUMA_SS 64, 32
+    FILTER_VER_LUMA_SS 32, 64
+    FILTER_VER_LUMA_SS 64, 48
+    FILTER_VER_LUMA_SS 48, 64
+    FILTER_VER_LUMA_SS 64, 16
+    FILTER_VER_LUMA_SS 16, 64
+
+%macro FILTER_VER_LUMA_AVX2_4x4 1              ; %1 = sp (round+clamp to byte) or ss (shift only)
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_4x4, 4, 6, 7
+    mov             r4d, r4m
+    add             r1d, r1d                    ; src stride: int16_t units -> bytes
+    shl             r4d, 7                      ; coeffIdx * 128 (8 taps x 16-byte rows, doubled for ymm)
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above
+
+%ifidn %1,sp
+    mova            m6, [pd_526336]             ; sp rounding constant before >>12
+%else
+    add             r3d, r3d                    ; ss: dst stride also in bytes of int16_t
+%endif
+
+    movq            xm0, [r0]
+    movq            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m5, m4, [r5 + 2 * mmsize]
+    pmaddwd         m4, [r5 + 1 * mmsize]
+    paddd           m0, m5
+    paddd           m2, m4
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm1, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [8 7 7 6]
+    pmaddwd         m5, m1, [r5 + 3 * mmsize]
+    pmaddwd         m1, [r5 + 2 * mmsize]
+    paddd           m0, m5
+    paddd           m2, m1
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + 2 * r1]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [A 9 9 8]
+    pmaddwd         m4, [r5 + 3 * mmsize]
+    paddd           m2, m4
+
+%ifidn %1,sp
+    paddd           m0, m6                      ; round then >>12 back to pixel range
+    paddd           m2, m6
+    psrad           m0, 12
+    psrad           m2, 12
+%else
+    psrad           m0, 6                       ; ss: plain >>6
+    psrad           m2, 6
+%endif
+    packssdw        m0, m2
+    vextracti128    xm2, m0, 1
+    lea             r4, [r3 * 3]
+
+%ifidn %1,sp
+    packuswb        xm0, xm2
+    movd            [r2], xm0                   ; row order below follows the lane interleaving
+    pextrd          [r2 + r3], xm0, 2
+    pextrd          [r2 + r3 * 2], xm0, 1
+    pextrd          [r2 + r4], xm0, 3
+%else
+    movq            [r2], xm0
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r4], xm2
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_4x4 sp
+    FILTER_VER_LUMA_AVX2_4x4 ss
+
+%macro FILTER_VER_LUMA_AVX2_4x8 1              ; %1 = sp or ss; 8-tap vertical, 4 columns x 8 rows
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_4x8, 4, 7, 8
+    mov             r4d, r4m
+    shl             r4d, 7                      ; coeffIdx * 128
+    add             r1d, r1d                    ; src stride in bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above
+
+%ifidn %1,sp
+    mova            m7, [pd_526336]             ; sp rounding constant before >>12
+%else
+    add             r3d, r3d
+%endif
+    lea             r6, [r3 * 3]
+
+    movq            xm0, [r0]
+    movq            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m5, m4, [r5 + 2 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m5, m4, [r5 + 1 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m4, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm1, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm6, [r0]
+    punpcklwd       xm3, xm6
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [8 7 7 6]
+    pmaddwd         m5, m1, [r5 + 3 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m5, m1, [r5 + 2 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m5, m1, [r5 + 1 * mmsize]
+    paddd           m4, m5
+    pmaddwd         m1, [r5]
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm6, xm3
+    movq            xm5, [r0 + 2 * r1]
+    punpcklwd       xm3, xm5
+    vinserti128     m6, m6, xm3, 1                  ; m6 = [A 9 9 8]
+    pmaddwd         m3, m6, [r5 + 3 * mmsize]
+    paddd           m2, m3
+    pmaddwd         m3, m6, [r5 + 2 * mmsize]
+    paddd           m4, m3
+    pmaddwd         m6, [r5 + 1 * mmsize]
+    paddd           m1, m6
+
+%ifidn %1,sp
+    paddd           m0, m7                      ; rows 0-3: round + >>12
+    paddd           m2, m7
+    psrad           m0, 12
+    psrad           m2, 12
+%else
+    psrad           m0, 6
+    psrad           m2, 6
+%endif
+    packssdw        m0, m2
+
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm5, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm2, [r0]
+    punpcklwd       xm3, xm2
+    vinserti128     m5, m5, xm3, 1                  ; m5 = [C B B A]
+    pmaddwd         m3, m5, [r5 + 3 * mmsize]
+    paddd           m4, m3
+    pmaddwd         m5, [r5 + 2 * mmsize]
+    paddd           m1, m5
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm2, xm3
+    movq            xm5, [r0 + 2 * r1]
+    punpcklwd       xm3, xm5
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [E D D C]
+    pmaddwd         m2, [r5 + 3 * mmsize]
+    paddd           m1, m2
+
+%ifidn %1,sp
+    paddd           m4, m7                      ; rows 4-7: round + >>12
+    paddd           m1, m7
+    psrad           m4, 12
+    psrad           m1, 12
+%else
+    psrad           m4, 6
+    psrad           m1, 6
+%endif
+    packssdw        m4, m1
+
+%ifidn %1,sp
+    packuswb        m0, m4
+    vextracti128    xm2, m0, 1
+    movd            [r2], xm0                   ; stores follow the ymm lane interleaving
+    movd            [r2 + r3], xm2
+    pextrd          [r2 + r3 * 2], xm0, 1
+    pextrd          [r2 + r6], xm2, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm0, 2
+    pextrd          [r2 + r3], xm2, 2
+    pextrd          [r2 + r3 * 2], xm0, 3
+    pextrd          [r2 + r6], xm2, 3
+%else
+    vextracti128    xm2, m0, 1
+    vextracti128    xm1, m4, 1
+    movq            [r2], xm0
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm2
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm4
+    movq            [r2 + r3], xm1
+    movhps          [r2 + r3 * 2], xm4
+    movhps          [r2 + r6], xm1
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_4x8 sp
+    FILTER_VER_LUMA_AVX2_4x8 ss
+
+%macro PROCESS_LUMA_AVX2_W4_16R 1              ; body: 4 cols x 16 rows; caller sets r5=coeffs, r4=3*r1, r6=3*r3, m7=pd_526336 (sp only)
+    movq            xm0, [r0]
+    movq            xm1, [r0 + r1]
+    punpcklwd       xm0, xm1
+    movq            xm2, [r0 + r1 * 2]
+    punpcklwd       xm1, xm2
+    vinserti128     m0, m0, xm1, 1                  ; m0 = [2 1 1 0]
+    pmaddwd         m0, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm2, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm4, [r0]
+    punpcklwd       xm3, xm4
+    vinserti128     m2, m2, xm3, 1                  ; m2 = [4 3 3 2]
+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m5
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm4, xm3
+    movq            xm1, [r0 + r1 * 2]
+    punpcklwd       xm3, xm1
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [6 5 5 4]
+    pmaddwd         m5, m4, [r5 + 2 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m5, m4, [r5 + 1 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m4, [r5]
+    movq            xm3, [r0 + r4]
+    punpcklwd       xm1, xm3
+    lea             r0, [r0 + 4 * r1]
+    movq            xm6, [r0]
+    punpcklwd       xm3, xm6
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [8 7 7 6]
+    pmaddwd         m5, m1, [r5 + 3 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m5, m1, [r5 + 2 * mmsize]
+    paddd           m2, m5
+    pmaddwd         m5, m1, [r5 + 1 * mmsize]
+    paddd           m4, m5
+    pmaddwd         m1, [r5]
+    movq            xm3, [r0 + r1]
+    punpcklwd       xm6, xm3
+    movq            xm5, [r0 + 2 * r1]
+    punpcklwd       xm3, xm5
+    vinserti128     m6, m6, xm3, 1                  ; m6 = [10 9 9 8]
+    pmaddwd         m3, m6, [r5 + 3 * mmsize]
+    paddd           m2, m3
+    pmaddwd         m3, m6, [r5 + 2 * mmsize]
+    paddd           m4, m3
+    pmaddwd         m3, m6, [r5 + 1 * mmsize]
+    paddd           m1, m3
+    pmaddwd         m6, [r5]
+
+%ifidn %1,sp
+    paddd           m0, m7                      ; rows 0-3: round + >>12
+    paddd           m2, m7
+    psrad           m0, 12
+    psrad           m2, 12
+%else
+    psrad           m0, 6
+    psrad           m2, 6
+%endif
+    packssdw        m0, m2
+    vextracti128    xm2, m0, 1
+%ifidn %1,sp
+    packuswb        xm0, xm2
+    movd            [r2], xm0                   ; store order follows lane interleaving
+    pextrd          [r2 + r3], xm0, 2
+    pextrd          [r2 + r3 * 2], xm0, 1
+    pextrd          [r2 + r6], xm0, 3
+%else
+    movq            [r2], xm0
+    movq            [r2 + r3], xm2
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm2
+%endif
+
+    movq            xm2, [r0 + r4]
+    punpcklwd       xm5, xm2
+    lea             r0, [r0 + 4 * r1]
+    movq            xm0, [r0]
+    punpcklwd       xm2, xm0
+    vinserti128     m5, m5, xm2, 1                  ; m5 = [12 11 11 10]
+    pmaddwd         m2, m5, [r5 + 3 * mmsize]
+    paddd           m4, m2
+    pmaddwd         m2, m5, [r5 + 2 * mmsize]
+    paddd           m1, m2
+    pmaddwd         m2, m5, [r5 + 1 * mmsize]
+    paddd           m6, m2
+    pmaddwd         m5, [r5]
+    movq            xm2, [r0 + r1]
+    punpcklwd       xm0, xm2
+    movq            xm3, [r0 + 2 * r1]
+    punpcklwd       xm2, xm3
+    vinserti128     m0, m0, xm2, 1                  ; m0 = [14 13 13 12]
+    pmaddwd         m2, m0, [r5 + 3 * mmsize]
+    paddd           m1, m2
+    pmaddwd         m2, m0, [r5 + 2 * mmsize]
+    paddd           m6, m2
+    pmaddwd         m2, m0, [r5 + 1 * mmsize]
+    paddd           m5, m2
+    pmaddwd         m0, [r5]
+
+%ifidn %1,sp
+    paddd           m4, m7                      ; rows 4-7: round + >>12
+    paddd           m1, m7
+    psrad           m4, 12
+    psrad           m1, 12
+%else
+    psrad           m4, 6
+    psrad           m1, 6
+%endif
+    packssdw        m4, m1
+    vextracti128    xm1, m4, 1
+    lea             r2, [r2 + r3 * 4]
+%ifidn %1,sp
+    packuswb        xm4, xm1
+    movd            [r2], xm4
+    pextrd          [r2 + r3], xm4, 2
+    pextrd          [r2 + r3 * 2], xm4, 1
+    pextrd          [r2 + r6], xm4, 3
+%else
+    movq            [r2], xm4
+    movq            [r2 + r3], xm1
+    movhps          [r2 + r3 * 2], xm4
+    movhps          [r2 + r6], xm1
+%endif
+
+    movq            xm4, [r0 + r4]
+    punpcklwd       xm3, xm4
+    lea             r0, [r0 + 4 * r1]
+    movq            xm1, [r0]
+    punpcklwd       xm4, xm1
+    vinserti128     m3, m3, xm4, 1                  ; m3 = [16 15 15 14]
+    pmaddwd         m4, m3, [r5 + 3 * mmsize]
+    paddd           m6, m4
+    pmaddwd         m4, m3, [r5 + 2 * mmsize]
+    paddd           m5, m4
+    pmaddwd         m4, m3, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m3, [r5]
+    movq            xm4, [r0 + r1]
+    punpcklwd       xm1, xm4
+    movq            xm2, [r0 + 2 * r1]
+    punpcklwd       xm4, xm2
+    vinserti128     m1, m1, xm4, 1                  ; m1 = [18 17 17 16]
+    pmaddwd         m4, m1, [r5 + 3 * mmsize]
+    paddd           m5, m4
+    pmaddwd         m4, m1, [r5 + 2 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m1, [r5 + 1 * mmsize]
+    paddd           m3, m1
+    movq            xm4, [r0 + r4]
+    punpcklwd       xm2, xm4
+    lea             r0, [r0 + 4 * r1]
+    movq            xm1, [r0]
+    punpcklwd       xm4, xm1
+    vinserti128     m2, m2, xm4, 1                  ; m2 = [20 19 19 18]
+    pmaddwd         m4, m2, [r5 + 3 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5 + 2 * mmsize]
+    paddd           m3, m2
+    movq            xm4, [r0 + r1]
+    punpcklwd       xm1, xm4
+    movq            xm2, [r0 + 2 * r1]
+    punpcklwd       xm4, xm2
+    vinserti128     m1, m1, xm4, 1                  ; m1 = [22 21 21 20]
+    pmaddwd         m1, [r5 + 3 * mmsize]
+    paddd           m3, m1
+
+%ifidn %1,sp
+    paddd           m6, m7                      ; rows 8-15: round + >>12
+    paddd           m5, m7
+    paddd           m0, m7
+    paddd           m3, m7
+    psrad           m6, 12
+    psrad           m5, 12
+    psrad           m0, 12
+    psrad           m3, 12
+%else
+    psrad           m6, 6
+    psrad           m5, 6
+    psrad           m0, 6
+    psrad           m3, 6
+%endif
+    packssdw        m6, m5
+    packssdw        m0, m3
+    lea             r2, [r2 + r3 * 4]
+
+%ifidn %1,sp
+    packuswb        m6, m0
+    vextracti128    xm0, m6, 1
+    movd            [r2], xm6
+    movd            [r2 + r3], xm0
+    pextrd          [r2 + r3 * 2], xm6, 1
+    pextrd          [r2 + r6], xm0, 1
+    lea             r2, [r2 + r3 * 4]
+    pextrd          [r2], xm6, 2
+    pextrd          [r2 + r3], xm0, 2
+    pextrd          [r2 + r3 * 2], xm6, 3
+    pextrd          [r2 + r6], xm0, 3
+%else
+    vextracti128    xm5, m6, 1
+    vextracti128    xm3, m0, 1
+    movq            [r2], xm6
+    movq            [r2 + r3], xm5
+    movhps          [r2 + r3 * 2], xm6
+    movhps          [r2 + r6], xm5
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm0
+    movq            [r2 + r3], xm3
+    movhps          [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm3
+%endif
+%endmacro
+
+%macro FILTER_VER_LUMA_AVX2_4x16 1             ; thin wrapper: set up registers then run the shared 4x16 body
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_4x16, 4, 7, 8
+    mov             r4d, r4m
+    shl             r4d, 7                      ; coeffIdx * 128
+    add             r1d, r1d                    ; src stride in bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above
+%ifidn %1,sp
+    mova            m7, [pd_526336]             ; rounding constant expected in m7 by the body macro
+%else
+    add             r3d, r3d
+%endif
+    lea             r6, [r3 * 3]
+    PROCESS_LUMA_AVX2_W4_16R %1
+    RET
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_4x16 sp
+    FILTER_VER_LUMA_AVX2_4x16 ss
+
+%macro FILTER_VER_LUMA_S_AVX2_8x8 1            ; %1 = sp or ss; 8 cols x 8 rows, x86-64 only (needs 12 ymm regs)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_8x8, 4, 6, 12
+    mov             r4d, r4m
+    shl             r4d, 7                      ; coeffIdx * 128
+    add             r1d, r1d                    ; src stride in bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                      ; start 3 rows above
+
+%ifidn %1,sp
+    mova            m11, [pd_526336]            ; sp rounding constant before >>12
+%else
+    add             r3d, r3d
+%endif
+
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    pmaddwd         m2, [r5]
+    paddd           m0, m4
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    pmaddwd         m3, [r5]
+    paddd           m1, m5
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 2 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    pmaddwd         m5, [r5]
+    paddd           m3, m7
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 3 * mmsize]
+    paddd           m0, m8
+    pmaddwd         m8, m6, [r5 + 2 * mmsize]
+    paddd           m2, m8
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    pmaddwd         m6, [r5]
+    paddd           m4, m8
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 3 * mmsize]
+    paddd           m1, m9
+    pmaddwd         m9, m7, [r5 + 2 * mmsize]
+    paddd           m3, m9
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    pmaddwd         m7, [r5]
+    paddd           m5, m9
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m2, m10
+    pmaddwd         m10, m8, [r5 + 2 * mmsize]
+    pmaddwd         m8, [r5 + 1 * mmsize]
+    paddd           m4, m10
+    paddd           m6, m8
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm8, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm8, 1
+    pmaddwd         m8, m9, [r5 + 3 * mmsize]
+    paddd           m3, m8
+    pmaddwd         m8, m9, [r5 + 2 * mmsize]
+    pmaddwd         m9, [r5 + 1 * mmsize]
+    paddd           m5, m8
+    paddd           m7, m9
+    movu            xm8, [r0 + r4]                  ; m8 = row 11
+    punpckhwd       xm9, xm10, xm8
+    punpcklwd       xm10, xm8
+    vinserti128     m10, m10, xm9, 1
+    pmaddwd         m9, m10, [r5 + 3 * mmsize]
+    pmaddwd         m10, [r5 + 2 * mmsize]
+    paddd           m4, m9
+    paddd           m6, m10
+
+    lea             r4, [r3 * 3]
+%ifidn %1,sp
+    paddd           m0, m11                     ; rows 0-3 complete: round + >>12
+    paddd           m1, m11
+    paddd           m2, m11
+    paddd           m3, m11
+    psrad           m0, 12
+    psrad           m1, 12
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m0, m1
+    packssdw        m2, m3
+%ifidn %1,sp
+    packuswb        m0, m2
+    mova            m1, [interp8_hps_shuf]      ; restore row order across the ymm lanes
+    vpermd          m0, m1, m0
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r4], xm2
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+%endif
+
+    lea             r0, [r0 + r1 * 4]
+    movu            xm9, [r0]                       ; m9 = row 12
+    punpckhwd       xm3, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm3, 1
+    pmaddwd         m3, m8, [r5 + 3 * mmsize]
+    pmaddwd         m8, [r5 + 2 * mmsize]
+    paddd           m5, m3
+    paddd           m7, m8
+    movu            xm3, [r0 + r1]                  ; m3 = row 13
+    punpckhwd       xm0, xm9, xm3
+    punpcklwd       xm9, xm3
+    vinserti128     m9, m9, xm0, 1
+    pmaddwd         m9, [r5 + 3 * mmsize]
+    paddd           m6, m9
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm9, xm3, xm0
+    punpcklwd       xm3, xm0
+    vinserti128     m3, m3, xm9, 1
+    pmaddwd         m3, [r5 + 3 * mmsize]
+    paddd           m7, m3
+
+%ifidn %1,sp
+    paddd           m4, m11                     ; rows 4-7 complete: round + >>12
+    paddd           m5, m11
+    paddd           m6, m11
+    paddd           m7, m11
+    psrad           m4, 12
+    psrad           m5, 12
+    psrad           m6, 12
+    psrad           m7, 12
+%else
+    psrad           m4, 6
+    psrad           m5, 6
+    psrad           m6, 6
+    psrad           m7, 6
+%endif
+    packssdw        m4, m5
+    packssdw        m6, m7
+    lea             r2, [r2 + r3 * 4]
+%ifidn %1,sp
+    packuswb        m4, m6
+    vpermd          m4, m1, m4                  ; m1 still holds interp8_hps_shuf from above
+    vextracti128    xm6, m4, 1
+    movq            [r2], xm4
+    movhps          [r2 + r3], xm4
+    movq            [r2 + r3 * 2], xm6
+    movhps          [r2 + r4], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm5, m4, 1
+    vextracti128    xm7, m6, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm5
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r4], xm7
+%endif
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_S_AVX2_8x8 sp
+    FILTER_VER_LUMA_S_AVX2_8x8 ss
+
+%macro FILTER_VER_LUMA_S_AVX2_8xN 2            ; 8-tap vertical luma filter, width 8, height %2; %1 selects output variant: sp (pack+clip to bytes) or ss (16-bit out)
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; uses 15 vector regs and 9 GPRs -> 64-bit only
+cglobal interp_8tap_vert_%1_8x%2, 4, 9, 15
+    mov             r4d, r4m
+    shl             r4d, 7                     ; r4d = coeff index * 128 = byte offset into pw_LumaCoeffVer
+    add             r1d, r1d                   ; source samples are 16-bit: stride in bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4] ; r5 -> 4 ymm-sized coefficient pair blocks [r5 + k*mmsize]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                     ; start 3 rows above: 8-tap window spans rows -3..+4
+%ifidn %1,sp
+    mova            m14, [pd_526336]           ; sp: rounding constant, paired with psrad 12 below
+%else
+    add             r3d, r3d                   ; ss: 16-bit output, double dst stride
+%endif
+    lea             r6, [r3 * 3]               ; r6 = 3 * dst stride
+    lea             r7, [r1 * 4]               ; r7 = 4 * src stride (loop rewind amount)
+    mov             r8d, %2 / 16               ; outer loop: 16 output rows per iteration
+.loopH:
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1                  ; interleave rows 0/1 pairwise for pmaddwd
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 2 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+    movu            xm7, [r0 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 3 * mmsize]
+    paddd           m0, m8                          ; row 7 completes accumulator for output row 0
+    pmaddwd         m8, m6, [r5 + 2 * mmsize]
+    paddd           m2, m8
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm8, [r0]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 3 * mmsize]
+    paddd           m1, m9
+    pmaddwd         m9, m7, [r5 + 2 * mmsize]
+    paddd           m3, m9
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    paddd           m5, m9
+    pmaddwd         m7, [r5]
+    movu            xm9, [r0 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m2, m10
+    pmaddwd         m10, m8, [r5 + 2 * mmsize]
+    paddd           m4, m10
+    pmaddwd         m10, m8, [r5 + 1 * mmsize]
+    paddd           m6, m10
+    pmaddwd         m8, [r5]
+    movu            xm10, [r0 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm11, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddwd         m11, m9, [r5 + 3 * mmsize]
+    paddd           m3, m11
+    pmaddwd         m11, m9, [r5 + 2 * mmsize]
+    paddd           m5, m11
+    pmaddwd         m11, m9, [r5 + 1 * mmsize]
+    paddd           m7, m11
+    pmaddwd         m9, [r5]
+    movu            xm11, [r0 + r4]                 ; m11 = row 11
+    punpckhwd       xm12, xm10, xm11
+    punpcklwd       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddwd         m12, m10, [r5 + 3 * mmsize]
+    paddd           m4, m12
+    pmaddwd         m12, m10, [r5 + 2 * mmsize]
+    paddd           m6, m12
+    pmaddwd         m12, m10, [r5 + 1 * mmsize]
+    paddd           m8, m12
+    pmaddwd         m10, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm12, [r0]                      ; m12 = row 12
+    punpckhwd       xm13, xm11, xm12
+    punpcklwd       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddwd         m13, m11, [r5 + 3 * mmsize]
+    paddd           m5, m13
+    pmaddwd         m13, m11, [r5 + 2 * mmsize]
+    paddd           m7, m13
+    pmaddwd         m13, m11, [r5 + 1 * mmsize]
+    paddd           m9, m13
+    pmaddwd         m11, [r5]
+
+%ifidn %1,sp                                       ; round + shift output rows 0-5
+    paddd           m0, m14
+    paddd           m1, m14
+    paddd           m2, m14
+    paddd           m3, m14
+    paddd           m4, m14
+    paddd           m5, m14
+    psrad           m0, 12
+    psrad           m1, 12
+    psrad           m2, 12
+    psrad           m3, 12
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m0, m1
+    packssdw        m2, m3
+    packssdw        m4, m5
+%ifidn %1,sp                                       ; store output rows 0-3 (sp: 8 bytes/row; ss: 16 bytes/row)
+    packuswb        m0, m2
+    mova            m1, [interp8_hps_shuf]         ; m1 keeps this shuffle for the later sp stores too
+    vpermd          m0, m1, m0
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%endif
+
+    movu            xm13, [r0 + r1]                 ; m13 = row 13
+    punpckhwd       xm0, xm12, xm13
+    punpcklwd       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddwd         m0, m12, [r5 + 3 * mmsize]
+    paddd           m6, m0
+    pmaddwd         m0, m12, [r5 + 2 * mmsize]
+    paddd           m8, m0
+    pmaddwd         m0, m12, [r5 + 1 * mmsize]
+    paddd           m10, m0
+    pmaddwd         m12, [r5]
+    movu            xm0, [r0 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm2, xm13, xm0
+    punpcklwd       xm13, xm0
+    vinserti128     m13, m13, xm2, 1
+    pmaddwd         m2, m13, [r5 + 3 * mmsize]
+    paddd           m7, m2
+    pmaddwd         m2, m13, [r5 + 2 * mmsize]
+    paddd           m9, m2
+    pmaddwd         m2, m13, [r5 + 1 * mmsize]
+    paddd           m11, m2
+    pmaddwd         m13, [r5]
+
+%ifidn %1,sp                                       ; round + shift output rows 6-7
+    paddd           m6, m14
+    paddd           m7, m14
+    psrad           m6, 12
+    psrad           m7, 12
+%else
+    psrad           m6, 6
+    psrad           m7, 6
+%endif
+    packssdw        m6, m7
+    lea             r2, [r2 + r3 * 4]
+
+%ifidn %1,sp                                       ; store output rows 4-7
+    packuswb        m4, m6
+    vpermd          m4, m1, m4
+    vextracti128    xm6, m4, 1
+    movq            [r2], xm4
+    movhps          [r2 + r3], xm4
+    movq            [r2 + r3 * 2], xm6
+    movhps          [r2 + r6], xm6
+%else
+    vpermq          m6, m6, 11011000b
+    vpermq          m4, m4, 11011000b
+    vextracti128    xm1, m4, 1
+    vextracti128    xm7, m6, 1
+    movu            [r2], xm4
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm6
+    movu            [r2 + r6], xm7
+%endif
+
+    movu            xm6, [r0 + r4]                  ; m6 = row 15
+    punpckhwd       xm5, xm0, xm6
+    punpcklwd       xm0, xm6
+    vinserti128     m0, m0, xm5, 1
+    pmaddwd         m5, m0, [r5 + 3 * mmsize]
+    paddd           m8, m5
+    pmaddwd         m5, m0, [r5 + 2 * mmsize]
+    paddd           m10, m5
+    pmaddwd         m5, m0, [r5 + 1 * mmsize]
+    paddd           m12, m5
+    pmaddwd         m0, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm2, [r0]                       ; m2 = row 16
+    punpckhwd       xm3, xm6, xm2
+    punpcklwd       xm6, xm2
+    vinserti128     m6, m6, xm3, 1
+    pmaddwd         m3, m6, [r5 + 3 * mmsize]
+    paddd           m9, m3
+    pmaddwd         m3, m6, [r5 + 2 * mmsize]
+    paddd           m11, m3
+    pmaddwd         m3, m6, [r5 + 1 * mmsize]
+    paddd           m13, m3
+    pmaddwd         m6, [r5]
+    movu            xm3, [r0 + r1]                  ; m3 = row 17
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 3 * mmsize]
+    paddd           m10, m4
+    pmaddwd         m4, m2, [r5 + 2 * mmsize]
+    paddd           m12, m4
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m0, m2
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 18
+    punpckhwd       xm2, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm2, 1
+    pmaddwd         m2, m3, [r5 + 3 * mmsize]
+    paddd           m11, m2
+    pmaddwd         m2, m3, [r5 + 2 * mmsize]
+    paddd           m13, m2
+    pmaddwd         m3, [r5 + 1 * mmsize]
+    paddd           m6, m3
+    movu            xm2, [r0 + r4]                  ; m2 = row 19
+    punpckhwd       xm7, xm4, xm2
+    punpcklwd       xm4, xm2
+    vinserti128     m4, m4, xm7, 1
+    pmaddwd         m7, m4, [r5 + 3 * mmsize]
+    paddd           m12, m7
+    pmaddwd         m4, [r5 + 2 * mmsize]
+    paddd           m0, m4
+    lea             r0, [r0 + r1 * 4]
+    movu            xm7, [r0]                       ; m7 = row 20
+    punpckhwd       xm3, xm2, xm7
+    punpcklwd       xm2, xm7
+    vinserti128     m2, m2, xm3, 1
+    pmaddwd         m3, m2, [r5 + 3 * mmsize]
+    paddd           m13, m3
+    pmaddwd         m2, [r5 + 2 * mmsize]
+    paddd           m6, m2
+    movu            xm3, [r0 + r1]                  ; m3 = row 21
+    punpckhwd       xm2, xm7, xm3
+    punpcklwd       xm7, xm3
+    vinserti128     m7, m7, xm2, 1
+    pmaddwd         m7, [r5 + 3 * mmsize]
+    paddd           m0, m7
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 22
+    punpckhwd       xm7, xm3, xm2
+    punpcklwd       xm3, xm2
+    vinserti128     m3, m3, xm7, 1
+    pmaddwd         m3, [r5 + 3 * mmsize]
+    paddd           m6, m3
+
+%ifidn %1,sp                                       ; round + shift output rows 8-15
+    paddd           m8, m14
+    paddd           m9, m14
+    paddd           m10, m14
+    paddd           m11, m14
+    paddd           m12, m14
+    paddd           m13, m14
+    paddd           m0, m14
+    paddd           m6, m14
+    psrad           m8, 12
+    psrad           m9, 12
+    psrad           m10, 12
+    psrad           m11, 12
+    psrad           m12, 12
+    psrad           m13, 12
+    psrad           m0, 12
+    psrad           m6, 12
+%else
+    psrad           m8, 6
+    psrad           m9, 6
+    psrad           m10, 6
+    psrad           m11, 6
+    psrad           m12, 6
+    psrad           m13, 6
+    psrad           m0, 6
+    psrad           m6, 6
+%endif
+    packssdw        m8, m9
+    packssdw        m10, m11
+    packssdw        m12, m13
+    packssdw        m0, m6
+    lea             r2, [r2 + r3 * 4]
+
+%ifidn %1,sp                                       ; store output rows 8-15
+    packuswb        m8, m10
+    packuswb        m12, m0
+    vpermd          m8, m1, m8
+    vpermd          m12, m1, m12
+    vextracti128    xm10, m8, 1
+    vextracti128    xm0, m12, 1
+    movq            [r2], xm8
+    movhps          [r2 + r3], xm8
+    movq            [r2 + r3 * 2], xm10
+    movhps          [r2 + r6], xm10
+    lea             r2, [r2 + r3 * 4]
+    movq            [r2], xm12
+    movhps          [r2 + r3], xm12
+    movq            [r2 + r3 * 2], xm0
+    movhps          [r2 + r6], xm0
+%else
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+    vpermq          m12, m12, 11011000b
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm13, m12, 1
+    vextracti128    xm6, m0, 1
+    movu            [r2], xm8
+    movu            [r2 + r3], xm9
+    movu            [r2 + r3 * 2], xm10
+    movu            [r2 + r6], xm11
+    lea             r2, [r2 + r3 * 4]
+    movu            [r2], xm12
+    movu            [r2 + r3], xm13
+    movu            [r2 + r3 * 2], xm0
+    movu            [r2 + r6], xm6
+%endif
+
+    lea             r2, [r2 + r3 * 4]
+    sub             r0, r7                         ; input advanced 20 rows but only 16 consumed: back up 4 rows (r7 = 4*r1)
+    dec             r8d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_S_AVX2_8xN sp, 16
+    FILTER_VER_LUMA_S_AVX2_8xN sp, 32
+    FILTER_VER_LUMA_S_AVX2_8xN ss, 16
+    FILTER_VER_LUMA_S_AVX2_8xN ss, 32
+
+%macro PROCESS_LUMA_S_AVX2_W8_4R 1             ; compute 4 output rows x 8 cols of vertical 8-tap; expects r0 = src - 3*stride, r4 = 3*r1, r5 = coeff table; sp variant also needs m7 = rounding const
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1                  ; interleave adjacent rows pairwise for pmaddwd
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m4, [r5 + 1 * mmsize]
+    paddd           m2, m4
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm4, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm4, 1
+    pmaddwd         m4, m5, [r5 + 2 * mmsize]
+    paddd           m1, m4
+    pmaddwd         m5, [r5 + 1 * mmsize]
+    paddd           m3, m5
+    movu            xm4, [r0 + r4]                  ; m4 = row 7
+    punpckhwd       xm5, xm6, xm4
+    punpcklwd       xm6, xm4
+    vinserti128     m6, m6, xm5, 1
+    pmaddwd         m5, m6, [r5 + 3 * mmsize]
+    paddd           m0, m5                          ; row 7 completes accumulator for output row 0
+    pmaddwd         m6, [r5 + 2 * mmsize]
+    paddd           m2, m6
+    lea             r0, [r0 + r1 * 4]
+    movu            xm5, [r0]                       ; m5 = row 8
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 3 * mmsize]
+    paddd           m1, m6
+    pmaddwd         m4, [r5 + 2 * mmsize]
+    paddd           m3, m4
+    movu            xm6, [r0 + r1]                  ; m6 = row 9
+    punpckhwd       xm4, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm4, 1
+    pmaddwd         m5, [r5 + 3 * mmsize]
+    paddd           m2, m5
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 10
+    punpckhwd       xm5, xm6, xm4
+    punpcklwd       xm6, xm4
+    vinserti128     m6, m6, xm5, 1
+    pmaddwd         m6, [r5 + 3 * mmsize]
+    paddd           m3, m6
+
+%ifidn %1,sp                                       ; sp: add rounding const (m7) then shift 12
+    paddd           m0, m7
+    paddd           m1, m7
+    paddd           m2, m7
+    paddd           m3, m7
+    psrad           m0, 12
+    psrad           m1, 12
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m0, m1
+    packssdw        m2, m3
+%ifidn %1,sp                                       ; results: sp -> packed bytes, rows 0/1 in xm0, rows 2/3 in xm2
+    packuswb        m0, m2
+    mova            m4, [interp8_hps_shuf]
+    vpermd          m0, m4, m0
+    vextracti128    xm2, m0, 1
+%else                                              ; ss -> 16-bit rows in xm0..xm3
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+%endif
+%endmacro
+
+%macro FILTER_VER_LUMA_S_AVX2_8x4 1            ; 8-tap vertical luma filter, 8x4 block; %1 = sp or ss output variant
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_8x4, 4, 6, 8
+    mov             r4d, r4m
+    shl             r4d, 7                     ; r4d = coeff index * 128 = byte offset into pw_LumaCoeffVer
+    add             r1d, r1d                   ; 16-bit source samples: stride in bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                     ; start 3 rows above for the 8-tap window
+%ifidn %1,sp
+    mova            m7, [pd_526336]            ; rounding const expected in m7 by PROCESS_LUMA_S_AVX2_W8_4R
+%else
+    add             r3d, r3d                   ; ss: 16-bit output, double dst stride
+%endif
+
+    PROCESS_LUMA_S_AVX2_W8_4R %1
+    lea             r4, [r3 * 3]               ; r4 repurposed: 3 * dst stride
+%ifidn %1,sp                                   ; sp: rows 0/1 in xm0, rows 2/3 in xm2 (8 bytes each)
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r4], xm2
+%else                                          ; ss: one 16-byte row per register xm0..xm3
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+%endif
+    RET
+%endmacro
+
+    FILTER_VER_LUMA_S_AVX2_8x4 sp
+    FILTER_VER_LUMA_S_AVX2_8x4 ss
+
+%macro PROCESS_LUMA_AVX2_W8_16R 1              ; 16 output rows x 8 cols of vertical 8-tap; expects r0 = src - 3*stride, r4 = 3*r1, r5 = coeffs, r6 = 3*r3; sp needs m14 = rounding const. On exit: r7 = r0 + 20*r1, r8 = r2 + 12*r3 (callers rely on both)
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r7, [r0 + r1 * 4]               ; r7 walks the input so r0 is preserved for the caller
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 2 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+    movu            xm7, [r7 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 3 * mmsize]
+    paddd           m0, m8                          ; row 7 completes accumulator for output row 0
+    pmaddwd         m8, m6, [r5 + 2 * mmsize]
+    paddd           m2, m8
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm8, [r7]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 3 * mmsize]
+    paddd           m1, m9
+    pmaddwd         m9, m7, [r5 + 2 * mmsize]
+    paddd           m3, m9
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    paddd           m5, m9
+    pmaddwd         m7, [r5]
+    movu            xm9, [r7 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m2, m10
+    pmaddwd         m10, m8, [r5 + 2 * mmsize]
+    paddd           m4, m10
+    pmaddwd         m10, m8, [r5 + 1 * mmsize]
+    paddd           m6, m10
+    pmaddwd         m8, [r5]
+    movu            xm10, [r7 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm11, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddwd         m11, m9, [r5 + 3 * mmsize]
+    paddd           m3, m11
+    pmaddwd         m11, m9, [r5 + 2 * mmsize]
+    paddd           m5, m11
+    pmaddwd         m11, m9, [r5 + 1 * mmsize]
+    paddd           m7, m11
+    pmaddwd         m9, [r5]
+    movu            xm11, [r7 + r4]                 ; m11 = row 11
+    punpckhwd       xm12, xm10, xm11
+    punpcklwd       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddwd         m12, m10, [r5 + 3 * mmsize]
+    paddd           m4, m12
+    pmaddwd         m12, m10, [r5 + 2 * mmsize]
+    paddd           m6, m12
+    pmaddwd         m12, m10, [r5 + 1 * mmsize]
+    paddd           m8, m12
+    pmaddwd         m10, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm12, [r7]                      ; m12 = row 12
+    punpckhwd       xm13, xm11, xm12
+    punpcklwd       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddwd         m13, m11, [r5 + 3 * mmsize]
+    paddd           m5, m13
+    pmaddwd         m13, m11, [r5 + 2 * mmsize]
+    paddd           m7, m13
+    pmaddwd         m13, m11, [r5 + 1 * mmsize]
+    paddd           m9, m13
+    pmaddwd         m11, [r5]
+
+%ifidn %1,sp                                       ; round + shift output rows 0-5
+    paddd           m0, m14
+    paddd           m1, m14
+    paddd           m2, m14
+    paddd           m3, m14
+    paddd           m4, m14
+    paddd           m5, m14
+    psrad           m0, 12
+    psrad           m1, 12
+    psrad           m2, 12
+    psrad           m3, 12
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m0, m1
+    packssdw        m2, m3
+    packssdw        m4, m5
+%ifidn %1,sp                                       ; store output rows 0-3
+    packuswb        m0, m2
+    mova            m5, [interp8_hps_shuf]         ; m5 keeps this shuffle for the later sp stores too
+    vpermd          m0, m5, m0
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%endif
+
+    movu            xm13, [r7 + r1]                 ; m13 = row 13
+    punpckhwd       xm0, xm12, xm13
+    punpcklwd       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddwd         m0, m12, [r5 + 3 * mmsize]
+    paddd           m6, m0
+    pmaddwd         m0, m12, [r5 + 2 * mmsize]
+    paddd           m8, m0
+    pmaddwd         m0, m12, [r5 + 1 * mmsize]
+    paddd           m10, m0
+    pmaddwd         m12, [r5]
+    movu            xm0, [r7 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm1, xm13, xm0
+    punpcklwd       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddwd         m1, m13, [r5 + 3 * mmsize]
+    paddd           m7, m1
+    pmaddwd         m1, m13, [r5 + 2 * mmsize]
+    paddd           m9, m1
+    pmaddwd         m1, m13, [r5 + 1 * mmsize]
+    paddd           m11, m1
+    pmaddwd         m13, [r5]
+
+%ifidn %1,sp                                       ; round + shift output rows 6-7
+    paddd           m6, m14
+    paddd           m7, m14
+    psrad           m6, 12
+    psrad           m7, 12
+%else
+    psrad           m6, 6
+    psrad           m7, 6
+%endif
+    packssdw        m6, m7
+    lea             r8, [r2 + r3 * 4]               ; r8 walks the output so r2 is preserved for the caller
+
+%ifidn %1,sp                                       ; store output rows 4-7
+    packuswb        m4, m6
+    vpermd          m4, m5, m4
+    vextracti128    xm6, m4, 1
+    movq            [r8], xm4
+    movhps          [r8 + r3], xm4
+    movq            [r8 + r3 * 2], xm6
+    movhps          [r8 + r6], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm1, m4, 1
+    vextracti128    xm7, m6, 1
+    movu            [r8], xm4
+    movu            [r8 + r3], xm1
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm7
+%endif
+
+    movu            xm1, [r7 + r4]                  ; m1 = row 15
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m2, m0, [r5 + 3 * mmsize]
+    paddd           m8, m2
+    pmaddwd         m2, m0, [r5 + 2 * mmsize]
+    paddd           m10, m2
+    pmaddwd         m2, m0, [r5 + 1 * mmsize]
+    paddd           m12, m2
+    pmaddwd         m0, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm2, [r7]                       ; m2 = row 16
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m3, m1, [r5 + 3 * mmsize]
+    paddd           m9, m3
+    pmaddwd         m3, m1, [r5 + 2 * mmsize]
+    paddd           m11, m3
+    pmaddwd         m3, m1, [r5 + 1 * mmsize]
+    paddd           m13, m3
+    pmaddwd         m1, [r5]
+    movu            xm3, [r7 + r1]                  ; m3 = row 17
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 3 * mmsize]
+    paddd           m10, m4
+    pmaddwd         m4, m2, [r5 + 2 * mmsize]
+    paddd           m12, m4
+    pmaddwd         m2, [r5 + 1 * mmsize]
+    paddd           m0, m2
+    movu            xm4, [r7 + r1 * 2]              ; m4 = row 18
+    punpckhwd       xm2, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm2, 1
+    pmaddwd         m2, m3, [r5 + 3 * mmsize]
+    paddd           m11, m2
+    pmaddwd         m2, m3, [r5 + 2 * mmsize]
+    paddd           m13, m2
+    pmaddwd         m3, [r5 + 1 * mmsize]
+    paddd           m1, m3
+    movu            xm2, [r7 + r4]                  ; m2 = row 19
+    punpckhwd       xm6, xm4, xm2
+    punpcklwd       xm4, xm2
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 3 * mmsize]
+    paddd           m12, m6
+    pmaddwd         m4, [r5 + 2 * mmsize]
+    paddd           m0, m4
+    lea             r7, [r7 + r1 * 4]
+    movu            xm6, [r7]                       ; m6 = row 20
+    punpckhwd       xm7, xm2, xm6
+    punpcklwd       xm2, xm6
+    vinserti128     m2, m2, xm7, 1
+    pmaddwd         m7, m2, [r5 + 3 * mmsize]
+    paddd           m13, m7
+    pmaddwd         m2, [r5 + 2 * mmsize]
+    paddd           m1, m2
+    movu            xm7, [r7 + r1]                  ; m7 = row 21
+    punpckhwd       xm2, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm2, 1
+    pmaddwd         m6, [r5 + 3 * mmsize]
+    paddd           m0, m6
+    movu            xm2, [r7 + r1 * 2]              ; m2 = row 22
+    punpckhwd       xm3, xm7, xm2
+    punpcklwd       xm7, xm2
+    vinserti128     m7, m7, xm3, 1
+    pmaddwd         m7, [r5 + 3 * mmsize]
+    paddd           m1, m7
+
+%ifidn %1,sp                                       ; round + shift output rows 8-15
+    paddd           m8, m14
+    paddd           m9, m14
+    paddd           m10, m14
+    paddd           m11, m14
+    paddd           m12, m14
+    paddd           m13, m14
+    paddd           m0, m14
+    paddd           m1, m14
+    psrad           m8, 12
+    psrad           m9, 12
+    psrad           m10, 12
+    psrad           m11, 12
+    psrad           m12, 12
+    psrad           m13, 12
+    psrad           m0, 12
+    psrad           m1, 12
+%else
+    psrad           m8, 6
+    psrad           m9, 6
+    psrad           m10, 6
+    psrad           m11, 6
+    psrad           m12, 6
+    psrad           m13, 6
+    psrad           m0, 6
+    psrad           m1, 6
+%endif
+    packssdw        m8, m9
+    packssdw        m10, m11
+    packssdw        m12, m13
+    packssdw        m0, m1
+    lea             r8, [r8 + r3 * 4]
+
+%ifidn %1,sp                                       ; store output rows 8-15
+    packuswb        m8, m10
+    packuswb        m12, m0
+    vpermd          m8, m5, m8
+    vpermd          m12, m5, m12
+    vextracti128    xm10, m8, 1
+    vextracti128    xm0, m12, 1
+    movq            [r8], xm8
+    movhps          [r8 + r3], xm8
+    movq            [r8 + r3 * 2], xm10
+    movhps          [r8 + r6], xm10
+    lea             r8, [r8 + r3 * 4]
+    movq            [r8], xm12
+    movhps          [r8 + r3], xm12
+    movq            [r8 + r3 * 2], xm0
+    movhps          [r8 + r6], xm0
+%else
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+    vpermq          m12, m12, 11011000b
+    vpermq          m0, m0, 11011000b
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    vextracti128    xm13, m12, 1
+    vextracti128    xm1, m0, 1
+    movu            [r8], xm8
+    movu            [r8 + r3], xm9
+    movu            [r8 + r3 * 2], xm10
+    movu            [r8 + r6], xm11
+    lea             r8, [r8 + r3 * 4]
+    movu            [r8], xm12
+    movu            [r8 + r3], xm13
+    movu            [r8 + r3 * 2], xm0
+    movu            [r8 + r6], xm1
+%endif
+%endmacro
+
+%macro FILTER_VER_LUMA_AVX2_Nx16 2             ; 8-tap vertical luma filter, width %2, height 16; %1 = sp/ss; tiles the width in 8-col strips via PROCESS_LUMA_AVX2_W8_16R
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; W8_16R needs 15 vector regs and r7-r9 -> 64-bit only
+cglobal interp_8tap_vert_%1_%2x16, 4, 10, 15
+    mov             r4d, r4m
+    shl             r4d, 7                     ; r4d = coeff index * 128 = byte offset into pw_LumaCoeffVer
+    add             r1d, r1d                   ; 16-bit source samples: stride in bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                     ; start 3 rows above for the 8-tap window
+%ifidn %1,sp
+    mova            m14, [pd_526336]           ; rounding const expected in m14 by the W8_16R macro
+%else
+    add             r3d, r3d                   ; ss: 16-bit output, double dst stride
+%endif
+    lea             r6, [r3 * 3]               ; r6 = 3 * dst stride (used by W8_16R stores)
+    mov             r9d, %2 / 8                ; number of 8-col strips across the width
+.loopW:
+    PROCESS_LUMA_AVX2_W8_16R %1
+%ifidn %1,sp
+    add             r2, 8                      ; sp writes 8 bytes per strip row
+%else
+    add             r2, 16                     ; ss writes 8 words = 16 bytes per strip row
+%endif
+    add             r0, 16                     ; advance 8 source samples (16 bytes)
+    dec             r9d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_Nx16 sp, 16
+    FILTER_VER_LUMA_AVX2_Nx16 sp, 32
+    FILTER_VER_LUMA_AVX2_Nx16 sp, 64
+    FILTER_VER_LUMA_AVX2_Nx16 ss, 16
+    FILTER_VER_LUMA_AVX2_Nx16 ss, 32
+    FILTER_VER_LUMA_AVX2_Nx16 ss, 64
+
+%macro FILTER_VER_LUMA_AVX2_NxN 3              ; 8-tap vertical luma filter, %1 x %2 block (%2 multiple of 16), %3 = sp/ss; 8x16 tiles via PROCESS_LUMA_AVX2_W8_16R
+INIT_YMM avx2
+%if ARCH_X86_64 == 1                           ; W8_16R needs 15 vector regs, plus r7-r11 here -> 64-bit only
+cglobal interp_8tap_vert_%3_%1x%2, 4, 12, 15
+    mov             r4d, r4m
+    shl             r4d, 7                     ; r4d = coeff index * 128 = byte offset into pw_LumaCoeffVer
+    add             r1d, r1d                   ; 16-bit source samples: stride in bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                     ; start 3 rows above for the 8-tap window
+
+%ifidn %3,sp
+    mova            m14, [pd_526336]           ; rounding const expected in m14 by the W8_16R macro
+%else
+    add             r3d, r3d                   ; ss: 16-bit output, double dst stride
+%endif
+
+    lea             r6, [r3 * 3]               ; r6 = 3 * dst stride (used by W8_16R stores)
+    lea             r11, [r1 * 4]              ; r11 = 4 src rows, for the band rewind below
+    mov             r9d, %2 / 16               ; row bands of 16
+.loopH:
+    mov             r10d, %1 / 8               ; 8-col strips per band
+.loopW:
+    PROCESS_LUMA_AVX2_W8_16R %3
+%ifidn %3,sp
+    add             r2, 8
+%else
+    add             r2, 16
+%endif
+    add             r0, 16                     ; next strip: 8 samples right
+    dec             r10d
+    jnz             .loopW
+    sub             r7, r11                    ; W8_16R left r7 = strip start + 20 rows; -4 rows -> +16 rows
+    lea             r0, [r7 - 2 * %1 + 16]     ; back to column 0 of the next band (undo the per-strip +16s)
+%ifidn %3,sp
+    lea             r2, [r8 + r3 * 4 - %1 + 8]         ; r8 = dst + 12*r3 from W8_16R; +4 rows and back to column 0 (byte output)
+%else
+    lea             r2, [r8 + r3 * 4 - 2 * %1 + 16]    ; same, with 2-byte output samples
+%endif
+    dec             r9d
+    jnz             .loopH
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_NxN 16, 32, sp
+    FILTER_VER_LUMA_AVX2_NxN 16, 64, sp
+    FILTER_VER_LUMA_AVX2_NxN 24, 32, sp
+    FILTER_VER_LUMA_AVX2_NxN 32, 32, sp
+    FILTER_VER_LUMA_AVX2_NxN 32, 64, sp
+    FILTER_VER_LUMA_AVX2_NxN 48, 64, sp
+    FILTER_VER_LUMA_AVX2_NxN 64, 32, sp
+    FILTER_VER_LUMA_AVX2_NxN 64, 48, sp
+    FILTER_VER_LUMA_AVX2_NxN 64, 64, sp
+    FILTER_VER_LUMA_AVX2_NxN 16, 32, ss
+    FILTER_VER_LUMA_AVX2_NxN 16, 64, ss
+    FILTER_VER_LUMA_AVX2_NxN 24, 32, ss
+    FILTER_VER_LUMA_AVX2_NxN 32, 32, ss
+    FILTER_VER_LUMA_AVX2_NxN 32, 64, ss
+    FILTER_VER_LUMA_AVX2_NxN 48, 64, ss
+    FILTER_VER_LUMA_AVX2_NxN 64, 32, ss
+    FILTER_VER_LUMA_AVX2_NxN 64, 48, ss
+    FILTER_VER_LUMA_AVX2_NxN 64, 64, ss
+
+%macro FILTER_VER_LUMA_S_AVX2_12x16 1          ; 8-tap vertical luma filter, 12x16 block = one 8-wide tile + one 4-wide tile; %1 = sp or ss
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_12x16, 4, 9, 15
+    mov             r4d, r4m                   ; r4d = coeffIdx
+    shl             r4d, 7                     ; coeffIdx * 128: byte offset into pw_LumaCoeffVer
+    add             r1d, r1d                   ; srcStride: int16 units -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                     ; back up 3 rows of filter history
+%ifidn %1,sp
+    mova            m14, [pd_526336]           ; sp rounding constant, paired with >>12
+%else
+    add             r3d, r3d                   ; ss: dstStride int16 units -> bytes
+%endif
+    lea             r6, [r3 * 3]
+    PROCESS_LUMA_AVX2_W8_16R %1                ; columns 0-7
+%ifidn %1,sp
+    add             r2, 8
+%else
+    add             r2, 16
+%endif
+    add             r0, 16
+    mova            m7, m14                    ; W4 routine expects the rounding constant in m7 (TODO confirm against PROCESS_LUMA_AVX2_W4_16R)
+    PROCESS_LUMA_AVX2_W4_16R %1                ; columns 8-11
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_S_AVX2_12x16 sp
+    FILTER_VER_LUMA_S_AVX2_12x16 ss
+
+%macro FILTER_VER_LUMA_S_AVX2_16x12 1          ; 8-tap vertical luma filter, 16x12 block (two 8x12 column tiles); %1 = sp or ss
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_16x12, 4, 10, 15
+    mov             r4d, r4m                   ; r4d = coeffIdx
+    shl             r4d, 7                     ; coeffIdx * 128: byte offset into pw_LumaCoeffVer
+    add             r1d, r1d                   ; srcStride: int16 units -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                     ; back up 3 rows of filter history
+%ifidn %1,sp
+    mova            m14, [pd_526336]           ; sp rounding constant, paired with >>12
+%else
+    add             r3d, r3d                   ; ss: dstStride int16 units -> bytes
+%endif
+    lea             r6, [r3 * 3]
+    mov             r9d, 2                     ; two 8-pixel-wide column tiles
+.loopW:
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r7, [r0 + r1 * 4]
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 2 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+    movu            xm7, [r7 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 3 * mmsize]
+    paddd           m0, m8
+    pmaddwd         m8, m6, [r5 + 2 * mmsize]
+    paddd           m2, m8
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm8, [r7]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 3 * mmsize]
+    paddd           m1, m9
+    pmaddwd         m9, m7, [r5 + 2 * mmsize]
+    paddd           m3, m9
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    paddd           m5, m9
+    pmaddwd         m7, [r5]
+    movu            xm9, [r7 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m2, m10
+    pmaddwd         m10, m8, [r5 + 2 * mmsize]
+    paddd           m4, m10
+    pmaddwd         m10, m8, [r5 + 1 * mmsize]
+    paddd           m6, m10
+    pmaddwd         m8, [r5]
+    movu            xm10, [r7 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm11, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm11, 1
+    pmaddwd         m11, m9, [r5 + 3 * mmsize]
+    paddd           m3, m11
+    pmaddwd         m11, m9, [r5 + 2 * mmsize]
+    paddd           m5, m11
+    pmaddwd         m11, m9, [r5 + 1 * mmsize]
+    paddd           m7, m11
+    pmaddwd         m9, [r5]
+    movu            xm11, [r7 + r4]                 ; m11 = row 11
+    punpckhwd       xm12, xm10, xm11
+    punpcklwd       xm10, xm11
+    vinserti128     m10, m10, xm12, 1
+    pmaddwd         m12, m10, [r5 + 3 * mmsize]
+    paddd           m4, m12
+    pmaddwd         m12, m10, [r5 + 2 * mmsize]
+    paddd           m6, m12
+    pmaddwd         m12, m10, [r5 + 1 * mmsize]
+    paddd           m8, m12
+    pmaddwd         m10, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm12, [r7]                      ; m12 = row 12
+    punpckhwd       xm13, xm11, xm12
+    punpcklwd       xm11, xm12
+    vinserti128     m11, m11, xm13, 1
+    pmaddwd         m13, m11, [r5 + 3 * mmsize]
+    paddd           m5, m13
+    pmaddwd         m13, m11, [r5 + 2 * mmsize]
+    paddd           m7, m13
+    pmaddwd         m13, m11, [r5 + 1 * mmsize]
+    paddd           m9, m13
+    pmaddwd         m11, [r5]
+
+%ifidn %1,sp
+    paddd           m0, m14                    ; round (sp path), then arithmetic >>12
+    paddd           m1, m14
+    paddd           m2, m14
+    paddd           m3, m14
+    paddd           m4, m14
+    paddd           m5, m14
+    psrad           m0, 12
+    psrad           m1, 12
+    psrad           m2, 12
+    psrad           m3, 12
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m0, 6                      ; ss path: truncate by filter headroom only
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m0, m1
+    packssdw        m2, m3
+    packssdw        m4, m5
+
+%ifidn %1,sp
+    packuswb        m0, m2                     ; clamp to 8-bit pixels
+    mova            m5, [interp8_hps_shuf]     ; m5 stays live as the vpermd pattern for the rest of this iteration
+    vpermd          m0, m5, m0
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0                  ; store rows 0-3
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0                  ; store rows 0-3 (16-bit)
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%endif
+
+    movu            xm13, [r7 + r1]                 ; m13 = row 13
+    punpckhwd       xm0, xm12, xm13
+    punpcklwd       xm12, xm13
+    vinserti128     m12, m12, xm0, 1
+    pmaddwd         m0, m12, [r5 + 3 * mmsize]
+    paddd           m6, m0
+    pmaddwd         m0, m12, [r5 + 2 * mmsize]
+    paddd           m8, m0
+    pmaddwd         m12, [r5 + 1 * mmsize]
+    paddd           m10, m12
+    movu            xm0, [r7 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm1, xm13, xm0
+    punpcklwd       xm13, xm0
+    vinserti128     m13, m13, xm1, 1
+    pmaddwd         m1, m13, [r5 + 3 * mmsize]
+    paddd           m7, m1
+    pmaddwd         m1, m13, [r5 + 2 * mmsize]
+    paddd           m9, m1
+    pmaddwd         m13, [r5 + 1 * mmsize]
+    paddd           m11, m13
+
+%ifidn %1,sp
+    paddd           m6, m14
+    paddd           m7, m14
+    psrad           m6, 12
+    psrad           m7, 12
+%else
+    psrad           m6, 6
+    psrad           m7, 6
+%endif
+    packssdw        m6, m7
+    lea             r8, [r2 + r3 * 4]          ; r8 -> dst row 4
+
+%ifidn %1,sp
+    packuswb        m4, m6
+    vpermd          m4, m5, m4
+    vextracti128    xm6, m4, 1
+    movq            [r8], xm4                  ; store rows 4-7
+    movhps          [r8 + r3], xm4
+    movq            [r8 + r3 * 2], xm6
+    movhps          [r8 + r6], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm1, m4, 1
+    vextracti128    xm7, m6, 1
+    movu            [r8], xm4
+    movu            [r8 + r3], xm1
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm7
+%endif
+
+    movu            xm1, [r7 + r4]                  ; m1 = row 15
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m2, m0, [r5 + 3 * mmsize]
+    paddd           m8, m2
+    pmaddwd         m0, [r5 + 2 * mmsize]
+    paddd           m10, m0
+    lea             r7, [r7 + r1 * 4]
+    movu            xm2, [r7]                       ; m2 = row 16
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m3, m1, [r5 + 3 * mmsize]
+    paddd           m9, m3
+    pmaddwd         m1, [r5 + 2 * mmsize]
+    paddd           m11, m1
+    movu            xm3, [r7 + r1]                  ; m3 = row 17
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m2, [r5 + 3 * mmsize]
+    paddd           m10, m2
+    movu            xm4, [r7 + r1 * 2]              ; m4 = row 18
+    punpckhwd       xm2, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm2, 1
+    pmaddwd         m3, [r5 + 3 * mmsize]
+    paddd           m11, m3
+
+%ifidn %1,sp
+    paddd           m8, m14
+    paddd           m9, m14
+    paddd           m10, m14
+    paddd           m11, m14
+    psrad           m8, 12
+    psrad           m9, 12
+    psrad           m10, 12
+    psrad           m11, 12
+%else
+    psrad           m8, 6
+    psrad           m9, 6
+    psrad           m10, 6
+    psrad           m11, 6
+%endif
+    packssdw        m8, m9
+    packssdw        m10, m11
+    lea             r8, [r8 + r3 * 4]          ; r8 -> dst row 8
+
+%ifidn %1,sp
+    packuswb        m8, m10
+    vpermd          m8, m5, m8
+    vextracti128    xm10, m8, 1
+    movq            [r8], xm8                  ; store rows 8-11
+    movhps          [r8 + r3], xm8
+    movq            [r8 + r3 * 2], xm10
+    movhps          [r8 + r6], xm10
+    add             r2, 8                      ; next 8-pixel column tile (8-bit output)
+%else
+    vpermq          m8, m8, 11011000b
+    vpermq          m10, m10, 11011000b
+    vextracti128    xm9, m8, 1
+    vextracti128    xm11, m10, 1
+    movu            [r8], xm8
+    movu            [r8 + r3], xm9
+    movu            [r8 + r3 * 2], xm10
+    movu            [r8 + r6], xm11
+    add             r2, 16                     ; next 8-pixel column tile (16-bit output)
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_S_AVX2_16x12 sp
+    FILTER_VER_LUMA_S_AVX2_16x12 ss
+
+%macro FILTER_VER_LUMA_S_AVX2_16x4 1           ; 8-tap vertical luma filter, 16x4 block; %1 = sp or ss; no ARCH_X86_64 guard - uses only r0-r6 plus a stack slot
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_16x4, 4, 7, 8, 0 - gprsize    ; reserves one stack word for the column-loop counter
+    mov             r4d, r4m                   ; r4d = coeffIdx
+    shl             r4d, 7                     ; coeffIdx * 128: byte offset into pw_LumaCoeffVer
+    add             r1d, r1d                   ; srcStride: int16 units -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                     ; back up 3 rows of filter history
+%ifidn %1,sp
+    mova            m7, [pd_526336]            ; sp rounding constant, paired with >>12
+%else
+    add             r3d, r3d                   ; ss: dstStride int16 units -> bytes
+%endif
+    mov             dword [rsp], 2             ; loop counter on the stack: no spare GPR in the 7-register budget
+.loopW:
+    PROCESS_LUMA_S_AVX2_W8_4R %1
+    lea             r6, [r3 * 3]
+%ifidn %1,sp
+    movq            [r2], xm0                  ; store 4 rows of 8 pixels
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+    add             r2, 8
+%else
+    movu            [r2], xm0                  ; store 4 rows of 8 int16 coefficients
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+    add             r2, 16
+%endif
+    lea             r6, [8 * r1 - 16]          ; rewind 8 src rows, advance 8 int16 columns
+    sub             r0, r6
+    dec             dword [rsp]
+    jnz             .loopW
+    RET
+%endmacro
+
+    FILTER_VER_LUMA_S_AVX2_16x4 sp
+    FILTER_VER_LUMA_S_AVX2_16x4 ss
+
+%macro PROCESS_LUMA_S_AVX2_W8_8R 1             ; filter one 8-wide x 8-row tile; %1 = sp/ss; caller must preload m11 = pd_526336 for sp, r4 = 3*srcStride, r5 = coeff ptr, r6 = 3*dstStride; clobbers r7/r8
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r7, [r0 + r1 * 4]
+    movu            xm4, [r7]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r7 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
+    paddd           m2, m6
+    pmaddwd         m4, [r5]
+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm7, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm7, 1
+    pmaddwd         m7, m5, [r5 + 2 * mmsize]
+    paddd           m1, m7
+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
+    paddd           m3, m7
+    pmaddwd         m5, [r5]
+    movu            xm7, [r7 + r4]                  ; m7 = row 7
+    punpckhwd       xm8, xm6, xm7
+    punpcklwd       xm6, xm7
+    vinserti128     m6, m6, xm8, 1
+    pmaddwd         m8, m6, [r5 + 3 * mmsize]
+    paddd           m0, m8
+    pmaddwd         m8, m6, [r5 + 2 * mmsize]
+    paddd           m2, m8
+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
+    paddd           m4, m8
+    pmaddwd         m6, [r5]
+    lea             r7, [r7 + r1 * 4]
+    movu            xm8, [r7]                       ; m8 = row 8
+    punpckhwd       xm9, xm7, xm8
+    punpcklwd       xm7, xm8
+    vinserti128     m7, m7, xm9, 1
+    pmaddwd         m9, m7, [r5 + 3 * mmsize]
+    paddd           m1, m9
+    pmaddwd         m9, m7, [r5 + 2 * mmsize]
+    paddd           m3, m9
+    pmaddwd         m9, m7, [r5 + 1 * mmsize]
+    paddd           m5, m9
+    pmaddwd         m7, [r5]
+    movu            xm9, [r7 + r1]                  ; m9 = row 9
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m2, m10
+    pmaddwd         m10, m8, [r5 + 2 * mmsize]
+    paddd           m4, m10
+    pmaddwd         m8, [r5 + 1 * mmsize]
+    paddd           m6, m8
+    movu            xm10, [r7 + r1 * 2]             ; m10 = row 10
+    punpckhwd       xm8, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm8, 1
+    pmaddwd         m8, m9, [r5 + 3 * mmsize]
+    paddd           m3, m8
+    pmaddwd         m8, m9, [r5 + 2 * mmsize]
+    paddd           m5, m8
+    pmaddwd         m9, [r5 + 1 * mmsize]
+    paddd           m7, m9
+    movu            xm8, [r7 + r4]                  ; m8 = row 11
+    punpckhwd       xm9, xm10, xm8
+    punpcklwd       xm10, xm8
+    vinserti128     m10, m10, xm9, 1
+    pmaddwd         m9, m10, [r5 + 3 * mmsize]
+    paddd           m4, m9
+    pmaddwd         m10, [r5 + 2 * mmsize]
+    paddd           m6, m10
+    lea             r7, [r7 + r1 * 4]
+    movu            xm9, [r7]                       ; m9 = row 12
+    punpckhwd       xm10, xm8, xm9
+    punpcklwd       xm8, xm9
+    vinserti128     m8, m8, xm10, 1
+    pmaddwd         m10, m8, [r5 + 3 * mmsize]
+    paddd           m5, m10
+    pmaddwd         m8, [r5 + 2 * mmsize]
+    paddd           m7, m8
+
+%ifidn %1,sp
+    paddd           m0, m11                    ; m11 = pd_526336, preloaded by caller
+    paddd           m1, m11
+    paddd           m2, m11
+    paddd           m3, m11
+    paddd           m4, m11
+    paddd           m5, m11
+    psrad           m0, 12
+    psrad           m1, 12
+    psrad           m2, 12
+    psrad           m3, 12
+    psrad           m4, 12
+    psrad           m5, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+    psrad           m4, 6
+    psrad           m5, 6
+%endif
+    packssdw        m0, m1
+    packssdw        m2, m3
+    packssdw        m4, m5
+
+%ifidn %1,sp
+    packuswb        m0, m2
+    mova            m5, [interp8_hps_shuf]     ; m5 reused below as the vpermd pattern
+    vpermd          m0, m5, m0
+    vextracti128    xm2, m0, 1
+    movq            [r2], xm0                  ; rows 0-3
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r6], xm2
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+    movu            [r2], xm0                  ; rows 0-3 (16-bit)
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r6], xm3
+%endif
+
+    movu            xm10, [r7 + r1]                 ; m10 = row 13
+    punpckhwd       xm0, xm9, xm10
+    punpcklwd       xm9, xm10
+    vinserti128     m9, m9, xm0, 1
+    pmaddwd         m9, [r5 + 3 * mmsize]
+    paddd           m6, m9
+    movu            xm0, [r7 + r1 * 2]              ; m0 = row 14
+    punpckhwd       xm1, xm10, xm0
+    punpcklwd       xm10, xm0
+    vinserti128     m10, m10, xm1, 1
+    pmaddwd         m10, [r5 + 3 * mmsize]
+    paddd           m7, m10
+
+%ifidn %1,sp
+    paddd           m6, m11
+    paddd           m7, m11
+    psrad           m6, 12
+    psrad           m7, 12
+%else
+    psrad           m6, 6
+    psrad           m7, 6
+%endif
+    packssdw        m6, m7
+    lea             r8, [r2 + r3 * 4]          ; r8 -> dst row 4 (left live for callers)
+
+%ifidn %1,sp
+    packuswb        m4, m6
+    vpermd          m4, m5, m4
+    vextracti128    xm6, m4, 1
+    movq            [r8], xm4                  ; rows 4-7
+    movhps          [r8 + r3], xm4
+    movq            [r8 + r3 * 2], xm6
+    movhps          [r8 + r6], xm6
+%else
+    vpermq          m4, m4, 11011000b
+    vpermq          m6, m6, 11011000b
+    vextracti128    xm5, m4, 1
+    vextracti128    xm7, m6, 1
+    movu            [r8], xm4                  ; rows 4-7 (16-bit)
+    movu            [r8 + r3], xm5
+    movu            [r8 + r3 * 2], xm6
+    movu            [r8 + r6], xm7
+%endif
+%endmacro
+
+%macro FILTER_VER_LUMA_AVX2_Nx8 2              ; 8-tap vertical luma filter, %2 x 8 block; %1 = sp or ss
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_%2x8, 4, 10, 12
+    mov             r4d, r4m                   ; r4d = coeffIdx
+    shl             r4d, 7                     ; coeffIdx * 128: byte offset into pw_LumaCoeffVer
+    add             r1d, r1d                   ; srcStride: int16 units -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                     ; back up 3 rows of filter history
+%ifidn %1,sp
+    mova            m11, [pd_526336]           ; rounding constant in m11, as PROCESS_LUMA_S_AVX2_W8_8R expects
+%else
+    add             r3d, r3d                   ; ss: dstStride int16 units -> bytes
+%endif
+    lea             r6, [r3 * 3]
+    mov             r9d, %2 / 8                ; number of 8-pixel-wide column tiles
+.loopW:
+    PROCESS_LUMA_S_AVX2_W8_8R %1
+%ifidn %1,sp
+    add             r2, 8
+%else
+    add             r2, 16
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_AVX2_Nx8 sp, 32
+    FILTER_VER_LUMA_AVX2_Nx8 sp, 16
+    FILTER_VER_LUMA_AVX2_Nx8 ss, 32
+    FILTER_VER_LUMA_AVX2_Nx8 ss, 16
+
+%macro FILTER_VER_LUMA_S_AVX2_32x24 1          ; 8-tap vertical luma filter, 32x24 = 32x16 band (W8_16R x4) + 32x8 band (W8_8R x4); %1 = sp or ss
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_%1_32x24, 4, 10, 15
+    mov             r4d, r4m                   ; r4d = coeffIdx
+    shl             r4d, 7                     ; coeffIdx * 128: byte offset into pw_LumaCoeffVer
+    add             r1d, r1d                   ; srcStride: int16 units -> bytes
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4                     ; back up 3 rows of filter history
+%ifidn %1,sp
+    mova            m14, [pd_526336]           ; rounding constant for the 16-row macro (m14)
+%else
+    add             r3d, r3d                   ; ss: dstStride int16 units -> bytes
+%endif
+    lea             r6, [r3 * 3]
+    mov             r9d, 4                     ; four 8-wide tiles across the 32-pixel width
+.loopW:
+    PROCESS_LUMA_AVX2_W8_16R %1
+%ifidn %1,sp
+    add             r2, 8
+%else
+    add             r2, 16
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loopW
+    lea             r9, [r1 * 4]
+    sub             r7, r9                     ; r7/r8 left by the macro - rewind src to start of the next 8-row band
+    lea             r0, [r7 - 48]              ; back to column 0 (3 tiles * 16 bytes)
+%ifidn %1,sp
+    lea             r2, [r8 + r3 * 4 - 24]     ; dst to next band, column 0 (8-bit)
+%else
+    lea             r2, [r8 + r3 * 4 - 48]     ; dst to next band, column 0 (16-bit)
+%endif
+    mova            m11, m14                   ; 8-row macro expects the rounding constant in m11, not m14
+    mov             r9d, 4
+.loop:
+    PROCESS_LUMA_S_AVX2_W8_8R %1
+%ifidn %1,sp
+    add             r2, 8
+%else
+    add             r2, 16
+%endif
+    add             r0, 16
+    dec             r9d
+    jnz             .loop
+    RET
+%endif
+%endmacro
+
+    FILTER_VER_LUMA_S_AVX2_32x24 sp
+    FILTER_VER_LUMA_S_AVX2_32x24 ss
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_32x32(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_32x32, 4,6,8
+    mov             r4d, r4m                   ; r4d = coeffIdx
+    add             r3d, r3d                   ; dstStride: int16 units -> bytes
+    dec             r0                         ; 4-tap filter reads one pixel left of each output
+
+    ; check isRowExt
+    cmp             r5m, byte 0                ; flags consumed by 'je .loop' below
+
+    lea             r5, [tab_ChromaCoeff]      ; NOTE(review): not wrapped in %ifdef PIC unlike sibling routines - confirm intended
+    vpbroadcastw    m0, [r5 + r4 * 4 + 0]
+    vpbroadcastw    m1, [r5 + r4 * 4 + 2]
+    mova            m7, [pw_2000]
+
+    ; register map
+    ; m0 - interpolate coeff Low
+    ; m1 - interpolate coeff High
+    ; m7 - constant pw_2000
+    mov             r4d, 32                    ; row count
+    je             .loop                       ; NOTE(review): relies on flags from 'cmp r5m, 0' above surviving lea/vpbroadcastw/mova/mov (none touch EFLAGS) - fragile if code is inserted in between
+    sub             r0, r1                     ; row-extended: start one row above
+    add             r4d, 3                     ; and filter 32 + 3 rows
+
+.loop                                          ; NOTE(review): label lacks ':' - NASM warns "label alone on a line"
+    ; Row 0
+    movu            m2, [r0]
+    movu            m3, [r0 + 1]
+    punpckhbw       m4, m2, m3
+    punpcklbw       m2, m3
+    pmaddubsw       m4, m0
+    pmaddubsw       m2, m0
+
+    movu            m3, [r0 + 2]
+    movu            m5, [r0 + 3]
+    punpckhbw       m6, m3, m5
+    punpcklbw       m3, m5
+    pmaddubsw       m6, m1
+    pmaddubsw       m3, m1
+
+    paddw           m4, m6
+    paddw           m2, m3
+    psubw           m4, m7                     ; apply ps-path offset (pw_2000)
+    psubw           m2, m7
+    vperm2i128      m3, m2, m4, 0x20           ; reorder low/high 128-bit lanes into row order
+    vperm2i128      m5, m2, m4, 0x31
+    movu            [r2], m3
+    movu            [r2 + mmsize], m5
+
+    add             r2, r3
+    add             r0, r1
+    dec             r4d
+    jnz            .loop
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_16x16(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_16x16, 4,7,6
+    mov             r4d, r4m                   ; r4d = coeffIdx
+    mov             r5d, r5m                   ; r5d = isRowExt
+    add             r3d, r3d                   ; dstStride: int16 units -> bytes
+
+%ifdef PIC
+    lea               r6,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r6 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128     m2,           [pw_1]
+    vbroadcasti128     m5,           [pw_2000]
+    mova               m1,           [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    mov                r6d,         16         ; row count
+    dec                r0                      ; filter reads one pixel left of each output
+    test                r5d,        r5d
+    je                 .loop
+    sub                r0 ,         r1         ; row-extended: start one row above
+    add                r6d ,        3          ; and filter 16 + 3 rows
+
+.loop                                          ; NOTE(review): label lacks ':' - NASM warns "label alone on a line"
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2         ; pmaddwd by pw_1 sums adjacent word pairs
+    vbroadcasti128    m4,           [r0 + 8]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           m5         ; apply ps-path offset (pw_2000)
+    vpermq            m3,           m3,          11011000b
+    movu              [r2],         m3
+
+    add                r2,          r3
+    add                r0,          r1
+    dec                r6d
+    jnz                .loop
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_16xN(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_PS_16xN_AVX2 2          ; %1 = width (16), %2 = height; same body as the 16x16 routine with parameterized row count
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_%1x%2, 4,7,6
+    mov                    r4d,        r4m     ; r4d = coeffIdx
+    mov                    r5d,        r5m     ; r5d = isRowExt
+    add                    r3d,        r3d     ; dstStride: int16 units -> bytes
+
+%ifdef PIC
+    lea                    r6,         [tab_ChromaCoeff]
+    vpbroadcastd           m0,         [r6 + r4 * 4]
+%else
+    vpbroadcastd           m0,         [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128         m2,         [pw_1]
+    vbroadcasti128         m5,         [pw_2000]
+    mova                   m1,         [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    mov                    r6d,        %2      ; row count
+    dec                    r0                  ; filter reads one pixel left of each output
+    test                   r5d,        r5d
+    je                     .loop
+    sub                    r0 ,        r1      ; row-extended: start one row above
+    add                    r6d ,       3       ; and filter %2 + 3 rows
+
+.loop                                          ; NOTE(review): label lacks ':' - NASM warns "label alone on a line"
+    ; Row 0
+    vbroadcasti128         m3,         [r0]
+    pshufb                 m3,         m1
+    pmaddubsw              m3,         m0
+    pmaddwd                m3,         m2      ; pmaddwd by pw_1 sums adjacent word pairs
+    vbroadcasti128         m4,         [r0 + 8]
+    pshufb                 m4,         m1
+    pmaddubsw              m4,         m0
+    pmaddwd                m4,         m2
+
+    packssdw               m3,         m4
+    psubw                  m3,         m5      ; apply ps-path offset (pw_2000)
+
+    vpermq                 m3,         m3,          11011000b
+    movu                   [r2],       m3
+
+    add                    r2,         r3
+    add                    r0,         r1
+    dec                    r6d
+    jnz                    .loop
+    RET
+%endmacro
+
+    IPFILTER_CHROMA_PS_16xN_AVX2  16 , 32
+    IPFILTER_CHROMA_PS_16xN_AVX2  16 , 12
+    IPFILTER_CHROMA_PS_16xN_AVX2  16 , 8
+    IPFILTER_CHROMA_PS_16xN_AVX2  16 , 4
+    IPFILTER_CHROMA_PS_16xN_AVX2  16 , 24
+    IPFILTER_CHROMA_PS_16xN_AVX2  16 , 64
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_32xN(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_PS_32xN_AVX2 2          ; %1 = width (32), %2 = height; the 16-wide body unrolled twice per row
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_%1x%2, 4,7,6
+    mov                r4d,          r4m       ; r4d = coeffIdx
+    mov                r5d,          r5m       ; r5d = isRowExt
+    add                r3d,          r3d       ; dstStride: int16 units -> bytes
+
+%ifdef PIC
+    lea                r6,           [tab_ChromaCoeff]
+    vpbroadcastd       m0,           [r6 + r4 * 4]
+%else
+    vpbroadcastd       m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128     m2,           [pw_1]
+    vbroadcasti128     m5,           [pw_2000]
+    mova               m1,           [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    mov                r6d,          %2        ; row count
+    dec                r0                      ; filter reads one pixel left of each output
+    test               r5d,          r5d
+    je                 .loop
+    sub                r0 ,          r1        ; row-extended: start one row above
+    add                r6d ,         3         ; and filter %2 + 3 rows
+
+.loop                                          ; NOTE(review): label lacks ':' - NASM warns "label alone on a line"
+    ; Row 0
+    vbroadcasti128     m3,           [r0]                ; pixels 0-15
+    pshufb             m3,           m1
+    pmaddubsw          m3,           m0
+    pmaddwd            m3,           m2        ; pmaddwd by pw_1 sums adjacent word pairs
+    vbroadcasti128     m4,           [r0 + 8]
+    pshufb             m4,           m1
+    pmaddubsw          m4,           m0
+    pmaddwd            m4,           m2
+
+    packssdw           m3,           m4
+    psubw              m3,           m5        ; apply ps-path offset (pw_2000)
+
+    vpermq             m3,           m3,          11011000b
+    movu              [r2],          m3
+
+    vbroadcasti128     m3,           [r0 + 16]           ; pixels 16-31
+    pshufb             m3,           m1
+    pmaddubsw          m3,           m0
+    pmaddwd            m3,           m2
+    vbroadcasti128     m4,           [r0 + 24]
+    pshufb             m4,           m1
+    pmaddubsw          m4,           m0
+    pmaddwd            m4,           m2
+
+    packssdw           m3,           m4
+    psubw              m3,           m5
+
+    vpermq             m3,           m3,          11011000b
+    movu               [r2 + 32],    m3
+
+    add                r2,           r3
+    add                r0,           r1
+    dec                r6d
+    jnz                .loop
+    RET
+%endmacro
+
+    IPFILTER_CHROMA_PS_32xN_AVX2  32 , 16
+    IPFILTER_CHROMA_PS_32xN_AVX2  32 , 24
+    IPFILTER_CHROMA_PS_32xN_AVX2  32 , 8
+    IPFILTER_CHROMA_PS_32xN_AVX2  32 , 64
+    IPFILTER_CHROMA_PS_32xN_AVX2  32 , 48
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_4x4(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_4x4, 4,7,5
+    mov             r4d, r4m
+    mov             r5d, r5m
+    add             r3d, r3d
+
+%ifdef PIC
+    lea               r6,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r6 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128     m2,           [pw_1]
+    vbroadcasti128     m1,           [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec                r0
+    test                r5d,       r5d
+    je                 .label
+    sub                r0 , r1
+
+.label
+    ; Row 0-1
+    movu              xm3,           [r0]
+    vinserti128       m3,           m3,      [r0 + r1],     1
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 2-3
+    lea               r0,           [r0 + r1 * 2]
+    movu              xm4,           [r0]
+    vinserti128       m4,           m4,      [r0 + r1],     1
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           [pw_2000]
+    vextracti128      xm4,          m3,     1
+    movq              [r2],         xm3
+    movq              [r2+r3],      xm4
+    lea               r2,           [r2 + r3 * 2]
+    movhps            [r2],         xm3
+    movhps            [r2 + r3],    xm4
+
+    test                r5d,        r5d
+    jz                .end
+    lea               r2,           [r2 + r3 * 2]
+    lea               r0,           [r0 + r1 * 2]
+
+    ;Row 5-6
+    movu              xm3,          [r0]
+    vinserti128       m3,           m3,      [r0 + r1],     1
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 7
+    lea               r0,           [r0 + r1 * 2]
+    vbroadcasti128    m4,           [r0]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           [pw_2000]
+
+    vextracti128      xm4,          m3,     1
+    movq              [r2],         xm3
+    movq              [r2+r3],      xm4
+    lea               r2,           [r2 + r3 * 2]
+    movhps            [r2],         xm3
+.end
+    RET
+
+; 4-tap horizontal chroma filter, 4x2 block, 'ps' variant (16-bit output with
+; pw_2000 subtracted).  Same per-row scheme as ps_4x4 above: pshufb(tab_Tm),
+; pmaddubsw(coeff), pmaddwd(pw_1) horizontal add.  With isRowExt set, src is
+; backed up one row and three extension rows follow the 4x2 body.
+cglobal interp_4tap_horiz_ps_4x2, 4,7,5
+    mov             r4d, r4m                 ; r4d = coeffIdx
+    mov             r5d, r5m                 ; r5d = isRowExt
+    add             r3d, r3d                 ; dstStride in bytes (int16_t dst)
+
+%ifdef PIC
+    lea               r6,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r6 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128     m2,           [pw_1]
+    vbroadcasti128     m1,           [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec                r0                    ; src -= 1 (filter taps src[-1..+2])
+    test                r5d,       r5d
+    je                 .label
+    sub                r0 , r1               ; row extension: start one row earlier
+
+.label
+    ; Row 0-1
+    movu              xm3,           [r0]
+    vinserti128       m3,           m3,      [r0 + r1],     1
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    packssdw          m3,           m3
+    psubw             m3,           [pw_2000]
+    vextracti128      xm4,          m3,     1
+    movq              [r2],         xm3
+    movq              [r2+r3],      xm4
+
+    test              r5d,          r5d
+    jz                .end
+    lea               r2,           [r2 + r3 * 2]
+    lea               r0,           [r0 + r1 * 2]
+
+    ;Row 2-3
+    movu              xm3,          [r0]
+    vinserti128       m3,           m3,      [r0 + r1],     1
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 5
+    lea               r0,           [r0 + r1 * 2]
+    vbroadcasti128    m4,           [r0]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           [pw_2000]
+
+    vextracti128      xm4,          m3,     1
+    movq              [r2],         xm3
+    movq              [r2+r3],      xm4
+    lea               r2,           [r2 + r3 * 2]
+    movhps            [r2],         xm3
+.end
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_4xN(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------;
+%macro IPFILTER_CHROMA_PS_4xN_AVX2 2
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_%1x%2, 4,7,5
+    mov             r4d, r4m
+    mov             r5d, r5m
+    add             r3d, r3d
+
+%ifdef PIC
+    lea               r6,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r6 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128     m2,           [pw_1]
+    vbroadcasti128     m1,           [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    mov              r4,                %2
+    dec              r0
+    test             r5d,       r5d
+    je               .loop
+    sub              r0 ,               r1
+
+
+.loop
+    sub              r4d,           4
+    ; Row 0-1
+    movu              xm3,          [r0]
+    vinserti128       m3,           m3,      [r0 + r1],     1
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 2-3
+    lea               r0,           [r0 + r1 * 2]
+    movu              xm4,          [r0]
+    vinserti128       m4,           m4,      [r0 + r1],     1
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           [pw_2000]
+    vextracti128      xm4,          m3,     1
+    movq              [r2],         xm3
+    movq              [r2+r3],      xm4
+    lea               r2,           [r2 + r3 * 2]
+    movhps            [r2],         xm3
+    movhps            [r2 + r3],    xm4
+
+    lea               r2,           [r2 + r3 * 2]
+    lea               r0,           [r0 + r1 * 2]
+
+    test              r4d,          r4d
+    jnz               .loop
+    test                r5d,        r5d
+    jz                .end
+
+    ;Row 5-6
+    movu              xm3,          [r0]
+    vinserti128       m3,           m3,      [r0 + r1],     1
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 7
+    lea               r0,           [r0 + r1 * 2]
+    vbroadcasti128    m4,           [r0]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           [pw_2000]
+
+    vextracti128      xm4,          m3,     1
+    movq              [r2],         xm3
+    movq              [r2+r3],      xm4
+    lea               r2,           [r2 + r3 * 2]
+    movhps            [r2],         xm3
+.end
+    RET
+%endmacro
+
+    IPFILTER_CHROMA_PS_4xN_AVX2  4 , 8
+    IPFILTER_CHROMA_PS_4xN_AVX2  4 , 16
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_8x8(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------;
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_8x8, 4,7,6
+    mov             r4d, r4m
+    mov             r5d, r5m
+    add             r3d, r3d
+
+%ifdef PIC
+    lea               r6,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r6 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128     m2,           [pw_1]
+    vbroadcasti128     m5,           [pw_2000]
+    mova               m1,           [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    mov                r6d,      4
+    dec                r0
+    test                r5d,     r5d
+    je                 .loop
+    sub                r0 ,      r1
+    add                r6d ,     1
+
+.loop
+     dec               r6d
+    ; Row 0
+    vbroadcasti128    m3,           [r0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 1
+    vbroadcasti128    m4,           [r0 + r1]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           m5
+
+    vpermq            m3,           m3,          11011000b
+    vextracti128      xm4,          m3,     1
+    movu             [r2],         xm3
+    movu             [r2 + r3],    xm4
+
+    lea               r2,           [r2 + r3 * 2]
+    lea               r0,           [r0 + r1 * 2]
+    test               r6d,          r6d
+    jnz               .loop
+    test              r5d,         r5d
+    je                .end
+
+    ;Row 11
+    vbroadcasti128    m3,           [r0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    packssdw          m3,           m3
+    psubw             m3,           m5
+    vpermq            m3,           m3,          11011000b
+    movu             [r2],         xm3
+.end
+    RET
+
+; 4-tap horizontal chroma filter, 4x2 block, 'pp' variant: 8-bit pixels in,
+; rounded 8-bit pixels out (pmulhrsw by pw_512 performs the >> 6 rounding,
+; packuswb clips to bytes).  No row-extension parameter for pp kernels.
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_4x2, 4,6,4
+    mov             r4d, r4m                 ; r4d = coeffIdx
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128    m1,           [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+
+    ; Row 0-1 (loads start at src-1: filter taps src[-1..+2])
+    movu              xm2,          [r0 - 1]
+    vinserti128       m2,           m2,      [r0 + r1 - 1],     1
+    pshufb            m2,           m1
+    pmaddubsw         m2,           m0
+    pmaddwd           m2,           [pw_1]
+
+    packssdw          m2,           m2
+    pmulhrsw          m2,           [pw_512]
+    vextracti128      xm3,          m2,     1
+    packuswb          xm2,          xm3
+
+    movd              [r2],         xm2
+    pextrd            [r2+r3],      xm2,     2
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_32xN(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx
+;-------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_PP_32xN_AVX2 2
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_%1x%2, 4,6,7
+    mov             r4d, r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m1,           [interp4_horiz_shuf1]
+    vpbroadcastd      m2,           [pw_1]
+    mova              m6,           [pw_512]
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec               r0
+    mov               r4d,          %2
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 4]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           m6
+
+    vbroadcasti128    m4,           [r0 + 16]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    vbroadcasti128    m5,           [r0 + 20]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           m6
+
+    packuswb          m3,           m4
+    vpermq            m3,           m3,      11011000b
+
+    movu              [r2],         m3
+    add               r2,           r3
+    add               r0,           r1
+    dec               r4d
+    jnz               .loop
+    RET
+%endmacro
+
+    IPFILTER_CHROMA_PP_32xN_AVX2 32, 16
+    IPFILTER_CHROMA_PP_32xN_AVX2 32, 24
+    IPFILTER_CHROMA_PP_32xN_AVX2 32, 8
+    IPFILTER_CHROMA_PP_32xN_AVX2 32, 64
+    IPFILTER_CHROMA_PP_32xN_AVX2 32, 48
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_8xN(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx
+;-------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_PP_8xN_AVX2 2
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_%1x%2, 4,6,6
+    mov               r4d,    r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    movu              m1,           [tab_Tm]
+    vpbroadcastd      m2,           [pw_1]
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    sub               r0,           1
+    mov               r4d,          %2
+
+.loop:
+    sub               r4d,          4
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 1
+    vbroadcasti128    m4,           [r0 + r1]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           [pw_512]
+    lea               r0,           [r0 + r1 * 2]
+
+    ; Row 2
+    vbroadcasti128    m4,           [r0 ]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    ; Row 3
+    vbroadcasti128    m5,           [r0 + r1]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           [pw_512]
+
+    packuswb          m3,           m4
+    mova              m5,           [interp_4tap_8x8_horiz_shuf]
+    vpermd            m3,           m5,     m3
+    vextracti128      xm4,          m3,     1
+    movq              [r2],         xm3
+    movhps            [r2 + r3],    xm3
+    lea               r2,           [r2 + r3 * 2]
+    movq              [r2],         xm4
+    movhps            [r2 + r3],    xm4
+    lea               r2,           [r2 + r3 * 2]
+    lea               r0,           [r0 + r1*2]
+    test              r4d,          r4d
+    jnz               .loop
+    RET
+%endmacro
+
+    IPFILTER_CHROMA_PP_8xN_AVX2   8 , 16
+    IPFILTER_CHROMA_PP_8xN_AVX2   8 , 32
+    IPFILTER_CHROMA_PP_8xN_AVX2   8 , 4
+    IPFILTER_CHROMA_PP_8xN_AVX2   8 , 64
+    IPFILTER_CHROMA_PP_8xN_AVX2   8 , 12
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_4xN(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx
+;-------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_PP_4xN_AVX2 2
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_%1x%2, 4,6,6
+    mov             r4d, r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vpbroadcastd      m2,           [pw_1]
+    vbroadcasti128    m1,           [tab_Tm]
+    mov               r4d,          %2
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec                r0
+
+.loop
+    sub               r4d,          4
+    ; Row 0-1
+    movu              xm3,          [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    vinserti128       m3,           m3,      [r0 + r1],     1
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+
+    ; Row 2-3
+    lea               r0,           [r0 + r1 * 2]
+    movu              xm4,          [r0]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    vinserti128       m4,           m4,      [r0 + r1],     1
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    pmulhrsw          m3,           [pw_512]
+    vextracti128      xm4,          m3,                     1
+    packuswb          xm3,          xm4
+
+    movd              [r2],         xm3
+    pextrd            [r2+r3],      xm3,                    2
+    lea               r2,           [r2 + r3 * 2]
+    pextrd            [r2],         xm3,                    1
+    pextrd            [r2+r3],      xm3,                    3
+
+    lea               r0,           [r0 + r1 * 2]
+    lea               r2,           [r2 + r3 * 2]
+    test              r4d,          r4d
+    jnz               .loop
+    RET
+%endmacro
+
+    IPFILTER_CHROMA_PP_4xN_AVX2  4 , 8
+    IPFILTER_CHROMA_PP_4xN_AVX2  4 , 16
+
+; Macro: 8-tap horizontal luma 'ps' filter for 32-wide blocks of height %2.
+; Each 16-byte source load yields 8 outputs via a two-stage reduction:
+;   pmaddubsw(coeff) -> 2-tap partial words,
+;   pmaddwd(pw_1)    -> 4-tap partial dwords, packssdw back to words,
+;   pmaddwd(pw_1)    -> full 8-tap sums, packssdw, vpermd to restore order,
+; then psubw(pw_2000) applies the 'ps' offset.  When isRowExt (r5m) is set,
+; src is backed up 3 rows ((N/2 - 1) rows for N = 8 taps) and 7 extra rows
+; are produced.
+%macro IPFILTER_LUMA_PS_32xN_AVX2 2
+INIT_YMM avx2
+cglobal interp_8tap_horiz_ps_%1x%2, 4, 7, 8
+    mov                         r5d,               r5m              ; isRowExt
+    mov                         r4d,               r4m              ; coeffIdx
+%ifdef PIC
+    lea                         r6,                [tab_LumaCoeff]
+    vpbroadcastq                m0,                [r6 + r4 * 8]
+%else
+    vpbroadcastq                m0,                [tab_LumaCoeff + r4 * 8]
+%endif
+    mova                        m6,                [tab_Lm + 32]
+    mova                        m1,                [tab_Lm]
+    mov                         r4d,                %2                           ;height
+    add                         r3d,               r3d              ; dstStride in bytes
+    vbroadcasti128              m2,                [pw_1]
+    mova                        m7,                [interp8_hps_shuf]
+
+    ; register map
+    ; m0      - interpolate coeff
+    ; m1 , m6 - shuffle order table
+    ; m2      - pw_1
+    ; m7      - output permute order
+
+
+    sub                         r0,                3                ; src -= 3 (taps src[-3..+4])
+    test                        r5d,               r5d
+    jz                          .label
+    lea                         r6,                [r1 * 3]                     ; r6 = (N / 2 - 1) * srcStride
+    sub                         r0,                r6
+    add                         r4d,                7               ; 7 extension rows
+
+.label
+    lea                         r6,                 [pw_2000]
+.loop
+    ; Row 0
+    vbroadcasti128              m3,                [r0]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 4 to 7)
+    pshufb                      m3,                m1                           ; shuffled based on the col order tab_Lm row 0 (col 0 to 3)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+
+
+    vbroadcasti128              m4,                [r0 + 8]
+    pshufb                      m5,                m4,            m6            ;row 0 (col 12 to 15)
+    pshufb                      m4,                m1                           ;row 0 (col 8 to 11)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m2
+    pmaddwd                     m5,                m2
+    packssdw                    m4,                m5
+
+    ; second horizontal-add stage: collapse 4-tap partials into 8-tap sums
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+    vpermd                      m3,                m7,               m3
+    psubw                       m3,                [r6]
+
+    movu                        [r2],              m3                          ;row 0
+
+    vbroadcasti128              m3,                [r0 + 16]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 20 to 23)
+    pshufb                      m3,                m1                           ; row 0 (col 16 to 19)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+
+    vbroadcasti128              m4,                [r0 + 24]
+    pshufb                      m5,                m4,            m6            ;row 0 (col 28 to 31)
+    pshufb                      m4,                m1                           ;row 0 (col 24 to 27)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m2
+    pmaddwd                     m5,                m2
+    packssdw                    m4,                m5
+
+    ; second horizontal-add stage for columns 16-31
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+    vpermd                      m3,                m7,               m3
+    psubw                       m3,                [r6]
+
+    movu                        [r2 + 32],         m3                          ;row 0
+
+    add                         r0,                r1
+    add                         r2,                r3
+    dec                         r4d
+    jnz                         .loop
+    RET
+%endmacro
+
+    IPFILTER_LUMA_PS_32xN_AVX2 32 , 32
+    IPFILTER_LUMA_PS_32xN_AVX2 32 , 16
+    IPFILTER_LUMA_PS_32xN_AVX2 32 , 24
+    IPFILTER_LUMA_PS_32xN_AVX2 32 , 8
+    IPFILTER_LUMA_PS_32xN_AVX2 32 , 64
+
+; 8-tap horizontal luma 'ps' filter, 48x64 block.  Same two-stage
+; pmaddubsw/pmaddwd reduction as the 32xN luma macro above, extended to three
+; 16-pixel segments per row (offsets 0/8, 16/24, 32/40).  Register roles are
+; swapped versus the macro: m7 holds pw_1 and m2 holds pw_2000, with the
+; vpermd order loaded through r6 (interp8_hps_shuf).
+INIT_YMM avx2
+cglobal interp_8tap_horiz_ps_48x64, 4, 7, 8
+    mov                         r5d,               r5m              ; isRowExt
+    mov                         r4d,               r4m              ; coeffIdx
+%ifdef PIC
+    lea                         r6,                [tab_LumaCoeff]
+    vpbroadcastq                m0,                [r6 + r4 * 8]
+%else
+    vpbroadcastq                m0,                [tab_LumaCoeff + r4 * 8]
+%endif
+    mova                        m6,                [tab_Lm + 32]
+    mova                        m1,                [tab_Lm]
+    mov                         r4d,               64                           ;height
+    add                         r3d,               r3d              ; dstStride in bytes
+    vbroadcasti128              m2,                [pw_2000]
+    mova                        m7,                [pw_1]
+
+    ; register map
+    ; m0      - interpolate coeff
+    ; m1 , m6 - shuffle order table
+    ; m2      - pw_2000
+    ; m7      - pw_1
+
+    sub                         r0,                3                ; src -= 3 (taps src[-3..+4])
+    test                        r5d,               r5d
+    jz                          .label
+    lea                         r6,                [r1 * 3]                     ; r6 = (N / 2 - 1) * srcStride
+    sub                         r0,                r6                           ; r0(src)-r6
+    add                         r4d,                7                            ; blkheight += N - 1  (7 - 1 = 6 ; since the last one row not in loop)
+
+.label
+    lea                         r6,                [interp8_hps_shuf]
+.loop
+    ; Row 0
+    vbroadcasti128              m3,                [r0]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 4 to 7)
+    pshufb                      m3,                m1                           ; shuffled based on the col order tab_Lm row 0 (col 0 to 3)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+
+    vbroadcasti128              m4,                [r0 + 8]
+    pshufb                      m5,                m4,             m6            ;row 0 (col 12 to 15)
+    pshufb                      m4,                m1                           ;row 0 (col 8 to 11)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m7
+    pmaddwd                     m5,                m7
+    packssdw                    m4,                m5
+    ; second horizontal-add stage: collapse 4-tap partials into 8-tap sums
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+    mova                        m5,                [r6]
+    vpermd                      m3,                m5,             m3
+    psubw                       m3,                m2
+    movu                        [r2],              m3                          ;row 0
+
+    vbroadcasti128              m3,                [r0 + 16]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 20 to 23)
+    pshufb                      m3,                m1                           ; row 0 (col 16 to 19)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+
+    vbroadcasti128              m4,                [r0 + 24]
+    pshufb                      m5,                m4,             m6            ;row 0 (col 28 to 31)
+    pshufb                      m4,                m1                           ;row 0 (col 24 to 27)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m7
+    pmaddwd                     m5,                m7
+    packssdw                    m4,                m5
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+    mova                        m5,                [r6]
+    vpermd                      m3,                m5,               m3
+    psubw                       m3,                m2
+    movu                        [r2 + 32],         m3                          ;row 0
+
+    vbroadcasti128              m3,                [r0 + 32]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 36 to 39)
+    pshufb                      m3,                m1                           ; row 0 (col 32 to 35)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+
+    vbroadcasti128              m4,                [r0 + 40]
+    pshufb                      m5,                m4,            m6            ;row 0 (col 44 to 47)
+    pshufb                      m4,                m1                           ;row 0 (col 40 to 43)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m7
+    pmaddwd                     m5,                m7
+    packssdw                    m4,                m5
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+    mova                        m5,                [r6]
+    vpermd                      m3,                m5,               m3
+    psubw                       m3,                m2
+    movu                        [r2 + 64],         m3                          ;row 0
+
+    add                         r0,                r1
+    add                         r2,                r3
+    dec                         r4d
+    jnz                         .loop
+    RET
+
+; 8-tap horizontal luma 'pp' filter, 24x32 block.  Splits the 8 coefficients
+; into two dword pairs (m0 = taps 0-3, m1 = taps 4-7); each load is shuffled
+; twice (tab_Tm via [r5], tab_Tm+16 in m3), multiplied by the matching half
+; and paddw'd, then pmaddwd(pw_1) completes the 8-tap sum.  pmulhrsw(pw_512)
+; rounds and packuswb clips; 16 + 8 bytes stored per row.
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_24x32, 4,6,8
+    sub               r0,         3          ; src -= 3 (taps src[-3..+4])
+    mov               r4d,        r4m        ; r4d = coeffIdx
+%ifdef PIC
+    lea               r5,         [tab_LumaCoeff]
+    vpbroadcastd      m0,         [r5 + r4 * 8]
+    vpbroadcastd      m1,         [r5 + r4 * 8 + 4]
+%else
+    vpbroadcastd      m0,         [tab_LumaCoeff + r4 * 8]
+    vpbroadcastd      m1,         [tab_LumaCoeff + r4 * 8 + 4]
+%endif
+    movu              m3,         [tab_Tm + 16]
+    vpbroadcastd      m7,         [pw_1]
+    lea               r5,         [tab_Tm]
+
+    ; register map
+    ; m0 , m1 - interpolate coeff (low / high tap halves)
+    ; m3 , [r5] - shuffle order tables
+    ; m7 - pw_1
+
+    mov               r4d,        32         ; r4d = row counter
+.loop:
+    ; Row 0
+    vbroadcasti128    m4,         [r0]                        ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,         m4,     m3
+    pshufb            m4,         [r5]
+    pmaddubsw         m4,         m0
+    pmaddubsw         m5,         m1
+    paddw             m4,         m5
+    pmaddwd           m4,         m7
+
+    vbroadcasti128    m5,         [r0 + 8]
+    pshufb            m6,         m5,     m3
+    pshufb            m5,         [r5]
+    pmaddubsw         m5,         m0
+    pmaddubsw         m6,         m1
+    paddw             m5,         m6
+    pmaddwd           m5,         m7
+    packssdw          m4,         m5                          ; [17 16 15 14 07 06 05 04 13 12 11 10 03 02 01 00]
+    pmulhrsw          m4,         [pw_512]
+
+    vbroadcasti128    m2,         [r0 + 16]
+    pshufb            m5,         m2,     m3
+    pshufb            m2,         [r5]
+    pmaddubsw         m2,         m0
+    pmaddubsw         m5,         m1
+    paddw             m2,         m5
+    pmaddwd           m2,         m7
+
+    packssdw          m2,         m2
+    pmulhrsw          m2,         [pw_512]
+    packuswb          m4,         m2
+    ; reorder lanes/dwords so the 24 output pixels are contiguous
+    vpermq            m4,         m4,     11011000b
+    vextracti128      xm5,        m4,     1
+    pshufd            xm4,        xm4,    11011000b
+    pshufd            xm5,        xm5,    11011000b
+
+    movu              [r2],       xm4
+    movq              [r2 + 16],  xm5
+    add               r0,         r1
+    add               r2,         r3
+    dec               r4d
+    jnz               .loop
+    RET
+
+INIT_YMM avx2
+cglobal interp_8tap_horiz_pp_12x16, 4,6,8
+    sub               r0,        3                            ; src -= (8-tap: N/2 - 1) pixels
+    mov               r4d,       r4m
+%ifdef PIC
+    lea               r5,        [tab_LumaCoeff]
+    vpbroadcastd      m0,        [r5 + r4 * 8]
+    vpbroadcastd      m1,        [r5 + r4 * 8 + 4]
+%else
+    vpbroadcastd      m0,         [tab_LumaCoeff + r4 * 8]
+    vpbroadcastd      m1,         [tab_LumaCoeff + r4 * 8 + 4]
+%endif
+    movu              m3,         [tab_Tm + 16]
+    vpbroadcastd      m7,         [pw_1]
+    lea               r5,         [tab_Tm]
+
+    ; register map
+    ; m0 , m1 interpolate coeff
+    ; m3 , [r5] - shuffle order tables (tab_Tm + 16, tab_Tm)
+    ; m7 - pw_1
+
+    mov               r4d,        8                           ; loop counter: 16 rows, 2 rows per iteration
+.loop:
+    ; Row 0
+    vbroadcasti128    m4,         [r0]                        ;first 8 element
+    pshufb            m5,         m4,     m3
+    pshufb            m4,         [r5]
+    pmaddubsw         m4,         m0
+    pmaddubsw         m5,         m1
+    paddw             m4,         m5
+    pmaddwd           m4,         m7                          ; pw_1: sum adjacent word pairs -> dword tap sums
+
+    vbroadcasti128    m5,         [r0 + 8]                    ; element 8 to 11
+    pshufb            m6,         m5,     m3
+    pshufb            m5,         [r5]
+    pmaddubsw         m5,         m0
+    pmaddubsw         m6,         m1
+    paddw             m5,         m6
+    pmaddwd           m5,         m7
+
+    packssdw          m4,         m5                          ; [17 16 15 14 07 06 05 04 13 12 11 10 03 02 01 00]
+    pmulhrsw          m4,         [pw_512]                    ; rounding shift: mulhrs with 512 ~ (x + 32) >> 6
+
+    ;Row 1
+    vbroadcasti128    m2,         [r0 + r1]
+    pshufb            m5,         m2,     m3
+    pshufb            m2,         [r5]
+    pmaddubsw         m2,         m0
+    pmaddubsw         m5,         m1
+    paddw             m2,         m5
+    pmaddwd           m2,         m7
+
+    vbroadcasti128    m5,         [r0 + r1 + 8]
+    pshufb            m6,         m5,     m3
+    pshufb            m5,         [r5]
+    pmaddubsw         m5,         m0
+    pmaddubsw         m6,         m1
+    paddw             m5,         m6
+    pmaddwd           m5,         m7
+
+    packssdw          m2,         m5
+    pmulhrsw          m2,         [pw_512]
+    packuswb          m4,         m2
+    vpermq            m4,         m4,     11011000b           ; restore lane order after cross-lane pack
+    vextracti128      xm5,        m4,     1
+    pshufd            xm4,        xm4,    11011000b
+    pshufd            xm5,        xm5,    11011000b
+
+    movq              [r2],       xm4                         ; store 8 + 4 pixels per row (width 12)
+    pextrd            [r2+8],     xm4,    2
+    movq              [r2 + r3],  xm5
+    pextrd            [r2+r3+8],  xm5,    2
+    lea               r0,         [r0 + r1 * 2]
+    lea               r2,         [r2 + r3 * 2]
+    dec               r4d
+    jnz              .loop
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_16xN(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_PP_16xN_AVX2 2
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_%1x%2, 4, 6, 7
+    mov               r4d,          r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m6,           [pw_512]
+    mova              m1,           [interp4_horiz_shuf1]
+    vpbroadcastd      m2,           [pw_1]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec               r0                                        ; src -= 1 (4-tap: N/2 - 1)
+    mov               r4d,          %2/2                        ; 2 rows per iteration
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2                          ; pw_1: sum adjacent word pairs -> 4-tap dword sums
+    vbroadcasti128    m4,           [r0 + 4]                    ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           m6                          ; rounding shift: mulhrs with 512 ~ (x + 32) >> 6
+
+    ; Row 1
+    vbroadcasti128    m4,           [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    vbroadcasti128    m5,           [r0 + r1 + 4]               ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           m6
+
+    packuswb          m3,           m4
+    vpermq            m3,           m3,      11011000b          ; restore lane order after cross-lane pack
+
+    vextracti128      xm4,          m3,       1
+    movu              [r2],         xm3
+    movu              [r2 + r3],    xm4
+    lea               r2,           [r2 + r3 * 2]
+    lea               r0,           [r0 + r1 * 2]
+    dec               r4d
+    jnz               .loop
+    RET
+%endmacro
+
+    IPFILTER_CHROMA_PP_16xN_AVX2 16 , 8
+    IPFILTER_CHROMA_PP_16xN_AVX2 16 , 32
+    IPFILTER_CHROMA_PP_16xN_AVX2 16 , 12
+    IPFILTER_CHROMA_PP_16xN_AVX2 16 , 4
+    IPFILTER_CHROMA_PP_16xN_AVX2 16 , 64
+    IPFILTER_CHROMA_PP_16xN_AVX2 16 , 24
+
+%macro IPFILTER_LUMA_PS_64xN_AVX2 1
+INIT_YMM avx2
+cglobal interp_8tap_horiz_ps_64x%1, 4, 7, 8
+    mov                         r5d,               r5m
+    mov                         r4d,               r4m
+%ifdef PIC
+    lea                         r6,                [tab_LumaCoeff]
+    vpbroadcastq                m0,                [r6 + r4 * 8]
+%else
+    vpbroadcastq                m0,                [tab_LumaCoeff + r4 * 8]
+%endif
+    mova                        m6,                [tab_Lm + 32]
+    mova                        m1,                [tab_Lm]
+    mov                         r4d,               %1                           ;height
+    add                         r3d,               r3d                          ; dst stride in bytes (int16_t output)
+    vbroadcasti128              m2,                [pw_1]
+    mova                        m7,                [interp8_hps_shuf]
+
+    ; register map
+    ; m0      - interpolate coeff
+    ; m1 , m6 - shuffle order table
+    ; m2      - pw_1 (pw_2000 is addressed through r6 inside the loop)
+
+    sub                         r0,                3
+    test                        r5d,               r5d
+    jz                          .label
+    lea                         r6,                [r1 * 3]
+    sub                         r0,                r6                           ; r0(src)-r6
+    add                         r4d,               7                            ; blkheight += N - 1
+
+.label
+    lea                         r6,                [pw_2000]
+.loop
+    ; Row 0
+    vbroadcasti128              m3,                [r0]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 4 to 7)
+    pshufb                      m3,                m1                           ; shuffled based on the col order tab_Lm row 0 (col 0 to 3)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+
+    vbroadcasti128              m4,                [r0 + 8]
+    pshufb                      m5,                m4,            m6            ;row 0 (col 12 to 15)
+    pshufb                      m4,                m1                           ;row 0 (col 8 to 11)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m2
+    pmaddwd                     m5,                m2
+    packssdw                    m4,                m5
+    pmaddwd                     m3,                m2                           ; second pw_1 pass: fold the two 4-tap halves into 8-tap sums
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+    vpermd                      m3,                m7,               m3
+    psubw                       m3,                [r6]                        ; subtract pw_2000 offset (ps output)
+    movu                        [r2],              m3                          ;row 0
+
+    vbroadcasti128              m3,                [r0 + 16]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 20 to 23)
+    pshufb                      m3,                m1                           ; row 0 (col 16 to 19)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+
+    vbroadcasti128              m4,                [r0 + 24]
+    pshufb                      m5,                m4,            m6            ;row 0 (col 28 to 31)
+    pshufb                      m4,                m1                           ;row 0 (col 24 to 27)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m2
+    pmaddwd                     m5,                m2
+    packssdw                    m4,                m5
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+    vpermd                      m3,                m7,               m3
+    psubw                       m3,                [r6]
+    movu                        [r2 + 32],         m3                          ;row 0
+
+    vbroadcasti128              m3,                [r0 + 32]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 36 to 39)
+    pshufb                      m3,                m1                           ; row 0 (col 32 to 35)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+
+    vbroadcasti128              m4,                [r0 + 40]
+    pshufb                      m5,                m4,            m6            ;row 0 (col 44 to 47)
+    pshufb                      m4,                m1                           ;row 0 (col 40 to 43)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m2
+    pmaddwd                     m5,                m2
+    packssdw                    m4,                m5
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+    vpermd                      m3,                m7,               m3
+    psubw                       m3,                [r6]
+    movu                        [r2 + 64],         m3                          ;row 0
+    vbroadcasti128              m3,                [r0 + 48]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 52 to 55)
+    pshufb                      m3,                m1                           ; row 0 (col 48 to 51)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+
+    vbroadcasti128              m4,                [r0 + 56]
+    pshufb                      m5,                m4,            m6            ;row 0 (col 60 to 63)
+    pshufb                      m4,                m1                           ;row 0 (col 56 to 59)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m2
+    pmaddwd                     m5,                m2
+    packssdw                    m4,                m5
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4
+    vpermd                      m3,                m7,               m3
+    psubw                       m3,                [r6]
+    movu                        [r2 + 96],         m3                          ;row 0
+
+    add                          r0,                r1
+    add                          r2,                r3
+    dec                          r4d
+    jnz                         .loop
+    RET
+%endmacro
+
+    IPFILTER_LUMA_PS_64xN_AVX2 64
+    IPFILTER_LUMA_PS_64xN_AVX2 48
+    IPFILTER_LUMA_PS_64xN_AVX2 32
+    IPFILTER_LUMA_PS_64xN_AVX2 16
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_8xN(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_PS_8xN_AVX2 1
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_8x%1, 4,7,6
+    mov                r4d,             r4m
+    mov                r5d,             r5m
+    add                r3d,             r3d                    ; dst stride in bytes (int16_t output)
+
+%ifdef PIC
+    lea                r6,              [tab_ChromaCoeff]
+    vpbroadcastd       m0,              [r6 + r4 * 4]
+%else
+    vpbroadcastd       m0,              [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128     m2,              [pw_1]
+    vbroadcasti128     m5,              [pw_2000]
+    mova               m1,              [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    mov                r6d,             %1/2                   ; 2 rows per iteration
+    dec                r0                                      ; src -= 1 (4-tap: N/2 - 1)
+    test               r5d,             r5d
+    jz                 .loop
+    sub                r0 ,             r1                     ; row extension: start one row above
+    inc                r6d                                     ; one extra iteration; final row handled after the loop
+
+.loop
+    ; Row 0
+    vbroadcasti128     m3,              [r0]
+    pshufb             m3,              m1
+    pmaddubsw          m3,              m0
+    pmaddwd            m3,              m2
+
+    ; Row 1
+    vbroadcasti128     m4,              [r0 + r1]
+    pshufb             m4,              m1
+    pmaddubsw          m4,              m0
+    pmaddwd            m4,              m2
+    packssdw           m3,              m4
+    psubw              m3,              m5                     ; subtract pw_2000 offset (ps output)
+    vpermq             m3,              m3,          11011000b
+    vextracti128       xm4,             m3,          1
+    movu               [r2],            xm3
+    movu               [r2 + r3],       xm4
+
+    lea                r2,              [r2 + r3 * 2]
+    lea                r0,              [r0 + r1 * 2]
+    dec                r6d
+    jnz                .loop
+    test               r5d,             r5d
+    jz                 .end
+
+    ; last row (only when isRowExt)
+    vbroadcasti128     m3,              [r0]
+    pshufb             m3,              m1
+    pmaddubsw          m3,              m0
+    pmaddwd            m3,              m2
+    packssdw           m3,              m3
+    psubw              m3,              m5
+    vpermq             m3,              m3,          11011000b
+    movu               [r2],            xm3
+.end
+    RET
+%endmacro
+
+    IPFILTER_CHROMA_PS_8xN_AVX2  2
+    IPFILTER_CHROMA_PS_8xN_AVX2  32
+    IPFILTER_CHROMA_PS_8xN_AVX2  16
+    IPFILTER_CHROMA_PS_8xN_AVX2  6
+    IPFILTER_CHROMA_PS_8xN_AVX2  4
+    IPFILTER_CHROMA_PS_8xN_AVX2  12
+    IPFILTER_CHROMA_PS_8xN_AVX2  64
+
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_2x4, 4, 7, 3
+    mov                r4d,            r4m
+    mov                r5d,            r5m
+    add                r3d,            r3d                    ; dst stride in bytes (int16_t output)
+%ifdef PIC
+    lea                r6,             [tab_ChromaCoeff]
+    vpbroadcastd       m0,             [r6 + r4 * 4]
+%else
+    vpbroadcastd       m0,             [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova               xm3,            [pw_2000]
+    dec                r0                                      ; src -= 1 (4-tap: N/2 - 1)
+    test               r5d,            r5d
+    jz                 .label
+    sub                r0,             r1                     ; row extension: start one row above
+
+.label
+    lea                r6,             [r1 * 3]
+    movq               xm1,            [r0]
+    movhps             xm1,            [r0 + r1]
+    movq               xm2,            [r0 + r1 * 2]
+    movhps             xm2,            [r0 + r6]
+
+    vinserti128        m1,             m1,          xm2,          1
+    pshufb             m1,             [interp4_hpp_shuf]
+    pmaddubsw          m1,             m0
+    pmaddwd            m1,             [pw_1]
+    vextracti128       xm2,            m1,          1
+    packssdw           xm1,            xm2
+    psubw              xm1,            xm3                    ; subtract pw_2000 offset (ps output)
+
+    lea                r4,             [r3 * 3]
+    movd               [r2],           xm1
+    pextrd             [r2 + r3],      xm1,         1
+    pextrd             [r2 + r3 * 2],  xm1,         2
+    pextrd             [r2 + r4],      xm1,         3
+
+    test               r5d,            r5d
+    jz                .end
+    lea                r2,             [r2 + r3 * 4]
+    lea                r0,             [r0 + r1 * 4]
+
+    ; 3 extra rows for the row extension
+    movq               xm1,            [r0]
+    movhps             xm1,            [r0 + r1]
+    movq               xm2,            [r0 + r1 * 2]
+    vinserti128        m1,             m1,          xm2,          1
+    pshufb             m1,             [interp4_hpp_shuf]
+    pmaddubsw          m1,             m0
+    pmaddwd            m1,             [pw_1]
+    vextracti128       xm2,            m1,          1
+    packssdw           xm1,            xm2
+    psubw              xm1,            xm3
+
+    movd               [r2],           xm1
+    pextrd             [r2 + r3],      xm1,         1
+    pextrd             [r2 + r3 * 2],  xm1,         2
+.end
+    RET
+
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_2x8, 4, 7, 7
+    mov               r4d,           r4m
+    mov               r5d,           r5m
+    add               r3d,           r3d                      ; dst stride in bytes (int16_t output)
+
+%ifdef PIC
+    lea               r6,            [tab_ChromaCoeff]
+    vpbroadcastd      m0,            [r6 + r4 * 4]
+%else
+    vpbroadcastd      m0,            [tab_ChromaCoeff + r4 * 4]
+%endif
+    vbroadcasti128    m6,            [pw_2000]
+    test              r5d,            r5d
+    jz                .label
+    sub               r0,             r1                      ; row extension: start one row above
+
+.label
+    mova              m4,            [interp4_hpp_shuf]
+    mova              m5,            [pw_1]
+    dec               r0                                      ; src -= 1 (4-tap: N/2 - 1)
+    lea               r4,            [r1 * 3]
+    movq              xm1,           [r0]                                   ;row 0
+    movhps            xm1,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m1,            m1,          xm2,          1
+    lea               r0,            [r0 + r1 * 4]
+    movq              xm3,           [r0]
+    movhps            xm3,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m3,            m3,          xm2,          1
+
+    pshufb            m1,            m4
+    pshufb            m3,            m4
+    pmaddubsw         m1,            m0
+    pmaddubsw         m3,            m0
+    pmaddwd           m1,            m5
+    pmaddwd           m3,            m5
+    packssdw          m1,            m3
+    psubw             m1,            m6                      ; subtract pw_2000 offset (ps output)
+
+    lea               r4,            [r3 * 3]
+    vextracti128      xm2,           m1,          1
+
+    movd              [r2],          xm1
+    pextrd            [r2 + r3],     xm1,         1
+    movd              [r2 + r3 * 2], xm2
+    pextrd            [r2 + r4],     xm2,         1
+    lea               r2,            [r2 + r3 * 4]
+    pextrd            [r2],          xm1,         2
+    pextrd            [r2 + r3],     xm1,         3
+    pextrd            [r2 + r3 * 2], xm2,         2
+    pextrd            [r2 + r4],     xm2,         3
+    test              r5d,            r5d
+    jz                .end
+
+    lea               r0,            [r0 + r1 * 4]
+    lea               r2,            [r2 + r3 * 4]
+    movq              xm1,           [r0]                                   ; 3 extra rows for the row extension
+    movhps            xm1,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    vinserti128       m1,            m1,          xm2,          1
+    pshufb            m1,            m4
+    pmaddubsw         m1,            m0
+    pmaddwd           m1,            m5
+    packssdw          m1,            m1
+    psubw             m1,            m6
+    vextracti128      xm2,           m1,          1
+
+    movd              [r2],          xm1
+    pextrd            [r2 + r3],     xm1,         1
+    movd              [r2 + r3 * 2], xm2
+.end
+    RET
+
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_12x16, 4, 6, 7
+    mov               r4d,          r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m6,           [pw_512]
+    mova              m1,           [interp4_horiz_shuf1]
+    vpbroadcastd      m2,           [pw_1]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec               r0                                      ; src -= 1 (4-tap: N/2 - 1)
+    mov               r4d,          8                         ; loop counter: 16 rows, 2 rows per iteration
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 4]                    ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           m6                          ; rounding shift: mulhrs with 512 ~ (x + 32) >> 6
+
+    ; Row 1
+    vbroadcasti128    m4,           [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    vbroadcasti128    m5,           [r0 + r1 + 4]               ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           m6
+
+    packuswb          m3,           m4
+    vpermq            m3,           m3,      11011000b          ; restore lane order after cross-lane pack
+
+    vextracti128      xm4,          m3,       1
+    movq              [r2],         xm3                         ; store 8 + 4 pixels per row (width 12)
+    pextrd            [r2+8],       xm3,      2
+    movq              [r2 + r3],    xm4
+    pextrd            [r2 + r3 + 8],xm4,      2
+    lea               r2,           [r2 + r3 * 2]
+    lea               r0,           [r0 + r1 * 2]
+    dec               r4d
+    jnz               .loop
+    RET
+
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_24x32, 4,6,7
+    mov              r4d,           r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m1,           [interp4_horiz_shuf1]
+    vpbroadcastd      m2,           [pw_1]
+    mova              m6,           [pw_512]
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec               r0                                      ; src -= 1 (4-tap: N/2 - 1)
+    mov               r4d,          32                        ; loop counter: 32 rows, one row of 24 pixels per iteration
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 4]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           m6                          ; rounding shift: mulhrs with 512 ~ (x + 32) >> 6
+
+    vbroadcasti128    m4,           [r0 + 16]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    vbroadcasti128    m5,           [r0 + 20]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           m6
+
+    packuswb          m3,           m4
+    vpermq            m3,           m3,      11011000b          ; restore lane order after cross-lane pack
+
+    vextracti128      xm4,          m3,       1
+    movu              [r2],         xm3                         ; store 16 + 8 pixels (width 24)
+    movq              [r2 + 16],    xm4
+    add               r2,           r3
+    add               r0,           r1
+    dec               r4d
+    jnz               .loop
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_6x8(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------;
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_6x8, 4,7,6
+    mov                r4d,            r4m
+    mov                r5d,            r5m
+    add                r3d,            r3d                    ; dst stride in bytes (int16_t output)
+
+%ifdef PIC
+    lea                r6,             [tab_ChromaCoeff]
+    vpbroadcastd       m0,             [r6 + r4 * 4]
+%else
+    vpbroadcastd       m0,             [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128     m2,             [pw_1]
+    vbroadcasti128     m5,             [pw_2000]
+    mova               m1,             [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    mov               r6d,             8/2                    ; 2 rows per iteration
+    dec               r0                                      ; src -= 1 (4-tap: N/2 - 1)
+    test              r5d,             r5d
+    jz                .loop
+    sub               r0 ,             r1                     ; row extension: start one row above
+    inc               r6d                                     ; one extra iteration; final row handled after the loop
+
+.loop
+    ; Row 0
+    vbroadcasti128    m3,              [r0]
+    pshufb            m3,              m1
+    pmaddubsw         m3,              m0
+    pmaddwd           m3,              m2
+
+    ; Row 1
+    vbroadcasti128    m4,              [r0 + r1]
+    pshufb            m4,              m1
+    pmaddubsw         m4,              m0
+    pmaddwd           m4,              m2
+    packssdw          m3,              m4
+    psubw             m3,              m5                     ; subtract pw_2000 offset (ps output)
+    vpermq            m3,              m3,          11011000b
+    vextracti128      xm4,             m3,          1
+    movq              [r2],            xm3                    ; store 4 + 2 coefficients per row (width 6)
+    pextrd            [r2 + 8],        xm3,         2
+    movq              [r2 + r3],       xm4
+    pextrd            [r2 + r3 + 8],   xm4,         2
+    lea               r2,              [r2 + r3 * 2]
+    lea               r0,              [r0 + r1 * 2]
+    dec               r6d
+    jnz              .loop
+    test              r5d,             r5d
+    jz               .end
+
+    ; last row (only when isRowExt)
+    vbroadcasti128    m3,              [r0]
+    pshufb            m3,              m1
+    pmaddubsw         m3,              m0
+    pmaddwd           m3,              m2
+    packssdw          m3,              m3
+    psubw             m3,              m5
+    vextracti128      xm4,             m3,          1
+    movq              [r2],            xm3
+    movd              [r2+8],          xm4
+.end
+    RET
+
+INIT_YMM avx2
+cglobal interp_8tap_horiz_ps_12x16, 6, 7, 8
+    mov                         r5d,               r5m
+    mov                         r4d,               r4m
+%ifdef PIC
+    lea                         r6,                [tab_LumaCoeff]
+    vpbroadcastq                m0,                [r6 + r4 * 8]
+%else
+    vpbroadcastq                m0,                [tab_LumaCoeff + r4 * 8]
+%endif
+    mova                        m6,                [tab_Lm + 32]
+    mova                        m1,                [tab_Lm]
+    add                         r3d,               r3d                          ; dst stride in bytes (int16_t output)
+    vbroadcasti128              m2,                [pw_2000]
+    mov                         r4d,                16                          ; loop counter: height = 16
+    vbroadcasti128              m7,                [pw_1]
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - pw_2000
+    ; m7 - pw_1
+
+    mova                        m5,                [interp8_hps_shuf]
+    sub                         r0,                3
+    test                        r5d,               r5d
+    jz                          .loop
+    lea                         r6,                [r1 * 3]                     ; r6 = (N / 2 - 1) * srcStride
+    sub                         r0,                r6                           ; r0(src)-r6
+    add                         r4d,                7
+.loop
+
+    ; Row 0
+
+    vbroadcasti128              m3,                [r0]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m3,        m6
+    pshufb                      m3,                m1                           ; shuffled based on the col order tab_Lm
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+
+    vbroadcasti128              m4,                [r0 + 8]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m1
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m4,                m7
+    packssdw                    m4,                m4
+
+    pmaddwd                     m3,                m7                           ; second pw_1 pass: fold the two 4-tap halves into 8-tap sums
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+
+    vpermd                      m3,                m5,               m3
+    psubw                       m3,                m2                           ; subtract pw_2000 offset (ps output)
+
+    vextracti128                xm4,               m3,               1
+    movu                        [r2],              xm3                          ;row 0
+    movq                        [r2 + 16],         xm4                          ;row 1
+
+    add                         r0,                r1
+    add                         r2,                r3
+    dec                         r4d
+    jnz                         .loop
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_8tap_horiz_ps_24x32(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+; 8-tap luma horizontal filter, ps output (int16_t intermediates; the pw_2000 offset is subtracted before each store).
+; When isRowExt != 0, src is backed up (N/2 - 1) rows and 7 extra rows are filtered for a following vertical pass.
+;-----------------------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_8tap_horiz_ps_24x32, 4, 7, 8
+    mov                         r5d,               r5m
+    mov                         r4d,               r4m
+%ifdef PIC
+    lea                         r6,                [tab_LumaCoeff]
+    vpbroadcastq                m0,                [r6 + r4 * 8]
+%else
+    vpbroadcastq                m0,                [tab_LumaCoeff + r4 * 8]
+%endif
+    mova                        m6,                [tab_Lm + 32]
+    mova                        m1,                [tab_Lm]
+    mov                         r4d,               32                           ;height
+    add                         r3d,               r3d                          ;dst stride in bytes (int16_t output)
+    vbroadcasti128              m2,                [pw_2000]
+    vbroadcasti128              m7,                [pw_1]
+
+    ; register map
+    ; m0      - interpolate coeff
+    ; m1 , m6 - shuffle order table
+    ; m2      - pw_2000
+    ; m7      - pw_1 (pmaddwd operand used to fold adjacent word sums)
+
+    sub                         r0,                3                            ;src -= N/2 - 1 columns
+    test                        r5d,               r5d
+    jz                          .label
+    lea                         r6,                [r1 * 3]                     ; r6 = (N / 2 - 1) * srcStride
+    sub                         r0,                r6                           ; r0(src)-r6
+    add                         r4d,               7                            ; blkheight += N - 1 extended rows for the vertical pass
+
+.label
+    lea                         r6,                [interp8_hps_shuf]
+.loop
+    ; Row 0
+    vbroadcasti128              m3,                [r0]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 4 to 7)
+    pshufb                      m3,                m1                           ; shuffled based on the col order tab_Lm row 0 (col 0 to 3)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+
+    vbroadcasti128              m4,                [r0 + 8]                     ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m5,                m4,            m6            ;row 1 (col 4 to 7)
+    pshufb                      m4,                m1                           ;row 1 (col 0 to 3)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m7
+    pmaddwd                     m5,                m7
+    packssdw                    m4,                m5
+    ; NOTE(review): the second pmaddwd/packssdw pass below folds the already-packed
+    ; word pairs once more to complete the 8-tap sum — verify against the C reference
+    ; before restructuring this reduction.
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+    mova                        m5,                [r6]
+    vpermd                      m3,                m5,               m3
+    psubw                       m3,                m2
+    movu                        [r2],              m3                          ;cols 0 to 15
+
+    vbroadcasti128              m3,                [r0 + 16]
+    pshufb                      m4,                m3,          m6
+    pshufb                      m3,                m1
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+    pmaddwd                     m3,                m7
+    pmaddwd                     m4,                m7
+    packssdw                    m3,                m4
+    mova                        m4,                [r6]
+    vpermd                      m3,                m4,            m3
+    psubw                       m3,                m2
+    movu                        [r2 + 32],         xm3                          ;cols 16 to 23
+
+    add                         r0,                r1
+    add                         r2,                r3
+    dec                         r4d
+    jnz                         .loop
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_24x32(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+; 4-tap chroma horizontal filter, ps output (int16_t, pw_2000 offset subtracted).
+; isRowExt != 0 backs src up one row and filters N - 1 = 3 extra rows for a vertical pass.
+;-----------------------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_24x32, 4,7,6
+    mov                r4d,            r4m
+    mov                r5d,            r5m
+    add                r3d,            r3d                         ;dst stride in bytes (int16_t output)
+%ifdef PIC
+    lea                r6,             [tab_ChromaCoeff]
+    vpbroadcastd       m0,             [r6 + r4 * 4]
+%else
+    vpbroadcastd       m0,             [tab_ChromaCoeff + r4 * 4]
+%endif
+    vbroadcasti128     m2,             [pw_1]
+    vbroadcasti128     m5,             [pw_2000]
+    mova               m1,             [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    mov                r6d,            32                          ;height
+    dec                r0                                          ;src -= N/2 - 1 columns
+    test               r5d,            r5d
+    je                 .loop
+    sub                r0 ,            r1                          ;back up one source row
+    add                r6d ,           3                           ;blkheight += N - 1
+
+.loop
+    ; Row 0
+    vbroadcasti128     m3,             [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb             m3,             m1
+    pmaddubsw          m3,             m0
+    pmaddwd            m3,             m2
+    vbroadcasti128     m4,             [r0 + 8]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb             m4,             m1
+    pmaddubsw          m4,             m0
+    pmaddwd            m4,             m2
+    packssdw           m3,             m4
+    psubw              m3,             m5
+    vpermq             m3,             m3,          11011000b
+    movu               [r2],           m3                          ;cols 0 to 15
+
+    vbroadcasti128     m3,             [r0 + 16]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb             m3,             m1
+    pmaddubsw          m3,             m0
+    pmaddwd            m3,             m2
+    packssdw           m3,             m3
+    psubw              m3,             m5
+    vpermq             m3,             m3,          11011000b
+    movu               [r2 + 32],      xm3                         ;cols 16 to 23
+
+    add                r2,             r3
+    add                r0,             r1
+    dec                r6d
+    jnz                .loop
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------
+;macro FILTER_H8_W8_16N_AVX2
+; Filters one source row of 16 pixels with the 8-tap luma horizontal filter and
+; stores 16 int16_t intermediates (32 bytes) at [r4].
+; Inputs:  r0 = src, m0 = coeff, m1/m6 = pshufb order tables, m2 = pw_1, m8 = pw_2000.
+; Clobbers: m3, m4, m5.
+;-----------------------------------------------------------------------------------------------------------------------
+%macro  FILTER_H8_W8_16N_AVX2 0
+    vbroadcasti128              m3,                [r0]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m4,                m3,             m6           ; row 0 (col 4 to 7)
+    pshufb                      m3,                m1                           ; shuffled based on the col order tab_Lm row 0 (col 0 to 3)
+    pmaddubsw                   m3,                m0
+    pmaddubsw                   m4,                m0
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4                         ; DWORD [R1D R1C R0D R0C R1B R1A R0B R0A]
+
+    vbroadcasti128              m4,                [r0 + 8]                         ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb                      m5,                m4,            m6            ;row 1 (col 4 to 7)
+    pshufb                      m4,                m1                           ;row 1 (col 0 to 3)
+    pmaddubsw                   m4,                m0
+    pmaddubsw                   m5,                m0
+    pmaddwd                     m4,                m2
+    pmaddwd                     m5,                m2
+    packssdw                    m4,                m5                         ; DWORD [R3D R3C R2D R2C R3B R3A R2B R2A]
+
+    pmaddwd                     m3,                m2
+    pmaddwd                     m4,                m2
+    packssdw                    m3,                m4                         ; all rows and col completed.
+
+    ; NOTE(review): absolute [interp8_hps_shuf] reference — the function bodies
+    ; elsewhere load this table via lea for PIC; confirm this macro is safe in PIC builds.
+    mova                        m5,                [interp8_hps_shuf]
+    vpermd                      m3,                m5,               m3
+    psubw                       m3,                m8
+
+    vextracti128                xm4,               m3,               1
+    mova                        [r4],              xm3
+    mova                        [r4 + 16],         xm4
+    %endmacro
+
+;-----------------------------------------------------------------------------
+; void interp_8tap_hv_pp_16x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int idxX, int idxY)
+; Combined horizontal+vertical 8-tap luma filter for one 16x16 block (x86-64 only).
+; Pass 1 filters 16+7 source rows horizontally into a stack buffer of 32-byte rows;
+; pass 2 filters that buffer vertically using PROCESS_LUMA_AVX2_W8_16R (defined elsewhere).
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_hv_pp_16x16, 4, 10, 15, 0-31*32
+%define stk_buf1    rsp                                                        ; intermediate buffer: 31 rows x 32 bytes
+    mov                         r4d,               r4m
+    mov                         r5d,               r5m
+%ifdef PIC
+    lea                         r6,                [tab_LumaCoeff]
+    vpbroadcastq                m0,                [r6 + r4 * 8]
+%else
+    vpbroadcastq                m0,                [tab_LumaCoeff + r4 * 8]
+%endif
+
+    xor                         r6,                 r6
+    mov                         r4,                 rsp
+    mova                        m6,                [tab_Lm + 32]
+    mova                        m1,                [tab_Lm]
+    mov                         r8,                16                           ;height
+    vbroadcasti128              m8,                [pw_2000]
+    vbroadcasti128              m2,                [pw_1]
+    sub                         r0,                3
+    lea                         r7,                [r1 * 3]                     ; r7 = (N / 2 - 1) * srcStride
+    sub                         r0,                r7                           ; r0(src)-r7
+    add                         r8,                7                            ; NOTE(review): r8 appears unused below — the loop is bounded by r6; confirm
+
+.loopH:
+    FILTER_H8_W8_16N_AVX2
+    add                         r0,                r1
+    add                         r4,                32
+    inc                         r6
+    cmp                         r6,                16+7                         ; 16 rows + 7 margin rows
+    jnz                        .loopH
+
+; vertical phase
+    xor                         r6,                r6
+    xor                         r1,                r1
+.loopV:
+
+;load necessary variables
+    mov                         r4d,               r5d          ;coeff here for vertical is r5m
+    shl                         r4d,               7            ;coeffIdx * 128 = byte offset into pw_LumaCoeffVer
+    mov                         r1d,               16
+    add                         r1d,               r1d          ;r1 = 32 = intermediate buffer row stride (bytes)
+
+ ; load intermedia buffer
+    mov                         r0,                stk_buf1
+
+    ; register mapping
+    ; r0 - src
+    ; r5 - coeff
+    ; r6 - loop_i
+
+; load coeff table
+%ifdef PIC
+    lea                          r5,                [pw_LumaCoeffVer]
+    add                          r5,                r4
+%else
+    lea                          r5,                [pw_LumaCoeffVer + r4]
+%endif
+
+    lea                          r4,                [r1*3]
+    mova                         m14,               [pd_526336]
+    lea                          r6,                [r3 * 3]
+    mov                          r9d,               16 / 8      ;two 8-column strips
+
+.loopW:
+    PROCESS_LUMA_AVX2_W8_16R sp
+    add                          r2,                 8
+    add                          r0,                 16
+    dec                          r9d
+    jnz                          .loopW
+    RET
+%endif
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_12x32(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; 4-tap chroma horizontal filter, pp output (rounded to pixels); processes two
+; rows per iteration and stores 12 pixels per row (8-byte movq + 4-byte pextrd).
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_12x32, 4, 6, 7
+    mov               r4d,          r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m6,           [pw_512]
+    mova              m1,           [interp4_horiz_shuf1]
+    vpbroadcastd      m2,           [pw_1]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec               r0                                          ;src -= N/2 - 1 columns
+    mov               r4d,          16                            ;32 rows / 2 rows per iteration
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 4]                    ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           m6                          ;round and shift by 6 (pmulhrsw with pw_512)
+
+    ; Row 1
+    vbroadcasti128    m4,           [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    vbroadcasti128    m5,           [r0 + r1 + 4]               ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           m6
+
+    packuswb          m3,           m4
+    vpermq            m3,           m3,      11011000b
+
+    vextracti128      xm4,          m3,       1
+    movq              [r2],         xm3                          ;row 0, cols 0 to 7
+    pextrd            [r2+8],       xm3,      2                  ;row 0, cols 8 to 11
+    movq              [r2 + r3],    xm4                          ;row 1, cols 0 to 7
+    pextrd            [r2 + r3 + 8],xm4,      2                  ;row 1, cols 8 to 11
+    lea               r2,           [r2 + r3 * 2]
+    lea               r0,           [r0 + r1 * 2]
+    dec               r4d
+    jnz               .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_24x64(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; 4-tap chroma horizontal filter, pp output; one 24-pixel row per iteration,
+; stored as a 16-byte movu plus an 8-byte movq.
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_24x64, 4,6,7
+    mov              r4d,           r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m1,           [interp4_horiz_shuf1]
+    vpbroadcastd      m2,           [pw_1]
+    mova              m6,           [pw_512]
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec               r0                                          ;src -= N/2 - 1 columns
+    mov               r4d,          64                            ;height
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 4]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           m6                          ;round and shift by 6 (pmulhrsw with pw_512)
+
+    vbroadcasti128    m4,           [r0 + 16]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    vbroadcasti128    m5,           [r0 + 20]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           m6
+
+    packuswb          m3,           m4
+    vpermq            m3,           m3,      11011000b
+
+    vextracti128      xm4,          m3,       1
+    movu              [r2],         xm3                          ;cols 0 to 15
+    movq              [r2 + 16],    xm4                          ;cols 16 to 23
+    add               r2,           r3
+    add               r0,           r1
+    dec               r4d
+    jnz               .loop
+    RET
+
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_2x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; 4-tap chroma horizontal filter, pp output, 2-wide by 16 rows; fully unrolled
+; in two 8-row batches.  The pextrw store order (0,1,4,5 then 2,3,6,7) undoes
+; the vinserti128 lane packing of the loads.
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_2x16, 4, 6, 6
+    mov               r4d,           r4m
+
+%ifdef PIC
+    lea               r5,            [tab_ChromaCoeff]
+    vpbroadcastd      m0,            [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,            [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m4,            [interp4_hpp_shuf]
+    mova              m5,            [pw_1]
+    dec               r0                                          ;src -= N/2 - 1 columns
+    lea               r4,            [r1 * 3]
+    ; rows 0-7: pack four 2-wide rows per xmm half, two halves per ymm
+    movq              xm1,           [r0]
+    movhps            xm1,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m1,            m1,          xm2,          1
+    lea               r0,            [r0 + r1 * 4]
+    movq              xm3,           [r0]
+    movhps            xm3,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m3,            m3,          xm2,          1
+
+    pshufb            m1,            m4
+    pshufb            m3,            m4
+    pmaddubsw         m1,            m0
+    pmaddubsw         m3,            m0
+    pmaddwd           m1,            m5
+    pmaddwd           m3,            m5
+    packssdw          m1,            m3
+    pmulhrsw          m1,            [pw_512]                    ;round and shift by 6
+    vextracti128      xm2,           m1,          1
+    packuswb          xm1,           xm2
+
+    lea               r4,            [r3 * 3]
+    pextrw            [r2],          xm1,         0
+    pextrw            [r2 + r3],     xm1,         1
+    pextrw            [r2 + r3 * 2], xm1,         4
+    pextrw            [r2 + r4],     xm1,         5
+    lea               r2,            [r2 + r3 * 4]
+    pextrw            [r2],          xm1,         2
+    pextrw            [r2 + r3],     xm1,         3
+    pextrw            [r2 + r3 * 2], xm1,         6
+    pextrw            [r2 + r4],     xm1,         7
+    lea               r2,            [r2 + r3 * 4]
+    lea               r0,            [r0 + r1 * 4]
+
+    ; rows 8-15: identical sequence for the second batch
+    lea               r4,            [r1 * 3]
+    movq              xm1,           [r0]
+    movhps            xm1,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m1,            m1,          xm2,          1
+    lea               r0,            [r0 + r1 * 4]
+    movq              xm3,           [r0]
+    movhps            xm3,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m3,            m3,          xm2,          1
+
+    pshufb            m1,            m4
+    pshufb            m3,            m4
+    pmaddubsw         m1,            m0
+    pmaddubsw         m3,            m0
+    pmaddwd           m1,            m5
+    pmaddwd           m3,            m5
+    packssdw          m1,            m3
+    pmulhrsw          m1,            [pw_512]
+    vextracti128      xm2,           m1,          1
+    packuswb          xm1,           xm2
+
+    lea               r4,            [r3 * 3]
+    pextrw            [r2],          xm1,         0
+    pextrw            [r2 + r3],     xm1,         1
+    pextrw            [r2 + r3 * 2], xm1,         4
+    pextrw            [r2 + r4],     xm1,         5
+    lea               r2,            [r2 + r3 * 4]
+    pextrw            [r2],          xm1,         2
+    pextrw            [r2 + r3],     xm1,         3
+    pextrw            [r2 + r3 * 2], xm1,         6
+    pextrw            [r2 + r4],     xm1,         7
+    RET
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_64xN(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; pp output is pixels (rounded via pw_512/pmulhrsw, packed with packuswb), not int16_t.
+; One 64-pixel row per iteration, written as two 32-byte stores.
+;-------------------------------------------------------------------------------------------------------------
+%macro IPFILTER_CHROMA_PP_64xN_AVX2 1
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_64x%1, 4,6,7
+    mov             r4d, r4m
+
+%ifdef PIC
+    lea               r5,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m1,           [interp4_horiz_shuf1]
+    vpbroadcastd      m2,           [pw_1]
+    mova              m6,           [pw_512]
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec               r0                                          ;src -= N/2 - 1 columns
+    mov               r4d,          %1                            ;height
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 4]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           m6                          ;round and shift by 6 (pmulhrsw with pw_512)
+
+    vbroadcasti128    m4,           [r0 + 16]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    vbroadcasti128    m5,           [r0 + 20]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           m6
+    packuswb          m3,           m4
+    vpermq            m3,           m3,      11011000b
+    movu              [r2],         m3                          ;cols 0 to 31
+
+    vbroadcasti128    m3,           [r0 + 32]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 36]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    packssdw          m3,           m4
+    pmulhrsw          m3,           m6
+
+    vbroadcasti128    m4,           [r0 + 48]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+    vbroadcasti128    m5,           [r0 + 52]
+    pshufb            m5,           m1
+    pmaddubsw         m5,           m0
+    pmaddwd           m5,           m2
+    packssdw          m4,           m5
+    pmulhrsw          m4,           m6
+    packuswb          m3,           m4
+    vpermq            m3,           m3,      11011000b
+    movu              [r2 + 32],         m3                     ;cols 32 to 63
+
+    add               r2,           r3
+    add               r0,           r1
+    dec               r4d
+    jnz               .loop
+    RET
+%endmacro
+
+    IPFILTER_CHROMA_PP_64xN_AVX2  64
+    IPFILTER_CHROMA_PP_64xN_AVX2  32
+    IPFILTER_CHROMA_PP_64xN_AVX2  48
+    IPFILTER_CHROMA_PP_64xN_AVX2  16
+
+;-------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_48x64(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+; pp output is pixels (rounded via pw_512/pmulhrsw, packed with packuswb), not int16_t.
+; One 48-pixel row per iteration: a 32-byte store plus a 16-byte store.
+;-------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_48x64, 4,6,7
+    mov             r4d, r4m
+
+%ifdef PIC
+    lea               r5,            [tab_ChromaCoeff]
+    vpbroadcastd      m0,            [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,            [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m1,            [interp4_horiz_shuf1]
+    vpbroadcastd      m2,            [pw_1]
+    mova              m6,            [pw_512]
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+
+    dec               r0                                          ;src -= N/2 - 1 columns
+    mov               r4d,           64                           ;height
+
+.loop:
+    ; Row 0
+    vbroadcasti128    m3,            [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,            m1
+    pmaddubsw         m3,            m0
+    pmaddwd           m3,            m2
+    vbroadcasti128    m4,            [r0 + 4]
+    pshufb            m4,            m1
+    pmaddubsw         m4,            m0
+    pmaddwd           m4,            m2
+    packssdw          m3,            m4
+    pmulhrsw          m3,            m6                          ;round and shift by 6 (pmulhrsw with pw_512)
+
+    vbroadcasti128    m4,            [r0 + 16]
+    pshufb            m4,            m1
+    pmaddubsw         m4,            m0
+    pmaddwd           m4,            m2
+    vbroadcasti128    m5,            [r0 + 20]
+    pshufb            m5,            m1
+    pmaddubsw         m5,            m0
+    pmaddwd           m5,            m2
+    packssdw          m4,            m5
+    pmulhrsw          m4,            m6
+
+    packuswb          m3,            m4
+    vpermq            m3,            m3,      q3120
+
+    movu              [r2],          m3                          ;cols 0 to 31
+
+    vbroadcasti128    m3,            [r0 + mmsize]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,            m1
+    pmaddubsw         m3,            m0
+    pmaddwd           m3,            m2
+    vbroadcasti128    m4,            [r0 + mmsize + 4]
+    pshufb            m4,            m1
+    pmaddubsw         m4,            m0
+    pmaddwd           m4,            m2
+    packssdw          m3,            m4
+    pmulhrsw          m3,            m6
+
+    vbroadcasti128    m4,            [r0 + mmsize + 16]
+    pshufb            m4,            m1
+    pmaddubsw         m4,            m0
+    pmaddwd           m4,            m2
+    vbroadcasti128    m5,            [r0 + mmsize + 20]
+    pshufb            m5,            m1
+    pmaddubsw         m5,            m0
+    pmaddwd           m5,            m2
+    packssdw          m4,            m5
+    pmulhrsw          m4,            m6
+
+    packuswb          m3,            m4
+    vpermq            m3,            m3,      q3120
+    movu              [r2 + mmsize], xm3                         ;cols 32 to 47 (16 bytes)
+
+    add               r2,            r3
+    add               r0,            r1
+    dec               r4d
+    jnz               .loop
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_48x64(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+; 4-tap chroma horizontal filter, ps output (int16_t, pw_2000 offset subtracted);
+; one 48-pixel row per iteration written as three 32-byte stores.
+;-----------------------------------------------------------------------------------------------------------------------------;
+
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_48x64, 4,7,6
+    mov             r4d, r4m
+    mov             r5d, r5m
+    add             r3d, r3d                                     ;dst stride in bytes (int16_t output)
+
+%ifdef PIC
+    lea               r6,           [tab_ChromaCoeff]
+    vpbroadcastd      m0,           [r6 + r4 * 4]
+%else
+    vpbroadcastd      m0,           [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    vbroadcasti128     m2,          [pw_1]
+    vbroadcasti128     m5,          [pw_2000]
+    mova               m1,          [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    mov               r6d,          64                           ;height
+    dec               r0                                         ;src -= N/2 - 1 columns
+    test              r5d,          r5d
+    je                .loop
+    sub               r0 ,          r1                           ;back up one source row
+    add               r6d ,         3                            ;blkheight += N - 1
+
+.loop
+    ; Row 0
+    vbroadcasti128    m3,           [r0]                           ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 8]                       ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           m5
+    vpermq            m3,           m3,          q3120
+    movu              [r2],         m3                            ;cols 0 to 15
+
+    vbroadcasti128    m3,           [r0 + 16]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 24]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           m5
+    vpermq            m3,           m3,          q3120
+    movu              [r2 + 32],    m3                            ;cols 16 to 31
+
+    vbroadcasti128    m3,           [r0 + 32]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,           m1
+    pmaddubsw         m3,           m0
+    pmaddwd           m3,           m2
+    vbroadcasti128    m4,           [r0 + 40]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,           m1
+    pmaddubsw         m4,           m0
+    pmaddwd           m4,           m2
+
+    packssdw          m3,           m4
+    psubw             m3,           m5
+    vpermq            m3,           m3,          q3120
+    movu              [r2 + 64],    m3                            ;cols 32 to 47
+
+    add               r2,          r3
+    add               r0,          r1
+    dec               r6d
+    jnz               .loop
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_24x64(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+; 4-tap chroma horizontal filter, ps output (int16_t, pw_2000 offset subtracted);
+; one 24-pixel row per iteration: a 32-byte store (cols 0-15) plus a 16-byte store (cols 16-23).
+;-----------------------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_24x64, 4,7,6
+    mov                r4d,            r4m
+    mov                r5d,            r5m
+    add                r3d,            r3d                        ;dst stride in bytes (int16_t output)
+%ifdef PIC
+    lea                r6,             [tab_ChromaCoeff]
+    vpbroadcastd       m0,             [r6 + r4 * 4]
+%else
+    vpbroadcastd       m0,             [tab_ChromaCoeff + r4 * 4]
+%endif
+    vbroadcasti128     m2,             [pw_1]
+    vbroadcasti128     m5,             [pw_2000]
+    mova               m1,             [tab_Tm]
+
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    mov                r6d,            64                         ;height
+    dec                r0                                         ;src -= N/2 - 1 columns
+    test               r5d,            r5d
+    je                 .loop
+    sub                r0 ,            r1                         ;back up one source row
+    add                r6d ,           3                          ;blkheight += N - 1
+
+.loop
+    ; Row 0
+    vbroadcasti128     m3,             [r0]                          ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb             m3,             m1
+    pmaddubsw          m3,             m0
+    pmaddwd            m3,             m2
+    vbroadcasti128     m4,             [r0 + 8]                      ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb             m4,             m1
+    pmaddubsw          m4,             m0
+    pmaddwd            m4,             m2
+    packssdw           m3,             m4
+    psubw              m3,             m5
+    vpermq             m3,             m3,          q3120
+    movu               [r2],           m3                           ;cols 0 to 15
+
+    vbroadcasti128     m3,             [r0 + 16]                     ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb             m3,             m1
+    pmaddubsw          m3,             m0
+    pmaddwd            m3,             m2
+    packssdw           m3,             m3
+    psubw              m3,             m5
+    vpermq             m3,             m3,          q3120
+    movu               [r2 + 32],      xm3                          ;cols 16 to 23 (16 bytes)
+
+    add                r2,             r3
+    add                r0,             r1
+    dec                r6d
+    jnz                .loop
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_ps_2x16(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx, int isRowExt)
+;-----------------------------------------------------------------------------------------------------------------------------
+; AVX2 4-tap chroma horizontal interpolation, pixel in / 16-bit short out,
+; for a 2-wide, 16-row block.  Eight rows are packed into one ymm per batch
+; (movq/movhps pairs + vinserti128) and filtered together; each movd/pextrd
+; then stores one 4-byte (2-sample) output row.  When isRowExt != 0 the
+; source starts one row above and 3 extra rows are emitted at the end.
+INIT_YMM avx2
+cglobal interp_4tap_horiz_ps_2x16, 4, 7, 7
+    mov               r4d,           r4m
+    mov               r5d,           r5m
+    ; dst stride in int16_t units -> bytes
+    add               r3d,           r3d
+
+%ifdef PIC
+    lea               r6,            [tab_ChromaCoeff]
+    vpbroadcastd      m0,            [r6 + r4 * 4]
+%else
+    vpbroadcastd      m0,            [tab_ChromaCoeff + r4 * 4]
+%endif
+    vbroadcasti128    m6,            [pw_2000]
+    ; isRowExt: back up one source row before the main body
+    test              r5d,            r5d
+    jz                .label
+    sub               r0,             r1
+
+; NOTE(review): labels in this function lack the conventional ':' - NASM
+; accepts them but warns (orphan-labels)
+.label
+    mova              m4,            [interp4_hps_shuf]
+    mova              m5,            [pw_1]
+    ; step back one pixel for the 4-tap window
+    dec               r0
+    lea               r4,            [r1 * 3]
+    ; --- rows 0-7: pack 8 rows (2 pixels each) into one ymm pair ---
+    movq              xm1,           [r0]                                   ;row 0
+    movhps            xm1,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m1,            m1,           xm2,          1
+    lea               r0,            [r0 + r1 * 4]
+    movq              xm3,           [r0]
+    movhps            xm3,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m3,            m3,           xm2,          1
+
+    pshufb            m1,            m4
+    pshufb            m3,            m4
+    pmaddubsw         m1,            m0
+    pmaddubsw         m3,            m0
+    ; pmaddwd with pw_1 = horizontal pair sum
+    pmaddwd           m1,            m5
+    pmaddwd           m3,            m5
+    packssdw          m1,            m3
+    ; apply -2000h output bias
+    psubw             m1,            m6
+
+    lea               r4,            [r3 * 3]
+    vextracti128      xm2,           m1,           1
+
+    ; scatter the 8 filtered rows, 4 bytes (2 shorts) per row
+    movd              [r2],          xm1
+    pextrd            [r2 + r3],     xm1,          1
+    movd              [r2 + r3 * 2], xm2
+    pextrd            [r2 + r4],     xm2,          1
+    lea               r2,            [r2 + r3 * 4]
+    pextrd            [r2],          xm1,          2
+    pextrd            [r2 + r3],     xm1,          3
+    pextrd            [r2 + r3 * 2], xm2,          2
+    pextrd            [r2 + r4],     xm2,          3
+
+    ; --- rows 8-15: same pattern as above ---
+    lea               r0,            [r0 + r1 * 4]
+    lea               r2,            [r2 + r3 * 4]
+    lea               r4,            [r1 * 3]
+    movq              xm1,           [r0]
+    movhps            xm1,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m1,            m1,          xm2,           1
+    lea               r0,            [r0 + r1 * 4]
+    movq              xm3,           [r0]
+    movhps            xm3,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    movhps            xm2,           [r0 + r4]
+    vinserti128       m3,            m3,          xm2,           1
+
+    pshufb            m1,            m4
+    pshufb            m3,            m4
+    pmaddubsw         m1,            m0
+    pmaddubsw         m3,            m0
+    pmaddwd           m1,            m5
+    pmaddwd           m3,            m5
+    packssdw          m1,            m3
+    psubw             m1,            m6
+
+    lea               r4,            [r3 * 3]
+    vextracti128      xm2,           m1,           1
+
+    movd              [r2],          xm1
+    pextrd            [r2 + r3],     xm1,          1
+    movd              [r2 + r3 * 2], xm2
+    pextrd            [r2 + r4],     xm2,          1
+    lea               r2,            [r2 + r3 * 4]
+    pextrd            [r2],          xm1,          2
+    pextrd            [r2 + r3],     xm1,          3
+    pextrd            [r2 + r3 * 2], xm2,          2
+    pextrd            [r2 + r4],     xm2,          3
+
+    ; row-extension tail: 3 more rows when isRowExt != 0
+    test              r5d,            r5d
+    jz                .end
+
+    lea               r0,            [r0 + r1 * 4]
+    lea               r2,            [r2 + r3 * 4]
+    movq              xm1,           [r0]
+    movhps            xm1,           [r0 + r1]
+    movq              xm2,           [r0 + r1 * 2]
+    vinserti128       m1,            m1,          xm2,           1
+    pshufb            m1,            m4
+    pmaddubsw         m1,            m0
+    pmaddwd           m1,            m5
+    packssdw          m1,            m1
+    psubw             m1,            m6
+    vextracti128      xm2,           m1,           1
+
+    movd              [r2],          xm1
+    pextrd            [r2 + r3],     xm1,          1
+    movd              [r2 + r3 * 2], xm2
+.end
+    RET
+
+;-----------------------------------------------------------------------------------------------------------------------------
+; void interp_4tap_horiz_pp_6x16(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------------------
+; AVX2 4-tap chroma horizontal interpolation, pixel in / pixel out, for a
+; 6-wide, 16-row block.  pmulhrsw with pw_512 rounds the 16-bit
+; intermediates back to pixel range; each row stores 4 bytes (movd/pextrd)
+; plus 2 bytes (pextrw) = 6 pixels.
+INIT_YMM avx2
+cglobal interp_4tap_horiz_pp_6x16, 4, 6, 7
+    mov               r4d,               r4m
+
+%ifdef PIC
+    lea               r5,                [tab_ChromaCoeff]
+    vpbroadcastd      m0,                [r5 + r4 * 4]
+%else
+    vpbroadcastd      m0,                [tab_ChromaCoeff + r4 * 4]
+%endif
+
+    mova              m1,                [tab_Tm]
+    mova              m2,                [pw_1]
+    mova              m6,                [pw_512]
+    lea               r4,                [r1 * 3]
+    lea               r5,                [r3 * 3]
+    ; register map
+    ; m0 - interpolate coeff
+    ; m1 - shuffle order table
+    ; m2 - constant word 1
+    ; m6 - rounding constant for pmulhrsw
+
+    ; step back one pixel for the 4-tap window
+    dec               r0
+    ; 4 unrolled passes x 4 rows each = 16 rows
+%rep 4
+    ; Row 0
+    vbroadcasti128    m3,                [r0]                        ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m3,                m1
+    pmaddubsw         m3,                m0
+    pmaddwd           m3,                m2
+
+    ; Row 1
+    vbroadcasti128    m4,                [r0 + r1]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,                m1
+    pmaddubsw         m4,                m0
+    pmaddwd           m4,                m2
+    packssdw          m3,                m4
+    pmulhrsw          m3,                m6
+
+    ; Row 2
+    vbroadcasti128    m4,                [r0 + r1 * 2]               ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m4,                m1
+    pmaddubsw         m4,                m0
+    pmaddwd           m4,                m2
+
+    ; Row 3
+    vbroadcasti128    m5,                [r0 + r4]                   ; [x x x x x A 9 8 7 6 5 4 3 2 1 0]
+    pshufb            m5,                m1
+    pmaddubsw         m5,                m0
+    pmaddwd           m5,                m2
+    packssdw          m4,                m5
+    pmulhrsw          m4,                m6
+
+    ; store 6 pixels per row: dword + word
+    packuswb          m3,                m4
+    vextracti128      xm4,               m3,          1
+    movd              [r2],              xm3
+    pextrw            [r2 + 4],          xm4,         0
+    pextrd            [r2 + r3],         xm3,         1
+    pextrw            [r2 + r3 + 4],     xm4,         2
+    pextrd            [r2 + r3 * 2],     xm3,         2
+    pextrw            [r2 + r3 * 2 + 4], xm4,         4
+    pextrd            [r2 + r5],         xm3,         3
+    pextrw            [r2 + r5 + 4],     xm4,         6
+    lea               r2,                [r2 + r3 * 4]
+    lea               r0,                [r0 + r1 * 4]
+%endrep
+    RET
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/ipfilter8.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,49 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_IPFILTER8_H
+#define X265_IPFILTER8_H
+
+/* Declares the assembly interpolation-filter entry points: 8-tap (luma)
+ * and 4-tap (chroma) horizontal/vertical filters in their pp/ps/sp/ss
+ * input/output variants, plus filterPixelToShort.  FUNCDEF_PU /
+ * FUNCDEF_CHROMA_PU expand one prototype per partition size for the
+ * given SIMD arch suffix. */
+#define SETUP_FUNC_DEF(cpu) \
+    FUNCDEF_PU(void, interp_8tap_horiz_pp, cpu, const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx); \
+    FUNCDEF_PU(void, interp_8tap_horiz_ps, cpu, const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx, int isRowExt); \
+    FUNCDEF_PU(void, interp_8tap_vert_pp, cpu, const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx); \
+    FUNCDEF_PU(void, interp_8tap_vert_ps, cpu, const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx); \
+    FUNCDEF_PU(void, interp_8tap_vert_sp, cpu, const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx); \
+    FUNCDEF_PU(void, interp_8tap_vert_ss, cpu, const int16_t* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx); \
+    FUNCDEF_PU(void, interp_8tap_hv_pp, cpu, const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int idxX, int idxY); \
+    FUNCDEF_CHROMA_PU(void, filterPixelToShort, cpu, const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride); \
+    FUNCDEF_CHROMA_PU(void, interp_4tap_horiz_pp, cpu, const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx); \
+    FUNCDEF_CHROMA_PU(void, interp_4tap_horiz_ps, cpu, const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx, int isRowExt); \
+    FUNCDEF_CHROMA_PU(void, interp_4tap_vert_pp, cpu, const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx); \
+    FUNCDEF_CHROMA_PU(void, interp_4tap_vert_ps, cpu, const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx); \
+    FUNCDEF_CHROMA_PU(void, interp_4tap_vert_sp, cpu, const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx); \
+    FUNCDEF_CHROMA_PU(void, interp_4tap_vert_ss, cpu, const int16_t* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx)
+
+/* One set of declarations per supported instruction-set build. */
+SETUP_FUNC_DEF(sse2);
+SETUP_FUNC_DEF(ssse3);
+SETUP_FUNC_DEF(sse3);
+SETUP_FUNC_DEF(sse4);
+SETUP_FUNC_DEF(avx2);
+
+#endif // ifndef X265_IPFILTER8_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/loopfilter.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2281 @@
+;*****************************************************************************
+;* Copyright (C) 2013 x265 project
+;*
+;* Authors: Min Chen <chenm001@163.com>
+;*          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+;*          Nabajit Deka <nabajit@multicorewareinc.com>
+;*          Dnyaneshwar Gorade <dnyaneshwar@multicorewareinc.com>
+;*          Murugan Vairavel <murugan@multicorewareinc.com>
+;*          Yuvaraj Venkatesh <yuvaraj@multicorewareinc.com>
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+%include "x86inc.asm"
+
+SECTION_RODATA 32
+pb_31:      times 32 db 31
+pb_124:     times 32 db 124
+pb_15:      times 32 db 15
+pb_movemask_32:  times 32 db 0x00
+                 times 32 db 0xFF
+
+SECTION .text
+cextern pb_1
+cextern pb_128
+cextern pb_2
+cextern pw_2
+cextern pw_pixel_max
+cextern pb_movemask
+cextern pw_1
+cextern hmul_16p
+cextern pb_4
+
+
+;============================================================================================================
+; void saoCuOrgE0(pixel * rec, int8_t * offsetEo, int lcuWidth, int8_t* signLeft, intptr_t stride)
+;============================================================================================================
+INIT_XMM sse4
+%if HIGH_BIT_DEPTH
+; SAO edge-offset class EO_0 (horizontal edges), 16-bit pixel path.
+; Two rows per call: .loop filters the row at r0 with the running sign
+; seeded from signLeft[0]; .loopH the row at r0 + stride*2 bytes seeded
+; from signLeft[1].  16 pixels (32 bytes) per iteration.
+cglobal saoCuOrgE0, 4,5,9
+    mov         r4d, r4m
+    movh        m6,  [r1]
+    ; seed the running left-sign byte from signLeft[0], negated
+    movzx       r1d, byte [r3]
+    pxor        m5, m5
+    neg         r1b
+    movd        m0, r1d
+    ; r1 -> second row (stride is in pixels, x2 for 16-bit samples)
+    lea         r1, [r0 + r4 * 2]
+    mov         r4d, r2d
+
+.loop:
+    movu        m7, [r0]
+    movu        m8, [r0 + 16]
+    ; compare rec[x] against its right neighbour rec[x+1]
+    movu        m2, [r0 + 2]
+    movu        m1, [r0 + 18]
+
+    pcmpgtw     m3, m7, m2
+    pcmpgtw     m2, m7
+    pcmpgtw     m4, m8, m1
+    pcmpgtw     m1, m8 
+
+    packsswb    m3, m4
+    packsswb    m2, m1
+
+    ; m3 = sign(rec[x] - rec[x+1]) as -1/0/+1 bytes
+    pand        m3, [pb_1]
+    por         m3, m2
+
+    ; shift in last iteration's carried sign as the left neighbour
+    palignr     m2, m3, m5, 15
+    por         m2, m0
+
+    mova        m4, [pw_pixel_max]
+    psignb      m2, [pb_128]                ; m2 = signLeft
+    pxor        m0, m0
+    ; carry the rightmost sign into m0 for the next iteration
+    palignr     m0, m3, 15
+    paddb       m3, m2
+    paddb       m3, [pb_2]                  ; m2 = uiEdgeType
+    pshufb      m2, m6, m3
+    pmovsxbw    m3, m2                      ; offsetEo
+    punpckhbw   m2, m2
+    psraw       m2, 8
+    paddw       m7, m3
+    paddw       m8, m2
+    ; clamp to [0, pw_pixel_max]
+    pmaxsw      m7, m5
+    pmaxsw      m8, m5
+    pminsw      m7, m4
+    pminsw      m8, m4
+    movu        [r0], m7
+    movu        [r0 + 16], m8
+
+    add         r0q, 32
+    sub         r2d, 16
+    jnz        .loop
+
+    ; second row: reseed the running sign from signLeft[1]
+    movzx       r3d, byte [r3 + 1]
+    neg         r3b
+    movd        m0, r3d
+.loopH:
+    movu        m7, [r1]
+    movu        m8, [r1 + 16]
+    movu        m2, [r1 + 2]
+    movu        m1, [r1 + 18]
+
+    pcmpgtw     m3, m7, m2
+    pcmpgtw     m2, m7
+    pcmpgtw     m4, m8, m1
+    pcmpgtw     m1, m8 
+
+    packsswb    m3, m4
+    packsswb    m2, m1
+
+    pand        m3, [pb_1]
+    por         m3, m2
+
+    palignr     m2, m3, m5, 15
+    por         m2, m0
+
+    mova        m4, [pw_pixel_max]
+    psignb      m2, [pb_128]                ; m2 = signLeft
+    pxor        m0, m0
+    palignr     m0, m3, 15
+    paddb       m3, m2
+    paddb       m3, [pb_2]                  ; m2 = uiEdgeType
+    pshufb      m2, m6, m3
+    pmovsxbw    m3, m2                      ; offsetEo
+    punpckhbw   m2, m2
+    psraw       m2, 8
+    paddw       m7, m3
+    paddw       m8, m2
+    pmaxsw      m7, m5
+    pmaxsw      m8, m5
+    pminsw      m7, m4
+    pminsw      m8, m4
+    movu        [r1], m7
+    movu        [r1 + 16], m8
+
+    add         r1q, 32
+    sub         r4d, 16
+    jnz        .loopH
+    RET
+%else ; HIGH_BIT_DEPTH
+; 8-bit pixel path: same two-row structure, 16 pixels per iteration.
+; Unsigned byte compares are done by XORing with 80h (pb_128) and using
+; signed pcmpgtb.
+cglobal saoCuOrgE0, 5, 5, 8, rec, offsetEo, lcuWidth, signLeft, stride
+
+    mov         r4d, r4m
+    mova        m4,  [pb_128]                ; m4 = [80]
+    pxor        m5,  m5                      ; m5 = 0
+    movu        m6,  [r1]                    ; m6 = offsetEo
+
+    ; seed the running left sign from signLeft[0]; r3 now points at signLeft[1]
+    movzx       r1d, byte [r3]
+    inc         r3
+    neg         r1b
+    movd        m0, r1d
+    lea         r1, [r0 + r4]
+    mov         r4d, r2d
+
+.loop:
+    movu        m7, [r0]                    ; m7 = rec[x]
+    movu        m2, [r0 + 1]                ; m2 = rec[x+1]
+
+    ; bias by 80h so pcmpgtb performs an unsigned comparison
+    pxor        m1, m7, m4
+    pxor        m3, m2, m4
+    pcmpgtb     m2, m1, m3
+    pcmpgtb     m3, m1
+    pand        m2, [pb_1]
+    por         m2, m3
+
+    ; shift signs left one byte and merge the carried left sign
+    pslldq      m3, m2, 1
+    por         m3, m0
+
+    psignb      m3, m4                      ; m3 = signLeft
+    pxor        m0, m0
+    ; carry this vector's rightmost sign into the next iteration
+    palignr     m0, m2, 15
+    paddb       m2, m3
+    paddb       m2, [pb_2]                  ; m2 = uiEdgeType
+    pshufb      m3, m6, m2
+    pmovzxbw    m2, m7                      ; rec
+    punpckhbw   m7, m5
+    pmovsxbw    m1, m3                      ; offsetEo
+    punpckhbw   m3, m3
+    psraw       m3, 8
+    paddw       m2, m1
+    paddw       m7, m3
+    packuswb    m2, m7
+    movu        [r0], m2
+
+    add         r0q, 16
+    sub         r2d, 16
+    jnz        .loop
+
+    ; second row: reseed the running sign from signLeft[1]
+    movzx       r3d, byte [r3]
+    neg         r3b
+    movd        m0, r3d
+.loopH:
+    movu        m7, [r1]                    ; m7 = rec[x]
+    movu        m2, [r1 + 1]                ; m2 = rec[x+1]
+
+    pxor        m1, m7, m4
+    pxor        m3, m2, m4
+    pcmpgtb     m2, m1, m3
+    pcmpgtb     m3, m1
+    pand        m2, [pb_1]
+    por         m2, m3
+
+    pslldq      m3, m2, 1
+    por         m3, m0
+
+    psignb      m3, m4                      ; m3 = signLeft
+    pxor        m0, m0
+    palignr     m0, m2, 15
+    paddb       m2, m3
+    paddb       m2, [pb_2]                  ; m2 = uiEdgeType
+    pshufb      m3, m6, m2
+    pmovzxbw    m2, m7                      ; rec
+    punpckhbw   m7, m5
+    pmovsxbw    m1, m3                      ; offsetEo
+    punpckhbw   m3, m3
+    psraw       m3, 8
+    paddw       m2, m1
+    paddw       m7, m3
+    packuswb    m2, m7
+    movu        [r1], m2
+
+    add         r1q, 16
+    sub         r4d, 16
+    jnz        .loopH
+    RET
+%endif
+
+INIT_YMM avx2
+%if HIGH_BIT_DEPTH
+; AVX2 SAO EO_0, 16-bit pixel path.  Both rows are processed in each
+; iteration ([r0] and [r0 + r1]); the two lanes of m0 carry the
+; signLeft[0] / signLeft[1] seeds.  r2d = width/16 iterations.
+cglobal saoCuOrgE0, 4,4,9
+    vbroadcasti128  m6, [r1]
+    ; seed lane 0 with -signLeft[0], lane 1 with -signLeft[1]
+    movzx           r1d, byte [r3]
+    neg             r1b
+    movd            xm0, r1d
+    movzx           r1d, byte [r3 + 1]
+    neg             r1b
+    movd            xm1, r1d
+    vinserti128     m0, m0, xm1, 1
+    mova            m5, [pw_pixel_max]
+    ; r1d = stride in bytes (x2 for 16-bit samples)
+    mov             r1d, r4m
+    add             r1d, r1d
+    shr             r2d, 4
+
+.loop:
+    movu            m7, [r0]
+    movu            m8, [r0 + r1]
+    movu            m2, [r0 + 2]
+    movu            m1, [r0 + r1 + 2]
+
+    pcmpgtw         m3, m7, m2
+    pcmpgtw         m2, m7
+    pcmpgtw         m4, m8, m1
+    pcmpgtw         m1, m8
+
+    packsswb        m3, m4
+    packsswb        m2, m1
+    ; undo packsswb's lane interleave so lane 0 = row 0, lane 1 = row 1
+    vpermq          m3, m3, 11011000b
+    vpermq          m2, m2, 11011000b
+
+    pand            m3, [pb_1]
+    por             m3, m2
+
+    ; shift signs left one byte per lane, merge carried left signs
+    pslldq          m2, m3, 1
+    por             m2, m0
+
+    psignb          m2, [pb_128]                ; m2 = signLeft
+    pxor            m0, m0
+    ; carry each lane's rightmost sign into the next iteration
+    palignr         m0, m3, 15
+    paddb           m3, m2
+    paddb           m3, [pb_2]                  ; m3 = uiEdgeType
+    pshufb          m2, m6, m3
+    pmovsxbw        m3, xm2                     ; offsetEo
+    vextracti128    xm2, m2, 1
+    pmovsxbw        m2, xm2
+    pxor            m4, m4
+    paddw           m7, m3
+    paddw           m8, m2
+    ; clamp to [0, pw_pixel_max]
+    pmaxsw          m7, m4
+    pmaxsw          m8, m4
+    pminsw          m7, m5
+    pminsw          m8, m5
+    movu            [r0], m7
+    movu            [r0 + r1], m8
+
+    add             r0q, 32
+    dec             r2d
+    jnz             .loop
+    RET
+%else ; HIGH_BIT_DEPTH
+; AVX2 SAO EO_0, 8-bit pixel path.  Row 0 in the low lane, the row at
+; [r0 + stride] in the high lane; m0 lanes carry the two signLeft seeds.
+cglobal saoCuOrgE0, 5, 5, 7, rec, offsetEo, lcuWidth, signLeft, stride
+
+    mov                 r4d,        r4m
+    vbroadcasti128      m4,         [pb_128]                   ; m4 = [80]
+    vbroadcasti128      m6,         [r1]                       ; m6 = offsetEo
+    movzx               r1d,        byte [r3]
+    neg                 r1b
+    movd                xm0,        r1d
+    movzx               r1d,        byte [r3 + 1]
+    neg                 r1b
+    movd                xm1,        r1d
+    vinserti128         m0,         m0,        xm1,           1
+
+.loop:
+    movu                xm5,        [r0]                       ; xm5 = rec[x]
+    movu                xm2,        [r0 + 1]                   ; xm2 = rec[x + 1]
+    ; second row into the high lanes
+    vinserti128         m5,         m5,        [r0 + r4],     1
+    vinserti128         m2,         m2,        [r0 + r4 + 1], 1
+
+    ; bias by 80h so pcmpgtb acts as an unsigned compare
+    pxor                m1,         m5,        m4
+    pxor                m3,         m2,        m4
+    pcmpgtb             m2,         m1,        m3
+    pcmpgtb             m3,         m1
+    pand                m2,         [pb_1]
+    por                 m2,         m3
+
+    pslldq              m3,         m2,        1
+    por                 m3,         m0
+
+    psignb              m3,         m4                         ; m3 = signLeft
+    pxor                m0,         m0
+    ; carry each lane's rightmost sign into the next iteration
+    palignr             m0,         m2,        15
+    paddb               m2,         m3
+    paddb               m2,         [pb_2]                     ; m2 = uiEdgeType
+    pshufb              m3,         m6,        m2
+    pmovzxbw            m2,         xm5                        ; rec
+    vextracti128        xm5,        m5,        1
+    pmovzxbw            m5,         xm5
+    pmovsxbw            m1,         xm3                        ; offsetEo
+    vextracti128        xm3,        m3,        1
+    pmovsxbw            m3,         xm3
+    paddw               m2,         m1
+    paddw               m5,         m3
+    packuswb            m2,         m5
+    vpermq              m2,         m2,        11011000b
+    ; write row 0 from the low lane, row 1 from the high lane
+    movu                [r0],       xm2
+    vextracti128        [r0 + r4],  m2,        1
+
+    add                 r0q,        16
+    sub                 r2d,        16
+    jnz                 .loop
+    RET
+%endif
+
+;==================================================================================================
+; void saoCuOrgE1(pixel *pRec, int8_t *m_iUpBuff1, int8_t *m_iOffsetEo, Int iStride, Int iLcuWidth)
+;==================================================================================================
+INIT_XMM sse4
+%if HIGH_BIT_DEPTH
+; SAO edge-offset class EO_1 (vertical edges), 16-bit pixel path.
+; Compares each pixel against the pixel one row below ([r0 + stride])
+; and stores the negated sign back into m_iUpBuff1 for the next row.
+; 16 pixels per iteration; r4d = width/16.
+cglobal saoCuOrgE1, 4,5,8
+    ; stride in pixels -> bytes (16-bit samples)
+    add         r3d, r3d
+    mov         r4d, r4m
+    pxor        m0, m0                      ; m0 = 0
+    mova        m6, [pb_2]                  ; m6 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+    shr         r4d, 4
+; NOTE(review): label written without ':' - NASM accepts but warns
+.loop
+    movu        m7, [r0]
+    movu        m5, [r0 + 16]
+    movu        m3, [r0 + r3]
+    movu        m1, [r0 + r3 + 16]
+
+    pcmpgtw     m2, m7, m3
+    pcmpgtw     m3, m7
+    pcmpgtw     m4, m5, m1
+    pcmpgtw     m1, m5 
+
+    packsswb    m2, m4
+    packsswb    m3, m1
+
+    ; m2 = sign(rec[x] - rec[x + stride]) as -1/0/+1 bytes
+    pand        m2, [pb_1]
+    por         m2, m3
+
+    movu        m3, [r1]                    ; m3 = m_iUpBuff1
+
+    ; edge type = upSign + downSign + 2
+    paddb       m3, m2
+    paddb       m3, m6
+
+    movu        m4, [r2]                    ; m4 = m_iOffsetEo
+    pshufb      m1, m4, m3
+
+    ; store -sign into m_iUpBuff1 for the row below
+    psubb       m3, m0, m2
+    movu        [r1], m3
+
+    pmovsxbw    m3, m1
+    punpckhbw   m1, m1
+    psraw       m1, 8
+
+    paddw       m7, m3
+    paddw       m5, m1
+
+    ; clamp to [0, pw_pixel_max]
+    pmaxsw      m7, m0
+    pmaxsw      m5, m0
+    pminsw      m7, [pw_pixel_max]
+    pminsw      m5, [pw_pixel_max]
+
+    movu        [r0], m7
+    movu        [r0 + 16],  m5
+
+    add         r0, 32
+    add         r1, 16
+    dec         r4d
+    jnz         .loop
+    RET
+%else ; HIGH_BIT_DEPTH
+; 8-bit pixel path: same structure, unsigned compares done by XOR with
+; 80h (pb_128) + signed pcmpgtb.
+cglobal saoCuOrgE1, 3, 5, 8, pRec, m_iUpBuff1, m_iOffsetEo, iStride, iLcuWidth
+    mov         r3d, r3m
+    mov         r4d, r4m
+    pxor        m0,    m0                      ; m0 = 0
+    mova        m6,    [pb_2]                  ; m6 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+    mova        m7,    [pb_128]
+    shr         r4d,   4
+.loop
+    movu        m1,    [r0]                    ; m1 = pRec[x]
+    movu        m2,    [r0 + r3]               ; m2 = pRec[x + iStride]
+
+    pxor        m3,    m1,    m7
+    pxor        m4,    m2,    m7
+    pcmpgtb     m2,    m3,    m4
+    pcmpgtb     m4,    m3
+    pand        m2,    [pb_1]
+    por         m2,    m4
+
+    movu        m3,    [r1]                    ; m3 = m_iUpBuff1
+
+    ; edge type = upSign + downSign + 2
+    paddb       m3,    m2
+    paddb       m3,    m6
+
+    movu        m4,    [r2]                    ; m4 = m_iOffsetEo
+    pshufb      m5,    m4,    m3
+
+    ; store -sign into m_iUpBuff1 for the row below
+    psubb       m3,    m0,    m2
+    movu        [r1],  m3
+
+    pmovzxbw    m2,    m1
+    punpckhbw   m1,    m0
+    pmovsxbw    m3,    m5
+    punpckhbw   m5,    m5
+    psraw       m5,    8
+
+    paddw       m2,    m3
+    paddw       m1,    m5
+    packuswb    m2,    m1
+    movu        [r0],  m2
+
+    add         r0,    16
+    add         r1,    16
+    dec         r4d
+    jnz         .loop
+    RET
+%endif
+
+INIT_YMM avx2
+%if HIGH_BIT_DEPTH
+; AVX2 SAO EO_1 (vertical), 16-bit pixel path.  One 16-pixel ymm row
+; per iteration, compared against the row below; -sign written back to
+; m_iUpBuff1.
+cglobal saoCuOrgE1, 4,5,6
+    ; stride in pixels -> bytes
+    add         r3d, r3d
+    mov         r4d, r4m
+    mova        m4, [pb_2]
+    shr         r4d, 4
+    mova        m0, [pw_pixel_max]
+.loop
+    movu        m5, [r0]
+    movu        m3, [r0 + r3]
+
+    pcmpgtw     m2, m5, m3
+    pcmpgtw     m3, m5
+
+    packsswb    m2, m3
+    ; split pack result: m3 = "greater" half, m2 = "less" half
+    vpermq      m3, m2, 11011101b
+    vpermq      m2, m2, 10001000b
+
+    ; xm2 = sign(rec[x] - rec[x + stride]) as -1/0/+1 bytes
+    pand        xm2, [pb_1]
+    por         xm2, xm3
+
+    movu        xm3, [r1]       ; m3 = m_iUpBuff1
+
+    ; edge type = upSign + downSign + 2
+    paddb       xm3, xm2
+    paddb       xm3, xm4
+
+    movu        xm1, [r2]       ; m1 = m_iOffsetEo
+    pshufb      xm1, xm3
+    pmovsxbw    m3, xm1
+
+    paddw       m5, m3
+    ; clamp to [0, pw_pixel_max]
+    pxor        m3, m3
+    pmaxsw      m5, m3
+    pminsw      m5, m0
+    movu        [r0], m5
+
+    ; store -sign into m_iUpBuff1 for the row below
+    psubb       xm3, xm2
+    movu        [r1], xm3
+
+    add         r0, 32
+    add         r1, 16
+    dec         r4d
+    jnz         .loop
+    RET
+%else ; HIGH_BIT_DEPTH
+; AVX2 SAO EO_1, 8-bit pixel path: 16 pixels per iteration, widened to
+; words in a single ymm for the offset add.
+cglobal saoCuOrgE1, 3, 5, 8, pRec, m_iUpBuff1, m_iOffsetEo, iStride, iLcuWidth
+    mov           r3d,    r3m
+    mov           r4d,    r4m
+    movu          xm0,    [r2]                    ; xm0 = m_iOffsetEo
+    mova          xm6,    [pb_2]                  ; xm6 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+    mova          xm7,    [pb_128]
+    shr           r4d,    4
+.loop
+    movu          xm1,    [r0]                    ; xm1 = pRec[x]
+    movu          xm2,    [r0 + r3]               ; xm2 = pRec[x + iStride]
+
+    ; bias by 80h so pcmpgtb acts as an unsigned compare
+    pxor          xm3,    xm1,    xm7
+    pxor          xm4,    xm2,    xm7
+    pcmpgtb       xm2,    xm3,    xm4
+    pcmpgtb       xm4,    xm3
+    pand          xm2,    [pb_1]
+    por           xm2,    xm4
+
+    movu          xm3,    [r1]                    ; xm3 = m_iUpBuff1
+
+    ; edge type = upSign + downSign + 2
+    paddb         xm3,    xm2
+    paddb         xm3,    xm6
+
+    pshufb        xm5,    xm0,    xm3
+    ; store -sign into m_iUpBuff1 for the row below
+    pxor          xm4,    xm4
+    psubb         xm3,    xm4,    xm2
+    movu          [r1],   xm3
+
+    pmovzxbw      m2,     xm1
+    pmovsxbw      m3,     xm5
+
+    paddw         m2,     m3
+    vextracti128  xm3,    m2,     1
+    packuswb      xm2,    xm3
+    movu          [r0],   xm2
+
+    add           r0,     16
+    add           r1,     16
+    dec           r4d
+    jnz           .loop
+    RET
+%endif
+
+;========================================================================================================
+; void saoCuOrgE1_2Rows(pixel *pRec, int8_t *m_iUpBuff1, int8_t *m_iOffsetEo, Int iStride, Int iLcuWidth)
+;========================================================================================================
+INIT_XMM sse4
+%if HIGH_BIT_DEPTH
+; SAO EO_1 over two rows, 16-bit pixel path.  First pass (.loop) filters
+; the row at r0 against the row below; the second pass (.loopH) repeats
+; for the next row, with r1 (m_iUpBuff1) rewound to the row start.
+cglobal saoCuOrgE1_2Rows, 4,7,8
+    ; stride in pixels -> bytes
+    add         r3d, r3d
+    mov         r4d, r4m
+    pxor        m0, m0                      ; m0 = 0
+    mova        m6, [pw_pixel_max]
+    ; r5d keeps the full width, r6 the row-start pointer for pass 2
+    mov         r5d, r4d
+    shr         r4d, 4
+    mov         r6, r0
+.loop
+    movu        m7, [r0]
+    movu        m5, [r0 + 16]
+    movu        m3, [r0 + r3]
+    movu        m1, [r0 + r3 + 16]
+
+    pcmpgtw     m2, m7, m3
+    pcmpgtw     m3, m7
+    pcmpgtw     m4, m5, m1
+    pcmpgtw     m1, m5
+    packsswb    m2, m4
+    packsswb    m3, m1
+    ; m2 = sign(rec[x] - rec[x + stride]) as -1/0/+1 bytes
+    pand        m2, [pb_1]
+    por         m2, m3
+
+    movu        m3, [r1]                    ; m3 = m_iUpBuff1
+
+    ; edge type = upSign + downSign + 2
+    paddb       m3, m2
+    paddb       m3, [pb_2]
+
+    movu        m4, [r2]                    ; m4 = m_iOffsetEo
+    pshufb      m1, m4, m3
+
+    ; store -sign into m_iUpBuff1 for the next row
+    psubb       m3, m0, m2
+    movu        [r1], m3
+
+    pmovsxbw    m3, m1
+    punpckhbw   m1, m1
+    psraw       m1, 8
+
+    paddw       m7, m3
+    paddw       m5, m1
+
+    ; clamp to [0, pw_pixel_max]
+    pmaxsw      m7, m0
+    pmaxsw      m5, m0
+    pminsw      m7, m6
+    pminsw      m5, m6
+
+    movu        [r0], m7
+    movu        [r0 + 16],  m5
+
+    add         r0, 32
+    add         r1, 16
+    dec         r4d
+    jnz         .loop
+
+    ; pass 2: rewind m_iUpBuff1 by the width, advance rec one row
+    sub         r1, r5
+    shr         r5d, 4
+    lea         r0, [r6 + r3]
+.loopH:
+    movu        m7, [r0]
+    movu        m5, [r0 + 16]
+    movu        m3, [r0 + r3]
+    movu        m1, [r0 + r3 + 16]
+
+    pcmpgtw     m2, m7, m3
+    pcmpgtw     m3, m7
+    pcmpgtw     m4, m5, m1
+    pcmpgtw     m1, m5
+    packsswb    m2, m4
+    packsswb    m3, m1
+    pand        m2, [pb_1]
+    por         m2, m3
+
+    movu        m3, [r1]                    ; m3 = m_iUpBuff1
+
+    paddb       m3, m2
+    paddb       m3, [pb_2]
+
+    movu        m4, [r2]                    ; m4 = m_iOffsetEo
+    pshufb      m1, m4, m3
+
+    psubb       m3, m0, m2
+    movu        [r1], m3
+
+    pmovsxbw    m3, m1
+    punpckhbw   m1, m1
+    psraw       m1, 8
+
+    paddw       m7, m3
+    paddw       m5, m1
+
+    pmaxsw      m7, m0
+    pmaxsw      m5, m0
+    pminsw      m7, m6
+    pminsw      m5, m6
+
+    movu        [r0], m7
+    movu        [r0 + 16],  m5
+
+    add         r0, 32
+    add         r1, 16
+    dec         r5d
+    jnz         .loopH
+    RET
+%else ; HIGH_BIT_DEPTH
+; 8-bit pixel path: both rows handled inside one loop iteration, using
+; biased (XOR 80h) signed compares for the unsigned pixel values.
+cglobal saoCuOrgE1_2Rows, 3, 5, 8, pRec, m_iUpBuff1, m_iOffsetEo, iStride, iLcuWidth
+    mov         r3d,        r3m
+    mov         r4d,        r4m
+    pxor        m0,         m0                      ; m0 = 0
+    mova        m7,         [pb_128]
+    shr         r4d,        4
+.loop
+    movu        m1,         [r0]                    ; m1 = pRec[x]
+    movu        m2,         [r0 + r3]               ; m2 = pRec[x + iStride]
+
+    ; m6 = sign(row0 - row1)
+    pxor        m3,         m1,         m7
+    pxor        m4,         m2,         m7
+    pcmpgtb     m6,         m3,         m4
+    pcmpgtb     m5,         m4,         m3
+    pand        m6,         [pb_1]
+    por         m6,         m5
+
+    ; m5 = sign(row1 - row2)
+    movu        m5,         [r0 + r3 * 2]
+    pxor        m3,         m5,         m7
+    pcmpgtb     m5,         m4,         m3
+    pcmpgtb     m3,         m4
+    pand        m5,         [pb_1]
+    por         m5,         m3
+
+    ; --- row 0 ---
+    movu        m3,         [r1]                    ; m3 = m_iUpBuff1
+    paddb       m3,         m6
+    paddb       m3,         [pb_2]
+
+    movu        m4,         [r2]                    ; m4 = m_iOffsetEo
+    pshufb      m4,         m3
+
+    ; m_iUpBuff1 = -sign(row0 - row1) for row 1
+    psubb       m3,         m0,         m6
+    movu        [r1],       m3
+
+    pmovzxbw    m6,         m1
+    punpckhbw   m1,         m0
+    pmovsxbw    m3,         m4
+    punpckhbw   m4,         m4
+    psraw       m4,         8
+
+    paddw       m6,         m3
+    paddw       m1,         m4
+    packuswb    m6,         m1
+    movu        [r0],       m6
+
+    ; --- row 1 ---
+    movu        m3,         [r1]                    ; m3 = m_iUpBuff1
+    paddb       m3,         m5
+    paddb       m3,         [pb_2]
+
+    movu        m4,         [r2]                    ; m4 = m_iOffsetEo
+    pshufb      m4,         m3
+    ; m_iUpBuff1 = -sign(row1 - row2) for the rows below
+    psubb       m3,         m0,         m5
+    movu        [r1],       m3
+
+    pmovzxbw    m5,         m2
+    punpckhbw   m2,         m0
+    pmovsxbw    m3,         m4
+    punpckhbw   m4,         m4
+    psraw       m4,         8
+
+    paddw       m5,         m3
+    paddw       m2,         m4
+    packuswb    m5,         m2
+    movu        [r0 + r3],  m5
+
+    add         r0,         16
+    add         r1,         16
+    dec         r4d
+    jnz         .loop
+    RET
+%endif
+
+; AVX2 variants of saoCuOrgE1_2Rows (SAO edge-offset EO_1, vertical, 2 rows).
INIT_YMM avx2
%if HIGH_BIT_DEPTH
+; 16-bit pixels: 16 samples per iteration; rows 0 and 1 are processed
+; together, with signs computed against rows 1 and 2 via word compares.
cglobal saoCuOrgE1_2Rows, 4,5,8
+    add             r3d, r3d                ; stride in bytes (2 bytes/sample)
+    mov             r4d, r4m
+    mova            m4, [pw_pixel_max]
+    vbroadcasti128  m6, [r2]                ; m6 = m_iOffsetEo
+    shr             r4d, 4                  ; loop count = lcuWidth / 16
+.loop
+    movu            m7, [r0]
+    movu            m5, [r0 + r3]
+    movu            m1, [r0 + r3 * 2]
+
+    ; word-compare both directions, pack to bytes -> signs in {-1, 0, 1}
+    pcmpgtw         m2, m7, m5
+    pcmpgtw         m3, m5, m7
+    pcmpgtw         m0, m5, m1
+    pcmpgtw         m1, m5
+
+    packsswb        m2, m0
+    packsswb        m3, m1
+    vpermq          m2, m2, 11011000b       ; fix lane order after in-lane pack
+    vpermq          m3, m3, 11011000b
+
+    pand            m2, [pb_1]
+    por             m2, m3
+
+    ; low lane: current upBuff1; high lane: -signDown (becomes next upBuff1)
+    movu            xm3, [r1]               ; m3 = m_iUpBuff1
+    pxor            m0, m0
+    psubb           m1, m0, m2
+    vinserti128     m3, m3, xm1, 1
+    vextracti128    [r1], m1, 1
+
+    paddb           m3, m2
+    paddb           m3, [pb_2]              ; edgeType index
+
+    pshufb          m1, m6, m3
+    pmovsxbw        m3, xm1
+    vextracti128    xm1, m1, 1
+    pmovsxbw        m1, xm1
+
+    paddw           m7, m3
+    paddw           m5, m1
+
+    ; clamp to [0, pw_pixel_max]
+    pmaxsw          m7, m0
+    pmaxsw          m5, m0
+    pminsw          m7, m4
+    pminsw          m5, m4
+
+    movu            [r0], m7
+    movu            [r0 + r3],  m5
+
+    add             r0, 32
+    add             r1, 16
+    dec             r4d
+    jnz             .loop
+    RET
%else ; HIGH_BIT_DEPTH
+; 8-bit pixels: both rows packed into one ymm (low lane row0, high lane row1);
+; pb_128 XOR enables signed byte compares as in the SSE4 path.
cglobal saoCuOrgE1_2Rows, 3, 5, 7, pRec, m_iUpBuff1, m_iOffsetEo, iStride, iLcuWidth
+    mov             r3d,        r3m
+    mov             r4d,        r4m
+    pxor            m0,         m0                           ; m0 = 0
+    vbroadcasti128  m5,         [pb_128]
+    vbroadcasti128  m6,         [r2]                         ; m6 = m_iOffsetEo
+    shr             r4d,        4                            ; loop count = iLcuWidth / 16
+.loop
+    movu            xm1,        [r0]                         ; m1 = pRec[x]
+    movu            xm2,        [r0 + r3]                    ; m2 = pRec[x + iStride]
+    vinserti128     m1,         m1,       xm2,            1
+    vinserti128     m2,         m2,       [r0 + r3 * 2],  1
+
+    ; m2 = sign(rec[x] - rec[x + iStride]) for both rows at once
+    pxor            m3,         m1,       m5
+    pxor            m4,         m2,       m5
+    pcmpgtb         m2,         m3,       m4
+    pcmpgtb         m4,         m3
+    pand            m2,         [pb_1]
+    por             m2,         m4
+
+    ; low lane: incoming upBuff1; high lane: -signDown of row0 (feeds row1)
+    movu            xm3,        [r1]                         ; xm3 = m_iUpBuff
+    psubb           m4,         m0,       m2
+    vinserti128     m3,         m3,       xm4,            1
+    paddb           m3,         m2
+    paddb           m3,         [pb_2]                       ; edgeType
+    pshufb          m2,         m6,       m3
+    vextracti128    [r1],       m4,       1                  ; store next upBuff1
+
+    ; widen, add offsets, saturate back to bytes, restore row order
+    pmovzxbw        m4,         xm1
+    vextracti128    xm3,        m1,       1
+    pmovzxbw        m3,         xm3
+    pmovsxbw        m1,         xm2
+    vextracti128    xm2,        m2,       1
+    pmovsxbw        m2,         xm2
+
+    paddw           m4,         m1
+    paddw           m3,         m2
+    packuswb        m4,         m3
+    vpermq          m4,         m4,       11011000b
+    movu            [r0],       xm4
+    vextracti128    [r0 + r3],  m4,       1
+
+    add             r0,         16
+    add             r1,         16
+    dec             r4d
+    jnz             .loop
+    RET
%endif
+
+;======================================================================================================================================================
+; void saoCuOrgE2(pixel * rec, int8_t * bufft, int8_t * buff1, int8_t * offsetEo, int lcuWidth, intptr_t stride)
+;======================================================================================================================================================
+; SSE4 variants of saoCuOrgE2: SAO edge-offset EO_2 (135-degree diagonal).
+; Signs are taken against rec[x + stride + 1]; bufft is written at +1 (inc r1)
+; since the diagonal shifts the sign buffer one sample to the right.
INIT_XMM sse4
%if HIGH_BIT_DEPTH
cglobal saoCuOrgE2, 6,6,8
+    mov         r4d, r4m
+    add         r5d, r5d                    ; stride in bytes (16-bit samples)
+    pxor        m0, m0
+    inc         r1
+    ; save the samples just past lcuWidth; the 16-wide loop may overwrite them
+    movh        m6, [r0 + r4 * 2]
+    movhps      m6, [r1 + r4]
+
+.loop
+    movu        m7, [r0]
+    movu        m5, [r0 + 16]
+    movu        m3, [r0 + r5 + 2]
+    movu        m1, [r0 + r5 + 18]
+
+    ; signDown = sign(rec[x] - rec[x + stride + 1]) packed to bytes
+    pcmpgtw     m2, m7, m3
+    pcmpgtw     m3, m7
+    pcmpgtw     m4, m5, m1
+    pcmpgtw     m1, m5
+    packsswb    m2, m4
+    packsswb    m3, m1
+    pand        m2, [pb_1]
+    por         m2, m3
+
+    movu        m3, [r2]                    ; buff1 (upper-left signs)
+
+    paddb       m3, m2
+    paddb       m3, [pb_2]                  ; edgeType index
+
+    movu        m4, [r3]
+    pshufb      m4, m3
+
+    psubb       m3, m0, m2                  ; bufft = -signDown
+    movu        [r1], m3
+
+    ; sign-extend offsets to words and apply, clamping to pixel range
+    pmovsxbw    m3, m4
+    punpckhbw   m4, m4
+    psraw       m4, 8
+
+    paddw       m7, m3
+    paddw       m5, m4
+    pmaxsw      m7, m0
+    pmaxsw      m5, m0
+    pminsw      m7, [pw_pixel_max]
+    pminsw      m5, [pw_pixel_max]
+    movu        [r0], m7
+    movu        [r0 + 16], m5
+
+    add         r0, 32
+    add         r1, 16
+    add         r2, 16
+    sub         r4, 16
+    jg          .loop
+
+    ; restore the saved over-written tail samples (r4 <= 0 here)
+    movh        [r0 + r4 * 2], m6
+    movhps      [r1 + r4], m6
+    RET
%else ; HIGH_BIT_DEPTH
+; 8-bit path: same structure, pb_128 bias for signed byte compares.
cglobal saoCuOrgE2, 5, 6, 8, rec, bufft, buff1, offsetEo, lcuWidth
+    mov         r4d,   r4m
+    mov         r5d,   r5m
+    pxor        m0,    m0                      ; m0 = 0
+    mova        m6,    [pb_2]                  ; m6 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+    mova        m7,    [pb_128]
+    inc         r1
+    ; save tail bytes past lcuWidth for restore after the loop
+    movh        m5,    [r0 + r4]
+    movhps      m5,    [r1 + r4]
+
+.loop
+    movu        m1,    [r0]                    ; m1 = rec[x]
+    movu        m2,    [r0 + r5 + 1]           ; m2 = rec[x + stride + 1]
+    pxor        m3,    m1,    m7
+    pxor        m4,    m2,    m7
+    pcmpgtb     m2,    m3,    m4
+    pcmpgtb     m4,    m3
+    pand        m2,    [pb_1]
+    por         m2,    m4                      ; m2 = signDown in {-1, 0, 1}
+    movu        m3,    [r2]                    ; m3 = buff1
+
+    paddb       m3,    m2
+    paddb       m3,    m6                      ; m3 = edgeType
+
+    movu        m4,    [r3]                    ; m4 = offsetEo
+    pshufb      m4,    m3
+
+    psubb       m3,    m0,    m2               ; bufft = -signDown
+    movu        [r1],  m3
+
+    ; widen, add offset, saturate back to [0, 255]
+    pmovzxbw    m2,    m1
+    punpckhbw   m1,    m0
+    pmovsxbw    m3,    m4
+    punpckhbw   m4,    m4
+    psraw       m4,    8
+
+    paddw       m2,    m3
+    paddw       m1,    m4
+    packuswb    m2,    m1
+    movu        [r0],  m2
+
+    add         r0,    16
+    add         r1,    16
+    add         r2,    16
+    sub         r4,    16
+    jg          .loop
+
+    movh        [r0 + r4], m5
+    movhps      [r1 + r4], m5
+    RET
%endif
+
+; AVX2 variants of saoCuOrgE2 (EO_2 diagonal). NOTE(review): the 8-bit variant
+; below has no width loop — it processes exactly one 16-pixel run; wider CUs
+; presumably go through saoCuOrgE2_32 — confirm against the C-side dispatch.
INIT_YMM avx2
%if HIGH_BIT_DEPTH
cglobal saoCuOrgE2, 6,6,7
+    mov             r4d, r4m
+    add             r5d, r5d                ; stride in bytes
+    inc             r1                      ; bufft written at +1 (diagonal shift)
+    ; save tail samples past lcuWidth; restored after the loop
+    movq            xm4, [r0 + r4 * 2]
+    movhps          xm4, [r1 + r4]
+    vbroadcasti128  m5, [r3]
+    mova            m6, [pw_pixel_max]
+.loop
+    movu            m1, [r0]
+    movu            m3, [r0 + r5 + 2]
+
+    ; signDown = sign(rec[x] - rec[x + stride + 1])
+    pcmpgtw         m2, m1, m3
+    pcmpgtw         m3, m1
+
+    packsswb        m2, m3
+    vpermq          m3, m2, 11011101b
+    vpermq          m2, m2, 10001000b
+
+    pand            xm2, [pb_1]
+    por             xm2, xm3
+
+    movu            xm3, [r2]               ; buff1
+
+    paddb           xm3, xm2
+    paddb           xm3, [pb_2]             ; edgeType
+    pshufb          xm0, xm5, xm3
+    pmovsxbw        m3, xm0
+
+    pxor            m0, m0
+    paddw           m1, m3
+    pmaxsw          m1, m0                  ; clamp to [0, pw_pixel_max]
+    pminsw          m1, m6
+    movu            [r0], m1
+
+    psubb           xm0, xm2                ; bufft = -signDown (m0 is zero)
+    movu            [r1], xm0
+
+    add             r0, 32
+    add             r1, 16
+    add             r2, 16
+    sub             r4, 16
+    jg              .loop
+
+    movq            [r0 + r4 * 2], xm4
+    movhps          [r1 + r4], xm4
+    RET
%else ; HIGH_BIT_DEPTH
+; 8-bit, single 16-pixel pass (loop-free).
cglobal saoCuOrgE2, 5, 6, 7, rec, bufft, buff1, offsetEo, lcuWidth
+    mov            r4d,   r4m
+    mov            r5d,   r5m
+    pxor           xm0,   xm0                     ; xm0 = 0
+    mova           xm5,   [pb_128]
+    inc            r1
+    ; save and later restore the bytes just past lcuWidth
+    movq           xm6,   [r0 + r4]
+    movhps         xm6,   [r1 + r4]
+
+    movu           xm1,   [r0]                    ; xm1 = rec[x]
+    movu           xm2,   [r0 + r5 + 1]           ; xm2 = rec[x + stride + 1]
+    pxor           xm3,   xm1,   xm5
+    pxor           xm4,   xm2,   xm5
+    pcmpgtb        xm2,   xm3,   xm4
+    pcmpgtb        xm4,   xm3
+    pand           xm2,   [pb_1]
+    por            xm2,   xm4                     ; signDown
+    movu           xm3,   [r2]                    ; xm3 = buff1
+
+    paddb          xm3,   xm2
+    paddb          xm3,   [pb_2]                  ; xm3 = edgeType
+
+    movu           xm4,   [r3]                    ; xm4 = offsetEo
+    pshufb         xm4,   xm3
+
+    psubb          xm3,   xm0,   xm2              ; bufft = -signDown
+    movu           [r1],  xm3
+
+    ; widen to words in a ymm, add, pack back to 16 bytes
+    pmovzxbw       m2,    xm1
+    pmovsxbw       m3,    xm4
+
+    paddw          m2,    m3
+    vextracti128   xm3,   m2,    1
+    packuswb       xm2,   xm3
+    movu           [r0],  xm2
+
+    movq           [r0 + r4], xm6
+    movhps         [r1 + r4], xm6
+    RET
%endif
+
+; AVX2 saoCuOrgE2_32: EO_2 diagonal for 32-pixel-wide runs (32 samples/iter).
INIT_YMM avx2
%if HIGH_BIT_DEPTH
cglobal saoCuOrgE2_32, 6,6,8
+    mov             r4d, r4m
+    add             r5d, r5d                ; stride in bytes
+    inc             r1                      ; bufft written at +1
+    ; save tail samples past lcuWidth for restore after the loop
+    movq            xm4, [r0 + r4 * 2]
+    movhps          xm4, [r1 + r4]
+    vbroadcasti128  m5, [r3]
+
+.loop
+    movu            m1, [r0]
+    movu            m7, [r0 + 32]
+    movu            m3, [r0 + r5 + 2]
+    movu            m6, [r0 + r5 + 34]
+
+    ; signDown = sign(rec[x] - rec[x + stride + 1]) for 32 samples
+    pcmpgtw         m2, m1, m3
+    pcmpgtw         m0, m7, m6
+    pcmpgtw         m3, m1
+    pcmpgtw         m6, m7
+
+    packsswb        m2, m0
+    packsswb        m3, m6
+    vpermq          m3, m3, 11011000b       ; fix lane order after in-lane pack
+    vpermq          m2, m2, 11011000b
+
+    pand            m2, [pb_1]
+    por             m2, m3
+
+    movu            m3, [r2]                ; buff1
+
+    paddb           m3, m2
+    paddb           m3, [pb_2]              ; edgeType
+    pshufb          m0, m5, m3
+
+    pmovsxbw        m3, xm0
+    vextracti128    xm0, m0, 1
+    pmovsxbw        m6, xm0
+
+    pxor            m0, m0
+    paddw           m1, m3
+    paddw           m7, m6
+    pmaxsw          m1, m0                  ; clamp to [0, pw_pixel_max]
+    pmaxsw          m7, m0
+    pminsw          m1, [pw_pixel_max]
+    pminsw          m7, [pw_pixel_max]
+    movu            [r0], m1
+    movu            [r0 + 32], m7
+
+    psubb           m0, m2                  ; bufft = -signDown
+    movu            [r1], m0
+
+    add             r0, 64
+    add             r1, 32
+    add             r2, 32
+    sub             r4, 32
+    jg              .loop
+
+    movq            [r0 + r4 * 2], xm4
+    movhps          [r1 + r4], xm4
+    RET
%else ; HIGH_BIT_DEPTH
+; 8-bit, 32 pixels per iteration; pb_128 bias for signed byte compares.
cglobal saoCuOrgE2_32, 5, 6, 8, rec, bufft, buff1, offsetEo, lcuWidth
+    mov             r4d,   r4m
+    mov             r5d,   r5m
+    pxor            m0,    m0                      ; m0 = 0
+    vbroadcasti128  m7,    [pb_128]
+    vbroadcasti128  m5,    [r3]                    ; m5 = offsetEo
+    inc             r1
+    ; save tail bytes past lcuWidth for restore after the loop
+    movq            xm6,   [r0 + r4]
+    movhps          xm6,   [r1 + r4]
+
+.loop:
+    movu            m1,    [r0]                    ; m1 = rec[x]
+    movu            m2,    [r0 + r5 + 1]           ; m2 = rec[x + stride + 1]
+    pxor            m3,    m1,    m7
+    pxor            m4,    m2,    m7
+    pcmpgtb         m2,    m3,    m4
+    pcmpgtb         m4,    m3
+    pand            m2,    [pb_1]
+    por             m2,    m4                      ; signDown
+    movu            m3,    [r2]                    ; m3 = buff1
+
+    paddb           m3,    m2
+    paddb           m3,    [pb_2]                  ; m3 = edgeType
+
+    pshufb          m4,    m5,    m3
+
+    psubb           m3,    m0,    m2               ; bufft = -signDown
+    movu            [r1],  m3
+
+    ; widen, apply offsets, pack back with lane-order fixup
+    pmovzxbw        m2,    xm1
+    vextracti128    xm1,   m1,    1
+    pmovzxbw        m1,    xm1
+    pmovsxbw        m3,    xm4
+    vextracti128    xm4,   m4,    1
+    pmovsxbw        m4,    xm4
+
+    paddw           m2,    m3
+    paddw           m1,    m4
+    packuswb        m2,    m1
+    vpermq          m2,    m2,    11011000b
+    movu            [r0],  m2
+
+    add             r0,    32
+    add             r1,    32
+    add             r2,    32
+    sub             r4,    32
+    jg              .loop
+
+    movq            [r0 + r4], xm6
+    movhps          [r1 + r4], xm6
+    RET
%endif
+
+;=======================================================================================================
+;void saoCuOrgE3(pixel *rec, int8_t *upBuff1, int8_t *m_offsetEo, intptr_t stride, int startX, int endX)
+;=======================================================================================================
+; SSE4 variants of saoCuOrgE3: SAO edge-offset EO_3 (45-degree diagonal).
+; Processes [startX+1, endX); upBuff1 is written one byte back ([r1 - 1]).
INIT_XMM sse4
%if HIGH_BIT_DEPTH
cglobal saoCuOrgE3, 4,6,8
+    add             r3d, r3d                    ; stride in bytes
+    mov             r4d, r4m
+    mov             r5d, r5m
+
+    ; save latest 2 pixels for case startX=1 or left_endX=15
+    movh            m6, [r0 + r5 * 2]
+    movhps          m6, [r1 + r5 - 1]
+
+    ; move to startX+1
+    inc             r4d
+    lea             r0, [r0 + r4 * 2]           ; x = startX + 1
+    add             r1, r4
+    sub             r5d, r4d
+    pxor            m0, m0
+
+.loop:
+    movu            m7, [r0]
+    movu            m5, [r0 + 16]
+    movu            m3, [r0 + r3]
+    movu            m1, [r0 + r3 + 16]
+
+    ; signDown = sign(rec[x] - rec[x + stride]) packed to bytes
+    pcmpgtw         m2, m7, m3
+    pcmpgtw         m3, m7
+    pcmpgtw         m4, m5, m1
+    pcmpgtw         m1, m5
+    packsswb        m2, m4
+    packsswb        m3, m1
+    pand            m2, [pb_1]
+    por             m2, m3
+
+    movu            m3, [r1]                    ; m3 = m_iUpBuff1
+
+    paddb           m3, m2
+    paddb           m3, [pb_2]                  ; m3 = uiEdgeType
+
+    movu            m4, [r2]                    ; m4 = m_iOffsetEo
+    pshufb          m4, m3
+
+    psubb           m3, m0, m2                  ; upBuff1 = -signDown, shifted left
+    movu            [r1 - 1], m3
+
+    ; sign-extend offsets, apply, clamp to pixel range
+    pmovsxbw        m3, m4
+    punpckhbw       m4, m4
+    psraw           m4, 8
+
+    paddw           m7, m3
+    paddw           m5, m4
+    pmaxsw          m7, m0
+    pmaxsw          m5, m0
+    pminsw          m7, [pw_pixel_max]
+    pminsw          m5, [pw_pixel_max]
+    movu            [r0], m7
+    movu            [r0 + 16], m5
+
+    add             r0, 32
+    add             r1, 16
+
+    sub             r5, 16
+    jg             .loop
+
+    ; restore last pixels (up to 2)
+    movh            [r0 + r5 * 2], m6
+    movhps          [r1 + r5 - 1], m6
+    RET
%else ; HIGH_BIT_DEPTH
+; 8-bit path. Sign via saturating subtract: psubusb gives max(a-b, 0) and
+; max(b-a, 0); pcmpeqb against zero plus pabsb/pandn combine to
+; sign(rec[x] - rec[x + stride]) in {-1, 0, 1} with the equal case masked out.
cglobal saoCuOrgE3, 3,6,8
+    mov             r3d, r3m
+    mov             r4d, r4m
+    mov             r5d, r5m
+
+    ; save latest 2 pixels for case startX=1 or left_endX=15
+    movh            m7, [r0 + r5]
+    movhps          m7, [r1 + r5 - 1]
+
+    ; move to startX+1
+    inc             r4d
+    add             r0, r4
+    add             r1, r4
+    sub             r5d, r4d
+    pxor            m0, m0                      ; m0 = 0
+    movu            m6, [pb_2]                  ; m6 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+
+.loop:
+    movu            m1, [r0]                    ; m1 = pRec[x]
+    movu            m2, [r0 + r3]               ; m2 = pRec[x + iStride]
+
+    psubusb         m3, m2, m1
+    psubusb         m4, m1, m2
+    pcmpeqb         m3, m0                      ; 0xFF where rec[x] >= rec[x+stride]
+    pcmpeqb         m4, m0                      ; 0xFF where rec[x] <= rec[x+stride]
+    pcmpeqb         m2, m1                      ; 0xFF where equal
+
+    pabsb           m3, m3                      ; -1 -> +1
+    por             m4, m3
+    pandn           m2, m4                      ; m2 = iSignDown (0 where equal)
+
+    movu            m3, [r1]                    ; m3 = m_iUpBuff1
+
+    paddb           m3, m2
+    paddb           m3, m6                      ; m3 = uiEdgeType
+
+    movu            m4, [r2]                    ; m4 = m_iOffsetEo
+    pshufb          m5, m4, m3
+
+    psubb           m3, m0, m2                  ; upBuff1 = -signDown
+    movu            [r1 - 1], m3
+
+    ; widen, add offset, saturate back to [0, 255]
+    pmovzxbw        m2, m1
+    punpckhbw       m1, m0
+    pmovsxbw        m3, m5
+    punpckhbw       m5, m5
+    psraw           m5, 8
+
+    paddw           m2, m3
+    paddw           m1, m5
+    packuswb        m2, m1
+    movu            [r0], m2
+
+    add             r0, 16
+    add             r1, 16
+
+    sub             r5, 16
+    jg             .loop
+
+    ; restore last pixels (up to 2)
+    movh            [r0 + r5], m7
+    movhps          [r1 + r5 - 1], m7
+    RET
%endif
+
+; AVX2 variants of saoCuOrgE3 (EO_3 diagonal), 16 pixels per iteration.
INIT_YMM avx2
%if HIGH_BIT_DEPTH
cglobal saoCuOrgE3, 4,6,6
+    add             r3d, r3d                    ; stride in bytes
+    mov             r4d, r4m
+    mov             r5d, r5m
+
+    ; save latest 2 pixels for case startX=1 or left_endX=15
+    movq            xm5, [r0 + r5 * 2]
+    movhps          xm5, [r1 + r5 - 1]
+
+    ; move to startX+1
+    inc             r4d
+    lea             r0, [r0 + r4 * 2]           ; x = startX + 1
+    add             r1, r4
+    sub             r5d, r4d
+    movu            xm4, [r2]                   ; offsetEo table
+
+.loop:
+    movu            m1, [r0]
+    movu            m0, [r0 + r3]
+
+    ; signDown via word compares, packed to bytes in the low xmm
+    pcmpgtw         m2, m1, m0
+    pcmpgtw         m0, m1
+    packsswb        m2, m0
+    vpermq          m0, m2, 11011101b
+    vpermq          m2, m2, 10001000b
+    pand            m2, [pb_1]
+    por             m2, m0
+
+    movu            xm0, [r1]                   ; upBuff1
+    paddb           xm0, xm2
+    paddb           xm0, [pb_2]                 ; edgeType
+
+    pshufb          xm3, xm4, xm0
+    pmovsxbw        m3, xm3
+
+    paddw           m1, m3
+    pxor            m0, m0
+    pmaxsw          m1, m0                      ; clamp to [0, pw_pixel_max]
+    pminsw          m1, [pw_pixel_max]
+    movu            [r0], m1
+
+    psubb           xm0, xm2                    ; upBuff1 = -signDown (xm0 is zero)
+    movu            [r1 - 1], xm0
+
+    add             r0, 32
+    add             r1, 16
+    sub             r5, 16
+    jg             .loop
+
+    ; restore last pixels (up to 2)
+    movq            [r0 + r5 * 2], xm5
+    movhps          [r1 + r5 - 1], xm5
+    RET
%else ; HIGH_BIT_DEPTH
+; 8-bit path: same psubusb/pcmpeqb/pabsb sign derivation as the SSE4 version.
cglobal saoCuOrgE3, 3, 6, 8
+    mov             r3d,  r3m
+    mov             r4d,  r4m
+    mov             r5d,  r5m
+
+    ; save latest 2 pixels for case startX=1 or left_endX=15
+    movq            xm7,  [r0 + r5]
+    movhps          xm7,  [r1 + r5 - 1]
+
+    ; move to startX+1
+    inc             r4d
+    add             r0,   r4
+    add             r1,   r4
+    sub             r5d,  r4d
+    pxor            xm0,  xm0                     ; xm0 = 0
+    mova            xm6,  [pb_2]                  ; xm6 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+    movu            xm5,  [r2]                    ; xm5 = m_iOffsetEo
+
+.loop:
+    movu            xm1,  [r0]                    ; xm1 = pRec[x]
+    movu            xm2,  [r0 + r3]               ; xm2 = pRec[x + iStride]
+
+    psubusb         xm3,  xm2,  xm1
+    psubusb         xm4,  xm1,  xm2
+    pcmpeqb         xm3,  xm0
+    pcmpeqb         xm4,  xm0
+    pcmpeqb         xm2,  xm1
+
+    pabsb           xm3,  xm3
+    por             xm4,  xm3
+    pandn           xm2,  xm4                     ; xm2 = iSignDown
+
+    movu            xm3,  [r1]                    ; xm3 = m_iUpBuff1
+
+    paddb           xm3,  xm2
+    paddb           xm3,  xm6                     ; xm3 = uiEdgeType
+
+    pshufb          xm4,  xm5,  xm3
+
+    psubb           xm3,  xm0,  xm2               ; upBuff1 = -signDown
+    movu            [r1 - 1],   xm3
+
+    ; widen to words in a ymm, add offsets, pack back to 16 bytes
+    pmovzxbw        m2,   xm1
+    pmovsxbw        m3,   xm4
+
+    paddw           m2,   m3
+    vextracti128    xm3,  m2,   1
+    packuswb        xm2,  xm3
+    movu            [r0], xm2
+
+    add             r0,   16
+    add             r1,   16
+
+    sub             r5,   16
+    jg             .loop
+
+    ; restore last pixels (up to 2)
+    movq            [r0 + r5],     xm7
+    movhps          [r1 + r5 - 1], xm7
+    RET
%endif
+
+; AVX2 saoCuOrgE3_32: EO_3 diagonal for 32-pixel-wide runs (32 samples/iter).
INIT_YMM avx2
%if HIGH_BIT_DEPTH
cglobal saoCuOrgE3_32, 3,6,8
+    add             r3d, r3d                    ; stride in bytes
+    mov             r4d, r4m
+    mov             r5d, r5m
+
+    ; save latest 2 pixels for case startX=1 or left_endX=15
+    movq            xm5, [r0 + r5 * 2]
+    movhps          xm5, [r1 + r5 - 1]
+
+    ; move to startX+1
+    inc             r4d
+    lea             r0, [r0 + r4 * 2]           ; x = startX + 1
+    add             r1, r4
+    sub             r5d, r4d
+    vbroadcasti128  m4, [r2]                    ; offsetEo in both lanes
+
+.loop:
+    movu            m1, [r0]
+    movu            m7, [r0 + 32]
+    movu            m0, [r0 + r3]
+    movu            m6, [r0 + r3 + 32]
+
+    ; signDown = sign(rec[x] - rec[x + stride]) for 32 samples
+    pcmpgtw         m2, m1, m0
+    pcmpgtw         m3, m7, m6
+    pcmpgtw         m0, m1
+    pcmpgtw         m6, m7
+
+    packsswb        m2, m3
+    packsswb        m0, m6
+    vpermq          m2, m2, 11011000b           ; fix lane order after in-lane pack
+    vpermq          m0, m0, 11011000b
+    pand            m2, [pb_1]
+    por             m2, m0
+
+    movu            m0, [r1]                    ; upBuff1
+    paddb           m0, m2
+    paddb           m0, [pb_2]                  ; edgeType
+
+    pshufb          m3, m4, m0
+    vextracti128    xm6, m3, 1
+    pmovsxbw        m3, xm3
+    pmovsxbw        m6, xm6
+
+    paddw           m1, m3
+    paddw           m7, m6
+    pxor            m0, m0
+    pmaxsw          m1, m0                      ; clamp to [0, pw_pixel_max]
+    pmaxsw          m7, m0
+    pminsw          m1, [pw_pixel_max]
+    pminsw          m7, [pw_pixel_max]
+    movu            [r0], m1
+    movu            [r0 + 32], m7
+
+    psubb           m0, m2                      ; upBuff1 = -signDown
+    movu            [r1 - 1], m0
+
+    add             r0, 64
+    add             r1, 32
+    sub             r5, 32
+    jg             .loop
+
+    ; restore last pixels (up to 2)
+    movq            [r0 + r5 * 2], xm5
+    movhps          [r1 + r5 - 1], xm5
+    RET
%else ; HIGH_BIT_DEPTH
+; 8-bit, 32 pixels per iteration; psubusb/pcmpeqb/pabsb sign derivation.
cglobal saoCuOrgE3_32, 3, 6, 8
+    mov             r3d,  r3m
+    mov             r4d,  r4m
+    mov             r5d,  r5m
+
+    ; save latest 2 pixels for case startX=1 or left_endX=15
+    movq            xm7,  [r0 + r5]
+    movhps          xm7,  [r1 + r5 - 1]
+
+    ; move to startX+1
+    inc             r4d
+    add             r0,   r4
+    add             r1,   r4
+    sub             r5d,  r4d
+    pxor            m0,   m0                      ; m0 = 0
+    mova            m6,   [pb_2]                  ; m6 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+    vbroadcasti128  m5,   [r2]                    ; m5 = m_iOffsetEo
+
+.loop:
+    movu            m1,   [r0]                    ; m1 = pRec[x]
+    movu            m2,   [r0 + r3]               ; m2 = pRec[x + iStride]
+
+    psubusb         m3,   m2,   m1
+    psubusb         m4,   m1,   m2
+    pcmpeqb         m3,   m0
+    pcmpeqb         m4,   m0
+    pcmpeqb         m2,   m1
+
+    pabsb           m3,   m3
+    por             m4,   m3
+    pandn           m2,   m4                      ; m2 = iSignDown
+
+    movu            m3,   [r1]                    ; m3 = m_iUpBuff1
+
+    paddb           m3,   m2
+    paddb           m3,   m6                      ; m3 = uiEdgeType
+
+    pshufb          m4,   m5,   m3
+
+    psubb           m3,   m0,   m2                ; upBuff1 = -signDown
+    movu            [r1 - 1],   m3
+
+    ; widen, apply offsets, pack back with lane-order fixup
+    pmovzxbw        m2,   xm1
+    vextracti128    xm1,  m1,   1
+    pmovzxbw        m1,   xm1
+    pmovsxbw        m3,   xm4
+    vextracti128    xm4,  m4,   1
+    pmovsxbw        m4,   xm4
+
+    paddw           m2,   m3
+    paddw           m1,   m4
+    packuswb        m2,   m1
+    vpermq          m2,   m2,   11011000b
+    movu            [r0], m2
+
+    add             r0,   32
+    add             r1,   32
+    sub             r5,   32
+    jg             .loop
+
+    ; restore last pixels (up to 2)
+    movq            [r0 + r5],     xm7
+    movhps          [r1 + r5 - 1], xm7
+    RET
%endif
+
+;=====================================================================================
+; void saoCuOrgB0(pixel* rec, const pixel* offset, int lcuWidth, int lcuHeight, int stride)
+;=====================================================================================
+; SSE4 variants of saoCuOrgB0: SAO band offset. Each pixel's top 5 bits pick
+; one of 32 offsets; the table is split into two 16-entry xmm registers and
+; selected with pshufb + pblendvb (mask = index > 15).
INIT_XMM sse4
%if HIGH_BIT_DEPTH
cglobal saoCuOrgB0, 5,7,8
+    add         r4d, r4d                ; stride in bytes (16-bit samples)
+
+    shr         r2d, 4                  ; inner loop count = lcuWidth / 16
+    movu        m3, [r1]            ; offset[0-15]
+    movu        m4, [r1 + 16]       ; offset[16-31]
+    pxor        m7, m7
+
+.loopH
+    mov         r5d, r2d
+    xor         r6,  r6
+
+.loopW
+    movu        m2, [r0 + r6]
+    movu        m5, [r0 + r6 + 16]
+    psrlw       m0, m2, (BIT_DEPTH - 5)     ; band index = pixel >> (BIT_DEPTH-5)
+    psrlw       m6, m5, (BIT_DEPTH - 5)
+    packuswb    m0, m6
+    pand        m0, [pb_31]         ; m0 = [index]
+
+    pshufb      m6, m3, m0
+    pshufb      m1, m4, m0
+    pcmpgtb     m0, [pb_15]         ; m0 = [mask]
+
+    pblendvb    m6, m6, m1, m0      ; NOTE: don't use 3 parameters style, x264 macro have some bug!
+
+    pmovsxbw    m0, m6              ; offset
+    punpckhbw   m6, m6
+    psraw       m6, 8
+
+    paddw       m2, m0
+    paddw       m5, m6
+    pmaxsw      m2, m7              ; clamp to [0, pw_pixel_max]
+    pmaxsw      m5, m7
+    pminsw      m2, [pw_pixel_max]
+    pminsw      m5, [pw_pixel_max]
+
+    movu        [r0 + r6], m2
+    movu        [r0 + r6 + 16], m5
+    add         r6d, 32
+    dec         r5d
+    jnz         .loopW
+
+    lea         r0, [r0 + r4]
+
+    dec         r3d
+    jnz         .loopH
+    RET
%else ; HIGH_BIT_DEPTH
+; 8-bit path: band index = pixel >> 3, otherwise identical table lookup.
cglobal saoCuOrgB0, 4, 7, 8
+
+    mov         r3d, r3m
+    mov         r4d, r4m
+
+    shr         r2d, 4
+    movu        m3, [r1 + 0]      ; offset[0-15]
+    movu        m4, [r1 + 16]     ; offset[16-31]
+    pxor        m7, m7            ; m7 =[0]
+.loopH
+    mov         r5d, r2d
+    xor         r6,  r6
+
+.loopW
+    movu        m2, [r0 + r6]     ; m0 = [rec]
+    psrlw       m1, m2, 3
+    pand        m1, [pb_31]       ; m1 = [index]
+    pcmpgtb     m0, m1, [pb_15]   ; m2 = [mask]
+
+    pshufb      m6, m3, m1
+    pshufb      m5, m4, m1
+
+    pblendvb    m6, m6, m5, m0    ; NOTE: don't use 3 parameters style, x264 macro have some bug!
+
+    ; widen pixels and offsets, add, saturate back to [0, 255]
+    pmovzxbw    m1, m2            ; rec
+    punpckhbw   m2, m7
+
+    pmovsxbw    m0, m6            ; offset
+    punpckhbw   m6, m6
+    psraw       m6, 8
+
+    paddw       m1, m0
+    paddw       m2, m6
+    packuswb    m1, m2
+
+    movu        [r0 + r6], m1
+    add         r6d, 16
+    dec         r5d
+    jnz         .loopW
+
+    lea         r0, [r0 + r4]
+
+    dec         r3d
+    jnz         .loopH
+    RET
%endif
+
+; AVX2 variants of saoCuOrgB0 (band offset). Two rows are processed per
+; iteration of .loopH; an odd lcuHeight leaves one row for the trailing
+; single-row loop (.loopW1).
INIT_YMM avx2
%if HIGH_BIT_DEPTH
cglobal saoCuOrgB0, 5,7,8
+    vbroadcasti128  m3, [r1]                ; offset[0-15] in both lanes
+    vbroadcasti128  m4, [r1 + 16]           ; offset[16-31] in both lanes
+    add             r4d, r4d                ; stride in bytes
+    ; r1 = 2*stride - 2*lcuWidth = advance from end of a row pair to the next
+    lea             r1, [r4 * 2]
+    sub             r1d, r2d
+    sub             r1d, r2d
+    shr             r2d, 4                  ; inner loop count = lcuWidth / 16
+    mova            m7, [pw_pixel_max]
+
+    mov             r6d, r3d                ; keep lcuHeight for the odd-row check
+    shr             r3d, 1                  ; row-pair count
+
+.loopH
+    mov             r5d, r2d
+.loopW
+    movu            m2, [r0]
+    movu            m5, [r0 + r4]
+    psrlw           m0, m2, (BIT_DEPTH - 5)     ; band index
+    psrlw           m6, m5, (BIT_DEPTH - 5)
+    packuswb        m0, m6
+    vpermq          m0, m0, 11011000b
+    pand            m0, [pb_31]         ; m0 = [index]
+
+    pshufb          m6, m3, m0
+    pshufb          m1, m4, m0
+    pcmpgtb         m0, [pb_15]         ; m0 = [mask]
+
+    pblendvb        m6, m6, m1, m0      ; NOTE: don't use 3 parameters style, x264 macro have some bug!
+
+    pmovsxbw        m0, xm6
+    vextracti128    xm6, m6, 1
+    pmovsxbw        m6, xm6
+
+    paddw           m2, m0
+    paddw           m5, m6
+    pxor            m1, m1
+    pmaxsw          m2, m1              ; clamp to [0, pw_pixel_max]
+    pmaxsw          m5, m1
+    pminsw          m2, m7
+    pminsw          m5, m7
+
+    movu            [r0], m2
+    movu            [r0 + r4], m5
+
+    add             r0, 32
+    dec             r5d
+    jnz             .loopW
+
+    add             r0, r1
+    dec             r3d
+    jnz             .loopH
+
+    ; handle the last row when lcuHeight is odd
+    test            r6b, 1
+    jz              .end
+    xor             r1, r1
+.loopW1:
+    movu            m2, [r0 + r1]
+    psrlw           m0, m2, (BIT_DEPTH - 5)
+    packuswb        m0, m0
+    vpermq          m0, m0, 10001000b
+    pand            m0, [pb_31]         ; m0 = [index]
+
+    pshufb          m6, m3, m0
+    pshufb          m1, m4, m0
+    pcmpgtb         m0, [pb_15]         ; m0 = [mask]
+
+    pblendvb        m6, m6, m1, m0      ; NOTE: don't use 3 parameters style, x264 macro have some bug!
+    pmovsxbw        m0, xm6             ; offset
+
+    paddw           m2, m0
+    pxor            m0, m0
+    pmaxsw          m2, m0
+    pminsw          m2, m7
+
+    movu            [r0 + r1], m2
+    add             r1d, 32
+    dec             r2d
+    jnz             .loopW1
+.end:
+    RET
%else ; HIGH_BIT_DEPTH
+; 8-bit path: row pair packed into one ymm (low lane row0, high lane row1).
cglobal saoCuOrgB0, 4, 7, 8
+
+    mov             r3d,        r3m
+    mov             r4d,        r4m
+    mova            m7,         [pb_31]
+    vbroadcasti128  m3,         [r1 + 0]            ; offset[0-15]
+    vbroadcasti128  m4,         [r1 + 16]           ; offset[16-31]
+    ; r6 = 2*stride - lcuWidth = advance from end of a row pair to the next
+    lea             r6,         [r4 * 2]
+    sub             r6d,        r2d
+    shr             r2d,        4
+    mov             r1d,        r3d                 ; keep lcuHeight for odd-row check
+    shr             r3d,        1
+.loopH
+    mov             r5d,        r2d
+.loopW
+    movu            xm2,        [r0]                ; m2 = [rec]
+    vinserti128     m2,         m2,  [r0 + r4],  1
+    psrlw           m1,         m2,  3              ; band index = pixel >> 3
+    pand            m1,         m7                  ; m1 = [index]
+    pcmpgtb         m0,         m1,  [pb_15]        ; m0 = [mask]
+
+    pshufb          m6,         m3,  m1
+    pshufb          m5,         m4,  m1
+
+    pblendvb        m6,         m6,  m5,  m0        ; NOTE: don't use 3 parameters style, x264 macro have some bug!
+
+    pmovzxbw        m1,         xm2                 ; rec
+    vextracti128    xm2,        m2,  1
+    pmovzxbw        m2,         xm2
+    pmovsxbw        m0,         xm6                 ; offset
+    vextracti128    xm6,        m6,  1
+    pmovsxbw        m6,         xm6
+
+    paddw           m1,         m0
+    paddw           m2,         m6
+    packuswb        m1,         m2
+    vpermq          m1,         m1,  11011000b
+
+    movu            [r0],       xm1
+    vextracti128    [r0 + r4],  m1,  1
+    add             r0,         16
+    dec             r5d
+    jnz             .loopW
+
+    add             r0,         r6
+    dec             r3d
+    jnz             .loopH
+    ; handle the last row when lcuHeight is odd
+    test            r1b,        1
+    jz              .end
+    mov             r5d,        r2d
+.loopW1
+    movu            xm2,        [r0]                ; m2 = [rec]
+    psrlw           xm1,        xm2, 3
+    pand            xm1,        xm7                 ; m1 = [index]
+    pcmpgtb         xm0,        xm1, [pb_15]        ; m0 = [mask]
+
+    pshufb          xm6,        xm3, xm1
+    pshufb          xm5,        xm4, xm1
+
+    pblendvb        xm6,        xm6, xm5, xm0       ; NOTE: don't use 3 parameters style, x264 macro have some bug!
+
+    pmovzxbw        m1,         xm2                 ; rec
+    pmovsxbw        m0,         xm6                 ; offset
+
+    paddw           m1,         m0
+    vextracti128    xm0,        m1,  1
+    packuswb        xm1,        xm0
+
+    movu            [r0],       xm1
+    add             r0,         16
+    dec             r5d
+    jnz             .loopW1
+.end
+    RET
%endif
+
+;============================================================================================================
+; void calSign(int8_t *dst, const Pixel *src1, const Pixel *src2, const int width)
+;============================================================================================================
+INIT_XMM sse4
+%if HIGH_BIT_DEPTH
+; HIGH_BIT_DEPTH SSE4 calSign: dst[x] = sign(src1[x] - src2[x]) as int8.
+; Runs width/16 + 1 iterations, deliberately over-writing past `width`;
+; m4 saves the bytes at dst[width] up-front and restores them afterwards.
cglobal calSign, 4, 7, 5
+    mova            m0, [pw_1]
+    mov             r4d, r3d
+    shr             r3d, 4
+    add             r3d, 1                  ; +1: process the partial tail too
+    mov             r5, r0                  ; remember dst start for restore math
+    movu            m4, [r0 + r4]           ; save bytes at dst[width]
+.loop
+    ; sign(a - b) = (a > b ? 1 : 0) | (b > a ? -1 : 0), packed to int8
+    movu            m1, [r1]        ; m2 = pRec[x]
+    movu            m2, [r2]        ; m3 = pTmpU[x]
+
+    pcmpgtw         m3, m1, m2
+    pcmpgtw         m2, m1
+    pand            m3, m0
+    por             m3, m2
+    packsswb        m3, m3
+    movh            [r0], xm3
+
+    movu            m1, [r1 + 16]   ; m2 = pRec[x]
+    movu            m2, [r2 + 16]   ; m3 = pTmpU[x]
+
+    pcmpgtw         m3, m1, m2
+    pcmpgtw         m2, m1
+    pand            m3, m0
+    por             m3, m2
+    packsswb        m3, m3
+    movh            [r0 + 8], xm3
+
+    add             r0, 16
+    add             r1, 32
+    add             r2, 32
+    dec             r3d
+    jnz             .loop
+
+    ; r6 = bytes written; r4 = width - written (<= 0) so [r0 + r4] = dst+width
+    mov             r6, r0
+    sub             r6, r5
+    sub             r4, r6
+    movu            [r0 + r4], m4           ; restore the over-written tail
+    RET
+%else ; HIGH_BIT_DEPTH
+
+cglobal calSign, 4,5,6
+    mova        m0,     [pb_128]
+    mova        m1,     [pb_1]
+
+    sub         r1,     r0
+    sub         r2,     r0
+
+    mov         r4d,    r3d
+    shr         r3d,    4
+    jz         .next
+.loop:
+    movu        m2,     [r0 + r1]            ; m2 = pRec[x]
+    movu        m3,     [r0 + r2]            ; m3 = pTmpU[x]
+    pxor        m4,     m2,     m0
+    pxor        m3,     m0
+    pcmpgtb     m5,     m4,     m3
+    pcmpgtb     m3,     m4
+    pand        m5,     m1
+    por         m5,     m3
+    movu        [r0],   m5
+
+    add         r0,     16
+    dec         r3d
+    jnz        .loop
+
+    ; process partial
+.next:
+    and         r4d, 15
+    jz         .end
+
+    movu        m2,     [r0 + r1]            ; m2 = pRec[x]
+    movu        m3,     [r0 + r2]            ; m3 = pTmpU[x]
+    pxor        m4,     m2,     m0
+    pxor        m3,     m0
+    pcmpgtb     m5,     m4,     m3
+    pcmpgtb     m3,     m4
+    pand        m5,     m1
+    por         m5,     m3
+
+    lea         r3,     [pb_movemask + 16]
+    sub         r3,     r4
+    movu        xmm0,   [r3]
+    movu        m3,     [r0]
+    pblendvb    m5,     m5,     m3,     xmm0
+    movu        [r0],   m5
+
+.end:
+    RET
+%endif
+
+INIT_YMM avx2
+%if HIGH_BIT_DEPTH
+cglobal calSign, 4, 7, 5
+    mova            m0, [pw_1]
+    mov             r4d, r3d
+    shr             r3d, 4              ; 16 pixels per iteration
+    add             r3d, 1
+    mov             r5, r0
+    movu            m4, [r0 + r4]       ; save the bytes at dst+width (loop overruns; restored below)
+
+.loop
+    movu            m1, [r1]        ; m1 = src1[x]
+    movu            m2, [r2]        ; m2 = src2[x]
+
+    pcmpgtw         m3, m1, m2      ; words = -1 where src1 > src2
+    pcmpgtw         m2, m1          ; words = -1 where src2 > src1
+
+    pand            m3, m0          ; words = +1 where src1 > src2
+    por             m3, m2          ; signOf(src1[x] - src2[x]): +1 / 0 / -1
+    packsswb        m3, m3
+    vpermq          m3, m3, q3220   ; gather the valid bytes of both 128-bit lanes into xm3
+    movu            [r0 ], xm3
+
+    add             r0, 16
+    add             r1, 32
+    add             r2, 32
+    dec             r3d
+    jnz             .loop
+
+    mov             r6, r0
+    sub             r6, r5              ; r6 = bytes actually written (>= width)
+    sub             r4, r6              ; r4 = width - written (<= 0)
+    movu            [r0 + r4], m4       ; restore the bytes beyond dst+width
+    RET
+%else ; HIGH_BIT_DEPTH
+
+cglobal calSign, 4, 5, 6
+    vbroadcasti128  m0,     [pb_128]
+    mova            m1,     [pb_1]
+
+    sub             r1,     r0           ; address src1/src2 as offsets from the dst pointer
+    sub             r2,     r0
+
+    mov             r4d,    r3d
+    shr             r3d,    5            ; 32 pixels per iteration
+    jz              .next
+.loop:
+    movu            m2,     [r0 + r1]            ; m2 = src1[x]
+    movu            m3,     [r0 + r2]            ; m3 = src2[x]
+    pxor            m4,     m2,     m0           ; bias by 128 so the signed byte compare acts unsigned
+    pxor            m3,     m0
+    pcmpgtb         m5,     m4,     m3
+    pcmpgtb         m3,     m4
+    pand            m5,     m1
+    por             m5,     m3                   ; signOf(src1[x] - src2[x]): +1 / 0 / -1
+    movu            [r0],   m5
+
+    add             r0,     mmsize
+    dec             r3d
+    jnz             .loop
+
+    ; process partial
+.next:
+    and             r4d,    31
+    jz              .end
+
+    movu            m2,     [r0 + r1]            ; m2 = src1[x]
+    movu            m3,     [r0 + r2]            ; m3 = src2[x]
+    pxor            m4,     m2,     m0
+    pxor            m3,     m0
+    pcmpgtb         m5,     m4,     m3
+    pcmpgtb         m3,     m4
+    pand            m5,     m1
+    por             m5,     m3
+
+    lea             r3,     [pb_movemask_32 + 32]
+    sub             r3,     r4                   ; build a blend mask covering only the (width & 31) tail lanes
+    movu            m0,     [r3]
+    movu            m3,     [r0]
+    pblendvb        m5,     m5,     m3,     m0   ; keep original dst bytes past the tail
+    movu            [r0],   m5
+
+.end:
+    RET
+%endif
+
+;--------------------------------------------------------------------------------------------------------------------------
+; saoCuStatsBO_c(const pixel *fenc, const pixel *rec, intptr_t stride, int endX, int endY, int32_t *stats, int32_t *count)
+;--------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64
+INIT_XMM sse4
+cglobal saoCuStatsBO, 7,12,6
+    mova        m3, [hmul_16p + 16] ; pmaddubsw weights -> fenc[x] - rec[x]
+    mova        m4, [pb_124]        ; mask 0x7C, see index math below
+    mova        m5, [pb_4]
+    xor         r7d, r7d            ; high bits must stay clear: r7b extracted, r7 used as index
+
+.loopH:
+    mov         r10, r0             ; r10 = fenc row
+    mov         r11, r1             ; r11 = rec row
+    mov         r9d, r3d            ; r9d = remaining pixels in this row (endX)
+.loopL:
+    movu        m1, [r11]
+    movu        m0, [r10]
+
+    punpckhbw   m2, m0, m1          ; interleave fenc/rec, high 8 pixels
+    punpcklbw   m0, m1              ; interleave fenc/rec, low 8 pixels
+    psrlw       m1, 1               ; rec[x] >> 1; with the mask below == (rec[x] >> boShift) * 4
+    pmaddubsw   m2, m3              ; fenc[x] - rec[x], high 8
+    pmaddubsw   m0, m3              ; fenc[x] - rec[x], low 8
+    pand        m1, m4              ; (rec >> 1) & 0x7C == (rec >> 3) * sizeof(int32)
+    paddb       m1, m5              ; NOTE(review): +4 biases the scaled index by one int32 slot - verify against caller's count[]/stats[] layout
+
+%assign x 0
+%rep 16
+    pextrb      r7d, m1, x          ; r7 = classIdx * 4
+
+%if (x < 8)
+    pextrw      r8d, m0, (x % 8)
+%else
+    pextrw      r8d, m2, (x % 8)
+%endif
+    movsx       r8d, r8w            ; sign-extend fenc[x] - rec[x]
+    inc         dword  [r6 + r7]    ; count[classIdx]++
+    add         [r5 + r7], r8d      ; stats[classIdx] += (fenc[x] - rec[x]);
+    dec         r9d
+    jz          .next
+%assign x x+1
+%endrep
+
+    add         r10, 16
+    add         r11, 16
+    jmp         .loopL
+
+.next:
+    add         r0, r2              ; advance both bases one stride to the next row
+    add         r1, r2
+    dec         r4d
+    jnz         .loopH
+    RET
+%endif
+
+;-----------------------------------------------------------------------------------------------------------------------
+; saoCuStatsE0(const pixel *fenc, const pixel *rec, intptr_t stride, int endX, int endY, int32_t *stats, int32_t *count)
+;-----------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64
+INIT_XMM sse4
+cglobal saoCuStatsE0, 5,9,8, 0-32   ; stack: 5 x int16 tmp_count, then 5 x int32 tmp_stats
+    mov         r3d, r3m
+    mov         r8, r5mp            ; r8 = stats
+
+    ; clear internal temporary buffer
+    pxor        m0, m0
+    mova        [rsp], m0
+    mova        [rsp + mmsize], m0
+    mova        m4, [pb_128]        ; bias for unsigned byte compares
+    mova        m5, [hmul_16p + 16] ; pmaddubsw weights -> fenc[x] - rec[x]
+    mova        m6, [pb_2]
+    xor         r7d, r7d            ; high bits must stay clear: r7b written, r7 used as index
+
+.loopH:
+    mov         r5d, r3d
+
+    ; calculate signLeft
+    mov         r7b, [r1]
+    sub         r7b, [r1 - 1]
+    seta        r7b                 ; 1 if rec[0] > rec[-1] (unsigned)
+    setb        r6b                 ; 1 if rec[0] < rec[-1] (borrow)
+    sub         r7b, r6b            ; signOf(rec[0] - rec[-1])
+    neg         r7b                 ; stored pre-negated; psignb below flips it back
+    pinsrb      m0, r7d, 15         ; seed lane 15 for the palignr below
+
+.loopL:
+    movu        m7, [r1]
+    movu        m2, [r1 + 1]
+
+    pxor        m1, m7, m4          ; bias by 128 so signed byte compares act unsigned
+    pxor        m3, m2, m4
+    pcmpgtb     m2, m1, m3
+    pcmpgtb     m3, m1
+    pand        m2, [pb_1]
+    por         m2, m3              ; signRight
+
+    palignr     m3, m2, m0, 15      ; previous signRight, shifted along by one pixel
+    psignb      m3, m4              ; signLeft = -signRight[x - 1]
+
+    mova        m0, m2              ; keep signRight for the next iteration
+    paddb       m2, m3
+    paddb       m2, m6              ; edgeType
+
+    ; stats[edgeType]
+    movu        m3, [r0]            ; fenc[0-15]
+    punpckhbw   m1, m3, m7
+    punpcklbw   m3, m7
+    pmaddubsw   m1, m5              ; fenc[x] - rec[x], high 8
+    pmaddubsw   m3, m5              ; fenc[x] - rec[x], low 8
+
+%assign x 0
+%rep 16
+    pextrb      r7d, m2, x
+
+%if (x < 8)
+    pextrw      r6d, m3, (x % 8)
+%else
+    pextrw      r6d, m1, (x % 8)
+%endif
+    movsx       r6d, r6w
+    inc         word [rsp + r7 * 2]             ; tmp_count[edgeType]++
+    add         [rsp + 5 * 2 + r7 * 4], r6d     ; tmp_stats[edgeType] += (fenc[x] - rec[x])
+    dec         r5d
+    jz          .next
+%assign x x+1
+%endrep
+
+    add         r0q, 16
+    add         r1q, 16
+    jmp         .loopL
+
+.next:
+    mov         r6d, r3d
+    and         r6d, 15
+
+    sub         r6, r3              ; NOTE(review): rewinds the 16-byte chunks consumed; appears to assume endX is not a multiple of 16 - verify callers
+    add         r6, r2              ; back to row start, then advance one stride
+    add         r0, r6
+    add         r1, r6
+
+    dec         r4d
+    jnz         .loopH
+
+    ; sum to global buffer
+    mov         r0, r6mp            ; r0 = count
+
+    ; s_eoTable = {1, 2, 0, 3, 4}
+    movzx       r5d, word [rsp + 0 * 2]
+    add         [r0 + 1 * 4], r5d
+    movzx       r6d, word [rsp + 1 * 2]
+    add         [r0 + 2 * 4], r6d
+    movzx       r5d, word [rsp + 2 * 2]
+    add         [r0 + 0 * 4], r5d
+    movzx       r6d, word [rsp + 3 * 2]
+    add         [r0 + 3 * 4], r6d
+    movzx       r5d, word [rsp + 4 * 2]
+    add         [r0 + 4 * 4], r5d
+
+    mov         r6d, [rsp + 5 * 2 + 0 * 4]
+    add         [r8 + 1 * 4], r6d
+    mov         r5d, [rsp + 5 * 2 + 1 * 4]
+    add         [r8 + 2 * 4], r5d
+    mov         r6d, [rsp + 5 * 2 + 2 * 4]
+    add         [r8 + 0 * 4], r6d
+    mov         r5d, [rsp + 5 * 2 + 3 * 4]
+    add         [r8 + 3 * 4], r5d
+    mov         r6d, [rsp + 5 * 2 + 4 * 4]
+    add         [r8 + 4 * 4], r6d
+    RET
+%endif
+
+;-------------------------------------------------------------------------------------------------------------------------------------------
+; saoCuStatsE1_c(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count)
+;-------------------------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64
+INIT_XMM sse4
+cglobal saoCuStatsE1, 4,12,9,0-32    ; stack: 5 x int16 tmp_count, then 5 x int32 tmp_stats
+    mov         r5d, r5m            ; r5d = endY
+    mov         r4d, r4m            ; r4d = endX
+    mov         r11d, r5d           ; NOTE(review): this copy looks dead - r11 is reloaded in .loopH
+
+    ; clear internal temporary buffer
+    pxor        m0, m0
+    mova        [rsp], m0
+    mova        [rsp + mmsize], m0
+    mova        m0, [pb_128]        ; bias for unsigned byte compares
+    mova        m5, [pb_1]
+    mova        m6, [pb_2]
+    mova        m8, [hmul_16p + 16] ; pmaddubsw weights -> fenc[x] - rec[x]
+    movh        m7, [r3 + r4]       ; save upBuff1[endX..] (loop may overwrite; restored below)
+
+.loopH:
+    mov         r6d, r4d            ; r6d = remaining pixels in this row
+    mov         r9, r0              ; r9 = fenc row
+    mov         r10, r1             ; r10 = rec row
+    mov         r11, r3             ; r11 = upBuff1
+
+.loopW:
+    movu        m1, [r10]
+    movu        m2, [r10 + r2]
+
+    ; signDown
+    pxor        m1, m0              ; bias by 128 so signed byte compares act unsigned
+    pxor        m2, m0
+    pcmpgtb     m3, m1, m2
+    pand        m3, m5
+    pcmpgtb     m2, m1
+    por         m2, m3              ; signOf(rec[x] - rec[x + stride])
+    pxor        m3, m3
+    psubb       m3, m2      ; -signDown
+
+    ; edgeType
+    movu        m4, [r11]
+    paddb       m4, m6
+    paddb       m2, m4
+
+    ; update upBuff1
+    movu        [r11], m3
+
+    ; stats[edgeType]
+    pxor        m1, m0              ; undo the 128 bias: m1 = rec[x] again
+    movu        m3, [r9]
+    punpckhbw   m4, m3, m1
+    punpcklbw   m3, m1
+    pmaddubsw   m3, m8              ; fenc[x] - rec[x], low 8
+    pmaddubsw   m4, m8              ; fenc[x] - rec[x], high 8
+
+    ; 16 pixels
+%assign x 0
+%rep 16
+    pextrb      r7d, m2, x
+    inc         word [rsp + r7 * 2]             ; tmp_count[edgeType]++
+
+  %if (x < 8)
+    pextrw      r8d, m3, (x % 8)
+  %else
+    pextrw      r8d, m4, (x % 8)
+  %endif
+    movsx       r8d, r8w
+    add         [rsp + 5 * 2 + r7 * 4], r8d     ; tmp_stats[edgeType] += (fenc[x] - rec[x])
+
+    dec         r6d
+    jz         .next
+%assign x x+1
+%endrep
+
+    add         r9, 16
+    add         r10, 16
+    add         r11, 16
+    jmp         .loopW
+
+.next:
+    ; advance to the next row (upBuff1 is reused in place)
+    add         r0, r2
+    add         r1, r2
+
+    dec         r5d
+    jg         .loopH
+
+    ; restore unavailable pixels
+    movh        [r3 + r4], m7
+
+    ; sum to global buffer
+    mov         r1, r6m             ; r1 = stats
+    mov         r0, r7m             ; r0 = count
+
+    ; s_eoTable = {1,2,0,3,4}
+    movzx       r6d, word [rsp + 0 * 2]
+    add         [r0 + 1 * 4], r6d
+    movzx       r6d, word [rsp + 1 * 2]
+    add         [r0 + 2 * 4], r6d
+    movzx       r6d, word [rsp + 2 * 2]
+    add         [r0 + 0 * 4], r6d
+    movzx       r6d, word [rsp + 3 * 2]
+    add         [r0 + 3 * 4], r6d
+    movzx       r6d, word [rsp + 4 * 2]
+    add         [r0 + 4 * 4], r6d
+
+    mov         r6d, [rsp + 5 * 2 + 0 * 4]
+    add         [r1 + 1 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 1 * 4]
+    add         [r1 + 2 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 2 * 4]
+    add         [r1 + 0 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 3 * 4]
+    add         [r1 + 3 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 4 * 4]
+    add         [r1 + 4 * 4], r6d
+    RET
+%endif ; ARCH_X86_64
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/loopfilter.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,48 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Dnyaneshwar Gorade <dnyaneshwar@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_LOOPFILTER_H
+#define X265_LOOPFILTER_H
+
+/* Declares the assembly SAO / loop-filter entry points for one cpu suffix. */
+#define DECL_SAO(cpu) \
+    void PFX(saoCuOrgE0_ ## cpu)(pixel * rec, int8_t * offsetEo, int endX, int8_t* signLeft, intptr_t stride); \
+    void PFX(saoCuOrgE1_ ## cpu)(pixel* rec, int8_t* upBuff1, int8_t* offsetEo, intptr_t stride, int width); \
+    void PFX(saoCuOrgE1_2Rows_ ## cpu)(pixel* rec, int8_t* upBuff1, int8_t* offsetEo, intptr_t stride, int width); \
+    void PFX(saoCuOrgE2_ ## cpu)(pixel* rec, int8_t* pBufft, int8_t* pBuff1, int8_t* offsetEo, int lcuWidth, intptr_t stride); \
+    void PFX(saoCuOrgE2_32_ ## cpu)(pixel* rec, int8_t* pBufft, int8_t* pBuff1, int8_t* offsetEo, int lcuWidth, intptr_t stride); \
+    void PFX(saoCuOrgE3_ ## cpu)(pixel *rec, int8_t *upBuff1, int8_t *m_offsetEo, intptr_t stride, int startX, int endX); \
+    void PFX(saoCuOrgE3_32_ ## cpu)(pixel *rec, int8_t *upBuff1, int8_t *m_offsetEo, intptr_t stride, int startX, int endX); \
+    void PFX(saoCuOrgB0_ ## cpu)(pixel* rec, const int8_t* offsetBo, int ctuWidth, int ctuHeight, intptr_t stride); \
+    void PFX(saoCuStatsBO_ ## cpu)(const pixel *fenc, const pixel *rec, intptr_t stride, int endX, int endY, int32_t *stats, int32_t *count); \
+    void PFX(saoCuStatsE0_ ## cpu)(const pixel *fenc, const pixel *rec, intptr_t stride, int endX, int endY, int32_t *stats, int32_t *count); \
+    void PFX(saoCuStatsE1_ ## cpu)(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count); \
+    void PFX(saoCuStatsE2_ ## cpu)(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int8_t *upBufft, int endX, int endY, int32_t *stats, int32_t *count); \
+    void PFX(saoCuStatsE3_ ## cpu)(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count); \
+    void PFX(calSign_ ## cpu)(int8_t *dst, const pixel *src1, const pixel *src2, const int endX);
+
+DECL_SAO(sse4);
+DECL_SAO(avx2);
+
+#endif // ifndef X265_LOOPFILTER_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/mc-a.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,5725 @@
+;*****************************************************************************
+;* mc-a.asm: x86 motion compensation
+;*****************************************************************************
+;* Copyright (C) 2003-2013 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*          Fiona Glaser <fiona@x264.com>
+;*          Laurent Aimar <fenrir@via.ecp.fr>
+;*          Dylan Yudaken <dyudaken@gmail.com>
+;*          Holger Lubitz <holger@lubitz.org>
+;*          Min Chen <chenm001@163.com>
+;*          Oskar Arvidsson <oskar@irock.se>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+%if BIT_DEPTH==8
+    %define ADDAVG_FACTOR       256
+    %define ADDAVG_ROUND        128
+%elif BIT_DEPTH==10
+    %define ADDAVG_FACTOR       1024
+    %define ADDAVG_ROUND        512
+%elif BIT_DEPTH==12
+    %define ADDAVG_FACTOR       4096
+    %define ADDAVG_ROUND        2048
+%else
+    %error Unsupported bit depth!
+%endif
+
+SECTION_RODATA 32
+
+ch_shuf: times 2 db 0,2,2,4,4,6,6,8,1,3,3,5,5,7,7,9
+ch_shuf_adj: times 8 db 0
+             times 8 db 2
+             times 8 db 4
+             times 8 db 6
+sq_1: times 1 dq 1
+
+SECTION .text
+
+cextern pb_0
+cextern pw_1
+cextern pw_4
+cextern pw_8
+cextern pw_32
+cextern pw_64
+cextern pw_128
+cextern pw_256
+cextern pw_512
+cextern pw_1023
+cextern pw_1024
+cextern pw_2048
+cextern pw_4096
+cextern pw_00ff
+cextern pw_pixel_max
+cextern pd_32
+cextern pd_64
+
+;====================================================================================================================
+;void addAvg (int16_t* src0, int16_t* src1, pixel* dst, intptr_t src0Stride, intptr_t src1Stride, intptr_t dstStride)
+;====================================================================================================================
+; r0 = pSrc0,    r1 = pSrc1
+; r2 = pDst,     r3 = iStride0
+; r4 = iStride1, r5 = iDstStride
+%if HIGH_BIT_DEPTH
+INIT_XMM sse4
+cglobal addAvg_2x4, 6,6,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    add           r3,          r3            ; strides are in 16-bit element units -> bytes
+    add           r4,          r4
+    add           r5,          r5
+
+    movd          m1,          [r0]
+    movd          m2,          [r0 + r3]
+    movd          m3,          [r1]
+    movd          m4,          [r1 + r4]
+
+    punpckldq     m1,          m2
+    punpckldq     m3,          m4
+
+    lea           r0,          [r0 + 2 * r3]
+    lea           r1,          [r1 + 2 * r4]
+
+    movd          m2,          [r0]
+    movd          m4,          [r0 + r3]
+    movd          m5,          [r1]
+    movd          m0,          [r1 + r4]
+    punpckldq     m2,          m4
+    punpckldq     m5,          m0
+    punpcklqdq    m1,          m2            ; all 4 rows of src0 in one register
+    punpcklqdq    m3,          m5            ; all 4 rows of src1
+    paddw         m1,          m3
+    pmulhrsw      m1,          [pw_ %+ ADDAVG_FACTOR]   ; scale the sum down with rounding
+    paddw         m1,          [pw_ %+ ADDAVG_ROUND]    ; re-bias into the pixel range
+
+    pxor          m0,          m0
+    pmaxsw        m1,          m0            ; clamp to [0, pixel_max]
+    pminsw        m1,          [pw_pixel_max]
+    movd          [r2],        m1
+    pextrd        [r2 + r5],   m1, 1
+    lea           r2,          [r2 + 2 * r5]
+    pextrd        [r2],        m1, 2
+    pextrd        [r2 + r5],   m1, 3
+    RET
+
+
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal addAvg_2x8, 6,6,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova          m0,          [pw_ %+ ADDAVG_ROUND]
+    pxor          m7,          m7
+    add           r3,          r3            ; strides are in 16-bit element units -> bytes
+    add           r4,          r4
+    add           r5,          r5
+
+%rep 2
+    movd          m1,          [r0]
+    movd          m2,          [r0 + r3]
+    movd          m3,          [r1]
+    movd          m4,          [r1 + r4]
+
+    punpckldq     m1,          m2
+    punpckldq     m3,          m4
+
+    lea           r0,          [r0 + 2 * r3]
+    lea           r1,          [r1 + 2 * r4]
+
+    movd          m2,          [r0]
+    movd          m4,          [r0 + r3]
+    movd          m5,          [r1]
+    movd          m6,          [r1 + r4]
+
+    punpckldq     m2,          m4
+    punpckldq     m5,          m6
+    punpcklqdq    m1,          m2            ; 4 rows of src0
+    punpcklqdq    m3,          m5            ; 4 rows of src1
+    paddw         m1,          m3
+    pmulhrsw      m1,          [pw_ %+ ADDAVG_FACTOR]   ; scale the sum down with rounding
+    paddw         m1,          m0            ; re-bias into the pixel range
+
+    pmaxsw        m1,          m7            ; clamp to [0, pixel_max]
+    pminsw        m1,          [pw_pixel_max]
+    movd          [r2],        m1
+    pextrd        [r2 + r5],   m1, 1
+    lea           r2,          [r2 + 2 * r5]
+    pextrd        [r2],        m1, 2
+    pextrd        [r2 + r5],   m1, 3
+
+    lea           r0,          [r0 + 2 * r3]
+    lea           r1,          [r1 + 2 * r4]
+    lea           r2,          [r2 + 2 * r5]
+%endrep
+    RET
+
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal addAvg_2x16, 6,7,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m6,         [pw_pixel_max]
+    mova        m7,         [pw_ %+ ADDAVG_FACTOR]
+    mov         r6d,        16/4            ; 4 rows per iteration
+    add         r3,         r3              ; strides are in 16-bit element units -> bytes
+    add         r4,         r4
+    add         r5,         r5
+.loop:
+    movd        m1,         [r0]
+    movd        m2,         [r0 + r3]
+    movd        m3,         [r1]
+    movd        m4,         [r1 + r4]
+    lea         r0,         [r0 + r3 * 2]
+    lea         r1,         [r1 + r4 * 2]
+    punpckldq   m1,         m2
+    punpckldq   m3,         m4
+    movd        m2,         [r0]
+    movd        m4,         [r0 + r3]
+    movd        m5,         [r1]
+    movd        m0,         [r1 + r4]
+    lea         r0,         [r0 + r3 * 2]
+    lea         r1,         [r1 + r4 * 2]
+    punpckldq   m2,         m4
+    punpckldq   m5,         m0
+    punpcklqdq  m1,         m2              ; 4 rows of src0
+    punpcklqdq  m3,         m5              ; 4 rows of src1
+    paddw       m1,         m3
+    pmulhrsw    m1,         m7              ; scale the sum down with rounding
+    paddw       m1,         [pw_ %+ ADDAVG_ROUND]   ; re-bias into the pixel range
+    pxor        m0,         m0
+    pmaxsw      m1,         m0              ; clamp to [0, pixel_max]
+    pminsw      m1,         m6
+    movd        [r2],       m1
+    pextrd      [r2 + r5],  m1, 1
+    lea         r2,         [r2 + r5 * 2]
+    pextrd      [r2],       m1, 2
+    pextrd      [r2 + r5],  m1, 3
+    lea         r2,         [r2 + r5 * 2]
+    dec         r6d
+    jnz         .loop
+    RET
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal addAvg_4x2, 6,6,7, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    add            r3,          r3           ; strides are in 16-bit element units -> bytes
+    add            r4,          r4
+    add            r5,          r5
+
+    movh           m0,          [r0]
+    movh           m1,          [r0 + r3]
+    movh           m2,          [r1]
+    movh           m3,          [r1 + r4]
+
+    punpcklqdq     m0,          m1           ; both rows of src0
+    punpcklqdq     m2,          m3           ; both rows of src1
+    paddw          m0,          m2
+    pmulhrsw       m0,          [pw_ %+ ADDAVG_FACTOR]  ; scale the sum down with rounding
+    paddw          m0,          [pw_ %+ ADDAVG_ROUND]   ; re-bias into the pixel range
+
+    pxor           m6,          m6
+    pmaxsw         m0,          m6           ; clamp to [0, pixel_max]
+    pminsw         m0,          [pw_pixel_max]
+    movh           [r2],        m0
+    movhps         [r2 + r5],   m0
+    RET
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal addAvg_6x8, 6,6,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,             [pw_ %+ ADDAVG_ROUND]
+    mova        m5,             [pw_pixel_max]
+    mova        m7,             [pw_ %+ ADDAVG_FACTOR]
+    pxor        m6,             m6
+    add         r3,             r3          ; strides are in 16-bit element units -> bytes
+    add         r4,             r4
+    add         r5,             r5
+
+%rep 4
+    movu        m0,             [r0]
+    movu        m2,             [r1]
+    paddw       m0,             m2
+    pmulhrsw    m0,             m7          ; scale the sum down with rounding
+    paddw       m0,             m4          ; re-bias into the pixel range
+
+    pmaxsw      m0,             m6          ; clamp to [0, pixel_max]
+    pminsw      m0,             m5
+    movh        [r2],           m0          ; store 4 + 2 pixels per row
+    pextrd      [r2 + 8],       m0, 2
+
+    movu        m1,             [r0 + r3]
+    movu        m3,             [r1 + r4]
+    paddw       m1,             m3
+    pmulhrsw    m1,             m7
+    paddw       m1,             m4
+
+    pmaxsw      m1,             m6
+    pminsw      m1,             m5
+    movh        [r2 + r5],      m1
+    pextrd      [r2 + r5 + 8],  m1, 2
+
+    lea         r2,             [r2 + 2 * r5]
+    lea         r0,             [r0 + 2 * r3]
+    lea         r1,             [r1 + 2 * r4]
+%endrep
+    RET
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal addAvg_6x16, 6,7,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,             [pw_ %+ ADDAVG_ROUND]
+    mova        m5,             [pw_pixel_max]
+    mova        m7,             [pw_ %+ ADDAVG_FACTOR]
+    pxor        m6,             m6
+    mov         r6d,            16/2        ; 2 rows per iteration
+    add         r3,             r3          ; strides are in 16-bit element units -> bytes
+    add         r4,             r4
+    add         r5,             r5
+.loop:
+    movu        m0,             [r0]
+    movu        m2,             [r1]
+    movu        m1,             [r0 + r3]
+    movu        m3,             [r1 + r4]
+    dec         r6d
+    lea         r0,             [r0 + r3 * 2]
+    lea         r1,             [r1 + r4 * 2]
+    paddw       m0,             m2
+    paddw       m1,             m3
+    pmulhrsw    m0,             m7          ; scale the sums down with rounding
+    pmulhrsw    m1,             m7
+    paddw       m0,             m4          ; re-bias into the pixel range
+    paddw       m1,             m4
+    pmaxsw      m0,             m6          ; clamp to [0, pixel_max]
+    pmaxsw      m1,             m6
+    pminsw      m0,             m5
+    pminsw      m1,             m5
+    movh        [r2],           m0          ; store 4 + 2 pixels per row
+    pextrd      [r2 + 8],       m0, 2
+    movh        [r2 + r5],      m1
+    pextrd      [r2 + r5 + 8],  m1, 2
+    lea         r2,             [r2 + r5 * 2]
+    jnz         .loop
+    RET
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal addAvg_8x2, 6,6,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,          [pw_ %+ ADDAVG_ROUND]
+    mova        m5,          [pw_pixel_max]
+    mova        m7,          [pw_ %+ ADDAVG_FACTOR]
+    pxor        m6,          m6
+    add         r3,          r3             ; strides are in 16-bit element units -> bytes
+    add         r4,          r4
+    add         r5,          r5
+
+    movu        m0,          [r0]
+    movu        m2,          [r1]
+    paddw       m0,          m2
+    pmulhrsw    m0,          m7             ; scale the sum down with rounding
+    paddw       m0,          m4             ; re-bias into the pixel range
+
+    pmaxsw      m0,          m6             ; clamp to [0, pixel_max]
+    pminsw      m0,          m5
+    movu        [r2],        m0
+
+    movu        m1,          [r0 + r3]
+    movu        m3,          [r1 + r4]
+    paddw       m1,          m3
+    pmulhrsw    m1,          m7
+    paddw       m1,          m4
+
+    pmaxsw      m1,          m6
+    pminsw      m1,          m5
+    movu        [r2 + r5],   m1
+    RET
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal addAvg_8x6, 6,6,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,          [pw_ %+ ADDAVG_ROUND]
+    mova        m5,          [pw_pixel_max]
+    mova        m7,          [pw_ %+ ADDAVG_FACTOR]
+    pxor        m6,          m6
+    add         r3,          r3             ; strides are in 16-bit element units -> bytes
+    add         r4,          r4
+    add         r5,          r5
+
+%rep 3
+    movu        m0,          [r0]
+    movu        m2,          [r1]
+    paddw       m0,          m2
+    pmulhrsw    m0,          m7             ; scale the sum down with rounding
+    paddw       m0,          m4             ; re-bias into the pixel range
+
+    pmaxsw      m0,          m6             ; clamp to [0, pixel_max]
+    pminsw      m0,          m5
+    movu        [r2],        m0
+
+    movu        m1,          [r0 + r3]
+    movu        m3,          [r1 + r4]
+    paddw       m1,          m3
+    pmulhrsw    m1,          m7
+    paddw       m1,          m4
+
+    pmaxsw      m1,          m6
+    pminsw      m1,          m5
+    movu        [r2 + r5],   m1
+
+    lea         r2,          [r2 + 2 * r5]
+    lea         r0,          [r0 + 2 * r3]
+    lea         r1,          [r1 + 2 * r4]
+%endrep
+    RET
+
+;-----------------------------------------------------------------------------
+%macro ADDAVG_W4_H4 1
+INIT_XMM sse4
+cglobal addAvg_4x%1, 6,7,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova           m4,          [pw_ %+ ADDAVG_ROUND]
+    mova           m5,          [pw_pixel_max]
+    mova           m7,          [pw_ %+ ADDAVG_FACTOR]
+    pxor           m6,          m6
+    add            r3,          r3          ; strides are in 16-bit element units -> bytes
+    add            r4,          r4
+    add            r5,          r5
+
+    mov            r6d,         %1/4        ; 4 rows per iteration
+
+.loop:
+%rep 2
+    movh           m0,          [r0]
+    movh           m1,          [r0 + r3]
+    movh           m2,          [r1]
+    movh           m3,          [r1 + r4]
+
+    punpcklqdq     m0,          m1          ; two rows of src0
+    punpcklqdq     m2,          m3          ; two rows of src1
+
+    paddw          m0,          m2
+    pmulhrsw       m0,          m7          ; scale the sum down with rounding
+    paddw          m0,          m4          ; re-bias into the pixel range
+
+    pmaxsw         m0,          m6          ; clamp to [0, pixel_max]
+    pminsw         m0,          m5
+
+    movh           [r2],        m0
+    movhps         [r2 + r5],   m0
+
+    lea            r2,          [r2 + 2 * r5]
+    lea            r0,          [r0 + 2 * r3]
+    lea            r1,          [r1 + 2 * r4]
+%endrep
+
+    dec            r6d
+    jnz            .loop
+    RET
+%endmacro
+
+ADDAVG_W4_H4 4
+ADDAVG_W4_H4 8
+ADDAVG_W4_H4 16
+
+ADDAVG_W4_H4 32
+
+;-----------------------------------------------------------------------------
+%macro ADDAVG_W8_H4 1
+INIT_XMM sse4
+cglobal addAvg_8x%1, 6,7,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,          [pw_ %+ ADDAVG_ROUND]
+    mova        m5,          [pw_pixel_max]
+    mova        m7,          [pw_ %+ ADDAVG_FACTOR]
+    pxor        m6,          m6
+    add         r3,          r3             ; strides are in 16-bit element units -> bytes
+    add         r4,          r4
+    add         r5,          r5
+    mov         r6d,         %1/4           ; 4 rows per iteration
+
+.loop:
+%rep 2
+    movu        m0,          [r0]
+    movu        m2,          [r1]
+    paddw       m0,          m2
+    pmulhrsw    m0,          m7             ; scale the sum down with rounding
+    paddw       m0,          m4             ; re-bias into the pixel range
+    pmaxsw      m0,          m6             ; clamp to [0, pixel_max]
+    pminsw      m0,          m5
+    movu        [r2],        m0
+
+    movu        m1,          [r0 + r3]
+    movu        m3,          [r1 + r4]
+    paddw       m1,          m3
+    pmulhrsw    m1,          m7
+    paddw       m1,          m4
+    pmaxsw      m1,          m6
+    pminsw      m1,          m5
+    movu        [r2 + r5],   m1
+
+    lea         r2,          [r2 + 2 * r5]
+    lea         r0,          [r0 + 2 * r3]
+    lea         r1,          [r1 + 2 * r4]
+%endrep
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+ADDAVG_W8_H4 4
+ADDAVG_W8_H4 8
+ADDAVG_W8_H4 16
+ADDAVG_W8_H4 32
+
+ADDAVG_W8_H4 12
+ADDAVG_W8_H4 64
+
+;-----------------------------------------------------------------------------
+%macro ADDAVG_W12_H4 1
+INIT_XMM sse4
+cglobal addAvg_12x%1, 6,7,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova           m4,             [pw_ %+ ADDAVG_ROUND]
+    mova           m5,             [pw_pixel_max]
+    mova           m7,             [pw_ %+ ADDAVG_FACTOR]
+    pxor           m6,             m6
+    add            r3,             r3       ; strides are in 16-bit element units -> bytes
+    add            r4,             r4
+    add            r5,             r5
+    mov            r6d,            %1/4     ; 4 rows per iteration
+
+.loop:
+%rep 2
+    movu           m0,             [r0]     ; first 8 pixels of row 0
+    movu           m2,             [r1]
+    paddw          m0,             m2
+    pmulhrsw       m0,             m7       ; scale the sum down with rounding
+    paddw          m0,             m4       ; re-bias into the pixel range
+    pmaxsw         m0,             m6       ; clamp to [0, pixel_max]
+    pminsw         m0,             m5
+    movu           [r2],           m0
+
+    movh           m0,             [r0 + 16]        ; last 4 pixels of rows 0 and 1
+    movh           m1,             [r0 + 16 + r3]
+    movh           m2,             [r1 + 16]
+    movh           m3,             [r1 + 16 + r4]
+
+    punpcklqdq     m0,             m1
+    punpcklqdq     m2,             m3
+
+    paddw          m0,             m2
+    pmulhrsw       m0,             m7
+    paddw          m0,             m4
+    pmaxsw         m0,             m6
+    pminsw         m0,             m5
+    movh           [r2 + 16],       m0
+    movhps         [r2 + r5 + 16],  m0
+
+    movu           m1,             [r0 + r3]        ; first 8 pixels of row 1
+    movu           m3,             [r1 + r4]
+    paddw          m1,             m3
+    pmulhrsw       m1,             m7
+    paddw          m1,             m4
+    pmaxsw         m1,             m6
+    pminsw         m1,             m5
+    movu           [r2 + r5],      m1
+
+    lea            r2,             [r2 + 2 * r5]
+    lea            r0,             [r0 + 2 * r3]
+    lea            r1,             [r1 + 2 * r4]
+%endrep
+    dec            r6d
+    jnz            .loop
+    RET
+%endmacro
+
+ADDAVG_W12_H4 16
+
+ADDAVG_W12_H4 32
+
+;-----------------------------------------------------------------------------
+; ADDAVG_W16_H4 %1 — SSE4 addAvg for 16-wide blocks of height %1.
+; Same arithmetic as ADDAVG_W8_H4; each row needs two 8-pixel movu chunks
+; (offsets 0 and 16), and each loop iteration covers 4 rows.
+%macro ADDAVG_W16_H4 1
+INIT_XMM sse4
+cglobal addAvg_16x%1, 6,7,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    mova        m7,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m6,              m6
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+    mov         r6d,             %1/4
+
+.loop:
+%rep 2
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 16],       m1
+
+    movu        m1,              [r0 + r3]
+    movu        m3,              [r1 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + r5],       m1
+
+    movu        m2,              [r0 + 16 + r3]
+    movu        m3,              [r1 + 16 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m7
+    paddw       m2,              m4
+    pmaxsw      m2,              m6
+    pminsw      m2,              m5
+    movu        [r2 + r5 + 16],  m2
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+%endrep
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+ADDAVG_W16_H4 4
+ADDAVG_W16_H4 8
+ADDAVG_W16_H4 12
+ADDAVG_W16_H4 16
+ADDAVG_W16_H4 32
+ADDAVG_W16_H4 64
+
+ADDAVG_W16_H4 24
+
+;-----------------------------------------------------------------------------
+; ADDAVG_W24_H2 %1, %2 — SSE4 addAvg for %1-wide (24) blocks of height %2.
+; Same arithmetic as ADDAVG_W8_H4; each row is three 8-pixel chunks
+; (offsets 0/16/32), two rows per loop iteration.
+%macro ADDAVG_W24_H2 2
+INIT_XMM sse4
+cglobal addAvg_%1x%2, 6,7,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    mova        m7,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m6,              m6
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+
+    mov         r6d,             %2/2
+
+.loop:
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 16],       m1
+
+    movu        m0,              [r0 + 32]
+    movu        m2,              [r1 + 32]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2 + 32],       m0
+
+    movu        m1,              [r0 + r3]
+    movu        m3,              [r1 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + r5],       m1
+
+    movu        m2,              [r0 + r3 + 16]
+    movu        m3,              [r1 + r4 + 16]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m7
+    paddw       m2,              m4
+    pmaxsw      m2,              m6
+    pminsw      m2,              m5
+    movu        [r2 + r5 + 16],  m2
+
+    movu        m1,              [r0 + r3 + 32]
+    movu        m3,              [r1 + r4 + 32]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + r5 + 32],  m1
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+ADDAVG_W24_H2 24, 32
+
+ADDAVG_W24_H2 24, 64
+
+;-----------------------------------------------------------------------------
+; ADDAVG_W32_H2 %1 — SSE4 addAvg for 32-wide blocks of height %1.
+; Same arithmetic as ADDAVG_W8_H4; four 8-pixel chunks per row
+; (offsets 0/16/32/48), two rows per loop iteration.
+%macro ADDAVG_W32_H2 1
+INIT_XMM sse4
+cglobal addAvg_32x%1, 6,7,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    mova        m7,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m6,              m6
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+
+    mov         r6d,             %1/2
+
+.loop:
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 16],       m1
+
+    movu        m0,              [r0 + 32]
+    movu        m2,              [r1 + 32]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2 + 32],       m0
+
+    movu        m1,              [r0 + 48]
+    movu        m2,              [r1 + 48]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 48],       m1
+
+    movu        m1,              [r0 + r3]
+    movu        m3,              [r1 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + r5],       m1
+
+    movu        m2,              [r0 + 16 + r3]
+    movu        m3,              [r1 + 16 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m7
+    paddw       m2,              m4
+    pmaxsw      m2,              m6
+    pminsw      m2,              m5
+    movu        [r2 + r5 + 16],  m2
+
+    movu        m1,              [r0 + 32 + r3]
+    movu        m3,              [r1 + 32 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + r5 + 32],  m1
+
+    movu        m2,              [r0 + 48 + r3]
+    movu        m3,              [r1 + 48 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m7
+    paddw       m2,              m4
+    pmaxsw      m2,              m6
+    pminsw      m2,              m5
+    movu        [r2 + r5 + 48],  m2
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz        .loop
+    RET
+%endmacro
+
+ADDAVG_W32_H2 8
+ADDAVG_W32_H2 16
+ADDAVG_W32_H2 24
+ADDAVG_W32_H2 32
+ADDAVG_W32_H2 64
+
+ADDAVG_W32_H2 48
+
+;-----------------------------------------------------------------------------
+; ADDAVG_W48_H2 %1 — SSE4 addAvg for 48-wide blocks of height %1.
+; Same arithmetic as ADDAVG_W8_H4; six 8-pixel chunks per row
+; (offsets 0/16/32/48/64/80), two rows per loop iteration.
+%macro ADDAVG_W48_H2 1
+INIT_XMM sse4
+cglobal addAvg_48x%1, 6,7,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    mova        m7,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m6,              m6
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+
+    mov         r6d,             %1/2
+
+.loop:
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 16],       m1
+
+    movu        m0,              [r0 + 32]
+    movu        m2,              [r1 + 32]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2 + 32],       m0
+
+    movu        m1,              [r0 + 48]
+    movu        m2,              [r1 + 48]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 48],       m1
+
+    movu        m0,              [r0 + 64]
+    movu        m2,              [r1 + 64]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2 + 64],       m0
+
+    movu        m1,              [r0 + 80]
+    movu        m2,              [r1 + 80]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 80],       m1
+
+    movu        m1,              [r0 + r3]
+    movu        m3,              [r1 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + r5],       m1
+
+    movu        m2,              [r0 + 16 + r3]
+    movu        m3,              [r1 + 16 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m7
+    paddw       m2,              m4
+    pmaxsw      m2,              m6
+    pminsw      m2,              m5
+    movu        [r2 + 16 + r5],  m2
+
+    movu        m1,              [r0 + 32 + r3]
+    movu        m3,              [r1 + 32 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 32 + r5],  m1
+
+    movu        m2,              [r0 + 48 + r3]
+    movu        m3,              [r1 + 48 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m7
+    paddw       m2,              m4
+    pmaxsw      m2,              m6
+    pminsw      m2,              m5
+    movu        [r2 + 48 + r5],  m2
+
+    movu        m1,              [r0 + 64 + r3]
+    movu        m3,              [r1 + 64 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 64 + r5],  m1
+
+    movu        m2,              [r0 + 80 + r3]
+    movu        m3,              [r1 + 80 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m7
+    paddw       m2,              m4
+    pmaxsw      m2,              m6
+    pminsw      m2,              m5
+    movu        [r2 + 80 + r5],  m2
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+ADDAVG_W48_H2 64
+
+;-----------------------------------------------------------------------------
+; ADDAVG_W64_H1 %1 — SSE4 addAvg for 64-wide blocks of height %1.
+; Same arithmetic as ADDAVG_W8_H4; eight 8-pixel chunks per row
+; (offsets 0..112), one row per loop iteration (r6d counts rows, not pairs,
+; so the pointer advance below is a single stride, not 2*stride).
+%macro ADDAVG_W64_H1 1
+INIT_XMM sse4
+cglobal addAvg_64x%1, 6,7,8, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    mova        m7,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m6,              m6
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+    mov         r6d,             %1
+
+.loop:
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 16],       m1
+
+    movu        m0,              [r0 + 32]
+    movu        m2,              [r1 + 32]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2 + 32],       m0
+
+    movu        m1,              [r0 + 48]
+    movu        m2,              [r1 + 48]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 48],       m1
+
+    movu        m0,              [r0 + 64]
+    movu        m2,              [r1 + 64]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2 + 64],       m0
+
+    movu        m1,              [r0 + 80]
+    movu        m2,              [r1 + 80]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 80],       m1
+
+    movu        m0,              [r0 + 96]
+    movu        m2,              [r1 + 96]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m7
+    paddw       m0,              m4
+    pmaxsw      m0,              m6
+    pminsw      m0,              m5
+    movu        [r2 + 96],       m0
+
+    movu        m1,              [r0 + 112]
+    movu        m2,              [r1 + 112]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m7
+    paddw       m1,              m4
+    pmaxsw      m1,              m6
+    pminsw      m1,              m5
+    movu        [r2 + 112],       m1
+
+    add         r2,              r5
+    add         r0,              r3
+    add         r1,              r4
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+ADDAVG_W64_H1 16
+ADDAVG_W64_H1 32
+ADDAVG_W64_H1 48
+ADDAVG_W64_H1 64
+
+;------------------------------------------------------------------------------
+; avx2 asm for addAvg high_bit_depth
+;------------------------------------------------------------------------------
+; addAvg_8x2 (AVX2) — fully unrolled 8x2 case: both rows are packed into one
+; ymm (row 0 in the low lane, row 1 in the high lane) so the whole block is
+; one add/scale/clamp sequence.  Strides are NOT pre-doubled here; the
+; element-to-byte conversion is folded into the "* 2" address scale instead.
+INIT_YMM avx2
+cglobal addAvg_8x2, 6,6,2, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    movu        xm0,         [r0]
+    vinserti128 m0,          m0, [r0 + r3 * 2], 1
+    movu        xm1,         [r1]
+    vinserti128 m1,          m1, [r1 + r4 * 2], 1
+
+    paddw       m0,          m1
+    pxor        m1,          m1
+    pmulhrsw    m0,          [pw_ %+ ADDAVG_FACTOR]
+    paddw       m0,          [pw_ %+ ADDAVG_ROUND]
+    pmaxsw      m0,          m1
+    pminsw      m0,          [pw_pixel_max]
+    vextracti128 xm1,        m0, 1
+    movu        [r2],        xm0
+    movu        [r2 + r5 * 2], xm1
+    RET
+
+; addAvg_8x6 (AVX2) — three unrolled row-pair passes (6 rows total); each
+; pass handles two 8-pixel rows in one ymm (low/high lanes).  Same per-pixel
+; arithmetic as the SSE4 kernels above.  Note strides are doubled via the
+; 32-bit register aliases (r3d/r4d/r5d).
+cglobal addAvg_8x6, 6,6,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,          [pw_ %+ ADDAVG_ROUND]
+    mova        m5,          [pw_pixel_max]
+    mova        m3,          [pw_ %+ ADDAVG_FACTOR]
+    pxor        m1,          m1
+    add         r3d,         r3d
+    add         r4d,         r4d
+    add         r5d,         r5d
+
+    movu        xm0,         [r0]
+    vinserti128 m0,          m0, [r0 + r3], 1
+    movu        xm2,         [r1]
+    vinserti128 m2,          m2, [r1 + r4], 1
+
+    paddw       m0,          m2
+    pmulhrsw    m0,          m3
+    paddw       m0,          m4
+    pmaxsw      m0,          m1
+    pminsw      m0,          m5
+    vextracti128 xm2,        m0, 1
+    movu        [r2],        xm0
+    movu        [r2 + r5],   xm2
+
+    lea         r2,          [r2 + 2 * r5]
+    lea         r0,          [r0 + 2 * r3]
+    lea         r1,          [r1 + 2 * r4]
+
+    movu        xm0,         [r0]
+    vinserti128 m0,          m0, [r0 + r3], 1
+    movu        xm2,         [r1]
+    vinserti128 m2,          m2, [r1 + r4], 1
+
+    paddw       m0,          m2
+    pmulhrsw    m0,          m3
+    paddw       m0,          m4
+    pmaxsw      m0,          m1
+    pminsw      m0,          m5
+    vextracti128 xm2,        m0, 1
+    movu        [r2],        xm0
+    movu        [r2 + r5],   xm2
+
+    lea         r2,          [r2 + 2 * r5]
+    lea         r0,          [r0 + 2 * r3]
+    lea         r1,          [r1 + 2 * r4]
+
+    movu        xm0,         [r0]
+    vinserti128 m0,          m0, [r0 + r3], 1
+    movu        xm2,         [r1]
+    vinserti128 m2,          m2, [r1 + r4], 1
+
+    paddw       m0,          m2
+    pmulhrsw    m0,          m3
+    paddw       m0,          m4
+    pmaxsw      m0,          m1
+    pminsw      m0,          m5
+    vextracti128 xm2,        m0, 1
+    movu        [r2],        xm0
+    movu        [r2 + r5],   xm2
+    RET
+
+; ADDAVG_W8_H4_AVX2 %1 — AVX2 addAvg for 8-wide blocks of height %1; two
+; row-pairs (4 rows) per loop iteration, each pair packed into one ymm.
+; NOTE(review): the full-width ymm loads ([r0]/[r1]) read 32 bytes even
+; though only the low 16 bytes are used before vinserti128 overwrites the
+; high lane — presumably safe over-read within padded buffers; confirm
+; against the buffer allocation.
+%macro ADDAVG_W8_H4_AVX2 1
+cglobal addAvg_8x%1, 6,7,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,          [pw_ %+ ADDAVG_ROUND]
+    mova        m5,          [pw_pixel_max]
+    mova        m3,          [pw_ %+ ADDAVG_FACTOR]
+    pxor        m1,          m1
+    add         r3d,         r3d
+    add         r4d,         r4d
+    add         r5d,         r5d
+    mov         r6d,         %1/4
+
+.loop:
+    movu        m0,          [r0]
+    vinserti128 m0,          m0, [r0 + r3], 1
+    movu        m2,          [r1]
+    vinserti128 m2,          m2, [r1 + r4], 1
+
+    paddw       m0,          m2
+    pmulhrsw    m0,          m3
+    paddw       m0,          m4
+    pmaxsw      m0,          m1
+    pminsw      m0,          m5
+    vextracti128 xm2,        m0, 1
+    movu        [r2],        xm0
+    movu        [r2 + r5],   xm2
+
+    lea         r2,          [r2 + 2 * r5]
+    lea         r0,          [r0 + 2 * r3]
+    lea         r1,          [r1 + 2 * r4]
+
+    movu        m0,          [r0]
+    vinserti128 m0,          m0, [r0 + r3], 1
+    movu        m2,          [r1]
+    vinserti128 m2,          m2, [r1 + r4], 1
+
+    paddw       m0,          m2
+    pmulhrsw    m0,          m3
+    paddw       m0,          m4
+    pmaxsw      m0,          m1
+    pminsw      m0,          m5
+    vextracti128 xm2,        m0, 1
+    movu        [r2],        xm0
+    movu        [r2 + r5],   xm2
+
+    lea         r2,          [r2 + 2 * r5]
+    lea         r0,          [r0 + 2 * r3]
+    lea         r1,          [r1 + 2 * r4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+ADDAVG_W8_H4_AVX2 4
+ADDAVG_W8_H4_AVX2 8
+ADDAVG_W8_H4_AVX2 12
+ADDAVG_W8_H4_AVX2 16
+ADDAVG_W8_H4_AVX2 32
+ADDAVG_W8_H4_AVX2 64
+
+; addAvg_12x16 (AVX2) — one 16-pixel ymm load covers the whole 12-pixel row
+; (4 pixels of over-read); the result is stored as a 16-byte xmm (8 pixels)
+; plus an 8-byte movq (4 pixels) from the extracted high lane.
+; r6d = 4 outer iterations x (2 x 2 rows unrolled) = 16 rows.
+cglobal addAvg_12x16, 6,7,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova           m4,             [pw_ %+ ADDAVG_ROUND]
+    mova           m5,             [pw_pixel_max]
+    mova           m3,             [pw_ %+ ADDAVG_FACTOR]
+    pxor           m1,             m1
+    add            r3,             r3
+    add            r4,             r4
+    add            r5,             r5
+    mov            r6d,            4
+
+.loop:
+%rep 2
+    movu           m0,             [r0]
+    movu           m2,             [r1]
+    paddw          m0,             m2
+    pmulhrsw       m0,             m3
+    paddw          m0,             m4
+    pmaxsw         m0,             m1
+    pminsw         m0,             m5
+    vextracti128   xm2,            m0, 1
+    movu           [r2],           xm0
+    movq           [r2 + 16],      xm2
+
+    movu           m0,             [r0 + r3]
+    movu           m2,             [r1 + r4]
+    paddw          m0,             m2
+    pmulhrsw       m0,             m3
+    paddw          m0,             m4
+    pmaxsw         m0,             m1
+    pminsw         m0,             m5
+    vextracti128   xm2,            m0, 1
+    movu           [r2 + r5],      xm0
+    movq           [r2 + r5 + 16], xm2
+
+    lea            r2,             [r2 + 2 * r5]
+    lea            r0,             [r0 + 2 * r3]
+    lea            r1,             [r1 + 2 * r4]
+%endrep
+    dec            r6d
+    jnz            .loop
+    RET
+
+; addAvg_12x32 (AVX2) — same row layout as addAvg_12x16, but 8 outer
+; iterations (32 rows).  Instead of loading pw_ADDAVG_FACTOR, the scale
+; factor is derived as m3 = ROUND + ROUND.
+; NOTE(review): this assumes ADDAVG_FACTOR == 2 * ADDAVG_ROUND for the
+; bit-depth configuration this variant is built for — confirm against the
+; constant definitions earlier in the file.
+cglobal addAvg_12x32, 6,7,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova           m4,             [pw_ %+ ADDAVG_ROUND]
+    mova           m5,             [pw_pixel_max]
+    paddw          m3,             m4,  m4
+    pxor           m1,             m1
+    add            r3,             r3
+    add            r4,             r4
+    add            r5,             r5
+    mov            r6d,            8
+
+.loop:
+%rep 2
+    movu           m0,             [r0]
+    movu           m2,             [r1]
+    paddw          m0,             m2
+    pmulhrsw       m0,             m3
+    paddw          m0,             m4
+    pmaxsw         m0,             m1
+    pminsw         m0,             m5
+    vextracti128   xm2,            m0, 1
+    movu           [r2],           xm0
+    movq           [r2 + 16],      xm2
+
+    movu           m0,             [r0 + r3]
+    movu           m2,             [r1 + r4]
+    paddw          m0,             m2
+    pmulhrsw       m0,             m3
+    paddw          m0,             m4
+    pmaxsw         m0,             m1
+    pminsw         m0,             m5
+    vextracti128   xm2,            m0, 1
+    movu           [r2 + r5],      xm0
+    movq           [r2 + r5 + 16], xm2
+
+    lea            r2,             [r2 + 2 * r5]
+    lea            r0,             [r0 + 2 * r3]
+    lea            r1,             [r1 + 2 * r4]
+%endrep
+    dec            r6d
+    jnz            .loop
+    RET
+
+; ADDAVG_W16_H4_AVX2 %1 — AVX2 addAvg for 16-wide blocks of height %1;
+; one full-width ymm per row, two rows per %rep, 4 rows per loop iteration.
+; Same per-pixel arithmetic as the SSE4 kernels above.
+%macro ADDAVG_W16_H4_AVX2 1
+cglobal addAvg_16x%1, 6,7,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    mova        m3,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m2,              m2
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+    mov         r6d,             %1/4
+
+.loop:
+%rep 2
+    movu        m0,              [r0]
+    movu        m1,              [r1]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m0,              [r0 + r3]
+    movu        m1,              [r1 + r4]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5],       m0
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+%endrep
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+ADDAVG_W16_H4_AVX2 4
+ADDAVG_W16_H4_AVX2 8
+ADDAVG_W16_H4_AVX2 12
+ADDAVG_W16_H4_AVX2 16
+ADDAVG_W16_H4_AVX2 24
+ADDAVG_W16_H4_AVX2 32
+ADDAVG_W16_H4_AVX2 64
+
+; addAvg_24x32 (AVX2) — each row is one 16-pixel ymm chunk plus an 8-pixel
+; xmm tail at byte offset 32; two rows per loop iteration, 16 iterations.
+; Same per-pixel arithmetic as the SSE4 kernels above.
+cglobal addAvg_24x32, 6,7,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    mova        m3,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m1,              m1
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+
+    mov         r6d,             16
+
+.loop:
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m1
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        xm0,             [r0 + 32]
+    movu        xm2,             [r1 + 32]
+    paddw       xm0,             xm2
+    pmulhrsw    xm0,             xm3
+    paddw       xm0,             xm4
+    pmaxsw      xm0,             xm1
+    pminsw      xm0,             xm5
+    movu        [r2 + 32],       xm0
+
+    movu        m0,              [r0 + r3]
+    movu        m2,              [r1 + r4]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m1
+    pminsw      m0,              m5
+    movu        [r2 + r5],       m0
+
+    movu        xm2,             [r0 + r3 + 32]
+    movu        xm0,             [r1 + r4 + 32]
+    paddw       xm2,             xm0
+    pmulhrsw    xm2,             xm3
+    paddw       xm2,             xm4
+    pmaxsw      xm2,             xm1
+    pminsw      xm2,             xm5
+    movu        [r2 + r5 + 32],  xm2
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+
+; addAvg_24x64 (AVX2) — same row layout as addAvg_24x32 (16-pixel ymm chunk
+; plus 8-pixel xmm tail), 32 iterations of two rows.  The scale factor is
+; derived as m3 = ROUND + ROUND rather than loading pw_ADDAVG_FACTOR.
+; NOTE(review): assumes ADDAVG_FACTOR == 2 * ADDAVG_ROUND for the bit-depth
+; configuration this variant targets — confirm against the constant
+; definitions earlier in the file.
+cglobal addAvg_24x64, 6,7,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    paddw       m3,              m4,  m4
+    pxor        m1,              m1
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+
+    mov         r6d,             32
+
+.loop:
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m1
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        xm0,             [r0 + 32]
+    movu        xm2,             [r1 + 32]
+    paddw       xm0,             xm2
+    pmulhrsw    xm0,             xm3
+    paddw       xm0,             xm4
+    pmaxsw      xm0,             xm1
+    pminsw      xm0,             xm5
+    movu        [r2 + 32],       xm0
+
+    movu        m0,              [r0 + r3]
+    movu        m2,              [r1 + r4]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m1
+    pminsw      m0,              m5
+    movu        [r2 + r5],       m0
+
+    movu        xm2,             [r0 + r3 + 32]
+    movu        xm0,             [r1 + r4 + 32]
+    paddw       xm2,             xm0
+    pmulhrsw    xm2,             xm3
+    paddw       xm2,             xm4
+    pmaxsw      xm2,             xm1
+    pminsw      xm2,             xm5
+    movu        [r2 + r5 + 32],  xm2
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+
+; ADDAVG_W32_H2_AVX2 %1 — AVX2 addAvg for 32-wide blocks of height %1;
+; two 16-pixel ymm chunks per row (offsets 0 and 32), two rows per loop
+; iteration.  Same per-pixel arithmetic as the SSE4 kernels above.
+%macro ADDAVG_W32_H2_AVX2 1
+cglobal addAvg_32x%1, 6,7,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    mova        m3,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m2,              m2
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+
+    mov         r6d,             %1/2
+
+.loop:
+    movu        m0,              [r0]
+    movu        m1,              [r1]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m0,              [r0 + 32]
+    movu        m1,              [r1 + 32]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + 32],       m0
+
+    movu        m0,              [r0 + r3]
+    movu        m1,              [r1 + r4]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5],       m0
+
+    movu        m0,              [r0 + r3 + 32]
+    movu        m1,              [r1 + r4 + 32]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5 + 32],  m0
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz        .loop
+    RET
+%endmacro
+
+ADDAVG_W32_H2_AVX2 8
+ADDAVG_W32_H2_AVX2 16
+ADDAVG_W32_H2_AVX2 24
+ADDAVG_W32_H2_AVX2 32
+ADDAVG_W32_H2_AVX2 48
+ADDAVG_W32_H2_AVX2 64
+
+; addAvg_48x64 (HIGH_BIT_DEPTH path, AVX2)
+; Same per-pixel operation as ADDAVG_W32_H2_AVX2 but for a fixed 48x64 block:
+; three 32-byte ymm loads per row (48 16-bit samples), two rows per iteration,
+; 32 iterations (r6d = 64/2).
+cglobal addAvg_48x64, 6,7,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    mova        m3,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m2,              m2
+    ; convert sample strides to byte strides (16-bit data)
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+
+    mov         r6d,             32
+
+.loop:
+    ; row 0: pixels 0-15, 16-31, 32-47
+    movu        m0,              [r0]
+    movu        m1,              [r1]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m0,              [r0 + 32]
+    movu        m1,              [r1 + 32]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + 32],       m0
+
+    movu        m0,              [r0 + 64]
+    movu        m1,              [r1 + 64]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + 64],       m0
+
+    ; row 1: pixels 0-15, 16-31, 32-47
+    movu        m0,              [r0 + r3]
+    movu        m1,              [r1 + r4]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5],       m0
+
+    movu        m0,              [r0 + r3 + 32]
+    movu        m1,              [r1 + r4 + 32]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5 + 32],  m0
+
+    movu        m0,              [r0 + r3 + 64]
+    movu        m1,              [r1 + r4 + 64]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5 + 64],  m0
+
+    ; advance by two rows
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz        .loop
+    RET
+
+; Macro: addAvg_64x%1 (HIGH_BIT_DEPTH path, AVX2)
+; Same per-pixel operation as ADDAVG_W32_H2_AVX2; 64-pixel-wide block,
+; four 32-byte ymm loads per row, two rows per loop iteration.
+%macro ADDAVG_W64_H1_AVX2 1
+cglobal addAvg_64x%1, 6,7,6, pSrc0, pSrc1, pDst, iStride0, iStride1, iDstStride
+    mova        m4,              [pw_ %+ ADDAVG_ROUND]
+    mova        m5,              [pw_pixel_max]
+    mova        m3,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m2,              m2
+    ; NOTE(review): 32-bit adds here (r3d/r4d/r5d) vs 64-bit adds in the other
+    ; macros; writing the 32-bit register zero-extends, so this assumes strides
+    ; fit in 32 bits — confirm against how callers pass intptr_t strides
+    add         r3d,             r3d
+    add         r4d,             r4d
+    add         r5d,             r5d
+
+    mov         r6d,             %1/2
+
+.loop:
+    ; row 0: four 16-pixel segments
+    movu        m0,              [r0]
+    movu        m1,              [r1]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m0,              [r0 + 32]
+    movu        m1,              [r1 + 32]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + 32],       m0
+
+    movu        m0,              [r0 + 64]
+    movu        m1,              [r1 + 64]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + 64],       m0
+
+    movu        m0,              [r0 + 96]
+    movu        m1,              [r1 + 96]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + 96],       m0
+
+    ; row 1: four 16-pixel segments
+    movu        m0,              [r0 + r3]
+    movu        m1,              [r1 + r4]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5],       m0
+
+    movu        m0,              [r0 + r3 + 32]
+    movu        m1,              [r1 + r4 + 32]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5 + 32],  m0
+
+    movu        m0,              [r0 + r3 + 64]
+    movu        m1,              [r1 + r4 + 64]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5 + 64],  m0
+
+    movu        m0,              [r0 + r3 + 96]
+    movu        m1,              [r1 + r4 + 96]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5 + 96],  m0
+
+    ; advance by two rows
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz        .loop
+    RET
+%endmacro
+
+; instantiate 64xN kernels
+ADDAVG_W64_H1_AVX2 16
+ADDAVG_W64_H1_AVX2 32
+ADDAVG_W64_H1_AVX2 48
+ADDAVG_W64_H1_AVX2 64
+;-----------------------------------------------------------------------------
+%else ; !HIGH_BIT_DEPTH
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; addAvg_2x4 (8-bit pixel path, SSE4)
+; Inputs are int16_t intermediates; per pixel the kernel computes
+;   (src0 + src1 + 64) >> 7      (via pmulhrsw with pw_256: (x*256 + 2^14) >> 15)
+; then adds 128 — presumably removing the intermediate-precision offset
+; (IF_INTERNAL_OFFS); packuswb clamps to [0,255].  TODO confirm offset origin.
+; Gathers four 2-pixel rows into one xmm, then scatters with pextrw.
+cglobal addAvg_2x4, 6,6,8, src0, src1, dst, src0Stride, src1tride, dstStride
+
+    mova          m0,          [pw_256]
+    mova          m7,          [pw_128]
+    ; double source strides: sources are 16-bit, destination is 8-bit
+    add           r3,          r3
+    add           r4,          r4
+
+    ; rows 0-1 (2 pixels = one dword each)
+    movd          m1,          [r0]
+    movd          m2,          [r0 + r3]
+    movd          m3,          [r1]
+    movd          m4,          [r1 + r4]
+
+    punpckldq     m1,          m2
+    punpckldq     m3,          m4
+
+    lea           r0,          [r0 + 2 * r3]
+    lea           r1,          [r1 + 2 * r4]
+
+    ; rows 2-3
+    movd          m2,          [r0]
+    movd          m4,          [r0 + r3]
+    movd          m5,          [r1]
+    movd          m6,          [r1 + r4]
+
+    punpckldq     m2,          m4
+    punpckldq     m5,          m6
+    ; m1 = all four src0 rows, m3 = all four src1 rows
+    punpcklqdq    m1,          m2
+    punpcklqdq    m3,          m5
+
+    paddw         m1,          m3
+    pmulhrsw      m1,          m0
+    paddw         m1,          m7
+    packuswb      m1,          m1
+
+    ; store one word (2 pixels) per destination row
+    pextrw        [r2],        m1, 0
+    pextrw        [r2 + r5],   m1, 1
+    lea           r2,          [r2 + 2 * r5]
+    pextrw        [r2],        m1, 2
+    pextrw        [r2 + r5],   m1, 3
+
+    RET
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; addAvg_2x8 (8-bit pixel path, SSE4)
+; Same per-pixel math as addAvg_2x4, fully unrolled as two 2x4 passes.
+; NOTE(review): the register-name list has 7 entries ("pSrc0" plus six) for a
+; 6-argument cglobal — harmless with x86inc, but inconsistent; confirm intent.
+cglobal addAvg_2x8, 6,6,8, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+
+    mova          m0,          [pw_256]
+    mova          m7,          [pw_128]
+    ; double source strides (16-bit sources, 8-bit destination)
+    add           r3,          r3
+    add           r4,          r4
+
+    ; ---- first 2x4 half: rows 0-3 ----
+    movd          m1,          [r0]
+    movd          m2,          [r0 + r3]
+    movd          m3,          [r1]
+    movd          m4,          [r1 + r4]
+
+    punpckldq     m1,          m2
+    punpckldq     m3,          m4
+
+    lea           r0,          [r0 + 2 * r3]
+    lea           r1,          [r1 + 2 * r4]
+
+    movd          m2,          [r0]
+    movd          m4,          [r0 + r3]
+    movd          m5,          [r1]
+    movd          m6,          [r1 + r4]
+
+    punpckldq     m2,          m4
+    punpckldq     m5,          m6
+    punpcklqdq    m1,          m2
+    punpcklqdq    m3,          m5
+
+    paddw         m1,          m3
+    pmulhrsw      m1,          m0
+    paddw         m1,          m7
+    packuswb      m1,          m1
+
+    pextrw        [r2],        m1, 0
+    pextrw        [r2 + r5],   m1, 1
+    lea           r2,          [r2 + 2 * r5]
+    pextrw        [r2],        m1, 2
+    pextrw        [r2 + r5],   m1, 3
+
+    lea           r2,          [r2 + 2 * r5]
+    lea           r0,          [r0 + 2 * r3]
+    lea           r1,          [r1 + 2 * r4]
+
+    ; ---- second 2x4 half: rows 4-7 ----
+    movd          m1,          [r0]
+    movd          m2,          [r0 + r3]
+    movd          m3,          [r1]
+    movd          m4,          [r1 + r4]
+
+    punpckldq     m1,          m2
+    punpckldq     m3,          m4
+
+    lea           r0,          [r0 + 2 * r3]
+    lea           r1,          [r1 + 2 * r4]
+
+    movd          m2,          [r0]
+    movd          m4,          [r0 + r3]
+    movd          m5,          [r1]
+    movd          m6,          [r1 + r4]
+
+    punpckldq     m2,          m4
+    punpckldq     m5,          m6
+    punpcklqdq    m1,          m2
+    punpcklqdq    m3,          m5
+
+    paddw         m1,          m3
+    pmulhrsw      m1,          m0
+    paddw         m1,          m7
+    packuswb      m1,          m1
+
+    pextrw        [r2],        m1, 0
+    pextrw        [r2 + r5],   m1, 1
+    lea           r2,          [r2 + 2 * r5]
+    pextrw        [r2],        m1, 2
+    pextrw        [r2 + r5],   m1, 3
+
+    RET
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; addAvg_2x16 (8-bit pixel path, SSE4)
+; Same per-pixel math as addAvg_2x4, looped: one iteration handles a 2x4
+; sub-block, r6d counts 16/4 = 4 iterations.
+cglobal addAvg_2x16, 6,7,8, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova        m0,         [pw_256]
+    mova        m7,         [pw_128]
+    mov         r6d,        16/4
+    ; double source strides (16-bit sources)
+    add         r3,         r3
+    add         r4,         r4
+.loop:
+    ; gather four 2-pixel rows from each source
+    movd        m1,         [r0]
+    movd        m2,         [r0 + r3]
+    movd        m3,         [r1]
+    movd        m4,         [r1 + r4]
+    lea         r0,         [r0 + r3 * 2]
+    lea         r1,         [r1 + r4 * 2]
+    punpckldq   m1,         m2
+    punpckldq   m3,         m4
+    movd        m2,         [r0]
+    movd        m4,         [r0 + r3]
+    movd        m5,         [r1]
+    movd        m6,         [r1 + r4]
+    lea         r0,         [r0 + r3 * 2]
+    lea         r1,         [r1 + r4 * 2]
+    punpckldq   m2,         m4
+    punpckldq   m5,         m6
+    punpcklqdq  m1,         m2
+    punpcklqdq  m3,         m5
+    paddw       m1,         m3
+    pmulhrsw    m1,         m0
+    paddw       m1,         m7
+    packuswb    m1,         m1
+    ; scatter one word (2 pixels) per destination row
+    pextrw      [r2],       m1, 0
+    pextrw      [r2 + r5],  m1, 1
+    lea         r2,         [r2 + r5 * 2]
+    pextrw      [r2],       m1, 2
+    pextrw      [r2 + r5],  m1, 3
+    lea         r2,         [r2 + r5 * 2]
+    dec         r6d
+    jnz         .loop
+    RET
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; addAvg_4x2 (8-bit pixel path, SSE4)
+; Two 4-pixel rows packed into one xmm (row 1 in the high qword via movhps);
+; same per-pixel math as addAvg_2x4.
+cglobal addAvg_4x2, 6,6,4, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+
+    mova           m1,          [pw_256]
+    mova           m3,          [pw_128]
+    ; double source strides (16-bit sources)
+    add            r3,          r3
+    add            r4,          r4
+
+    movh           m0,          [r0]
+    movhps         m0,          [r0 + r3]
+    movh           m2,          [r1]
+    movhps         m2,          [r1 + r4]
+
+    paddw          m0,          m2
+    pmulhrsw       m0,          m1
+    paddw          m0,          m3
+
+    packuswb       m0,          m0
+    ; store 4 bytes per row; pshufd moves row 1's dword into lane 0
+    movd           [r2],        m0
+    pshufd         m0,          m0, 1
+    movd           [r2 + r5],   m0
+
+    RET
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+; Macro: addAvg_4x%1 (8-bit pixel path, SSE4)
+; Looped version of addAvg_4x2: four rows per iteration (two unrolled
+; row-pairs), r6d = %1/4 iterations.
+%macro ADDAVG_W4_H4 1
+INIT_XMM sse4
+cglobal addAvg_4x%1, 6,7,4, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova           m1,          [pw_256]
+    mova           m3,          [pw_128]
+    ; double source strides (16-bit sources)
+    add            r3,          r3
+    add            r4,          r4
+
+    mov            r6d,         %1/4
+
+.loop:
+    ; rows 0-1
+    movh           m0,          [r0]
+    movhps         m0,          [r0 + r3]
+    movh           m2,          [r1]
+    movhps         m2,          [r1 + r4]
+
+    paddw          m0,          m2
+    pmulhrsw       m0,          m1
+    paddw          m0,          m3
+
+    packuswb       m0,          m0
+    movd           [r2],        m0
+    pshufd         m0,          m0, 1
+    movd           [r2 + r5],   m0
+
+    lea            r2,          [r2 + 2 * r5]
+    lea            r0,          [r0 + 2 * r3]
+    lea            r1,          [r1 + 2 * r4]
+
+    ; rows 2-3
+    movh           m0,          [r0]
+    movhps         m0,          [r0 + r3]
+    movh           m2,          [r1]
+    movhps         m2,          [r1 + r4]
+
+    paddw          m0,          m2
+    pmulhrsw       m0,          m1
+    paddw          m0,          m3
+
+    packuswb       m0,          m0
+    movd           [r2],        m0
+    pshufd         m0,          m0, 1
+    movd           [r2 + r5],   m0
+
+    lea            r2,          [r2 + 2 * r5]
+    lea            r0,          [r0 + 2 * r3]
+    lea            r1,          [r1 + 2 * r4]
+
+    dec            r6d
+    jnz            .loop
+    RET
+%endmacro
+
+; instantiate 4xN kernels
+ADDAVG_W4_H4 4
+ADDAVG_W4_H4 8
+ADDAVG_W4_H4 16
+
+ADDAVG_W4_H4 32
+
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; addAvg_6x8 (8-bit pixel path, SSE4)
+; 6-pixel rows: each 16-byte load covers 8 int16 samples (2 beyond the row);
+; the store writes 4 bytes (movd) + 2 bytes (pextrw of word 2) per row.
+; Fully unrolled: four row-pairs.  Same per-pixel math as addAvg_2x4.
+cglobal addAvg_6x8, 6,6,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+
+    mova        m4,             [pw_256]
+    mova        m5,             [pw_128]
+    ; double source strides (16-bit sources)
+    add         r3,             r3
+    add         r4,             r4
+
+    ; rows 0-1
+    movu        m0,             [r0]
+    movu        m2,             [r1]
+    paddw       m0,             m2
+    pmulhrsw    m0,             m4
+    paddw       m0,             m5
+    packuswb    m0,             m0
+    movd        [r2],           m0
+    pextrw      [r2 + 4],       m0, 2
+
+    movu        m1,             [r0 + r3]
+    movu        m3,             [r1 + r4]
+    paddw       m1,             m3
+    pmulhrsw    m1,             m4
+    paddw       m1,             m5
+    packuswb    m1,             m1
+    movd        [r2 + r5],      m1
+    pextrw      [r2 + r5 + 4],  m1, 2
+
+    lea         r2,             [r2 + 2 * r5]
+    lea         r0,             [r0 + 2 * r3]
+    lea         r1,             [r1 + 2 * r4]
+
+    ; rows 2-3
+    movu        m0,             [r0]
+    movu        m2,             [r1]
+    paddw       m0,             m2
+    pmulhrsw    m0,             m4
+    paddw       m0,             m5
+    packuswb    m0,             m0
+    movd        [r2],           m0
+    pextrw      [r2 + 4],       m0, 2
+
+    movu        m1,             [r0 + r3]
+    movu        m3,             [r1 + r4]
+    paddw       m1,             m3
+    pmulhrsw    m1,             m4
+    paddw       m1,             m5
+    packuswb    m1,             m1
+    movd        [r2 + r5],      m1
+    pextrw      [r2 + r5 + 4],  m1, 2
+
+    lea         r2,             [r2 + 2 * r5]
+    lea         r0,             [r0 + 2 * r3]
+    lea         r1,             [r1 + 2 * r4]
+
+    ; rows 4-5
+    movu        m0,             [r0]
+    movu        m2,             [r1]
+    paddw       m0,             m2
+    pmulhrsw    m0,             m4
+    paddw       m0,             m5
+    packuswb    m0,             m0
+    movd        [r2],           m0
+    pextrw      [r2 + 4],       m0, 2
+
+    movu        m1,             [r0 + r3]
+    movu        m3,             [r1 + r4]
+    paddw       m1,             m3
+    pmulhrsw    m1,             m4
+    paddw       m1,             m5
+    packuswb    m1,             m1
+    movd        [r2 + r5],      m1
+    pextrw      [r2 + r5 + 4],  m1, 2
+
+    lea         r2,             [r2 + 2 * r5]
+    lea         r0,             [r0 + 2 * r3]
+    lea         r1,             [r1 + 2 * r4]
+
+    ; rows 6-7
+    movu        m0,             [r0]
+    movu        m2,             [r1]
+    paddw       m0,             m2
+    pmulhrsw    m0,             m4
+    paddw       m0,             m5
+    packuswb    m0,             m0
+    movd        [r2],           m0
+    pextrw      [r2 + 4],       m0, 2
+
+    movu        m1,             [r0 + r3]
+    movu        m3,             [r1 + r4]
+    paddw       m1,             m3
+    pmulhrsw    m1,             m4
+    paddw       m1,             m5
+    packuswb    m1,             m1
+    movd        [r2 + r5],      m1
+    pextrw      [r2 + r5 + 4],  m1, 2
+
+    RET
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; addAvg_6x16 (8-bit pixel path, SSE4)
+; Looped version of addAvg_6x8: two rows per iteration, r6d = 16/2.
+; Note the early 'dec r6d' so the trailing 'jnz' can follow the last store.
+cglobal addAvg_6x16, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova        m4,             [pw_256]
+    mova        m5,             [pw_128]
+    mov         r6d,            16/2
+    ; double source strides (16-bit sources)
+    add         r3,             r3
+    add         r4,             r4
+.loop:
+    movu        m0,             [r0]
+    movu        m2,             [r1]
+    movu        m1,             [r0 + r3]
+    movu        m3,             [r1 + r4]
+    dec         r6d
+    lea         r0,             [r0 + r3 * 2]
+    lea         r1,             [r1 + r4 * 2]
+    paddw       m0,             m2
+    paddw       m1,             m3
+    pmulhrsw    m0,             m4
+    pmulhrsw    m1,             m4
+    paddw       m0,             m5
+    paddw       m1,             m5
+    packuswb    m0,             m0
+    packuswb    m1,             m1
+    ; 4 + 2 bytes per 6-pixel row
+    movd        [r2],           m0
+    pextrw      [r2 + 4],       m0, 2
+    movd        [r2 + r5],      m1
+    pextrw      [r2 + r5 + 4],  m1, 2
+    lea         r2,             [r2 + r5 * 2]
+    jnz         .loop
+    RET
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; addAvg_8x2 (8-bit pixel path, SSE4)
+; One 16-byte load per 8-sample row; movh stores 8 packed pixels per row.
+; Same per-pixel math as addAvg_2x4.
+cglobal addAvg_8x2, 6,6,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova        m4,          [pw_256]
+    mova        m5,          [pw_128]
+    ; double source strides (16-bit sources)
+    add         r3,          r3
+    add         r4,          r4
+
+    movu        m0,          [r0]
+    movu        m2,          [r1]
+    paddw       m0,          m2
+    pmulhrsw    m0,          m4
+    paddw       m0,          m5
+    packuswb    m0,          m0
+    movh        [r2],        m0
+
+    movu        m1,          [r0 + r3]
+    movu        m3,          [r1 + r4]
+    paddw       m1,          m3
+    pmulhrsw    m1,          m4
+    paddw       m1,          m5
+    packuswb    m1,          m1
+    movh        [r2 + r5],   m1
+
+    RET
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; addAvg_8x6 (8-bit pixel path, SSE4)
+; addAvg_8x2 unrolled three times (three row-pairs).
+cglobal addAvg_8x6, 6,6,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+
+    mova        m4,          [pw_256]
+    mova        m5,          [pw_128]
+    ; double source strides (16-bit sources)
+    add         r3,          r3
+    add         r4,          r4
+
+    ; rows 0-1
+    movu        m0,          [r0]
+    movu        m2,          [r1]
+    paddw       m0,          m2
+    pmulhrsw    m0,          m4
+    paddw       m0,          m5
+    packuswb    m0,          m0
+    movh        [r2],        m0
+
+    movu        m1,          [r0 + r3]
+    movu        m3,          [r1 + r4]
+    paddw       m1,          m3
+    pmulhrsw    m1,          m4
+    paddw       m1,          m5
+    packuswb    m1,          m1
+    movh        [r2 + r5],   m1
+
+    lea         r2,          [r2 + 2 * r5]
+    lea         r0,          [r0 + 2 * r3]
+    lea         r1,          [r1 + 2 * r4]
+
+    ; rows 2-3
+    movu        m0,          [r0]
+    movu        m2,          [r1]
+    paddw       m0,          m2
+    pmulhrsw    m0,          m4
+    paddw       m0,          m5
+    packuswb    m0,          m0
+    movh        [r2],        m0
+
+    movu        m1,          [r0 + r3]
+    movu        m3,          [r1 + r4]
+    paddw       m1,          m3
+    pmulhrsw    m1,          m4
+    paddw       m1,          m5
+    packuswb    m1,          m1
+    movh        [r2 + r5],   m1
+
+    lea         r2,          [r2 + 2 * r5]
+    lea         r0,          [r0 + 2 * r3]
+    lea         r1,          [r1 + 2 * r4]
+
+    ; rows 4-5
+    movu        m0,          [r0]
+    movu        m2,          [r1]
+    paddw       m0,          m2
+    pmulhrsw    m0,          m4
+    paddw       m0,          m5
+    packuswb    m0,          m0
+    movh        [r2],        m0
+
+    movu        m1,          [r0 + r3]
+    movu        m3,          [r1 + r4]
+    paddw       m1,          m3
+    pmulhrsw    m1,          m4
+    paddw       m1,          m5
+    packuswb    m1,          m1
+    movh        [r2 + r5],   m1
+
+    RET
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+; Macro: addAvg_8x%1 (8-bit pixel path, SSE4)
+; Looped addAvg_8x2: four rows (two unrolled row-pairs) per iteration,
+; r6d = %1/4 iterations.
+%macro ADDAVG_W8_H4 1
+INIT_XMM sse4
+cglobal addAvg_8x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+
+    mova        m4,          [pw_256]
+    mova        m5,          [pw_128]
+    ; double source strides (16-bit sources)
+    add         r3,          r3
+    add         r4,          r4
+
+    mov         r6d,         %1/4
+
+.loop:
+    ; rows 0-1
+    movu        m0,          [r0]
+    movu        m2,          [r1]
+    paddw       m0,          m2
+    pmulhrsw    m0,          m4
+    paddw       m0,          m5
+
+    packuswb    m0,          m0
+    movh        [r2],        m0
+
+    movu        m1,          [r0 + r3]
+    movu        m3,          [r1 + r4]
+    paddw       m1,          m3
+    pmulhrsw    m1,          m4
+    paddw       m1,          m5
+
+    packuswb    m1,          m1
+    movh        [r2 + r5],   m1
+
+    lea         r2,          [r2 + 2 * r5]
+    lea         r0,          [r0 + 2 * r3]
+    lea         r1,          [r1 + 2 * r4]
+
+    ; rows 2-3
+    movu        m0,          [r0]
+    movu        m2,          [r1]
+    paddw       m0,          m2
+    pmulhrsw    m0,          m4
+    paddw       m0,          m5
+
+    packuswb    m0,          m0
+    movh        [r2],        m0
+
+    movu        m1,          [r0 + r3]
+    movu        m3,          [r1 + r4]
+    paddw       m1,          m3
+    pmulhrsw    m1,          m4
+    paddw       m1,          m5
+
+    packuswb    m1,          m1
+    movh        [r2 + r5],   m1
+
+    lea         r2,          [r2 + 2 * r5]
+    lea         r0,          [r0 + 2 * r3]
+    lea         r1,          [r1 + 2 * r4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+; instantiate 8xN kernels
+ADDAVG_W8_H4 4
+ADDAVG_W8_H4 8
+ADDAVG_W8_H4 16
+ADDAVG_W8_H4 32
+
+ADDAVG_W8_H4 12
+ADDAVG_W8_H4 64
+
+;-----------------------------------------------------------------------------
+
+
+;-----------------------------------------------------------------------------
+; Macro: addAvg_12x%1 (8-bit pixel path, SSE4)
+; 12-pixel rows handled as 8 + 4: one full 16-byte load for samples 0-7,
+; then movh/movhps pack samples 8-11 of two rows into one xmm.
+; Four rows (two unrolled row-pairs) per iteration, r6d = %1/4.
+%macro ADDAVG_W12_H4 1
+INIT_XMM sse4
+cglobal addAvg_12x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova           m4,             [pw_256]
+    mova           m5,             [pw_128]
+    ; double source strides (16-bit sources)
+    add            r3,             r3
+    add            r4,             r4
+
+    mov            r6d,            %1/4
+
+.loop:
+    ; row 0, samples 0-7
+    movu           m0,             [r0]
+    movu           m2,             [r1]
+    paddw          m0,             m2
+    pmulhrsw       m0,             m4
+    paddw          m0,             m5
+    packuswb       m0,             m0
+    movh           [r2],           m0
+
+    ; rows 0-1, samples 8-11 (low/high qword = row 0/row 1)
+    movh           m0,             [r0 + 16]
+    movhps         m0,             [r0 + 16 + r3]
+    movh           m2,             [r1 + 16]
+    movhps         m2,             [r1 + 16 + r4]
+
+    paddw          m0,             m2
+    pmulhrsw       m0,             m4
+    paddw          m0,             m5
+
+    packuswb       m0,             m0
+    movd           [r2 + 8],       m0
+    pshufd         m0,             m0, 1
+    movd           [r2 + 8 + r5],  m0
+
+    ; row 1, samples 0-7
+    movu           m1,             [r0 + r3]
+    movu           m3,             [r1 + r4]
+    paddw          m1,             m3
+    pmulhrsw       m1,             m4
+    paddw          m1,             m5
+
+    packuswb       m1,             m1
+    movh           [r2 + r5],      m1
+
+    lea            r2,             [r2 + 2 * r5]
+    lea            r0,             [r0 + 2 * r3]
+    lea            r1,             [r1 + 2 * r4]
+
+    ; rows 2-3: same pattern
+    movu           m0,             [r0]
+    movu           m2,             [r1]
+    paddw          m0,             m2
+    pmulhrsw       m0,             m4
+    paddw          m0,             m5
+
+    packuswb       m0,             m0
+    movh           [r2],           m0
+
+    movh           m0,             [r0 + 16]
+    movhps         m0,             [r0 + 16 + r3]
+    movh           m2,             [r1 + 16]
+    movhps         m2,             [r1 + 16 + r4]
+
+    paddw          m0,             m2
+    pmulhrsw       m0,             m4
+    paddw          m0,             m5
+
+    packuswb       m0,             m0
+    movd           [r2 + 8],       m0
+    pshufd         m0,             m0,  1
+    movd           [r2 + 8 + r5],  m0
+
+    movu           m1,             [r0 + r3]
+    movu           m3,             [r1 + r4]
+    paddw          m1,             m3
+    pmulhrsw       m1,             m4
+    paddw          m1,             m5
+
+    packuswb       m1,             m1
+    movh           [r2 + r5],      m1
+
+    lea            r2,             [r2 + 2 * r5]
+    lea            r0,             [r0 + 2 * r3]
+    lea            r1,             [r1 + 2 * r4]
+
+    dec            r6d
+    jnz            .loop
+    RET
+%endmacro
+
+; instantiate 12xN kernels
+ADDAVG_W12_H4 16
+
+ADDAVG_W12_H4 32
+
+;-----------------------------------------------------------------------------
+
+
+;-----------------------------------------------------------------------------
+; Macro: addAvg_16x%1 (8-bit pixel path, SSE4)
+; 16-pixel rows: two 16-byte loads (8 int16 samples each) packed into one
+; 16-byte store per row.  Four rows (two unrolled row-pairs) per iteration,
+; r6d = %1/4.
+%macro ADDAVG_W16_H4 1
+INIT_XMM sse4
+cglobal addAvg_16x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova        m4,              [pw_256]
+    mova        m5,              [pw_128]
+    ; double source strides (16-bit sources)
+    add         r3,              r3
+    add         r4,              r4
+
+    mov         r6d,             %1/4
+
+.loop:
+    ; row 0
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2],            m0
+
+    ; row 1
+    movu        m1,              [r0 + r3]
+    movu        m3,              [r1 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    movu        m2,              [r0 + 16 + r3]
+    movu        m3,              [r1 + 16 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m4
+    paddw       m2,              m5
+
+    packuswb    m1,              m2
+    movu        [r2 + r5],       m1
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    ; rows 2-3: same pattern
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2],            m0
+
+    movu        m1,              [r0 + r3]
+    movu        m3,              [r1 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    movu        m2,              [r0 + 16 + r3]
+    movu        m3,              [r1 + 16 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m4
+    paddw       m2,              m5
+
+    packuswb    m1,              m2
+    movu        [r2 + r5],       m1
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+; instantiate 16xN kernels
+ADDAVG_W16_H4 4
+ADDAVG_W16_H4 8
+ADDAVG_W16_H4 12
+ADDAVG_W16_H4 16
+ADDAVG_W16_H4 32
+ADDAVG_W16_H4 64
+
+ADDAVG_W16_H4 24
+
+;-----------------------------------------------------------------------------
+; addAvg avx2 code start
+;-----------------------------------------------------------------------------
+
+INIT_YMM avx2
+; addAvg_8x2 (8-bit pixel path, AVX2)
+; Both rows processed in one ymm: low lane = row 0, high lane = row 1.
+; Strides are NOT pre-doubled here; '2 * r3' / '2 * r4' in the addressing
+; converts the 16-bit sample stride to bytes instead.
+cglobal addAvg_8x2, 6,6,4, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    movu            xm0, [r0]
+    vinserti128     m0, m0, [r0 + 2 * r3], 1
+
+    movu            xm2, [r1]
+    vinserti128     m2, m2, [r1 + 2 * r4], 1
+
+    paddw           m0, m2
+    pmulhrsw        m0, [pw_256]
+    paddw           m0, [pw_128]
+
+    ; pack within each lane, then split lanes back into the two rows
+    packuswb        m0, m0
+    vextracti128    xm1, m0, 1
+    movq            [r2], xm0
+    movq            [r2 + r5], xm1
+    RET
+
+; addAvg_8x6 (8-bit pixel path, AVX2)
+; Three unrolled row-pairs; each pair packed into one ymm via vinserti128
+; (low lane = even row, high lane = odd row).
+cglobal addAvg_8x6, 6,6,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova            m4, [pw_256]
+    mova            m5, [pw_128]
+    ; double source strides (16-bit sources)
+    add             r3, r3
+    add             r4, r4
+
+    ; rows 0-1
+    movu            xm0, [r0]
+    vinserti128     m0, m0, [r0 + r3], 1
+
+    movu            xm2, [r1]
+    vinserti128     m2, m2, [r1 + r4], 1
+
+    paddw           m0, m2
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    packuswb        m0, m0
+    vextracti128    xm1, m0, 1
+    movq            [r2], xm0
+    movq            [r2 + r5], xm1
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    ; rows 2-3
+    movu            xm0, [r0]
+    vinserti128     m0, m0, [r0+  r3], 1
+
+    movu            xm2, [r1]
+    vinserti128     m2, m2, [r1 + r4], 1
+
+    paddw           m0, m2
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    packuswb        m0, m0
+    vextracti128    xm1, m0, 1
+    movq            [r2], xm0
+    movq            [r2 + r5], xm1
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    ; rows 4-5
+    movu            xm0, [r0]
+    vinserti128     m0, m0, [r0 + r3], 1
+
+    movu            xm2, [r1]
+    vinserti128     m2, m2, [r1 + r4], 1
+
+    paddw           m0, m2
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    packuswb        m0, m0
+    vextracti128    xm1, m0, 1
+    movq            [r2], xm0
+    movq            [r2 + r5], xm1
+    RET
+
+%macro ADDAVG_W8_H4_AVX2 1 ; %1 = height; 8-wide addAvg, 4 rows per loop iteration; out = ((s0+s1+64)>>7)+128 clipped to u8
+INIT_YMM avx2
+cglobal addAvg_8x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova            m4, [pw_256]
+    mova            m5, [pw_128]
+    add             r3, r3 ; int16 element strides -> byte strides
+    add             r4, r4
+    mov             r6d, %1/4
+
+.loop:
+    movu            xm0, [r0]
+    vinserti128     m0, m0, [r0 + r3], 1
+
+    movu            xm2, [r1]
+    vinserti128     m2, m2, [r1 + r4], 1
+
+    paddw           m0, m2
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    packuswb        m0, m0
+    vextracti128    xm1, m0, 1
+    movq            [r2], xm0
+    movq            [r2 + r5], xm1
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    movu            xm0, [r0]
+    vinserti128     m0, m0, [r0 + r3], 1
+
+    movu            xm2, [r1] ; FIX: was "movu m2" (32-byte ymm load) - over-read 16 bytes past the 8-pixel row; upper lane is replaced by vinserti128 anyway
+    vinserti128     m2, m2, [r1 + r4], 1
+
+    paddw           m0, m2
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    packuswb        m0, m0
+    vextracti128    xm1, m0, 1
+    movq            [r2], xm0
+    movq            [r2 + r5], xm1
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    dec             r6d
+    jnz             .loop
+    RET
+%endmacro
+
+ADDAVG_W8_H4_AVX2 4
+ADDAVG_W8_H4_AVX2 8
+ADDAVG_W8_H4_AVX2 12
+ADDAVG_W8_H4_AVX2 16
+ADDAVG_W8_H4_AVX2 32
+ADDAVG_W8_H4_AVX2 64
+
+%macro ADDAVG_W12_H4_AVX2 1 ; %1 = height; 12-wide addAvg: 8 pixels via xmm + 4 pixels inserted into the upper lane
+INIT_YMM avx2
+cglobal addAvg_12x%1, 6,7,7, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova            m4, [pw_256]
+    mova            m5, [pw_128]
+    add             r3, r3 ; int16 element strides -> byte strides
+    add             r4, r4
+    mov             r6d, %1/4
+
+.loop:
+    movu            xm0, [r0]
+    movu            xm1, [r1]
+    movq            xm2, [r0 + 16] ; trailing 4 pixels (8 bytes) of the 12-pixel row
+    movq            xm3, [r1 + 16]
+    vinserti128     m0, m0, xm2, 1
+    vinserti128     m1, m1, xm3, 1
+
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            xm1, [r0 + r3]
+    movu            xm2, [r1 + r4]
+    movq            xm3, [r0 + r3 + 16]
+    movq            xm6, [r1 + r4 + 16] ; FIX: was [r1 + r3 + 16] - src1 must use its own stride r4; old code was wrong whenever src0Stride != src1Stride
+    vinserti128     m1, m1, xm3, 1
+    vinserti128     m2, m2, xm6, 1
+
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vextracti128    xm1, m0, 1
+    movq            [r2], xm0
+    movd            [r2 + 8], xm1
+    vpshufd         m1, m1, 2
+    movhps          [r2 + r5], xm0
+    movd            [r2 + r5 + 8], xm1
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    movu            xm0, [r0]
+    movu            xm1, [r1]
+    movq            xm2, [r0 + 16]
+    movq            xm3, [r1 + 16]
+    vinserti128     m0, m0, xm2, 1
+    vinserti128     m1, m1, xm3, 1
+
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            xm1, [r0 + r3]
+    movu            xm2, [r1 + r4]
+    movq            xm3, [r0 + r3 + 16]
+    movq            xm6, [r1 + r4 + 16] ; FIX: was [r1 + r3 + 16] (same stride bug as above)
+    vinserti128     m1, m1, xm3, 1
+    vinserti128     m2, m2, xm6, 1
+
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vextracti128    xm1, m0, 1
+    movq            [r2], xm0
+    movd            [r2 + 8], xm1
+    vpshufd         m1, m1, 2
+    movhps          [r2 + r5], xm0
+    movd            [r2 + r5 + 8], xm1
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    dec             r6d
+    jnz             .loop
+    RET
+%endmacro
+
+ADDAVG_W12_H4_AVX2 16
+ADDAVG_W12_H4_AVX2 32
+
+%macro ADDAVG_W16_H4_AVX2 1 ; %1 = height; 16-wide addAvg, 4 rows per iteration, full-ymm loads
+INIT_YMM avx2
+cglobal addAvg_16x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova            m4, [pw_256]
+    mova            m5, [pw_128]
+    add             r3, r3
+    add             r4, r4
+    mov             r6d, %1/4
+
+.loop:
+    movu            m0, [r0]
+    movu            m1, [r1]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            m1, [r0 + r3]
+    movu            m2, [r1 + r4]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b ; undo packuswb's per-lane interleave (qwords 0,2,1,3)
+    vextracti128    [r2], m0, 0
+    vextracti128    [r2 + r5], m0, 1
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    movu            m0, [r0]
+    movu            m1, [r1]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            m1, [r0 + r3]
+    movu            m2, [r1 + r4]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b
+    vextracti128    [r2], m0, 0
+    vextracti128    [r2 + r5], m0, 1
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    dec             r6d
+    jnz             .loop
+    RET
+%endmacro
+
+ADDAVG_W16_H4_AVX2 4
+ADDAVG_W16_H4_AVX2 8
+ADDAVG_W16_H4_AVX2 12
+ADDAVG_W16_H4_AVX2 16
+ADDAVG_W16_H4_AVX2 24
+ADDAVG_W16_H4_AVX2 32
+ADDAVG_W16_H4_AVX2 64
+
+%macro ADDAVG_W24_H2_AVX2 1 ; %1 = height; 24-wide addAvg: 16 pixels as ymm + 8 pixels as xmm (upper lane zeroed by VEX xmm load)
+INIT_YMM avx2
+cglobal addAvg_24x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova            m4, [pw_256]
+    mova            m5, [pw_128]
+    add             r3, r3
+    add             r4, r4
+    mov             r6d, %1/2
+
+.loop:
+    movu            m0, [r0]
+    movu            m1, [r1]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            xm1, [r0 + 32]
+    movu            xm2, [r1 + 32]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 10001101b ; gather the 24 valid bytes: 16 to [r2], 8 left in xm0
+    vextracti128    [r2], m0, 1
+    movq            [r2 + 16], xm0
+
+    movu            m0, [r0 + r3]
+    movu            m1, [r1 + r4]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            xm1, [r0 + r3 + 32]
+    movu            xm2, [r1 + r4 + 32]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 10001101b
+    vextracti128    [r2 + r5], m0, 1
+    movq            [r2 + r5 + 16], xm0
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    dec             r6d
+    jnz             .loop
+    RET
+%endmacro
+
+ADDAVG_W24_H2_AVX2 32
+ADDAVG_W24_H2_AVX2 64
+
+%macro ADDAVG_W32_H2_AVX2 1 ; %1 = height; 32-wide addAvg, 2 rows per iteration
+INIT_YMM avx2
+cglobal addAvg_32x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova            m4, [pw_256]
+    mova            m5, [pw_128]
+    add             r3, r3
+    add             r4, r4
+    mov             r6d, %1/2
+
+.loop:
+    movu            m0, [r0]
+    movu            m1, [r1]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            m1, [r0 + 32]
+    movu            m2, [r1 + 32]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b ; fix packuswb lane interleave before the contiguous store
+    movu            [r2], m0
+
+    movu            m0, [r0 + r3]
+    movu            m1, [r1 + r4]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            m1, [r0 + r3 + 32]
+    movu            m2, [r1 + r4 + 32]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b
+    movu            [r2 + r5], m0
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    dec             r6d
+    jnz             .loop
+    RET
+%endmacro
+
+ADDAVG_W32_H2_AVX2 8
+ADDAVG_W32_H2_AVX2 16
+ADDAVG_W32_H2_AVX2 24
+ADDAVG_W32_H2_AVX2 32
+ADDAVG_W32_H2_AVX2 48
+ADDAVG_W32_H2_AVX2 64
+
+%macro ADDAVG_W64_H2_AVX2 1 ; %1 = height; 64-wide addAvg: two 32-pixel halves per row, 2 rows per iteration
+INIT_YMM avx2
+cglobal addAvg_64x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova            m4, [pw_256]
+    mova            m5, [pw_128]
+    add             r3, r3
+    add             r4, r4
+    mov             r6d, %1/2
+
+.loop:
+    movu            m0, [r0]
+    movu            m1, [r1]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            m1, [r0 + 32]
+    movu            m2, [r1 + 32]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b
+    movu            [r2], m0
+
+    movu            m0, [r0 + 64]
+    movu            m1, [r1 + 64]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            m1, [r0 + 96]
+    movu            m2, [r1 + 96]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b
+    movu            [r2 + 32], m0
+
+    movu            m0, [r0 + r3]
+    movu            m1, [r1 + r4]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            m1, [r0 + r3 + 32]
+    movu            m2, [r1 + r4 + 32]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b
+    movu            [r2 + r5], m0
+
+    movu            m0, [r0 + r3 + 64]
+    movu            m1, [r1 + r4 + 64]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            m1, [r0 + r3 + 96]
+    movu            m2, [r1 + r4 + 96]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b
+    movu            [r2 + r5 + 32], m0
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    dec             r6d
+    jnz             .loop
+    RET
+%endmacro
+
+ADDAVG_W64_H2_AVX2 16
+ADDAVG_W64_H2_AVX2 32
+ADDAVG_W64_H2_AVX2 48
+ADDAVG_W64_H2_AVX2 64
+
+%macro ADDAVG_W48_H2_AVX2 1 ; %1 = height; 48-wide addAvg: a 32-pixel ymm pass plus a 16-pixel tail per row
+INIT_YMM avx2
+cglobal addAvg_48x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova            m4, [pw_256]
+    mova            m5, [pw_128]
+    add             r3, r3
+    add             r4, r4
+    mov             r6d, %1/2
+
+.loop:
+    movu            m0, [r0]
+    movu            m1, [r1]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            m1, [r0 + 32]
+    movu            m2, [r1 + 32]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b
+    movu            [r2], m0
+
+    movu            m0, [r0 + 64]
+    movu            m1, [r1 + 64]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    packuswb        m0, m0
+    vpermq          m0, m0, 11011000b ; gathers the two valid qwords into the low lane
+    vextracti128    [r2 + 32], m0, 0
+
+    movu            m0, [r0 + r3]
+    movu            m1, [r1 + r4]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    movu            m1, [r0 + r3 + 32]
+    movu            m2, [r1 + r4 + 32]
+    paddw           m1, m2
+    pmulhrsw        m1, m4
+    paddw           m1, m5
+
+    packuswb        m0, m1
+    vpermq          m0, m0, 11011000b
+    movu            [r2 + r5], m0
+
+    movu            m0, [r0 + r3 + 64]
+    movu            m1, [r1 + r4 + 64]
+    paddw           m0, m1
+    pmulhrsw        m0, m4
+    paddw           m0, m5
+
+    packuswb        m0, m0
+    vpermq          m0, m0, 11011000b
+    vextracti128    [r2 + r5 + 32], m0, 0
+
+    lea             r2, [r2 + 2 * r5]
+    lea             r0, [r0 + 2 * r3]
+    lea             r1, [r1 + 2 * r4]
+
+    dec             r6d
+    jnz             .loop
+    RET
+%endmacro
+
+ADDAVG_W48_H2_AVX2 64
+
+;-----------------------------------------------------------------------------
+; addAvg avx2 code end
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+%macro ADDAVG_W24_H2 2 ; %1 = width (always 24), %2 = height; SSE4 addAvg, 2 rows per iteration
+INIT_XMM sse4
+cglobal addAvg_%1x%2, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova        m4,              [pw_256]
+    mova        m5,              [pw_128]
+    add         r3,              r3
+    add         r4,              r4
+
+    mov         r6d,             %2/2
+
+.loop:
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2],            m0
+
+    movu        m0,              [r0 + 32]
+    movu        m2,              [r1 + 32]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    packuswb    m0,              m0
+    movh        [r2 + 16],       m0 ; last 8 of the 24 pixels
+
+    movu        m1,              [r0 + r3]
+    movu        m3,              [r1 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    movu        m2,              [r0 + 16 + r3]
+    movu        m3,              [r1 + 16 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m4
+    paddw       m2,              m5
+
+    packuswb    m1,              m2
+    movu        [r2 + r5],       m1
+
+    movu        m1,              [r0 + 32 + r3]
+    movu        m3,              [r1 + 32 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m1,              m1
+    movh        [r2 + 16 + r5],  m1
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+ADDAVG_W24_H2 24, 32
+
+ADDAVG_W24_H2 24, 64
+
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+%macro ADDAVG_W32_H2 1 ; %1 = height; SSE4 32-wide addAvg, 2 rows per iteration (4 xmm loads per row)
+INIT_XMM sse4
+cglobal addAvg_32x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova        m4,              [pw_256]
+    mova        m5,              [pw_128]
+    add         r3,              r3
+    add         r4,              r4
+
+    mov         r6d,             %1/2
+
+.loop:
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2],            m0
+
+    movu        m0,              [r0 + 32]
+    movu        m2,              [r1 + 32]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 48]
+    movu        m2,              [r1 + 48]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2 + 16],       m0
+
+    movu        m1,              [r0 + r3]
+    movu        m3,              [r1 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    movu        m2,              [r0 + 16 + r3]
+    movu        m3,              [r1 + 16 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m4
+    paddw       m2,              m5
+
+    packuswb    m1,              m2
+    movu        [r2 + r5],       m1
+
+    movu        m1,              [r0 + 32 + r3]
+    movu        m3,              [r1 + 32 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    movu        m2,              [r0 + 48 + r3]
+    movu        m3,              [r1 + 48 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m4
+    paddw       m2,              m5
+
+    packuswb    m1,              m2
+    movu        [r2 + 16 + r5],  m1
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz        .loop
+    RET
+%endmacro
+
+ADDAVG_W32_H2 8
+ADDAVG_W32_H2 16
+ADDAVG_W32_H2 24
+ADDAVG_W32_H2 32
+ADDAVG_W32_H2 64
+
+ADDAVG_W32_H2 48
+
+;-----------------------------------------------------------------------------
+
+
+;-----------------------------------------------------------------------------
+%macro ADDAVG_W48_H2 1 ; %1 = height; SSE4 48-wide addAvg, 2 rows per iteration (6 xmm loads per row)
+INIT_XMM sse4
+cglobal addAvg_48x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+
+    mova        m4,              [pw_256]
+    mova        m5,              [pw_128]
+    add         r3,              r3
+    add         r4,              r4
+
+    mov         r6d,             %1/2
+
+.loop:
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2],            m0
+
+    movu        m0,              [r0 + 32]
+    movu        m2,              [r1 + 32]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 48]
+    movu        m2,              [r1 + 48]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2 + 16],       m0
+
+    movu        m0,              [r0 + 64]
+    movu        m2,              [r1 + 64]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 80]
+    movu        m2,              [r1 + 80]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2 + 32],       m0
+
+    movu        m1,              [r0 + r3]
+    movu        m3,              [r1 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    movu        m2,              [r0 + 16 + r3]
+    movu        m3,              [r1 + 16 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m4
+    paddw       m2,              m5
+
+    packuswb    m1,              m2
+    movu        [r2 + r5],       m1
+
+    movu        m1,              [r0 + 32 + r3]
+    movu        m3,              [r1 + 32 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    movu        m2,              [r0 + 48 + r3]
+    movu        m3,              [r1 + 48 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m4
+    paddw       m2,              m5
+
+    packuswb    m1,              m2
+    movu        [r2 + 16 + r5],  m1
+
+    movu        m1,              [r0 + 64 + r3]
+    movu        m3,              [r1 + 64 + r4]
+    paddw       m1,              m3
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    movu        m2,              [r0 + 80 + r3]
+    movu        m3,              [r1 + 80 + r4]
+    paddw       m2,              m3
+    pmulhrsw    m2,              m4
+    paddw       m2,              m5
+
+    packuswb    m1,              m2
+    movu        [r2 + 32 + r5],  m1
+
+    lea         r2,              [r2 + 2 * r5]
+    lea         r0,              [r0 + 2 * r3]
+    lea         r1,              [r1 + 2 * r4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+ADDAVG_W48_H2 64
+
+;-----------------------------------------------------------------------------
+
+;-----------------------------------------------------------------------------
+%macro ADDAVG_W64_H1 1 ; %1 = height; SSE4 64-wide addAvg, one row per iteration (8 xmm loads)
+INIT_XMM sse4
+cglobal addAvg_64x%1, 6,7,6, pSrc0, src0, src1, dst, src0Stride, src1tride, dstStride
+    mova        m4,              [pw_256]
+    mova        m5,              [pw_128]
+    add         r3,              r3
+    add         r4,              r4
+
+    mov         r6d,             %1
+
+.loop:
+    movu        m0,              [r0]
+    movu        m2,              [r1]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 16]
+    movu        m2,              [r1 + 16]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2],            m0
+
+    movu        m0,              [r0 + 32]
+    movu        m2,              [r1 + 32]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 48]
+    movu        m2,              [r1 + 48]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2 + 16],       m0
+
+    movu        m0,              [r0 + 64]
+    movu        m2,              [r1 + 64]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 80]
+    movu        m2,              [r1 + 80]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2 + 32],       m0
+
+    movu        m0,              [r0 + 96]
+    movu        m2,              [r1 + 96]
+    paddw       m0,              m2
+    pmulhrsw    m0,              m4
+    paddw       m0,              m5
+
+    movu        m1,              [r0 + 112]
+    movu        m2,              [r1 + 112]
+    paddw       m1,              m2
+    pmulhrsw    m1,              m4
+    paddw       m1,              m5
+
+    packuswb    m0,              m1
+    movu        [r2 + 48],       m0
+
+    add         r2,              r5
+    add         r0,              r3
+    add         r1,              r4
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endmacro
+
+ADDAVG_W64_H1 16
+ADDAVG_W64_H1 32
+ADDAVG_W64_H1 48
+ADDAVG_W64_H1 64
+;-----------------------------------------------------------------------------
+%endif ; HIGH_BIT_DEPTH
+
+;=============================================================================
+; implicit weighted biprediction
+;=============================================================================
+; assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64
+%if WIN64
+    DECLARE_REG_TMP 0,1,2,3,4,5,4,5 ; t6/t7 alias t4/t5: weight regs reuse stride regs after BIWEIGHT_START
+    %macro AVG_START 0-1 0
+        PROLOGUE 6,7,%1
+    %endmacro
+%elif UNIX64
+    DECLARE_REG_TMP 0,1,2,3,4,5,7,8
+    %macro AVG_START 0-1 0
+        PROLOGUE 6,9,%1
+    %endmacro
+%else
+    DECLARE_REG_TMP 1,2,3,4,5,6,1,2 ; x86-32: args arrive on the stack, copy them into t0..t5
+    %macro AVG_START 0-1 0
+        PROLOGUE 0,7,%1
+        mov t0, r0m
+        mov t1, r1m
+        mov t2, r2m
+        mov t3, r3m
+        mov t4, r4m
+        mov t5, r5m
+    %endmacro
+%endif
+
+%macro AVG_END 0 ; advance dst/src1/src2 by two rows, loop; assumes eax holds the remaining row count, loaded by the caller - TODO confirm (setup is outside this hunk)
+    lea  t4, [t4+t5*2*SIZEOF_PIXEL]
+    lea  t2, [t2+t3*2*SIZEOF_PIXEL]
+    lea  t0, [t0+t1*2*SIZEOF_PIXEL]
+    sub eax, 2
+    jg .height_loop
+ %ifidn movu,movq ; detect MMX
+    EMMS
+ %endif
+    RET
+%endmacro
+
+%if HIGH_BIT_DEPTH
+
+%macro BIWEIGHT_MMX 2 ; HBD: m0 = (src1*w1 + src2*w2 + 32) >> 6, via pmaddwd on interleaved words
+    movh      m0, %1
+    movh      m1, %2
+    punpcklwd m0, m1
+    pmaddwd   m0, m3
+    paddd     m0, m4
+    psrad     m0, 6
+%endmacro
+
+%macro BIWEIGHT_START_MMX 0 ; pack (w, 64-w) word pair into every dword of m3; m4 = rounding 32, m5 = 0 (clip floor)
+    movzx  t6d, word r6m
+    mov    t7d, 64
+    sub    t7d, t6d
+    shl    t7d, 16
+    add    t6d, t7d
+    movd    m3, t6d
+    SPLATD  m3, m3
+    mova    m4, [pd_32]
+    pxor    m5, m5
+%endmacro
+
+%else ;!HIGH_BIT_DEPTH
+%macro BIWEIGHT_MMX 2 ; 8-bit: m0 = (src1*w1 + src2*w2 + 32) >> 6 in word lanes (m5 must be zero for the unpacks)
+    movh      m0, %1
+    movh      m1, %2
+    punpcklbw m0, m5
+    punpcklbw m1, m5
+    pmullw    m0, m2
+    pmullw    m1, m3
+    paddw     m0, m1
+    paddw     m0, m4
+    psraw     m0, 6
+%endmacro
+
+%macro BIWEIGHT_START_MMX 0
+    movd    m2, r6m
+    SPLATW  m2, m2   ; weight_dst
+    mova    m3, [pw_64]
+    psubw   m3, m2   ; weight_src
+    mova    m4, [pw_32] ; rounding
+    pxor    m5, m5
+%endmacro
+%endif ;HIGH_BIT_DEPTH
+
+%macro BIWEIGHT_SSSE3 2 ; 8-bit: interleave bytes, pmaddubsw with packed (w1,w2), then pmulhrsw by 512 == rounding >>6
+    movh      m0, %1
+    movh      m1, %2
+    punpcklbw m0, m1
+    pmaddubsw m0, m3
+    pmulhrsw  m0, m4
+%endmacro
+
+%macro BIWEIGHT_START_SSSE3 0 ; build (w, 64-w) byte pair replicated across m3; m4 = pw_512 rounding factor
+    movzx  t6d, byte r6m ; FIXME x86_64
+    mov    t7d, 64
+    sub    t7d, t6d
+    shl    t7d, 8
+    add    t6d, t7d
+    mova    m4, [pw_512]
+    movd   xm3, t6d
+%if cpuflag(avx2)
+    vpbroadcastw m3, xm3
+%else
+    SPLATW  m3, m3   ; weight_dst,src
+%endif
+%endmacro
+
+%if HIGH_BIT_DEPTH
+%macro BIWEIGHT_ROW 4 ; dst, src1, src2, width: weight one row segment; half-width case packs/clips a single vector
+    BIWEIGHT   [%2], [%3]
+%if %4==mmsize/4
+    packssdw     m0, m0
+    CLIPW        m0, m5, m7 ; clip to [0, pw_pixel_max] (m7 set by AVG_WEIGHT)
+    movh       [%1], m0
+%else
+    SWAP 0, 6
+    BIWEIGHT   [%2+mmsize/2], [%3+mmsize/2]
+    packssdw     m6, m0
+    CLIPW        m6, m5, m7
+    mova       [%1], m6
+%endif
+%endmacro
+
+%else ;!HIGH_BIT_DEPTH
+%macro BIWEIGHT_ROW 4 ; dst, src1, src2, width: 8-bit variant; %4==12 stores 8+4 bytes instead of a full vector
+    BIWEIGHT [%2], [%3]
+%if %4==mmsize/2
+    packuswb   m0, m0
+    movh     [%1], m0
+%else
+    SWAP 0, 6
+    BIWEIGHT [%2+mmsize/2], [%3+mmsize/2]
+    packuswb   m6, m0
+%if %4 != 12
+    mova    [%1], m6
+%else ; !w12
+    movh    [%1], m6
+    movhlps m6, m6
+    movd    [%1+mmsize/2], m6
+%endif ; w12
+%endif
+%endmacro
+
+%endif ;HIGH_BIT_DEPTH
+
+;-----------------------------------------------------------------------------
+; int pixel_avg_weight_w16( pixel *dst, intptr_t, pixel *src1, intptr_t, pixel *src2, intptr_t, int i_weight )
+;-----------------------------------------------------------------------------
+%macro AVG_WEIGHT 1-2 0 ; %1 = width in pixels, %2 = xmm register count for PROLOGUE
+cglobal pixel_avg_weight_w%1
+    BIWEIGHT_START
+    AVG_START %2
+%if HIGH_BIT_DEPTH
+    mova    m7, [pw_pixel_max]
+%endif
+.height_loop:
+%if mmsize==16 && %1==mmsize/(2*SIZEOF_PIXEL) ; narrow case: pack two rows into one xmm store pair
+    BIWEIGHT [t2], [t4]
+    SWAP 0, 6
+    BIWEIGHT [t2+SIZEOF_PIXEL*t3], [t4+SIZEOF_PIXEL*t5]
+%if HIGH_BIT_DEPTH
+    packssdw m6, m0
+    CLIPW    m6, m5, m7
+%else ;!HIGH_BIT_DEPTH
+    packuswb m6, m0
+%endif ;HIGH_BIT_DEPTH
+    movlps   [t0], m6
+    movhps   [t0+SIZEOF_PIXEL*t1], m6
+%else
+%assign x 0 ; general case: sweep the row in mmsize chunks, two rows per iteration; w12 gets a short tail chunk
+%rep (%1*SIZEOF_PIXEL+mmsize-1)/mmsize
+%assign y mmsize
+%if (%1 == 12) && (%1*SIZEOF_PIXEL-x < mmsize)
+%assign y (%1*SIZEOF_PIXEL-x)
+%endif
+    BIWEIGHT_ROW   t0+x,                   t2+x,                   t4+x,                 y
+    BIWEIGHT_ROW   t0+x+SIZEOF_PIXEL*t1,   t2+x+SIZEOF_PIXEL*t3,   t4+x+SIZEOF_PIXEL*t5, y
+%assign x x+mmsize
+%endrep
+%endif
+    AVG_END
+%endmacro
+
+%define BIWEIGHT BIWEIGHT_MMX ; MMX baseline versions for every width
+%define BIWEIGHT_START BIWEIGHT_START_MMX
+INIT_MMX mmx2
+AVG_WEIGHT 4
+AVG_WEIGHT 8
+AVG_WEIGHT 12
+AVG_WEIGHT 16
+AVG_WEIGHT 32
+AVG_WEIGHT 64
+AVG_WEIGHT 24
+AVG_WEIGHT 48
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+AVG_WEIGHT 4,  8
+AVG_WEIGHT 8,  8
+AVG_WEIGHT 12, 8
+AVG_WEIGHT 16, 8
+AVG_WEIGHT 24, 8
+AVG_WEIGHT 32, 8
+AVG_WEIGHT 48, 8
+AVG_WEIGHT 64, 8
+%else ;!HIGH_BIT_DEPTH
+INIT_XMM sse2
+AVG_WEIGHT 8,  7
+AVG_WEIGHT 12, 7
+AVG_WEIGHT 16, 7
+AVG_WEIGHT 32, 7
+AVG_WEIGHT 64, 7
+AVG_WEIGHT 24, 7
+AVG_WEIGHT 48, 7
+%define BIWEIGHT BIWEIGHT_SSSE3 ; swap in the pmaddubsw implementation for ssse3+ builds
+%define BIWEIGHT_START BIWEIGHT_START_SSSE3
+INIT_MMX ssse3
+AVG_WEIGHT 4
+INIT_XMM ssse3
+AVG_WEIGHT 8,  7
+AVG_WEIGHT 12, 7
+AVG_WEIGHT 16, 7
+AVG_WEIGHT 32, 7
+AVG_WEIGHT 64, 7
+AVG_WEIGHT 24, 7
+AVG_WEIGHT 48, 7
+
+INIT_YMM avx2
+cglobal pixel_avg_weight_w16 ; hand-written avx2 w16: two rows per iteration, one row per 128-bit lane
+    BIWEIGHT_START
+    AVG_START 5
+.height_loop:
+    movu     xm0, [t2]
+    movu     xm1, [t4]
+    vinserti128 m0, m0, [t2+t3], 1
+    vinserti128 m1, m1, [t4+t5], 1
+    SBUTTERFLY bw, 0, 1, 2 ; interleave src1/src2 bytes for pmaddubsw with the (w1,w2) pairs in m3
+    pmaddubsw m0, m3
+    pmaddubsw m1, m3
+    pmulhrsw  m0, m4 ; rounding >>6 via pw_512
+    pmulhrsw  m1, m4
+    packuswb  m0, m1
+    mova    [t0], xm0
+    vextracti128 [t0+t1], m0, 1
+    AVG_END
+
+cglobal pixel_avg_weight_w32 ; avx2 w32: whole 32-pixel row in one ymm pass, two rows per AVG_END step
+    BIWEIGHT_START
+    AVG_START 5
+.height_loop:
+    movu     m0, [t2]
+    movu     m1, [t4]
+    SBUTTERFLY bw, 0, 1, 2
+    pmaddubsw m0, m3
+    pmaddubsw m1, m3
+    pmulhrsw  m0, m4
+    pmulhrsw  m1, m4
+    packuswb  m0, m1
+    mova    [t0], m0
+    AVG_END
+
+cglobal pixel_avg_weight_w64 ; avx2 w64: two 32-pixel halves per row; NOTE processes one row per iteration but AVG_END advances by two rows - assumes callers only use even heights, TODO confirm
+    BIWEIGHT_START
+    AVG_START 5
+.height_loop:
+    movu     m0, [t2]
+    movu     m1, [t4]
+    SBUTTERFLY bw, 0, 1, 2
+    pmaddubsw m0, m3
+    pmaddubsw m1, m3
+    pmulhrsw  m0, m4
+    pmulhrsw  m1, m4
+    packuswb  m0, m1
+    mova    [t0], m0
+    movu     m0, [t2 + 32]
+    movu     m1, [t4 + 32]
+    SBUTTERFLY bw, 0, 1, 2
+    pmaddubsw m0, m3
+    pmaddubsw m1, m3
+    pmulhrsw  m0, m4
+    pmulhrsw  m1, m4
+    packuswb  m0, m1
+    mova    [t0 + 32], m0
+    AVG_END
+
+%endif ;HIGH_BIT_DEPTH
+
+;=============================================================================
+; P frame explicit weighted prediction
+;=============================================================================
+
+%if HIGH_BIT_DEPTH
+; width
+%macro WEIGHT_START 1 ; load weight table from r4: [r4]=1<<denom, [r4+16]=packed weight/offset, [r4+32]=denom
+    mova        m0, [r4+ 0]         ; 1<<denom
+    mova        m3, [r4+16]
+    movd        m2, [r4+32]         ; denom
+    mova        m4, [pw_pixel_max]
+    paddw       m2, [sq_1]          ; denom+1
+%endmacro
+
+; src1, src2
+%macro WEIGHT 2 ; weight 2x4 pixels: pmaddwd against (weight, 1<<denom) pairs, arithmetic shift by denom+1, repack
+    movh        m5, [%1]
+    movh        m6, [%2]
+    punpcklwd   m5, m0
+    punpcklwd   m6, m0
+    pmaddwd     m5, m3
+    pmaddwd     m6, m3
+    psrad       m5, m2
+    psrad       m6, m2
+    packssdw    m5, m6
+%endmacro
+
+; src, dst, width
+%macro WEIGHT_TWO_ROW 4 ; weight two rows of %3 pixels; narrow tail (<=4) packs both rows into one register
+    %assign x 0
+%rep (%3+mmsize/2-1)/(mmsize/2)
+%if %3-x/2 <= 4 && mmsize == 16
+    WEIGHT      %1+x, %1+r3+x
+    CLIPW         m5, [pb_0], m4 ; clip to [0, pixel_max]
+    movh      [%2+x], m5
+    movhps [%2+r1+x], m5
+%else
+    WEIGHT      %1+x, %1+x+mmsize/2
+    SWAP           5,  7
+    WEIGHT   %1+r3+x, %1+r3+x+mmsize/2
+    CLIPW         m5, [pb_0], m4
+    CLIPW         m7, [pb_0], m4
+    mova      [%2+x], m7
+    mova   [%2+r1+x], m5
+%endif
+    %assign x x+mmsize
+%endrep
+%endmacro
+
+%else ; !HIGH_BIT_DEPTH
+
+%macro WEIGHT_START 1               ; 8-bit weight setup; %1 = width (unused); r4 = weight_t*
+%if cpuflag(avx2)
+    vbroadcasti128 m3, [r4]         ; scale in both ymm lanes
+    vbroadcasti128 m4, [r4+16]      ; offset/round in both lanes
+%else
+    mova     m3, [r4]               ; scale
+    mova     m4, [r4+16]            ; offset/round
+%if notcpuflag(ssse3)
+    movd     m5, [r4+32]            ; denom shift, only needed by the pmullw/psraw path
+%endif
+%endif
+    pxor     m2, m2                 ; zero reg for byte->word unpacking
+%endmacro
+
+; src1, src2, dst1, dst2, fast
+%macro WEIGHT_ROWx2 5               ; weight a full mmsize-wide span of two rows: %1/%2 = srcs, %3/%4 = dsts, %5 = fast flag
+    movh      m0, [%1         ]
+    movh      m1, [%1+mmsize/2]
+    movh      m6, [%2         ]
+    movh      m7, [%2+mmsize/2]
+    punpcklbw m0, m2                ; widen bytes to words (m2 == 0)
+    punpcklbw m1, m2
+    punpcklbw m6, m2
+    punpcklbw m7, m2
+%if cpuflag(ssse3)
+%if %5==0                           ; fast==1 means the <<7 is pre-merged into m3 by .fast in WEIGHTER
+    psllw     m0, 7
+    psllw     m1, 7
+    psllw     m6, 7
+    psllw     m7, 7
+%endif
+    pmulhrsw  m0, m3                ; scale with built-in rounding shift
+    pmulhrsw  m1, m3
+    pmulhrsw  m6, m3
+    pmulhrsw  m7, m3
+    paddw     m0, m4                ; + offset
+    paddw     m1, m4
+    paddw     m6, m4
+    paddw     m7, m4
+%else
+    pmullw    m0, m3
+    pmullw    m1, m3
+    pmullw    m6, m3
+    pmullw    m7, m3
+    paddsw    m0, m4        ;1<<(denom-1)+(offset<<denom)
+    paddsw    m1, m4
+    paddsw    m6, m4
+    paddsw    m7, m4
+    psraw     m0, m5                ; >> denom
+    psraw     m1, m5
+    psraw     m6, m5
+    psraw     m7, m5
+%endif
+    packuswb  m0, m1                ; saturate to bytes
+    packuswb  m6, m7
+    mova    [%3], m0
+    mova    [%4], m6
+%endmacro
+
+; src1, src2, dst1, dst2, width, fast
+%macro WEIGHT_COL 6                 ; weight a narrow tail (<mmsize) of two rows: %1/%2 srcs, %3/%4 dsts, %5 width, %6 fast flag
+%if cpuflag(avx2)
+%if %5==16
+    movu     xm0, [%1]
+    vinserti128 m0, m0, [%2], 1     ; row0 in low lane, row1 in high lane
+    punpckhbw m1, m0, m2
+    punpcklbw m0, m0, m2
+%if %6==0
+    psllw     m0, 7
+    psllw     m1, 7
+%endif
+    pmulhrsw  m0, m3
+    pmulhrsw  m1, m3
+    paddw     m0, m4
+    paddw     m1, m4
+    packuswb  m0, m1
+    mova    [%3], xm0
+    vextracti128 [%4], m0, 1        ; high lane back to the second row
+%else
+    movq     xm0, [%1]
+    vinserti128 m0, m0, [%2], 1
+    punpcklbw m0, m2
+%if %6==0
+    psllw     m0, 7
+%endif
+    pmulhrsw  m0, m3
+    paddw     m0, m4
+    packuswb  m0, m0
+    vextracti128 xm1, m0, 1
+%if %5 == 8
+    movq    [%3], xm0
+    movq    [%4], xm1
+%else
+    movd    [%3], xm0
+    movd    [%4], xm1
+%endif
+%endif
+%else
+    movh      m0, [%1]
+    movh      m1, [%2]
+    punpcklbw m0, m2                ; widen to words (m2 == 0)
+    punpcklbw m1, m2
+%if cpuflag(ssse3)
+%if %6==0
+    psllw     m0, 7
+    psllw     m1, 7
+%endif
+    pmulhrsw  m0, m3
+    pmulhrsw  m1, m3
+    paddw     m0, m4
+    paddw     m1, m4
+%else
+    pmullw    m0, m3
+    pmullw    m1, m3
+    paddsw    m0, m4        ;1<<(denom-1)+(offset<<denom)
+    paddsw    m1, m4
+    psraw     m0, m5
+    psraw     m1, m5
+%endif
+%if %5 == 8
+    packuswb  m0, m1                ; both rows fit one register
+    movh    [%3], m0
+    movhps  [%4], m0
+%else
+    packuswb  m0, m0
+    packuswb  m1, m1
+    movd    [%3], m0    ; width 2 can write garbage for the last 2 bytes
+    movd    [%4], m1
+%endif
+%endif
+%endmacro
+; src, dst, width
+%macro WEIGHT_TWO_ROW 4             ; 8-bit: weight two rows (%1=src, %2=dst, %3=width, %4=fast); full spans via ROWx2, tail via COL
+%assign x 0
+%rep %3
+%if (%3-x) >= mmsize
+    WEIGHT_ROWx2 %1+x, %1+r3+x, %2+x, %2+r1+x, %4
+    %assign x (x+mmsize)
+%else
+    %assign w %3-x
+%if w == 20                         ; avx2 w20: do 16 now, the trailing 4 on the next %rep pass
+    %assign w 16
+%endif
+    WEIGHT_COL %1+x, %1+r3+x, %2+x, %2+r1+x, w, %4
+    %assign x (x+w)
+%endif
+%if x >= %3
+    %exitrep
+%endif
+%endrep
+%endmacro
+
+%endif ; HIGH_BIT_DEPTH
+
+;-----------------------------------------------------------------------------
+;void mc_weight_wX( pixel *dst, intptr_t i_dst_stride, pixel *src, intptr_t i_src_stride, weight_t *weight, int h )
+;-----------------------------------------------------------------------------
+
+%macro WEIGHTER 1                   ; emits mc_weight_w%1(dst, dst_stride, src, src_stride, weight_t*, height)
+cglobal mc_weight_w%1, 6,6,8
+    FIX_STRIDES r1, r3              ; strides are in pixels; double them for HIGH_BIT_DEPTH
+    WEIGHT_START %1
+%if cpuflag(ssse3) && HIGH_BIT_DEPTH == 0
+    ; we can merge the shift step into the scale factor
+    ; if (m3<<7) doesn't overflow an int16_t
+    cmp byte [r4+1], 0              ; weight struct flag selecting the merged-shift path -- TODO confirm field layout
+    jz .fast
+%endif
+.loop:
+    WEIGHT_TWO_ROW r2, r0, %1, 0
+    lea  r0, [r0+r1*2]              ; two rows per iteration
+    lea  r2, [r2+r3*2]
+    sub r5d, 2
+    jg .loop
+    RET
+%if cpuflag(ssse3) && HIGH_BIT_DEPTH == 0
+.fast:
+    psllw m3, 7                     ; pre-shift the scale so WEIGHT_ROWx2/COL skip the psllw per pixel
+.fastloop:
+    WEIGHT_TWO_ROW r2, r0, %1, 1
+    lea  r0, [r0+r1*2]
+    lea  r2, [r2+r3*2]
+    sub r5d, 2
+    jg .fastloop
+    RET
+%endif
+%endmacro
+
+INIT_MMX mmx2                       ; instantiate mc_weight_wN for each ISA level
+WEIGHTER  4
+WEIGHTER  8
+WEIGHTER 12
+WEIGHTER 16
+WEIGHTER 20
+INIT_XMM sse2
+WEIGHTER  8
+WEIGHTER 16
+WEIGHTER 20
+%if HIGH_BIT_DEPTH
+WEIGHTER 12
+%else
+INIT_MMX ssse3
+WEIGHTER  4
+INIT_XMM ssse3
+WEIGHTER  8
+WEIGHTER 16
+WEIGHTER 20
+INIT_YMM avx2
+WEIGHTER 8
+WEIGHTER 16
+WEIGHTER 20
+%endif
+
+%macro OFFSET_OP 7                  ; add/sub m2 to two rows: %1/%2 srcs, %3/%4 dsts, %5 add|sub, %6/%7 load/store suffixes
+    mov%6        m0, [%1]
+    mov%6        m1, [%2]
+%if HIGH_BIT_DEPTH
+    p%5usw       m0, m2             ; saturating word add/sub of the offset
+    p%5usw       m1, m2
+%ifidn %5,add
+    pminsw       m0, m3             ; clamp to pixel_max (m3) after an add
+    pminsw       m1, m3
+%endif
+%else
+    p%5usb       m0, m2             ; saturating byte add/sub (no clamp needed at 8-bit)
+    p%5usb       m1, m2
+%endif
+    mov%7      [%3], m0
+    mov%7      [%4], m1
+%endmacro
+
+%macro OFFSET_TWO_ROW 4             ; offset two rows of %3 pixels: full mmsize chunks, then an h/d-sized tail
+%assign x 0
+%rep %3
+%if (%3*SIZEOF_PIXEL-x) >= mmsize
+    OFFSET_OP (%1+x), (%1+x+r3), (%2+x), (%2+x+r1), %4, u, a
+    %assign x (x+mmsize)
+%else
+%if HIGH_BIT_DEPTH
+    OFFSET_OP (%1+x), (%1+x+r3), (%2+x), (%2+x+r1), %4, h, h
+%else
+    OFFSET_OP (%1+x), (%1+x+r3), (%2+x), (%2+x+r1), %4, d, d
+%endif
+    %exitrep
+%endif
+%if x >= %3*SIZEOF_PIXEL
+    %exitrep
+%endif
+%endrep
+%endmacro
+
+;-----------------------------------------------------------------------------
+;void mc_offset_wX( pixel *src, intptr_t i_src_stride, pixel *dst, intptr_t i_dst_stride, weight_t *w, int h )
+;-----------------------------------------------------------------------------
+%macro OFFSET 2                     ; emits mc_offset%2_w%1 (add or sub variant), two rows per loop iteration
+cglobal mc_offset%2_w%1, 6,6
+    FIX_STRIDES r1, r3
+    mova m2, [r4]                   ; broadcast offset from weight_t*
+%if HIGH_BIT_DEPTH
+%ifidn %2,add
+    mova m3, [pw_pixel_max]         ; clamp limit used by OFFSET_OP after padd
+%endif
+%endif
+.loop:
+    OFFSET_TWO_ROW r2, r0, %1, %2
+    lea  r0, [r0+r1*2]
+    lea  r2, [r2+r3*2]
+    sub r5d, 2
+    jg .loop
+    RET
+%endmacro
+
+%macro OFFSETPN 1                   ; emit both the add and sub offset functions for width %1
+       OFFSET %1, add
+       OFFSET %1, sub
+%endmacro
+INIT_MMX mmx2
+OFFSETPN  4
+OFFSETPN  8
+OFFSETPN 12
+OFFSETPN 16
+OFFSETPN 20
+INIT_XMM sse2
+OFFSETPN 12
+OFFSETPN 16
+OFFSETPN 20
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+OFFSETPN  8
+%endif
+
+
+;=============================================================================
+; pixel avg
+;=============================================================================
+
+;-----------------------------------------------------------------------------
+; void pixel_avg_4x4( pixel *dst, intptr_t dst_stride, pixel *src1, intptr_t src1_stride,
+;                     pixel *src2, intptr_t src2_stride, int weight );
+;-----------------------------------------------------------------------------
+%macro AVGH 2                       ; pixel_avg_%1x%2 dispatcher: eax=height, then tail-jump to weighted or plain kernel
+cglobal pixel_avg_%1x%2
+    mov eax, %2
+    cmp dword r6m, 32               ; weight==32 means plain (unweighted) average
+    jne pixel_avg_weight_w%1 %+ SUFFIX
+%if cpuflag(avx2) && %1 == 16 ; all AVX2 machines can do fast 16-byte unaligned loads
+    jmp pixel_avg_w%1_avx2
+%else
+%if mmsize == 16 && (%1 % 16 == 0)
+    test dword r4m, 15              ; src2 aligned? then the aligned sse2 kernel is usable
+    jz pixel_avg_w%1_sse2
+%endif
+    jmp pixel_avg_w%1_mmx2
+%endif
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void pixel_avg_w4( pixel *dst, intptr_t dst_stride, pixel *src1, intptr_t src1_stride,
+;                    pixel *src2, intptr_t src2_stride, int height, int weight );
+;-----------------------------------------------------------------------------
+
+%macro AVG_FUNC 3-4                 ; pixel_avg_w%1 kernel: %2=load op, %3=store op, optional %4=tail store (w12 case)
+cglobal pixel_avg_w%1
+    AVG_START                       ; pointer/stride setup macro (defined earlier); t0..t5 per uses below
+.height_loop:
+%assign x 0
+%rep (%1*SIZEOF_PIXEL+mmsize-1)/mmsize
+    %2     m0, [t2+x]
+    %2     m1, [t2+x+SIZEOF_PIXEL*t3]
+%if HIGH_BIT_DEPTH
+    pavgw  m0, [t4+x]               ; (a+b+1)>>1 on words
+    pavgw  m1, [t4+x+SIZEOF_PIXEL*t5]
+%else ;!HIGH_BIT_DEPTH
+    pavgb  m0, [t4+x]               ; (a+b+1)>>1 on bytes
+    pavgb  m1, [t4+x+SIZEOF_PIXEL*t5]
+%endif
+%if (%1 == 12) && (%1-x/SIZEOF_PIXEL < mmsize)
+    %4     [t0+x], m0               ; width-12 tail uses the narrower store %4
+    %4     [t0+x+SIZEOF_PIXEL*t1], m1
+%else
+    %3     [t0+x], m0
+    %3     [t0+x+SIZEOF_PIXEL*t1], m1
+%endif
+%assign x x+mmsize
+%endrep
+    AVG_END
+%endmacro
+
+%macro  pixel_avg_W8 0              ; HBD helper: average four 8-pixel rows; needs r6=r3*3, r7=r5*3, r8=r1*3 precomputed
+    movu    m0, [r2]
+    movu    m1, [r4]
+    pavgw   m0, m1
+    movu    [r0], m0
+    movu    m2, [r2 + r3]
+    movu    m3, [r4 + r5]
+    pavgw   m2, m3
+    movu    [r0 + r1], m2
+
+    movu    m0, [r2 + r3 * 2]
+    movu    m1, [r4 + r5 * 2]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2], m0
+    movu    m2, [r2 + r6]           ; row 3 via the *3 stride regs
+    movu    m3, [r4 + r7]
+    pavgw   m2, m3
+    movu    [r0 + r8], m2
+
+    lea     r0, [r0 + 4 * r1]       ; advance all pointers by 4 rows
+    lea     r2, [r2 + 4 * r3]
+    lea     r4, [r4 + 4 * r5]
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------------------------
+;void pixelavg_pp(pixel dst, intptr_t dstride, const pixel src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+;-------------------------------------------------------------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+%if ARCH_X86_64
+INIT_XMM sse2
+cglobal pixel_avg_8x4, 6,9,4       ; HBD 8x4: one pixel_avg_W8 pass (4 rows)
+    add     r1d, r1d               ; pixel strides -> byte strides (16-bit pixels)
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    pixel_avg_W8
+    RET
+
+cglobal pixel_avg_8x8, 6,9,4       ; HBD 8x8: two 4-row passes
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    pixel_avg_W8
+    pixel_avg_W8
+    RET
+
+cglobal pixel_avg_8x16, 6,10,4     ; HBD 8x16: 4 loop iterations x 4 rows
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 4
+.loop:
+    pixel_avg_W8
+    dec     r9d
+    jnz     .loop
+    RET
+
+cglobal pixel_avg_8x32, 6,10,4     ; HBD 8x32: 8 loop iterations x 4 rows
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 8
+.loop:
+    pixel_avg_W8
+    dec     r9d
+    jnz     .loop
+    RET
+%endif
+%endif
+
+%if HIGH_BIT_DEPTH                  ; HBD instantiations of the generic AVG kernels + dispatchers
+
+INIT_MMX mmx2
+AVG_FUNC 4, movq, movq
+AVGH 4, 16
+AVGH 4, 8
+AVGH 4, 4
+AVGH 4, 2
+
+AVG_FUNC 8, movq, movq
+AVGH 8, 32
+AVGH 8, 16
+AVGH 8,  8
+AVGH 8,  4
+
+AVG_FUNC 16, movq, movq
+AVGH 16, 64
+AVGH 16, 32
+AVGH 16, 16
+AVGH 16, 12
+AVGH 16,  8
+AVGH 16,  4
+
+AVG_FUNC 24, movq, movq
+AVGH 24, 32
+
+AVG_FUNC 32, movq, movq
+AVGH 32, 32
+AVGH 32, 24
+AVGH 32, 16
+AVGH 32, 8
+
+AVG_FUNC 48, movq, movq
+AVGH 48, 64
+
+AVG_FUNC 64, movq, movq
+AVGH 64, 64
+AVGH 64, 48
+AVGH 64, 32
+AVGH 64, 16
+
+AVG_FUNC 12, movq, movq, movq       ; w12 needs the extra tail-store operand
+AVGH 12, 16
+
+INIT_XMM sse2
+AVG_FUNC 4, movq, movq
+AVGH  4, 16
+AVGH  4, 8
+AVGH  4, 4
+AVGH  4, 2
+
+
+AVG_FUNC 16, movdqu, movdqa
+AVGH  16, 64
+AVGH  16, 32
+AVGH  16, 16
+AVGH  16, 12
+AVGH  16,  8
+AVGH  16,  4
+
+AVG_FUNC 24, movdqu, movdqa
+AVGH 24, 32
+
+AVG_FUNC 32, movdqu, movdqa
+AVGH 32, 64
+AVGH 32, 32
+AVGH 32, 24
+AVGH 32, 16
+AVGH 32, 8
+
+AVG_FUNC 48, movdqu, movdqa
+AVGH 48, 64
+
+AVG_FUNC 64, movdqu, movdqa
+AVGH 64, 64
+AVGH 64, 48
+AVGH 64, 32
+AVGH 64, 16
+
+AVG_FUNC 12, movdqu, movdqa, movq   ; w12 needs the extra tail-store operand
+AVGH 12, 16
+
+%else ;!HIGH_BIT_DEPTH              ; 8-bit instantiations of the generic AVG kernels + dispatchers
+
+INIT_MMX mmx2
+AVG_FUNC 4, movd, movd
+AVGH 4, 16
+AVGH 4, 8
+AVGH 4, 4
+AVGH 4, 2
+
+AVG_FUNC 8, movq, movq
+AVGH 8, 32
+AVGH 8, 16
+AVGH 8,  8
+AVGH 8,  4
+
+AVG_FUNC 12, movq, movq, movd       ; w12 needs the extra tail-store operand
+AVGH 12, 16
+
+AVG_FUNC 16, movq, movq
+AVGH 16, 64
+AVGH 16, 32
+AVGH 16, 16
+AVGH 16, 12
+AVGH 16, 8
+AVGH 16, 4
+
+AVG_FUNC 32, movq, movq
+AVGH 32, 32
+AVGH 32, 24
+AVGH 32, 16
+AVGH 32, 8
+
+AVG_FUNC 64, movq, movq
+AVGH 64, 64
+AVGH 64, 48
+AVGH 64, 16
+
+AVG_FUNC 24, movq, movq
+AVGH 24, 32
+
+AVG_FUNC 48, movq, movq
+AVGH 48, 64
+
+INIT_XMM sse2
+AVG_FUNC 64, movdqu, movdqa
+AVGH 64, 64
+AVGH 64, 48
+AVGH 64, 32
+AVGH 64, 16
+
+AVG_FUNC 32, movdqu, movdqa
+AVGH 32, 64
+AVGH 32, 32
+AVGH 32, 24
+AVGH 32, 16
+AVGH 32, 8
+
+AVG_FUNC 24, movdqu, movdqa
+AVGH 24, 32
+
+AVG_FUNC 16, movdqu, movdqa
+AVGH 16, 64
+AVGH 16, 32
+AVGH 16, 16
+AVGH 16, 12
+AVGH 16, 8
+AVGH 16, 4
+
+AVG_FUNC 48, movdqu, movdqa
+AVGH 48, 64
+
+AVG_FUNC 12, movdqu, movdqa, movq   ; w12 needs the extra tail-store operand
+AVGH 12, 16
+
+AVGH  8, 32
+AVGH  8, 16
+AVGH  8,  8
+AVGH  8,  4
+INIT_XMM ssse3                      ; ssse3 reuses the sse2 plain kernels; only the weighted path differs
+AVGH 24, 32
+
+AVGH 64, 64
+AVGH 64, 48
+AVGH 64, 32
+AVGH 64, 16
+
+AVGH 32, 64
+AVGH 32, 32
+AVGH 32, 24
+AVGH 32, 16
+AVGH 32, 8
+
+AVGH 16, 64
+AVGH 16, 32
+AVGH 16, 16
+AVGH 16, 12
+AVGH 16, 8
+AVGH 16, 4
+
+AVGH 48, 64
+
+AVGH 12, 16
+
+AVGH  8, 32
+AVGH  8, 16
+AVGH  8,  8
+AVGH  8,  4
+INIT_MMX ssse3
+AVGH  4, 16
+AVGH  4,  8
+AVGH  4,  4
+AVGH  4,  2
+
+INIT_XMM avx2
+; TODO: activate AVX2 after debug
+;AVG_FUNC 24, movdqu, movdqa
+;AVGH 24, 32
+
+AVG_FUNC 16, movdqu, movdqa
+AVGH 16, 64
+AVGH 16, 32
+AVGH 16, 16
+AVGH 16, 12
+AVGH 16, 8
+AVGH 16, 4
+
+%endif ;HIGH_BIT_DEPTH
+
+;-------------------------------------------------------------------------------------------------------------------------------
+;void pixelavg_pp(pixel* dst, intptr_t dstride, const pixel* src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+;-------------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64 && BIT_DEPTH == 8
+INIT_YMM avx2
+cglobal pixel_avg_8x32              ; helper: averages 8 rows of one 32-byte ymm each (despite the name, it is used as a 32-wide x 8-row slice by the 32xN wrappers below)
+%rep 4
+    movu        m0, [r2]
+    movu        m2, [r2 + r3]
+    movu        m1, [r4]
+    movu        m3, [r4 + r5]
+    pavgb       m0, m1
+    pavgb       m2, m3
+    movu        [r0], m0
+    movu        [r0 + r1], m2
+
+    lea         r2, [r2 + r3 * 2]   ; advance two rows per unrolled step
+    lea         r4, [r4 + r5 * 2]
+    lea         r0, [r0 + r1 * 2]
+%endrep
+    ret                             ; plain ret: invoked via 'call' from the cglobal wrappers below
+
+cglobal pixel_avg_16x64_8bit        ; helper: averages 16 rows of 64 bytes (two ymm per row); used by the 64xN wrappers
+%rep 8
+    movu        m0, [r2]
+    movu        m2, [r2 + mmsize]
+    movu        m1, [r4]
+    movu        m3, [r4 + mmsize]
+    pavgb       m0, m1
+    pavgb       m2, m3
+    movu        [r0], m0
+    movu        [r0 + mmsize], m2
+
+    movu        m0, [r2 + r3]
+    movu        m2, [r2 + r3 + mmsize]
+    movu        m1, [r4 + r5]
+    movu        m3, [r4 + r5 + mmsize]
+    pavgb       m0, m1
+    pavgb       m2, m3
+    movu        [r0 + r1], m0
+    movu        [r0 + r1 + mmsize], m2
+
+    lea         r2, [r2 + r3 * 2]
+    lea         r4, [r4 + r5 * 2]
+    lea         r0, [r0 + r1 * 2]
+%endrep
+    ret
+
+cglobal pixel_avg_32x8, 6,6,4       ; 32xN wrappers: N/8 calls of the 8-row helper
+    call pixel_avg_8x32
+    RET
+
+cglobal pixel_avg_32x16, 6,6,4
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    RET
+
+cglobal pixel_avg_32x24, 6,6,4
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    RET
+
+cglobal pixel_avg_32x32, 6,6,4
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    RET
+
+cglobal pixel_avg_32x64, 6,6,4
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    call pixel_avg_8x32
+    RET
+
+cglobal pixel_avg_64x16, 6,6,4      ; 64xN wrappers: N/16 calls of the 16-row helper
+    call pixel_avg_16x64_8bit
+    RET
+
+cglobal pixel_avg_64x32, 6,6,4
+    call pixel_avg_16x64_8bit
+    call pixel_avg_16x64_8bit
+    RET
+
+cglobal pixel_avg_64x48, 6,6,4
+    call pixel_avg_16x64_8bit
+    call pixel_avg_16x64_8bit
+    call pixel_avg_16x64_8bit
+    RET
+
+cglobal pixel_avg_64x64, 6,6,4
+    call pixel_avg_16x64_8bit
+    call pixel_avg_16x64_8bit
+    call pixel_avg_16x64_8bit
+    call pixel_avg_16x64_8bit
+    RET
+%endif
+
+;=============================================================================
+; pixel avg2
+;=============================================================================
+
+%if HIGH_BIT_DEPTH
+;-----------------------------------------------------------------------------
+; void pixel_avg2_wN( uint16_t *dst,  intptr_t dst_stride,
+;                     uint16_t *src1, intptr_t src_stride,
+;                     uint16_t *src2, int height );
+;-----------------------------------------------------------------------------
+%macro AVG2_W_ONE 1                 ; HBD avg2, width %1 fits one register; src2 addressed as src1+delta (r4 = src2-src1)
+cglobal pixel_avg2_w%1, 6,7,4
+    sub     r4, r2                  ; r4 := src2 - src1 so one pointer walks both
+    lea     r6, [r4+r3*2]           ; delta for the second row
+.height_loop:
+    movu    m0, [r2]
+    movu    m1, [r2+r3*2]
+%if cpuflag(avx) || mmsize == 8
+    pavgw   m0, [r2+r4]             ; avx/mmx tolerate unaligned memory operands here
+    pavgw   m1, [r2+r6]
+%else
+    movu    m2, [r2+r4]             ; sse2 needs explicit unaligned loads
+    movu    m3, [r2+r6]
+    pavgw   m0, m2
+    pavgw   m1, m3
+%endif
+    mova   [r0], m0
+    mova   [r0+r1*2], m1
+    lea     r2, [r2+r3*4]           ; two rows per iteration
+    lea     r0, [r0+r1*4]
+    sub    r5d, 2
+    jg .height_loop
+    RET
+%endmacro
+
+%macro AVG2_W_TWO 3                 ; HBD avg2, width %1 spans two registers; %2 = second load op, %3 = second store op
+cglobal pixel_avg2_w%1, 6,7,8
+    sub     r4, r2                  ; r4 := src2 - src1
+    lea     r6, [r4+r3*2]
+.height_loop:
+    movu    m0, [r2]
+    %2      m1, [r2+mmsize]
+    movu    m2, [r2+r3*2]
+    %2      m3, [r2+r3*2+mmsize]
+%if mmsize == 8
+    pavgw   m0, [r2+r4]
+    pavgw   m1, [r2+r4+mmsize]
+    pavgw   m2, [r2+r6]
+    pavgw   m3, [r2+r6+mmsize]
+%else
+    movu    m4, [r2+r4]
+    %2      m5, [r2+r4+mmsize]
+    movu    m6, [r2+r6]
+    %2      m7, [r2+r6+mmsize]
+    pavgw   m0, m4
+    pavgw   m1, m5
+    pavgw   m2, m6
+    pavgw   m3, m7
+%endif
+    mova   [r0], m0
+    %3     [r0+mmsize], m1          ; tail store (movd for w10: only 4 bytes past the first 16)
+    mova   [r0+r1*2], m2
+    %3     [r0+r1*2+mmsize], m3
+    lea     r2, [r2+r3*4]
+    lea     r0, [r0+r1*4]
+    sub    r5d, 2
+    jg .height_loop
+    RET
+%endmacro
+
+INIT_MMX mmx2                       ; pixel_avg2_wN instantiations per ISA
+AVG2_W_ONE  4
+AVG2_W_TWO  8, movu, mova
+INIT_XMM sse2
+AVG2_W_ONE  8
+AVG2_W_TWO 10, movd, movd           ; w10 = 16 bytes + 4-byte tail
+AVG2_W_TWO 16, movu, mova
+INIT_YMM avx2
+AVG2_W_ONE 16
+
+INIT_MMX                            ; bare INIT: function below carries an explicit _mmx2 suffix
+cglobal pixel_avg2_w10_mmx2         ; HBD w10 avg2 with mmx regs: 8+8+4 pixels per row, two rows per loop
+    sub     r4, r2                  ; r4 := src2 - src1
+    lea     r6, [r4+r3*2]
+.height_loop:
+    movu    m0, [r2+ 0]
+    movu    m1, [r2+ 8]
+    movh    m2, [r2+16]             ; last 2 pixels (4 bytes) of the 10-wide row
+    movu    m3, [r2+r3*2+ 0]
+    movu    m4, [r2+r3*2+ 8]
+    movh    m5, [r2+r3*2+16]
+    pavgw   m0, [r2+r4+ 0]
+    pavgw   m1, [r2+r4+ 8]
+    pavgw   m2, [r2+r4+16]
+    pavgw   m3, [r2+r6+ 0]
+    pavgw   m4, [r2+r6+ 8]
+    pavgw   m5, [r2+r6+16]
+    mova   [r0+ 0], m0
+    mova   [r0+ 8], m1
+    movh   [r0+16], m2
+    mova   [r0+r1*2+ 0], m3
+    mova   [r0+r1*2+ 8], m4
+    movh   [r0+r1*2+16], m5
+    lea     r2, [r2+r3*2*2]         ; advance two rows
+    lea     r0, [r0+r1*2*2]
+    sub    r5d, 2
+    jg .height_loop
+    RET
+
+cglobal pixel_avg2_w16_mmx2         ; HBD w16 avg2 with mmx regs: 4x8 bytes per row, two rows per loop
+    sub     r4, r2                  ; r4 := src2 - src1
+    lea     r6, [r4+r3*2]
+.height_loop:
+    movu    m0, [r2+ 0]
+    movu    m1, [r2+ 8]
+    movu    m2, [r2+16]
+    movu    m3, [r2+24]
+    movu    m4, [r2+r3*2+ 0]
+    movu    m5, [r2+r3*2+ 8]
+    movu    m6, [r2+r3*2+16]
+    movu    m7, [r2+r3*2+24]
+    pavgw   m0, [r2+r4+ 0]
+    pavgw   m1, [r2+r4+ 8]
+    pavgw   m2, [r2+r4+16]
+    pavgw   m3, [r2+r4+24]
+    pavgw   m4, [r2+r6+ 0]
+    pavgw   m5, [r2+r6+ 8]
+    pavgw   m6, [r2+r6+16]
+    pavgw   m7, [r2+r6+24]
+    mova   [r0+ 0], m0
+    mova   [r0+ 8], m1
+    mova   [r0+16], m2
+    mova   [r0+24], m3
+    mova   [r0+r1*2+ 0], m4
+    mova   [r0+r1*2+ 8], m5
+    mova   [r0+r1*2+16], m6
+    mova   [r0+r1*2+24], m7
+    lea     r2, [r2+r3*2*2]
+    lea     r0, [r0+r1*2*2]
+    sub    r5d, 2
+    jg .height_loop
+    RET
+
+cglobal pixel_avg2_w18_mmx2         ; HBD w18 avg2 with mmx regs: 4x8 + 4-byte tail, one row per loop
+    sub     r4, r2                  ; r4 := src2 - src1
+.height_loop:
+    movu    m0, [r2+ 0]
+    movu    m1, [r2+ 8]
+    movu    m2, [r2+16]
+    movu    m3, [r2+24]
+    movh    m4, [r2+32]             ; last 2 pixels of the 18-wide row
+    pavgw   m0, [r2+r4+ 0]
+    pavgw   m1, [r2+r4+ 8]
+    pavgw   m2, [r2+r4+16]
+    pavgw   m3, [r2+r4+24]
+    pavgw   m4, [r2+r4+32]
+    mova   [r0+ 0], m0
+    mova   [r0+ 8], m1
+    mova   [r0+16], m2
+    mova   [r0+24], m3
+    movh   [r0+32], m4
+    lea     r2, [r2+r3*2]
+    lea     r0, [r0+r1*2]
+    dec    r5d
+    jg .height_loop
+    RET
+
+%macro PIXEL_AVG_W18 0              ; HBD w18 avg2 for sse2 (16+2 px) and avx2 (one ymm + 2-px tail)
+cglobal pixel_avg2_w18, 6,7
+    sub     r4, r2                  ; r4 := src2 - src1
+.height_loop:
+    movu    m0, [r2+ 0]
+    movd   xm2, [r2+32]             ; 2-pixel tail
+%if mmsize == 32
+    pavgw   m0, [r2+r4+ 0]
+    movd   xm1, [r2+r4+32]
+    pavgw  xm2, xm1
+%else
+    movu    m1, [r2+16]
+    movu    m3, [r2+r4+ 0]
+    movu    m4, [r2+r4+16]
+    movd    m5, [r2+r4+32]
+    pavgw   m0, m3
+    pavgw   m1, m4
+    pavgw   m2, m5
+    mova   [r0+16], m1
+%endif
+    mova   [r0+ 0], m0
+    movd   [r0+32], xm2
+    lea     r2, [r2+r3*2]
+    lea     r0, [r0+r1*2]
+    dec    r5d
+    jg .height_loop
+    RET
+%endmacro
+
+INIT_XMM sse2
+PIXEL_AVG_W18
+INIT_YMM avx2
+PIXEL_AVG_W18
+
+;-------------------------------------------------------------------------------------------------------------------------------
+;void pixelavg_pp(pixel dst, intptr_t dstride, const pixel src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+;-------------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_avg_12x16, 6,10,4    ; HBD 12x16: 8 px via xmm store + 4-px tail via movq from the upper lane
+    add     r1d, r1d               ; pixel strides -> byte strides (16-bit pixels)
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 4                 ; 4 iterations x 4 rows
+
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r4]
+    pavgw   m0, m1
+    movu    [r0], xm0              ; first 8 pixels
+    movu    m2, [r2 + r3]
+    movu    m3, [r4 + r5]
+    pavgw   m2, m3
+    movu    [r0 + r1], xm2
+
+    vextracti128 xm0, m0, 1        ; pixels 8..11 live in the upper lane
+    vextracti128 xm2, m2, 1
+    movq    [r0 + 16], xm0
+    movq    [r0 + r1 + 16], xm2
+
+    movu    m0, [r2 + r3 * 2]
+    movu    m1, [r4 + r5 * 2]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2], xm0
+    movu    m2, [r2 + r6]
+    movu    m3, [r4 + r7]
+    pavgw   m2, m3
+    movu    [r0 + r8], xm2
+
+    vextracti128 xm0, m0, 1
+    vextracti128 xm2, m2, 1
+    movq    [r0 + r1 * 2 + 16], xm0
+    movq    [r0 + r8 + 16], xm2
+
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+    lea     r4, [r4 + 4 * r5]
+    dec     r9d
+    jnz     .loop
+    RET
+%endif
+
+%macro  pixel_avg_H4 0              ; HBD helper: average 4 rows of one register each; needs r6/r7/r8 = 3*strides
+    movu    m0, [r2]
+    movu    m1, [r4]
+    pavgw   m0, m1
+    movu    [r0], m0
+    movu    m2, [r2 + r3]
+    movu    m3, [r4 + r5]
+    pavgw   m2, m3
+    movu    [r0 + r1], m2
+
+    movu    m0, [r2 + r3 * 2]
+    movu    m1, [r4 + r5 * 2]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2], m0
+    movu    m2, [r2 + r6]
+    movu    m3, [r4 + r7]
+    pavgw   m2, m3
+    movu    [r0 + r8], m2
+
+    lea     r0, [r0 + 4 * r1]       ; advance all pointers by 4 rows
+    lea     r2, [r2 + 4 * r3]
+    lea     r4, [r4 + 4 * r5]
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------------------------
+;void pixelavg_pp(pixel dst, intptr_t dstride, const pixel src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+;-------------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_avg_16x4, 6,9,4       ; HBD 16x4: one pixel_avg_H4 pass (one ymm row = 16 px)
+    add     r1d, r1d                ; pixel strides -> byte strides
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    pixel_avg_H4
+    RET
+
+cglobal pixel_avg_16x8, 6,9,4       ; HBD 16x8: two 4-row passes
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    pixel_avg_H4
+    pixel_avg_H4
+    RET
+
+cglobal pixel_avg_16x12, 6,9,4      ; HBD 16x12: three 4-row passes
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    pixel_avg_H4
+    pixel_avg_H4
+    pixel_avg_H4
+    RET
+%endif
+
+%macro  pixel_avg_H16 0             ; identical body to pixel_avg_H4 above (kept as a separate name for the 16xN group)
+    movu    m0, [r2]
+    movu    m1, [r4]
+    pavgw   m0, m1
+    movu    [r0], m0
+    movu    m2, [r2 + r3]
+    movu    m3, [r4 + r5]
+    pavgw   m2, m3
+    movu    [r0 + r1], m2
+
+    movu    m0, [r2 + r3 * 2]
+    movu    m1, [r4 + r5 * 2]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2], m0
+    movu    m2, [r2 + r6]
+    movu    m3, [r4 + r7]
+    pavgw   m2, m3
+    movu    [r0 + r8], m2
+
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+    lea     r4, [r4 + 4 * r5]
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------------------------
+;void pixelavg_pp(pixel dst, intptr_t dstride, const pixel src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+;-------------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_avg_16x16, 6,10,4    ; HBD 16x16: 4 iterations x 4 rows
+    add     r1d, r1d               ; pixel strides -> byte strides
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 4
+.loop:
+    pixel_avg_H16
+    dec r9d
+    jnz .loop
+    RET
+
+cglobal pixel_avg_16x32, 6,10,4    ; HBD 16x32: 4 iterations x 8 rows
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 4
+.loop:
+    pixel_avg_H16
+    pixel_avg_H16
+    dec r9d
+    jnz .loop
+    RET
+
+cglobal pixel_avg_16x64, 6,10,4    ; HBD 16x64: 4 iterations x 16 rows
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 4
+.loop:
+    pixel_avg_H16
+    pixel_avg_H16
+    pixel_avg_H16
+    pixel_avg_H16
+    dec r9d
+    jnz .loop
+    RET
+%endif
+
+;-------------------------------------------------------------------------------------------------------------------------------
+;void pixelavg_pp(pixel dst, intptr_t dstride, const pixel src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+;-------------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_avg_24x32, 6,10,4    ; HBD 24x32: 16 px via ymm + 8-px tail via xmm, 8 iterations x 4 rows
+    add     r1d, r1d               ; pixel strides -> byte strides
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 8
+
+.loop:
+    movu    m0, [r2]
+    movu    m1, [r4]
+    pavgw   m0, m1
+    movu    [r0], m0
+    movu    m2, [r2 + r3]
+    movu    m3, [r4 + r5]
+    pavgw   m2, m3
+    movu    [r0 + r1], m2
+
+    movu    xm0, [r2 + 32]         ; pixels 16..23 of each row
+    movu    xm1, [r4 + 32]
+    pavgw   xm0, xm1
+    movu    [r0 + 32], xm0
+    movu    xm2, [r2 + r3 + 32]
+    movu    xm3, [r4 + r5 + 32]
+    pavgw   xm2, xm3
+    movu    [r0 + r1 + 32], xm2
+
+    movu    m0, [r2 + r3 * 2]
+    movu    m1, [r4 + r5 * 2]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2], m0
+    movu    m2, [r2 + r6]
+    movu    m3, [r4 + r7]
+    pavgw   m2, m3
+    movu    [r0 + r8], m2
+
+    movu    xm0, [r2 + r3 * 2 + 32]
+    movu    xm1, [r4 + r5 * 2 + 32]
+    pavgw   xm0, xm1
+    movu    [r0 + r1 * 2 + 32], xm0
+    movu    xm2, [r2 + r6 + 32]
+    movu    xm3, [r4 + r7 + 32]
+    pavgw   xm2, xm3
+    movu    [r0 + r8 + 32], xm2
+
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+    lea     r4, [r4 + 4 * r5]
+    dec     r9d
+    jnz     .loop
+    RET
+%endif
+
+%macro  pixel_avg_W32 0             ; HBD helper: average 4 rows of 32 px (two ymm per row); needs r6/r7/r8 = 3*strides
+    movu    m0, [r2]
+    movu    m1, [r4]
+    pavgw   m0, m1
+    movu    [r0], m0
+    movu    m2, [r2 + r3]
+    movu    m3, [r4 + r5]
+    pavgw   m2, m3
+    movu    [r0 + r1], m2
+
+    movu    m0, [r2 + 32]           ; second 16 px of rows 0/1
+    movu    m1, [r4 + 32]
+    pavgw   m0, m1
+    movu    [r0 + 32], m0
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r4 + r5 + 32]
+    pavgw   m2, m3
+    movu    [r0 + r1 + 32], m2
+
+    movu    m0, [r2 + r3 * 2]
+    movu    m1, [r4 + r5 * 2]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2], m0
+    movu    m2, [r2 + r6]
+    movu    m3, [r4 + r7]
+    pavgw   m2, m3
+    movu    [r0 + r8], m2
+
+    movu    m0, [r2 + r3 * 2 + 32]  ; second 16 px of rows 2/3
+    movu    m1, [r4 + r5 * 2 + 32]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2 + 32], m0
+    movu    m2, [r2 + r6 + 32]
+    movu    m3, [r4 + r7 + 32]
+    pavgw   m2, m3
+    movu    [r0 + r8 + 32], m2
+
+    lea     r0, [r0 + 4 * r1]       ; advance all pointers by 4 rows
+    lea     r2, [r2 + 4 * r3]
+    lea     r4, [r4 + 4 * r5]
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------------------------
+;void pixelavg_pp(pixel dst, intptr_t dstride, const pixel src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+;-------------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_avg_32x8, 6,10,4     ; HBD 32xN wrappers: N/4 iterations of pixel_avg_W32
+    add     r1d, r1d               ; pixel strides -> byte strides
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 2
+.loop:
+    pixel_avg_W32
+    dec     r9d
+    jnz     .loop
+    RET
+
+cglobal pixel_avg_32x16, 6,10,4
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 4
+.loop:
+    pixel_avg_W32
+    dec     r9d
+    jnz     .loop
+    RET
+
+cglobal pixel_avg_32x24, 6,10,4
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 6
+.loop:
+    pixel_avg_W32
+    dec     r9d
+    jnz     .loop
+    RET
+
+cglobal pixel_avg_32x32, 6,10,4
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 8
+.loop:
+    pixel_avg_W32
+    dec     r9d
+    jnz     .loop
+    RET
+
+cglobal pixel_avg_32x64, 6,10,4
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 16
+.loop:
+    pixel_avg_W32
+    dec     r9d
+    jnz     .loop
+    RET
+%endif
+
+%macro  pixel_avg_W64 0             ; HBD helper: average 4 rows of 64 px (four ymm per row); needs r6/r7/r8 = 3*strides
+    movu    m0, [r2]
+    movu    m1, [r4]
+    pavgw   m0, m1
+    movu    [r0], m0
+    movu    m2, [r2 + r3]
+    movu    m3, [r4 + r5]
+    pavgw   m2, m3
+    movu    [r0 + r1], m2
+
+    movu    m0, [r2 + 32]           ; px 16..31 of rows 0/1
+    movu    m1, [r4 + 32]
+    pavgw   m0, m1
+    movu    [r0 + 32], m0
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r4 + r5 + 32]
+    pavgw   m2, m3
+    movu    [r0 + r1 + 32], m2
+
+    movu    m0, [r2 + 64]           ; px 32..47
+    movu    m1, [r4 + 64]
+    pavgw   m0, m1
+    movu    [r0 + 64], m0
+    movu    m2, [r2 + r3 + 64]
+    movu    m3, [r4 + r5 + 64]
+    pavgw   m2, m3
+    movu    [r0 + r1 + 64], m2
+
+    movu    m0, [r2 + 96]           ; px 48..63
+    movu    m1, [r4 + 96]
+    pavgw   m0, m1
+    movu    [r0 + 96], m0
+    movu    m2, [r2 + r3 + 96]
+    movu    m3, [r4 + r5 + 96]
+    pavgw   m2, m3
+    movu    [r0 + r1 + 96], m2
+
+    movu    m0, [r2 + r3 * 2]       ; rows 2/3, same four-chunk pattern
+    movu    m1, [r4 + r5 * 2]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2], m0
+    movu    m2, [r2 + r6]
+    movu    m3, [r4 + r7]
+    pavgw   m2, m3
+    movu    [r0 + r8], m2
+
+    movu    m0, [r2 + r3 * 2 + 32]
+    movu    m1, [r4 + r5 * 2 + 32]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2 + 32], m0
+    movu    m2, [r2 + r6 + 32]
+    movu    m3, [r4 + r7 + 32]
+    pavgw   m2, m3
+    movu    [r0 + r8 + 32], m2
+
+    movu    m0, [r2 + r3 * 2 + 64]
+    movu    m1, [r4 + r5 * 2 + 64]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2 + 64], m0
+    movu    m2, [r2 + r6 + 64]
+    movu    m3, [r4 + r7 + 64]
+    pavgw   m2, m3
+    movu    [r0 + r8 + 64], m2
+
+    movu    m0, [r2 + r3 * 2 + 96]
+    movu    m1, [r4 + r5 * 2 + 96]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2 + 96], m0
+    movu    m2, [r2 + r6 + 96]
+    movu    m3, [r4 + r7 + 96]
+    pavgw   m2, m3
+    movu    [r0 + r8 + 96], m2
+
+    lea     r0, [r0 + 4 * r1]       ; advance all pointers by 4 rows
+    lea     r2, [r2 + 4 * r3]
+    lea     r4, [r4 + 4 * r5]
+%endmacro
+
+;-------------------------------------------------------------------------------------------------------------------------------
+;void pixelavg_pp(pixel dst, intptr_t dstride, const pixel src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+;-------------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_avg_64x16, 6,10,4    ; HBD 64xN wrappers: N/4 iterations of pixel_avg_W64
+    add     r1d, r1d               ; pixel strides -> byte strides
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 4
+.loop:
+    pixel_avg_W64
+    dec     r9d
+    jnz     .loop
+    RET
+
+cglobal pixel_avg_64x32, 6,10,4
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 8
+.loop:
+    pixel_avg_W64
+    dec     r9d
+    jnz     .loop
+    RET
+
+cglobal pixel_avg_64x48, 6,10,4
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 12
+.loop:
+    pixel_avg_W64
+    dec     r9d
+    jnz     .loop
+    RET
+
+cglobal pixel_avg_64x64, 6,10,4
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 16
+.loop:
+    pixel_avg_W64
+    dec     r9d
+    jnz     .loop
+    RET
+%endif
+
+;-------------------------------------------------------------------------------------------------------------------------------
+;void pixelavg_pp(pixel* dst, intptr_t dstride, const pixel* src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+;-------------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_avg_48x64, 6,10,4
+    add     r1d, r1d
+    add     r3d, r3d
+    add     r5d, r5d
+    lea     r6, [r3 * 3]
+    lea     r7, [r5 * 3]
+    lea     r8, [r1 * 3]
+    mov     r9d, 16
+
+.loop
+    movu    m0, [r2]
+    movu    m1, [r4]
+    pavgw   m0, m1
+    movu    [r0], m0
+    movu    m2, [r2 + r3]
+    movu    m3, [r4 + r5]
+    pavgw   m2, m3
+    movu    [r0 + r1], m2
+
+    movu    m0, [r2 + 32]
+    movu    m1, [r4 + 32]
+    pavgw   m0, m1
+    movu    [r0 + 32], m0
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r4 + r5 + 32]
+    pavgw   m2, m3
+    movu    [r0 + r1 + 32], m2
+
+    movu    m0, [r2 + 64]
+    movu    m1, [r4 + 64]
+    pavgw   m0, m1
+    movu    [r0 + 64], m0
+    movu    m2, [r2 + r3 + 64]
+    movu    m3, [r4 + r5 + 64]
+    pavgw   m2, m3
+    movu    [r0 + r1 + 64], m2
+
+    movu    m0, [r2 + r3 * 2]
+    movu    m1, [r4 + r5 * 2]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2], m0
+    movu    m2, [r2 + r6]
+    movu    m3, [r4 + r7]
+    pavgw   m2, m3
+    movu    [r0 + r8], m2
+
+    movu    m0, [r2 + r3 * 2 + 32]
+    movu    m1, [r4 + r5 * 2 + 32]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2 + 32], m0
+    movu    m2, [r2 + r6 + 32]
+    movu    m3, [r4 + r7 + 32]
+    pavgw   m2, m3
+    movu    [r0 + r8 + 32], m2
+
+    movu    m0, [r2 + r3 * 2 + 64]
+    movu    m1, [r4 + r5 * 2 + 64]
+    pavgw   m0, m1
+    movu    [r0 + r1 * 2 + 64], m0
+    movu    m2, [r2 + r6 + 64]
+    movu    m3, [r4 + r7 + 64]
+    pavgw   m2, m3
+    movu    [r0 + r8 + 64], m2
+
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+    lea     r4, [r4 + 4 * r5]
+    dec     r9d
+    jnz     .loop
+    RET
+%endif
+
+%endif ; HIGH_BIT_DEPTH
+
+%if HIGH_BIT_DEPTH == 0
+;-----------------------------------------------------------------------------
+; void pixel_avg2_w4( uint8_t *dst,  intptr_t dst_stride,
+;                     uint8_t *src1, intptr_t src_stride,
+;                     uint8_t *src2, int height );
+;-----------------------------------------------------------------------------
+%macro AVG2_W8 2
+cglobal pixel_avg2_w%1_mmx2, 6,7
+    sub    r4, r2
+    lea    r6, [r4+r3]
+.height_loop:
+    %2     mm0, [r2]
+    %2     mm1, [r2+r3]
+    pavgb  mm0, [r2+r4]
+    pavgb  mm1, [r2+r6]
+    lea    r2, [r2+r3*2]
+    %2     [r0], mm0
+    %2     [r0+r1], mm1
+    lea    r0, [r0+r1*2]
+    sub    r5d, 2
+    jg     .height_loop
+    RET
+%endmacro
+
+INIT_MMX
+AVG2_W8 4, movd
+AVG2_W8 8, movq
+
+%macro AVG2_W16 2
+cglobal pixel_avg2_w%1_mmx2, 6,7
+    sub    r2, r4
+    lea    r6, [r2+r3]
+.height_loop:
+    movq   mm0, [r4]
+    %2     mm1, [r4+8]
+    movq   mm2, [r4+r3]
+    %2     mm3, [r4+r3+8]
+    pavgb  mm0, [r4+r2]
+    pavgb  mm1, [r4+r2+8]
+    pavgb  mm2, [r4+r6]
+    pavgb  mm3, [r4+r6+8]
+    lea    r4, [r4+r3*2]
+    movq   [r0], mm0
+    %2     [r0+8], mm1
+    movq   [r0+r1], mm2
+    %2     [r0+r1+8], mm3
+    lea    r0, [r0+r1*2]
+    sub    r5d, 2
+    jg     .height_loop
+    RET
+%endmacro
+
+AVG2_W16 12, movd
+AVG2_W16 16, movq
+
+cglobal pixel_avg2_w20_mmx2, 6,7
+    sub    r2, r4
+    lea    r6, [r2+r3]
+.height_loop:
+    movq   mm0, [r4]
+    movq   mm1, [r4+8]
+    movd   mm2, [r4+16]
+    movq   mm3, [r4+r3]
+    movq   mm4, [r4+r3+8]
+    movd   mm5, [r4+r3+16]
+    pavgb  mm0, [r4+r2]
+    pavgb  mm1, [r4+r2+8]
+    pavgb  mm2, [r4+r2+16]
+    pavgb  mm3, [r4+r6]
+    pavgb  mm4, [r4+r6+8]
+    pavgb  mm5, [r4+r6+16]
+    lea    r4, [r4+r3*2]
+    movq   [r0], mm0
+    movq   [r0+8], mm1
+    movd   [r0+16], mm2
+    movq   [r0+r1], mm3
+    movq   [r0+r1+8], mm4
+    movd   [r0+r1+16], mm5
+    lea    r0, [r0+r1*2]
+    sub    r5d, 2
+    jg     .height_loop
+    RET
+
+INIT_XMM
+cglobal pixel_avg2_w16_sse2, 6,7
+    sub    r4, r2
+    lea    r6, [r4+r3]
+.height_loop:
+    movu   m0, [r2]
+    movu   m2, [r2+r3]
+    movu   m1, [r2+r4]
+    movu   m3, [r2+r6]
+    lea    r2, [r2+r3*2]
+    pavgb  m0, m1
+    pavgb  m2, m3
+    mova [r0], m0
+    mova [r0+r1], m2
+    lea    r0, [r0+r1*2]
+    sub   r5d, 2
+    jg .height_loop
+    RET
+
+cglobal pixel_avg2_w20_sse2, 6,7
+    sub    r2, r4
+    lea    r6, [r2+r3]
+.height_loop:
+    movu   m0, [r4]
+    movu   m2, [r4+r3]
+    movu   m1, [r4+r2]
+    movu   m3, [r4+r6]
+    movd  mm4, [r4+16]
+    movd  mm5, [r4+r3+16]
+    pavgb  m0, m1
+    pavgb  m2, m3
+    pavgb mm4, [r4+r2+16]
+    pavgb mm5, [r4+r6+16]
+    lea    r4, [r4+r3*2]
+    mova [r0], m0
+    mova [r0+r1], m2
+    movd [r0+16], mm4
+    movd [r0+r1+16], mm5
+    lea    r0, [r0+r1*2]
+    sub   r5d, 2
+    jg .height_loop
+    RET
+
+INIT_YMM avx2
+cglobal pixel_avg2_w20, 6,7
+    sub    r2, r4
+    lea    r6, [r2+r3]
+.height_loop:
+    movu   m0, [r4]
+    movu   m1, [r4+r3]
+    pavgb  m0, [r4+r2]
+    pavgb  m1, [r4+r6]
+    lea    r4, [r4+r3*2]
+    mova [r0], m0
+    mova [r0+r1], m1
+    lea    r0, [r0+r1*2]
+    sub    r5d, 2
+    jg     .height_loop
+    RET
+
+; Cacheline split code for processors with high latencies for loads
+; split over cache lines.  See sad-a.asm for a more detailed explanation.
+; This particular instance is complicated by the fact that src1 and src2
+; can have different alignments.  For simplicity and code size, only the
+; MMX cacheline workaround is used.  As a result, in the case of SSE2
+; pixel_avg, the cacheline check function calls the SSE2 version if there
+; is no cacheline split, and the MMX workaround if there is.
+
+%macro INIT_SHIFT 2
+    and    eax, 7
+    shl    eax, 3
+    movd   %1, [pd_64]
+    movd   %2, eax
+    psubw  %1, %2
+%endmacro
+
+%macro AVG_CACHELINE_START 0
+    %assign stack_offset 0
+    INIT_SHIFT mm6, mm7
+    mov    eax, r4m
+    INIT_SHIFT mm4, mm5
+    PROLOGUE 6,6
+    and    r2, ~7
+    and    r4, ~7
+    sub    r4, r2
+.height_loop:
+%endmacro
+
+%macro AVG_CACHELINE_LOOP 2
+    movq   mm1, [r2+%1]
+    movq   mm0, [r2+8+%1]
+    movq   mm3, [r2+r4+%1]
+    movq   mm2, [r2+r4+8+%1]
+    psrlq  mm1, mm7
+    psllq  mm0, mm6
+    psrlq  mm3, mm5
+    psllq  mm2, mm4
+    por    mm0, mm1
+    por    mm2, mm3
+    pavgb  mm2, mm0
+    %2 [r0+%1], mm2
+%endmacro
+
+%macro AVG_CACHELINE_FUNC 2
+pixel_avg2_w%1_cache_mmx2:
+    AVG_CACHELINE_START
+    AVG_CACHELINE_LOOP 0, movq
+%if %1>8
+    AVG_CACHELINE_LOOP 8, movq
+%if %1>16
+    AVG_CACHELINE_LOOP 16, movd
+%endif
+%endif
+    add    r2, r3
+    add    r0, r1
+    dec    r5d
+    jg .height_loop
+    RET
+%endmacro
+
+%macro AVG_CACHELINE_CHECK 3 ; width, cacheline, instruction set
+%if %1 == 12
+;w12 isn't needed because w16 is just as fast if there's no cacheline split
+%define cachesplit pixel_avg2_w16_cache_mmx2
+%else
+%define cachesplit pixel_avg2_w%1_cache_mmx2
+%endif
+cglobal pixel_avg2_w%1_cache%2_%3
+    mov    eax, r2m
+    and    eax, %2-1
+    cmp    eax, (%2-%1-(%1 % 8))
+%if %1==12||%1==20
+    jbe pixel_avg2_w%1_%3
+%else
+    jb pixel_avg2_w%1_%3
+%endif
+%if 0 ; or %1==8 - but the extra branch seems too expensive
+    ja cachesplit
+%if ARCH_X86_64
+    test      r4b, 1
+%else
+    test byte r4m, 1
+%endif
+    jz pixel_avg2_w%1_%3
+%else
+    or     eax, r4m
+    and    eax, 7
+    jz pixel_avg2_w%1_%3
+    mov    eax, r2m
+%endif
+%if mmsize==16 || (%1==8 && %2==64)
+    AVG_CACHELINE_FUNC %1, %2
+%else
+    jmp cachesplit
+%endif
+%endmacro
+
+INIT_MMX
+AVG_CACHELINE_CHECK  8, 64, mmx2
+AVG_CACHELINE_CHECK 12, 64, mmx2
+%if ARCH_X86_64 == 0
+AVG_CACHELINE_CHECK 16, 64, mmx2
+AVG_CACHELINE_CHECK 20, 64, mmx2
+AVG_CACHELINE_CHECK  8, 32, mmx2
+AVG_CACHELINE_CHECK 12, 32, mmx2
+AVG_CACHELINE_CHECK 16, 32, mmx2
+AVG_CACHELINE_CHECK 20, 32, mmx2
+%endif
+INIT_XMM
+AVG_CACHELINE_CHECK 16, 64, sse2
+AVG_CACHELINE_CHECK 20, 64, sse2
+
+; computed jump assumes this loop is exactly 48 bytes
+%macro AVG16_CACHELINE_LOOP_SSSE3 2 ; alignment
+ALIGN 16
+avg_w16_align%1_%2_ssse3:
+%if %1==0 && %2==0
+    movdqa  xmm1, [r2]
+    pavgb   xmm1, [r2+r4]
+    add    r2, r3
+%elif %1==0
+    movdqa  xmm1, [r2+r4+16]
+    palignr xmm1, [r2+r4], %2
+    pavgb   xmm1, [r2]
+    add    r2, r3
+%elif %2&15==0
+    movdqa  xmm1, [r2+16]
+    palignr xmm1, [r2], %1
+    pavgb   xmm1, [r2+r4]
+    add    r2, r3
+%else
+    movdqa  xmm1, [r2+16]
+    movdqa  xmm2, [r2+r4+16]
+    palignr xmm1, [r2], %1
+    palignr xmm2, [r2+r4], %2&15
+    add    r2, r3
+    pavgb   xmm1, xmm2
+%endif
+    movdqa  [r0], xmm1
+    add    r0, r1
+    dec    r5d
+    jg     avg_w16_align%1_%2_ssse3
+    ret
+%if %1==0
+    ; make sure the first ones don't end up short
+    ALIGN 16
+    times (48-($-avg_w16_align%1_%2_ssse3))>>4 nop
+%endif
+%endmacro
+
+cglobal pixel_avg2_w16_cache64_ssse3
+%if 0 ; seems both tests aren't worth it if src1%16==0 is optimized
+    mov   eax, r2m
+    and   eax, 0x3f
+    cmp   eax, 0x30
+    jb x265_pixel_avg2_w16_sse2
+    or    eax, r4m
+    and   eax, 7
+    jz x265_pixel_avg2_w16_sse2
+%endif
+    PROLOGUE 6, 8
+    lea    r6, [r4+r2]
+    and    r4, ~0xf
+    and    r6, 0x1f
+    and    r2, ~0xf
+    lea    r6, [r6*3]    ;(offset + align*2)*3
+    sub    r4, r2
+    shl    r6, 4         ;jump = (offset + align*2)*48
+%define avg_w16_addr avg_w16_align1_1_ssse3-(avg_w16_align2_2_ssse3-avg_w16_align1_1_ssse3)
+%ifdef PIC
+    lea    r7, [avg_w16_addr]
+    add    r6, r7
+%else
+    lea    r6, [avg_w16_addr + r6]
+%endif
+    TAIL_CALL r6, 1
+
+%assign j 0
+%assign k 1
+%rep 16
+AVG16_CACHELINE_LOOP_SSSE3 j, j
+AVG16_CACHELINE_LOOP_SSSE3 j, k
+%assign j j+1
+%assign k k+1
+%endrep
+%endif ; !HIGH_BIT_DEPTH
+
+;=============================================================================
+; pixel copy
+;=============================================================================
+
+%macro COPY1 2
+    movu  m0, [r2]
+    movu  m1, [r2+r3]
+    movu  m2, [r2+r3*2]
+    movu  m3, [r2+%2]
+    mova  [r0],      m0
+    mova  [r0+r1],   m1
+    mova  [r0+r1*2], m2
+    mova  [r0+%1],   m3
+%endmacro
+
+%macro COPY2 2-4 0, 1
+    movu  m0, [r2+%3*mmsize]
+    movu  m1, [r2+%4*mmsize]
+    movu  m2, [r2+r3+%3*mmsize]
+    movu  m3, [r2+r3+%4*mmsize]
+    mova  [r0+%3*mmsize],      m0
+    mova  [r0+%4*mmsize],      m1
+    mova  [r0+r1+%3*mmsize],   m2
+    mova  [r0+r1+%4*mmsize],   m3
+    movu  m0, [r2+r3*2+%3*mmsize]
+    movu  m1, [r2+r3*2+%4*mmsize]
+    movu  m2, [r2+%2+%3*mmsize]
+    movu  m3, [r2+%2+%4*mmsize]
+    mova  [r0+r1*2+%3*mmsize], m0
+    mova  [r0+r1*2+%4*mmsize], m1
+    mova  [r0+%1+%3*mmsize],   m2
+    mova  [r0+%1+%4*mmsize],   m3
+%endmacro
+
+%macro COPY4 2
+    COPY2 %1, %2, 0, 1
+    COPY2 %1, %2, 2, 3
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void mc_copy_w4( uint8_t *dst, intptr_t i_dst_stride,
+;                  uint8_t *src, intptr_t i_src_stride, int i_height )
+;-----------------------------------------------------------------------------
+INIT_MMX
+cglobal mc_copy_w4_mmx, 4,6
+    FIX_STRIDES r1, r3
+    cmp dword r4m, 4
+    lea     r5, [r3*3]
+    lea     r4, [r1*3]
+    je .end
+%if HIGH_BIT_DEPTH == 0
+    %define mova movd
+    %define movu movd
+%endif
+    COPY1   r4, r5
+    lea     r2, [r2+r3*4]
+    lea     r0, [r0+r1*4]
+.end:
+    COPY1   r4, r5
+    RET
+
+%macro MC_COPY 1
+%assign %%w %1*SIZEOF_PIXEL/mmsize
+%if %%w > 0
+cglobal mc_copy_w%1, 5,7
+    FIX_STRIDES r1, r3
+    lea     r6, [r3*3]
+    lea     r5, [r1*3]
+.height_loop:
+    COPY %+ %%w r5, r6
+    lea     r2, [r2+r3*4]
+    lea     r0, [r0+r1*4]
+    sub    r4d, 4
+    jg .height_loop
+    RET
+%endif
+%endmacro
+
+INIT_MMX mmx
+MC_COPY  8
+MC_COPY 16
+INIT_XMM sse
+MC_COPY  8
+MC_COPY 16
+INIT_XMM aligned, sse
+MC_COPY 16
+%if HIGH_BIT_DEPTH
+INIT_YMM avx
+MC_COPY 16
+INIT_YMM aligned, avx
+MC_COPY 16
+%endif
+
+;=============================================================================
+; prefetch
+;=============================================================================
+; assumes 64 byte cachelines
+; FIXME doesn't cover all pixels in high depth and/or 4:4:4
+
+;-----------------------------------------------------------------------------
+; void prefetch_fenc( pixel *pix_y,  intptr_t stride_y,
+;                     pixel *pix_uv, intptr_t stride_uv, int mb_x )
+;-----------------------------------------------------------------------------
+
+%macro PREFETCH_FENC 1
+%if ARCH_X86_64
+cglobal prefetch_fenc_%1, 5,5
+    FIX_STRIDES r1, r3
+    and    r4d, 3
+    mov    eax, r4d
+    imul   r4d, r1d
+    lea    r0,  [r0+r4*4+64*SIZEOF_PIXEL]
+    prefetcht0  [r0]
+    prefetcht0  [r0+r1]
+    lea    r0,  [r0+r1*2]
+    prefetcht0  [r0]
+    prefetcht0  [r0+r1]
+
+    imul   eax, r3d
+    lea    r2,  [r2+rax*2+64*SIZEOF_PIXEL]
+    prefetcht0  [r2]
+    prefetcht0  [r2+r3]
+%ifidn %1, 422
+    lea    r2,  [r2+r3*2]
+    prefetcht0  [r2]
+    prefetcht0  [r2+r3]
+%endif
+    RET
+
+%else
+cglobal prefetch_fenc_%1, 0,3
+    mov    r2, r4m
+    mov    r1, r1m
+    mov    r0, r0m
+    FIX_STRIDES r1
+    and    r2, 3
+    imul   r2, r1
+    lea    r0, [r0+r2*4+64*SIZEOF_PIXEL]
+    prefetcht0 [r0]
+    prefetcht0 [r0+r1]
+    lea    r0, [r0+r1*2]
+    prefetcht0 [r0]
+    prefetcht0 [r0+r1]
+
+    mov    r2, r4m
+    mov    r1, r3m
+    mov    r0, r2m
+    FIX_STRIDES r1
+    and    r2, 3
+    imul   r2, r1
+    lea    r0, [r0+r2*2+64*SIZEOF_PIXEL]
+    prefetcht0 [r0]
+    prefetcht0 [r0+r1]
+%ifidn %1, 422
+    lea    r0,  [r0+r1*2]
+    prefetcht0  [r0]
+    prefetcht0  [r0+r1]
+%endif
+    ret
+%endif ; ARCH_X86_64
+%endmacro
+
+INIT_MMX mmx2
+PREFETCH_FENC 420
+PREFETCH_FENC 422
+
+;-----------------------------------------------------------------------------
+; void prefetch_ref( pixel *pix, intptr_t stride, int parity )
+;-----------------------------------------------------------------------------
+INIT_MMX mmx2
+cglobal prefetch_ref, 3,3
+    FIX_STRIDES r1
+    dec    r2d
+    and    r2d, r1d
+    lea    r0,  [r0+r2*8+64*SIZEOF_PIXEL]
+    lea    r2,  [r1*3]
+    prefetcht0  [r0]
+    prefetcht0  [r0+r1]
+    prefetcht0  [r0+r1*2]
+    prefetcht0  [r0+r2]
+    lea    r0,  [r0+r1*4]
+    prefetcht0  [r0]
+    prefetcht0  [r0+r1]
+    prefetcht0  [r0+r1*2]
+    prefetcht0  [r0+r2]
+    RET
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/mc-a2.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1137 @@
+;*****************************************************************************
+;* mc-a2.asm: x86 motion compensation
+;*****************************************************************************
+;* Copyright (C) 2005-2013 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*          Fiona Glaser <fiona@x264.com>
+;*          Holger Lubitz <holger@lubitz.org>
+;*          Mathieu Monnier <manao@melix.net>
+;*          Oskar Arvidsson <oskar@irock.se>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32
+
+deinterleave_shuf: times 2 db 0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15
+
+%if HIGH_BIT_DEPTH
+deinterleave_shuf32a: SHUFFLE_MASK_W 0,2,4,6,8,10,12,14
+deinterleave_shuf32b: SHUFFLE_MASK_W 1,3,5,7,9,11,13,15
+%else
+deinterleave_shuf32a: db 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30
+deinterleave_shuf32b: db 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31
+%endif
+pw_1024: times 16 dw 1024
+
+pd_16: times 4 dd 16
+pd_0f: times 4 dd 0xffff
+pf_inv256: times 8 dd 0.00390625
+
+SECTION .text
+
+cextern pb_0
+cextern pw_1
+cextern pw_16
+cextern pw_32
+cextern pw_512
+cextern pw_00ff
+cextern pw_3fff
+cextern pw_pixel_max
+cextern pd_ffff
+
+;The hpel_filter routines use non-temporal writes for output.
+;The following defines may be uncommented for testing.
+;Doing the hpel_filter temporal may be a win if the last level cache
+;is big enough (preliminary benching suggests on the order of 4* framesize).
+
+;%define movntq movq
+;%define movntps movaps
+;%define sfence
+
+%if HIGH_BIT_DEPTH == 0
+%undef movntq
+%undef movntps
+%undef sfence
+%endif ; !HIGH_BIT_DEPTH
+
+;-----------------------------------------------------------------------------
+; void plane_copy_core( pixel *dst, intptr_t i_dst,
+;                       pixel *src, intptr_t i_src, int w, int h )
+;-----------------------------------------------------------------------------
+; assumes i_dst and w are multiples of 16, and i_dst>w
+INIT_MMX
+cglobal plane_copy_core_mmx2, 6,7
+    FIX_STRIDES r1, r3, r4d
+%if HIGH_BIT_DEPTH == 0
+    movsxdifnidn r4, r4d
+%endif
+    sub    r1,  r4
+    sub    r3,  r4
+.loopy:
+    lea   r6d, [r4-63]
+.loopx:
+    prefetchnta [r2+256]
+    movq   m0, [r2   ]
+    movq   m1, [r2+ 8]
+    movntq [r0   ], m0
+    movntq [r0+ 8], m1
+    movq   m2, [r2+16]
+    movq   m3, [r2+24]
+    movntq [r0+16], m2
+    movntq [r0+24], m3
+    movq   m4, [r2+32]
+    movq   m5, [r2+40]
+    movntq [r0+32], m4
+    movntq [r0+40], m5
+    movq   m6, [r2+48]
+    movq   m7, [r2+56]
+    movntq [r0+48], m6
+    movntq [r0+56], m7
+    add    r2,  64
+    add    r0,  64
+    sub    r6d, 64
+    jg .loopx
+    prefetchnta [r2+256]
+    add    r6d, 63
+    jle .end16
+.loop16:
+    movq   m0, [r2  ]
+    movq   m1, [r2+8]
+    movntq [r0  ], m0
+    movntq [r0+8], m1
+    add    r2,  16
+    add    r0,  16
+    sub    r6d, 16
+    jg .loop16
+.end16:
+    add    r0, r1
+    add    r2, r3
+    dec    r5d
+    jg .loopy
+    sfence
+    emms
+    RET
+
+
+%macro INTERLEAVE 4-5 ; dst, srcu, srcv, is_aligned, nt_hint
+%if HIGH_BIT_DEPTH
+%assign x 0
+%rep 16/mmsize
+    mov%4     m0, [%2+(x/2)*mmsize]
+    mov%4     m1, [%3+(x/2)*mmsize]
+    punpckhwd m2, m0, m1
+    punpcklwd m0, m1
+    mov%5a    [%1+(x+0)*mmsize], m0
+    mov%5a    [%1+(x+1)*mmsize], m2
+    %assign x (x+2)
+%endrep
+%else
+    movq   m0, [%2]
+%if mmsize==16
+%ifidn %4, a
+    punpcklbw m0, [%3]
+%else
+    movq   m1, [%3]
+    punpcklbw m0, m1
+%endif
+    mov%5a [%1], m0
+%else
+    movq   m1, [%3]
+    punpckhbw m2, m0, m1
+    punpcklbw m0, m1
+    mov%5a [%1+0], m0
+    mov%5a [%1+8], m2
+%endif
+%endif ; HIGH_BIT_DEPTH
+%endmacro
+
+%macro DEINTERLEAVE 6 ; dstu, dstv, src, dstv==dstu+8, shuffle constant, is aligned
+%if HIGH_BIT_DEPTH
+%assign n 0
+%rep 16/mmsize
+    mova     m0, [%3+(n+0)*mmsize]
+    mova     m1, [%3+(n+1)*mmsize]
+    psrld    m2, m0, 16
+    psrld    m3, m1, 16
+    pand     m0, %5
+    pand     m1, %5
+    packssdw m0, m1
+    packssdw m2, m3
+    mov%6    [%1+(n/2)*mmsize], m0
+    mov%6    [%2+(n/2)*mmsize], m2
+    %assign n (n+2)
+%endrep
+%else ; !HIGH_BIT_DEPTH
+%if mmsize==16
+    mova   m0, [%3]
+%if cpuflag(ssse3)
+    pshufb m0, %5
+%else
+    mova   m1, m0
+    pand   m0, %5
+    psrlw  m1, 8
+    packuswb m0, m1
+%endif
+%if %4
+    mova   [%1], m0
+%else
+    movq   [%1], m0
+    movhps [%2], m0
+%endif
+%else
+    mova   m0, [%3]
+    mova   m1, [%3+8]
+    mova   m2, m0
+    mova   m3, m1
+    pand   m0, %5
+    pand   m1, %5
+    psrlw  m2, 8
+    psrlw  m3, 8
+    packuswb m0, m1
+    packuswb m2, m3
+    mova   [%1], m0
+    mova   [%2], m2
+%endif ; mmsize == 16
+%endif ; HIGH_BIT_DEPTH
+%endmacro
+
+%macro PLANE_INTERLEAVE 0
+;-----------------------------------------------------------------------------
+; void plane_copy_interleave_core( uint8_t *dst,  intptr_t i_dst,
+;                                  uint8_t *srcu, intptr_t i_srcu,
+;                                  uint8_t *srcv, intptr_t i_srcv, int w, int h )
+;-----------------------------------------------------------------------------
+; assumes i_dst and w are multiples of 16, and i_dst>2*w
+cglobal plane_copy_interleave_core, 6,9
+    mov   r6d, r6m
+%if HIGH_BIT_DEPTH
+    FIX_STRIDES r1, r3, r5, r6d
+    movifnidn r1mp, r1
+    movifnidn r3mp, r3
+    mov  r6m, r6d
+%endif
+    lea    r0, [r0+r6*2]
+    add    r2,  r6
+    add    r4,  r6
+%if ARCH_X86_64
+    DECLARE_REG_TMP 7,8
+%else
+    DECLARE_REG_TMP 1,3
+%endif
+    mov  t1, r1
+    shr  t1, SIZEOF_PIXEL
+    sub  t1, r6
+    mov  t0d, r7m
+.loopy:
+    mov    r6d, r6m
+    neg    r6
+.prefetch:
+    prefetchnta [r2+r6]
+    prefetchnta [r4+r6]
+    add    r6, 64
+    jl .prefetch
+    mov    r6d, r6m
+    neg    r6
+.loopx:
+    INTERLEAVE r0+r6*2+ 0*SIZEOF_PIXEL, r2+r6+0*SIZEOF_PIXEL, r4+r6+0*SIZEOF_PIXEL, u, nt
+    INTERLEAVE r0+r6*2+16*SIZEOF_PIXEL, r2+r6+8*SIZEOF_PIXEL, r4+r6+8*SIZEOF_PIXEL, u, nt
+    add    r6, 16*SIZEOF_PIXEL
+    jl .loopx
+.pad:
+%assign n 0
+%rep SIZEOF_PIXEL
+%if mmsize==8
+    movntq [r0+r6*2+(n+ 0)], m0
+    movntq [r0+r6*2+(n+ 8)], m0
+    movntq [r0+r6*2+(n+16)], m0
+    movntq [r0+r6*2+(n+24)], m0
+%else
+    movntdq [r0+r6*2+(n+ 0)], m0
+    movntdq [r0+r6*2+(n+16)], m0
+%endif
+    %assign n n+32
+%endrep
+    add    r6, 16*SIZEOF_PIXEL
+    cmp    r6, t1
+    jl .pad
+    add    r0, r1mp
+    add    r2, r3mp
+    add    r4, r5
+    dec    t0d
+    jg .loopy
+    sfence
+    emms
+    RET
+
+;-----------------------------------------------------------------------------
+; void store_interleave_chroma( uint8_t *dst, intptr_t i_dst, uint8_t *srcu, uint8_t *srcv, int height )
+;-----------------------------------------------------------------------------
+cglobal store_interleave_chroma, 5,5
+    FIX_STRIDES r1
+.loop:
+    INTERLEAVE r0+ 0, r2+           0, r3+           0, a
+    INTERLEAVE r0+r1, r2+FDEC_STRIDEB, r3+FDEC_STRIDEB, a
+    add    r2, FDEC_STRIDEB*2
+    add    r3, FDEC_STRIDEB*2
+    lea    r0, [r0+r1*2]
+    sub   r4d, 2
+    jg .loop
+    RET
+%endmacro ; PLANE_INTERLEAVE
+
+%macro DEINTERLEAVE_START 0
+%if HIGH_BIT_DEPTH
+    mova   m4, [pd_ffff]
+%elif cpuflag(ssse3)
+    mova   m4, [deinterleave_shuf]
+%else
+    mova   m4, [pw_00ff]
+%endif ; HIGH_BIT_DEPTH
+%endmacro
+
+%macro PLANE_DEINTERLEAVE 0
+;-----------------------------------------------------------------------------
+; void plane_copy_deinterleave( pixel *dstu, intptr_t i_dstu,
+;                               pixel *dstv, intptr_t i_dstv,
+;                               pixel *src,  intptr_t i_src, int w, int h )
+;-----------------------------------------------------------------------------
+cglobal plane_copy_deinterleave, 6,7
+    DEINTERLEAVE_START
+    mov    r6d, r6m
+    FIX_STRIDES r1, r3, r5, r6d
+%if HIGH_BIT_DEPTH
+    mov    r6m, r6d
+%endif
+    add    r0,  r6
+    add    r2,  r6
+    lea    r4, [r4+r6*2]
+.loopy:
+    mov    r6d, r6m
+    neg    r6
+.loopx:
+    DEINTERLEAVE r0+r6+0*SIZEOF_PIXEL, r2+r6+0*SIZEOF_PIXEL, r4+r6*2+ 0*SIZEOF_PIXEL, 0, m4, u
+    DEINTERLEAVE r0+r6+8*SIZEOF_PIXEL, r2+r6+8*SIZEOF_PIXEL, r4+r6*2+16*SIZEOF_PIXEL, 0, m4, u
+    add    r6, 16*SIZEOF_PIXEL
+    jl .loopx
+    add    r0, r1
+    add    r2, r3
+    add    r4, r5
+    dec dword r7m
+    jg .loopy
+    RET
+
+;-----------------------------------------------------------------------------
+; void load_deinterleave_chroma_fenc( pixel *dst, pixel *src, intptr_t i_src, int height )
+;-----------------------------------------------------------------------------
+cglobal load_deinterleave_chroma_fenc, 4,4
+    DEINTERLEAVE_START
+    FIX_STRIDES r2
+.loop:
+    DEINTERLEAVE r0+           0, r0+FENC_STRIDEB*1/2, r1+ 0, 1, m4, a
+    DEINTERLEAVE r0+FENC_STRIDEB, r0+FENC_STRIDEB*3/2, r1+r2, 1, m4, a
+    add    r0, FENC_STRIDEB*2
+    lea    r1, [r1+r2*2]
+    sub   r3d, 2
+    jg .loop
+    RET
+
+;-----------------------------------------------------------------------------
+; void load_deinterleave_chroma_fdec( pixel *dst, pixel *src, intptr_t i_src, int height )
+;-----------------------------------------------------------------------------
+cglobal load_deinterleave_chroma_fdec, 4,4
+    DEINTERLEAVE_START
+    FIX_STRIDES r2
+.loop:
+    DEINTERLEAVE r0+           0, r0+FDEC_STRIDEB*1/2, r1+ 0, 0, m4, a
+    DEINTERLEAVE r0+FDEC_STRIDEB, r0+FDEC_STRIDEB*3/2, r1+r2, 0, m4, a
+    add    r0, FDEC_STRIDEB*2
+    lea    r1, [r1+r2*2]
+    sub   r3d, 2
+    jg .loop
+    RET
+%endmacro ; PLANE_DEINTERLEAVE
+
+%if HIGH_BIT_DEPTH
+INIT_MMX mmx2
+PLANE_INTERLEAVE
+INIT_MMX mmx
+PLANE_DEINTERLEAVE
+INIT_XMM sse2
+PLANE_INTERLEAVE
+PLANE_DEINTERLEAVE
+INIT_XMM avx
+PLANE_INTERLEAVE
+PLANE_DEINTERLEAVE
+%else
+INIT_MMX mmx2
+PLANE_INTERLEAVE
+INIT_MMX mmx
+PLANE_DEINTERLEAVE
+INIT_XMM sse2
+PLANE_INTERLEAVE
+PLANE_DEINTERLEAVE
+INIT_XMM ssse3
+PLANE_DEINTERLEAVE
+%endif
+
+; These functions are not general-use; not only do the SSE ones require aligned input,
+; but they also will fail if given a non-mod16 size.
+; memzero SSE will fail for non-mod128.
+
+;-----------------------------------------------------------------------------
+; void *memcpy_aligned( void *dst, const void *src, size_t n );
+;-----------------------------------------------------------------------------
+%macro MEMCPY 0
+cglobal memcpy_aligned, 3,3
+%if mmsize == 16
+    test r2d, 16
+    jz .copy2
+    mova  m0, [r1+r2-16]
+    mova [r0+r2-16], m0
+    sub  r2d, 16
+.copy2:
+%endif
+    test r2d, 2*mmsize
+    jz .copy4start
+    mova  m0, [r1+r2-1*mmsize]
+    mova  m1, [r1+r2-2*mmsize]
+    mova [r0+r2-1*mmsize], m0
+    mova [r0+r2-2*mmsize], m1
+    sub  r2d, 2*mmsize
+.copy4start:
+    test r2d, r2d
+    jz .ret
+.copy4:
+    mova  m0, [r1+r2-1*mmsize]
+    mova  m1, [r1+r2-2*mmsize]
+    mova  m2, [r1+r2-3*mmsize]
+    mova  m3, [r1+r2-4*mmsize]
+    mova [r0+r2-1*mmsize], m0
+    mova [r0+r2-2*mmsize], m1
+    mova [r0+r2-3*mmsize], m2
+    mova [r0+r2-4*mmsize], m3
+    sub  r2d, 4*mmsize
+    jg .copy4
+.ret:
+    REP_RET
+%endmacro
+
+INIT_MMX mmx
+MEMCPY
+INIT_XMM sse
+MEMCPY
+
+;-----------------------------------------------------------------------------
+; void *memzero_aligned( void *dst, size_t n );
+;-----------------------------------------------------------------------------
+%macro MEMZERO 1
+cglobal memzero_aligned, 2,2
+    add  r0, r1
+    neg  r1
+%if mmsize == 8
+    pxor m0, m0
+%else
+    xorps m0, m0
+%endif
+.loop:
+%assign i 0
+%rep %1
+    mova [r0 + r1 + i], m0
+%assign i i+mmsize
+%endrep
+    add r1, mmsize*%1
+    jl .loop
+    RET
+%endmacro
+
+INIT_MMX mmx
+MEMZERO 8
+INIT_XMM sse
+MEMZERO 8
+INIT_YMM avx
+MEMZERO 4
+
+%if HIGH_BIT_DEPTH == 0
+;-----------------------------------------------------------------------------
+; void integral_init4h( uint16_t *sum, uint8_t *pix, intptr_t stride )
+;-----------------------------------------------------------------------------
+%macro INTEGRAL_INIT4H 0
+cglobal integral_init4h, 3,4
+    lea     r3, [r0+r2*2]
+    add     r1, r2
+    neg     r2
+    pxor    m4, m4
+.loop:
+    mova    m0, [r1+r2]
+%if mmsize==32
+    movu    m1, [r1+r2+8]
+%else
+    mova    m1, [r1+r2+16]
+    palignr m1, m0, 8
+%endif
+    mpsadbw m0, m4, 0
+    mpsadbw m1, m4, 0
+    paddw   m0, [r0+r2*2]
+    paddw   m1, [r0+r2*2+mmsize]
+    mova  [r3+r2*2   ], m0
+    mova  [r3+r2*2+mmsize], m1
+    add     r2, mmsize
+    jl .loop
+    RET
+%endmacro
+
+INIT_XMM sse4
+INTEGRAL_INIT4H
+INIT_YMM avx2
+INTEGRAL_INIT4H
+
+%macro INTEGRAL_INIT8H 0
+cglobal integral_init8h, 3,4
+    lea     r3, [r0+r2*2]
+    add     r1, r2
+    neg     r2
+    pxor    m4, m4
+.loop:
+    mova    m0, [r1+r2]
+%if mmsize==32
+    movu    m1, [r1+r2+8]
+    mpsadbw m2, m0, m4, 100100b
+    mpsadbw m3, m1, m4, 100100b
+%else
+    mova    m1, [r1+r2+16]
+    palignr m1, m0, 8
+    mpsadbw m2, m0, m4, 100b
+    mpsadbw m3, m1, m4, 100b
+%endif
+    mpsadbw m0, m4, 0
+    mpsadbw m1, m4, 0
+    paddw   m0, [r0+r2*2]
+    paddw   m1, [r0+r2*2+mmsize]
+    paddw   m0, m2
+    paddw   m1, m3
+    mova  [r3+r2*2   ], m0
+    mova  [r3+r2*2+mmsize], m1
+    add     r2, mmsize
+    jl .loop
+    RET
+%endmacro
+
+INIT_XMM sse4
+INTEGRAL_INIT8H
+INIT_XMM avx
+INTEGRAL_INIT8H
+INIT_YMM avx2
+INTEGRAL_INIT8H
+%endif ; !HIGH_BIT_DEPTH
+
+%macro INTEGRAL_INIT_8V 0
+;-----------------------------------------------------------------------------
+; void integral_init8v( uint16_t *sum8, intptr_t stride )
+;-----------------------------------------------------------------------------
+cglobal integral_init8v, 3,3
+    add   r1, r1
+    add   r0, r1
+    lea   r2, [r0+r1*8]
+    neg   r1
+.loop:
+    mova  m0, [r2+r1]
+    mova  m1, [r2+r1+mmsize]
+    psubw m0, [r0+r1]
+    psubw m1, [r0+r1+mmsize]
+    mova  [r0+r1], m0
+    mova  [r0+r1+mmsize], m1
+    add   r1, 2*mmsize
+    jl .loop
+    RET
+%endmacro
+
+INIT_MMX mmx
+INTEGRAL_INIT_8V
+INIT_XMM sse2
+INTEGRAL_INIT_8V
+INIT_YMM avx2
+INTEGRAL_INIT_8V
+
+;-----------------------------------------------------------------------------
+; void integral_init4v( uint16_t *sum8, uint16_t *sum4, intptr_t stride )
+;-----------------------------------------------------------------------------
+INIT_MMX mmx
+cglobal integral_init4v, 3,5
+    shl   r2, 1
+    lea   r3, [r0+r2*4]
+    lea   r4, [r0+r2*8]
+    mova  m0, [r0+r2]
+    mova  m4, [r4+r2]
+.loop:
+    mova  m1, m4
+    psubw m1, m0
+    mova  m4, [r4+r2-8]
+    mova  m0, [r0+r2-8]
+    paddw m1, m4
+    mova  m3, [r3+r2-8]
+    psubw m1, m0
+    psubw m3, m0
+    mova  [r0+r2-8], m1
+    mova  [r1+r2-8], m3
+    sub   r2, 8
+    jge .loop
+    RET
+
+INIT_XMM sse2
+cglobal integral_init4v, 3,5
+    shl     r2, 1
+    add     r0, r2
+    add     r1, r2
+    lea     r3, [r0+r2*4]
+    lea     r4, [r0+r2*8]
+    neg     r2
+.loop:
+    mova    m0, [r0+r2]
+    mova    m1, [r4+r2]
+    mova    m2, m0
+    mova    m4, m1
+    shufpd  m0, [r0+r2+16], 1
+    shufpd  m1, [r4+r2+16], 1
+    paddw   m0, m2
+    paddw   m1, m4
+    mova    m3, [r3+r2]
+    psubw   m1, m0
+    psubw   m3, m2
+    mova  [r0+r2], m1
+    mova  [r1+r2], m3
+    add     r2, 16
+    jl .loop
+    RET
+
+INIT_XMM ssse3
+cglobal integral_init4v, 3,5
+    shl     r2, 1                 ; stride: uint16_t elements -> bytes
+    add     r0, r2                ; bias pointers so r2 can run negative up to 0
+    add     r1, r2
+    lea     r3, [r0+r2*4]
+    lea     r4, [r0+r2*8]
+    neg     r2
+.loop:
+    mova    m2, [r0+r2]
+    mova    m0, [r0+r2+16]
+    mova    m4, [r4+r2]
+    mova    m1, [r4+r2+16]
+    palignr m0, m2, 8             ; SSSE3 byte-align: row advanced by 4 words
+    palignr m1, m4, 8
+    paddw   m0, m2                ; x[i] + x[i+4]
+    paddw   m1, m4
+    mova    m3, [r3+r2]
+    psubw   m1, m0
+    psubw   m3, m2
+    mova  [r0+r2], m1             ; sum8 row is rewritten in place
+    mova  [r1+r2], m3             ; sum4 output
+    add     r2, 16
+    jl .loop
+    RET
+
+INIT_YMM avx2
+cglobal integral_init4v, 3,5
+    add     r2, r2                ; stride: uint16_t elements -> bytes
+    add     r0, r2                ; bias pointers so r2 can run negative up to 0
+    add     r1, r6
+    lea     r3, [r0+r2*4]
+    lea     r4, [r0+r2*8]
+    neg     r2
+.loop:
+    mova    m2, [r0+r2]
+    movu    m1, [r4+r2+8]         ; unaligned load replaces the palignr shift
+    paddw   m0, m2, [r0+r2+8]     ; x[i] + x[i+4] (3-operand AVX form)
+    paddw   m1, [r4+r2]
+    mova    m3, [r3+r2]
+    psubw   m1, m0
+    psubw   m3, m2
+    mova  [r0+r2], m1             ; sum8 row is rewritten in place
+    mova  [r1+r2], m3             ; sum4 output
+    add     r2, 32                ; one full ymm (16 words) per pass
+    jl .loop
+    RET
+
+%macro FILT8x4 7 ; 2 out, 2 tmp, 2 odd-out, x-offset: half-pel filter of 3 rows
+    mova      %3, [r0+%7]
+    mova      %4, [r0+r5+%7]
+    pavgb     %3, %4              ; vertical avg(row0, row1)
+    pavgb     %4, [r0+r5*2+%7]    ; vertical avg(row1, row2)
+    PALIGNR   %1, %3, 1, m6       ; the same rows shifted one pixel
+    PALIGNR   %2, %4, 1, m6
+%if cpuflag(xop)
+    pavgb     %1, %3              ; horizontal avg; caller deinterleaves via vpperm
+    pavgb     %2, %4
+%else
+    pavgb     %1, %3              ; horizontal avg
+    pavgb     %2, %4
+    psrlw     %5, %1, 8           ; odd bytes -> %5/%6
+    psrlw     %6, %2, 8
+    pand      %1, m7              ; even bytes (m7 = words of 0x00ff)
+    pand      %2, m7
+%endif
+%endmacro
+
+%macro FILT32x4U 4 ; 4 dst pointers: one 32-pixel AVX2 step producing all 4 planes
+    movu      m1, [r0+r5]
+    pavgb     m0, m1, [r0]        ; vertical avg of rows 0/1
+    movu      m3, [r0+r5+1]
+    pavgb     m2, m3, [r0+1]      ; same, shifted one pixel
+    pavgb     m1, [r0+r5*2]       ; vertical avg of rows 1/2
+    pavgb     m3, [r0+r5*2+1]
+    pavgb     m0, m2              ; horizontal avg
+    pavgb     m1, m3
+
+    movu      m3, [r0+r5+mmsize]  ; second 32-byte lane of the same three rows
+    pavgb     m2, m3, [r0+mmsize]
+    movu      m5, [r0+r5+1+mmsize]
+    pavgb     m4, m5, [r0+1+mmsize]
+    pavgb     m3, [r0+r5*2+mmsize]
+    pavgb     m5, [r0+r5*2+1+mmsize]
+    pavgb     m2, m4
+    pavgb     m3, m5
+
+    pshufb    m0, m7              ; m7 = deinterleave_shuf: split even/odd pixels
+    pshufb    m1, m7
+    pshufb    m2, m7
+    pshufb    m3, m7
+    punpckhqdq m4, m0, m2         ; collect odd halves
+    punpcklqdq m0, m0, m2         ; collect even halves
+    punpckhqdq m5, m1, m3
+    punpcklqdq m2, m1, m3
+    vpermq    m0, m0, q3120       ; fix cross-lane ordering after the unpacks
+    vpermq    m1, m4, q3120
+    vpermq    m2, m2, q3120
+    vpermq    m3, m5, q3120
+    movu    [%1], m0
+    movu    [%2], m1
+    movu    [%3], m2
+    movu    [%4], m3
+%endmacro
+
+%macro FILT16x2 4 ; carry reg, 2 dst, x-offset: two 16-byte stores per call
+    mova      m3, [r0+%4+mmsize]
+    mova      m2, [r0+%4]
+    pavgb     m3, [r0+%4+r5+mmsize]   ; vertical avg with the next row
+    pavgb     m2, [r0+%4+r5]
+    PALIGNR   %1, m3, 1, m6           ; shift in one pixel from the carry register
+    pavgb     %1, m3                  ; horizontal avg
+    PALIGNR   m3, m2, 1, m6
+    pavgb     m3, m2
+%if cpuflag(xop)
+    vpperm    m5, m3, %1, m7          ; XOP: even/odd byte split in one op
+    vpperm    m3, m3, %1, m6
+%else
+    psrlw     m5, m3, 8               ; odd bytes
+    psrlw     m4, %1, 8
+    pand      m3, m7                  ; even bytes (m7 = words of 0x00ff)
+    pand      %1, m7
+    packuswb  m3, %1
+    packuswb  m5, m4
+%endif
+    mova    [%2], m3
+    mova    [%3], m5
+    mova      %1, m2                  ; carry the unshifted row to the next call
+%endmacro
+
+%macro FILT8x2U 3 ; 2 dst, x-offset: unaligned 8-pixel variant for the mmx2 path
+    mova      m3, [r0+%3+8]
+    mova      m2, [r0+%3]
+    pavgb     m3, [r0+%3+r5+8]    ; vertical avg with the next row
+    pavgb     m2, [r0+%3+r5]
+    mova      m1, [r0+%3+9]       ; the same rows, shifted one pixel
+    mova      m0, [r0+%3+1]
+    pavgb     m1, [r0+%3+r5+9]
+    pavgb     m0, [r0+%3+r5+1]
+    pavgb     m1, m3              ; horizontal avg
+    pavgb     m0, m2
+    psrlw     m3, m1, 8           ; odd bytes
+    psrlw     m2, m0, 8
+    pand      m1, m7              ; even bytes (m7 = words of 0x00ff)
+    pand      m0, m7
+    packuswb  m0, m1
+    packuswb  m2, m3
+    mova    [%1], m0
+    mova    [%2], m2
+%endmacro
+
+%macro FILT8xU 3 ; 2 dst, x-offset: high-bit-depth (word pixels) mmx2 variant
+    mova      m3, [r0+%3+8]
+    mova      m2, [r0+%3]
+    pavgw     m3, [r0+%3+r5+8]    ; vertical avg with the next row
+    pavgw     m2, [r0+%3+r5]
+    movu      m1, [r0+%3+10]      ; the same rows, shifted one pixel (2 bytes)
+    movu      m0, [r0+%3+2]
+    pavgw     m1, [r0+%3+r5+10]
+    pavgw     m0, [r0+%3+r5+2]
+    pavgw     m1, m3              ; horizontal avg
+    pavgw     m0, m2
+    psrld     m3, m1, 16          ; odd words
+    psrld     m2, m0, 16
+    pand      m1, m7              ; even words (m7 = dwords of 0x0000ffff)
+    pand      m0, m7
+    packssdw  m0, m1
+    packssdw  m2, m3
+    movu    [%1], m0
+    mova    [%2], m2
+%endmacro
+
+%macro FILT8xA 4 ; carry reg, 2 dst, x-offset: high-bit-depth (word pixels) variant
+    movu      m3, [r0+%4+mmsize]
+    movu      m2, [r0+%4]
+    pavgw     m3, [r0+%4+r5+mmsize]   ; vertical avg with the next row
+    pavgw     m2, [r0+%4+r5]
+    PALIGNR   %1, m3, 2, m6           ; shift in one pixel (2 bytes) from carry reg
+    pavgw     %1, m3                  ; horizontal avg
+    PALIGNR   m3, m2, 2, m6
+    pavgw     m3, m2
+%if cpuflag(xop)
+    vpperm    m5, m3, %1, m7          ; XOP: even/odd word split in one op
+    vpperm    m3, m3, %1, m6
+%else
+    psrld     m5, m3, 16              ; odd words
+    psrld     m4, %1, 16
+    pand      m3, m7                  ; even words (m7 = dwords of 0x0000ffff)
+    pand      %1, m7
+    packssdw  m3, %1
+    packssdw  m5, m4
+%endif
+%if cpuflag(avx2)
+    vpermq     m3, m3, q3120          ; fix cross-lane ordering after the packs
+    vpermq     m5, m5, q3120
+%endif
+    movu    [%2], m3
+    movu    [%3], m5
+    movu      %1, m2                  ; carry the unshifted row to the next call
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void frame_init_lowres_core( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
+;                              intptr_t src_stride, intptr_t dst_stride, int width, int height )
+;-----------------------------------------------------------------------------
+%macro FRAME_INIT_LOWRES 0
+cglobal frame_init_lowres_core, 6,7,(12-4*(BIT_DEPTH/9)) ; 8 for HIGH_BIT_DEPTH, 12 otherwise
+%if HIGH_BIT_DEPTH
+    shl   dword r6m, 1            ; dst stride/width in bytes for 16-bit pixels
+    FIX_STRIDES r5
+    shl   dword r7m, 1
+%endif
+%if mmsize >= 16
+    add   dword r7m, mmsize-1     ; round width up to a whole vector
+    and   dword r7m, ~(mmsize-1)
+%endif
+    ; src += 2*(height-1)*stride + 2*width
+    mov      r6d, r8m
+    dec      r6d
+    imul     r6d, r5d
+    add      r6d, r7m
+    lea       r0, [r0+r6*2]       ; point src past the end; loops run backwards
+    ; dst += (height-1)*stride + width
+    mov      r6d, r8m
+    dec      r6d
+    imul     r6d, r6m
+    add      r6d, r7m
+    add       r1, r6              ; bias all four dst planes the same way
+    add       r2, r6
+    add       r3, r6
+    add       r4, r6
+    ; gap = stride - width
+    mov      r6d, r6m
+    sub      r6d, r7m
+    PUSH      r6
+    %define dst_gap [rsp+gprsize]
+    mov      r6d, r5d
+    sub      r6d, r7m
+    shl      r6d, 1               ; src advances two rows per output row
+    PUSH      r6
+    %define src_gap [rsp]
+%if HIGH_BIT_DEPTH
+%if cpuflag(xop)
+    mova      m6, [deinterleave_shuf32a]  ; vpperm selectors for even/odd split
+    mova      m7, [deinterleave_shuf32b]
+%else
+    pcmpeqw   m7, m7
+    psrld     m7, 16              ; m7 = dwords of 0x0000ffff (even-word mask)
+%endif
+.vloop:
+    mov      r6d, r7m             ; r6d = remaining width in bytes
+%ifnidn cpuname, mmx2
+    movu      m0, [r0]            ; prime the FILT8xA carry registers
+    movu      m1, [r0+r5]
+    pavgw     m0, m1
+    pavgw     m1, [r0+r5*2]
+%endif
+.hloop:
+    sub       r0, mmsize*2        ; step right-to-left
+    sub       r1, mmsize
+    sub       r2, mmsize
+    sub       r3, mmsize
+    sub       r4, mmsize
+%ifidn cpuname, mmx2
+    FILT8xU r1, r2, 0
+    FILT8xU r3, r4, r5
+%else
+    FILT8xA m0, r1, r2, 0
+    FILT8xA m1, r3, r4, r5
+%endif
+    sub      r6d, mmsize
+    jg .hloop
+%else ; !HIGH_BIT_DEPTH
+%if cpuflag(avx2)
+    mova      m7, [deinterleave_shuf]
+%elif cpuflag(xop)
+    mova      m6, [deinterleave_shuf32a]
+    mova      m7, [deinterleave_shuf32b]
+%else
+    pcmpeqb   m7, m7
+    psrlw     m7, 8               ; m7 = words of 0x00ff (even-byte mask)
+%endif
+.vloop:
+    mov      r6d, r7m             ; r6d = remaining width in bytes
+%ifnidn cpuname, mmx2
+%if mmsize <= 16
+    mova      m0, [r0]            ; prime the FILT16x2/FILT8x4 carry registers
+    mova      m1, [r0+r5]
+    pavgb     m0, m1
+    pavgb     m1, [r0+r5*2]
+%endif
+%endif
+.hloop:
+    sub       r0, mmsize*2        ; step right-to-left
+    sub       r1, mmsize
+    sub       r2, mmsize
+    sub       r3, mmsize
+    sub       r4, mmsize
+%if mmsize==32
+    FILT32x4U r1, r2, r3, r4
+%elifdef m8
+    FILT8x4   m0, m1, m2, m3, m10, m11, mmsize  ; m8+ available: two batches per pass
+    mova      m8, m0
+    mova      m9, m1
+    FILT8x4   m2, m3, m0, m1, m4, m5, 0
+%if cpuflag(xop)
+    vpperm    m4, m2, m8, m7      ; XOP: pack + deinterleave in one op
+    vpperm    m2, m2, m8, m6
+    vpperm    m5, m3, m9, m7
+    vpperm    m3, m3, m9, m6
+%else
+    packuswb  m2, m8
+    packuswb  m3, m9
+    packuswb  m4, m10
+    packuswb  m5, m11
+%endif
+    mova    [r1], m2
+    mova    [r2], m4
+    mova    [r3], m3
+    mova    [r4], m5
+%elifidn cpuname, mmx2
+    FILT8x2U  r1, r2, 0
+    FILT8x2U  r3, r4, r5
+%else
+    FILT16x2  m0, r1, r2, 0
+    FILT16x2  m1, r3, r4, r5
+%endif
+    sub      r6d, mmsize
+    jg .hloop
+%endif ; HIGH_BIT_DEPTH
+.skip:
+    mov       r6, dst_gap         ; rewind pointers by (stride - width)
+    sub       r0, src_gap
+    sub       r1, r6
+    sub       r2, r6
+    sub       r3, r6
+    sub       r4, r6
+    dec    dword r8m              ; one output row done
+    jg .vloop
+    ADD      rsp, 2*gprsize       ; pop dst_gap/src_gap
+    emms
+    RET
+%endmacro ; FRAME_INIT_LOWRES
+
+INIT_MMX mmx2                     ; instantiate one frame_init_lowres_core per ISA
+FRAME_INIT_LOWRES
+%if ARCH_X86_64 == 0
+INIT_MMX cache32, mmx2            ; 32-bit-only variant
+FRAME_INIT_LOWRES
+%endif
+INIT_XMM sse2
+FRAME_INIT_LOWRES
+INIT_XMM ssse3
+FRAME_INIT_LOWRES
+INIT_XMM avx
+FRAME_INIT_LOWRES
+INIT_XMM xop
+FRAME_INIT_LOWRES
+%if ARCH_X86_64 == 1
+INIT_YMM avx2                     ; AVX2 build is 64-bit only (uses m8..m11)
+FRAME_INIT_LOWRES
+%endif
+
+;-----------------------------------------------------------------------------
+; void mbtree_propagate_cost( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
+;                             uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len )
+;-----------------------------------------------------------------------------
+%macro MBTREE 0
+cglobal mbtree_propagate_cost, 7,7,7
+    add        r6d, r6d           ; len in uint16_t units -> bytes
+    lea         r0, [r0+r6*2]     ; bias pointers so r6 runs from -2*len up to 0
+    add         r1, r6
+    add         r2, r6
+    add         r3, r6
+    add         r4, r6
+    neg         r6
+    pxor      xmm4, xmm4          ; zero, for word->dword unpacking
+    movss     xmm6, [r5]
+    shufps    xmm6, xmm6, 0       ; broadcast fps_factor to all 4 lanes
+    mulps     xmm6, [pf_inv256]   ; fps_factor * 1/256
+    movdqa    xmm5, [pw_3fff]     ; keep only the low 14 bits of inter costs
+.loop:
+    movq      xmm2, [r2+r6] ; intra
+    movq      xmm0, [r4+r6] ; invq
+    movq      xmm3, [r3+r6] ; inter
+    movq      xmm1, [r1+r6] ; prop
+    punpcklwd xmm2, xmm4
+    punpcklwd xmm0, xmm4
+    pmaddwd   xmm0, xmm2          ; intra*invq as 32-bit products
+    pand      xmm3, xmm5
+    punpcklwd xmm1, xmm4
+    punpcklwd xmm3, xmm4
+%if cpuflag(fma4)
+    cvtdq2ps  xmm0, xmm0
+    cvtdq2ps  xmm1, xmm1
+    fmaddps   xmm0, xmm0, xmm6, xmm1   ; prop + intra*invq*fps_factor/256
+    cvtdq2ps  xmm1, xmm2
+    psubd     xmm2, xmm3
+    cvtdq2ps  xmm2, xmm2               ; intra - inter
+    rcpps     xmm3, xmm1               ; 1/intra, first approximation
+    mulps     xmm1, xmm3
+    mulps     xmm0, xmm2
+    addps     xmm2, xmm3, xmm3
+    fnmaddps  xmm3, xmm1, xmm3, xmm2   ; Newton-Raphson refinement of 1/intra
+    mulps     xmm0, xmm3
+%else
+    cvtdq2ps  xmm0, xmm0
+    mulps     xmm0, xmm6    ; intra*invq*fps_factor>>8
+    cvtdq2ps  xmm1, xmm1    ; prop
+    addps     xmm0, xmm1    ; prop + (intra*invq*fps_factor>>8)
+    cvtdq2ps  xmm1, xmm2    ; intra
+    psubd     xmm2, xmm3    ; intra - inter
+    cvtdq2ps  xmm2, xmm2    ; intra - inter
+    rcpps     xmm3, xmm1    ; 1 / intra 1st approximation
+    mulps     xmm1, xmm3    ; intra * (1/intra 1st approx)
+    mulps     xmm1, xmm3    ; intra * (1/intra 1st approx)^2
+    mulps     xmm0, xmm2    ; (prop + (intra*invq*fps_factor>>8)) * (intra - inter)
+    addps     xmm3, xmm3    ; 2 * (1/intra 1st approx)
+    subps     xmm3, xmm1    ; 2nd approximation for 1/intra
+    mulps     xmm0, xmm3    ; / intra
+%endif
+    cvtps2dq  xmm0, xmm0
+    movdqa [r0+r6*2], xmm0        ; store 4 int32 results
+    add         r6, 8             ; 4 uint16_t inputs per iteration
+    jl .loop
+    RET
+%endmacro
+
+INIT_XMM sse2                     ; baseline SSE2 build
+MBTREE
+; Bulldozer only has a 128-bit float unit, so the AVX version of this function is actually slower.
+INIT_XMM fma4
+MBTREE
+
+%macro INT16_UNPACK 1 ; zero-extend the 8 words of xm%1 into the 8 dwords of m%1
+    vpunpckhwd   xm4, xm%1, xm7   ; high four words -> dwords (xm7 = zero)
+    vpunpcklwd  xm%1, xm7         ; low four words -> dwords
+    vinsertf128  m%1, m%1, xm4, 1 ; merge into one 256-bit register
+%endmacro
+
+; FIXME: align loads/stores to 16 bytes
+%macro MBTREE_AVX 0
+cglobal mbtree_propagate_cost, 7,7,8
+    add          r6d, r6d         ; len in uint16_t units -> bytes
+    lea           r0, [r0+r6*2]   ; bias pointers so r6 runs negative up to 0
+    add           r1, r6
+    add           r2, r6
+    add           r3, r6
+    add           r4, r6
+    neg           r6
+    mova         xm5, [pw_3fff]   ; keep only the low 14 bits of inter costs
+    vbroadcastss  m6, [r5]
+    mulps         m6, [pf_inv256] ; fps_factor * 1/256
+%if notcpuflag(avx2)
+    pxor         xm7, xm7         ; zero register for INT16_UNPACK
+%endif
+.loop:
+%if cpuflag(avx2)
+    pmovzxwd     m0, [r2+r6]      ; intra
+    pmovzxwd     m1, [r4+r6]      ; invq
+    pmovzxwd     m2, [r1+r6]      ; prop
+    pand        xm3, xm5, [r3+r6] ; inter
+    pmovzxwd     m3, xm3
+    pmaddwd      m1, m0           ; intra*invq
+    psubd        m4, m0, m3       ; intra - inter
+    cvtdq2ps     m0, m0
+    cvtdq2ps     m1, m1
+    cvtdq2ps     m2, m2
+    cvtdq2ps     m4, m4
+    fmaddps      m1, m1, m6, m2   ; prop + intra*invq*fps_factor/256
+    rcpps        m3, m0           ; 1/intra, first approximation
+    mulps        m2, m0, m3
+    mulps        m1, m4
+    addps        m4, m3, m3
+    fnmaddps     m4, m2, m3, m4   ; Newton-Raphson refinement of 1/intra
+    mulps        m1, m4
+%else
+    movu        xm0, [r2+r6]
+    movu        xm1, [r4+r6]
+    movu        xm2, [r1+r6]
+    pand        xm3, xm5, [r3+r6]
+    INT16_UNPACK 0                ; AVX1 has no pmovzxwd for ymm: widen by hand
+    INT16_UNPACK 1
+    INT16_UNPACK 2
+    INT16_UNPACK 3
+    cvtdq2ps     m0, m0
+    cvtdq2ps     m1, m1
+    cvtdq2ps     m2, m2
+    cvtdq2ps     m3, m3
+    mulps        m1, m0
+    subps        m4, m0, m3
+    mulps        m1, m6         ; intra*invq*fps_factor>>8
+    addps        m1, m2         ; prop + (intra*invq*fps_factor>>8)
+    rcpps        m3, m0         ; 1 / intra 1st approximation
+    mulps        m2, m0, m3     ; intra * (1/intra 1st approx)
+    mulps        m2, m3         ; intra * (1/intra 1st approx)^2
+    mulps        m1, m4         ; (prop + (intra*invq*fps_factor>>8)) * (intra - inter)
+    addps        m3, m3         ; 2 * (1/intra 1st approx)
+    subps        m3, m2         ; 2nd approximation for 1/intra
+    mulps        m1, m3         ; / intra
+%endif
+    vcvtps2dq    m1, m1
+    movu  [r0+r6*2], m1           ; store 8 int32 results
+    add          r6, 16           ; 8 uint16_t inputs per iteration
+    jl .loop
+    RET
+%endmacro
+
+INIT_YMM avx
+MBTREE_AVX
+INIT_YMM avx2,fma3                ; FMA3 provides the fmaddps/fnmaddps used above
+MBTREE_AVX
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/mc.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,39 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_MC_H
+#define X265_MC_H
+
+#define LOWRES(cpu) \
+    void PFX(frame_init_lowres_core_ ## cpu)(const pixel* src0, pixel* dst0, pixel* dsth, pixel* dstv, pixel* dstc, \
+                                             intptr_t src_stride, intptr_t dst_stride, int width, int height);
+LOWRES(mmx2) /* one prototype per SIMD flavor of the asm routine */
+LOWRES(sse2)
+LOWRES(ssse3)
+LOWRES(avx)
+LOWRES(avx2)
+LOWRES(xop)
+
+#undef LOWRES /* helper macro is private to this header */
+
+#endif // ifndef X265_MC_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/pixel-32.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,420 @@
+;*****************************************************************************
+;* pixel-32.asm: x86_32 pixel metrics
+;*****************************************************************************
+;* Copyright (C) 2003-2013 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*          Laurent Aimar <fenrir@via.ecp.fr>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+cextern pw_ppmmppmm
+cextern pw_pmpmpmpm
+
+SECTION .text
+INIT_MMX mmx2
+
+%macro LOAD_DIFF_4x8P 1 ; dx
+    LOAD_DIFF  m0, m7, none, [r0+%1],      [r2+%1]       ; m0..m7 = pix1-pix2, 8 rows of 4
+    LOAD_DIFF  m1, m6, none, [r0+%1+r1],   [r2+%1+r3]
+    LOAD_DIFF  m2, m7, none, [r0+%1+r1*2], [r2+%1+r3*2]
+    LOAD_DIFF  m3, m6, none, [r0+%1+r4],   [r2+%1+r5]    ; r4/r5: row offsets set by caller
+    lea  r0, [r0+4*r1]
+    lea  r2, [r2+4*r3]
+    LOAD_DIFF  m4, m7, none, [r0+%1],      [r2+%1]
+    LOAD_DIFF  m5, m6, none, [r0+%1+r1],   [r2+%1+r3]
+    LOAD_DIFF  m6, m7, none, [r0+%1+r1*2], [r2+%1+r3*2]
+    movq [spill], m5              ; all 8 mm registers are live: park m5
+    LOAD_DIFF  m7, m5, none, [r0+%1+r4],   [r2+%1+r5]
+    movq m5, [spill]
+%endmacro
+
+%macro SUM4x8_MM 0 ; m0 = word sum of |m0..m7|; clobbers m1..m7 and [spill]
+    movq [spill],   m6            ; m6/m7 are first needed as ABSW2 temporaries
+    movq [spill+8], m7
+    ABSW2    m0, m1, m0, m1, m6, m7
+    ABSW2    m2, m3, m2, m3, m6, m7
+    paddw    m0, m2
+    paddw    m1, m3
+    movq     m6, [spill]
+    movq     m7, [spill+8]
+    ABSW2    m4, m5, m4, m5, m2, m3
+    ABSW2    m6, m7, m6, m7, m2, m3
+    paddw    m4, m6
+    paddw    m5, m7
+    paddw    m0, m4
+    paddw    m1, m5
+    paddw    m0, m1               ; fold everything into m0
+%endmacro
+
+;-----------------------------------------------------------------------------
+; int pixel_sa8d_8x8( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sa8d_8x8_internal
+    push   r0                     ; save pix pointers; reloaded for the right half
+    push   r2
+    sub    esp, 0x74
+%define args  esp+0x74
+%define spill esp+0x60 ; +16
+%define trans esp+0    ; +96
+    LOAD_DIFF_4x8P 0              ; left 4 columns: 8 rows of differences
+    HADAMARD8_V 0, 1, 2, 3, 4, 5, 6, 7
+
+    movq   [spill], m1
+    TRANSPOSE4x4W 4, 5, 6, 7, 1
+    movq   [trans+0x00], m4       ; stash transposed halves for the 2nd pass
+    movq   [trans+0x08], m5
+    movq   [trans+0x10], m6
+    movq   [trans+0x18], m7
+    movq   m1, [spill]
+    TRANSPOSE4x4W 0, 1, 2, 3, 4
+    movq   [trans+0x20], m0
+    movq   [trans+0x28], m1
+    movq   [trans+0x30], m2
+    movq   [trans+0x38], m3
+
+    mov    r0, [args+4]           ; reload the saved pix1/pix2 pointers
+    mov    r2, [args]
+    LOAD_DIFF_4x8P 4              ; right 4 columns
+    HADAMARD8_V 0, 1, 2, 3, 4, 5, 6, 7
+
+    movq   [spill], m7
+    TRANSPOSE4x4W 0, 1, 2, 3, 7
+    movq   [trans+0x40], m0
+    movq   [trans+0x48], m1
+    movq   [trans+0x50], m2
+    movq   [trans+0x58], m3
+    movq   m7, [spill]
+    TRANSPOSE4x4W 4, 5, 6, 7, 1
+    movq   m0, [trans+0x00]
+    movq   m1, [trans+0x08]
+    movq   m2, [trans+0x10]
+    movq   m3, [trans+0x18]
+
+    HADAMARD8_V 0, 1, 2, 3, 4, 5, 6, 7   ; second 1-D pass on transposed data
+    SUM4x8_MM
+    movq   [trans], m0
+
+    movq   m0, [trans+0x20]
+    movq   m1, [trans+0x28]
+    movq   m2, [trans+0x30]
+    movq   m3, [trans+0x38]
+    movq   m4, [trans+0x40]
+    movq   m5, [trans+0x48]
+    movq   m6, [trans+0x50]
+    movq   m7, [trans+0x58]
+
+    HADAMARD8_V 0, 1, 2, 3, 4, 5, 6, 7
+    SUM4x8_MM
+
+    pavgw  m0, [trans]            ; average the two partial abs-sums
+    add   esp, 0x7c               ; 0x74 locals + the two pushes above
+    ret
+%undef args
+%undef spill
+%undef trans
+
+%macro SUM_MM_X3 8 ; 3x sum, 4x tmp, op
+    pxor        %7, %7            ; zero for widening
+    pshufw      %4, %1, q1032     ; swap dword halves
+    pshufw      %5, %2, q1032
+    pshufw      %6, %3, q1032
+    paddusw     %1, %4            ; fold 4 words -> 2 (saturating add)
+    paddusw     %2, %5
+    paddusw     %3, %6
+    punpcklwd   %1, %7            ; widen words to dwords before the final fold
+    punpcklwd   %2, %7
+    punpcklwd   %3, %7
+    pshufw      %4, %1, q1032
+    pshufw      %5, %2, q1032
+    pshufw      %6, %3, q1032
+    %8          %1, %4            ; final fold with the caller-chosen op
+    %8          %2, %5
+    %8          %3, %6
+%endmacro
+
+%macro LOAD_4x8P 1 ; dx
+    pxor        m7, m7            ; zero for byte->word unpacking
+    movd        m6, [r0+%1+7*FENC_STRIDE]  ; row 7 loaded early, parked below
+    movd        m0, [r0+%1+0*FENC_STRIDE]
+    movd        m1, [r0+%1+1*FENC_STRIDE]
+    movd        m2, [r0+%1+2*FENC_STRIDE]
+    movd        m3, [r0+%1+3*FENC_STRIDE]
+    movd        m4, [r0+%1+4*FENC_STRIDE]
+    movd        m5, [r0+%1+5*FENC_STRIDE]
+    punpcklbw   m6, m7
+    punpcklbw   m0, m7
+    punpcklbw   m1, m7
+    movq   [spill], m6            ; park row 7; m6 is reused for row 6
+    punpcklbw   m2, m7
+    punpcklbw   m3, m7
+    movd        m6, [r0+%1+6*FENC_STRIDE]
+    punpcklbw   m4, m7
+    punpcklbw   m5, m7
+    punpcklbw   m6, m7
+    movq        m7, [spill]       ; m7 = row 7 (zero register no longer needed)
+%endmacro
+
+%macro HSUMSUB2 4 ; 2 acc regs, shuffle imm, +/-1 word mask: one butterfly stage
+    pshufw m4, %1, %3             ; m4 = shuffled partner of %1
+    pshufw m5, %2, %3
+    pmullw %1, %4                 ; apply the sign pattern to %1
+    pmullw m5, %4                 ; apply the sign pattern to shuffled %2
+    paddw  %1, m4                 ; %1 = %1*mask + shuf(%1)
+    paddw  %2, m5                 ; %2 = %2 + shuf(%2)*mask
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void intra_sa8d_x3_8x8( uint8_t *fenc, uint8_t edge[36], int *res )
+;-----------------------------------------------------------------------------
+cglobal intra_sa8d_x3_8x8, 2,3
+    SUB    esp, 0x94
+%define edge  esp+0x70 ; +32
+%define spill esp+0x60 ; +16
+%define trans esp+0    ; +96
+%define sum   esp+0    ; +32
+
+    pxor      m7, m7
+    movq      m0, [r1+7]          ; edge bytes 7..14 (left neighbours)
+    movq      m2, [r1+16]         ; edge bytes 16..23 (top neighbours)
+    movq      m1, m0
+    movq      m3, m2
+    punpcklbw m0, m7              ; widen edge bytes to words
+    punpckhbw m1, m7
+    punpcklbw m2, m7
+    punpckhbw m3, m7
+    movq      m6, [pw_ppmmppmm]
+    HSUMSUB2  m0, m2, q1032, m6   ; butterfly stages over the edge pixels
+    HSUMSUB2  m1, m3, q1032, m6
+    movq      m6, [pw_pmpmpmpm]
+    HSUMSUB2  m0, m2, q2301, m6
+    HSUMSUB2  m1, m3, q2301, m6
+    movq      m4, m0
+    movq      m5, m2
+    paddw     m0, m1              ; final sum/diff stage
+    paddw     m2, m3
+    psubw     m4, m1
+    psubw     m3, m5
+    movq [edge+0], m0             ; transformed left, low half
+    movq [edge+8], m4             ; transformed left, high half
+    movq [edge+16], m2            ; transformed top, low half
+    movq [edge+24], m3            ; transformed top, high half
+
+    LOAD_4x8P 0                   ; left 4 columns of fenc
+    HADAMARD8_V 0, 1, 2, 3, 4, 5, 6, 7
+
+    movq   [spill], m0
+    TRANSPOSE4x4W 4, 5, 6, 7, 0
+    movq   [trans+0x00], m4
+    movq   [trans+0x08], m5
+    movq   [trans+0x10], m6
+    movq   [trans+0x18], m7
+    movq   m0, [spill]
+    TRANSPOSE4x4W 0, 1, 2, 3, 4
+    movq   [trans+0x20], m0
+    movq   [trans+0x28], m1
+    movq   [trans+0x30], m2
+    movq   [trans+0x38], m3
+
+    LOAD_4x8P 4                   ; right 4 columns of fenc
+    HADAMARD8_V 0, 1, 2, 3, 4, 5, 6, 7
+
+    movq   [spill], m7
+    TRANSPOSE4x4W 0, 1, 2, 3, 7
+    movq   [trans+0x40], m0
+    movq   [trans+0x48], m1
+    movq   [trans+0x50], m2
+    movq   [trans+0x58], m3
+    movq   m7, [spill]
+    TRANSPOSE4x4W 4, 5, 6, 7, 0
+    movq   m0, [trans+0x00]
+    movq   m1, [trans+0x08]
+    movq   m2, [trans+0x10]
+    movq   m3, [trans+0x18]
+
+    HADAMARD8_V 0, 1, 2, 3, 4, 5, 6, 7   ; second 1-D pass on transposed data
+
+    movq [spill+0], m0            ; keep m0/m1 (lowest-frequency columns)
+    movq [spill+8], m1
+    ABSW2    m2, m3, m2, m3, m0, m1
+    ABSW2    m4, m5, m4, m5, m0, m1
+    paddw    m2, m4
+    paddw    m3, m5
+    ABSW2    m6, m7, m6, m7, m4, m5
+    movq     m0, [spill+0]
+    movq     m1, [spill+8]
+    paddw    m2, m6
+    paddw    m3, m7
+    paddw    m2, m3
+    ABSW     m1, m1, m4
+    paddw    m2, m1 ; 7x4 sum
+    movq     m7, m0
+    movq     m1, [edge+8] ; left bottom
+    psllw    m1, 3                ; scale edge transform to match pixel transform
+    psubw    m7, m1
+    ABSW2    m0, m7, m0, m7, m5, m3
+    paddw    m0, m2
+    paddw    m7, m2
+    movq [sum+0], m0 ; dc
+    movq [sum+8], m7 ; left
+
+    movq   m0, [trans+0x20]
+    movq   m1, [trans+0x28]
+    movq   m2, [trans+0x30]
+    movq   m3, [trans+0x38]
+    movq   m4, [trans+0x40]
+    movq   m5, [trans+0x48]
+    movq   m6, [trans+0x50]
+    movq   m7, [trans+0x58]
+
+    HADAMARD8_V 0, 1, 2, 3, 4, 5, 6, 7
+
+    movd   [sum+0x10], m0         ; first coefficient of each column, compared
+    movd   [sum+0x12], m1         ; against the transformed top edge below
+    movd   [sum+0x14], m2
+    movd   [sum+0x16], m3
+    movd   [sum+0x18], m4
+    movd   [sum+0x1a], m5
+    movd   [sum+0x1c], m6
+    movd   [sum+0x1e], m7
+
+    movq [spill],   m0
+    movq [spill+8], m1
+    ABSW2    m2, m3, m2, m3, m0, m1
+    ABSW2    m4, m5, m4, m5, m0, m1
+    paddw    m2, m4
+    paddw    m3, m5
+    paddw    m2, m3
+    movq     m0, [spill]
+    movq     m1, [spill+8]
+    ABSW2    m6, m7, m6, m7, m4, m5
+    ABSW     m1, m1, m3
+    paddw    m2, m7
+    paddw    m1, m6
+    paddw    m2, m1 ; 7x4 sum
+    movq     m1, m0
+
+    movq     m7, [edge+0]
+    psllw    m7, 3   ; left top
+
+    mov      r2, [edge+0]         ; scalar DC term from the edge transforms,
+    add      r2, [edge+16]        ; rounded to a multiple of 64
+    lea      r2, [4*r2+32]
+    and      r2, 0xffc0
+    movd     m6, r2 ; dc
+
+    psubw    m1, m7
+    psubw    m0, m6
+    ABSW2    m0, m1, m0, m1, m5, m6
+    movq     m3, [sum+0] ; dc
+    paddw    m0, m2
+    paddw    m1, m2
+    movq     m2, m0
+    paddw    m0, m3
+    paddw    m1, [sum+8] ; h
+    psrlq    m2, 16
+    paddw    m2, m3
+
+    movq     m3, [edge+16] ; top left
+    movq     m4, [edge+24] ; top right
+    psllw    m3, 3
+    psllw    m4, 3
+    psubw    m3, [sum+16]
+    psubw    m4, [sum+24]
+    ABSW2    m3, m4, m3, m4, m5, m6
+    paddw    m2, m3
+    paddw    m2, m4 ; v
+
+    SUM_MM_X3 m0, m1, m2, m3, m4, m5, m6, pavgw
+    mov      r2, r2m              ; res[] output pointer
+    pxor      m7, m7
+    punpckldq m2, m1
+    pavgw     m0, m7              ; final halving of the averaged sums
+    pavgw     m2, m7
+    movd  [r2+8], m0 ; dc
+    movq  [r2+0], m2 ; v, h
+    ADD     esp, 0x94
+    RET
+%undef edge
+%undef spill
+%undef trans
+%undef sum
+
+
+
+;-----------------------------------------------------------------------------
+; void pixel_ssim_4x4x2_core( const uint8_t *pix1, intptr_t stride1,
+;                             const uint8_t *pix2, intptr_t stride2, int sums[2][4] )
+;-----------------------------------------------------------------------------
+cglobal pixel_ssim_4x4x2_core, 0,5
+    mov       r1, r1m             ; stride1
+    mov       r3, r3m             ; stride2
+    mov       r4, 4               ; column offset: block 1 first (r4=4), then block 0
+    pxor      m0, m0              ; zero: unpack helper and final widening
+.loop:
+    mov       r0, r0m
+    mov       r2, r2m
+    add       r0, r4              ; pix1/pix2 + current block offset
+    add       r2, r4
+    pxor      m1, m1              ; sum(pix1)
+    pxor      m2, m2              ; sum(pix2)
+    pxor      m3, m3              ; sum(pix1^2) + sum(pix2^2)
+    pxor      m4, m4              ; sum(pix1*pix2)
+%rep 4
+    movd      m5, [r0]
+    movd      m6, [r2]
+    punpcklbw m5, m0
+    punpcklbw m6, m0
+    paddw     m1, m5
+    paddw     m2, m6
+    movq      m7, m5
+    pmaddwd   m5, m5              ; pix1^2 pair sums
+    pmaddwd   m7, m6              ; pix1*pix2 pair sums
+    pmaddwd   m6, m6              ; pix2^2 pair sums
+    paddd     m3, m5
+    paddd     m4, m7
+    paddd     m3, m6              ; both squared sums share one accumulator
+    add       r0, r1
+    add       r2, r3
+%endrep
+    mov       r0, r4m             ; sums[2][4] output
+    lea       r0, [r0+r4*4]       ; sums[1] on the first pass, sums[0] on the second
+    pshufw    m5, m1, q0032       ; horizontal folds of all four accumulators
+    pshufw    m6, m2, q0032
+    paddusw   m1, m5
+    paddusw   m2, m6
+    punpcklwd m1, m2              ; interleave the pix1/pix2 sums
+    pshufw    m2, m1, q0032
+    pshufw    m5, m3, q0032
+    pshufw    m6, m4, q0032
+    paddusw   m1, m2
+    paddd     m3, m5
+    paddd     m4, m6
+    punpcklwd m1, m0              ; widen s1/s2 to dwords
+    punpckldq m3, m4
+    movq  [r0+0], m1              ; {s1, s2}
+    movq  [r0+8], m3              ; {ss, s12}
+    sub       r4, 4
+    jge .loop                     ; exactly two passes: r4 = 4, then 0
+    emms
+    RET
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/pixel-a.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,12266 @@
+;*****************************************************************************
+;* pixel.asm: x86 pixel metrics
+;*****************************************************************************
+;* Copyright (C) 2003-2013 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*          Holger Lubitz <holger@lubitz.org>
+;*          Laurent Aimar <fenrir@via.ecp.fr>
+;*          Alex Izvorski <aizvorksi@gmail.com>
+;*          Fiona Glaser <fiona@x264.com>
+;*          Oskar Arvidsson <oskar@irock.se>
+;*          Min Chen <chenm003@163.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32
+hmul_8p:   times 8 db 1           ; +/-1 byte patterns (pmaddubsw multiplicands)
+           times 4 db 1, -1
+           times 8 db 1
+           times 4 db 1, -1
+hmul_4p:   times 4 db 1, 1, 1, 1, 1, -1, 1, -1
+mask_10:   times 4 dw 0, -1       ; word mask: 0,0xffff repeating
+mask_1100: times 2 dd 0, -1       ; dword mask: 0,-1 repeating
+hmul_8w:   times 4 dw 1           ; word variant of the +/-1 pattern
+           times 2 dw 1, -1
+           times 4 dw 1
+           times 2 dw 1, -1
+
+ALIGN 32
+transd_shuf1: SHUFFLE_MASK_W 0, 8, 2, 10, 4, 12, 6, 14  ; even-lane interleave
+transd_shuf2: SHUFFLE_MASK_W 1, 9, 3, 11, 5, 13, 7, 15  ; odd-lane interleave
+
+sw_f0:     dq 0xfff0, 0           ; 0xfff0 in the low qword only
+pd_f0:     times 4 dd 0xffff0000  ; high-half word mask per dword
+
+SECTION .text
+
+cextern pb_0
+cextern pb_1
+cextern pw_1
+cextern pw_8
+cextern pw_16
+cextern pw_32
+cextern pw_00ff
+cextern pw_ppppmmmm
+cextern pw_ppmmppmm
+cextern pw_pmpmpmpm
+cextern pw_pmmpzzzz
+cextern pd_1
+cextern popcnt_table
+cextern pd_2
+cextern hmul_16p
+cextern pb_movemask
+cextern pb_movemask_32
+cextern pw_pixel_max
+
+;=============================================================================
+; SATD
+;=============================================================================
+
+%macro JDUP 2 ; dst, src: join two dwords, broadcast per the best available ISA
+%if cpuflag(sse4)
+    ; just use shufps on anything post conroe
+    shufps %1, %2, 0
+%elif cpuflag(ssse3) && notcpuflag(atom)
+    ; join 2x 32 bit and duplicate them
+    ; emulating shufps is faster on conroe
+    punpcklqdq %1, %2
+    movsldup %1, %1
+%else
+    ; doesn't need to dup. sse2 does things by zero extending to words and full h_2d
+    punpckldq %1, %2
+%endif
+%endmacro
+
+%macro HSUMSUB 5 ; 4 regs + multiplicand: pmaddubsw all four with the +/-1 pattern
+    pmaddubsw m%2, m%5
+    pmaddubsw m%1, m%5
+    pmaddubsw m%4, m%5
+    pmaddubsw m%3, m%5
+%endmacro
+
+%macro DIFF_UNPACK_SSE2 5 ; 4 regs + zero reg: widen bytes, then pairwise diff
+    punpcklbw m%1, m%5
+    punpcklbw m%2, m%5
+    punpcklbw m%3, m%5
+    punpcklbw m%4, m%5
+    psubw m%1, m%2
+    psubw m%3, m%4
+%endmacro
+
+%macro DIFF_SUMSUB_SSSE3 5 ; sum/diff via pmaddubsw, then pairwise diff
+    HSUMSUB %1, %2, %3, %4, %5
+    psubw m%1, m%2
+    psubw m%3, m%4
+%endmacro
+
+%macro LOAD_DUP_2x4P 4 ; dst, tmp, 2* pointer
+    movd %1, %3
+    movd %2, %4
+    JDUP %1, %2                   ; %1 = both dwords, duplicated per JDUP
+%endmacro
+
+%macro LOAD_DUP_4x8P_CONROE 8 ; 4*dst, 4*pointer
+    movddup m%3, %6               ; qword loads duplicated into both halves
+    movddup m%4, %8
+    movddup m%1, %5
+    movddup m%2, %7
+%endmacro
+
+%macro LOAD_DUP_4x8P_PENRYN 8
+    ; penryn and nehalem run punpcklqdq and movddup in different units
+    movh m%3, %6
+    movh m%4, %8
+    punpcklqdq m%3, m%3           ; interleave the two instruction kinds
+    movddup m%1, %5
+    punpcklqdq m%4, m%4
+    movddup m%2, %7
+%endmacro
+
+%macro LOAD_SUMSUB_8x2P 9 ; 2 dst, 2 tmp, mul, 4 memory operands
+    LOAD_DUP_4x8P %1, %2, %3, %4, %6, %7, %8, %9
+    DIFF_SUMSUB_SSSE3 %1, %3, %2, %4, %5
+%endmacro
+
+%macro LOAD_SUMSUB_8x4P_SSSE3 7-11 r0, r2, 0, 0
+; 4x dest, 2x tmp, 1x mul, [2* ptr], [increment?]
+    LOAD_SUMSUB_8x2P %1, %2, %5, %6, %7, [%8], [%9], [%8+r1], [%9+r3]
+    LOAD_SUMSUB_8x2P %3, %4, %5, %6, %7, [%8+2*r1], [%9+2*r3], [%8+r4], [%9+r5]
+%if %10
+    lea %8, [%8+4*r1]             ; optionally advance both row pointers
+    lea %9, [%9+4*r3]
+%endif
+%endmacro
+
+%macro LOAD_SUMSUB_16P_SSSE3 7 ; 2*dst, 2*tmp, mul, 2*ptr
+    movddup m%1, [%7]             ; low/high 8 bytes, each duplicated
+    movddup m%2, [%7+8]
+    mova m%4, [%6]
+    movddup m%3, m%4
+    punpckhqdq m%4, m%4
+    DIFF_SUMSUB_SSSE3 %1, %3, %2, %4, %5
+%endmacro
+
+%macro LOAD_SUMSUB_16P_SSE2 7 ; 2*dst, 2*tmp, mask, 2*ptr
+    movu  m%4, [%7]
+    mova  m%2, [%6]
+    DEINTB %1, %2, %3, %4, %5     ; deinterleave even/odd bytes of both rows
+    psubw m%1, m%3
+    psubw m%2, m%4
+    SUMSUB_BA w, %1, %2, %3       ; butterfly: sum and difference
+%endmacro
+
+%macro LOAD_SUMSUB_16x4P 10-13 r0, r2, none
+; 8x dest, 1x tmp, 1x mul, [2* ptr] [2nd tmp]
+    LOAD_SUMSUB_16P %1, %5, %2, %3, %10, %11, %12
+    LOAD_SUMSUB_16P %2, %6, %3, %4, %10, %11+r1, %12+r3
+    LOAD_SUMSUB_16P %3, %7, %4, %9, %10, %11+2*r1, %12+2*r3
+    LOAD_SUMSUB_16P %4, %8, %13, %9, %10, %11+r4, %12+r5
+%endmacro
+
+%macro LOAD_SUMSUB_16x2P_AVX2 9
+; 2*dst, 2*tmp, mul, 4*ptr
+    vbroadcasti128 m%1, [%6]
+    vbroadcasti128 m%3, [%7]
+    vbroadcasti128 m%2, [%8]
+    vbroadcasti128 m%4, [%9]
+    DIFF_SUMSUB_SSSE3 %1, %3, %2, %4, %5
+%endmacro
+
+%macro LOAD_SUMSUB_16x4P_AVX2 7-11 r0, r2, 0, 0
+; 4x dest, 2x tmp, 1x mul, [2* ptr], [increment?]
+    LOAD_SUMSUB_16x2P_AVX2 %1, %2, %5, %6, %7, %8, %9, %8+r1, %9+r3
+    LOAD_SUMSUB_16x2P_AVX2 %3, %4, %5, %6, %7, %8+2*r1, %9+2*r3, %8+r4, %9+r5
+%if %10
+    lea  %8, [%8+4*r1]
+    lea  %9, [%9+4*r3]
+%endif
+%endmacro
+
+%macro LOAD_DUP_4x16P_AVX2 8 ; 4*dst, 4*pointer
+    mova  xm%3, %6
+    mova  xm%4, %8
+    mova  xm%1, %5
+    mova  xm%2, %7
+    vpermq m%3, m%3, q0011
+    vpermq m%4, m%4, q0011
+    vpermq m%1, m%1, q0011
+    vpermq m%2, m%2, q0011
+%endmacro
+
+%macro LOAD_SUMSUB8_16x2P_AVX2 9
+; 2*dst, 2*tmp, mul, 4*ptr
+; Duplicate four rows across ymm lanes, then difference them in one
+; DIFF_SUMSUB_SSSE3 pass (8-pixel-wide variant of LOAD_SUMSUB_16x2P_AVX2).
+    LOAD_DUP_4x16P_AVX2 %1, %2, %3, %4, %6, %7, %8, %9
+    DIFF_SUMSUB_SSSE3 %1, %3, %2, %4, %5
+%endmacro
+
+%macro LOAD_SUMSUB8_16x4P_AVX2 7-11 r0, r2, 0, 0
+; 4x dest, 2x tmp, 1x mul, [2* ptr], [increment?]
+; Four rows, two per call; optional %10 flag advances both pointers by
+; 4 rows afterwards (same pattern as LOAD_SUMSUB_16x4P_AVX2).
+    LOAD_SUMSUB8_16x2P_AVX2 %1, %2, %5, %6, %7, [%8], [%9], [%8+r1], [%9+r3]
+    LOAD_SUMSUB8_16x2P_AVX2 %3, %4, %5, %6, %7, [%8+2*r1], [%9+2*r3], [%8+r4], [%9+r5]
+%if %10
+    lea  %8, [%8+4*r1]
+    lea  %9, [%9+4*r3]
+%endif
+%endmacro
+
+; in: r4=3*stride1, r5=3*stride2
+; in: %2 = horizontal offset
+; in: %3 = whether we need to increment pix1 and pix2
+; clobber: m3..m7
+; out: %1 = satd
+%macro SATD_4x4_MMX 3
+    %xdefine %%n n%1
+    %assign offset %2*SIZEOF_PIXEL
+    ; load the 4x4 residual (pix1 - pix2) into m4..m7, one row per register
+    LOAD_DIFF m4, m3, none, [r0+     offset], [r2+     offset]
+    LOAD_DIFF m5, m3, none, [r0+  r1+offset], [r2+  r3+offset]
+    LOAD_DIFF m6, m3, none, [r0+2*r1+offset], [r2+2*r3+offset]
+    LOAD_DIFF m7, m3, none, [r0+  r4+offset], [r2+  r5+offset]
+%if %3
+    ; caller asked us to advance both pixel pointers by 4 rows
+    lea  r0, [r0+4*r1]
+    lea  r2, [r2+4*r3]
+%endif
+    ; 4x4 2D Hadamard transform over m4..m7 (m3 is scratch)
+    HADAMARD4_2D 4, 5, 6, 7, 3, %%n
+    ; fold the two partial-result registers together
+    paddw m4, m6
+; disabled dword-widening path for 12-bit depth (word accumulation is used
+; unconditionally here)
+;%if HIGH_BIT_DEPTH && (BIT_DEPTH == 12)
+;    pxor m5, m5
+;    punpcklwd m6, m4, m5
+;    punpckhwd m4, m5
+;    paddd m4, m6
+;%endif
+    ; move the accumulated SATD into the caller-requested register %1
+    SWAP %%n, 4
+%endmacro
+
+; in: %1 = horizontal if 0, vertical if 1
+; SATD of one 8x4 difference block held in m%2..m%5, accumulated into m%8.
+; %1 selects the transform layout: vertical uses a full HADAMARD4_2D_SSE with
+; absolute-max combine; horizontal does HADAMARD4_V plus explicit ABSW2 and a
+; final max-combine HADAMARD pass.  The optional 9th arg "swap" moves the
+; result into m%8 instead of adding to it (first block of an accumulation).
+; For 12-bit depth the word sums are widened to dwords before accumulating to
+; avoid overflowing 16-bit lanes; otherwise plain paddw is used.
+%macro SATD_8x4_SSE 8-9
+%if %1
+    HADAMARD4_2D_SSE %2, %3, %4, %5, %6, amax
+%else
+    HADAMARD4_V %2, %3, %4, %5, %6
+    ; doing the abs first is a slight advantage
+    ABSW2 m%2, m%4, m%2, m%4, m%6, m%7
+    ABSW2 m%3, m%5, m%3, m%5, m%6, m%7
+    HADAMARD 1, max, %2, %4, %6, %7
+%endif
+%ifnidn %9, swap
+  %if (BIT_DEPTH == 12)
+    ; widen words to dwords, then accumulate
+    pxor m%6, m%6
+    punpcklwd m%7, m%2, m%6
+    punpckhwd m%2, m%6
+    paddd m%8, m%7
+    paddd m%8, m%2
+  %else
+    paddw m%8, m%2
+  %endif
+%else
+    ; "swap": initialize the accumulator from this block's result
+    SWAP %8, %2
+  %if (BIT_DEPTH == 12)
+    pxor m%6, m%6
+    punpcklwd m%7, m%8, m%6
+    punpckhwd m%8, m%6
+    paddd m%8, m%7
+  %endif
+%endif
+%if %1
+  ; vertical path: second half of the result is still in m%4
+  %if (BIT_DEPTH == 12)
+    pxor m%6, m%6
+    punpcklwd m%7, m%4, m%6
+    punpckhwd m%4, m%6
+    paddd m%8, m%7
+    paddd m%8, m%4
+  %else
+    paddw m%8, m%4
+  %endif
+%else
+    ; horizontal path: combine the remaining pair, then accumulate m%3
+    HADAMARD 1, max, %3, %5, %6, %7
+  %if (BIT_DEPTH == 12)
+    pxor m%6, m%6
+    punpcklwd m%7, m%3, m%6
+    punpckhwd m%3, m%6
+    paddd m%8, m%7
+    paddd m%8, m%3
+  %else
+    paddw m%8, m%3
+  %endif
+%endif
+%endmacro
+
+%macro SATD_8x4_1_SSE 10
+; Variant of SATD_8x4_SSE that ALWAYS accumulates into a dword accumulator
+; m%8 (no 16-bit overflow regardless of bit depth); m%9/m%10 are scratch for
+; the word->dword widening.  %1 selects vertical/horizontal as above.
+%if %1
+    HADAMARD4_2D_SSE %2, %3, %4, %5, %6, amax
+%else
+    HADAMARD4_V %2, %3, %4, %5, %6
+    ; doing the abs first is a slight advantage
+    ABSW2 m%2, m%4, m%2, m%4, m%6, m%7
+    ABSW2 m%3, m%5, m%3, m%5, m%6, m%7
+    HADAMARD 1, max, %2, %4, %6, %7
+%endif
+
+    ; widen m%2 to dwords and add into the accumulator
+    pxor m%10, m%10
+    punpcklwd m%9, m%2, m%10
+    paddd m%8, m%9
+    punpckhwd m%9, m%2, m%10
+    paddd m%8, m%9
+
+%if %1
+    ; vertical path: also accumulate the second result register m%4
+    pxor m%10, m%10
+    punpcklwd m%9, m%4, m%10
+    paddd m%8, m%9
+    punpckhwd m%9, m%4, m%10
+    paddd m%8, m%9
+%else
+    ; horizontal path: combine the remaining pair, then accumulate m%3
+    HADAMARD 1, max, %3, %5, %6, %7
+    pxor m%10, m%10
+    punpcklwd m%9, m%3, m%10
+    paddd m%8, m%9
+    punpckhwd m%9, m%3, m%10
+    paddd m%8, m%9
+%endif
+%endmacro
+
+%macro SATD_START_MMX 0
+; Common prologue: scale strides for the pixel size and precompute 3*stride
+; in r4/r5 for the row-addressing used by the SATD loaders.
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1] ; 3*stride1
+    lea  r5, [3*r3] ; 3*stride2
+%endmacro
+
+%macro SATD_END_MMX 0
+; Epilogue: horizontally reduce the accumulator m0 to a scalar in eax.
+%if HIGH_BIT_DEPTH
+    HADDUW      m0, m1
+    movd       eax, m0
+%else ; !HIGH_BIT_DEPTH
+    ; 8-bit path: pairwise shuffles+adds reduce the four words of m0,
+    ; then mask the result to 16 bits
+    pshufw      m1, m0, q1032
+    paddw       m0, m1
+    pshufw      m1, m0, q2301
+    paddw       m0, m1
+    movd       eax, m0
+    and        eax, 0xffff
+%endif ; HIGH_BIT_DEPTH
+    EMMS
+    RET
+%endmacro
+
+; FIXME avoid the spilling of regs to hold 3*stride.
+; for small blocks on x86_32, modify pixel pointer instead.
+
+;-----------------------------------------------------------------------------
+; int pixel_satd_4x4( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_MMX mmx2
+cglobal pixel_satd_4x4, 4,6
+    SATD_START_MMX
+    SATD_4x4_MMX m0, 0, 0
+    SATD_END_MMX
+
+%macro SATD_START_SSE2 2-3 0
+; SSE2+ prologue: %1 = accumulator register (zeroed), %2 = constant register.
+; For high bit depth with the optional %3 flag set, %2 is zeroed instead of
+; being loaded with the hmul constant; on ssse3 (non-atom) %2 gets the
+; hmul_8p/hmul_16p multiplier (16p for ymm-width builds).
+    FIX_STRIDES r1, r3
+%if HIGH_BIT_DEPTH && %3
+    pxor    %2, %2
+%elif cpuflag(ssse3) && notcpuflag(atom)
+%if mmsize==32
+    mova    %2, [hmul_16p]
+%else
+    mova    %2, [hmul_8p]
+%endif
+%endif
+    lea     r4, [3*r1]
+    lea     r5, [3*r3]
+    pxor    %1, %1
+%endmacro
+
+%macro SATD_END_SSE2 1-2
+; Epilogue: horizontally reduce accumulator %1 into eax.  12-bit depth
+; accumulates dwords (HADDD); other HBD depths use unsigned-word HADDUW;
+; 8-bit uses HADDW.  Optional %2 is an extra partial sum to fold in.
+%if HIGH_BIT_DEPTH
+  %if BIT_DEPTH == 12
+    HADDD   %1, xm0
+  %else ; BIT_DEPTH != 12
+    HADDUW  %1, xm0
+  %endif ; BIT_DEPTH == 12
+  %if %0 == 2
+    paddd   %1, %2
+  %endif
+%else
+    HADDW   %1, xm7
+%endif
+    movd   eax, %1
+    RET
+%endmacro
+
+%macro SATD_ACCUM 3
+; High-bit-depth only: flush the word accumulator %1 into the dword
+; accumulator %3 (via HADDUW with scratch %2) and clear %1, so long runs
+; cannot overflow 16-bit lanes.  No-op for 8-bit builds.
+%if HIGH_BIT_DEPTH
+    HADDUW %1, %2
+    paddd  %3, %1
+    pxor   %1, %1
+%endif
+%endmacro
+
+%macro BACKUP_POINTERS 0
+; On x86-64, save the two pixel pointers (r0/r2) into r6/r7 so a later
+; RESTORE_AND_INC_POINTERS can move to the next 8-pixel column.  WIN64 must
+; preserve r7 (callee-saved there), hence the PUSH.  x86-32 keeps nothing:
+; the restore path reloads the pointers from the stack arguments instead.
+%if ARCH_X86_64
+%if WIN64
+    PUSH r7
+%endif
+    mov     r6, r0
+    mov     r7, r2
+%endif
+%endmacro
+
+%macro RESTORE_AND_INC_POINTERS 0
+; Counterpart of BACKUP_POINTERS: reset r0/r2 to the saved base pointers
+; advanced by 8 pixels (next column).  On x86-32 the bases are reloaded from
+; the stack arguments (r0mp/r2mp) since no registers were spared to hold them.
+%if ARCH_X86_64
+    lea     r0, [r6+8*SIZEOF_PIXEL]
+    lea     r2, [r7+8*SIZEOF_PIXEL]
+%if WIN64
+    POP r7
+%endif
+%else
+    mov     r0, r0mp
+    mov     r2, r2mp
+    add     r0, 8*SIZEOF_PIXEL
+    add     r2, 8*SIZEOF_PIXEL
+%endif
+%endmacro
+
+%macro SATD_4x8_SSE 3-4
+; SATD of a 4x8 block, packing two 4-pixel rows into each xmm register so the
+; 8-wide SATD kernels can be reused.  %1 = vertical flag, %2 = whether the
+; pointers were already advanced by the caller (affects nothing here beyond
+; the hmul constant choice), %3/%4 forwarded to SATD_8x4(_1)_SSE.
+%if HIGH_BIT_DEPTH
+    ; 16-bit pixels: load rows as qwords, pair row i with row i+4 via movhps,
+    ; and difference pix1-pix2 with psubw as pairs complete
+    movh    m0, [r0+0*r1]
+    movh    m4, [r2+0*r3]
+    movh    m1, [r0+1*r1]
+    movh    m5, [r2+1*r3]
+    movhps  m0, [r0+4*r1]
+    movhps  m4, [r2+4*r3]
+    movh    m2, [r0+2*r1]
+    movh    m6, [r2+2*r3]
+    psubw   m0, m4
+    movh    m3, [r0+r4]
+    movh    m4, [r2+r5]
+    lea     r0, [r0+4*r1]
+    lea     r2, [r2+4*r3]
+    movhps  m1, [r0+1*r1]
+    movhps  m5, [r2+1*r3]
+    movhps  m2, [r0+2*r1]
+    movhps  m6, [r2+2*r3]
+    psubw   m1, m5
+    movhps  m3, [r0+r4]
+    movhps  m4, [r2+r5]
+    psubw   m2, m6
+    psubw   m3, m4
+%else ; !HIGH_BIT_DEPTH
+    ; 8-bit pixels: load 4-pixel rows as dwords and join row pairs with JDUP,
+    ; then difference via DIFFOP (using hmul_4p when the constant register m7
+    ; is not preloaded, i.e. %1==0 && %2==1)
+    movd m4, [r2]
+    movd m5, [r2+r3]
+    movd m6, [r2+2*r3]
+    add r2, r5
+    movd m0, [r0]
+    movd m1, [r0+r1]
+    movd m2, [r0+2*r1]
+    add r0, r4
+    movd m3, [r2+r3]
+    JDUP m4, m3
+    movd m3, [r0+r1]
+    JDUP m0, m3
+    movd m3, [r2+2*r3]
+    JDUP m5, m3
+    movd m3, [r0+2*r1]
+    JDUP m1, m3
+%if %1==0 && %2==1
+    mova m3, [hmul_4p]
+    DIFFOP 0, 4, 1, 5, 3
+%else
+    DIFFOP 0, 4, 1, 5, 7
+%endif
+    ; second set of four rows
+    movd m5, [r2]
+    add r2, r5
+    movd m3, [r0]
+    add r0, r4
+    movd m4, [r2]
+    JDUP m6, m4
+    movd m4, [r0]
+    JDUP m2, m4
+    movd m4, [r2+r3]
+    JDUP m5, m4
+    movd m4, [r0+r1]
+    JDUP m3, m4
+%if %1==0 && %2==1
+    mova m4, [hmul_4p]
+    DIFFOP 2, 6, 3, 5, 4
+%else
+    DIFFOP 2, 6, 3, 5, 7
+%endif
+%endif ; HIGH_BIT_DEPTH
+    ; with 4 args use the dword-accumulating kernel, else the word one
+%if %0 == 4
+    SATD_8x4_1_SSE %1, 0, 1, 2, 3, 4, 5, 7, %3, %4
+%else
+    SATD_8x4_SSE %1, 0, 1, 2, 3, 4, 5, 7, %3
+%endif
+%endmacro
+
+;-----------------------------------------------------------------------------
+; int pixel_satd_8x4( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+%macro SATDS_SSE2 0
+%define vertical ((notcpuflag(ssse3) || cpuflag(atom)) || HIGH_BIT_DEPTH)
+
+%if cpuflag(ssse3) && (vertical==0 || HIGH_BIT_DEPTH)
+cglobal pixel_satd_4x4, 4, 6, 6
+    SATD_START_MMX
+    mova m4, [hmul_4p]
+    LOAD_DUP_2x4P m2, m5, [r2], [r2+r3]
+    LOAD_DUP_2x4P m3, m5, [r2+2*r3], [r2+r5]
+    LOAD_DUP_2x4P m0, m5, [r0], [r0+r1]
+    LOAD_DUP_2x4P m1, m5, [r0+2*r1], [r0+r4]
+    DIFF_SUMSUB_SSSE3 0, 2, 1, 3, 4
+    HADAMARD 0, sumsub, 0, 1, 2, 3
+    HADAMARD 4, sumsub, 0, 1, 2, 3
+    HADAMARD 1, amax, 0, 1, 2, 3
+    HADDW m0, m1
+    movd eax, m0
+    RET
+%endif
+
+cglobal pixel_satd_4x8, 4, 6, 8
+    SATD_START_MMX
+%if vertical==0
+    mova m7, [hmul_4p]
+%endif
+    SATD_4x8_SSE vertical, 0, swap
+%if BIT_DEPTH == 12
+    HADDD m7, m1
+%else
+    HADDUW m7, m1
+%endif
+    movd eax, m7
+    RET
+
+cglobal pixel_satd_4x16, 4, 6, 8
+    SATD_START_MMX
+%if vertical==0
+    mova m7, [hmul_4p]
+%endif
+    SATD_4x8_SSE vertical, 0, swap
+    lea r0, [r0+r1*2*SIZEOF_PIXEL]
+    lea r2, [r2+r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+%if BIT_DEPTH == 12
+    HADDD m7, m1
+%else
+    HADDUW m7, m1
+%endif
+    movd eax, m7
+    RET
+
+cglobal pixel_satd_8x8_internal
+    LOAD_SUMSUB_8x4P 0, 1, 2, 3, 4, 5, 7, r0, r2, 1, 0
+    SATD_8x4_SSE vertical, 0, 1, 2, 3, 4, 5, 6
+%%pixel_satd_8x4_internal:
+    LOAD_SUMSUB_8x4P 0, 1, 2, 3, 4, 5, 7, r0, r2, 1, 0
+    SATD_8x4_SSE vertical, 0, 1, 2, 3, 4, 5, 6
+    ret
+
+cglobal pixel_satd_8x8_internal2
+%if WIN64
+    LOAD_SUMSUB_8x4P 0, 1, 2, 3, 4, 5, 7, r0, r2, 1, 0
+    SATD_8x4_1_SSE vertical, 0, 1, 2, 3, 4, 5, 6, 12, 13
+%%pixel_satd_8x4_internal2:
+    LOAD_SUMSUB_8x4P 0, 1, 2, 3, 4, 5, 7, r0, r2, 1, 0
+    SATD_8x4_1_SSE vertical, 0, 1, 2, 3, 4, 5, 6, 12, 13
+%else
+    LOAD_SUMSUB_8x4P 0, 1, 2, 3, 4, 5, 7, r0, r2, 1, 0
+    SATD_8x4_1_SSE vertical, 0, 1, 2, 3, 4, 5, 6, 4, 5
+%%pixel_satd_8x4_internal2:
+    LOAD_SUMSUB_8x4P 0, 1, 2, 3, 4, 5, 7, r0, r2, 1, 0
+    SATD_8x4_1_SSE vertical, 0, 1, 2, 3, 4, 5, 6, 4, 5
+%endif
+    ret
+
+; 16x8 regresses on phenom win64, 16x16 is almost the same (too many spilled registers)
+; These aren't any faster on AVX systems with fast movddup (Bulldozer, Sandy Bridge)
+%if HIGH_BIT_DEPTH == 0 && (WIN64 || UNIX64) && notcpuflag(avx)
+
+cglobal pixel_satd_16x4_internal2
+    LOAD_SUMSUB_16x4P 0, 1, 2, 3, 4, 8, 5, 9, 6, 7, r0, r2, 11
+    lea  r2, [r2+4*r3]
+    lea  r0, [r0+4*r1]
+    SATD_8x4_1_SSE 0, 0, 1, 2, 3, 6, 11, 10, 12, 13
+    SATD_8x4_1_SSE 0, 4, 8, 5, 9, 6, 3, 10, 12, 13
+    ret
+
+cglobal pixel_satd_16x4, 4,6,14
+    SATD_START_SSE2 m10, m7
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+cglobal pixel_satd_16x8, 4,6,14
+    SATD_START_SSE2 m10, m7
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    jmp %%pixel_satd_16x8_internal
+
+cglobal pixel_satd_16x12, 4,6,14
+    SATD_START_SSE2 m10, m7
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    jmp %%pixel_satd_16x8_internal
+
+cglobal pixel_satd_16x32, 4,6,14
+    SATD_START_SSE2 m10, m7
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    jmp %%pixel_satd_16x8_internal
+
+cglobal pixel_satd_16x64, 4,6,14
+    SATD_START_SSE2 m10, m7
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    jmp %%pixel_satd_16x8_internal
+
+cglobal pixel_satd_16x16, 4,6,14
+    SATD_START_SSE2 m10, m7
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+%%pixel_satd_16x8_internal:
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+cglobal pixel_satd_32x8, 4,8,14    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+cglobal pixel_satd_32x16, 4,8,14    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    HADDD m10, m0
+    movd    eax, m10
+    RET
+
+cglobal pixel_satd_32x24, 4,8,14    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+cglobal pixel_satd_32x32, 4,8,14    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+cglobal pixel_satd_32x64, 4,8,14    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+cglobal pixel_satd_48x64, 4,8,14    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 32]
+    lea r2, [r7 + 32]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+cglobal pixel_satd_64x16, 4,8,14    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 32]
+    lea r2, [r7 + 32]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 48]
+    lea r2, [r7 + 48]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+cglobal pixel_satd_64x32, 4,8,14    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 32]
+    lea r2, [r7 + 32]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 48]
+    lea r2, [r7 + 48]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+cglobal pixel_satd_64x48, 4,8,14    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 32]
+    lea r2, [r7 + 32]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 48]
+    lea r2, [r7 + 48]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+cglobal pixel_satd_64x64, 4,8,14    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 32]
+    lea r2, [r7 + 32]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    lea r0, [r6 + 48]
+    lea r2, [r7 + 48]
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+    call pixel_satd_16x4_internal2
+
+    HADDD m10, m0
+    movd eax, m10
+    RET
+
+%else
+%if WIN64
+cglobal pixel_satd_16x24, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd   eax, m6
+    RET
+%else
+cglobal pixel_satd_16x24, 4,7,8,0-gprsize    ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+%if WIN64
+cglobal pixel_satd_32x48, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_32x48, 4,7,8,0-gprsize    ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_24x64, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_24x64, 4,7,8,0-gprsize    ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_8x64, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_8x64, 4,7,8,0-gprsize    ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_8x12, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call %%pixel_satd_8x4_internal2
+    pxor    m7, m7
+    movhlps m7, m6
+    paddd   m6, m7
+    pshufd  m7, m6, 1
+    paddd   m6, m7
+    movd   eax, m6
+    RET
+%else
+cglobal pixel_satd_8x12, 4,7,8,0-gprsize    ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call %%pixel_satd_8x4_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if HIGH_BIT_DEPTH
+%if WIN64
+cglobal pixel_satd_12x32, 4,8,8   ;if WIN64 && cpuflag(avx)
+    SATD_START_MMX
+    mov r6, r0
+    mov r7, r2
+    pxor m7, m7
+    SATD_4x8_SSE vertical, 0, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r6 + 4*SIZEOF_PIXEL]
+    lea r2, [r7 + 4*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    HADDD m7, m0
+    movd eax, m7
+    RET
+%else
+cglobal pixel_satd_12x32, 4,7,8,0-gprsize
+    SATD_START_MMX
+    mov r6, r0
+    mov [rsp], r2
+    pxor m7, m7
+    SATD_4x8_SSE vertical, 0, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r6 + 4*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 4*SIZEOF_PIXEL
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    HADDD m7, m0
+    movd eax, m7
+    RET
+%endif
+%else ;HIGH_BIT_DEPTH
+%if WIN64
+cglobal pixel_satd_12x32, 4,8,8   ;if WIN64 && cpuflag(avx)
+    SATD_START_MMX
+    mov r6, r0
+    mov r7, r2
+%if vertical==0
+    mova m7, [hmul_4p]
+%endif
+    SATD_4x8_SSE vertical, 0, swap
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r6 + 4*SIZEOF_PIXEL]
+    lea r2, [r7 + 4*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    HADDW m7, m1
+    movd eax, m7
+    RET
+%else
+cglobal pixel_satd_12x32, 4,7,8,0-gprsize
+    SATD_START_MMX
+    mov r6, r0
+    mov [rsp], r2
+%if vertical==0
+    mova m7, [hmul_4p]
+%endif
+    SATD_4x8_SSE vertical, 0, swap
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r6 + 4*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 4*SIZEOF_PIXEL
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    HADDW m7, m1
+    movd eax, m7
+    RET
+%endif
+%endif
+
+%if HIGH_BIT_DEPTH
+%if WIN64
+cglobal pixel_satd_4x32, 4,8,8   ;if WIN64 && cpuflag(avx)
+    SATD_START_MMX
+    mov r6, r0
+    mov r7, r2
+    pxor m7, m7
+    SATD_4x8_SSE vertical, 0, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    HADDD m7, m0
+    movd eax, m7
+    RET
+%else
+cglobal pixel_satd_4x32, 4,7,8,0-gprsize
+    SATD_START_MMX
+    mov r6, r0
+    mov [rsp], r2
+    pxor m7, m7
+    SATD_4x8_SSE vertical, 0, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    pxor    m1, m1
+    movhlps m1, m7
+    paddd   m7, m1
+    pshufd  m1, m7, 1
+    paddd   m7, m1
+    movd   eax, m7
+    RET
+%endif
+%else
+%if WIN64
+cglobal pixel_satd_4x32, 4,8,8   ;if WIN64 && cpuflag(avx)
+    SATD_START_MMX
+    mov r6, r0
+    mov r7, r2
+%if vertical==0
+    mova m7, [hmul_4p]
+%endif
+    SATD_4x8_SSE vertical, 0, swap
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    HADDW m7, m1
+    movd eax, m7
+    RET
+%else
+cglobal pixel_satd_4x32, 4,7,8,0-gprsize
+    SATD_START_MMX
+    mov r6, r0
+    mov [rsp], r2
+%if vertical==0
+    mova m7, [hmul_4p]
+%endif
+    SATD_4x8_SSE vertical, 0, swap
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    HADDW m7, m1
+    movd eax, m7
+    RET
+%endif
+%endif
+
+%if WIN64
+cglobal pixel_satd_32x8, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_32x8, 4,7,8,0-gprsize    ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_32x16, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_32x16, 4,7,8,0-gprsize   ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_32x24, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_32x24, 4,7,8,0-gprsize   ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_32x32, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_32x32, 4,7,8,0-gprsize   ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_32x64, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_32x64, 4,7,8,0-gprsize   ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_48x64, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 32*SIZEOF_PIXEL]
+    lea r2, [r7 + 32*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 40*SIZEOF_PIXEL]
+    lea r2, [r7 + 40*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_48x64, 4,7,8,0-gprsize   ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 32*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,32*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 40*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,40*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+
+%if WIN64
+cglobal pixel_satd_64x16, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 32*SIZEOF_PIXEL]
+    lea r2, [r7 + 32*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 40*SIZEOF_PIXEL]
+    lea r2, [r7 + 40*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 48*SIZEOF_PIXEL]
+    lea r2, [r7 + 48*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 56*SIZEOF_PIXEL]
+    lea r2, [r7 + 56*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_64x16, 4,7,8,0-gprsize   ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 32*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,32*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 40*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,40*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 48*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,48*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 56*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2,56*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_64x32, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 32*SIZEOF_PIXEL]
+    lea r2, [r7 + 32*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 40*SIZEOF_PIXEL]
+    lea r2, [r7 + 40*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 48*SIZEOF_PIXEL]
+    lea r2, [r7 + 48*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 56*SIZEOF_PIXEL]
+    lea r2, [r7 + 56*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_64x32, 4,7,8,0-gprsize   ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 32*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 32*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 40*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 40*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 48*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 48*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 56*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 56*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_64x48, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 32*SIZEOF_PIXEL]
+    lea r2, [r7 + 32*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 40*SIZEOF_PIXEL]
+    lea r2, [r7 + 40*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 48*SIZEOF_PIXEL]
+    lea r2, [r7 + 48*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 56*SIZEOF_PIXEL]
+    lea r2, [r7 + 56*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_64x48, 4,7,8,0-gprsize   ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 32*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 32*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 40*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 40*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 48*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 48*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 56*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 56*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_64x64, 4,8,14    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    lea r2, [r7 + 24*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 32*SIZEOF_PIXEL]
+    lea r2, [r7 + 32*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 40*SIZEOF_PIXEL]
+    lea r2, [r7 + 40*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 48*SIZEOF_PIXEL]
+    lea r2, [r7 + 48*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 56*SIZEOF_PIXEL]
+    lea r2, [r7 + 56*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_64x64, 4,7,8,0-gprsize   ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 24*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 24*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 32*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 32*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 40*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 40*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 48*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 48*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 56*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 56*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if WIN64
+cglobal pixel_satd_16x4, 4,6,14
+%else
+cglobal pixel_satd_16x4, 4,6,8
+%endif
+    SATD_START_SSE2 m6, m7
+    BACKUP_POINTERS
+    call %%pixel_satd_8x4_internal2
+    RESTORE_AND_INC_POINTERS
+    call %%pixel_satd_8x4_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+
+%if WIN64
+cglobal pixel_satd_16x8, 4,6,14
+%else
+cglobal pixel_satd_16x8, 4,6,8
+%endif
+    SATD_START_SSE2 m6, m7
+    BACKUP_POINTERS
+    call pixel_satd_8x8_internal2
+    RESTORE_AND_INC_POINTERS
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+
+%if WIN64
+cglobal pixel_satd_16x12, 4,6,14
+%else
+cglobal pixel_satd_16x12, 4,6,8
+%endif
+    SATD_START_SSE2 m6, m7, 1
+    BACKUP_POINTERS
+    call pixel_satd_8x8_internal2
+    call %%pixel_satd_8x4_internal2
+    RESTORE_AND_INC_POINTERS
+    call pixel_satd_8x8_internal2
+    call %%pixel_satd_8x4_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+
+%if WIN64
+cglobal pixel_satd_16x16, 4,6,14
+%else
+cglobal pixel_satd_16x16, 4,6,8
+%endif
+    SATD_START_SSE2 m6, m7, 1
+    BACKUP_POINTERS
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    RESTORE_AND_INC_POINTERS
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+
+%if WIN64
+cglobal pixel_satd_16x32, 4,6,14
+%else
+cglobal pixel_satd_16x32, 4,6,8
+%endif
+    SATD_START_SSE2 m6, m7, 1
+    BACKUP_POINTERS
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    RESTORE_AND_INC_POINTERS
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+
+%if WIN64
+cglobal pixel_satd_16x64, 4,6,14
+%else
+cglobal pixel_satd_16x64, 4,6,8
+%endif
+    SATD_START_SSE2 m6, m7, 1
+    BACKUP_POINTERS
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    RESTORE_AND_INC_POINTERS
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif
+
+%if HIGH_BIT_DEPTH
+%if WIN64
+cglobal pixel_satd_12x16, 4,8,8
+    SATD_START_MMX
+    mov r6, r0
+    mov r7, r2
+    pxor m7, m7
+    SATD_4x8_SSE vertical, 0, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r6 + 4*SIZEOF_PIXEL]
+    lea r2, [r7 + 4*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    HADDD m7, m0
+    movd eax, m7
+    RET
+%else
+cglobal pixel_satd_12x16, 4,7,8,0-gprsize
+    SATD_START_MMX
+    mov r6, r0
+    mov [rsp], r2
+    pxor m7, m7
+    SATD_4x8_SSE vertical, 0, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r6 + 4*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 4*SIZEOF_PIXEL
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    SATD_4x8_SSE vertical, 1, 4, 5
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, 4, 5
+    HADDD m7, m0
+    movd eax, m7
+    RET
+%endif
+%else    ;HIGH_BIT_DEPTH
+%if WIN64
+cglobal pixel_satd_12x16, 4,8,8
+    SATD_START_MMX
+    mov r6, r0
+    mov r7, r2
+%if vertical==0
+    mova m7, [hmul_4p]
+%endif
+    SATD_4x8_SSE vertical, 0, swap
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r6 + 4*SIZEOF_PIXEL]
+    lea r2, [r7 + 4*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    HADDW m7, m1
+    movd eax, m7
+    RET
+%else
+cglobal pixel_satd_12x16, 4,7,8,0-gprsize
+    SATD_START_MMX
+    mov r6, r0
+    mov [rsp], r2
+%if vertical==0
+    mova m7, [hmul_4p]
+%endif
+    SATD_4x8_SSE vertical, 0, swap
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r6 + 4*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 4*SIZEOF_PIXEL
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    SATD_4x8_SSE vertical, 1, add
+    lea r0, [r0 + r1*2*SIZEOF_PIXEL]
+    lea r2, [r2 + r3*2*SIZEOF_PIXEL]
+    SATD_4x8_SSE vertical, 1, add
+    HADDW m7, m1
+    movd eax, m7
+    RET
+%endif
+%endif
+
+%if WIN64
+cglobal pixel_satd_24x32, 4,8,14
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    lea r2, [r7 + 8*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    lea r2, [r7 + 16*SIZEOF_PIXEL]
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%else
+cglobal pixel_satd_24x32, 4,7,8,0-gprsize
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov [rsp], r2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 8*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 8*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    lea r0, [r6 + 16*SIZEOF_PIXEL]
+    mov r2, [rsp]
+    add r2, 16*SIZEOF_PIXEL
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+%endif    ;WIN64
+
+%if WIN64
+cglobal pixel_satd_8x32, 4,6,14
+%else
+cglobal pixel_satd_8x32, 4,6,8
+%endif
+    SATD_START_SSE2 m6, m7
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+
+%if WIN64
+cglobal pixel_satd_8x16, 4,6,14
+%else
+cglobal pixel_satd_8x16, 4,6,8
+%endif
+    SATD_START_SSE2 m6, m7
+    call pixel_satd_8x8_internal2
+    call pixel_satd_8x8_internal2
+    HADDD m6, m0
+    movd eax, m6
+    RET
+
+cglobal pixel_satd_8x8, 4,6,8
+    SATD_START_SSE2 m6, m7
+    call pixel_satd_8x8_internal
+    SATD_END_SSE2 m6
+
+%if WIN64
+cglobal pixel_satd_8x4, 4,6,14
+%else
+cglobal pixel_satd_8x4, 4,6,8
+%endif
+    SATD_START_SSE2 m6, m7
+    call %%pixel_satd_8x4_internal2
+    SATD_END_SSE2 m6
+%endmacro ; SATDS_SSE2
+
+
+;=============================================================================
+; SA8D
+;=============================================================================
+
+%macro SA8D_INTER 0
+%if ARCH_X86_64
+    %define lh m10
+    %define rh m0
+%else
+    %define lh m0
+    %define rh [esp+48]
+%endif
+%if HIGH_BIT_DEPTH
+    HADDUW  m0, m1
+    paddd   lh, rh
+%else
+    paddusw lh, rh
+%endif ; HIGH_BIT_DEPTH
+%endmacro
+
+%macro SA8D_8x8 0
+    call pixel_sa8d_8x8_internal
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%else
+    HADDW m0, m1
+%endif ; HIGH_BIT_DEPTH
+    paddd  m0, [pd_1]
+    psrld  m0, 1
+    paddd  m12, m0
+%endmacro
+
+%macro SA8D_16x16 0
+    call pixel_sa8d_8x8_internal ; pix[0]
+    add  r2, 8*SIZEOF_PIXEL
+    add  r0, 8*SIZEOF_PIXEL
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova m10, m0
+    call pixel_sa8d_8x8_internal ; pix[8]
+    lea  r2, [r2+8*r3]
+    lea  r0, [r0+8*r1]
+    SA8D_INTER
+    call pixel_sa8d_8x8_internal ; pix[8*stride+8]
+    sub  r2, 8*SIZEOF_PIXEL
+    sub  r0, 8*SIZEOF_PIXEL
+    SA8D_INTER
+    call pixel_sa8d_8x8_internal ; pix[8*stride]
+    SA8D_INTER
+    SWAP 0, 10
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    paddd  m0, [pd_1]
+    psrld  m0, 1
+    paddd  m12, m0
+%endmacro
+
+%macro AVG_16x16 0
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+%endmacro
+
+%macro SA8D 0
+; sse2 doesn't seem to like the horizontal way of doing things
+%define vertical ((notcpuflag(ssse3) || cpuflag(atom)) || HIGH_BIT_DEPTH)
+
+%if ARCH_X86_64
+;-----------------------------------------------------------------------------
+; int pixel_sa8d_8x8( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sa8d_8x8_internal
+    lea  r6, [r0+4*r1]
+    lea  r7, [r2+4*r3]
+    LOAD_SUMSUB_8x4P 0, 1, 2, 8, 5, 6, 7, r0, r2
+    LOAD_SUMSUB_8x4P 4, 5, 3, 9, 11, 6, 7, r6, r7
+%if vertical
+    HADAMARD8_2D 0, 1, 2, 8, 4, 5, 3, 9, 6, amax
+%else ; non-sse2
+    HADAMARD8_2D_HMUL 0, 1, 2, 8, 4, 5, 3, 9, 6, 11
+%endif
+    paddw m0, m1
+    paddw m0, m2
+    paddw m0, m8
+    SAVE_MM_PERMUTATION
+    ret
+
+cglobal pixel_sa8d_8x8, 4,8,12
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    call pixel_sa8d_8x8_internal
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%else
+    HADDW m0, m1
+%endif ; HIGH_BIT_DEPTH
+    movd eax, m0
+    add eax, 1
+    shr eax, 1
+    RET
+
+cglobal pixel_sa8d_16x16, 4,8,12
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    call pixel_sa8d_8x8_internal ; pix[0]
+    add  r2, 8*SIZEOF_PIXEL
+    add  r0, 8*SIZEOF_PIXEL
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova m10, m0
+    call pixel_sa8d_8x8_internal ; pix[8]
+    lea  r2, [r2+8*r3]
+    lea  r0, [r0+8*r1]
+    SA8D_INTER
+    call pixel_sa8d_8x8_internal ; pix[8*stride+8]
+    sub  r2, 8*SIZEOF_PIXEL
+    sub  r0, 8*SIZEOF_PIXEL
+    SA8D_INTER
+    call pixel_sa8d_8x8_internal ; pix[8*stride]
+    SA8D_INTER
+    SWAP 0, 10
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd eax, m0
+    add  eax, 1
+    shr  eax, 1
+    RET
+
+cglobal pixel_sa8d_8x16, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_8x8
+    lea r0, [r0 + 8*r1]
+    lea r2, [r2 + 8*r3]
+    SA8D_8x8
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_8x32, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_8x8
+    lea r0, [r0 + r1*8]
+    lea r2, [r2 + r3*8]
+    SA8D_8x8
+    lea r0, [r0 + r1*8]
+    lea r2, [r2 + r3*8]
+    SA8D_8x8
+    lea r0, [r0 + r1*8]
+    lea r2, [r2 + r3*8]
+    SA8D_8x8
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_16x8, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_16x32, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_16x64, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_24x32, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    lea r0, [r0 + r1*8]
+    lea r2, [r2 + r3*8]
+    SA8D_8x8
+    sub r0, 8*SIZEOF_PIXEL
+    sub r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    sub r0, 8*SIZEOF_PIXEL
+    sub r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    lea r0, [r0 + r1*8]
+    lea r2, [r2 + r3*8]
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    lea r0, [r0 + r1*8]
+    lea r2, [r2 + r3*8]
+    SA8D_8x8
+    sub r0, 8*SIZEOF_PIXEL
+    sub r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    sub r0, 8*SIZEOF_PIXEL
+    sub r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_32x8, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_32x16, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_32x24, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    lea r0, [r0 + r1*8]
+    lea r2, [r2 + r3*8]
+    SA8D_8x8
+    sub r0, 8*SIZEOF_PIXEL
+    sub r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    sub r0, 8*SIZEOF_PIXEL
+    sub r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    sub r0, 8*SIZEOF_PIXEL
+    sub r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    lea r0, [r0 + r1*8]
+    lea r2, [r2 + r3*8]
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_32x32, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_32x64, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_48x64, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_64x16, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_64x32, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_64x48, 4,8,13
+    FIX_STRIDES r1, r3
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    pxor m12, m12
+%if vertical == 0
+    mova m7, [hmul_8p]
+%endif
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea r0, [r0+8*r1]
+    lea r2, [r2+8*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16
+    movd eax, m12
+    RET
+
+cglobal pixel_sa8d_64x64, 4,8,13 ; sa8d(pix1=r0, stride1=r1, pix2=r2, stride2=r3); 4x4 grid of 16x16 tiles, serpentine order; m12 accumulates the rounded sums
+    FIX_STRIDES r1, r3 ; scale strides to bytes when SIZEOF_PIXEL > 1
+    lea  r4, [3*r1] ; r4/r5 = 3*stride, required by the 8x8 loads inside SA8D_16x16
+    lea  r5, [3*r3]
+    pxor m12, m12 ; zero the accumulator
+%if vertical == 0
+    mova m7, [hmul_8p] ; ssse3 path: pmaddubsw multiplier constant
+%endif
+    SA8D_16x16 ; tile (0,0)
+    lea  r4, [8*r1] ; step back 8 rows (SA8D_16x16 left the pointers advanced) ...
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL ; ... and move one 16-px tile to the right
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1] ; restore r4/r5 = 3*stride for the next tile
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (0,1)
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (0,2)
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (0,3)
+    lea r0, [r0+8*r1] ; drop down to the next 16-row band (pointers already 8 rows in)
+    lea r2, [r2+8*r3]
+    SA8D_16x16 ; tile (1,3) — second band is walked right-to-left
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL ; move one tile to the LEFT on this band
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (1,2)
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (1,1)
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (1,0)
+    lea r0, [r0+8*r1] ; down to band 2
+    lea r2, [r2+8*r3]
+    SA8D_16x16 ; tile (2,0) — band 2 walked left-to-right again
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (2,1)
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (2,2)
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    add  r2, 16*SIZEOF_PIXEL
+    add  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (2,3)
+    lea r0, [r0+8*r1] ; down to band 3
+    lea r2, [r2+8*r3]
+    SA8D_16x16 ; tile (3,3) — final band walked right-to-left
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (3,2)
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (3,1)
+    lea  r4, [8*r1]
+    lea  r5, [8*r3]
+    sub  r0, r4
+    sub  r2, r5
+    sub  r2, 16*SIZEOF_PIXEL
+    sub  r0, 16*SIZEOF_PIXEL
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    SA8D_16x16 ; tile (3,0)
+    movd eax, m12 ; return the accumulated sum
+    RET
+
+%else ; ARCH_X86_32
+%if mmsize == 16 ; sse2/ssse3 variant of the 8x8 kernel (x86-32)
+cglobal pixel_sa8d_8x8_internal ; in: r0/r2 = pix1/pix2, r1/r3 = strides, r4/r5 = 3*stride; out: m0 = per-column sa8d partial sums (words)
+    %define spill0 [esp+4]  ; caller reserves aligned scratch below esp for register spills
+    %define spill1 [esp+20]
+    %define spill2 [esp+36]
+%if vertical
+    LOAD_DIFF_8x4P 0, 1, 2, 3, 4, 5, 6, r0, r2, 1 ; rows 0-3 of pix1-pix2; advances r0/r2
+    HADAMARD4_2D 0, 1, 2, 3, 4
+    movdqa spill0, m3 ; free a register for the second half
+    LOAD_DIFF_8x4P 4, 5, 6, 7, 3, 3, 2, r0, r2, 1 ; rows 4-7
+    HADAMARD4_2D 4, 5, 6, 7, 3
+    HADAMARD2_2D 0, 4, 1, 5, 3, qdq, amax ; final vertical stage + abs-max combine
+    movdqa m3, spill0
+    paddw m0, m1
+    HADAMARD2_2D 2, 6, 3, 7, 5, qdq, amax
+%else ; vertical == 0 (ssse3 hmul path)
+    mova m7, [hmul_8p]
+    LOAD_SUMSUB_8x4P 0, 1, 2, 3, 5, 6, 7, r0, r2, 1
+    ; could do first HADAMARD4_V here to save spilling later
+    ; surprisingly, not a win on conroe or even p4
+    mova spill0, m2 ; only 8 xmm regs on x86-32, so spill to stack scratch
+    mova spill1, m3
+    mova spill2, m1
+    SWAP 1, 7
+    LOAD_SUMSUB_8x4P 4, 5, 6, 7, 2, 3, 1, r0, r2, 1
+    HADAMARD4_V 4, 5, 6, 7, 3
+    mova m1, spill2
+    mova m2, spill0
+    mova m3, spill1
+    mova spill0, m6
+    mova spill1, m7
+    HADAMARD4_V 0, 1, 2, 3, 7
+    SUMSUB_BADC w, 0, 4, 1, 5, 7
+    HADAMARD 2, sumsub, 0, 4, 7, 6
+    HADAMARD 2, sumsub, 1, 5, 7, 6
+    HADAMARD 1, amax, 0, 4, 7, 6 ; last stage takes abs-max (|a+b|,|a-b|)
+    HADAMARD 1, amax, 1, 5, 7, 6
+    mova m6, spill0
+    mova m7, spill1
+    paddw m0, m1
+    SUMSUB_BADC w, 2, 6, 3, 7, 4
+    HADAMARD 2, sumsub, 2, 6, 4, 5
+    HADAMARD 2, sumsub, 3, 7, 4, 5
+    HADAMARD 1, amax, 2, 6, 4, 5
+    HADAMARD 1, amax, 3, 7, 4, 5
+%endif ; vertical / !vertical
+    paddw m0, m2 ; m0 = sum of all absolute transformed differences (per lane)
+    paddw m0, m3
+    SAVE_MM_PERMUTATION ; record register permutation for callers (x86inc)
+    ret
+%endif ; mmsize == 16
+
+cglobal pixel_sa8d_8x8_internal2 ; 8x8 sa8d kernel (x86-32, sse2 LOAD_DIFF path); in: r0/r2 pix, r1/r3 strides, r4/r5 = 3*stride; out: m0 = word partial sums
+    %define spill0 [esp+4] ; one stack spill slot (caller provides aligned scratch)
+    LOAD_DIFF_8x4P 0, 1, 2, 3, 4, 5, 6, r0, r2, 1 ; rows 0-3; advances r0/r2 past 4 rows
+    HADAMARD4_2D 0, 1, 2, 3, 4
+    movdqa spill0, m3
+    LOAD_DIFF_8x4P 4, 5, 6, 7, 3, 3, 2, r0, r2, 1 ; rows 4-7
+    HADAMARD4_2D 4, 5, 6, 7, 3
+    HADAMARD2_2D 0, 4, 1, 5, 3, qdq, amax ; final stage with abs-max combine
+    movdqa m3, spill0
+    paddw m0, m1
+    HADAMARD2_2D 2, 6, 3, 7, 5, qdq, amax
+    paddw m0, m2 ; accumulate all four result registers into m0
+    paddw m0, m3
+    SAVE_MM_PERMUTATION ; record register permutation for callers (x86inc)
+    ret
+
+cglobal pixel_sa8d_8x8, 4,7 ; int sa8d_8x8(pix1, stride1, pix2, stride2) — x86-32 wrapper
+    FIX_STRIDES r1, r3 ; byte strides when SIZEOF_PIXEL > 1
+    mov    r6, esp ; save unaligned esp; args remain reachable via r6
+    and   esp, ~15 ; 16-byte align stack for SSE spill slots
+    sub   esp, 48 ; scratch for spill0/1/2 used by the internal kernel
+    lea    r4, [3*r1] ; r4/r5 = 3*stride, consumed by the kernel's loads
+    lea    r5, [3*r3]
+    call pixel_sa8d_8x8_internal
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1 ; horizontal add of unsigned words (sums may exceed 16 bits)
+%else
+    HADDW  m0, m1
+%endif ; HIGH_BIT_DEPTH
+    movd  eax, m0
+    add   eax, 1 ; sa8d is normalized as (sum + 1) >> 1
+    shr   eax, 1
+    mov   esp, r6 ; restore original stack pointer
+    RET
+
+cglobal pixel_sa8d_16x16, 4,7 ; int sa8d_16x16(pix1, stride1, pix2, stride2) — x86-32: four 8x8 kernels, summed before the final (x+1)>>1
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; incoming args stay at [r6+20] (pix1) / [r6+28] (pix2)
+    and  esp, ~15 ; align for SSE spills
+    sub  esp, 64 ; spill scratch + intermediate sums at [esp+48] / [esp+64-mmsize]
+    lea  r4, [3*r1]
+    lea  r5, [3*r3]
+    call pixel_sa8d_8x8_internal ; top-left 8x8
+%if mmsize == 8
+    lea  r0, [r0+4*r1] ; mmx kernel only consumed 4 rows per call — advance manually
+    lea  r2, [r2+4*r3]
+%endif
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1 ; collapse early so word lanes cannot overflow at high bit depth
+%endif
+    mova [esp+48], m0 ; stash partial sum
+    call pixel_sa8d_8x8_internal ; bottom-left 8x8 (pointers already advanced by kernel)
+    mov  r0, [r6+20] ; reload pix1/pix2 from the original stack frame
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; right half
+    add  r2, 8*SIZEOF_PIXEL
+    SA8D_INTER ; combine m0 with [esp+48]
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal ; top-right 8x8
+%if mmsize == 8
+    lea  r0, [r0+4*r1]
+    lea  r2, [r2+4*r3]
+%else
+    SA8D_INTER
+%endif
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal ; bottom-right 8x8
+%if HIGH_BIT_DEPTH
+    SA8D_INTER
+%else ; !HIGH_BIT_DEPTH
+    paddusw m0, [esp+64-mmsize] ; saturating add keeps word lanes from wrapping
+%if mmsize == 16
+    HADDUW m0, m1
+%else
+    mova m2, [esp+48] ; mmx: widen words to dwords by hand before summing
+    pxor m7, m7
+    mova m1, m0
+    mova m3, m2
+    punpcklwd m0, m7
+    punpckhwd m1, m7
+    punpcklwd m2, m7
+    punpckhwd m3, m7
+    paddd m0, m1
+    paddd m2, m3
+    paddd m0, m2
+    HADDD m0, m1
+%endif
+%endif ; HIGH_BIT_DEPTH
+    movd eax, m0
+    add  eax, 1 ; final rounding: (sum + 1) >> 1
+    shr  eax, 1
+    mov  esp, r6 ; restore stack
+    RET
+
+cglobal pixel_sa8d_8x16, 4,7,8 ; int sa8d_8x16 — x86-32: two vertically stacked 8x8 blocks, each rounded then summed
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28]
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; r4 = 3*stride1
+    lea  r5, [r3 + 2*r3] ; r5 = 3*stride2
+    call pixel_sa8d_8x8_internal2 ; top 8x8
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1 ; round: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d ; running total on the aligned scratch
+
+    mov  r0, [r6+20] ; reload pix1/pix2
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; bottom 8x8
+    lea  r2, [r2 + r3*8]
+    lea  r4, [r1 + 2*r1] ; r4 was clobbered by movd above; r5 survived the call
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add r4d, dword [esp+36]
+    mov eax, r4d ; return total
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_8x32, 4,7,8 ; int sa8d_8x32 — x86-32: four stacked 8x8 blocks, each individually rounded then summed
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28]
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; 3*stride1
+    lea  r5, [r3 + 2*r3] ; 3*stride2
+    call pixel_sa8d_8x8_internal2 ; rows 0-7
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1 ; round each 8x8: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d ; running total
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; rows 8-15
+    lea  r2, [r2 + r3*8]
+    lea  r4, [r1 + 2*r1] ; r4 clobbered by movd; r5 still 3*stride2
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; rows 16-23 (two 8-row hops from base)
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; rows 24-31 (three hops)
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov eax, r4d ; return total
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_16x8, 4,7,8 ; int sa8d_16x8 — x86-32: two side-by-side 8x8 blocks, each rounded then summed
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28]
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; 3*stride1
+    lea  r5, [r3 + 2*r3] ; 3*stride2
+    call pixel_sa8d_8x8_internal2 ; left 8x8
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1 ; round: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; right 8x8
+    add  r2, 8*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1] ; r4 clobbered by movd; r5 survived the call
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add r4d, dword [esp+36]
+    mov eax, r4d ; return total
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_16x32, 4,7,8 ; int sa8d_16x32 — x86-32: two 16x16 halves, each built from four 8x8 kernels
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28]
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; 3*stride1
+    lea  r5, [r3 + 2*r3] ; 3*stride2
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1 ; collapse early to avoid word overflow at high bit depth
+%endif
+    mova [rsp+48], m0 ; NOTE(review): 'rsp' here vs 'esp' elsewhere — presumably aliased by x86inc on x86-32; confirm
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER ; accumulate with the stashed partial
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; right column of top 16x16
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1 ; round top half: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; bottom 16x16: advance 16 rows (two 8-row hops)
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r4, [r1 + 2*r1] ; restore 3*stride1 (r4 clobbered by movd)
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    add  r0, 8*SIZEOF_PIXEL ; right column of bottom 16x16
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1 ; round bottom half and add to top
+    shr  r4d, 1
+    add r4d, dword [esp+36]
+    mov eax, r4d
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_16x64, 4,7,8 ; int sa8d_16x64 — x86-32: four 16x16 tiles stacked vertically; running pointers saved back to [r6+20]/[r6+28] between bands
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28]
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; 3*stride1
+    lea  r5, [r3 + 2*r3] ; 3*stride2
+    call pixel_sa8d_8x8_internal2 ; --- 16x16 tile #1 (rows 0-15) ---
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [rsp+48], m0 ; NOTE(review): 'rsp' vs 'esp' — presumably aliased by x86inc on x86-32; confirm
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; right column of the tile
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1 ; round tile #1: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d ; running total
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; advance base pointers 16 rows ...
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0 ; ... and persist them, so later tiles reload the advanced base
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; --- tile #2 (rows 16-31) ---
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16 ; finalize the tile: combine, round, add into [esp+36]
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; advance another 16 rows and persist
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; --- tile #3 (rows 32-47) ---
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; --- tile #4 (rows 48-63) ---
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER ; last tile combined inline instead of via AVG_16x16
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add r4d, dword [esp+36]
+    mov eax, r4d ; return total
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_24x32, 4,7,8 ; int sa8d_24x32 — x86-32: 3x4 grid of 8x8 blocks, each rounded individually and summed in [esp+36]
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28], rewritten per row band
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; 3*stride1
+    lea  r5, [r3 + 2*r3] ; 3*stride2 (set once; preserved across calls)
+    call pixel_sa8d_8x8_internal2 ; band 0, col 0
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1 ; round: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d ; running total
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; band 0, col 1
+    add  r2, 8*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1] ; r4 re-derived (clobbered by movd above)
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; band 0, col 2
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; advance to band 1 and persist the new base
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; band 1, col 0
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; band 1, col 1
+    add  r2, 8*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; band 1, col 2
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; advance to band 2
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; band 2, col 0
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; band 2, col 1
+    add  r2, 8*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; band 2, col 2
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; advance to band 3
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; band 3, col 0
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; band 3, col 1
+    add  r2, 8*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; band 3, col 2 (final block)
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov eax, r4d ; return total
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_32x8, 4,7,8 ; int sa8d_32x8 — x86-32: four side-by-side 8x8 blocks, each rounded then summed
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28]
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; 3*stride1
+    lea  r5, [r3 + 2*r3] ; 3*stride2 (set once; survives the calls)
+    call pixel_sa8d_8x8_internal2 ; col 0
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1 ; round: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d ; running total
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; col 1
+    add  r2, 8*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1] ; r4 re-derived (clobbered by movd)
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; col 2
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL ; col 3
+    add  r2, 24*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov eax, r4d ; return total
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_32x16, 4,7,8 ; int sa8d_32x16 — x86-32: two 16x16 halves side by side, each of four 8x8 kernels
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28]
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; 3*stride1
+    lea  r5, [r3 + 2*r3] ; 3*stride2
+    call pixel_sa8d_8x8_internal2 ; --- left 16x16 ---
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1 ; collapse early to avoid word overflow at high bit depth
+%endif
+    mova [rsp+48], m0 ; NOTE(review): 'rsp' vs 'esp' — presumably aliased by x86inc on x86-32; confirm
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; right column of the left half
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1 ; round left half: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; --- right 16x16 ---
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1] ; restore 3*stride1 (r4 clobbered by movd)
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL ; right column of the right half
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1 ; round right half and add to left
+    shr  r4d, 1
+    add r4d, dword [esp+36]
+    mov eax, r4d
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_32x24, 4,7,8 ; int sa8d_32x24 — x86-32: 4x3 grid of 8x8 blocks, each rounded individually and summed in [esp+36]
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28], rewritten per row band
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; 3*stride1
+    lea  r5, [r3 + 2*r3] ; 3*stride2 (set once; survives the calls)
+    call pixel_sa8d_8x8_internal2 ; band 0, col 0
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1 ; round: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d ; running total
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; band 0, col 1
+    add  r2, 8*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1] ; r4 re-derived (clobbered by movd above)
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; band 0, col 2
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL ; band 0, col 3
+    add  r2, 24*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; advance to band 1 and persist the new base
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; band 1, col 0
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; band 1, col 1
+    add  r2, 8*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; band 1, col 2
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL ; band 1, col 3
+    add  r2, 24*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; advance to band 2
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; band 2, col 0
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL ; band 2, col 1
+    add  r2, 8*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; band 2, col 2
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL ; band 2, col 3 (final block)
+    add  r2, 24*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+    HADDUW m0, m1
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add  r4d, dword [esp+36]
+    mov eax, r4d ; return total
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_32x32, 4,7,8 ; int sa8d_32x32 — x86-32: four 16x16 quadrants, each built from four 8x8 kernels
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28]
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; 3*stride1
+    lea  r5, [r3 + 2*r3] ; 3*stride2
+    call pixel_sa8d_8x8_internal2 ; --- top-left 16x16 ---
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1 ; collapse early at high bit depth to avoid word overflow
+%endif
+    mova [rsp+48], m0 ; NOTE(review): 'rsp' vs 'esp' — presumably aliased by x86inc on x86-32; confirm
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1 ; round quadrant: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d ; running total
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; --- top-right 16x16 ---
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1] ; restore 3*stride1 (clobbered by movd)
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16 ; finalize quadrant: combine, round, add into [esp+36]
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; --- bottom-left 16x16: down 16 rows from base ---
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; --- bottom-right 16x16 ---
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER ; last quadrant combined inline instead of via AVG_16x16
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add r4d, dword [esp+36]
+    mov eax, r4d ; return total
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_32x64, 4,7,8 ; int sa8d_32x64 — x86-32: 2x4 grid of 16x16 tiles; the base pointers are advanced and saved back to [r6+20]/[r6+28] between 16-row bands
+    FIX_STRIDES r1, r3
+    mov  r6, esp ; save esp; args at [r6+20]/[r6+28]
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1] ; 3*stride1
+    lea  r5, [r3 + 2*r3] ; 3*stride2
+    call pixel_sa8d_8x8_internal2 ; --- band 0, left 16x16 ---
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1 ; collapse early at high bit depth to avoid word overflow
+%endif
+    mova [rsp+48], m0 ; NOTE(review): 'rsp' vs 'esp' — presumably aliased by x86inc on x86-32; confirm
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1 ; round tile: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d ; running total
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; --- band 0, right 16x16 ---
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1] ; restore 3*stride1 (clobbered by movd)
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16 ; finalize tile: combine, round, add into [esp+36]
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; advance base 16 rows and persist for band 1
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; --- band 1, left 16x16 ---
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; --- band 1, right 16x16 ---
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; advance base and persist for band 2
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; --- band 2, left 16x16 ---
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; --- band 2, right 16x16 ---
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8] ; advance base and persist for band 3
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2 ; --- band 3, left 16x16 ---
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL ; --- band 3, right 16x16 (final tile) ---
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER ; last tile combined inline instead of via AVG_16x16
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add r4d, dword [esp+36]
+    mov eax, r4d ; return total
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_48x64, 4,7,8
+    FIX_STRIDES r1, r3
+    mov  r6, esp
+    and  esp, ~15
+    sub  esp, 64
+
+    lea  r4, [r1 + 2*r1]
+    lea  r5, [r3 + 2*r3]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [rsp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1
+    shr  r4d, 1
+    add r4d, dword [esp+36]
+    mov eax, r4d
+    mov esp, r6
+    RET
+
+cglobal pixel_sa8d_64x16, 4,7,8      ; SA8D cost of a 64x16 block (x86-32 path): sums 8x8 SA8D tiles, result in eax
+    FIX_STRIDES r1, r3               ; scale both strides for SIZEOF_PIXEL in HIGH_BIT_DEPTH builds
+    mov  r6, esp                     ; keep original esp so caller stack args stay reachable through r6
+    and  esp, ~15                    ; 16-byte align for the aligned mova spills below
+    sub  esp, 64                     ; scratch area for partial-sum spills ([esp+36], [esp+48], [esp+64-mmsize])
+
+    lea  r4, [r1 + 2*r1]             ; r4 = 3*stride1, r5 = 3*stride2 (row stepping for the 8x8 kernel)
+    lea  r5, [r3 + 2*r3]
+    call pixel_sa8d_8x8_internal2    ; tile (col 0, rows 0..7)
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [rsp+48], m0                ; NOTE(review): rsp here should equal esp via x86inc's 32-bit register aliasing — confirm; sibling spills use esp
+    call pixel_sa8d_8x8_internal2    ; tile (col 0, rows 8..15)
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]                 ; reload src pointer from the caller's stack args (via saved esp)
+    mov  r2, [r6+28]                 ; reload ref pointer
+    add  r0, 8*SIZEOF_PIXEL          ; advance 8 columns
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1                      ; rounded halving of the accumulated satd sum: (sum + 1) >> 1
+    shr  r4d, 1
+    mov dword [esp+36], r4d          ; stash partial result for columns 0..15
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL         ; columns 16..23
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]             ; r4 was clobbered by the movd above; restore 3*stride1
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL         ; columns 24..31; AVG_16x16 folds this 16-wide pair into the running total
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL         ; columns 32..39
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL         ; columns 40..47
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 48*SIZEOF_PIXEL         ; columns 48..55
+    add  r2, 48*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 56*SIZEOF_PIXEL         ; final column strip 56..63
+    add  r2, 56*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1                      ; round this half as well
+    shr  r4d, 1
+    add r4d, dword [esp+36]          ; combine with stashed columns-0..15 partial
+    mov eax, r4d                     ; return value
+    mov esp, r6                      ; restore caller stack pointer
+    RET
+
+cglobal pixel_sa8d_64x32, 4,7,8      ; SA8D cost of a 64x32 block (x86-32 path): two 16-row bands of 8x8 tiles
+    FIX_STRIDES r1, r3               ; scale strides for SIZEOF_PIXEL in HIGH_BIT_DEPTH builds
+    mov  r6, esp                     ; save esp; caller args reloaded through r6 below
+    and  esp, ~15                    ; align for aligned spills
+    sub  esp, 64                     ; scratch for partial sums
+
+    lea  r4, [r1 + 2*r1]             ; r4/r5 = 3*stride for the 8x8 kernel
+    lea  r5, [r3 + 2*r3]
+    call pixel_sa8d_8x8_internal2    ; band 0, columns 0..7
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [rsp+48], m0                ; NOTE(review): rsp presumably aliases esp on x86-32 (x86inc) — confirm
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]                 ; reload src/ref from caller stack args
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL          ; columns 8..15
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1                      ; (sum + 1) >> 1 rounding
+    shr  r4d, 1
+    mov dword [esp+36], r4d          ; stash running partial result
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL         ; columns 16..23
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]             ; restore 3*stride (r4 clobbered by movd above)
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL         ; columns 24..31; AVG_16x16 folds the 16-wide pair into [esp+36]
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL         ; columns 32..39
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL         ; columns 40..47
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 48*SIZEOF_PIXEL         ; columns 48..55
+    add  r2, 48*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 56*SIZEOF_PIXEL         ; columns 56..63 of band 0
+    add  r2, 56*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]                 ; advance both pointers 16 rows to the second band
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0                 ; persist advanced base pointers in the arg slots
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]             ; band 1: same 64-wide tile sweep as above
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 48*SIZEOF_PIXEL
+    add  r2, 48*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 56*SIZEOF_PIXEL         ; final tile pair of the block
+    add  r2, 56*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1                      ; round last pair: (sum + 1) >> 1
+    shr  r4d, 1
+    add r4d, dword [esp+36]          ; add accumulated partial
+    mov eax, r4d                     ; return value
+    mov esp, r6                      ; restore caller stack pointer
+    RET
+
+cglobal pixel_sa8d_64x48, 4,7,8      ; SA8D cost of a 64x48 block (x86-32 path): three 16-row bands of 8x8 tiles
+    FIX_STRIDES r1, r3               ; scale strides for SIZEOF_PIXEL in HIGH_BIT_DEPTH builds
+    mov  r6, esp                     ; save esp; caller args reloaded through r6
+    and  esp, ~15                    ; align for aligned spills
+    sub  esp, 64                     ; scratch for partial sums
+
+    lea  r4, [r1 + 2*r1]             ; r4/r5 = 3*stride for the 8x8 kernel
+    lea  r5, [r3 + 2*r3]
+    call pixel_sa8d_8x8_internal2    ; band 0, columns 0..7
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [rsp+48], m0                ; NOTE(review): rsp presumably aliases esp on x86-32 (x86inc) — confirm
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]                 ; reload src/ref from caller stack args
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1                      ; (sum + 1) >> 1 rounding
+    shr  r4d, 1
+    mov dword [esp+36], r4d          ; stash running partial result
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]             ; restore 3*stride (r4 clobbered by movd above)
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL         ; AVG_16x16 folds each 16-wide pair into [esp+36]
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 48*SIZEOF_PIXEL
+    add  r2, 48*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 56*SIZEOF_PIXEL
+    add  r2, 56*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]                 ; advance both pointers 16 rows to band 1
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0                 ; persist advanced base pointers in the arg slots
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]             ; band 1: same 64-wide tile sweep
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 48*SIZEOF_PIXEL
+    add  r2, 48*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 56*SIZEOF_PIXEL
+    add  r2, 56*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]                 ; advance both pointers 16 rows to band 2
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]             ; band 2 (final): same sweep, last pair feeds the epilogue
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 48*SIZEOF_PIXEL
+    add  r2, 48*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 56*SIZEOF_PIXEL         ; final tile pair of the block
+    add  r2, 56*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1                      ; round last pair: (sum + 1) >> 1
+    shr  r4d, 1
+    add r4d, dword [esp+36]          ; add accumulated partial
+    mov eax, r4d                     ; return value
+    mov esp, r6                      ; restore caller stack pointer
+    RET
+
+cglobal pixel_sa8d_64x64, 4,7,8      ; SA8D cost of a 64x64 block (x86-32 path): four 16-row bands of 8x8 tiles
+    FIX_STRIDES r1, r3               ; scale strides for SIZEOF_PIXEL in HIGH_BIT_DEPTH builds
+    mov  r6, esp                     ; save esp; caller args reloaded through r6
+    and  esp, ~15                    ; align for aligned spills
+    sub  esp, 64                     ; scratch for partial sums
+
+    lea  r4, [r1 + 2*r1]             ; r4/r5 = 3*stride for the 8x8 kernel
+    lea  r5, [r3 + 2*r3]
+    call pixel_sa8d_8x8_internal2    ; band 0, columns 0..7
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [rsp+48], m0                ; NOTE(review): rsp presumably aliases esp on x86-32 (x86inc) — confirm
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]                 ; reload src/ref from caller stack args
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1                      ; (sum + 1) >> 1 rounding
+    shr  r4d, 1
+    mov dword [esp+36], r4d          ; stash running partial result
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]             ; restore 3*stride (r4 clobbered by movd above)
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL         ; AVG_16x16 folds each 16-wide pair into [esp+36]
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 48*SIZEOF_PIXEL
+    add  r2, 48*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 56*SIZEOF_PIXEL
+    add  r2, 56*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]                 ; advance both pointers 16 rows to band 1
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0                 ; persist advanced base pointers in the arg slots
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]             ; band 1: same 64-wide tile sweep
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 48*SIZEOF_PIXEL
+    add  r2, 48*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 56*SIZEOF_PIXEL
+    add  r2, 56*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]                 ; advance both pointers 16 rows to band 2
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]             ; band 2: same sweep
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 48*SIZEOF_PIXEL
+    add  r2, 48*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 56*SIZEOF_PIXEL
+    add  r2, 56*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]                 ; advance both pointers 16 rows to band 3
+    mov  r2, [r6+28]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    mov  [r6+20], r0
+    mov  [r6+28], r2
+
+    lea  r4, [r1 + 2*r1]             ; band 3 (final): last pair feeds the epilogue
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 16*SIZEOF_PIXEL
+    add  r2, 16*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 24*SIZEOF_PIXEL
+    add  r2, 24*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 32*SIZEOF_PIXEL
+    add  r2, 32*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 40*SIZEOF_PIXEL
+    add  r2, 40*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    AVG_16x16
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 48*SIZEOF_PIXEL
+    add  r2, 48*SIZEOF_PIXEL
+    lea  r4, [r1 + 2*r1]
+    call pixel_sa8d_8x8_internal2
+%if HIGH_BIT_DEPTH
+    HADDUW m0, m1
+%endif
+    mova [esp+48], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+48], m0
+
+    mov  r0, [r6+20]
+    mov  r2, [r6+28]
+    add  r0, 56*SIZEOF_PIXEL         ; final tile pair of the block
+    add  r2, 56*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+    mova [esp+64-mmsize], m0
+    call pixel_sa8d_8x8_internal2
+    SA8D_INTER
+%if HIGH_BIT_DEPTH == 0
+    HADDUW m0, m1
+%endif
+    movd r4d, m0
+    add  r4d, 1                      ; round last pair: (sum + 1) >> 1
+    shr  r4d, 1
+    add r4d, dword [esp+36]          ; add accumulated partial
+    mov eax, r4d                     ; return value
+    mov esp, r6                      ; restore caller stack pointer
+    RET
+%endif ; !ARCH_X86_64
+%endmacro ; SA8D
+
+;=============================================================================
+; INTRA SATD
+;=============================================================================
+%define TRANS TRANS_SSE2             ; select the SSE2 transpose for the macro instantiations below
+%define DIFFOP DIFF_UNPACK_SSE2
+%define LOAD_SUMSUB_8x4P LOAD_DIFF_8x4P
+%define LOAD_SUMSUB_16P  LOAD_SUMSUB_16P_SSE2
+%define movdqa movaps ; doesn't hurt pre-nehalem, might as well save size
+%define movdqu movups
+%define punpcklqdq movlhps
+INIT_XMM sse2                        ; emit the _sse2 variants of all SA8D/SATD functions
+SA8D
+SATDS_SSE2
+
+%if HIGH_BIT_DEPTH == 0
+INIT_XMM ssse3,atom                  ; atom-tuned ssse3 variants, 8-bit only
+SATDS_SSE2
+SA8D
+%endif
+
+%define DIFFOP DIFF_SUMSUB_SSSE3     ; ssse3 sum/sub primitives replace the sse2 unpack versions
+%define LOAD_DUP_4x8P LOAD_DUP_4x8P_CONROE
+%if HIGH_BIT_DEPTH == 0
+%define LOAD_SUMSUB_8x4P LOAD_SUMSUB_8x4P_SSSE3
+%define LOAD_SUMSUB_16P  LOAD_SUMSUB_16P_SSSE3
+%endif
+INIT_XMM ssse3
+SATDS_SSE2
+SA8D
+%undef movdqa ; nehalem doesn't like movaps
+%undef movdqu ; movups
+%undef punpcklqdq ; or movlhps
+
+%define TRANS TRANS_SSE4             ; sse4 gets the faster transpose and penryn-style dup-loads
+%define LOAD_DUP_4x8P LOAD_DUP_4x8P_PENRYN
+INIT_XMM sse4
+SATDS_SSE2
+SA8D
+
+; Sandy/Ivy Bridge and Bulldozer do movddup in the load unit, so
+; it's effectively free.
+%define LOAD_DUP_4x8P LOAD_DUP_4x8P_CONROE
+INIT_XMM avx
+SATDS_SSE2
+SA8D
+
+%define TRANS TRANS_XOP              ; XOP variant only swaps in the vpperm-based transpose
+INIT_XMM xop
+SATDS_SSE2
+SA8D
+
+
+%if HIGH_BIT_DEPTH == 0
+%define LOAD_SUMSUB_8x4P LOAD_SUMSUB8_16x4P_AVX2  ; AVX2 section (8-bit only); matching %endif is further down the file
+%define LOAD_DUP_4x8P LOAD_DUP_4x16P_AVX2
+%define TRANS TRANS_SSE4
+
+%macro LOAD_SUMSUB_8x8P_AVX2 7 ; 4*dst, 2*tmp, mul
+; Loads 8 rows of 8 pixels from both r0 and r2 into ymm registers (rows n and
+; n+4 share a register via vinserti128), duplicates each 8-pixel row across
+; lanes with punpcklqdq, and forms sum/sub pairs with DIFF_SUMSUB_SSSE3.
+; Assumes caller set r4=5*r1 and r5=5*r3 (SATD_START_AVX2 with %3=1) so
+; [r0+r4]/[r2+r5] address row 5 — TODO confirm against callers.
+; Advances r0/r2 by 2 rows between the two half-loads (net +2 rows on exit).
+    movq   xm%1, [r0]                ; rows 0 and 4 of src
+    movq   xm%3, [r2]                ; rows 0 and 4 of ref
+    movq   xm%2, [r0+r1]             ; rows 1 and 5
+    movq   xm%4, [r2+r3]
+    vinserti128 m%1, m%1, [r0+4*r1], 1
+    vinserti128 m%3, m%3, [r2+4*r3], 1
+    vinserti128 m%2, m%2, [r0+r4], 1
+    vinserti128 m%4, m%4, [r2+r5], 1
+    punpcklqdq m%1, m%1              ; duplicate each 64-bit row within its lane
+    punpcklqdq m%3, m%3
+    punpcklqdq m%2, m%2
+    punpcklqdq m%4, m%4
+    DIFF_SUMSUB_SSSE3 %1, %3, %2, %4, %7
+    lea      r0, [r0+2*r1]           ; step to rows 2/3 (and 6/7 via the +4*stride inserts)
+    lea      r2, [r2+2*r3]
+
+    movq   xm%3, [r0]                ; rows 2 and 6
+    movq   xm%5, [r2]
+    movq   xm%4, [r0+r1]             ; rows 3 and 7
+    movq   xm%6, [r2+r3]
+    vinserti128 m%3, m%3, [r0+4*r1], 1
+    vinserti128 m%5, m%5, [r2+4*r3], 1
+    vinserti128 m%4, m%4, [r0+r4], 1
+    vinserti128 m%6, m%6, [r2+r5], 1
+    punpcklqdq m%3, m%3
+    punpcklqdq m%5, m%5
+    punpcklqdq m%4, m%4
+    punpcklqdq m%6, m%6
+    DIFF_SUMSUB_SSSE3 %3, %5, %4, %6, %7
+%endmacro
+
+%macro SATD_START_AVX2 2-3 0 ; accumulator, mul-constant reg, [8-wide flag]
+; Common AVX2 SATD prologue: fixes strides, loads the Hadamard multiplier
+; constant into %2, sets r4/r5 stride multiples, and zeroes accumulator %1.
+    FIX_STRIDES r1, r3
+%if %3
+    mova    %2, [hmul_8p]            ; 8-wide mode: rows are lane-duplicated, so step 5*stride
+    lea     r4, [5*r1]
+    lea     r5, [5*r3]
+%else
+    mova    %2, [hmul_16p]           ; 16-wide mode: step 3*stride
+    lea     r4, [3*r1]
+    lea     r5, [3*r3]
+%endif
+    pxor    %1, %1                   ; clear the satd accumulator
+%endmacro
+
+%define TRANS TRANS_SSE4
+INIT_YMM avx2                        ; everything below is emitted as _avx2 using ymm registers
+cglobal pixel_satd_16x8_internal     ; helper: adds the SATD of a 16x8 strip into m6; advances r0/r2
+    LOAD_SUMSUB_16x4P_AVX2 0, 1, 2, 3, 4, 5, 7, r0, r2, 1
+    SATD_8x4_SSE 0, 0, 1, 2, 3, 4, 5, 6
+    LOAD_SUMSUB_16x4P_AVX2 0, 1, 2, 3, 4, 5, 7, r0, r2, 0
+    SATD_8x4_SSE 0, 0, 1, 2, 3, 4, 5, 6
+    ret
+
+cglobal pixel_satd_16x16, 4,6,8      ; SATD of a 16x16 block: two 16x8 strips accumulated in m6
+    SATD_START_AVX2 m6, m7
+    call pixel_satd_16x8_internal    ; upper 16x8
+    lea  r0, [r0+4*r1]
+    lea  r2, [r2+4*r3]
+pixel_satd_16x8_internal:            ; NOTE(review): bare label aliasing the cglobal name above; it is the jmp target of pixel_satd_16x8 below — confirm x86inc name mangling keeps these distinct
+    call pixel_satd_16x8_internal    ; lower 16x8 (or the only strip when entered via the label)
+    vextracti128 xm0, m6, 1          ; fold the high ymm lane into the low one
+    paddw        xm0, xm6
+    SATD_END_SSE2 xm0                ; horizontal sum + return in eax
+    RET
+
+cglobal pixel_satd_16x8, 4,6,8       ; SATD of a 16x8 block: reuses the tail of pixel_satd_16x16
+    SATD_START_AVX2 m6, m7
+    jmp pixel_satd_16x8_internal     ; tail-jump into the single-strip + epilogue path above
+
+cglobal pixel_satd_8x8_internal      ; helper: adds the SATD of one 8x8 tile into m6; advances r0/r2
+    LOAD_SUMSUB_8x8P_AVX2 0, 1, 2, 3, 4, 5, 7
+    SATD_8x4_SSE 0, 0, 1, 2, 3, 4, 5, 6
+    ret
+
+; int pixel_satd_8x16(pixel *src, intptr_t sstride, pixel *ref, intptr_t rstride)
+; Two 8x8 SATD halves; the helper leaves r0/r2 advanced by 2 rows, so the
+; leas below (2+4 = 6 more rows) step down to the second 8x8 block.
+cglobal pixel_satd_8x16, 4,6,8
+    SATD_START_AVX2 m6, m7, 1
+    call pixel_satd_8x8_internal
+    lea  r0, [r0+2*r1]
+    lea  r2, [r2+2*r3]
+    lea  r0, [r0+4*r1]
+    lea  r2, [r2+4*r3]
+    call pixel_satd_8x8_internal
+    ; fold the two ymm lanes together, then horizontal-add and return in eax
+    vextracti128 xm0, m6, 1
+    paddw        xm0, xm6
+    SATD_END_SSE2 xm0
+    RET
+
+; int pixel_satd_8x8(pixel *src, intptr_t sstride, pixel *ref, intptr_t rstride)
+cglobal pixel_satd_8x8, 4,6,8
+    SATD_START_AVX2 m6, m7, 1
+    call pixel_satd_8x8_internal
+    vextracti128 xm0, m6, 1
+    paddw        xm0, xm6
+    SATD_END_SSE2 xm0
+    RET
+
+; Internal helper (custom calling convention): accumulates the SA8D of one
+; 8x8 tile into m6 via a full 8x8 Hadamard transform (vertical pass, then
+; horizontal sumsub/amax passes).  Expects the SATD_START_AVX2 8p setup.
+cglobal pixel_sa8d_8x8_internal
+    LOAD_SUMSUB_8x8P_AVX2 0, 1, 2, 3, 4, 5, 7
+    HADAMARD4_V 0, 1, 2, 3, 4
+    HADAMARD 8, sumsub, 0, 1, 4, 5
+    HADAMARD 8, sumsub, 2, 3, 4, 5
+    HADAMARD 2, sumsub, 0, 1, 4, 5
+    HADAMARD 2, sumsub, 2, 3, 4, 5
+    HADAMARD 1, amax, 0, 1, 4, 5
+    HADAMARD 1, amax, 2, 3, 4, 5
+    paddw  m6, m0
+    paddw  m6, m2
+    ret
+
+; int pixel_sa8d_8x8(pixel *src, intptr_t sstride, pixel *ref, intptr_t rstride)
+; Returns (sum+1)>>1 of the 8x8 Hadamard magnitudes, per SA8D convention.
+cglobal pixel_sa8d_8x8, 4,6,8
+    SATD_START_AVX2 m6, m7, 1
+    call pixel_sa8d_8x8_internal
+    vextracti128 xm1, m6, 1
+    paddw xm6, xm1
+    HADDW xm6, xm1
+    movd  eax, xm6
+    add   eax, 1
+    shr   eax, 1
+    RET
+
+; int pixel_sa8d_16x16(pixel *src, intptr_t sstride, pixel *ref, intptr_t rstride)
+; Accumulates four 8x8 SA8D sub-blocks into m6 (the helper advances the
+; pointers by 6 rows, hence the sub/add pointer fixups between calls), then
+; horizontal-adds with saturation and returns (sum+1)>>1 in eax.
+cglobal pixel_sa8d_16x16, 4,6,8
+    SATD_START_AVX2 m6, m7, 1
+
+    call pixel_sa8d_8x8_internal ; pix[0]
+
+    sub  r0, r1
+    sub  r0, r1
+    add  r0, 8*SIZEOF_PIXEL
+    sub  r2, r3
+    sub  r2, r3
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal ; pix[8]
+
+    add  r0, r4
+    add  r0, r1
+    add  r2, r5
+    add  r2, r3
+    call pixel_sa8d_8x8_internal ; pix[8*stride+8]
+
+    sub  r0, r1
+    sub  r0, r1
+    sub  r0, 8*SIZEOF_PIXEL
+    sub  r2, r3
+    sub  r2, r3
+    sub  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal ; pix[8*stride]
+
+    ; TODO: analyze Dynamic Range
+    vextracti128 xm0, m6, 1
+    paddusw xm6, xm0
+    HADDUW xm6, xm0
+    movd  eax, xm6
+    add   eax, 1
+    shr   eax, 1
+    RET
+
+; Internal helper: same body as pixel_sa8d_16x16 minus the prologue, ending
+; in a plain ret.  Leaves the rounded 16x16 SA8D in eax; callers (e.g.
+; pixel_sa8d_32x32 below) must perform SATD_START_AVX2 setup and re-zero m6
+; between invocations.
+cglobal pixel_sa8d_16x16_internal
+    call pixel_sa8d_8x8_internal ; pix[0]
+
+    sub  r0, r1
+    sub  r0, r1
+    add  r0, 8*SIZEOF_PIXEL
+    sub  r2, r3
+    sub  r2, r3
+    add  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal ; pix[8]
+
+    add  r0, r4
+    add  r0, r1
+    add  r2, r5
+    add  r2, r3
+    call pixel_sa8d_8x8_internal ; pix[8*stride+8]
+
+    sub  r0, r1
+    sub  r0, r1
+    sub  r0, 8*SIZEOF_PIXEL
+    sub  r2, r3
+    sub  r2, r3
+    sub  r2, 8*SIZEOF_PIXEL
+    call pixel_sa8d_8x8_internal ; pix[8*stride]
+
+    ; TODO: analyze Dynamic Range
+    vextracti128 xm0, m6, 1
+    paddusw xm6, xm0
+    HADDUW xm6, xm0
+    movd  eax, xm6
+    add   eax, 1
+    shr   eax, 1
+    ret
+
+%if ARCH_X86_64
+; int pixel_sa8d_32x32(pixel *src, intptr_t sstride, pixel *ref, intptr_t rstride)
+; Sums the four 16x16 quadrant results (returned in eax by the helper) in
+; r7d, re-zeroing the m6 accumulator between quadrants.
+cglobal pixel_sa8d_32x32, 4,8,8
+    ; TODO: R6 is RAX on x64 platform, so we use it directly
+
+    SATD_START_AVX2 m6, m7, 1
+    xor     r7d, r7d
+
+    call    pixel_sa8d_16x16_internal   ; [0]
+    pxor    m6, m6
+    add     r7d, eax
+
+    add     r0, r4
+    add     r0, r1
+    add     r2, r5
+    add     r2, r3
+    call    pixel_sa8d_16x16_internal   ; [2]
+    pxor    m6, m6
+    add     r7d, eax
+
+    ; step back up 16 rows and right 16 pixels (r4 = 5*stride, helper net
+    ; advance plus the explicit adds total 16 rows per column pass)
+    lea     eax, [r4 * 5 - 16]
+    sub     r0, rax
+    sub     r0, r1
+    lea     eax, [r5 * 5 - 16]
+    sub     r2, rax
+    sub     r2, r3
+    call    pixel_sa8d_16x16_internal   ; [1]
+    pxor    m6, m6
+    add     r7d, eax
+
+    add     r0, r4
+    add     r0, r1
+    add     r2, r5
+    add     r2, r3
+    call    pixel_sa8d_16x16_internal   ; [3]
+    add     eax, r7d
+    RET
+%endif ; ARCH_X86_64=1
+%endif ; HIGH_BIT_DEPTH
+
+; Input 10bit, Output 8bit
+;------------------------------------------------------------------------------------------------------------------------
+;void planecopy_sc(uint16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
+;------------------------------------------------------------------------------------------------------------------------
+; r0=src r1=srcStride (doubled to bytes) r2=dst r3=dstStride r4=width
+; r5=height r6=shift/column.  height-1 full rows are copied 16 pixels at a
+; time; the last row is then handled with progressively smaller tails so a
+; width that is not a multiple of 16 never reads past the plane.
+INIT_XMM sse2
+cglobal downShift_16, 7,7,3
+    movd        m0, r6d        ; m0 = shift
+    add         r1, r1
+    dec         r5d
+.loopH:
+    xor         r6, r6
+.loopW:
+    movu        m1, [r0 + r6 * 2]
+    movu        m2, [r0 + r6 * 2 + 16]
+    psrlw       m1, m0
+    psrlw       m2, m0
+    packuswb    m1, m2
+    movu        [r2 + r6], m1
+
+    add         r6, 16
+    cmp         r6d, r4d
+    jl          .loopW
+
+    ; move to next row
+    add         r0, r1
+    add         r2, r3
+    dec         r5d
+    jnz         .loopH
+
+;processing last row of every frame [To handle width which not a multiple of 16]
+; NOTE(review): the first .loop16 iteration runs unconditionally, so width is
+; assumed to be at least 16 here — confirm against callers.
+.loop16:
+    movu        m1, [r0]
+    movu        m2, [r0 + 16]
+    psrlw       m1, m0
+    psrlw       m2, m0
+    packuswb    m1, m2
+    movu        [r2], m1
+
+    add         r0, 2 * mmsize
+    add         r2, mmsize
+    sub         r4d, 16
+    jz          .end
+    cmp         r4d, 15
+    jg          .loop16
+
+    ; 8-pixel tail
+    cmp         r4d, 8
+    jl          .process4
+    movu        m1, [r0]
+    psrlw       m1, m0
+    packuswb    m1, m1
+    movh        [r2], m1
+
+    add         r0, mmsize
+    add         r2, 8
+    sub         r4d, 8
+    jz          .end
+
+.process4:
+    cmp         r4d, 4
+    jl          .process2
+    movh        m1,[r0]
+    psrlw       m1, m0
+    packuswb    m1, m1
+    movd        [r2], m1
+
+    add         r0, 8
+    add         r2, 4
+    sub         r4d, 4
+    jz          .end
+
+.process2:
+    cmp         r4d, 2
+    jl          .process1
+    movd        m1, [r0]
+    psrlw       m1, m0
+    packuswb    m1, m1
+    movd        r6, m1
+    mov         [r2], r6w
+
+    add         r0, 4
+    add         r2, 2
+    sub         r4d, 2
+    jz          .end
+
+.process1:
+    movd        m1, [r0]
+    psrlw       m1, m0
+    packuswb    m1, m1
+    movd        r3, m1
+    mov         [r2], r3b
+.end:
+    RET
+
+; Input 10bit, Output 8bit
+;-------------------------------------------------------------------------------------------------------------------------------------
+;void planecopy_sp(uint16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
+;-------------------------------------------------------------------------------------------------------------------------------------
+; AVX2 variant of downShift_16: 32 pixels per iteration; vpermq fixes the
+; lane interleave that the in-lane packuswb produces.
+INIT_YMM avx2
+cglobal downShift_16, 6,7,3
+    movd        xm0, r6m        ; m0 = shift
+    add         r1d, r1d
+    dec         r5d
+.loopH:
+    xor         r6, r6
+.loopW:
+    movu        m1, [r0 + r6 * 2 +  0]
+    movu        m2, [r0 + r6 * 2 + 32]
+    vpsrlw      m1, xm0
+    vpsrlw      m2, xm0
+    packuswb    m1, m2
+    vpermq      m1, m1, 11011000b
+    movu        [r2 + r6], m1
+
+    add         r6d, mmsize
+    cmp         r6d, r4d
+    jl          .loopW
+
+    ; move to next row
+    add         r0, r1
+    add         r2, r3
+    dec         r5d
+    jnz         .loopH
+
+; processing last row of every frame [To handle width which not a multiple of 32]
+; r6d = number of full 32-pixel chunks, r4d = remaining 0..31 pixels
+    mov         r6d, r4d
+    and         r4d, 31
+    shr         r6d, 5
+
+.loop32:
+    movu        m1, [r0]
+    movu        m2, [r0 + 32]
+    psrlw       m1, xm0
+    psrlw       m2, xm0
+    packuswb    m1, m2
+    vpermq      m1, m1, 11011000b
+    movu        [r2], m1
+
+    add         r0, 2*mmsize
+    add         r2, mmsize
+    dec         r6d
+    jnz         .loop32
+
+    ; 16-pixel tail
+    cmp         r4d, 16
+    jl          .process8
+    movu        m1, [r0]
+    psrlw       m1, xm0
+    packuswb    m1, m1
+    vpermq      m1, m1, 10001000b
+    movu        [r2], xm1
+
+    add         r0, mmsize
+    add         r2, 16
+    sub         r4d, 16
+    jz          .end
+
+.process8:
+    cmp         r4d, 8
+    jl          .process4
+    movu        m1, [r0]
+    psrlw       m1, xm0
+    packuswb    m1, m1
+    movq        [r2], xm1
+
+    add         r0, 16
+    add         r2, 8
+    sub         r4d, 8
+    jz          .end
+
+.process4:
+    cmp         r4d, 4
+    jl          .process2
+    movq        xm1,[r0]
+    psrlw       m1, xm0
+    packuswb    m1, m1
+    movd        [r2], xm1
+
+    add         r0, 8
+    add         r2, 4
+    sub         r4d, 4
+    jz          .end
+
+.process2:
+    cmp         r4d, 2
+    jl          .process1
+    movd        xm1, [r0]
+    psrlw       m1, xm0
+    packuswb    m1, m1
+    movd        r6d, xm1
+    mov         [r2], r6w
+
+    add         r0, 4
+    add         r2, 2
+    sub         r4d, 2
+    jz          .end
+
+.process1:
+    movd        xm1, [r0]
+    psrlw       m1, xm0
+    packuswb    m1, m1
+    movd        r3d, xm1
+    mov         [r2], r3b
+.end:
+    RET
+
+; Input 8bit, Output 10bit
+;---------------------------------------------------------------------------------------------------------------------
+;void planecopy_cp(uint8_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int width, int height, int shift)
+;---------------------------------------------------------------------------------------------------------------------
+; Zero-extends 8-bit pixels to 16-bit and shifts left by r6m.  height-1 full
+; rows first; the last row is done in 8-pixel steps with a final overlapping
+; (negatively offset) vector so no bytes past the row end are read.
+INIT_XMM sse4
+cglobal upShift_8, 6,7,3
+    movd        xm2, r6m
+    add         r3d, r3d
+    dec         r5d
+
+.loopH:
+    xor         r6, r6
+.loopW:
+    pmovzxbw    m0,[r0 + r6]
+    pmovzxbw    m1,[r0 + r6 + mmsize/2]
+    psllw       m0, m2
+    psllw       m1, m2
+    movu        [r2 + r6 * 2], m0
+    movu        [r2 + r6 * 2 + mmsize], m1
+
+    add         r6d, mmsize
+    cmp         r6d, r4d
+    jl         .loopW
+
+    ; move to next row
+    add         r0, r1
+    add         r2, r3
+    dec         r5d
+    jg         .loopH
+
+    ; processing last row of every frame [To handle width which not a multiple of 16]
+    ; r1 = (width & 7) - 8, a negative offset for the final overlapping load
+    mov         r1d, (mmsize/2 - 1)
+    and         r1d, r4d
+    sub         r1, mmsize/2
+
+    ; NOTE: Width MUST BE more than or equal to 8
+    shr         r4d, 3          ; width / 8: each iteration handles mmsize/2 = 8 pixels
+.loopW8:
+    pmovzxbw    m0,[r0]
+    psllw       m0, m2
+    movu        [r2], m0
+    add         r0, mmsize/2
+    add         r2, mmsize
+    dec         r4d
+    jg         .loopW8
+
+    ; Mac OS X can't read beyond array bound, so rollback some bytes
+    pmovzxbw    m0,[r0 + r1]
+    psllw       m0, m2
+    movu        [r2 + r1 * 2], m0
+    RET
+
+
+;---------------------------------------------------------------------------------------------------------------------
+;void planecopy_cp(uint8_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int width, int height, int shift)
+;---------------------------------------------------------------------------------------------------------------------
+; AVX2 variant of upShift_8: 32 pixels per main iteration, 16 per last-row
+; step, with the same overlapping final load to avoid over-reading the row.
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal upShift_8, 6,7,3
+    movd        xm2, r6m
+    add         r3d, r3d
+    dec         r5d
+
+.loopH:
+    xor         r6, r6
+.loopW:
+    pmovzxbw    m0,[r0 + r6]
+    pmovzxbw    m1,[r0 + r6 + mmsize/2]
+    psllw       m0, xm2
+    psllw       m1, xm2
+    movu        [r2 + r6 * 2], m0
+    movu        [r2 + r6 * 2 + mmsize], m1
+
+    add         r6d, mmsize
+    cmp         r6d, r4d
+    jl         .loopW
+
+    ; move to next row
+    add         r0, r1
+    add         r2, r3
+    dec         r5d
+    jg         .loopH
+
+    ; processing last row of every frame [To handle width which not a multiple of 32]
+    ; r1 = (width & 15) - 16, a negative offset for the final overlapping load
+    mov         r1d, (mmsize/2 - 1)
+    and         r1d, r4d
+    sub         r1, mmsize/2
+
+    ; NOTE: Width MUST BE more than or equal to 16
+    shr         r4d, 4          ; width / 16: each iteration handles mmsize/2 = 16 pixels
+.loopW16:
+    pmovzxbw    m0,[r0]
+    psllw       m0, xm2
+    movu        [r2], m0
+    add         r0, mmsize/2
+    add         r2, mmsize
+    dec         r4d
+    jg         .loopW16
+
+    ; Mac OS X can't read beyond array bound, so rollback some bytes
+    pmovzxbw    m0,[r0 + r1]
+    psllw       m0, xm2
+    movu        [r2 + r1 * 2], m0
+    RET
+%endif
+
+; Packed-dword absolute value of two registers.
+; With SSSE3+ uses pabsd directly; otherwise computes max(x, 0-x), with an
+; in-place variant when dst1 aliases src1.
+%macro ABSD2 6 ; dst1, dst2, src1, src2, tmp, tmp
+%if cpuflag(ssse3)
+    pabsd   %1, %3
+    pabsd   %2, %4
+%elifidn %1, %3
+    pxor    %5, %5
+    pxor    %6, %6
+    psubd   %5, %1
+    psubd   %6, %2
+    pmaxsd  %1, %5
+    pmaxsd  %2, %6
+%else
+    pxor    %1, %1
+    pxor    %2, %2
+    psubd   %1, %3
+    psubd   %2, %4
+    pmaxsd  %1, %3
+    pmaxsd  %2, %4
+%endif
+%endmacro
+
+
+; Input 10bit, Output 12bit
+;------------------------------------------------------------------------------------------------------------------------
+;void planecopy_sp_shl(uint16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
+;------------------------------------------------------------------------------------------------------------------------
+; Shifts 16-bit pixels LEFT by r6m and clamps to pw_pixel_max.  height-1
+; full rows first, then the last row in progressively smaller tails.
+INIT_XMM sse2
+cglobal upShift_16, 6,7,4
+    movd        m0, r6m        ; m0 = shift
+    mova        m3, [pw_pixel_max]
+    FIX_STRIDES r1d, r3d
+    dec         r5d
+.loopH:
+    xor         r6d, r6d
+.loopW:
+    movu        m1, [r0 + r6 * SIZEOF_PIXEL]
+    movu        m2, [r0 + r6 * SIZEOF_PIXEL + mmsize]
+    psllw       m1, m0
+    psllw       m2, m0
+    ; TODO: if input always valid, we can remove below 2 instructions.
+    pand        m1, m3
+    pand        m2, m3
+    movu        [r2 + r6 * SIZEOF_PIXEL], m1
+    movu        [r2 + r6 * SIZEOF_PIXEL + mmsize], m2
+
+    add         r6, mmsize * 2 / SIZEOF_PIXEL
+    cmp         r6d, r4d
+    jl         .loopW
+
+    ; move to next row
+    add         r0, r1
+    add         r2, r3
+    dec         r5d
+    jnz        .loopH
+
+;processing last row of every frame [To handle width which not a multiple of 16]
+
+.loop16:
+    movu        m1, [r0]
+    movu        m2, [r0 + mmsize]
+    psllw       m1, m0
+    psllw       m2, m0
+    pand        m1, m3
+    pand        m2, m3
+    movu        [r2], m1
+    movu        [r2 + mmsize], m2
+
+    add         r0, 2 * mmsize
+    add         r2, 2 * mmsize
+    sub         r4d, 16
+    jz         .end
+    jg         .loop16
+
+    ; 8-pixel tail
+    cmp         r4d, 8
+    jl         .process4
+    movu        m1, [r0]
+    psllw       m1, m0         ; BUGFIX: was psrlw — a right shift, which DOWN-shifted
+                               ; this 8-pixel tail; every other path here up-shifts
+                               ; (cf. the matching psllw in the AVX2 version's 16-tail)
+    pand        m1, m3
+    movu        [r2], m1
+
+    add         r0, mmsize
+    add         r2, mmsize
+    sub         r4d, 8
+    jz          .end
+
+.process4:
+    cmp         r4d, 4
+    jl         .process2
+    movh        m1,[r0]
+    psllw       m1, m0
+    pand        m1, m3
+    movh        [r2], m1
+
+    add         r0, 8
+    add         r2, 8
+    sub         r4d, 4
+    jz         .end
+
+.process2:
+    cmp         r4d, 2
+    jl         .process1
+    movd        m1, [r0]
+    psllw       m1, m0
+    pand        m1, m3
+    movd        [r2], m1
+
+    add         r0, 4
+    add         r2, 4
+    sub         r4d, 2
+    jz         .end
+
+.process1:
+    movd        m1, [r0]
+    psllw       m1, m0
+    pand        m1, m3
+    movd        r3, m1
+    mov         [r2], r3w
+.end:
+    RET
+
+; Input 10bit, Output 12bit
+;-------------------------------------------------------------------------------------------------------------------------------------
+;void planecopy_sp_shl(uint16_t *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
+;-------------------------------------------------------------------------------------------------------------------------------------
+; AVX2 variant of upShift_16: 32 pixels per main iteration, then 16/8/4/2/1
+; tails for the last row.
+; TODO: NO TEST CODE!
+INIT_YMM avx2
+cglobal upShift_16, 6,7,4
+    movd        xm0, r6m        ; m0 = shift
+    vbroadcasti128 m3, [pw_pixel_max]
+    FIX_STRIDES r1d, r3d
+    dec         r5d
+.loopH:
+    xor         r6d, r6d
+.loopW:
+    movu        m1, [r0 + r6 * SIZEOF_PIXEL]
+    movu        m2, [r0 + r6 * SIZEOF_PIXEL + mmsize]
+    psllw       m1, xm0
+    psllw       m2, xm0
+    pand        m1, m3
+    pand        m2, m3
+    movu        [r2 + r6 * SIZEOF_PIXEL], m1
+    movu        [r2 + r6 * SIZEOF_PIXEL + mmsize], m2
+
+    add         r6, mmsize * 2 / SIZEOF_PIXEL
+    cmp         r6d, r4d
+    jl         .loopW
+
+    ; move to next row
+    add         r0, r1
+    add         r2, r3
+    dec         r5d
+    jnz        .loopH
+
+; processing last row of every frame [To handle width which not a multiple of 32]
+; r6d = number of full 32-pixel chunks, r4d = remaining 0..31 pixels
+    mov         r6d, r4d
+    and         r4d, 31
+    shr         r6d, 5
+
+.loop32:
+    movu        m1, [r0]
+    movu        m2, [r0 + mmsize]
+    psllw       m1, xm0
+    psllw       m2, xm0
+    pand        m1, m3
+    pand        m2, m3
+    movu        [r2], m1
+    movu        [r2 + mmsize], m2
+
+    add         r0, 2*mmsize
+    add         r2, 2*mmsize
+    dec         r6d
+    jnz        .loop32
+
+    cmp         r4d, 16
+    jl         .process8
+    movu        m1, [r0]
+    psllw       m1, xm0
+    pand        m1, m3
+    movu        [r2], m1
+
+    add         r0, mmsize
+    add         r2, mmsize
+    sub         r4d, 16
+    jz         .end
+
+.process8:
+    cmp         r4d, 8
+    jl         .process4
+    movu        xm1, [r0]
+    psllw       xm1, xm0
+    pand        xm1, xm3
+    movu        [r2], xm1
+
+    add         r0, 16
+    add         r2, 16
+    sub         r4d, 8
+    jz         .end
+
+.process4:
+    cmp         r4d, 4
+    jl          .process2
+    movq        xm1,[r0]
+    psllw       xm1, xm0
+    pand        xm1, xm3
+    movq        [r2], xm1
+
+    add         r0, 8
+    add         r2, 8
+    sub         r4d, 4
+    jz         .end
+
+.process2:
+    cmp         r4d, 2
+    jl         .process1
+    movd        xm1, [r0]
+    psllw       xm1, xm0
+    pand        xm1, xm3
+    movd        [r2], xm1
+
+    add         r0, 4
+    add         r2, 4
+    sub         r4d, 2
+    jz         .end
+
+.process1:
+    movd        xm1, [r0]
+    psllw       xm1, xm0
+    pand        xm1, xm3
+    movd        r3d, xm1
+    mov         [r2], r3w
+.end:
+    RET
+
+
+;---------------------------------------------------------------------------------------------------------------------
+;int psyCost_pp(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride)
+;---------------------------------------------------------------------------------------------------------------------
+; Psy cost of one 4x4 block: for each of source and recon compute
+; energy = Hadamard-magnitude sum - (pixel sum >> 2), then return
+; |energy(src) - energy(recon)| in eax.
+INIT_XMM sse4
+cglobal psyCost_pp_4x4, 4, 5, 8
+
+%if HIGH_BIT_DEPTH
+    FIX_STRIDES r1, r3
+    lea             r4, [3 * r1]
+    movddup         m0, [r0]
+    movddup         m1, [r0 + r1]
+    movddup         m2, [r0 + r1 * 2]
+    movddup         m3, [r0 + r4]
+    mova            m4, [hmul_8w]
+    pmaddwd         m0, m4
+    pmaddwd         m1, m4
+    pmaddwd         m2, m4
+    pmaddwd         m3, m4
+
+    ; m5 = source pixel sum >> 2 (DC term)
+    paddd           m5, m0, m1
+    paddd           m5, m2
+    paddd           m5, m3
+    psrldq          m4, m5, 4
+    paddd           m5, m4
+    psrld           m5, 2
+
+    ; 4x4 Hadamard via sumsub butterflies + transpose, then abs/max reduce
+    SUMSUB_BA d, 0, 1, 4
+    SUMSUB_BA d, 2, 3, 4
+    SUMSUB_BA d, 0, 2, 4
+    SUMSUB_BA d, 1, 3, 4
+    %define ORDER unord
+    TRANS q, ORDER, 0, 2, 4, 6
+    TRANS q, ORDER, 1, 3, 4, 6
+    ABSD2 m0, m2, m0, m2, m4, m6
+    pmaxsd          m0, m2
+    ABSD2 m1, m3, m1, m3, m4, m6
+    pmaxsd          m1, m3
+    paddd           m0, m1
+    movhlps         m1, m0
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+
+    ; m7 = source energy
+    psubd           m7, m0, m5
+
+    ; repeat for recon block
+    lea             r4, [3 * r3]
+    movddup         m0, [r2]
+    movddup         m1, [r2 + r3]
+    movddup         m2, [r2 + r3 * 2]
+    movddup         m3, [r2 + r4]
+    mova            m4, [hmul_8w]
+    pmaddwd         m0, m4
+    pmaddwd         m1, m4
+    pmaddwd         m2, m4
+    pmaddwd         m3, m4
+
+    paddd           m5, m0, m1
+    paddd           m5, m2
+    paddd           m5, m3
+    psrldq          m4, m5, 4
+    paddd           m5, m4
+    psrld           m5, 2
+
+    SUMSUB_BA d, 0, 1, 4
+    SUMSUB_BA d, 2, 3, 4
+    SUMSUB_BA d, 0, 2, 4
+    SUMSUB_BA d, 1, 3, 4
+    %define ORDER unord
+    TRANS q, ORDER, 0, 2, 4, 6
+    TRANS q, ORDER, 1, 3, 4, 6
+    ABSD2 m0, m2, m0, m2, m4, m6
+    pmaxsd          m0, m2
+    ABSD2 m1, m3, m1, m3, m4, m6
+    pmaxsd          m1, m3
+    paddd           m0, m1
+    movhlps         m1, m0
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+
+    psubd           m0, m5
+
+    ; eax = |src energy - recon energy|
+    psubd           m7, m0
+    pabsd           m0, m7
+    movd            eax, m0
+
+%else ; !HIGH_BIT_DEPTH
+    lea             r4, [3 * r1]
+    movd            m0, [r0]
+    movd            m1, [r0 + r1]
+    movd            m2, [r0 + r1 * 2]
+    movd            m3, [r0 + r4]
+    shufps          m0, m1, 0
+    shufps          m2, m3, 0
+    mova            m4, [hmul_4p]
+    pmaddubsw       m0, m4
+    pmaddubsw       m2, m4
+
+    ; m5 = source pixel sum >> 2 (DC term)
+    paddw           m5, m0, m2
+    movhlps         m4, m5
+    paddw           m5, m4
+    pmaddwd         m5, [pw_1]
+    psrld           m5, 2
+
+    HADAMARD 0, sumsub, 0, 2, 1, 3
+    HADAMARD 4, sumsub, 0, 2, 1, 3
+    HADAMARD 1, amax, 0, 2, 1, 3
+    HADDW m0, m2
+
+    ; m6 = source energy
+    psubd           m6, m0, m5
+
+    ; repeat for recon block
+    lea             r4, [3 * r3]
+    movd            m0, [r2]
+    movd            m1, [r2 + r3]
+    movd            m2, [r2 + r3 * 2]
+    movd            m3, [r2 + r4]
+    shufps          m0, m1, 0
+    shufps          m2, m3, 0
+    mova            m4, [hmul_4p]
+    pmaddubsw       m0, m4
+    pmaddubsw       m2, m4
+
+    paddw           m5, m0, m2
+    movhlps         m4, m5
+    paddw           m5, m4
+    pmaddwd         m5, [pw_1]
+    psrld           m5, 2
+
+    HADAMARD 0, sumsub, 0, 2, 1, 3
+    HADAMARD 4, sumsub, 0, 2, 1, 3
+    HADAMARD 1, amax, 0, 2, 1, 3
+    HADDW m0, m2
+
+    psubd           m0, m5
+
+    ; eax = |src energy - recon energy|
+    psubd           m6, m0
+    pabsd           m0, m6
+    movd            eax, m0
+%endif ; HIGH_BIT_DEPTH
+    RET
+
+%if ARCH_X86_64
+; int psyCost_pp_8x8(const pixel*, intptr_t, const pixel*, intptr_t)
+; Psy cost of one 8x8 block: energy = 8x8 Hadamard magnitude sum minus
+; rounded DC ((sum>>2 +1)>>1); returns |energy(src) - energy(recon)| in eax.
+INIT_XMM sse4
+cglobal psyCost_pp_8x8, 4, 6, 13
+
+%if HIGH_BIT_DEPTH
+    FIX_STRIDES r1, r3
+    lea             r4, [3 * r1]
+    pxor            m10, m10
+    movu            m0, [r0]
+    movu            m1, [r0 + r1]
+    movu            m2, [r0 + r1 * 2]
+    movu            m3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r1]
+    movu            m6, [r5 + r1 * 2]
+    movu            m7, [r5 + r4]
+
+    ; m8 = source pixel sum >> 2 (DC term)
+    paddw           m8, m0, m1
+    paddw           m8, m2
+    paddw           m8, m3
+    paddw           m8, m4
+    paddw           m8, m5
+    paddw           m8, m6
+    paddw           m8, m7
+    pmaddwd         m8, [pw_1]
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
+
+    paddd           m0, m1
+    paddd           m0, m2
+    paddd           m0, m3
+    HADDUW m0, m1
+    paddd           m0, [pd_1]
+    psrld           m0, 1
+    ; m10 = source energy
+    psubd           m10, m0, m8
+
+    ; repeat for recon block
+    lea             r4, [3 * r3]
+    movu            m0, [r2]
+    movu            m1, [r2 + r3]
+    movu            m2, [r2 + r3 * 2]
+    movu            m3, [r2 + r4]
+    lea             r5, [r2 + r3 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r3]
+    movu            m6, [r5 + r3 * 2]
+    movu            m7, [r5 + r4]
+
+    paddw           m8, m0, m1
+    paddw           m8, m2
+    paddw           m8, m3
+    paddw           m8, m4
+    paddw           m8, m5
+    paddw           m8, m6
+    paddw           m8, m7
+    pmaddwd         m8, [pw_1]
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
+
+    paddd           m0, m1
+    paddd           m0, m2
+    paddd           m0, m3
+    HADDUW m0, m1
+    paddd           m0, [pd_1]
+    psrld           m0, 1
+    psubd           m0, m8
+    ; eax = |src energy - recon energy|
+    psubd           m10, m0
+    pabsd           m0, m10
+    movd            eax, m0
+%else ; !HIGH_BIT_DEPTH
+    lea             r4, [3 * r1]
+    mova            m8, [hmul_8p]
+
+    movddup         m0, [r0]
+    movddup         m1, [r0 + r1]
+    movddup         m2, [r0 + r1 * 2]
+    movddup         m3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movddup         m4, [r5]
+    movddup         m5, [r5 + r1]
+    movddup         m6, [r5 + r1 * 2]
+    movddup         m7, [r5 + r4]
+
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m8
+    pmaddubsw       m2, m8
+    pmaddubsw       m3, m8
+    pmaddubsw       m4, m8
+    pmaddubsw       m5, m8
+    pmaddubsw       m6, m8
+    pmaddubsw       m7, m8
+
+    ; m11 = source pixel sum >> 2 (DC term)
+    paddw           m11, m0, m1
+    paddw           m11, m2
+    paddw           m11, m3
+    paddw           m11, m4
+    paddw           m11, m5
+    paddw           m11, m6
+    paddw           m11, m7
+
+    pmaddwd         m11, [pw_1]
+    psrldq          m10, m11, 4
+    paddd           m11, m10
+    psrld           m11, 2
+
+    HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 10
+
+    paddw           m0, m1
+    paddw           m0, m2
+    paddw           m0, m3
+    HADDW m0, m1
+
+    paddd           m0, [pd_1]
+    psrld           m0, 1
+    ; m12 = source energy
+    psubd           m12, m0, m11
+
+    ; repeat for recon block
+    lea             r4, [3 * r3]
+
+    movddup         m0, [r2]
+    movddup         m1, [r2 + r3]
+    movddup         m2, [r2 + r3 * 2]
+    movddup         m3, [r2 + r4]
+    lea             r5, [r2 + r3 * 4]
+    movddup         m4, [r5]
+    movddup         m5, [r5 + r3]
+    movddup         m6, [r5 + r3 * 2]
+    movddup         m7, [r5 + r4]
+
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m8
+    pmaddubsw       m2, m8
+    pmaddubsw       m3, m8
+    pmaddubsw       m4, m8
+    pmaddubsw       m5, m8
+    pmaddubsw       m6, m8
+    pmaddubsw       m7, m8
+
+    paddw           m11, m0, m1
+    paddw           m11, m2
+    paddw           m11, m3
+    paddw           m11, m4
+    paddw           m11, m5
+    paddw           m11, m6
+    paddw           m11, m7
+
+    pmaddwd         m11, [pw_1]
+    psrldq          m10, m11, 4
+    paddd           m11, m10
+    psrld           m11, 2
+
+    HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 10
+
+    paddw           m0, m1
+    paddw           m0, m2
+    paddw           m0, m3
+    HADDW m0, m1
+
+    paddd           m0, [pd_1]
+    psrld           m0, 1
+    psubd           m0, m11
+    ; eax = |src energy - recon energy|
+    psubd           m12, m0
+    pabsd           m0, m12
+    movd            eax, m0
+%endif ; HIGH_BIT_DEPTH
+    RET
+%endif
+
+%if ARCH_X86_64
+; int psyCost_pp_16x16(const pixel*, intptr_t, const pixel*, intptr_t)
+; Psy cost of a 16x16 block: iterates a 2x2 grid of 8x8 sub-blocks
+; (r7d = row counter, r6d = column counter), computing the same per-8x8
+; energy difference as psyCost_pp_8x8 and accumulating the absolute
+; differences; total returned in eax.
+%if HIGH_BIT_DEPTH
+INIT_XMM sse4
+cglobal psyCost_pp_16x16, 4, 9, 14
+
+    FIX_STRIDES r1, r3
+    lea             r4, [3 * r1]
+    lea             r8, [3 * r3]
+    mova            m12, [pw_1]
+    mova            m13, [pd_1]
+    pxor            m11, m11
+    mov             r7d, 2
+.loopH:
+    mov             r6d, 2
+.loopW:
+    pxor            m10, m10
+    movu            m0, [r0]
+    movu            m1, [r0 + r1]
+    movu            m2, [r0 + r1 * 2]
+    movu            m3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r1]
+    movu            m6, [r5 + r1 * 2]
+    movu            m7, [r5 + r4]
+
+    ; m8 = source pixel sum >> 2 (DC term)
+    paddw           m8, m0, m1
+    paddw           m8, m2
+    paddw           m8, m3
+    paddw           m8, m4
+    paddw           m8, m5
+    paddw           m8, m6
+    paddw           m8, m7
+    pmaddwd         m8, m12
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
+
+    paddd           m0, m1
+    paddd           m0, m2
+    paddd           m0, m3
+    HADDUW m0, m1
+    paddd           m0, m13
+    psrld           m0, 1
+    ; m10 = source energy for this 8x8 sub-block
+    psubd           m10, m0, m8
+
+    ; repeat for recon sub-block
+    movu            m0, [r2]
+    movu            m1, [r2 + r3]
+    movu            m2, [r2 + r3 * 2]
+    movu            m3, [r2 + r8]
+    lea             r5, [r2 + r3 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r3]
+    movu            m6, [r5 + r3 * 2]
+    movu            m7, [r5 + r8]
+
+    paddw           m8, m0, m1
+    paddw           m8, m2
+    paddw           m8, m3
+    paddw           m8, m4
+    paddw           m8, m5
+    paddw           m8, m6
+    paddw           m8, m7
+    pmaddwd         m8, m12
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
+
+    paddd           m0, m1
+    paddd           m0, m2
+    paddd           m0, m3
+    HADDUW m0, m1
+    paddd           m0, m13
+    psrld           m0, 1
+    psubd           m0, m8
+    ; m11 += |src energy - recon energy|
+    psubd           m10, m0
+    pabsd           m0, m10
+    paddd           m11, m0
+    add             r0, 16
+    add             r2, 16
+    dec             r6d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 32]
+    lea             r2, [r2 + r3 * 8 - 32]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, m11
+    RET
+%else ; !HIGH_BIT_DEPTH
+INIT_XMM sse4
+cglobal psyCost_pp_16x16, 4, 9, 15
+    lea             r4, [3 * r1]
+    lea             r8, [3 * r3]
+    mova            m8, [hmul_8p]
+    mova            m10, [pw_1]
+    mova            m14, [pd_1]
+    pxor            m13, m13
+    mov             r7d, 2
+.loopH:
+    mov             r6d, 2
+.loopW:
+    pxor            m12, m12
+    movddup         m0, [r0]
+    movddup         m1, [r0 + r1]
+    movddup         m2, [r0 + r1 * 2]
+    movddup         m3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movddup         m4, [r5]
+    movddup         m5, [r5 + r1]
+    movddup         m6, [r5 + r1 * 2]
+    movddup         m7, [r5 + r4]
+
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m8
+    pmaddubsw       m2, m8
+    pmaddubsw       m3, m8
+    pmaddubsw       m4, m8
+    pmaddubsw       m5, m8
+    pmaddubsw       m6, m8
+    pmaddubsw       m7, m8
+
+    ; m11 = source pixel sum >> 2 (DC term)
+    paddw           m11, m0, m1
+    paddw           m11, m2
+    paddw           m11, m3
+    paddw           m11, m4
+    paddw           m11, m5
+    paddw           m11, m6
+    paddw           m11, m7
+
+    pmaddwd         m11, m10
+    psrldq          m9, m11, 4
+    paddd           m11, m9
+    psrld           m11, 2
+
+    HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 9
+
+    paddw           m0, m1
+    paddw           m0, m2
+    paddw           m0, m3
+    HADDW m0, m1
+
+    paddd           m0, m14
+    psrld           m0, 1
+    ; m12 = source energy for this 8x8 sub-block
+    psubd           m12, m0, m11
+
+    ; repeat for recon sub-block
+    movddup         m0, [r2]
+    movddup         m1, [r2 + r3]
+    movddup         m2, [r2 + r3 * 2]
+    movddup         m3, [r2 + r8]
+    lea             r5, [r2 + r3 * 4]
+    movddup         m4, [r5]
+    movddup         m5, [r5 + r3]
+    movddup         m6, [r5 + r3 * 2]
+    movddup         m7, [r5 + r8]
+
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m8
+    pmaddubsw       m2, m8
+    pmaddubsw       m3, m8
+    pmaddubsw       m4, m8
+    pmaddubsw       m5, m8
+    pmaddubsw       m6, m8
+    pmaddubsw       m7, m8
+
+    paddw           m11, m0, m1
+    paddw           m11, m2
+    paddw           m11, m3
+    paddw           m11, m4
+    paddw           m11, m5
+    paddw           m11, m6
+    paddw           m11, m7
+
+    pmaddwd         m11, m10
+    psrldq          m9, m11, 4
+    paddd           m11, m9
+    psrld           m11, 2
+
+    HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 9
+
+    paddw           m0, m1
+    paddw           m0, m2
+    paddw           m0, m3
+    HADDW m0, m1
+
+    paddd           m0, m14
+    psrld           m0, 1
+    psubd           m0, m11
+    ; m13 += |src energy - recon energy|
+    psubd           m12, m0
+    pabsd           m0, m12
+    paddd           m13, m0
+    add             r0, 8
+    add             r2, 8
+    dec             r6d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 16]
+    lea             r2, [r2 + r3 * 8 - 16]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, m13
+    RET
+%endif ; HIGH_BIT_DEPTH
+%endif
+
+; psyCost_pp_32x32 (SSE4): psycho-visual cost between two 32x32 pixel blocks.
+; Walks a 4x4 grid of 8x8 sub-blocks; for each sub-block computes
+; energy = (satd + 1) >> 1 - (sad >> 2) for source and recon, accumulates
+; |energy_src - energy_rec| over all 16 sub-blocks, returns the sum in eax.
+; Args: r0 = source, r1 = source stride, r2 = recon, r3 = recon stride.
+%if ARCH_X86_64
+%if HIGH_BIT_DEPTH
+INIT_XMM sse4
+cglobal psyCost_pp_32x32, 4, 9, 14
+
+    FIX_STRIDES r1, r3          ; scale strides to bytes for 16-bit pixels
+    lea             r4, [3 * r1]
+    lea             r8, [3 * r3]
+    mova            m12, [pw_1]
+    mova            m13, [pd_1]
+    pxor            m11, m11    ; m11 = running total over all sub-blocks
+    mov             r7d, 4      ; 4 rows of 8x8 sub-blocks
+.loopH:
+    mov             r6d, 4      ; 4 columns of 8x8 sub-blocks
+.loopW:
+    pxor            m10, m10
+    ; load 8 rows of 8 source pixels (16-bit)
+    movu            m0, [r0]
+    movu            m1, [r0 + r1]
+    movu            m2, [r0 + r1 * 2]
+    movu            m3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r1]
+    movu            m6, [r5 + r1 * 2]
+    movu            m7, [r5 + r4]
+
+    ; m8 = SAD of the 8x8 source block: vertical sums, then horizontal
+    ; reduction via pmaddwd/movhlps/psrldq; finally sad >> 2
+    paddw           m8, m0, m1
+    paddw           m8, m2
+    paddw           m8, m3
+    paddw           m8, m4
+    paddw           m8, m5
+    paddw           m8, m6
+    paddw           m8, m7
+    pmaddwd         m8, m12
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    ; 8x8 Hadamard transform; 'amax' folds |coeff| maxima (x86util macro)
+    HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
+
+    paddd           m0, m1
+    paddd           m0, m2
+    paddd           m0, m3
+    HADDUW m0, m1               ; horizontal add of unsigned words
+    paddd           m0, m13
+    psrld           m0, 1       ; (satd + 1) >> 1
+    psubd           m10, m0, m8 ; source energy = satd' - sad'
+
+    ; same computation for the recon block
+    movu            m0, [r2]
+    movu            m1, [r2 + r3]
+    movu            m2, [r2 + r3 * 2]
+    movu            m3, [r2 + r8]
+    lea             r5, [r2 + r3 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r3]
+    movu            m6, [r5 + r3 * 2]
+    movu            m7, [r5 + r8]
+
+    paddw           m8, m0, m1
+    paddw           m8, m2
+    paddw           m8, m3
+    paddw           m8, m4
+    paddw           m8, m5
+    paddw           m8, m6
+    paddw           m8, m7
+    pmaddwd         m8, m12
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
+
+    paddd           m0, m1
+    paddd           m0, m2
+    paddd           m0, m3
+    HADDUW m0, m1
+    paddd           m0, m13
+    psrld           m0, 1
+    psubd           m0, m8      ; recon energy
+    psubd           m10, m0     ; energy difference
+    pabsd           m0, m10
+    paddd           m11, m0     ; accumulate |difference|
+    add             r0, 16      ; advance one 8x8 column (8 px * 2 bytes)
+    add             r2, 16
+    dec             r6d
+    jnz             .loopW
+    ; next sub-block row: down 8 lines, back 4 columns (4*16 = 64 bytes)
+    lea             r0, [r0 + r1 * 8 - 64]
+    lea             r2, [r2 + r3 * 8 - 64]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, m11
+    RET
+
+%else ; !HIGH_BIT_DEPTH
+; 8-bit path: pixels are packed bytes; hmul_8p + pmaddubsw expand each
+; 8-pixel row into signed words with alternating signs for the transform.
+INIT_XMM sse4
+cglobal psyCost_pp_32x32, 4, 9, 15
+
+    lea             r4, [3 * r1]
+    lea             r8, [3 * r3]
+    mova            m8, [hmul_8p]
+    mova            m10, [pw_1]
+    mova            m14, [pd_1]
+    pxor            m13, m13    ; m13 = running total
+    mov             r7d, 4
+.loopH:
+    mov             r6d, 4
+.loopW:
+    pxor            m12, m12
+    movddup         m0, [r0]
+    movddup         m1, [r0 + r1]
+    movddup         m2, [r0 + r1 * 2]
+    movddup         m3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movddup         m4, [r5]
+    movddup         m5, [r5 + r1]
+    movddup         m6, [r5 + r1 * 2]
+    movddup         m7, [r5 + r4]
+
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m8
+    pmaddubsw       m2, m8
+    pmaddubsw       m3, m8
+    pmaddubsw       m4, m8
+    pmaddubsw       m5, m8
+    pmaddubsw       m6, m8
+    pmaddubsw       m7, m8
+
+    ; m11 = SAD of source 8x8 (rows already sign-folded by hmul_8p)
+    paddw           m11, m0, m1
+    paddw           m11, m2
+    paddw           m11, m3
+    paddw           m11, m4
+    paddw           m11, m5
+    paddw           m11, m6
+    paddw           m11, m7
+
+    pmaddwd         m11, m10
+    psrldq          m9, m11, 4
+    paddd           m11, m9
+    psrld           m11, 2      ; sad >> 2
+
+    HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 9
+
+    paddw           m0, m1
+    paddw           m0, m2
+    paddw           m0, m3
+    HADDW m0, m1                ; horizontal add of words
+
+    paddd           m0, m14
+    psrld           m0, 1
+    psubd           m12, m0, m11  ; source energy
+
+    ; same computation for the recon block
+    movddup         m0, [r2]
+    movddup         m1, [r2 + r3]
+    movddup         m2, [r2 + r3 * 2]
+    movddup         m3, [r2 + r8]
+    lea             r5, [r2 + r3 * 4]
+    movddup         m4, [r5]
+    movddup         m5, [r5 + r3]
+    movddup         m6, [r5 + r3 * 2]
+    movddup         m7, [r5 + r8]
+
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m8
+    pmaddubsw       m2, m8
+    pmaddubsw       m3, m8
+    pmaddubsw       m4, m8
+    pmaddubsw       m5, m8
+    pmaddubsw       m6, m8
+    pmaddubsw       m7, m8
+
+    paddw           m11, m0, m1
+    paddw           m11, m2
+    paddw           m11, m3
+    paddw           m11, m4
+    paddw           m11, m5
+    paddw           m11, m6
+    paddw           m11, m7
+
+    pmaddwd         m11, m10
+    psrldq          m9, m11, 4
+    paddd           m11, m9
+    psrld           m11, 2
+
+    HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 9
+
+    paddw           m0, m1
+    paddw           m0, m2
+    paddw           m0, m3
+    HADDW m0, m1
+
+    paddd           m0, m14
+    psrld           m0, 1
+    psubd           m0, m11     ; recon energy
+    psubd           m12, m0
+    pabsd           m0, m12
+    paddd           m13, m0     ; accumulate |difference|
+    add             r0, 8       ; 8 pixels * 1 byte
+    add             r2, 8
+    dec             r6d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 32]
+    lea             r2, [r2 + r3 * 8 - 32]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, m13
+    RET
+%endif ; HIGH_BIT_DEPTH
+%endif
+
+; psyCost_pp_64x64 (SSE4): identical per-8x8 computation to psyCost_pp_32x32
+; above, but iterates an 8x8 grid of sub-blocks (loop counters 8) and the
+; end-of-row rewind constants are 8*16=128 bytes (16-bit) / 8*8=64 (8-bit).
+%if ARCH_X86_64
+%if HIGH_BIT_DEPTH
+INIT_XMM sse4
+cglobal psyCost_pp_64x64, 4, 9, 14
+
+    FIX_STRIDES r1, r3
+    lea             r4, [3 * r1]
+    lea             r8, [3 * r3]
+    mova            m12, [pw_1]
+    mova            m13, [pd_1]
+    pxor            m11, m11    ; running total
+    mov             r7d, 8      ; 8 sub-block rows
+.loopH:
+    mov             r6d, 8      ; 8 sub-block columns
+.loopW:
+    pxor            m10, m10
+    ; source 8x8: SAD then Hadamard SATD, energy = satd' - sad'
+    movu            m0, [r0]
+    movu            m1, [r0 + r1]
+    movu            m2, [r0 + r1 * 2]
+    movu            m3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r1]
+    movu            m6, [r5 + r1 * 2]
+    movu            m7, [r5 + r4]
+
+    paddw           m8, m0, m1
+    paddw           m8, m2
+    paddw           m8, m3
+    paddw           m8, m4
+    paddw           m8, m5
+    paddw           m8, m6
+    paddw           m8, m7
+    pmaddwd         m8, m12
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
+
+    paddd           m0, m1
+    paddd           m0, m2
+    paddd           m0, m3
+    HADDUW m0, m1
+    paddd           m0, m13
+    psrld           m0, 1
+    psubd           m10, m0, m8
+
+    ; recon 8x8: same computation
+    movu            m0, [r2]
+    movu            m1, [r2 + r3]
+    movu            m2, [r2 + r3 * 2]
+    movu            m3, [r2 + r8]
+    lea             r5, [r2 + r3 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r3]
+    movu            m6, [r5 + r3 * 2]
+    movu            m7, [r5 + r8]
+
+    paddw           m8, m0, m1
+    paddw           m8, m2
+    paddw           m8, m3
+    paddw           m8, m4
+    paddw           m8, m5
+    paddw           m8, m6
+    paddw           m8, m7
+    pmaddwd         m8, m12
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
+
+    paddd           m0, m1
+    paddd           m0, m2
+    paddd           m0, m3
+    HADDUW m0, m1
+    paddd           m0, m13
+    psrld           m0, 1
+    psubd           m0, m8
+    psubd           m10, m0
+    pabsd           m0, m10
+    paddd           m11, m0     ; accumulate |energy difference|
+    add             r0, 16
+    add             r2, 16
+    dec             r6d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 128]
+    lea             r2, [r2 + r3 * 8 - 128]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, m11
+    RET
+
+%else ; !HIGH_BIT_DEPTH
+INIT_XMM sse4
+cglobal psyCost_pp_64x64, 4, 9, 15
+
+    lea             r4, [3 * r1]
+    lea             r8, [3 * r3]
+    mova            m8, [hmul_8p]
+    mova            m10, [pw_1]
+    mova            m14, [pd_1]
+    pxor            m13, m13    ; running total
+    mov             r7d, 8
+.loopH:
+    mov             r6d, 8
+.loopW:
+    pxor            m12, m12
+    movddup         m0, [r0]
+    movddup         m1, [r0 + r1]
+    movddup         m2, [r0 + r1 * 2]
+    movddup         m3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movddup         m4, [r5]
+    movddup         m5, [r5 + r1]
+    movddup         m6, [r5 + r1 * 2]
+    movddup         m7, [r5 + r4]
+
+    ; expand byte pixels into sign-folded words (hmul_8p pattern)
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m8
+    pmaddubsw       m2, m8
+    pmaddubsw       m3, m8
+    pmaddubsw       m4, m8
+    pmaddubsw       m5, m8
+    pmaddubsw       m6, m8
+    pmaddubsw       m7, m8
+
+    paddw           m11, m0, m1
+    paddw           m11, m2
+    paddw           m11, m3
+    paddw           m11, m4
+    paddw           m11, m5
+    paddw           m11, m6
+    paddw           m11, m7
+
+    pmaddwd         m11, m10
+    psrldq          m9, m11, 4
+    paddd           m11, m9
+    psrld           m11, 2      ; sad >> 2
+
+    HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 9
+
+    paddw           m0, m1
+    paddw           m0, m2
+    paddw           m0, m3
+    HADDW m0, m1
+
+    paddd           m0, m14
+    psrld           m0, 1
+    psubd           m12, m0, m11  ; source energy
+
+    movddup         m0, [r2]
+    movddup         m1, [r2 + r3]
+    movddup         m2, [r2 + r3 * 2]
+    movddup         m3, [r2 + r8]
+    lea             r5, [r2 + r3 * 4]
+    movddup         m4, [r5]
+    movddup         m5, [r5 + r3]
+    movddup         m6, [r5 + r3 * 2]
+    movddup         m7, [r5 + r8]
+
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m8
+    pmaddubsw       m2, m8
+    pmaddubsw       m3, m8
+    pmaddubsw       m4, m8
+    pmaddubsw       m5, m8
+    pmaddubsw       m6, m8
+    pmaddubsw       m7, m8
+
+    paddw           m11, m0, m1
+    paddw           m11, m2
+    paddw           m11, m3
+    paddw           m11, m4
+    paddw           m11, m5
+    paddw           m11, m6
+    paddw           m11, m7
+
+    pmaddwd         m11, m10
+    psrldq          m9, m11, 4
+    paddd           m11, m9
+    psrld           m11, 2
+
+    HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 9
+
+    paddw           m0, m1
+    paddw           m0, m2
+    paddw           m0, m3
+    HADDW m0, m1
+
+    paddd           m0, m14
+    psrld           m0, 1
+    psubd           m0, m11     ; recon energy
+    psubd           m12, m0
+    pabsd           m0, m12
+    paddd           m13, m0
+    add             r0, 8
+    add             r2, 8
+    dec             r6d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 64]
+    lea             r2, [r2 + r3 * 8 - 64]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, m13
+    RET
+%endif ; HIGH_BIT_DEPTH
+%endif
+
+; psyCost_pp_4x4 (AVX2): psy cost of a single 4x4 block. Source rows go in
+; the low 128-bit lane, recon rows in the high lane, so one pass of the
+; 4x4 Hadamard + SAD computes both energies; the lanes are subtracted at
+; the end and |difference| is returned in eax.
+INIT_YMM avx2
+%if HIGH_BIT_DEPTH
+cglobal psyCost_pp_4x4, 4, 5, 6
+    add             r1d, r1d    ; stride in bytes for 16-bit pixels
+    add             r3d, r3d
+    lea              r4, [r1 * 3]
+    ; low lane = 4 source rows (each row duplicated by movddup)
+    movddup         xm0, [r0]
+    movddup         xm1, [r0 + r1]
+    movddup         xm2, [r0 + r1 * 2]
+    movddup         xm3, [r0 + r4]
+
+    ; high lane = 4 recon rows
+    lea              r4, [r3 * 3]
+    movddup         xm4, [r2]
+    movddup         xm5, [r2 + r3]
+    vinserti128      m0, m0, xm4, 1
+    vinserti128      m1, m1, xm5, 1
+    movddup         xm4, [r2 + r3 * 2]
+    movddup         xm5, [r2 + r4]
+    vinserti128      m2, m2, xm4, 1
+    vinserti128      m3, m3, xm5, 1
+
+    ; hmul_8w folds each duplicated row into (sum, diff) word pairs
+    mova             m4, [hmul_8w]
+    pmaddwd          m0, m4
+    pmaddwd          m1, m4
+    pmaddwd          m2, m4
+    pmaddwd          m3, m4
+    ; m5 = per-lane SAD >> 2
+    paddd            m5, m0, m1
+    paddd            m4, m2, m3
+    paddd            m5, m4
+    psrldq           m4, m5, 4
+    paddd            m5, m4
+    psrld            m5, 2
+
+    ; vertical 4-point butterflies of the Hadamard transform
+    mova             m4, m0
+    paddd            m0, m1
+    psubd            m1, m4
+    mova             m4, m2
+    paddd            m2, m3
+    psubd            m3, m4
+    mova             m4, m0
+    paddd            m0, m2
+    psubd            m2, m4
+    mova             m4, m1
+    paddd            m1, m3
+    psubd            m3, m4
+    ; transpose-and-abs-max to finish the 2D transform
+    movaps           m4, m0
+    vshufps          m4, m4, m2, 11011101b
+    vshufps          m0, m0, m2, 10001000b
+    movaps           m2, m1
+    vshufps          m2, m2, m3, 11011101b
+    vshufps          m1, m1, m3, 10001000b
+    pabsd            m0, m0
+    pabsd            m4, m4
+    pmaxsd           m0, m4
+    pabsd            m1, m1
+    pabsd            m2, m2
+    pmaxsd           m1, m2
+    paddd            m0, m1
+
+    ; horizontal reduction per lane, then energy = satd - sad'
+    vpermq           m1, m0, 11110101b
+    paddd            m0, m1
+    psrldq           m1, m0, 4
+    paddd            m0, m1
+    psubd            m0, m5
+
+    ; |recon energy - source energy|
+    vextracti128    xm1, m0, 1
+    psubd           xm1, xm0
+    pabsd           xm1, xm1
+    movd            eax, xm1
+    RET
+%else ; !HIGH_BIT_DEPTH
+cglobal psyCost_pp_4x4, 4, 5, 6
+    ; 8-bit path: pack source rows into xm0/xm2, recon rows into the
+    ; high lane, expand with hmul_4p + pmaddubsw, then 4x4 Hadamard.
+    lea             r4, [3 * r1]
+    movd            xm0, [r0]
+    movd            xm1, [r0 + r1]
+    movd            xm2, [r0 + r1 * 2]
+    movd            xm3, [r0 + r4]
+    vshufps         xm0, xm1, 0
+    vshufps         xm2, xm3, 0
+
+    lea             r4, [3 * r3]
+    movd            xm1, [r2]
+    movd            xm3, [r2 + r3]
+    movd            xm4, [r2 + r3 * 2]
+    movd            xm5, [r2 + r4]
+    vshufps         xm1, xm3, 0
+    vshufps         xm4, xm5, 0
+
+    vinserti128     m0, m0, xm1, 1
+    vinserti128     m2, m2, xm4, 1
+
+    mova            m4, [hmul_4p]
+    pmaddubsw       m0, m4
+    pmaddubsw       m2, m4
+
+    ; m5 = per-lane SAD >> 2
+    paddw           m5, m0, m2
+    mova            m1, m5
+    psrldq          m4, m5, 8
+    paddw           m5, m4
+    pmaddwd         m5, [pw_1]
+    psrld           m5, 2
+
+    ; Hadamard butterflies on word lanes; blend/shift pairs split the
+    ; even/odd 16-bit results out of the 32-bit lanes
+    vpsubw          m2, m2, m0
+    vpunpckhqdq     m0, m1, m2
+    vpunpcklqdq     m1, m1, m2
+    vpaddw          m2, m1, m0
+    vpsubw          m0, m0, m1
+    vpblendw        m1, m2, m0, 10101010b
+    vpslld          m0, m0, 10h
+    vpsrld          m2, m2, 10h
+    vpor            m0, m0, m2
+    vpabsw          m1, m1
+    vpabsw          m0, m0
+    vpmaxsw         m1, m1, m0
+    vpmaddwd        m1, m1, [pw_1]
+    psrldq          m2, m1, 8
+    paddd           m1, m2
+    psrldq          m3, m1, 4
+    paddd           m1, m3
+    psubd           m1, m5      ; per-lane energy
+    vextracti128    xm2, m1, 1
+    psubd           m1, m2
+    pabsd           m1, m1
+    movd            eax, xm1
+    RET
+%endif
+
+; PSY_PP_8x8: AVX2 helper macro (8-bit pixels) computing the psy-cost of one
+; 8x8 block pair. Source rows occupy the low 128-bit lane, recon rows the
+; high lane; a single 8x8 Hadamard pass yields both energies and the macro
+; leaves |energy_src - energy_rec| in xm0.
+; Expects: r0/r1 = source/stride, r2/r3 = recon/stride, r4 = 3*r1,
+; r7 = 3*r3, m8 = [hmul_8p]. Clobbers m0-m12, r5, r6.
+%macro PSY_PP_8x8 0
+    ; rows 0-3: source in low lane, recon inserted into high lane
+    movddup         m0, [r0 + r1 * 0]
+    movddup         m1, [r0 + r1 * 1]
+    movddup         m2, [r0 + r1 * 2]
+    movddup         m3, [r0 + r4 * 1]
+
+    lea             r5, [r0 + r1 * 4]
+
+    movddup         m4, [r2 + r3 * 0]
+    movddup         m5, [r2 + r3 * 1]
+    movddup         m6, [r2 + r3 * 2]
+    movddup         m7, [r2 + r7 * 1]
+
+    lea             r6, [r2 + r3 * 4]
+
+    vinserti128     m0, m0, xm4, 1
+    vinserti128     m1, m1, xm5, 1
+    vinserti128     m2, m2, xm6, 1
+    vinserti128     m3, m3, xm7, 1
+
+    ; rows 4-7, same lane layout
+    movddup         m4, [r5 + r1 * 0]
+    movddup         m5, [r5 + r1 * 1]
+    movddup         m6, [r5 + r1 * 2]
+    movddup         m7, [r5 + r4 * 1]
+
+    movddup         m9, [r6 + r3 * 0]
+    movddup         m10, [r6 + r3 * 1]
+    movddup         m11, [r6 + r3 * 2]
+    movddup         m12, [r6 + r7 * 1]
+
+    vinserti128     m4, m4, xm9, 1
+    vinserti128     m5, m5, xm10, 1
+    vinserti128     m6, m6, xm11, 1
+    vinserti128     m7, m7, xm12, 1
+
+    ; expand byte pixels into sign-folded words
+    pmaddubsw       m0, m8
+    pmaddubsw       m1, m8
+    pmaddubsw       m2, m8
+    pmaddubsw       m3, m8
+    pmaddubsw       m4, m8
+    pmaddubsw       m5, m8
+    pmaddubsw       m6, m8
+    pmaddubsw       m7, m8
+
+    ; m11 = per-lane SAD >> 2
+    paddw           m11, m0, m1
+    paddw           m11, m2
+    paddw           m11, m3
+    paddw           m11, m4
+    paddw           m11, m5
+    paddw           m11, m6
+    paddw           m11, m7
+
+    pmaddwd         m11, [pw_1]
+    psrldq          m10, m11, 4
+    paddd           m11, m10
+    psrld           m11, 2
+
+    ; vertical 8-point Hadamard butterflies (rows 0-3)
+    mova            m9, m0
+    paddw           m0, m1      ; m0+m1
+    psubw           m1, m9      ; m1-m0
+    mova            m9, m2
+    paddw           m2, m3      ; m2+m3
+    psubw           m3, m9      ; m3-m2
+    mova            m9, m0
+    paddw           m0, m2      ; m0+m1+m2+m3
+    psubw           m2, m9      ; m2+m3-m0+m1
+    mova            m9, m1
+    paddw           m1, m3      ; m1-m0+m3-m2
+    psubw           m3, m9      ; m3-m2-m1-m0
+
+    ; vertical butterflies (rows 4-7)
+    movdqa          m9, m4
+    paddw           m4, m5      ; m4+m5
+    psubw           m5, m9      ; m5-m4
+    movdqa          m9, m6
+    paddw           m6, m7      ; m6+m7
+    psubw           m7, m9      ; m7-m6
+    movdqa          m9, m4
+    paddw           m4, m6      ; m4+m5+m6+m7
+    psubw           m6, m9      ; m6+m7-m4+m5
+    movdqa          m9, m5
+    paddw           m5, m7      ; m5-m4+m7-m6
+    psubw           m7, m9      ; m7-m6-m5-m4
+
+    ; combine halves
+    movdqa          m9, m0
+    paddw           m0, m4      ; (m0+m1+m2+m3)+(m4+m5+m6+m7)
+    psubw           m4, m9      ; (m4+m5+m6+m7)-(m0+m1+m2+m3)
+    movdqa          m9, m1
+    paddw           m1, m5      ; (m1-m0+m3-m2)+(m5-m4+m7-m6)
+    psubw           m5, m9      ; (m5-m4+m7-m6)-(m1-m0+m3-m2)
+
+    ; horizontal stage: shufps pairs even/odd dwords, then butterfly
+    mova            m9, m0
+    vshufps         m9, m9, m4, 11011101b
+    vshufps         m0, m0, m4, 10001000b
+
+    movdqa          m4, m0
+    paddw           m0, m9      ; (a0 + a4) + (a4 - a0)
+    psubw           m9, m4      ; (a0 + a4) - (a4 - a0) == (a0 + a4) + (a0 - a4)
+
+    movaps          m4, m1
+    vshufps         m4, m4, m5, 11011101b
+    vshufps         m1, m1, m5, 10001000b
+
+    movdqa          m5, m1
+    paddw           m1, m4
+    psubw           m4, m5
+    movdqa          m5, m2
+    paddw           m2, m6
+    psubw           m6, m5
+    movdqa          m5, m3
+    paddw           m3, m7
+    psubw           m7, m5
+
+    movaps          m5, m2
+    vshufps         m5, m5, m6, 11011101b
+    vshufps         m2, m2, m6, 10001000b
+
+    movdqa          m6, m2
+    paddw           m2, m5
+    psubw           m5, m6
+    movaps          m6, m3
+
+    vshufps         m6, m6, m7, 11011101b
+    vshufps         m3, m3, m7, 10001000b
+
+    movdqa          m7, m3
+    paddw           m3, m6
+    psubw           m6, m7
+    movdqa          m7, m0
+
+    ; blend/shift splits interleaved word results; take abs-max per pair
+    pblendw         m0, m9, 10101010b
+    pslld           m9, 10h
+    psrld           m7, 10h
+    por             m9, m7
+    pabsw           m0, m0
+    pabsw           m9, m9
+    pmaxsw          m0, m9
+    movdqa          m7, m1
+    pblendw         m1, m4, 10101010b
+    pslld           m4, 10h
+    psrld           m7, 10h
+    por             m4, m7
+    pabsw           m1, m1
+    pabsw           m4, m4
+    pmaxsw          m1, m4
+    movdqa          m7, m2
+    pblendw         m2, m5, 10101010b
+    pslld           m5, 10h
+    psrld           m7, 10h
+    por             m5, m7
+    pabsw           m2, m2
+    pabsw           m5, m5
+    pmaxsw          m2, m5
+    mova            m7, m3
+
+    pblendw         m3, m6, 10101010b
+    pslld           m6, 10h
+    psrld           m7, 10h
+    por             m6, m7
+    pabsw           m3, m3
+    pabsw           m6, m6
+    pmaxsw          m3, m6
+    ; per-lane SATD reduction
+    paddw           m0, m1
+    paddw           m0, m2
+    paddw           m0, m3
+    pmaddwd         m0, [pw_1]
+    psrldq          m1, m0, 8
+    paddd           m0, m1
+
+    pshuflw         m1, m0, 00001110b
+    paddd           m0, m1
+    paddd           m0, [pd_1]
+    psrld           m0, 1       ; (satd + 1) >> 1
+
+    psubd           m0, m11     ; per-lane energy
+
+    ; xm0 = |recon energy - source energy|
+    vextracti128    xm1, m0, 1
+    psubd           m0, m1
+    pabsd           m0, m0
+%endmacro
+
+; PSY_PP_8x8_AVX2: AVX2 helper macro (16-bit pixels) for one 8x8 block pair.
+; Source rows fill the low 128-bit lane, recon rows the high lane; one
+; interleave-based 8x8 Hadamard pass produces both energies and the macro
+; leaves |energy_src - energy_rec| in xm1.
+; Expects: r0/r1 = source/stride (bytes), r2/r3 = recon/stride.
+; Clobbers m0-m10, r4, r5.
+%macro PSY_PP_8x8_AVX2 0
+    lea             r4, [r1 * 3]
+    movu           xm0, [r0]
+    movu           xm1, [r0 + r1]
+    movu           xm2, [r0 + r1 * 2]
+    movu           xm3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movu           xm4, [r5]
+    movu           xm5, [r5 + r1]
+    movu           xm6, [r5 + r1 * 2]
+    movu           xm7, [r5 + r4]
+
+    ; recon rows into the high lanes
+    lea             r4, [r3 * 3]
+    vinserti128     m0, m0, [r2], 1
+    vinserti128     m1, m1, [r2 + r3], 1
+    vinserti128     m2, m2, [r2 + r3 * 2], 1
+    vinserti128     m3, m3, [r2 + r4], 1
+    lea             r5, [r2 + r3 * 4]
+    vinserti128     m4, m4, [r5], 1
+    vinserti128     m5, m5, [r5 + r3], 1
+    vinserti128     m6, m6, [r5 + r3 * 2], 1
+    vinserti128     m7, m7, [r5 + r4], 1
+
+    ; m8 = per-lane SAD >> 2
+    paddw           m8, m0, m1
+    paddw           m8, m2
+    paddw           m8, m3
+    paddw           m8, m4
+    paddw           m8, m5
+    paddw           m8, m6
+    paddw           m8, m7
+    pmaddwd         m8, [pw_1]
+
+    psrldq          m9, m8, 8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    ; 8x8 Hadamard via butterfly + punpck interleave stages
+    psubw           m9, m1, m0
+    paddw           m0, m1
+    psubw           m1, m3, m2
+    paddw           m2, m3
+    punpckhwd       m3, m0, m9
+    punpcklwd       m0, m9
+    psubw           m9, m3, m0
+    paddw           m0, m3
+    punpckhwd       m3, m2, m1
+    punpcklwd       m2, m1
+    psubw           m10, m3, m2
+    paddw           m2, m3
+    psubw           m3, m5, m4
+    paddw           m4, m5
+    psubw           m5, m7, m6
+    paddw           m6, m7
+    punpckhwd       m1, m4, m3
+    punpcklwd       m4, m3
+    psubw           m7, m1, m4
+    paddw           m4, m1
+    punpckhwd       m3, m6, m5
+    punpcklwd       m6, m5
+    psubw           m1, m3, m6
+    paddw           m6, m3
+    psubw           m3, m2, m0
+    paddw           m0, m2
+    psubw           m2, m10, m9
+    paddw           m9, m10
+    punpckhdq       m5, m0, m3
+    punpckldq       m0, m3
+    psubw           m10, m5, m0
+    paddw           m0, m5
+    punpckhdq       m3, m9, m2
+    punpckldq       m9, m2
+    psubw           m5, m3, m9
+    paddw           m9, m3
+    psubw           m3, m6, m4
+    paddw           m4, m6
+    psubw           m6, m1, m7
+    paddw           m7, m1
+    punpckhdq       m2, m4, m3
+    punpckldq       m4, m3
+    psubw           m1, m2, m4
+    paddw           m4, m2
+    punpckhdq       m3, m7, m6
+    punpckldq       m7, m6
+    psubw           m2, m3, m7
+    paddw           m7, m3
+    psubw           m3, m4, m0
+    paddw           m0, m4
+    psubw           m4, m1, m10
+    paddw           m10, m1
+    ; final stage: abs-max of each qword pair
+    punpckhqdq      m6, m0, m3
+    punpcklqdq      m0, m3
+    pabsw           m0, m0
+    pabsw           m6, m6
+    pmaxsw          m0, m6
+    punpckhqdq      m3, m10, m4
+    punpcklqdq      m10, m4
+    pabsw           m10, m10
+    pabsw           m3, m3
+    pmaxsw          m10, m3
+    psubw           m3, m7, m9
+    paddw           m9, m7
+    psubw           m7, m2, m5
+    paddw           m5, m2
+    punpckhqdq      m4, m9, m3
+    punpcklqdq      m9, m3
+    pabsw           m9, m9
+    pabsw           m4, m4
+    pmaxsw          m9, m4
+    punpckhqdq      m3, m5, m7
+    punpcklqdq      m5, m7
+    pabsw           m5, m5
+    pabsw           m3, m3
+    pmaxsw          m5, m3
+    ; NOTE(review): word maxima are summed with paddd, then split back into
+    ; 16-bit halves below before the dword reduction — matches upstream x265.
+    paddd           m0, m9
+    paddd           m0, m10
+    paddd           m0, m5
+    psrld           m9, m0, 16
+    pslld           m0, 16
+    psrld           m0, 16
+    paddd           m0, m9
+    psrldq          m9, m0, 8
+    paddd           m0, m9
+    psrldq          m9, m0, 4
+    paddd           m0, m9
+    paddd           m0, [pd_1]
+    psrld           m0, 1       ; (satd + 1) >> 1
+    psubd           m0, m8      ; per-lane energy
+
+    ; xm1 = |lane difference|
+    vextracti128   xm1, m0, 1
+    psubd          xm1, xm0
+    pabsd          xm1, xm1
+%endmacro
+
+; psyCost_pp_8x8 (AVX2): single 8x8 block, thin wrapper around the macros
+; above. HIGH_BIT_DEPTH doubles the strides (16-bit pixels) and uses
+; PSY_PP_8x8_AVX2 (result in xm1); the 8-bit path sets up the hmul_8p
+; constant and 3*stride registers expected by PSY_PP_8x8 (result in xm0).
+%if ARCH_X86_64
+%if HIGH_BIT_DEPTH
+cglobal psyCost_pp_8x8, 4, 8, 11
+    add            r1d, r1d
+    add            r3d, r3d
+    PSY_PP_8x8_AVX2
+    movd           eax, xm1
+    RET
+%else ; !HIGH_BIT_DEPTH
+INIT_YMM avx2
+cglobal psyCost_pp_8x8, 4, 8, 13
+    lea             r4, [3 * r1]
+    lea             r7, [3 * r3]
+    mova            m8, [hmul_8p]
+
+    PSY_PP_8x8
+
+    movd            eax, xm0
+    RET
+%endif
+%endif
+; psyCost_pp_16x16 (AVX2): 2x2 grid of 8x8 sub-blocks; accumulates the
+; per-sub-block |energy difference| from the 8x8 macros into m11/m13.
+%if ARCH_X86_64
+INIT_YMM avx2
+%if HIGH_BIT_DEPTH
+cglobal psyCost_pp_16x16, 4, 10, 12
+    add            r1d, r1d    ; byte strides for 16-bit pixels
+    add            r3d, r3d
+    pxor           m11, m11    ; accumulator
+
+    mov            r8d, 2
+.loopH:
+    mov            r9d, 2
+.loopW:
+    PSY_PP_8x8_AVX2
+
+    paddd         xm11, xm1
+    add             r0, 16     ; next 8x8 column (8 px * 2 bytes)
+    add             r2, 16
+    dec            r9d
+    jnz            .loopW
+    lea             r0, [r0 + r1 * 8 - 32]
+    lea             r2, [r2 + r3 * 8 - 32]
+    dec            r8d
+    jnz            .loopH
+    movd           eax, xm11
+    RET
+%else ; !HIGH_BIT_DEPTH
+cglobal psyCost_pp_16x16, 4, 10, 14
+    lea             r4, [3 * r1]
+    lea             r7, [3 * r3]
+    mova            m8, [hmul_8p]
+    pxor            m13, m13   ; accumulator
+
+    mov             r8d, 2
+.loopH:
+    mov             r9d, 2
+.loopW:
+    PSY_PP_8x8
+
+    paddd           m13, m0
+    add             r0, 8      ; next 8x8 column (8 px * 1 byte)
+    add             r2, 8
+    dec             r9d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 16]
+    lea             r2, [r2 + r3 * 8 - 16]
+    dec             r8d
+    jnz             .loopH
+    movd            eax, xm13
+    RET
+%endif
+%endif
+; psyCost_pp_32x32 (AVX2): same structure as the 16x16 AVX2 version but
+; over a 4x4 grid of 8x8 sub-blocks (rewind 64/32 bytes per row).
+%if ARCH_X86_64
+INIT_YMM avx2
+%if HIGH_BIT_DEPTH
+cglobal psyCost_pp_32x32, 4, 10, 12
+    add            r1d, r1d
+    add            r3d, r3d
+    pxor           m11, m11    ; accumulator
+
+    mov            r8d, 4
+.loopH:
+    mov            r9d, 4
+.loopW:
+    PSY_PP_8x8_AVX2
+
+    paddd         xm11, xm1
+    add             r0, 16
+    add             r2, 16
+    dec            r9d
+    jnz            .loopW
+    lea             r0, [r0 + r1 * 8 - 64]
+    lea             r2, [r2 + r3 * 8 - 64]
+    dec            r8d
+    jnz            .loopH
+    movd           eax, xm11
+    RET
+%else ; !HIGH_BIT_DEPTH
+cglobal psyCost_pp_32x32, 4, 10, 14
+    lea             r4, [3 * r1]
+    lea             r7, [3 * r3]
+    mova            m8, [hmul_8p]
+    pxor            m13, m13   ; accumulator
+
+    mov             r8d, 4
+.loopH:
+    mov             r9d, 4
+.loopW:
+    PSY_PP_8x8
+
+    paddd           m13, m0
+    add             r0, 8
+    add             r2, 8
+    dec             r9d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 32]
+    lea             r2, [r2 + r3 * 8 - 32]
+    dec             r8d
+    jnz             .loopH
+    movd            eax, xm13
+    RET
+%endif
+%endif
+; psyCost_pp_64x64 (AVX2): same structure as the 16x16/32x32 AVX2 versions
+; but over an 8x8 grid of 8x8 sub-blocks (rewind 128/64 bytes per row).
+%if ARCH_X86_64
+INIT_YMM avx2
+%if HIGH_BIT_DEPTH
+cglobal psyCost_pp_64x64, 4, 10, 12
+    add            r1d, r1d
+    add            r3d, r3d
+    pxor           m11, m11    ; accumulator
+
+    mov            r8d, 8
+.loopH:
+    mov            r9d, 8
+.loopW:
+    PSY_PP_8x8_AVX2
+
+    paddd         xm11, xm1
+    add             r0, 16
+    add             r2, 16
+    dec            r9d
+    jnz            .loopW
+    lea             r0, [r0 + r1 * 8 - 128]
+    lea             r2, [r2 + r3 * 8 - 128]
+    dec            r8d
+    jnz            .loopH
+    movd           eax, xm11
+    RET
+%else ; !HIGH_BIT_DEPTH
+cglobal psyCost_pp_64x64, 4, 10, 14
+    lea             r4, [3 * r1]
+    lea             r7, [3 * r3]
+    mova            m8, [hmul_8p]
+    pxor            m13, m13   ; accumulator
+
+    mov             r8d, 8
+.loopH:
+    mov             r9d, 8
+.loopW:
+    PSY_PP_8x8
+
+    paddd           m13, m0
+    add             r0, 8
+    add             r2, 8
+    dec             r9d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 64]
+    lea             r2, [r2 + r3 * 8 - 64]
+    dec             r8d
+    jnz             .loopH
+    movd            eax, xm13
+    RET
+%endif
+%endif
+
+;---------------------------------------------------------------------------------------------------------------------
+;int psyCost_ss(const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride)
+;---------------------------------------------------------------------------------------------------------------------
+; psyCost_ss_4x4 (SSE4): psy cost on int16_t residual blocks (see the
+; prototype comment above). Computes energy = satd' - (sad >> 2) for the
+; source block (saved in m7), repeats for the recon block, and returns
+; |difference| in eax. m6 holds the current block's sad>>2, m7 the source
+; energy across the two halves.
+INIT_XMM sse4
+cglobal psyCost_ss_4x4, 4, 5, 8
+
+    add             r1, r1      ; stride in bytes for int16_t elements
+    lea             r4, [3 * r1]
+    movddup         m0, [r0]
+    movddup         m1, [r0 + r1]
+    movddup         m2, [r0 + r1 * 2]
+    movddup         m3, [r0 + r4]
+
+    ; m6 = SAD >> 2 (absolute values summed, residuals can be negative)
+    pabsw           m4, m0
+    pabsw           m5, m1
+    paddw           m5, m4
+    pabsw           m4, m2
+    paddw           m5, m4
+    pabsw           m4, m3
+    paddw           m5, m4
+    pmaddwd         m5, [pw_1]
+    psrldq          m4, m5, 4
+    paddd           m5, m4
+    psrld           m6, m5, 2
+
+    ; horizontal Hadamard stage via hmul_8w sign pattern
+    mova            m4, [hmul_8w]
+    pmaddwd         m0, m4
+    pmaddwd         m1, m4
+    pmaddwd         m2, m4
+    pmaddwd         m3, m4
+
+    psrldq          m4, m0, 4
+    psubd           m5, m0, m4
+    paddd           m0, m4
+    shufps          m0, m5, 10001000b
+
+    psrldq          m4, m1, 4
+    psubd           m5, m1, m4
+    paddd           m1, m4
+    shufps          m1, m5, 10001000b
+
+    psrldq          m4, m2, 4
+    psubd           m5, m2, m4
+    paddd           m2, m4
+    shufps          m2, m5, 10001000b
+
+    psrldq          m4, m3, 4
+    psubd           m5, m3, m4
+    paddd           m3, m4
+    shufps          m3, m5, 10001000b
+
+    ; vertical Hadamard butterflies
+    mova            m4, m0
+    paddd           m0, m1
+    psubd           m1, m4
+    mova            m4, m2
+    paddd           m2, m3
+    psubd           m3, m4
+    mova            m4, m0
+    paddd           m0, m2
+    psubd           m2, m4
+    mova            m4, m1
+    paddd           m1, m3
+    psubd           m3, m4
+
+    ; SATD = sum of |coefficients|, then >> 1
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    movhlps         m1, m0
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+    psrld           m0, 1
+    psubd           m7, m0, m6  ; source energy
+
+    ; identical computation for the recon residual block
+    add             r3, r3
+    lea             r4, [3 * r3]
+    movddup         m0, [r2]
+    movddup         m1, [r2 + r3]
+    movddup         m2, [r2 + r3 * 2]
+    movddup         m3, [r2 + r4]
+
+    pabsw           m4, m0
+    pabsw           m5, m1
+    paddw           m5, m4
+    pabsw           m4, m2
+    paddw           m5, m4
+    pabsw           m4, m3
+    paddw           m5, m4
+    pmaddwd         m5, [pw_1]
+    psrldq          m4, m5, 4
+    paddd           m5, m4
+    psrld           m6, m5, 2
+
+    mova            m4, [hmul_8w]
+    pmaddwd         m0, m4
+    pmaddwd         m1, m4
+    pmaddwd         m2, m4
+    pmaddwd         m3, m4
+
+    psrldq          m4, m0, 4
+    psubd           m5, m0, m4
+    paddd           m0, m4
+    shufps          m0, m5, 10001000b
+
+    psrldq          m4, m1, 4
+    psubd           m5, m1, m4
+    paddd           m1, m4
+    shufps          m1, m5, 10001000b
+
+    psrldq          m4, m2, 4
+    psubd           m5, m2, m4
+    paddd           m2, m4
+    shufps          m2, m5, 10001000b
+
+    psrldq          m4, m3, 4
+    psubd           m5, m3, m4
+    paddd           m3, m4
+    shufps          m3, m5, 10001000b
+
+    mova            m4, m0
+    paddd           m0, m1
+    psubd           m1, m4
+    mova            m4, m2
+    paddd           m2, m3
+    psubd           m3, m4
+    mova            m4, m0
+    paddd           m0, m2
+    psubd           m2, m4
+    mova            m4, m1
+    paddd           m1, m3
+    psubd           m3, m4
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    movhlps         m1, m0
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+    psrld           m0, 1
+    psubd           m0, m6      ; recon energy
+    psubd           m7, m0
+    pabsd           m0, m7      ; |source energy - recon energy|
+    movd            eax, m0
+    RET
+
+%if ARCH_X86_64
+INIT_XMM sse4
+; int psyCost_ss_8x8(const int16_t *src, intptr_t srcStride,
+;                    const int16_t *rec, intptr_t recStride)
+; Psycho-visual cost of one 8x8 block of int16 samples. For each buffer it
+; computes an energy term:
+;   energy = ((sum of |transformed coeffs| + 2) >> 2) - ((sum of |samples|) >> 2)
+; where the transform is built from per-row +/- butterflies (pw_pmpmpmpm and
+; pw_1 pmaddwd passes plus the shufps stages) and SUMSUB_BA column butterflies
+; (a Hadamard/SATD-style transform). Returns |energy(src) - energy(rec)| in eax.
+; Register roles: m13 = alternating +1/-1 words, m14 = all-ones words,
+; m8 = sample-abs term, m11 = partial transform sum, m12 = energy(src).
+cglobal psyCost_ss_8x8, 4, 6, 15
+
+    mova            m13, [pw_pmpmpmpm]
+    mova            m14, [pw_1]
+    add             r1, r1
+    add             r3, r3
+    lea             r4, [3 * r1]
+    movu            m0, [r0]
+    movu            m1, [r0 + r1]
+    movu            m2, [r0 + r1 * 2]
+    movu            m3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r1]
+    movu            m6, [r5 + r1 * 2]
+    movu            m7, [r5 + r4]
+
+    ; m8 = (sum of |src samples| over the 8x8 block) >> 2
+    pabsw           m8, m0
+    pabsw           m9, m1
+    paddw           m8, m9
+    pabsw           m10, m2
+    pabsw           m11, m3
+    paddw           m10, m11
+    paddw           m8, m10
+    pabsw           m9, m4
+    pabsw           m10, m5
+    paddw           m9, m10
+    pabsw           m11, m6
+    pabsw           m12, m7
+    paddw           m11, m12
+    paddw           m9, m11
+    paddw           m8, m9
+    movhlps         m9, m8
+    pmovzxwd        m8, m8
+    pmovzxwd        m9, m9
+    paddd           m8, m9
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    ; first transform pass over src rows 0-3: +/- weighted pair sums per row
+    pmaddwd         m0, m13
+    pmaddwd         m1, m13
+    pmaddwd         m2, m13
+    pmaddwd         m3, m13
+
+    ; two horizontal butterfly stages per row: each stage forms sum/diff of
+    ; adjacent dword lanes, then shufps repacks (sums low, diffs high)
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+
+    ; vertical butterflies across rows 0-3 (dword sum/diff pairs)
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    ; same +/- pass and butterflies for src rows 4-7
+    pmaddwd         m4, m13
+    pmaddwd         m5, m13
+    pmaddwd         m6, m13
+    pmaddwd         m7, m13
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    ; combine the two 4-row halves
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    ; m11 = sum of |coeffs| from the +/- (pw_pmpmpmpm) pass
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m11, m0, m7
+
+    ; second transform pass: reload src rows and repeat with all-ones weights
+    movu            m0, [r0]
+    movu            m1, [r0 + r1]
+    movu            m2, [r0 + r1 * 2]
+    movu            m3, [r0 + r4]
+
+    pmaddwd         m0, m14
+    pmaddwd         m1, m14
+    pmaddwd         m2, m14
+    pmaddwd         m3, m14
+
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    movu            m4, [r5]
+    movu            m5, [r5 + r1]
+    movu            m6, [r5 + r1 * 2]
+    movu            m7, [r5 + r4]
+
+    pmaddwd         m4, m14
+    pmaddwd         m5, m14
+    pmaddwd         m6, m14
+    pmaddwd         m7, m14
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    ; total |coeff| sum = this pass + m11 from the first pass
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m0, m7
+    paddd           m0, m11
+
+    ; horizontal reduce, round (+2), >>2, subtract sample term:
+    ; m12 = energy(src)
+    movhlps         m1, m0
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+    paddd           m0, [pd_2]
+    psrld           m0, 2
+    psubd           m12, m0, m8
+
+    ; ---- repeat the whole computation for the second buffer (r2/r3) ----
+    lea             r4, [3 * r3]
+    movu            m0, [r2]
+    movu            m1, [r2 + r3]
+    movu            m2, [r2 + r3 * 2]
+    movu            m3, [r2 + r4]
+    lea             r5, [r2 + r3 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r3]
+    movu            m6, [r5 + r3 * 2]
+    movu            m7, [r5 + r4]
+
+    ; m8 = (sum of |rec samples|) >> 2 (m12 now holds src energy, so m10 is
+    ; reused here where the first pass used m12)
+    pabsw           m8, m0
+    pabsw           m9, m1
+    paddw           m8, m9
+    pabsw           m10, m2
+    pabsw           m11, m3
+    paddw           m10, m11
+    paddw           m8, m10
+    pabsw           m9, m4
+    pabsw           m10, m5
+    paddw           m9, m10
+    pabsw           m11, m6
+    pabsw           m10, m7
+    paddw           m11, m10
+    paddw           m9, m11
+    paddw           m8, m9
+    movhlps         m9, m8
+    pmovzxwd        m8, m8
+    pmovzxwd        m9, m9
+    paddd           m8, m9
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    pmaddwd         m0, m13
+    pmaddwd         m1, m13
+    pmaddwd         m2, m13
+    pmaddwd         m3, m13
+
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    pmaddwd         m4, m13
+    pmaddwd         m5, m13
+    pmaddwd         m6, m13
+    pmaddwd         m7, m13
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m11, m0, m7
+
+    movu            m0, [r2]
+    movu            m1, [r2 + r3]
+    movu            m2, [r2 + r3 * 2]
+    movu            m3, [r2 + r4]
+
+    pmaddwd         m0, m14
+    pmaddwd         m1, m14
+    pmaddwd         m2, m14
+    pmaddwd         m3, m14
+
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    movu            m4, [r5]
+    movu            m5, [r5 + r3]
+    movu            m6, [r5 + r3 * 2]
+    movu            m7, [r5 + r4]
+
+    pmaddwd         m4, m14
+    pmaddwd         m5, m14
+    pmaddwd         m6, m14
+    pmaddwd         m7, m14
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m0, m7
+    paddd           m0, m11
+
+    ; m0 = energy(rec); return |energy(src) - energy(rec)|
+    movhlps         m1, m0
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+    paddd           m0, [pd_2]
+    psrld           m0, 2
+    psubd           m0, m8
+
+    psubd           m12, m0
+    pabsd           m0, m12
+    movd            eax, m0
+    RET
+%endif
+
+; Accumulating 8x8 psy-cost kernel shared by the psyCost_ss_16x16/32x32/64x64
+; wrappers below. Contract (set up by the callers):
+;   r0/r2 = current 8x8 src/rec block of int16 samples, r1/r3 = byte strides,
+;   r4 = 3 * r1, r6 = 3 * r3, m13 = [pw_pmpmpmpm], m14 = [pw_1],
+;   m15 = running cost accumulator.
+; Adds |energy(src) - energy(rec)| into m15, where for each buffer
+;   energy = ((sum of |transformed coeffs| + 2) >> 2) - ((sum of |samples|) >> 2)
+; with the transform built from the two pmaddwd passes (+/- and all-ones
+; weights), the shufps horizontal butterfly stages and the SUMSUB_BA column
+; butterflies. Clobbers m0-m12; r0-r4, r6 and m13-m15 are preserved, r5 is
+; used as a scratch row pointer. Same computation as psyCost_ss_8x8 above,
+; except r6 replaces the re-derived 3*r3 and the result accumulates into m15.
+%macro psy_cost_ss 0
+    movu            m0, [r0]
+    movu            m1, [r0 + r1]
+    movu            m2, [r0 + r1 * 2]
+    movu            m3, [r0 + r4]
+    lea             r5, [r0 + r1 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r1]
+    movu            m6, [r5 + r1 * 2]
+    movu            m7, [r5 + r4]
+
+    ; m8 = (sum of |src samples| over the 8x8 block) >> 2
+    pabsw           m8, m0
+    pabsw           m9, m1
+    paddw           m8, m9
+    pabsw           m10, m2
+    pabsw           m11, m3
+    paddw           m10, m11
+    paddw           m8, m10
+    pabsw           m9, m4
+    pabsw           m10, m5
+    paddw           m9, m10
+    pabsw           m11, m6
+    pabsw           m12, m7
+    paddw           m11, m12
+    paddw           m9, m11
+    paddw           m8, m9
+    movhlps         m9, m8
+    pmovzxwd        m8, m8
+    pmovzxwd        m9, m9
+    paddd           m8, m9
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    ; +/- weighted pass over src rows 0-3
+    pmaddwd         m0, m13
+    pmaddwd         m1, m13
+    pmaddwd         m2, m13
+    pmaddwd         m3, m13
+
+    ; two horizontal butterfly stages per row (sum/diff of adjacent dwords)
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+
+    ; vertical butterflies across rows
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    ; same for src rows 4-7
+    pmaddwd         m4, m13
+    pmaddwd         m5, m13
+    pmaddwd         m6, m13
+    pmaddwd         m7, m13
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    ; m11 = sum of |coeffs| from the +/- pass
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m11, m0, m7
+
+    ; second pass: reload src and repeat with all-ones weights
+    movu            m0, [r0]
+    movu            m1, [r0 + r1]
+    movu            m2, [r0 + r1 * 2]
+    movu            m3, [r0 + r4]
+
+    pmaddwd         m0, m14
+    pmaddwd         m1, m14
+    pmaddwd         m2, m14
+    pmaddwd         m3, m14
+
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    movu            m4, [r5]
+    movu            m5, [r5 + r1]
+    movu            m6, [r5 + r1 * 2]
+    movu            m7, [r5 + r4]
+
+    pmaddwd         m4, m14
+    pmaddwd         m5, m14
+    pmaddwd         m6, m14
+    pmaddwd         m7, m14
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m0, m7
+    paddd           m0, m11
+
+    ; reduce, round (+2), >>2, subtract sample term: m12 = energy(src)
+    movhlps         m1, m0
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+    paddd           m0, [pd_2]
+    psrld           m0, 2
+    psubd           m12, m0, m8
+
+    ; ---- same computation for the rec buffer (r2/r3, r6 = 3*r3) ----
+    movu            m0, [r2]
+    movu            m1, [r2 + r3]
+    movu            m2, [r2 + r3 * 2]
+    movu            m3, [r2 + r6]
+    lea             r5, [r2 + r3 * 4]
+    movu            m4, [r5]
+    movu            m5, [r5 + r3]
+    movu            m6, [r5 + r3 * 2]
+    movu            m7, [r5 + r6]
+
+    ; m8 = (sum of |rec samples|) >> 2 (m12 is live, so m10 replaces it here)
+    pabsw           m8, m0
+    pabsw           m9, m1
+    paddw           m8, m9
+    pabsw           m10, m2
+    pabsw           m11, m3
+    paddw           m10, m11
+    paddw           m8, m10
+    pabsw           m9, m4
+    pabsw           m10, m5
+    paddw           m9, m10
+    pabsw           m11, m6
+    pabsw           m10, m7
+    paddw           m11, m10
+    paddw           m9, m11
+    paddw           m8, m9
+    movhlps         m9, m8
+    pmovzxwd        m8, m8
+    pmovzxwd        m9, m9
+    paddd           m8, m9
+    movhlps         m9, m8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2
+
+    pmaddwd         m0, m13
+    pmaddwd         m1, m13
+    pmaddwd         m2, m13
+    pmaddwd         m3, m13
+
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    pmaddwd         m4, m13
+    pmaddwd         m5, m13
+    pmaddwd         m6, m13
+    pmaddwd         m7, m13
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m11, m0, m7
+
+    movu            m0, [r2]
+    movu            m1, [r2 + r3]
+    movu            m2, [r2 + r3 * 2]
+    movu            m3, [r2 + r6]
+
+    pmaddwd         m0, m14
+    pmaddwd         m1, m14
+    pmaddwd         m2, m14
+    pmaddwd         m3, m14
+
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    shufps          m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    shufps          m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    shufps          m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    shufps          m3, m10, 10001000b
+
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    movu            m4, [r5]
+    movu            m5, [r5 + r3]
+    movu            m6, [r5 + r3 * 2]
+    movu            m7, [r5 + r6]
+
+    pmaddwd         m4, m14
+    pmaddwd         m5, m14
+    pmaddwd         m6, m14
+    pmaddwd         m7, m14
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    shufps          m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    shufps          m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    shufps          m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    shufps          m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m0, m7
+    paddd           m0, m11
+
+    ; m0 = energy(rec); accumulate |energy(src) - energy(rec)| into m15
+    movhlps         m1, m0
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+    paddd           m0, [pd_2]
+    psrld           m0, 2
+    psubd           m0, m8
+
+    psubd           m12, m0
+    pabsd           m0, m12
+    paddd           m15, m0
+%endmacro
+
+%if ARCH_X86_64
+INIT_XMM sse4
+; int psyCost_ss_16x16(const int16_t *src, intptr_t srcStride,
+;                      const int16_t *rec, intptr_t recStride)
+; Runs the psy_cost_ss 8x8 kernel over a 2x2 grid of 8x8 sub-blocks and
+; returns the accumulated cost (m15) in eax.
+cglobal psyCost_ss_16x16, 4, 9, 16
+
+    pxor            m15, m15                ; running cost accumulator
+    mova            m13, [pw_pmpmpmpm]      ; +1/-1 word pattern for the kernel
+    mova            m14, [pw_1]             ; all-ones words for the kernel
+    add             r1, r1                  ; strides: int16 units -> bytes
+    add             r3, r3
+    lea             r4, [r1 + 2 * r1]       ; r4 = 3 * src stride
+    lea             r6, [r3 + 2 * r3]       ; r6 = 3 * rec stride
+    mov             r7d, 2                  ; two rows of 8x8 blocks
+.nextrow:
+    mov             r8d, 2                  ; two 8x8 blocks per row
+.nextcol:
+    psy_cost_ss
+    add             r0, 16                  ; advance 8 samples (16 bytes)
+    add             r2, 16
+    sub             r8d, 1
+    jnz             .nextcol
+    lea             r0, [r0 + r1 * 8 - 32]  ; down 8 rows, rewind 16 samples
+    lea             r2, [r2 + r3 * 8 - 32]
+    sub             r7d, 1
+    jnz             .nextrow
+    movd            eax, m15
+    RET
+%endif
+
+%if ARCH_X86_64
+INIT_XMM sse4
+;; int psyCost_ss_32x32(source, sstride, recon, rstride)
+;; Same scheme as psyCost_ss_16x16 (sse4): accumulate psy_cost_ss over a
+;; 4x4 grid of 8x8 blocks. Strides are doubled (int16 elements -> bytes).
+cglobal psyCost_ss_32x32, 4, 9, 16
+
+    mova            m13, [pw_pmpmpmpm]
+    mova            m14, [pw_1]
+    add             r1, r1
+    add             r3, r3
+    lea             r4, [3 * r1]
+    lea             r6, [3 * r3]
+    pxor            m15, m15            ; running cost accumulator
+    mov             r7d, 4              ; 4 block rows
+.loopH:
+    mov             r8d, 4              ; 4 block columns
+.loopW:
+    psy_cost_ss
+    add             r0, 16
+    add             r2, 16
+    dec             r8d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 64]  ; down 8 rows, rewind 4 blocks (4*16B)
+    lea             r2, [r2 + r3 * 8 - 64]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, m15
+    RET
+%endif
+
+%if ARCH_X86_64
+INIT_XMM sse4
+;; int psyCost_ss_64x64(source, sstride, recon, rstride)
+;; Same scheme as psyCost_ss_16x16 (sse4): accumulate psy_cost_ss over an
+;; 8x8 grid of 8x8 blocks. Strides are doubled (int16 elements -> bytes).
+cglobal psyCost_ss_64x64, 4, 9, 16
+
+    mova            m13, [pw_pmpmpmpm]
+    mova            m14, [pw_1]
+    add             r1, r1
+    add             r3, r3
+    lea             r4, [3 * r1]
+    lea             r6, [3 * r3]
+    pxor            m15, m15            ; running cost accumulator
+    mov             r7d, 8              ; 8 block rows
+.loopH:
+    mov             r8d, 8              ; 8 block columns
+.loopW:
+    psy_cost_ss
+    add             r0, 16
+    add             r2, 16
+    dec             r8d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 128] ; down 8 rows, rewind 8 blocks (8*16B)
+    lea             r2, [r2 + r3 * 8 - 128]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, m15
+    RET
+%endif
+
+INIT_YMM avx2
+;; int psyCost_ss_4x4(source, sstride, recon, rstride)
+;; AVX2 trick: the source 4x4 block is kept in the low 128-bit lane and the
+;; recon block in the high lane (vinserti128 below), so both energies are
+;; computed by the same instruction stream; the final vextracti128/psubd
+;; produces energy(source) - energy(recon).
+cglobal psyCost_ss_4x4, 4, 5, 8
+    add             r1, r1              ; int16 element stride -> bytes
+    add             r3, r3
+    lea             r4, [3 * r1]
+    movddup         m0, [r0]            ; 4 rows of 4 int16 source coeffs
+    movddup         m1, [r0 + r1]
+    movddup         m2, [r0 + r1 * 2]
+    movddup         m3, [r0 + r4]
+
+    lea             r4, [3 * r3]
+    movddup         m4, [r2]            ; 4 rows of recon coeffs
+    movddup         m5, [r2 + r3]
+    movddup         m6, [r2 + r3 * 2]
+    movddup         m7, [r2 + r4]
+
+    ;; pack: low lane = source row, high lane = recon row
+    vinserti128     m0, m0, xm4, 1
+    vinserti128     m1, m1, xm5, 1
+    vinserti128     m2, m2, xm6, 1
+    vinserti128     m3, m3, xm7, 1
+
+    ;; SAD term: sum |coeff| over the 4x4 block (per lane), then >> 2
+    pabsw           m4, m0
+    pabsw           m5, m1
+    paddw           m5, m4
+    pabsw           m4, m2
+    paddw           m5, m4
+    pabsw           m4, m3
+    paddw           m5, m4
+    pmaddwd         m5, [pw_1]
+    psrldq          m4, m5, 4
+    paddd           m5, m4
+    psrld           m6, m5, 2           ; m6 = sad >> 2 (per lane)
+
+    ;; horizontal butterfly stage via +/- word multipliers
+    mova            m4, [hmul_8w]
+    pmaddwd         m0, m4
+    pmaddwd         m1, m4
+    pmaddwd         m2, m4
+    pmaddwd         m3, m4
+
+    ;; per row: produce (a+b, a-b) pairs packed by shufps
+    psrldq          m4, m0, 4
+    psubd           m5, m0, m4
+    paddd           m0, m4
+    shufps          m0, m0, m5, 10001000b
+
+    psrldq          m4, m1, 4
+    psubd           m5, m1, m4
+    paddd           m1, m4
+    shufps          m1, m1, m5, 10001000b
+
+    psrldq          m4, m2, 4
+    psubd           m5, m2, m4
+    paddd           m2, m4
+    shufps          m2, m2, m5, 10001000b
+
+    psrldq          m4, m3, 4
+    psubd           m5, m3, m4
+    paddd           m3, m4
+    shufps          m3, m3, m5, 10001000b
+
+    ;; vertical butterflies across the 4 rows
+    mova            m4, m0
+    paddd           m0, m1
+    psubd           m1, m4
+    mova            m4, m2
+    paddd           m2, m3
+    psubd           m3, m4
+    mova            m4, m0
+    paddd           m0, m2
+    psubd           m2, m4
+    mova            m4, m1
+    paddd           m1, m3
+    psubd           m3, m4
+
+    ;; sum of |transform coeffs|, horizontally reduced per lane
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    psrldq          m1, m0, 8
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+    psrld           m0, 1               ; satd >> 1
+    psubd           m0, m6              ; energy = (satd >> 1) - (sad >> 2)
+    vextracti128    xm1, m0, 1          ; high lane = recon energy
+    psubd           m0, m1
+    pabsd           m0, m0              ; |energy(src) - energy(rec)|
+    movd            eax, xm0
+    RET
+
+;; Computes the psy cost of one 8x8 int16 source/recon block pair.
+;; Inputs:  r0/r1 = source ptr / byte stride, r2/r3 = recon ptr / byte stride,
+;;          m12 = [pw_1], m13 = [pw_pmpmpmpm]; 8*mmsize of scratch at rsp.
+;; Output:  m0 holds |energy(source) - energy(recon)|; clobbers m0-m11, r4, r6.
+;; Layout:  each ymm keeps the source row in its low 128-bit lane and the
+;;          matching recon row in the high lane, so both blocks share one
+;;          instruction stream until the final lane subtraction.
+%macro PSY_SS_8x8 0
+    lea             r4, [3 * r1]
+    lea             r6, [r0 + r1 * 4]
+    movu            xm0, [r0]
+    movu            xm1, [r0 + r1]
+    movu            xm2, [r0 + r1 * 2]
+    movu            xm3, [r0 + r4]
+    movu            xm4, [r6]
+    movu            xm5, [r6 + r1]
+    movu            xm6, [r6 + r1 * 2]
+    movu            xm7, [r6 + r4]
+
+    lea             r4, [3 * r3]
+    lea             r6, [r2 + r3 * 4]
+    movu            xm8, [r2]
+    movu            xm9, [r2 + r3]
+    movu            xm10, [r2 + r3 * 2]
+    movu            xm11, [r2 + r4]
+    vinserti128     m0, m0, xm8, 1      ; rows 0-3: recon into high lanes
+    vinserti128     m1, m1, xm9, 1
+    vinserti128     m2, m2, xm10, 1
+    vinserti128     m3, m3, xm11, 1
+    movu            xm8, [r6]
+    movu            xm9, [r6 + r3]
+    movu            xm10, [r6 + r3 * 2]
+    movu            xm11, [r6 + r4]
+    vinserti128     m4, m4, xm8, 1      ; rows 4-7
+    vinserti128     m5, m5, xm9, 1
+    vinserti128     m6, m6, xm10, 1
+    vinserti128     m7, m7, xm11, 1
+
+    ;; store on stack to use later
+    mova            [rsp + 0 * mmsize], m0
+    mova            [rsp + 1 * mmsize], m1
+    mova            [rsp + 2 * mmsize], m2
+    mova            [rsp + 3 * mmsize], m3
+    mova            [rsp + 4 * mmsize], m4
+    mova            [rsp + 5 * mmsize], m5
+    mova            [rsp + 6 * mmsize], m6
+    mova            [rsp + 7 * mmsize], m7
+
+    ;; SAD term: sum |coeff| over all 8 rows (per lane)
+    pabsw           m8, m0
+    pabsw           m9, m1
+    paddw           m8, m9
+    pabsw           m10, m2
+    pabsw           m11, m3
+    paddw           m10, m11
+    paddw           m8, m10
+    pabsw           m9, m4
+    pabsw           m10, m5
+    paddw           m9, m10
+    pabsw           m11, m6
+    pabsw           m10, m7
+    paddw           m11, m10
+    paddw           m9, m11
+    paddw           m8, m9
+    psrldq          m9, m8, 8
+
+    ;; widen the word sums to dwords before the horizontal reduction
+    vextracti128    xm10, m8, 1
+    vextracti128    xm11, m9, 1
+
+    vpmovzxwd       m8, xm8
+    vpmovzxwd       m9, xm9
+    vpmovzxwd       m10, xm10
+    vpmovzxwd       m11, xm11
+
+    vinserti128     m8, m8, xm10, 1
+    vinserti128     m9, m9, xm11, 1
+
+    paddd           m8, m9
+    psrldq          m9, m8, 8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2       ; sad_4x4
+
+    ;; first horizontal butterfly stage, rows 0-3 (+/- pattern in m13)
+    pmaddwd         m0, m13
+    pmaddwd         m1, m13
+    pmaddwd         m2, m13
+    pmaddwd         m3, m13
+
+    ;; two (a+b, a-b) shuffle passes per row complete the horizontal stage
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    vshufps         m0, m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    vshufps         m0, m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    vshufps         m1, m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    vshufps         m1, m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    vshufps         m2, m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    vshufps         m2, m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    vshufps         m3, m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    vshufps         m3, m3, m10, 10001000b
+
+    ;; vertical butterflies, rows 0-3
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    ;; same horizontal stage for rows 4-7
+    pmaddwd         m4, m13
+    pmaddwd         m5, m13
+    pmaddwd         m6, m13
+    pmaddwd         m7, m13
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    vshufps         m4, m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    vshufps         m4, m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    vshufps         m5, m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    vshufps         m5, m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    vshufps         m6, m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    vshufps         m6, m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    vshufps         m7, m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    vshufps         m7, m7, m10, 10001000b
+
+    ;; vertical butterflies, rows 4-7, then combine the two half-blocks
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    ;; partial coefficient-magnitude sum for this weighting, kept in m11
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m11, m0, m7
+
+    ;; second pass over the saved rows with the all-ones weights (m12)
+    pmaddwd         m0, m12, [rsp + 0 * mmsize]
+    pmaddwd         m1, m12, [rsp + 1 * mmsize]
+    pmaddwd         m2, m12, [rsp + 2 * mmsize]
+    pmaddwd         m3, m12, [rsp + 3 * mmsize]
+
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    vshufps         m0, m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    vshufps         m0, m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    vshufps         m1, m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    vshufps         m1, m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    vshufps         m2, m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    vshufps         m2, m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    vshufps         m3, m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    vshufps         m3, m3, m10, 10001000b
+
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    pmaddwd         m4, m12, [rsp + 4 * mmsize]
+    pmaddwd         m5, m12, [rsp + 5 * mmsize]
+    pmaddwd         m6, m12, [rsp + 6 * mmsize]
+    pmaddwd         m7, m12, [rsp + 7 * mmsize]
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    vshufps         m4, m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    vshufps         m4, m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    vshufps         m5, m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    vshufps         m5, m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    vshufps         m6, m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    vshufps         m6, m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    vshufps         m7, m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    vshufps         m7, m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    ;; total = both weighting passes combined
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m0, m7
+    paddd           m0, m11
+
+    ;; energy = (sum + 2) >> 2 - sad, then |src_energy - rec_energy|
+    psrldq          m1, m0, 8
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+    paddd           m0, [pd_2]
+    psrld           m0, 2
+    psubd           m0, m8
+    vextracti128    xm1, m0, 1          ; high lane = recon energy
+    psubd           m0, m1
+    pabsd           m0, m0
+%endmacro
+
+%if ARCH_X86_64
+INIT_YMM avx2
+;; int psyCost_ss_8x8(source, sstride, recon, rstride)
+;; Single 8x8 block: set up the constants PSY_SS_8x8 expects, run it once,
+;; return the dword cost. Needs 8 aligned ymm slots of scratch on the stack.
+cglobal psyCost_ss_8x8, 4, 7, 14
+    ; NOTE: align stack to 64 bytes, so all of local data in same cache line
+    mov             r5, rsp             ; save original rsp; restored before RET
+    sub             rsp, 8*mmsize
+    and             rsp, ~63
+
+    mova            m12, [pw_1]         ; all-ones weights (second pass)
+    mova            m13, [pw_pmpmpmpm]  ; +/- weights (first pass)
+    add             r1, r1              ; int16 element stride -> bytes
+    add             r3, r3
+
+    PSY_SS_8x8
+
+    movd            eax, xm0            ; PSY_SS_8x8 leaves the cost in m0
+    mov             rsp, r5
+    RET
+%endif
+
+%if ARCH_X86_64
+INIT_YMM avx2
+;; int psyCost_ss_16x16(source, sstride, recon, rstride)
+;; Accumulates PSY_SS_8x8 over a 2x2 grid of 8x8 blocks into m14.
+cglobal psyCost_ss_16x16, 4, 9, 15
+    ; NOTE: align stack to 64 bytes, so all of local data in same cache line
+    mov             r5, rsp             ; save original rsp; restored before RET
+    sub             rsp, 8*mmsize
+    and             rsp, ~63
+
+    mova            m12, [pw_1]
+    mova            m13, [pw_pmpmpmpm]
+    add             r1, r1              ; int16 element stride -> bytes
+    add             r3, r3
+    pxor            m14, m14            ; running cost accumulator
+
+    mov             r7d, 2              ; 2 block rows
+.loopH:
+    mov             r8d, 2              ; 2 block columns
+.loopW:
+    PSY_SS_8x8
+
+    paddd           m14, m0
+    add             r0, 16              ; next block: 8 cols * sizeof(int16)
+    add             r2, 16
+    dec             r8d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 32]  ; down 8 rows, rewind 2 blocks
+    lea             r2, [r2 + r3 * 8 - 32]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, xm14
+    mov             rsp, r5
+    RET
+%endif
+
+%if ARCH_X86_64
+INIT_YMM avx2
+;; int psyCost_ss_32x32(source, sstride, recon, rstride)
+;; Accumulates PSY_SS_8x8 over a 4x4 grid of 8x8 blocks into m14.
+cglobal psyCost_ss_32x32, 4, 9, 15
+    ; NOTE: align stack to 64 bytes, so all of local data in same cache line
+    mov             r5, rsp             ; save original rsp; restored before RET
+    sub             rsp, 8*mmsize
+    and             rsp, ~63
+
+    mova            m12, [pw_1]
+    mova            m13, [pw_pmpmpmpm]
+    add             r1, r1              ; int16 element stride -> bytes
+    add             r3, r3
+    pxor            m14, m14            ; running cost accumulator
+
+    mov             r7d, 4              ; 4 block rows
+.loopH:
+    mov             r8d, 4              ; 4 block columns
+.loopW:
+    PSY_SS_8x8
+
+    paddd           m14, m0
+    add             r0, 16
+    add             r2, 16
+    dec             r8d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 64]  ; down 8 rows, rewind 4 blocks
+    lea             r2, [r2 + r3 * 8 - 64]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, xm14
+    mov             rsp, r5
+    RET
+%endif
+
+%if ARCH_X86_64
+INIT_YMM avx2
+;; int psyCost_ss_64x64(source, sstride, recon, rstride)
+;; Accumulates PSY_SS_8x8 over an 8x8 grid of 8x8 blocks into m14.
+cglobal psyCost_ss_64x64, 4, 9, 15
+    ; NOTE: align stack to 64 bytes, so all of local data in same cache line
+    mov             r5, rsp             ; save original rsp; restored before RET
+    sub             rsp, 8*mmsize
+    and             rsp, ~63
+
+    mova            m12, [pw_1]
+    mova            m13, [pw_pmpmpmpm]
+    add             r1, r1              ; int16 element stride -> bytes
+    add             r3, r3
+    pxor            m14, m14            ; running cost accumulator
+
+    mov             r7d, 8              ; 8 block rows
+.loopH:
+    mov             r8d, 8              ; 8 block columns
+.loopW:
+    PSY_SS_8x8
+
+    paddd           m14, m0
+    add             r0, 16
+    add             r2, 16
+    dec             r8d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 128] ; down 8 rows, rewind 8 blocks
+    lea             r2, [r2 + r3 * 8 - 128]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, xm14
+    mov             rsp, r5
+    RET
+%endif
+
+;;---------------------------------------------------------------
+;; SATD AVX2
+;; int pixel_satd(const pixel*, intptr_t, const pixel*, intptr_t)
+;;---------------------------------------------------------------
+;; r0   - pix0
+;; r1   - pix0Stride
+;; r2   - pix1
+;; r3   - pix1Stride
+
+%if ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 0
+INIT_YMM avx2
+;; Internal helper, custom calling convention (reached via `call`, not C ABI):
+;;   in:  r0/r2 = pix0/pix1, r1/r3 = byte strides, r4 = 3*r1, r5 = 3*r3,
+;;        m7 = [hmul_16p] (+/- byte multipliers for pmaddubsw)
+;;   out: adds dword SATD partial sums into m8 and m9;
+;;        advances r0/r2 by 8 rows; clobbers m0-m6.
+;; 8-bit pixels only (guarded by HIGH_BIT_DEPTH == 0 above).
+cglobal calc_satd_16x8    ; function to compute satd cost for 16 columns, 8 rows
+    pxor                m6, m6          ; per-call word accumulator
+    ;; rows 0-3: load 16 px into both ymm lanes, diff pix0 - pix1 as words
+    vbroadcasti128      m0, [r0]
+    vbroadcasti128      m4, [r2]
+    vbroadcasti128      m1, [r0 + r1]
+    vbroadcasti128      m5, [r2 + r3]
+    pmaddubsw           m4, m7
+    pmaddubsw           m0, m7
+    pmaddubsw           m5, m7
+    pmaddubsw           m1, m7
+    psubw               m0, m4
+    psubw               m1, m5
+    vbroadcasti128      m2, [r0 + r1 * 2]
+    vbroadcasti128      m4, [r2 + r3 * 2]
+    vbroadcasti128      m3, [r0 + r4]
+    vbroadcasti128      m5, [r2 + r5]
+    pmaddubsw           m4, m7
+    pmaddubsw           m2, m7
+    pmaddubsw           m5, m7
+    pmaddubsw           m3, m7
+    psubw               m2, m4
+    psubw               m3, m5
+    lea                 r0, [r0 + r1 * 4]
+    lea                 r2, [r2 + r3 * 4]
+    ;; 4x4 vertical butterflies over the four row-diffs
+    paddw               m4, m0, m1
+    psubw               m1, m1, m0
+    paddw               m0, m2, m3
+    psubw               m3, m2
+    paddw               m2, m4, m0
+    psubw               m0, m4
+    paddw               m4, m1, m3
+    psubw               m3, m1
+    pabsw               m2, m2
+    pabsw               m0, m0
+    pabsw               m4, m4
+    pabsw               m3, m3
+    ;; interleave the two transform halves and keep the elementwise max
+    pblendw             m1, m2, m0, 10101010b
+    pslld               m0, 16
+    psrld               m2, 16
+    por                 m0, m2
+    pmaxsw              m1, m0
+    paddw               m6, m1
+    pblendw             m2, m4, m3, 10101010b
+    pslld               m3, 16
+    psrld               m4, 16
+    por                 m3, m4
+    pmaxsw              m2, m3
+    paddw               m6, m2
+    ;; rows 4-7: identical scheme
+    vbroadcasti128      m1, [r0]
+    vbroadcasti128      m4, [r2]
+    vbroadcasti128      m2, [r0 + r1]
+    vbroadcasti128      m5, [r2 + r3]
+    pmaddubsw           m4, m7
+    pmaddubsw           m1, m7
+    pmaddubsw           m5, m7
+    pmaddubsw           m2, m7
+    psubw               m1, m4
+    psubw               m2, m5
+    vbroadcasti128      m0, [r0 + r1 * 2]
+    vbroadcasti128      m4, [r2 + r3 * 2]
+    vbroadcasti128      m3, [r0 + r4]
+    vbroadcasti128      m5, [r2 + r5]
+    lea                 r0, [r0 + r1 * 4]
+    lea                 r2, [r2 + r3 * 4]
+    pmaddubsw           m4, m7
+    pmaddubsw           m0, m7
+    pmaddubsw           m5, m7
+    pmaddubsw           m3, m7
+    psubw               m0, m4
+    psubw               m3, m5
+    paddw               m4, m1, m2
+    psubw               m2, m1
+    paddw               m1, m0, m3
+    psubw               m3, m0
+    paddw               m0, m4, m1
+    psubw               m1, m4
+    paddw               m4, m2, m3
+    psubw               m3, m2
+    pabsw               m0, m0
+    pabsw               m1, m1
+    pabsw               m4, m4
+    pabsw               m3, m3
+    pblendw             m2, m0, m1, 10101010b
+    pslld               m1, 16
+    psrld               m0, 16
+    por                 m1, m0
+    pmaxsw              m2, m1
+    paddw               m6, m2
+    pblendw             m0, m4, m3, 10101010b
+    pslld               m3, 16
+    psrld               m4, 16
+    por                 m3, m4
+    pmaxsw              m0, m3
+    paddw               m6, m0
+    ;; widen word sums to dwords and fold into the caller's accumulators
+    vextracti128        xm0, m6, 1
+    pmovzxwd            m6, xm6
+    pmovzxwd            m0, xm0
+    paddd               m8, m6
+    paddd               m9, m0
+    ret
+
+;; Internal helper, same convention as calc_satd_16x8 but for only 4 rows:
+;;   in:  r0/r2 = pix0/pix1, r1/r3 = byte strides, r4 = 3*r1, r5 = 3*r3,
+;;        m7 = [hmul_16p]
+;;   out: adds dword SATD partial sums into m8 and m9; clobbers m0-m6.
+;;   NOTE(review): unlike calc_satd_16x8 this does NOT advance r0/r2.
+cglobal calc_satd_16x4    ; function to compute satd cost for 16 columns, 4 rows
+    pxor                m6, m6          ; per-call word accumulator
+    vbroadcasti128      m0, [r0]
+    vbroadcasti128      m4, [r2]
+    vbroadcasti128      m1, [r0 + r1]
+    vbroadcasti128      m5, [r2 + r3]
+    pmaddubsw           m4, m7
+    pmaddubsw           m0, m7
+    pmaddubsw           m5, m7
+    pmaddubsw           m1, m7
+    psubw               m0, m4
+    psubw               m1, m5
+    vbroadcasti128      m2, [r0 + r1 * 2]
+    vbroadcasti128      m4, [r2 + r3 * 2]
+    vbroadcasti128      m3, [r0 + r4]
+    vbroadcasti128      m5, [r2 + r5]
+    pmaddubsw           m4, m7
+    pmaddubsw           m2, m7
+    pmaddubsw           m5, m7
+    pmaddubsw           m3, m7
+    psubw               m2, m4
+    psubw               m3, m5
+    ;; 4x4 vertical butterflies over the four row-diffs
+    paddw               m4, m0, m1
+    psubw               m1, m1, m0
+    paddw               m0, m2, m3
+    psubw               m3, m2
+    paddw               m2, m4, m0
+    psubw               m0, m4
+    paddw               m4, m1, m3
+    psubw               m3, m1
+    pabsw               m2, m2
+    pabsw               m0, m0
+    pabsw               m4, m4
+    pabsw               m3, m3
+    ;; interleave the two transform halves and keep the elementwise max
+    pblendw             m1, m2, m0, 10101010b
+    pslld               m0, 16
+    psrld               m2, 16
+    por                 m0, m2
+    pmaxsw              m1, m0
+    paddw               m6, m1
+    pblendw             m2, m4, m3, 10101010b
+    pslld               m3, 16
+    psrld               m4, 16
+    por                 m3, m4
+    pmaxsw              m2, m3
+    paddw               m6, m2
+    ;; widen word sums to dwords and fold into the caller's accumulators
+    vextracti128        xm0, m6, 1
+    pmovzxwd            m6, xm6
+    pmovzxwd            m0, xm0
+    paddd               m8, m6
+    paddd               m9, m0
+    ret
+
+;; int pixel_satd_16x4(pix0, stride0, pix1, stride1) -- see banner above.
+;; One calc_satd_16x4 call, then horizontal reduction of m8+m9 into eax.
+cglobal pixel_satd_16x4, 4,6,10         ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]      ; +/- byte weights for the helpers
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8              ; dword accumulators filled by helpers
+    pxor            m9, m9
+
+    call            calc_satd_16x4
+
+    ;; reduce 8 dwords (both lanes) down to one scalar
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_16x12(pix0, stride0, pix1, stride1)
+;; 8 rows via calc_satd_16x8 (which advances the pointers) + 4 rows via
+;; calc_satd_16x4, then horizontal reduction.
+cglobal pixel_satd_16x12, 4,6,10        ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+
+    call            calc_satd_16x8
+    call            calc_satd_16x4
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_16x32(pix0, stride0, pix1, stride1)
+;; 4 x calc_satd_16x8 (each advances the pointers by 8 rows), then reduce.
+cglobal pixel_satd_16x32, 4,6,10        ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_16x64(pix0, stride0, pix1, stride1)
+;; 8 x calc_satd_16x8 (each advances the pointers by 8 rows), then reduce.
+cglobal pixel_satd_16x64, 4,6,10        ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_32x8(pix0, stride0, pix1, stride1)
+;; Two 16-wide strips (base pointers saved in r6/r7), one 16x8 call each.
+cglobal pixel_satd_32x8, 4,8,10          ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+    mov             r6, r0              ; keep block origins: helpers advance r0/r2
+    mov             r7, r2
+
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 16]       ; second 16-pixel strip
+    lea             r2, [r7 + 16]
+
+    call            calc_satd_16x8
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_32x16(pix0, stride0, pix1, stride1)
+;; Two 16-wide strips, two 16x8 calls per strip.
+cglobal pixel_satd_32x16, 4,8,10         ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+    mov             r6, r0              ; keep block origins: helpers advance r0/r2
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 16]       ; second strip
+    lea             r2, [r7 + 16]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_32x24(pix0, stride0, pix1, stride1)
+;; Two 16-wide strips, three 16x8 calls per strip.
+cglobal pixel_satd_32x24, 4,8,10         ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+    mov             r6, r0              ; keep block origins: helpers advance r0/r2
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 16]       ; second strip
+    lea             r2, [r7 + 16]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_32x32(pix0, stride0, pix1, stride1)
+;; Two 16-wide strips, four 16x8 calls per strip.
+cglobal pixel_satd_32x32, 4,8,10         ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+    mov             r6, r0              ; keep block origins: helpers advance r0/r2
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 16]       ; second strip
+    lea             r2, [r7 + 16]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_32x64(pix0, stride0, pix1, stride1)
+;; Two 16-wide strips, eight 16x8 calls per strip.
+cglobal pixel_satd_32x64, 4,8,10         ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+    mov             r6, r0              ; keep block origins: helpers advance r0/r2
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 16]       ; second strip
+    lea             r2, [r7 + 16]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_48x64(pix0, stride0, pix1, stride1)
+;; Three 16-wide strips (offsets 0, 16, 32), eight 16x8 calls per strip.
+cglobal pixel_satd_48x64, 4,8,10        ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+    mov             r6, r0              ; keep block origins: helpers advance r0/r2
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 16]       ; strip 2
+    lea             r2, [r7 + 16]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 32]       ; strip 3
+    lea             r2, [r7 + 32]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_64x16(pix0, stride0, pix1, stride1)
+;; Four 16-wide strips (offsets 0, 16, 32, 48), two 16x8 calls per strip.
+cglobal pixel_satd_64x16, 4,8,10         ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+    mov             r6, r0              ; keep block origins: helpers advance r0/r2
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 16]       ; strip 2
+    lea             r2, [r7 + 16]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 32]       ; strip 3
+    lea             r2, [r7 + 32]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 48]       ; strip 4
+    lea             r2, [r7 + 48]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+;; int pixel_satd_64x32(pix0, stride0, pix1, stride1)
+;; Four 16-wide strips (offsets 0, 16, 32, 48), four 16x8 calls per strip.
+cglobal pixel_satd_64x32, 4,8,10         ; if WIN64 && cpuflag(avx2)
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m8, m8
+    pxor            m9, m9
+    mov             r6, r0              ; keep block origins: helpers advance r0/r2
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 16]       ; strip 2
+    lea             r2, [r7 + 16]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 32]       ; strip 3
+    lea             r2, [r7 + 32]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 48]       ; strip 4
+    lea             r2, [r7 + 48]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+cglobal pixel_satd_64x48, 4,8,10        ; if WIN64 && cpuflag(avx2) -- 8-bit SATD of a 64x48 block: 4 columns of 16, 6x calc_satd_16x8 each
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]         ; r4/r5 = 3 * strides
+    lea             r5, [3 * r3]
+    pxor            m8, m8               ; dword accumulators
+    pxor            m9, m9
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 16]
+    lea             r2, [r7 + 16]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 32]
+    lea             r2, [r7 + 32]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 48]
+    lea             r2, [r7 + 48]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9               ; reduce accumulators -> eax
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+
+cglobal pixel_satd_64x64, 4,8,10        ; if WIN64 && cpuflag(avx2) -- 8-bit SATD of a 64x64 block: 4 columns of 16, 8x calc_satd_16x8 each
+    mova            m7, [hmul_16p]
+    lea             r4, [3 * r1]         ; r4/r5 = 3 * strides
+    lea             r5, [3 * r3]
+    pxor            m8, m8               ; dword accumulators
+    pxor            m9, m9
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 16]
+    lea             r2, [r7 + 16]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 32]
+    lea             r2, [r7 + 32]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    lea             r0, [r6 + 48]
+    lea             r2, [r7 + 48]
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    paddd           m8, m9               ; reduce accumulators -> eax
+    vextracti128    xm0, m8, 1
+    paddd           xm0, xm8
+    movhlps         xm1, xm0
+    paddd           xm0, xm1
+    pshuflw         xm1, xm0, q0032
+    paddd           xm0, xm1
+    movd            eax, xm0
+    RET
+%endif ; ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 0
+
+%if ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 1
+INIT_YMM avx2
+cglobal calc_satd_16x8    ; function to compute satd cost for 16 columns, 8 rows (16-bit pixels); accumulates dword sums into m6, advances r0/r2 by 8 rows
+    ; rows 0-3
+    movu            m0, [r0]             ; m0..m3 = per-row differences src0 - src1
+    movu            m4, [r2]
+    psubw           m0, m4
+    movu            m1, [r0 + r1]
+    movu            m5, [r2 + r3]
+    psubw           m1, m5
+    movu            m2, [r0 + r1 * 2]
+    movu            m4, [r2 + r3 * 2]
+    psubw           m2, m4
+    movu            m3, [r0 + r4]        ; r4 = 3 * stride0, set by caller
+    movu            m5, [r2 + r5]        ; r5 = 3 * stride1, set by caller
+    psubw           m3, m5
+    lea             r0, [r0 + r1 * 4]    ; advance both sources by 4 rows
+    lea             r2, [r2 + r3 * 4]
+    paddw           m4, m0, m1           ; vertical butterfly (Hadamard stage 1)
+    psubw           m1, m0
+    paddw           m0, m2, m3
+    psubw           m3, m2
+    punpckhwd       m2, m4, m1           ; interleave for the horizontal stages
+    punpcklwd       m4, m1
+    punpckhwd       m1, m0, m3
+    punpcklwd       m0, m3
+    paddw           m3, m4, m0           ; Hadamard stage 2
+    psubw           m0, m4
+    paddw           m4, m2, m1
+    psubw           m1, m2
+    punpckhdq       m2, m3, m0
+    punpckldq       m3, m0
+    paddw           m0, m3, m2           ; Hadamard stage 3
+    psubw           m2, m3
+    punpckhdq       m3, m4, m1
+    punpckldq       m4, m1
+    paddw           m1, m4, m3
+    psubw           m3, m4
+    punpckhqdq      m4, m0, m1
+    punpcklqdq      m0, m1
+    pabsw           m0, m0               ; |coefficients|, keep per-lane maxima
+    pabsw           m4, m4
+    pmaxsw          m0, m0, m4
+    punpckhqdq      m1, m2, m3
+    punpcklqdq      m2, m3
+    pabsw           m2, m2
+    pabsw           m1, m1
+    pmaxsw          m2, m1
+    pxor            m7, m7               ; widen words to dwords and accumulate into m6
+    mova            m1, m0
+    punpcklwd       m1, m7
+    paddd           m6, m1
+    mova            m1, m0
+    punpckhwd       m1, m7
+    paddd           m6, m1
+    pxor            m7, m7
+    mova            m1, m2
+    punpcklwd       m1, m7
+    paddd           m6, m1
+    mova            m1, m2
+    punpckhwd       m1, m7
+    paddd           m6, m1
+    ; rows 4-7 (identical transform on the next 4 rows)
+    movu            m0, [r0]
+    movu            m4, [r2]
+    psubw           m0, m4
+    movu            m1, [r0 + r1]
+    movu            m5, [r2 + r3]
+    psubw           m1, m5
+    movu            m2, [r0 + r1 * 2]
+    movu            m4, [r2 + r3 * 2]
+    psubw           m2, m4
+    movu            m3, [r0 + r4]
+    movu            m5, [r2 + r5]
+    psubw           m3, m5
+    lea             r0, [r0 + r1 * 4]
+    lea             r2, [r2 + r3 * 4]
+    paddw           m4, m0, m1
+    psubw           m1, m0
+    paddw           m0, m2, m3
+    psubw           m3, m2
+    punpckhwd       m2, m4, m1
+    punpcklwd       m4, m1
+    punpckhwd       m1, m0, m3
+    punpcklwd       m0, m3
+    paddw           m3, m4, m0
+    psubw           m0, m4
+    paddw           m4, m2, m1
+    psubw           m1, m2
+    punpckhdq       m2, m3, m0
+    punpckldq       m3, m0
+    paddw           m0, m3, m2
+    psubw           m2, m3
+    punpckhdq       m3, m4, m1
+    punpckldq       m4, m1
+    paddw           m1, m4, m3
+    psubw           m3, m4
+    punpckhqdq      m4, m0, m1
+    punpcklqdq      m0, m1
+    pabsw           m0, m0
+    pabsw           m4, m4
+    pmaxsw          m0, m0, m4
+    punpckhqdq      m1, m2, m3
+    punpcklqdq      m2, m3
+    pabsw           m2, m2
+    pabsw           m1, m1
+    pmaxsw          m2, m1
+    pxor            m7, m7
+    mova            m1, m0
+    punpcklwd       m1, m7
+    paddd           m6, m1
+    mova            m1, m0
+    punpckhwd       m1, m7
+    paddd           m6, m1
+    pxor            m7, m7
+    mova            m1, m2
+    punpcklwd       m1, m7
+    paddd           m6, m1
+    mova            m1, m2
+    punpckhwd       m1, m7
+    paddd           m6, m1
+    ret
+
+cglobal calc_satd_16x4    ; function to compute satd cost for 16 columns, 4 rows (16-bit pixels); accumulates into m6, advances r0/r2 by 4 rows
+    ; rows 0-3
+    movu            m0, [r0]             ; m0..m3 = per-row differences src0 - src1
+    movu            m4, [r2]
+    psubw           m0, m4
+    movu            m1, [r0 + r1]
+    movu            m5, [r2 + r3]
+    psubw           m1, m5
+    movu            m2, [r0 + r1 * 2]
+    movu            m4, [r2 + r3 * 2]
+    psubw           m2, m4
+    movu            m3, [r0 + r4]        ; r4/r5 = 3 * strides, set by caller
+    movu            m5, [r2 + r5]
+    psubw           m3, m5
+    lea             r0, [r0 + r1 * 4]    ; advance both sources by 4 rows
+    lea             r2, [r2 + r3 * 4]
+    paddw           m4, m0, m1           ; Hadamard butterfly stages (same scheme as calc_satd_16x8)
+    psubw           m1, m0
+    paddw           m0, m2, m3
+    psubw           m3, m2
+    punpckhwd       m2, m4, m1
+    punpcklwd       m4, m1
+    punpckhwd       m1, m0, m3
+    punpcklwd       m0, m3
+    paddw           m3, m4, m0
+    psubw           m0, m4
+    paddw           m4, m2, m1
+    psubw           m1, m2
+    punpckhdq       m2, m3, m0
+    punpckldq       m3, m0
+    paddw           m0, m3, m2
+    psubw           m2, m3
+    punpckhdq       m3, m4, m1
+    punpckldq       m4, m1
+    paddw           m1, m4, m3
+    psubw           m3, m4
+    punpckhqdq      m4, m0, m1
+    punpcklqdq      m0, m1
+    pabsw           m0, m0               ; absolute values, keep per-lane maxima
+    pabsw           m4, m4
+    pmaxsw          m0, m0, m4
+    punpckhqdq      m1, m2, m3
+    punpcklqdq      m2, m3
+    pabsw           m2, m2
+    pabsw           m1, m1
+    pmaxsw          m2, m1
+    pxor            m7, m7               ; widen to dwords, accumulate into m6
+    mova            m1, m0
+    punpcklwd       m1, m7
+    paddd           m6, m1
+    mova            m1, m0
+    punpckhwd       m1, m7
+    paddd           m6, m1
+    pxor            m7, m7
+    mova            m1, m2
+    punpcklwd       m1, m7
+    paddd           m6, m1
+    mova            m1, m2
+    punpckhwd       m1, m7
+    paddd           m6, m1
+    ret
+
+cglobal pixel_satd_16x4, 4,6,8           ; HBD SATD of a 16x4 block; args: src0, stride0, src1, stride1; result in eax
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; m6 = dword accumulator filled by calc_satd_16x4
+
+    call            calc_satd_16x4
+
+    vextracti128    xm7, m6, 1           ; horizontal reduction of m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_16x8, 4,6,8           ; HBD SATD of a 16x8 block
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_16x12, 4,6,8          ; HBD SATD of a 16x12 block = one 16x8 plus one 16x4 pass
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+
+    call            calc_satd_16x8
+    call            calc_satd_16x4
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_16x16, 4,6,8          ; HBD SATD of a 16x16 block: 2x calc_satd_16x8
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_16x32, 4,6,8          ; HBD SATD of a 16x32 block: 4x calc_satd_16x8
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_16x64, 4,6,8          ; HBD SATD of a 16x64 block: 8x calc_satd_16x8
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_32x8, 4,8,8           ; HBD SATD of a 32x8 block: 2 columns of 16, 1x calc_satd_16x8 each
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+    mov             r6, r0               ; saved base pointers (calc_satd_16x8 advances r0/r2)
+    mov             r7, r2
+
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]        ; next 16-pixel column (32 bytes at 2 bytes/pixel)
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_32x16, 4,8,8          ; HBD SATD of a 32x16 block: 2 columns of 16, 2x calc_satd_16x8 each
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]        ; next 16-pixel column
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_32x24, 4,8,8          ; HBD SATD of a 32x24 block: 2 columns of 16, 3x calc_satd_16x8 each
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]        ; next 16-pixel column
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_32x32, 4,8,8          ; HBD SATD of a 32x32 block: 2 columns of 16, 4x calc_satd_16x8 each
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]        ; next 16-pixel column
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_32x64, 4,8,8          ; HBD SATD of a 32x64 block: 2 columns of 16, 8x calc_satd_16x8 each
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]        ; next 16-pixel column
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_48x64, 4,8,8          ; HBD SATD of a 48x64 block: 3 columns of 16, 8x calc_satd_16x8 each
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]        ; column 1 (each column is 32 bytes wide)
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 64]        ; column 2
+    lea             r2, [r7 + 64]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_64x16, 4,8,8          ; HBD SATD of a 64x16 block: 4 columns of 16, 2x calc_satd_16x8 each
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]        ; columns step by 32 bytes (16 pixels)
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 64]
+    lea             r2, [r7 + 64]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 96]
+    lea             r2, [r7 + 96]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_64x32, 4,8,8          ; HBD SATD of a 64x32 block: 4 columns of 16, 4x calc_satd_16x8 each
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]        ; columns step by 32 bytes (16 pixels)
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 64]
+    lea             r2, [r7 + 64]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 96]
+    lea             r2, [r7 + 96]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_64x48, 4,8,8          ; HBD SATD of a 64x48 block: 4 columns of 16, 6x calc_satd_16x8 each
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]        ; columns step by 32 bytes (16 pixels)
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 64]
+    lea             r2, [r7 + 64]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 96]
+    lea             r2, [r7 + 96]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+
+cglobal pixel_satd_64x64, 4,8,8          ; HBD SATD of a 64x64 block: 4 columns of 16, 8x calc_satd_16x8 each
+    add             r1d, r1d             ; strides in bytes (2 bytes per pixel)
+    add             r3d, r3d
+    lea             r4, [3 * r1]
+    lea             r5, [3 * r3]
+    pxor            m6, m6               ; dword accumulator
+    mov             r6, r0               ; saved base pointers
+    mov             r7, r2
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]        ; columns step by 32 bytes (16 pixels)
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 64]
+    lea             r2, [r7 + 64]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 96]
+    lea             r2, [r7 + 96]
+
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1           ; reduce m6 -> eax
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7
+    movd            eax, xm6
+    RET
+%endif ; ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 1
+
+
+;-------------------------------------------------------------------------------------------------------------------------------------
+; pixel planeClipAndMax(pixel *src, intptr_t stride, int width, int height, uint64_t *outsum, const pixel minPix, const pixel maxPix)
+;-------------------------------------------------------------------------------------------------------------------------------------
+%if ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 0
+INIT_YMM avx2
+cglobal planeClipAndMax, 5,7,8           ; clamp src to [minPix,maxPix] in place, sum all pixels into *outsum, return max pixel in eax
+    movd            xm0, r5m
+    vpbroadcastb    m0, xm0                 ; m0 = [min]
+    vpbroadcastb    m1, r6m                 ; m1 = [max]
+    pxor            m2, m2                  ; m2 = sumLuma
+    pxor            m3, m3                  ; m3 = maxLumaLevel
+    pxor            m4, m4                  ; m4 = zero
+
+    ; get mask to partial register pixels
+    mov             r5d, r2d
+    and             r2d, ~(mmsize - 1)      ; r2 = width rounded down to a whole vector
+    sub             r5d, r2d                ; r5 = leftover pixel count
+    lea             r6, [pb_movemask_32 + mmsize]
+    sub             r6, r5
+    movu            m5, [r6]                ; m5 = mask for the last partial column
+
+.loopH:
+    lea             r5d, [r2 - mmsize]
+
+.loopW:                                     ; full vectors, iterated right-to-left
+    movu            m6, [r0 + r5]
+    pmaxub          m6, m0                  ; clamp to [min, max]
+    pminub          m6, m1
+    movu            [r0 + r5], m6           ; store back
+    pmaxub          m3, m6                  ; update maxLumaLevel
+    psadbw          m6, m4                  ; per-qword byte sums
+    paddq           m2, m6
+
+    sub             r5d, mmsize
+    jge            .loopW
+
+    ; partial pixels
+    movu            m7, [r0 + r2]
+    pmaxub          m6, m7, m0
+    pminub          m6, m1
+
+    pand            m7, m5                  ; get invalid/unchanged pixels
+    pandn           m6, m5, m6              ; clear invalid pixels
+    por             m7, m6                  ; combine valid & invalid pixels
+    movu            [r0 + r2], m7           ; store back
+    pmaxub          m3, m6                  ; update maxLumaLevel
+    psadbw          m6, m4
+    paddq           m2, m6
+
+.next:
+    add             r0, r1                  ; next row
+    dec             r3d
+    jg             .loopH
+
+    ; sumLuma
+    vextracti128    xm0, m2, 1
+    paddq           xm0, xm2
+    movhlps         xm1, xm0
+    paddq           xm0, xm1
+    movq            [r4], xm0               ; *outsum = total
+
+    ; maxLumaLevel: phminposuw finds a minimum, so complement first (min(~x) == ~max(x))
+    vextracti128    xm0, m3, 1
+    pmaxub          xm0, xm3
+    movhlps         xm3, xm0
+    pmaxub          xm0, xm3
+    pmovzxbw        xm0, xm0
+    pxor            xm0, [pb_movemask + 16] ; NOTE(review): relies on [pb_movemask + 16] being all-ones words -- confirm table layout
+    phminposuw      xm0, xm0
+
+    movd            eax, xm0
+    not             al                      ; undo the complement
+    movzx           eax, al
+    RET
+%endif ; ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/pixel-util.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,59 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_PIXEL_UTIL_H
+#define X265_PIXEL_UTIL_H
+
+#define DEFINE_UTILS(cpu) \
+    FUNCDEF_TU_S2(void, getResidual, cpu, const pixel* fenc, const pixel* pred, int16_t* residual, intptr_t stride); \
+    FUNCDEF_TU_S2(void, transpose, cpu, pixel* dest, const pixel* src, intptr_t stride); \
+    FUNCDEF_TU(int, count_nonzero, cpu, const int16_t* quantCoeff); \
+    uint32_t PFX(quant_ ## cpu(const int16_t* coef, const int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff)); \
+    uint32_t PFX(nquant_ ## cpu(const int16_t* coef, const int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff)); \
+    void PFX(dequant_normal_ ## cpu(const int16_t* quantCoef, int16_t* coef, int num, int scale, int shift)); \
+    void PFX(dequant_scaling_## cpu(const int16_t* src, const int32_t* dequantCoef, int16_t* dst, int num, int mcqp_miper, int shift)); \
+    void PFX(weight_pp_ ## cpu(const pixel* src, pixel* dst, intptr_t stride, int width, int height, int w0, int round, int shift, int offset)); \
+    void PFX(weight_sp_ ## cpu(const int16_t* src, pixel* dst, intptr_t srcStride, intptr_t dstStride, int width, int height, int w0, int round, int shift, int offset)); \
+    void PFX(scale1D_128to64_ ## cpu(pixel*, const pixel*)); \
+    void PFX(scale2D_64to32_ ## cpu(pixel*, const pixel*, intptr_t)); \
+    uint32_t PFX(costCoeffRemain_ ## cpu(uint16_t *absCoeff, int numNonZero, int idx)); \
+    uint32_t PFX(costC1C2Flag_sse2(uint16_t *absCoeff, intptr_t numNonZero, uint8_t *baseCtxMod, intptr_t ctxOffset)); \
+
+DEFINE_UTILS(sse2);  /* declares the per-cpu assembly utility prototypes; NOTE(review): costC1C2Flag inside the macro is hardcoded to _sse2, so each expansion redeclares that one symbol identically -- confirm intended */
+DEFINE_UTILS(ssse3);
+DEFINE_UTILS(sse4);
+DEFINE_UTILS(avx2);
+
+#undef DEFINE_UTILS
+
+/* SSIM kernels: 4x4x2 partial sums and final combination */
+void PFX(pixel_ssim_4x4x2_core_sse2(const pixel* pix1, intptr_t stride1, const pixel* pix2, intptr_t stride2, int sums[2][4]));
+void PFX(pixel_ssim_4x4x2_core_avx(const pixel* pix1, intptr_t stride1, const pixel* pix2, intptr_t stride2, int sums[2][4]));
+float PFX(pixel_ssim_end4_sse2(int sum0[5][4], int sum1[5][4], int width));
+float PFX(pixel_ssim_end4_avx(int sum0[5][4], int sum1[5][4], int width));
+
+/* coefficient scan / rate-cost helpers (x86-64 assembly) */
+int PFX(scanPosLast_x64(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* scanCG4x4, const int trSize));
+int PFX(scanPosLast_avx2_bmi2(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* scanCG4x4, const int trSize));
+uint32_t PFX(findPosFirstLast_ssse3(const int16_t *dstCoeff, const intptr_t trSize, const uint16_t scanTbl[16]));
+uint32_t PFX(costCoeffNxN_sse4(const uint16_t *scan, const coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, const uint8_t *tabSigCtx, uint32_t scanFlagMask, uint8_t *baseCtx, int offset, int scanPosSigOff, int subPosBase));
+
+#endif // ifndef X265_PIXEL_UTIL_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/pixel-util8.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,7340 @@
+;*****************************************************************************
+;* Copyright (C) 2013 x265 project
+;*
+;* Authors: Min Chen <chenm003@163.com> <min.chen@multicorewareinc.com>
+;*          Nabajit Deka <nabajit@multicorewareinc.com>
+;*          Rajesh Paulraj <rajesh@multicorewareinc.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32    ; read-only constants; SSIM C1/C2 are pre-scaled per bit depth
+
+%if BIT_DEPTH == 12
+ssim_c1:   times 4 dd 107321.76    ; .01*.01*4095*4095*64
+ssim_c2:   times 4 dd 60851437.92  ; .03*.03*4095*4095*64*63
+pf_64:     times 4 dd 64.0
+pf_128:    times 4 dd 128.0
+%elif BIT_DEPTH == 10
+ssim_c1:   times 4 dd 6697.7856    ; .01*.01*1023*1023*64
+ssim_c2:   times 4 dd 3797644.4352 ; .03*.03*1023*1023*64*63
+pf_64:     times 4 dd 64.0
+pf_128:    times 4 dd 128.0
+%elif BIT_DEPTH == 9
+ssim_c1:   times 4 dd 1671         ; .01*.01*511*511*64
+ssim_c2:   times 4 dd 947556       ; .03*.03*511*511*64*63
+%else ; 8-bit
+ssim_c1:   times 4 dd 416          ; .01*.01*255*255*64
+ssim_c2:   times 4 dd 235963       ; .03*.03*255*255*64*63
+%endif
+
+mask_ff:                times 16 db 0xff    ; byte mask: low 16 bytes 0xff, high 16 bytes 0
+                        times 16 db 0
+deinterleave_shuf:      times  2 db 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15    ; pshufb control: even bytes -> low half, odd -> high half
+deinterleave_word_shuf: times  2 db 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15   ; pshufb control: even words -> low half, odd -> high half
+hmulw_16p:              times  8 dw 1       ; +1 / alternating +1,-1 word patterns (horizontal ops via pmaddwd)
+                        times  4 dw 1, -1
+SECTION .text
+
+cextern pw_1
+cextern pw_0_15
+cextern pb_1
+cextern pb_128
+cextern pw_00ff
+cextern pw_1023
+cextern pw_3fff
+cextern pw_2000
+cextern pw_pixel_max
+cextern pd_1
+cextern pd_32767
+cextern pd_n32768
+cextern pb_2
+cextern pb_4
+cextern pb_8
+cextern pb_15
+cextern pb_16
+cextern pb_32
+cextern pb_64
+cextern hmul_16p
+cextern trans8_shuf
+cextern_naked private_prefix %+ _entropyStateBits
+cextern pb_movemask
+
+;-----------------------------------------------------------------------------
+; void getResidual(pixel *fenc, pixel *pred, int16_t *residual, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+%if HIGH_BIT_DEPTH
+cglobal getResidual4, 4,4,4        ; 4x4: residual = fenc - pred, 16-bit pixels
+    add      r3,    r3             ; stride: elements -> bytes
+
+    ; row 0-1
+    movh         m0, [r0]
+    movh         m1, [r0 + r3]
+    movh         m2, [r1]
+    movh         m3, [r1 + r3]
+    punpcklqdq   m0, m1            ; pack rows 0-1 of fenc / pred into one xmm each
+    punpcklqdq   m2, m3
+    psubw        m0, m2
+
+    movh         [r2], m0
+    movhps       [r2 + r3], m0
+    lea          r0, [r0 + r3 * 2]
+    lea          r1, [r1 + r3 * 2]
+    lea          r2, [r2 + r3 * 2]
+
+    ; row 2-3
+    movh         m0, [r0]
+    movh         m1, [r0 + r3]
+    movh         m2, [r1]
+    movh         m3, [r1 + r3]
+    punpcklqdq   m0, m1
+    punpcklqdq   m2, m3
+    psubw        m0, m2
+    movh        [r2], m0
+    movhps      [r2 + r3], m0
+    RET
+%else
+cglobal getResidual4, 4,4,5        ; 4x4: residual = fenc - pred, 8-bit pixels
+    pxor        m0, m0             ; zero register for byte -> word unpack
+
+    ; row 0-1
+    movd        m1, [r0]
+    movd        m2, [r0 + r3]
+    movd        m3, [r1]
+    movd        m4, [r1 + r3]
+    punpckldq   m1, m2
+    punpcklbw   m1, m0             ; zero-extend bytes to words
+    punpckldq   m3, m4
+    punpcklbw   m3, m0
+    psubw       m1, m3
+    movh        [r2], m1
+    movhps      [r2 + r3 * 2], m1  ; int16_t residual: row stride = 2*r3 bytes
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+    lea         r2, [r2 + r3 * 4]
+
+    ; row 2-3
+    movd        m1, [r0]
+    movd        m2, [r0 + r3]
+    movd        m3, [r1]
+    movd        m4, [r1 + r3]
+    punpckldq   m1, m2
+    punpcklbw   m1, m0
+    punpckldq   m3, m4
+    punpcklbw   m3, m0
+    psubw       m1, m3
+    movh        [r2], m1
+    movhps      [r2 + r3 * 2], m1
+    RET
+%endif
+
+
+INIT_XMM sse2
+%if HIGH_BIT_DEPTH
+cglobal getResidual8, 4,4,4        ; 8x8: residual = fenc - pred, 16-bit pixels, 2 rows/rep
+    add      r3,    r3             ; stride: elements -> bytes
+
+%assign x 0
+%rep 8/2
+    ; row 0-1
+    movu        m1, [r0]
+    movu        m2, [r0 + r3]
+    movu        m3, [r1]
+    movu        m4, [r1 + r3]
+    psubw       m1, m3
+    psubw       m2, m4
+    movu        [r2], m1
+    movu        [r2 + r3], m2
+%assign x x+1
+%if (x != 4)                       ; skip pointer advance after the final pair
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+    lea         r2, [r2 + r3 * 2]
+%endif
+%endrep
+    RET
+%else
+cglobal getResidual8, 4,4,5        ; 8x8: residual = fenc - pred, 8-bit pixels
+    pxor        m0, m0             ; zero register for byte -> word unpack
+
+%assign x 0
+%rep 8/2
+    ; row 0-1
+    movh        m1, [r0]
+    movh        m2, [r0 + r3]
+    movh        m3, [r1]
+    movh        m4, [r1 + r3]
+    punpcklbw   m1, m0
+    punpcklbw   m2, m0
+    punpcklbw   m3, m0
+    punpcklbw   m4, m0
+    psubw       m1, m3
+    psubw       m2, m4
+    movu        [r2], m1
+    movu        [r2 + r3 * 2], m2  ; int16_t residual: row stride = 2*r3 bytes
+%assign x x+1
+%if (x != 4)
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+    lea         r2, [r2 + r3 * 4]
+%endif
+%endrep
+    RET
+%endif
+
+
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal getResidual16, 4,5,6       ; 16x16: residual = fenc - pred, 16-bit pixels, 4 rows/loop
+    add         r3, r3             ; stride: elements -> bytes
+    mov         r4d, 16/4          ; loop counter
+.loop:
+    ; row 0-1
+    movu        m0, [r0]
+    movu        m1, [r0 + 16]
+    movu        m2, [r0 + r3]
+    movu        m3, [r0 + r3 + 16]
+    movu        m4, [r1]
+    movu        m5, [r1 + 16]
+    psubw       m0, m4
+    psubw       m1, m5
+    movu        m4, [r1 + r3]
+    movu        m5, [r1 + r3 + 16]
+    psubw       m2, m4
+    psubw       m3, m5
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+
+    movu        [r2], m0
+    movu        [r2 + 16], m1
+    movu        [r2 + r3], m2
+    movu        [r2 + r3 + 16], m3
+    lea         r2, [r2 + r3 * 2]
+
+    ; row 2-3
+    movu        m0, [r0]
+    movu        m1, [r0 + 16]
+    movu        m2, [r0 + r3]
+    movu        m3, [r0 + r3 + 16]
+    movu        m4, [r1]
+    movu        m5, [r1 + 16]
+    psubw       m0, m4
+    psubw       m1, m5
+    movu        m4, [r1 + r3]
+    movu        m5, [r1 + r3 + 16]
+    psubw       m2, m4
+    psubw       m3, m5
+
+    movu        [r2], m0
+    movu        [r2 + 16], m1
+    movu        [r2 + r3], m2
+    movu        [r2 + r3 + 16], m3
+
+    dec         r4d
+
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+    lea         r2, [r2 + r3 * 2]
+    jnz        .loop
+    RET
+%else
+INIT_XMM sse4
+cglobal getResidual16, 4,5,8       ; 16x16: residual = fenc - pred, 8-bit pixels
+    mov         r4d, 16/4          ; loop counter, 4 rows/iteration
+    pxor        m0, m0             ; zero register for high-byte unpack
+.loop:
+    ; row 0-1
+    movu        m1, [r0]
+    movu        m2, [r0 + r3]
+    movu        m3, [r1]
+    movu        m4, [r1 + r3]
+    pmovzxbw    m5, m1             ; low 8 pixels zero-extended
+    punpckhbw   m1, m0             ; high 8 pixels zero-extended
+    pmovzxbw    m6, m2
+    punpckhbw   m2, m0
+    pmovzxbw    m7, m3
+    punpckhbw   m3, m0
+    psubw       m5, m7
+    psubw       m1, m3
+    pmovzxbw    m7, m4
+    punpckhbw   m4, m0
+    psubw       m6, m7
+    psubw       m2, m4
+
+    movu        [r2], m5
+    movu        [r2 + 16], m1
+    movu        [r2 + r3 * 2], m6  ; int16_t residual: row stride = 2*r3 bytes
+    movu        [r2 + r3 * 2 + 16], m2
+
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+    lea         r2, [r2 + r3 * 4]
+
+    ; row 2-3
+    movu        m1, [r0]
+    movu        m2, [r0 + r3]
+    movu        m3, [r1]
+    movu        m4, [r1 + r3]
+    pmovzxbw    m5, m1
+    punpckhbw   m1, m0
+    pmovzxbw    m6, m2
+    punpckhbw   m2, m0
+    pmovzxbw    m7, m3
+    punpckhbw   m3, m0
+    psubw       m5, m7
+    psubw       m1, m3
+    pmovzxbw    m7, m4
+    punpckhbw   m4, m0
+    psubw       m6, m7
+    psubw       m2, m4
+
+    movu        [r2], m5
+    movu        [r2 + 16], m1
+    movu        [r2 + r3 * 2], m6
+    movu        [r2 + r3 * 2 + 16], m2
+
+    dec         r4d
+
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+    lea         r2, [r2 + r3 * 4]
+    jnz        .loop
+    RET
+%endif
+
+%if HIGH_BIT_DEPTH
+INIT_YMM avx2
+cglobal getResidual16, 4,4,5       ; 16x16: residual = fenc - pred, 16-bit pixels, one 32-byte row per ymm
+    add         r3, r3             ; stride: elements -> bytes
+    pxor        m0, m0
+
+%assign x 0
+%rep 16/2
+    movu        m1, [r0]
+    movu        m2, [r0 + r3]
+    movu        m3, [r1]
+    movu        m4, [r1 + r3]
+
+    psubw       m1, m3
+    psubw       m2, m4
+    movu        [r2], m1
+    movu        [r2 + r3], m2
+%assign x x+1
+%if (x != 8)                       ; skip pointer advance after the final pair
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+    lea         r2, [r2 + r3 * 2]
+%endif
+%endrep
+    RET
+%else
+INIT_YMM avx2
+cglobal getResidual16, 4,5,8       ; 16x16: residual = fenc - pred, 8-bit pixels, 4 rows/rep
+    lea         r4, [r3 * 2]
+    add         r4d, r3d           ; r4 = 3 * stride
+%assign x 0
+%rep 4
+    pmovzxbw    m0, [r0]           ; zero-extend 16 bytes -> 16 words per row
+    pmovzxbw    m1, [r0 + r3]
+    pmovzxbw    m2, [r0 + r3 * 2]
+    pmovzxbw    m3, [r0 + r4]
+    pmovzxbw    m4, [r1]
+    pmovzxbw    m5, [r1 + r3]
+    pmovzxbw    m6, [r1 + r3 * 2]
+    pmovzxbw    m7, [r1 + r4]
+    psubw       m0, m4
+    psubw       m1, m5
+    psubw       m2, m6
+    psubw       m3, m7
+    movu        [r2], m0
+    movu        [r2 + r3 * 2], m1  ; int16_t residual: row stride = 2*r3 bytes
+    movu        [r2 + r3 * 2 * 2], m2
+    movu        [r2 + r4 * 2], m3
+%assign x x+1
+%if (x != 4)
+    lea         r0, [r0 + r3 * 2 * 2]
+    lea         r1, [r1 + r3 * 2 * 2]
+    lea         r2, [r2 + r3 * 4 * 2]
+%endif
+%endrep
+    RET
+%endif
+
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal getResidual32, 4,5,6       ; 32x32: residual = fenc - pred, 16-bit pixels, 2 rows/loop
+    add         r3, r3             ; stride: elements -> bytes
+    mov         r4d, 32/2          ; loop counter
+.loop:
+    ; row 0
+    movu        m0, [r0]
+    movu        m1, [r0 + 16]
+    movu        m2, [r0 + 32]
+    movu        m3, [r0 + 48]
+    movu        m4, [r1]
+    movu        m5, [r1 + 16]
+    psubw       m0, m4
+    psubw       m1, m5
+    movu        m4, [r1 + 32]
+    movu        m5, [r1 + 48]
+    psubw       m2, m4
+    psubw       m3, m5
+
+    movu        [r2], m0
+    movu        [r2 + 16], m1
+    movu        [r2 + 32], m2
+    movu        [r2 + 48], m3
+
+    ; row 1
+    movu        m0, [r0 + r3]
+    movu        m1, [r0 + r3 + 16]
+    movu        m2, [r0 + r3 + 32]
+    movu        m3, [r0 + r3 + 48]
+    movu        m4, [r1 + r3]
+    movu        m5, [r1 + r3 + 16]
+    psubw       m0, m4
+    psubw       m1, m5
+    movu        m4, [r1 + r3 + 32]
+    movu        m5, [r1 + r3 + 48]
+    psubw       m2, m4
+    psubw       m3, m5
+
+    movu        [r2 + r3], m0
+    movu        [r2 + r3 + 16], m1
+    movu        [r2 + r3 + 32], m2
+    movu        [r2 + r3 + 48], m3
+
+    dec         r4d
+
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+    lea         r2, [r2 + r3 * 2]
+    jnz        .loop
+    RET
+%else
+INIT_XMM sse4
+cglobal getResidual32, 4,5,7       ; 32x32: residual = fenc - pred, 8-bit pixels, 2 rows/loop
+    mov         r4d, 32/2          ; loop counter
+    pxor        m0, m0             ; zero register for high-byte unpack
+.loop:
+    movu        m1, [r0]
+    movu        m2, [r0 + 16]
+    movu        m3, [r1]
+    movu        m4, [r1 + 16]
+    pmovzxbw    m5, m1             ; low 8 pixels zero-extended
+    punpckhbw   m1, m0             ; high 8 pixels zero-extended
+    pmovzxbw    m6, m3
+    punpckhbw   m3, m0
+    psubw       m5, m6
+    psubw       m1, m3
+    movu        [r2 + 0 * 16], m5
+    movu        [r2 + 1 * 16], m1
+
+    pmovzxbw    m5, m2
+    punpckhbw   m2, m0
+    pmovzxbw    m6, m4
+    punpckhbw   m4, m0
+    psubw       m5, m6
+    psubw       m2, m4
+    movu        [r2 + 2 * 16], m5
+    movu        [r2 + 3 * 16], m2
+
+    movu        m1, [r0 + r3]
+    movu        m2, [r0 + r3 + 16]
+    movu        m3, [r1 + r3]
+    movu        m4, [r1 + r3 + 16]
+    pmovzxbw    m5, m1
+    punpckhbw   m1, m0
+    pmovzxbw    m6, m3
+    punpckhbw   m3, m0
+    psubw       m5, m6
+    psubw       m1, m3
+    movu        [r2 + r3 * 2 + 0 * 16], m5   ; int16_t residual: row stride = 2*r3 bytes
+    movu        [r2 + r3 * 2 + 1 * 16], m1
+
+    pmovzxbw    m5, m2
+    punpckhbw   m2, m0
+    pmovzxbw    m6, m4
+    punpckhbw   m4, m0
+    psubw       m5, m6
+    psubw       m2, m4
+    movu        [r2 + r3 * 2 + 2 * 16], m5
+    movu        [r2 + r3 * 2 + 3 * 16], m2
+
+    dec         r4d
+
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+    lea         r2, [r2 + r3 * 4]
+    jnz        .loop
+    RET
+%endif
+
+
+%if HIGH_BIT_DEPTH
+INIT_YMM avx2
+cglobal getResidual32, 4,4,5       ; 32x32: residual = fenc - pred, 16-bit pixels, 1 row (64 bytes) per rep
+    add         r3, r3             ; stride: elements -> bytes
+    pxor        m0, m0
+
+%assign x 0
+%rep 32
+    movu        m1, [r0]
+    movu        m2, [r0 + 32]
+    movu        m3, [r1]
+    movu        m4, [r1 + 32]
+
+    psubw       m1, m3
+    psubw       m2, m4
+    movu        [r2], m1
+    movu        [r2 + 32], m2
+%assign x x+1
+%if (x != 32)                      ; skip pointer advance after the final row
+    lea         r0, [r0 + r3]
+    lea         r1, [r1 + r3]
+    lea         r2, [r2 + r3]
+%endif
+%endrep
+    RET
+%else
+INIT_YMM avx2
+cglobal getResidual32, 4,5,8       ; 32x32: residual = fenc - pred, 8-bit pixels, 2 rows/rep
+    lea         r4, [r3 * 2]       ; r4 = 2 * stride (byte stride of residual rows)
+%assign x 0
+%rep 16
+    pmovzxbw    m0, [r0]           ; zero-extend 16 bytes -> 16 words
+    pmovzxbw    m1, [r0 + 16]
+    pmovzxbw    m2, [r0 + r3]
+    pmovzxbw    m3, [r0 + r3 + 16]
+
+    pmovzxbw    m4, [r1]
+    pmovzxbw    m5, [r1 + 16]
+    pmovzxbw    m6, [r1 + r3]
+    pmovzxbw    m7, [r1 + r3 + 16]
+
+    psubw       m0, m4
+    psubw       m1, m5
+    psubw       m2, m6
+    psubw       m3, m7
+
+    movu        [r2 + 0 ], m0
+    movu        [r2 + 32], m1
+    movu        [r2 + r4 + 0], m2
+    movu        [r2 + r4 + 32], m3
+%assign x x+1
+%if (x != 16)
+    lea         r0, [r0 + r3 * 2]
+    lea         r1, [r1 + r3 * 2]
+    lea         r2, [r2 + r3 * 4]
+%endif
+%endrep
+    RET
+%endif
+;-----------------------------------------------------------------------------
+; uint32_t quant(int16_t *coef, int32_t *quantCoeff, int32_t *deltaU, int16_t *qCoef, int qBits, int add, int numCoeff);
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal quant, 5,6,8
+    ; fill qbits
+    movd        m4, r4d         ; m4 = qbits
+
+    ; fill qbits-8
+    sub         r4d, 8
+    movd        m6, r4d         ; m6 = qbits8
+
+    ; fill offset
+    movd        m5, r5m
+    pshufd      m5, m5, 0       ; m5 = add (broadcast rounding offset)
+
+    lea         r5, [pd_1]
+
+    mov         r4d, r6m
+    shr         r4d, 3          ; process 8 coefficients per iteration
+    pxor        m7, m7          ; m7 = running nonzero count
+.loop:
+    ; 4 coeff
+    pmovsxwd    m0, [r0]        ; m0 = level
+    pabsd       m1, m0
+    pmulld      m1, [r1]        ; m1 = tmpLevel1 = abs(level) * quantCoeff
+    paddd       m2, m1, m5
+    psrad       m2, m4          ; m2 = level1 = (tmpLevel1 + add) >> qbits
+
+    pslld       m3, m2, 8
+    psrad       m1, m6
+    psubd       m1, m3          ; m1 = deltaU1 = (tmpLevel1 >> qbits8) - (level1 << 8)
+
+    movu        [r2], m1
+    psignd      m3, m2, m0      ; restore sign of original level
+    pminud      m2, [r5]        ; clamp to 0/1 so the sum counts nonzero coeffs
+    paddd       m7, m2
+    packssdw    m3, m3
+    movh        [r3], m3
+
+    ; 4 coeff
+    pmovsxwd    m0, [r0 + 8]    ; m0 = level
+    pabsd       m1, m0
+    pmulld      m1, [r1 + 16]   ; m1 = tmpLevel1
+    paddd       m2, m1, m5
+    psrad       m2, m4          ; m2 = level1
+    pslld       m3, m2, 8
+    psrad       m1, m6
+    psubd       m1, m3          ; m1 = deltaU1
+    movu        [r2 + 16], m1
+    psignd      m3, m2, m0
+    pminud      m2, [r5]
+    paddd       m7, m2
+    packssdw    m3, m3
+    movh        [r3 + 8], m3
+
+    add         r0, 16
+    add         r1, 32
+    add         r2, 32
+    add         r3, 16
+
+    dec         r4d
+    jnz        .loop
+
+    pshufd      m0, m7, 00001110b   ; horizontal sum of 4 dword counters -> eax
+    paddd       m0, m7
+    pshufd      m1, m0, 00000001b
+    paddd       m0, m1
+    movd        eax, m0
+    RET
+
+
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal quant, 5,6,9               ; x86-64 variant: uses m8, counts words via pminuw
+    ; fill qbits
+    movd            xm4, r4d            ; m4 = qbits
+
+    ; fill qbits-8
+    sub             r4d, 8
+    movd            xm6, r4d            ; m6 = qbits8
+
+    ; fill offset
+%if UNIX64 == 0
+    vpbroadcastd    m5, r5m             ; m5 = add
+%else ; Mac
+    movd           xm5, r5m
+    vpbroadcastd    m5, xm5             ; m5 = add
+%endif
+
+    lea             r5, [pw_1]
+
+    mov             r4d, r6m
+    shr             r4d, 4              ; 16 coefficients per iteration
+    pxor            m7, m7              ; m7 = running nonzero count
+.loop:
+    ; 8 coeff
+    pmovsxwd        m0, [r0]            ; m0 = level
+    pabsd           m1, m0
+    pmulld          m1, [r1]            ; m1 = tmpLevel1 = abs(level) * quantCoeff
+    paddd           m2, m1, m5
+    psrad           m2, xm4             ; m2 = level1
+
+    pslld           m3, m2, 8
+    psrad           m1, xm6
+    psubd           m1, m3              ; m1 = deltaU1
+    movu            [r2], m1
+    psignd          m2, m0              ; restore sign of original level
+
+    ; 8 coeff
+    pmovsxwd        m0, [r0 + mmsize/2] ; m0 = level
+    pabsd           m1, m0
+    pmulld          m1, [r1 + mmsize]   ; m1 = tmpLevel1
+    paddd           m3, m1, m5
+    psrad           m3, xm4             ; m3 = level1
+
+    pslld           m8, m3, 8
+    psrad           m1, xm6
+    psubd           m1, m8              ; m1 = deltaU1
+    movu            [r2 + mmsize], m1
+    psignd          m3, m0
+
+    packssdw        m2, m3
+    vpermq          m2, m2, q3120       ; fix lane order after in-lane pack
+    movu            [r3], m2
+
+    ; count non-zero coeff
+    ; TODO: popcnt is faster, but some CPU can't support
+    pminuw          m2, [r5]            ; clamp words to 0/1, accumulate count
+    paddw           m7, m2
+
+    add             r0, mmsize
+    add             r1, mmsize*2
+    add             r2, mmsize*2
+    add             r3, mmsize
+
+    dec             r4d
+    jnz            .loop
+
+    ; sum count
+    xorpd           m0, m0
+    psadbw          m7, m0              ; horizontal byte-sum of 0/1 words
+    vextracti128    xm1, m7, 1
+    paddd           xm7, xm1
+    movhlps         xm0, xm7
+    paddd           xm7, xm0
+    movd            eax, xm7
+    RET
+
+%else ; ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal quant, 5,6,8               ; 32-bit variant: only 8 ymm registers available
+    ; fill qbits
+    movd            xm4, r4d        ; m4 = qbits
+
+    ; fill qbits-8
+    sub             r4d, 8
+    movd            xm6, r4d        ; m6 = qbits8
+
+    ; fill offset
+%if UNIX64 == 0
+    vpbroadcastd    m5, r5m         ; m5 = add
+%else ; Mac
+    movd           xm5, r5m
+    vpbroadcastd    m5, xm5         ; m5 = add
+%endif
+
+    lea             r5, [pd_1]
+
+    mov             r4d, r6m
+    shr             r4d, 4          ; 16 coefficients per iteration
+    pxor            m7, m7          ; m7 = running nonzero count
+.loop:
+    ; 8 coeff
+    pmovsxwd        m0, [r0]        ; m0 = level
+    pabsd           m1, m0
+    pmulld          m1, [r1]        ; m1 = tmpLevel1 = abs(level) * quantCoeff
+    paddd           m2, m1, m5
+    psrad           m2, xm4         ; m2 = level1
+
+    pslld           m3, m2, 8
+    psrad           m1, xm6
+    psubd           m1, m3          ; m1 = deltaU1
+
+    movu            [r2], m1
+    psignd          m3, m2, m0      ; restore sign of original level
+    pminud          m2, [r5]        ; clamp to 0/1, accumulate nonzero count
+    paddd           m7, m2
+    packssdw        m3, m3
+    vpermq          m3, m3, q0020   ; gather packed words into low xmm
+    movu            [r3], xm3
+
+    ; 8 coeff
+    pmovsxwd        m0, [r0 + mmsize/2]        ; m0 = level
+    pabsd           m1, m0
+    pmulld          m1, [r1 + mmsize]        ; m1 = tmpLevel1
+    paddd           m2, m1, m5
+    psrad           m2, xm4         ; m2 = level1
+
+    pslld           m3, m2, 8
+    psrad           m1, xm6
+    psubd           m1, m3          ; m1 = deltaU1
+
+    movu            [r2 + mmsize], m1
+    psignd          m3, m2, m0
+    pminud          m2, [r5]
+    paddd           m7, m2
+    packssdw        m3, m3
+    vpermq          m3, m3, q0020
+    movu            [r3 + mmsize/2], xm3
+
+    add             r0, mmsize
+    add             r1, mmsize*2
+    add             r2, mmsize*2
+    add             r3, mmsize
+
+    dec             r4d
+    jnz            .loop
+
+    xorpd           m0, m0
+    psadbw          m7, m0          ; horizontal byte-sum of 0/1 dwords
+    vextracti128    xm1, m7, 1
+    paddd           xm7, xm1
+    movhlps         xm0, xm7
+    paddd           xm7, xm0
+    movd            eax, xm7
+    RET
+%endif ; ARCH_X86_64 == 1
+
+
+;-----------------------------------------------------------------------------
+; uint32_t nquant(int16_t *coef, int32_t *quantCoeff, int16_t *qCoef, int qBits, int add, int numCoeff);
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal nquant, 3,5,8
+    movd        m6, r4m
+    mov         r4d, r5m
+    pxor        m7, m7          ; m7 = zero-coefficient counter
+    movd        m5, r3m         ; m5 = qbits
+    pshufd      m6, m6, 0       ; m6 = add
+    mov         r3d, r4d        ; r3 = numCoeff
+    shr         r4d, 3          ; 8 coefficients per iteration
+
+.loop:
+    pmovsxwd    m0, [r0]        ; m0 = level
+    pmovsxwd    m1, [r0 + 8]    ; m1 = level
+
+    pabsd       m2, m0
+    pmulld      m2, [r1]        ; m2 = abs(level) * qcoeff
+    paddd       m2, m6
+    psrad       m2, m5          ; m2 = level1
+    psignd      m2, m0          ; restore sign of original level
+
+    pabsd       m3, m1
+    pmulld      m3, [r1 + 16]   ; m3 = abs(level) * qcoeff
+    paddd       m3, m6
+    psrad       m3, m5          ; m3 = level1
+    psignd      m3, m1
+
+    packssdw    m2, m3
+
+    movu        [r2], m2
+    add         r0, 16
+    add         r1, 32
+    add         r2, 16
+
+    pxor        m4, m4
+    pcmpeqw     m2, m4          ; 0xFFFF per zero output word
+    psubw       m7, m2          ; accumulate +1 per zero
+
+    dec         r4d
+    jnz         .loop
+
+    packuswb    m7, m7
+    psadbw      m7, m4          ; horizontal sum of zero counts
+    mov         eax, r3d
+    movd        r4d, m7
+    sub         eax, r4d        ; numSig = numCoeff - numZero
+    RET
+
+
+INIT_YMM avx2
+cglobal nquant, 3,5,7
+%if UNIX64 == 0
+    vpbroadcastd m4, r4m            ; m4 = add
+%else ; Mac
+    movd         xm4, r4m
+    vpbroadcastd m4, xm4            ; m4 = add
+%endif
+    vpbroadcastd m6, [pw_1]
+    mov         r4d, r5m
+    pxor        m5, m5              ; m5 = nonzero counter (note: counts nonzero, not zero)
+    movd        xm3, r3m            ; m3 = qbits
+    mov         r3d, r4d            ; r3 = numCoeff
+    shr         r4d, 4              ; 16 coefficients per iteration
+
+.loop:
+    pmovsxwd    m0, [r0]            ; m0 = level
+    pabsd       m1, m0
+    pmulld      m1, [r1]            ; m1 = abs(level) * qcoeff
+    paddd       m1, m4
+    psrad       m1, xm3             ; m1 = level1
+    psignd      m1, m0              ; restore sign of original level
+
+    pmovsxwd    m0, [r0 + mmsize/2] ; m0 = level
+    pabsd       m2, m0
+    pmulld      m2, [r1 + mmsize]   ; m2 = abs(level) * qcoeff
+    paddd       m2, m4
+    psrad       m2, xm3             ; m2 = level1
+    psignd      m2, m0
+
+    packssdw    m1, m2
+    vpermq      m2, m1, q3120       ; fix lane order after in-lane pack
+
+    movu        [r2], m2
+    add         r0, mmsize
+    add         r1, mmsize * 2
+    add         r2, mmsize
+
+    pminuw      m1, m6              ; clamp words to 0/1
+    paddw       m5, m1              ; accumulate nonzero count
+
+    dec         r4d
+    jnz         .loop
+
+    pxor        m0, m0
+    psadbw      m5, m0              ; horizontal byte-sum of 0/1 words
+    vextracti128 xm0, m5, 1
+    paddd       xm5, xm0
+    pshufd      xm0, xm5, 2
+    paddd       xm5, xm0
+    movd        eax, xm5            ; return numSig directly
+    RET
+
+
+;-----------------------------------------------------------------------------
+; void dequant_normal(const int16_t* quantCoef, int32_t* coef, int num, int scale, int shift)
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal dequant_normal, 5,5,5
+    mova        m2, [pw_1]
+%if HIGH_BIT_DEPTH
+    cmp         r3d, 32767          ; scale must fit a signed word for pmaddwd
+    jle         .skip
+    shr         r3d, (BIT_DEPTH - 8)    ; rescale and compensate in shift
+    sub         r4d, (BIT_DEPTH - 8)
+.skip:
+%endif
+    movd        m0, r4d             ; m0 = shift
+    add         r4d, 15
+    bts         r3d, r4d            ; set bit (shift+15): packs add = 1<<(shift-1) into the high word
+    movd        m1, r3d
+    pshufd      m1, m1, 0           ; m1 = dword [add scale]
+    ; m0 = shift
+    ; m1 = scale
+    ; m2 = word [1]
+.loop:
+    movu        m3, [r0]
+    punpckhwd   m4, m3, m2          ; interleave coef with 1 -> pmaddwd gives coef*scale + add
+    punpcklwd   m3, m2
+    pmaddwd     m3, m1              ; m3 = dword (clipQCoef * scale + add)
+    pmaddwd     m4, m1
+    psrad       m3, m0
+    psrad       m4, m0
+    packssdw    m3, m4
+    mova        [r1], m3
+
+    add         r0, 16
+    add         r1, 16
+
+    sub         r2d, 8
+    jnz        .loop
+    RET
+
+;----------------------------------------------------------------------------------------------------------------------
+;void dequant_scaling(const int16_t* src, const int32_t* dequantCoef, int16_t* dst, int num, int mcqp_miper, int shift)
+;----------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal dequant_scaling, 6,6,6
+    add         r5d, 4          ; effective shift = shift + 4
+    shr         r3d, 3          ; num/8
+    cmp         r5d, r4d
+    jle         .skip           ; if shift <= per: left-shift path, no rounding
+    sub         r5d, r4d
+    mova        m0, [pd_1]
+    movd        m1, r5d         ; shift - per
+    dec         r5d
+    movd        m2, r5d         ; shift - per - 1
+    pslld       m0, m2          ; 1 << shift - per - 1 (rounding offset)
+
+.part0:                         ; (src * coef + round) >> (shift - per)
+    pmovsxwd    m2, [r0]
+    pmovsxwd    m4, [r0 + 8]
+    movu        m3, [r1]
+    movu        m5, [r1 + 16]
+    pmulld      m2, m3
+    pmulld      m4, m5
+    paddd       m2, m0
+    paddd       m4, m0
+    psrad       m2, m1
+    psrad       m4, m1
+    packssdw    m2, m4
+    movu        [r2], m2
+
+    add         r0, 16
+    add         r1, 32
+    add         r2, 16
+    dec         r3d
+    jnz         .part0
+    jmp         .end
+
+.skip:
+    sub         r4d, r5d        ; per - shift
+    movd        m0, r4d
+
+.part1:                         ; saturate(src * coef) << (per - shift)
+    pmovsxwd    m2, [r0]
+    pmovsxwd    m4, [r0 + 8]
+    movu        m3, [r1]
+    movu        m5, [r1 + 16]
+    pmulld      m2, m3
+    pmulld      m4, m5
+    packssdw    m2, m4          ; saturate products to int16 before shifting
+    pmovsxwd    m1, m2
+    psrldq      m2, 8
+    pmovsxwd    m2, m2
+    pslld       m1, m0
+    pslld       m2, m0
+    packssdw    m1, m2
+    movu        [r2], m1
+
+    add         r0, 16
+    add         r1, 32
+    add         r2, 16
+    dec         r3d
+    jnz         .part1
+.end:
+    RET
+
+;----------------------------------------------------------------------------------------------------------------------
+;void dequant_scaling(const int16_t* src, const int32_t* dequantCoef, int16_t* dst, int num, int mcqp_miper, int shift)
+;----------------------------------------------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal dequant_scaling, 6,6,6
+    add         r5d, 4          ; effective shift = shift + 4
+    shr         r3d, 4          ; num/16
+    cmp         r5d, r4d
+    jle         .skip           ; if shift <= per: left-shift path, no rounding
+    sub         r5d, r4d
+    mova        m0, [pd_1]
+    movd        xm1, r5d         ; shift - per
+    dec         r5d
+    movd        xm2, r5d         ; shift - per - 1
+    pslld       m0, xm2          ; 1 << shift - per - 1 (rounding offset)
+
+.part0:                          ; (src * coef + round) >> (shift - per)
+    pmovsxwd    m2, [r0]
+    pmovsxwd    m4, [r0 + 16]
+    movu        m3, [r1]
+    movu        m5, [r1 + 32]
+    pmulld      m2, m3
+    pmulld      m4, m5
+    paddd       m2, m0
+    paddd       m4, m0
+    psrad       m2, xm1
+    psrad       m4, xm1
+    packssdw    m2, m4
+    vpermq      m2, m2, 11011000b    ; fix lane order after in-lane pack
+    movu        [r2], m2
+
+    add         r0, 32
+    add         r1, 64
+    add         r2, 32
+    dec         r3d
+    jnz         .part0
+    jmp         .end
+
+.skip:
+    sub         r4d, r5d        ; per - shift
+    movd        xm0, r4d
+
+.part1:                         ; saturate(src * coef) << (per - shift)
+    pmovsxwd    m2, [r0]
+    pmovsxwd    m4, [r0 + 16]
+    movu        m3, [r1]
+    movu        m5, [r1 + 32]
+    pmulld      m2, m3
+    pmulld      m4, m5
+    packssdw    m2, m4          ; saturate products to int16 before shifting
+    vextracti128 xm4, m2, 1
+    pmovsxwd    m1, xm2
+    pmovsxwd    m2, xm4
+    pslld       m1, xm0
+    pslld       m2, xm0
+    packssdw    m1, m2
+    movu        [r2], m1
+
+    add         r0, 32
+    add         r1, 64
+    add         r2, 32
+    dec         r3d
+    jnz         .part1
+.end:
+    RET
+
+INIT_YMM avx2
+cglobal dequant_normal, 5,5,7
+    vpbroadcastd    m2, [pw_1]          ; m2 = word [1]
+    vpbroadcastd    m5, [pd_32767]      ; m5 = dword [32767]
+    vpbroadcastd    m6, [pd_n32768]     ; m6 = dword [-32768]
+%if HIGH_BIT_DEPTH
+    cmp             r3d, 32767          ; scale must fit a signed word for pmaddwd
+    jle            .skip
+    shr             r3d, (BIT_DEPTH - 8)    ; rescale and compensate in shift
+    sub             r4d, (BIT_DEPTH - 8)
+.skip:
+%endif
+    movd            xm0, r4d            ; m0 = shift
+    add             r4d, -1+16
+    bts             r3d, r4d            ; set bit (shift+15): packs add = 1<<(shift-1) into the high word
+
+    movd            xm1, r3d
+    vpbroadcastd    m1, xm1             ; m1 = dword [add scale]
+
+    ; m0 = shift
+    ; m1 = scale
+    ; m2 = word [1]
+    shr             r2d, 4              ; 16 coefficients per iteration
+.loop:
+    movu            m3, [r0]
+    punpckhwd       m4, m3, m2          ; interleave coef with 1 -> pmaddwd gives coef*scale + add
+    punpcklwd       m3, m2
+    pmaddwd         m3, m1              ; m3 = dword (clipQCoef * scale + add)
+    pmaddwd         m4, m1
+    psrad           m3, xm0
+    psrad           m4, xm0
+    pminsd          m3, m5              ; clamp to int16 range [-32768, 32767]
+    pmaxsd          m3, m6
+    pminsd          m4, m5
+    pmaxsd          m4, m6
+    packssdw        m3, m4
+    mova            [r1 + 0 * mmsize/2], xm3
+    vextracti128    [r1 + 1 * mmsize/2], m3, 1
+
+    add             r0, mmsize
+    add             r1, mmsize
+
+    dec             r2d
+    jnz            .loop
+    RET
+
+
+;-----------------------------------------------------------------------------
+; int x265_count_nonzero_4x4_sse2(const int16_t *quantCoeff);
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal count_nonzero_4x4, 1,1,2
+    pxor            m0, m0
+
+    mova            m1, [r0 + 0]
+    packsswb        m1, [r0 + 16]       ; 16 words -> 16 bytes (sign preserved, nonzero stays nonzero)
+    pcmpeqb         m1, m0              ; 0xFF per zero byte
+    paddb           m1, [pb_1]          ; 1 + (-1 if zero) => 1 per nonzero, 0 per zero
+
+    psadbw          m1, m0              ; horizontal byte sum -> two qword partials
+    pshufd          m0, m1, 2
+    paddd           m0, m1
+    movd            eax, m0
+    RET
+
+
+;-----------------------------------------------------------------------------
+; int x265_count_nonzero_4x4_avx2(const int16_t *quantCoeff);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal count_nonzero_4x4, 1,1,2
+    pxor            m0, m0
+    movu            m1, [r0]
+    pcmpeqw         m1, m0              ; 0xFFFF per zero word
+    pmovmskb        eax, m1             ; 2 mask bits per word
+    not             eax                 ; now 2 set bits per nonzero word
+    popcnt          eax, eax
+    shr             eax, 1              ; bits/2 = nonzero word count
+    RET
+
+;-----------------------------------------------------------------------------
+; int x265_count_nonzero_8x8_sse2(const int16_t *quantCoeff);
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal count_nonzero_8x8, 1,1,3
+    pxor            m0, m0
+    movu            m1, [pb_4]          ; start each byte lane at 4 (= reps), subtract 1 per zero
+
+%rep 4
+    mova            m2, [r0 + 0]
+    packsswb        m2, [r0 + 16]       ; 16 words -> 16 bytes
+    add             r0, 32
+    pcmpeqb         m2, m0              ; -1 per zero byte
+    paddb           m1, m2
+%endrep
+
+    psadbw          m1, m0              ; horizontal byte sum = 64 - numZero = numNonzero
+    pshufd          m0, m1, 2
+    paddd           m0, m1
+    movd            eax, m0
+    RET
+
+
+;-----------------------------------------------------------------------------
+; int x265_count_nonzero_8x8_avx2(const int16_t *quantCoeff);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal count_nonzero_8x8, 1,1,3
+    pxor            m0, m0
+    movu            m1, [pb_2]          ; start each byte lane at 2 (= accumulations), subtract 1 per zero
+
+    mova            m2, [r0]
+    packsswb        m2, [r0 + 32]       ; 32 words -> 32 bytes
+    pcmpeqb         m2, m0              ; -1 per zero byte
+    paddb           m1, m2
+
+    mova            m2, [r0 + 64]
+    packsswb        m2, [r0 + 96]
+    pcmpeqb         m2, m0
+    paddb           m1, m2
+ 
+    psadbw          m1, m0              ; horizontal byte sum = 64 - numZero = numNonzero
+    vextracti128    xm0, m1, 1
+    paddd           m0, m1
+    pshufd          m1, m0, 2
+    paddd           m0, m1
+    movd            eax, xm0
+    RET
+
+
+;-----------------------------------------------------------------------------
+; int x265_count_nonzero_16x16_sse2(const int16_t *quantCoeff);
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal count_nonzero_16x16, 1,1,3
+    pxor            m0, m0
+    movu            m1, [pb_16]         ; start each byte lane at 16 (= reps), subtract 1 per zero
+
+%rep 16
+    mova            m2, [r0 + 0]
+    packsswb        m2, [r0 + 16]       ; 16 words -> 16 bytes
+    add             r0, 32
+    pcmpeqb         m2, m0              ; -1 per zero byte
+    paddb           m1, m2
+%endrep
+
+    psadbw          m1, m0              ; horizontal byte sum = 256 - numZero = numNonzero
+    pshufd          m0, m1, 2
+    paddd           m0, m1
+    movd            eax, m0
+    RET
+
+
+;-----------------------------------------------------------------------------
+; int x265_count_nonzero_16x16_avx2(const int16_t *quantCoeff);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+; AVX2 variant for 16x16: m1 = 32 bytes of 8 (total 256); eight unrolled
+; iterations (64 source bytes each, addressed via the assemble-time counter x)
+; pack/compare/accumulate, then the usual psadbw + cross-lane fold.
+cglobal count_nonzero_16x16, 1,1,3
+    pxor            m0, m0
+    movu            m1, [pb_8]
+
+%assign x 0
+%rep 8
+    mova            m2, [r0 + x]
+    packsswb        m2, [r0 + x + 32]
+%assign x x+64
+    pcmpeqb         m2, m0
+    paddb           m1, m2
+%endrep 
+
+    psadbw          m1, m0
+    vextracti128    xm0, m1, 1
+    paddd           m0, m1
+    pshufd          m1, m0, 2
+    paddd           m0, m1
+    movd            eax, xm0
+    RET
+
+
+;-----------------------------------------------------------------------------
+; int x265_count_nonzero_32x32_sse2(const int16_t *quantCoeff);
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+; Count nonzero int16 coefficients in a 32x32 block (1024 coeffs).
+; m1 = 16 bytes of 64 (total 1024); 64 iterations of the pack/compare/
+; accumulate pattern, then the psadbw horizontal reduction.
+cglobal count_nonzero_32x32, 1,1,3
+    pxor            m0, m0
+    movu            m1, [pb_64]
+
+%rep 64 
+    mova            m2, [r0 + 0]
+    packsswb        m2, [r0 + 16]
+    add             r0, 32
+    pcmpeqb         m2, m0
+    paddb           m1, m2
+%endrep
+
+    psadbw          m1, m0
+    pshufd          m0, m1, 2
+    paddd           m0, m1
+    movd            eax, m0
+    RET
+
+
+;-----------------------------------------------------------------------------
+; int x265_count_nonzero_32x32_avx2(const int16_t *quantCoeff);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+; AVX2 variant for 32x32: m1 = 32 bytes of 32 (total 1024); 32 unrolled
+; iterations over 64-byte strides, then psadbw + cross-lane + dword fold.
+cglobal count_nonzero_32x32, 1,1,3
+    pxor            m0, m0
+    movu            m1, [pb_32]
+
+%assign x 0
+%rep 32
+    mova            m2, [r0 + x]
+    packsswb        m2, [r0 + x + 32]
+%assign x x+64
+    pcmpeqb         m2, m0
+    paddb           m1, m2
+%endrep 
+
+    psadbw          m1, m0
+    vextracti128    xm0, m1, 1
+    paddd           m0, m1
+    pshufd          m1, m0, 2
+    paddd           m0, m1
+    movd            eax, xm0
+    RET
+
+
+;-----------------------------------------------------------------------------------------------------------------------------------------------
+;void weight_pp(pixel *src, pixel *dst, intptr_t stride, int width, int height, int w0, int round, int shift, int offset)
+;-----------------------------------------------------------------------------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+INIT_XMM sse4
+; weight_pp, high-bit-depth SSE4 path.
+; Core trick (shared by all weight_* kernels here): w0 and round are packed
+; into one dword, and each pixel is interleaved with the constant 1 (pw_1),
+; so a single pmaddwd computes pixel*w0 + round per lane.  Then >> shift,
+; + offset, and clip to [0, pw_pixel_max].  Processes 16 pixels per .loopW
+; iteration (shr r3d, 4); r2 becomes the byte distance from the end of one
+; row's processed span to the start of the next (2*(stride - width)).
+cglobal weight_pp, 4,7,7
+%define correction      (14 - BIT_DEPTH)
+    mova        m6, [pw_pixel_max]
+    mov         r6d, r6m
+    mov         r4d, r4m
+    mov         r5d, r5m
+    ; w0 is pre-scaled and shift reduced by 'correction' to keep the
+    ; 16-bit packed multiplier in range at this bit depth.
+    shl         r6d, 16 - correction
+    or          r6d, r5d    ; assuming both (w0) and round are using maximum of 16 bits each.
+    movd        m0, r6d
+    pshufd      m0, m0, 0   ; m0 = [w0, round]
+    mov         r5d, r7m
+    sub         r5d, correction
+    movd        m1, r5d
+    movd        m2, r8m
+    pshufd      m2, m2, 0
+    mova        m5, [pw_1]
+    sub         r2d, r3d
+    add         r2d, r2d
+    shr         r3d, 4
+
+.loopH:
+    mov         r5d, r3d
+
+.loopW:
+    ; first 8 pixels of the 16-pixel chunk
+    movu        m4, [r0]
+    punpcklwd   m3, m4, m5
+    pmaddwd     m3, m0
+    psrad       m3, m1
+    paddd       m3, m2      ; TODO: we can put Offset into Round, but we have to analyze Dynamic Range before that.
+
+    punpckhwd   m4, m5
+    pmaddwd     m4, m0
+    psrad       m4, m1
+    paddd       m4, m2
+
+    packusdw    m3, m4
+    pminuw      m3, m6
+    movu        [r1], m3
+
+    ; second 8 pixels
+    movu        m4, [r0 + mmsize]
+    punpcklwd   m3, m4, m5
+    pmaddwd     m3, m0
+    psrad       m3, m1
+    paddd       m3, m2
+
+    punpckhwd   m4, m5
+    pmaddwd     m4, m0
+    psrad       m4, m1
+    paddd       m4, m2
+
+    packusdw    m3, m4
+    pminuw      m3, m6
+    movu        [r1 + mmsize], m3
+
+    add         r0, 2 * mmsize
+    add         r1, 2 * mmsize
+
+    dec         r5d
+    jnz        .loopW
+
+    add         r0, r2
+    add         r1, r2
+
+    dec         r4d
+    jnz         .loopH
+    RET
+
+%else   ; end of (HIGH_BIT_DEPTH == 1)
+
+INIT_XMM sse4
+; weight_pp, 8-bit SSE4 path.  Same pmaddwd scheme; w0 is pre-shifted by 6
+; here instead of the bit-depth correction.  Bytes are widened with pmovzxbw,
+; and results are packed back to bytes with signed->unsigned saturation.
+; 16 pixels per .loopW iteration; r2 = stride - width (byte pixels).
+cglobal weight_pp, 6,7,6
+    shl         r5d, 6      ; m0 = [w0<<6]
+    mov         r6d, r6m
+    shl         r6d, 16
+    or          r6d, r5d    ; assuming both (w0<<6) and round are using maximum of 16 bits each.
+    movd        m0, r6d
+    pshufd      m0, m0, 0   ; m0 = [w0<<6, round]
+    movd        m1, r7m
+    movd        m2, r8m
+    pshufd      m2, m2, 0
+    mova        m5, [pw_1]
+    sub         r2d, r3d
+    shr         r3d, 4
+
+.loopH:
+    mov         r5d, r3d
+
+.loopW:
+    pmovzxbw    m4, [r0]
+    punpcklwd   m3, m4, m5
+    pmaddwd     m3, m0
+    psrad       m3, m1
+    paddd       m3, m2
+
+    punpckhwd   m4, m5
+    pmaddwd     m4, m0
+    psrad       m4, m1
+    paddd       m4, m2
+
+    packssdw    m3, m4
+    packuswb    m3, m3
+    movh        [r1], m3
+
+    pmovzxbw    m4, [r0 + 8]
+    punpcklwd   m3, m4, m5
+    pmaddwd     m3, m0
+    psrad       m3, m1
+    paddd       m3, m2
+
+    punpckhwd   m4, m5
+    pmaddwd     m4, m0
+    psrad       m4, m1
+    paddd       m4, m2
+
+    packssdw    m3, m4
+    packuswb    m3, m3
+    movh        [r1 + 8], m3
+
+    add         r0, 16
+    add         r1, 16
+
+    dec         r5d
+    jnz         .loopW
+
+    lea         r0, [r0 + r2]
+    lea         r1, [r1 + r2]
+
+    dec         r4d
+    jnz         .loopH
+    RET
+%endif  ; end of (HIGH_BIT_DEPTH == 0)
+
+
+%if HIGH_BIT_DEPTH
+INIT_YMM avx2
+; weight_pp, high-bit-depth AVX2 path.  Same pmaddwd(w0,round) scheme as the
+; SSE4 version, but one 16-pixel (32-byte) store per .loopW iteration
+; (shr r3d, 5 after doubling the width to bytes).  r2 ends up as the byte
+; remainder from one processed row to the next.
+cglobal weight_pp, 6, 7, 7
+%define correction      (14 - BIT_DEPTH)
+    mov          r6d, r6m
+    shl          r6d, 16 - correction
+    or           r6d, r5d          ; assuming both w0 and round are using maximum of 16 bits each.
+
+    movd         xm0, r6d
+    vpbroadcastd m0, xm0
+
+    mov          r5d, r7m
+    sub          r5d, correction
+    movd         xm1, r5d
+    vpbroadcastd m2, r8m
+    mova         m5, [pw_1]
+    mova         m6, [pw_pixel_max]
+    add         r2d, r2d
+    add         r3d, r3d
+    sub          r2d, r3d
+    shr          r3d, 5
+
+.loopH:
+    mov          r5d, r3d
+
+.loopW:
+    movu        m4, [r0]
+    punpcklwd   m3, m4, m5
+    pmaddwd     m3, m0
+    psrad       m3, xm1
+    paddd       m3, m2
+
+    punpckhwd   m4, m5
+    pmaddwd     m4, m0
+    psrad       m4, xm1
+    paddd       m4, m2
+
+    packusdw    m3, m4
+    pminuw      m3, m6
+    movu        [r1], m3
+
+    add         r0, 32
+    add         r1, 32
+
+    dec         r5d
+    jnz         .loopW
+
+    lea         r0, [r0 + r2]
+    lea         r1, [r1 + r2]
+
+    dec         r4d
+    jnz         .loopH
+%undef correction
+    RET
+%else
+INIT_YMM avx2
+; weight_pp, 8-bit AVX2 path: pmovzxbw widens 16 bytes into a full ymm of
+; words; after weighting, the two 128-bit lanes are re-merged with
+; vextracti128 + packuswb before the 16-byte store.  16 pixels per iteration.
+cglobal weight_pp, 6, 7, 6
+
+    shl          r5d, 6            ; m0 = [w0<<6]
+    mov          r6d, r6m
+    shl          r6d, 16
+    or           r6d, r5d          ; assuming both (w0<<6) and round are using maximum of 16 bits each.
+
+    movd         xm0, r6d
+    vpbroadcastd m0, xm0
+
+    movd         xm1, r7m
+    vpbroadcastd m2, r8m
+    mova         m5, [pw_1]
+    sub          r2d, r3d
+    shr          r3d, 4
+
+.loopH:
+    mov          r5d, r3d
+
+.loopW:
+    pmovzxbw    m4, [r0]
+    punpcklwd   m3, m4, m5
+    pmaddwd     m3, m0
+    psrad       m3, xm1
+    paddd       m3, m2
+
+    punpckhwd   m4, m5
+    pmaddwd     m4, m0
+    psrad       m4, xm1
+    paddd       m4, m2
+
+    packssdw    m3, m4
+    vextracti128 xm4, m3, 1
+    packuswb    xm3, xm4
+    movu        [r1], xm3
+
+    add         r0, 16
+    add         r1, 16
+
+    dec         r5d
+    jnz         .loopW
+
+    lea         r0, [r0 + r2]
+    lea         r1, [r1 + r2]
+
+    dec         r4d
+    jnz         .loopH
+    RET
+%endif
+;-------------------------------------------------------------------------------------------------------------------------------------------------
+;void weight_sp(int16_t *src, pixel *dst, intptr_t srcStride, intptr_t dstStride, int width, int height, int w0, int round, int shift, int offset)
+;-------------------------------------------------------------------------------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+INIT_XMM sse4
+; weight_sp, high-bit-depth SSE4 path: int16 source -> pixel destination.
+; The source is re-biased by pw_2000 before weighting.  w0/round are packed
+; into one dword for the pmaddwd trick (see weight_pp).  The last, partial
+; group of a row is merged into the destination through a per-word mask kept
+; in xmm0 (pblendvb's implicit-operand register), built by comparing the
+; remainder count against the 0..7 lane indices in pw_0_15.
+cglobal weight_sp, 6,7,8
+    mova        m1, [pw_pixel_max]
+    mova        m2, [pw_1]
+    mov         r6d, r7m
+    shl         r6d, 16
+    or          r6d, r6m    ; assuming both (w0) and round are using maximum of 16 bits each.
+    movd        m3, r6d
+    pshufd      m3, m3, 0   ; m3 = [round w0]
+
+    movd        m4, r8m     ; m4 = [shift]
+    movd        m5, r9m
+    pshufd      m5, m5, 0   ; m5 = [offset]
+
+    ; correct row stride
+    add         r3d, r3d
+    add         r2d, r2d
+    mov         r6d, r4d
+    and         r6d, ~(mmsize / SIZEOF_PIXEL - 1)
+    sub         r3d, r6d
+    sub         r3d, r6d
+    sub         r2d, r6d
+    sub         r2d, r6d
+
+    ; generate partial width mask (MUST BE IN XMM0)
+    mov         r6d, r4d
+    and         r6d, (mmsize / SIZEOF_PIXEL - 1)
+    movd        m0, r6d
+    pshuflw     m0, m0, 0
+    punpcklqdq  m0, m0
+    pcmpgtw     m0, [pw_0_15]
+
+.loopH:
+    mov         r6d, r4d
+
+.loopW:
+    movu        m6, [r0]
+    paddw       m6, [pw_2000]
+
+    punpcklwd   m7, m6, m2
+    pmaddwd     m7, m3
+    psrad       m7, m4
+    paddd       m7, m5
+
+    punpckhwd   m6, m2
+    pmaddwd     m6, m3
+    psrad       m6, m4
+    paddd       m6, m5
+
+    packusdw    m7, m6
+    pminuw      m7, m1
+
+    sub         r6d, (mmsize / SIZEOF_PIXEL)
+    jl         .widthLess8
+    movu        [r1], m7
+    lea         r0, [r0 + mmsize]
+    lea         r1, [r1 + mmsize]
+    je         .nextH
+    jmp        .loopW
+
+.widthLess8:
+    ; read-modify-write: keep destination bytes outside the valid width
+    movu        m6, [r1]
+    pblendvb    m6, m6, m7, m0
+    movu        [r1], m6
+
+.nextH:
+    add         r0, r2
+    add         r1, r3
+
+    dec         r5d
+    jnz         .loopH
+    RET
+
+%else   ; end of (HIGH_BIT_DEPTH == 1)
+
+INIT_XMM sse4
+; weight_sp, 8-bit SSE4 path.  Needs two scratch pointers to restore the row
+; base addresses: spare GPRs on x86-64, stack slots on 32-bit.
+%if ARCH_X86_64
+cglobal weight_sp, 6, 7+2, 7
+    %define tmp_r0      r7
+    %define tmp_r1      r8
+%else ; ARCH_X86_64 = 0
+cglobal weight_sp, 6, 7, 7, 0-(2*4)
+    %define tmp_r0      [(rsp + 0 * 4)]
+    %define tmp_r1      [(rsp + 1 * 4)]
+%endif ; ARCH_X86_64
+
+    movd        m0, r6m         ; m0 = [w0]
+
+    movd        m1, r7m         ; m1 = [round]
+    punpcklwd   m0, m1
+    pshufd      m0, m0, 0       ; m0 = [w0 round]
+
+    movd        m1, r8m         ; m1 = [shift]
+
+    movd        m2, r9m
+    pshufd      m2, m2, 0       ; m2 = [offset]
+
+    mova        m3, [pw_1]
+    mova        m4, [pw_2000]
+
+    add         r2d, r2d        ; source stride in bytes (int16 input)
+
+.loopH:
+    mov         r6d, r4d
+
+    ; save old src and dst
+    mov         tmp_r0, r0
+    mov         tmp_r1, r1
+.loopW:
+    movu        m5, [r0]
+    paddw       m5, m4
+
+    punpcklwd   m6,m5, m3
+    pmaddwd     m6, m0
+    psrad       m6, m1
+    paddd       m6, m2
+
+    punpckhwd   m5, m3
+    pmaddwd     m5, m0
+    psrad       m5, m1
+    paddd       m5, m2
+
+    packssdw    m6, m5
+    packuswb    m6, m6
+
+    ; 8 pixels per pass; tail of 4 then 2 handled below
+    sub         r6d, 8
+    jl          .width4
+    movh        [r1], m6
+    je          .nextH
+    add         r0, 16
+    add         r1, 8
+
+    jmp         .loopW
+
+.width4:
+    cmp         r6d, -4
+    jl          .width2
+    movd        [r1], m6
+    je          .nextH
+    add         r1, 4
+    pshufd      m6, m6, 1
+
+.width2:
+    pextrw      [r1], m6, 0
+
+.nextH:
+    mov         r0, tmp_r0
+    mov         r1, tmp_r1
+    lea         r0, [r0 + r2]
+    lea         r1, [r1 + r3]
+
+    dec         r5d
+    jnz         .loopH
+    RET
+%endif
+
+
+%if ARCH_X86_64 == 1
+%if HIGH_BIT_DEPTH
+INIT_YMM avx2
+;------------------------------------------------------------------------------
+; weight_sp, AVX2 high-bit-depth path: int16 source (biased by pw_2000) is
+; weighted via pmaddwd against the packed (round, w0) dword, shifted, offset,
+; and clipped to pw_pixel_max.  16 pixels per full iteration; a chain of
+; .width14 .. .width2 labels stores the partial last group of each row.
+; Fixes vs. the original: '.width6' label given its required colon (NASM only
+; tolerated the orphan label with a warning); the dead-path shuffle in
+; .width4 now rotates m7 (the register actually stored) instead of the stale
+; m6 copy, and its never-taken duplicate 'je .nextH' is removed.
+;------------------------------------------------------------------------------
+cglobal weight_sp, 6,7,9
+    mova                      m1, [pw_pixel_max]
+    mova                      m2, [pw_1]
+    mov                       r6d, r7m
+    shl                       r6d, 16
+    or                        r6d, r6m
+    movd                      xm3, r6d
+    vpbroadcastd              m3, xm3      ; m3 = [round w0]
+    movd                      xm4, r8m     ; m4 = [shift]
+    vpbroadcastd              m5, r9m      ; m5 = [offset]
+
+    ; correct row stride
+    add                       r3d, r3d
+    add                       r2d, r2d
+    mov                       r6d, r4d
+    and                       r6d, ~(mmsize / SIZEOF_PIXEL - 1)
+    sub                       r3d, r6d
+    sub                       r3d, r6d
+    sub                       r2d, r6d
+    sub                       r2d, r6d
+
+    ; generate partial width mask (MUST BE IN YMM0)
+    ; NOTE(review): the mask is built but never consumed below -- the tail is
+    ; handled by the explicit .width14/.../.width2 stores.  Kept as-is.
+    mov                       r6d, r4d
+    and                       r6d, (mmsize / SIZEOF_PIXEL - 1)
+    movd                      xm0, r6d
+    pshuflw                   m0, m0, 0
+    punpcklqdq                m0, m0
+    vinserti128               m0, m0, xm0, 1
+    pcmpgtw                   m0, [pw_0_15]
+
+.loopH:
+    mov                       r6d, r4d
+
+.loopW:
+    movu                      m6, [r0]
+    paddw                     m6, [pw_2000]
+
+    punpcklwd                 m7, m6, m2
+    pmaddwd                   m7, m3       ;(round w0)
+    psrad                     m7, xm4      ;(shift)
+    paddd                     m7, m5       ;(offset)
+
+    punpckhwd                 m6, m2
+    pmaddwd                   m6, m3
+    psrad                     m6, xm4
+    paddd                     m6, m5
+
+    packusdw                  m7, m6
+    pminuw                    m7, m1
+
+    sub                       r6d, (mmsize / SIZEOF_PIXEL)
+    jl                        .width14
+    movu                      [r1], m7
+    lea                       r0, [r0 + mmsize]
+    lea                       r1, [r1 + mmsize]
+    je                        .nextH
+    jmp                       .loopW
+
+    ; Partial-row tail: r6d + 16 pixels (< 16) remain; the packed, clipped
+    ; result lives in m7 throughout.
+.width14:
+    add                       r6d, 16
+    cmp                       r6d, 14
+    jl                        .width12
+    movu                      [r1], xm7
+    vextracti128              xm8, m7, 1
+    movq                      [r1 + 16], xm8
+    pextrd                    [r1 + 24], xm8, 2
+    je                        .nextH
+
+.width12:
+    cmp                       r6d, 12
+    jl                        .width10
+    movu                      [r1], xm7
+    vextracti128              xm8, m7, 1
+    movq                      [r1 + 16], xm8
+    je                        .nextH
+
+.width10:
+    cmp                       r6d, 10
+    jl                        .width8
+    movu                      [r1], xm7
+    vextracti128              xm8, m7, 1
+    movd                      [r1 + 16], xm8
+    je                        .nextH
+
+.width8:
+    cmp                       r6d, 8
+    jl                        .width6
+    movu                      [r1], xm7
+    je                        .nextH
+
+.width6:
+    cmp                       r6d, 6
+    jl                        .width4
+    movq                      [r1], xm7
+    pextrd                    [r1 + 8], xm7, 2
+    je                        .nextH
+
+.width4:
+    cmp                       r6d, 4
+    jl                        .width2
+    movq                      [r1], xm7
+    je                        .nextH
+    ; fall-through only for odd remainders (5), which even widths never hit
+    add                       r1, 4
+    pshufd                    m7, m7, 1
+
+.width2:
+    movd                      [r1], xm7
+
+.nextH:
+    add                       r0, r2
+    add                       r1, r3
+
+    dec                       r5d
+    jnz                       .loopH
+    RET
+
+%else
+INIT_YMM avx2
+;------------------------------------------------------------------------------
+; weight_sp, AVX2 8-bit path.  16 pixels per iteration; r7/r8 keep the row
+; base pointers so r0/r1 can advance freely inside .loopW.  After packing,
+; vpermq gathers the low qwords of both 128-bit lanes into xm6.
+;------------------------------------------------------------------------------
+cglobal weight_sp, 6, 9, 7
+    mov             r7d, r7m
+    shl             r7d, 16
+    or              r7d, r6m
+    movd            xm0, r7d
+    vpbroadcastd    m0, xm0            ; m0 = times 8 dw w0, round
+    movd            xm1, r8m           ; m1 = [shift]
+    vpbroadcastd    m2, r9m            ; m2 = times 16 dw offset
+    vpbroadcastw    m3, [pw_1]
+    vpbroadcastw    m4, [pw_2000]
+
+    add             r2d, r2d            ; 2 * srcstride
+
+    mov             r7, r0
+    mov             r8, r1
+.loopH:
+    mov             r6d, r4d            ; width
+
+    ; save old src and dst
+    mov             r0, r7              ; src
+    mov             r1, r8              ; dst
+.loopW:
+    movu            m5, [r0]
+    paddw           m5, m4
+
+    punpcklwd       m6,m5, m3
+    pmaddwd         m6, m0
+    psrad           m6, xm1
+    paddd           m6, m2
+
+    punpckhwd       m5, m3
+    pmaddwd         m5, m0
+    psrad           m5, xm1
+    paddd           m5, m2
+
+    packssdw        m6, m5
+    packuswb        m6, m6
+    vpermq          m6, m6, 10001000b
+
+    sub             r6d, 16
+    jl              .width8
+    movu            [r1], xm6
+    je              .nextH
+    add             r0, 32
+    add             r1, 16
+    jmp             .loopW
+
+.width8:
+    add             r6d, 16
+    cmp             r6d, 8
+    jl              .width4
+    movq            [r1], xm6
+    je              .nextH
+    psrldq          m6, 8
+    sub             r6d, 8
+    add             r1, 8
+
+.width4:
+    cmp             r6d, 4
+    jl              .width2
+    movd            [r1], xm6
+    je              .nextH
+    add             r1, 4
+    pshufd          m6, m6, 1
+
+.width2:
+    pextrw          [r1], xm6, 0
+
+.nextH:
+    lea             r7, [r7 + r2]
+    lea             r8, [r8 + r3]
+
+    dec             r5d
+    jnz             .loopH
+    RET
+%endif
+%endif
+
+;-----------------------------------------------------------------
+; void transpose_4x4(pixel *dst, pixel *src, intptr_t stride)
+;-----------------------------------------------------------------
+INIT_XMM sse2
+; 4x4 transpose.  Loads four rows, interleaves with punpckl{wd,bw}/punpck?dq
+; and stores the transposed block contiguously at dst.  HBD rows are 8 bytes
+; (movh) producing two 16-byte stores; 8-bit rows are 4 bytes (movd)
+; producing a single 16-byte store.
+cglobal transpose4, 3, 3, 4, dest, src, stride
+%if HIGH_BIT_DEPTH == 1
+    add          r2,    r2
+    movh         m0,    [r1]
+    movh         m1,    [r1 + r2]
+    movh         m2,    [r1 + 2 * r2]
+    lea          r1,    [r1 + 2 * r2]
+    movh         m3,    [r1 + r2]
+    punpcklwd    m0,    m1
+    punpcklwd    m2,    m3
+    punpckhdq    m1,    m0,    m2
+    punpckldq    m0,    m2
+    movu         [r0],       m0
+    movu         [r0 + 16],  m1
+%else ;HIGH_BIT_DEPTH == 0
+    movd         m0,    [r1]
+    movd         m1,    [r1 + r2]
+    movd         m2,    [r1 + 2 * r2]
+    lea          r1,    [r1 + 2 * r2]
+    movd         m3,    [r1 + r2]
+
+    punpcklbw    m0,    m1
+    punpcklbw    m2,    m3
+    punpcklwd    m0,    m2
+    movu         [r0],    m0
+%endif
+    RET
+
+;-----------------------------------------------------------------
+; void transpose_8x8(pixel *dst, pixel *src, intptr_t stride)
+;-----------------------------------------------------------------
+%if HIGH_BIT_DEPTH == 1
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+; 8x8 transpose of 16-bit pixels.  Rows n and n+4 share one ymm (low/high
+; 128-bit lanes); the word/dword interleave stages transpose within lanes and
+; the final vpermq 0xD8 swaps the middle 64-bit quarters to fix lane order.
+cglobal transpose8, 3, 5, 5
+    add          r2, r2
+    lea          r3, [3 * r2]
+    lea          r4, [r1 + 4 * r2]
+    movu         xm0, [r1]
+    vinserti128  m0, m0, [r4], 1
+    movu         xm1, [r1 + r2]
+    vinserti128  m1, m1, [r4 + r2], 1
+    movu         xm2, [r1 + 2 * r2]
+    vinserti128  m2, m2, [r4 + 2 * r2], 1
+    movu         xm3, [r1 + r3]
+    vinserti128  m3, m3, [r4 + r3], 1
+
+    punpcklwd    m4, m0, m1          ;[1 - 4][row1row2;row5row6]
+    punpckhwd    m0, m1              ;[5 - 8][row1row2;row5row6]
+
+    punpcklwd    m1, m2, m3          ;[1 - 4][row3row4;row7row8]
+    punpckhwd    m2, m3              ;[5 - 8][row3row4;row7row8]
+
+    punpckldq    m3, m4, m1          ;[1 - 2][row1row2row3row4;row5row6row7row8]
+    punpckhdq    m4, m1              ;[3 - 4][row1row2row3row4;row5row6row7row8]
+
+    punpckldq    m1, m0, m2          ;[5 - 6][row1row2row3row4;row5row6row7row8]
+    punpckhdq    m0, m2              ;[7 - 8][row1row2row3row4;row5row6row7row8]
+
+    vpermq       m3, m3, 0xD8        ;[1 ; 2][row1row2row3row4row5row6row7row8]
+    vpermq       m4, m4, 0xD8        ;[3 ; 4][row1row2row3row4row5row6row7row8]
+    vpermq       m1, m1, 0xD8        ;[5 ; 6][row1row2row3row4row5row6row7row8]
+    vpermq       m0, m0, 0xD8        ;[7 ; 8][row1row2row3row4row5row6row7row8]
+
+    movu         [r0 + 0 * 32], m3
+    movu         [r0 + 1 * 32], m4
+    movu         [r0 + 2 * 32], m1
+    movu         [r0 + 3 * 32], m0
+    RET
+%endif
+
+INIT_XMM sse2
+; TRANSPOSE_4x4 %1=dst stride (bytes): transposes a 4x4 block of 16-bit
+; pixels from [r1]/r2 to [r0]/%1, advancing r0 to the third output row.
+%macro TRANSPOSE_4x4 1
+    movh         m0,    [r1]
+    movh         m1,    [r1 + r2]
+    movh         m2,    [r1 + 2 * r2]
+    lea          r1,    [r1 + 2 * r2]
+    movh         m3,    [r1 + r2]
+    punpcklwd    m0,    m1
+    punpcklwd    m2,    m3
+    punpckhdq    m1,    m0,    m2
+    punpckldq    m0,    m2
+    movh         [r0],             m0
+    movhps       [r0 + %1],        m0
+    movh         [r0 + 2 * %1],    m1
+    lea            r0,               [r0 + 2 * %1]
+    movhps         [r0 + %1],        m1
+%endmacro
+; transpose8_internal: 8x8 HBD transpose as four 4x4 transposes.
+; Inputs: r1 = src, r2 = src stride (bytes), r3 = dst base, r5 = dst stride.
+; The neg/lea pair rewinds r1 by 8 rows and steps 8 bytes right to reach the
+; top of the next 4-column strip.
+cglobal transpose8_internal
+    TRANSPOSE_4x4 r5
+    lea    r1,    [r1 + 2 * r2]
+    lea    r0,    [r3 + 8]
+    TRANSPOSE_4x4 r5
+    lea    r1,    [r1 + 2 * r2]
+    neg    r2
+    lea    r1,    [r1 + r2 * 8 + 8]
+    neg    r2
+    lea    r0,    [r3 + 4 * r5]
+    TRANSPOSE_4x4 r5
+    lea    r1,    [r1 + 2 * r2]
+    lea    r0,    [r3 + 8 + 4 * r5]
+    TRANSPOSE_4x4 r5
+    ret
+; 8x8 HBD transpose entry: dst laid out contiguously (stride 16 bytes).
+cglobal transpose8, 3, 6, 4, dest, src, stride
+    add    r2,    r2
+    mov    r3,    r0
+    mov    r5,    16
+    call   transpose8_internal
+    RET
+%else ;HIGH_BIT_DEPTH == 0
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+; 8x8 transpose of 8-bit pixels.  Odd/even row pairs are gathered with
+; movq/movhps, the byte/word interleaves transpose within 128-bit lanes, and
+; a vpermd through the trans8_shuf table produces the final row order.
+cglobal transpose8, 3, 4, 4
+    lea          r3, [r2 * 3]
+    movq         xm0, [r1]
+    movhps       xm0, [r1 + 2 * r2]
+    movq         xm1, [r1 + r2]
+    movhps       xm1, [r1 + r3]
+    lea          r1, [r1 + 4 * r2]
+    movq         xm2, [r1]
+    movhps       xm2, [r1 + 2 * r2]
+    movq         xm3, [r1 + r2]
+    movhps       xm3, [r1 + r3]
+
+    vinserti128  m0, m0, xm2, 1             ;[row1 row3 row5 row7]
+    vinserti128  m1, m1, xm3, 1             ;[row2 row4 row6 row8]
+
+    punpcklbw    m2, m0, m1                 ;[1 - 8; 1 - 8][row1row2; row5row6]
+    punpckhbw    m0, m1                     ;[1 - 8; 1 - 8][row3row4; row7row8]
+
+    punpcklwd    m1, m2, m0                 ;[1 - 4; 1 - 4][row1row2row3row4; row5row6row7row8]
+    punpckhwd    m2, m0                     ;[5 - 8; 5 - 8][row1row2row3row4; row5row6row7row8]
+
+    mova         m0, [trans8_shuf]
+
+    vpermd       m1, m0, m1                 ;[1 - 2; 3 - 4][row1row2row3row4row5row6row7row8]
+    vpermd       m2, m0, m2                 ;[4 - 5; 6 - 7][row1row2row3row4row5row6row7row8]
+
+    movu         [r0], m1
+    movu         [r0 + 32], m2
+    RET
+%endif
+
+INIT_XMM sse2
+; 8x8 transpose of 8-bit pixels, SSE2: all eight 8-byte rows are loaded, then
+; a full byte/word/dword interleave network yields four xmm registers holding
+; two transposed rows each, stored contiguously (64 bytes total).
+cglobal transpose8, 3, 5, 8, dest, src, stride
+    lea          r3,    [2 * r2]
+    lea          r4,    [3 * r2]
+    movh         m0,    [r1]
+    movh         m1,    [r1 + r2]
+    movh         m2,    [r1 + r3]
+    movh         m3,    [r1 + r4]
+    movh         m4,    [r1 + 4 * r2]
+    lea          r1,    [r1 + 4 * r2]
+    movh         m5,    [r1 + r2]
+    movh         m6,    [r1 + r3]
+    movh         m7,    [r1 + r4]
+
+    punpcklbw    m0,    m1
+    punpcklbw    m2,    m3
+    punpcklbw    m4,    m5
+    punpcklbw    m6,    m7
+
+    punpckhwd    m1,    m0,    m2
+    punpcklwd    m0,    m2
+    punpckhwd    m5,    m4,    m6
+    punpcklwd    m4,    m6
+    punpckhdq    m2,    m0,    m4
+    punpckldq    m0,    m4
+    punpckhdq    m3,    m1,    m5
+    punpckldq    m1,    m5
+
+    movu         [r0],         m0
+    movu         [r0 + 16],    m2
+    movu         [r0 + 32],    m1
+    movu         [r0 + 48],    m3
+    RET
+%endif
+
+; TRANSPOSE_8x8 %1=dst stride (bytes): transposes an 8x8 block of 8-bit
+; pixels from [r1]/r2 to [r0]/%1 using the same interleave network as the
+; SSE2 transpose8, but storing two rows per register via movh/movhps.
+; Clobbers m0-m7 and advances r0/r1 past the block.
+%macro TRANSPOSE_8x8 1
+
+    movh         m0,    [r1]
+    movh         m1,    [r1 + r2]
+    movh         m2,    [r1 + 2 * r2]
+    lea          r1,    [r1 + 2 * r2]
+    movh         m3,    [r1 + r2]
+    movh         m4,    [r1 + 2 * r2]
+    lea          r1,    [r1 + 2 * r2]
+    movh         m5,    [r1 + r2]
+    movh         m6,    [r1 + 2 * r2]
+    lea          r1,    [r1 + 2 * r2]
+    movh         m7,    [r1 + r2]
+
+    punpcklbw    m0,    m1
+    punpcklbw    m2,    m3
+    punpcklbw    m4,    m5
+    punpcklbw    m6,    m7
+
+    punpckhwd    m1,    m0,    m2
+    punpcklwd    m0,    m2
+    punpckhwd    m5,    m4,    m6
+    punpcklwd    m4,    m6
+    punpckhdq    m2,    m0,    m4
+    punpckldq    m0,    m4
+    punpckhdq    m3,    m1,    m5
+    punpckldq    m1,    m5
+
+    movh           [r0],             m0
+    movhps         [r0 + %1],        m0
+    movh           [r0 + 2 * %1],    m2
+    lea            r0,               [r0 + 2 * %1]
+    movhps         [r0 + %1],        m2
+    movh           [r0 + 2 * %1],    m1
+    lea            r0,               [r0 + 2 * %1]
+    movhps         [r0 + %1],        m1
+    movh           [r0 + 2 * %1],    m3
+    lea            r0,               [r0 + 2 * %1]
+    movhps         [r0 + %1],        m3
+
+%endmacro
+
+
+;-----------------------------------------------------------------
+; void transpose_16x16(pixel *dst, pixel *src, intptr_t stride)
+;-----------------------------------------------------------------
+%if HIGH_BIT_DEPTH == 1
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+; transpose16x8_internal: transposes a 16x8 strip of 16-bit pixels.
+; Inputs: r0 = dst, r1 = src, r2 = src stride (bytes), r3 = 3 * r2.
+; A full word/dword/qword interleave network leaves each ymm holding output
+; rows n and n+8 in its two lanes, which the store sequence splits with
+; vextracti128 into the 32-byte-strided destination rows.
+cglobal transpose16x8_internal
+    movu         m0, [r1]
+    movu         m1, [r1 + r2]
+    movu         m2, [r1 + 2 * r2]
+    movu         m3, [r1 + r3]
+    lea          r1, [r1 + 4 * r2]
+
+    movu         m4, [r1]
+    movu         m5, [r1 + r2]
+    movu         m6, [r1 + 2 * r2]
+    movu         m7, [r1 + r3]
+
+    punpcklwd    m8, m0, m1                 ;[1 - 4; 9 - 12][1 2]
+    punpckhwd    m0, m1                     ;[5 - 8; 13 -16][1 2]
+
+    punpcklwd    m1, m2, m3                 ;[1 - 4; 9 - 12][3 4]
+    punpckhwd    m2, m3                     ;[5 - 8; 13 -16][3 4]
+
+    punpcklwd    m3, m4, m5                 ;[1 - 4; 9 - 12][5 6]
+    punpckhwd    m4, m5                     ;[5 - 8; 13 -16][5 6]
+
+    punpcklwd    m5, m6, m7                 ;[1 - 4; 9 - 12][7 8]
+    punpckhwd    m6, m7                     ;[5 - 8; 13 -16][7 8]
+
+    punpckldq    m7, m8, m1                 ;[1 - 2; 9 -  10][1 2 3 4]
+    punpckhdq    m8, m1                     ;[3 - 4; 11 - 12][1 2 3 4]
+
+    punpckldq    m1, m3, m5                 ;[1 - 2; 9 -  10][5 6 7 8]
+    punpckhdq    m3, m5                     ;[3 - 4; 11 - 12][5 6 7 8]
+
+    punpckldq    m5, m0, m2                 ;[5 - 6; 13 - 14][1 2 3 4]
+    punpckhdq    m0, m2                     ;[7 - 8; 15 - 16][1 2 3 4]
+
+    punpckldq    m2, m4, m6                 ;[5 - 6; 13 - 14][5 6 7 8]
+    punpckhdq    m4, m6                     ;[7 - 8; 15 - 16][5 6 7 8]
+
+    punpcklqdq   m6, m7, m1                 ;[1 ; 9 ][1 2 3 4 5 6 7 8]
+    punpckhqdq   m7, m1                     ;[2 ; 10][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m1, m8, m3                 ;[3 ; 11][1 2 3 4 5 6 7 8]
+    punpckhqdq   m8, m3                     ;[4 ; 12][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m3, m5, m2                 ;[5 ; 13][1 2 3 4 5 6 7 8]
+    punpckhqdq   m5, m2                     ;[6 ; 14][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m2, m0, m4                 ;[7 ; 15][1 2 3 4 5 6 7 8]
+    punpckhqdq   m0, m4                     ;[8 ; 16][1 2 3 4 5 6 7 8]
+
+    movu         [r0 + 0 * 32], xm6
+    vextracti128 [r0 + 8 * 32], m6, 1
+    movu         [r0 + 1  * 32], xm7
+    vextracti128 [r0 + 9  * 32], m7, 1
+    movu         [r0 + 2  * 32], xm1
+    vextracti128 [r0 + 10 * 32], m1, 1
+    movu         [r0 + 3  * 32], xm8
+    vextracti128 [r0 + 11 * 32], m8, 1
+    movu         [r0 + 4  * 32], xm3
+    vextracti128 [r0 + 12 * 32], m3, 1
+    movu         [r0 + 5  * 32], xm5
+    vextracti128 [r0 + 13 * 32], m5, 1
+    movu         [r0 + 6  * 32], xm2
+    vextracti128 [r0 + 14 * 32], m2, 1
+    movu         [r0 + 7  * 32], xm0
+    vextracti128 [r0 + 15 * 32], m0, 1
+    ret
+
+; 16x16 HBD transpose: two 16x8 strips; the second lands 16 bytes (8 output
+; columns) to the right in the contiguous destination.
+cglobal transpose16, 3, 4, 9
+    add          r2, r2
+    lea          r3, [r2 * 3]
+    call         transpose16x8_internal
+    lea          r1, [r1 + 4 * r2]
+    add          r0, 16
+    call         transpose16x8_internal
+    RET
+%endif
+INIT_XMM sse2
+; 16x16 HBD transpose, SSE2: four 8x8 quadrants via transpose8_internal.
+; r4 keeps the src base, r6 the dst base, r5 the dst stride (32 bytes);
+; transpose8_internal also expects its dst base in r3.
+cglobal transpose16, 3, 7, 4, dest, src, stride
+    add    r2,    r2
+    mov    r3,    r0
+    mov    r4,    r1
+    mov    r5,    32
+    mov    r6,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r4 + 16]
+    lea    r0,    [r6 + 8 * r5]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * r5 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    RET
+%else ;HIGH_BIT_DEPTH == 0
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+; 16x16 transpose of 8-bit pixels.  Rows n and n+8 share one ymm (r4 tracks
+; the lower half of the source); byte/word/dword interleaves transpose within
+; lanes and vpermq 0xD8 fixes the cross-lane ordering before each 32-byte
+; store of two output rows.
+cglobal transpose16, 3, 5, 9
+    lea          r3, [r2 * 3]
+    lea          r4, [r1 + 8 * r2]
+
+    movu         xm0, [r1]
+    movu         xm1, [r1 + r2]
+    movu         xm2, [r1 + 2 * r2]
+    movu         xm3, [r1 + r3]
+    vinserti128  m0,  m0, [r4], 1
+    vinserti128  m1,  m1, [r4 + r2], 1
+    vinserti128  m2,  m2, [r4 + 2 * r2], 1
+    vinserti128  m3,  m3, [r4 + r3], 1
+    lea          r1,  [r1 + 4 * r2]
+    lea          r4,  [r4 + 4 * r2]
+
+    movu         xm4, [r1]
+    movu         xm5, [r1 + r2]
+    movu         xm6, [r1 + 2 * r2]
+    movu         xm7, [r1 + r3]
+    vinserti128  m4,  m4, [r4], 1
+    vinserti128  m5,  m5, [r4 + r2], 1
+    vinserti128  m6,  m6, [r4 + 2 * r2], 1
+    vinserti128  m7,  m7, [r4 + r3], 1
+
+    punpcklbw    m8, m0, m1                 ;[1 - 8 ; 1 - 8 ][1 2  9 10]
+    punpckhbw    m0, m1                     ;[9 - 16; 9 - 16][1 2  9 10]
+
+    punpcklbw    m1, m2, m3                 ;[1 - 8 ; 1 - 8 ][3 4 11 12]
+    punpckhbw    m2, m3                     ;[9 - 16; 9 - 16][3 4 11 12]
+
+    punpcklbw    m3, m4, m5                 ;[1 - 8 ; 1 - 8 ][5 6 13 14]
+    punpckhbw    m4, m5                     ;[9 - 16; 9 - 16][5 6 13 14]
+
+    punpcklbw    m5, m6, m7                 ;[1 - 8 ; 1 - 8 ][7 8 15 16]
+    punpckhbw    m6, m7                     ;[9 - 16; 9 - 16][7 8 15 16]
+
+    punpcklwd    m7, m8, m1                 ;[1 - 4 ; 1 - 4][1 2 3 4 9 10 11 12]
+    punpckhwd    m8, m1                     ;[5 - 8 ; 5 - 8][1 2 3 4 9 10 11 12]
+
+    punpcklwd    m1, m3, m5                 ;[1 - 4 ; 1 - 4][5 6 7 8 13 14 15 16]
+    punpckhwd    m3, m5                     ;[5 - 8 ; 5 - 8][5 6 7 8 13 14 15 16]
+
+    punpcklwd    m5, m0, m2                 ;[9 - 12; 9 -  12][1 2 3 4 9 10 11 12]
+    punpckhwd    m0, m2                     ;[13- 16; 13 - 16][1 2 3 4 9 10 11 12]
+
+    punpcklwd    m2, m4, m6                 ;[9 - 12; 9 -  12][5 6 7 8 13 14 15 16]
+    punpckhwd    m4, m6                     ;[13- 16; 13 - 16][5 6 7 8 13 14 15 16]
+
+    punpckldq    m6, m7, m1                 ;[1 - 2 ; 1 - 2][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhdq    m7, m1                     ;[3 - 4 ; 3 - 4][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpckldq    m1, m8, m3                 ;[5 - 6 ; 5 - 6][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhdq    m8, m3                     ;[7 - 8 ; 7 - 8][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpckldq    m3, m5, m2                 ;[9 - 10; 9 -  10][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhdq    m5, m2                     ;[11- 12; 11 - 12][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpckldq    m2, m0, m4                 ;[13- 14; 13 - 14][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhdq    m0, m4                     ;[15- 16; 15 - 16][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    vpermq       m6, m6, 0xD8
+    vpermq       m7, m7, 0xD8
+    vpermq       m1, m1, 0xD8
+    vpermq       m8, m8, 0xD8
+    vpermq       m3, m3, 0xD8
+    vpermq       m5, m5, 0xD8
+    vpermq       m2, m2, 0xD8
+    vpermq       m0, m0, 0xD8
+
+    movu         [r0 + 0 *  16], m6
+    movu         [r0 + 2 *  16], m7
+    movu         [r0 + 4 *  16], m1
+    movu         [r0 + 6 *  16], m8
+    movu         [r0 + 8 *  16], m3
+    movu         [r0 + 10 * 16], m5
+    movu         [r0 + 12 * 16], m2
+    movu         [r0 + 14 * 16], m0
+    RET
+%endif
+INIT_XMM sse2
+cglobal transpose16, 3, 5, 8, dest, src, stride  ; 16x16 8bpp transpose (dst row pitch 16B = 16 px * 1B), done as four 8x8 quadrants
+    mov    r3,    r0                             ; r3 = dst base (r0 is repositioned per quadrant)
+    mov    r4,    r1                             ; r4 = src base
+    TRANSPOSE_8x8 16                             ; top-left 8x8; macro defined earlier in file -- NOTE(review): its exact r1 advance isn't visible here
+    lea    r1,    [r1 + 2 * r2]
+    lea    r0,    [r3 + 8]                       ; dst cols 8-15, top half <- src rows 8-15, cols 0-7
+    TRANSPOSE_8x8 16
+    lea    r1,    [r4 + 8]                       ; back to src row 0, cols 8-15
+    lea    r0,    [r3 + 8 * 16]                  ; dst rows 8-15, cols 0-7
+    TRANSPOSE_8x8 16
+    lea    r1,    [r1 + 2 * r2]
+    lea    r0,    [r3 + 8 * 16 + 8]              ; bottom-right 8x8 quadrant
+    TRANSPOSE_8x8 16
+    RET
+%endif
+
+cglobal transpose16_internal                     ; 16x16 transpose helper; expects r1=src, r2=src stride, r5=dst block base, r6=dst stride (register contract taken from the visible transpose32 sse2 caller below)
+    TRANSPOSE_8x8 r6                             ; top-left 8x8
+    lea    r1,    [r1 + 2 * r2]
+    lea    r0,    [r5 + 8]                       ; dst cols 8-15, top half
+    TRANSPOSE_8x8 r6
+    lea    r1,    [r1 + 2 * r2]
+    neg    r2                                    ; temporarily negate stride so the two leas below rewind r1 by 16 rows and step right 8 bytes
+    lea    r1,    [r1 + r2 * 8]
+    lea    r1,    [r1 + r2 * 8 + 8]
+    neg    r2                                    ; restore stride sign
+    lea    r0,    [r5 + 8 * r6]                  ; dst rows 8-15, cols 0-7
+    TRANSPOSE_8x8 r6
+    lea    r1,    [r1 + 2 * r2]
+    lea    r0,    [r5 + 8 * r6 + 8]              ; bottom-right 8x8 quadrant
+    TRANSPOSE_8x8 r6
+    ret                                          ; plain ret: internal label called via 'call', no cglobal prologue to unwind
+
+;-----------------------------------------------------------------
+; void transpose_32x32(pixel *dst, pixel *src, intptr_t stride)
+;-----------------------------------------------------------------
+%if HIGH_BIT_DEPTH == 1
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal transpose8x32_internal                   ; transpose one 8-row x 32-col strip of a 32x32 16-bit block; expects r2 = byte stride (caller doubles it), r3 = 3*r2; dst row pitch = 64B (32 px * 2B); advances r1 by 4*r2 (caller adds the other 4*r2)
+    movu         m0, [r1]                        ; each source row = 64 bytes = two ymm loads
+    movu         m1, [r1 + 32]
+    movu         m2, [r1 + r2]
+    movu         m3, [r1 + r2 + 32]
+    movu         m4, [r1 + 2 * r2]
+    movu         m5, [r1 + 2 * r2 + 32]
+    movu         m6, [r1 + r3]
+    movu         m7, [r1 + r3 + 32]
+    lea          r1, [r1 + 4 * r2]
+
+    punpcklwd    m8, m0, m2               ;[1 - 4;  9 - 12][1 2]
+    punpckhwd    m0, m2                   ;[5 - 8; 13 - 16][1 2]
+
+    punpcklwd    m2, m4, m6               ;[1 - 4;  9 - 12][3 4]
+    punpckhwd    m4, m6                   ;[5 - 8; 13 - 16][3 4]
+
+    punpcklwd    m6, m1, m3               ;[17 - 20; 25 - 28][1 2]
+    punpckhwd    m1, m3                   ;[21 - 24; 29 - 32][1 2]
+
+    punpcklwd    m3, m5, m7               ;[17 - 20; 25 - 28][3 4]
+    punpckhwd    m5, m7                   ;[21 - 24; 29 - 32][3 4]
+
+    punpckldq    m7, m8, m2               ;[1 - 2;  9 - 10][1 2 3 4]
+    punpckhdq    m8, m2                   ;[3 - 4; 11 - 12][1 2 3 4]
+
+    punpckldq    m2, m0, m4               ;[5 - 6; 13 - 14][1 2 3 4]
+    punpckhdq    m0, m4                   ;[7 - 8; 15 - 16][1 2 3 4]
+
+    punpckldq    m4, m6, m3               ;[17 - 18; 25 - 26][1 2 3 4]
+    punpckhdq    m6, m3                   ;[19 - 20; 27 - 28][1 2 3 4]
+
+    punpckldq    m3, m1, m5               ;[21 - 22; 29 - 30][1 2 3 4]
+    punpckhdq    m1, m5                   ;[23 - 24; 31 - 32][1 2 3 4]
+
+    movq         [r0 + 0  * 64], xm7             ; flush the finished [1-2; 9-10] quarters early to free m7 for the next row batch
+    movhps       [r0 + 1  * 64], xm7
+    vextracti128 xm5, m7, 1
+    movq         [r0 + 8 * 64], xm5
+    movhps       [r0 + 9 * 64], xm5
+
+    movu         m7,  [r1]                       ; second batch: source rows 5-8
+    movu         m9,  [r1 + 32]
+    movu         m10, [r1 + r2]
+    movu         m11, [r1 + r2 + 32]
+    movu         m12, [r1 + 2 * r2]
+    movu         m13, [r1 + 2 * r2 + 32]
+    movu         m14, [r1 + r3]
+    movu         m15, [r1 + r3 + 32]
+
+    punpcklwd    m5, m7, m10              ;[1 - 4;  9 - 12][5 6]
+    punpckhwd    m7, m10                  ;[5 - 8; 13 - 16][5 6]
+
+    punpcklwd    m10, m12, m14            ;[1 - 4;  9 - 12][7 8]
+    punpckhwd    m12, m14                 ;[5 - 8; 13 - 16][7 8]
+
+    punpcklwd    m14, m9, m11             ;[17 - 20; 25 - 28][5 6]
+    punpckhwd    m9, m11                  ;[21 - 24; 29 - 32][5 6]
+
+    punpcklwd    m11, m13, m15            ;[17 - 20; 25 - 28][7 8]
+    punpckhwd    m13, m15                 ;[21 - 24; 29 - 32][7 8]
+
+    punpckldq    m15, m5, m10             ;[1 - 2;  9 - 10][5 6 7 8]
+    punpckhdq    m5, m10                  ;[3 - 4; 11 - 12][5 6 7 8]
+
+    punpckldq    m10, m7, m12             ;[5 - 6; 13 - 14][5 6 7 8]
+    punpckhdq    m7, m12                  ;[7 - 8; 15 - 16][5 6 7 8]
+
+    punpckldq    m12, m14, m11            ;[17 - 18; 25 - 26][5 6 7 8]
+    punpckhdq    m14, m11                 ;[19 - 20; 27 - 28][5 6 7 8]
+
+    punpckldq    m11, m9, m13             ;[21 - 22; 29 - 30][5 6 7 8]
+    punpckhdq    m9, m13                  ;[23 - 24; 31 - 32][5 6 7 8]
+
+    movq         [r0 + 0 * 64 + 8], xm15         ; right half of dst rows 1,2,9,10 (cols 5-8 of the strip)
+    movhps       [r0 + 1 * 64 + 8], xm15
+    vextracti128 xm13, m15, 1
+    movq         [r0 + 8 * 64 + 8], xm13
+    movhps       [r0 + 9 * 64 + 8], xm13
+
+    punpcklqdq   m13, m8, m5              ;[3 ; 11][1 2 3 4 5 6 7 8]
+    punpckhqdq   m8, m5                   ;[4 ; 12][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m5, m2, m10              ;[5 ; 13][1 2 3 4 5 6 7 8]
+    punpckhqdq   m2, m10                  ;[6 ; 14][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m10, m0, m7              ;[7 ; 15][1 2 3 4 5 6 7 8]
+    punpckhqdq   m0, m7                   ;[8 ; 16][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m7, m4, m12              ;[17 ; 25][1 2 3 4 5 6 7 8]
+    punpckhqdq   m4, m12                  ;[18 ; 26][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m12, m6, m14             ;[19 ; 27][1 2 3 4 5 6 7 8]
+    punpckhqdq   m6, m14                  ;[20 ; 28][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m14, m3, m11             ;[21 ; 29][1 2 3 4 5 6 7 8]
+    punpckhqdq   m3, m11                  ;[22 ; 30][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m11, m1, m9              ;[23 ; 31][1 2 3 4 5 6 7 8]
+    punpckhqdq   m1, m9                   ;[24 ; 32][1 2 3 4 5 6 7 8]
+
+    movu         [r0 + 2  * 64], xm13            ; each ymm now holds two complete 8-pixel dst rows: low lane row N, high lane row N+8
+    vextracti128 [r0 + 10 * 64], m13, 1
+
+    movu         [r0 + 3  * 64], xm8
+    vextracti128 [r0 + 11 * 64], m8, 1
+
+    movu         [r0 + 4  * 64], xm5
+    vextracti128 [r0 + 12 * 64], m5, 1
+
+    movu         [r0 + 5  * 64], xm2
+    vextracti128 [r0 + 13 * 64], m2, 1
+
+    movu         [r0 + 6  * 64], xm10
+    vextracti128 [r0 + 14 * 64], m10, 1
+
+    movu         [r0 + 7  * 64], xm0
+    vextracti128 [r0 + 15 * 64], m0, 1
+
+    movu         [r0 + 16 * 64], xm7
+    vextracti128 [r0 + 24 * 64], m7, 1
+
+    movu         [r0 + 17 * 64], xm4
+    vextracti128 [r0 + 25 * 64], m4, 1
+
+    movu         [r0 + 18 * 64], xm12
+    vextracti128 [r0 + 26 * 64], m12, 1
+
+    movu         [r0 + 19 * 64], xm6
+    vextracti128 [r0 + 27 * 64], m6, 1
+
+    movu         [r0 + 20 * 64], xm14
+    vextracti128 [r0 + 28 * 64], m14, 1
+
+    movu         [r0 + 21 * 64], xm3
+    vextracti128 [r0 + 29 * 64], m3, 1
+
+    movu         [r0 + 22 * 64], xm11
+    vextracti128 [r0 + 30 * 64], m11, 1
+
+    movu         [r0 + 23 * 64], xm1
+    vextracti128 [r0 + 31 * 64], m1, 1
+    ret
+
+cglobal transpose32, 3, 4, 16                    ; void transpose_32x32(pixel*, pixel*, intptr_t) -- HIGH_BIT_DEPTH avx2: four 8-row strips
+    add          r2, r2                          ; pixel stride -> byte stride (16-bit pixels)
+    lea          r3, [r2 * 3]                    ; r3 = 3*stride, used by the helper for its 4th row
+    call         transpose8x32_internal          ; rows 1-8 (helper itself advances r1 by 4*r2)
+    add          r0, 16                          ; next 8 dst columns (8 px * 2B)
+    lea          r1, [r1 + 4 * r2]               ; skip remaining 4 rows of the strip
+    call         transpose8x32_internal          ; rows 9-16
+    add          r0, 16
+    lea          r1, [r1 + 4 * r2]
+    call         transpose8x32_internal          ; rows 17-24
+    add          r0, 16
+    lea          r1, [r1 + 4 * r2]
+    call         transpose8x32_internal          ; rows 25-32
+    RET
+%endif
+INIT_XMM sse2
+cglobal transpose32, 3, 7, 4, dest, src, stride  ; HIGH_BIT_DEPTH sse2 32x32: 4x4 grid of 8x8 sub-transposes via transpose8_internal (defined earlier; presumably consumes r3/r5 -- contract inferred from these call sites, confirm against its definition)
+    add    r2,    r2                             ; pixel stride -> byte stride (16-bit pixels)
+    mov    r3,    r0
+    mov    r4,    r1                             ; r4 = src base, reused at each column-group start
+    mov    r5,    64                             ; dst stride in bytes (32 px * 2B)
+    mov    r6,    r0                             ; r6 = dst base, anchor for all dst addressing
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]              ; helper leaves r1 8 bytes past its strip: step back and down 2 rows
+    lea    r0,    [r6 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r4 + 16]                      ; src cols 9-16 -> dst rows 9-16
+    lea    r0,    [r6 + 8 * 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * 64 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * 64 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * 64 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r4 + 32]                      ; src cols 17-24 -> dst rows 17-24
+    lea    r0,    [r6 + 16 * 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16 * 64 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16 * 64 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16 * 64 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r4 + 48]                      ; src cols 25-32 -> dst rows 25-32
+    lea    r0,    [r6 + 24 * 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 24 * 64 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 24 * 64 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 24 * 64 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    RET
+%else ;HIGH_BIT_DEPTH == 0
+INIT_XMM sse2
+cglobal transpose32, 3, 7, 8, dest, src, stride  ; 8bpp sse2 32x32: four 16x16 quadrant calls to transpose16_internal (r5 = dst quadrant base, r6 = dst stride)
+    mov    r3,    r0
+    mov    r4,    r1                             ; r4 = src base, reused for the right-half quadrants
+    mov    r5,    r0
+    mov    r6,    32                             ; dst stride in bytes (32 px * 1B)
+    call   transpose16_internal                  ; top-left 16x16
+    lea    r1,    [r1 - 8 + 2 * r2]              ; helper leaves r1 8 bytes into the next strip: rewind and step down 2 rows
+    lea    r0,    [r3 + 16]
+    mov    r5,    r0
+    call   transpose16_internal                  ; src rows 17-32, cols 1-16 -> dst top-right
+    lea    r1,    [r4 + 16]                      ; src cols 17-32 -> dst bottom half
+    lea    r0,    [r3 + 16 * 32]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 16 * 32 + 16]
+    mov    r5,    r0
+    call   transpose16_internal                  ; bottom-right 16x16
+    RET
+
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal transpose32, 3, 5, 16                    ; 8bpp avx2 32x32: each loop iteration transposes 16 source rows into a 16-byte-wide dst column strip
+    lea          r3, [r2 * 3]                    ; r3 = 3*stride
+    mov          r4d, 2                          ; two iterations: rows 1-16, then rows 17-32
+
+.loop:
+    movu         m0, [r1]                        ; one ymm load per 32-pixel row
+    movu         m1, [r1 + r2]
+    movu         m2, [r1 + 2 * r2]
+    movu         m3, [r1 + r3]
+    lea          r1, [r1 + 4 * r2]
+
+    movu         m4, [r1]
+    movu         m5, [r1 + r2]
+    movu         m6, [r1 + 2 * r2]
+    movu         m7, [r1 + r3]
+
+    punpcklbw    m8, m0, m1                 ;[1 - 8 ; 17 - 24][1 2]
+    punpckhbw    m0, m1                     ;[9 - 16; 25 - 32][1 2]
+
+    punpcklbw    m1, m2, m3                 ;[1 - 8 ; 17 - 24][3 4]
+    punpckhbw    m2, m3                     ;[9 - 16; 25 - 32][3 4]
+
+    punpcklbw    m3, m4, m5                 ;[1 - 8 ; 17 - 24][5 6]
+    punpckhbw    m4, m5                     ;[9 - 16; 25 - 32][5 6]
+
+    punpcklbw    m5, m6, m7                 ;[1 - 8 ; 17 - 24][7 8]
+    punpckhbw    m6, m7                     ;[9 - 16; 25 - 32][7 8]
+
+    punpcklwd    m7, m8, m1                 ;[1 - 4 ; 17 - 20][1 2 3 4]
+    punpckhwd    m8, m1                     ;[5 - 8 ; 21 - 24][1 2 3 4]
+
+    punpcklwd    m1, m3, m5                 ;[1 - 4 ; 17 - 20][5 6 7 8]
+    punpckhwd    m3, m5                     ;[5 - 8 ; 21 - 24][5 6 7 8]
+
+    punpcklwd    m5, m0, m2                 ;[9 - 12; 25 - 28][1 2 3 4]
+    punpckhwd    m0, m2                     ;[13- 16; 29 - 32][1 2 3 4]
+
+    punpcklwd    m2, m4, m6                 ;[9 - 12; 25 - 28][5 6 7 8]
+    punpckhwd    m4, m6                     ;[13- 16; 29 - 32][5 6 7 8]
+
+    punpckldq    m6, m7, m1                 ;[1 - 2 ; 17 - 18][1 2 3 4 5 6 7 8]
+    punpckhdq    m7, m1                     ;[3 - 4 ; 19 - 20][1 2 3 4 5 6 7 8]
+
+    punpckldq    m1, m8, m3                 ;[5 - 6 ; 21 - 22][1 2 3 4 5 6 7 8]
+    punpckhdq    m8, m3                     ;[7 - 8 ; 23 - 24][1 2 3 4 5 6 7 8]
+
+    punpckldq    m3, m5, m2                 ;[9 - 10; 25 - 26][1 2 3 4 5 6 7 8]
+    punpckhdq    m5, m2                     ;[11- 12; 27 - 28][1 2 3 4 5 6 7 8]
+
+    punpckldq    m2, m0, m4                 ;[13- 14; 29 - 30][1 2 3 4 5 6 7 8]
+    punpckhdq    m0, m4                     ;[15- 16; 31 - 32][1 2 3 4 5 6 7 8]
+
+    movq         [r0 + 0  * 32], xm6             ; flush the finished [1-2; 17-18] quarters early to free m6 for the next row batch
+    movhps       [r0 + 1  * 32], xm6
+    vextracti128 xm4, m6, 1
+    movq         [r0 + 16 * 32], xm4
+    movhps       [r0 + 17 * 32], xm4
+
+    lea          r1,  [r1 + 4 * r2]              ; second batch: source rows 9-16 of this iteration
+    movu         m9,  [r1]
+    movu         m10, [r1 + r2]
+    movu         m11, [r1 + 2 * r2]
+    movu         m12, [r1 + r3]
+    lea          r1,  [r1 + 4 * r2]
+
+    movu         m13, [r1]
+    movu         m14, [r1 + r2]
+    movu         m15, [r1 + 2 * r2]
+    movu         m6,  [r1 + r3]
+
+    punpcklbw    m4, m9, m10                ;[1 - 8 ; 17 - 24][9 10]
+    punpckhbw    m9, m10                    ;[9 - 16; 25 - 32][9 10]
+
+    punpcklbw    m10, m11, m12              ;[1 - 8 ; 17 - 24][11 12]
+    punpckhbw    m11, m12                   ;[9 - 16; 25 - 32][11 12]
+
+    punpcklbw    m12, m13, m14              ;[1 - 8 ; 17 - 24][13 14]
+    punpckhbw    m13, m14                   ;[9 - 16; 25 - 32][13 14]
+
+    punpcklbw    m14, m15, m6               ;[1 - 8 ; 17 - 24][15 16]
+    punpckhbw    m15, m6                    ;[9 - 16; 25 - 32][15 16]
+
+    punpcklwd    m6, m4, m10                ;[1 - 4 ; 17 - 20][9 10 11 12]
+    punpckhwd    m4, m10                    ;[5 - 8 ; 21 - 24][9 10 11 12]
+
+    punpcklwd    m10, m12, m14              ;[1 - 4 ; 17 - 20][13 14 15 16]
+    punpckhwd    m12, m14                   ;[5 - 8 ; 21 - 24][13 14 15 16]
+
+    punpcklwd    m14, m9, m11               ;[9 - 12; 25 - 28][9 10 11 12]
+    punpckhwd    m9, m11                    ;[13- 16; 29 - 32][9 10 11 12]
+
+    punpcklwd    m11, m13, m15              ;[9 - 12; 25 - 28][13 14 15 16]
+    punpckhwd    m13, m15                   ;[13- 16; 29 - 32][13 14 15 16]
+
+    punpckldq    m15, m6, m10               ;[1 - 2 ; 17 - 18][9 10 11 12 13 14 15 16]
+    punpckhdq    m6, m10                    ;[3 - 4 ; 19 - 20][9 10 11 12 13 14 15 16]
+
+    punpckldq    m10, m4, m12               ;[5 - 6 ; 21 - 22][9 10 11 12 13 14 15 16]
+    punpckhdq    m4, m12                    ;[7 - 8 ; 23 - 24][9 10 11 12 13 14 15 16]
+
+    punpckldq    m12, m14, m11              ;[9 - 10; 25 - 26][9 10 11 12 13 14 15 16]
+    punpckhdq    m14, m11                   ;[11- 12; 27 - 28][9 10 11 12 13 14 15 16]
+
+    punpckldq    m11, m9, m13               ;[13- 14; 29 - 30][9 10 11 12 13 14 15 16]
+    punpckhdq    m9, m13                    ;[15- 16; 31 - 32][9 10 11 12 13 14 15 16]
+
+
+    punpcklqdq   m13, m7, m6                ;[3 ; 19][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m7, m6                     ;[4 ; 20][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m6, m1, m10                ;[5 ; 21][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m1, m10                    ;[6 ; 22][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m10, m8, m4                ;[7 ; 23][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m8, m4                     ;[8 ; 24][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m4, m3, m12                ;[9 ; 25][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m3, m12                    ;[10; 26][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m12, m5, m14               ;[11; 27][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m5, m14                    ;[12; 28][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m14, m2, m11               ;[13; 29][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m2, m11                    ;[14; 30][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m11, m0, m9                ;[15; 31][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m0, m9                     ;[16; 32][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    movq         [r0 + 0  * 32 + 8], xm15        ; right half of the [1-2; 17-18] dst rows
+    movhps       [r0 + 1  * 32 + 8], xm15
+    vextracti128 xm9, m15, 1
+    movq         [r0 + 16 * 32 + 8], xm9
+    movhps       [r0 + 17 * 32 + 8], xm9
+
+    movu         [r0 + 2  * 32], xm13            ; each ymm now holds two complete 16-pixel dst rows: low lane row N, high lane row N+16
+    vextracti128 [r0 + 18 * 32], m13, 1
+
+    movu         [r0 + 3  * 32], xm7
+    vextracti128 [r0 + 19 * 32], m7, 1
+
+    movu         [r0 + 4  * 32], xm6
+    vextracti128 [r0 + 20 * 32], m6, 1
+
+    movu         [r0 + 5  * 32], xm1
+    vextracti128 [r0 + 21 * 32], m1, 1
+
+    movu         [r0 + 6  * 32], xm10
+    vextracti128 [r0 + 22 * 32], m10, 1
+
+    movu         [r0 + 7  * 32], xm8
+    vextracti128 [r0 + 23 * 32], m8, 1
+
+    movu         [r0 + 8  * 32], xm4
+    vextracti128 [r0 + 24 * 32], m4, 1
+
+    movu         [r0 + 9  * 32], xm3
+    vextracti128 [r0 + 25 * 32], m3, 1
+
+    movu         [r0 + 10 * 32], xm12
+    vextracti128 [r0 + 26 * 32], m12, 1
+
+    movu         [r0 + 11 * 32], xm5
+    vextracti128 [r0 + 27 * 32], m5, 1
+
+    movu         [r0 + 12 * 32], xm14
+    vextracti128 [r0 + 28 * 32], m14, 1
+
+    movu         [r0 + 13 * 32], xm2
+    vextracti128 [r0 + 29 * 32], m2, 1
+
+    movu         [r0 + 14 * 32], xm11
+    vextracti128 [r0 + 30 * 32], m11, 1
+
+    movu         [r0 + 15 * 32], xm0
+    vextracti128 [r0 + 31 * 32], m0, 1
+
+    add          r0, 16                          ; next 16 dst columns
+    lea          r1,  [r1 + 4 * r2]              ; next 16 src rows
+    dec          r4d
+    jnz          .loop
+    RET
+%endif
+%endif
+
+;-----------------------------------------------------------------
+; void transpose_64x64(pixel *dst, pixel *src, intptr_t stride)
+;-----------------------------------------------------------------
+%if HIGH_BIT_DEPTH == 1
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal transpose8x32_64_internal                ; same lane choreography as transpose8x32_internal above, but dst row pitch = 128B (64 px * 2B) for the 64x64 transpose; r2 = byte stride, r3 = 3*r2; advances r1 by 4*r2
+    movu         m0, [r1]                        ; each source row half = 64 bytes = two ymm loads
+    movu         m1, [r1 + 32]
+    movu         m2, [r1 + r2]
+    movu         m3, [r1 + r2 + 32]
+    movu         m4, [r1 + 2 * r2]
+    movu         m5, [r1 + 2 * r2 + 32]
+    movu         m6, [r1 + r3]
+    movu         m7, [r1 + r3 + 32]
+    lea          r1, [r1 + 4 * r2]
+
+    punpcklwd    m8, m0, m2               ;[1 - 4;  9 - 12][1 2]
+    punpckhwd    m0, m2                   ;[5 - 8; 13 - 16][1 2]
+
+    punpcklwd    m2, m4, m6               ;[1 - 4;  9 - 12][3 4]
+    punpckhwd    m4, m6                   ;[5 - 8; 13 - 16][3 4]
+
+    punpcklwd    m6, m1, m3               ;[17 - 20; 25 - 28][1 2]
+    punpckhwd    m1, m3                   ;[21 - 24; 29 - 32][1 2]
+
+    punpcklwd    m3, m5, m7               ;[17 - 20; 25 - 28][3 4]
+    punpckhwd    m5, m7                   ;[21 - 24; 29 - 32][3 4]
+
+    punpckldq    m7, m8, m2               ;[1 - 2;  9 - 10][1 2 3 4]
+    punpckhdq    m8, m2                   ;[3 - 4; 11 - 12][1 2 3 4]
+
+    punpckldq    m2, m0, m4               ;[5 - 6; 13 - 14][1 2 3 4]
+    punpckhdq    m0, m4                   ;[7 - 8; 15 - 16][1 2 3 4]
+
+    punpckldq    m4, m6, m3               ;[17 - 18; 25 - 26][1 2 3 4]
+    punpckhdq    m6, m3                   ;[19 - 20; 27 - 28][1 2 3 4]
+
+    punpckldq    m3, m1, m5               ;[21 - 22; 29 - 30][1 2 3 4]
+    punpckhdq    m1, m5                   ;[23 - 24; 31 - 32][1 2 3 4]
+
+    movq         [r0 + 0  * 128], xm7            ; flush the finished [1-2; 9-10] quarters early to free m7 for the next row batch
+    movhps       [r0 + 1  * 128], xm7
+    vextracti128 xm5, m7, 1
+    movq         [r0 + 8 * 128], xm5
+    movhps       [r0 + 9 * 128], xm5
+
+    movu         m7,  [r1]                       ; second batch: source rows 5-8
+    movu         m9,  [r1 + 32]
+    movu         m10, [r1 + r2]
+    movu         m11, [r1 + r2 + 32]
+    movu         m12, [r1 + 2 * r2]
+    movu         m13, [r1 + 2 * r2 + 32]
+    movu         m14, [r1 + r3]
+    movu         m15, [r1 + r3 + 32]
+
+    punpcklwd    m5, m7, m10              ;[1 - 4;  9 - 12][5 6]
+    punpckhwd    m7, m10                  ;[5 - 8; 13 - 16][5 6]
+
+    punpcklwd    m10, m12, m14            ;[1 - 4;  9 - 12][7 8]
+    punpckhwd    m12, m14                 ;[5 - 8; 13 - 16][7 8]
+
+    punpcklwd    m14, m9, m11             ;[17 - 20; 25 - 28][5 6]
+    punpckhwd    m9, m11                  ;[21 - 24; 29 - 32][5 6]
+
+    punpcklwd    m11, m13, m15            ;[17 - 20; 25 - 28][7 8]
+    punpckhwd    m13, m15                 ;[21 - 24; 29 - 32][7 8]
+
+    punpckldq    m15, m5, m10             ;[1 - 2;  9 - 10][5 6 7 8]
+    punpckhdq    m5, m10                  ;[3 - 4; 11 - 12][5 6 7 8]
+
+    punpckldq    m10, m7, m12             ;[5 - 6; 13 - 14][5 6 7 8]
+    punpckhdq    m7, m12                  ;[7 - 8; 15 - 16][5 6 7 8]
+
+    punpckldq    m12, m14, m11            ;[17 - 18; 25 - 26][5 6 7 8]
+    punpckhdq    m14, m11                 ;[19 - 20; 27 - 28][5 6 7 8]
+
+    punpckldq    m11, m9, m13             ;[21 - 22; 29 - 30][5 6 7 8]
+    punpckhdq    m9, m13                  ;[23 - 24; 31 - 32][5 6 7 8]
+
+    movq         [r0 + 0 * 128 + 8], xm15        ; right half of dst rows 1,2,9,10 (cols 5-8 of the strip)
+    movhps       [r0 + 1 * 128 + 8], xm15
+    vextracti128 xm13, m15, 1
+    movq         [r0 + 8 * 128 + 8], xm13
+    movhps       [r0 + 9 * 128 + 8], xm13
+
+    punpcklqdq   m13, m8, m5              ;[3 ; 11][1 2 3 4 5 6 7 8]
+    punpckhqdq   m8, m5                   ;[4 ; 12][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m5, m2, m10              ;[5 ; 13][1 2 3 4 5 6 7 8]
+    punpckhqdq   m2, m10                  ;[6 ; 14][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m10, m0, m7              ;[7 ; 15][1 2 3 4 5 6 7 8]
+    punpckhqdq   m0, m7                   ;[8 ; 16][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m7, m4, m12              ;[17 ; 25][1 2 3 4 5 6 7 8]
+    punpckhqdq   m4, m12                  ;[18 ; 26][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m12, m6, m14             ;[19 ; 27][1 2 3 4 5 6 7 8]
+    punpckhqdq   m6, m14                  ;[20 ; 28][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m14, m3, m11             ;[21 ; 29][1 2 3 4 5 6 7 8]
+    punpckhqdq   m3, m11                  ;[22 ; 30][1 2 3 4 5 6 7 8]
+
+    punpcklqdq   m11, m1, m9              ;[23 ; 31][1 2 3 4 5 6 7 8]
+    punpckhqdq   m1, m9                   ;[24 ; 32][1 2 3 4 5 6 7 8]
+
+    movu         [r0 + 2  * 128], xm13           ; each ymm now holds two complete 8-pixel dst rows: low lane row N, high lane row N+8
+    vextracti128 [r0 + 10 * 128], m13, 1
+
+    movu         [r0 + 3  * 128], xm8
+    vextracti128 [r0 + 11 * 128], m8, 1
+
+    movu         [r0 + 4  * 128], xm5
+    vextracti128 [r0 + 12 * 128], m5, 1
+
+    movu         [r0 + 5  * 128], xm2
+    vextracti128 [r0 + 13 * 128], m2, 1
+
+    movu         [r0 + 6  * 128], xm10
+    vextracti128 [r0 + 14 * 128], m10, 1
+
+    movu         [r0 + 7  * 128], xm0
+    vextracti128 [r0 + 15 * 128], m0, 1
+
+    movu         [r0 + 16 * 128], xm7
+    vextracti128 [r0 + 24 * 128], m7, 1
+
+    movu         [r0 + 17 * 128], xm4
+    vextracti128 [r0 + 25 * 128], m4, 1
+
+    movu         [r0 + 18 * 128], xm12
+    vextracti128 [r0 + 26 * 128], m12, 1
+
+    movu         [r0 + 19 * 128], xm6
+    vextracti128 [r0 + 27 * 128], m6, 1
+
+    movu         [r0 + 20 * 128], xm14
+    vextracti128 [r0 + 28 * 128], m14, 1
+
+    movu         [r0 + 21 * 128], xm3
+    vextracti128 [r0 + 29 * 128], m3, 1
+
+    movu         [r0 + 22 * 128], xm11
+    vextracti128 [r0 + 30 * 128], m11, 1
+
+    movu         [r0 + 23 * 128], xm1
+    vextracti128 [r0 + 31 * 128], m1, 1
+    ret
+
+cglobal transpose64, 3, 6, 16                    ; HIGH_BIT_DEPTH avx2 64x64: 16 helper calls over an 8x2 grid of 8-row x 32-col strips; r4 tracks the next src row group, r5 the next dst column base
+    add          r2, r2                          ; pixel stride -> byte stride (16-bit pixels)
+    lea          r3, [3 * r2]
+    lea          r4, [r1 + 64]                   ; src right half (cols 33-64; 32 px * 2B)
+    lea          r5, [r0 + 16]                   ; dst column base for the next strip
+
+    call         transpose8x32_64_internal       ; rows 1-8, left half
+    mov          r1, r4
+    lea          r0, [r0 + 32 * 128]             ; right half lands 32 dst rows lower
+    call         transpose8x32_64_internal       ; rows 1-8, right half
+    mov          r0, r5
+    lea          r5, [r0 + 16]
+    lea          r4, [r1 + 4 * r2]               ; helper already advanced r1 by 4*r2; this completes the 8-row step
+    lea          r1, [r4 - 64]                   ; back to the left half of the next 8 rows
+    call         transpose8x32_64_internal       ; rows 9-16, left half
+    mov          r1, r4
+    lea          r0, [r0 + 32 * 128]
+    call         transpose8x32_64_internal
+    mov          r0, r5
+    lea          r5, [r0 + 16]
+    lea          r4, [r1 + 4 * r2]
+    lea          r1, [r4 - 64]
+    call         transpose8x32_64_internal       ; rows 17-24
+    mov          r1, r4
+    lea          r0, [r0 + 32 * 128]
+    call         transpose8x32_64_internal
+    mov          r0, r5
+    lea          r5, [r0 + 16]
+    lea          r4, [r1 + 4 * r2]
+    lea          r1, [r4 - 64]
+    call         transpose8x32_64_internal       ; rows 25-32
+    mov          r1, r4
+    lea          r0, [r0 + 32 * 128]
+    call         transpose8x32_64_internal
+    mov          r0, r5
+    lea          r5, [r0 + 16]
+    lea          r4, [r1 + 4 * r2]
+    lea          r1, [r4 - 64]
+    call         transpose8x32_64_internal       ; rows 33-40
+    mov          r1, r4
+    lea          r0, [r0 + 32 * 128]
+    call         transpose8x32_64_internal
+    mov          r0, r5
+    lea          r5, [r0 + 16]
+    lea          r4, [r1 + 4 * r2]
+    lea          r1, [r4 - 64]
+    call         transpose8x32_64_internal       ; rows 41-48
+    mov          r1, r4
+    lea          r0, [r0 + 32 * 128]
+    call         transpose8x32_64_internal
+    mov          r0, r5
+    lea          r5, [r0 + 16]
+    lea          r4, [r1 + 4 * r2]
+    lea          r1, [r4 - 64]
+    call         transpose8x32_64_internal       ; rows 49-56
+    mov          r1, r4
+    lea          r0, [r0 + 32 * 128]
+    call         transpose8x32_64_internal
+    mov          r0, r5
+    lea          r4, [r1 + 4 * r2]
+    lea          r1, [r4 - 64]
+    call         transpose8x32_64_internal       ; rows 57-64
+    mov          r1, r4
+    lea          r0, [r0 + 32 * 128]
+    call         transpose8x32_64_internal
+    RET
+%endif
+INIT_XMM sse2
+cglobal transpose64, 3, 7, 4, dest, src, stride  ; HIGH_BIT_DEPTH sse2 64x64: 8x8 grid of 8x8 sub-transposes via transpose8_internal (defined earlier; presumably consumes r3/r5 -- contract inferred from these call sites, confirm against its definition)
+    add    r2,    r2                             ; pixel stride -> byte stride (16-bit pixels)
+    mov    r3,    r0
+    mov    r4,    r1                             ; r4 = src base, reused at each column-group start
+    mov    r5,    128                            ; dst stride in bytes (64 px * 2B)
+    mov    r6,    r0                             ; r6 = dst base, anchor for all dst addressing
+    call   transpose8_internal                   ; src cols 1-8 -> dst rows 1-8
+    lea    r1,    [r1 - 8 + 2 * r2]              ; helper leaves r1 8 bytes past its strip: step back and down 2 rows
+    lea    r0,    [r6 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 80]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 96]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 112]
+    mov    r3,    r0
+    call   transpose8_internal
+
+    lea    r1,    [r4 + 16]                      ; src cols 9-16 -> dst rows 9-16
+    lea    r0,    [r6 + 8 * 128]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * 128 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * 128 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * 128 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * 128 + 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * 128 + 80]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * 128 + 96]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 8 * 128 + 112]
+    mov    r3,    r0
+    call   transpose8_internal
+
+    lea    r1,    [r4 + 32]                      ; src cols 17-24 -> dst rows 17-24
+    lea    r0,    [r6 + 16 * 128]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16 * 128 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16 * 128 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16 * 128 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16 * 128 + 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16 * 128 + 80]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16 * 128 + 96]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 16 * 128 + 112]
+    mov    r3,    r0
+    call   transpose8_internal
+
+    lea    r1,    [r4 + 48]                      ; src cols 25-32 -> dst rows 25-32
+    lea    r0,    [r6 + 24 * 128]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 24 * 128 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 24 * 128 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 24 * 128 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 24 * 128 + 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 24 * 128 + 80]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 24 * 128 + 96]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 24 * 128 + 112]
+    mov    r3,    r0
+    call   transpose8_internal
+
+    lea    r1,    [r4 + 64]                      ; src cols 33-40 -> dst rows 33-40
+    lea    r0,    [r6 + 32 * 128]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 32 * 128 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 32 * 128 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 32 * 128 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 32 * 128 + 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 32 * 128 + 80]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 32 * 128 + 96]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 32 * 128 + 112]
+    mov    r3,    r0
+    call   transpose8_internal
+
+    lea    r1,    [r4 + 80]                      ; src cols 41-48 -> dst rows 41-48
+    lea    r0,    [r6 + 40 * 128]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 40 * 128 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 40 * 128 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 40 * 128 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 40 * 128 + 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 40 * 128 + 80]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 40 * 128 + 96]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 40 * 128 + 112]
+    mov    r3,    r0
+    call   transpose8_internal
+
+    lea    r1,    [r4 + 96]                      ; src cols 49-56 -> dst rows 49-56
+    lea    r0,    [r6 + 48 * 128]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 48 * 128 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 48 * 128 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 48 * 128 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 48 * 128 + 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 48 * 128 + 80]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 48 * 128 + 96]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 48 * 128 + 112]
+    mov    r3,    r0
+    call   transpose8_internal
+
+    lea    r1,    [r4 + 112]                     ; src cols 57-64 -> dst rows 57-64
+    lea    r0,    [r6 + 56 * 128]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 56 * 128 + 16]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 56 * 128 + 32]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 56 * 128 + 48]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 56 * 128 + 64]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 56 * 128 + 80]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 56 * 128 + 96]
+    mov    r3,    r0
+    call   transpose8_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r6 + 56 * 128 + 112]
+    mov    r3,    r0
+    call   transpose8_internal
+    RET
+%else ;HIGH_BIT_DEPTH == 0
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+
+cglobal transpose16x32_avx2            ; helper: transpose a 32-wide x 16-tall byte tile at [r1] (stride r2, r3 = 3*r2) into 32 rows x 16 cols at [r0] with a 64-byte destination pitch; advances r1 past the 16 source rows, clobbers m0-m15
+    movu         m0, [r1]
+    movu         m1, [r1 + r2]
+    movu         m2, [r1 + 2 * r2]
+    movu         m3, [r1 + r3]
+    lea          r1, [r1 + 4 * r2]
+
+    movu         m4, [r1]
+    movu         m5, [r1 + r2]
+    movu         m6, [r1 + 2 * r2]
+    movu         m7, [r1 + r3]
+
+    punpcklbw    m8, m0, m1                 ;[1 - 8 ; 17 - 24][1 2]
+    punpckhbw    m0, m1                     ;[9 - 16; 25 - 32][1 2]
+
+    punpcklbw    m1, m2, m3                 ;[1 - 8 ; 17 - 24][3 4]
+    punpckhbw    m2, m3                     ;[9 - 16; 25 - 32][3 4]
+
+    punpcklbw    m3, m4, m5                 ;[1 - 8 ; 17 - 24][5 6]
+    punpckhbw    m4, m5                     ;[9 - 16; 25 - 32][5 6]
+
+    punpcklbw    m5, m6, m7                 ;[1 - 8 ; 17 - 24][7 8]
+    punpckhbw    m6, m7                     ;[9 - 16; 25 - 32][7 8]
+
+    punpcklwd    m7, m8, m1                 ;[1 - 4 ; 17 - 20][1 2 3 4]
+    punpckhwd    m8, m1                     ;[5 - 8 ; 21 - 24][1 2 3 4]
+
+    punpcklwd    m1, m3, m5                 ;[1 - 4 ; 17 - 20][5 6 7 8]
+    punpckhwd    m3, m5                     ;[5 - 8 ; 21 - 24][5 6 7 8]
+
+    punpcklwd    m5, m0, m2                 ;[9 - 12; 25 - 28][1 2 3 4]
+    punpckhwd    m0, m2                     ;[13- 16; 29 - 32][1 2 3 4]
+
+    punpcklwd    m2, m4, m6                 ;[9 - 12; 25 - 28][5 6 7 8]
+    punpckhwd    m4, m6                     ;[13- 16; 29 - 32][5 6 7 8]
+
+    punpckldq    m6, m7, m1                 ;[1 - 2 ; 17 - 18][1 2 3 4 5 6 7 8]
+    punpckhdq    m7, m1                     ;[3 - 4 ; 19 - 20][1 2 3 4 5 6 7 8]
+
+    punpckldq    m1, m8, m3                 ;[5 - 6 ; 21 - 22][1 2 3 4 5 6 7 8]
+    punpckhdq    m8, m3                     ;[7 - 8 ; 23 - 24][1 2 3 4 5 6 7 8]
+
+    punpckldq    m3, m5, m2                 ;[9 - 10; 25 - 26][1 2 3 4 5 6 7 8]
+    punpckhdq    m5, m2                     ;[11- 12; 27 - 28][1 2 3 4 5 6 7 8]
+
+    punpckldq    m2, m0, m4                 ;[13- 14; 29 - 30][1 2 3 4 5 6 7 8]
+    punpckhdq    m0, m4                     ;[15- 16; 31 - 32][1 2 3 4 5 6 7 8]
+
+    movq         [r0 + 0  * 64], xm6        ; output rows 1/2, source cols 1-8; cols 9-16 are written after the second 8-row pass below
+    movhps       [r0 + 1  * 64], xm6
+    vextracti128 xm4, m6, 1
+    movq         [r0 + 16 * 64], xm4        ; upper ymm lane holds output rows 17/18
+    movhps       [r0 + 17 * 64], xm4
+
+    lea          r1,  [r1 + 4 * r2]         ; second half: source rows 9-16
+    movu         m9,  [r1]
+    movu         m10, [r1 + r2]
+    movu         m11, [r1 + 2 * r2]
+    movu         m12, [r1 + r3]
+    lea          r1,  [r1 + 4 * r2]
+
+    movu         m13, [r1]
+    movu         m14, [r1 + r2]
+    movu         m15, [r1 + 2 * r2]
+    movu         m6,  [r1 + r3]
+
+    punpcklbw    m4, m9, m10                ;[1 - 8 ; 17 - 24][9 10]
+    punpckhbw    m9, m10                    ;[9 - 16; 25 - 32][9 10]
+
+    punpcklbw    m10, m11, m12              ;[1 - 8 ; 17 - 24][11 12]
+    punpckhbw    m11, m12                   ;[9 - 16; 25 - 32][11 12]
+
+    punpcklbw    m12, m13, m14              ;[1 - 8 ; 17 - 24][13 14]
+    punpckhbw    m13, m14                   ;[9 - 16; 25 - 32][13 14]
+
+    punpcklbw    m14, m15, m6               ;[1 - 8 ; 17 - 24][15 16]
+    punpckhbw    m15, m6                    ;[9 - 16; 25 - 32][15 16]
+
+    punpcklwd    m6, m4, m10                ;[1 - 4 ; 17 - 20][9 10 11 12]
+    punpckhwd    m4, m10                    ;[5 - 8 ; 21 - 24][9 10 11 12]
+
+    punpcklwd    m10, m12, m14              ;[1 - 4 ; 17 - 20][13 14 15 16]
+    punpckhwd    m12, m14                   ;[5 - 8 ; 21 - 24][13 14 15 16]
+
+    punpcklwd    m14, m9, m11               ;[9 - 12; 25 - 28][9 10 11 12]
+    punpckhwd    m9, m11                    ;[13- 16; 29 - 32][9 10 11 12]
+
+    punpcklwd    m11, m13, m15              ;[9 - 12; 25 - 28][13 14 15 16]
+    punpckhwd    m13, m15                   ;[13- 16; 29 - 32][13 14 15 16]
+
+    punpckldq    m15, m6, m10               ;[1 - 2 ; 17 - 18][9 10 11 12 13 14 15 16]
+    punpckhdq    m6, m10                    ;[3 - 4 ; 19 - 20][9 10 11 12 13 14 15 16]
+
+    punpckldq    m10, m4, m12               ;[5 - 6 ; 21 - 22][9 10 11 12 13 14 15 16]
+    punpckhdq    m4, m12                    ;[7 - 8 ; 23 - 24][9 10 11 12 13 14 15 16]
+
+    punpckldq    m12, m14, m11              ;[9 - 10; 25 - 26][9 10 11 12 13 14 15 16]
+    punpckhdq    m14, m11                   ;[11- 12; 27 - 28][9 10 11 12 13 14 15 16]
+
+    punpckldq    m11, m9, m13               ;[13- 14; 29 - 30][9 10 11 12 13 14 15 16]
+    punpckhdq    m9, m13                    ;[15- 16; 31 - 32][9 10 11 12 13 14 15 16]
+
+
+    punpcklqdq   m13, m7, m6                ;[3 ; 19][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m7, m6                     ;[4 ; 20][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m6, m1, m10                ;[5 ; 21][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m1, m10                    ;[6 ; 22][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m10, m8, m4                ;[7 ; 23][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m8, m4                     ;[8 ; 24][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m4, m3, m12                ;[9 ; 25][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m3, m12                    ;[10; 26][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m12, m5, m14               ;[11; 27][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m5, m14                    ;[12; 28][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m14, m2, m11               ;[13; 29][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m2, m11                    ;[14; 30][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m11, m0, m9                ;[15; 31][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m0, m9                     ;[16; 32][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    movq         [r0 + 0  * 64 + 8], xm15   ; complete rows 1/2 and 17/18 (cols 9-16), started before the second pass
+    movhps       [r0 + 1  * 64 + 8], xm15
+    vextracti128 xm9, m15, 1
+    movq         [r0 + 16 * 64 + 8], xm9
+    movhps       [r0 + 17 * 64 + 8], xm9
+
+    movu         [r0 + 2  * 64], xm13       ; remaining rows are full 16-byte lines: low lane = row n, high lane = row n+16
+    vextracti128 [r0 + 18 * 64], m13, 1
+
+    movu         [r0 + 3  * 64], xm7
+    vextracti128 [r0 + 19 * 64], m7, 1
+
+    movu         [r0 + 4  * 64], xm6
+    vextracti128 [r0 + 20 * 64], m6, 1
+
+    movu         [r0 + 5  * 64], xm1
+    vextracti128 [r0 + 21 * 64], m1, 1
+
+    movu         [r0 + 6  * 64], xm10
+    vextracti128 [r0 + 22 * 64], m10, 1
+
+    movu         [r0 + 7  * 64], xm8
+    vextracti128 [r0 + 23 * 64], m8, 1
+
+    movu         [r0 + 8  * 64], xm4
+    vextracti128 [r0 + 24 * 64], m4, 1
+
+    movu         [r0 + 9  * 64], xm3
+    vextracti128 [r0 + 25 * 64], m3, 1
+
+    movu         [r0 + 10 * 64], xm12
+    vextracti128 [r0 + 26 * 64], m12, 1
+
+    movu         [r0 + 11 * 64], xm5
+    vextracti128 [r0 + 27 * 64], m5, 1
+
+    movu         [r0 + 12 * 64], xm14
+    vextracti128 [r0 + 28 * 64], m14, 1
+
+    movu         [r0 + 13 * 64], xm2
+    vextracti128 [r0 + 29 * 64], m2, 1
+
+    movu         [r0 + 14 * 64], xm11
+    vextracti128 [r0 + 30 * 64], m11, 1
+
+    movu         [r0 + 15 * 64], xm0
+    vextracti128 [r0 + 31 * 64], m0, 1
+    ret                                     ; plain ret: internal helper, no prologue/epilogue to unwind
+
+cglobal transpose64, 3, 6, 16          ; void transpose64(pixel *dst, pixel *src, intptr_t stride): 64x64 byte transpose built from eight 32x16 tile transposes
+
+    lea          r3, [r2 * 3]          ; r3 = 3*stride, row offset expected by the tile helper
+    lea          r4, [r0 + 16]         ; r4 = dst anchor for the next 16-column strip
+
+    lea          r5, [r1 + 32]         ; save right half of the current 16 source rows (helper consumes r1)
+    call         transpose16x32_avx2   ; src rows 1-16, cols 1-32 -> dst rows 1-32, cols 1-16
+    lea          r0, [r0 + 32 * 64]
+    mov          r1, r5
+    call         transpose16x32_avx2   ; src rows 1-16, cols 33-64 -> dst rows 33-64, cols 1-16
+
+    mov          r0, r4
+    lea          r5, [r1 + 4 * r2]     ; helper left r1 12 rows in; +4 more rows = start of rows 17-32
+
+    lea          r1, [r5 - 32]
+    call         transpose16x32_avx2   ; src rows 17-32 -> dst cols 17-32 (upper half)
+    lea          r0, [r0 + 32 * 64]
+    mov          r1, r5
+    call         transpose16x32_avx2
+
+    lea          r0, [r4 + 16]
+    lea          r5, [r1 + 4 * r2]
+
+    lea          r1, [r5 - 32]
+    call         transpose16x32_avx2   ; src rows 33-48 -> dst cols 33-48
+    lea          r0, [r0 + 32 * 64]
+    mov          r1, r5
+    call         transpose16x32_avx2
+
+    lea          r5, [r1 + 4 * r2]
+    lea          r0, [r4 + 32]
+
+    lea          r1, [r5 - 32]
+    call         transpose16x32_avx2   ; src rows 49-64 -> dst cols 49-64
+    lea          r0, [r0 + 32 * 64]
+    mov          r1, r5
+    call         transpose16x32_avx2
+    RET
+%endif
+
+INIT_XMM sse2
+cglobal transpose64, 3, 7, 8, dest, src, stride    ; SSE2 fallback: 64x64 transpose via sixteen 16x16 tiles (transpose16_internal defined earlier in this file)
+    mov    r3,    r0                   ; r3 = dst base, kept across helper calls
+    mov    r4,    r1                   ; r4 = src base
+    mov    r5,    r0                   ; r5 = current tile's dst (helper convention)
+    mov    r6,    64                   ; presumably the destination pitch for transpose16_internal - confirm against its definition
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]    ; rewind helper's column advance, step to the next 16 source columns
+    lea    r0,    [r3 + 16]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 32]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 48]
+    mov    r5,    r0
+    call   transpose16_internal
+
+    lea    r1,    [r4 + 16]            ; next 16-row band of the source
+    lea    r0,    [r3 + 16 * 64]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 16 * 64 + 16]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 16 * 64 + 32]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 16 * 64 + 48]
+    mov    r5,    r0
+    call   transpose16_internal
+
+    lea    r1,    [r4 + 32]            ; third band
+    lea    r0,    [r3 + 32 * 64]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 32 * 64 + 16]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 32 * 64 + 32]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 32 * 64 + 48]
+    mov    r5,    r0
+    call   transpose16_internal
+
+    lea    r1,    [r4 + 48]            ; final band
+    lea    r0,    [r3 + 48 * 64]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 48 * 64 + 16]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 48 * 64 + 32]
+    mov    r5,    r0
+    call   transpose16_internal
+    lea    r1,    [r1 - 8 + 2 * r2]
+    lea    r0,    [r3 + 48 * 64 + 48]
+    mov    r5,    r0
+    call   transpose16_internal
+    RET
+%endif
+
+
+;=============================================================================
+; SSIM
+;=============================================================================
+
+;-----------------------------------------------------------------------------
+; void pixel_ssim_4x4x2_core( const uint8_t *pix1, intptr_t stride1,
+;                             const uint8_t *pix2, intptr_t stride2, int sums[2][4] )
+;-----------------------------------------------------------------------------
+%macro SSIM_ITER 1                     ; one 2-sample-row step of the 4x4x2 SSIM sums; %1 = iteration index 0..3
+%if HIGH_BIT_DEPTH
+    movdqu    m5, [r0+(%1&1)*r1]       ; pix1 row (16-bit samples)
+    movdqu    m6, [r2+(%1&1)*r3]       ; pix2 row
+%else
+    movq      m5, [r0+(%1&1)*r1]       ; pix1 row (8 bytes)
+    movq      m6, [r2+(%1&1)*r3]       ; pix2 row
+    punpcklbw m5, m0                   ; widen bytes to words (m0 is zero)
+    punpcklbw m6, m0
+%endif
+%if %1==1
+    lea       r0, [r0+r1*2]            ; advance both sources two rows at the half-way point
+    lea       r2, [r2+r3*2]
+%endif
+%if %1==0
+    movdqa    m1, m5                   ; iteration 0 initializes the accumulators
+    movdqa    m2, m6
+%else
+    paddw     m1, m5                   ; s1 += pix1
+    paddw     m2, m6                   ; s2 += pix2
+%endif
+    pmaddwd   m7, m5, m6               ; pix1*pix2 pair sums
+    pmaddwd   m5, m5                   ; pix1^2 pair sums
+    pmaddwd   m6, m6                   ; pix2^2 pair sums
+    ACCUM  paddd, 3, 5, %1             ; ss  (+)= pix1^2 (mova on iteration 0, paddd after)
+    ACCUM  paddd, 4, 7, %1             ; s12 (+)= pix1*pix2
+    paddd     m3, m6                   ; ss += pix2^2
+%endmacro
+
+%macro SSIM 0
+cglobal pixel_ssim_4x4x2_core, 4,4,8   ; accumulate SSIM sums (s1, s2, ss, s12) for two adjacent 4x4 blocks into sums[2][4]
+    FIX_STRIDES r1, r3
+    pxor      m0, m0                   ; zero for byte->word widening in SSIM_ITER
+    SSIM_ITER 0
+    SSIM_ITER 1
+    SSIM_ITER 2
+    SSIM_ITER 3
+    ; PHADDW m1, m2
+    ; PHADDD m3, m4
+    movdqa    m7, [pw_1]
+    pshufd    m5, m3, q2301
+    pmaddwd   m1, m7                   ; horizontal add of s1 word pairs (pw_1 multiply = pairwise sum)
+    pmaddwd   m2, m7                   ; same for s2
+    pshufd    m6, m4, q2301
+    packssdw  m1, m2
+    paddd     m3, m5                   ; fold ss across dword pairs
+    pshufd    m1, m1, q3120
+    paddd     m4, m6                   ; fold s12 across dword pairs
+    pmaddwd   m1, m7
+    punpckhdq m5, m3, m4
+    punpckldq m3, m4
+
+%if UNIX64
+    %define t0 r4
+%else
+    %define t0 rax
+    mov t0, r4mp                       ; 5th argument (sums) is passed on the stack outside UNIX64
+%endif
+
+    movq      [t0+ 0], m1              ; sums[0] = {s1, s2} for block 0
+    movq      [t0+ 8], m3              ; ss/s12 for block 0
+    movhps    [t0+16], m1              ; sums[1] = {s1, s2} for block 1
+    movq      [t0+24], m5              ; ss/s12 for block 1
+    RET
+
+;-----------------------------------------------------------------------------
+; float pixel_ssim_end4( int sum0[5][4], int sum1[5][4], int width )
+;-----------------------------------------------------------------------------
+cglobal pixel_ssim_end4, 2,3
+    mov       r2d, r2m
+    mova      m0, [r0+ 0]              ; combine the per-row partial sums of the two scanlines
+    mova      m1, [r0+16]
+    mova      m2, [r0+32]
+    mova      m3, [r0+48]
+    mova      m4, [r0+64]
+    paddd     m0, [r1+ 0]
+    paddd     m1, [r1+16]
+    paddd     m2, [r1+32]
+    paddd     m3, [r1+48]
+    paddd     m4, [r1+64]
+    paddd     m0, m1                   ; each block uses its own sums plus its right neighbour's
+    paddd     m1, m2
+    paddd     m2, m3
+    paddd     m3, m4
+    TRANSPOSE4x4D  0, 1, 2, 3, 4
+
+;   s1=m0, s2=m1, ss=m2, s12=m3
+%if BIT_DEPTH >= 10
+    cvtdq2ps  m0, m0                   ; high bit depth: do the SSIM arithmetic in float to avoid 32-bit overflow
+    cvtdq2ps  m1, m1
+    cvtdq2ps  m2, m2
+    cvtdq2ps  m3, m3
+    mulps     m4, m0, m1  ; s1*s2
+    mulps     m0, m0      ; s1*s1
+    mulps     m1, m1      ; s2*s2
+    mulps     m2, [pf_64] ; ss*64
+    mulps     m3, [pf_128] ; s12*128
+    addps     m4, m4      ; s1*s2*2
+    addps     m0, m1      ; s1*s1 + s2*s2
+    subps     m2, m0      ; vars
+    subps     m3, m4      ; covar*2
+    movaps    m1, [ssim_c1]
+    addps     m4, m1      ; s1*s2*2 + ssim_c1
+    addps     m0, m1      ; s1*s1 + s2*s2 + ssim_c1
+    movaps    m1, [ssim_c2]
+    addps     m2, m1      ; vars + ssim_c2
+    addps     m3, m1      ; covar*2 + ssim_c2
+%else
+    pmaddwd   m4, m1, m0  ; s1*s2
+    pslld     m1, 16
+    por       m0, m1                   ; pack s1 and s2 into one register's low/high words
+    pmaddwd   m0, m0  ; s1*s1 + s2*s2
+    pslld     m4, 1
+    pslld     m3, 7
+    pslld     m2, 6
+    psubd     m3, m4  ; covar*2
+    psubd     m2, m0  ; vars
+    mova      m1, [ssim_c1]
+    paddd     m0, m1
+    paddd     m4, m1
+    mova      m1, [ssim_c2]
+    paddd     m3, m1
+    paddd     m2, m1
+    cvtdq2ps  m0, m0  ; (float)(s1*s1 + s2*s2 + ssim_c1)
+    cvtdq2ps  m4, m4  ; (float)(s1*s2*2 + ssim_c1)
+    cvtdq2ps  m3, m3  ; (float)(covar*2 + ssim_c2)
+    cvtdq2ps  m2, m2  ; (float)(vars + ssim_c2)
+%endif
+    mulps     m4, m3
+    mulps     m0, m2
+    divps     m4, m0  ; ssim
+
+    cmp       r2d, 4
+    je .skip ; faster only if this is the common case; remove branch if we use ssim on a macroblock level
+    neg       r2
+
+%ifdef PIC
+    lea       r3, [mask_ff + 16]       ; PIC builds need the table address in a register
+    %xdefine %%mask r3
+%else
+    %xdefine %%mask mask_ff + 16
+%endif
+%if cpuflag(avx)
+    andps     m4, [%%mask + r2*4]      ; mask off the lanes beyond 'width'
+%else
+    movups    m0, [%%mask + r2*4]
+    andps     m4, m0
+%endif
+
+.skip:
+    movhlps   m0, m4                   ; horizontal sum of the four per-block SSIM values
+    addps     m0, m4
+%if cpuflag(ssse3)
+    movshdup  m4, m0
+%else
+     pshuflw   m4, m0, q0032
+%endif
+    addss     m0, m4
+%if ARCH_X86_64 == 0
+    movss    r0m, m0                   ; 32-bit ABI returns float on the x87 stack
+    fld     dword r0m
+%endif
+    RET
+%endmacro ; SSIM
+
+INIT_XMM sse2                          ; emit SSE2 versions of pixel_ssim_4x4x2_core / pixel_ssim_end4
+SSIM
+INIT_XMM avx                           ; and AVX versions (three-operand encodings)
+SSIM
+
+%macro SCALE1D_128to64_HBD 0           ; high-bit-depth body of scale1D_128to64: average adjacent 16-bit pixel pairs, 128 -> 64 samples
+    movu        m0,      [r1]
+    palignr     m1,      m0,    2      ; m1 = samples shifted by one word (the pair partner)
+    movu        m2,      [r1 + 16]
+    palignr     m3,      m2,    2
+    movu        m4,      [r1 + 32]
+    palignr     m5,      m4,    2
+    movu        m6,      [r1 + 48]
+    pavgw       m0,      m1            ; rounded average of each adjacent pair
+    palignr     m1,      m6,    2
+    pavgw       m2,      m3
+    pavgw       m4,      m5
+    pavgw       m6,      m1
+    pshufb      m0,      m0,    m7     ; m7 = deinterleave_word_shuf: keep the even-indexed results
+    pshufb      m2,      m2,    m7
+    pshufb      m4,      m4,    m7
+    pshufb      m6,      m6,    m7
+    punpcklqdq    m0,           m2
+    movu          [r0],         m0
+    punpcklqdq    m4,           m6
+    movu          [r0 + 16],    m4
+
+    movu        m0,      [r1 + 64]     ; samples 32-63
+    palignr     m1,      m0,    2
+    movu        m2,      [r1 + 80]
+    palignr     m3,      m2,    2
+    movu        m4,      [r1 + 96]
+    palignr     m5,      m4,    2
+    movu        m6,      [r1 + 112]
+    pavgw       m0,      m1
+    palignr     m1,      m6,    2
+    pavgw       m2,      m3
+    pavgw       m4,      m5
+    pavgw       m6,      m1
+    pshufb      m0,      m0,    m7
+    pshufb      m2,      m2,    m7
+    pshufb      m4,      m4,    m7
+    pshufb      m6,      m6,    m7
+    punpcklqdq    m0,           m2
+    movu          [r0 + 32],    m0
+    punpcklqdq    m4,           m6
+    movu          [r0 + 48],    m4
+
+    movu        m0,      [r1 + 128]    ; samples 64-95
+    palignr     m1,      m0,    2
+    movu        m2,      [r1 + 144]
+    palignr     m3,      m2,    2
+    movu        m4,      [r1 + 160]
+    palignr     m5,      m4,    2
+    movu        m6,      [r1 + 176]
+    pavgw       m0,      m1
+    palignr     m1,      m6,    2
+    pavgw       m2,      m3
+    pavgw       m4,      m5
+    pavgw       m6,      m1
+    pshufb      m0,      m0,    m7
+    pshufb      m2,      m2,    m7
+    pshufb      m4,      m4,    m7
+    pshufb      m6,      m6,    m7
+
+    punpcklqdq    m0,           m2
+    movu          [r0 + 64],    m0
+    punpcklqdq    m4,           m6
+    movu          [r0 + 80],    m4
+
+    movu        m0,      [r1 + 192]    ; samples 96-127
+    palignr     m1,      m0,    2
+    movu        m2,      [r1 + 208]
+    palignr     m3,      m2,    2
+    movu        m4,      [r1 + 224]
+    palignr     m5,      m4,    2
+    movu        m6,      [r1 + 240]
+    pavgw       m0,      m1
+    palignr     m1,      m6,    2
+    pavgw       m2,      m3
+    pavgw       m4,      m5
+    pavgw       m6,      m1
+    pshufb      m0,      m0,    m7
+    pshufb      m2,      m2,    m7
+    pshufb      m4,      m4,    m7
+    pshufb      m6,      m6,    m7
+
+    punpcklqdq    m0,           m2
+    movu          [r0 + 96],    m0
+    punpcklqdq    m4,           m6
+    movu          [r0 + 112],    m4
+%endmacro
+
+;-----------------------------------------------------------------
+; void scale1D_128to64(pixel *dst, pixel *src, intptr_t /*stride*/)
+;-----------------------------------------------------------------
+INIT_XMM ssse3
+cglobal scale1D_128to64, 2, 2, 8, dest, src1, stride    ; halve two 128-sample reference rows (top then left) to 64 samples each by averaging adjacent pairs; stride argument is unused
+%if HIGH_BIT_DEPTH
+    mova        m7,      [deinterleave_word_shuf]
+
+    ;Top pixel
+    SCALE1D_128to64_HBD
+
+    ;Left pixel
+    add         r1,      256           ; 128 samples * 2 bytes
+    add         r0,      128
+    SCALE1D_128to64_HBD
+
+%else
+    mova        m7,      [deinterleave_shuf]   ; byte shuffle: keep even-indexed results after pairwise averaging
+
+    ;Top pixel
+    movu        m0,      [r1]
+    palignr     m1,      m0,    1      ; m1 = bytes shifted by one (the pair partner)
+    movu        m2,      [r1 + 16]
+    palignr     m3,      m2,    1
+    movu        m4,      [r1 + 32]
+    palignr     m5,      m4,    1
+    movu        m6,      [r1 + 48]
+
+    pavgb       m0,      m1            ; rounded average of each adjacent byte pair
+
+    palignr     m1,      m6,    1
+
+    pavgb       m2,      m3
+    pavgb       m4,      m5
+    pavgb       m6,      m1
+
+    pshufb      m0,      m0,    m7
+    pshufb      m2,      m2,    m7
+    pshufb      m4,      m4,    m7
+    pshufb      m6,      m6,    m7
+
+    punpcklqdq    m0,           m2
+    movu          [r0],         m0
+    punpcklqdq    m4,           m6
+    movu          [r0 + 16],    m4
+
+    movu        m0,      [r1 + 64]
+    palignr     m1,      m0,    1
+    movu        m2,      [r1 + 80]
+    palignr     m3,      m2,    1
+    movu        m4,      [r1 + 96]
+    palignr     m5,      m4,    1
+    movu        m6,      [r1 + 112]
+
+    pavgb       m0,      m1
+
+    palignr     m1,      m6,    1
+
+    pavgb       m2,      m3
+    pavgb       m4,      m5
+    pavgb       m6,      m1
+
+    pshufb      m0,      m0,    m7
+    pshufb      m2,      m2,    m7
+    pshufb      m4,      m4,    m7
+    pshufb      m6,      m6,    m7
+
+    punpcklqdq    m0,           m2
+    movu          [r0 + 32],    m0
+    punpcklqdq    m4,           m6
+    movu          [r0 + 48],    m4
+
+    ;Left pixel
+    movu        m0,      [r1 + 128]
+    palignr     m1,      m0,    1
+    movu        m2,      [r1 + 144]
+    palignr     m3,      m2,    1
+    movu        m4,      [r1 + 160]
+    palignr     m5,      m4,    1
+    movu        m6,      [r1 + 176]
+
+    pavgb       m0,      m1
+
+    palignr     m1,      m6,    1
+
+    pavgb       m2,      m3
+    pavgb       m4,      m5
+    pavgb       m6,      m1
+
+    pshufb      m0,      m0,    m7
+    pshufb      m2,      m2,    m7
+    pshufb      m4,      m4,    m7
+    pshufb      m6,      m6,    m7
+
+    punpcklqdq    m0,           m2
+    movu          [r0 + 64],    m0
+    punpcklqdq    m4,           m6
+    movu          [r0 + 80],    m4
+
+    movu        m0,      [r1 + 192]
+    palignr     m1,      m0,    1
+    movu        m2,      [r1 + 208]
+    palignr     m3,      m2,    1
+    movu        m4,      [r1 + 224]
+    palignr     m5,      m4,    1
+    movu        m6,      [r1 + 240]
+
+    pavgb       m0,      m1
+
+    palignr     m1,      m6,    1
+
+    pavgb       m2,      m3
+    pavgb       m4,      m5
+    pavgb       m6,      m1
+
+    pshufb      m0,      m0,    m7
+    pshufb      m2,      m2,    m7
+    pshufb      m4,      m4,    m7
+    pshufb      m6,      m6,    m7
+
+    punpcklqdq    m0,           m2
+    movu          [r0 + 96],    m0
+    punpcklqdq    m4,           m6
+    movu          [r0 + 112],   m4
+%endif
+RET
+
+%if HIGH_BIT_DEPTH == 1
+INIT_YMM avx2
+cglobal scale1D_128to64, 2, 2, 3       ; AVX2 HBD: phaddw sums adjacent word pairs, pavgw with zero gives the rounded half (a+b+1)>>1
+    pxor            m2, m2
+
+    ;Top pixel
+    movu            m0, [r1]
+    movu            m1, [r1 + 32]
+    phaddw          m0, m1             ; a+b for each adjacent pair
+    pavgw           m0, m2             ; (a+b+1)>>1
+    vpermq          m0, m0, 0xD8       ; undo phaddw's per-lane interleave
+    movu            [r0], m0
+
+    movu            m0, [r1 + 64]
+    movu            m1, [r1 + 96]
+    phaddw          m0, m1
+    pavgw           m0, m2
+    vpermq          m0, m0, 0xD8
+    movu            [r0 + 32], m0
+
+    movu            m0, [r1 + 128]
+    movu            m1, [r1 + 160]
+    phaddw          m0, m1
+    pavgw           m0, m2
+    vpermq          m0, m0, 0xD8
+    movu            [r0 + 64], m0
+
+    movu            m0, [r1 + 192]
+    movu            m1, [r1 + 224]
+    phaddw          m0, m1
+    pavgw           m0, m2
+    vpermq          m0, m0, 0xD8
+    movu            [r0 + 96], m0
+
+    ;Left pixel
+    movu            m0, [r1 + 256]
+    movu            m1, [r1 + 288]
+    phaddw          m0, m1
+    pavgw           m0, m2
+    vpermq          m0, m0, 0xD8
+    movu            [r0 + 128], m0
+
+    movu            m0, [r1 + 320]
+    movu            m1, [r1 + 352]
+    phaddw          m0, m1
+    pavgw           m0, m2
+    vpermq          m0, m0, 0xD8
+    movu            [r0 + 160], m0
+
+    movu            m0, [r1 + 384]
+    movu            m1, [r1 + 416]
+    phaddw          m0, m1
+    pavgw           m0, m2
+    vpermq          m0, m0, 0xD8
+    movu            [r0 + 192], m0
+
+    movu            m0, [r1 + 448]
+    movu            m1, [r1 + 480]
+    phaddw          m0, m1
+    pavgw           m0, m2
+    vpermq          m0, m0, 0xD8
+    movu            [r0 + 224], m0
+
+    RET
+%else ; HIGH_BIT_DEPTH == 0
+INIT_YMM avx2
+cglobal scale1D_128to64, 2, 2, 4       ; AVX2 8-bit: pmaddubsw with pb_1 sums byte pairs into words, pavgw with zero rounds, packuswb narrows back
+    pxor            m2, m2
+    mova            m3, [pb_1]
+
+    ;Top pixel
+    movu            m0, [r1]
+    pmaddubsw       m0, m0, m3         ; a+b per adjacent byte pair (as words)
+    pavgw           m0, m2             ; (a+b+1)>>1
+    movu            m1, [r1 + 32]
+    pmaddubsw       m1, m1, m3
+    pavgw           m1, m2
+    packuswb        m0, m1
+    vpermq          m0, m0, 0xD8       ; fix per-lane ordering after packuswb
+    movu            [r0], m0
+
+    movu            m0, [r1 + 64]
+    pmaddubsw       m0, m0, m3
+    pavgw           m0, m2
+    movu            m1, [r1 + 96]
+    pmaddubsw       m1, m1, m3
+    pavgw           m1, m2
+    packuswb        m0, m1
+    vpermq          m0, m0, 0xD8
+    movu            [r0 + 32], m0
+
+    ;Left pixel
+    movu            m0, [r1 + 128]
+    pmaddubsw       m0, m0, m3
+    pavgw           m0, m2
+    movu            m1, [r1 + 160]
+    pmaddubsw       m1, m1, m3
+    pavgw           m1, m2
+    packuswb        m0, m1
+    vpermq          m0, m0, 0xD8
+    movu            [r0 + 64], m0
+
+    movu            m0, [r1 + 192]
+    pmaddubsw       m0, m0, m3
+    pavgw           m0, m2
+    movu            m1, [r1 + 224]
+    pmaddubsw       m1, m1, m3
+    pavgw           m1, m2
+    packuswb        m0, m1
+    vpermq          m0, m0, 0xD8
+    movu            [r0 + 96], m0
+    RET
+%endif
+
+;-----------------------------------------------------------------
+; void scale2D_64to32(pixel *dst, pixel *src, intptr_t stride)
+;-----------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+INIT_XMM ssse3
+cglobal scale2D_64to32, 3, 4, 8, dest, src, stride    ; downscale a 64x64 16-bit block to 32x32: each output = truncating average of a 2x2 input block
+    mov       r3d,    32               ; 32 output rows
+    mova      m7,    [deinterleave_word_shuf]
+    add       r2,    r2                ; stride in bytes (16-bit samples)
+.loop:                                 ; avg4(i,j,k,l) = pavg(pavg(i,j), pavg(k,l)) minus a 1-bit fixup: ((i^j)|(k^l)) & (s^t) & 1 corrects the double rounding
+    movu      m0,    [r1]                  ;i
+    psrld     m1,    m0,    16             ;j
+    movu      m2,    [r1 + r2]             ;k
+    psrld     m3,    m2,    16             ;l
+    movu      m4,    m0
+    movu      m5,    m2
+    pxor      m4,    m1                    ;i^j
+    pxor      m5,    m3                    ;k^l
+    por       m4,    m5                    ;ij|kl
+    pavgw     m0,    m1                    ;s
+    pavgw     m2,    m3                    ;t
+    movu      m5,    m0
+    pavgw     m0,    m2                    ;(s+t+1)/2
+    pxor      m5,    m2                    ;s^t
+    pand      m4,    m5                    ;(ij|kl)&st
+    pand      m4,    [hmulw_16p]           ; keep only bit 0 of the even words (the rounding fixup)
+    psubw     m0,    m4                    ;Result
+    movu      m1,    [r1 + 16]             ;i
+    psrld     m2,    m1,    16             ;j
+    movu      m3,    [r1 + r2 + 16]        ;k
+    psrld     m4,    m3,    16             ;l
+    movu      m5,    m1
+    movu      m6,    m3
+    pxor      m5,    m2                    ;i^j
+    pxor      m6,    m4                    ;k^l
+    por       m5,    m6                    ;ij|kl
+    pavgw     m1,    m2                    ;s
+    pavgw     m3,    m4                    ;t
+    movu      m6,    m1
+    pavgw     m1,    m3                    ;(s+t+1)/2
+    pxor      m6,    m3                    ;s^t
+    pand      m5,    m6                    ;(ij|kl)&st
+    pand      m5,    [hmulw_16p]
+    psubw     m1,    m5                    ;Result
+    pshufb    m0,    m7                    ; compact the even-word results
+    pshufb    m1,    m7
+
+    punpcklqdq    m0,       m1
+    movu          [r0],     m0
+
+    movu      m0,    [r1 + 32]             ;i
+    psrld     m1,    m0,    16             ;j
+    movu      m2,    [r1 + r2 + 32]        ;k
+    psrld     m3,    m2,    16             ;l
+    movu      m4,    m0
+    movu      m5,    m2
+    pxor      m4,    m1                    ;i^j
+    pxor      m5,    m3                    ;k^l
+    por       m4,    m5                    ;ij|kl
+    pavgw     m0,    m1                    ;s
+    pavgw     m2,    m3                    ;t
+    movu      m5,    m0
+    pavgw     m0,    m2                    ;(s+t+1)/2
+    pxor      m5,    m2                    ;s^t
+    pand      m4,    m5                    ;(ij|kl)&st
+    pand      m4,    [hmulw_16p]
+    psubw     m0,    m4                    ;Result
+    movu      m1,    [r1 + 48]             ;i
+    psrld     m2,    m1,    16             ;j
+    movu      m3,    [r1 + r2 + 48]        ;k
+    psrld     m4,    m3,    16             ;l
+    movu      m5,    m1
+    movu      m6,    m3
+    pxor      m5,    m2                    ;i^j
+    pxor      m6,    m4                    ;k^l
+    por       m5,    m6                    ;ij|kl
+    pavgw     m1,    m2                    ;s
+    pavgw     m3,    m4                    ;t
+    movu      m6,    m1
+    pavgw     m1,    m3                    ;(s+t+1)/2
+    pxor      m6,    m3                    ;s^t
+    pand      m5,    m6                    ;(ij|kl)&st
+    pand      m5,    [hmulw_16p]
+    psubw     m1,    m5                    ;Result
+    pshufb    m0,    m7
+    pshufb    m1,    m7
+
+    punpcklqdq    m0,           m1
+    movu          [r0 + 16],    m0
+
+    movu      m0,    [r1 + 64]             ;i
+    psrld     m1,    m0,    16             ;j
+    movu      m2,    [r1 + r2 + 64]        ;k
+    psrld     m3,    m2,    16             ;l
+    movu      m4,    m0
+    movu      m5,    m2
+    pxor      m4,    m1                    ;i^j
+    pxor      m5,    m3                    ;k^l
+    por       m4,    m5                    ;ij|kl
+    pavgw     m0,    m1                    ;s
+    pavgw     m2,    m3                    ;t
+    movu      m5,    m0
+    pavgw     m0,    m2                    ;(s+t+1)/2
+    pxor      m5,    m2                    ;s^t
+    pand      m4,    m5                    ;(ij|kl)&st
+    pand      m4,    [hmulw_16p]
+    psubw     m0,    m4                    ;Result
+    movu      m1,    [r1 + 80]             ;i
+    psrld     m2,    m1,    16             ;j
+    movu      m3,    [r1 + r2 + 80]        ;k
+    psrld     m4,    m3,    16             ;l
+    movu      m5,    m1
+    movu      m6,    m3
+    pxor      m5,    m2                    ;i^j
+    pxor      m6,    m4                    ;k^l
+    por       m5,    m6                    ;ij|kl
+    pavgw     m1,    m2                    ;s
+    pavgw     m3,    m4                    ;t
+    movu      m6,    m1
+    pavgw     m1,    m3                    ;(s+t+1)/2
+    pxor      m6,    m3                    ;s^t
+    pand      m5,    m6                    ;(ij|kl)&st
+    pand      m5,    [hmulw_16p]
+    psubw     m1,    m5                    ;Result
+    pshufb    m0,    m7
+    pshufb    m1,    m7
+
+    punpcklqdq    m0,           m1
+    movu          [r0 + 32],    m0
+
+    movu      m0,    [r1 + 96]             ;i
+    psrld     m1,    m0,    16             ;j
+    movu      m2,    [r1 + r2 + 96]        ;k
+    psrld     m3,    m2,    16             ;l
+    movu      m4,    m0
+    movu      m5,    m2
+    pxor      m4,    m1                    ;i^j
+    pxor      m5,    m3                    ;k^l
+    por       m4,    m5                    ;ij|kl
+    pavgw     m0,    m1                    ;s
+    pavgw     m2,    m3                    ;t
+    movu      m5,    m0
+    pavgw     m0,    m2                    ;(s+t+1)/2
+    pxor      m5,    m2                    ;s^t
+    pand      m4,    m5                    ;(ij|kl)&st
+    pand      m4,    [hmulw_16p]
+    psubw     m0,    m4                    ;Result
+    movu      m1,    [r1 + 112]            ;i
+    psrld     m2,    m1,    16             ;j
+    movu      m3,    [r1 + r2 + 112]       ;k
+    psrld     m4,    m3,    16             ;l
+    movu      m5,    m1
+    movu      m6,    m3
+    pxor      m5,    m2                    ;i^j
+    pxor      m6,    m4                    ;k^l
+    por       m5,    m6                    ;ij|kl
+    pavgw     m1,    m2                    ;s
+    pavgw     m3,    m4                    ;t
+    movu      m6,    m1
+    pavgw     m1,    m3                    ;(s+t+1)/2
+    pxor      m6,    m3                    ;s^t
+    pand      m5,    m6                    ;(ij|kl)&st
+    pand      m5,    [hmulw_16p]
+    psubw     m1,    m5                    ;Result
+    pshufb    m0,    m7
+    pshufb    m1,    m7
+
+    punpcklqdq    m0,           m1
+    movu          [r0 + 48],    m0
+    lea    r0,    [r0 + 64]                ; next output row
+    lea    r1,    [r1 + 2 * r2]            ; skip two input rows
+    dec    r3d
+    jnz    .loop
+    RET
+%else
+
+; scale2D_64to32, 8bpp ssse3 path:
+; downscales a 64x64 byte block to 32x32 by averaging each 2x2 neighborhood.
+; For each pair of rows it forms s = pavgb(i,j) and t = pavgb(k,l), then
+; avg = pavgb(s,t); since pavgb rounds up, the ((i^j)|(k^l)) & (s^t) term,
+; masked by [hmul_16p] and subtracted, cancels the accumulated round-up bias
+; (standard pavg rounding correction; assumes hmul_16p masks to bit 0 of each
+; byte -- constant is defined elsewhere, confirm against the data section).
+; r0 = dest, r1 = src, r2 = stride; r3d counts 32 output rows, each loop
+; iteration consumes two source rows (64 bytes wide) and emits one dest row.
+INIT_XMM ssse3
+cglobal scale2D_64to32, 3, 4, 8, dest, src, stride
+    mov       r3d,    32
+    mova        m7,      [deinterleave_shuf]
+.loop:
+
+    movu        m0,      [r1]                  ;i
+    psrlw       m1,      m0,    8              ;j
+    movu        m2,      [r1 + r2]             ;k
+    psrlw       m3,      m2,    8              ;l
+    movu        m4,      m0
+    movu        m5,      m2
+
+    pxor        m4,      m1                    ;i^j
+    pxor        m5,      m3                    ;k^l
+    por         m4,      m5                    ;ij|kl
+
+    pavgb       m0,      m1                    ;s
+    pavgb       m2,      m3                    ;t
+    movu        m5,      m0
+    pavgb       m0,      m2                    ;(s+t+1)/2
+    pxor        m5,      m2                    ;s^t
+    pand        m4,      m5                    ;(ij|kl)&st
+    pand        m4,      [hmul_16p]
+    psubb       m0,      m4                    ;Result
+
+    movu        m1,      [r1 + 16]             ;i
+    psrlw       m2,      m1,    8              ;j
+    movu        m3,      [r1 + r2 + 16]        ;k
+    psrlw       m4,      m3,    8              ;l
+    movu        m5,      m1
+    movu        m6,      m3
+
+    pxor        m5,      m2                    ;i^j
+    pxor        m6,      m4                    ;k^l
+    por         m5,      m6                    ;ij|kl
+
+    pavgb       m1,      m2                    ;s
+    pavgb       m3,      m4                    ;t
+    movu        m6,      m1
+    pavgb       m1,      m3                    ;(s+t+1)/2
+    pxor        m6,      m3                    ;s^t
+    pand        m5,      m6                    ;(ij|kl)&st
+    pand        m5,      [hmul_16p]
+    psubb       m1,      m5                    ;Result
+
+; averages live in the even byte lanes; deinterleave_shuf compacts them
+    pshufb      m0,      m0,    m7
+    pshufb      m1,      m1,    m7
+
+    punpcklqdq    m0,           m1
+    movu          [r0],         m0
+
+    movu        m0,      [r1 + 32]             ;i
+    psrlw       m1,      m0,    8              ;j
+    movu        m2,      [r1 + r2 + 32]        ;k
+    psrlw       m3,      m2,    8              ;l
+    movu        m4,      m0
+    movu        m5,      m2
+
+    pxor        m4,      m1                    ;i^j
+    pxor        m5,      m3                    ;k^l
+    por         m4,      m5                    ;ij|kl
+
+    pavgb       m0,      m1                    ;s
+    pavgb       m2,      m3                    ;t
+    movu        m5,      m0
+    pavgb       m0,      m2                    ;(s+t+1)/2
+    pxor        m5,      m2                    ;s^t
+    pand        m4,      m5                    ;(ij|kl)&st
+    pand        m4,      [hmul_16p]
+    psubb       m0,      m4                    ;Result
+
+    movu        m1,      [r1 + 48]             ;i
+    psrlw       m2,      m1,    8              ;j
+    movu        m3,      [r1 + r2 + 48]        ;k
+    psrlw       m4,      m3,    8              ;l
+    movu        m5,      m1
+    movu        m6,      m3
+
+    pxor        m5,      m2                    ;i^j
+    pxor        m6,      m4                    ;k^l
+    por         m5,      m6                    ;ij|kl
+
+    pavgb       m1,      m2                    ;s
+    pavgb       m3,      m4                    ;t
+    movu        m6,      m1
+    pavgb       m1,      m3                    ;(s+t+1)/2
+    pxor        m6,      m3                    ;s^t
+    pand        m5,      m6                    ;(ij|kl)&st
+    pand        m5,      [hmul_16p]
+    psubb       m1,      m5                    ;Result
+
+    pshufb      m0,      m0,    m7
+    pshufb      m1,      m1,    m7
+
+    punpcklqdq    m0,           m1
+    movu          [r0 + 16],    m0
+
+; advance one dest row (32 bytes) and two source rows
+    lea    r0,    [r0 + 32]
+    lea    r1,    [r1 + 2 * r2]
+    dec    r3d
+    jnz    .loop
+    RET
+%endif
+
+;-----------------------------------------------------------------
+; void scale2D_64to32(pixel *dst, pixel *src, intptr_t stride)
+;-----------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+; scale2D_64to32, high-bit-depth avx2 path:
+; r2 is doubled because pixels are 16-bit. Each iteration sums two source
+; rows vertically (paddw), pairs horizontal neighbors (phaddw), then scales
+; with pmulhrsw by [pw_2000]; assuming pw_2000 == 0x2000 this computes the
+; rounded average (sum+2)>>2 -- confirm against the constant's definition.
+; vpermq q3120 undoes phaddw's per-128-bit-lane interleaving.
+INIT_YMM avx2
+cglobal scale2D_64to32, 3, 4, 5, dest, src, stride
+    mov         r3d,     32
+    add         r2d,     r2d
+    mova        m4,      [pw_2000]
+
+.loop:
+    movu        m0,      [r1]
+    movu        m1,      [r1 + 1 * mmsize]
+    movu        m2,      [r1 + r2]
+    movu        m3,      [r1 + r2 + 1 * mmsize]
+
+    paddw       m0,      m2
+    paddw       m1,      m3
+    phaddw      m0,      m1
+
+    pmulhrsw    m0,      m4
+    vpermq      m0,      m0, q3120
+    movu        [r0],    m0
+
+    movu        m0,      [r1 + 2 * mmsize]
+    movu        m1,      [r1 + 3 * mmsize]
+    movu        m2,      [r1 + r2 + 2 * mmsize]
+    movu        m3,      [r1 + r2 + 3 * mmsize]
+
+    paddw       m0,      m2
+    paddw       m1,      m3
+    phaddw      m0,      m1
+
+    pmulhrsw    m0,      m4
+    vpermq      m0,      m0, q3120
+    movu        [r0 + mmsize], m0
+
+; one 32-pixel (64-byte) dest row written; step over two source rows
+    add         r0,      64
+    lea         r1,      [r1 + 2 * r2]
+    dec         r3d
+    jnz         .loop
+    RET
+%else
+
+; scale2D_64to32, 8bpp avx2 path:
+; same pavgb + ((i^j)|(k^l)) & (s^t) rounding-correction scheme as the ssse3
+; version, masked with [pb_1], but processes FOUR source rows per iteration
+; (r4 points at the third row), so r3d only counts 16 iterations.
+; vpermq 11011000b fixes the lane order after the in-lane punpcklqdq.
+INIT_YMM avx2
+cglobal scale2D_64to32, 3, 5, 8, dest, src, stride
+    mov         r3d,     16
+    mova        m7,      [deinterleave_shuf]
+.loop:
+    movu        m0,      [r1]                  ; i
+    lea         r4,      [r1 + r2 * 2]
+    psrlw       m1,      m0, 8                 ; j
+    movu        m2,      [r1 + r2]             ; k
+    psrlw       m3,      m2, 8                 ; l
+
+    pxor        m4,      m0, m1                ; i^j
+    pxor        m5,      m2, m3                ; k^l
+    por         m4,      m5                    ; ij|kl
+
+    pavgb       m0,      m1                    ; s
+    pavgb       m2,      m3                    ; t
+    mova        m5,      m0
+    pavgb       m0,      m2                    ; (s+t+1)/2
+    pxor        m5,      m2                    ; s^t
+    pand        m4,      m5                    ; (ij|kl)&st
+    pand        m4,      [pb_1]
+    psubb       m0,      m4                    ; Result
+
+    movu        m1,      [r1 + 32]             ; i
+    psrlw       m2,      m1, 8                 ; j
+    movu        m3,      [r1 + r2 + 32]        ; k
+    psrlw       m4,      m3, 8                 ; l
+
+    pxor        m5,      m1, m2                ; i^j
+    pxor        m6,      m3, m4                ; k^l
+    por         m5,      m6                    ; ij|kl
+
+    pavgb       m1,      m2                    ; s
+    pavgb       m3,      m4                    ; t
+    mova        m6,      m1
+    pavgb       m1,      m3                    ; (s+t+1)/2
+    pxor        m6,      m3                    ; s^t
+    pand        m5,      m6                    ; (ij|kl)&st
+    pand        m5,      [pb_1]
+    psubb       m1,      m5                    ; Result
+
+    pshufb      m0,      m0, m7
+    pshufb      m1,      m1, m7
+
+    punpcklqdq  m0,      m1
+    vpermq      m0,      m0, 11011000b
+    movu        [r0],    m0
+
+    add         r0,      32
+
+; second output row, built from source rows 3 and 4 (via r4)
+    movu        m0,      [r4]                  ; i
+    psrlw       m1,      m0, 8                 ; j
+    movu        m2,      [r4 + r2]             ; k
+    psrlw       m3,      m2, 8                 ; l
+
+    pxor        m4,      m0, m1                ; i^j
+    pxor        m5,      m2, m3                ; k^l
+    por         m4,      m5                    ; ij|kl
+
+    pavgb       m0,      m1                    ; s
+    pavgb       m2,      m3                    ; t
+    mova        m5,      m0
+    pavgb       m0,      m2                    ; (s+t+1)/2
+    pxor        m5,      m2                    ; s^t
+    pand        m4,      m5                    ; (ij|kl)&st
+    pand        m4,      [pb_1]
+    psubb       m0,      m4                    ; Result
+
+    movu        m1,      [r4 + 32]             ; i
+    psrlw       m2,      m1, 8                 ; j
+    movu        m3,      [r4 + r2 + 32]        ; k
+    psrlw       m4,      m3, 8                 ; l
+
+    pxor        m5,      m1, m2                ; i^j
+    pxor        m6,      m3, m4                ; k^l
+    por         m5,      m6                    ; ij|kl
+
+    pavgb       m1,      m2                    ; s
+    pavgb       m3,      m4                    ; t
+    mova        m6,      m1
+    pavgb       m1,      m3                    ; (s+t+1)/2
+    pxor        m6,      m3                    ; s^t
+    pand        m5,      m6                    ; (ij|kl)&st
+    pand        m5,      [pb_1]
+    psubb       m1,      m5                    ; Result
+
+    pshufb      m0,      m0, m7
+    pshufb      m1,      m1, m7
+
+    punpcklqdq  m0,      m1
+    vpermq      m0,      m0, 11011000b
+    movu        [r0],    m0
+
+; four source rows consumed, two dest rows written
+    lea         r1,      [r1 + 4 * r2]
+    add         r0,      32
+    dec         r3d
+    jnz         .loop
+    RET
+%endif
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_4x4(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+; pixel_sub_ps_4x4, high-bit-depth sse2 path:
+; dest[y][x] = src0[y][x] - src1[y][x] as int16 for a 4x4 block.
+; All three strides are doubled up front because both sources and the
+; destination are 16-bit elements; each movh moves one 4-sample row.
+INIT_XMM sse2
+cglobal pixel_sub_ps_4x4, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    add     r4,     r4
+    add     r5,     r5
+    add     r1,     r1
+    movh    m0,     [r2]
+    movh    m2,     [r2 + r4]
+    movh    m1,     [r3]
+    movh    m3,     [r3 + r5]
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+    movh    m4,     [r2]
+    movh    m6,     [r2 + r4]
+    movh    m5,     [r3]
+    movh    m7,     [r3 + r5]
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movh    [r0],       m0
+    movh    [r0 + r1],  m2
+    lea     r0,     [r0 + r1 * 2]
+    movh    [r0],       m4
+    movh    [r0 + r1],  m6
+
+    RET
+%else
+; pixel_sub_ps_4x4, 8bpp sse4 path:
+; loads four 4-byte rows, pairs them with punpckldq, zero-extends bytes to
+; words with pmovzxbw, subtracts, and stores two 4-word rows per register
+; (low half via movh, high half via movhps). Only the dest stride is
+; doubled, since the int16 output is twice the width of the byte input.
+INIT_XMM sse4
+cglobal pixel_sub_ps_4x4, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    add         r1,     r1
+    movd        m0,     [r2]
+    movd        m2,     [r2 + r4]
+    movd        m1,     [r3]
+    movd        m3,     [r3 + r5]
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+    movd        m4,     [r2]
+    movd        m6,     [r2 + r4]
+    movd        m5,     [r3]
+    movd        m7,     [r3 + r5]
+    punpckldq   m0,     m2
+    punpckldq   m1,     m3
+    punpckldq   m4,     m6
+    punpckldq   m5,     m7
+    pmovzxbw    m0,     m0
+    pmovzxbw    m1,     m1
+    pmovzxbw    m4,     m4
+    pmovzxbw    m5,     m5
+
+    psubw       m0,     m1
+    psubw       m4,     m5
+
+    movh        [r0],           m0
+    movhps      [r0 + r1],      m0
+    movh        [r0 + r1 * 2],  m4
+    lea         r0,     [r0 + r1 * 2]
+    movhps      [r0 + r1],      m4
+
+    RET
+%endif
+
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_4x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+; PIXELSUB_PS_W4_H4 %1=width(4) %2=height: looping pixel_sub_ps_4x%2.
+; Same per-4-rows body as pixel_sub_ps_4x4 above, iterated %2/4 times.
+; HBD path doubles all strides for 16-bit samples; 8bpp path widens the
+; byte inputs with punpckldq + pmovzxbw. dec is hoisted before the stores
+; so the final jnz tests the counter set earlier in the iteration.
+%macro PIXELSUB_PS_W4_H4 2
+%if HIGH_BIT_DEPTH
+cglobal pixel_sub_ps_4x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov     r6d,    %2/4
+    add     r4,     r4
+    add     r5,     r5
+    add     r1,     r1
+.loop:
+    movh    m0,     [r2]
+    movh    m2,     [r2 + r4]
+    movh    m1,     [r3]
+    movh    m3,     [r3 + r5]
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+    movh    m4,     [r2]
+    movh    m6,     [r2 + r4]
+    movh    m5,     [r3]
+    movh    m7,     [r3 + r5]
+    dec     r6d
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movh    [r0],           m0
+    movh    [r0 + r1],      m2
+    movh    [r0 + r1 * 2],  m4
+    lea     r0,     [r0 + r1 * 2]
+    movh    [r0 + r1],      m6
+    lea     r0,     [r0 + r1 * 2]
+
+    jnz     .loop
+    RET
+%else
+cglobal pixel_sub_ps_4x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov         r6d,    %2/4
+    add         r1,     r1
+.loop:
+    movd        m0,     [r2]
+    movd        m2,     [r2 + r4]
+    movd        m1,     [r3]
+    movd        m3,     [r3 + r5]
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+    movd        m4,     [r2]
+    movd        m6,     [r2 + r4]
+    movd        m5,     [r3]
+    movd        m7,     [r3 + r5]
+    dec         r6d
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+    punpckldq   m0,     m2
+    punpckldq   m1,     m3
+    punpckldq   m4,     m6
+    punpckldq   m5,     m7
+    pmovzxbw    m0,     m0
+    pmovzxbw    m1,     m1
+    pmovzxbw    m4,     m4
+    pmovzxbw    m5,     m5
+
+    psubw       m0,     m1
+    psubw       m4,     m5
+
+    movh        [r0],           m0
+    movhps      [r0 + r1],      m0
+    movh        [r0 + r1 * 2],  m4
+    lea         r0,     [r0 + r1 * 2]
+    movhps      [r0 + r1],      m4
+    lea         r0,     [r0 + r1 * 2]
+
+    jnz         .loop
+    RET
+%endif
+%endmacro
+
+; instantiate the 4x8 variant (4x4 has its own non-looping version above)
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+PIXELSUB_PS_W4_H4 4, 8
+%else
+INIT_XMM sse4
+PIXELSUB_PS_W4_H4 4, 8
+%endif
+
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_8x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+; PIXELSUB_PS_W8_H4 %1=width(8) %2=height: pixel_sub_ps_8x%2,
+; four rows per iteration. HBD path: full 16-byte row loads/stores with
+; doubled strides. 8bpp path: 8-byte row loads widened in-register with
+; pmovzxbw before the word subtraction.
+%macro PIXELSUB_PS_W8_H4 2
+%if HIGH_BIT_DEPTH
+cglobal pixel_sub_ps_8x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov     r6d,    %2/4
+    add     r4,     r4
+    add     r5,     r5
+    add     r1,     r1
+.loop:
+    movu    m0,     [r2]
+    movu    m2,     [r2 + r4]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + r5]
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+    movu    m4,     [r2]
+    movu    m6,     [r2 + r4]
+    movu    m5,     [r3]
+    movu    m7,     [r3 + r5]
+    dec     r6d
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movu    [r0],           m0
+    movu    [r0 + r1],      m2
+    movu    [r0 + r1 * 2],  m4
+    lea     r0,     [r0 + r1 * 2]
+    movu    [r0 + r1],      m6
+    lea     r0,     [r0 + r1 * 2]
+
+    jnz     .loop
+    RET
+%else
+cglobal pixel_sub_ps_8x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov         r6d,    %2/4
+    add         r1,     r1
+.loop:
+    movh        m0,     [r2]
+    movh        m2,     [r2 + r4]
+    movh        m1,     [r3]
+    movh        m3,     [r3 + r5]
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+    movh        m4,     [r2]
+    movh        m6,     [r2 + r4]
+    movh        m5,     [r3]
+    movh        m7,     [r3 + r5]
+    dec         r6d
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+    pmovzxbw    m0,     m0
+    pmovzxbw    m1,     m1
+    pmovzxbw    m2,     m2
+    pmovzxbw    m3,     m3
+    pmovzxbw    m4,     m4
+    pmovzxbw    m5,     m5
+    pmovzxbw    m6,     m6
+    pmovzxbw    m7,     m7
+
+    psubw       m0,     m1
+    psubw       m2,     m3
+    psubw       m4,     m5
+    psubw       m6,     m7
+
+    movu        [r0],           m0
+    movu        [r0 + r1],      m2
+    movu        [r0 + r1 * 2],  m4
+    lea         r0,     [r0 + r1 * 2]
+    movu        [r0 + r1],      m6
+    lea         r0,     [r0 + r1 * 2]
+
+    jnz         .loop
+    RET
+%endif
+%endmacro
+
+; 8x8 and 8x16 block sizes
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+PIXELSUB_PS_W8_H4 8, 8
+PIXELSUB_PS_W8_H4 8, 16
+%else
+INIT_XMM sse4
+PIXELSUB_PS_W8_H4 8, 8
+PIXELSUB_PS_W8_H4 8, 16
+%endif
+
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_16x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+; PIXELSUB_PS_W16_H4 %1=width(16) %2=height: pixel_sub_ps_16x%2,
+; four rows per iteration (two unrolled 2-row halves). HBD path uses two
+; 16-byte loads per 16-sample row. 8bpp path splits each 16-byte row into
+; low half (pmovzxbw) and high half (punpckhbw against the zero in m6).
+%macro PIXELSUB_PS_W16_H4 2
+%if HIGH_BIT_DEPTH
+cglobal pixel_sub_ps_16x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov     r6d,    %2/4
+    add     r4,     r4
+    add     r5,     r5
+    add     r1,     r1
+.loop:
+    movu    m0,     [r2]
+    movu    m2,     [r2 + 16]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + 16]
+    movu    m4,     [r2 + r4]
+    movu    m6,     [r2 + r4 + 16]
+    movu    m5,     [r3 + r5]
+    movu    m7,     [r3 + r5 + 16]
+    dec     r6d
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movu    [r0],           m0
+    movu    [r0 + 16],      m2
+    movu    [r0 + r1],      m4
+    movu    [r0 + r1 + 16], m6
+
+    movu    m0,     [r2]
+    movu    m2,     [r2 + 16]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + 16]
+    movu    m4,     [r2 + r4]
+    movu    m5,     [r3 + r5]
+    movu    m6,     [r2 + r4 + 16]
+    movu    m7,     [r3 + r5 + 16]
+    lea     r0,     [r0 + r1 * 2]
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movu    [r0],           m0
+    movu    [r0 + 16],      m2
+    movu    [r0 + r1],      m4
+    movu    [r0 + r1 + 16], m6
+    lea     r0,     [r0 + r1 * 2]
+
+    jnz     .loop
+    RET
+%else
+cglobal pixel_sub_ps_16x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov         r6d,    %2/4
+    pxor        m6,     m6
+    add         r1,     r1
+.loop:
+    movu        m1,     [r2]
+    movu        m3,     [r3]
+    pmovzxbw    m0,     m1
+    pmovzxbw    m2,     m3
+    punpckhbw   m1,     m6
+    punpckhbw   m3,     m6
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        m5,     [r2 + r4]
+    movu        m3,     [r3 + r5]
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+    pmovzxbw    m4,     m5
+    pmovzxbw    m2,     m3
+    punpckhbw   m5,     m6
+    punpckhbw   m3,     m6
+
+    psubw       m4,     m2
+    psubw       m5,     m3
+
+    movu        [r0],           m0
+    movu        [r0 + 16],      m1
+    movu        [r0 + r1],      m4
+    movu        [r0 + r1 + 16], m5
+
+    movu        m1,     [r2]
+    movu        m3,     [r3]
+    pmovzxbw    m0,     m1
+    pmovzxbw    m2,     m3
+    punpckhbw   m1,     m6
+    punpckhbw   m3,     m6
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        m5,     [r2 + r4]
+    movu        m3,     [r3 + r5]
+    dec         r6d
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+    lea         r0,     [r0 + r1 * 2]
+    pmovzxbw    m4,     m5
+    pmovzxbw    m2,     m3
+    punpckhbw   m5,     m6
+    punpckhbw   m3,     m6
+
+    psubw       m4,     m2
+    psubw       m5,     m3
+
+    movu        [r0],           m0
+    movu        [r0 + 16],      m1
+    movu        [r0 + r1],      m4
+    movu        [r0 + r1 + 16], m5
+    lea         r0,     [r0 + r1 * 2]
+
+    jnz         .loop
+    RET
+%endif
+%endmacro
+
+; 16x16 and 16x32 block sizes
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+PIXELSUB_PS_W16_H4 16, 16
+PIXELSUB_PS_W16_H4 16, 32
+%else
+INIT_XMM sse4
+PIXELSUB_PS_W16_H4 16, 16
+PIXELSUB_PS_W16_H4 16, 32
+%endif
+
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_16x16(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+; PIXELSUB_PS_W16_H4_avx2 %1=height: HBD avx2 pixel_sub_ps_16x%1,
+; x86-64 only (needs r6-r8 for the 3*stride addressing). The loop is fully
+; unrolled with %rep, four 16-sample rows per repetition; one 32-byte ymm
+; load covers a whole 16x16-bit row.
+%macro PIXELSUB_PS_W16_H4_avx2 1
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_sub_ps_16x%1, 6, 9, 4, dest, deststride, src0, src1, srcstride0, srcstride1
+    add         r1d,    r1d
+    add         r4d,    r4d
+    add         r5d,    r5d
+    lea         r6,     [r1 * 3]
+    lea         r7,     [r4 * 3]
+    lea         r8,     [r5 * 3]
+
+%rep %1/4
+    movu        m0,     [r2]
+    movu        m1,     [r3]
+    movu        m2,     [r2 + r4]
+    movu        m3,     [r3 + r5]
+
+    psubw       m0,     m1
+    psubw       m2,     m3
+
+    movu        [r0],            m0
+    movu        [r0 + r1],       m2
+
+    movu        m0,     [r2 + r4 * 2]
+    movu        m1,     [r3 + r5 * 2]
+    movu        m2,     [r2 + r7]
+    movu        m3,     [r3 + r8]
+
+    psubw       m0,     m1
+    psubw       m2,     m3
+
+    movu        [r0 + r1 * 2],   m0
+    movu        [r0 + r6],       m2
+
+    lea         r0,     [r0 + r1 * 4]
+    lea         r2,     [r2 + r4 * 4]
+    lea         r3,     [r3 + r5 * 4]
+%endrep
+    RET
+%endif
+%endmacro
+PIXELSUB_PS_W16_H4_avx2 16
+PIXELSUB_PS_W16_H4_avx2 32
+%else
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_16x16(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+; PIXELSUB_PS_W16_H8_avx2 %1=width(16) %2=height: 8bpp avx2
+; pixel_sub_ps_16x%2, x86-64 only. Eight rows per loop iteration; each
+; pmovzxbw widens a 16-byte source row straight from memory into a ymm of
+; sixteen words. Only the dest stride (r1) is doubled; r8/r9 hold 3*src
+; strides for the row-3 addressing.
+%macro PIXELSUB_PS_W16_H8_avx2 2
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_sub_ps_16x%2, 6, 10, 4, dest, deststride, src0, src1, srcstride0, srcstride1
+    add         r1,     r1
+    lea         r6,     [r1 * 3]
+    mov         r7d,    %2/8
+
+    lea         r9,     [r4 * 3]
+    lea         r8,     [r5 * 3]
+
+; fixed: label was written ".loop" without the colon, which yasm flags as
+; an orphan label; every other label in this file uses the ":" form.
+.loop:
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r3]
+    pmovzxbw    m2,     [r2 + r4]
+    pmovzxbw    m3,     [r3 + r5]
+
+    psubw       m0,     m1
+    psubw       m2,     m3
+
+    movu        [r0],            m0
+    movu        [r0 + r1],       m2
+
+    pmovzxbw    m0,     [r2 + 2 * r4]
+    pmovzxbw    m1,     [r3 + 2 * r5]
+    pmovzxbw    m2,     [r2 + r9]
+    pmovzxbw    m3,     [r3 + r8]
+
+    psubw       m0,     m1
+    psubw       m2,     m3
+
+    movu        [r0 + r1 * 2],   m0
+    movu        [r0 + r6],       m2
+
+    lea         r0,     [r0 + r1 * 4]
+    lea         r2,     [r2 + r4 * 4]
+    lea         r3,     [r3 + r5 * 4]
+
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r3]
+    pmovzxbw    m2,     [r2 + r4]
+    pmovzxbw    m3,     [r3 + r5]
+
+    psubw       m0,     m1
+    psubw       m2,     m3
+
+    movu        [r0],            m0
+    movu        [r0 + r1],       m2
+
+    pmovzxbw    m0,     [r2 + 2 * r4]
+    pmovzxbw    m1,     [r3 + 2 * r5]
+    pmovzxbw    m2,     [r2 + r9]
+    pmovzxbw    m3,     [r3 + r8]
+
+    psubw       m0,     m1
+    psubw       m2,     m3
+
+    movu        [r0 + r1 * 2],   m0
+    movu        [r0 + r6],       m2
+
+    lea         r0,     [r0 + r1 * 4]
+    lea         r2,     [r2 + r4 * 4]
+    lea         r3,     [r3 + r5 * 4]
+
+    dec         r7d
+    jnz         .loop
+    RET
+%endif
+%endmacro
+
+PIXELSUB_PS_W16_H8_avx2 16, 16
+PIXELSUB_PS_W16_H8_avx2 16, 32
+%endif
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_32x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+; PIXELSUB_PS_W32_H2 %1=width(32) %2=height: pixel_sub_ps_32x%2,
+; two rows per iteration. HBD path: four 16-byte loads per 32-sample row.
+; 8bpp path: four 8-byte loads per row, each widened with pmovzxbw before
+; the word subtraction; 64 output bytes per row.
+%macro PIXELSUB_PS_W32_H2 2
+%if HIGH_BIT_DEPTH
+cglobal pixel_sub_ps_32x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov     r6d,    %2/2
+    add     r4,     r4
+    add     r5,     r5
+    add     r1,     r1
+.loop:
+    movu    m0,     [r2]
+    movu    m2,     [r2 + 16]
+    movu    m4,     [r2 + 32]
+    movu    m6,     [r2 + 48]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + 16]
+    movu    m5,     [r3 + 32]
+    movu    m7,     [r3 + 48]
+    dec     r6d
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movu    [r0],       m0
+    movu    [r0 + 16],  m2
+    movu    [r0 + 32],  m4
+    movu    [r0 + 48],  m6
+
+    movu    m0,     [r2 + r4]
+    movu    m2,     [r2 + r4 + 16]
+    movu    m4,     [r2 + r4 + 32]
+    movu    m6,     [r2 + r4 + 48]
+    movu    m1,     [r3 + r5]
+    movu    m3,     [r3 + r5 + 16]
+    movu    m5,     [r3 + r5 + 32]
+    movu    m7,     [r3 + r5 + 48]
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movu    [r0 + r1],      m0
+    movu    [r0 + r1 + 16], m2
+    movu    [r0 + r1 + 32], m4
+    movu    [r0 + r1 + 48], m6
+    lea     r0,     [r0 + r1 * 2]
+
+    jnz     .loop
+    RET
+%else
+cglobal pixel_sub_ps_32x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov         r6d,    %2/2
+    add         r1,     r1
+.loop:
+    movh        m0,     [r2]
+    movh        m1,     [r2 + 8]
+    movh        m2,     [r2 + 16]
+    movh        m6,     [r2 + 24]
+    movh        m3,     [r3]
+    movh        m4,     [r3 + 8]
+    movh        m5,     [r3 + 16]
+    movh        m7,     [r3 + 24]
+    dec         r6d
+    pmovzxbw    m0,     m0
+    pmovzxbw    m1,     m1
+    pmovzxbw    m2,     m2
+    pmovzxbw    m6,     m6
+    pmovzxbw    m3,     m3
+    pmovzxbw    m4,     m4
+    pmovzxbw    m5,     m5
+    pmovzxbw    m7,     m7
+
+    psubw       m0,     m3
+    psubw       m1,     m4
+    psubw       m2,     m5
+    psubw       m6,     m7
+
+    movu        [r0],       m0
+    movu        [r0 + 16],  m1
+    movu        [r0 + 32],  m2
+    movu        [r0 + 48],  m6
+
+    movh        m0,     [r2 + r4]
+    movh        m1,     [r2 + r4 + 8]
+    movh        m2,     [r2 + r4 + 16]
+    movh        m6,     [r2 + r4 + 24]
+    movh        m3,     [r3 + r5]
+    movh        m4,     [r3 + r5 + 8]
+    movh        m5,     [r3 + r5 + 16]
+    movh        m7,     [r3 + r5 + 24]
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+    pmovzxbw    m0,     m0
+    pmovzxbw    m1,     m1
+    pmovzxbw    m2,     m2
+    pmovzxbw    m6,     m6
+    pmovzxbw    m3,     m3
+    pmovzxbw    m4,     m4
+    pmovzxbw    m5,     m5
+    pmovzxbw    m7,     m7
+
+    psubw       m0,     m3
+    psubw       m1,     m4
+    psubw       m2,     m5
+    psubw       m6,     m7
+
+    movu        [r0 + r1],      m0
+    movu        [r0 + r1 + 16], m1
+    movu        [r0 + r1 + 32], m2
+    movu        [r0 + r1 + 48], m6
+    lea         r0,     [r0 + r1 * 2]
+
+    jnz         .loop
+    RET
+%endif
+%endmacro
+
+; 32x32 and 32x64 block sizes
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+PIXELSUB_PS_W32_H2 32, 32
+PIXELSUB_PS_W32_H2 32, 64
+%else
+INIT_XMM sse4
+PIXELSUB_PS_W32_H2 32, 32
+PIXELSUB_PS_W32_H2 32, 64
+%endif
+
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_32x32(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+; PIXELSUB_PS_W32_H4_avx2 %1=height: HBD avx2 pixel_sub_ps_32x%1,
+; x86-64 only. Four rows per iteration; each 32x16-bit row is two 32-byte
+; ymm loads. Strides are doubled up front for the 16-bit samples and
+; r6/r7/r8 hold the 3*stride offsets for the fourth row.
+%macro PIXELSUB_PS_W32_H4_avx2 1
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_sub_ps_32x%1, 6, 10, 4, dest, deststride, src0, src1, srcstride0, srcstride1
+    add         r1d,    r1d
+    add         r4d,    r4d
+    add         r5d,    r5d
+    mov         r9d,    %1/4
+    lea         r6,     [r1 * 3]
+    lea         r7,     [r4 * 3]
+    lea         r8,     [r5 * 3]
+
+; fixed: label was written ".loop" without the colon, which yasm flags as
+; an orphan label; every other label in this file uses the ":" form.
+.loop:
+    movu        m0,     [r2]
+    movu        m1,     [r2 + 32]
+    movu        m2,     [r3]
+    movu        m3,     [r3 + 32]
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0],                 m0
+    movu        [r0 + 32],            m1
+
+    movu        m0,     [r2 + r4]
+    movu        m1,     [r2 + r4 + 32]
+    movu        m2,     [r3 + r5]
+    movu        m3,     [r3 + r5 + 32]
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 + r1],            m0
+    movu        [r0 + r1 + 32],       m1
+
+    movu        m0,     [r2 + r4 * 2]
+    movu        m1,     [r2 + r4 * 2 + 32]
+    movu        m2,     [r3 + r5 * 2]
+    movu        m3,     [r3 + r5 * 2 + 32]
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 + r1 * 2],        m0
+    movu        [r0 + r1 * 2 + 32],   m1
+
+    movu        m0,     [r2 + r7]
+    movu        m1,     [r2 + r7 + 32]
+    movu        m2,     [r3 + r8]
+    movu        m3,     [r3 + r8 + 32]
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 + r6],            m0
+    movu        [r0 + r6 + 32],       m1
+
+    lea         r0,     [r0 + r1 * 4]
+    lea         r2,     [r2 + r4 * 4]
+    lea         r3,     [r3 + r5 * 4]
+    dec         r9d
+    jnz         .loop
+    RET
+%endif
+%endmacro
+PIXELSUB_PS_W32_H4_avx2 32
+PIXELSUB_PS_W32_H4_avx2 64
+%else
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_32x32(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+; Emit pixel_sub_ps_32x%2 (AVX2, x86-64 only):
+;   dest[y][x] = (int16_t)(src0[y][x] - src1[y][x]) for a 32-wide block of 8-bit pixels.
+; Register map: r0=dest r1=deststride r2=src0 r3=src1 r4=srcstride0 r5=srcstride1
+;               r6d=loop counter, r7/r8/r9 = 3*stride (row 3 addressing).
+%macro PIXELSUB_PS_W32_H8_avx2 2
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_sub_ps_32x%2, 6, 10, 4, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov        r6d,    %2/8
+    add        r1,     r1
+    lea         r7,         [r4 * 3]
+    lea         r8,         [r5 * 3]
+    lea         r9,         [r1 * 3]
+
+; Each iteration handles 8 rows: two groups of 4 rows (offsets 0, stride,
+; 2*stride, 3*stride), advancing all pointers by 4*stride between groups.
+; pmovzxbw widens 16 bytes to 16 words; two loads cover the 32-pixel row.
+.loop:
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r3]
+    pmovzxbw    m3,     [r3 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0],            m0
+    movu        [r0 + 32],       m1
+
+    pmovzxbw    m0,     [r2 + r4]
+    pmovzxbw    m1,     [r2 + r4 + 16]
+    pmovzxbw    m2,     [r3 + r5]
+    pmovzxbw    m3,     [r3 + r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 + r1],       m0
+    movu        [r0 + r1 + 32],  m1
+
+    pmovzxbw    m0,     [r2 + 2 * r4]
+    pmovzxbw    m1,     [r2 + 2 * r4 + 16]
+    pmovzxbw    m2,     [r3 + 2 * r5]
+    pmovzxbw    m3,     [r3 + 2 * r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 + r1 * 2 ],           m0
+    movu        [r0 + r1 * 2 + 32],       m1
+
+    pmovzxbw    m0,     [r2 + r7]
+    pmovzxbw    m1,     [r2 + r7 + 16]
+    pmovzxbw    m2,     [r3 + r8]
+    pmovzxbw    m3,     [r3 + r8 + 16]
+
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 + r9],           m0
+    movu        [r0 + r9 +32],       m1
+
+    lea         r2,     [r2 + r4 * 4]
+    lea         r3,     [r3 + r5 * 4]
+    lea         r0,     [r0 + r1 * 4]
+
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r3]
+    pmovzxbw    m3,     [r3 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 ],           m0
+    movu        [r0 + 32],       m1
+
+    pmovzxbw    m0,     [r2 + r4]
+    pmovzxbw    m1,     [r2 + r4 + 16]
+    pmovzxbw    m2,     [r3 + r5]
+    pmovzxbw    m3,     [r3 + r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 + r1],           m0
+    movu        [r0 + r1 + 32],       m1
+
+    pmovzxbw    m0,     [r2 + 2 * r4]
+    pmovzxbw    m1,     [r2 + 2 * r4 + 16]
+    pmovzxbw    m2,     [r3 + 2 * r5]
+    pmovzxbw    m3,     [r3 + 2 * r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 + r1 * 2],           m0
+    movu        [r0 + r1 * 2 + 32],       m1
+
+    pmovzxbw    m0,     [r2 + r7]
+    pmovzxbw    m1,     [r2 + r7 + 16]
+    pmovzxbw    m2,     [r3 + r8]
+    pmovzxbw    m3,     [r3 + r8 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 + r9],           m0
+    movu        [r0 + r9 + 32],       m1
+
+    lea         r0,     [r0 + r1 * 4]
+    lea         r2,     [r2 + r4 * 4]
+    lea         r3,     [r3 + r5 * 4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endif
+%endmacro
+
+; Instantiate the AVX2 32-wide subtract for the two supported heights.
+PIXELSUB_PS_W32_H8_avx2 32, 32
+PIXELSUB_PS_W32_H8_avx2 32, 64
+%endif
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_64x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+; Emit pixel_sub_ps_64x%2: dest = src0 - src1 widened to int16_t, two rows
+; per loop iteration.  HIGH_BIT_DEPTH path works on 16-bit pixels directly;
+; the 8-bit path widens with pmovzxbw (low 8 bytes) / punpckhbw (high 8).
+%macro PIXELSUB_PS_W64_H2 2
+%if HIGH_BIT_DEPTH
+cglobal pixel_sub_ps_64x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov     r6d,    %2/2
+    ; pixel strides are in units of uint16_t -> convert all three to bytes
+    add     r4,     r4
+    add     r5,     r5
+    add     r1,     r1
+.loop:
+    movu    m0,     [r2]
+    movu    m2,     [r2 + 16]
+    movu    m4,     [r2 + 32]
+    movu    m6,     [r2 + 48]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + 16]
+    movu    m5,     [r3 + 32]
+    movu    m7,     [r3 + 48]
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movu    [r0],       m0
+    movu    [r0 + 16],  m2
+    movu    [r0 + 32],  m4
+    movu    [r0 + 48],  m6
+
+    movu    m0,     [r2 + 64]
+    movu    m2,     [r2 + 80]
+    movu    m4,     [r2 + 96]
+    movu    m6,     [r2 + 112]
+    movu    m1,     [r3 + 64]
+    movu    m3,     [r3 + 80]
+    movu    m5,     [r3 + 96]
+    movu    m7,     [r3 + 112]
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movu    [r0 + 64],  m0
+    movu    [r0 + 80],  m2
+    movu    [r0 + 96],  m4
+    movu    [r0 + 112], m6
+
+    movu    m0,     [r2 + r4]
+    movu    m2,     [r2 + r4 + 16]
+    movu    m4,     [r2 + r4 + 32]
+    movu    m6,     [r2 + r4 + 48]
+    movu    m1,     [r3 + r5]
+    movu    m3,     [r3 + r5 + 16]
+    movu    m5,     [r3 + r5 + 32]
+    movu    m7,     [r3 + r5 + 48]
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movu    [r0 + r1],      m0
+    movu    [r0 + r1 + 16], m2
+    movu    [r0 + r1 + 32], m4
+    movu    [r0 + r1 + 48], m6
+
+    movu    m0,     [r2 + r4 + 64]
+    movu    m2,     [r2 + r4 + 80]
+    movu    m4,     [r2 + r4 + 96]
+    movu    m6,     [r2 + r4 + 112]
+    movu    m1,     [r3 + r5 + 64]
+    movu    m3,     [r3 + r5 + 80]
+    movu    m5,     [r3 + r5 + 96]
+    movu    m7,     [r3 + r5 + 112]
+    ; dec is hoisted up here; its ZF result survives the lea/psubw/movu below
+    dec     r6d
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    psubw   m0,     m1
+    psubw   m2,     m3
+    psubw   m4,     m5
+    psubw   m6,     m7
+
+    movu    [r0 + r1 + 64],     m0
+    movu    [r0 + r1 + 80],     m2
+    movu    [r0 + r1 + 96],     m4
+    movu    [r0 + r1 + 112],    m6
+    lea     r0,     [r0 + r1 * 2]
+
+    jnz     .loop
+    RET
+%else
+; 8-bit path: loads and subtracts are software-pipelined across register
+; rotations, so the register assignments are deliberate -- do not reorder.
+cglobal pixel_sub_ps_64x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    mov         r6d,    %2/2
+    pxor        m6,     m6
+    add         r1,     r1
+.loop:
+    movu        m1,     [r2]
+    movu        m5,     [r2 + 16]
+    movu        m3,     [r3]
+    movu        m7,     [r3 + 16]
+
+    pmovzxbw    m0,     m1
+    pmovzxbw    m4,     m5
+    pmovzxbw    m2,     m3
+    punpckhbw   m1,     m6
+    punpckhbw   m3,     m6
+    punpckhbw   m5,     m6
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+    pmovzxbw    m2,     m7
+    punpckhbw   m7,     m6
+    psubw       m4,     m2
+    psubw       m5,     m7
+
+    movu        m3,     [r2 + 32]
+    movu        m7,     [r3 + 32]
+    pmovzxbw    m2,     m3
+    punpckhbw   m3,     m6
+
+    movu        [r0],       m0
+    movu        [r0 + 16],  m1
+    movu        [r0 + 32],  m4
+    movu        [r0 + 48],  m5
+
+    movu        m1,     [r2 + 48]
+    movu        m5,     [r3 + 48]
+    pmovzxbw    m0,     m1
+    pmovzxbw    m4,     m7
+    punpckhbw   m1,     m6
+    punpckhbw   m7,     m6
+
+    psubw       m2,     m4
+    psubw       m3,     m7
+
+    movu        [r0 + 64],  m2
+    movu        [r0 + 80],  m3
+
+    movu        m7,     [r2 + r4]
+    movu        m3,     [r3 + r5]
+    pmovzxbw    m2,     m5
+    pmovzxbw    m4,     m7
+    punpckhbw   m5,     m6
+    punpckhbw   m7,     m6
+
+    psubw       m0,     m2
+    psubw       m1,     m5
+
+    movu        [r0 + 96],  m0
+    movu        [r0 + 112], m1
+
+    movu        m2,     [r2 + r4 + 16]
+    movu        m5,     [r3 + r5 + 16]
+    pmovzxbw    m0,     m3
+    pmovzxbw    m1,     m2
+    punpckhbw   m3,     m6
+    punpckhbw   m2,     m6
+
+    psubw       m4,     m0
+    psubw       m7,     m3
+
+    movu        [r0 + r1],      m4
+    movu        [r0 + r1 + 16], m7
+
+    movu        m0,     [r2 + r4 + 32]
+    movu        m3,     [r3 + r5 + 32]
+    dec         r6d
+    pmovzxbw    m4,     m5
+    pmovzxbw    m7,     m0
+    punpckhbw   m5,     m6
+    punpckhbw   m0,     m6
+
+    psubw       m1,     m4
+    psubw       m2,     m5
+
+    movu        [r0 + r1 + 32], m1
+    movu        [r0 + r1 + 48], m2
+
+    movu        m4,     [r2 + r4 + 48]
+    movu        m5,     [r3 + r5 + 48]
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+    pmovzxbw    m1,     m3
+    pmovzxbw    m2,     m4
+    punpckhbw   m3,     m6
+    punpckhbw   m4,     m6
+
+    psubw       m7,     m1
+    psubw       m0,     m3
+
+    movu        [r0 + r1 + 64], m7
+    movu        [r0 + r1 + 80], m0
+
+    pmovzxbw    m7,     m5
+    punpckhbw   m5,     m6
+    psubw       m2,     m7
+    psubw       m4,     m5
+
+    movu        [r0 + r1 + 96],     m2
+    movu        [r0 + r1 + 112],    m4
+    lea         r0,     [r0 + r1 * 2]
+
+    jnz         .loop
+    RET
+%endif
+%endmacro
+
+; 64x64 subtract: sse2 suffices for 16-bit pixels; the 8-bit path needs
+; sse4 for pmovzxbw.
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+PIXELSUB_PS_W64_H2 64, 64
+%else
+INIT_XMM sse4
+PIXELSUB_PS_W64_H2 64, 64
+%endif
+
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_64x64(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+; pixel_sub_ps_64x64 (AVX2): dest = src0 - src1 as int16_t.
+; HIGH_BIT_DEPTH path: 4 rows/iteration, 16 iterations; x86-64 only
+; (needs r6..r9 for the 3*stride offsets).
+; Fix: the loop label was written ".loop" (missing colon) -- every other
+; loop label in this file uses ".loop:"; the bare form triggers NASM's
+; orphan-label warning.
+%if HIGH_BIT_DEPTH
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_sub_ps_64x64, 6, 10, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    ; pixel strides are in uint16_t units -> convert to bytes (32-bit math
+    ; is fine: strides are small, upper halves zero-extended)
+    add         r1d,    r1d
+    add         r4d,    r4d
+    add         r5d,    r5d
+    mov         r9d,    16
+    lea         r6,     [r1 * 3]
+    lea         r7,     [r4 * 3]
+    lea         r8,     [r5 * 3]
+
+.loop:
+    movu        m0,     [r2]
+    movu        m1,     [r2 + 32]
+    movu        m2,     [r2 + 64]
+    movu        m3,     [r2 + 96]
+    movu        m4,     [r3]
+    movu        m5,     [r3 + 32]
+    movu        m6,     [r3 + 64]
+    movu        m7,     [r3 + 96]
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0],                 m0
+    movu        [r0 + 32],            m1
+    movu        [r0 + 64],            m2
+    movu        [r0 + 96],            m3
+
+    movu        m0,     [r2 + r4]
+    movu        m1,     [r2 + r4 + 32]
+    movu        m2,     [r2 + r4 + 64]
+    movu        m3,     [r2 + r4 + 96]
+    movu        m4,     [r3 + r5]
+    movu        m5,     [r3 + r5 + 32]
+    movu        m6,     [r3 + r5 + 64]
+    movu        m7,     [r3 + r5 + 96]
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0 + r1],            m0
+    movu        [r0 + r1 + 32],       m1
+    movu        [r0 + r1 + 64],       m2
+    movu        [r0 + r1 + 96],       m3
+
+    movu        m0,     [r2 + r4 * 2]
+    movu        m1,     [r2 + r4 * 2 + 32]
+    movu        m2,     [r2 + r4 * 2 + 64]
+    movu        m3,     [r2 + r4 * 2 + 96]
+    movu        m4,     [r3 + r5 * 2]
+    movu        m5,     [r3 + r5 * 2 + 32]
+    movu        m6,     [r3 + r5 * 2 + 64]
+    movu        m7,     [r3 + r5 * 2 + 96]
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0 + r1 * 2],        m0
+    movu        [r0 + r1 * 2 + 32],   m1
+    movu        [r0 + r1 * 2 + 64],   m2
+    movu        [r0 + r1 * 2 + 96],   m3
+
+    movu        m0,     [r2 + r7]
+    movu        m1,     [r2 + r7 + 32]
+    movu        m2,     [r2 + r7 + 64]
+    movu        m3,     [r2 + r7 + 96]
+    movu        m4,     [r3 + r8]
+    movu        m5,     [r3 + r8 + 32]
+    movu        m6,     [r3 + r8 + 64]
+    movu        m7,     [r3 + r8 + 96]
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0 + r6],            m0
+    movu        [r0 + r6 + 32],       m1
+    movu        [r0 + r6 + 64],       m2
+    movu        [r0 + r6 + 96],       m3
+
+    lea         r0,     [r0 + r1 * 4]
+    lea         r2,     [r2 + r4 * 4]
+    lea         r3,     [r3 + r5 * 4]
+    dec         r9d
+    jnz         .loop
+    RET
+%endif
+%else
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_64x64(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+; 8-bit path: pmovzxbw widens 16 bytes -> 16 words per ymm; 4 loads per
+; 64-pixel row, 4 unrolled rows per iteration, 16 iterations.
+INIT_YMM avx2
+cglobal pixel_sub_ps_64x64, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+     mov        r6d,    16
+     add        r1,     r1
+
+.loop:
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r2 + 32]
+    pmovzxbw    m3,     [r2 + 48]
+
+    pmovzxbw    m4,     [r3]
+    pmovzxbw    m5,     [r3 + 16]
+    pmovzxbw    m6,     [r3 + 32]
+    pmovzxbw    m7,     [r3 + 48]
+
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0],         m0
+    movu        [r0 + 32],    m1
+    movu        [r0 + 64],    m2
+    movu        [r0 + 96],    m3
+
+    add         r0,     r1
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r2 + 32]
+    pmovzxbw    m3,     [r2 + 48]
+
+    pmovzxbw    m4,     [r3]
+    pmovzxbw    m5,     [r3 + 16]
+    pmovzxbw    m6,     [r3 + 32]
+    pmovzxbw    m7,     [r3 + 48]
+
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0],         m0
+    movu        [r0 + 32],    m1
+    movu        [r0 + 64],    m2
+    movu        [r0 + 96],    m3
+
+    add         r0,     r1
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r2 + 32]
+    pmovzxbw    m3,     [r2 + 48]
+
+    pmovzxbw    m4,     [r3]
+    pmovzxbw    m5,     [r3 + 16]
+    pmovzxbw    m6,     [r3 + 32]
+    pmovzxbw    m7,     [r3 + 48]
+
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0],         m0
+    movu        [r0 + 32],    m1
+    movu        [r0 + 64],    m2
+    movu        [r0 + 96],    m3
+
+    add         r0,     r1
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r2 + 32]
+    pmovzxbw    m3,     [r2 + 48]
+
+    pmovzxbw    m4,     [r3]
+    pmovzxbw    m5,     [r3 + 16]
+    pmovzxbw    m6,     [r3 + 32]
+    pmovzxbw    m7,     [r3 + 48]
+
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0],         m0
+    movu        [r0 + 32],    m1
+    movu        [r0 + 64],    m2
+    movu        [r0 + 96],    m3
+
+    add         r0,     r1
+    add         r2,     r4
+    add         r3,     r5
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endif
+;=============================================================================
+; variance
+;=============================================================================
+
+; Initialize variance accumulators: m5 = running sum (words), m6 = running
+; sum of squares (dwords).  In 8-bit builds m7 is either a 0x00ff mask
+; (%1 != 0, for DEINTB) or zero (for punpck widening); ymm (mmsize==32)
+; callers widen with pmovzxbw and need no m7.
+%macro VAR_START 1
+    pxor  m5, m5    ; sum
+    pxor  m6, m6    ; sum squared
+%if HIGH_BIT_DEPTH == 0
+%if %1
+    mova  m7, [pw_00ff]
+%elif mmsize < 32
+    pxor  m7, m7    ; zero
+%endif
+%endif ; !HIGH_BIT_DEPTH
+%endmacro
+
+; Finish a %1x%2 variance: horizontally reduce sum (m5) and sum-of-squares
+; (m6), then return both packed in rax (x86-64: sum low dword, sqr high
+; dword) or split across eax/edx (x86-32).  For large blocks the callers
+; spill partial word sums into r4d to avoid 16-bit overflow; those paths
+; fold r4d back in here.
+%macro VAR_END 2
+%if HIGH_BIT_DEPTH
+%if mmsize == 8 && %1*%2 == 256
+    HADDUW  m5, m2
+%else
+%if %1 >= 32
+    HADDW     m5,    m2
+    movd      m7,    r4d
+    paddd     m5,    m7
+%else
+    HADDW   m5, m2
+%endif
+%endif
+%else ; !HIGH_BIT_DEPTH
+%if %1 == 64
+    HADDW     m5,    m2
+    movd      m7,    r4d
+    paddd     m5,    m7
+%else
+    HADDW   m5, m2
+%endif
+%endif ; HIGH_BIT_DEPTH
+    HADDD   m6, m1
+%if ARCH_X86_64
+    punpckldq m5, m6
+    movq   rax, m5
+%else
+    movd   eax, m5
+    movd   edx, m6
+%endif
+    RET
+%endmacro
+
+; 12-bit variant of VAR_END: the sum in m5 has already been widened to
+; dwords by the caller (HADDUWD), so both reductions use HADDD.
+%macro VAR_END_12bit 2
+    HADDD   m5, m1
+    HADDD   m6, m1
+%if ARCH_X86_64
+    punpckldq m5, m6
+    movq   rax, m5
+%else
+    movd   eax, m5
+    movd   edx, m6
+%endif
+    RET
+%endmacro
+
+; Accumulate one batch of widened pixels (m0,m1,m3,m4 as words):
+; word sums into m5, dword sums of squares into m6 (pmaddwd squares
+; adjacent words and adds the pair).
+%macro VAR_CORE 0
+    paddw     m5, m0
+    paddw     m5, m3
+    paddw     m5, m1
+    paddw     m5, m4
+    pmaddwd   m0, m0
+    pmaddwd   m3, m3
+    pmaddwd   m1, m1
+    pmaddwd   m4, m4
+    paddd     m6, m0
+    paddd     m6, m3
+    paddd     m6, m1
+    paddd     m6, m4
+%endmacro
+
+; Accumulate %2 iterations of two rows each into m5/m6.
+; %1 = byte offset of the second row (either r1 itself or a constant);
+; when %1 is r1 the pointer advances by 2 rows per iteration, otherwise
+; by one row (caller covers the second half via the constant offset).
+%macro VAR_2ROW 2
+    mov      r2d, %2
+%%loop:
+%if HIGH_BIT_DEPTH
+    movu      m0, [r0]
+    movu      m1, [r0+mmsize]
+    movu      m3, [r0+%1]
+    movu      m4, [r0+%1+mmsize]
+%else ; !HIGH_BIT_DEPTH
+    mova      m0, [r0]
+    punpckhbw m1, m0, m7
+    mova      m3, [r0+%1]
+    mova      m4, m3
+    punpcklbw m0, m7
+%endif ; HIGH_BIT_DEPTH
+%ifidn %1, r1
+    lea       r0, [r0+%1*2]
+%else
+    add       r0, r1
+%endif
+%if HIGH_BIT_DEPTH == 0
+    punpcklbw m3, m7
+    punpckhbw m4, m7
+%endif ; !HIGH_BIT_DEPTH
+    VAR_CORE
+    dec r2d
+    jg %%loop
+%endmacro
+
+;-----------------------------------------------------------------------------
+; int pixel_var_wxh( uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; MMX baseline: 16x16 variance, 8 pixels (one mmx reg pair) per half-row.
+INIT_MMX mmx2
+cglobal pixel_var_16x16, 2,3
+    FIX_STRIDES r1
+    VAR_START 0
+    VAR_2ROW 8*SIZEOF_PIXEL, 16
+    VAR_END 16, 16
+
+; MMX baseline: 8x8 variance, two rows per VAR_2ROW iteration.
+cglobal pixel_var_8x8, 2,3
+    FIX_STRIDES r1
+    VAR_START 0
+    VAR_2ROW r1, 4
+    VAR_END 8, 8
+
+%if HIGH_BIT_DEPTH
+; HIGH_BIT_DEPTH pixel_var_* bodies (instantiated per-ISA below).
+; <=10-bit: 16-bit word sums can overflow for large blocks, so 32x32/64x64
+; periodically HADDW the partial sum into r4d and clear m5.
+; >10-bit (12-bit): even 16 rows can overflow words, so every 8-row slice is
+; widened to dwords with HADDUWD and accumulated in m7, finished by
+; VAR_END_12bit.  Column strips 16 pixels wide are walked via lea r0,[r3+N].
+%macro VAR 0
+
+%if BIT_DEPTH <= 10
+cglobal pixel_var_16x16, 2,3,8
+    FIX_STRIDES r1
+    VAR_START 0
+    VAR_2ROW r1, 8
+    VAR_END 16, 16
+
+cglobal pixel_var_32x32, 2,6,8
+    FIX_STRIDES r1
+    mov       r3,    r0
+    VAR_START 0
+    VAR_2ROW  r1,    8
+    HADDW      m5,    m2
+    movd       r4d,   m5
+    pxor       m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    lea       r0,    [r3 + 32]
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    VAR_END   32,    32
+
+cglobal pixel_var_64x64, 2,6,8
+    FIX_STRIDES r1
+    mov       r3,    r0
+    VAR_START 0
+    VAR_2ROW  r1,    8
+    HADDW      m5,    m2
+    movd       r4d,   m5
+    pxor       m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    lea       r0,    [r3 + 32]
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    lea       r0,    [r3 + 64]
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    lea       r0,    [r3 + 96]
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    VAR_2ROW  r1,    8
+    VAR_END   64,    64
+
+%else ; BIT_DEPTH <= 10
+
+cglobal pixel_var_16x16, 2,3,8
+    FIX_STRIDES r1
+    VAR_START 0
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    mova m7, m5
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m5, m7
+    VAR_END_12bit 16, 16
+
+cglobal pixel_var_32x32, 2,6,8
+    FIX_STRIDES r1
+    mov r3,    r0
+    VAR_START 0
+
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    mova m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    lea       r0, [r3 + 32]
+    pxor      m5, m5
+    VAR_2ROW  r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor      m5, m5
+    VAR_2ROW  r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m5, m7
+    VAR_END_12bit 32, 32
+
+cglobal pixel_var_64x64, 2,6,8
+    FIX_STRIDES r1
+    mov r3,    r0
+    VAR_START 0
+
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    mova m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    lea       r0, [r3 + 16 * SIZEOF_PIXEL]
+    pxor      m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    lea       r0, [r3 + 32 * SIZEOF_PIXEL]
+    pxor      m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    lea       r0, [r3 + 48 * SIZEOF_PIXEL]
+    pxor      m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m7, m5
+
+    pxor m5, m5
+    VAR_2ROW r1, 4
+    HADDUWD m5, m1
+    paddd m5, m7
+    VAR_END_12bit 64, 64
+
+%endif ; BIT_DEPTH <= 10
+
+cglobal pixel_var_8x8, 2,3,8
+    lea       r2, [r1*3]
+    VAR_START 0
+    movu      m0, [r0]
+    movu      m1, [r0+r1*2]
+    movu      m3, [r0+r1*4]
+    movu      m4, [r0+r2*2]
+    lea       r0, [r0+r1*8]
+    VAR_CORE
+    movu      m0, [r0]
+    movu      m1, [r0+r1*2]
+    movu      m3, [r0+r1*4]
+    movu      m4, [r0+r2*2]
+    VAR_CORE
+    VAR_END 8, 8
+
+%endmacro ; VAR
+
+; Instantiate the high-bit-depth variance kernels for each ISA level.
+INIT_XMM sse2
+VAR
+INIT_XMM avx
+VAR
+INIT_XMM xop
+VAR
+%endif ; HIGH_BIT_DEPTH
+
+%if HIGH_BIT_DEPTH == 0
+; 8-bit pixel_var_* bodies (instantiated per-ISA below).  VAR_START 1 loads
+; the 0x00ff mask into m7 for DEINTB, which splits packed bytes into
+; even/odd word lanes.  pixel_var_16x16_internal is a bare (no-prologue)
+; subroutine accumulating 16 rows into m5/m6; larger sizes call it per
+; 16-wide column strip, spilling partial word sums into r4d to avoid
+; 16-bit overflow on 64x64.
+%macro VAR 0
+cglobal pixel_var_8x8, 2,3,8
+    VAR_START 1
+    lea       r2,    [r1 * 3]
+    movh      m0,    [r0]
+    movh      m3,    [r0 + r1]
+    movhps    m0,    [r0 + r1 * 2]
+    movhps    m3,    [r0 + r2]
+    DEINTB    1, 0, 4, 3, 7
+    lea       r0,    [r0 + r1 * 4]
+    VAR_CORE
+    movh      m0,    [r0]
+    movh      m3,    [r0 + r1]
+    movhps    m0,    [r0 + r1 * 2]
+    movhps    m3,    [r0 + r2]
+    DEINTB    1, 0, 4, 3, 7
+    VAR_CORE
+    VAR_END 8, 8
+
+; expects: r0 = src, r1 = stride, r2 = 3*stride, m5/m6 live accumulators
+cglobal pixel_var_16x16_internal
+    movu      m0,    [r0]
+    movu      m3,    [r0 + r1]
+    DEINTB    1, 0, 4, 3, 7
+    VAR_CORE
+    movu      m0,    [r0 + 2 * r1]
+    movu      m3,    [r0 + r2]
+    DEINTB    1, 0, 4, 3, 7
+    lea       r0,    [r0 + r1 * 4]
+    VAR_CORE
+    movu      m0,    [r0]
+    movu      m3,    [r0 + r1]
+    DEINTB    1, 0, 4, 3, 7
+    VAR_CORE
+    movu      m0,    [r0 + 2 * r1]
+    movu      m3,    [r0 + r2]
+    DEINTB    1, 0, 4, 3, 7
+    lea       r0,    [r0 + r1 * 4]
+    VAR_CORE
+    movu      m0,    [r0]
+    movu      m3,    [r0 + r1]
+    DEINTB    1, 0, 4, 3, 7
+    VAR_CORE
+    movu      m0,    [r0 + 2 * r1]
+    movu      m3,    [r0 + r2]
+    DEINTB    1, 0, 4, 3, 7
+    lea       r0,    [r0 + r1 * 4]
+    VAR_CORE
+    movu      m0,    [r0]
+    movu      m3,    [r0 + r1]
+    DEINTB    1, 0, 4, 3, 7
+    VAR_CORE
+    movu      m0,    [r0 + 2 * r1]
+    movu      m3,    [r0 + r2]
+    DEINTB    1, 0, 4, 3, 7
+    VAR_CORE
+    ret
+
+cglobal pixel_var_16x16, 2,3,8
+    VAR_START 1
+    lea     r2,    [r1 * 3]
+    call    pixel_var_16x16_internal
+    VAR_END 16, 16
+
+cglobal pixel_var_32x32, 2,4,8
+    VAR_START 1
+    lea     r2,    [r1 * 3]
+    mov     r3,    r0
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r3 + 16]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    VAR_END 32, 32
+
+cglobal pixel_var_64x64, 2,6,8
+    VAR_START 1
+    lea     r2,    [r1 * 3]
+    mov     r3,    r0
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    HADDW     m5,    m2
+    movd      r4d,   m5
+    pxor      m5,    m5
+    lea       r0,    [r3 + 16]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    lea       r0,    [r3 + 32]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r3 + 48]
+    HADDW     m5,    m2
+    movd      r5d,   m5
+    add       r4,    r5
+    pxor      m5,    m5
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    lea       r0,    [r0 + r1 * 4]
+    call    pixel_var_16x16_internal
+    VAR_END 64, 64
+%endmacro ; VAR
+
+; Instantiate the 8-bit variance kernels for each ISA level.
+INIT_XMM sse2
+VAR
+INIT_XMM avx
+VAR
+INIT_XMM xop
+VAR
+
+; AVX2 16x16 variance: pmovzxbw widens a full 16-pixel row per ymm, so no
+; m7 mask is needed (VAR_START 0).  The two 128-bit halves of the ymm
+; accumulators are folded together before the final reductions.
+INIT_YMM avx2
+cglobal pixel_var_16x16, 2,4,7
+    VAR_START 0
+    mov      r2d, 4
+    lea       r3, [r1*3]
+.loop:
+    pmovzxbw  m0, [r0]
+    pmovzxbw  m3, [r0+r1]
+    pmovzxbw  m1, [r0+r1*2]
+    pmovzxbw  m4, [r0+r3]
+    lea       r0, [r0+r1*4]
+    VAR_CORE
+    dec r2d
+    jg .loop
+    vextracti128 xm0, m5, 1
+    vextracti128 xm1, m6, 1
+    paddw  xm5, xm0
+    paddd  xm6, xm1
+    HADDW  xm5, xm2
+    HADDD  xm6, xm1
+%if ARCH_X86_64
+    punpckldq xm5, xm6
+    movq   rax, xm5
+%else
+    movd   eax, xm5
+    movd   edx, xm6
+%endif
+    RET
+%endif ; !HIGH_BIT_DEPTH
+
+; Finish a var2 computation: %2 = word sum reg, %3 = dword sqr reg,
+; %1 = shift (log2 of pixel count).  Stores the reduced sqr through r4
+; (presumably an out-pointer set up by the caller -- confirm at call site)
+; and returns sqr - (sum*sum >> %1) in eax.
+%macro VAR2_END 3
+    HADDW   %2, xm1
+    movd   r1d, %2
+    imul   r1d, r1d
+    HADDD   %3, xm1
+    shr    r1d, %1
+    movd   eax, %3
+    movd  [r4], %3
+    sub    eax, r1d  ; sqr - (sum * sum >> shift)
+    RET
+%endmacro
+
+;int scanPosLast(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* scanCG4x4, const int trSize)
+;{
+;    int scanPosLast = 0;
+;    do
+;    {
+;        const uint32_t cgIdx = (uint32_t)scanPosLast >> MLS_CG_SIZE;
+;
+;        const uint32_t posLast = scan[scanPosLast++];
+;
+;        const int curCoeff = coeff[posLast];
+;        const uint32_t isNZCoeff = (curCoeff != 0);
+;        numSig -= isNZCoeff;
+;
+;        coeffSign[cgIdx] += (uint16_t)(((uint32_t)curCoeff >> 31) << coeffNum[cgIdx]);
+;        coeffFlag[cgIdx] = (coeffFlag[cgIdx] << 1) + (uint16_t)isNZCoeff;
+;        coeffNum[cgIdx] += (uint8_t)isNZCoeff;
+;    }
+;    while (numSig > 0);
+;    return scanPosLast - 1;
+;}
+
+%if ARCH_X86_64 == 1
+; SIMD scanPosLast (see C reference above): walks 4x4 coefficient groups
+; (CGs), producing per-CG sign bits (coeffSign), non-zero flags (coeffFlag)
+; and non-zero counts (coeffNum), and returns the scan position of the last
+; significant coefficient.  Needs BMI2 (pext, shrx) and SSE/AVX byte ops.
+INIT_XMM avx2,bmi2
+cglobal scanPosLast, 7,11,6
+    ; convert unit of Stride(trSize) to int16_t
+    mov         r7d, r7m
+    add         r7d, r7d
+
+    ; loading scan table and convert to Byte
+    mova        m0, [r6]
+    packuswb    m0, [r6 + mmsize]
+    pxor        m1, m0, [pb_15]
+
+    ; clear CG count
+    xor         r9d, r9d
+
+    ; m0 - Zigzag scan table
+    ; m1 - revert order scan table
+    ; m4 - zero
+    ; m5 - ones
+
+    pxor        m4, m4
+    pcmpeqb     m5, m5
+    lea         r8d, [r7d * 3]
+
+.loop:
+    ; position of current CG
+    movzx       r6d, word [r0]
+    lea         r6, [r6 * 2 + r1]
+    add         r0, 16 * 2
+
+    ; loading current CG (4 rows of 4 coeffs, packed to 16 signed bytes)
+    movh        m2, [r6]
+    movhps      m2, [r6 + r7]
+    movh        m3, [r6 + r7 * 2]
+    movhps      m3, [r6 + r8]
+    packsswb    m2, m3
+
+    ; Zigzag
+    pshufb      m3, m2, m0
+    pshufb      m2, m1
+
+    ; get sign: compress sign bits of non-zero coeffs with pext
+    pmovmskb    r6d, m3
+    pcmpeqb     m3, m4
+    pmovmskb    r10d, m3
+    not         r10d
+    pext        r6d, r6d, r10d
+    mov         [r2 + r9 * 2], r6w
+
+    ; get non-zero flag
+    ; TODO: reuse above result with reorder
+    pcmpeqb     m2, m4
+    pxor        m2, m5
+    pmovmskb    r6d, m2
+    mov         [r3 + r9 * 2], r6w
+
+    ; get non-zero number, POPCNT is faster
+    ; (m2 bytes are 0/-1; pabsb+psadbw sums them as a population count)
+    pabsb       m2, m2
+    psadbw      m2, m4
+    movhlps     m3, m2
+    paddd       m2, m3
+    movd        r6d, m2
+    mov         [r4 + r9], r6b
+
+    inc         r9d
+    sub         r5d, r6d
+    jg         .loop
+
+    ; fixup last CG non-zero flag: drop trailing zero bits below the last
+    ; significant coefficient
+    dec         r9d
+    movzx       r0d, word [r3 + r9 * 2]
+;%if cpuflag(bmi1)  ; 2uops?
+;    tzcnt       r1d, r0d
+;%else
+    bsf         r1d, r0d
+;%endif
+    shrx        r0d, r0d, r1d
+    mov         [r3 + r9 * 2], r0w
+
+    ; get last pos = 16*cgIdx + (15 - trailing-zero index in reversed flag)
+    mov         eax, r9d
+    shl         eax, 4
+    xor         r1d, 15
+    add         eax, r1d
+    RET
+
+
+; Scalar x86-64 fallback of scanPosLast: fully unrolled 16-coefficient CG
+; loop mirroring the C reference above, one CG per outer iteration.
+; t3 must be ecx, since it's used for shift.
+%if WIN64
+    DECLARE_REG_TMP 3,1,2,0
+%elif ARCH_X86_64
+    DECLARE_REG_TMP 0,1,2,3
+%else ; X86_32
+    %error Unsupport platform X86_32
+%endif
+INIT_CPUFLAGS
+cglobal scanPosLast_x64, 5,12
+    mov         r10, r3mp
+    movifnidn   t0, r0mp
+    mov         r5d, r5m
+    xor         r11d, r11d                  ; cgIdx
+    xor         r7d, r7d                    ; tmp for non-zero flag
+
+.loop:
+    xor         r8d, r8d                    ; coeffSign[]
+    xor         r9d, r9d                    ; coeffFlag[]
+    xor         t3d, t3d                    ; coeffNum[]
+
+%assign x 0
+%rep 16
+    ; r6d = coeff[scan[x]]; accumulate sign/flag/count for this CG
+    movzx       r6d, word [t0 + x * 2]
+    movsx       r6d, word [t1 + r6 * 2]
+    test        r6d, r6d
+    setnz       r7b
+    shr         r6d, 31
+    shl         r6d, t3b
+    or          r8d, r6d
+    lea         r9, [r9 * 2 + r7]
+    add         t3d, r7d
+%assign x x+1
+%endrep
+
+    ; store latest group data
+    mov         [t2 + r11 * 2], r8w
+    mov         [r10 + r11 * 2], r9w
+    mov         [r4 + r11], t3b
+    inc         r11d
+
+    add         t0, 16 * 2
+    sub         r5d, t3d
+    jnz        .loop
+
+    ; store group data (strip trailing zeros below the last significant bit)
+    bsf         t3d, r9d
+    shr         r9d, t3b
+    mov         [r10 + (r11 - 1) * 2], r9w
+
+    ; get posLast
+    shl         r11d, 4
+    sub         r11d, t3d
+    lea         eax, [r11d - 1]
+    RET
+%endif
+
+
+;-----------------------------------------------------------------------------
+; uint32_t[last first] findPosFirstLast(const int16_t *dstCoeff, const intptr_t trSize, const uint16_t scanTbl[16])
+;-----------------------------------------------------------------------------
+; Returns (last << 16) | first scan positions of non-zero coefficients in
+; one 4x4 CG, using a byte-compare mask reordered by the scan table and
+; bsf/bsr on the resulting 16-bit mask.
+INIT_XMM ssse3
+cglobal findPosFirstLast, 3,3,3
+    ; convert stride to int16_t
+    add         r1d, r1d
+
+    ; loading scan table and convert to Byte
+    mova        m0, [r2]
+    packuswb    m0, [r2 + mmsize]
+
+    ; loading 16 of coeff
+    movh        m1, [r0]
+    movhps      m1, [r0 + r1]
+    movh        m2, [r0 + r1 * 2]
+    lea         r1, [r1 * 3]
+    movhps      m2, [r0 + r1]
+    packsswb    m1, m2
+
+    ; get non-zero mask
+    pxor        m2, m2
+    pcmpeqb     m1, m2
+
+    ; reorder by Zigzag scan
+    pshufb      m1, m0
+
+    ; get First and Last pos
+    pmovmskb    r0d, m1
+    not         r0d
+    bsr         r1w, r0w
+    bsf         eax, r0d    ; side effect: clear AH to Zero
+    shl         r1d, 16
+    or          eax, r1d
+    RET
+
+
+;void saoCuStatsE2_c(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int8_t *upBufft, int endX, int endY, int32_t *stats, int32_t *count)
+;{
+;    X265_CHECK(endX < MAX_CU_SIZE, "endX check failure\n");
+;    X265_CHECK(endY < MAX_CU_SIZE, "endY check failure\n");
+;    int x, y;
+;    int32_t tmp_stats[SAO::NUM_EDGETYPE];
+;    int32_t tmp_count[SAO::NUM_EDGETYPE];
+;    memset(tmp_stats, 0, sizeof(tmp_stats));
+;    memset(tmp_count, 0, sizeof(tmp_count));
+;    for (y = 0; y < endY; y++)
+;    {
+;        upBufft[0] = signOf(rec[stride] - rec[-1]);
+;        for (x = 0; x < endX; x++)
+;        {
+;            int signDown = signOf2(rec[x], rec[x + stride + 1]);
+;            X265_CHECK(signDown == signOf(rec[x] - rec[x + stride + 1]), "signDown check failure\n");
+;            uint32_t edgeType = signDown + upBuff1[x] + 2;
+;            upBufft[x + 1] = (int8_t)(-signDown);
+;            tmp_stats[edgeType] += (fenc[x] - rec[x]);
+;            tmp_count[edgeType]++;
+;        }
+;        std::swap(upBuff1, upBufft);
+;        rec += stride;
+;        fenc += stride;
+;    }
+;    for (x = 0; x < SAO::NUM_EDGETYPE; x++)
+;    {
+;        stats[SAO::s_eoTable[x]] += tmp_stats[x];
+;        count[SAO::s_eoTable[x]] += tmp_count[x];
+;    }
+;}
+
+%if ARCH_X86_64
+; TODO: x64 only because I need temporary register r7,r8, easy portab to x86
+INIT_XMM sse4
+cglobal saoCuStatsE2, 5,9,8,0-32    ; Stack: 5 of stats and 5 of count
+    mov         r5d, r5m
+
+    ; clear internal temporary buffer
+    pxor        m0, m0
+    mova        [rsp], m0
+    mova        [rsp + mmsize], m0
+    mova        m0, [pb_128]
+    mova        m5, [pb_1]
+    mova        m6, [pb_2]
+
+.loopH:
+    ; TODO: merge into below SIMD
+    ; get upBuffX[0]
+    mov         r6b, [r1 + r2]
+    sub         r6b, [r1 -  1]
+    seta        r6b
+    setb        r7b
+    sub         r6b, r7b
+    mov         [r4], r6b
+
+    ; backup unavailable pixels
+    movh        m7, [r4 + r5 + 1]
+
+    mov         r6d, r5d
+.loopW:
+    movu        m1, [r1]
+    movu        m2, [r1 + r2 + 1]
+
+    ; signDown
+    pxor        m1, m0
+    pxor        m2, m0
+    pcmpgtb     m3, m1, m2
+    pand        m3, m5
+    pcmpgtb     m2, m1
+    por         m2, m3
+    pxor        m3, m3
+    psubb       m3, m2
+
+    ; edgeType
+    movu        m4, [r3]
+    paddb       m4, m6
+    paddb       m2, m4
+
+    ; update upBuff1
+    movu        [r4 + 1], m3
+
+    ; stats[edgeType]
+    pxor        m1, m0
+    movu        m3, [r0]
+    punpckhbw   m4, m3, m1
+    punpcklbw   m3, m1
+    pmaddubsw   m3, [hmul_16p + 16]
+    pmaddubsw   m4, [hmul_16p + 16]
+
+    ; 16 pixels
+%assign x 0
+%rep 16
+    pextrb      r7d, m2, x
+    inc    word [rsp + r7 * 2]
+
+  %if (x < 8)
+    pextrw      r8d, m3, (x % 8)
+  %else
+    pextrw      r8d, m4, (x % 8)
+  %endif
+    movsx       r8d, r8w
+    add         [rsp + 5 * 2 + r7 * 4], r8d
+
+    dec         r6d
+    jz         .next
+%assign x x+1
+%endrep
+
+    add         r0, 16
+    add         r1, 16
+    add         r3, 16
+    add         r4, 16
+    jmp         .loopW
+
+.next:
+    xchg        r3, r4
+
+    ; restore pointer upBuff1
+    mov         r6d, r5d
+    and         r6d, 15
+
+    ; move to next row
+    sub         r6, r5
+    add         r3, r6
+    add         r4, r6
+    add         r6, r2
+    add         r0, r6
+    add         r1, r6
+
+    ; restore unavailable pixels
+    movh        [r3 + r5 + 1], m7
+
+    dec    byte r6m
+    jg         .loopH
+
+    ; sum to global buffer
+    mov         r1, r7m
+    mov         r0, r8m
+
+    ; s_eoTable = {1,2,0,3,4}
+    movzx       r6d, word [rsp + 0 * 2]
+    add         [r0 + 1 * 4], r6d
+    movzx       r6d, word [rsp + 1 * 2]
+    add         [r0 + 2 * 4], r6d
+    movzx       r6d, word [rsp + 2 * 2]
+    add         [r0 + 0 * 4], r6d
+    movzx       r6d, word [rsp + 3 * 2]
+    add         [r0 + 3 * 4], r6d
+    movzx       r6d, word [rsp + 4 * 2]
+    add         [r0 + 4 * 4], r6d
+
+    mov         r6d, [rsp + 5 * 2 + 0 * 4]
+    add         [r1 + 1 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 1 * 4]
+    add         [r1 + 2 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 2 * 4]
+    add         [r1 + 0 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 3 * 4]
+    add         [r1 + 3 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 4 * 4]
+    add         [r1 + 4 * 4], r6d
+    RET
+%endif ; ARCH_X86_64
+
+
+;void saoStatE3(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count);
+;{
+;    memset(tmp_stats, 0, sizeof(tmp_stats));
+;    memset(tmp_count, 0, sizeof(tmp_count));
+;    for (y = startY; y < endY; y++)
+;    {
+;        for (x = startX; x < endX; x++)
+;        {
+;            int signDown = signOf2(rec[x], rec[x + stride - 1]);
+;            uint32_t edgeType = signDown + upBuff1[x] + 2;
+;            upBuff1[x - 1] = (int8_t)(-signDown);
+;            tmp_stats[edgeType] += (fenc[x] - rec[x]);
+;            tmp_count[edgeType]++;
+;        }
+;        upBuff1[endX - 1] = signOf(rec[endX - 1 + stride] - rec[endX]);
+;        rec += stride;
+;        fenc += stride;
+;    }
+;    for (x = 0; x < NUM_EDGETYPE; x++)
+;    {
+;        stats[s_eoTable[x]] += tmp_stats[x];
+;        count[s_eoTable[x]] += tmp_count[x];
+;    }
+;}
+
+%if ARCH_X86_64
+INIT_XMM sse4
+cglobal saoCuStatsE3, 4,9,8,0-32    ; Stack: 5 of stats and 5 of count
+    mov         r4d, r4m
+    mov         r5d, r5m
+
+    ; clear internal temporary buffer
+    pxor        m0, m0
+    mova        [rsp], m0
+    mova        [rsp + mmsize], m0
+    mova        m0, [pb_128]
+    mova        m5, [pb_1]
+    mova        m6, [pb_2]
+    movh        m7, [r3 + r4]
+
+.loopH:
+    mov         r6d, r4d
+
+.loopW:
+    movu        m1, [r1]
+    movu        m2, [r1 + r2 - 1]
+
+    ; signDown
+    pxor        m1, m0
+    pxor        m2, m0
+    pcmpgtb     m3, m1, m2
+    pand        m3, m5
+    pcmpgtb     m2, m1
+    por         m2, m3
+    pxor        m3, m3
+    psubb       m3, m2
+
+    ; edgeType
+    movu        m4, [r3]
+    paddb       m4, m6
+    paddb       m2, m4
+
+    ; update upBuff1
+    movu        [r3 - 1], m3
+
+    ; stats[edgeType]
+    pxor        m1, m0
+    movu        m3, [r0]
+    punpckhbw   m4, m3, m1
+    punpcklbw   m3, m1
+    pmaddubsw   m3, [hmul_16p + 16]
+    pmaddubsw   m4, [hmul_16p + 16]
+
+    ; 16 pixels
+%assign x 0
+%rep 16
+    pextrb      r7d, m2, x
+    inc    word [rsp + r7 * 2]
+
+  %if (x < 8)
+    pextrw      r8d, m3, (x % 8)
+  %else
+    pextrw      r8d, m4, (x % 8)
+  %endif
+    movsx       r8d, r8w
+    add         [rsp + 5 * 2 + r7 * 4], r8d
+
+    dec         r6d
+    jz         .next
+%assign x x+1
+%endrep
+
+    add         r0, 16
+    add         r1, 16
+    add         r3, 16
+    jmp         .loopW
+
+.next:
+    ; restore pointer upBuff1
+    mov         r6d, r4d
+    and         r6d, 15
+
+    ; move to next row
+    sub         r6, r4
+    add         r3, r6
+    add         r6, r2
+    add         r0, r6
+    add         r1, r6
+    dec         r5d
+    jg         .loopH
+
+    ; restore unavailable pixels
+    movh        [r3 + r4], m7
+
+    ; sum to global buffer
+    mov         r1, r6m
+    mov         r0, r7m
+
+    ; s_eoTable = {1,2,0,3,4}
+    movzx       r6d, word [rsp + 0 * 2]
+    add         [r0 + 1 * 4], r6d
+    movzx       r6d, word [rsp + 1 * 2]
+    add         [r0 + 2 * 4], r6d
+    movzx       r6d, word [rsp + 2 * 2]
+    add         [r0 + 0 * 4], r6d
+    movzx       r6d, word [rsp + 3 * 2]
+    add         [r0 + 3 * 4], r6d
+    movzx       r6d, word [rsp + 4 * 2]
+    add         [r0 + 4 * 4], r6d
+
+    mov         r6d, [rsp + 5 * 2 + 0 * 4]
+    add         [r1 + 1 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 1 * 4]
+    add         [r1 + 2 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 2 * 4]
+    add         [r1 + 0 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 3 * 4]
+    add         [r1 + 3 * 4], r6d
+    mov         r6d, [rsp + 5 * 2 + 4 * 4]
+    add         [r1 + 4 * 4], r6d
+    RET
+%endif ; ARCH_X86_64
+
+
+; uint32_t costCoeffNxN(uint16_t *scan, coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, uint8_t *tabSigCtx, uint16_t scanFlagMask, uint8_t *baseCtx, int offset, int subPosBase)
+;for (int i = 0; i < MLS_CG_SIZE; i++)
+;{
+;    tmpCoeff[i * MLS_CG_SIZE + 0] = (uint16_t)abs(coeff[blkPosBase + i * trSize + 0]);
+;    tmpCoeff[i * MLS_CG_SIZE + 1] = (uint16_t)abs(coeff[blkPosBase + i * trSize + 1]);
+;    tmpCoeff[i * MLS_CG_SIZE + 2] = (uint16_t)abs(coeff[blkPosBase + i * trSize + 2]);
+;    tmpCoeff[i * MLS_CG_SIZE + 3] = (uint16_t)abs(coeff[blkPosBase + i * trSize + 3]);
+;}
+;do
+;{
+;    uint32_t blkPos, sig, ctxSig;
+;    blkPos = g_scan4x4[codingParameters.scanType][scanPosSigOff];
+;    const uint32_t posZeroMask = (subPosBase + scanPosSigOff) ? ~0 : 0;
+;    sig     = scanFlagMask & 1;
+;    scanFlagMask >>= 1;
+;    if (scanPosSigOff + (subSet == 0) + numNonZero)
+;    {
+;        const uint32_t cnt = tabSigCtx[blkPos] + offset + posOffset;
+;        ctxSig = cnt & posZeroMask;
+;
+;        const uint32_t mstate = baseCtx[ctxSig];
+;        const uint32_t mps = mstate & 1;
+;        const uint32_t stateBits = x265_entropyStateBits[mstate ^ sig];
+;        uint32_t nextState = (stateBits >> 24) + mps;
+;        if ((mstate ^ sig) == 1)
+;            nextState = sig;
+;        baseCtx[ctxSig] = (uint8_t)nextState;
+;        sum += stateBits;
+;    }
+;    absCoeff[numNonZero] = tmpCoeff[blkPos];
+;    numNonZero += sig;
+;    scanPosSigOff--;
+;}
+;while(scanPosSigOff >= 0);
+; sum &= 0xFFFFFF
+
+%if ARCH_X86_64
+; uint32_t costCoeffNxN(uint16_t *scan, coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, uint8_t *tabSigCtx, uint16_t scanFlagMask, uint8_t *baseCtx, int offset, int scanPosSigOff, int subPosBase)
+INIT_XMM sse4
+cglobal costCoeffNxN, 6,11,5
+    add         r2d, r2d
+
+    ; abs(coeff)
+    movh        m1, [r1]
+    movhps      m1, [r1 + r2]
+    movh        m2, [r1 + r2 * 2]
+    lea         r2, [r2 * 3]
+    movhps      m2, [r1 + r2]
+    pabsw       m1, m1
+    pabsw       m2, m2
+    ; r[1-2] free here
+
+    ; WARNING: beyond-bound read here!
+    ; loading scan table
+    mov         r2d, r8m
+    xor         r2d, 15
+    movu        m0, [r0 + r2 * 2]
+    movu        m3, [r0 + r2 * 2 + mmsize]
+    packuswb    m0, m3
+    pxor        m0, [pb_15]
+    xchg        r2d, r8m
+    ; r[0-1] free here
+
+    ; reorder coeff
+    mova        m3, [deinterleave_shuf]
+    pshufb      m1, m3
+    pshufb      m2, m3
+    punpcklqdq  m3, m1, m2
+    punpckhqdq  m1, m2
+    pshufb      m3, m0
+    pshufb      m1, m0
+    punpcklbw   m2, m3, m1
+    punpckhbw   m3, m1
+    ; r[0-1], m[1] free here
+
+    ; loading tabSigCtx (+offset)
+    mova        m1, [r4]
+    pshufb      m1, m0
+    movd        m4, r7m
+    pxor        m5, m5
+    pshufb      m4, m5
+    paddb       m1, m4
+
+    ; register mapping
+    ; m0 - Zigzag
+    ; m1 - sigCtx
+    ; {m3,m2} - abs(coeff)
+    ; r0 - x265_entropyStateBits
+    ; r1 - baseCtx
+    ; r2 - scanPosSigOff
+    ; r3 - absCoeff
+    ; r4 - nonZero
+    ; r5 - scanFlagMask
+    ; r6 - sum
+    lea         r0, [private_prefix %+ _entropyStateBits]
+    mov         r1, r6mp
+    xor         r6d, r6d
+    xor         r4d, r4d
+    xor         r8d, r8d
+
+    test        r2d, r2d
+    jz         .idx_zero
+
+.loop:
+;   {
+;        const uint32_t cnt = tabSigCtx[blkPos] + offset + posOffset;
+;        ctxSig = cnt & posZeroMask;
+;        const uint32_t mstate = baseCtx[ctxSig];
+;        const uint32_t mps = mstate & 1;
+;        const uint32_t stateBits = x265_entropyStateBits[mstate ^ sig];
+;        uint32_t nextState = (stateBits >> 24) + mps;
+;        if ((mstate ^ sig) == 1)
+;            nextState = sig;
+;        baseCtx[ctxSig] = (uint8_t)nextState;
+;        sum += stateBits;
+;    }
+;    absCoeff[numNonZero] = tmpCoeff[blkPos];
+;    numNonZero += sig;
+;    scanPosSigOff--;
+
+    pextrw      [r3 + r4 * 2], m2, 0            ; absCoeff[numNonZero] = tmpCoeff[blkPos]
+    shr         r5d, 1
+    setc        r8b                             ; r8 = sig
+    add         r4d, r8d                        ; numNonZero += sig
+    palignr     m4, m3, m2, 2
+    psrldq      m3, 2
+    mova        m2, m4
+    movd        r7d, m1                         ; r7 = ctxSig
+    movzx       r7d, r7b
+    psrldq      m1, 1
+    movzx       r9d, byte [r1 + r7]             ; mstate = baseCtx[ctxSig]
+    mov         r10d, r9d
+    and         r10d, 1                         ; mps = mstate & 1
+    xor         r9d, r8d                        ; r9 = mstate ^ sig
+    add         r6d, [r0 + r9 * 4]              ; sum += x265_entropyStateBits[mstate ^ sig]
+    add         r10b, byte [r0 + r9 * 4 + 3]    ; nextState = (stateBits >> 24) + mps
+    cmp         r9b, 1
+    cmove       r10d, r8d
+    mov    byte [r1 + r7], r10b
+
+    dec         r2d
+    jg         .loop
+
+.idx_zero:
+    pextrw      [r3 + r4 * 2], m2, 0            ; absCoeff[numNonZero] = tmpCoeff[blkPos]
+    add         r4b, r8m
+    xor         r2d, r2d
+    cmp    word r9m, 0
+    sete        r2b
+    add         r4b, r2b
+    jz         .exit
+
+    dec         r2b
+    movd        r3d, m1
+    and         r2d, r3d
+
+    movzx       r3d, byte [r1 + r2]             ; mstate = baseCtx[ctxSig]
+    mov         r4d, r5d
+    xor         r5d, r3d                        ; r0 = mstate ^ sig
+    and         r3d, 1                          ; mps = mstate & 1
+    add         r6d, [r0 + r5 * 4]              ; sum += x265_entropyStateBits[mstate ^ sig]
+    add         r3b, [r0 + r5 * 4 + 3]          ; nextState = (stateBits >> 24) + mps
+    cmp         r5b, 1
+    cmove       r3d, r4d
+    mov    byte [r1 + r2], r3b
+
+.exit:
+%ifnidn eax,r6d
+    mov         eax, r6d
+%endif
+    and         eax, 0xFFFFFF
+    RET
+%endif ; ARCH_X86_64
+
+
+;uint32_t goRiceParam = 0;
+;int firstCoeff2 = 1;
+;uint32_t baseLevelN = 0x5555AAAA; // 2-bits encode format baseLevel
+;idx = 0;
+;do
+;{
+;    int baseLevel = (baseLevelN & 3) | firstCoeff2;
+;    baseLevelN >>= 2;
+;    int codeNumber = absCoeff[idx] - baseLevel;
+;    if (codeNumber >= 0)
+;    {
+;        uint32_t length = 0;
+;        codeNumber = ((uint32_t)codeNumber >> goRiceParam) - COEF_REMAIN_BIN_REDUCTION;
+;        if (codeNumber >= 0)
+;        {
+;            {
+;                unsigned long cidx;
+;                CLZ(cidx, codeNumber + 1);
+;                length = cidx;
+;            }
+;            codeNumber = (length + length);
+;        }
+;        sum += (COEF_REMAIN_BIN_REDUCTION + 1 + goRiceParam + codeNumber);
+;        if (absCoeff[idx] > (COEF_REMAIN_BIN_REDUCTION << goRiceParam))
+;            goRiceParam = (goRiceParam + 1) - (goRiceParam >> 2);
+;    }
+;    if (absCoeff[idx] >= 2)
+;        firstCoeff2 = 0;
+;    idx++;
+;}
+;while(idx < numNonZero);
+
+; uint32_t costCoeffRemain(uint16_t *absCoeff, int numNonZero, int idx)
+INIT_XMM sse4
+cglobal costCoeffRemain, 0,7,1
+    ; assign RCX to R3
+    ; RAX always in R6 and free
+  %if WIN64
+    DECLARE_REG_TMP 3,1,2,0
+    mov         t0, r0
+    mov         r4d, r2d
+  %elif ARCH_X86_64
+    ; *nix x64 didn't do anything
+    DECLARE_REG_TMP 0,1,2,3
+    mov         r4d, r2d
+  %else ; X86_32
+    DECLARE_REG_TMP 6,3,2,1
+    mov         t0, r0m
+    mov         r4d, r2m
+  %endif
+
+    xor         t3d, t3d
+    xor         r5d, r5d
+
+    lea         t0, [t0 + r4 * 2]
+    mov         r2d, 3
+
+    ; register mapping
+    ; r2d - baseLevel & tmp
+    ; r4d - idx
+    ; t3  - goRiceParam
+    ; eax - absCoeff[idx] & tmp
+    ; r5  - sum
+
+.loop:
+    mov         eax, 1
+    cmp         r4d, 8
+    cmovge      r2d, eax
+
+    movzx       eax, word [t0]
+    add         t0, 2
+    sub         eax, r2d                ; codeNumber = absCoeff[idx] - baseLevel
+    jl         .next
+
+    shr         eax, t3b                ; codeNumber = ((uint32_t)codeNumber >> goRiceParam) - COEF_REMAIN_BIN_REDUCTION
+
+    lea         r2d, [rax - 3 + 1]      ; CLZ(cidx, codeNumber + 1);
+    bsr         r2d, r2d
+    add         r2d, r2d                ; codeNumber = (length + length)
+
+    sub         eax, 3
+    cmovge      eax, r2d
+
+    lea         eax, [3 + 1 + t3 + rax] ; sum += (COEF_REMAIN_BIN_REDUCTION + 1 + goRiceParam + codeNumber)
+    add         r5d, eax
+
+    ; if (absCoeff[idx] > (COEF_REMAIN_BIN_REDUCTION << goRiceParam))
+    ;     goRiceParam = (goRiceParam + 1) - (goRiceParam >> 2);
+    cmp         t3d, 4
+    setl        al
+
+    mov         r2d, 3
+    shl         r2d, t3b
+    cmp         word [t0 - 2], r2w
+    setg        r2b
+    and         al, r2b
+    add         t3b, al
+
+.next:
+    inc         r4d
+    mov         r2d, 2
+    cmp         r4d, r1m
+    jl         .loop
+
+    mov         eax, r5d
+    RET
+
+
+; uint32_t costC1C2Flag(uint16_t *absCoeff, intptr_t numC1Flag, uint8_t *baseCtxMod, intptr_t ctxOffset)
+;idx = 0;
+;do
+;{
+;    uint32_t symbol1 = absCoeff[idx] > 1;
+;    uint32_t symbol2 = absCoeff[idx] > 2;
+;    {
+;        const uint32_t mstate = baseCtxMod[c1];
+;        baseCtxMod[c1] = sbacNext(mstate, symbol1);
+;        sum += sbacGetEntropyBits(mstate, symbol1);
+;    }
+;    if (symbol1)
+;        c1Next = 0;
+;    if (symbol1 + firstC2Flag == 3)
+;        firstC2Flag = symbol2;
+;    if (symbol1 + firstC2Idx == 9)
+;        firstC2Idx  = idx;
+;    c1 = (c1Next & 3);
+;    c1Next >>= 2;
+;    idx++;
+;}
+;while(idx < numC1Flag);
+;if (!c1)
+;{
+;    baseCtxMod = &m_contextState[(bIsLuma ? 0 : NUM_ABS_FLAG_CTX_LUMA) + OFF_ABS_FLAG_CTX + ctxSet];
+;    {
+;        const uint32_t mstate = baseCtxMod[0];
+;        baseCtxMod[0] = sbacNext(mstate, firstC2Flag);
+;        sum += sbacGetEntropyBits(mstate, firstC2Flag);
+;    }
+;}
+;m_fracBits += (sum & 0xFFFFFF);
+
+
+; TODO: we need more register, so I writen code as x64 only, but it is easy to portab to x86 platform
+%if ARCH_X86_64
+INIT_XMM sse2
+cglobal costC1C2Flag, 4,12,2
+
+    mova        m0, [r0]
+    packsswb    m0, m0
+
+    pcmpgtb     m1, m0, [pb_1]
+    pcmpgtb     m0, [pb_2]
+
+    ; get mask for 'X>1'
+    pmovmskb    r0d, m1
+    mov         r11d, r0d
+
+    ; clear unavailable coeff flags
+    xor         r6d, r6d
+    bts         r6d, r1d
+    dec         r6d
+    and         r11d, r6d
+
+    ; calculate firstC2Idx
+    or          r11d, 0x100                     ; default value setting to 8
+    bsf         r11d, r11d
+
+    lea         r5, [private_prefix %+ _entropyStateBits]
+    xor         r6d, r6d
+    mov         r4d, 0xFFFFFFF9
+
+    ; register mapping
+    ; r4d       - nextC1
+    ; r5        - x265_entropyStateBits
+    ; r6d       - sum
+    ; r[7-10]   - tmp
+    ; r11d      - firstC2Idx (not use in loop)
+
+    ; process c1 flag
+.loop:
+    ; const uint32_t mstate = baseCtx[ctxSig];
+    ; const uint32_t mps = mstate & 1;
+    ; const uint32_t stateBits = x265_entropyStateBits[mstate ^ sig];
+    ; uint32_t nextState = (stateBits >> 24) + mps;
+    ; if ((mstate ^ sig) == 1)
+    ;     nextState = sig;
+    mov         r10d, r4d                       ; c1
+    and         r10d, 3
+    shr         r4d, 2
+
+    xor         r7d, r7d
+    shr         r0d, 1
+    cmovc       r4d, r7d                        ; c1 <- 0 when C1Flag=1
+    setc        r7b                             ; symbol1
+
+    movzx       r8d, byte [r2 + r10]            ; mstate = baseCtx[c1]
+    mov         r9d, r7d                        ; sig = symbol1
+    xor         r7d, r8d                        ; mstate ^ sig
+    and         r8d, 1                          ; mps = mstate & 1
+    add         r6d, [r5 + r7 * 4]              ; sum += x265_entropyStateBits[mstate ^ sig]
+    add         r8b, [r5 + r7 * 4 + 3]          ; nextState = (stateBits >> 24) + mps
+    cmp         r7b, 1                          ; if ((mstate ^ sig) == 1) nextState = sig;
+    cmove       r8d, r9d
+    mov    byte [r2 + r10], r8b
+
+    dec         r1d
+    jg         .loop
+
+    ; check and generate c1 flag
+    shl         r4d, 30
+    jnz        .quit
+
+    ; move to c2 ctx
+    add         r2, r3
+
+    ; process c2 flag
+    pmovmskb    r8d, m0
+    bt          r8d, r11d
+    setc        r7b
+
+    movzx       r8d, byte [r2]                  ; mstate = baseCtx[c1]
+    mov         r1d, r7d                        ; sig = symbol1
+    xor         r7d, r8d                        ; mstate ^ sig
+    and         r8d, 1                          ; mps = mstate & 1
+    add         r6d, [r5 + r7 * 4]              ; sum += x265_entropyStateBits[mstate ^ sig]
+    add         r8b, [r5 + r7 * 4 + 3]          ; nextState = (stateBits >> 24) + mps
+    cmp         r7b, 1                          ; if ((mstate ^ sig) == 1) nextState = sig;
+    cmove       r8d, r1d
+    mov    byte [r2], r8b
+
+.quit:
+    shrd        r4d, r11d, 4
+%ifnidn r6d,eax
+    mov         eax, r6d
+%endif
+    and         eax, 0x00FFFFFF
+    or          eax, r4d
+    RET
+%endif ; ARCH_X86_64
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/pixel.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,69 @@
+/*****************************************************************************
+ * pixel.h: x86 pixel metrics
+ *****************************************************************************
+ * Copyright (C) 2003-2013 x264 project
+ *
+ * Authors: Laurent Aimar <fenrir@via.ecp.fr>
+ *          Loren Merritt <lorenm@u.washington.edu>
+ *          Fiona Glaser <fiona@x264.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_I386_PIXEL_H
+#define X265_I386_PIXEL_H
+
+void PFX(downShift_16_sse2)(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask);
+void PFX(downShift_16_avx2)(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask);
+void PFX(upShift_16_sse2)(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask);
+void PFX(upShift_16_avx2)(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask);
+void PFX(upShift_8_sse4)(const uint8_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift);
+void PFX(upShift_8_avx2)(const uint8_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift);
+pixel PFX(planeClipAndMax_avx2)(pixel *src, intptr_t stride, int width, int height, uint64_t *outsum, const pixel minPix, const pixel maxPix);
+
+#define DECL_PIXELS(cpu) \
+    FUNCDEF_PU(uint32_t, pixel_ssd, cpu, const pixel*, intptr_t, const pixel*, intptr_t); \
+    FUNCDEF_PU(int, pixel_sa8d, cpu, const pixel*, intptr_t, const pixel*, intptr_t); \
+    FUNCDEF_PU(void, pixel_sad_x3, cpu, const pixel*, const pixel*, const pixel*, const pixel*, intptr_t, int32_t*); \
+    FUNCDEF_PU(void, pixel_sad_x4, cpu, const pixel*, const pixel*, const pixel*, const pixel*, const pixel*, intptr_t, int32_t*); \
+    FUNCDEF_PU(void, pixel_avg, cpu, pixel* dst, intptr_t dstride, const pixel* src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int); \
+    FUNCDEF_PU(void, pixel_add_ps, cpu, pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1); \
+    FUNCDEF_PU(void, pixel_sub_ps, cpu, int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1); \
+    FUNCDEF_CHROMA_PU(int, pixel_satd, cpu, const pixel*, intptr_t, const pixel*, intptr_t); \
+    FUNCDEF_CHROMA_PU(int, pixel_sad, cpu, const pixel*, intptr_t, const pixel*, intptr_t); \
+    FUNCDEF_CHROMA_PU(uint32_t, pixel_ssd_ss, cpu, const int16_t*, intptr_t, const int16_t*, intptr_t); \
+    FUNCDEF_CHROMA_PU(void, addAvg, cpu, const int16_t*, const int16_t*, pixel*, intptr_t, intptr_t, intptr_t); \
+    FUNCDEF_CHROMA_PU(int, pixel_ssd_s, cpu, const int16_t*, intptr_t); \
+    FUNCDEF_TU_S(int, pixel_ssd_s, cpu, const int16_t*, intptr_t); \
+    FUNCDEF_TU(uint64_t, pixel_var, cpu, const pixel*, intptr_t); \
+    FUNCDEF_TU(int, psyCost_pp, cpu, const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride); \
+    FUNCDEF_TU(int, psyCost_ss, cpu, const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride)
+
+DECL_PIXELS(mmx);
+DECL_PIXELS(mmx2);
+DECL_PIXELS(sse2);
+DECL_PIXELS(sse3);
+DECL_PIXELS(sse4);
+DECL_PIXELS(ssse3);
+DECL_PIXELS(avx);
+DECL_PIXELS(xop);
+DECL_PIXELS(avx2);
+
+#undef DECL_PIXELS
+
+#endif // ifndef X265_I386_PIXEL_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/pixeladd8.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1146 @@
+;*****************************************************************************
+;* Copyright (C) 2013 x265 project
+;*
+;* Authors: Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32
+
+SECTION .text
+
+cextern pw_pixel_max
+
+;-----------------------------------------------------------------------------
+; void pixel_add_ps_4x4(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
+;-----------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal pixel_add_ps_4x4, 6, 6, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+    mova    m1,     [pw_pixel_max]
+    pxor    m0,     m0
+    add     r4,     r4
+    add     r5,     r5
+    add     r1,     r1
+    movh    m2,     [r2]
+    movhps  m2,     [r2 + r4]
+    movh    m3,     [r3]
+    movhps  m3,     [r3 + r5]
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+    movh    m4,     [r2]
+    movhps  m4,     [r2 + r4]
+    movh    m5,     [r3]
+    movhps  m5,     [r3 + r5]
+
+    paddw   m2,     m3
+    paddw   m4,     m5
+    CLIPW2  m2, m4, m0, m1
+
+    movh    [r0],       m2
+    movhps  [r0 + r1],  m2
+    lea     r0,     [r0 + r1 * 2]
+    movh    [r0],       m4
+    movhps  [r0 + r1],  m4
+
+    RET
+%else
+INIT_XMM sse4
+cglobal pixel_add_ps_4x4, 6, 6, 8, dest, destride, src0, scr1, srcStride0, srcStride1
+    add         r5,         r5
+    pmovzxbw    m0,         [r2]
+    pmovzxbw    m2,         [r2 + r4]
+    movh        m1,         [r3]
+    movh        m3,         [r3 + r5]
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+    pmovzxbw    m4,         [r2]
+    pmovzxbw    m6,         [r2 + r4]
+    movh        m5,         [r3]
+    movh        m7,         [r3 + r5]
+
+    paddw       m0,         m1
+    paddw       m2,         m3
+    paddw       m4,         m5
+    paddw       m6,         m7
+    packuswb    m0,         m0
+    packuswb    m2,         m2
+    packuswb    m4,         m4
+    packuswb    m6,         m6
+
+    movd        [r0],       m0
+    movd        [r0 + r1],  m2
+    lea         r0,         [r0 + r1 * 2]
+    movd        [r0],       m4
+    movd        [r0 + r1],  m6
+
+    RET
+%endif
+
+
+;-----------------------------------------------------------------------------
+; void pixel_add_ps_4x%2(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
+;-----------------------------------------------------------------------------
+%macro PIXEL_ADD_PS_W4_H4 2
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal pixel_add_ps_4x%2, 6, 7, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+    mova    m1,     [pw_pixel_max]
+    pxor    m0,     m0
+    mov     r6d,    %2/4
+    add     r4,     r4
+    add     r5,     r5
+    add     r1,     r1
+.loop:
+    movh    m2,     [r2]
+    movhps  m2,     [r2 + r4]
+    movh    m3,     [r3]
+    movhps  m3,     [r3 + r5]
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+    movh    m4,     [r2]
+    movhps  m4,     [r2 + r4]
+    movh    m5,     [r3]
+    movhps  m5,     [r3 + r5]
+    dec     r6d
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    paddw   m2,     m3
+    paddw   m4,     m5
+    CLIPW2  m2, m4, m0, m1
+
+    movh    [r0],       m2
+    movhps  [r0 + r1],  m2
+    lea     r0,     [r0 + r1 * 2]
+    movh    [r0],       m4
+    movhps  [r0 + r1],  m4
+    lea     r0,     [r0 + r1 * 2]
+
+    jnz     .loop
+    RET
+%else
+INIT_XMM sse4
+cglobal pixel_add_ps_4x%2, 6, 7, 8, dest, destride, src0, scr1, srcStride0, srcStride1
+    mov         r6d,        %2/4
+    add         r5,         r5
+.loop:
+    pmovzxbw    m0,         [r2]
+    pmovzxbw    m2,         [r2 + r4]
+    movh        m1,         [r3]
+    movh        m3,         [r3 + r5]
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+    pmovzxbw    m4,         [r2]
+    pmovzxbw    m6,         [r2 + r4]
+    movh        m5,         [r3]
+    movh        m7,         [r3 + r5]
+    dec         r6d
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+
+    paddw       m0,         m1
+    paddw       m2,         m3
+    paddw       m4,         m5
+    paddw       m6,         m7
+    packuswb    m0,         m0
+    packuswb    m2,         m2
+    packuswb    m4,         m4
+    packuswb    m6,         m6
+
+    movd        [r0],       m0
+    movd        [r0 + r1],  m2
+    lea         r0,         [r0 + r1 * 2]
+    movd        [r0],       m4
+    movd        [r0 + r1],  m6
+    lea         r0,         [r0 + r1 * 2]
+
+    jnz         .loop
+    RET
+%endif
+%endmacro
+
+PIXEL_ADD_PS_W4_H4   4,  8
+
+
+;-----------------------------------------------------------------------------
+; void pixel_add_ps_8x%2(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
+; dest = src0 + src1, clamped to the valid pixel range; processes 4 rows/iter.
+;-----------------------------------------------------------------------------
+%macro PIXEL_ADD_PS_W8_H4 2    ; %1 = width (8), %2 = block height
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal pixel_add_ps_8x%2, 6, 7, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+    mova    m5,     [pw_pixel_max]    ; upper clamp bound per 16-bit sample
+    pxor    m4,     m4                ; lower clamp bound = 0
+    mov     r6d,    %2/4              ; iteration count: 4 rows per pass
+    add     r4,     r4                ; strides are in samples; x2 for 16-bit
+    add     r5,     r5
+    add     r1,     r1
+.loop:
+    movu    m0,     [r2]              ; rows 0-1 of src0 / src1
+    movu    m2,     [r2 + r4]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + r5]
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5            ; clamp both rows to [0, pw_pixel_max]
+
+    movu    [r0],       m0
+    movu    [r0 + r1],  m2
+
+    movu    m0,     [r2]              ; rows 2-3
+    movu    m2,     [r2 + r4]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + r5]
+    dec     r6d                       ; flags consumed by jnz below
+    lea     r0,     [r0 + r1 * 2]
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0],       m0
+    movu    [r0 + r1],  m2
+    lea     r0,     [r0 + r1 * 2]
+
+    jnz     .loop
+    RET
+%else
+INIT_XMM sse4
+cglobal pixel_add_ps_8x%2, 6, 7, 8, dest, destride, src0, scr1, srcStride0, srcStride1
+    mov         r6d,        %2/4      ; 4 rows per iteration
+    add         r5,         r5        ; src1 stride in int16 units -> bytes
+.loop:
+    pmovzxbw    m0,         [r2]      ; widen 8 src0 bytes to words
+    pmovzxbw    m2,         [r2 + r4]
+    movu        m1,         [r3]      ; src1 is already 16-bit residual
+    movu        m3,         [r3 + r5]
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+    pmovzxbw    m4,         [r2]
+    pmovzxbw    m6,         [r2 + r4]
+    movu        m5,         [r3]
+    movu        m7,         [r3 + r5]
+    dec         r6d                   ; flags consumed by jnz below
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+
+    paddw       m0,         m1
+    paddw       m2,         m3
+    paddw       m4,         m5
+    paddw       m6,         m7
+    packuswb    m0,         m0        ; saturate words back to bytes
+    packuswb    m2,         m2
+    packuswb    m4,         m4
+    packuswb    m6,         m6
+
+    movh        [r0],       m0        ; store 8 pixels per row
+    movh        [r0 + r1],  m2
+    lea         r0,         [r0 + r1 * 2]
+    movh        [r0],       m4
+    movh        [r0 + r1],  m6
+    lea         r0,         [r0 + r1 * 2]
+
+    jnz         .loop
+    RET
+%endif
+%endmacro
+
+PIXEL_ADD_PS_W8_H4 8,  8
+PIXEL_ADD_PS_W8_H4 8, 16
+
+
+;-----------------------------------------------------------------------------
+; void pixel_add_ps_16x%2(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
+; dest = src0 + src1, clamped to the valid pixel range; processes 4 rows/iter.
+;-----------------------------------------------------------------------------
+%macro PIXEL_ADD_PS_W16_H4 2    ; %1 = width (16), %2 = block height
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal pixel_add_ps_16x%2, 6, 7, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+    mova    m5,     [pw_pixel_max]    ; upper clamp bound
+    pxor    m4,     m4                ; lower clamp bound = 0
+    mov     r6d,    %2/4              ; 4 rows per iteration
+    add     r4,     r4                ; strides in samples -> bytes (16-bit)
+    add     r5,     r5
+    add     r1,     r1
+.loop:
+    movu    m0,     [r2]              ; row 0: 16 samples = two 16-byte loads
+    movu    m2,     [r2 + 16]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + 16]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0],       m0
+    movu    [r0 + 16],  m2
+
+    movu    m0,     [r2 + r4]         ; row 1
+    movu    m2,     [r2 + r4 + 16]
+    movu    m1,     [r3 + r5]
+    movu    m3,     [r3 + r5 + 16]
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r1],      m0
+    movu    [r0 + r1 + 16], m2
+
+    movu    m0,     [r2]              ; row 2 (pointers already advanced)
+    movu    m2,     [r2 + 16]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + 16]
+    lea     r0,     [r0 + r1 * 2]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0],       m0
+    movu    [r0 + 16],  m2
+
+    movu    m0,     [r2 + r4]         ; row 3
+    movu    m2,     [r2 + r4 + 16]
+    movu    m1,     [r3 + r5]
+    movu    m3,     [r3 + r5 + 16]
+    dec     r6d                       ; flags consumed by jnz below
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r1],      m0
+    movu    [r0 + r1 + 16], m2
+    lea     r0,     [r0 + r1 * 2]
+
+    jnz     .loop
+    RET
+%else
+INIT_XMM sse4
+cglobal pixel_add_ps_16x%2, 6, 7, 8, dest, destride, src0, scr1, srcStride0, srcStride1
+    mov         r6d,        %2/4      ; 4 rows per iteration
+    add         r5,         r5        ; src1 stride in int16 units -> bytes
+.loop:
+    pmovzxbw    m0,         [r2]      ; rows 0-1: widen src0 bytes to words
+    pmovzxbw    m1,         [r2 + 8]
+    pmovzxbw    m4,         [r2 + r4]
+    pmovzxbw    m5,         [r2 + r4 + 8]
+    movu        m2,         [r3]      ; src1 is 16-bit residual
+    movu        m3,         [r3 + 16]
+    movu        m6,         [r3 + r5]
+    movu        m7,         [r3 + r5 + 16]
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+
+    paddw       m0,         m2
+    paddw       m1,         m3
+    paddw       m4,         m6
+    paddw       m5,         m7
+    packuswb    m0,         m1        ; saturate 16 words to 16 bytes
+    packuswb    m4,         m5
+
+    movu        [r0],       m0
+    movu        [r0 + r1],  m4
+
+    pmovzxbw    m0,         [r2]      ; rows 2-3
+    pmovzxbw    m1,         [r2 + 8]
+    pmovzxbw    m4,         [r2 + r4]
+    pmovzxbw    m5,         [r2 + r4 + 8]
+    movu        m2,         [r3]
+    movu        m3,         [r3 + 16]
+    movu        m6,         [r3 + r5]
+    movu        m7,         [r3 + r5 + 16]
+    dec         r6d                   ; flags consumed by jnz below
+    lea         r0,         [r0 + r1 * 2]
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+
+    paddw       m0,         m2
+    paddw       m1,         m3
+    paddw       m4,         m6
+    paddw       m5,         m7
+    packuswb    m0,         m1
+    packuswb    m4,         m5
+
+    movu        [r0],       m0
+    movu        [r0 + r1],  m4
+    lea         r0,         [r0 + r1 * 2]
+
+    jnz         .loop
+    RET
+%endif
+%endmacro
+PIXEL_ADD_PS_W16_H4 16, 16
+PIXEL_ADD_PS_W16_H4 16, 32
+
+;-----------------------------------------------------------------------------
+; void pixel_add_ps_16x%1(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
+; AVX2 variant: dest = clip(src0 + src1); processes 4 rows per iteration.
+;-----------------------------------------------------------------------------
+%macro PIXEL_ADD_PS_W16_H4_avx2 1    ; %1 = block height (16 or 32)
+%if HIGH_BIT_DEPTH
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_add_ps_16x%1, 6, 10, 4, dest, destride, src0, scr1, srcStride0, srcStride1
+    mova    m3,     [pw_pixel_max]   ; upper clamp bound
+    pxor    m2,     m2               ; lower clamp bound = 0
+    mov     r6d,    %1/4             ; 4 rows per iteration
+    add     r4d,    r4d              ; strides in samples -> bytes (16-bit)
+    add     r5d,    r5d
+    add     r1d,    r1d
+    lea     r7,     [r4 * 3]         ; precomputed 3*stride for row 3
+    lea     r8,     [r5 * 3]
+    lea     r9,     [r1 * 3]
+
+.loop:
+    movu    m0,     [r2]             ; row 0: 16 samples fit one ymm
+    movu    m1,     [r3]
+    paddw   m0,     m1
+    CLIPW   m0, m2, m3
+    movu    [r0],              m0
+
+    movu    m0,     [r2 + r4]        ; row 1
+    movu    m1,     [r3 + r5]
+    paddw   m0,     m1
+    CLIPW   m0, m2, m3
+    movu    [r0 + r1],         m0
+
+    movu    m0,     [r2 + r4 * 2]    ; row 2
+    movu    m1,     [r3 + r5 * 2]
+    paddw   m0,     m1
+    CLIPW   m0, m2, m3
+    movu    [r0 + r1 * 2],     m0
+
+    movu    m0,     [r2 + r7]        ; row 3 via 3*stride
+    movu    m1,     [r3 + r8]
+    paddw   m0,     m1
+    CLIPW   m0, m2, m3
+    movu    [r0 + r9],         m0
+
+    dec     r6d
+    lea     r0,     [r0 + r1 * 4]
+    lea     r2,     [r2 + r4 * 4]
+    lea     r3,     [r3 + r5 * 4]
+    jnz     .loop
+    RET
+%endif
+%else
+INIT_YMM avx2
+cglobal pixel_add_ps_16x%1, 6, 7, 8, dest, destride, src0, scr1, srcStride0, srcStride1
+    mov         r6d,        %1/4     ; 4 rows per iteration
+    add         r5,         r5       ; src1 stride in int16 units -> bytes
+.loop:
+
+    pmovzxbw    m0,         [r2]        ; row 0 of src0
+    pmovzxbw    m1,         [r2 + r4]   ; row 1 of src0
+    movu        m2,        [r3]        ; row 0 of src1
+    movu        m3,        [r3 + r5]   ; row 1 of src1
+    paddw       m0,         m2
+    paddw       m1,         m3
+    packuswb    m0,         m1          ; lanes interleaved; fixed by vpermq below
+
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+
+    pmovzxbw    m2,         [r2]        ; row 2 of src0
+    pmovzxbw    m3,         [r2 + r4]   ; row 3 of src0
+    movu        m4,        [r3]        ; row 2 of src1
+    movu        m5,        [r3 + r5]   ; row 3 of src1
+    paddw       m2,         m4
+    paddw       m3,         m5
+    packuswb    m2,         m3
+
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+
+    vpermq      m0, m0, 11011000b       ; undo in-lane packuswb interleave
+    movu        [r0],      xm0           ; row 0 of dst
+    vextracti128 xm3, m0, 1
+    movu        [r0 + r1], xm3           ; row 1 of dst
+
+    lea         r0,         [r0 + r1 * 2]
+    vpermq      m2, m2, 11011000b
+    movu        [r0],      xm2           ; row 2 of dst
+    vextracti128 xm3, m2, 1
+    movu         [r0 + r1], xm3          ; row 3 of dst
+
+    lea         r0,         [r0 + r1 * 2]
+
+    dec         r6d
+    jnz         .loop
+
+    RET
+%endif
+%endmacro
+
+PIXEL_ADD_PS_W16_H4_avx2 16
+PIXEL_ADD_PS_W16_H4_avx2 32
+
+
+;-----------------------------------------------------------------------------
+; void pixel_add_ps_32x%2(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
+; dest = src0 + src1, clamped to the valid pixel range; processes 2 rows/iter.
+;-----------------------------------------------------------------------------
+%macro PIXEL_ADD_PS_W32_H2 2    ; %1 = width (32), %2 = block height
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal pixel_add_ps_32x%2, 6, 7, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+    mova    m5,     [pw_pixel_max]    ; upper clamp bound
+    pxor    m4,     m4                ; lower clamp bound = 0
+    mov     r6d,    %2/2              ; 2 rows per iteration
+    add     r4,     r4                ; strides in samples -> bytes (16-bit)
+    add     r5,     r5
+    add     r1,     r1
+.loop:
+    movu    m0,     [r2]              ; row 0, samples 0-15
+    movu    m2,     [r2 + 16]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + 16]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0],       m0
+    movu    [r0 + 16],  m2
+
+    movu    m0,     [r2 + 32]         ; row 0, samples 16-31
+    movu    m2,     [r2 + 48]
+    movu    m1,     [r3 + 32]
+    movu    m3,     [r3 + 48]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + 32],  m0
+    movu    [r0 + 48],  m2
+
+    movu    m0,     [r2 + r4]         ; row 1, samples 0-15
+    movu    m2,     [r2 + r4 + 16]
+    movu    m1,     [r3 + r5]
+    movu    m3,     [r3 + r5 + 16]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r1],      m0
+    movu    [r0 + r1 + 16], m2
+
+    movu    m0,     [r2 + r4 + 32]    ; row 1, samples 16-31
+    movu    m2,     [r2 + r4 + 48]
+    movu    m1,     [r3 + r5 + 32]
+    movu    m3,     [r3 + r5 + 48]
+    dec     r6d                       ; flags consumed by jnz below
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r1 + 32], m0
+    movu    [r0 + r1 + 48], m2
+    lea     r0,     [r0 + r1 * 2]
+
+    jnz     .loop
+    RET
+%else
+INIT_XMM sse4
+cglobal pixel_add_ps_32x%2, 6, 7, 8, dest, destride, src0, scr1, srcStride0, srcStride1
+    mov         r6d,        %2/2      ; 2 rows per iteration
+    add         r5,         r5        ; src1 stride in int16 units -> bytes
+.loop:
+    pmovzxbw    m0,         [r2]      ; row 0: widen 32 src0 bytes to words
+    pmovzxbw    m1,         [r2 + 8]
+    pmovzxbw    m2,         [r2 + 16]
+    pmovzxbw    m3,         [r2 + 24]
+    movu        m4,         [r3]      ; src1 word offsets: pixel idx * 2
+    movu        m5,         [r3 + 16]
+    movu        m6,         [r3 + 32]
+    movu        m7,         [r3 + 48]
+
+    paddw       m0,         m4
+    paddw       m1,         m5
+    paddw       m2,         m6
+    paddw       m3,         m7
+    packuswb    m0,         m1        ; saturate back to bytes
+    packuswb    m2,         m3
+
+    movu        [r0],       m0
+    movu        [r0 + 16],  m2
+
+    pmovzxbw    m0,         [r2 + r4] ; row 1
+    pmovzxbw    m1,         [r2 + r4 + 8]
+    pmovzxbw    m2,         [r2 + r4 + 16]
+    pmovzxbw    m3,         [r2 + r4 + 24]
+    movu        m4,         [r3 + r5]
+    movu        m5,         [r3 + r5 + 16]
+    movu        m6,         [r3 + r5 + 32]
+    movu        m7,         [r3 + r5 + 48]
+    dec         r6d                   ; flags consumed by jnz below
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+
+    paddw       m0,         m4
+    paddw       m1,         m5
+    paddw       m2,         m6
+    paddw       m3,         m7
+    packuswb    m0,         m1
+    packuswb    m2,         m3
+
+    movu        [r0 + r1],      m0
+    movu        [r0 + r1 + 16], m2
+    lea         r0,         [r0 + r1 * 2]
+
+    jnz         .loop
+    RET
+%endif
+%endmacro
+PIXEL_ADD_PS_W32_H2 32, 32
+PIXEL_ADD_PS_W32_H2 32, 64
+
+;-----------------------------------------------------------------------------
+; void pixel_add_ps_32x%1(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
+; AVX2 variant: dest = clip(src0 + src1); processes 4 rows per iteration.
+;-----------------------------------------------------------------------------
+%macro PIXEL_ADD_PS_W32_H4_avx2 1    ; %1 = block height (32 or 64)
+%if HIGH_BIT_DEPTH
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_add_ps_32x%1, 6, 10, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+    mova    m5,     [pw_pixel_max]   ; upper clamp bound
+    pxor    m4,     m4               ; lower clamp bound = 0
+    mov     r6d,    %1/4             ; 4 rows per iteration
+    add     r4d,    r4d              ; strides in samples -> bytes (16-bit)
+    add     r5d,    r5d
+    add     r1d,    r1d
+    lea     r7,     [r4 * 3]         ; precomputed 3*stride for row 3
+    lea     r8,     [r5 * 3]
+    lea     r9,     [r1 * 3]
+
+.loop:
+    movu    m0,     [r2]             ; row 0: 32 samples = two ymm loads
+    movu    m2,     [r2 + 32]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + 32]
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0],               m0
+    movu    [r0 + 32],          m2
+
+    movu    m0,     [r2 + r4]        ; row 1
+    movu    m2,     [r2 + r4 + 32]
+    movu    m1,     [r3 + r5]
+    movu    m3,     [r3 + r5 + 32]
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r1],          m0
+    movu    [r0 + r1 + 32],     m2
+
+    movu    m0,     [r2 + r4 * 2]    ; row 2
+    movu    m2,     [r2 + r4 * 2 + 32]
+    movu    m1,     [r3 + r5 * 2]
+    movu    m3,     [r3 + r5 * 2 + 32]
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r1 * 2],      m0
+    movu    [r0 + r1 * 2 + 32], m2
+
+    movu    m0,     [r2 + r7]        ; row 3 via 3*stride
+    movu    m2,     [r2 + r7 + 32]
+    movu    m1,     [r3 + r8]
+    movu    m3,     [r3 + r8 + 32]
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r9],          m0
+    movu    [r0 + r9 + 32],     m2
+
+    dec     r6d
+    lea     r0,     [r0 + r1 * 4]
+    lea     r2,     [r2 + r4 * 4]
+    lea     r3,     [r3 + r5 * 4]
+    jnz     .loop
+    RET
+%endif
+%else
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_add_ps_32x%1, 6, 10, 8, dest, destride, src0, scr1, srcStride0, srcStride1
+    mov         r6d,        %1/4     ; 4 rows per iteration
+    add         r5,         r5       ; src1 stride in int16 units -> bytes
+    lea         r7,         [r4 * 3] ; precomputed 3*stride for row 3
+    lea         r8,         [r5 * 3]
+    lea         r9,         [r1 * 3]
+.loop:
+    pmovzxbw    m0,         [r2]                ; first half of row 0 of src0
+    pmovzxbw    m1,         [r2 + 16]           ; second half of row 0 of src0
+    movu        m2,         [r3]                ; first half of row 0 of src1
+    movu        m3,         [r3 + 32]           ; second half of row 0 of src1
+
+    paddw       m0,         m2
+    paddw       m1,         m3
+    packuswb    m0,         m1
+    vpermq      m0, m0, 11011000b               ; undo in-lane packuswb interleave
+    movu        [r0],      m0                   ; row 0 of dst
+
+    pmovzxbw    m0,         [r2 + r4]           ; first half of row 1 of src0
+    pmovzxbw    m1,         [r2 + r4 + 16]      ; second half of row 1 of src0
+    movu        m2,         [r3 + r5]           ; first half of row 1 of src1
+    movu        m3,         [r3 + r5 + 32]      ; second half of row 1 of src1
+
+    paddw       m0,         m2
+    paddw       m1,         m3
+    packuswb    m0,         m1
+    vpermq      m0, m0, 11011000b
+    movu        [r0 + r1],      m0              ; row 1 of dst
+
+    pmovzxbw    m0,         [r2 + r4 * 2]       ; first half of row 2 of src0
+    pmovzxbw    m1,         [r2 + r4 * 2 + 16]  ; second half of row 2 of src0
+    movu        m2,         [r3 + r5 * 2]       ; first half of row 2 of src1
+    movu        m3,         [r3 + r5 * 2 + 32]  ; second half of row 2 of src1
+
+    paddw       m0,         m2
+    paddw       m1,         m3
+    packuswb    m0,         m1
+    vpermq      m0, m0, 11011000b
+    movu        [r0 + r1 * 2],      m0          ; row 2 of dst
+
+    pmovzxbw    m0,         [r2 + r7]           ; first half of row 3 of src0
+    pmovzxbw    m1,         [r2 + r7 + 16]      ; second half of row 3 of src0
+    movu        m2,         [r3 + r8]           ; first half of row 3 of src1
+    movu        m3,         [r3 + r8 + 32]      ; second half of row 3 of src1
+
+    paddw       m0,         m2
+    paddw       m1,         m3
+    packuswb    m0,         m1
+    vpermq      m0, m0, 11011000b
+    movu        [r0 + r9],      m0              ; row 3 of dst
+
+    lea         r2,         [r2 + r4 * 4]
+    lea         r3,         [r3 + r5 * 4]
+    lea         r0,         [r0 + r1 * 4]
+
+    dec         r6d
+    jnz         .loop
+    RET
+%endif
+%endif
+%endmacro
+
+PIXEL_ADD_PS_W32_H4_avx2 32
+PIXEL_ADD_PS_W32_H4_avx2 64
+
+
+;-----------------------------------------------------------------------------
+; void pixel_add_ps_64x%2(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
+; dest = src0 + src1, clamped to the valid pixel range; processes 2 rows/iter.
+;-----------------------------------------------------------------------------
+%macro PIXEL_ADD_PS_W64_H2 2    ; %1 = width (64), %2 = block height
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal pixel_add_ps_64x%2, 6, 7, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+    mova    m5,     [pw_pixel_max]    ; upper clamp bound
+    pxor    m4,     m4                ; lower clamp bound = 0
+    mov     r6d,    %2/2              ; 2 rows per iteration
+    add     r4,     r4                ; strides in samples -> bytes (16-bit)
+    add     r5,     r5
+    add     r1,     r1
+.loop:
+    movu    m0,     [r2]              ; row 0, samples 0-15
+    movu    m2,     [r2 + 16]
+    movu    m1,     [r3]
+    movu    m3,     [r3 + 16]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0],       m0
+    movu    [r0 + 16],  m2
+
+    movu    m0,     [r2 + 32]         ; row 0, samples 16-31
+    movu    m2,     [r2 + 48]
+    movu    m1,     [r3 + 32]
+    movu    m3,     [r3 + 48]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + 32],  m0
+    movu    [r0 + 48],  m2
+
+    movu    m0,     [r2 + 64]         ; row 0, samples 32-47
+    movu    m2,     [r2 + 80]
+    movu    m1,     [r3 + 64]
+    movu    m3,     [r3 + 80]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + 64],  m0
+    movu    [r0 + 80],  m2
+
+    movu    m0,     [r2 + 96]         ; row 0, samples 48-63
+    movu    m2,     [r2 + 112]
+    movu    m1,     [r3 + 96]
+    movu    m3,     [r3 + 112]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + 96],  m0
+    movu    [r0 + 112], m2
+
+    movu    m0,     [r2 + r4]         ; row 1, samples 0-15
+    movu    m2,     [r2 + r4 + 16]
+    movu    m1,     [r3 + r5]
+    movu    m3,     [r3 + r5 + 16]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r1],      m0
+    movu    [r0 + r1 + 16], m2
+
+    movu    m0,     [r2 + r4 + 32]    ; row 1, samples 16-31
+    movu    m2,     [r2 + r4 + 48]
+    movu    m1,     [r3 + r5 + 32]
+    movu    m3,     [r3 + r5 + 48]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r1 + 32], m0
+    movu    [r0 + r1 + 48], m2
+
+    movu    m0,     [r2 + r4 + 64]    ; row 1, samples 32-47
+    movu    m2,     [r2 + r4 + 80]
+    movu    m1,     [r3 + r5 + 64]
+    movu    m3,     [r3 + r5 + 80]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r1 + 64], m0
+    movu    [r0 + r1 + 80], m2
+
+    movu    m0,     [r2 + r4 + 96]    ; row 1, samples 48-63
+    movu    m2,     [r2 + r4 + 112]
+    movu    m1,     [r3 + r5 + 96]
+    movu    m3,     [r3 + r5 + 112]
+    dec     r6d                       ; flags consumed by jnz below
+    lea     r2,     [r2 + r4 * 2]
+    lea     r3,     [r3 + r5 * 2]
+
+    paddw   m0,     m1
+    paddw   m2,     m3
+    CLIPW2  m0, m2, m4, m5
+
+    movu    [r0 + r1 + 96],     m0
+    movu    [r0 + r1 + 112],    m2
+    lea     r0,     [r0 + r1 * 2]
+
+    jnz     .loop
+    RET
+%else
+INIT_XMM sse4
+cglobal pixel_add_ps_64x%2, 6, 7, 8, dest, destride, src0, scr1, srcStride0, srcStride1
+    mov         r6d,        %2/2      ; 2 rows per iteration
+    add         r5,         r5        ; src1 stride in int16 units -> bytes
+.loop:
+    pmovzxbw    m0,         [r2]      ; row 0, pixels 0-31 of src0 widened
+    pmovzxbw    m1,         [r2 + 8]
+    pmovzxbw    m2,         [r2 + 16]
+    pmovzxbw    m3,         [r2 + 24]
+    movu        m4,         [r3]      ; src1 word offsets: pixel idx * 2
+    movu        m5,         [r3 + 16]
+    movu        m6,         [r3 + 32]
+    movu        m7,         [r3 + 48]
+
+    paddw       m0,         m4
+    paddw       m1,         m5
+    paddw       m2,         m6
+    paddw       m3,         m7
+    packuswb    m0,         m1        ; saturate back to bytes
+    packuswb    m2,         m3
+
+    movu        [r0],       m0
+    movu        [r0 + 16],  m2
+
+    pmovzxbw    m0,         [r2 + 32] ; row 0, pixels 32-63
+    pmovzxbw    m1,         [r2 + 40]
+    pmovzxbw    m2,         [r2 + 48]
+    pmovzxbw    m3,         [r2 + 56]
+    movu        m4,         [r3 + 64]
+    movu        m5,         [r3 + 80]
+    movu        m6,         [r3 + 96]
+    movu        m7,         [r3 + 112]
+
+    paddw       m0,         m4
+    paddw       m1,         m5
+    paddw       m2,         m6
+    paddw       m3,         m7
+    packuswb    m0,         m1
+    packuswb    m2,         m3
+
+    movu        [r0 + 32],  m0
+    movu        [r0 + 48],  m2
+
+    pmovzxbw    m0,         [r2 + r4] ; row 1, pixels 0-31
+    pmovzxbw    m1,         [r2 + r4 + 8]
+    pmovzxbw    m2,         [r2 + r4 + 16]
+    pmovzxbw    m3,         [r2 + r4 + 24]
+    movu        m4,         [r3 + r5]
+    movu        m5,         [r3 + r5 + 16]
+    movu        m6,         [r3 + r5 + 32]
+    movu        m7,         [r3 + r5 + 48]
+
+    paddw       m0,         m4
+    paddw       m1,         m5
+    paddw       m2,         m6
+    paddw       m3,         m7
+    packuswb    m0,         m1
+    packuswb    m2,         m3
+
+    movu        [r0 + r1],      m0
+    movu        [r0 + r1 + 16], m2
+
+    pmovzxbw    m0,         [r2 + r4 + 32]    ; row 1, pixels 32-63
+    pmovzxbw    m1,         [r2 + r4 + 40]
+    pmovzxbw    m2,         [r2 + r4 + 48]
+    pmovzxbw    m3,         [r2 + r4 + 56]
+    movu        m4,         [r3 + r5 + 64]
+    movu        m5,         [r3 + r5 + 80]
+    movu        m6,         [r3 + r5 + 96]
+    movu        m7,         [r3 + r5 + 112]
+    dec         r6d                   ; flags consumed by jnz below
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+
+    paddw       m0,         m4
+    paddw       m1,         m5
+    paddw       m2,         m6
+    paddw       m3,         m7
+    packuswb    m0,         m1
+    packuswb    m2,         m3
+
+    movu        [r0 + r1 + 32], m0
+    movu        [r0 + r1 + 48], m2
+    lea         r0,         [r0 + r1 * 2]
+
+    jnz         .loop
+    RET
+%endif
+%endmacro
+PIXEL_ADD_PS_W64_H2 64, 64
+
+;-----------------------------------------------------------------------------
+; void pixel_add_ps_64x64(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
+; AVX2: dest = clip(src0 + src1); HBD path does 4 rows/iter, 8-bit path 2.
+;-----------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal pixel_add_ps_64x64, 6, 10, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+    mova    m5,     [pw_pixel_max]   ; upper clamp bound
+    pxor    m4,     m4               ; lower clamp bound = 0
+    mov     r6d,    16               ; 64 rows / 4 rows per iteration
+    add     r4d,    r4d              ; strides in samples -> bytes (16-bit)
+    add     r5d,    r5d
+    add     r1d,    r1d
+    lea     r7,     [r4 * 3]         ; precomputed 3*stride for row 3
+    lea     r8,     [r5 * 3]
+    lea     r9,     [r1 * 3]
+
+.loop:
+    movu    m0,     [r2]             ; row 0, samples 0-31
+    movu    m1,     [r2 + 32]
+    movu    m2,     [r3]
+    movu    m3,     [r3 + 32]
+    paddw   m0,     m2
+    paddw   m1,     m3
+
+    CLIPW2  m0, m1, m4, m5
+    movu    [r0],                m0
+    movu    [r0 + 32],           m1
+
+    movu    m0,     [r2 + 64]        ; row 0, samples 32-63
+    movu    m1,     [r2 + 96]
+    movu    m2,     [r3 + 64]
+    movu    m3,     [r3 + 96]
+    paddw   m0,     m2
+    paddw   m1,     m3
+
+    CLIPW2  m0, m1, m4, m5
+    movu    [r0 + 64],           m0
+    movu    [r0 + 96],           m1
+
+    movu    m0,     [r2 + r4]        ; row 1, samples 0-31
+    movu    m1,     [r2 + r4 + 32]
+    movu    m2,     [r3 + r5]
+    movu    m3,     [r3 + r5 + 32]
+    paddw   m0,     m2
+    paddw   m1,     m3
+
+    CLIPW2  m0, m1, m4, m5
+    movu    [r0 + r1],           m0
+    movu    [r0 + r1 + 32],      m1
+
+    movu    m0,     [r2 + r4 + 64]   ; row 1, samples 32-63
+    movu    m1,     [r2 + r4 + 96]
+    movu    m2,     [r3 + r5 + 64]
+    movu    m3,     [r3 + r5 + 96]
+    paddw   m0,     m2
+    paddw   m1,     m3
+
+    CLIPW2  m0, m1, m4, m5
+    movu    [r0 + r1 + 64],      m0
+    movu    [r0 + r1 + 96],      m1
+
+    movu    m0,     [r2 + r4 * 2]    ; row 2, samples 0-31
+    movu    m1,     [r2 + r4 * 2 + 32]
+    movu    m2,     [r3 + r5 * 2]
+    movu    m3,     [r3 + r5 * 2+ 32]
+    paddw   m0,     m2
+    paddw   m1,     m3
+
+    CLIPW2  m0, m1, m4, m5
+    movu    [r0 + r1 * 2],       m0
+    movu    [r0 + r1 * 2 + 32],  m1
+
+    movu    m0,     [r2 + r4 * 2 + 64]    ; row 2, samples 32-63
+    movu    m1,     [r2 + r4 * 2 + 96]
+    movu    m2,     [r3 + r5 * 2 + 64]
+    movu    m3,     [r3 + r5 * 2 + 96]
+    paddw   m0,     m2
+    paddw   m1,     m3
+
+    CLIPW2  m0, m1, m4, m5
+    movu    [r0 + r1 * 2 + 64],  m0
+    movu    [r0 + r1 * 2 + 96],  m1
+
+    movu    m0,     [r2 + r7]        ; row 3 via 3*stride, samples 0-31
+    movu    m1,     [r2 + r7 + 32]
+    movu    m2,     [r3 + r8]
+    movu    m3,     [r3 + r8 + 32]
+    paddw   m0,     m2
+    paddw   m1,     m3
+
+    CLIPW2  m0, m1, m4, m5
+    movu    [r0 + r9],           m0
+    movu    [r0 + r9 + 32],      m1
+
+    movu    m0,     [r2 + r7 + 64]   ; row 3, samples 32-63
+    movu    m1,     [r2 + r7 + 96]
+    movu    m2,     [r3 + r8 + 64]
+    movu    m3,     [r3 + r8 + 96]
+    paddw   m0,     m2
+    paddw   m1,     m3
+
+    CLIPW2  m0, m1, m4, m5
+    movu    [r0 + r9 + 64],      m0
+    movu    [r0 + r9 + 96],      m1
+
+    dec     r6d
+    lea     r0,     [r0 + r1 * 4]
+    lea     r2,     [r2 + r4 * 4]
+    lea     r3,     [r3 + r5 * 4]
+    jnz     .loop
+    RET
+%endif
+%else
+INIT_YMM avx2
+cglobal pixel_add_ps_64x64, 6, 7, 8, dest, destride, src0, scr1, srcStride0, srcStride1
+    mov         r6d,        32       ; 64 rows / 2 rows per iteration
+    add         r5,         r5       ; src1 stride in int16 units -> bytes
+.loop:
+    pmovzxbw    m0,         [r2]                ; first 16 of row 0 of src0
+    pmovzxbw    m1,         [r2 + 16]           ; second 16 of row 0 of src0
+    pmovzxbw    m2,         [r2 + 32]           ; third 16 of row 0 of src0
+    pmovzxbw    m3,         [r2 + 48]           ; fourth 16 of row 0 of src0
+    movu        m4,         [r3]                ; first 16 of row 0 of src1
+    movu        m5,         [r3 + 32]           ; second 16 of row 0 of src1
+    movu        m6,         [r3 + 64]           ; third 16 of row 0 of src1
+    movu        m7,         [r3 + 96]           ; fourth 16 of row 0 of src1
+
+    paddw       m0,         m4
+    paddw       m1,         m5
+    paddw       m2,         m6
+    paddw       m3,         m7
+    packuswb    m0,         m1
+    packuswb    m2,         m3
+    vpermq      m0, m0, 11011000b               ; undo in-lane packuswb interleave
+    movu        [r0],      m0                   ; first 32 of row 0 of dst
+    vpermq      m2, m2, 11011000b
+    movu        [r0 + 32],      m2              ; second 32 of row 0 of dst
+
+    pmovzxbw    m0,         [r2 + r4]           ; first 16 of row 1 of src0
+    pmovzxbw    m1,         [r2 + r4 + 16]      ; second 16 of row 1 of src0
+    pmovzxbw    m2,         [r2 + r4 + 32]      ; third 16 of row 1 of src0
+    pmovzxbw    m3,         [r2 + r4 + 48]      ; fourth 16 of row 1 of src0
+    movu        m4,         [r3 + r5]           ; first 16 of row 1 of src1
+    movu        m5,         [r3 + r5 + 32]      ; second 16 of row 1 of src1
+    movu        m6,         [r3 + r5 + 64]      ; third 16 of row 1 of src1
+    movu        m7,         [r3 + r5 + 96]      ; fourth 16 of row 1 of src1
+
+    paddw       m0,         m4
+    paddw       m1,         m5
+    paddw       m2,         m6
+    paddw       m3,         m7
+    packuswb    m0,         m1
+    packuswb    m2,         m3
+    vpermq      m0, m0, 11011000b
+    movu        [r0 + r1],      m0              ; first 32 of row 1 of dst
+    vpermq      m2, m2, 11011000b
+    movu        [r0 + r1 + 32],      m2         ; second 32 of row 1 of dst
+
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+    lea         r0,         [r0 + r1 * 2]
+
+    dec         r6d
+    jnz         .loop
+    RET
+
+%endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/sad-a.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,4573 @@
+;*****************************************************************************
+;* sad-a.asm: x86 sad functions
+;*****************************************************************************
+;* Copyright (C) 2003-2013 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*          Fiona Glaser <fiona@x264.com>
+;*          Laurent Aimar <fenrir@via.ecp.fr>
+;*          Alex Izvorski <aizvorksi@gmail.com>
+;*          Min Chen <chenm003@163.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32
+
+MSK:                  db 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0
+
+SECTION .text
+
+cextern pb_3
+cextern pb_shuf8x8c
+cextern pw_8
+cextern pd_64
+
+;=============================================================================
+; SAD MMX
+;=============================================================================
+
+%macro SAD_INC_2x16P 0
+    movq    mm1,    [r0]
+    movq    mm2,    [r0+8]
+    movq    mm3,    [r0+r1]
+    movq    mm4,    [r0+r1+8]
+    psadbw  mm1,    [r2]
+    psadbw  mm2,    [r2+8]
+    psadbw  mm3,    [r2+r3]
+    psadbw  mm4,    [r2+r3+8]
+    lea     r0,     [r0+2*r1]
+    paddw   mm1,    mm2
+    paddw   mm3,    mm4
+    lea     r2,     [r2+2*r3]
+    paddw   mm0,    mm1
+    paddw   mm0,    mm3
+%endmacro
+
+%macro SAD_INC_2x8P 0
+    movq    mm1,    [r0]
+    movq    mm2,    [r0+r1]
+    psadbw  mm1,    [r2]
+    psadbw  mm2,    [r2+r3]
+    lea     r0,     [r0+2*r1]
+    paddw   mm0,    mm1
+    paddw   mm0,    mm2
+    lea     r2,     [r2+2*r3]
+%endmacro
+
+%macro SAD_INC_2x4P 0
+    movd    mm1,    [r0]
+    movd    mm2,    [r2]
+    punpckldq mm1,  [r0+r1]
+    punpckldq mm2,  [r2+r3]
+    psadbw  mm1,    mm2
+    paddw   mm0,    mm1
+    lea     r0,     [r0+2*r1]
+    lea     r2,     [r2+2*r3]
+%endmacro
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_16x16( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+%macro SAD 2
+cglobal pixel_sad_%1x%2_mmx2, 4,4
+    pxor    mm0, mm0
+%rep %2/2
+    SAD_INC_2x%1P
+%endrep
+    movd    eax, mm0
+    RET
+%endmacro
+
+SAD 16, 16
+SAD 16,  8
+SAD  8, 16
+SAD  8,  8
+SAD  8,  4
+SAD  4, 16
+SAD  4,  8
+SAD  4,  4
+
+
+
+;=============================================================================
+; SAD XMM
+;=============================================================================
+
+%macro SAD_END_SSE2 0
+    movhlps m1, m0
+    paddw   m0, m1
+    movd   eax, m0
+    RET
+%endmacro
+
+%macro PROCESS_SAD_12x4 0
+    movu    m1,  [r2]
+    movu    m2,  [r0]
+    pand    m1,  m4
+    pand    m2,  m4
+    psadbw  m1,  m2
+    paddd   m0,  m1
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+    movu    m1,  [r2]
+    movu    m2,  [r0]
+    pand    m1,  m4
+    pand    m2,  m4
+    psadbw  m1,  m2
+    paddd   m0,  m1
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+    movu    m1,  [r2]
+    movu    m2,  [r0]
+    pand    m1,  m4
+    pand    m2,  m4
+    psadbw  m1,  m2
+    paddd   m0,  m1
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+    movu    m1,  [r2]
+    movu    m2,  [r0]
+    pand    m1,  m4
+    pand    m2,  m4
+    psadbw  m1,  m2
+    paddd   m0,  m1
+%endmacro
+
+%macro PROCESS_SAD_16x4 0
+    movu    m1,  [r2]
+    movu    m2,  [r2 + r3]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + r1]
+    paddd   m1,  m2
+    paddd   m0,  m1
+    lea     r2,  [r2 + 2 * r3]
+    lea     r0,  [r0 + 2 * r1]
+    movu    m1,  [r2]
+    movu    m2,  [r2 + r3]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + r1]
+    paddd   m1,  m2
+    paddd   m0,  m1
+    lea     r2,  [r2 + 2 * r3]
+    lea     r0,  [r0 + 2 * r1]
+%endmacro
+
+%macro PROCESS_SAD_24x4 0
+    movu        m1,  [r2]
+    movq        m2,  [r2 + 16]
+    lea         r2,  [r2 + r3]
+    movu        m3,  [r2]
+    movq        m4,  [r2 + 16]
+    psadbw      m1,  [r0]
+    psadbw      m3,  [r0 + r1]
+    paddd       m0,  m1
+    paddd       m0,  m3
+    movq        m1,  [r0 + 16]
+    lea         r0,  [r0 + r1]
+    movq        m3,  [r0 + 16]
+    punpcklqdq  m2,  m4
+    punpcklqdq  m1,  m3
+    psadbw      m2, m1
+    paddd       m0, m2
+    lea         r2,  [r2 + r3]
+    lea         r0,  [r0 + r1]
+
+    movu        m1,  [r2]
+    movq        m2,  [r2 + 16]
+    lea         r2,  [r2 + r3]
+    movu        m3,  [r2]
+    movq        m4,  [r2 + 16]
+    psadbw      m1,  [r0]
+    psadbw      m3,  [r0 + r1]
+    paddd       m0,  m1
+    paddd       m0,  m3
+    movq        m1,  [r0 + 16]
+    lea         r0,  [r0 + r1]
+    movq        m3,  [r0 + 16]
+    punpcklqdq  m2,  m4
+    punpcklqdq  m1,  m3
+    psadbw      m2, m1
+    paddd       m0, m2
+%endmacro
+
+%macro PROCESS_SAD_32x4 0
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    paddd   m1,  m2
+    paddd   m0,  m1
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    paddd   m1,  m2
+    paddd   m0,  m1
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    paddd   m1,  m2
+    paddd   m0,  m1
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    paddd   m1,  m2
+    paddd   m0,  m1
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+%endmacro
+
+%macro PROCESS_SAD_48x4 0
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    movu    m3,  [r2 + 32]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    psadbw  m3,  [r0 + 32]
+    paddd   m1,  m2
+    paddd   m0,  m1
+    paddd   m0,  m3
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    movu    m3,  [r2 + 32]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    psadbw  m3,  [r0 + 32]
+    paddd   m1,  m2
+    paddd   m0,  m1
+    paddd   m0,  m3
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    movu    m3,  [r2 + 32]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    psadbw  m3,  [r0 + 32]
+    paddd   m1,  m2
+    paddd   m0,  m1
+    paddd   m0,  m3
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    movu    m3,  [r2 + 32]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    psadbw  m3,  [r0 + 32]
+    paddd   m1,  m2
+    paddd   m0,  m1
+    paddd   m0,  m3
+%endmacro
+
+%macro PROCESS_SAD_8x4 0
+    movq        m1, [r2]
+    movq        m2, [r2 + r3]
+    lea         r2, [r2 + 2 * r3]
+    movq        m3, [r0]
+    movq        m4, [r0 + r1]
+    lea         r0, [r0 + 2 * r1]
+    punpcklqdq  m1, m2
+    punpcklqdq  m3, m4
+    psadbw      m1, m3
+    paddd       m0, m1
+    movq        m1, [r2]
+    movq        m2, [r2 + r3]
+    lea         r2, [r2 + 2 * r3]
+    movq        m3, [r0]
+    movq        m4, [r0 + r1]
+    lea         r0, [r0 + 2 * r1]
+    punpcklqdq  m1, m2
+    punpcklqdq  m3, m4
+    psadbw      m1, m3
+    paddd       m0, m1
+%endmacro
+
+%macro PROCESS_SAD_64x4 0
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    movu    m3,  [r2 + 32]
+    movu    m4,  [r2 + 48]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    psadbw  m3,  [r0 + 32]
+    psadbw  m4,  [r0 + 48]
+    paddd   m1,  m2
+    paddd   m3,  m4
+    paddd   m0,  m1
+    paddd   m0,  m3
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    movu    m3,  [r2 + 32]
+    movu    m4,  [r2 + 48]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    psadbw  m3,  [r0 + 32]
+    psadbw  m4,  [r0 + 48]
+    paddd   m1,  m2
+    paddd   m3,  m4
+    paddd   m0,  m1
+    paddd   m0,  m3
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    movu    m3,  [r2 + 32]
+    movu    m4,  [r2 + 48]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    psadbw  m3,  [r0 + 32]
+    psadbw  m4,  [r0 + 48]
+    paddd   m1,  m2
+    paddd   m3,  m4
+    paddd   m0,  m1
+    paddd   m0,  m3
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+
+    movu    m1,  [r2]
+    movu    m2,  [r2 + 16]
+    movu    m3,  [r2 + 32]
+    movu    m4,  [r2 + 48]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + 16]
+    psadbw  m3,  [r0 + 32]
+    psadbw  m4,  [r0 + 48]
+    paddd   m1,  m2
+    paddd   m3,  m4
+    paddd   m0,  m1
+    paddd   m0,  m3
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+%endmacro
+
+%macro SAD_W16 0
+;-----------------------------------------------------------------------------
+; int pixel_sad_16x16( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_16x16, 4,4,8
+    movu    m0, [r2]
+    movu    m1, [r2+r3]
+    lea     r2, [r2+2*r3]
+    movu    m2, [r2]
+    movu    m3, [r2+r3]
+    lea     r2, [r2+2*r3]
+    psadbw  m0, [r0]
+    psadbw  m1, [r0+r1]
+    lea     r0, [r0+2*r1]
+    movu    m4, [r2]
+    paddw   m0, m1
+    psadbw  m2, [r0]
+    psadbw  m3, [r0+r1]
+    lea     r0, [r0+2*r1]
+    movu    m5, [r2+r3]
+    lea     r2, [r2+2*r3]
+    paddw   m2, m3
+    movu    m6, [r2]
+    movu    m7, [r2+r3]
+    lea     r2, [r2+2*r3]
+    paddw   m0, m2
+    psadbw  m4, [r0]
+    psadbw  m5, [r0+r1]
+    lea     r0, [r0+2*r1]
+    movu    m1, [r2]
+    paddw   m4, m5
+    psadbw  m6, [r0]
+    psadbw  m7, [r0+r1]
+    lea     r0, [r0+2*r1]
+    movu    m2, [r2+r3]
+    lea     r2, [r2+2*r3]
+    paddw   m6, m7
+    movu    m3, [r2]
+    paddw   m0, m4
+    movu    m4, [r2+r3]
+    lea     r2, [r2+2*r3]
+    paddw   m0, m6
+    psadbw  m1, [r0]
+    psadbw  m2, [r0+r1]
+    lea     r0, [r0+2*r1]
+    movu    m5, [r2]
+    paddw   m1, m2
+    psadbw  m3, [r0]
+    psadbw  m4, [r0+r1]
+    lea     r0, [r0+2*r1]
+    movu    m6, [r2+r3]
+    lea     r2, [r2+2*r3]
+    paddw   m3, m4
+    movu    m7, [r2]
+    paddw   m0, m1
+    movu    m1, [r2+r3]
+    paddw   m0, m3
+    psadbw  m5, [r0]
+    psadbw  m6, [r0+r1]
+    lea     r0, [r0+2*r1]
+    paddw   m5, m6
+    psadbw  m7, [r0]
+    psadbw  m1, [r0+r1]
+    paddw   m7, m1
+    paddw   m0, m5
+    paddw   m0, m7
+    SAD_END_SSE2
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_16x8( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_16x8, 4,4
+    movu    m0, [r2]
+    movu    m2, [r2+r3]
+    lea     r2, [r2+2*r3]
+    movu    m3, [r2]
+    movu    m4, [r2+r3]
+    psadbw  m0, [r0]
+    psadbw  m2, [r0+r1]
+    lea     r0, [r0+2*r1]
+    psadbw  m3, [r0]
+    psadbw  m4, [r0+r1]
+    lea     r0, [r0+2*r1]
+    lea     r2, [r2+2*r3]
+    paddw   m0, m2
+    paddw   m3, m4
+    paddw   m0, m3
+    movu    m1, [r2]
+    movu    m2, [r2+r3]
+    lea     r2, [r2+2*r3]
+    movu    m3, [r2]
+    movu    m4, [r2+r3]
+    psadbw  m1, [r0]
+    psadbw  m2, [r0+r1]
+    lea     r0, [r0+2*r1]
+    psadbw  m3, [r0]
+    psadbw  m4, [r0+r1]
+    lea     r0, [r0+2*r1]
+    lea     r2, [r2+2*r3]
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+    SAD_END_SSE2
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_16x12( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_16x12, 4,4,3
+    pxor m0, m0
+
+    PROCESS_SAD_16x4
+    PROCESS_SAD_16x4
+    PROCESS_SAD_16x4
+
+    movhlps m1, m0
+    paddd   m0, m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_16x32( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_16x32, 4,5,3
+    pxor m0,  m0
+    mov  r4d, 4
+.loop:
+    PROCESS_SAD_16x4
+    PROCESS_SAD_16x4
+    dec  r4d
+    jnz .loop
+
+    movhlps m1, m0
+    paddd   m0, m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_16x64( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_16x64, 4,5,3
+    pxor m0,  m0
+    mov  r4d, 8
+.loop:
+    PROCESS_SAD_16x4
+    PROCESS_SAD_16x4
+    dec  r4d
+    jnz .loop
+
+    movhlps m1, m0
+    paddd   m0, m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_16x4( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_16x4, 4,4,3
+
+    movu    m0,  [r2]
+    movu    m1,  [r2 + r3]
+    psadbw  m0,  [r0]
+    psadbw  m1,  [r0 + r1]
+    paddd   m0,  m1
+    lea     r2,  [r2 + 2 * r3]
+    lea     r0,  [r0 + 2 * r1]
+    movu    m1,  [r2]
+    movu    m2,  [r2 + r3]
+    psadbw  m1,  [r0]
+    psadbw  m2,  [r0 + r1]
+    paddd   m1,  m2
+    paddd   m0,  m1
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_32x8( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_32x8, 4,4,3
+    pxor  m0,  m0
+
+    PROCESS_SAD_32x4
+    PROCESS_SAD_32x4
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_32x24( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_32x24, 4,5,3
+    pxor  m0,  m0
+    mov   r4d, 3
+.loop:
+    PROCESS_SAD_32x4
+    PROCESS_SAD_32x4
+    dec r4d
+    jnz .loop
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_32x32( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_32x32, 4,5,3
+    pxor  m0,  m0
+    mov   r4d, 4
+.loop:
+    PROCESS_SAD_32x4
+    PROCESS_SAD_32x4
+    dec r4d
+    jnz .loop
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_32x16( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_32x16, 4,4,3
+    pxor  m0,  m0
+
+    PROCESS_SAD_32x4
+    PROCESS_SAD_32x4
+    PROCESS_SAD_32x4
+    PROCESS_SAD_32x4
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_32x64( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_32x64, 4,5,3
+    pxor  m0,  m0
+    mov   r4d, 8
+.loop:
+    PROCESS_SAD_32x4
+    PROCESS_SAD_32x4
+    dec  r4d
+    jnz .loop
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_8x32( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_8x32, 4,5,3
+    pxor  m0,  m0
+    mov   r4d, 4
+.loop:
+    PROCESS_SAD_8x4
+    PROCESS_SAD_8x4
+    dec  r4d
+    jnz .loop
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_64x16( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_64x16, 4,4,5
+    pxor  m0,  m0
+
+    PROCESS_SAD_64x4
+    PROCESS_SAD_64x4
+    PROCESS_SAD_64x4
+    PROCESS_SAD_64x4
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_64x32( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_64x32, 4,5,5
+    pxor  m0,  m0
+    mov   r4,  4
+
+.loop:
+    PROCESS_SAD_64x4
+    PROCESS_SAD_64x4
+
+    dec   r4
+    jnz   .loop
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_64x48( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_64x48, 4,5,5
+    pxor  m0,  m0
+    mov   r4,  6
+
+.loop:
+    PROCESS_SAD_64x4
+    PROCESS_SAD_64x4
+    dec     r4d
+    jnz     .loop
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_64x64( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_64x64, 4,5,5
+    pxor  m0,  m0
+    mov   r4,  8
+
+.loop:
+    PROCESS_SAD_64x4
+    PROCESS_SAD_64x4
+    dec   r4
+    jnz   .loop
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_48x64( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_48x64, 4,5,5
+    pxor  m0,  m0
+    mov   r4,  64
+
+.loop:
+    PROCESS_SAD_48x4
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+
+    PROCESS_SAD_48x4
+    lea     r2,  [r2 + r3]
+    lea     r0,  [r0 + r1]
+
+    sub   r4,  8
+    cmp   r4,  8
+
+jnz .loop
+    PROCESS_SAD_48x4
+    lea   r2,  [r2 + r3]
+    lea   r0,  [r0 + r1]
+    PROCESS_SAD_48x4
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_24x32( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_24x32, 4,5,4
+    pxor  m0,  m0
+    mov   r4,  32
+
+.loop:
+    PROCESS_SAD_24x4
+    lea         r2,  [r2 + r3]
+    lea         r0,  [r0 + r1]
+    PROCESS_SAD_24x4
+    lea         r2,  [r2 + r3]
+    lea         r0,  [r0 + r1]
+    sub   r4,  8
+    cmp   r4,  8
+jnz .loop
+    PROCESS_SAD_24x4
+    lea         r2,  [r2 + r3]
+    lea         r0,  [r0 + r1]
+    PROCESS_SAD_24x4
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_12x16( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sad_12x16, 4,4,4
+    mova  m4,  [MSK]
+    pxor  m0,  m0
+
+    PROCESS_SAD_12x4
+    lea         r2,  [r2 + r3]
+    lea         r0,  [r0 + r1]
+    PROCESS_SAD_12x4
+    lea         r2,  [r2 + r3]
+    lea         r0,  [r0 + r1]
+    PROCESS_SAD_12x4
+    lea         r2,  [r2 + r3]
+    lea         r0,  [r0 + r1]
+    PROCESS_SAD_12x4
+
+    movhlps m1,  m0
+    paddd   m0,  m1
+    movd    eax, m0
+    RET
+
+%endmacro
+
+INIT_XMM sse2
+SAD_W16
+INIT_XMM sse3
+SAD_W16
+INIT_XMM sse2, aligned
+SAD_W16
+
+%macro SAD_INC_4x8P_SSE 1
+    movq    m1, [r0]
+    movq    m2, [r0+r1]
+    lea     r0, [r0+2*r1]
+    movq    m3, [r2]
+    movq    m4, [r2+r3]
+    lea     r2, [r2+2*r3]
+    movhps  m1, [r0]
+    movhps  m2, [r0+r1]
+    movhps  m3, [r2]
+    movhps  m4, [r2+r3]
+    lea     r0, [r0+2*r1]
+    psadbw  m1, m3
+    psadbw  m2, m4
+    lea     r2, [r2+2*r3]
+    ACCUM paddw, 0, 1, %1
+    paddw   m0, m2
+%endmacro
+
+INIT_XMM
+;Even on Nehalem, no sizes other than 8x16 benefit from this method.
+cglobal pixel_sad_8x16_sse2, 4,4
+    SAD_INC_4x8P_SSE 0
+    SAD_INC_4x8P_SSE 1
+    SAD_INC_4x8P_SSE 1
+    SAD_INC_4x8P_SSE 1
+    SAD_END_SSE2
+    RET
+
+;=============================================================================
+; SAD x3/x4 MMX
+;=============================================================================
+
+%macro SAD_X3_START_1x8P 0
+    movq    mm3,    [r0]
+    movq    mm0,    [r1]
+    movq    mm1,    [r2]
+    movq    mm2,    [r3]
+    psadbw  mm0,    mm3
+    psadbw  mm1,    mm3
+    psadbw  mm2,    mm3
+%endmacro
+
+%macro SAD_X3_1x8P 2
+    movq    mm3,    [r0+%1]
+    movq    mm4,    [r1+%2]
+    movq    mm5,    [r2+%2]
+    movq    mm6,    [r3+%2]
+    psadbw  mm4,    mm3
+    psadbw  mm5,    mm3
+    psadbw  mm6,    mm3
+    paddw   mm0,    mm4
+    paddw   mm1,    mm5
+    paddw   mm2,    mm6
+%endmacro
+
+%macro SAD_X3_START_2x4P 3
+    movd      mm3,  [r0]
+    movd      %1,   [r1]
+    movd      %2,   [r2]
+    movd      %3,   [r3]
+    punpckldq mm3,  [r0+FENC_STRIDE]
+    punpckldq %1,   [r1+r4]
+    punpckldq %2,   [r2+r4]
+    punpckldq %3,   [r3+r4]
+    psadbw    %1,   mm3
+    psadbw    %2,   mm3
+    psadbw    %3,   mm3
+%endmacro
+
+%macro SAD_X3_2x16P 1
+%if %1
+    SAD_X3_START_1x8P
+%else
+    SAD_X3_1x8P 0, 0
+%endif
+    SAD_X3_1x8P 8, 8
+    SAD_X3_1x8P FENC_STRIDE, r4
+    SAD_X3_1x8P FENC_STRIDE+8, r4+8
+    add     r0, 2*FENC_STRIDE
+    lea     r1, [r1+2*r4]
+    lea     r2, [r2+2*r4]
+    lea     r3, [r3+2*r4]
+%endmacro
+
+%macro SAD_X3_2x8P 1
+%if %1
+    SAD_X3_START_1x8P
+%else
+    SAD_X3_1x8P 0, 0
+%endif
+    SAD_X3_1x8P FENC_STRIDE, r4
+    add     r0, 2*FENC_STRIDE
+    lea     r1, [r1+2*r4]
+    lea     r2, [r2+2*r4]
+    lea     r3, [r3+2*r4]
+%endmacro
+
+%macro SAD_X3_2x4P 1
+%if %1
+    SAD_X3_START_2x4P mm0, mm1, mm2
+%else
+    SAD_X3_START_2x4P mm4, mm5, mm6
+    paddw     mm0,  mm4
+    paddw     mm1,  mm5
+    paddw     mm2,  mm6
+%endif
+    add     r0, 2*FENC_STRIDE
+    lea     r1, [r1+2*r4]
+    lea     r2, [r2+2*r4]
+    lea     r3, [r3+2*r4]
+%endmacro
+
+%macro SAD_X4_START_1x8P 0
+    movq    mm7,    [r0]
+    movq    mm0,    [r1]
+    movq    mm1,    [r2]
+    movq    mm2,    [r3]
+    movq    mm3,    [r4]
+    psadbw  mm0,    mm7
+    psadbw  mm1,    mm7
+    psadbw  mm2,    mm7
+    psadbw  mm3,    mm7
+%endmacro
+
+%macro SAD_X4_1x8P 2
+    movq    mm7,    [r0+%1]
+    movq    mm4,    [r1+%2]
+    movq    mm5,    [r2+%2]
+    movq    mm6,    [r3+%2]
+    psadbw  mm4,    mm7
+    psadbw  mm5,    mm7
+    psadbw  mm6,    mm7
+    psadbw  mm7,    [r4+%2]
+    paddw   mm0,    mm4
+    paddw   mm1,    mm5
+    paddw   mm2,    mm6
+    paddw   mm3,    mm7
+%endmacro
+
+%macro SAD_X4_START_2x4P 0
+    movd      mm7,  [r0]
+    movd      mm0,  [r1]
+    movd      mm1,  [r2]
+    movd      mm2,  [r3]
+    movd      mm3,  [r4]
+    punpckldq mm7,  [r0+FENC_STRIDE]
+    punpckldq mm0,  [r1+r5]
+    punpckldq mm1,  [r2+r5]
+    punpckldq mm2,  [r3+r5]
+    punpckldq mm3,  [r4+r5]
+    psadbw    mm0,  mm7
+    psadbw    mm1,  mm7
+    psadbw    mm2,  mm7
+    psadbw    mm3,  mm7
+%endmacro
+
+%macro SAD_X4_INC_2x4P 0
+    movd      mm7,  [r0]
+    movd      mm4,  [r1]
+    movd      mm5,  [r2]
+    punpckldq mm7,  [r0+FENC_STRIDE]
+    punpckldq mm4,  [r1+r5]
+    punpckldq mm5,  [r2+r5]
+    psadbw    mm4,  mm7
+    psadbw    mm5,  mm7
+    paddw     mm0,  mm4
+    paddw     mm1,  mm5
+    movd      mm4,  [r3]
+    movd      mm5,  [r4]
+    punpckldq mm4,  [r3+r5]
+    punpckldq mm5,  [r4+r5]
+    psadbw    mm4,  mm7
+    psadbw    mm5,  mm7
+    paddw     mm2,  mm4
+    paddw     mm3,  mm5
+%endmacro
+
+%macro SAD_X4_2x16P 1
+%if %1
+    SAD_X4_START_1x8P
+%else
+    SAD_X4_1x8P 0, 0
+%endif
+    SAD_X4_1x8P 8, 8
+    SAD_X4_1x8P FENC_STRIDE, r5
+    SAD_X4_1x8P FENC_STRIDE+8, r5+8
+    add     r0, 2*FENC_STRIDE
+    lea     r1, [r1+2*r5]
+    lea     r2, [r2+2*r5]
+    lea     r3, [r3+2*r5]
+    lea     r4, [r4+2*r5]
+%endmacro
+
+%macro SAD_X4_2x8P 1
+%if %1
+    SAD_X4_START_1x8P
+%else
+    SAD_X4_1x8P 0, 0
+%endif
+    SAD_X4_1x8P FENC_STRIDE, r5
+    add     r0, 2*FENC_STRIDE
+    lea     r1, [r1+2*r5]
+    lea     r2, [r2+2*r5]
+    lea     r3, [r3+2*r5]
+    lea     r4, [r4+2*r5]
+%endmacro
+
+%macro SAD_X4_2x4P 1
+%if %1
+    SAD_X4_START_2x4P
+%else
+    SAD_X4_INC_2x4P
+%endif
+    add     r0, 2*FENC_STRIDE
+    lea     r1, [r1+2*r5]
+    lea     r2, [r2+2*r5]
+    lea     r3, [r3+2*r5]
+    lea     r4, [r4+2*r5]
+%endmacro
+
+%macro SAD_X3_END 0
+%if UNIX64
+    movd    [r5+0], mm0
+    movd    [r5+4], mm1
+    movd    [r5+8], mm2
+%else
+    mov     r0, r5mp
+    movd    [r0+0], mm0
+    movd    [r0+4], mm1
+    movd    [r0+8], mm2
+%endif
+    RET
+%endmacro
+
+%macro SAD_X4_END 0
+    mov     r0, r6mp
+    movd    [r0+0], mm0
+    movd    [r0+4], mm1
+    movd    [r0+8], mm2
+    movd    [r0+12], mm3
+    RET
+%endmacro
+
+%macro SAD_X3_12x4 0
+    mova    m3,  [r0]
+    movu    m5,  [r1]
+    pand    m3,  m4
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    mova    m3,  [r0 + FENC_STRIDE]
+    movu    m5,  [r1 + r4]
+    pand    m3,  m4
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r2 + r4]
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r3 + r4]
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    mova    m3,  [r0 + FENC_STRIDE * 2]
+    movu    m5,  [r1 + r4 * 2]
+    pand    m3,  m4
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r2 + r4 * 2]
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r3 + r4 * 2]
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    lea     r1, [r1 + r4 * 2]
+    lea     r2, [r2 + r4 * 2]
+    lea     r3, [r3 + r4 * 2]
+    mova    m3,  [r0 + FENC_STRIDE + FENC_STRIDE * 2]
+    movu    m5,  [r1 + r4]
+    pand    m3,  m4
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r2 + r4]
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r3 + r4]
+    pand    m5,  m4
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    lea     r0,  [r0 + FENC_STRIDE * 4]
+    lea     r1,  [r1 + r4 * 2]
+    lea     r2,  [r2 + r4 * 2]
+    lea     r3,  [r3 + r4 * 2]
+%endmacro
+
+%macro SAD_X4_12x4 0
+    mova    m4,  [r0]
+    movu    m5,  [r1]
+    pand    m4,  m6
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m2,  m5
+    movu    m5,  [r4]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m3,  m5
+    mova    m4,  [r0 + FENC_STRIDE]
+    movu    m5,  [r1 + r5]
+    pand    m4,  m6
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2 + r5]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3 + r5]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m2,  m5
+    movu    m5,  [r4 + r5]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m3,  m5
+    mova    m4,  [r0 + FENC_STRIDE * 2]
+    movu    m5,  [r1 + r5 * 2]
+    pand    m4,  m6
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2 + r5 * 2]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3 + r5 * 2]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m2,  m5
+    movu    m5,  [r4 + r5 * 2]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m3,  m5
+    lea     r1, [r1 + r5 * 2]
+    lea     r2, [r2 + r5 * 2]
+    lea     r3, [r3 + r5 * 2]
+    lea     r4, [r4 + r5 * 2]
+    mova    m4,  [r0 + FENC_STRIDE + FENC_STRIDE * 2]
+    movu    m5,  [r1 + r5]
+    pand    m4,  m6
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2 + r5]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3 + r5]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m2,  m5
+    movu    m5,  [r4 + r5]
+    pand    m5,  m6
+    psadbw  m5,  m4
+    paddd   m3,  m5
+    lea     r0,  [r0 + FENC_STRIDE * 4]
+    lea     r1,  [r1 + r5 * 2]
+    lea     r2,  [r2 + r5 * 2]
+    lea     r3,  [r3 + r5 * 2]
+    lea     r4,  [r4 + r5 * 2]
+%endmacro
+
+; SAD_X3_24x4: accumulate 4 rows of 24-pixel-wide SAD against 3 candidates.
+; r0 = fenc (stride FENC_STRIDE), r1-r3 = candidate refs (stride r4);
+; running per-candidate dword sums live in m0/m1/m2.
+; A 24-byte row is read as an aligned 16B load plus a 16B load at +16 that
+; over-reads 8 bytes; `pshufd ..., 84` (0b01_01_01_00) replicates the zero
+; dword1 of the psadbw result into the high dwords, zeroing the high-qword
+; SAD so the 8 over-read bytes do not contribute.
+%macro SAD_X3_24x4 0
+; row 0
+    mova    m3,  [r0]
+    mova    m4,  [r0 + 16]
+    movu    m5,  [r1]
+    movu    m6,  [r1 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    movu    m6,  [r2 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    movu    m6,  [r3 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m2,  m5
+
+; row 1 (one stride below)
+    mova    m3,  [r0 + FENC_STRIDE]
+    mova    m4,  [r0 + 16 + FENC_STRIDE]
+    movu    m5,  [r1 + r4]
+    movu    m6,  [r1 + 16 + r4]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2 + r4]
+    movu    m6,  [r2 + 16 + r4]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3 + r4]
+    movu    m6,  [r3 + 16 + r4]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m2,  m5
+
+; row 2 (two strides below)
+    mova    m3,  [r0 + FENC_STRIDE * 2]
+    mova    m4,  [r0 + 16 + FENC_STRIDE * 2]
+    movu    m5,  [r1 + r4 * 2]
+    movu    m6,  [r1 + 16 + r4 * 2]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2 + r4 * 2]
+    movu    m6,  [r2 + 16 + r4 * 2]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3 + r4 * 2]
+    movu    m6,  [r3 + 16 + r4 * 2]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m2,  m5
+; advance all pointers by 2 rows; row 3 is then reached via +stride below
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r4 * 2]
+    lea     r2,  [r2 + r4 * 2]
+    lea     r3,  [r3 + r4 * 2]
+
+; row 3
+    mova    m3,  [r0 + FENC_STRIDE]
+    mova    m4,  [r0 + 16 + FENC_STRIDE]
+    movu    m5,  [r1 + r4]
+    movu    m6,  [r1 + 16 + r4]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2 + r4]
+    movu    m6,  [r2 + 16 + r4]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3 + r4]
+    movu    m6,  [r3 + 16 + r4]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    pshufd  m6,  m6, 84
+    paddd   m5,  m6
+    paddd   m2,  m5
+; advance by the remaining 2 rows (net +4 rows per invocation)
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r4 * 2]
+    lea     r2,  [r2 + r4 * 2]
+    lea     r3,  [r3 + r4 * 2]
+%endmacro
+
+; SAD_X4_24x4: 4-candidate variant of SAD_X3_24x4.
+; r0 = fenc (stride FENC_STRIDE), r1-r4 = candidate refs (stride r5);
+; running dword sums in m0-m3. Same `pshufd ..., 84` trick zeroes the SAD
+; of the 8 bytes over-read past each 24-byte row.
+%macro SAD_X4_24x4 0
+; row 0
+    mova    m4,  [r0]
+    mova    m5,  [r0 + 16]
+    movu    m6,  [r1]
+    movu    m7,  [r1 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m0,  m6
+    movu    m6,  [r2]
+    movu    m7,  [r2 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m1,  m6
+    movu    m6,  [r3]
+    movu    m7,  [r3 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m2,  m6
+    movu    m6,  [r4]
+    movu    m7,  [r4 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m3,  m6
+
+; row 1
+    mova    m4,  [r0 + FENC_STRIDE]
+    mova    m5,  [r0 + 16 + FENC_STRIDE]
+    movu    m6,  [r1 + r5]
+    movu    m7,  [r1 + 16 + r5]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m0,  m6
+    movu    m6,  [r2 + r5]
+    movu    m7,  [r2 + 16 + r5]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m1,  m6
+    movu    m6,  [r3 + r5]
+    movu    m7,  [r3 + 16 + r5]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m2,  m6
+    movu    m6,  [r4 + r5]
+    movu    m7,  [r4 + 16 + r5]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m3,  m6
+
+; row 2
+    mova    m4,  [r0 + FENC_STRIDE * 2]
+    mova    m5,  [r0 + 16 + FENC_STRIDE * 2]
+    movu    m6,  [r1 + r5 * 2]
+    movu    m7,  [r1 + 16 + r5 * 2]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m0,  m6
+    movu    m6,  [r2 + r5 * 2]
+    movu    m7,  [r2 + 16 + r5 * 2]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m1,  m6
+    movu    m6,  [r3 + r5 * 2]
+    movu    m7,  [r3 + 16 + r5 * 2]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m2,  m6
+    movu    m6,  [r4 + r5 * 2]
+    movu    m7,  [r4 + 16 + r5 * 2]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m3,  m6
+; advance 2 rows; row 3 is reached via +stride below
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r5 * 2]
+    lea     r2,  [r2 + r5 * 2]
+    lea     r3,  [r3 + r5 * 2]
+    lea     r4,  [r4 + r5 * 2]
+; row 3
+    mova    m4,  [r0 + FENC_STRIDE]
+    mova    m5,  [r0 + 16 + FENC_STRIDE]
+    movu    m6,  [r1 + r5]
+    movu    m7,  [r1 + 16 + r5]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m0,  m6
+    movu    m6,  [r2 + r5]
+    movu    m7,  [r2 + 16 + r5]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m1,  m6
+    movu    m6,  [r3 + r5]
+    movu    m7,  [r3 + 16 + r5]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m2,  m6
+    movu    m6,  [r4 + r5]
+    movu    m7,  [r4 + 16 + r5]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    pshufd  m7,  m7, 84
+    paddd   m6,  m7
+    paddd   m3,  m6
+; advance the remaining 2 rows (net +4 rows per invocation)
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r5 * 2]
+    lea     r2,  [r2 + r5 * 2]
+    lea     r3,  [r3 + r5 * 2]
+    lea     r4,  [r4 + r5 * 2]
+%endmacro
+
+; SAD_X3_32x4: accumulate 4 rows of 32-wide SAD against 3 candidates.
+; r0 = fenc (stride FENC_STRIDE), r1-r3 = refs (stride r4); sums in m0-m2.
+; 32 bytes = two exact 16B loads, so no pshufd masking is needed; pointers
+; are advanced one row at a time after each row.
+%macro SAD_X3_32x4 0
+; row 0
+    mova    m3,  [r0]
+    mova    m4,  [r0 + 16]
+    movu    m5,  [r1]
+    movu    m6,  [r1 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    movu    m6,  [r2 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    movu    m6,  [r3 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m2,  m5
+    lea     r0,  [r0 + FENC_STRIDE]
+    lea     r1,  [r1 + r4]
+    lea     r2,  [r2 + r4]
+    lea     r3,  [r3 + r4]
+; row 1
+    mova    m3,  [r0]
+    mova    m4,  [r0 + 16]
+    movu    m5,  [r1]
+    movu    m6,  [r1 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    movu    m6,  [r2 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    movu    m6,  [r3 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m2,  m5
+    lea     r0,  [r0 + FENC_STRIDE]
+    lea     r1,  [r1 + r4]
+    lea     r2,  [r2 + r4]
+    lea     r3,  [r3 + r4]
+; row 2
+    mova    m3,  [r0]
+    mova    m4,  [r0 + 16]
+    movu    m5,  [r1]
+    movu    m6,  [r1 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    movu    m6,  [r2 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    movu    m6,  [r3 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m2,  m5
+    lea     r0,  [r0 + FENC_STRIDE]
+    lea     r1,  [r1 + r4]
+    lea     r2,  [r2 + r4]
+    lea     r3,  [r3 + r4]
+; row 3
+    mova    m3,  [r0]
+    mova    m4,  [r0 + 16]
+    movu    m5,  [r1]
+    movu    m6,  [r1 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    movu    m6,  [r2 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    movu    m6,  [r3 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m2,  m5
+    lea     r0,  [r0 + FENC_STRIDE]
+    lea     r1,  [r1 + r4]
+    lea     r2,  [r2 + r4]
+    lea     r3,  [r3 + r4]
+%endmacro
+
+; SAD_X4_32x4: 4-candidate variant of SAD_X3_32x4.
+; r0 = fenc (stride FENC_STRIDE), r1-r4 = refs (stride r5); sums in m0-m3.
+; Pointers advance one row at a time after each of the 4 rows.
+%macro SAD_X4_32x4 0
+; row 0
+    mova    m4,  [r0]
+    mova    m5,  [r0 + 16]
+    movu    m6,  [r1]
+    movu    m7,  [r1 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m0,  m6
+    movu    m6,  [r2]
+    movu    m7,  [r2 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m1,  m6
+    movu    m6,  [r3]
+    movu    m7,  [r3 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m2,  m6
+    movu    m6,  [r4]
+    movu    m7,  [r4 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m3,  m6
+    lea     r0,  [r0 + FENC_STRIDE]
+    lea     r1,  [r1 + r5]
+    lea     r2,  [r2 + r5]
+    lea     r3,  [r3 + r5]
+    lea     r4,  [r4 + r5]
+; row 1
+    mova    m4,  [r0]
+    mova    m5,  [r0 + 16]
+    movu    m6,  [r1]
+    movu    m7,  [r1 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m0,  m6
+    movu    m6,  [r2]
+    movu    m7,  [r2 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m1,  m6
+    movu    m6,  [r3]
+    movu    m7,  [r3 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m2,  m6
+    movu    m6,  [r4]
+    movu    m7,  [r4 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m3,  m6
+    lea     r0,  [r0 + FENC_STRIDE]
+    lea     r1,  [r1 + r5]
+    lea     r2,  [r2 + r5]
+    lea     r3,  [r3 + r5]
+    lea     r4,  [r4 + r5]
+; row 2
+    mova    m4,  [r0]
+    mova    m5,  [r0 + 16]
+    movu    m6,  [r1]
+    movu    m7,  [r1 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m0,  m6
+    movu    m6,  [r2]
+    movu    m7,  [r2 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m1,  m6
+    movu    m6,  [r3]
+    movu    m7,  [r3 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m2,  m6
+    movu    m6,  [r4]
+    movu    m7,  [r4 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m3,  m6
+    lea     r0,  [r0 + FENC_STRIDE]
+    lea     r1,  [r1 + r5]
+    lea     r2,  [r2 + r5]
+    lea     r3,  [r3 + r5]
+    lea     r4,  [r4 + r5]
+; row 3
+    mova    m4,  [r0]
+    mova    m5,  [r0 + 16]
+    movu    m6,  [r1]
+    movu    m7,  [r1 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m0,  m6
+    movu    m6,  [r2]
+    movu    m7,  [r2 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m1,  m6
+    movu    m6,  [r3]
+    movu    m7,  [r3 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m2,  m6
+    movu    m6,  [r4]
+    movu    m7,  [r4 + 16]
+    psadbw  m6,  m4
+    psadbw  m7,  m5
+    paddd   m6,  m7
+    paddd   m3,  m6
+    lea     r0,  [r0 + FENC_STRIDE]
+    lea     r1,  [r1 + r5]
+    lea     r2,  [r2 + r5]
+    lea     r3,  [r3 + r5]
+    lea     r4,  [r4 + r5]
+%endmacro
+
+; SAD_X3_48x4: accumulate 4 rows of 48-wide SAD against 3 candidates.
+; r0 = fenc (stride FENC_STRIDE), r1-r3 = refs (stride r4); sums in m0-m2.
+; 48 bytes = three exact 16B loads per row (offsets 0/16/32), so no tail
+; masking is needed. Rows 0-2 use stride offsets, then pointers advance by
+; 2 rows and row 3 is reached via +stride; net advance is 4 rows.
+%macro SAD_X3_48x4 0
+; row 0
+    mova    m3,  [r0]
+    mova    m4,  [r0 + 16]
+    mova    m5,  [r0 + 32]
+    movu    m6,  [r1]
+    psadbw  m6,  m3
+    paddd   m0,  m6
+    movu    m6,  [r1 + 16]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 32]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2]
+    psadbw  m6,  m3
+    paddd   m1,  m6
+    movu    m6,  [r2 + 16]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 32]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3]
+    psadbw  m6,  m3
+    paddd   m2,  m6
+    movu    m6,  [r3 + 16]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 32]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+
+; row 1
+    mova    m3,  [r0 + FENC_STRIDE]
+    mova    m4,  [r0 + 16 + FENC_STRIDE]
+    mova    m5,  [r0 + 32 + FENC_STRIDE]
+    movu    m6,  [r1 + r4]
+    psadbw  m6,  m3
+    paddd   m0,  m6
+    movu    m6,  [r1 + 16 + r4]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 32 + r4]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2 + r4]
+    psadbw  m6,  m3
+    paddd   m1,  m6
+    movu    m6,  [r2 + 16 + r4]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 32 + r4]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3 + r4]
+    psadbw  m6,  m3
+    paddd   m2,  m6
+    movu    m6,  [r3 + 16 + r4]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 32 + r4]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+
+; row 2
+    mova    m3,  [r0 + FENC_STRIDE * 2]
+    mova    m4,  [r0 + 16 + FENC_STRIDE * 2]
+    mova    m5,  [r0 + 32 + FENC_STRIDE * 2]
+    movu    m6,  [r1 + r4 * 2]
+    psadbw  m6,  m3
+    paddd   m0,  m6
+    movu    m6,  [r1 + 16 + r4 * 2]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 32 + r4 * 2]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2 + r4 * 2]
+    psadbw  m6,  m3
+    paddd   m1,  m6
+    movu    m6,  [r2 + 16 + r4 * 2]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 32 + r4 * 2]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3 + r4 * 2]
+    psadbw  m6,  m3
+    paddd   m2,  m6
+    movu    m6,  [r3 + 16 + r4 * 2]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 32 + r4 * 2]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+
+; advance 2 rows; row 3 is reached via +stride below
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r4 * 2]
+    lea     r2,  [r2 + r4 * 2]
+    lea     r3,  [r3 + r4 * 2]
+; row 3
+    mova    m3,  [r0 + FENC_STRIDE]
+    mova    m4,  [r0 + 16 + FENC_STRIDE]
+    mova    m5,  [r0 + 32 + FENC_STRIDE]
+    movu    m6,  [r1 + r4]
+    psadbw  m6,  m3
+    paddd   m0,  m6
+    movu    m6,  [r1 + 16 + r4]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 32 + r4]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2 + r4]
+    psadbw  m6,  m3
+    paddd   m1,  m6
+    movu    m6,  [r2 + 16 + r4]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 32 + r4]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3 + r4]
+    psadbw  m6,  m3
+    paddd   m2,  m6
+    movu    m6,  [r3 + 16 + r4]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 32 + r4]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+; advance the remaining 2 rows (net +4 rows per invocation)
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r4 * 2]
+    lea     r2,  [r2 + r4 * 2]
+    lea     r3,  [r3 + r4 * 2]
+%endmacro
+
+; SAD_X4_48x4: 4-candidate variant of SAD_X3_48x4.
+; r0 = fenc (stride FENC_STRIDE), r1-r4 = refs (stride r5); sums in m0-m3.
+; Three 16B loads per 48-byte row; same 2+2 row advance pattern (net 4 rows).
+%macro SAD_X4_48x4 0
+; row 0
+    mova    m4,  [r0]
+    mova    m5,  [r0 + 16]
+    mova    m6,  [r0 + 32]
+    movu    m7,  [r1]
+    psadbw  m7,  m4
+    paddd   m0,  m7
+    movu    m7,  [r1 + 16]
+    psadbw  m7,  m5
+    paddd   m0,  m7
+    movu    m7,  [r1 + 32]
+    psadbw  m7,  m6
+    paddd   m0,  m7
+    movu    m7,  [r2]
+    psadbw  m7,  m4
+    paddd   m1,  m7
+    movu    m7,  [r2 + 16]
+    psadbw  m7,  m5
+    paddd   m1,  m7
+    movu    m7,  [r2 + 32]
+    psadbw  m7,  m6
+    paddd   m1,  m7
+    movu    m7,  [r3]
+    psadbw  m7,  m4
+    paddd   m2,  m7
+    movu    m7,  [r3 + 16]
+    psadbw  m7,  m5
+    paddd   m2,  m7
+    movu    m7,  [r3 + 32]
+    psadbw  m7,  m6
+    paddd   m2,  m7
+    movu    m7,  [r4]
+    psadbw  m7,  m4
+    paddd   m3,  m7
+    movu    m7,  [r4 + 16]
+    psadbw  m7,  m5
+    paddd   m3,  m7
+    movu    m7,  [r4 + 32]
+    psadbw  m7,  m6
+    paddd   m3,  m7
+
+; row 1
+    mova    m4,  [r0 + FENC_STRIDE]
+    mova    m5,  [r0 + 16 + FENC_STRIDE]
+    mova    m6,  [r0 + 32 + FENC_STRIDE]
+    movu    m7,  [r1 + r5]
+    psadbw  m7,  m4
+    paddd   m0,  m7
+    movu    m7,  [r1 + 16 + r5]
+    psadbw  m7,  m5
+    paddd   m0,  m7
+    movu    m7,  [r1 + 32 + r5]
+    psadbw  m7,  m6
+    paddd   m0,  m7
+    movu    m7,  [r2 + r5]
+    psadbw  m7,  m4
+    paddd   m1,  m7
+    movu    m7,  [r2 + 16 + r5]
+    psadbw  m7,  m5
+    paddd   m1,  m7
+    movu    m7,  [r2 + 32 + r5]
+    psadbw  m7,  m6
+    paddd   m1,  m7
+    movu    m7,  [r3 + r5]
+    psadbw  m7,  m4
+    paddd   m2,  m7
+    movu    m7,  [r3 + 16 + r5]
+    psadbw  m7,  m5
+    paddd   m2,  m7
+    movu    m7,  [r3 + 32 + r5]
+    psadbw  m7,  m6
+    paddd   m2,  m7
+    movu    m7,  [r4 + r5]
+    psadbw  m7,  m4
+    paddd   m3,  m7
+    movu    m7,  [r4 + 16 + r5]
+    psadbw  m7,  m5
+    paddd   m3,  m7
+    movu    m7,  [r4 + 32 + r5]
+    psadbw  m7,  m6
+    paddd   m3,  m7
+
+; row 2
+    mova    m4,  [r0 + FENC_STRIDE * 2]
+    mova    m5,  [r0 + 16 + FENC_STRIDE * 2]
+    mova    m6,  [r0 + 32 + FENC_STRIDE * 2]
+    movu    m7,  [r1 + r5 * 2]
+    psadbw  m7,  m4
+    paddd   m0,  m7
+    movu    m7,  [r1 + 16 + r5 * 2]
+    psadbw  m7,  m5
+    paddd   m0,  m7
+    movu    m7,  [r1 + 32 + r5 * 2]
+    psadbw  m7,  m6
+    paddd   m0,  m7
+    movu    m7,  [r2 + r5 * 2]
+    psadbw  m7,  m4
+    paddd   m1,  m7
+    movu    m7,  [r2 + 16 + r5 * 2]
+    psadbw  m7,  m5
+    paddd   m1,  m7
+    movu    m7,  [r2 + 32 + r5 * 2]
+    psadbw  m7,  m6
+    paddd   m1,  m7
+    movu    m7,  [r3 + r5 * 2]
+    psadbw  m7,  m4
+    paddd   m2,  m7
+    movu    m7,  [r3 + 16 + r5 * 2]
+    psadbw  m7,  m5
+    paddd   m2,  m7
+    movu    m7,  [r3 + 32 + r5 * 2]
+    psadbw  m7,  m6
+    paddd   m2,  m7
+    movu    m7,  [r4 + r5 * 2]
+    psadbw  m7,  m4
+    paddd   m3,  m7
+    movu    m7,  [r4 + 16 + r5 * 2]
+    psadbw  m7,  m5
+    paddd   m3,  m7
+    movu    m7,  [r4 + 32 + r5 * 2]
+    psadbw  m7,  m6
+    paddd   m3,  m7
+
+; advance 2 rows; row 3 is reached via +stride below
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r5 * 2]
+    lea     r2,  [r2 + r5 * 2]
+    lea     r3,  [r3 + r5 * 2]
+    lea     r4,  [r4 + r5 * 2]
+; row 3
+    mova    m4,  [r0 + FENC_STRIDE]
+    mova    m5,  [r0 + 16 + FENC_STRIDE]
+    mova    m6,  [r0 + 32 + FENC_STRIDE]
+    movu    m7,  [r1 + r5]
+    psadbw  m7,  m4
+    paddd   m0,  m7
+    movu    m7,  [r1 + 16 + r5]
+    psadbw  m7,  m5
+    paddd   m0,  m7
+    movu    m7,  [r1 + 32 + r5]
+    psadbw  m7,  m6
+    paddd   m0,  m7
+    movu    m7,  [r2 + r5]
+    psadbw  m7,  m4
+    paddd   m1,  m7
+    movu    m7,  [r2 + 16 + r5]
+    psadbw  m7,  m5
+    paddd   m1,  m7
+    movu    m7,  [r2 + 32 + r5]
+    psadbw  m7,  m6
+    paddd   m1,  m7
+    movu    m7,  [r3 + r5]
+    psadbw  m7,  m4
+    paddd   m2,  m7
+    movu    m7,  [r3 + 16 + r5]
+    psadbw  m7,  m5
+    paddd   m2,  m7
+    movu    m7,  [r3 + 32 + r5]
+    psadbw  m7,  m6
+    paddd   m2,  m7
+    movu    m7,  [r4 + r5]
+    psadbw  m7,  m4
+    paddd   m3,  m7
+    movu    m7,  [r4 + 16 + r5]
+    psadbw  m7,  m5
+    paddd   m3,  m7
+    movu    m7,  [r4 + 32 + r5]
+    psadbw  m7,  m6
+    paddd   m3,  m7
+; advance the remaining 2 rows (net +4 rows per invocation)
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r5 * 2]
+    lea     r2,  [r2 + r5 * 2]
+    lea     r3,  [r3 + r5 * 2]
+    lea     r4,  [r4 + r5 * 2]
+%endmacro
+
+; SAD_X3_64x4: accumulate 4 rows of 64-wide SAD against 3 candidates.
+; r0 = fenc (stride FENC_STRIDE), r1-r3 = refs (stride r4); sums in m0-m2.
+; Each 64-byte row is processed as two 32-byte halves (offsets 0/16 then
+; 32/48). Same 2+2 row advance pattern as the 48-wide macros (net 4 rows).
+%macro SAD_X3_64x4 0
+; row 0, bytes 0-31
+    mova    m3,  [r0]
+    mova    m4,  [r0 + 16]
+    movu    m5,  [r1]
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r1 + 16]
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r2 + 16]
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    movu    m5,  [r3 + 16]
+    psadbw  m5,  m4
+    paddd   m2,  m5
+; row 0, bytes 32-63
+    mova    m3,  [r0 + 32]
+    mova    m4,  [r0 + 48]
+    movu    m5,  [r1 + 32]
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r1 + 48]
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2 + 32]
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r2 + 48]
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3 + 32]
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    movu    m5,  [r3 + 48]
+    psadbw  m5,  m4
+    paddd   m2,  m5
+
+; row 1
+    mova    m3,  [r0 + FENC_STRIDE]
+    mova    m4,  [r0 + 16 + FENC_STRIDE]
+    movu    m5,  [r1 + r4]
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r1 + 16 + r4]
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2 + r4]
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r2 + 16 + r4]
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3 + r4]
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    movu    m5,  [r3 + 16 + r4]
+    psadbw  m5,  m4
+    paddd   m2,  m5
+    mova    m3,  [r0 + 32 + FENC_STRIDE]
+    mova    m4,  [r0 + 48 + FENC_STRIDE]
+    movu    m5,  [r1 + 32 + r4]
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r1 + 48 + r4]
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2 + 32 + r4]
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r2 + 48 + r4]
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3 + 32 + r4]
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    movu    m5,  [r3 + 48 + r4]
+    psadbw  m5,  m4
+    paddd   m2,  m5
+
+; row 2
+    mova    m3,  [r0 + FENC_STRIDE * 2]
+    mova    m4,  [r0 + 16 + FENC_STRIDE * 2]
+    movu    m5,  [r1 + r4 * 2]
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r1 + 16 + r4 * 2]
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2 + r4 * 2]
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r2 + 16 + r4 * 2]
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3 + r4 * 2]
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    movu    m5,  [r3 + 16 + r4 * 2]
+    psadbw  m5,  m4
+    paddd   m2,  m5
+    mova    m3,  [r0 + 32 + FENC_STRIDE * 2]
+    mova    m4,  [r0 + 48 + FENC_STRIDE * 2]
+    movu    m5,  [r1 + 32 + r4 * 2]
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r1 + 48 + r4 * 2]
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2 + 32 + r4 * 2]
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r2 + 48 + r4 * 2]
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3 + 32 + r4 * 2]
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    movu    m5,  [r3 + 48 + r4 * 2]
+    psadbw  m5,  m4
+    paddd   m2,  m5
+
+; advance 2 rows; row 3 is reached via +stride below
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r4 * 2]
+    lea     r2,  [r2 + r4 * 2]
+    lea     r3,  [r3 + r4 * 2]
+; row 3
+    mova    m3,  [r0 + FENC_STRIDE]
+    mova    m4,  [r0 + 16 + FENC_STRIDE]
+    movu    m5,  [r1 + r4]
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r1 + 16 + r4]
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2 + r4]
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r2 + 16 + r4]
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3 + r4]
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    movu    m5,  [r3 + 16 + r4]
+    psadbw  m5,  m4
+    paddd   m2,  m5
+    mova    m3,  [r0 + 32 + FENC_STRIDE]
+    mova    m4,  [r0 + 48 + FENC_STRIDE]
+    movu    m5,  [r1 + 32 + r4]
+    psadbw  m5,  m3
+    paddd   m0,  m5
+    movu    m5,  [r1 + 48 + r4]
+    psadbw  m5,  m4
+    paddd   m0,  m5
+    movu    m5,  [r2 + 32 + r4]
+    psadbw  m5,  m3
+    paddd   m1,  m5
+    movu    m5,  [r2 + 48 + r4]
+    psadbw  m5,  m4
+    paddd   m1,  m5
+    movu    m5,  [r3 + 32 + r4]
+    psadbw  m5,  m3
+    paddd   m2,  m5
+    movu    m5,  [r3 + 48 + r4]
+    psadbw  m5,  m4
+    paddd   m2,  m5
+; advance the remaining 2 rows (net +4 rows per invocation)
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r4 * 2]
+    lea     r2,  [r2 + r4 * 2]
+    lea     r3,  [r3 + r4 * 2]
+%endmacro
+
+; SAD_X4_64x4: 4-candidate variant of SAD_X3_64x4.
+; r0 = fenc (stride FENC_STRIDE), r1-r4 = refs (stride r5); sums in m0-m3.
+; Each 64-byte row is processed as two 32-byte halves; 2+2 row advance.
+%macro SAD_X4_64x4 0
+; row 0, bytes 0-31
+    mova    m4,  [r0]
+    mova    m5,  [r0 + 16]
+    movu    m6,  [r1]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 16]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 16]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 16]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+    movu    m6,  [r4]
+    psadbw  m6,  m4
+    paddd   m3,  m6
+    movu    m6,  [r4 + 16]
+    psadbw  m6,  m5
+    paddd   m3,  m6
+; row 0, bytes 32-63
+    mova    m4,  [r0 + 32]
+    mova    m5,  [r0 + 48]
+    movu    m6,  [r1 + 32]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 48]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2 + 32]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 48]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3 + 32]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 48]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+    movu    m6,  [r4 + 32]
+    psadbw  m6,  m4
+    paddd   m3,  m6
+    movu    m6,  [r4 + 48]
+    psadbw  m6,  m5
+    paddd   m3,  m6
+
+; row 1
+    mova    m4,  [r0 + FENC_STRIDE]
+    mova    m5,  [r0 + 16 + FENC_STRIDE]
+    movu    m6,  [r1 + r5]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 16 + r5]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2 + r5]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 16 + r5]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3 + r5]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 16 + r5]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+    movu    m6,  [r4 + r5]
+    psadbw  m6,  m4
+    paddd   m3,  m6
+    movu    m6,  [r4 + 16 + r5]
+    psadbw  m6,  m5
+    paddd   m3,  m6
+    mova    m4,  [r0 + 32 + FENC_STRIDE]
+    mova    m5,  [r0 + 48 + FENC_STRIDE]
+    movu    m6,  [r1 + 32 + r5]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 48 + r5]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2 + 32 + r5]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 48 + r5]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3 + 32 + r5]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 48 + r5]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+    movu    m6,  [r4 + 32 + r5]
+    psadbw  m6,  m4
+    paddd   m3,  m6
+    movu    m6,  [r4 + 48 + r5]
+    psadbw  m6,  m5
+    paddd   m3,  m6
+
+; row 2
+    mova    m4,  [r0 + FENC_STRIDE * 2]
+    mova    m5,  [r0 + 16 + FENC_STRIDE * 2]
+    movu    m6,  [r1 + r5 * 2]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 16 + r5 * 2]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2 + r5 * 2]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 16 + r5 * 2]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3 + r5 * 2]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 16 + r5 * 2]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+    movu    m6,  [r4 + r5 * 2]
+    psadbw  m6,  m4
+    paddd   m3,  m6
+    movu    m6,  [r4 + 16 + r5 * 2]
+    psadbw  m6,  m5
+    paddd   m3,  m6
+    mova    m4,  [r0 + 32 + FENC_STRIDE * 2]
+    mova    m5,  [r0 + 48 + FENC_STRIDE * 2]
+    movu    m6,  [r1 + 32 + r5 * 2]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 48 + r5 * 2]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2 + 32 + r5 * 2]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 48 + r5 * 2]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3 + 32 + r5 * 2]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 48 + r5 * 2]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+    movu    m6,  [r4 + 32 + r5 * 2]
+    psadbw  m6,  m4
+    paddd   m3,  m6
+    movu    m6,  [r4 + 48 + r5 * 2]
+    psadbw  m6,  m5
+    paddd   m3,  m6
+
+; advance 2 rows; row 3 is reached via +stride below
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r5 * 2]
+    lea     r2,  [r2 + r5 * 2]
+    lea     r3,  [r3 + r5 * 2]
+    lea     r4,  [r4 + r5 * 2]
+; row 3
+    mova    m4,  [r0 + FENC_STRIDE]
+    mova    m5,  [r0 + 16 + FENC_STRIDE]
+    movu    m6,  [r1 + r5]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 16 + r5]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2 + r5]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 16 + r5]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3 + r5]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 16 + r5]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+    movu    m6,  [r4 + r5]
+    psadbw  m6,  m4
+    paddd   m3,  m6
+    movu    m6,  [r4 + 16 + r5]
+    psadbw  m6,  m5
+    paddd   m3,  m6
+    mova    m4,  [r0 + 32 + FENC_STRIDE]
+    mova    m5,  [r0 + 48 + FENC_STRIDE]
+    movu    m6,  [r1 + 32 + r5]
+    psadbw  m6,  m4
+    paddd   m0,  m6
+    movu    m6,  [r1 + 48 + r5]
+    psadbw  m6,  m5
+    paddd   m0,  m6
+    movu    m6,  [r2 + 32 + r5]
+    psadbw  m6,  m4
+    paddd   m1,  m6
+    movu    m6,  [r2 + 48 + r5]
+    psadbw  m6,  m5
+    paddd   m1,  m6
+    movu    m6,  [r3 + 32 + r5]
+    psadbw  m6,  m4
+    paddd   m2,  m6
+    movu    m6,  [r3 + 48 + r5]
+    psadbw  m6,  m5
+    paddd   m2,  m6
+    movu    m6,  [r4 + 32 + r5]
+    psadbw  m6,  m4
+    paddd   m3,  m6
+    movu    m6,  [r4 + 48 + r5]
+    psadbw  m6,  m5
+    paddd   m3,  m6
+; advance the remaining 2 rows (net +4 rows per invocation)
+    lea     r0,  [r0 + FENC_STRIDE * 2]
+    lea     r1,  [r1 + r5 * 2]
+    lea     r2,  [r2 + r5 * 2]
+    lea     r3,  [r3 + r5 * 2]
+    lea     r4,  [r4 + r5 * 2]
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void pixel_sad_x3_16x16( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
+;                          uint8_t *pix2, intptr_t i_stride, int scores[3] )
+;-----------------------------------------------------------------------------
+; SAD_X num_candidates, width, height: emit one pixel_sad_xN_WxH_mmx2
+; function. Expands the 2-row inner macro H/2 times, then the epilogue.
+; SAD_X%1_2x%2P and SAD_X%1_END are expected to be defined earlier in this
+; file (not visible in this hunk). The first expansion gets arg 1 to
+; initialize the accumulators; the rest get 0 to keep accumulating.
+%macro SAD_X 3
+cglobal pixel_sad_x%1_%2x%3_mmx2, %1+2, %1+2
+    SAD_X%1_2x%2P 1
+%rep %3/2-1
+    SAD_X%1_2x%2P 0
+%endrep
+    SAD_X%1_END
+%endmacro
+
+; Instantiate the MMX2 SAD_X3/SAD_X4 functions for every small block size
+; (16x16 down to 4x4). Wider sizes use the WxH-specific macros above.
+INIT_MMX
+SAD_X 3, 16, 16
+SAD_X 3, 16,  8
+SAD_X 3,  8, 16
+SAD_X 3,  8,  8
+SAD_X 3,  8,  4
+SAD_X 3,  4, 16
+SAD_X 3,  4,  8
+SAD_X 3,  4,  4
+SAD_X 4, 16, 16
+SAD_X 4, 16,  8
+SAD_X 4,  8, 16
+SAD_X 4,  8,  8
+SAD_X 4,  8,  4
+SAD_X 4,  4, 16
+SAD_X 4,  4,  8
+SAD_X 4,  4,  4
+
+
+
+;=============================================================================
+; SAD x3/x4 XMM
+;=============================================================================
+
+; SAD_X3_START_1x16P_SSE2: first 16-pixel row of an x3 SAD; initializes the
+; accumulators m0/m1/m2 (one per candidate) instead of adding to them.
+; AVX path uses the 3-operand psadbw with a memory source to save the loads.
+%macro SAD_X3_START_1x16P_SSE2 0
+    mova     m2, [r0]
+%if cpuflag(avx)
+    psadbw   m0, m2, [r1]
+    psadbw   m1, m2, [r2]
+    psadbw   m2, [r3]
+%else
+    movu     m0, [r1]
+    movu     m1, [r2]
+    movu     m3, [r3]
+    psadbw   m0, m2
+    psadbw   m1, m2
+    psadbw   m2, m3
+%endif
+%endmacro
+
+; SAD_X3_1x16P_SSE2 fenc_off, ref_off: one more 16-pixel row, accumulated
+; into m0/m1/m2. %1 = byte offset into fenc (r0), %2 = offset into each ref.
+%macro SAD_X3_1x16P_SSE2 2
+    mova     m3, [r0+%1]
+%if cpuflag(avx)
+    psadbw   m4, m3, [r1+%2]
+    psadbw   m5, m3, [r2+%2]
+    psadbw   m3, [r3+%2]
+%else
+    movu     m4, [r1+%2]
+    movu     m5, [r2+%2]
+    movu     m6, [r3+%2]
+    psadbw   m4, m3
+    psadbw   m5, m3
+    psadbw   m3, m6
+%endif
+    paddd    m0, m4
+    paddd    m1, m5
+    paddd    m2, m3
+%endmacro
+
+; Pick the scratch register t0 used by the x3 helpers for 3*stride:
+; r6 on x86-64 (enough GPRs), r5 on x86-32.
+%if ARCH_X86_64
+    DECLARE_REG_TMP 6
+%else
+    DECLARE_REG_TMP 5
+%endif
+
+; SAD_X3_4x16P_SSE2 iter, total_iters: process 4 rows of a 16-wide x3 SAD.
+; On the first iteration, caches 3*stride in t0 and starts the accumulators.
+; fenc rows are addressed by constant FENC_STRIDE offsets; r0 only advances
+; every other iteration (by 8 rows), while the refs advance every iteration.
+; The pointer bump is skipped on the last iteration so the END macro runs
+; with untouched flags/regs.
+%macro SAD_X3_4x16P_SSE2 2
+%if %1==0
+    lea  t0, [r4*3]
+    SAD_X3_START_1x16P_SSE2
+%else
+    SAD_X3_1x16P_SSE2 FENC_STRIDE*(0+(%1&1)*4), r4*0
+%endif
+    SAD_X3_1x16P_SSE2 FENC_STRIDE*(1+(%1&1)*4), r4*1
+    SAD_X3_1x16P_SSE2 FENC_STRIDE*(2+(%1&1)*4), r4*2
+    SAD_X3_1x16P_SSE2 FENC_STRIDE*(3+(%1&1)*4), t0
+%if %1 != %2-1
+%if (%1&1) != 0
+    add  r0, 8*FENC_STRIDE
+%endif
+    lea  r1, [r1+4*r4]
+    lea  r2, [r2+4*r4]
+    lea  r3, [r3+4*r4]
+%endif
+%endmacro
+
+; SAD_X3_START_2x8P_SSE2: first two 8-pixel rows of an x3 SAD. Packs two
+; 8-byte rows into one xmm (movq low half + movhps high half), then a single
+; psadbw per candidate initializes m0/m1/m2.
+%macro SAD_X3_START_2x8P_SSE2 0
+    movq     m3, [r0]
+    movq     m0, [r1]
+    movq     m1, [r2]
+    movq     m2, [r3]
+    movhps   m3, [r0+FENC_STRIDE]
+    movhps   m0, [r1+r4]
+    movhps   m1, [r2+r4]
+    movhps   m2, [r3+r4]
+    psadbw   m0, m3
+    psadbw   m1, m3
+    psadbw   m2, m3
+%endmacro
+
+; SAD_X3_2x8P_SSE2 fenc_off0, ref_off0, fenc_off1, ref_off1: two more
+; 8-pixel rows, accumulated into m0/m1/m2 (same two-rows-per-xmm packing).
+%macro SAD_X3_2x8P_SSE2 4
+    movq     m6, [r0+%1]
+    movq     m3, [r1+%2]
+    movq     m4, [r2+%2]
+    movq     m5, [r3+%2]
+    movhps   m6, [r0+%3]
+    movhps   m3, [r1+%4]
+    movhps   m4, [r2+%4]
+    movhps   m5, [r3+%4]
+    psadbw   m3, m6
+    psadbw   m4, m6
+    psadbw   m5, m6
+    paddd    m0, m3
+    paddd    m1, m4
+    paddd    m2, m5
+%endmacro
+
+; SAD_X4_START_2x8P_SSE2: x4 version of the 2x8 start; refs r1-r4, stride
+; r5; initializes accumulators m0-m3 with the first two packed rows.
+%macro SAD_X4_START_2x8P_SSE2 0
+    movq     m4, [r0]
+    movq     m0, [r1]
+    movq     m1, [r2]
+    movq     m2, [r3]
+    movq     m3, [r4]
+    movhps   m4, [r0+FENC_STRIDE]
+    movhps   m0, [r1+r5]
+    movhps   m1, [r2+r5]
+    movhps   m2, [r3+r5]
+    movhps   m3, [r4+r5]
+    psadbw   m0, m4
+    psadbw   m1, m4
+    psadbw   m2, m4
+    psadbw   m3, m4
+%endmacro
+
+; SAD_X4_2x8P_SSE2 fenc_off0, ref_off0, fenc_off1, ref_off1: two more
+; 8-pixel rows for 4 candidates, accumulated into m0-m3. Candidates are
+; handled in two pairs (r1/r2 then r3/r4) to stay within m4/m5 scratch.
+%macro SAD_X4_2x8P_SSE2 4
+    movq     m6, [r0+%1]
+    movq     m4, [r1+%2]
+    movq     m5, [r2+%2]
+    movhps   m6, [r0+%3]
+    movhps   m4, [r1+%4]
+    movhps   m5, [r2+%4]
+    psadbw   m4, m6
+    psadbw   m5, m6
+    paddd    m0, m4
+    paddd    m1, m5
+    movq     m4, [r3+%2]
+    movq     m5, [r4+%2]
+    movhps   m4, [r3+%4]
+    movhps   m5, [r4+%4]
+    psadbw   m4, m6
+    psadbw   m5, m6
+    paddd    m2, m4
+    paddd    m3, m5
+%endmacro
+
+; SAD_X4_START_1x16P_SSE2: first 16-pixel row of an x4 SAD; initializes
+; accumulators m0-m3. AVX path uses 3-operand psadbw with memory sources.
+%macro SAD_X4_START_1x16P_SSE2 0
+    mova     m3, [r0]
+%if cpuflag(avx)
+    psadbw   m0, m3, [r1]
+    psadbw   m1, m3, [r2]
+    psadbw   m2, m3, [r3]
+    psadbw   m3, [r4]
+%else
+    movu     m0, [r1]
+    movu     m1, [r2]
+    movu     m2, [r3]
+    movu     m4, [r4]
+    psadbw   m0, m3
+    psadbw   m1, m3
+    psadbw   m2, m3
+    psadbw   m3, m4
+%endif
+%endmacro
+
+; SAD_X4_1x16P_SSE2 fenc_off, ref_off: one more 16-pixel row for 4
+; candidates, accumulated into m0-m3; candidates processed in two pairs
+; through scratch m4/m5.
+%macro SAD_X4_1x16P_SSE2 2
+    mova     m6, [r0+%1]
+%if cpuflag(avx)
+    psadbw   m4, m6, [r1+%2]
+    psadbw   m5, m6, [r2+%2]
+%else
+    movu     m4, [r1+%2]
+    movu     m5, [r2+%2]
+    psadbw   m4, m6
+    psadbw   m5, m6
+%endif
+    paddd    m0, m4
+    paddd    m1, m5
+%if cpuflag(avx)
+    psadbw   m4, m6, [r3+%2]
+    psadbw   m5, m6, [r4+%2]
+%else
+    movu     m4, [r3+%2]
+    movu     m5, [r4+%2]
+    psadbw   m4, m6
+    psadbw   m5, m6
+%endif
+    paddd    m2, m4
+    paddd    m3, m5
+%endmacro
+
+; SAD_X4_4x16P_SSE2 iter, total_iters: 4 rows of a 16-wide x4 SAD.
+; First iteration caches 3*stride in r6 and starts the accumulators; r0
+; advances by 8 rows every other iteration (fenc rows addressed by constant
+; FENC_STRIDE offsets), refs advance by 4*stride each iteration except last.
+%macro SAD_X4_4x16P_SSE2 2
+%if %1==0
+    lea  r6, [r5*3]
+    SAD_X4_START_1x16P_SSE2
+%else
+    SAD_X4_1x16P_SSE2 FENC_STRIDE*(0+(%1&1)*4), r5*0
+%endif
+    SAD_X4_1x16P_SSE2 FENC_STRIDE*(1+(%1&1)*4), r5*1
+    SAD_X4_1x16P_SSE2 FENC_STRIDE*(2+(%1&1)*4), r5*2
+    SAD_X4_1x16P_SSE2 FENC_STRIDE*(3+(%1&1)*4), r6
+%if %1 != %2-1
+%if (%1&1) != 0
+    add  r0, 8*FENC_STRIDE
+%endif
+    lea  r1, [r1+4*r5]
+    lea  r2, [r2+4*r5]
+    lea  r3, [r3+4*r5]
+    lea  r4, [r4+4*r5]
+%endif
+%endmacro
+
+; Process 4 rows of an 8-wide block against 3 references (stride r4,
+; t0 = 3*stride). Same iteration scheme as SAD_X4_4x16P_SSE2.
+%macro SAD_X3_4x8P_SSE2 2
+%if %1==0
+    lea  t0, [r4*3]
+    SAD_X3_START_2x8P_SSE2
+%else
+    SAD_X3_2x8P_SSE2 FENC_STRIDE*(0+(%1&1)*4), r4*0, FENC_STRIDE*(1+(%1&1)*4), r4*1
+%endif
+    SAD_X3_2x8P_SSE2 FENC_STRIDE*(2+(%1&1)*4), r4*2, FENC_STRIDE*(3+(%1&1)*4), t0
+%if %1 != %2-1
+%if (%1&1) != 0
+    add  r0, 8*FENC_STRIDE
+%endif
+    lea  r1, [r1+4*r4]
+    lea  r2, [r2+4*r4]
+    lea  r3, [r3+4*r4]
+%endif
+%endmacro
+
+; Process 4 rows of an 8-wide block against 4 references (stride r5,
+; r6 = 3*stride). Same iteration scheme as SAD_X4_4x16P_SSE2.
+%macro SAD_X4_4x8P_SSE2 2
+%if %1==0
+    lea    r6, [r5*3]
+    SAD_X4_START_2x8P_SSE2
+%else
+    SAD_X4_2x8P_SSE2 FENC_STRIDE*(0+(%1&1)*4), r5*0, FENC_STRIDE*(1+(%1&1)*4), r5*1
+%endif
+    SAD_X4_2x8P_SSE2 FENC_STRIDE*(2+(%1&1)*4), r5*2, FENC_STRIDE*(3+(%1&1)*4), r6
+%if %1 != %2-1
+%if (%1&1) != 0
+    add  r0, 8*FENC_STRIDE
+%endif
+    lea  r1, [r1+4*r5]
+    lea  r2, [r2+4*r5]
+    lea  r3, [r3+4*r5]
+    lea  r4, [r4+4*r5]
+%endif
+%endmacro
+
+; Reduce the two 64-bit partial sums in each of m0-m2 and store the three
+; dword scores at r5 (the scores[3] output pointer, loaded from the stack
+; if not already in a register). NOTE: the macro parameter %1 is not
+; referenced in the body.
+%macro SAD_X3_END_SSE2 1
+    movifnidn r5, r5mp
+    movhlps    m3, m0
+    movhlps    m4, m1
+    movhlps    m5, m2
+    paddd      m0, m3
+    paddd      m1, m4
+    paddd      m2, m5
+    movd   [r5+0], m0
+    movd   [r5+4], m1
+    movd   [r5+8], m2
+    RET
+%endmacro
+
+; Merge the four accumulators pairwise (m1/m3 shifted into the upper dwords
+; of m0/m2), fold high/low halves, and store 4 dword scores at the pointer
+; in r6mp. NOTE: the macro parameter %1 is not referenced in the body.
+%macro SAD_X4_END_SSE2 1
+    mov      r0, r6mp
+    psllq      m1, 32
+    psllq      m3, 32
+    paddd      m0, m1
+    paddd      m2, m3
+    movhlps    m1, m0
+    movhlps    m3, m2
+    paddd      m0, m1
+    paddd      m2, m3
+    movq   [r0+0], m0
+    movq   [r0+8], m2
+    RET
+%endmacro
+
+; AVX2: seed m0-m2 from two 16-wide rows at once — fenc rows fill a whole
+; ymm (relies on FENC_STRIDE == 16), each reference's second row goes into
+; the upper 128-bit lane via vinserti128.
+%macro SAD_X3_START_2x16P_AVX2 0
+    movu    m3, [r0] ; assumes FENC_STRIDE == 16
+    movu   xm0, [r1]
+    movu   xm1, [r2]
+    movu   xm2, [r3]
+    vinserti128  m0, m0, [r1+r4], 1
+    vinserti128  m1, m1, [r2+r4], 1
+    vinserti128  m2, m2, [r3+r4], 1
+    psadbw  m0, m3
+    psadbw  m1, m3
+    psadbw  m2, m3
+%endmacro
+
+; AVX2: accumulate two more 16-wide rows into m0-m2.
+; %1 = fenc offset (two consecutive rows in one ymm), %2/%3 = ref offsets.
+%macro SAD_X3_2x16P_AVX2 3
+    movu    m3, [r0+%1] ; assumes FENC_STRIDE == 16
+    movu   xm4, [r1+%2]
+    movu   xm5, [r2+%2]
+    movu   xm6, [r3+%2]
+    vinserti128  m4, m4, [r1+%3], 1
+    vinserti128  m5, m5, [r2+%3], 1
+    vinserti128  m6, m6, [r3+%3], 1
+    psadbw  m4, m3
+    psadbw  m5, m3
+    psadbw  m6, m3
+    paddw   m0, m4
+    paddw   m1, m5
+    paddw   m2, m6
+%endmacro
+
+; AVX2: process 4 rows of a 16-wide block against 3 references (stride r4,
+; t0 = 3*stride). Same %1/%2 iteration scheme as the SSE2 4-row macros.
+%macro SAD_X3_4x16P_AVX2 2
+%if %1==0
+    lea  t0, [r4*3]
+    SAD_X3_START_2x16P_AVX2
+%else
+    SAD_X3_2x16P_AVX2 FENC_STRIDE*(0+(%1&1)*4), r4*0, r4*1
+%endif
+    SAD_X3_2x16P_AVX2 FENC_STRIDE*(2+(%1&1)*4), r4*2, t0
+%if %1 != %2-1
+%if (%1&1) != 0
+    add  r0, 8*FENC_STRIDE
+%endif
+    lea  r1, [r1+4*r4]
+    lea  r2, [r2+4*r4]
+    lea  r3, [r3+4*r4]
+%endif
+%endmacro
+
+; AVX2 x4: broadcast each fenc row to both 128-bit lanes, pack refs 1+3 and
+; 2+4 into the two lanes of m0/m1 (and m2/m3 for the second row), so m0
+; accumulates scores 0/2 and m1 scores 1/3.
+%macro SAD_X4_START_2x16P_AVX2 0
+    vbroadcasti128 m4, [r0]
+    vbroadcasti128 m5, [r0+FENC_STRIDE]
+    movu   xm0, [r1]
+    movu   xm1, [r2]
+    movu   xm2, [r1+r5]
+    movu   xm3, [r2+r5]
+    vinserti128 m0, m0, [r3], 1
+    vinserti128 m1, m1, [r4], 1
+    vinserti128 m2, m2, [r3+r5], 1
+    vinserti128 m3, m3, [r4+r5], 1
+    psadbw  m0, m4
+    psadbw  m1, m4
+    psadbw  m2, m5
+    psadbw  m3, m5
+    paddw   m0, m2
+    paddw   m1, m3
+%endmacro
+
+; AVX2 x4: accumulate two more 16-wide rows with the same lane layout as
+; SAD_X4_START_2x16P_AVX2. %1/%3 = fenc offsets, %2/%4 = ref offsets.
+%macro SAD_X4_2x16P_AVX2 4
+    vbroadcasti128 m6, [r0+%1]
+    vbroadcasti128 m7, [r0+%3]
+    movu   xm2, [r1+%2]
+    movu   xm3, [r2+%2]
+    movu   xm4, [r1+%4]
+    movu   xm5, [r2+%4]
+    vinserti128 m2, m2, [r3+%2], 1
+    vinserti128 m3, m3, [r4+%2], 1
+    vinserti128 m4, m4, [r3+%4], 1
+    vinserti128 m5, m5, [r4+%4], 1
+    psadbw  m2, m6
+    psadbw  m3, m6
+    psadbw  m4, m7
+    psadbw  m5, m7
+    paddd   m0, m2
+    paddd   m1, m3
+    paddd   m0, m4
+    paddd   m1, m5
+%endmacro
+
+; AVX2: process 4 rows of a 16-wide block against 4 references (stride r5,
+; r6 = 3*stride). Same %1/%2 iteration scheme as the SSE2 4-row macros.
+%macro SAD_X4_4x16P_AVX2 2
+%if %1==0
+    lea  r6, [r5*3]
+    SAD_X4_START_2x16P_AVX2
+%else
+    SAD_X4_2x16P_AVX2 FENC_STRIDE*(0+(%1&1)*4), r5*0, FENC_STRIDE*(1+(%1&1)*4), r5*1
+%endif
+    SAD_X4_2x16P_AVX2 FENC_STRIDE*(2+(%1&1)*4), r5*2, FENC_STRIDE*(3+(%1&1)*4), r6
+%if %1 != %2-1
+%if (%1&1) != 0
+    add  r0, 8*FENC_STRIDE
+%endif
+    lea  r1, [r1+4*r5]
+    lea  r2, [r2+4*r5]
+    lea  r3, [r3+4*r5]
+    lea  r4, [r4+4*r5]
+%endif
+%endmacro
+
+; AVX2 x4 for 32-wide blocks: each ref row fills a whole ymm; psadbw results
+; for refs (0,2) and (1,3) are interleaved with packusdw so m0 holds scores
+; 0/2 and m1 holds scores 1/3. Seeds from two rows.
+%macro SAD_X4_START_2x32P_AVX2 0
+    mova        m4, [r0]
+    movu        m0, [r1]
+    movu        m2, [r2]
+    movu        m1, [r3]
+    movu        m3, [r4]
+    psadbw      m0, m4
+    psadbw      m2, m4
+    psadbw      m1, m4
+    psadbw      m3, m4
+    packusdw    m0, m2
+    packusdw    m1, m3
+
+    mova        m6, [r0+FENC_STRIDE]
+    movu        m2, [r1+r5]
+    movu        m4, [r2+r5]
+    movu        m3, [r3+r5]
+    movu        m5, [r4+r5]
+    psadbw      m2, m6
+    psadbw      m4, m6
+    psadbw      m3, m6
+    psadbw      m5, m6
+    packusdw    m2, m4
+    packusdw    m3, m5
+    paddd       m0, m2
+    paddd       m1, m3
+%endmacro
+
+; AVX2 x4, 32-wide: accumulate two more rows with the same packusdw
+; interleaving as SAD_X4_START_2x32P_AVX2.
+; %1/%3 = fenc offsets, %2/%4 = ref offsets.
+%macro SAD_X4_2x32P_AVX2 4
+    mova        m6, [r0+%1]
+    movu        m2, [r1+%2]
+    movu        m4, [r2+%2]
+    movu        m3, [r3+%2]
+    movu        m5, [r4+%2]
+    psadbw      m2, m6
+    psadbw      m4, m6
+    psadbw      m3, m6
+    psadbw      m5, m6
+    packusdw    m2, m4
+    packusdw    m3, m5
+    paddd       m0, m2
+    paddd       m1, m3
+
+    mova        m6, [r0+%3]
+    movu        m2, [r1+%4]
+    movu        m4, [r2+%4]
+    movu        m3, [r3+%4]
+    movu        m5, [r4+%4]
+    psadbw      m2, m6
+    psadbw      m4, m6
+    psadbw      m3, m6
+    psadbw      m5, m6
+    packusdw    m2, m4
+    packusdw    m3, m5
+    paddd       m0, m2
+    paddd       m1, m3
+%endmacro
+
+; AVX2: process 4 rows of a 32-wide block against 4 references (stride r5,
+; r6 = 3*stride). Same %1/%2 iteration scheme as the other 4-row macros.
+%macro SAD_X4_4x32P_AVX2 2
+%if %1==0
+    lea  r6, [r5*3]
+    SAD_X4_START_2x32P_AVX2
+%else
+    SAD_X4_2x32P_AVX2 FENC_STRIDE*(0+(%1&1)*4), r5*0, FENC_STRIDE*(1+(%1&1)*4), r5*1
+%endif
+    SAD_X4_2x32P_AVX2 FENC_STRIDE*(2+(%1&1)*4), r5*2, FENC_STRIDE*(3+(%1&1)*4), r6
+%if %1 != %2-1
+%if (%1&1) != 0
+    add  r0, 8*FENC_STRIDE
+%endif
+    lea  r1, [r1+4*r5]
+    lea  r2, [r2+4*r5]
+    lea  r3, [r3+4*r5]
+    lea  r4, [r4+4*r5]
+%endif
+%endmacro
+
+; AVX2 x3 epilogue: pack the three accumulators into one register, fold the
+; two 128-bit lanes, and store 4 dwords (score 0,1,2 plus a don't-care) at
+; the scores pointer r5. Lane layout noted inline.
+%macro SAD_X3_END_AVX2 0
+    movifnidn r5, r5mp
+    packssdw  m0, m1        ; 0 0 1 1 0 0 1 1
+    packssdw  m2, m2        ; 2 2 _ _ 2 2 _ _
+    phaddd    m0, m2        ; 0 1 2 _ 0 1 2 _
+    vextracti128 xm1, m0, 1
+    paddd    xm0, xm1       ; 0 1 2 _
+    mova    [r5], xm0
+    RET
+%endmacro
+
+; AVX2 x4 epilogue: compact the interleaved accumulators (scores 0/2 in m0,
+; 1/3 in m1 — see SAD_X4_START_2x16P_AVX2) into 4 dwords and store them at
+; the scores pointer loaded from r6mp.
+%macro SAD_X4_END_AVX2 0
+    mov       r0, r6mp
+    pshufd     m0, m0, 0x8
+    pshufd     m1, m1, 0x8
+    vextracti128 xm2, m0, 1
+    vextracti128 xm3, m1, 1
+    punpcklqdq   xm0, xm1
+    punpcklqdq   xm2, xm3
+    phaddd   xm0, xm2       ; 0 1 2 3
+    mova    [r0], xm0
+    RET
+%endmacro
+
+; Epilogue for the 32-wide AVX2 x4 path (packusdw layout): fold lanes,
+; horizontally add, and store 4 dword scores at r6mp.
+%macro SAD_X4_32P_END_AVX2 0
+    mov          r0, r6mp
+    vextracti128 xm2, m0, 1
+    vextracti128 xm3, m1, 1
+    paddd        xm0, xm2
+    paddd        xm1, xm3
+    phaddd       xm0, xm1
+    mova         [r0], xm0
+    RET
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void pixel_sad_x3_16x16( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
+;                          uint8_t *pix2, intptr_t i_stride, int scores[3] )
+;-----------------------------------------------------------------------------
+; Emit one pixel_sad_x%1_%2x%3 function: %1 = number of references (3 or 4),
+; %2 x %3 = block size, %4 = xmm register count for cglobal. Unrolls the
+; 4-row macro %3/4 times; the END macro's argument is 1 only for height 64.
+%macro SAD_X_SSE2 4
+cglobal pixel_sad_x%1_%2x%3, 2+%1,3+%1,%4
+%assign x 0
+%rep %3/4
+    SAD_X%1_4x%2P_SSE2 x, %3/4
+%assign x x+1
+%endrep
+%if %3 == 64
+    SAD_X%1_END_SSE2 1
+%else
+    SAD_X%1_END_SSE2 0
+%endif
+%endmacro
+
+; pixel_sad_x3_12x16: loads the 12-wide mask from [MSK] into m4, zeroes the
+; three accumulators, then runs 4 unrolled 4-row iterations.
+%macro SAD_X3_W12 0
+cglobal pixel_sad_x3_12x16, 5, 7, 8
+    mova  m4,  [MSK]
+    pxor  m0,  m0
+    pxor  m1,  m1
+    pxor  m2,  m2
+
+    SAD_X3_12x4
+    SAD_X3_12x4
+    SAD_X3_12x4
+    SAD_X3_12x4
+    SAD_X3_END_SSE2 1
+%endmacro
+
+; pixel_sad_x4_12x16: like SAD_X3_W12 but with four references and the mask
+; kept in m6.
+%macro SAD_X4_W12 0
+cglobal pixel_sad_x4_12x16, 6, 8, 8
+    mova  m6,  [MSK]
+    pxor  m0,  m0
+    pxor  m1,  m1
+    pxor  m2,  m2
+    pxor  m3,  m3
+
+    SAD_X4_12x4
+    SAD_X4_12x4
+    SAD_X4_12x4
+    SAD_X4_12x4
+    SAD_X4_END_SSE2 1
+%endmacro
+
+; pixel_sad_x3_24x32: 16 rows per loop iteration, r6 counts down 32->0.
+; NOTE(review): the `cmp r6, 0` is redundant — `sub` already sets ZF for
+; the following jnz. Left as-is to keep this imported patch byte-identical.
+%macro SAD_X3_W24 0
+cglobal pixel_sad_x3_24x32, 5, 7, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    mov   r6, 32
+
+.loop:
+    SAD_X3_24x4
+    SAD_X3_24x4
+    SAD_X3_24x4
+    SAD_X3_24x4
+
+    sub r6,  16
+    cmp r6,  0
+jnz .loop
+    SAD_X3_END_SSE2 1
+%endmacro
+
+; pixel_sad_x4_24x32: the row counter lives in r7 on x86-64 but must be
+; spilled to a stack slot on 32-bit (not enough free GPRs with 4 ref
+; pointers + stride in use).
+%macro SAD_X4_W24 0
+%if ARCH_X86_64 == 1
+cglobal pixel_sad_x4_24x32, 6, 8, 8
+%define count r7
+%else
+cglobal pixel_sad_x4_24x32, 6, 7, 8, 0-4
+%define count dword [rsp]
+%endif
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+    mov   count, 32
+
+.loop:
+    SAD_X4_24x4
+    SAD_X4_24x4
+    SAD_X4_24x4
+    SAD_X4_24x4
+
+    sub count,  16
+    jnz .loop
+    SAD_X4_END_SSE2 1
+
+%endmacro
+
+; Emit pixel_sad_x3_32x{8,16,24,32,64}: heights up to 24 are fully unrolled
+; (one SAD_X3_32x4 per 4 rows); 32 and 64 loop 16 rows at a time with a
+; countdown in r6. NOTE(review): `cmp r6, 0` after `sub` is redundant (sub
+; sets ZF); kept byte-identical since this is an imported patch.
+%macro SAD_X3_W32 0
+cglobal pixel_sad_x3_32x8, 5, 6, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_END_SSE2 1
+
+cglobal pixel_sad_x3_32x16, 5, 6, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_END_SSE2 1
+
+cglobal pixel_sad_x3_32x24, 5, 6, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_END_SSE2 1
+
+cglobal pixel_sad_x3_32x32, 5, 7, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    mov   r6, 32
+
+.loop:
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+
+    sub r6,  16
+    cmp r6,  0
+jnz .loop
+    SAD_X3_END_SSE2 1
+
+cglobal pixel_sad_x3_32x64, 5, 7, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    mov   r6, 64
+
+.loop1:
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+
+    sub r6,  16
+    cmp r6,  0
+jnz .loop1
+    SAD_X3_END_SSE2 1
+%endmacro
+
+; Emit pixel_sad_x4_32x{8,16,24,32,64}: small heights fully unrolled; 32 and
+; 64 loop 16 rows at a time. The loop counter is r7 on x86-64, or a stack
+; dword on 32-bit where registers are exhausted.
+%macro SAD_X4_W32 0
+cglobal pixel_sad_x4_32x8, 6, 7, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_END_SSE2 1
+
+cglobal pixel_sad_x4_32x16, 6, 7, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_END_SSE2 1
+
+cglobal pixel_sad_x4_32x24, 6, 7, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_END_SSE2 1
+
+%if ARCH_X86_64 == 1
+cglobal pixel_sad_x4_32x32, 6, 8, 8
+%define count r7
+%else
+cglobal pixel_sad_x4_32x32, 6, 7, 8, 0-4
+%define count dword [rsp]
+%endif
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+    mov   count, 32
+
+.loop:
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_32x4
+
+    sub count,  16
+    jnz .loop
+    SAD_X4_END_SSE2 1
+
+%if ARCH_X86_64 == 1
+cglobal pixel_sad_x4_32x64, 6, 8, 8
+%define count r7
+%else
+cglobal pixel_sad_x4_32x64, 6, 7, 8, 0-4
+%define count dword [rsp]
+%endif
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+    mov   count, 64
+
+.loop:
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_32x4
+    SAD_X4_32x4
+
+    sub count,  16
+    jnz .loop
+    SAD_X4_END_SSE2 1
+
+%endmacro
+
+; pixel_sad_x3_48x64: loops 16 rows at a time with countdown in r6.
+%macro SAD_X3_W48 0
+cglobal pixel_sad_x3_48x64, 5, 7, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    mov   r6, 64
+
+.loop:
+    SAD_X3_48x4
+    SAD_X3_48x4
+    SAD_X3_48x4
+    SAD_X3_48x4
+
+    sub r6,  16
+    jnz .loop
+    SAD_X3_END_SSE2 1
+%endmacro
+
+; pixel_sad_x4_48x64: loops 16 rows at a time; counter in r7 (64-bit) or a
+; stack dword (32-bit).
+%macro SAD_X4_W48 0
+%if ARCH_X86_64 == 1
+cglobal pixel_sad_x4_48x64, 6, 8, 8
+%define count r7
+%else
+cglobal pixel_sad_x4_48x64, 6, 7, 8, 0-4
+%define count dword [rsp]
+%endif
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+    mov   count, 64
+
+.loop:
+    SAD_X4_48x4
+    SAD_X4_48x4
+    SAD_X4_48x4
+    SAD_X4_48x4
+
+    sub count,  16
+    jnz .loop
+    SAD_X4_END_SSE2 1
+%endmacro
+
+; Emit pixel_sad_x3_64x{16,32,48,64}: identical loop bodies (8 rows per
+; iteration), differing only in the initial r6 row count.
+%macro SAD_X3_W64 0
+cglobal pixel_sad_x3_64x16, 5, 7, 7
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    mov   r6, 16
+
+.loop:
+    SAD_X3_64x4
+    SAD_X3_64x4
+
+    sub r6,  8
+    jnz .loop
+    SAD_X3_END_SSE2 1
+
+cglobal pixel_sad_x3_64x32, 5, 7, 7
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    mov   r6, 32
+
+.loop:
+    SAD_X3_64x4
+    SAD_X3_64x4
+
+    sub r6,  8
+    jnz .loop
+    SAD_X3_END_SSE2 1
+
+cglobal pixel_sad_x3_64x48, 5, 7, 7
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    mov   r6, 48
+
+.loop:
+    SAD_X3_64x4
+    SAD_X3_64x4
+
+    sub r6,  8
+    jnz .loop
+    SAD_X3_END_SSE2 1
+
+cglobal pixel_sad_x3_64x64, 5, 7, 7
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    mov   r6, 64
+
+.loop:
+    SAD_X3_64x4
+    SAD_X3_64x4
+
+    sub r6,  8
+    jnz .loop
+    SAD_X3_END_SSE2 1
+%endmacro
+
+; Emit pixel_sad_x4_64x{16,32,48,64}: identical 8-rows-per-iteration loops,
+; differing only in the initial count. Counter is r7 on x86-64, a stack
+; dword on 32-bit.
+%macro SAD_X4_W64 0
+%if ARCH_X86_64 == 1
+cglobal pixel_sad_x4_64x16, 6, 8, 8
+%define count r7
+%else
+cglobal pixel_sad_x4_64x16, 6, 7, 8, 0-4
+%define count dword [rsp]
+%endif
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+    mov   count, 16
+
+.loop:
+    SAD_X4_64x4
+    SAD_X4_64x4
+
+    sub count,  8
+    jnz .loop
+    SAD_X4_END_SSE2 1
+
+%if ARCH_X86_64 == 1
+cglobal pixel_sad_x4_64x32, 6, 8, 8
+%define count r7
+%else
+cglobal pixel_sad_x4_64x32, 6, 7, 8, 0-4
+%define count dword [rsp]
+%endif
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+    mov   count, 32
+
+.loop:
+    SAD_X4_64x4
+    SAD_X4_64x4
+
+    sub count,  8
+    jnz .loop
+    SAD_X4_END_SSE2 1
+
+%if ARCH_X86_64 == 1
+cglobal pixel_sad_x4_64x48, 6, 8, 8
+%define count r7
+%else
+cglobal pixel_sad_x4_64x48, 6, 7, 8, 0-4
+%define count dword [rsp]
+%endif
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+    mov   count, 48
+
+.loop:
+    SAD_X4_64x4
+    SAD_X4_64x4
+
+    sub count,  8
+    jnz .loop
+    SAD_X4_END_SSE2 1
+
+%if ARCH_X86_64 == 1
+cglobal pixel_sad_x4_64x64, 6, 8, 8
+%define count r7
+%else
+cglobal pixel_sad_x4_64x64, 6, 7, 8, 0-4
+%define count dword [rsp]
+%endif
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    pxor  m3, m3
+    mov   count, 64
+
+.loop:
+    SAD_X4_64x4
+    SAD_X4_64x4
+
+    sub count,  8
+    jnz .loop
+    SAD_X4_END_SSE2 1
+%endmacro
+
+; Instantiate the SAD_X functions per instruction set. The last SAD_X_SSE2
+; argument is the xmm register count passed to cglobal.
+INIT_XMM sse2
+SAD_X_SSE2 3, 16, 16, 7
+SAD_X_SSE2 3, 16,  8, 7
+SAD_X_SSE2 3,  8, 16, 7
+SAD_X_SSE2 3,  8,  8, 7
+SAD_X_SSE2 3,  8,  4, 7
+SAD_X_SSE2 4, 16, 16, 7
+SAD_X_SSE2 4, 16,  8, 7
+SAD_X_SSE2 4,  8, 16, 7
+SAD_X_SSE2 4,  8,  8, 7
+SAD_X_SSE2 4,  8,  4, 7
+
+INIT_XMM sse3
+SAD_X_SSE2 3, 16, 16, 7
+SAD_X_SSE2 3, 16,  8, 7
+SAD_X_SSE2 3, 16,  4, 7
+SAD_X_SSE2 4, 16, 16, 7
+SAD_X_SSE2 4, 16,  8, 7
+SAD_X_SSE2 4, 16,  4, 7
+
+INIT_XMM ssse3
+SAD_X3_W12
+SAD_X3_W32
+SAD_X3_W24
+SAD_X3_W48
+SAD_X3_W64
+SAD_X_SSE2  3, 16, 64, 7
+SAD_X_SSE2  3, 16, 32, 7
+SAD_X_SSE2  3, 16, 16, 7
+SAD_X_SSE2  3, 16, 12, 7
+SAD_X_SSE2  3, 16,  8, 7
+SAD_X_SSE2  3,  8, 32, 7
+SAD_X_SSE2  3,  8, 16, 7
+SAD_X4_W12
+SAD_X4_W24
+SAD_X4_W32
+SAD_X4_W48
+SAD_X4_W64
+SAD_X_SSE2  4, 16, 64, 7
+SAD_X_SSE2  4, 16, 32, 7
+SAD_X_SSE2  4, 16, 16, 7
+SAD_X_SSE2  4, 16, 12, 7
+SAD_X_SSE2  4, 16,  8, 7
+SAD_X_SSE2  4,  8, 32, 7
+SAD_X_SSE2  4,  8, 16, 7
+SAD_X_SSE2  4,  8,  8, 7
+SAD_X_SSE2  4,  8,  4, 7
+
+INIT_XMM avx
+SAD_X3_W12
+SAD_X3_W32
+SAD_X3_W24
+SAD_X3_W48
+SAD_X3_W64
+SAD_X_SSE2 3, 16, 64, 7
+SAD_X_SSE2 3, 16, 32, 6
+SAD_X_SSE2 3, 16, 16, 6
+SAD_X_SSE2 3, 16, 12, 6
+SAD_X_SSE2 3, 16,  8, 6
+SAD_X_SSE2 3, 16,  4, 6
+SAD_X4_W12
+SAD_X4_W24
+SAD_X4_W32
+SAD_X4_W48
+SAD_X4_W64
+SAD_X_SSE2 4, 16, 64, 7
+SAD_X_SSE2 4, 16, 32, 7
+SAD_X_SSE2 4, 16, 16, 7
+SAD_X_SSE2 4, 16, 12, 7
+SAD_X_SSE2 4, 16,  8, 7
+SAD_X_SSE2 4, 16,  4, 7
+
+; Emit one AVX2 pixel_sad_x%1_%2x%3 function (%4 = ymm register count).
+; 32-wide x4 blocks need the dedicated 32P epilogue because their
+; accumulator layout differs (packusdw interleaving).
+%macro SAD_X_AVX2 4
+cglobal pixel_sad_x%1_%2x%3, 2+%1,3+%1,%4
+%assign x 0
+%rep %3/4
+    SAD_X%1_4x%2P_AVX2 x, %3/4
+%assign x x+1
+%endrep
+
+  %if (%1==4) && (%2==32)
+    SAD_X%1_32P_END_AVX2
+  %else
+    SAD_X%1_END_AVX2
+  %endif
+%endmacro
+
+; AVX2 instantiations (16-wide x3/x4 and 32-wide x4 variants).
+INIT_YMM avx2
+SAD_X_AVX2 3, 16, 32, 7
+SAD_X_AVX2 3, 16, 16, 7
+SAD_X_AVX2 3, 16, 12, 7
+SAD_X_AVX2 3, 16,  8, 7
+SAD_X_AVX2 4, 16, 32, 8
+SAD_X_AVX2 4, 16, 16, 8
+SAD_X_AVX2 4, 16, 12, 8
+SAD_X_AVX2 4, 16,  8, 8
+
+SAD_X_AVX2 4, 32,  8, 8
+SAD_X_AVX2 4, 32, 16, 8
+SAD_X_AVX2 4, 32, 24, 8
+SAD_X_AVX2 4, 32, 32, 8
+SAD_X_AVX2 4, 32, 64, 8
+
+;=============================================================================
+; SAD cacheline split
+;=============================================================================
+
+; Core2 (Conroe) can load unaligned data just as quickly as aligned data...
+; unless the unaligned data spans the border between 2 cachelines, in which
+; case it's really slow. The exact numbers may differ, but all Intel cpus prior
+; to Nehalem have a large penalty for cacheline splits.
+; (8-byte alignment exactly half way between two cachelines is ok though.)
+; LDDQU was supposed to fix this, but it only works on Pentium 4.
+; So in the split case we load aligned data and explicitly perform the
+; alignment between registers. Like on archs that have only aligned loads,
+; except complicated by the fact that PALIGNR takes only an immediate, not
+; a variable alignment.
+; It is also possible to hoist the realignment to the macroblock level (keep
+; 2 copies of the reference frame, offset by 32 bytes), but the extra memory
+; needed for that method makes it often slower.
+
+; sad 16x16 costs on Core2:
+; good offsets: 49 cycles (50/64 of all mvs)
+; cacheline split: 234 cycles (14/64 of all mvs. amortized: +40 cycles)
+; page split: 3600 cycles (14/4096 of all mvs. amortized: +11.5 cycles)
+; cache or page split with palignr: 57 cycles (amortized: +2 cycles)
+
+; computed jump assumes this loop is exactly 80 bytes
+; Cacheline-split SAD inner loop, SSE2: realign by shifting the two aligned
+; halves (pslldq/psrldq take immediates, hence one loop copy per alignment
+; %1). The computed jump in SAD16_CACHELINE_FUNC indexes these copies, so
+; each instance must assemble to exactly 80 bytes.
+%macro SAD16_CACHELINE_LOOP_SSE2 1 ; alignment
+ALIGN 16
+sad_w16_align%1_sse2:
+    movdqa  xmm1, [r2+16]
+    movdqa  xmm2, [r2+r3+16]
+    movdqa  xmm3, [r2]
+    movdqa  xmm4, [r2+r3]
+    pslldq  xmm1, 16-%1
+    pslldq  xmm2, 16-%1
+    psrldq  xmm3, %1
+    psrldq  xmm4, %1
+    por     xmm1, xmm3
+    por     xmm2, xmm4
+    psadbw  xmm1, [r0]
+    psadbw  xmm2, [r0+r1]
+    paddw   xmm0, xmm1
+    paddw   xmm0, xmm2
+    lea     r0,   [r0+2*r1]
+    lea     r2,   [r2+2*r3]
+    dec     r4
+    jg sad_w16_align%1_sse2
+    ret
+%endmacro
+
+; computed jump assumes this loop is exactly 64 bytes
+%macro SAD16_CACHELINE_LOOP_SSSE3 1 ; alignment
+ALIGN 16
+sad_w16_align%1_ssse3:
+    movdqa  xmm1, [r2+16]
+    movdqa  xmm2, [r2+r3+16]
+    palignr xmm1, [r2], %1
+    palignr xmm2, [r2+r3], %1
+    psadbw  xmm1, [r0]
+    psadbw  xmm2, [r0+r1]
+    paddw   xmm0, xmm1
+    paddw   xmm0, xmm2
+    lea     r0,   [r0+2*r1]
+    lea     r2,   [r2+2*r3]
+    dec     r4
+    jg sad_w16_align%1_ssse3
+    ret
+%endmacro
+
+; pixel_sad_16x%2_cache64_%1: if the reference pointer does not straddle a
+; 64-byte cacheline (tested on its low bits), tail-jump to the plain SSE2
+; sad. Otherwise compute the misalignment (r2 & 15), scale it by the loop
+; body size (64 for ssse3, 80 for sse2), and computed-jump into the matching
+; sad_w16_align%N loop with r2 rounded down to alignment.
+%macro SAD16_CACHELINE_FUNC 2 ; cpu, height
+cglobal pixel_sad_16x%2_cache64_%1
+    mov     eax, r2m
+    and     eax, 0x37
+    cmp     eax, 0x30
+    jle pixel_sad_16x%2_sse2
+    PROLOGUE 4,6
+    mov     r4d, r2d
+    and     r4d, 15
+%ifidn %1, ssse3
+    shl     r4d, 6  ; code size = 64
+%else
+    lea     r4, [r4*5]
+    shl     r4d, 4  ; code size = 80
+%endif
+%define sad_w16_addr (sad_w16_align1_%1 + (sad_w16_align1_%1 - sad_w16_align2_%1))
+%ifdef PIC
+    lea     r5, [sad_w16_addr]
+    add     r5, r4
+%else
+    lea     r5, [sad_w16_addr + r4]
+%endif
+    and     r2, ~15
+    mov     r4d, %2/2
+    pxor    xmm0, xmm0
+    call    r5
+    movhlps xmm1, xmm0
+    paddw   xmm0, xmm1
+    movd    eax,  xmm0
+    RET
+%endmacro
+
+; Shared MMX2 cacheline-split prologue: fall through to the plain mmx2 sad
+; when the reference doesn't cross a cacheline; otherwise set up the shift
+; counts mm7 = 8*(r2&7) and mm6 = 64-mm7 for register-level realignment,
+; align r2 down to 8, and zero the accumulator.
+%macro SAD_CACHELINE_START_MMX2 4 ; width, height, iterations, cacheline
+    mov    eax, r2m
+    and    eax, 0x17|%1|(%4>>1)
+    cmp    eax, 0x10|%1|(%4>>1)
+    jle pixel_sad_%1x%2_mmx2
+    and    eax, 7
+    shl    eax, 3
+    movd   mm6, [pd_64]
+    movd   mm7, eax
+    psubw  mm6, mm7
+    PROLOGUE 4,5
+    and    r2, ~7
+    mov    r4d, %3
+    pxor   mm0, mm0
+%endmacro
+
+; pixel_sad_16x%1_cache%2_mmx2: one row per iteration; each 8-byte half is
+; rebuilt from two aligned loads with the psrlq/psllq counts prepared in
+; mm7/mm6 by SAD_CACHELINE_START_MMX2.
+%macro SAD16_CACHELINE_FUNC_MMX2 2 ; height, cacheline
+cglobal pixel_sad_16x%1_cache%2_mmx2
+    SAD_CACHELINE_START_MMX2 16, %1, %1, %2
+.loop:
+    movq   mm1, [r2]
+    movq   mm2, [r2+8]
+    movq   mm3, [r2+16]
+    movq   mm4, mm2
+    psrlq  mm1, mm7
+    psllq  mm2, mm6
+    psllq  mm3, mm6
+    psrlq  mm4, mm7
+    por    mm1, mm2
+    por    mm3, mm4
+    psadbw mm1, [r0]
+    psadbw mm3, [r0+8]
+    paddw  mm0, mm1
+    paddw  mm0, mm3
+    add    r2, r3
+    add    r0, r1
+    dec    r4
+    jg .loop
+    movd   eax, mm0
+    RET
+%endmacro
+
+; pixel_sad_8x%1_cache%2_mmx2: two rows per iteration (hence %1/2
+; iterations), same register-realignment trick as the 16-wide version.
+%macro SAD8_CACHELINE_FUNC_MMX2 2 ; height, cacheline
+cglobal pixel_sad_8x%1_cache%2_mmx2
+    SAD_CACHELINE_START_MMX2 8, %1, %1/2, %2
+.loop:
+    movq   mm1, [r2+8]
+    movq   mm2, [r2+r3+8]
+    movq   mm3, [r2]
+    movq   mm4, [r2+r3]
+    psllq  mm1, mm6
+    psllq  mm2, mm6
+    psrlq  mm3, mm7
+    psrlq  mm4, mm7
+    por    mm1, mm3
+    por    mm2, mm4
+    psadbw mm1, [r0]
+    psadbw mm2, [r0+r1]
+    paddw  mm0, mm1
+    paddw  mm0, mm2
+    lea    r2, [r2+2*r3]
+    lea    r0, [r0+2*r1]
+    dec    r4
+    jg .loop
+    movd   eax, mm0
+    RET
+%endmacro
+
+; sad_x3/x4_cache64: check each mv.
+; if they're all within a cacheline, use normal sad_x3/x4.
+; otherwise, send them individually to sad_cache64.
+; Branch to .split if the candidate pointer %1 straddles a cacheline
+; (%3 = cacheline size) for a %2-wide read; same bit test as the
+; cache-aware sad prologues.
+%macro CHECK_SPLIT 3 ; pix, width, cacheline
+    mov  eax, %1
+    and  eax, 0x17|%2|(%3>>1)
+    cmp  eax, 0x10|%2|(%3>>1)
+    jg .split
+%endmacro
+
+; pixel_sad_x3_WxH_cacheN wrapper: if none of the 3 candidate pointers
+; crosses a cacheline, tail-jump to the normal x3 sad (%4). Otherwise call
+; the single-ref cache-split sad (%5) three times, re-marshalling arguments
+; for each call (fenc ptr/stride preserved in r7, scores ptr in r8 on
+; x86-64; via stack slots on 32-bit) and storing the 3 results.
+%macro SADX3_CACHELINE_FUNC 6 ; width, height, cacheline, normal_ver, split_ver, name
+cglobal pixel_sad_x3_%1x%2_cache%3_%6
+    CHECK_SPLIT r1m, %1, %3
+    CHECK_SPLIT r2m, %1, %3
+    CHECK_SPLIT r3m, %1, %3
+    jmp pixel_sad_x3_%1x%2_%4
+.split:
+%if ARCH_X86_64
+    PROLOGUE 6,9
+    push r3
+    push r2
+%if WIN64
+    movsxd r4, r4d
+    sub rsp, 40 ; shadow space and alignment
+%endif
+    mov  r2, r1
+    mov  r1, FENC_STRIDE
+    mov  r3, r4
+    mov  r7, r0
+    mov  r8, r5
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  [r8], eax
+%if WIN64
+    mov  r2, [rsp+40+0*8]
+%else
+    pop  r2
+%endif
+    mov  r0, r7
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  [r8+4], eax
+%if WIN64
+    mov  r2, [rsp+40+1*8]
+%else
+    pop  r2
+%endif
+    mov  r0, r7
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  [r8+8], eax
+%if WIN64
+    add  rsp, 40+2*8
+%endif
+    RET
+%else
+    push edi
+    mov  edi, [esp+28]
+    push dword [esp+24]
+    push dword [esp+16]
+    push dword 16
+    push dword [esp+20]
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  ecx, [esp+32]
+    mov  [edi], eax
+    mov  [esp+8], ecx
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  ecx, [esp+36]
+    mov  [edi+4], eax
+    mov  [esp+8], ecx
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  [edi+8], eax
+    add  esp, 16
+    pop  edi
+    ret
+%endif
+%endmacro
+
+; pixel_sad_x4_WxH_cacheN wrapper: same scheme as SADX3_CACHELINE_FUNC but
+; with four candidate pointers and four calls to the single-ref
+; cache-split sad (%5), storing 4 dword scores at the pointer from r6mp.
+%macro SADX4_CACHELINE_FUNC 6 ; width, height, cacheline, normal_ver, split_ver, name
+cglobal pixel_sad_x4_%1x%2_cache%3_%6
+    CHECK_SPLIT r1m, %1, %3
+    CHECK_SPLIT r2m, %1, %3
+    CHECK_SPLIT r3m, %1, %3
+    CHECK_SPLIT r4m, %1, %3
+    jmp pixel_sad_x4_%1x%2_%4
+.split:
+%if ARCH_X86_64
+    PROLOGUE 6,9
+    mov  r8,  r6mp
+    push r4
+    push r3
+    push r2
+%if WIN64
+    sub rsp, 32 ; shadow space
+%endif
+    mov  r2, r1
+    mov  r1, FENC_STRIDE
+    mov  r3, r5
+    mov  r7, r0
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  [r8], eax
+%if WIN64
+    mov  r2, [rsp+32+0*8]
+%else
+    pop  r2
+%endif
+    mov  r0, r7
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  [r8+4], eax
+%if WIN64
+    mov  r2, [rsp+32+1*8]
+%else
+    pop  r2
+%endif
+    mov  r0, r7
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  [r8+8], eax
+%if WIN64
+    mov  r2, [rsp+32+2*8]
+%else
+    pop  r2
+%endif
+    mov  r0, r7
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  [r8+12], eax
+%if WIN64
+    add  rsp, 32+3*8
+%endif
+    RET
+%else
+    push edi
+    mov  edi, [esp+32]
+    push dword [esp+28]
+    push dword [esp+16]
+    push dword 16
+    push dword [esp+20]
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  ecx, [esp+32]
+    mov  [edi], eax
+    mov  [esp+8], ecx
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  ecx, [esp+36]
+    mov  [edi+4], eax
+    mov  [esp+8], ecx
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  ecx, [esp+40]
+    mov  [edi+8], eax
+    mov  [esp+8], ecx
+    call pixel_sad_%1x%2_cache%3_%5
+    mov  [edi+12], eax
+    add  esp, 16
+    pop  edi
+    ret
+%endif
+%endmacro
+
+; Convenience: emit both the x3 and x4 cacheline wrappers for one
+; width/height/cacheline/version tuple.
+%macro SADX34_CACHELINE_FUNC 1+
+    SADX3_CACHELINE_FUNC %1
+    SADX4_CACHELINE_FUNC %1
+%endmacro
+
+
+; instantiate the aligned sads
+
+; Per-ISA instantiation of the cacheline-split sads and their x3/x4
+; wrappers. Several variants are 32-bit-only (guarded by ARCH_X86_64 == 0);
+; the 15 SAD16_CACHELINE_LOOP_* expansions create the alignment-indexed
+; loop copies targeted by the computed jumps.
+INIT_MMX
+%if ARCH_X86_64 == 0
+SAD16_CACHELINE_FUNC_MMX2  8, 32
+SAD16_CACHELINE_FUNC_MMX2 16, 32
+SAD8_CACHELINE_FUNC_MMX2   4, 32
+SAD8_CACHELINE_FUNC_MMX2   8, 32
+SAD8_CACHELINE_FUNC_MMX2  16, 32
+SAD16_CACHELINE_FUNC_MMX2  8, 64
+SAD16_CACHELINE_FUNC_MMX2 16, 64
+%endif ; !ARCH_X86_64
+SAD8_CACHELINE_FUNC_MMX2   4, 64
+SAD8_CACHELINE_FUNC_MMX2   8, 64
+SAD8_CACHELINE_FUNC_MMX2  16, 64
+
+%if ARCH_X86_64 == 0
+SADX34_CACHELINE_FUNC 16, 16, 32, mmx2, mmx2, mmx2
+SADX34_CACHELINE_FUNC 16,  8, 32, mmx2, mmx2, mmx2
+SADX34_CACHELINE_FUNC  8, 16, 32, mmx2, mmx2, mmx2
+SADX34_CACHELINE_FUNC  8,  8, 32, mmx2, mmx2, mmx2
+SADX34_CACHELINE_FUNC 16, 16, 64, mmx2, mmx2, mmx2
+SADX34_CACHELINE_FUNC 16,  8, 64, mmx2, mmx2, mmx2
+%endif ; !ARCH_X86_64
+SADX34_CACHELINE_FUNC  8, 16, 64, mmx2, mmx2, mmx2
+SADX34_CACHELINE_FUNC  8,  8, 64, mmx2, mmx2, mmx2
+
+%if ARCH_X86_64 == 0
+SAD16_CACHELINE_FUNC sse2, 8
+SAD16_CACHELINE_FUNC sse2, 16
+%assign i 1
+%rep 15
+SAD16_CACHELINE_LOOP_SSE2 i
+%assign i i+1
+%endrep
+SADX34_CACHELINE_FUNC 16, 16, 64, sse2, sse2, sse2
+SADX34_CACHELINE_FUNC 16,  8, 64, sse2, sse2, sse2
+%endif ; !ARCH_X86_64
+SADX34_CACHELINE_FUNC  8, 16, 64, sse2, mmx2, sse2
+
+SAD16_CACHELINE_FUNC ssse3, 8
+SAD16_CACHELINE_FUNC ssse3, 16
+%assign i 1
+%rep 15
+SAD16_CACHELINE_LOOP_SSSE3 i
+%assign i i+1
+%endrep
+SADX34_CACHELINE_FUNC 16, 16, 64, sse2, ssse3, ssse3
+SADX34_CACHELINE_FUNC 16,  8, 64, sse2, ssse3, ssse3
+
+; 8-bit only: AVX2 pixel_sad_x3_8x4. Rebases ref pointers 1 and 2 as
+; offsets from ref 0 (r2 -= r1, r3 -= r1) so one advancing base register
+; (r1 += stride r4) addresses all three. Per row: broadcast the 8-byte fenc
+; row, SAD refs 0/1 packed in xm3 and ref 2 in xm4; results for (score0,
+; score1) accumulate in xm0, score2 in xm1.
+%if HIGH_BIT_DEPTH==0
+INIT_YMM avx2
+cglobal pixel_sad_x3_8x4, 6,6,5
+    xorps           m0, m0
+    xorps           m1, m1
+
+    sub             r2, r1          ; rebase on pointer r1
+    sub             r3, r1
+
+    ; row 0
+    vpbroadcastq   xm2, [r0 + 0 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+    add             r1, r4
+
+    ; row 1
+    vpbroadcastq   xm2, [r0 + 1 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+    add             r1, r4
+
+    ; row 2
+    vpbroadcastq   xm2, [r0 + 2 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+    add             r1, r4
+
+    ; row 3
+    vpbroadcastq   xm2, [r0 + 3 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+
+    ; compact scores 0/1 into the low qword and store the 3 dwords at r5
+    pshufd          xm0, xm0, q0020
+    movq            [r5 + 0], xm0
+    movd            [r5 + 8], xm1
+    RET
+
+; AVX2 pixel_sad_x3_8x8: same rebased-pointer scheme as pixel_sad_x3_8x4,
+; processing 2 rows per %rep iteration (4 iterations); fenc pointer r0
+; advances 2*FENC_STRIDE between iterations except after the last.
+INIT_YMM avx2
+cglobal pixel_sad_x3_8x8, 6,6,5
+    xorps           m0, m0
+    xorps           m1, m1
+
+    sub             r2, r1          ; rebase on pointer r1
+    sub             r3, r1
+%assign x 0
+%rep 4
+    ; row 0
+    vpbroadcastq   xm2, [r0 + 0 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+    add             r1, r4
+
+    ; row 1
+    vpbroadcastq   xm2, [r0 + 1 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+
+%assign x x+1
+  %if x < 4
+    add             r1, r4
+    add             r0, 2 * FENC_STRIDE
+  %endif
+%endrep
+
+    pshufd          xm0, xm0, q0020
+    movq            [r5 + 0], xm0
+    movd            [r5 + 8], xm1
+    RET
+
+; AVX2 pixel_sad_x3_8x16: identical to pixel_sad_x3_8x8 but with 8
+; two-row iterations.
+INIT_YMM avx2
+cglobal pixel_sad_x3_8x16, 6,6,5
+    xorps           m0, m0
+    xorps           m1, m1
+
+    sub             r2, r1          ; rebase on pointer r1
+    sub             r3, r1
+%assign x 0
+%rep 8
+    ; row 0
+    vpbroadcastq   xm2, [r0 + 0 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+    add             r1, r4
+
+    ; row 1
+    vpbroadcastq   xm2, [r0 + 1 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+
+%assign x x+1
+  %if x < 8
+    add             r1, r4
+    add             r0, 2 * FENC_STRIDE
+  %endif
+%endrep
+
+    pshufd          xm0, xm0, q0020
+    movq            [r5 + 0], xm0
+    movd            [r5 + 8], xm1
+    RET
+
+; AVX2 pixel_sad_x4_8x8: rebases refs 1-3 onto ref 0 (r2/r3/r4 -= r1,
+; stride in r5). Refs 0/1 pack into xm3 and refs 2/3 into xm4 per row, so
+; xm0 accumulates scores 0/1 and xm1 scores 2/3; both pairs are compacted
+; with pshufd q0020 and stored as 4 dwords at r6.
+INIT_YMM avx2
+cglobal pixel_sad_x4_8x8, 7,7,5
+    xorps           m0, m0
+    xorps           m1, m1
+
+    sub             r2, r1          ; rebase on pointer r1
+    sub             r3, r1
+    sub             r4, r1
+%assign x 0
+%rep 4
+    ; row 0
+    vpbroadcastq   xm2, [r0 + 0 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    movhps         xm4, [r1 + r4]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+    add             r1, r5
+
+    ; row 1
+    vpbroadcastq   xm2, [r0 + 1 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    movhps         xm4, [r1 + r4]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+
+%assign x x+1
+  %if x < 4
+    add             r1, r5
+    add             r0, 2 * FENC_STRIDE
+  %endif
+%endrep
+
+    pshufd          xm0, xm0, q0020
+    pshufd          xm1, xm1, q0020
+    movq            [r6 + 0], xm0
+    movq            [r6 + 8], xm1
+    RET
+
+; AVX2 pixel_sad_32x8 (single reference): fully unrolled, two rows per
+; step into two ymm accumulators (m0/m5), then a lane + qword horizontal
+; reduction into eax.
+INIT_YMM avx2
+cglobal pixel_sad_32x8, 4,4,6
+    xorps           m0, m0
+    xorps           m5, m5
+
+    movu           m1, [r0]               ; row 0 of pix0
+    movu           m2, [r2]               ; row 0 of pix1
+    movu           m3, [r0 + r1]          ; row 1 of pix0
+    movu           m4, [r2 + r3]          ; row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 2 * r3]
+    lea     r0,     [r0 + 2 * r1]
+
+    movu           m1, [r0]               ; row 2 of pix0
+    movu           m2, [r2]               ; row 2 of pix1
+    movu           m3, [r0 + r1]          ; row 3 of pix0
+    movu           m4, [r2 + r3]          ; row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 2 * r3]
+    lea     r0,     [r0 + 2 * r1]
+
+    movu           m1, [r0]               ; row 4 of pix0
+    movu           m2, [r2]               ; row 4 of pix1
+    movu           m3, [r0 + r1]          ; row 5 of pix0
+    movu           m4, [r2 + r3]          ; row 5 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 2 * r3]
+    lea     r0,     [r0 + 2 * r1]
+
+    movu           m1, [r0]               ; row 6 of pix0
+    movu           m2, [r2]               ; row 6 of pix1
+    movu           m3, [r0 + r1]          ; row 7 of pix0
+    movu           m4, [r2 + r3]          ; row 7 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    ; reduce: merge the two accumulators, fold the upper lane, then the
+    ; upper qword, and return the scalar SAD in eax
+    paddd          m0, m5
+    vextracti128   xm1, m0, 1
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2
+    paddd          xm0,xm1
+    movd           eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_32x16, 4,5,6             ; SAD of a 32x16 block of 8-bit pixels: r0/r1 = pix0/stride0, r2/r3 = pix1/stride1
+    xorps           m0, m0                 ; SAD accumulator for even rows
+    xorps           m5, m5                 ; SAD accumulator for odd rows
+    mov             r4d, 4                 ; 4 iterations x 4 rows = 16 rows
+
+.loop:
+    movu           m1, [r0]               ; row 0 of pix0
+    movu           m2, [r2]               ; row 0 of pix1
+    movu           m3, [r0 + r1]          ; row 1 of pix0
+    movu           m4, [r2 + r3]          ; row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 2 * r3]
+    lea     r0,     [r0 + 2 * r1]
+
+    movu           m1, [r0]               ; row 2 of pix0
+    movu           m2, [r2]               ; row 2 of pix1
+    movu           m3, [r0 + r1]          ; row 3 of pix0
+    movu           m4, [r2 + r3]          ; row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 2 * r3]
+    lea     r0,     [r0 + 2 * r1]
+
+    dec         r4d
+    jnz         .loop
+
+    paddd          m0, m5
+    vextracti128   xm1, m0, 1              ; fold upper 128 bits into lower half
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2             ; psadbw leaves partial sums in dwords 0 and 2
+    paddd          xm0, xm1
+    movd           eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_32x24, 4,7,6             ; SAD of a 32x24 block of 8-bit pixels: r0/r1 = pix0/stride0, r2/r3 = pix1/stride1
+    xorps           m0, m0                 ; SAD accumulator for even rows
+    xorps           m5, m5                 ; SAD accumulator for odd rows
+    mov             r4d, 6                 ; 6 iterations x 4 rows = 24 rows
+    lea             r5, [r1 * 3]           ; 3 * stride0
+    lea             r6, [r3 * 3]           ; 3 * stride1
+.loop:
+    movu           m1, [r0]               ; row 0 of pix0
+    movu           m2, [r2]               ; row 0 of pix1
+    movu           m3, [r0 + r1]          ; row 1 of pix0
+    movu           m4, [r2 + r3]          ; row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + 2 * r1]      ; row 2 of pix0
+    movu           m2, [r2 + 2 * r3]      ; row 2 of pix1
+    movu           m3, [r0 + r5]          ; row 3 of pix0
+    movu           m4, [r2 + r6]          ; row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 4 * r3]
+    lea     r0,     [r0 + 4 * r1]
+
+    dec         r4d
+    jnz         .loop
+
+    paddd          m0, m5
+    vextracti128   xm1, m0, 1              ; fold upper 128 bits into lower half
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2             ; psadbw leaves partial sums in dwords 0 and 2
+    paddd          xm0, xm1
+    movd           eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_32x32, 4,7,5             ; SAD of a 32x32 block of 8-bit pixels: r0/r1 = pix0/stride0, r2/r3 = pix1/stride1
+    xorps           m0, m0                 ; single SAD accumulator
+    mov             r4d, 32/4              ; 8 iterations x 4 rows = 32 rows
+    lea             r5, [r1 * 3]           ; 3 * stride0
+    lea             r6, [r3 * 3]           ; 3 * stride1
+
+.loop:
+    movu           m1, [r0]               ; row 0 of pix0
+    movu           m2, [r2]               ; row 0 of pix1
+    movu           m3, [r0 + r1]          ; row 1 of pix0
+    movu           m4, [r2 + r3]          ; row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m0, m3
+
+    movu           m1, [r0 + 2 * r1]      ; row 2 of pix0
+    movu           m2, [r2 + 2 * r3]      ; row 2 of pix1
+    movu           m3, [r0 + r5]          ; row 3 of pix0
+    movu           m4, [r2 + r6]          ; row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m0, m3
+
+    lea            r2,     [r2 + 4 * r3]
+    lea            r0,     [r0 + 4 * r1]
+
+    dec            r4d
+    jnz           .loop
+
+    vextracti128   xm1, m0, 1              ; fold upper 128 bits into lower half
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2             ; psadbw leaves partial sums in dwords 0 and 2
+    paddd          xm0, xm1
+    movd           eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_32x64, 4,7,5             ; SAD of a 32x64 block of 8-bit pixels: r0/r1 = pix0/stride0, r2/r3 = pix1/stride1
+    xorps           m0, m0                 ; single SAD accumulator
+    mov             r4d, 64/8              ; 8 iterations x 8 rows = 64 rows
+    lea             r5, [r1 * 3]           ; 3 * stride0
+    lea             r6, [r3 * 3]           ; 3 * stride1
+
+.loop:
+    movu           m1, [r0]               ; row 0 of pix0
+    movu           m2, [r2]               ; row 0 of pix1
+    movu           m3, [r0 + r1]          ; row 1 of pix0
+    movu           m4, [r2 + r3]          ; row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m0, m3
+
+    movu           m1, [r0 + 2 * r1]      ; row 2 of pix0
+    movu           m2, [r2 + 2 * r3]      ; row 2 of pix1
+    movu           m3, [r0 + r5]          ; row 3 of pix0
+    movu           m4, [r2 + r6]          ; row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m0, m3
+
+    lea            r2,     [r2 + 4 * r3]
+    lea            r0,     [r0 + 4 * r1]
+
+    movu           m1, [r0]               ; row 4 of pix0
+    movu           m2, [r2]               ; row 4 of pix1
+    movu           m3, [r0 + r1]          ; row 5 of pix0
+    movu           m4, [r2 + r3]          ; row 5 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m0, m3
+
+    movu           m1, [r0 + 2 * r1]      ; row 6 of pix0
+    movu           m2, [r2 + 2 * r3]      ; row 6 of pix1
+    movu           m3, [r0 + r5]          ; row 7 of pix0
+    movu           m4, [r2 + r6]          ; row 7 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m0, m3
+
+    lea            r2,     [r2 + 4 * r3]
+    lea            r0,     [r0 + 4 * r1]
+
+    dec            r4d
+    jnz           .loop
+
+    vextracti128   xm1, m0, 1              ; fold upper 128 bits into lower half
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2             ; psadbw leaves partial sums in dwords 0 and 2
+    paddd          xm0, xm1
+    movd           eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_48x64, 4,7,7             ; SAD of a 48x64 block of 8-bit pixels: r0/r1 = pix0/stride0, r2/r3 = pix1/stride1
+    xorps           m0, m0                 ; single SAD accumulator
+    mov             r4d, 64/4              ; 16 iterations x 4 rows = 64 rows
+    lea             r5, [r1 * 3]           ; 3 * stride0
+    lea             r6, [r3 * 3]           ; 3 * stride1
+.loop:
+    movu           m1, [r0]               ; row 0 of pix0
+    movu           m2, [r2]               ; row 0 of pix1
+    movu           m3, [r0 + r1]          ; row 1 of pix0
+    movu           m4, [r2 + r3]          ; row 1 of pix1
+    movu           xm5, [r0 +32]          ; last 16 of row 0 of pix0
+    vinserti128    m5, m5, [r0 + r1 + 32], 1  ; ... and last 16 of row 1 of pix0
+    movu           xm6, [r2 +32]          ; last 16 of row 0 of pix1
+    vinserti128    m6, m6, [r2 + r3 + 32], 1  ; ... and last 16 of row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    psadbw         m5, m6
+    paddd          m0, m1
+    paddd          m0, m3
+    paddd          m0, m5
+
+    movu           m1, [r0 + 2 * r1]      ; row 2 of pix0
+    movu           m2, [r2 + 2 * r3]      ; row 2 of pix1
+    movu           m3, [r0 + r5]          ; row 3 of pix0
+    movu           m4, [r2 + r6]          ; row 3 of pix1
+    movu           xm5, [r0 +32 + 2 * r1] ; last 16 of row 2 of pix0
+    vinserti128    m5, m5, [r0 + r5 + 32], 1  ; ... and last 16 of row 3 of pix0
+    movu           xm6, [r2 +32 + 2 * r3] ; last 16 of row 2 of pix1
+    vinserti128    m6, m6, [r2 + r6 + 32], 1  ; ... and last 16 of row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    psadbw         m5, m6
+    paddd          m0, m1
+    paddd          m0, m3
+    paddd          m0, m5
+
+    lea     r2,     [r2 + 4 * r3]
+    lea     r0,     [r0 + 4 * r1]
+
+    dec         r4d
+    jnz         .loop
+
+    vextracti128   xm1, m0, 1              ; fold upper 128 bits into lower half
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2             ; psadbw leaves partial sums in dwords 0 and 2
+    paddd          xm0, xm1
+    movd           eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_64x16, 4,5,6             ; SAD of a 64x16 block of 8-bit pixels: r0/r1 = pix0/stride0, r2/r3 = pix1/stride1
+    xorps           m0, m0                 ; SAD accumulator (first 32 columns)
+    xorps           m5, m5                 ; SAD accumulator (second 32 columns)
+    mov             r4d, 4                 ; 4 iterations x 4 rows = 16 rows
+.loop:
+    movu           m1, [r0]               ; first 32 of row 0 of pix0
+    movu           m2, [r2]               ; first 32 of row 0 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 0 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 0 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 1 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 1 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 1 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 2 * r3]
+    lea     r0,     [r0 + 2 * r1]
+
+    movu           m1, [r0]               ; first 32 of row 2 of pix0
+    movu           m2, [r2]               ; first 32 of row 2 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 2 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 2 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 3 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 3 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 3 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 2 * r3]
+    lea     r0,     [r0 + 2 * r1]
+
+    dec         r4d
+    jnz         .loop
+
+    paddd          m0, m5
+    vextracti128   xm1, m0, 1              ; fold upper 128 bits into lower half
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2             ; psadbw leaves partial sums in dwords 0 and 2
+    paddd          xm0, xm1
+    movd           eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_64x32, 4,5,6             ; SAD of a 64x32 block of 8-bit pixels: r0/r1 = pix0/stride0, r2/r3 = pix1/stride1
+    xorps           m0, m0                 ; SAD accumulator (first 32 columns)
+    xorps           m5, m5                 ; SAD accumulator (second 32 columns)
+    mov             r4d, 16                ; 16 iterations x 2 rows = 32 rows
+.loop:
+    movu           m1, [r0]               ; first 32 of row 0 of pix0
+    movu           m2, [r2]               ; first 32 of row 0 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 0 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 0 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 1 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 1 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 1 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 2 * r3]
+    lea     r0,     [r0 + 2 * r1]
+
+    dec         r4d
+    jnz         .loop
+
+    paddd          m0, m5
+    vextracti128   xm1, m0, 1              ; fold upper 128 bits into lower half
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2             ; psadbw leaves partial sums in dwords 0 and 2
+    paddd          xm0, xm1
+    movd           eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_64x48, 4,7,6             ; SAD of a 64x48 block of 8-bit pixels: r0/r1 = pix0/stride0, r2/r3 = pix1/stride1
+    xorps           m0, m0                 ; SAD accumulator (first 32 columns)
+    xorps           m5, m5                 ; SAD accumulator (second 32 columns)
+    mov             r4d, 12                ; 12 iterations x 4 rows = 48 rows
+    lea             r5, [r1 * 3]           ; 3 * stride0
+    lea             r6, [r3 * 3]           ; 3 * stride1
+.loop:
+    movu           m1, [r0]               ; first 32 of row 0 of pix0
+    movu           m2, [r2]               ; first 32 of row 0 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 0 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 0 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 1 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 1 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 1 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + 2 * r1]      ; first 32 of row 2 of pix0
+    movu           m2, [r2 + 2 * r3]      ; first 32 of row 2 of pix1
+    movu           m3, [r0 + 2 * r1 + 32] ; second 32 of row 2 of pix0
+    movu           m4, [r2 + 2 * r3 + 32] ; second 32 of row 2 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r5]          ; first 32 of row 3 of pix0
+    movu           m2, [r2 + r6]          ; first 32 of row 3 of pix1
+    movu           m3, [r0 + 32 + r5]     ; second 32 of row 3 of pix0
+    movu           m4, [r2 + 32 + r6]     ; second 32 of row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 4 * r3]
+    lea     r0,     [r0 + 4 * r1]
+
+    dec         r4d
+    jnz         .loop
+
+    paddd          m0, m5
+    vextracti128   xm1, m0, 1              ; fold upper 128 bits into lower half
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2             ; psadbw leaves partial sums in dwords 0 and 2
+    paddd          xm0, xm1
+    movd           eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_64x64, 4,7,6             ; SAD of a 64x64 block of 8-bit pixels: r0/r1 = pix0/stride0, r2/r3 = pix1/stride1
+    xorps           m0, m0                 ; SAD accumulator (first 32 columns)
+    xorps           m5, m5                 ; SAD accumulator (second 32 columns)
+    mov             r4d, 8                 ; 8 iterations x 8 rows = 64 rows
+    lea             r5, [r1 * 3]           ; 3 * stride0
+    lea             r6, [r3 * 3]           ; 3 * stride1
+.loop:
+    movu           m1, [r0]               ; first 32 of row 0 of pix0
+    movu           m2, [r2]               ; first 32 of row 0 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 0 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 0 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 1 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 1 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 1 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + 2 * r1]      ; first 32 of row 2 of pix0
+    movu           m2, [r2 + 2 * r3]      ; first 32 of row 2 of pix1
+    movu           m3, [r0 + 2 * r1 + 32] ; second 32 of row 2 of pix0
+    movu           m4, [r2 + 2 * r3 + 32] ; second 32 of row 2 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r5]          ; first 32 of row 3 of pix0
+    movu           m2, [r2 + r6]          ; first 32 of row 3 of pix1
+    movu           m3, [r0 + 32 + r5]     ; second 32 of row 3 of pix0
+    movu           m4, [r2 + 32 + r6]     ; second 32 of row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 4 * r3]
+    lea     r0,     [r0 + 4 * r1]
+
+    movu           m1, [r0]               ; first 32 of row 4 of pix0
+    movu           m2, [r2]               ; first 32 of row 4 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 4 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 4 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 5 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 5 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 5 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 5 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + 2 * r1]      ; first 32 of row 6 of pix0
+    movu           m2, [r2 + 2 * r3]      ; first 32 of row 6 of pix1
+    movu           m3, [r0 + 2 * r1 + 32] ; second 32 of row 6 of pix0
+    movu           m4, [r2 + 2 * r3 + 32] ; second 32 of row 6 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r5]          ; first 32 of row 7 of pix0
+    movu           m2, [r2 + r6]          ; first 32 of row 7 of pix1
+    movu           m3, [r0 + 32 + r5]     ; second 32 of row 7 of pix0
+    movu           m4, [r2 + 32 + r6]     ; second 32 of row 7 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea     r2,     [r2 + 4 * r3]
+    lea     r0,     [r0 + 4 * r1]
+
+    dec         r4d
+    jnz         .loop
+
+    paddd          m0, m5
+    vextracti128   xm1, m0, 1              ; fold upper 128 bits into lower half
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2             ; psadbw leaves partial sums in dwords 0 and 2
+    paddd          xm0, xm1
+    movd           eax, xm0
+    RET
+
+%endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/sad16-a.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1591 @@
+;*****************************************************************************
+;* sad16-a.asm: x86 high depth sad functions
+;*****************************************************************************
+;* Copyright (C) 2010-2013 x264 project
+;*
+;* Authors: Oskar Arvidsson <oskar@irock.se>
+;*          Henrik Gramner <henrik@gramner.com>
+;*          Dnyaneshwar Gorade <dnyaneshwar@multicorewareinc.com>
+;*          Min Chen <chenm003@163.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION .text
+
+cextern pw_1
+
+;=============================================================================
+; SAD MMX
+;=============================================================================
+
+%macro SAD_INC_1x16P_MMX 0        ; accumulate SAD of one 16-pixel row (16-bit samples) into m0, advance both pointers one row
+    movu    m1, [r0+ 0]
+    movu    m2, [r0+ 8]
+    movu    m3, [r0+16]
+    movu    m4, [r0+24]
+    psubw   m1, [r2+ 0]
+    psubw   m2, [r2+ 8]
+    psubw   m3, [r2+16]
+    psubw   m4, [r2+24]
+    ABSW2   m1, m2, m1, m2, m5, m6
+    ABSW2   m3, m4, m3, m4, m7, m5
+    lea     r0, [r0+2*r1]          ; strides are in elements; x2 converts to bytes
+    lea     r2, [r2+2*r3]
+    paddw   m1, m2
+    paddw   m3, m4
+  %if BIT_DEPTH <= 10
+    paddw   m0, m1                 ; <=10 bit: keep word accumulation
+    paddw   m0, m3
+  %else
+    paddw   m1, m3
+    pmaddwd m1, [pw_1]             ; >10 bit: widen word sums to dwords before accumulating
+    paddd   m0, m1
+  %endif
+%endmacro
+
+%macro SAD_INC_2x8P_MMX 0         ; accumulate SAD of two 8-pixel rows (16-bit samples) into m0, advance both pointers two rows
+    movu    m1, [r0+0]
+    movu    m2, [r0+8]
+    movu    m3, [r0+2*r1+0]
+    movu    m4, [r0+2*r1+8]
+    psubw   m1, [r2+0]
+    psubw   m2, [r2+8]
+    psubw   m3, [r2+2*r3+0]
+    psubw   m4, [r2+2*r3+8]
+    ABSW2   m1, m2, m1, m2, m5, m6
+    ABSW2   m3, m4, m3, m4, m7, m5
+    lea     r0, [r0+4*r1]          ; strides are in elements; advance two rows
+    lea     r2, [r2+4*r3]
+    paddw   m1, m2
+    paddw   m3, m4
+  %if BIT_DEPTH <= 10
+    paddw   m0, m1                 ; <=10 bit: keep word accumulation
+    paddw   m0, m3
+  %else
+    paddw   m1, m3
+    pmaddwd m1, [pw_1]             ; >10 bit: widen word sums to dwords before accumulating
+    paddd   m0, m1
+  %endif
+%endmacro
+
+%macro SAD_INC_2x4P_MMX 0         ; accumulate SAD of two 4-pixel rows (16-bit samples) into m0, advance both pointers two rows
+    movu    m1, [r0]
+    movu    m2, [r0+2*r1]
+    psubw   m1, [r2]
+    psubw   m2, [r2+2*r3]
+    ABSW2   m1, m2, m1, m2, m3, m4
+    lea     r0, [r0+4*r1]          ; strides are in elements; advance two rows
+    lea     r2, [r2+4*r3]
+  %if BIT_DEPTH <= 10
+    paddw   m0, m1                 ; <=10 bit: keep word accumulation
+    paddw   m0, m2
+  %else
+    paddw   m1, m2
+    pmaddwd m1, [pw_1]             ; >10 bit: widen word sums to dwords before accumulating
+    paddd   m0, m1
+  %endif
%endmacro
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_NxM( uint16_t *, intptr_t, uint16_t *, intptr_t )
+;-----------------------------------------------------------------------------
+%macro SAD_MMX 3                  ; %1 = width, %2 = height, %3 = rows handled per SAD_INC step
+cglobal pixel_sad_%1x%2, 4,5-(%2&4/4)  ; height-4 variants skip the loop and need no counter register
+    pxor    m0, m0
+%if %2 == 4
+    SAD_INC_%3x%1P_MMX
+    SAD_INC_%3x%1P_MMX
+%else
+    mov    r4d, %2/%3
+.loop:
+    SAD_INC_%3x%1P_MMX
+    dec    r4d
+    jg .loop
+%endif
+%if %1*%2 == 256                  ; 16x16 case: use unsigned word hadd (largest word sums)
+  %if BIT_DEPTH <= 10
+    HADDUW  m0, m1
+  %else
+    HADDD  m0, m1
+  %endif
+%else
+  %if BIT_DEPTH <= 10
+    HADDW   m0, m1
+  %else
+    HADDD  m0, m1                 ; >10 bit paths accumulated dwords, so hadd dwords
+  %endif
+%endif
+    movd   eax, m0
+    RET
+%endmacro
+
+INIT_MMX mmx2                     ; mmx2 versions of all small SAD sizes
+SAD_MMX 16, 16, 1
+SAD_MMX 16,  8, 1
+SAD_MMX  8, 16, 2
+SAD_MMX  8,  8, 2
+SAD_MMX  8,  4, 2
+SAD_MMX  4,  8, 2
+SAD_MMX  4,  4, 2
+SAD_MMX  4,  16, 2
+INIT_MMX ssse3                    ; re-instantiate 4-wide sizes for ssse3 (ABSW2 expands differently per ISA)
+SAD_MMX  4,  8, 2
+SAD_MMX  4,  4, 2
+
+;=============================================================================
+; SAD XMM
+;=============================================================================
+
+%macro SAD_1x32 0                 ; accumulate SAD of one 32-pixel row (16-bit samples) into m0 as dwords, advance one row
+    movu    m1, [r2+ 0]
+    movu    m2, [r2+16]
+    movu    m3, [r2+32]
+    movu    m4, [r2+48]
+    psubw   m1, [r0+0]
+    psubw   m2, [r0+16]
+    psubw   m3, [r0+32]
+    psubw   m4, [r0+48]
+    ABSW2   m1, m2, m1, m2, m5, m6
+    pmaddwd m1, [pw_1]             ; widen word diffs to dword sums
+    pmaddwd m2, [pw_1]
+    lea     r0, [r0+2*r1]
+    lea     r2, [r2+2*r3]
+    ABSW2   m3, m4, m3, m4, m7, m5
+    pmaddwd m3, [pw_1]
+    pmaddwd m4, [pw_1]
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m0, m1
+    paddd   m0, m3
+%endmacro
+
+%macro SAD_1x24 0                 ; accumulate SAD of one 24-pixel row (16-bit samples) into m0 as dwords, advance one row
+    movu    m1, [r2+ 0]
+    movu    m2, [r2+16]
+    movu    m3, [r2+32]
+    psubw   m1, [r0+0]
+    psubw   m2, [r0+16]
+    psubw   m3, [r0+32]
+    ABSW2   m1, m2, m1, m2, m4, m6
+    pmaddwd m1, [pw_1]             ; widen word diffs to dword sums
+    pmaddwd m2, [pw_1]
+    lea     r0, [r0+2*r1]
+    lea     r2, [r2+2*r3]
+    pxor    m4, m4                 ; abs(m3) by hand: m3 = max(m3, 0 - m3)
+    psubw    m4, m3
+    pmaxsw  m3, m4
+    pmaddwd m3, [pw_1]
+    paddd   m1, m2
+    paddd   m0, m1
+    paddd   m0, m3
+%endmacro
+
+%macro SAD_1x48 0                 ; accumulate SAD of one 48-pixel row (16-bit samples) into m0 as dwords, advance one row
+    movu    m1, [r2+ 0]
+    movu    m2, [r2+16]
+    movu    m3, [r2+32]
+    movu    m4, [r2+48]
+    psubw   m1, [r0+0]
+    psubw   m2, [r0+16]
+    psubw   m3, [r0+32]
+    psubw   m4, [r0+48]
+    ABSW2   m1, m2, m1, m2, m5, m6
+    pmaddwd m1, [pw_1]             ; widen word diffs to dword sums
+    pmaddwd m2, [pw_1]
+    ABSW2   m3, m4, m3, m4, m7, m5
+    pmaddwd m3, [pw_1]
+    pmaddwd m4, [pw_1]
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m0, m1
+    paddd   m0, m3
+    movu    m1, [r2+64]            ; last 16 pixels of the row
+    movu    m2, [r2+80]
+    psubw   m1, [r0+64]
+    psubw   m2, [r0+80]
+    ABSW2   m1, m2, m1, m2, m3, m4
+    pmaddwd m1, [pw_1]
+    pmaddwd m2, [pw_1]
+    lea     r0, [r0+2*r1]
+    lea     r2, [r2+2*r3]
+    paddd   m0, m1
+    paddd   m0, m2
+%endmacro
+
+%macro SAD_1x64 0                 ; accumulate SAD of one 64-pixel row (16-bit samples) into m0 as dwords, advance one row
+    movu    m1, [r2+ 0]
+    movu    m2, [r2+16]
+    movu    m3, [r2+32]
+    movu    m4, [r2+48]
+    psubw   m1, [r0+0]
+    psubw   m2, [r0+16]
+    psubw   m3, [r0+32]
+    psubw   m4, [r0+48]
+    ABSW2   m1, m2, m1, m2, m5, m6
+    pmaddwd m1, [pw_1]             ; widen word diffs to dword sums
+    pmaddwd m2, [pw_1]
+    ABSW2   m3, m4, m3, m4, m7, m5
+    pmaddwd m3, [pw_1]
+    pmaddwd m4, [pw_1]
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m0, m1
+    paddd   m0, m3
+    movu    m1, [r2+64]            ; second half of the row
+    movu    m2, [r2+80]
+    movu    m3, [r2+96]
+    movu    m4, [r2+112]
+    psubw   m1, [r0+64]
+    psubw   m2, [r0+80]
+    psubw   m3, [r0+96]
+    psubw   m4, [r0+112]
+    ABSW2   m1, m2, m1, m2, m5, m6
+    pmaddwd m1, [pw_1]
+    pmaddwd m2, [pw_1]
+    ABSW2   m3, m4, m3, m4, m7, m5
+    pmaddwd m3, [pw_1]
+    pmaddwd m4, [pw_1]
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m0, m1
+    paddd   m0, m3
+    lea     r0, [r0+2*r1]
+    lea     r2, [r2+2*r3]
+%endmacro
+
+%macro SAD_1x12 0                 ; accumulate SAD of one 12-pixel row (16-bit samples) into m0 as dwords, advance one row
+    movu    m1, [r2+0]
+    movh    m2, [r2+16]            ; last 4 pixels: 8-byte load
+    psubw   m1, [r0+0]
+    movh    m3, [r0+16]
+    psubw   m2, m3
+    ABSW2   m1, m2, m1, m2, m4, m6
+    pmaddwd m1, [pw_1]             ; widen word diffs to dword sums
+    pmaddwd m2, [pw_1]
+    lea     r0, [r0+2*r1]
+    lea     r2, [r2+2*r3]
+    paddd   m1, m2
+    paddd   m0, m1
+%endmacro
+
+%macro SAD_INC_2ROW 1             ; %1 = width in pixels; accumulate SAD of two rows into m0, advance two rows
+%if 2*%1 > mmsize                 ; row wider than one vector register: 16-pixel path
+    movu    m1, [r2+ 0]
+    movu    m2, [r2+16]
+    movu    m3, [r2+2*r3+ 0]
+    movu    m4, [r2+2*r3+16]
+    psubw   m1, [r0+ 0]
+    psubw   m2, [r0+16]
+    psubw   m3, [r0+2*r1+ 0]
+    psubw   m4, [r0+2*r1+16]
+    ABSW2   m1, m2, m1, m2, m5, m6
+    lea     r0, [r0+4*r1]
+    lea     r2, [r2+4*r3]
+    ABSW2   m3, m4, m3, m4, m7, m5
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m1, m3
+    pmaddwd m1, [pw_1]             ; widen word sums to dwords before accumulating
+    paddd   m0, m1
+%else                             ; row fits in one register: 8-pixel (or narrower) path
+    movu    m1, [r2]
+    movu    m2, [r2+2*r3]
+    psubw   m1, [r0]
+    psubw   m2, [r0+2*r1]
+    ABSW2   m1, m2, m1, m2, m3, m4
+    lea     r0, [r0+4*r1]
+    lea     r2, [r2+4*r3]
+    paddw   m1, m2
+    pmaddwd m1, [pw_1]
+    paddd   m0, m1
+%endif
+%endmacro
+
+%macro SAD_INC_2ROW_Nx64 1        ; same logic as SAD_INC_2ROW (kept as a separate copy for the Nx64 kernels)
+%if 2*%1 > mmsize
+    movu    m1, [r2 + 0]
+    movu    m2, [r2 + 16]
+    movu    m3, [r2 + 2 * r3 + 0]
+    movu    m4, [r2 + 2 * r3 + 16]
+    psubw   m1, [r0 + 0]
+    psubw   m2, [r0 + 16]
+    psubw   m3, [r0 + 2 * r1 + 0]
+    psubw   m4, [r0 + 2 * r1 + 16]
+    ABSW2   m1, m2, m1, m2, m5, m6
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+    ABSW2   m3, m4, m3, m4, m7, m5
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m1, m3
+    pmaddwd m1, [pw_1]             ; widen word sums to dwords before accumulating
+    paddd   m0, m1
+%else
+    movu    m1, [r2]
+    movu    m2, [r2 + 2 * r3]
+    psubw   m1, [r0]
+    psubw   m2, [r0 + 2 * r1]
+    ABSW2   m1, m2, m1, m2, m3, m4
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+    paddw   m1, m2
+    pmaddwd m1, [pw_1]
+    paddd   m0, m1
+%endif
+%endmacro
+
+; ---------------------------------------------------------------------------- -
+; int pixel_sad_NxM(uint16_t *, intptr_t, uint16_t *, intptr_t)
+; ---------------------------------------------------------------------------- -
+%macro SAD 2                      ; %1 = width, %2 = height; emits pixel_sad_%1x%2 for 16-bit pixels
+cglobal pixel_sad_%1x%2, 4,5,8
+    pxor    m0, m0                 ; dword SAD accumulator
+%if %2 == 4                        ; height 4: fully unrolled, no loop counter needed
+    SAD_INC_2ROW %1
+    SAD_INC_2ROW %1
+%else
+    mov     r4d, %2/2
+.loop:
+    SAD_INC_2ROW %1
+    dec    r4d
+    jg .loop
+%endif
+    HADDD   m0, m1                 ; horizontal add of dword partial sums
+    movd    eax, xm0
+    RET
+%endmacro
+
+; ---------------------------------------------------------------------------- -
+; int pixel_sad_Nx64(uint16_t *, intptr_t, uint16_t *, intptr_t)
+; ---------------------------------------------------------------------------- -
+%macro SAD_Nx64 1                 ; %1 = width; emits pixel_sad_%1x64 for 16-bit pixels
+cglobal pixel_sad_%1x64, 4,5, 8
+    pxor    m0, m0                 ; dword SAD accumulator
+    mov     r4d, 64 / 2            ; two rows per iteration
+.loop:
+    SAD_INC_2ROW_Nx64 %1
+    dec    r4d
+    jg .loop
+
+    HADDD   m0, m1                 ; horizontal add of dword partial sums
+    movd    eax, xm0
+    RET
+%endmacro
+
+INIT_XMM sse2                     ; sse2 versions of the 16-wide sizes
+SAD  16,  4
+SAD  16,  8
+SAD  16, 12
+SAD  16, 16
+SAD  16, 32
+SAD_Nx64  16
+
+INIT_XMM sse2                     ; sse2 versions of the 8-wide sizes
+SAD  8,  4
+SAD  8,  8
+SAD  8, 16
+SAD  8, 32
+
+INIT_YMM avx2                     ; avx2 versions of the 16-wide sizes
+SAD  16,  4
+SAD  16,  8
+SAD  16, 12
+SAD  16, 16
+SAD  16, 32
+
+INIT_YMM avx2
+cglobal pixel_sad_16x64, 4,7,4             ; SAD of a 16x64 block of 16-bit pixels: r0/r1 = pix0/stride, r2/r3 = pix1/stride
+    pxor    m0, m0                 ; word SAD accumulator A
+    pxor    m3, m3                 ; word SAD accumulator B
+    mov     r4d, 64 / 8            ; 8 rows per iteration
+    add     r3d, r3d               ; strides are in elements; double to get byte strides
+    add     r1d, r1d
+    lea     r5,     [r1 * 3]       ; 3 * stride0 (bytes)
+    lea     r6,     [r3 * 3]       ; 3 * stride1 (bytes)
+.loop:
+    movu    m1, [r2]
+    movu    m2, [r2 + r3]
+    psubw   m1, [r0]
+    psubw   m2, [r0 + r1]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    paddw   m0, m1
+    paddw   m3, m2
+
+    movu    m1, [r2 + 2 * r3]
+    movu    m2, [r2 + r6]
+    psubw   m1, [r0 + 2 * r1]
+    psubw   m2, [r0 + r5]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    paddw   m0, m1
+    paddw   m3, m2
+
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+
+    movu    m1, [r2]
+    movu    m2, [r2 + r3]
+    psubw   m1, [r0]
+    psubw   m2, [r0 + r1]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    paddw   m0, m1
+    paddw   m3, m2
+
+    movu    m1, [r2 + 2 * r3]
+    movu    m2, [r2 + r6]
+    psubw   m1, [r0 + 2 * r1]
+    psubw   m2, [r0 + r5]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    paddw   m0, m1
+    paddw   m3, m2
+
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+
+    dec    r4d
+    jg .loop
+
+    HADDUWD m0, m1                 ; widen unsigned word sums to dwords
+    HADDUWD m3, m1
+    HADDD   m0, m1                 ; then horizontal-add the dwords
+    HADDD   m3, m1
+    paddd   m0, m3
+
+    movd    eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_32x8, 4,7,5              ; SAD of a 32x8 block of 16-bit pixels: r0/r1 = pix0/stride, r2/r3 = pix1/stride
+    pxor    m0, m0                 ; word SAD accumulator
+    mov     r4d, 8/4               ; 4 rows per iteration
+    add     r3d, r3d               ; strides are in elements; double to get byte strides
+    add     r1d, r1d
+    lea     r5,     [r1 * 3]       ; 3 * stride0 (bytes)
+    lea     r6,     [r3 * 3]       ; 3 * stride1 (bytes)
+.loop:
+    movu    m1, [r2]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + r3]
+    movu    m4, [r2 + r3 + 32]
+    psubw   m1, [r0]
+    psubw   m2, [r0 + 32]
+    psubw   m3, [r0 + r1]
+    psubw   m4, [r0 + r1 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+
+    movu    m1, [r2 + 2 * r3]
+    movu    m2, [r2 + 2 * r3 + 32]
+    movu    m3, [r2 + r6]
+    movu    m4, [r2 + r6 + 32]
+    psubw   m1, [r0 + 2 * r1]
+    psubw   m2, [r0 + 2 * r1 + 32]
+    psubw   m3, [r0 + r5]
+    psubw   m4, [r0 + r5 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+
+    dec    r4d
+    jg .loop
+
+    HADDW   m0, m1                 ; horizontal add of word partial sums
+    movd    eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_32x16, 4,7,5             ; SAD of a 32x16 block of 16-bit pixels: r0/r1 = pix0/stride, r2/r3 = pix1/stride
+    pxor    m0, m0                 ; word SAD accumulator
+    mov     r4d, 16/8              ; 8 rows per iteration (loop body unrolled twice)
+    add     r3d, r3d               ; strides are in elements; double to get byte strides
+    add     r1d, r1d
+    lea     r5,     [r1 * 3]       ; 3 * stride0 (bytes)
+    lea     r6,     [r3 * 3]       ; 3 * stride1 (bytes)
+.loop:
+    movu    m1, [r2]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + r3]
+    movu    m4, [r2 + r3 + 32]
+    psubw   m1, [r0]
+    psubw   m2, [r0 + 32]
+    psubw   m3, [r0 + r1]
+    psubw   m4, [r0 + r1 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+
+    movu    m1, [r2 + 2 * r3]
+    movu    m2, [r2 + 2 * r3 + 32]
+    movu    m3, [r2 + r6]
+    movu    m4, [r2 + r6 + 32]
+    psubw   m1, [r0 + 2 * r1]
+    psubw   m2, [r0 + 2 * r1 + 32]
+    psubw   m3, [r0 + r5]
+    psubw   m4, [r0 + r5 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+
+    movu    m1, [r2]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + r3]
+    movu    m4, [r2 + r3 + 32]
+    psubw   m1, [r0]
+    psubw   m2, [r0 + 32]
+    psubw   m3, [r0 + r1]
+    psubw   m4, [r0 + r1 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+
+    movu    m1, [r2 + 2 * r3]
+    movu    m2, [r2 + 2 * r3 + 32]
+    movu    m3, [r2 + r6]
+    movu    m4, [r2 + r6 + 32]
+    psubw   m1, [r0 + 2 * r1]
+    psubw   m2, [r0 + 2 * r1 + 32]
+    psubw   m3, [r0 + r5]
+    psubw   m4, [r0 + r5 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+
+    dec    r4d
+    jg .loop
+
+    HADDW   m0, m1                 ; horizontal add of word partial sums
+    movd    eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_32x24, 4,7,5             ; SAD of a 32x24 block of 16-bit pixels: r0/r1 = pix0/stride, r2/r3 = pix1/stride
+    pxor    m0, m0                 ; word SAD accumulator
+    mov     r4d, 24/4              ; 4 rows per iteration
+    add     r3d, r3d               ; strides are in elements; double to get byte strides
+    add     r1d, r1d
+    lea     r5,     [r1 * 3]       ; 3 * stride0 (bytes)
+    lea     r6,     [r3 * 3]       ; 3 * stride1 (bytes)
+.loop:
+    movu    m1, [r2]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + r3]
+    movu    m4, [r2 + r3 + 32]
+    psubw   m1, [r0]
+    psubw   m2, [r0 + 32]
+    psubw   m3, [r0 + r1]
+    psubw   m4, [r0 + r1 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+
+    movu    m1, [r2 + 2 * r3]
+    movu    m2, [r2 + 2 * r3 + 32]
+    movu    m3, [r2 + r6]
+    movu    m4, [r2 + r6 + 32]
+    psubw   m1, [r0 + 2 * r1]
+    psubw   m2, [r0 + 2 * r1 + 32]
+    psubw   m3, [r0 + r5]
+    psubw   m4, [r0 + r5 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+
+    dec    r4d
+    jg .loop
+
+    HADDUWD m0, m1                 ; widen unsigned word sums to dwords, then hadd
+    HADDD   m0, m1
+    movd    eax, xm0
+    RET
+
+
+INIT_YMM avx2
+cglobal pixel_sad_32x32, 4,7,5             ; HBD SAD 32x32 (AVX2), 4 rows per iteration
+    pxor    m0, m0
+    mov     r4d, 32/4
+    add     r3d, r3d                       ; pixel -> byte strides (uint16)
+    add     r1d, r1d
+    lea     r5,     [r1 * 3]
+    lea     r6,     [r3 * 3]
+.loop:
+    movu    m1, [r2]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + r3]
+    movu    m4, [r2 + r3 + 32]
+    psubw   m1, [r0]
+    psubw   m2, [r0 + 32]
+    psubw   m3, [r0 + r1]
+    psubw   m4, [r0 + r1 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+
+    movu    m1, [r2 + 2 * r3]
+    movu    m2, [r2 + 2 * r3 + 32]
+    movu    m3, [r2 + r6]
+    movu    m4, [r2 + r6 + 32]
+    psubw   m1, [r0 + 2 * r1]
+    psubw   m2, [r0 + 2 * r1 + 32]
+    psubw   m3, [r0 + r5]
+    psubw   m4, [r0 + r5 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+
+    dec    r4d
+    jg .loop
+
+    HADDUWD m0, m1                         ; unsigned word->dword widen, then dword reduce
+    HADDD   m0, m1
+    movd    eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_32x64, 4,7,6             ; HBD SAD 32x64: two word accumulators (m0,m5) halve overflow risk
+    pxor    m0, m0
+    pxor    m5, m5
+    mov     r4d, 64 / 4
+    add     r3d, r3d                       ; pixel -> byte strides (uint16)
+    add     r1d, r1d
+    lea     r5,     [r1 * 3]
+    lea     r6,     [r3 * 3]
+.loop:
+    movu    m1, [r2]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + r3]
+    movu    m4, [r2 + r3 + 32]
+    psubw   m1, [r0]
+    psubw   m2, [r0 + 32]
+    psubw   m3, [r0 + r1]
+    psubw   m4, [r0 + r1 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1                         ; rows 0/1 -> m0
+    paddw   m5, m3                         ; rows 2/3 halves -> m5
+
+    movu    m1, [r2 + 2 * r3]
+    movu    m2, [r2 + 2 * r3 + 32]
+    movu    m3, [r2 + r6]
+    movu    m4, [r2 + r6 + 32]
+    psubw   m1, [r0 + 2 * r1]
+    psubw   m2, [r0 + 2 * r1 + 32]
+    psubw   m3, [r0 + r5]
+    psubw   m4, [r0 + r5 + 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m5, m3
+    lea     r0, [r0 + 4 * r1]
+    lea     r2, [r2 + 4 * r3]
+
+    dec    r4d
+    jg .loop
+
+    HADDUWD m0, m1                         ; widen both accumulators before summing in dwords
+    HADDUWD m5, m1
+    HADDD   m0, m1
+    HADDD   m5, m1
+    paddd   m0, m5
+
+    movd    eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_48x64, 4, 5, 7           ; HBD SAD 48x64: 3 x 32-byte columns, one accumulator each (m0,m5,m6)
+    pxor    m0, m0
+    pxor    m5, m5
+    pxor    m6, m6
+    mov     r4d, 64/2                      ; 2 rows per iteration
+    add     r3d, r3d                       ; pixel -> byte strides (uint16)
+    add     r1d, r1d
+.loop:
+    movu    m1, [r2 + 0 * mmsize]
+    movu    m2, [r2 + 1 * mmsize]
+    movu    m3, [r2 + 2 * mmsize]
+    psubw   m1, [r0 + 0 * mmsize]
+    psubw   m2, [r0 + 1 * mmsize]
+    psubw   m3, [r0 + 2 * mmsize]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    paddw   m0, m1
+    paddw   m5, m2
+    paddw   m6, m3
+
+    movu    m1, [r2 + r3 + 0 * mmsize]
+    movu    m2, [r2 + r3 + 1 * mmsize]
+    movu    m3, [r2 + r3 + 2 * mmsize]
+    psubw   m1, [r0 + r1 + 0 * mmsize]
+    psubw   m2, [r0 + r1 + 1 * mmsize]
+    psubw   m3, [r0 + r1 + 2 * mmsize]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    paddw   m0, m1
+    paddw   m5, m2
+    paddw   m6, m3
+
+    lea     r0, [r0 + 2 * r1]
+    lea     r2, [r2 + 2 * r3]
+
+    dec     r4d
+    jg      .loop
+
+    HADDUWD m0, m1                         ; widen each column accumulator, sum, then reduce
+    HADDUWD m5, m1
+    HADDUWD m6, m1
+    paddd   m0, m5
+    paddd   m0, m6
+    HADDD   m0, m1
+    movd    eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_64x16, 4, 5, 5           ; HBD SAD 64x16: 4 x 32-byte loads per row, single word accumulator
+    pxor    m0, m0
+    mov     r4d, 16 / 2                    ; 2 rows per iteration
+    add     r3d, r3d                       ; pixel -> byte strides (uint16)
+    add     r1d, r1d
+.loop:
+    movu    m1, [r2 + 0]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + 2 * 32]
+    movu    m4, [r2 + 3 * 32]
+    psubw   m1, [r0 + 0]
+    psubw   m2, [r0 + 32]
+    psubw   m3, [r0 + 2 * 32]
+    psubw   m4, [r0 + 3 * 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r2 + r3 + 64]
+    movu    m4, [r2 + r3 + 96]
+    psubw   m1, [r0 + r1]
+    psubw   m2, [r0 + r1 + 32]
+    psubw   m3, [r0 + r1 + 64]
+    psubw   m4, [r0 + r1 + 96]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m0, m3
+    lea     r0, [r0 + 2 * r1]
+    lea     r2, [r2 + 2 * r3]
+
+    dec    r4d
+    jg     .loop
+
+    HADDUWD m0, m1                         ; unsigned widen then dword reduce
+    HADDD   m0, m1
+    movd    eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_64x32, 4, 5, 6           ; HBD SAD 64x32: split across two accumulators (m0,m5) for headroom
+    pxor    m0, m0
+    pxor    m5, m5
+    mov     r4d, 32 / 2                    ; 2 rows per iteration
+    add     r3d, r3d                       ; pixel -> byte strides (uint16)
+    add     r1d, r1d
+.loop:
+    movu    m1, [r2 + 0]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + 2 * 32]
+    movu    m4, [r2 + 3 * 32]
+    psubw   m1, [r0 + 0]
+    psubw   m2, [r0 + 32]
+    psubw   m3, [r0 + 2 * 32]
+    psubw   m4, [r0 + 3 * 32]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1                         ; left 64 bytes -> m0
+    paddw   m5, m3                         ; right 64 bytes -> m5
+
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r2 + r3 + 64]
+    movu    m4, [r2 + r3 + 96]
+    psubw   m1, [r0 + r1]
+    psubw   m2, [r0 + r1 + 32]
+    psubw   m3, [r0 + r1 + 64]
+    psubw   m4, [r0 + r1 + 96]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m0, m1
+    paddw   m5, m3
+    lea     r0, [r0 + 2 * r1]
+    lea     r2, [r2 + 2 * r3]
+
+    dec    r4d
+    jg     .loop
+
+    HADDUWD m0, m1                         ; widen both accumulators, combine, reduce
+    HADDUWD m5, m1
+    paddd   m0, m5
+    HADDD   m0, m1
+    
+    movd    eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_64x48, 4, 5, 8           ; HBD SAD 64x48: one word accumulator per 32-byte column (m0,m5,m6,m7)
+    pxor    m0, m0
+    pxor    m5, m5
+    pxor    m6, m6
+    pxor    m7, m7
+    mov     r4d, 48 / 2                    ; 2 rows per iteration
+    add     r3d, r3d                       ; pixel -> byte strides (uint16)
+    add     r1d, r1d
+.loop:
+    movu    m1, [r2 + 0]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + 64]
+    movu    m4, [r2 + 96]
+    psubw   m1, [r0 + 0]
+    psubw   m2, [r0 + 32]
+    psubw   m3, [r0 + 64]
+    psubw   m4, [r0 + 96]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m0, m1
+    paddw   m5, m2
+    paddw   m6, m3
+    paddw   m7, m4
+
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r2 + r3 + 64]
+    movu    m4, [r2 + r3 + 96]
+    psubw   m1, [r0 + r1]
+    psubw   m2, [r0 + r1 + 32]
+    psubw   m3, [r0 + r1 + 64]
+    psubw   m4, [r0 + r1 + 96]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m0, m1
+    paddw   m5, m2
+    paddw   m6, m3
+    paddw   m7, m4
+
+    lea     r0, [r0 + 2 * r1]
+    lea     r2, [r2 + 2 * r3]
+
+    dec    r4d
+    jg     .loop
+
+    HADDUWD m0, m1                         ; widen each column accumulator, sum, then reduce
+    HADDUWD m5, m1
+    HADDUWD m6, m1
+    HADDUWD m7, m1
+    paddd   m0, m5
+    paddd   m0, m6
+    paddd   m0, m7
+    HADDD   m0, m1
+    movd    eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_sad_64x64, 4, 5, 8           ; HBD SAD 64x64: identical scheme to 64x48, 32 loop iterations
+    pxor    m0, m0
+    pxor    m5, m5
+    pxor    m6, m6
+    pxor    m7, m7
+    mov     r4d, 64 / 2                    ; 2 rows per iteration
+    add     r3d, r3d                       ; pixel -> byte strides (uint16)
+    add     r1d, r1d
+.loop:
+    movu    m1, [r2 + 0]
+    movu    m2, [r2 + 32]
+    movu    m3, [r2 + 64]
+    movu    m4, [r2 + 96]
+    psubw   m1, [r0 + 0]
+    psubw   m2, [r0 + 32]
+    psubw   m3, [r0 + 64]
+    psubw   m4, [r0 + 96]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m0, m1
+    paddw   m5, m2
+    paddw   m6, m3
+    paddw   m7, m4
+
+    movu    m1, [r2 + r3]
+    movu    m2, [r2 + r3 + 32]
+    movu    m3, [r2 + r3 + 64]
+    movu    m4, [r2 + r3 + 96]
+    psubw   m1, [r0 + r1]
+    psubw   m2, [r0 + r1 + 32]
+    psubw   m3, [r0 + r1 + 64]
+    psubw   m4, [r0 + r1 + 96]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m0, m1
+    paddw   m5, m2
+    paddw   m6, m3
+    paddw   m7, m4
+
+    lea     r0, [r0 + 2 * r1]
+    lea     r2, [r2 + 2 * r3]
+
+    dec    r4d
+    jg     .loop
+
+    HADDUWD m0, m1                         ; widen per-column word sums to dwords before combining
+    HADDUWD m5, m1
+    HADDUWD m6, m1
+    HADDUWD m7, m1
+    paddd   m0, m5
+    paddd   m0, m6
+    paddd   m0, m7
+    HADDD   m0, m1    
+    movd    eax, xm0
+    RET
+
+;------------------------------------------------------------------
+; int pixel_sad_32xN( uint16_t *, intptr_t, uint16_t *, intptr_t )
+;------------------------------------------------------------------
+%macro SAD_32 2                            ; %1 = width (32), %2 = height; body relies on SAD_1x32 defined earlier in this file
+cglobal pixel_sad_%1x%2, 4,5,8
+    pxor    m0,  m0
+    mov     r4d, %2/4                      ; 4 rows per iteration (one SAD_1x32 per row)
+.loop:
+    SAD_1x32
+    SAD_1x32
+    SAD_1x32
+    SAD_1x32
+    dec     r4d
+    jnz     .loop
+
+    HADDD   m0, m1
+    movd    eax, xm0
+    RET
+%endmacro
+
+INIT_XMM sse2
+SAD_32  32,  8
+SAD_32  32, 16
+SAD_32  32, 24
+SAD_32  32, 32
+SAD_32  32, 64
+
+;------------------------------------------------------------------
+; int pixel_sad_64xN( uint16_t *, intptr_t, uint16_t *, intptr_t )
+;------------------------------------------------------------------
+%macro SAD_64 2                            ; %1 = width (64), %2 = height; relies on SAD_1x64 defined earlier in this file
+cglobal pixel_sad_%1x%2, 4,5,8
+    pxor    m0, m0
+    mov     r4d, %2/4                      ; 4 rows per iteration
+.loop:
+    SAD_1x64
+    SAD_1x64
+    SAD_1x64
+    SAD_1x64
+    dec     r4d
+    jnz     .loop
+
+    HADDD   m0, m1
+    movd    eax, xm0                       ; use xm0 alias (not raw xmm0) so the macro stays correct under any INIT width
+    RET
+%endmacro
+
+INIT_XMM sse2
+SAD_64  64, 16
+SAD_64  64, 32
+SAD_64  64, 48
+SAD_64  64, 64
+
+;------------------------------------------------------------------
+; int pixel_sad_48xN( uint16_t *, intptr_t, uint16_t *, intptr_t )
+;------------------------------------------------------------------
+%macro SAD_48 2                            ; %1 = width (48), %2 = height; relies on SAD_1x48 defined earlier in this file
+cglobal pixel_sad_%1x%2, 4,5,8
+    pxor    m0, m0
+    mov     r4d, %2/4                      ; 4 rows per iteration
+.loop:
+    SAD_1x48
+    SAD_1x48
+    SAD_1x48
+    SAD_1x48
+    dec     r4d
+    jnz     .loop
+
+    HADDD   m0, m1
+    movd    eax, xm0                       ; xm0 alias keeps the macro INIT-width safe (matches SAD_32)
+    RET
+%endmacro
+
+INIT_XMM sse2
+SAD_48  48, 64
+
+;------------------------------------------------------------------
+; int pixel_sad_24xN( uint16_t *, intptr_t, uint16_t *, intptr_t )
+;------------------------------------------------------------------
+%macro SAD_24 2                            ; %1 = width (24), %2 = height; relies on SAD_1x24 defined earlier in this file
+cglobal pixel_sad_%1x%2, 4,5,8
+    pxor    m0, m0
+    mov     r4d, %2/4                      ; 4 rows per iteration
+.loop:
+    SAD_1x24
+    SAD_1x24
+    SAD_1x24
+    SAD_1x24
+    dec     r4d
+    jnz     .loop
+
+    HADDD   m0, m1
+    movd    eax, xm0                       ; xm0 alias keeps the macro INIT-width safe (matches SAD_32)
+    RET
+%endmacro
+
+INIT_XMM sse2
+SAD_24  24, 32
+
+;------------------------------------------------------------------
+; int pixel_sad_12xN( uint16_t *, intptr_t, uint16_t *, intptr_t )
+;------------------------------------------------------------------
+%macro SAD_12 2                            ; %1 = width (12), %2 = height; relies on SAD_1x12 defined earlier in this file
+cglobal pixel_sad_%1x%2, 4,5,8
+    pxor    m0,  m0
+    mov     r4d, %2/4                      ; 4 rows per iteration
+.loop:
+    SAD_1x12
+    SAD_1x12
+    SAD_1x12
+    SAD_1x12
+    dec     r4d
+    jnz     .loop
+
+    HADDD   m0, m1
+    movd    eax, xm0                       ; xm0 alias keeps the macro INIT-width safe (matches SAD_32)
+    RET
+%endmacro
+
+INIT_XMM sse2
+SAD_12  12, 16
+
+
+;=============================================================================
+; SAD x3/x4
+;=============================================================================
+
+%macro SAD_X3_INC_P 0                      ; advance fenc (fixed FENC_STRIDE) and the 3 candidate pointers by 4 rows; r4 = candidate stride
+    add     r0, 4*FENC_STRIDE
+    lea     r1, [r1+4*r4]
+    lea     r2, [r2+4*r4]
+    lea     r3, [r3+4*r4]
+%endmacro
+
+%macro SAD_X3_ONE_START 0                  ; first row: initialize the 3 dword accumulators m0/m1/m2
+    mova    m3, [r0]                       ; fenc row (aligned)
+    movu    m0, [r1]
+    movu    m1, [r2]
+    movu    m2, [r3]
+    psubw   m0, m3
+    psubw   m1, m3
+    psubw   m2, m3
+    ABSW2   m0, m1, m0, m1, m4, m5
+    ABSW    m2, m2, m6
+    pmaddwd m0, [pw_1]                     ; pmaddwd vs pw_1 = horizontal pairwise word sum into dwords
+    pmaddwd m1, [pw_1]
+    pmaddwd m2, [pw_1]
+%endmacro
+
+%macro SAD_X3_ONE 2                        ; %1 = fenc offset, %2 = candidate offset; accumulate one row into m0/m1/m2
+    mova    m6, [r0+%1]
+    movu    m3, [r1+%2]
+    movu    m4, [r2+%2]
+    movu    m5, [r3+%2]
+    psubw   m3, m6
+    psubw   m4, m6
+    psubw   m5, m6
+    ABSW2   m3, m4, m3, m4, m7, m6
+    ABSW    m5, m5, m6
+    pmaddwd m3, [pw_1]                     ; pairwise word sums -> dwords (no word overflow)
+    pmaddwd m4, [pw_1]
+    pmaddwd m5, [pw_1]
+    paddd   m0, m3
+    paddd   m1, m4
+    paddd   m2, m5
+%endmacro
+
+%macro SAD_X3_END 2                        ; %1 x %2 = block dims; reduce m0/m1/m2 and store 3 ints to scores[]
+%if mmsize == 8 && %1*%2 == 256
+    HADDUW   m0, m3                        ; MMX 16x16 path keeps word accumulators -> unsigned word reduce
+    HADDUW   m1, m4
+    HADDUW   m2, m5
+%else
+    HADDD    m0, m3
+    HADDD    m1, m4
+    HADDD    m2, m5
+%endif
+%if UNIX64
+    movd [r5+0], xm0                       ; UNIX64: scores pointer already in r5
+    movd [r5+4], xm1
+    movd [r5+8], xm2
+%else
+    mov      r0, r5mp                      ; otherwise reload scores pointer from the stack arg
+    movd [r0+0], xm0
+    movd [r0+4], xm1
+    movd [r0+8], xm2
+%endif
+    RET
+%endmacro
+
+%macro SAD_X4_INC_P 0                      ; advance fenc and the 4 candidate pointers by 4 rows; r5 = candidate stride
+    add     r0, 4*FENC_STRIDE
+    lea     r1, [r1+4*r5]
+    lea     r2, [r2+4*r5]
+    lea     r3, [r3+4*r5]
+    lea     r4, [r4+4*r5]
+%endmacro
+
+%macro SAD_X4_ONE_START 0                  ; first row: initialize the 4 dword accumulators m0..m3
+    mova    m4, [r0]                       ; fenc row (aligned)
+    movu    m0, [r1]
+    movu    m1, [r2]
+    movu    m2, [r3]
+    movu    m3, [r4]
+    psubw   m0, m4
+    psubw   m1, m4
+    psubw   m2, m4
+    psubw   m3, m4
+    ABSW2   m0, m1, m0, m1, m5, m6
+    ABSW2   m2, m3, m2, m3, m4, m7
+    pmaddwd m0, [pw_1]                     ; pairwise word sums -> dword accumulators
+    pmaddwd m1, [pw_1]
+    pmaddwd m2, [pw_1]
+    pmaddwd m3, [pw_1]
+%endmacro
+
+%macro SAD_X4_ONE 2                        ; %1 = fenc offset, %2 = candidate offset; one row into m0..m3, 3 register-budget variants
+    mova    m4, [r0+%1]
+    movu    m5, [r1+%2]
+    movu    m6, [r2+%2]
+%if num_mmregs > 8
+    movu    m7, [r3+%2]                    ; x86-64: enough regs to process all 4 candidates in parallel
+    movu    m8, [r4+%2]
+    psubw   m5, m4
+    psubw   m6, m4
+    psubw   m7, m4
+    psubw   m8, m4
+    ABSW2   m5, m6, m5, m6, m9, m10
+    ABSW2   m7, m8, m7, m8, m9, m10
+    pmaddwd m5, [pw_1]
+    pmaddwd m6, [pw_1]
+    pmaddwd m7, [pw_1]
+    pmaddwd m8, [pw_1]
+    paddd   m0, m5
+    paddd   m1, m6
+    paddd   m2, m7
+    paddd   m3, m8
+%elif cpuflag(ssse3)
+    movu    m7, [r3+%2]                    ; 8-reg ssse3: reuse m4 by re-reading fenc for the 4th candidate
+    psubw   m5, m4
+    psubw   m6, m4
+    psubw   m7, m4
+    movu    m4, [r4+%2]
+    pabsw   m5, m5
+    psubw   m4, [r0+%1]                    ; 4th candidate diffed against memory operand instead of clobbered m4
+    pabsw   m6, m6
+    pabsw   m7, m7
+    pabsw   m4, m4
+    pmaddwd m5, [pw_1]
+    pmaddwd m6, [pw_1]
+    pmaddwd m7, [pw_1]
+    pmaddwd m4, [pw_1]
+    paddd   m0, m5
+    paddd   m1, m6
+    paddd   m2, m7
+    paddd   m3, m4
+%else ; num_mmregs == 8 && !ssse3
+    psubw   m5, m4                         ; sse2 fallback: two candidates at a time to stay within m4..m7
+    psubw   m6, m4
+    ABSW    m5, m5, m7
+    ABSW    m6, m6, m7
+    pmaddwd m5, [pw_1]
+    pmaddwd m6, [pw_1]
+    paddd   m0, m5
+    paddd   m1, m6
+    movu    m5, [r3+%2]
+    movu    m6, [r4+%2]
+    psubw   m5, m4
+    psubw   m6, m4
+    ABSW2   m5, m6, m5, m6, m7, m4
+    pmaddwd m5, [pw_1]
+    pmaddwd m6, [pw_1]
+    paddd   m2, m5
+    paddd   m3, m6
+%endif
+%endmacro
+
+%macro SAD_X4_END 2                        ; %1 x %2 = block dims; reduce m0..m3 and store 4 ints to scores[]
+%if mmsize == 8 && %1*%2 == 256
+    HADDUW    m0, m4                       ; MMX 16x16 path: unsigned word reduce
+    HADDUW    m1, m5
+    HADDUW    m2, m6
+    HADDUW    m3, m7
+%else
+    HADDD     m0, m4
+    HADDD     m1, m5
+    HADDD     m2, m6
+    HADDD     m3, m7
+%endif
+    mov       r0, r6mp                     ; scores pointer is the 7th argument (stack slot)
+    movd [r0+ 0], xm0
+    movd [r0+ 4], xm1
+    movd [r0+ 8], xm2
+    movd [r0+12], xm3
+    RET
+%endmacro
+
+%macro SAD_X_2xNP 4                        ; %1 = 3|4 candidates, %2 = stride reg, %3 = first column, %4 = column count; emits 2 rows x %4 vector columns
+    %assign x %3
+%rep %4
+    SAD_X%1_ONE x*mmsize, x*mmsize
+    SAD_X%1_ONE 2*FENC_STRIDE+x*mmsize, 2*%2+x*mmsize
+    %assign x x+1
+%endrep
+%endmacro
+
+%macro PIXEL_VSAD 0                        ; vertical SAD: sum |row[i]-row[i+1]| over r2 rows of a 16-wide HBD block
+cglobal pixel_vsad, 3,3,8
+    mova      m0, [r0]                     ; prime: rows 0 and 1 (two 16-byte halves each)
+    mova      m1, [r0+16]
+    mova      m2, [r0+2*r1]
+    mova      m3, [r0+2*r1+16]
+    lea       r0, [r0+4*r1]
+    psubw     m0, m2
+    psubw     m1, m3
+    ABSW2     m0, m1, m0, m1, m4, m5
+    paddw     m0, m1                       ; m0 = running word SAD; m2/m3 carry the previous row
+    sub      r2d, 2
+    je .end
+.loop:
+    mova      m4, [r0]
+    mova      m5, [r0+16]
+    mova      m6, [r0+2*r1]
+    mova      m7, [r0+2*r1+16]
+    lea       r0, [r0+4*r1]
+    psubw     m2, m4                       ; previous row vs new row
+    psubw     m3, m5
+    psubw     m4, m6                       ; new row vs next row
+    psubw     m5, m7
+    ABSW      m2, m2, m1
+    ABSW      m3, m3, m1
+    ABSW      m4, m4, m1
+    ABSW      m5, m5, m1
+    paddw     m0, m2
+    paddw     m0, m3
+    paddw     m0, m4
+    paddw     m0, m5
+    mova      m2, m6                       ; carry last loaded row into next iteration
+    mova      m3, m7
+    sub r2d, 2
+    jg .loop
+.end:
+%if BIT_DEPTH == 9
+    HADDW     m0, m1 ; max sum: 62(pixel diffs)*511(pixel_max)=31682
+%else
+    HADDUW    m0, m1 ; max sum: 62(pixel diffs)*1023(pixel_max)=63426
+%endif
+    movd     eax, m0
+    RET
+%endmacro
+INIT_XMM sse2
+PIXEL_VSAD
+INIT_XMM ssse3
+PIXEL_VSAD
+INIT_XMM xop
+PIXEL_VSAD
+
+INIT_YMM avx2
+cglobal pixel_vsad, 3,3                    ; AVX2 vertical SAD: a full 16-pixel HBD row fits one ymm
+    mova      m0, [r0]
+    mova      m1, [r0+2*r1]
+    lea       r0, [r0+4*r1]
+    psubw     m0, m1
+    pabsw     m0, m0                       ; m0 = accumulator; m1 carries previous row
+    sub      r2d, 2
+    je .end
+.loop:
+    mova      m2, [r0]
+    mova      m3, [r0+2*r1]
+    lea       r0, [r0+4*r1]
+    psubw     m1, m2                       ; previous vs new
+    psubw     m2, m3                       ; new vs next
+    pabsw     m1, m1
+    pabsw     m2, m2
+    paddw     m0, m1
+    paddw     m0, m2
+    mova      m1, m3                       ; carry last row forward
+    sub      r2d, 2
+    jg .loop
+.end:
+%if BIT_DEPTH == 9
+    HADDW     m0, m1                       ; signed reduce fits 9-bit range
+%else
+    HADDUW    m0, m1                       ; unsigned reduce for 10-bit range
+%endif
+    movd     eax, xm0
+    RET
+;-----------------------------------------------------------------------------
+; void pixel_sad_xN_WxH( uint16_t *fenc, uint16_t *pix0, uint16_t *pix1,
+;                        uint16_t *pix2, intptr_t i_stride, int scores[3] )
+;-----------------------------------------------------------------------------
+%macro SAD_X 3                             ; driver: %1 = 3|4 candidates, %2 x %3 = block dims; stitches the START/ONE/INC/END helpers
+cglobal pixel_sad_x%1_%2x%3, 6,7,XMM_REGS
+    %assign regnum %1+1
+    %xdefine STRIDE r %+ regnum            ; stride register is r4 (x3) or r5 (x4)
+    mov     r6, %3/2-1                     ; first 2 rows handled outside the loop
+    SAD_X%1_ONE_START
+    SAD_X%1_ONE 2*FENC_STRIDE, 2*STRIDE
+    SAD_X_2xNP %1, STRIDE, 1, %2/(mmsize/2)-1
+.loop:
+    SAD_X%1_INC_P
+    SAD_X_2xNP %1, STRIDE, 0, %2/(mmsize/2)
+    dec     r6
+    jg .loop
+%if %1 == 4
+    mov     r6, r6m                        ; x4 variant: reload scores pointer consumed as loop counter
+%endif
+    SAD_X%1_END %2, %3
+%endmacro
+
+INIT_MMX mmx2                              ; MMX variants: no xmm spill space needed
+%define XMM_REGS 0
+SAD_X 3, 16, 16
+SAD_X 3, 16,  8
+SAD_X 3, 12, 16
+SAD_X 3,  8, 16
+SAD_X 3,  8,  8
+SAD_X 3,  8,  4
+SAD_X 3,  4, 16
+SAD_X 3,  4,  8
+SAD_X 3,  4,  4
+SAD_X 4, 16, 16
+SAD_X 4, 16,  8
+SAD_X 4, 12, 16
+SAD_X 4,  8, 16
+SAD_X 4,  8,  8
+SAD_X 4,  8,  4
+SAD_X 4,  4, 16
+SAD_X 4,  4,  8
+SAD_X 4,  4,  4
+INIT_MMX ssse3
+SAD_X 3,  4,  8
+SAD_X 3,  4,  4
+SAD_X 4,  4,  8
+SAD_X 4,  4,  4
+INIT_XMM ssse3                             ; SSE3+ variants: XMM_REGS sized to worst-case register use per path
+%define XMM_REGS 7
+SAD_X 3, 16, 16
+SAD_X 3, 16,  8
+SAD_X 3,  8, 16
+SAD_X 3,  8,  8
+SAD_X 3,  8,  4
+%define XMM_REGS 9
+SAD_X 4, 16, 16
+SAD_X 4, 16,  8
+SAD_X 4,  8, 16
+SAD_X 4,  8,  8
+SAD_X 4,  8,  4
+INIT_XMM sse2
+%define XMM_REGS 8
+SAD_X 3, 64, 64
+SAD_X 3, 64, 48
+SAD_X 3, 64, 32
+SAD_X 3, 64, 16
+SAD_X 3, 48, 64
+SAD_X 3, 32, 64
+SAD_X 3, 32, 32
+SAD_X 3, 32, 24
+SAD_X 3, 32, 16
+SAD_X 3, 32,  8
+SAD_X 3, 24, 32
+SAD_X 3, 16, 64
+SAD_X 3, 16, 32
+SAD_X 3, 16, 16
+SAD_X 3, 16, 12
+SAD_X 3, 16,  8
+SAD_X 3, 16,  4
+SAD_X 3,  8, 32
+SAD_X 3,  8, 16
+SAD_X 3,  8,  8
+SAD_X 3,  8,  4
+%define XMM_REGS 11
+SAD_X 4, 64, 64
+SAD_X 4, 64, 48
+SAD_X 4, 64, 32
+SAD_X 4, 64, 16
+SAD_X 4, 48, 64
+SAD_X 4, 32, 64
+SAD_X 4, 32, 32
+SAD_X 4, 32, 24
+SAD_X 4, 32, 16
+SAD_X 4, 32,  8
+SAD_X 4, 24, 32
+SAD_X 4, 16, 64
+SAD_X 4, 16, 32
+SAD_X 4, 16, 16
+SAD_X 4, 16, 12
+SAD_X 4, 16,  8
+SAD_X 4, 16,  4
+SAD_X 4,  8, 32
+SAD_X 4,  8, 16
+SAD_X 4,  8,  8
+SAD_X 4,  8,  4
+INIT_YMM avx2                              ; AVX2 variants cover 16-wide and larger blocks only
+%define XMM_REGS 7
+SAD_X 3, 16,  4
+SAD_X 3, 16,  8
+SAD_X 3, 16,  12
+SAD_X 3, 16,  16
+SAD_X 3, 16,  32
+SAD_X 3, 16,  64
+SAD_X 3, 32,  8
+SAD_X 3, 32, 16
+SAD_X 3, 32, 24
+SAD_X 3, 32, 32
+SAD_X 3, 32, 64
+SAD_X 3, 48, 64
+SAD_X 3, 64, 16
+SAD_X 3, 64, 32
+SAD_X 3, 64, 48
+SAD_X 3, 64, 64
+%define XMM_REGS 9
+SAD_X 4, 16,  4
+SAD_X 4, 16,  8
+SAD_X 4, 16,  12
+SAD_X 4, 16,  16
+SAD_X 4, 16,  32
+SAD_X 4, 16,  64
+SAD_X 4, 32,  8
+SAD_X 4, 32, 16
+SAD_X 4, 32, 24
+SAD_X 4, 32, 32
+SAD_X 4, 32, 64
+SAD_X 4, 48, 64
+SAD_X 4, 64, 16
+SAD_X 4, 64, 32
+SAD_X 4, 64, 48
+SAD_X 4, 64, 64
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/ssd-a.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2760 @@
+;*****************************************************************************
+;* ssd-a.asm: x86 ssd functions
+;*****************************************************************************
+;* Copyright (C) 2003-2013 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*          Fiona Glaser <fiona@x264.com>
+;*          Laurent Aimar <fenrir@via.ecp.fr>
+;*          Alex Izvorski <aizvorksi@gmail.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA 32
+
+SECTION .text
+
+cextern pw_00ff
+cextern hsub_mul
+
+;=============================================================================
+; SSD
+;=============================================================================
+
+%if HIGH_BIT_DEPTH
+;-----------------------------------------------------------------------------
+; int pixel_ssd_WxH( uint16_t *, intptr_t, uint16_t *, intptr_t )
+;-----------------------------------------------------------------------------
+%macro SSD_ONE 2                           ; %1 = width, %2 = height; offsets chosen so each iteration covers 4 vector loads
+cglobal pixel_ssd_ss_%1x%2, 4,7,8
+    FIX_STRIDES r1, r3
+%if mmsize == %1*2
+    %define offset0_1 r1                   ; vector == one row: 4 loads span 4 rows
+    %define offset0_2 r1*2
+    %define offset0_3 r5
+    %define offset1_1 r3
+    %define offset1_2 r3*2
+    %define offset1_3 r6
+    lea     r5, [3*r1]
+    lea     r6, [3*r3]
+%elif mmsize == %1
+    %define offset0_1 mmsize               ; vector == half a row: 4 loads span 2 rows
+    %define offset0_2 r1
+    %define offset0_3 r1+mmsize
+    %define offset1_1 mmsize
+    %define offset1_2 r3
+    %define offset1_3 r3+mmsize
+%elif mmsize == %1/2
+    %define offset0_1 mmsize               ; vector == quarter row: 4 loads span 1 row
+    %define offset0_2 mmsize*2
+    %define offset0_3 mmsize*3
+    %define offset1_1 mmsize
+    %define offset1_2 mmsize*2
+    %define offset1_3 mmsize*3
+%endif
+    %assign %%n %2/(2*mmsize/%1)           ; iteration count; loop omitted entirely when it is 1
+%if %%n > 1
+    mov    r4d, %%n
+%endif
+    pxor    m0, m0
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r0+offset0_1]
+    movu    m3, [r0+offset0_2]
+    movu    m4, [r0+offset0_3]
+    movu    m6, [r2]
+    movu    m7, [r2+offset1_1]
+    psubw   m1, m6
+    psubw   m2, m7
+    movu    m6, [r2+offset1_2]
+    movu    m7, [r2+offset1_3]
+    psubw   m3, m6
+    psubw   m4, m7
+%if %%n > 1
+    lea     r0, [r0+r1*(%2/%%n)]
+    lea     r2, [r2+r3*(%2/%%n)]
+%endif
+    pmaddwd m1, m1                         ; square-and-pair: diff^2 summed into dword lanes
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m0, m1
+    paddd   m0, m3
+%if %%n > 1
+    dec    r4d
+    jg .loop
+%endif
+
+%if BIT_DEPTH == 12 && mmsize == 16
+    movu        m5, m0                     ; 12-bit: dword lanes can exceed 32 bits, reduce in qwords
+    pxor        m6, m6
+    punpckldq   m0, m6
+    punpckhdq   m5, m6
+    paddq       m0, m5
+    movhlps     m5, m0
+    paddq       m0, m5
+    movq        r6, xm0                    ; NOTE(review): 64-bit result placed in r6 -- presumably x86inc maps r6 to rax here; verify both ABIs
+%else 
+    HADDD   m0, m5
+    movd    eax,xm0
+%endif
+%ifidn movu,movq ; detect MMX
+    EMMS
+%endif
+    RET
+%endmacro
+%macro SSD_TWO 2                           ; SSE2 SSD for wide HBD blocks (%1 = 48 or 64), 2 rows per iteration
+cglobal pixel_ssd_ss_%1x%2, 4,7,8
+    FIX_STRIDES r1, r3
+    pxor    m0,  m0
+    mov     r4d, %2/2
+    lea     r5,  [r1 * 2]                  ; r5/r6 = 2-row strides for the per-iteration advance
+    lea     r6,  [r3 * 2]
+.loop:
+    movu    m1,  [r0]
+    movu    m2,  [r0 + 16]
+    movu    m3,  [r0 + 32]
+    movu    m4,  [r0 + 48]
+    movu    m6,  [r2]
+    movu    m7,  [r2 + 16]
+    psubw   m1,  m6
+    psubw   m2,  m7
+    movu    m6,  [r2 + 32]
+    movu    m7,  [r2 + 48]
+    psubw   m3,  m6
+    psubw   m4,  m7
+    pmaddwd m1,  m1                        ; squared diffs summed pairwise into dwords
+    pmaddwd m2,  m2
+    pmaddwd m3,  m3
+    pmaddwd m4,  m4
+    paddd   m1,  m2
+    paddd   m3,  m4
+    paddd   m0,  m1
+    paddd   m0,  m3
+    movu    m1,  [r0 + 64]
+    movu    m2,  [r0 + 80]
+    movu    m6,  [r2 + 64]
+    movu    m7,  [r2 + 80]
+    psubw   m1,  m6
+    psubw   m2,  m7
+    pmaddwd m1,  m1
+    pmaddwd m2,  m2
+    paddd   m1,  m2
+    paddd   m0,  m1
+%if %1 == 64
+    movu    m3,  [r0 + 96]                 ; 64-wide only: bytes 96..127 of the row
+    movu    m4,  [r0 + 112]
+    movu    m6,  [r2 + 96]
+    movu    m7,  [r2 + 112]
+    psubw   m3,  m6
+    psubw   m4,  m7
+    pmaddwd m3,  m3
+    pmaddwd m4,  m4
+    paddd   m3,  m4
+    paddd   m0,  m3
+%endif
+    movu    m1,  [r0 + r1]
+    movu    m2,  [r0 + r1 + 16]
+    movu    m3,  [r0 + r1 + 32]
+    movu    m4,  [r0 + r1 + 48]
+    movu    m6,  [r2 + r3]
+    movu    m7,  [r2 + r3 + 16]
+    psubw   m1,  m6
+    psubw   m2,  m7
+    movu    m6,  [r2 + r3 + 32]
+    movu    m7,  [r2 + r3 + 48]
+    psubw   m3,  m6
+    psubw   m4,  m7
+    pmaddwd m1,  m1
+    pmaddwd m2,  m2
+    pmaddwd m3,  m3
+    pmaddwd m4,  m4
+    paddd   m1,  m2
+    paddd   m3,  m4
+    paddd   m0,  m1
+    paddd   m0,  m3
+    movu    m1,  [r0 + r1 + 64]
+    movu    m2,  [r0 + r1 + 80]
+    movu    m6,  [r2 + r3 + 64]
+    movu    m7,  [r2 + r3 + 80]
+    psubw   m1,  m6
+    psubw   m2,  m7
+    pmaddwd m1,  m1
+    pmaddwd m2,  m2
+    paddd   m1,  m2
+    paddd   m0,  m1
+%if %1 == 64
+    movu    m3,  [r0 + r1 + 96]
+    movu    m4,  [r0 + r1 + 112]
+    movu    m6,  [r2 + r3 + 96]
+    movu    m7,  [r2 + r3 + 112]
+    psubw   m3,  m6
+    psubw   m4,  m7
+    pmaddwd m3,  m3
+    pmaddwd m4,  m4
+    paddd   m3,  m4
+    paddd   m0,  m3
+%endif
+    lea     r0,  [r0 + r5]
+    lea     r2,  [r2 + r6]
+    dec     r4d
+    jnz  .loop
+    HADDD   m0, m5
+    movd   eax, xm0
+    RET
+%endmacro
+%macro SSD_24 2                            ; SSE2 SSD for 24-wide HBD blocks: 3 x 16-byte loads per row, 2 rows/iter
+cglobal pixel_ssd_ss_%1x%2, 4,7,8
+    FIX_STRIDES r1, r3
+    pxor    m0,  m0
+    mov     r4d, %2/2
+    lea     r5,  [r1 * 2]
+    lea     r6,  [r3 * 2]
+.loop:
+    movu    m1,  [r0]
+    movu    m2,  [r0 + 16]
+    movu    m3,  [r0 + 32]
+    movu    m5,  [r2]
+    movu    m6,  [r2 + 16]
+    movu    m7,  [r2 + 32]
+    psubw   m1,  m5
+    psubw   m2,  m6
+    psubw   m3,  m7
+    pmaddwd m1,  m1
+    pmaddwd m2,  m2
+    pmaddwd m3,  m3                        ; m3 held back, merged with the second row below
+    paddd   m1,  m2
+    paddd   m0,  m1
+    movu    m1,  [r0 + r1]
+    movu    m2,  [r0 + r1 + 16]
+    movu    m4,  [r0 + r1 + 32]
+    movu    m5,  [r2 + r3]
+    movu    m6,  [r2 + r3 + 16]
+    movu    m7,  [r2 + r3 + 32]
+    psubw   m1,  m5
+    psubw   m2,  m6
+    psubw   m4,  m7
+    pmaddwd m1,  m1
+    pmaddwd m2,  m2
+    pmaddwd m4,  m4
+    paddd   m1,  m2
+    paddd   m3,  m4
+    paddd   m0,  m1
+    paddd   m0,  m3
+    lea     r0,  [r0 + r5]
+    lea     r2,  [r2 + r6]
+    dec     r4d
+    jnz  .loop
+    HADDD   m0, m5
+    movd   eax, xm0
+    RET
+%endmacro
+%macro SSD_12 2                            ; SSE2 SSD for 12-wide HBD blocks: two 4-pixel tails packed into one xmm; 4 rows/iter
+cglobal pixel_ssd_ss_%1x%2, 4,7,8
+    FIX_STRIDES r1, r3
+    pxor    m0,  m0
+    mov     r4d, %2/4
+    lea     r5,  [r1 * 2]
+    lea     r6,  [r3 * 2]
+.loop:
+    movu        m1,  [r0]                  ; first 8 pixels of row 0
+    movh        m2,  [r0 + 16]             ; 4-pixel tail of row 0 (low half)
+    movu        m3,  [r0 + r1]
+    punpcklqdq  m2,  [r0 + r1 + 16]        ; pack row-0 and row-1 tails into one register
+    movu        m7,  [r2]
+    psubw       m1,  m7
+    movh        m4,  [r2 + 16]
+    movu        m7,  [r2 + r3]
+    psubw       m3,  m7
+    punpcklqdq  m4,  [r2 + r3 + 16]
+    psubw       m2,  m4
+    pmaddwd     m1,  m1
+    pmaddwd     m2,  m2
+    pmaddwd     m3,  m3
+    paddd       m1,  m2
+    paddd       m0,  m1
+
+    movu        m1,  [r0 + r5]             ; rows 2 and 3, same scheme
+    movh        m2,  [r0 + r5 + 16]
+    lea         r0,  [r0 + r5]             ; pointer advanced mid-iteration; advanced again at loop tail
+    movu        m6,  [r0 + r1]
+    punpcklqdq  m2,  [r0 + r1 + 16]
+    movu        m7,  [r2 + r6]
+    psubw       m1,  m7
+    movh        m4,  [r2 + r6 + 16]
+    lea         r2,  [r2 + r6]
+    movu        m7,  [r2 + r3]
+    psubw       m6,  m7
+    punpcklqdq  m4,  [r2 + r3 + 16]
+    psubw       m2,  m4
+    pmaddwd     m1,  m1
+    pmaddwd     m2,  m2
+    pmaddwd     m6,  m6
+    paddd       m1,  m2
+    paddd       m3,  m6
+    paddd       m0,  m1
+    paddd       m0,  m3
+    lea         r0,  [r0 + r5]
+    lea         r2,  [r2 + r6]
+    dec         r4d
+    jnz     .loop
+    HADDD   m0, m5
+    movd   eax, xm0
+    RET
+%endmacro
+
+INIT_YMM avx2
+cglobal pixel_ssd_16x16, 4,7,8             ; HBD SSD 16x16 (AVX2): one ymm row, 4 rows per iteration
+    FIX_STRIDES r1, r3
+    lea     r5, [3 * r1]
+    lea     r6, [3 * r3]
+    mov    r4d, 4
+    pxor    m0, m0
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r0 + r1]
+    movu    m3, [r0 + r1 * 2]
+    movu    m4, [r0 + r5]
+    movu    m6, [r2]
+    movu    m7, [r2 + r3]
+    psubw   m1, m6
+    psubw   m2, m7
+    movu    m6, [r2 + r3 * 2]
+    movu    m7, [r2 + r6]
+    psubw   m3, m6
+    psubw   m4, m7
+
+    lea     r0, [r0 + r1 * 4]
+    lea     r2, [r2 + r3 * 4]
+
+    pmaddwd m1, m1                         ; squared diffs pairwise summed into dword lanes
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m0, m1
+    paddd   m0, m3
+
+    dec    r4d
+    jg .loop
+
+    HADDD   m0, m5
+    movd   eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_ssd_32x32, 4,7,8             ; HBD SSD 32x32 (AVX2): two ymm per row, 2 rows per iteration
+    add     r1, r1                         ; pixel -> byte strides (uint16), inline FIX_STRIDES
+    add     r3, r3
+    mov     r4d, 16
+    pxor    m0, m0
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r0 + 32]
+    movu    m3, [r0 + r1]
+    movu    m4, [r0 + r1 + 32]
+    movu    m6, [r2]
+    movu    m7, [r2 + 32]
+    psubw   m1, m6
+    psubw   m2, m7
+    movu    m6, [r2 + r3]
+    movu    m7, [r2 + r3 + 32]
+    psubw   m3, m6
+    psubw   m4, m7
+
+    lea     r0, [r0 + r1 * 2]
+    lea     r2, [r2 + r3 * 2]
+
+    pmaddwd m1, m1                         ; squared diffs pairwise summed into dword lanes
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m0, m1
+    paddd   m0, m3
+
+    dec    r4d
+    jg .loop
+
+    HADDD   m0, m5
+    movd   eax, xm0
+    RET
+
+INIT_YMM avx2
+cglobal pixel_ssd_64x64, 4,7,8             ; HBD SSD 64x64 (AVX2): four ymm per row, 1 row per iteration
+    FIX_STRIDES r1, r3
+    mov    r4d, 64
+    pxor    m0, m0
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r0+32]
+    movu    m3, [r0+32*2]
+    movu    m4, [r0+32*3]
+    movu    m6, [r2]
+    movu    m7, [r2+32]
+    psubw   m1, m6
+    psubw   m2, m7
+    movu    m6, [r2+32*2]
+    movu    m7, [r2+32*3]
+    psubw   m3, m6
+    psubw   m4, m7
+
+    lea     r0, [r0+r1]
+    lea     r2, [r2+r3]
+
+    pmaddwd m1, m1                         ; squared diffs pairwise summed into dword lanes
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m0, m1
+    paddd   m0, m3
+
+    dec    r4d
+    jg .loop
+
+    HADDD   m0, m5                         ; NOTE(review): dword accumulator -- presumably sufficient for supported bit depths; 12-bit headroom not obvious here
+    movd   eax, xm0
+    RET
+
+INIT_MMX mmx2                              ; instantiate pixel_ssd_ss for every HEVC partition size, per ISA level
+SSD_ONE     4,  4
+SSD_ONE     4,  8
+SSD_ONE     4, 16
+SSD_ONE     8,  4
+SSD_ONE     8,  8
+SSD_ONE     8, 16
+SSD_ONE    16,  8
+SSD_ONE    16, 16
+INIT_XMM sse2
+SSD_ONE     8,  4
+SSD_ONE     8,  8
+SSD_ONE     8, 16
+SSD_ONE     8, 32
+SSD_12     12, 16
+SSD_ONE    16,  4
+SSD_ONE    16,  8
+SSD_ONE    16, 12
+SSD_ONE    16, 16
+SSD_ONE    16, 32
+SSD_ONE    16, 64
+SSD_24     24, 32
+SSD_ONE    32,  8
+SSD_ONE    32, 16
+SSD_ONE    32, 24
+SSD_ONE    32, 32
+SSD_ONE    32, 64
+SSD_TWO    48, 64
+SSD_TWO    64, 16
+SSD_TWO    64, 32
+SSD_TWO    64, 48
+SSD_TWO    64, 64
+INIT_YMM avx2                              ; AVX2 re-instantiations override slower variants at runtime dispatch
+SSD_ONE    16,  8
+SSD_ONE    16, 16
+SSD_ONE    32, 32
+SSD_ONE    64, 64
+SSD_ONE    16, 32
+SSD_ONE    32, 64
+%endif ; HIGH_BIT_DEPTH
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_ss_WxH( int16_t *, intptr_t, int16_t *, intptr_t )
+;-----------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH == 0
+; SSD between two blocks of int16_t samples, %1 x %2 (the "ss" variants:
+; both operands are already 16-bit, so rows are subtracted directly with
+; psubw and squared with pmaddwd -- no byte unpacking needed).
+%macro SSD_SS 2
+cglobal pixel_ssd_ss_%1x%2, 4,7,6
+    FIX_STRIDES r1, r3
+%if mmsize == %1*4 || mmsize == %1*2
+    ; narrow block: one vector load covers 1-2 rows; offsets address
+    ; rows 1..3 (byte stride appears to be r1*2 -- stride given in
+    ; int16_t units; r5/r6 hold 6*r1 and 6*r3, i.e. row 3)
+    %define offset0_1 r1*2
+    %define offset0_2 r1*4
+    %define offset0_3 r5
+    %define offset1_1 r3*2
+    %define offset1_2 r3*4
+    %define offset1_3 r6
+    lea     r5, [4*r1]
+    lea     r6, [4*r3]
+    lea     r5, [r5 + 2*r1]
+    lea     r6, [r6 + 2*r3]
+%elif mmsize == %1
+    ; wide block: each row spans two vectors at +0 and +16
+    %define offset0_1 16
+    %define offset0_2 r1*2
+    %define offset0_3 r1*2+16
+    %define offset1_1 16
+    %define offset1_2 r3*2
+    %define offset1_3 r3*2+16
+%endif
+%if %1 == 4
+    %assign %%n %2/(mmsize/%1)
+%else
+    %assign %%n %2/(2*mmsize/%1)
+%endif
+; %%n = number of loop iterations needed to cover all %2 rows
+%if %%n > 1
+    mov    r4d, %%n
+%endif
+    pxor    m0, m0              ; dword accumulator
+.loop:
+%if %1 == 4
+    movh    m1, [r0]
+    movh    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movh    m1, [r0 + offset0_1]
+    movh    m2, [r2 + offset1_1]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movh    m1, [r0 + offset0_2]
+    movh    m2, [r2 + offset1_2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movh    m1, [r0 + offset0_3]
+    movh    m2, [r2 + offset1_3]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+%else
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + offset0_1]
+    movu    m2, [r2 + offset1_1]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + offset0_2]
+    movu    m2, [r2 + offset1_2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + offset0_3]
+    movu    m2, [r2 + offset1_3]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+%endif
+    lea       r0, [r0+r1*(%2/%%n)*2]   ; advance past the rows just processed
+    lea       r2, [r2+r3*(%2/%%n)*2]
+%if %%n > 1
+    dec    r4d
+    jg .loop
+%endif
+%if %1 == 4
+  %if notcpuflag(ssse3)
+    ; only two dword lanes hold data for 4-wide blocks; fold lane 1 into 0
+    pshufd   m1, m0, 1
+    paddd    m0, m1
+  %else
+    phaddd   m0, m0
+  %endif
+%else
+    HADDD    m0, m1             ; full horizontal dword sum
+%endif
+    movd     eax, m0
+    RET
+%endmacro
+; Instantiate SSD_SS for all 4-, 8- and 16-wide block sizes (invoked once
+; per SIMD instruction set below).
+%macro SSD_SS_ONE 0
+SSD_SS     4,  4
+SSD_SS     4,  8
+SSD_SS     4, 16
+SSD_SS     8,  4
+SSD_SS     8,  8
+SSD_SS     8, 16
+SSD_SS     8, 32
+SSD_SS    16,  4
+SSD_SS    16,  8
+SSD_SS    16, 12
+SSD_SS    16, 16
+SSD_SS    16, 32
+SSD_SS    16, 64
+%endmacro
+
+; int16_t-vs-int16_t SSD for the odd 12x16 block: 8 full columns per row
+; plus a masked second load for columns 8..11.
+%macro SSD_SS_12x16 0
+cglobal pixel_ssd_ss_12x16, 4,7,6
+    FIX_STRIDES r1, r3
+    mov    r4d, 8               ; 8 iterations x 2 rows = 16 rows
+    pxor    m0, m0              ; dword accumulator
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    pslldq  m1, 8               ; shift out dwords for columns 12..15 ...
+    psrldq  m1, 8               ; ... keeping only columns 8..11
+    paddd   m0, m1
+    lea       r0, [r0 + 2*r1]
+    lea       r2, [r2 + 2*r3]
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    pslldq  m1, 8               ; same masking for the second row
+    psrldq  m1, 8
+    paddd   m0, m1
+    lea       r0, [r0 + 2*r1]
+    lea       r2, [r2 + 2*r3]
+    dec      r4d
+    jnz .loop
+    HADDD     m0, m1
+    movd     eax, m0
+    RET
+%endmacro
+
+; int16_t-vs-int16_t SSD for 32x%1 blocks: four 16-byte loads per row
+; (32 words), two rows per loop iteration.
+%macro SSD_SS_32 1
+cglobal pixel_ssd_ss_32x%1, 4,7,6
+    FIX_STRIDES r1, r3
+    mov    r4d, %1/2            ; two rows per iteration
+    pxor    m0, m0              ; dword accumulator
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 32]
+    movu    m2, [r2 + 32]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 48]
+    movu    m2, [r2 + 48]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    lea       r0, [r0 + 2*r1]   ; next row
+    lea       r2, [r2 + 2*r3]
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 32]
+    movu    m2, [r2 + 32]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 48]
+    movu    m2, [r2 + 48]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    lea       r0, [r0 + 2*r1]
+    lea       r2, [r2 + 2*r3]
+    dec      r4d
+    jnz .loop
+    HADDD     m0, m1
+    movd     eax, m0
+    RET
+%endmacro
+
+; Instantiate all 32-wide ss-SSD heights.
+%macro SSD_SS_32xN 0
+SSD_SS_32 8
+SSD_SS_32 16
+SSD_SS_32 24
+SSD_SS_32 32
+SSD_SS_32 64
+%endmacro
+
+; int16_t-vs-int16_t SSD for 24x32: three 16-byte loads per row (24 words),
+; two rows per iteration, 16 iterations.
+%macro SSD_SS_24 0
+cglobal pixel_ssd_ss_24x32, 4,7,6
+    FIX_STRIDES r1, r3
+    mov    r4d, 16
+    pxor    m0, m0              ; dword accumulator
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 32]
+    movu    m2, [r2 + 32]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    lea       r0, [r0 + 2*r1]
+    lea       r2, [r2 + 2*r3]
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 32]
+    movu    m2, [r2 + 32]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    lea       r0, [r0 + 2*r1]
+    lea       r2, [r2 + 2*r3]
+    dec      r4d
+    jnz .loop
+    HADDD     m0, m1
+    movd     eax, m0
+    RET
+%endmacro
+
+; int16_t-vs-int16_t SSD for 48x64: six 16-byte loads per row (48 words),
+; two rows per iteration, 32 iterations.
+%macro SSD_SS_48 0
+cglobal pixel_ssd_ss_48x64, 4,7,6
+    FIX_STRIDES r1, r3
+    mov    r4d, 32
+    pxor    m0, m0              ; dword accumulator
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 32]
+    movu    m2, [r2 + 32]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 48]
+    movu    m2, [r2 + 48]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 64]
+    movu    m2, [r2 + 64]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 80]
+    movu    m2, [r2 + 80]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    lea       r0, [r0 + 2*r1]   ; next row
+    lea       r2, [r2 + 2*r3]
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 32]
+    movu    m2, [r2 + 32]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 48]
+    movu    m2, [r2 + 48]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 64]
+    movu    m2, [r2 + 64]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 80]
+    movu    m2, [r2 + 80]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    lea       r0, [r0 + 2*r1]
+    lea       r2, [r2 + 2*r3]
+    dec      r4d
+    jnz .loop
+    HADDD     m0, m1
+    movd     eax, m0
+    RET
+%endmacro
+
+; int16_t-vs-int16_t SSD for 64x%1: eight 16-byte loads per row (64 words),
+; two rows per iteration.
+%macro SSD_SS_64 1
+cglobal pixel_ssd_ss_64x%1, 4,7,6
+    FIX_STRIDES r1, r3
+    mov    r4d, %1/2            ; two rows per iteration
+    pxor    m0, m0              ; dword accumulator
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 32]
+    movu    m2, [r2 + 32]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 48]
+    movu    m2, [r2 + 48]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 64]
+    movu    m2, [r2 + 64]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 80]
+    movu    m2, [r2 + 80]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 96]
+    movu    m2, [r2 + 96]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 112]
+    movu    m2, [r2 + 112]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    lea     r0, [r0 + 2*r1]     ; next row
+    lea     r2, [r2 + 2*r3]
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 32]
+    movu    m2, [r2 + 32]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 48]
+    movu    m2, [r2 + 48]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 64]
+    movu    m2, [r2 + 64]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 80]
+    movu    m2, [r2 + 80]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 96]
+    movu    m2, [r2 + 96]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 112]
+    movu    m2, [r2 + 112]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    lea     r0, [r0 + 2*r1]
+    lea     r2, [r2 + 2*r3]
+    dec     r4d
+    jnz .loop
+    HADDD     m0, m1
+    movd     eax, m0
+    RET
+%endmacro
+
+; Instantiate all 64-wide ss-SSD heights.
+%macro SSD_SS_64xN 0
+SSD_SS_64 16
+SSD_SS_64 32
+SSD_SS_64 48
+SSD_SS_64 64
+%endmacro
+
+; Emit the full pixel_ssd_ss_* function set for each instruction set.
+INIT_XMM sse2
+SSD_SS_ONE
+SSD_SS_12x16
+SSD_SS_24
+SSD_SS_32xN
+SSD_SS_48
+SSD_SS_64xN
+INIT_XMM sse4
+SSD_SS_ONE
+SSD_SS_12x16
+SSD_SS_24
+SSD_SS_32xN
+SSD_SS_48
+SSD_SS_64xN
+INIT_XMM avx
+SSD_SS_ONE
+SSD_SS_12x16
+SSD_SS_24
+SSD_SS_32xN
+SSD_SS_48
+SSD_SS_64xN
+%endif ; !HIGH_BIT_DEPTH
+
+%if HIGH_BIT_DEPTH == 0
+; Load two full-width rows from each source (t0/t2 pointers, t1/t3 strides)
+; into m1..m4.  %5 selects pointer advance: 0 = none, 1 = one stride,
+; 2 = two strides.
+%macro SSD_LOAD_FULL 5
+    movu      m1, [t0+%1]
+    movu      m2, [t2+%2]
+    movu      m3, [t0+%3]
+    movu      m4, [t2+%4]
+%if %5==1
+    add       t0, t1
+    add       t2, t3
+%elif %5==2
+    lea       t0, [t0+2*t1]
+    lea       t2, [t2+2*t3]
+%endif
+%endmacro
+
+; Half-width load of two rows from src1 into m%1/m%2; %5 != 0 advances t0
+; by two rows.  Paired with JOIN, which loads src2 and forms differences.
+%macro LOAD 5
+    movh      m%1, %3
+    movh      m%2, %4
+%if %5
+    lea       t0, [t0+2*t1]
+%endif
+%endmacro
+
+; Baseline JOIN: load src2 halves, zero-extend bytes to words (m7 must be
+; zero) and compute word differences into m%1/m%2.  %7 != 0 advances t2.
+%macro JOIN 7
+    movh      m%3, %5
+    movh      m%4, %6
+%if %7
+    lea       t2, [t2+2*t3]
+%endif
+    punpcklbw m%1, m7
+    punpcklbw m%3, m7
+    psubw     m%1, m%3
+    punpcklbw m%2, m7
+    punpcklbw m%4, m7
+    psubw     m%2, m%4
+%endmacro
+
+; SSE2 JOIN variant: pack both rows into one register each, then split
+; even/odd bytes with DEINTB (m%5 holds the 0x00ff mask) before the word
+; subtract.  %7 != 0 advances t2 by two rows.
+%macro JOIN_SSE2 7
+    movh      m%3, %5
+    movh      m%4, %6
+%if %7
+    lea       t2, [t2+2*t3]
+%endif
+    punpcklqdq m%1, m%2
+    punpcklqdq m%3, m%4
+    DEINTB %2, %1, %4, %3, 7
+    psubw m%2, m%4
+    psubw m%1, m%3
+%endmacro
+
+; SSSE3 JOIN variant: interleave src1/src2 bytes so SSD_CORE_SSSE3 can use
+; pmaddubsw with the hsub_mul (+1/-1) constant to form differences.
+%macro JOIN_SSSE3 7
+    movh      m%3, %5
+    movh      m%4, %6
+%if %7
+    lea       t2, [t2+2*t3]
+%endif
+    punpcklbw m%1, m%3
+    punpcklbw m%2, m%4
+%endmacro
+
+; AVX2 LOAD: two 16-byte rows packed into one ymm (low/high 128-bit lanes).
+%macro LOAD_AVX2 5
+    mova     xm%1, %3
+    vinserti128 m%1, m%1, %4, 1
+%if %5
+    lea       t0, [t0+2*t1]
+%endif
+%endmacro
+
+; AVX2 JOIN: load two src2 rows into one ymm and byte-interleave with src1
+; via SBUTTERFLY, ready for the pmaddubsw difference in SSD_CORE_SSSE3.
+%macro JOIN_AVX2 7
+    mova     xm%2, %5
+    vinserti128 m%2, m%2, %6, 1
+%if %7
+    lea       t2, [t2+2*t3]
+%endif
+    SBUTTERFLY bw, %1, %2, %3
+%endmacro
+
+; Load+difference four half-width rows via the LOAD/JOIN pair selected for
+; the current instruction set (%5 controls the final pointer advance).
+%macro SSD_LOAD_HALF 5
+    LOAD      1, 2, [t0+%1], [t0+%3], 1
+    JOIN      1, 2, 3, 4, [t2+%2], [t2+%4], 1
+    LOAD      3, 4, [t0+%1], [t0+%3], %5
+    JOIN      3, 4, 5, 6, [t2+%2], [t2+%4], %5
+%endmacro
+
+; Baseline SSD core.  In FULL mode, compute |a-b| via saturating subtracts
+; in both directions + por, widen bytes to words (m%5 = zero), then square
+; everything with pmaddwd.  In HALF mode the inputs are already word diffs.
+%macro SSD_CORE 7-8
+%ifidn %8, FULL
+    mova      m%6, m%2
+    mova      m%7, m%4
+    psubusb   m%2, m%1
+    psubusb   m%4, m%3
+    psubusb   m%1, m%6
+    psubusb   m%3, m%7
+    por       m%1, m%2          ; |a-b| per byte
+    por       m%3, m%4
+    punpcklbw m%2, m%1, m%5
+    punpckhbw m%1, m%5
+    punpcklbw m%4, m%3, m%5
+    punpckhbw m%3, m%5
+%endif
+    pmaddwd   m%1, m%1
+    pmaddwd   m%2, m%2
+    pmaddwd   m%3, m%3
+    pmaddwd   m%4, m%4
+%endmacro
+
+; SSE2 SSD core: de-interleave even/odd bytes with DEINTB (m%5 = pw_00ff
+; mask), subtract as words, then square with pmaddwd.
+%macro SSD_CORE_SSE2 7-8
+%ifidn %8, FULL
+    DEINTB %6, %1, %7, %2, %5
+    psubw m%6, m%7
+    psubw m%1, m%2
+    SWAP %6, %2, %1
+    DEINTB %6, %3, %7, %4, %5
+    psubw m%6, m%7
+    psubw m%3, m%4
+    SWAP %6, %4, %3
+%endif
+    pmaddwd   m%1, m%1
+    pmaddwd   m%2, m%2
+    pmaddwd   m%3, m%3
+    pmaddwd   m%4, m%4
+%endmacro
+
+; SSSE3 SSD core: with src1/src2 bytes interleaved, pmaddubsw against the
+; hsub_mul (+1/-1) constant in m%5 yields word differences directly; then
+; square with pmaddwd.
+%macro SSD_CORE_SSSE3 7-8
+%ifidn %8, FULL
+    punpckhbw m%6, m%1, m%2
+    punpckhbw m%7, m%3, m%4
+    punpcklbw m%1, m%2
+    punpcklbw m%3, m%4
+    SWAP %6, %2, %3
+    SWAP %7, %4
+%endif
+    pmaddubsw m%1, m%5
+    pmaddubsw m%2, m%5
+    pmaddubsw m%3, m%5
+    pmaddubsw m%4, m%5
+    pmaddwd   m%1, m%1
+    pmaddwd   m%2, m%2
+    pmaddwd   m%3, m%3
+    pmaddwd   m%4, m%4
+%endmacro
+
+; One SSD loop iteration: load (FULL or HALF variant), run the per-ISA
+; core, and fold the four squared-diff vectors into accumulator m0.
+%macro SSD_ITER 6
+    SSD_LOAD_%1 %2,%3,%4,%5,%6
+    SSD_CORE  1, 2, 3, 4, 7, 5, 6, %1
+    paddd     m1, m2
+    paddd     m3, m4
+    paddd     m0, m1
+    paddd     m0, m3
+%endmacro
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_16x16( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; 8-bit SSD entry point for a %1x%2 block.  Code-size trick: only the
+; square (%1x%1) version carries the real loop; rectangular sizes just set
+; the iteration count in al and tail-jump into the square version's
+; .startloop label, so all %1-wide sizes share one body.
+%macro SSD 2
+%if %1 != %2
+    %assign function_align 8
+%else
+    %assign function_align 16
+%endif
+cglobal pixel_ssd_%1x%2, 0,0,0  ; no prologue yet; done after .startloop
+    mov     al, %1*%2/mmsize/2  ; loop iteration count
+
+%if %1 != %2
+    ; rectangular: reuse the %1x%1 body with the count already in al
+    jmp mangle(private_prefix %+ _ %+ pixel_ssd_%1x%1 %+ SUFFIX %+ .startloop)
+%else
+
+.startloop:
+%if ARCH_X86_64
+    DECLARE_REG_TMP 0,1,2,3
+    PROLOGUE 0,0,8
+%else
+    PROLOGUE 0,5
+    DECLARE_REG_TMP 1,2,3,4
+    ; x86-32: reload args from the stack (prologue was deferred)
+    mov t0, r0m
+    mov t1, r1m
+    mov t2, r2m
+    mov t3, r3m
+%endif
+
+%if cpuflag(ssse3)
+    mova    m7, [hsub_mul]      ; +1/-1 constant for pmaddubsw diff
+%elifidn cpuname, sse2
+    mova    m7, [pw_00ff]       ; byte de-interleave mask for DEINTB
+%elif %1 >= mmsize
+    pxor    m7, m7              ; zero register for byte unpacking
+%endif
+    pxor    m0, m0              ; dword accumulator
+
+ALIGN 16
+.loop:
+%if %1 > mmsize
+    SSD_ITER FULL, 0, 0, mmsize, mmsize, 1
+%elif %1 == mmsize
+    SSD_ITER FULL, 0, 0, t1, t3, 2
+%else
+    SSD_ITER HALF, 0, 0, t1, t3, 2
+%endif
+    dec     al
+    jg .loop
+%if mmsize==32
+    vextracti128 xm1, m0, 1     ; fold high ymm lane before HADDD
+    paddd  xm0, xm1
+    HADDD  xm0, xm1
+    movd   eax, xm0
+%else
+    HADDD   m0, m1
+    movd   eax, m0
+%endif
+%if (mmsize == 8)
+    emms                        ; leave clean FPU state after MMX use
+%endif
+    RET
+%endif
+%endmacro
+
+; Instantiate the shared-tail SSD macro for all HEVC block sizes.  Square
+; sizes must exist for every width used, since rectangular variants jump
+; into them (hence the otherwise-unused 24x24 below).
+%macro HEVC_SSD 0
+SSD 32, 64
+SSD 16, 64
+SSD 32, 32
+SSD 32, 16
+SSD 16, 32
+SSD 32, 8
+SSD 8,  32
+SSD 32, 24
+SSD 24, 24 ; not used, but resolves x265_pixel_ssd_24x24_sse2.startloop symbol
+SSD 8,  4
+SSD 8,  8
+SSD 16, 16
+SSD 16, 12
+SSD 16, 8
+SSD 8,  16
+SSD 16, 4
+%endmacro
+
+; Emit 8-bit pixel_ssd_* functions per instruction set, redefining the
+; SSD_CORE/JOIN/LOAD helpers to the matching variant first.
+INIT_MMX mmx
+SSD 16, 16
+SSD 16,  8
+SSD  8,  8
+SSD  8, 16
+SSD  4,  4
+SSD  8,  4
+SSD  4,  8
+SSD  4, 16
+INIT_XMM sse2slow
+SSD 16, 16
+SSD  8,  8
+SSD 16,  8
+SSD  8, 16
+SSD  8,  4
+INIT_XMM sse2
+%define SSD_CORE SSD_CORE_SSE2
+%define JOIN JOIN_SSE2
+HEVC_SSD
+INIT_XMM ssse3
+%define SSD_CORE SSD_CORE_SSSE3
+%define JOIN JOIN_SSSE3
+HEVC_SSD
+INIT_XMM avx
+HEVC_SSD
+INIT_MMX ssse3
+SSD  4,  4
+SSD  4,  8
+SSD  4, 16
+INIT_XMM xop
+SSD 16, 16
+SSD  8,  8
+SSD 16,  8
+SSD  8, 16
+SSD  8,  4
+%define LOAD LOAD_AVX2
+%define JOIN JOIN_AVX2
+INIT_YMM avx2
+SSD 16, 16
+SSD 16,  8
+SSD 32, 32
+SSD 64, 64
+%assign function_align 16       ; restore default alignment after SSD macro
+%endif ; !HIGH_BIT_DEPTH
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_12x16( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; SSD of two 12x16 blocks of uint8_t pixels.  Four rows per iteration:
+; low 8 columns via pmovzxbw on the row loads, columns 8..11 recovered by
+; punpckhdq-pairing two rows before widening.
+cglobal pixel_ssd_12x16, 4, 5, 7, src1, stride1, src2, stride2
+
+    pxor        m6,     m6      ; dword accumulator
+    mov         r4d,    4       ; 4 iterations x 4 rows = 16 rows
+
+.loop:
+    movu        m0,    [r0]
+    movu        m1,    [r2]
+    movu        m2,    [r0 + r1]
+    movu        m3,    [r2 + r3]
+
+    ; gather bytes 8..15 of both rows into one register (cols 8..11 used)
+    punpckhdq   m4,    m0,    m2
+    punpckhdq   m5,    m1,    m3
+
+    pmovzxbw    m0,    m0      ; widen low 8 columns to words
+    pmovzxbw    m1,    m1
+    pmovzxbw    m2,    m2
+    pmovzxbw    m3,    m3
+    pmovzxbw    m4,    m4
+    pmovzxbw    m5,    m5
+
+    psubw       m0,    m1
+    psubw       m2,    m3
+    psubw       m4,    m5
+
+    pmaddwd     m0,    m0      ; square and pair-sum
+    pmaddwd     m2,    m2
+    pmaddwd     m4,    m4
+
+    paddd       m0,    m2
+    paddd       m6,    m4
+    paddd       m6,    m0
+
+    movu        m0,    [r0 + 2 * r1]
+    movu        m1,    [r2 + 2 * r3]
+    lea         r0,    [r0 + 2 * r1]
+    lea         r2,    [r2 + 2 * r3]
+    movu        m2,    [r0 + r1]
+    movu        m3,    [r2 + r3]
+
+    punpckhdq   m4,    m0,    m2
+    punpckhdq   m5,    m1,    m3
+
+    pmovzxbw    m0,    m0
+    pmovzxbw    m1,    m1
+    pmovzxbw    m2,    m2
+    pmovzxbw    m3,    m3
+    pmovzxbw    m4,    m4
+    pmovzxbw    m5,    m5
+
+    psubw       m0,    m1
+    psubw       m2,    m3
+    psubw       m4,    m5
+
+    pmaddwd     m0,    m0
+    pmaddwd     m2,    m2
+    pmaddwd     m4,    m4
+
+    paddd       m0,    m2
+    paddd       m6,    m4
+    paddd       m6,    m0
+
+    dec    r4d
+    lea       r0,                    [r0 + 2 * r1]
+    lea       r2,                    [r2 + 2 * r3]
+    jnz    .loop
+
+    HADDD   m6, m1              ; horizontal sum of dword lanes
+    movd   eax, m6
+
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_24x32( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; SSD of two 24x32 blocks of uint8_t pixels.  Each row: 16 columns from a
+; full load (split low/high via pmovzxbw/punpckhbw with m6 = zero), plus
+; 8 more columns via pmovzxbw at +16.  Two rows per iteration.
+cglobal pixel_ssd_24x32, 4, 5, 8, src1, stride1, src2, stride2
+
+    pxor    m7,     m7          ; dword accumulator
+    pxor    m6,     m6          ; zero register for punpckhbw
+    mov     r4d,    16          ; 16 iterations x 2 rows = 32 rows
+
+.loop:
+    movu         m1,    [r0]
+    pmovzxbw     m0,    m1      ; src1 cols 0..7 as words
+    punpckhbw    m1,    m6      ; src1 cols 8..15 as words
+    pmovzxbw     m2,    [r0 + 16]   ; src1 cols 16..23
+    movu         m4,    [r2]
+    pmovzxbw     m3,    m4
+    punpckhbw    m4,    m6
+    pmovzxbw     m5,    [r2 + 16]
+
+    psubw        m0,    m3
+    psubw        m1,    m4
+    psubw        m2,    m5
+
+    pmaddwd      m0,    m0      ; square and pair-sum
+    pmaddwd      m1,    m1
+    pmaddwd      m2,    m2
+
+    paddd        m0,    m1
+    paddd        m7,    m2
+    paddd        m7,    m0
+
+    movu         m1,    [r0 + r1]
+    pmovzxbw     m0,    m1
+    punpckhbw    m1,    m6
+    pmovzxbw     m2,    [r0 + r1 + 16]
+    movu         m4,    [r2 + r3]
+    pmovzxbw     m3,    m4
+    punpckhbw    m4,    m6
+    pmovzxbw     m5,    [r2 + r3 + 16]
+
+    psubw        m0,    m3
+    psubw        m1,    m4
+    psubw        m2,    m5
+
+    pmaddwd      m0,    m0
+    pmaddwd      m1,    m1
+    pmaddwd      m2,    m2
+
+    paddd        m0,    m1
+    paddd        m7,    m2
+    paddd        m7,    m0
+
+    dec    r4d
+    lea    r0,    [r0 + 2 * r1]
+    lea    r2,    [r2 + 2 * r3]
+    jnz    .loop
+
+    HADDD   m7, m1
+    movd   eax, m7
+
+    RET
+
+; Accumulate SSD of a 16x4 strip of uint8_t pixels into m7.
+; Preconditions: m6 = 0, r6 = 2*r1 (set by callers).  On exit r0/r2 have
+; advanced by 2 rows; callers step the remaining 2 rows themselves.
+%macro PIXEL_SSD_16x4 0
+    movu         m1,    [r0]
+    pmovzxbw     m0,    m1      ; cols 0..7 as words
+    punpckhbw    m1,    m6      ; cols 8..15 as words
+    movu         m3,    [r2]
+    pmovzxbw     m2,    m3
+    punpckhbw    m3,    m6
+
+    psubw        m0,    m2
+    psubw        m1,    m3
+
+    movu         m5,    [r0 + r1]
+    pmovzxbw     m4,    m5
+    punpckhbw    m5,    m6
+    movu         m3,    [r2 + r3]
+    pmovzxbw     m2,    m3
+    punpckhbw    m3,    m6
+
+    psubw        m4,    m2
+    psubw        m5,    m3
+
+    pmaddwd      m0,    m0      ; square and pair-sum rows 0-1
+    pmaddwd      m1,    m1
+    pmaddwd      m4,    m4
+    pmaddwd      m5,    m5
+
+    paddd        m0,    m1
+    paddd        m4,    m5
+    paddd        m4,    m0
+    paddd        m7,    m4
+
+    movu         m1,    [r0 + r6]   ; row 2 (r6 = 2*r1)
+    pmovzxbw     m0,    m1
+    punpckhbw    m1,    m6
+    movu         m3,    [r2 + 2 * r3]
+    pmovzxbw     m2,    m3
+    punpckhbw    m3,    m6
+
+    psubw        m0,    m2
+    psubw        m1,    m3
+
+    lea          r0,    [r0 + r6]   ; advance both sources by 2 rows
+    lea          r2,    [r2 + 2 * r3]
+    movu         m5,    [r0 + r1]   ; row 3
+    pmovzxbw     m4,    m5
+    punpckhbw    m5,    m6
+    movu         m3,    [r2 + r3]
+    pmovzxbw     m2,    m3
+    punpckhbw    m3,    m6
+
+    psubw        m4,    m2
+    psubw        m5,    m3
+
+    pmaddwd      m0,    m0      ; square and pair-sum rows 2-3
+    pmaddwd      m1,    m1
+    pmaddwd      m4,    m4
+    pmaddwd      m5,    m5
+
+    paddd        m0,    m1
+    paddd        m4,    m5
+    paddd        m4,    m0
+    paddd        m7,    m4
+%endmacro
+
+; Helper: accumulate SSD of a 16x16 tile into m7 (no prologue/epilogue).
+; Requires m6 = 0, r6 = 2*r1; leaves r0/r2 advanced past the tile minus
+; the final 2-row step (callers do the last lea pair before the next call).
+cglobal pixel_ssd_16x16_internal
+    PIXEL_SSD_16x4
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    PIXEL_SSD_16x4
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    PIXEL_SSD_16x4
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    PIXEL_SSD_16x4
+    ret
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_48x64( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; SSD of two 48x64 uint8_t blocks: 3 columns x 4 rows of 16x16 tiles via
+; pixel_ssd_16x16_internal.  r4/r5 keep the original pointers so each new
+; tile column restarts at +16/+32; m7 accumulates across all calls.
+cglobal pixel_ssd_48x64, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor    m7,    m7           ; global dword accumulator
+    pxor    m6,    m6           ; zero register for the tile helper
+    mov     r4,    r0           ; save block origins for column restarts
+    mov     r5,    r2
+    lea     r6,    [r1 * 2]     ; 2*stride1, used inside the helper
+
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 16]    ; second 16-wide column
+    lea     r2,    [r5 + 16]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 32]    ; third 16-wide column
+    lea     r2,    [r5 + 32]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+
+    HADDD    m7,     m1
+    movd     eax,    m7
+
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_64x16( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; SSD of two 64x16 uint8_t blocks: four 16x16 tiles side by side, each via
+; pixel_ssd_16x16_internal; r4/r5 hold the block origins.
+cglobal pixel_ssd_64x16, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor    m7,    m7           ; global dword accumulator
+    pxor    m6,    m6           ; zero register for the tile helper
+    mov     r4,    r0
+    mov     r5,    r2
+    lea     r6,    [r1 * 2]     ; 2*stride1 for the helper
+
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 16]
+    lea     r2,    [r5 + 16]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 32]
+    lea     r2,    [r5 + 32]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 48]
+    lea     r2,    [r5 + 48]
+    call    pixel_ssd_16x16_internal
+
+    HADDD    m7,      m1
+    movd     eax,     m7
+
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_64x32( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; SSD of two 64x32 uint8_t blocks: 4 columns x 2 rows of 16x16 tiles.
+cglobal pixel_ssd_64x32, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor    m7,    m7           ; global dword accumulator
+    pxor    m6,    m6           ; zero register for the tile helper
+    mov     r4,    r0           ; block origins for column restarts
+    mov     r5,    r2
+    lea     r6,    [r1 * 2]
+
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 16]
+    lea     r2,    [r5 + 16]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 32]
+    lea     r2,    [r5 + 32]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 48]
+    lea     r2,    [r5 + 48]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+
+    HADDD    m7,     m1
+    movd     eax,    m7
+
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_64x48( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; SSD of two 64x48 uint8_t blocks: 4 columns x 3 rows of 16x16 tiles.
+cglobal pixel_ssd_64x48, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor    m7,    m7           ; global dword accumulator
+    pxor    m6,    m6           ; zero register for the tile helper
+    mov     r4,    r0           ; block origins for column restarts
+    mov     r5,    r2
+    lea     r6,    [r1 * 2]
+
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 16]
+    lea     r2,    [r5 + 16]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 32]
+    lea     r2,    [r5 + 32]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 48]
+    lea     r2,    [r5 + 48]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+
+    HADDD    m7,     m1
+    movd     eax,    m7
+
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_64x64( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; SSD of two 64x64 uint8_t blocks: 4 columns x 4 rows of 16x16 tiles.
+cglobal pixel_ssd_64x64, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor    m7,    m7           ; global dword accumulator
+    pxor    m6,    m6           ; zero register for the tile helper
+    mov     r4,    r0           ; block origins for column restarts
+    mov     r5,    r2
+    lea     r6,    [r1 * 2]
+
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 16]
+    lea     r2,    [r5 + 16]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 32]
+    lea     r2,    [r5 + 32]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r4 + 48]
+    lea     r2,    [r5 + 48]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+    lea     r0,    [r0 + r6]
+    lea     r2,    [r2 + 2 * r3]
+    call    pixel_ssd_16x16_internal
+
+    HADDD    m7,     m1
+    movd     eax,    m7
+
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp ( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+
+; Helper: accumulate SSD of a 4x4 tile (src1 = int16_t, src2 = uint8_t)
+; into m7.  Requires r1 already doubled to a byte stride and r4 = 3*r1.
+; Note: advances r2 by two rows but leaves r0 untouched; callers account
+; for this asymmetry when stepping to the next tile.
+cglobal pixel_ssd_sp_4x4_internal
+    movh          m0,    [r0]
+    movh          m1,    [r0 + r1]
+    punpcklqdq    m0,    m1     ; rows 0-1 of src1 words in one register
+    movd          m2,    [r2]
+    movd          m3,    [r2 + r3]
+    punpckldq     m2,    m3
+    pmovzxbw      m2,    m2     ; widen src2 bytes to words
+    psubw         m0,    m2
+    movh          m4,    [r0 + 2 * r1]
+    movh          m5,    [r0 + r4]    ; row 3 (r4 = 3*r1)
+    punpcklqdq    m4,    m5
+    movd          m6,    [r2 + 2 * r3]
+    lea           r2,    [r2 + 2 * r3]
+    movd          m1,    [r2 + r3]
+    punpckldq     m6,    m1
+    pmovzxbw      m6,    m6
+    psubw         m4,    m6
+    pmaddwd       m0,    m0     ; square and pair-sum
+    pmaddwd       m4,    m4
+    paddd         m0,    m4
+    paddd         m7,    m0
+    ret
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_4x4( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; int16_t-vs-uint8_t SSD, 4x4: one call to the shared tile helper.
+cglobal pixel_ssd_sp_4x4, 4, 5, 8, src1, stride1, src2, stride2
+    pxor     m7,     m7         ; accumulator for the helper
+    add      r1,     r1         ; int16_t stride -> bytes
+    lea      r4,     [r1 * 3]   ; 3*stride, needed by the helper
+    call     pixel_ssd_sp_4x4_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_4x8( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; int16_t-vs-uint8_t SSD, 4x8: two 4x4 tile calls.  The helper leaves r2
+; advanced 2 rows, so only 2*r3 more is needed here; r0 needs the full 4*r1.
+cglobal pixel_ssd_sp_4x8, 4, 5, 8, src1, stride1, src2, stride2
+    pxor     m7,     m7
+    add      r1,     r1         ; int16_t stride -> bytes
+    lea      r4,     [r1 * 3]
+    call     pixel_ssd_sp_4x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_4x4_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_4x16( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+; int16_t-vs-uint8_t SSD, 4x16: four 4x4 tile calls (see 4x8 for the
+; asymmetric pointer-advance convention).
+cglobal pixel_ssd_sp_4x16, 4, 5, 8, src1, stride1, src2, stride2
+    pxor     m7,     m7
+    add      r1,     r1         ; int16_t stride -> bytes
+    lea      r4,     [r1 * 3]
+    call     pixel_ssd_sp_4x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_4x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_4x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_4x4_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+; Helper: accumulate SSD of an 8x4 tile (src1 = int16_t, src2 = uint8_t)
+; into m7.  Requires r1 doubled to bytes, r4 = 3*r1, r5 = 3*r3.  Does not
+; advance r0/r2; callers step by 4 rows between calls.
+cglobal pixel_ssd_sp_8x4_internal
+    movu         m0,    [r0]
+    movu         m1,    [r0 + r1]
+    movh         m2,    [r2]
+    movh         m3,    [r2 + r3]
+    pmovzxbw     m2,    m2      ; widen src2 bytes to words
+    pmovzxbw     m3,    m3
+
+    psubw        m0,    m2
+    psubw        m1,    m3
+
+    movu         m4,    [r0 + 2 * r1]
+    movu         m5,    [r0 + r4]    ; row 3 (r4 = 3*r1)
+    movh         m2,    [r2 + 2 * r3]
+    movh         m3,    [r2 + r5]    ; row 3 (r5 = 3*r3)
+    pmovzxbw     m2,    m2
+    pmovzxbw     m3,    m3
+
+    psubw        m4,    m2
+    psubw        m5,    m3
+
+    pmaddwd      m0,    m0      ; square and pair-sum all four rows
+    pmaddwd      m1,    m1
+    pmaddwd      m4,    m4
+    pmaddwd      m5,    m5
+
+    paddd        m0,    m1
+    paddd        m4,    m5
+    paddd        m4,    m0
+    paddd        m7,    m4
+    ret
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_8x4( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; 8-wide SSD variants: each sets up m7=0, byte stride, and the 3*stride
+; offsets (r4/r5), then calls the 8x4 helper once per 4 rows, stepping both
+; pointers by 4 rows between calls (the helper does not advance them).
+INIT_XMM sse4
+cglobal pixel_ssd_sp_8x4, 4, 6, 8, src1, stride1, src2, stride2
+    pxor     m7,     m7
+    add      r1,     r1
+    lea      r4,     [r1 * 3]
+    lea      r5,     [r3 * 3]
+    call     pixel_ssd_sp_8x4_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_8x8( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_8x8, 4, 6, 8, src1, stride1, src2, stride2
+    pxor     m7,     m7
+    add      r1,     r1
+    lea      r4,     [r1 * 3]
+    lea      r5,     [r3 * 3]
+    call     pixel_ssd_sp_8x4_internal
+    ; next 4-row band
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_8x16( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_8x16, 4, 6, 8, src1, stride1, src2, stride2
+    pxor     m7,     m7
+    add      r1,     r1
+    lea      r4,     [r1 * 3]
+    lea      r5,     [r3 * 3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_8x32( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_8x32, 4, 6, 8, src1, stride1, src2, stride2
+    pxor     m7,     m7
+    add      r1,     r1
+    lea      r4,     [r1 * 3]
+    lea      r5,     [r3 * 3]
+    ; 8 bands of 4 rows = 32 rows
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_12x16( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; 12x16 is split into a 4-wide column (4x4 helper x4) and an 8-wide column
+; (8x4 helper x4).  r5/r6 save the original src1/src2 pointers so the second
+; column can restart from base+8 bytes / base+4 bytes; r5 is then repurposed
+; as the 3*stride2 offset required by the 8x4 helper.
+INIT_XMM sse4
+cglobal pixel_ssd_sp_12x16, 4, 7, 8, src1, stride1, src2, stride2
+    pxor     m7,     m7
+    add      r1,     r1
+    lea      r4,     [r1 * 3]
+    ; save base pointers for the 8-wide column
+    mov      r5,     r0
+    mov      r6,     r2
+    call     pixel_ssd_sp_4x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_4x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_4x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_4x4_internal
+    ; second column: 8 bytes into src1 (4 int16), 4 bytes into src2 (4 uint8)
+    lea      r0,     [r5 + 8]
+    lea      r2,     [r6 + 4]
+    ; r5 now becomes 3*stride2 for the 8x4 helper (r4 is still 3*stride1)
+    lea      r5,     [r3 * 3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+; Accumulate the SSD of a 16x4 block into m7.
+; Requires: r1 = stride1 in BYTES, m6 = 0 (for punpckhbw zero-extension),
+;           m7 = running accumulator.
+; IMPORTANT: advances r0/r2 by 2 rows as a side effect (the lea pair below),
+; so after the macro the pointers sit on row 2 of the block just processed;
+; callers therefore step only 2*stride to reach the next 4-row band.
+%macro PIXEL_SSD_SP_16x4 0
+    ; row 0: 16 int16 = two xmm loads; 16 uint8 = one load split low/high
+    movu         m0,    [r0]
+    movu         m1,    [r0 + 16]
+    movu         m3,    [r2]
+    pmovzxbw     m2,    m3
+    punpckhbw    m3,    m6
+
+    psubw        m0,    m2
+    psubw        m1,    m3
+
+    ; row 1
+    movu         m4,    [r0 + r1]
+    movu         m5,    [r0 + r1 +16]
+    movu         m3,    [r2 + r3]
+    pmovzxbw     m2,    m3
+    punpckhbw    m3,    m6
+
+    psubw        m4,    m2
+    psubw        m5,    m3
+
+    ; square (pmaddwd with itself) and accumulate rows 0-1
+    pmaddwd      m0,    m0
+    pmaddwd      m1,    m1
+    pmaddwd      m4,    m4
+    pmaddwd      m5,    m5
+
+    paddd        m0,    m1
+    paddd        m4,    m5
+    paddd        m4,    m0
+    paddd        m7,    m4
+
+    ; row 2
+    movu         m0,    [r0 + 2 * r1]
+    movu         m1,    [r0 + 2 * r1 + 16]
+    movu         m3,    [r2 + 2 * r3]
+    pmovzxbw     m2,    m3
+    punpckhbw    m3,    m6
+
+    psubw        m0,    m2
+    psubw        m1,    m3
+
+    ; advance both pointers by 2 rows (side effect documented above)
+    lea          r0,    [r0 + 2 * r1]
+    lea          r2,    [r2 + 2 * r3]
+    ; row 3 (now [r0 + r1] after the advance)
+    movu         m4,    [r0 + r1]
+    movu         m5,    [r0 + r1 + 16]
+    movu         m3,    [r2 + r3]
+    pmovzxbw     m2,    m3
+    punpckhbw    m3,    m6
+
+    psubw        m4,    m2
+    psubw        m5,    m3
+
+    pmaddwd      m0,    m0
+    pmaddwd      m1,    m1
+    pmaddwd      m4,    m4
+    pmaddwd      m5,    m5
+
+    paddd        m0,    m1
+    paddd        m4,    m5
+    paddd        m4,    m0
+    paddd        m7,    m4
+%endmacro
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_16x4( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; 16-wide SSD variants built on PIXEL_SSD_SP_16x4.  The macro leaves r0/r2
+; advanced by 2 rows, so stepping a further 2*stride between macro uses moves
+; a full 4-row band.  m6 must be zero for the macro's punpckhbw.
+INIT_XMM sse4
+cglobal pixel_ssd_sp_16x4, 4, 6, 8, src1, stride1, src2, stride2
+
+    pxor        m6,     m6
+    pxor        m7,     m7
+    add         r1,     r1
+    PIXEL_SSD_SP_16x4
+    HADDD   m7, m1
+    movd   eax, m7
+
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_16x8( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_16x8, 4, 4, 8, src1, stride1, src2, stride2
+
+    pxor    m6,     m6
+    pxor    m7,     m7
+    add     r1,     r1
+    PIXEL_SSD_SP_16x4
+    ; macro already moved 2 rows; +2 more = next 4-row band
+    lea     r0,    [r0 + 2 * r1]
+    lea     r2,    [r2 + 2 * r3]
+    PIXEL_SSD_SP_16x4
+    HADDD   m7,     m1
+    movd    eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_16x12( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_16x12, 4, 6, 8, src1, stride1, src2, stride2
+
+    pxor    m6,     m6
+    pxor    m7,     m7
+    add     r1,     r1
+    ; precompute the 2*stride steps used between 4-row bands
+    lea     r4,     [r1 * 2]
+    lea     r5,     [r3 * 2]
+    PIXEL_SSD_SP_16x4
+    lea     r0,     [r0 + r4]
+    lea     r2,     [r2 + r5]
+    PIXEL_SSD_SP_16x4
+    lea     r0,     [r0 + r4]
+    lea     r2,     [r2 + r5]
+    PIXEL_SSD_SP_16x4
+    HADDD   m7,     m1
+    movd    eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_16x16( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_16x16, 4, 6, 8, src1, stride1, src2, stride2
+
+    pxor    m6,     m6
+    pxor    m7,     m7
+    add     r1,     r1
+    lea     r4,     [r1 * 2]
+    lea     r5,     [r3 * 2]
+    PIXEL_SSD_SP_16x4
+    lea     r0,     [r0 + r4]
+    lea     r2,     [r2 + r5]
+    PIXEL_SSD_SP_16x4
+    lea     r0,     [r0 + r4]
+    lea     r2,     [r2 + r5]
+    PIXEL_SSD_SP_16x4
+    lea     r0,     [r0 + r4]
+    lea     r2,     [r2 + r5]
+    PIXEL_SSD_SP_16x4
+    HADDD   m7,     m1
+    movd    eax,    m7
+    RET
+
+; Helper: accumulate the SSD of a 16x16 tile into m7.
+; Expects: r1 = stride1 in bytes, r4 = 2*r1, m6 = 0, m7 = accumulator.
+; Like the macro it wraps, it leaves r0/r2 advanced by 2 rows past the last
+; processed band, so callers step another r4 / 2*r3 to reach the next tile.
+cglobal pixel_ssd_sp_16x16_internal
+    PIXEL_SSD_SP_16x4
+    lea     r0,    [r0 + r4]
+    lea     r2,    [r2 + 2 * r3]
+    PIXEL_SSD_SP_16x4
+    lea     r0,    [r0 + r4]
+    lea     r2,    [r2 + 2 * r3]
+    PIXEL_SSD_SP_16x4
+    lea     r0,    [r0 + r4]
+    lea     r2,    [r2 + 2 * r3]
+    PIXEL_SSD_SP_16x4
+    ret
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_16x32( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; Tall 16-wide variants: chain 16x16 tiles vertically via the internal helper.
+INIT_XMM sse4
+cglobal pixel_ssd_sp_16x32, 4, 5, 8, src1, stride1, src2, stride2
+
+    pxor     m6,     m6
+    pxor     m7,     m7
+    add      r1,     r1
+    ; r4 = 2*stride1, consumed by pixel_ssd_sp_16x16_internal
+    lea      r4,     [r1 * 2]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_16x64( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_16x64, 4, 6, 8, src1, stride1, src2, stride2
+
+    pxor     m6,     m6
+    pxor     m7,     m7
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    lea      r5,     [r3 * 2]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + r5]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + r5]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + r5]
+    call     pixel_ssd_sp_16x16_internal
+
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_24x32( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; 24x32 = one 16-wide column (two 16x16 tiles) plus one 8-wide column (eight
+; 8x4 bands).  r5/r6 save the base pointers, then r4/r5 are repurposed as the
+; 3*stride offsets needed by the 8x4 helper.
+INIT_XMM sse4
+cglobal pixel_ssd_sp_24x32, 4, 7, 8, src1, stride1, src2, stride2
+    pxor     m6,     m6
+    pxor     m7,     m7
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    mov      r5,     r0
+    mov      r6,     r2
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    ; 8-wide column: 32 bytes into src1 (16 int16), 16 bytes into src2
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    ; r4/r5 now hold 3*stride1 / 3*stride2 for the 8x4 helper
+    lea      r4,     [r1 * 3]
+    lea      r5,     [r3 * 3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    lea      r0,     [r0 + 4 * r1]
+    lea      r2,     [r2 + 4 * r3]
+    call     pixel_ssd_sp_8x4_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_32x8( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; (header fixed: previous comment named pixel_ssd_32x8 with uint8_t* src1,
+; but this is the sp variant -- src1 is int16_t, as the doubled stride and
+; pmovzxbw widening in the macro show)
+; Two 16-wide columns of 8 rows each; r5/r6 save the base pointers so the
+; second column can restart at base+32 bytes / base+16 bytes.
+INIT_XMM sse4
+cglobal pixel_ssd_sp_32x8, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor     m7,     m7
+    pxor     m6,     m6
+    mov      r5,     r0
+    mov      r6,     r2
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    PIXEL_SSD_SP_16x4
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    PIXEL_SSD_SP_16x4
+    ; right half
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    PIXEL_SSD_SP_16x4
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    PIXEL_SSD_SP_16x4
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_32x16( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; (headers in this family fixed: they previously named the non-sp functions
+; with uint8_t* src1; these are sp variants taking int16_t* src1)
+; 32-wide variants: two 16-wide columns, each a vertical chain of 16x16
+; tiles; r5/r6 hold the base pointers for the column restart.
+INIT_XMM sse4
+cglobal pixel_ssd_sp_32x16, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor     m7,     m7
+    pxor     m6,     m6
+    mov      r5,     r0
+    mov      r6,     r2
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    call     pixel_ssd_sp_16x16_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_32x24( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_32x24, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor     m7,     m7
+    pxor     m6,     m6
+    mov      r5,     r0
+    mov      r6,     r2
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    ; left column: 16 rows via helper + 8 rows via two macro expansions
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    PIXEL_SSD_SP_16x4
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    PIXEL_SSD_SP_16x4
+    ; right column
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    PIXEL_SSD_SP_16x4
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    PIXEL_SSD_SP_16x4
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_32x32( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_32x32, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor     m7,     m7
+    pxor     m6,     m6
+    mov      r5,     r0
+    mov      r6,     r2
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_32x64( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_32x64, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor     m7,     m7
+    pxor     m6,     m6
+    mov      r5,     r0
+    mov      r6,     r2
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_48x64( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; (header fixed: previously named pixel_ssd_48x64 with uint8_t* src1)
+; Three 16-wide columns of 64 rows, each column = four 16x16 tiles.  Columns
+; restart from the saved bases r5/r6 at +0, +32, +64 bytes in src1
+; (+0, +16, +32 in src2).
+INIT_XMM sse4
+cglobal pixel_ssd_sp_48x64, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor     m7,     m7
+    pxor     m6,     m6
+    mov      r5,     r0
+    mov      r6,     r2
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    ; column 2
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    ; column 3
+    lea      r0,     [r5 + 64]
+    lea      r2,     [r6 + 32]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_64x16( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+; (headers in this family fixed: they previously named the non-sp functions
+; with uint8_t* src1; these are sp variants taking int16_t* src1)
+; 64-wide variants: four 16-wide columns restarting from the saved bases
+; r5/r6 at +0/+32/+64/+96 bytes in src1 (+0/+16/+32/+48 in src2); each
+; column is a vertical chain of 16x16 tiles.
+INIT_XMM sse4
+cglobal pixel_ssd_sp_64x16, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor     m7,     m7
+    pxor     m6,     m6
+    mov      r5,     r0
+    mov      r6,     r2
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 64]
+    lea      r2,     [r6 + 32]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 96]
+    lea      r2,     [r6 + 48]
+    call     pixel_ssd_sp_16x16_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_64x32( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_64x32, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor     m7,     m7
+    pxor     m6,     m6
+    mov      r5,     r0
+    mov      r6,     r2
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 64]
+    lea      r2,     [r6 + 32]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 96]
+    lea      r2,     [r6 + 48]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_64x48( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_64x48, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor     m7,     m7
+    pxor     m6,     m6
+    mov      r5,     r0
+    mov      r6,     r2
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 64]
+    lea      r2,     [r6 + 32]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 96]
+    lea      r2,     [r6 + 48]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_sp_64x64( int16_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_ssd_sp_64x64, 4, 7, 8, src1, stride1, src2, stride2
+
+    pxor     m7,     m7
+    pxor     m6,     m6
+    mov      r5,     r0
+    mov      r6,     r2
+    add      r1,     r1
+    lea      r4,     [r1 * 2]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 32]
+    lea      r2,     [r6 + 16]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 64]
+    lea      r2,     [r6 + 32]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r5 + 96]
+    lea      r2,     [r6 + 48]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    lea      r0,     [r0 + r4]
+    lea      r2,     [r2 + 2 * r3]
+    call     pixel_ssd_sp_16x16_internal
+    HADDD    m7,     m1
+    movd     eax,    m7
+    RET
+
+
+;-----------------------------------------------------------------------------
+; int pixel_ssd_s( int16_t *ref, intptr_t i_stride )
+;-----------------------------------------------------------------------------
+; ssd_s family: sum of squares of an int16_t block (SSD against zero).
+; Stride is in int16_t units and is doubled to bytes on entry.
+INIT_XMM sse2
+cglobal pixel_ssd_s_4, 2,2,2
+    add     r1, r1
+    ; pack rows 0-1 (4 int16 each) into one xmm
+    movh    m0, [r0]
+    movhps  m0, [r0 + r1]
+
+    ; rows 2-3
+    lea     r0, [r0 + r1 * 2]
+    movh    m1, [r0]
+    movhps  m1, [r0 + r1]
+
+    ; square words and sum pairs, then fold
+    pmaddwd m0, m0
+    pmaddwd m1, m1
+    paddd   m0, m1
+
+    ; calculate sum and return
+    HADDD   m0, m1
+    movd    eax, m0
+    RET
+
+
+INIT_XMM sse2
+cglobal pixel_ssd_s_8, 2,3,5
+    add     r1, r1
+    ; r2 = 3*stride for the row-3 load
+    lea     r2, [r1 * 3]
+    ; rows 0-3 (one xmm per 8-int16 row)
+    movu    m0, [r0]
+    movu    m1, [r0 + r1]
+    movu    m2, [r0 + r1 * 2]
+    movu    m3, [r0 + r2]
+
+    pmaddwd m0, m0
+    pmaddwd m1, m1
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    paddd   m0, m1
+    paddd   m2, m3
+    paddd   m0, m2
+
+    ; rows 4-7
+    lea     r0, [r0 + r1 * 4]
+    movu    m4, [r0]
+    movu    m1, [r0 + r1]
+    movu    m2, [r0 + r1 * 2]
+    movu    m3, [r0 + r2]
+
+    pmaddwd m4, m4
+    pmaddwd m1, m1
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    paddd   m4, m1
+    paddd   m2, m3
+    paddd   m4, m2
+    paddd   m0, m4
+
+    ; calculate sum and return
+    HADDD   m0, m1
+    movd    eax, m0
+    RET
+
+
+; pixel_ssd_s_16 (SSE2): sum of squares of a 16x16 int16_t block.
+; A 16-int16 row spans two xmm loads; each loop iteration handles 4 rows
+; (2 rows per half), 4 iterations total.
+INIT_XMM sse2
+cglobal pixel_ssd_s_16, 2,3,5
+    add     r1, r1
+
+    mov     r2d, 4
+    pxor    m0, m0
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r0 + mmsize]
+    movu    m3, [r0 + r1]
+    movu    m4, [r0 + r1 + mmsize]
+    lea     r0, [r0 + r1 * 2]
+
+    pmaddwd m1, m1
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m1, m3
+    paddd   m0, m1
+
+    movu    m1, [r0]
+    movu    m2, [r0 + mmsize]
+    movu    m3, [r0 + r1]
+    movu    m4, [r0 + r1 + mmsize]
+    lea     r0, [r0 + r1 * 2]
+
+    pmaddwd m1, m1
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m1, m3
+    paddd   m0, m1
+
+    dec     r2d
+    jnz    .loop
+
+    ; calculate sum and return
+    HADDD   m0, m1
+    movd    eax, m0
+    RET
+
+
+; pixel_ssd_s_32 (SSE2): sum of squares of a 32x32 int16_t block.
+; A 32-int16 row spans four xmm loads; each loop iteration handles 2 rows,
+; 16 iterations total.
+INIT_XMM sse2
+cglobal pixel_ssd_s_32, 2,3,5
+    add     r1, r1
+
+    mov     r2d, 16
+    pxor    m0, m0
+.loop:
+    movu    m1, [r0 + 0 * mmsize]
+    movu    m2, [r0 + 1 * mmsize]
+    movu    m3, [r0 + 2 * mmsize]
+    movu    m4, [r0 + 3 * mmsize]
+    add     r0, r1
+
+    pmaddwd m1, m1
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m1, m3
+    paddd   m0, m1
+
+    movu    m1, [r0 + 0 * mmsize]
+    movu    m2, [r0 + 1 * mmsize]
+    movu    m3, [r0 + 2 * mmsize]
+    movu    m4, [r0 + 3 * mmsize]
+    add     r0, r1
+
+    pmaddwd m1, m1
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m1, m3
+    paddd   m0, m1
+
+    dec     r2d
+    jnz    .loop
+
+    ; calculate sum and return
+    HADDD   m0, m1
+    movd    eax, m0
+    RET
+
+; pixel_ssd_s_16 (AVX2): one 32-byte ymm load covers a full 16-int16 row,
+; so each iteration handles 4 rows (r3 = 3*stride for the fourth row).
+INIT_YMM avx2
+cglobal pixel_ssd_s_16, 2,4,5
+    add     r1, r1
+    lea     r3, [r1 * 3]
+    mov     r2d, 16/4
+    pxor    m0, m0
+.loop:
+    movu    m1, [r0]
+    movu    m2, [r0 + r1]
+    movu    m3, [r0 + 2 * r1]
+    movu    m4, [r0 + r3]
+
+    lea     r0, [r0 + r1 * 4]
+    pmaddwd m1, m1
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m1, m3
+    paddd   m0, m1
+
+    dec     r2d
+    jnz    .loop
+
+    ; calculate sum and return (movd reads the low xmm lane of the ymm acc)
+    HADDD   m0, m1
+    movd    eax, xm0
+    RET
+
+; pixel_ssd_s_32 (AVX2): a 32-int16 row spans two ymm loads; each iteration
+; handles 4 rows (2 rows per half), 8 iterations total.
+INIT_YMM avx2
+cglobal pixel_ssd_s_32, 2,4,5
+    add     r1, r1
+    lea     r3, [r1 * 3]
+
+    mov     r2d, 8
+    pxor    m0, m0
+.loop:
+    movu    m1, [r0 + 0 * mmsize]
+    movu    m2, [r0 + 1 * mmsize]
+    movu    m3, [r0 + r1 + 0 * mmsize]
+    movu    m4, [r0 + r1 + 1 * mmsize]
+
+    pmaddwd m1, m1
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m1, m3
+    paddd   m0, m1
+
+    movu    m1, [r0 + r1 * 2 + 0 * mmsize]
+    movu    m2, [r0 + r1 * 2 + 1 * mmsize]
+    movu    m3, [r0 + r3 + 0 * mmsize]
+    movu    m4, [r0 + r3 + 1 * mmsize]
+    lea     r0, [r0 + 4 * r1]
+
+    pmaddwd m1, m1
+    pmaddwd m2, m2
+    pmaddwd m3, m3
+    pmaddwd m4, m4
+    paddd   m1, m2
+    paddd   m3, m4
+    paddd   m1, m3
+    paddd   m0, m1
+
+    dec     r2d
+    jnz    .loop
+
+    ; calculate sum and return
+    HADDD   m0, m1
+    movd    eax, xm0
+    RET
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/x86inc.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1485 @@
+;*****************************************************************************
+;* x86inc.asm: x264asm abstraction layer
+;*****************************************************************************
+;* Copyright (C) 2005-2014 x264 project
+;*               2013-2014 x265 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*          Anton Mitrofanov <BugMaster@narod.ru>
+;*          Fiona Glaser <fiona@x264.com>
+;*          Henrik Gramner <henrik@gramner.com>
+;*          Min Chen <chenm003@163.com>
+;*
+;* Permission to use, copy, modify, and/or distribute this software for any
+;* purpose with or without fee is hereby granted, provided that the above
+;* copyright notice and this permission notice appear in all copies.
+;*
+;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+;*****************************************************************************
+
+; This is a header file for the x264ASM assembly language, which uses
+; NASM/YASM syntax combined with a large number of macros to provide easy
+; abstraction between different calling conventions (x86_32, win64, linux64).
+; It also has various other useful features to simplify writing the kind of
+; DSP functions that are most often used in x264.
+
+; Unlike the rest of x264, this file is available under an ISC license, as it
+; has significant usefulness outside of x264 and we want it to be available
+; to the largest audience possible.  Of course, if you modify it for your own
+; purposes to add a new feature, we strongly encourage contributing a patch
+; as this feature might be useful for others as well.  Send patches or ideas
+; to x264-devel@videolan.org .
+
+%ifndef private_prefix
+    %define private_prefix X265_NS ; symbol prefix for internal (hidden-visibility) functions
+%endif
+
+%ifndef public_prefix
+    %define public_prefix private_prefix ; exported symbols use the same prefix unless overridden
+%endif
+
+%ifndef STACK_ALIGNMENT
+    %if ARCH_X86_64
+        %define STACK_ALIGNMENT 16 ; x86-64 ABIs guarantee 16-byte alignment at call sites
+    %else
+        %define STACK_ALIGNMENT 4 ; x86-32 only guarantees pointer-size alignment
+    %endif
+%endif
+
+%define WIN64  0
+%define UNIX64 0
+%if ARCH_X86_64
+    %ifidn __OUTPUT_FORMAT__,win32
+        %define WIN64  1
+    %elifidn __OUTPUT_FORMAT__,win64
+        %define WIN64  1
+    %elifidn __OUTPUT_FORMAT__,x64
+        %define WIN64  1
+    %else
+        %define UNIX64 1 ; any non-Windows 64-bit output format is assumed SysV
+    %endif
+%endif
+
+%ifdef PREFIX
+    %define mangle(x) _ %+ x ; prepend underscore for targets with C symbol mangling
+%else
+    %define mangle(x) x
+%endif
+
+%macro SECTION_RODATA 0-1 32 ; read-only data section; default 32-byte alignment for ymm constants
+    SECTION .rodata align=%1
+%endmacro
+
+%macro SECTION_TEXT 0-1 16 ; code section with explicit alignment
+    SECTION .text align=%1
+%endmacro
+
+%if WIN64
+    %define PIC
+%elif ARCH_X86_64 == 0
+; x86_32 doesn't require PIC.
+; Some distros prefer shared objects to be PIC, but nothing breaks if
+; the code contains a few textrels, so we'll skip that complexity.
+    %undef PIC
+%endif
+%ifdef PIC
+    default rel ; make [symbol] operands RIP-relative by default
+%endif
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used. pushes callee-saved regs if needed.
+; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
+; %4 = (optional) stack size to be allocated. The stack will be aligned before
+;      allocating the specified stack size. If the required stack alignment is
+;      larger than the known stack alignment the stack will be manually aligned
+;      and an extra register will be allocated to hold the original stack
+;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
+;      register as stack pointer, request a negative stack size.
+; %4+/%5+ = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,7,0x40, dst, src, tmp
+; declares a function (foo) that automatically loads two arguments (dst and
+; src) into registers, uses one additional register (tmp) plus 7 vector
+; registers (m0-m6) and allocates 0x40 bytes of stack space.
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE, and returns.
+
+; REP_RET:
+; Use this instead of RET if it's a branch target.
+
+; registers:
+; rN and rNq are the native-size register holding function argument N
+; rNd, rNw, rNb are dword, word, and byte size
+; rNh is the high 8 bits of the word size
+; rNm is the original location of arg N (a register or on the stack), dword
+; rNmp is native size
+
+%macro DECLARE_REG 2-3 ; reg index, native register, [stack offset of the argument's home slot]
+    %define r%1q %2
+    %define r%1d %2d
+    %define r%1w %2w
+    %define r%1b %2b
+    %define r%1h %2h
+    %if %0 == 2 ; argument arrives in a register: rNm aliases the register itself
+        %define r%1m  %2d
+        %define r%1mp %2
+    %elif ARCH_X86_64 ; memory
+        %define r%1m [rstk + stack_offset + %3]
+        %define r%1mp qword r %+ %1 %+ m
+    %else
+        %define r%1m [rstk + stack_offset + %3]
+        %define r%1mp dword r %+ %1 %+ m
+    %endif
+    %define r%1  %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 3 ; map size suffixes (q/d/w/h/b) onto one legacy GPR; %2 = low byte, %3 = high byte or "null"
+    %define r%1q r%1
+    %define e%1q r%1
+    %define r%1d e%1
+    %define e%1d e%1
+    %define r%1w %1
+    %define e%1w %1
+    %define r%1h %3
+    %define e%1h %3
+    %define r%1b %2
+    %define e%1b %2
+%if ARCH_X86_64 == 0
+    %define r%1  e%1 ; on x86-32 the native-size register is the 32-bit form
+%endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al, ah
+DECLARE_REG_SIZE bx, bl, bh
+DECLARE_REG_SIZE cx, cl, ch
+DECLARE_REG_SIZE dx, dl, dh
+DECLARE_REG_SIZE si, sil, null ; si/di/bp have no high-byte form
+DECLARE_REG_SIZE di, dil, null
+DECLARE_REG_SIZE bp, bpl, null
+
+; t# defines for when per-arch register allocation is more complex than just function arguments
+
+%macro DECLARE_REG_TMP 1-*
+    %assign %%i 0
+    %rep %0
+        CAT_XDEFINE t, %%i, r%1 ; t<i> -> r<arg>, in argument order
+        %assign %%i %%i+1
+        %rotate 1
+    %endrep
+%endmacro
+
+%macro DECLARE_REG_TMP_SIZE 0-*
+    %rep %0
+        %define t%1q t%1 %+ q
+        %define t%1d t%1 %+ d
+        %define t%1w t%1 %+ w
+        %define t%1h t%1 %+ h
+        %define t%1b t%1 %+ b
+        %rotate 1
+    %endrep
+%endmacro
+
+DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
+
+%if ARCH_X86_64
+    %define gprsize 8
+%else
+    %define gprsize 4
+%endif
+
+%macro PUSH 1 ; push and track stack_offset, unless an alternate stack pointer (rstk != rsp) is active
+    push %1
+    %ifidn rstk, rsp
+        %assign stack_offset stack_offset+gprsize
+    %endif
+%endmacro
+
+%macro POP 1 ; inverse of PUSH, keeps stack_offset in sync
+    pop %1
+    %ifidn rstk, rsp
+        %assign stack_offset stack_offset-gprsize
+    %endif
+%endmacro
+
+%macro PUSH_IF_USED 1-* ; push each listed callee-saved GPR only if the function declared enough registers to reach it
+    %rep %0
+        %if %1 < regs_used
+            PUSH r%1
+        %endif
+        %rotate 1
+    %endrep
+%endmacro
+
+%macro POP_IF_USED 1-* ; bare pop: stack_offset is not tracked, epilogue only
+    %rep %0
+        %if %1 < regs_used
+            pop r%1
+        %endif
+        %rotate 1
+    %endrep
+%endmacro
+
+%macro LOAD_IF_USED 1-* ; load stack-passed arguments into their assigned registers
+    %rep %0
+        %if %1 < num_args
+            mov r%1, r %+ %1 %+ mp
+        %endif
+        %rotate 1
+    %endrep
+%endmacro
+
+%macro SUB 2 ; sub that tracks stack_offset when adjusting the stack pointer
+    sub %1, %2
+    %ifidn %1, rstk
+        %assign stack_offset stack_offset+(%2)
+    %endif
+%endmacro
+
+%macro ADD 2 ; add that tracks stack_offset when adjusting the stack pointer
+    add %1, %2
+    %ifidn %1, rstk
+        %assign stack_offset stack_offset-(%2)
+    %endif
+%endmacro
+
+%macro movifnidn 2 ; emit a mov only when source and destination differ
+    %ifnidn %1, %2
+        mov %1, %2
+    %endif
+%endmacro
+
+%macro movsxdifnidn 2 ; emit a movsxd only when source and destination differ
+    %ifnidn %1, %2
+        movsxd %1, %2
+    %endif
+%endmacro
+
+%macro ASSERT 1 ; assembly-time assertion; fails the build, has no runtime cost
+    %if (%1) == 0
+        %error assert failed
+    %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-* ; give symbolic names (nameq/named/...) to argument registers; undoes any previous naming first
+    %ifdef n_arg_names
+        %assign %%i 0
+        %rep n_arg_names
+            CAT_UNDEF arg_name %+ %%i, q
+            CAT_UNDEF arg_name %+ %%i, d
+            CAT_UNDEF arg_name %+ %%i, w
+            CAT_UNDEF arg_name %+ %%i, h
+            CAT_UNDEF arg_name %+ %%i, b
+            CAT_UNDEF arg_name %+ %%i, m
+            CAT_UNDEF arg_name %+ %%i, mp
+            CAT_UNDEF arg_name, %%i
+            %assign %%i %%i+1
+        %endrep
+    %endif
+
+    %xdefine %%stack_offset stack_offset
+    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
+    %assign %%i 0
+    %rep %0
+        %xdefine %1q r %+ %%i %+ q
+        %xdefine %1d r %+ %%i %+ d
+        %xdefine %1w r %+ %%i %+ w
+        %xdefine %1h r %+ %%i %+ h
+        %xdefine %1b r %+ %%i %+ b
+        %xdefine %1m r %+ %%i %+ m
+        %xdefine %1mp r %+ %%i %+ mp
+        CAT_XDEFINE arg_name, %%i, %1
+        %assign %%i %%i+1
+        %rotate 1
+    %endrep
+    %xdefine stack_offset %%stack_offset
+    %assign n_arg_names %0
+%endmacro
+
+%define required_stack_alignment ((mmsize + 15) & ~15) ; 16 for mmx/xmm, 32 for ymm locals
+
+%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
+    %ifnum %1
+        %if %1 != 0
+            %assign %%pad 0
+            %assign stack_size %1
+            %if stack_size < 0
+                %assign stack_size -stack_size ; negative size = caller opts out of the extra rstk register
+            %endif
+            %if WIN64
+                %assign %%pad %%pad + 32 ; shadow space
+                %if mmsize != 8
+                    %assign xmm_regs_used %2
+                    %if xmm_regs_used > 8
+                        %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
+                    %endif
+                %endif
+            %endif
+            %if required_stack_alignment <= STACK_ALIGNMENT
+                ; maintain the current stack alignment
+                %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+                SUB rsp, stack_size_padded
+            %else
+                %assign %%reg_num (regs_used - 1)
+                %xdefine rstk r %+ %%reg_num ; highest declared GPR holds the original stack pointer
+                ; align stack, and save original stack location directly above
+                ; it, i.e. in [rsp+stack_size_padded], so we can restore the
+                ; stack in a single instruction (i.e. mov rsp, rstk or mov
+                ; rsp, [rsp+stack_size_padded])
+                %if %1 < 0 ; need to store rsp on stack
+                    %xdefine rstkm [rsp + stack_size + %%pad]
+                    %assign %%pad %%pad + gprsize
+                %else ; can keep rsp in rstk during whole function
+                    %xdefine rstkm rstk
+                %endif
+                %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
+                mov rstk, rsp
+                and rsp, ~(required_stack_alignment-1)
+                sub rsp, stack_size_padded
+                movifnidn rstkm, rstk
+            %endif
+            WIN64_PUSH_XMM ; no-op stub on non-Windows targets
+        %endif
+    %endif
+%endmacro
+
+%macro SETUP_STACK_POINTER 1 ; reserve one extra GPR for rstk when manual alignment will be needed
+    %ifnum %1
+        %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
+            %if %1 > 0
+                %assign regs_used (regs_used + 1)
+            %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2
+                %warning "Stack pointer will overwrite register argument"
+            %endif
+        %endif
+    %endif
+%endmacro
+
+%macro DEFINE_ARGS_INTERNAL 3+ ; dispatch PROLOGUE's trailing args: %1 = PROLOGUE arg count, %2 = stack size or first name, %3 = names
+    %ifnum %2
+        DEFINE_ARGS %3
+    %elif %1 == 4
+        DEFINE_ARGS %2
+    %elif %1 > 4
+        DEFINE_ARGS %2, %3
+    %endif
+%endmacro
+
+%if WIN64 ; Windows x64 ;=================================================
+
+DECLARE_REG 0,  rcx ; Microsoft x64 convention: first four args in rcx, rdx, r8, r9
+DECLARE_REG 1,  rdx
+DECLARE_REG 2,  R8
+DECLARE_REG 3,  R9
+DECLARE_REG 4,  R10, 40 ; args 5+ have stack homes: 32B shadow space + return address = offset 40
+DECLARE_REG 5,  R11, 48
+DECLARE_REG 6,  rax, 56
+DECLARE_REG 7,  rdi, 64 ; rdi/rsi/rbx/rbp/r12-r15 are callee-saved on win64
+DECLARE_REG 8,  rsi, 72
+DECLARE_REG 9,  rbx, 80
+DECLARE_REG 10, rbp, 88
+DECLARE_REG 11, R12, 96
+DECLARE_REG 12, R13, 104
+DECLARE_REG 13, R14, 112
+DECLARE_REG 14, R15, 120
+
+%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+    %assign num_args %1
+    %assign regs_used %2
+    ASSERT regs_used >= num_args
+    SETUP_STACK_POINTER %4
+    ASSERT regs_used <= 15
+    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14 ; save callee-saved GPRs actually used
+    ALLOC_STACK %4, %3
+    %if mmsize != 8 && stack_size == 0
+        WIN64_SPILL_XMM %3 ; ALLOC_STACK already spilled xmm regs if stack space was allocated
+    %endif
+    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+    DEFINE_ARGS_INTERNAL %0, %4, %5
+%endmacro
+
+%macro WIN64_PUSH_XMM 0
+    ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
+    %if xmm_regs_used > 6
+        movaps [rstk + stack_offset +  8], xmm6 ; xmm6-xmm15 are callee-saved on win64
+    %endif
+    %if xmm_regs_used > 7
+        movaps [rstk + stack_offset + 24], xmm7
+    %endif
+    %if xmm_regs_used > 8
+        %assign %%i 8
+        %rep xmm_regs_used-8
+            movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
+            %assign %%i %%i+1
+        %endrep
+    %endif
+%endmacro
+
+%macro WIN64_SPILL_XMM 1 ; allocate space if needed and save callee-saved xmm registers
+    %assign xmm_regs_used %1
+    ASSERT xmm_regs_used <= 16
+    %if xmm_regs_used > 8
+        ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
+        %assign %%pad (xmm_regs_used-8)*16 + 32
+        %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+        SUB rsp, stack_size_padded
+    %endif
+    WIN64_PUSH_XMM
+%endmacro
+
+%macro WIN64_RESTORE_XMM_INTERNAL 1 ; %1 = base register to restore through (normally rsp)
+    %assign %%pad_size 0
+    %if xmm_regs_used > 8
+        %assign %%i xmm_regs_used
+        %rep xmm_regs_used-8
+            %assign %%i %%i-1
+            movaps xmm %+ %%i, [%1 + (%%i-8)*16 + stack_size + 32]
+        %endrep
+    %endif
+    %if stack_size_padded > 0
+        %if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
+            mov rsp, rstkm ; manually-aligned stack: restore the saved original rsp
+        %else
+            add %1, stack_size_padded
+            %assign %%pad_size stack_size_padded
+        %endif
+    %endif
+    %if xmm_regs_used > 7
+        movaps xmm7, [%1 + stack_offset - %%pad_size + 24] ; reload from shadow space
+    %endif
+    %if xmm_regs_used > 6
+        movaps xmm6, [%1 + stack_offset - %%pad_size +  8]
+    %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM 1
+    WIN64_RESTORE_XMM_INTERNAL %1
+    %assign stack_offset (stack_offset-stack_size_padded)
+    %assign xmm_regs_used 0
+%endmacro
+
+%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0
+
+%macro RET 0 ; full epilogue: restore xmm regs, pop GPRs, clear ymm state, return
+    WIN64_RESTORE_XMM_INTERNAL rsp
+    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
+%if mmsize == 32
+    vzeroupper ; avoid AVX/SSE transition penalties in the caller
+%endif
+    AUTO_REP_RET
+%endmacro
+
+%elif ARCH_X86_64 ; *nix x64 ;=============================================
+
+DECLARE_REG 0,  rdi ; SysV AMD64 convention: first six args in rdi, rsi, rdx, rcx, r8, r9
+DECLARE_REG 1,  rsi
+DECLARE_REG 2,  rdx
+DECLARE_REG 3,  rcx
+DECLARE_REG 4,  R8
+DECLARE_REG 5,  R9
+DECLARE_REG 6,  rax, 8 ; args 7+ have stack homes above the return address
+DECLARE_REG 7,  R10, 16
+DECLARE_REG 8,  R11, 24
+DECLARE_REG 9,  rbx, 32 ; rbx/rbp/r12-r15 are callee-saved on SysV
+DECLARE_REG 10, rbp, 40
+DECLARE_REG 11, R12, 48
+DECLARE_REG 12, R13, 56
+DECLARE_REG 13, R14, 64
+DECLARE_REG 14, R15, 72
+
+%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+    %assign num_args %1
+    %assign regs_used %2
+    ASSERT regs_used >= num_args
+    SETUP_STACK_POINTER %4
+    ASSERT regs_used <= 15
+    PUSH_IF_USED 9, 10, 11, 12, 13, 14 ; only the callee-saved subset needs saving
+    ALLOC_STACK %4
+    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
+    DEFINE_ARGS_INTERNAL %0, %4, %5
+%endmacro
+
+%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0
+
+%macro RET 0 ; SysV epilogue: free stack, pop callee-saved GPRs, clear ymm state, return
+%if stack_size_padded > 0
+%if required_stack_alignment > STACK_ALIGNMENT
+    mov rsp, rstkm ; manually-aligned stack: restore saved original rsp
+%else
+    add rsp, stack_size_padded
+%endif
+%endif
+    POP_IF_USED 14, 13, 12, 11, 10, 9
+%if mmsize == 32
+    vzeroupper ; avoid AVX/SSE transition penalties in the caller
+%endif
+    AUTO_REP_RET
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+DECLARE_REG 0, eax, 4 ; x86-32 cdecl: all args on the stack; offsets are relative to the return address
+DECLARE_REG 1, ecx, 8
+DECLARE_REG 2, edx, 12
+DECLARE_REG 3, ebx, 16 ; ebx/esi/edi/ebp are callee-saved
+DECLARE_REG 4, esi, 20
+DECLARE_REG 5, edi, 24
+DECLARE_REG 6, ebp, 28
+%define rsp esp
+
+%macro DECLARE_ARG 1-* ; stack locations for args beyond the 7 GPR-mapped ones
+    %rep %0
+        %define r%1m [rstk + stack_offset + 4*%1 + 4]
+        %define r%1mp dword r%1m
+        %rotate 1
+    %endrep
+%endmacro
+
+DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
+
+%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+    %assign num_args %1
+    %assign regs_used %2
+    ASSERT regs_used >= num_args
+    %if num_args > 7
+        %assign num_args 7 ; only 7 GPRs available; the rest stay memory-only (rNm)
+    %endif
+    %if regs_used > 7
+        %assign regs_used 7
+    %endif
+    SETUP_STACK_POINTER %4
+    ASSERT regs_used <= 7
+    PUSH_IF_USED 3, 4, 5, 6 ; save ebx/esi/edi/ebp if used
+    ALLOC_STACK %4
+    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6 ; every arg is stack-passed on x86-32
+    DEFINE_ARGS_INTERNAL %0, %4, %5
+%endmacro
+
+%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0
+
+%macro RET 0 ; x86-32 epilogue: free stack, pop callee-saved GPRs, clear ymm state, return
+%if stack_size_padded > 0
+%if required_stack_alignment > STACK_ALIGNMENT
+    mov rsp, rstkm ; manually-aligned stack: restore saved original esp
+%else
+    add rsp, stack_size_padded
+%endif
+%endif
+    POP_IF_USED 6, 5, 4, 3
+%if mmsize == 32
+    vzeroupper
+%endif
+    AUTO_REP_RET
+%endmacro
+
+%endif ;======================================================================
+
+%if WIN64 == 0
+%macro WIN64_SPILL_XMM 1
+%endmacro
+%macro WIN64_RESTORE_XMM 1
+%endmacro
+%macro WIN64_PUSH_XMM 0
+%endmacro
+%endif
+
+; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
+; a branch or a branch target. So switch to a 2-byte form of ret in that case.
+; We can automatically detect "follows a branch", but not a branch target.
+; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
+%macro REP_RET 0 ; manual branch-target variant: caller knows this ret is a jump destination
+    %if has_epilogue
+        RET
+    %else
+        rep ret
+    %endif
+%endmacro
+
+%define last_branch_adr $$ ; initialized to section start so the first AUTO_REP_RET never matches
+%macro AUTO_REP_RET 0
+    %ifndef cpuflags
+        times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ != last_branch_adr.
+    %elif notcpuflag(ssse3)
+        times ((last_branch_adr-$)>>31)+1 rep
+    %endif
+    ret
+%endmacro
+
+%macro BRANCH_INSTR 0-* ; wrap each jump mnemonic so it records its own address in last_branch_adr
+    %rep %0
+        %macro %1 1-2 %1 ; redefine the mnemonic; %2 defaults to the real instruction name
+            %2 %1
+            %%branch_instr:
+            %xdefine last_branch_adr %%branch_instr
+        %endmacro
+        %rotate 1
+    %endrep
+%endmacro
+
+BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp
+
+%macro TAIL_CALL 2 ; callee, is_nonadjacent
+    %if has_epilogue
+        call %1
+        RET
+    %elif %2
+        jmp %1 ; no epilogue needed: jump straight into the callee
+    %endif
+%endmacro
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Begin a function.
+; Applies any symbol mangling needed for C linkage, and sets up a define such that
+; subsequent uses of the function name automatically refer to the mangled version.
+; Appends cpuflags to the function name if cpuflags has been specified.
+; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
+; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
+%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
+    cglobal_internal 1, %1 %+ SUFFIX, %2
+%endmacro
+%macro cvisible 1-2+ "" ; name, [PROLOGUE args] -- like cglobal but with default (public) visibility
+    cglobal_internal 0, %1 %+ SUFFIX, %2
+%endmacro
+%macro cglobal_internal 2-3+ ; %1 = 1 for hidden visibility / private prefix, %2 = mangled name, %3 = PROLOGUE args
+    %if %1
+        %xdefine %%FUNCTION_PREFIX private_prefix
+        %xdefine %%VISIBILITY hidden
+    %else
+        %xdefine %%FUNCTION_PREFIX public_prefix
+        %xdefine %%VISIBILITY
+    %endif
+    %ifndef cglobaled_%2
+        %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
+        %xdefine %2.skip_prologue %2 %+ .skip_prologue
+        CAT_XDEFINE cglobaled_, %2, 1
+    %endif
+    %xdefine current_function %2
+    %ifidn __OUTPUT_FORMAT__,elf
+        global %2:function %%VISIBILITY
+    %else
+        global %2
+    %endif
+    align function_align
+    %2:
+    RESET_MM_PERMUTATION        ; needed for x86-64, also makes disassembly somewhat nicer
+    %xdefine rstk rsp           ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
+    %assign stack_offset 0      ; stack pointer offset relative to the return address
+    %assign stack_size 0        ; amount of stack space that can be freely used inside a function
+    %assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
+    %assign xmm_regs_used 0     ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64
+    %ifnidn %3, ""
+        PROLOGUE %3
+    %endif
+%endmacro
+
+%macro cextern 1 ; declare an external symbol with the private prefix applied
+    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
+    CAT_XDEFINE cglobaled_, %1, 1
+    extern %1
+%endmacro
+
+; like cextern, but without the prefix
+%macro cextern_naked 1
+    %xdefine %1 mangle(%1)
+    CAT_XDEFINE cglobaled_, %1, 1
+    extern %1
+%endmacro
+
+%macro const 1-2+ ; define a global, prefixed, 32-byte-aligned data symbol
+    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
+    %ifidn __OUTPUT_FORMAT__,elf
+        global %1:data hidden
+    %else
+        global %1
+    %endif
+    ALIGN 32
+    %1: %2
+%endmacro
+
+; This is needed for ELF, otherwise the GNU linker assumes the stack is
+; executable by default.
+%ifidn __OUTPUT_FORMAT__,elf
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
+; cpuflags -- bitmask of instruction-set capabilities; each flag implies its prerequisites
+
+%assign cpuflags_mmx      (1<<0)
+%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
+%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
+%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
+%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
+%assign cpuflags_sse2     (1<<5) | cpuflags_sse
+%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
+%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
+%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
+%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
+%assign cpuflags_sse42    (1<<10)| cpuflags_sse4
+%assign cpuflags_avx      (1<<11)| cpuflags_sse42
+%assign cpuflags_xop      (1<<12)| cpuflags_avx
+%assign cpuflags_fma4     (1<<13)| cpuflags_avx
+%assign cpuflags_avx2     (1<<14)| cpuflags_avx
+%assign cpuflags_fma3     (1<<15)| cpuflags_avx
+
+%assign cpuflags_cache32  (1<<16)
+%assign cpuflags_cache64  (1<<17)
+%assign cpuflags_slowctz  (1<<18)
+%assign cpuflags_lzcnt    (1<<19)
+%assign cpuflags_aligned  (1<<20) ; not a cpu feature, but a function variant
+%assign cpuflags_atom     (1<<21)
+%assign cpuflags_bmi1     (1<<22)|cpuflags_lzcnt
+%assign cpuflags_bmi2     (1<<23)|cpuflags_bmi1
+
+%define    cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x)) ; true iff ALL bits of the flag (incl. prerequisites) are set
+%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
+
+; Takes an arbitrary number of cpuflags from the above list.
+; All subsequent functions (up to the next INIT_CPUFLAGS) is built for the specified cpu.
+; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
+%macro INIT_CPUFLAGS 0-*
+    %xdefine SUFFIX
+    %undef cpuname
+    %assign cpuflags 0
+
+    %if %0 >= 1
+        %rep %0
+            %ifdef cpuname
+                %xdefine cpuname cpuname %+ _%1 ; multiple flags join with underscores, e.g. sse2_lzcnt
+            %else
+                %xdefine cpuname %1
+            %endif
+            %assign cpuflags cpuflags | cpuflags_%1
+            %rotate 1
+        %endrep
+        %xdefine SUFFIX _ %+ cpuname ; appended to function names by cglobal
+
+        %if cpuflag(avx)
+            %assign avx_enabled 1
+        %endif
+        %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
+            %define mova movaps ; pre-sse2/avx2 cpus: use the float forms, smaller encodings
+            %define movu movups
+            %define movnta movntps
+        %endif
+        %if cpuflag(aligned)
+            %define movu mova ; "aligned" function variant: unaligned loads are known safe to align
+        %elif cpuflag(sse3) && notcpuflag(ssse3)
+            %define movu lddqu
+        %endif
+    %endif
+
+    %if ARCH_X86_64 || cpuflag(sse2)
+        CPU amdnop ; let the assembler emit long nops for alignment padding
+    %else
+        CPU basicnop
+    %endif
+%endmacro
+
+; Merge mmx and sse*
+; m# is a simd register of the currently selected size
+; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
+; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
+; (All 3 remain in sync through SWAP.)
+
+%macro CAT_XDEFINE 3 ; %xdefine with a concatenated (%1%2) name
+    %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2 ; %undef with a concatenated (%1%2) name
+    %undef %1%2
+%endmacro
+
+%macro INIT_MMX 0-1+ ; select 64-bit mm registers for subsequent code; optional arg = cpuflags
+    %assign avx_enabled 0
+    %define RESET_MM_PERMUTATION INIT_MMX %1
+    %define mmsize 8
+    %define num_mmregs 8
+    %define mova movq
+    %define movu movq
+    %define movh movd
+    %define movnta movntq
+    %assign %%i 0
+    %rep 8
+    CAT_XDEFINE m, %%i, mm %+ %%i
+    CAT_XDEFINE nmm, %%i, %%i
+    %assign %%i %%i+1
+    %endrep
+    %rep 8 ; m8-m15 don't exist in mmx mode: remove any xmm-era defines
+    CAT_UNDEF m, %%i
+    CAT_UNDEF nmm, %%i
+    %assign %%i %%i+1
+    %endrep
+    INIT_CPUFLAGS %1
+%endmacro
+
+%macro INIT_XMM 0-1+ ; select 128-bit xmm registers for subsequent code; optional arg = cpuflags
+    %assign avx_enabled 0
+    %define RESET_MM_PERMUTATION INIT_XMM %1
+    %define mmsize 16
+    %define num_mmregs 8
+    %if ARCH_X86_64
+    %define num_mmregs 16 ; xmm8-xmm15 only exist in 64-bit mode
+    %endif
+    %define mova movdqa
+    %define movu movdqu
+    %define movh movq
+    %define movnta movntdq
+    %assign %%i 0
+    %rep num_mmregs
+    CAT_XDEFINE m, %%i, xmm %+ %%i
+    CAT_XDEFINE nxmm, %%i, %%i
+    %assign %%i %%i+1
+    %endrep
+    INIT_CPUFLAGS %1
+%endmacro
+
+%macro INIT_YMM 0-1+ ; select 256-bit ymm registers for subsequent code; optional arg = cpuflags
+    %assign avx_enabled 1
+    %define RESET_MM_PERMUTATION INIT_YMM %1
+    %define mmsize 32
+    %define num_mmregs 8
+    %if ARCH_X86_64
+    %define num_mmregs 16
+    %endif
+    %define mova movdqa
+    %define movu movdqu
+    %undef movh ; no 128-bit "half" move is defined for ymm mode
+    %define movnta movntdq
+    %assign %%i 0
+    %rep num_mmregs
+    CAT_XDEFINE m, %%i, ymm %+ %%i
+    CAT_XDEFINE nymm, %%i, %%i
+    %assign %%i %%i+1
+    %endrep
+    INIT_CPUFLAGS %1
+%endmacro
+
+INIT_XMM
+
+%macro DECLARE_MMCAST 1 ; defines xm#/ym# casts of m# that survive register permutation (SWAP)
+    %define  mmmm%1   mm%1
+    %define  mmxmm%1  mm%1
+    %define  mmymm%1  mm%1
+    %define xmmmm%1   mm%1
+    %define xmmxmm%1 xmm%1
+    %define xmmymm%1 xmm%1
+    %define ymmmm%1   mm%1
+    %define ymmxmm%1 xmm%1
+    %define ymmymm%1 ymm%1
+    %define ymm%1xmm xmm%1
+    %define xmm%1ymm ymm%1
+    %define xm%1 xmm %+ m%1 ; expands via m%1, so it tracks SWAPped register names
+    %define ym%1 ymm %+ m%1
+%endmacro
+
+%assign i 0
+%rep 16
+    DECLARE_MMCAST i
+%assign i i+1
+%endrep
+
+; I often want to use macros that permute their arguments. e.g. there's no
+; efficient way to implement butterfly or transpose or dct without swapping some
+; arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
+
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+%rep %0/2
+    %xdefine %%tmp%2 m%2 ; first pass: snapshot every source register name
+    %rotate 2
+%endrep
+%rep %0/2
+    %xdefine m%1 %%tmp%2 ; second pass: apply the permutation from the snapshots
+    CAT_XDEFINE n, m%1, %1
+    %rotate 2
+%endrep
+%endmacro
+
+%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
+%ifnum %1 ; SWAP 0, 1, ...
+    SWAP_INTERNAL_NUM %1, %2
+%else ; SWAP m0, m1, ...
+    SWAP_INTERNAL_NAME %1, %2
+%endif
+%endmacro
+
+%macro SWAP_INTERNAL_NUM 2-* ; chained pairwise swap of m-register indices
+    %rep %0-1
+        %xdefine %%tmp m%1
+        %xdefine m%1 m%2
+        %xdefine m%2 %%tmp
+        CAT_XDEFINE n, m%1, %1 ; keep the reverse map (name -> index) in sync
+        CAT_XDEFINE n, m%2, %2
+    %rotate 1
+    %endrep
+%endmacro
+
+%macro SWAP_INTERNAL_NAME 2-* ; translate register names to indices via the n map, then swap
+    %xdefine %%args n %+ %1
+    %rep %0-1
+        %xdefine %%args %%args, n %+ %2
+    %rotate 1
+    %endrep
+    SWAP_INTERNAL_NUM %%args
+%endmacro
+
+; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
+; calls to that function will automatically load the permutation, so values can
+; be returned in mmregs.
+%macro SAVE_MM_PERMUTATION 0-1 ; optional arg = function name; defaults to current_function
+    %if %0
+        %xdefine %%f %1_m
+    %else
+        %xdefine %%f current_function %+ _m
+    %endif
+    %assign %%i 0
+    %rep num_mmregs
+        CAT_XDEFINE %%f, %%i, m %+ %%i ; records <func>_m<i> = current name of m<i>
+    %assign %%i %%i+1
+    %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 1 ; name to load from
+    %ifdef %1_m0
+        %assign %%i 0
+        %rep num_mmregs
+            CAT_XDEFINE m, %%i, %1_m %+ %%i
+            CAT_XDEFINE n, m %+ %%i, %%i
+        %assign %%i %%i+1
+        %endrep
+    %endif
+%endmacro
+
+; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
+%macro call 1
+    call_internal %1, %1 %+ SUFFIX
+%endmacro
+%macro call_internal 2
+    %xdefine %%i %1
+    %ifndef cglobaled_%1
+        %ifdef cglobaled_%2
+            %xdefine %%i %2 ; prefer the cpu-suffixed variant when only it exists
+        %endif
+    %endif
+    call %%i
+    LOAD_MM_PERMUTATION %%i ; adopt the callee's saved register permutation, if any
+%endmacro
+
+; Substitutions that reduce instruction size but are functionally equivalent
+%macro add 2 ; "add x, 128" needs an imm32; "sub x, -128" fits in an imm8
+    %ifnum %2
+        %if %2==128
+            sub %1, -128
+        %else
+            add %1, %2
+        %endif
+    %else
+        add %1, %2
+    %endif
+%endmacro
+
+%macro sub 2 ; mirror of the add override above
+    %ifnum %2
+        %if %2==128
+            add %1, -128
+        %else
+            sub %1, %2
+        %endif
+    %else
+        sub %1, %2
+    %endif
+%endmacro
+
+;=============================================================================
+; AVX abstraction layer
+;=============================================================================
+
+%assign i 0
+%rep 16 ; sizeof<reg> lookup tables used by RUN_AVX_INSTR to pick operand width
+    %if i < 8
+        CAT_XDEFINE sizeofmm, i, 8
+    %endif
+    CAT_XDEFINE sizeofxmm, i, 16
+    CAT_XDEFINE sizeofymm, i, 32
+%assign i i+1
+%endrep
+%undef i
+
+%macro CHECK_AVX_INSTR_EMU 3-* ; opcode, dst, srcs...: error out if dst aliases a non-first source (unfixable without AVX)
+    %xdefine %%opcode %1
+    %xdefine %%dst %2
+    %rep %0-2
+        %ifidn %%dst, %3
+            %error non-avx emulation of ``%%opcode'' is not supported
+        %endif
+        %rotate 1
+    %endrep
+%endmacro
+
+;%1 == instruction
+;%2 == minimal instruction set
+;%3 == 1 if float, 0 if int
+;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
+;%6+: operands
+%macro RUN_AVX_INSTR 6-9+
+    %ifnum sizeof%7 ; operand width: prefer src1's register size, then dst's, else mmsize
+        %assign __sizeofreg sizeof%7
+    %elifnum sizeof%6
+        %assign __sizeofreg sizeof%6
+    %else
+        %assign __sizeofreg mmsize
+    %endif
+    %assign __emulate_avx 0
+    %if avx_enabled && __sizeofreg >= 16
+        %xdefine __instr v%1 ; AVX path: emit the VEX-encoded (v-prefixed) form directly
+    %else
+        %xdefine __instr %1
+        %if %0 >= 8+%4 ; 3-operand form requested on a 2-operand ISA: emulate with a mov
+            %assign __emulate_avx 1
+        %endif
+    %endif
+    %ifnidn %2, fnord
+        %ifdef cpuname
+            %if notcpuflag(%2)
+                %error use of ``%1'' %2 instruction in cpuname function: current_function
+            %endif
+        %endif
+    %endif
+
+    %if __emulate_avx ; legacy SSE: synthesize dst = op(src1, src2) as mov dst,src1; op dst,src2
+        %xdefine __src1 %7
+        %xdefine __src2 %8
+        %ifnidn %6, %7
+            %if %0 >= 9
+                CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, %8, %9
+            %else
+                CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, %8
+            %endif
+            %if %5 && %4 == 0
+                %ifnid %8
+                    ; 3-operand AVX instructions with a memory arg can only have it in src2,
+                    ; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
+                    ; So, if the instruction is commutative with a memory arg, swap them.
+                    %xdefine __src1 %8
+                    %xdefine __src2 %7
+                %endif
+            %endif
+            %if __sizeofreg == 8
+                MOVQ %6, __src1
+            %elif %3
+                MOVAPS %6, __src1
+            %else
+                MOVDQA %6, __src1
+            %endif
+        %endif
+        %if %0 >= 9
+            %1 %6, __src2, %9
+        %else
+            %1 %6, __src2
+        %endif
+    %elif %0 >= 9
+        __instr %6, %7, %8, %9
+    %elif %0 == 8
+        __instr %6, %7, %8
+    %elif %0 == 7
+        __instr %6, %7
+    %else
+        __instr %6
+    %endif
+%endmacro
+
+;%1 == instruction
+;%2 == minimal instruction set
+;%3 == 1 if float, 0 if int
+;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
+%macro AVX_INSTR 1-5 fnord, 0, 1, 0
+    %macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5 ; redefine the mnemonic itself; "fnord" marks absent operands
+        %ifidn %2, fnord
+            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
+        %elifidn %3, fnord
+            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
+        %elifidn %4, fnord
+            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
+        %elifidn %5, fnord
+            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
+        %else
+            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
+        %endif
+    %endmacro
+%endmacro
+
+; Instructions with both VEX and non-VEX encodings
+; Non-destructive instructions are written without parameters
+AVX_INSTR addpd, sse2, 1, 0, 1
+AVX_INSTR addps, sse, 1, 0, 1
+AVX_INSTR addsd, sse2, 1, 0, 1
+AVX_INSTR addss, sse, 1, 0, 1
+AVX_INSTR addsubpd, sse3, 1, 0, 0
+AVX_INSTR addsubps, sse3, 1, 0, 0
+AVX_INSTR aesdec, fnord, 0, 0, 0
+AVX_INSTR aesdeclast, fnord, 0, 0, 0
+AVX_INSTR aesenc, fnord, 0, 0, 0
+AVX_INSTR aesenclast, fnord, 0, 0, 0
+AVX_INSTR aesimc
+AVX_INSTR aeskeygenassist
+AVX_INSTR andnpd, sse2, 1, 0, 0
+AVX_INSTR andnps, sse, 1, 0, 0
+AVX_INSTR andpd, sse2, 1, 0, 1
+AVX_INSTR andps, sse, 1, 0, 1
+AVX_INSTR blendpd, sse4, 1, 0, 0
+AVX_INSTR blendps, sse4, 1, 0, 0
+AVX_INSTR blendvpd, sse4, 1, 0, 0
+AVX_INSTR blendvps, sse4, 1, 0, 0
+AVX_INSTR cmppd, sse2, 1, 1, 0
+AVX_INSTR cmpps, sse, 1, 1, 0
+AVX_INSTR cmpsd, sse2, 1, 1, 0
+AVX_INSTR cmpss, sse, 1, 1, 0
+AVX_INSTR comisd, sse2
+AVX_INSTR comiss, sse
+AVX_INSTR cvtdq2pd, sse2
+AVX_INSTR cvtdq2ps, sse2
+AVX_INSTR cvtpd2dq, sse2
+AVX_INSTR cvtpd2ps, sse2
+AVX_INSTR cvtps2dq, sse2
+AVX_INSTR cvtps2pd, sse2
+AVX_INSTR cvtsd2si, sse2
+AVX_INSTR cvtsd2ss, sse2
+AVX_INSTR cvtsi2sd, sse2
+AVX_INSTR cvtsi2ss, sse
+AVX_INSTR cvtss2sd, sse2
+AVX_INSTR cvtss2si, sse
+AVX_INSTR cvttpd2dq, sse2
+AVX_INSTR cvttps2dq, sse2
+AVX_INSTR cvttsd2si, sse2
+AVX_INSTR cvttss2si, sse
+AVX_INSTR divpd, sse2, 1, 0, 0
+AVX_INSTR divps, sse, 1, 0, 0
+AVX_INSTR divsd, sse2, 1, 0, 0
+AVX_INSTR divss, sse, 1, 0, 0
+AVX_INSTR dppd, sse4, 1, 1, 0
+AVX_INSTR dpps, sse4, 1, 1, 0
+AVX_INSTR extractps, sse4
+AVX_INSTR haddpd, sse3, 1, 0, 0
+AVX_INSTR haddps, sse3, 1, 0, 0
+AVX_INSTR hsubpd, sse3, 1, 0, 0
+AVX_INSTR hsubps, sse3, 1, 0, 0
+AVX_INSTR insertps, sse4, 1, 1, 0
+AVX_INSTR lddqu, sse3
+AVX_INSTR ldmxcsr, sse
+AVX_INSTR maskmovdqu, sse2
+AVX_INSTR maxpd, sse2, 1, 0, 1
+AVX_INSTR maxps, sse, 1, 0, 1
+AVX_INSTR maxsd, sse2, 1, 0, 1
+AVX_INSTR maxss, sse, 1, 0, 1
+AVX_INSTR minpd, sse2, 1, 0, 1
+AVX_INSTR minps, sse, 1, 0, 1
+AVX_INSTR minsd, sse2, 1, 0, 1
+AVX_INSTR minss, sse, 1, 0, 1
+AVX_INSTR movapd, sse2
+AVX_INSTR movaps, sse
+AVX_INSTR movd
+AVX_INSTR movddup, sse3
+AVX_INSTR movdqa, sse2
+AVX_INSTR movdqu, sse2
+AVX_INSTR movhlps, sse, 1, 0, 0
+AVX_INSTR movhpd, sse2, 1, 0, 0
+AVX_INSTR movhps, sse, 1, 0, 0
+AVX_INSTR movlhps, sse, 1, 0, 0
+AVX_INSTR movlpd, sse2, 1, 0, 0
+AVX_INSTR movlps, sse, 1, 0, 0
+AVX_INSTR movmskpd, sse2
+AVX_INSTR movmskps, sse
+AVX_INSTR movntdq, sse2
+AVX_INSTR movntdqa, sse4
+AVX_INSTR movntpd, sse2
+AVX_INSTR movntps, sse
+AVX_INSTR movq
+AVX_INSTR movsd, sse2, 1, 0, 0
+AVX_INSTR movshdup, sse3
+AVX_INSTR movsldup, sse3
+AVX_INSTR movss, sse, 1, 0, 0
+AVX_INSTR movupd, sse2
+AVX_INSTR movups, sse
+AVX_INSTR mpsadbw, sse4
+AVX_INSTR mulpd, sse2, 1, 0, 1
+AVX_INSTR mulps, sse, 1, 0, 1
+AVX_INSTR mulsd, sse2, 1, 0, 1
+AVX_INSTR mulss, sse, 1, 0, 1
+AVX_INSTR orpd, sse2, 1, 0, 1
+AVX_INSTR orps, sse, 1, 0, 1
+AVX_INSTR pabsb, ssse3
+AVX_INSTR pabsd, ssse3
+AVX_INSTR pabsw, ssse3
+AVX_INSTR packsswb, mmx, 0, 0, 0
+AVX_INSTR packssdw, mmx, 0, 0, 0
+AVX_INSTR packuswb, mmx, 0, 0, 0
+AVX_INSTR packusdw, sse4, 0, 0, 0
+AVX_INSTR paddb, mmx, 0, 0, 1
+AVX_INSTR paddw, mmx, 0, 0, 1
+AVX_INSTR paddd, mmx, 0, 0, 1
+AVX_INSTR paddq, sse2, 0, 0, 1
+AVX_INSTR paddsb, mmx, 0, 0, 1
+AVX_INSTR paddsw, mmx, 0, 0, 1
+AVX_INSTR paddusb, mmx, 0, 0, 1
+AVX_INSTR paddusw, mmx, 0, 0, 1
+AVX_INSTR palignr, ssse3
+AVX_INSTR pand, mmx, 0, 0, 1
+AVX_INSTR pandn, mmx, 0, 0, 0
+AVX_INSTR pavgb, mmx2, 0, 0, 1
+AVX_INSTR pavgw, mmx2, 0, 0, 1
+AVX_INSTR pblendvb, sse4, 0, 0, 0
+AVX_INSTR pblendw, sse4
+AVX_INSTR pclmulqdq
+AVX_INSTR pcmpestri, sse42
+AVX_INSTR pcmpestrm, sse42
+AVX_INSTR pcmpistri, sse42
+AVX_INSTR pcmpistrm, sse42
+AVX_INSTR pcmpeqb, mmx, 0, 0, 1
+AVX_INSTR pcmpeqw, mmx, 0, 0, 1
+AVX_INSTR pcmpeqd, mmx, 0, 0, 1
+AVX_INSTR pcmpeqq, sse4, 0, 0, 1
+AVX_INSTR pcmpgtb, mmx, 0, 0, 0
+AVX_INSTR pcmpgtw, mmx, 0, 0, 0
+AVX_INSTR pcmpgtd, mmx, 0, 0, 0
+AVX_INSTR pcmpgtq, sse42, 0, 0, 0
+AVX_INSTR pextrb, sse4
+AVX_INSTR pextrd, sse4
+AVX_INSTR pextrq, sse4
+AVX_INSTR pextrw, mmx2
+AVX_INSTR phaddw, ssse3, 0, 0, 0
+AVX_INSTR phaddd, ssse3, 0, 0, 0
+AVX_INSTR phaddsw, ssse3, 0, 0, 0
+AVX_INSTR phminposuw, sse4
+AVX_INSTR phsubw, ssse3, 0, 0, 0
+AVX_INSTR phsubd, ssse3, 0, 0, 0
+AVX_INSTR phsubsw, ssse3, 0, 0, 0
+AVX_INSTR pinsrb, sse4
+AVX_INSTR pinsrd, sse4
+AVX_INSTR pinsrq, sse4
+AVX_INSTR pinsrw, mmx2
+AVX_INSTR pmaddwd, mmx, 0, 0, 1
+AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
+AVX_INSTR pmaxsb, sse4, 0, 0, 1
+AVX_INSTR pmaxsw, mmx2, 0, 0, 1
+AVX_INSTR pmaxsd, sse4, 0, 0, 1
+AVX_INSTR pmaxub, mmx2, 0, 0, 1
+AVX_INSTR pmaxuw, sse4, 0, 0, 1
+AVX_INSTR pmaxud, sse4, 0, 0, 1
+AVX_INSTR pminsb, sse4, 0, 0, 1
+AVX_INSTR pminsw, mmx2, 0, 0, 1
+AVX_INSTR pminsd, sse4, 0, 0, 1
+AVX_INSTR pminub, mmx2, 0, 0, 1
+AVX_INSTR pminuw, sse4, 0, 0, 1
+AVX_INSTR pminud, sse4, 0, 0, 1
+AVX_INSTR pmovmskb, mmx2
+AVX_INSTR pmovsxbw, sse4
+AVX_INSTR pmovsxbd, sse4
+AVX_INSTR pmovsxbq, sse4
+AVX_INSTR pmovsxwd, sse4
+AVX_INSTR pmovsxwq, sse4
+AVX_INSTR pmovsxdq, sse4
+AVX_INSTR pmovzxbw, sse4
+AVX_INSTR pmovzxbd, sse4
+AVX_INSTR pmovzxbq, sse4
+AVX_INSTR pmovzxwd, sse4
+AVX_INSTR pmovzxwq, sse4
+AVX_INSTR pmovzxdq, sse4
+AVX_INSTR pmuldq, sse4, 0, 0, 1
+AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
+AVX_INSTR pmulhuw, mmx2, 0, 0, 1
+AVX_INSTR pmulhw, mmx, 0, 0, 1
+AVX_INSTR pmullw, mmx, 0, 0, 1
+AVX_INSTR pmulld, sse4, 0, 0, 1
+AVX_INSTR pmuludq, sse2, 0, 0, 1
+AVX_INSTR por, mmx, 0, 0, 1
+AVX_INSTR psadbw, mmx2, 0, 0, 1
+AVX_INSTR pshufb, ssse3, 0, 0, 0
+AVX_INSTR pshufd, sse2
+AVX_INSTR pshufhw, sse2
+AVX_INSTR pshuflw, sse2
+AVX_INSTR psignb, ssse3, 0, 0, 0
+AVX_INSTR psignw, ssse3, 0, 0, 0
+AVX_INSTR psignd, ssse3, 0, 0, 0
+AVX_INSTR psllw, mmx, 0, 0, 0
+AVX_INSTR pslld, mmx, 0, 0, 0
+AVX_INSTR psllq, mmx, 0, 0, 0
+AVX_INSTR pslldq, sse2, 0, 0, 0
+AVX_INSTR psraw, mmx, 0, 0, 0
+AVX_INSTR psrad, mmx, 0, 0, 0
+AVX_INSTR psrlw, mmx, 0, 0, 0
+AVX_INSTR psrld, mmx, 0, 0, 0
+AVX_INSTR psrlq, mmx, 0, 0, 0
+AVX_INSTR psrldq, sse2, 0, 0, 0
+AVX_INSTR psubb, mmx, 0, 0, 0
+AVX_INSTR psubw, mmx, 0, 0, 0
+AVX_INSTR psubd, mmx, 0, 0, 0
+AVX_INSTR psubq, sse2, 0, 0, 0
+AVX_INSTR psubsb, mmx, 0, 0, 0
+AVX_INSTR psubsw, mmx, 0, 0, 0
+AVX_INSTR psubusb, mmx, 0, 0, 0
+AVX_INSTR psubusw, mmx, 0, 0, 0
+AVX_INSTR ptest, sse4
+AVX_INSTR punpckhbw, mmx, 0, 0, 0
+AVX_INSTR punpckhwd, mmx, 0, 0, 0
+AVX_INSTR punpckhdq, mmx, 0, 0, 0
+AVX_INSTR punpckhqdq, sse2, 0, 0, 0
+AVX_INSTR punpcklbw, mmx, 0, 0, 0
+AVX_INSTR punpcklwd, mmx, 0, 0, 0
+AVX_INSTR punpckldq, mmx, 0, 0, 0
+AVX_INSTR punpcklqdq, sse2, 0, 0, 0
+AVX_INSTR pxor, mmx, 0, 0, 1
+AVX_INSTR rcpps, sse, 1, 0, 0
+AVX_INSTR rcpss, sse, 1, 0, 0
+AVX_INSTR roundpd, sse4
+AVX_INSTR roundps, sse4
+AVX_INSTR roundsd, sse4
+AVX_INSTR roundss, sse4
+AVX_INSTR rsqrtps, sse, 1, 0, 0
+AVX_INSTR rsqrtss, sse, 1, 0, 0
+AVX_INSTR shufpd, sse2, 1, 1, 0
+AVX_INSTR shufps, sse, 1, 1, 0
+AVX_INSTR sqrtpd, sse2, 1, 0, 0
+AVX_INSTR sqrtps, sse, 1, 0, 0
+AVX_INSTR sqrtsd, sse2, 1, 0, 0
+AVX_INSTR sqrtss, sse, 1, 0, 0
+AVX_INSTR stmxcsr, sse
+AVX_INSTR subpd, sse2, 1, 0, 0
+AVX_INSTR subps, sse, 1, 0, 0
+AVX_INSTR subsd, sse2, 1, 0, 0
+AVX_INSTR subss, sse, 1, 0, 0
+AVX_INSTR ucomisd, sse2
+AVX_INSTR ucomiss, sse
+AVX_INSTR unpckhpd, sse2, 1, 0, 0
+AVX_INSTR unpckhps, sse, 1, 0, 0
+AVX_INSTR unpcklpd, sse2, 1, 0, 0
+AVX_INSTR unpcklps, sse, 1, 0, 0
+AVX_INSTR xorpd, sse2, 1, 0, 1
+AVX_INSTR xorps, sse, 1, 0, 1
+
+; 3DNow instructions, for sharing code between AVX, SSE and 3DN
+AVX_INSTR pfadd, 3dnow, 1, 0, 1
+AVX_INSTR pfsub, 3dnow, 1, 0, 0
+AVX_INSTR pfmul, 3dnow, 1, 0, 1
+
+; base-4 constants for shuffles
+%assign i 0
+%rep 256
+    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
+    %if j < 10
+        CAT_XDEFINE q000, j, i
+    %elif j < 100
+        CAT_XDEFINE q00, j, i
+    %elif j < 1000
+        CAT_XDEFINE q0, j, i
+    %else
+        CAT_XDEFINE q, j, i
+    %endif
+%assign i i+1
+%endrep
+%undef i
+%undef j
+
+%macro FMA_INSTR 3
+    %macro %1 4-7 %1, %2, %3
+        %if cpuflag(xop)
+            v%5 %1, %2, %3, %4
+        %elifnidn %1, %4
+            %6 %1, %2, %3
+            %7 %1, %4
+        %else
+            %error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported
+        %endif
+    %endmacro
+%endmacro
+
+FMA_INSTR  pmacsww,  pmullw, paddw
+FMA_INSTR  pmacsdd,  pmulld, paddd ; sse4 emulation
+FMA_INSTR pmacsdql,  pmuldq, paddq ; sse4 emulation
+FMA_INSTR pmadcswd, pmaddwd, paddd
+
+; convert FMA4 to FMA3 if possible
+%macro FMA4_INSTR 4
+    %macro %1 4-8 %1, %2, %3, %4
+        %if cpuflag(fma4)
+            v%5 %1, %2, %3, %4
+        %elifidn %1, %2
+            v%6 %1, %4, %3 ; %1 = %1 * %3 + %4
+        %elifidn %1, %3
+            v%7 %1, %2, %4 ; %1 = %2 * %1 + %4
+        %elifidn %1, %4
+            v%8 %1, %2, %3 ; %1 = %2 * %3 + %1
+        %else
+            %error fma3 emulation of ``%5 %1, %2, %3, %4'' is not supported
+        %endif
+    %endmacro
+%endmacro
+
+FMA4_INSTR fmaddpd, fmadd132pd, fmadd213pd, fmadd231pd
+FMA4_INSTR fmaddps, fmadd132ps, fmadd213ps, fmadd231ps
+FMA4_INSTR fmaddsd, fmadd132sd, fmadd213sd, fmadd231sd
+FMA4_INSTR fmaddss, fmadd132ss, fmadd213ss, fmadd231ss
+
+FMA4_INSTR fmaddsubpd, fmaddsub132pd, fmaddsub213pd, fmaddsub231pd
+FMA4_INSTR fmaddsubps, fmaddsub132ps, fmaddsub213ps, fmaddsub231ps
+FMA4_INSTR fmsubaddpd, fmsubadd132pd, fmsubadd213pd, fmsubadd231pd
+FMA4_INSTR fmsubaddps, fmsubadd132ps, fmsubadd213ps, fmsubadd231ps
+
+FMA4_INSTR fmsubpd, fmsub132pd, fmsub213pd, fmsub231pd
+FMA4_INSTR fmsubps, fmsub132ps, fmsub213ps, fmsub231ps
+FMA4_INSTR fmsubsd, fmsub132sd, fmsub213sd, fmsub231sd
+FMA4_INSTR fmsubss, fmsub132ss, fmsub213ss, fmsub231ss
+
+FMA4_INSTR fnmaddpd, fnmadd132pd, fnmadd213pd, fnmadd231pd
+FMA4_INSTR fnmaddps, fnmadd132ps, fnmadd213ps, fnmadd231ps
+FMA4_INSTR fnmaddsd, fnmadd132sd, fnmadd213sd, fnmadd231sd
+FMA4_INSTR fnmaddss, fnmadd132ss, fnmadd213ss, fnmadd231ss
+
+FMA4_INSTR fnmsubpd, fnmsub132pd, fnmsub213pd, fnmsub231pd
+FMA4_INSTR fnmsubps, fnmsub132ps, fnmsub213ps, fnmsub231ps
+FMA4_INSTR fnmsubsd, fnmsub132sd, fnmsub213sd, fnmsub231sd
+FMA4_INSTR fnmsubss, fnmsub132ss, fnmsub213ss, fnmsub231ss
+
+; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug
+%if ARCH_X86_64 == 0
+%macro vpbroadcastq 2
+%if sizeof%1 == 16
+    movddup %1, %2
+%else
+    vbroadcastsd %1, %2
+%endif
+%endmacro
+%endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/x86/x86util.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,893 @@
+;*****************************************************************************
+;* x86util.asm: x86 utility macros
+;*****************************************************************************
+;* Copyright (C) 2008-2013 x264 project
+;*
+;* Authors: Holger Lubitz <holger@lubitz.org>
+;*          Loren Merritt <lorenm@u.washington.edu>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%assign FENC_STRIDE 64
+%assign FDEC_STRIDE 32
+
+%assign SIZEOF_PIXEL 1
+%assign SIZEOF_DCTCOEF 2
+%define pixel byte
+%define vpbroadcastdct vpbroadcastw
+%define vpbroadcastpix vpbroadcastb
+%if HIGH_BIT_DEPTH
+    %assign SIZEOF_PIXEL 2
+    %assign SIZEOF_DCTCOEF 4
+    %define pixel word
+    %define vpbroadcastdct vpbroadcastd
+    %define vpbroadcastpix vpbroadcastw
+%endif
+
+%assign FENC_STRIDEB SIZEOF_PIXEL*FENC_STRIDE
+%assign FDEC_STRIDEB SIZEOF_PIXEL*FDEC_STRIDE
+
+%assign PIXEL_MAX ((1 << BIT_DEPTH)-1)
+
+%macro FIX_STRIDES 1-*
+%if HIGH_BIT_DEPTH
+%rep %0
+    add %1, %1
+    %rotate 1
+%endrep
+%endif
+%endmacro
+
+
+%macro SBUTTERFLY 4
+%ifidn %1, dqqq
+    vperm2i128  m%4, m%2, m%3, q0301 ; punpckh
+    vinserti128 m%2, m%2, xm%3, 1    ; punpckl
+%elif avx_enabled && mmsize >= 16
+    punpckh%1 m%4, m%2, m%3
+    punpckl%1 m%2, m%3
+%else
+    mova      m%4, m%2
+    punpckl%1 m%2, m%3
+    punpckh%1 m%4, m%3
+%endif
+    SWAP %3, %4
+%endmacro
+
+%macro SBUTTERFLY2 4
+    punpckl%1 m%4, m%2, m%3
+    punpckh%1 m%2, m%2, m%3
+    SWAP %2, %4, %3
+%endmacro
+
+%macro TRANSPOSE4x4W 5
+    SBUTTERFLY wd, %1, %2, %5
+    SBUTTERFLY wd, %3, %4, %5
+    SBUTTERFLY dq, %1, %3, %5
+    SBUTTERFLY dq, %2, %4, %5
+    SWAP %2, %3
+%endmacro
+
+%macro TRANSPOSE2x4x4W 5
+    SBUTTERFLY wd,  %1, %2, %5
+    SBUTTERFLY wd,  %3, %4, %5
+    SBUTTERFLY dq,  %1, %3, %5
+    SBUTTERFLY dq,  %2, %4, %5
+    SBUTTERFLY qdq, %1, %2, %5
+    SBUTTERFLY qdq, %3, %4, %5
+%endmacro
+
+%macro TRANSPOSE4x4D 5
+    SBUTTERFLY dq,  %1, %2, %5
+    SBUTTERFLY dq,  %3, %4, %5
+    SBUTTERFLY qdq, %1, %3, %5
+    SBUTTERFLY qdq, %2, %4, %5
+    SWAP %2, %3
+%endmacro
+
+%macro TRANSPOSE8x8W 9-11
+%if ARCH_X86_64
+    SBUTTERFLY wd,  %1, %2, %9
+    SBUTTERFLY wd,  %3, %4, %9
+    SBUTTERFLY wd,  %5, %6, %9
+    SBUTTERFLY wd,  %7, %8, %9
+    SBUTTERFLY dq,  %1, %3, %9
+    SBUTTERFLY dq,  %2, %4, %9
+    SBUTTERFLY dq,  %5, %7, %9
+    SBUTTERFLY dq,  %6, %8, %9
+    SBUTTERFLY qdq, %1, %5, %9
+    SBUTTERFLY qdq, %2, %6, %9
+    SBUTTERFLY qdq, %3, %7, %9
+    SBUTTERFLY qdq, %4, %8, %9
+    SWAP %2, %5
+    SWAP %4, %7
+%else
+; in:  m0..m7, unless %11 in which case m6 is in %9
+; out: m0..m7, unless %11 in which case m4 is in %10
+; spills into %9 and %10
+%if %0<11
+    movdqa %9, m%7
+%endif
+    SBUTTERFLY wd,  %1, %2, %7
+    movdqa %10, m%2
+    movdqa m%7, %9
+    SBUTTERFLY wd,  %3, %4, %2
+    SBUTTERFLY wd,  %5, %6, %2
+    SBUTTERFLY wd,  %7, %8, %2
+    SBUTTERFLY dq,  %1, %3, %2
+    movdqa %9, m%3
+    movdqa m%2, %10
+    SBUTTERFLY dq,  %2, %4, %3
+    SBUTTERFLY dq,  %5, %7, %3
+    SBUTTERFLY dq,  %6, %8, %3
+    SBUTTERFLY qdq, %1, %5, %3
+    SBUTTERFLY qdq, %2, %6, %3
+    movdqa %10, m%2
+    movdqa m%3, %9
+    SBUTTERFLY qdq, %3, %7, %2
+    SBUTTERFLY qdq, %4, %8, %2
+    SWAP %2, %5
+    SWAP %4, %7
+%if %0<11
+    movdqa m%5, %10
+%endif
+%endif
+%endmacro
+
+%macro WIDEN_SXWD 2
+    punpckhwd m%2, m%1
+    psrad     m%2, 16
+%if cpuflag(sse4)
+    pmovsxwd  m%1, m%1
+%else
+    punpcklwd m%1, m%1
+    psrad     m%1, 16
+%endif
+%endmacro
+
+%macro ABSW 2-3 ; dst, src, tmp (tmp used only if dst==src)
+%if cpuflag(ssse3)
+    pabsw   %1, %2
+%elifidn %3, sign ; version for pairing with PSIGNW: modifies src
+    pxor    %1, %1
+    pcmpgtw %1, %2
+    pxor    %2, %1
+    psubw   %2, %1
+    SWAP    %1, %2
+%elifidn %1, %2
+    pxor    %3, %3
+    psubw   %3, %1
+    pmaxsw  %1, %3
+%elifid %2
+    pxor    %1, %1
+    psubw   %1, %2
+    pmaxsw  %1, %2
+%elif %0 == 2
+    pxor    %1, %1
+    psubw   %1, %2
+    pmaxsw  %1, %2
+%else
+    mova    %1, %2
+    pxor    %3, %3
+    psubw   %3, %1
+    pmaxsw  %1, %3
+%endif
+%endmacro
+
+%macro ABSW2 6 ; dst1, dst2, src1, src2, tmp, tmp
+%if cpuflag(ssse3)
+    pabsw   %1, %3
+    pabsw   %2, %4
+%elifidn %1, %3
+    pxor    %5, %5
+    pxor    %6, %6
+    psubw   %5, %1
+    psubw   %6, %2
+    pmaxsw  %1, %5
+    pmaxsw  %2, %6
+%else
+    pxor    %1, %1
+    pxor    %2, %2
+    psubw   %1, %3
+    psubw   %2, %4
+    pmaxsw  %1, %3
+    pmaxsw  %2, %4
+%endif
+%endmacro
+
+%macro ABSB 2
+%if cpuflag(ssse3)
+    pabsb   %1, %1
+%else
+    pxor    %2, %2
+    psubb   %2, %1
+    pminub  %1, %2
+%endif
+%endmacro
+
+%macro ABSD 2-3
+%if cpuflag(ssse3)
+    pabsd   %1, %2
+%else
+    %define %%s %2
+%if %0 == 3
+    mova    %3, %2
+    %define %%s %3
+%endif
+    pxor     %1, %1
+    pcmpgtd  %1, %%s
+    pxor    %%s, %1
+    psubd   %%s, %1
+    SWAP     %1, %%s
+%endif
+%endmacro
+
+%macro PSIGN 3-4
+%if cpuflag(ssse3) && %0 == 4
+    psign%1 %2, %3, %4
+%elif cpuflag(ssse3)
+    psign%1 %2, %3
+%elif %0 == 4
+    pxor    %2, %3, %4
+    psub%1  %2, %4
+%else
+    pxor    %2, %3
+    psub%1  %2, %3
+%endif
+%endmacro
+
+%define PSIGNW PSIGN w,
+%define PSIGND PSIGN d,
+
+%macro SPLATB_LOAD 3
+%if cpuflag(ssse3)
+    movd      %1, [%2-3]
+    pshufb    %1, %3
+%else
+    movd      %1, [%2-3] ;to avoid crossing a cacheline
+    punpcklbw %1, %1
+    SPLATW    %1, %1, 3
+%endif
+%endmacro
+
+%imacro SPLATW 2-3 0
+%if cpuflag(avx2) && %3 == 0
+    vpbroadcastw %1, %2
+%else
+    PSHUFLW      %1, %2, (%3)*q1111
+%if mmsize == 16
+    punpcklqdq   %1, %1
+%endif
+%endif
+%endmacro
+
+%imacro SPLATD 2-3 0
+%if mmsize == 16
+    pshufd %1, %2, (%3)*q1111
+%else
+    pshufw %1, %2, (%3)*q0101 + ((%3)+1)*q1010
+%endif
+%endmacro
+
+%macro CLIPW 3 ;(dst, min, max)
+    pmaxsw %1, %2
+    pminsw %1, %3
+%endmacro
+
+%macro CLIPW2 4 ;(dst0, dst1, min, max)
+    pmaxsw %1, %3
+    pmaxsw %2, %3
+    pminsw %1, %4
+    pminsw %2, %4
+%endmacro
+
+%macro HADDD 2 ; sum junk
+%if sizeof%1 == 32
+%define %2 xmm%2
+    vextracti128 %2, %1, 1
+%define %1 xmm%1
+    paddd   %1, %2
+%endif
+%if mmsize >= 16
+%if cpuflag(xop) && sizeof%1 == 16
+    vphadddq %1, %1
+%endif
+    movhlps %2, %1
+    paddd   %1, %2
+%endif
+%if notcpuflag(xop)
+    PSHUFLW %2, %1, q0032
+    paddd   %1, %2
+%endif
+%undef %1
+%undef %2
+%endmacro
+
+%macro HADDW 2 ; reg, tmp
+%if cpuflag(xop) && sizeof%1 == 16
+    vphaddwq  %1, %1
+    movhlps   %2, %1
+    paddd     %1, %2
+%else
+    pmaddwd %1, [pw_1]
+    HADDD   %1, %2
+%endif
+%endmacro
+
+%macro HADDUWD 2
+%if cpuflag(xop) && sizeof%1 == 16
+    vphadduwd %1, %1
+%else
+    psrld %2, %1, 16
+    pslld %1, 16
+    psrld %1, 16
+    paddd %1, %2
+%endif
+%endmacro
+
+%macro HADDUW 2
+%if cpuflag(xop) && sizeof%1 == 16
+    vphadduwq %1, %1
+    movhlps   %2, %1
+    paddd     %1, %2
+%else
+    HADDUWD   %1, %2
+    HADDD     %1, %2
+%endif
+%endmacro
+
+%macro PALIGNR 4-5 ; [dst,] src1, src2, imm, tmp
+; AVX2 version uses a precalculated extra input that
+; can be re-used across calls
+%if sizeof%1==32
+                                 ; %3 = abcdefgh ijklmnop (lower address)
+                                 ; %2 = ABCDEFGH IJKLMNOP (higher address)
+    vperm2i128 %4, %1, %2, q0003 ; %4 = ijklmnop ABCDEFGH
+%if %3 < 16
+    palignr    %1, %4, %2, %3    ; %1 = bcdefghi jklmnopA
+%else
+    palignr    %1, %2, %4, %3-16 ; %1 = pABCDEFG HIJKLMNO
+%endif
+%elif cpuflag(ssse3)
+    %if %0==5
+        palignr %1, %2, %3, %4
+    %else
+        palignr %1, %2, %3
+    %endif
+%else
+    %define %%dst %1
+    %if %0==5
+        %ifnidn %1, %2
+            mova %%dst, %2
+        %endif
+        %rotate 1
+    %endif
+    %ifnidn %4, %2
+        mova %4, %2
+    %endif
+    %if mmsize==8
+        psllq  %%dst, (8-%3)*8
+        psrlq  %4, %3*8
+    %else
+        pslldq %%dst, 16-%3
+        psrldq %4, %3
+    %endif
+    por %%dst, %4
+%endif
+%endmacro
+
+%macro PSHUFLW 1+
+    %if mmsize == 8
+        pshufw %1
+    %else
+        pshuflw %1
+    %endif
+%endmacro
+
+; shift a mmxreg by n bytes, or a xmmreg by 2*n bytes
+; values shifted in are undefined
+; faster if dst==src
+%define PSLLPIX PSXLPIX l, -1, ;dst, src, shift
+%define PSRLPIX PSXLPIX r,  1, ;dst, src, shift
+%macro PSXLPIX 5
+    %if mmsize == 8
+        %if %5&1
+            ps%1lq %3, %4, %5*8
+        %else
+            pshufw %3, %4, (q3210<<8>>(8+%2*%5))&0xff
+        %endif
+    %else
+        ps%1ldq %3, %4, %5*2
+    %endif
+%endmacro
+
+%macro DEINTB 5 ; mask, reg1, mask, reg2, optional src to fill masks from
+%ifnum %5
+    pand   m%3, m%5, m%4 ; src .. y6 .. y4
+    pand   m%1, m%5, m%2 ; dst .. y6 .. y4
+%else
+    mova   m%1, %5
+    pand   m%3, m%1, m%4 ; src .. y6 .. y4
+    pand   m%1, m%1, m%2 ; dst .. y6 .. y4
+%endif
+    psrlw  m%2, 8        ; dst .. y7 .. y5
+    psrlw  m%4, 8        ; src .. y7 .. y5
+%endmacro
+
+%macro SUMSUB_BA 3-4
+%if %0==3
+    padd%1  m%2, m%3
+    padd%1  m%3, m%3
+    psub%1  m%3, m%2
+%elif avx_enabled
+    padd%1  m%4, m%2, m%3
+    psub%1  m%3, m%2
+    SWAP    %2, %4
+%else
+    mova    m%4, m%2
+    padd%1  m%2, m%3
+    psub%1  m%3, m%4
+%endif
+%endmacro
+
+%macro SUMSUB_BADC 5-6
+%if %0==6
+    SUMSUB_BA %1, %2, %3, %6
+    SUMSUB_BA %1, %4, %5, %6
+%else
+    padd%1  m%2, m%3
+    padd%1  m%4, m%5
+    padd%1  m%3, m%3
+    padd%1  m%5, m%5
+    psub%1  m%3, m%2
+    psub%1  m%5, m%4
+%endif
+%endmacro
+
+%macro HADAMARD4_V 4+
+    SUMSUB_BADC w, %1, %2, %3, %4
+    SUMSUB_BADC w, %1, %3, %2, %4
+%endmacro
+
+%macro HADAMARD8_V 8+
+    SUMSUB_BADC w, %1, %2, %3, %4
+    SUMSUB_BADC w, %5, %6, %7, %8
+    SUMSUB_BADC w, %1, %3, %2, %4
+    SUMSUB_BADC w, %5, %7, %6, %8
+    SUMSUB_BADC w, %1, %5, %2, %6
+    SUMSUB_BADC w, %3, %7, %4, %8
+%endmacro
+
+%macro TRANS_SSE2 5-6
+; TRANSPOSE2x2
+; %1: transpose width (d/q) - use SBUTTERFLY qdq for dq
+; %2: ord/unord (for compat with sse4, unused)
+; %3/%4: source regs
+; %5/%6: tmp regs
+%ifidn %1, d
+%define mask [mask_10]
+%define shift 16
+%elifidn %1, q
+%define mask [mask_1100]
+%define shift 32
+%endif
+%if %0==6 ; less dependency if we have two tmp
+    mova   m%5, mask   ; ff00
+    mova   m%6, m%4    ; x5x4
+    psll%1 m%4, shift  ; x4..
+    pand   m%6, m%5    ; x5..
+    pandn  m%5, m%3    ; ..x0
+    psrl%1 m%3, shift  ; ..x1
+    por    m%4, m%5    ; x4x0
+    por    m%3, m%6    ; x5x1
+%else ; more dependency, one insn less. sometimes faster, sometimes not
+    mova   m%5, m%4    ; x5x4
+    psll%1 m%4, shift  ; x4..
+    pxor   m%4, m%3    ; (x4^x1)x0
+    pand   m%4, mask   ; (x4^x1)..
+    pxor   m%3, m%4    ; x4x0
+    psrl%1 m%4, shift  ; ..(x1^x4)
+    pxor   m%5, m%4    ; x5x1
+    SWAP   %4, %3, %5
+%endif
+%endmacro
+
+%macro TRANS_SSE4 5-6 ; see above
+%ifidn %1, d
+%ifidn %2, ord
+    psrl%1  m%5, m%3, 16
+    pblendw m%5, m%4, q2222
+    psll%1  m%4, 16
+    pblendw m%4, m%3, q1111
+    SWAP     %3, %5
+%else
+%if avx_enabled
+    pblendw m%5, m%3, m%4, q2222
+    SWAP     %3, %5
+%else
+    mova    m%5, m%3
+    pblendw m%3, m%4, q2222
+%endif
+    psll%1  m%4, 16
+    psrl%1  m%5, 16
+    por     m%4, m%5
+%endif
+%elifidn %1, q
+    shufps m%5, m%3, m%4, q3131
+    shufps m%3, m%3, m%4, q2020
+    SWAP    %4, %5
+%endif
+%endmacro
+
+%macro TRANS_XOP 5-6
+%ifidn %1, d
+    vpperm m%5, m%3, m%4, [transd_shuf1]
+    vpperm m%3, m%3, m%4, [transd_shuf2]
+%elifidn %1, q
+    shufps m%5, m%3, m%4, q3131
+    shufps m%3, m%4, q2020
+%endif
+    SWAP    %4, %5
+%endmacro
+
+%macro HADAMARD 5-6
+; %1=distance in words (0 for vertical pass, 1/2/4 for horizontal passes)
+; %2=sumsub/max/amax (sum and diff / maximum / maximum of absolutes)
+; %3/%4: regs
+; %5(%6): tmpregs
+%if %1!=0 ; have to reorder stuff for horizontal op
+    %ifidn %2, sumsub
+        %define ORDER ord
+        ; sumsub needs order because a-b != b-a unless a=b
+    %else
+        %define ORDER unord
+        ; if we just max, order doesn't matter (allows pblendw+or in sse4)
+    %endif
+    %if %1==1
+        TRANS d, ORDER, %3, %4, %5, %6
+    %elif %1==2
+        %if mmsize==8
+            SBUTTERFLY dq, %3, %4, %5
+        %else
+            TRANS q, ORDER, %3, %4, %5, %6
+        %endif
+    %elif %1==4
+        SBUTTERFLY qdq, %3, %4, %5
+    %elif %1==8
+        SBUTTERFLY dqqq, %3, %4, %5
+    %endif
+%endif
+%ifidn %2, sumsub
+    SUMSUB_BA w, %3, %4, %5
+%else
+    %ifidn %2, amax
+        %if %0==6
+            ABSW2 m%3, m%4, m%3, m%4, m%5, m%6
+        %else
+            ABSW m%3, m%3, m%5
+            ABSW m%4, m%4, m%5
+        %endif
+    %endif
+    pmaxsw m%3, m%4
+%endif
+%endmacro
+
+
+%macro HADAMARD2_2D 6-7 sumsub
+    HADAMARD 0, sumsub, %1, %2, %5
+    HADAMARD 0, sumsub, %3, %4, %5
+    SBUTTERFLY %6, %1, %2, %5
+%ifnum %7
+    HADAMARD 0, amax, %1, %2, %5, %7
+%else
+    HADAMARD 0, %7, %1, %2, %5
+%endif
+    SBUTTERFLY %6, %3, %4, %5
+%ifnum %7
+    HADAMARD 0, amax, %3, %4, %5, %7
+%else
+    HADAMARD 0, %7, %3, %4, %5
+%endif
+%endmacro
+
+%macro HADAMARD4_2D 5-6 sumsub
+    HADAMARD2_2D %1, %2, %3, %4, %5, wd
+    HADAMARD2_2D %1, %3, %2, %4, %5, dq, %6
+    SWAP %2, %3
+%endmacro
+
+%macro HADAMARD4_2D_SSE 5-6 sumsub
+    HADAMARD  0, sumsub, %1, %2, %5 ; 1st V row 0 + 1
+    HADAMARD  0, sumsub, %3, %4, %5 ; 1st V row 2 + 3
+    SBUTTERFLY   wd, %1, %2, %5     ; %1: m0 1+0 %2: m1 1+0
+    SBUTTERFLY   wd, %3, %4, %5     ; %3: m0 3+2 %4: m1 3+2
+    HADAMARD2_2D %1, %3, %2, %4, %5, dq
+    SBUTTERFLY  qdq, %1, %2, %5
+    HADAMARD  0, %6, %1, %2, %5     ; 2nd H m1/m0 row 0+1
+    SBUTTERFLY  qdq, %3, %4, %5
+    HADAMARD  0, %6, %3, %4, %5     ; 2nd H m1/m0 row 2+3
+%endmacro
+
+%macro HADAMARD8_2D 9-10 sumsub
+    HADAMARD2_2D %1, %2, %3, %4, %9, wd
+    HADAMARD2_2D %5, %6, %7, %8, %9, wd
+    HADAMARD2_2D %1, %3, %2, %4, %9, dq
+    HADAMARD2_2D %5, %7, %6, %8, %9, dq
+    HADAMARD2_2D %1, %5, %3, %7, %9, qdq, %10
+    HADAMARD2_2D %2, %6, %4, %8, %9, qdq, %10
+%ifnidn %10, amax
+    SWAP %2, %5
+    SWAP %4, %7
+%endif
+%endmacro
+
+; doesn't include the "pmaddubsw hmul_8p" pass
+%macro HADAMARD8_2D_HMUL 10
+    HADAMARD4_V %1, %2, %3, %4, %9
+    HADAMARD4_V %5, %6, %7, %8, %9
+    SUMSUB_BADC w, %1, %5, %2, %6, %9
+    HADAMARD 2, sumsub, %1, %5, %9, %10
+    HADAMARD 2, sumsub, %2, %6, %9, %10
+    SUMSUB_BADC w, %3, %7, %4, %8, %9
+    HADAMARD 2, sumsub, %3, %7, %9, %10
+    HADAMARD 2, sumsub, %4, %8, %9, %10
+    HADAMARD 1, amax, %1, %5, %9, %10
+    HADAMARD 1, amax, %2, %6, %9, %5
+    HADAMARD 1, amax, %3, %7, %9, %5
+    HADAMARD 1, amax, %4, %8, %9, %5
+%endmacro
+
+%macro SUMSUB2_AB 4
+%if cpuflag(xop)
+    pmacs%1%1 m%4, m%3, [p%1_m2], m%2
+    pmacs%1%1 m%2, m%2, [p%1_2], m%3
+%elifnum %3
+    psub%1  m%4, m%2, m%3
+    psub%1  m%4, m%3
+    padd%1  m%2, m%2
+    padd%1  m%2, m%3
+%else
+    mova    m%4, m%2
+    padd%1  m%2, m%2
+    padd%1  m%2, %3
+    psub%1  m%4, %3
+    psub%1  m%4, %3
+%endif
+%endmacro
+
+%macro SUMSUBD2_AB 5
+%ifnum %4
+    psra%1  m%5, m%2, 1  ; %3: %3>>1
+    psra%1  m%4, m%3, 1  ; %2: %2>>1
+    padd%1  m%4, m%2     ; %3: %3>>1+%2
+    psub%1  m%5, m%3     ; %2: %2>>1-%3
+    SWAP     %2, %5
+    SWAP     %3, %4
+%else
+    mova    %5, m%2
+    mova    %4, m%3
+    psra%1  m%3, 1  ; %3: %3>>1
+    psra%1  m%2, 1  ; %2: %2>>1
+    padd%1  m%3, %5 ; %3: %3>>1+%2
+    psub%1  m%2, %4 ; %2: %2>>1-%3
+%endif
+%endmacro
+
+%macro DCT4_1D 5
+%ifnum %5
+    SUMSUB_BADC w, %4, %1, %3, %2, %5
+    SUMSUB_BA   w, %3, %4, %5
+    SUMSUB2_AB  w, %1, %2, %5
+    SWAP %1, %3, %4, %5, %2
+%else
+    SUMSUB_BADC w, %4, %1, %3, %2
+    SUMSUB_BA   w, %3, %4
+    mova     [%5], m%2
+    SUMSUB2_AB  w, %1, [%5], %2
+    SWAP %1, %3, %4, %2
+%endif
+%endmacro
+
+%macro IDCT4_1D 6-7
+%ifnum %6
+    SUMSUBD2_AB %1, %3, %5, %7, %6
+    ; %3: %3>>1-%5 %5: %3+%5>>1
+    SUMSUB_BA   %1, %4, %2, %7
+    ; %4: %2+%4 %2: %2-%4
+    SUMSUB_BADC %1, %5, %4, %3, %2, %7
+    ; %5: %2+%4 + (%3+%5>>1)
+    ; %4: %2+%4 - (%3+%5>>1)
+    ; %3: %2-%4 + (%3>>1-%5)
+    ; %2: %2-%4 - (%3>>1-%5)
+%else
+%ifidn %1, w
+    SUMSUBD2_AB %1, %3, %5, [%6], [%6+16]
+%else
+    SUMSUBD2_AB %1, %3, %5, [%6], [%6+32]
+%endif
+    SUMSUB_BA   %1, %4, %2
+    SUMSUB_BADC %1, %5, %4, %3, %2
+%endif
+    SWAP %2, %5, %4
+    ; %2: %2+%4 + (%3+%5>>1) row0
+    ; %3: %2-%4 + (%3>>1-%5) row1
+    ; %4: %2-%4 - (%3>>1-%5) row2
+    ; %5: %2+%4 - (%3+%5>>1) row3
+%endmacro
+
+
+%macro LOAD_DIFF 5-6 1
+%if HIGH_BIT_DEPTH
+%if %6 ; %5 aligned?
+    mova       %1, %4
+    psubw      %1, %5
+%else
+    movu       %1, %4
+    movu       %2, %5
+    psubw      %1, %2
+%endif
+%else ; !HIGH_BIT_DEPTH
+%ifidn %3, none
+    movh       %1, %4
+    movh       %2, %5
+    punpcklbw  %1, %2
+    punpcklbw  %2, %2
+    psubw      %1, %2
+%else
+    movh       %1, %4
+    punpcklbw  %1, %3
+    movh       %2, %5
+    punpcklbw  %2, %3
+    psubw      %1, %2
+%endif
+%endif ; HIGH_BIT_DEPTH
+%endmacro
+
+%macro LOAD_DIFF8x4 8 ; 4x dst, 1x tmp, 1x mul, 2x ptr
+%if BIT_DEPTH == 8 && cpuflag(ssse3)
+    movh       m%2, [%8+%1*FDEC_STRIDE]
+    movh       m%1, [%7+%1*FENC_STRIDE]
+    punpcklbw  m%1, m%2
+    movh       m%3, [%8+%2*FDEC_STRIDE]
+    movh       m%2, [%7+%2*FENC_STRIDE]
+    punpcklbw  m%2, m%3
+    movh       m%4, [%8+%3*FDEC_STRIDE]
+    movh       m%3, [%7+%3*FENC_STRIDE]
+    punpcklbw  m%3, m%4
+    movh       m%5, [%8+%4*FDEC_STRIDE]
+    movh       m%4, [%7+%4*FENC_STRIDE]
+    punpcklbw  m%4, m%5
+    pmaddubsw  m%1, m%6
+    pmaddubsw  m%2, m%6
+    pmaddubsw  m%3, m%6
+    pmaddubsw  m%4, m%6
+%else
+    LOAD_DIFF  m%1, m%5, m%6, [%7+%1*FENC_STRIDEB], [%8+%1*FDEC_STRIDEB]
+    LOAD_DIFF  m%2, m%5, m%6, [%7+%2*FENC_STRIDEB], [%8+%2*FDEC_STRIDEB]
+    LOAD_DIFF  m%3, m%5, m%6, [%7+%3*FENC_STRIDEB], [%8+%3*FDEC_STRIDEB]
+    LOAD_DIFF  m%4, m%5, m%6, [%7+%4*FENC_STRIDEB], [%8+%4*FDEC_STRIDEB]
+%endif
+%endmacro
+
+%macro STORE_DCT 6
+    movq   [%5+%6+ 0], m%1
+    movq   [%5+%6+ 8], m%2
+    movq   [%5+%6+16], m%3
+    movq   [%5+%6+24], m%4
+    movhps [%5+%6+32], m%1
+    movhps [%5+%6+40], m%2
+    movhps [%5+%6+48], m%3
+    movhps [%5+%6+56], m%4
+%endmacro
+
+%macro STORE_IDCT 4
+    movhps [r0-4*FDEC_STRIDE], %1
+    movh   [r0-3*FDEC_STRIDE], %1
+    movhps [r0-2*FDEC_STRIDE], %2
+    movh   [r0-1*FDEC_STRIDE], %2
+    movhps [r0+0*FDEC_STRIDE], %3
+    movh   [r0+1*FDEC_STRIDE], %3
+    movhps [r0+2*FDEC_STRIDE], %4
+    movh   [r0+3*FDEC_STRIDE], %4
+%endmacro
+
+%macro LOAD_DIFF_8x4P 7-11 r0,r2,0,1 ; 4x dest, 2x temp, 2x pointer, increment, aligned?
+    LOAD_DIFF m%1, m%5, m%7, [%8],      [%9],      %11
+    LOAD_DIFF m%2, m%6, m%7, [%8+r1],   [%9+r3],   %11
+    LOAD_DIFF m%3, m%5, m%7, [%8+2*r1], [%9+2*r3], %11
+    LOAD_DIFF m%4, m%6, m%7, [%8+r4],   [%9+r5],   %11
+%if %10
+    lea %8, [%8+4*r1]
+    lea %9, [%9+4*r3]
+%endif
+%endmacro
+
+; 2xdst, 2xtmp, 2xsrcrow
+%macro LOAD_DIFF16x2_AVX2 6
+    pmovzxbw m%1, [r1+%5*FENC_STRIDE]
+    pmovzxbw m%2, [r1+%6*FENC_STRIDE]
+    pmovzxbw m%3, [r2+(%5-4)*FDEC_STRIDE]
+    pmovzxbw m%4, [r2+(%6-4)*FDEC_STRIDE]
+    psubw    m%1, m%3
+    psubw    m%2, m%4
+%endmacro
+
+%macro DIFFx2 6-7
+    movh       %3, %5
+    punpcklbw  %3, %4
+    psraw      %1, 6
+    paddsw     %1, %3
+    movh       %3, %6
+    punpcklbw  %3, %4
+    psraw      %2, 6
+    paddsw     %2, %3
+    packuswb   %2, %1
+%endmacro
+
+; (high depth) in: %1, %2, min to clip, max to clip, mem128
+; in: %1, tmp, %3, mem64
+%macro STORE_DIFF 4-5
+%if HIGH_BIT_DEPTH
+    psrad      %1, 6
+    psrad      %2, 6
+    packssdw   %1, %2
+    paddw      %1, %5
+    CLIPW      %1, %3, %4
+    mova       %5, %1
+%else
+    movh       %2, %4
+    punpcklbw  %2, %3
+    psraw      %1, 6
+    paddsw     %1, %2
+    packuswb   %1, %1
+    movh       %4, %1
+%endif
+%endmacro
+
+%macro SHUFFLE_MASK_W 8
+    %rep 8
+        %if %1>=0x80
+            db %1, %1
+        %else
+            db %1*2
+            db %1*2+1
+        %endif
+        %rotate 1
+    %endrep
+%endmacro
+
+; instruction, accum, input, iteration (zero to swap, nonzero to add)
+%macro ACCUM 4
+%if %4
+    %1        m%2, m%3
+%else
+    SWAP       %2, %3
+%endif
+%endmacro
+
+; IACA support
+%macro IACA_START 0
+    mov ebx, 111
+    db 0x64, 0x67, 0x90
+%endmacro
+
+%macro IACA_END 0
+    mov ebx, 222
+    db 0x64, 0x67, 0x90
+%endmacro
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/yuv.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,212 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+
+#include "common.h"
+#include "yuv.h"
+#include "shortyuv.h"
+#include "picyuv.h"
+#include "primitives.h"
+
+using namespace X265_NS;
+
+/* Default constructor: null all three plane pointers.  Buffers are
+ * allocated later by create() and released by destroy(). */
+Yuv::Yuv()
+{
+    m_buf[0] = NULL;
+    m_buf[1] = NULL;
+    m_buf[2] = NULL;
+}
+
+/* Allocate plane storage for a size x size CU in colorspace csp.
+ * Returns true on success, false on allocation failure (CHECKED_MALLOC
+ * jumps to the 'fail' label on OOM).  For monochrome (I400) only the
+ * luma plane is allocated; m_csize is set to MAX_INT as a "no chroma"
+ * sentinel.  Otherwise a single allocation holds Y, then Cb, then Cr,
+ * with 8 extra pixels of padding for over-reading SIMD loads. */
+bool Yuv::create(uint32_t size, int csp)
+{
+    m_csp = csp;
+    m_hChromaShift = CHROMA_H_SHIFT(csp);
+    m_vChromaShift = CHROMA_V_SHIFT(csp);
+
+    m_size  = size;
+    m_part = partitionFromSizes(size, size);
+
+    if (csp == X265_CSP_I400)
+    {
+        CHECKED_MALLOC(m_buf[0], pixel, size * size + 8);
+        m_buf[1] = m_buf[2] = 0;
+        m_csize = MAX_INT;
+        return true;
+    }
+    else
+    {
+        m_csize = size >> m_hChromaShift;
+
+        size_t sizeL = size * size;
+        size_t sizeC = sizeL >> (m_vChromaShift + m_hChromaShift);
+
+        X265_CHECK((sizeC & 15) == 0, "invalid size");
+
+        // memory allocation (padded for SIMD reads)
+        CHECKED_MALLOC(m_buf[0], pixel, sizeL + sizeC * 2 + 8);
+        m_buf[1] = m_buf[0] + sizeL;        // Cb immediately follows Y
+        m_buf[2] = m_buf[0] + sizeL + sizeC; // Cr immediately follows Cb
+        return true;
+    }
+
+fail:
+    return false;
+}
+
+/* Release plane storage.  Freeing m_buf[0] releases all three planes,
+ * since the chroma pointers alias into the single luma allocation made
+ * by create(). */
+void Yuv::destroy()
+{
+    X265_FREE(m_buf[0]);
+}
+
+/* Copy this CU's planes into the full picture dstPic at the block
+ * addressed by (cuAddr, absPartIdx).  Chroma is skipped for I400. */
+void Yuv::copyToPicYuv(PicYuv& dstPic, uint32_t cuAddr, uint32_t absPartIdx) const
+{
+    pixel* dstY = dstPic.getLumaAddr(cuAddr, absPartIdx);
+    primitives.cu[m_part].copy_pp(dstY, dstPic.m_stride, m_buf[0], m_size);
+
+    if (m_csp != X265_CSP_I400) {
+        pixel* dstU = dstPic.getCbAddr(cuAddr, absPartIdx);
+        pixel* dstV = dstPic.getCrAddr(cuAddr, absPartIdx);
+        primitives.chroma[m_csp].cu[m_part].copy_pp(dstU, dstPic.m_strideC, m_buf[1], m_csize);
+        primitives.chroma[m_csp].cu[m_part].copy_pp(dstV, dstPic.m_strideC, m_buf[2], m_csize);
+    }
+}
+
+/* Fill this CU's planes from the full picture srcPic at the block
+ * addressed by (cuAddr, absPartIdx).  Inverse of copyToPicYuv();
+ * chroma is skipped for I400. */
+void Yuv::copyFromPicYuv(const PicYuv& srcPic, uint32_t cuAddr, uint32_t absPartIdx)
+{
+    const pixel* srcY = srcPic.getLumaAddr(cuAddr, absPartIdx);
+    primitives.cu[m_part].copy_pp(m_buf[0], m_size, srcY, srcPic.m_stride);
+
+    if (m_csp != X265_CSP_I400) {
+        const pixel* srcU = srcPic.getCbAddr(cuAddr, absPartIdx);
+        const pixel* srcV = srcPic.getCrAddr(cuAddr, absPartIdx);
+        primitives.chroma[m_csp].cu[m_part].copy_pp(m_buf[1], m_csize, srcU, srcPic.m_strideC);
+        primitives.chroma[m_csp].cu[m_part].copy_pp(m_buf[2], m_csize, srcV, srcPic.m_strideC);
+    }
+}
+
+/* Copy all planes from srcYuv into this buffer.  srcYuv must not be
+ * larger than this buffer (checked); each copy uses its own buffer's
+ * stride.  Chroma is skipped for I400. */
+void Yuv::copyFromYuv(const Yuv& srcYuv)
+{
+    X265_CHECK(m_size >= srcYuv.m_size, "invalid size\n");
+
+    primitives.cu[m_part].copy_pp(m_buf[0], m_size, srcYuv.m_buf[0], srcYuv.m_size);
+    if (m_csp != X265_CSP_I400) {
+        primitives.chroma[m_csp].cu[m_part].copy_pp(m_buf[1], m_csize, srcYuv.m_buf[1], srcYuv.m_csize);
+        primitives.chroma[m_csp].cu[m_part].copy_pp(m_buf[2], m_csize, srcYuv.m_buf[2], srcYuv.m_csize);
+    }
+}
+
+/* This version is intended for use by ME, which requires FENC_STRIDE for luma fenc pixels.
+ * Copies the PU at absPartIdx (shape selected by partEnum) out of srcYuv
+ * into this buffer; chroma planes are copied only when bChroma is set
+ * (the caller is responsible for passing false for monochrome input --
+ * no m_csp check is made here). */
+void Yuv::copyPUFromYuv(const Yuv& srcYuv, uint32_t absPartIdx, int partEnum, bool bChroma)
+{
+    X265_CHECK(m_size == FENC_STRIDE && m_size >= srcYuv.m_size, "PU buffer size mismatch\n");
+
+    const pixel* srcY = srcYuv.m_buf[0] + getAddrOffset(absPartIdx, srcYuv.m_size);
+    primitives.pu[partEnum].copy_pp(m_buf[0], m_size, srcY, srcYuv.m_size);
+
+    if (bChroma)
+    {
+        const pixel* srcU = srcYuv.m_buf[1] + srcYuv.getChromaAddrOffset(absPartIdx);
+        const pixel* srcV = srcYuv.m_buf[2] + srcYuv.getChromaAddrOffset(absPartIdx);
+        primitives.chroma[m_csp].pu[partEnum].copy_pp(m_buf[1], m_csize, srcU, srcYuv.m_csize);
+        primitives.chroma[m_csp].pu[partEnum].copy_pp(m_buf[2], m_csize, srcV, srcYuv.m_csize);
+    }
+}
+
+/* Copy this (small) buffer into the sub-block of the larger dstYuv at
+ * absPartIdx.  The copy shape is this buffer's m_part; chroma is
+ * skipped for I400. */
+void Yuv::copyToPartYuv(Yuv& dstYuv, uint32_t absPartIdx) const
+{
+    pixel* dstY = dstYuv.getLumaAddr(absPartIdx);
+    primitives.cu[m_part].copy_pp(dstY, dstYuv.m_size, m_buf[0], m_size);
+
+    if (m_csp != X265_CSP_I400) {
+        pixel* dstU = dstYuv.getCbAddr(absPartIdx);
+        pixel* dstV = dstYuv.getCrAddr(absPartIdx);
+        primitives.chroma[m_csp].cu[m_part].copy_pp(dstU, dstYuv.m_csize, m_buf[1], m_csize);
+        primitives.chroma[m_csp].cu[m_part].copy_pp(dstV, dstYuv.m_csize, m_buf[2], m_csize);
+    }
+}
+
+/* Copy the sub-block of this (large) buffer at absPartIdx into the
+ * origin of the smaller dstYuv.  The copy shape is dstYuv's m_part;
+ * chroma is skipped for I400. */
+void Yuv::copyPartToYuv(Yuv& dstYuv, uint32_t absPartIdx) const
+{
+    pixel* srcY = m_buf[0] + getAddrOffset(absPartIdx, m_size);
+    pixel* dstY = dstYuv.m_buf[0];
+    primitives.cu[dstYuv.m_part].copy_pp(dstY, dstYuv.m_size, srcY, m_size);
+
+    if (m_csp != X265_CSP_I400) {
+        pixel* srcU = m_buf[1] + getChromaAddrOffset(absPartIdx);
+        pixel* srcV = m_buf[2] + getChromaAddrOffset(absPartIdx);
+        pixel* dstU = dstYuv.m_buf[1];
+        pixel* dstV = dstYuv.m_buf[2];
+        primitives.chroma[m_csp].cu[dstYuv.m_part].copy_pp(dstU, dstYuv.m_csize, srcU, m_csize);
+        primitives.chroma[m_csp].cu[dstYuv.m_part].copy_pp(dstV, dstYuv.m_csize, srcV, m_csize);
+    }
+}
+
+/* recon = clip(pred + residual): add the int16 residual srcYuv1 to the
+ * pixel prediction srcYuv0 with clipping, writing into this buffer.
+ * The primitive index is derived from log2SizeL (log2SizeL - 2);
+ * chroma is skipped for I400. */
+void Yuv::addClip(const Yuv& srcYuv0, const ShortYuv& srcYuv1, uint32_t log2SizeL)
+{
+    primitives.cu[log2SizeL - 2].add_ps(m_buf[0], m_size, srcYuv0.m_buf[0], srcYuv1.m_buf[0], srcYuv0.m_size, srcYuv1.m_size);
+    if (m_csp != X265_CSP_I400) {
+        primitives.chroma[m_csp].cu[log2SizeL - 2].add_ps(m_buf[1], m_csize, srcYuv0.m_buf[1], srcYuv1.m_buf[1], srcYuv0.m_csize, srcYuv1.m_csize);
+        primitives.chroma[m_csp].cu[log2SizeL - 2].add_ps(m_buf[2], m_csize, srcYuv0.m_buf[2], srcYuv1.m_buf[2], srcYuv0.m_csize, srcYuv1.m_csize);
+    }
+}
+
+/* Bidirectional averaging: combine the two int16 prediction buffers
+ * srcYuv0/srcYuv1 at absPartIdx into pixels in this buffer via the
+ * addAvg primitive.  bLuma/bChroma select which planes are processed;
+ * the chroma path does not check m_csp, so callers presumably pass
+ * bChroma=false for monochrome -- NOTE(review): confirm at call sites. */
+void Yuv::addAvg(const ShortYuv& srcYuv0, const ShortYuv& srcYuv1, uint32_t absPartIdx, uint32_t width, uint32_t height, bool bLuma, bool bChroma)
+{
+    int part = partitionFromSizes(width, height);
+
+    if (bLuma)
+    {
+        const int16_t* srcY0 = srcYuv0.getLumaAddr(absPartIdx);
+        const int16_t* srcY1 = srcYuv1.getLumaAddr(absPartIdx);
+        pixel* dstY = getLumaAddr(absPartIdx);
+        primitives.pu[part].addAvg(srcY0, srcY1, dstY, srcYuv0.m_size, srcYuv1.m_size, m_size);
+    }
+    if (bChroma)
+    {
+        const int16_t* srcU0 = srcYuv0.getCbAddr(absPartIdx);
+        const int16_t* srcV0 = srcYuv0.getCrAddr(absPartIdx);
+        const int16_t* srcU1 = srcYuv1.getCbAddr(absPartIdx);
+        const int16_t* srcV1 = srcYuv1.getCrAddr(absPartIdx);
+        pixel* dstU = getCbAddr(absPartIdx);
+        pixel* dstV = getCrAddr(absPartIdx);
+        primitives.chroma[m_csp].pu[part].addAvg(srcU0, srcU1, dstU, srcYuv0.m_csize, srcYuv1.m_csize, m_csize);
+        primitives.chroma[m_csp].pu[part].addAvg(srcV0, srcV1, dstV, srcYuv0.m_csize, srcYuv1.m_csize, m_csize);
+    }
+}
+
+/* Copy the luma sub-block at absPartIdx from this buffer to the same
+ * position in dstYuv; block shape selected by log2Size. */
+void Yuv::copyPartToPartLuma(Yuv& dstYuv, uint32_t absPartIdx, uint32_t log2Size) const
+{
+    const pixel* src = getLumaAddr(absPartIdx);
+    pixel* dst = dstYuv.getLumaAddr(absPartIdx);
+    primitives.cu[log2Size - 2].copy_pp(dst, dstYuv.m_size, src, m_size);
+}
+
+/* Copy both chroma sub-blocks at absPartIdx from this buffer to the
+ * same position in dstYuv; block shape selected by the luma log2 size.
+ * No m_csp check: caller must not invoke this for I400. */
+void Yuv::copyPartToPartChroma(Yuv& dstYuv, uint32_t absPartIdx, uint32_t log2SizeL) const
+{
+    const pixel* srcU = getCbAddr(absPartIdx);
+    const pixel* srcV = getCrAddr(absPartIdx);
+    pixel* dstU = dstYuv.getCbAddr(absPartIdx);
+    pixel* dstV = dstYuv.getCrAddr(absPartIdx);
+    primitives.chroma[m_csp].cu[log2SizeL - 2].copy_pp(dstU, dstYuv.m_csize, srcU, m_csize);
+    primitives.chroma[m_csp].cu[log2SizeL - 2].copy_pp(dstV, dstYuv.m_csize, srcV, m_csize);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/common/yuv.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,112 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_YUV_H
+#define X265_YUV_H
+
+#include "common.h"
+#include "primitives.h"
+
+namespace X265_NS {
+// private namespace
+
+class ShortYuv;
+class PicYuv;
+
+/* A Yuv instance holds pixels for a square CU (64x64 down to 8x8) for all three planes
+ * these are typically used to hold fenc, predictions, or reconstructed blocks */
+class Yuv
+{
+public:
+
+    pixel*   m_buf[3];       // plane pointers: [0]=Y, [1]=Cb, [2]=Cr (chroma alias into the Y allocation)
+
+    uint32_t m_size;         // luma block width, also used as the luma stride
+    uint32_t m_csize;        // chroma block width/stride (MAX_INT sentinel for I400)
+    int      m_part;         // cached partition enum size
+
+    int      m_csp;          // X265_CSP_* colorspace
+    int      m_hChromaShift; // horizontal chroma subsampling shift
+    int      m_vChromaShift; // vertical chroma subsampling shift
+
+    Yuv();
+
+    bool   create(uint32_t size, int csp);
+    void   destroy();
+
+    // Copy YUV buffer to picture buffer
+    void   copyToPicYuv(PicYuv& destPicYuv, uint32_t cuAddr, uint32_t absPartIdx) const;
+
+    // Copy YUV buffer from picture buffer
+    void   copyFromPicYuv(const PicYuv& srcPicYuv, uint32_t cuAddr, uint32_t absPartIdx);
+
+    // Copy from same size YUV buffer
+    void   copyFromYuv(const Yuv& srcYuv);
+
+    // Copy portion of srcYuv into ME prediction buffer
+    void   copyPUFromYuv(const Yuv& srcYuv, uint32_t absPartIdx, int partEnum, bool bChroma);
+
+    // Copy Small YUV buffer to the part of other Big YUV buffer
+    void   copyToPartYuv(Yuv& dstYuv, uint32_t absPartIdx) const;
+
+    // Copy the part of Big YUV buffer to other Small YUV buffer
+    void   copyPartToYuv(Yuv& dstYuv, uint32_t absPartIdx) const;
+
+    // Clip(srcYuv0 + srcYuv1) -> m_buf .. aka recon = clip(pred + residual)
+    void   addClip(const Yuv& srcYuv0, const ShortYuv& srcYuv1, uint32_t log2SizeL);
+
+    // (srcYuv0 + srcYuv1)/2 for YUV partition (bidir averaging)
+    void   addAvg(const ShortYuv& srcYuv0, const ShortYuv& srcYuv1, uint32_t absPartIdx, uint32_t width, uint32_t height, bool bLuma, bool bChroma);
+
+    void copyPartToPartLuma(Yuv& dstYuv, uint32_t absPartIdx, uint32_t log2Size) const;
+    void copyPartToPartChroma(Yuv& dstYuv, uint32_t absPartIdx, uint32_t log2SizeL) const;
+
+    // Plane address helpers: pointer to the pixel at the given z-scan part index
+    pixel* getLumaAddr(uint32_t absPartIdx)                      { return m_buf[0] + getAddrOffset(absPartIdx, m_size); }
+    pixel* getCbAddr(uint32_t absPartIdx)                        { return m_buf[1] + getChromaAddrOffset(absPartIdx); }
+    pixel* getCrAddr(uint32_t absPartIdx)                        { return m_buf[2] + getChromaAddrOffset(absPartIdx); }
+    pixel* getChromaAddr(uint32_t chromaId, uint32_t absPartIdx) { return m_buf[chromaId] + getChromaAddrOffset(absPartIdx); }
+
+    const pixel* getLumaAddr(uint32_t absPartIdx) const                      { return m_buf[0] + getAddrOffset(absPartIdx, m_size); }
+    const pixel* getCbAddr(uint32_t absPartIdx) const                        { return m_buf[1] + getChromaAddrOffset(absPartIdx); }
+    const pixel* getCrAddr(uint32_t absPartIdx) const                        { return m_buf[2] + getChromaAddrOffset(absPartIdx); }
+    const pixel* getChromaAddr(uint32_t chromaId, uint32_t absPartIdx) const { return m_buf[chromaId] + getChromaAddrOffset(absPartIdx); }
+
+    // Offset (in pixels) of a z-scan part index within a chroma plane,
+    // derived from the luma pel position scaled by the chroma shifts
+    int getChromaAddrOffset(uint32_t absPartIdx) const
+    {
+        int blkX = g_zscanToPelX[absPartIdx] >> m_hChromaShift;
+        int blkY = g_zscanToPelY[absPartIdx] >> m_vChromaShift;
+
+        return blkX + blkY * m_csize;
+    }
+
+    // Offset (in pixels) of a z-scan part index within a plane of the given width
+    static int getAddrOffset(uint32_t absPartIdx, uint32_t width)
+    {
+        int blkX = g_zscanToPelX[absPartIdx];
+        int blkY = g_zscanToPelY[absPartIdx];
+
+        return blkX + blkY * width;
+    }
+};
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/compat/getopt/LGPL.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,504 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/compat/getopt/getopt.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1066 @@
+/* Getopt for GNU.
+   NOTE: getopt is now part of the C library, so if you don't know what
+   "Keep this file name-space clean" means, talk to drepper@gnu.org
+   before changing it!
+   Copyright (C) 1987,88,89,90,91,92,93,94,95,96,98,99,2000,2001
+   	Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* This tells Alpha OSF/1 not to define a getopt prototype in <stdio.h>.
+   Ditto for AIX 3.2 and <stdlib.h>.  */
+#ifndef _NO_PROTO
+# define _NO_PROTO
+#endif
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#if !defined __STDC__ || !__STDC__
+/* This is a separate conditional since some stdc systems
+   reject `defined (const)'.  */
+# ifndef const
+#  define const
+# endif
+#endif
+
+#include <stdio.h>
+
+/* Comment out all this code if we are using the GNU C Library, and are not
+   actually compiling the library itself.  This code is part of the GNU C
+   Library, but also included in many other GNU distributions.  Compiling
+   and linking in this code is a waste when using the GNU C library
+   (especially if it is a shared library).  Rather than having every GNU
+   program understand `configure --with-gnu-libc' and omit the object files,
+   it is simpler to just do this in the source for each such file.  */
+
+#define GETOPT_INTERFACE_VERSION 2
+#if !defined _LIBC && defined __GLIBC__ && __GLIBC__ >= 2
+# include <gnu-versions.h>
+# if _GNU_GETOPT_INTERFACE_VERSION == GETOPT_INTERFACE_VERSION
+#  define ELIDE_CODE
+# endif
+#endif
+
+#ifndef ELIDE_CODE
+
+
+/* This needs to come after some library #include
+   to get __GNU_LIBRARY__ defined.  */
+#ifdef	__GNU_LIBRARY__
+/* Don't include stdlib.h for non-GNU C libraries because some of them
+   contain conflicting prototypes for getopt.  */
+# include <stdlib.h>
+# include <unistd.h>
+#endif	/* GNU C library.  */
+
+#ifdef VMS
+# include <unixlib.h>
+# if HAVE_STRING_H - 0
+#  include <string.h>
+# endif
+#endif
+
+#ifndef _
+/* This is for other GNU distributions with internationalized messages.  */
+# if defined HAVE_LIBINTL_H || defined _LIBC
+#  include <libintl.h>
+#  ifndef _
+#   define _(msgid)	gettext (msgid)
+#  endif
+# else
+#  define _(msgid)	(msgid)
+# endif
+#endif
+
+/* This version of `getopt' appears to the caller like standard Unix `getopt'
+   but it behaves differently for the user, since it allows the user
+   to intersperse the options with the other arguments.
+
+   As `getopt' works, it permutes the elements of ARGV so that,
+   when it is done, all the options precede everything else.  Thus
+   all application programs are extended to handle flexible argument order.
+
+   Setting the environment variable POSIXLY_CORRECT disables permutation.
+   Then the behavior is completely standard.
+
+   GNU application programs can use a third alternative mode in which
+   they can distinguish the relative order of options and other arguments.  */
+
+#include "getopt.h"
+
+/* For communication from `getopt' to the caller.
+   When `getopt' finds an option that takes an argument,
+   the argument value is returned here.
+   Also, when `ordering' is RETURN_IN_ORDER,
+   each non-option ARGV-element is returned here.  */
+
+char *optarg;
+
+/* Index in ARGV of the next element to be scanned.
+   This is used for communication to and from the caller
+   and for communication between successive calls to `getopt'.
+
+   On entry to `getopt', zero means this is the first call; initialize.
+
+   When `getopt' returns -1, this is the index of the first of the
+   non-option elements that the caller should itself scan.
+
+   Otherwise, `optind' communicates from one call to the next
+   how much of ARGV has been scanned so far.  */
+
+/* 1003.2 says this must be 1 before any call.  */
+int optind = 1;
+
+/* Formerly, initialization of getopt depended on optind==0, which
+   causes problems with re-calling getopt as programs generally don't
+   know that. */
+
+int __getopt_initialized;
+
+/* The next char to be scanned in the option-element
+   in which the last option character we returned was found.
+   This allows us to pick up the scan where we left off.
+
+   If this is zero, or a null string, it means resume the scan
+   by advancing to the next ARGV-element.  */
+
+static char *nextchar;
+
+/* Callers store zero here to inhibit the error message
+   for unrecognized options.  */
+
+int opterr = 1;
+
+/* Set to an option character which was unrecognized.
+   This must be initialized on some systems to avoid linking in the
+   system's own getopt implementation.  */
+
+int optopt = '?';
+
+/* Describe how to deal with options that follow non-option ARGV-elements.
+
+   If the caller did not specify anything,
+   the default is REQUIRE_ORDER if the environment variable
+   POSIXLY_CORRECT is defined, PERMUTE otherwise.
+
+   REQUIRE_ORDER means don't recognize them as options;
+   stop option processing when the first non-option is seen.
+   This is what Unix does.
+   This mode of operation is selected by either setting the environment
+   variable POSIXLY_CORRECT, or using `+' as the first character
+   of the list of option characters.
+
+   PERMUTE is the default.  We permute the contents of ARGV as we scan,
+   so that eventually all the non-options are at the end.  This allows options
+   to be given in any order, even with programs that were not written to
+   expect this.
+
+   RETURN_IN_ORDER is an option available to programs that were written
+   to expect options and other ARGV-elements in any order and that care about
+   the ordering of the two.  We describe each non-option ARGV-element
+   as if it were the argument of an option with character code 1.
+   Using `-' as the first character of the list of option characters
+   selects this mode of operation.
+
+   The special argument `--' forces an end of option-scanning regardless
+   of the value of `ordering'.  In the case of RETURN_IN_ORDER, only
+   `--' can cause `getopt' to return -1 with `optind' != ARGC.  */
+
+static enum
+{
+  REQUIRE_ORDER, PERMUTE, RETURN_IN_ORDER
+} ordering;
+
+/* Value of POSIXLY_CORRECT environment variable.  */
+static char *posixly_correct;
+
+#ifdef	__GNU_LIBRARY__
+/* We want to avoid inclusion of string.h with non-GNU libraries
+   because there are many ways it can cause trouble.
+   On some systems, it contains special magic macros that don't work
+   in GCC.  */
+# include <string.h>
+# define my_index	strchr
+#else
+
+# if HAVE_STRING_H
+#  include <string.h>
+# else
+#  include <strings.h>
+# endif
+
+/* Avoid depending on library functions or files
+   whose names are inconsistent.  */
+
+#ifndef getenv
+extern char *getenv ();
+#endif
+
+static char *
+my_index (str, chr)
+     const char *str;
+     int chr;
+{
+  while (*str)
+    {
+      if (*str == chr)
+	return (char *) str;
+      str++;
+    }
+  return 0;
+}
+
+/* If using GCC, we can safely declare strlen this way.
+   If not using GCC, it is ok not to declare it.  */
+#ifdef __GNUC__
+/* Note that Motorola Delta 68k R3V7 comes with GCC but not stddef.h.
+   That was relevant to code that was here before.  */
+# if (!defined __STDC__ || !__STDC__) && !defined strlen
+/* gcc with -traditional declares the built-in strlen to return int,
+   and has done so at least since version 2.4.5. -- rms.  */
+extern int strlen (const char *);
+# endif /* not __STDC__ */
+#endif /* __GNUC__ */
+
+#endif /* not __GNU_LIBRARY__ */
+
+/* Handle permutation of arguments.  */
+
+/* Describe the part of ARGV that contains non-options that have
+   been skipped.  `first_nonopt' is the index in ARGV of the first of them;
+   `last_nonopt' is the index after the last of them.  */
+
+static int first_nonopt;
+static int last_nonopt;
+
+#ifdef _LIBC
+/* Stored original parameters.
+   XXX This is no good solution.  We should rather copy the args so
+   that we can compare them later.  But we must not use malloc(3).  */
+extern int __libc_argc;
+extern char **__libc_argv;
+
+/* Bash 2.0 gives us an environment variable containing flags
+   indicating ARGV elements that should not be considered arguments.  */
+
+# ifdef USE_NONOPTION_FLAGS
+/* Defined in getopt_init.c  */
+extern char *__getopt_nonoption_flags;
+
+static int nonoption_flags_max_len;
+static int nonoption_flags_len;
+# endif
+
+# ifdef USE_NONOPTION_FLAGS
+#  define SWAP_FLAGS(ch1, ch2) \
+  if (nonoption_flags_len > 0)						      \
+    {									      \
+      char __tmp = __getopt_nonoption_flags[ch1];			      \
+      __getopt_nonoption_flags[ch1] = __getopt_nonoption_flags[ch2];	      \
+      __getopt_nonoption_flags[ch2] = __tmp;				      \
+    }
+# else
+#  define SWAP_FLAGS(ch1, ch2)
+# endif
+#else	/* !_LIBC */
+# define SWAP_FLAGS(ch1, ch2)
+#endif	/* _LIBC */
+
+/* Exchange two adjacent subsequences of ARGV.
+   One subsequence is elements [first_nonopt,last_nonopt)
+   which contains all the non-options that have been skipped so far.
+   The other is elements [last_nonopt,optind), which contains all
+   the options processed since those non-options were skipped.
+
+   `first_nonopt' and `last_nonopt' are relocated so that they describe
+   the new indices of the non-options in ARGV after they are moved.  */
+
+#if defined __STDC__ && __STDC__
+static void exchange (char **);
+#endif
+
+static void
+exchange (argv)
+     char **argv;
+{
+  int bottom = first_nonopt;
+  int middle = last_nonopt;
+  int top = optind;
+  char *tem;
+
+  /* Exchange the shorter segment with the far end of the longer segment.
+     That puts the shorter segment into the right place.
+     It leaves the longer segment in the right place overall,
+     but it consists of two parts that need to be swapped next.  */
+
+#if defined _LIBC && defined USE_NONOPTION_FLAGS
+  /* First make sure the handling of the `__getopt_nonoption_flags'
+     string can work normally.  Our top argument must be in the range
+     of the string.  */
+  if (nonoption_flags_len > 0 && top >= nonoption_flags_max_len)
+    {
+      /* We must extend the array.  The user plays games with us and
+	 presents new arguments.  */
+      char *new_str = malloc (top + 1);
+      if (new_str == NULL)
+	nonoption_flags_len = nonoption_flags_max_len = 0;
+      else
+	{
+	  memset (__mempcpy (new_str, __getopt_nonoption_flags,
+			     nonoption_flags_max_len),
+		  '\0', top + 1 - nonoption_flags_max_len);
+	  nonoption_flags_max_len = top + 1;
+	  __getopt_nonoption_flags = new_str;
+	}
+    }
+#endif
+
+  while (top > middle && middle > bottom)
+    {
+      if (top - middle > middle - bottom)
+	{
+	  /* Bottom segment is the short one.  */
+	  int len = middle - bottom;
+	  register int i;
+
+	  /* Swap it with the top part of the top segment.  */
+	  for (i = 0; i < len; i++)
+	    {
+	      tem = argv[bottom + i];
+	      argv[bottom + i] = argv[top - (middle - bottom) + i];
+	      argv[top - (middle - bottom) + i] = tem;
+	      SWAP_FLAGS (bottom + i, top - (middle - bottom) + i);
+	    }
+	  /* Exclude the moved bottom segment from further swapping.  */
+	  top -= len;
+	}
+      else
+	{
+	  /* Top segment is the short one.  */
+	  int len = top - middle;
+	  register int i;
+
+	  /* Swap it with the bottom part of the bottom segment.  */
+	  for (i = 0; i < len; i++)
+	    {
+	      tem = argv[bottom + i];
+	      argv[bottom + i] = argv[middle + i];
+	      argv[middle + i] = tem;
+	      SWAP_FLAGS (bottom + i, middle + i);
+	    }
+	  /* Exclude the moved top segment from further swapping.  */
+	  bottom += len;
+	}
+    }
+
+  /* Update records for the slots the non-options now occupy.  */
+
+  first_nonopt += (optind - last_nonopt);
+  last_nonopt = optind;
+}
+
+/* Initialize the internal data when the first call is made.  */
+
+#if defined __STDC__ && __STDC__
+static const char *_getopt_initialize (int, char *const *, const char *);
+#endif
+static const char *
+_getopt_initialize (argc, argv, optstring)
+     int argc;
+     char *const *argv;
+     const char *optstring;
+{
+  /* Start processing options with ARGV-element 1 (since ARGV-element 0
+     is the program name); the sequence of previously skipped
+     non-option ARGV-elements is empty.  */
+
+  first_nonopt = last_nonopt = optind;
+
+  nextchar = NULL;
+
+  posixly_correct = getenv ("POSIXLY_CORRECT");
+
+  /* Determine how to handle the ordering of options and nonoptions.  */
+
+  if (optstring[0] == '-')
+    {
+      ordering = RETURN_IN_ORDER;
+      ++optstring;
+    }
+  else if (optstring[0] == '+')
+    {
+      ordering = REQUIRE_ORDER;
+      ++optstring;
+    }
+  else if (posixly_correct != NULL)
+    ordering = REQUIRE_ORDER;
+  else
+    ordering = PERMUTE;
+
+#if defined _LIBC && defined USE_NONOPTION_FLAGS
+  if (posixly_correct == NULL
+      && argc == __libc_argc && argv == __libc_argv)
+    {
+      if (nonoption_flags_max_len == 0)
+	{
+	  if (__getopt_nonoption_flags == NULL
+	      || __getopt_nonoption_flags[0] == '\0')
+	    nonoption_flags_max_len = -1;
+	  else
+	    {
+	      const char *orig_str = __getopt_nonoption_flags;
+	      int len = nonoption_flags_max_len = strlen (orig_str);
+	      if (nonoption_flags_max_len < argc)
+		nonoption_flags_max_len = argc;
+	      __getopt_nonoption_flags =
+		(char *) malloc (nonoption_flags_max_len);
+	      if (__getopt_nonoption_flags == NULL)
+		nonoption_flags_max_len = -1;
+	      else
+		memset (__mempcpy (__getopt_nonoption_flags, orig_str, len),
+			'\0', nonoption_flags_max_len - len);
+	    }
+	}
+      nonoption_flags_len = nonoption_flags_max_len;
+    }
+  else
+    nonoption_flags_len = 0;
+#endif
+
+  return optstring;
+}
+
+/* Scan elements of ARGV (whose length is ARGC) for option characters
+   given in OPTSTRING.
+
+   If an element of ARGV starts with '-', and is not exactly "-" or "--",
+   then it is an option element.  The characters of this element
+   (aside from the initial '-') are option characters.  If `getopt'
+   is called repeatedly, it returns successively each of the option characters
+   from each of the option elements.
+
+   If `getopt' finds another option character, it returns that character,
+   updating `optind' and `nextchar' so that the next call to `getopt' can
+   resume the scan with the following option character or ARGV-element.
+
+   If there are no more option characters, `getopt' returns -1.
+   Then `optind' is the index in ARGV of the first ARGV-element
+   that is not an option.  (The ARGV-elements have been permuted
+   so that those that are not options now come last.)
+
+   OPTSTRING is a string containing the legitimate option characters.
+   If an option character is seen that is not listed in OPTSTRING,
+   return '?' after printing an error message.  If you set `opterr' to
+   zero, the error message is suppressed but we still return '?'.
+
+   If a char in OPTSTRING is followed by a colon, that means it wants an arg,
+   so the following text in the same ARGV-element, or the text of the following
+   ARGV-element, is returned in `optarg'.  Two colons mean an option that
+   wants an optional arg; if there is text in the current ARGV-element,
+   it is returned in `optarg', otherwise `optarg' is set to zero.
+
+   If OPTSTRING starts with `-' or `+', it requests different methods of
+   handling the non-option ARGV-elements.
+   See the comments about RETURN_IN_ORDER and REQUIRE_ORDER, above.
+
+   Long-named options begin with `--' instead of `-'.
+   Their names may be abbreviated as long as the abbreviation is unique
+   or is an exact match for some defined option.  If they have an
+   argument, it follows the option name in the same ARGV-element, separated
+   from the option name by a `=', or else in the next ARGV-element.
+   When `getopt' finds a long-named option, it returns 0 if that option's
+   `flag' field is nonzero, the value of the option's `val' field
+   if the `flag' field is zero.
+
+   The elements of ARGV aren't really const, because we permute them.
+   But we pretend they're const in the prototype to be compatible
+   with other systems.
+
+   LONGOPTS is a vector of `struct option' terminated by an
+   element containing a name which is zero.
+
+   LONGIND returns the index in LONGOPT of the long-named option found.
+   It is only valid when a long-named option has been found by the most
+   recent call.
+
+   If LONG_ONLY is nonzero, '-' as well as '--' can introduce
+   long-named options.  */
+
+int
+_getopt_internal (argc, argv, optstring, longopts, longind, long_only)
+     int argc;
+     char *const *argv;
+     const char *optstring;
+     const struct option *longopts;
+     int32_t *longind;
+     int long_only;
+{
+  int print_errors = opterr;
+  if (optstring[0] == ':')
+    print_errors = 0;
+
+  if (argc < 1)
+    return -1;
+
+  optarg = NULL;
+
+  if (optind == 0 || !__getopt_initialized)
+    {
+      if (optind == 0)
+	optind = 1;	/* Don't scan ARGV[0], the program name.  */
+      optstring = _getopt_initialize (argc, argv, optstring);
+      __getopt_initialized = 1;
+    }
+
+  /* Test whether ARGV[optind] points to a non-option argument.
+     Either it does not have option syntax, or there is an environment flag
+     from the shell indicating it is not an option.  The latter information
+     is only used in the GNU libc.  */
+#if defined _LIBC && defined USE_NONOPTION_FLAGS
+# define NONOPTION_P (argv[optind][0] != '-' || argv[optind][1] == '\0'	      \
+		      || (optind < nonoption_flags_len			      \
+			  && __getopt_nonoption_flags[optind] == '1'))
+#else
+# define NONOPTION_P (argv[optind][0] != '-' || argv[optind][1] == '\0')
+#endif
+
+  if (nextchar == NULL || *nextchar == '\0')
+    {
+      /* Advance to the next ARGV-element.  */
+
+      /* Give FIRST_NONOPT & LAST_NONOPT rational values if OPTIND has been
+	 moved back by the user (who may also have changed the arguments).  */
+      if (last_nonopt > optind)
+	last_nonopt = optind;
+      if (first_nonopt > optind)
+	first_nonopt = optind;
+
+      if (ordering == PERMUTE)
+	{
+	  /* If we have just processed some options following some non-options,
+	     exchange them so that the options come first.  */
+
+	  if (first_nonopt != last_nonopt && last_nonopt != optind)
+	    exchange ((char **) argv);
+	  else if (last_nonopt != optind)
+	    first_nonopt = optind;
+
+	  /* Skip any additional non-options
+	     and extend the range of non-options previously skipped.  */
+
+	  while (optind < argc && NONOPTION_P)
+	    optind++;
+	  last_nonopt = optind;
+	}
+
+      /* The special ARGV-element `--' means premature end of options.
+	 Skip it like a null option,
+	 then exchange with previous non-options as if it were an option,
+	 then skip everything else like a non-option.  */
+
+      if (optind != argc && !strcmp (argv[optind], "--"))
+	{
+	  optind++;
+
+	  if (first_nonopt != last_nonopt && last_nonopt != optind)
+	    exchange ((char **) argv);
+	  else if (first_nonopt == last_nonopt)
+	    first_nonopt = optind;
+	  last_nonopt = argc;
+
+	  optind = argc;
+	}
+
+      /* If we have done all the ARGV-elements, stop the scan
+	 and back over any non-options that we skipped and permuted.  */
+
+      if (optind == argc)
+	{
+	  /* Set the next-arg-index to point at the non-options
+	     that we previously skipped, so the caller will digest them.  */
+	  if (first_nonopt != last_nonopt)
+	    optind = first_nonopt;
+	  return -1;
+	}
+
+      /* If we have come to a non-option and did not permute it,
+	 either stop the scan or describe it to the caller and pass it by.  */
+
+      if (NONOPTION_P)
+	{
+	  if (ordering == REQUIRE_ORDER)
+	    return -1;
+	  optarg = argv[optind++];
+	  return 1;
+	}
+
+      /* We have found another option-ARGV-element.
+	 Skip the initial punctuation.  */
+
+      nextchar = (argv[optind] + 1
+		  + (longopts != NULL && argv[optind][1] == '-'));
+    }
+
+  /* Decode the current option-ARGV-element.  */
+
+  /* Check whether the ARGV-element is a long option.
+
+     If long_only and the ARGV-element has the form "-f", where f is
+     a valid short option, don't consider it an abbreviated form of
+     a long option that starts with f.  Otherwise there would be no
+     way to give the -f short option.
+
+     On the other hand, if there's a long option "fubar" and
+     the ARGV-element is "-fu", do consider that an abbreviation of
+     the long option, just like "--fu", and not "-f" with arg "u".
+
+     This distinction seems to be the most useful approach.  */
+
+  if (longopts != NULL
+      && (argv[optind][1] == '-'
+	  || (long_only && (argv[optind][2] || !my_index (optstring, argv[optind][1])))))
+    {
+      char *nameend;
+      const struct option *p;
+      const struct option *pfound = NULL;
+      int exact = 0;
+      int ambig = 0;
+      int indfound = -1;
+      int option_index;
+
+      for (nameend = nextchar; *nameend && *nameend != '='; nameend++)
+	/* Do nothing.  */ ;
+
+      /* Test all long options for either exact match
+	 or abbreviated matches.  */
+      for (p = longopts, option_index = 0; p->name; p++, option_index++)
+	if (!strncmp (p->name, nextchar, nameend - nextchar))
+	  {
+	    if ((unsigned int) (nameend - nextchar)
+		== (unsigned int) strlen (p->name))
+	      {
+		/* Exact match found.  */
+		pfound = p;
+		indfound = option_index;
+		exact = 1;
+		break;
+	      }
+	    else if (pfound == NULL)
+	      {
+		/* First nonexact match found.  */
+		pfound = p;
+		indfound = option_index;
+	      }
+	    else if (long_only
+		     || pfound->has_arg != p->has_arg
+		     || pfound->flag != p->flag
+		     || pfound->val != p->val)
+	      /* Second or later nonexact match found.  */
+	      ambig = 1;
+	  }
+
+      if (ambig && !exact)
+	{
+	  if (print_errors)
+	    fprintf (stderr, _("%s: option `%s' is ambiguous\n"),
+		     argv[0], argv[optind]);
+	  nextchar += strlen (nextchar);
+	  optind++;
+	  optopt = 0;
+	  return '?';
+	}
+
+      if (pfound != NULL)
+	{
+	  option_index = indfound;
+	  optind++;
+	  if (*nameend)
+	    {
+	      /* Don't test has_arg with >, because some C compilers don't
+		 allow it to be used on enums.  */
+	      if (pfound->has_arg)
+		optarg = nameend + 1;
+	      else
+		{
+		  if (print_errors)
+		    {
+		      if (argv[optind - 1][1] == '-')
+			/* --option */
+			fprintf (stderr,
+				 _("%s: option `--%s' doesn't allow an argument\n"),
+				 argv[0], pfound->name);
+		      else
+			/* +option or -option */
+			fprintf (stderr,
+				 _("%s: option `%c%s' doesn't allow an argument\n"),
+				 argv[0], argv[optind - 1][0], pfound->name);
+		    }
+
+		  nextchar += strlen (nextchar);
+
+		  optopt = pfound->val;
+		  return '?';
+		}
+	    }
+	  else if (pfound->has_arg == 1)
+	    {
+	      if (optind < argc)
+		optarg = argv[optind++];
+	      else
+		{
+		  if (print_errors)
+		    fprintf (stderr,
+			   _("%s: option `%s' requires an argument\n"),
+			   argv[0], argv[optind - 1]);
+		  nextchar += strlen (nextchar);
+		  optopt = pfound->val;
+		  return optstring[0] == ':' ? ':' : '?';
+		}
+	    }
+	  nextchar += strlen (nextchar);
+	  if (longind != NULL)
+	    *longind = option_index;
+	  if (pfound->flag)
+	    {
+	      *(pfound->flag) = pfound->val;
+	      return 0;
+	    }
+	  return pfound->val;
+	}
+
+      /* Can't find it as a long option.  If this is not getopt_long_only,
+	 or the option starts with '--' or is not a valid short
+	 option, then it's an error.
+	 Otherwise interpret it as a short option.  */
+      if (!long_only || argv[optind][1] == '-'
+	  || my_index (optstring, *nextchar) == NULL)
+	{
+	  if (print_errors)
+	    {
+	      if (argv[optind][1] == '-')
+		/* --option */
+		fprintf (stderr, _("%s: unrecognized option `--%s'\n"),
+			 argv[0], nextchar);
+	      else
+		/* +option or -option */
+		fprintf (stderr, _("%s: unrecognized option `%c%s'\n"),
+			 argv[0], argv[optind][0], nextchar);
+	    }
+	  nextchar = (char *) "";
+	  optind++;
+	  optopt = 0;
+	  return '?';
+	}
+    }
+
+  /* Look at and handle the next short option-character.  */
+
+  {
+    char c = *nextchar++;
+    char *temp = my_index (optstring, c);
+
+    /* Increment `optind' when we start to process its last character.  */
+    if (*nextchar == '\0')
+      ++optind;
+
+    if (temp == NULL || c == ':')
+      {
+	if (print_errors)
+	  {
+	    if (posixly_correct)
+	      /* 1003.2 specifies the format of this message.  */
+	      fprintf (stderr, _("%s: illegal option -- %c\n"),
+		       argv[0], c);
+	    else
+	      fprintf (stderr, _("%s: invalid option -- %c\n"),
+		       argv[0], c);
+	  }
+	optopt = c;
+	return '?';
+      }
+    /* Convenience. Treat POSIX -W foo same as long option --foo */
+    if (temp[0] == 'W' && temp[1] == ';')
+      {
+	char *nameend;
+	const struct option *p;
+	const struct option *pfound = NULL;
+	int exact = 0;
+	int ambig = 0;
+	int indfound = 0;
+	int option_index;
+
+	/* This is an option that requires an argument.  */
+	if (*nextchar != '\0')
+	  {
+	    optarg = nextchar;
+	    /* If we end this ARGV-element by taking the rest as an arg,
+	       we must advance to the next element now.  */
+	    optind++;
+	  }
+	else if (optind == argc)
+	  {
+	    if (print_errors)
+	      {
+		/* 1003.2 specifies the format of this message.  */
+		fprintf (stderr, _("%s: option requires an argument -- %c\n"),
+			 argv[0], c);
+	      }
+	    optopt = c;
+	    if (optstring[0] == ':')
+	      c = ':';
+	    else
+	      c = '?';
+	    return c;
+	  }
+	else
+	  /* We already incremented `optind' once;
+	     increment it again when taking next ARGV-elt as argument.  */
+	  optarg = argv[optind++];
+
+	/* optarg is now the argument, see if it's in the
+	   table of longopts.  */
+
+	for (nextchar = nameend = optarg; *nameend && *nameend != '='; nameend++)
+	  /* Do nothing.  */ ;
+
+	/* Test all long options for either exact match
+	   or abbreviated matches.  */
+	for (p = longopts, option_index = 0; p->name; p++, option_index++)
+	  if (!strncmp (p->name, nextchar, nameend - nextchar))
+	    {
+	      if ((unsigned int) (nameend - nextchar) == strlen (p->name))
+		{
+		  /* Exact match found.  */
+		  pfound = p;
+		  indfound = option_index;
+		  exact = 1;
+		  break;
+		}
+	      else if (pfound == NULL)
+		{
+		  /* First nonexact match found.  */
+		  pfound = p;
+		  indfound = option_index;
+		}
+	      else
+		/* Second or later nonexact match found.  */
+		ambig = 1;
+	    }
+	if (ambig && !exact)
+	  {
+	    if (print_errors)
+	      fprintf (stderr, _("%s: option `-W %s' is ambiguous\n"),
+		       argv[0], argv[optind]);
+	    nextchar += strlen (nextchar);
+	    optind++;
+	    return '?';
+	  }
+	if (pfound != NULL)
+	  {
+	    option_index = indfound;
+	    if (*nameend)
+	      {
+		/* Don't test has_arg with >, because some C compilers don't
+		   allow it to be used on enums.  */
+		if (pfound->has_arg)
+		  optarg = nameend + 1;
+		else
+		  {
+		    if (print_errors)
+		      fprintf (stderr, _("\
+%s: option `-W %s' doesn't allow an argument\n"),
+			       argv[0], pfound->name);
+
+		    nextchar += strlen (nextchar);
+		    return '?';
+		  }
+	      }
+	    else if (pfound->has_arg == 1)
+	      {
+		if (optind < argc)
+		  optarg = argv[optind++];
+		else
+		  {
+		    if (print_errors)
+		      fprintf (stderr,
+			       _("%s: option `%s' requires an argument\n"),
+			       argv[0], argv[optind - 1]);
+		    nextchar += strlen (nextchar);
+		    return optstring[0] == ':' ? ':' : '?';
+		  }
+	      }
+	    nextchar += strlen (nextchar);
+	    if (longind != NULL)
+	      *longind = option_index;
+	    if (pfound->flag)
+	      {
+		*(pfound->flag) = pfound->val;
+		return 0;
+	      }
+	    return pfound->val;
+	  }
+	nextchar = NULL;
+	return 'W';	/* Let the application handle it.   */
+      }
+    if (temp[1] == ':')
+      {
+	if (temp[2] == ':')
+	  {
+	    /* This is an option that accepts an argument optionally.  */
+	    if (*nextchar != '\0')
+	      {
+		optarg = nextchar;
+		optind++;
+	      }
+	    else
+	      optarg = NULL;
+	    nextchar = NULL;
+	  }
+	else
+	  {
+	    /* This is an option that requires an argument.  */
+	    if (*nextchar != '\0')
+	      {
+		optarg = nextchar;
+		/* If we end this ARGV-element by taking the rest as an arg,
+		   we must advance to the next element now.  */
+		optind++;
+	      }
+	    else if (optind == argc)
+	      {
+		if (print_errors)
+		  {
+		    /* 1003.2 specifies the format of this message.  */
+		    fprintf (stderr,
+			     _("%s: option requires an argument -- %c\n"),
+			     argv[0], c);
+		  }
+		optopt = c;
+		if (optstring[0] == ':')
+		  c = ':';
+		else
+		  c = '?';
+	      }
+	    else
+	      /* We already incremented `optind' once;
+		 increment it again when taking next ARGV-elt as argument.  */
+	      optarg = argv[optind++];
+	    nextchar = NULL;
+	  }
+      }
+    return c;
+  }
+}
+
+int
+getopt (argc, argv, optstring)
+     int argc;
+     char *const *argv;
+     const char *optstring;
+{
+  return _getopt_internal (argc, argv, optstring,
+			   (const struct option *) 0,
+			   (int32_t *) 0,
+			   0);
+}
+
+int
+getopt_long (argc, argv, options, long_options, opt_index)
+    int argc;
+    char *const *argv;
+    const char *options;
+    const struct option *long_options;
+    int32_t *opt_index;
+{
+    return _getopt_internal (argc, argv, options, long_options, opt_index, 0);
+}
+
+#endif	/* Not ELIDE_CODE.  */
+
+#ifdef TEST
+
+/* Compile with -DTEST to make an executable for use in testing
+   the above definition of `getopt'.  */
+
+int
+main (argc, argv)
+     int argc;
+     char **argv;
+{
+  int c;
+  int digit_optind = 0;
+
+  while (1)
+    {
+      int this_option_optind = optind ? optind : 1;
+
+      c = getopt (argc, argv, "abc:d:0123456789");
+      if (c == -1)
+	break;
+
+      switch (c)
+	{
+	case '0':
+	case '1':
+	case '2':
+	case '3':
+	case '4':
+	case '5':
+	case '6':
+	case '7':
+	case '8':
+	case '9':
+	  if (digit_optind != 0 && digit_optind != this_option_optind)
+	    printf ("digits occur in two different argv-elements.\n");
+	  digit_optind = this_option_optind;
+	  printf ("option %c\n", c);
+	  break;
+
+	case 'a':
+	  printf ("option a\n");
+	  break;
+
+	case 'b':
+	  printf ("option b\n");
+	  break;
+
+	case 'c':
+	  printf ("option c with value `%s'\n", optarg);
+	  break;
+
+	case '?':
+	  break;
+
+	default:
+	  printf ("?? getopt returned character code 0%o ??\n", c);
+	}
+    }
+
+  if (optind < argc)
+    {
+      printf ("non-option ARGV-elements: ");
+      while (optind < argc)
+	printf ("%s ", argv[optind++]);
+      printf ("\n");
+    }
+
+  exit (0);
+}
+
+#endif /* TEST */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/compat/getopt/getopt.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,182 @@
+/* Declarations for getopt.
+   Copyright (C) 1989-1994, 1996-1999, 2001 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifndef _GETOPT_H
+
+#ifndef __need_getopt
+# define _GETOPT_H 1
+#endif
+
+#include <stdint.h>
+
+/* If __GNU_LIBRARY__ is not already defined, either we are being used
+   standalone, or this is the first header included in the source file.
+   If we are being used with glibc, we need to include <features.h>, but
+   that does not exist if we are standalone.  So: if __GNU_LIBRARY__ is
+   not defined, include <ctype.h>, which will pull in <features.h> for us
+   if it's from glibc.  (Why ctype.h?  It's guaranteed to exist and it
+   doesn't flood the namespace with stuff the way some other headers do.)  */
+#if !defined __GNU_LIBRARY__
+# include <ctype.h>
+#endif
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+/* For communication from `getopt' to the caller.
+   When `getopt' finds an option that takes an argument,
+   the argument value is returned here.
+   Also, when `ordering' is RETURN_IN_ORDER,
+   each non-option ARGV-element is returned here.  */
+
+extern char *optarg;
+
+/* Index in ARGV of the next element to be scanned.
+   This is used for communication to and from the caller
+   and for communication between successive calls to `getopt'.
+
+   On entry to `getopt', zero means this is the first call; initialize.
+
+   When `getopt' returns -1, this is the index of the first of the
+   non-option elements that the caller should itself scan.
+
+   Otherwise, `optind' communicates from one call to the next
+   how much of ARGV has been scanned so far.  */
+
+extern int optind;
+
+/* Callers store zero here to inhibit the error message `getopt' prints
+   for unrecognized options.  */
+
+extern int opterr;
+
+/* Set to an option character which was unrecognized.  */
+
+extern int optopt;
+
+#ifndef __need_getopt
+/* Describe the long-named options requested by the application.
+   The LONG_OPTIONS argument to getopt_long or getopt_long_only is a vector
+   of `struct option' terminated by an element containing a name which is
+   zero.
+
+   The field `has_arg' is:
+   no_argument		(or 0) if the option does not take an argument,
+   required_argument	(or 1) if the option requires an argument,
+   optional_argument	(or 2) if the option takes an optional argument.
+
+   If the field `flag' is not NULL, it points to a variable that is set
+   to the value given in the field `val' when the option is found, but
+   left unchanged if the option is not found.
+
+   To have a long-named option do something other than set an `int' to
+   a compiled-in constant, such as set a value from `optarg', set the
+   option's `flag' field to zero and its `val' field to a nonzero
+   value (the equivalent single-letter option character, if there is
+   one).  For long options that have a zero `flag' field, `getopt'
+   returns the contents of the `val' field.  */
+
+struct option
+{
+# if (defined __STDC__ && __STDC__) || defined __cplusplus
+  const char *name;
+# else
+  char *name;
+# endif
+  /* has_arg can't be an enum because some compilers complain about
+     type mismatches in all the code that assumes it is an int.  */
+  int has_arg;
+  int32_t *flag;
+  int val;
+};
+
+/* Names for the values of the `has_arg' field of `struct option'.  */
+
+# define no_argument		0
+# define required_argument	1
+# define optional_argument	2
+#endif	/* need getopt */
+
+
+/* Get definitions and prototypes for functions to process the
+   arguments in ARGV (ARGC of them, minus the program name) for
+   options given in OPTS.
+
+   Return the option character from OPTS just read.  Return -1 when
+   there are no more options.  For unrecognized options, or options
+   missing arguments, `optopt' is set to the option letter, and '?' is
+   returned.
+
+   The OPTS string is a list of characters which are recognized option
+   letters, optionally followed by colons, specifying that that letter
+   takes an argument, to be placed in `optarg'.
+
+   If a letter in OPTS is followed by two colons, its argument is
+   optional.  This behavior is specific to the GNU `getopt'.
+
+   The argument `--' causes premature termination of argument
+   scanning, explicitly telling `getopt' that there are no more
+   options.
+
+   If OPTS begins with `--', then non-option arguments are treated as
+   arguments to the option '\0'.  This behavior is specific to the GNU
+   `getopt'.  */
+
+#if (defined __STDC__ && __STDC__) || defined __cplusplus
+# ifdef __GNU_LIBRARY__
+/* Many other libraries have conflicting prototypes for getopt, with
+   differences in the consts, in stdlib.h.  To avoid compilation
+   errors, only prototype getopt for the GNU C library.  */
+extern int getopt (int argc, char *const *argv, const char *shortopts);
+# else /* not __GNU_LIBRARY__ */
+extern int getopt ();
+# endif /* __GNU_LIBRARY__ */
+
+# ifndef __need_getopt
+extern int getopt_long (int argc, char *const *argv, const char *shortopts,
+		        const struct option *longopts, int32_t *longind);
+extern int getopt_long_only (int argc, char *const *argv,
+			     const char *shortopts,
+		             const struct option *longopts, int32_t *longind);
+
+/* Internal only.  Users should not call this directly.  */
+extern int _getopt_internal (int argc, char *const *argv,
+			     const char *shortopts,
+		             const struct option *longopts, int32_t *longind,
+			     int longonly);
+# endif
+#else /* not __STDC__ */
+extern int getopt ();
+# ifndef __need_getopt
+extern int getopt_long ();
+extern int getopt_long_only ();
+
+extern int _getopt_internal ();
+# endif
+#endif /* __STDC__ */
+
+#ifdef	__cplusplus
+}
+#endif
+
+/* Make sure we later can get all the definitions and declarations.  */
+#undef __need_getopt
+
+#endif /* getopt.h */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/compat/msvc/stdint.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,24 @@
+#pragma once
+
+#ifndef _MSC_VER
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif
+
+#include <crtdefs.h> // for intptr_t
+#if !defined(UINT64_MAX)
+#include <limits.h>
+#define UINT64_MAX _UI64_MAX
+#endif
+
+/* a minimal set of C99 types for use with MSVC (VC9) */
+
+typedef signed char int8_t;
+typedef short int int16_t;
+typedef int int32_t;
+typedef __int64 int64_t;
+
+typedef unsigned char uint8_t;
+typedef unsigned short int uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned __int64 uint64_t;
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/CMakeLists.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,48 @@
+# vim: syntax=cmake
+
+if(GCC)
+    add_definitions(-Wno-uninitialized)
+    if(CC_HAS_NO_STRICT_OVERFLOW)
+        # GCC 4.9.2 gives warnings we know we can ignore in this file
+        set_source_files_properties(slicetype.cpp PROPERTIES COMPILE_FLAGS -Wno-strict-overflow)
+    endif(CC_HAS_NO_STRICT_OVERFLOW)
+endif()
+if(MSVC)
+   add_definitions(/wd4701) # potentially uninitialized local variable 'foo' used
+endif()
+
+if(LINKED_8BIT)
+  list(APPEND APIFLAGS "-DLINKED_8BIT=1")
+endif(LINKED_8BIT)
+if(LINKED_10BIT)
+  list(APPEND APIFLAGS "-DLINKED_10BIT=1")
+endif(LINKED_10BIT)
+if(LINKED_12BIT)
+  list(APPEND APIFLAGS "-DLINKED_12BIT=1")
+endif(LINKED_12BIT)
+if(ENABLE_SHARED)
+  list(APPEND APIFLAGS "-DENABLE_SHARED=1")
+endif(ENABLE_SHARED)
+
+string(REPLACE ";" " " APIFLAGSTR "${APIFLAGS}")
+set_source_files_properties(api.cpp PROPERTIES COMPILE_FLAGS "${APIFLAGSTR}")
+
+add_library(encoder OBJECT ../x265.h
+    analysis.cpp analysis.h
+    search.cpp search.h
+    bitcost.cpp bitcost.h rdcost.h
+    motion.cpp motion.h
+    slicetype.cpp slicetype.h
+    frameencoder.cpp frameencoder.h
+    framefilter.cpp framefilter.h
+    level.cpp level.h
+    nal.cpp nal.h
+    sei.cpp sei.h
+    sao.cpp sao.h
+    entropy.cpp entropy.h
+    dpb.cpp dpb.h
+    ratecontrol.cpp ratecontrol.h
+    reference.cpp reference.h
+    encoder.cpp encoder.h
+    api.cpp
+    weightPrediction.cpp)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/analysis.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2168 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Deepthi Nandakumar <deepthi@multicorewareinc.com>
+*          Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "common.h"
+#include "frame.h"
+#include "framedata.h"
+#include "picyuv.h"
+#include "primitives.h"
+#include "threading.h"
+
+#include "analysis.h"
+#include "rdcost.h"
+#include "encoder.h"
+
+using namespace X265_NS;
+
+/* An explanation of rate distortion levels (--rd-level)
+ * 
+ * rd-level 0 generates no recon per CU (NO RDO or Quant)
+ *
+ *   sa8d selection between merge / skip / inter / intra and split
+ *   no recon pixels generated until CTU analysis is complete, requiring
+ *   intra predictions to use source pixels
+ *
+ * rd-level 1 uses RDO for merge and skip, sa8d for all else
+ *
+ *   RDO selection between merge and skip
+ *   sa8d selection between (merge/skip) / inter modes / intra and split
+ *   intra prediction uses reconstructed pixels
+ *
+ * rd-level 2 uses RDO for merge/skip and split
+ *
+ *   RDO selection between merge and skip
+ *   sa8d selection between (merge/skip) / inter modes / intra
+ *   RDO split decisions
+ *
+ * rd-level 3 uses RDO for merge/skip/best inter/intra
+ *
+ *   RDO selection between merge and skip
+ *   sa8d selection of best inter mode
+ *   sa8d decisions include chroma residual cost
+ *   RDO selection between (merge/skip) / best inter mode / intra / split
+ *
+ * rd-level 4 enables RDOQuant
+ *   chroma residual cost included in satd decisions, including subpel refine
+ *    (as a result of --subme 3 being used by preset slow)
+ *
+ * rd-level 5,6 does RDO for each inter mode
+ */
+
+Analysis::Analysis()
+{
+    m_reuseIntraDataCTU = NULL;
+    m_reuseInterDataCTU = NULL;
+    m_reuseRef = NULL;
+    m_reuseBestMergeCand = NULL;
+}
+
+bool Analysis::create(ThreadLocalData *tld)
+{
+    m_tld = tld;
+    m_bTryLossless = m_param->bCULossless && !m_param->bLossless && m_param->rdLevel >= 2;
+    m_bChromaSa8d = m_param->rdLevel >= 3;
+
+    int csp = m_param->internalCsp;
+    uint32_t cuSize = g_maxCUSize;
+
+    bool ok = true;
+    for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++, cuSize >>= 1)
+    {
+        ModeDepth &md = m_modeDepth[depth];
+
+        md.cuMemPool.create(depth, csp, MAX_PRED_TYPES);
+        ok &= md.fencYuv.create(cuSize, csp);
+
+        for (int j = 0; j < MAX_PRED_TYPES; j++)
+        {
+            md.pred[j].cu.initialize(md.cuMemPool, depth, csp, j);
+            ok &= md.pred[j].predYuv.create(cuSize, csp);
+            ok &= md.pred[j].reconYuv.create(cuSize, csp);
+            md.pred[j].fencYuv = &md.fencYuv;
+        }
+    }
+
+    return ok;
+}
+
+void Analysis::destroy()
+{
+    for (uint32_t i = 0; i <= g_maxCUDepth; i++)
+    {
+        m_modeDepth[i].cuMemPool.destroy();
+        m_modeDepth[i].fencYuv.destroy();
+
+        for (int j = 0; j < MAX_PRED_TYPES; j++)
+        {
+            m_modeDepth[i].pred[j].predYuv.destroy();
+            m_modeDepth[i].pred[j].reconYuv.destroy();
+        }
+    }
+}
+
+Mode& Analysis::compressCTU(CUData& ctu, Frame& frame, const CUGeom& cuGeom, const Entropy& initialContext)
+{
+    m_slice = ctu.m_slice;
+    m_frame = &frame;
+
+#if _DEBUG || CHECKED_BUILD
+    for (uint32_t i = 0; i <= g_maxCUDepth; i++)
+        for (uint32_t j = 0; j < MAX_PRED_TYPES; j++)
+            m_modeDepth[i].pred[j].invalidate();
+    invalidateContexts(0);
+#endif
+
+    int qp = setLambdaFromQP(ctu, m_slice->m_pps->bUseDQP ? calculateQpforCuSize(ctu, cuGeom) : m_slice->m_sliceQp);
+    ctu.setQPSubParts((int8_t)qp, 0, 0);
+
+    m_rqt[0].cur.load(initialContext);
+    m_modeDepth[0].fencYuv.copyFromPicYuv(*m_frame->m_fencPic, ctu.m_cuAddr, 0);
+
+    uint32_t numPartition = ctu.m_numPartitions;
+    if (m_param->analysisMode)
+    {
+        if (m_slice->m_sliceType == I_SLICE)
+            m_reuseIntraDataCTU = (analysis_intra_data*)m_frame->m_analysisData.intraData;
+        else
+        {
+            int numPredDir = m_slice->isInterP() ? 1 : 2;
+            m_reuseInterDataCTU = (analysis_inter_data*)m_frame->m_analysisData.interData;
+            m_reuseRef = &m_reuseInterDataCTU->ref[ctu.m_cuAddr * X265_MAX_PRED_MODE_PER_CTU * numPredDir];
+            m_reuseBestMergeCand = &m_reuseInterDataCTU->bestMergeCand[ctu.m_cuAddr * CUGeom::MAX_GEOMS];
+        }
+    }
+
+    ProfileCUScope(ctu, totalCTUTime, totalCTUs);
+
+    uint32_t zOrder = 0;
+    if (m_slice->m_sliceType == I_SLICE)
+    {
+        compressIntraCU(ctu, cuGeom, zOrder, qp);
+        if (m_param->analysisMode == X265_ANALYSIS_SAVE && m_frame->m_analysisData.intraData)
+        {
+            CUData* bestCU = &m_modeDepth[0].bestMode->cu;
+            memcpy(&m_reuseIntraDataCTU->depth[ctu.m_cuAddr * numPartition], bestCU->m_cuDepth, sizeof(uint8_t) * numPartition);
+            memcpy(&m_reuseIntraDataCTU->modes[ctu.m_cuAddr * numPartition], bestCU->m_lumaIntraDir, sizeof(uint8_t) * numPartition);
+            memcpy(&m_reuseIntraDataCTU->partSizes[ctu.m_cuAddr * numPartition], bestCU->m_partSize, sizeof(uint8_t) * numPartition);
+            memcpy(&m_reuseIntraDataCTU->chromaModes[ctu.m_cuAddr * numPartition], bestCU->m_chromaIntraDir, sizeof(uint8_t) * numPartition);
+        }
+    }
+    else
+    {
+        if (!m_param->rdLevel)
+        {
+            /* In RD Level 0/1, copy source pixels into the reconstructed block so
+            * they are available for intra predictions */
+            m_modeDepth[0].fencYuv.copyToPicYuv(*m_frame->m_reconPic, ctu.m_cuAddr, 0);
+
+            compressInterCU_rd0_4(ctu, cuGeom, qp);
+
+            /* generate residual for entire CTU at once and copy to reconPic */
+            encodeResidue(ctu, cuGeom);
+        }
+        else if (m_param->bDistributeModeAnalysis && m_param->rdLevel >= 2)
+            compressInterCU_dist(ctu, cuGeom, qp);
+        else if (m_param->rdLevel <= 4)
+            compressInterCU_rd0_4(ctu, cuGeom, qp);
+        else
+        {
+            compressInterCU_rd5_6(ctu, cuGeom, zOrder, qp);
+            if (m_param->analysisMode == X265_ANALYSIS_SAVE && m_frame->m_analysisData.interData)
+            {
+                CUData* bestCU = &m_modeDepth[0].bestMode->cu;
+                memcpy(&m_reuseInterDataCTU->depth[ctu.m_cuAddr * numPartition], bestCU->m_cuDepth, sizeof(uint8_t) * numPartition);
+                memcpy(&m_reuseInterDataCTU->modes[ctu.m_cuAddr * numPartition], bestCU->m_predMode, sizeof(uint8_t) * numPartition);
+            }
+        }
+    }
+
+    return *m_modeDepth[0].bestMode;
+}
+
+void Analysis::tryLossless(const CUGeom& cuGeom)
+{
+    ModeDepth& md = m_modeDepth[cuGeom.depth];
+
+    if (!md.bestMode->distortion)
+        /* already lossless */
+        return;
+    else if (md.bestMode->cu.isIntra(0))
+    {
+        md.pred[PRED_LOSSLESS].initCosts();
+        md.pred[PRED_LOSSLESS].cu.initLosslessCU(md.bestMode->cu, cuGeom);
+        PartSize size = (PartSize)md.pred[PRED_LOSSLESS].cu.m_partSize[0];
+        uint8_t* modes = md.pred[PRED_LOSSLESS].cu.m_lumaIntraDir;
+        checkIntra(md.pred[PRED_LOSSLESS], cuGeom, size, modes, NULL);
+        checkBestMode(md.pred[PRED_LOSSLESS], cuGeom.depth);
+    }
+    else
+    {
+        md.pred[PRED_LOSSLESS].initCosts();
+        md.pred[PRED_LOSSLESS].cu.initLosslessCU(md.bestMode->cu, cuGeom);
+        md.pred[PRED_LOSSLESS].predYuv.copyFromYuv(md.bestMode->predYuv);
+        encodeResAndCalcRdInterCU(md.pred[PRED_LOSSLESS], cuGeom);
+        checkBestMode(md.pred[PRED_LOSSLESS], cuGeom.depth);
+    }
+}
+
+void Analysis::compressIntraCU(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t& zOrder, int32_t qp)
+{
+    uint32_t depth = cuGeom.depth;
+    ModeDepth& md = m_modeDepth[depth];
+    md.bestMode = NULL;
+
+    bool mightSplit = !(cuGeom.flags & CUGeom::LEAF);
+    bool mightNotSplit = !(cuGeom.flags & CUGeom::SPLIT_MANDATORY);
+
+    if (m_param->analysisMode == X265_ANALYSIS_LOAD)
+    {
+        uint8_t* reuseDepth  = &m_reuseIntraDataCTU->depth[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
+        uint8_t* reuseModes  = &m_reuseIntraDataCTU->modes[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
+        char* reusePartSizes = &m_reuseIntraDataCTU->partSizes[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
+        uint8_t* reuseChromaModes = &m_reuseIntraDataCTU->chromaModes[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
+
+        if (mightNotSplit && depth == reuseDepth[zOrder] && zOrder == cuGeom.absPartIdx)
+        {
+            PartSize size = (PartSize)reusePartSizes[zOrder];
+            Mode& mode = size == SIZE_2Nx2N ? md.pred[PRED_INTRA] : md.pred[PRED_INTRA_NxN];
+            mode.cu.initSubCU(parentCTU, cuGeom, qp);
+            checkIntra(mode, cuGeom, size, &reuseModes[zOrder], &reuseChromaModes[zOrder]);
+            checkBestMode(mode, depth);
+
+            if (m_bTryLossless)
+                tryLossless(cuGeom);
+
+            if (mightSplit)
+                addSplitFlagCost(*md.bestMode, cuGeom.depth);
+
+            // increment zOrder offset to point to next best depth in sharedDepth buffer
+            zOrder += g_depthInc[g_maxCUDepth - 1][reuseDepth[zOrder]];
+            mightSplit = false;
+        }
+    }
+    else if (mightNotSplit)
+    {
+        md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
+        checkIntra(md.pred[PRED_INTRA], cuGeom, SIZE_2Nx2N, NULL, NULL);
+        checkBestMode(md.pred[PRED_INTRA], depth);
+
+        if (cuGeom.log2CUSize == 3 && m_slice->m_sps->quadtreeTULog2MinSize < 3)
+        {
+            md.pred[PRED_INTRA_NxN].cu.initSubCU(parentCTU, cuGeom, qp);
+            checkIntra(md.pred[PRED_INTRA_NxN], cuGeom, SIZE_NxN, NULL, NULL);
+            checkBestMode(md.pred[PRED_INTRA_NxN], depth);
+        }
+
+        if (m_bTryLossless)
+            tryLossless(cuGeom);
+
+        if (mightSplit)
+            addSplitFlagCost(*md.bestMode, cuGeom.depth);
+    }
+
+    if (mightSplit)
+    {
+        Mode* splitPred = &md.pred[PRED_SPLIT];
+        splitPred->initCosts();
+        CUData* splitCU = &splitPred->cu;
+        splitCU->initSubCU(parentCTU, cuGeom, qp);
+
+        uint32_t nextDepth = depth + 1;
+        ModeDepth& nd = m_modeDepth[nextDepth];
+        invalidateContexts(nextDepth);
+        Entropy* nextContext = &m_rqt[depth].cur;
+        int32_t nextQP = qp;
+
+        for (uint32_t subPartIdx = 0; subPartIdx < 4; subPartIdx++)
+        {
+            const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + subPartIdx);
+            if (childGeom.flags & CUGeom::PRESENT)
+            {
+                m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.absPartIdx);
+                m_rqt[nextDepth].cur.load(*nextContext);
+
+                if (m_slice->m_pps->bUseDQP && nextDepth <= m_slice->m_pps->maxCuDQPDepth)
+                    nextQP = setLambdaFromQP(parentCTU, calculateQpforCuSize(parentCTU, childGeom));
+
+                compressIntraCU(parentCTU, childGeom, zOrder, nextQP);
+
+                // Save best CU and pred data for this sub CU
+                splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
+                splitPred->addSubCosts(*nd.bestMode);
+                nd.bestMode->reconYuv.copyToPartYuv(splitPred->reconYuv, childGeom.numPartitions * subPartIdx);
+                nextContext = &nd.bestMode->contexts;
+            }
+            else
+            {
+                /* record the depth of this non-present sub-CU */
+                splitCU->setEmptyPart(childGeom, subPartIdx);
+                zOrder += g_depthInc[g_maxCUDepth - 1][nextDepth];
+            }
+        }
+        nextContext->store(splitPred->contexts);
+        if (mightNotSplit)
+            addSplitFlagCost(*splitPred, cuGeom.depth);
+        else
+            updateModeCost(*splitPred);
+
+        checkDQPForSplitPred(*splitPred, cuGeom);
+        checkBestMode(*splitPred, depth);
+    }
+
+    /* Copy best data to encData CTU and recon */
+    md.bestMode->cu.copyToPic(depth);
+    if (md.bestMode != &md.pred[PRED_SPLIT])
+        md.bestMode->reconYuv.copyToPicYuv(*m_frame->m_reconPic, parentCTU.m_cuAddr, cuGeom.absPartIdx);
+}
+
+void Analysis::PMODE::processTasks(int workerThreadId)
+{
+#if DETAILED_CU_STATS
+    int fe = master.m_modeDepth[cuGeom.depth].pred[PRED_2Nx2N].cu.m_encData->m_frameEncoderID;
+    master.m_stats[fe].countPModeTasks++;
+    ScopedElapsedTime pmodeTime(master.m_stats[fe].pmodeTime);
+#endif
+    ProfileScopeEvent(pmode);
+    master.processPmode(*this, master.m_tld[workerThreadId].analysis);
+}
+
+/* process pmode jobs until none remain; may be called by the master thread or by
+ * a bonded peer (slave) thread via pmodeTasks() */
+void Analysis::processPmode(PMODE& pmode, Analysis& slave)
+{
+    /* acquire a mode task, else exit early */
+    int task;
+    pmode.m_lock.acquire();
+    if (pmode.m_jobTotal > pmode.m_jobAcquired)
+    {
+        task = pmode.m_jobAcquired++;
+        pmode.m_lock.release();
+    }
+    else
+    {
+        pmode.m_lock.release();
+        return;
+    }
+
+    /* every job in this batch analyzes a prediction mode at the same CU depth */
+    ModeDepth& md = m_modeDepth[pmode.cuGeom.depth];
+
+    /* setup slave Analysis */
+    if (&slave != this)
+    {
+        /* copy the master's slice/frame/param context, derive lambda from the
+         * master's QP, and load the master's entropy state so the bonded peer
+         * produces results identical to what the master would have produced */
+        slave.m_slice = m_slice;
+        slave.m_frame = m_frame;
+        slave.m_param = m_param;
+        slave.setLambdaFromQP(md.pred[PRED_2Nx2N].cu, m_rdCost.m_qp);
+        slave.invalidateContexts(0);
+        slave.m_rqt[pmode.cuGeom.depth].cur.load(m_rqt[pmode.cuGeom.depth].cur);
+    }
+
+    /* perform Mode task, repeat until no more work is available */
+    do
+    {
+        /* refMasks[] restrict motion search per PU to references that were
+         * used by the covering sub-CUs (m_splitRefIdx, set by the caller) */
+        uint32_t refMasks[2] = { 0, 0 };
+
+        if (m_param->rdLevel <= 4)
+        {
+            /* rdLevel 0..4: use the rd0_4 mode checks (final mode choice is
+             * made later by the master from these candidate costs) */
+            switch (pmode.modes[task])
+            {
+            case PRED_INTRA:
+                slave.checkIntraInInter(md.pred[PRED_INTRA], pmode.cuGeom);
+                if (m_param->rdLevel > 2)
+                    slave.encodeIntraInInter(md.pred[PRED_INTRA], pmode.cuGeom);
+                break;
+
+            case PRED_2Nx2N:
+                /* 2Nx2N covers the whole CU: OR together all four sub-CU masks */
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[1] | m_splitRefIdx[2] | m_splitRefIdx[3];
+
+                slave.checkInter_rd0_4(md.pred[PRED_2Nx2N], pmode.cuGeom, SIZE_2Nx2N, refMasks);
+                if (m_slice->m_sliceType == B_SLICE)
+                    slave.checkBidir2Nx2N(md.pred[PRED_2Nx2N], md.pred[PRED_BIDIR], pmode.cuGeom);
+                break;
+
+            case PRED_Nx2N:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[2]; /* left */
+                refMasks[1] = m_splitRefIdx[1] | m_splitRefIdx[3]; /* right */
+
+                slave.checkInter_rd0_4(md.pred[PRED_Nx2N], pmode.cuGeom, SIZE_Nx2N, refMasks);
+                break;
+
+            case PRED_2NxN:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[1]; /* top */
+                refMasks[1] = m_splitRefIdx[2] | m_splitRefIdx[3]; /* bot */
+
+                slave.checkInter_rd0_4(md.pred[PRED_2NxN], pmode.cuGeom, SIZE_2NxN, refMasks);
+                break;
+
+            case PRED_2NxnU:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[1]; /* 25% top */
+                refMasks[1] = m_splitRefIdx[0] | m_splitRefIdx[1] | m_splitRefIdx[2] | m_splitRefIdx[3]; /* 75% bot */
+
+                slave.checkInter_rd0_4(md.pred[PRED_2NxnU], pmode.cuGeom, SIZE_2NxnU, refMasks);
+                break;
+
+            case PRED_2NxnD:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[1] | m_splitRefIdx[2] | m_splitRefIdx[3]; /* 75% top */
+                refMasks[1] = m_splitRefIdx[2] | m_splitRefIdx[3]; /* 25% bot */
+
+                slave.checkInter_rd0_4(md.pred[PRED_2NxnD], pmode.cuGeom, SIZE_2NxnD, refMasks);
+                break;
+
+            case PRED_nLx2N:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[2]; /* 25% left */
+                refMasks[1] = m_splitRefIdx[0] | m_splitRefIdx[1] | m_splitRefIdx[2] | m_splitRefIdx[3]; /* 75% right */
+
+                slave.checkInter_rd0_4(md.pred[PRED_nLx2N], pmode.cuGeom, SIZE_nLx2N, refMasks);
+                break;
+
+            case PRED_nRx2N:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[1] | m_splitRefIdx[2] | m_splitRefIdx[3]; /* 75% left */
+                refMasks[1] = m_splitRefIdx[1] | m_splitRefIdx[3]; /* 25% right */
+
+                slave.checkInter_rd0_4(md.pred[PRED_nRx2N], pmode.cuGeom, SIZE_nRx2N, refMasks);
+                break;
+
+            default:
+                X265_CHECK(0, "invalid job ID for parallel mode analysis\n");
+                break;
+            }
+        }
+        else
+        {
+            /* rdLevel 5..6: use the full-RDO rd5_6 mode checks */
+            switch (pmode.modes[task])
+            {
+            case PRED_INTRA:
+                slave.checkIntra(md.pred[PRED_INTRA], pmode.cuGeom, SIZE_2Nx2N, NULL, NULL);
+                /* 8x8 CUs may additionally try NxN intra partitions */
+                if (pmode.cuGeom.log2CUSize == 3 && m_slice->m_sps->quadtreeTULog2MinSize < 3)
+                    slave.checkIntra(md.pred[PRED_INTRA_NxN], pmode.cuGeom, SIZE_NxN, NULL, NULL);
+                break;
+
+            case PRED_2Nx2N:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[1] | m_splitRefIdx[2] | m_splitRefIdx[3];
+
+                slave.checkInter_rd5_6(md.pred[PRED_2Nx2N], pmode.cuGeom, SIZE_2Nx2N, refMasks);
+                /* mark BIDIR invalid by default; overwritten below if usable */
+                md.pred[PRED_BIDIR].rdCost = MAX_INT64;
+                if (m_slice->m_sliceType == B_SLICE)
+                {
+                    slave.checkBidir2Nx2N(md.pred[PRED_2Nx2N], md.pred[PRED_BIDIR], pmode.cuGeom);
+                    if (md.pred[PRED_BIDIR].sa8dCost < MAX_INT64)
+                        slave.encodeResAndCalcRdInterCU(md.pred[PRED_BIDIR], pmode.cuGeom);
+                }
+                break;
+
+            case PRED_Nx2N:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[2]; /* left */
+                refMasks[1] = m_splitRefIdx[1] | m_splitRefIdx[3]; /* right */
+
+                slave.checkInter_rd5_6(md.pred[PRED_Nx2N], pmode.cuGeom, SIZE_Nx2N, refMasks);
+                break;
+
+            case PRED_2NxN:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[1]; /* top */
+                refMasks[1] = m_splitRefIdx[2] | m_splitRefIdx[3]; /* bot */
+
+                slave.checkInter_rd5_6(md.pred[PRED_2NxN], pmode.cuGeom, SIZE_2NxN, refMasks);
+                break;
+
+            case PRED_2NxnU:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[1]; /* 25% top */
+                refMasks[1] = m_splitRefIdx[0] | m_splitRefIdx[1] | m_splitRefIdx[2] | m_splitRefIdx[3]; /* 75% bot */
+
+                slave.checkInter_rd5_6(md.pred[PRED_2NxnU], pmode.cuGeom, SIZE_2NxnU, refMasks);
+                break;
+
+            case PRED_2NxnD:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[1] | m_splitRefIdx[2] | m_splitRefIdx[3]; /* 75% top */
+                refMasks[1] = m_splitRefIdx[2] | m_splitRefIdx[3]; /* 25% bot */
+                slave.checkInter_rd5_6(md.pred[PRED_2NxnD], pmode.cuGeom, SIZE_2NxnD, refMasks);
+                break;
+
+            case PRED_nLx2N:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[2]; /* 25% left */
+                refMasks[1] = m_splitRefIdx[0] | m_splitRefIdx[1] | m_splitRefIdx[2] | m_splitRefIdx[3]; /* 75% right */
+
+                slave.checkInter_rd5_6(md.pred[PRED_nLx2N], pmode.cuGeom, SIZE_nLx2N, refMasks);
+                break;
+
+            case PRED_nRx2N:
+                refMasks[0] = m_splitRefIdx[0] | m_splitRefIdx[1] | m_splitRefIdx[2] | m_splitRefIdx[3]; /* 75% left */
+                refMasks[1] = m_splitRefIdx[1] | m_splitRefIdx[3]; /* 25% right */
+                slave.checkInter_rd5_6(md.pred[PRED_nRx2N], pmode.cuGeom, SIZE_nRx2N, refMasks);
+                break;
+
+            default:
+                X265_CHECK(0, "invalid job ID for parallel mode analysis\n");
+                break;
+            }
+        }
+
+        /* try to acquire another job; task stays -1 when none remain */
+        task = -1;
+        pmode.m_lock.acquire();
+        if (pmode.m_jobTotal > pmode.m_jobAcquired)
+            task = pmode.m_jobAcquired++;
+        pmode.m_lock.release();
+    }
+    while (task >= 0);
+}
+
+/* Recursive inter-CU mode decision with distributed (parallel) mode analysis.
+ * Merge/skip is evaluated first at this depth, then the four sub-CUs are
+ * analyzed recursively; the remaining prediction modes are queued as PMODE
+ * jobs and offered to bonded peer threads while this thread participates.
+ * Returns a bitmask of the motion reference indices chosen by the best mode,
+ * consumed by the parent CU when X265_REF_LIMIT_DEPTH is enabled. */
+uint32_t Analysis::compressInterCU_dist(const CUData& parentCTU, const CUGeom& cuGeom, int32_t qp)
+{
+    uint32_t depth = cuGeom.depth;
+    uint32_t cuAddr = parentCTU.m_cuAddr;
+    ModeDepth& md = m_modeDepth[depth];
+    md.bestMode = NULL;
+
+    bool mightSplit = !(cuGeom.flags & CUGeom::LEAF);
+    bool mightNotSplit = !(cuGeom.flags & CUGeom::SPLIT_MANDATORY);
+    /* top-skip heuristic is only used at rdLevel <= 4 */
+    uint32_t minDepth = m_param->rdLevel <= 4 ? topSkipMinDepth(parentCTU, cuGeom) : 0;
+    uint32_t splitRefs[4] = { 0, 0, 0, 0 };
+
+    X265_CHECK(m_param->rdLevel >= 2, "compressInterCU_dist does not support RD 0 or 1\n");
+
+    /* job batch for per-mode analysis tasks at this CU */
+    PMODE pmode(*this, cuGeom);
+
+    /* Step 1: evaluate merge/skip at this depth (may allow early-outs below) */
+    if (mightNotSplit && depth >= minDepth)
+    {
+        /* Initialize all prediction CUs based on parentCTU */
+        md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom, qp);
+        md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom, qp);
+
+        if (m_param->rdLevel <= 4)
+            checkMerge2Nx2N_rd0_4(md.pred[PRED_SKIP], md.pred[PRED_MERGE], cuGeom);
+        else
+            checkMerge2Nx2N_rd5_6(md.pred[PRED_SKIP], md.pred[PRED_MERGE], cuGeom, false);
+    }
+
+    /* skip the split recursion when the merge/skip result makes further
+     * splitting unlikely to win */
+    bool bNoSplit = false;
+    bool splitIntra = true;
+    if (md.bestMode)
+    {
+        bNoSplit = md.bestMode->cu.isSkipped(0);
+        if (mightSplit && depth && depth >= minDepth && !bNoSplit && m_param->rdLevel <= 4)
+            bNoSplit = recursionDepthCheck(parentCTU, cuGeom, *md.bestMode);
+    }
+
+    /* Step 2: recursively analyze the four split sub-CUs in series */
+    if (mightSplit && !bNoSplit)
+    {
+        Mode* splitPred = &md.pred[PRED_SPLIT];
+        splitPred->initCosts();
+        CUData* splitCU = &splitPred->cu;
+        splitCU->initSubCU(parentCTU, cuGeom, qp);
+
+        uint32_t nextDepth = depth + 1;
+        ModeDepth& nd = m_modeDepth[nextDepth];
+        invalidateContexts(nextDepth);
+        Entropy* nextContext = &m_rqt[depth].cur;
+        int nextQP = qp;
+        splitIntra = false;
+
+        for (uint32_t subPartIdx = 0; subPartIdx < 4; subPartIdx++)
+        {
+            const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + subPartIdx);
+            if (childGeom.flags & CUGeom::PRESENT)
+            {
+                m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.absPartIdx);
+                /* each child starts from the entropy state left by its
+                 * predecessor (or this depth's context for the first child) */
+                m_rqt[nextDepth].cur.load(*nextContext);
+
+                if (m_slice->m_pps->bUseDQP && nextDepth <= m_slice->m_pps->maxCuDQPDepth)
+                    nextQP = setLambdaFromQP(parentCTU, calculateQpforCuSize(parentCTU, childGeom));
+
+                splitRefs[subPartIdx] = compressInterCU_dist(parentCTU, childGeom, nextQP);
+
+                // Save best CU and pred data for this sub CU
+                splitIntra |= nd.bestMode->cu.isIntra(0);
+                splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
+                splitPred->addSubCosts(*nd.bestMode);
+
+                nd.bestMode->reconYuv.copyToPartYuv(splitPred->reconYuv, childGeom.numPartitions * subPartIdx);
+                nextContext = &nd.bestMode->contexts;
+            }
+            else
+                splitCU->setEmptyPart(childGeom, subPartIdx);
+        }
+        nextContext->store(splitPred->contexts);
+
+        if (mightNotSplit)
+            addSplitFlagCost(*splitPred, cuGeom.depth);
+        else
+            updateModeCost(*splitPred);
+
+        checkDQPForSplitPred(*splitPred, cuGeom);
+    }
+
+    /* Step 3: queue per-mode analysis jobs for this depth and distribute
+     * them to bonded peer threads */
+    if (mightNotSplit && depth >= minDepth)
+    {
+        int bTryAmp = m_slice->m_sps->maxAMPDepth > depth;
+        /* with limitReferences, only try intra if some sub-CU chose intra */
+        int bTryIntra = (m_slice->m_sliceType != B_SLICE || m_param->bIntraInBFrames) && (!m_param->limitReferences || splitIntra);
+
+        if (m_slice->m_pps->bUseDQP && depth <= m_slice->m_pps->maxCuDQPDepth && m_slice->m_pps->maxCuDQPDepth != 0)
+            setLambdaFromQP(parentCTU, qp);
+
+        if (bTryIntra)
+        {
+            md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
+            /* 8x8 CUs may additionally try NxN intra at rdLevel >= 5 */
+            if (cuGeom.log2CUSize == 3 && m_slice->m_sps->quadtreeTULog2MinSize < 3 && m_param->rdLevel >= 5)
+                md.pred[PRED_INTRA_NxN].cu.initSubCU(parentCTU, cuGeom, qp);
+            pmode.modes[pmode.m_jobTotal++] = PRED_INTRA;
+        }
+        md.pred[PRED_2Nx2N].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_2Nx2N;
+        /* BIDIR is evaluated as part of the 2Nx2N job, not queued separately */
+        md.pred[PRED_BIDIR].cu.initSubCU(parentCTU, cuGeom, qp);
+        if (m_param->bEnableRectInter)
+        {
+            md.pred[PRED_2NxN].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_2NxN;
+            md.pred[PRED_Nx2N].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_Nx2N;
+        }
+        if (bTryAmp)
+        {
+            md.pred[PRED_2NxnU].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_2NxnU;
+            md.pred[PRED_2NxnD].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_2NxnD;
+            md.pred[PRED_nLx2N].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_nLx2N;
+            md.pred[PRED_nRx2N].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_nRx2N;
+        }
+
+        /* publish the sub-CU reference masks for processPmode() to read */
+        m_splitRefIdx[0] = splitRefs[0]; m_splitRefIdx[1] = splitRefs[1]; m_splitRefIdx[2] = splitRefs[2]; m_splitRefIdx[3] = splitRefs[3];
+
+        pmode.tryBondPeers(*m_frame->m_encData->m_jobProvider, pmode.m_jobTotal);
+
+        /* participate in processing jobs, until all are distributed */
+        processPmode(pmode, *this);
+
+        /* the master worker thread (this one) does merge analysis. By doing
+         * merge after all the other jobs are at least started, we usually avoid
+         * blocking on another thread */
+
+        if (m_param->rdLevel <= 4)
+        {
+            {
+                /* wait for all bonded peers to finish their mode jobs */
+                ProfileCUScope(parentCTU, pmodeBlockTime, countPModeMasters);
+                pmode.waitForExit();
+            }
+
+            /* select best inter mode based on sa8d cost */
+            Mode *bestInter = &md.pred[PRED_2Nx2N];
+
+            if (m_param->bEnableRectInter)
+            {
+                if (md.pred[PRED_Nx2N].sa8dCost < bestInter->sa8dCost)
+                    bestInter = &md.pred[PRED_Nx2N];
+                if (md.pred[PRED_2NxN].sa8dCost < bestInter->sa8dCost)
+                    bestInter = &md.pred[PRED_2NxN];
+            }
+
+            if (bTryAmp)
+            {
+                if (md.pred[PRED_2NxnU].sa8dCost < bestInter->sa8dCost)
+                    bestInter = &md.pred[PRED_2NxnU];
+                if (md.pred[PRED_2NxnD].sa8dCost < bestInter->sa8dCost)
+                    bestInter = &md.pred[PRED_2NxnD];
+                if (md.pred[PRED_nLx2N].sa8dCost < bestInter->sa8dCost)
+                    bestInter = &md.pred[PRED_nLx2N];
+                if (md.pred[PRED_nRx2N].sa8dCost < bestInter->sa8dCost)
+                    bestInter = &md.pred[PRED_nRx2N];
+            }
+
+            if (m_param->rdLevel > 2)
+            {
+                /* RD selection between merge, inter, bidir and intra */
+                if (!m_bChromaSa8d) /* When m_bChromaSa8d is enabled, chroma MC has already been done */
+                {
+                    uint32_t numPU = bestInter->cu.getNumPartInter(0);
+                    for (uint32_t puIdx = 0; puIdx < numPU; puIdx++)
+                    {
+                        PredictionUnit pu(bestInter->cu, cuGeom, puIdx);
+                        motionCompensation(bestInter->cu, pu, bestInter->predYuv, false, true);
+                    }
+                }
+                encodeResAndCalcRdInterCU(*bestInter, cuGeom);
+                checkBestMode(*bestInter, depth);
+
+                /* If BIDIR is available and within 17/16 of best inter option, choose by RDO */
+                if (m_slice->m_sliceType == B_SLICE && md.pred[PRED_BIDIR].sa8dCost != MAX_INT64 &&
+                    md.pred[PRED_BIDIR].sa8dCost * 16 <= bestInter->sa8dCost * 17)
+                {
+                    encodeResAndCalcRdInterCU(md.pred[PRED_BIDIR], cuGeom);
+                    checkBestMode(md.pred[PRED_BIDIR], depth);
+                }
+
+                if (bTryIntra)
+                    checkBestMode(md.pred[PRED_INTRA], depth);
+            }
+            else /* m_param->rdLevel == 2 */
+            {
+                if (!md.bestMode || bestInter->sa8dCost < md.bestMode->sa8dCost)
+                    md.bestMode = bestInter;
+
+                if (m_slice->m_sliceType == B_SLICE && md.pred[PRED_BIDIR].sa8dCost < md.bestMode->sa8dCost)
+                    md.bestMode = &md.pred[PRED_BIDIR];
+
+                if (bTryIntra && md.pred[PRED_INTRA].sa8dCost < md.bestMode->sa8dCost)
+                {
+                    md.bestMode = &md.pred[PRED_INTRA];
+                    encodeIntraInInter(*md.bestMode, cuGeom);
+                }
+                else if (!md.bestMode->cu.m_mergeFlag[0])
+                {
+                    /* finally code the best mode selected from SA8D costs */
+                    uint32_t numPU = md.bestMode->cu.getNumPartInter(0);
+                    for (uint32_t puIdx = 0; puIdx < numPU; puIdx++)
+                    {
+                        PredictionUnit pu(md.bestMode->cu, cuGeom, puIdx);
+                        motionCompensation(md.bestMode->cu, pu, md.bestMode->predYuv, false, true);
+                    }
+                    encodeResAndCalcRdInterCU(*md.bestMode, cuGeom);
+                }
+            }
+        }
+        else
+        {
+            /* rdLevel >= 5: every mode job produced a full RD cost, so best
+             * mode selection is a plain rdCost comparison */
+            {
+                ProfileCUScope(parentCTU, pmodeBlockTime, countPModeMasters);
+                pmode.waitForExit();
+            }
+
+            checkBestMode(md.pred[PRED_2Nx2N], depth);
+            if (m_slice->m_sliceType == B_SLICE && md.pred[PRED_BIDIR].sa8dCost < MAX_INT64)
+                checkBestMode(md.pred[PRED_BIDIR], depth);
+
+            if (m_param->bEnableRectInter)
+            {
+                checkBestMode(md.pred[PRED_Nx2N], depth);
+                checkBestMode(md.pred[PRED_2NxN], depth);
+            }
+
+            if (bTryAmp)
+            {
+                checkBestMode(md.pred[PRED_2NxnU], depth);
+                checkBestMode(md.pred[PRED_2NxnD], depth);
+                checkBestMode(md.pred[PRED_nLx2N], depth);
+                checkBestMode(md.pred[PRED_nRx2N], depth);
+            }
+
+            if (bTryIntra)
+            {
+                checkBestMode(md.pred[PRED_INTRA], depth);
+                if (cuGeom.log2CUSize == 3 && m_slice->m_sps->quadtreeTULog2MinSize < 3)
+                    checkBestMode(md.pred[PRED_INTRA_NxN], depth);
+            }
+        }
+
+        if (m_bTryLossless)
+            tryLossless(cuGeom);
+
+        if (mightSplit)
+            addSplitFlagCost(*md.bestMode, cuGeom.depth);
+    }
+
+    /* compare split RD cost against best cost */
+    if (mightSplit && !bNoSplit)
+        checkBestMode(md.pred[PRED_SPLIT], depth);
+
+    /* determine which motion references the parent CU should search */
+    uint32_t refMask;
+    if (!(m_param->limitReferences & X265_REF_LIMIT_DEPTH))
+        refMask = 0;
+    else if (md.bestMode == &md.pred[PRED_SPLIT])
+        refMask = splitRefs[0] | splitRefs[1] | splitRefs[2] | splitRefs[3];
+    else
+    {
+        /* use best merge/inter mode, in case of intra use 2Nx2N inter references */
+        CUData& cu = md.bestMode->cu.isIntra(0) ? md.pred[PRED_2Nx2N].cu : md.bestMode->cu;
+        uint32_t numPU = cu.getNumPartInter(0);
+        refMask = 0;
+        for (uint32_t puIdx = 0, subPartIdx = 0; puIdx < numPU; puIdx++, subPartIdx += cu.getPUOffset(puIdx, 0))
+            refMask |= cu.getBestRefIdx(subPartIdx);
+    }
+
+    if (mightNotSplit)
+    {
+        /* early-out statistics */
+        FrameData& curEncData = *m_frame->m_encData;
+        FrameData::RCStatCU& cuStat = curEncData.m_cuStat[parentCTU.m_cuAddr];
+        /* running average of best RD cost per depth, used by recursionDepthCheck */
+        uint64_t temp = cuStat.avgCost[depth] * cuStat.count[depth];
+        cuStat.count[depth] += 1;
+        cuStat.avgCost[depth] = (temp + md.bestMode->rdCost) / cuStat.count[depth];
+    }
+
+    /* Copy best data to encData CTU and recon */
+    X265_CHECK(md.bestMode->ok(), "best mode is not ok");
+    md.bestMode->cu.copyToPic(depth);
+    md.bestMode->reconYuv.copyToPicYuv(*m_frame->m_reconPic, cuAddr, cuGeom.absPartIdx);
+
+    return refMask;
+}
+
+uint32_t Analysis::compressInterCU_rd0_4(const CUData& parentCTU, const CUGeom& cuGeom, int32_t qp)
+{
+    uint32_t depth = cuGeom.depth;
+    uint32_t cuAddr = parentCTU.m_cuAddr;
+    ModeDepth& md = m_modeDepth[depth];
+    md.bestMode = NULL;
+
+    PicYuv& reconPic = *m_frame->m_reconPic;
+
+    bool mightSplit = !(cuGeom.flags & CUGeom::LEAF);
+    bool mightNotSplit = !(cuGeom.flags & CUGeom::SPLIT_MANDATORY);
+    uint32_t minDepth = topSkipMinDepth(parentCTU, cuGeom);
+    bool earlyskip = false;
+    bool splitIntra = true;
+    uint32_t splitRefs[4] = { 0, 0, 0, 0 };
+    /* Step 1. Evaluate Merge/Skip candidates for likely early-outs */
+    if (mightNotSplit && depth >= minDepth)
+    {
+        /* Compute Merge Cost */
+        md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom, qp);
+        md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom, qp);
+        checkMerge2Nx2N_rd0_4(md.pred[PRED_SKIP], md.pred[PRED_MERGE], cuGeom);
+        if (m_param->rdLevel)
+            earlyskip = m_param->bEnableEarlySkip && md.bestMode && md.bestMode->cu.isSkipped(0); // TODO: sa8d threshold per depth
+    }
+
+    bool bNoSplit = false;
+    if (md.bestMode)
+    {
+        bNoSplit = md.bestMode->cu.isSkipped(0);
+        if (mightSplit && depth && depth >= minDepth && !bNoSplit)
+            bNoSplit = recursionDepthCheck(parentCTU, cuGeom, *md.bestMode);
+    }
+
+    /* Step 2. Evaluate each of the 4 split sub-blocks in series */
+    if (mightSplit && !bNoSplit)
+    {
+        Mode* splitPred = &md.pred[PRED_SPLIT];
+        splitPred->initCosts();
+        CUData* splitCU = &splitPred->cu;
+        splitCU->initSubCU(parentCTU, cuGeom, qp);
+
+        uint32_t nextDepth = depth + 1;
+        ModeDepth& nd = m_modeDepth[nextDepth];
+        invalidateContexts(nextDepth);
+        Entropy* nextContext = &m_rqt[depth].cur;
+        int nextQP = qp;
+        splitIntra = false;
+
+        for (uint32_t subPartIdx = 0; subPartIdx < 4; subPartIdx++)
+        {
+            const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + subPartIdx);
+            if (childGeom.flags & CUGeom::PRESENT)
+            {
+                m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.absPartIdx);
+                m_rqt[nextDepth].cur.load(*nextContext);
+
+                if (m_slice->m_pps->bUseDQP && nextDepth <= m_slice->m_pps->maxCuDQPDepth)
+                    nextQP = setLambdaFromQP(parentCTU, calculateQpforCuSize(parentCTU, childGeom));
+
+                splitRefs[subPartIdx] = compressInterCU_rd0_4(parentCTU, childGeom, nextQP);
+
+                // Save best CU and pred data for this sub CU
+                splitIntra |= nd.bestMode->cu.isIntra(0);
+                splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
+                splitPred->addSubCosts(*nd.bestMode);
+
+                if (m_param->rdLevel)
+                    nd.bestMode->reconYuv.copyToPartYuv(splitPred->reconYuv, childGeom.numPartitions * subPartIdx);
+                else
+                    nd.bestMode->predYuv.copyToPartYuv(splitPred->predYuv, childGeom.numPartitions * subPartIdx);
+                if (m_param->rdLevel > 1)
+                    nextContext = &nd.bestMode->contexts;
+            }
+            else
+                splitCU->setEmptyPart(childGeom, subPartIdx);
+        }
+        nextContext->store(splitPred->contexts);
+
+        if (mightNotSplit)
+            addSplitFlagCost(*splitPred, cuGeom.depth);
+        else if (m_param->rdLevel > 1)
+            updateModeCost(*splitPred);
+        else
+            splitPred->sa8dCost = m_rdCost.calcRdSADCost((uint32_t)splitPred->distortion, splitPred->sa8dBits);
+    }
+
+    /* Split CUs
+     *   0  1
+     *   2  3 */
+    uint32_t allSplitRefs = splitRefs[0] | splitRefs[1] | splitRefs[2] | splitRefs[3];
+    /* Step 3. Evaluate ME (2Nx2N, rect, amp) and intra modes at current depth */
+    if (mightNotSplit && depth >= minDepth)
+    {
+        if (m_slice->m_pps->bUseDQP && depth <= m_slice->m_pps->maxCuDQPDepth && m_slice->m_pps->maxCuDQPDepth != 0)
+            setLambdaFromQP(parentCTU, qp);
+
+        if (!earlyskip)
+        {
+            uint32_t refMasks[2];
+            refMasks[0] = allSplitRefs;
+            md.pred[PRED_2Nx2N].cu.initSubCU(parentCTU, cuGeom, qp);
+            checkInter_rd0_4(md.pred[PRED_2Nx2N], cuGeom, SIZE_2Nx2N, refMasks);
+
+            if (m_param->limitReferences & X265_REF_LIMIT_CU)
+            {
+                CUData& cu = md.pred[PRED_2Nx2N].cu;
+                uint32_t refMask = cu.getBestRefIdx(0);
+                allSplitRefs = splitRefs[0] = splitRefs[1] = splitRefs[2] = splitRefs[3] = refMask;
+            }
+
+            if (m_slice->m_sliceType == B_SLICE)
+            {
+                md.pred[PRED_BIDIR].cu.initSubCU(parentCTU, cuGeom, qp);
+                checkBidir2Nx2N(md.pred[PRED_2Nx2N], md.pred[PRED_BIDIR], cuGeom);
+            }
+
+            Mode *bestInter = &md.pred[PRED_2Nx2N];
+            if (m_param->bEnableRectInter)
+            {
+                refMasks[0] = splitRefs[0] | splitRefs[2]; /* left */
+                refMasks[1] = splitRefs[1] | splitRefs[3]; /* right */
+                md.pred[PRED_Nx2N].cu.initSubCU(parentCTU, cuGeom, qp);
+                checkInter_rd0_4(md.pred[PRED_Nx2N], cuGeom, SIZE_Nx2N, refMasks);
+                if (md.pred[PRED_Nx2N].sa8dCost < bestInter->sa8dCost)
+                    bestInter = &md.pred[PRED_Nx2N];
+
+                refMasks[0] = splitRefs[0] | splitRefs[1]; /* top */
+                refMasks[1] = splitRefs[2] | splitRefs[3]; /* bot */
+                md.pred[PRED_2NxN].cu.initSubCU(parentCTU, cuGeom, qp);
+                checkInter_rd0_4(md.pred[PRED_2NxN], cuGeom, SIZE_2NxN, refMasks);
+                if (md.pred[PRED_2NxN].sa8dCost < bestInter->sa8dCost)
+                    bestInter = &md.pred[PRED_2NxN];
+            }
+
+            if (m_slice->m_sps->maxAMPDepth > depth)
+            {
+                bool bHor = false, bVer = false;
+                if (bestInter->cu.m_partSize[0] == SIZE_2NxN)
+                    bHor = true;
+                else if (bestInter->cu.m_partSize[0] == SIZE_Nx2N)
+                    bVer = true;
+                else if (bestInter->cu.m_partSize[0] == SIZE_2Nx2N &&
+                         md.bestMode && md.bestMode->cu.getQtRootCbf(0))
+                {
+                    bHor = true;
+                    bVer = true;
+                }
+
+                if (bHor)
+                {
+                    refMasks[0] = splitRefs[0] | splitRefs[1]; /* 25% top */
+                    refMasks[1] = allSplitRefs;                /* 75% bot */
+                    md.pred[PRED_2NxnU].cu.initSubCU(parentCTU, cuGeom, qp);
+                    checkInter_rd0_4(md.pred[PRED_2NxnU], cuGeom, SIZE_2NxnU, refMasks);
+                    if (md.pred[PRED_2NxnU].sa8dCost < bestInter->sa8dCost)
+                        bestInter = &md.pred[PRED_2NxnU];
+
+                    refMasks[0] = allSplitRefs;                /* 75% top */
+                    refMasks[1] = splitRefs[2] | splitRefs[3]; /* 25% bot */
+                    md.pred[PRED_2NxnD].cu.initSubCU(parentCTU, cuGeom, qp);
+                    checkInter_rd0_4(md.pred[PRED_2NxnD], cuGeom, SIZE_2NxnD, refMasks);
+                    if (md.pred[PRED_2NxnD].sa8dCost < bestInter->sa8dCost)
+                        bestInter = &md.pred[PRED_2NxnD];
+                }
+                if (bVer)
+                {
+                    refMasks[0] = splitRefs[0] | splitRefs[2]; /* 25% left */
+                    refMasks[1] = allSplitRefs;                /* 75% right */
+                    md.pred[PRED_nLx2N].cu.initSubCU(parentCTU, cuGeom, qp);
+                    checkInter_rd0_4(md.pred[PRED_nLx2N], cuGeom, SIZE_nLx2N, refMasks);
+                    if (md.pred[PRED_nLx2N].sa8dCost < bestInter->sa8dCost)
+                        bestInter = &md.pred[PRED_nLx2N];
+
+                    refMasks[0] = allSplitRefs;                /* 75% left */
+                    refMasks[1] = splitRefs[1] | splitRefs[3]; /* 25% right */
+                    md.pred[PRED_nRx2N].cu.initSubCU(parentCTU, cuGeom, qp);
+                    checkInter_rd0_4(md.pred[PRED_nRx2N], cuGeom, SIZE_nRx2N, refMasks);
+                    if (md.pred[PRED_nRx2N].sa8dCost < bestInter->sa8dCost)
+                        bestInter = &md.pred[PRED_nRx2N];
+                }
+            }
+            bool bTryIntra = m_slice->m_sliceType != B_SLICE || m_param->bIntraInBFrames;
+            if (m_param->rdLevel >= 3)
+            {
+                /* Calculate RD cost of best inter option */
+                if (!m_bChromaSa8d) /* When m_bChromaSa8d is enabled, chroma MC has already been done */
+                {
+                    uint32_t numPU = bestInter->cu.getNumPartInter(0);
+                    for (uint32_t puIdx = 0; puIdx < numPU; puIdx++)
+                    {
+                        PredictionUnit pu(bestInter->cu, cuGeom, puIdx);
+                        motionCompensation(bestInter->cu, pu, bestInter->predYuv, false, true);
+                    }
+                }
+                encodeResAndCalcRdInterCU(*bestInter, cuGeom);
+                checkBestMode(*bestInter, depth);
+
+                /* If BIDIR is available and within 17/16 of best inter option, choose by RDO */
+                if (m_slice->m_sliceType == B_SLICE && md.pred[PRED_BIDIR].sa8dCost != MAX_INT64 &&
+                    md.pred[PRED_BIDIR].sa8dCost * 16 <= bestInter->sa8dCost * 17)
+                {
+                    encodeResAndCalcRdInterCU(md.pred[PRED_BIDIR], cuGeom);
+                    checkBestMode(md.pred[PRED_BIDIR], depth);
+                }
+
+                if ((bTryIntra && md.bestMode->cu.getQtRootCbf(0)) ||
+                    md.bestMode->sa8dCost == MAX_INT64)
+                {
+                    if (!m_param->limitReferences || splitIntra)
+                    {
+                        ProfileCounter(parentCTU, totalIntraCU[cuGeom.depth]);
+                        md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
+                        checkIntraInInter(md.pred[PRED_INTRA], cuGeom);
+                        encodeIntraInInter(md.pred[PRED_INTRA], cuGeom);
+                        checkBestMode(md.pred[PRED_INTRA], depth);
+                    }
+                    else
+                    {
+                        ProfileCounter(parentCTU, skippedIntraCU[cuGeom.depth]);
+                    }
+                }
+            }
+            else
+            {
+                /* SA8D choice between merge/skip, inter, bidir, and intra */
+                if (!md.bestMode || bestInter->sa8dCost < md.bestMode->sa8dCost)
+                    md.bestMode = bestInter;
+
+                if (m_slice->m_sliceType == B_SLICE &&
+                    md.pred[PRED_BIDIR].sa8dCost < md.bestMode->sa8dCost)
+                    md.bestMode = &md.pred[PRED_BIDIR];
+
+                if (bTryIntra || md.bestMode->sa8dCost == MAX_INT64)
+                {
+                    if (!m_param->limitReferences || splitIntra)
+                    {
+                        ProfileCounter(parentCTU, totalIntraCU[cuGeom.depth]);
+                        md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
+                        checkIntraInInter(md.pred[PRED_INTRA], cuGeom);
+                        if (md.pred[PRED_INTRA].sa8dCost < md.bestMode->sa8dCost)
+                            md.bestMode = &md.pred[PRED_INTRA];
+                    }
+                    else
+                    {
+                        ProfileCounter(parentCTU, skippedIntraCU[cuGeom.depth]);
+                    }
+                }
+
+                /* finally code the best mode selected by SA8D costs:
+                 * RD level 2 - fully encode the best mode
+                 * RD level 1 - generate recon pixels
+                 * RD level 0 - generate chroma prediction */
+                if (md.bestMode->cu.m_mergeFlag[0] && md.bestMode->cu.m_partSize[0] == SIZE_2Nx2N)
+                {
+                    /* prediction already generated for this CU, and if rd level
+                     * is not 0, it is already fully encoded */
+                }
+                else if (md.bestMode->cu.isInter(0))
+                {
+                    uint32_t numPU = md.bestMode->cu.getNumPartInter(0);
+                    for (uint32_t puIdx = 0; puIdx < numPU; puIdx++)
+                    {
+                        PredictionUnit pu(md.bestMode->cu, cuGeom, puIdx);
+                        motionCompensation(md.bestMode->cu, pu, md.bestMode->predYuv, false, true);
+                    }
+                    if (m_param->rdLevel == 2)
+                        encodeResAndCalcRdInterCU(*md.bestMode, cuGeom);
+                    else if (m_param->rdLevel == 1)
+                    {
+                        /* generate recon pixels with no rate distortion considerations */
+                        CUData& cu = md.bestMode->cu;
+
+                        uint32_t tuDepthRange[2];
+                        cu.getInterTUQtDepthRange(tuDepthRange, 0);
+
+                        m_rqt[cuGeom.depth].tmpResiYuv.subtract(*md.bestMode->fencYuv, md.bestMode->predYuv, cuGeom.log2CUSize);
+                        residualTransformQuantInter(*md.bestMode, cuGeom, 0, 0, tuDepthRange);
+                        if (cu.getQtRootCbf(0))
+                            md.bestMode->reconYuv.addClip(md.bestMode->predYuv, m_rqt[cuGeom.depth].tmpResiYuv, cu.m_log2CUSize[0]);
+                        else
+                        {
+                            md.bestMode->reconYuv.copyFromYuv(md.bestMode->predYuv);
+                            if (cu.m_mergeFlag[0] && cu.m_partSize[0] == SIZE_2Nx2N)
+                                cu.setPredModeSubParts(MODE_SKIP);
+                        }
+                    }
+                }
+                else
+                {
+                    if (m_param->rdLevel == 2)
+                        encodeIntraInInter(*md.bestMode, cuGeom);
+                    else if (m_param->rdLevel == 1)
+                    {
+                        /* generate recon pixels with no rate distortion considerations */
+                        CUData& cu = md.bestMode->cu;
+
+                        uint32_t tuDepthRange[2];
+                        cu.getIntraTUQtDepthRange(tuDepthRange, 0);
+
+                        residualTransformQuantIntra(*md.bestMode, cuGeom, 0, 0, tuDepthRange);
+                        getBestIntraModeChroma(*md.bestMode, cuGeom);
+                        residualQTIntraChroma(*md.bestMode, cuGeom, 0, 0);
+                        md.bestMode->reconYuv.copyFromPicYuv(reconPic, cu.m_cuAddr, cuGeom.absPartIdx); // TODO:
+                    }
+                }
+            }
+        } // !earlyskip
+
+        if (m_bTryLossless)
+            tryLossless(cuGeom);
+
+        if (mightSplit)
+            addSplitFlagCost(*md.bestMode, cuGeom.depth);
+    }
+
+    if (mightSplit && !bNoSplit)
+    {
+        Mode* splitPred = &md.pred[PRED_SPLIT];
+        if (!md.bestMode)
+            md.bestMode = splitPred;
+        else if (m_param->rdLevel > 1)
+            checkBestMode(*splitPred, cuGeom.depth);
+        else if (splitPred->sa8dCost < md.bestMode->sa8dCost)
+            md.bestMode = splitPred;
+
+        checkDQPForSplitPred(*md.bestMode, cuGeom);
+    }
+
+    /* determine which motion references the parent CU should search */
+    uint32_t refMask;
+    if (!(m_param->limitReferences & X265_REF_LIMIT_DEPTH))
+        refMask = 0;
+    else if (md.bestMode == &md.pred[PRED_SPLIT])
+        refMask = allSplitRefs;
+    else
+    {
+        /* use best merge/inter mode, in case of intra use 2Nx2N inter references */
+        CUData& cu = md.bestMode->cu.isIntra(0) ? md.pred[PRED_2Nx2N].cu : md.bestMode->cu;
+        uint32_t numPU = cu.getNumPartInter(0);
+        refMask = 0;
+        for (uint32_t puIdx = 0, subPartIdx = 0; puIdx < numPU; puIdx++, subPartIdx += cu.getPUOffset(puIdx, 0))
+            refMask |= cu.getBestRefIdx(subPartIdx);
+    }
+    
+    if (mightNotSplit)
+    {
+        /* early-out statistics */
+        FrameData& curEncData = *m_frame->m_encData;
+        FrameData::RCStatCU& cuStat = curEncData.m_cuStat[parentCTU.m_cuAddr];
+        uint64_t temp = cuStat.avgCost[depth] * cuStat.count[depth];
+        cuStat.count[depth] += 1;
+        cuStat.avgCost[depth] = (temp + md.bestMode->rdCost) / cuStat.count[depth];
+    }
+
+    /* Copy best data to encData CTU and recon */
+    X265_CHECK(md.bestMode->ok(), "best mode is not ok");
+    md.bestMode->cu.copyToPic(depth);
+    if (m_param->rdLevel)
+        md.bestMode->reconYuv.copyToPicYuv(reconPic, cuAddr, cuGeom.absPartIdx);
+
+    return refMask;
+}
+
+/* Recursive CU analysis for RD levels 5 and 6.
+ *
+ * Evaluates merge/skip first, then (unless an early skip was found) the four
+ * split sub-CUs, inter 2Nx2N / rectangular / AMP partitions and intra, keeping
+ * the lowest-cost candidate via checkBestMode().  The winning mode's CU data
+ * and reconstructed pixels are copied back to the frame before returning.
+ *
+ * parentCTU - CTU containing this CU
+ * cuGeom    - geometry (depth/size/position) of the CU being analyzed
+ * zOrder    - in/out z-scan offset used to index the shared analysis buffers
+ *             when analysisMode == X265_ANALYSIS_LOAD
+ * qp        - QP used to initialize the sub-CUs evaluated at this depth
+ *
+ * Returns a bitmask of the motion reference indices used by the best mode so
+ * the parent CU can restrict its own search; always 0 unless
+ * X265_REF_LIMIT_DEPTH is enabled. */
+uint32_t Analysis::compressInterCU_rd5_6(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t &zOrder, int32_t qp)
+{
+    uint32_t depth = cuGeom.depth;
+    ModeDepth& md = m_modeDepth[depth];
+    md.bestMode = NULL;
+
+    bool mightSplit = !(cuGeom.flags & CUGeom::LEAF);
+    bool mightNotSplit = !(cuGeom.flags & CUGeom::SPLIT_MANDATORY);
+
+    if (m_param->analysisMode == X265_ANALYSIS_LOAD)
+    {
+        /* Reuse path: if the saved first-pass analysis marked this exact CU
+         * (matching depth and z-order position) as SKIP, evaluate only the
+         * shared merge candidate and bypass the full split/mode search below */
+        uint8_t* reuseDepth  = &m_reuseInterDataCTU->depth[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
+        uint8_t* reuseModes  = &m_reuseInterDataCTU->modes[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
+        if (mightNotSplit && depth == reuseDepth[zOrder] && zOrder == cuGeom.absPartIdx && reuseModes[zOrder] == MODE_SKIP)
+        {
+            md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom, qp);
+            md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom, qp);
+            checkMerge2Nx2N_rd5_6(md.pred[PRED_SKIP], md.pred[PRED_MERGE], cuGeom, true);
+
+            if (m_bTryLossless)
+                tryLossless(cuGeom);
+
+            if (mightSplit)
+                addSplitFlagCost(*md.bestMode, cuGeom.depth);
+
+            // increment zOrder offset to point to next best depth in sharedDepth buffer
+            zOrder += g_depthInc[g_maxCUDepth - 1][reuseDepth[zOrder]];
+
+            mightSplit = false;
+            mightNotSplit = false;
+        }
+    }
+
+    bool foundSkip = false;
+    bool splitIntra = true;
+    uint32_t splitRefs[4] = { 0, 0, 0, 0 };
+    /* Step 1. Evaluate Merge/Skip candidates for likely early-outs */
+    if (mightNotSplit)
+    {
+        md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom, qp);
+        md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom, qp);
+        checkMerge2Nx2N_rd5_6(md.pred[PRED_SKIP], md.pred[PRED_MERGE], cuGeom, false);
+        /* a merge candidate with no coded residual (root CBF == 0) is a skip;
+         * it short-circuits the split evaluation below */
+        foundSkip = md.bestMode && !md.bestMode->cu.getQtRootCbf(0);
+    }
+
+    // estimate split cost
+    /* Step 2. Evaluate each of the 4 split sub-blocks in series */
+    if (mightSplit && !foundSkip)
+    {
+        Mode* splitPred = &md.pred[PRED_SPLIT];
+        splitPred->initCosts();
+        CUData* splitCU = &splitPred->cu;
+        splitCU->initSubCU(parentCTU, cuGeom, qp);
+
+        uint32_t nextDepth = depth + 1;
+        ModeDepth& nd = m_modeDepth[nextDepth];
+        invalidateContexts(nextDepth);
+        Entropy* nextContext = &m_rqt[depth].cur;
+        int nextQP = qp;
+        splitIntra = false;
+
+        for (uint32_t subPartIdx = 0; subPartIdx < 4; subPartIdx++)
+        {
+            const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + subPartIdx);
+            if (childGeom.flags & CUGeom::PRESENT)
+            {
+                m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.absPartIdx);
+                m_rqt[nextDepth].cur.load(*nextContext);
+
+                if (m_slice->m_pps->bUseDQP && nextDepth <= m_slice->m_pps->maxCuDQPDepth)
+                    nextQP = setLambdaFromQP(parentCTU, calculateQpforCuSize(parentCTU, childGeom));
+
+                splitRefs[subPartIdx] = compressInterCU_rd5_6(parentCTU, childGeom, zOrder, nextQP);
+
+                // Save best CU and pred data for this sub CU
+                splitIntra |= nd.bestMode->cu.isIntra(0);
+                splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
+                splitPred->addSubCosts(*nd.bestMode);
+                nd.bestMode->reconYuv.copyToPartYuv(splitPred->reconYuv, childGeom.numPartitions * subPartIdx);
+                /* entropy contexts are chained across the four children */
+                nextContext = &nd.bestMode->contexts;
+            }
+            else
+            {
+                /* sub-CU lies outside the picture: code nothing, but keep the
+                 * shared-analysis z-order cursor in step */
+                splitCU->setEmptyPart(childGeom, subPartIdx);
+                zOrder += g_depthInc[g_maxCUDepth - 1][nextDepth];
+            }
+        }
+        nextContext->store(splitPred->contexts);
+        if (mightNotSplit)
+            addSplitFlagCost(*splitPred, cuGeom.depth);
+        else
+            updateModeCost(*splitPred);
+
+        checkDQPForSplitPred(*splitPred, cuGeom);
+    }
+
+    /* Split CUs
+     *   0  1
+     *   2  3 */
+    uint32_t allSplitRefs = splitRefs[0] | splitRefs[1] | splitRefs[2] | splitRefs[3];
+    /* Step 3. Evaluate ME (2Nx2N, rect, amp) and intra modes at current depth */
+    if (mightNotSplit)
+    {
+        if (m_slice->m_pps->bUseDQP && depth <= m_slice->m_pps->maxCuDQPDepth && m_slice->m_pps->maxCuDQPDepth != 0)
+            setLambdaFromQP(parentCTU, qp);
+
+        if (!(foundSkip && m_param->bEnableEarlySkip))
+        {
+            uint32_t refMasks[2];
+            refMasks[0] = allSplitRefs;
+            md.pred[PRED_2Nx2N].cu.initSubCU(parentCTU, cuGeom, qp);
+            checkInter_rd5_6(md.pred[PRED_2Nx2N], cuGeom, SIZE_2Nx2N, refMasks);
+            checkBestMode(md.pred[PRED_2Nx2N], cuGeom.depth);
+
+            if (m_param->limitReferences & X265_REF_LIMIT_CU)
+            {
+                /* propagate the 2Nx2N winner's references to all later
+                 * partition searches at this depth */
+                CUData& cu = md.pred[PRED_2Nx2N].cu;
+                uint32_t refMask = cu.getBestRefIdx(0);
+                allSplitRefs = splitRefs[0] = splitRefs[1] = splitRefs[2] = splitRefs[3] = refMask;
+            }
+
+            if (m_slice->m_sliceType == B_SLICE)
+            {
+                md.pred[PRED_BIDIR].cu.initSubCU(parentCTU, cuGeom, qp);
+                checkBidir2Nx2N(md.pred[PRED_2Nx2N], md.pred[PRED_BIDIR], cuGeom);
+                if (md.pred[PRED_BIDIR].sa8dCost < MAX_INT64)
+                {
+                    encodeResAndCalcRdInterCU(md.pred[PRED_BIDIR], cuGeom);
+                    checkBestMode(md.pred[PRED_BIDIR], cuGeom.depth);
+                }
+            }
+
+            if (m_param->bEnableRectInter)
+            {
+                refMasks[0] = splitRefs[0] | splitRefs[2]; /* left */
+                refMasks[1] = splitRefs[1] | splitRefs[3]; /* right */
+                md.pred[PRED_Nx2N].cu.initSubCU(parentCTU, cuGeom, qp);
+                checkInter_rd5_6(md.pred[PRED_Nx2N], cuGeom, SIZE_Nx2N, refMasks);
+                checkBestMode(md.pred[PRED_Nx2N], cuGeom.depth);
+
+                refMasks[0] = splitRefs[0] | splitRefs[1]; /* top */
+                refMasks[1] = splitRefs[2] | splitRefs[3]; /* bot */
+                md.pred[PRED_2NxN].cu.initSubCU(parentCTU, cuGeom, qp);
+                checkInter_rd5_6(md.pred[PRED_2NxN], cuGeom, SIZE_2NxN, refMasks);
+                checkBestMode(md.pred[PRED_2NxN], cuGeom.depth);
+            }
+
+            // Try AMP (SIZE_2NxnU, SIZE_2NxnD, SIZE_nLx2N, SIZE_nRx2N)
+            if (m_slice->m_sps->maxAMPDepth > depth)
+            {
+                /* the best mode so far steers which asymmetric orientations
+                 * are worth trying */
+                bool bHor = false, bVer = false;
+                if (md.bestMode->cu.m_partSize[0] == SIZE_2NxN)
+                    bHor = true;
+                else if (md.bestMode->cu.m_partSize[0] == SIZE_Nx2N)
+                    bVer = true;
+                else if (md.bestMode->cu.m_partSize[0] == SIZE_2Nx2N && !md.bestMode->cu.m_mergeFlag[0])
+                {
+                    bHor = true;
+                    bVer = true;
+                }
+
+                if (bHor)
+                {
+                    refMasks[0] = splitRefs[0] | splitRefs[1]; /* 25% top */
+                    refMasks[1] = allSplitRefs;                /* 75% bot */
+                    md.pred[PRED_2NxnU].cu.initSubCU(parentCTU, cuGeom, qp);
+                    checkInter_rd5_6(md.pred[PRED_2NxnU], cuGeom, SIZE_2NxnU, refMasks);
+                    checkBestMode(md.pred[PRED_2NxnU], cuGeom.depth);
+
+                    refMasks[0] = allSplitRefs;                /* 75% top */
+                    refMasks[1] = splitRefs[2] | splitRefs[3]; /* 25% bot */
+                    md.pred[PRED_2NxnD].cu.initSubCU(parentCTU, cuGeom, qp);
+                    checkInter_rd5_6(md.pred[PRED_2NxnD], cuGeom, SIZE_2NxnD, refMasks);
+                    checkBestMode(md.pred[PRED_2NxnD], cuGeom.depth);
+                }
+                if (bVer)
+                {
+                    refMasks[0] = splitRefs[0] | splitRefs[2]; /* 25% left */
+                    refMasks[1] = allSplitRefs;                /* 75% right */
+                    md.pred[PRED_nLx2N].cu.initSubCU(parentCTU, cuGeom, qp);
+                    checkInter_rd5_6(md.pred[PRED_nLx2N], cuGeom, SIZE_nLx2N, refMasks);
+                    checkBestMode(md.pred[PRED_nLx2N], cuGeom.depth);
+
+                    refMasks[0] = allSplitRefs;                /* 75% left */
+                    refMasks[1] = splitRefs[1] | splitRefs[3]; /* 25% right */
+                    md.pred[PRED_nRx2N].cu.initSubCU(parentCTU, cuGeom, qp);
+                    checkInter_rd5_6(md.pred[PRED_nRx2N], cuGeom, SIZE_nRx2N, refMasks);
+                    checkBestMode(md.pred[PRED_nRx2N], cuGeom.depth);
+                }
+            }
+
+            if (m_slice->m_sliceType != B_SLICE || m_param->bIntraInBFrames)
+            {
+                /* only search intra when reference limiting is off, or when at
+                 * least one split sub-CU chose intra */
+                if (!m_param->limitReferences || splitIntra)
+                {
+                    ProfileCounter(parentCTU, totalIntraCU[cuGeom.depth]);
+                    md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
+                    checkIntra(md.pred[PRED_INTRA], cuGeom, SIZE_2Nx2N, NULL, NULL);
+                    checkBestMode(md.pred[PRED_INTRA], depth);
+
+                    /* NxN intra is only legal for 8x8 CUs with 4x4 TUs allowed */
+                    if (cuGeom.log2CUSize == 3 && m_slice->m_sps->quadtreeTULog2MinSize < 3)
+                    {
+                        md.pred[PRED_INTRA_NxN].cu.initSubCU(parentCTU, cuGeom, qp);
+                        checkIntra(md.pred[PRED_INTRA_NxN], cuGeom, SIZE_NxN, NULL, NULL);
+                        checkBestMode(md.pred[PRED_INTRA_NxN], depth);
+                    }
+                }
+                else
+                {
+                    ProfileCounter(parentCTU, skippedIntraCU[cuGeom.depth]);
+                }
+            }
+        }
+
+        if (m_bTryLossless)
+            tryLossless(cuGeom);
+
+        if (mightSplit)
+            addSplitFlagCost(*md.bestMode, cuGeom.depth);
+    }
+
+    /* compare split RD cost against best cost */
+    if (mightSplit && !foundSkip)
+        checkBestMode(md.pred[PRED_SPLIT], depth);
+
+    /* determine which motion references the parent CU should search */
+    uint32_t refMask;
+    if (!(m_param->limitReferences & X265_REF_LIMIT_DEPTH))
+        refMask = 0;
+    else if (md.bestMode == &md.pred[PRED_SPLIT])
+        refMask = allSplitRefs;
+    else
+    {
+        /* use best merge/inter mode, in case of intra use 2Nx2N inter references */
+        CUData& cu = md.bestMode->cu.isIntra(0) ? md.pred[PRED_2Nx2N].cu : md.bestMode->cu;
+        uint32_t numPU = cu.getNumPartInter(0);
+        refMask = 0;
+        for (uint32_t puIdx = 0, subPartIdx = 0; puIdx < numPU; puIdx++, subPartIdx += cu.getPUOffset(puIdx, 0))
+            refMask |= cu.getBestRefIdx(subPartIdx);
+    }
+
+    /* Copy best data to encData CTU and recon */
+    X265_CHECK(md.bestMode->ok(), "best mode is not ok");
+    md.bestMode->cu.copyToPic(depth);
+    md.bestMode->reconYuv.copyToPicYuv(*m_frame->m_reconPic, parentCTU.m_cuAddr, cuGeom.absPartIdx);
+
+    return refMask;
+}
+
+/* sets md.bestMode if a valid merge candidate is found, else leaves it NULL
+ *
+ * RD levels 0-4 merge/skip evaluation: every merge candidate is scored with
+ * SA8D (plus chroma SA8D when m_bChromaSa8d), then only the SA8D winner is
+ * fully encoded -- once as skip (no residual) and once with residual -- and
+ * the cheaper of the two becomes md.bestMode.  With rdLevel == 0 the SA8D
+ * winner is taken directly without residual coding. */
+void Analysis::checkMerge2Nx2N_rd0_4(Mode& skip, Mode& merge, const CUGeom& cuGeom)
+{
+    uint32_t depth = cuGeom.depth;
+    ModeDepth& md = m_modeDepth[depth];
+    Yuv *fencYuv = &md.fencYuv;
+
+    /* Note that these two Mode instances are named MERGE and SKIP but they may
+     * hold the reverse when the function returns. We toggle between the two modes */
+    Mode* tempPred = &merge;
+    Mode* bestPred = &skip;
+
+    X265_CHECK(m_slice->m_sliceType != I_SLICE, "Evaluating merge in I slice\n");
+
+    tempPred->initCosts();
+    tempPred->cu.setPartSizeSubParts(SIZE_2Nx2N);
+    tempPred->cu.setPredModeSubParts(MODE_INTER);
+    tempPred->cu.m_mergeFlag[0] = true;
+
+    bestPred->initCosts();
+    bestPred->cu.setPartSizeSubParts(SIZE_2Nx2N);
+    bestPred->cu.setPredModeSubParts(MODE_INTER);
+    bestPred->cu.m_mergeFlag[0] = true;
+
+    MVField candMvField[MRG_MAX_NUM_CANDS][2]; // double length for mv of both lists
+    uint8_t candDir[MRG_MAX_NUM_CANDS];
+    uint32_t numMergeCand = tempPred->cu.getInterMergeCandidates(0, 0, candMvField, candDir);
+    PredictionUnit pu(merge.cu, cuGeom, 0);
+
+    bestPred->sa8dCost = MAX_INT64;
+    int bestSadCand = -1;   // index of SA8D winner; -1 means no usable candidate
+    int sizeIdx = cuGeom.log2CUSize - 2;
+
+    for (uint32_t i = 0; i < numMergeCand; ++i)
+    {
+        /* with frame-parallel encoding, reject candidates whose vertical MV
+         * points beyond the rows the reference frame has finished encoding */
+        if (m_bFrameParallel &&
+            (candMvField[i][0].mv.y >= (m_param->searchRange + 1) * 4 ||
+            candMvField[i][1].mv.y >= (m_param->searchRange + 1) * 4))
+            continue;
+
+        tempPred->cu.m_mvpIdx[0][0] = (uint8_t)i; // merge candidate ID is stored in L0 MVP idx
+        X265_CHECK(m_slice->m_sliceType == B_SLICE || !(candDir[i] & 0x10), " invalid merge for P slice\n");
+        tempPred->cu.m_interDir[0] = candDir[i];
+        tempPred->cu.m_mv[0][0] = candMvField[i][0].mv;
+        tempPred->cu.m_mv[1][0] = candMvField[i][1].mv;
+        tempPred->cu.m_refIdx[0][0] = (int8_t)candMvField[i][0].refIdx;
+        tempPred->cu.m_refIdx[1][0] = (int8_t)candMvField[i][1].refIdx;
+
+        motionCompensation(tempPred->cu, pu, tempPred->predYuv, true, m_bChromaSa8d);
+
+        tempPred->sa8dBits = getTUBits(i, numMergeCand);
+        tempPred->distortion = primitives.cu[sizeIdx].sa8d(fencYuv->m_buf[0], fencYuv->m_size, tempPred->predYuv.m_buf[0], tempPred->predYuv.m_size);
+        if (m_bChromaSa8d)
+        {
+            tempPred->distortion += primitives.chroma[m_csp].cu[sizeIdx].sa8d(fencYuv->m_buf[1], fencYuv->m_csize, tempPred->predYuv.m_buf[1], tempPred->predYuv.m_csize);
+            tempPred->distortion += primitives.chroma[m_csp].cu[sizeIdx].sa8d(fencYuv->m_buf[2], fencYuv->m_csize, tempPred->predYuv.m_buf[2], tempPred->predYuv.m_csize);
+        }
+        tempPred->sa8dCost = m_rdCost.calcRdSADCost((uint32_t)tempPred->distortion, tempPred->sa8dBits);
+
+        if (tempPred->sa8dCost < bestPred->sa8dCost)
+        {
+            /* swap pointers rather than copying prediction buffers; the loser
+             * becomes the scratch Mode for the next candidate */
+            bestSadCand = i;
+            std::swap(tempPred, bestPred);
+        }
+    }
+
+    /* force mode decision to take inter or intra */
+    if (bestSadCand < 0)
+        return;
+
+    /* if chroma SA8D was enabled, chroma MC was already done in the loop;
+     * otherwise generate the chroma prediction for the winner now */
+    if (!m_bChromaSa8d) /* Chroma MC was done above */
+        motionCompensation(bestPred->cu, pu, bestPred->predYuv, false, true);
+
+    if (m_param->rdLevel)
+    {
+        if (m_param->bLossless)
+            bestPred->rdCost = MAX_INT64;   // skip (residual-less) coding is illegal in lossless mode
+        else
+            encodeResAndCalcRdSkipCU(*bestPred);
+
+        /* Encode with residual */
+        tempPred->cu.m_mvpIdx[0][0] = (uint8_t)bestSadCand;
+        tempPred->cu.setPUInterDir(candDir[bestSadCand], 0, 0);
+        tempPred->cu.setPUMv(0, candMvField[bestSadCand][0].mv, 0, 0);
+        tempPred->cu.setPUMv(1, candMvField[bestSadCand][1].mv, 0, 0);
+        tempPred->cu.setPURefIdx(0, (int8_t)candMvField[bestSadCand][0].refIdx, 0, 0);
+        tempPred->cu.setPURefIdx(1, (int8_t)candMvField[bestSadCand][1].refIdx, 0, 0);
+        tempPred->sa8dCost = bestPred->sa8dCost;
+        tempPred->sa8dBits = bestPred->sa8dBits;
+        tempPred->predYuv.copyFromYuv(bestPred->predYuv);
+
+        encodeResAndCalcRdInterCU(*tempPred, cuGeom);
+
+        md.bestMode = tempPred->rdCost < bestPred->rdCost ? tempPred : bestPred;
+    }
+    else
+        md.bestMode = bestPred;
+
+    /* broadcast sets of MV field data */
+    md.bestMode->cu.setPUInterDir(candDir[bestSadCand], 0, 0);
+    md.bestMode->cu.setPUMv(0, candMvField[bestSadCand][0].mv, 0, 0);
+    md.bestMode->cu.setPUMv(1, candMvField[bestSadCand][1].mv, 0, 0);
+    md.bestMode->cu.setPURefIdx(0, (int8_t)candMvField[bestSadCand][0].refIdx, 0, 0);
+    md.bestMode->cu.setPURefIdx(1, (int8_t)candMvField[bestSadCand][1].refIdx, 0, 0);
+    checkDQP(*md.bestMode, cuGeom);
+    X265_CHECK(md.bestMode->ok(), "Merge mode not ok\n");
+}
+
+/* sets md.bestMode if a valid merge candidate is found, else leaves it NULL
+ *
+ * RD levels 5-6 merge/skip evaluation: each merge candidate is fully RD
+ * encoded both with residual (merge) and without (skip), keeping the lowest
+ * rdCost.  When isShareMergeCand is set (analysis-load reuse path) only the
+ * single candidate index stored in *m_reuseBestMergeCand is evaluated. */
+void Analysis::checkMerge2Nx2N_rd5_6(Mode& skip, Mode& merge, const CUGeom& cuGeom, bool isShareMergeCand)
+{
+    uint32_t depth = cuGeom.depth;
+
+    /* Note that these two Mode instances are named MERGE and SKIP but they may
+     * hold the reverse when the function returns. We toggle between the two modes */
+    Mode* tempPred = &merge;
+    Mode* bestPred = &skip;
+
+    merge.initCosts();
+    merge.cu.setPredModeSubParts(MODE_INTER);
+    merge.cu.setPartSizeSubParts(SIZE_2Nx2N);
+    merge.cu.m_mergeFlag[0] = true;
+
+    skip.initCosts();
+    skip.cu.setPredModeSubParts(MODE_INTER);
+    skip.cu.setPartSizeSubParts(SIZE_2Nx2N);
+    skip.cu.m_mergeFlag[0] = true;
+
+    MVField candMvField[MRG_MAX_NUM_CANDS][2]; // double length for mv of both lists
+    uint8_t candDir[MRG_MAX_NUM_CANDS];
+    uint32_t numMergeCand = merge.cu.getInterMergeCandidates(0, 0, candMvField, candDir);
+    PredictionUnit pu(merge.cu, cuGeom, 0);
+
+    bool foundCbf0Merge = false;              // a residual-free merge was already found; stop trying with-residual encodes
+    bool triedPZero = false, triedBZero = false;
+    bestPred->rdCost = MAX_INT64;
+
+    /* with shared candidates only the saved winner is re-evaluated */
+    uint32_t first = 0, last = numMergeCand;
+    if (isShareMergeCand)
+    {
+        first = *m_reuseBestMergeCand;
+        last = first + 1;
+    }
+
+    for (uint32_t i = first; i < last; i++)
+    {
+        /* with frame-parallel encoding, reject candidates whose vertical MV
+         * points beyond the rows the reference frame has finished encoding */
+        if (m_bFrameParallel &&
+            (candMvField[i][0].mv.y >= (m_param->searchRange + 1) * 4 ||
+            candMvField[i][1].mv.y >= (m_param->searchRange + 1) * 4))
+            continue;
+
+        /* the merge candidate list is packed with MV(0,0) ref 0 when it is not full */
+        if (candDir[i] == 1 && !candMvField[i][0].mv.word && !candMvField[i][0].refIdx)
+        {
+            if (triedPZero)
+                continue;
+            triedPZero = true;
+        }
+        else if (candDir[i] == 3 &&
+            !candMvField[i][0].mv.word && !candMvField[i][0].refIdx &&
+            !candMvField[i][1].mv.word && !candMvField[i][1].refIdx)
+        {
+            if (triedBZero)
+                continue;
+            triedBZero = true;
+        }
+
+        tempPred->cu.m_mvpIdx[0][0] = (uint8_t)i;    /* merge candidate ID is stored in L0 MVP idx */
+        tempPred->cu.m_interDir[0] = candDir[i];
+        tempPred->cu.m_mv[0][0] = candMvField[i][0].mv;
+        tempPred->cu.m_mv[1][0] = candMvField[i][1].mv;
+        tempPred->cu.m_refIdx[0][0] = (int8_t)candMvField[i][0].refIdx;
+        tempPred->cu.m_refIdx[1][0] = (int8_t)candMvField[i][1].refIdx;
+        tempPred->cu.setPredModeSubParts(MODE_INTER); /* must be cleared between encode iterations */
+
+        motionCompensation(tempPred->cu, pu, tempPred->predYuv, true, true);
+
+        uint8_t hasCbf = true;
+        bool swapped = false;
+        if (!foundCbf0Merge)
+        {
+            /* if the best prediction has CBF (not a skip) then try merge with residual */
+
+            encodeResAndCalcRdInterCU(*tempPred, cuGeom);
+            hasCbf = tempPred->cu.getQtRootCbf(0);
+            foundCbf0Merge = !hasCbf;
+
+            if (tempPred->rdCost < bestPred->rdCost)
+            {
+                std::swap(tempPred, bestPred);
+                swapped = true;
+            }
+        }
+        if (!m_param->bLossless && hasCbf)
+        {
+            /* try merge without residual (skip), if not lossless coding */
+
+            if (swapped)
+            {
+                /* the with-residual encode won the swap above; rebuild the
+                 * candidate state in the (new) scratch Mode before the skip try */
+                tempPred->cu.m_mvpIdx[0][0] = (uint8_t)i;
+                tempPred->cu.m_interDir[0] = candDir[i];
+                tempPred->cu.m_mv[0][0] = candMvField[i][0].mv;
+                tempPred->cu.m_mv[1][0] = candMvField[i][1].mv;
+                tempPred->cu.m_refIdx[0][0] = (int8_t)candMvField[i][0].refIdx;
+                tempPred->cu.m_refIdx[1][0] = (int8_t)candMvField[i][1].refIdx;
+                tempPred->cu.setPredModeSubParts(MODE_INTER);
+                tempPred->predYuv.copyFromYuv(bestPred->predYuv);
+            }
+
+            encodeResAndCalcRdSkipCU(*tempPred);
+
+            if (tempPred->rdCost < bestPred->rdCost)
+                std::swap(tempPred, bestPred);
+        }
+    }
+
+    if (bestPred->rdCost < MAX_INT64)
+    {
+        m_modeDepth[depth].bestMode = bestPred;
+
+        /* broadcast sets of MV field data */
+        uint32_t bestCand = bestPred->cu.m_mvpIdx[0][0];
+        bestPred->cu.setPUInterDir(candDir[bestCand], 0, 0);
+        bestPred->cu.setPUMv(0, candMvField[bestCand][0].mv, 0, 0);
+        bestPred->cu.setPUMv(1, candMvField[bestCand][1].mv, 0, 0);
+        bestPred->cu.setPURefIdx(0, (int8_t)candMvField[bestCand][0].refIdx, 0, 0);
+        bestPred->cu.setPURefIdx(1, (int8_t)candMvField[bestCand][1].refIdx, 0, 0);
+        checkDQP(*bestPred, cuGeom);
+        X265_CHECK(bestPred->ok(), "merge mode is not ok");
+    }
+
+    /* NOTE(review): if every candidate was rejected (e.g. by the frame-parallel
+     * MV clamp) bestPred->cu.m_mvpIdx[0][0] is never written in this call, yet
+     * the SAVE branch below reads it -- confirm at least one candidate always
+     * survives, or that stale data here is harmless */
+    if (m_param->analysisMode)
+    {
+        m_reuseBestMergeCand++;
+        if (m_param->analysisMode == X265_ANALYSIS_SAVE)
+            *m_reuseBestMergeCand = bestPred->cu.m_mvpIdx[0][0];
+    }
+}
+
+/* Evaluate one inter partition shape at RD levels 0-4.
+ *
+ * Runs motion estimation (predInterSearch) for the given partSize, then costs
+ * the prediction with luma SA8D (plus chroma SA8D when m_bChromaSa8d) into
+ * interMode.sa8dCost; no residual/RD coding is done here.  With analysis
+ * load/save enabled, per-PU best reference indices are read from / written to
+ * the m_reuseRef stream (one entry per prediction direction). */
+void Analysis::checkInter_rd0_4(Mode& interMode, const CUGeom& cuGeom, PartSize partSize, uint32_t refMask[2])
+{
+    interMode.initCosts();
+    interMode.cu.setPartSizeSubParts(partSize);
+    interMode.cu.setPredModeSubParts(MODE_INTER);
+    int numPredDir = m_slice->isInterP() ? 1 : 2;  // P: L0 only; B: L0 and L1
+
+    if (m_param->analysisMode == X265_ANALYSIS_LOAD && m_reuseInterDataCTU)
+    {
+        /* seed each PU's reference index from the saved first-pass analysis */
+        uint32_t numPU = interMode.cu.getNumPartInter(0);
+        for (uint32_t part = 0; part < numPU; part++)
+        {
+            MotionData* bestME = interMode.bestME[part];
+            for (int32_t i = 0; i < numPredDir; i++)
+            {
+                bestME[i].ref = *m_reuseRef;
+                m_reuseRef++;
+            }
+        }
+    }
+
+    predInterSearch(interMode, cuGeom, m_bChromaSa8d, refMask);
+
+    /* predInterSearch sets interMode.sa8dBits */
+    const Yuv& fencYuv = *interMode.fencYuv;
+    Yuv& predYuv = interMode.predYuv;
+    int part = partitionFromLog2Size(cuGeom.log2CUSize);
+    interMode.distortion = primitives.cu[part].sa8d(fencYuv.m_buf[0], fencYuv.m_size, predYuv.m_buf[0], predYuv.m_size);
+    if (m_bChromaSa8d)
+    {
+        interMode.distortion += primitives.chroma[m_csp].cu[part].sa8d(fencYuv.m_buf[1], fencYuv.m_csize, predYuv.m_buf[1], predYuv.m_csize);
+        interMode.distortion += primitives.chroma[m_csp].cu[part].sa8d(fencYuv.m_buf[2], fencYuv.m_csize, predYuv.m_buf[2], predYuv.m_csize);
+    }
+    interMode.sa8dCost = m_rdCost.calcRdSADCost((uint32_t)interMode.distortion, interMode.sa8dBits);
+
+    if (m_param->analysisMode == X265_ANALYSIS_SAVE && m_reuseInterDataCTU)
+    {
+        /* record each PU's chosen reference index for later reuse */
+        uint32_t numPU = interMode.cu.getNumPartInter(0);
+        for (uint32_t puIdx = 0; puIdx < numPU; puIdx++)
+        {
+            MotionData* bestME = interMode.bestME[puIdx];
+            for (int32_t i = 0; i < numPredDir; i++)
+            {
+                *m_reuseRef = bestME[i].ref;
+                m_reuseRef++;
+            }
+        }
+    }
+}
+
+/* Evaluate one inter partition shape at RD levels 5-6.
+ *
+ * Runs motion estimation (predInterSearch, always with chroma prediction)
+ * and then fully RD-encodes the residual via encodeResAndCalcRdInterCU, so
+ * interMode carries a true rdCost on return.  With analysis load/save
+ * enabled, per-PU best reference indices are read from / written to the
+ * m_reuseRef stream (one entry per prediction direction). */
+void Analysis::checkInter_rd5_6(Mode& interMode, const CUGeom& cuGeom, PartSize partSize, uint32_t refMask[2])
+{
+    interMode.initCosts();
+    interMode.cu.setPartSizeSubParts(partSize);
+    interMode.cu.setPredModeSubParts(MODE_INTER);
+    int numPredDir = m_slice->isInterP() ? 1 : 2;  // P: L0 only; B: L0 and L1
+
+    if (m_param->analysisMode == X265_ANALYSIS_LOAD && m_reuseInterDataCTU)
+    {
+        /* seed each PU's reference index from the saved first-pass analysis */
+        uint32_t numPU = interMode.cu.getNumPartInter(0);
+        for (uint32_t puIdx = 0; puIdx < numPU; puIdx++)
+        {
+            MotionData* bestME = interMode.bestME[puIdx];
+            for (int32_t i = 0; i < numPredDir; i++)
+            {
+                bestME[i].ref = *m_reuseRef;
+                m_reuseRef++;
+            }
+        }
+    }
+
+    predInterSearch(interMode, cuGeom, true, refMask);
+
+    /* predInterSearch sets interMode.sa8dBits, but this is ignored */
+    encodeResAndCalcRdInterCU(interMode, cuGeom);
+
+    if (m_param->analysisMode == X265_ANALYSIS_SAVE && m_reuseInterDataCTU)
+    {
+        /* record each PU's chosen reference index for later reuse */
+        uint32_t numPU = interMode.cu.getNumPartInter(0);
+        for (uint32_t puIdx = 0; puIdx < numPU; puIdx++)
+        {
+            MotionData* bestME = interMode.bestME[puIdx];
+            for (int32_t i = 0; i < numPredDir; i++)
+            {
+                *m_reuseRef = bestME[i].ref;
+                m_reuseRef++;
+            }
+        }
+    }
+}
+
+/* Evaluate bidirectional 2Nx2N prediction.
+ *
+ * Builds a BIDIR candidate from the best uni-directional L0 and L1 motion
+ * found by the 2Nx2N search (inter2Nx2N.bestME[0][0/1]) and SA8D-costs it.
+ * If either uni-directional search failed (cost == MAX_UINT) or biprediction
+ * is restricted for this CU, bidir2Nx2N's costs are set to MAX_INT64 and the
+ * candidate is abandoned.  A second candidate using zero MVs in both lists
+ * (coincident blocks) is also tried when the uni MVs are non-zero and their
+ * predictors lie inside the valid search area; if it wins, the CU's MV fields
+ * are rewritten for zero motion, otherwise the original MVs are restored. */
+void Analysis::checkBidir2Nx2N(Mode& inter2Nx2N, Mode& bidir2Nx2N, const CUGeom& cuGeom)
+{
+    CUData& cu = bidir2Nx2N.cu;
+
+    if (cu.isBipredRestriction() || inter2Nx2N.bestME[0][0].cost == MAX_UINT || inter2Nx2N.bestME[0][1].cost == MAX_UINT)
+    {
+        bidir2Nx2N.sa8dCost = MAX_INT64;
+        bidir2Nx2N.rdCost = MAX_INT64;
+        return;
+    }
+
+    const Yuv& fencYuv = *bidir2Nx2N.fencYuv;
+    MV   mvzero(0, 0);
+    int  partEnum = cuGeom.log2CUSize - 2;
+
+    bidir2Nx2N.bestME[0][0] = inter2Nx2N.bestME[0][0];
+    bidir2Nx2N.bestME[0][1] = inter2Nx2N.bestME[0][1];
+    MotionData* bestME = bidir2Nx2N.bestME[0];
+    int ref0    = bestME[0].ref;
+    MV  mvp0    = bestME[0].mvp;
+    int mvpIdx0 = bestME[0].mvpIdx;
+    int ref1    = bestME[1].ref;
+    MV  mvp1    = bestME[1].mvp;
+    int mvpIdx1 = bestME[1].mvpIdx;
+
+    bidir2Nx2N.initCosts();
+    cu.setPartSizeSubParts(SIZE_2Nx2N);
+    cu.setPredModeSubParts(MODE_INTER);
+    cu.setPUInterDir(3, 0, 0);      /* interDir 3 == both lists */
+    cu.setPURefIdx(0, (int8_t)ref0, 0, 0);
+    cu.setPURefIdx(1, (int8_t)ref1, 0, 0);
+    cu.m_mvpIdx[0][0] = (uint8_t)mvpIdx0;
+    cu.m_mvpIdx[1][0] = (uint8_t)mvpIdx1;
+    cu.m_mergeFlag[0] = 0;
+
+    /* Estimate cost of BIDIR using best 2Nx2N L0 and L1 motion vectors */
+    cu.setPUMv(0, bestME[0].mv, 0, 0);
+    cu.m_mvd[0][0] = bestME[0].mv - mvp0;
+
+    cu.setPUMv(1, bestME[1].mv, 0, 0);
+    cu.m_mvd[1][0] = bestME[1].mv - mvp1;
+
+    PredictionUnit pu(cu, cuGeom, 0);
+    motionCompensation(cu, pu, bidir2Nx2N.predYuv, true, m_bChromaSa8d);
+
+    int sa8d = primitives.cu[partEnum].sa8d(fencYuv.m_buf[0], fencYuv.m_size, bidir2Nx2N.predYuv.m_buf[0], bidir2Nx2N.predYuv.m_size);
+    if (m_bChromaSa8d)
+    {
+        /* Add in chroma distortion */
+        sa8d += primitives.chroma[m_csp].cu[partEnum].sa8d(fencYuv.m_buf[1], fencYuv.m_csize, bidir2Nx2N.predYuv.m_buf[1], bidir2Nx2N.predYuv.m_csize);
+        sa8d += primitives.chroma[m_csp].cu[partEnum].sa8d(fencYuv.m_buf[2], fencYuv.m_csize, bidir2Nx2N.predYuv.m_buf[2], bidir2Nx2N.predYuv.m_csize);
+    }
+    /* bit cost: replace the two uni list-selection costs with the bidir one */
+    bidir2Nx2N.sa8dBits = bestME[0].bits + bestME[1].bits + m_listSelBits[2] - (m_listSelBits[0] + m_listSelBits[1]);
+    bidir2Nx2N.sa8dCost = sa8d + m_rdCost.getCost(bidir2Nx2N.sa8dBits);
+
+    bool bTryZero = bestME[0].mv.notZero() || bestME[1].mv.notZero();
+    if (bTryZero)
+    {
+        /* Do not try zero MV if unidir motion predictors are beyond
+         * valid search area */
+        MV mvmin, mvmax;
+        int merange = X265_MAX(m_param->sourceWidth, m_param->sourceHeight);
+        setSearchRange(cu, mvzero, merange, mvmin, mvmax);
+        mvmax.y += 2; // there is some pad for subpel refine
+        mvmin <<= 2;  // convert to quarter-pel units
+        mvmax <<= 2;
+
+        bTryZero &= bestME[0].mvp.checkRange(mvmin, mvmax);
+        bTryZero &= bestME[1].mvp.checkRange(mvmin, mvmax);
+    }
+    if (bTryZero)
+    {
+        /* Estimate cost of BIDIR using coincident blocks */
+        Yuv& tmpPredYuv = m_rqt[cuGeom.depth].tmpPredYuv;
+
+        int zsa8d;
+
+        if (m_bChromaSa8d)
+        {
+            /* temporarily overwrite the CU's MVs with zero for the MC call;
+             * restored below if the zero candidate loses */
+            cu.m_mv[0][0] = mvzero;
+            cu.m_mv[1][0] = mvzero;
+
+            motionCompensation(cu, pu, tmpPredYuv, true, true);
+
+            zsa8d  = primitives.cu[partEnum].sa8d(fencYuv.m_buf[0], fencYuv.m_size, tmpPredYuv.m_buf[0], tmpPredYuv.m_size);
+            zsa8d += primitives.chroma[m_csp].cu[partEnum].sa8d(fencYuv.m_buf[1], fencYuv.m_csize, tmpPredYuv.m_buf[1], tmpPredYuv.m_csize);
+            zsa8d += primitives.chroma[m_csp].cu[partEnum].sa8d(fencYuv.m_buf[2], fencYuv.m_csize, tmpPredYuv.m_buf[2], tmpPredYuv.m_csize);
+        }
+        else
+        {
+            /* luma-only shortcut: average the two co-located reference blocks
+             * directly instead of running full MC */
+            pixel *fref0 = m_slice->m_mref[0][ref0].getLumaAddr(pu.ctuAddr, pu.cuAbsPartIdx);
+            pixel *fref1 = m_slice->m_mref[1][ref1].getLumaAddr(pu.ctuAddr, pu.cuAbsPartIdx);
+            intptr_t refStride = m_slice->m_mref[0][0].lumaStride;
+
+            primitives.pu[partEnum].pixelavg_pp(tmpPredYuv.m_buf[0], tmpPredYuv.m_size, fref0, refStride, fref1, refStride, 32);
+            zsa8d = primitives.cu[partEnum].sa8d(fencYuv.m_buf[0], fencYuv.m_size, tmpPredYuv.m_buf[0], tmpPredYuv.m_size);
+        }
+
+        uint32_t bits0 = bestME[0].bits - m_me.bitcost(bestME[0].mv, mvp0) + m_me.bitcost(mvzero, mvp0);
+        uint32_t bits1 = bestME[1].bits - m_me.bitcost(bestME[1].mv, mvp1) + m_me.bitcost(mvzero, mvp1);
+        uint32_t zcost = zsa8d + m_rdCost.getCost(bits0) + m_rdCost.getCost(bits1);
+
+        /* refine MVP selection for zero mv, updates: mvp, mvpidx, bits, cost */
+        mvp0 = checkBestMVP(inter2Nx2N.amvpCand[0][ref0], mvzero, mvpIdx0, bits0, zcost);
+        mvp1 = checkBestMVP(inter2Nx2N.amvpCand[1][ref1], mvzero, mvpIdx1, bits1, zcost);
+
+        uint32_t zbits = bits0 + bits1 + m_listSelBits[2] - (m_listSelBits[0] + m_listSelBits[1]);
+        zcost = zsa8d + m_rdCost.getCost(zbits);
+
+        if (zcost < bidir2Nx2N.sa8dCost)
+        {
+            bidir2Nx2N.sa8dBits = zbits;
+            bidir2Nx2N.sa8dCost = zcost;
+
+            cu.setPUMv(0, mvzero, 0, 0);
+            cu.m_mvd[0][0] = mvzero - mvp0;
+            cu.m_mvpIdx[0][0] = (uint8_t)mvpIdx0;
+
+            cu.setPUMv(1, mvzero, 0, 0);
+            cu.m_mvd[1][0] = mvzero - mvp1;
+            cu.m_mvpIdx[1][0] = (uint8_t)mvpIdx1;
+
+            if (m_bChromaSa8d)
+                /* real MC was already performed */
+                bidir2Nx2N.predYuv.copyFromYuv(tmpPredYuv);
+            else
+                motionCompensation(cu, pu, bidir2Nx2N.predYuv, true, true);
+        }
+        else if (m_bChromaSa8d)
+        {
+            /* recover overwritten motion vectors */
+            cu.m_mv[0][0] = bestME[0].mv;
+            cu.m_mv[1][0] = bestME[1].mv;
+        }
+    }
+}
+
+/* Generate residual, transform/quant, and reconstruction for an entire CTU,
+ * recursing down to the leaf CUs chosen by earlier analysis (the RD level 0
+ * path). Reconstructed pixels are written directly into m_frame->m_reconPic. */
+void Analysis::encodeResidue(const CUData& ctu, const CUGeom& cuGeom)
+{
+    if (cuGeom.depth < ctu.m_cuDepth[cuGeom.absPartIdx] && cuGeom.depth < g_maxCUDepth)
+    {
+        /* this CU was coded at a deeper depth; recurse into the present children */
+        for (uint32_t subPartIdx = 0; subPartIdx < 4; subPartIdx++)
+        {
+            const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + subPartIdx);
+            if (childGeom.flags & CUGeom::PRESENT)
+                encodeResidue(ctu, childGeom);
+        }
+        return;
+    }
+
+    uint32_t absPartIdx = cuGeom.absPartIdx;
+    int sizeIdx = cuGeom.log2CUSize - 2;
+
+    /* reuse the bestMode data structures at the current depth */
+    Mode *bestMode = m_modeDepth[cuGeom.depth].bestMode;
+    CUData& cu = bestMode->cu;
+
+    cu.copyFromPic(ctu, cuGeom);
+
+    PicYuv& reconPic = *m_frame->m_reconPic;
+
+    /* source pixels for this CU; at depth > 0 they are copied down from the
+     * CTU-sized fenc buffer held at depth 0 */
+    Yuv& fencYuv = m_modeDepth[cuGeom.depth].fencYuv;
+    if (cuGeom.depth)
+        m_modeDepth[0].fencYuv.copyPartToYuv(fencYuv, absPartIdx);
+    X265_CHECK(bestMode->fencYuv == &fencYuv, "invalid fencYuv\n");
+
+    if (cu.isIntra(0))
+    {
+        ProfileCUScope(ctu, intraRDOElapsedTime[cuGeom.depth], countIntraRDO[cuGeom.depth]); // not really RDO, but close enough
+        
+        uint32_t tuDepthRange[2];
+        cu.getIntraTUQtDepthRange(tuDepthRange, 0);
+
+        residualTransformQuantIntra(*bestMode, cuGeom, 0, 0, tuDepthRange);
+        getBestIntraModeChroma(*bestMode, cuGeom);
+        residualQTIntraChroma(*bestMode, cuGeom, 0, 0);
+    }
+    else // if (cu.isInter(0))
+    {
+        ProfileCUScope(ctu, interRDOElapsedTime[cuGeom.depth], countInterRDO[cuGeom.depth]); // not really RDO, but close enough
+
+        X265_CHECK(!ctu.isSkipped(absPartIdx), "skip not expected prior to transform\n");
+
+        /* Calculate residual for current CU part into depth sized resiYuv */
+
+        ShortYuv& resiYuv = m_rqt[cuGeom.depth].tmpResiYuv;
+
+        /* at RD 0, the prediction pixels are accumulated into the top depth predYuv */
+        Yuv& predYuv = m_modeDepth[0].bestMode->predYuv;
+        pixel* predY = predYuv.getLumaAddr(absPartIdx);
+        pixel* predU = predYuv.getCbAddr(absPartIdx);
+        pixel* predV = predYuv.getCrAddr(absPartIdx);
+
+        /* resi = fenc - pred, for luma and both chroma planes */
+        primitives.cu[sizeIdx].sub_ps(resiYuv.m_buf[0], resiYuv.m_size,
+                                      fencYuv.m_buf[0], predY,
+                                      fencYuv.m_size, predYuv.m_size);
+
+        primitives.chroma[m_csp].cu[sizeIdx].sub_ps(resiYuv.m_buf[1], resiYuv.m_csize,
+                                                 fencYuv.m_buf[1], predU,
+                                                 fencYuv.m_csize, predYuv.m_csize);
+
+        primitives.chroma[m_csp].cu[sizeIdx].sub_ps(resiYuv.m_buf[2], resiYuv.m_csize,
+                                                 fencYuv.m_buf[2], predV,
+                                                 fencYuv.m_csize, predYuv.m_csize);
+
+        uint32_t tuDepthRange[2];
+        cu.getInterTUQtDepthRange(tuDepthRange, 0);
+
+        residualTransformQuantInter(*bestMode, cuGeom, 0, 0, tuDepthRange);
+
+        /* a 2Nx2N merge with no coded coefficients anywhere becomes a skip CU */
+        if (cu.m_mergeFlag[0] && cu.m_partSize[0] == SIZE_2Nx2N && !cu.getQtRootCbf(0))
+            cu.setPredModeSubParts(MODE_SKIP);
+
+        /* residualTransformQuantInter() wrote transformed residual back into
+         * resiYuv. Generate the recon pixels by adding it to the prediction */
+
+        if (cu.m_cbf[0][0])
+            primitives.cu[sizeIdx].add_ps(reconPic.getLumaAddr(cu.m_cuAddr, absPartIdx), reconPic.m_stride,
+                                          predY, resiYuv.m_buf[0], predYuv.m_size, resiYuv.m_size);
+        else
+            primitives.cu[sizeIdx].copy_pp(reconPic.getLumaAddr(cu.m_cuAddr, absPartIdx), reconPic.m_stride,
+                                           predY, predYuv.m_size);
+
+        if (cu.m_cbf[1][0])
+            primitives.chroma[m_csp].cu[sizeIdx].add_ps(reconPic.getCbAddr(cu.m_cuAddr, absPartIdx), reconPic.m_strideC,
+                                                        predU, resiYuv.m_buf[1], predYuv.m_csize, resiYuv.m_csize);
+        else
+            primitives.chroma[m_csp].cu[sizeIdx].copy_pp(reconPic.getCbAddr(cu.m_cuAddr, absPartIdx), reconPic.m_strideC,
+                                                         predU, predYuv.m_csize);
+
+        if (cu.m_cbf[2][0])
+            primitives.chroma[m_csp].cu[sizeIdx].add_ps(reconPic.getCrAddr(cu.m_cuAddr, absPartIdx), reconPic.m_strideC,
+                                                        predV, resiYuv.m_buf[2], predYuv.m_csize, resiYuv.m_csize);
+        else
+            primitives.chroma[m_csp].cu[sizeIdx].copy_pp(reconPic.getCrAddr(cu.m_cuAddr, absPartIdx), reconPic.m_strideC,
+                                                         predV, predYuv.m_csize);
+    }
+
+    cu.updatePic(cuGeom.depth);
+}
+
+/* Add the cost of coding the CU split flag to the given mode's bit and cost
+ * totals. The costing method depends on --rd level: a full CABAC bit estimate
+ * at rd >= 3, a single bit folded into the SAD cost at rd <= 1, and a single
+ * bit folded into the RD cost otherwise. */
+void Analysis::addSplitFlagCost(Mode& mode, uint32_t depth)
+{
+    if (m_param->rdLevel >= 3)
+    {
+        /* code the split flag (0 or 1) and update bit costs */
+        mode.contexts.resetBits();
+        mode.contexts.codeSplitFlag(mode.cu, 0, depth);
+        uint32_t bits = mode.contexts.getNumberOfWrittenBits();
+        mode.mvBits += bits;
+        mode.totalBits += bits;
+        updateModeCost(mode);
+    }
+    else if (m_param->rdLevel <= 1)
+    {
+        /* approximate: charge one extra bit against the SAD-based cost */
+        mode.sa8dBits++;
+        mode.sa8dCost = m_rdCost.calcRdSADCost((uint32_t)mode.distortion, mode.sa8dBits);
+    }
+    else
+    {
+        /* rd 2: approximate with one extra bit in the RD cost */
+        mode.mvBits++;
+        mode.totalBits++;
+        updateModeCost(mode);
+    }
+}
+
+/* Estimate the minimum CU depth worth analyzing, based on the depths used by
+ * the co-located CTUs in the first L0 and L1 reference frames. Returns 0 (no
+ * restriction) when there are no references or when either co-located CTU is
+ * coded entirely at depth 0. */
+uint32_t Analysis::topSkipMinDepth(const CUData& parentCTU, const CUGeom& cuGeom)
+{
+    /* Do not attempt to code a block larger than the largest block in the
+     * co-located CTUs in L0 and L1 */
+    int currentQP = parentCTU.m_qp[0];
+    int previousQP = currentQP;
+    uint32_t minDepth0 = 4, minDepth1 = 4;
+    uint32_t sum = 0;
+    int numRefs = 0;
+    if (m_slice->m_numRefIdx[0])
+    {
+        numRefs++;
+        const CUData& cu = *m_slice->m_refFrameList[0][0]->m_encData->getPicCTU(parentCTU.m_cuAddr);
+        previousQP = cu.m_qp[0];
+        if (!cu.m_cuDepth[cuGeom.absPartIdx])
+            return 0;
+        /* sample depth at every 4th 4x4 partition; track min and total */
+        for (uint32_t i = 0; i < cuGeom.numPartitions; i += 4)
+        {
+            uint32_t d = cu.m_cuDepth[cuGeom.absPartIdx + i];
+            minDepth0 = X265_MIN(d, minDepth0);
+            sum += d;
+        }
+    }
+    if (m_slice->m_numRefIdx[1])
+    {
+        numRefs++;
+        const CUData& cu = *m_slice->m_refFrameList[1][0]->m_encData->getPicCTU(parentCTU.m_cuAddr);
+        if (!cu.m_cuDepth[cuGeom.absPartIdx])
+            return 0;
+        for (uint32_t i = 0; i < cuGeom.numPartitions; i += 4)
+        {
+            uint32_t d = cu.m_cuDepth[cuGeom.absPartIdx + i];
+            minDepth1 = X265_MIN(d, minDepth1);
+            sum += d;
+        }
+    }
+    if (!numRefs)
+        return 0;
+
+    uint32_t minDepth = X265_MIN(minDepth0, minDepth1);
+    /* thresh is the depth sum that would result if every sampled partition
+     * were exactly at minDepth */
+    uint32_t thresh = minDepth * numRefs * (cuGeom.numPartitions >> 2);
+
+    /* allow block size growth if QP is raising or avg depth is
+     * less than 1.5 of min depth */
+    if (minDepth && currentQP >= previousQP && (sum <= thresh + (thresh >> 1)))
+        minDepth -= 1;
+
+    return minDepth;
+}
+
+/* returns true if recursion should be stopped */
+bool Analysis::recursionDepthCheck(const CUData& parentCTU, const CUGeom& cuGeom, const Mode& bestMode)
+{
+    /* early exit when the RD cost of best mode at depth n is less than the sum
+     * of average of RD cost of the neighbor CU's(above, aboveleft, aboveright,
+     * left, colocated) and avg cost of that CU at depth "n" with weightage for
+     * each quantity */
+
+    uint32_t depth = cuGeom.depth;
+    FrameData& curEncData = *m_frame->m_encData;
+    /* cost history accumulated for this CTU at the current depth */
+    FrameData::RCStatCU& cuStat = curEncData.m_cuStat[parentCTU.m_cuAddr];
+    uint64_t cuCost = cuStat.avgCost[depth] * cuStat.count[depth];
+    uint64_t cuCount = cuStat.count[depth];
+
+    /* accumulate cost history from whichever neighbour CTUs are available */
+    uint64_t neighCost = 0, neighCount = 0;
+    const CUData* above = parentCTU.m_cuAbove;
+    if (above)
+    {
+        FrameData::RCStatCU& astat = curEncData.m_cuStat[above->m_cuAddr];
+        neighCost += astat.avgCost[depth] * astat.count[depth];
+        neighCount += astat.count[depth];
+
+        const CUData* aboveLeft = parentCTU.m_cuAboveLeft;
+        if (aboveLeft)
+        {
+            FrameData::RCStatCU& lstat = curEncData.m_cuStat[aboveLeft->m_cuAddr];
+            neighCost += lstat.avgCost[depth] * lstat.count[depth];
+            neighCount += lstat.count[depth];
+        }
+
+        const CUData* aboveRight = parentCTU.m_cuAboveRight;
+        if (aboveRight)
+        {
+            FrameData::RCStatCU& rstat = curEncData.m_cuStat[aboveRight->m_cuAddr];
+            neighCost += rstat.avgCost[depth] * rstat.count[depth];
+            neighCount += rstat.count[depth];
+        }
+    }
+    const CUData* left = parentCTU.m_cuLeft;
+    if (left)
+    {
+        FrameData::RCStatCU& nstat = curEncData.m_cuStat[left->m_cuAddr];
+        neighCost += nstat.avgCost[depth] * nstat.count[depth];
+        neighCount += nstat.count[depth];
+    }
+
+    // give 60% weight to all CU's and 40% weight to neighbour CU's
+    if (neighCount + cuCount)
+    {
+        uint64_t avgCost = ((3 * cuCost) + (2 * neighCost)) / ((3 * cuCount) + (2 * neighCount));
+        /* rd levels 0/1 have no full RD cost; fall back to the sa8d cost */
+        uint64_t curCost = m_param->rdLevel > 1 ? bestMode.rdCost : bestMode.sa8dCost;
+        if (curCost < avgCost && avgCost)
+            return true;
+    }
+
+    return false;
+}
+
+/* Derive the QP to use for the CU described by cuGeom: start from the CTU's
+ * base QP and add the average of the per-16x16-block AQ or cuTree offsets
+ * covering the CU's area. The result is rounded and clipped to the legal
+ * QP range. */
+int Analysis::calculateQpforCuSize(const CUData& ctu, const CUGeom& cuGeom)
+{
+    FrameData& curEncData = *m_frame->m_encData;
+    double qp = curEncData.m_cuStat[ctu.m_cuAddr].baseQp;
+
+    /* Use cuTree offsets if cuTree enabled and frame is referenced, else use AQ offsets */
+    bool isReferenced = IS_REFERENCED(m_frame);
+    double *qpoffs = (isReferenced && m_param->rc.cuTree) ? m_frame->m_lowres.qpCuTreeOffset : m_frame->m_lowres.qpAqOffset;
+    if (qpoffs)
+    {
+        uint32_t width = m_frame->m_fencPic->m_picWidth;
+        uint32_t height = m_frame->m_fencPic->m_picHeight;
+        /* pixel position of this CU within the picture */
+        uint32_t block_x = ctu.m_cuPelX + g_zscanToPelX[cuGeom.absPartIdx];
+        uint32_t block_y = ctu.m_cuPelY + g_zscanToPelY[cuGeom.absPartIdx];
+        uint32_t maxCols = (m_frame->m_fencPic->m_picWidth + (16 - 1)) / 16;
+        uint32_t blockSize = g_maxCUSize >> cuGeom.depth;
+        double qp_offset = 0;
+        uint32_t cnt = 0;
+        uint32_t idx;
+
+        /* average offsets over every 16x16 block the CU overlaps, clipped
+         * to the picture boundary */
+        for (uint32_t block_yy = block_y; block_yy < block_y + blockSize && block_yy < height; block_yy += 16)
+        {
+            for (uint32_t block_xx = block_x; block_xx < block_x + blockSize && block_xx < width; block_xx += 16)
+            {
+                idx = ((block_yy / 16) * (maxCols)) + (block_xx / 16);
+                qp_offset += qpoffs[idx];
+                cnt++;
+            }
+        }
+
+        qp_offset /= cnt;
+        qp += qp_offset;
+    }
+
+    return x265_clip3(QP_MIN, QP_MAX_MAX, (int)(qp + 0.5));
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/analysis.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,171 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Deepthi Nandakumar <deepthi@multicorewareinc.com>
+*          Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_ANALYSIS_H
+#define X265_ANALYSIS_H
+
+#include "common.h"
+#include "predict.h"
+#include "quant.h"
+#include "yuv.h"
+#include "shortyuv.h"
+#include "cudata.h"
+
+#include "entropy.h"
+#include "search.h"
+
+namespace X265_NS {
+// private namespace
+
+class Entropy;
+
+/* Per-worker-thread CU mode decision engine. Drives the CTU recursion for
+ * intra and inter slices (with separate paths per RD level) on top of the
+ * Search base class, which supplies motion estimation and intra search. */
+class Analysis : public Search
+{
+public:
+
+    /* indices into ModeDepth::pred[] for each candidate prediction type */
+    enum {
+        PRED_MERGE,
+        PRED_SKIP,
+        PRED_INTRA,
+        PRED_2Nx2N,
+        PRED_BIDIR,
+        PRED_Nx2N,
+        PRED_2NxN,
+        PRED_SPLIT,
+        PRED_2NxnU,
+        PRED_2NxnD,
+        PRED_nLx2N,
+        PRED_nRx2N,
+        PRED_INTRA_NxN, /* 4x4 intra PU blocks for 8x8 CU */
+        PRED_LOSSLESS,  /* lossless encode of best mode */
+        MAX_PRED_TYPES
+    };
+
+    /* working state for one CU depth: candidate modes, the current best,
+     * the source pixels for that depth, and the CU data memory pool */
+    struct ModeDepth
+    {
+        Mode           pred[MAX_PRED_TYPES];
+        Mode*          bestMode;
+        Yuv            fencYuv;
+        CUDataMemPool  cuMemPool;
+    };
+
+    /* bonded task group allowing worker threads to help measure prediction
+     * modes of one CU in parallel (see processPmode/processTasks) */
+    class PMODE : public BondedTaskGroup
+    {
+    public:
+
+        Analysis&     master;
+        const CUGeom& cuGeom;
+        int           modes[MAX_PRED_TYPES];
+
+        PMODE(Analysis& m, const CUGeom& g) : master(m), cuGeom(g) {}
+
+        void processTasks(int workerThreadId);
+
+    protected:
+
+        /* declared but not defined: assignment is intentionally unusable */
+        PMODE operator=(const PMODE&);
+    };
+
+    void processPmode(PMODE& pmode, Analysis& slave);
+
+    ModeDepth m_modeDepth[NUM_CU_DEPTH];
+    bool      m_bTryLossless;
+    bool      m_bChromaSa8d;    // include chroma planes in sa8d cost estimates
+
+    Analysis();
+
+    bool create(ThreadLocalData* tld);
+    void destroy();
+
+    Mode& compressCTU(CUData& ctu, Frame& frame, const CUGeom& cuGeom, const Entropy& initialContext);
+
+protected:
+
+    /* Analysis data for load/save modes, keeps getting incremented as CTU analysis proceeds and data is consumed or read */
+    analysis_intra_data* m_reuseIntraDataCTU;
+    analysis_inter_data* m_reuseInterDataCTU;
+    int32_t*             m_reuseRef;
+    uint32_t*            m_reuseBestMergeCand;
+
+    uint32_t m_splitRefIdx[4];
+
+    /* full analysis for an I-slice CU */
+    void compressIntraCU(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t &zOrder, int32_t qp);
+
+    /* full analysis for a P or B slice CU */
+    uint32_t compressInterCU_dist(const CUData& parentCTU, const CUGeom& cuGeom, int32_t qp);
+    uint32_t compressInterCU_rd0_4(const CUData& parentCTU, const CUGeom& cuGeom, int32_t qp);
+    uint32_t compressInterCU_rd5_6(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t &zOrder, int32_t qp);
+
+    /* measure merge and skip */
+    void checkMerge2Nx2N_rd0_4(Mode& skip, Mode& merge, const CUGeom& cuGeom);
+    void checkMerge2Nx2N_rd5_6(Mode& skip, Mode& merge, const CUGeom& cuGeom, bool isShareMergeCand);
+
+    /* measure inter options */
+    void checkInter_rd0_4(Mode& interMode, const CUGeom& cuGeom, PartSize partSize, uint32_t refmask[2]);
+    void checkInter_rd5_6(Mode& interMode, const CUGeom& cuGeom, PartSize partSize, uint32_t refmask[2]);
+
+    void checkBidir2Nx2N(Mode& inter2Nx2N, Mode& bidir2Nx2N, const CUGeom& cuGeom);
+
+    /* encode current bestMode losslessly, pick best RD cost */
+    void tryLossless(const CUGeom& cuGeom);
+
+    /* add the RD cost of coding a split flag (0 or 1) to the given mode */
+    void addSplitFlagCost(Mode& mode, uint32_t depth);
+
+    /* work-avoidance heuristics for RD levels < 5 */
+    uint32_t topSkipMinDepth(const CUData& parentCTU, const CUGeom& cuGeom);
+    bool recursionDepthCheck(const CUData& parentCTU, const CUGeom& cuGeom, const Mode& bestMode);
+
+    /* generate residual and recon pixels for an entire CTU recursively (RD0) */
+    void encodeResidue(const CUData& parentCTU, const CUGeom& cuGeom);
+
+    int calculateQpforCuSize(const CUData& ctu, const CUGeom& cuGeom);
+
+    /* check whether current mode is the new best */
+    inline void checkBestMode(Mode& mode, uint32_t depth)
+    {
+        X265_CHECK(mode.ok(), "mode costs are uninitialized\n");
+
+        ModeDepth& md = m_modeDepth[depth];
+        if (md.bestMode)
+        {
+            if (mode.rdCost < md.bestMode->rdCost)
+                md.bestMode = &mode;
+        }
+        else
+            md.bestMode = &mode;
+    }
+};
+
+/* per-worker-thread state; destroy() simply forwards to Analysis::destroy() */
+struct ThreadLocalData
+{
+    Analysis analysis;
+
+    void destroy() { analysis.destroy(); }
+};
+
+}
+
+#endif // ifndef X265_ANALYSIS_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/api.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,523 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "bitstream.h"
+#include "param.h"
+
+#include "encoder.h"
+#include "entropy.h"
+#include "level.h"
+#include "nal.h"
+#include "bitcost.h"
+
+/* multilib namespace reflectors */
+#if LINKED_8BIT
+namespace x265_8bit {
+const x265_api* x265_api_get(int bitDepth);
+const x265_api* x265_api_query(int bitDepth, int apiVersion, int* err);
+}
+#endif
+
+#if LINKED_10BIT
+namespace x265_10bit {
+const x265_api* x265_api_get(int bitDepth);
+const x265_api* x265_api_query(int bitDepth, int apiVersion, int* err);
+}
+#endif
+
+#if LINKED_12BIT
+namespace x265_12bit {
+const x265_api* x265_api_get(int bitDepth);
+const x265_api* x265_api_query(int bitDepth, int apiVersion, int* err);
+}
+#endif
+
+#if EXPORT_C_API
+/* these functions are exported as C functions (default) */
+using namespace X265_NS;
+extern "C" {
+#else
+/* these functions exist within private namespace (multilib) */
+namespace X265_NS {
+#endif
+
+/* Allocate and configure a new encoder instance from the caller's parameters.
+ * Returns NULL on validation or allocation failure; the fail path releases
+ * everything allocated locally (goto-based cleanup). */
+x265_encoder *x265_encoder_open(x265_param *p)
+{
+    if (!p)
+        return NULL;
+
+#if _MSC_VER
+#pragma warning(disable: 4127) // conditional expression is constant, yes I know
+#endif
+
+    /* sanity check that the library was built with a usable internal depth */
+#if HIGH_BIT_DEPTH
+    if (X265_DEPTH != 10 && X265_DEPTH != 12)
+#else
+    if (X265_DEPTH != 8)
+#endif
+    {
+        x265_log(p, X265_LOG_ERROR, "Build error, internal bit depth mismatch\n");
+        return NULL;
+    }
+
+    Encoder* encoder = NULL;
+    x265_param* param = PARAM_NS::x265_param_alloc();
+    x265_param* latestParam = PARAM_NS::x265_param_alloc();
+    if (!param || !latestParam)
+        goto fail;
+
+    /* take a private copy of the caller's parameters; the encoder never
+     * keeps a reference to the caller's struct */
+    memcpy(param, p, sizeof(x265_param));
+    x265_log(param, X265_LOG_INFO, "HEVC encoder version %s\n", PFX(version_str));
+    x265_log(param, X265_LOG_INFO, "build info %s\n", PFX(build_info_str));
+
+    x265_setup_primitives(param);
+
+    if (x265_check_params(param))
+        goto fail;
+
+    if (x265_set_globals(param))
+        goto fail;
+
+    encoder = new Encoder;
+    if (!param->rc.bEnableSlowFirstPass)
+        PARAM_NS::x265_param_apply_fastfirstpass(param);
+
+    // may change params for auto-detect, etc
+    encoder->configure(param);
+    // may change rate control and CPB params
+    if (!enforceLevel(*param, encoder->m_vps))
+        goto fail;
+
+    // will detect and set profile/tier/level in VPS
+    determineLevel(*param, encoder->m_vps);
+
+    if (!param->bAllowNonConformance && encoder->m_vps.ptl.profileIdc == Profile::NONE)
+    {
+        x265_log(param, X265_LOG_INFO, "non-conformant bitstreams not allowed (--allow-non-conformance)\n");
+        goto fail;
+    }
+
+    encoder->create();
+    encoder->m_latestParam = latestParam;
+    memcpy(latestParam, param, sizeof(x265_param));
+    if (encoder->m_aborted)
+        goto fail;
+
+    x265_print_params(param);
+    return encoder;
+
+fail:
+    delete encoder;
+    PARAM_NS::x265_param_free(param);
+    PARAM_NS::x265_param_free(latestParam);
+    return NULL;
+}
+
+/* Generate the stream header NAL units and expose them to the caller via
+ * pp_nal/pi_nal. Returns the NAL list occupancy (presumably the total header
+ * payload in bytes -- confirm against NALList) or -1 on bad arguments. */
+int x265_encoder_headers(x265_encoder *enc, x265_nal **pp_nal, uint32_t *pi_nal)
+{
+    if (pp_nal && enc)
+    {
+        Encoder *encoder = static_cast<Encoder*>(enc);
+        Entropy sbacCoder;
+        Bitstream bs;
+        encoder->getStreamHeaders(encoder->m_nalList, sbacCoder, bs);
+        *pp_nal = &encoder->m_nalList.m_nal[0];
+        if (pi_nal) *pi_nal = encoder->m_nalList.m_numNal;
+        return encoder->m_nalList.m_occupancy;
+    }
+
+    return -1;
+}
+
+/* Copy the encoder's active parameter set into *out; no-op on NULL args. */
+void x265_encoder_parameters(x265_encoder *enc, x265_param *out)
+{
+    if (enc && out)
+    {
+        Encoder *encoder = static_cast<Encoder*>(enc);
+        memcpy(out, encoder->m_param, sizeof(x265_param));
+    }
+}
+
+/* Apply a (restricted) set of parameter changes to a running encoder.
+ * On failure the previously active parameter set is restored unchanged.
+ * Returns 0 on success, non-zero from reconfigureParam() on failure, or
+ * -1 on bad arguments. */
+int x265_encoder_reconfig(x265_encoder* enc, x265_param* param_in)
+{
+    if (!enc || !param_in)
+        return -1;
+
+    /* snapshot current params so a failed reconfigure can be rolled back */
+    x265_param save;
+    Encoder* encoder = static_cast<Encoder*>(enc);
+    memcpy(&save, encoder->m_latestParam, sizeof(x265_param));
+    int ret = encoder->reconfigureParam(encoder->m_latestParam, param_in);
+    if (ret)
+        /* reconfigure failed, recover saved param set */
+        memcpy(encoder->m_latestParam, &save, sizeof(x265_param));
+    else
+    {
+        encoder->m_reconfigured = true;
+        x265_print_reconfigured_params(&save, encoder->m_latestParam);
+    }
+    return ret;
+}
+
+/* Submit one picture to the encoder, or flush pending pictures when pic_in
+ * is NULL. Returns -1 when enc is NULL, otherwise the number of encoded
+ * pictures produced by Encoder::encode(); output NALs (if any) are exposed
+ * via pp_nal/pi_nal. */
+int x265_encoder_encode(x265_encoder *enc, x265_nal **pp_nal, uint32_t *pi_nal, x265_picture *pic_in, x265_picture *pic_out)
+{
+    if (!enc)
+        return -1;
+
+    Encoder *encoder = static_cast<Encoder*>(enc);
+    int numEncoded;
+
+    // While flushing, we cannot return 0 until the entire stream is flushed
+    do
+    {
+        numEncoded = encoder->encode(pic_in, pic_out);
+    }
+    while (numEncoded == 0 && !pic_in && encoder->m_numDelayedPic);
+
+    // do not allow reuse of these buffers for more than one picture. The
+    // encoder now owns these analysisData buffers.
+    if (pic_in)
+    {
+        pic_in->analysisData.intraData = NULL;
+        pic_in->analysisData.interData = NULL;
+    }
+
+    if (pp_nal && numEncoded > 0)
+    {
+        *pp_nal = &encoder->m_nalList.m_nal[0];
+        if (pi_nal) *pi_nal = encoder->m_nalList.m_numNal;
+    }
+    else if (pi_nal)
+        *pi_nal = 0;
+
+    return numEncoded;
+}
+
+/* Fetch cumulative encode statistics into outputStats; no-op on NULL args.
+ * statsSizeBytes is forwarded to fetchStats (presumably so older, smaller
+ * caller structs can be filled safely -- confirm in Encoder::fetchStats). */
+void x265_encoder_get_stats(x265_encoder *enc, x265_stats *outputStats, uint32_t statsSizeBytes)
+{
+    if (enc && outputStats)
+    {
+        Encoder *encoder = static_cast<Encoder*>(enc);
+        encoder->fetchStats(outputStats, statsSizeBytes);
+    }
+}
+
+/* Deprecated entry point kept for API compatibility; only logs a warning. */
+void x265_encoder_log(x265_encoder* enc, int, char **)
+{
+    if (enc)
+    {
+        Encoder *encoder = static_cast<Encoder*>(enc);
+        x265_log(encoder->m_param, X265_LOG_WARNING, "x265_encoder_log is now deprecated\n");
+    }
+}
+
+/* Stop worker threads, print the encode summary, and tear down the encoder.
+ * Also decrements the global CTU-size configuration refcount used by
+ * x265_cleanup(). NULL-safe. */
+void x265_encoder_close(x265_encoder *enc)
+{
+    if (enc)
+    {
+        Encoder *encoder = static_cast<Encoder*>(enc);
+
+        encoder->stopJobs();
+        encoder->printSummary();
+        encoder->destroy();
+        delete encoder;
+        ATOMIC_DEC(&g_ctuSizeConfigured);
+    }
+}
+
+/* Release process-global state, but only once no encoder instance remains
+ * configured (g_ctuSizeConfigured reaches zero). */
+void x265_cleanup(void)
+{
+    if (!g_ctuSizeConfigured)
+    {
+        BitCost::destroy();
+        CUData::s_partSet[0] = NULL; /* allow CUData to adjust to new CTU size */
+    }
+}
+
+/* Allocate an uninitialized x265_picture (may return NULL); callers should
+ * follow with x265_picture_init() and eventually x265_picture_free(). */
+x265_picture *x265_picture_alloc()
+{
+    return (x265_picture*)x265_malloc(sizeof(x265_picture));
+}
+
+/* Zero a picture struct and fill in defaults derived from param: internal
+ * bit depth, color space, auto QP, and (when analysis load/save is enabled)
+ * the CU/partition counts needed to size analysis buffers. */
+void x265_picture_init(x265_param *param, x265_picture *pic)
+{
+    memset(pic, 0, sizeof(x265_picture));
+
+    pic->bitDepth = param->internalBitDepth;
+    pic->colorSpace = param->internalCsp;
+    pic->forceqp = X265_QP_AUTO;
+    pic->quantOffsets = NULL;
+    if (param->analysisMode)
+    {
+        /* number of CTUs in the frame, rounding partial CTUs up */
+        uint32_t widthInCU       = (param->sourceWidth  + g_maxCUSize - 1) >> g_maxLog2CUSize;
+        uint32_t heightInCU      = (param->sourceHeight + g_maxCUSize - 1) >> g_maxLog2CUSize;
+
+        uint32_t numCUsInFrame   = widthInCU * heightInCU;
+        pic->analysisData.numCUsInFrame = numCUsInFrame;
+        pic->analysisData.numPartitions = NUM_4x4_PARTITIONS;
+    }
+}
+
+/* Release a picture allocated by x265_picture_alloc(). */
+void x265_picture_free(x265_picture *p)
+{
+    return x265_free(p);
+}
+
+/* The API table returned for this library's own bit depth by x265_api_get()
+ * and x265_api_query(). NOTE(review): initializers are positional, so the
+ * ordering must track the x265_api struct declared in x265.h -- verify when
+ * fields are added or reordered. */
+static const x265_api libapi =
+{
+    X265_MAJOR_VERSION,
+    X265_BUILD,
+    sizeof(x265_param),
+    sizeof(x265_picture),
+    sizeof(x265_analysis_data),
+    sizeof(x265_zone),
+    sizeof(x265_stats),
+
+    PFX(max_bit_depth),
+    PFX(version_str),
+    PFX(build_info_str),
+
+    &PARAM_NS::x265_param_alloc,
+    &PARAM_NS::x265_param_free,
+    &PARAM_NS::x265_param_default,
+    &PARAM_NS::x265_param_parse,
+    &PARAM_NS::x265_param_apply_profile,
+    &PARAM_NS::x265_param_default_preset,
+    &x265_picture_alloc,
+    &x265_picture_free,
+    &x265_picture_init,
+    &x265_encoder_open,
+    &x265_encoder_parameters,
+    &x265_encoder_reconfig,
+    &x265_encoder_headers,
+    &x265_encoder_encode,
+    &x265_encoder_get_stats,
+    &x265_encoder_log,
+    &x265_encoder_close,
+    &x265_cleanup,
+
+    sizeof(x265_frame_stats),
+};
+
+typedef const x265_api* (*api_get_func)(int bitDepth);
+typedef const x265_api* (*api_query_func)(int bitDepth, int apiVersion, int* err);
+
+#define xstr(s) str(s)
+#define str(s) #s
+
+#if _WIN32
+#define ext ".dll"
+#elif MACOS
+#include <dlfcn.h>
+#define ext ".dylib"
+#else
+#include <dlfcn.h>
+#define ext ".so"
+#endif
+
+#if ENABLE_SHARED
+static int g_recursion /* = 0 */;
+#endif
+  
+/* Return the API table for the requested bit depth. A zero or matching
+ * bitDepth yields this library's own table; otherwise the request is
+ * forwarded to a statically linked multilib build when available, or (with
+ * shared-library support) resolved by dynamically loading the matching
+ * libx265 and calling its build-versioned x265_api_get entry point.
+ * Returns NULL when no suitable library can be found. */
+const x265_api* x265_api_get(int bitDepth)
+{
+    if (bitDepth && bitDepth != X265_DEPTH)
+    {
+#if LINKED_8BIT
+        if (bitDepth == 8) return x265_8bit::x265_api_get(0);
+#endif
+#if LINKED_10BIT
+        if (bitDepth == 10) return x265_10bit::x265_api_get(0);
+#endif
+#if LINKED_12BIT
+        if (bitDepth == 12) return x265_12bit::x265_api_get(0);
+#endif
+#if ENABLE_SHARED
+        const char* libname = NULL;
+        /* symbol name includes X265_BUILD so only an ABI-compatible library
+         * will resolve */
+        const char* method = "x265_api_get_" xstr(X265_BUILD);
+        const char* multilibname = "libx265" ext;
+
+        if (bitDepth == 12)
+            libname = "libx265_main12" ext;
+        else if (bitDepth == 10)
+            libname = "libx265_main10" ext;
+        else if (bitDepth == 8)
+            libname = "libx265_main" ext;
+        else
+            return NULL;
+
+        const x265_api* api = NULL;
+        int reqDepth = 0;
+
+        /* guard against unbounded recursion when libraries load each other */
+        if (g_recursion > 1)
+            return NULL;
+        else
+            g_recursion++;
+
+#if _WIN32
+        HMODULE h = LoadLibraryA(libname);
+        if (!h)
+        {
+            /* fall back to the combined multilib library, passing the depth
+             * through so it can dispatch internally */
+            h = LoadLibraryA(multilibname);
+            reqDepth = bitDepth;
+        }
+        if (h)
+        {
+            api_get_func get = (api_get_func)GetProcAddress(h, method);
+            if (get)
+                api = get(reqDepth);
+        }
+#else
+        void* h = dlopen(libname, RTLD_LAZY | RTLD_LOCAL);
+        if (!h)
+        {
+            h = dlopen(multilibname, RTLD_LAZY | RTLD_LOCAL);
+            reqDepth = bitDepth;
+        }
+        if (h)
+        {
+            api_get_func get = (api_get_func)dlsym(h, method);
+            if (get)
+                api = get(reqDepth);
+        }
+#endif
+
+        g_recursion--;
+
+        /* verify the loaded library really supports the requested depth */
+        if (api && bitDepth != api->bit_depth)
+        {
+            x265_log(NULL, X265_LOG_WARNING, "%s does not support requested bitDepth %d\n", libname, bitDepth);
+            return NULL;
+        }
+
+        return api;
+#else
+        return NULL;
+#endif
+    }
+
+    return &libapi;
+}
+
+/* Versioned variant of x265_api_get(): refuses API versions older than build
+ * 51 and reports a detailed error code through *err. The multilib dispatch
+ * and dynamic-loading logic mirrors x265_api_get(); returns NULL on any
+ * failure with *err set when err is non-NULL. */
+const x265_api* x265_api_query(int bitDepth, int apiVersion, int* err)
+{
+    if (apiVersion < 51)
+    {
+        /* builds before 1.6 had re-ordered public structs */
+        if (err) *err = X265_API_QUERY_ERR_VER_REFUSED;
+        return NULL;
+    }
+
+    if (err) *err = X265_API_QUERY_ERR_NONE;
+
+    if (bitDepth && bitDepth != X265_DEPTH)
+    {
+#if LINKED_8BIT
+        if (bitDepth == 8) return x265_8bit::x265_api_query(0, apiVersion, err);
+#endif
+#if LINKED_10BIT
+        if (bitDepth == 10) return x265_10bit::x265_api_query(0, apiVersion, err);
+#endif
+#if LINKED_12BIT
+        if (bitDepth == 12) return x265_12bit::x265_api_query(0, apiVersion, err);
+#endif
+#if ENABLE_SHARED
+        const char* libname = NULL;
+        const char* method = "x265_api_query";
+        const char* multilibname = "libx265" ext;
+
+        if (bitDepth == 12)
+            libname = "libx265_main12" ext;
+        else if (bitDepth == 10)
+            libname = "libx265_main10" ext;
+        else if (bitDepth == 8)
+            libname = "libx265_main" ext;
+        else
+        {
+            if (err) *err = X265_API_QUERY_ERR_LIB_NOT_FOUND;
+            return NULL;
+        }
+
+        const x265_api* api = NULL;
+        int reqDepth = 0;
+        int e = X265_API_QUERY_ERR_LIB_NOT_FOUND;
+
+        /* guard against unbounded recursion when libraries load each other */
+        if (g_recursion > 1)
+        {
+            if (err) *err = X265_API_QUERY_ERR_LIB_NOT_FOUND;
+            return NULL;
+        }
+        else
+            g_recursion++;
+
+#if _WIN32
+        HMODULE h = LoadLibraryA(libname);
+        if (!h)
+        {
+            /* fall back to the combined multilib library, passing the depth
+             * through so it can dispatch internally */
+            h = LoadLibraryA(multilibname);
+            reqDepth = bitDepth;
+        }
+        if (h)
+        {
+            e = X265_API_QUERY_ERR_FUNC_NOT_FOUND;
+            api_query_func query = (api_query_func)GetProcAddress(h, method);
+            if (query)
+                api = query(reqDepth, apiVersion, err);
+        }
+#else
+        void* h = dlopen(libname, RTLD_LAZY | RTLD_LOCAL);
+        if (!h)
+        {
+            h = dlopen(multilibname, RTLD_LAZY | RTLD_LOCAL);
+            reqDepth = bitDepth;
+        }
+        if (h)
+        {
+            e = X265_API_QUERY_ERR_FUNC_NOT_FOUND;
+            api_query_func query = (api_query_func)dlsym(h, method);
+            if (query)
+                api = query(reqDepth, apiVersion, err);
+        }
+#endif
+
+        g_recursion--;
+
+        /* verify the loaded library really supports the requested depth */
+        if (api && bitDepth != api->bit_depth)
+        {
+            x265_log(NULL, X265_LOG_WARNING, "%s does not support requested bitDepth %d\n", libname, bitDepth);
+            if (err) *err = X265_API_QUERY_ERR_WRONG_BITDEPTH;
+            return NULL;
+        }
+
+        if (err) *err = api ? X265_API_QUERY_ERR_NONE : e;
+        return api;
+#else
+        if (err) *err = X265_API_QUERY_ERR_WRONG_BITDEPTH;
+        return NULL;
+#endif
+    }
+
+    return &libapi;
+}
+
+} /* end namespace or extern "C" */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/bitcost.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,91 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "bitcost.h"
+
+using namespace X265_NS;
+
+/* Select (and lazily build) the lambda-scaled MV-bit-cost table for this QP.
+ * Tables are class-static and shared by all BitCost instances, so first-time
+ * construction is serialized with s_costCalcLock using a double-checked test.
+ * NOTE(review): qp is not range-checked; assumes qp < BC_MAX_QP — confirm at callers. */
+void BitCost::setQP(unsigned int qp)
+{
+    if (!s_costs[qp])
+    {
+        ScopedLock s(s_costCalcLock);
+
+        // Now that we have acquired the lock, check again if another thread calculated
+        // this row while we were blocked
+        if (!s_costs[qp])
+        {
+            x265_emms(); // just to be safe
+
+            CalculateLogs();
+            // allocate 4*BC_MAX_MV+1 entries, then offset the stored pointer by
+            // 2*BC_MAX_MV so the table can be indexed by signed MVD in
+            // [-2*BC_MAX_MV, +2*BC_MAX_MV]; destroy() undoes this offset
+            s_costs[qp] = new uint16_t[4 * BC_MAX_MV + 1] + 2 * BC_MAX_MV;
+            double lambda = x265_lambda_tab[qp];
+
+            // estimate same cost for negative and positive MVD
+            // (clamped to 2^15-1 so it fits in uint16_t with headroom)
+            for (int i = 0; i <= 2 * BC_MAX_MV; i++)
+                s_costs[qp][i] = s_costs[qp][-i] = (uint16_t)X265_MIN(s_bitsizes[i] * lambda + 0.5f, (1 << 15) - 1);
+        }
+    }
+
+    m_cost = s_costs[qp];
+}
+
+/***
+ * Class static data and methods
+ */
+
+// per-QP MV cost tables; each non-NULL entry points 2*BC_MAX_MV into its
+// allocation so it can be indexed by signed MVD (see setQP/destroy)
+uint16_t *BitCost::s_costs[BC_MAX_QP];
+
+// unscaled bit-size estimates for MVD magnitudes, built once by CalculateLogs()
+float *BitCost::s_bitsizes;
+
+// guards lazy construction of the shared static tables above
+Lock BitCost::s_costCalcLock;
+
+/* One-time build of the shared s_bitsizes table: an estimate of the number of
+ * bits needed to signal each MVD magnitude, log((i+1)) * 2/log(2) + 1.718,
+ * with a special-cased entry for MVD == 0.  Idempotent; callers hold
+ * s_costCalcLock (see setQP), so the unguarded check here is safe. */
+void BitCost::CalculateLogs()
+{
+    if (!s_bitsizes)
+    {
+        s_bitsizes = new float[2 * BC_MAX_MV + 1];
+        s_bitsizes[0] = 0.718f;
+        float log2_2 = 2.0f / log(2.0f);  // 2 x 1/log(2)
+        for (int i = 1; i <= 2 * BC_MAX_MV; i++)
+            s_bitsizes[i] = log((float)(i + 1)) * log2_2 + 1.718f;
+    }
+}
+
+/* Free all shared static tables.  Each s_costs entry was stored offset by
+ * +2*BC_MAX_MV (see setQP), so the offset must be subtracted back off before
+ * delete[] to recover the original allocation address. */
+void BitCost::destroy()
+{
+    for (int i = 0; i < BC_MAX_QP; i++)
+    {
+        if (s_costs[i])
+        {
+            delete [] (s_costs[i] - 2 * BC_MAX_MV);
+
+            s_costs[i] = 0;
+        }
+    }
+
+    delete [] s_bitsizes;
+    s_bitsizes = 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/bitcost.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,93 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_BITCOST_H
+#define X265_BITCOST_H
+
+#include "common.h"
+#include "threading.h"
+#include "mv.h"
+
+namespace X265_NS {
+// private x265 namespace
+
+/* Estimates the bit cost of coding motion vector differences (MVDs), both
+ * raw and multiplied by the rate-control lambda for the active QP.  Cost
+ * tables are static and shared by all instances; call setQP() before using
+ * mvcost(), and setMVP() before using mvcost()/bitcost(const MV&). */
+class BitCost
+{
+public:
+
+    BitCost() : m_cost_mvx(0), m_cost_mvy(0), m_cost(0), m_mvp(0) {}
+
+    // select (and lazily build) the lambda-scaled cost table for this QP
+    void setQP(unsigned int qp);
+
+    // set the MV predictor; pre-biases the x/y cost pointers so mvcost() can
+    // index directly by candidate MV components
+    void setMVP(const MV& mvp)                      { m_mvp = mvp; m_cost_mvx = m_cost - mvp.x; m_cost_mvy = m_cost - mvp.y; }
+
+    // return bit cost of motion vector difference, multiplied by lambda
+    inline uint16_t mvcost(const MV& mv) const      { return m_cost_mvx[mv.x] + m_cost_mvy[mv.y]; }
+
+    // return bit cost of motion vector difference, without lambda
+    inline uint32_t bitcost(const MV& mv) const
+    {
+        return (uint32_t)(s_bitsizes[abs(mv.x - m_mvp.x)] +
+                          s_bitsizes[abs(mv.y - m_mvp.y)] + 0.5f);
+    }
+
+    // same as above but with an explicit predictor (no instance state needed)
+    static inline uint32_t bitcost(const MV& mv, const MV& mvp)
+    {
+        return (uint32_t)(s_bitsizes[abs(mv.x - mvp.x)] +
+                          s_bitsizes[abs(mv.y - mvp.y)] + 0.5f);
+    }
+
+    // free all shared static tables (encoder-wide teardown)
+    static void destroy();
+
+protected:
+
+    uint16_t *m_cost_mvx;   // m_cost biased by -mvp.x (set by setMVP)
+
+    uint16_t *m_cost_mvy;   // m_cost biased by -mvp.y (set by setMVP)
+
+    uint16_t *m_cost;       // shared per-QP table selected by setQP
+
+    MV        m_mvp;        // current motion vector predictor
+
+    BitCost& operator =(const BitCost&); // non-assignable
+
+private:
+
+    /* default log2_max_mv_length_horizontal and log2_max_mv_length_vertical
+     * are 15, specified in quarter-pel luma sample units, making the maximum
+     * signaled full-pel motion distance 4096, max qpel is 32768 */
+    enum { BC_MAX_MV = (1 << 15) };
+
+    enum { BC_MAX_QP = 82 };
+
+    static float *s_bitsizes;
+
+    static uint16_t *s_costs[BC_MAX_QP];
+
+    static Lock s_costCalcLock;
+
+    static void CalculateLogs();
+};
+}
+
+#endif // ifndef X265_BITCOST_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/dpb.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,303 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "frame.h"
+#include "framedata.h"
+#include "picyuv.h"
+#include "slice.h"
+
+#include "dpb.h"
+
+using namespace X265_NS;
+
+/* Encoder-teardown destructor: destroys and deletes every Frame still held in
+ * the free and active picture lists, then walks the singly-linked FrameData
+ * recycle list, destroying each entry together with its reconstructed picture. */
+DPB::~DPB()
+{
+    while (!m_freeList.empty())
+    {
+        Frame* curFrame = m_freeList.popFront();
+        curFrame->destroy();
+        delete curFrame;
+    }
+
+    while (!m_picList.empty())
+    {
+        Frame* curFrame = m_picList.popFront();
+        curFrame->destroy();
+        delete curFrame;
+    }
+
+    while (m_frameDataFreeList)
+    {
+        // save the next link before destroying the current node
+        FrameData* next = m_frameDataFreeList->m_freeListNext;
+        m_frameDataFreeList->destroy();
+
+        m_frameDataFreeList->m_reconPic->destroy();
+        delete m_frameDataFreeList->m_reconPic;
+
+        delete m_frameDataFreeList;
+        m_frameDataFreeList = next;
+    }
+}
+
+// move unreferenced pictures from picList to freeList for recycle
+// A picture is recyclable once no slice references it (m_bHasReferences) and
+// no frame encoder is still reading it (m_countRefEncoders).  Its FrameData
+// is detached onto the recycle list for reuse by a future frame.
+void DPB::recycleUnreferenced()
+{
+    Frame *iterFrame = m_picList.first();
+
+    while (iterFrame)
+    {
+        Frame *curFrame = iterFrame;
+        iterFrame = iterFrame->m_next;
+        if (!curFrame->m_encData->m_bHasReferences && !curFrame->m_countRefEncoders)
+        {
+            curFrame->m_reconRowCount.set(0);
+            curFrame->m_bChromaExtended = false;
+
+            // iterator is invalidated by remove, restart scan
+            m_picList.remove(*curFrame);
+            iterFrame = m_picList.first();
+
+            // push the frame shell to the free list and chain its FrameData
+            // onto the recycle list; the frame keeps neither
+            m_freeList.pushBack(*curFrame);
+            curFrame->m_encData->m_freeListNext = m_frameDataFreeList;
+            m_frameDataFreeList = curFrame->m_encData;
+            curFrame->m_encData = NULL;
+            curFrame->m_reconPic = NULL;
+        }
+    }
+}
+
+/* Prepare a new frame for encoding: assign its NAL unit type and slice type,
+ * insert it into the DPB, perform decoding-refresh marking, build and apply
+ * the reference picture set, construct the L0/L1 reference lists, and pin all
+ * motion-referenced frames against recycling. */
+void DPB::prepareEncode(Frame *newFrame)
+{
+    Slice* slice = newFrame->m_encData->m_slice;
+    slice->m_poc = newFrame->m_poc;
+
+    int pocCurr = slice->m_poc;
+    int type = newFrame->m_lowres.sliceType;
+    bool bIsKeyFrame = newFrame->m_lowres.bKeyframe;
+
+    slice->m_nalUnitType = getNalUnitType(pocCurr, bIsKeyFrame);
+    if (slice->m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR_W_RADL)
+        m_lastIDR = pocCurr;
+    slice->m_lastIDR = m_lastIDR;
+    slice->m_sliceType = IS_X265_TYPE_B(type) ? B_SLICE : (type == X265_TYPE_P) ? P_SLICE : I_SLICE;
+
+    if (type == X265_TYPE_B)
+    {
+        newFrame->m_encData->m_bHasReferences = false;
+
+        // Adjust NAL type for unreferenced B frames (change from _R "referenced"
+        // to _N "non-referenced" NAL unit type)
+        switch (slice->m_nalUnitType)
+        {
+        case NAL_UNIT_CODED_SLICE_TRAIL_R:
+            slice->m_nalUnitType = m_bTemporalSublayer ? NAL_UNIT_CODED_SLICE_TSA_N : NAL_UNIT_CODED_SLICE_TRAIL_N;
+            break;
+        case NAL_UNIT_CODED_SLICE_RADL_R:
+            slice->m_nalUnitType = NAL_UNIT_CODED_SLICE_RADL_N;
+            break;
+        case NAL_UNIT_CODED_SLICE_RASL_R:
+            slice->m_nalUnitType = NAL_UNIT_CODED_SLICE_RASL_N;
+            break;
+        default:
+            break;
+        }
+    }
+    else
+    {
+        /* m_bHasReferences starts out as true for non-B pictures, and is set to false
+         * once no more pictures reference it */
+        newFrame->m_encData->m_bHasReferences = true;
+    }
+
+    m_picList.pushFront(*newFrame);
+
+    // Do decoding refresh marking if any
+    decodingRefreshMarking(pocCurr, slice->m_nalUnitType);
+
+    computeRPS(pocCurr, slice->isIRAP(), &slice->m_rps, slice->m_sps->maxDecPicBuffering);
+
+    // Mark pictures in m_piclist as unreferenced if they are not included in RPS
+    applyReferencePictureSet(&slice->m_rps, pocCurr);
+
+    slice->m_numRefIdx[0] = X265_MIN(m_maxRefL0, slice->m_rps.numberOfNegativePictures); // Ensuring L0 contains just the -ve POC
+    slice->m_numRefIdx[1] = X265_MIN(m_maxRefL1, slice->m_rps.numberOfPositivePictures);
+    slice->setRefPicList(m_picList);
+
+    X265_CHECK(slice->m_sliceType != B_SLICE || slice->m_numRefIdx[1], "B slice without L1 references (non-fatal)\n");
+
+    if (slice->m_sliceType == B_SLICE)
+    {
+        /* TODO: the lookahead should be able to tell which reference picture
+         * had the least motion residual.  We should be able to use that here to
+         * select a colocation reference list and index */
+        slice->m_colFromL0Flag = false;
+        slice->m_colRefIdx = 0;
+        slice->m_bCheckLDC = false;
+    }
+    else
+    {
+        slice->m_bCheckLDC = true;
+        slice->m_colFromL0Flag = true;
+        slice->m_colRefIdx = 0;
+    }
+    // POC-dependent toggle for the slice loop-filter-across-slices flag
+    slice->m_sLFaseFlag = (SLFASE_CONSTANT & (1 << (pocCurr % 31))) > 0;
+
+    /* Increment reference count of all motion-referenced frames to prevent them
+     * from being recycled. These counts are decremented at the end of
+     * compressFrame() */
+    int numPredDir = slice->isInterP() ? 1 : slice->isInterB() ? 2 : 0;
+    for (int l = 0; l < numPredDir; l++)
+    {
+        for (int ref = 0; ref < slice->m_numRefIdx[l]; ref++)
+        {
+            Frame *refpic = slice->m_refFrameList[l][ref];
+            ATOMIC_INC(&refpic->m_countRefEncoders);
+        }
+    }
+}
+
+/* Build the Reference Picture Set for the picture with POC curPoc: collect up
+ * to maxDecPicBuffer-1 still-referenced pictures from the DPB, record their
+ * POCs and delta-POCs, count negative vs positive deltas, and sort by delta.
+ * For IRAP pictures (isRAP) every entry is marked unused for the current pic. */
+void DPB::computeRPS(int curPoc, bool isRAP, RPS * rps, unsigned int maxDecPicBuffer)
+{
+    unsigned int poci = 0, numNeg = 0, numPos = 0;
+
+    Frame* iterPic = m_picList.first();
+
+    while (iterPic && (poci < maxDecPicBuffer - 1))
+    {
+        if ((iterPic->m_poc != curPoc) && iterPic->m_encData->m_bHasReferences)
+        {
+            rps->poc[poci] = iterPic->m_poc;
+            rps->deltaPOC[poci] = rps->poc[poci] - curPoc;
+            (rps->deltaPOC[poci] < 0) ? numNeg++ : numPos++;
+            rps->bUsed[poci] = !isRAP;
+            poci++;
+        }
+        iterPic = iterPic->m_next;
+    }
+
+    rps->numberOfPictures = poci;
+    rps->numberOfPositivePictures = numPos;
+    rps->numberOfNegativePictures = numNeg;
+
+    rps->sortDeltaPOC();
+}
+
+/* Marking reference pictures when an IDR/CRA is encountered.
+ * IDR: immediately mark every other DPB picture unused for reference.
+ * CRA: defer the refresh (m_bRefreshPending) until a later picture with
+ * POC > m_pocCRA is reached, then mark all but the CRA picture unused. */
+void DPB::decodingRefreshMarking(int pocCurr, NalUnitType nalUnitType)
+{
+    if (nalUnitType == NAL_UNIT_CODED_SLICE_IDR_W_RADL)
+    {
+        /* If the nal_unit_type is IDR, all pictures in the reference picture
+         * list are marked as "unused for reference" */
+        Frame* iterFrame = m_picList.first();
+        while (iterFrame)
+        {
+            if (iterFrame->m_poc != pocCurr)
+                iterFrame->m_encData->m_bHasReferences = false;
+            iterFrame = iterFrame->m_next;
+        }
+    }
+    else // CRA or No DR
+    {
+        if (m_bRefreshPending && pocCurr > m_pocCRA)
+        {
+            /* If the bRefreshPending flag is true (a deferred decoding refresh
+             * is pending) and the current temporal reference is greater than
+             * the temporal reference of the latest CRA picture (pocCRA), mark
+             * all reference pictures except the latest CRA picture as "unused
+             * for reference" and set the bRefreshPending flag to false */
+            Frame* iterFrame = m_picList.first();
+            while (iterFrame)
+            {
+                if (iterFrame->m_poc != pocCurr && iterFrame->m_poc != m_pocCRA)
+                    iterFrame->m_encData->m_bHasReferences = false;
+                iterFrame = iterFrame->m_next;
+            }
+
+            m_bRefreshPending = false;
+        }
+        if (nalUnitType == NAL_UNIT_CODED_SLICE_CRA)
+        {
+            /* If the nal_unit_type is CRA, set the bRefreshPending flag to true
+             * and pocCRA to the temporal reference of the current picture */
+            m_bRefreshPending = true;
+            m_pocCRA = pocCurr;
+        }
+    }
+
+    /* Note that the current picture is already placed in the reference list and
+     * its marking is not changed.  If the current picture has a nal_ref_idc
+     * that is not 0, it will remain marked as "used for reference" */
+}
+
+/** Function for applying picture marking based on the Reference Picture Set:
+ * any DPB picture (other than the current one) whose POC does not appear in
+ * the RPS delta-POC list is marked "unused for reference", making it
+ * eligible for recycling by recycleUnreferenced(). */
+void DPB::applyReferencePictureSet(RPS *rps, int curPoc)
+{
+    // loop through all pictures in the reference picture buffer
+    Frame* iterFrame = m_picList.first();
+    while (iterFrame)
+    {
+        if (iterFrame->m_poc != curPoc && iterFrame->m_encData->m_bHasReferences)
+        {
+            // loop through all pictures in the Reference Picture Set
+            // to see if the picture should be kept as reference picture
+            bool referenced = false;
+            for (int i = 0; i < rps->numberOfPositivePictures + rps->numberOfNegativePictures; i++)
+            {
+                if (iterFrame->m_poc == curPoc + rps->deltaPOC[i])
+                {
+                    referenced = true;
+                    break;
+                }
+            }
+            if (!referenced)
+                iterFrame->m_encData->m_bHasReferences = false;
+        }
+        iterFrame = iterFrame->m_next;
+    }
+}
+
+/* deciding the nal_unit_type
+ * POC 0 and closed-GOP keyframes -> IDR; open-GOP keyframes -> CRA;
+ * pictures preceding the latest CRA -> RASL; pictures preceding the latest
+ * IDR -> RADL; everything else is an ordinary trailing referenced picture. */
+NalUnitType DPB::getNalUnitType(int curPOC, bool bIsKeyFrame)
+{
+    if (!curPOC)
+        return NAL_UNIT_CODED_SLICE_IDR_W_RADL;
+
+    if (bIsKeyFrame)
+        return m_bOpenGOP ? NAL_UNIT_CODED_SLICE_CRA : NAL_UNIT_CODED_SLICE_IDR_W_RADL;
+
+    if (m_pocCRA && curPOC < m_pocCRA)
+        // All leading pictures are being marked as TFD pictures here since
+        // current encoder uses all reference pictures while encoding leading
+        // pictures. An encoder can ensure that a leading picture can be still
+        // decodable when random accessing to a CRA/CRANT/BLA/BLANT picture by
+        // controlling the reference pictures used for encoding that leading
+        // picture. Such a leading picture need not be marked as a TFD picture.
+        return NAL_UNIT_CODED_SLICE_RASL_R;
+
+    if (m_lastIDR && curPOC < m_lastIDR)
+        return NAL_UNIT_CODED_SLICE_RADL_R;
+
+    return NAL_UNIT_CODED_SLICE_TRAIL_R;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/dpb.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,80 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_DPB_H
+#define X265_DPB_H
+
+#include "piclist.h"
+
+namespace X265_NS {
+// private namespace for x265
+
+class Frame;
+class FrameData;
+class Slice;
+
+/* Decoded Picture Buffer: owns the list of in-flight/reference frames, a free
+ * list of recyclable frame shells, and a recycle list of FrameData objects.
+ * prepareEncode() admits a new frame and builds its reference state;
+ * recycleUnreferenced() reclaims frames no longer needed. */
+class DPB
+{
+public:
+
+    int                m_lastIDR;           // POC of the most recent IDR picture
+    int                m_pocCRA;            // POC of the most recent CRA picture
+    int                m_maxRefL0;          // cap on list-0 reference count
+    int                m_maxRefL1;          // cap on list-1 reference count
+    int                m_bOpenGOP;          // keyframes become CRA instead of IDR
+    bool               m_bRefreshPending;   // deferred CRA decoding refresh pending
+    bool               m_bTemporalSublayer; // emit TSA_N instead of TRAIL_N for unref B
+    PicList            m_picList;           // active/reference pictures
+    PicList            m_freeList;          // recyclable frame shells
+    FrameData*         m_frameDataFreeList; // singly-linked FrameData recycle list
+
+    DPB(x265_param *param)
+    {
+        m_lastIDR = 0;
+        m_pocCRA = 0;
+        m_bRefreshPending = false;
+        m_frameDataFreeList = NULL;
+        m_maxRefL0 = param->maxNumReferences;
+        m_maxRefL1 = param->bBPyramid ? 2 : 1;
+        m_bOpenGOP = param->bOpenGOP;
+        m_bTemporalSublayer = !!param->bEnableTemporalSubLayers;
+    }
+
+    ~DPB();
+
+    void prepareEncode(Frame*);
+
+    void recycleUnreferenced();
+
+protected:
+
+    void computeRPS(int curPoc, bool isRAP, RPS * rps, unsigned int maxDecPicBuffer);
+
+    void applyReferencePictureSet(RPS *rps, int curPoc);
+    void decodingRefreshMarking(int pocCurr, NalUnitType nalUnitType);
+
+    NalUnitType getNalUnitType(int curPoc, bool bIsKeyFrame);
+};
+}
+
+#endif // X265_DPB_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/encoder.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1921 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "threadpool.h"
+#include "param.h"
+#include "frame.h"
+#include "framedata.h"
+#include "picyuv.h"
+
+#include "bitcost.h"
+#include "encoder.h"
+#include "slicetype.h"
+#include "frameencoder.h"
+#include "ratecontrol.h"
+#include "dpb.h"
+#include "nal.h"
+
+#include "x265.h"
+
+#if _MSC_VER
+#pragma warning(disable: 4996) // POSIX functions are just fine, thanks
+#endif
+
+namespace X265_NS {
+const char g_sliceTypeToChar[] = {'B', 'P', 'I'};
+}
+
+static const char* defaultAnalysisFileName = "x265_analysis.dat";
+
+using namespace X265_NS;
+
+/* Default-construct the encoder with all subsystem pointers NULL and all
+ * counters zeroed; real allocation/initialization happens in create().
+ * Also initializes the motion-estimation scale tables (process-wide). */
+Encoder::Encoder()
+{
+    m_aborted = false;
+    m_reconfigured = false;
+    m_encodedFrameNum = 0;
+    m_pocLast = -1;
+    m_curEncoder = 0;
+    m_numLumaWPFrames = 0;
+    m_numChromaWPFrames = 0;
+    m_numLumaWPBiFrames = 0;
+    m_numChromaWPBiFrames = 0;
+    m_lookahead = NULL;
+    m_rateControl = NULL;
+    m_dpb = NULL;
+    m_exportedPic = NULL;
+    m_numDelayedPic = 0;
+    m_outputCount = 0;
+    m_param = NULL;
+    m_latestParam = NULL;
+    m_threadPool = NULL;
+    m_analysisFile = NULL;
+    m_offsetEmergency = NULL;
+    for (int i = 0; i < X265_MAX_FRAME_THREADS; i++)
+        m_frameEncoder[i] = NULL;
+
+    MotionEstimate::initScales();
+}
+
+/* Allocate and initialize all encoder subsystems from m_param: thread pools,
+ * frame encoder threads, scaling lists, lookahead, DPB, rate control, the
+ * VPS/SPS/PPS headers, and (when VBV is active) the emergency-denoise offset
+ * tables.  On any failure m_aborted is set rather than throwing. */
+void Encoder::create()
+{
+    if (!primitives.pu[0].sad)
+    {
+        // this should be an impossible condition when using our public API, and indicates a serious bug.
+        x265_log(m_param, X265_LOG_ERROR, "Primitives must be initialized before encoder is created\n");
+        abort();
+    }
+
+    x265_param* p = m_param;
+
+    // CTU grid dimensions for the configured picture size
+    int rows = (p->sourceHeight + p->maxCUSize - 1) >> g_log2Size[p->maxCUSize];
+    int cols = (p->sourceWidth  + p->maxCUSize - 1) >> g_log2Size[p->maxCUSize];
+
+    // Do not allow WPP if only one row or fewer than 3 columns, it is pointless and unstable
+    if (rows == 1 || cols < 3)
+    {
+        x265_log(p, X265_LOG_WARNING, "Too few rows/columns, --wpp disabled\n");
+        p->bEnableWavefront = 0;
+    }
+
+    bool allowPools = !p->numaPools || strcmp(p->numaPools, "none");
+
+    // Trim the thread pool if --wpp, --pme, and --pmode are disabled
+    if (!p->bEnableWavefront && !p->bDistributeModeAnalysis && !p->bDistributeMotionEstimation && !p->lookaheadSlices)
+        allowPools = false;
+
+    if (!p->frameNumThreads)
+    {
+        // auto-detect frame threads
+        int cpuCount = ThreadPool::getCpuCount();
+        if (!p->bEnableWavefront)
+            p->frameNumThreads = X265_MIN3(cpuCount, (rows + 1) / 2, X265_MAX_FRAME_THREADS);
+        else if (cpuCount >= 32)
+            p->frameNumThreads = (p->sourceHeight > 2000) ? 8 : 6; // dual-socket 10-core IvyBridge or higher
+        else if (cpuCount >= 16)
+            p->frameNumThreads = 5; // 8 HT cores, or dual socket
+        else if (cpuCount >= 8)
+            p->frameNumThreads = 3; // 4 HT cores
+        else if (cpuCount >= 4)
+            p->frameNumThreads = 2; // Dual or Quad core
+        else
+            p->frameNumThreads = 1;
+    }
+
+    m_numPools = 0;
+    if (allowPools)
+        m_threadPool = ThreadPool::allocThreadPools(p, m_numPools);
+
+    if (!m_numPools)
+    {
+        // issue warnings if any of these features were requested
+        if (p->bEnableWavefront)
+            x265_log(p, X265_LOG_WARNING, "No thread pool allocated, --wpp disabled\n");
+        if (p->bDistributeMotionEstimation)
+            x265_log(p, X265_LOG_WARNING, "No thread pool allocated, --pme disabled\n");
+        if (p->bDistributeModeAnalysis)
+            x265_log(p, X265_LOG_WARNING, "No thread pool allocated, --pmode disabled\n");
+        if (p->lookaheadSlices)
+            x265_log(p, X265_LOG_WARNING, "No thread pool allocated, --lookahead-slices disabled\n");
+
+        // disable all pool features if the thread pool is disabled or unusable.
+        p->bEnableWavefront = p->bDistributeModeAnalysis = p->bDistributeMotionEstimation = p->lookaheadSlices = 0;
+    }
+
+    if (!p->bEnableWavefront && p->rc.vbvBufferSize)
+    {
+        x265_log(p, X265_LOG_ERROR, "VBV requires wavefront parallelism\n");
+        m_aborted = true;
+    }
+
+    // build a human-readable summary of the enabled pool features for the log
+    char buf[128];
+    int len = 0;
+    if (p->bEnableWavefront)
+        len += sprintf(buf + len, "wpp(%d rows)", rows);
+    if (p->bDistributeModeAnalysis)
+        len += sprintf(buf + len, "%spmode", len ? "+" : "");
+    if (p->bDistributeMotionEstimation)
+        len += sprintf(buf + len, "%spme ", len ? "+" : "");
+    if (!len)
+        strcpy(buf, "none");
+
+    x265_log(p, X265_LOG_INFO, "frame threads / pool features       : %d / %s\n", p->frameNumThreads, buf);
+
+    for (int i = 0; i < m_param->frameNumThreads; i++)
+    {
+        m_frameEncoder[i] = new FrameEncoder;
+        m_frameEncoder[i]->m_nalList.m_annexB = !!m_param->bAnnexB;
+    }
+
+    if (m_numPools)
+    {
+        // distribute frame encoders round-robin across the pools and register
+        // each as a job provider before starting the worker threads
+        for (int i = 0; i < m_param->frameNumThreads; i++)
+        {
+            int pool = i % m_numPools;
+            m_frameEncoder[i]->m_pool = &m_threadPool[pool];
+            m_frameEncoder[i]->m_jpId = m_threadPool[pool].m_numProviders++;
+            m_threadPool[pool].m_jpTable[m_frameEncoder[i]->m_jpId] = m_frameEncoder[i];
+        }
+        for (int i = 0; i < m_numPools; i++)
+            m_threadPool[i].start();
+    }
+    else
+    {
+        /* CU stats and noise-reduction buffers are indexed by jpId, so it cannot be left as -1 */
+        for (int i = 0; i < m_param->frameNumThreads; i++)
+            m_frameEncoder[i]->m_jpId = 0;
+    }
+
+    if (!m_scalingList.init())
+    {
+        x265_log(m_param, X265_LOG_ERROR, "Unable to allocate scaling list arrays\n");
+        m_aborted = true;
+        return;
+    }
+    else if (!m_param->scalingLists || !strcmp(m_param->scalingLists, "off"))
+        m_scalingList.m_bEnabled = false;
+    else if (!strcmp(m_param->scalingLists, "default"))
+        m_scalingList.setDefaultScalingList();
+    else if (m_scalingList.parseScalingList(m_param->scalingLists))
+        m_aborted = true;
+
+    m_lookahead = new Lookahead(m_param, m_threadPool);
+    if (m_numPools)
+    {
+        m_lookahead->m_jpId = m_threadPool[0].m_numProviders++;
+        m_threadPool[0].m_jpTable[m_lookahead->m_jpId] = m_lookahead;
+    }
+
+    m_dpb = new DPB(m_param);
+    m_rateControl = new RateControl(*m_param);
+
+    initVPS(&m_vps);
+    initSPS(&m_sps);
+    initPPS(&m_pps);
+   
+    if (m_param->rc.vbvBufferSize)
+    {
+        // Pre-compute "emergency" noise-reduction offsets used when VBV is
+        // forced beyond the spec QP range (QP_MAX_SPEC..QP_MAX_MAX)
+        m_offsetEmergency = (uint16_t(*)[MAX_NUM_TR_CATEGORIES][MAX_NUM_TR_COEFFS])X265_MALLOC(uint16_t, MAX_NUM_TR_CATEGORIES * MAX_NUM_TR_COEFFS * (QP_MAX_MAX - QP_MAX_SPEC));
+        if (!m_offsetEmergency)
+        {
+            x265_log(m_param, X265_LOG_ERROR, "Unable to allocate memory\n");
+            m_aborted = true;
+            return;
+        }
+
+        // temporarily enable default scaling lists if needed so quant
+        // matrices exist for the offset derivation; restored below
+        bool scalingEnabled = m_scalingList.m_bEnabled;
+        if (!scalingEnabled)
+        {
+            m_scalingList.setDefaultScalingList();
+            m_scalingList.setupQuantMatrices();
+        }
+        else
+            m_scalingList.setupQuantMatrices();
+
+        for (int q = 0; q < QP_MAX_MAX - QP_MAX_SPEC; q++)
+        {
+            for (int cat = 0; cat < MAX_NUM_TR_CATEGORIES; cat++)
+            {
+                uint16_t *nrOffset = m_offsetEmergency[q][cat];
+
+                int trSize = cat & 3;
+
+                int coefCount = 1 << ((trSize + 2) * 2);
+
+                /* Denoise chroma first then luma, then DC. */
+                int dcThreshold = (QP_MAX_MAX - QP_MAX_SPEC) * 2 / 3;
+                int lumaThreshold = (QP_MAX_MAX - QP_MAX_SPEC) * 2 / 3;
+                int chromaThreshold = 0;
+
+                int thresh = (cat < 4 || (cat >= 8 && cat < 12)) ? lumaThreshold : chromaThreshold;
+
+                double quantF = (double)(1ULL << (q / 6 + 16 + 8));
+
+                for (int i = 0; i < coefCount; i++)
+                {
+                    /* True "emergency mode": remove all DCT coefficients */
+                    if (q == QP_MAX_MAX - QP_MAX_SPEC - 1)
+                    {
+                        nrOffset[i] = INT16_MAX;
+                        continue;
+                    }
+
+                    int iThresh = i == 0 ? dcThreshold : thresh;
+                    if (q < iThresh)
+                    {
+                        nrOffset[i] = 0;
+                        continue;
+                    }
+
+                    int numList = (cat >= 8) * 3 + ((int)!iThresh);
+
+                    double pos = (double)(q - iThresh + 1) / (QP_MAX_MAX - QP_MAX_SPEC - iThresh);
+                    double start = quantF / (m_scalingList.m_quantCoef[trSize][numList][QP_MAX_SPEC % 6][i]);
+
+                    // Formula chosen as an exponential scale to vaguely mimic the effects of a higher quantizer.
+                    double bias = (pow(2, pos * (QP_MAX_MAX - QP_MAX_SPEC)) * 0.003 - 0.003) * start;
+                    nrOffset[i] = (uint16_t)X265_MIN(bias + 0.5, INT16_MAX);
+                }
+            }
+        }
+
+        if (!scalingEnabled)
+        {
+            // restore the user's disabled-scaling-list state
+            m_scalingList.m_bEnabled = false;
+            m_scalingList.m_bDataPresent = false;
+            m_scalingList.setupQuantMatrices();
+        }
+    }
+    else
+        m_scalingList.setupQuantMatrices();
+
+    int numRows = (m_param->sourceHeight + g_maxCUSize - 1) / g_maxCUSize;
+    int numCols = (m_param->sourceWidth  + g_maxCUSize - 1) / g_maxCUSize;
+    for (int i = 0; i < m_param->frameNumThreads; i++)
+    {
+        if (!m_frameEncoder[i]->init(this, numRows, numCols))
+        {
+            x265_log(m_param, X265_LOG_ERROR, "Unable to initialize frame encoder, aborting\n");
+            m_aborted = true;
+        }
+    }
+
+    for (int i = 0; i < m_param->frameNumThreads; i++)
+    {
+        m_frameEncoder[i]->start();
+        m_frameEncoder[i]->m_done.wait(); /* wait for thread to initialize */
+    }
+
+    if (m_param->bEmitHRDSEI)
+        m_rateControl->initHRD(m_sps);
+    if (!m_rateControl->init(m_sps))
+        m_aborted = true;
+    if (!m_lookahead->create())
+        m_aborted = true;
+
+    if (m_param->analysisMode)
+    {
+        const char* name = m_param->analysisFileName;
+        if (!name)
+            name = defaultAnalysisFileName;
+        const char* mode = m_param->analysisMode == X265_ANALYSIS_LOAD ? "rb" : "wb";
+        m_analysisFile = fopen(name, mode);
+        if (!m_analysisFile)
+        {
+            x265_log(NULL, X265_LOG_ERROR, "Analysis load/save: failed to open file %s\n", name);
+            m_aborted = true;
+        }
+    }
+
+    m_bZeroLatency = !m_param->bframes && !m_param->lookaheadDepth && m_param->frameNumThreads == 1;
+
+    m_aborted |= parseLambdaFile(m_param);
+
+    m_encodeStartTime = x265_mdate();
+
+    m_nalList.m_annexB = !!m_param->bAnnexB;
+
+    m_emitCLLSEI = p->maxCLL || p->maxFALL;
+}
+
+/* Unblock and stop all worker threads prior to destroy(): terminate rate
+ * control (releases any blocked RC calls), stop the lookahead, drain and stop
+ * each frame encoder thread, then halt the shared thread-pool workers. */
+void Encoder::stopJobs()
+{
+    if (m_rateControl)
+        m_rateControl->terminate(); // unblock all blocked RC calls
+
+    if (m_lookahead)
+        m_lookahead->stopJobs();
+    
+    for (int i = 0; i < m_param->frameNumThreads; i++)
+    {
+        if (m_frameEncoder[i])
+        {
+            // drain any completed picture, then signal the thread to exit
+            m_frameEncoder[i]->getEncodedPicture(m_nalList);
+            m_frameEncoder[i]->m_threadActive = false;
+            m_frameEncoder[i]->m_enable.trigger();
+            m_frameEncoder[i]->stop();
+        }
+    }
+
+    if (m_threadPool)
+        m_threadPool->stopWorkers();
+}
+
+/* Release all encoder-owned resources. Presumably called after stopJobs()
+ * so no worker threads are still running -- TODO confirm against callers. */
+void Encoder::destroy()
+{
+    if (m_exportedPic)
+    {
+        /* drop the reference held on behalf of the API caller's pic_out */
+        ATOMIC_DEC(&m_exportedPic->m_countRefEncoders);
+        m_exportedPic = NULL;
+    }
+
+    for (int i = 0; i < m_param->frameNumThreads; i++)
+    {
+        if (m_frameEncoder[i])
+        {
+            m_frameEncoder[i]->destroy();
+            delete m_frameEncoder[i];
+        }
+    }
+
+    // thread pools can be cleaned up now that all the JobProviders are
+    // known to be shutdown
+    delete [] m_threadPool;
+
+    if (m_lookahead)
+    {
+        m_lookahead->destroy();
+        delete m_lookahead;
+    }
+
+    delete m_dpb;
+    if (m_rateControl)
+    {
+        m_rateControl->destroy();
+        delete m_rateControl;
+    }
+
+    X265_FREE(m_offsetEmergency);
+
+    if (m_analysisFile)
+        fclose(m_analysisFile);
+
+    if (m_param)
+    {
+        /* release string arguments that were strdup'd */
+        free((char*)m_param->rc.lambdaFileName);
+        free((char*)m_param->rc.statFileName);
+        free((char*)m_param->analysisFileName);
+        free((char*)m_param->scalingLists);
+        free((char*)m_param->numaPools);
+        free((char*)m_param->masteringDisplayColorVolume);
+
+        PARAM_NS::x265_param_free(m_param);
+    }
+
+    PARAM_NS::x265_param_free(m_latestParam);
+}
+
+/* Project the VBV buffer fill level forward over the frames currently being
+ * encoded by other frame encoder threads (any active RCE whose POC differs
+ * from the slice rc is planning), so rate control plans the next frame
+ * against a realistic buffer state rather than the last committed one. */
+void Encoder::updateVbvPlan(RateControl* rc)
+{
+    for (int i = 0; i < m_param->frameNumThreads; i++)
+    {
+        FrameEncoder *encoder = m_frameEncoder[i];
+        if (encoder->m_rce.isActive && encoder->m_rce.poc != rc->m_curSlice->m_poc)
+        {
+            /* assume the worst case: the larger of the live estimate and
+             * the planned frame size */
+            int64_t bits = (int64_t) X265_MAX(encoder->m_rce.frameSizeEstimated, encoder->m_rce.frameSizePlanned);
+            rc->m_bufferFill -= bits;
+            rc->m_bufferFill = X265_MAX(rc->m_bufferFill, 0);
+            rc->m_bufferFill += encoder->m_rce.bufferRate;
+            rc->m_bufferFill = X265_MIN(rc->m_bufferFill, rc->m_bufferSize);
+            if (rc->m_2pass)
+                rc->m_predictedBits += bits;
+        }
+    }
+}
+
+/**
+ * Feed one new input frame into the encoder, get one frame out. If pic_in is
+ * NULL, a flush condition is implied and pic_in must be NULL for all subsequent
+ * calls for this encoder instance.
+ *
+ * pic_in  input original YUV picture or NULL
+ * pic_out pointer to reconstructed picture struct
+ *
+ * returns 0 if no frames are currently available for output
+ *         1 if frame was output, m_nalList contains access unit
+ *         negative on malloc error or abort */
+int Encoder::encode(const x265_picture* pic_in, x265_picture* pic_out)
+{
+#if CHECKED_BUILD || _DEBUG
+    if (g_checkFailures)
+    {
+        x265_log(m_param, X265_LOG_ERROR, "encoder aborting because of internal error\n");
+        return -1;
+    }
+#endif
+    if (m_aborted)
+        return -1;
+
+    /* the picture exported to the caller on the previous call can now be
+     * released and possibly recycled */
+    if (m_exportedPic)
+    {
+        ATOMIC_DEC(&m_exportedPic->m_countRefEncoders);
+        m_exportedPic = NULL;
+        m_dpb->recycleUnreferenced();
+    }
+
+    if (pic_in)
+    {
+        /* validate input before committing any resources to it */
+        if (pic_in->colorSpace != m_param->internalCsp)
+        {
+            x265_log(m_param, X265_LOG_ERROR, "Unsupported color space (%d) on input\n",
+                     pic_in->colorSpace);
+            return -1;
+        }
+        if (pic_in->bitDepth < 8 || pic_in->bitDepth > 16)
+        {
+            x265_log(m_param, X265_LOG_ERROR, "Input bit depth (%d) must be between 8 and 16\n",
+                     pic_in->bitDepth);
+            return -1;
+        }
+
+        /* acquire a Frame: reuse one from the DPB free list, else allocate */
+        Frame *inFrame;
+        if (m_dpb->m_freeList.empty())
+        {
+            inFrame = new Frame;
+            x265_param* p = m_reconfigured? m_latestParam : m_param;
+            if (inFrame->create(p, pic_in->quantOffsets))
+            {
+                /* the first PicYuv created is asked to generate the CU and block unit offset
+                 * arrays which are then shared with all subsequent PicYuv (orig and recon) 
+                 * allocated by this top level encoder */
+                if (m_sps.cuOffsetY)
+                {
+                    inFrame->m_fencPic->m_cuOffsetC = m_sps.cuOffsetC;
+                    inFrame->m_fencPic->m_cuOffsetY = m_sps.cuOffsetY;
+                    inFrame->m_fencPic->m_buOffsetC = m_sps.buOffsetC;
+                    inFrame->m_fencPic->m_buOffsetY = m_sps.buOffsetY;
+                }
+                else
+                {
+                    if (!inFrame->m_fencPic->createOffsets(m_sps))
+                    {
+                        m_aborted = true;
+                        x265_log(m_param, X265_LOG_ERROR, "memory allocation failure, aborting encode\n");
+                        inFrame->destroy();
+                        delete inFrame;
+                        return -1;
+                    }
+                    else
+                    {
+                        m_sps.cuOffsetC = inFrame->m_fencPic->m_cuOffsetC;
+                        m_sps.cuOffsetY = inFrame->m_fencPic->m_cuOffsetY;
+                        m_sps.buOffsetC = inFrame->m_fencPic->m_buOffsetC;
+                        m_sps.buOffsetY = inFrame->m_fencPic->m_buOffsetY;
+                    }
+                }
+            }
+            else
+            {
+                m_aborted = true;
+                x265_log(m_param, X265_LOG_ERROR, "memory allocation failure, aborting encode\n");
+                inFrame->destroy();
+                delete inFrame;
+                return -1;
+            }
+        }
+        else
+        {
+            inFrame = m_dpb->m_freeList.popBack();
+            inFrame->m_lowresInit = false;
+        }
+
+        /* Copy input picture into a Frame and PicYuv, send to lookahead */
+        inFrame->m_fencPic->copyFromPicture(*pic_in, *m_param, m_sps.conformanceWindow.rightOffset, m_sps.conformanceWindow.bottomOffset);
+
+        inFrame->m_poc       = ++m_pocLast;
+        inFrame->m_userData  = pic_in->userData;
+        inFrame->m_pts       = pic_in->pts;
+        inFrame->m_forceqp   = pic_in->forceqp;
+        inFrame->m_param     = m_reconfigured ? m_latestParam : m_param;
+        
+        if (pic_in->quantOffsets != NULL)
+        {
+            int cuCount = inFrame->m_lowres.maxBlocksInRow * inFrame->m_lowres.maxBlocksInCol;
+            memcpy(inFrame->m_quantOffsets, pic_in->quantOffsets, cuCount * sizeof(float));
+        }
+
+        /* record timestamps needed to derive DTS once B-frame delay is known */
+        if (m_pocLast == 0)
+            m_firstPts = inFrame->m_pts;
+        if (m_bframeDelay && m_pocLast == m_bframeDelay)
+            m_bframeDelayTime = inFrame->m_pts - m_firstPts;
+
+        /* Encoder holds a reference count until stats collection is finished */
+        ATOMIC_INC(&inFrame->m_countRefEncoders);
+
+        if ((m_param->rc.aqMode || m_param->bEnableWeightedPred || m_param->bEnableWeightedBiPred) &&
+            (m_param->rc.cuTree && m_param->rc.bStatRead))
+        {
+            if (!m_rateControl->cuTreeReadFor2Pass(inFrame))
+            {
+                m_aborted = 1;
+                return -1;
+            }
+        }
+
+        /* Use the frame types from the first pass, if available */
+        int sliceType = (m_param->rc.bStatRead) ? m_rateControl->rateControlSliceType(inFrame->m_poc) : pic_in->sliceType;
+
+        /* In analysisSave mode, x265_analysis_data is allocated in pic_in and inFrame points to this */
+        /* Load analysis data before lookahead->addPicture, since sliceType has been decided */
+        if (m_param->analysisMode == X265_ANALYSIS_LOAD)
+        {
+            x265_picture* inputPic = const_cast<x265_picture*>(pic_in);
+            /* readAnalysisFile reads analysis data for the frame and allocates memory based on slicetype */
+            readAnalysisFile(&inputPic->analysisData, inFrame->m_poc);
+            inFrame->m_analysisData.poc = inFrame->m_poc;
+            inFrame->m_analysisData.sliceType = inputPic->analysisData.sliceType;
+            inFrame->m_analysisData.numCUsInFrame = inputPic->analysisData.numCUsInFrame;
+            inFrame->m_analysisData.numPartitions = inputPic->analysisData.numPartitions;
+            inFrame->m_analysisData.interData = inputPic->analysisData.interData;
+            inFrame->m_analysisData.intraData = inputPic->analysisData.intraData;
+            sliceType = inputPic->analysisData.sliceType;
+        }
+
+        m_lookahead->addPicture(*inFrame, sliceType);
+        m_numDelayedPic++;
+    }
+    else
+        m_lookahead->flush();
+
+    /* rotate through the frame encoders round-robin */
+    FrameEncoder *curEncoder = m_frameEncoder[m_curEncoder];
+    m_curEncoder = (m_curEncoder + 1) % m_param->frameNumThreads;
+    int ret = 0;
+
+    /* Normal operation is to wait for the current frame encoder to complete its current frame
+     * and then to give it a new frame to work on.  In zero-latency mode, we must encode this
+     * input picture before returning so the order must be reversed. This do/while() loop allows
+     * us to alternate the order of the calls without ugly code replication */
+    Frame* outFrame = NULL;
+    Frame* frameEnc = NULL;
+    int pass = 0;
+    do
+    {
+        /* getEncodedPicture() should block until the FrameEncoder has completed
+         * encoding the frame.  This is how back-pressure through the API is
+         * accomplished when the encoder is full */
+        if (!m_bZeroLatency || pass)
+            outFrame = curEncoder->getEncodedPicture(m_nalList);
+        if (outFrame)
+        {
+            Slice *slice = outFrame->m_encData->m_slice;
+            x265_frame_stats* frameData = NULL;
+
+            /* Free up pic_in->analysisData since it has already been used */
+            if (m_param->analysisMode == X265_ANALYSIS_LOAD)
+                freeAnalysis(&outFrame->m_analysisData);
+
+            if (pic_out)
+            {
+                /* expose the reconstructed picture to the caller; the planes
+                 * point into recpic, so outFrame must stay referenced until
+                 * the next encode() call (see m_exportedPic below) */
+                PicYuv *recpic = outFrame->m_reconPic;
+                pic_out->poc = slice->m_poc;
+                pic_out->bitDepth = X265_DEPTH;
+                pic_out->userData = outFrame->m_userData;
+                pic_out->colorSpace = m_param->internalCsp;
+                frameData = &(pic_out->frameData);
+
+                pic_out->pts = outFrame->m_pts;
+                pic_out->dts = outFrame->m_dts;
+
+                switch (slice->m_sliceType)
+                {
+                case I_SLICE:
+                    pic_out->sliceType = outFrame->m_lowres.bKeyframe ? X265_TYPE_IDR : X265_TYPE_I;
+                    break;
+                case P_SLICE:
+                    pic_out->sliceType = X265_TYPE_P;
+                    break;
+                case B_SLICE:
+                    pic_out->sliceType = X265_TYPE_B;
+                    break;
+                }
+
+                pic_out->planes[0] = recpic->m_picOrg[0];
+                pic_out->stride[0] = (int)(recpic->m_stride * sizeof(pixel));
+                pic_out->planes[1] = recpic->m_picOrg[1];
+                pic_out->stride[1] = (int)(recpic->m_strideC * sizeof(pixel));
+                pic_out->planes[2] = recpic->m_picOrg[2];
+                pic_out->stride[2] = (int)(recpic->m_strideC * sizeof(pixel));
+
+                /* Dump analysis data from pic_out to file in save mode and free */
+                if (m_param->analysisMode == X265_ANALYSIS_SAVE)
+                {
+                    pic_out->analysisData.poc = pic_out->poc;
+                    pic_out->analysisData.sliceType = pic_out->sliceType;
+                    pic_out->analysisData.numCUsInFrame = outFrame->m_analysisData.numCUsInFrame;
+                    pic_out->analysisData.numPartitions = outFrame->m_analysisData.numPartitions;
+                    pic_out->analysisData.interData = outFrame->m_analysisData.interData;
+                    pic_out->analysisData.intraData = outFrame->m_analysisData.intraData;
+                    writeAnalysisFile(&pic_out->analysisData);
+                    freeAnalysis(&pic_out->analysisData);
+                }
+            }
+            /* tally weighted-prediction usage for the end-of-encode summary */
+            if (slice->m_sliceType == P_SLICE)
+            {
+                if (slice->m_weightPredTable[0][0][0].bPresentFlag)
+                    m_numLumaWPFrames++;
+                if (slice->m_weightPredTable[0][0][1].bPresentFlag ||
+                    slice->m_weightPredTable[0][0][2].bPresentFlag)
+                    m_numChromaWPFrames++;
+            }
+            else if (slice->m_sliceType == B_SLICE)
+            {
+                bool bLuma = false, bChroma = false;
+                for (int l = 0; l < 2; l++)
+                {
+                    if (slice->m_weightPredTable[l][0][0].bPresentFlag)
+                        bLuma = true;
+                    if (slice->m_weightPredTable[l][0][1].bPresentFlag ||
+                        slice->m_weightPredTable[l][0][2].bPresentFlag)
+                        bChroma = true;
+                }
+
+                if (bLuma)
+                    m_numLumaWPBiFrames++;
+                if (bChroma)
+                    m_numChromaWPBiFrames++;
+            }
+
+            if (m_aborted)
+                return -1;
+
+            finishFrameStats(outFrame, curEncoder, curEncoder->m_accessUnitBits, frameData);
+
+            /* Write RateControl Frame level stats in multipass encodes */
+            if (m_param->rc.bStatWrite)
+                if (m_rateControl->writeRateControlFrameStats(outFrame, &curEncoder->m_rce))
+                    m_aborted = true;
+
+            /* Allow this frame to be recycled if no frame encoders are using it for reference */
+            if (!pic_out)
+            {
+                ATOMIC_DEC(&outFrame->m_countRefEncoders);
+                m_dpb->recycleUnreferenced();
+            }
+            else
+                m_exportedPic = outFrame;
+
+            m_numDelayedPic--;
+
+            ret = 1;
+        }
+
+        /* pop a single frame from decided list, then provide to frame encoder
+         * curEncoder is guaranteed to be idle at this point */
+        if (!pass)
+            frameEnc = m_lookahead->getDecidedPicture();
+        if (frameEnc && !pass)
+        {
+            /* give this frame a FrameData instance before encoding */
+            if (m_dpb->m_frameDataFreeList)
+            {
+                frameEnc->m_encData = m_dpb->m_frameDataFreeList;
+                m_dpb->m_frameDataFreeList = m_dpb->m_frameDataFreeList->m_freeListNext;
+                frameEnc->reinit(m_sps);
+            }
+            else
+            {
+                frameEnc->allocEncodeData(m_param, m_sps);
+                Slice* slice = frameEnc->m_encData->m_slice;
+                slice->m_sps = &m_sps;
+                slice->m_pps = &m_pps;
+                slice->m_maxNumMergeCand = m_param->maxNumMergeCand;
+                slice->m_endCUAddr = slice->realEndAddress(m_sps.numCUsInFrame * NUM_4x4_PARTITIONS);
+            }
+
+            /* derive DTS from the reordered PTS history when B-frames delay output */
+            curEncoder->m_rce.encodeOrder = m_encodedFrameNum++;
+            if (m_bframeDelay)
+            {
+                int64_t *prevReorderedPts = m_prevReorderedPts;
+                frameEnc->m_dts = m_encodedFrameNum > m_bframeDelay
+                    ? prevReorderedPts[(m_encodedFrameNum - m_bframeDelay) % m_bframeDelay]
+                    : frameEnc->m_reorderedPts - m_bframeDelayTime;
+                prevReorderedPts[m_encodedFrameNum % m_bframeDelay] = frameEnc->m_reorderedPts;
+            }
+            else
+                frameEnc->m_dts = frameEnc->m_reorderedPts;
+
+            /* Allocate analysis data before encode in save mode. This is allocated in frameEnc */
+            if (m_param->analysisMode == X265_ANALYSIS_SAVE)
+            {
+                x265_analysis_data* analysis = &frameEnc->m_analysisData;
+                analysis->poc = frameEnc->m_poc;
+                analysis->sliceType = frameEnc->m_lowres.sliceType;
+                uint32_t widthInCU       = (m_param->sourceWidth  + g_maxCUSize - 1) >> g_maxLog2CUSize;
+                uint32_t heightInCU      = (m_param->sourceHeight + g_maxCUSize - 1) >> g_maxLog2CUSize;
+
+                uint32_t numCUsInFrame   = widthInCU * heightInCU;
+                analysis->numCUsInFrame  = numCUsInFrame;
+                analysis->numPartitions  = NUM_4x4_PARTITIONS;
+                allocAnalysis(analysis);
+            }
+
+            /* determine references, setup RPS, etc */
+            m_dpb->prepareEncode(frameEnc);
+
+            if (m_param->rc.rateControlMode != X265_RC_CQP)
+                m_lookahead->getEstimatedPictureCost(frameEnc);
+
+            /* Allow FrameEncoder::compressFrame() to start in the frame encoder thread */
+            if (!curEncoder->startCompressFrame(frameEnc))
+                m_aborted = true;
+        }
+        else if (m_encodedFrameNum)
+            m_rateControl->setFinalFrameCount(m_encodedFrameNum); 
+    }
+    while (m_bZeroLatency && ++pass < 2);
+
+    return ret;
+}
+
+/* Copy the subset of parameters that are safe to change mid-encode from
+ * param into encParam, then validate the combination. Returns the result of
+ * x265_check_params() (non-zero means the new combination is invalid). */
+int Encoder::reconfigureParam(x265_param* encParam, x265_param* param)
+{
+    encParam->maxNumReferences = param->maxNumReferences; // never uses more refs than specified in stream headers
+    encParam->bEnableLoopFilter = param->bEnableLoopFilter;
+    encParam->deblockingFilterTCOffset = param->deblockingFilterTCOffset;
+    encParam->deblockingFilterBetaOffset = param->deblockingFilterBetaOffset; 
+    encParam->bEnableFastIntra = param->bEnableFastIntra;
+    encParam->bEnableEarlySkip = param->bEnableEarlySkip;
+    encParam->bEnableTemporalMvp = param->bEnableTemporalMvp;
+    /* Scratch buffer prevents me_range from being increased for esa/tesa
+    if (param->searchMethod < X265_FULL_SEARCH || param->searchMethod < encParam->searchRange)
+        encParam->searchRange = param->searchRange; */
+    encParam->noiseReductionInter = param->noiseReductionInter;
+    encParam->noiseReductionIntra = param->noiseReductionIntra;
+    /* We can't switch out of subme=0 during encoding. */
+    if (encParam->subpelRefine)
+        encParam->subpelRefine = param->subpelRefine;
+    encParam->rdoqLevel = param->rdoqLevel;
+    encParam->rdLevel = param->rdLevel;
+    encParam->bEnableTSkipFast = param->bEnableTSkipFast;
+    encParam->psyRd = param->psyRd;
+    encParam->psyRdoq = param->psyRdoq;
+    encParam->bEnableSignHiding = param->bEnableSignHiding;
+    /* NOTE(review): duplicate of the bEnableFastIntra assignment above --
+     * harmless but redundant */
+    encParam->bEnableFastIntra = param->bEnableFastIntra;
+    encParam->maxTUSize = param->maxTUSize;
+    return x265_check_params(encParam);
+}
+
+/* Accumulate per-frame PSNR into per-plane running sums; averages are
+ * computed at report time by dividing by m_numPics. */
+void EncStats::addPsnr(double psnrY, double psnrU, double psnrV)
+{
+    m_psnrSumY += psnrY;
+    m_psnrSumU += psnrU;
+    m_psnrSumV += psnrV;
+}
+
+/* Accumulate a frame's bit count and bump the picture counter; this is the
+ * only accumulator that increments m_numPics, so it must be called once per
+ * frame for the derived averages to be correct. */
+void EncStats::addBits(uint64_t bits)
+{
+    m_accBits += bits;
+    m_numPics++;
+}
+
+/* Accumulate a frame's SSIM score into the running sum. */
+void EncStats::addSsim(double ssim)
+{
+    m_globalSsim += ssim;
+}
+
+/* Accumulate a frame's average QP into the running sum. */
+void EncStats::addQP(double aveQp)
+{
+    m_totalQp += aveQp;
+}
+
+/* Format a one-line summary (picture count, avg QP, bitrate, optional PSNR
+ * and SSIM means) for one slice-type bucket into the caller's buffer and
+ * return it. NOTE(review): callers appear to guard on stat.m_numPics != 0
+ * before calling (division by m_numPics here) and pass a ~200-byte buffer;
+ * sprintf is unbounded, so both assumptions should hold -- verify callers. */
+char* Encoder::statsString(EncStats& stat, char* buffer)
+{
+    double fps = (double)m_param->fpsNum / m_param->fpsDenom;
+    /* scale converts accumulated bits to kb/s: bits * fps / frames / 1000 */
+    double scale = fps / 1000 / (double)stat.m_numPics;
+
+    int len = sprintf(buffer, "%6u, ", stat.m_numPics);
+
+    len += sprintf(buffer + len, "Avg QP:%2.2lf", stat.m_totalQp / (double)stat.m_numPics);
+    len += sprintf(buffer + len, "  kb/s: %-8.2lf", stat.m_accBits * scale);
+    if (m_param->bEnablePsnr)
+    {
+        len += sprintf(buffer + len, "  PSNR Mean: Y:%.3lf U:%.3lf V:%.3lf",
+                       stat.m_psnrSumY / (double)stat.m_numPics,
+                       stat.m_psnrSumU / (double)stat.m_numPics,
+                       stat.m_psnrSumV / (double)stat.m_numPics);
+    }
+    if (m_param->bEnableSsim)
+    {
+        sprintf(buffer + len, "  SSIM Mean: %.6lf (%.3lfdB)",
+                stat.m_globalSsim / (double)stat.m_numPics,
+                x265_ssim2dB(stat.m_globalSsim / (double)stat.m_numPics));
+    }
+    return buffer;
+}
+
+/* Log the end-of-encode summary: per-slice-type stats, weighted prediction
+ * usage, consecutive B-frame histogram, lossless ratio, global averages and
+ * (in DETAILED_CU_STATS builds) a breakdown of where worker time was spent.
+ * No-op below X265_LOG_INFO verbosity. */
+void Encoder::printSummary()
+{
+    if (m_param->logLevel < X265_LOG_INFO)
+        return;
+
+    char buffer[200];
+    if (m_analyzeI.m_numPics)
+        x265_log(m_param, X265_LOG_INFO, "frame I: %s\n", statsString(m_analyzeI, buffer));
+    if (m_analyzeP.m_numPics)
+        x265_log(m_param, X265_LOG_INFO, "frame P: %s\n", statsString(m_analyzeP, buffer));
+    if (m_analyzeB.m_numPics)
+        x265_log(m_param, X265_LOG_INFO, "frame B: %s\n", statsString(m_analyzeB, buffer));
+    if (m_param->bEnableWeightedPred && m_analyzeP.m_numPics)
+    {
+        x265_log(m_param, X265_LOG_INFO, "Weighted P-Frames: Y:%.1f%% UV:%.1f%%\n",
+            (float)100.0 * m_numLumaWPFrames / m_analyzeP.m_numPics,
+            (float)100.0 * m_numChromaWPFrames / m_analyzeP.m_numPics);
+    }
+    if (m_param->bEnableWeightedBiPred && m_analyzeB.m_numPics)
+    {
+        x265_log(m_param, X265_LOG_INFO, "Weighted B-Frames: Y:%.1f%% UV:%.1f%%\n",
+            (float)100.0 * m_numLumaWPBiFrames / m_analyzeB.m_numPics,
+            (float)100.0 * m_numChromaWPBiFrames / m_analyzeB.m_numPics);
+    }
+    /* m_histogram[i] counts GOP runs with exactly i consecutive B-frames */
+    int pWithB = 0;
+    for (int i = 0; i <= m_param->bframes; i++)
+        pWithB += m_lookahead->m_histogram[i];
+
+    if (pWithB)
+    {
+        int p = 0;
+        for (int i = 0; i <= m_param->bframes; i++)
+            p += sprintf(buffer + p, "%.1f%% ", 100. * m_lookahead->m_histogram[i] / pWithB);
+
+        x265_log(m_param, X265_LOG_INFO, "consecutive B-frames: %s\n", buffer);
+    }
+    if (m_param->bLossless)
+    {
+        /* ratio of raw (conformance-cropped) luma-sample bits to coded bits */
+        float frameSize = (float)(m_param->sourceWidth - m_sps.conformanceWindow.rightOffset) *
+                                 (m_param->sourceHeight - m_sps.conformanceWindow.bottomOffset);
+        float uncompressed = frameSize * X265_DEPTH * m_analyzeAll.m_numPics;
+
+        x265_log(m_param, X265_LOG_INFO, "lossless compression ratio %.2f::1\n", uncompressed / m_analyzeAll.m_accBits);
+    }
+
+    if (m_analyzeAll.m_numPics)
+    {
+        int p = 0;
+        double elapsedEncodeTime = (double)(x265_mdate() - m_encodeStartTime) / 1000000;
+        double elapsedVideoTime = (double)m_analyzeAll.m_numPics * m_param->fpsDenom / m_param->fpsNum;
+        double bitrate = (0.001f * m_analyzeAll.m_accBits) / elapsedVideoTime;
+
+        p += sprintf(buffer + p, "\nencoded %d frames in %.2fs (%.2f fps), %.2f kb/s, Avg QP:%2.2lf", m_analyzeAll.m_numPics,
+                     elapsedEncodeTime, m_analyzeAll.m_numPics / elapsedEncodeTime, bitrate, m_analyzeAll.m_totalQp / (double)m_analyzeAll.m_numPics);
+
+        if (m_param->bEnablePsnr)
+        {
+            /* conventional 6:1:1 luma/chroma weighting for global PSNR */
+            double globalPsnr = (m_analyzeAll.m_psnrSumY * 6 + m_analyzeAll.m_psnrSumU + m_analyzeAll.m_psnrSumV) / (8 * m_analyzeAll.m_numPics);
+            p += sprintf(buffer + p, ", Global PSNR: %.3f", globalPsnr);
+        }
+
+        if (m_param->bEnableSsim)
+            p += sprintf(buffer + p, ", SSIM Mean Y: %.7f (%6.3f dB)", m_analyzeAll.m_globalSsim / m_analyzeAll.m_numPics, x265_ssim2dB(m_analyzeAll.m_globalSsim / m_analyzeAll.m_numPics));
+
+        sprintf(buffer + p, "\n");
+        general_log(m_param, NULL, X265_LOG_INFO, buffer);
+    }
+    else
+        general_log(m_param, NULL, X265_LOG_INFO, "\nencoded 0 frames\n");
+
+#if DETAILED_CU_STATS
+    /* Summarize stats from all frame encoders */
+    CUStats cuStats;
+    for (int i = 0; i < m_param->frameNumThreads; i++)
+        cuStats.accumulate(m_frameEncoder[i]->m_cuStats);
+
+    if (!cuStats.totalCTUTime)
+        return;
+
+    int totalWorkerCount = 0;
+    for (int i = 0; i < m_numPools; i++)
+        totalWorkerCount += m_threadPool[i].m_numWorkers;
+
+    int64_t  batchElapsedTime, coopSliceElapsedTime;
+    uint64_t batchCount, coopSliceCount;
+    m_lookahead->getWorkerStats(batchElapsedTime, batchCount, coopSliceElapsedTime, coopSliceCount);
+    int64_t lookaheadWorkerTime = m_lookahead->m_slicetypeDecideElapsedTime + m_lookahead->m_preLookaheadElapsedTime +
+                                  batchElapsedTime + coopSliceElapsedTime;
+
+    int64_t totalWorkerTime = cuStats.totalCTUTime + cuStats.loopFilterElapsedTime + cuStats.pmodeTime +
+                              cuStats.pmeTime + lookaheadWorkerTime + cuStats.weightAnalyzeTime;
+    int64_t elapsedEncodeTime = x265_mdate() - m_encodeStartTime;
+
+    int64_t interRDOTotalTime = 0, intraRDOTotalTime = 0;
+    uint64_t interRDOTotalCount = 0, intraRDOTotalCount = 0;
+    for (uint32_t i = 0; i <= g_maxCUDepth; i++)
+    {
+        interRDOTotalTime += cuStats.interRDOElapsedTime[i];
+        intraRDOTotalTime += cuStats.intraRDOElapsedTime[i];
+        interRDOTotalCount += cuStats.countInterRDO[i];
+        intraRDOTotalCount += cuStats.countIntraRDO[i];
+    }
+
+    /* Time within compressCTU() and pmode tasks not captured by ME, Intra mode selection, or RDO (2Nx2N merge, 2Nx2N bidir, etc) */
+    int64_t unaccounted = (cuStats.totalCTUTime + cuStats.pmodeTime) -
+                          (cuStats.intraAnalysisElapsedTime + cuStats.motionEstimationElapsedTime + interRDOTotalTime + intraRDOTotalTime);
+
+#define ELAPSED_SEC(val)  ((double)(val) / 1000000)
+#define ELAPSED_MSEC(val) ((double)(val) / 1000)
+
+    if (m_param->bDistributeMotionEstimation && cuStats.countPMEMasters)
+    {
+        x265_log(m_param, X265_LOG_INFO, "CU: %%%05.2lf time spent in motion estimation, averaging %.3lf CU inter modes per CTU\n",
+                 100.0 * (cuStats.motionEstimationElapsedTime + cuStats.pmeTime) / totalWorkerTime,
+                 (double)cuStats.countMotionEstimate / cuStats.totalCTUs);
+        x265_log(m_param, X265_LOG_INFO, "CU: %.3lf PME masters per inter CU, each blocked an average of %.3lf ns\n",
+                 (double)cuStats.countPMEMasters / cuStats.countMotionEstimate,
+                 (double)cuStats.pmeBlockTime / cuStats.countPMEMasters);
+        x265_log(m_param, X265_LOG_INFO, "CU:       %.3lf slaves per PME master, each took an average of %.3lf ms\n",
+                 (double)cuStats.countPMETasks / cuStats.countPMEMasters,
+                 ELAPSED_MSEC(cuStats.pmeTime) / cuStats.countPMETasks);
+    }
+    else
+    {
+        x265_log(m_param, X265_LOG_INFO, "CU: %%%05.2lf time spent in motion estimation, averaging %.3lf CU inter modes per CTU\n",
+                 100.0 * cuStats.motionEstimationElapsedTime / totalWorkerTime,
+                 (double)cuStats.countMotionEstimate / cuStats.totalCTUs);
+
+        if (cuStats.skippedMotionReferences[0] || cuStats.skippedMotionReferences[1] || cuStats.skippedMotionReferences[2])
+            x265_log(m_param, X265_LOG_INFO, "CU: Skipped motion searches per depth %%%.2lf %%%.2lf %%%.2lf %%%.2lf\n",
+                     100.0 * cuStats.skippedMotionReferences[0] / cuStats.totalMotionReferences[0],
+                     100.0 * cuStats.skippedMotionReferences[1] / cuStats.totalMotionReferences[1],
+                     100.0 * cuStats.skippedMotionReferences[2] / cuStats.totalMotionReferences[2],
+                     100.0 * cuStats.skippedMotionReferences[3] / cuStats.totalMotionReferences[3]);
+    }
+    x265_log(m_param, X265_LOG_INFO, "CU: %%%05.2lf time spent in intra analysis, averaging %.3lf Intra PUs per CTU\n",
+             100.0 * cuStats.intraAnalysisElapsedTime / totalWorkerTime,
+             (double)cuStats.countIntraAnalysis / cuStats.totalCTUs);
+    if (cuStats.skippedIntraCU[0] || cuStats.skippedIntraCU[1] || cuStats.skippedIntraCU[2])
+        x265_log(m_param, X265_LOG_INFO, "CU: Skipped intra CUs at depth %%%.2lf %%%.2lf %%%.2lf\n",
+                 100.0 * cuStats.skippedIntraCU[0] / cuStats.totalIntraCU[0],
+                 100.0 * cuStats.skippedIntraCU[1] / cuStats.totalIntraCU[1],
+                 100.0 * cuStats.skippedIntraCU[2] / cuStats.totalIntraCU[2]);
+    x265_log(m_param, X265_LOG_INFO, "CU: %%%05.2lf time spent in inter RDO, measuring %.3lf inter/merge predictions per CTU\n",
+             100.0 * interRDOTotalTime / totalWorkerTime,
+             (double)interRDOTotalCount / cuStats.totalCTUs);
+    x265_log(m_param, X265_LOG_INFO, "CU: %%%05.2lf time spent in intra RDO, measuring %.3lf intra predictions per CTU\n",
+             100.0 * intraRDOTotalTime / totalWorkerTime,
+             (double)intraRDOTotalCount / cuStats.totalCTUs);
+    x265_log(m_param, X265_LOG_INFO, "CU: %%%05.2lf time spent in loop filters, average %.3lf ms per call\n",
+             100.0 * cuStats.loopFilterElapsedTime / totalWorkerTime,
+             ELAPSED_MSEC(cuStats.loopFilterElapsedTime) / cuStats.countLoopFilter);
+    if (cuStats.countWeightAnalyze && cuStats.weightAnalyzeTime)
+    {
+        x265_log(m_param, X265_LOG_INFO, "CU: %%%05.2lf time spent in weight analysis, average %.3lf ms per call\n",
+                 100.0 * cuStats.weightAnalyzeTime / totalWorkerTime,
+                 ELAPSED_MSEC(cuStats.weightAnalyzeTime) / cuStats.countWeightAnalyze);
+    }
+    if (m_param->bDistributeModeAnalysis && cuStats.countPModeMasters)
+    {
+        x265_log(m_param, X265_LOG_INFO, "CU: %.3lf PMODE masters per CTU, each blocked an average of %.3lf ns\n",
+                 (double)cuStats.countPModeMasters / cuStats.totalCTUs,
+                 (double)cuStats.pmodeBlockTime / cuStats.countPModeMasters);
+        x265_log(m_param, X265_LOG_INFO, "CU:       %.3lf slaves per PMODE master, each took average of %.3lf ms\n",
+                 (double)cuStats.countPModeTasks / cuStats.countPModeMasters, 
+                 ELAPSED_MSEC(cuStats.pmodeTime) / cuStats.countPModeTasks);
+    }
+
+    x265_log(m_param, X265_LOG_INFO, "CU: %%%05.2lf time spent in slicetypeDecide (avg %.3lfms) and prelookahead (avg %.3lfms)\n",
+             100.0 * lookaheadWorkerTime / totalWorkerTime,
+             ELAPSED_MSEC(m_lookahead->m_slicetypeDecideElapsedTime) / m_lookahead->m_countSlicetypeDecide,
+             ELAPSED_MSEC(m_lookahead->m_preLookaheadElapsedTime) / m_lookahead->m_countPreLookahead);
+
+    x265_log(m_param, X265_LOG_INFO, "CU: %%%05.2lf time spent in other tasks\n",
+             100.0 * unaccounted / totalWorkerTime);
+
+    if (intraRDOTotalTime && intraRDOTotalCount)
+    {
+        x265_log(m_param, X265_LOG_INFO, "CU: Intra RDO time  per depth %%%05.2lf %%%05.2lf %%%05.2lf %%%05.2lf\n",
+                 100.0 * cuStats.intraRDOElapsedTime[0] / intraRDOTotalTime,  // 64
+                 100.0 * cuStats.intraRDOElapsedTime[1] / intraRDOTotalTime,  // 32
+                 100.0 * cuStats.intraRDOElapsedTime[2] / intraRDOTotalTime,  // 16
+                 100.0 * cuStats.intraRDOElapsedTime[3] / intraRDOTotalTime); // 8
+        x265_log(m_param, X265_LOG_INFO, "CU: Intra RDO calls per depth %%%05.2lf %%%05.2lf %%%05.2lf %%%05.2lf\n",
+                 100.0 * cuStats.countIntraRDO[0] / intraRDOTotalCount,  // 64
+                 100.0 * cuStats.countIntraRDO[1] / intraRDOTotalCount,  // 32
+                 100.0 * cuStats.countIntraRDO[2] / intraRDOTotalCount,  // 16
+                 100.0 * cuStats.countIntraRDO[3] / intraRDOTotalCount); // 8
+    }
+
+    if (interRDOTotalTime && interRDOTotalCount)
+    {
+        x265_log(m_param, X265_LOG_INFO, "CU: Inter RDO time  per depth %%%05.2lf %%%05.2lf %%%05.2lf %%%05.2lf\n",
+                 100.0 * cuStats.interRDOElapsedTime[0] / interRDOTotalTime,  // 64
+                 100.0 * cuStats.interRDOElapsedTime[1] / interRDOTotalTime,  // 32
+                 100.0 * cuStats.interRDOElapsedTime[2] / interRDOTotalTime,  // 16
+                 100.0 * cuStats.interRDOElapsedTime[3] / interRDOTotalTime); // 8
+        x265_log(m_param, X265_LOG_INFO, "CU: Inter RDO calls per depth %%%05.2lf %%%05.2lf %%%05.2lf %%%05.2lf\n",
+                 100.0 * cuStats.countInterRDO[0] / interRDOTotalCount,  // 64
+                 100.0 * cuStats.countInterRDO[1] / interRDOTotalCount,  // 32
+                 100.0 * cuStats.countInterRDO[2] / interRDOTotalCount,  // 16
+                 100.0 * cuStats.countInterRDO[3] / interRDOTotalCount); // 8
+    }
+
+    x265_log(m_param, X265_LOG_INFO, "CU: " X265_LL " %dX%d CTUs compressed in %.3lf seconds, %.3lf CTUs per worker-second\n",
+             cuStats.totalCTUs, g_maxCUSize, g_maxCUSize,
+             ELAPSED_SEC(totalWorkerTime),
+             cuStats.totalCTUs / ELAPSED_SEC(totalWorkerTime));
+
+    if (m_threadPool)
+        x265_log(m_param, X265_LOG_INFO, "CU: %.3lf average worker utilization, %%%05.2lf of theoretical maximum utilization\n",
+                 (double)totalWorkerTime / elapsedEncodeTime,
+                 100.0 * totalWorkerTime / (elapsedEncodeTime * totalWorkerCount));
+
+#undef ELAPSED_SEC
+#undef ELAPSED_MSEC
+#endif
+}
+
+/* Copy accumulated global and per-slice-type encode statistics into the
+ * caller-provided structure.  statsSizeBytes is the caller's
+ * sizeof(x265_stats), used to detect older (smaller) client structures for
+ * API forward-compatibility. */
+void Encoder::fetchStats(x265_stats *stats, size_t statsSizeBytes)
+{
+    /* must compare against sizeof(x265_stats): the original sizeof(stats)
+     * is only the size of the pointer (4/8 bytes), so the check accepted
+     * any buffer and could write past the end of a smaller client struct */
+    if (statsSizeBytes >= sizeof(x265_stats))
+    {
+        stats->globalPsnrY = m_analyzeAll.m_psnrSumY;
+        stats->globalPsnrU = m_analyzeAll.m_psnrSumU;
+        stats->globalPsnrV = m_analyzeAll.m_psnrSumV;
+        stats->encodedPictureCount = m_analyzeAll.m_numPics;
+        stats->totalWPFrames = m_numLumaWPFrames;
+        stats->accBits = m_analyzeAll.m_accBits;
+        stats->elapsedEncodeTime = (double)(x265_mdate() - m_encodeStartTime) / 1000000;
+        if (stats->encodedPictureCount > 0)
+        {
+            stats->globalSsim = m_analyzeAll.m_globalSsim / stats->encodedPictureCount;
+            /* luma PSNR is weighted 6x against the two chroma planes */
+            stats->globalPsnr = (stats->globalPsnrY * 6 + stats->globalPsnrU + stats->globalPsnrV) / (8 * stats->encodedPictureCount);
+            stats->elapsedVideoTime = (double)stats->encodedPictureCount * m_param->fpsDenom / m_param->fpsNum;
+            stats->bitrate = (0.001f * stats->accBits) / stats->elapsedVideoTime;
+        }
+        else
+        {
+            stats->globalSsim = 0;
+            stats->globalPsnr = 0;
+            stats->bitrate = 0;
+            stats->elapsedVideoTime = 0;
+        }
+
+        double fps = (double)m_param->fpsNum / m_param->fpsDenom;
+        double scale = fps / 1000;
+
+        /* NOTE: the per-slice-type averages below are floating-point
+         * divisions; a slice type with zero pictures yields NaN/inf rather
+         * than trapping, matching the existing reporting behavior */
+        stats->statsI.numPics = m_analyzeI.m_numPics;
+        stats->statsI.avgQp   = m_analyzeI.m_totalQp / (double)m_analyzeI.m_numPics;
+        stats->statsI.bitrate = m_analyzeI.m_accBits * scale / (double)m_analyzeI.m_numPics;
+        stats->statsI.psnrY   = m_analyzeI.m_psnrSumY / (double)m_analyzeI.m_numPics;
+        stats->statsI.psnrU   = m_analyzeI.m_psnrSumU / (double)m_analyzeI.m_numPics;
+        stats->statsI.psnrV   = m_analyzeI.m_psnrSumV / (double)m_analyzeI.m_numPics;
+        stats->statsI.ssim    = x265_ssim2dB(m_analyzeI.m_globalSsim / (double)m_analyzeI.m_numPics);
+
+        stats->statsP.numPics = m_analyzeP.m_numPics;
+        stats->statsP.avgQp   = m_analyzeP.m_totalQp / (double)m_analyzeP.m_numPics;
+        stats->statsP.bitrate = m_analyzeP.m_accBits * scale / (double)m_analyzeP.m_numPics;
+        stats->statsP.psnrY   = m_analyzeP.m_psnrSumY / (double)m_analyzeP.m_numPics;
+        stats->statsP.psnrU   = m_analyzeP.m_psnrSumU / (double)m_analyzeP.m_numPics;
+        stats->statsP.psnrV   = m_analyzeP.m_psnrSumV / (double)m_analyzeP.m_numPics;
+        stats->statsP.ssim    = x265_ssim2dB(m_analyzeP.m_globalSsim / (double)m_analyzeP.m_numPics);
+
+        stats->statsB.numPics = m_analyzeB.m_numPics;
+        stats->statsB.avgQp   = m_analyzeB.m_totalQp / (double)m_analyzeB.m_numPics;
+        stats->statsB.bitrate = m_analyzeB.m_accBits * scale / (double)m_analyzeB.m_numPics;
+        stats->statsB.psnrY   = m_analyzeB.m_psnrSumY / (double)m_analyzeB.m_numPics;
+        stats->statsB.psnrU   = m_analyzeB.m_psnrSumU / (double)m_analyzeB.m_numPics;
+        stats->statsB.psnrV   = m_analyzeB.m_psnrSumV / (double)m_analyzeB.m_numPics;
+        stats->statsB.ssim    = x265_ssim2dB(m_analyzeB.m_globalSsim / (double)m_analyzeB.m_numPics);
+
+        stats->maxCLL         = m_analyzeAll.m_maxCLL;
+        /* integer division: guard against divide-by-zero (UB) when no
+         * pictures have been encoded yet */
+        stats->maxFALL        = m_analyzeAll.m_numPics ?
+            (uint16_t)(m_analyzeAll.m_maxFALL / m_analyzeAll.m_numPics) : 0;
+
+        if (m_emitCLLSEI)
+        {
+            m_param->maxCLL = stats->maxCLL;
+            m_param->maxFALL = stats->maxFALL;
+        }
+    }
+
+    /* If new statistics are added to x265_stats, we must check here whether the
+     * structure provided by the user is the new structure or an older one (for
+     * future safety) */
+}
+
+/* Accumulate one encoded frame's quality/bitrate numbers into the global and
+ * per-slice-type analyzers, and optionally fill the caller-facing
+ * x265_frame_stats record (frameStats may be NULL). */
+void Encoder::finishFrameStats(Frame* curFrame, FrameEncoder *curEncoder, uint64_t bits, x265_frame_stats* frameStats)
+{
+    PicYuv* reconPic = curFrame->m_reconPic;
+
+    //===== calculate PSNR =====
+    /* measure only the conformance-cropped region */
+    int width  = reconPic->m_picWidth - m_sps.conformanceWindow.rightOffset;
+    int height = reconPic->m_picHeight - m_sps.conformanceWindow.bottomOffset;
+    int size = width * height;
+
+    int maxvalY = 255 << (X265_DEPTH - 8);
+    int maxvalC = 255 << (X265_DEPTH - 8);
+    double refValueY = (double)maxvalY * maxvalY * size;
+    double refValueC = (double)maxvalC * maxvalC * size / 4.0;
+    uint64_t ssdY, ssdU, ssdV;
+
+    ssdY = curEncoder->m_SSDY;
+    ssdU = curEncoder->m_SSDU;
+    ssdV = curEncoder->m_SSDV;
+    /* 99.99 dB is reported for a mathematically lossless plane (SSD == 0) */
+    double psnrY = (ssdY ? 10.0 * log10(refValueY / (double)ssdY) : 99.99);
+    double psnrU = (ssdU ? 10.0 * log10(refValueC / (double)ssdU) : 99.99);
+    double psnrV = (ssdV ? 10.0 * log10(refValueC / (double)ssdV) : 99.99);
+
+    FrameData& curEncData = *curFrame->m_encData;
+    Slice* slice = curEncData.m_slice;
+
+    //===== add bits, psnr and ssim =====
+    m_analyzeAll.addBits(bits);
+    m_analyzeAll.addQP(curEncData.m_avgQpAq);
+
+    if (m_param->bEnablePsnr)
+        m_analyzeAll.addPsnr(psnrY, psnrU, psnrV);
+
+    double ssim = 0.0;
+    if (m_param->bEnableSsim && curEncoder->m_ssimCnt)
+    {
+        ssim = curEncoder->m_ssim / curEncoder->m_ssimCnt;
+        m_analyzeAll.addSsim(ssim);
+    }
+    /* mirror the same sample into the analyzer for this frame's slice type */
+    if (slice->isIntra())
+    {
+        m_analyzeI.addBits(bits);
+        m_analyzeI.addQP(curEncData.m_avgQpAq);
+        if (m_param->bEnablePsnr)
+            m_analyzeI.addPsnr(psnrY, psnrU, psnrV);
+        if (m_param->bEnableSsim)
+            m_analyzeI.addSsim(ssim);
+    }
+    else if (slice->isInterP())
+    {
+        m_analyzeP.addBits(bits);
+        m_analyzeP.addQP(curEncData.m_avgQpAq);
+        if (m_param->bEnablePsnr)
+            m_analyzeP.addPsnr(psnrY, psnrU, psnrV);
+        if (m_param->bEnableSsim)
+            m_analyzeP.addSsim(ssim);
+    }
+    else if (slice->isInterB())
+    {
+        m_analyzeB.addBits(bits);
+        m_analyzeB.addQP(curEncData.m_avgQpAq);
+        if (m_param->bEnablePsnr)
+            m_analyzeB.addPsnr(psnrY, psnrU, psnrV);
+        if (m_param->bEnableSsim)
+            m_analyzeB.addSsim(ssim);
+    }
+
+    /* running sums for content light level reporting (maxFALL is averaged
+     * over picture count when fetched) */
+    m_analyzeAll.m_maxFALL += curFrame->m_fencPic->m_avgLumaLevel;
+    m_analyzeAll.m_maxCLL = X265_MAX(m_analyzeAll.m_maxCLL, curFrame->m_fencPic->m_maxLumaLevel);
+
+    char c = (slice->isIntra() ? 'I' : slice->isInterP() ? 'P' : 'B');
+    int poc = slice->m_poc;
+    if (!IS_REFERENCED(curFrame))
+        c += 32; // lower case if unreferenced (ASCII 'A'..'Z' + 32 == 'a'..'z')
+
+    if (frameStats)
+    {
+        frameStats->encoderOrder = m_outputCount++;
+        frameStats->sliceType = c;
+        frameStats->poc = poc;
+        frameStats->qp = curEncData.m_avgQpAq;
+        frameStats->bits = bits;
+        frameStats->bScenecut = curFrame->m_lowres.bScenecut;
+        if (m_param->rc.rateControlMode == X265_RC_CRF)
+            frameStats->rateFactor = curEncData.m_rateFactor;
+        frameStats->psnrY = psnrY;
+        frameStats->psnrU = psnrU;
+        frameStats->psnrV = psnrV;
+        double psnr = (psnrY * 6 + psnrU + psnrV) / 8;
+        frameStats->psnr = psnr;
+        frameStats->ssim = ssim;
+        /* reference lists are reported relative to the last IDR; -1 pads
+         * unused entries */
+        if (!slice->isIntra())
+        {
+            for (int ref = 0; ref < 16; ref++)
+                frameStats->list0POC[ref] = ref < slice->m_numRefIdx[0] ? slice->m_refPOCList[0][ref] - slice->m_lastIDR : -1;
+
+            if (!slice->isInterP())
+            {
+                for (int ref = 0; ref < 16; ref++)
+                    frameStats->list1POC[ref] = ref < slice->m_numRefIdx[1] ? slice->m_refPOCList[1][ref] - slice->m_lastIDR : -1;
+            }
+        }
+
+#define ELAPSED_MSEC(start, end) (((double)(end) - (start)) / 1000)
+
+        frameStats->decideWaitTime = ELAPSED_MSEC(0, curEncoder->m_slicetypeWaitTime);
+        frameStats->row0WaitTime = ELAPSED_MSEC(curEncoder->m_startCompressTime, curEncoder->m_row0WaitTime);
+        frameStats->wallTime = ELAPSED_MSEC(curEncoder->m_row0WaitTime, curEncoder->m_endCompressTime);
+        frameStats->refWaitWallTime = ELAPSED_MSEC(curEncoder->m_row0WaitTime, curEncoder->m_allRowsAvailableTime);
+        frameStats->totalCTUTime = ELAPSED_MSEC(0, curEncoder->m_totalWorkerElapsedTime);
+        frameStats->stallTime = ELAPSED_MSEC(0, curEncoder->m_totalNoWorkerTime);
+
+/* keep the helper macro local to this function (the sibling ELAPSED_MSEC
+ * earlier in this file is likewise #undef'd after use) */
+#undef ELAPSED_MSEC
+
+        if (curEncoder->m_totalActiveWorkerCount)
+            frameStats->avgWPP = (double)curEncoder->m_totalActiveWorkerCount / curEncoder->m_activeWorkerCountSamples;
+        else
+            frameStats->avgWPP = 1;
+        frameStats->countRowBlocks = curEncoder->m_countRowBlocks;
+
+        frameStats->cuStats.percentIntraNxN = curFrame->m_encData->m_frameStats.percentIntraNxN;
+        frameStats->avgChromaDistortion     = curFrame->m_encData->m_frameStats.avgChromaDistortion;
+        frameStats->avgLumaDistortion       = curFrame->m_encData->m_frameStats.avgLumaDistortion;
+        frameStats->avgPsyEnergy            = curFrame->m_encData->m_frameStats.avgPsyEnergy;
+        frameStats->avgResEnergy            = curFrame->m_encData->m_frameStats.avgResEnergy;
+        frameStats->avgLumaLevel            = curFrame->m_fencPic->m_avgLumaLevel;
+        frameStats->maxLumaLevel            = curFrame->m_fencPic->m_maxLumaLevel;
+        /* copy per-depth CU mode distributions */
+        for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
+        {
+            frameStats->cuStats.percentSkipCu[depth]  = curFrame->m_encData->m_frameStats.percentSkipCu[depth];
+            frameStats->cuStats.percentMergeCu[depth] = curFrame->m_encData->m_frameStats.percentMergeCu[depth];
+            frameStats->cuStats.percentInterDistribution[depth][0] = curFrame->m_encData->m_frameStats.percentInterDistribution[depth][0];
+            frameStats->cuStats.percentInterDistribution[depth][1] = curFrame->m_encData->m_frameStats.percentInterDistribution[depth][1];
+            frameStats->cuStats.percentInterDistribution[depth][2] = curFrame->m_encData->m_frameStats.percentInterDistribution[depth][2];
+            for (int n = 0; n < INTRA_MODES; n++)
+                frameStats->cuStats.percentIntraDistribution[depth][n] = curFrame->m_encData->m_frameStats.percentIntraDistribution[depth][n];
+        }
+    }
+}
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4800) // forcing int to bool
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
+
+/* Emit the start-of-stream headers (VPS/SPS/PPS) plus any configured prefix
+ * SEI messages (mastering display, content light level, encoder info,
+ * active parameter sets) into the output NAL list. */
+void Encoder::getStreamHeaders(NALList& list, Entropy& sbacCoder, Bitstream& bs)
+{
+    sbacCoder.setBitstream(&bs);
+
+    /* headers for start of bitstream */
+    bs.resetBits();
+    sbacCoder.codeVPS(m_vps);
+    bs.writeByteAlignment();
+    list.serialize(NAL_UNIT_VPS, bs);
+
+    bs.resetBits();
+    sbacCoder.codeSPS(m_sps, m_scalingList, m_vps.ptl);
+    bs.writeByteAlignment();
+    list.serialize(NAL_UNIT_SPS, bs);
+
+    bs.resetBits();
+    sbacCoder.codePPS(m_pps);
+    bs.writeByteAlignment();
+    list.serialize(NAL_UNIT_PPS, bs);
+
+    if (m_param->masteringDisplayColorVolume)
+    {
+        SEIMasteringDisplayColorVolume mdsei;
+        if (mdsei.parse(m_param->masteringDisplayColorVolume))
+        {
+            bs.resetBits();
+            mdsei.write(bs, m_sps);
+            bs.writeByteAlignment();
+            list.serialize(NAL_UNIT_PREFIX_SEI, bs);
+        }
+        else
+            x265_log(m_param, X265_LOG_WARNING, "unable to parse mastering display color volume info\n");
+    }
+
+    if (m_emitCLLSEI)
+    {
+        SEIContentLightLevel cllsei;
+        cllsei.max_content_light_level = m_param->maxCLL;
+        cllsei.max_pic_average_light_level = m_param->maxFALL;
+        bs.resetBits();
+        cllsei.write(bs, m_sps);
+        bs.writeByteAlignment();
+        list.serialize(NAL_UNIT_PREFIX_SEI, bs);
+    }
+
+    if (m_param->bEmitInfoSEI)
+    {
+        char *opts = x265_param2string(m_param);
+        if (opts)
+        {
+            /* 200 bytes of slack covers the fixed boilerplate text below */
+            size_t bufSize = strlen(opts) + strlen(PFX(version_str)) +
+                             strlen(PFX(build_info_str)) + 200;
+            char *buffer = X265_MALLOC(char, bufSize);
+            if (buffer)
+            {
+                /* snprintf instead of sprintf: stays bounded even if the
+                 * slack estimate above ever falls short of the boilerplate */
+                snprintf(buffer, bufSize,
+                         "x265 (build %d) - %s:%s - H.265/HEVC codec - "
+                         "Copyright 2013-2015 (c) Multicoreware Inc - "
+                         "http://x265.org - options: %s",
+                         X265_BUILD, PFX(version_str), PFX(build_info_str), opts);
+
+                bs.resetBits();
+                SEIuserDataUnregistered idsei;
+                idsei.m_userData = (uint8_t*)buffer;
+                idsei.m_userDataLength = (uint32_t)strlen(buffer);
+                idsei.write(bs, m_sps);
+                bs.writeByteAlignment();
+                list.serialize(NAL_UNIT_PREFIX_SEI, bs);
+
+                X265_FREE(buffer);
+            }
+
+            X265_FREE(opts);
+        }
+    }
+
+    if (m_param->bEmitHRDSEI || !!m_param->interlaceMode)
+    {
+        /* Picture Timing and Buffering Period SEI require the SPS to be "activated" */
+        SEIActiveParameterSets sei;
+        sei.m_selfContainedCvsFlag = true;
+        sei.m_noParamSetUpdateFlag = true;
+
+        bs.resetBits();
+        sei.write(bs, m_sps);
+        bs.writeByteAlignment();
+        list.serialize(NAL_UNIT_PREFIX_SEI, bs);
+    }
+}
+
+void Encoder::initVPS(VPS *vps)
+{
+    /* Only the profile-tier-level source/constraint flags are set here; the
+     * remainder of the VPS is filled in by determineLevel() */
+    const bool interlaced = m_param->interlaceMode != 0;
+
+    vps->ptl.progressiveSourceFlag   = !interlaced;
+    vps->ptl.interlacedSourceFlag    = interlaced;
+    vps->ptl.nonPackedConstraintFlag = false;
+    vps->ptl.frameOnlyConstraintFlag = !interlaced;
+}
+
+/* Populate the sequence parameter set from the encoder configuration:
+ * picture geometry, quad-tree size limits, coding-tool flags, and the VUI.
+ * (Profile/tier/level fields are handled elsewhere.) */
+void Encoder::initSPS(SPS *sps)
+{
+    /* picture geometry in luma samples and derived CTU counts */
+    sps->conformanceWindow = m_conformanceWindow;
+    sps->chromaFormatIdc = m_param->internalCsp;
+    sps->picWidthInLumaSamples = m_param->sourceWidth;
+    sps->picHeightInLumaSamples = m_param->sourceHeight;
+    sps->numCuInWidth = (m_param->sourceWidth + g_maxCUSize - 1) / g_maxCUSize;
+    sps->numCuInHeight = (m_param->sourceHeight + g_maxCUSize - 1) / g_maxCUSize;
+    sps->numCUsInFrame = sps->numCuInWidth * sps->numCuInHeight;
+    sps->numPartitions = NUM_4x4_PARTITIONS;
+    sps->numPartInCUSize = 1 << g_unitSizeDepth;
+
+    /* coding and transform quad-tree depth/size limits */
+    sps->log2MinCodingBlockSize = g_maxLog2CUSize - g_maxCUDepth;
+    sps->log2DiffMaxMinCodingBlockSize = g_maxCUDepth;
+    uint32_t maxLog2TUSize = (uint32_t)g_log2Size[m_param->maxTUSize];
+    sps->quadtreeTULog2MaxSize = X265_MIN(g_maxLog2CUSize, maxLog2TUSize); // TU can never exceed the CTU
+    sps->quadtreeTULog2MinSize = 2;                                        // 4x4 minimum TU
+    sps->quadtreeTUMaxDepthInter = m_param->tuQTMaxInterDepth;
+    sps->quadtreeTUMaxDepthIntra = m_param->tuQTMaxIntraDepth;
+
+    /* coding-tool enable flags */
+    sps->bUseSAO = m_param->bEnableSAO;
+
+    sps->bUseAMP = m_param->bEnableAMP;
+    sps->maxAMPDepth = m_param->bEnableAMP ? g_maxCUDepth : 0;
+
+    /* DPB sizing mirrors the VPS values */
+    sps->maxTempSubLayers = m_param->bEnableTemporalSubLayers ? 2 : 1;
+    sps->maxDecPicBuffering = m_vps.maxDecPicBuffering;
+    sps->numReorderPics = m_vps.numReorderPics;
+    sps->maxLatencyIncrease = m_vps.maxLatencyIncrease = m_param->bframes;
+
+    sps->bUseStrongIntraSmoothing = m_param->bEnableStrongIntraSmoothing;
+    sps->bTemporalMVPEnabled = m_param->bEnableTemporalMvp;
+
+    /* VUI: pass user-supplied display/signal metadata straight through */
+    VUI& vui = sps->vuiParameters;
+    vui.aspectRatioInfoPresentFlag = !!m_param->vui.aspectRatioIdc;
+    vui.aspectRatioIdc = m_param->vui.aspectRatioIdc;
+    vui.sarWidth = m_param->vui.sarWidth;
+    vui.sarHeight = m_param->vui.sarHeight;
+
+    vui.overscanInfoPresentFlag = m_param->vui.bEnableOverscanInfoPresentFlag;
+    vui.overscanAppropriateFlag = m_param->vui.bEnableOverscanAppropriateFlag;
+
+    vui.videoSignalTypePresentFlag = m_param->vui.bEnableVideoSignalTypePresentFlag;
+    vui.videoFormat = m_param->vui.videoFormat;
+    vui.videoFullRangeFlag = m_param->vui.bEnableVideoFullRangeFlag;
+
+    vui.colourDescriptionPresentFlag = m_param->vui.bEnableColorDescriptionPresentFlag;
+    vui.colourPrimaries = m_param->vui.colorPrimaries;
+    vui.transferCharacteristics = m_param->vui.transferCharacteristics;
+    vui.matrixCoefficients = m_param->vui.matrixCoeffs;
+
+    vui.chromaLocInfoPresentFlag = m_param->vui.bEnableChromaLocInfoPresentFlag;
+    vui.chromaSampleLocTypeTopField = m_param->vui.chromaSampleLocTypeTopField;
+    vui.chromaSampleLocTypeBottomField = m_param->vui.chromaSampleLocTypeBottomField;
+
+    vui.defaultDisplayWindow.bEnabled = m_param->vui.bEnableDefaultDisplayWindowFlag;
+    vui.defaultDisplayWindow.rightOffset = m_param->vui.defDispWinRightOffset;
+    vui.defaultDisplayWindow.topOffset = m_param->vui.defDispWinTopOffset;
+    vui.defaultDisplayWindow.bottomOffset = m_param->vui.defDispWinBottomOffset;
+    vui.defaultDisplayWindow.leftOffset = m_param->vui.defDispWinLeftOffset;
+
+    /* interlaced content requires field-sequence signalling in the VUI */
+    vui.frameFieldInfoPresentFlag = !!m_param->interlaceMode;
+    vui.fieldSeqFlag = !!m_param->interlaceMode;
+
+    vui.hrdParametersPresentFlag = m_param->bEmitHRDSEI;
+
+    vui.timingInfo.numUnitsInTick = m_param->fpsDenom;
+    vui.timingInfo.timeScale = m_param->fpsNum;
+}
+
+void Encoder::initPPS(PPS *pps)
+{
+    /* CU-level delta-QP signalling is required whenever adaptive
+     * quantization or VBV rate control may vary QP within a picture
+     * (never in lossless mode) */
+    bool rateControlUsesVbv = m_param->rc.vbvBufferSize > 0 && m_param->rc.vbvMaxBitrate > 0;
+    bool signalCuDeltaQp = !m_param->bLossless && (m_param->rc.aqMode || rateControlUsesVbv);
+
+    pps->bUseDQP = signalCuDeltaQp;
+    if (signalCuDeltaQp)
+    {
+        pps->maxCuDQPDepth = g_log2Size[m_param->maxCUSize] - g_log2Size[m_param->rc.qgSize];
+        X265_CHECK(pps->maxCuDQPDepth <= 2, "max CU DQP depth cannot be greater than 2\n");
+    }
+    else
+        pps->maxCuDQPDepth = 0;
+
+    pps->chromaQpOffset[0] = m_param->cbQpOffset;
+    pps->chromaQpOffset[1] = m_param->crQpOffset;
+
+    /* prediction and quantization tool flags */
+    pps->bConstrainedIntraPred = m_param->bEnableConstrainedIntra;
+    pps->bUseWeightPred = m_param->bEnableWeightedPred;
+    pps->bUseWeightedBiPred = m_param->bEnableWeightedBiPred;
+    pps->bTransquantBypassEnabled = m_param->bCULossless || m_param->bLossless;
+    pps->bTransformSkipEnabled = m_param->bEnableTransformSkip;
+    pps->bSignHideEnabled = m_param->bEnableSignHiding;
+
+    /* deblocking control syntax is present when the filter is disabled or
+     * either offset differs from its default of zero */
+    pps->bDeblockingFilterControlPresent = !m_param->bEnableLoopFilter || m_param->deblockingFilterBetaOffset || m_param->deblockingFilterTCOffset;
+    pps->bPicDisableDeblockingFilter = !m_param->bEnableLoopFilter;
+    pps->deblockingFilterBetaOffsetDiv2 = m_param->deblockingFilterBetaOffset;
+    pps->deblockingFilterTcOffsetDiv2 = m_param->deblockingFilterTCOffset;
+
+    pps->bEntropyCodingSyncEnabled = m_param->bEnableWavefront;
+}
+
+/* Sanitize and reconcile the user-provided parameter set.  Many options
+ * imply, require, or preclude others; this function applies those rules in
+ * place.  m_param aliases p for the remainder of the encode. */
+void Encoder::configure(x265_param *p)
+{
+    this->m_param = p;
+
+    if (p->keyframeMax < 0)
+    {
+        /* A negative max GOP size indicates the user wants only one I frame at
+         * the start of the stream. Set an infinite GOP distance and disable
+         * adaptive I frame placement */
+        p->keyframeMax = INT_MAX;
+        p->scenecutThreshold = 0;
+    }
+    else if (p->keyframeMax <= 1)
+    {
+        p->keyframeMax = 1;
+
+        /* disable lookahead for all-intra encodes (the original listed
+         * bframes and bFrameAdaptive twice; the duplicates are removed) */
+        p->bFrameAdaptive = 0;
+        p->bframes = 0;
+        p->bOpenGOP = 0;
+        p->bRepeatHeaders = 1;
+        p->lookaheadDepth = 0;
+        p->scenecutThreshold = 0;
+        p->rc.cuTree = 0;
+        p->bEnableWeightedPred = 0;
+        p->bEnableWeightedBiPred = 0;
+
+        /* SPSs shall have sps_max_dec_pic_buffering_minus1[ sps_max_sub_layers_minus1 ] equal to 0 only */
+        p->maxNumReferences = 1;
+    }
+    if (!p->keyframeMin)
+    {
+        double fps = (double)p->fpsNum / p->fpsDenom;
+        p->keyframeMin = X265_MIN((int)fps, p->keyframeMax / 10);
+    }
+    p->keyframeMin = X265_MAX(1, X265_MIN(p->keyframeMin, p->keyframeMax / 2 + 1));
+
+    if (!p->bframes)
+        p->bBPyramid = 0;
+    if (!p->rdoqLevel)
+        p->psyRdoq = 0;
+
+    /* Disable features which are not supported by the current RD level */
+    if (p->rdLevel < 3)
+    {
+        if (p->bCULossless)             /* impossible */
+            x265_log(p, X265_LOG_WARNING, "--cu-lossless disabled, requires --rdlevel 3 or higher\n");
+        if (p->bEnableTransformSkip)    /* impossible */
+            x265_log(p, X265_LOG_WARNING, "--tskip disabled, requires --rdlevel 3 or higher\n");
+        p->bCULossless = p->bEnableTransformSkip = 0;
+    }
+    if (p->rdLevel < 2)
+    {
+        if (p->bDistributeModeAnalysis) /* not useful */
+            x265_log(p, X265_LOG_WARNING, "--pmode disabled, requires --rdlevel 2 or higher\n");
+        p->bDistributeModeAnalysis = 0;
+
+        p->psyRd = 0;                   /* impossible */
+
+        if (p->bEnableRectInter)        /* broken, not very useful */
+            x265_log(p, X265_LOG_WARNING, "--rect disabled, requires --rdlevel 2 or higher\n");
+        p->bEnableRectInter = 0;
+    }
+
+    if (!p->bEnableRectInter)          /* not useful */
+        p->bEnableAMP = false;
+
+    /* In 444, chroma gets twice as much resolution, so halve quality when psy-rd is enabled */
+    if (p->internalCsp == X265_CSP_I444 && p->psyRd)
+    {
+        p->cbQpOffset += 6;
+        p->crQpOffset += 6;
+    }
+
+    if (p->bLossless)
+    {
+        p->rc.rateControlMode = X265_RC_CQP;
+        p->rc.qp = 4; // An oddity, QP=4 is more lossless than QP=0 and gives better lambdas
+        p->bEnableSsim = 0;
+        p->bEnablePsnr = 0;
+    }
+
+    if (p->rc.rateControlMode == X265_RC_CQP)
+    {
+        p->rc.aqMode = X265_AQ_NONE;
+        p->rc.bitrate = 0;
+        p->rc.cuTree = 0;
+        p->rc.aqStrength = 0;
+    }
+
+    /* cuTree needs an AQ mode to carry its offsets; use variance AQ with
+     * zero strength so only the tree offsets apply */
+    if (p->rc.aqMode == 0 && p->rc.cuTree)
+    {
+        p->rc.aqMode = X265_AQ_VARIANCE;
+        p->rc.aqStrength = 0.0;
+    }
+
+    if (p->lookaheadDepth == 0 && p->rc.cuTree && !p->rc.bStatRead)
+    {
+        x265_log(p, X265_LOG_WARNING, "cuTree disabled, requires lookahead to be enabled\n");
+        p->rc.cuTree = 0;
+    }
+
+    if (p->maxTUSize > p->maxCUSize)
+    {
+        x265_log(p, X265_LOG_WARNING, "Max TU size should be less than or equal to max CU size, setting max TU size = %d\n", p->maxCUSize);
+        p->maxTUSize = p->maxCUSize;
+    }
+
+    if (p->rc.aqStrength == 0 && p->rc.cuTree == 0)
+        p->rc.aqMode = X265_AQ_NONE;
+
+    if (p->rc.aqMode == X265_AQ_NONE && p->rc.cuTree == 0)
+        p->rc.aqStrength = 0;
+
+    /* very short strict-CBR encodes: let the lookahead cover all frames */
+    if (p->totalFrames && p->totalFrames <= 2 * ((float)p->fpsNum) / p->fpsDenom && p->rc.bStrictCbr)
+        p->lookaheadDepth = p->totalFrames;
+
+    if (p->scalingLists && p->internalCsp == X265_CSP_I444)
+    {
+        x265_log(p, X265_LOG_WARNING, "Scaling lists are not yet supported for 4:4:4 color space\n");
+        p->scalingLists = 0;
+    }
+
+    if (p->interlaceMode)
+        x265_log(p, X265_LOG_WARNING, "Support for interlaced video is experimental\n");
+
+    if (p->rc.rfConstantMin > p->rc.rfConstant)
+    {
+        /* log through 'p' like every other warning here (the original used
+         * m_param, which is the same pointer but inconsistent) */
+        x265_log(p, X265_LOG_WARNING, "CRF min must be less than CRF\n");
+        p->rc.rfConstantMin = 0;
+    }
+
+    if (p->analysisMode && (p->bDistributeModeAnalysis || p->bDistributeMotionEstimation))
+    {
+        x265_log(p, X265_LOG_WARNING, "Analysis load/save options incompatible with pmode/pme, Disabling pmode/pme\n");
+        p->bDistributeMotionEstimation = p->bDistributeModeAnalysis = 0;
+    }
+
+    /* limit-refs modes 2 and 3 (bit 1 set) cannot be combined with pmode;
+     * the original condition carried a redundant '&& 1' */
+    if (p->bDistributeModeAnalysis && (p->limitReferences >> 1))
+    {
+        x265_log(p, X265_LOG_WARNING, "Limit reference options 2 and 3 are not supported with pmode. Disabling limit reference\n");
+        p->limitReferences = 0;
+    }
+
+    if (p->bEnableTemporalSubLayers && !p->bframes)
+    {
+        x265_log(p, X265_LOG_WARNING, "B frames not enabled, temporal sublayer disabled\n");
+        p->bEnableTemporalSubLayers = 0;
+    }
+
+    m_bframeDelay = p->bframes ? (p->bBPyramid ? 2 : 1) : 0;
+
+    p->bFrameBias = X265_MIN(X265_MAX(-90, p->bFrameBias), 100);
+
+    if (p->logLevel < X265_LOG_INFO)
+    {
+        /* don't measure these metrics if they will not be reported */
+        p->bEnablePsnr = 0;
+        p->bEnableSsim = 0;
+    }
+    /* Warn users trying to measure PSNR/SSIM with psy opts on. */
+    if (p->bEnablePsnr || p->bEnableSsim)
+    {
+        const char *s = NULL;
+
+        if (p->psyRd || p->psyRdoq)
+        {
+            s = p->bEnablePsnr ? "psnr" : "ssim";
+            x265_log(p, X265_LOG_WARNING, "--%s used with psy on: results will be invalid!\n", s);
+        }
+        else if (!p->rc.aqMode && p->bEnableSsim)
+        {
+            x265_log(p, X265_LOG_WARNING, "--ssim used with AQ off: results will be invalid!\n");
+            s = "ssim";
+        }
+        else if (p->rc.aqStrength > 0 && p->bEnablePsnr)
+        {
+            x265_log(p, X265_LOG_WARNING, "--psnr used with AQ on: results will be invalid!\n");
+            s = "psnr";
+        }
+        if (s)
+            x265_log(p, X265_LOG_WARNING, "--tune %s should be used if attempting to benchmark %s!\n", s, s);
+    }
+
+    /* some options make no sense if others are disabled */
+    p->bSaoNonDeblocked &= p->bEnableSAO;
+    p->bEnableTSkipFast &= p->bEnableTransformSkip;
+
+    /* initialize the conformance window */
+    m_conformanceWindow.bEnabled = false;
+    m_conformanceWindow.rightOffset = 0;
+    m_conformanceWindow.topOffset = 0;
+    m_conformanceWindow.bottomOffset = 0;
+    m_conformanceWindow.leftOffset = 0;
+
+    /* set pad size if width is not multiple of the minimum CU size */
+    if (p->sourceWidth & (p->minCUSize - 1))
+    {
+        uint32_t rem = p->sourceWidth & (p->minCUSize - 1);
+        uint32_t padsize = p->minCUSize - rem;
+        p->sourceWidth += padsize;
+
+        m_conformanceWindow.bEnabled = true;
+        m_conformanceWindow.rightOffset = padsize;
+    }
+
+    /* set pad size if height is not multiple of the minimum CU size */
+    if (p->sourceHeight & (p->minCUSize - 1))
+    {
+        uint32_t rem = p->sourceHeight & (p->minCUSize - 1);
+        uint32_t padsize = p->minCUSize - rem;
+        p->sourceHeight += padsize;
+
+        m_conformanceWindow.bEnabled = true;
+        m_conformanceWindow.bottomOffset = padsize;
+    }
+    if (p->bDistributeModeAnalysis && p->analysisMode)
+    {
+        p->analysisMode = X265_ANALYSIS_OFF;
+        x265_log(p, X265_LOG_WARNING, "Analysis save and load mode not supported for distributed mode analysis\n");
+    }
+
+    /* consistently use 'p' below (the original mixed in m_param, which is
+     * the same pointer after the assignment at the top of this function) */
+    bool bIsVbv = p->rc.vbvBufferSize > 0 && p->rc.vbvMaxBitrate > 0;
+    if (!p->bLossless && (p->rc.aqMode || bIsVbv))
+    {
+        if (p->rc.qgSize < X265_MAX(16, p->minCUSize))
+        {
+            p->rc.qgSize = X265_MAX(16, p->minCUSize);
+            x265_log(p, X265_LOG_WARNING, "QGSize should be greater than or equal to 16 and minCUSize, setting QGSize = %d\n", p->rc.qgSize);
+        }
+        if (p->rc.qgSize > p->maxCUSize)
+        {
+            p->rc.qgSize = p->maxCUSize;
+            x265_log(p, X265_LOG_WARNING, "QGSize should be less than or equal to maxCUSize, setting QGSize = %d\n", p->rc.qgSize);
+        }
+    }
+    else
+        p->rc.qgSize = p->maxCUSize;
+
+    if (p->bLogCuStats)
+        x265_log(p, X265_LOG_WARNING, "--cu-stats option is now deprecated\n");
+
+    if (p->csvfn)
+        x265_log(p, X265_LOG_WARNING, "libx265 no longer supports CSV file statistics\n");
+}
+
+/* Allocate per-frame analysis buffers for analysis save/load mode: intra
+ * buffers for I/IDR slices, inter buffers otherwise.  The CHECKED_MALLOC*
+ * macros jump to the 'fail' label on allocation failure, where any
+ * partially allocated buffers are released and the encoder is aborted. */
+void Encoder::allocAnalysis(x265_analysis_data* analysis)
+{
+    analysis->interData = analysis->intraData = NULL;
+    if (analysis->sliceType == X265_TYPE_IDR || analysis->sliceType == X265_TYPE_I)
+    {
+        analysis_intra_data *intraData = (analysis_intra_data*)analysis->intraData;
+        CHECKED_MALLOC_ZERO(intraData, analysis_intra_data, 1);
+        /* one entry per 4x4 partition in every CTU of the frame */
+        CHECKED_MALLOC(intraData->depth, uint8_t, analysis->numPartitions * analysis->numCUsInFrame);
+        CHECKED_MALLOC(intraData->modes, uint8_t, analysis->numPartitions * analysis->numCUsInFrame);
+        CHECKED_MALLOC(intraData->partSizes, char, analysis->numPartitions * analysis->numCUsInFrame);
+        CHECKED_MALLOC(intraData->chromaModes, uint8_t, analysis->numPartitions * analysis->numCUsInFrame);
+        analysis->intraData = intraData;
+    }
+    else
+    {
+        analysis_inter_data *interData = (analysis_inter_data*)analysis->interData;
+        CHECKED_MALLOC_ZERO(interData, analysis_inter_data, 1);
+        /* 'ref' is sized for both prediction lists (factor of 2) */
+        CHECKED_MALLOC_ZERO(interData->ref, int32_t, analysis->numCUsInFrame * X265_MAX_PRED_MODE_PER_CTU * 2);
+        CHECKED_MALLOC(interData->depth, uint8_t, analysis->numPartitions * analysis->numCUsInFrame);
+        CHECKED_MALLOC(interData->modes, uint8_t, analysis->numPartitions * analysis->numCUsInFrame);
+        CHECKED_MALLOC_ZERO(interData->bestMergeCand, uint32_t, analysis->numCUsInFrame * CUGeom::MAX_GEOMS);
+        analysis->interData = interData;
+    }
+    return;
+
+fail:
+    /* release whatever was allocated before the failure and flag abort */
+    freeAnalysis(analysis);
+    m_aborted = true;
+}
+
+/* Release whichever analysis buffer set was allocated for this frame.
+ * Both pointers are checked: allocAnalysis() invokes this on its failure
+ * path, where intraData and interData may both be NULL (the original
+ * else-branch dereferenced a NULL interData in that case). */
+void Encoder::freeAnalysis(x265_analysis_data* analysis)
+{
+    if (analysis->intraData)
+    {
+        X265_FREE(((analysis_intra_data*)analysis->intraData)->depth);
+        X265_FREE(((analysis_intra_data*)analysis->intraData)->modes);
+        X265_FREE(((analysis_intra_data*)analysis->intraData)->partSizes);
+        X265_FREE(((analysis_intra_data*)analysis->intraData)->chromaModes);
+        X265_FREE(analysis->intraData);
+    }
+    else if (analysis->interData)
+    {
+        X265_FREE(((analysis_inter_data*)analysis->interData)->ref);
+        X265_FREE(((analysis_inter_data*)analysis->interData)->depth);
+        X265_FREE(((analysis_inter_data*)analysis->interData)->modes);
+        X265_FREE(((analysis_inter_data*)analysis->interData)->bestMergeCand);
+        X265_FREE(analysis->interData);
+    }
+}
+
+void Encoder::readAnalysisFile(x265_analysis_data* analysis, int curPoc) // locate and load the stored analysis record for POC curPoc from m_analysisFile
+{
+
+#define X265_FREAD(val, size, readSize, fileOffset)\
+    if (fread(val, size, readSize, fileOffset) != readSize)\
+    {\
+        x265_log(NULL, X265_LOG_ERROR, "Error reading analysis data\n");\
+        freeAnalysis(analysis);\
+        m_aborted = true;\
+        return;\
+    }\
+
+    static uint64_t consumedBytes = 0;      // bytes of records fully parsed so far; NOTE(review): static state — assumes a single encoder instance / serialized calls, confirm
+    static uint64_t totalConsumedBytes = 0; // file offset of the first not-yet-consumed record (the resume point)
+    fseeko(m_analysisFile, totalConsumedBytes, SEEK_SET); // start scanning at the resume point rather than the file head
+
+    int poc; uint32_t frameRecordSize; // each record begins with its own size followed by its POC
+    X265_FREAD(&frameRecordSize, sizeof(uint32_t), 1, m_analysisFile);
+    X265_FREAD(&poc, sizeof(int), 1, m_analysisFile);
+
+    uint64_t currentOffset = totalConsumedBytes;
+
+    /* Seeking to the right frame Record */
+    while (poc != curPoc && !feof(m_analysisFile)) // hop record-to-record using each record's stored size until the POC matches or EOF
+    {
+        currentOffset += frameRecordSize;
+        fseeko(m_analysisFile, currentOffset, SEEK_SET);
+        X265_FREAD(&frameRecordSize, sizeof(uint32_t), 1, m_analysisFile);
+        X265_FREAD(&poc, sizeof(int), 1, m_analysisFile);
+    }
+
+    if (poc != curPoc || feof(m_analysisFile)) // no record for this POC: warn, release buffers, and continue without analysis reuse (non-fatal)
+    {
+        x265_log(NULL, X265_LOG_WARNING, "Error reading analysis data: Cannot find POC %d\n", curPoc);
+        freeAnalysis(analysis);
+        return;
+    }
+
+    /* Now arrived at the right frame, read the record */
+    analysis->poc = poc;
+    analysis->frameRecordSize = frameRecordSize;
+    X265_FREAD(&analysis->sliceType, sizeof(int), 1, m_analysisFile);
+    X265_FREAD(&analysis->numCUsInFrame, sizeof(int), 1, m_analysisFile);
+    X265_FREAD(&analysis->numPartitions, sizeof(int), 1, m_analysisFile);
+
+    /* Memory is allocated for inter and intra analysis data based on the slicetype */
+    allocAnalysis(analysis);
+
+    if (analysis->sliceType == X265_TYPE_IDR || analysis->sliceType == X265_TYPE_I) // intra record: four per-CU-partition byte arrays
+    {
+        X265_FREAD(((analysis_intra_data *)analysis->intraData)->depth, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FREAD(((analysis_intra_data *)analysis->intraData)->modes, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FREAD(((analysis_intra_data *)analysis->intraData)->partSizes, sizeof(char), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FREAD(((analysis_intra_data *)analysis->intraData)->chromaModes, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        analysis->sliceType = X265_TYPE_I; // normalize IDR to plain I for downstream consumers
+        consumedBytes += frameRecordSize;
+    }
+    else if (analysis->sliceType == X265_TYPE_P) // P record: single reference list
+    {
+        X265_FREAD(((analysis_inter_data *)analysis->interData)->ref, sizeof(int32_t), analysis->numCUsInFrame * X265_MAX_PRED_MODE_PER_CTU, m_analysisFile);
+        X265_FREAD(((analysis_inter_data *)analysis->interData)->depth, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FREAD(((analysis_inter_data *)analysis->interData)->modes, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FREAD(((analysis_inter_data *)analysis->interData)->bestMergeCand, sizeof(uint32_t), analysis->numCUsInFrame * CUGeom::MAX_GEOMS, m_analysisFile);
+        consumedBytes += frameRecordSize;
+        totalConsumedBytes = consumedBytes; // resume point only advances at P records — presumably the B records between P's are consumed by then; TODO confirm
+    }
+    else // B record: references for both lists, hence the * 2 on the ref array
+    {
+        X265_FREAD(((analysis_inter_data *)analysis->interData)->ref, sizeof(int32_t), analysis->numCUsInFrame * X265_MAX_PRED_MODE_PER_CTU * 2, m_analysisFile);
+        X265_FREAD(((analysis_inter_data *)analysis->interData)->depth, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FREAD(((analysis_inter_data *)analysis->interData)->modes, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FREAD(((analysis_inter_data *)analysis->interData)->bestMergeCand, sizeof(uint32_t), analysis->numCUsInFrame * CUGeom::MAX_GEOMS, m_analysisFile);
+        consumedBytes += frameRecordSize;
+    }
+#undef X265_FREAD
+}
+
+void Encoder::writeAnalysisFile(x265_analysis_data* analysis) // append one frame's analysis record (size, POC, headers, then per-slice-type payload) to m_analysisFile
+{
+
+#define X265_FWRITE(val, size, writeSize, fileOffset)\
+    if (fwrite(val, size, writeSize, fileOffset) < writeSize)\
+    {\
+        x265_log(NULL, X265_LOG_ERROR, "Error writing analysis data\n");\
+        freeAnalysis(analysis);\
+        m_aborted = true;\
+        return;\
+    }\
+
+    /* calculate frameRecordSize */
+    analysis->frameRecordSize = sizeof(analysis->frameRecordSize) + sizeof(analysis->poc) + sizeof(analysis->sliceType) +
+                      sizeof(analysis->numCUsInFrame) + sizeof(analysis->numPartitions); // fixed header first; must mirror the reads in readAnalysisFile()
+    if (analysis->sliceType == X265_TYPE_IDR || analysis->sliceType == X265_TYPE_I)
+        analysis->frameRecordSize += sizeof(uint8_t) * analysis->numCUsInFrame * analysis->numPartitions * 4; // depth, modes, partSizes, chromaModes
+    else if (analysis->sliceType == X265_TYPE_P)
+    {
+        analysis->frameRecordSize += sizeof(int32_t) * analysis->numCUsInFrame * X265_MAX_PRED_MODE_PER_CTU; // single ref list for P
+        analysis->frameRecordSize += sizeof(uint8_t) * analysis->numCUsInFrame * analysis->numPartitions * 2; // depth + modes
+        analysis->frameRecordSize += sizeof(uint32_t) * analysis->numCUsInFrame * CUGeom::MAX_GEOMS;
+    }
+    else
+    {
+        analysis->frameRecordSize += sizeof(int32_t) * analysis->numCUsInFrame * X265_MAX_PRED_MODE_PER_CTU * 2; // both ref lists for B
+        analysis->frameRecordSize += sizeof(uint8_t) * analysis->numCUsInFrame * analysis->numPartitions * 2; // depth + modes
+        analysis->frameRecordSize += sizeof(uint32_t) * analysis->numCUsInFrame * CUGeom::MAX_GEOMS;
+    }
+
+    X265_FWRITE(&analysis->frameRecordSize, sizeof(uint32_t), 1, m_analysisFile); // record size first so the reader can skip records by POC
+    X265_FWRITE(&analysis->poc, sizeof(int), 1, m_analysisFile);
+    X265_FWRITE(&analysis->sliceType, sizeof(int), 1, m_analysisFile);
+    X265_FWRITE(&analysis->numCUsInFrame, sizeof(int), 1, m_analysisFile);
+    X265_FWRITE(&analysis->numPartitions, sizeof(int), 1, m_analysisFile);
+
+    if (analysis->sliceType == X265_TYPE_IDR || analysis->sliceType == X265_TYPE_I) // intra payload
+    {
+        X265_FWRITE(((analysis_intra_data*)analysis->intraData)->depth, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FWRITE(((analysis_intra_data*)analysis->intraData)->modes, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FWRITE(((analysis_intra_data*)analysis->intraData)->partSizes, sizeof(char), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FWRITE(((analysis_intra_data*)analysis->intraData)->chromaModes, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+    }
+    else if (analysis->sliceType == X265_TYPE_P) // P payload: one ref list
+    {
+        X265_FWRITE(((analysis_inter_data*)analysis->interData)->ref, sizeof(int32_t), analysis->numCUsInFrame * X265_MAX_PRED_MODE_PER_CTU, m_analysisFile);
+        X265_FWRITE(((analysis_inter_data*)analysis->interData)->depth, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FWRITE(((analysis_inter_data*)analysis->interData)->modes, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FWRITE(((analysis_inter_data*)analysis->interData)->bestMergeCand, sizeof(uint32_t), analysis->numCUsInFrame * CUGeom::MAX_GEOMS, m_analysisFile);
+    }
+    else // B payload: both ref lists
+    {
+        X265_FWRITE(((analysis_inter_data*)analysis->interData)->ref, sizeof(int32_t), analysis->numCUsInFrame * X265_MAX_PRED_MODE_PER_CTU * 2, m_analysisFile);
+        X265_FWRITE(((analysis_inter_data*)analysis->interData)->depth, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FWRITE(((analysis_inter_data*)analysis->interData)->modes, sizeof(uint8_t), analysis->numCUsInFrame * analysis->numPartitions, m_analysisFile);
+        X265_FWRITE(((analysis_inter_data*)analysis->interData)->bestMergeCand, sizeof(uint32_t), analysis->numCUsInFrame * CUGeom::MAX_GEOMS, m_analysisFile);
+    }
+#undef X265_FWRITE
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/encoder.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,179 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_ENCODER_H
+#define X265_ENCODER_H
+
+#include "common.h"
+#include "slice.h"
+#include "scalinglist.h"
+#include "x265.h"
+#include "nal.h"
+
+struct x265_encoder {};
+
+namespace X265_NS {
+// private namespace
+extern const char g_sliceTypeToChar[3];
+
+class Entropy;
+
+struct EncStats // running per-slice-type quality/bit statistics, accumulated frame by frame
+{
+    double        m_psnrSumY; // summed luma PSNR over counted frames
+    double        m_psnrSumU; // summed Cb PSNR
+    double        m_psnrSumV; // summed Cr PSNR
+    double        m_globalSsim; // summed SSIM over counted frames
+    double        m_totalQp; // summed average QP over counted frames
+    uint64_t      m_accBits; // accumulated encoded bits
+    uint32_t      m_numPics; // number of frames folded into these sums
+    uint16_t      m_maxCLL; // max content light level seen
+    double        m_maxFALL; // max frame-average light level seen
+
+    EncStats() // zero all accumulators
+    {
+        m_psnrSumY = m_psnrSumU = m_psnrSumV = m_globalSsim = 0;
+        m_accBits = 0;
+        m_numPics = 0;
+        m_totalQp = 0;
+        m_maxCLL = 0;
+        m_maxFALL = 0;
+    }
+
+    void addQP(double aveQp); // fold one frame's average QP into m_totalQp
+
+    void addPsnr(double psnrY, double psnrU, double psnrV); // fold one frame's per-plane PSNR into the sums
+
+    void addBits(uint64_t bits); // fold one frame's bit count into m_accBits
+
+    void addSsim(double ssim); // fold one frame's SSIM into m_globalSsim
+};
+
+class FrameEncoder;
+class DPB;
+class Lookahead;
+class RateControl;
+class ThreadPool;
+
+class Encoder : public x265_encoder // top-level encoder: owns frame encoders, DPB, rate control, lookahead, and the analysis file
+{
+public:
+
+    int                m_pocLast;         // time index (POC)
+    int                m_encodedFrameNum; // count of frames handed to frame encoders
+    int                m_outputCount;     // count of frames output so far
+
+    int                m_bframeDelay;     // reorder delay introduced by B frames
+    int64_t            m_firstPts;
+    int64_t            m_bframeDelayTime;
+    int64_t            m_prevReorderedPts[2];
+
+    ThreadPool*        m_threadPool;      // worker pool shared by frame encoders
+    FrameEncoder*      m_frameEncoder[X265_MAX_FRAME_THREADS]; // one per frame thread
+    DPB*               m_dpb;             // decoded picture buffer
+
+    Frame*             m_exportedPic;     // last picture handed out via pic_out, pending recycle
+
+    int                m_numPools;
+    int                m_curEncoder;      // round-robin index into m_frameEncoder
+
+    /* Collect statistics globally */
+    EncStats           m_analyzeAll;
+    EncStats           m_analyzeI;
+    EncStats           m_analyzeP;
+    EncStats           m_analyzeB;
+    int64_t            m_encodeStartTime;
+
+    // weighted prediction
+    int                m_numLumaWPFrames;    // number of P frames with weighted luma reference
+    int                m_numChromaWPFrames;  // number of P frames with weighted chroma reference
+    int                m_numLumaWPBiFrames;  // number of B frames with weighted luma reference
+    int                m_numChromaWPBiFrames; // number of B frames with weighted chroma reference
+    FILE*              m_analysisFile;    // analysis load/save file used by read/writeAnalysisFile()
+    int                m_conformanceMode;
+    VPS                m_vps;
+    SPS                m_sps;
+    PPS                m_pps;
+    NALList            m_nalList;
+    ScalingList        m_scalingList;      // quantization matrix information
+
+    bool               m_emitCLLSEI;      // emit content-light-level SEI
+    int                m_lastBPSEI;
+    uint32_t           m_numDelayedPic;
+
+    x265_param*        m_param;           // active encoder parameters
+    x265_param*        m_latestParam;     // most recently supplied (possibly reconfigured) parameters
+    RateControl*       m_rateControl;
+    Lookahead*         m_lookahead;
+    Window             m_conformanceWindow;
+
+    bool               m_bZeroLatency;     // x265_encoder_encode() returns NALs for the input picture, zero lag
+    bool               m_aborted;          // fatal error detected
+    bool               m_reconfigured;      // reconfigure of encoder detected
+
+    uint32_t           m_residualSumEmergency[MAX_NUM_TR_CATEGORIES][MAX_NUM_TR_COEFFS]; // emergency-denoise accumulators
+    uint16_t           (*m_offsetEmergency)[MAX_NUM_TR_CATEGORIES][MAX_NUM_TR_COEFFS];
+    uint32_t           m_countEmergency[MAX_NUM_TR_CATEGORIES];
+
+    Encoder();
+    ~Encoder() {} // teardown happens in destroy(), not the destructor
+
+    void create();
+    void stopJobs();
+    void destroy();
+
+    int encode(const x265_picture* pic, x265_picture *pic_out); // feed one input picture, possibly receive one output picture
+
+    int reconfigureParam(x265_param* encParam, x265_param* param);
+
+    void getStreamHeaders(NALList& list, Entropy& sbacCoder, Bitstream& bs); // emit VPS/SPS/PPS headers
+
+    void fetchStats(x265_stats* stats, size_t statsSizeBytes);
+
+    void printSummary();
+
+    char* statsString(EncStats&, char*);
+
+    void configure(x265_param *param);
+
+    void updateVbvPlan(RateControl* rc);
+
+    void allocAnalysis(x265_analysis_data* analysis);  // allocate intra/inter analysis buffers per slice type
+
+    void freeAnalysis(x265_analysis_data* analysis);   // release buffers from allocAnalysis()
+
+    void readAnalysisFile(x265_analysis_data* analysis, int poc);
+
+    void writeAnalysisFile(x265_analysis_data* pic);
+
+    void finishFrameStats(Frame* pic, FrameEncoder *curEncoder, uint64_t bits, x265_frame_stats* frameStats);
+
+protected:
+
+    void initVPS(VPS *vps); // populate parameter-set defaults from m_param
+    void initSPS(SPS *sps);
+    void initPPS(PPS *pps);
+};
+}
+
+#endif // ifndef X265_ENCODER_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/entropy.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2288 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "common.h"
+#include "framedata.h"
+#include "scalinglist.h"
+#include "quant.h"
+#include "contexts.h"
+#include "picyuv.h"
+
+#include "sao.h"
+#include "entropy.h"
+
+#define CU_DQP_TU_CMAX 5 // max number bins for truncated unary
+#define CU_DQP_EG_k    0 // exp-golomb order
+#define START_VALUE    8 // start value for dpcm mode
+
+namespace X265_NS {
+
+Entropy::Entropy() // initialize entropy coder state: zero fraction-bit counter and validate context table size
+{
+    markValid();
+    m_fracBits = 0;
+    m_pad = 0;
+    X265_CHECK(sizeof(m_contextState) >= sizeof(m_contextState[0]) * MAX_OFF_CTX_MOD, "context state table is too small\n");
+}
+
+void Entropy::codeVPS(const VPS& vps) // write HEVC video parameter set syntax (element names follow the spec)
+{
+    WRITE_CODE(0,       4, "vps_video_parameter_set_id");
+    WRITE_CODE(3,       2, "vps_reserved_three_2bits");
+    WRITE_CODE(0,       6, "vps_reserved_zero_6bits");
+    WRITE_CODE(vps.maxTempSubLayers - 1, 3, "vps_max_sub_layers_minus1");
+    WRITE_FLAG(vps.maxTempSubLayers == 1,   "vps_temporal_id_nesting_flag");
+    WRITE_CODE(0xffff, 16, "vps_reserved_ffff_16bits");
+
+    codeProfileTier(vps.ptl, vps.maxTempSubLayers);
+
+    WRITE_FLAG(true, "vps_sub_layer_ordering_info_present_flag");
+
+    for (uint32_t i = 0; i < vps.maxTempSubLayers; i++) // same DPB constraints repeated per temporal sub-layer
+    {
+        WRITE_UVLC(vps.maxDecPicBuffering - 1, "vps_max_dec_pic_buffering_minus1[i]");
+        WRITE_UVLC(vps.numReorderPics,         "vps_num_reorder_pics[i]");
+        WRITE_UVLC(vps.maxLatencyIncrease + 1, "vps_max_latency_increase_plus1[i]");
+    }
+
+    WRITE_CODE(0, 6, "vps_max_nuh_reserved_zero_layer_id");
+    WRITE_UVLC(0,    "vps_max_op_sets_minus1");
+    WRITE_FLAG(0,    "vps_timing_info_present_flag"); /* we signal timing info in SPS-VUI */
+    WRITE_FLAG(0,    "vps_extension_flag");
+}
+
+void Entropy::codeSPS(const SPS& sps, const ScalingList& scalingList, const ProfileTierLevel& ptl) // write HEVC sequence parameter set syntax
+{
+    WRITE_CODE(0, 4, "sps_video_parameter_set_id");
+    WRITE_CODE(sps.maxTempSubLayers - 1, 3, "sps_max_sub_layers_minus1");
+    WRITE_FLAG(sps.maxTempSubLayers == 1,   "sps_temporal_id_nesting_flag");
+
+    codeProfileTier(ptl, sps.maxTempSubLayers);
+
+    WRITE_UVLC(0, "sps_seq_parameter_set_id");
+    WRITE_UVLC(sps.chromaFormatIdc, "chroma_format_idc");
+
+    if (sps.chromaFormatIdc == X265_CSP_I444) // flag only present for 4:4:4; always coded as interleaved planes here
+        WRITE_FLAG(0,                       "separate_colour_plane_flag");
+
+    WRITE_UVLC(sps.picWidthInLumaSamples,   "pic_width_in_luma_samples");
+    WRITE_UVLC(sps.picHeightInLumaSamples,  "pic_height_in_luma_samples");
+
+    const Window& conf = sps.conformanceWindow;
+    WRITE_FLAG(conf.bEnabled, "conformance_window_flag");
+    if (conf.bEnabled)
+    {
+        int hShift = CHROMA_H_SHIFT(sps.chromaFormatIdc), vShift = CHROMA_V_SHIFT(sps.chromaFormatIdc); // offsets are signalled in chroma units
+        WRITE_UVLC(conf.leftOffset   >> hShift, "conf_win_left_offset");
+        WRITE_UVLC(conf.rightOffset  >> hShift, "conf_win_right_offset");
+        WRITE_UVLC(conf.topOffset    >> vShift, "conf_win_top_offset");
+        WRITE_UVLC(conf.bottomOffset >> vShift, "conf_win_bottom_offset");
+    }
+
+    WRITE_UVLC(X265_DEPTH - 8,   "bit_depth_luma_minus8");
+    WRITE_UVLC(X265_DEPTH - 8,   "bit_depth_chroma_minus8");
+    WRITE_UVLC(BITS_FOR_POC - 4, "log2_max_pic_order_cnt_lsb_minus4");
+    WRITE_FLAG(true,             "sps_sub_layer_ordering_info_present_flag");
+
+    for (uint32_t i = 0; i < sps.maxTempSubLayers; i++) // mirrors the per-sub-layer loop in codeVPS()
+    {
+        WRITE_UVLC(sps.maxDecPicBuffering - 1, "sps_max_dec_pic_buffering_minus1[i]");
+        WRITE_UVLC(sps.numReorderPics,         "sps_num_reorder_pics[i]");
+        WRITE_UVLC(sps.maxLatencyIncrease + 1, "sps_max_latency_increase_plus1[i]");
+    }
+
+    WRITE_UVLC(sps.log2MinCodingBlockSize - 3,    "log2_min_coding_block_size_minus3");
+    WRITE_UVLC(sps.log2DiffMaxMinCodingBlockSize, "log2_diff_max_min_coding_block_size");
+    WRITE_UVLC(sps.quadtreeTULog2MinSize - 2,     "log2_min_transform_block_size_minus2");
+    WRITE_UVLC(sps.quadtreeTULog2MaxSize - sps.quadtreeTULog2MinSize, "log2_diff_max_min_transform_block_size");
+    WRITE_UVLC(sps.quadtreeTUMaxDepthInter - 1,   "max_transform_hierarchy_depth_inter");
+    WRITE_UVLC(sps.quadtreeTUMaxDepthIntra - 1,   "max_transform_hierarchy_depth_intra");
+    WRITE_FLAG(scalingList.m_bEnabled,            "scaling_list_enabled_flag");
+    if (scalingList.m_bEnabled)
+    {
+        WRITE_FLAG(scalingList.m_bDataPresent,    "sps_scaling_list_data_present_flag");
+        if (scalingList.m_bDataPresent)
+            codeScalingList(scalingList); // custom matrices embedded in the SPS
+    }
+    WRITE_FLAG(sps.bUseAMP, "amp_enabled_flag");
+    WRITE_FLAG(sps.bUseSAO, "sample_adaptive_offset_enabled_flag");
+
+    WRITE_FLAG(0, "pcm_enabled_flag");
+    WRITE_UVLC(0, "num_short_term_ref_pic_sets"); // RPS is signalled per-slice instead (see codeSliceHeader)
+    WRITE_FLAG(0, "long_term_ref_pics_present_flag");
+
+    WRITE_FLAG(sps.bTemporalMVPEnabled, "sps_temporal_mvp_enable_flag");
+    WRITE_FLAG(sps.bUseStrongIntraSmoothing, "sps_strong_intra_smoothing_enable_flag");
+
+    WRITE_FLAG(1, "vui_parameters_present_flag"); // VUI always emitted (carries timing info)
+    codeVUI(sps.vuiParameters, sps.maxTempSubLayers);
+
+    WRITE_FLAG(0, "sps_extension_flag");
+}
+
+void Entropy::codePPS(const PPS& pps) // write HEVC picture parameter set syntax
+{
+    WRITE_UVLC(0,                          "pps_pic_parameter_set_id");
+    WRITE_UVLC(0,                          "pps_seq_parameter_set_id");
+    WRITE_FLAG(0,                          "dependent_slice_segments_enabled_flag");
+    WRITE_FLAG(0,                          "output_flag_present_flag");
+    WRITE_CODE(0, 3,                       "num_extra_slice_header_bits");
+    WRITE_FLAG(pps.bSignHideEnabled,       "sign_data_hiding_flag");
+    WRITE_FLAG(0,                          "cabac_init_present_flag");
+    WRITE_UVLC(0,                          "num_ref_idx_l0_default_active_minus1"); // default 1 ref; slices override when needed
+    WRITE_UVLC(0,                          "num_ref_idx_l1_default_active_minus1");
+
+    WRITE_SVLC(0, "init_qp_minus26"); // QP 26 baseline; actual QP sent as slice_qp_delta
+    WRITE_FLAG(pps.bConstrainedIntraPred, "constrained_intra_pred_flag");
+    WRITE_FLAG(pps.bTransformSkipEnabled, "transform_skip_enabled_flag");
+
+    WRITE_FLAG(pps.bUseDQP,                "cu_qp_delta_enabled_flag");
+    if (pps.bUseDQP)
+        WRITE_UVLC(pps.maxCuDQPDepth,      "diff_cu_qp_delta_depth");
+
+    WRITE_SVLC(pps.chromaQpOffset[0],      "pps_cb_qp_offset");
+    WRITE_SVLC(pps.chromaQpOffset[1],      "pps_cr_qp_offset");
+    WRITE_FLAG(0,                          "pps_slice_chroma_qp_offsets_present_flag");
+
+    WRITE_FLAG(pps.bUseWeightPred,            "weighted_pred_flag");
+    WRITE_FLAG(pps.bUseWeightedBiPred,        "weighted_bipred_flag");
+    WRITE_FLAG(pps.bTransquantBypassEnabled,  "transquant_bypass_enable_flag");
+    WRITE_FLAG(0,                             "tiles_enabled_flag");
+    WRITE_FLAG(pps.bEntropyCodingSyncEnabled, "entropy_coding_sync_enabled_flag"); // WPP
+    WRITE_FLAG(1,                             "loop_filter_across_slices_enabled_flag");
+
+    WRITE_FLAG(pps.bDeblockingFilterControlPresent, "deblocking_filter_control_present_flag");
+    if (pps.bDeblockingFilterControlPresent)
+    {
+        WRITE_FLAG(0,                               "deblocking_filter_override_enabled_flag");
+        WRITE_FLAG(pps.bPicDisableDeblockingFilter, "pps_disable_deblocking_filter_flag");
+        if (!pps.bPicDisableDeblockingFilter) // offsets only meaningful when deblocking is on
+        {
+            WRITE_SVLC(pps.deblockingFilterBetaOffsetDiv2, "pps_beta_offset_div2");
+            WRITE_SVLC(pps.deblockingFilterTcOffsetDiv2,   "pps_tc_offset_div2");
+        }
+    }
+
+    WRITE_FLAG(0, "pps_scaling_list_data_present_flag"); // scaling lists, if any, live in the SPS
+    WRITE_FLAG(0, "lists_modification_present_flag");
+    WRITE_UVLC(0, "log2_parallel_merge_level_minus2");
+    WRITE_FLAG(0, "slice_segment_header_extension_present_flag");
+    WRITE_FLAG(0, "pps_extension_flag");
+}
+
+void Entropy::codeProfileTier(const ProfileTierLevel& ptl, int maxTempSubLayers) // write profile_tier_level() syntax shared by VPS and SPS
+{
+    WRITE_CODE(0, 2,                "XXX_profile_space[]");
+    WRITE_FLAG(ptl.tierFlag,        "XXX_tier_flag[]");
+    WRITE_CODE(ptl.profileIdc, 5,   "XXX_profile_idc[]");
+    for (int j = 0; j < 32; j++)
+        WRITE_FLAG(ptl.profileCompatibilityFlag[j], "XXX_profile_compatibility_flag[][j]");
+
+    WRITE_FLAG(ptl.progressiveSourceFlag,   "general_progressive_source_flag");
+    WRITE_FLAG(ptl.interlacedSourceFlag,    "general_interlaced_source_flag");
+    WRITE_FLAG(ptl.nonPackedConstraintFlag, "general_non_packed_constraint_flag");
+    WRITE_FLAG(ptl.frameOnlyConstraintFlag, "general_frame_only_constraint_flag");
+
+    if (ptl.profileIdc == Profile::MAINREXT || ptl.profileIdc == Profile::HIGHTHROUGHPUTREXT) // Range-Extensions profiles carry explicit constraint flags
+    {
+        uint32_t bitDepthConstraint = ptl.bitDepthConstraint;
+        int csp = ptl.chromaFormatConstraint;
+        WRITE_FLAG(bitDepthConstraint<=12, "general_max_12bit_constraint_flag");
+        WRITE_FLAG(bitDepthConstraint<=10, "general_max_10bit_constraint_flag");
+        WRITE_FLAG(bitDepthConstraint<= 8 && csp != X265_CSP_I422 , "general_max_8bit_constraint_flag");
+        WRITE_FLAG(csp == X265_CSP_I422 || csp == X265_CSP_I420 || csp == X265_CSP_I400, "general_max_422chroma_constraint_flag");
+        WRITE_FLAG(csp == X265_CSP_I420 || csp == X265_CSP_I400,                         "general_max_420chroma_constraint_flag");
+        WRITE_FLAG(csp == X265_CSP_I400,                                                 "general_max_monochrome_constraint_flag");
+        WRITE_FLAG(ptl.intraConstraintFlag,        "general_intra_constraint_flag");
+        WRITE_FLAG(ptl.onePictureOnlyConstraintFlag,"general_one_picture_only_constraint_flag");
+        WRITE_FLAG(ptl.lowerBitRateConstraintFlag, "general_lower_bit_rate_constraint_flag");
+        WRITE_CODE(0 , 16, "XXX_reserved_zero_35bits[0..15]"); // 35 reserved bits split 16+16+3
+        WRITE_CODE(0 , 16, "XXX_reserved_zero_35bits[16..31]");
+        WRITE_CODE(0 ,  3, "XXX_reserved_zero_35bits[32..34]");
+    }
+    else
+    {
+        WRITE_CODE(0, 16, "XXX_reserved_zero_44bits[0..15]"); // 44 reserved bits split 16+16+12
+        WRITE_CODE(0, 16, "XXX_reserved_zero_44bits[16..31]");
+        WRITE_CODE(0, 12, "XXX_reserved_zero_44bits[32..43]");
+    }
+
+    WRITE_CODE(ptl.levelIdc, 8, "general_level_idc");
+
+    if (maxTempSubLayers > 1) // no per-sub-layer PTL is signalled; pad with reserved bits
+    {
+         WRITE_FLAG(0, "sub_layer_profile_present_flag[i]");
+         WRITE_FLAG(0, "sub_layer_level_present_flag[i]");
+         for (int i = maxTempSubLayers - 1; i < 8 ; i++)
+             WRITE_CODE(0, 2, "reserved_zero_2bits");
+    }
+}
+
+void Entropy::codeVUI(const VUI& vui, int maxSubTLayers) // write vui_parameters() syntax: aspect ratio, signal type, timing, HRD
+{
+    WRITE_FLAG(vui.aspectRatioInfoPresentFlag,  "aspect_ratio_info_present_flag");
+    if (vui.aspectRatioInfoPresentFlag)
+    {
+        WRITE_CODE(vui.aspectRatioIdc, 8,       "aspect_ratio_idc");
+        if (vui.aspectRatioIdc == 255) // 255 == Extended_SAR: explicit width/height follow
+        {
+            WRITE_CODE(vui.sarWidth, 16,        "sar_width");
+            WRITE_CODE(vui.sarHeight, 16,       "sar_height");
+        }
+    }
+
+    WRITE_FLAG(vui.overscanInfoPresentFlag,     "overscan_info_present_flag");
+    if (vui.overscanInfoPresentFlag)
+        WRITE_FLAG(vui.overscanAppropriateFlag, "overscan_appropriate_flag");
+
+    WRITE_FLAG(vui.videoSignalTypePresentFlag,  "video_signal_type_present_flag");
+    if (vui.videoSignalTypePresentFlag)
+    {
+        WRITE_CODE(vui.videoFormat, 3,          "video_format");
+        WRITE_FLAG(vui.videoFullRangeFlag,      "video_full_range_flag");
+        WRITE_FLAG(vui.colourDescriptionPresentFlag, "colour_description_present_flag");
+        if (vui.colourDescriptionPresentFlag)
+        {
+            WRITE_CODE(vui.colourPrimaries, 8,         "colour_primaries");
+            WRITE_CODE(vui.transferCharacteristics, 8, "transfer_characteristics");
+            WRITE_CODE(vui.matrixCoefficients, 8,      "matrix_coefficients");
+        }
+    }
+
+    WRITE_FLAG(vui.chromaLocInfoPresentFlag,           "chroma_loc_info_present_flag");
+    if (vui.chromaLocInfoPresentFlag)
+    {
+        WRITE_UVLC(vui.chromaSampleLocTypeTopField,    "chroma_sample_loc_type_top_field");
+        WRITE_UVLC(vui.chromaSampleLocTypeBottomField, "chroma_sample_loc_type_bottom_field");
+    }
+
+    WRITE_FLAG(0,                                     "neutral_chroma_indication_flag");
+    WRITE_FLAG(vui.fieldSeqFlag,                      "field_seq_flag");
+    WRITE_FLAG(vui.frameFieldInfoPresentFlag,         "frame_field_info_present_flag");
+
+    WRITE_FLAG(vui.defaultDisplayWindow.bEnabled,    "default_display_window_flag");
+    if (vui.defaultDisplayWindow.bEnabled)
+    {
+        WRITE_UVLC(vui.defaultDisplayWindow.leftOffset,   "def_disp_win_left_offset");
+        WRITE_UVLC(vui.defaultDisplayWindow.rightOffset,  "def_disp_win_right_offset");
+        WRITE_UVLC(vui.defaultDisplayWindow.topOffset,    "def_disp_win_top_offset");
+        WRITE_UVLC(vui.defaultDisplayWindow.bottomOffset, "def_disp_win_bottom_offset");
+    }
+
+    WRITE_FLAG(1,                                 "vui_timing_info_present_flag"); // timing is always carried here (VPS omits it, see codeVPS)
+    WRITE_CODE(vui.timingInfo.numUnitsInTick, 32, "vui_num_units_in_tick");
+    WRITE_CODE(vui.timingInfo.timeScale, 32,      "vui_time_scale");
+    WRITE_FLAG(0,                                 "vui_poc_proportional_to_timing_flag");
+
+    WRITE_FLAG(vui.hrdParametersPresentFlag,  "vui_hrd_parameters_present_flag");
+    if (vui.hrdParametersPresentFlag)
+        codeHrdParameters(vui.hrdParameters, maxSubTLayers);
+
+    WRITE_FLAG(0, "bitstream_restriction_flag");
+}
+
+void Entropy::codeScalingList(const ScalingList& scalingList) // write scaling_list_data(): for every size/list either a prediction reference or explicit DPCM coefficients
+{
+    for (int sizeId = 0; sizeId < ScalingList::NUM_SIZES; sizeId++)
+    {
+        for (int listId = 0; listId < ScalingList::NUM_LISTS; listId++)
+        {
+            int predList = scalingList.checkPredMode(sizeId, listId); // >= 0: identical to an earlier list and can be referenced
+            WRITE_FLAG(predList < 0, "scaling_list_pred_mode_flag");
+            if (predList >= 0)
+                WRITE_UVLC(listId - predList, "scaling_list_pred_matrix_id_delta");
+            else // DPCM Mode
+                codeScalingList(scalingList, sizeId, listId);
+        }
+    }
+}
+
+void Entropy::codeScalingList(const ScalingList& scalingList, uint32_t sizeId, uint32_t listId) // write one matrix's coefficients as DPCM deltas in diagonal scan order
+{
+    int coefNum = X265_MIN(ScalingList::MAX_MATRIX_COEF_NUM, (int)ScalingList::s_numCoefPerSize[sizeId]);
+    const uint16_t* scan = (sizeId == 0 ? g_scan4x4[SCAN_DIAG] : g_scan8x8diag); // 4x4 uses its own diagonal scan table
+    int nextCoef = START_VALUE; // DPCM predictor starts at 8
+    int32_t *src = scalingList.m_scalingListCoef[sizeId][listId];
+    int data;
+
+    if (sizeId > BLOCK_8x8) // 16x16/32x32 matrices carry an explicit DC coefficient which also seeds the predictor
+    {
+        WRITE_SVLC(scalingList.m_scalingListDC[sizeId][listId] - 8, "scaling_list_dc_coef_minus8");
+        nextCoef = scalingList.m_scalingListDC[sizeId][listId];
+    }
+    for (int i = 0; i < coefNum; i++)
+    {
+        data = src[scan[i]] - nextCoef;
+        nextCoef = src[scan[i]];
+        if (data > 127) // wrap delta into the signed byte range [-128, 127]
+            data = data - 256;
+        if (data < -128)
+            data = data + 256;
+
+        WRITE_SVLC(data,  "scaling_list_delta_coef");
+    }
+}
+
+void Entropy::codeHrdParameters(const HRDInfo& hrd, int maxSubTLayers) // write hrd_parameters(): NAL HRD only, one CPB, fixed picture rate
+{
+    WRITE_FLAG(1, "nal_hrd_parameters_present_flag");
+    WRITE_FLAG(0, "vcl_hrd_parameters_present_flag");
+    WRITE_FLAG(0, "sub_pic_hrd_params_present_flag");
+
+    WRITE_CODE(hrd.bitRateScale, 4, "bit_rate_scale");
+    WRITE_CODE(hrd.cpbSizeScale, 4, "cpb_size_scale");
+
+    WRITE_CODE(hrd.initialCpbRemovalDelayLength - 1, 5, "initial_cpb_removal_delay_length_minus1");
+    WRITE_CODE(hrd.cpbRemovalDelayLength - 1,        5, "au_cpb_removal_delay_length_minus1");
+    WRITE_CODE(hrd.dpbOutputDelayLength - 1,         5, "dpb_output_delay_length_minus1");
+
+    for (int i = 0; i < maxSubTLayers; i++) // identical CPB description repeated per temporal sub-layer
+    {
+        WRITE_FLAG(1, "fixed_pic_rate_general_flag");
+        WRITE_UVLC(0, "elemental_duration_in_tc_minus1");
+        WRITE_UVLC(0, "cpb_cnt_minus1");
+
+        WRITE_UVLC(hrd.bitRateValue - 1, "bit_rate_value_minus1");
+        WRITE_UVLC(hrd.cpbSizeValue - 1, "cpb_size_value_minus1");
+        WRITE_FLAG(hrd.cbrFlag, "cbr_flag");
+    }
+}
+
+void Entropy::codeAUD(const Slice& slice) // write access unit delimiter: map slice type to the 3-bit pic_type code
+{
+    int picType;
+
+    switch (slice.m_sliceType)
+    {
+    case I_SLICE:
+        picType = 0; // AU contains only I slices
+        break;
+    case P_SLICE:
+        picType = 1; // AU may contain P and I slices
+        break;
+    case B_SLICE:
+        picType = 2; // AU may contain B, P and I slices
+        break;
+    default:
+        picType = 7; // NOTE(review): 7 exceeds the 3-bit defined range 0..2 — presumably a deliberate "unknown" marker, confirm against spec
+        break;
+    }
+
+    WRITE_CODE(picType, 3, "pic_type");
+}
+
+void Entropy::codeSliceHeader(const Slice& slice, FrameData& encData) // write slice_segment_header(); x265 emits exactly one slice per picture
+{
+    WRITE_FLAG(1, "first_slice_segment_in_pic_flag"); // always true: single-slice pictures
+    if (slice.getRapPicFlag())
+        WRITE_FLAG(0, "no_output_of_prior_pics_flag");
+
+    WRITE_UVLC(0, "slice_pic_parameter_set_id");
+
+    /* x265 does not use dependent slices, so always write all this data */
+
+    WRITE_UVLC(slice.m_sliceType, "slice_type");
+
+    if (!slice.getIdrPicFlag()) // IDR pictures carry no POC LSB or reference picture set
+    {
+        int picOrderCntLSB = (slice.m_poc - slice.m_lastIDR + (1 << BITS_FOR_POC)) % (1 << BITS_FOR_POC); // POC relative to last IDR, wrapped to the LSB range
+        WRITE_CODE(picOrderCntLSB, BITS_FOR_POC, "pic_order_cnt_lsb");
+
+#if _DEBUG || CHECKED_BUILD
+        // check for bitstream restriction stating that:
+        // If the current picture is a BLA or CRA picture, the value of NumPocTotalCurr shall be equal to 0.
+        // Ideally this process should not be repeated for each slice in a picture
+        if (slice.isIRAP())
+            for (int picIdx = 0; picIdx < slice.m_rps.numberOfPictures; picIdx++)
+            {
+                X265_CHECK(!slice.m_rps.bUsed[picIdx], "pic unused failure\n");
+            }
+#endif
+
+        WRITE_FLAG(0, "short_term_ref_pic_set_sps_flag"); // RPS is coded explicitly per-slice (SPS signalled none)
+        codeShortTermRefPicSet(slice.m_rps);
+
+        if (slice.m_sps->bTemporalMVPEnabled)
+            WRITE_FLAG(1, "slice_temporal_mvp_enable_flag");
+    }
+    const SAOParam *saoParam = encData.m_saoParam;
+    if (slice.m_sps->bUseSAO)
+    {
+        WRITE_FLAG(saoParam->bSaoFlag[0], "slice_sao_luma_flag");
+        if (slice.m_sps->chromaFormatIdc != X265_CSP_I400) { // no chroma SAO flag for monochrome
+            WRITE_FLAG(saoParam->bSaoFlag[1], "slice_sao_chroma_flag");
+        }
+    }
+
+    // check if numRefIdx match the defaults (1, hard-coded in PPS). If not, override
+    // TODO: this might be a place to optimize a few bits per slice, by using param->refs for L0 default
+
+    if (!slice.isIntra())
+    {
+        bool overrideFlag = (slice.m_numRefIdx[0] != 1 || (slice.isInterB() && slice.m_numRefIdx[1] != 1));
+        WRITE_FLAG(overrideFlag, "num_ref_idx_active_override_flag");
+        if (overrideFlag)
+        {
+            WRITE_UVLC(slice.m_numRefIdx[0] - 1, "num_ref_idx_l0_active_minus1");
+            if (slice.isInterB())
+                WRITE_UVLC(slice.m_numRefIdx[1] - 1, "num_ref_idx_l1_active_minus1");
+            else
+            {
+                X265_CHECK(slice.m_numRefIdx[1] == 0, "expected no L1 references for P slice\n");
+            }
+        }
+    }
+    else
+    {
+        X265_CHECK(!slice.m_numRefIdx[0] && !slice.m_numRefIdx[1], "expected no references for I slice\n");
+    }
+
+    if (slice.isInterB())
+        WRITE_FLAG(0, "mvd_l1_zero_flag");
+
+    if (slice.m_sps->bTemporalMVPEnabled)
+    {
+        if (slice.m_sliceType == B_SLICE)
+            WRITE_FLAG(slice.m_colFromL0Flag, "collocated_from_l0_flag");
+
+        if (slice.m_sliceType != I_SLICE &&
+            ((slice.m_colFromL0Flag && slice.m_numRefIdx[0] > 1) ||
+            (!slice.m_colFromL0Flag && slice.m_numRefIdx[1] > 1))) // index only needed when the chosen list has more than one ref
+        {
+            WRITE_UVLC(slice.m_colRefIdx, "collocated_ref_idx");
+        }
+    }
+    if ((slice.m_pps->bUseWeightPred && slice.m_sliceType == P_SLICE) || (slice.m_pps->bUseWeightedBiPred && slice.m_sliceType == B_SLICE))
+        codePredWeightTable(slice);
+
+    X265_CHECK(slice.m_maxNumMergeCand <= MRG_MAX_NUM_CANDS, "too many merge candidates\n");
+    if (!slice.isIntra())
+        WRITE_UVLC(MRG_MAX_NUM_CANDS - slice.m_maxNumMergeCand, "five_minus_max_num_merge_cand");
+
+    int code = slice.m_sliceQp - 26; // delta against the PPS init QP of 26 (see codePPS)
+    WRITE_SVLC(code, "slice_qp_delta");
+    
+    bool isSAOEnabled = slice.m_sps->bUseSAO ? saoParam->bSaoFlag[0] || saoParam->bSaoFlag[1] : false;
+    bool isDBFEnabled = !slice.m_pps->bPicDisableDeblockingFilter;
+
+    if (isSAOEnabled || isDBFEnabled) // flag only present when at least one in-loop filter is active
+        WRITE_FLAG(slice.m_sLFaseFlag, "slice_loop_filter_across_slices_enabled_flag");
+}
+
+/** write wavefront substreams sizes for the slice header */
+// maxOffset is the largest substream size; compute the minimum fixed-length
+// field width able to represent (size - 1), then emit one entry point offset
+// per CTU row after the first so a decoder can seek to each WPP substream.
+void Entropy::codeSliceHeaderWPPEntryPoints(const Slice& slice, const uint32_t *substreamSizes, uint32_t maxOffset)
+{
+    uint32_t offsetLen = 1;
+    while (maxOffset >= (1U << offsetLen))
+    {
+        offsetLen++;
+        X265_CHECK(offsetLen < 32, "offsetLen is too large\n");
+    }
+
+    // one entry point per CTU row except the first row
+    uint32_t numRows = slice.m_sps->numCuInHeight - 1;
+    WRITE_UVLC(numRows, "num_entry_point_offsets");
+    if (numRows > 0)
+        WRITE_UVLC(offsetLen - 1, "offset_len_minus1");
+
+    for (uint32_t i = 0; i < numRows; i++)
+        WRITE_CODE(substreamSizes[i] - 1, offsetLen, "entry_point_offset_minus1");
+}
+
+/* write a short-term reference picture set: the negative (earlier POC) set
+ * followed by the positive (later POC) set.  Each delta POC is coded as the
+ * difference from the previous entry minus one, with a flag telling whether
+ * the picture is used by the current picture */
+void Entropy::codeShortTermRefPicSet(const RPS& rps)
+{
+    WRITE_UVLC(rps.numberOfNegativePictures, "num_negative_pics");
+    WRITE_UVLC(rps.numberOfPositivePictures, "num_positive_pics");
+    int prev = 0;
+    for (int j = 0; j < rps.numberOfNegativePictures; j++)
+    {
+        // negative deltas: prev - deltaPOC is positive, code its minus-1 value
+        WRITE_UVLC(prev - rps.deltaPOC[j] - 1, "delta_poc_s0_minus1");
+        prev = rps.deltaPOC[j];
+        WRITE_FLAG(rps.bUsed[j], "used_by_curr_pic_s0_flag");
+    }
+
+    prev = 0;
+    for (int j = rps.numberOfNegativePictures; j < rps.numberOfNegativePictures + rps.numberOfPositivePictures; j++)
+    {
+        WRITE_UVLC(rps.deltaPOC[j] - prev - 1, "delta_poc_s1_minus1");
+        prev = rps.deltaPOC[j];
+        WRITE_FLAG(rps.bUsed[j], "used_by_curr_pic_s1_flag");
+    }
+}
+
+/* encode one CTU; delta-QP signaling is armed per CTU when the PPS enables it */
+void Entropy::encodeCTU(const CUData& ctu, const CUGeom& cuGeom)
+{
+    bool bEncodeDQP = ctu.m_slice->m_pps->bUseDQP;
+    encodeCU(ctu, cuGeom, 0, 0, bEncodeDQP);
+}
+
+/* encode a CU block recursively */
+// Walks the CU quadtree depth-first.  bEncodeDQP is an in/out flag: it is
+// re-armed whenever recursion reaches the PPS maxCuDQPDepth level and is
+// consumed (cleared) later by codeCoeff()/encodeTransform().
+void Entropy::encodeCU(const CUData& ctu, const CUGeom& cuGeom, uint32_t absPartIdx, uint32_t depth, bool& bEncodeDQP)
+{
+    const Slice* slice = ctu.m_slice;
+
+    int cuSplitFlag = !(cuGeom.flags & CUGeom::LEAF);
+    int cuUnsplitFlag = !(cuGeom.flags & CUGeom::SPLIT_MANDATORY);
+
+    if (!cuUnsplitFlag)
+    {
+        // split is mandatory (CU crosses the picture boundary): the split is
+        // implied, so recurse without signaling a split flag; children not
+        // marked PRESENT lie outside the picture and are skipped entirely
+        uint32_t qNumParts = cuGeom.numPartitions >> 2;
+        if (depth == slice->m_pps->maxCuDQPDepth && slice->m_pps->bUseDQP)
+            bEncodeDQP = true;
+        for (uint32_t qIdx = 0; qIdx < 4; ++qIdx, absPartIdx += qNumParts)
+        {
+            const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + qIdx);
+            if (childGeom.flags & CUGeom::PRESENT)
+                encodeCU(ctu, childGeom, absPartIdx, depth + 1, bEncodeDQP);
+        }
+        return;
+    }
+
+    if (cuSplitFlag) 
+        codeSplitFlag(ctu, absPartIdx, depth);
+
+    if (depth < ctu.m_cuDepth[absPartIdx] && depth < g_maxCUDepth)
+    {
+        // explicitly signaled split: recurse into the four sub-CUs
+        uint32_t qNumParts = cuGeom.numPartitions >> 2;
+        if (depth == slice->m_pps->maxCuDQPDepth && slice->m_pps->bUseDQP)
+            bEncodeDQP = true;
+        for (uint32_t qIdx = 0; qIdx < 4; ++qIdx, absPartIdx += qNumParts)
+        {
+            const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + qIdx);
+            encodeCU(ctu, childGeom, absPartIdx, depth + 1, bEncodeDQP);
+        }
+        return;
+    }
+
+    // leaf CU from here on
+    if (depth <= slice->m_pps->maxCuDQPDepth && slice->m_pps->bUseDQP)
+        bEncodeDQP = true;
+
+    if (slice->m_pps->bTransquantBypassEnabled)
+        codeCUTransquantBypassFlag(ctu.m_tqBypass[absPartIdx]);
+
+    if (!slice->isIntra())
+    {
+        codeSkipFlag(ctu, absPartIdx);
+        if (ctu.isSkipped(absPartIdx))
+        {
+            // skip CU: only a merge index is coded, no residual follows
+            codeMergeIndex(ctu, absPartIdx);
+            finishCU(ctu, absPartIdx, depth, bEncodeDQP);
+            return;
+        }
+        codePredMode(ctu.m_predMode[absPartIdx]);
+    }
+
+    codePartSize(ctu, absPartIdx, depth);
+
+    // prediction Info ( Intra : direction mode, Inter : Mv, reference idx )
+    codePredInfo(ctu, absPartIdx);
+
+    uint32_t tuDepthRange[2];
+    if (ctu.isIntra(absPartIdx))
+        ctu.getIntraTUQtDepthRange(tuDepthRange, absPartIdx);
+    else
+        ctu.getInterTUQtDepthRange(tuDepthRange, absPartIdx);
+
+    // Encode Coefficients, allow codeCoeff() to modify bEncodeDQP
+    codeCoeff(ctu, absPartIdx, bEncodeDQP, tuDepthRange);
+
+    // --- write terminating bit ---
+    finishCU(ctu, absPartIdx, depth, bEncodeDQP);
+}
+
+/* Return bit count of signaling inter mode */
+// Estimates the CABAC cost of coding "not skip" + "inter" + the partition
+// size for RD decisions.  The method is const: contexts are only read via
+// bitsCodeBin(), never updated.
+uint32_t Entropy::bitsInterMode(const CUData& cu, uint32_t absPartIdx, uint32_t depth) const
+{
+    uint32_t bits;
+    bits = bitsCodeBin(0, m_contextState[OFF_SKIP_FLAG_CTX + cu.getCtxSkipFlag(absPartIdx)]); /* not skip */
+    bits += bitsCodeBin(0, m_contextState[OFF_PRED_MODE_CTX]); /* inter */
+    PartSize partSize = (PartSize)cu.m_partSize[absPartIdx];
+    switch (partSize)
+    {
+    case SIZE_2Nx2N:
+        bits += bitsCodeBin(1, m_contextState[OFF_PART_SIZE_CTX]);
+        break;
+
+    case SIZE_2NxN:
+    case SIZE_2NxnU:
+    case SIZE_2NxnD:
+        // horizontal partitions: bins 0,1, then AMP refinement when allowed
+        bits += bitsCodeBin(0, m_contextState[OFF_PART_SIZE_CTX + 0]);
+        bits += bitsCodeBin(1, m_contextState[OFF_PART_SIZE_CTX + 1]);
+        if (cu.m_slice->m_sps->maxAMPDepth > depth)
+        {
+            bits += bitsCodeBin((partSize == SIZE_2NxN) ? 1 : 0, m_contextState[OFF_PART_SIZE_CTX + 3]);
+            if (partSize != SIZE_2NxN)
+                bits++; // encodeBinEP((partSize == SIZE_2NxnU ? 0 : 1)); EP bin costs one bit
+        }
+        break;
+
+    case SIZE_Nx2N:
+    case SIZE_nLx2N:
+    case SIZE_nRx2N:
+        // vertical partitions: bins 0,0; extra bin distinguishes Nx2N from
+        // NxN at max depth (except 8x8), then AMP refinement when allowed
+        bits += bitsCodeBin(0, m_contextState[OFF_PART_SIZE_CTX + 0]);
+        bits += bitsCodeBin(0, m_contextState[OFF_PART_SIZE_CTX + 1]);
+        if (depth == g_maxCUDepth && !(cu.m_log2CUSize[absPartIdx] == 3))
+            bits += bitsCodeBin(1, m_contextState[OFF_PART_SIZE_CTX + 2]);
+        if (cu.m_slice->m_sps->maxAMPDepth > depth)
+        {
+            bits += bitsCodeBin((partSize == SIZE_Nx2N) ? 1 : 0, m_contextState[OFF_PART_SIZE_CTX + 3]);
+            if (partSize != SIZE_Nx2N)
+                bits++; // encodeBinEP((partSize == SIZE_nLx2N ? 0 : 1));
+        }
+        break;
+    default:
+        X265_CHECK(0, "invalid CU partition\n");
+        break;
+    }
+
+    return bits;
+}
+
+/* finish encoding a cu and handle end-of-slice conditions */
+// At each slice-granularity boundary, emit end_of_slice_segment_flag (0)
+// unless this CU is the last one in the slice; the final 1 bit is added
+// elsewhere when the stream is terminated.
+void Entropy::finishCU(const CUData& ctu, uint32_t absPartIdx, uint32_t depth, bool bCodeDQP)
+{
+    const Slice* slice = ctu.m_slice;
+    uint32_t realEndAddress = slice->m_endCUAddr;
+    uint32_t cuAddr = ctu.getSCUAddr() + absPartIdx;
+    X265_CHECK(realEndAddress == slice->realEndAddress(slice->m_endCUAddr), "real end address expected\n");
+
+    // boundary test: CU's bottom-right corner lands on a CTU-size grid line
+    // or on the picture's right/bottom edge
+    uint32_t granularityMask = g_maxCUSize - 1;
+    uint32_t cuSize = 1 << ctu.m_log2CUSize[absPartIdx];
+    uint32_t rpelx = ctu.m_cuPelX + g_zscanToPelX[absPartIdx] + cuSize;
+    uint32_t bpely = ctu.m_cuPelY + g_zscanToPelY[absPartIdx] + cuSize;
+    bool granularityBoundary = (((rpelx & granularityMask) == 0 || (rpelx == slice->m_sps->picWidthInLumaSamples )) &&
+                                ((bpely & granularityMask) == 0 || (bpely == slice->m_sps->picHeightInLumaSamples)));
+
+    // when no dQP was coded for this CU, propagate the reference QP so
+    // later prediction uses consistent values (const_cast: bookkeeping only)
+    if (slice->m_pps->bUseDQP)
+        const_cast<CUData&>(ctu).setQPSubParts(bCodeDQP ? ctu.getRefQP(absPartIdx) : ctu.m_qp[absPartIdx], absPartIdx, depth);
+
+    if (granularityBoundary)
+    {
+        // Encode slice finish
+        bool bTerminateSlice = false;
+        if (cuAddr + (NUM_4x4_PARTITIONS >> (depth << 1)) == realEndAddress)
+            bTerminateSlice = true;
+
+        // The 1-terminating bit is added to all streams, so don't add it here when it's 1.
+        if (!bTerminateSlice)
+            encodeBinTrm(0);
+
+        if (!m_bitIf)
+            resetBits(); // TODO: most likely unnecessary
+    }
+}
+
+/* recursively encode the residual quadtree (transform tree) of one CU:
+ * subdivision flags, chroma/luma CBFs, the CU delta-QP (once, when armed
+ * via bCodeDQP), and finally the coefficient levels for each TU */
+void Entropy::encodeTransform(const CUData& cu, uint32_t absPartIdx, uint32_t curDepth, uint32_t log2CurSize,
+                              bool& bCodeDQP, const uint32_t depthRange[2])
+{
+    const bool subdiv = cu.m_tuDepth[absPartIdx] > curDepth;
+
+    /* in each of these conditions, the subdiv flag is implied and not signaled,
+     * so we have checks to make sure the implied value matches our intentions */
+    if (cu.isIntra(absPartIdx) && cu.m_partSize[absPartIdx] != SIZE_2Nx2N && log2CurSize == MIN_LOG2_CU_SIZE)
+    {
+        X265_CHECK(subdiv, "intra NxN requires TU depth below CU depth\n");
+    }
+    else if (cu.isInter(absPartIdx) && cu.m_partSize[absPartIdx] != SIZE_2Nx2N &&
+             !curDepth && cu.m_slice->m_sps->quadtreeTUMaxDepthInter == 1)
+    {
+        X265_CHECK(subdiv, "inter TU must be smaller than CU when not 2Nx2N part size: log2CurSize %d, depthRange[0] %d\n", log2CurSize, depthRange[0]);
+    }
+    else if (log2CurSize > depthRange[1])
+    {
+        X265_CHECK(subdiv, "TU is larger than the max allowed, it should have been split\n");
+    }
+    else if (log2CurSize == cu.m_slice->m_sps->quadtreeTULog2MinSize || log2CurSize == depthRange[0])
+    {
+        X265_CHECK(!subdiv, "min sized TU cannot be subdivided\n");
+    }
+    else
+    {
+        // subdivision is a free choice here, so it must be signaled
+        X265_CHECK(log2CurSize > depthRange[0], "transform size failure\n");
+        codeTransformSubdivFlag(subdiv, 5 - log2CurSize);
+    }
+
+    uint32_t hChromaShift = cu.m_hChromaShift;
+    uint32_t vChromaShift = cu.m_vChromaShift;
+    // chroma TU would fall below 4x4: chroma is coded at the parent level
+    bool bSmallChroma = (log2CurSize - hChromaShift) < 2;
+    
+    if (cu.m_chromaFormat != X265_CSP_I400) {
+        if (!curDepth || !bSmallChroma)
+        {
+            // chroma CBFs are only re-signaled while the parent CBF was set
+            if (!curDepth || cu.getCbf(absPartIdx, TEXT_CHROMA_U, curDepth - 1))
+                codeQtCbfChroma(cu, absPartIdx, TEXT_CHROMA_U, curDepth, !subdiv);
+            if (!curDepth || cu.getCbf(absPartIdx, TEXT_CHROMA_V, curDepth - 1))
+                codeQtCbfChroma(cu, absPartIdx, TEXT_CHROMA_V, curDepth, !subdiv);
+        }
+        else
+        {
+            X265_CHECK(cu.getCbf(absPartIdx, TEXT_CHROMA_U, curDepth) == cu.getCbf(absPartIdx, TEXT_CHROMA_U, curDepth - 1), "chroma xform size match failure\n");
+            X265_CHECK(cu.getCbf(absPartIdx, TEXT_CHROMA_V, curDepth) == cu.getCbf(absPartIdx, TEXT_CHROMA_V, curDepth - 1), "chroma xform size match failure\n");
+        }
+    }
+    
+    if (subdiv)
+    {
+        // recurse into the four quadrant TUs
+        --log2CurSize;
+        ++curDepth;
+
+        uint32_t qNumParts = 1 << (log2CurSize - LOG2_UNIT_SIZE) * 2;
+
+        encodeTransform(cu, absPartIdx + 0 * qNumParts, curDepth, log2CurSize, bCodeDQP, depthRange);
+        encodeTransform(cu, absPartIdx + 1 * qNumParts, curDepth, log2CurSize, bCodeDQP, depthRange);
+        encodeTransform(cu, absPartIdx + 2 * qNumParts, curDepth, log2CurSize, bCodeDQP, depthRange);
+        encodeTransform(cu, absPartIdx + 3 * qNumParts, curDepth, log2CurSize, bCodeDQP, depthRange);
+        return;
+    }
+
+    // for small chroma, address the 4-part aligned parent block
+    uint32_t absPartIdxC = bSmallChroma ? absPartIdx & 0xFC : absPartIdx;
+
+    if (cu.isInter(absPartIdxC) && !curDepth && !cu.getCbf(absPartIdxC, TEXT_CHROMA_U, 0) && !cu.getCbf(absPartIdxC, TEXT_CHROMA_V, 0))
+    {
+        // inter root with no chroma CBF: luma CBF is implied 1, not signaled
+        X265_CHECK(cu.getCbf(absPartIdxC, TEXT_LUMA, 0), "CBF should have been set\n");
+    }
+    else
+        codeQtCbfLuma(cu, absPartIdx, curDepth);
+
+    uint32_t cbfY = cu.getCbf(absPartIdx, TEXT_LUMA, curDepth);
+    uint32_t cbfU = cu.getCbf(absPartIdxC, TEXT_CHROMA_U, curDepth);
+    uint32_t cbfV = cu.getCbf(absPartIdxC, TEXT_CHROMA_V, curDepth);
+    if (!(cbfY || cbfU || cbfV))
+        return;
+
+    // dQP: only for CTU once
+    if (cu.m_slice->m_pps->bUseDQP && bCodeDQP)
+    {
+        uint32_t log2CUSize = cu.m_log2CUSize[absPartIdx];
+        uint32_t absPartIdxLT = absPartIdx & (0xFF << (log2CUSize - LOG2_UNIT_SIZE) * 2);
+        codeDeltaQP(cu, absPartIdxLT);
+        bCodeDQP = false;
+    }
+
+    if (cbfY)
+    {
+        uint32_t coeffOffset = absPartIdx << (LOG2_UNIT_SIZE * 2);
+        codeCoeffNxN(cu, cu.m_trCoeff[0] + coeffOffset, absPartIdx, log2CurSize, TEXT_LUMA);
+        if (!(cbfU || cbfV))
+            return;
+    }
+
+    if (cu.m_chromaFormat != X265_CSP_I400) {
+    if (bSmallChroma)
+    {
+        // chroma coded once for 4 luma TUs: only act on the last (3rd) part
+        if ((absPartIdx & 3) != 3)
+            return;
+
+        const uint32_t log2CurSizeC = 2;
+        const bool splitIntoSubTUs = (cu.m_chromaFormat == X265_CSP_I422);
+        const uint32_t curPartNum = 4;
+        uint32_t coeffOffsetC  = absPartIdxC << (LOG2_UNIT_SIZE * 2 - (hChromaShift + vChromaShift));
+        for (uint32_t chromaId = TEXT_CHROMA_U; chromaId <= TEXT_CHROMA_V; chromaId++)
+        {
+            // 4:2:2 splits each chroma TU vertically into two sub-TUs
+            TURecurse tuIterator(splitIntoSubTUs ? VERTICAL_SPLIT : DONT_SPLIT, curPartNum, absPartIdxC);
+            const coeff_t* coeffChroma = cu.m_trCoeff[chromaId];
+            do
+            {
+                if (cu.getCbf(tuIterator.absPartIdxTURelCU, (TextType)chromaId, curDepth + splitIntoSubTUs))
+                {
+                    uint32_t subTUOffset = tuIterator.section << (log2CurSizeC * 2);
+                    codeCoeffNxN(cu, coeffChroma + coeffOffsetC + subTUOffset, tuIterator.absPartIdxTURelCU, log2CurSizeC, (TextType)chromaId);
+                }
+            }
+            while (tuIterator.isNextSection());
+        }
+    }
+    else
+    {
+        uint32_t log2CurSizeC = log2CurSize - hChromaShift;
+        const bool splitIntoSubTUs = (cu.m_chromaFormat == X265_CSP_I422);
+        uint32_t curPartNum = 1 << (log2CurSize - LOG2_UNIT_SIZE) * 2;
+        uint32_t coeffOffsetC  = absPartIdxC << (LOG2_UNIT_SIZE * 2 - (hChromaShift + vChromaShift));
+        for (uint32_t chromaId = TEXT_CHROMA_U; chromaId <= TEXT_CHROMA_V; chromaId++)
+        {
+            TURecurse tuIterator(splitIntoSubTUs ? VERTICAL_SPLIT : DONT_SPLIT, curPartNum, absPartIdxC);
+            const coeff_t* coeffChroma = cu.m_trCoeff[chromaId];
+            do
+            {
+                if (cu.getCbf(tuIterator.absPartIdxTURelCU, (TextType)chromaId, curDepth + splitIntoSubTUs))
+                {
+                    uint32_t subTUOffset = tuIterator.section << (log2CurSizeC * 2);
+                    codeCoeffNxN(cu, coeffChroma + coeffOffsetC + subTUOffset, tuIterator.absPartIdxTURelCU, log2CurSizeC, (TextType)chromaId);
+                }
+            }
+            while (tuIterator.isNextSection());
+        }
+    }
+    }
+}
+
+/* encode prediction information for one CU: intra directions (luma, and
+ * chroma unless monochrome), or motion data for inter CUs */
+void Entropy::codePredInfo(const CUData& cu, uint32_t absPartIdx)
+{
+    if (cu.isIntra(absPartIdx)) // If it is intra mode, encode intra prediction mode.
+    {
+        codeIntraDirLumaAng(cu, absPartIdx, true);
+        if (cu.m_chromaFormat != X265_CSP_I400)
+        {
+            uint32_t chromaDirMode[NUM_CHROMA_MODE];
+            cu.getAllowedChromaDir(absPartIdx, chromaDirMode);
+
+            codeIntraDirChroma(cu, absPartIdx, chromaDirMode);
+
+            // in 4:4:4 with NxN partitions each of the 4 sub-parts carries
+            // its own chroma direction; the first was coded above
+            if (cu.m_chromaFormat == X265_CSP_I444 && cu.m_partSize[absPartIdx] != SIZE_2Nx2N)
+            {
+                uint32_t qNumParts = 1 << (cu.m_log2CUSize[absPartIdx] - 1 - LOG2_UNIT_SIZE) * 2;
+                for (uint32_t qIdx = 1; qIdx < 4; ++qIdx)
+                {
+                    absPartIdx += qNumParts;
+                    cu.getAllowedChromaDir(absPartIdx, chromaDirMode);
+                    codeIntraDirChroma(cu, absPartIdx, chromaDirMode);
+                }
+            }
+        }
+    }
+    else // if it is inter mode, encode motion vector and reference index
+        codePUWise(cu, absPartIdx);
+}
+
+/** encode motion information for every PU block */
+// Per PU: merge flag, then either the merge index or explicit motion
+// (inter direction for B slices, and per-list refIdx + MVD + MVP index).
+void Entropy::codePUWise(const CUData& cu, uint32_t absPartIdx)
+{
+    X265_CHECK(!cu.isIntra(absPartIdx), "intra block not expected\n");
+    uint32_t numPU = cu.getNumPartInter(absPartIdx);
+
+    for (uint32_t puIdx = 0, subPartIdx = absPartIdx; puIdx < numPU; puIdx++, subPartIdx += cu.getPUOffset(puIdx, absPartIdx))
+    {
+        codeMergeFlag(cu, subPartIdx);
+        if (cu.m_mergeFlag[subPartIdx])
+            codeMergeIndex(cu, subPartIdx);
+        else
+        {
+            if (cu.m_slice->isInterB())
+                codeInterDir(cu, subPartIdx);
+
+            uint32_t interDir = cu.m_interDir[subPartIdx];
+            // bit 0 = L0 used, bit 1 = L1 used
+            for (uint32_t list = 0; list < 2; list++)
+            {
+                if (interDir & (1 << list))
+                {
+                    X265_CHECK(cu.m_slice->m_numRefIdx[list] > 0, "numRefs should have been > 0\n");
+
+                    codeRefFrmIdxPU(cu, subPartIdx, list);
+                    codeMvd(cu, subPartIdx, list);
+                    codeMVPIdx(cu.m_mvpIdx[list][subPartIdx]);
+                }
+            }
+        }
+    }
+}
+
+/** encode reference frame index for a PU block */
+// with a single reference the index is implied and nothing is written
+void Entropy::codeRefFrmIdxPU(const CUData& cu, uint32_t absPartIdx, int list)
+{
+    X265_CHECK(!cu.isIntra(absPartIdx), "intra block not expected\n");
+
+    if (cu.m_slice->m_numRefIdx[list] > 1)
+        codeRefFrmIdx(cu, absPartIdx, list);
+}
+
+/* encode the CU's residual: signal the root CBF for inter CUs (except
+ * 2Nx2N merge, where it is implied) and descend into the transform tree */
+void Entropy::codeCoeff(const CUData& cu, uint32_t absPartIdx, bool& bCodeDQP, const uint32_t depthRange[2])
+{
+    if (!cu.isIntra(absPartIdx))
+    {
+        if (!(cu.m_mergeFlag[absPartIdx] && cu.m_partSize[absPartIdx] == SIZE_2Nx2N))
+            codeQtRootCbf(cu.getQtRootCbf(absPartIdx));
+        if (!cu.getQtRootCbf(absPartIdx))
+            return; // no residual at all
+    }
+
+    uint32_t log2CUSize = cu.m_log2CUSize[absPartIdx];
+    encodeTransform(cu, absPartIdx, 0, log2CUSize, bCodeDQP, depthRange);
+}
+
+/* encode the SAO parameters of one CTU for one plane.  plane 2 (Cr) shares
+ * the type decision with Cb, so type bins are skipped for it and only the
+ * offsets are coded */
+void Entropy::codeSaoOffset(const SaoCtuParam& ctuParam, int plane)
+{
+    int typeIdx = ctuParam.typeIdx;
+
+    if (plane != 2)
+    {
+        encodeBin(typeIdx >= 0, m_contextState[OFF_SAO_TYPE_IDX_CTX]);
+        if (typeIdx >= 0)
+            encodeBinEP(typeIdx < SAO_BO ? 1 : 0);
+    }
+
+    if (typeIdx >= 0)
+    {
+        enum { OFFSET_THRESH = 1 << X265_MIN(X265_DEPTH - 5, 5) };
+        if (typeIdx == SAO_BO)
+        {
+            // band offset: magnitudes, then signs of nonzero offsets, then band position
+            for (int i = 0; i < SAO_BO_LEN; i++)
+                codeSaoMaxUvlc(abs(ctuParam.offset[i]), OFFSET_THRESH - 1);
+
+            for (int i = 0; i < SAO_BO_LEN; i++)
+                if (ctuParam.offset[i] != 0)
+                    encodeBinEP(ctuParam.offset[i] < 0);
+
+            encodeBinsEP(ctuParam.bandPos, 5);
+        }
+        else // if (typeIdx < SAO_BO)
+        {
+            // edge offset: signs are implied (first two positive, last two negative)
+            codeSaoMaxUvlc(ctuParam.offset[0], OFFSET_THRESH - 1);
+            codeSaoMaxUvlc(ctuParam.offset[1], OFFSET_THRESH - 1);
+            codeSaoMaxUvlc(-ctuParam.offset[2], OFFSET_THRESH - 1);
+            codeSaoMaxUvlc(-ctuParam.offset[3], OFFSET_THRESH - 1);
+            if (plane != 2)
+                encodeBinsEP((uint32_t)(typeIdx), 2);
+        }
+    }
+}
+
+/** initialize context model with respect to QP and initialization value */
+// Derives the initial CABAC context state: a linear function of QP gives
+// a state in [1,126]; >= 64 means MPS is 1.  The returned byte packs
+// (probability state << 1) | MPS.
+uint8_t sbacInit(int qp, int initValue)
+{
+    qp = x265_clip3(QP_MIN, QP_MAX_SPEC, qp);
+
+    int  slope      = (initValue >> 4) * 5 - 45;
+    int  offset     = ((initValue & 15) << 3) - 16;
+    int  initState  =  X265_MIN(X265_MAX(1, (((slope * qp) >> 4) + offset)), 126);
+    uint32_t mpState = (initState >= 64);
+    uint32_t state = ((mpState ? (initState - 64) : (63 - initState)) << 1) + mpState;
+
+    return (uint8_t)state;
+}
+
+// Initialize `size` context models from the init table `ctxModel`, which
+// holds one row of `size` entries per slice type.
+static void initBuffer(uint8_t* contextModel, SliceType sliceType, int qp, uint8_t* ctxModel, int size)
+{
+    ctxModel += sliceType * size;
+
+    for (int n = 0; n < size; n++)
+        contextModel[n] = sbacInit(qp, ctxModel[n]);
+}
+
+/* reset all CABAC context models to their slice-type/QP dependent initial
+ * states and restart the arithmetic coder */
+void Entropy::resetEntropy(const Slice& slice)
+{
+    int  qp              = slice.m_sliceQp;
+    SliceType sliceType  = slice.m_sliceType;
+
+    initBuffer(&m_contextState[OFF_SPLIT_FLAG_CTX], sliceType, qp, (uint8_t*)INIT_SPLIT_FLAG, NUM_SPLIT_FLAG_CTX);
+    initBuffer(&m_contextState[OFF_SKIP_FLAG_CTX], sliceType, qp, (uint8_t*)INIT_SKIP_FLAG, NUM_SKIP_FLAG_CTX);
+    initBuffer(&m_contextState[OFF_MERGE_FLAG_EXT_CTX], sliceType, qp, (uint8_t*)INIT_MERGE_FLAG_EXT, NUM_MERGE_FLAG_EXT_CTX);
+    initBuffer(&m_contextState[OFF_MERGE_IDX_EXT_CTX], sliceType, qp, (uint8_t*)INIT_MERGE_IDX_EXT, NUM_MERGE_IDX_EXT_CTX);
+    initBuffer(&m_contextState[OFF_PART_SIZE_CTX], sliceType, qp, (uint8_t*)INIT_PART_SIZE, NUM_PART_SIZE_CTX);
+    initBuffer(&m_contextState[OFF_PRED_MODE_CTX], sliceType, qp, (uint8_t*)INIT_PRED_MODE, NUM_PRED_MODE_CTX);
+    initBuffer(&m_contextState[OFF_ADI_CTX], sliceType, qp, (uint8_t*)INIT_INTRA_PRED_MODE, NUM_ADI_CTX);
+    initBuffer(&m_contextState[OFF_CHROMA_PRED_CTX], sliceType, qp, (uint8_t*)INIT_CHROMA_PRED_MODE, NUM_CHROMA_PRED_CTX);
+    initBuffer(&m_contextState[OFF_DELTA_QP_CTX], sliceType, qp, (uint8_t*)INIT_DQP, NUM_DELTA_QP_CTX);
+    initBuffer(&m_contextState[OFF_INTER_DIR_CTX], sliceType, qp, (uint8_t*)INIT_INTER_DIR, NUM_INTER_DIR_CTX);
+    initBuffer(&m_contextState[OFF_REF_NO_CTX], sliceType, qp, (uint8_t*)INIT_REF_PIC, NUM_REF_NO_CTX);
+    initBuffer(&m_contextState[OFF_MV_RES_CTX], sliceType, qp, (uint8_t*)INIT_MVD, NUM_MV_RES_CTX);
+    initBuffer(&m_contextState[OFF_QT_CBF_CTX], sliceType, qp, (uint8_t*)INIT_QT_CBF, NUM_QT_CBF_CTX);
+    initBuffer(&m_contextState[OFF_TRANS_SUBDIV_FLAG_CTX], sliceType, qp, (uint8_t*)INIT_TRANS_SUBDIV_FLAG, NUM_TRANS_SUBDIV_FLAG_CTX);
+    initBuffer(&m_contextState[OFF_QT_ROOT_CBF_CTX], sliceType, qp, (uint8_t*)INIT_QT_ROOT_CBF, NUM_QT_ROOT_CBF_CTX);
+    initBuffer(&m_contextState[OFF_SIG_CG_FLAG_CTX], sliceType, qp, (uint8_t*)INIT_SIG_CG_FLAG, 2 * NUM_SIG_CG_FLAG_CTX);
+    initBuffer(&m_contextState[OFF_SIG_FLAG_CTX], sliceType, qp, (uint8_t*)INIT_SIG_FLAG, NUM_SIG_FLAG_CTX);
+    initBuffer(&m_contextState[OFF_CTX_LAST_FLAG_X], sliceType, qp, (uint8_t*)INIT_LAST, NUM_CTX_LAST_FLAG_XY);
+    initBuffer(&m_contextState[OFF_CTX_LAST_FLAG_Y], sliceType, qp, (uint8_t*)INIT_LAST, NUM_CTX_LAST_FLAG_XY);
+    initBuffer(&m_contextState[OFF_ONE_FLAG_CTX], sliceType, qp, (uint8_t*)INIT_ONE_FLAG, NUM_ONE_FLAG_CTX);
+    initBuffer(&m_contextState[OFF_ABS_FLAG_CTX], sliceType, qp, (uint8_t*)INIT_ABS_FLAG, NUM_ABS_FLAG_CTX);
+    initBuffer(&m_contextState[OFF_MVP_IDX_CTX], sliceType, qp, (uint8_t*)INIT_MVP_IDX, NUM_MVP_IDX_CTX);
+    initBuffer(&m_contextState[OFF_SAO_MERGE_FLAG_CTX], sliceType, qp, (uint8_t*)INIT_SAO_MERGE_FLAG, NUM_SAO_MERGE_FLAG_CTX);
+    initBuffer(&m_contextState[OFF_SAO_TYPE_IDX_CTX], sliceType, qp, (uint8_t*)INIT_SAO_TYPE_IDX, NUM_SAO_TYPE_IDX_CTX);
+    initBuffer(&m_contextState[OFF_TRANSFORMSKIP_FLAG_CTX], sliceType, qp, (uint8_t*)INIT_TRANSFORMSKIP_FLAG, 2 * NUM_TRANSFORMSKIP_FLAG_CTX);
+    initBuffer(&m_contextState[OFF_TQUANT_BYPASS_FLAG_CTX], sliceType, qp, (uint8_t*)INIT_CU_TRANSQUANT_BYPASS_FLAG, NUM_TQUANT_BYPASS_FLAG_CTX);
+    // new structure
+
+    start();
+}
+
+/* code explicit wp tables */
+// Writes pred_weight_table(): the shared log2 weight denominators, the
+// per-reference luma/chroma presence flags, and then the delta weights and
+// offsets for every flagged reference in each list (L0, plus L1 for B).
+// wp[0] = luma, wp[1] = Cb, wp[2] = Cr entries of the per-ref table.
+void Entropy::codePredWeightTable(const Slice& slice)
+{
+    const WeightParam *wp;
+    bool            bChroma      = (slice.m_sps->chromaFormatIdc != X265_CSP_I400);
+    bool            bDenomCoded  = false;
+    int             numRefDirs   = slice.m_sliceType == B_SLICE ? 2 : 1;
+    uint32_t        totalSignalledWeightFlags = 0;
+
+    if ((slice.m_sliceType == P_SLICE && slice.m_pps->bUseWeightPred) ||
+        (slice.m_sliceType == B_SLICE && slice.m_pps->bUseWeightedBiPred))
+    {
+        for (int list = 0; list < numRefDirs; list++)
+        {
+            // pass 1: denominators (once) and luma presence flags
+            for (int ref = 0; ref < slice.m_numRefIdx[list]; ref++)
+            {
+                wp = slice.m_weightPredTable[list][ref];
+                if (!bDenomCoded)
+                {
+                    WRITE_UVLC(wp[0].log2WeightDenom, "luma_log2_weight_denom");
+
+                    if (bChroma)
+                    {
+                        int deltaDenom = wp[1].log2WeightDenom - wp[0].log2WeightDenom;
+                        WRITE_SVLC(deltaDenom, "delta_chroma_log2_weight_denom");
+                    }
+                    bDenomCoded = true;
+                }
+                WRITE_FLAG(wp[0].bPresentFlag, "luma_weight_lX_flag");
+                totalSignalledWeightFlags += wp[0].bPresentFlag;
+            }
+
+            // pass 2: chroma presence flags
+            if (bChroma)
+            {
+                for (int ref = 0; ref < slice.m_numRefIdx[list]; ref++)
+                {
+                    wp = slice.m_weightPredTable[list][ref];
+                    WRITE_FLAG(wp[1].bPresentFlag, "chroma_weight_lX_flag");
+                    totalSignalledWeightFlags += 2 * wp[1].bPresentFlag;
+                }
+            }
+
+            // pass 3: the actual weights/offsets for flagged references
+            for (int ref = 0; ref < slice.m_numRefIdx[list]; ref++)
+            {
+                wp = slice.m_weightPredTable[list][ref];
+                if (wp[0].bPresentFlag)
+                {
+                    // weight is coded relative to the identity weight (1 << denom)
+                    int deltaWeight = (wp[0].inputWeight - (1 << wp[0].log2WeightDenom));
+                    WRITE_SVLC(deltaWeight, "delta_luma_weight_lX");
+                    WRITE_SVLC(wp[0].inputOffset, "luma_offset_lX");
+                }
+
+                if (bChroma)
+                {
+                    if (wp[1].bPresentFlag)
+                    {
+                        for (int plane = 1; plane < 3; plane++)
+                        {
+                            int deltaWeight = (wp[plane].inputWeight - (1 << wp[1].log2WeightDenom));
+                            WRITE_SVLC(deltaWeight, "delta_chroma_weight_lX");
+
+                            // chroma offset is coded relative to a predicted offset
+                            int pred = (128 - ((128 * wp[plane].inputWeight) >> (wp[plane].log2WeightDenom)));
+                            int deltaChroma = (wp[plane].inputOffset - pred);
+                            WRITE_SVLC(deltaChroma, "delta_chroma_offset_lX");
+                        }
+                    }
+                }
+            }
+        }
+
+        X265_CHECK(totalSignalledWeightFlags <= 24, "total weights must be <= 24\n");
+    }
+}
+
+/* truncated-unary binarization: `symbol` ones followed by a terminating
+ * zero, except when symbol == maxSymbol where the zero is omitted.  The
+ * first bin uses scmModel[0], all later bins use scmModel[offset] */
+void Entropy::writeUnaryMaxSymbol(uint32_t symbol, uint8_t* scmModel, int offset, uint32_t maxSymbol)
+{
+    X265_CHECK(maxSymbol > 0, "maxSymbol too small\n");
+
+    encodeBin(symbol ? 1 : 0, scmModel[0]);
+
+    if (!symbol)
+        return;
+
+    bool bCodeLast = (maxSymbol > symbol);
+
+    while (--symbol)
+        encodeBin(1, scmModel[offset]);
+
+    if (bCodeLast)
+        encodeBin(0, scmModel[offset]);
+}
+
+/* k-th order Exp-Golomb binarization of `symbol` (k = count), written as
+ * equiprobable (bypass) bins: a unary prefix of ones per escape level,
+ * a zero terminator, then `count` suffix bits */
+void Entropy::writeEpExGolomb(uint32_t symbol, uint32_t count)
+{
+    uint32_t bins = 0;
+    int numBins = 0;
+
+    while (symbol >= (uint32_t)(1 << count))
+    {
+        bins = 2 * bins + 1;
+        numBins++;
+        symbol -= 1 << count;
+        count++;
+    }
+
+    // terminating zero bin
+    bins = 2 * bins + 0;
+    numBins++;
+
+    // append the fixed-length suffix
+    bins = (bins << count) | symbol;
+    numBins += count;
+
+    X265_CHECK(numBins <= 32, "numBins too large\n");
+    encodeBinsEP(bins, numBins);
+}
+
+/** Coding of coeff_abs_level_minus3 */
+// Golomb-Rice coding with Rice parameter `absGoRice`.  Small quotients
+// (< COEF_REMAIN_BIN_REDUCTION) use a plain Rice code (unary prefix +
+// absGoRice-bit remainder); larger values escape to an Exp-Golomb suffix.
+void Entropy::writeCoefRemainExGolomb(uint32_t codeNumber, uint32_t absGoRice)
+{
+    uint32_t length;
+    const uint32_t codeRemain = codeNumber & ((1 << absGoRice) - 1);
+
+    if ((codeNumber >> absGoRice) < COEF_REMAIN_BIN_REDUCTION)
+    {
+        length = codeNumber >> absGoRice;
+
+        X265_CHECK(codeNumber - (length << absGoRice) == (codeNumber & ((1 << absGoRice) - 1)), "codeNumber failure\n");
+        X265_CHECK(length + 1 + absGoRice < 32, "length failure\n");
+        // `length` ones, a zero, then the absGoRice remainder bits in one call
+        encodeBinsEP((((1 << (length + 1)) - 2) << absGoRice) + codeRemain, length + 1 + absGoRice);
+    }
+    else
+    {
+        length = 0;
+        codeNumber = (codeNumber >> absGoRice) - COEF_REMAIN_BIN_REDUCTION;
+        {
+            // length = floor(log2(codeNumber + 1)) via count-leading-zeros
+            unsigned long idx;
+            CLZ(idx, codeNumber + 1);
+            length = idx;
+            X265_CHECK((codeNumber != 0) || (length == 0), "length check failure\n");
+            codeNumber -= (1 << idx) - 1;
+        }
+        codeNumber = (codeNumber << absGoRice) + codeRemain;
+
+        encodeBinsEP((1 << (COEF_REMAIN_BIN_REDUCTION + length + 1)) - 2, COEF_REMAIN_BIN_REDUCTION + length + 1);
+        encodeBinsEP(codeNumber, length + absGoRice);
+    }
+}
+
+// SBAC RD
+/* fast partial context load for RD: copy only the fractional-bit counter
+ * and the luma intra direction (ADI) context from `src` */
+void Entropy::loadIntraDirModeLuma(const Entropy& src)
+{
+    X265_CHECK(src.m_valid, "invalid copy source context\n");
+    m_fracBits = src.m_fracBits;
+    m_contextState[OFF_ADI_CTX] = src.m_contextState[OFF_ADI_CTX];
+}
+
+/* full context copy: coder state plus all CABAC context models */
+void Entropy::copyFrom(const Entropy& src)
+{
+    X265_CHECK(src.m_valid, "invalid copy source context\n");
+
+    copyState(src);
+
+    memcpy(m_contextState, src.m_contextState, MAX_OFF_CTX_MOD * sizeof(uint8_t));
+    markValid();
+}
+
+/* signal the CU partition size.  Intra CUs only need one bin (2Nx2N vs
+ * NxN) and only at max depth; inter CUs use the full binarization with
+ * optional AMP refinement bins */
+void Entropy::codePartSize(const CUData& cu, uint32_t absPartIdx, uint32_t depth)
+{
+    PartSize partSize = (PartSize)cu.m_partSize[absPartIdx];
+
+    if (cu.isIntra(absPartIdx))
+    {
+        // above max depth the intra part size is implied 2Nx2N: nothing coded
+        if (depth == g_maxCUDepth)
+            encodeBin(partSize == SIZE_2Nx2N ? 1 : 0, m_contextState[OFF_PART_SIZE_CTX]);
+        return;
+    }
+
+    switch (partSize)
+    {
+    case SIZE_2Nx2N:
+        encodeBin(1, m_contextState[OFF_PART_SIZE_CTX]);
+        break;
+
+    case SIZE_2NxN:
+    case SIZE_2NxnU:
+    case SIZE_2NxnD:
+        // horizontal partitions: bins 0,1; AMP bins distinguish 2NxN/2NxnU/2NxnD
+        encodeBin(0, m_contextState[OFF_PART_SIZE_CTX + 0]);
+        encodeBin(1, m_contextState[OFF_PART_SIZE_CTX + 1]);
+        if (cu.m_slice->m_sps->maxAMPDepth > depth)
+        {
+            encodeBin((partSize == SIZE_2NxN) ? 1 : 0, m_contextState[OFF_PART_SIZE_CTX + 3]);
+            if (partSize != SIZE_2NxN)
+                encodeBinEP((partSize == SIZE_2NxnU ? 0 : 1));
+        }
+        break;
+
+    case SIZE_Nx2N:
+    case SIZE_nLx2N:
+    case SIZE_nRx2N:
+        // vertical partitions: bins 0,0; extra bin separates Nx2N from NxN
+        // at max depth (not for 8x8 CUs), then AMP refinement
+        encodeBin(0, m_contextState[OFF_PART_SIZE_CTX + 0]);
+        encodeBin(0, m_contextState[OFF_PART_SIZE_CTX + 1]);
+        if (depth == g_maxCUDepth && !(cu.m_log2CUSize[absPartIdx] == 3))
+            encodeBin(1, m_contextState[OFF_PART_SIZE_CTX + 2]);
+        if (cu.m_slice->m_sps->maxAMPDepth > depth)
+        {
+            encodeBin((partSize == SIZE_Nx2N) ? 1 : 0, m_contextState[OFF_PART_SIZE_CTX + 3]);
+            if (partSize != SIZE_Nx2N)
+                encodeBinEP((partSize == SIZE_nLx2N ? 0 : 1));
+        }
+        break;
+    default:
+        X265_CHECK(0, "invalid CU partition\n");
+        break;
+    }
+}
+
+/* signal the merge candidate index as a truncated-unary code: one
+ * context-coded bin for "index != 0", then (unaryIdx - 1) bypass ones and
+ * a terminating zero, with the zero omitted for the last candidate */
+void Entropy::codeMergeIndex(const CUData& cu, uint32_t absPartIdx)
+{
+    uint32_t numCand = cu.m_slice->m_maxNumMergeCand;
+
+    if (numCand > 1)
+    {
+        uint32_t unaryIdx = cu.m_mvpIdx[0][absPartIdx]; // merge candidate index was stored in L0 MVP idx 
+        encodeBin((unaryIdx != 0), m_contextState[OFF_MERGE_IDX_EXT_CTX]);
+
+        X265_CHECK(unaryIdx < numCand, "unaryIdx out of range\n");
+
+        if (unaryIdx != 0)
+        {
+            // mask = (unaryIdx-1) ones followed by a zero; drop the zero
+            // (and one bin) when this is the final candidate
+            uint32_t mask = (1 << unaryIdx) - 2;
+            mask >>= (unaryIdx == numCand - 1) ? 1 : 0;
+            encodeBinsEP(mask, unaryIdx - (unaryIdx == numCand - 1));
+        }
+    }
+}
+
+/* signal the luma intra prediction direction(s).  For NxN partitions with
+ * isMultiple all four sub-parts are coded together: first a context bin per
+ * part telling whether the mode is one of its 3 most-probable modes (MPM),
+ * then per part either the MPM index or the 5-bit remainder mode */
+void Entropy::codeIntraDirLumaAng(const CUData& cu, uint32_t absPartIdx, bool isMultiple)
+{
+    uint32_t dir[4], j;
+    uint32_t preds[4][3];
+    int predIdx[4];
+    uint32_t partNum = isMultiple && cu.m_partSize[absPartIdx] != SIZE_2Nx2N ? 4 : 1;
+    uint32_t qNumParts = 1 << (cu.m_log2CUSize[absPartIdx] - 1 - LOG2_UNIT_SIZE) * 2;
+
+    for (j = 0; j < partNum; j++, absPartIdx += qNumParts)
+    {
+        dir[j] = cu.m_lumaIntraDir[absPartIdx];
+        cu.getIntraDirLumaPredictor(absPartIdx, preds[j]);
+        predIdx[j] = -1;
+        for (uint32_t i = 0; i < 3; i++)
+            if (dir[j] == preds[j][i])
+                predIdx[j] = i;
+
+        encodeBin((predIdx[j] != -1) ? 1 : 0, m_contextState[OFF_ADI_CTX]);
+    }
+
+    for (j = 0; j < partNum; j++)
+    {
+        if (predIdx[j] != -1)
+        {
+            X265_CHECK((predIdx[j] >= 0) && (predIdx[j] <= 2), "predIdx out of range\n");
+            // NOTE: Mapping
+            //       0 = 0
+            //       1 = 10
+            //       2 = 11
+            int nonzero = (!!predIdx[j]);
+            encodeBinsEP(predIdx[j] + nonzero, 1 + nonzero);
+        }
+        else
+        {
+            // not an MPM: sort the 3 predictors, then shrink the mode index
+            // past each smaller predictor so the remainder fits in 5 bits
+            if (preds[j][0] > preds[j][1])
+                std::swap(preds[j][0], preds[j][1]);
+
+            if (preds[j][0] > preds[j][2])
+                std::swap(preds[j][0], preds[j][2]);
+
+            if (preds[j][1] > preds[j][2])
+                std::swap(preds[j][1], preds[j][2]);
+
+            dir[j] += (dir[j] > preds[j][2]) ? -1 : 0;
+            dir[j] += (dir[j] > preds[j][1]) ? -1 : 0;
+            dir[j] += (dir[j] > preds[j][0]) ? -1 : 0;
+
+            encodeBinsEP(dir[j], 5);
+        }
+    }
+}
+
+/* Signal the chroma intra prediction mode of the PU at absPartIdx.  One
+ * context-coded bin distinguishes derived (DM) mode from an explicit
+ * mode; explicit modes additionally send a 2-bit bypass index into the
+ * allowed-mode table. */
+void Entropy::codeIntraDirChroma(const CUData& cu, uint32_t absPartIdx, uint32_t *chromaDirMode)
+{
+    uint32_t chromaMode = cu.m_chromaIntraDir[absPartIdx];
+
+    if (chromaMode == DM_CHROMA_IDX)
+    {
+        // Derived mode: a single zero bin, no payload
+        encodeBin(0, m_contextState[OFF_CHROMA_PRED_CTX]);
+        return;
+    }
+
+    // Translate the mode into its position within the candidate table
+    uint32_t modeIdx = chromaMode;
+    for (int i = 0; i < NUM_CHROMA_MODE - 1; i++)
+    {
+        if (chromaDirMode[i] == chromaMode)
+        {
+            modeIdx = i;
+            break;
+        }
+    }
+
+    encodeBin(1, m_contextState[OFF_CHROMA_PRED_CTX]);
+    encodeBinsEP(modeIdx, 2);
+}
+
+/* Signal the inter prediction direction (list 0, list 1 or bi-pred).  The
+ * first (bi-pred) bin is skipped for 8x8 CUs with non-2Nx2N partitions,
+ * where bi-prediction is not allowed. */
+void Entropy::codeInterDir(const CUData& cu, uint32_t absPartIdx)
+{
+    const uint32_t interDir = cu.m_interDir[absPartIdx] - 1; // 0 = L0, 1 = L1, 2 = bi
+    const uint32_t ctx      = cu.m_cuDepth[absPartIdx]; // the context of the inter dir is the depth of the CU
+
+    if (cu.m_partSize[absPartIdx] == SIZE_2Nx2N || cu.m_log2CUSize[absPartIdx] != 3)
+        encodeBin(interDir == 2 ? 1 : 0, m_contextState[OFF_INTER_DIR_CTX + ctx]);
+    if (interDir < 2)
+        encodeBin(interDir, m_contextState[OFF_INTER_DIR_CTX + 4]);
+}
+
+/* Signal the reference picture index for one list: a context-coded
+ * "idx > 0" flag, an optional second context-coded flag, then the rest as
+ * a truncated unary run of bypass bins. */
+void Entropy::codeRefFrmIdx(const CUData& cu, uint32_t absPartIdx, int list)
+{
+    uint32_t refFrame = cu.m_refIdx[list][absPartIdx];
+
+    encodeBin(refFrame > 0, m_contextState[OFF_REF_NO_CTX]);
+
+    if (refFrame > 0)
+    {
+        uint32_t refNum = cu.m_slice->m_numRefIdx[list] - 2;
+        if (refNum == 0)
+            return; // only two references: the two flags say it all
+
+        refFrame--;
+        encodeBin(refFrame > 0, m_contextState[OFF_REF_NO_CTX + 1]);
+        if (refFrame > 0)
+        {
+            // Truncated unary remainder: run of 1s, with the terminating
+            // 0 dropped when the index is already the maximum
+            uint32_t mask = (1 << refFrame) - 2;
+            mask >>= (refFrame == refNum) ? 1 : 0;
+            encodeBinsEP(mask, refFrame - (refFrame == refNum));
+        }
+    }
+}
+
+/* Write the motion vector difference for the given list.  Bin order
+ * follows the HEVC syntax: greater-than-zero flags for both components,
+ * then greater-than-one flags, then the EG1-coded remainder and sign bit
+ * for the horizontal component followed by the vertical one. */
+void Entropy::codeMvd(const CUData& cu, uint32_t absPartIdx, int list)
+{
+    const MV& mvd = cu.m_mvd[list][absPartIdx];
+    const int dx = mvd.x;
+    const int dy = mvd.y;
+    const uint32_t absX = (uint32_t)(dx < 0 ? -dx : dx);
+    const uint32_t absY = (uint32_t)(dy < 0 ? -dy : dy);
+
+    // abs > 0 flags (context coded)
+    encodeBin(dx ? 1 : 0, m_contextState[OFF_MV_RES_CTX]);
+    encodeBin(dy ? 1 : 0, m_contextState[OFF_MV_RES_CTX]);
+
+    // abs > 1 flags (context coded), sent only for non-zero components
+    if (dx)
+        encodeBin(absX > 1 ? 1 : 0, m_contextState[OFF_MV_RES_CTX + 1]);
+    if (dy)
+        encodeBin(absY > 1 ? 1 : 0, m_contextState[OFF_MV_RES_CTX + 1]);
+
+    // remainder (first-order Exp-Golomb) and bypass sign bit
+    if (dx)
+    {
+        if (absX > 1)
+            writeEpExGolomb(absX - 2, 1);
+
+        encodeBinEP(dx < 0 ? 1 : 0);
+    }
+
+    if (dy)
+    {
+        if (absY > 1)
+            writeEpExGolomb(absY - 2, 1);
+
+        encodeBinEP(dy < 0 ? 1 : 0);
+    }
+}
+
+/* Signal the CU's delta-QP: context-coded truncated unary prefix capped at
+ * CU_DQP_TU_CMAX, an Exp-Golomb suffix for larger magnitudes, and a
+ * bypass sign bit when the value is non-zero. */
+void Entropy::codeDeltaQP(const CUData& cu, uint32_t absPartIdx)
+{
+    int dqp = cu.m_qp[absPartIdx] - cu.getRefQP(absPartIdx);
+
+    int qpBdOffsetY = QP_BD_OFFSET;
+
+    // Wrap the difference back into the legal signed delta-QP range,
+    // i.e. [-26 - qpBdOffsetY/2, +25 + qpBdOffsetY/2] mod (52 + qpBdOffsetY)
+    dqp = (dqp + 78 + qpBdOffsetY + (qpBdOffsetY / 2)) % (52 + qpBdOffsetY) - 26 - (qpBdOffsetY / 2);
+
+    uint32_t absDQp = (uint32_t)((dqp > 0) ? dqp  : (-dqp));
+    uint32_t TUValue = X265_MIN((int)absDQp, CU_DQP_TU_CMAX);
+    writeUnaryMaxSymbol(TUValue, &m_contextState[OFF_DELTA_QP_CTX], 1, CU_DQP_TU_CMAX);
+    if (absDQp >= CU_DQP_TU_CMAX)
+        writeEpExGolomb(absDQp - CU_DQP_TU_CMAX, CU_DQP_EG_k);
+
+    if (absDQp > 0)
+    {
+        uint32_t sign = (dqp > 0 ? 0 : 1);
+        encodeBinEP(sign);
+    }
+}
+
+/* Signal the chroma coded-block flag for one TU level.  In 4:2:2 with
+ * sub-TUs present, two CBF bins are coded (top and bottom half TUs);
+ * TUs too small to quad-split read their parent's CBF depth. */
+void Entropy::codeQtCbfChroma(const CUData& cu, uint32_t absPartIdx, TextType ttype, uint32_t tuDepth, bool lowestLevel)
+{
+    uint32_t ctx = tuDepth + 2;
+
+    uint32_t log2TrSize = cu.m_log2CUSize[absPartIdx] - tuDepth;
+    bool canQuadSplit       = (log2TrSize - cu.m_hChromaShift > 2);
+    uint32_t lowestTUDepth  = tuDepth + ((!lowestLevel && !canQuadSplit) ? 1 : 0); // unsplittable TUs inherit their parent's CBF
+
+    if (cu.m_chromaFormat == X265_CSP_I422 && (lowestLevel || !canQuadSplit)) // if sub-TUs are present
+    {
+        uint32_t subTUDepth        = lowestTUDepth + 1;   // if this is the lowest level of the TU-tree, the sub-TUs are directly below.
+                                                          // Otherwise, this must be the level above the lowest level (as specified above)
+        uint32_t tuNumParts = 1 << ((log2TrSize - LOG2_UNIT_SIZE) * 2 - 1);
+
+        encodeBin(cu.getCbf(absPartIdx             , ttype, subTUDepth), m_contextState[OFF_QT_CBF_CTX + ctx]);
+        encodeBin(cu.getCbf(absPartIdx + tuNumParts, ttype, subTUDepth), m_contextState[OFF_QT_CBF_CTX + ctx]);
+    }
+    else
+        encodeBin(cu.getCbf(absPartIdx, ttype, lowestTUDepth), m_contextState[OFF_QT_CBF_CTX + ctx]);
+}
+
+#if CHECKED_BUILD || _DEBUG
+/* Reference (unoptimized) bit-cost model for coeff_abs_level_remaining
+ * coding, used only to cross-check the optimized
+ * primitives.costCoeffRemain() in checked/debug builds.
+ *
+ * absCoeff   - absolute coefficient levels in coding order
+ * numNonZero - number of entries in absCoeff (must be >= 1)
+ * Returns the total bin count of the Golomb-Rice / Exp-Golomb remainders. */
+uint32_t costCoeffRemain_c0(uint16_t *absCoeff, int numNonZero)
+{
+    uint32_t goRiceParam = 0;
+    int firstCoeff2 = 1;
+    uint32_t baseLevelN = 0x5555AAAA; // 2-bits encode format baseLevel
+
+    uint32_t sum = 0;
+    int idx = 0;
+    do
+    {
+        int baseLevel = (baseLevelN & 3) | firstCoeff2;
+        X265_CHECK(baseLevel == ((idx < C1FLAG_NUMBER) ? (2 + firstCoeff2) : 1), "baseLevel check failure\n");
+        baseLevelN >>= 2;
+        int codeNumber = absCoeff[idx] - baseLevel;
+
+        if (codeNumber >= 0)
+        {
+            //writeCoefRemainExGolomb(absCoeff[idx] - baseLevel, goRiceParam);
+            uint32_t length = 0;
+
+            codeNumber = ((uint32_t)codeNumber >> goRiceParam) - COEF_REMAIN_BIN_REDUCTION;
+            if (codeNumber >= 0)
+            {
+                {
+                    unsigned long cidx;
+                    CLZ(cidx, codeNumber + 1);
+                    length = cidx;
+                }
+                X265_CHECK((codeNumber != 0) || (length == 0), "length check failure\n");
+
+                codeNumber = (length + length);
+            }
+            sum += (COEF_REMAIN_BIN_REDUCTION + 1 + goRiceParam + codeNumber);
+
+            // adapt the Rice parameter upward as coefficient levels grow
+            if (absCoeff[idx] > (COEF_REMAIN_BIN_REDUCTION << goRiceParam))
+                goRiceParam = (goRiceParam + 1) - (goRiceParam >> 2);
+            X265_CHECK(goRiceParam <= 4, "goRiceParam check failure\n");
+        }
+        if (absCoeff[idx] >= 2)
+            firstCoeff2 = 0;
+        idx++;
+    }
+    while(idx < numNonZero);
+
+    return sum;
+}
+#endif // debug only code
+
+/* Encode the quantized coefficients of one TU (HEVC residual_coding):
+ * optional transform_skip flag, last significant coefficient position,
+ * coefficient-group and per-coefficient significance maps, greater-than-1
+ * and greater-than-2 flags, sign bins (with optional sign-data hiding)
+ * and Golomb-Rice coded remainders.  When m_bitIf is NULL no bins are
+ * written; only the RD bit-cost estimate in m_fracBits is accumulated. */
+void Entropy::codeCoeffNxN(const CUData& cu, const coeff_t* coeff, uint32_t absPartIdx, uint32_t log2TrSize, TextType ttype)
+{
+    uint32_t trSize = 1 << log2TrSize;
+    uint32_t tqBypass = cu.m_tqBypass[absPartIdx];
+    // compute number of significant coefficients
+    uint32_t numSig = primitives.cu[log2TrSize - 2].count_nonzero(coeff);
+    X265_CHECK(numSig > 0, "cbf check fail\n");
+    bool bHideFirstSign = cu.m_slice->m_pps->bSignHideEnabled & !tqBypass;
+
+    if (log2TrSize <= MAX_LOG2_TS_SIZE && !tqBypass && cu.m_slice->m_pps->bTransformSkipEnabled)
+        codeTransformSkipFlags(cu.m_transformSkip[ttype][absPartIdx], ttype);
+
+    bool bIsLuma = ttype == TEXT_LUMA;
+
+    // select scans
+    TUEntropyCodingParameters codingParameters;
+    cu.getTUEntropyCodingParameters(codingParameters, absPartIdx, log2TrSize, bIsLuma);
+
+    uint8_t coeffNum[MLS_GRP_NUM];      // value range[0, 16]
+    uint16_t coeffSign[MLS_GRP_NUM];    // bit mask map for non-zero coeff sign
+    uint16_t coeffFlag[MLS_GRP_NUM];    // bit mask map for non-zero coeff
+
+    //----- encode significance map -----
+
+    // Find position of last coefficient
+    int scanPosLast = 0;
+    uint32_t posLast;
+    uint64_t sigCoeffGroupFlag64 = 0;
+    //const uint32_t maskPosXY = ((uint32_t)~0 >> (31 - log2TrSize + MLS_CG_LOG2_SIZE)) >> 1;
+    X265_CHECK((uint32_t)((1 << (log2TrSize - MLS_CG_LOG2_SIZE)) - 1) == (((uint32_t)~0 >> (31 - log2TrSize + MLS_CG_LOG2_SIZE)) >> 1), "maskPosXY fault\n");
+
+    scanPosLast = primitives.scanPosLast(codingParameters.scan, coeff, coeffSign, coeffFlag, coeffNum, numSig, g_scan4x4[codingParameters.scanType], trSize);
+    posLast = codingParameters.scan[scanPosLast];
+
+    const int lastScanSet = scanPosLast >> MLS_CG_SIZE;
+
+    // Calculate CG block non-zero mask, the latest CG always flag as non-zero in CG scan loop
+    for(int idx = 0; idx < lastScanSet; idx++)
+    {
+        const uint8_t subSet = (uint8_t)codingParameters.scanCG[idx];
+        const uint8_t nonZero = (coeffNum[idx] != 0);
+        sigCoeffGroupFlag64 |= ((nonZero ? (uint64_t)1 : 0) << subSet);
+    }
+
+
+    // Code position of last coefficient
+    {
+        // The last position is composed of a prefix and suffix.
+        // The prefix is context coded truncated unary bins. The suffix is bypass coded fixed length bins.
+        // The bypass coded bins for both the x and y components are grouped together.
+        uint32_t packedSuffixBits = 0, packedSuffixLen = 0;
+        uint32_t pos[2] = { (posLast & (trSize - 1)), (posLast >> log2TrSize) };
+        // swap
+        if (codingParameters.scanType == SCAN_VER)
+            std::swap(pos[0], pos[1]);
+
+        int ctxIdx = bIsLuma ? (3 * (log2TrSize - 2) + (log2TrSize == 5)) : NUM_CTX_LAST_FLAG_XY_LUMA;
+        int ctxShift = (bIsLuma ? (log2TrSize > 2) : (log2TrSize - 2));
+        uint32_t maxGroupIdx = (log2TrSize << 1) - 1;
+        X265_CHECK(((log2TrSize - 1) >> 2) == (uint32_t)(log2TrSize == 5), "ctxIdx check failure\n");
+        X265_CHECK((uint32_t)ctxShift == (bIsLuma ? ((log2TrSize + 1) >> 2) : log2TrSize - 2), "ctxShift check failure\n");
+
+        uint8_t *ctx = &m_contextState[OFF_CTX_LAST_FLAG_X];
+        for (uint32_t i = 0; i < 2; i++, ctxIdx += NUM_CTX_LAST_FLAG_XY)
+        {
+            uint32_t temp = g_lastCoeffTable[pos[i]];
+            uint32_t prefixOnes = temp & 15;
+            uint32_t suffixLen = temp >> 4;
+
+            for (uint32_t ctxLast = 0; ctxLast < prefixOnes; ctxLast++)
+                encodeBin(1, *(ctx + ctxIdx + (ctxLast >> ctxShift)));
+
+            if (prefixOnes < maxGroupIdx)
+                encodeBin(0, *(ctx + ctxIdx + (prefixOnes >> ctxShift)));
+
+            packedSuffixBits <<= suffixLen;
+            packedSuffixBits |= (pos[i] & ((1 << suffixLen) - 1));
+            packedSuffixLen += suffixLen;
+        }
+
+        encodeBinsEP(packedSuffixBits, packedSuffixLen);
+    }
+
+    // code significance flag
+    uint8_t * const baseCoeffGroupCtx = &m_contextState[OFF_SIG_CG_FLAG_CTX + (bIsLuma ? 0 : NUM_SIG_CG_FLAG_CTX)];
+    uint8_t * const baseCtx = bIsLuma ? &m_contextState[OFF_SIG_FLAG_CTX] : &m_contextState[OFF_SIG_FLAG_CTX + NUM_SIG_FLAG_CTX_LUMA];
+    uint32_t c1 = 1;
+    int scanPosSigOff = scanPosLast - (lastScanSet << MLS_CG_SIZE) - 1;
+    ALIGN_VAR_32(uint16_t, absCoeff[(1 << MLS_CG_SIZE)]);
+    uint32_t numNonZero = 1;
+    unsigned long lastNZPosInCG;
+    unsigned long firstNZPosInCG;
+
+    absCoeff[0] = (uint16_t)abs(coeff[posLast]);
+
+    // Process coefficient groups (CGs) in reverse scan order
+    for (int subSet = lastScanSet; subSet >= 0; subSet--)
+    {
+        const uint32_t subCoeffFlag = coeffFlag[subSet];
+        uint32_t scanFlagMask = subCoeffFlag;
+        int subPosBase = subSet << MLS_CG_SIZE;
+        
+        if (subSet == lastScanSet)
+        {
+            X265_CHECK(scanPosSigOff == scanPosLast - (lastScanSet << MLS_CG_SIZE) - 1, "scanPos mistake\n");
+            scanFlagMask >>= 1;
+        }
+
+        // encode significant_coeffgroup_flag
+        const int cgBlkPos = codingParameters.scanCG[subSet];
+        const int cgPosY   = (uint32_t)cgBlkPos >> (log2TrSize - MLS_CG_LOG2_SIZE);
+        const int cgPosX   = cgBlkPos & ((1 << (log2TrSize - MLS_CG_LOG2_SIZE)) - 1);
+        const uint64_t cgBlkPosMask = ((uint64_t)1 << cgBlkPos);
+
+        if (subSet == lastScanSet || !subSet)
+            sigCoeffGroupFlag64 |= cgBlkPosMask;
+        else
+        {
+            uint32_t sigCoeffGroup = ((sigCoeffGroupFlag64 & cgBlkPosMask) != 0);
+            uint32_t ctxSig = Quant::getSigCoeffGroupCtxInc(sigCoeffGroupFlag64, cgPosX, cgPosY, cgBlkPos, (trSize >> MLS_CG_LOG2_SIZE));
+            encodeBin(sigCoeffGroup, baseCoeffGroupCtx[ctxSig]);
+        }
+
+        // encode significant_coeff_flag
+        if ((scanPosSigOff >= 0) && (sigCoeffGroupFlag64 & cgBlkPosMask))
+        {
+            X265_CHECK((log2TrSize != 2) || (log2TrSize == 2 && subSet == 0), "log2TrSize and subSet mistake!\n");
+            const int patternSigCtx = Quant::calcPatternSigCtx(sigCoeffGroupFlag64, cgPosX, cgPosY, cgBlkPos, (trSize >> MLS_CG_LOG2_SIZE));
+            const uint32_t posOffset = (bIsLuma && subSet) ? 3 : 0;
+
+            // NOTE: [patternSigCtx][posXinSubset][posYinSubset]
+            static const uint8_t table_cnt[5][SCAN_SET_SIZE] =
+            {
+                // patternSigCtx = 0
+                {
+                    2, 1, 1, 0,
+                    1, 1, 0, 0,
+                    1, 0, 0, 0,
+                    0, 0, 0, 0,
+                },
+                // patternSigCtx = 1
+                {
+                    2, 2, 2, 2,
+                    1, 1, 1, 1,
+                    0, 0, 0, 0,
+                    0, 0, 0, 0,
+                },
+                // patternSigCtx = 2
+                {
+                    2, 1, 0, 0,
+                    2, 1, 0, 0,
+                    2, 1, 0, 0,
+                    2, 1, 0, 0,
+                },
+                // patternSigCtx = 3
+                {
+                    2, 2, 2, 2,
+                    2, 2, 2, 2,
+                    2, 2, 2, 2,
+                    2, 2, 2, 2,
+                },
+                // 4x4
+                {
+                    0, 1, 4, 5,
+                    2, 3, 4, 5,
+                    6, 6, 8, 8,
+                    7, 7, 8, 8
+                }
+            };
+
+            const int offset = codingParameters.firstSignificanceMapContext;
+            const uint32_t blkPosBase  = codingParameters.scan[subPosBase];
+
+            X265_CHECK(scanPosSigOff >= 0, "scanPosSigOff check failure\n");
+            if (m_bitIf)
+            {
+                // Real bitstream path: emit one context-coded bin per position
+                ALIGN_VAR_32(uint16_t, tmpCoeff[SCAN_SET_SIZE]);
+
+                // TODO: accelerate by PABSW
+                for (int i = 0; i < MLS_CG_SIZE; i++)
+                {
+                    tmpCoeff[i * MLS_CG_SIZE + 0] = (uint16_t)abs(coeff[blkPosBase + i * trSize + 0]);
+                    tmpCoeff[i * MLS_CG_SIZE + 1] = (uint16_t)abs(coeff[blkPosBase + i * trSize + 1]);
+                    tmpCoeff[i * MLS_CG_SIZE + 2] = (uint16_t)abs(coeff[blkPosBase + i * trSize + 2]);
+                    tmpCoeff[i * MLS_CG_SIZE + 3] = (uint16_t)abs(coeff[blkPosBase + i * trSize + 3]);
+                }
+
+                if (log2TrSize == 2)
+                {
+                    do
+                    {
+                        uint32_t blkPos, sig, ctxSig;
+                        blkPos = g_scan4x4[codingParameters.scanType][scanPosSigOff];
+                        sig     = scanFlagMask & 1;
+                        scanFlagMask >>= 1;
+                        X265_CHECK((uint32_t)(tmpCoeff[blkPos] != 0) == sig, "sign bit mistake\n");
+                        {
+                            ctxSig = table_cnt[4][blkPos];
+                            X265_CHECK(ctxSig == Quant::getSigCtxInc(patternSigCtx, log2TrSize, trSize, blkPos, bIsLuma, codingParameters.firstSignificanceMapContext), "sigCtx mistake!\n");;
+                            encodeBin(sig, baseCtx[ctxSig]);
+                        }
+                        absCoeff[numNonZero] = tmpCoeff[blkPos];
+                        numNonZero += sig;
+                        scanPosSigOff--;
+                    }
+                    while(scanPosSigOff >= 0);
+                }
+                else
+                {
+                    X265_CHECK((log2TrSize > 2), "log2TrSize must be more than 2 in this path!\n");
+
+                    const uint8_t *tabSigCtx = table_cnt[(uint32_t)patternSigCtx];
+                    do
+                    {
+                        uint32_t blkPos, sig, ctxSig;
+                        blkPos = g_scan4x4[codingParameters.scanType][scanPosSigOff];
+                        const uint32_t posZeroMask = (subPosBase + scanPosSigOff) ? ~0 : 0;
+                        sig     = scanFlagMask & 1;
+                        scanFlagMask >>= 1;
+                        X265_CHECK((uint32_t)(tmpCoeff[blkPos] != 0) == sig, "sign bit mistake\n");
+                        if (scanPosSigOff != 0 || subSet == 0 || numNonZero)
+                        {
+                            const uint32_t cnt = tabSigCtx[blkPos] + offset;
+                            ctxSig = (cnt + posOffset) & posZeroMask;
+
+                            X265_CHECK(ctxSig == Quant::getSigCtxInc(patternSigCtx, log2TrSize, trSize, codingParameters.scan[subPosBase + scanPosSigOff], bIsLuma, codingParameters.firstSignificanceMapContext), "sigCtx mistake!\n");;
+                            encodeBin(sig, baseCtx[ctxSig]);
+                        }
+                        absCoeff[numNonZero] = tmpCoeff[blkPos];
+                        numNonZero += sig;
+                        scanPosSigOff--;
+                    }
+                    while(scanPosSigOff >= 0);
+                }
+            }
+            else // fast RD path
+            {
+                // maximum g_entropyBits are 18-bits and maximum of count are 16, so intermedia of sum are 22-bits
+                const uint8_t *tabSigCtx = table_cnt[(log2TrSize == 2) ? 4 : (uint32_t)patternSigCtx];
+                uint32_t sum = primitives.costCoeffNxN(g_scan4x4[codingParameters.scanType], &coeff[blkPosBase], (intptr_t)trSize, absCoeff + numNonZero, tabSigCtx, scanFlagMask, baseCtx, offset + posOffset, scanPosSigOff, subPosBase);
+
+#if CHECKED_BUILD || _DEBUG
+                numNonZero = coeffNum[subSet];
+#endif
+                // update RD cost
+                m_fracBits += sum;
+            } // end of fast RD path -- !m_bitIf
+        }
+        X265_CHECK(coeffNum[subSet] == numNonZero, "coefNum mistake\n");
+
+        // Level and sign coding for this CG
+        uint32_t coeffSigns = coeffSign[subSet];
+        numNonZero = coeffNum[subSet];
+        if (numNonZero > 0)
+        {
+            uint32_t idx;
+            X265_CHECK(subCoeffFlag > 0, "subCoeffFlag is zero\n");
+            CLZ(lastNZPosInCG, subCoeffFlag);
+            CTZ(firstNZPosInCG, subCoeffFlag);
+
+            bool signHidden = (lastNZPosInCG - firstNZPosInCG >= SBH_THRESHOLD);
+            const uint8_t ctxSet = (((subSet > 0) + bIsLuma) & 2) + !(c1 & 3);
+            X265_CHECK((((subSet > 0) & bIsLuma) ? 2 : 0) + !(c1 & 3) == ctxSet, "ctxSet check failure\n");
+
+            c1 = 1;
+            uint8_t *baseCtxMod = &m_contextState[(bIsLuma ? 0 : NUM_ONE_FLAG_CTX_LUMA) + OFF_ONE_FLAG_CTX + 4 * ctxSet];
+
+            uint32_t numC1Flag = X265_MIN(numNonZero, C1FLAG_NUMBER);
+            X265_CHECK(numC1Flag > 0, "numC1Flag check failure\n");
+
+            if (!m_bitIf)
+            {
+                uint32_t sum = primitives.costC1C2Flag(absCoeff, numC1Flag, baseCtxMod, (bIsLuma ? 0 : NUM_ABS_FLAG_CTX_LUMA - NUM_ONE_FLAG_CTX_LUMA) + (OFF_ABS_FLAG_CTX - OFF_ONE_FLAG_CTX) - 3 * ctxSet);
+                uint32_t firstC2Idx = (sum >> 28);
+                c1 = ((sum >> 26) & 3);
+                m_fracBits += sum & 0x00FFFFFF;
+
+                const int hiddenShift = (bHideFirstSign & signHidden) ? -1 : 0;
+                //encodeBinsEP((coeffSigns >> hiddenShift), numNonZero - hiddenShift);
+                m_fracBits += (numNonZero + hiddenShift) << 15;
+
+                if (numNonZero > firstC2Idx)
+                {
+                    sum = primitives.costCoeffRemain(absCoeff, numNonZero, firstC2Idx);
+                    X265_CHECK(sum == costCoeffRemain_c0(absCoeff, numNonZero), "costCoeffRemain check failure\n");
+                    m_fracBits += ((uint64_t)sum << 15);
+                }
+            }
+            // Standard path
+            else
+            {
+                uint32_t firstC2Idx = 8;
+                uint32_t firstC2Flag = 2;
+                uint32_t c1Next = 0xFFFFFFFE;
+
+                idx = 0;
+                do
+                {
+                    const uint32_t symbol1 = absCoeff[idx] > 1;
+                    const uint32_t symbol2 = absCoeff[idx] > 2;
+                    encodeBin(symbol1, baseCtxMod[c1]);
+
+                    if (symbol1)
+                        c1Next = 0;
+
+                    firstC2Flag = (symbol1 + firstC2Flag == 3) ? symbol2 : firstC2Flag;
+                    firstC2Idx  = (symbol1 + firstC2Idx == 9) ? idx : firstC2Idx;
+
+                    c1 = (c1Next & 3);
+                    c1Next >>= 2;
+                    X265_CHECK(c1 <= 3, "c1 check failure\n");
+                    idx++;
+                }
+                while(idx < numC1Flag);
+
+                if (!c1)
+                {
+                    baseCtxMod = &m_contextState[(bIsLuma ? 0 : NUM_ABS_FLAG_CTX_LUMA) + OFF_ABS_FLAG_CTX + ctxSet];
+
+                    X265_CHECK((firstC2Flag <= 1), "firstC2FlagIdx check failure\n");
+                    encodeBin(firstC2Flag, baseCtxMod[0]);
+                }
+
+                const int hiddenShift = (bHideFirstSign && signHidden) ? 1 : 0;
+                encodeBinsEP((coeffSigns >> hiddenShift), numNonZero - hiddenShift);
+
+                if (!c1 || numNonZero > C1FLAG_NUMBER)
+                {
+                    // Standard path
+                    uint32_t goRiceParam = 0;
+                    int baseLevel = 3;
+                    uint32_t threshold = COEF_REMAIN_BIN_REDUCTION;
+#if CHECKED_BUILD || _DEBUG
+                    int firstCoeff2 = 1;
+#endif
+                    idx = firstC2Idx;
+                    do
+                    {
+                        if (idx >= C1FLAG_NUMBER)
+                            baseLevel = 1;
+                        // TODO: fast algorithm maybe broken this check logic
+                        X265_CHECK(baseLevel == ((idx < C1FLAG_NUMBER) ? (2 + firstCoeff2) : 1), "baseLevel check failurr\n");
+
+                        if (absCoeff[idx] >= baseLevel)
+                        {
+                            writeCoefRemainExGolomb(absCoeff[idx] - baseLevel, goRiceParam);
+                            X265_CHECK(threshold == (uint32_t)(COEF_REMAIN_BIN_REDUCTION << goRiceParam), "COEF_REMAIN_BIN_REDUCTION check failure\n");
+                            const int adjust = (absCoeff[idx] > threshold) & (goRiceParam <= 3);
+                            goRiceParam += adjust;
+                            threshold += (adjust) ? threshold : 0;
+                            X265_CHECK(goRiceParam <= 4, "goRiceParam check failure\n");
+                        }
+#if CHECKED_BUILD || _DEBUG
+                        firstCoeff2 = 0;
+#endif
+                        baseLevel = 2;
+                        idx++;
+                    }
+                    while(idx < numNonZero);
+                }
+            } // end of !bitIf
+        } // end of (numNonZero > 0)
+
+        // Initialize value for next loop
+        numNonZero = 0;
+        scanPosSigOff = (1 << MLS_CG_SIZE) - 1;
+    }
+}
+
+/* Encode a SAO value as truncated unary with all bins bypass coded.
+ * 'code' lies in [0, maxSymbol]; the terminating 0 bin is dropped when
+ * code equals maxSymbol. */
+void Entropy::codeSaoMaxUvlc(uint32_t code, uint32_t maxSymbol)
+{
+    X265_CHECK(maxSymbol > 0, "maxSymbol too small\n");
+
+    uint32_t isCodeNonZero = !!code;
+
+    encodeBinEP(isCodeNonZero);
+    if (isCodeNonZero)
+    {
+        uint32_t isCodeLast = (maxSymbol > code);
+        // Build the remaining unary run (code-1 ones plus a trailing zero
+        // unless this is the maximum symbol) and emit it in one batch
+        uint32_t mask = (1 << (code - 1)) - 1;
+        uint32_t len = code - 1 + isCodeLast;
+        mask <<= isCodeLast;
+
+        encodeBinsEP(mask, len);
+    }
+}
+
+/* Populate all bit-cost estimation tables (CBF, significance maps and
+ * coefficient level flags) for a TU of the given size and color plane. */
+void Entropy::estBit(EstBitsSbac& estBitsSbac, uint32_t log2TrSize, bool bIsLuma) const
+{
+    estCBFBit(estBitsSbac);                                  // coded block flags
+    estSignificantCoeffGroupMapBit(estBitsSbac, bIsLuma);    // CG significance map
+    estSignificantMapBit(estBitsSbac, log2TrSize, bIsLuma);  // per-coeff significance
+    estSignificantCoefficientsBit(estBitsSbac, bIsLuma);     // level flags
+}
+
+/* Load CBF-flag entropy cost estimates from the current context models. */
+void Entropy::estCBFBit(EstBitsSbac& estBitsSbac) const
+{
+    const uint8_t *cbfCtx = &m_contextState[OFF_QT_CBF_CTX];
+
+    for (uint32_t i = 0; i < NUM_QT_CBF_CTX; i++)
+        for (uint32_t bin = 0; bin < 2; bin++)
+            estBitsSbac.blockCbpBits[i][bin] = sbacGetEntropyBits(cbfCtx[i], bin);
+
+    const uint8_t *rootCtx = &m_contextState[OFF_QT_ROOT_CBF_CTX];
+    for (uint32_t bin = 0; bin < 2; bin++)
+        estBitsSbac.blockRootCbpBits[bin] = sbacGetEntropyBits(rootCtx[0], bin);
+}
+
+/* Load significant-coefficient-group flag cost estimates; the chroma
+ * contexts sit NUM_SIG_CG_FLAG_CTX entries past the luma ones. */
+void Entropy::estSignificantCoeffGroupMapBit(EstBitsSbac& estBitsSbac, bool bIsLuma) const
+{
+    const uint32_t base = OFF_SIG_CG_FLAG_CTX + (bIsLuma ? 0 : NUM_SIG_CG_FLAG_CTX);
+
+    for (int ctxIdx = 0; ctxIdx < NUM_SIG_CG_FLAG_CTX; ctxIdx++)
+    {
+        estBitsSbac.significantCoeffGroupBits[ctxIdx][0] = sbacGetEntropyBits(m_contextState[base + ctxIdx], 0);
+        estBitsSbac.significantCoeffGroupBits[ctxIdx][1] = sbacGetEntropyBits(m_contextState[base + ctxIdx], 1);
+    }
+}
+
+/* Estimate SAMBAC bit cost for the per-coefficient significance map and
+ * the last-significant-coefficient position prefix for a TU of the given
+ * size and color plane. */
+void Entropy::estSignificantMapBit(EstBitsSbac& estBitsSbac, uint32_t log2TrSize, bool bIsLuma) const
+{
+    // context range for this TU size (context 0 is filled separately below)
+    int firstCtx = 1, numCtx = 8;
+
+    if (log2TrSize >= 4)
+    {
+        firstCtx = bIsLuma ? 21 : 12;
+        numCtx = bIsLuma ? 6 : 3;
+    }
+    else if (log2TrSize == 3)
+    {
+        firstCtx = 9;
+        numCtx = bIsLuma ? 12 : 3;
+    }
+
+    if (bIsLuma)
+    {
+        for (uint32_t bin = 0; bin < 2; bin++)
+            estBitsSbac.significantBits[bin][0] = sbacGetEntropyBits(m_contextState[OFF_SIG_FLAG_CTX], bin);
+
+        for (int ctxIdx = firstCtx; ctxIdx < firstCtx + numCtx; ctxIdx++)
+            for (uint32_t bin = 0; bin < 2; bin++)
+                estBitsSbac.significantBits[bin][ctxIdx] = sbacGetEntropyBits(m_contextState[OFF_SIG_FLAG_CTX + ctxIdx], bin);
+    }
+    else
+    {
+        for (uint32_t bin = 0; bin < 2; bin++)
+            estBitsSbac.significantBits[bin][0] = sbacGetEntropyBits(m_contextState[OFF_SIG_FLAG_CTX + (NUM_SIG_FLAG_CTX_LUMA + 0)], bin);
+
+        for (int ctxIdx = firstCtx; ctxIdx < firstCtx + numCtx; ctxIdx++)
+            for (uint32_t bin = 0; bin < 2; bin++)
+                estBitsSbac.significantBits[bin][ctxIdx] = sbacGetEntropyBits(m_contextState[OFF_SIG_FLAG_CTX + (NUM_SIG_FLAG_CTX_LUMA + ctxIdx)], bin);
+    }
+
+    // cost of the last-significant-position truncated unary prefix bins,
+    // accumulated so lastBits[i][ctx] is the total cost of prefix length ctx
+    int blkSizeOffset = bIsLuma ? ((log2TrSize - 2) * 3 + ((log2TrSize - 1) >> 2)) : NUM_CTX_LAST_FLAG_XY_LUMA;
+    int ctxShift = bIsLuma ? ((log2TrSize + 1) >> 2) : log2TrSize - 2;
+    uint32_t maxGroupIdx = log2TrSize * 2 - 1;
+
+    uint32_t ctx;
+    for (int i = 0, ctxIdx = 0; i < 2; i++, ctxIdx += NUM_CTX_LAST_FLAG_XY)
+    {
+        int bits = 0;
+        const uint8_t *ctxState = &m_contextState[OFF_CTX_LAST_FLAG_X + ctxIdx];
+
+        for (ctx = 0; ctx < maxGroupIdx; ctx++)
+        {
+            int ctxOffset = blkSizeOffset + (ctx >> ctxShift);
+            estBitsSbac.lastBits[i][ctx] = bits + sbacGetEntropyBits(ctxState[ctxOffset], 0);
+            bits += sbacGetEntropyBits(ctxState[ctxOffset], 1);
+        }
+
+        estBitsSbac.lastBits[i][ctx] = bits;
+    }
+}
+
+/* Load greater-than-one and abs-level flag cost estimates for the chosen
+ * color plane; chroma contexts follow the luma blocks in the context array. */
+void Entropy::estSignificantCoefficientsBit(EstBitsSbac& estBitsSbac, bool bIsLuma) const
+{
+    const uint8_t *ctxOne = &m_contextState[OFF_ONE_FLAG_CTX + (bIsLuma ? 0 : NUM_ONE_FLAG_CTX_LUMA)];
+    const uint8_t *ctxAbs = &m_contextState[OFF_ABS_FLAG_CTX + (bIsLuma ? 0 : NUM_ABS_FLAG_CTX_LUMA)];
+    const int numOneCtx = bIsLuma ? NUM_ONE_FLAG_CTX_LUMA : NUM_ONE_FLAG_CTX_CHROMA;
+    const int numAbsCtx = bIsLuma ? NUM_ABS_FLAG_CTX_LUMA : NUM_ABS_FLAG_CTX_CHROMA;
+
+    for (int i = 0; i < numOneCtx; i++)
+    {
+        estBitsSbac.greaterOneBits[i][0] = sbacGetEntropyBits(ctxOne[i], 0);
+        estBitsSbac.greaterOneBits[i][1] = sbacGetEntropyBits(ctxOne[i], 1);
+    }
+
+    for (int i = 0; i < numAbsCtx; i++)
+    {
+        estBitsSbac.levelAbsBits[i][0] = sbacGetEntropyBits(ctxAbs[i], 0);
+        estBitsSbac.levelAbsBits[i][1] = sbacGetEntropyBits(ctxAbs[i], 1);
+    }
+}
+
+/* Clone every CABAC context model from a previously-validated source
+ * encoder state and mark this instance valid. */
+void Entropy::copyContextsFrom(const Entropy& src)
+{
+    X265_CHECK(src.m_valid, "invalid copy source context\n");
+
+    memcpy(m_contextState, src.m_contextState, sizeof(m_contextState[0]) * MAX_OFF_CTX_MOD);
+    markValid();
+}
+
+/* Reset the binary arithmetic coder engine to its initial state
+ * (range 510, empty byte buffer, 12 spare bits). */
+void Entropy::start()
+{
+    m_low              = 0;
+    m_range            = 510;
+    m_bitsLeft         = -12;
+    m_numBufferedBytes = 0;
+    m_bufferedByte     = 0xff;
+}
+
+/* Flush the arithmetic coder at the end of a coded segment: resolve the
+ * pending carry into the stand-by byte chain, emit outstanding buffered
+ * bytes, then write the remaining bits of the low register. */
+void Entropy::finish()
+{
+    if (m_low >> (21 + m_bitsLeft))
+    {
+        // carry out of the low register: increment the buffered byte and
+        // flush any further stand-by bytes as 0x00
+        m_bitIf->writeByte(m_bufferedByte + 1);
+        while (m_numBufferedBytes > 1)
+        {
+            m_bitIf->writeByte(0x00);
+            m_numBufferedBytes--;
+        }
+
+        m_low -= 1 << (21 + m_bitsLeft);
+    }
+    else
+    {
+        // no carry: buffered byte goes out unchanged, stand-by bytes as 0xff
+        if (m_numBufferedBytes > 0)
+            m_bitIf->writeByte(m_bufferedByte);
+
+        while (m_numBufferedBytes > 1)
+        {
+            m_bitIf->writeByte(0xff);
+            m_numBufferedBytes--;
+        }
+    }
+    m_bitIf->write(m_low >> 8, 13 + m_bitsLeft);
+}
+
+/* Snapshot the arithmetic coder engine state (not the context models)
+ * from another Entropy instance. */
+void Entropy::copyState(const Entropy& other)
+{
+    m_low              = other.m_low;
+    m_range            = other.m_range;
+    m_bitsLeft         = other.m_bitsLeft;
+    m_bufferedByte     = other.m_bufferedByte;
+    m_numBufferedBytes = other.m_numBufferedBytes;
+    m_fracBits         = other.m_fracBits;
+}
+
+/* Clear the accumulated bit counts, keeping only the fractional (sub-bit,
+ * low 15 bits) remainder of the RD estimate; also reset the attached
+ * bitstream interface if one is present. */
+void Entropy::resetBits()
+{
+    m_low              = 0;
+    m_bitsLeft         = -12;
+    m_numBufferedBytes = 0;
+    m_bufferedByte     = 0xff;
+    m_fracBits        &= (1 << 15) - 1;
+    if (m_bitIf)
+        m_bitIf->resetBits();
+}
+
+/** Encode bin */
+void Entropy::encodeBin(uint32_t binValue, uint8_t &ctxModel)
+{
+    uint32_t mstate = ctxModel;
+
+    ctxModel = sbacNext(mstate, binValue);
+
+    if (!m_bitIf)
+    {
+        m_fracBits += sbacGetEntropyBits(mstate, binValue);
+        return;
+    }
+
+    uint32_t range = m_range;
+    uint32_t state = sbacGetState(mstate);
+    uint32_t lps = g_lpsTable[state][((uint8_t)range >> 6)];
+    range -= lps;
+
+    X265_CHECK(lps >= 2, "lps is too small\n");
+
+    int numBits = (uint32_t)(range - 256) >> 31;
+    uint32_t low = m_low;
+
+    // NOTE: MPS must be LOWEST bit in mstate
+    X265_CHECK((uint32_t)((binValue ^ mstate) & 1) == (uint32_t)(binValue != sbacGetMps(mstate)), "binValue failure\n");
+    if ((binValue ^ mstate) & 1)
+    {
+        // NOTE: lps is non-zero and the maximum of idx is 8 because lps less than 256
+        //numBits = g_renormTable[lps >> 3];
+        unsigned long idx;
+        CLZ(idx, lps);
+        X265_CHECK(state != 63 || idx == 1, "state failure\n");
+
+        numBits = 8 - idx;
+        if (state >= 63)
+            numBits = 6;
+        X265_CHECK(numBits <= 6, "numBits failure\n");
+
+        low += range;
+        range = lps;
+    }
+    m_low = (low << numBits);
+    m_range = (range << numBits);
+    m_bitsLeft += numBits;
+
+    if (m_bitsLeft >= 0)
+        writeOut();
+}
+
+/** Encode equiprobable bin */
+void Entropy::encodeBinEP(uint32_t binValue)
+{
+    if (!m_bitIf)
+    {
+        m_fracBits += 32768;
+        return;
+    }
+    m_low <<= 1;
+    if (binValue)
+        m_low += m_range;
+    m_bitsLeft++;
+
+    if (m_bitsLeft >= 0)
+        writeOut();
+}
+
+/** Encode equiprobable bins */
+void Entropy::encodeBinsEP(uint32_t binValues, int numBins)
+{
+    if (!m_bitIf)
+    {
+        m_fracBits += 32768 * numBins;
+        return;
+    }
+
+    while (numBins > 8)
+    {
+        numBins -= 8;
+        uint32_t pattern = binValues >> numBins;
+        m_low <<= 8;
+        m_low += m_range * pattern;
+        binValues -= pattern << numBins;
+        m_bitsLeft += 8;
+
+        if (m_bitsLeft >= 0)
+            writeOut();
+    }
+
+    m_low <<= numBins;
+    m_low += m_range * binValues;
+    m_bitsLeft += numBins;
+
+    if (m_bitsLeft >= 0)
+        writeOut();
+}
+
+/** Encode terminating bin */
+void Entropy::encodeBinTrm(uint32_t binValue)
+{
+    if (!m_bitIf)
+    {
+        m_fracBits += sbacGetEntropyBitsTrm(binValue);
+        return;
+    }
+
+    m_range -= 2;
+    if (binValue)
+    {
+        m_low += m_range;
+        m_low <<= 7;
+        m_range = 2 << 7;
+        m_bitsLeft += 7;
+    }
+    else if (m_range >= 256)
+        return;
+    else
+    {
+        m_low <<= 1;
+        m_range <<= 1;
+        m_bitsLeft++;
+    }
+
+    if (m_bitsLeft >= 0)
+        writeOut();
+}
+
+/** Move bits from register into bitstream */
+void Entropy::writeOut()
+{
+    uint32_t leadByte = m_low >> (13 + m_bitsLeft);
+    uint32_t low_mask = (uint32_t)(~0) >> (11 + 8 - m_bitsLeft);
+
+    m_bitsLeft -= 8;
+    m_low &= low_mask;
+
+    if (leadByte == 0xff)
+        m_numBufferedBytes++;
+    else
+    {
+        uint32_t numBufferedBytes = m_numBufferedBytes;
+        if (numBufferedBytes > 0)
+        {
+            uint32_t carry = leadByte >> 8;
+            uint32_t byteTowrite = m_bufferedByte + carry;
+            m_bitIf->writeByte(byteTowrite);
+
+            byteTowrite = (0xff + carry) & 0xff;
+            while (numBufferedBytes > 1)
+            {
+                m_bitIf->writeByte(byteTowrite);
+                numBufferedBytes--;
+            }
+        }
+        m_numBufferedBytes = 1;
+        m_bufferedByte = (uint8_t)leadByte;
+    }
+}
+
+const uint32_t g_entropyBits[128] =
+{
+    // Corrected table, most notably for last state
+    0x07b23, 0x085f9, 0x074a0, 0x08cbc, 0x06ee4, 0x09354, 0x067f4, 0x09c1b, 0x060b0, 0x0a62a, 0x05a9c, 0x0af5b, 0x0548d, 0x0b955, 0x04f56, 0x0c2a9,
+    0x04a87, 0x0cbf7, 0x045d6, 0x0d5c3, 0x04144, 0x0e01b, 0x03d88, 0x0e937, 0x039e0, 0x0f2cd, 0x03663, 0x0fc9e, 0x03347, 0x10600, 0x03050, 0x10f95,
+    0x02d4d, 0x11a02, 0x02ad3, 0x12333, 0x0286e, 0x12cad, 0x02604, 0x136df, 0x02425, 0x13f48, 0x021f4, 0x149c4, 0x0203e, 0x1527b, 0x01e4d, 0x15d00,
+    0x01c99, 0x166de, 0x01b18, 0x17017, 0x019a5, 0x17988, 0x01841, 0x18327, 0x016df, 0x18d50, 0x015d9, 0x19547, 0x0147c, 0x1a083, 0x0138e, 0x1a8a3,
+    0x01251, 0x1b418, 0x01166, 0x1bd27, 0x01068, 0x1c77b, 0x00f7f, 0x1d18e, 0x00eda, 0x1d91a, 0x00e19, 0x1e254, 0x00d4f, 0x1ec9a, 0x00c90, 0x1f6e0,
+    0x00c01, 0x1fef8, 0x00b5f, 0x208b1, 0x00ab6, 0x21362, 0x00a15, 0x21e46, 0x00988, 0x2285d, 0x00934, 0x22ea8, 0x008a8, 0x239b2, 0x0081d, 0x24577,
+    0x007c9, 0x24ce6, 0x00763, 0x25663, 0x00710, 0x25e8f, 0x006a0, 0x26a26, 0x00672, 0x26f23, 0x005e8, 0x27ef8, 0x005ba, 0x284b5, 0x0055e, 0x29057,
+    0x0050c, 0x29bab, 0x004c1, 0x2a674, 0x004a7, 0x2aa5e, 0x0046f, 0x2b32f, 0x0041f, 0x2c0ad, 0x003e7, 0x2ca8d, 0x003ba, 0x2d323, 0x0010c, 0x3bfbb
+};
+
+const uint8_t g_nextState[128][2] =
+{
+    { 2, 1 }, { 0, 3 }, { 4, 0 }, { 1, 5 }, { 6, 2 }, { 3, 7 }, { 8, 4 }, { 5, 9 },
+    { 10, 4 }, { 5, 11 }, { 12, 8 }, { 9, 13 }, { 14, 8 }, { 9, 15 }, { 16, 10 }, { 11, 17 },
+    { 18, 12 }, { 13, 19 }, { 20, 14 }, { 15, 21 }, { 22, 16 }, { 17, 23 }, { 24, 18 }, { 19, 25 },
+    { 26, 18 }, { 19, 27 }, { 28, 22 }, { 23, 29 }, { 30, 22 }, { 23, 31 }, { 32, 24 }, { 25, 33 },
+    { 34, 26 }, { 27, 35 }, { 36, 26 }, { 27, 37 }, { 38, 30 }, { 31, 39 }, { 40, 30 }, { 31, 41 },
+    { 42, 32 }, { 33, 43 }, { 44, 32 }, { 33, 45 }, { 46, 36 }, { 37, 47 }, { 48, 36 }, { 37, 49 },
+    { 50, 38 }, { 39, 51 }, { 52, 38 }, { 39, 53 }, { 54, 42 }, { 43, 55 }, { 56, 42 }, { 43, 57 },
+    { 58, 44 }, { 45, 59 }, { 60, 44 }, { 45, 61 }, { 62, 46 }, { 47, 63 }, { 64, 48 }, { 49, 65 },
+    { 66, 48 }, { 49, 67 }, { 68, 50 }, { 51, 69 }, { 70, 52 }, { 53, 71 }, { 72, 52 }, { 53, 73 },
+    { 74, 54 }, { 55, 75 }, { 76, 54 }, { 55, 77 }, { 78, 56 }, { 57, 79 }, { 80, 58 }, { 59, 81 },
+    { 82, 58 }, { 59, 83 }, { 84, 60 }, { 61, 85 }, { 86, 60 }, { 61, 87 }, { 88, 60 }, { 61, 89 },
+    { 90, 62 }, { 63, 91 }, { 92, 64 }, { 65, 93 }, { 94, 64 }, { 65, 95 }, { 96, 66 }, { 67, 97 },
+    { 98, 66 }, { 67, 99 }, { 100, 66 }, { 67, 101 }, { 102, 68 }, { 69, 103 }, { 104, 68 }, { 69, 105 },
+    { 106, 70 }, { 71, 107 }, { 108, 70 }, { 71, 109 }, { 110, 70 }, { 71, 111 }, { 112, 72 }, { 73, 113 },
+    { 114, 72 }, { 73, 115 }, { 116, 72 }, { 73, 117 }, { 118, 74 }, { 75, 119 }, { 120, 74 }, { 75, 121 },
+    { 122, 74 }, { 75, 123 }, { 124, 76 }, { 77, 125 }, { 124, 76 }, { 77, 125 }, { 126, 126 }, { 127, 127 }
+};
+
+}
+
+// [8 24] --> [stateMPS BitCost], [stateLPS BitCost]
+extern "C" const uint32_t PFX(entropyStateBits)[128] =
+{
+    // Corrected table, most notably for last state
+    0x02007B23, 0x000085F9, 0x040074A0, 0x00008CBC, 0x06006EE4, 0x02009354, 0x080067F4, 0x04009C1B,
+    0x0A0060B0, 0x0400A62A, 0x0C005A9C, 0x0800AF5B, 0x0E00548D, 0x0800B955, 0x10004F56, 0x0A00C2A9,
+    0x12004A87, 0x0C00CBF7, 0x140045D6, 0x0E00D5C3, 0x16004144, 0x1000E01B, 0x18003D88, 0x1200E937,
+    0x1A0039E0, 0x1200F2CD, 0x1C003663, 0x1600FC9E, 0x1E003347, 0x16010600, 0x20003050, 0x18010F95,
+    0x22002D4D, 0x1A011A02, 0x24002AD3, 0x1A012333, 0x2600286E, 0x1E012CAD, 0x28002604, 0x1E0136DF,
+    0x2A002425, 0x20013F48, 0x2C0021F4, 0x200149C4, 0x2E00203E, 0x2401527B, 0x30001E4D, 0x24015D00,
+    0x32001C99, 0x260166DE, 0x34001B18, 0x26017017, 0x360019A5, 0x2A017988, 0x38001841, 0x2A018327,
+    0x3A0016DF, 0x2C018D50, 0x3C0015D9, 0x2C019547, 0x3E00147C, 0x2E01A083, 0x4000138E, 0x3001A8A3,
+    0x42001251, 0x3001B418, 0x44001166, 0x3201BD27, 0x46001068, 0x3401C77B, 0x48000F7F, 0x3401D18E,
+    0x4A000EDA, 0x3601D91A, 0x4C000E19, 0x3601E254, 0x4E000D4F, 0x3801EC9A, 0x50000C90, 0x3A01F6E0,
+    0x52000C01, 0x3A01FEF8, 0x54000B5F, 0x3C0208B1, 0x56000AB6, 0x3C021362, 0x58000A15, 0x3C021E46,
+    0x5A000988, 0x3E02285D, 0x5C000934, 0x40022EA8, 0x5E0008A8, 0x400239B2, 0x6000081D, 0x42024577,
+    0x620007C9, 0x42024CE6, 0x64000763, 0x42025663, 0x66000710, 0x44025E8F, 0x680006A0, 0x44026A26,
+    0x6A000672, 0x46026F23, 0x6C0005E8, 0x46027EF8, 0x6E0005BA, 0x460284B5, 0x7000055E, 0x48029057,
+    0x7200050C, 0x48029BAB, 0x740004C1, 0x4802A674, 0x760004A7, 0x4A02AA5E, 0x7800046F, 0x4A02B32F,
+    0x7A00041F, 0x4A02C0AD, 0x7C0003E7, 0x4C02CA8D, 0x7C0003BA, 0x4C02D323, 0x7E00010C, 0x7E03BFBB,
+};
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/entropy.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,255 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_ENTROPY_H
+#define X265_ENTROPY_H
+
+#include "common.h"
+#include "bitstream.h"
+#include "frame.h"
+#include "cudata.h"
+#include "contexts.h"
+#include "slice.h"
+
+namespace X265_NS {
+// private namespace
+
+struct SaoCtuParam;
+struct EstBitsSbac;
+class ScalingList;
+
+enum SplitType
+{
+    DONT_SPLIT            = 0,
+    VERTICAL_SPLIT        = 1,
+    QUAD_SPLIT            = 2,
+    NUMBER_OF_SPLIT_MODES = 3
+};
+
+struct TURecurse
+{
+    uint32_t section;
+    uint32_t splitMode;
+    uint32_t absPartIdxTURelCU;
+    uint32_t absPartIdxStep;
+
+    TURecurse(SplitType splitType, uint32_t _absPartIdxStep, uint32_t _absPartIdxTU)
+    {
+        static const uint32_t partIdxStepShift[NUMBER_OF_SPLIT_MODES] = { 0, 1, 2 };
+        section           = 0;
+        absPartIdxTURelCU = _absPartIdxTU;
+        splitMode         = (uint32_t)splitType;
+        absPartIdxStep    = _absPartIdxStep >> partIdxStepShift[splitMode];
+    }
+
+    bool isNextSection()
+    {
+        if (splitMode == DONT_SPLIT)
+        {
+            section++;
+            return false;
+        }
+        else
+        {
+            absPartIdxTURelCU += absPartIdxStep;
+
+            section++;
+            return section < (uint32_t)(1 << splitMode);
+        }
+    }
+
+    bool isLastSection() const
+    {
+        return (section + 1) >= (uint32_t)(1 << splitMode);
+    }
+};
+
+struct EstBitsSbac
+{
+    int significantCoeffGroupBits[NUM_SIG_CG_FLAG_CTX][2];
+    int significantBits[2][NUM_SIG_FLAG_CTX];
+    int lastBits[2][10];
+    int greaterOneBits[NUM_ONE_FLAG_CTX][2];
+    int levelAbsBits[NUM_ABS_FLAG_CTX][2];
+    int blockCbpBits[NUM_QT_CBF_CTX][2];
+    int blockRootCbpBits[2];
+};
+
+class Entropy : public SyntaxElementWriter
+{
+public:
+
+    uint64_t      m_pad;
+    uint8_t       m_contextState[160]; // MAX_OFF_CTX_MOD + padding
+
+    /* CABAC state */
+    uint32_t      m_low;
+    uint32_t      m_range;
+    uint32_t      m_bufferedByte;
+    int           m_numBufferedBytes;
+    int           m_bitsLeft;
+    uint64_t      m_fracBits;
+    EstBitsSbac   m_estBitsSbac;
+
+    Entropy();
+
+    void setBitstream(Bitstream* p)    { m_bitIf = p; }
+
+    uint32_t getNumberOfWrittenBits()
+    {
+        X265_CHECK(!m_bitIf, "bit counting mode expected\n");
+        return (uint32_t)(m_fracBits >> 15);
+    }
+
+#if CHECKED_BUILD || _DEBUG
+    bool m_valid;
+    void markInvalid()                 { m_valid = false; }
+    void markValid()                   { m_valid = true; }
+#else
+    void markValid()                   { }
+#endif
+    void zeroFract()                   { m_fracBits = 0; }
+    void resetBits();
+    void resetEntropy(const Slice& slice);
+
+    // SBAC RD
+    void load(const Entropy& src)            { copyFrom(src); }
+    void store(Entropy& dest) const          { dest.copyFrom(*this); }
+    void loadContexts(const Entropy& src)    { copyContextsFrom(src); }
+    void loadIntraDirModeLuma(const Entropy& src);
+    void copyState(const Entropy& other);
+
+    void codeVPS(const VPS& vps);
+    void codeSPS(const SPS& sps, const ScalingList& scalingList, const ProfileTierLevel& ptl);
+    void codePPS(const PPS& pps);
+    void codeVUI(const VUI& vui, int maxSubTLayers);
+    void codeAUD(const Slice& slice);
+    void codeHrdParameters(const HRDInfo& hrd, int maxSubTLayers);
+
+    void codeSliceHeader(const Slice& slice, FrameData& encData);
+    void codeSliceHeaderWPPEntryPoints(const Slice& slice, const uint32_t *substreamSizes, uint32_t maxOffset);
+    void codeShortTermRefPicSet(const RPS& rps);
+    void finishSlice()                 { encodeBinTrm(1); finish(); dynamic_cast<Bitstream*>(m_bitIf)->writeByteAlignment(); }
+
+    void encodeCTU(const CUData& cu, const CUGeom& cuGeom);
+
+    void codeIntraDirLumaAng(const CUData& cu, uint32_t absPartIdx, bool isMultiple);
+    void codeIntraDirChroma(const CUData& cu, uint32_t absPartIdx, uint32_t *chromaDirMode);
+
+    void codeMergeIndex(const CUData& cu, uint32_t absPartIdx);
+    void codeMvd(const CUData& cu, uint32_t absPartIdx, int list);
+
+    void codePartSize(const CUData& cu, uint32_t absPartIdx, uint32_t depth);
+    void codePredInfo(const CUData& cu, uint32_t absPartIdx);
+    inline void codeQtCbfLuma(const CUData& cu, uint32_t absPartIdx, uint32_t tuDepth) { codeQtCbfLuma(cu.getCbf(absPartIdx, TEXT_LUMA, tuDepth), tuDepth); }
+
+    void codeQtCbfChroma(const CUData& cu, uint32_t absPartIdx, TextType ttype, uint32_t tuDepth, bool lowestLevel);
+    void codeCoeff(const CUData& cu, uint32_t absPartIdx, bool& bCodeDQP, const uint32_t depthRange[2]);
+    void codeCoeffNxN(const CUData& cu, const coeff_t* coef, uint32_t absPartIdx, uint32_t log2TrSize, TextType ttype);
+
+    inline void codeSaoMerge(uint32_t code)                          { encodeBin(code, m_contextState[OFF_SAO_MERGE_FLAG_CTX]); }
+    inline void codeMVPIdx(uint32_t symbol)                          { encodeBin(symbol, m_contextState[OFF_MVP_IDX_CTX]); }
+    inline void codeMergeFlag(const CUData& cu, uint32_t absPartIdx) { encodeBin(cu.m_mergeFlag[absPartIdx], m_contextState[OFF_MERGE_FLAG_EXT_CTX]); }
+    inline void codeSkipFlag(const CUData& cu, uint32_t absPartIdx)  { encodeBin(cu.isSkipped(absPartIdx), m_contextState[OFF_SKIP_FLAG_CTX + cu.getCtxSkipFlag(absPartIdx)]); }
+    inline void codeSplitFlag(const CUData& cu, uint32_t absPartIdx, uint32_t depth) { encodeBin(cu.m_cuDepth[absPartIdx] > depth, m_contextState[OFF_SPLIT_FLAG_CTX + cu.getCtxSplitFlag(absPartIdx, depth)]); }
+    inline void codeTransformSubdivFlag(uint32_t symbol, uint32_t ctx)    { encodeBin(symbol, m_contextState[OFF_TRANS_SUBDIV_FLAG_CTX + ctx]); }
+    inline void codePredMode(int predMode)                                { encodeBin(predMode == MODE_INTRA ? 1 : 0, m_contextState[OFF_PRED_MODE_CTX]); }
+    inline void codeCUTransquantBypassFlag(uint32_t symbol)               { encodeBin(symbol, m_contextState[OFF_TQUANT_BYPASS_FLAG_CTX]); }
+    inline void codeQtCbfLuma(uint32_t cbf, uint32_t tuDepth)             { encodeBin(cbf, m_contextState[OFF_QT_CBF_CTX + !tuDepth]); }
+    inline void codeQtCbfChroma(uint32_t cbf, uint32_t tuDepth)           { encodeBin(cbf, m_contextState[OFF_QT_CBF_CTX + 2 + tuDepth]); }
+    inline void codeQtRootCbf(uint32_t cbf)                               { encodeBin(cbf, m_contextState[OFF_QT_ROOT_CBF_CTX]); }
+    inline void codeTransformSkipFlags(uint32_t transformSkip, TextType ttype) { encodeBin(transformSkip, m_contextState[OFF_TRANSFORMSKIP_FLAG_CTX + (ttype ? NUM_TRANSFORMSKIP_FLAG_CTX : 0)]); }
+    void codeDeltaQP(const CUData& cu, uint32_t absPartIdx);
+    void codeSaoOffset(const SaoCtuParam& ctuParam, int plane);
+
+    /* RDO functions */
+    void estBit(EstBitsSbac& estBitsSbac, uint32_t log2TrSize, bool bIsLuma) const;
+    void estCBFBit(EstBitsSbac& estBitsSbac) const;
+    void estSignificantCoeffGroupMapBit(EstBitsSbac& estBitsSbac, bool bIsLuma) const;
+    void estSignificantMapBit(EstBitsSbac& estBitsSbac, uint32_t log2TrSize, bool bIsLuma) const;
+    void estSignificantCoefficientsBit(EstBitsSbac& estBitsSbac, bool bIsLuma) const;
+
+    inline uint32_t bitsIntraModeNonMPM() const { return bitsCodeBin(0, m_contextState[OFF_ADI_CTX]) + 5; }
+    inline uint32_t bitsIntraModeMPM(const uint32_t preds[3], uint32_t dir) const { return bitsCodeBin(1, m_contextState[OFF_ADI_CTX]) + (dir == preds[0] ? 1 : 2); }
+    inline uint32_t estimateCbfBits(uint32_t cbf, TextType ttype, uint32_t tuDepth) const { return bitsCodeBin(cbf, m_contextState[OFF_QT_CBF_CTX + ctxCbf[ttype][tuDepth]]); }
+    uint32_t bitsInterMode(const CUData& cu, uint32_t absPartIdx, uint32_t depth) const;
+    uint32_t bitsIntraMode(const CUData& cu, uint32_t absPartIdx) const
+    {
+        return bitsCodeBin(0, m_contextState[OFF_SKIP_FLAG_CTX + cu.getCtxSkipFlag(absPartIdx)]) + /* not skip */
+               bitsCodeBin(1, m_contextState[OFF_PRED_MODE_CTX]); /* intra */
+    }
+
+    /* these functions are only used to estimate the bits when cbf is 0 and will never be called when writing the bistream. */
+    inline void codeQtRootCbfZero() { encodeBin(0, m_contextState[OFF_QT_ROOT_CBF_CTX]); }
+
+private:
+
+    /* CABAC private methods */
+    void start();
+    void finish();
+
+    void encodeBin(uint32_t binValue, uint8_t& ctxModel);
+    void encodeBinEP(uint32_t binValue);
+    void encodeBinsEP(uint32_t binValues, int numBins);
+    void encodeBinTrm(uint32_t binValue);
+
+    /* return the bits of encoding the context bin without updating */
+    inline uint32_t bitsCodeBin(uint32_t binValue, uint32_t ctxModel) const
+    {
+        uint64_t fracBits = (m_fracBits & 32767) + sbacGetEntropyBits(ctxModel, binValue);
+        return (uint32_t)(fracBits >> 15);
+    }
+
+    void encodeCU(const CUData& ctu, const CUGeom &cuGeom, uint32_t absPartIdx, uint32_t depth, bool& bEncodeDQP);
+    void finishCU(const CUData& ctu, uint32_t absPartIdx, uint32_t depth, bool bEncodeDQP);
+
+    void writeOut();
+
+    /* SBac private methods */
+    void writeUnaryMaxSymbol(uint32_t symbol, uint8_t* scmModel, int offset, uint32_t maxSymbol);
+    void writeEpExGolomb(uint32_t symbol, uint32_t count);
+    void writeCoefRemainExGolomb(uint32_t symbol, const uint32_t absGoRice);
+
+    void codeProfileTier(const ProfileTierLevel& ptl, int maxTempSubLayers);
+    void codeScalingList(const ScalingList&);
+    void codeScalingList(const ScalingList& scalingList, uint32_t sizeId, uint32_t listId);
+
+    void codePredWeightTable(const Slice& slice);
+    void codeInterDir(const CUData& cu, uint32_t absPartIdx);
+    void codePUWise(const CUData& cu, uint32_t absPartIdx);
+    void codeRefFrmIdxPU(const CUData& cu, uint32_t absPartIdx, int list);
+    void codeRefFrmIdx(const CUData& cu, uint32_t absPartIdx, int list);
+
+    void codeSaoMaxUvlc(uint32_t code, uint32_t maxSymbol);
+
+    void codeLastSignificantXY(uint32_t posx, uint32_t posy, uint32_t log2TrSize, bool bIsLuma, uint32_t scanIdx);
+
+    void encodeTransform(const CUData& cu, uint32_t absPartIdx, uint32_t tuDepth, uint32_t log2TrSize,
+                         bool& bCodeDQP, const uint32_t depthRange[2]);
+
+    void copyFrom(const Entropy& src);
+    void copyContextsFrom(const Entropy& src);
+};
+}
+
+#endif // ifndef X265_ENTROPY_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/frameencoder.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1312 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Chung Shin Yee <shinyee@multicorewareinc.com>
+ *          Min Chen <chenm003@163.com>
+ *          Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "frame.h"
+#include "framedata.h"
+#include "wavefront.h"
+#include "param.h"
+
+#include "encoder.h"
+#include "frameencoder.h"
+#include "common.h"
+#include "slicetype.h"
+#include "nal.h"
+
+namespace X265_NS {
+void weightAnalyse(Slice& slice, Frame& frame, x265_param& param);
+
+FrameEncoder::FrameEncoder()
+{
+    m_prevOutputTime = x265_mdate();
+    m_isFrameEncoder = true;
+    m_threadActive = true;
+    m_slicetypeWaitTime = 0;
+    m_activeWorkerCount = 0;
+    m_completionCount = 0;
+    m_bAllRowsStop = false;
+    m_vbvResetTriggerRow = -1;
+    m_outStreams = NULL;
+    m_substreamSizes = NULL;
+    m_nr = NULL;
+    m_tld = NULL;
+    m_rows = NULL;
+    m_top = NULL;
+    m_param = NULL;
+    m_frame = NULL;
+    m_cuGeoms = NULL;
+    m_ctuGeomMap = NULL;
+    m_localTldIdx = 0;
+    memset(&m_rce, 0, sizeof(RateControlEntry));
+}
+
+void FrameEncoder::destroy()
+{
+    if (m_pool)
+    {
+        if (!m_jpId)
+        {
+            int numTLD = m_pool->m_numWorkers;
+            if (!m_param->bEnableWavefront)
+                numTLD += m_pool->m_numProviders;
+            for (int i = 0; i < numTLD; i++)
+                m_tld[i].destroy();
+            delete [] m_tld;
+        }
+    }
+    else
+    {
+        m_tld->destroy();
+        delete m_tld;
+    }
+
+    delete[] m_rows;
+    delete[] m_outStreams;
+    X265_FREE(m_cuGeoms);
+    X265_FREE(m_ctuGeomMap);
+    X265_FREE(m_substreamSizes);
+    X265_FREE(m_nr);
+
+    m_frameFilter.destroy();
+
+    if (m_param->bEmitHRDSEI || !!m_param->interlaceMode)
+    {
+        delete m_rce.picTimingSEI;
+        delete m_rce.hrdTiming;
+    }
+}
+
+bool FrameEncoder::init(Encoder *top, int numRows, int numCols)
+{
+    m_top = top;
+    m_param = top->m_param;
+    m_numRows = numRows;
+    m_numCols = numCols;
+    m_filterRowDelay = (m_param->bEnableSAO && m_param->bSaoNonDeblocked) ?
+                        2 : (m_param->bEnableSAO || m_param->bEnableLoopFilter ? 1 : 0);
+    m_filterRowDelayCus = m_filterRowDelay * numCols;
+    m_rows = new CTURow[m_numRows];
+    bool ok = !!m_numRows;
+
+    /* determine full motion search range */
+    int range  = m_param->searchRange;       /* fpel search */
+    range += !!(m_param->searchMethod < 2);  /* diamond/hex range check lag */
+    range += NTAPS_LUMA / 2;                 /* subpel filter half-length */
+    range += 2 + MotionEstimate::hpelIterationCount(m_param->subpelRefine) / 2; /* subpel refine steps */
+    m_refLagRows = 1 + ((range + g_maxCUSize - 1) / g_maxCUSize);
+
+    // NOTE: 2 times of numRows because both Encoder and Filter in same queue
+    if (!WaveFront::init(m_numRows * 2))
+    {
+        x265_log(m_param, X265_LOG_ERROR, "unable to initialize wavefront queue\n");
+        m_pool = NULL;
+    }
+
+    m_frameFilter.init(top, this, numRows);
+
+    // initialize HRD parameters of SPS
+    if (m_param->bEmitHRDSEI || !!m_param->interlaceMode)
+    {
+        m_rce.picTimingSEI = new SEIPictureTiming;
+        m_rce.hrdTiming = new HRDTiming;
+
+        ok &= m_rce.picTimingSEI && m_rce.hrdTiming;
+    }
+
+    if (m_param->noiseReductionIntra || m_param->noiseReductionInter || m_param->rc.vbvBufferSize)
+        m_nr = X265_MALLOC(NoiseReduction, 1);
+    if (m_nr)
+        memset(m_nr, 0, sizeof(NoiseReduction));
+    else
+        m_param->noiseReductionIntra = m_param->noiseReductionInter = 0;
+
+    return ok;
+}
+
+/* Generate a complete list of unique geom sets for the current picture dimensions */
+bool FrameEncoder::initializeGeoms()
+{
+    /* Geoms only vary between CTUs in the presence of picture edges */
+    int maxCUSize = m_param->maxCUSize;
+    int minCUSize = m_param->minCUSize;
+    int heightRem = m_param->sourceHeight & (maxCUSize - 1);
+    int widthRem = m_param->sourceWidth & (maxCUSize - 1);
+    int allocGeoms = 1; // body
+    if (heightRem && widthRem)
+        allocGeoms = 4; // body, right, bottom, corner
+    else if (heightRem || widthRem)
+        allocGeoms = 2; // body, right or bottom
+
+    m_ctuGeomMap = X265_MALLOC(uint32_t, m_numRows * m_numCols);
+    m_cuGeoms = X265_MALLOC(CUGeom, allocGeoms * CUGeom::MAX_GEOMS);
+    if (!m_cuGeoms || !m_ctuGeomMap)
+        return false;
+
+    // body
+    CUData::calcCTUGeoms(maxCUSize, maxCUSize, maxCUSize, minCUSize, m_cuGeoms);
+    memset(m_ctuGeomMap, 0, sizeof(uint32_t) * m_numRows * m_numCols);
+    if (allocGeoms == 1)
+        return true;
+
+    int countGeoms = 1;
+    if (widthRem)
+    {
+        // right
+        CUData::calcCTUGeoms(widthRem, maxCUSize, maxCUSize, minCUSize, m_cuGeoms + countGeoms * CUGeom::MAX_GEOMS);
+        for (uint32_t i = 0; i < m_numRows; i++)
+        {
+            uint32_t ctuAddr = m_numCols * (i + 1) - 1;
+            m_ctuGeomMap[ctuAddr] = countGeoms * CUGeom::MAX_GEOMS;
+        }
+        countGeoms++;
+    }
+    if (heightRem)
+    {
+        // bottom
+        CUData::calcCTUGeoms(maxCUSize, heightRem, maxCUSize, minCUSize, m_cuGeoms + countGeoms * CUGeom::MAX_GEOMS);
+        for (uint32_t i = 0; i < m_numCols; i++)
+        {
+            uint32_t ctuAddr = m_numCols * (m_numRows - 1) + i;
+            m_ctuGeomMap[ctuAddr] = countGeoms * CUGeom::MAX_GEOMS;
+        }
+        countGeoms++;
+
+        if (widthRem)
+        {
+            // corner
+            CUData::calcCTUGeoms(widthRem, heightRem, maxCUSize, minCUSize, m_cuGeoms + countGeoms * CUGeom::MAX_GEOMS);
+
+            uint32_t ctuAddr = m_numCols * m_numRows - 1;
+            m_ctuGeomMap[ctuAddr] = countGeoms * CUGeom::MAX_GEOMS;
+            countGeoms++;
+        }
+        X265_CHECK(countGeoms == allocGeoms, "geometry match check failure\n");
+    }
+
+    return true;
+}
+
+bool FrameEncoder::startCompressFrame(Frame* curFrame)
+{
+    m_slicetypeWaitTime = x265_mdate() - m_prevOutputTime;
+    m_frame = curFrame;
+    m_param = curFrame->m_param;
+    m_sliceType = curFrame->m_lowres.sliceType;
+    curFrame->m_encData->m_frameEncoderID = m_jpId;
+    curFrame->m_encData->m_jobProvider = this;
+    curFrame->m_encData->m_slice->m_mref = m_mref;
+
+    if (!m_cuGeoms)
+    {
+        if (!initializeGeoms())
+            return false;
+    }
+
+    m_enable.trigger();
+    return true;
+}
+
+void FrameEncoder::threadMain()
+{
+    THREAD_NAME("Frame", m_jpId);
+
+    if (m_pool)
+    {
+        m_pool->setCurrentThreadAffinity();
+
+        /* the first FE on each NUMA node is responsible for allocating thread
+         * local data for all worker threads in that pool. If WPP is disabled, then
+         * each FE also needs a TLD instance */
+        if (!m_jpId)
+        {
+            int numTLD = m_pool->m_numWorkers;
+            if (!m_param->bEnableWavefront)
+                numTLD += m_pool->m_numProviders;
+
+            m_tld = new ThreadLocalData[numTLD];
+            for (int i = 0; i < numTLD; i++)
+            {
+                m_tld[i].analysis.initSearch(*m_param, m_top->m_scalingList);
+                m_tld[i].analysis.create(m_tld);
+            }
+
+            for (int i = 0; i < m_pool->m_numProviders; i++)
+            {
+                if (m_pool->m_jpTable[i]->m_isFrameEncoder) /* ugh; over-allocation and other issues here */
+                {
+                    FrameEncoder *peer = dynamic_cast<FrameEncoder*>(m_pool->m_jpTable[i]);
+                    peer->m_tld = m_tld;
+                }
+            }
+        }
+
+        if (m_param->bEnableWavefront)
+            m_localTldIdx = -1; // cause exception if used
+        else
+            m_localTldIdx = m_pool->m_numWorkers + m_jpId;
+    }
+    else
+    {
+        m_tld = new ThreadLocalData;
+        m_tld->analysis.initSearch(*m_param, m_top->m_scalingList);
+        m_tld->analysis.create(NULL);
+        m_localTldIdx = 0;
+    }
+
+    m_done.trigger();     /* signal that thread is initialized */ 
+    m_enable.wait();      /* Encoder::encode() triggers this event */
+
+    while (m_threadActive)
+    {
+        compressFrame();
+        m_done.trigger(); /* FrameEncoder::getEncodedPicture() blocks for this event */
+        m_enable.wait();
+    }
+}
+
+void FrameEncoder::WeightAnalysis::processTasks(int /* workerThreadId */)
+{
+    Frame* frame = master.m_frame;
+    weightAnalyse(*frame->m_encData->m_slice, *frame, *master.m_param);
+}
+
+void FrameEncoder::compressFrame()
+{
+    ProfileScopeEvent(frameThread);
+
+    m_startCompressTime = x265_mdate();
+    m_totalActiveWorkerCount = 0;
+    m_activeWorkerCountSamples = 0;
+    m_totalWorkerElapsedTime = 0;
+    m_totalNoWorkerTime = 0;
+    m_countRowBlocks = 0;
+    m_allRowsAvailableTime = 0;
+    m_stallStartTime = 0;
+
+    m_completionCount = 0;
+    m_bAllRowsStop = false;
+    m_vbvResetTriggerRow = -1;
+
+    m_SSDY = m_SSDU = m_SSDV = 0;
+    m_ssim = 0;
+    m_ssimCnt = 0;
+    memset(&(m_frame->m_encData->m_frameStats), 0, sizeof(m_frame->m_encData->m_frameStats));
+
+    /* Emit access unit delimiter unless this is the first frame and the user is
+     * not repeating headers (since AUD is supposed to be the first NAL in the access
+     * unit) */
+    Slice* slice = m_frame->m_encData->m_slice;
+    if (m_param->bEnableAccessUnitDelimiters && (m_frame->m_poc || m_param->bRepeatHeaders))
+    {
+        m_bs.resetBits();
+        m_entropyCoder.setBitstream(&m_bs);
+        m_entropyCoder.codeAUD(*slice);
+        m_bs.writeByteAlignment();
+        m_nalList.serialize(NAL_UNIT_ACCESS_UNIT_DELIMITER, m_bs);
+    }
+    if (m_frame->m_lowres.bKeyframe && m_param->bRepeatHeaders)
+        m_top->getStreamHeaders(m_nalList, m_entropyCoder, m_bs);
+
+    // Weighted Prediction parameters estimation.
+    bool bUseWeightP = slice->m_sliceType == P_SLICE && slice->m_pps->bUseWeightPred;
+    bool bUseWeightB = slice->m_sliceType == B_SLICE && slice->m_pps->bUseWeightedBiPred;
+    if (bUseWeightP || bUseWeightB)
+    {
+#if DETAILED_CU_STATS
+        m_cuStats.countWeightAnalyze++;
+        ScopedElapsedTime time(m_cuStats.weightAnalyzeTime);
+#endif
+        WeightAnalysis wa(*this);
+        if (m_pool && wa.tryBondPeers(*this, 1))
+            /* use an idle worker for weight analysis */
+            wa.waitForExit();
+        else
+            weightAnalyse(*slice, *m_frame, *m_param);
+    }
+    else
+        slice->disableWeights();
+
+    // Generate motion references
+    int numPredDir = slice->isInterP() ? 1 : slice->isInterB() ? 2 : 0;
+    for (int l = 0; l < numPredDir; l++)
+    {
+        for (int ref = 0; ref < slice->m_numRefIdx[l]; ref++)
+        {
+            WeightParam *w = NULL;
+            if ((bUseWeightP || bUseWeightB) && slice->m_weightPredTable[l][ref][0].bPresentFlag)
+                w = slice->m_weightPredTable[l][ref];
+            slice->m_refReconPicList[l][ref] = slice->m_refFrameList[l][ref]->m_reconPic;
+            m_mref[l][ref].init(slice->m_refReconPicList[l][ref], w, *m_param);
+        }
+    }
+
+    int numTLD;
+    if (m_pool)
+        numTLD = m_param->bEnableWavefront ? m_pool->m_numWorkers : m_pool->m_numWorkers + m_pool->m_numProviders;
+    else
+        numTLD = 1;
+
+    /* Get the QP for this frame from rate control. This call may block until
+     * frames ahead of it in encode order have called rateControlEnd() */
+    int qp = m_top->m_rateControl->rateControlStart(m_frame, &m_rce, m_top);
+    m_rce.newQp = qp;
+
+    if (m_nr)
+    {
+        if (qp > QP_MAX_SPEC && m_frame->m_param->rc.vbvBufferSize)
+        {
+            for (int i = 0; i < numTLD; i++)
+            {
+                m_tld[i].analysis.m_quant.m_frameNr[m_jpId].offset = m_top->m_offsetEmergency[qp - QP_MAX_SPEC - 1];
+                m_tld[i].analysis.m_quant.m_frameNr[m_jpId].residualSum = m_top->m_residualSumEmergency;
+                m_tld[i].analysis.m_quant.m_frameNr[m_jpId].count = m_top->m_countEmergency;
+            }
+        }
+        else
+        {
+            if (m_param->noiseReductionIntra || m_param->noiseReductionInter)
+            {
+                for (int i = 0; i < numTLD; i++)
+                {
+                    m_tld[i].analysis.m_quant.m_frameNr[m_jpId].offset = m_tld[i].analysis.m_quant.m_frameNr[m_jpId].nrOffsetDenoise;
+                    m_tld[i].analysis.m_quant.m_frameNr[m_jpId].residualSum = m_tld[i].analysis.m_quant.m_frameNr[m_jpId].nrResidualSum;
+                    m_tld[i].analysis.m_quant.m_frameNr[m_jpId].count = m_tld[i].analysis.m_quant.m_frameNr[m_jpId].nrCount;
+                }
+            }
+            else
+            {
+                for (int i = 0; i < numTLD; i++)
+                    m_tld[i].analysis.m_quant.m_frameNr[m_jpId].offset = NULL;
+            }
+        }
+    }
+
+    /* Clip slice QP to 0-51 spec range before encoding */
+    slice->m_sliceQp = x265_clip3(-QP_BD_OFFSET, QP_MAX_SPEC, qp);
+
+    m_initSliceContext.resetEntropy(*slice);
+
+    m_frameFilter.start(m_frame, m_initSliceContext, qp);
+
+    /* ensure all rows are blocked prior to initializing row CTU counters */
+    WaveFront::clearEnabledRowMask();
+
+    /* reset entropy coders */
+    m_entropyCoder.load(m_initSliceContext);
+    for (uint32_t i = 0; i < m_numRows; i++)
+        m_rows[i].init(m_initSliceContext);
+
+    uint32_t numSubstreams = m_param->bEnableWavefront ? slice->m_sps->numCuInHeight : 1;
+    if (!m_outStreams)
+    {
+        m_outStreams = new Bitstream[numSubstreams];
+        m_substreamSizes = X265_MALLOC(uint32_t, numSubstreams);
+        if (!m_param->bEnableSAO)
+            for (uint32_t i = 0; i < numSubstreams; i++)
+                m_rows[i].rowGoOnCoder.setBitstream(&m_outStreams[i]);
+    }
+    else
+        for (uint32_t i = 0; i < numSubstreams; i++)
+            m_outStreams[i].resetBits();
+
+    int prevBPSEI = m_rce.encodeOrder ? m_top->m_lastBPSEI : 0;
+
+    if (m_frame->m_lowres.bKeyframe)
+    {
+        if (m_param->bEmitHRDSEI)
+        {
+            SEIBufferingPeriod* bpSei = &m_top->m_rateControl->m_bufPeriodSEI;
+
+            // since the temporal layer HRD is not ready, we assumed it is fixed
+            bpSei->m_auCpbRemovalDelayDelta = 1;
+            bpSei->m_cpbDelayOffset = 0;
+            bpSei->m_dpbDelayOffset = 0;
+
+            // hrdFullness() calculates the initial CPB removal delay and offset
+            m_top->m_rateControl->hrdFullness(bpSei);
+
+            m_bs.resetBits();
+            bpSei->write(m_bs, *slice->m_sps);
+            m_bs.writeByteAlignment();
+
+            m_nalList.serialize(NAL_UNIT_PREFIX_SEI, m_bs);
+
+            m_top->m_lastBPSEI = m_rce.encodeOrder;
+        }
+    }
+
+    if (m_param->bEmitHRDSEI || !!m_param->interlaceMode)
+    {
+        SEIPictureTiming *sei = m_rce.picTimingSEI;
+        const VUI *vui = &slice->m_sps->vuiParameters;
+        const HRDInfo *hrd = &vui->hrdParameters;
+        int poc = slice->m_poc;
+
+        if (vui->frameFieldInfoPresentFlag)
+        {
+            if (m_param->interlaceMode == 2)
+                sei->m_picStruct = (poc & 1) ? 1 /* top */ : 2 /* bottom */;
+            else if (m_param->interlaceMode == 1)
+                sei->m_picStruct = (poc & 1) ? 2 /* bottom */ : 1 /* top */;
+            else
+                sei->m_picStruct = 0;
+            sei->m_sourceScanType = 0;
+            sei->m_duplicateFlag = false;
+        }
+
+        if (vui->hrdParametersPresentFlag)
+        {
+            // The m_aucpbremoval delay specifies how many clock ticks the
+            // access unit associated with the picture timing SEI message has to
+            // wait after removal of the access unit with the most recent
+            // buffering period SEI message
+            sei->m_auCpbRemovalDelay = X265_MIN(X265_MAX(1, m_rce.encodeOrder - prevBPSEI), (1 << hrd->cpbRemovalDelayLength));
+            sei->m_picDpbOutputDelay = slice->m_sps->numReorderPics + poc - m_rce.encodeOrder;
+        }
+
+        m_bs.resetBits();
+        sei->write(m_bs, *slice->m_sps);
+        m_bs.writeByteAlignment();
+        m_nalList.serialize(NAL_UNIT_PREFIX_SEI, m_bs);
+    }
+
+    /* CQP and CRF (without capped VBV) doesn't use mid-frame statistics to 
+     * tune RateControl parameters for other frames.
+     * Hence, for these modes, update m_startEndOrder and unlock RC for previous threads waiting in
+     * RateControlEnd here, after the slice contexts are initialized. For the rest - ABR
+     * and VBV, unlock only after rateControlUpdateStats of this frame is called */
+    if (m_param->rc.rateControlMode != X265_RC_ABR && !m_top->m_rateControl->m_isVbv)
+    {
+        m_top->m_rateControl->m_startEndOrder.incr();
+
+        if (m_rce.encodeOrder < m_param->frameNumThreads - 1)
+            m_top->m_rateControl->m_startEndOrder.incr(); // faked rateControlEnd calls for negative frames
+    }
+
+    /* Analyze CTU rows, most of the hard work is done here.  Frame is
+     * compressed in a wave-front pattern if WPP is enabled. Row based loop
+     * filters runs behind the CTU compression and reconstruction */
+
+    m_rows[0].active = true;
+    if (m_param->bEnableWavefront)
+    {
+        for (uint32_t row = 0; row < m_numRows; row++)
+        {
+            // block until all reference frames have reconstructed the rows we need
+            for (int l = 0; l < numPredDir; l++)
+            {
+                for (int ref = 0; ref < slice->m_numRefIdx[l]; ref++)
+                {
+                    Frame *refpic = slice->m_refFrameList[l][ref];
+
+                    uint32_t reconRowCount = refpic->m_reconRowCount.get();
+                    while ((reconRowCount != m_numRows) && (reconRowCount < row + m_refLagRows))
+                        reconRowCount = refpic->m_reconRowCount.waitForChange(reconRowCount);
+
+                    if ((bUseWeightP || bUseWeightB) && m_mref[l][ref].isWeighted)
+                        m_mref[l][ref].applyWeight(row + m_refLagRows, m_numRows);
+                }
+            }
+
+            enableRowEncoder(row); /* clear external dependency for this row */
+            if (!row)
+            {
+                m_row0WaitTime = x265_mdate();
+                enqueueRowEncoder(0); /* clear internal dependency, start wavefront */
+            }
+            tryWakeOne();
+        }
+
+        m_allRowsAvailableTime = x265_mdate();
+        tryWakeOne(); /* ensure one thread is active or help-wanted flag is set prior to blocking */
+        static const int block_ms = 250;
+        while (m_completionEvent.timedWait(block_ms))
+            tryWakeOne();
+    }
+    else
+    {
+        for (uint32_t i = 0; i < m_numRows + m_filterRowDelay; i++)
+        {
+            // compress
+            if (i < m_numRows)
+            {
+                // block until all reference frames have reconstructed the rows we need
+                for (int l = 0; l < numPredDir; l++)
+                {
+                    int list = l;
+                    for (int ref = 0; ref < slice->m_numRefIdx[list]; ref++)
+                    {
+                        Frame *refpic = slice->m_refFrameList[list][ref];
+
+                        uint32_t reconRowCount = refpic->m_reconRowCount.get();
+                        while ((reconRowCount != m_numRows) && (reconRowCount < i + m_refLagRows))
+                            reconRowCount = refpic->m_reconRowCount.waitForChange(reconRowCount);
+
+                        if ((bUseWeightP || bUseWeightB) && m_mref[l][ref].isWeighted)
+                            m_mref[list][ref].applyWeight(i + m_refLagRows, m_numRows);
+                    }
+                }
+
+                if (!i)
+                    m_row0WaitTime = x265_mdate();
+                else if (i == m_numRows - 1)
+                    m_allRowsAvailableTime = x265_mdate();
+                processRowEncoder(i, m_tld[m_localTldIdx]);
+            }
+
+            // filter
+            if (i >= m_filterRowDelay)
+                m_frameFilter.processRow(i - m_filterRowDelay);
+        }
+    }
+
+    if (m_param->rc.bStatWrite)
+    {
+        int totalI = 0, totalP = 0, totalSkip = 0;
+
+        // accumulate intra,inter,skip cu count per frame for 2 pass
+        for (uint32_t i = 0; i < m_numRows; i++)
+        {
+            m_frame->m_encData->m_frameStats.mvBits    += m_rows[i].rowStats.mvBits;
+            m_frame->m_encData->m_frameStats.coeffBits += m_rows[i].rowStats.coeffBits;
+            m_frame->m_encData->m_frameStats.miscBits  += m_rows[i].rowStats.miscBits;
+            totalI                                     += m_rows[i].rowStats.intra8x8Cnt;
+            totalP                                     += m_rows[i].rowStats.inter8x8Cnt;
+            totalSkip                                  += m_rows[i].rowStats.skip8x8Cnt;
+        }
+        int totalCuCount = totalI + totalP + totalSkip;
+        m_frame->m_encData->m_frameStats.percent8x8Intra = (double)totalI / totalCuCount;
+        m_frame->m_encData->m_frameStats.percent8x8Inter = (double)totalP / totalCuCount;
+        m_frame->m_encData->m_frameStats.percent8x8Skip  = (double)totalSkip / totalCuCount;
+    }
+    for (uint32_t i = 0; i < m_numRows; i++)
+    {
+        m_frame->m_encData->m_frameStats.cntIntraNxN      += m_rows[i].rowStats.cntIntraNxN;
+        m_frame->m_encData->m_frameStats.totalCu          += m_rows[i].rowStats.totalCu;
+        m_frame->m_encData->m_frameStats.totalCtu         += m_rows[i].rowStats.totalCtu;
+        m_frame->m_encData->m_frameStats.lumaDistortion   += m_rows[i].rowStats.lumaDistortion;
+        m_frame->m_encData->m_frameStats.chromaDistortion += m_rows[i].rowStats.chromaDistortion;
+        m_frame->m_encData->m_frameStats.psyEnergy        += m_rows[i].rowStats.psyEnergy;
+        m_frame->m_encData->m_frameStats.resEnergy        += m_rows[i].rowStats.resEnergy;
+        for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
+        {
+            m_frame->m_encData->m_frameStats.cntSkipCu[depth] += m_rows[i].rowStats.cntSkipCu[depth];
+            m_frame->m_encData->m_frameStats.cntMergeCu[depth] += m_rows[i].rowStats.cntMergeCu[depth];
+            for (int m = 0; m < INTER_MODES; m++)
+                m_frame->m_encData->m_frameStats.cuInterDistribution[depth][m] += m_rows[i].rowStats.cuInterDistribution[depth][m];
+            for (int n = 0; n < INTRA_MODES; n++)
+                m_frame->m_encData->m_frameStats.cuIntraDistribution[depth][n] += m_rows[i].rowStats.cuIntraDistribution[depth][n];
+        }
+    }
+    m_frame->m_encData->m_frameStats.avgLumaDistortion   = (double)(m_frame->m_encData->m_frameStats.lumaDistortion) / m_frame->m_encData->m_frameStats.totalCtu;
+    m_frame->m_encData->m_frameStats.avgChromaDistortion = (double)(m_frame->m_encData->m_frameStats.chromaDistortion) / m_frame->m_encData->m_frameStats.totalCtu;
+    m_frame->m_encData->m_frameStats.avgPsyEnergy        = (double)(m_frame->m_encData->m_frameStats.psyEnergy) / m_frame->m_encData->m_frameStats.totalCtu;
+    m_frame->m_encData->m_frameStats.avgResEnergy        = (double)(m_frame->m_encData->m_frameStats.resEnergy) / m_frame->m_encData->m_frameStats.totalCtu;
+    m_frame->m_encData->m_frameStats.percentIntraNxN     = (double)(m_frame->m_encData->m_frameStats.cntIntraNxN * 100) / m_frame->m_encData->m_frameStats.totalCu;
+    for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
+    {
+        m_frame->m_encData->m_frameStats.percentSkipCu[depth]  = (double)(m_frame->m_encData->m_frameStats.cntSkipCu[depth] * 100) / m_frame->m_encData->m_frameStats.totalCu;
+        m_frame->m_encData->m_frameStats.percentMergeCu[depth] = (double)(m_frame->m_encData->m_frameStats.cntMergeCu[depth] * 100) / m_frame->m_encData->m_frameStats.totalCu;
+        for (int n = 0; n < INTRA_MODES; n++)
+            m_frame->m_encData->m_frameStats.percentIntraDistribution[depth][n] = (double)(m_frame->m_encData->m_frameStats.cuIntraDistribution[depth][n] * 100) / m_frame->m_encData->m_frameStats.totalCu;
+        uint64_t cuInterRectCnt = 0; // sum of Nx2N, 2NxN counts
+        cuInterRectCnt += m_frame->m_encData->m_frameStats.cuInterDistribution[depth][1] + m_frame->m_encData->m_frameStats.cuInterDistribution[depth][2];
+        m_frame->m_encData->m_frameStats.percentInterDistribution[depth][0] = (double)(m_frame->m_encData->m_frameStats.cuInterDistribution[depth][0] * 100) / m_frame->m_encData->m_frameStats.totalCu;
+        m_frame->m_encData->m_frameStats.percentInterDistribution[depth][1] = (double)(cuInterRectCnt * 100) / m_frame->m_encData->m_frameStats.totalCu;
+        m_frame->m_encData->m_frameStats.percentInterDistribution[depth][2] = (double)(m_frame->m_encData->m_frameStats.cuInterDistribution[depth][3] * 100) / m_frame->m_encData->m_frameStats.totalCu;
+    }
+
+    m_bs.resetBits();
+    m_entropyCoder.load(m_initSliceContext);
+    m_entropyCoder.setBitstream(&m_bs);
+    m_entropyCoder.codeSliceHeader(*slice, *m_frame->m_encData);
+
+    // finish encode of each CTU row, only required when SAO is enabled
+    if (m_param->bEnableSAO)
+        encodeSlice();
+
+    // serialize each row, record final lengths in slice header
+    uint32_t maxStreamSize = m_nalList.serializeSubstreams(m_substreamSizes, numSubstreams, m_outStreams);
+
+    // complete the slice header by writing WPP row-starts
+    m_entropyCoder.setBitstream(&m_bs);
+    if (slice->m_pps->bEntropyCodingSyncEnabled)
+        m_entropyCoder.codeSliceHeaderWPPEntryPoints(*slice, m_substreamSizes, maxStreamSize);
+    m_bs.writeByteAlignment();
+
+    m_nalList.serialize(slice->m_nalUnitType, m_bs);
+
+    if (m_param->decodedPictureHashSEI)
+    {
+        if (m_param->decodedPictureHashSEI == 1)
+        {
+            m_seiReconPictureDigest.m_method = SEIDecodedPictureHash::MD5;
+            for (int i = 0; i < 3; i++)
+                MD5Final(&m_state[i], m_seiReconPictureDigest.m_digest[i]);
+        }
+        else if (m_param->decodedPictureHashSEI == 2)
+        {
+            m_seiReconPictureDigest.m_method = SEIDecodedPictureHash::CRC;
+            for (int i = 0; i < 3; i++)
+                crcFinish(m_crc[i], m_seiReconPictureDigest.m_digest[i]);
+        }
+        else if (m_param->decodedPictureHashSEI == 3)
+        {
+            m_seiReconPictureDigest.m_method = SEIDecodedPictureHash::CHECKSUM;
+            for (int i = 0; i < 3; i++)
+                checksumFinish(m_checksum[i], m_seiReconPictureDigest.m_digest[i]);
+        }
+
+        m_bs.resetBits();
+        m_seiReconPictureDigest.write(m_bs, *slice->m_sps);
+        m_bs.writeByteAlignment();
+
+        m_nalList.serialize(NAL_UNIT_SUFFIX_SEI, m_bs);
+    }
+
+    uint64_t bytes = 0;
+    for (uint32_t i = 0; i < m_nalList.m_numNal; i++)
+    {
+        int type = m_nalList.m_nal[i].type;
+
+        // exclude SEI
+        if (type != NAL_UNIT_PREFIX_SEI && type != NAL_UNIT_SUFFIX_SEI)
+        {
+            bytes += m_nalList.m_nal[i].sizeBytes;
+            // and exclude start code prefix
+            bytes -= (!i || type == NAL_UNIT_SPS || type == NAL_UNIT_PPS) ? 4 : 3;
+        }
+    }
+    m_accessUnitBits = bytes << 3;
+
+    m_endCompressTime = x265_mdate();
+
+    /* rateControlEnd may also block for earlier frames to call rateControlUpdateStats */
+    if (m_top->m_rateControl->rateControlEnd(m_frame, m_accessUnitBits, &m_rce) < 0)
+        m_top->m_aborted = true;
+
+    /* Decrement referenced frame reference counts, allow them to be recycled */
+    for (int l = 0; l < numPredDir; l++)
+    {
+        for (int ref = 0; ref < slice->m_numRefIdx[l]; ref++)
+        {
+            Frame *refpic = slice->m_refFrameList[l][ref];
+            ATOMIC_DEC(&refpic->m_countRefEncoders);
+        }
+    }
+
+    if (m_nr)
+    {
+        bool nrEnabled = (m_rce.newQp < QP_MAX_SPEC || !m_param->rc.vbvBufferSize) && (m_param->noiseReductionIntra || m_param->noiseReductionInter);
+
+        if (nrEnabled)
+        {
+            /* Accumulate NR statistics from all worker threads */
+            for (int i = 0; i < numTLD; i++)
+            {
+                NoiseReduction* nr = &m_tld[i].analysis.m_quant.m_frameNr[m_jpId];
+                for (int cat = 0; cat < MAX_NUM_TR_CATEGORIES; cat++)
+                {
+                    for (int coeff = 0; coeff < MAX_NUM_TR_COEFFS; coeff++)
+                        m_nr->nrResidualSum[cat][coeff] += nr->nrResidualSum[cat][coeff];
+
+                    m_nr->nrCount[cat] += nr->nrCount[cat];
+                }
+            }
+
+            noiseReductionUpdate();
+
+            /* Copy updated NR coefficients back to all worker threads */
+            for (int i = 0; i < numTLD; i++)
+            {
+                NoiseReduction* nr = &m_tld[i].analysis.m_quant.m_frameNr[m_jpId];
+                memcpy(nr->nrOffsetDenoise, m_nr->nrOffsetDenoise, sizeof(uint16_t)* MAX_NUM_TR_CATEGORIES * MAX_NUM_TR_COEFFS);
+                memset(nr->nrCount, 0, sizeof(uint32_t)* MAX_NUM_TR_CATEGORIES);
+                memset(nr->nrResidualSum, 0, sizeof(uint32_t)* MAX_NUM_TR_CATEGORIES * MAX_NUM_TR_COEFFS);
+            }
+        }
+    }
+
+#if DETAILED_CU_STATS
+    /* Accumulate CU statistics from each worker thread, we could report
+     * per-frame stats here, but currently we do not. */
+    for (int i = 0; i < numTLD; i++)
+        m_cuStats.accumulate(m_tld[i].analysis.m_stats[m_jpId]);
+#endif
+
+    m_endFrameTime = x265_mdate();
+}
+
+/* Final bitstream-generation pass over every CTU of the slice.  Only called
+ * when SAO is enabled (see the "only required when SAO is enabled" call site
+ * above): SAO parameters are decided after the rows were first compressed,
+ * so the CTUs must be entropy-coded again with the SAO decisions included. */
+void FrameEncoder::encodeSlice()
+{
+    Slice* slice = m_frame->m_encData->m_slice;
+    const uint32_t widthInLCUs = slice->m_sps->numCuInWidth;
+    /* m_endCUAddr counts 4x4 partitions; round up to a whole CTU count */
+    const uint32_t lastCUAddr = (slice->m_endCUAddr + NUM_4x4_PARTITIONS - 1) / NUM_4x4_PARTITIONS;
+    /* with WPP each CTU row is written into its own substream; otherwise all
+     * CTUs share substream 0 */
+    const uint32_t numSubstreams = m_param->bEnableWavefront ? slice->m_sps->numCuInHeight : 1;
+
+    SAOParam* saoParam = slice->m_sps->bUseSAO ? m_frame->m_encData->m_saoParam : NULL;
+    for (uint32_t cuAddr = 0; cuAddr < lastCUAddr; cuAddr++)
+    {
+        uint32_t col = cuAddr % widthInLCUs;     // CTU x coordinate
+        uint32_t lin = cuAddr / widthInLCUs;     // CTU y coordinate (row)
+        uint32_t subStrm = lin % numSubstreams;  // target substream for this row
+        CUData* ctu = m_frame->m_encData->getPicCTU(cuAddr);
+
+        m_entropyCoder.setBitstream(&m_outStreams[subStrm]);
+
+        // Synchronize cabac probabilities with upper-right CTU if it's available and we're at the start of a line.
+        if (m_param->bEnableWavefront && !col && lin)
+        {
+            m_entropyCoder.copyState(m_initSliceContext);
+            m_entropyCoder.loadContexts(m_rows[lin - 1].bufferedEntropy);
+        }
+
+        if (saoParam)
+        {
+            if (saoParam->bSaoFlag[0] || saoParam->bSaoFlag[1])
+            {
+                /* code SAO merge flags first; explicit offsets are only
+                 * signalled when the CTU merges with neither its left nor its
+                 * upper neighbour */
+                int mergeLeft = col && saoParam->ctuParam[0][cuAddr].mergeMode == SAO_MERGE_LEFT;
+                int mergeUp = lin && saoParam->ctuParam[0][cuAddr].mergeMode == SAO_MERGE_UP;
+                if (col)
+                    m_entropyCoder.codeSaoMerge(mergeLeft);
+                if (lin && !mergeLeft)
+                    m_entropyCoder.codeSaoMerge(mergeUp);
+                if (!mergeLeft && !mergeUp)
+                {
+                    /* luma (plane 0) and chroma (planes 1 and 2) offsets are
+                     * signalled independently, gated by their own flags */
+                    if (saoParam->bSaoFlag[0])
+                        m_entropyCoder.codeSaoOffset(saoParam->ctuParam[0][cuAddr], 0);
+                    if (saoParam->bSaoFlag[1])
+                    {
+                        m_entropyCoder.codeSaoOffset(saoParam->ctuParam[1][cuAddr], 1);
+                        m_entropyCoder.codeSaoOffset(saoParam->ctuParam[2][cuAddr], 2);
+                    }
+                }
+            }
+            else
+            {
+                /* SAO disabled for both luma and chroma on this slice: clear
+                 * the per-CTU SAO parameters so nothing stale is carried */
+                for (int i = 0; i < 3; i++)
+                    saoParam->ctuParam[i][cuAddr].reset();
+            }
+        }
+
+        // final coding (bitstream generation) for this CU
+        m_entropyCoder.encodeCTU(*ctu, m_cuGeoms[m_ctuGeomMap[cuAddr]]);
+
+        if (m_param->bEnableWavefront)
+        {
+            if (col == 1)
+                // Store probabilities of second CTU in line into buffer
+                m_rows[lin].bufferedEntropy.loadContexts(m_entropyCoder);
+
+            /* each WPP substream is terminated at the end of its CTU row */
+            if (col == widthInLCUs - 1)
+                m_entropyCoder.finishSlice();
+        }
+    }
+    /* single-substream case: terminate once, after the last CTU */
+    if (!m_param->bEnableWavefront)
+        m_entropyCoder.finishSlice();
+}
+
+/* Worker-thread entry point for one unit of row work.  The packed 'row' id
+ * encodes both the real CTU row (row >> 1) and the job type (row & 1):
+ * even ids run CTU compression, odd ids run the loop filter for that row.
+ * Also maintains frame-level activity statistics: total time with no active
+ * worker (stall time) and total worker-elapsed time. */
+void FrameEncoder::processRow(int row, int threadId)
+{
+    int64_t startTime = x265_mdate();
+    /* first worker to become active ends a stall period, if one was open */
+    if (ATOMIC_INC(&m_activeWorkerCount) == 1 && m_stallStartTime)
+        m_totalNoWorkerTime += x265_mdate() - m_stallStartTime;
+
+    const uint32_t realRow = row >> 1;   // actual CTU row index
+    const uint32_t typeNum = row & 1;    // 0 = encode job, 1 = filter job
+
+    if (!typeNum)
+        processRowEncoder(realRow, m_tld[threadId]);
+    else
+    {
+        m_frameFilter.processRow(realRow);
+
+        // NOTE: Active next row
+        if (realRow != m_numRows - 1)
+            enqueueRowFilter(realRow + 1);
+    }
+
+    /* last worker to go idle opens a new stall period */
+    if (ATOMIC_DEC(&m_activeWorkerCount) == 0)
+        m_stallStartTime = x265_mdate();
+
+    m_totalWorkerElapsedTime += x265_mdate() - startTime; // not thread safe, but good enough
+}
+
+// Called by worker threads
+void FrameEncoder::processRowEncoder(int intRow, ThreadLocalData& tld)
+{
+    uint32_t row = (uint32_t)intRow;
+    CTURow& curRow = m_rows[row];
+
+    tld.analysis.m_param = m_param;
+    if (m_param->bEnableWavefront)
+    {
+        ScopedLock self(curRow.lock);
+        if (!curRow.active)
+            /* VBV restart is in progress, exit out */
+            return;
+        if (curRow.busy)
+        {
+            /* On multi-socket Windows servers, we have seen problems with
+             * ATOMIC_CAS which resulted in multiple worker threads processing
+             * the same CU row, which often resulted in bad pointer accesses. We
+             * believe the problem is fixed, but are leaving this check in place
+             * to prevent crashes in case it is not */
+            x265_log(m_param, X265_LOG_WARNING,
+                     "internal error - simultaneous row access detected. Please report HW to x265-devel@videolan.org\n");
+            return;
+        }
+        curRow.busy = true;
+    }
+
+    /* When WPP is enabled, every row has its own row coder instance. Otherwise
+     * they share row 0 */
+    Entropy& rowCoder = m_param->bEnableWavefront ? m_rows[row].rowGoOnCoder : m_rows[0].rowGoOnCoder;
+    FrameData& curEncData = *m_frame->m_encData;
+    Slice *slice = curEncData.m_slice;
+
+    const uint32_t numCols = m_numCols;
+    const uint32_t lineStartCUAddr = row * numCols;
+    bool bIsVbv = m_param->rc.vbvBufferSize > 0 && m_param->rc.vbvMaxBitrate > 0;
+
+    uint32_t maxBlockCols = (m_frame->m_fencPic->m_picWidth + (16 - 1)) / 16;
+    uint32_t maxBlockRows = (m_frame->m_fencPic->m_picHeight + (16 - 1)) / 16;
+    uint32_t noOfBlocks = g_maxCUSize / 16;
+
+    while (curRow.completed < numCols)
+    {
+        ProfileScopeEvent(encodeCTU);
+
+        uint32_t col = curRow.completed;
+        const uint32_t cuAddr = lineStartCUAddr + col;
+        CUData* ctu = curEncData.getPicCTU(cuAddr);
+        ctu->initCTU(*m_frame, cuAddr, slice->m_sliceQp);
+
+        if (bIsVbv)
+        {
+            if (!row)
+            {
+                curEncData.m_rowStat[row].diagQp = curEncData.m_avgQpRc;
+                curEncData.m_rowStat[row].diagQpScale = x265_qp2qScale(curEncData.m_avgQpRc);
+            }
+
+            FrameData::RCStatCU& cuStat = curEncData.m_cuStat[cuAddr];
+            if (row >= col && row && m_vbvResetTriggerRow != intRow)
+                cuStat.baseQp = curEncData.m_cuStat[cuAddr - numCols + 1].baseQp;
+            else
+                cuStat.baseQp = curEncData.m_rowStat[row].diagQp;
+
+            /* TODO: use defines from slicetype.h for lowres block size */
+            uint32_t block_y = (ctu->m_cuPelY >> g_maxLog2CUSize) * noOfBlocks;
+            uint32_t block_x = (ctu->m_cuPelX >> g_maxLog2CUSize) * noOfBlocks;
+            
+            cuStat.vbvCost = 0;
+            cuStat.intraVbvCost = 0;
+            for (uint32_t h = 0; h < noOfBlocks && block_y < maxBlockRows; h++, block_y++)
+            {
+                uint32_t idx = block_x + (block_y * maxBlockCols);
+
+                for (uint32_t w = 0; w < noOfBlocks && (block_x + w) < maxBlockCols; w++, idx++)
+                {
+                    cuStat.vbvCost += m_frame->m_lowres.lowresCostForRc[idx] & LOWRES_COST_MASK;
+                    cuStat.intraVbvCost += m_frame->m_lowres.intraCost[idx];
+                }
+            }
+        }
+        else
+            curEncData.m_cuStat[cuAddr].baseQp = curEncData.m_avgQpRc;
+
+        if (m_param->bEnableWavefront && !col && row)
+        {
+            // Load SBAC coder context from previous row and initialize row state.
+            rowCoder.copyState(m_initSliceContext);
+            rowCoder.loadContexts(m_rows[row - 1].bufferedEntropy);
+        }
+
+        // Does all the CU analysis, returns best top level mode decision
+        Mode& best = tld.analysis.compressCTU(*ctu, *m_frame, m_cuGeoms[m_ctuGeomMap[cuAddr]], rowCoder);
+
+        // take a sample of the current active worker count
+        ATOMIC_ADD(&m_totalActiveWorkerCount, m_activeWorkerCount);
+        ATOMIC_INC(&m_activeWorkerCountSamples);
+
+        /* advance top-level row coder to include the context of this CTU.
+         * if SAO is disabled, rowCoder writes the final CTU bitstream */
+        rowCoder.encodeCTU(*ctu, m_cuGeoms[m_ctuGeomMap[cuAddr]]);
+
+        if (m_param->bEnableWavefront && col == 1)
+            // Save CABAC state for next row
+            curRow.bufferedEntropy.loadContexts(rowCoder);
+
+        // Completed CU processing
+        curRow.completed++;
+
+        FrameStats frameLog;
+        curEncData.m_rowStat[row].sumQpAq += collectCTUStatistics(*ctu, &frameLog);
+
+        // copy no. of intra, inter Cu cnt per row into frame stats for 2 pass
+        if (m_param->rc.bStatWrite)
+        {
+            curRow.rowStats.mvBits    += best.mvBits;
+            curRow.rowStats.coeffBits += best.coeffBits;
+            curRow.rowStats.miscBits  += best.totalBits - (best.mvBits + best.coeffBits);
+
+            for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
+            {
+                /* 1 << shift == number of 8x8 blocks at current depth */
+                int shift = 2 * (g_maxCUDepth - depth);
+                int cuSize = g_maxCUSize >> depth;
+
+                if (cuSize == 8)
+                    curRow.rowStats.intra8x8Cnt += (int)(frameLog.cntIntra[depth] + frameLog.cntIntraNxN);
+                else
+                    curRow.rowStats.intra8x8Cnt += (int)(frameLog.cntIntra[depth] << shift);
+
+                curRow.rowStats.inter8x8Cnt += (int)(frameLog.cntInter[depth] << shift);
+                curRow.rowStats.skip8x8Cnt += (int)((frameLog.cntSkipCu[depth] + frameLog.cntMergeCu[depth]) << shift);
+            }
+        }
+        curRow.rowStats.totalCtu++;
+        curRow.rowStats.lumaDistortion   += best.lumaDistortion;
+        curRow.rowStats.chromaDistortion += best.chromaDistortion;
+        curRow.rowStats.psyEnergy        += best.psyEnergy;
+        curRow.rowStats.resEnergy        += best.resEnergy;
+        curRow.rowStats.cntIntraNxN      += frameLog.cntIntraNxN;
+        curRow.rowStats.totalCu          += frameLog.totalCu;
+        for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
+        {
+            curRow.rowStats.cntSkipCu[depth] += frameLog.cntSkipCu[depth];
+            curRow.rowStats.cntMergeCu[depth] += frameLog.cntMergeCu[depth];
+            for (int m = 0; m < INTER_MODES; m++)
+                curRow.rowStats.cuInterDistribution[depth][m] += frameLog.cuInterDistribution[depth][m];
+            for (int n = 0; n < INTRA_MODES; n++)
+                curRow.rowStats.cuIntraDistribution[depth][n] += frameLog.cuIntraDistribution[depth][n];
+        }
+
+        curEncData.m_cuStat[cuAddr].totalBits = best.totalBits;
+        x265_emms();
+
+        if (bIsVbv)
+        {
+            // Update encoded bits, satdCost, baseQP for each CU
+            curEncData.m_rowStat[row].diagSatd      += curEncData.m_cuStat[cuAddr].vbvCost;
+            curEncData.m_rowStat[row].diagIntraSatd += curEncData.m_cuStat[cuAddr].intraVbvCost;
+            curEncData.m_rowStat[row].encodedBits   += curEncData.m_cuStat[cuAddr].totalBits;
+            curEncData.m_rowStat[row].sumQpRc       += curEncData.m_cuStat[cuAddr].baseQp;
+            curEncData.m_rowStat[row].numEncodedCUs = cuAddr;
+
+            // If current block is at row diagonal checkpoint, call vbv ratecontrol.
+
+            if (row == col && row)
+            {
+                double qpBase = curEncData.m_cuStat[cuAddr].baseQp;
+                int reEncode = m_top->m_rateControl->rowDiagonalVbvRateControl(m_frame, row, &m_rce, qpBase);
+                qpBase = x265_clip3((double)QP_MIN, (double)QP_MAX_MAX, qpBase);
+                curEncData.m_rowStat[row].diagQp = qpBase;
+                curEncData.m_rowStat[row].diagQpScale =  x265_qp2qScale(qpBase);
+
+                if (reEncode < 0)
+                {
+                    x265_log(m_param, X265_LOG_DEBUG, "POC %d row %d - encode restart required for VBV, to %.2f from %.2f\n",
+                             m_frame->m_poc, row, qpBase, curEncData.m_cuStat[cuAddr].baseQp);
+
+                    // prevent the WaveFront::findJob() method from providing new jobs
+                    m_vbvResetTriggerRow = row;
+                    m_bAllRowsStop = true;
+
+                    for (uint32_t r = m_numRows - 1; r >= row; r--)
+                    {
+                        CTURow& stopRow = m_rows[r];
+
+                        if (r != row)
+                        {
+                            /* if row was active (ready to be run) clear active bit and bitmap bit for this row */
+                            stopRow.lock.acquire();
+                            while (stopRow.active)
+                            {
+                                if (dequeueRow(r * 2))
+                                    stopRow.active = false;
+                                else
+                                {
+                                    /* we must release the row lock to allow the thread to exit */
+                                    stopRow.lock.release();
+                                    GIVE_UP_TIME();
+                                    stopRow.lock.acquire();
+                                }
+                            }
+                            stopRow.lock.release();
+
+                            bool bRowBusy = true;
+                            do
+                            {
+                                stopRow.lock.acquire();
+                                bRowBusy = stopRow.busy;
+                                stopRow.lock.release();
+
+                                if (bRowBusy)
+                                {
+                                    GIVE_UP_TIME();
+                                }
+                            }
+                            while (bRowBusy);
+                        }
+
+                        m_outStreams[r].resetBits();
+                        stopRow.completed = 0;
+                        memset(&stopRow.rowStats, 0, sizeof(stopRow.rowStats));
+                        curEncData.m_rowStat[r].numEncodedCUs = 0;
+                        curEncData.m_rowStat[r].encodedBits = 0;
+                        curEncData.m_rowStat[r].diagSatd = 0;
+                        curEncData.m_rowStat[r].diagIntraSatd = 0;
+                        curEncData.m_rowStat[r].sumQpRc = 0;
+                        curEncData.m_rowStat[r].sumQpAq = 0;
+                    }
+
+                    m_bAllRowsStop = false;
+                }
+            }
+        }
+
+        /* SAO parameter estimation using non-deblocked pixels for CTU bottom and right boundary areas */
+        if (m_param->bEnableSAO && m_param->bSaoNonDeblocked)
+            m_frameFilter.m_sao.calcSaoStatsCu_BeforeDblk(m_frame, col, row);
+
+        if (m_param->bEnableWavefront && curRow.completed >= 2 && row < m_numRows - 1 &&
+            (!m_bAllRowsStop || intRow + 1 < m_vbvResetTriggerRow))
+        {
+            /* activate next row */
+            ScopedLock below(m_rows[row + 1].lock);
+            if (m_rows[row + 1].active == false &&
+                m_rows[row + 1].completed + 2 <= curRow.completed)
+            {
+                m_rows[row + 1].active = true;
+                enqueueRowEncoder(row + 1);
+                tryWakeOne(); /* wake up a sleeping thread or set the help wanted flag */
+            }
+        }
+
+        ScopedLock self(curRow.lock);
+        if ((m_bAllRowsStop && intRow > m_vbvResetTriggerRow) ||
+            (row > 0 && curRow.completed < numCols - 1 && m_rows[row - 1].completed < m_rows[row].completed + 2))
+        {
+            curRow.active = false;
+            curRow.busy = false;
+            ATOMIC_INC(&m_countRowBlocks);
+            return;
+        }
+    }
+
+    /** this row of CTUs has been compressed **/
+
+    /* If encoding with ABR, update bits and complexity in rate control
+     * after a number of rows so the next frame's rateControlStart has more
+     * accurate data for estimation. At the start of the encode we update stats
+     * after half the frame is encoded, but after this initial period we update
+     * after refLagRows (the number of rows reference frames must have completed
+     * before dependent frames may begin encoding) */
+    uint32_t rowCount = 0;
+    if (m_param->rc.rateControlMode == X265_RC_ABR || bIsVbv)
+    {
+        if ((uint32_t)m_rce.encodeOrder <= 2 * (m_param->fpsNum / m_param->fpsDenom))
+            rowCount = X265_MIN((m_numRows + 1) / 2, m_numRows - 1);
+        else
+            rowCount = X265_MIN(m_refLagRows, m_numRows - 1);
+        if (row == rowCount)
+        {
+            m_rce.rowTotalBits = 0;
+            if (bIsVbv)
+                for (uint32_t i = 0; i < rowCount; i++)
+                    m_rce.rowTotalBits += curEncData.m_rowStat[i].encodedBits;
+            else
+                for (uint32_t cuAddr = 0; cuAddr < rowCount * numCols; cuAddr++)
+                    m_rce.rowTotalBits += curEncData.m_cuStat[cuAddr].totalBits;
+
+            m_top->m_rateControl->rateControlUpdateStats(&m_rce);
+        }
+    }
+
+    /* flush row bitstream (if WPP and no SAO) or flush frame if no WPP and no SAO */
+    if (!m_param->bEnableSAO && (m_param->bEnableWavefront || row == m_numRows - 1))
+        rowCoder.finishSlice();
+
+    if (m_param->bEnableWavefront)
+    {
+        /* trigger row-wise loop filters */
+        if (row >= m_filterRowDelay)
+        {
+            enableRowFilter(row - m_filterRowDelay);
+
+            /* NOTE: Activate filter if first row (row 0) */
+            if (row == m_filterRowDelay)
+                enqueueRowFilter(0);
+            tryWakeOne();
+        }
+        if (row == m_numRows - 1)
+        {
+            for (uint32_t i = m_numRows - m_filterRowDelay; i < m_numRows; i++)
+                enableRowFilter(i);
+            tryWakeOne();
+        }
+    }
+
+    tld.analysis.m_param = NULL;
+    curRow.busy = false;
+
+    if (ATOMIC_INC(&m_completionCount) == 2 * (int)m_numRows)
+        m_completionEvent.trigger();
+}
+
+/* collect statistics about CU coding decisions, return total QP */
+/* Collect statistics about CU coding decisions for one CTU.
+ *
+ * Walks the CTU's coded CUs: the loop stride ctu.m_numPartitions >> (depth*2)
+ * advances over all 4x4 partition units covered by the CU at the current
+ * depth, so each CU is visited exactly once at its own depth.
+ *
+ * Returns the sum of each CU's QP weighted by the number of 4x4 partitions it
+ * covers, allowing the caller to derive an average QP for the CTU. */
+int FrameEncoder::collectCTUStatistics(const CUData& ctu, FrameStats* log)
+{
+    int totQP = 0;
+    if (ctu.m_slice->m_sliceType == I_SLICE)
+    {
+        uint32_t depth = 0;
+        for (uint32_t absPartIdx = 0; absPartIdx < ctu.m_numPartitions; absPartIdx += ctu.m_numPartitions >> (depth * 2))
+        {
+            depth = ctu.m_cuDepth[absPartIdx];
+
+            log->totalCu++;
+            log->cntIntra[depth]++;
+            totQP += ctu.m_qp[absPartIdx] * (ctu.m_numPartitions >> (depth * 2));
+
+            if (ctu.m_predMode[absPartIdx] == MODE_NONE)
+            {
+                /* CU lies outside the coded picture area; undo the counts */
+                log->totalCu--;
+                log->cntIntra[depth]--;
+            }
+            else if (ctu.m_partSize[absPartIdx] != SIZE_2Nx2N)
+            {
+                /* TODO: log intra modes at absPartIdx +0 to +3 */
+                X265_CHECK(ctu.m_log2CUSize[absPartIdx] == 3 && ctu.m_slice->m_sps->quadtreeTULog2MinSize < 3, "Intra NxN found at improbable depth\n");
+                log->cntIntraNxN++;
+                log->cntIntra[depth]--;
+            }
+            else if (ctu.m_lumaIntraDir[absPartIdx] > 1)
+                log->cuIntraDistribution[depth][ANGULAR_MODE_ID]++;
+            else
+                log->cuIntraDistribution[depth][ctu.m_lumaIntraDir[absPartIdx]]++;
+        }
+    }
+    else
+    {
+        uint32_t depth = 0;
+        for (uint32_t absPartIdx = 0; absPartIdx < ctu.m_numPartitions; absPartIdx += ctu.m_numPartitions >> (depth * 2))
+        {
+            depth = ctu.m_cuDepth[absPartIdx];
+
+            log->totalCu++;
+            totQP += ctu.m_qp[absPartIdx] * (ctu.m_numPartitions >> (depth * 2));
+
+            if (ctu.m_predMode[absPartIdx] == MODE_NONE)
+                log->totalCu--;
+            else if (ctu.isSkipped(absPartIdx))
+            {
+                /* BUG FIX: test the merge flag of the CU being visited, not
+                 * partition 0 of the CTU; using index 0 classified every
+                 * skipped CU in the CTU by the first partition's merge flag */
+                if (ctu.m_mergeFlag[absPartIdx])
+                    log->cntMergeCu[depth]++;
+                else
+                    log->cntSkipCu[depth]++;
+            }
+            else if (ctu.isInter(absPartIdx))
+            {
+                log->cntInter[depth]++;
+
+                /* AMP (asymmetric) partitions are lumped into one bucket */
+                if (ctu.m_partSize[absPartIdx] < AMP_ID)
+                    log->cuInterDistribution[depth][ctu.m_partSize[absPartIdx]]++;
+                else
+                    log->cuInterDistribution[depth][AMP_ID]++;
+            }
+            else if (ctu.isIntra(absPartIdx))
+            {
+                log->cntIntra[depth]++;
+
+                if (ctu.m_partSize[absPartIdx] != SIZE_2Nx2N)
+                {
+                    X265_CHECK(ctu.m_log2CUSize[absPartIdx] == 3 && ctu.m_slice->m_sps->quadtreeTULog2MinSize < 3, "Intra NxN found at improbable depth\n");
+                    log->cntIntraNxN++;
+                    log->cntIntra[depth]--;
+                    /* TODO: log intra modes at absPartIdx +0 to +3 */
+                }
+                else if (ctu.m_lumaIntraDir[absPartIdx] > 1)
+                    log->cuIntraDistribution[depth][ANGULAR_MODE_ID]++;
+                else
+                    log->cuIntraDistribution[depth][ctu.m_lumaIntraDir[absPartIdx]]++;
+            }
+        }
+    }
+
+    return totQP;
+}
+
+/* DCT-domain noise reduction / adaptive deadzone from libavcodec */
+void FrameEncoder::noiseReductionUpdate()
+{
+    /* per-transform-size cap on accumulated block counts; when exceeded the
+     * running sums are halved, giving exponential forgetting of old stats */
+    static const uint32_t maxBlocksPerTrSize[4] = {1 << 18, 1 << 16, 1 << 14, 1 << 12};
+
+    for (int cat = 0; cat < MAX_NUM_TR_CATEGORIES; cat++)
+    {
+        int trSize = cat & 3;                    /* low two bits select the transform size */
+        int coefCount = 1 << ((trSize + 2) * 2); /* coefficients per block: 16, 64, 256, 1024 */
+
+        if (m_nr->nrCount[cat] > maxBlocksPerTrSize[trSize])
+        {
+            /* halve sums and count together so their ratio is preserved */
+            for (int i = 0; i < coefCount; i++)
+                m_nr->nrResidualSum[cat][i] >>= 1;
+            m_nr->nrCount[cat] >>= 1;
+        }
+
+        /* NOTE(review): categories < 8 appear to be intra and the rest inter
+         * -- confirm against the MAX_NUM_TR_CATEGORIES layout */
+        int nrStrength = cat < 8 ? m_param->noiseReductionIntra : m_param->noiseReductionInter;
+        uint64_t scaledCount = (uint64_t)nrStrength * m_nr->nrCount[cat];
+
+        for (int i = 0; i < coefCount; i++)
+        {
+            /* offset ~= strength * count / averageResidual, computed as
+             * (strength*count + sum/2) / (sum + 1) in 64-bit to avoid overflow */
+            uint64_t value = scaledCount + m_nr->nrResidualSum[cat][i] / 2;
+            uint64_t denom = m_nr->nrResidualSum[cat][i] + 1;
+            m_nr->nrOffsetDenoise[cat][i] = (uint16_t)(value / denom);
+        }
+
+        // Don't denoise DC coefficients
+        m_nr->nrOffsetDenoise[cat][0] = 0;
+    }
+}
+
+/* Retrieve the finished frame (blocking) and hand its NAL units to the
+ * caller; returns NULL when no frame is in flight. */
+Frame *FrameEncoder::getEncodedPicture(NALList& output)
+{
+    if (!m_frame)
+        return NULL;
+
+    /* block here until worker thread completes */
+    m_done.wait();
+
+    Frame *completed = m_frame;
+    m_frame = NULL;
+    output.takeContents(m_nalList);
+    m_prevOutputTime = x265_mdate();
+    return completed;
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/frameencoder.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,234 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Shin Yee <shinyee@multicorewareinc.com>
+ *          Min Chen <chenm003@163.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_FRAMEENCODER_H
+#define X265_FRAMEENCODER_H
+
+#include "common.h"
+#include "wavefront.h"
+#include "bitstream.h"
+#include "frame.h"
+#include "picyuv.h"
+#include "md5.h"
+
+#include "analysis.h"
+#include "sao.h"
+
+#include "entropy.h"
+#include "framefilter.h"
+#include "ratecontrol.h"
+#include "reference.h"
+#include "nal.h"
+
+namespace X265_NS {
+// private x265 namespace
+
+class ThreadPool;
+class Encoder;
+
+#define ANGULAR_MODE_ID 2
+#define AMP_ID 3
+
+/* Accumulated CU mode-decision counts; arrays of 4 are indexed by CU depth.
+ * All counters start at zero. */
+struct StatisticLog
+{
+    uint64_t cntInter[4];
+    uint64_t cntIntra[4];
+    uint64_t cuInterDistribution[4][INTER_MODES];
+    uint64_t cuIntraDistribution[4][INTRA_MODES];
+    uint64_t cntIntraNxN;
+    uint64_t cntSkipCu[4];
+    uint64_t cntTotalCu[4];
+    uint64_t totalCu;
+
+    StatisticLog()
+    {
+        /* members are all plain integers, so a blanket memset is safe */
+        memset(this, 0, sizeof(StatisticLog));
+    }
+};
+
+/* manages the state of encoding one row of CTU blocks.  When
+ * WPP is active, several rows will be simultaneously encoded. */
+struct CTURow
+{
+    Entropy           bufferedEntropy;  /* store CTU2 context for next row CTU0 */
+    Entropy           rowGoOnCoder;     /* store context between CTUs, code bitstream if !SAO */
+
+    /* per-row statistics, accumulated as CTUs in this row complete */
+    FrameStats        rowStats;
+
+    /* Threading variables */
+
+    /* This lock must be acquired when reading or writing m_active or m_busy */
+    Lock              lock;
+
+    /* row is ready to run, has no neighbor dependencies. The row may have
+     * external dependencies (reference frame pixels) that prevent it from being
+     * processed, so it may stay with m_active=true for some time before it is
+     * encoded by a worker thread. */
+    volatile bool     active;
+
+    /* row is being processed by a worker thread.  This flag is only true when a
+     * worker thread is within the context of FrameEncoder::processRow(). This
+     * flag is used to detect multiple possible wavefront problems. */
+    volatile bool     busy;
+
+    /* count of completed CUs in this row */
+    volatile uint32_t completed;
+
+    /* called at the start of each frame to initialize state.  Note that
+     * bufferedEntropy is deliberately left as-is; only the go-on coder is
+     * reloaded from the slice-initial context. */
+    void init(Entropy& initContext)
+    {
+        active = false;
+        busy = false;
+        completed = 0;
+        memset(&rowStats, 0, sizeof(rowStats));
+        rowGoOnCoder.load(initContext);
+    }
+};
+
+// Manages the wave-front processing of a single encoding frame
+class FrameEncoder : public WaveFront, public Thread
+{
+public:
+
+    FrameEncoder();
+
+    virtual ~FrameEncoder() {}
+
+    virtual bool init(Encoder *top, int numRows, int numCols);
+
+    void destroy();
+
+    /* triggers encode of a new frame by the worker thread */
+    bool startCompressFrame(Frame* curFrame);
+
+    /* blocks until worker thread is done, returns access unit */
+    Frame *getEncodedPicture(NALList& list);
+
+    Event                    m_enable;
+    Event                    m_done;
+    Event                    m_completionEvent;
+    int                      m_localTldIdx;
+
+    volatile bool            m_threadActive;
+    volatile bool            m_bAllRowsStop;
+    volatile int             m_completionCount;
+    volatile int             m_vbvResetTriggerRow;
+
+    uint32_t                 m_numRows;
+    uint32_t                 m_numCols;
+    uint32_t                 m_filterRowDelay;
+    uint32_t                 m_filterRowDelayCus;
+    uint32_t                 m_refLagRows;
+
+    CTURow*                  m_rows;
+    RateControlEntry         m_rce;
+    SEIDecodedPictureHash    m_seiReconPictureDigest;
+
+    /* distortion accumulators (per plane) and SSIM state, summed per row */
+    uint64_t                 m_SSDY;
+    uint64_t                 m_SSDU;
+    uint64_t                 m_SSDV;
+    double                   m_ssim;
+    uint64_t                 m_accessUnitBits;
+    uint32_t                 m_ssimCnt;
+    /* decoded picture hash SEI running state: MD5, CRC and checksum per plane */
+    MD5Context               m_state[3];
+    uint32_t                 m_crc[3];
+    uint32_t                 m_checksum[3];
+
+    volatile int             m_activeWorkerCount;        // count of workers currently encoding or filtering CTUs
+    volatile int             m_totalActiveWorkerCount;   // sum of m_activeWorkerCount sampled at end of each CTU
+    volatile int             m_activeWorkerCountSamples; // count of times m_activeWorkerCount was sampled (think vbv restarts)
+    volatile int             m_countRowBlocks;           // count of workers forced to abandon a row because of top dependency
+    int64_t                  m_startCompressTime;        // timestamp when frame encoder is given a frame
+    int64_t                  m_row0WaitTime;             // timestamp when row 0 is allowed to start
+    int64_t                  m_allRowsAvailableTime;     // timestamp when all reference dependencies are resolved
+    int64_t                  m_endCompressTime;          // timestamp after all CTUs are compressed
+    int64_t                  m_endFrameTime;             // timestamp after RCEnd, NR updates, etc
+    int64_t                  m_stallStartTime;           // timestamp when worker count becomes 0
+    int64_t                  m_prevOutputTime;           // timestamp when prev frame was retrieved by API thread
+    int64_t                  m_slicetypeWaitTime;        // total elapsed time waiting for decided frame
+    int64_t                  m_totalWorkerElapsedTime;   // total elapsed time spent by worker threads processing CTUs
+    int64_t                  m_totalNoWorkerTime;        // total elapsed time without any active worker threads
+#if DETAILED_CU_STATS
+    CUStats                  m_cuStats;
+#endif
+
+    Encoder*                 m_top;
+    x265_param*              m_param;
+    Frame*                   m_frame;
+    NoiseReduction*          m_nr;
+    ThreadLocalData*         m_tld; /* for --no-wpp */
+    Bitstream*               m_outStreams;
+    uint32_t*                m_substreamSizes;
+
+    CUGeom*                  m_cuGeoms;
+    uint32_t*                m_ctuGeomMap;
+
+    Bitstream                m_bs;
+    MotionReference          m_mref[2][MAX_NUM_REF + 1];
+    Entropy                  m_entropyCoder;
+    Entropy                  m_initSliceContext;
+    FrameFilter              m_frameFilter;
+    NALList                  m_nalList;
+
+    /* weighted prediction analysis, run as a bonded task group */
+    class WeightAnalysis : public BondedTaskGroup
+    {
+    public:
+
+        FrameEncoder& master;
+
+        WeightAnalysis(FrameEncoder& fe) : master(fe) {}
+
+        void processTasks(int workerThreadId);
+
+    protected:
+
+        WeightAnalysis operator=(const WeightAnalysis&);
+    };
+
+protected:
+
+    bool initializeGeoms();
+
+    /* analyze / compress frame, can be run in parallel within reference constraints */
+    void compressFrame();
+
+    /* called by compressFrame to generate final per-row bitstreams */
+    void encodeSlice();
+
+    void threadMain();
+    int  collectCTUStatistics(const CUData& ctu, FrameStats* frameLog);
+    void noiseReductionUpdate();
+
+    /* Called by WaveFront::findJob() */
+    virtual void processRow(int row, int threadId);
+    virtual void processRowEncoder(int row, ThreadLocalData& tld);
+
+    /* each CTU row owns two wavefront job slots: even = encode, odd = filter */
+    void enqueueRowEncoder(int row) { WaveFront::enqueueRow(row * 2 + 0); }
+    void enqueueRowFilter(int row)  { WaveFront::enqueueRow(row * 2 + 1); }
+    void enableRowEncoder(int row)  { WaveFront::enableRow(row * 2 + 0); }
+    void enableRowFilter(int row)   { WaveFront::enableRow(row * 2 + 1); }
+};
+}
+
+#endif // ifndef X265_FRAMEENCODER_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/framefilter.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,494 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Chung Shin Yee <shinyee@multicorewareinc.com>
+ *          Min Chen <chenm003@163.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "frame.h"
+#include "framedata.h"
+#include "encoder.h"
+#include "framefilter.h"
+#include "frameencoder.h"
+#include "wavefront.h"
+
+using namespace X265_NS;
+
+static uint64_t computeSSD(pixel *fenc, pixel *rec, intptr_t stride, uint32_t width, uint32_t height);
+static float calculateSSIM(pixel *pix1, intptr_t stride1, pixel *pix2, intptr_t stride2, uint32_t width, uint32_t height, void *buf, uint32_t& cnt);
+
+/* Constructor only nulls pointers; real setup happens in init() */
+FrameFilter::FrameFilter()
+    : m_param(NULL)
+    , m_frame(NULL)
+    , m_frameEncoder(NULL)
+    , m_ssimBuf(NULL)
+{
+}
+
+/* Release SAO resources and the SSIM scratch buffer.  Safe to call on a
+ * filter that was never initialized. */
+void FrameFilter::destroy()
+{
+    /* BUG FIX: the constructor leaves m_param NULL until init() runs, so
+     * guard the dereference to avoid a crash when destroy() is called on a
+     * never-initialized filter */
+    if (m_param && m_param->bEnableSAO)
+        m_sao.destroy();
+
+    X265_FREE(m_ssimBuf);
+}
+
+/* One-time binding of the filter to its owning encoder and frame encoder.
+ * Creates the SAO engine and allocates the SSIM scratch buffer when the
+ * corresponding features are enabled. */
+void FrameFilter::init(Encoder *top, FrameEncoder *frame, int numRows)
+{
+    m_param = top->m_param;
+    m_frameEncoder = frame;
+    m_numRows = numRows;
+    m_hChromaShift = CHROMA_H_SHIFT(m_param->internalCsp);
+    m_vChromaShift = CHROMA_V_SHIFT(m_param->internalCsp);
+    m_pad[0] = top->m_sps.conformanceWindow.rightOffset;
+    m_pad[1] = top->m_sps.conformanceWindow.bottomOffset;
+    /* SAO application lags one row behind deblock (see processRow) */
+    m_saoRowDelay = m_param->bEnableLoopFilter ? 1 : 0;
+    /* height of the bottom CTU row, which may be a partial CTU */
+    m_lastHeight = m_param->sourceHeight % g_maxCUSize ? m_param->sourceHeight % g_maxCUSize : g_maxCUSize;
+
+    /* SAO is silently disabled if its buffers cannot be created */
+    if (m_param->bEnableSAO)
+        if (!m_sao.create(m_param))
+            m_param->bEnableSAO = 0;
+
+    if (m_param->bEnableSsim)
+        /* scratch buffer for the per-row 4x4-block SSIM computation */
+        m_ssimBuf = X265_MALLOC(int, 8 * (m_param->sourceWidth / 4 + 3));
+}
+
+/* Bind the filter to the frame about to be encoded and prime SAO state. */
+void FrameFilter::start(Frame *frame, Entropy& initState, int qp)
+{
+    m_frame = frame;
+    if (!m_param->bEnableSAO)
+        return;
+
+    m_sao.startSlice(frame, initState, qp);
+}
+
+/* Deblock and SAO-filter one row of CTUs, then finalize (border extension,
+ * metrics, hashes) rows whose pixels have become final. */
+void FrameFilter::processRow(int row)
+{
+    ProfileScopeEvent(filterCTURow);
+
+#if DETAILED_CU_STATS
+    ScopedElapsedTime filterPerfScope(m_frameEncoder->m_cuStats.loopFilterElapsedTime);
+    m_frameEncoder->m_cuStats.countLoopFilter++;
+#endif
+
+    if (!m_param->bEnableLoopFilter && !m_param->bEnableSAO)
+    {
+        /* no in-loop filtering; reconstructed pixels are already final */
+        processRowPost(row);
+        return;
+    }
+    FrameData& encData = *m_frame->m_encData;
+    const uint32_t numCols = encData.m_slice->m_sps->numCuInWidth;
+    const uint32_t lineStartCUAddr = row * numCols;
+
+    if (m_param->bEnableLoopFilter)
+    {
+        const CUGeom* cuGeoms = m_frameEncoder->m_cuGeoms;
+        const uint32_t* ctuGeomMap = m_frameEncoder->m_ctuGeomMap;
+
+        for (uint32_t col = 0; col < numCols; col++)
+        {
+            uint32_t cuAddr = lineStartCUAddr + col;
+            const CUData* ctu = encData.getPicCTU(cuAddr);
+            /* vertical edges of the current CTU; horizontal edges of the
+             * previous CTU are deblocked one step behind */
+            deblockCTU(ctu, cuGeoms[ctuGeomMap[cuAddr]], Deblock::EDGE_VER);
+
+            if (col > 0)
+            {
+                const CUData* ctuPrev = encData.getPicCTU(cuAddr - 1);
+                deblockCTU(ctuPrev, cuGeoms[ctuGeomMap[cuAddr - 1]], Deblock::EDGE_HOR);
+            }
+        }
+
+        /* horizontal edges of the last CTU in the row */
+        const CUData* ctuPrev = encData.getPicCTU(lineStartCUAddr + numCols - 1);
+        deblockCTU(ctuPrev, cuGeoms[ctuGeomMap[lineStartCUAddr + numCols - 1]], Deblock::EDGE_HOR);
+    }
+
+    // SAO
+    SAOParam* saoParam = encData.m_saoParam;
+    if (m_param->bEnableSAO)
+    {
+        m_sao.m_entropyCoder.load(m_frameEncoder->m_initSliceContext);
+        m_sao.m_rdContexts.next.load(m_frameEncoder->m_initSliceContext);
+        m_sao.m_rdContexts.cur.load(m_frameEncoder->m_initSliceContext);
+
+        m_sao.rdoSaoUnitRow(saoParam, row);
+
+        // NOTE: SAO application lags one row behind the decision because the
+        // decision for a row needs pixels from the row below (inherited from HM)
+        if (row >= m_saoRowDelay)
+            processSao(row - m_saoRowDelay);
+    }
+
+    // this row of CTUs has been encoded
+
+    /* the previous row is only now final (its bottom edge was deblocked
+     * against this row), so post-process it here */
+    if (row > 0)
+        processRowPost(row - 1);
+
+    if (row == m_numRows - 1)
+    {
+        /* last row: flush the delayed SAO rows, then finalize this row */
+        if (m_param->bEnableSAO)
+        {
+            m_sao.rdoSaoUnitRowEnd(saoParam, encData.m_slice->m_sps->numCUsInFrame);
+
+            for (int i = m_numRows - m_saoRowDelay; i < m_numRows; i++)
+                processSao(i);
+        }
+
+        processRowPost(row);
+    }
+}
+
+/* Height in pixels of a CTU row; only the bottom row may be partial. */
+uint32_t FrameFilter::getCUHeight(int rowNum) const
+{
+    if (rowNum == m_numRows - 1)
+        return m_lastHeight;
+
+    return g_maxCUSize;
+}
+
+/* Per-row post-processing once reconstructed pixels are final: border
+ * extension of the recon picture, PSNR/SSIM accumulation, and decoded
+ * picture hash (MD5/CRC/checksum) updates. */
+void FrameFilter::processRowPost(int row)
+{
+    PicYuv *reconPic = m_frame->m_reconPic;
+    const uint32_t numCols = m_frame->m_encData->m_slice->m_sps->numCuInWidth;
+    const uint32_t lineStartCUAddr = row * numCols;
+    const int realH = getCUHeight(row);
+
+    // Border extend Left and Right
+    primitives.extendRowBorder(reconPic->getLumaAddr(lineStartCUAddr), reconPic->m_stride, reconPic->m_picWidth, realH, reconPic->m_lumaMarginX);
+    if (reconPic->m_picCsp != X265_CSP_I400) {
+        primitives.extendRowBorder(reconPic->getCbAddr(lineStartCUAddr), reconPic->m_strideC, reconPic->m_picWidth >> m_hChromaShift, realH >> m_vChromaShift, reconPic->m_chromaMarginX);
+        primitives.extendRowBorder(reconPic->getCrAddr(lineStartCUAddr), reconPic->m_strideC, reconPic->m_picWidth >> m_hChromaShift, realH >> m_vChromaShift, reconPic->m_chromaMarginX);
+    }
+
+    // Border extend Top (replicate the first pixel row into the top margin)
+    if (!row)
+    {
+        const intptr_t stride = reconPic->m_stride;
+        pixel *pixY = reconPic->getLumaAddr(lineStartCUAddr) - reconPic->m_lumaMarginX;
+
+        for (uint32_t y = 0; y < reconPic->m_lumaMarginY; y++)
+            memcpy(pixY - (y + 1) * stride, pixY, stride * sizeof(pixel));
+
+        if (reconPic->m_picCsp != X265_CSP_I400) {
+            const intptr_t strideC = reconPic->m_strideC;
+            pixel *pixU = reconPic->getCbAddr(lineStartCUAddr) - reconPic->m_chromaMarginX;
+            pixel *pixV = reconPic->getCrAddr(lineStartCUAddr) - reconPic->m_chromaMarginX;
+            for (uint32_t y = 0; y < reconPic->m_chromaMarginY; y++)
+            {
+                memcpy(pixU - (y + 1) * strideC, pixU, strideC * sizeof(pixel));
+                memcpy(pixV - (y + 1) * strideC, pixV, strideC * sizeof(pixel));
+            }
+        }
+    }
+
+    // Border extend Bottom (replicate the last pixel row into the bottom margin)
+    if (row == m_numRows - 1)
+    {
+        const intptr_t stride = reconPic->m_stride;
+        pixel *pixY = reconPic->getLumaAddr(lineStartCUAddr) - reconPic->m_lumaMarginX + (realH - 1) * stride;
+        for (uint32_t y = 0; y < reconPic->m_lumaMarginY; y++)
+            memcpy(pixY + (y + 1) * stride, pixY, stride * sizeof(pixel));
+
+        if (reconPic->m_picCsp != X265_CSP_I400) {
+            const intptr_t strideC = reconPic->m_strideC;
+            pixel *pixU = reconPic->getCbAddr(lineStartCUAddr) - reconPic->m_chromaMarginX + ((realH >> m_vChromaShift) - 1) * strideC;
+            pixel *pixV = reconPic->getCrAddr(lineStartCUAddr) - reconPic->m_chromaMarginX + ((realH >> m_vChromaShift) - 1) * strideC;
+            for (uint32_t y = 0; y < reconPic->m_chromaMarginY; y++)
+            {
+                memcpy(pixU + (y + 1) * strideC, pixU, strideC * sizeof(pixel));
+                memcpy(pixV + (y + 1) * strideC, pixV, strideC * sizeof(pixel));
+            }
+        }
+    }
+
+    // Notify other FrameEncoders that this row of reconstructed pixels is available
+    m_frame->m_reconRowCount.incr();
+
+    uint32_t cuAddr = lineStartCUAddr;
+    if (m_param->bEnablePsnr)
+    {
+        PicYuv* fencPic = m_frame->m_fencPic;
+
+        intptr_t stride = reconPic->m_stride;
+        uint32_t width  = reconPic->m_picWidth - m_pad[0];
+        uint32_t height = getCUHeight(row);
+
+        /* accumulate per-plane SSD; the frame encoder converts to PSNR later */
+        uint64_t ssdY = computeSSD(fencPic->getLumaAddr(cuAddr), reconPic->getLumaAddr(cuAddr), stride, width, height);
+        m_frameEncoder->m_SSDY += ssdY;
+        if (reconPic->m_picCsp != X265_CSP_I400) {
+            height >>= m_vChromaShift;
+            width  >>= m_hChromaShift;
+            stride = reconPic->m_strideC;
+
+            uint64_t ssdU = computeSSD(fencPic->getCbAddr(cuAddr), reconPic->getCbAddr(cuAddr), stride, width, height);
+            uint64_t ssdV = computeSSD(fencPic->getCrAddr(cuAddr), reconPic->getCrAddr(cuAddr), stride, width, height);
+
+            m_frameEncoder->m_SSDU += ssdU;
+            m_frameEncoder->m_SSDV += ssdV;
+        }
+    }
+    if (m_param->bEnableSsim && m_ssimBuf)
+    {
+        pixel *rec = reconPic->m_picOrg[0];
+        pixel *fenc = m_frame->m_fencPic->m_picOrg[0];
+        intptr_t stride1 = reconPic->m_stride;
+        intptr_t stride2 = m_frame->m_fencPic->m_stride;
+        /* NOTE(review): bEnd is true on the second-to-last row because of
+         * the (row + 1) == (m_numRows - 1) comparison -- looks suspicious,
+         * confirm against upstream before changing */
+        uint32_t bEnd = ((row + 1) == (this->m_numRows - 1));
+        uint32_t bStart = (row == 0);
+        uint32_t minPixY = row * g_maxCUSize - 4 * !bStart;
+        uint32_t maxPixY = (row + 1) * g_maxCUSize - 4 * !bEnd;
+        uint32_t ssim_cnt;
+        x265_emms();
+
+        /* SSIM is done for each row in blocks of 4x4 . The First blocks are offset by 2 pixels to the right
+        * to avoid alignment of ssim blocks with DCT blocks. */
+        minPixY += bStart ? 2 : -6;
+        m_frameEncoder->m_ssim += calculateSSIM(rec + 2 + minPixY * stride1, stride1, fenc + 2 + minPixY * stride2, stride2,
+                                                m_param->sourceWidth - 2, maxPixY - minPixY, m_ssimBuf, ssim_cnt);
+        m_frameEncoder->m_ssimCnt += ssim_cnt;
+    }
+    if (m_param->decodedPictureHashSEI == 1)
+    {
+        /* hash type 1: MD5 over each plane */
+        uint32_t height = getCUHeight(row);
+        uint32_t width = reconPic->m_picWidth;
+        intptr_t stride = reconPic->m_stride;
+
+        if (!row)
+        {
+            for (int i = 0; i < 3; i++)
+                MD5Init(&m_frameEncoder->m_state[i]);
+        }
+
+        updateMD5Plane(m_frameEncoder->m_state[0], reconPic->getLumaAddr(cuAddr), width, height, stride);
+        if (reconPic->m_picCsp != X265_CSP_I400) {
+            width  >>= m_hChromaShift;
+            height >>= m_vChromaShift;
+            stride = reconPic->m_strideC;
+
+            updateMD5Plane(m_frameEncoder->m_state[1], reconPic->getCbAddr(cuAddr), width, height, stride);
+            updateMD5Plane(m_frameEncoder->m_state[2], reconPic->getCrAddr(cuAddr), width, height, stride);
+        }
+    }
+    else if (m_param->decodedPictureHashSEI == 2)
+    {
+        /* hash type 2: CRC per plane (note updateCRC takes height before width) */
+        uint32_t height = getCUHeight(row);
+        uint32_t width = reconPic->m_picWidth;
+        intptr_t stride = reconPic->m_stride;
+        if (!row)
+            m_frameEncoder->m_crc[0] = m_frameEncoder->m_crc[1] = m_frameEncoder->m_crc[2] = 0xffff;
+        updateCRC(reconPic->getLumaAddr(cuAddr), m_frameEncoder->m_crc[0], height, width, stride);
+        if (reconPic->m_picCsp != X265_CSP_I400) {
+            width  >>= m_hChromaShift;
+            height >>= m_vChromaShift;
+            stride = reconPic->m_strideC;
+
+            updateCRC(reconPic->getCbAddr(cuAddr), m_frameEncoder->m_crc[1], height, width, stride);
+            updateCRC(reconPic->getCrAddr(cuAddr), m_frameEncoder->m_crc[2], height, width, stride);
+        }
+    }
+    else if (m_param->decodedPictureHashSEI == 3)
+    {
+        /* hash type 3: checksum per plane */
+        uint32_t width = reconPic->m_picWidth;
+        uint32_t height = getCUHeight(row);
+        intptr_t stride = reconPic->m_stride;
+        uint32_t cuHeight = g_maxCUSize;
+        if (!row)
+            m_frameEncoder->m_checksum[0] = m_frameEncoder->m_checksum[1] = m_frameEncoder->m_checksum[2] = 0;
+        updateChecksum(reconPic->m_picOrg[0], m_frameEncoder->m_checksum[0], height, width, stride, row, cuHeight);
+        if (reconPic->m_picCsp != X265_CSP_I400) {
+            width  >>= m_hChromaShift;
+            height >>= m_vChromaShift;
+            stride = reconPic->m_strideC;
+            cuHeight >>= m_vChromaShift;
+
+            updateChecksum(reconPic->m_picOrg[1], m_frameEncoder->m_checksum[1], height, width, stride, row, cuHeight);
+            updateChecksum(reconPic->m_picOrg[2], m_frameEncoder->m_checksum[2], height, width, stride, row, cuHeight);
+        }
+    }
+
+    /* each row completes twice (encode + filter); trigger when all are done */
+    if (ATOMIC_INC(&m_frameEncoder->m_completionCount) == 2 * (int)m_frameEncoder->m_numRows)
+        m_frameEncoder->m_completionEvent.trigger();
+}
+
+/* Sum of squared differences between the source (fenc) and reconstructed
+ * (rec) planes over a width x height region; both planes share one stride.
+ * Dispatches to the widest available sse_pp primitives; falls back to a
+ * scalar loop when either dimension is not a multiple of 4. */
+static uint64_t computeSSD(pixel *fenc, pixel *rec, intptr_t stride, uint32_t width, uint32_t height)
+{
+    uint64_t ssd = 0;
+
+    if ((width | height) & 3)
+    {
+        /* Slow Path: dimensions not 4-aligned, accumulate pixel by pixel */
+        for (uint32_t y = 0; y < height; y++)
+        {
+            for (uint32_t x = 0; x < width; x++)
+            {
+                int diff = (int)(fenc[x] - rec[x]);
+                ssd += diff * diff;
+            }
+
+            fenc += stride;
+            rec += stride;
+        }
+
+        return ssd;
+    }
+
+    uint32_t y = 0;
+
+    /* Consume rows in ever narrower chunks of height */
+    for (int size = BLOCK_64x64; size >= BLOCK_4x4 && y < height; size--)
+    {
+        uint32_t rowHeight = 1 << (size + 2); /* 64, 32, 16, 8, 4 */
+
+        for (; y + rowHeight <= height; y += rowHeight)
+        {
+            uint32_t y1, x = 0;
+
+            /* Consume each row using the largest square blocks possible */
+            /* NOTE(review): the stride alignment masks gate the wide block
+             * primitives -- presumably an asm alignment requirement; confirm */
+            if (size == BLOCK_64x64 && !(stride & 31))
+                for (; x + 64 <= width; x += 64)
+                    ssd += primitives.cu[BLOCK_64x64].sse_pp(fenc + x, stride, rec + x, stride);
+
+            if (size >= BLOCK_32x32 && !(stride & 15))
+                for (; x + 32 <= width; x += 32)
+                    for (y1 = 0; y1 + 32 <= rowHeight; y1 += 32)
+                        ssd += primitives.cu[BLOCK_32x32].sse_pp(fenc + y1 * stride + x, stride, rec + y1 * stride + x, stride);
+
+            if (size >= BLOCK_16x16)
+                for (; x + 16 <= width; x += 16)
+                    for (y1 = 0; y1 + 16 <= rowHeight; y1 += 16)
+                        ssd += primitives.cu[BLOCK_16x16].sse_pp(fenc + y1 * stride + x, stride, rec + y1 * stride + x, stride);
+
+            if (size >= BLOCK_8x8)
+                for (; x + 8 <= width; x += 8)
+                    for (y1 = 0; y1 + 8 <= rowHeight; y1 += 8)
+                        ssd += primitives.cu[BLOCK_8x8].sse_pp(fenc + y1 * stride + x, stride, rec + y1 * stride + x, stride);
+
+            /* 4x4 blocks mop up the remaining width (width is 4-aligned here) */
+            for (; x + 4 <= width; x += 4)
+                for (y1 = 0; y1 + 4 <= rowHeight; y1 += 4)
+                    ssd += primitives.cu[BLOCK_4x4].sse_pp(fenc + y1 * stride + x, stride, rec + y1 * stride + x, stride);
+
+            fenc += stride * rowHeight;
+            rec += stride * rowHeight;
+        }
+    }
+
+    return ssd;
+}
+
+/* Function to calculate SSIM for each row.
+ * Accumulates SSIM contributions over 4x4 windows between pix1 and pix2.
+ * sum0/sum1 double-buffer two rows of per-window statistics inside buf
+ * (assumes buf holds at least 2*((width>>2)+3) int[4] entries -- TODO
+ * confirm against the m_ssimBuf allocation).  Returns the accumulated SSIM
+ * sum; cnt receives the number of windows folded in. */
+static float calculateSSIM(pixel *pix1, intptr_t stride1, pixel *pix2, intptr_t stride2, uint32_t width, uint32_t height, void *buf, uint32_t& cnt)
+{
+    uint32_t z = 0; /* next 4-pixel row of window sums still to be computed */
+    float ssim = 0.0;
+
+    /* two rows of 4x4 window statistics, swapped as rows are consumed */
+    int(*sum0)[4] = (int(*)[4])buf;
+    int(*sum1)[4] = sum0 + (width >> 2) + 3;
+    width >>= 2;  /* from here on, width/height count 4x4 windows */
+    height >>= 2;
+
+    for (uint32_t y = 1; y < height; y++)
+    {
+        /* compute window sums up to row y; sum1 retains the previous row */
+        for (; z <= y; z++)
+        {
+            std::swap(sum0, sum1);
+            for (uint32_t x = 0; x < width; x += 2)
+                primitives.ssim_4x4x2_core(&pix1[(4 * x + (z * stride1))], stride1, &pix2[(4 * x + (z * stride2))], stride2, &sum0[x]);
+        }
+
+        /* combine adjacent rows of sums into SSIM terms, 4 windows at a time */
+        for (uint32_t x = 0; x < width - 1; x += 4)
+            ssim += primitives.ssim_end_4(sum0 + x, sum1 + x, X265_MIN(4, width - x - 1));
+    }
+
+    cnt = (height - 1) * (width - 1);
+    return ssim;
+}
+
+/* restore original YUV samples to recon after SAO (if lossless): copies the
+ * untouched source samples back over the reconstructed CU */
+static void restoreOrigLosslessYuv(const CUData* cu, Frame& frame, uint32_t absPartIdx)
+{
+    const int blkSize = cu->m_log2CUSize[absPartIdx] - 2;
+    const uint32_t ctuAddr = cu->m_cuAddr;
+
+    PicYuv* recon = frame.m_reconPic;
+    PicYuv* fenc  = frame.m_fencPic;
+
+    /* luma plane */
+    primitives.cu[blkSize].copy_pp(recon->getLumaAddr(ctuAddr, absPartIdx), recon->m_stride,
+                                   fenc->getLumaAddr(ctuAddr, absPartIdx), fenc->m_stride);
+
+    const int csp = fenc->m_picCsp;
+    if (csp == X265_CSP_I400)
+        return;
+
+    /* chroma planes */
+    primitives.chroma[csp].cu[blkSize].copy_pp(recon->getCbAddr(ctuAddr, absPartIdx), recon->m_strideC,
+                                               fenc->getCbAddr(ctuAddr, absPartIdx), fenc->m_strideC);
+    primitives.chroma[csp].cu[blkSize].copy_pp(recon->getCrAddr(ctuAddr, absPartIdx), recon->m_strideC,
+                                               fenc->getCrAddr(ctuAddr, absPartIdx), fenc->m_strideC);
+}
+
+/* Original YUV restoration for CU in lossless coding: walk the CU tree and
+ * restore original samples for every leaf coded in transquant-bypass mode */
+static void origCUSampleRestoration(const CUData* cu, const CUGeom& cuGeom, Frame& frame)
+{
+    const uint32_t absPartIdx = cuGeom.absPartIdx;
+
+    if (cu->m_cuDepth[absPartIdx] > cuGeom.depth)
+    {
+        /* not a leaf at this geometry depth: recurse into the four children */
+        for (int child = 0; child < 4; child++)
+        {
+            const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + child);
+            if (childGeom.flags & CUGeom::PRESENT)
+                origCUSampleRestoration(cu, childGeom, frame);
+        }
+    }
+    else if (cu->m_tqBypass[absPartIdx])
+        restoreOrigLosslessYuv(cu, frame, absPartIdx);
+}
+
+/* Apply SAO to one CTU row, then undo it for any lossless CUs in the row */
+void FrameFilter::processSao(int row)
+{
+    FrameData& encData = *m_frame->m_encData;
+    SAOParam* saoParam = encData.m_saoParam;
+
+    /* luma SAO, then both chroma planes when chroma SAO is enabled */
+    if (saoParam->bSaoFlag[0])
+        m_sao.processSaoUnitRow(saoParam->ctuParam[0], row, 0);
+    if (saoParam->bSaoFlag[1])
+        for (int plane = 1; plane <= 2; plane++)
+            m_sao.processSaoUnitRow(saoParam->ctuParam[plane], row, plane);
+
+    /* lossless CUs must present the original samples, so SAO is reverted */
+    if (!encData.m_slice->m_pps->bTransquantBypassEnabled)
+        return;
+
+    const uint32_t numCols = encData.m_slice->m_sps->numCuInWidth;
+    const uint32_t firstCUAddr = row * numCols;
+    const CUGeom* cuGeoms = m_frameEncoder->m_cuGeoms;
+    const uint32_t* ctuGeomMap = m_frameEncoder->m_ctuGeomMap;
+
+    for (uint32_t col = 0; col < numCols; col++)
+    {
+        const uint32_t cuAddr = firstCUAddr + col;
+        origCUSampleRestoration(encData.getPicCTU(cuAddr), cuGeoms[ctuGeomMap[cuAddr]], *m_frame);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/framefilter.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,74 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Chung Shin Yee <shinyee@multicorewareinc.com>
+ *          Min Chen <chenm003@163.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_FRAMEFILTER_H
+#define X265_FRAMEFILTER_H
+
+#include "common.h"
+#include "frame.h"
+#include "deblock.h"
+#include "sao.h"
+
+namespace X265_NS {
+// private x265 namespace
+
+class Encoder;
+class Entropy;
+class FrameEncoder;
+struct ThreadLocalData;
+
+// Manages the processing of a single frame loopfilter (deblock + SAO + metrics)
+class FrameFilter : public Deblock
+{
+public:
+
+    x265_param*   m_param;        // encoder configuration (not owned)
+    Frame*        m_frame;        // frame currently being filtered
+    FrameEncoder* m_frameEncoder; // parent frame encoder (not owned)
+    int           m_hChromaShift; // horizontal chroma subsampling shift
+    int           m_vChromaShift; // vertical chroma subsampling shift
+    int           m_pad[2];       // padding amounts -- assumed (x, y); confirm in init()
+
+    SAO           m_sao;          // sample adaptive offset filter state
+    int           m_numRows;      // number of CTU rows in the frame
+    int           m_saoRowDelay;  // rows SAO lags behind deblock -- presumably; confirm in processRow()
+    int           m_lastHeight;   // height of the bottom (possibly partial) CTU row -- see getCUHeight()
+
+    void*         m_ssimBuf; /* Temp storage for ssim computation */
+
+    FrameFilter();
+
+    void init(Encoder *top, FrameEncoder *frame, int numRows);
+    void destroy();
+
+    // reset per-frame state before filtering begins
+    void start(Frame *pic, Entropy& initState, int qp);
+
+    void processRow(int row);
+    void processRowPost(int row);
+    void processSao(int row);
+    uint32_t getCUHeight(int rowNum) const;
+};
+}
+
+#endif // ifndef X265_FRAMEFILTER_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/level.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,496 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "slice.h"
+#include "level.h"
+
+namespace X265_NS {
+typedef struct
+{
+    uint32_t maxLumaSamples;          /* max luma picture size, in samples */
+    uint32_t maxLumaSamplesPerSecond; /* max luma sample rate */
+    uint32_t maxBitrateMain;          /* Main tier bitrate cap (Kbps, per the log messages) */
+    uint32_t maxBitrateHigh;          /* High tier bitrate cap; MAX_UINT when the level has no High tier */
+    uint32_t maxCpbSizeMain;          /* Main tier CPB (VBV buffer) cap */
+    uint32_t maxCpbSizeHigh;          /* High tier CPB cap; MAX_UINT when no High tier */
+    uint32_t minCompressionRatio;     /* MinCr, copied to vps.ptl.minCrForLevel */
+    Level::Name levelEnum;            /* enum value signaled in the bitstream */
+    const char* name;                 /* human-readable level name for logging */
+    int levelIdc;                     /* level_idc (level number * 10, e.g. 31 == 3.1) */
+} LevelSpec;
+
+/* Per-level decoder limits, ascending; last entry is the unconstrained
+ * Level 8.5 used for lossless streams.  Values presumably follow the HEVC
+ * spec Annex A tables -- confirm against the spec when editing. */
+LevelSpec levels[] =
+{
+    { 36864,    552960,     128,      MAX_UINT, 350,    MAX_UINT, 2, Level::LEVEL1,   "1",   10 },
+    { 122880,   3686400,    1500,     MAX_UINT, 1500,   MAX_UINT, 2, Level::LEVEL2,   "2",   20 },
+    { 245760,   7372800,    3000,     MAX_UINT, 3000,   MAX_UINT, 2, Level::LEVEL2_1, "2.1", 21 },
+    { 552960,   16588800,   6000,     MAX_UINT, 6000,   MAX_UINT, 2, Level::LEVEL3,   "3",   30 },
+    { 983040,   33177600,   10000,    MAX_UINT, 10000,  MAX_UINT, 2, Level::LEVEL3_1, "3.1", 31 },
+    { 2228224,  66846720,   12000,    30000,    12000,  30000,    4, Level::LEVEL4,   "4",   40 },
+    { 2228224,  133693440,  20000,    50000,    20000,  50000,    4, Level::LEVEL4_1, "4.1", 41 },
+    { 8912896,  267386880,  25000,    100000,   25000,  100000,   6, Level::LEVEL5,   "5",   50 },
+    { 8912896,  534773760,  40000,    160000,   40000,  160000,   8, Level::LEVEL5_1, "5.1", 51 },
+    { 8912896,  1069547520, 60000,    240000,   60000,  240000,   8, Level::LEVEL5_2, "5.2", 52 },
+    { 35651584, 1069547520, 60000,    240000,   60000,  240000,   8, Level::LEVEL6,   "6",   60 },
+    { 35651584, 2139095040, 120000,   480000,   120000, 480000,   8, Level::LEVEL6_1, "6.1", 61 },
+    { 35651584, 4278190080U, 240000,  800000,   240000, 800000,   6, Level::LEVEL6_2, "6.2", 62 },
+    { MAX_UINT, MAX_UINT, MAX_UINT, MAX_UINT, MAX_UINT, MAX_UINT, 1, Level::LEVEL8_5, "8.5", 85 },
+};
+
+/* determine minimum decoder level required to decode the described video.
+ * Fills vps.ptl with the detected profile, level, tier and constraint flags;
+ * leaves levelIdc == Level::NONE when no level can accommodate the stream. */
+void determineLevel(const x265_param &param, VPS& vps)
+{
+    /* constraint flags signaled in the profile_tier_level() syntax */
+    vps.ptl.onePictureOnlyConstraintFlag = param.totalFrames == 1;
+    vps.ptl.intraConstraintFlag = param.keyframeMax <= 1 || vps.ptl.onePictureOnlyConstraintFlag;
+    vps.ptl.bitDepthConstraint = param.internalBitDepth;
+    vps.ptl.chromaFormatConstraint = param.internalCsp;
+
+    /* TODO: figure out HighThroughput signaling, aka: HbrFactor in section A.4.2, only available
+     * for intra-only profiles (vps.ptl.intraConstraintFlag) */
+    vps.ptl.lowerBitRateConstraintFlag = true;
+
+    vps.maxTempSubLayers = param.bEnableTemporalSubLayers ? 2 : 1;
+
+    if (param.internalCsp == X265_CSP_I420 && param.internalBitDepth <= 10)
+    {
+        /* Probably an HEVC v1 profile, but must check to be sure */
+        if (param.internalBitDepth <= 8)
+        {
+            if (vps.ptl.onePictureOnlyConstraintFlag)
+                vps.ptl.profileIdc = Profile::MAINSTILLPICTURE;
+            else if (vps.ptl.intraConstraintFlag)
+                vps.ptl.profileIdc = Profile::MAINREXT; /* Main Intra */
+            else
+                vps.ptl.profileIdc = Profile::MAIN;
+        }
+        else if (param.internalBitDepth <= 10)
+        {
+            /* note there is no 10bit still picture profile */
+            if (vps.ptl.intraConstraintFlag)
+                vps.ptl.profileIdc = Profile::MAINREXT; /* Main10 Intra */
+            else
+                vps.ptl.profileIdc = Profile::MAIN10;
+        }
+    }
+    else
+        vps.ptl.profileIdc = Profile::MAINREXT;
+
+    /* determine which profiles are compatible with this stream */
+
+    memset(vps.ptl.profileCompatibilityFlag, 0, sizeof(vps.ptl.profileCompatibilityFlag));
+    vps.ptl.profileCompatibilityFlag[vps.ptl.profileIdc] = true;
+    if (vps.ptl.profileIdc == Profile::MAIN10 && param.internalBitDepth == 8)
+        vps.ptl.profileCompatibilityFlag[Profile::MAIN] = true;
+    else if (vps.ptl.profileIdc == Profile::MAIN)
+        vps.ptl.profileCompatibilityFlag[Profile::MAIN10] = true;
+    else if (vps.ptl.profileIdc == Profile::MAINSTILLPICTURE)
+    {
+        vps.ptl.profileCompatibilityFlag[Profile::MAIN] = true;
+        vps.ptl.profileCompatibilityFlag[Profile::MAIN10] = true;
+    }
+    else if (vps.ptl.profileIdc == Profile::MAINREXT)
+        vps.ptl.profileCompatibilityFlag[Profile::MAINREXT] = true;
+
+    uint32_t lumaSamples = param.sourceWidth * param.sourceHeight;
+    uint32_t samplesPerSec = (uint32_t)(lumaSamples * ((double)param.fpsNum / param.fpsDenom));
+    /* rate-limit checks use the VBV max rate when configured, else target bitrate */
+    uint32_t bitrate = param.rc.vbvMaxBitrate ? param.rc.vbvMaxBitrate : param.rc.bitrate;
+
+    const uint32_t MaxDpbPicBuf = 6;
+    vps.ptl.levelIdc = Level::NONE;
+    vps.ptl.tierFlag = Level::MAIN;
+
+    const size_t NumLevels = sizeof(levels) / sizeof(levels[0]);
+    uint32_t i;
+    if (param.bLossless)
+    {
+        /* lossless streams always signal Level 8.5, the last table entry
+         * (was a hard-coded 13; NumLevels - 1 survives table edits) */
+        i = (uint32_t)NumLevels - 1;
+        vps.ptl.minCrForLevel = 1;
+        vps.ptl.maxLumaSrForLevel = MAX_UINT;
+        vps.ptl.levelIdc = Level::LEVEL8_5;
+        vps.ptl.tierFlag = Level::MAIN;
+    }
+    else for (i = 0; i < NumLevels; i++)
+    {
+        /* skip any level whose hard limits the stream exceeds */
+        if (lumaSamples > levels[i].maxLumaSamples)
+            continue;
+        else if (samplesPerSec > levels[i].maxLumaSamplesPerSecond)
+            continue;
+        else if (bitrate > levels[i].maxBitrateMain && levels[i].maxBitrateHigh == MAX_UINT)
+            continue;
+        else if (bitrate > levels[i].maxBitrateHigh)
+            continue;
+        else if (param.sourceWidth > sqrt(levels[i].maxLumaSamples * 8.0f))
+            continue;
+        else if (param.sourceHeight > sqrt(levels[i].maxLumaSamples * 8.0f))
+            continue;
+
+        /* DPB size cap scales up when the picture is much smaller than the level limit */
+        uint32_t maxDpbSize = MaxDpbPicBuf;
+        if (lumaSamples <= (levels[i].maxLumaSamples >> 2))
+            maxDpbSize = X265_MIN(4 * MaxDpbPicBuf, 16);
+        else if (lumaSamples <= (levels[i].maxLumaSamples >> 1))
+            maxDpbSize = X265_MIN(2 * MaxDpbPicBuf, 16);
+        else if (lumaSamples <= ((3 * levels[i].maxLumaSamples) >> 2))
+            maxDpbSize = X265_MIN((4 * MaxDpbPicBuf) / 3, 16);
+
+        /* The value of sps_max_dec_pic_buffering_minus1[ HighestTid ] + 1 shall be less than
+         * or equal to MaxDpbSize */
+        if (vps.maxDecPicBuffering > maxDpbSize)
+            continue;
+
+        /* For level 5 and higher levels, the value of CtbSizeY shall be equal to 32 or 64 */
+        if (levels[i].levelEnum >= Level::LEVEL5 && param.maxCUSize < 32)
+        {
+            x265_log(&param, X265_LOG_WARNING, "level %s detected, but CTU size 16 is non-compliant\n", levels[i].name);
+            vps.ptl.profileIdc = Profile::NONE;
+            vps.ptl.levelIdc = Level::NONE;
+            vps.ptl.tierFlag = Level::MAIN;
+            x265_log(&param, X265_LOG_INFO, "NONE profile, Level-NONE (Main tier)\n");
+            return;
+        }
+
+        /* The value of NumPocTotalCurr shall be less than or equal to 8 */
+        int numPocTotalCurr = param.maxNumReferences + vps.numReorderPics;
+        if (numPocTotalCurr > 8)
+        {
+            x265_log(&param, X265_LOG_WARNING, "level %s detected, but NumPocTotalCurr (total references) is non-compliant\n", levels[i].name);
+            vps.ptl.profileIdc = Profile::NONE;
+            vps.ptl.levelIdc = Level::NONE;
+            vps.ptl.tierFlag = Level::MAIN;
+            x265_log(&param, X265_LOG_INFO, "NONE profile, Level-NONE (Main tier)\n");
+            return;
+        }
+
+#define CHECK_RANGE(value, main, high) (high != MAX_UINT && value > main && value <= high)
+
+        if (CHECK_RANGE(bitrate, levels[i].maxBitrateMain, levels[i].maxBitrateHigh) ||
+            CHECK_RANGE((uint32_t)param.rc.vbvBufferSize, levels[i].maxCpbSizeMain, levels[i].maxCpbSizeHigh))
+        {
+            /* The bitrate or buffer size are out of range for Main tier, but in
+             * range for High tier. If the user requested High tier then give
+             * them High tier at this level.  Otherwise allow the loop to
+             * progress to the Main tier of the next level */
+            if (param.bHighTier)
+                vps.ptl.tierFlag = Level::HIGH;
+            else
+                continue;
+        }
+        else
+            vps.ptl.tierFlag = Level::MAIN;
+#undef CHECK_RANGE
+
+        vps.ptl.levelIdc = levels[i].levelEnum;
+        vps.ptl.minCrForLevel = levels[i].minCompressionRatio;
+        vps.ptl.maxLumaSrForLevel = levels[i].maxLumaSamplesPerSecond;
+        break;
+    }
+
+    static const char *profiles[] = { "None", "Main", "Main 10", "Main Still Picture", "RExt" };
+    static const char *tiers[]    = { "Main", "High" };
+
+    char profbuf[64];
+    strcpy(profbuf, profiles[vps.ptl.profileIdc]);
+
+    /* expand the generic "RExt" label to the specific range-extension profile */
+    bool bStillPicture = false;
+    if (vps.ptl.profileIdc == Profile::MAINREXT)
+    {
+        if (vps.ptl.bitDepthConstraint > 12 && vps.ptl.intraConstraintFlag)
+        {
+            if (vps.ptl.onePictureOnlyConstraintFlag)
+            {
+                strcpy(profbuf, "Main 4:4:4 16 Still Picture");
+                bStillPicture = true;
+            }
+            else
+                strcpy(profbuf, "Main 4:4:4 16");
+        }
+        else if (param.internalCsp == X265_CSP_I420)
+        {
+            X265_CHECK(vps.ptl.intraConstraintFlag || vps.ptl.bitDepthConstraint > 10, "rext fail\n");
+            if (vps.ptl.bitDepthConstraint <= 8)
+                strcpy(profbuf, "Main");
+            else if (vps.ptl.bitDepthConstraint <= 10)
+                strcpy(profbuf, "Main 10");
+            else if (vps.ptl.bitDepthConstraint <= 12)
+                strcpy(profbuf, "Main 12");
+        }
+        else if (param.internalCsp == X265_CSP_I422)
+        {
+            /* there is no Main 4:2:2 profile, so it must be signaled as Main10 4:2:2 */
+            if (param.internalBitDepth <= 10)
+                strcpy(profbuf, "Main 4:2:2 10");
+            else if (vps.ptl.bitDepthConstraint <= 12)
+                strcpy(profbuf, "Main 4:2:2 12");
+        }
+        else if (param.internalCsp == X265_CSP_I444)
+        {
+            if (vps.ptl.bitDepthConstraint <= 8)
+            {
+                if (vps.ptl.onePictureOnlyConstraintFlag)
+                {
+                    strcpy(profbuf, "Main 4:4:4 Still Picture");
+                    bStillPicture = true;
+                }
+                else
+                    strcpy(profbuf, "Main 4:4:4");
+            }
+            else if (vps.ptl.bitDepthConstraint <= 10)
+                strcpy(profbuf, "Main 4:4:4 10");
+            else if (vps.ptl.bitDepthConstraint <= 12)
+                strcpy(profbuf, "Main 4:4:4 12");
+        }
+        else
+            strcpy(profbuf, "Unknown");
+
+        if (vps.ptl.intraConstraintFlag && !bStillPicture)
+            strcat(profbuf, " Intra");
+    }
+
+    /* Bug fix: when no level could accommodate the stream the search loop
+     * terminates with i == NumLevels, and indexing levels[i] here was an
+     * out-of-bounds read.  Report "NONE" instead. */
+    const char* levelName = (i < NumLevels) ? levels[i].name : "NONE";
+    x265_log(&param, X265_LOG_INFO, "%s profile, Level-%s (%s tier)\n",
+             profbuf, levelName, tiers[vps.ptl.tierFlag]);
+}
+
+/* enforce a maximum decoder level requirement, in other words assure that a
+ * decoder of the specified level may decode the video about to be created.
+ * Lower parameters where necessary to ensure the video will be decodable by a
+ * decoder meeting this level of requirement.  Some parameters (resolution and
+ * frame rate) are non-negotiable and thus this function may fail. In those
+ * circumstances it will be quite noisy */
+bool enforceLevel(x265_param& param, VPS& vps)
+{
+    /* derive reorder depth and DPB requirement from the GOP structure */
+    vps.numReorderPics = (param.bBPyramid && param.bframes > 1) ? 2 : !!param.bframes;
+    vps.maxDecPicBuffering = X265_MIN(MAX_NUM_REF, X265_MAX(vps.numReorderPics + 2, (uint32_t)param.maxNumReferences) + vps.numReorderPics);
+
+    /* no level specified by user, just auto-detect from the configuration */
+    if (param.levelIdc <= 0)
+        return true;
+
+    /* locate the requested level in the table */
+    uint32_t level = 0;
+    while (levels[level].levelIdc != param.levelIdc && level + 1 < sizeof(levels) / sizeof(levels[0]))
+        level++;
+    if (levels[level].levelIdc != param.levelIdc)
+    {
+        x265_log(&param, X265_LOG_WARNING, "specified level %d does not exist\n", param.levelIdc);
+        return false;
+    }
+
+    LevelSpec& l = levels[level];
+    bool highTier = !!param.bHighTier;
+    if (highTier && l.maxBitrateHigh == MAX_UINT)
+    {
+        highTier = false;
+        x265_log(&param, X265_LOG_WARNING, "Level %s has no High tier, using Main tier\n", l.name);
+    }
+
+    /* resolution and frame rate cannot be negotiated; fail if out of range */
+    uint32_t lumaSamples = param.sourceWidth * param.sourceHeight;
+    uint32_t samplesPerSec = (uint32_t)(lumaSamples * ((double)param.fpsNum / param.fpsDenom));
+    bool ok = true;
+    if (lumaSamples > l.maxLumaSamples)
+        ok = false;
+    else if (param.sourceWidth > sqrt(l.maxLumaSamples * 8.0f))
+        ok = false;
+    else if (param.sourceHeight > sqrt(l.maxLumaSamples * 8.0f))
+        ok = false;
+    if (!ok)
+    {
+        x265_log(&param, X265_LOG_WARNING, "picture dimensions are out of range for specified level\n");
+        return false;
+    }
+    else if (samplesPerSec > l.maxLumaSamplesPerSecond)
+    {
+        x265_log(&param, X265_LOG_WARNING, "frame rate is out of range for specified level\n");
+        return false;
+    }
+
+    /* clamp VBV settings to the tier's rate and CPB limits */
+    if ((uint32_t)param.rc.vbvMaxBitrate > (highTier ? l.maxBitrateHigh : l.maxBitrateMain))
+    {
+        param.rc.vbvMaxBitrate = highTier ? l.maxBitrateHigh : l.maxBitrateMain;
+        x265_log(&param, X265_LOG_INFO, "lowering VBV max bitrate to %dKbps\n", param.rc.vbvMaxBitrate);
+    }
+    if ((uint32_t)param.rc.vbvBufferSize > (highTier ? l.maxCpbSizeHigh : l.maxCpbSizeMain))
+    {
+        param.rc.vbvBufferSize = highTier ? l.maxCpbSizeHigh : l.maxCpbSizeMain;
+        x265_log(&param, X265_LOG_INFO, "lowering VBV buffer size to %dKb\n", param.rc.vbvBufferSize);
+    }
+
+    switch (param.rc.rateControlMode)
+    {
+    case X265_RC_ABR:
+        if ((uint32_t)param.rc.bitrate > (highTier ? l.maxBitrateHigh : l.maxBitrateMain))
+        {
+            /* Bug fix: the bitrate was previously always clamped to the High
+             * tier cap (l.maxBitrateHigh) even when encoding Main tier, which
+             * could leave a Main tier stream above its bitrate limit */
+            param.rc.bitrate = highTier ? l.maxBitrateHigh : l.maxBitrateMain;
+            x265_log(&param, X265_LOG_INFO, "lowering target bitrate to %s tier limit of %dKbps\n",
+                     highTier ? "High" : "Main", param.rc.bitrate);
+        }
+        break;
+
+    case X265_RC_CQP:
+        x265_log(&param, X265_LOG_WARNING, "Constant QP is inconsistent with specifying a decoder level, no bitrate guarantee is possible.\n");
+        return false;
+
+    case X265_RC_CRF:
+        if (!param.rc.vbvBufferSize || !param.rc.vbvMaxBitrate)
+        {
+            /* CRF gives no rate guarantee without VBV; enable it at tier caps */
+            if (!param.rc.vbvMaxBitrate)
+                param.rc.vbvMaxBitrate = highTier ? l.maxBitrateHigh : l.maxBitrateMain;
+            if (!param.rc.vbvBufferSize)
+                param.rc.vbvBufferSize = highTier ? l.maxCpbSizeHigh : l.maxCpbSizeMain;
+            x265_log(&param, X265_LOG_WARNING, "Specifying a decoder level with constant rate factor rate-control requires\n");
+            x265_log(&param, X265_LOG_WARNING, "enabling VBV with vbv-bufsize=%dkb vbv-maxrate=%dkbps. VBV outputs are non-deterministic!\n",
+                     param.rc.vbvBufferSize, param.rc.vbvMaxBitrate);
+        }
+        break;
+
+    default:
+        x265_log(&param, X265_LOG_ERROR, "Unknown rate control mode is inconsistent with specifying a decoder level\n");
+        return false;
+    }
+
+    /* The value of sps_max_dec_pic_buffering_minus1[ HighestTid ] + 1 shall be less than or equal to MaxDpbSize */
+    const uint32_t MaxDpbPicBuf = 6;
+    uint32_t maxDpbSize = MaxDpbPicBuf;
+    if (lumaSamples <= (l.maxLumaSamples >> 2))
+        maxDpbSize = X265_MIN(4 * MaxDpbPicBuf, 16);
+    else if (lumaSamples <= (l.maxLumaSamples >> 1))
+        maxDpbSize = X265_MIN(2 * MaxDpbPicBuf, 16);
+    else if (lumaSamples <= ((3 * l.maxLumaSamples) >> 2))
+        maxDpbSize = X265_MIN((4 * MaxDpbPicBuf) / 3, 16);
+
+    /* drop references one at a time until the DPB requirement fits the level */
+    int savedRefCount = param.maxNumReferences;
+    while (vps.maxDecPicBuffering > maxDpbSize && param.maxNumReferences > 1)
+    {
+        param.maxNumReferences--;
+        /* NOTE(review): this recompute uses numReorderPics + 1 where the
+         * initial computation above used numReorderPics + 2 -- confirm intentional */
+        vps.maxDecPicBuffering = X265_MIN(MAX_NUM_REF, X265_MAX(vps.numReorderPics + 1, (uint32_t)param.maxNumReferences) + vps.numReorderPics);
+    }
+    if (param.maxNumReferences != savedRefCount)
+        x265_log(&param, X265_LOG_INFO, "Lowering max references to %d to meet level requirement\n", param.maxNumReferences);
+
+    /* For level 5 and higher levels, the value of CtbSizeY shall be equal to 32 or 64 */
+    if (param.levelIdc >= 50 && param.maxCUSize < 32)
+    {
+        param.maxCUSize = 32;
+        x265_log(&param, X265_LOG_INFO, "Levels 5.0 and above require a maximum CTU size of at least 32, using --ctu 32\n");
+    }
+
+    /* The value of NumPocTotalCurr shall be less than or equal to 8 */
+    int numPocTotalCurr = param.maxNumReferences + !!param.bframes;
+    if (numPocTotalCurr > 8)
+    {
+        param.maxNumReferences = 8 - !!param.bframes;
+        x265_log(&param, X265_LOG_INFO, "Lowering max references to %d to meet numPocTotalCurr requirement\n", param.maxNumReferences);
+    }
+
+    return true;
+}
+}
+
+#if EXPORT_C_API
+
+/* these functions are exported as C functions (default) */
+using namespace X265_NS;
+extern "C" {
+
+#else
+
+/* these functions exist within private namespace (multilib) */
+namespace X265_NS {
+
+#endif
+
+/* Apply the restrictions of a named profile to *param.  Returns 0 on
+ * success, -1 when the profile is unknown, exceeds the build's bit depth
+ * (X265_DEPTH), or is incompatible with the configured input color space.
+ * NULL param or profile is accepted and treated as a no-op. */
+int x265_param_apply_profile(x265_param *param, const char *profile)
+{
+    if (!param || !profile)
+        return 0;
+
+    /* Check if profile bit-depth requirement is exceeded by internal bit depth */
+    bool bInvalidDepth = false;
+#if X265_DEPTH > 8
+    if (!strcmp(profile, "main") || !strcmp(profile, "mainstillpicture") || !strcmp(profile, "msp") ||
+        !strcmp(profile, "main444-8") || !strcmp(profile, "main-intra") ||
+        !strcmp(profile, "main444-intra") || !strcmp(profile, "main444-stillpicture"))
+        bInvalidDepth = true;
+#endif
+#if X265_DEPTH > 10
+    if (!strcmp(profile, "main10") || !strcmp(profile, "main422-10") || !strcmp(profile, "main444-10") ||
+        !strcmp(profile, "main10-intra") || !strcmp(profile, "main422-10-intra") || !strcmp(profile, "main444-10-intra"))
+        bInvalidDepth = true;
+#endif
+#if X265_DEPTH > 12
+    if (!strcmp(profile, "main12") || !strcmp(profile, "main422-12") || !strcmp(profile, "main444-12") ||
+        !strcmp(profile, "main12-intra") || !strcmp(profile, "main422-12-intra") || !strcmp(profile, "main444-12-intra"))
+        bInvalidDepth = true;
+#endif
+
+    if (bInvalidDepth)
+    {
+        x265_log(param, X265_LOG_ERROR, "%s profile not supported, internal bit depth %d.\n", profile, X265_DEPTH);
+        return -1;
+    }
+
+    /* intra-only profiles: any "-intra" suffix or a still-picture profile */
+    size_t l = strlen(profile);
+    bool bBoolIntra = (l > 6 && !strcmp(profile + l - 6, "-intra")) ||
+                      !strcmp(profile, "mainstillpicture") || !strcmp(profile, "msp");
+    if (bBoolIntra)
+    {
+        /* The profile may be detected as still picture if param->totalFrames is 1 */
+        param->keyframeMax = 1;
+    }
+
+    /* check that input color space is supported by profile */
+    if (!strcmp(profile, "main") || !strcmp(profile, "main-intra") ||
+        !strcmp(profile, "main10") || !strcmp(profile, "main10-intra") ||
+        !strcmp(profile, "main12") || !strcmp(profile, "main12-intra") ||
+        !strcmp(profile, "mainstillpicture") || !strcmp(profile, "msp"))
+    {
+        /* 4:2:0-only profiles */
+        if (param->internalCsp != X265_CSP_I420)
+        {
+            x265_log(param, X265_LOG_ERROR, "%s profile not compatible with %s input color space.\n",
+                     profile, x265_source_csp_names[param->internalCsp]);
+            return -1;
+        }
+    }
+    else if (!strcmp(profile, "main422-10") || !strcmp(profile, "main422-10-intra") ||
+             !strcmp(profile, "main422-12") || !strcmp(profile, "main422-12-intra"))
+    {
+        /* 4:2:2 profiles also accept 4:2:0 input */
+        if (param->internalCsp != X265_CSP_I420 && param->internalCsp != X265_CSP_I422)
+        {
+            x265_log(param, X265_LOG_ERROR, "%s profile not compatible with %s input color space.\n",
+                     profile, x265_source_csp_names[param->internalCsp]);
+            return -1;
+        }
+    }
+    else if (!strcmp(profile, "main444-8") ||
+             !strcmp(profile, "main444-intra") || !strcmp(profile, "main444-stillpicture") ||
+             !strcmp(profile, "main444-10") || !strcmp(profile, "main444-10-intra") ||
+             !strcmp(profile, "main444-12") || !strcmp(profile, "main444-12-intra") ||
+             !strcmp(profile, "main444-16-intra") || !strcmp(profile, "main444-16-stillpicture"))
+    {
+        /* any color space allowed */
+    }
+    else
+    {
+        x265_log(param, X265_LOG_ERROR, "unknown profile <%s>\n", profile);
+        return -1;
+    }
+
+    return 0;
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/level.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,39 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_LEVEL_H
+#define X265_LEVEL_H 1
+
+#include "common.h"
+#include "x265.h"
+
+namespace X265_NS {
+// encoder private namespace
+
+struct VPS;
+
+/* auto-detect the minimum decoder profile/level/tier for the configured
+ * stream and record the result in vps.ptl */
+void determineLevel(const x265_param &param, VPS& vps);
+
+/* clamp encoder parameters so the stream is decodable at param.levelIdc;
+ * returns false when resolution, frame rate, or rate control make the
+ * requested level impossible */
+bool enforceLevel(x265_param& param, VPS& vps);
+
+}
+
+#endif // ifndef X265_LEVEL_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/motion.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1264 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "lowres.h"
+#include "motion.h"
+#include "x265.h"
+
+#if _MSC_VER
+#pragma warning(disable: 4127) // conditional  expression is constant (macros use this construct)
+#endif
+
+using namespace X265_NS;
+
+namespace {
+
+struct SubpelWorkload
+{
+    int hpel_iters;
+    int hpel_dirs;
+    int qpel_iters;
+    int qpel_dirs;
+    bool hpel_satd;
+};
+
+const SubpelWorkload workload[X265_MAX_SUBPEL_LEVEL + 1] =
+{
+    { 1, 4, 0, 4, false }, // 4 SAD HPEL only
+    { 1, 4, 1, 4, false }, // 4 SAD HPEL + 4 SATD QPEL
+    { 1, 4, 1, 4, true },  // 4 SATD HPEL + 4 SATD QPEL
+    { 2, 4, 1, 4, true },  // 2x4 SATD HPEL + 4 SATD QPEL
+    { 2, 4, 2, 4, true },  // 2x4 SATD HPEL + 2x4 SATD QPEL
+    { 1, 8, 1, 8, true },  // 8 SATD HPEL + 8 SATD QPEL (default)
+    { 2, 8, 1, 8, true },  // 2x8 SATD HPEL + 8 SATD QPEL
+    { 2, 8, 2, 8, true },  // 2x8 SATD HPEL + 2x8 SATD QPEL
+};
+
+static int sizeScale[NUM_PU_SIZES];
+#define SAD_THRESH(v) (bcost < (((v >> 4) * sizeScale[partEnum])))
+
+/* radius 2 hexagon. repeated entries are to avoid having to compute mod6 every time. */
+const MV hex2[8] = { MV(-1, -2), MV(-2, 0), MV(-1, 2), MV(1, 2), MV(2, 0), MV(1, -2), MV(-1, -2), MV(-2, 0) };
+const uint8_t mod6m1[8] = { 5, 0, 1, 2, 3, 4, 5, 0 };  /* (x-1)%6 */
+const MV square1[9] = { MV(0, 0), MV(0, -1), MV(0, 1), MV(-1, 0), MV(1, 0), MV(-1, -1), MV(-1, 1), MV(1, -1), MV(1, 1) };
+const MV hex4[16] =
+{
+    MV(0, -4), MV(0, 4), MV(-2, -3), MV(2, -3),
+    MV(-4, -2), MV(4, -2), MV(-4, -1), MV(4, -1),
+    MV(-4, 0), MV(4, 0), MV(-4, 1), MV(4, 1),
+    MV(-4, 2), MV(4, 2), MV(-2, 3), MV(2, 3),
+};
+const MV offsets[] =
+{
+    MV(-1, 0), MV(0, -1),
+    MV(-1, -1), MV(1, -1),
+    MV(-1, 0), MV(1, 0),
+    MV(-1, 1), MV(-1, -1),
+    MV(1, -1), MV(1, 1),
+    MV(-1, 0), MV(0, 1),
+    MV(-1, 1), MV(1, 1),
+    MV(1, 0), MV(0, 1),
+}; // offsets for Two Point Search
+
+/* sum of absolute differences between MV candidates, used for adaptive ME range */
+inline int predictorDifference(const MV *mvc, intptr_t numCandidates)
+{
+    int sum = 0;
+
+    for (int i = 0; i < numCandidates - 1; i++)
+    {
+        sum += abs(mvc[i].x - mvc[i + 1].x)
+            +  abs(mvc[i].y - mvc[i + 1].y);
+    }
+
+    return sum;
+}
+
+}
+
+MotionEstimate::MotionEstimate()
+{
+    ctuAddr = -1;
+    absPartIdx = -1;
+    searchMethod = X265_HEX_SEARCH;
+    subpelRefine = 2;
+    blockwidth = blockheight = 0;
+    blockOffset = 0;
+    bChromaSATD = false;
+    chromaSatd = NULL;
+}
+
+void MotionEstimate::init(int method, int refine, int csp)
+{
+    searchMethod = method;
+    subpelRefine = refine;
+    fencPUYuv.create(FENC_STRIDE, csp);
+}
+
+void MotionEstimate::initScales(void)
+{
+#define SETUP_SCALE(W, H) \
+    sizeScale[LUMA_ ## W ## x ## H] = (H * H) >> 4;
+    SETUP_SCALE(4, 4);
+    SETUP_SCALE(8, 8);
+    SETUP_SCALE(8, 4);
+    SETUP_SCALE(4, 8);
+    SETUP_SCALE(16, 16);
+    SETUP_SCALE(16, 8);
+    SETUP_SCALE(8, 16);
+    SETUP_SCALE(16, 12);
+    SETUP_SCALE(12, 16);
+    SETUP_SCALE(4, 16);
+    SETUP_SCALE(16, 4);
+    SETUP_SCALE(32, 32);
+    SETUP_SCALE(32, 16);
+    SETUP_SCALE(16, 32);
+    SETUP_SCALE(32, 24);
+    SETUP_SCALE(24, 32);
+    SETUP_SCALE(32, 8);
+    SETUP_SCALE(8, 32);
+    SETUP_SCALE(64, 64);
+    SETUP_SCALE(64, 32);
+    SETUP_SCALE(32, 64);
+    SETUP_SCALE(64, 48);
+    SETUP_SCALE(48, 64);
+    SETUP_SCALE(64, 16);
+    SETUP_SCALE(16, 64);
+#undef SETUP_SCALE
+}
+
+int MotionEstimate::hpelIterationCount(int subme)
+{
+    return workload[subme].hpel_iters +
+           workload[subme].qpel_iters / 2;
+}
+
+MotionEstimate::~MotionEstimate()
+{
+    fencPUYuv.destroy();
+}
+
+/* Called by lookahead, luma only, no use of PicYuv */
+void MotionEstimate::setSourcePU(pixel *fencY, intptr_t stride, intptr_t offset, int pwidth, int pheight)
+{
+    partEnum = partitionFromSizes(pwidth, pheight);
+    X265_CHECK(LUMA_4x4 != partEnum, "4x4 inter partition detected!\n");
+    sad = primitives.pu[partEnum].sad;
+    satd = primitives.pu[partEnum].satd;
+    sad_x3 = primitives.pu[partEnum].sad_x3;
+    sad_x4 = primitives.pu[partEnum].sad_x4;
+
+    blockwidth = pwidth;
+    blockOffset = offset;
+    absPartIdx = ctuAddr = -1;
+
+    /* copy PU block into cache */
+    primitives.pu[partEnum].copy_pp(fencPUYuv.m_buf[0], FENC_STRIDE, fencY + offset, stride);
+    X265_CHECK(!bChromaSATD, "chroma distortion measurements impossible in this code path\n");
+}
+
+/* Called by Search::predInterSearch() or --pme equivalent, chroma residual might be considered */
+void MotionEstimate::setSourcePU(const Yuv& srcFencYuv, int _ctuAddr, int cuPartIdx, int puPartIdx, int pwidth, int pheight)
+{
+    partEnum = partitionFromSizes(pwidth, pheight);
+    X265_CHECK(LUMA_4x4 != partEnum, "4x4 inter partition detected!\n");
+    sad = primitives.pu[partEnum].sad;
+    satd = primitives.pu[partEnum].satd;
+    sad_x3 = primitives.pu[partEnum].sad_x3;
+    sad_x4 = primitives.pu[partEnum].sad_x4;
+    chromaSatd = primitives.chroma[fencPUYuv.m_csp].pu[partEnum].satd;
+
+    /* Enable chroma residual cost if subpelRefine level is greater than 2 and chroma block size
+     * is an even multiple of 4x4 pixels (indicated by non-null chromaSatd pointer) */
+    bChromaSATD = subpelRefine > 2 && chromaSatd;
+    X265_CHECK(!(bChromaSATD && !workload[subpelRefine].hpel_satd), "Chroma SATD cannot be used with SAD hpel\n");
+
+    ctuAddr = _ctuAddr;
+    absPartIdx = cuPartIdx + puPartIdx;
+    blockwidth = pwidth;
+    blockOffset = 0;
+
+    /* copy PU from CU Yuv */
+    fencPUYuv.copyPUFromYuv(srcFencYuv, puPartIdx, partEnum, bChromaSATD);
+}
+
+#define COST_MV_PT_DIST(mx, my, point, dist) \
+    do \
+    { \
+        MV tmv(mx, my); \
+        int cost = sad(fenc, FENC_STRIDE, fref + mx + my * stride, stride); \
+        cost += mvcost(tmv << 2); \
+        if (cost < bcost) { \
+            bcost = cost; \
+            bmv = tmv; \
+            bPointNr = point; \
+            bDistance = dist; \
+        } \
+    } while (0)
+
+#define COST_MV(mx, my) \
+    do \
+    { \
+        int cost = sad(fenc, FENC_STRIDE, fref + (mx) + (my) * stride, stride); \
+        cost += mvcost(MV(mx, my) << 2); \
+        COPY2_IF_LT(bcost, cost, bmv, MV(mx, my)); \
+    } while (0)
+
+#define COST_MV_X3_DIR(m0x, m0y, m1x, m1y, m2x, m2y, costs) \
+    { \
+        pixel *pix_base = fref + bmv.x + bmv.y * stride; \
+        sad_x3(fenc, \
+               pix_base + (m0x) + (m0y) * stride, \
+               pix_base + (m1x) + (m1y) * stride, \
+               pix_base + (m2x) + (m2y) * stride, \
+               stride, costs); \
+        (costs)[0] += mvcost((bmv + MV(m0x, m0y)) << 2); \
+        (costs)[1] += mvcost((bmv + MV(m1x, m1y)) << 2); \
+        (costs)[2] += mvcost((bmv + MV(m2x, m2y)) << 2); \
+    }
+
+#define COST_MV_PT_DIST_X4(m0x, m0y, p0, d0, m1x, m1y, p1, d1, m2x, m2y, p2, d2, m3x, m3y, p3, d3) \
+    { \
+        sad_x4(fenc, \
+               fref + (m0x) + (m0y) * stride, \
+               fref + (m1x) + (m1y) * stride, \
+               fref + (m2x) + (m2y) * stride, \
+               fref + (m3x) + (m3y) * stride, \
+               stride, costs); \
+        (costs)[0] += mvcost(MV(m0x, m0y) << 2); \
+        (costs)[1] += mvcost(MV(m1x, m1y) << 2); \
+        (costs)[2] += mvcost(MV(m2x, m2y) << 2); \
+        (costs)[3] += mvcost(MV(m3x, m3y) << 2); \
+        COPY4_IF_LT(bcost, costs[0], bmv, MV(m0x, m0y), bPointNr, p0, bDistance, d0); \
+        COPY4_IF_LT(bcost, costs[1], bmv, MV(m1x, m1y), bPointNr, p1, bDistance, d1); \
+        COPY4_IF_LT(bcost, costs[2], bmv, MV(m2x, m2y), bPointNr, p2, bDistance, d2); \
+        COPY4_IF_LT(bcost, costs[3], bmv, MV(m3x, m3y), bPointNr, p3, bDistance, d3); \
+    }
+
+#define COST_MV_X4(m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y) \
+    { \
+        pixel *pix_base = fref + omv.x + omv.y * stride; \
+        sad_x4(fenc, \
+               pix_base + (m0x) + (m0y) * stride, \
+               pix_base + (m1x) + (m1y) * stride, \
+               pix_base + (m2x) + (m2y) * stride, \
+               pix_base + (m3x) + (m3y) * stride, \
+               stride, costs); \
+        costs[0] += mvcost((omv + MV(m0x, m0y)) << 2); \
+        costs[1] += mvcost((omv + MV(m1x, m1y)) << 2); \
+        costs[2] += mvcost((omv + MV(m2x, m2y)) << 2); \
+        costs[3] += mvcost((omv + MV(m3x, m3y)) << 2); \
+        COPY2_IF_LT(bcost, costs[0], bmv, omv + MV(m0x, m0y)); \
+        COPY2_IF_LT(bcost, costs[1], bmv, omv + MV(m1x, m1y)); \
+        COPY2_IF_LT(bcost, costs[2], bmv, omv + MV(m2x, m2y)); \
+        COPY2_IF_LT(bcost, costs[3], bmv, omv + MV(m3x, m3y)); \
+    }
+
+#define COST_MV_X4_DIR(m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y, costs) \
+    { \
+        pixel *pix_base = fref + bmv.x + bmv.y * stride; \
+        sad_x4(fenc, \
+               pix_base + (m0x) + (m0y) * stride, \
+               pix_base + (m1x) + (m1y) * stride, \
+               pix_base + (m2x) + (m2y) * stride, \
+               pix_base + (m3x) + (m3y) * stride, \
+               stride, costs); \
+        (costs)[0] += mvcost((bmv + MV(m0x, m0y)) << 2); \
+        (costs)[1] += mvcost((bmv + MV(m1x, m1y)) << 2); \
+        (costs)[2] += mvcost((bmv + MV(m2x, m2y)) << 2); \
+        (costs)[3] += mvcost((bmv + MV(m3x, m3y)) << 2); \
+    }
+
+#define DIA1_ITER(mx, my) \
+    { \
+        omv.x = mx; omv.y = my; \
+        COST_MV_X4(0, -1, 0, 1, -1, 0, 1, 0); \
+    }
+
+#define CROSS(start, x_max, y_max) \
+    { \
+        int16_t i = start; \
+        if ((x_max) <= X265_MIN(mvmax.x - omv.x, omv.x - mvmin.x)) \
+            for (; i < (x_max) - 2; i += 4) { \
+                COST_MV_X4(i, 0, -i, 0, i + 2, 0, -i - 2, 0); } \
+        for (; i < (x_max); i += 2) \
+        { \
+            if (omv.x + i <= mvmax.x) \
+                COST_MV(omv.x + i, omv.y); \
+            if (omv.x - i >= mvmin.x) \
+                COST_MV(omv.x - i, omv.y); \
+        } \
+        i = start; \
+        if ((y_max) <= X265_MIN(mvmax.y - omv.y, omv.y - mvmin.y)) \
+            for (; i < (y_max) - 2; i += 4) { \
+                COST_MV_X4(0, i, 0, -i, 0, i + 2, 0, -i - 2); } \
+        for (; i < (y_max); i += 2) \
+        { \
+            if (omv.y + i <= mvmax.y) \
+                COST_MV(omv.x, omv.y + i); \
+            if (omv.y - i >= mvmin.y) \
+                COST_MV(omv.x, omv.y - i); \
+        } \
+    }
+
+void MotionEstimate::StarPatternSearch(ReferencePlanes *ref,
+                                       const MV &       mvmin,
+                                       const MV &       mvmax,
+                                       MV &             bmv,
+                                       int &            bcost,
+                                       int &            bPointNr,
+                                       int &            bDistance,
+                                       int              earlyExitIters,
+                                       int              merange)
+{
+    ALIGN_VAR_16(int, costs[16]);
+    pixel* fenc = fencPUYuv.m_buf[0];
+    pixel* fref = ref->fpelPlane[0] + blockOffset;
+    intptr_t stride = ref->lumaStride;
+
+    MV omv = bmv;
+    int saved = bcost;
+    int rounds = 0;
+
+    {
+        int16_t dist = 1;
+
+        /* bPointNr
+              2
+            4 * 5
+              7
+         */
+        const int16_t top    = omv.y - dist;
+        const int16_t bottom = omv.y + dist;
+        const int16_t left   = omv.x - dist;
+        const int16_t right  = omv.x + dist;
+
+        if (top >= mvmin.y && left >= mvmin.x && right <= mvmax.x && bottom <= mvmax.y)
+        {
+            COST_MV_PT_DIST_X4(omv.x,  top,    2, dist,
+                               left,  omv.y,   4, dist,
+                               right, omv.y,   5, dist,
+                               omv.x,  bottom, 7, dist);
+        }
+        else
+        {
+            if (top >= mvmin.y) // check top
+            {
+                COST_MV_PT_DIST(omv.x, top, 2, dist);
+            }
+            if (left >= mvmin.x) // check middle left
+            {
+                COST_MV_PT_DIST(left, omv.y, 4, dist);
+            }
+            if (right <= mvmax.x) // check middle right
+            {
+                COST_MV_PT_DIST(right, omv.y, 5, dist);
+            }
+            if (bottom <= mvmax.y) // check bottom
+            {
+                COST_MV_PT_DIST(omv.x, bottom, 7, dist);
+            }
+        }
+        if (bcost < saved)
+            rounds = 0;
+        else if (++rounds >= earlyExitIters)
+            return;
+    }
+
+    for (int16_t dist = 2; dist <= 8; dist <<= 1)
+    {
+        /* bPointNr
+              2
+             1 3
+            4 * 5
+             6 8
+              7
+         Points 2, 4, 5, 7 are dist
+         Points 1, 3, 6, 8 are dist>>1
+         */
+        const int16_t top     = omv.y - dist;
+        const int16_t bottom  = omv.y + dist;
+        const int16_t left    = omv.x - dist;
+        const int16_t right   = omv.x + dist;
+        const int16_t top2    = omv.y - (dist >> 1);
+        const int16_t bottom2 = omv.y + (dist >> 1);
+        const int16_t left2   = omv.x - (dist >> 1);
+        const int16_t right2  = omv.x + (dist >> 1);
+        saved = bcost;
+
+        if (top >= mvmin.y && left >= mvmin.x &&
+            right <= mvmax.x && bottom <= mvmax.y) // check border
+        {
+            COST_MV_PT_DIST_X4(omv.x,  top,   2, dist,
+                               left2,  top2,  1, dist >> 1,
+                               right2, top2,  3, dist >> 1,
+                               left,   omv.y, 4, dist);
+            COST_MV_PT_DIST_X4(right,  omv.y,   5, dist,
+                               left2,  bottom2, 6, dist >> 1,
+                               right2, bottom2, 8, dist >> 1,
+                               omv.x,  bottom,  7, dist);
+        }
+        else // check border for each mv
+        {
+            if (top >= mvmin.y) // check top
+            {
+                COST_MV_PT_DIST(omv.x, top, 2, dist);
+            }
+            if (top2 >= mvmin.y) // check half top
+            {
+                if (left2 >= mvmin.x) // check half left
+                {
+                    COST_MV_PT_DIST(left2, top2, 1, (dist >> 1));
+                }
+                if (right2 <= mvmax.x) // check half right
+                {
+                    COST_MV_PT_DIST(right2, top2, 3, (dist >> 1));
+                }
+            }
+            if (left >= mvmin.x) // check left
+            {
+                COST_MV_PT_DIST(left, omv.y, 4, dist);
+            }
+            if (right <= mvmax.x) // check right
+            {
+                COST_MV_PT_DIST(right, omv.y, 5, dist);
+            }
+            if (bottom2 <= mvmax.y) // check half bottom
+            {
+                if (left2 >= mvmin.x) // check half left
+                {
+                    COST_MV_PT_DIST(left2, bottom2, 6, (dist >> 1));
+                }
+                if (right2 <= mvmax.x) // check half right
+                {
+                    COST_MV_PT_DIST(right2, bottom2, 8, (dist >> 1));
+                }
+            }
+            if (bottom <= mvmax.y) // check bottom
+            {
+                COST_MV_PT_DIST(omv.x, bottom, 7, dist);
+            }
+        }
+
+        if (bcost < saved)
+            rounds = 0;
+        else if (++rounds >= earlyExitIters)
+            return;
+    }
+
+    for (int16_t dist = 16; dist <= (int16_t)merange; dist <<= 1)
+    {
+        const int16_t top    = omv.y - dist;
+        const int16_t bottom = omv.y + dist;
+        const int16_t left   = omv.x - dist;
+        const int16_t right  = omv.x + dist;
+
+        saved = bcost;
+        if (top >= mvmin.y && left >= mvmin.x &&
+            right <= mvmax.x && bottom <= mvmax.y) // check border
+        {
+            /* index
+                  0
+                  3
+                  2
+                  1
+          0 3 2 1 * 1 2 3 0
+                  1
+                  2
+                  3
+                  0
+            */
+
+            COST_MV_PT_DIST_X4(omv.x,  top,    0, dist,
+                               left,   omv.y,  0, dist,
+                               right,  omv.y,  0, dist,
+                               omv.x,  bottom, 0, dist);
+
+            for (int16_t index = 1; index < 4; index++)
+            {
+                int16_t posYT = top    + ((dist >> 2) * index);
+                int16_t posYB = bottom - ((dist >> 2) * index);
+                int16_t posXL = omv.x  - ((dist >> 2) * index);
+                int16_t posXR = omv.x  + ((dist >> 2) * index);
+
+                COST_MV_PT_DIST_X4(posXL, posYT, 0, dist,
+                                   posXR, posYT, 0, dist,
+                                   posXL, posYB, 0, dist,
+                                   posXR, posYB, 0, dist);
+            }
+        }
+        else // check border for each mv
+        {
+            if (top >= mvmin.y) // check top
+            {
+                COST_MV_PT_DIST(omv.x, top, 0, dist);
+            }
+            if (left >= mvmin.x) // check left
+            {
+                COST_MV_PT_DIST(left, omv.y, 0, dist);
+            }
+            if (right <= mvmax.x) // check right
+            {
+                COST_MV_PT_DIST(right, omv.y, 0, dist);
+            }
+            if (bottom <= mvmax.y) // check bottom
+            {
+                COST_MV_PT_DIST(omv.x, bottom, 0, dist);
+            }
+            for (int16_t index = 1; index < 4; index++)
+            {
+                int16_t posYT = top    + ((dist >> 2) * index);
+                int16_t posYB = bottom - ((dist >> 2) * index);
+                int16_t posXL = omv.x - ((dist >> 2) * index);
+                int16_t posXR = omv.x + ((dist >> 2) * index);
+
+                if (posYT >= mvmin.y) // check top
+                {
+                    if (posXL >= mvmin.x) // check left
+                    {
+                        COST_MV_PT_DIST(posXL, posYT, 0, dist);
+                    }
+                    if (posXR <= mvmax.x) // check right
+                    {
+                        COST_MV_PT_DIST(posXR, posYT, 0, dist);
+                    }
+                }
+                if (posYB <= mvmax.y) // check bottom
+                {
+                    if (posXL >= mvmin.x) // check left
+                    {
+                        COST_MV_PT_DIST(posXL, posYB, 0, dist);
+                    }
+                    if (posXR <= mvmax.x) // check right
+                    {
+                        COST_MV_PT_DIST(posXR, posYB, 0, dist);
+                    }
+                }
+            }
+        }
+
+        if (bcost < saved)
+            rounds = 0;
+        else if (++rounds >= earlyExitIters)
+            return;
+    }
+}
+
+int MotionEstimate::motionEstimate(ReferencePlanes *ref,
+                                   const MV &       mvmin,
+                                   const MV &       mvmax,
+                                   const MV &       qmvp,
+                                   int              numCandidates,
+                                   const MV *       mvc,
+                                   int              merange,
+                                   MV &             outQMv)
+{
+    ALIGN_VAR_16(int, costs[16]);
+    if (ctuAddr >= 0)
+        blockOffset = ref->reconPic->getLumaAddr(ctuAddr, absPartIdx) - ref->reconPic->getLumaAddr(0);
+    intptr_t stride = ref->lumaStride;
+    pixel* fenc = fencPUYuv.m_buf[0];
+    pixel* fref = ref->fpelPlane[0] + blockOffset;
+
+    setMVP(qmvp);
+
+    MV qmvmin = mvmin.toQPel();
+    MV qmvmax = mvmax.toQPel();
+
+    /* The term cost used here means satd/sad values for that particular search.
+     * The costs used in ME integer search only includes the SAD cost of motion
+     * residual and sqrtLambda times MVD bits.  The subpel refine steps use SATD
+     * cost of residual and sqrtLambda * MVD bits.  Mode decision will be based
+     * on video distortion cost (SSE/PSNR) plus lambda times all signaling bits
+     * (mode + MVD bits). */
+
+    // measure SAD cost at clipped QPEL MVP
+    MV pmv = qmvp.clipped(qmvmin, qmvmax);
+    MV bestpre = pmv;
+    int bprecost;
+
+    if (ref->isLowres)
+        bprecost = ref->lowresQPelCost(fenc, blockOffset, pmv, sad);
+    else
+        bprecost = subpelCompare(ref, pmv, sad);
+
+    /* re-measure full pel rounded MVP with SAD as search start point */
+    MV bmv = pmv.roundToFPel();
+    int bcost = bprecost;
+    if (pmv.isSubpel())
+        bcost = sad(fenc, FENC_STRIDE, fref + bmv.x + bmv.y * stride, stride) + mvcost(bmv << 2);
+
+    // measure SAD cost at MV(0) if MVP is not zero
+    if (pmv.notZero())
+    {
+        int cost = sad(fenc, FENC_STRIDE, fref, stride) + mvcost(MV(0, 0));
+        if (cost < bcost)
+        {
+            bcost = cost;
+            bmv = 0;
+        }
+    }
+
+    X265_CHECK(!(ref->isLowres && numCandidates), "lowres motion candidates not allowed\n")
+    // measure SAD cost at each QPEL motion vector candidate
+    for (int i = 0; i < numCandidates; i++)
+    {
+        MV m = mvc[i].clipped(qmvmin, qmvmax);
+        if (m.notZero() & (m != pmv ? 1 : 0) & (m != bestpre ? 1 : 0)) // check already measured
+        {
+            int cost = subpelCompare(ref, m, sad) + mvcost(m);
+            if (cost < bprecost)
+            {
+                bprecost = cost;
+                bestpre = m;
+            }
+        }
+    }
+
+    pmv = pmv.roundToFPel();
+    MV omv = bmv;  // current search origin or starting point
+
+    switch (searchMethod)
+    {
+    case X265_DIA_SEARCH:
+    {
+        /* diamond search, radius 1 */
+        bcost <<= 4;
+        int i = merange;
+        do
+        {
+            COST_MV_X4_DIR(0, -1, 0, 1, -1, 0, 1, 0, costs);
+            COPY1_IF_LT(bcost, (costs[0] << 4) + 1);
+            COPY1_IF_LT(bcost, (costs[1] << 4) + 3);
+            COPY1_IF_LT(bcost, (costs[2] << 4) + 4);
+            COPY1_IF_LT(bcost, (costs[3] << 4) + 12);
+            if (!(bcost & 15))
+                break;
+            bmv.x -= (bcost << 28) >> 30;
+            bmv.y -= (bcost << 30) >> 30;
+            bcost &= ~15;
+        }
+        while (--i && bmv.checkRange(mvmin, mvmax));
+        bcost >>= 4;
+        break;
+    }
+
+    case X265_HEX_SEARCH:
+    {
+me_hex2:
+        /* hexagon search, radius 2 */
+#if 0
+        for (int i = 0; i < merange / 2; i++)
+        {
+            omv = bmv;
+            COST_MV(omv.x - 2, omv.y);
+            COST_MV(omv.x - 1, omv.y + 2);
+            COST_MV(omv.x + 1, omv.y + 2);
+            COST_MV(omv.x + 2, omv.y);
+            COST_MV(omv.x + 1, omv.y - 2);
+            COST_MV(omv.x - 1, omv.y - 2);
+            if (omv == bmv)
+                break;
+            if (!bmv.checkRange(mvmin, mvmax))
+                break;
+        }
+
+#else // if 0
+      /* equivalent to the above, but eliminates duplicate candidates */
+        COST_MV_X3_DIR(-2, 0, -1, 2,  1, 2, costs);
+        bcost <<= 3;
+        COPY1_IF_LT(bcost, (costs[0] << 3) + 2);
+        COPY1_IF_LT(bcost, (costs[1] << 3) + 3);
+        COPY1_IF_LT(bcost, (costs[2] << 3) + 4);
+        COST_MV_X3_DIR(2, 0,  1, -2, -1, -2, costs);
+        COPY1_IF_LT(bcost, (costs[0] << 3) + 5);
+        COPY1_IF_LT(bcost, (costs[1] << 3) + 6);
+        COPY1_IF_LT(bcost, (costs[2] << 3) + 7);
+
+        if (bcost & 7)
+        {
+            int dir = (bcost & 7) - 2;
+            bmv += hex2[dir + 1];
+
+            /* half hexagon, not overlapping the previous iteration */
+            for (int i = (merange >> 1) - 1; i > 0 && bmv.checkRange(mvmin, mvmax); i--)
+            {
+                COST_MV_X3_DIR(hex2[dir + 0].x, hex2[dir + 0].y,
+                               hex2[dir + 1].x, hex2[dir + 1].y,
+                               hex2[dir + 2].x, hex2[dir + 2].y,
+                               costs);
+                bcost &= ~7;
+                COPY1_IF_LT(bcost, (costs[0] << 3) + 1);
+                COPY1_IF_LT(bcost, (costs[1] << 3) + 2);
+                COPY1_IF_LT(bcost, (costs[2] << 3) + 3);
+                if (!(bcost & 7))
+                    break;
+                dir += (bcost & 7) - 2;
+                dir = mod6m1[dir + 1];
+                bmv += hex2[dir + 1];
+            }
+        }
+        bcost >>= 3;
+#endif // if 0
+
+        /* square refine */
+        int dir = 0;
+        COST_MV_X4_DIR(0, -1,  0, 1, -1, 0, 1, 0, costs);
+        COPY2_IF_LT(bcost, costs[0], dir, 1);
+        COPY2_IF_LT(bcost, costs[1], dir, 2);
+        COPY2_IF_LT(bcost, costs[2], dir, 3);
+        COPY2_IF_LT(bcost, costs[3], dir, 4);
+        COST_MV_X4_DIR(-1, -1, -1, 1, 1, -1, 1, 1, costs);
+        COPY2_IF_LT(bcost, costs[0], dir, 5);
+        COPY2_IF_LT(bcost, costs[1], dir, 6);
+        COPY2_IF_LT(bcost, costs[2], dir, 7);
+        COPY2_IF_LT(bcost, costs[3], dir, 8);
+        bmv += square1[dir];
+        break;
+    }
+
+    case X265_UMH_SEARCH:
+    {
+        int ucost1, ucost2;
+        int16_t cross_start = 1;
+
+        /* refine predictors */
+        omv = bmv;
+        ucost1 = bcost;
+        DIA1_ITER(pmv.x, pmv.y);
+        if (pmv.notZero())
+            DIA1_ITER(0, 0);
+
+        ucost2 = bcost;
+        if (bmv.notZero() && bmv != pmv)
+            DIA1_ITER(bmv.x, bmv.y);
+        if (bcost == ucost2)
+            cross_start = 3;
+
+        /* Early Termination */
+        omv = bmv;
+        if (bcost == ucost2 && SAD_THRESH(2000))
+        {
+            COST_MV_X4(0, -2, -1, -1, 1, -1, -2, 0);
+            COST_MV_X4(2, 0, -1, 1, 1, 1,  0, 2);
+            if (bcost == ucost1 && SAD_THRESH(500))
+                break;
+            if (bcost == ucost2)
+            {
+                int16_t range = (int16_t)(merange >> 1) | 1;
+                CROSS(3, range, range);
+                COST_MV_X4(-1, -2, 1, -2, -2, -1, 2, -1);
+                COST_MV_X4(-2, 1, 2, 1, -1, 2, 1, 2);
+                if (bcost == ucost2)
+                    break;
+                cross_start = range + 2;
+            }
+        }
+
+        // TODO: Need to study x264's logic for building mvc list to understand why they
+        //       have special cases here for 16x16, and whether they apply to HEVC CTU
+
+        // adaptive search range based on mvc variability
+        if (numCandidates)
+        {
+            /* range multipliers based on casual inspection of some statistics of
+             * average distance between current predictor and final mv found by ESA.
+             * these have not been tuned much by actual encoding. */
+            static const uint8_t range_mul[4][4] =
+            {
+                { 3, 3, 4, 4 },
+                { 3, 4, 4, 4 },
+                { 4, 4, 4, 5 },
+                { 4, 4, 5, 6 },
+            };
+
+            int mvd;
+            int sad_ctx, mvd_ctx;
+            int denom = 1;
+
+            if (numCandidates == 1)
+            {
+                if (LUMA_64x64 == partEnum)
+                    /* mvc is probably the same as mvp, so the difference isn't meaningful.
+                     * but prediction usually isn't too bad, so just use medium range */
+                    mvd = 25;
+                else
+                    mvd = abs(qmvp.x - mvc[0].x) + abs(qmvp.y - mvc[0].y);
+            }
+            else
+            {
+                /* calculate the degree of agreement between predictors. */
+
+                /* in 64x64, mvc includes all the neighbors used to make mvp,
+                 * so don't count mvp separately. */
+
+                denom = numCandidates - 1;
+                mvd = 0;
+                if (partEnum != LUMA_64x64)
+                {
+                    mvd = abs(qmvp.x - mvc[0].x) + abs(qmvp.y - mvc[0].y);
+                    denom++;
+                }
+                mvd += predictorDifference(mvc, numCandidates);
+            }
+
+            sad_ctx = SAD_THRESH(1000) ? 0
+                : SAD_THRESH(2000) ? 1
+                : SAD_THRESH(4000) ? 2 : 3;
+            mvd_ctx = mvd < 10 * denom ? 0
+                : mvd < 20 * denom ? 1
+                : mvd < 40 * denom ? 2 : 3;
+
+            merange = (merange * range_mul[mvd_ctx][sad_ctx]) >> 2;
+        }
+
+        /* FIXME if the above DIA2/OCT2/CROSS found a new mv, it has not updated omx/omy.
+         * we are still centered on the same place as the DIA2. is this desirable? */
+        CROSS(cross_start, merange, merange >> 1);
+        COST_MV_X4(-2, -2, -2, 2, 2, -2, 2, 2);
+
+        /* hexagon grid */
+        omv = bmv;
+        const uint16_t *p_cost_omvx = m_cost_mvx + omv.x * 4;
+        const uint16_t *p_cost_omvy = m_cost_mvy + omv.y * 4;
+        uint16_t i = 1;
+        do
+        {
+            if (4 * i > X265_MIN4(mvmax.x - omv.x, omv.x - mvmin.x,
+                                  mvmax.y - omv.y, omv.y - mvmin.y))
+            {
+                for (int j = 0; j < 16; j++)
+                {
+                    MV mv = omv + (hex4[j] * i);
+                    if (mv.checkRange(mvmin, mvmax))
+                        COST_MV(mv.x, mv.y);
+                }
+            }
+            else
+            {
+                int16_t dir = 0;
+                pixel *fref_base = fref + omv.x + (omv.y - 4 * i) * stride;
+                size_t dy = (size_t)i * stride;
+#define SADS(k, x0, y0, x1, y1, x2, y2, x3, y3) \
+    sad_x4(fenc, \
+           fref_base x0 * i + (y0 - 2 * k + 4) * dy, \
+           fref_base x1 * i + (y1 - 2 * k + 4) * dy, \
+           fref_base x2 * i + (y2 - 2 * k + 4) * dy, \
+           fref_base x3 * i + (y3 - 2 * k + 4) * dy, \
+           stride, costs + 4 * k); \
+    fref_base += 2 * dy;
+#define ADD_MVCOST(k, x, y) costs[k] += p_cost_omvx[x * 4 * i] + p_cost_omvy[y * 4 * i]
+#define MIN_MV(k, x, y)     COPY2_IF_LT(bcost, costs[k], dir, x * 16 + (y & 15))
+
+                SADS(0, +0, -4, +0, +4, -2, -3, +2, -3);
+                SADS(1, -4, -2, +4, -2, -4, -1, +4, -1);
+                SADS(2, -4, +0, +4, +0, -4, +1, +4, +1);
+                SADS(3, -4, +2, +4, +2, -2, +3, +2, +3);
+                ADD_MVCOST(0, 0, -4);
+                ADD_MVCOST(1, 0, 4);
+                ADD_MVCOST(2, -2, -3);
+                ADD_MVCOST(3, 2, -3);
+                ADD_MVCOST(4, -4, -2);
+                ADD_MVCOST(5, 4, -2);
+                ADD_MVCOST(6, -4, -1);
+                ADD_MVCOST(7, 4, -1);
+                ADD_MVCOST(8, -4, 0);
+                ADD_MVCOST(9, 4, 0);
+                ADD_MVCOST(10, -4, 1);
+                ADD_MVCOST(11, 4, 1);
+                ADD_MVCOST(12, -4, 2);
+                ADD_MVCOST(13, 4, 2);
+                ADD_MVCOST(14, -2, 3);
+                ADD_MVCOST(15, 2, 3);
+                MIN_MV(0, 0, -4);
+                MIN_MV(1, 0, 4);
+                MIN_MV(2, -2, -3);
+                MIN_MV(3, 2, -3);
+                MIN_MV(4, -4, -2);
+                MIN_MV(5, 4, -2);
+                MIN_MV(6, -4, -1);
+                MIN_MV(7, 4, -1);
+                MIN_MV(8, -4, 0);
+                MIN_MV(9, 4, 0);
+                MIN_MV(10, -4, 1);
+                MIN_MV(11, 4, 1);
+                MIN_MV(12, -4, 2);
+                MIN_MV(13, 4, 2);
+                MIN_MV(14, -2, 3);
+                MIN_MV(15, 2, 3);
+#undef SADS
+#undef ADD_MVCOST
+#undef MIN_MV
+                if (dir)
+                {
+                    bmv.x = omv.x + i * (dir >> 4);
+                    bmv.y = omv.y + i * ((dir << 28) >> 28);
+                }
+            }
+        }
+        while (++i <= merange >> 2);
+        if (bmv.checkRange(mvmin, mvmax))
+            goto me_hex2;
+        break;
+    }
+
+    case X265_STAR_SEARCH: // Adapted from HM ME
+    {
+        int bPointNr = 0;
+        int bDistance = 0;
+
+        /* initial star-pattern search with an aggressive early-exit limit */
+        const int EarlyExitIters = 3;
+        StarPatternSearch(ref, mvmin, mvmax, bmv, bcost, bPointNr, bDistance, EarlyExitIters, merange);
+        if (bDistance == 1)
+        {
+            // if best distance was only 1, check two missing points.  If no new point is found, stop
+            if (bPointNr)
+            {
+                /* For a given direction 1 to 8, check nearest two outer X pixels
+                     X   X
+                   X 1 2 3 X
+                     4 * 5
+                   X 6 7 8 X
+                     X   X
+                */
+                int saved = bcost;
+                const MV mv1 = bmv + offsets[(bPointNr - 1) * 2];
+                const MV mv2 = bmv + offsets[(bPointNr - 1) * 2 + 1];
+                if (mv1.checkRange(mvmin, mvmax))
+                {
+                    COST_MV(mv1.x, mv1.y);
+                }
+                if (mv2.checkRange(mvmin, mvmax))
+                {
+                    COST_MV(mv2.x, mv2.y);
+                }
+                if (bcost == saved)
+                    break;
+            }
+            else
+                break;
+        }
+
+        const int RasterDistance = 5;
+        if (bDistance > RasterDistance)
+        {
+            // raster search refinement if original search distance was too big
+            MV tmv;
+            for (tmv.y = mvmin.y; tmv.y <= mvmax.y; tmv.y += RasterDistance)
+            {
+                for (tmv.x = mvmin.x; tmv.x <= mvmax.x; tmv.x += RasterDistance)
+                {
+                    if (tmv.x + (RasterDistance * 3) <= mvmax.x)
+                    {
+                        /* measure four raster points at once; tmv is a full-pel
+                         * MV, so scale by 4 (<< 2) to qpel units for mvcost() */
+                        pixel *pix_base = fref + tmv.y * stride + tmv.x;
+                        sad_x4(fenc,
+                               pix_base,
+                               pix_base + RasterDistance,
+                               pix_base + RasterDistance * 2,
+                               pix_base + RasterDistance * 3,
+                               stride, costs);
+                        costs[0] += mvcost(tmv << 2);
+                        COPY2_IF_LT(bcost, costs[0], bmv, tmv);
+                        tmv.x += RasterDistance;
+                        costs[1] += mvcost(tmv << 2);
+                        COPY2_IF_LT(bcost, costs[1], bmv, tmv);
+                        tmv.x += RasterDistance;
+                        costs[2] += mvcost(tmv << 2);
+                        COPY2_IF_LT(bcost, costs[2], bmv, tmv);
+                        tmv.x += RasterDistance;
+                        costs[3] += mvcost(tmv << 2); /* was 'tmv << 3': wrong fpel->qpel scale overstated the MV cost of every 4th raster point */
+                        COPY2_IF_LT(bcost, costs[3], bmv, tmv);
+                    }
+                    else
+                        COST_MV(tmv.x, tmv.y);
+                }
+            }
+        }
+
+        /* repeat full star searches centered on the current best MV until one
+         * fails to move the best match more than a single pel */
+        while (bDistance > 0)
+        {
+            // center a new search around current best
+            bDistance = 0;
+            bPointNr = 0;
+            const int MaxIters = 32;
+            StarPatternSearch(ref, mvmin, mvmax, bmv, bcost, bPointNr, bDistance, MaxIters, merange);
+
+            if (bDistance == 1)
+            {
+                if (!bPointNr)
+                    break;
+
+                /* For a given direction 1 to 8, check nearest 2 outer X pixels
+                        X   X
+                    X 1 2 3 X
+                        4 * 5
+                    X 6 7 8 X
+                        X   X
+                */
+                const MV mv1 = bmv + offsets[(bPointNr - 1) * 2];
+                const MV mv2 = bmv + offsets[(bPointNr - 1) * 2 + 1];
+                if (mv1.checkRange(mvmin, mvmax))
+                {
+                    COST_MV(mv1.x, mv1.y);
+                }
+                if (mv2.checkRange(mvmin, mvmax))
+                {
+                    COST_MV(mv2.x, mv2.y);
+                }
+                break;
+            }
+        }
+
+        break;
+    }
+
+    case X265_FULL_SEARCH:
+    {
+        // dead slow exhaustive search, but at least it uses sad_x4()
+        MV tmv;
+        for (tmv.y = mvmin.y; tmv.y <= mvmax.y; tmv.y++)
+        {
+            for (tmv.x = mvmin.x; tmv.x <= mvmax.x; tmv.x++)
+            {
+                if (tmv.x + 3 <= mvmax.x)
+                {
+                    pixel *pix_base = fref + tmv.y * stride + tmv.x;
+                    sad_x4(fenc,
+                           pix_base,
+                           pix_base + 1,
+                           pix_base + 2,
+                           pix_base + 3,
+                           stride, costs);
+                    costs[0] += mvcost(tmv << 2);
+                    COPY2_IF_LT(bcost, costs[0], bmv, tmv);
+                    tmv.x++;
+                    costs[1] += mvcost(tmv << 2);
+                    COPY2_IF_LT(bcost, costs[1], bmv, tmv);
+                    tmv.x++;
+                    costs[2] += mvcost(tmv << 2);
+                    COPY2_IF_LT(bcost, costs[2], bmv, tmv);
+                    tmv.x++;
+                    costs[3] += mvcost(tmv << 2);
+                    COPY2_IF_LT(bcost, costs[3], bmv, tmv);
+                }
+                else
+                    COST_MV(tmv.x, tmv.y);
+            }
+        }
+
+        break;
+    }
+
+    default:
+        X265_CHECK(0, "invalid motion estimate mode\n");
+        break;
+    }
+
+    if (bprecost < bcost)
+    {
+        bmv = bestpre;
+        bcost = bprecost;
+    }
+    else
+        bmv = bmv.toQPel(); // promote search bmv to qpel
+
+    const SubpelWorkload& wl = workload[this->subpelRefine];
+
+    if (!bcost)
+    {
+        /* if there was zero residual at the clipped MVP, we can skip subpel
+         * refine, but we do need to include the mvcost in the returned cost */
+        bcost = mvcost(bmv);
+    }
+    else if (ref->isLowres)
+    {
+        int bdir = 0;
+        for (int i = 1; i <= wl.hpel_dirs; i++)
+        {
+            MV qmv = bmv + square1[i] * 2;
+            int cost = ref->lowresQPelCost(fenc, blockOffset, qmv, sad) + mvcost(qmv);
+            COPY2_IF_LT(bcost, cost, bdir, i);
+        }
+
+        bmv += square1[bdir] * 2;
+        bcost = ref->lowresQPelCost(fenc, blockOffset, bmv, satd) + mvcost(bmv);
+
+        bdir = 0;
+        for (int i = 1; i <= wl.qpel_dirs; i++)
+        {
+            MV qmv = bmv + square1[i];
+            int cost = ref->lowresQPelCost(fenc, blockOffset, qmv, satd) + mvcost(qmv);
+            COPY2_IF_LT(bcost, cost, bdir, i);
+        }
+
+        bmv += square1[bdir];
+    }
+    else
+    {
+        pixelcmp_t hpelcomp;
+
+        if (wl.hpel_satd)
+        {
+            bcost = subpelCompare(ref, bmv, satd) + mvcost(bmv);
+            hpelcomp = satd;
+        }
+        else
+            hpelcomp = sad;
+
+        for (int iter = 0; iter < wl.hpel_iters; iter++)
+        {
+            int bdir = 0;
+            for (int i = 1; i <= wl.hpel_dirs; i++)
+            {
+                MV qmv = bmv + square1[i] * 2;
+                int cost = subpelCompare(ref, qmv, hpelcomp) + mvcost(qmv);
+                COPY2_IF_LT(bcost, cost, bdir, i);
+            }
+
+            if (bdir)
+                bmv += square1[bdir] * 2;
+            else
+                break;
+        }
+
+        /* if HPEL search used SAD, remeasure with SATD before QPEL */
+        if (!wl.hpel_satd)
+            bcost = subpelCompare(ref, bmv, satd) + mvcost(bmv);
+
+        for (int iter = 0; iter < wl.qpel_iters; iter++)
+        {
+            int bdir = 0;
+            for (int i = 1; i <= wl.qpel_dirs; i++)
+            {
+                MV qmv = bmv + square1[i];
+                int cost = subpelCompare(ref, qmv, satd) + mvcost(qmv);
+                COPY2_IF_LT(bcost, cost, bdir, i);
+            }
+
+            if (bdir)
+                bmv += square1[bdir];
+            else
+                break;
+        }
+    }
+
+    x265_emms();
+    outQMv = bmv;
+    return bcost;
+}
+
+/* Measure the cost of one qpel motion vector against the cached source PU.
+ * 'cmp' (sad or satd) is used for luma; when bChromaSATD is set, chroma
+ * distortion measured with chromaSatd is added in.  qmv is in quarter-pel
+ * units: the integer part (>> 2) selects the full-pel location, the two
+ * fractional bits select the interpolation filter phase. */
+int MotionEstimate::subpelCompare(ReferencePlanes *ref, const MV& qmv, pixelcmp_t cmp)
+{
+    intptr_t refStride = ref->lumaStride;
+    pixel *fref = ref->fpelPlane[0] + blockOffset + (qmv.x >> 2) + (qmv.y >> 2) * refStride;
+    int xFrac = qmv.x & 0x3;
+    int yFrac = qmv.y & 0x3;
+    int cost;
+    intptr_t lclStride = fencPUYuv.m_size;
+    X265_CHECK(lclStride == FENC_STRIDE, "fenc buffer is assumed to have FENC_STRIDE by sad_x3 and sad_x4\n");
+
+    if (!(yFrac | xFrac))
+        /* integer MV: compare directly against the full-pel reference plane */
+        cost = cmp(fencPUYuv.m_buf[0], lclStride, fref, refStride);
+    else
+    {
+        /* we are taking a short-cut here if the reference is weighted. To be
+         * accurate we should be interpolating unweighted pixels and weighting
+         * the final 16bit values prior to rounding and down shifting. Instead we
+         * are simply interpolating the weighted full-pel pixels. Not 100%
+         * accurate but good enough for fast qpel ME */
+        ALIGN_VAR_32(pixel, subpelbuf[64 * 64]);
+        /* pick the cheapest interpolation: horizontal-only, vertical-only, or
+         * separable horizontal+vertical when both fractions are non-zero */
+        if (!yFrac)
+            primitives.pu[partEnum].luma_hpp(fref, refStride, subpelbuf, lclStride, xFrac);
+        else if (!xFrac)
+            primitives.pu[partEnum].luma_vpp(fref, refStride, subpelbuf, lclStride, yFrac);
+        else
+            primitives.pu[partEnum].luma_hvpp(fref, refStride, subpelbuf, lclStride, xFrac, yFrac);
+
+        cost = cmp(fencPUYuv.m_buf[0], lclStride, subpelbuf, lclStride);
+    }
+
+    if (bChromaSATD)
+    {
+        int csp    = fencPUYuv.m_csp;
+        int hshift = fencPUYuv.m_hChromaShift;
+        int vshift = fencPUYuv.m_vChromaShift;
+        /* a chroma full-pel step corresponds to (2 + chroma shift) qpel luma units */
+        int shiftHor = (2 + hshift);
+        int shiftVer = (2 + vshift);
+        lclStride = fencPUYuv.m_csize;
+
+        intptr_t refStrideC = ref->reconPic->m_strideC;
+        intptr_t refOffset = (qmv.x >> shiftHor) + (qmv.y >> shiftVer) * refStrideC;
+
+        const pixel* refCb = ref->getCbAddr(ctuAddr, absPartIdx) + refOffset;
+        const pixel* refCr = ref->getCrAddr(ctuAddr, absPartIdx) + refOffset;
+
+        xFrac = qmv.x & ((1 << shiftHor) - 1);
+        yFrac = qmv.y & ((1 << shiftVer) - 1);
+
+        if (!(yFrac | xFrac))
+        {
+            cost += chromaSatd(fencPUYuv.m_buf[1], lclStride, refCb, refStrideC);
+            cost += chromaSatd(fencPUYuv.m_buf[2], lclStride, refCr, refStrideC);
+        }
+        else
+        {
+            /* same hpp/vpp/hvps split as luma, applied to Cb then Cr; the
+             * 'frac << (1 - shift)' scales the luma-qpel fraction to the
+             * chroma filter's phase units (assumed; depends on csp) */
+            ALIGN_VAR_32(pixel, subpelbuf[64 * 64]);
+            if (!yFrac)
+            {
+                primitives.chroma[csp].pu[partEnum].filter_hpp(refCb, refStrideC, subpelbuf, lclStride, xFrac << (1 - hshift));
+                cost += chromaSatd(fencPUYuv.m_buf[1], lclStride, subpelbuf, lclStride);
+
+                primitives.chroma[csp].pu[partEnum].filter_hpp(refCr, refStrideC, subpelbuf, lclStride, xFrac << (1 - hshift));
+                cost += chromaSatd(fencPUYuv.m_buf[2], lclStride, subpelbuf, lclStride);
+            }
+            else if (!xFrac)
+            {
+                primitives.chroma[csp].pu[partEnum].filter_vpp(refCb, refStrideC, subpelbuf, lclStride, yFrac << (1 - vshift));
+                cost += chromaSatd(fencPUYuv.m_buf[1], lclStride, subpelbuf, lclStride);
+
+                primitives.chroma[csp].pu[partEnum].filter_vpp(refCr, refStrideC, subpelbuf, lclStride, yFrac << (1 - vshift));
+                cost += chromaSatd(fencPUYuv.m_buf[2], lclStride, subpelbuf, lclStride);
+            }
+            else
+            {
+                /* separable 2D filter: horizontal pass to 16-bit intermediate
+                 * (with filterSize/2 - 1 rows of margin), then vertical pass */
+                ALIGN_VAR_32(int16_t, immed[64 * (64 + NTAPS_CHROMA)]);
+
+                int extStride = blockwidth >> hshift;
+                int filterSize = NTAPS_CHROMA;
+                int halfFilterSize = (filterSize >> 1);
+
+                primitives.chroma[csp].pu[partEnum].filter_hps(refCb, refStrideC, immed, extStride, xFrac << (1 - hshift), 1);
+                primitives.chroma[csp].pu[partEnum].filter_vsp(immed + (halfFilterSize - 1) * extStride, extStride, subpelbuf, lclStride, yFrac << (1 - vshift));
+                cost += chromaSatd(fencPUYuv.m_buf[1], lclStride, subpelbuf, lclStride);
+
+                primitives.chroma[csp].pu[partEnum].filter_hps(refCr, refStrideC, immed, extStride, xFrac << (1 - hshift), 1);
+                primitives.chroma[csp].pu[partEnum].filter_vsp(immed + (halfFilterSize - 1) * extStride, extStride, subpelbuf, lclStride, yFrac << (1 - vshift));
+                cost += chromaSatd(fencPUYuv.m_buf[2], lclStride, subpelbuf, lclStride);
+            }
+        }
+    }
+
+    return cost;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/motion.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,110 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_MOTIONESTIMATE_H
+#define X265_MOTIONESTIMATE_H
+
+#include "primitives.h"
+#include "reference.h"
+#include "mv.h"
+#include "bitcost.h"
+#include "yuv.h"
+
+namespace X265_NS {
+// private x265 namespace
+
+/* Per-thread motion estimator.  Caches the source PU pixels (fencPUYuv) via
+ * setSourcePU(), then motionEstimate() performs the configured integer-pel
+ * search followed by subpel refinement.  Inherits MV cost tables from BitCost. */
+class MotionEstimate : public BitCost
+{
+protected:
+
+    intptr_t blockOffset;   // offset of the PU within the reference planes
+    
+    int ctuAddr;
+    int absPartIdx;  // part index of PU, including CU offset within CTU
+
+    int searchMethod;       // integer search method, set by init()
+    int subpelRefine;       // subpel refinement level, set by init()
+
+    int blockwidth;
+    int blockheight;
+
+    // cost comparators selected for the current PU dimensions
+    pixelcmp_t sad;
+    pixelcmp_x3_t sad_x3;
+    pixelcmp_x4_t sad_x4;
+    pixelcmp_t satd;
+    pixelcmp_t chromaSatd;
+
+    MotionEstimate& operator =(const MotionEstimate&);  // non-assignable
+
+public:
+
+    static const int COST_MAX = 1 << 28;
+
+    Yuv fencPUYuv;      // cached copy of the source PU pixels
+    int partEnum;       // partition-size enum used to index primitive tables
+    bool bChromaSATD;   // when true, subpelCompare() adds chroma SATD cost
+
+    MotionEstimate();
+    ~MotionEstimate();
+
+    static void initScales();
+    static int hpelIterationCount(int subme);
+    void init(int method, int refine, int csp);
+
+    /* Methods called at slice setup */
+
+    void setSourcePU(pixel *fencY, intptr_t stride, intptr_t offset, int pwidth, int pheight);
+    void setSourcePU(const Yuv& srcFencYuv, int ctuAddr, int cuPartIdx, int puPartIdx, int pwidth, int pheight);
+
+    /* buf*() and motionEstimate() methods all use cached fenc pixels and thus
+     * require setSourcePU() to be called prior. */
+
+    inline int bufSAD(const pixel* fref, intptr_t stride)  { return sad(fencPUYuv.m_buf[0], FENC_STRIDE, fref, stride); }
+
+    inline int bufSATD(const pixel* fref, intptr_t stride) { return satd(fencPUYuv.m_buf[0], FENC_STRIDE, fref, stride); }
+
+    inline int bufChromaSATD(const Yuv& refYuv, int puPartIdx)
+    {
+        return chromaSatd(refYuv.getCbAddr(puPartIdx), refYuv.m_csize, fencPUYuv.m_buf[1], fencPUYuv.m_csize) +
+               chromaSatd(refYuv.getCrAddr(puPartIdx), refYuv.m_csize, fencPUYuv.m_buf[2], fencPUYuv.m_csize);
+    }
+
+    /* returns the best cost found; best qpel MV is written to outQMv */
+    int motionEstimate(ReferencePlanes* ref, const MV & mvmin, const MV & mvmax, const MV & qmvp, int numCandidates, const MV * mvc, int merange, MV & outQMv);
+
+    int subpelCompare(ReferencePlanes* ref, const MV &qmv, pixelcmp_t);
+
+protected:
+
+    inline void StarPatternSearch(ReferencePlanes *ref,
+                                  const MV &       mvmin,
+                                  const MV &       mvmax,
+                                  MV &             bmv,
+                                  int &            bcost,
+                                  int &            bPointNr,
+                                  int &            bDistance,
+                                  int              earlyExitIters,
+                                  int              merange);
+};
+}
+
+#endif // ifndef X265_MOTIONESTIMATE_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/nal.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,232 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "common.h"
+#include "bitstream.h"
+#include "nal.h"
+
+using namespace X265_NS;
+
+NALList::NALList()
+{
+    /* start with no packets and no scratch storage; serialize() and
+     * serializeSubstreams() allocate their buffers on demand */
+    m_numNal = 0;
+    m_buffer = NULL;
+    m_occupancy = 0;
+    m_allocSize = 0;
+    m_extraBuffer = NULL;
+    m_extraOccupancy = 0;
+    m_extraAllocSize = 0;
+    m_annexB = true;
+}
+
+/* Steal other's serialized access-unit buffer and packet list, discarding our
+ * own buffer.  The m_nal[] payload pointers reference the stolen buffer, so
+ * they remain valid after the transfer.  'other' is left empty with a freshly
+ * allocated buffer of the same capacity. */
+void NALList::takeContents(NALList& other)
+{
+    /* take other NAL buffer, discard our old one */
+    X265_FREE(m_buffer);
+    m_buffer = other.m_buffer;
+    m_allocSize = other.m_allocSize;
+    m_occupancy = other.m_occupancy;
+
+    /* copy packet data */
+    m_numNal = other.m_numNal;
+    memcpy(m_nal, other.m_nal, sizeof(x265_nal) * m_numNal);
+
+    /* reset other list, re-allocate their buffer with same size */
+    other.m_numNal = 0;
+    other.m_occupancy = 0;
+    other.m_buffer = X265_MALLOC(uint8_t, m_allocSize);
+    if (!other.m_buffer)
+    {
+        /* keep capacity consistent with the failed allocation, otherwise a
+         * later serialize() would see spare capacity and write through NULL */
+        other.m_allocSize = 0;
+    }
+}
+
+/* Append one NAL unit to the access-unit buffer and record it in m_nal[]:
+ * start code (Annex-B) or 4-byte length prefix, 2-byte NAL header, then the
+ * RBSP payload with 0x03 emulation-prevention bytes inserted, followed by any
+ * pending escaped WPP substream bytes from serializeSubstreams(). */
+void NALList::serialize(NalUnitType nalUnitType, const Bitstream& bs)
+{
+    static const char startCodePrefix[] = { 0, 0, 0, 1 };
+
+    uint32_t payloadSize = bs.getNumberOfWrittenBytes();
+    const uint8_t* bpayload = bs.getFIFO();
+    if (!bpayload)
+        return;
+
+    /* worst case growth: 4-byte prefix + 2-byte header + payload with an
+     * escape byte every other byte (payloadSize >> 1) + pending substreams */
+    uint32_t nextSize = m_occupancy + sizeof(startCodePrefix) + 2 + payloadSize + (payloadSize >> 1) + m_extraOccupancy;
+    if (nextSize > m_allocSize)
+    {
+        uint8_t *temp = X265_MALLOC(uint8_t, nextSize);
+        if (temp)
+        {
+            memcpy(temp, m_buffer, m_occupancy);
+
+            /* fixup existing payload pointers */
+            for (uint32_t i = 0; i < m_numNal; i++)
+                m_nal[i].payload = temp + (m_nal[i].payload - m_buffer);
+
+            X265_FREE(m_buffer);
+            m_buffer = temp;
+            m_allocSize = nextSize;
+        }
+        else
+        {
+            x265_log(NULL, X265_LOG_ERROR, "Unable to realloc access unit buffer\n");
+            return;
+        }
+    }
+
+    uint8_t *out = m_buffer + m_occupancy;
+    uint32_t bytes = 0;
+
+    if (!m_annexB)
+    {
+        /* Will write size later */
+        bytes += 4;
+    }
+    else if (!m_numNal || nalUnitType == NAL_UNIT_VPS || nalUnitType == NAL_UNIT_SPS || nalUnitType == NAL_UNIT_PPS)
+    {
+        /* 4-byte start code for parameter sets and the first NAL of the AU */
+        memcpy(out, startCodePrefix, 4);
+        bytes += 4;
+    }
+    else
+    {
+        /* 3-byte start code for all other NAL units */
+        memcpy(out, startCodePrefix + 1, 3);
+        bytes += 3;
+    }
+
+    /* 16 bit NAL header:
+     * forbidden_zero_bit       1-bit
+     * nal_unit_type            6-bits
+     * nuh_reserved_zero_6bits  6-bits
+     * nuh_temporal_id_plus1    3-bits */
+    out[bytes++] = (uint8_t)nalUnitType << 1;
+    out[bytes++] = 1 + (nalUnitType == NAL_UNIT_CODED_SLICE_TSA_N);
+
+    /* 7.4.1 ...
+     * Within the NAL unit, the following three-byte sequences shall not occur at
+     * any byte-aligned position:
+     *  - 0x000000
+     *  - 0x000001
+     *  - 0x000002 */
+    for (uint32_t i = 0; i < payloadSize; i++)
+    {
+        /* the test reads back bytes already written to out[], so previously
+         * injected escapes are themselves accounted for */
+        if (i > 2 && !out[bytes - 2] && !out[bytes - 3] && out[bytes - 1] <= 0x03)
+        {
+            /* inject 0x03 to prevent emulating a start code */
+            out[bytes] = out[bytes - 1];
+            out[bytes - 1] = 0x03;
+            bytes++;
+        }
+
+        out[bytes++] = bpayload[i];
+    }
+
+    X265_CHECK(bytes <= 4 + 2 + payloadSize + (payloadSize >> 1), "NAL buffer overflow\n");
+
+    if (m_extraOccupancy)
+    {
+        /* these bytes were escaped by serializeSubstreams */
+        memcpy(out + bytes, m_extraBuffer, m_extraOccupancy);
+        bytes += m_extraOccupancy;
+        m_extraOccupancy = 0;
+    }
+
+    /* 7.4.1.1
+     * ... when the last byte of the RBSP data is equal to 0x00 (which can
+     * only occur when the RBSP ends in a cabac_zero_word), a final byte equal
+     * to 0x03 is appended to the end of the data.  */
+    if (!out[bytes - 1])
+        out[bytes++] = 0x03;
+
+    if (!m_annexB)
+    {
+        /* back-fill the 4-byte big-endian length (excludes the prefix itself) */
+        uint32_t dataSize = bytes - 4;
+        out[0] = (uint8_t)(dataSize >> 24);
+        out[1] = (uint8_t)(dataSize >> 16);
+        out[2] = (uint8_t)(dataSize >> 8);
+        out[3] = (uint8_t)dataSize;
+    }
+
+    m_occupancy += bytes;
+
+    X265_CHECK(m_numNal < (uint32_t)MAX_NAL_UNITS, "NAL count overflow\n");
+
+    x265_nal& nal = m_nal[m_numNal++];
+    nal.type = nalUnitType;
+    nal.sizeBytes = bytes;
+    nal.payload = out;
+}
+
+/* concatenate and escape WPP sub-streams, return escaped row lengths.
+ * These streams will be appended to the next serialized NAL */
+/* concatenate and escape WPP sub-streams, return escaped row lengths.
+ * These streams will be appended to the next serialized NAL */
+uint32_t NALList::serializeSubstreams(uint32_t* streamSizeBytes, uint32_t streamCount, const Bitstream* streams)
+{
+    /* worst-case estimate: total payload plus 50% headroom for 0x03 escapes */
+    uint32_t estSize = 0;
+    for (uint32_t s = 0; s < streamCount; s++)
+        estSize += streams[s].getNumberOfWrittenBytes();
+    estSize += estSize >> 1;
+
+    if (estSize > m_extraAllocSize)
+    {
+        uint8_t *grown = X265_MALLOC(uint8_t, estSize);
+        if (!grown)
+        {
+            x265_log(NULL, X265_LOG_ERROR, "Unable to realloc WPP substream concatenation buffer\n");
+            return 0;
+        }
+        X265_FREE(m_extraBuffer);
+        m_extraBuffer = grown;
+        m_extraAllocSize = estSize;
+    }
+
+    uint8_t *dst = m_extraBuffer;
+    uint32_t total = 0;
+    uint32_t longest = 0;
+    for (uint32_t s = 0; s < streamCount; s++)
+    {
+        uint32_t startPos = total;
+        const uint8_t *src = streams[s].getFIFO();
+        uint32_t srcLen = streams[s].getNumberOfWrittenBytes();
+
+        for (uint32_t i = 0; src && i < srcLen; i++)
+        {
+            /* inject 0x03 to prevent emulating a start code */
+            if (total >= 2 && !dst[total - 2] && !dst[total - 1] && src[i] <= 0x03)
+                dst[total++] = 3;
+
+            dst[total++] = src[i];
+        }
+
+        /* the final sub-stream's length is implied, so it is not recorded */
+        if (s + 1 < streamCount)
+        {
+            streamSizeBytes[s] = total - startPos;
+            if (streamSizeBytes[s] > longest)
+                longest = streamSizeBytes[s];
+        }
+    }
+
+    m_extraOccupancy = total;
+    return longest;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/nal.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,65 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_NAL_H
+#define X265_NAL_H
+
+#include "common.h"
+#include "x265.h"
+
+namespace X265_NS {
+// private namespace
+
+class Bitstream;
+
+/* Accumulates the serialized, escaped NAL units of one access unit in a
+ * single contiguous buffer, plus a side buffer of escaped WPP sub-stream
+ * bytes awaiting the next serialize() call. */
+class NALList
+{
+    static const int MAX_NAL_UNITS = 16;
+
+public:
+
+    x265_nal    m_nal[MAX_NAL_UNITS];   // packet descriptors; payloads point into m_buffer
+    uint32_t    m_numNal;               // count of valid entries in m_nal[]
+
+    uint8_t*    m_buffer;               // serialized access-unit bytes
+    uint32_t    m_occupancy;            // bytes used in m_buffer
+    uint32_t    m_allocSize;            // capacity of m_buffer
+
+    uint8_t*    m_extraBuffer;          // escaped WPP substream bytes pending for next NAL
+    uint32_t    m_extraOccupancy;       // bytes used in m_extraBuffer
+    uint32_t    m_extraAllocSize;       // capacity of m_extraBuffer
+    bool        m_annexB;               // true: Annex-B start codes; false: 4-byte length prefixes
+
+    NALList();
+    ~NALList() { X265_FREE(m_buffer); X265_FREE(m_extraBuffer); }
+
+    // steal other's buffer and packet list; other is reset with a fresh buffer
+    void takeContents(NALList& other);
+
+    // append one escaped NAL unit (prefix, header, payload) to the buffer
+    void serialize(NalUnitType nalUnitType, const Bitstream& bs);
+
+    // concatenate/escape WPP sub-streams; returns the largest escaped row size
+    uint32_t serializeSubstreams(uint32_t* streamSizeBytes, uint32_t streamCount, const Bitstream* streams);
+};
+
+}
+
+#endif // ifndef X265_NAL_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/ratecontrol.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2424 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Sumalatha Polureddy <sumalatha@multicorewareinc.com>
+ *          Aarthi Priya Thirumalai <aarthi@multicorewareinc.com>
+ *          Xun Xu, PPLive Corporation <xunxu@pptv.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "param.h"
+#include "frame.h"
+#include "framedata.h"
+#include "picyuv.h"
+
+#include "encoder.h"
+#include "slicetype.h"
+#include "ratecontrol.h"
+#include "sei.h"
+
+#define BR_SHIFT  6
+#define CPB_SHIFT 4
+
+using namespace X265_NS;
+
+/* Amortize the partial cost of I frames over the next N frames */
+
const int RateControl::s_slidingWindowFrames = 20;                    // sizes m_satdCostWindow / m_encodedBitsWindow
const char *RateControl::s_defaultStatFileName = "x265_2pass.log";    // used when rc.statFileName is not set
+
+namespace {
/* Verify that integer option 'opt' recorded in the first-pass stats header
 * matches 'param_val'.  Expands in a scope that provides 'opts' (the header
 * text), 'p', 'i', 'bErr' and 'm_param'; on mismatch it logs an error and
 * returns false from the enclosing function.
 * BUGFIX: "no-" opt needs the space so the preprocessor performs string
 * literal concatenation; C++11 parses "no-"opt (no space) as a
 * user-defined-literal suffix and fails to compile. */
#define CMP_OPT_FIRST_PASS(opt, param_val)\
{\
    bErr = 0;\
    p = strstr(opts, opt "=");\
    char* q = strstr(opts, "no-" opt);\
    if (p && sscanf(p, opt "=%d" , &i) && param_val != i)\
        bErr = 1;\
    else if (!param_val && !q && !p)\
        bErr = 1;\
    else if (param_val && (q || !strstr(opts, opt)))\
        bErr = 1;\
    if (bErr)\
    {\
        x265_log(m_param, X265_LOG_ERROR, "different " opt " setting than first pass (%d vs %d)\n", param_val, i);\
        return false;\
    }\
}
+
/* Number of trailing zero bits in x (returns 32 when x == 0).  Used by
 * initHRD() to normalize bitrate/CPB values into value * 2^scale form. */
static inline int calcScale(uint32_t x)
{
    if (!x)
        return 32;
    int zeros = 0;
    while (!(x & 1))
    {
        zeros++;
        x >>= 1;
    }
    return zeros;
}
+
/* Number of leading zero bits in x (returns 32 when x == 0); callers use
 * 32 - calcLength(v) as the bit length needed to represent v. */
static inline int calcLength(uint32_t x)
{
    int zeros = 0;
    for (uint32_t probe = 0x80000000u; probe; probe >>= 1)
    {
        if (x & probe)
            break;
        zeros++;
    }
    return zeros;
}
+
/* Reduce the fraction *n / *d to lowest terms in place using Euclid's
 * algorithm; a zero numerator or denominator is left untouched. */
static inline void reduceFraction(int* n, int* d)
{
    int a = *n, b = *d;
    if (a == 0 || b == 0)
        return;
    while (b)
    {
        int r = a % b;
        a = b;
        b = r;
    }
    /* 'a' now holds the GCD */
    *n /= a;
    *d /= a;
}
+
+inline char *strcatFilename(const char *input, const char *suffix)
+{
+    char *output = X265_MALLOC(char, strlen(input) + strlen(suffix) + 1);
+    if (!output)
+    {
+        x265_log(NULL, X265_LOG_ERROR, "unable to allocate memory for filename\n");
+        return NULL;
+    }
+    strcpy(output, input);
+    strcat(output, suffix);
+    return output;
+}
+
+inline double qScale2bits(RateControlEntry *rce, double qScale)
+{
+    if (qScale < 0.1)
+        qScale = 0.1;
+    return (rce->coeffBits + .1) * pow(rce->qScale / qScale, 1.1)
+           + rce->mvBits * pow(X265_MAX(rce->qScale, 1) / X265_MAX(qScale, 1), 0.5)
+           + rce->miscBits;
+}
+
/* Copy the first-pass statistics parsed into the persistent 2-pass table
 * entry 'rce2Pass' into the per-frame working entry 'rce'. */
inline void copyRceData(RateControlEntry* rce, RateControlEntry* rce2Pass)
{
    rce->coeffBits = rce2Pass->coeffBits;
    rce->mvBits = rce2Pass->mvBits;
    rce->miscBits = rce2Pass->miscBits;
    rce->iCuCount = rce2Pass->iCuCount;
    rce->pCuCount = rce2Pass->pCuCount;
    rce->skipCuCount = rce2Pass->skipCuCount;
    rce->keptAsRef = rce2Pass->keptAsRef;
    rce->qScale = rce2Pass->qScale;
    rce->newQScale = rce2Pass->newQScale;
    rce->expectedBits = rce2Pass->expectedBits;
    rce->expectedVbv = rce2Pass->expectedVbv;
    rce->blurredComplexity = rce2Pass->blurredComplexity;
    rce->sliceType = rce2Pass->sliceType;
}
+
+}  // end anonymous namespace
+/* Returns the zone for the current frame */
+x265_zone* RateControl::getZone()
+{
+    for (int i = m_param->rc.zoneCount - 1; i >= 0; i--)
+    {
+        x265_zone *z = &m_param->rc.zones[i];
+        if (m_framesDone + 1 >= z->startFrame && m_framesDone < z->endFrame)
+            return z;
+    }
+    return NULL;
+}
+
/* Construct rate-control state from encoder parameters: derive the CRF
 * rate factor, sanitize VBV settings (mutating 'p'), choose ABR/CQP/2-pass
 * mode flags, and seed the per-slice-type QP/qscale history. */
RateControl::RateControl(x265_param& p)
{
    m_param = &p;
    /* m_ncu: number of lowres (half-resolution) CUs, the unit all
     * complexity estimates below are expressed in */
    int lowresCuWidth = ((m_param->sourceWidth / 2) + X265_LOWRES_CU_SIZE - 1) >> X265_LOWRES_CU_BITS;
    int lowresCuHeight = ((m_param->sourceHeight / 2) + X265_LOWRES_CU_SIZE - 1) >> X265_LOWRES_CU_BITS;
    m_ncu = lowresCuWidth * lowresCuHeight;

    // cuTree forces full qComp; it applies its own complexity flattening
    if (m_param->rc.cuTree)
        m_qCompress = 1;
    else
        m_qCompress = m_param->rc.qCompress;

    // TODO: validate param->rc here; perhaps add a helper like x265_parameters_validate()
    m_residualFrames = 0;
    m_partialResidualFrames = 0;
    m_residualCost = 0;
    m_partialResidualCost = 0;
    m_rateFactorMaxIncrement = 0;
    m_rateFactorMaxDecrement = 0;
    m_fps = (double)m_param->fpsNum / m_param->fpsDenom;
    m_startEndOrder.set(0);
    m_bTerminated = false;
    m_finalFrameCount = 0;
    m_numEntries = 0;
    m_isSceneTransition = false;
    if (m_param->rc.rateControlMode == X265_RC_CRF)
    {
        /* CRF is implemented as ABR with a fixed rate factor */
        m_param->rc.qp = (int)m_param->rc.rfConstant;
        m_param->rc.bitrate = 0;

        double baseCplx = m_ncu * (m_param->bframes ? 120 : 80);
        double mbtree_offset = m_param->rc.cuTree ? (1.0 - m_param->rc.qCompress) * 13.5 : 0;
        m_rateFactorConstant = pow(baseCplx, 1 - m_qCompress) /
            x265_qp2qScale(m_param->rc.rfConstant + mbtree_offset);
        if (m_param->rc.rfConstantMax)
        {
            m_rateFactorMaxIncrement = m_param->rc.rfConstantMax - m_param->rc.rfConstant;
            if (m_rateFactorMaxIncrement <= 0)
            {
                x265_log(m_param, X265_LOG_WARNING, "CRF max must be greater than CRF\n");
                m_rateFactorMaxIncrement = 0;
            }
        }
        if (m_param->rc.rfConstantMin)
            m_rateFactorMaxDecrement = m_param->rc.rfConstant - m_param->rc.rfConstantMin;
    }
    m_isAbr = m_param->rc.rateControlMode != X265_RC_CQP && !m_param->rc.bStatRead;
    m_2pass = m_param->rc.rateControlMode == X265_RC_ABR && m_param->rc.bStatRead;
    m_bitrate = m_param->rc.bitrate * 1000;     // kbps -> bps
    m_frameDuration = (double)m_param->fpsDenom / m_param->fpsNum;
    m_qp = m_param->rc.qp;
    m_lastRceq = 1; /* handles the cmplxrsum when the previous frame cost is zero */
    m_shortTermCplxSum = 0;
    m_shortTermCplxCount = 0;
    m_lastNonBPictType = I_SLICE;
    m_isAbrReset = false;
    m_lastAbrResetPoc = -1;
    m_statFileOut = NULL;
    m_cutreeStatFileOut = m_cutreeStatFileIn = NULL;
    m_rce2Pass = NULL;
    m_lastBsliceSatdCost = 0;

    // vbv initialization: clamp user values to sane ranges, then resolve
    // the partially-specified combinations below
    m_param->rc.vbvBufferSize = x265_clip3(0, 2000000, m_param->rc.vbvBufferSize);
    m_param->rc.vbvMaxBitrate = x265_clip3(0, 2000000, m_param->rc.vbvMaxBitrate);
    m_param->rc.vbvBufferInit = x265_clip3(0.0, 2000000.0, m_param->rc.vbvBufferInit);
    m_singleFrameVbv = 0;
    m_rateTolerance = 1.0;

    if (m_param->rc.vbvBufferSize)
    {
        if (m_param->rc.rateControlMode == X265_RC_CQP)
        {
            x265_log(m_param, X265_LOG_WARNING, "VBV is incompatible with constant QP, ignored.\n");
            m_param->rc.vbvBufferSize = 0;
            m_param->rc.vbvMaxBitrate = 0;
        }
        else if (m_param->rc.vbvMaxBitrate == 0)
        {
            if (m_param->rc.rateControlMode == X265_RC_ABR)
            {
                x265_log(m_param, X265_LOG_WARNING, "VBV maxrate unspecified, assuming CBR\n");
                m_param->rc.vbvMaxBitrate = m_param->rc.bitrate;
            }
            else
            {
                x265_log(m_param, X265_LOG_WARNING, "VBV bufsize set but maxrate unspecified, ignored\n");
                m_param->rc.vbvBufferSize = 0;
            }
        }
        else if (m_param->rc.vbvMaxBitrate < m_param->rc.bitrate &&
                 m_param->rc.rateControlMode == X265_RC_ABR)
        {
            x265_log(m_param, X265_LOG_WARNING, "max bitrate less than average bitrate, assuming CBR\n");
            m_param->rc.bitrate = m_param->rc.vbvMaxBitrate;
        }
    }
    else if (m_param->rc.vbvMaxBitrate)
    {
        x265_log(m_param, X265_LOG_WARNING, "VBV maxrate specified, but no bufsize, ignored\n");
        m_param->rc.vbvMaxBitrate = 0;
    }
    m_isVbv = m_param->rc.vbvMaxBitrate > 0 && m_param->rc.vbvBufferSize > 0;
    if (m_param->bEmitHRDSEI && !m_isVbv)
    {
        x265_log(m_param, X265_LOG_WARNING, "NAL HRD parameters require VBV parameters, ignored\n");
        m_param->bEmitHRDSEI = 0;
    }
    m_isCbr = m_param->rc.rateControlMode == X265_RC_ABR && m_isVbv && !m_2pass && m_param->rc.vbvMaxBitrate <= m_param->rc.bitrate;
    if (m_param->rc.bStrictCbr && !m_isCbr)
    {
        x265_log(m_param, X265_LOG_WARNING, "strict CBR set without CBR mode, ignored\n");
        m_param->rc.bStrictCbr = 0;
    }
    // tighter tolerance keeps strict-CBR output closer to the target rate
    if(m_param->rc.bStrictCbr)
        m_rateTolerance = 0.7;

    m_bframeBits = 0;
    m_leadingNoBSatd = 0;
    // QP offsets between slice types, in QP units (6 per doubling of qscale)
    m_ipOffset = 6.0 * X265_LOG2(m_param->rc.ipFactor);
    m_pbOffset = 6.0 * X265_LOG2(m_param->rc.pbFactor);

    /* Adjust the first frame in order to stabilize the quality level compared to the rest */
#define ABR_INIT_QP_MIN (24)
#define ABR_INIT_QP_MAX (40)
#define ABR_SCENECUT_INIT_QP_MIN (12)
#define CRF_INIT_QP (int)m_param->rc.rfConstant
    for (int i = 0; i < 3; i++)
        m_lastQScaleFor[i] = x265_qp2qScale(m_param->rc.rateControlMode == X265_RC_CRF ? CRF_INIT_QP : ABR_INIT_QP_MIN);

    if (m_param->rc.rateControlMode == X265_RC_CQP)
    {
        if (m_qp && !m_param->bLossless)
        {
            // fixed QPs per slice type, offset from the configured P QP
            m_qpConstant[P_SLICE] = m_qp;
            m_qpConstant[I_SLICE] = x265_clip3(QP_MIN, QP_MAX_MAX, (int)(m_qp - m_ipOffset + 0.5));
            m_qpConstant[B_SLICE] = x265_clip3(QP_MIN, QP_MAX_MAX, (int)(m_qp + m_pbOffset + 0.5));
        }
        else
        {
            m_qpConstant[P_SLICE] = m_qpConstant[I_SLICE] = m_qpConstant[B_SLICE] = m_qp;
        }
    }

    /* qpstep - value set as encoder specific */
    m_lstep = pow(2, m_param->rc.qpStep / 6.0);

    for (int i = 0; i < 2; i++)
        m_cuTreeStats.qpBuffer[i] = NULL;
}
+
+bool RateControl::init(const SPS& sps)
+{
+    if (m_isVbv)
+    {
+        /* We don't support changing the ABR bitrate right now,
+         * so if the stream starts as CBR, keep it CBR. */
+        if (m_param->rc.vbvBufferSize < (int)(m_param->rc.vbvMaxBitrate / m_fps))
+        {
+            m_param->rc.vbvBufferSize = (int)(m_param->rc.vbvMaxBitrate / m_fps);
+            x265_log(m_param, X265_LOG_WARNING, "VBV buffer size cannot be smaller than one frame, using %d kbit\n",
+                     m_param->rc.vbvBufferSize);
+        }
+        int vbvBufferSize = m_param->rc.vbvBufferSize * 1000;
+        int vbvMaxBitrate = m_param->rc.vbvMaxBitrate * 1000;
+
+        if (m_param->bEmitHRDSEI)
+        {
+            const HRDInfo* hrd = &sps.vuiParameters.hrdParameters;
+            vbvBufferSize = hrd->cpbSizeValue << (hrd->cpbSizeScale + CPB_SHIFT);
+            vbvMaxBitrate = hrd->bitRateValue << (hrd->bitRateScale + BR_SHIFT);
+        }
+        m_bufferRate = vbvMaxBitrate / m_fps;
+        m_vbvMaxRate = vbvMaxBitrate;
+        m_bufferSize = vbvBufferSize;
+        m_singleFrameVbv = m_bufferRate * 1.1 > m_bufferSize;
+
+        if (m_param->rc.vbvBufferInit > 1.)
+            m_param->rc.vbvBufferInit = x265_clip3(0.0, 1.0, m_param->rc.vbvBufferInit / m_param->rc.vbvBufferSize);
+        m_param->rc.vbvBufferInit = x265_clip3(0.0, 1.0, X265_MAX(m_param->rc.vbvBufferInit, m_bufferRate / m_bufferSize));
+        m_bufferFillFinal = m_bufferSize * m_param->rc.vbvBufferInit;
+    }
+
+    m_totalBits = 0;
+    m_encodedBits = 0;
+    m_framesDone = 0;
+    m_residualCost = 0;
+    m_partialResidualCost = 0;
+    m_amortizeFraction = 0.85;
+    m_amortizeFrames = 75;
+    if (m_param->totalFrames && m_param->totalFrames <= 2 * m_fps && m_param->rc.bStrictCbr) /* Strict CBR segment encode */
+    {
+        m_amortizeFraction = 0.85;
+        m_amortizeFrames = m_param->totalFrames / 2;
+    }
+    for (int i = 0; i < s_slidingWindowFrames; i++)
+    {
+        m_satdCostWindow[i] = 0;
+        m_encodedBitsWindow[i] = 0;
+    }
+    m_sliderPos = 0;
+    m_isPatternPresent = false;
+    m_numBframesInPattern = 0;
+
+    /* 720p videos seem to be a good cutoff for cplxrSum */
+    double tuneCplxFactor = (m_param->rc.cuTree && m_ncu > 3600) ? 2.5 : 1;
+
+    /* estimated ratio that produces a reasonable QP for the first I-frame */
+    m_cplxrSum = .01 * pow(7.0e5, m_qCompress) * pow(m_ncu, 0.5) * tuneCplxFactor;
+    m_wantedBitsWindow = m_bitrate * m_frameDuration;
+    m_accumPNorm = .01;
+    m_accumPQp = (m_param->rc.rateControlMode == X265_RC_CRF ? CRF_INIT_QP : ABR_INIT_QP_MIN) * m_accumPNorm;
+
+    /* Frame Predictors and Row predictors used in vbv */
+    for (int i = 0; i < 4; i++)
+    {
+        m_pred[i].coeff = 1.0;
+        m_pred[i].count = 1.0;
+        m_pred[i].decay = 0.5;
+        m_pred[i].offset = 0.0;
+    }
+    m_pred[0].coeff = m_pred[3].coeff = 0.75;
+    if (m_param->rc.qCompress >= 0.8) // when tuned for grain 
+    {
+        m_pred[1].coeff = 0.75;
+        m_pred[0].coeff = m_pred[3].coeff = 0.50;
+    }
+    if (!m_statFileOut && (m_param->rc.bStatWrite || m_param->rc.bStatRead))
+    {
+        /* If the user hasn't defined the stat filename, use the default value */
+        const char *fileName = m_param->rc.statFileName;
+        if (!fileName)
+            fileName = s_defaultStatFileName;
+        /* Load stat file and init 2pass algo */
+        if (m_param->rc.bStatRead)
+        {
+            m_expectedBitsSum = 0;
+            char *p, *statsIn, *statsBuf;
+            /* read 1st pass stats */
+            statsIn = statsBuf = x265_slurp_file(fileName);
+            if (!statsBuf)
+                return false;
+            if (m_param->rc.cuTree)
+            {
+                char *tmpFile = strcatFilename(fileName, ".cutree");
+                if (!tmpFile)
+                    return false;
+                m_cutreeStatFileIn = fopen(tmpFile, "rb");
+                X265_FREE(tmpFile);
+                if (!m_cutreeStatFileIn)
+                {
+                    x265_log(m_param, X265_LOG_ERROR, "can't open stats file %s\n", tmpFile);
+                    return false;
+                }
+            }
+
+            /* check whether 1st pass options were compatible with current options */
+            if (strncmp(statsBuf, "#options:", 9))
+            {
+                x265_log(m_param, X265_LOG_ERROR,"options list in stats file not valid\n");
+                return false;
+            }
+            {
+                int i, j;
+                uint32_t k , l;
+                bool bErr = false;
+                char *opts = statsBuf;
+                statsIn = strchr(statsBuf, '\n');
+                if (!statsIn)
+                {
+                    x265_log(m_param, X265_LOG_ERROR, "Malformed stats file\n");
+                    return false;
+                }
+                *statsIn = '\0';
+                statsIn++;
+                if (sscanf(opts, "#options: %dx%d", &i, &j) != 2)
+                {
+                    x265_log(m_param, X265_LOG_ERROR, "Resolution specified in stats file not valid\n");
+                    return false;
+                }
+                if ((p = strstr(opts, " fps=")) == 0 || sscanf(p, " fps=%u/%u", &k, &l) != 2)
+                {
+                    x265_log(m_param, X265_LOG_ERROR, "fps specified in stats file not valid\n");
+                    return false;
+                }
+                if (k != m_param->fpsNum || l != m_param->fpsDenom)
+                {
+                    x265_log(m_param, X265_LOG_ERROR, "fps mismatch with 1st pass (%u/%u vs %u/%u)\n",
+                              m_param->fpsNum, m_param->fpsDenom, k, l);
+                    return false;
+                }
+                CMP_OPT_FIRST_PASS("bitdepth", m_param->internalBitDepth);
+                CMP_OPT_FIRST_PASS("weightp", m_param->bEnableWeightedPred);
+                CMP_OPT_FIRST_PASS("bframes", m_param->bframes);
+                CMP_OPT_FIRST_PASS("b-pyramid", m_param->bBPyramid);
+                CMP_OPT_FIRST_PASS("open-gop", m_param->bOpenGOP);
+                CMP_OPT_FIRST_PASS("keyint", m_param->keyframeMax);
+                CMP_OPT_FIRST_PASS("scenecut", m_param->scenecutThreshold);
+
+                if ((p = strstr(opts, "b-adapt=")) != 0 && sscanf(p, "b-adapt=%d", &i) && i >= X265_B_ADAPT_NONE && i <= X265_B_ADAPT_TRELLIS)
+                {
+                    m_param->bFrameAdaptive = i;
+                }
+                else if (m_param->bframes)
+                {
+                    x265_log(m_param, X265_LOG_ERROR, "b-adapt method specified in stats file not valid\n");
+                    return false;
+                }
+
+                if ((p = strstr(opts, "rc-lookahead=")) != 0 && sscanf(p, "rc-lookahead=%d", &i))
+                    m_param->lookaheadDepth = i;
+            }
+            /* find number of pics */
+            p = statsIn;
+            int numEntries;
+            for (numEntries = -1; p; numEntries++)
+                p = strchr(p + 1, ';');
+            if (!numEntries)
+            {
+                x265_log(m_param, X265_LOG_ERROR, "empty stats file\n");
+                return false;
+            }
+            m_numEntries = numEntries;
+
+            if (m_param->totalFrames < m_numEntries && m_param->totalFrames > 0)
+            {
+                x265_log(m_param, X265_LOG_WARNING, "2nd pass has fewer frames than 1st pass (%d vs %d)\n",
+                         m_param->totalFrames, m_numEntries);
+            }
+            if (m_param->totalFrames > m_numEntries)
+            {
+                x265_log(m_param, X265_LOG_ERROR, "2nd pass has more frames than 1st pass (%d vs %d)\n",
+                         m_param->totalFrames, m_numEntries);
+                return false;
+            }
+
+            m_rce2Pass = X265_MALLOC(RateControlEntry, m_numEntries);
+            if (!m_rce2Pass)
+            {
+                 x265_log(m_param, X265_LOG_ERROR, "Rce Entries for 2 pass cannot be allocated\n");
+                 return false;
+            }
+            /* init all to skipped p frames */
+            for (int i = 0; i < m_numEntries; i++)
+            {
+                RateControlEntry *rce = &m_rce2Pass[i];
+                rce->sliceType = P_SLICE;
+                rce->qScale = rce->newQScale = x265_qp2qScale(20);
+                rce->miscBits = m_ncu + 10;
+                rce->newQp = 0;
+            }
+            /* read stats */
+            p = statsIn;
+            double totalQpAq = 0;
+            for (int i = 0; i < m_numEntries; i++)
+            {
+                RateControlEntry *rce;
+                int frameNumber;
+                char picType;
+                int e;
+                char *next;
+                double qpRc, qpAq;
+                next = strstr(p, ";");
+                if (next)
+                    *next++ = 0;
+                e = sscanf(p, " in:%d ", &frameNumber);
+                if (frameNumber < 0 || frameNumber >= m_numEntries)
+                {
+                    x265_log(m_param, X265_LOG_ERROR, "bad frame number (%d) at stats line %d\n", frameNumber, i);
+                    return false;
+                }
+                rce = &m_rce2Pass[frameNumber];
+                e += sscanf(p, " in:%*d out:%*d type:%c q:%lf q-aq:%lf tex:%d mv:%d misc:%d icu:%lf pcu:%lf scu:%lf",
+                       &picType, &qpRc, &qpAq, &rce->coeffBits,
+                       &rce->mvBits, &rce->miscBits, &rce->iCuCount, &rce->pCuCount,
+                       &rce->skipCuCount);
+                rce->keptAsRef = true;
+                if (picType == 'b' || picType == 'p')
+                    rce->keptAsRef = false;
+                if (picType == 'I' || picType == 'i')
+                    rce->sliceType = I_SLICE;
+                else if (picType == 'P' || picType == 'p')
+                    rce->sliceType = P_SLICE;
+                else if (picType == 'B' || picType == 'b')
+                    rce->sliceType = B_SLICE;
+                else
+                    e = -1;
+                if (e < 10)
+                {
+                    x265_log(m_param, X265_LOG_ERROR, "statistics are damaged at line %d, parser out=%d\n", i, e);
+                    return false;
+                }
+                rce->qScale = x265_qp2qScale(qpRc);
+                totalQpAq += qpAq;
+                p = next;
+            }
+            X265_FREE(statsBuf);
+
+            if (m_param->rc.rateControlMode == X265_RC_ABR)
+            {
+                if (!initPass2())
+                    return false;
+            } /* else we're using constant quant, so no need to run the bitrate allocation */
+        }
+        /* Open output file */
+        /* If input and output files are the same, output to a temp file
+         * and move it to the real name only when it's complete */
+        if (m_param->rc.bStatWrite)
+        {
+            char *p, *statFileTmpname;
+            statFileTmpname = strcatFilename(fileName, ".temp");
+            if (!statFileTmpname)
+                return false;
+            m_statFileOut = fopen(statFileTmpname, "wb");
+            X265_FREE(statFileTmpname);
+            if (!m_statFileOut)
+            {
+                x265_log(m_param, X265_LOG_ERROR, "can't open stats file %s\n", statFileTmpname);
+                return false;
+            }
+            p = x265_param2string(m_param);
+            if (p)
+                fprintf(m_statFileOut, "#options: %s\n", p);
+            X265_FREE(p);
+            if (m_param->rc.cuTree && !m_param->rc.bStatRead)
+            {
+                statFileTmpname = strcatFilename(fileName, ".cutree.temp");
+                if (!statFileTmpname)
+                    return false;
+                m_cutreeStatFileOut = fopen(statFileTmpname, "wb");
+                X265_FREE(statFileTmpname);
+                if (!m_cutreeStatFileOut)
+                {
+                    x265_log(m_param, X265_LOG_ERROR, "can't open mbtree stats file %s\n", statFileTmpname);
+                    return false;
+                }
+            }
+        }
+        if (m_param->rc.cuTree)
+        {
+            m_cuTreeStats.qpBuffer[0] = X265_MALLOC(uint16_t, m_ncu * sizeof(uint16_t));
+            if (m_param->bBPyramid && m_param->rc.bStatRead)
+                m_cuTreeStats.qpBuffer[1] = X265_MALLOC(uint16_t, m_ncu * sizeof(uint16_t));
+            m_cuTreeStats.qpBufPos = -1;
+        }
+    }
+    return true;
+}
+
+void RateControl::initHRD(SPS& sps)
+{
+    int vbvBufferSize = m_param->rc.vbvBufferSize * 1000;
+    int vbvMaxBitrate = m_param->rc.vbvMaxBitrate * 1000;
+
+    // Init HRD
+    HRDInfo* hrd = &sps.vuiParameters.hrdParameters;
+    hrd->cbrFlag = m_isCbr;
+
+    // normalize HRD size and rate to the value / scale notation
+    hrd->bitRateScale = x265_clip3(0, 15, calcScale(vbvMaxBitrate) - BR_SHIFT);
+    hrd->bitRateValue = (vbvMaxBitrate >> (hrd->bitRateScale + BR_SHIFT));
+
+    hrd->cpbSizeScale = x265_clip3(0, 15, calcScale(vbvBufferSize) - CPB_SHIFT);
+    hrd->cpbSizeValue = (vbvBufferSize >> (hrd->cpbSizeScale + CPB_SHIFT));
+    int bitRateUnscale = hrd->bitRateValue << (hrd->bitRateScale + BR_SHIFT);
+    int cpbSizeUnscale = hrd->cpbSizeValue << (hrd->cpbSizeScale + CPB_SHIFT);
+
+    // arbitrary
+    #define MAX_DURATION 0.5
+
+    TimingInfo *time = &sps.vuiParameters.timingInfo;
+    int maxCpbOutputDelay = (int)(X265_MIN(m_param->keyframeMax * MAX_DURATION * time->timeScale / time->numUnitsInTick, INT_MAX));
+    int maxDpbOutputDelay = (int)(sps.maxDecPicBuffering * MAX_DURATION * time->timeScale / time->numUnitsInTick);
+    int maxDelay = (int)(90000.0 * cpbSizeUnscale / bitRateUnscale + 0.5);
+
+    hrd->initialCpbRemovalDelayLength = 2 + x265_clip3(4, 22, 32 - calcLength(maxDelay));
+    hrd->cpbRemovalDelayLength = x265_clip3(4, 31, 32 - calcLength(maxCpbOutputDelay));
+    hrd->dpbOutputDelayLength = x265_clip3(4, 31, 32 - calcLength(maxDpbOutputDelay));
+
+    #undef MAX_DURATION
+}
+
+bool RateControl::initPass2()
+{
+    uint64_t allConstBits = 0;
+    uint64_t allAvailableBits = uint64_t(m_param->rc.bitrate * 1000. * m_numEntries * m_frameDuration);
+    double rateFactor, stepMult;
+    double qBlur = m_param->rc.qblur;
+    double cplxBlur = m_param->rc.complexityBlur;
+    const int filterSize = (int)(qBlur * 4) | 1;
+    double expectedBits;
+    double *qScale, *blurredQscale;
+    double baseCplx = m_ncu * (m_param->bframes ? 120 : 80);
+    double clippedDuration = CLIP_DURATION(m_frameDuration) / BASE_FRAME_DURATION;
+
+    /* find total/average complexity & const_bits */
+    for (int i = 0; i < m_numEntries; i++)
+        allConstBits += m_rce2Pass[i].miscBits;
+
+    if (allAvailableBits < allConstBits)
+    {
+        x265_log(m_param, X265_LOG_ERROR, "requested bitrate is too low. estimated minimum is %d kbps\n",
+                 (int)(allConstBits * m_fps / m_numEntries * 1000.));
+        return false;
+    }
+
+    /* Blur complexities, to reduce local fluctuation of QP.
+     * We don't blur the QPs directly, because then one very simple frame
+     * could drag down the QP of a nearby complex frame and give it more
+     * bits than intended. */
+    for (int i = 0; i < m_numEntries; i++)
+    {
+        double weightSum = 0;
+        double cplxSum = 0;
+        double weight = 1.0;
+        double gaussianWeight;
+        /* weighted average of cplx of future frames */
+        for (int j = 1; j < cplxBlur * 2 && j < m_numEntries - i; j++)
+        {
+            RateControlEntry *rcj = &m_rce2Pass[i + j];
+            weight *= 1 - pow(rcj->iCuCount / m_ncu, 2);
+            if (weight < 0.0001)
+                break;
+            gaussianWeight = weight * exp(-j * j / 200.0);
+            weightSum += gaussianWeight;
+            cplxSum += gaussianWeight * (qScale2bits(rcj, 1) - rcj->miscBits) / clippedDuration;
+        }
+        /* weighted average of cplx of past frames */
+        weight = 1.0;
+        for (int j = 0; j <= cplxBlur * 2 && j <= i; j++)
+        {
+            RateControlEntry *rcj = &m_rce2Pass[i - j];
+            gaussianWeight = weight * exp(-j * j / 200.0);
+            weightSum += gaussianWeight;
+            cplxSum += gaussianWeight * (qScale2bits(rcj, 1) - rcj->miscBits) / clippedDuration;
+            weight *= 1 - pow(rcj->iCuCount / m_ncu, 2);
+            if (weight < .0001)
+                break;
+        }
+        m_rce2Pass[i].blurredComplexity = cplxSum / weightSum;
+    }
+
+    CHECKED_MALLOC(qScale, double, m_numEntries);
+    if (filterSize > 1)
+    {
+        CHECKED_MALLOC(blurredQscale, double, m_numEntries);
+    }
+    else
+        blurredQscale = qScale;
+
+    /* Search for a factor which, when multiplied by the RCEQ values from
+     * each frame, adds up to the desired total size.
+     * There is no exact closed-form solution because of VBV constraints and
+     * because qscale2bits is not invertible, but we can start with the simple
+     * approximation of scaling the 1st pass by the ratio of bitrates.
+     * The search range is probably overkill, but speed doesn't matter here. */
+
+    expectedBits = 1;
+    for (int i = 0; i < m_numEntries; i++)
+    {
+        RateControlEntry* rce = &m_rce2Pass[i];
+        double q = getQScale(rce, 1.0);
+        expectedBits += qScale2bits(rce, q);
+        m_lastQScaleFor[rce->sliceType] = q;
+    }
+    stepMult = allAvailableBits / expectedBits;
+
+    rateFactor = 0;
+    for (double step = 1E4 * stepMult; step > 1E-7 * stepMult; step *= 0.5)
+    {
+        expectedBits = 0;
+        rateFactor += step;
+
+        m_lastNonBPictType = -1;
+        m_lastAccumPNorm = 1;
+        m_accumPNorm = 0;
+
+        m_lastQScaleFor[0] = m_lastQScaleFor[1] =
+        m_lastQScaleFor[2] = pow(baseCplx, 1 - m_qCompress) / rateFactor;
+
+        /* find qscale */
+        for (int i = 0; i < m_numEntries; i++)
+        {
+            RateControlEntry *rce = &m_rce2Pass[i];
+            qScale[i] = getQScale(rce, rateFactor);
+            m_lastQScaleFor[rce->sliceType] = qScale[i];
+        }
+
+        /* fixed I/B qscale relative to P */
+        for (int i = m_numEntries - 1; i >= 0; i--)
+        {
+            qScale[i] = getDiffLimitedQScale(&m_rce2Pass[i], qScale[i]);
+            X265_CHECK(qScale[i] >= 0, "qScale became negative\n");
+        }
+
+        /* smooth curve */
+        if (filterSize > 1)
+        {
+            X265_CHECK(filterSize % 2 == 1, "filterSize not an odd number\n");
+            for (int i = 0; i < m_numEntries; i++)
+            {
+                double q = 0.0, sum = 0.0;
+
+                for (int j = 0; j < filterSize; j++)
+                {
+                    int idx = i + j - filterSize / 2;
+                    double d = idx - i;
+                    double coeff = qBlur == 0 ? 1.0 : exp(-d * d / (qBlur * qBlur));
+                    if (idx < 0 || idx >= m_numEntries)
+                        continue;
+                    if (m_rce2Pass[i].sliceType != m_rce2Pass[idx].sliceType)
+                        continue;
+                    q += qScale[idx] * coeff;
+                    sum += coeff;
+                }
+                blurredQscale[i] = q / sum;
+            }
+        }
+
+        /* find expected bits */
+        for (int i = 0; i < m_numEntries; i++)
+        {
+            RateControlEntry *rce = &m_rce2Pass[i];
+            rce->newQScale = clipQscale(NULL, rce, blurredQscale[i]); // check if needed
+            X265_CHECK(rce->newQScale >= 0, "new Qscale is negative\n");
+            expectedBits += qScale2bits(rce, rce->newQScale);
+        }
+
+        if (expectedBits > allAvailableBits)
+            rateFactor -= step;
+    }
+
+    X265_FREE(qScale);
+    if (filterSize > 1)
+        X265_FREE(blurredQscale);
+
+    if (m_isVbv)
+        if (!vbv2Pass(allAvailableBits))
+            return false;
+    expectedBits = countExpectedBits();
+
+    if (fabs(expectedBits / allAvailableBits - 1.0) > 0.01)
+    {
+        double avgq = 0;
+        for (int i = 0; i < m_numEntries; i++)
+            avgq += m_rce2Pass[i].newQScale;
+        avgq = x265_qScale2qp(avgq / m_numEntries);
+
+        if (expectedBits > allAvailableBits || !m_isVbv)
+            x265_log(m_param, X265_LOG_WARNING, "Error: 2pass curve failed to converge\n");
+        x265_log(m_param, X265_LOG_WARNING, "target: %.2f kbit/s, expected: %.2f kbit/s, avg QP: %.4f\n",
+                 (double)m_param->rc.bitrate,
+                 expectedBits * m_fps / (m_numEntries * 1000.),
+                 avgq);
+        if (expectedBits < allAvailableBits && avgq < QP_MIN + 2)
+        {
+            x265_log(m_param, X265_LOG_WARNING, "try reducing target bitrate\n");
+        }
+        else if (expectedBits > allAvailableBits && avgq > QP_MAX_SPEC - 2)
+        {
+            x265_log(m_param, X265_LOG_WARNING, "try increasing target bitrate\n");
+        }
+        else if (!(m_2pass && m_isVbv))
+            x265_log(m_param, X265_LOG_WARNING, "internal error\n");
+    }
+
+    return true;
+
+fail:
+    x265_log(m_param, X265_LOG_WARNING, "two-pass ABR initialization failed\n");
+    return false;
+}
+
+/* Second-pass VBV pre-pass: iteratively rescales the per-frame qscales so the
+ * planned stream both fits allAvailableBits and avoids VBV buffer underflow,
+ * then records the expected buffer fill per frame for tracking while encoding.
+ * Returns false only on allocation failure. */
+bool RateControl::vbv2Pass(uint64_t allAvailableBits)
+{
+    /* for each interval of bufferFull .. underflow, uniformly increase the qp of all
+     * frames in the interval until either buffer is full at some intermediate frame or the
+     * last frame in the interval no longer underflows.  Recompute intervals and repeat.
+     * Then do the converse to put bits back into overflow areas until target size is met */
+
+    double *fills;
+    double expectedBits = 0;
+    double adjustment;
+    double prevBits = 0;
+    int t0, t1;
+    int iterations = 0 , adjMin, adjMax;
+    CHECKED_MALLOC(fills, double, m_numEntries + 1);
+    /* shift the base pointer so fills[-1] is valid storage for the initial fill level */
+    fills++;
+
+    /* adjust overall stream size */
+    do
+    {
+        iterations++;
+        prevBits = expectedBits;
+
+        if (expectedBits)
+        {   /* not first iteration */
+            adjustment = X265_MAX(X265_MIN(expectedBits / allAvailableBits, 0.999), 0.9);
+            fills[-1] = m_bufferSize * m_param->rc.vbvBufferInit;
+            t0 = 0;
+            /* fix overflows */
+            adjMin = 1;
+            while (adjMin && findUnderflow(fills, &t0, &t1, 1))
+            {
+                adjMin = fixUnderflow(t0, t1, adjustment, MIN_QPSCALE, MAX_MAX_QPSCALE);
+                t0 = t1;
+            }
+        }
+
+        fills[-1] = m_bufferSize * (1. - m_param->rc.vbvBufferInit);
+        t0 = 0;
+        /* fix underflows -- should be done after overflow, as we'd better undersize target than underflowing VBV */
+        adjMax = 1;
+        while (adjMax && findUnderflow(fills, &t0, &t1, 0))
+            adjMax = fixUnderflow(t0, t1, 1.001, MIN_QPSCALE, MAX_MAX_QPSCALE );
+
+        expectedBits = countExpectedBits();
+    }
+    /* converge: stop once within 0.5% of the budget, or when an iteration no longer gains bits */
+    while ((expectedBits < .995 * allAvailableBits) && ((int64_t)(expectedBits+.5) > (int64_t)(prevBits+.5)));
+
+    if (!adjMax)
+        x265_log(m_param, X265_LOG_WARNING, "vbv-maxrate issue, qpmax or vbv-maxrate too low\n");
+
+    /* store expected vbv filling values for tracking when encoding */
+    for (int i = 0; i < m_numEntries; i++)
+        m_rce2Pass[i].expectedVbv = m_bufferSize - fills[i];
+
+    /* free the original (un-shifted) allocation */
+    X265_FREE(fills - 1);
+    return true;
+
+fail:
+    x265_log(m_param, X265_LOG_ERROR, "malloc failure in two-pass VBV init\n");
+    return false;
+}
+
+/* In 2pass, force the same frame types as in the 1st pass */
+/* Returns the frame type recorded in the first-pass stats for frameNum; if the
+ * second pass has more frames than the stats file covers, permanently drops
+ * back to constant-QP encoding at the average QP seen so far. */
+int RateControl::rateControlSliceType(int frameNum)
+{
+    if (m_param->rc.bStatRead)
+    {
+        if (frameNum >= m_numEntries)
+        {
+            /* We could try to initialize everything required for ABR and
+             * adaptive B-frames, but that would be complicated.
+             * So just calculate the average QP used so far. */
+            m_param->rc.qp = (m_accumPQp < 1) ? ABR_INIT_QP_MAX : (int)(m_accumPQp + 0.5);
+            m_qpConstant[P_SLICE] = x265_clip3(QP_MIN, QP_MAX_MAX, m_param->rc.qp);
+            m_qpConstant[I_SLICE] = x265_clip3(QP_MIN, QP_MAX_MAX, (int)(m_param->rc.qp - m_ipOffset + 0.5));
+            m_qpConstant[B_SLICE] = x265_clip3(QP_MIN, QP_MAX_MAX, (int)(m_param->rc.qp + m_pbOffset + 0.5));
+
+            x265_log(m_param, X265_LOG_ERROR, "2nd pass has more frames than 1st pass (%d)\n", m_numEntries);
+            x265_log(m_param, X265_LOG_ERROR, "continuing anyway, at constant QP=%d\n", m_param->rc.qp);
+            if (m_param->bFrameAdaptive)
+                x265_log(m_param, X265_LOG_ERROR, "disabling adaptive B-frames\n");
+
+            /* disable every feature that depends on first-pass stats */
+            m_isAbr = 0;
+            m_2pass = 0;
+            m_param->rc.rateControlMode = X265_RC_CQP;
+            m_param->rc.bStatRead = 0;
+            m_param->bFrameAdaptive = 0;
+            m_param->scenecutThreshold = 0;
+            m_param->rc.cuTree = 0;
+            if (m_param->bframes > 1)
+                m_param->bframes = 1;
+            return X265_TYPE_AUTO;
+        }
+        /* map the stored slice type back to an x265 frame type; I frames are IDR
+         * except non-first I frames in open-GOP mode */
+        int frameType = m_rce2Pass[frameNum].sliceType == I_SLICE ? (frameNum > 0 && m_param->bOpenGOP ? X265_TYPE_I : X265_TYPE_IDR)
+                            : m_rce2Pass[frameNum].sliceType == P_SLICE ? X265_TYPE_P
+                            : (m_rce2Pass[frameNum].sliceType == B_SLICE && m_rce2Pass[frameNum].keptAsRef? X265_TYPE_BREF : X265_TYPE_B);
+        return frameType;
+    }
+    else
+        return X265_TYPE_AUTO;
+}
+
+/* Per-frame rate-control entry point, called before a frame starts encoding.
+ * Waits for its turn in encode order, seeds the RateControlEntry from the
+ * frame (and 2-pass stats when reading), primes the VBV size predictors, and
+ * returns the QP the frame should begin encoding with. */
+int RateControl::rateControlStart(Frame* curFrame, RateControlEntry* rce, Encoder* enc)
+{
+    int orderValue = m_startEndOrder.get();
+    int startOrdinal = rce->encodeOrder * 2;
+
+    /* block until every earlier frame has passed both its start and end
+     * checkpoints (two ticks per frame in m_startEndOrder) */
+    while (orderValue < startOrdinal && !m_bTerminated)
+        orderValue = m_startEndOrder.waitForChange(orderValue);
+
+    if (!curFrame)
+    {
+        // faked rateControlStart calls when the encoder is flushing
+        m_startEndOrder.incr();
+        return 0;
+    }
+
+    FrameData& curEncData = *curFrame->m_encData;
+    m_curSlice = curEncData.m_slice;
+    m_sliceType = m_curSlice->m_sliceType;
+    rce->sliceType = m_sliceType;
+    if (!m_2pass)
+        rce->keptAsRef = IS_REFERENCED(curFrame);
+    m_predType = getPredictorType(curFrame->m_lowres.sliceType, m_sliceType);
+    rce->poc = m_curSlice->m_poc;
+    if (m_param->rc.bStatRead)
+    {
+        X265_CHECK(rce->poc >= 0 && rce->poc < m_numEntries, "bad encode ordinal\n");
+        copyRceData(rce, &m_rce2Pass[rce->poc]);
+    }
+    rce->isActive = true;
+    bool isframeAfterKeyframe = m_sliceType != I_SLICE && m_curSlice->m_refFrameList[0][0]->m_encData->m_slice->m_sliceType == I_SLICE;
+    if (curFrame->m_lowres.bScenecut)
+    {
+        m_isSceneTransition = true;
+        /* Frame Predictors and Row predictors used in vbv */
+        for (int i = 0; i < 4; i++)
+        {
+            m_pred[i].coeff = 1.0;
+            m_pred[i].count = 1.0;
+            m_pred[i].decay = 0.5;
+            m_pred[i].offset = 0.0;
+        }
+        m_pred[0].coeff = m_pred[3].coeff = 0.75;
+    }
+    else if (m_sliceType != B_SLICE && !isframeAfterKeyframe)
+        m_isSceneTransition = false;
+
+    rce->bLastMiniGopBFrame = curFrame->m_lowres.bLastMiniGopBFrame;
+    rce->bufferRate = m_bufferRate;
+    rce->rowCplxrSum = 0.0;
+    rce->rowTotalBits = 0;
+    if (m_isVbv)
+    {
+        if (rce->rowPreds[0][0].count == 0)
+        {
+            /* first use of this entry: seed the row-level size predictors */
+            for (int i = 0; i < 3; i++)
+            {
+                for (int j = 0; j < 2; j++)
+                {
+                    rce->rowPreds[i][j].coeff = 0.25;
+                    rce->rowPreds[i][j].count = 1.0;
+                    rce->rowPreds[i][j].decay = 0.5;
+                    rce->rowPreds[i][j].offset = 0.0;
+                }
+            }
+        }
+        rce->rowPred[0] = &rce->rowPreds[m_sliceType][0];
+        rce->rowPred[1] = &rce->rowPreds[m_sliceType][1];
+        m_predictedBits = m_totalBits;
+        updateVbvPlan(enc);
+        rce->bufferFill = m_bufferFill;
+
+        int mincr = enc->m_vps.ptl.minCrForLevel;
+        /* Profiles above Main10 don't require maxAU size check, so just set the maximum to a large value. */
+        if (enc->m_vps.ptl.profileIdc > Profile::MAIN10 || enc->m_vps.ptl.levelIdc == Level::NONE)
+            rce->frameSizeMaximum = 1e9;
+        else
+        {
+            /* The spec has a special case for the first frame. */
+            if (rce->encodeOrder == 0)
+            {
+                /* 1.5 * (Max( PicSizeInSamplesY, fR * MaxLumaSr) + MaxLumaSr * (AuCpbRemovalTime[ 0 ] -AuNominalRemovalTime[ 0 ])) ? MinCr */
+                double fr = 1. / 300;
+                int picSizeInSamplesY = m_param->sourceWidth * m_param->sourceHeight;
+                rce->frameSizeMaximum = 8 * 1.5 * X265_MAX(picSizeInSamplesY, fr * enc->m_vps.ptl.maxLumaSrForLevel) / mincr;
+            }
+            else
+            {
+                /* 1.5 * MaxLumaSr * (AuCpbRemovalTime[ n ] - AuCpbRemovalTime[ n - 1 ]) / MinCr */
+                rce->frameSizeMaximum = 8 * 1.5 * enc->m_vps.ptl.maxLumaSrForLevel * m_frameDuration / mincr;
+            }
+        }
+    }
+    if (m_isAbr || m_2pass) // ABR,CRF
+    {
+        if (m_isAbr || m_isVbv)
+        {
+            m_currentSatd = curFrame->m_lowres.satdCost >> (X265_DEPTH - 8);
+            /* Update rce for use in rate control VBV later */
+            rce->lastSatd = m_currentSatd;
+            X265_CHECK(rce->lastSatd, "satdcost cannot be zero\n");
+            /* Detect a pattern for B frames with same SATDcost to identify a series of static frames
+             * and the P frame at the end of the series marks a possible case for ABR reset logic */
+            if (m_param->bframes)
+            {
+                if (m_sliceType != B_SLICE && m_numBframesInPattern > m_param->bframes)
+                {
+                    m_isPatternPresent = true;
+                }
+                else if (m_sliceType == B_SLICE && !IS_REFERENCED(curFrame))
+                {
+                    if (m_currentSatd != m_lastBsliceSatdCost && !rce->bLastMiniGopBFrame)
+                    {
+                        m_isPatternPresent = false;
+                        m_lastBsliceSatdCost = m_currentSatd;
+                        m_numBframesInPattern = 0;
+                    }
+                    else if (m_currentSatd == m_lastBsliceSatdCost)
+                        m_numBframesInPattern++;
+                }
+            }
+        }
+        /* For a scenecut that occurs within the mini-gop, enable scene transition
+         * switch until the next mini-gop to ensure a min qp for all the frames within 
+         * the scene-transition mini-gop */
+
+        double q = x265_qScale2qp(rateEstimateQscale(curFrame, rce));
+        q = x265_clip3((double)QP_MIN, (double)QP_MAX_MAX, q);
+        m_qp = int(q + 0.5);
+        rce->qpaRc = curEncData.m_avgQpRc = curEncData.m_avgQpAq = q;
+        /* copy value of lastRceq into thread local rce struct *to be used in RateControlEnd() */
+        rce->qRceq = m_lastRceq;
+        accumPQpUpdate();
+    }
+    else // CQP
+    {
+        if (m_sliceType == B_SLICE && IS_REFERENCED(curFrame))
+            m_qp = (m_qpConstant[B_SLICE] + m_qpConstant[P_SLICE]) / 2;
+        else
+            m_qp = m_qpConstant[m_sliceType];
+        curEncData.m_avgQpAq = curEncData.m_avgQpRc = m_qp;
+        
+        x265_zone* zone = getZone();
+        if (zone)
+        {
+            if (zone->bForceQp)
+                m_qp += zone->qp - m_qpConstant[P_SLICE];
+            else
+                m_qp -= (int)(6.0 * X265_LOG2(zone->bitrateFactor));
+        }
+    }
+    if (m_sliceType != B_SLICE)
+    {
+        /* remember the latest non-B slice so B frames can be priced off it */
+        m_lastNonBPictType = m_sliceType;
+        m_leadingNoBSatd = m_currentSatd;
+    }
+    rce->leadingNoBSatd = m_leadingNoBSatd;
+    if (curFrame->m_forceqp)
+    {
+        /* an externally forced QP (e.g. qpfile) overrides everything above */
+        m_qp = (int32_t)(curFrame->m_forceqp + 0.5) - 1;
+        m_qp = x265_clip3(QP_MIN, QP_MAX_MAX, m_qp);
+        rce->qpaRc = curEncData.m_avgQpRc = curEncData.m_avgQpAq = m_qp;
+        if (m_isAbr || m_2pass)
+        {
+            rce->qpNoVbv = rce->qpaRc;
+            m_lastQScaleFor[m_sliceType] = x265_qp2qScale(rce->qpaRc);
+            if (rce->poc == 0)
+                 m_lastQScaleFor[P_SLICE] = m_lastQScaleFor[m_sliceType] * fabs(m_param->rc.ipFactor);
+            rce->frameSizePlanned = predictSize(&m_pred[m_predType], m_qp, (double)m_currentSatd);
+        }
+    }
+    m_framesDone++;
+
+    return m_qp;
+}
+
+/* Fold the current frame's QP into the exponentially decayed running
+ * accumulator of P-frame QPs; an I frame contributes its QP shifted by
+ * the I/P offset so it is comparable to a P QP. */
+void RateControl::accumPQpUpdate()
+{
+    const double decay = .95;
+    m_accumPQp *= decay;
+    m_accumPNorm = m_accumPNorm * decay + 1;
+    double contribution = m_qp;
+    if (m_sliceType == I_SLICE)
+        contribution += m_ipOffset;
+    m_accumPQp += contribution;
+}
+
+/* Select the frame-size predictor slot for VBV: referenced B frames (BREF)
+ * get a dedicated slot (3); all other frames use their slice type as index. */
+int RateControl::getPredictorType(int lowresSliceType, int sliceType)
+{
+    const int bRefPredictor = 3;
+    return (lowresSliceType == X265_TYPE_BREF) ? bRefPredictor : sliceType;
+}
+
+/* 2-pass helper: derives I- and B-frame qscales from recent P-frame qscales,
+ * limits frame-to-frame qscale swings to one m_lstep, then updates the
+ * running P-QP accumulators and applies any zone override.  Returns the
+ * adjusted qscale for this entry. */
+double RateControl::getDiffLimitedQScale(RateControlEntry *rce, double q)
+{
+    // force I/B quants as a function of P quants
+    const double lastPqScale    = m_lastQScaleFor[P_SLICE];
+    const double lastNonBqScale = m_lastQScaleFor[m_lastNonBPictType];
+    if (rce->sliceType == I_SLICE)
+    {
+        double iq = q;
+        double pq = x265_qp2qScale(m_accumPQp / m_accumPNorm);
+        double ipFactor = fabs(m_param->rc.ipFactor);
+        /* don't apply ipFactor if the following frame is also I */
+        if (m_accumPNorm <= 0)
+            q = iq;
+        else if (m_param->rc.ipFactor < 0)
+            q = iq / ipFactor;
+        else if (m_accumPNorm >= 1)
+            q = pq / ipFactor;
+        else
+            q = m_accumPNorm * pq / ipFactor + (1 - m_accumPNorm) * iq;
+    }
+    else if (rce->sliceType == B_SLICE)
+    {
+        if (m_param->rc.pbFactor > 0)
+            q = lastNonBqScale;
+        if (!rce->keptAsRef)
+            q *= fabs(m_param->rc.pbFactor);
+    }
+    else if (rce->sliceType == P_SLICE
+             && m_lastNonBPictType == P_SLICE
+             && rce->coeffBits == 0)
+    {
+        /* P frame with no residual coefficients: reuse the previous P qscale */
+        q = lastPqScale;
+    }
+
+    /* last qscale / qdiff stuff */
+    if (m_lastNonBPictType == rce->sliceType &&
+        (rce->sliceType != I_SLICE || m_lastAccumPNorm < 1))
+    {
+        double maxQscale = m_lastQScaleFor[rce->sliceType] * m_lstep;
+        double minQscale = m_lastQScaleFor[rce->sliceType] / m_lstep;
+        q = x265_clip3(minQscale, maxQscale, q);
+    }
+
+    m_lastQScaleFor[rce->sliceType] = q;
+    if (rce->sliceType != B_SLICE)
+        m_lastNonBPictType = rce->sliceType;
+    if (rce->sliceType == I_SLICE)
+    {
+        /* restart the P accumulators after an I frame */
+        m_lastAccumPNorm = m_accumPNorm;
+        m_accumPNorm = 0;
+        m_accumPQp = 0;
+    }
+    if (rce->sliceType == P_SLICE)
+    {
+        /* weight the contribution by how intra-heavy the frame was */
+        double mask = 1 - pow(rce->iCuCount / m_ncu, 2);
+        m_accumPQp   = mask * (x265_qScale2qp(q) + m_accumPQp);
+        m_accumPNorm = mask * (1 + m_accumPNorm);
+    }
+
+    x265_zone* zone = getZone();
+    if (zone)
+    {
+        if (zone->bForceQp)
+            q = x265_qp2qScale(zone->qp);
+        else
+            q /= zone->bitrateFactor;
+    }
+    return q;
+}
+
+/* Sum the bits predicted for every 2-pass entry at its current newQScale,
+ * recording the running total so far into each entry's expectedBits field
+ * along the way.  Returns the grand total. */
+double RateControl::countExpectedBits()
+{
+    double total = 0;
+    for (int idx = 0; idx < m_numEntries; idx++)
+    {
+        RateControlEntry *entry = &m_rce2Pass[idx];
+        entry->expectedBits = (uint64_t)total;
+        total += qScale2bits(entry, entry->newQScale);
+    }
+    return total;
+}
+
+/* Simulate the VBV buffer fill forward from *t0 at each frame's current
+ * qscale.  With over=1 the scan looks for an interval where bits may be
+ * added back (buffer rising toward full); with over=0 the fill is negated so
+ * the same test finds intervals that must lose bits.  On success *t0/*t1
+ * bound the interval; fills[] receives the simulated fill per frame. */
+bool RateControl::findUnderflow(double *fills, int *t0, int *t1, int over)
+{
+    /* find an interval ending on an overflow or underflow (depending on whether
+     * we're adding or removing bits), and starting on the earliest frame that
+     * can influence the buffer fill of that end frame. */
+    const double bufferMin = .1 * m_bufferSize;
+    const double bufferMax = .9 * m_bufferSize;
+    double fill = fills[*t0 - 1];
+    double parity = over ? 1. : -1.;
+    int start = -1, end = -1;
+    for (int i = *t0; i < m_numEntries; i++)
+    {
+        fill += (m_frameDuration * m_vbvMaxRate -
+                 qScale2bits(&m_rce2Pass[i], m_rce2Pass[i].newQScale)) * parity;
+        fill = x265_clip3(0.0, m_bufferSize, fill);
+        fills[i] = fill;
+        if (fill <= bufferMin || i == 0)
+        {
+            /* a low point: either close off a found interval or restart it here */
+            if (end >= 0)
+                break;
+            start = i;
+        }
+        else if (fill >= bufferMax && start >= 0)
+            end = i;
+    }
+    *t0 = start;
+    *t1 = end;
+    return start >= 0 && end >= 0;
+}
+
+/* Multiply the qscale of every frame in [t0, t1] by 'adjustment', clamping
+ * both the original and the scaled value into [qscaleMin, qscaleMax].
+ * Returns true if any frame's qscale actually changed. */
+bool RateControl::fixUnderflow(int t0, int t1, double adjustment, double qscaleMin, double qscaleMax)
+{
+    bool changed = false;
+    /* the first frame of a non-initial interval was already handled by the
+     * previous interval's fix, so skip it */
+    int first = (t0 > 0) ? t0 + 1 : t0;
+    for (int frame = first; frame <= t1; frame++)
+    {
+        double before = x265_clip3(qscaleMin, qscaleMax, m_rce2Pass[frame].newQScale);
+        double after  = x265_clip3(qscaleMin, qscaleMax, before * adjustment);
+        m_rce2Pass[frame].newQScale = after;
+        if (after != before)
+            changed = true;
+    }
+    return changed;
+}
+
+/* Load the CU-tree QP offsets recorded by the first pass for 'frame' and
+ * apply them to the frame's lowres data; frames not kept as reference have
+ * no stored offsets.  Returns false on a truncated or mismatched stats file. */
+bool RateControl::cuTreeReadFor2Pass(Frame* frame)
+{
+    uint8_t sliceTypeActual = (uint8_t)m_rce2Pass[frame->m_poc].sliceType;
+
+    if (m_rce2Pass[frame->m_poc].keptAsRef)
+    {
+        /* TODO: We don't need pre-lookahead to measure AQ offsets, but there is currently
+         * no way to signal this */
+        uint8_t type;
+        if (m_cuTreeStats.qpBufPos < 0)
+        {
+            /* refill the buffer: keep reading records until one matches the
+             * expected slice type */
+            do
+            {
+                m_cuTreeStats.qpBufPos++;
+
+                if (!fread(&type, 1, 1, m_cutreeStatFileIn))
+                    goto fail;
+                if (fread(m_cuTreeStats.qpBuffer[m_cuTreeStats.qpBufPos], sizeof(uint16_t), m_ncu, m_cutreeStatFileIn) != (size_t)m_ncu)
+                    goto fail;
+
+                if (type != sliceTypeActual && m_cuTreeStats.qpBufPos == 1)
+                {
+                    x265_log(m_param, X265_LOG_ERROR, "CU-tree frametype %d doesn't match actual frametype %d.\n", type, sliceTypeActual);
+                    return false;
+                }
+            }
+            while(type != sliceTypeActual);
+        }
+        for (int i = 0; i < m_ncu; i++)
+        {
+            /* offsets are stored as fixed-point with 8 fractional bits */
+            int16_t qpFix8 = m_cuTreeStats.qpBuffer[m_cuTreeStats.qpBufPos][i];
+            frame->m_lowres.qpCuTreeOffset[i] = (double)(qpFix8) / 256.0;
+            frame->m_lowres.invQscaleFactor[i] = x265_exp2fix8(frame->m_lowres.qpCuTreeOffset[i]);
+        }
+        m_cuTreeStats.qpBufPos--;
+    }
+    return true;
+
+fail:
+    x265_log(m_param, X265_LOG_ERROR, "Incomplete CU-tree stats file.\n");
+    return false;
+}
+
+/* Nudge an ABR qscale up or down according to how far the bits actually
+ * encoded so far have drifted from the bits the target bitrate would have
+ * consumed in the same time.  Returns the (possibly scaled) qscale. */
+double RateControl::tuneAbrQScaleFromFeedback(double qScale)
+{
+    if (!m_currentSatd)
+        return qScale;
+
+    /* use framesDone instead of POC as poc count is not serial with bframes enabled */
+    double timeDone = (double)(m_framesDone - m_param->frameNumThreads + 1) * m_frameDuration;
+    double wantedBits = timeDone * m_bitrate;
+    double abrBuffer = 2 * m_rateTolerance * m_bitrate;
+    int64_t encodedBits = m_totalBits;
+    bool veryShortClip = m_param->totalFrames && m_param->totalFrames <= 2 * m_fps;
+    if (veryShortClip)
+    {
+        /* tiny clips: budget over the whole clip and count only real bits */
+        abrBuffer = m_param->totalFrames * (m_bitrate / m_fps);
+        encodedBits = m_encodedBits;
+    }
+
+    bool feedbackValid = wantedBits > 0 && encodedBits > 0 &&
+        (!m_partialResidualFrames || m_param->rc.bStrictCbr);
+    if (feedbackValid)
+    {
+        abrBuffer *= X265_MAX(1, sqrt(timeDone));
+        double overflow = x265_clip3(.5, 2.0, 1.0 + (encodedBits - wantedBits) / abrBuffer);
+        qScale *= overflow;
+    }
+    return qScale;
+}
+
+/* Core per-frame qscale estimation for ABR/CRF/2-pass.  B frames are priced
+ * off their adjacent reference frames; I/P frames run either the 2-pass
+ * curve-following path or the 1-pass ABR feedback path, then both are passed
+ * through VBV clipping.  Side effects: updates m_lastQScaleFor and plans the
+ * frame size in 'rce'.  Returns the chosen qscale. */
+double RateControl::rateEstimateQscale(Frame* curFrame, RateControlEntry *rce)
+{
+    double q;
+
+    if (m_2pass)
+    {
+        if (m_sliceType != rce->sliceType)
+        {
+            x265_log(m_param, X265_LOG_ERROR, "slice=%c but 2pass stats say %c\n",
+                     g_sliceTypeToChar[m_sliceType], g_sliceTypeToChar[rce->sliceType]);
+        }
+    }
+    else
+    {
+        if (m_isAbr)
+        {
+            /* decayed sum of recent frame complexities over a circular window */
+            double slidingWindowCplxSum = 0;
+            int start = m_sliderPos > s_slidingWindowFrames ?  m_sliderPos : 0;
+            for (int cnt = 0; cnt < s_slidingWindowFrames; cnt++, start++)
+            {
+                int pos = start % s_slidingWindowFrames;
+                slidingWindowCplxSum *= 0.5;
+                if (!m_satdCostWindow[pos])
+                    break;
+                slidingWindowCplxSum += m_satdCostWindow[pos];
+            }
+            rce->movingAvgSum = slidingWindowCplxSum;
+            m_satdCostWindow[m_sliderPos % s_slidingWindowFrames] = rce->lastSatd;
+            m_sliderPos++;
+        }
+    }
+
+    if (m_sliceType == B_SLICE)
+    {
+        /* B-frames don't have independent rate control, but rather get the
+         * average QP of the two adjacent P-frames + an offset */
+        Slice* prevRefSlice = m_curSlice->m_refFrameList[0][0]->m_encData->m_slice;
+        Slice* nextRefSlice = m_curSlice->m_refFrameList[1][0]->m_encData->m_slice;
+        double q0 = m_curSlice->m_refFrameList[0][0]->m_encData->m_avgQpRc;
+        double q1 = m_curSlice->m_refFrameList[1][0]->m_encData->m_avgQpRc;
+        bool i0 = prevRefSlice->m_sliceType == I_SLICE;
+        bool i1 = nextRefSlice->m_sliceType == I_SLICE;
+        int dt0 = abs(m_curSlice->m_poc - prevRefSlice->m_poc);
+        int dt1 = abs(m_curSlice->m_poc - nextRefSlice->m_poc);
+
+        // Skip taking a reference frame before the Scenecut if ABR has been reset.
+        if (m_lastAbrResetPoc >= 0)
+        {
+            if (prevRefSlice->m_sliceType == P_SLICE && prevRefSlice->m_poc < m_lastAbrResetPoc)
+            {
+                i0 = i1;
+                dt0 = dt1;
+                q0 = q1;
+            }
+        }
+        /* referenced B frames already carry half the pb offset; undo it */
+        if (prevRefSlice->m_sliceType == B_SLICE && IS_REFERENCED(m_curSlice->m_refFrameList[0][0]))
+            q0 -= m_pbOffset / 2;
+        if (nextRefSlice->m_sliceType == B_SLICE && IS_REFERENCED(m_curSlice->m_refFrameList[1][0]))
+            q1 -= m_pbOffset / 2;
+        if (i0 && i1)
+            q = (q0 + q1) / 2 + m_ipOffset;
+        else if (i0)
+            q = q1;
+        else if (i1)
+            q = q0;
+        else
+            q = (q0 * dt1 + q1 * dt0) / (dt0 + dt1);
+
+        if (IS_REFERENCED(curFrame))
+            q += m_pbOffset / 2;
+        else
+            q += m_pbOffset;
+
+        /* Set a min qp at scenechanges and transitions */
+        if (m_isSceneTransition)
+        {
+            q = X265_MAX(ABR_SCENECUT_INIT_QP_MIN, q);
+            double minScenecutQscale =x265_qp2qScale(ABR_SCENECUT_INIT_QP_MIN); 
+            m_lastQScaleFor[P_SLICE] = X265_MAX(minScenecutQscale, m_lastQScaleFor[P_SLICE]);
+        }
+        double qScale = x265_qp2qScale(q);
+        rce->qpNoVbv = q;
+        double lmin = 0, lmax = 0;
+        if (m_isVbv)
+        {
+            lmin = m_lastQScaleFor[P_SLICE] / m_lstep;
+            lmax = m_lastQScaleFor[P_SLICE] * m_lstep;
+            if (m_isCbr)
+            {
+                qScale = tuneAbrQScaleFromFeedback(qScale);
+                if (!m_isAbrReset)
+                    qScale = x265_clip3(lmin, lmax, qScale);
+                q = x265_qScale2qp(qScale);
+            }
+            if (!m_2pass)
+            {
+                qScale = clipQscale(curFrame, rce, qScale);
+                /* clip qp to permissible range after vbv-lookahead estimation to avoid possible 
+                 * mispredictions by initial frame size predictors */
+                if (m_pred[m_predType].count == 1)
+                    qScale = x265_clip3(lmin, lmax, qScale);
+                m_lastQScaleFor[m_sliceType] = qScale;
+                rce->frameSizePlanned = predictSize(&m_pred[m_predType], qScale, (double)m_currentSatd);
+            }
+            else
+                rce->frameSizePlanned = qScale2bits(rce, qScale);
+
+            /* Limit planned size by MinCR */
+            rce->frameSizePlanned = X265_MIN(rce->frameSizePlanned, rce->frameSizeMaximum);
+            rce->frameSizeEstimated = rce->frameSizePlanned;
+        }
+        rce->newQScale = qScale;
+        return qScale;
+    }
+    else
+    {
+        double abrBuffer = 2 * m_rateTolerance * m_bitrate;
+        if (m_2pass)
+        {
+            int64_t diff;
+            if (!m_isVbv)
+            {
+                m_predictedBits = m_totalBits;
+                if (rce->encodeOrder < m_param->frameNumThreads)
+                    m_predictedBits += (int64_t)(rce->encodeOrder * m_bitrate / m_fps);
+                else
+                    m_predictedBits += (int64_t)(m_param->frameNumThreads * m_bitrate / m_fps);
+            }
+            /* Adjust ABR buffer based on distance to the end of the video. */
+            if (m_numEntries > rce->encodeOrder)
+            {
+                uint64_t finalBits = m_rce2Pass[m_numEntries - 1].expectedBits;
+                double videoPos = (double)rce->expectedBits / finalBits;
+                double scaleFactor = sqrt((1 - videoPos) * m_numEntries);
+                abrBuffer *= 0.5 * X265_MAX(scaleFactor, 0.5);
+            }
+            diff = m_predictedBits - (int64_t)rce->expectedBits;
+            q = rce->newQScale;
+            /* scale the planned qscale by how far we are ahead of / behind the plan */
+            q /= x265_clip3(0.5, 2.0, (double)(abrBuffer - diff) / abrBuffer);
+            if (m_expectedBitsSum > 0)
+            {
+                /* Adjust quant based on the difference between
+                 * achieved and expected bitrate so far */
+                double curTime = (double)rce->encodeOrder / m_numEntries;
+                double w = x265_clip3(0.0, 1.0, curTime * 100);
+                q *= pow((double)m_totalBits / m_expectedBitsSum, w);
+            }
+            rce->qpNoVbv = x265_qScale2qp(q);
+            if (m_isVbv)
+            {
+                /* Do not overflow vbv */
+                double expectedSize = qScale2bits(rce, q);
+                double expectedVbv = m_bufferFill + m_bufferRate - expectedSize;
+                double expectedFullness = rce->expectedVbv / m_bufferSize;
+                double qmax = q * (2 - expectedFullness);
+                double sizeConstraint = 1 + expectedFullness;
+                qmax = X265_MAX(qmax, rce->newQScale);
+                if (expectedFullness < .05)
+                    qmax = MAX_MAX_QPSCALE;
+                qmax = X265_MIN(qmax, MAX_MAX_QPSCALE);
+                /* raise q until the planned buffer fill is acceptable or qmax is hit */
+                while (((expectedVbv < rce->expectedVbv/sizeConstraint) && (q < qmax)) ||
+                        ((expectedVbv < 0) && (q < MAX_MAX_QPSCALE)))
+                {
+                    q *= 1.05;
+                    expectedSize = qScale2bits(rce, q);
+                    expectedVbv = m_bufferFill + m_bufferRate - expectedSize;
+                }
+            }
+            q = x265_clip3(MIN_QPSCALE, MAX_MAX_QPSCALE, q);
+        }
+        else
+        {
+            /* 1pass ABR */
+
+            /* Calculate the quantizer which would have produced the desired
+             * average bitrate if it had been applied to all frames so far.
+             * Then modulate that quant based on the current frame's complexity
+             * relative to the average complexity so far (using the 2pass RCEQ).
+             * Then bias the quant up or down if total size so far was far from
+             * the target.
+             * Result: Depending on the value of rate_tolerance, there is a
+             * trade-off between quality and bitrate precision. But at large
+             * tolerances, the bit distribution approaches that of 2pass. */
+
+            double overflow = 1;
+            double lqmin = MIN_QPSCALE, lqmax = MAX_MAX_QPSCALE;
+            m_shortTermCplxSum *= 0.5;
+            m_shortTermCplxCount *= 0.5;
+            m_shortTermCplxSum += m_currentSatd / (CLIP_DURATION(m_frameDuration) / BASE_FRAME_DURATION);
+            m_shortTermCplxCount++;
+            /* coeffBits to be used in 2-pass */
+            rce->coeffBits = (int)m_currentSatd;
+            rce->blurredComplexity = m_shortTermCplxSum / m_shortTermCplxCount;
+            rce->mvBits = 0;
+            rce->sliceType = m_sliceType;
+
+            if (m_param->rc.rateControlMode == X265_RC_CRF)
+            {
+                q = getQScale(rce, m_rateFactorConstant);
+            }
+            else
+            {
+                if (!m_param->rc.bStatRead)
+                    checkAndResetABR(rce, false);
+                double initialQScale = getQScale(rce, m_wantedBitsWindow / m_cplxrSum);
+                q = tuneAbrQScaleFromFeedback(initialQScale);
+                overflow = q / initialQScale;
+            }
+            if (m_sliceType == I_SLICE && m_param->keyframeMax > 1
+                && m_lastNonBPictType != I_SLICE && !m_isAbrReset)
+            {
+                /* derive the I qscale from the accumulated P QP instead */
+                if (!m_param->rc.bStrictCbr)
+                    q = x265_qp2qScale(m_accumPQp / m_accumPNorm);
+                q /= fabs(m_param->rc.ipFactor);
+            }
+            else if (m_framesDone > 0)
+            {
+                if (m_param->rc.rateControlMode != X265_RC_CRF)
+                {
+                    lqmin = m_lastQScaleFor[m_sliceType] / m_lstep;
+                    lqmax = m_lastQScaleFor[m_sliceType] * m_lstep;
+                    if (!m_partialResidualFrames)
+                    {
+                        if (overflow > 1.1 && m_framesDone > 3)
+                            lqmax *= m_lstep;
+                        else if (overflow < 0.9)
+                            lqmin /= m_lstep;
+                    }
+                    q = x265_clip3(lqmin, lqmax, q);
+                }
+            }
+            else if (m_qCompress != 1 && m_param->rc.rateControlMode == X265_RC_CRF)
+            {
+                q = x265_qp2qScale(CRF_INIT_QP) / fabs(m_param->rc.ipFactor);
+            }
+            else if (m_framesDone == 0 && !m_isVbv && m_param->rc.rateControlMode == X265_RC_ABR)
+            {
+                /* for ABR alone, clip the first I frame qp */
+                lqmax = x265_qp2qScale(ABR_INIT_QP_MAX) * m_lstep;
+                q = X265_MIN(lqmax, q);
+            }
+            q = x265_clip3(MIN_QPSCALE, MAX_MAX_QPSCALE, q);
+            /* Set a min qp at scenechanges and transitions */
+            if (m_isSceneTransition)
+            {
+               double minScenecutQscale =x265_qp2qScale(ABR_SCENECUT_INIT_QP_MIN); 
+               q = X265_MAX(minScenecutQscale, q);
+               m_lastQScaleFor[P_SLICE] = X265_MAX(minScenecutQscale, m_lastQScaleFor[P_SLICE]);
+            }
+            rce->qpNoVbv = x265_qScale2qp(q);
+            q = clipQscale(curFrame, rce, q);
+            /*  clip qp to permissible range after vbv-lookahead estimation to avoid possible
+             * mispredictions by initial frame size predictors */
+            if (!m_2pass && m_isVbv && m_pred[m_predType].count == 1)
+                q = x265_clip3(lqmin, lqmax, q);
+        }
+        m_lastQScaleFor[m_sliceType] = q;
+        if ((m_curSlice->m_poc == 0 || m_lastQScaleFor[P_SLICE] < q) && !(m_2pass && !m_isVbv))
+            m_lastQScaleFor[P_SLICE] = q * fabs(m_param->rc.ipFactor);
+
+        if (m_2pass && m_isVbv)
+            rce->frameSizePlanned = qScale2bits(rce, q);
+        else
+            rce->frameSizePlanned = predictSize(&m_pred[m_predType], q, (double)m_currentSatd);
+
+        /* Always use up the whole VBV in this case. */
+        if (m_singleFrameVbv)
+            rce->frameSizePlanned = m_bufferRate;
+        /* Limit planned size by MinCR */
+        if (m_isVbv)
+            rce->frameSizePlanned = X265_MIN(rce->frameSizePlanned, rce->frameSizeMaximum);
+        rce->frameSizeEstimated = rce->frameSizePlanned;
+        rce->newQScale = q;
+        return q;
+    }
+}
+
+/* Fold a completed frame's bit statistics into the global rate-control state.
+ * In single-pass mode (no stats read or written), I-frame cost is amortized
+ * over following frames so one large keyframe does not starve its neighbours.
+ * Also advances the start/end ordering gate so the next frame may enter
+ * rateControlStart(). */
+void RateControl::rateControlUpdateStats(RateControlEntry* rce)
+{
+    if (!m_param->rc.bStatWrite && !m_param->rc.bStatRead)
+    {
+        if (rce->sliceType == I_SLICE)
+        {
+            /* previous I still had a residual; roll it into the new loan */
+            if (m_partialResidualFrames)
+                rce->rowTotalBits += m_partialResidualCost * m_partialResidualFrames;
+            if ((m_param->totalFrames != 0) && (m_amortizeFrames > (m_param->totalFrames - m_framesDone)))
+            {
+                /* not enough frames remain to spread the cost over */
+                m_amortizeFrames = 0;
+                m_amortizeFraction = 0;
+            }
+            else
+            {
+                /* shrink the amortization window for each successive I frame */
+                double depreciateRate = 1.1;
+                m_amortizeFrames = (int)(m_amortizeFrames / depreciateRate);
+                m_amortizeFraction /= depreciateRate;
+                m_amortizeFrames = X265_MAX(m_amortizeFrames, MIN_AMORTIZE_FRAME);
+                m_amortizeFraction = X265_MAX(m_amortizeFraction, MIN_AMORTIZE_FRACTION);
+            }
+            rce->amortizeFrames = m_amortizeFrames;
+            rce->amortizeFraction = m_amortizeFraction;
+            m_partialResidualFrames = X265_MIN((int)rce->amortizeFrames, m_param->keyframeMax);
+            m_partialResidualCost = (int)((rce->rowTotalBits * rce->amortizeFraction) / m_partialResidualFrames);
+            rce->rowTotalBits -= m_partialResidualCost * m_partialResidualFrames;
+        }
+        else if (m_partialResidualFrames)
+        {
+             rce->rowTotalBits += m_partialResidualCost;
+             m_partialResidualFrames--;
+        }
+    }
+    if (rce->sliceType != B_SLICE)
+        rce->rowCplxrSum = rce->rowTotalBits * x265_qp2qScale(rce->qpaRc) / rce->qRceq;
+    else
+        rce->rowCplxrSum = rce->rowTotalBits * x265_qp2qScale(rce->qpaRc) / (rce->qRceq * fabs(m_param->rc.pbFactor));
+
+    m_cplxrSum += rce->rowCplxrSum;
+    m_totalBits += rce->rowTotalBits;
+
+    /* do not allow the next frame to enter rateControlStart() until this
+     * frame has updated its mid-frame statistics */
+    if (m_param->rc.rateControlMode == X265_RC_ABR || m_isVbv)
+    {
+        m_startEndOrder.incr();
+
+        if (rce->encodeOrder < m_param->frameNumThreads - 1)
+            m_startEndOrder.incr(); // faked rateControlEnd calls for negative frames
+    }
+}
+
+/* Detect a scene cut that follows a run of low-detail/blank frames and, if
+ * the sliding bit window shows no recent overspend, re-initialize the ABR
+ * state so the blank-frame history does not cause a large bitrate spike.
+ * isFrameDone distinguishes the post-encode call (which clears the reset
+ * flag) from the pre-encode call (which may set it). */
+void RateControl::checkAndResetABR(RateControlEntry* rce, bool isFrameDone)
+{
+    double abrBuffer = 2 * m_rateTolerance * m_bitrate;
+
+    // Check if current Slice is a scene cut that follows low detailed/blank frames
+    if (rce->lastSatd > 4 * rce->movingAvgSum)
+    {
+        if (!m_isAbrReset && rce->movingAvgSum > 0
+            && (m_isPatternPresent || !m_param->bframes))
+        {
+            int pos = X265_MAX(m_sliderPos - m_param->frameNumThreads, 0);
+            int64_t shrtTermWantedBits = (int64_t) (X265_MIN(pos, s_slidingWindowFrames) * m_bitrate * m_frameDuration);
+            int64_t shrtTermTotalBitsSum = 0;
+            // Reset ABR if prev frames are blank to prevent further sudden overflows/ high bit rate spikes.
+            for (int i = 0; i < s_slidingWindowFrames ; i++)
+                shrtTermTotalBitsSum += m_encodedBitsWindow[i];
+            /* negative underflow means the recent window spent fewer bits than budgeted */
+            double underflow = (shrtTermTotalBitsSum - shrtTermWantedBits) / abrBuffer;
+            const double epsilon = 0.0001f;
+            if (underflow < epsilon && !isFrameDone)
+            {
+                /* restart ABR from scratch, seeding complexity with this frame's SATD */
+                init(*m_curSlice->m_sps);
+                m_shortTermCplxSum = rce->lastSatd / (CLIP_DURATION(m_frameDuration) / BASE_FRAME_DURATION);
+                m_shortTermCplxCount = 1;
+                m_isAbrReset = true;
+                m_lastAbrResetPoc = rce->poc;
+            }
+        }
+        else if (m_isAbrReset && isFrameDone)
+        {
+            // Clear flag to reset ABR and continue as usual.
+            m_isAbrReset = false;
+        }
+    }
+}
+
+/* Fill the buffering-period SEI's initial CPB removal delay and offset from
+ * the current virtual buffer fullness.  Delays are expressed in ticks of the
+ * 90 kHz HRD clock; a warning is logged if the CPB state falls outside the
+ * advertised buffer. */
+void RateControl::hrdFullness(SEIBufferingPeriod *seiBP)
+{
+    const VUI* vui = &m_curSlice->m_sps->vuiParameters;
+    const HRDInfo* hrd = &vui->hrdParameters;
+    /* 90 kHz clock over the unscaled bitrate; reduce the fraction to keep
+     * the products below in integer range */
+    int num = 90000;
+    int denom = hrd->bitRateValue << (hrd->bitRateScale + BR_SHIFT);
+    reduceFraction(&num, &denom);
+    int64_t cpbState = (int64_t)m_bufferFillFinal;
+    int64_t cpbSize = (int64_t)hrd->cpbSizeValue << (hrd->cpbSizeScale + CPB_SHIFT);
+
+    if (cpbState < 0 || cpbState > cpbSize)
+    {
+        x265_log(m_param, X265_LOG_WARNING, "CPB %s: %.0lf bits in a %.0lf-bit buffer\n",
+                 cpbState < 0 ? "underflow" : "overflow", (float)cpbState/denom, (float)cpbSize/denom);
+    }
+
+    /* NOTE(review): the uint32_t cast binds to the numerator, so
+     * (num * cpbState + denom) is truncated to 32 bits before the division —
+     * confirm against upstream whether this truncation is intentional. */
+    seiBP->m_initialCpbRemovalDelay = (uint32_t)(num * cpbState + denom) / denom;
+    seiBP->m_initialCpbRemovalDelayOffset = (uint32_t)(num * cpbSize + denom) / denom - seiBP->m_initialCpbRemovalDelay;
+}
+
+/* Refresh the working buffer fill from the final (post-encode) fullness,
+ * then let the encoder subtract the planned sizes of frames in flight. */
+void RateControl::updateVbvPlan(Encoder* enc)
+{
+    m_bufferFill = m_bufferFillFinal;
+    enc->updateVbvPlan(this);
+}
+
+/* Linear size model: predicted bits ~= (coeff * complexity + offset),
+ * scaled down by qScale and normalized by the predictor's sample count. */
+double RateControl::predictSize(Predictor *p, double q, double var)
+{
+    double modeledBits = p->coeff * var + p->offset;
+    return modeledBits / (q * p->count);
+}
+
+/* Clamp a tentative qScale so the resulting frame respects VBV constraints.
+ * When lookahead data is available, the planned future frames are simulated
+ * to keep the buffer in a healthy band; otherwise a purely reactive
+ * fallback is used.  Also enforces MinCR and, for CRF, the rate-factor
+ * max-increment ceiling.  Returns the clipped qScale. */
+double RateControl::clipQscale(Frame* curFrame, RateControlEntry* rce, double q)
+{
+    // B-frames are not directly subject to VBV,
+    // since they are controlled by referenced P-frames' QPs.
+    double q0 = q;
+    if (m_isVbv && m_currentSatd > 0 && curFrame)
+    {
+        if (m_param->lookaheadDepth || m_param->rc.cuTree ||
+            m_param->scenecutThreshold ||
+            (m_param->bFrameAdaptive && m_param->bframes))
+        {
+           /* Lookahead VBV: If lookahead is done, raise the quantizer as necessary
+            * such that no frames in the lookahead overflow and such that the buffer
+            * is in a reasonable state by the end of the lookahead. */
+            int loopTerminate = 0;
+            /* Avoid an infinite loop. */
+            for (int iterations = 0; iterations < 1000 && loopTerminate != 3; iterations++)
+            {
+                double frameQ[3];
+                double curBits;
+                curBits = predictSize(&m_pred[m_predType], q, (double)m_currentSatd);
+                double bufferFillCur = m_bufferFill - curBits;
+                double targetFill;
+                double totalDuration = m_frameDuration;
+                /* derive per-slice-type qScales from the candidate q using the
+                 * configured ip/pb ratios */
+                frameQ[P_SLICE] = m_sliceType == I_SLICE ? q * m_param->rc.ipFactor : (m_sliceType == B_SLICE ? q / m_param->rc.pbFactor : q);
+                frameQ[B_SLICE] = frameQ[P_SLICE] * m_param->rc.pbFactor;
+                frameQ[I_SLICE] = frameQ[P_SLICE] / m_param->rc.ipFactor;
+                /* Loop over the planned future frames. */
+                for (int j = 0; bufferFillCur >= 0; j++)
+                {
+                    int type = curFrame->m_lowres.plannedType[j];
+                    /* simulate at most one second ahead */
+                    if (type == X265_TYPE_AUTO || totalDuration >= 1.0)
+                        break;
+                    totalDuration += m_frameDuration;
+                    double wantedFrameSize = m_vbvMaxRate * m_frameDuration;
+                    if (bufferFillCur + wantedFrameSize <= m_bufferSize)
+                        bufferFillCur += wantedFrameSize;
+                    int64_t satd = curFrame->m_lowres.plannedSatd[j] >> (X265_DEPTH - 8);
+                    type = IS_X265_TYPE_I(type) ? I_SLICE : IS_X265_TYPE_B(type) ? B_SLICE : P_SLICE;
+                    int predType = getPredictorType(curFrame->m_lowres.plannedType[j], type);
+                    curBits = predictSize(&m_pred[predType], frameQ[type], (double)satd);
+                    bufferFillCur -= curBits;
+                }
+
+                /* Try to get the buffer at least 50% filled, but don't set an impossible goal. */
+                double finalDur = 1;
+                if (m_param->rc.bStrictCbr)
+                {
+                    finalDur = x265_clip3(0.4, 1.0, totalDuration);
+                }
+                targetFill = X265_MIN(m_bufferFill + totalDuration * m_vbvMaxRate * 0.5 , m_bufferSize * (1 - 0.5 * finalDur));
+                if (bufferFillCur < targetFill)
+                {
+                    q *= 1.01;
+                    loopTerminate |= 1;
+                    continue;
+                }
+                /* Try to get the buffer not more than 80% filled, but don't set an impossible goal. */
+                targetFill = x265_clip3(m_bufferSize * (1 - 0.2 * finalDur), m_bufferSize, m_bufferFill - totalDuration * m_vbvMaxRate * 0.5);
+                if (m_isCbr && bufferFillCur > targetFill && !m_isSceneTransition)
+                {
+                    q /= 1.01;
+                    loopTerminate |= 2;
+                    continue;
+                }
+                break;
+            }
+            /* never let the lookahead simulation cut qScale below half the input */
+            q = X265_MAX(q0 / 2, q);
+        }
+        else
+        {
+            /* Fallback to old purely-reactive algorithm: no lookahead. */
+            if ((m_sliceType == P_SLICE || m_sliceType == B_SLICE ||
+                    (m_sliceType == I_SLICE && m_lastNonBPictType == I_SLICE)) &&
+                m_bufferFill / m_bufferSize < 0.5)
+            {
+                q /= x265_clip3(0.5, 1.0, 2.0 * m_bufferFill / m_bufferSize);
+            }
+            // Now a hard threshold to make sure the frame fits in VBV.
+            // This one is mostly for I-frames.
+            double bits = predictSize(&m_pred[m_predType], q, (double)m_currentSatd);
+
+            // For small VBVs, allow the frame to use up the entire VBV.
+            double maxFillFactor;
+            maxFillFactor = m_bufferSize >= 5 * m_bufferRate ? 2 : 1;
+            // For single-frame VBVs, request that the frame use up the entire VBV.
+            double minFillFactor = m_singleFrameVbv ? 1 : 2;
+
+            /* iterate because predictSize() is not linear in q */
+            for (int iterations = 0; iterations < 10; iterations++)
+            {
+                double qf = 1.0;
+                if (bits > m_bufferFill / maxFillFactor)
+                    qf = x265_clip3(0.2, 1.0, m_bufferFill / (maxFillFactor * bits));
+                q /= qf;
+                bits *= qf;
+                if (bits < m_bufferRate / minFillFactor)
+                    q *= bits * minFillFactor / m_bufferRate;
+                bits = predictSize(&m_pred[m_predType], q, (double)m_currentSatd);
+            }
+
+            q = X265_MAX(q0, q);
+        }
+
+        /* Apply MinCR restrictions */
+        double pbits = predictSize(&m_pred[m_predType], q, (double)m_currentSatd);
+        if (pbits > rce->frameSizeMaximum)
+            q *= pbits / rce->frameSizeMaximum;
+        /* To detect frames that are more complex in SATD costs compared to prev window, yet 
+         * lookahead vbv reduces its qscale by half its value. Be on safer side and avoid drastic 
+         * qscale reductions for frames high in complexity */
+        bool mispredCheck = rce->movingAvgSum && m_currentSatd >= rce->movingAvgSum && q <= q0 / 2;
+        if (!m_isCbr || (m_isAbr && mispredCheck))
+            q = X265_MAX(q0, q);
+
+        if (m_rateFactorMaxIncrement)
+        {
+            /* CRF: never raise QP more than rateFactorMaxIncrement above the no-VBV QP */
+            double qpNoVbv = x265_qScale2qp(q0);
+            double qmax = X265_MIN(MAX_MAX_QPSCALE,x265_qp2qScale(qpNoVbv + m_rateFactorMaxIncrement));
+            return x265_clip3(MIN_QPSCALE, qmax, q);
+        }
+    }
+    if (m_2pass)
+    {
+        /* 2-pass: squash q toward the legal range with a logistic curve in
+         * log-qScale space instead of hard clipping */
+        double min = log(MIN_QPSCALE);
+        double max = log(MAX_MAX_QPSCALE);
+        q = (log(q) - min) / (max - min) - 0.5;
+        q = 1.0 / (1.0 + exp(-4 * q));
+        q = q*(max - min) + min;
+        return exp(q);
+    }
+    return x265_clip3(MIN_QPSCALE, MAX_MAX_QPSCALE, q);
+}
+
+/* Estimate the total encoded size (in bits) of the current frame at QP
+ * qpVbv: bits of the rows already encoded plus model predictions for the
+ * CUs still pending in each row.  For P slices, the co-located rows of the
+ * reference frame serve as a second predictor when QPs and SATD costs are
+ * comparable.  encodedBitsSoFar is an out-parameter returning the bits of
+ * the finished portion only. */
+double RateControl::predictRowsSizeSum(Frame* curFrame, RateControlEntry* rce, double qpVbv, int32_t& encodedBitsSoFar)
+{
+    uint32_t rowSatdCostSoFar = 0, totalSatdBits = 0;
+    encodedBitsSoFar = 0;
+
+    double qScale = x265_qp2qScale(qpVbv);
+    FrameData& curEncData = *curFrame->m_encData;
+    int picType = curEncData.m_slice->m_sliceType;
+    Frame* refFrame = curEncData.m_slice->m_refFrameList[0][0];
+
+    uint32_t maxRows = curEncData.m_slice->m_sps->numCuInHeight;
+    uint32_t maxCols = curEncData.m_slice->m_sps->numCuInWidth;
+
+    for (uint32_t row = 0; row < maxRows; row++)
+    {
+        encodedBitsSoFar += curEncData.m_rowStat[row].encodedBits;
+        rowSatdCostSoFar = curEncData.m_rowStat[row].diagSatd;
+        uint32_t satdCostForPendingCus = curEncData.m_rowStat[row].satdForVbv - rowSatdCostSoFar;
+        /* normalize SATD to 8-bit depth before feeding the predictors */
+        satdCostForPendingCus >>= X265_DEPTH - 8;
+        if (satdCostForPendingCus  > 0)
+        {
+            double pred_s = predictSize(rce->rowPred[0], qScale, satdCostForPendingCus);
+            uint32_t refRowSatdCost = 0, refRowBits = 0, intraCostForPendingCus = 0;
+            double refQScale = 0;
+
+            if (picType != I_SLICE)
+            {
+                /* gather the reference frame's cost/bits for the co-located span:
+                 * only the not-yet-encoded part of the row if it is partially
+                 * done, else the whole reference row */
+                FrameData& refEncData = *refFrame->m_encData;
+                uint32_t endCuAddr = maxCols * (row + 1);
+                uint32_t startCuAddr = curEncData.m_rowStat[row].numEncodedCUs;
+                if (startCuAddr)
+                {
+                    for (uint32_t cuAddr = startCuAddr + 1 ; cuAddr < endCuAddr; cuAddr++)
+                    {
+                        refRowSatdCost += refEncData.m_cuStat[cuAddr].vbvCost;
+                        refRowBits += refEncData.m_cuStat[cuAddr].totalBits;
+                    }
+                }
+                else
+                {
+                    refRowBits = refEncData.m_rowStat[row].encodedBits;
+                    refRowSatdCost = refEncData.m_rowStat[row].satdForVbv;
+                }
+
+                refRowSatdCost >>= X265_DEPTH - 8;
+                refQScale = refEncData.m_rowStat[row].diagQpScale;
+            }
+
+            if (picType == I_SLICE || qScale >= refQScale)
+            {
+                if (picType == P_SLICE 
+                    && refFrame 
+                    && refFrame->m_encData->m_slice->m_sliceType == picType
+                    && refQScale > 0
+                    && refRowSatdCost > 0)
+                {
+                    /* reference row is similar enough: average the model
+                     * prediction with a reference-scaled estimate */
+                    if (abs((int32_t)(refRowSatdCost - satdCostForPendingCus)) < (int32_t)satdCostForPendingCus / 2)
+                    {
+                        double predTotal = refRowBits * satdCostForPendingCus / refRowSatdCost * refQScale / qScale;
+                        totalSatdBits += (int32_t)((pred_s + predTotal) * 0.5);
+                        continue;
+                    }
+                }
+                totalSatdBits += (int32_t)pred_s;
+            }
+            else if (picType == P_SLICE)
+            {
+                intraCostForPendingCus = curEncData.m_rowStat[row].intraSatdForVbv - curEncData.m_rowStat[row].diagIntraSatd;
+                /* Our QP is lower than the reference! */
+                double pred_intra = predictSize(rce->rowPred[1], qScale, intraCostForPendingCus);
+                /* Sum: better to overestimate than underestimate by using only one of the two predictors. */
+                totalSatdBits += (int32_t)(pred_intra + pred_s);
+            }
+            else
+                totalSatdBits += (int32_t)pred_s;
+        }
+    }
+
+    return totalSatdBits + encodedBitsSoFar;
+}
+
+/* Per-row VBV rate control (wavefront "diagonal" scheduling).  First updates
+ * the row-level size predictors with the just-encoded row, then nudges qpVbv
+ * up or down in 0.5-QP steps so the predicted frame size respects the
+ * planned size, the VBV buffer, and MinCR.  Returns 0 to continue encoding,
+ * or -1 to request that the row be re-encoded at the adjusted QP. */
+int RateControl::rowDiagonalVbvRateControl(Frame* curFrame, uint32_t row, RateControlEntry* rce, double& qpVbv)
+{
+    FrameData& curEncData = *curFrame->m_encData;
+    double qScaleVbv = x265_qp2qScale(qpVbv);
+    uint64_t rowSatdCost = curEncData.m_rowStat[row].diagSatd;
+    double encodedBits = curEncData.m_rowStat[row].encodedBits;
+
+    /* row 0 is folded into row 1's sample so the predictor never sees a
+     * lone, unrepresentative first row */
+    if (row == 1)
+    {
+        rowSatdCost += curEncData.m_rowStat[0].diagSatd;
+        encodedBits += curEncData.m_rowStat[0].encodedBits;
+    }
+    rowSatdCost >>= X265_DEPTH - 8;
+    updatePredictor(rce->rowPred[0], qScaleVbv, (double)rowSatdCost, encodedBits);
+    if (curEncData.m_slice->m_sliceType == P_SLICE)
+    {
+        Frame* refFrame = curEncData.m_slice->m_refFrameList[0][0];
+        if (qpVbv < refFrame->m_encData->m_rowStat[row].diagQp)
+        {
+            uint64_t intraRowSatdCost = curEncData.m_rowStat[row].diagIntraSatd;
+            if (row == 1)
+                intraRowSatdCost += curEncData.m_rowStat[0].diagIntraSatd;
+
+            updatePredictor(rce->rowPred[1], qScaleVbv, (double)intraRowSatdCost, encodedBits);
+        }
+    }
+
+    int canReencodeRow = 1;
+    /* tweak quality based on difference from predicted size */
+    double prevRowQp = qpVbv;
+    double qpAbsoluteMax = QP_MAX_MAX;
+    double qpAbsoluteMin = QP_MIN;
+    if (m_rateFactorMaxIncrement)
+        qpAbsoluteMax = X265_MIN(qpAbsoluteMax, rce->qpNoVbv + m_rateFactorMaxIncrement);
+
+    if (m_rateFactorMaxDecrement)
+        qpAbsoluteMin = X265_MAX(qpAbsoluteMin, rce->qpNoVbv - m_rateFactorMaxDecrement);
+
+    /* per-row movement is additionally limited to +/- qpStep */
+    double qpMax = X265_MIN(prevRowQp + m_param->rc.qpStep, qpAbsoluteMax);
+    double qpMin = X265_MAX(prevRowQp - m_param->rc.qpStep, qpAbsoluteMin);
+    double stepSize = 0.5;
+    double bufferLeftPlanned = rce->bufferFill - rce->frameSizePlanned;
+
+    const SPS& sps = *curEncData.m_slice->m_sps;
+    double maxFrameError = X265_MAX(0.05, 1.0 / sps.numCuInHeight);
+
+    if (row < sps.numCuInHeight - 1)
+    {
+        /* More threads means we have to be more cautious in letting ratecontrol use up extra bits. */
+        double rcTol = bufferLeftPlanned / m_param->frameNumThreads * m_rateTolerance;
+        int32_t encodedBitsSoFar = 0;
+        double accFrameBits = predictRowsSizeSum(curFrame, rce, qpVbv, encodedBitsSoFar);
+
+        /* * Don't increase the row QPs until a sufficient amount of the bits of
+         * the frame have been processed, in case a flat area at the top of the
+         * frame was measured inaccurately. */
+        if (encodedBitsSoFar < 0.05f * rce->frameSizePlanned)
+            qpMax = qpAbsoluteMax = prevRowQp;
+
+        if (rce->sliceType != I_SLICE || (m_param->rc.bStrictCbr && rce->poc > 0))
+            rcTol *= 0.5;
+
+        if (!m_isCbr)
+            qpMin = X265_MAX(qpMin, rce->qpNoVbv);
+
+        double totalBitsNeeded = m_wantedBitsWindow;
+        if (m_param->totalFrames)
+            totalBitsNeeded = (m_param->totalFrames * m_bitrate) / m_fps;
+        double abrOvershoot = (accFrameBits + m_totalBits - m_wantedBitsWindow) / totalBitsNeeded;
+
+        /* raise QP while the frame is predicted to overshoot its plan or
+         * eat too far into the planned buffer margin */
+        while (qpVbv < qpMax
+               && (((accFrameBits > rce->frameSizePlanned + rcTol) ||
+                   (rce->bufferFill - accFrameBits < bufferLeftPlanned * 0.5) ||
+                   (accFrameBits > rce->frameSizePlanned && qpVbv < rce->qpNoVbv))
+                   && (!m_param->rc.bStrictCbr ? 1 : abrOvershoot > 0.1)))
+        {
+            qpVbv += stepSize;
+            accFrameBits = predictRowsSizeSum(curFrame, rce, qpVbv, encodedBitsSoFar);
+            abrOvershoot = (accFrameBits + m_totalBits - m_wantedBitsWindow) / totalBitsNeeded;
+        }
+
+        /* lower QP while the frame is predicted to come in well under plan
+         * (or would underflow the buffer's minimum occupancy) */
+        while (qpVbv > qpMin
+               && (qpVbv > curEncData.m_rowStat[0].diagQp || m_singleFrameVbv)
+               && (((accFrameBits < rce->frameSizePlanned * 0.8f && qpVbv <= prevRowQp)
+                   || accFrameBits < (rce->bufferFill - m_bufferSize + m_bufferRate) * 1.1)
+                   && (!m_param->rc.bStrictCbr ? 1 : abrOvershoot < 0)))
+        {
+            qpVbv -= stepSize;
+            accFrameBits = predictRowsSizeSum(curFrame, rce, qpVbv, encodedBitsSoFar);
+            abrOvershoot = (accFrameBits + m_totalBits - m_wantedBitsWindow) / totalBitsNeeded;
+        }
+
+        /* strict CBR: late in the clip, actively correct accumulated ABR
+         * over/undershoot */
+        if (m_param->rc.bStrictCbr && m_param->totalFrames)
+        {
+            double timeDone = (double)(m_framesDone) / m_param->totalFrames;
+            while (qpVbv < qpMax && (qpVbv < rce->qpNoVbv + (m_param->rc.qpStep * timeDone)) &&
+                   (timeDone > 0.75 && abrOvershoot > 0))
+            {
+                qpVbv += stepSize;
+                accFrameBits = predictRowsSizeSum(curFrame, rce, qpVbv, encodedBitsSoFar);
+                abrOvershoot = (accFrameBits + m_totalBits - m_wantedBitsWindow) / totalBitsNeeded;
+            }
+            if (qpVbv > curEncData.m_rowStat[0].diagQp &&
+                abrOvershoot < -0.1 && timeDone > 0.5 && accFrameBits < rce->frameSizePlanned - rcTol)
+            {
+                qpVbv -= stepSize;
+                accFrameBits = predictRowsSizeSum(curFrame, rce, qpVbv, encodedBitsSoFar);
+            }
+        }
+
+        /* avoid VBV underflow or MinCr violation */
+        while ((qpVbv < qpAbsoluteMax)
+               && ((rce->bufferFill - accFrameBits < m_bufferRate * maxFrameError) ||
+                   (rce->frameSizeMaximum - accFrameBits < rce->frameSizeMaximum * maxFrameError)))
+        {
+            qpVbv += stepSize;
+            accFrameBits = predictRowsSizeSum(curFrame, rce, qpVbv, encodedBitsSoFar);
+        }
+
+        rce->frameSizeEstimated = accFrameBits;
+
+        /* If the current row was large enough to cause a large QP jump, try re-encoding it. */
+        if (qpVbv > qpMax && prevRowQp < qpMax && canReencodeRow)
+        {
+            /* Bump QP to halfway in between... close enough. */
+            qpVbv = x265_clip3(prevRowQp + 1.0f, qpMax, (prevRowQp + qpVbv) * 0.5);
+            return -1;
+        }
+
+        if (m_param->rc.rfConstantMin)
+        {
+            if (qpVbv < qpMin && prevRowQp > qpMin && canReencodeRow)
+            {
+                qpVbv = x265_clip3(qpMin, prevRowQp, (prevRowQp + qpVbv) * 0.5);
+                return -1;
+            }
+        }
+    }
+    else
+    {
+        int32_t encodedBitsSoFar = 0;
+        rce->frameSizeEstimated = predictRowsSizeSum(curFrame, rce, qpVbv, encodedBitsSoFar);
+
+        /* Last-ditch attempt: if the last row of the frame underflowed the VBV,
+         * try again. */
+        if ((rce->frameSizeEstimated > (rce->bufferFill - m_bufferRate * maxFrameError) &&
+             qpVbv < qpMax && canReencodeRow))
+        {
+            qpVbv = qpMax;
+            return -1;
+        }
+    }
+    return 0;
+}
+
+/* Map a frame's complexity onto a base qScale via the q-compress power
+ * curve, then divide by the caller-supplied rateFactor; user-defined zone
+ * overrides are applied last. */
+/* modify the bitrate curve from pass1 for one frame */
+double RateControl::getQScale(RateControlEntry *rce, double rateFactor)
+{
+    double q;
+
+    if (m_param->rc.cuTree)
+    {
+        // Scale and units are obtained from rateNum and rateDenom for videos with fixed frame rates.
+        double timescale = (double)m_param->fpsDenom / (2 * m_param->fpsNum);
+        q = pow(BASE_FRAME_DURATION / CLIP_DURATION(2 * timescale), 1 - m_param->rc.qCompress);
+    }
+    else
+        q = pow(rce->blurredComplexity, 1 - m_param->rc.qCompress);
+    // avoid NaN's in the Rceq
+    if (rce->coeffBits + rce->mvBits == 0)
+        q = m_lastQScaleFor[rce->sliceType];
+    else
+    {
+        m_lastRceq = q;
+        q /= rateFactor;
+    }
+    
+    /* zones may force an exact QP or scale the target bitrate locally */
+    x265_zone* zone = getZone();
+    if (zone)
+    {
+        if (zone->bForceQp)
+            q = x265_qp2qScale(zone->qp);
+        else
+            q /= zone->bitrateFactor;
+    }
+    return q;
+}
+
+/* Fold one (qScale, complexity, bits) observation into the linear size
+ * predictor using exponential decay.  Samples with negligible complexity
+ * are skipped, and the per-sample coefficient is clipped to within 2x of
+ * the current average so one outlier cannot destabilize the model. */
+void RateControl::updatePredictor(Predictor *p, double q, double var, double bits)
+{
+    if (var < 10)
+        return;
+    const double clipRange = 2;
+    double avgCoeff = p->coeff / p->count;
+    double sampleCoeff = bits * q / var;
+    double clippedCoeff = x265_clip3(avgCoeff / clipRange, avgCoeff * clipRange, sampleCoeff);
+    double sampleOffset = bits * q - clippedCoeff * var;
+    if (sampleOffset >= 0)
+        sampleCoeff = clippedCoeff;
+    else
+        sampleOffset = 0;
+    /* age the prior state, then accumulate the new sample */
+    p->count  *= p->decay;
+    p->coeff  *= p->decay;
+    p->offset *= p->decay;
+    p->count++;
+    p->coeff  += sampleCoeff;
+    p->offset += sampleOffset;
+}
+
+/* After a frame finishes, feed its actual size into the frame-level size
+ * predictor and advance the VBV buffer state by one frame interval. */
+void RateControl::updateVbv(int64_t bits, RateControlEntry* rce)
+{
+    /* referenced B-frames use the dedicated predictor slot 3 */
+    int predType = rce->sliceType;
+    predType = rce->sliceType == B_SLICE && rce->keptAsRef ? 3 : predType;
+    if (rce->lastSatd >= m_ncu)
+        updatePredictor(&m_pred[predType], x265_qp2qScale(rce->qpaRc), (double)rce->lastSatd, (double)bits);
+    if (!m_isVbv)
+        return;
+
+    m_bufferFillFinal -= bits;
+
+    if (m_bufferFillFinal < 0)
+        x265_log(m_param, X265_LOG_WARNING, "poc:%d, VBV underflow (%.0f bits)\n", rce->poc, m_bufferFillFinal);
+
+    /* clamp, then add one frame interval's worth of input bits, capped at
+     * the buffer size */
+    m_bufferFillFinal = X265_MAX(m_bufferFillFinal, 0);
+    m_bufferFillFinal += m_bufferRate;
+    m_bufferFillFinal = X265_MIN(m_bufferFillFinal, m_bufferSize);
+}
+
+/* After encoding one frame, update rate control state */
+/* Blocks until all earlier frames have passed this point, computes the
+ * frame's average RC/AQ QPs, updates ABR/CRF accumulators (amortizing
+ * I-frame cost), updates the VBV buffer and HRD timing, and finally
+ * releases the next frame.  Returns 0 on success. */
+int RateControl::rateControlEnd(Frame* curFrame, int64_t bits, RateControlEntry* rce)
+{
+    /* enforce encode-order sequencing of start/end events */
+    int orderValue = m_startEndOrder.get();
+    int endOrdinal = (rce->encodeOrder + m_param->frameNumThreads) * 2 - 1;
+    while (orderValue < endOrdinal && !m_bTerminated)
+    {
+        /* no more frames are being encoded, so fake the start event if we would
+         * have blocked on it. Note that this does not enforce rateControlEnd()
+         * ordering during flush, but this has no impact on the outputs */
+        if (m_finalFrameCount && orderValue >= 2 * m_finalFrameCount)
+            break;
+        orderValue = m_startEndOrder.waitForChange(orderValue);
+    }
+
+    FrameData& curEncData = *curFrame->m_encData;
+    int64_t actualBits = bits;
+    Slice *slice = curEncData.m_slice;
+
+    /* derive the frame's average RC and AQ QPs from the per-row sums */
+    if (m_param->rc.aqMode || m_isVbv)
+    {
+        if (m_isVbv)
+        {
+            /* determine avg QP decided by VBV rate control */
+            for (uint32_t i = 0; i < slice->m_sps->numCuInHeight; i++)
+                curEncData.m_avgQpRc += curEncData.m_rowStat[i].sumQpRc;
+
+            curEncData.m_avgQpRc /= slice->m_sps->numCUsInFrame;
+            rce->qpaRc = curEncData.m_avgQpRc;
+        }
+
+        if (m_param->rc.aqMode)
+        {
+            /* determine actual avg encoded QP, after AQ/cutree adjustments */
+            for (uint32_t i = 0; i < slice->m_sps->numCuInHeight; i++)
+                curEncData.m_avgQpAq += curEncData.m_rowStat[i].sumQpAq;
+
+            curEncData.m_avgQpAq /= (slice->m_sps->numCUsInFrame * NUM_4x4_PARTITIONS);
+        }
+        else
+            curEncData.m_avgQpAq = curEncData.m_avgQpRc;
+    }
+
+    if (m_isAbr)
+    {
+        if (m_param->rc.rateControlMode == X265_RC_ABR && !m_param->rc.bStatRead)
+            checkAndResetABR(rce, true);
+
+        if (m_param->rc.rateControlMode == X265_RC_CRF)
+        {
+            if (int(curEncData.m_avgQpRc + 0.5) == slice->m_sliceQp)
+                curEncData.m_rateFactor = m_rateFactorConstant;
+            else
+            {
+                /* If vbv changed the frame QP recalculate the rate-factor */
+                double baseCplx = m_ncu * (m_param->bframes ? 120 : 80);
+                double mbtree_offset = m_param->rc.cuTree ? (1.0 - m_param->rc.qCompress) * 13.5 : 0;
+                curEncData.m_rateFactor = pow(baseCplx, 1 - m_qCompress) /
+                    x265_qp2qScale(int(curEncData.m_avgQpRc + 0.5) + mbtree_offset);
+            }
+        }
+    }
+
+    if (m_isAbr && !m_isAbrReset)
+    {
+        /* amortize part of each I slice over the next several frames, up to
+         * keyint-max, to avoid over-compensating for the large I slice cost */
+        if (!m_param->rc.bStatWrite && !m_param->rc.bStatRead)
+        {
+            if (rce->sliceType == I_SLICE)
+            {
+                /* previous I still had a residual; roll it into the new loan */
+                if (m_residualFrames)
+                    bits += m_residualCost * m_residualFrames;
+                m_residualFrames = X265_MIN((int)rce->amortizeFrames, m_param->keyframeMax);
+                m_residualCost = (int)((bits * rce->amortizeFraction) / m_residualFrames);
+                bits -= m_residualCost * m_residualFrames;
+            }
+            else if (m_residualFrames)
+            {
+                bits += m_residualCost;
+                m_residualFrames--;
+            }
+        }
+        /* correct cplxrSum/totalBits for the difference between the actual
+         * bits and the mid-frame estimates added in rateControlUpdateStats() */
+        if (rce->sliceType != B_SLICE)
+        {
+            /* The factor 1.5 is to tune up the actual bits, otherwise the cplxrSum is scaled too low
+                * to improve short term compensation for next frame. */
+            m_cplxrSum += (bits * x265_qp2qScale(rce->qpaRc) / rce->qRceq) - (rce->rowCplxrSum);
+        }
+        else
+        {
+            /* Depends on the fact that B-frame's QP is an offset from the following P-frame's.
+                * Not perfectly accurate with B-refs, but good enough. */
+            m_cplxrSum += (bits * x265_qp2qScale(rce->qpaRc) / (rce->qRceq * fabs(m_param->rc.pbFactor))) - (rce->rowCplxrSum);
+        }
+        m_wantedBitsWindow += m_frameDuration * m_bitrate;
+        m_totalBits += bits - rce->rowTotalBits;
+        m_encodedBits += actualBits;
+        int pos = m_sliderPos - m_param->frameNumThreads;
+        if (pos >= 0)
+            m_encodedBitsWindow[pos % s_slidingWindowFrames] = actualBits;
+    }
+
+    if (m_2pass)
+    {
+        m_expectedBitsSum += qScale2bits(rce, x265_qp2qScale(rce->newQp));
+        m_totalBits += bits - rce->rowTotalBits;
+    }
+
+    if (m_isVbv)
+    {
+        updateVbv(actualBits, rce);
+
+        /* update HRD timing (access-unit arrival/removal times) for SEI output */
+        if (m_param->bEmitHRDSEI)
+        {
+            const VUI *vui = &curEncData.m_slice->m_sps->vuiParameters;
+            const HRDInfo *hrd = &vui->hrdParameters;
+            const TimingInfo *time = &vui->timingInfo;
+            if (!curFrame->m_poc)
+            {
+                // first access unit initializes the HRD
+                rce->hrdTiming->cpbInitialAT = 0;
+                rce->hrdTiming->cpbRemovalTime = m_nominalRemovalTime = (double)m_bufPeriodSEI.m_initialCpbRemovalDelay / 90000;
+            }
+            else
+            {
+                rce->hrdTiming->cpbRemovalTime = m_nominalRemovalTime + (double)rce->picTimingSEI->m_auCpbRemovalDelay * time->numUnitsInTick / time->timeScale;
+                double cpbEarliestAT = rce->hrdTiming->cpbRemovalTime - (double)m_bufPeriodSEI.m_initialCpbRemovalDelay / 90000;
+                if (!curFrame->m_lowres.bKeyframe)
+                    cpbEarliestAT -= (double)m_bufPeriodSEI.m_initialCpbRemovalDelayOffset / 90000;
+
+                rce->hrdTiming->cpbInitialAT = hrd->cbrFlag ? m_prevCpbFinalAT : X265_MAX(m_prevCpbFinalAT, cpbEarliestAT);
+            }
+
+            uint32_t cpbsizeUnscale = hrd->cpbSizeValue << (hrd->cpbSizeScale + CPB_SHIFT);
+            rce->hrdTiming->cpbFinalAT = m_prevCpbFinalAT = rce->hrdTiming->cpbInitialAT + actualBits / cpbsizeUnscale;
+            rce->hrdTiming->dpbOutputTime = (double)rce->picTimingSEI->m_picDpbOutputDelay * time->numUnitsInTick / time->timeScale + rce->hrdTiming->cpbRemovalTime;
+        }
+    }
+    rce->isActive = false;
+    // Allow rateControlStart of next frame only when rateControlEnd of previous frame is over
+    m_startEndOrder.incr();
+    return 0;
+}
+
+/* called to write out the rate control frame stats info in multipass encodes */
+/* Appends one frame's summary line to the stats file and, when cutree is
+ * active, the per-CU QP offsets to the cutree stats file.  Returns 0 on
+ * success, 1 on any write failure. */
+int RateControl::writeRateControlFrameStats(Frame* curFrame, RateControlEntry* rce)
+{
+    FrameData& curEncData = *curFrame->m_encData;
+    /* 'i' marks an open-GOP (non-IDR) intra frame; lower-case 'b' is an
+     * unreferenced B */
+    char cType = rce->sliceType == I_SLICE ? (rce->poc > 0 && m_param->bOpenGOP ? 'i' : 'I')
+        : rce->sliceType == P_SLICE ? 'P'
+        : IS_REFERENCED(curFrame) ? 'B' : 'b';
+    if (fprintf(m_statFileOut,
+                "in:%d out:%d type:%c q:%.2f q-aq:%.2f tex:%d mv:%d misc:%d icu:%.2f pcu:%.2f scu:%.2f ;\n",
+                rce->poc, rce->encodeOrder,
+                cType, curEncData.m_avgQpRc, curEncData.m_avgQpAq,
+                curFrame->m_encData->m_frameStats.coeffBits,
+                curFrame->m_encData->m_frameStats.mvBits,
+                curFrame->m_encData->m_frameStats.miscBits,
+                curFrame->m_encData->m_frameStats.percent8x8Intra * m_ncu,
+                curFrame->m_encData->m_frameStats.percent8x8Inter * m_ncu,
+                curFrame->m_encData->m_frameStats.percent8x8Skip  * m_ncu) < 0)
+        goto writeFailure;
+    /* Don't re-write the data in multi-pass mode. */
+    if (m_param->rc.cuTree && IS_REFERENCED(curFrame) && !m_param->rc.bStatRead)
+    {
+        uint8_t sliceType = (uint8_t)rce->sliceType;
+        /* QP offsets are stored as 8.8 fixed point */
+        for (int i = 0; i < m_ncu; i++)
+                m_cuTreeStats.qpBuffer[0][i] = (uint16_t)(curFrame->m_lowres.qpCuTreeOffset[i] * 256.0);
+        if (fwrite(&sliceType, 1, 1, m_cutreeStatFileOut) < 1)
+            goto writeFailure;
+        if (fwrite(m_cuTreeStats.qpBuffer[0], sizeof(uint16_t), m_ncu, m_cutreeStatFileOut) < (size_t)m_ncu)
+            goto writeFailure;
+    }
+    return 0;
+
+    writeFailure:
+    x265_log(m_param, X265_LOG_ERROR, "RatecontrolEnd: stats file write failure\n");
+    return 1;
+}
+#if defined(_MSC_VER)
+#pragma warning(disable: 4996) // POSIX function names are just fine, thank you
+#endif
+
+/* called when the encoder is flushing, and thus the final frame count is
+ * unambiguously known */
+void RateControl::setFinalFrameCount(int count)
+{
+    /* lets blocked waiters compute a hard upper bound on start/end events */
+    m_finalFrameCount = count;
+    /* unblock waiting threads */
+    m_startEndOrder.poke();
+}
+
+/* called when the encoder is closing, and no more frames will be output.
+ * all blocked functions must finish so the frame encoder threads can be
+ * closed */
+void RateControl::terminate()
+{
+    /* checked by the wait loops in rateControlEnd() and friends */
+    m_bTerminated = true;
+    /* unblock waiting threads */
+    m_startEndOrder.poke();
+}
+
+/* Release rate-control resources: finalize the two-pass stats files by
+ * renaming their ".temp" files over the final names, close the cutree
+ * input file, and free all buffers. */
+void RateControl::destroy()
+{
+    const char *fileName = m_param->rc.statFileName;
+    if (!fileName)
+        fileName = s_defaultStatFileName;
+
+    if (m_statFileOut)
+    {
+        fclose(m_statFileOut);
+        /* stats were written to "<name>.temp"; atomically promote them */
+        char *tmpFileName = strcatFilename(fileName, ".temp");
+        int bError = 1;
+        if (tmpFileName)
+        {
+           unlink(fileName);
+           bError = rename(tmpFileName, fileName);
+        }
+        if (bError)
+        {
+            x265_log(m_param, X265_LOG_ERROR, "failed to rename output stats file to \"%s\"\n",
+                     fileName);
+        }
+        X265_FREE(tmpFileName);
+    }
+
+    if (m_cutreeStatFileOut)
+    {
+        fclose(m_cutreeStatFileOut);
+        char *tmpFileName = strcatFilename(fileName, ".cutree.temp");
+        char *newFileName = strcatFilename(fileName, ".cutree");
+        int bError = 1;
+        if (tmpFileName && newFileName)
+        {
+           unlink(newFileName);
+           bError = rename(tmpFileName, newFileName);
+        }
+        /* NOTE(review): if strcatFilename() failed, newFileName is NULL here
+         * and is passed to the "%s" below — confirm x265_log tolerates that */
+        if (bError)
+        {
+            x265_log(m_param, X265_LOG_ERROR, "failed to rename cutree output stats file to \"%s\"\n",
+                     newFileName);
+        }
+        X265_FREE(tmpFileName);
+        X265_FREE(newFileName);
+    }
+
+    if (m_cutreeStatFileIn)
+        fclose(m_cutreeStatFileIn);
+
+    X265_FREE(m_rce2Pass);
+    for (int i = 0; i < 2; i++)
+        X265_FREE(m_cuTreeStats.qpBuffer[i]);
+    
+    X265_FREE(m_param->rc.zones);
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/ratecontrol.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,267 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Sumalatha Polureddy <sumalatha@multicorewareinc.com>
+ *          Aarthi Priya Thirumalai <aarthi@multicorewareinc.com>
+ *          Xun Xu, PPLive Corporation <xunxu@pptv.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_RATECONTROL_H
+#define X265_RATECONTROL_H
+
+#include "common.h"
+#include "sei.h"
+
+namespace X265_NS {
+// encoder namespace
+
+class Encoder;
+class Frame;
+class SEIBufferingPeriod;
+struct SPS;
+#define BASE_FRAME_DURATION 0.04
+
+/* Arbitrary limitations as a sanity check. */
+#define MAX_FRAME_DURATION 1.00
+#define MIN_FRAME_DURATION 0.01
+
+#define MIN_AMORTIZE_FRAME 10
+#define MIN_AMORTIZE_FRACTION 0.2
+#define CLIP_DURATION(f) x265_clip3(MIN_FRAME_DURATION, MAX_FRAME_DURATION, f)
+
+struct Predictor
+{
+    double coeff;
+    double count;
+    double decay;
+    double offset;
+};
+
+struct HRDTiming
+{
+    double cpbInitialAT;
+    double cpbFinalAT;
+    double dpbOutputTime;
+    double cpbRemovalTime;
+};
+
+struct RateControlEntry
+{
+    Predictor  rowPreds[3][2];
+    Predictor* rowPred[2];
+
+    int64_t lastSatd;      /* Contains the picture cost of the previous frame, required for resetAbr and VBV */
+    int64_t leadingNoBSatd;
+    int64_t rowTotalBits;  /* update cplxrsum and totalbits at the end of 2 rows */
+    double  blurredComplexity;
+    double  qpaRc;
+    double  qpAq;
+    double  qRceq;
+    double  frameSizePlanned;  /* frame Size decided by RateCotrol before encoding the frame */
+    double  bufferRate;
+    double  movingAvgSum;
+    double  rowCplxrSum;
+    double  qpNoVbv;
+    double  bufferFill;
+    double  frameDuration;
+    double  clippedDuration;
+    double  frameSizeEstimated; /* hold frameSize, updated from cu level vbv rc */
+    double  frameSizeMaximum;   /* max frame Size according to minCR restrictions and level of the video */
+    int     sliceType;
+    int     bframes;
+    int     poc;
+    int     encodeOrder;
+    bool    bLastMiniGopBFrame;
+    bool    isActive;
+    double  amortizeFrames;
+    double  amortizeFraction;
+    /* Required in 2-pass rate control */
+    uint64_t expectedBits; /* total expected bits up to the current frame (current one excluded) */
+    double   iCuCount;
+    double   pCuCount;
+    double   skipCuCount;
+    double   expectedVbv;
+    double   qScale;
+    double   newQScale;
+    double   newQp;
+    int      mvBits;
+    int      miscBits;
+    int      coeffBits;
+    bool     keptAsRef;
+
+    SEIPictureTiming *picTimingSEI;
+    HRDTiming        *hrdTiming;
+};
+
+class RateControl
+{
+public:
+
+    x265_param* m_param;
+    Slice*      m_curSlice;      /* all info about the current frame */
+    SliceType   m_sliceType;     /* Current frame type */
+    int         m_ncu;           /* number of CUs in a frame */
+    int         m_qp;            /* updated qp for current frame */
+
+    bool   m_isAbr;
+    bool   m_isVbv;
+    bool   m_isCbr;
+    bool   m_singleFrameVbv;
+
+    bool   m_isAbrReset;
+    int    m_lastAbrResetPoc;
+
+    double m_rateTolerance;
+    double m_frameDuration;     /* current frame duration in seconds */
+    double m_bitrate;
+    double m_rateFactorConstant;
+    double m_bufferSize;
+    double m_bufferFillFinal;  /* real buffer as of the last finished frame */
+    double m_bufferFill;       /* planned buffer, if all in-progress frames hit their bit budget */
+    double m_bufferRate;       /* # of bits added to buffer_fill after each frame */
+    double m_vbvMaxRate;       /* in kbps */
+    double m_rateFactorMaxIncrement; /* Don't allow RF above (CRF + this value). */
+    double m_rateFactorMaxDecrement; /* don't allow RF below (this value). */
+
+    Predictor m_pred[4];       /* Slice predictors to preidct bits for each Slice type - I,P,Bref and B */
+    int64_t m_leadingNoBSatd;
+    int     m_predType;       /* Type of slice predictors to be used - depends on the slice type */
+    double  m_ipOffset;
+    double  m_pbOffset;
+    int64_t m_bframeBits;
+    int64_t m_currentSatd;
+    int     m_qpConstant[3];
+    int     m_lastNonBPictType;
+    int     m_framesDone;        /* # of frames passed through RateCotrol already */
+
+    double  m_cplxrSum;          /* sum of bits*qscale/rceq */
+    double  m_wantedBitsWindow;  /* target bitrate * window */
+    double  m_accumPQp;          /* for determining I-frame quant */
+    double  m_accumPNorm;
+    double  m_lastQScaleFor[3];  /* last qscale for a specific pict type, used for max_diff & ipb factor stuff */
+    double  m_lstep;
+    double  m_shortTermCplxSum;
+    double  m_shortTermCplxCount;
+    double  m_lastRceq;
+    double  m_qCompress;
+    int64_t m_totalBits;        /* total bits used for already encoded frames (after ammortization) */
+    int64_t m_encodedBits;      /* bits used for encoded frames (without ammortization) */
+    double  m_fps;
+    int64_t m_satdCostWindow[50];
+    int64_t m_encodedBitsWindow[50];
+    int     m_sliderPos;
+
+    /* To detect a pattern of low detailed static frames in single pass ABR using satdcosts */
+    int64_t m_lastBsliceSatdCost;
+    int     m_numBframesInPattern;
+    bool    m_isPatternPresent;
+    bool    m_isSceneTransition;
+
+    /* a common variable on which rateControlStart, rateControlEnd and rateControUpdateStats waits to
+     * sync the calls to these functions. For example
+     * -F2:
+     * rceStart  10
+     * rceUpdate 10
+     * rceEnd    9
+     * rceStart  11
+     * rceUpdate 11
+     * rceEnd    10
+     * rceStart  12
+     * rceUpdate 12
+     * rceEnd    11 */
+    ThreadSafeInteger m_startEndOrder;
+    int     m_finalFrameCount;   /* set when encoder begins flushing */
+    bool    m_bTerminated;       /* set true when encoder is closing */
+
+    /* hrd stuff */
+    SEIBufferingPeriod m_bufPeriodSEI;
+    double  m_nominalRemovalTime;
+    double  m_prevCpbFinalAT;
+
+    /* 2 pass */
+    bool    m_2pass;
+    int     m_numEntries;
+    FILE*   m_statFileOut;
+    FILE*   m_cutreeStatFileOut;
+    FILE*   m_cutreeStatFileIn;
+    double  m_lastAccumPNorm;
+    double  m_expectedBitsSum;   /* sum of qscale2bits after rceq, ratefactor, and overflow, only includes finished frames */
+    int64_t m_predictedBits;
+    RateControlEntry* m_rce2Pass;
+
+    struct
+    {
+        uint16_t *qpBuffer[2]; /* Global buffers for converting MB-tree quantizer data. */
+        int qpBufPos;          /* In order to handle pyramid reordering, QP buffer acts as a stack.
+                                * This value is the current position (0 or 1). */
+    } m_cuTreeStats;
+
+    RateControl(x265_param& p);
+    bool init(const SPS& sps);
+    void initHRD(SPS& sps);
+
+    void setFinalFrameCount(int count);
+    void terminate();          /* un-block all waiting functions so encoder may close */
+    void destroy();
+
+    // to be called for each curFrame to process RateControl and set QP
+    int  rateControlStart(Frame* curFrame, RateControlEntry* rce, Encoder* enc);
+    void rateControlUpdateStats(RateControlEntry* rce);
+    int  rateControlEnd(Frame* curFrame, int64_t bits, RateControlEntry* rce);
+    int  rowDiagonalVbvRateControl(Frame* curFrame, uint32_t row, RateControlEntry* rce, double& qpVbv);
+    int  rateControlSliceType(int frameNum);
+    bool cuTreeReadFor2Pass(Frame* curFrame);
+    void hrdFullness(SEIBufferingPeriod* sei);
+    int writeRateControlFrameStats(Frame* curFrame, RateControlEntry* rce);
+protected:
+
+    static const int   s_slidingWindowFrames;
+    static const char* s_defaultStatFileName;
+
+    double m_amortizeFraction;
+    int    m_amortizeFrames;
+    int    m_residualFrames;
+    int    m_partialResidualFrames;
+    int    m_residualCost;
+    int    m_partialResidualCost;
+
+    x265_zone* getZone();
+    double getQScale(RateControlEntry *rce, double rateFactor);
+    double rateEstimateQscale(Frame* pic, RateControlEntry *rce); // main logic for calculating QP based on ABR
+    double tuneAbrQScaleFromFeedback(double qScale);
+    void   accumPQpUpdate();
+
+    int    getPredictorType(int lowresSliceType, int sliceType);
+    void   updateVbv(int64_t bits, RateControlEntry* rce);
+    void   updatePredictor(Predictor *p, double q, double var, double bits);
+    double clipQscale(Frame* pic, RateControlEntry* rce, double q);
+    void   updateVbvPlan(Encoder* enc);
+    double predictSize(Predictor *p, double q, double var);
+    void   checkAndResetABR(RateControlEntry* rce, bool isFrameDone);
+    double predictRowsSizeSum(Frame* pic, RateControlEntry* rce, double qpm, int32_t& encodedBits);
+    bool   initPass2();
+    double getDiffLimitedQScale(RateControlEntry *rce, double q);
+    double countExpectedBits();
+    bool   vbv2Pass(uint64_t allAvailableBits);
+    bool   findUnderflow(double *fills, int *t0, int *t1, int over);
+    bool   fixUnderflow(int t0, int t1, double adjustment, double qscaleMin, double qscaleMax);
+};
+}
+#endif // ifndef X265_RATECONTROL_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/rdcost.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,147 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_RDCOST_H
+#define X265_RDCOST_H
+
+#include "common.h"
+#include "slice.h"
+
+namespace X265_NS {
+// private namespace
+
+class RDCost
+{
+public:
+
+    /* all weights and factors stored as FIX8 */
+    uint64_t  m_lambda2;
+    uint64_t  m_lambda;
+    uint32_t  m_chromaDistWeight[2];
+    uint32_t  m_psyRdBase;
+    uint32_t  m_psyRd;
+    int       m_qp; /* QP used to configure lambda, may be higher than QP_MAX_SPEC but <= QP_MAX_MAX */
+
+    void setPsyRdScale(double scale)                { m_psyRdBase = (uint32_t)floor(65536.0 * scale * 0.33); }
+
+    void setQP(const Slice& slice, int qp)
+    {
+        x265_emms(); /* TODO: if the lambda tables were ints, this would not be necessary */
+        m_qp = qp;
+        setLambda(x265_lambda2_tab[qp], x265_lambda_tab[qp]);
+
+        /* Scale PSY RD factor by a slice type factor */
+        static const uint32_t psyScaleFix8[3] = { 300, 256, 96 }; /* B, P, I */
+        m_psyRd = (m_psyRdBase * psyScaleFix8[slice.m_sliceType]) >> 8;
+
+        /* Scale PSY RD factor by QP, at high QP psy-rd can cause artifacts */
+        if (qp >= 40)
+        {
+            int scale = qp >= QP_MAX_SPEC ? 0 : (QP_MAX_SPEC - qp) * 23;
+            m_psyRd = (m_psyRd * scale) >> 8;
+        }
+
+        int qpCb, qpCr;
+        if (slice.m_sps->chromaFormatIdc == X265_CSP_I420)
+        {
+            qpCb = (int)g_chromaScale[x265_clip3(QP_MIN, QP_MAX_MAX, qp + slice.m_pps->chromaQpOffset[0])];
+            qpCr = (int)g_chromaScale[x265_clip3(QP_MIN, QP_MAX_MAX, qp + slice.m_pps->chromaQpOffset[1])];
+        }
+        else
+        {
+            qpCb = x265_clip3(QP_MIN, QP_MAX_SPEC, qp + slice.m_pps->chromaQpOffset[0]);
+            qpCr = x265_clip3(QP_MIN, QP_MAX_SPEC, qp + slice.m_pps->chromaQpOffset[1]);
+        }
+
+        int chroma_offset_idx = X265_MIN(qp - qpCb + 12, MAX_CHROMA_LAMBDA_OFFSET);
+        uint16_t lambdaOffset = m_psyRd ? x265_chroma_lambda2_offset_tab[chroma_offset_idx] : 256;
+        m_chromaDistWeight[0] = lambdaOffset;
+
+        chroma_offset_idx = X265_MIN(qp - qpCr + 12, MAX_CHROMA_LAMBDA_OFFSET);
+        lambdaOffset = m_psyRd ? x265_chroma_lambda2_offset_tab[chroma_offset_idx] : 256;
+        m_chromaDistWeight[1] = lambdaOffset;
+    }
+
+    void setLambda(double lambda2, double lambda)
+    {
+        m_lambda2 = (uint64_t)floor(256.0 * lambda2);
+        m_lambda = (uint64_t)floor(256.0 * lambda);
+    }
+
+    inline uint64_t calcRdCost(sse_ret_t distortion, uint32_t bits) const
+    {
+        X265_CHECK(bits <= (UINT64_MAX - 128) / m_lambda2,
+#if X265_DEPTH <= 10
+                   "calcRdCost wrap detected dist: %u, bits %u, lambda: " X265_LL "\n",
+#else
+                   "calcRdCost wrap detected dist: " X265_LL ", bits %u, lambda: " X265_LL "\n",
+#endif
+                   distortion, bits, m_lambda2);
+        return distortion + ((bits * m_lambda2 + 128) >> 8);
+    }
+
+    /* return the difference in energy between the source block and the recon block */
+    inline int psyCost(int size, const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride) const
+    {
+        return primitives.cu[size].psy_cost_pp(source, sstride, recon, rstride);
+    }
+
+    /* return the difference in energy between the source block and the recon block */
+    inline int psyCost(int size, const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride) const
+    {
+        return primitives.cu[size].psy_cost_ss(source, sstride, recon, rstride);
+    }
+
+    /* return the RD cost of this prediction, including the effect of psy-rd */
+    inline uint64_t calcPsyRdCost(sse_ret_t distortion, uint32_t bits, uint32_t psycost) const
+    {
+        return distortion + ((m_lambda * m_psyRd * psycost) >> 24) + ((bits * m_lambda2) >> 8);
+    }
+
+    inline uint64_t calcRdSADCost(uint32_t sadCost, uint32_t bits) const
+    {
+        X265_CHECK(bits <= (UINT64_MAX - 128) / m_lambda,
+                   "calcRdSADCost wrap detected dist: %u, bits %u, lambda: " X265_LL "\n", sadCost, bits, m_lambda);
+        return sadCost + ((bits * m_lambda + 128) >> 8);
+    }
+
+    inline sse_ret_t scaleChromaDist(uint32_t plane, sse_ret_t dist) const
+    {
+        X265_CHECK(dist <= (UINT64_MAX - 128) / m_chromaDistWeight[plane - 1],
+#if X265_DEPTH <= 10
+                   "scaleChromaDist wrap detected dist: %u, lambda: %u\n",
+#else
+                   "scaleChromaDist wrap detected dist: " X265_LL " lambda: %u\n",
+#endif
+                   dist, m_chromaDistWeight[plane - 1]);
+        return (sse_ret_t)((dist * (uint64_t)m_chromaDistWeight[plane - 1] + 128) >> 8);
+    }
+
+    inline uint32_t getCost(uint32_t bits) const
+    {
+        return (uint32_t)((bits * m_lambda + 128) >> 8);
+    }
+};
+}
+
+#endif // ifndef X265_TCOMRDCOST_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/reference.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,174 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Deepthi Devaki <deepthidevaki@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "slice.h"
+#include "picyuv.h"
+
+#include "reference.h"
+
+using namespace X265_NS;
+
+MotionReference::MotionReference()
+{
+    weightBuffer[0] = NULL;
+    weightBuffer[1] = NULL;
+    weightBuffer[2] = NULL;
+}
+
+MotionReference::~MotionReference()
+{
+    X265_FREE(weightBuffer[0]);
+    X265_FREE(weightBuffer[1]);
+    X265_FREE(weightBuffer[2]);
+}
+
+int MotionReference::init(PicYuv* recPic, WeightParam *wp, const x265_param& p)
+{
+    reconPic = recPic;
+    numWeightedRows = 0;
+    lumaStride = recPic->m_stride;
+    chromaStride = recPic->m_strideC;
+    numInterpPlanes = p.subpelRefine > 2 ? 3 : 1; /* is chroma satd possible? */
+
+    /* directly reference the extended integer pel planes */
+    fpelPlane[0] = recPic->m_picOrg[0];
+    fpelPlane[1] = recPic->m_picOrg[1];
+    fpelPlane[2] = recPic->m_picOrg[2];
+    isWeighted = false;
+
+    if (wp)
+    {
+        uint32_t numCUinHeight = (reconPic->m_picHeight + g_maxCUSize - 1) / g_maxCUSize;
+
+        int marginX = reconPic->m_lumaMarginX;
+        int marginY = reconPic->m_lumaMarginY;
+        intptr_t stride = reconPic->m_stride;
+        int cuHeight = g_maxCUSize;
+
+        for (int c = 0; c < numInterpPlanes; c++)
+        {
+            if (c == 1)
+            {
+                marginX = reconPic->m_chromaMarginX;
+                marginY = reconPic->m_chromaMarginY;
+                stride  = reconPic->m_strideC;
+                cuHeight >>= reconPic->m_vChromaShift;
+            }
+
+            if (wp[c].bPresentFlag)
+            {
+                if (!weightBuffer[c])
+                {
+                    size_t padheight = (numCUinHeight * cuHeight) + marginY * 2;
+                    weightBuffer[c] = X265_MALLOC(pixel, stride * padheight);
+                    if (!weightBuffer[c])
+                        return -1;
+                }
+
+                /* use our buffer which will have weighted pixels written to it */
+                fpelPlane[c] = weightBuffer[c] + marginY * stride + marginX;
+                X265_CHECK(recPic->m_picOrg[c] - recPic->m_picBuf[c] == marginY * stride + marginX, "PicYuv pad calculation mismatch\n");
+
+                w[c].weight = wp[c].inputWeight;
+                w[c].offset = wp[c].inputOffset * (1 << (X265_DEPTH - 8));
+                w[c].shift = wp[c].log2WeightDenom;
+                w[c].round = w[c].shift ? 1 << (w[c].shift - 1) : 0;
+            }
+        }
+
+        isWeighted = true;
+    }
+
+    return 0;
+}
+
+void MotionReference::applyWeight(int finishedRows, int maxNumRows)
+{
+    finishedRows = X265_MIN(finishedRows, maxNumRows);
+    if (numWeightedRows >= finishedRows)
+        return;
+
+    int marginX = reconPic->m_lumaMarginX;
+    int marginY = reconPic->m_lumaMarginY;
+    intptr_t stride = reconPic->m_stride;
+    int width   = reconPic->m_picWidth;
+    int height  = (finishedRows - numWeightedRows) * g_maxCUSize;
+    if (finishedRows == maxNumRows && (reconPic->m_picHeight % g_maxCUSize))
+    {
+        /* the last row may be partial height */
+        height -= g_maxCUSize;
+        height += reconPic->m_picHeight % g_maxCUSize;
+    }
+    int cuHeight = g_maxCUSize;
+
+    for (int c = 0; c < numInterpPlanes; c++)
+    {
+        if (c == 1)
+        {
+            marginX = reconPic->m_chromaMarginX;
+            marginY = reconPic->m_chromaMarginY;
+            stride  = reconPic->m_strideC;
+            width    >>= reconPic->m_hChromaShift;
+            height   >>= reconPic->m_vChromaShift;
+            cuHeight >>= reconPic->m_vChromaShift;
+        }
+
+        /* Do not generate weighted predictions if using original picture */
+        if (fpelPlane[c] == reconPic->m_picOrg[c])
+            continue;
+
+        const pixel* src = reconPic->m_picOrg[c] + numWeightedRows * cuHeight * stride;
+        pixel* dst = fpelPlane[c] + numWeightedRows * cuHeight * stride;
+
+        // Computing weighted CU rows
+        int correction = IF_INTERNAL_PREC - X265_DEPTH; // intermediate interpolation depth
+        int padwidth = (width + 15) & ~15;              // weightp assembly needs even 16 byte widths
+        primitives.weight_pp(src, dst, stride, padwidth, height, w[c].weight, w[c].round << correction, w[c].shift + correction, w[c].offset);
+
+        // Extending Left & Right
+        primitives.extendRowBorder(dst, stride, width, height, marginX);
+
+        // Extending Above
+        if (numWeightedRows == 0)
+        {
+            pixel *pixY = fpelPlane[c] - marginX;
+            for (int y = 0; y < marginY; y++)
+                memcpy(pixY - (y + 1) * stride, pixY, stride * sizeof(pixel));
+        }
+
+        // Extending Bottom
+        if (finishedRows == maxNumRows)
+        {
+            int picHeight = reconPic->m_picHeight;
+            if (c) picHeight >>= reconPic->m_vChromaShift;
+            pixel *pixY = fpelPlane[c] - marginX + (picHeight - 1) * stride;
+            for (int y = 0; y < marginY; y++)
+                memcpy(pixY + (y + 1) * stride, pixY, stride * sizeof(pixel));
+        }
+    }
+
+    numWeightedRows = finishedRows;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/reference.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,56 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_REFERENCE_H
+#define X265_REFERENCE_H
+
+#include "primitives.h"
+#include "picyuv.h"
+#include "lowres.h"
+#include "mv.h"
+
+namespace X265_NS {
+// private x265 namespace
+
+struct WeightParam;
+
+class MotionReference : public ReferencePlanes
+{
+public:
+
+    MotionReference();
+    ~MotionReference();
+    int  init(PicYuv*, WeightParam* wp, const x265_param& p);
+    void applyWeight(int rows, int numRows);
+
+    pixel*  weightBuffer[3];
+    int     numInterpPlanes;
+    int     numWeightedRows;
+
+protected:
+
+    MotionReference& operator =(const MotionReference&);
+};
+}
+
+#endif // ifndef X265_REFERENCE_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/sao.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1709 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Min Chen <chenm003@163.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "frame.h"
+#include "framedata.h"
+#include "picyuv.h"
+#include "sao.h"
+
+namespace {
+
+inline int32_t roundIBDI(int32_t num, int32_t den)
+{
+    return num >= 0 ? ((num * 2 + den) / (den * 2)) : -((-num * 2 + den) / (den * 2));
+}
+
+/* get the sign of input variable (TODO: this is a dup, make common) */
+inline int8_t signOf(int x)
+{
+    return (x >> 31) | ((int)((((uint32_t)-x)) >> 31));
+}
+
+inline int signOf2(const int a, const int b)
+{
+    // NOTE: don't reorder below compare, both ICL, VC, GCC optimize strong depends on order!
+    int r = 0;
+    if (a < b)
+        r = -1;
+    if (a > b)
+        r = 1;
+    return r;
+}
+
+inline int64_t estSaoDist(int32_t count, int offset, int32_t offsetOrg)
+{
+    return (count * offset - offsetOrg * 2) * offset;
+}
+} // end anonymous namespace
+
+
+namespace X265_NS {
+
+const uint32_t SAO::s_eoTable[NUM_EDGETYPE] =
+{
+    1, // 0
+    2, // 1
+    0, // 2
+    3, // 3
+    4  // 4
+};
+
+SAO::SAO()
+{
+    m_count = NULL;
+    m_offset = NULL;
+    m_offsetOrg = NULL;
+    m_countPreDblk = NULL;
+    m_offsetOrgPreDblk = NULL;
+    m_refDepth = 0;
+    m_lumaLambda = 0;
+    m_chromaLambda = 0;
+    m_param = NULL;
+    m_clipTable = NULL;
+    m_clipTableBase = NULL;
+    m_tmpU1[0] = NULL;
+    m_tmpU1[1] = NULL;
+    m_tmpU1[2] = NULL;
+    m_tmpU2[0] = NULL;
+    m_tmpU2[1] = NULL;
+    m_tmpU2[2] = NULL;
+    m_tmpL1 = NULL;
+    m_tmpL2 = NULL;
+
+    m_depthSaoRate[0][0] = 0;
+    m_depthSaoRate[0][1] = 0;
+    m_depthSaoRate[0][2] = 0;
+    m_depthSaoRate[0][3] = 0;
+    m_depthSaoRate[1][0] = 0;
+    m_depthSaoRate[1][1] = 0;
+    m_depthSaoRate[1][2] = 0;
+    m_depthSaoRate[1][3] = 0;
+}
+
+bool SAO::create(x265_param* param)
+{
+    m_param = param;
+    if (param->internalCsp != X265_CSP_I400) {
+        m_hChromaShift = CHROMA_H_SHIFT(param->internalCsp);
+        m_vChromaShift = CHROMA_V_SHIFT(param->internalCsp);
+        m_numPlanes = 3;
+    } else {
+        m_numPlanes = 1;
+    }
+  
+    m_numCuInWidth =  (m_param->sourceWidth + g_maxCUSize - 1) / g_maxCUSize;
+    m_numCuInHeight = (m_param->sourceHeight + g_maxCUSize - 1) / g_maxCUSize;
+
+    const pixel maxY = (1 << X265_DEPTH) - 1;
+    const pixel rangeExt = maxY >> 1;
+    int numCtu = m_numCuInWidth * m_numCuInHeight;
+
+    CHECKED_MALLOC(m_clipTableBase,  pixel, maxY + 2 * rangeExt);
+
+    CHECKED_MALLOC(m_tmpL1, pixel, g_maxCUSize + 1);
+    CHECKED_MALLOC(m_tmpL2, pixel, g_maxCUSize + 1);
+
+    for (int i = 0; i < 3; i++)
+    {
+        // SAO asm code will read 1 pixel before and after, so pad by 2
+        CHECKED_MALLOC(m_tmpU1[i], pixel, m_param->sourceWidth + 2);
+        m_tmpU1[i] += 1;
+        CHECKED_MALLOC(m_tmpU2[i], pixel, m_param->sourceWidth + 2);
+        m_tmpU2[i] += 1;
+    }
+
+    CHECKED_MALLOC(m_count, PerClass, NUM_PLANE);
+    CHECKED_MALLOC(m_offset, PerClass, NUM_PLANE);
+    CHECKED_MALLOC(m_offsetOrg, PerClass, NUM_PLANE);
+
+    CHECKED_MALLOC(m_countPreDblk, PerPlane, numCtu);
+    CHECKED_MALLOC(m_offsetOrgPreDblk, PerPlane, numCtu);
+
+    m_clipTable = &(m_clipTableBase[rangeExt]);
+
+    for (int i = 0; i < rangeExt; i++)
+        m_clipTableBase[i] = 0;
+
+    for (int i = 0; i < maxY; i++)
+        m_clipTable[i] = (pixel)i;
+
+    for (int i = maxY; i < maxY + rangeExt; i++)
+        m_clipTable[i] = maxY;
+
+    return true;
+
+fail:
+    return false;
+}
+
+void SAO::destroy()
+{
+    X265_FREE(m_clipTableBase);
+
+    X265_FREE(m_tmpL1);
+    X265_FREE(m_tmpL2);
+
+    for (int i = 0; i < 3; i++)
+    {
+        if (m_tmpU1[i]) X265_FREE(m_tmpU1[i] - 1);
+        if (m_tmpU2[i]) X265_FREE(m_tmpU2[i] - 1);
+    }
+
+    X265_FREE(m_count);
+    X265_FREE(m_offset);
+    X265_FREE(m_offsetOrg);
+    X265_FREE(m_countPreDblk);
+    X265_FREE(m_offsetOrgPreDblk);
+}
+
+/* allocate memory for SAO parameters */
+void SAO::allocSaoParam(SAOParam* saoParam) const
+{
+    saoParam->numCuInWidth  = m_numCuInWidth;
+
+    saoParam->ctuParam[0] = new SaoCtuParam[m_numCuInHeight * m_numCuInWidth];
+    saoParam->ctuParam[1] = new SaoCtuParam[m_numCuInHeight * m_numCuInWidth];
+    saoParam->ctuParam[2] = new SaoCtuParam[m_numCuInHeight * m_numCuInWidth];
+}
+
+void SAO::startSlice(Frame* frame, Entropy& initState, int qp)
+{
+    Slice* slice = frame->m_encData->m_slice;
+    int qpCb = qp;
+    if (m_param->internalCsp == X265_CSP_I420)
+        qpCb = x265_clip3(QP_MIN, QP_MAX_MAX, (int)g_chromaScale[qp + slice->m_pps->chromaQpOffset[0]]);
+    else
+        qpCb = X265_MIN(qp + slice->m_pps->chromaQpOffset[0], QP_MAX_SPEC);
+    m_lumaLambda = x265_lambda2_tab[qp];
+    m_chromaLambda = x265_lambda2_tab[qpCb]; // Use Cb QP for SAO chroma
+    m_frame = frame;
+
+    switch (slice->m_sliceType)
+    {
+    case I_SLICE:
+        m_refDepth = 0;
+        break;
+    case P_SLICE:
+        m_refDepth = 1;
+        break;
+    case B_SLICE:
+        m_refDepth = 2 + !IS_REFERENCED(frame);
+        break;
+    }
+
+    resetStats();
+
+    m_entropyCoder.load(initState);
+    m_rdContexts.next.load(initState);
+    m_rdContexts.cur.load(initState);
+
+    SAOParam* saoParam = frame->m_encData->m_saoParam;
+    if (!saoParam)
+    {
+        saoParam = new SAOParam;
+        allocSaoParam(saoParam);
+        frame->m_encData->m_saoParam = saoParam;
+    }
+
+    saoParam->bSaoFlag[0] = true;
+    saoParam->bSaoFlag[1] = (m_numPlanes > 1);
+
+    m_numNoSao[0] = 0; // Luma
+    m_numNoSao[1] = 0; // Chroma
+
+    // NOTE: Allow SAO automatic turn-off only when frame parallelism is disabled.
+    if (m_param->frameNumThreads == 1)
+    {
+        if (m_refDepth > 0 && m_depthSaoRate[0][m_refDepth - 1] > SAO_ENCODING_RATE)
+            saoParam->bSaoFlag[0] = false;
+        if (m_refDepth > 0 && m_depthSaoRate[1][m_refDepth - 1] > SAO_ENCODING_RATE_CHROMA)
+            saoParam->bSaoFlag[1] = false;
+    }
+}
+
+/* CTU-based SAO filtering (no slice-boundary handling): apply SAO type
+ * 'typeIdx' in place to CTU 'addr' of 'plane' in the reconstructed picture.
+ * Edge-offset types (SAO_EO_0..3) classify each pixel by its sign relation
+ * to two neighbors along one direction (-, |, 135, 45); band offset (SAO_BO)
+ * classifies by intensity band.  Neighbor pixels outside this CTU come from
+ * the pre-filter line buffers (m_tmpL1 left column, m_tmpU1 top row)
+ * maintained by processSaoUnitRow, so filtering uses unfiltered values. */
+void SAO::processSaoCu(int addr, int typeIdx, int plane)
+{
+    int x, y;
+    PicYuv* reconPic = m_frame->m_reconPic;
+    pixel* rec = reconPic->getPlaneAddr(plane, addr);
+    intptr_t stride = plane ? reconPic->m_strideC : reconPic->m_stride;
+    uint32_t picWidth  = m_param->sourceWidth;
+    uint32_t picHeight = m_param->sourceHeight;
+    const CUData* cu = m_frame->m_encData->getPicCTU(addr);
+    int ctuWidth = g_maxCUSize;
+    int ctuHeight = g_maxCUSize;
+    uint32_t lpelx = cu->m_cuPelX;
+    uint32_t tpely = cu->m_cuPelY;
+    if (plane)
+    {
+        // convert picture/CTU geometry to chroma sample units
+        picWidth  >>= m_hChromaShift;
+        picHeight >>= m_vChromaShift;
+        ctuWidth  >>= m_hChromaShift;
+        ctuHeight >>= m_vChromaShift;
+        lpelx     >>= m_hChromaShift;
+        tpely     >>= m_vChromaShift;
+    }
+    // clamp the CTU rectangle against the picture's right/bottom edges
+    uint32_t rpelx = x265_min(lpelx + ctuWidth,  picWidth);
+    uint32_t bpely = x265_min(tpely + ctuHeight, picHeight);
+    ctuWidth  = rpelx - lpelx;
+    ctuHeight = bpely - tpely;
+
+    int startX;
+    int startY;
+    int endX;
+    int endY;
+    pixel* tmpL;
+    pixel* tmpU;
+
+    // sign buffers for the row above the current one; +1 so index -1 is valid
+    int8_t _upBuff1[MAX_CU_SIZE + 2], *upBuff1 = _upBuff1 + 1, signLeft1[2];
+    int8_t _upBufft[MAX_CU_SIZE + 2], *upBufft = _upBufft + 1;
+
+    memset(_upBuff1 + MAX_CU_SIZE, 0, 2 * sizeof(int8_t)); /* avoid valgrind uninit warnings */
+
+    {
+        // Save this CTU's pre-filter right column into m_tmpL2 (it becomes
+        // the left-neighbor buffer for the next CTU), then pick up the left
+        // column / top row saved for this CTU.
+        const pixel* recR = &rec[ctuWidth - 1];
+        for (int i = 0; i < ctuHeight + 1; i++)
+        {
+            m_tmpL2[i] = *recR;
+            recR += stride;
+        }
+
+        tmpL = m_tmpL1;
+        tmpU = &(m_tmpU1[plane][lpelx]);
+    }
+
+    switch (typeIdx)
+    {
+    case SAO_EO_0: // dir: -
+    {
+        pixel firstPxl = 0, lastPxl = 0, row1FirstPxl = 0, row1LastPxl = 0;
+        // skip the first/last column at picture edges (missing neighbor)
+        startX = !lpelx;
+        endX   = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth;
+        if (ctuWidth & 15)
+        {
+            // width not a multiple of 16: scalar fallback
+            for (y = 0; y < ctuHeight; y++)
+            {
+                int signLeft = signOf(rec[startX] - tmpL[y]);
+                for (x = startX; x < endX; x++)
+                {
+                    int signRight = signOf(rec[x] - rec[x + 1]);
+                    int edgeType = signRight + signLeft + 2;
+                    signLeft = -signRight;
+
+                    rec[x] = m_clipTable[rec[x] + m_offsetEo[edgeType]];
+                }
+
+                rec += stride;
+            }
+        }
+        else
+        {
+            // SIMD path filters two complete rows per call; picture-edge
+            // pixels that must stay unfiltered are saved and restored.
+            for (y = 0; y < ctuHeight; y += 2)
+            {
+                signLeft1[0] = signOf(rec[startX] - tmpL[y]);
+                signLeft1[1] = signOf(rec[stride + startX] - tmpL[y + 1]);
+
+                if (!lpelx)
+                {
+                    firstPxl = rec[0];
+                    row1FirstPxl = rec[stride];
+                }
+
+                if (rpelx == picWidth)
+                {
+                    lastPxl = rec[ctuWidth - 1];
+                    row1LastPxl = rec[stride + ctuWidth - 1];
+                }
+
+                primitives.saoCuOrgE0(rec, m_offsetEo, ctuWidth, signLeft1, stride);
+
+                if (!lpelx)
+                {
+                    rec[0] = firstPxl;
+                    rec[stride] = row1FirstPxl;
+                }
+
+                if (rpelx == picWidth)
+                {
+                    rec[ctuWidth - 1] = lastPxl;
+                    rec[stride + ctuWidth - 1] = row1LastPxl;
+                }
+
+                rec += 2 * stride;
+            }
+        }
+        break;
+    }
+    case SAO_EO_1: // dir: |
+    {
+        // skip the first/last row at picture edges (missing neighbor)
+        startY = !tpely;
+        endY   = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight;
+        if (!tpely)
+            rec += stride;
+
+        if (ctuWidth & 15)
+        {
+            // scalar path: upBuff1[x] tracks sign vs the row above
+            for (x = 0; x < ctuWidth; x++)
+                upBuff1[x] = signOf(rec[x] - tmpU[x]);
+
+            for (y = startY; y < endY; y++)
+            {
+                for (x = 0; x < ctuWidth; x++)
+                {
+                    int8_t signDown = signOf(rec[x] - rec[x + stride]);
+                    int edgeType = signDown + upBuff1[x] + 2;
+                    upBuff1[x] = -signDown;
+
+                    rec[x] = m_clipTable[rec[x] + m_offsetEo[edgeType]];
+                }
+
+                rec += stride;
+            }
+        }
+        else
+        {
+            // SIMD path: two rows per call, plus one trailing row if odd
+            primitives.sign(upBuff1, rec, tmpU, ctuWidth);
+
+            int diff = (endY - startY) % 2;
+            for (y = startY; y < endY - diff; y += 2)
+            {
+                primitives.saoCuOrgE1_2Rows(rec, upBuff1, m_offsetEo, stride, ctuWidth);
+                rec += 2 * stride;
+            }
+            if (diff & 1)
+                primitives.saoCuOrgE1(rec, upBuff1, m_offsetEo, stride, ctuWidth);
+        }
+
+        break;
+    }
+    case SAO_EO_2: // dir: 135
+    {
+        // exclude picture-edge columns and rows (diagonal neighbor missing)
+        startX = !lpelx;
+        endX   = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth;
+
+        startY = !tpely;
+        endY   = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight;
+
+        if (!tpely)
+            rec += stride;
+
+        if (!(ctuWidth & 15))
+        {
+            // SIMD sign computation covers the full width; preserve the
+            // boundary entries it would otherwise overwrite
+            int8_t firstSign, lastSign;
+
+            if (!lpelx)
+                firstSign = upBuff1[0];
+
+            if (rpelx == picWidth)
+                lastSign = upBuff1[ctuWidth - 1];
+
+            primitives.sign(upBuff1, rec, &tmpU[- 1], ctuWidth);
+
+            if (!lpelx)
+                upBuff1[0] = firstSign;
+
+            if (rpelx == picWidth)
+                upBuff1[ctuWidth - 1] = lastSign;
+        }
+        else
+        {
+            // upBuff1[x]: sign vs upper-left neighbor (row above, col x-1)
+            for (x = startX; x < endX; x++)
+                upBuff1[x] = signOf(rec[x] - tmpU[x - 1]);
+        }
+
+        if (ctuWidth & 15)
+        {
+             for (y = startY; y < endY; y++)
+             {
+                 // seed next row's buffer with the saved left-column neighbor
+                 upBufft[startX] = signOf(rec[stride + startX] - tmpL[y]);
+                 for (x = startX; x < endX; x++)
+                 {
+                     int8_t signDown = signOf(rec[x] - rec[x + stride + 1]);
+                     int edgeType = signDown + upBuff1[x] + 2;
+                     upBufft[x + 1] = -signDown;
+                     rec[x] = m_clipTable[rec[x] + m_offsetEo[edgeType]];
+                 }
+
+                 std::swap(upBuff1, upBufft);
+
+                 rec += stride;
+             }
+        }
+        else
+        {
+            for (y = startY; y < endY; y++)
+            {
+                int8_t iSignDown2 = signOf(rec[stride + startX] - tmpL[y]);
+
+                primitives.saoCuOrgE2[endX > 16](rec + startX, upBufft + startX, upBuff1 + startX, m_offsetEo, endX - startX, stride);
+
+                upBufft[startX] = iSignDown2;
+
+                std::swap(upBuff1, upBufft);
+                rec += stride;
+            }
+        }
+        break;
+    }
+    case SAO_EO_3: // dir: 45
+    {
+        // exclude picture-edge columns and rows (diagonal neighbor missing)
+        startX = !lpelx;
+        endX   = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth;
+
+        startY = !tpely;
+        endY   = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight;
+
+        if (!tpely)
+            rec += stride;
+
+        if (ctuWidth & 15)
+        {
+            // upBuff1[x]: sign vs upper-right neighbor (row above, col x+1)
+            for (x = startX - 1; x < endX; x++)
+                upBuff1[x] = signOf(rec[x] - tmpU[x + 1]);
+
+            for (y = startY; y < endY; y++)
+            {
+                // first column compares against the saved left-column buffer
+                x = startX;
+                int8_t signDown = signOf(rec[x] - tmpL[y + 1]);
+                int edgeType = signDown + upBuff1[x] + 2;
+                upBuff1[x - 1] = -signDown;
+                rec[x] = m_clipTable[rec[x] + m_offsetEo[edgeType]];
+
+                for (x = startX + 1; x < endX; x++)
+                {
+                    signDown = signOf(rec[x] - rec[x + stride - 1]);
+                    edgeType = signDown + upBuff1[x] + 2;
+                    upBuff1[x - 1] = -signDown;
+                    rec[x] = m_clipTable[rec[x] + m_offsetEo[edgeType]];
+                }
+
+                upBuff1[endX - 1] = signOf(rec[endX - 1 + stride] - rec[endX]);
+
+                rec += stride;
+            }
+        }
+        else
+        {
+            // SIMD path; preserve boundary sign entries around the full-width
+            // primitives.sign call as in SAO_EO_2
+            int8_t firstSign, lastSign;
+
+            if (lpelx)
+                firstSign = signOf(rec[-1] - tmpU[0]);
+            if (rpelx == picWidth)
+                lastSign = upBuff1[ctuWidth - 1];
+
+            primitives.sign(upBuff1, rec, &tmpU[1], ctuWidth);
+
+            if (lpelx)
+                upBuff1[-1] = firstSign;
+            if (rpelx == picWidth)
+                upBuff1[ctuWidth - 1] = lastSign;
+
+            for (y = startY; y < endY; y++)
+            {
+                x = startX;
+                int8_t signDown = signOf(rec[x] - tmpL[y + 1]);
+                int edgeType = signDown + upBuff1[x] + 2;
+                upBuff1[x - 1] = -signDown;
+                rec[x] = m_clipTable[rec[x] + m_offsetEo[edgeType]];
+
+                primitives.saoCuOrgE3[endX > 16](rec, upBuff1, m_offsetEo, stride - 1, startX, endX);
+
+                upBuff1[endX - 1] = signOf(rec[endX - 1 + stride] - rec[endX]);
+
+                rec += stride;
+            }
+        }
+
+        break;
+    }
+    case SAO_BO:
+    {
+        // Band offset: the pixel's top SAO_BO_BITS bits select a band, and
+        // m_offsetBo holds the per-band correction.
+        const int8_t* offsetBo = m_offsetBo;
+
+        if (ctuWidth & 15)
+        {
+            #define SAO_BO_BITS 5
+            const int boShift = X265_DEPTH - SAO_BO_BITS;
+            for (y = 0; y < ctuHeight; y++)
+            {
+                for (x = 0; x < ctuWidth; x++)
+                {
+                     // add the band's offset, clamped to the valid pixel range
+                     int val = rec[x] + offsetBo[rec[x] >> boShift];
+                     if (val < 0)
+                         val = 0;
+                     else if (val > ((1 << X265_DEPTH) - 1))
+                         val = ((1 << X265_DEPTH) - 1);
+                     rec[x] = (pixel)val;
+                }
+                rec += stride;
+            }
+        }
+        else
+        {
+            primitives.saoCuOrgB0(rec, offsetBo, ctuWidth, ctuHeight, stride);
+        }
+        break;
+    }
+    default: break;
+    }
+
+    // rotate the saved column buffers: m_tmpL2 (this CTU's pre-filter right
+    // column) becomes the left-neighbor buffer for the next CTU in the row
+    std::swap(m_tmpL1, m_tmpL2);
+}
+
+/* Apply SAO to one full row of CTUs for one plane.  'ctuParam' is this
+ * plane's per-CTU parameter array, 'idxY' the CTU row index.  Maintains the
+ * pre-filter line buffers consumed by processSaoCu: m_tmpU1 holds the pixel
+ * row above the current CTU row, m_tmpU2 collects this row's bottom pixel
+ * line (becoming m_tmpU1 for the next row), and m_tmpL1/m_tmpL2 hold CTU
+ * left/right columns. */
+void SAO::processSaoUnitRow(SaoCtuParam* ctuParam, int idxY, int plane)
+{
+    PicYuv* reconPic = m_frame->m_reconPic;
+    intptr_t stride = plane ? reconPic->m_strideC : reconPic->m_stride;
+    uint32_t picWidth  = m_param->sourceWidth;
+    int ctuWidth  = g_maxCUSize;
+    int ctuHeight = g_maxCUSize;
+    if (plane)
+    {
+        // convert geometry to chroma sample units
+        picWidth  >>= m_hChromaShift;
+        ctuWidth  >>= m_hChromaShift;
+        ctuHeight >>= m_vChromaShift;
+    }
+
+    if (!idxY)
+    {
+        // first CTU row: seed the top-row buffer from the picture's top line
+        pixel* rec = reconPic->m_picOrg[plane];
+        memcpy(m_tmpU1[plane], rec, sizeof(pixel) * picWidth);
+    }
+
+    int addr = idxY * m_numCuInWidth;
+    pixel* rec = plane ? reconPic->getChromaAddr(plane, addr) : reconPic->getLumaAddr(addr);
+
+    // save the row's leftmost column (pre-filter) for the first CTU
+    for (int i = 0; i < ctuHeight + 1; i++)
+    {
+        m_tmpL1[i] = rec[0];
+        rec += stride;
+    }
+
+    // back up to the CTU row's last pixel line and snapshot it: it is the
+    // top-neighbor row for the CTU row below
+    rec -= (stride << 1);
+
+    memcpy(m_tmpU2[plane], rec, sizeof(pixel) * picWidth);
+
+    for (int idxX = 0; idxX < m_numCuInWidth; idxX++)
+    {
+        addr = idxY * m_numCuInWidth + idxX;
+
+        bool mergeLeftFlag = ctuParam[addr].mergeMode == SAO_MERGE_LEFT;
+        int typeIdx = ctuParam[addr].typeIdx;
+
+        if (typeIdx >= 0)
+        {
+            // merge-left CTUs reuse the offset tables already loaded for the
+            // previous CTU; otherwise rebuild them from ctuParam
+            if (!mergeLeftFlag)
+            {
+                if (typeIdx == SAO_BO)
+                {
+                    // expand the 4 signalled band offsets into the 32-entry
+                    // band lookup table, wrapping around bandPos
+                    memset(m_offsetBo, 0, sizeof(m_offsetBo));
+
+                    for (int i = 0; i < SAO_NUM_OFFSET; i++)
+                        m_offsetBo[((ctuParam[addr].bandPos + i) & (SAO_NUM_BO_CLASSES - 1))] = (int8_t)(ctuParam[addr].offset[i] << SAO_BIT_INC);
+                }
+                else // if (typeIdx == SAO_EO_0 || typeIdx == SAO_EO_1 || typeIdx == SAO_EO_2 || typeIdx == SAO_EO_3)
+                {
+                    // map the 4 signalled edge offsets onto the 5 edge
+                    // categories via s_eoTable (category 0 gets no offset)
+                    int offset[NUM_EDGETYPE];
+                    offset[0] = 0;
+                    for (int i = 0; i < SAO_NUM_OFFSET; i++)
+                        offset[i + 1] = ctuParam[addr].offset[i] << SAO_BIT_INC;
+
+                    for (int edgeType = 0; edgeType < NUM_EDGETYPE; edgeType++)
+                        m_offsetEo[edgeType] = (int8_t)offset[s_eoTable[edgeType]];
+                }
+            }
+            processSaoCu(addr, typeIdx, plane);
+        }
+        else if (idxX != (m_numCuInWidth - 1))
+        {
+            // SAO disabled for this CTU: processSaoCu didn't run, so refresh
+            // m_tmpL1 with this CTU's right column for the next CTU ourselves
+            rec = plane ? reconPic->getChromaAddr(plane, addr) : reconPic->getLumaAddr(addr);
+
+            for (int i = 0; i < ctuHeight + 1; i++)
+            {
+                m_tmpL1[i] = rec[ctuWidth - 1];
+                rec += stride;
+            }
+        }
+    }
+
+    // this row's bottom line becomes the next row's top-neighbor buffer
+    std::swap(m_tmpU1[plane], m_tmpU2[plane]);
+}
+
+/* Return one CTU's SAO parameters to the default "SAO disabled" state:
+ * no merge, no type selected (typeIdx -1), band position 0, zero offsets. */
+void SAO::resetSaoUnit(SaoCtuParam* saoUnit)
+{
+    saoUnit->bandPos   = 0;
+    saoUnit->typeIdx   = -1;
+    saoUnit->mergeMode = SAO_MERGE_NONE;
+
+    memset(saoUnit->offset, 0, SAO_NUM_OFFSET * sizeof(saoUnit->offset[0]));
+}
+
+/* Duplicate one CTU's SAO parameters (merge mode, type, band position and
+ * all signalled offsets) from 'saoUnitSrc' into 'saoUnitDst'. */
+void SAO::copySaoUnit(SaoCtuParam* saoUnitDst, const SaoCtuParam* saoUnitSrc)
+{
+    saoUnitDst->bandPos   = saoUnitSrc->bandPos;
+    saoUnitDst->typeIdx   = saoUnitSrc->typeIdx;
+    saoUnitDst->mergeMode = saoUnitSrc->mergeMode;
+
+    memcpy(saoUnitDst->offset, saoUnitSrc->offset, SAO_NUM_OFFSET * sizeof(saoUnitDst->offset[0]));
+}
+
+/* Gather SAO statistics for one CTU of one plane (no slice-boundary
+ * handling).  For every SAO type, accumulates into m_count / m_offsetOrg the
+ * per-category pixel counts and the summed (source - recon) differences,
+ * using the assembly stat primitives.  skipB/skipR exclude a bottom/right
+ * margin of pixels — presumably those the deblocking of neighboring CTUs may
+ * still modify; with bSaoNonDeblocked the margins shrink because stats for
+ * the excluded strip are pre-computed in calcSaoStatsCu_BeforeDblk. */
+void SAO::calcSaoStatsCu(int addr, int plane)
+{
+    const PicYuv* reconPic = m_frame->m_reconPic;
+    const CUData* cu = m_frame->m_encData->getPicCTU(addr);
+    const pixel* fenc0 = m_frame->m_fencPic->getPlaneAddr(plane, addr);
+    const pixel* rec0  = reconPic->getPlaneAddr(plane, addr);
+    const pixel* fenc;
+    const pixel* rec;
+    intptr_t stride = plane ? reconPic->m_strideC : reconPic->m_stride;
+    uint32_t picWidth  = m_param->sourceWidth;
+    uint32_t picHeight = m_param->sourceHeight;
+    int ctuWidth  = g_maxCUSize;
+    int ctuHeight = g_maxCUSize;
+    uint32_t lpelx = cu->m_cuPelX;
+    uint32_t tpely = cu->m_cuPelY;
+    if (plane)
+    {
+        // convert picture/CTU geometry to chroma sample units
+        picWidth  >>= m_hChromaShift;
+        picHeight >>= m_vChromaShift;
+        ctuWidth  >>= m_hChromaShift;
+        ctuHeight >>= m_vChromaShift;
+        lpelx     >>= m_hChromaShift;
+        tpely     >>= m_vChromaShift;
+    }
+    // clamp the CTU rectangle against the picture's right/bottom edges
+    uint32_t rpelx = x265_min(lpelx + ctuWidth,  picWidth);
+    uint32_t bpely = x265_min(tpely + ctuHeight, picHeight);
+    ctuWidth  = rpelx - lpelx;
+    ctuHeight = bpely - tpely;
+
+    int startX;
+    int startY;
+    int endX;
+    int endY;
+
+    // default skipped margins (luma larger than chroma)
+    int skipB = plane ? 2 : 4;
+    int skipR = plane ? 3 : 5;
+
+    // sign buffers for the diagonal stats primitives (+1 allows index -1)
+    int8_t _upBuff1[MAX_CU_SIZE + 2], *upBuff1 = _upBuff1 + 1;
+    int8_t _upBufft[MAX_CU_SIZE + 2], *upBufft = _upBufft + 1;
+
+    // SAO_BO:
+    {
+        if (m_param->bSaoNonDeblocked)
+        {
+            skipB = plane ? 1 : 3;
+            skipR = plane ? 2 : 4;
+        }
+
+        // no margin needed at actual picture edges
+        endX = (rpelx == picWidth) ? ctuWidth : ctuWidth - skipR;
+        endY = (bpely == picHeight) ? ctuHeight : ctuHeight - skipB;
+
+        primitives.saoCuStatsBO(fenc0, rec0, stride, endX, endY, m_offsetOrg[plane][SAO_BO], m_count[plane][SAO_BO]);
+    }
+
+    {
+        // SAO_EO_0: // dir: - (skip first/last column at picture edges)
+        {
+            if (m_param->bSaoNonDeblocked)
+            {
+                skipB = plane ? 1 : 3;
+                skipR = plane ? 3 : 5;
+            }
+
+            startX = !lpelx;
+            endX   = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth - skipR;
+
+            primitives.saoCuStatsE0(fenc0 + startX, rec0 + startX, stride, endX - startX, ctuHeight - skipB, m_offsetOrg[plane][SAO_EO_0], m_count[plane][SAO_EO_0]);
+        }
+
+        // SAO_EO_1: // dir: | (skip first/last row at picture edges)
+        {
+            if (m_param->bSaoNonDeblocked)
+            {
+                skipB = plane ? 2 : 4;
+                skipR = plane ? 2 : 4;
+            }
+
+            fenc = fenc0;
+            rec  = rec0;
+
+            startY = !tpely;
+            endX   = (rpelx == picWidth) ? ctuWidth : ctuWidth - skipR;
+            endY   = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight - skipB;
+            if (!tpely)
+            {
+                fenc += stride;
+                rec += stride;
+            }
+
+            // seed upBuff1 with signs against the row above the start row
+            primitives.sign(upBuff1, rec, &rec[- stride], ctuWidth);
+
+            primitives.saoCuStatsE1(fenc0 + startY * stride, rec0 + startY * stride, stride, upBuff1, endX, endY - startY, m_offsetOrg[plane][SAO_EO_1], m_count[plane][SAO_EO_1]);
+        }
+
+        // SAO_EO_2: // dir: 135 (skip picture-edge rows and columns)
+        {
+            if (m_param->bSaoNonDeblocked)
+            {
+                skipB = plane ? 2 : 4;
+                skipR = plane ? 3 : 5;
+            }
+
+            fenc = fenc0;
+            rec  = rec0;
+
+            startX = !lpelx;
+            endX   = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth - skipR;
+
+            startY = !tpely;
+            endY   = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight - skipB;
+            if (!tpely)
+            {
+                fenc += stride;
+                rec += stride;
+            }
+
+            // upBuff1[x]: sign vs upper-left neighbor
+            primitives.sign(&upBuff1[startX], &rec[startX], &rec[startX - stride - 1], (endX - startX));
+
+            primitives.saoCuStatsE2(fenc0 + startX + startY * stride, rec0  + startX + startY * stride, stride, upBuff1 + startX, upBufft + startX, endX - startX, endY - startY, m_offsetOrg[plane][SAO_EO_2], m_count[plane][SAO_EO_2]);
+        }
+
+        // SAO_EO_3: // dir: 45 (skip picture-edge rows and columns)
+        {
+            if (m_param->bSaoNonDeblocked)
+            {
+                skipB = plane ? 2 : 4;
+                skipR = plane ? 3 : 5;
+            }
+
+            fenc = fenc0;
+            rec  = rec0;
+
+            startX = !lpelx;
+            endX   = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth - skipR;
+
+            startY = !tpely;
+            endY   = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight - skipB;
+
+            if (!tpely)
+            {
+                fenc += stride;
+                rec += stride;
+            }
+
+            // upBuff1[x]: sign vs upper-right neighbor (starts at startX-1)
+            primitives.sign(&upBuff1[startX - 1], &rec[startX - 1], &rec[startX - 1 - stride + 1], (endX - startX + 1));
+
+            primitives.saoCuStatsE3(fenc0 + startX + startY * stride, rec0  + startX + startY * stride, stride, upBuff1 + startX, endX - startX, endY - startY, m_offsetOrg[plane][SAO_EO_3], m_count[plane][SAO_EO_3]);
+        }
+    }
+}
+
+/* Pre-deblock SAO statistics for one CTU (used with bSaoNonDeblocked).
+ * For every plane and SAO type this accumulates, into m_countPreDblk /
+ * m_offsetOrgPreDblk, stats for ONLY the bottom/right margin strip that
+ * calcSaoStatsCu will later skip — sampled here before deblocking changes
+ * those pixels.  In the loops below startX/startY mark where the margin
+ * strip begins (not where iteration starts); pixels left of startX AND
+ * above startY belong to the non-margin area and are excluded. */
+void SAO::calcSaoStatsCu_BeforeDblk(Frame* frame, int idxX, int idxY)
+{
+    int addr = idxX + m_numCuInWidth * idxY;
+
+    int x, y;
+    const CUData* cu = frame->m_encData->getPicCTU(addr);
+    const PicYuv* reconPic = m_frame->m_reconPic;
+    const pixel* fenc;
+    const pixel* rec;
+    intptr_t stride = reconPic->m_stride;
+    uint32_t picWidth  = m_param->sourceWidth;
+    uint32_t picHeight = m_param->sourceHeight;
+    int ctuWidth  = g_maxCUSize;
+    int ctuHeight = g_maxCUSize;
+    uint32_t lpelx = cu->m_cuPelX;
+    uint32_t tpely = cu->m_cuPelY;
+    // clamp the CTU rectangle against the picture's right/bottom edges
+    uint32_t rpelx = x265_min(lpelx + ctuWidth,  picWidth);
+    uint32_t bpely = x265_min(tpely + ctuHeight, picHeight);
+    ctuWidth  = rpelx - lpelx;
+    ctuHeight = bpely - tpely;
+
+    int startX;
+    int startY;
+    int endX;
+    int endY;
+    int firstX, firstY;
+    int32_t* stats;
+    int32_t* count;
+
+    int skipB, skipR;
+
+    // sign buffers for diagonal passes (+1 allows index -1)
+    int32_t _upBuff1[MAX_CU_SIZE + 2], *upBuff1 = _upBuff1 + 1;
+    int32_t _upBufft[MAX_CU_SIZE + 2], *upBufft = _upBufft + 1;
+
+    const int boShift = X265_DEPTH - SAO_BO_BITS;
+
+    memset(m_countPreDblk[addr], 0, sizeof(PerPlane));
+    memset(m_offsetOrgPreDblk[addr], 0, sizeof(PerPlane));
+
+    for (int plane = 0; plane < NUM_PLANE; plane++)
+    {
+        // switch to chroma geometry once at plane 1; Cb and Cr (plane 2)
+        // share the same dimensions so no further conversion is needed
+        if (plane == 1)
+        {
+            stride = reconPic->m_strideC;
+            picWidth  >>= m_hChromaShift;
+            picHeight >>= m_vChromaShift;
+            ctuWidth  >>= m_hChromaShift;
+            ctuHeight >>= m_vChromaShift;
+            lpelx     >>= m_hChromaShift;
+            tpely     >>= m_vChromaShift;
+            rpelx     >>= m_hChromaShift;
+            bpely     >>= m_vChromaShift;
+        }
+
+        // SAO_BO:
+
+        skipB = plane ? 1 : 3;
+        skipR = plane ? 2 : 4;
+
+        stats = m_offsetOrgPreDblk[addr][plane][SAO_BO];
+        count = m_countPreDblk[addr][plane][SAO_BO];
+
+        const pixel* fenc0 = m_frame->m_fencPic->getPlaneAddr(plane, addr);
+        const pixel* rec0 = reconPic->getPlaneAddr(plane, addr);
+        fenc = fenc0;
+        rec  = rec0;
+
+        // margin strip begins here (whole CTU if at the picture edge the
+        // margin would fall outside of)
+        startX = (rpelx == picWidth) ? ctuWidth : ctuWidth - skipR;
+        startY = (bpely == picHeight) ? ctuHeight : ctuHeight - skipB;
+
+        for (y = 0; y < ctuHeight; y++)
+        {
+            // above startY only the right margin columns; below it, all
+            for (x = (y < startY ? startX : 0); x < ctuWidth; x++)
+            {
+                int classIdx = 1 + (rec[x] >> boShift);
+                stats[classIdx] += (fenc[x] - rec[x]);
+                count[classIdx]++;
+            }
+
+            fenc += stride;
+            rec += stride;
+        }
+
+        // SAO_EO_0: // dir: -
+        {
+            skipB = plane ? 1 : 3;
+            skipR = plane ? 3 : 5;
+
+            stats = m_offsetOrgPreDblk[addr][plane][SAO_EO_0];
+            count = m_countPreDblk[addr][plane][SAO_EO_0];
+
+            fenc = fenc0;
+            rec  = rec0;
+
+            startX = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth - skipR;
+            startY = (bpely == picHeight) ? ctuHeight : ctuHeight - skipB;
+            firstX = !lpelx;
+            // endX   = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth;
+            endX   = ctuWidth - 1;  // not refer right CTU
+
+            for (y = 0; y < ctuHeight; y++)
+            {
+                x = (y < startY ? startX : firstX);
+                int signLeft = signOf(rec[x] - rec[x - 1]);
+                for (; x < endX; x++)
+                {
+                    int signRight = signOf(rec[x] - rec[x + 1]);
+                    int edgeType = signRight + signLeft + 2;
+                    signLeft = -signRight;
+
+                    stats[s_eoTable[edgeType]] += (fenc[x] - rec[x]);
+                    count[s_eoTable[edgeType]]++;
+                }
+
+                fenc += stride;
+                rec += stride;
+            }
+        }
+
+        // SAO_EO_1: // dir: |
+        {
+            skipB = plane ? 2 : 4;
+            skipR = plane ? 2 : 4;
+
+            stats = m_offsetOrgPreDblk[addr][plane][SAO_EO_1];
+            count = m_countPreDblk[addr][plane][SAO_EO_1];
+
+            fenc = fenc0;
+            rec  = rec0;
+
+            startX = (rpelx == picWidth) ? ctuWidth : ctuWidth - skipR;
+            startY = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight - skipB;
+            firstY = !tpely;
+            // endY   = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight;
+            endY   = ctuHeight - 1; // not refer below CTU
+            if (!tpely)
+            {
+                fenc += stride;
+                rec += stride;
+            }
+
+            // seed upBuff1 with signs vs the row above, margin columns only
+            for (x = startX; x < ctuWidth; x++)
+                upBuff1[x] = signOf(rec[x] - rec[x - stride]);
+
+            for (y = firstY; y < endY; y++)
+            {
+                // one row early (startY - 1) the full width must be scanned
+                // so upBuff1 is valid when the bottom margin rows begin
+                for (x = (y < startY - 1 ? startX : 0); x < ctuWidth; x++)
+                {
+                    int signDown = signOf(rec[x] - rec[x + stride]);
+                    int edgeType = signDown + upBuff1[x] + 2;
+                    upBuff1[x] = -signDown;
+
+                    // non-margin pixels update signs only, not stats
+                    if (x < startX && y < startY)
+                        continue;
+
+                    stats[s_eoTable[edgeType]] += (fenc[x] - rec[x]);
+                    count[s_eoTable[edgeType]]++;
+                }
+
+                fenc += stride;
+                rec += stride;
+            }
+        }
+
+        // SAO_EO_2: // dir: 135
+        {
+            skipB = plane ? 2 : 4;
+            skipR = plane ? 3 : 5;
+
+            stats = m_offsetOrgPreDblk[addr][plane][SAO_EO_2];
+            count = m_countPreDblk[addr][plane][SAO_EO_2];
+
+            fenc = fenc0;
+            rec  = rec0;
+
+            startX = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth - skipR;
+            startY = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight - skipB;
+            firstX = !lpelx;
+            firstY = !tpely;
+            // endX   = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth;
+            // endY   = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight;
+            endX   = ctuWidth - 1;  // not refer right CTU
+            endY   = ctuHeight - 1; // not refer below CTU
+            if (!tpely)
+            {
+                fenc += stride;
+                rec += stride;
+            }
+
+            // upBuff1[x]: sign vs upper-left neighbor
+            for (x = startX; x < endX; x++)
+                upBuff1[x] = signOf(rec[x] - rec[x - stride - 1]);
+
+            for (y = firstY; y < endY; y++)
+            {
+                x = (y < startY - 1 ? startX : firstX);
+                upBufft[x] = signOf(rec[x + stride] - rec[x - 1]);
+                for (; x < endX; x++)
+                {
+                    int signDown = signOf(rec[x] - rec[x + stride + 1]);
+                    int edgeType = signDown + upBuff1[x] + 2;
+                    upBufft[x + 1] = -signDown;
+
+                    // non-margin pixels update signs only, not stats
+                    if (x < startX && y < startY)
+                        continue;
+
+                    stats[s_eoTable[edgeType]] += (fenc[x] - rec[x]);
+                    count[s_eoTable[edgeType]]++;
+                }
+
+                std::swap(upBuff1, upBufft);
+
+                rec += stride;
+                fenc += stride;
+            }
+        }
+
+        // SAO_EO_3: // dir: 45
+        {
+            skipB = plane ? 2 : 4;
+            skipR = plane ? 3 : 5;
+
+            stats = m_offsetOrgPreDblk[addr][plane][SAO_EO_3];
+            count = m_countPreDblk[addr][plane][SAO_EO_3];
+
+            fenc = fenc0;
+            rec  = rec0;
+
+            startX = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth - skipR;
+            startY = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight - skipB;
+            firstX = !lpelx;
+            firstY = !tpely;
+            // endX   = (rpelx == picWidth) ? ctuWidth - 1 : ctuWidth;
+            // endY   = (bpely == picHeight) ? ctuHeight - 1 : ctuHeight;
+            endX   = ctuWidth - 1;  // not refer right CTU
+            endY   = ctuHeight - 1; // not refer below CTU
+            if (!tpely)
+            {
+                fenc += stride;
+                rec += stride;
+            }
+
+            // upBuff1[x]: sign vs upper-right neighbor
+            for (x = startX - 1; x < endX; x++)
+                upBuff1[x] = signOf(rec[x] - rec[x - stride + 1]);
+
+            for (y = firstY; y < endY; y++)
+            {
+                for (x = (y < startY - 1 ? startX : firstX); x < endX; x++)
+                {
+                    int signDown = signOf(rec[x] - rec[x + stride - 1]);
+                    int edgeType = signDown + upBuff1[x] + 2;
+                    upBuff1[x - 1] = -signDown;
+
+                    // non-margin pixels update signs only, not stats
+                    if (x < startX && y < startY)
+                        continue;
+
+                    stats[s_eoTable[edgeType]] += (fenc[x] - rec[x]);
+                    count[s_eoTable[edgeType]]++;
+                }
+
+                upBuff1[endX - 1] = signOf(rec[endX - 1 + stride] - rec[endX]);
+
+                rec += stride;
+                fenc += stride;
+            }
+        }
+    }
+}
+
+/* Zero all per-slice SAO statistics: the per-class pixel counts, the
+ * derived offsets and the raw source-minus-recon sums, for every plane. */
+void SAO::resetStats()
+{
+    const size_t statBytes = sizeof(PerClass) * NUM_PLANE;
+
+    memset(m_offsetOrg, 0, statBytes);
+    memset(m_offset, 0, statBytes);
+    memset(m_count, 0, statBytes);
+}
+
+/* Called after the frame's last CTU row: record, per reference depth, the
+ * fraction of CTUs that ended up with SAO disabled (1.0 when SAO was never
+ * attempted).  startSlice uses these rates to auto-disable SAO later. */
+void SAO::rdoSaoUnitRowEnd(const SAOParam* saoParam, int numctus)
+{
+    m_depthSaoRate[0][m_refDepth] = saoParam->bSaoFlag[0]
+        ? m_numNoSao[0] / (double)numctus
+        : 1.0;
+
+    m_depthSaoRate[1][m_refDepth] = saoParam->bSaoFlag[1]
+        ? m_numNoSao[1] / (double)numctus
+        : 1.0;
+}
+
+/* Rate-distortion optimize SAO parameters for one CTU row.  For each CTU:
+ * gather statistics, derive the best standalone parameters per plane, then
+ * compare their RD cost against merge-left and merge-up candidates, keeping
+ * the cheapest.  The entropy coder contexts are juggled through
+ * m_rdContexts (cur = state entering the CTU, temp = state of the best
+ * candidate so far) so rate estimates are context-accurate. */
+void SAO::rdoSaoUnitRow(SAOParam* saoParam, int idxY)
+{
+    SaoCtuParam mergeSaoParam[NUM_MERGE_MODE][2];
+    double mergeDist[NUM_MERGE_MODE];
+    bool allowMerge[2]; // left, up
+    allowMerge[1] = (idxY > 0);
+
+    for (int idxX = 0; idxX < m_numCuInWidth; idxX++)
+    {
+        int addr     = idxX + idxY * m_numCuInWidth;
+        int addrUp   = idxY ? addr - m_numCuInWidth : -1;
+        int addrLeft = idxX ? addr - 1 : -1;
+        allowMerge[0] = (idxX > 0);
+
+        // baseline context: both merge flags coded as 0 (no merge)
+        m_entropyCoder.load(m_rdContexts.cur);
+        if (allowMerge[0])
+            m_entropyCoder.codeSaoMerge(0);
+        if (allowMerge[1])
+            m_entropyCoder.codeSaoMerge(0);
+        m_entropyCoder.store(m_rdContexts.temp);
+        // reset stats Y, Cb, Cr
+        for (int plane = 0; plane < m_numPlanes; plane++)
+        {
+            for (int j = 0; j < MAX_NUM_SAO_TYPE; j++)
+            {
+                for (int k = 0; k < MAX_NUM_SAO_CLASS; k++)
+                {
+                    m_offset[plane][j][k] = 0;
+                    // with bSaoNonDeblocked, start from the pre-deblock
+                    // margin stats gathered by calcSaoStatsCu_BeforeDblk
+                    if (m_param->bSaoNonDeblocked)
+                    {
+                        m_count[plane][j][k] = m_countPreDblk[addr][plane][j][k];
+                        m_offsetOrg[plane][j][k] = m_offsetOrgPreDblk[addr][plane][j][k];
+                    }
+                    else
+                    {
+                        m_count[plane][j][k] = 0;
+                        m_offsetOrg[plane][j][k] = 0;
+                    }
+                }
+            }
+
+            // default: SAO off for this CTU/plane
+            saoParam->ctuParam[plane][addr].mergeMode = SAO_MERGE_NONE;
+            saoParam->ctuParam[plane][addr].typeIdx   = -1;
+            saoParam->ctuParam[plane][addr].bandPos   = 0;
+            if (saoParam->bSaoFlag[plane > 0])
+                calcSaoStatsCu(addr, plane);
+        }
+
+        // pick best standalone params and evaluate merge candidates:
+        // mergeDist[0] = standalone, [1] = merge-left, [2] = merge-up
+        saoComponentParamDist(saoParam, addr, addrUp, addrLeft, &mergeSaoParam[0][0], mergeDist);
+
+        if (m_numPlanes > 1) {
+            sao2ChromaParamDist(saoParam, addr, addrUp, addrLeft, mergeSaoParam, mergeDist);
+        }
+
+        if (saoParam->bSaoFlag[0] || saoParam->bSaoFlag[1])
+        {
+            // Cost of new SAO_params
+            m_entropyCoder.load(m_rdContexts.cur);
+            m_entropyCoder.resetBits();
+            if (allowMerge[0])
+                m_entropyCoder.codeSaoMerge(0);
+            if (allowMerge[1])
+                m_entropyCoder.codeSaoMerge(0);
+            for (int plane = 0; plane < m_numPlanes; plane++)
+            {
+                if (saoParam->bSaoFlag[plane > 0])
+                    m_entropyCoder.codeSaoOffset(saoParam->ctuParam[plane][addr], plane);
+            }
+
+            uint32_t rate = m_entropyCoder.getNumberOfWrittenBits();
+            double bestCost = mergeDist[0] + (double)rate;
+            m_entropyCoder.store(m_rdContexts.temp);
+
+            // Cost of Merge
+            for (int mergeIdx = 0; mergeIdx < 2; ++mergeIdx)
+            {
+                if (!allowMerge[mergeIdx])
+                    continue;
+
+                m_entropyCoder.load(m_rdContexts.cur);
+                m_entropyCoder.resetBits();
+                // merge-left (mergeIdx 0): code left flag = 1;
+                // merge-up (mergeIdx 1): left flag = 0, then up flag = 1
+                if (allowMerge[0])
+                    m_entropyCoder.codeSaoMerge(1 - mergeIdx);
+                if (allowMerge[1] && (mergeIdx == 1))
+                    m_entropyCoder.codeSaoMerge(1);
+
+                rate = m_entropyCoder.getNumberOfWrittenBits();
+                double mergeCost = mergeDist[mergeIdx + 1] + (double)rate;
+                if (mergeCost < bestCost)
+                {
+                    SaoMergeMode mergeMode = mergeIdx ? SAO_MERGE_UP : SAO_MERGE_LEFT;
+                    bestCost = mergeCost;
+                    m_entropyCoder.store(m_rdContexts.temp);
+                    for (int plane = 0; plane < m_numPlanes; plane++)
+                    {
+                        mergeSaoParam[plane][mergeIdx].mergeMode = mergeMode;
+                        if (saoParam->bSaoFlag[plane > 0])
+                            copySaoUnit(&saoParam->ctuParam[plane][addr], &mergeSaoParam[plane][mergeIdx]);
+                    }
+                }
+            }
+
+            // track how many CTUs disabled SAO (for the auto-off heuristic)
+            if (saoParam->ctuParam[0][addr].typeIdx < 0)
+                m_numNoSao[0]++;
+            if (saoParam->ctuParam[1][addr].typeIdx < 0)
+                m_numNoSao[1]++;
+            // winning candidate's context becomes the next CTU's baseline
+            m_entropyCoder.load(m_rdContexts.temp);
+            m_entropyCoder.store(m_rdContexts.cur);
+        }
+    }
+}
+
+/** estimate total SAO distortion for one plane and SAO type; for SAO_BO also fills per-band distortion/cost tables */
+inline int64_t SAO::estSaoTypeDist(int plane, int typeIdx, double lambda, int32_t* currentDistortionTableBo, double* currentRdCostTableBo)
+{
+    int64_t estDist = 0;
+
+    for (int classIdx = 1; classIdx < ((typeIdx < SAO_BO) ?  SAO_EO_LEN + 1 : SAO_NUM_BO_CLASSES + 1); classIdx++) // class 0 is "no offset"
+    {
+        int32_t  count = m_count[plane][typeIdx][classIdx];
+        int32_t& offsetOrg = m_offsetOrg[plane][typeIdx][classIdx];
+        int32_t& offsetOut = m_offset[plane][typeIdx][classIdx];
+
+        if (typeIdx == SAO_BO)
+        {
+            currentDistortionTableBo[classIdx - 1] = 0;
+            currentRdCostTableBo[classIdx - 1] = lambda; // cost of signalling a zero offset (1 bit * lambda)
+        }
+        if (count)
+        {
+            int offset = roundIBDI(offsetOrg << (X265_DEPTH - 8), count); // rounded average residual, scaled to internal bit depth
+            offset = x265_clip3(-OFFSET_THRESH + 1, OFFSET_THRESH - 1, offset);
+            if (typeIdx < SAO_BO) // EO: classes 1-2 may only correct upward, classes 3-4 only downward
+            {
+                if (classIdx < 3)
+                    offset = X265_MAX(offset, 0);
+                else
+                    offset = X265_MIN(offset, 0);
+            }
+            offsetOut = estIterOffset(typeIdx, classIdx, lambda, offset, count, offsetOrg, currentDistortionTableBo, currentRdCostTableBo);
+        }
+        else
+        {
+            offsetOrg = 0;
+            offsetOut = 0;
+        }
+        if (typeIdx != SAO_BO) // BO distortion is accumulated later from the per-band tables
+            estDist += estSaoDist(count, (int)offsetOut << SAO_BIT_INC, offsetOrg);
+    }
+
+    return estDist;
+}
+
+inline int SAO::estIterOffset(int typeIdx, int classIdx, double lambda, int offset, int32_t count, int32_t offsetOrg, int32_t* currentDistortionTableBo, double* currentRdCostTableBo)
+{
+    int offsetOut = 0;
+
+    // Assuming sending quantized value 0 results in zero offset and sending the value zero needs 1 bit. entropy coder can be used to measure the exact rate here.
+    double tempMinCost = lambda;
+    while (offset != 0) // search from the statistical estimate downward in magnitude toward zero
+    {
+        // Calculate the bits required for signalling the offset (BO carries one more bit than EO, presumably the sign)
+        int tempRate = (typeIdx == SAO_BO) ? (abs(offset) + 2) : (abs(offset) + 1);
+        if (abs(offset) == OFFSET_THRESH - 1) // max magnitude needs no terminating bit
+            tempRate--;
+
+        // Do the dequantization before distortion calculation
+        int tempOffset = offset << SAO_BIT_INC;
+        int64_t tempDist  = estSaoDist(count, tempOffset, offsetOrg);
+        double tempCost   = ((double)tempDist + lambda * (double)tempRate);
+        if (tempCost < tempMinCost)
+        {
+            tempMinCost = tempCost;
+            offsetOut = offset;
+            if (typeIdx == SAO_BO) // remember per-band best for the later band-window search
+            {
+                currentDistortionTableBo[classIdx - 1] = (int)tempDist;
+                currentRdCostTableBo[classIdx - 1] = tempCost;
+            }
+        }
+        offset = (offset > 0) ? (offset - 1) : (offset + 1); // step magnitude toward zero
+    }
+
+    return offsetOut;
+}
+
+void SAO::saoComponentParamDist(SAOParam* saoParam, int addr, int addrUp, int addrLeft, SaoCtuParam* mergeSaoParam, double* mergeDist)
+{
+    // RDO of luma SAO parameters for one CTU: picks the best type/offsets and fills merge candidates.
+    int64_t bestDist = 0;
+
+    SaoCtuParam* lclCtuParam = &saoParam->ctuParam[0][addr]; // luma (plane 0) params for this CTU
+
+    double bestRDCostTableBo = MAX_DOUBLE;
+    int    bestClassTableBo  = 0;
+    int    currentDistortionTableBo[MAX_NUM_SAO_CLASS];
+    double currentRdCostTableBo[MAX_NUM_SAO_CLASS];
+
+    resetSaoUnit(lclCtuParam);
+    m_entropyCoder.load(m_rdContexts.temp);
+    m_entropyCoder.resetBits();
+    m_entropyCoder.codeSaoOffset(*lclCtuParam, 0);
+    double dCostPartBest = m_entropyCoder.getNumberOfWrittenBits() * m_lumaLambda; // baseline: cost of signalling SAO off
+
+    for (int typeIdx = 0; typeIdx < MAX_NUM_SAO_TYPE; typeIdx++) // try each EO direction plus BO
+    {
+        int64_t estDist = estSaoTypeDist(0, typeIdx, m_lumaLambda, currentDistortionTableBo, currentRdCostTableBo);
+
+        if (typeIdx == SAO_BO)
+        {
+            // Estimate best start band: slide a SAO_BO_LEN-band window over all BO classes
+            for (int i = 0; i < SAO_NUM_BO_CLASSES - SAO_BO_LEN + 1; i++)
+            {
+                double currentRDCost = 0.0;
+                for (int j = i; j < i + SAO_BO_LEN; j++)
+                    currentRDCost += currentRdCostTableBo[j];
+
+                if (currentRDCost < bestRDCostTableBo)
+                {
+                    bestRDCostTableBo = currentRDCost;
+                    bestClassTableBo  = i;
+                }
+            }
+
+            // Recompute distortion using only the bands inside
+            // the chosen window
+            estDist = 0;
+            for (int classIdx = bestClassTableBo; classIdx < bestClassTableBo + SAO_BO_LEN; classIdx++)
+                estDist += currentDistortionTableBo[classIdx];
+        }
+        SaoCtuParam  ctuParamRdo;
+        ctuParamRdo.mergeMode = SAO_MERGE_NONE;
+        ctuParamRdo.typeIdx = typeIdx;
+        ctuParamRdo.bandPos = (typeIdx == SAO_BO) ? bestClassTableBo : 0;
+        for (int classIdx = 0; classIdx < SAO_NUM_OFFSET; classIdx++)
+            ctuParamRdo.offset[classIdx] = (int)m_offset[0][typeIdx][classIdx + ctuParamRdo.bandPos + 1]; // +1 skips class 0 ("no offset")
+
+        m_entropyCoder.load(m_rdContexts.temp);
+        m_entropyCoder.resetBits();
+        m_entropyCoder.codeSaoOffset(ctuParamRdo, 0);
+
+        uint32_t estRate = m_entropyCoder.getNumberOfWrittenBits();
+        double cost = (double)estDist + m_lumaLambda * (double)estRate;
+
+        if (cost < dCostPartBest)
+        {
+            dCostPartBest = cost;
+            copySaoUnit(lclCtuParam, &ctuParamRdo);
+            bestDist = estDist;
+        }
+    }
+
+    mergeDist[0] = ((double)bestDist / m_lumaLambda); // distortion normalized by lambda (rate units) for the merge decision
+    m_entropyCoder.load(m_rdContexts.temp);
+    m_entropyCoder.codeSaoOffset(*lclCtuParam, 0);
+    m_entropyCoder.store(m_rdContexts.temp);
+
+    // evaluate merge-left (mergeIdx 0) and merge-up (mergeIdx 1) candidates against this CTU's statistics
+
+    for (int mergeIdx = 0; mergeIdx < 2; mergeIdx++)
+    {
+        SaoCtuParam* mergeSrcParam = NULL;
+        if (addrLeft >= 0 && mergeIdx == 0)
+            mergeSrcParam = &(saoParam->ctuParam[0][addrLeft]);
+        else if (addrUp >= 0 && mergeIdx == 1)
+            mergeSrcParam = &(saoParam->ctuParam[0][addrUp]);
+        if (mergeSrcParam)
+        {
+            int64_t estDist = 0;
+            int typeIdx = mergeSrcParam->typeIdx;
+            if (typeIdx >= 0) // typeIdx < 0 means the neighbour uses no SAO
+            {
+                int bandPos = (typeIdx == SAO_BO) ? mergeSrcParam->bandPos : 0;
+                for (int classIdx = 0; classIdx < SAO_NUM_OFFSET; classIdx++)
+                {
+                    int mergeOffset = mergeSrcParam->offset[classIdx];
+                    estDist += estSaoDist(m_count[0][typeIdx][classIdx + bandPos + 1], mergeOffset, m_offsetOrg[0][typeIdx][classIdx + bandPos + 1]);
+                }
+            }
+
+            copySaoUnit(&mergeSaoParam[mergeIdx], mergeSrcParam);
+            mergeSaoParam[mergeIdx].mergeMode = mergeIdx ? SAO_MERGE_UP : SAO_MERGE_LEFT;
+
+            mergeDist[mergeIdx + 1] = ((double)estDist / m_lumaLambda);
+        }
+        else
+            resetSaoUnit(&mergeSaoParam[mergeIdx]);
+    }
+}
+
+void SAO::sao2ChromaParamDist(SAOParam* saoParam, int addr, int addrUp, int addrLeft, SaoCtuParam mergeSaoParam[][2], double* mergeDist)
+{
+    // RDO of chroma SAO parameters: Cb and Cr share a type but carry separate offsets/band positions.
+    int64_t bestDist = 0;
+
+    SaoCtuParam* lclCtuParam[2] = { &saoParam->ctuParam[1][addr], &saoParam->ctuParam[2][addr] }; // Cb, Cr
+
+    double currentRdCostTableBo[MAX_NUM_SAO_CLASS];
+    int    bestClassTableBo[2] = { 0, 0 };
+    int    currentDistortionTableBo[MAX_NUM_SAO_CLASS];
+
+    resetSaoUnit(lclCtuParam[0]);
+    resetSaoUnit(lclCtuParam[1]);
+    m_entropyCoder.load(m_rdContexts.temp);
+    m_entropyCoder.resetBits();
+    m_entropyCoder.codeSaoOffset(*lclCtuParam[0], 1);
+    m_entropyCoder.codeSaoOffset(*lclCtuParam[1], 2);
+
+    double costPartBest = m_entropyCoder.getNumberOfWrittenBits() * m_chromaLambda; // baseline: cost of SAO off on both planes
+
+    for (int typeIdx = 0; typeIdx < MAX_NUM_SAO_TYPE; typeIdx++)
+    {
+        int64_t estDist[2];
+        if (typeIdx == SAO_BO)
+        {
+            // Estimate best start band per chroma plane (sliding SAO_BO_LEN window)
+            for (int compIdx = 0; compIdx < 2; compIdx++)
+            {
+                double bestRDCostTableBo = MAX_DOUBLE;
+                estDist[compIdx] = estSaoTypeDist(compIdx + 1, typeIdx, m_chromaLambda, currentDistortionTableBo, currentRdCostTableBo);
+                for (int i = 0; i < SAO_NUM_BO_CLASSES - SAO_BO_LEN + 1; i++)
+                {
+                    double currentRDCost = 0.0;
+                    for (int j = i; j < i + SAO_BO_LEN; j++)
+                        currentRDCost += currentRdCostTableBo[j];
+
+                    if (currentRDCost < bestRDCostTableBo)
+                    {
+                        bestRDCostTableBo = currentRDCost;
+                        bestClassTableBo[compIdx]  = i;
+                    }
+                }
+
+                // Recompute distortion using only the bands inside
+                // the chosen window
+                estDist[compIdx] = 0;
+                for (int classIdx = bestClassTableBo[compIdx]; classIdx < bestClassTableBo[compIdx] + SAO_BO_LEN; classIdx++)
+                    estDist[compIdx] += currentDistortionTableBo[classIdx];
+            }
+        }
+        else
+        {
+            estDist[0] = estSaoTypeDist(1, typeIdx, m_chromaLambda, currentDistortionTableBo, currentRdCostTableBo);
+            estDist[1] = estSaoTypeDist(2, typeIdx, m_chromaLambda, currentDistortionTableBo, currentRdCostTableBo);
+        }
+
+        m_entropyCoder.load(m_rdContexts.temp);
+        m_entropyCoder.resetBits();
+
+        SaoCtuParam  ctuParamRdo[2];
+        for (int compIdx = 0; compIdx < 2; compIdx++)
+        {
+            ctuParamRdo[compIdx].mergeMode = SAO_MERGE_NONE;
+            ctuParamRdo[compIdx].typeIdx = typeIdx;
+            ctuParamRdo[compIdx].bandPos = (typeIdx == SAO_BO) ? bestClassTableBo[compIdx] : 0;
+            for (int classIdx = 0; classIdx < SAO_NUM_OFFSET; classIdx++)
+                ctuParamRdo[compIdx].offset[classIdx] = (int)m_offset[compIdx + 1][typeIdx][classIdx + ctuParamRdo[compIdx].bandPos + 1]; // +1 skips class 0
+
+            m_entropyCoder.codeSaoOffset(ctuParamRdo[compIdx], compIdx + 1);
+        }
+
+        uint32_t estRate = m_entropyCoder.getNumberOfWrittenBits();
+        double cost = (double)(estDist[0] + estDist[1]) + m_chromaLambda * (double)estRate; // joint Cb+Cr cost
+
+        if (cost < costPartBest)
+        {
+            costPartBest = cost;
+            copySaoUnit(lclCtuParam[0], &ctuParamRdo[0]);
+            copySaoUnit(lclCtuParam[1], &ctuParamRdo[1]);
+            bestDist = (estDist[0] + estDist[1]);
+        }
+    }
+
+    mergeDist[0] += ((double)bestDist / m_chromaLambda); // accumulate onto the luma contribution already stored
+    m_entropyCoder.load(m_rdContexts.temp);
+    m_entropyCoder.codeSaoOffset(*lclCtuParam[0], 1);
+    m_entropyCoder.codeSaoOffset(*lclCtuParam[1], 2);
+    m_entropyCoder.store(m_rdContexts.temp);
+
+    // evaluate merge-left (mergeIdx 0) and merge-up (mergeIdx 1) candidates for both chroma planes
+
+    for (int mergeIdx = 0; mergeIdx < 2; mergeIdx++)
+    {
+        for (int compIdx = 0; compIdx < 2; compIdx++)
+        {
+            int plane = compIdx + 1;
+            SaoCtuParam* mergeSrcParam = NULL;
+            if (addrLeft >= 0 && mergeIdx == 0)
+                mergeSrcParam = &(saoParam->ctuParam[plane][addrLeft]);
+            else if (addrUp >= 0 && mergeIdx == 1)
+                mergeSrcParam = &(saoParam->ctuParam[plane][addrUp]);
+            if (mergeSrcParam)
+            {
+                int64_t estDist = 0;
+                int typeIdx = mergeSrcParam->typeIdx;
+                if (typeIdx >= 0) // typeIdx < 0 means the neighbour uses no SAO
+                {
+                    int bandPos = (typeIdx == SAO_BO) ? mergeSrcParam->bandPos : 0;
+                    for (int classIdx = 0; classIdx < SAO_NUM_OFFSET; classIdx++)
+                    {
+                        int mergeOffset = mergeSrcParam->offset[classIdx];
+                        estDist += estSaoDist(m_count[plane][typeIdx][classIdx + bandPos + 1], mergeOffset, m_offsetOrg[plane][typeIdx][classIdx + bandPos + 1]);
+                    }
+                }
+
+                copySaoUnit(&mergeSaoParam[plane][mergeIdx], mergeSrcParam);
+                mergeSaoParam[plane][mergeIdx].mergeMode = mergeIdx ? SAO_MERGE_UP : SAO_MERGE_LEFT;
+                mergeDist[mergeIdx + 1] += ((double)estDist / m_chromaLambda);
+            }
+            else
+                resetSaoUnit(&mergeSaoParam[plane][mergeIdx]);
+        }
+    }
+}
+
+// NOTE: must put in namespace X265_NS since we need class SAO
+// Band-offset statistics: per-band sum of (orig - recon) and pixel counts; band = top SAO_BO_BITS of the recon sample.
+void saoCuStatsBO_c(const pixel *fenc, const pixel *rec, intptr_t stride, int endX, int endY, int32_t *stats, int32_t *count)
+{
+    int x, y;
+    const int boShift = X265_DEPTH - SAO_BO_BITS;
+
+    for (y = 0; y < endY; y++)
+    {
+        for (x = 0; x < endX; x++)
+        {
+            int classIdx = 1 + (rec[x] >> boShift); // +1: class 0 is reserved for "no offset"
+            stats[classIdx] += (fenc[x] - rec[x]);
+            count[classIdx]++;
+        }
+
+        fenc += stride;
+        rec += stride;
+    }
+}
+
+// Edge-offset class 0 (horizontal neighbours) statistics: classifies each pixel by sign of left/right differences.
+void saoCuStatsE0_c(const pixel *fenc, const pixel *rec, intptr_t stride, int endX, int endY, int32_t *stats, int32_t *count)
+{
+    int x, y;
+    int32_t tmp_stats[SAO::NUM_EDGETYPE];
+    int32_t tmp_count[SAO::NUM_EDGETYPE];
+
+    memset(tmp_stats, 0, sizeof(tmp_stats));
+    memset(tmp_count, 0, sizeof(tmp_count));
+
+    for (y = 0; y < endY; y++)
+    {
+        int signLeft = signOf(rec[0] - rec[-1]); // reads one pixel left of the block; caller must provide it
+        for (x = 0; x < endX; x++)
+        {
+            int signRight = signOf2(rec[x], rec[x + 1]);
+            X265_CHECK(signRight == signOf(rec[x] - rec[x + 1]), "signDown check failure\n");
+            uint32_t edgeType = signRight + signLeft + 2; // maps {-2..2} to {0..4}
+            signLeft = -signRight; // reuse: sign(rec[x+1]-rec[x]) is next pixel's left sign
+
+            X265_CHECK(edgeType <= 4, "edgeType check failure\n");
+            tmp_stats[edgeType] += (fenc[x] - rec[x]);
+            tmp_count[edgeType]++;
+        }
+
+        fenc += stride;
+        rec += stride;
+    }
+
+    for (x = 0; x < SAO::NUM_EDGETYPE; x++) // remap raw edge types to SAO class order
+    {
+        stats[SAO::s_eoTable[x]] += tmp_stats[x];
+        count[SAO::s_eoTable[x]] += tmp_count[x];
+    }
+}
+
+// Edge-offset class 1 (vertical neighbours) statistics; upBuff1 carries sign context from the row above.
+void saoCuStatsE1_c(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count)
+{
+    X265_CHECK(endX <= MAX_CU_SIZE, "endX check failure\n");
+    X265_CHECK(endY <= MAX_CU_SIZE, "endY check failure\n");
+
+    int x, y;
+    int32_t tmp_stats[SAO::NUM_EDGETYPE];
+    int32_t tmp_count[SAO::NUM_EDGETYPE];
+
+    memset(tmp_stats, 0, sizeof(tmp_stats));
+    memset(tmp_count, 0, sizeof(tmp_count));
+
+    for (y = 0; y < endY; y++)
+    {
+        for (x = 0; x < endX; x++)
+        {
+            int signDown = signOf2(rec[x], rec[x + stride]);
+            X265_CHECK(signDown == signOf(rec[x] - rec[x + stride]), "signDown check failure\n");
+            uint32_t edgeType = signDown + upBuff1[x] + 2; // maps {-2..2} to {0..4}
+            upBuff1[x] = (int8_t)(-signDown); // becomes the "up" sign for the next row
+
+            tmp_stats[edgeType] += (fenc[x] - rec[x]);
+            tmp_count[edgeType]++;
+        }
+        fenc += stride;
+        rec += stride;
+    }
+
+    for (x = 0; x < SAO::NUM_EDGETYPE; x++) // remap raw edge types to SAO class order
+    {
+        stats[SAO::s_eoTable[x]] += tmp_stats[x];
+        count[SAO::s_eoTable[x]] += tmp_count[x];
+    }
+}
+
+// Edge-offset class 2 (down-right diagonal) statistics; upBuff1/upBufft are swapped per row as sign context.
+void saoCuStatsE2_c(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int8_t *upBufft, int endX, int endY, int32_t *stats, int32_t *count)
+{
+    X265_CHECK(endX < MAX_CU_SIZE, "endX check failure\n");
+    X265_CHECK(endY < MAX_CU_SIZE, "endY check failure\n");
+
+    int x, y;
+    int32_t tmp_stats[SAO::NUM_EDGETYPE];
+    int32_t tmp_count[SAO::NUM_EDGETYPE];
+
+    memset(tmp_stats, 0, sizeof(tmp_stats));
+    memset(tmp_count, 0, sizeof(tmp_count));
+
+    for (y = 0; y < endY; y++)
+    {
+        upBufft[0] = signOf(rec[stride] - rec[-1]); // seed from pixel left of next row; caller must provide rec[-1]
+        for (x = 0; x < endX; x++)
+        {
+            int signDown = signOf2(rec[x], rec[x + stride + 1]);
+            X265_CHECK(signDown == signOf(rec[x] - rec[x + stride + 1]), "signDown check failure\n");
+            uint32_t edgeType = signDown + upBuff1[x] + 2; // maps {-2..2} to {0..4}
+            upBufft[x + 1] = (int8_t)(-signDown);
+            tmp_stats[edgeType] += (fenc[x] - rec[x]);
+            tmp_count[edgeType]++;
+        }
+
+        std::swap(upBuff1, upBufft); // this row's down-signs become next row's up-signs
+
+        rec += stride;
+        fenc += stride;
+    }
+
+    for (x = 0; x < SAO::NUM_EDGETYPE; x++) // remap raw edge types to SAO class order
+    {
+        stats[SAO::s_eoTable[x]] += tmp_stats[x];
+        count[SAO::s_eoTable[x]] += tmp_count[x];
+    }
+}
+
+// Edge-offset class 3 (down-left diagonal) statistics; upBuff1 carries sign context from the row above.
+void saoCuStatsE3_c(const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count)
+{
+    X265_CHECK(endX < MAX_CU_SIZE, "endX check failure\n");
+    X265_CHECK(endY < MAX_CU_SIZE, "endY check failure\n");
+
+    int x, y;
+    int32_t tmp_stats[SAO::NUM_EDGETYPE];
+    int32_t tmp_count[SAO::NUM_EDGETYPE];
+
+    memset(tmp_stats, 0, sizeof(tmp_stats));
+    memset(tmp_count, 0, sizeof(tmp_count));
+
+    for (y = 0; y < endY; y++)
+    {
+        for (x = 0; x < endX; x++)
+        {
+            int signDown = signOf2(rec[x], rec[x + stride - 1]);
+            X265_CHECK(signDown == signOf(rec[x] - rec[x + stride - 1]), "signDown check failure\n");
+            X265_CHECK(abs(upBuff1[x]) <= 1, "upBuffer1 check failure\n");
+
+            uint32_t edgeType = signDown + upBuff1[x] + 2; // maps {-2..2} to {0..4}
+            upBuff1[x - 1] = (int8_t)(-signDown); // NOTE: writes upBuff1[-1] at x==0; caller must pass a pointer with valid storage before it
+            tmp_stats[edgeType] += (fenc[x] - rec[x]);
+            tmp_count[edgeType]++;
+        }
+
+        upBuff1[endX - 1] = signOf(rec[endX - 1 + stride] - rec[endX]); // refresh rightmost context (reads one pixel past the block)
+
+        rec += stride;
+        fenc += stride;
+    }
+
+    for (x = 0; x < SAO::NUM_EDGETYPE; x++) // remap raw edge types to SAO class order
+    {
+        stats[SAO::s_eoTable[x]] += tmp_stats[x];
+        count[SAO::s_eoTable[x]] += tmp_count[x];
+    }
+}
+
+// Install the portable C implementations of the SAO statistics kernels into the primitive table.
+void setupSaoPrimitives_c(EncoderPrimitives &p)
+{
+    // TODO: move other sao functions to here
+    p.saoCuStatsBO = saoCuStatsBO_c;
+    p.saoCuStatsE0 = saoCuStatsE0_c;
+    p.saoCuStatsE1 = saoCuStatsE1_c;
+    p.saoCuStatsE2 = saoCuStatsE2_c;
+    p.saoCuStatsE3 = saoCuStatsE3_c;
+}
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/sao.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,154 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Min Chen <chenm003@163.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_SAO_H
+#define X265_SAO_H
+
+#include "common.h"
+#include "frame.h"
+#include "entropy.h"
+
+namespace X265_NS {
+// private namespace
+
+enum SAOTypeLen
+{
+    SAO_EO_LEN = 4,         // edge-offset classes per direction (plus class 0 = "no offset")
+    SAO_BO_LEN = 4,         // band offset signals offsets for 4 consecutive bands
+    SAO_NUM_BO_CLASSES = 32 // total bands: sample range split by the top SAO_BO_BITS bits
+};
+
+enum SAOType
+{
+    SAO_EO_0 = 0, // edge offset, horizontal neighbours
+    SAO_EO_1,     // edge offset, vertical neighbours
+    SAO_EO_2,     // edge offset, down-right diagonal
+    SAO_EO_3,     // edge offset, down-left diagonal
+    SAO_BO,       // band offset
+    MAX_NUM_SAO_TYPE
+};
+
+class SAO
+{
+public:
+
+    enum { SAO_MAX_DEPTH = 4 };
+    enum { SAO_BO_BITS  = 5 }; // 2^5 = 32 bands for band offset
+    enum { MAX_NUM_SAO_CLASS = 33 }; // 32 bands + class 0 ("no offset")
+    enum { SAO_BIT_INC = 0 }; /* in HM12.0, it wrote as X265_MAX(X265_DEPTH - 10, 0) */
+    enum { OFFSET_THRESH = 1 << X265_MIN(X265_DEPTH - 5, 5) }; // max signalled offset magnitude (exclusive)
+    enum { NUM_EDGETYPE = 5 }; // edge categories 0..4 produced by the saoCuStatsE* kernels
+    enum { NUM_PLANE = 3 };
+    enum { NUM_MERGE_MODE = 3 };
+
+    static const uint32_t s_eoTable[NUM_EDGETYPE]; // maps raw edge type to SAO class index
+
+    typedef int32_t (PerClass[MAX_NUM_SAO_TYPE][MAX_NUM_SAO_CLASS]);
+    typedef int32_t (PerPlane[NUM_PLANE][MAX_NUM_SAO_TYPE][MAX_NUM_SAO_CLASS]);
+
+protected:
+
+    /* allocated per part */
+    PerClass*   m_count;     // pixel counts per plane/type/class
+    PerClass*   m_offset;    // chosen (RD-optimized) offsets
+    PerClass*   m_offsetOrg; // accumulated (orig - recon) sums
+
+    /* allocated per CTU */
+    PerPlane*   m_countPreDblk;     // statistics gathered before deblocking
+    PerPlane*   m_offsetOrgPreDblk;
+
+    double      m_depthSaoRate[2][4];
+    int8_t      m_offsetBo[SAO_NUM_BO_CLASSES];
+    int8_t      m_offsetEo[NUM_EDGETYPE];
+
+    int         m_numCuInWidth;
+    int         m_numCuInHeight;
+    int         m_numPlanes;     // 1 for luma-only, 3 when chroma SAO is enabled
+    int         m_hChromaShift;
+    int         m_vChromaShift;
+
+    pixel*      m_clipTable;
+    pixel*      m_clipTableBase;
+
+    pixel*      m_tmpU1[3]; // saved row/column pixels used as unfiltered context
+    pixel*      m_tmpU2[3];
+    pixel*      m_tmpL1;
+    pixel*      m_tmpL2;
+
+public:
+
+    struct SAOContexts
+    {
+        Entropy cur;  // CABAC snapshots used during RD search
+        Entropy next;
+        Entropy temp;
+    };
+
+    Frame*      m_frame;
+    Entropy     m_entropyCoder;
+    SAOContexts m_rdContexts;
+
+    x265_param* m_param;
+    int         m_refDepth;
+    int         m_numNoSao[2]; // per luma/chroma: CTUs that selected no SAO (typeIdx < 0)
+
+    double      m_lumaLambda;
+    double      m_chromaLambda;
+    /* TODO: No doubles for distortion */
+
+    SAO();
+
+    bool create(x265_param* param);
+    void destroy();
+
+    void allocSaoParam(SAOParam* saoParam) const;
+
+    void startSlice(Frame* pic, Entropy& initState, int qp);
+    void resetStats();
+    void resetSaoUnit(SaoCtuParam* saoUnit);
+
+    // CTU-based SAO process without slice granularity
+    void processSaoCu(int addr, int typeIdx, int plane);
+    void processSaoUnitRow(SaoCtuParam* ctuParam, int idxY, int plane);
+
+    void copySaoUnit(SaoCtuParam* saoUnitDst, const SaoCtuParam* saoUnitSrc);
+
+    void calcSaoStatsCu(int addr, int plane);
+    void calcSaoStatsCu_BeforeDblk(Frame* pic, int idxX, int idxY);
+
+    void saoComponentParamDist(SAOParam* saoParam, int addr, int addrUp, int addrLeft, SaoCtuParam mergeSaoParam[2], double* mergeDist);
+    void sao2ChromaParamDist(SAOParam* saoParam, int addr, int addrUp, int addrLeft, SaoCtuParam mergeSaoParam[][2], double* mergeDist);
+
+    inline int estIterOffset(int typeIdx, int classIdx, double lambda, int offset, int32_t count, int32_t offsetOrg,
+                             int32_t* currentDistortionTableBo, double* currentRdCostTableBo);
+    inline int64_t estSaoTypeDist(int plane, int typeIdx, double lambda, int32_t* currentDistortionTableBo, double* currentRdCostTableBo);
+
+    void rdoSaoUnitRowEnd(const SAOParam* saoParam, int numctus);
+    void rdoSaoUnitRow(SAOParam* saoParam, int idxY);
+};
+
+}
+
+#endif // ifndef X265_SAO_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/search.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,3557 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "picyuv.h"
+#include "cudata.h"
+
+#include "search.h"
+#include "entropy.h"
+#include "rdcost.h"
+
+#include "analysis.h"  // TLD
+#include "framedata.h"
+
+using namespace X265_NS;
+
+#if _MSC_VER
+#pragma warning(disable: 4800) // 'uint8_t' : forcing value to bool 'true' or 'false' (performance warning)
+#pragma warning(disable: 4244) // '=' : conversion from 'int' to 'uint8_t', possible loss of data)
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
+
+#define MVP_IDX_BITS 1 // bits to signal the motion vector predictor index
+
+ALIGN_VAR_32(const int16_t, Search::zeroShort[MAX_CU_SIZE]) = { 0 }; // shared all-zero int16 row; presumably used wherever a zero residual/row is needed
+
+// Default constructor: zero/null all buffers and state; real allocation happens in initSearch().
+Search::Search()
+{
+    memset(m_rqt, 0, sizeof(m_rqt));
+
+    for (int i = 0; i < 3; i++) // one slot per plane (Y, Cb, Cr)
+    {
+        m_qtTempTransformSkipFlag[i] = NULL;
+        m_qtTempCbf[i] = NULL;
+    }
+
+    m_numLayers = 0;
+    m_intraPred = NULL;
+    m_intraPredAngs = NULL;
+    m_fencScaled = NULL;
+    m_fencTransposed = NULL;
+    m_tsCoeff = NULL;
+    m_tsResidual = NULL;
+    m_tsRecon = NULL;
+    m_param = NULL;
+    m_slice = NULL;
+    m_frame = NULL;
+}
+
+// Allocate per-depth/per-layer RQT buffers and configure ME, quant and RD cost. Returns false on allocation failure.
+bool Search::initSearch(const x265_param& param, ScalingList& scalingList)
+{
+    uint32_t maxLog2CUSize = g_log2Size[param.maxCUSize];
+    m_param = &param;
+    m_bEnableRDOQ = !!param.rdoqLevel;
+    m_bFrameParallel = param.frameNumThreads > 1;
+    m_numLayers = g_log2Size[param.maxCUSize] - 2; // qt layers: log2size 2 (4x4) .. maxLog2CUSize
+
+    m_rdCost.setPsyRdScale(param.psyRd);
+    m_me.init(param.searchMethod, param.subpelRefine, param.internalCsp);
+
+    bool ok = m_quant.init(param.rdoqLevel, param.psyRdoq, scalingList, m_entropyCoder);
+    if (m_param->noiseReductionIntra || m_param->noiseReductionInter || m_param->rc.vbvBufferSize)
+        ok &= m_quant.allocNoiseReduction(param);
+
+    ok &= Predict::allocBuffers(param.internalCsp); /* sets m_hChromaShift & m_vChromaShift */
+
+    /* When frame parallelism is active, only 'refLagPixels' of reference frames will be guaranteed
+     * available for motion reference.  See refLagRows in FrameEncoder::compressCTURows() */
+    m_refLagPixels = m_bFrameParallel ? param.searchRange : param.sourceHeight;
+
+    uint32_t sizeL = 1 << (maxLog2CUSize * 2); // luma samples in a max CU
+    uint32_t sizeC = sizeL >> (m_hChromaShift + m_vChromaShift); // chroma samples per plane
+    uint32_t numPartitions = 1 << (maxLog2CUSize - LOG2_UNIT_SIZE) * 2;
+
+    /* these are indexed by qtLayer (log2size - 2) so nominally 0=4x4, 1=8x8, 2=16x16, 3=32x32
+     * the coeffRQT and reconQtYuv are allocated to the max CU size at every depth. The parts
+     * which are reconstructed at each depth are valid. At the end, the transform depth table
+     * is walked and the coeff and recon at the correct depths are collected */
+    for (uint32_t i = 0; i <= m_numLayers; i++)
+    {
+        CHECKED_MALLOC(m_rqt[i].coeffRQT[0], coeff_t, sizeL + sizeC * 2); // one buffer, sliced into Y/Cb/Cr below
+        m_rqt[i].coeffRQT[1] = m_rqt[i].coeffRQT[0] + sizeL;
+        m_rqt[i].coeffRQT[2] = m_rqt[i].coeffRQT[0] + sizeL + sizeC;
+        ok &= m_rqt[i].reconQtYuv.create(g_maxCUSize, param.internalCsp);
+        ok &= m_rqt[i].resiQtYuv.create(g_maxCUSize, param.internalCsp);
+    }
+
+    /* the rest of these buffers are indexed per-depth */
+    for (uint32_t i = 0; i <= g_maxCUDepth; i++)
+    {
+        int cuSize = g_maxCUSize >> i;
+        ok &= m_rqt[i].tmpResiYuv.create(cuSize, param.internalCsp);
+        ok &= m_rqt[i].tmpPredYuv.create(cuSize, param.internalCsp);
+        ok &= m_rqt[i].bidirPredYuv[0].create(cuSize, param.internalCsp);
+        ok &= m_rqt[i].bidirPredYuv[1].create(cuSize, param.internalCsp);
+    }
+
+    CHECKED_MALLOC(m_qtTempCbf[0], uint8_t, numPartitions * 3); // Y/Cb/Cr share one allocation
+    m_qtTempCbf[1] = m_qtTempCbf[0] + numPartitions;
+    m_qtTempCbf[2] = m_qtTempCbf[0] + numPartitions * 2;
+    CHECKED_MALLOC(m_qtTempTransformSkipFlag[0], uint8_t, numPartitions * 3);
+    m_qtTempTransformSkipFlag[1] = m_qtTempTransformSkipFlag[0] + numPartitions;
+    m_qtTempTransformSkipFlag[2] = m_qtTempTransformSkipFlag[0] + numPartitions * 2;
+
+    CHECKED_MALLOC(m_intraPred, pixel, (32 * 32) * (33 + 3)); // 33 angular buffers + 3 scratch planes
+    m_fencScaled = m_intraPred + 32 * 32;
+    m_fencTransposed = m_fencScaled + 32 * 32;
+    m_intraPredAngs = m_fencTransposed + 32 * 32;
+
+    CHECKED_MALLOC(m_tsCoeff,    coeff_t, MAX_TS_SIZE * MAX_TS_SIZE);
+    CHECKED_MALLOC(m_tsResidual, int16_t, MAX_TS_SIZE * MAX_TS_SIZE);
+    CHECKED_MALLOC(m_tsRecon,    pixel,   MAX_TS_SIZE * MAX_TS_SIZE);
+
+    return ok;
+
+fail: // CHECKED_MALLOC jumps here on allocation failure
+    return false;
+}
+
+// Release every buffer allocated by initSearch(); safe to run on partially-initialized state (members are NULL/zeroed).
+Search::~Search()
+{
+    for (uint32_t i = 0; i <= m_numLayers; i++)
+    {
+        X265_FREE(m_rqt[i].coeffRQT[0]); // frees the shared Y/Cb/Cr allocation
+        m_rqt[i].reconQtYuv.destroy();
+        m_rqt[i].resiQtYuv.destroy();
+    }
+
+    for (uint32_t i = 0; i <= g_maxCUDepth; i++)
+    {
+        m_rqt[i].tmpResiYuv.destroy();
+        m_rqt[i].tmpPredYuv.destroy();
+        m_rqt[i].bidirPredYuv[0].destroy();
+        m_rqt[i].bidirPredYuv[1].destroy();
+    }
+
+    X265_FREE(m_qtTempCbf[0]);
+    X265_FREE(m_qtTempTransformSkipFlag[0]);
+    X265_FREE(m_intraPred);
+    X265_FREE(m_tsCoeff);
+    X265_FREE(m_tsResidual);
+    X265_FREE(m_tsRecon);
+}
+
+// Propagate a QP to motion estimation, RD cost and quantizer; returns the QP clipped to the spec range used for quant.
+int Search::setLambdaFromQP(const CUData& ctu, int qp)
+{
+    X265_CHECK(qp >= QP_MIN && qp <= QP_MAX_MAX, "QP used for lambda is out of range\n");
+
+    m_me.setQP(qp);
+    m_rdCost.setQP(*m_slice, qp);
+
+    int quantQP = x265_clip3(QP_MIN, QP_MAX_SPEC, qp); // quant uses the spec-limited QP, lambda may use the wider range
+    m_quant.setQPforQuant(ctu, quantQP);
+    return quantQP;
+}
+
+#if CHECKED_BUILD || _DEBUG
+// Debug aid: mark entropy contexts at and below fromDepth invalid so reads before writes are caught.
+void Search::invalidateContexts(int fromDepth)
+{
+    /* catch reads without previous writes */
+    for (int d = fromDepth; d < NUM_FULL_DEPTH; d++)
+    {
+        m_rqt[d].cur.markInvalid();
+        m_rqt[d].rqtTemp.markInvalid();
+        m_rqt[d].rqtRoot.markInvalid();
+        m_rqt[d].rqtTest.markInvalid();
+    }
+}
+#else
+void Search::invalidateContexts(int) {} // no-op in release builds
+#endif
+
+// Recursively code chroma CBF flags for the residual quad-tree rooted at (tuDepth, absPartIdx).
+void Search::codeSubdivCbfQTChroma(const CUData& cu, uint32_t tuDepth, uint32_t absPartIdx)
+{
+    uint32_t subdiv     = tuDepth < cu.m_tuDepth[absPartIdx]; // nonzero if this TU is split further
+    uint32_t log2TrSize = cu.m_log2CUSize[0] - tuDepth;
+
+    if (!(log2TrSize - m_hChromaShift < 2)) // skip when the chroma TU would be smaller than 4x4
+    {
+        if (!tuDepth || cu.getCbf(absPartIdx, TEXT_CHROMA_U, tuDepth - 1)) // only code if the parent CBF was set
+            m_entropyCoder.codeQtCbfChroma(cu, absPartIdx, TEXT_CHROMA_U, tuDepth, !subdiv);
+        if (!tuDepth || cu.getCbf(absPartIdx, TEXT_CHROMA_V, tuDepth - 1))
+            m_entropyCoder.codeQtCbfChroma(cu, absPartIdx, TEXT_CHROMA_V, tuDepth, !subdiv);
+    }
+
+    if (subdiv)
+    {
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2; // 4x4 parts per quadrant
+        for (uint32_t qIdx = 0; qIdx < 4; ++qIdx, absPartIdx += qNumParts)
+            codeSubdivCbfQTChroma(cu, tuDepth + 1, absPartIdx);
+    }
+}
+
+// Recursively code chroma coefficients of one plane (ttype) for the residual quad-tree at (tuDepth, absPartIdx).
+void Search::codeCoeffQTChroma(const CUData& cu, uint32_t tuDepth, uint32_t absPartIdx, TextType ttype)
+{
+    if (!cu.getCbf(absPartIdx, ttype, tuDepth)) // nothing coded in this subtree
+        return;
+
+    uint32_t log2TrSize = cu.m_log2CUSize[0] - tuDepth;
+
+    if (tuDepth < cu.m_tuDepth[absPartIdx]) // split: recurse into the four quadrants
+    {
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2;
+        for (uint32_t qIdx = 0; qIdx < 4; ++qIdx, absPartIdx += qNumParts)
+            codeCoeffQTChroma(cu, tuDepth + 1, absPartIdx, ttype);
+
+        return;
+    }
+
+    uint32_t tuDepthC = tuDepth;
+    uint32_t log2TrSizeC = log2TrSize - m_hChromaShift;
+
+    if (log2TrSizeC < 2) // chroma TU below 4x4: coded once at the parent 4x4 level
+    {
+        X265_CHECK(log2TrSize == 2 && m_csp != X265_CSP_I444 && tuDepth, "invalid tuDepth\n");
+        if (absPartIdx & 3) // only the first of the four luma parts carries the chroma TU
+            return;
+        log2TrSizeC = 2;
+        tuDepthC--;
+    }
+
+    uint32_t qtLayer = log2TrSize - 2;
+
+    if (m_csp != X265_CSP_I422)
+    {
+        uint32_t shift = (m_csp == X265_CSP_I420) ? 2 : 0;
+        uint32_t coeffOffset = absPartIdx << (LOG2_UNIT_SIZE * 2 - shift);
+        coeff_t* coeff = m_rqt[qtLayer].coeffRQT[ttype] + coeffOffset;
+        m_entropyCoder.codeCoeffNxN(cu, coeff, absPartIdx, log2TrSizeC, ttype);
+    }
+    else // 4:2:2: each chroma TU is a vertical pair of square sub-TUs, coded separately
+    {
+        uint32_t coeffOffset = absPartIdx << (LOG2_UNIT_SIZE * 2 - 1);
+        coeff_t* coeff = m_rqt[qtLayer].coeffRQT[ttype] + coeffOffset;
+        uint32_t subTUSize = 1 << (log2TrSizeC * 2);
+        uint32_t tuNumParts = 2 << ((log2TrSizeC - LOG2_UNIT_SIZE) * 2);
+        if (cu.getCbf(absPartIdx, ttype, tuDepth + 1))
+            m_entropyCoder.codeCoeffNxN(cu, coeff, absPartIdx, log2TrSizeC, ttype);
+        if (cu.getCbf(absPartIdx + tuNumParts, ttype, tuDepth + 1))
+            m_entropyCoder.codeCoeffNxN(cu, coeff + subTUSize, absPartIdx + tuNumParts, log2TrSizeC, ttype);
+    }
+}
+
+/* RD-search one luma intra TU: measure the cost of coding it at this size
+ * ("full") and, where the depth range allows, the cost of splitting it into
+ * four children; keep whichever is cheaper. Costs are accumulated into
+ * outCost; the reconstruction of the winner is written back into the frame's
+ * recon picture so later intra predictions can reference it. */
+void Search::codeIntraLumaQT(Mode& mode, const CUGeom& cuGeom, uint32_t tuDepth, uint32_t absPartIdx, bool bAllowSplit, Cost& outCost, const uint32_t depthRange[2])
+{
+    CUData& cu = mode.cu;
+    uint32_t fullDepth  = cuGeom.depth + tuDepth;
+    uint32_t log2TrSize = cuGeom.log2CUSize - tuDepth;
+    uint32_t qtLayer    = log2TrSize - 2;
+    uint32_t sizeIdx    = log2TrSize - 2;
+    bool mightNotSplit  = log2TrSize <= depthRange[1];
+    bool mightSplit     = (log2TrSize > depthRange[0]) && (bAllowSplit || !mightNotSplit);
+
+    /* If maximum RD penalty, force splits at TU size 32x32 if SPS allows TUs of 16x16 */
+    if (m_param->rdPenalty == 2 && m_slice->m_sliceType != I_SLICE && log2TrSize == 5 && depthRange[0] <= 4)
+    {
+        mightNotSplit = false;
+        mightSplit = true;
+    }
+
+    Cost fullCost;
+    uint32_t bCBF = 0;
+
+    pixel*   reconQt = m_rqt[qtLayer].reconQtYuv.getLumaAddr(absPartIdx);
+    uint32_t reconQtStride = m_rqt[qtLayer].reconQtYuv.m_size;
+
+    if (mightNotSplit)
+    {
+        // save entropy state so the split trial below starts from the same point
+        if (mightSplit)
+            m_entropyCoder.store(m_rqt[fullDepth].rqtRoot);
+
+        const pixel* fenc = mode.fencYuv->getLumaAddr(absPartIdx);
+        pixel*   pred     = mode.predYuv.getLumaAddr(absPartIdx);
+        int16_t* residual = m_rqt[cuGeom.depth].tmpResiYuv.getLumaAddr(absPartIdx);
+        uint32_t stride   = mode.fencYuv->m_size;
+
+        // init availability pattern
+        uint32_t lumaPredMode = cu.m_lumaIntraDir[absPartIdx];
+        IntraNeighbors intraNeighbors;
+        initIntraNeighbors(cu, absPartIdx, tuDepth, true, &intraNeighbors);
+        initAdiPattern(cu, cuGeom, absPartIdx, intraNeighbors, lumaPredMode);
+
+        // get prediction signal
+        predIntraLumaAng(lumaPredMode, pred, stride, log2TrSize);
+
+        cu.setTransformSkipSubParts(0, TEXT_LUMA, absPartIdx, fullDepth);
+        cu.setTUDepthSubParts(tuDepth, absPartIdx, fullDepth);
+
+        uint32_t coeffOffsetY = absPartIdx << (LOG2_UNIT_SIZE * 2);
+        coeff_t* coeffY       = m_rqt[qtLayer].coeffRQT[0] + coeffOffsetY;
+
+        // store original entropy coding status
+        if (m_bEnableRDOQ)
+            m_entropyCoder.estBit(m_entropyCoder.m_estBitsSbac, log2TrSize, true);
+
+        primitives.cu[sizeIdx].calcresidual(fenc, pred, residual, stride);
+
+        // forward transform + quant; numSig == 0 means the TU codes no residual
+        uint32_t numSig = m_quant.transformNxN(cu, fenc, stride, residual, stride, coeffY, log2TrSize, TEXT_LUMA, absPartIdx, false);
+        if (numSig)
+        {
+            m_quant.invtransformNxN(cu, residual, stride, coeffY, log2TrSize, TEXT_LUMA, true, false, numSig);
+            primitives.cu[sizeIdx].add_ps(reconQt, reconQtStride, pred, residual, stride, stride);
+        }
+        else
+            // no coded residual, recon = pred
+            primitives.cu[sizeIdx].copy_pp(reconQt, reconQtStride, pred, stride);
+
+        bCBF = !!numSig << tuDepth;
+        cu.setCbfSubParts(bCBF, TEXT_LUMA, absPartIdx, fullDepth);
+        fullCost.distortion = primitives.cu[sizeIdx].sse_pp(reconQt, reconQtStride, fenc, stride);
+
+        // count the bits this TU would cost, including the CU-level syntax
+        // that is only signalled once (at absPartIdx == 0)
+        m_entropyCoder.resetBits();
+        if (!absPartIdx)
+        {
+            if (!cu.m_slice->isIntra())
+            {
+                if (cu.m_slice->m_pps->bTransquantBypassEnabled)
+                    m_entropyCoder.codeCUTransquantBypassFlag(cu.m_tqBypass[0]);
+                m_entropyCoder.codeSkipFlag(cu, 0);
+                m_entropyCoder.codePredMode(cu.m_predMode[0]);
+            }
+
+            m_entropyCoder.codePartSize(cu, 0, cuGeom.depth);
+        }
+        if (cu.m_partSize[0] == SIZE_2Nx2N)
+        {
+            if (!absPartIdx)
+                m_entropyCoder.codeIntraDirLumaAng(cu, 0, false);
+        }
+        else
+        {
+            // NxN partitions: luma intra directions are coded once per quadrant
+            uint32_t qNumParts = cuGeom.numPartitions >> 2;
+            if (!tuDepth)
+            {
+                for (uint32_t qIdx = 0; qIdx < 4; ++qIdx)
+                    m_entropyCoder.codeIntraDirLumaAng(cu, qIdx * qNumParts, false);
+            }
+            else if (!(absPartIdx & (qNumParts - 1)))
+                m_entropyCoder.codeIntraDirLumaAng(cu, absPartIdx, false);
+        }
+        // subdiv flag is only present when this TU size is not forced
+        if (log2TrSize != depthRange[0])
+            m_entropyCoder.codeTransformSubdivFlag(0, 5 - log2TrSize);
+
+        m_entropyCoder.codeQtCbfLuma(!!numSig, tuDepth);
+
+        if (cu.getCbf(absPartIdx, TEXT_LUMA, tuDepth))
+            m_entropyCoder.codeCoeffNxN(cu, coeffY, absPartIdx, log2TrSize, TEXT_LUMA);
+
+        fullCost.bits = m_entropyCoder.getNumberOfWrittenBits();
+
+        // rdPenalty: inflate 32x32 TU bit cost in non-I slices to bias against it
+        if (m_param->rdPenalty && log2TrSize == 5 && m_slice->m_sliceType != I_SLICE)
+            fullCost.bits *= 4;
+
+        if (m_rdCost.m_psyRd)
+        {
+            fullCost.energy = m_rdCost.psyCost(sizeIdx, fenc, mode.fencYuv->m_size, reconQt, reconQtStride);
+            fullCost.rdcost = m_rdCost.calcPsyRdCost(fullCost.distortion, fullCost.bits, fullCost.energy);
+        }
+        else
+            fullCost.rdcost = m_rdCost.calcRdCost(fullCost.distortion, fullCost.bits);
+    }
+    else
+        fullCost.rdcost = MAX_INT64;
+
+    if (mightSplit)
+    {
+        if (mightNotSplit)
+        {
+            m_entropyCoder.store(m_rqt[fullDepth].rqtTest);  // save state after full TU encode
+            m_entropyCoder.load(m_rqt[fullDepth].rqtRoot);   // prep state of split encode
+        }
+
+        /* code split block */
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2;
+
+        // children at 4x4 may try transform-skip instead of further splitting
+        int checkTransformSkip = m_slice->m_pps->bTransformSkipEnabled && (log2TrSize - 1) <= MAX_LOG2_TS_SIZE && !cu.m_tqBypass[0];
+        if (m_param->bEnableTSkipFast)
+            checkTransformSkip &= cu.m_partSize[0] != SIZE_2Nx2N;
+
+        Cost splitCost;
+        uint32_t cbf = 0;
+        for (uint32_t qIdx = 0, qPartIdx = absPartIdx; qIdx < 4; ++qIdx, qPartIdx += qNumParts)
+        {
+            if (checkTransformSkip)
+                codeIntraLumaTSkip(mode, cuGeom, tuDepth + 1, qPartIdx, splitCost);
+            else
+                codeIntraLumaQT(mode, cuGeom, tuDepth + 1, qPartIdx, bAllowSplit, splitCost, depthRange);
+
+            cbf |= cu.getCbf(qPartIdx, TEXT_LUMA, tuDepth + 1);
+        }
+        // propagate the children's combined CBF up to this depth
+        for (uint32_t offs = 0; offs < 4 * qNumParts; offs++)
+            cu.m_cbf[0][absPartIdx + offs] |= (cbf << tuDepth);
+
+        if (mightNotSplit && log2TrSize != depthRange[0])
+        {
+            /* If we could have coded this TU depth, include cost of subdiv flag */
+            m_entropyCoder.resetBits();
+            m_entropyCoder.codeTransformSubdivFlag(1, 5 - log2TrSize);
+            splitCost.bits += m_entropyCoder.getNumberOfWrittenBits();
+
+            if (m_rdCost.m_psyRd)
+                splitCost.rdcost = m_rdCost.calcPsyRdCost(splitCost.distortion, splitCost.bits, splitCost.energy);
+            else
+                splitCost.rdcost = m_rdCost.calcRdCost(splitCost.distortion, splitCost.bits);
+        }
+
+        if (splitCost.rdcost < fullCost.rdcost)
+        {
+            // split wins: children already wrote their recon; just accumulate cost
+            outCost.rdcost     += splitCost.rdcost;
+            outCost.distortion += splitCost.distortion;
+            outCost.bits       += splitCost.bits;
+            outCost.energy     += splitCost.energy;
+            return;
+        }
+        else
+        {
+            // recover entropy state of full-size TU encode
+            m_entropyCoder.load(m_rqt[fullDepth].rqtTest);
+
+            // recover transform index and Cbf values
+            cu.setTUDepthSubParts(tuDepth, absPartIdx, fullDepth);
+            cu.setCbfSubParts(bCBF, TEXT_LUMA, absPartIdx, fullDepth);
+            cu.setTransformSkipSubParts(0, TEXT_LUMA, absPartIdx, fullDepth);
+        }
+    }
+
+    // set reconstruction for next intra prediction blocks if full TU prediction won
+    PicYuv*  reconPic = m_frame->m_reconPic;
+    pixel*   picReconY = reconPic->getLumaAddr(cu.m_cuAddr, cuGeom.absPartIdx + absPartIdx);
+    intptr_t picStride = reconPic->m_stride;
+    primitives.cu[sizeIdx].copy_pp(picReconY, picStride, reconQt, reconQtStride);
+
+    outCost.rdcost     += fullCost.rdcost;
+    outCost.distortion += fullCost.distortion;
+    outCost.bits       += fullCost.bits;
+    outCost.energy     += fullCost.energy;
+}
+
+/* RD-search a 4x4 luma intra TU twice -- once with the normal transform and
+ * once with transform skip -- and keep the cheaper candidate. Accumulates the
+ * winner's cost into outCost and writes its reconstruction into the frame's
+ * recon picture for use by subsequent intra predictions. */
+void Search::codeIntraLumaTSkip(Mode& mode, const CUGeom& cuGeom, uint32_t tuDepth, uint32_t absPartIdx, Cost& outCost)
+{
+    uint32_t fullDepth = cuGeom.depth + tuDepth;
+    uint32_t log2TrSize = cuGeom.log2CUSize - tuDepth;
+    uint32_t tuSize = 1 << log2TrSize;
+
+    X265_CHECK(tuSize <= MAX_TS_SIZE, "transform skip is only possible at 4x4 TUs\n");
+
+    CUData& cu = mode.cu;
+    Yuv* predYuv = &mode.predYuv;
+    const Yuv* fencYuv = mode.fencYuv;
+
+    Cost fullCost;
+    fullCost.rdcost = MAX_INT64;
+    int      bTSkip = 0;    // best candidate's tskip flag
+    uint32_t bCBF = 0;      // best candidate's CBF
+
+    const pixel* fenc = fencYuv->getLumaAddr(absPartIdx);
+    pixel*   pred = predYuv->getLumaAddr(absPartIdx);
+    int16_t* residual = m_rqt[cuGeom.depth].tmpResiYuv.getLumaAddr(absPartIdx);
+    uint32_t stride = fencYuv->m_size;
+    uint32_t sizeIdx = log2TrSize - 2;
+
+    // init availability pattern
+    uint32_t lumaPredMode = cu.m_lumaIntraDir[absPartIdx];
+    IntraNeighbors intraNeighbors;
+    initIntraNeighbors(cu, absPartIdx, tuDepth, true, &intraNeighbors);
+    initAdiPattern(cu, cuGeom, absPartIdx, intraNeighbors, lumaPredMode);
+
+    // get prediction signal
+    predIntraLumaAng(lumaPredMode, pred, stride, log2TrSize);
+
+    cu.setTUDepthSubParts(tuDepth, absPartIdx, fullDepth);
+
+    uint32_t qtLayer = log2TrSize - 2;
+    uint32_t coeffOffsetY = absPartIdx << (LOG2_UNIT_SIZE * 2);
+    coeff_t* coeffY = m_rqt[qtLayer].coeffRQT[0] + coeffOffsetY;
+    pixel*   reconQt = m_rqt[qtLayer].reconQtYuv.getLumaAddr(absPartIdx);
+    uint32_t reconQtStride = m_rqt[qtLayer].reconQtYuv.m_size;
+
+    // store original entropy coding status
+    m_entropyCoder.store(m_rqt[fullDepth].rqtRoot);
+
+    if (m_bEnableRDOQ)
+        m_entropyCoder.estBit(m_entropyCoder.m_estBitsSbac, log2TrSize, true);
+
+    // trial 0: normal transform path (coeffs/recon go to the RQT buffers);
+    // trial 1: transform skip (coeffs/recon go to scratch m_tsCoeff/m_tsRecon)
+    int checkTransformSkip = 1;
+    for (int useTSkip = 0; useTSkip <= checkTransformSkip; useTSkip++)
+    {
+        uint64_t tmpCost;
+        uint32_t tmpEnergy = 0;
+
+        coeff_t* coeff = (useTSkip ? m_tsCoeff : coeffY);
+        pixel*   tmpRecon = (useTSkip ? m_tsRecon : reconQt);
+        uint32_t tmpReconStride = (useTSkip ? MAX_TS_SIZE : reconQtStride);
+
+        primitives.cu[sizeIdx].calcresidual(fenc, pred, residual, stride);
+
+        uint32_t numSig = m_quant.transformNxN(cu, fenc, stride, residual, stride, coeff, log2TrSize, TEXT_LUMA, absPartIdx, useTSkip);
+        if (numSig)
+        {
+            m_quant.invtransformNxN(cu, residual, stride, coeff, log2TrSize, TEXT_LUMA, true, useTSkip, numSig);
+            primitives.cu[sizeIdx].add_ps(tmpRecon, tmpReconStride, pred, residual, stride, stride);
+        }
+        else if (useTSkip)
+        {
+            /* do not allow tskip if CBF=0, pretend we did not try tskip */
+            checkTransformSkip = 0;
+            break;
+        }
+        else
+            // no residual coded, recon = pred
+            primitives.cu[sizeIdx].copy_pp(tmpRecon, tmpReconStride, pred, stride);
+
+        sse_ret_t tmpDist = primitives.cu[sizeIdx].sse_pp(tmpRecon, tmpReconStride, fenc, stride);
+
+        cu.setTransformSkipSubParts(useTSkip, TEXT_LUMA, absPartIdx, fullDepth);
+        cu.setCbfSubParts((!!numSig) << tuDepth, TEXT_LUMA, absPartIdx, fullDepth);
+
+        // both trials must count bits from the same entropy starting state
+        if (useTSkip)
+            m_entropyCoder.load(m_rqt[fullDepth].rqtRoot);
+
+        m_entropyCoder.resetBits();
+        if (!absPartIdx)
+        {
+            if (!cu.m_slice->isIntra())
+            {
+                if (cu.m_slice->m_pps->bTransquantBypassEnabled)
+                    m_entropyCoder.codeCUTransquantBypassFlag(cu.m_tqBypass[0]);
+                m_entropyCoder.codeSkipFlag(cu, 0);
+                m_entropyCoder.codePredMode(cu.m_predMode[0]);
+            }
+
+            m_entropyCoder.codePartSize(cu, 0, cuGeom.depth);
+        }
+        if (cu.m_partSize[0] == SIZE_2Nx2N)
+        {
+            if (!absPartIdx)
+                m_entropyCoder.codeIntraDirLumaAng(cu, 0, false);
+        }
+        else
+        {
+            // NxN partitions: luma intra directions are coded once per quadrant
+            uint32_t qNumParts = cuGeom.numPartitions >> 2;
+            if (!tuDepth)
+            {
+                for (uint32_t qIdx = 0; qIdx < 4; ++qIdx)
+                    m_entropyCoder.codeIntraDirLumaAng(cu, qIdx * qNumParts, false);
+            }
+            else if (!(absPartIdx & (qNumParts - 1)))
+                m_entropyCoder.codeIntraDirLumaAng(cu, absPartIdx, false);
+        }
+        m_entropyCoder.codeTransformSubdivFlag(0, 5 - log2TrSize);
+
+        m_entropyCoder.codeQtCbfLuma(!!numSig, tuDepth);
+
+        if (cu.getCbf(absPartIdx, TEXT_LUMA, tuDepth))
+            m_entropyCoder.codeCoeffNxN(cu, coeff, absPartIdx, log2TrSize, TEXT_LUMA);
+
+        uint32_t tmpBits = m_entropyCoder.getNumberOfWrittenBits();
+
+        // remember the post-encode entropy state of the non-tskip trial so it
+        // can be restored if the normal transform wins
+        if (!useTSkip)
+            m_entropyCoder.store(m_rqt[fullDepth].rqtTemp);
+
+        if (m_rdCost.m_psyRd)
+        {
+            tmpEnergy = m_rdCost.psyCost(sizeIdx, fenc, fencYuv->m_size, tmpRecon, tmpReconStride);
+            tmpCost = m_rdCost.calcPsyRdCost(tmpDist, tmpBits, tmpEnergy);
+        }
+        else
+            tmpCost = m_rdCost.calcRdCost(tmpDist, tmpBits);
+
+        if (tmpCost < fullCost.rdcost)
+        {
+            bTSkip = useTSkip;
+            bCBF = !!numSig;
+            fullCost.rdcost = tmpCost;
+            fullCost.distortion = tmpDist;
+            fullCost.bits = tmpBits;
+            fullCost.energy = tmpEnergy;
+        }
+    }
+
+    if (bTSkip)
+    {
+        // tskip won: promote its scratch coeffs/recon into the RQT buffers
+        memcpy(coeffY, m_tsCoeff, sizeof(coeff_t) << (log2TrSize * 2));
+        primitives.cu[sizeIdx].copy_pp(reconQt, reconQtStride, m_tsRecon, tuSize);
+    }
+    else if (checkTransformSkip)
+    {
+        // normal transform won after tskip was tried: restore its CU flags
+        // and entropy coder state (tskip trial clobbered both)
+        cu.setTransformSkipSubParts(0, TEXT_LUMA, absPartIdx, fullDepth);
+        cu.setCbfSubParts(bCBF << tuDepth, TEXT_LUMA, absPartIdx, fullDepth);
+        m_entropyCoder.load(m_rqt[fullDepth].rqtTemp);
+    }
+
+    // set reconstruction for next intra prediction blocks
+    PicYuv*  reconPic = m_frame->m_reconPic;
+    pixel*   picReconY = reconPic->getLumaAddr(cu.m_cuAddr, cuGeom.absPartIdx + absPartIdx);
+    intptr_t picStride = reconPic->m_stride;
+    primitives.cu[sizeIdx].copy_pp(picReconY, picStride, reconQt, reconQtStride);
+
+    outCost.rdcost += fullCost.rdcost;
+    outCost.distortion += fullCost.distortion;
+    outCost.bits += fullCost.bits;
+    outCost.energy += fullCost.energy;
+}
+
+/* fast luma intra residual generation. Only perform the minimum number of TU splits required by the CU size.
+ * Unlike codeIntraLumaQT this does no RD search: it transforms/quantizes once
+ * per TU, stores coefficients directly in the CU, and reconstructs straight
+ * into the frame's recon picture. */
+void Search::residualTransformQuantIntra(Mode& mode, const CUGeom& cuGeom, uint32_t absPartIdx, uint32_t tuDepth, const uint32_t depthRange[2])
+{
+    CUData& cu = mode.cu;
+    uint32_t fullDepth  = cuGeom.depth + tuDepth;
+    uint32_t log2TrSize = cuGeom.log2CUSize - tuDepth;
+    bool     bCheckFull = log2TrSize <= depthRange[1];
+
+    X265_CHECK(m_slice->m_sliceType != I_SLICE, "residualTransformQuantIntra not intended for I slices\n");
+
+    /* we still respect rdPenalty == 2, we can forbid 32x32 intra TU. rdPenalty = 1 is impossible
+     * since we are not measuring RD cost */
+    if (m_param->rdPenalty == 2 && log2TrSize == 5 && depthRange[0] <= 4)
+        bCheckFull = false;
+
+    if (bCheckFull)
+    {
+        const pixel* fenc = mode.fencYuv->getLumaAddr(absPartIdx);
+        pixel*   pred     = mode.predYuv.getLumaAddr(absPartIdx);
+        int16_t* residual = m_rqt[cuGeom.depth].tmpResiYuv.getLumaAddr(absPartIdx);
+        uint32_t stride   = mode.fencYuv->m_size;
+
+        // init availability pattern
+        uint32_t lumaPredMode = cu.m_lumaIntraDir[absPartIdx];
+        IntraNeighbors intraNeighbors;
+        initIntraNeighbors(cu, absPartIdx, tuDepth, true, &intraNeighbors);
+        initAdiPattern(cu, cuGeom, absPartIdx, intraNeighbors, lumaPredMode);
+
+        // get prediction signal
+        predIntraLumaAng(lumaPredMode, pred, stride, log2TrSize);
+
+        X265_CHECK(!cu.m_transformSkip[TEXT_LUMA][absPartIdx], "unexpected tskip flag in residualTransformQuantIntra\n");
+        cu.setTUDepthSubParts(tuDepth, absPartIdx, fullDepth);
+
+        // coefficients are written directly into the CU (no RQT scratch layer)
+        uint32_t coeffOffsetY = absPartIdx << (LOG2_UNIT_SIZE * 2);
+        coeff_t* coeffY       = cu.m_trCoeff[0] + coeffOffsetY;
+
+        uint32_t sizeIdx   = log2TrSize - 2;
+        primitives.cu[sizeIdx].calcresidual(fenc, pred, residual, stride);
+
+        PicYuv*  reconPic = m_frame->m_reconPic;
+        pixel*   picReconY = reconPic->getLumaAddr(cu.m_cuAddr, cuGeom.absPartIdx + absPartIdx);
+        intptr_t picStride = reconPic->m_stride;
+
+        uint32_t numSig = m_quant.transformNxN(cu, fenc, stride, residual, stride, coeffY, log2TrSize, TEXT_LUMA, absPartIdx, false);
+        if (numSig)
+        {
+            // reconstruct in place in the frame recon picture
+            m_quant.invtransformNxN(cu, residual, stride, coeffY, log2TrSize, TEXT_LUMA, true, false, numSig);
+            primitives.cu[sizeIdx].add_ps(picReconY, picStride, pred, residual, stride, stride);
+            cu.setCbfSubParts(1 << tuDepth, TEXT_LUMA, absPartIdx, fullDepth);
+        }
+        else
+        {
+            // no coded residual, recon = pred
+            primitives.cu[sizeIdx].copy_pp(picReconY, picStride, pred, stride);
+            cu.setCbfSubParts(0, TEXT_LUMA, absPartIdx, fullDepth);
+        }
+    }
+    else
+    {
+        X265_CHECK(log2TrSize > depthRange[0], "intra luma split state failure\n");
+
+        /* code split block */
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2;
+        uint32_t cbf = 0;
+        for (uint32_t qIdx = 0, qPartIdx = absPartIdx; qIdx < 4; ++qIdx, qPartIdx += qNumParts)
+        {
+            residualTransformQuantIntra(mode, cuGeom, qPartIdx, tuDepth + 1, depthRange);
+            cbf |= cu.getCbf(qPartIdx, TEXT_LUMA, tuDepth + 1);
+        }
+        // propagate the children's combined CBF up to this depth
+        for (uint32_t offs = 0; offs < 4 * qNumParts; offs++)
+            cu.m_cbf[0][absPartIdx + offs] |= (cbf << tuDepth);
+    }
+}
+
+/* Copy the winning luma coefficients and reconstruction out of the per-layer
+ * RQT scratch buffers into the CU / reconYuv, walking the tree down to the
+ * depth actually chosen for each TU. */
+void Search::extractIntraResultQT(CUData& cu, Yuv& reconYuv, uint32_t tuDepth, uint32_t absPartIdx)
+{
+    uint32_t log2TrSize = cu.m_log2CUSize[0] - tuDepth;
+
+    if (tuDepth == cu.m_tuDepth[absPartIdx])
+    {
+        uint32_t qtLayer    = log2TrSize - 2;
+
+        // copy transform coefficients
+        uint32_t coeffOffsetY = absPartIdx << (LOG2_UNIT_SIZE * 2);
+        coeff_t* coeffSrcY    = m_rqt[qtLayer].coeffRQT[0] + coeffOffsetY;
+        coeff_t* coeffDestY   = cu.m_trCoeff[0]            + coeffOffsetY;
+        memcpy(coeffDestY, coeffSrcY, sizeof(coeff_t) << (log2TrSize * 2));
+
+        // copy reconstruction
+        m_rqt[qtLayer].reconQtYuv.copyPartToPartLuma(reconYuv, absPartIdx, log2TrSize);
+    }
+    else
+    {
+        // TU was split: recurse into the four quadrants
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2;
+        for (uint32_t qIdx = 0; qIdx < 4; ++qIdx, absPartIdx += qNumParts)
+            extractIntraResultQT(cu, reconYuv, tuDepth + 1, absPartIdx);
+    }
+}
+
+/* Merge a 4:2:2 sub-TU CBF pair: shift each sub-TU's own CBF up one bit and
+ * OR in the combined (parent-level) CBF as the new low bit, so both the
+ * per-sub-TU and the shared parent flags are representable in one value. */
+inline void offsetCBFs(uint8_t subTUCBF[2])
+{
+    uint8_t combinedCBF = subTUCBF[0] | subTUCBF[1];
+    subTUCBF[0] = subTUCBF[0] << 1 | combinedCBF;
+    subTUCBF[1] = subTUCBF[1] << 1 | combinedCBF;
+}
+
+/* 4:2:2 post-TU split processing: after both vertical chroma sub-TUs of a
+ * 4:2:2 TU are coded, push their CBFs down one level and set the shared
+ * parent CBF across both partition ranges. */
+void Search::offsetSubTUCBFs(CUData& cu, TextType ttype, uint32_t tuDepth, uint32_t absPartIdx)
+{
+    uint32_t log2TrSize = cu.m_log2CUSize[0] - tuDepth;
+
+    if (log2TrSize == 2)
+    {
+        // four luma TUs share one 4x4 chroma TU; operate at the shared size
+        X265_CHECK(m_csp != X265_CSP_I444 && tuDepth, "invalid tuDepth\n");
+        ++log2TrSize;
+    }
+
+    // number of 4x4 partitions covered by one vertical sub-TU (half the TU)
+    uint32_t tuNumParts = 1 << ((log2TrSize - LOG2_UNIT_SIZE) * 2 - 1);
+
+    // move the CBFs down a level and set the parent CBF
+    uint8_t subTUCBF[2];
+    subTUCBF[0] = cu.getCbf(absPartIdx            , ttype, tuDepth);
+    subTUCBF[1] = cu.getCbf(absPartIdx+ tuNumParts, ttype, tuDepth);
+    offsetCBFs(subTUCBF);
+
+    cu.setCbfPartRange(subTUCBF[0] << tuDepth, ttype, absPartIdx             , tuNumParts);
+    cu.setCbfPartRange(subTUCBF[1] << tuDepth, ttype, absPartIdx + tuNumParts, tuNumParts);
+}
+
+/* returns distortion.
+ * Encode the chroma residual of one TU node: recurses to the luma-chosen TU
+ * depth, predicts/transforms/quantizes both chroma planes, updates CBFs and
+ * the frame recon picture, and accumulates psy energy into psyEnergy.
+ * Defers to codeIntraChromaTSkip when transform skip may be evaluated. */
+uint32_t Search::codeIntraChromaQt(Mode& mode, const CUGeom& cuGeom, uint32_t tuDepth, uint32_t absPartIdx, uint32_t& psyEnergy)
+{
+    CUData& cu = mode.cu;
+    uint32_t log2TrSize = cuGeom.log2CUSize - tuDepth;
+
+    if (tuDepth < cu.m_tuDepth[absPartIdx])
+    {
+        // follow the TU split chosen by the luma search
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2;
+        uint32_t outDist = 0, splitCbfU = 0, splitCbfV = 0;
+        for (uint32_t qIdx = 0, qPartIdx = absPartIdx; qIdx < 4; ++qIdx, qPartIdx += qNumParts)
+        {
+            outDist += codeIntraChromaQt(mode, cuGeom, tuDepth + 1, qPartIdx, psyEnergy);
+            splitCbfU |= cu.getCbf(qPartIdx, TEXT_CHROMA_U, tuDepth + 1);
+            splitCbfV |= cu.getCbf(qPartIdx, TEXT_CHROMA_V, tuDepth + 1);
+        }
+        // propagate the children's combined CBFs up to this depth
+        for (uint32_t offs = 0; offs < 4 * qNumParts; offs++)
+        {
+            cu.m_cbf[1][absPartIdx + offs] |= (splitCbfU << tuDepth);
+            cu.m_cbf[2][absPartIdx + offs] |= (splitCbfV << tuDepth);
+        }
+
+        return outDist;
+    }
+
+    uint32_t log2TrSizeC = log2TrSize - m_hChromaShift;
+    uint32_t tuDepthC = tuDepth;
+    if (log2TrSizeC < 2)
+    {
+        // four luma TUs share one 4x4 chroma TU, coded from the first partition
+        X265_CHECK(log2TrSize == 2 && m_csp != X265_CSP_I444 && tuDepth, "invalid tuDepth\n");
+        if (absPartIdx & 3)
+            return 0;
+        log2TrSizeC = 2;
+        tuDepthC--;
+    }
+
+    if (m_bEnableRDOQ)
+        m_entropyCoder.estBit(m_entropyCoder.m_estBitsSbac, log2TrSizeC, false);
+
+    bool checkTransformSkip = m_slice->m_pps->bTransformSkipEnabled && log2TrSizeC <= MAX_LOG2_TS_SIZE && !cu.m_tqBypass[0];
+    checkTransformSkip &= !m_param->bEnableTSkipFast || (log2TrSize <= MAX_LOG2_TS_SIZE && cu.m_transformSkip[TEXT_LUMA][absPartIdx]);
+    if (checkTransformSkip)
+        return codeIntraChromaTSkip(mode, cuGeom, tuDepth, tuDepthC, absPartIdx, psyEnergy);
+
+    ShortYuv& resiYuv = m_rqt[cuGeom.depth].tmpResiYuv;
+    uint32_t qtLayer = log2TrSize - 2;
+    uint32_t stride = mode.fencYuv->m_csize;
+    const uint32_t sizeIdxC = log2TrSizeC - 2;
+    sse_ret_t outDist = 0;
+
+    // 4:2:2 codes two vertical square sub-TUs per node; otherwise one section
+    uint32_t curPartNum = cuGeom.numPartitions >> tuDepthC * 2;
+    const SplitType splitType = (m_csp == X265_CSP_I422) ? VERTICAL_SPLIT : DONT_SPLIT;
+
+    TURecurse tuIterator(splitType, curPartNum, absPartIdx);
+    do
+    {
+        uint32_t absPartIdxC = tuIterator.absPartIdxTURelCU;
+
+        IntraNeighbors intraNeighbors;
+        initIntraNeighbors(cu, absPartIdxC, tuDepthC, false, &intraNeighbors);
+
+        for (uint32_t chromaId = TEXT_CHROMA_U; chromaId <= TEXT_CHROMA_V; chromaId++)
+        {
+            TextType ttype = (TextType)chromaId;
+
+            const pixel* fenc = mode.fencYuv->getChromaAddr(chromaId, absPartIdxC);
+            pixel*   pred     = mode.predYuv.getChromaAddr(chromaId, absPartIdxC);
+            int16_t* residual = resiYuv.getChromaAddr(chromaId, absPartIdxC);
+            uint32_t coeffOffsetC  = absPartIdxC << (LOG2_UNIT_SIZE * 2 - (m_hChromaShift + m_vChromaShift));
+            coeff_t* coeffC        = m_rqt[qtLayer].coeffRQT[chromaId] + coeffOffsetC;
+            pixel*   reconQt       = m_rqt[qtLayer].reconQtYuv.getChromaAddr(chromaId, absPartIdxC);
+            uint32_t reconQtStride = m_rqt[qtLayer].reconQtYuv.m_csize;
+            PicYuv*  reconPic = m_frame->m_reconPic;
+            pixel*   picReconC = reconPic->getChromaAddr(chromaId, cu.m_cuAddr, cuGeom.absPartIdx + absPartIdxC);
+            intptr_t picStride = reconPic->m_strideC;
+
+            // resolve DM_CHROMA to the co-located luma direction; 4:2:2 remaps
+            // angles through the standard mapping table
+            uint32_t chromaPredMode = cu.m_chromaIntraDir[absPartIdxC];
+            if (chromaPredMode == DM_CHROMA_IDX)
+                chromaPredMode = cu.m_lumaIntraDir[(m_csp == X265_CSP_I444) ? absPartIdxC : 0];
+            if (m_csp == X265_CSP_I422)
+                chromaPredMode = g_chroma422IntraAngleMappingTable[chromaPredMode];
+
+            // init availability pattern
+            initAdiPatternChroma(cu, cuGeom, absPartIdxC, intraNeighbors, chromaId);
+
+            // get prediction signal
+            predIntraChromaAng(chromaPredMode, pred, stride, log2TrSizeC);
+            cu.setTransformSkipPartRange(0, ttype, absPartIdxC, tuIterator.absPartIdxStep);
+
+            primitives.cu[sizeIdxC].calcresidual(fenc, pred, residual, stride);
+            uint32_t numSig = m_quant.transformNxN(cu, fenc, stride, residual, stride, coeffC, log2TrSizeC, ttype, absPartIdxC, false);
+            if (numSig)
+            {
+                m_quant.invtransformNxN(cu, residual, stride, coeffC, log2TrSizeC, ttype, true, false, numSig);
+                primitives.cu[sizeIdxC].add_ps(reconQt, reconQtStride, pred, residual, stride, stride);
+                cu.setCbfPartRange(1 << tuDepth, ttype, absPartIdxC, tuIterator.absPartIdxStep);
+            }
+            else
+            {
+                // no coded residual, recon = pred
+                primitives.cu[sizeIdxC].copy_pp(reconQt, reconQtStride, pred, stride);
+                cu.setCbfPartRange(0, ttype, absPartIdxC, tuIterator.absPartIdxStep);
+            }
+
+            // chroma distortion is weighted relative to luma
+            outDist += m_rdCost.scaleChromaDist(chromaId, primitives.cu[sizeIdxC].sse_pp(reconQt, reconQtStride, fenc, stride));
+
+            if (m_rdCost.m_psyRd)
+                psyEnergy += m_rdCost.psyCost(sizeIdxC, fenc, stride, reconQt, reconQtStride);
+
+            primitives.cu[sizeIdxC].copy_pp(picReconC, picStride, reconQt, reconQtStride);
+        }
+    }
+    while (tuIterator.isNextSection());
+
+    if (splitType == VERTICAL_SPLIT)
+    {
+        // 4:2:2: fold the two sub-TU CBFs into parent + per-sub-TU levels
+        offsetSubTUCBFs(cu, TEXT_CHROMA_U, tuDepth, absPartIdx);
+        offsetSubTUCBFs(cu, TEXT_CHROMA_V, tuDepth, absPartIdx);
+    }
+
+    return outDist;
+}
+
+/* returns distortion.
+ * Encode the chroma residual of a 4x4 TU while RD-comparing the normal
+ * transform against transform skip per plane, keeping the cheaper candidate
+ * (mirrors codeIntraLumaTSkip). Updates CBFs/tskip flags, writes the winning
+ * reconstruction to the frame recon picture, and accumulates psy energy. */
+uint32_t Search::codeIntraChromaTSkip(Mode& mode, const CUGeom& cuGeom, uint32_t tuDepth, uint32_t tuDepthC, uint32_t absPartIdx, uint32_t& psyEnergy)
+{
+    CUData& cu = mode.cu;
+    uint32_t fullDepth  = cuGeom.depth + tuDepth;
+    uint32_t log2TrSize = cuGeom.log2CUSize - tuDepth;
+    const uint32_t log2TrSizeC = 2;
+    uint32_t qtLayer = log2TrSize - 2;
+    uint32_t outDist = 0;
+
+    /* At the TU layers above this one, no RDO is performed, only distortion is being measured,
+     * so the entropy coder is not very accurate. The best we can do is return it in the same
+     * condition as it arrived, and to do all bit estimates from the same state. */
+    m_entropyCoder.store(m_rqt[fullDepth].rqtRoot);
+
+    // 4:2:2 codes two vertical square sub-TUs per node; otherwise one section
+    uint32_t curPartNum = cuGeom.numPartitions >> tuDepthC * 2;
+    const SplitType splitType = (m_csp == X265_CSP_I422) ? VERTICAL_SPLIT : DONT_SPLIT;
+
+    TURecurse tuIterator(splitType, curPartNum, absPartIdx);
+    do
+    {
+        uint32_t absPartIdxC = tuIterator.absPartIdxTURelCU;
+
+        IntraNeighbors intraNeighbors;
+        initIntraNeighbors(cu, absPartIdxC, tuDepthC, false, &intraNeighbors);
+
+        for (uint32_t chromaId = TEXT_CHROMA_U; chromaId <= TEXT_CHROMA_V; chromaId++)
+        {
+            TextType ttype = (TextType)chromaId;
+
+            const pixel* fenc = mode.fencYuv->getChromaAddr(chromaId, absPartIdxC);
+            pixel*   pred = mode.predYuv.getChromaAddr(chromaId, absPartIdxC);
+            int16_t* residual = m_rqt[cuGeom.depth].tmpResiYuv.getChromaAddr(chromaId, absPartIdxC);
+            uint32_t stride = mode.fencYuv->m_csize;
+            const uint32_t sizeIdxC = log2TrSizeC - 2;
+
+            uint32_t coeffOffsetC = absPartIdxC << (LOG2_UNIT_SIZE * 2 - (m_hChromaShift + m_vChromaShift));
+            coeff_t* coeffC = m_rqt[qtLayer].coeffRQT[chromaId] + coeffOffsetC;
+            pixel*   reconQt = m_rqt[qtLayer].reconQtYuv.getChromaAddr(chromaId, absPartIdxC);
+            uint32_t reconQtStride = m_rqt[qtLayer].reconQtYuv.m_csize;
+
+            // init availability pattern
+            initAdiPatternChroma(cu, cuGeom, absPartIdxC, intraNeighbors, chromaId);
+
+            // resolve DM_CHROMA to the co-located luma direction; 4:2:2 remaps
+            // angles through the standard mapping table
+            uint32_t chromaPredMode = cu.m_chromaIntraDir[absPartIdxC];
+            if (chromaPredMode == DM_CHROMA_IDX)
+                chromaPredMode = cu.m_lumaIntraDir[(m_csp == X265_CSP_I444) ? absPartIdxC : 0];
+            if (m_csp == X265_CSP_I422)
+                chromaPredMode = g_chroma422IntraAngleMappingTable[chromaPredMode];
+
+            // get prediction signal
+            predIntraChromaAng(chromaPredMode, pred, stride, log2TrSizeC);
+
+            uint64_t bCost = MAX_INT64;
+            uint32_t bDist = 0;
+            uint32_t bCbf = 0;
+            uint32_t bEnergy = 0;
+            int      bTSkip = 0;
+
+            // trial 0: normal transform (RQT buffers); trial 1: transform skip
+            // (scratch m_tsCoeff/m_tsRecon)
+            int checkTransformSkip = 1;
+            for (int useTSkip = 0; useTSkip <= checkTransformSkip; useTSkip++)
+            {
+                coeff_t* coeff = (useTSkip ? m_tsCoeff : coeffC);
+                pixel*   recon = (useTSkip ? m_tsRecon : reconQt);
+                uint32_t reconStride = (useTSkip ? MAX_TS_SIZE : reconQtStride);
+
+                primitives.cu[sizeIdxC].calcresidual(fenc, pred, residual, stride);
+
+                uint32_t numSig = m_quant.transformNxN(cu, fenc, stride, residual, stride, coeff, log2TrSizeC, ttype, absPartIdxC, useTSkip);
+                if (numSig)
+                {
+                    m_quant.invtransformNxN(cu, residual, stride, coeff, log2TrSizeC, ttype, true, useTSkip, numSig);
+                    primitives.cu[sizeIdxC].add_ps(recon, reconStride, pred, residual, stride, stride);
+                    cu.setCbfPartRange(1 << tuDepth, ttype, absPartIdxC, tuIterator.absPartIdxStep);
+                }
+                else if (useTSkip)
+                {
+                    /* do not allow tskip if CBF=0, pretend we did not try tskip */
+                    checkTransformSkip = 0;
+                    break;
+                }
+                else
+                {
+                    // no coded residual, recon = pred
+                    primitives.cu[sizeIdxC].copy_pp(recon, reconStride, pred, stride);
+                    cu.setCbfPartRange(0, ttype, absPartIdxC, tuIterator.absPartIdxStep);
+                }
+                sse_ret_t tmpDist = primitives.cu[sizeIdxC].sse_pp(recon, reconStride, fenc, stride);
+                tmpDist = m_rdCost.scaleChromaDist(chromaId, tmpDist);
+
+                cu.setTransformSkipPartRange(useTSkip, ttype, absPartIdxC, tuIterator.absPartIdxStep);
+
+                uint32_t tmpBits = 0, tmpEnergy = 0;
+                if (numSig)
+                {
+                    // count coefficient bits from the saved entropy state so
+                    // both trials are measured from the same starting point
+                    m_entropyCoder.load(m_rqt[fullDepth].rqtRoot);
+                    m_entropyCoder.resetBits();
+                    m_entropyCoder.codeCoeffNxN(cu, coeff, absPartIdxC, log2TrSizeC, (TextType)chromaId);
+                    tmpBits = m_entropyCoder.getNumberOfWrittenBits();
+                }
+
+                uint64_t tmpCost;
+                if (m_rdCost.m_psyRd)
+                {
+                    /* fix: measure psy energy against the reconstruction of the
+                     * candidate under evaluation (recon/reconStride), not the
+                     * non-tskip buffer; matches codeIntraLumaTSkip which uses
+                     * tmpRecon/tmpReconStride */
+                    tmpEnergy = m_rdCost.psyCost(sizeIdxC, fenc, stride, recon, reconStride);
+                    tmpCost = m_rdCost.calcPsyRdCost(tmpDist, tmpBits, tmpEnergy);
+                }
+                else
+                    tmpCost = m_rdCost.calcRdCost(tmpDist, tmpBits);
+
+                if (tmpCost < bCost)
+                {
+                    bCost = tmpCost;
+                    bDist = tmpDist;
+                    bTSkip = useTSkip;
+                    bCbf = !!numSig;
+                    bEnergy = tmpEnergy;
+                }
+            }
+
+            if (bTSkip)
+            {
+                // tskip won: promote its scratch coeffs/recon into the RQT buffers
+                memcpy(coeffC, m_tsCoeff, sizeof(coeff_t) << (log2TrSizeC * 2));
+                primitives.cu[sizeIdxC].copy_pp(reconQt, reconQtStride, m_tsRecon, MAX_TS_SIZE);
+            }
+
+            cu.setCbfPartRange(bCbf << tuDepth, ttype, absPartIdxC, tuIterator.absPartIdxStep);
+            cu.setTransformSkipPartRange(bTSkip, ttype, absPartIdxC, tuIterator.absPartIdxStep);
+
+            PicYuv*  reconPic = m_frame->m_reconPic;
+            pixel*   reconPicC = reconPic->getChromaAddr(chromaId, cu.m_cuAddr, cuGeom.absPartIdx + absPartIdxC);
+            intptr_t picStride = reconPic->m_strideC;
+            primitives.cu[sizeIdxC].copy_pp(reconPicC, picStride, reconQt, reconQtStride);
+
+            outDist += bDist;
+            psyEnergy += bEnergy;
+        }
+    }
+    while (tuIterator.isNextSection());
+
+    if (splitType == VERTICAL_SPLIT)
+    {
+        // 4:2:2: fold the two sub-TU CBFs into parent + per-sub-TU levels
+        offsetSubTUCBFs(cu, TEXT_CHROMA_U, tuDepth, absPartIdx);
+        offsetSubTUCBFs(cu, TEXT_CHROMA_V, tuDepth, absPartIdx);
+    }
+
+    // restore the entropy coder to the condition it arrived in
+    m_entropyCoder.load(m_rqt[fullDepth].rqtRoot);
+    return outDist;
+}
+
+/* Recursively copy the winning chroma result for this TU out of the RQT
+ * scratch buffers into permanent CU storage (coefficients) and the caller's
+ * recon Yuv (reconstruction). Leaf TUs are reached either when the recorded
+ * TU depth matches, or when chroma has bottomed out at 4x4 (log2TrSizeC == 2),
+ * in which case four luma sub-TUs share this single chroma TU. */
+void Search::extractIntraResultChromaQT(CUData& cu, Yuv& reconYuv, uint32_t absPartIdx, uint32_t tuDepth)
+{
+    uint32_t tuDepthL  = cu.m_tuDepth[absPartIdx];
+    uint32_t log2TrSize = cu.m_log2CUSize[0] - tuDepth;
+    uint32_t log2TrSizeC = log2TrSize - m_hChromaShift;
+
+    if (tuDepthL == tuDepth || log2TrSizeC == 2)
+    {
+        // copy transform coefficients
+        // 4:2:2 stacks two chroma TUs per luma TU, hence the extra bit of count
+        uint32_t numCoeffC = 1 << (log2TrSizeC * 2 + (m_csp == X265_CSP_I422));
+        uint32_t coeffOffsetC = absPartIdx << (LOG2_UNIT_SIZE * 2 - (m_hChromaShift + m_vChromaShift));
+
+        // qtLayer selects the RQT scratch level this TU size was coded into;
+        // the (tuDepthL - tuDepth) term accounts for the shared-4x4-chroma case
+        uint32_t qtLayer   = log2TrSize - 2 - (tuDepthL - tuDepth);
+        coeff_t* coeffSrcU = m_rqt[qtLayer].coeffRQT[1] + coeffOffsetC;
+        coeff_t* coeffSrcV = m_rqt[qtLayer].coeffRQT[2] + coeffOffsetC;
+        coeff_t* coeffDstU = cu.m_trCoeff[1]           + coeffOffsetC;
+        coeff_t* coeffDstV = cu.m_trCoeff[2]           + coeffOffsetC;
+        memcpy(coeffDstU, coeffSrcU, sizeof(coeff_t) * numCoeffC);
+        memcpy(coeffDstV, coeffSrcV, sizeof(coeff_t) * numCoeffC);
+
+        // copy reconstruction
+        m_rqt[qtLayer].reconQtYuv.copyPartToPartChroma(reconYuv, absPartIdx, log2TrSizeC + m_hChromaShift);
+    }
+    else
+    {
+        // TU is split: recurse into the four quadrants at the next depth
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2;
+        for (uint32_t qIdx = 0; qIdx < 4; ++qIdx, absPartIdx += qNumParts)
+            extractIntraResultChromaQT(cu, reconYuv, absPartIdx, tuDepth + 1);
+    }
+}
+
+/* Code the chroma residual for an intra CU at low RD levels: no transform
+ * skip and no RD search — a single predict / transform / quantize /
+ * reconstruct pass per chroma TU, writing reconstruction directly into the
+ * frame's recon picture. Recurses down the recorded TU tree, OR-ing child
+ * CBFs up into the parent depth as it returns. */
+void Search::residualQTIntraChroma(Mode& mode, const CUGeom& cuGeom, uint32_t absPartIdx, uint32_t tuDepth)
+{
+    CUData& cu = mode.cu;
+    uint32_t log2TrSize = cu.m_log2CUSize[absPartIdx] - tuDepth;
+
+    if (tuDepth < cu.m_tuDepth[absPartIdx])
+    {
+        // TU is split: process the four quadrants, then propagate their CBFs
+        // into this depth's bit for every part covered by this TU
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2;
+        uint32_t splitCbfU = 0, splitCbfV = 0;
+        for (uint32_t qIdx = 0, qPartIdx = absPartIdx; qIdx < 4; ++qIdx, qPartIdx += qNumParts)
+        {
+            residualQTIntraChroma(mode, cuGeom, qPartIdx, tuDepth + 1);
+            splitCbfU |= cu.getCbf(qPartIdx, TEXT_CHROMA_U, tuDepth + 1);
+            splitCbfV |= cu.getCbf(qPartIdx, TEXT_CHROMA_V, tuDepth + 1);
+        }
+        for (uint32_t offs = 0; offs < 4 * qNumParts; offs++)
+        {
+            cu.m_cbf[1][absPartIdx + offs] |= (splitCbfU << tuDepth);
+            cu.m_cbf[2][absPartIdx + offs] |= (splitCbfV << tuDepth);
+        }
+
+        return;
+    }
+
+    uint32_t log2TrSizeC = log2TrSize - m_hChromaShift;
+    uint32_t tuDepthC = tuDepth;
+    if (log2TrSizeC < 2)
+    {
+        // chroma cannot go below 4x4: four 4x4 luma TUs share one 4x4 chroma
+        // TU, so only the first of each group of four parts does chroma work
+        X265_CHECK(log2TrSize == 2 && m_csp != X265_CSP_I444 && tuDepth, "invalid tuDepth\n");
+        if (absPartIdx & 3)
+            return;
+        log2TrSizeC = 2;
+        tuDepthC--;
+    }
+
+    ShortYuv& resiYuv = m_rqt[cuGeom.depth].tmpResiYuv;
+    uint32_t stride = mode.fencYuv->m_csize;
+    const uint32_t sizeIdxC = log2TrSizeC - 2;
+
+    uint32_t curPartNum = cuGeom.numPartitions >> tuDepthC * 2;
+    // 4:2:2 codes each TU as two vertically stacked sub-TUs
+    const SplitType splitType = (m_csp == X265_CSP_I422) ? VERTICAL_SPLIT : DONT_SPLIT;
+
+    TURecurse tuIterator(splitType, curPartNum, absPartIdx);
+    do
+    {
+        uint32_t absPartIdxC = tuIterator.absPartIdxTURelCU;
+
+        IntraNeighbors intraNeighbors;
+        initIntraNeighbors(cu, absPartIdxC, tuDepthC, false, &intraNeighbors);
+
+        for (uint32_t chromaId = TEXT_CHROMA_U; chromaId <= TEXT_CHROMA_V; chromaId++)
+        {
+            TextType ttype = (TextType)chromaId;
+
+            const pixel* fenc = mode.fencYuv->getChromaAddr(chromaId, absPartIdxC);
+            pixel*   pred     = mode.predYuv.getChromaAddr(chromaId, absPartIdxC);
+            int16_t* residual = resiYuv.getChromaAddr(chromaId, absPartIdxC);
+            uint32_t coeffOffsetC  = absPartIdxC << (LOG2_UNIT_SIZE * 2 - (m_hChromaShift + m_vChromaShift));
+            coeff_t* coeffC        = cu.m_trCoeff[ttype] + coeffOffsetC;
+            PicYuv*  reconPic = m_frame->m_reconPic;
+            pixel*   picReconC = reconPic->getChromaAddr(chromaId, cu.m_cuAddr, cuGeom.absPartIdx + absPartIdxC);
+            intptr_t picStride = reconPic->m_strideC;
+
+            // resolve DM_CHROMA to the co-located luma direction; remap the
+            // angle for 4:2:2's non-square chroma geometry
+            uint32_t chromaPredMode = cu.m_chromaIntraDir[absPartIdxC];
+            if (chromaPredMode == DM_CHROMA_IDX)
+                chromaPredMode = cu.m_lumaIntraDir[(m_csp == X265_CSP_I444) ? absPartIdxC : 0];
+            if (m_csp == X265_CSP_I422)
+                chromaPredMode = g_chroma422IntraAngleMappingTable[chromaPredMode];
+
+            // init availability pattern
+            initAdiPatternChroma(cu, cuGeom, absPartIdxC, intraNeighbors, chromaId);
+
+            // get prediction signal
+            predIntraChromaAng(chromaPredMode, pred, stride, log2TrSizeC);
+
+            X265_CHECK(!cu.m_transformSkip[ttype][0], "transform skip not supported at low RD levels\n");
+
+            primitives.cu[sizeIdxC].calcresidual(fenc, pred, residual, stride);
+            uint32_t numSig = m_quant.transformNxN(cu, fenc, stride, residual, stride, coeffC, log2TrSizeC, ttype, absPartIdxC, false);
+            if (numSig)
+            {
+                // coded residual: inverse-transform and add to prediction
+                m_quant.invtransformNxN(cu, residual, stride, coeffC, log2TrSizeC, ttype, true, false, numSig);
+                primitives.cu[sizeIdxC].add_ps(picReconC, picStride, pred, residual, stride, stride);
+                cu.setCbfPartRange(1 << tuDepth, ttype, absPartIdxC, tuIterator.absPartIdxStep);
+            }
+            else
+            {
+                // no coded residual, recon = pred
+                primitives.cu[sizeIdxC].copy_pp(picReconC, picStride, pred, stride);
+                cu.setCbfPartRange(0, ttype, absPartIdxC, tuIterator.absPartIdxStep);
+            }
+        }
+    }
+    while (tuIterator.isNextSection());
+
+    if (splitType == VERTICAL_SPLIT)
+    {
+        // fold the two 4:2:2 sub-TU CBFs into the signaled per-TU CBF layout
+        offsetSubTUCBFs(cu, TEXT_CHROMA_U, tuDepth, absPartIdx);
+        offsetSubTUCBFs(cu, TEXT_CHROMA_V, tuDepth, absPartIdx);
+    }
+}
+
+/* Full RD evaluation of an intra mode for this CU: search luma (and, unless
+ * 4:0:0, chroma) intra directions with TU splitting, then measure the exact
+ * bit cost of the whole CU with the entropy coder and fill in the Mode's
+ * distortion / bits / psy-energy fields. sharedModes / sharedChromaModes,
+ * when non-NULL, supply previously-decided directions instead of searching. */
+void Search::checkIntra(Mode& intraMode, const CUGeom& cuGeom, PartSize partSize, uint8_t* sharedModes, uint8_t* sharedChromaModes)
+{
+    CUData& cu = intraMode.cu;
+
+    cu.setPartSizeSubParts(partSize);
+    cu.setPredModeSubParts(MODE_INTRA);
+
+    uint32_t tuDepthRange[2];
+    cu.getIntraTUQtDepthRange(tuDepthRange, 0);
+
+    intraMode.initCosts();
+    intraMode.lumaDistortion += estIntraPredQT(intraMode, cuGeom, tuDepthRange, sharedModes);
+    if (m_csp != X265_CSP_I400)
+      intraMode.chromaDistortion += estIntraPredChromaQT(intraMode, cuGeom, sharedChromaModes);
+    intraMode.distortion += intraMode.lumaDistortion + intraMode.chromaDistortion;
+
+    // measure actual header + coefficient bits for this CU
+    m_entropyCoder.resetBits();
+    if (m_slice->m_pps->bTransquantBypassEnabled)
+        m_entropyCoder.codeCUTransquantBypassFlag(cu.m_tqBypass[0]);
+
+    // skip flag / pred mode are only signaled in inter slices
+    if (!m_slice->isIntra())
+    {
+        m_entropyCoder.codeSkipFlag(cu, 0);
+        m_entropyCoder.codePredMode(cu.m_predMode[0]);
+    }
+
+    m_entropyCoder.codePartSize(cu, 0, cuGeom.depth);
+    m_entropyCoder.codePredInfo(cu, 0);
+    intraMode.mvBits = m_entropyCoder.getNumberOfWrittenBits();
+
+    bool bCodeDQP = m_slice->m_pps->bUseDQP;
+    m_entropyCoder.codeCoeff(cu, 0, bCodeDQP, tuDepthRange);
+    m_entropyCoder.store(intraMode.contexts);
+    intraMode.totalBits = m_entropyCoder.getNumberOfWrittenBits();
+    intraMode.coeffBits = intraMode.totalBits - intraMode.mvBits;
+    if (m_rdCost.m_psyRd)
+    {
+        const Yuv* fencYuv = intraMode.fencYuv;
+        intraMode.psyEnergy = m_rdCost.psyCost(cuGeom.log2CUSize - 2, fencYuv->m_buf[0], fencYuv->m_size, intraMode.reconYuv.m_buf[0], intraMode.reconYuv.m_size);
+    }
+    intraMode.resEnergy = primitives.cu[cuGeom.log2CUSize - 2].sse_pp(intraMode.fencYuv->m_buf[0], intraMode.fencYuv->m_size, intraMode.predYuv.m_buf[0], intraMode.predYuv.m_size);
+
+    updateModeCost(intraMode);
+    checkDQP(intraMode, cuGeom);
+}
+
+/* Note that this function does not save the best intra prediction, it must
+ * be generated later. It records the best mode in the cu.
+ *
+ * Fast sa8d-based luma intra mode decision for intra-in-inter: predict each
+ * candidate angle, score it with sa8d + estimated mode bits, and record only
+ * the winning direction and its SAD-based cost in intraMode/cu. 64x64 CUs
+ * are evaluated downscaled to 32x32. */
+void Search::checkIntraInInter(Mode& intraMode, const CUGeom& cuGeom)
+{
+    ProfileCUScope(intraMode.cu, intraAnalysisElapsedTime, countIntraAnalysis);
+
+    CUData& cu = intraMode.cu;
+    uint32_t depth = cuGeom.depth;
+
+    cu.setPartSizeSubParts(SIZE_2Nx2N);
+    cu.setPredModeSubParts(MODE_INTRA);
+
+    const uint32_t initTuDepth = 0;
+    uint32_t log2TrSize = cuGeom.log2CUSize - initTuDepth;
+    uint32_t tuSize = 1 << log2TrSize;
+    const uint32_t absPartIdx = 0;
+
+    // Reference sample smoothing
+    IntraNeighbors intraNeighbors;
+    initIntraNeighbors(cu, absPartIdx, initTuDepth, true, &intraNeighbors);
+    initAdiPattern(cu, cuGeom, absPartIdx, intraNeighbors, ALL_IDX);
+
+    const pixel* fenc = intraMode.fencYuv->m_buf[0];
+    uint32_t stride = intraMode.fencYuv->m_size;
+
+    int sad, bsad;
+    uint32_t bits, bbits, mode, bmode;
+    uint64_t cost, bcost;
+
+    // 33 Angle modes once
+    int scaleTuSize = tuSize;
+    int scaleStride = stride;
+    int costShift = 0;
+    int sizeIdx = log2TrSize - 2;
+
+    if (tuSize > 32)
+    {
+        // CU is 64x64, we scale to 32x32 and adjust required parameters
+        primitives.scale2D_64to32(m_fencScaled, fenc, stride);
+        fenc = m_fencScaled;
+
+        pixel nScale[129];
+        intraNeighbourBuf[1][0] = intraNeighbourBuf[0][0];
+        primitives.scale1D_128to64(nScale + 1, intraNeighbourBuf[0] + 1);
+
+        // we do not estimate filtering for downscaled samples
+        memcpy(&intraNeighbourBuf[0][1], &nScale[1], 2 * 64 * sizeof(pixel));   // Top & Left pixels
+        memcpy(&intraNeighbourBuf[1][1], &nScale[1], 2 * 64 * sizeof(pixel));
+
+        scaleTuSize = 32;
+        scaleStride = 32;
+        costShift = 2;    // compensate distortion for the reduced sample count
+        sizeIdx = 5 - 2; // log2(scaleTuSize) - 2
+    }
+
+    pixelcmp_t sa8d = primitives.cu[sizeIdx].sa8d;
+    int predsize = scaleTuSize * scaleTuSize;
+
+    m_entropyCoder.loadIntraDirModeLuma(m_rqt[depth].cur);
+
+    /* there are three cost tiers for intra modes:
+     *  pred[0]          - mode probable, least cost
+     *  pred[1], pred[2] - less probable, slightly more cost
+     *  non-mpm modes    - all cost the same (rbits) */
+    uint64_t mpms;
+    uint32_t mpmModes[3];
+    uint32_t rbits = getIntraRemModeBits(cu, absPartIdx, mpmModes, mpms);
+
+    // DC
+    primitives.cu[sizeIdx].intra_pred[DC_IDX](m_intraPredAngs, scaleStride, intraNeighbourBuf[0], 0, (scaleTuSize <= 16));
+    bsad = sa8d(fenc, scaleStride, m_intraPredAngs, scaleStride) << costShift;
+    bmode = mode = DC_IDX;
+    bbits = (mpms & ((uint64_t)1 << mode)) ? m_entropyCoder.bitsIntraModeMPM(mpmModes, mode) : rbits;
+    bcost = m_rdCost.calcRdSADCost(bsad, bbits);
+
+    // PLANAR
+    // planar uses the filtered neighbour buffer for 8x8..32x32 TUs
+    pixel* planar = intraNeighbourBuf[0];
+    if (tuSize & (8 | 16 | 32))
+        planar = intraNeighbourBuf[1];
+
+    primitives.cu[sizeIdx].intra_pred[PLANAR_IDX](m_intraPredAngs, scaleStride, planar, 0, 0);
+    sad = sa8d(fenc, scaleStride, m_intraPredAngs, scaleStride) << costShift;
+    mode = PLANAR_IDX;
+    bits = (mpms & ((uint64_t)1 << mode)) ? m_entropyCoder.bitsIntraModeMPM(mpmModes, mode) : rbits;
+    cost = m_rdCost.calcRdSADCost(sad, bits);
+    COPY4_IF_LT(bcost, cost, bmode, mode, bsad, sad, bbits, bits);
+
+    // when available, generate all 33 angular predictions in one batch;
+    // modes < 18 compare against the transposed source block
+    bool allangs = true;
+    if (primitives.cu[sizeIdx].intra_pred_allangs)
+    {
+        primitives.cu[sizeIdx].transpose(m_fencTransposed, fenc, scaleStride);
+        primitives.cu[sizeIdx].intra_pred_allangs(m_intraPredAngs, intraNeighbourBuf[0], intraNeighbourBuf[1], (scaleTuSize <= 16)); 
+    }
+    else
+        allangs = false;
+
+/* score one angular mode: batch path reads the precomputed prediction plane,
+ * fallback path generates the single prediction on demand */
+#define TRY_ANGLE(angle) \
+    if (allangs) { \
+        if (angle < 18) \
+            sad = sa8d(m_fencTransposed, scaleTuSize, &m_intraPredAngs[(angle - 2) * predsize], scaleTuSize) << costShift; \
+        else \
+            sad = sa8d(fenc, scaleStride, &m_intraPredAngs[(angle - 2) * predsize], scaleTuSize) << costShift; \
+        bits = (mpms & ((uint64_t)1 << angle)) ? m_entropyCoder.bitsIntraModeMPM(mpmModes, angle) : rbits; \
+        cost = m_rdCost.calcRdSADCost(sad, bits); \
+    } else { \
+        int filter = !!(g_intraFilterFlags[angle] & scaleTuSize); \
+        primitives.cu[sizeIdx].intra_pred[angle](m_intraPredAngs, scaleTuSize, intraNeighbourBuf[filter], angle, scaleTuSize <= 16); \
+        sad = sa8d(fenc, scaleStride, m_intraPredAngs, scaleTuSize) << costShift; \
+        bits = (mpms & ((uint64_t)1 << angle)) ? m_entropyCoder.bitsIntraModeMPM(mpmModes, angle) : rbits; \
+        cost = m_rdCost.calcRdSADCost(sad, bits); \
+    }
+
+    if (m_param->bEnableFastIntra)
+    {
+        int asad = 0;
+        uint32_t lowmode, highmode, amode = 5, abits = 0;
+        uint64_t acost = MAX_INT64;
+
+        /* pick the best angle, sampling at distance of 5 */
+        for (mode = 5; mode < 35; mode += 5)
+        {
+            TRY_ANGLE(mode);
+            COPY4_IF_LT(acost, cost, amode, mode, asad, sad, abits, bits);
+        }
+
+        /* refine best angle at distance 2, then distance 1 */
+        for (uint32_t dist = 2; dist >= 1; dist--)
+        {
+            lowmode = amode - dist;
+            highmode = amode + dist;
+
+            X265_CHECK(lowmode >= 2 && lowmode <= 34, "low intra mode out of range\n");
+            TRY_ANGLE(lowmode);
+            COPY4_IF_LT(acost, cost, amode, lowmode, asad, sad, abits, bits);
+
+            X265_CHECK(highmode >= 2 && highmode <= 34, "high intra mode out of range\n");
+            TRY_ANGLE(highmode);
+            COPY4_IF_LT(acost, cost, amode, highmode, asad, sad, abits, bits);
+        }
+
+        // mode 34 is never reached by the coarse/refine sweep; probe it when
+        // the refined winner sits right next to it
+        if (amode == 33)
+        {
+            TRY_ANGLE(34);
+            COPY4_IF_LT(acost, cost, amode, 34, asad, sad, abits, bits);
+        }
+
+        COPY4_IF_LT(bcost, acost, bmode, amode, bsad, asad, bbits, abits);
+    }
+    else // calculate and search all intra prediction angles for lowest cost
+    {
+        for (mode = 2; mode < 35; mode++)
+        {
+            TRY_ANGLE(mode);
+            COPY4_IF_LT(bcost, cost, bmode, mode, bsad, sad, bbits, bits);
+        }
+    }
+
+    cu.setLumaIntraDirSubParts((uint8_t)bmode, absPartIdx, depth + initTuDepth);
+    intraMode.initCosts();
+    intraMode.totalBits = bbits;
+    intraMode.distortion = bsad;
+    intraMode.sa8dCost = bcost;
+    intraMode.sa8dBits = bbits;
+    X265_CHECK(intraMode.ok(), "intra mode is not ok");
+}
+
+/* Fully encode the intra mode chosen earlier by checkIntraInInter(): run the
+ * luma RQT for the already-decided direction, search chroma directions, then
+ * measure exact CU bits and fill in the Mode cost fields. Only valid for
+ * 2Nx2N intra inside inter slices (asserted below). */
+void Search::encodeIntraInInter(Mode& intraMode, const CUGeom& cuGeom)
+{
+    ProfileCUScope(intraMode.cu, intraRDOElapsedTime[cuGeom.depth], countIntraRDO[cuGeom.depth]);
+
+    CUData& cu = intraMode.cu;
+    Yuv* reconYuv = &intraMode.reconYuv;
+
+    X265_CHECK(cu.m_partSize[0] == SIZE_2Nx2N, "encodeIntraInInter does not expect NxN intra\n");
+    X265_CHECK(!m_slice->isIntra(), "encodeIntraInInter does not expect to be used in I slices\n");
+
+    uint32_t tuDepthRange[2];
+    cu.getIntraTUQtDepthRange(tuDepthRange, 0);
+
+    m_entropyCoder.load(m_rqt[cuGeom.depth].cur);
+
+    // luma RQT for the recorded direction, then pull results out of the
+    // RQT scratch buffers into reconYuv / cu
+    Cost icosts;
+    codeIntraLumaQT(intraMode, cuGeom, 0, 0, false, icosts, tuDepthRange);
+    extractIntraResultQT(cu, *reconYuv, 0, 0);
+
+    intraMode.lumaDistortion = icosts.distortion;
+    intraMode.chromaDistortion = estIntraPredChromaQT(intraMode, cuGeom, NULL);
+    intraMode.distortion = intraMode.lumaDistortion + intraMode.chromaDistortion;
+
+    // measure actual header + coefficient bits for the CU
+    m_entropyCoder.resetBits();
+    if (m_slice->m_pps->bTransquantBypassEnabled)
+        m_entropyCoder.codeCUTransquantBypassFlag(cu.m_tqBypass[0]);
+    m_entropyCoder.codeSkipFlag(cu, 0);
+    m_entropyCoder.codePredMode(cu.m_predMode[0]);
+    m_entropyCoder.codePartSize(cu, 0, cuGeom.depth);
+    m_entropyCoder.codePredInfo(cu, 0);
+    intraMode.mvBits += m_entropyCoder.getNumberOfWrittenBits();
+
+    bool bCodeDQP = m_slice->m_pps->bUseDQP;
+    m_entropyCoder.codeCoeff(cu, 0, bCodeDQP, tuDepthRange);
+
+    intraMode.totalBits = m_entropyCoder.getNumberOfWrittenBits();
+    intraMode.coeffBits = intraMode.totalBits - intraMode.mvBits;
+    if (m_rdCost.m_psyRd)
+    {
+        const Yuv* fencYuv = intraMode.fencYuv;
+        intraMode.psyEnergy = m_rdCost.psyCost(cuGeom.log2CUSize - 2, fencYuv->m_buf[0], fencYuv->m_size, reconYuv->m_buf[0], reconYuv->m_size);
+    }
+    intraMode.resEnergy = primitives.cu[cuGeom.log2CUSize - 2].sse_pp(intraMode.fencYuv->m_buf[0], intraMode.fencYuv->m_size, intraMode.predYuv.m_buf[0], intraMode.predYuv.m_size);
+    m_entropyCoder.store(intraMode.contexts);
+    updateModeCost(intraMode);
+    checkDQP(intraMode, cuGeom);
+}
+
+/* Full luma intra direction search for a CU. For each PU (1 for 2Nx2N,
+ * 4 for NxN): (1) score all 35 modes by sa8d + estimated mode bits,
+ * (2) RD-evaluate the best few candidates without TU splits, (3) re-measure
+ * the winner allowing TU splits, and write its reconstruction into reconYuv
+ * and the frame's recon picture. sharedModes, when non-NULL, supplies the
+ * directions directly and skips steps (1)-(2). Returns total distortion. */
+uint32_t Search::estIntraPredQT(Mode &intraMode, const CUGeom& cuGeom, const uint32_t depthRange[2], uint8_t* sharedModes)
+{
+    CUData& cu = intraMode.cu;
+    Yuv* reconYuv = &intraMode.reconYuv;
+    Yuv* predYuv = &intraMode.predYuv;
+    const Yuv* fencYuv = intraMode.fencYuv;
+
+    uint32_t depth        = cuGeom.depth;
+    uint32_t initTuDepth  = cu.m_partSize[0] != SIZE_2Nx2N;    // NxN implies one extra TU depth
+    uint32_t numPU        = 1 << (2 * initTuDepth);
+    uint32_t log2TrSize   = cuGeom.log2CUSize - initTuDepth;
+    uint32_t tuSize       = 1 << log2TrSize;
+    uint32_t qNumParts    = cuGeom.numPartitions >> 2;
+    uint32_t sizeIdx      = log2TrSize - 2;
+    uint32_t absPartIdx   = 0;
+    uint32_t totalDistortion = 0;
+
+    int checkTransformSkip = m_slice->m_pps->bTransformSkipEnabled && !cu.m_tqBypass[0] && cu.m_partSize[0] != SIZE_2Nx2N;
+
+    // loop over partitions
+    for (uint32_t puIdx = 0; puIdx < numPU; puIdx++, absPartIdx += qNumParts)
+    {
+        uint32_t bmode = 0;
+
+        if (sharedModes)
+            bmode = sharedModes[puIdx];
+        else
+        {
+            uint64_t candCostList[MAX_RD_INTRA_MODES];
+            uint32_t rdModeList[MAX_RD_INTRA_MODES];
+            uint64_t bcost;
+            // more candidates at slower RD levels and at greater depths
+            int maxCandCount = 2 + m_param->rdLevel + ((depth + initTuDepth) >> 1);
+
+            {
+                ProfileCUScope(intraMode.cu, intraAnalysisElapsedTime, countIntraAnalysis);
+
+                // Reference sample smoothing
+                IntraNeighbors intraNeighbors;
+                initIntraNeighbors(cu, absPartIdx, initTuDepth, true, &intraNeighbors);
+                initAdiPattern(cu, cuGeom, absPartIdx, intraNeighbors, ALL_IDX);
+
+                // determine set of modes to be tested (using prediction signal only)
+                const pixel* fenc = fencYuv->getLumaAddr(absPartIdx);
+                uint32_t stride = predYuv->m_size;
+
+                int scaleTuSize = tuSize;
+                int scaleStride = stride;
+                int costShift = 0;
+
+                if (tuSize > 32)
+                {
+                    // origin is 64x64, we scale to 32x32 and setup required parameters
+                    primitives.scale2D_64to32(m_fencScaled, fenc, stride);
+                    fenc = m_fencScaled;
+
+                    pixel nScale[129];
+                    intraNeighbourBuf[1][0] = intraNeighbourBuf[0][0];
+                    primitives.scale1D_128to64(nScale + 1, intraNeighbourBuf[0] + 1);
+
+                    memcpy(&intraNeighbourBuf[0][1], &nScale[1], 2 * 64 * sizeof(pixel));
+                    memcpy(&intraNeighbourBuf[1][1], &nScale[1], 2 * 64 * sizeof(pixel));
+
+                    scaleTuSize = 32;
+                    scaleStride = 32;
+                    costShift = 2;    // compensate distortion for reduced sample count
+                    sizeIdx = 5 - 2; // log2(scaleTuSize) - 2
+                }
+
+                m_entropyCoder.loadIntraDirModeLuma(m_rqt[depth].cur);
+
+                /* there are three cost tiers for intra modes:
+                *  pred[0]          - mode probable, least cost
+                *  pred[1], pred[2] - less probable, slightly more cost
+                *  non-mpm modes    - all cost the same (rbits) */
+                uint64_t mpms;
+                uint32_t mpmModes[3];
+                uint32_t rbits = getIntraRemModeBits(cu, absPartIdx, mpmModes, mpms);
+
+                pixelcmp_t sa8d = primitives.cu[sizeIdx].sa8d;
+                uint64_t modeCosts[35];
+
+                // DC
+                primitives.cu[sizeIdx].intra_pred[DC_IDX](m_intraPred, scaleStride, intraNeighbourBuf[0], 0, (scaleTuSize <= 16));
+                uint32_t bits = (mpms & ((uint64_t)1 << DC_IDX)) ? m_entropyCoder.bitsIntraModeMPM(mpmModes, DC_IDX) : rbits;
+                uint32_t sad = sa8d(fenc, scaleStride, m_intraPred, scaleStride) << costShift;
+                modeCosts[DC_IDX] = bcost = m_rdCost.calcRdSADCost(sad, bits);
+
+                // PLANAR
+                // planar uses the filtered neighbour buffer for 8x8..32x32 TUs
+                pixel* planar = intraNeighbourBuf[0];
+                if (tuSize >= 8 && tuSize <= 32)
+                    planar = intraNeighbourBuf[1];
+
+                primitives.cu[sizeIdx].intra_pred[PLANAR_IDX](m_intraPred, scaleStride, planar, 0, 0);
+                bits = (mpms & ((uint64_t)1 << PLANAR_IDX)) ? m_entropyCoder.bitsIntraModeMPM(mpmModes, PLANAR_IDX) : rbits;
+                sad = sa8d(fenc, scaleStride, m_intraPred, scaleStride) << costShift;
+                modeCosts[PLANAR_IDX] = m_rdCost.calcRdSADCost(sad, bits);
+                COPY1_IF_LT(bcost, modeCosts[PLANAR_IDX]);
+
+                // angular predictions
+                if (primitives.cu[sizeIdx].intra_pred_allangs)
+                {
+                    // batch path: generate all 33 angles at once; modes < 18
+                    // are compared against the transposed source block
+                    primitives.cu[sizeIdx].transpose(m_fencTransposed, fenc, scaleStride);
+                    primitives.cu[sizeIdx].intra_pred_allangs(m_intraPredAngs, intraNeighbourBuf[0], intraNeighbourBuf[1], (scaleTuSize <= 16));
+                    for (int mode = 2; mode < 35; mode++)
+                    {
+                        bits = (mpms & ((uint64_t)1 << mode)) ? m_entropyCoder.bitsIntraModeMPM(mpmModes, mode) : rbits;
+                        if (mode < 18)
+                            sad = sa8d(m_fencTransposed, scaleTuSize, &m_intraPredAngs[(mode - 2) * (scaleTuSize * scaleTuSize)], scaleTuSize) << costShift;
+                        else
+                            sad = sa8d(fenc, scaleStride, &m_intraPredAngs[(mode - 2) * (scaleTuSize * scaleTuSize)], scaleTuSize) << costShift;
+                        modeCosts[mode] = m_rdCost.calcRdSADCost(sad, bits);
+                        COPY1_IF_LT(bcost, modeCosts[mode]);
+                    }
+                }
+                else
+                {
+                    // fallback path: generate each angular prediction on demand
+                    for (int mode = 2; mode < 35; mode++)
+                    {
+                        bits = (mpms & ((uint64_t)1 << mode)) ? m_entropyCoder.bitsIntraModeMPM(mpmModes, mode) : rbits;
+                        int filter = !!(g_intraFilterFlags[mode] & scaleTuSize);
+                        primitives.cu[sizeIdx].intra_pred[mode](m_intraPred, scaleTuSize, intraNeighbourBuf[filter], mode, scaleTuSize <= 16);
+                        sad = sa8d(fenc, scaleStride, m_intraPred, scaleTuSize) << costShift;
+                        modeCosts[mode] = m_rdCost.calcRdSADCost(sad, bits);
+                        COPY1_IF_LT(bcost, modeCosts[mode]);
+                    }
+                }
+
+                /* Find the top maxCandCount candidate modes with cost within 25% of best
+                * or among the most probable modes. maxCandCount is derived from the
+                * rdLevel and depth. In general we want to try more modes at slower RD
+                * levels and at higher depths */
+                for (int i = 0; i < maxCandCount; i++)
+                    candCostList[i] = MAX_INT64;
+
+                uint64_t paddedBcost = bcost + (bcost >> 3); // 1.12%
+                for (int mode = 0; mode < 35; mode++)
+                    if (modeCosts[mode] < paddedBcost || (mpms & ((uint64_t)1 << mode)))
+                        updateCandList(mode, modeCosts[mode], maxCandCount, rdModeList, candCostList);
+            }
+
+            /* measure best candidates using simple RDO (no TU splits) */
+            bcost = MAX_INT64;
+            for (int i = 0; i < maxCandCount; i++)
+            {
+                if (candCostList[i] == MAX_INT64)
+                    break;
+
+                ProfileCUScope(intraMode.cu, intraRDOElapsedTime[cuGeom.depth], countIntraRDO[cuGeom.depth]);
+
+                m_entropyCoder.load(m_rqt[depth].cur);
+                cu.setLumaIntraDirSubParts(rdModeList[i], absPartIdx, depth + initTuDepth);
+
+                Cost icosts;
+                if (checkTransformSkip)
+                    codeIntraLumaTSkip(intraMode, cuGeom, initTuDepth, absPartIdx, icosts);
+                else
+                    codeIntraLumaQT(intraMode, cuGeom, initTuDepth, absPartIdx, false, icosts, depthRange);
+                COPY2_IF_LT(bcost, icosts.rdcost, bmode, rdModeList[i]);
+            }
+        }
+
+        ProfileCUScope(intraMode.cu, intraRDOElapsedTime[cuGeom.depth], countIntraRDO[cuGeom.depth]);
+
+        /* remeasure best mode, allowing TU splits */
+        cu.setLumaIntraDirSubParts(bmode, absPartIdx, depth + initTuDepth);
+        m_entropyCoder.load(m_rqt[depth].cur);
+
+        Cost icosts;
+        if (checkTransformSkip)
+            codeIntraLumaTSkip(intraMode, cuGeom, initTuDepth, absPartIdx, icosts);
+        else
+            codeIntraLumaQT(intraMode, cuGeom, initTuDepth, absPartIdx, true, icosts, depthRange);
+        totalDistortion += icosts.distortion;
+
+        extractIntraResultQT(cu, *reconYuv, initTuDepth, absPartIdx);
+
+        // set reconstruction for next intra prediction blocks
+        if (puIdx != numPU - 1)
+        {
+            /* This has important implications for parallelism and RDO.  It is writing intermediate results into the
+             * output recon picture, so it cannot proceed in parallel with anything else when doing INTRA_NXN. Also
+             * it is not updating m_rdContexts[depth].cur for the later PUs which I suspect is slightly wrong. I think
+             * that the contexts should be tracked through each PU */
+            PicYuv*  reconPic = m_frame->m_reconPic;
+            pixel*   dst       = reconPic->getLumaAddr(cu.m_cuAddr, cuGeom.absPartIdx + absPartIdx);
+            uint32_t dststride = reconPic->m_stride;
+            const pixel*   src = reconYuv->getLumaAddr(absPartIdx);
+            uint32_t srcstride = reconYuv->m_size;
+            primitives.cu[log2TrSize - 2].copy_pp(dst, dststride, src, srcstride);
+        }
+    }
+
+    if (numPU > 1)
+    {
+        // NxN: OR the four PU CBFs into the depth-1 CBF bit for the whole CU
+        uint32_t combCbfY = 0;
+        for (uint32_t qIdx = 0, qPartIdx = 0; qIdx < 4; ++qIdx, qPartIdx += qNumParts)
+            combCbfY |= cu.getCbf(qPartIdx, TEXT_LUMA, 1);
+
+        for (uint32_t offs = 0; offs < 4 * qNumParts; offs++)
+            cu.m_cbf[0][offs] |= combCbfY;
+    }
+
+    // TODO: remove this
+    m_entropyCoder.load(m_rqt[depth].cur);
+
+    return totalDistortion;
+}
+
+/* Fast chroma intra direction decision: sa8d-score each allowed chroma mode
+ * (prediction only, no residual coding) for both chroma planes and record
+ * the cheapest direction in the CU. 64x64 chroma (4:4:4) is evaluated at
+ * 32x32 with a compensating cost shift. */
+void Search::getBestIntraModeChroma(Mode& intraMode, const CUGeom& cuGeom)
+{
+    CUData& cu = intraMode.cu;
+    const Yuv* fencYuv = intraMode.fencYuv;
+    Yuv* predYuv = &intraMode.predYuv;
+
+    uint32_t bestMode  = 0;
+    uint64_t bestCost  = MAX_INT64;
+    uint32_t modeList[NUM_CHROMA_MODE];
+
+    uint32_t log2TrSizeC = cu.m_log2CUSize[0] - m_hChromaShift;
+    uint32_t tuSize = 1 << log2TrSizeC;
+    uint32_t tuDepth = 0;
+    int32_t costShift = 0;
+
+    if (tuSize > 32)
+    {
+        // evaluate at 32x32; scale distortion up to compensate
+        tuDepth = 1;
+        costShift = 2;
+        log2TrSizeC = 5;
+    }
+
+    IntraNeighbors intraNeighbors;
+    initIntraNeighbors(cu, 0, tuDepth, false, &intraNeighbors);
+    cu.getAllowedChromaDir(0, modeList);
+
+    // check chroma modes
+    for (uint32_t mode = 0; mode < NUM_CHROMA_MODE; mode++)
+    {
+        // resolve DM_CHROMA to the luma direction; remap angle for 4:2:2
+        uint32_t chromaPredMode = modeList[mode];
+        if (chromaPredMode == DM_CHROMA_IDX)
+            chromaPredMode = cu.m_lumaIntraDir[0];
+        if (m_csp == X265_CSP_I422)
+            chromaPredMode = g_chroma422IntraAngleMappingTable[chromaPredMode];
+
+        // sum sa8d over both chroma planes (Cb and Cr)
+        uint64_t cost = 0;
+        for (uint32_t chromaId = TEXT_CHROMA_U; chromaId <= TEXT_CHROMA_V; chromaId++)
+        {
+            const pixel* fenc = fencYuv->m_buf[chromaId];
+            pixel* pred = predYuv->m_buf[chromaId];
+            Predict::initAdiPatternChroma(cu, cuGeom, 0, intraNeighbors, chromaId);
+            // get prediction signal
+            predIntraChromaAng(chromaPredMode, pred, fencYuv->m_csize, log2TrSizeC);
+            cost += primitives.cu[log2TrSizeC - 2].sa8d(fenc, predYuv->m_csize, pred, fencYuv->m_csize) << costShift;
+        }
+
+        if (cost < bestCost)
+        {
+            bestCost = cost;
+            bestMode = modeList[mode];
+        }
+    }
+
+    cu.setChromIntraDirSubParts(bestMode, 0, cuGeom.depth);
+}
+
+/* Estimate the best chroma intra prediction direction(s) for this CU by full
+ * RD search.  For 4:4:4 with NxN luma partitions the CU is split into four
+ * chroma TUs (initTuDepth == 1) and each section is searched independently;
+ * otherwise a single chroma direction covers the whole CU.
+ *
+ * intraMode         - in/out mode context; cu chroma dirs, cbf and transform
+ *                     skip flags are updated with the winning choice, and the
+ *                     best reconstruction is left in intraMode.reconYuv
+ * cuGeom            - geometry of the CU being analyzed
+ * sharedChromaModes - optional modes reused from a previous analysis pass
+ *                     (analysis load); when present and not split, only that
+ *                     single mode is evaluated
+ *
+ * Returns the summed chroma distortion of the selected mode(s).
+ * NOTE(review): relies on m_rqt[depth].cur holding the entropy context saved
+ * by the caller before entry — confirm at call sites. */
+uint32_t Search::estIntraPredChromaQT(Mode &intraMode, const CUGeom& cuGeom, uint8_t* sharedChromaModes)
+{
+    CUData& cu = intraMode.cu;
+    Yuv& reconYuv = intraMode.reconYuv;
+
+    uint32_t depth       = cuGeom.depth;
+    // initTuDepth is 1 only for non-2Nx2N partitions in 4:4:4 (chroma follows
+    // the luma NxN split there); 0 in every other configuration
+    uint32_t initTuDepth = cu.m_partSize[0] != SIZE_2Nx2N && m_csp == X265_CSP_I444;
+    uint32_t log2TrSize  = cuGeom.log2CUSize - initTuDepth;
+    uint32_t absPartStep = cuGeom.numPartitions;
+    uint32_t totalDistortion = 0;
+
+    int size = partitionFromLog2Size(log2TrSize);
+
+    TURecurse tuIterator((initTuDepth == 0) ? DONT_SPLIT : QUAD_SPLIT, absPartStep, 0);
+
+    do
+    {
+        uint32_t absPartIdxC = tuIterator.absPartIdxTURelCU;
+
+        uint32_t bestMode = 0;
+        uint32_t bestDist = 0;
+        uint64_t bestCost = MAX_INT64;
+
+        // init mode list
+        uint32_t minMode = 0;
+        uint32_t maxMode = NUM_CHROMA_MODE;
+        uint32_t modeList[NUM_CHROMA_MODE];
+
+        if (sharedChromaModes && !initTuDepth)
+        {
+            // analysis-load path: trust the shared decision, test only one mode
+            for (uint32_t l = 0; l < NUM_CHROMA_MODE; l++)
+                modeList[l] = sharedChromaModes[0];
+            maxMode = 1;
+        }
+        else
+            cu.getAllowedChromaDir(absPartIdxC, modeList);
+
+        // check chroma modes
+        for (uint32_t mode = minMode; mode < maxMode; mode++)
+        {
+            // restore context models
+            m_entropyCoder.load(m_rqt[depth].cur);
+
+            cu.setChromIntraDirSubParts(modeList[mode], absPartIdxC, depth + initTuDepth);
+            uint32_t psyEnergy = 0;
+            uint32_t dist = codeIntraChromaQt(intraMode, cuGeom, initTuDepth, absPartIdxC, psyEnergy);
+
+            // codeIntraChromaQt may have advanced the contexts while testing
+            // transform skip; reload so the bit estimate below is clean
+            if (m_slice->m_pps->bTransformSkipEnabled)
+                m_entropyCoder.load(m_rqt[depth].cur);
+
+            m_entropyCoder.resetBits();
+            // chroma prediction mode
+            if (cu.m_partSize[0] == SIZE_2Nx2N || m_csp != X265_CSP_I444)
+            {
+                if (!absPartIdxC)
+                    m_entropyCoder.codeIntraDirChroma(cu, absPartIdxC, modeList);
+            }
+            else
+            {
+                uint32_t qNumParts = cuGeom.numPartitions >> 2;
+                if (!(absPartIdxC & (qNumParts - 1)))
+                    m_entropyCoder.codeIntraDirChroma(cu, absPartIdxC, modeList);
+            }
+
+            codeSubdivCbfQTChroma(cu, initTuDepth, absPartIdxC);
+            codeCoeffQTChroma(cu, initTuDepth, absPartIdxC, TEXT_CHROMA_U);
+            codeCoeffQTChroma(cu, initTuDepth, absPartIdxC, TEXT_CHROMA_V);
+            uint32_t bits = m_entropyCoder.getNumberOfWrittenBits();
+            uint64_t cost = m_rdCost.m_psyRd ? m_rdCost.calcPsyRdCost(dist, bits, psyEnergy) : m_rdCost.calcRdCost(dist, bits);
+
+            if (cost < bestCost)
+            {
+                // new best: stash recon, cbf and transform-skip flags so they
+                // can be restored after the remaining modes are tried
+                bestCost = cost;
+                bestDist = dist;
+                bestMode = modeList[mode];
+                extractIntraResultChromaQT(cu, reconYuv, absPartIdxC, initTuDepth);
+                memcpy(m_qtTempCbf[1], cu.m_cbf[1] + absPartIdxC, tuIterator.absPartIdxStep * sizeof(uint8_t));
+                memcpy(m_qtTempCbf[2], cu.m_cbf[2] + absPartIdxC, tuIterator.absPartIdxStep * sizeof(uint8_t));
+                memcpy(m_qtTempTransformSkipFlag[1], cu.m_transformSkip[1] + absPartIdxC, tuIterator.absPartIdxStep * sizeof(uint8_t));
+                memcpy(m_qtTempTransformSkipFlag[2], cu.m_transformSkip[2] + absPartIdxC, tuIterator.absPartIdxStep * sizeof(uint8_t));
+            }
+        }
+
+        if (!tuIterator.isLastSection())
+        {
+            // copy the winning reconstruction into the frame recon so the next
+            // TU section can intra-predict from these pixels
+            uint32_t zorder    = cuGeom.absPartIdx + absPartIdxC;
+            PicYuv*  reconPic  = m_frame->m_reconPic;
+            uint32_t dststride = reconPic->m_strideC;
+            const pixel* src;
+            pixel* dst;
+
+            dst = reconPic->getCbAddr(cu.m_cuAddr, zorder);
+            src = reconYuv.getCbAddr(absPartIdxC);
+            primitives.chroma[m_csp].cu[size].copy_pp(dst, dststride, src, reconYuv.m_csize);
+
+            dst = reconPic->getCrAddr(cu.m_cuAddr, zorder);
+            src = reconYuv.getCrAddr(absPartIdxC);
+            primitives.chroma[m_csp].cu[size].copy_pp(dst, dststride, src, reconYuv.m_csize);
+        }
+
+        // restore the best mode's flags into the CU (the last mode evaluated
+        // may not be the winner)
+        memcpy(cu.m_cbf[1] + absPartIdxC, m_qtTempCbf[1], tuIterator.absPartIdxStep * sizeof(uint8_t));
+        memcpy(cu.m_cbf[2] + absPartIdxC, m_qtTempCbf[2], tuIterator.absPartIdxStep * sizeof(uint8_t));
+        memcpy(cu.m_transformSkip[1] + absPartIdxC, m_qtTempTransformSkipFlag[1], tuIterator.absPartIdxStep * sizeof(uint8_t));
+        memcpy(cu.m_transformSkip[2] + absPartIdxC, m_qtTempTransformSkipFlag[2], tuIterator.absPartIdxStep * sizeof(uint8_t));
+        cu.setChromIntraDirSubParts(bestMode, absPartIdxC, depth + initTuDepth);
+        totalDistortion += bestDist;
+    }
+    while (tuIterator.isNextSection());
+
+    if (initTuDepth != 0)
+    {
+        // when split, each quadrant's cbf must also reflect whether ANY
+        // quadrant coded coefficients (HEVC cbf inheritance at depth 1)
+        uint32_t combCbfU = 0;
+        uint32_t combCbfV = 0;
+        uint32_t qNumParts = tuIterator.absPartIdxStep;
+        for (uint32_t qIdx = 0, qPartIdx = 0; qIdx < 4; ++qIdx, qPartIdx += qNumParts)
+        {
+            combCbfU |= cu.getCbf(qPartIdx, TEXT_CHROMA_U, 1);
+            combCbfV |= cu.getCbf(qPartIdx, TEXT_CHROMA_V, 1);
+        }
+
+        for (uint32_t offs = 0; offs < 4 * qNumParts; offs++)
+        {
+            cu.m_cbf[1][offs] |= combCbfU;
+            cu.m_cbf[2][offs] |= combCbfV;
+        }
+    }
+
+    /* TODO: remove this */
+    m_entropyCoder.load(m_rqt[depth].cur);
+    return totalDistortion;
+}
+
+/* estimation of best merge coding of an inter PU (2Nx2N merge PUs are evaluated as their own mode) */
+/* Builds the merge candidate list for the PU, evaluates each candidate by
+ * SATD (luma, plus chroma when bChromaSATD) + candidate-index signal bits,
+ * and fills 'm' with the winning candidate's MV fields, direction, index and
+ * bits.  Side effect: temporarily writes candidate MVs/refIdx into cu.m_mv /
+ * cu.m_refIdx at pu.puAbsPartIdx for motion compensation.
+ * Returns the best candidate's cost, or MAX_UINT if none was evaluable.
+ * NOTE(review): if frame-parallelism skips every candidate, m.index keeps its
+ * caller-provided value (callers memset it to 0) — confirm callers treat a
+ * MAX_UINT return as "no merge". */
+uint32_t Search::mergeEstimation(CUData& cu, const CUGeom& cuGeom, const PredictionUnit& pu, int puIdx, MergeData& m)
+{
+    X265_CHECK(cu.m_partSize[0] != SIZE_2Nx2N, "mergeEstimation() called for 2Nx2N\n");
+
+    MVField  candMvField[MRG_MAX_NUM_CANDS][2];
+    uint8_t  candDir[MRG_MAX_NUM_CANDS];
+    uint32_t numMergeCand = cu.getInterMergeCandidates(pu.puAbsPartIdx, puIdx, candMvField, candDir);
+
+    if (cu.isBipredRestriction())
+    {
+        /* do not allow bidir merge candidates if PU is smaller than 8x8, drop L1 reference */
+        for (uint32_t mergeCand = 0; mergeCand < numMergeCand; ++mergeCand)
+        {
+            if (candDir[mergeCand] == 3)
+            {
+                candDir[mergeCand] = 1;
+                candMvField[mergeCand][1].refIdx = REF_NOT_VALID;
+            }
+        }
+    }
+
+    Yuv& tempYuv = m_rqt[cuGeom.depth].tmpPredYuv;
+
+    uint32_t outCost = MAX_UINT;
+    for (uint32_t mergeCand = 0; mergeCand < numMergeCand; ++mergeCand)
+    {
+        /* Prevent TMVP candidates from using unavailable reference pixels */
+        if (m_bFrameParallel &&
+            (candMvField[mergeCand][0].mv.y >= (m_param->searchRange + 1) * 4 ||
+             candMvField[mergeCand][1].mv.y >= (m_param->searchRange + 1) * 4))
+            continue;
+
+        // install the candidate in the CU so motionCompensation() can use it
+        cu.m_mv[0][pu.puAbsPartIdx] = candMvField[mergeCand][0].mv;
+        cu.m_refIdx[0][pu.puAbsPartIdx] = (int8_t)candMvField[mergeCand][0].refIdx;
+        cu.m_mv[1][pu.puAbsPartIdx] = candMvField[mergeCand][1].mv;
+        cu.m_refIdx[1][pu.puAbsPartIdx] = (int8_t)candMvField[mergeCand][1].refIdx;
+
+        motionCompensation(cu, pu, tempYuv, true, m_me.bChromaSATD);
+
+        uint32_t costCand = m_me.bufSATD(tempYuv.getLumaAddr(pu.puAbsPartIdx), tempYuv.m_size);
+        if (m_me.bChromaSATD)
+            costCand += m_me.bufChromaSATD(tempYuv, pu.puAbsPartIdx);
+
+        uint32_t bitsCand = getTUBits(mergeCand, numMergeCand);
+        costCand = costCand + m_rdCost.getCost(bitsCand);
+        if (costCand < outCost)
+        {
+            outCost = costCand;
+            m.bits = bitsCand;
+            m.index = mergeCand;
+        }
+    }
+
+    m.mvField[0] = candMvField[m.index][0];
+    m.mvField[1] = candMvField[m.index][1];
+    m.dir = candDir[m.index];
+
+    return outCost;
+}
+
+/* Fetch the lookahead's lowres motion vector for the 16x16 block covering the
+ * center of this PU, scaled up to full resolution.  Returns a zero MV when the
+ * POC distance exceeds the lookahead depth or no estimate was produced. */
+MV Search::getLowresMV(const CUData& cu, const PredictionUnit& pu, int list, int ref)
+{
+    int pocDelta = abs(m_slice->m_poc - m_slice->m_refPOCList[list][ref]);
+    if (pocDelta > m_param->bframes + 1)
+        return 0; /* poc difference is out of range for lookahead */
+
+    MV* lowresMvs = m_frame->m_lowres.lowresMvs[list][pocDelta - 1];
+    if (lowresMvs[0].x == 0x7FFF)
+        return 0; /* this motion search was not estimated by lookahead */
+
+    /* locate the 16x16 lowres block under the PU center */
+    uint32_t bx = (cu.m_cuPelX + g_zscanToPelX[pu.puAbsPartIdx] + pu.width / 2) >> 4;
+    uint32_t by = (cu.m_cuPelY + g_zscanToPelY[pu.puAbsPartIdx] + pu.height / 2) >> 4;
+    uint32_t blockIdx = by * m_frame->m_lowres.maxBlocksInRow + bx;
+
+    X265_CHECK(bx < m_frame->m_lowres.maxBlocksInRow, "block_x is too high\n");
+    X265_CHECK(by < m_frame->m_lowres.maxBlocksInCol, "block_y is too high\n");
+
+    return lowresMvs[blockIdx] << 1; /* scale up lowres mv */
+}
+
+/* Choose which of the two AMVP candidates to use as the MVP for the motion
+ * search, comparing the luma SAD of the prediction each one produces.
+ * Returns 0 or 1 (index into amvp); ties go to candidate 0. */
+int Search::selectMVP(const CUData& cu, const PredictionUnit& pu, const MV amvp[AMVP_NUM_CANDS], int list, int ref)
+{
+    if (amvp[0] == amvp[1])
+        return 0;
+
+    uint32_t candCost[AMVP_NUM_CANDS];
+    Yuv& predYuv = m_rqt[cu.m_cuDepth[0]].tmpPredYuv;
+
+    for (int cand = 0; cand < AMVP_NUM_CANDS; cand++)
+    {
+        MV mv = amvp[cand];
+
+        // NOTE: skip mvCand if Y is > merange and -FN>1
+        if (m_bFrameParallel && (mv.y >= (m_param->searchRange + 1) * 4))
+        {
+            candCost[cand] = m_me.COST_MAX;
+            continue;
+        }
+
+        cu.clipMv(mv);
+        predInterLumaPixel(pu, predYuv, *m_slice->m_refReconPicList[list][ref], mv);
+        candCost[cand] = m_me.bufSAD(predYuv.getLumaAddr(pu.puAbsPartIdx), predYuv.m_size);
+    }
+
+    return (candCost[0] <= candCost[1]) ? 0 : 1;
+}
+
+/* Bonded-worker entry point for distributed PME: record per-frame-encoder
+ * stats (when enabled) and hand off to the master's processPME() using this
+ * worker thread's own analysis context as the slave */
+void Search::PME::processTasks(int workerThreadId)
+{
+#if DETAILED_CU_STATS
+    int fe = mode.cu.m_encData->m_frameEncoderID;
+    master.m_stats[fe].countPMETasks++;
+    ScopedElapsedTime pmeTime(master.m_stats[fe].pmeTime);
+#endif
+    ProfileScopeEvent(pme);
+    master.processPME(*this, master.m_tld[workerThreadId].analysis);
+}
+
+/* Pull motion-estimation jobs off the shared PME job list and run them on
+ * 'slave' until the list is drained.  Called concurrently by the master and
+ * any bonded worker threads; pme.m_lock serializes job acquisition only, the
+ * ME work itself runs unlocked.  Jobs 0..refCnt[0]-1 are L0 references, the
+ * remainder are L1. */
+void Search::processPME(PME& pme, Search& slave)
+{
+    /* acquire a motion estimation job, else exit early */
+    int meId;
+    pme.m_lock.acquire();
+    if (pme.m_jobTotal > pme.m_jobAcquired)
+    {
+        meId = pme.m_jobAcquired++;
+        pme.m_lock.release();
+    }
+    else
+    {
+        pme.m_lock.release();
+        return;
+    }
+
+    /* Setup slave Search instance for ME for master's CU */
+    if (&slave != this)
+    {
+        slave.m_slice = m_slice;
+        slave.m_frame = m_frame;
+        slave.m_param = m_param;
+        slave.setLambdaFromQP(pme.mode.cu, m_rdCost.m_qp);
+        slave.m_me.setSourcePU(*pme.mode.fencYuv, pme.pu.ctuAddr, pme.pu.cuAbsPartIdx, pme.pu.puAbsPartIdx, pme.pu.width, pme.pu.height);
+    }
+
+    /* Perform ME, repeat until no more work is available */
+    do
+    {
+        if (meId < pme.m_jobs.refCnt[0])
+        {
+            int refIdx = pme.m_jobs.ref[0][meId]; //L0
+            slave.singleMotionEstimation(*this, pme.mode, pme.pu, pme.puIdx, 0, refIdx);
+        }
+        else
+        {
+            int refIdx = pme.m_jobs.ref[1][meId - pme.m_jobs.refCnt[0]]; //L1
+            slave.singleMotionEstimation(*this, pme.mode, pme.pu, pme.puIdx, 1, refIdx);
+        }
+
+        /* try to grab another job; meId stays -1 when the list is empty */
+        meId = -1;
+        pme.m_lock.acquire();
+        if (pme.m_jobTotal > pme.m_jobAcquired)
+            meId = pme.m_jobAcquired++;
+        pme.m_lock.release();
+    }
+    while (meId >= 0);
+}
+
+/* Run a full motion search for one (list, ref) pair of one PU and, under the
+ * master's ME lock, update interMode.bestME[part][list] if this reference
+ * produced a cheaper (or equal-cost, lower-index) result.  May execute on a
+ * slave Search instance while reading shared state from 'master'. */
+void Search::singleMotionEstimation(Search& master, Mode& interMode, const PredictionUnit& pu, int part, int list, int ref)
+{
+    uint32_t bits = master.m_listSelBits[list] + MVP_IDX_BITS;
+    bits += getTUBits(ref, m_slice->m_numRefIdx[list]);
+
+    MotionData* bestME = interMode.bestME[part];
+
+    // 12 mv candidates including lowresMV
+    MV  mvc[(MD_ABOVE_LEFT + 1) * 2 + 2];
+    int numMvc = interMode.cu.getPMV(interMode.interNeighbours, list, ref, interMode.amvpCand[list][ref], mvc);
+
+    const MV* amvp = interMode.amvpCand[list][ref];
+    int mvpIdx = selectMVP(interMode.cu, pu, amvp, list, ref);
+    MV mvmin, mvmax, outmv, mvp = amvp[mvpIdx];
+
+    // seed the search with the lookahead's lowres estimate when available
+    MV lmv = getLowresMV(interMode.cu, pu, list, ref);
+    if (lmv.notZero())
+        mvc[numMvc++] = lmv;
+
+    setSearchRange(interMode.cu, mvp, m_param->searchRange, mvmin, mvmax);
+
+    int satdCost = m_me.motionEstimate(&m_slice->m_mref[list][ref], mvmin, mvmax, mvp, numMvc, mvc, m_param->searchRange, outmv);
+
+    /* Get total cost of partition, but only include MV bit cost once */
+    bits += m_me.bitcost(outmv);
+    uint32_t cost = (satdCost - m_me.mvcost(outmv)) + m_rdCost.getCost(bits);
+
+    /* Refine MVP selection, updates: mvpIdx, bits, cost */
+    mvp = checkBestMVP(amvp, outmv, mvpIdx, bits, cost);
+
+    /* tie goes to the smallest ref ID, just like --no-pme */
+    ScopedLock _lock(master.m_meLock);
+    if (cost < bestME[list].cost ||
+       (cost == bestME[list].cost && ref < bestME[list].ref))
+    {
+        bestME[list].mv = outmv;
+        bestME[list].mvp = mvp;
+        bestME[list].mvpIdx = mvpIdx;
+        bestME[list].ref = ref;
+        bestME[list].cost = cost;
+        bestME[list].bits = bits;
+    }
+}
+
+/* find the best inter prediction for each PU of specified mode */
+/* For each PU of the CU: evaluate merge candidates (non-2Nx2N only), then
+ * unidirectional ME for every allowed reference in each list (serially,
+ * distributed via PME workers, or reusing analysis-load decisions), then
+ * bidirectional prediction (including the zero-MV special case), pick the
+ * cheapest option and store MVs/refIdx/MVDs into the CU.  Finally runs motion
+ * compensation into interMode.predYuv (chroma only when bChromaMC).
+ * refMasks[puIdx] bitmask: low 16 bits enable L0 refs, high 16 bits L1 refs;
+ * zero means "all references allowed". */
+void Search::predInterSearch(Mode& interMode, const CUGeom& cuGeom, bool bChromaMC, uint32_t refMasks[2])
+{
+    ProfileCUScope(interMode.cu, motionEstimationElapsedTime, countMotionEstimate);
+
+    CUData& cu = interMode.cu;
+    Yuv* predYuv = &interMode.predYuv;
+
+    // 12 mv candidates including lowresMV
+    MV mvc[(MD_ABOVE_LEFT + 1) * 2 + 2];
+
+    const Slice *slice = m_slice;
+    int numPart     = cu.getNumPartInter(0);
+    int numPredDir  = slice->isInterP() ? 1 : 2;
+    const int* numRefIdx = slice->m_numRefIdx;
+    uint32_t lastMode = 0;
+    int      totalmebits = 0;
+    MV       mvzero(0, 0);
+    Yuv&     tmpPredYuv = m_rqt[cuGeom.depth].tmpPredYuv;
+
+    MergeData merge;
+    memset(&merge, 0, sizeof(merge));
+
+    for (int puIdx = 0; puIdx < numPart; puIdx++)
+    {
+        MotionData* bestME = interMode.bestME[puIdx];
+        PredictionUnit pu(cu, cuGeom, puIdx);
+
+        m_me.setSourcePU(*interMode.fencYuv, pu.ctuAddr, pu.cuAbsPartIdx, pu.puAbsPartIdx, pu.width, pu.height);
+
+        /* find best cost merge candidate. note: 2Nx2N merge and bidir are handled as separate modes */
+        uint32_t mrgCost = numPart == 1 ? MAX_UINT : mergeEstimation(cu, cuGeom, pu, puIdx, merge);
+
+        bestME[0].cost = MAX_UINT;
+        bestME[1].cost = MAX_UINT;
+
+        getBlkBits((PartSize)cu.m_partSize[0], slice->isInterP(), puIdx, lastMode, m_listSelBits);
+        bool bDoUnidir = true;
+
+        cu.getNeighbourMV(puIdx, pu.puAbsPartIdx, interMode.interNeighbours);
+
+        /* Uni-directional prediction */
+        if (m_param->analysisMode == X265_ANALYSIS_LOAD && bestME[0].ref >= 0)
+        {
+            // analysis-load: only re-search the reference chosen by the
+            // previous pass for each list
+            for (int list = 0; list < numPredDir; list++)
+            {
+                int ref = bestME[list].ref;
+                uint32_t bits = m_listSelBits[list] + MVP_IDX_BITS;
+                bits += getTUBits(ref, numRefIdx[list]);
+
+                int numMvc = cu.getPMV(interMode.interNeighbours, list, ref, interMode.amvpCand[list][ref], mvc);
+
+                const MV* amvp = interMode.amvpCand[list][ref];
+                int mvpIdx = selectMVP(cu, pu, amvp, list, ref);
+                MV mvmin, mvmax, outmv, mvp = amvp[mvpIdx];
+
+                MV lmv = getLowresMV(cu, pu, list, ref);
+                if (lmv.notZero())
+                    mvc[numMvc++] = lmv;
+
+                setSearchRange(cu, mvp, m_param->searchRange, mvmin, mvmax);
+                int satdCost = m_me.motionEstimate(&slice->m_mref[list][ref], mvmin, mvmax, mvp, numMvc, mvc, m_param->searchRange, outmv);
+
+                /* Get total cost of partition, but only include MV bit cost once */
+                bits += m_me.bitcost(outmv);
+                uint32_t cost = (satdCost - m_me.mvcost(outmv)) + m_rdCost.getCost(bits);
+
+                /* Refine MVP selection, updates: mvpIdx, bits, cost */
+                mvp = checkBestMVP(amvp, outmv, mvpIdx, bits, cost);
+
+                if (cost < bestME[list].cost)
+                {
+                    bestME[list].mv = outmv;
+                    bestME[list].mvp = mvp;
+                    bestME[list].mvpIdx = mvpIdx;
+                    bestME[list].cost = cost;
+                    bestME[list].bits = bits;
+                }
+            }
+            bDoUnidir = false;
+        }
+        else if (m_param->bDistributeMotionEstimation)
+        {
+            // distributed PME: build the job list of enabled references and
+            // let bonded workers search them in parallel
+            PME pme(*this, interMode, cuGeom, pu, puIdx);
+            pme.m_jobTotal = 0;
+            pme.m_jobAcquired = 1; /* reserve L0-0 or L1-0 */
+
+            uint32_t refMask = refMasks[puIdx] ? refMasks[puIdx] : (uint32_t)-1;
+            for (int list = 0; list < numPredDir; list++)
+            {
+                int idx = 0;
+                for (int ref = 0; ref < numRefIdx[list]; ref++)
+                {
+                    if (!(refMask & (1 << ref)))
+                        continue;
+
+                    pme.m_jobs.ref[list][idx++]  = ref;
+                    pme.m_jobTotal++;
+                }
+                pme.m_jobs.refCnt[list] = idx;
+
+                /* the second list ref bits start at bit 16 */
+                refMask >>= 16;
+            }
+
+            if (pme.m_jobTotal > 2)
+            {
+                pme.tryBondPeers(*m_frame->m_encData->m_jobProvider, pme.m_jobTotal - 1);
+
+                processPME(pme, *this);
+
+                // the reserved first job is done inline on this thread
+                int ref = pme.m_jobs.refCnt[0] ? pme.m_jobs.ref[0][0] : pme.m_jobs.ref[1][0];
+                singleMotionEstimation(*this, interMode, pu, puIdx, 0, ref); /* L0-0 or L1-0 */
+
+                bDoUnidir = false;
+
+                ProfileCUScopeNamed(pmeWaitScope, interMode.cu, pmeBlockTime, countPMEMasters);
+                pme.waitForExit();
+            }
+
+            /* if no peer threads were bonded, fall back to doing unidirectional
+             * searches ourselves without overhead of singleMotionEstimation() */
+        }
+        if (bDoUnidir)
+        {
+            // serial unidirectional search over every enabled reference
+            uint32_t refMask = refMasks[puIdx] ? refMasks[puIdx] : (uint32_t)-1;
+
+            for (int list = 0; list < numPredDir; list++)
+            {
+                for (int ref = 0; ref < numRefIdx[list]; ref++)
+                {
+                    ProfileCounter(interMode.cu, totalMotionReferences[cuGeom.depth]);
+
+                    if (!(refMask & (1 << ref)))
+                    {
+                        ProfileCounter(interMode.cu, skippedMotionReferences[cuGeom.depth]);
+                        continue;
+                    }
+
+                    uint32_t bits = m_listSelBits[list] + MVP_IDX_BITS;
+                    bits += getTUBits(ref, numRefIdx[list]);
+
+                    int numMvc = cu.getPMV(interMode.interNeighbours, list, ref, interMode.amvpCand[list][ref], mvc);
+
+                    const MV* amvp = interMode.amvpCand[list][ref];
+                    int mvpIdx = selectMVP(cu, pu, amvp, list, ref);
+                    MV mvmin, mvmax, outmv, mvp = amvp[mvpIdx];
+
+                    MV lmv = getLowresMV(cu, pu, list, ref);
+                    if (lmv.notZero())
+                        mvc[numMvc++] = lmv;
+
+                    setSearchRange(cu, mvp, m_param->searchRange, mvmin, mvmax);
+                    int satdCost = m_me.motionEstimate(&slice->m_mref[list][ref], mvmin, mvmax, mvp, numMvc, mvc, m_param->searchRange, outmv);
+
+                    /* Get total cost of partition, but only include MV bit cost once */
+                    bits += m_me.bitcost(outmv);
+                    uint32_t cost = (satdCost - m_me.mvcost(outmv)) + m_rdCost.getCost(bits);
+
+                    /* Refine MVP selection, updates: mvpIdx, bits, cost */
+                    mvp = checkBestMVP(amvp, outmv, mvpIdx, bits, cost);
+
+                    if (cost < bestME[list].cost)
+                    {
+                        bestME[list].mv = outmv;
+                        bestME[list].mvp = mvp;
+                        bestME[list].mvpIdx = mvpIdx;
+                        bestME[list].ref = ref;
+                        bestME[list].cost = cost;
+                        bestME[list].bits = bits;
+                    }
+                }
+                /* the second list ref bits start at bit 16 */
+                refMask >>= 16;
+            }
+        }
+
+        /* Bi-directional prediction */
+        MotionData bidir[2];
+        uint32_t bidirCost = MAX_UINT;
+        int bidirBits = 0;
+
+        if (slice->isInterB() && !cu.isBipredRestriction() &&  /* biprediction is possible for this PU */
+            cu.m_partSize[pu.puAbsPartIdx] != SIZE_2Nx2N &&    /* 2Nx2N biprediction is handled elsewhere */
+            bestME[0].cost != MAX_UINT && bestME[1].cost != MAX_UINT)
+        {
+            bidir[0] = bestME[0];
+            bidir[1] = bestME[1];
+
+            int satdCost;
+
+            if (m_me.bChromaSATD)
+            {
+                cu.m_mv[0][pu.puAbsPartIdx] = bidir[0].mv;
+                cu.m_refIdx[0][pu.puAbsPartIdx] = (int8_t)bidir[0].ref;
+                cu.m_mv[1][pu.puAbsPartIdx] = bidir[1].mv;
+                cu.m_refIdx[1][pu.puAbsPartIdx] = (int8_t)bidir[1].ref;
+                motionCompensation(cu, pu, tmpPredYuv, true, true);
+
+                satdCost = m_me.bufSATD(tmpPredYuv.getLumaAddr(pu.puAbsPartIdx), tmpPredYuv.m_size) +
+                           m_me.bufChromaSATD(tmpPredYuv, pu.puAbsPartIdx);
+            }
+            else
+            {
+                PicYuv* refPic0 = slice->m_refReconPicList[0][bestME[0].ref];
+                PicYuv* refPic1 = slice->m_refReconPicList[1][bestME[1].ref];
+                Yuv* bidirYuv = m_rqt[cuGeom.depth].bidirPredYuv;
+
+                /* Generate reference subpels */
+                predInterLumaPixel(pu, bidirYuv[0], *refPic0, bestME[0].mv);
+                predInterLumaPixel(pu, bidirYuv[1], *refPic1, bestME[1].mv);
+
+                primitives.pu[m_me.partEnum].pixelavg_pp(tmpPredYuv.m_buf[0], tmpPredYuv.m_size, bidirYuv[0].getLumaAddr(pu.puAbsPartIdx), bidirYuv[0].m_size,
+                                                                                                 bidirYuv[1].getLumaAddr(pu.puAbsPartIdx), bidirYuv[1].m_size, 32);
+                satdCost = m_me.bufSATD(tmpPredYuv.m_buf[0], tmpPredYuv.m_size);
+            }
+
+            bidirBits = bestME[0].bits + bestME[1].bits + m_listSelBits[2] - (m_listSelBits[0] + m_listSelBits[1]);
+            bidirCost = satdCost + m_rdCost.getCost(bidirBits);
+
+            bool bTryZero = bestME[0].mv.notZero() || bestME[1].mv.notZero();
+            if (bTryZero)
+            {
+                /* Do not try zero MV if unidir motion predictors are beyond
+                 * valid search area */
+                MV mvmin, mvmax;
+                int merange = X265_MAX(m_param->sourceWidth, m_param->sourceHeight);
+                setSearchRange(cu, mvzero, merange, mvmin, mvmax);
+                mvmax.y += 2; // there is some pad for subpel refine
+                mvmin <<= 2;
+                mvmax <<= 2;
+
+                bTryZero &= bestME[0].mvp.checkRange(mvmin, mvmax);
+                bTryZero &= bestME[1].mvp.checkRange(mvmin, mvmax);
+            }
+            if (bTryZero)
+            {
+                /* coincident blocks of the two reference pictures */
+                if (m_me.bChromaSATD)
+                {
+                    cu.m_mv[0][pu.puAbsPartIdx] = mvzero;
+                    cu.m_refIdx[0][pu.puAbsPartIdx] = (int8_t)bidir[0].ref;
+                    cu.m_mv[1][pu.puAbsPartIdx] = mvzero;
+                    cu.m_refIdx[1][pu.puAbsPartIdx] = (int8_t)bidir[1].ref;
+                    motionCompensation(cu, pu, tmpPredYuv, true, true);
+
+                    satdCost = m_me.bufSATD(tmpPredYuv.getLumaAddr(pu.puAbsPartIdx), tmpPredYuv.m_size) +
+                               m_me.bufChromaSATD(tmpPredYuv, pu.puAbsPartIdx);
+                }
+                else
+                {
+                    const pixel* ref0 = m_slice->m_mref[0][bestME[0].ref].getLumaAddr(pu.ctuAddr, pu.cuAbsPartIdx + pu.puAbsPartIdx);
+                    const pixel* ref1 = m_slice->m_mref[1][bestME[1].ref].getLumaAddr(pu.ctuAddr, pu.cuAbsPartIdx + pu.puAbsPartIdx);
+                    intptr_t refStride = slice->m_mref[0][0].lumaStride;
+
+                    primitives.pu[m_me.partEnum].pixelavg_pp(tmpPredYuv.m_buf[0], tmpPredYuv.m_size, ref0, refStride, ref1, refStride, 32);
+                    satdCost = m_me.bufSATD(tmpPredYuv.m_buf[0], tmpPredYuv.m_size);
+                }
+
+                MV mvp0 = bestME[0].mvp;
+                int mvpIdx0 = bestME[0].mvpIdx;
+                uint32_t bits0 = bestME[0].bits - m_me.bitcost(bestME[0].mv, mvp0) + m_me.bitcost(mvzero, mvp0);
+
+                MV mvp1 = bestME[1].mvp;
+                int mvpIdx1 = bestME[1].mvpIdx;
+                uint32_t bits1 = bestME[1].bits - m_me.bitcost(bestME[1].mv, mvp1) + m_me.bitcost(mvzero, mvp1);
+
+                uint32_t cost = satdCost + m_rdCost.getCost(bits0) + m_rdCost.getCost(bits1);
+
+                /* refine MVP selection for zero mv, updates: mvp, mvpidx, bits, cost */
+                mvp0 = checkBestMVP(interMode.amvpCand[0][bestME[0].ref], mvzero, mvpIdx0, bits0, cost);
+                mvp1 = checkBestMVP(interMode.amvpCand[1][bestME[1].ref], mvzero, mvpIdx1, bits1, cost);
+
+                if (cost < bidirCost)
+                {
+                    bidir[0].mv = mvzero;
+                    bidir[1].mv = mvzero;
+                    bidir[0].mvp = mvp0;
+                    bidir[1].mvp = mvp1;
+                    bidir[0].mvpIdx = mvpIdx0;
+                    bidir[1].mvpIdx = mvpIdx1;
+                    bidirCost = cost;
+                    bidirBits = bits0 + bits1 + m_listSelBits[2] - (m_listSelBits[0] + m_listSelBits[1]);
+                }
+            }
+        }
+
+        /* select best option and store into CU */
+        if (mrgCost < bidirCost && mrgCost < bestME[0].cost && mrgCost < bestME[1].cost)
+        {
+            cu.m_mergeFlag[pu.puAbsPartIdx] = true;
+            cu.m_mvpIdx[0][pu.puAbsPartIdx] = merge.index; /* merge candidate ID is stored in L0 MVP idx */
+            cu.setPUInterDir(merge.dir, pu.puAbsPartIdx, puIdx);
+            cu.setPUMv(0, merge.mvField[0].mv, pu.puAbsPartIdx, puIdx);
+            cu.setPURefIdx(0, merge.mvField[0].refIdx, pu.puAbsPartIdx, puIdx);
+            cu.setPUMv(1, merge.mvField[1].mv, pu.puAbsPartIdx, puIdx);
+            cu.setPURefIdx(1, merge.mvField[1].refIdx, pu.puAbsPartIdx, puIdx);
+
+            totalmebits += merge.bits;
+        }
+        else if (bidirCost < bestME[0].cost && bidirCost < bestME[1].cost)
+        {
+            lastMode = 2;
+
+            cu.m_mergeFlag[pu.puAbsPartIdx] = false;
+            cu.setPUInterDir(3, pu.puAbsPartIdx, puIdx);
+            cu.setPUMv(0, bidir[0].mv, pu.puAbsPartIdx, puIdx);
+            cu.setPURefIdx(0, bestME[0].ref, pu.puAbsPartIdx, puIdx);
+            cu.m_mvd[0][pu.puAbsPartIdx] = bidir[0].mv - bidir[0].mvp;
+            cu.m_mvpIdx[0][pu.puAbsPartIdx] = bidir[0].mvpIdx;
+
+            cu.setPUMv(1, bidir[1].mv, pu.puAbsPartIdx, puIdx);
+            cu.setPURefIdx(1, bestME[1].ref, pu.puAbsPartIdx, puIdx);
+            cu.m_mvd[1][pu.puAbsPartIdx] = bidir[1].mv - bidir[1].mvp;
+            cu.m_mvpIdx[1][pu.puAbsPartIdx] = bidir[1].mvpIdx;
+
+            totalmebits += bidirBits;
+        }
+        else if (bestME[0].cost <= bestME[1].cost)
+        {
+            lastMode = 0;
+
+            cu.m_mergeFlag[pu.puAbsPartIdx] = false;
+            cu.setPUInterDir(1, pu.puAbsPartIdx, puIdx);
+            cu.setPUMv(0, bestME[0].mv, pu.puAbsPartIdx, puIdx);
+            cu.setPURefIdx(0, bestME[0].ref, pu.puAbsPartIdx, puIdx);
+            cu.m_mvd[0][pu.puAbsPartIdx] = bestME[0].mv - bestME[0].mvp;
+            cu.m_mvpIdx[0][pu.puAbsPartIdx] = bestME[0].mvpIdx;
+
+            cu.setPURefIdx(1, REF_NOT_VALID, pu.puAbsPartIdx, puIdx);
+            cu.setPUMv(1, mvzero, pu.puAbsPartIdx, puIdx);
+
+            totalmebits += bestME[0].bits;
+        }
+        else
+        {
+            lastMode = 1;
+
+            cu.m_mergeFlag[pu.puAbsPartIdx] = false;
+            cu.setPUInterDir(2, pu.puAbsPartIdx, puIdx);
+            cu.setPUMv(1, bestME[1].mv, pu.puAbsPartIdx, puIdx);
+            cu.setPURefIdx(1, bestME[1].ref, pu.puAbsPartIdx, puIdx);
+            cu.m_mvd[1][pu.puAbsPartIdx] = bestME[1].mv - bestME[1].mvp;
+            cu.m_mvpIdx[1][pu.puAbsPartIdx] = bestME[1].mvpIdx;
+
+            cu.setPURefIdx(0, REF_NOT_VALID, pu.puAbsPartIdx, puIdx);
+            cu.setPUMv(0, mvzero, pu.puAbsPartIdx, puIdx);
+
+            totalmebits += bestME[1].bits;
+        }
+
+        motionCompensation(cu, pu, *predYuv, true, bChromaMC);
+    }
+    X265_CHECK(interMode.ok(), "inter mode is not ok");
+    interMode.sa8dBits += totalmebits;
+}
+
+/* Fill blockBit[0..2] with the bit cost of signaling inter_pred_idc for this
+ * PU: [0] = list 0, [1] = list 1, [2] = bidir.  For AMP/rectangular modes in
+ * B slices the cost also depends on the PU index and the previous PU's mode. */
+void Search::getBlkBits(PartSize cuMode, bool bPSlice, int partIdx, uint32_t lastMode, uint32_t blockBit[3])
+{
+    switch (cuMode)
+    {
+    case SIZE_2Nx2N:
+    case SIZE_NxN:
+        /* square partitions share the same fixed costs */
+        blockBit[0] = (!bPSlice) ? 3 : 1;
+        blockBit[1] = 3;
+        blockBit[2] = 5;
+        break;
+
+    case SIZE_2NxN:
+    case SIZE_2NxnU:
+    case SIZE_2NxnD:
+    {
+        /* horizontally-split modes: [partIdx][lastMode][list] */
+        static const uint32_t horzBits[2][3][3] =
+        {
+            { { 0, 0, 3 }, { 0, 0, 0 }, { 0, 0, 0 } },
+            { { 5, 7, 7 }, { 7, 5, 7 }, { 9 - 3, 9 - 3, 9 - 3 } }
+        };
+        if (bPSlice)
+        {
+            blockBit[0] = 3;
+            blockBit[1] = 0;
+            blockBit[2] = 0;
+        }
+        else
+            memcpy(blockBit, horzBits[partIdx][lastMode], 3 * sizeof(uint32_t));
+        break;
+    }
+
+    case SIZE_Nx2N:
+    case SIZE_nLx2N:
+    case SIZE_nRx2N:
+    {
+        /* vertically-split modes: [partIdx][lastMode][list] */
+        static const uint32_t vertBits[2][3][3] =
+        {
+            { { 0, 2, 3 }, { 0, 0, 0 }, { 0, 0, 0 } },
+            { { 5, 7, 7 }, { 7 - 2, 7 - 2, 9 - 2 }, { 9 - 3, 9 - 3, 9 - 3 } }
+        };
+        if (bPSlice)
+        {
+            blockBit[0] = 3;
+            blockBit[1] = 0;
+            blockBit[2] = 0;
+        }
+        else
+            memcpy(blockBit, vertBits[partIdx][lastMode], 3 * sizeof(uint32_t));
+        break;
+    }
+
+    default:
+        X265_CHECK(0, "getBlkBits: unknown cuMode\n");
+        break;
+    }
+}
+
+/* Check whether the other AMVP candidate would encode this MV in fewer bits;
+ * if so, switch mvpIdx to it and adjust outBits/outCost in place.  Returns a
+ * reference to the (possibly updated) chosen MVP. */
+const MV& Search::checkBestMVP(const MV* amvpCand, const MV& mv, int& mvpIdx, uint32_t& outBits, uint32_t& outCost) const
+{
+    int altIdx = !mvpIdx;
+    int deltaBits = m_me.bitcost(mv, amvpCand[altIdx]) - m_me.bitcost(mv, amvpCand[mvpIdx]);
+    if (deltaBits < 0)
+    {
+        /* the alternate MVP yields a cheaper MVD; swap the rate term in the
+         * running cost from the old bit count to the new one */
+        mvpIdx = altIdx;
+        uint32_t prevBits = outBits;
+        outBits = prevBits + deltaBits;
+        outCost = (outCost - m_rdCost.getCost(prevBits)) + m_rdCost.getCost(outBits);
+    }
+    return amvpCand[mvpIdx];
+}
+
+/* Compute the full-pel motion search window [mvmin, mvmax] as a +/-merange
+ * box around the predictor, clipped to the picture boundary, the signaled
+ * maximum MV length, and the reconstructed rows available under frame
+ * parallelism. */
+void Search::setSearchRange(const CUData& cu, const MV& mvp, int merange, MV& mvmin, MV& mvmax) const
+{
+    /* merange is in full-pel units; internal MVs are quarter-pel */
+    MV qpelRange((int16_t)merange << 2, (int16_t)merange << 2);
+    mvmin = mvp - qpelRange;
+    mvmax = mvp + qpelRange;
+
+    cu.clipMv(mvmin);
+    cu.clipMv(mvmax);
+
+    /* Clip search range to signaled maximum MV length.
+     * We do not support this VUI field being changed from the default */
+    const int maxMvLen = (1 << 15) - 1;
+    mvmin.x = X265_MAX(mvmin.x, -maxMvLen);
+    mvmin.y = X265_MAX(mvmin.y, -maxMvLen);
+    mvmax.x = X265_MIN(mvmax.x, maxMvLen);
+    mvmax.y = X265_MIN(mvmax.y, maxMvLen);
+
+    /* back to full-pel for the integer search */
+    mvmin >>= 2;
+    mvmax >>= 2;
+
+    /* conditional clipping for frame parallelism */
+    mvmin.y = X265_MIN(mvmin.y, (int16_t)m_refLagPixels);
+    mvmax.y = X265_MIN(mvmax.y, (int16_t)m_refLagPixels);
+}
+
+/* Note: this function overwrites the RD cost variables of interMode, but leaves the sa8d cost unharmed */
+/* Evaluate the CU as SKIP (no residual): recon = prediction, distortion is
+ * SSE against the source, and the rate is just the skip flag + merge index.
+ * Updates interMode's distortion/bit/cost fields and stores the resulting
+ * entropy contexts in interMode.contexts. */
+void Search::encodeResAndCalcRdSkipCU(Mode& interMode)
+{
+    CUData& cu = interMode.cu;
+    Yuv* reconYuv = &interMode.reconYuv;
+    const Yuv* fencYuv = interMode.fencYuv;
+    Yuv* predYuv = &interMode.predYuv;
+    X265_CHECK(!cu.isIntra(0), "intra CU not expected\n");
+    uint32_t depth  = cu.m_cuDepth[0];
+
+    // No residual coding : SKIP mode
+
+    cu.setPredModeSubParts(MODE_SKIP);
+    cu.clearCbf();
+    cu.setTUDepthSubParts(0, 0, depth);
+
+    reconYuv->copyFromYuv(interMode.predYuv);
+
+    // Luma
+    int part = partitionFromLog2Size(cu.m_log2CUSize[0]);
+    interMode.lumaDistortion = primitives.cu[part].sse_pp(fencYuv->m_buf[0], fencYuv->m_size, reconYuv->m_buf[0], reconYuv->m_size);
+    if (m_csp != X265_CSP_I400) {
+      // Chroma
+        interMode.chromaDistortion = m_rdCost.scaleChromaDist(1, primitives.chroma[m_csp].cu[part].sse_pp(fencYuv->m_buf[1], fencYuv->m_csize, reconYuv->m_buf[1], reconYuv->m_csize));
+        interMode.chromaDistortion += m_rdCost.scaleChromaDist(2, primitives.chroma[m_csp].cu[part].sse_pp(fencYuv->m_buf[2], fencYuv->m_csize, reconYuv->m_buf[2], reconYuv->m_csize));
+    } else {
+        interMode.chromaDistortion = 0;
+    }
+    interMode.distortion = interMode.lumaDistortion + interMode.chromaDistortion;
+
+    // rate: only the skip flag and merge index are signaled for SKIP
+    m_entropyCoder.load(m_rqt[depth].cur);
+    m_entropyCoder.resetBits();
+    if (m_slice->m_pps->bTransquantBypassEnabled)
+        m_entropyCoder.codeCUTransquantBypassFlag(cu.m_tqBypass[0]);
+    m_entropyCoder.codeSkipFlag(cu, 0);
+    m_entropyCoder.codeMergeIndex(cu, 0);
+
+    interMode.mvBits = m_entropyCoder.getNumberOfWrittenBits();
+    interMode.coeffBits = 0;
+    interMode.totalBits = interMode.mvBits;
+    if (m_rdCost.m_psyRd)
+        interMode.psyEnergy = m_rdCost.psyCost(part, fencYuv->m_buf[0], fencYuv->m_size, reconYuv->m_buf[0], reconYuv->m_size);
+    interMode.resEnergy = primitives.cu[part].sse_pp(fencYuv->m_buf[0], fencYuv->m_size, predYuv->m_buf[0], predYuv->m_size);
+    updateModeCost(interMode);
+    m_entropyCoder.store(interMode.contexts);
+}
+
+/* encode residual and calculate rate-distortion for a CU block.
+ * Performs the residual quad-tree search (estimateResidualQT), optionally
+ * discards the whole residual when signaling cbf=0 is cheaper, then counts
+ * the final signaling bits and computes the clipped reconstruction cost.
+ * Note: this function overwrites the RD cost variables of interMode, but leaves the sa8d cost unharmed */
+void Search::encodeResAndCalcRdInterCU(Mode& interMode, const CUGeom& cuGeom)
+{
+    ProfileCUScope(interMode.cu, interRDOElapsedTime[cuGeom.depth], countInterRDO[cuGeom.depth]);
+
+    CUData& cu = interMode.cu;
+    Yuv* reconYuv = &interMode.reconYuv;
+    Yuv* predYuv = &interMode.predYuv;
+    uint32_t depth = cuGeom.depth;
+    ShortYuv* resiYuv = &m_rqt[depth].tmpResiYuv;
+    const Yuv* fencYuv = interMode.fencYuv;
+
+    X265_CHECK(!cu.isIntra(0), "intra CU not expected\n");
+
+    uint32_t log2CUSize = cuGeom.log2CUSize;
+    int sizeIdx = log2CUSize - 2;
+
+    // residual = source - prediction
+    resiYuv->subtract(*fencYuv, *predYuv, log2CUSize);
+
+    uint32_t tuDepthRange[2];
+    cu.getInterTUQtDepthRange(tuDepthRange, 0);
+
+    m_entropyCoder.load(m_rqt[depth].cur);
+
+    // RD search over the residual quad-tree; costs.rdcost is the best found
+    Cost costs;
+    estimateResidualQT(interMode, cuGeom, 0, 0, *resiYuv, costs, tuDepthRange);
+
+    uint32_t tqBypass = cu.m_tqBypass[0];
+    if (!tqBypass)
+    {
+        // distortion of coding no residual at all (recon == prediction)
+        sse_ret_t cbf0Dist = primitives.cu[sizeIdx].sse_pp(fencYuv->m_buf[0], fencYuv->m_size, predYuv->m_buf[0], predYuv->m_size);
+        if (m_csp != X265_CSP_I400) {
+            // NOTE(review): the fenc-stride argument here is predYuv->m_csize,
+            // while the clipped-distortion block below passes fencYuv->m_csize;
+            // harmless only if fenc and pred share the same chroma stride -- confirm
+            cbf0Dist += m_rdCost.scaleChromaDist(1, primitives.chroma[m_csp].cu[sizeIdx].sse_pp(fencYuv->m_buf[1], predYuv->m_csize, predYuv->m_buf[1], predYuv->m_csize));
+            cbf0Dist += m_rdCost.scaleChromaDist(2, primitives.chroma[m_csp].cu[sizeIdx].sse_pp(fencYuv->m_buf[2], predYuv->m_csize, predYuv->m_buf[2], predYuv->m_csize));
+        }
+        
+        /* Consider the RD cost of not signaling any residual */
+        m_entropyCoder.load(m_rqt[depth].cur);
+        m_entropyCoder.resetBits();
+        m_entropyCoder.codeQtRootCbfZero();
+        uint32_t cbf0Bits = m_entropyCoder.getNumberOfWrittenBits();
+
+        uint64_t cbf0Cost;
+        uint32_t cbf0Energy;
+        if (m_rdCost.m_psyRd)
+        {
+            cbf0Energy = m_rdCost.psyCost(log2CUSize - 2, fencYuv->m_buf[0], fencYuv->m_size, predYuv->m_buf[0], predYuv->m_size);
+            cbf0Cost = m_rdCost.calcPsyRdCost(cbf0Dist, cbf0Bits, cbf0Energy);
+        }
+        else
+            cbf0Cost = m_rdCost.calcRdCost(cbf0Dist, cbf0Bits);
+
+        // drop the residual entirely if cbf=0 is the cheaper choice
+        if (cbf0Cost < costs.rdcost)
+        {
+            cu.clearCbf();
+            cu.setTUDepthSubParts(0, 0, depth);
+        }
+    }
+
+    if (cu.getQtRootCbf(0))
+        saveResidualQTData(cu, *resiYuv, 0, 0);
+
+    /* calculate signal bits for inter/merge/skip coded CU */
+    m_entropyCoder.load(m_rqt[depth].cur);
+
+    m_entropyCoder.resetBits();
+    if (m_slice->m_pps->bTransquantBypassEnabled)
+        m_entropyCoder.codeCUTransquantBypassFlag(tqBypass);
+
+    uint32_t coeffBits, bits;
+    // a 2Nx2N merge CU with no coded residual is signaled as SKIP
+    if (cu.m_mergeFlag[0] && cu.m_partSize[0] == SIZE_2Nx2N && !cu.getQtRootCbf(0))
+    {
+        cu.setPredModeSubParts(MODE_SKIP);
+
+        /* Merge/Skip */
+        m_entropyCoder.codeSkipFlag(cu, 0);
+        m_entropyCoder.codeMergeIndex(cu, 0);
+        coeffBits = 0;
+        bits = m_entropyCoder.getNumberOfWrittenBits();
+    }
+    else
+    {
+        // full inter signaling: mode/partition/prediction info, then coefficients
+        m_entropyCoder.codeSkipFlag(cu, 0);
+        m_entropyCoder.codePredMode(cu.m_predMode[0]);
+        m_entropyCoder.codePartSize(cu, 0, cuGeom.depth);
+        m_entropyCoder.codePredInfo(cu, 0);
+        uint32_t mvBits = m_entropyCoder.getNumberOfWrittenBits();
+
+        bool bCodeDQP = m_slice->m_pps->bUseDQP;
+        m_entropyCoder.codeCoeff(cu, 0, bCodeDQP, tuDepthRange);
+        bits = m_entropyCoder.getNumberOfWrittenBits();
+
+        coeffBits = bits - mvBits;
+    }
+
+    m_entropyCoder.store(interMode.contexts);
+
+    // final reconstruction: prediction plus (clipped) residual, or prediction alone
+    if (cu.getQtRootCbf(0))
+        reconYuv->addClip(*predYuv, *resiYuv, log2CUSize);
+    else
+        reconYuv->copyFromYuv(*predYuv);
+
+    // update with clipped distortion and cost (qp estimation loop uses unclipped values)
+    sse_ret_t bestLumaDist = primitives.cu[sizeIdx].sse_pp(fencYuv->m_buf[0], fencYuv->m_size, reconYuv->m_buf[0], reconYuv->m_size);
+    sse_ret_t bestChromaDist;
+    if (m_csp != X265_CSP_I400) {
+        bestChromaDist = m_rdCost.scaleChromaDist(1, primitives.chroma[m_csp].cu[sizeIdx].sse_pp(fencYuv->m_buf[1], fencYuv->m_csize, reconYuv->m_buf[1], reconYuv->m_csize));
+        bestChromaDist += m_rdCost.scaleChromaDist(2, primitives.chroma[m_csp].cu[sizeIdx].sse_pp(fencYuv->m_buf[2], fencYuv->m_csize, reconYuv->m_buf[2], reconYuv->m_csize));
+    } else {
+        bestChromaDist = 0;
+    }
+    if (m_rdCost.m_psyRd)
+        interMode.psyEnergy = m_rdCost.psyCost(sizeIdx, fencYuv->m_buf[0], fencYuv->m_size, reconYuv->m_buf[0], reconYuv->m_size);
+    interMode.resEnergy = primitives.cu[sizeIdx].sse_pp(fencYuv->m_buf[0], fencYuv->m_size, predYuv->m_buf[0], predYuv->m_size);
+    interMode.totalBits = bits;
+    interMode.lumaDistortion = bestLumaDist;
+    interMode.chromaDistortion = bestChromaDist;
+    interMode.distortion = bestLumaDist + bestChromaDist;
+    interMode.coeffBits = coeffBits;
+    interMode.mvBits = bits - coeffBits;
+    updateModeCost(interMode);
+    checkDQP(interMode, cuGeom);
+}
+
+/* Forward-transform and quantize the inter residual of one TU, recursing
+ * into four sub-TUs when the current transform size is outside the allowed
+ * depth range.  The quantized residual is inverse-transformed back in place
+ * (or zero-filled when no significant coefficients remain) and the luma and
+ * chroma CBF flags are updated accordingly.  Unlike the RD-search path, no
+ * costs are computed here. */
+void Search::residualTransformQuantInter(Mode& mode, const CUGeom& cuGeom, uint32_t absPartIdx, uint32_t tuDepth, const uint32_t depthRange[2])
+{
+    uint32_t depth = cuGeom.depth + tuDepth;
+    CUData& cu = mode.cu;
+    uint32_t log2TrSize = cuGeom.log2CUSize - tuDepth;
+
+    // code this TU whole only if its size is within the signaled TU range;
+    // non-2Nx2N CUs force at least one split at the CU root
+    bool bCheckFull = log2TrSize <= depthRange[1];
+    if (cu.m_partSize[0] != SIZE_2Nx2N && !tuDepth && log2TrSize > depthRange[0])
+        bCheckFull = false;
+
+    if (bCheckFull)
+    {
+        // code full block
+        uint32_t log2TrSizeC = log2TrSize - m_hChromaShift;
+        bool bCodeChroma = true;
+        uint32_t tuDepthC = tuDepth;
+        if (log2TrSizeC < 2)
+        {
+            // chroma cannot go below 4x4: code it once at the parent level,
+            // from the first of the four luma sub-parts only
+            X265_CHECK(log2TrSize == 2 && m_csp != X265_CSP_I444 && tuDepth, "invalid tuDepth\n");
+            log2TrSizeC = 2;
+            tuDepthC--;
+            bCodeChroma = !(absPartIdx & 3);
+        }
+
+        uint32_t absPartIdxStep = cuGeom.numPartitions >> tuDepthC * 2;
+        uint32_t setCbf = 1 << tuDepth;
+
+        uint32_t coeffOffsetY = absPartIdx << (LOG2_UNIT_SIZE * 2);
+        coeff_t* coeffCurY = cu.m_trCoeff[0] + coeffOffsetY;
+
+        uint32_t sizeIdx  = log2TrSize  - 2;
+
+        cu.setTUDepthSubParts(tuDepth, absPartIdx, depth);
+        cu.setTransformSkipSubParts(0, TEXT_LUMA, absPartIdx, depth);
+
+        ShortYuv& resiYuv = m_rqt[cuGeom.depth].tmpResiYuv;
+        const Yuv* fencYuv = mode.fencYuv;
+
+        int16_t* curResiY = resiYuv.getLumaAddr(absPartIdx);
+        uint32_t strideResiY = resiYuv.m_size;
+
+        // luma: forward transform + quant; numSigY = significant coeff count
+        const pixel* fenc = fencYuv->getLumaAddr(absPartIdx);
+        uint32_t numSigY = m_quant.transformNxN(cu, fenc, fencYuv->m_size, curResiY, strideResiY, coeffCurY, log2TrSize, TEXT_LUMA, absPartIdx, false);
+
+        if (numSigY)
+        {
+            // reconstruct the residual in place and mark the luma CBF
+            m_quant.invtransformNxN(cu, curResiY, strideResiY, coeffCurY, log2TrSize, TEXT_LUMA, false, false, numSigY);
+            cu.setCbfSubParts(setCbf, TEXT_LUMA, absPartIdx, depth);
+        }
+        else
+        {
+            // everything quantized to zero: clear the residual and CBF
+            primitives.cu[sizeIdx].blockfill_s(curResiY, strideResiY, 0);
+            cu.setCbfSubParts(0, TEXT_LUMA, absPartIdx, depth);
+        }
+
+        if (bCodeChroma)
+        {
+            uint32_t sizeIdxC = log2TrSizeC - 2;
+            uint32_t strideResiC = resiYuv.m_csize;
+
+            uint32_t coeffOffsetC = coeffOffsetY >> (m_hChromaShift + m_vChromaShift);
+            coeff_t* coeffCurU = cu.m_trCoeff[1] + coeffOffsetC;
+            coeff_t* coeffCurV = cu.m_trCoeff[2] + coeffOffsetC;
+            // 4:2:2 splits each chroma TU vertically into two sub-TUs
+            bool splitIntoSubTUs = (m_csp == X265_CSP_I422);
+
+            TURecurse tuIterator(splitIntoSubTUs ? VERTICAL_SPLIT : DONT_SPLIT, absPartIdxStep, absPartIdx);
+            do
+            {
+                uint32_t absPartIdxC = tuIterator.absPartIdxTURelCU;
+                uint32_t subTUOffset = tuIterator.section << (log2TrSizeC * 2);
+
+                cu.setTransformSkipPartRange(0, TEXT_CHROMA_U, absPartIdxC, tuIterator.absPartIdxStep);
+                cu.setTransformSkipPartRange(0, TEXT_CHROMA_V, absPartIdxC, tuIterator.absPartIdxStep);
+
+                // Cb: same transform/quant/reconstruct/CBF sequence as luma
+                int16_t* curResiU = resiYuv.getCbAddr(absPartIdxC);
+                const pixel* fencCb = fencYuv->getCbAddr(absPartIdxC);
+                uint32_t numSigU = m_quant.transformNxN(cu, fencCb, fencYuv->m_csize, curResiU, strideResiC, coeffCurU + subTUOffset, log2TrSizeC, TEXT_CHROMA_U, absPartIdxC, false);
+                if (numSigU)
+                {
+                    m_quant.invtransformNxN(cu, curResiU, strideResiC, coeffCurU + subTUOffset, log2TrSizeC, TEXT_CHROMA_U, false, false, numSigU);
+                    cu.setCbfPartRange(setCbf, TEXT_CHROMA_U, absPartIdxC, tuIterator.absPartIdxStep);
+                }
+                else
+                {
+                    primitives.cu[sizeIdxC].blockfill_s(curResiU, strideResiC, 0);
+                    cu.setCbfPartRange(0, TEXT_CHROMA_U, absPartIdxC, tuIterator.absPartIdxStep);
+                }
+
+                // Cr: identical handling
+                int16_t* curResiV = resiYuv.getCrAddr(absPartIdxC);
+                const pixel* fencCr = fencYuv->getCrAddr(absPartIdxC);
+                uint32_t numSigV = m_quant.transformNxN(cu, fencCr, fencYuv->m_csize, curResiV, strideResiC, coeffCurV + subTUOffset, log2TrSizeC, TEXT_CHROMA_V, absPartIdxC, false);
+                if (numSigV)
+                {
+                    m_quant.invtransformNxN(cu, curResiV, strideResiC, coeffCurV + subTUOffset, log2TrSizeC, TEXT_CHROMA_V, false, false, numSigV);
+                    cu.setCbfPartRange(setCbf, TEXT_CHROMA_V, absPartIdxC, tuIterator.absPartIdxStep);
+                }
+                else
+                {
+                    primitives.cu[sizeIdxC].blockfill_s(curResiV, strideResiC, 0);
+                    cu.setCbfPartRange(0, TEXT_CHROMA_V, absPartIdxC, tuIterator.absPartIdxStep);
+                }
+            }
+            while (tuIterator.isNextSection());
+
+            if (splitIntoSubTUs)
+            {
+                // fold per-sub-TU CBFs back into the parent-level flags
+                offsetSubTUCBFs(cu, TEXT_CHROMA_U, tuDepth, absPartIdx);
+                offsetSubTUCBFs(cu, TEXT_CHROMA_V, tuDepth, absPartIdx);
+            }
+        }
+    }
+    else
+    {
+        X265_CHECK(log2TrSize > depthRange[0], "residualTransformQuantInter recursion check failure\n");
+
+        // split: recurse into the four quadrant TUs and OR their CBFs upward
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2;
+        uint32_t ycbf = 0, ucbf = 0, vcbf = 0;
+        for (uint32_t qIdx = 0, qPartIdx = absPartIdx; qIdx < 4; ++qIdx, qPartIdx += qNumParts)
+        {
+            residualTransformQuantInter(mode, cuGeom, qPartIdx, tuDepth + 1, depthRange);
+            ycbf |= cu.getCbf(qPartIdx, TEXT_LUMA,     tuDepth + 1);
+            ucbf |= cu.getCbf(qPartIdx, TEXT_CHROMA_U, tuDepth + 1);
+            vcbf |= cu.getCbf(qPartIdx, TEXT_CHROMA_V, tuDepth + 1);
+        }
+        // propagate the combined child CBFs to this depth for every part
+        for (uint32_t i = 0; i < 4 * qNumParts; ++i)
+        {
+            cu.m_cbf[0][absPartIdx + i] |= ycbf << tuDepth;
+            cu.m_cbf[1][absPartIdx + i] |= ucbf << tuDepth;
+            cu.m_cbf[2][absPartIdx + i] |= vcbf << tuDepth;
+        }
+    }
+}
+
+/* RD cost of signaling a zero CBF for one component: the distortion of
+ * leaving the residual uncoded plus the bits of the cbf=0 flag itself
+ * (psy-weighted when psy-rd is enabled). */
+uint64_t Search::estimateNullCbfCost(uint32_t &dist, uint32_t &psyEnergy, uint32_t tuDepth, TextType compId)
+{
+    const uint32_t zeroCbfBits = m_entropyCoder.estimateCbfBits(0, compId, tuDepth);
+
+    return m_rdCost.m_psyRd ? m_rdCost.calcPsyRdCost(dist, zeroCbfBits, psyEnergy)
+                            : m_rdCost.calcRdCost(dist, zeroCbfBits);
+}
+
+void Search::estimateResidualQT(Mode& mode, const CUGeom& cuGeom, uint32_t absPartIdx, uint32_t tuDepth, ShortYuv& resiYuv, Cost& outCosts, const uint32_t depthRange[2])
+{
+    CUData& cu = mode.cu;
+    uint32_t depth = cuGeom.depth + tuDepth;
+    uint32_t log2TrSize = cuGeom.log2CUSize - tuDepth;
+
+    bool bCheckSplit = log2TrSize > depthRange[0];
+    bool bCheckFull = log2TrSize <= depthRange[1];
+    bool bSplitPresentFlag = bCheckSplit && bCheckFull;
+
+    if (cu.m_partSize[0] != SIZE_2Nx2N && !tuDepth && bCheckSplit)
+        bCheckFull = false;
+
+    X265_CHECK(bCheckFull || bCheckSplit, "check-full or check-split must be set\n");
+
+    uint32_t log2TrSizeC = log2TrSize - m_hChromaShift;
+    bool bCodeChroma;
+    uint32_t tuDepthC = tuDepth;
+    
+    if (m_csp != X265_CSP_I400) {
+        bCodeChroma = true;
+        if (log2TrSizeC < 2)
+        {
+            X265_CHECK(log2TrSize == 2 && m_csp != X265_CSP_I444 && tuDepth, "invalid tuDepth\n");
+            log2TrSizeC = 2;
+            tuDepthC--;
+            bCodeChroma = !(absPartIdx & 3);
+        }
+    } else {
+        bCodeChroma = false;
+    }
+
+    // code full block
+    Cost fullCost;
+    fullCost.rdcost = MAX_INT64;
+
+    uint8_t  cbfFlag[MAX_NUM_COMPONENT][2 /*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = { { 0, 0 }, {0, 0}, {0, 0} };
+    uint32_t numSig[MAX_NUM_COMPONENT][2 /*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = { { 0, 0 }, {0, 0}, {0, 0} };
+    uint32_t singleBits[MAX_NUM_COMPONENT][2 /*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = { { 0, 0 }, { 0, 0 }, { 0, 0 } };
+    uint32_t singleDist[MAX_NUM_COMPONENT][2 /*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = { { 0, 0 }, { 0, 0 }, { 0, 0 } };
+    uint32_t singlePsyEnergy[MAX_NUM_COMPONENT][2 /*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = { { 0, 0 }, { 0, 0 }, { 0, 0 } };
+    uint32_t bestTransformMode[MAX_NUM_COMPONENT][2 /*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = { { 0, 0 }, { 0, 0 }, { 0, 0 } };
+    uint64_t minCost[MAX_NUM_COMPONENT][2 /*0 = top (or whole TU for non-4:2:2) sub-TU, 1 = bottom sub-TU*/] = { { MAX_INT64, MAX_INT64 }, {MAX_INT64, MAX_INT64}, {MAX_INT64, MAX_INT64} };
+
+    m_entropyCoder.store(m_rqt[depth].rqtRoot);
+
+    uint32_t trSize = 1 << log2TrSize;
+    const bool splitIntoSubTUs = (m_csp == X265_CSP_I422);
+    uint32_t absPartIdxStep = cuGeom.numPartitions >> tuDepthC * 2;
+    const Yuv* fencYuv = mode.fencYuv;
+
+    // code full block
+    if (bCheckFull)
+    {
+        uint32_t trSizeC = 1 << log2TrSizeC;
+        int partSize  = partitionFromLog2Size(log2TrSize);
+        int partSizeC = partitionFromLog2Size(log2TrSizeC);
+        const uint32_t qtLayer = log2TrSize - 2;
+        uint32_t coeffOffsetY = absPartIdx << (LOG2_UNIT_SIZE * 2);
+        coeff_t* coeffCurY = m_rqt[qtLayer].coeffRQT[0] + coeffOffsetY;
+
+        bool checkTransformSkip   = m_slice->m_pps->bTransformSkipEnabled && !cu.m_tqBypass[0];
+        bool checkTransformSkipY  = checkTransformSkip && log2TrSize  <= MAX_LOG2_TS_SIZE;
+        bool checkTransformSkipC = checkTransformSkip && log2TrSizeC <= MAX_LOG2_TS_SIZE;
+
+        cu.setTUDepthSubParts(tuDepth, absPartIdx, depth);
+        cu.setTransformSkipSubParts(0, TEXT_LUMA, absPartIdx, depth);
+
+        if (m_bEnableRDOQ)
+            m_entropyCoder.estBit(m_entropyCoder.m_estBitsSbac, log2TrSize, true);
+
+        const pixel* fenc = fencYuv->getLumaAddr(absPartIdx);
+        int16_t* resi = resiYuv.getLumaAddr(absPartIdx);
+        numSig[TEXT_LUMA][0] = m_quant.transformNxN(cu, fenc, fencYuv->m_size, resi, resiYuv.m_size, coeffCurY, log2TrSize, TEXT_LUMA, absPartIdx, false);
+        cbfFlag[TEXT_LUMA][0] = !!numSig[TEXT_LUMA][0];
+
+        m_entropyCoder.resetBits();
+
+        if (bSplitPresentFlag && log2TrSize > depthRange[0])
+            m_entropyCoder.codeTransformSubdivFlag(0, 5 - log2TrSize);
+        fullCost.bits = m_entropyCoder.getNumberOfWrittenBits();
+
+        // Coding luma cbf flag has been removed from here. The context for cbf flag is different for each depth.
+        // So it is valid if we encode coefficients and then cbfs at least for analysis.
+//        m_entropyCoder.codeQtCbfLuma(cbfFlag[TEXT_LUMA][0], tuDepth);
+        if (cbfFlag[TEXT_LUMA][0])
+            m_entropyCoder.codeCoeffNxN(cu, coeffCurY, absPartIdx, log2TrSize, TEXT_LUMA);
+
+        uint32_t singleBitsPrev = m_entropyCoder.getNumberOfWrittenBits();
+        singleBits[TEXT_LUMA][0] = singleBitsPrev - fullCost.bits;
+
+        X265_CHECK(log2TrSize <= 5, "log2TrSize is too large\n");
+        uint32_t distY = primitives.cu[partSize].ssd_s(resiYuv.getLumaAddr(absPartIdx), resiYuv.m_size);
+        uint32_t psyEnergyY = 0;
+        if (m_rdCost.m_psyRd)
+            psyEnergyY = m_rdCost.psyCost(partSize, resiYuv.getLumaAddr(absPartIdx), resiYuv.m_size, (int16_t*)zeroShort, 0);
+
+        int16_t* curResiY    = m_rqt[qtLayer].resiQtYuv.getLumaAddr(absPartIdx);
+        uint32_t strideResiY = m_rqt[qtLayer].resiQtYuv.m_size;
+
+        if (cbfFlag[TEXT_LUMA][0])
+        {
+            m_quant.invtransformNxN(cu, curResiY, strideResiY, coeffCurY, log2TrSize, TEXT_LUMA, false, false, numSig[TEXT_LUMA][0]); //this is for inter mode only
+
+            // non-zero cost calculation for luma - This is an approximation
+            // finally we have to encode correct cbf after comparing with null cost
+            const uint32_t nonZeroDistY = primitives.cu[partSize].sse_ss(resiYuv.getLumaAddr(absPartIdx), resiYuv.m_size, curResiY, strideResiY);
+            uint32_t nzCbfBitsY = m_entropyCoder.estimateCbfBits(cbfFlag[TEXT_LUMA][0], TEXT_LUMA, tuDepth);
+            uint32_t nonZeroPsyEnergyY = 0; uint64_t singleCostY = 0;
+            if (m_rdCost.m_psyRd)
+            {
+                nonZeroPsyEnergyY = m_rdCost.psyCost(partSize, resiYuv.getLumaAddr(absPartIdx), resiYuv.m_size, curResiY, strideResiY);
+                singleCostY = m_rdCost.calcPsyRdCost(nonZeroDistY, nzCbfBitsY + singleBits[TEXT_LUMA][0], nonZeroPsyEnergyY);
+            }
+            else
+                singleCostY = m_rdCost.calcRdCost(nonZeroDistY, nzCbfBitsY + singleBits[TEXT_LUMA][0]);
+
+            if (cu.m_tqBypass[0])
+            {
+                singleDist[TEXT_LUMA][0] = nonZeroDistY;
+                singlePsyEnergy[TEXT_LUMA][0] = nonZeroPsyEnergyY;
+            }
+            else
+            {
+                // zero-cost calculation for luma. This is an approximation
+                // Initial cost calculation was also an approximation. First resetting the bit counter and then encoding zero cbf.
+                // Now encoding the zero cbf without writing into bitstream, keeping m_fracBits unchanged. The same is valid for chroma.
+                uint64_t nullCostY = estimateNullCbfCost(distY, psyEnergyY, tuDepth, TEXT_LUMA);
+
+                if (nullCostY < singleCostY)
+                {
+                    cbfFlag[TEXT_LUMA][0] = 0;
+                    singleBits[TEXT_LUMA][0] = 0;
+                    primitives.cu[partSize].blockfill_s(curResiY, strideResiY, 0);
+#if CHECKED_BUILD || _DEBUG
+                    uint32_t numCoeffY = 1 << (log2TrSize << 1);
+                    memset(coeffCurY, 0, sizeof(coeff_t) * numCoeffY);
+#endif
+                    if (checkTransformSkipY)
+                        minCost[TEXT_LUMA][0] = nullCostY;
+                    singleDist[TEXT_LUMA][0] = distY;
+                    singlePsyEnergy[TEXT_LUMA][0] = psyEnergyY;
+                }
+                else
+                {
+                    if (checkTransformSkipY)
+                        minCost[TEXT_LUMA][0] = singleCostY;
+                    singleDist[TEXT_LUMA][0] = nonZeroDistY;
+                    singlePsyEnergy[TEXT_LUMA][0] = nonZeroPsyEnergyY;
+                }
+            }
+        }
+        else
+        {
+            if (checkTransformSkipY)
+                minCost[TEXT_LUMA][0] = estimateNullCbfCost(distY, psyEnergyY, tuDepth, TEXT_LUMA);
+            primitives.cu[partSize].blockfill_s(curResiY, strideResiY, 0);
+            singleDist[TEXT_LUMA][0] = distY;
+            singlePsyEnergy[TEXT_LUMA][0] = psyEnergyY;
+        }
+
+        cu.setCbfSubParts(cbfFlag[TEXT_LUMA][0] << tuDepth, TEXT_LUMA, absPartIdx, depth);
+
+        if (bCodeChroma)
+        {
+            uint32_t coeffOffsetC = coeffOffsetY >> (m_hChromaShift + m_vChromaShift);
+            uint32_t strideResiC  = m_rqt[qtLayer].resiQtYuv.m_csize;
+            for (uint32_t chromaId = TEXT_CHROMA_U; chromaId <= TEXT_CHROMA_V; chromaId++)
+            {
+                uint32_t distC = 0, psyEnergyC = 0;
+                coeff_t* coeffCurC = m_rqt[qtLayer].coeffRQT[chromaId] + coeffOffsetC;
+                TURecurse tuIterator(splitIntoSubTUs ? VERTICAL_SPLIT : DONT_SPLIT, absPartIdxStep, absPartIdx);
+
+                do
+                {
+                    uint32_t absPartIdxC = tuIterator.absPartIdxTURelCU;
+                    uint32_t subTUOffset = tuIterator.section << (log2TrSizeC * 2);
+
+                    cu.setTransformSkipPartRange(0, (TextType)chromaId, absPartIdxC, tuIterator.absPartIdxStep);
+
+                    if (m_bEnableRDOQ && (chromaId != TEXT_CHROMA_V))
+                        m_entropyCoder.estBit(m_entropyCoder.m_estBitsSbac, log2TrSizeC, false);
+
+                    fenc = fencYuv->getChromaAddr(chromaId, absPartIdxC);
+                    resi = resiYuv.getChromaAddr(chromaId, absPartIdxC);
+                    numSig[chromaId][tuIterator.section] = m_quant.transformNxN(cu, fenc, fencYuv->m_csize, resi, resiYuv.m_csize, coeffCurC + subTUOffset, log2TrSizeC, (TextType)chromaId, absPartIdxC, false);
+                    cbfFlag[chromaId][tuIterator.section] = !!numSig[chromaId][tuIterator.section];
+
+                    if (cbfFlag[chromaId][tuIterator.section])
+                        m_entropyCoder.codeCoeffNxN(cu, coeffCurC + subTUOffset, absPartIdxC, log2TrSizeC, (TextType)chromaId);
+                    uint32_t newBits = m_entropyCoder.getNumberOfWrittenBits();
+                    singleBits[chromaId][tuIterator.section] = newBits - singleBitsPrev;
+                    singleBitsPrev = newBits;
+
+                    int16_t* curResiC = m_rqt[qtLayer].resiQtYuv.getChromaAddr(chromaId, absPartIdxC);
+                    distC = m_rdCost.scaleChromaDist(chromaId, primitives.cu[log2TrSizeC - 2].ssd_s(resiYuv.getChromaAddr(chromaId, absPartIdxC), resiYuv.m_csize));
+
+                    if (cbfFlag[chromaId][tuIterator.section])
+                    {
+                        m_quant.invtransformNxN(cu, curResiC, strideResiC, coeffCurC + subTUOffset,
+                                                log2TrSizeC, (TextType)chromaId, false, false, numSig[chromaId][tuIterator.section]);
+
+                        // non-zero cost calculation for luma, same as luma - This is an approximation
+                        // finally we have to encode correct cbf after comparing with null cost
+                        uint32_t dist = primitives.cu[partSizeC].sse_ss(resiYuv.getChromaAddr(chromaId, absPartIdxC), resiYuv.m_csize, curResiC, strideResiC);
+                        uint32_t nzCbfBitsC = m_entropyCoder.estimateCbfBits(cbfFlag[chromaId][tuIterator.section], (TextType)chromaId, tuDepth);
+                        uint32_t nonZeroDistC = m_rdCost.scaleChromaDist(chromaId, dist);
+                        uint32_t nonZeroPsyEnergyC = 0; uint64_t singleCostC = 0;
+                        if (m_rdCost.m_psyRd)
+                        {
+                            nonZeroPsyEnergyC = m_rdCost.psyCost(partSizeC, resiYuv.getChromaAddr(chromaId, absPartIdxC), resiYuv.m_csize, curResiC, strideResiC);
+                            singleCostC = m_rdCost.calcPsyRdCost(nonZeroDistC, nzCbfBitsC + singleBits[chromaId][tuIterator.section], nonZeroPsyEnergyC);
+                        }
+                        else
+                            singleCostC = m_rdCost.calcRdCost(nonZeroDistC, nzCbfBitsC + singleBits[chromaId][tuIterator.section]);
+
+                        if (cu.m_tqBypass[0])
+                        {
+                            singleDist[chromaId][tuIterator.section] = nonZeroDistC;
+                            singlePsyEnergy[chromaId][tuIterator.section] = nonZeroPsyEnergyC;
+                        }
+                        else
+                        {
+                            //zero-cost calculation for chroma. This is an approximation
+                            uint64_t nullCostC = estimateNullCbfCost(distC, psyEnergyC, tuDepth, (TextType)chromaId);
+
+                            if (nullCostC < singleCostC)
+                            {
+                                cbfFlag[chromaId][tuIterator.section] = 0;
+                                singleBits[chromaId][tuIterator.section] = 0;
+                                primitives.cu[partSizeC].blockfill_s(curResiC, strideResiC, 0);
+#if CHECKED_BUILD || _DEBUG
+                                uint32_t numCoeffC = 1 << (log2TrSizeC << 1);
+                                memset(coeffCurC + subTUOffset, 0, sizeof(coeff_t) * numCoeffC);
+#endif
+                                if (checkTransformSkipC)
+                                    minCost[chromaId][tuIterator.section] = nullCostC;
+                                singleDist[chromaId][tuIterator.section] = distC;
+                                singlePsyEnergy[chromaId][tuIterator.section] = psyEnergyC;
+                            }
+                            else
+                            {
+                                if (checkTransformSkipC)
+                                    minCost[chromaId][tuIterator.section] = singleCostC;
+                                singleDist[chromaId][tuIterator.section] = nonZeroDistC;
+                                singlePsyEnergy[chromaId][tuIterator.section] = nonZeroPsyEnergyC;
+                            }
+                        }
+                    }
+                    else
+                    {
+                        if (checkTransformSkipC)
+                            minCost[chromaId][tuIterator.section] = estimateNullCbfCost(distC, psyEnergyC, tuDepthC, (TextType)chromaId);
+                        primitives.cu[partSizeC].blockfill_s(curResiC, strideResiC, 0);
+                        singleDist[chromaId][tuIterator.section] = distC;
+                        singlePsyEnergy[chromaId][tuIterator.section] = psyEnergyC;
+                    }
+
+                    cu.setCbfPartRange(cbfFlag[chromaId][tuIterator.section] << tuDepth, (TextType)chromaId, absPartIdxC, tuIterator.absPartIdxStep);
+                }
+                while (tuIterator.isNextSection());
+            }
+        }
+
+        if (checkTransformSkipY)
+        {
+            uint32_t nonZeroDistY = 0;
+            uint32_t nonZeroPsyEnergyY = 0;
+            uint64_t singleCostY = MAX_INT64;
+
+            m_entropyCoder.load(m_rqt[depth].rqtRoot);
+
+            cu.setTransformSkipSubParts(1, TEXT_LUMA, absPartIdx, depth);
+
+            if (m_bEnableRDOQ)
+                m_entropyCoder.estBit(m_entropyCoder.m_estBitsSbac, log2TrSize, true);
+
+            fenc = fencYuv->getLumaAddr(absPartIdx);
+            resi = resiYuv.getLumaAddr(absPartIdx);
+            uint32_t numSigTSkipY = m_quant.transformNxN(cu, fenc, fencYuv->m_size, resi, resiYuv.m_size, m_tsCoeff, log2TrSize, TEXT_LUMA, absPartIdx, true);
+
+            if (numSigTSkipY)
+            {
+                m_entropyCoder.resetBits();
+                m_entropyCoder.codeQtCbfLuma(!!numSigTSkipY, tuDepth);
+                m_entropyCoder.codeCoeffNxN(cu, m_tsCoeff, absPartIdx, log2TrSize, TEXT_LUMA);
+                const uint32_t skipSingleBitsY = m_entropyCoder.getNumberOfWrittenBits();
+
+                m_quant.invtransformNxN(cu, m_tsResidual, trSize, m_tsCoeff, log2TrSize, TEXT_LUMA, false, true, numSigTSkipY);
+
+                nonZeroDistY = primitives.cu[partSize].sse_ss(resiYuv.getLumaAddr(absPartIdx), resiYuv.m_size, m_tsResidual, trSize);
+
+                if (m_rdCost.m_psyRd)
+                {
+                    nonZeroPsyEnergyY = m_rdCost.psyCost(partSize, resiYuv.getLumaAddr(absPartIdx), resiYuv.m_size, m_tsResidual, trSize);
+                    singleCostY = m_rdCost.calcPsyRdCost(nonZeroDistY, skipSingleBitsY, nonZeroPsyEnergyY);
+                }
+                else
+                    singleCostY = m_rdCost.calcRdCost(nonZeroDistY, skipSingleBitsY);
+            }
+
+            if (!numSigTSkipY || minCost[TEXT_LUMA][0] < singleCostY)
+                cu.setTransformSkipSubParts(0, TEXT_LUMA, absPartIdx, depth);
+            else
+            {
+                singleDist[TEXT_LUMA][0] = nonZeroDistY;
+                singlePsyEnergy[TEXT_LUMA][0] = nonZeroPsyEnergyY;
+                cbfFlag[TEXT_LUMA][0] = !!numSigTSkipY;
+                bestTransformMode[TEXT_LUMA][0] = 1;
+                uint32_t numCoeffY = 1 << (log2TrSize << 1);
+                memcpy(coeffCurY, m_tsCoeff, sizeof(coeff_t) * numCoeffY);
+                primitives.cu[partSize].copy_ss(curResiY, strideResiY, m_tsResidual, trSize);
+            }
+
+            cu.setCbfSubParts(cbfFlag[TEXT_LUMA][0] << tuDepth, TEXT_LUMA, absPartIdx, depth);
+        }
+
+        if (bCodeChroma && checkTransformSkipC)
+        {
+            uint32_t nonZeroDistC = 0, nonZeroPsyEnergyC = 0;
+            uint64_t singleCostC = MAX_INT64;
+            uint32_t strideResiC = m_rqt[qtLayer].resiQtYuv.m_csize;
+            uint32_t coeffOffsetC = coeffOffsetY >> (m_hChromaShift + m_vChromaShift);
+
+            m_entropyCoder.load(m_rqt[depth].rqtRoot);
+
+            for (uint32_t chromaId = TEXT_CHROMA_U; chromaId <= TEXT_CHROMA_V; chromaId++)
+            {
+                coeff_t* coeffCurC = m_rqt[qtLayer].coeffRQT[chromaId] + coeffOffsetC;
+                TURecurse tuIterator(splitIntoSubTUs ? VERTICAL_SPLIT : DONT_SPLIT, absPartIdxStep, absPartIdx);
+
+                do
+                {
+                    uint32_t absPartIdxC = tuIterator.absPartIdxTURelCU;
+                    uint32_t subTUOffset = tuIterator.section << (log2TrSizeC * 2);
+
+                    int16_t* curResiC = m_rqt[qtLayer].resiQtYuv.getChromaAddr(chromaId, absPartIdxC);
+
+                    cu.setTransformSkipPartRange(1, (TextType)chromaId, absPartIdxC, tuIterator.absPartIdxStep);
+
+                    if (m_bEnableRDOQ && (chromaId != TEXT_CHROMA_V))
+                        m_entropyCoder.estBit(m_entropyCoder.m_estBitsSbac, log2TrSizeC, false);
+
+                    fenc = fencYuv->getChromaAddr(chromaId, absPartIdxC);
+                    resi = resiYuv.getChromaAddr(chromaId, absPartIdxC);
+                    uint32_t numSigTSkipC = m_quant.transformNxN(cu, fenc, fencYuv->m_csize, resi, resiYuv.m_csize, m_tsCoeff, log2TrSizeC, (TextType)chromaId, absPartIdxC, true);
+
+                    m_entropyCoder.resetBits();
+                    singleBits[chromaId][tuIterator.section] = 0;
+
+                    if (numSigTSkipC)
+                    {
+                        m_entropyCoder.codeQtCbfChroma(!!numSigTSkipC, tuDepth);
+                        m_entropyCoder.codeCoeffNxN(cu, m_tsCoeff, absPartIdxC, log2TrSizeC, (TextType)chromaId);
+                        singleBits[chromaId][tuIterator.section] = m_entropyCoder.getNumberOfWrittenBits();
+
+                        m_quant.invtransformNxN(cu, m_tsResidual, trSizeC, m_tsCoeff,
+                                                log2TrSizeC, (TextType)chromaId, false, true, numSigTSkipC);
+                        uint32_t dist = primitives.cu[partSizeC].sse_ss(resiYuv.getChromaAddr(chromaId, absPartIdxC), resiYuv.m_csize, m_tsResidual, trSizeC);
+                        nonZeroDistC = m_rdCost.scaleChromaDist(chromaId, dist);
+                        if (m_rdCost.m_psyRd)
+                        {
+                            nonZeroPsyEnergyC = m_rdCost.psyCost(partSizeC, resiYuv.getChromaAddr(chromaId, absPartIdxC), resiYuv.m_csize, m_tsResidual, trSizeC);
+                            singleCostC = m_rdCost.calcPsyRdCost(nonZeroDistC, singleBits[chromaId][tuIterator.section], nonZeroPsyEnergyC);
+                        }
+                        else
+                            singleCostC = m_rdCost.calcRdCost(nonZeroDistC, singleBits[chromaId][tuIterator.section]);
+                    }
+
+                    if (!numSigTSkipC || minCost[chromaId][tuIterator.section] < singleCostC)
+                        cu.setTransformSkipPartRange(0, (TextType)chromaId, absPartIdxC, tuIterator.absPartIdxStep);
+                    else
+                    {
+                        singleDist[chromaId][tuIterator.section] = nonZeroDistC;
+                        singlePsyEnergy[chromaId][tuIterator.section] = nonZeroPsyEnergyC;
+                        cbfFlag[chromaId][tuIterator.section] = !!numSigTSkipC;
+                        bestTransformMode[chromaId][tuIterator.section] = 1;
+                        uint32_t numCoeffC = 1 << (log2TrSizeC << 1);
+                        memcpy(coeffCurC + subTUOffset, m_tsCoeff, sizeof(coeff_t) * numCoeffC);
+                        primitives.cu[partSizeC].copy_ss(curResiC, strideResiC, m_tsResidual, trSizeC);
+                    }
+
+                    cu.setCbfPartRange(cbfFlag[chromaId][tuIterator.section] << tuDepth, (TextType)chromaId, absPartIdxC, tuIterator.absPartIdxStep);
+                }
+                while (tuIterator.isNextSection());
+            }
+        }
+
+        // Here we were encoding cbfs and coefficients, after calculating distortion above.
+        // Now I am encoding only cbfs, since I have encoded coefficients above. I have just collected
+        // bits required for coefficients and added with number of cbf bits. As I tested the order does not
+        // make any difference. But bit confused whether I should load the original context as below.
+        m_entropyCoder.load(m_rqt[depth].rqtRoot);
+        m_entropyCoder.resetBits();
+
+        //Encode cbf flags
+        if (bCodeChroma)
+        {
+            if (!splitIntoSubTUs)
+            {
+                m_entropyCoder.codeQtCbfChroma(cbfFlag[TEXT_CHROMA_U][0], tuDepth);
+                m_entropyCoder.codeQtCbfChroma(cbfFlag[TEXT_CHROMA_V][0], tuDepth);
+            }
+            else
+            {
+                offsetSubTUCBFs(cu, TEXT_CHROMA_U, tuDepth, absPartIdx);
+                offsetSubTUCBFs(cu, TEXT_CHROMA_V, tuDepth, absPartIdx);
+                m_entropyCoder.codeQtCbfChroma(cbfFlag[TEXT_CHROMA_U][0], tuDepth);
+                m_entropyCoder.codeQtCbfChroma(cbfFlag[TEXT_CHROMA_U][1], tuDepth);
+                m_entropyCoder.codeQtCbfChroma(cbfFlag[TEXT_CHROMA_V][0], tuDepth);
+                m_entropyCoder.codeQtCbfChroma(cbfFlag[TEXT_CHROMA_V][1], tuDepth);
+            }
+        }
+
+        m_entropyCoder.codeQtCbfLuma(cbfFlag[TEXT_LUMA][0], tuDepth);
+
+        uint32_t cbfBits = m_entropyCoder.getNumberOfWrittenBits();
+
+        uint32_t coeffBits = 0;
+        coeffBits = singleBits[TEXT_LUMA][0];
+        for (uint32_t subTUIndex = 0; subTUIndex < 2; subTUIndex++)
+        {
+            coeffBits += singleBits[TEXT_CHROMA_U][subTUIndex];
+            coeffBits += singleBits[TEXT_CHROMA_V][subTUIndex];
+        }
+
+        // In split mode, we need only coeffBits. The reason is encoding chroma cbfs is different from luma.
+        // In case of chroma, if any one of the split block's cbf is 1, then we need to encode cbf 1, and then for
+        // four split block's individual cbf value. This is not known before analysis of four split blocks.
+        // For that reason, I am collecting individual coefficient bits only.
+        fullCost.bits = bSplitPresentFlag ? cbfBits + coeffBits : coeffBits;
+
+        fullCost.distortion += singleDist[TEXT_LUMA][0];
+        fullCost.energy += singlePsyEnergy[TEXT_LUMA][0];// need to check we need to add chroma also
+        for (uint32_t subTUIndex = 0; subTUIndex < 2; subTUIndex++)
+        {
+            fullCost.distortion += singleDist[TEXT_CHROMA_U][subTUIndex];
+            fullCost.distortion += singleDist[TEXT_CHROMA_V][subTUIndex];
+        }
+
+        if (m_rdCost.m_psyRd)
+            fullCost.rdcost = m_rdCost.calcPsyRdCost(fullCost.distortion, fullCost.bits, fullCost.energy);
+        else
+            fullCost.rdcost = m_rdCost.calcRdCost(fullCost.distortion, fullCost.bits);
+    }
+
+    // code sub-blocks
+    if (bCheckSplit)
+    {
+        if (bCheckFull)
+        {
+            m_entropyCoder.store(m_rqt[depth].rqtTest);
+            m_entropyCoder.load(m_rqt[depth].rqtRoot);
+        }
+
+        Cost splitCost;
+        if (bSplitPresentFlag && (log2TrSize <= depthRange[1] && log2TrSize > depthRange[0]))
+        {
+            // Subdiv flag can be encoded at the start of analysis of split blocks.
+            m_entropyCoder.resetBits();
+            m_entropyCoder.codeTransformSubdivFlag(1, 5 - log2TrSize);
+            splitCost.bits = m_entropyCoder.getNumberOfWrittenBits();
+        }
+
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2;
+        uint32_t ycbf = 0, ucbf = 0, vcbf = 0;
+        for (uint32_t qIdx = 0, qPartIdx = absPartIdx; qIdx < 4; ++qIdx, qPartIdx += qNumParts)
+        {
+            estimateResidualQT(mode, cuGeom, qPartIdx, tuDepth + 1, resiYuv, splitCost, depthRange);
+            ycbf |= cu.getCbf(qPartIdx, TEXT_LUMA,     tuDepth + 1);
+            ucbf |= cu.getCbf(qPartIdx, TEXT_CHROMA_U, tuDepth + 1);
+            vcbf |= cu.getCbf(qPartIdx, TEXT_CHROMA_V, tuDepth + 1);
+        }
+        for (uint32_t i = 0; i < 4 * qNumParts; ++i)
+        {
+            cu.m_cbf[0][absPartIdx + i] |= ycbf << tuDepth;
+            cu.m_cbf[1][absPartIdx + i] |= ucbf << tuDepth;
+            cu.m_cbf[2][absPartIdx + i] |= vcbf << tuDepth;
+        }
+
+        // Here we were encoding cbfs and coefficients for splitted blocks. Since I have collected coefficient bits
+        // for each individual blocks, only encoding cbf values. As I mentioned encoding chroma cbfs is different then luma.
+        // But have one doubt that if coefficients are encoded in context at depth 2 (for example) and cbfs are encoded in context
+        // at depth 0 (for example).
+        m_entropyCoder.load(m_rqt[depth].rqtRoot);
+        m_entropyCoder.resetBits();
+
+        codeInterSubdivCbfQT(cu, absPartIdx, tuDepth, depthRange);
+        uint32_t splitCbfBits = m_entropyCoder.getNumberOfWrittenBits();
+        splitCost.bits += splitCbfBits;
+
+        if (m_rdCost.m_psyRd)
+            splitCost.rdcost = m_rdCost.calcPsyRdCost(splitCost.distortion, splitCost.bits, splitCost.energy);
+        else
+            splitCost.rdcost = m_rdCost.calcRdCost(splitCost.distortion, splitCost.bits);
+
+        if (ycbf || ucbf || vcbf || !bCheckFull)
+        {
+            if (splitCost.rdcost < fullCost.rdcost)
+            {
+                outCosts.distortion += splitCost.distortion;
+                outCosts.rdcost     += splitCost.rdcost;
+                outCosts.bits       += splitCost.bits;
+                outCosts.energy     += splitCost.energy;
+                return;
+            }
+            else
+                outCosts.energy     += splitCost.energy;
+        }
+
+        cu.setTransformSkipSubParts(bestTransformMode[TEXT_LUMA][0], TEXT_LUMA, absPartIdx, depth);
+        if (bCodeChroma)
+        {
+            if (!splitIntoSubTUs)
+            {
+                cu.setTransformSkipSubParts(bestTransformMode[TEXT_CHROMA_U][0], TEXT_CHROMA_U, absPartIdx, depth);
+                cu.setTransformSkipSubParts(bestTransformMode[TEXT_CHROMA_V][0], TEXT_CHROMA_V, absPartIdx, depth);
+            }
+            else
+            {
+                uint32_t tuNumParts = absPartIdxStep >> 1;
+                cu.setTransformSkipPartRange(bestTransformMode[TEXT_CHROMA_U][0], TEXT_CHROMA_U, absPartIdx             , tuNumParts);
+                cu.setTransformSkipPartRange(bestTransformMode[TEXT_CHROMA_U][1], TEXT_CHROMA_U, absPartIdx + tuNumParts, tuNumParts);
+                cu.setTransformSkipPartRange(bestTransformMode[TEXT_CHROMA_V][0], TEXT_CHROMA_V, absPartIdx             , tuNumParts);
+                cu.setTransformSkipPartRange(bestTransformMode[TEXT_CHROMA_V][1], TEXT_CHROMA_V, absPartIdx + tuNumParts, tuNumParts);
+            }
+        }
+        X265_CHECK(bCheckFull, "check-full must be set\n");
+        m_entropyCoder.load(m_rqt[depth].rqtTest);
+    }
+
+    cu.setTUDepthSubParts(tuDepth, absPartIdx, depth);
+    cu.setCbfSubParts(cbfFlag[TEXT_LUMA][0] << tuDepth, TEXT_LUMA, absPartIdx, depth);
+
+    if (bCodeChroma)
+    {
+        if (!splitIntoSubTUs)
+        {
+            cu.setCbfSubParts(cbfFlag[TEXT_CHROMA_U][0] << tuDepth, TEXT_CHROMA_U, absPartIdx, depth);
+            cu.setCbfSubParts(cbfFlag[TEXT_CHROMA_V][0] << tuDepth, TEXT_CHROMA_V, absPartIdx, depth);
+        }
+        else
+        {
+            uint32_t tuNumParts = absPartIdxStep >> 1;
+
+            offsetCBFs(cbfFlag[TEXT_CHROMA_U]);
+            offsetCBFs(cbfFlag[TEXT_CHROMA_V]);
+            cu.setCbfPartRange(cbfFlag[TEXT_CHROMA_U][0] << tuDepth, TEXT_CHROMA_U, absPartIdx             , tuNumParts);
+            cu.setCbfPartRange(cbfFlag[TEXT_CHROMA_U][1] << tuDepth, TEXT_CHROMA_U, absPartIdx + tuNumParts, tuNumParts);
+            cu.setCbfPartRange(cbfFlag[TEXT_CHROMA_V][0] << tuDepth, TEXT_CHROMA_V, absPartIdx             , tuNumParts);
+            cu.setCbfPartRange(cbfFlag[TEXT_CHROMA_V][1] << tuDepth, TEXT_CHROMA_V, absPartIdx + tuNumParts, tuNumParts);
+        }
+    }
+
+    outCosts.distortion += fullCost.distortion;
+    outCosts.rdcost     += fullCost.rdcost;
+    outCosts.bits       += fullCost.bits;
+    outCosts.energy     += fullCost.energy;
+}
+
+void Search::codeInterSubdivCbfQT(CUData& cu, uint32_t absPartIdx, const uint32_t tuDepth, const uint32_t depthRange[2])
+{
+    X265_CHECK(cu.isInter(absPartIdx), "codeInterSubdivCbfQT() with intra block\n");
+
+    const bool bSubdiv  = tuDepth < cu.m_tuDepth[absPartIdx];
+    uint32_t log2TrSize = cu.m_log2CUSize[0] - tuDepth;
+
+    if (!(log2TrSize - m_hChromaShift < 2))
+    {
+        if (!tuDepth || cu.getCbf(absPartIdx, TEXT_CHROMA_U, tuDepth - 1))
+            m_entropyCoder.codeQtCbfChroma(cu, absPartIdx, TEXT_CHROMA_U, tuDepth, !bSubdiv);
+        if (!tuDepth || cu.getCbf(absPartIdx, TEXT_CHROMA_V, tuDepth - 1))
+            m_entropyCoder.codeQtCbfChroma(cu, absPartIdx, TEXT_CHROMA_V, tuDepth, !bSubdiv);
+    }
+    else
+    {
+        X265_CHECK(cu.getCbf(absPartIdx, TEXT_CHROMA_U, tuDepth) == cu.getCbf(absPartIdx, TEXT_CHROMA_U, tuDepth - 1), "chroma CBF not matching\n");
+        X265_CHECK(cu.getCbf(absPartIdx, TEXT_CHROMA_V, tuDepth) == cu.getCbf(absPartIdx, TEXT_CHROMA_V, tuDepth - 1), "chroma CBF not matching\n");
+    }
+
+    if (!bSubdiv)
+    {
+        m_entropyCoder.codeQtCbfLuma(cu, absPartIdx, tuDepth);
+    }
+    else
+    {
+        uint32_t qNumParts = 1 << (log2TrSize -1 - LOG2_UNIT_SIZE) * 2;
+        for (uint32_t qIdx = 0; qIdx < 4; ++qIdx, absPartIdx += qNumParts)
+            codeInterSubdivCbfQT(cu, absPartIdx, tuDepth + 1, depthRange);
+    }
+}
+
+void Search::saveResidualQTData(CUData& cu, ShortYuv& resiYuv, uint32_t absPartIdx, uint32_t tuDepth)
+{
+    const uint32_t log2TrSize = cu.m_log2CUSize[0] - tuDepth;
+
+    if (tuDepth < cu.m_tuDepth[absPartIdx])
+    {
+        uint32_t qNumParts = 1 << (log2TrSize - 1 - LOG2_UNIT_SIZE) * 2;
+        for (uint32_t qIdx = 0; qIdx < 4; ++qIdx, absPartIdx += qNumParts)
+            saveResidualQTData(cu, resiYuv, absPartIdx, tuDepth + 1);
+        return;
+    }
+
+    const uint32_t qtLayer = log2TrSize - 2;
+
+    uint32_t log2TrSizeC = log2TrSize - m_hChromaShift;
+    bool bCodeChroma;
+    uint32_t tuDepthC = tuDepth;
+
+    if (m_csp != X265_CSP_I400) {
+        bCodeChroma = true;
+        if (log2TrSizeC < 2)
+        {
+            X265_CHECK(log2TrSize == 2 && m_csp != X265_CSP_I444 && tuDepth, "invalid tuDepth\n");
+            log2TrSizeC = 2;
+            tuDepthC--;
+            bCodeChroma = !(absPartIdx & 3);
+        }
+    } else {
+        bCodeChroma = false;
+    }
+
+    m_rqt[qtLayer].resiQtYuv.copyPartToPartLuma(resiYuv, absPartIdx, log2TrSize);
+
+    uint32_t numCoeffY = 1 << (log2TrSize * 2);
+    uint32_t coeffOffsetY = absPartIdx << LOG2_UNIT_SIZE * 2;
+    coeff_t* coeffSrcY = m_rqt[qtLayer].coeffRQT[0] + coeffOffsetY;
+    coeff_t* coeffDstY = cu.m_trCoeff[0] + coeffOffsetY;
+    memcpy(coeffDstY, coeffSrcY, sizeof(coeff_t) * numCoeffY);
+
+    if (bCodeChroma)
+    {
+        m_rqt[qtLayer].resiQtYuv.copyPartToPartChroma(resiYuv, absPartIdx, log2TrSizeC + m_hChromaShift);
+
+        uint32_t numCoeffC = 1 << (log2TrSizeC * 2 + (m_csp == X265_CSP_I422));
+        uint32_t coeffOffsetC = coeffOffsetY >> (m_hChromaShift + m_vChromaShift);
+
+        coeff_t* coeffSrcU = m_rqt[qtLayer].coeffRQT[1] + coeffOffsetC;
+        coeff_t* coeffSrcV = m_rqt[qtLayer].coeffRQT[2] + coeffOffsetC;
+        coeff_t* coeffDstU = cu.m_trCoeff[1] + coeffOffsetC;
+        coeff_t* coeffDstV = cu.m_trCoeff[2] + coeffOffsetC;
+        memcpy(coeffDstU, coeffSrcU, sizeof(coeff_t) * numCoeffC);
+        memcpy(coeffDstV, coeffSrcV, sizeof(coeff_t) * numCoeffC);
+    }
+}
+
+/* returns the number of bits required to signal a non-most-probable mode.
+ * on return mpms contains bitmap of most probable modes */
+uint32_t Search::getIntraRemModeBits(CUData& cu, uint32_t absPartIdx, uint32_t mpmModes[3], uint64_t& mpms) const
+{
+    cu.getIntraDirLumaPredictor(absPartIdx, mpmModes);
+
+    mpms = 0;
+    for (int i = 0; i < 3; ++i)
+        mpms |= ((uint64_t)1 << mpmModes[i]);
+
+    return m_entropyCoder.bitsIntraModeNonMPM();
+}
+
+/* swap the current mode/cost with the mode with the highest cost in the
+ * current candidate list, if its cost is better (maintain a top N list) */
+void Search::updateCandList(uint32_t mode, uint64_t cost, int maxCandCount, uint32_t* candModeList, uint64_t* candCostList)
+{
+    uint32_t maxIndex = 0;
+    uint64_t maxValue = 0;
+
+    for (int i = 0; i < maxCandCount; i++)
+    {
+        if (maxValue < candCostList[i])
+        {
+            maxValue = candCostList[i];
+            maxIndex = i;
+        }
+    }
+
+    if (cost < maxValue)
+    {
+        candCostList[maxIndex] = cost;
+        candModeList[maxIndex] = mode;
+    }
+}
+
+void Search::checkDQP(Mode& mode, const CUGeom& cuGeom)
+{
+    CUData& cu = mode.cu;
+    if (cu.m_slice->m_pps->bUseDQP && cuGeom.depth <= cu.m_slice->m_pps->maxCuDQPDepth)
+    {
+        if (cu.getQtRootCbf(0))
+        {
+            if (m_param->rdLevel >= 3)
+            {
+                mode.contexts.resetBits();
+                mode.contexts.codeDeltaQP(cu, 0);
+                uint32_t bits = mode.contexts.getNumberOfWrittenBits();
+                mode.mvBits += bits;
+                mode.totalBits += bits;
+                updateModeCost(mode);
+            }
+            else if (m_param->rdLevel <= 1)
+            {
+                mode.sa8dBits++;
+                mode.sa8dCost = m_rdCost.calcRdSADCost((uint32_t)mode.distortion, mode.sa8dBits);
+            }
+            else
+            {
+                mode.mvBits++;
+                mode.totalBits++;
+                updateModeCost(mode);
+            }
+        }
+        else
+            cu.setQPSubParts(cu.getRefQP(0), 0, cuGeom.depth);
+    }
+}
+
+void Search::checkDQPForSplitPred(Mode& mode, const CUGeom& cuGeom)
+{
+    CUData& cu = mode.cu;
+
+    if ((cuGeom.depth == cu.m_slice->m_pps->maxCuDQPDepth) && cu.m_slice->m_pps->bUseDQP)
+    {
+        bool hasResidual = false;
+
+        /* Check if any sub-CU has a non-zero QP */
+        for (uint32_t blkIdx = 0; blkIdx < cuGeom.numPartitions; blkIdx++)
+        {
+            if (cu.getQtRootCbf(blkIdx))
+            {
+                hasResidual = true;
+                break;
+            }
+        }
+        if (hasResidual)
+        {
+            if (m_param->rdLevel >= 3)
+            {
+                mode.contexts.resetBits();
+                mode.contexts.codeDeltaQP(cu, 0);
+                uint32_t bits = mode.contexts.getNumberOfWrittenBits();
+                mode.mvBits += bits;
+                mode.totalBits += bits;
+                updateModeCost(mode);
+            }
+            else if (m_param->rdLevel <= 1)
+            {
+                mode.sa8dBits++;
+                mode.sa8dCost = m_rdCost.calcRdSADCost((uint32_t)mode.distortion, mode.sa8dBits);
+            }
+            else
+            {
+                mode.mvBits++;
+                mode.totalBits++;
+                updateModeCost(mode);
+            }
+            /* For all zero CBF sub-CUs, reset QP to RefQP (so that deltaQP is not signalled).
+            When the non-zero CBF sub-CU is found, stop */
+            cu.setQPSubCUs(cu.getRefQP(0), 0, cuGeom.depth);
+        }
+        else
+            /* No residual within this CU or subCU, so reset QP to RefQP */
+            cu.setQPSubParts(cu.getRefQP(0), 0, cuGeom.depth);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/search.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,468 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_SEARCH_H
+#define X265_SEARCH_H
+
+#include "common.h"
+#include "predict.h"
+#include "quant.h"
+#include "bitcost.h"
+#include "framedata.h"
+#include "yuv.h"
+#include "threadpool.h"
+
+#include "rdcost.h"
+#include "entropy.h"
+#include "motion.h"
+
+#if DETAILED_CU_STATS
+#define ProfileCUScopeNamed(name, cu, acc, count) \
+    m_stats[cu.m_encData->m_frameEncoderID].count++; \
+    ScopedElapsedTime name(m_stats[cu.m_encData->m_frameEncoderID].acc)
+#define ProfileCUScope(cu, acc, count) ProfileCUScopeNamed(timedScope, cu, acc, count)
+#define ProfileCounter(cu, count) m_stats[cu.m_encData->m_frameEncoderID].count++;
+#else
+#define ProfileCUScopeNamed(name, cu, acc, count)
+#define ProfileCUScope(cu, acc, count)
+#define ProfileCounter(cu, count)
+#endif
+
+namespace X265_NS {
+// private namespace
+
+class Entropy;
+struct ThreadLocalData;
+
+/* All the CABAC contexts that Analysis needs to keep track of at each depth
+ * and temp buffers for residual, coeff, and recon for use during residual
+ * quad-tree depth recursion */
+struct RQTData
+{
+    Entropy  cur;     /* starting context for current CU */
+
+    /* these are indexed by qtLayer (log2size - 2) so nominally 0=4x4, 1=8x8, 2=16x16, 3=32x32
+     * the coeffRQT and reconQtYuv are allocated to the max CU size at every depth. The parts
+     * which are reconstructed at each depth are valid. At the end, the transform depth table
+     * is walked and the coeff and recon at the final split depths are collected */
+    Entropy  rqtRoot;      /* residual quad-tree start context */
+    Entropy  rqtTemp;      /* residual quad-tree temp context */
+    Entropy  rqtTest;      /* residual quad-tree test context */
+    coeff_t* coeffRQT[3];  /* coeff storage for entire CTU for each RQT layer */
+    Yuv      reconQtYuv;   /* recon storage for entire CTU for each RQT layer (intra) */
+    ShortYuv resiQtYuv;    /* residual storage for entire CTU for each RQT layer (inter) */
+    
+    /* per-depth temp buffers for inter prediction */
+    ShortYuv tmpResiYuv;
+    Yuv      tmpPredYuv;
+    Yuv      bidirPredYuv[2];
+};
+
+struct MotionData
+{
+    MV       mv;
+    MV       mvp;
+    int      mvpIdx;
+    int      ref;
+    uint32_t cost;
+    int      bits;
+};
+
+struct Mode
+{
+    CUData     cu;
+    const Yuv* fencYuv;
+    Yuv        predYuv;
+    Yuv        reconYuv;
+    Entropy    contexts;
+
+    enum { MAX_INTER_PARTS = 2 };
+
+    MotionData bestME[MAX_INTER_PARTS][2];
+    MV         amvpCand[2][MAX_NUM_REF][AMVP_NUM_CANDS];
+
+    // Neighbour MVs of the current partition. 5 spatial candidates and the
+    // temporal candidate.
+    InterNeighbourMV interNeighbours[6];
+
+    uint64_t   rdCost;     // sum of partition (psy) RD costs          (sse(fenc, recon) + lambda2 * bits)
+    uint64_t   sa8dCost;   // sum of partition sa8d distortion costs   (sa8d(fenc, pred) + lambda * bits)
+    uint32_t   sa8dBits;   // signal bits used in sa8dCost calculation
+    uint32_t   psyEnergy;  // sum of partition psycho-visual energy difference
+    sse_ret_t  resEnergy;  // sum of partition residual energy after motion prediction
+    sse_ret_t  lumaDistortion;
+    sse_ret_t  chromaDistortion;
+    sse_ret_t  distortion; // sum of partition SSE distortion
+    uint32_t   totalBits;  // sum of partition bits (mv + coeff)
+    uint32_t   mvBits;     // Mv bits + Ref + block type (or intra mode)
+    uint32_t   coeffBits;  // Texture bits (DCT Coeffs)
+
+    void initCosts()
+    {
+        rdCost = 0;
+        sa8dCost = 0;
+        sa8dBits = 0;
+        psyEnergy = 0;
+        resEnergy = 0;
+        lumaDistortion = 0;
+        chromaDistortion = 0;
+        distortion = 0;
+        totalBits = 0;
+        mvBits = 0;
+        coeffBits = 0;
+    }
+
+    void invalidate()
+    {
+        /* set costs to invalid data, catch uninitialized re-use */
+        rdCost = UINT64_MAX / 2;
+        sa8dCost = UINT64_MAX / 2;
+        sa8dBits = MAX_UINT / 2;
+        psyEnergy = MAX_UINT / 2;
+#if X265_DEPTH <= 10
+        resEnergy = MAX_UINT / 2;
+        lumaDistortion = MAX_UINT / 2;
+        chromaDistortion = MAX_UINT / 2;
+        distortion = MAX_UINT / 2;
+#else
+        resEnergy = UINT64_MAX / 2;
+        lumaDistortion = UINT64_MAX / 2;
+        chromaDistortion = UINT64_MAX / 2;
+        distortion = UINT64_MAX / 2;
+#endif
+        totalBits = MAX_UINT / 2;
+        mvBits = MAX_UINT / 2;
+        coeffBits = MAX_UINT / 2;
+    }
+
+    bool ok() const
+    {
+#if X265_DEPTH <= 10
+        return !(rdCost >= UINT64_MAX / 2 ||
+            sa8dCost >= UINT64_MAX / 2 ||
+            sa8dBits >= MAX_UINT / 2 ||
+            psyEnergy >= MAX_UINT / 2 ||
+            resEnergy >= MAX_UINT / 2 ||
+            lumaDistortion >= MAX_UINT / 2 ||
+            chromaDistortion >= MAX_UINT / 2 ||
+            distortion >= MAX_UINT / 2 ||
+            totalBits >= MAX_UINT / 2 ||
+            mvBits >= MAX_UINT / 2 ||
+            coeffBits >= MAX_UINT / 2);
+#else
+        return !(rdCost >= UINT64_MAX / 2 ||
+                 sa8dCost >= UINT64_MAX / 2 ||
+                 sa8dBits >= MAX_UINT / 2 ||
+                 psyEnergy >= MAX_UINT / 2 ||
+                 resEnergy >= UINT64_MAX / 2 ||
+                 lumaDistortion >= UINT64_MAX / 2 ||
+                 chromaDistortion >= UINT64_MAX / 2 ||
+                 distortion >= UINT64_MAX / 2 ||
+                 totalBits >= MAX_UINT / 2 ||
+                 mvBits >= MAX_UINT / 2 ||
+                 coeffBits >= MAX_UINT / 2);
+#endif
+    }
+
+    void addSubCosts(const Mode& subMode)
+    {
+        X265_CHECK(subMode.ok(), "sub-mode not initialized");
+
+        rdCost += subMode.rdCost;
+        sa8dCost += subMode.sa8dCost;
+        sa8dBits += subMode.sa8dBits;
+        psyEnergy += subMode.psyEnergy;
+        resEnergy += subMode.resEnergy;
+        lumaDistortion += subMode.lumaDistortion;
+        chromaDistortion += subMode.chromaDistortion;
+        distortion += subMode.distortion;
+        totalBits += subMode.totalBits;
+        mvBits += subMode.mvBits;
+        coeffBits += subMode.coeffBits;
+    }
+};
+
+#if DETAILED_CU_STATS
+/* This structure is intended for performance debugging and we make no attempt
+ * to handle dynamic range overflows. Care should be taken to avoid long encodes
+ * if you care about the accuracy of these elapsed times and counters. This
+ * profiling is orthogonal to PPA/VTune and can be enabled independently from
+ * either of them */
+struct CUStats
+{
+    int64_t  intraRDOElapsedTime[NUM_CU_DEPTH]; // elapsed worker time in intra RDO per CU depth
+    int64_t  interRDOElapsedTime[NUM_CU_DEPTH]; // elapsed worker time in inter RDO per CU depth
+    int64_t  intraAnalysisElapsedTime;          // elapsed worker time in intra sa8d analysis
+    int64_t  motionEstimationElapsedTime;       // elapsed worker time in predInterSearch()
+    int64_t  loopFilterElapsedTime;             // elapsed worker time in deblock and SAO and PSNR/SSIM
+    int64_t  pmeTime;                           // elapsed worker time processing ME slave jobs
+    int64_t  pmeBlockTime;                      // elapsed worker time blocked for pme batch completion
+    int64_t  pmodeTime;                         // elapsed worker time processing pmode slave jobs
+    int64_t  pmodeBlockTime;                    // elapsed worker time blocked for pmode batch completion
+    int64_t  weightAnalyzeTime;                 // elapsed worker time analyzing reference weights
+    int64_t  totalCTUTime;                      // elapsed worker time in compressCTU (includes pmode master)
+
+    uint32_t skippedMotionReferences[NUM_CU_DEPTH];
+    uint32_t totalMotionReferences[NUM_CU_DEPTH];
+    uint32_t skippedIntraCU[NUM_CU_DEPTH];
+    uint32_t totalIntraCU[NUM_CU_DEPTH];
+
+    uint64_t countIntraRDO[NUM_CU_DEPTH];
+    uint64_t countInterRDO[NUM_CU_DEPTH];
+    uint64_t countIntraAnalysis;
+    uint64_t countMotionEstimate;
+    uint64_t countLoopFilter;
+    uint64_t countPMETasks;
+    uint64_t countPMEMasters;
+    uint64_t countPModeTasks;
+    uint64_t countPModeMasters;
+    uint64_t countWeightAnalyze;
+    uint64_t totalCTUs;
+
+    CUStats() { clear(); }
+
+    void clear()
+    {
+        memset(this, 0, sizeof(*this));
+    }
+
+    void accumulate(CUStats& other)
+    {
+        for (uint32_t i = 0; i <= g_maxCUDepth; i++)
+        {
+            intraRDOElapsedTime[i] += other.intraRDOElapsedTime[i];
+            interRDOElapsedTime[i] += other.interRDOElapsedTime[i];
+            countIntraRDO[i] += other.countIntraRDO[i];
+            countInterRDO[i] += other.countInterRDO[i];
+            skippedMotionReferences[i] += other.skippedMotionReferences[i];
+            totalMotionReferences[i] += other.totalMotionReferences[i];
+            skippedIntraCU[i] += other.skippedIntraCU[i];
+            totalIntraCU[i] += other.totalIntraCU[i];
+        }
+
+        intraAnalysisElapsedTime += other.intraAnalysisElapsedTime;
+        motionEstimationElapsedTime += other.motionEstimationElapsedTime;
+        loopFilterElapsedTime += other.loopFilterElapsedTime;
+        pmeTime += other.pmeTime;
+        pmeBlockTime += other.pmeBlockTime;
+        pmodeTime += other.pmodeTime;
+        pmodeBlockTime += other.pmodeBlockTime;
+        weightAnalyzeTime += other.weightAnalyzeTime;
+        totalCTUTime += other.totalCTUTime;
+
+        countIntraAnalysis += other.countIntraAnalysis;
+        countMotionEstimate += other.countMotionEstimate;
+        countLoopFilter += other.countLoopFilter;
+        countPMETasks += other.countPMETasks;
+        countPMEMasters += other.countPMEMasters;
+        countPModeTasks += other.countPModeTasks;
+        countPModeMasters += other.countPModeMasters;
+        countWeightAnalyze += other.countWeightAnalyze;
+        totalCTUs += other.totalCTUs;
+
+        other.clear();
+    }
+}; 
+#endif
+
+inline int getTUBits(int idx, int numIdx)
+{
+    return idx + (idx < numIdx - 1);
+}
+
+class Search : public Predict
+{
+public:
+
+    static const int16_t zeroShort[MAX_CU_SIZE];
+
+    MotionEstimate  m_me;
+    Quant           m_quant;
+    RDCost          m_rdCost;
+    const x265_param* m_param;
+    Frame*          m_frame;
+    const Slice*    m_slice;
+
+    Entropy         m_entropyCoder;
+    RQTData         m_rqt[NUM_FULL_DEPTH];
+
+    uint8_t*        m_qtTempCbf[3];
+    uint8_t*        m_qtTempTransformSkipFlag[3];
+
+    pixel*          m_fencScaled;     /* 32x32 buffer for down-scaled version of 64x64 CU fenc */
+    pixel*          m_fencTransposed; /* 32x32 buffer for transposed copy of fenc */
+    pixel*          m_intraPred;      /* 32x32 buffer for individual intra predictions */
+    pixel*          m_intraPredAngs;  /* allocation for 33 consecutive (all angular) 32x32 intra predictions */
+
+    coeff_t*        m_tsCoeff;        /* transform skip coeff 32x32 */
+    int16_t*        m_tsResidual;     /* transform skip residual 32x32 */
+    pixel*          m_tsRecon;        /* transform skip reconstructed pixels 32x32 */
+
+    bool            m_bFrameParallel;
+    bool            m_bEnableRDOQ;
+    uint32_t        m_numLayers;
+    uint32_t        m_refLagPixels;
+
+#if DETAILED_CU_STATS
+    /* Accumulate CU statistics separately for each frame encoder */
+    CUStats         m_stats[X265_MAX_FRAME_THREADS];
+#endif
+
+    Search();
+    ~Search();
+
+    bool     initSearch(const x265_param& param, ScalingList& scalingList);
+    int      setLambdaFromQP(const CUData& ctu, int qp); /* returns real quant QP in valid spec range */
+
+    // mark temp RD entropy contexts as uninitialized; useful for finding loads without stores
+    void     invalidateContexts(int fromDepth);
+
+    // full RD search of intra modes. if sharedModes is not NULL, it directly uses them
+    void     checkIntra(Mode& intraMode, const CUGeom& cuGeom, PartSize partSize, uint8_t* sharedModes, uint8_t* sharedChromaModes);
+
+    // select best intra mode using only sa8d costs, cannot measure NxN intra
+    void     checkIntraInInter(Mode& intraMode, const CUGeom& cuGeom);
+    // encode luma mode selected by checkIntraInInter, then pick and encode a chroma mode
+    void     encodeIntraInInter(Mode& intraMode, const CUGeom& cuGeom);
+
+    // estimation inter prediction (non-skip)
+    void     predInterSearch(Mode& interMode, const CUGeom& cuGeom, bool bChromaMC, uint32_t masks[2]);
+
+    // encode residual and compute rd-cost for inter mode
+    void     encodeResAndCalcRdInterCU(Mode& interMode, const CUGeom& cuGeom);
+    void     encodeResAndCalcRdSkipCU(Mode& interMode);
+
+    // encode residual without rd-cost
+    void     residualTransformQuantInter(Mode& mode, const CUGeom& cuGeom, uint32_t absPartIdx, uint32_t tuDepth, const uint32_t depthRange[2]);
+    void     residualTransformQuantIntra(Mode& mode, const CUGeom& cuGeom, uint32_t absPartIdx, uint32_t tuDepth, const uint32_t depthRange[2]);
+    void     residualQTIntraChroma(Mode& mode, const CUGeom& cuGeom, uint32_t absPartIdx, uint32_t tuDepth);
+
+    // pick best chroma mode from those available using just sa8d costs
+    void     getBestIntraModeChroma(Mode& intraMode, const CUGeom& cuGeom);
+
+    /* update CBF flags and QP values to be internally consistent */
+    void checkDQP(Mode& mode, const CUGeom& cuGeom);
+    void checkDQPForSplitPred(Mode& mode, const CUGeom& cuGeom);
+
+    MV getLowresMV(const CUData& cu, const PredictionUnit& pu, int list, int ref);
+
+    class PME : public BondedTaskGroup
+    {
+    public:
+
+        Search&       master;
+        Mode&         mode;
+        const CUGeom& cuGeom;
+        const PredictionUnit& pu;
+        int           puIdx;
+
+        struct {
+            int ref[2][MAX_NUM_REF];
+            int refCnt[2];
+        } m_jobs;
+
+        PME(Search& s, Mode& m, const CUGeom& g, const PredictionUnit& u, int p) : master(s), mode(m), cuGeom(g), pu(u), puIdx(p) {}
+
+        void processTasks(int workerThreadId);
+
+    protected:
+
+        PME operator=(const PME&);
+    };
+
+    void     processPME(PME& pme, Search& slave);
+    void     singleMotionEstimation(Search& master, Mode& interMode, const PredictionUnit& pu, int part, int list, int ref);
+
+protected:
+
+    /* motion estimation distribution */
+    ThreadLocalData* m_tld;
+
+    uint32_t      m_listSelBits[3];
+    Lock          m_meLock;
+
+    void     saveResidualQTData(CUData& cu, ShortYuv& resiYuv, uint32_t absPartIdx, uint32_t tuDepth);
+
+    // RDO search of luma intra modes; result is fully encoded luma. luma distortion is returned
+    uint32_t estIntraPredQT(Mode &intraMode, const CUGeom& cuGeom, const uint32_t depthRange[2], uint8_t* sharedModes);
+
+    // RDO select best chroma mode from luma; result is fully encoded chroma. chroma distortion is returned
+    uint32_t estIntraPredChromaQT(Mode &intraMode, const CUGeom& cuGeom, uint8_t* sharedChromaModes);
+
+    void     codeSubdivCbfQTChroma(const CUData& cu, uint32_t tuDepth, uint32_t absPartIdx);
+    void     codeInterSubdivCbfQT(CUData& cu, uint32_t absPartIdx, const uint32_t tuDepth, const uint32_t depthRange[2]);
+    void     codeCoeffQTChroma(const CUData& cu, uint32_t tuDepth, uint32_t absPartIdx, TextType ttype);
+
+    struct Cost
+    {
+        uint64_t rdcost;
+        uint32_t bits;
+        sse_ret_t distortion;
+        uint32_t energy;
+        Cost() { rdcost = 0; bits = 0; distortion = 0; energy = 0; }
+    };
+
+    uint64_t estimateNullCbfCost(uint32_t &dist, uint32_t &psyEnergy, uint32_t tuDepth, TextType compId);
+    void     estimateResidualQT(Mode& mode, const CUGeom& cuGeom, uint32_t absPartIdx, uint32_t depth, ShortYuv& resiYuv, Cost& costs, const uint32_t depthRange[2]);
+
+    // generate prediction, generate residual and recon. if bAllowSplit, find optimal RQT splits
+    void     codeIntraLumaQT(Mode& mode, const CUGeom& cuGeom, uint32_t tuDepth, uint32_t absPartIdx, bool bAllowSplit, Cost& costs, const uint32_t depthRange[2]);
+    void     codeIntraLumaTSkip(Mode& mode, const CUGeom& cuGeom, uint32_t tuDepth, uint32_t absPartIdx, Cost& costs);
+    void     extractIntraResultQT(CUData& cu, Yuv& reconYuv, uint32_t tuDepth, uint32_t absPartIdx);
+
+    // generate chroma prediction, generate residual and recon
+    uint32_t codeIntraChromaQt(Mode& mode, const CUGeom& cuGeom, uint32_t tuDepth, uint32_t absPartIdx, uint32_t& psyEnergy);
+    uint32_t codeIntraChromaTSkip(Mode& mode, const CUGeom& cuGeom, uint32_t tuDepth, uint32_t tuDepthC, uint32_t absPartIdx, uint32_t& psyEnergy);
+    void     extractIntraResultChromaQT(CUData& cu, Yuv& reconYuv, uint32_t absPartIdx, uint32_t tuDepth);
+
+    // reshuffle CBF flags after coding a pair of 4:2:2 chroma blocks
+    void     offsetSubTUCBFs(CUData& cu, TextType ttype, uint32_t tuDepth, uint32_t absPartIdx);
+
+    /* output of mergeEstimation, best merge candidate */
+    struct MergeData
+    {
+        MVField  mvField[2];
+        uint32_t dir;
+        uint32_t index;
+        uint32_t bits;
+    };
+
+    /* inter/ME helper functions */
+    int       selectMVP(const CUData& cu, const PredictionUnit& pu, const MV amvp[AMVP_NUM_CANDS], int list, int ref);
+    const MV& checkBestMVP(const MV amvpCand[2], const MV& mv, int& mvpIdx, uint32_t& outBits, uint32_t& outCost) const;
+    void     setSearchRange(const CUData& cu, const MV& mvp, int merange, MV& mvmin, MV& mvmax) const;
+    uint32_t mergeEstimation(CUData& cu, const CUGeom& cuGeom, const PredictionUnit& pu, int puIdx, MergeData& m);
+    static void getBlkBits(PartSize cuMode, bool bPSlice, int puIdx, uint32_t lastMode, uint32_t blockBit[3]);
+
+    /* intra helper functions */
+    enum { MAX_RD_INTRA_MODES = 16 };
+    static void updateCandList(uint32_t mode, uint64_t cost, int maxCandCount, uint32_t* candModeList, uint64_t* candCostList);
+
+    // get most probable luma modes for CU part, and bit cost of all non mpm modes
+    uint32_t getIntraRemModeBits(CUData & cu, uint32_t absPartIdx, uint32_t mpmModes[3], uint64_t& mpms) const;
+
+    void updateModeCost(Mode& m) const { m.rdCost = m_rdCost.m_psyRd ? m_rdCost.calcPsyRdCost(m.distortion, m.totalBits, m.psyEnergy) : m_rdCost.calcRdCost(m.distortion, m.totalBits); }
+};
+}
+
+#endif // ifndef X265_SEARCH_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/sei.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,74 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#include "common.h"
+#include "bitstream.h"
+#include "slice.h"
+#include "sei.h"
+
+using namespace X265_NS;
+
+/* x265's identifying GUID */
+const uint8_t SEIuserDataUnregistered::m_uuid_iso_iec_11578[16] = {
+    0x2C, 0xA2, 0xDE, 0x09, 0xB5, 0x17, 0x47, 0xDB,
+    0xBB, 0x55, 0xA4, 0xFE, 0x7F, 0xC2, 0xFC, 0x4E
+};
+
+/* Marshal a single SEI message, storing the marshalled representation
+ * in bitstream bs.
+ *
+ * Two-pass scheme: the first writeSEI() call targets a BitCounter so the
+ * payload size is measured without emitting anything; the header
+ * (payload_type and payload_size, each coded as a run of 0xff bytes
+ * followed by a final byte < 0xff) is then written to bs, and writeSEI()
+ * is invoked a second time against bs for the real encode. */
+void SEI::write(Bitstream& bs, const SPS& sps)
+{
+    BitCounter count;
+    m_bitIf = &count;
+
+    /* virtual writeSEI method, write to bit counter (sizing pass) */
+    writeSEI(sps);
+
+    m_bitIf = &bs;
+    uint32_t type = payloadType();
+    for (; type >= 0xff; type -= 0xff)
+        WRITE_CODE(0xff, 8, "payload_type");
+    WRITE_CODE(type, 8, "payload_type");
+
+    /* the sizing pass must have produced a whole number of bytes */
+    X265_CHECK(0 == (count.getNumberOfWrittenBits() & 7), "payload unaligned\n");
+    uint32_t payloadSize = count.getNumberOfWrittenBits() >> 3;
+    for (; payloadSize >= 0xff; payloadSize -= 0xff)
+        WRITE_CODE(0xff, 8, "payload_size");
+    WRITE_CODE(payloadSize, 8, "payload_size");
+
+    /* virtual writeSEI method, write to bs (real encode) */
+    writeSEI(sps);
+}
+
+/* Pad the current payload to a byte boundary: emit a single '1' bit, then
+ * '0' bits until the written bit count is a multiple of 8.  Writes nothing
+ * if the stream is already byte-aligned. */
+void SEI::writeByteAlign()
+{
+    // TODO: expose bs.writeByteAlignment() as virtual function
+    if (m_bitIf->getNumberOfWrittenBits() % 8 != 0)
+    {
+        WRITE_FLAG(1, "bit_equal_to_one");
+        while (m_bitIf->getNumberOfWrittenBits() % 8 != 0)
+        {
+            WRITE_FLAG(0, "bit_equal_to_zero");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/sei.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,344 @@
+/*****************************************************************************
+* Copyright (C) 2013 x265 project
+*
+* Authors: Steve Borho <steve@borho.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_SEI_H
+#define X265_SEI_H
+
+#include "common.h"
+#include "bitstream.h"
+#include "slice.h"
+
+namespace X265_NS {
+// private namespace
+
+/* Abstract base class for SEI (Supplemental Enhancement Information)
+ * message writers.  Derives from SyntaxElementWriter, which supplies the
+ * WRITE_* macros and the m_bitIf output interface used by subclasses. */
+class SEI : public SyntaxElementWriter
+{
+public:
+
+    /* SEI users call write() to marshal an SEI to a bitstream. SEI
+     * subclasses may implement write() or accept the default write()
+     * method which calls writeSEI() with a bitcounter to determine
+     * the size, then it encodes the header and calls writeSEI a
+     * second time for the real encode. */
+    virtual void write(Bitstream& bs, const SPS& sps);
+
+    virtual ~SEI() {}
+
+protected:
+
+    /* payloadType codes written in the SEI message header (values are the
+     * standard HEVC payload type numbers) */
+    enum PayloadType
+    {
+        BUFFERING_PERIOD                     = 0,
+        PICTURE_TIMING                       = 1,
+        PAN_SCAN_RECT                        = 2,
+        FILLER_PAYLOAD                       = 3,
+        USER_DATA_REGISTERED_ITU_T_T35       = 4,
+        USER_DATA_UNREGISTERED               = 5,
+        RECOVERY_POINT                       = 6,
+        SCENE_INFO                           = 9,
+        FULL_FRAME_SNAPSHOT                  = 15,
+        PROGRESSIVE_REFINEMENT_SEGMENT_START = 16,
+        PROGRESSIVE_REFINEMENT_SEGMENT_END   = 17,
+        FILM_GRAIN_CHARACTERISTICS           = 19,
+        POST_FILTER_HINT                     = 22,
+        TONE_MAPPING_INFO                    = 23,
+        FRAME_PACKING                        = 45,
+        DISPLAY_ORIENTATION                  = 47,
+        SOP_DESCRIPTION                      = 128,
+        ACTIVE_PARAMETER_SETS                = 129,
+        DECODING_UNIT_INFO                   = 130,
+        TEMPORAL_LEVEL0_INDEX                = 131,
+        DECODED_PICTURE_HASH                 = 132,
+        SCALABLE_NESTING                     = 133,
+        REGION_REFRESH_INFO                  = 134,
+        MASTERING_DISPLAY_INFO               = 137,
+        CONTENT_LIGHT_LEVEL_INFO             = 144,
+    };
+
+    /* payload type code of the concrete SEI message */
+    virtual PayloadType payloadType() const = 0;
+
+    /* Serialize the message body to m_bitIf.  The default write() calls
+     * this twice (sizing pass, then emit pass); subclasses that override
+     * write() directly need not implement it. */
+    virtual void writeSEI(const SPS&) { X265_CHECK(0, "empty writeSEI method called\n");  }
+
+    /* pad the payload to a byte boundary (one '1' bit then '0' bits) */
+    void writeByteAlign();
+};
+
+/* User-data-unregistered SEI: a 16-byte UUID followed by arbitrary user
+ * bytes.  Overrides write() directly since the payload size is known up
+ * front (16 + m_userDataLength), so no sizing pass is needed. */
+class SEIuserDataUnregistered : public SEI
+{
+public:
+
+    PayloadType payloadType() const { return USER_DATA_UNREGISTERED; }
+
+    SEIuserDataUnregistered() : m_userData(NULL) {}
+
+    /* x265's identifying GUID (defined in sei.cpp) */
+    static const uint8_t m_uuid_iso_iec_11578[16];
+    uint32_t m_userDataLength;  // number of bytes pointed to by m_userData
+    uint8_t *m_userData;        // not owned; caller manages the buffer
+
+    void write(Bitstream& bs, const SPS&)
+    {
+        m_bitIf = &bs;
+
+        WRITE_CODE(USER_DATA_UNREGISTERED, 8, "payload_type");
+
+        /* payload_size coded as runs of 0xff plus a final byte < 0xff */
+        uint32_t payloadSize = 16 + m_userDataLength;
+        for (; payloadSize >= 0xff; payloadSize -= 0xff)
+            WRITE_CODE(0xff, 8, "payload_size");
+        WRITE_CODE(payloadSize, 8, "payload_size");
+
+        for (uint32_t i = 0; i < 16; i++)
+            WRITE_CODE(m_uuid_iso_iec_11578[i], 8, "sei.uuid_iso_iec_11578[i]");
+
+        for (uint32_t i = 0; i < m_userDataLength; i++)
+            WRITE_CODE(m_userData[i], 8, "user_data");
+    }
+};
+
+/* Mastering display color volume SEI: primaries, white point and
+ * mastering luminance range of the display the content was mastered on.
+ * Fixed-size payload (8 16-bit + 2 32-bit fields), so write() is
+ * overridden directly. */
+class SEIMasteringDisplayColorVolume : public SEI
+{
+public:
+
+    uint16_t displayPrimaryX[3];            // G, B, R primary x coordinates
+    uint16_t displayPrimaryY[3];            // G, B, R primary y coordinates
+    uint16_t whitePointX, whitePointY;
+    uint32_t maxDisplayMasteringLuminance;
+    uint32_t minDisplayMasteringLuminance;
+
+    PayloadType payloadType() const { return MASTERING_DISPLAY_INFO; }
+
+    /* Parse a user string of the form
+     * "G(x,y)B(x,y)R(x,y)WP(x,y)L(max,min)"; returns true only if all
+     * ten fields were matched. */
+    bool parse(const char* value)
+    {
+        return sscanf(value, "G(%hu,%hu)B(%hu,%hu)R(%hu,%hu)WP(%hu,%hu)L(%u,%u)",
+                      &displayPrimaryX[0], &displayPrimaryY[0],
+                      &displayPrimaryX[1], &displayPrimaryY[1],
+                      &displayPrimaryX[2], &displayPrimaryY[2],
+                      &whitePointX, &whitePointY,
+                      &maxDisplayMasteringLuminance, &minDisplayMasteringLuminance) == 10;
+    }
+
+    void write(Bitstream& bs, const SPS&)
+    {
+        m_bitIf = &bs;
+
+        WRITE_CODE(MASTERING_DISPLAY_INFO, 8, "payload_type");
+        /* payload size: 8 two-byte fields + 2 four-byte fields = 24 bytes */
+        WRITE_CODE(8 * 2 + 2 * 4, 8, "payload_size");
+
+        for (uint32_t i = 0; i < 3; i++)
+        {
+            WRITE_CODE(displayPrimaryX[i], 16, "display_primaries_x[ c ]");
+            WRITE_CODE(displayPrimaryY[i], 16, "display_primaries_y[ c ]");
+        }
+        WRITE_CODE(whitePointX, 16, "white_point_x");
+        WRITE_CODE(whitePointY, 16, "white_point_y");
+        WRITE_CODE(maxDisplayMasteringLuminance, 32, "max_display_mastering_luminance");
+        WRITE_CODE(minDisplayMasteringLuminance, 32, "min_display_mastering_luminance");
+    }
+};
+
+/* Content light level SEI: maximum and maximum-picture-average light
+ * levels of the content.  Fixed 4-byte payload, write() overridden
+ * directly. */
+class SEIContentLightLevel : public SEI
+{
+public:
+
+    uint16_t max_content_light_level;
+    uint16_t max_pic_average_light_level;
+
+    PayloadType payloadType() const { return CONTENT_LIGHT_LEVEL_INFO; }
+
+    void write(Bitstream& bs, const SPS&)
+    {
+        m_bitIf = &bs;
+
+        WRITE_CODE(CONTENT_LIGHT_LEVEL_INFO, 8, "payload_type");
+        WRITE_CODE(4, 8, "payload_size");
+        WRITE_CODE(max_content_light_level,     16, "max_content_light_level");
+        WRITE_CODE(max_pic_average_light_level, 16, "max_pic_average_light_level");
+    }
+};
+
+/* Decoded picture hash SEI: per-plane hash of the reconstructed picture
+ * so a decoder can verify its output.  The digest width per plane depends
+ * on the method: MD5 = 16 bytes, CRC = 2 bytes, CHECKSUM = 4 bytes.
+ * Payload size is fixed per method, so write() is overridden directly. */
+class SEIDecodedPictureHash : public SEI
+{
+public:
+
+    PayloadType payloadType() const { return DECODED_PICTURE_HASH; }
+
+    enum Method
+    {
+        MD5,
+        CRC,
+        CHECKSUM,
+    } m_method;
+
+    /* one digest per plane (Y/Cb/Cr); only the first 16/2/4 bytes of each
+     * row are used, depending on m_method */
+    uint8_t m_digest[3][16];
+
+    void write(Bitstream& bs, const SPS&)
+    {
+        m_bitIf = &bs;
+
+        WRITE_CODE(DECODED_PICTURE_HASH, 8, "payload_type");
+
+        /* payload_size = 1 byte hash_type + 3 planes * digest width */
+        switch (m_method)
+        {
+        case MD5:
+            WRITE_CODE(1 + 16 * 3, 8, "payload_size");
+            WRITE_CODE(MD5, 8, "hash_type");
+            break;
+        case CRC:
+            WRITE_CODE(1 + 2 * 3, 8, "payload_size");
+            WRITE_CODE(CRC, 8, "hash_type");
+            break;
+        case CHECKSUM:
+            WRITE_CODE(1 + 4 * 3, 8, "payload_size");
+            WRITE_CODE(CHECKSUM, 8, "hash_type");
+            break;
+        }
+
+        for (int yuvIdx = 0; yuvIdx < 3; yuvIdx++)
+        {
+            if (m_method == MD5)
+            {
+                for (uint32_t i = 0; i < 16; i++)
+                    WRITE_CODE(m_digest[yuvIdx][i], 8, "picture_md5");
+            }
+            else if (m_method == CRC)
+            {
+                /* big-endian 16-bit CRC from the first two digest bytes */
+                uint32_t val = (m_digest[yuvIdx][0] << 8) + m_digest[yuvIdx][1];
+                WRITE_CODE(val, 16, "picture_crc");
+            }
+            else if (m_method == CHECKSUM)
+            {
+                /* big-endian 32-bit checksum from the first four digest bytes */
+                uint32_t val = (m_digest[yuvIdx][0] << 24) + (m_digest[yuvIdx][1] << 16) + (m_digest[yuvIdx][2] << 8) + m_digest[yuvIdx][3];
+                WRITE_CODE(val, 32, "picture_checksum");
+            }
+        }
+    }
+};
+
+/* Active parameter sets SEI: identifies the active VPS/SPS.  Uses the
+ * base-class two-pass write(); this class only implements writeSEI().
+ * VPS id and SPS id are hard-coded to 0 (x265 emits a single VPS/SPS). */
+class SEIActiveParameterSets : public SEI
+{
+public:
+
+    PayloadType payloadType() const { return ACTIVE_PARAMETER_SETS; }
+
+    bool m_selfContainedCvsFlag;
+    bool m_noParamSetUpdateFlag;
+
+    void writeSEI(const SPS&)
+    {
+        WRITE_CODE(0, 4, "active_vps_id");
+        WRITE_FLAG(m_selfContainedCvsFlag, "self_contained_cvs_flag");
+        WRITE_FLAG(m_noParamSetUpdateFlag, "no_param_set_update_flag");
+        WRITE_UVLC(0, "num_sps_ids_minus1");
+        WRITE_UVLC(0, "active_seq_param_set_id");
+        writeByteAlign();
+    }
+};
+
+/* Buffering period SEI: HRD CPB initial removal delay/offset signalling.
+ * Field bit-widths come from the SPS VUI HRD parameters, hence writeSEI()
+ * takes the SPS.  Uses the base-class two-pass write(). */
+class SEIBufferingPeriod : public SEI
+{
+public:
+
+    PayloadType payloadType() const { return BUFFERING_PERIOD; }
+
+    SEIBufferingPeriod()
+        : m_cpbDelayOffset(0)
+        , m_dpbDelayOffset(0)
+        , m_auCpbRemovalDelayDelta(1)
+    {
+    }
+
+    bool     m_cpbDelayOffset;
+    bool     m_dpbDelayOffset;
+    uint32_t m_initialCpbRemovalDelay;
+    uint32_t m_initialCpbRemovalDelayOffset;
+    uint32_t m_auCpbRemovalDelayDelta;   // coded minus 1; defaults to 1
+
+    void writeSEI(const SPS& sps)
+    {
+        const HRDInfo& hrd = sps.vuiParameters.hrdParameters;
+
+        WRITE_UVLC(0, "bp_seq_parameter_set_id");
+        WRITE_FLAG(0, "rap_cpb_params_present_flag");
+        WRITE_FLAG(0, "concatenation_flag");
+        WRITE_CODE(m_auCpbRemovalDelayDelta - 1,   hrd.cpbRemovalDelayLength,       "au_cpb_removal_delay_delta_minus1");
+        WRITE_CODE(m_initialCpbRemovalDelay,       hrd.initialCpbRemovalDelayLength,        "initial_cpb_removal_delay");
+        WRITE_CODE(m_initialCpbRemovalDelayOffset, hrd.initialCpbRemovalDelayLength, "initial_cpb_removal_delay_offset");
+
+        writeByteAlign();
+    }
+};
+
+/* Picture timing SEI: per-picture field/frame info and HRD CPB removal /
+ * DPB output delays.  Which fields are written is gated by the VUI
+ * frameFieldInfoPresentFlag and hrdParametersPresentFlag, so writeSEI()
+ * needs the SPS.  Uses the base-class two-pass write(). */
+class SEIPictureTiming : public SEI
+{
+public:
+
+    PayloadType payloadType() const { return PICTURE_TIMING; }
+
+    uint32_t  m_picStruct;
+    uint32_t  m_sourceScanType;
+    bool      m_duplicateFlag;
+
+    uint32_t  m_auCpbRemovalDelay;   // coded minus 1
+    uint32_t  m_picDpbOutputDelay;
+
+    void writeSEI(const SPS& sps)
+    {
+        const VUI *vui = &sps.vuiParameters;
+        const HRDInfo *hrd = &vui->hrdParameters;
+
+        if (vui->frameFieldInfoPresentFlag)
+        {
+            WRITE_CODE(m_picStruct, 4,          "pic_struct");
+            WRITE_CODE(m_sourceScanType, 2,     "source_scan_type");
+            WRITE_FLAG(m_duplicateFlag,         "duplicate_flag");
+        }
+
+        if (vui->hrdParametersPresentFlag)
+        {
+            WRITE_CODE(m_auCpbRemovalDelay - 1, hrd->cpbRemovalDelayLength, "au_cpb_removal_delay_minus1");
+            WRITE_CODE(m_picDpbOutputDelay, hrd->dpbOutputDelayLength, "pic_dpb_output_delay");
+            /* Removed sub-pic signaling June 2014 */
+        }
+        writeByteAlign();
+    }
+};
+
+/* Recovery point SEI: signals the POC distance at which decoding becomes
+ * correct (or exact, per m_exactMatchingFlag) after a random access.
+ * Uses the base-class two-pass write(). */
+class SEIRecoveryPoint : public SEI
+{
+public:
+
+    int  m_recoveryPocCnt;       // signed; coded as SVLC
+    bool m_exactMatchingFlag;
+    bool m_brokenLinkFlag;
+
+    void writeSEI(const SPS&)
+    {
+        WRITE_SVLC(m_recoveryPocCnt,    "recovery_poc_cnt");
+        WRITE_FLAG(m_exactMatchingFlag, "exact_matching_flag");
+        WRITE_FLAG(m_brokenLinkFlag,    "broken_link_flag");
+        writeByteAlign();
+    }
+};
+}
+
+#endif // ifndef X265_SEI_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/slicetype.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2190 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Gopu Govindaswamy <gopu@multicorewareinc.com>
+ *          Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "frame.h"
+#include "framedata.h"
+#include "picyuv.h"
+#include "primitives.h"
+#include "lowres.h"
+#include "mv.h"
+
+#include "slicetype.h"
+#include "motion.h"
+#include "ratecontrol.h"
+
+#if DETAILED_CU_STATS
+#define ProfileLookaheadTime(elapsed, count) ScopedElapsedTime _scope(elapsed); count++
+#else
+#define ProfileLookaheadTime(elapsed, count)
+#endif
+
+using namespace X265_NS;
+
+namespace {
+
+/* Compute variance to derive AC energy of each block.
+ *
+ * sum_ssd packs two 32-bit accumulators from the var primitive: the pixel
+ * sum in the low 32 bits and the sum of squares in the high 32 bits.
+ * Both are also accumulated into the frame's per-plane weighted-prediction
+ * statistics (wp_sum / wp_ssd).  Returns ssd - sum^2 >> shift, i.e. the
+ * (unnormalized) AC energy of the block. */
+inline uint32_t acEnergyVar(Frame *curFrame, uint64_t sum_ssd, int shift, int plane)
+{
+    uint32_t sum = (uint32_t)sum_ssd;
+    uint32_t ssd = (uint32_t)(sum_ssd >> 32);
+
+    curFrame->m_lowres.wp_sum[plane] += sum;
+    curFrame->m_lowres.wp_ssd[plane] += ssd;
+    return ssd - ((uint64_t)sum * sum >> shift);
+}
+
+/* Find the energy of each block in Y/Cb/Cr plane.
+ *
+ * For subsampled chroma (any csp other than 4:4:4, plane != 0) the 8x8
+ * chroma block is first copied to an aligned local buffer and measured
+ * with the 8x8 var primitive (shift 6 = log2(8*8)); otherwise the 16x16
+ * var primitive is used in place (shift 8 = log2(16*16)). */
+inline uint32_t acEnergyPlane(Frame *curFrame, pixel* src, intptr_t srcStride, int plane, int colorFormat)
+{
+    if ((colorFormat != X265_CSP_I444) && plane)
+    {
+        ALIGN_VAR_8(pixel, pix[8 * 8]);
+        primitives.cu[BLOCK_8x8].copy_pp(pix, 8, src, srcStride);
+        return acEnergyVar(curFrame, primitives.cu[BLOCK_8x8].var(pix, 8), 6, plane);
+    }
+    else
+        return acEnergyVar(curFrame, primitives.cu[BLOCK_16x16].var(src, srcStride), 8, plane);
+}
+
+} // end anonymous namespace
+
+/* Find the total AC energy of each block in all planes.
+ *
+ * Sums the luma 16x16 block energy at (blockX, blockY) with the energy of
+ * the co-located chroma blocks (offsets scaled by the csp's H/V shifts);
+ * chroma is skipped entirely for monochrome (X265_CSP_I400).  x265_emms()
+ * clears the x87/MMX state after the SIMD var primitives. */
+uint32_t LookaheadTLD::acEnergyCu(Frame* curFrame, uint32_t blockX, uint32_t blockY, int csp)
+{
+    intptr_t stride = curFrame->m_fencPic->m_stride;
+    intptr_t blockOffsetLuma = blockX + (blockY * stride);
+
+    uint32_t var;
+
+    var  = acEnergyPlane(curFrame, curFrame->m_fencPic->m_picOrg[0] + blockOffsetLuma, stride, 0, csp);
+    if (csp != X265_CSP_I400) {
+        intptr_t cStride = curFrame->m_fencPic->m_strideC;
+        int hShift = CHROMA_H_SHIFT(csp);
+        int vShift = CHROMA_V_SHIFT(csp);
+        intptr_t blockOffsetChroma = (blockX >> hShift) + ((blockY >> vShift) * cStride);
+        var += acEnergyPlane(curFrame, curFrame->m_fencPic->m_picOrg[1] + blockOffsetChroma, cStride, 1, csp);
+        var += acEnergyPlane(curFrame, curFrame->m_fencPic->m_picOrg[2] + blockOffsetChroma, cStride, 2, csp);
+    }
+    x265_emms();
+    return var;
+}
+
+/* Compute per-16x16-block adaptive quantization (AQ) offsets for a frame.
+ *
+ * Fills m_lowres.qpAqOffset / qpCuTreeOffset / invQscaleFactor for every
+ * block, and (when weighted prediction is enabled) the per-plane variance
+ * statistics wp_sum / wp_ssd consumed by weight analysis.
+ *
+ * Three paths:
+ *  - AQ disabled (or strength 0): offsets are zeroed or copied straight
+ *    from any caller-supplied quantOffsets; block energies are still
+ *    gathered if weighted prediction needs the variance data.
+ *  - auto-variance modes: a first pass measures every block's energy and
+ *    derives a frame-adaptive strength; a second pass converts that into
+ *    per-block QP offsets.
+ *  - plain variance mode: a single pass maps each block's energy directly
+ *    to a QP offset with a fixed strength scale. */
+void LookaheadTLD::calcAdaptiveQuantFrame(Frame *curFrame, x265_param* param)
+{
+    /* Actual adaptive quantization */
+    int maxCol = curFrame->m_fencPic->m_picWidth;
+    int maxRow = curFrame->m_fencPic->m_picHeight;
+    int blockCount = curFrame->m_lowres.maxBlocksInRow * curFrame->m_lowres.maxBlocksInCol;
+
+    float* quantOffsets = curFrame->m_quantOffsets;
+    for (int y = 0; y < 3; y++)
+    {
+        curFrame->m_lowres.wp_ssd[y] = 0;
+        curFrame->m_lowres.wp_sum[y] = 0;
+    }
+
+    /* Calculate Qp offset for each 16x16 block in the frame */
+    int blockXY = 0;
+    int blockX = 0, blockY = 0;
+    double strength = 0.f;
+    if (param->rc.aqMode == X265_AQ_NONE || param->rc.aqStrength == 0)
+    {
+        /* Need to init it anyways for CU tree */
+        int cuCount = widthInCU * heightInCU;
+
+        if (param->rc.aqMode && param->rc.aqStrength == 0)
+        {
+            if (quantOffsets)
+            {
+                /* propagate externally supplied per-CU offsets unchanged */
+                for (int cuxy = 0; cuxy < cuCount; cuxy++)
+                {
+                    curFrame->m_lowres.qpCuTreeOffset[cuxy] = curFrame->m_lowres.qpAqOffset[cuxy] = quantOffsets[cuxy];
+                    curFrame->m_lowres.invQscaleFactor[cuxy] = x265_exp2fix8(curFrame->m_lowres.qpCuTreeOffset[cuxy]);
+                }
+            }
+            else
+            {
+                /* neutral offsets; 256 is the fixed-point (8.8) scale for 1.0 */
+                memset(curFrame->m_lowres.qpCuTreeOffset, 0, cuCount * sizeof(double));
+                memset(curFrame->m_lowres.qpAqOffset, 0, cuCount * sizeof(double));
+                for (int cuxy = 0; cuxy < cuCount; cuxy++)
+                    curFrame->m_lowres.invQscaleFactor[cuxy] = 256;
+            }
+        }
+
+        /* Need variance data for weighted prediction */
+        if (param->bEnableWeightedPred || param->bEnableWeightedBiPred)
+        {
+            for (blockY = 0; blockY < maxRow; blockY += 16)
+                for (blockX = 0; blockX < maxCol; blockX += 16)
+                    acEnergyCu(curFrame, blockX, blockY, param->internalCsp);
+        }
+    }
+    else
+    {
+        blockXY = 0;
+        double avg_adj_pow2 = 0, avg_adj = 0, qp_adj = 0;
+        double bias_strength = 0.f;
+        if (param->rc.aqMode == X265_AQ_AUTO_VARIANCE || param->rc.aqMode == X265_AQ_AUTO_VARIANCE_BIASED)
+        {
+            /* first pass: per-block energy -> energy^0.1, accumulating mean
+             * and mean-square to derive a frame-adaptive strength */
+            double bit_depth_correction = 1.f / (1 << (2*(X265_DEPTH-8)));
+            for (blockY = 0; blockY < maxRow; blockY += 16)
+            {
+                for (blockX = 0; blockX < maxCol; blockX += 16)
+                {
+                    uint32_t energy = acEnergyCu(curFrame, blockX, blockY, param->internalCsp);
+                    qp_adj = pow(energy * bit_depth_correction + 1, 0.1);
+                    curFrame->m_lowres.qpCuTreeOffset[blockXY] = qp_adj;
+                    avg_adj += qp_adj;
+                    avg_adj_pow2 += qp_adj * qp_adj;
+                    blockXY++;
+                }
+            }
+
+            avg_adj /= blockCount;
+            avg_adj_pow2 /= blockCount;
+            strength = param->rc.aqStrength * avg_adj;
+            avg_adj = avg_adj - 0.5f * (avg_adj_pow2 - (11.f)) / avg_adj;
+            bias_strength = param->rc.aqStrength;
+        }
+        else
+            strength = param->rc.aqStrength * 1.0397f;
+
+        /* second pass: convert energies into per-block QP offsets */
+        blockXY = 0;
+        for (blockY = 0; blockY < maxRow; blockY += 16)
+        {
+            for (blockX = 0; blockX < maxCol; blockX += 16)
+            {
+                if (param->rc.aqMode == X265_AQ_AUTO_VARIANCE_BIASED)
+                {
+                    qp_adj = curFrame->m_lowres.qpCuTreeOffset[blockXY];
+                    qp_adj = strength * (qp_adj - avg_adj) + bias_strength * (1.f - 11.f / (qp_adj * qp_adj));
+                }
+                else if (param->rc.aqMode == X265_AQ_AUTO_VARIANCE)
+                {
+                    qp_adj = curFrame->m_lowres.qpCuTreeOffset[blockXY];
+                    qp_adj = strength * (qp_adj - avg_adj);
+                }
+                else
+                {
+                    /* plain variance AQ: offset from log2 of block energy */
+                    uint32_t energy = acEnergyCu(curFrame, blockX, blockY, param->internalCsp);
+                    qp_adj = strength * (X265_LOG2(X265_MAX(energy, 1)) - (14.427f + 2 * (X265_DEPTH - 8)));
+                }
+                if (quantOffsets != NULL)
+                    qp_adj += quantOffsets[blockXY];
+                curFrame->m_lowres.qpAqOffset[blockXY] = qp_adj;
+                curFrame->m_lowres.qpCuTreeOffset[blockXY] = qp_adj;
+                curFrame->m_lowres.invQscaleFactor[blockXY] = x265_exp2fix8(qp_adj);
+                blockXY++;
+            }
+        }
+    }
+
+    if (param->bEnableWeightedPred || param->bEnableWeightedBiPred)
+    {
+        /* finalize per-plane variance for weight analysis:
+         * wp_ssd becomes ssd - round(sum^2 / pixel count) */
+        int hShift = CHROMA_H_SHIFT(param->internalCsp);
+        int vShift = CHROMA_V_SHIFT(param->internalCsp);
+        maxCol = ((maxCol + 8) >> 4) << 4;
+        maxRow = ((maxRow + 8) >> 4) << 4;
+        int width[3]  = { maxCol, maxCol >> hShift, maxCol >> hShift };
+        int height[3] = { maxRow, maxRow >> vShift, maxRow >> vShift };
+
+        for (int i = 0; i < 3; i++)
+        {
+            uint64_t sum, ssd;
+            sum = curFrame->m_lowres.wp_sum[i];
+            ssd = curFrame->m_lowres.wp_ssd[i];
+            curFrame->m_lowres.wp_ssd[i] = ssd - (sum * sum + (width[i] * height[i]) / 2) / (width[i] * height[i]);
+        }
+    }
+}
+
+/* Estimate the intra cost of every lowres CU in frame fenc.
+ *
+ * For each CU: copies the source block, gathers the top and left neighbour
+ * samples (plus their filtered version), evaluates DC and planar, then
+ * scans the angular modes coarsely (every 5th mode) and refines around the
+ * best at distances 2 and 1.  The winning SATD cost plus fixed penalties
+ * is stored in fenc.lowresCosts / intraCost / intraMode, and frame cost
+ * totals (raw and AQ-weighted) are accumulated, excluding frame-edge CUs
+ * whose estimates are unreliable. */
+void LookaheadTLD::lowresIntraEstimate(Lowres& fenc)
+{
+    ALIGN_VAR_32(pixel, prediction[X265_LOWRES_CU_SIZE * X265_LOWRES_CU_SIZE]);
+    pixel fencIntra[X265_LOWRES_CU_SIZE * X265_LOWRES_CU_SIZE];
+    pixel neighbours[2][X265_LOWRES_CU_SIZE * 4 + 1];
+    pixel* samples = neighbours[0], *filtered = neighbours[1];
+
+    const int lookAheadLambda = (int)x265_lambda_tab[X265_LOOKAHEAD_QP];
+    const int intraPenalty = 5 * lookAheadLambda;
+    const int lowresPenalty = 4; /* fixed CU cost overhead */
+
+    const int cuSize  = X265_LOWRES_CU_SIZE;
+    const int cuSize2 = cuSize << 1;
+    const int sizeIdx = X265_LOWRES_CU_BITS - 2;
+
+    pixelcmp_t satd = primitives.pu[sizeIdx].satd;
+    int planar = !!(cuSize >= 8);   /* use filtered neighbours for planar when cuSize >= 8 */
+
+    int costEst = 0, costEstAq = 0;
+
+    for (int cuY = 0; cuY < heightInCU; cuY++)
+    {
+        fenc.rowSatds[0][0][cuY] = 0;
+
+        for (int cuX = 0; cuX < widthInCU; cuX++)
+        {
+            const int cuXY = cuX + cuY * widthInCU;
+            const intptr_t pelOffset = cuSize * cuX + cuSize * cuY * fenc.lumaStride;
+            pixel *pixCur = fenc.lowresPlane[0] + pelOffset;
+
+            /* copy fenc pixels */
+            primitives.cu[sizeIdx].copy_pp(fencIntra, cuSize, pixCur, fenc.lumaStride);
+
+            /* collect reference sample pixels (above-left corner, top row, left column) */
+            pixCur -= fenc.lumaStride + 1;
+            memcpy(samples, pixCur, (2 * cuSize + 1) * sizeof(pixel)); /* top */
+            for (int i = 1; i <= 2 * cuSize; i++)
+                samples[cuSize2 + i] = pixCur[i * fenc.lumaStride];    /* left */
+
+            primitives.cu[sizeIdx].intra_filter(samples, filtered);
+
+            int cost, icost = me.COST_MAX;
+            uint32_t ilowmode = 0;
+
+            /* DC and planar */
+            primitives.cu[sizeIdx].intra_pred[DC_IDX](prediction, cuSize, samples, 0, cuSize <= 16);
+            cost = satd(fencIntra, cuSize, prediction, cuSize);
+            COPY2_IF_LT(icost, cost, ilowmode, DC_IDX);
+
+            primitives.cu[sizeIdx].intra_pred[PLANAR_IDX](prediction, cuSize, neighbours[planar], 0, 0);
+            cost = satd(fencIntra, cuSize, prediction, cuSize);
+            COPY2_IF_LT(icost, cost, ilowmode, PLANAR_IDX);
+
+            /* scan angular predictions: coarse pass, every 5th mode */
+            int filter, acost = me.COST_MAX;
+            uint32_t mode, alowmode = 4;
+            for (mode = 5; mode < 35; mode += 5)
+            {
+                filter = !!(g_intraFilterFlags[mode] & cuSize);
+                primitives.cu[sizeIdx].intra_pred[mode](prediction, cuSize, neighbours[filter], mode, cuSize <= 16);
+                cost = satd(fencIntra, cuSize, prediction, cuSize);
+                COPY2_IF_LT(acost, cost, alowmode, mode);
+            }
+            /* refine around the best coarse mode at distances 2 then 1 */
+            for (uint32_t dist = 2; dist >= 1; dist--)
+            {
+                int minusmode = alowmode - dist;
+                int plusmode = alowmode + dist;
+
+                mode = minusmode;
+                filter = !!(g_intraFilterFlags[mode] & cuSize);
+                primitives.cu[sizeIdx].intra_pred[mode](prediction, cuSize, neighbours[filter], mode, cuSize <= 16);
+                cost = satd(fencIntra, cuSize, prediction, cuSize);
+                COPY2_IF_LT(acost, cost, alowmode, mode);
+
+                mode = plusmode;
+                filter = !!(g_intraFilterFlags[mode] & cuSize);
+                primitives.cu[sizeIdx].intra_pred[mode](prediction, cuSize, neighbours[filter], mode, cuSize <= 16);
+                cost = satd(fencIntra, cuSize, prediction, cuSize);
+                COPY2_IF_LT(acost, cost, alowmode, mode);
+            }
+            COPY2_IF_LT(icost, acost, ilowmode, alowmode);
+
+            icost += intraPenalty + lowresPenalty; /* estimate intra signal cost */
+
+            fenc.lowresCosts[0][0][cuXY] = (uint16_t)(X265_MIN(icost, LOWRES_COST_MASK) | (0 << LOWRES_COST_SHIFT));
+            fenc.intraCost[cuXY] = icost;
+            fenc.intraMode[cuXY] = (uint8_t)ilowmode;
+
+            /* do not include edge blocks in the frame cost estimates, they are not very accurate */
+            const bool bFrameScoreCU = (cuX > 0 && cuX < widthInCU - 1 &&
+                                        cuY > 0 && cuY < heightInCU - 1) || widthInCU <= 2 || heightInCU <= 2;
+
+            /* AQ-weighted cost: scale by invQscaleFactor (8.8 fixed point) when available */
+            int icostAq = (bFrameScoreCU && fenc.invQscaleFactor) ? ((icost * fenc.invQscaleFactor[cuXY] + 128) >> 8) : icost;
+
+            if (bFrameScoreCU)
+            {
+                costEst += icost;
+                costEstAq += icostAq;
+            }
+
+            fenc.rowSatds[0][0][cuY] += icostAq;
+        }
+    }
+
+    fenc.costEst[0][0] = costEst;
+    fenc.costEstAq[0][0] = costEstAq;
+}
+
+/* Compute the lowres SATD cost of predicting 'fenc' from reference 'ref',
+ * optionally applying weight parameters 'wp' first.  When wp.bPresentFlag is
+ * set, the full reference plane is weighted into wbuffer[0] (allocWeightedRef()
+ * must already have succeeded) and the weighted plane is measured instead.
+ * Each 8x8 block's SATD is clamped to that block's stored intra cost so a poor
+ * weighted prediction can never cost more than the intra alternative. */
+uint32_t LookaheadTLD::weightCostLuma(Lowres& fenc, Lowres& ref, WeightParam& wp)
+{
+    pixel *src = ref.fpelPlane[0];
+    intptr_t stride = fenc.lumaStride;
+
+    if (wp.bPresentFlag)
+    {
+        int offset = wp.inputOffset << (X265_DEPTH - 8);
+        int scale = wp.inputWeight;
+        int denom = wp.log2WeightDenom;
+        int round = denom ? 1 << (denom - 1) : 0;
+        int correction = IF_INTERNAL_PREC - X265_DEPTH; // intermediate interpolation depth
+        int widthHeight = (int)stride;
+
+        /* weight the whole padded reference plane into the scratch buffer */
+        primitives.weight_pp(ref.buffer[0], wbuffer[0], stride, widthHeight, paddedLines,
+            scale, round << correction, denom + correction, offset);
+        src = weightedRef.fpelPlane[0];
+    }
+
+    uint32_t cost = 0;
+    intptr_t pixoff = 0;
+    int mb = 0;
+
+    /* NOTE: the for-increment expression re-bases pixoff to the start of the
+     * next 8-pixel row after the inner loop has advanced it column-wise */
+    for (int y = 0; y < fenc.lines; y += 8, pixoff = y * stride)
+    {
+        for (int x = 0; x < fenc.width; x += 8, mb++, pixoff += 8)
+        {
+            int satd = primitives.pu[LUMA_8x8].satd(src + pixoff, stride, fenc.fpelPlane[0] + pixoff, stride);
+            /* never pay more for inter than the block's intra estimate */
+            cost += X265_MIN(satd, fenc.intraCost[mb]);
+        }
+    }
+
+    return cost;
+}
+
+/* Allocate the four weighted lowres planes used by weightsAnalyse(), laid out
+ * back-to-back in one contiguous buffer sized from the spacing of the fenc
+ * planes.  Returns false if the backing allocation fails. */
+bool LookaheadTLD::allocWeightedRef(Lowres& fenc)
+{
+    intptr_t planesize = fenc.buffer[1] - fenc.buffer[0];
+    intptr_t padoffset = fenc.lowresPlane[0] - fenc.buffer[0];
+    paddedLines = (int)(planesize / fenc.lumaStride);
+
+    /* a single allocation backs all four planes */
+    wbuffer[0] = X265_MALLOC(pixel, 4 * planesize);
+    if (!wbuffer[0])
+        return false;
+    for (int i = 1; i < 4; i++)
+        wbuffer[i] = wbuffer[i - 1] + planesize;
+
+    /* lowres plane pointers skip past the top/left padding region */
+    for (int i = 0; i < 4; i++)
+        weightedRef.lowresPlane[i] = wbuffer[i] + padoffset;
+
+    weightedRef.fpelPlane[0] = weightedRef.lowresPlane[0];
+    weightedRef.lumaStride = fenc.lumaStride;
+    weightedRef.isLowres = true;
+    weightedRef.isWeighted = false;
+
+    return true;
+}
+
+/* Decide whether weighted prediction of 'ref' helps predict 'fenc'.  An
+ * initial scale is guessed from the ratio of plane sum-of-squares, the offset
+ * from the difference of plane means; the candidate is scored against the
+ * unweighted cost with weightCostLuma().  If a clearly better weight is found,
+ * all four reference planes are weighted into wbuffer[] and weightedRef is
+ * marked weighted; otherwise this returns leaving weightedRef unweighted. */
+void LookaheadTLD::weightsAnalyse(Lowres& fenc, Lowres& ref)
+{
+    static const float epsilon = 1.f / 128.f;
+    int deltaIndex = fenc.frameNum - ref.frameNum;
+
+    WeightParam wp;
+    wp.bPresentFlag = false;
+
+    /* lazily allocate the weighted-plane scratch buffers on first use */
+    if (!wbuffer[0])
+    {
+        if (!allocWeightedRef(fenc))
+            return;
+    }
+
+    /* epsilon is chosen to require at least a numerator of 127 (with denominator = 128) */
+    float guessScale, fencMean, refMean;
+    x265_emms(); /* leave MMX state before float math (x265 helper) */
+    if (fenc.wp_ssd[0] && ref.wp_ssd[0])
+        guessScale = sqrtf((float)fenc.wp_ssd[0] / ref.wp_ssd[0]);
+    else
+        guessScale = 1.0f;
+    fencMean = (float)fenc.wp_sum[0] / (fenc.lines * fenc.width) / (1 << (X265_DEPTH - 8));
+    refMean = (float)ref.wp_sum[0] / (fenc.lines * fenc.width) / (1 << (X265_DEPTH - 8));
+
+    /* Early termination */
+    if (fabsf(refMean - fencMean) < 0.5f && fabsf(1.f - guessScale) < epsilon)
+        return;
+
+    int minoff = 0, minscale, mindenom;
+    unsigned int minscore = 0, origscore = 1;
+    int found = 0;
+
+    /* quantize the guessed scale to weight/denom form (denom = 7 requested) */
+    wp.setFromWeightAndOffset((int)(guessScale * 128 + 0.5f), 0, 7, true);
+    mindenom = wp.log2WeightDenom;
+    minscale = wp.inputWeight;
+
+    /* baseline: cost with no weighting (wp.bPresentFlag is still false) */
+    origscore = minscore = weightCostLuma(fenc, ref, wp);
+
+    if (!minscore)
+        return;
+
+    unsigned int s = 0;
+    int curScale = minscale;
+    int curOffset = (int)(fencMean - refMean * curScale / (1 << mindenom) + 0.5f);
+    if (curOffset < -128 || curOffset > 127)
+    {
+        /* Rescale considering the constraints on curOffset. We do it in this order
+        * because scale has a much wider range than offset (because of denom), so
+        * it should almost never need to be clamped. */
+        curOffset = x265_clip3(-128, 127, curOffset);
+        curScale = (int)((1 << mindenom) * (fencMean - curOffset) / refMean + 0.5f);
+        curScale = x265_clip3(0, 127, curScale);
+    }
+    SET_WEIGHT(wp, true, curScale, mindenom, curOffset);
+    s = weightCostLuma(fenc, ref, wp);
+    COPY4_IF_LT(minscore, s, minscale, curScale, minoff, curOffset, found, 1);
+
+    /* Use a smaller denominator if possible */
+    while (mindenom > 0 && !(minscale & 1))
+    {
+        mindenom--;
+        minscale >>= 1;
+    }
+
+    /* reject: no improvement found, identity weight, or gain below ~0.2% */
+    if (!found || (minscale == 1 << mindenom && minoff == 0) || (float)minscore / origscore > 0.998f)
+        return;
+    else
+    {
+        SET_WEIGHT(wp, true, minscale, mindenom, minoff);
+
+        // set weighted delta cost
+        fenc.weightedCostDelta[deltaIndex] = minscore / origscore;
+
+        int offset = wp.inputOffset << (X265_DEPTH - 8);
+        int scale = wp.inputWeight;
+        int denom = wp.log2WeightDenom;
+        int round = denom ? 1 << (denom - 1) : 0;
+        int correction = IF_INTERNAL_PREC - X265_DEPTH; // intermediate interpolation depth
+        intptr_t stride = ref.lumaStride;
+        int widthHeight = (int)stride;
+
+        /* produce all four weighted planes for later motion estimation */
+        for (int i = 0; i < 4; i++)
+            primitives.weight_pp(ref.buffer[i], wbuffer[i], stride, widthHeight, paddedLines,
+            scale, round << correction, denom + correction, offset);
+
+        weightedRef.isWeighted = true;
+    }
+}
+
+/* Constructor: capture configuration, derive the lowres (8x8 CU) grid
+ * dimensions, and configure batch-mode and cooperative-slice heuristics.
+ * No allocation happens here; see create(). */
+Lookahead::Lookahead(x265_param *param, ThreadPool* pool)
+{
+    m_param = param;
+    m_pool  = pool;
+
+    m_lastNonB = NULL;
+    m_isSceneTransition = false;
+    m_scratch  = NULL;
+    m_tld      = NULL;
+    m_filled   = false;
+    m_outputSignalRequired = false;
+    m_isActive = true;
+
+    /* lowres planes are half-size; round up to whole 8x8 CUs */
+    m_8x8Height = ((m_param->sourceHeight / 2) + X265_LOWRES_CU_SIZE - 1) >> X265_LOWRES_CU_BITS;
+    m_8x8Width = ((m_param->sourceWidth / 2) + X265_LOWRES_CU_SIZE - 1) >> X265_LOWRES_CU_BITS;
+    /* count interior blocks only, unless the picture is too small to have any */
+    m_8x8Blocks = m_8x8Width > 2 && m_8x8Height > 2 ? (m_8x8Width - 2) * (m_8x8Height - 2) : m_8x8Width * m_8x8Height;
+
+    /* pretend a keyframe occurred just before the stream started */
+    m_lastKeyframe = -m_param->keyframeMax;
+    m_sliceTypeBusy = false;
+    m_fullQueueSize = X265_MAX(1, m_param->lookaheadDepth);
+    m_bAdaptiveQuant = m_param->rc.aqMode || m_param->bEnableWeightedPred || m_param->bEnableWeightedBiPred;
+
+    /* If we have a thread pool and are using --b-adapt 2, it is generally
+     * preferable to perform all motion searches for each lowres frame in large
+     * batched; this will create one job per --bframe per lowres frame, and
+     * these jobs are performed by workers bonded to the thread running
+     * slicetypeDecide() */
+    m_bBatchMotionSearch = m_pool && m_param->bFrameAdaptive == X265_B_ADAPT_TRELLIS;
+
+    /* It is also beneficial to pre-calculate all possible frame cost estimates
+     * using worker threads bonded to the worker thread running
+     * slicetypeDecide(). This creates bframes * bframes jobs which take less
+     * time than the motion search batches but there are many of them. This may
+     * do much unnecessary work, some frame cost estimates are not needed, so if
+     * the thread pool is small we disable this feature after the initial burst
+     * of work */
+    m_bBatchFrameCosts = m_bBatchMotionSearch;
+
+    /* cooperative lookahead slices require a thread pool */
+    if (m_param->lookaheadSlices && !m_pool)
+        m_param->lookaheadSlices = 0;
+
+    if (m_param->lookaheadSlices > 1)
+    {
+        m_numRowsPerSlice = m_8x8Height / m_param->lookaheadSlices;
+        m_numRowsPerSlice = X265_MAX(m_numRowsPerSlice, 10);            // at least 10 rows per slice
+        m_numRowsPerSlice = X265_MIN(m_numRowsPerSlice, m_8x8Height);   // but no more than the full picture
+        m_numCoopSlices = m_8x8Height / m_numRowsPerSlice;
+        m_param->lookaheadSlices = m_numCoopSlices;                     // report actual final slice count
+    }
+    else
+    {
+        m_numRowsPerSlice = m_8x8Height;
+        m_numCoopSlices = 1;
+    }
+
+#if DETAILED_CU_STATS
+    m_slicetypeDecideElapsedTime = 0;
+    m_preLookaheadElapsedTime = 0;
+    m_countSlicetypeDecide = 0;
+    m_countPreLookahead = 0;
+#endif
+
+    memset(m_histogram, 0, sizeof(m_histogram));
+}
+
+#if DETAILED_CU_STATS
+/* Aggregate per-worker lookahead timing statistics across all thread-local
+ * data instances (one per pool worker, or a single TLD when no pool). */
+void Lookahead::getWorkerStats(int64_t& batchElapsedTime, uint64_t& batchCount, int64_t& coopSliceElapsedTime, uint64_t& coopSliceCount)
+{
+    batchElapsedTime = coopSliceElapsedTime = 0;
+    coopSliceCount = batchCount = 0;
+    int tldCount = m_pool ? m_pool->m_numWorkers : 1;
+    for (int i = 0; i < tldCount; i++)
+    {
+        batchElapsedTime += m_tld[i].batchElapsedTime;
+        coopSliceElapsedTime += m_tld[i].coopSliceElapsedTime;
+        batchCount += m_tld[i].countBatches;
+        coopSliceCount += m_tld[i].countCoopSlices;
+    }
+}
+#endif
+
+/* Allocate one LookaheadTLD per pool worker plus one for the calling thread,
+ * initialize each for the lowres grid, and allocate the scratch row buffer.
+ * Returns true when both allocations succeeded. */
+bool Lookahead::create()
+{
+    const int tldCount = (m_pool ? m_pool->m_numWorkers : 0) + 1;
+    m_tld = new LookaheadTLD[tldCount];
+    for (int i = 0; i < tldCount; i++)
+        m_tld[i].init(m_8x8Width, m_8x8Height, m_8x8Blocks);
+    m_scratch = X265_MALLOC(int, m_tld[0].widthInCU);
+    return m_tld != NULL && m_scratch != NULL;
+}
+
+/* Shut down lookahead job processing: mark the lookahead inactive and, if a
+ * worker is currently inside slicetypeDecide(), block until it signals
+ * completion.  Only needed when frames remain queued (aborted encode). */
+void Lookahead::stopJobs()
+{
+    if (m_pool && !m_inputQueue.empty())
+    {
+        m_inputLock.acquire();
+        m_isActive = false;
+        /* if a decide is in flight, request a completion signal and wait */
+        bool wait = m_outputSignalRequired = m_sliceTypeBusy;
+        m_inputLock.release();
+
+        if (wait)
+            m_outputSignal.wait();
+    }
+}
+
+/* Release all lookahead resources.  The input/output queues are non-empty
+ * only if the encode was aborted; any frames still queued are destroyed. */
+void Lookahead::destroy()
+{
+    Frame* curFrame;
+
+    while ((curFrame = m_inputQueue.popFront()) != NULL)
+    {
+        curFrame->destroy();
+        delete curFrame;
+    }
+
+    while ((curFrame = m_outputQueue.popFront()) != NULL)
+    {
+        curFrame->destroy();
+        delete curFrame;
+    }
+
+    X265_FREE(m_scratch);
+
+    delete [] m_tld;
+}
+
+/* The synchronization of slicetypeDecide is managed here.  The findJob() method
+ * polls the occupancy of the input queue. If the queue is
+ * full, it will run slicetypeDecide() and output a mini-gop of frames to the
+ * output queue. If the flush() method has been called (implying no new pictures
+ * will be received) then the input queue is considered full if it has even one
+ * picture left. getDecidedPicture() removes pictures from the output queue and
+ * only blocks as a last resort. It does not start removing pictures until
+ * m_filled is true, which occurs after *more than* the lookahead depth of
+ * pictures have been input so slicetypeDecide() should have started prior to
+ * output pictures being withdrawn. The first slicetypeDecide() will obviously
+ * still require a blocking wait, but after this slicetypeDecide() will maintain
+ * its lead over the encoder (because one picture is added to the input queue
+ * each time one is removed from the output) and decides slice types of pictures
+ * just ahead of when the encoder needs them */
+
+/* Called by API thread */
+/* Queue a new input picture for lookahead analysis.  'sliceType' is the
+ * caller-forced slice type or X265_TYPE_AUTO.  Once the queue reaches
+ * capacity, a pool worker is woken to run slicetypeDecide(). */
+void Lookahead::addPicture(Frame& curFrame, int sliceType)
+{
+    curFrame.m_lowres.sliceType = sliceType;
+
+    /* determine if the lookahead is (over) filled enough for frames to begin to
+     * be consumed by frame encoders */
+    if (!m_filled)
+    {
+        /* fix: logical && instead of bitwise & on boolean operands */
+        if (!m_param->bframes && !m_param->lookaheadDepth)
+            m_filled = true; /* zero-latency */
+        else if (curFrame.m_poc >= m_param->lookaheadDepth + 2 + m_param->bframes)
+            m_filled = true; /* full capacity plus mini-gop lag */
+    }
+
+    m_inputLock.acquire();
+    m_inputQueue.pushBack(curFrame);
+    if (m_pool && m_inputQueue.size() >= m_fullQueueSize)
+        tryWakeOne();
+    m_inputLock.release();
+}
+
+/* Called by API thread */
+/* Signal that no more pictures will arrive: mark the lookahead as filled and
+ * let slicetypeDecide() run on whatever remains in the input queue. */
+void Lookahead::flush()
+{
+    m_filled = true;       /* output side may start draining immediately */
+    m_fullQueueSize = 1;   /* even a single queued picture counts as full */
+}
+
+/* Worker-thread entry point.  Runs slicetypeDecide() when the input queue has
+ * reached capacity and no other thread is already deciding; afterwards wakes
+ * any thread blocked in getDecidedPicture()/stopJobs(). */
+void Lookahead::findJob(int /*workerThreadID*/)
+{
+    bool doDecide;
+
+    /* claim the decide under the input lock so only one thread runs it */
+    m_inputLock.acquire();
+    if (m_inputQueue.size() >= m_fullQueueSize && !m_sliceTypeBusy && m_isActive)
+        doDecide = m_sliceTypeBusy = true;
+    else
+        doDecide = m_helpWanted = false;
+    m_inputLock.release();
+
+    if (!doDecide)
+        return;
+
+    ProfileLookaheadTime(m_slicetypeDecideElapsedTime, m_countSlicetypeDecide);
+    ProfileScopeEvent(slicetypeDecideEV);
+
+    slicetypeDecide();
+
+    /* release the busy flag and trigger anyone waiting on our output */
+    m_inputLock.acquire();
+    if (m_outputSignalRequired)
+    {
+        m_outputSignal.trigger();
+        m_outputSignalRequired = false;
+    }
+    m_sliceTypeBusy = false;
+    m_inputLock.release();
+}
+
+/* Called by API thread */
+/* Called by API thread: remove the next decided picture from the output
+ * queue.  Returns NULL until the lookahead is filled; thereafter it blocks
+ * only as a last resort, first attempting to run slicetypeDecide() itself. */
+Frame* Lookahead::getDecidedPicture()
+{
+    if (m_filled)
+    {
+        m_outputLock.acquire();
+        Frame *out = m_outputQueue.popFront();
+        m_outputLock.release();
+
+        if (out)
+            return out;
+
+        findJob(-1); /* run slicetypeDecide() if necessary */
+
+        /* if another thread is mid-decide, wait for it to signal completion */
+        m_inputLock.acquire();
+        bool wait = m_outputSignalRequired = m_sliceTypeBusy;
+        m_inputLock.release();
+
+        if (wait)
+            m_outputSignal.wait();
+
+        return m_outputQueue.popFront();
+    }
+    else
+        return NULL;
+}
+
+/* Called by rate-control to calculate the estimated SATD cost for a given
+ * picture.  It assumes dpb->prepareEncode() has already been called for the
+ * picture and all the references are established */
+/* Called by rate-control to calculate the estimated SATD cost for a given
+ * picture.  It assumes dpb->prepareEncode() has already been called for the
+ * picture and all the references are established */
+void Lookahead::getEstimatedPictureCost(Frame *curFrame)
+{
+    Lowres *frames[X265_LOOKAHEAD_MAX];
+
+    // POC distances to each reference
+    Slice *slice = curFrame->m_encData->m_slice;
+    int p0 = 0, p1, b;
+    int poc = slice->m_poc;
+    int l0poc = slice->m_refPOCList[0][0];
+    int l1poc = slice->m_refPOCList[1][0];
+
+    /* index frames[] by POC distance from the L0 reference (p0 = 0) */
+    switch (slice->m_sliceType)
+    {
+    case I_SLICE:
+        frames[p0] = &curFrame->m_lowres;
+        b = p1 = 0;
+        break;
+
+    case P_SLICE:
+        b = p1 = poc - l0poc;
+        frames[p0] = &slice->m_refFrameList[0][0]->m_lowres;
+        frames[b] = &curFrame->m_lowres;
+        break;
+
+    case B_SLICE:
+        b = poc - l0poc;
+        p1 = b + l1poc - poc;
+        frames[p0] = &slice->m_refFrameList[0][0]->m_lowres;
+        frames[b] = &curFrame->m_lowres;
+        frames[p1] = &slice->m_refFrameList[1][0]->m_lowres;
+        break;
+
+    default:
+        return;
+    }
+
+    /* no trailing semicolon: X265_CHECK is a macro (compiles out in release) */
+    X265_CHECK(curFrame->m_lowres.costEst[b - p0][p1 - b] > 0, "Slice cost not estimated\n")
+
+    if (m_param->rc.cuTree && !m_param->rc.bStatRead)
+        /* update row satds based on cutree offsets */
+        curFrame->m_lowres.satdCost = frameCostRecalculate(frames, p0, p1, b);
+    else if (m_param->rc.aqMode)
+        curFrame->m_lowres.satdCost = curFrame->m_lowres.costEstAq[b - p0][p1 - b];
+    else
+        curFrame->m_lowres.satdCost = curFrame->m_lowres.costEst[b - p0][p1 - b];
+
+    if (m_param->rc.vbvBufferSize && m_param->rc.vbvMaxBitrate)
+    {
+        /* aggregate lowres row satds to CTU resolution */
+        curFrame->m_lowres.lowresCostForRc = curFrame->m_lowres.lowresCosts[b - p0][p1 - b];
+        uint32_t lowresRow = 0, lowresCol = 0, lowresCuIdx = 0, sum = 0, intraSum = 0;
+        uint32_t scale = m_param->maxCUSize / (2 * X265_LOWRES_CU_SIZE);
+        uint32_t numCuInHeight = (m_param->sourceHeight + g_maxCUSize - 1) / g_maxCUSize;
+        uint32_t widthInLowresCu = (uint32_t)m_8x8Width, heightInLowresCu = (uint32_t)m_8x8Height;
+        double *qp_offset = 0;
+        /* Factor in qpoffsets based on Aq/Cutree in CU costs */
+        if (m_param->rc.aqMode)
+            qp_offset = (frames[b]->sliceType == X265_TYPE_B || !m_param->rc.cuTree) ? frames[b]->qpAqOffset : frames[b]->qpCuTreeOffset;
+
+        for (uint32_t row = 0; row < numCuInHeight; row++)
+        {
+            lowresRow = row * scale;
+            for (uint32_t cnt = 0; cnt < scale && lowresRow < heightInLowresCu; lowresRow++, cnt++)
+            {
+                sum = 0; intraSum = 0;
+                lowresCuIdx = lowresRow * widthInLowresCu;
+                for (lowresCol = 0; lowresCol < widthInLowresCu; lowresCol++, lowresCuIdx++)
+                {
+                    uint16_t lowresCuCost = curFrame->m_lowres.lowresCostForRc[lowresCuIdx] & LOWRES_COST_MASK;
+                    if (qp_offset)
+                    {
+                        /* scale costs by 2^(qp_offset/6) in 8.8 fixed point */
+                        lowresCuCost = (uint16_t)((lowresCuCost * x265_exp2fix8(qp_offset[lowresCuIdx]) + 128) >> 8);
+                        int32_t intraCuCost = curFrame->m_lowres.intraCost[lowresCuIdx]; 
+                        curFrame->m_lowres.intraCost[lowresCuIdx] = (intraCuCost * x265_exp2fix8(qp_offset[lowresCuIdx]) + 128) >> 8;
+                    }
+                    curFrame->m_lowres.lowresCostForRc[lowresCuIdx] = lowresCuCost;
+                    sum += lowresCuCost;
+                    intraSum += curFrame->m_lowres.intraCost[lowresCuIdx];
+                }
+                curFrame->m_encData->m_rowStat[row].satdForVbv += sum;
+                curFrame->m_encData->m_rowStat[row].intraSatdForVbv += intraSum;
+            }
+        }
+    }
+}
+
+/* Bonded task-group entry: perform pre-lookahead work (lowres init, adaptive
+ * quant, lowres intra estimation) for each queued frame.  A negative
+ * workerThreadID means the master thread is calling; it uses the extra TLD
+ * slot beyond the pool workers. */
+void PreLookaheadGroup::processTasks(int workerThreadID)
+{
+    if (workerThreadID < 0)
+        workerThreadID = m_lookahead.m_pool ? m_lookahead.m_pool->m_numWorkers : 0;
+    LookaheadTLD& tld = m_lookahead.m_tld[workerThreadID];
+
+    /* the lock protects only job acquisition; it is dropped while working */
+    m_lock.acquire();
+    while (m_jobAcquired < m_jobTotal)
+    {
+        Frame* preFrame = m_preframes[m_jobAcquired++];
+        ProfileLookaheadTime(m_lookahead.m_preLookaheadElapsedTime, m_lookahead.m_countPreLookahead);
+        ProfileScopeEvent(prelookahead);
+        m_lock.release();
+
+        preFrame->m_lowres.init(preFrame->m_fencPic, preFrame->m_poc);
+        if (m_lookahead.m_param->rc.bStatRead && m_lookahead.m_param->rc.cuTree && IS_REFERENCED(preFrame))
+            /* cu-tree offsets were read from stats file */;
+        else if (m_lookahead.m_bAdaptiveQuant)
+            tld.calcAdaptiveQuantFrame(preFrame, m_lookahead.m_param);
+        tld.lowresIntraEstimate(preFrame->m_lowres);
+        preFrame->m_lowresInit = true;
+
+        m_lock.acquire();
+    }
+    m_lock.release();
+}
+
+/* Called by the API thread or a worker thread; acquires the input queue lock
+ * itself (the old comment claiming the lock was already held was stale).
+ * Decides slice types for the next mini-gop, pre-computes frame costs needed
+ * by rate control, and moves the decided frames from the input queue to the
+ * output queue in coded order (non-B first, then B-refs, then B frames). */
+void Lookahead::slicetypeDecide()
+{
+    PreLookaheadGroup pre(*this);
+
+    Lowres* frames[X265_LOOKAHEAD_MAX + X265_BFRAME_MAX + 4];
+    Frame*  list[X265_BFRAME_MAX + 4];
+    memset(frames, 0, sizeof(frames));
+    memset(list, 0, sizeof(list));
+    int maxSearch = X265_MIN(m_param->lookaheadDepth, X265_LOOKAHEAD_MAX);
+    maxSearch = X265_MAX(1, maxSearch);
+
+    {
+        ScopedLock lock(m_inputLock);
+
+        Frame *curFrame = m_inputQueue.first();
+        int j;
+        for (j = 0; j < m_param->bframes + 2; j++)
+        {
+            if (!curFrame) break;
+            list[j] = curFrame;
+            curFrame = curFrame->m_next;
+        }
+
+        curFrame = m_inputQueue.first();
+        frames[0] = m_lastNonB;
+        for (j = 0; j < maxSearch; j++)
+        {
+            if (!curFrame) break;
+            frames[j + 1] = &curFrame->m_lowres;
+
+            /* collect frames still needing pre-lookahead analysis */
+            if (!curFrame->m_lowresInit)
+                pre.m_preframes[pre.m_jobTotal++] = curFrame;
+
+            curFrame = curFrame->m_next;
+        }
+
+        maxSearch = j;
+    }
+
+    /* perform pre-analysis on frames which need it, using a bonded task group */
+    if (pre.m_jobTotal)
+    {
+        if (m_pool)
+            pre.tryBondPeers(*m_pool, pre.m_jobTotal);
+        pre.processTasks(-1);
+        pre.waitForExit();
+    }
+
+    if (m_lastNonB && !m_param->rc.bStatRead &&
+        ((m_param->bFrameAdaptive && m_param->bframes) ||
+         m_param->rc.cuTree || m_param->scenecutThreshold ||
+         (m_param->lookaheadDepth && m_param->rc.vbvBufferSize)))
+    {
+        slicetypeAnalyse(frames, false);
+    }
+
+    /* validate/force slice types frame by frame until the first non-B */
+    int bframes, brefs;
+    for (bframes = 0, brefs = 0;; bframes++)
+    {
+        Lowres& frm = list[bframes]->m_lowres;
+
+        if (frm.sliceType == X265_TYPE_BREF && !m_param->bBPyramid && brefs == m_param->bBPyramid)
+        {
+            frm.sliceType = X265_TYPE_B;
+            x265_log(m_param, X265_LOG_WARNING, "B-ref at frame %d incompatible with B-pyramid\n",
+                     frm.frameNum);
+        }
+
+        /* pyramid with multiple B-refs needs a big enough dpb that the preceding P-frame stays available.
+         * smaller dpb could be supported by smart enough use of mmco, but it's easier just to forbid it. */
+        else if (frm.sliceType == X265_TYPE_BREF && m_param->bBPyramid && brefs &&
+                 m_param->maxNumReferences <= (brefs + 3))
+        {
+            frm.sliceType = X265_TYPE_B;
+            /* fix: the first %d is the frame number; frm.sliceType was passed here */
+            x265_log(m_param, X265_LOG_WARNING, "B-ref at frame %d incompatible with B-pyramid and %d reference frames\n",
+                     frm.frameNum, m_param->maxNumReferences);
+        }
+
+        if (/* (!param->intraRefresh || frm.frameNum == 0) && */ frm.frameNum - m_lastKeyframe >= m_param->keyframeMax)
+        {
+            if (frm.sliceType == X265_TYPE_AUTO || frm.sliceType == X265_TYPE_I)
+                frm.sliceType = m_param->bOpenGOP && m_lastKeyframe >= 0 ? X265_TYPE_I : X265_TYPE_IDR;
+            bool warn = frm.sliceType != X265_TYPE_IDR;
+            if (warn && m_param->bOpenGOP)
+                warn &= frm.sliceType != X265_TYPE_I;
+            if (warn)
+            {
+                x265_log(m_param, X265_LOG_WARNING, "specified frame type (%d) at %d is not compatible with keyframe interval\n",
+                         frm.sliceType, frm.frameNum);
+                frm.sliceType = m_param->bOpenGOP && m_lastKeyframe >= 0 ? X265_TYPE_I : X265_TYPE_IDR;
+            }
+        }
+        if (frm.sliceType == X265_TYPE_I && frm.frameNum - m_lastKeyframe >= m_param->keyframeMin)
+        {
+            if (m_param->bOpenGOP)
+            {
+                m_lastKeyframe = frm.frameNum;
+                frm.bKeyframe = true;
+            }
+            else
+                frm.sliceType = X265_TYPE_IDR;
+        }
+        if (frm.sliceType == X265_TYPE_IDR)
+        {
+            /* Closed GOP */
+            m_lastKeyframe = frm.frameNum;
+            frm.bKeyframe = true;
+            if (bframes > 0)
+            {
+                /* an IDR cannot follow B frames; promote the last B to P */
+                list[bframes - 1]->m_lowres.sliceType = X265_TYPE_P;
+                bframes--;
+            }
+        }
+        if (bframes == m_param->bframes || !list[bframes + 1])
+        {
+            if (IS_X265_TYPE_B(frm.sliceType))
+                x265_log(m_param, X265_LOG_WARNING, "specified frame type is not compatible with max B-frames\n");
+            if (frm.sliceType == X265_TYPE_AUTO || IS_X265_TYPE_B(frm.sliceType))
+                frm.sliceType = X265_TYPE_P;
+        }
+        if (frm.sliceType == X265_TYPE_BREF)
+            brefs++;
+        if (frm.sliceType == X265_TYPE_AUTO)
+            frm.sliceType = X265_TYPE_B;
+        else if (!IS_X265_TYPE_B(frm.sliceType))
+            break;
+    }
+
+    if (bframes)
+        list[bframes - 1]->m_lowres.bLastMiniGopBFrame = true;
+    list[bframes]->m_lowres.leadingBframes = bframes;
+    m_lastNonB = &list[bframes]->m_lowres;
+    m_histogram[bframes]++;
+
+    /* insert a bref into the sequence */
+    if (m_param->bBPyramid && bframes > 1 && !brefs)
+    {
+        list[bframes / 2]->m_lowres.sliceType = X265_TYPE_BREF;
+        brefs++;
+    }
+    /* calculate the frame costs ahead of time for estimateFrameCost while we still have lowres */
+    if (m_param->rc.rateControlMode != X265_RC_CQP)
+    {
+        int p0, p1, b;
+        /* For zero latency tuning, calculate frame cost to be used later in RC */
+        if (!maxSearch)
+        {
+            for (int i = 0; i <= bframes; i++)
+               frames[i + 1] = &list[i]->m_lowres;
+        }
+
+        /* estimate new non-B cost */
+        p1 = b = bframes + 1;
+        p0 = (IS_X265_TYPE_I(frames[bframes + 1]->sliceType)) ? b : 0;
+
+        CostEstimateGroup estGroup(*this, frames);
+
+        estGroup.singleCost(p0, p1, b);
+
+        if (bframes)
+        {
+            p0 = 0; // last nonb
+            for (b = 1; b <= bframes; b++)
+            {
+                if (frames[b]->sliceType == X265_TYPE_B)
+                    for (p1 = b; frames[p1]->sliceType == X265_TYPE_B; p1++)
+                        ; // find new nonb or bref
+                else
+                    p1 = bframes + 1;
+
+                estGroup.singleCost(p0, p1, b);
+
+                if (frames[b]->sliceType == X265_TYPE_BREF)
+                    p0 = b;
+            }
+        }
+    }
+
+    m_inputLock.acquire();
+    /* dequeue all frames from inputQueue that are about to be enqueued
+     * in the output queue. The order is important because Frame can
+     * only be in one list at a time */
+    int64_t pts[X265_BFRAME_MAX + 1];
+    for (int i = 0; i <= bframes; i++)
+    {
+        Frame *curFrame;
+        curFrame = m_inputQueue.popFront();
+        pts[i] = curFrame->m_pts;
+        maxSearch--;
+    }
+    m_inputLock.release();
+
+    m_outputLock.acquire();
+    /* add non-B to output queue */
+    int idx = 0;
+    list[bframes]->m_reorderedPts = pts[idx++];
+    m_outputQueue.pushBack(*list[bframes]);
+
+    /* Add B-ref frame next to P frame in output queue, the B-ref encode before non B-ref frame */
+    if (bframes > 1 && m_param->bBPyramid)
+    {
+        for (int i = 0; i < bframes; i++)
+        {
+            if (list[i]->m_lowres.sliceType == X265_TYPE_BREF)
+            {
+                list[i]->m_reorderedPts = pts[idx++];
+                m_outputQueue.pushBack(*list[i]);
+            }
+        }
+    }
+
+    /* add B frames to output queue */
+    for (int i = 0; i < bframes; i++)
+    {
+        /* push all the B frames into output queue except B-ref, which already pushed into output queue */
+        if (list[i]->m_lowres.sliceType != X265_TYPE_BREF)
+        {
+            list[i]->m_reorderedPts = pts[idx++];
+            m_outputQueue.pushBack(*list[i]);
+        }
+    }
+
+    bool isKeyFrameAnalyse = (m_param->rc.cuTree || (m_param->rc.vbvBufferSize && m_param->lookaheadDepth)) && !m_param->rc.bStatRead;
+    if (isKeyFrameAnalyse && IS_X265_TYPE_I(m_lastNonB->sliceType))
+    {
+        /* re-analyse the remaining input frames against the new keyframe */
+        m_inputLock.acquire();
+        Frame *curFrame = m_inputQueue.first();
+        frames[0] = m_lastNonB;
+        int j;
+        for (j = 0; j < maxSearch; j++)
+        {
+            frames[j + 1] = &curFrame->m_lowres;
+            curFrame = curFrame->m_next;
+        }
+        m_inputLock.release();
+
+        frames[j + 1] = NULL;
+        slicetypeAnalyse(frames, true);
+    }
+    m_outputLock.release();
+}
+
+/* Populate each non-B frame's plannedSatd[]/plannedType[] with the costs of
+ * the frames that will follow it in coded order, for VBV rate-control
+ * planning.  'keyframe' indicates frames[0] starts a new GOP, in which case
+ * the plans are attached to frames[0] itself rather than the next non-B. */
+void Lookahead::vbvLookahead(Lowres **frames, int numFrames, int keyframe)
+{
+    int prevNonB = 0, curNonB = 1, idx = 0;
+    /* advance curNonB past the leading run of B frames */
+    while (curNonB < numFrames && frames[curNonB]->sliceType == X265_TYPE_B)
+        curNonB++;
+    int nextNonB = keyframe ? prevNonB : curNonB;
+    int nextB = prevNonB + 1;
+    int nextBRef = 0, curBRef = 0;
+    if (m_param->bBPyramid && curNonB - prevNonB > 1)
+        curBRef = (prevNonB + curNonB + 1) / 2;
+    int miniGopEnd = keyframe ? prevNonB : curNonB;
+    while (curNonB < numFrames + !keyframe)
+    {
+        /* P/I cost: This shouldn't include the cost of nextNonB */
+        if (nextNonB != curNonB)
+        {
+            int p0 = IS_X265_TYPE_I(frames[curNonB]->sliceType) ? curNonB : prevNonB;
+            frames[nextNonB]->plannedSatd[idx] = vbvFrameCost(frames, p0, curNonB, curNonB);
+            frames[nextNonB]->plannedType[idx] = frames[curNonB]->sliceType;
+
+            /* Save the nextNonB Cost in each B frame of the current miniGop */
+            if (curNonB > miniGopEnd)
+            {
+                for (int j = nextB; j < miniGopEnd; j++)
+                {
+                    frames[j]->plannedSatd[frames[j]->indB] = frames[nextNonB]->plannedSatd[idx];
+                    frames[j]->plannedType[frames[j]->indB++] = frames[nextNonB]->plannedType[idx];
+                }
+            }
+            idx++;
+        }
+
+        /* Handle the B-frames: coded order */
+        if (m_param->bBPyramid && curNonB - prevNonB > 1)
+            nextBRef = (prevNonB + curNonB + 1) / 2;
+
+        for (int i = prevNonB + 1; i < curNonB; i++, idx++)
+        {
+            int64_t satdCost = 0;
+            int type = X265_TYPE_B;
+            if (nextBRef)
+            {
+                /* with a pyramid, Bs on either side of the B-ref use it as
+                 * one of their references */
+                if (i == nextBRef)
+                {
+                    satdCost = vbvFrameCost(frames, prevNonB, curNonB, nextBRef);
+                    type = X265_TYPE_BREF;
+                }
+                else if (i < nextBRef)
+                    satdCost = vbvFrameCost(frames, prevNonB, nextBRef, i);
+                else
+                    satdCost = vbvFrameCost(frames, nextBRef, curNonB, i);
+            }
+            else
+                satdCost = vbvFrameCost(frames, prevNonB, curNonB, i);
+            frames[nextNonB]->plannedSatd[idx] = satdCost;
+            frames[nextNonB]->plannedType[idx] = type;
+            /* Save the nextB Cost in each B frame of the current miniGop */
+
+            for (int j = nextB; j < miniGopEnd; j++)
+            {
+                if (curBRef && curBRef == i)
+                    break;
+                if (j >= i && j !=nextBRef)
+                    continue;
+                frames[j]->plannedSatd[frames[j]->indB] = satdCost;
+                frames[j]->plannedType[frames[j]->indB++] = type;
+            }
+        }
+        /* move to the next mini-gop */
+        prevNonB = curNonB;
+        curNonB++;
+        while (curNonB <= numFrames && frames[curNonB]->sliceType == X265_TYPE_B)
+            curNonB++;
+    }
+
+    /* terminate the plan list */
+    frames[nextNonB]->plannedType[idx] = X265_TYPE_AUTO;
+}
+
+/* Return the cost estimate used for VBV planning for frame 'b' predicted
+ * from 'p0'/'p1'.  The raw singleCost() is always computed (it caches the
+ * estimate); with AQ enabled the AQ-adjusted or cu-tree-recalculated cost is
+ * returned instead. */
+int64_t Lookahead::vbvFrameCost(Lowres **frames, int p0, int p1, int b)
+{
+    CostEstimateGroup estGroup(*this, frames);
+    int64_t cost = estGroup.singleCost(p0, p1, b);
+
+    if (!m_param->rc.aqMode)
+        return cost;
+
+    if (m_param->rc.cuTree)
+        return frameCostRecalculate(frames, p0, p1, b);
+
+    return frames[b]->costEstAq[b - p0][p1 - b];
+}
+
+/* Assign slice types (I/P/B) to the undecided frames in the lookahead window.
+ * frames[0] is the last coded non-B frame; frames[1..] are candidates whose
+ * sliceType is X265_TYPE_AUTO.  Optionally pre-computes motion searches and
+ * frame costs in worker-thread batches, then runs the configured B-adapt
+ * strategy (trellis / fast / fixed pattern), scenecut checks on the first
+ * mini-GOP, cuTree propagation and VBV lookahead, and finally restores AUTO
+ * on frames whose decision must be revisited on the next call. */
+void Lookahead::slicetypeAnalyse(Lowres **frames, bool bKeyframe)
+{
+    int numFrames, origNumFrames, keyintLimit, framecnt;
+    int maxSearch = X265_MIN(m_param->lookaheadDepth, X265_LOOKAHEAD_MAX);
+    int cuCount = m_8x8Blocks;
+    int resetStart;
+    bool bIsVbvLookahead = m_param->rc.vbvBufferSize && m_param->lookaheadDepth;
+
+    /* count undecided frames */
+    for (framecnt = 0; framecnt < maxSearch; framecnt++)
+    {
+        Lowres *fenc = frames[framecnt + 1];
+        if (!fenc || fenc->sliceType != X265_TYPE_AUTO)
+            break;
+    }
+
+    if (!framecnt)
+    {
+        if (m_param->rc.cuTree)
+            cuTree(frames, 0, bKeyframe);
+        return;
+    }
+
+    /* NULL-terminate the candidate list for the analysis passes below */
+    frames[framecnt + 1] = NULL;
+
+    /* frames remaining before a keyframe must be forced */
+    keyintLimit = m_param->keyframeMax - frames[0]->frameNum + m_lastKeyframe - 1;
+    origNumFrames = numFrames = X265_MIN(framecnt, keyintLimit);
+
+    if (bIsVbvLookahead)
+        numFrames = framecnt;
+    else if (m_param->bOpenGOP && numFrames < framecnt)
+        numFrames++;
+    else if (numFrames == 0)
+    {
+        frames[1]->sliceType = X265_TYPE_I;
+        return;
+    }
+
+    if (m_bBatchMotionSearch)
+    {
+        /* pre-calculate all motion searches, using many worker threads */
+        CostEstimateGroup estGroup(*this, frames);
+        for (int b = 2; b < numFrames; b++)
+        {
+            for (int i = 1; i <= m_param->bframes + 1; i++)
+            {
+                int p0 = b - i;
+                if (p0 < 0)
+                    continue;
+
+                /* Skip search if already done */
+                if (frames[b]->lowresMvs[0][i - 1][0].x != 0x7FFF)
+                    continue;
+
+                /* perform search to p1 at same distance, if possible */
+                int p1 = b + i;
+                if (p1 >= numFrames || frames[b]->lowresMvs[1][i - 1][0].x != 0x7FFF)
+                    p1 = b;
+
+                estGroup.add(p0, p1, b);
+            }
+        }
+        /* auto-disable after the first batch if pool is small */
+        m_bBatchMotionSearch &= m_pool->m_numWorkers >= 4;
+        estGroup.finishBatch();
+
+        if (m_bBatchFrameCosts)
+        {
+            /* pre-calculate all frame cost estimates, using many worker threads */
+            for (int b = 2; b < numFrames; b++)
+            {
+                for (int i = 1; i <= m_param->bframes + 1; i++)
+                {
+                    if (b < i)
+                        continue;
+
+                    /* only measure frame cost in this pass if motion searches
+                     * are already done */
+                    if (frames[b]->lowresMvs[0][i - 1][0].x == 0x7FFF)
+                        continue;
+
+                    int p0 = b - i;
+
+                    for (int j = 0; j <= m_param->bframes; j++)
+                    {
+                        int p1 = b + j;
+                        if (p1 >= numFrames)
+                            break;
+
+                        /* ensure P1 search is done */
+                        if (j && frames[b]->lowresMvs[1][j - 1][0].x == 0x7FFF)
+                            continue;
+
+                        /* ensure frame cost is not done */
+                        if (frames[b]->costEst[i][j] >= 0)
+                            continue;
+
+                        estGroup.add(p0, p1, b);
+                    }
+                }
+            }
+
+            /* auto-disable after the first batch if the pool is not large */
+            m_bBatchFrameCosts &= m_pool->m_numWorkers > 12;
+            estGroup.finishBatch();
+        }
+    }
+
+    int numBFrames = 0;
+    int numAnalyzed = numFrames;
+
+    if (m_param->bFrameAdaptive)
+    {
+        bool isScenecut = scenecut(frames, 0, 1, true, origNumFrames);
+        /* When scenecut threshold is set, use scenecut detection for I frame placements */
+        if (!m_param->scenecutThreshold && isScenecut)
+        {
+            frames[1]->sliceType = X265_TYPE_I;
+            return;
+        }
+    }
+
+    if (m_param->bframes)
+    {
+        if (m_param->bFrameAdaptive == X265_B_ADAPT_TRELLIS)
+        {
+            if (numFrames > 1)
+            {
+                /* paths are stored modulo (X265_BFRAME_MAX + 1); entry for a
+                 * given length lives at length % (X265_BFRAME_MAX + 1) */
+                char best_paths[X265_BFRAME_MAX + 1][X265_LOOKAHEAD_MAX + 1] = { "", "P" };
+                int best_path_index = numFrames % (X265_BFRAME_MAX + 1);
+
+                /* Perform the frame type analysis. */
+                for (int j = 2; j <= numFrames; j++)
+                    slicetypePath(frames, j, best_paths);
+
+                numBFrames = (int)strspn(best_paths[best_path_index], "B");
+
+                /* Load the results of the analysis into the frame types. */
+                for (int j = 1; j < numFrames; j++)
+                    frames[j]->sliceType = best_paths[best_path_index][j - 1] == 'B' ? X265_TYPE_B : X265_TYPE_P;
+            }
+            frames[numFrames]->sliceType = X265_TYPE_P;
+        }
+        else if (m_param->bFrameAdaptive == X265_B_ADAPT_FAST)
+        {
+            CostEstimateGroup estGroup(*this, frames);
+
+            int64_t cost1p0, cost2p0, cost1b1, cost2p1;
+
+            for (int i = 0; i <= numFrames - 2; )
+            {
+                cost2p1 = estGroup.singleCost(i + 0, i + 2, i + 2, true);
+                /* too many intra blocks: do not attempt a B here */
+                if (frames[i + 2]->intraMbs[2] > cuCount / 2)
+                {
+                    frames[i + 1]->sliceType = X265_TYPE_P;
+                    frames[i + 2]->sliceType = X265_TYPE_P;
+                    i += 2;
+                    continue;
+                }
+
+                cost1b1 = estGroup.singleCost(i + 0, i + 2, i + 1);
+                cost1p0 = estGroup.singleCost(i + 0, i + 1, i + 1);
+                cost2p0 = estGroup.singleCost(i + 1, i + 2, i + 2);
+
+                /* two P frames cheaper than B+P: keep P */
+                if (cost1p0 + cost2p0 < cost1b1 + cost2p1)
+                {
+                    frames[i + 1]->sliceType = X265_TYPE_P;
+                    i += 1;
+                    continue;
+                }
+
+// arbitrary and untuned
+#define INTER_THRESH 300
+#define P_SENS_BIAS (50 - m_param->bFrameBias)
+                frames[i + 1]->sliceType = X265_TYPE_B;
+
+                /* extend the run of B frames while the P cost stays cheap */
+                int j;
+                for (j = i + 2; j <= X265_MIN(i + m_param->bframes, numFrames - 1); j++)
+                {
+                    int64_t pthresh = X265_MAX(INTER_THRESH - P_SENS_BIAS * (j - i - 1), INTER_THRESH / 10);
+                    int64_t pcost = estGroup.singleCost(i + 0, j + 1, j + 1, true);
+                    if (pcost > pthresh * cuCount || frames[j + 1]->intraMbs[j - i + 1] > cuCount / 3)
+                        break;
+                    frames[j]->sliceType = X265_TYPE_B;
+                }
+
+                frames[j]->sliceType = X265_TYPE_P;
+                i = j;
+            }
+            frames[numFrames]->sliceType = X265_TYPE_P;
+            numBFrames = 0;
+            while (numBFrames < numFrames && frames[numBFrames + 1]->sliceType == X265_TYPE_B)
+                numBFrames++;
+        }
+        else
+        {
+            /* non-adaptive: fixed B-frame cadence */
+            numBFrames = X265_MIN(numFrames - 1, m_param->bframes);
+            for (int j = 1; j < numFrames; j++)
+                frames[j]->sliceType = (j % (numBFrames + 1)) ? X265_TYPE_B : X265_TYPE_P;
+
+            frames[numFrames]->sliceType = X265_TYPE_P;
+        }
+
+        /* Check scenecut on the first minigop. */
+        for (int j = 1; j < numBFrames + 1; j++)
+        {
+            if (m_param->scenecutThreshold && scenecut(frames, j, j + 1, false, origNumFrames))
+            {
+                frames[j]->sliceType = X265_TYPE_P;
+                numAnalyzed = j;
+                break;
+            }
+        }
+        resetStart = bKeyframe ? 1 : X265_MIN(numBFrames + 2, numAnalyzed + 1);
+    }
+    else
+    {
+        for (int j = 1; j <= numFrames; j++)
+            frames[j]->sliceType = X265_TYPE_P;
+
+        resetStart = bKeyframe ? 1 : 2;
+    }
+
+    if (m_param->rc.cuTree)
+        cuTree(frames, X265_MIN(numFrames, m_param->keyframeMax), bKeyframe);
+
+    /* force periodic keyframes past the keyint limit */
+    // if (!param->bIntraRefresh)
+    for (int j = keyintLimit + 1; j <= numFrames; j += m_param->keyframeMax)
+    {
+        frames[j]->sliceType = X265_TYPE_I;
+        resetStart = X265_MIN(resetStart, j + 1);
+    }
+
+    if (bIsVbvLookahead)
+        vbvLookahead(frames, numFrames, bKeyframe);
+
+     int maxp1 = X265_MIN(m_param->bframes + 1, origNumFrames);
+    /* Restore frame types for all frames that haven't actually been decided yet. */
+    for (int j = resetStart; j <= numFrames; j++)
+    {
+        frames[j]->sliceType = X265_TYPE_AUTO;
+        /* If any frame marked as scenecut is being restarted for sliceDecision, 
+         * undo scene Transition flag */
+        if (j <= maxp1 && frames[j]->bScenecut && m_isSceneTransition)
+            m_isSceneTransition = false;
+    }
+}
+
+/* Decide whether frames[p1] is a scenecut against reference p0.  When
+ * bRealScenecut is set and B-frames are enabled, first scans up to bframes
+ * frames ahead so that short flashes (AAAAAABBBAAAAAA) are not misclassified
+ * as scenecuts, and tracks fade-style scene transitions via
+ * m_isSceneTransition using SATD-cost fluctuation around the mini-GOP mean. */
+bool Lookahead::scenecut(Lowres **frames, int p0, int p1, bool bRealScenecut, int numFrames)
+{
+    /* Only do analysis during a normal scenecut check. */
+    if (bRealScenecut && m_param->bframes)
+    {
+        int origmaxp1 = p0 + 1;
+        /* Look ahead to avoid coding short flashes as scenecuts. */
+        origmaxp1 += m_param->bframes;
+        int maxp1 = X265_MIN(origmaxp1, numFrames);
+        bool fluctuate = false;
+        /* NOTE(review): noScenecuts is set true when a confirmed scenecut is
+         * found and false when one is ruled out -- the name reads inverted;
+         * verify against upstream x265 before renaming. */
+        bool noScenecuts = false;
+        int64_t avgSatdCost = 0;
+        if (frames[0]->costEst[1][0] > -1)
+            avgSatdCost = frames[0]->costEst[1][0];
+        int cnt = 1;
+        /* Where A and B are scenes: AAAAAABBBAAAAAA
+         * If BBB is shorter than (maxp1-p0), it is detected as a flash
+         * and not considered a scenecut. */
+        for (int cp1 = p1; cp1 <= maxp1; cp1++)
+        {
+            if (!scenecutInternal(frames, p0, cp1, false))
+            {
+                /* Any frame in between p0 and cur_p1 cannot be a real scenecut. */
+                for (int i = cp1; i > p0; i--)
+                {
+                    frames[i]->bScenecut = false;
+                    noScenecuts = false;
+                }
+            }
+            else if (scenecutInternal(frames, cp1 - 1, cp1, false))
+            {
+                /* If current frame is a Scenecut from p0 frame as well as Scenecut from
+                 * preceeding frame, mark it as a Scenecut */
+                frames[cp1]->bScenecut = true;
+                noScenecuts = true;
+            }
+
+            /* compute average satdcost of all the frames in the mini-gop to confirm 
+             * whether there is any great fluctuation among them to rule out false positives */
+            X265_CHECK(frames[cp1]->costEst[cp1 - p0][0]!= -1, "costEst is not done \n");
+            avgSatdCost += frames[cp1]->costEst[cp1 - p0][0];
+            cnt++;
+        }
+
+        /* Identify possible scene fluctuations by comparing the satd cost of the frames.
+         * This could denote the beginning or ending of scene transitions.
+         * During a scene transition(fade in/fade outs), if fluctuate remains false,
+         * then the scene had completed its transition or stabilized */
+        if (noScenecuts)
+        {
+            fluctuate = false;
+            avgSatdCost /= cnt;
+            for (int i= p1 ; i <= maxp1; i++)
+            {
+                /* >10% deviation from the mini-GOP mean counts as fluctuation */
+                if (fabs((double)(frames[i]->costEst[i - p0][0] - avgSatdCost)) > 0.1 * avgSatdCost)
+                {
+                    fluctuate = true;
+                    if (!m_isSceneTransition && frames[i]->bScenecut)
+                    {
+                        m_isSceneTransition = true;
+                        /* just mark the first scenechange in the scene transition as a scenecut. */
+                        for (int j = i + 1; j <= maxp1; j++)
+                            frames[j]->bScenecut = false;
+                        break;
+                    }
+                }
+                frames[i]->bScenecut = false;
+            }
+        }
+        if (!fluctuate && !noScenecuts)
+            m_isSceneTransition = false; /* Signal end of scene transitioning */
+    }
+
+    /* Ignore frames that are part of a flash, i.e. cannot be real scenecuts */
+    if (!frames[p1]->bScenecut)
+        return false;
+    return scenecutInternal(frames, p0, p1, bRealScenecut);
+}
+
+/* Core scenecut test: frames[p1] is a scenecut from p0 when its inter (P)
+ * cost is nearly as large as its intra (I) cost, i.e. prediction from p0
+ * buys almost nothing.  bRealScenecut enables the adaptive bias that ramps
+ * with distance from the last keyframe; analysis passes use the fixed
+ * default bias of 0.05. */
+bool Lookahead::scenecutInternal(Lowres **frames, int p0, int p1, bool bRealScenecut)
+{
+    Lowres *frame = frames[p1];
+
+    /* make sure costEst[p1 - p0][0] has been computed */
+    CostEstimateGroup estGroup(*this, frames);
+    estGroup.singleCost(p0, p1, p1);
+
+    int64_t icost = frame->costEst[0][0];
+    int64_t pcost = frame->costEst[p1 - p0][0];
+    int gopSize = frame->frameNum - m_lastKeyframe;
+    float threshMax = (float)(m_param->scenecutThreshold / 100.0);
+
+    /* magic numbers pulled out of thin air */
+    float threshMin = (float)(threshMax * 0.25);
+    double bias = 0.05;
+    if (bRealScenecut)
+    {
+        if (m_param->keyframeMin == m_param->keyframeMax)
+            threshMin = threshMax;
+        if (gopSize <= m_param->keyframeMin / 4)
+            bias = threshMin / 4;
+        else if (gopSize <= m_param->keyframeMin)
+            bias = threshMin * gopSize / m_param->keyframeMin;
+        else
+        {
+            bias = threshMin
+                + (threshMax - threshMin)
+                * (gopSize - m_param->keyframeMin)
+                / (m_param->keyframeMax - m_param->keyframeMin);
+        }
+    }
+    bool res = pcost >= (1.0 - bias) * icost;
+    if (res && bRealScenecut)
+    {
+        int imb = frame->intraMbs[p1 - p0];
+        int pmb = m_8x8Blocks - imb;
+        /* icost/pcost are int64_t: passing them for %d is undefined behavior
+         * (varargs type mismatch) and garbles the log on LP64 targets, so
+         * print them through %lld with explicit widening casts */
+        x265_log(m_param, X265_LOG_DEBUG, "scene cut at %d Icost:%lld Pcost:%lld ratio:%.4f bias:%.4f gop:%d (imb:%d pmb:%d)\n",
+                 frame->frameNum, (long long)icost, (long long)pcost, 1. - (double)pcost / icost, bias, gopSize, imb, pmb);
+    }
+    return res;
+}
+
+/* Dynamic-programming step of trellis B-adapt: extend each previously-best
+ * shorter path with a run of 'path' B frames followed by one P, cost each
+ * candidate, and record the cheapest path of this length into best_paths
+ * (indexed modulo X265_BFRAME_MAX + 1).  paths[] is a two-slot double
+ * buffer: idx flips only when a new best is found, so the reigning best
+ * always sits at idx ^ 1 while idx is the scratch slot for the next
+ * candidate. */
+void Lookahead::slicetypePath(Lowres **frames, int length, char(*best_paths)[X265_LOOKAHEAD_MAX + 1])
+{
+    char paths[2][X265_LOOKAHEAD_MAX + 1];
+    int num_paths = X265_MIN(m_param->bframes + 1, length);
+    int64_t best_cost = 1LL << 62;
+    int idx = 0;
+
+    /* Iterate over all currently possible paths */
+    for (int path = 0; path < num_paths; path++)
+    {
+        /* Add suffixes to the current path */
+        int len = length - (path + 1);
+        memcpy(paths[idx], best_paths[len % (X265_BFRAME_MAX + 1)], len);
+        memset(paths[idx] + len, 'B', path);
+        strcpy(paths[idx] + len + path, "P");
+
+        /* Calculate the actual cost of the current path */
+        int64_t cost = slicetypePathCost(frames, paths[idx], best_cost);
+        if (cost < best_cost)
+        {
+            best_cost = cost;
+            idx ^= 1;
+        }
+    }
+
+    /* Store the best path. */
+    memcpy(best_paths[length % (X265_BFRAME_MAX + 1)], paths[idx ^ 1], length);
+}
+
+/* Total estimated cost of coding the frames along 'path' (a string of 'B'
+ * and 'P' characters, one per frame starting at frames[1]).  Sums the P
+ * frame costs plus each B frame's cost (through a pyramid middle frame when
+ * bBPyramid applies); bails out early once the running total exceeds
+ * 'threshold', the best cost found so far. */
+int64_t Lookahead::slicetypePathCost(Lowres **frames, char *path, int64_t threshold)
+{
+    int64_t cost = 0;
+    int loc = 1;
+    int cur_p = 0;
+
+    CostEstimateGroup estGroup(*this, frames);
+
+    path--; /* Since the 1st path element is really the second frame */
+    while (path[loc])
+    {
+        int next_p = loc;
+        /* Find the location of the next P-frame. */
+        while (path[next_p] != 'P')
+            next_p++;
+
+        /* Add the cost of the P-frame found above */
+        cost += estGroup.singleCost(cur_p, next_p, next_p);
+
+        /* Early terminate if the cost we have found is larger than the best path cost so far */
+        if (cost > threshold)
+            break;
+
+        if (m_param->bBPyramid && next_p - cur_p > 2)
+        {
+            /* B-pyramid: cost the middle B referenced from cur_p/next_p, then
+             * the remaining Bs referenced from the nearer anchor pair */
+            int middle = cur_p + (next_p - cur_p) / 2;
+            cost += estGroup.singleCost(cur_p, next_p, middle);
+
+            for (int next_b = loc; next_b < middle && cost < threshold; next_b++)
+                cost += estGroup.singleCost(cur_p, middle, next_b);
+
+            for (int next_b = middle + 1; next_b < next_p && cost < threshold; next_b++)
+                cost += estGroup.singleCost(middle, next_p, next_b);
+        }
+        else
+        {
+            for (int next_b = loc; next_b < next_p && cost < threshold; next_b++)
+                cost += estGroup.singleCost(cur_p, next_p, next_b);
+        }
+
+        loc = next_p + 1;
+        cur_p = next_p;
+    }
+
+    return cost;
+}
+
+/* MB-tree (CU-tree) analysis: walk the decided frames backwards from the
+ * last non-B frame, propagating each frame's cost to the references it
+ * predicts from (estimateCUPropagate), then convert the accumulated
+ * propagation into per-CU QP offsets (cuTreeFinish).  bIntra indicates
+ * frames[0] is a keyframe boundary.  Note the outer loop's index 'i' is
+ * also advanced inside the B-frame sub-loops. */
+void Lookahead::cuTree(Lowres **frames, int numframes, bool bIntra)
+{
+    int idx = !bIntra;
+    int lastnonb, curnonb = 1;
+    int bframes = 0;
+
+    x265_emms();
+    double totalDuration = 0.0;
+    for (int j = 0; j <= numframes; j++)
+        totalDuration += (double)m_param->fpsDenom / m_param->fpsNum;
+
+    double averageDuration = totalDuration / (numframes + 1);
+
+    int i = numframes;
+    int cuCount = m_8x8Width * m_8x8Height;
+
+    /* find the last non-B frame in the window */
+    while (i > 0 && frames[i]->sliceType == X265_TYPE_B)
+        i--;
+
+    lastnonb = i;
+
+    /* Lookaheadless MB-tree is not a theoretically distinct case; the same extrapolation could
+     * be applied to the end of a lookahead buffer of any size.  However, it's most needed when
+     * lookahead=0, so that's what's currently implemented. */
+    if (!m_param->lookaheadDepth)
+    {
+        if (bIntra)
+        {
+            memset(frames[0]->propagateCost, 0, cuCount * sizeof(uint16_t));
+            memcpy(frames[0]->qpCuTreeOffset, frames[0]->qpAqOffset, cuCount * sizeof(double));
+            return;
+        }
+        std::swap(frames[lastnonb]->propagateCost, frames[0]->propagateCost);
+        memset(frames[0]->propagateCost, 0, cuCount * sizeof(uint16_t));
+    }
+    else
+    {
+        if (lastnonb < idx)
+            return;
+        memset(frames[lastnonb]->propagateCost, 0, cuCount * sizeof(uint16_t));
+    }
+
+    CostEstimateGroup estGroup(*this, frames);
+
+    /* walk backwards one mini-GOP (non-B anchor plus its B frames) at a time */
+    while (i-- > idx)
+    {
+        curnonb = i;
+        while (frames[curnonb]->sliceType == X265_TYPE_B && curnonb > 0)
+            curnonb--;
+
+        if (curnonb < idx)
+            break;
+
+        estGroup.singleCost(curnonb, lastnonb, lastnonb);
+
+        memset(frames[curnonb]->propagateCost, 0, cuCount * sizeof(uint16_t));
+        bframes = lastnonb - curnonb - 1;
+        if (m_param->bBPyramid && bframes > 1)
+        {
+            /* pyramid: propagate the leaf Bs first, then the referenced middle B */
+            int middle = (bframes + 1) / 2 + curnonb;
+            estGroup.singleCost(curnonb, lastnonb, middle);
+            memset(frames[middle]->propagateCost, 0, cuCount * sizeof(uint16_t));
+            while (i > curnonb)
+            {
+                int p0 = i > middle ? middle : curnonb;
+                int p1 = i < middle ? middle : lastnonb;
+                if (i != middle)
+                {
+                    estGroup.singleCost(p0, p1, i);
+                    estimateCUPropagate(frames, averageDuration, p0, p1, i, 0);
+                }
+                i--;
+            }
+
+            estimateCUPropagate(frames, averageDuration, curnonb, lastnonb, middle, 1);
+        }
+        else
+        {
+            while (i > curnonb)
+            {
+                estGroup.singleCost(curnonb, lastnonb, i);
+                estimateCUPropagate(frames, averageDuration, curnonb, lastnonb, i, 0);
+                i--;
+            }
+        }
+        estimateCUPropagate(frames, averageDuration, curnonb, lastnonb, lastnonb, 1);
+        lastnonb = curnonb;
+    }
+
+    if (!m_param->lookaheadDepth)
+    {
+        estGroup.singleCost(0, lastnonb, lastnonb);
+        estimateCUPropagate(frames, averageDuration, 0, lastnonb, lastnonb, 1);
+        std::swap(frames[lastnonb]->propagateCost, frames[0]->propagateCost);
+    }
+
+    cuTreeFinish(frames[lastnonb], averageDuration, lastnonb);
+    if (m_param->bBPyramid && bframes > 1 && !m_param->rc.vbvBufferSize)
+        cuTreeFinish(frames[lastnonb + (bframes + 1) / 2], averageDuration, 0);
+}
+
+/* Propagate frame b's per-CU costs back to its references p0/p1 along the
+ * lowres motion vectors.  primitives.propagateCost fills m_scratch with the
+ * amount each CU should propagate; that amount is then split across the up
+ * to four reference CUs its MV lands on, bilinear-weighted by the sub-CU
+ * offset.  'referenced' is 1 when frame b is itself a reference (its own
+ * propagateCost rows are consumed), 0 for leaf B frames (source costs are
+ * treated as zero). */
+void Lookahead::estimateCUPropagate(Lowres **frames, double averageDuration, int p0, int p1, int b, int referenced)
+{
+    uint16_t *refCosts[2] = { frames[p0]->propagateCost, frames[p1]->propagateCost };
+    /* temporal position of b between p0 and p1 in 1/256 units, rounded */
+    int32_t distScaleFactor = (((b - p0) << 8) + ((p1 - p0) >> 1)) / (p1 - p0);
+    int32_t bipredWeight = m_param->bEnableWeightedBiPred ? 64 - (distScaleFactor >> 2) : 32;
+    int32_t bipredWeights[2] = { bipredWeight, 64 - bipredWeight };
+    int listDist[2] = { b - p0 - 1, p1 - b - 1 };
+
+    memset(m_scratch, 0, m_8x8Width * sizeof(int));
+
+    uint16_t *propagateCost = frames[b]->propagateCost;
+
+    x265_emms();
+    double fpsFactor = CLIP_DURATION((double)m_param->fpsDenom / m_param->fpsNum) / CLIP_DURATION(averageDuration);
+
+    /* For non-referred frames the source costs are always zero, so just memset one row and re-use it. */
+    if (!referenced)
+        memset(frames[b]->propagateCost, 0, m_8x8Width * sizeof(uint16_t));
+
+    int32_t strideInCU = m_8x8Width;
+    for (uint16_t blocky = 0; blocky < m_8x8Height; blocky++)
+    {
+        int cuIndex = blocky * strideInCU;
+        primitives.propagateCost(m_scratch, propagateCost,
+                                 frames[b]->intraCost + cuIndex, frames[b]->lowresCosts[b - p0][p1 - b] + cuIndex,
+                                 frames[b]->invQscaleFactor + cuIndex, &fpsFactor, m_8x8Width);
+
+        if (referenced)
+            propagateCost += m_8x8Width;
+
+        for (uint16_t blockx = 0; blockx < m_8x8Width; blockx++, cuIndex++)
+        {
+            int32_t propagate_amount = m_scratch[blockx];
+            /* Don't propagate for an intra block. */
+            if (propagate_amount > 0)
+            {
+                /* Access width-2 bitfield. */
+                int32_t lists_used = frames[b]->lowresCosts[b - p0][p1 - b][cuIndex] >> LOWRES_COST_SHIFT;
+                /* Follow the MVs to the previous frame(s). */
+                for (uint16_t list = 0; list < 2; list++)
+                {
+                    if ((lists_used >> list) & 1)
+                    {
+#define CLIP_ADD(s, x) (s) = (uint16_t)X265_MIN((s) + (x), (1 << 16) - 1)
+                        int32_t listamount = propagate_amount;
+                        /* Apply bipred weighting. */
+                        if (lists_used == 3)
+                            listamount = (listamount * bipredWeights[list] + 32) >> 6;
+
+                        MV *mvs = frames[b]->lowresMvs[list][listDist[list]];
+
+                        /* Early termination for simple case of mv0. */
+                        if (!mvs[cuIndex].word)
+                        {
+                            CLIP_ADD(refCosts[list][cuIndex], listamount);
+                            continue;
+                        }
+
+                        /* >>5 converts the MV to whole-CU units and the low 5
+                         * bits drive the bilinear split below (assumes 32 MV
+                         * units per lowres CU -- TODO confirm against
+                         * X265_LOWRES_CU_SIZE and MV precision) */
+                        int32_t x = mvs[cuIndex].x;
+                        int32_t y = mvs[cuIndex].y;
+                        int32_t cux = (x >> 5) + blockx;
+                        int32_t cuy = (y >> 5) + blocky;
+                        int32_t idx0 = cux + cuy * strideInCU;
+                        int32_t idx1 = idx0 + 1;
+                        int32_t idx2 = idx0 + strideInCU;
+                        int32_t idx3 = idx0 + strideInCU + 1;
+                        x &= 31;
+                        y &= 31;
+                        int32_t idx0weight = (32 - y) * (32 - x);
+                        int32_t idx1weight = (32 - y) * x;
+                        int32_t idx2weight = y * (32 - x);
+                        int32_t idx3weight = y * x;
+
+                        /* We could just clip the MVs, but pixels that lie outside the frame probably shouldn't
+                         * be counted. */
+                        if (cux < m_8x8Width - 1 && cuy < m_8x8Height - 1 && cux >= 0 && cuy >= 0)
+                        {
+                            CLIP_ADD(refCosts[list][idx0], (listamount * idx0weight + 512) >> 10);
+                            CLIP_ADD(refCosts[list][idx1], (listamount * idx1weight + 512) >> 10);
+                            CLIP_ADD(refCosts[list][idx2], (listamount * idx2weight + 512) >> 10);
+                            CLIP_ADD(refCosts[list][idx3], (listamount * idx3weight + 512) >> 10);
+                        }
+                        else /* Check offsets individually */
+                        {
+                            if (cux < m_8x8Width && cuy < m_8x8Height && cux >= 0 && cuy >= 0)
+                                CLIP_ADD(refCosts[list][idx0], (listamount * idx0weight + 512) >> 10);
+                            if (cux + 1 < m_8x8Width && cuy < m_8x8Height && cux + 1 >= 0 && cuy >= 0)
+                                CLIP_ADD(refCosts[list][idx1], (listamount * idx1weight + 512) >> 10);
+                            if (cux < m_8x8Width && cuy + 1 < m_8x8Height && cux >= 0 && cuy + 1 >= 0)
+                                CLIP_ADD(refCosts[list][idx2], (listamount * idx2weight + 512) >> 10);
+                            if (cux + 1 < m_8x8Width && cuy + 1 < m_8x8Height && cux + 1 >= 0 && cuy + 1 >= 0)
+                                CLIP_ADD(refCosts[list][idx3], (listamount * idx3weight + 512) >> 10);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    if (m_param->rc.vbvBufferSize && m_param->lookaheadDepth && referenced)
+        cuTreeFinish(frames[b], averageDuration, b == p1 ? b - p0 : 0);
+}
+
+/* Convert a frame's accumulated propagation costs into per-CU QP offsets.
+ * Each CU starts from its AQ offset and is lowered by
+ * strength * log2((intra + propagated) / intra), where strength derives
+ * from qcompress.  ref0Distance selects a weighted-prediction cost delta
+ * to fold into the ratio when weighting was in effect. */
+void Lookahead::cuTreeFinish(Lowres *frame, double averageDuration, int ref0Distance)
+{
+    int fpsFactor = (int)(CLIP_DURATION(averageDuration) / CLIP_DURATION((double)m_param->fpsDenom / m_param->fpsNum) * 256);
+    double weightdelta = 0.0;
+    if (ref0Distance && frame->weightedCostDelta[ref0Distance - 1] > 0)
+        weightdelta = (1.0 - frame->weightedCostDelta[ref0Distance - 1]);
+
+    /* Allow the strength to be adjusted via qcompress, since the two concepts
+     * are very similar. */
+    double strength = 5.0 * (1.0 - m_param->rc.qCompress);
+    int totalCUs = m_8x8Width * m_8x8Height;
+
+    for (int cu = 0; cu < totalCUs; cu++)
+    {
+        int intracost = (frame->intraCost[cu] * frame->invQscaleFactor[cu] + 128) >> 8;
+        if (!intracost)
+            continue;
+
+        int propagateCost = (frame->propagateCost[cu] * fpsFactor + 128) >> 8;
+        double log2_ratio = X265_LOG2(intracost + propagateCost) - X265_LOG2(intracost) + weightdelta;
+        frame->qpCuTreeOffset[cu] = frame->qpAqOffset[cu] - strength * log2_ratio;
+    }
+}
+
+/* If MB-tree changes the quantizers, we need to recalculate the frame cost without
+ * re-running lookahead. */
+/* If MB-tree changes the quantizers, we need to recalculate the frame cost
+ * without re-running lookahead.  B frames return their cached AQ estimate;
+ * other frames are re-weighted CU by CU with the cuTree QP offsets, also
+ * refreshing the per-row SATD totals used by VBV. */
+int64_t Lookahead::frameCostRecalculate(Lowres** frames, int p0, int p1, int b)
+{
+    if (frames[b]->sliceType == X265_TYPE_B)
+        return frames[b]->costEstAq[b - p0][p1 - b];
+
+    int64_t score = 0;
+    int *rowSatd = frames[b]->rowSatds[b - p0][p1 - b];
+    double *qpOffset = frames[b]->qpCuTreeOffset;
+
+    x265_emms();
+    for (int y = 0; y < m_8x8Height; y++)
+    {
+        rowSatd[y] = 0;
+        for (int x = 0; x < m_8x8Width; x++)
+        {
+            int cuIdx = x + y * m_8x8Width;
+            int cuCost = frames[b]->lowresCosts[b - p0][p1 - b][cuIdx] & LOWRES_COST_MASK;
+            /* re-weight the SATD cost by the cuTree QP adjustment */
+            cuCost = (cuCost * x265_exp2fix8(qpOffset[cuIdx]) + 128) >> 8;
+            rowSatd[y] += cuCost;
+
+            /* frame-edge CUs are excluded from the total unless the frame is
+             * too small to have an interior */
+            bool interior = y > 0 && y < m_8x8Height - 1 && x > 0 && x < m_8x8Width - 1;
+            if (interior || m_8x8Width <= 2 || m_8x8Height <= 2)
+                score += cuCost;
+        }
+    }
+
+    return score;
+}
+
+
+/* Estimate the cost of frame b predicted from p0/p1 on the calling thread,
+ * using the TLD slot reserved for the main thread (one past the worker
+ * count when a pool exists, otherwise slot 0). */
+int64_t CostEstimateGroup::singleCost(int p0, int p1, int b, bool intraPenalty)
+{
+    int tldIdx = m_lookahead.m_pool ? m_lookahead.m_pool->m_numWorkers : 0;
+    LookaheadTLD& tld = m_lookahead.m_tld[tldIdx];
+    return estimateFrameCost(tld, p0, p1, b, intraPenalty);
+}
+
+/* Queue one (p0, p1, b) frame-cost estimate for batch processing; the batch
+ * is flushed automatically once the job buffer is full.  A group instance
+ * may not mix batch jobs with cooperative-slice jobs. */
+void CostEstimateGroup::add(int p0, int p1, int b)
+{
+    X265_CHECK(m_batchMode || !m_jobTotal, "single CostEstimateGroup instance cannot mix batch modes\n");
+    m_batchMode = true;
+
+    int slot = m_jobTotal++;
+    m_estimates[slot].p0 = p0;
+    m_estimates[slot].p1 = p1;
+    m_estimates[slot].b = b;
+
+    if (m_jobTotal == MAX_BATCH_SIZE)
+        finishBatch();
+}
+
+/* Run all queued estimates: recruit idle pool workers if a pool exists,
+ * process jobs on the calling thread as well, then block until every bonded
+ * peer has retired before resetting the job counters. */
+void CostEstimateGroup::finishBatch()
+{
+    ThreadPool *pool = m_lookahead.m_pool;
+    if (pool)
+        tryBondPeers(*pool, m_jobTotal);
+    processTasks(-1);
+    waitForExit();
+    m_jobAcquired = 0;
+    m_jobTotal = 0;
+}
+
+/* Worker entry point: pull jobs under m_lock until none remain.  In batch
+ * mode each job is one whole-frame cost estimate; in cooperative mode each
+ * job is one horizontal slice (rows firstY..lastY) of the single estimate
+ * described by m_coop.  workerThreadID < 0 means the calling (non-pool)
+ * thread, which uses the TLD slot past the worker count.  Note the lock is
+ * released while the job runs and re-acquired before the loop test. */
+void CostEstimateGroup::processTasks(int workerThreadID)
+{
+    ThreadPool* pool = m_lookahead.m_pool;
+    int id = workerThreadID;
+    if (workerThreadID < 0)
+        id = pool ? pool->m_numWorkers : 0;
+    LookaheadTLD& tld = m_lookahead.m_tld[id];
+
+    m_lock.acquire();
+    while (m_jobAcquired < m_jobTotal)
+    {
+        int i = m_jobAcquired++;
+        m_lock.release();
+
+        if (m_batchMode)
+        {
+            ProfileLookaheadTime(tld.batchElapsedTime, tld.countBatches);
+            ProfileScopeEvent(estCostSingle);
+
+            Estimate& e = m_estimates[i];
+            estimateFrameCost(tld, e.p0, e.p1, e.b, false);
+        }
+        else
+        {
+            ProfileLookaheadTime(tld.coopSliceElapsedTime, tld.countCoopSlices);
+            ProfileScopeEvent(estCostCoop);
+
+            X265_CHECK(i < MAX_COOP_SLICES, "impossible number of coop slices\n");
+
+            /* slice i covers m_numRowsPerSlice rows; the last slice absorbs
+             * any remainder rows */
+            int firstY = m_lookahead.m_numRowsPerSlice * i;
+            int lastY = (i == m_jobTotal - 1) ? m_lookahead.m_8x8Height - 1 : m_lookahead.m_numRowsPerSlice * (i + 1) - 1;
+
+            bool lastRow = true;
+            for (int cuY = lastY; cuY >= firstY; cuY--)
+            {
+                m_frames[m_coop.b]->rowSatds[m_coop.b - m_coop.p0][m_coop.p1 - m_coop.b][cuY] = 0;
+
+                for (int cuX = m_lookahead.m_8x8Width - 1; cuX >= 0; cuX--)
+                    estimateCUCost(tld, cuX, cuY, m_coop.p0, m_coop.p1, m_coop.b, m_coop.bDoSearch, lastRow, i);
+
+                lastRow = false;
+            }
+        }
+
+        m_lock.acquire();
+    }
+    m_lock.release();
+}
+
+/* Estimate (or fetch the cached) cost of coding frame b predicted from p0
+ * (list 0) and p1 (list 1); p1 == b means a P estimate, p1 > b bidirectional.
+ * Performs any missing lowres motion searches (0x7FFF in the first MV marks
+ * "not searched yet"), optionally fanning the per-CU work out over
+ * cooperative slices when a pool is available.  bIntraPenalty adds a
+ * surcharge proportional to the fraction of intra CUs. */
+int64_t CostEstimateGroup::estimateFrameCost(LookaheadTLD& tld, int p0, int p1, int b, bool bIntraPenalty)
+{
+    Lowres*     fenc  = m_frames[b];
+    x265_param* param = m_lookahead.m_param;
+    int64_t     score = 0;
+
+    /* cached result: costEst populated and row SATDs valid */
+    if (fenc->costEst[b - p0][p1 - b] >= 0 && fenc->rowSatds[b - p0][p1 - b][0] != -1)
+        score = fenc->costEst[b - p0][p1 - b];
+    else
+    {
+        X265_CHECK(p0 != b, "I frame estimates should always be pre-calculated\n");
+
+        bool bDoSearch[2];
+        bDoSearch[0] = p0 < b && fenc->lowresMvs[0][b - p0 - 1][0].x == 0x7FFF;
+        bDoSearch[1] = p1 > b && fenc->lowresMvs[1][p1 - b - 1][0].x == 0x7FFF;
+
+#if CHECKED_BUILD
+        /* 0x7FFE marks a search claimed by this call, to catch duplicate
+         * batch scheduling */
+        X265_CHECK(!(p0 < b && fenc->lowresMvs[0][b - p0 - 1][0].x == 0x7FFE), "motion search batch duplication L0\n");
+        X265_CHECK(!(p1 > b && fenc->lowresMvs[1][p1 - b - 1][0].x == 0x7FFE), "motion search batch duplication L1\n");
+        if (bDoSearch[0]) fenc->lowresMvs[0][b - p0 - 1][0].x = 0x7FFE;
+        if (bDoSearch[1]) fenc->lowresMvs[1][p1 - b - 1][0].x = 0x7FFE;
+#endif
+
+        tld.weightedRef.isWeighted = false;
+        if (param->bEnableWeightedPred && bDoSearch[0])
+            tld.weightsAnalyse(*m_frames[b], *m_frames[p0]);
+
+        fenc->costEst[b - p0][p1 - b] = 0;
+        fenc->costEstAq[b - p0][p1 - b] = 0;
+
+        if (!m_batchMode && m_lookahead.m_numCoopSlices > 1 && ((p1 > b) || bDoSearch[0] || bDoSearch[1]))
+        {
+            /* Use cooperative mode if a thread pool is available and the cost estimate is
+             * going to need motion searches or bidir measurements */
+
+            memset(&m_slice, 0, sizeof(Slice) * m_lookahead.m_numCoopSlices);
+
+            m_lock.acquire();
+            X265_CHECK(!m_batchMode, "single CostEstimateGroup instance cannot mix batch modes\n");
+            m_coop.p0 = p0;
+            m_coop.p1 = p1;
+            m_coop.b = b;
+            m_coop.bDoSearch[0] = bDoSearch[0];
+            m_coop.bDoSearch[1] = bDoSearch[1];
+            m_jobTotal = m_lookahead.m_numCoopSlices;
+            m_jobAcquired = 0;
+            m_lock.release();
+
+            tryBondPeers(*m_lookahead.m_pool, m_jobTotal);
+
+            processTasks(-1);
+
+            waitForExit();
+
+            /* merge the per-slice partial sums */
+            for (int i = 0; i < m_lookahead.m_numCoopSlices; i++)
+            {
+                fenc->costEst[b - p0][p1 - b] += m_slice[i].costEst;
+                fenc->costEstAq[b - p0][p1 - b] += m_slice[i].costEstAq;
+                if (p1 == b)
+                    fenc->intraMbs[b - p0] += m_slice[i].intraMbs;
+            }
+        }
+        else
+        {
+            /* single-threaded path: walk CUs bottom-right to top-left */
+            bool lastRow = true;
+            for (int cuY = m_lookahead.m_8x8Height - 1; cuY >= 0; cuY--)
+            {
+                fenc->rowSatds[b - p0][p1 - b][cuY] = 0;
+
+                for (int cuX = m_lookahead.m_8x8Width - 1; cuX >= 0; cuX--)
+                    estimateCUCost(tld, cuX, cuY, p0, p1, b, bDoSearch, lastRow, -1);
+
+                lastRow = false;
+            }
+        }
+
+        score = fenc->costEst[b - p0][p1 - b];
+
+        /* discount B-frame costs; tuned against bFrameBias */
+        if (b != p1)
+            score = score * 100 / (130 + param->bFrameBias);
+
+        fenc->costEst[b - p0][p1 - b] = score;
+    }
+
+    if (bIntraPenalty)
+        // arbitrary penalty for I-blocks after B-frames
+        score += score * fenc->intraMbs[b - p0] / (tld.ncu * 8);
+
+    return score;
+}
+
+void CostEstimateGroup::estimateCUCost(LookaheadTLD& tld, int cuX, int cuY, int p0, int p1, int b, bool bDoSearch[2], bool lastRow, int slice) // cost one 8x8 lowres CU for (p0,b,p1); slice < 0 accumulates into fenc, else into m_slice[slice]
+{
+    Lowres *fref0 = m_frames[p0];
+    Lowres *fref1 = m_frames[p1];
+    Lowres *fenc  = m_frames[b];
+
+    ReferencePlanes *wfref0 = tld.weightedRef.isWeighted ? &tld.weightedRef : fref0; // use weighted list-0 ref when weightsAnalyse produced one
+
+    const int widthInCU = m_lookahead.m_8x8Width;
+    const int heightInCU = m_lookahead.m_8x8Height;
+    const int bBidir = (b < p1); // b strictly before p1 => B-frame, bidir considered
+    const int cuXY = cuX + cuY * widthInCU;
+    const int cuSize = X265_LOWRES_CU_SIZE;
+    const intptr_t pelOffset = cuSize * cuX + cuSize * cuY * fenc->lumaStride;
+
+    if (bBidir || bDoSearch[0] || bDoSearch[1]) // source PU only needed if any SATD/ME below will run
+        tld.me.setSourcePU(fenc->lowresPlane[0], fenc->lumaStride, pelOffset, cuSize, cuSize);
+
+    /* A small, arbitrary bias to avoid VBV problems caused by zero-residual lookahead blocks. */
+    int lowresPenalty = 4;
+    int listDist[2] = { b - p0 - 1, p1 - b - 1 }; // per-list index into lowresMvs/lowresMvCosts
+
+    MV mvmin, mvmax;
+    int bcost = tld.me.COST_MAX;
+    int listused = 0; // 0 = intra, 1 = L0, 2 = L1, 3 = bidir (packed into lowresCosts below)
+
+    // establish search bounds that don't cross extended frame boundaries
+    mvmin.x = (int16_t)(-cuX * cuSize - 8);
+    mvmin.y = (int16_t)(-cuY * cuSize - 8);
+    mvmax.x = (int16_t)((widthInCU - cuX - 1) * cuSize + 8);
+    mvmax.y = (int16_t)((heightInCU - cuY - 1) * cuSize + 8);
+
+    for (int i = 0; i < 1 + bBidir; i++) // list 0 always; list 1 only for B-frames
+    {
+        int& fencCost = fenc->lowresMvCosts[i][listDist[i]][cuXY];
+
+        if (!bDoSearch[i])
+        {
+            COPY2_IF_LT(bcost, fencCost, listused, i + 1); // reuse cached cost when search was done previously
+            continue;
+        }
+
+        int numc = 0;
+        MV mvc[4], mvp;
+        MV* fencMV = &fenc->lowresMvs[i][listDist[i]][cuXY];
+        ReferencePlanes* fref = i ? fref1 : wfref0;
+
+        /* Reverse-order MV prediction */
+#define MVC(mv) mvc[numc++] = mv;
+        if (cuX < widthInCU - 1)
+            MVC(fencMV[1]);
+        if (!lastRow)
+        {
+            MVC(fencMV[widthInCU]);
+            if (cuX > 0)
+                MVC(fencMV[widthInCU - 1]);
+            if (cuX < widthInCU - 1)
+                MVC(fencMV[widthInCU + 1]);
+        }
+#undef MVC
+
+        if (!numc)
+            mvp = 0; // no candidates: zero MVP
+        else
+        {
+            ALIGN_VAR_32(pixel, subpelbuf[X265_LOWRES_CU_SIZE * X265_LOWRES_CU_SIZE]);
+            int mvpcost = MotionEstimate::COST_MAX;
+
+            /* measure SATD cost of each neighbor MV (estimating merge analysis)
+             * and use the lowest cost MV as MVP (estimating AMVP). Since all
+             * mvc[] candidates are measured here, none are passed to motionEstimate */
+            for (int idx = 0; idx < numc; idx++)
+            {
+                intptr_t stride = X265_LOWRES_CU_SIZE;
+                pixel *src = fref->lowresMC(pelOffset, mvc[idx], subpelbuf, stride);
+                int cost = tld.me.bufSATD(src, stride);
+                COPY2_IF_LT(mvpcost, cost, mvp, mvc[idx]);
+            }
+        }
+
+        /* ME will never return a cost larger than the cost @MVP, so we do not
+         * have to check that ME cost is more than the estimated merge cost */
+        fencCost = tld.me.motionEstimate(fref, mvmin, mvmax, mvp, 0, NULL, s_merange, *fencMV);
+        COPY2_IF_LT(bcost, fencCost, listused, i + 1);
+    }
+
+    if (bBidir) /* B, also consider bidir */
+    {
+        /* NOTE: the wfref0 (weightp) is not used for BIDIR */
+
+        /* avg(l0-mv, l1-mv) candidate */
+        ALIGN_VAR_32(pixel, subpelbuf0[X265_LOWRES_CU_SIZE * X265_LOWRES_CU_SIZE]);
+        ALIGN_VAR_32(pixel, subpelbuf1[X265_LOWRES_CU_SIZE * X265_LOWRES_CU_SIZE]);
+        intptr_t stride0 = X265_LOWRES_CU_SIZE, stride1 = X265_LOWRES_CU_SIZE;
+        pixel *src0 = fref0->lowresMC(pelOffset, fenc->lowresMvs[0][listDist[0]][cuXY], subpelbuf0, stride0);
+        pixel *src1 = fref1->lowresMC(pelOffset, fenc->lowresMvs[1][listDist[1]][cuXY], subpelbuf1, stride1);
+
+        ALIGN_VAR_32(pixel, ref[X265_LOWRES_CU_SIZE * X265_LOWRES_CU_SIZE]);
+        primitives.pu[LUMA_8x8].pixelavg_pp(ref, X265_LOWRES_CU_SIZE, src0, stride0, src1, stride1, 32);
+        int bicost = tld.me.bufSATD(ref, X265_LOWRES_CU_SIZE);
+        COPY2_IF_LT(bcost, bicost, listused, 3);
+
+        /* coloc candidate */
+        src0 = fref0->lowresPlane[0] + pelOffset;
+        src1 = fref1->lowresPlane[0] + pelOffset;
+        primitives.pu[LUMA_8x8].pixelavg_pp(ref, X265_LOWRES_CU_SIZE, src0, fref0->lumaStride, src1, fref1->lumaStride, 32);
+        bicost = tld.me.bufSATD(ref, X265_LOWRES_CU_SIZE);
+        COPY2_IF_LT(bcost, bicost, listused, 3);
+
+        bcost += lowresPenalty;
+    }
+    else /* P, also consider intra */
+    {
+        bcost += lowresPenalty;
+
+        if (fenc->intraCost[cuXY] < bcost)
+        {
+            bcost = fenc->intraCost[cuXY];
+            listused = 0;
+        }
+    }
+
+    /* do not include edge blocks in the frame cost estimates, they are not very accurate */
+    const bool bFrameScoreCU = (cuX > 0 && cuX < widthInCU - 1 &&
+                                cuY > 0 && cuY < heightInCU - 1) || widthInCU <= 2 || heightInCU <= 2;
+
+    int bcostAq = (bFrameScoreCU && fenc->invQscaleFactor) ? ((bcost * fenc->invQscaleFactor[cuXY] + 128) >> 8) : bcost; // AQ-weighted cost, rounded, Q8 fixed point
+
+    if (bFrameScoreCU)
+    {
+        if (slice < 0) // single-threaded path: accumulate directly into frame totals
+        {
+            fenc->costEst[b - p0][p1 - b] += bcost;
+            fenc->costEstAq[b - p0][p1 - b] += bcostAq;
+            if (!listused && !bBidir)
+                fenc->intraMbs[b - p0]++;
+        }
+        else // cooperative path: accumulate per-slice, summed by the caller afterwards
+        {
+            m_slice[slice].costEst += bcost;
+            m_slice[slice].costEstAq += bcostAq;
+            if (!listused && !bBidir)
+                m_slice[slice].intraMbs++;
+        }
+    }
+
+    fenc->rowSatds[b - p0][p1 - b][cuY] += bcostAq; // per-row totals consumed by VBV lookahead
+    fenc->lowresCosts[b - p0][p1 - b][cuXY] = (uint16_t)(X265_MIN(bcost, LOWRES_COST_MASK) | (listused << LOWRES_COST_SHIFT));
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/slicetype.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,243 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_SLICETYPE_H
+#define X265_SLICETYPE_H
+
+#include "common.h"
+#include "slice.h"
+#include "motion.h"
+#include "piclist.h"
+#include "threadpool.h"
+
+namespace X265_NS {
+// private namespace
+
+struct Lowres;
+class Frame;
+class Lookahead;
+
+#define LOWRES_COST_MASK  ((1 << 14) - 1)
+#define LOWRES_COST_SHIFT 14
+
+/* Thread local data for lookahead tasks */
+struct LookaheadTLD
+{
+    MotionEstimate  me;              // HEX search, I400, QP pinned to X265_LOOKAHEAD_QP (see ctor)
+    ReferencePlanes weightedRef;     // weighted copy of a reference, produced by weightsAnalyse()
+    pixel*          wbuffer[4];      // weighted plane buffers; one allocation, only wbuffer[0] freed (see dtor)
+    int             widthInCU;       // lowres width in 8x8 blocks
+    int             heightInCU;      // lowres height in 8x8 blocks
+    int             ncu;             // presumably widthInCU * heightInCU -- set separately via init(), confirm at callers
+    int             paddedLines;
+
+#if DETAILED_CU_STATS
+    int64_t         batchElapsedTime;
+    int64_t         coopSliceElapsedTime;
+    uint64_t        countBatches;
+    uint64_t        countCoopSlices;
+#endif
+
+    LookaheadTLD()
+    {
+        me.setQP(X265_LOOKAHEAD_QP);
+        me.init(X265_HEX_SEARCH, 1, X265_CSP_I400);
+        for (int i = 0; i < 4; i++)
+            wbuffer[i] = NULL;
+        widthInCU = heightInCU = ncu = paddedLines = 0;
+
+#if DETAILED_CU_STATS
+        batchElapsedTime = 0;
+        coopSliceElapsedTime = 0;
+        countBatches = 0;
+        countCoopSlices = 0;
+#endif
+    }
+
+    void init(int w, int h, int n) // record lowres geometry; does not allocate wbuffer
+    {
+        widthInCU = w;
+        heightInCU = h;
+        ncu = n;
+    }
+
+    ~LookaheadTLD() { X265_FREE(wbuffer[0]); } // wbuffer[1..3] point into the same allocation
+
+    void calcAdaptiveQuantFrame(Frame *curFrame, x265_param* param);
+    void lowresIntraEstimate(Lowres& fenc);
+
+    void weightsAnalyse(Lowres& fenc, Lowres& ref);
+
+protected:
+
+    uint32_t acEnergyCu(Frame* curFrame, uint32_t blockX, uint32_t blockY, int csp);
+    uint32_t weightCostLuma(Lowres& fenc, Lowres& ref, WeightParam& wp);
+    bool     allocWeightedRef(Lowres& fenc);
+};
+
+class Lookahead : public JobProvider
+{
+public:
+
+    PicList       m_inputQueue;      // input pictures in order received
+    PicList       m_outputQueue;     // pictures to be encoded, in encode order
+    Lock          m_inputLock;       // guards m_inputQueue
+    Lock          m_outputLock;      // guards m_outputQueue
+
+    /* pre-lookahead */
+    int           m_fullQueueSize;   // presumably frames required before slice decisions start -- confirm in create()
+    bool          m_isActive;
+    bool          m_sliceTypeBusy;
+    bool          m_bAdaptiveQuant;
+    bool          m_outputSignalRequired;
+    bool          m_bBatchMotionSearch;
+    bool          m_bBatchFrameCosts;
+    Event         m_outputSignal;
+
+    LookaheadTLD* m_tld;             // array of thread-local contexts, one per worker
+    x265_param*   m_param;
+    Lowres*       m_lastNonB;
+    int*          m_scratch;         // temp buffer for cutree propagate
+    
+    int           m_histogram[X265_BFRAME_MAX + 1]; // presumably counts of consecutive-B runs chosen -- confirm
+    int           m_lastKeyframe;
+    int           m_8x8Width;        // lowres frame width in 8x8 blocks (see CostEstimateGroup::estimateCUCost)
+    int           m_8x8Height;       // lowres frame height in 8x8 blocks
+    int           m_8x8Blocks;       // presumably m_8x8Width * m_8x8Height -- confirm in create()
+    int           m_numCoopSlices;   // cooperative slices per frame-cost estimate (bounded by CostEstimateGroup::MAX_COOP_SLICES)
+    int           m_numRowsPerSlice; // CU rows handled by each cooperative slice
+    bool          m_filled;
+    bool          m_isSceneTransition;
+    Lookahead(x265_param *param, ThreadPool *pool);
+
+#if DETAILED_CU_STATS
+    int64_t       m_slicetypeDecideElapsedTime;
+    int64_t       m_preLookaheadElapsedTime;
+    uint64_t      m_countSlicetypeDecide;
+    uint64_t      m_countPreLookahead;
+    void          getWorkerStats(int64_t& batchElapsedTime, uint64_t& batchCount, int64_t& coopSliceElapsedTime, uint64_t& coopSliceCount);
+#endif
+
+    bool    create();
+    void    destroy();
+    void    stopJobs();
+
+    void    addPicture(Frame&, int sliceType);
+    void    flush();
+    Frame*  getDecidedPicture();
+
+    void    getEstimatedPictureCost(Frame *pic);
+
+
+protected:
+
+    void    findJob(int workerThreadID);
+    void    slicetypeDecide();
+    void    slicetypeAnalyse(Lowres **frames, bool bKeyframe);
+
+    /* called by slicetypeAnalyse() to make slice decisions */
+    bool    scenecut(Lowres **frames, int p0, int p1, bool bRealScenecut, int numFrames);
+    bool    scenecutInternal(Lowres **frames, int p0, int p1, bool bRealScenecut);
+    void    slicetypePath(Lowres **frames, int length, char(*best_paths)[X265_LOOKAHEAD_MAX + 1]);
+    int64_t slicetypePathCost(Lowres **frames, char *path, int64_t threshold);
+    int64_t vbvFrameCost(Lowres **frames, int p0, int p1, int b);
+    void    vbvLookahead(Lowres **frames, int numFrames, int keyframes);
+
+    /* called by slicetypeAnalyse() to effect cuTree adjustments to adaptive
+     * quant offsets */
+    void    cuTree(Lowres **frames, int numframes, bool bintra);
+    void    estimateCUPropagate(Lowres **frames, double average_duration, int p0, int p1, int b, int referenced);
+    void    cuTreeFinish(Lowres *frame, double averageDuration, int ref0Distance);
+
+    /* called by getEstimatedPictureCost() to finalize cuTree costs */
+    int64_t frameCostRecalculate(Lowres **frames, int p0, int p1, int b);
+};
+
+class PreLookaheadGroup : public BondedTaskGroup
+{
+public:
+
+    Frame* m_preframes[X265_LOOKAHEAD_MAX]; // frames queued for pre-lookahead work
+    Lookahead& m_lookahead;
+
+    PreLookaheadGroup(Lookahead& l) : m_lookahead(l) {}
+
+    void processTasks(int workerThreadID);
+
+protected:
+
+    PreLookaheadGroup& operator=(const PreLookaheadGroup&); // declared, never defined: non-copyable (reference member)
+};
+
+class CostEstimateGroup : public BondedTaskGroup
+{
+public:
+
+    Lookahead& m_lookahead;
+    Lowres**   m_frames;
+    bool       m_batchMode; // false: cooperative-slice mode; true: batch mode via add()/finishBatch(); modes must not mix (see check in estimateFrameCost)
+
+    CostEstimateGroup(Lookahead& l, Lowres** f) : m_lookahead(l), m_frames(f), m_batchMode(false) {}
+
+    /* Cooperative cost estimate using multiple slices of downscaled frame */
+    struct Coop
+    {
+        int  p0, b, p1;     // frame indices of the single (p0,b,p1) estimate being shared
+        bool bDoSearch[2];  // whether each list needs a motion search
+    } m_coop;
+
+    enum { MAX_COOP_SLICES = 32 };
+    struct Slice
+    {
+        int  costEst;   // per-slice partial sums, merged by the caller
+        int  costEstAq;
+        int  intraMbs;
+    } m_slice[MAX_COOP_SLICES];
+
+    int64_t singleCost(int p0, int p1, int b, bool intraPenalty = false);
+
+    /* Batch cost estimates, using one worker thread per estimateFrameCost() call */
+    enum { MAX_BATCH_SIZE = 512 };
+    struct Estimate
+    {
+        int  p0, b, p1;
+    } m_estimates[MAX_BATCH_SIZE];
+
+    void add(int p0, int p1, int b);
+    void finishBatch();
+
+protected:
+
+    static const int s_merange = 16; // lookahead motion search range, in pixels
+
+    void    processTasks(int workerThreadID);
+
+    int64_t estimateFrameCost(LookaheadTLD& tld, int p0, int p1, int b, bool intraPenalty);
+    void    estimateCUCost(LookaheadTLD& tld, int cux, int cuy, int p0, int p1, int b, bool bDoSearch[2], bool lastRow, int slice);
+
+    CostEstimateGroup& operator=(const CostEstimateGroup&); // declared, never defined: non-copyable (reference member)
+};
+
+}
+
+#endif // ifndef X265_SLICETYPE_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/encoder/weightPrediction.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,536 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Author: Shazeb Nawaz Khan <shazeb@multicorewareinc.com>
+ *         Steve Borho <steve@borho.org>
+ *         Kavitha Sampas <kavitha@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "frame.h"
+#include "picyuv.h"
+#include "lowres.h"
+#include "slice.h"
+#include "mv.h"
+#include "bitstream.h"
+
+using namespace X265_NS;
+namespace {
+struct Cache
+{
+    const int * intraCost;        // per-CU lowres intra costs, borrowed from Lowres (not owned)
+    int         numPredDir;       // 1 for P slices, 2 for B (set in weightAnalyse)
+    int         csp;              // chroma sampling (X265_CSP_*)
+    int         hshift;           // chroma horizontal shift derived from csp
+    int         vshift;           // chroma vertical shift derived from csp
+    int         lowresWidthInCU;  // lowres width in 8x8 CUs (fenc.width >> 3)
+    int         lowresHeightInCU; // lowres height in 8x8 CUs (fenc.lines >> 3)
+};
+
+int sliceHeaderCost(WeightParam *w, int lambda, int bChroma) // approximate lambda-scaled bit cost of signalling this weight in the slice header
+{
+    /* 4 times higher, because chroma is analyzed at full resolution. */
+    if (bChroma)
+        lambda *= 4;
+    int denomCost = bs_size_ue(w[0].log2WeightDenom) * (2 - bChroma); // luma denom is coded once; chroma denom shared
+    return lambda * (10 + denomCost + 2 * (bs_size_se(w[0].inputWeight) + bs_size_se(w[0].inputOffset)));
+}
+
+/* make a motion compensated copy of lowres ref into mcout with the same stride.
+ * The borders of mcout are not extended */
+void mcLuma(pixel* mcout, Lowres& ref, const MV * mvs)
+{
+    intptr_t stride = ref.lumaStride;
+    const int mvshift = 1 << 2; // MVs are in quarter-pel units
+    const int cuSize = 8;       // lowres CU size: one MV per 8x8 block
+    MV mvmin, mvmax;
+
+    int cu = 0; // linear CU index, advances with mvs[]
+
+    for (int y = 0; y < ref.lines; y += cuSize)
+    {
+        intptr_t pixoff = y * stride;
+        mvmin.y = (int16_t)((-y - 8) * mvshift);
+        mvmax.y = (int16_t)((ref.lines - y - 1 + 8) * mvshift);
+
+        for (int x = 0; x < ref.width; x += cuSize, pixoff += cuSize, cu++)
+        {
+            ALIGN_VAR_16(pixel, buf8x8[8 * 8]);
+            intptr_t bstride = 8;
+            mvmin.x = (int16_t)((-x - 8) * mvshift);
+            mvmax.x = (int16_t)((ref.width - x - 1 + 8) * mvshift);
+
+            /* clip MV to available pixels */
+            MV mv = mvs[cu];
+            mv = mv.clipped(mvmin, mvmax);
+            pixel *tmp = ref.lowresMC(pixoff, mv, buf8x8, bstride); // may return a pointer into ref or into buf8x8
+            primitives.cu[BLOCK_8x8].copy_pp(mcout + pixoff, stride, tmp, bstride);
+        }
+    }
+}
+
+/* use lowres MVs from lookahead to generate a motion compensated chroma plane.
+ * if a block had cheaper lowres cost as intra, we treat it as MV 0 */
+void mcChroma(pixel *      mcout,
+              pixel *      src,
+              intptr_t     stride,
+              const MV *   mvs,
+              const Cache& cache,
+              int          height,
+              int          width)
+{
+    /* the motion vectors correspond to 8x8 lowres luma blocks, or 16x16 fullres
+     * luma blocks. We have to adapt block size to chroma csp */
+    int csp = cache.csp;
+    int bw = 16 >> cache.hshift; // chroma block width matching a 16x16 fullres luma block
+    int bh = 16 >> cache.vshift; // chroma block height, likewise
+    const int mvshift = 1 << 2;  // MV units are quarter-pel
+    MV mvmin, mvmax;
+
+    for (int y = 0; y < height; y += bh)
+    {
+        /* note: lowres block count per row might be different from chroma block
+         * count per row because of rounding issues, so be very careful with indexing
+         * into the lowres structures */
+        int cu = y * cache.lowresWidthInCU;
+        intptr_t pixoff = y * stride;
+        mvmin.y = (int16_t)((-y - 8) * mvshift);
+        mvmax.y = (int16_t)((height - y - 1 + 8) * mvshift);
+
+        for (int x = 0; x < width; x += bw, cu++, pixoff += bw)
+        {
+            if (x < cache.lowresWidthInCU && y < cache.lowresHeightInCU) // NOTE(review): compares pixel coords to CU counts; matches upstream x265 -- verify intent
+            {
+                MV mv = mvs[cu]; // lowres MV
+                mv <<= 1;        // fullres MV
+                mv.x >>= cache.hshift;
+                mv.y >>= cache.vshift;
+
+                /* clip MV to available pixels */
+                mvmin.x = (int16_t)((-x - 8) * mvshift);
+                mvmax.x = (int16_t)((width - x - 1 + 8) * mvshift);
+                mv = mv.clipped(mvmin, mvmax);
+
+                intptr_t fpeloffset = (mv.y >> 2) * stride + (mv.x >> 2); // full-pel part of the MV
+                pixel *temp = src + pixoff + fpeloffset;
+
+                int xFrac = mv.x & 0x7;
+                int yFrac = mv.y & 0x7;
+                if ((yFrac | xFrac) == 0) // full-pel: plain copy
+                {
+                    primitives.chroma[csp].pu[LUMA_16x16].copy_pp(mcout + pixoff, stride, temp, stride);
+                }
+                else if (yFrac == 0) // horizontal-only interpolation
+                {
+                    primitives.chroma[csp].pu[LUMA_16x16].filter_hpp(temp, stride, mcout + pixoff, stride, xFrac);
+                }
+                else if (xFrac == 0) // vertical-only interpolation
+                {
+                    primitives.chroma[csp].pu[LUMA_16x16].filter_vpp(temp, stride, mcout + pixoff, stride, yFrac);
+                }
+                else // separable two-pass: horizontal to intermediate, then vertical
+                {
+                    ALIGN_VAR_16(int16_t, imm[16 * (16 + NTAPS_CHROMA)]);
+                    primitives.chroma[csp].pu[LUMA_16x16].filter_hps(temp, stride, imm, bw, xFrac, 1);
+                    primitives.chroma[csp].pu[LUMA_16x16].filter_vsp(imm + ((NTAPS_CHROMA >> 1) - 1) * bw, bw, mcout + pixoff, stride, yFrac);
+                }
+            }
+            else // outside lowres MV coverage: copy source pixels unchanged
+            {
+                primitives.chroma[csp].pu[LUMA_16x16].copy_pp(mcout + pixoff, stride, src + pixoff, stride);
+            }
+        }
+    }
+}
+
+/* Measure sum of 8x8 satd costs between source frame and reference
+ * frame (potentially weighted, potentially motion compensated). We
+ * always use source images for this analysis since reference recon
+ * pixels have unreliable availability */
+uint32_t weightCost(pixel *         fenc,
+                    pixel *         ref,
+                    pixel *         weightTemp,
+                    intptr_t        stride,
+                    const Cache &   cache,
+                    int             width,
+                    int             height,
+                    WeightParam *   w,
+                    bool            bLuma)
+{
+    if (w) // NULL w means "unweighted baseline cost"
+    {
+        /* make a weighted copy of the reference plane */
+        int offset = w->inputOffset << (X265_DEPTH - 8);
+        int weight = w->inputWeight;
+        int denom = w->log2WeightDenom;
+        int round = denom ? 1 << (denom - 1) : 0;
+        int correction = IF_INTERNAL_PREC - X265_DEPTH; /* intermediate interpolation depth */
+        int pwidth = ((width + 15) >> 4) << 4; // round width up to a multiple of 16 for weight_pp
+
+        primitives.weight_pp(ref, weightTemp, stride, pwidth, height,
+                             weight, round << correction, denom + correction, offset);
+        ref = weightTemp;
+    }
+
+    uint32_t cost = 0;
+    pixel *f = fenc, *r = ref;
+
+    if (bLuma)
+    {
+        int cu = 0;
+        for (int y = 0; y < height; y += 8, r += 8 * stride, f += 8 * stride)
+        {
+            for (int x = 0; x < width; x += 8, cu++)
+            {
+                int cmp = primitives.pu[LUMA_8x8].satd(r + x, stride, f + x, stride);
+                cost += X265_MIN(cmp, cache.intraCost[cu]); // cap by intra cost: the block would be coded intra if cheaper
+            }
+        }
+    }
+    else if (cache.csp == X265_CSP_I444) // 4:4:4 chroma: full-size 16x16 blocks
+        for (int y = 0; y < height; y += 16, r += 16 * stride, f += 16 * stride)
+            for (int x = 0; x < width; x += 16)
+                cost += primitives.pu[LUMA_16x16].satd(r + x, stride, f + x, stride);
+    else // subsampled chroma: 8x8 blocks
+        for (int y = 0; y < height; y += 8, r += 8 * stride, f += 8 * stride)
+            for (int x = 0; x < width; x += 8)
+                cost += primitives.pu[LUMA_8x8].satd(r + x, stride, f + x, stride);
+
+    return cost;
+}
+}
+
+namespace X265_NS {
+void weightAnalyse(Slice& slice, Frame& frame, x265_param& param) // choose weighted-prediction params (list, ref 0, 3 planes) for this slice
+{
+    WeightParam wp[2][MAX_NUM_REF][3];
+    PicYuv *fencPic = frame.m_fencPic;
+    Lowres& fenc    = frame.m_lowres;
+
+    Cache cache;
+
+    memset(&cache, 0, sizeof(cache));
+    cache.intraCost = fenc.intraCost;
+    cache.numPredDir = slice.isInterP() ? 1 : 2;
+    cache.lowresWidthInCU = fenc.width >> 3;
+    cache.lowresHeightInCU = fenc.lines >> 3;
+    cache.csp = fencPic->m_picCsp;
+    cache.hshift = CHROMA_H_SHIFT(cache.csp);
+    cache.vshift = CHROMA_V_SHIFT(cache.csp);
+
+    /* Use single allocation for motion compensated ref and weight buffers */
+    pixel *mcbuf = X265_MALLOC(pixel, 2 * fencPic->m_stride * fencPic->m_picHeight);
+    if (!mcbuf)
+    {
+        slice.disableWeights(); // OOM: fall back to unweighted prediction
+        return;
+    }
+    pixel *weightTemp = mcbuf + fencPic->m_stride * fencPic->m_picHeight; // second half of the allocation
+
+    int lambda = (int)x265_lambda_tab[X265_LOOKAHEAD_QP];
+    int curPoc = slice.m_poc;
+    const float epsilon = 1.f / 128.f; // tolerance for "scale is already ~1.0" early-out
+
+    int chromaDenom, lumaDenom, denom;
+    chromaDenom = lumaDenom = 7;
+    int numpixels[3];
+    int w16 = ((fencPic->m_picWidth  + 15) >> 4) << 4; // dimensions rounded up to multiples of 16
+    int h16 = ((fencPic->m_picHeight + 15) >> 4) << 4;
+    numpixels[0] = w16 * h16;
+    numpixels[1] = numpixels[2] = numpixels[0] >> (cache.hshift + cache.vshift);
+
+    for (int list = 0; list < cache.numPredDir; list++)
+    {
+        WeightParam *weights = wp[list][0]; // only ref index 0 is analysed; others reset below
+        Frame *refFrame = slice.m_refFrameList[list][0];
+        Lowres& refLowres = refFrame->m_lowres;
+        int diffPoc = abs(curPoc - refFrame->m_poc);
+
+        /* prepare estimates */
+        float guessScale[3], fencMean[3], refMean[3];
+        for (int plane = 0; plane < 3; plane++)
+        {
+            SET_WEIGHT(weights[plane], false, 1, 0, 0);
+            uint64_t fencVar = fenc.wp_ssd[plane] + !refLowres.wp_ssd[plane]; // +!ref_ssd avoids divide-by-zero below
+            uint64_t refVar  = refLowres.wp_ssd[plane] + !refLowres.wp_ssd[plane];
+            guessScale[plane] = sqrt((float)fencVar / refVar);
+            fencMean[plane] = (float)fenc.wp_sum[plane] / (numpixels[plane]) / (1 << (X265_DEPTH - 8));
+            refMean[plane]  = (float)refLowres.wp_sum[plane] / (numpixels[plane]) / (1 << (X265_DEPTH - 8));
+        }
+
+        /* make sure both our scale factors fit */
+        while (!list && chromaDenom > 0)
+        {
+            float thresh = 127.f / (1 << chromaDenom);
+            if (guessScale[1] < thresh && guessScale[2] < thresh)
+                break;
+            chromaDenom--;
+        }
+
+        SET_WEIGHT(weights[1], false, 1 << chromaDenom, chromaDenom, 0);
+        SET_WEIGHT(weights[2], false, 1 << chromaDenom, chromaDenom, 0);
+
+        MV *mvs = NULL; // lookahead MVs for MC, if available; NULL => no motion compensation
+
+        for (int plane = 0; plane < 3; plane++)
+        {
+            denom = plane ? chromaDenom : lumaDenom;
+            if (plane && !weights[0].bPresentFlag) // no luma weight => skip chroma analysis entirely
+                break;
+
+            /* Early termination */
+            x265_emms();
+            if (fabsf(refMean[plane] - fencMean[plane]) < 0.5f && fabsf(1.f - guessScale[plane]) < epsilon)
+            {
+                SET_WEIGHT(weights[plane], 0, 1 << denom, denom, 0);
+                continue;
+            }
+
+            if (plane)
+            {
+                int scale = x265_clip3(0, 255, (int)(guessScale[plane] * (1 << denom) + 0.5f));
+                if (scale > 127) // chroma weight out of signalable range
+                    continue;
+                weights[plane].inputWeight = scale;
+            }
+            else
+            {
+                weights[plane].setFromWeightAndOffset((int)(guessScale[plane] * (1 << denom) + 0.5f), 0, denom, !list);
+            }
+
+            int mindenom = weights[plane].log2WeightDenom;
+            int minscale = weights[plane].inputWeight;
+            int minoff = 0;
+
+            if (!plane && diffPoc <= param.bframes + 1) // lookahead only searched refs within the bframe window
+            {
+                mvs = fenc.lowresMvs[list][diffPoc - 1];
+
+                /* test whether this motion search was performed by lookahead */
+                if (mvs[0].x != 0x7FFF)
+                {
+                    /* reference chroma planes must be extended prior to being
+                     * used as motion compensation sources */
+                    if (!refFrame->m_bChromaExtended)
+                    {
+                        refFrame->m_bChromaExtended = true;
+                        PicYuv *refPic = refFrame->m_fencPic;
+                        int width = refPic->m_picWidth >> cache.hshift;
+                        int height = refPic->m_picHeight >> cache.vshift;
+                        extendPicBorder(refPic->m_picOrg[1], refPic->m_strideC, width, height, refPic->m_chromaMarginX, refPic->m_chromaMarginY);
+                        extendPicBorder(refPic->m_picOrg[2], refPic->m_strideC, width, height, refPic->m_chromaMarginX, refPic->m_chromaMarginY);
+                    }
+                }
+                else
+                    mvs = 0; // 0x7FFF sentinel: search not performed, do unweighted comparison
+            }
+
+            /* prepare inputs to weight analysis */
+            pixel *orig;
+            pixel *fref;
+            intptr_t stride;
+            int    width, height;
+            switch (plane)
+            {
+            case 0: // luma is analysed at lowres
+                orig = fenc.lowresPlane[0];
+                stride = fenc.lumaStride;
+                width = fenc.width;
+                height = fenc.lines;
+                fref = refLowres.lowresPlane[0];
+                if (mvs)
+                {
+                    mcLuma(mcbuf, refLowres, mvs);
+                    fref = mcbuf;
+                }
+                break;
+
+            case 1: // chroma is analysed at full resolution
+                orig = fencPic->m_picOrg[1];
+                stride = fencPic->m_strideC;
+                fref = refFrame->m_fencPic->m_picOrg[1];
+
+                /* Clamp the chroma dimensions to the nearest multiple of
+                 * 8x8 blocks (or 16x16 for 4:4:4) since mcChroma uses lowres
+                 * blocks and weightCost measures 8x8 blocks. This
+                 * potentially ignores some edge pixels, but simplifies the
+                 * logic and prevents reading uninitialized pixels. Lowres
+                 * planes are border extended and require no clamping. */
+                width =  ((fencPic->m_picWidth  >> 4) << 4) >> cache.hshift;
+                height = ((fencPic->m_picHeight >> 4) << 4) >> cache.vshift;
+                if (mvs)
+                {
+                    mcChroma(mcbuf, fref, stride, mvs, cache, height, width);
+                    fref = mcbuf;
+                }
+                break;
+
+            case 2:
+                orig = fencPic->m_picOrg[2];
+                stride = fencPic->m_strideC;
+                fref = refFrame->m_fencPic->m_picOrg[2];
+                width =  ((fencPic->m_picWidth  >> 4) << 4) >> cache.hshift;
+                height = ((fencPic->m_picHeight >> 4) << 4) >> cache.vshift;
+                if (mvs)
+                {
+                    mcChroma(mcbuf, fref, stride, mvs, cache, height, width);
+                    fref = mcbuf;
+                }
+                break;
+
+            default: // unreachable: plane is 0..2
+                slice.disableWeights();
+                X265_FREE(mcbuf);
+                return;
+            }
+
+            uint32_t origscore = weightCost(orig, fref, weightTemp, stride, cache, width, height, NULL, !plane); // unweighted baseline
+            if (!origscore)
+            {
+                SET_WEIGHT(weights[plane], 0, 1 << denom, denom, 0);
+                continue;
+            }
+
+            uint32_t minscore = origscore;
+            bool bFound = false;
+
+            /* x264 uses a table lookup here, selecting search range based on preset */
+            static const int scaleDist = 4;
+            static const int offsetDist = 2;
+
+            int startScale = x265_clip3(0, 127, minscale - scaleDist);
+            int endScale   = x265_clip3(0, 127, minscale + scaleDist);
+            for (int scale = startScale; scale <= endScale; scale++)
+            {
+                int deltaWeight = scale - (1 << mindenom);
+                if (deltaWeight > 127 || deltaWeight <= -128) // delta_luma_weight must fit in se(v) range
+                    continue;
+
+                x265_emms();
+                int curScale = scale;
+                int curOffset = (int)(fencMean[plane] - refMean[plane] * curScale / (1 << mindenom) + 0.5f);
+                if (curOffset < -128 || curOffset > 127)
+                {
+                    /* Rescale considering the constraints on curOffset. We do it in this order
+                     * because scale has a much wider range than offset (because of denom), so
+                     * it should almost never need to be clamped. */
+                    curOffset = x265_clip3(-128, 127, curOffset);
+                    curScale = (int)((1 << mindenom) * (fencMean[plane] - curOffset) / refMean[plane] + 0.5f);
+                    curScale = x265_clip3(0, 127, curScale);
+                }
+
+                int startOffset = x265_clip3(-128, 127, curOffset - offsetDist);
+                int endOffset   = x265_clip3(-128, 127, curOffset + offsetDist);
+                for (int off = startOffset; off <= endOffset; off++)
+                {
+                    WeightParam wsp;
+                    SET_WEIGHT(wsp, true, curScale, mindenom, off);
+                    uint32_t s = weightCost(orig, fref, weightTemp, stride, cache, width, height, &wsp, !plane) +
+                                 sliceHeaderCost(&wsp, lambda, !!plane);
+                    COPY4_IF_LT(minscore, s, minscale, curScale, minoff, off, bFound, true);
+
+                    /* Don't check any more offsets if the previous one had a lower cost than the current one */
+                    if (minoff == startOffset && off != startOffset)
+                        break;
+                }
+            }
+
+            /* Use a smaller luma denominator if possible */
+            if (!(plane || list))
+            {
+                while (mindenom > 0 && !(minscale & 1))
+                {
+                    mindenom--;
+                    minscale >>= 1;
+                }
+            }
+
+            if (!bFound || (minscale == (1 << mindenom) && minoff == 0) || (float)minscore / origscore > 0.998f) // reject weights buying < ~0.2% cost reduction
+            {
+                SET_WEIGHT(weights[plane], false, 1 << denom, denom, 0);
+            }
+            else
+            {
+                SET_WEIGHT(weights[plane], true, minscale, mindenom, minoff);
+            }
+        }
+
+        if (weights[0].bPresentFlag)
+        {
+            // Make sure both chroma channels match
+            if (weights[1].bPresentFlag != weights[2].bPresentFlag)
+            {
+                if (weights[1].bPresentFlag)
+                    weights[2] = weights[1];
+                else
+                    weights[1] = weights[2];
+            }
+        }
+
+        lumaDenom = weights[0].log2WeightDenom;
+        chromaDenom = weights[1].log2WeightDenom;
+
+        /* reset weight states */
+        for (int ref = 1; ref < slice.m_numRefIdx[list]; ref++) // refs beyond index 0 get unit weights
+        {
+            SET_WEIGHT(wp[list][ref][0], false, 1 << lumaDenom, lumaDenom, 0);
+            SET_WEIGHT(wp[list][ref][1], false, 1 << chromaDenom, chromaDenom, 0);
+            SET_WEIGHT(wp[list][ref][2], false, 1 << chromaDenom, chromaDenom, 0);
+        }
+    }
+
+    X265_FREE(mcbuf);
+
+    memcpy(slice.m_weightPredTable, wp, sizeof(WeightParam) * 2 * MAX_NUM_REF * 3);
+
+    if (param.logLevel >= X265_LOG_FULL)
+    {
+        char buf[1024];
+        int p = 0;
+        bool bWeighted = false;
+
+        p = sprintf(buf, "poc: %d weights:", slice.m_poc);
+        int numPredDir = slice.isInterP() ? 1 : 2;
+        for (int list = 0; list < numPredDir; list++)
+        {
+            WeightParam* w = &wp[list][0][0];
+            if (w[0].bPresentFlag || w[1].bPresentFlag || w[2].bPresentFlag)
+            {
+                bWeighted = true;
+                p += sprintf(buf + p, " [L%d:R0 ", list);
+                if (w[0].bPresentFlag)
+                    p += sprintf(buf + p, "Y{%d/%d%+d}", w[0].inputWeight, 1 << w[0].log2WeightDenom, w[0].inputOffset);
+                if (w[1].bPresentFlag)
+                    p += sprintf(buf + p, "U{%d/%d%+d}", w[1].inputWeight, 1 << w[1].log2WeightDenom, w[1].inputOffset);
+                if (w[2].bPresentFlag)
+                    p += sprintf(buf + p, "V{%d/%d%+d}", w[2].inputWeight, 1 << w[2].log2WeightDenom, w[2].inputOffset);
+                p += sprintf(buf + p, "]");
+            }
+        }
+
+        if (bWeighted)
+        {
+            if (p < 80) // pad with spaces to ensure progress line overwritten
+                sprintf(buf + p, "%*s", 80 - p, " ");
+            x265_log(&param, X265_LOG_FULL, "%s\n", buf);
+        }
+    }
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/input/input.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,38 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "input.h"
+#include "yuv.h"
+#include "y4m.h"
+
+using namespace X265_NS;
+
+InputFile* InputFile::open(InputFileInfo& info, bool bForceY4m)
+{
+    const char * s = strrchr(info.filename, '.');
+
+    if (bForceY4m || (s && !strcmp(s, ".y4m")))
+        return new Y4MInput(info);
+    else
+        return new YUVInput(info);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/input/input.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,85 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_INPUT_H
+#define X265_INPUT_H
+
+#define MIN_FRAME_WIDTH 64
+#define MAX_FRAME_WIDTH 8192
+#define MIN_FRAME_HEIGHT 64
+#define MAX_FRAME_HEIGHT 4320
+#define MIN_FRAME_RATE 1
+#define MAX_FRAME_RATE 300
+
+#include "common.h"
+
+namespace X265_NS {
+// private x265 namespace
+
+struct InputFileInfo
+{
+    /* possibly user-supplied, possibly read from file header */
+    int width;
+    int height;
+    int csp;
+    int depth;
+    int fpsNum;
+    int fpsDenom;
+    int sarWidth;
+    int sarHeight;
+    int frameCount;
+    int timebaseNum;
+    int timebaseDenom;
+
+    /* user supplied */
+    int skipFrames;
+    const char *filename;
+};
+
+class InputFile
+{
+protected:
+
+    virtual ~InputFile()  {}
+
+public:
+
+    InputFile()           {}
+
+    static InputFile* open(InputFileInfo& info, bool bForceY4m);
+
+    virtual void startReader() = 0;
+
+    virtual void release() = 0;
+
+    virtual bool readPicture(x265_picture& pic) = 0;
+
+    virtual bool isEof() const = 0;
+
+    virtual bool isFail() = 0;
+
+    virtual const char *getName() const = 0;
+};
+}
+
+#endif // ifndef X265_INPUT_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/input/y4m.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,433 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "y4m.h"
+#include "common.h"
+
+#include <iostream>
+
+#define ENABLE_THREADING 1
+
+#if _WIN32
+#include <io.h>
+#include <fcntl.h>
+#if defined(_MSC_VER)
+#pragma warning(disable: 4996) // POSIX setmode and fileno deprecated
+#endif
+#endif
+
+using namespace X265_NS;
+using namespace std;
+
+static const char header[] = "FRAME";
+
+Y4MInput::Y4MInput(InputFileInfo& info)
+{
+    for (int i = 0; i < QUEUE_SIZE; i++)
+        buf[i] = NULL;
+
+    threadActive = false;
+    colorSpace = info.csp;
+    sarWidth = info.sarWidth;
+    sarHeight = info.sarHeight;
+    width = info.width;
+    height = info.height;
+    rateNum = info.fpsNum;
+    rateDenom = info.fpsDenom;
+    depth = info.depth;
+    framesize = 0;
+
+    ifs = NULL;
+    if (!strcmp(info.filename, "-"))
+    {
+        ifs = &cin;
+#if _WIN32
+        setmode(fileno(stdin), O_BINARY);
+#endif
+    }
+    else
+        ifs = new ifstream(info.filename, ios::binary | ios::in);
+
+    if (ifs && ifs->good() && parseHeader())
+    {
+        int pixelbytes = depth > 8 ? 2 : 1;
+        for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+        {
+            int stride = (width >> x265_cli_csps[colorSpace].width[i]) * pixelbytes;
+            framesize += (stride * (height >> x265_cli_csps[colorSpace].height[i]));
+        }
+
+        threadActive = true;
+        for (int q = 0; q < QUEUE_SIZE; q++)
+        {
+            buf[q] = X265_MALLOC(char, framesize);
+            if (!buf[q])
+            {
+                x265_log(NULL, X265_LOG_ERROR, "y4m: buffer allocation failure, aborting\n");
+                threadActive = false;
+                break;
+            }
+        }
+    }
+    if (!threadActive)
+    {
+        if (ifs && ifs != &cin)
+            delete ifs;
+        ifs = NULL;
+        return;
+    }
+
+    info.width = width;
+    info.height = height;
+    info.sarHeight = sarHeight;
+    info.sarWidth = sarWidth;
+    info.fpsNum = rateNum;
+    info.fpsDenom = rateDenom;
+    info.csp = colorSpace;
+    info.depth = depth;
+    info.frameCount = -1;
+
+    size_t estFrameSize = framesize + strlen(header) + 1; /* assume basic FRAME\n headers */
+
+    /* try to estimate frame count, if this is not stdin */
+    if (ifs != &cin)
+    {
+        istream::pos_type cur = ifs->tellg();
+
+#if defined(_MSC_VER) && _MSC_VER < 1700
+        /* Older MSVC versions cannot handle 64bit file sizes properly, so go native */
+        HANDLE hFile = CreateFileA(info.filename, GENERIC_READ,
+                                   FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING,
+                                   FILE_ATTRIBUTE_NORMAL, NULL);
+        if (hFile != INVALID_HANDLE_VALUE)
+        {
+            LARGE_INTEGER size;
+            if (GetFileSizeEx(hFile, &size))
+                info.frameCount = (int)((size.QuadPart - (int64_t)cur) / estFrameSize);
+            CloseHandle(hFile);
+        }
+#else // if defined(_MSC_VER) && _MSC_VER < 1700
+        if (cur >= 0)
+        {
+            ifs->seekg(0, ios::end);
+            istream::pos_type size = ifs->tellg();
+            ifs->seekg(cur, ios::beg);
+            if (size > 0)
+                info.frameCount = (int)((size - cur) / estFrameSize);
+        }
+#endif // if defined(_MSC_VER) && _MSC_VER < 1700
+    }
+
+    if (info.skipFrames)
+    {
+#if X86_64
+        ifs->seekg((uint64_t)estFrameSize * info.skipFrames, ios::cur);
+#else
+        for (int i = 0; i < info.skipFrames; i++)
+            ifs->ignore(estFrameSize);
+#endif
+    }
+}
+
+Y4MInput::~Y4MInput()
+{
+    if (ifs && ifs != &cin)
+        delete ifs;
+
+    for (int i = 0; i < QUEUE_SIZE; i++)
+        X265_FREE(buf[i]);
+}
+
+void Y4MInput::release()
+{
+    threadActive = false;
+    readCount.poke();
+    stop();
+    delete this;
+}
+
+bool Y4MInput::parseHeader()
+{
+    if (!ifs)
+        return false;
+
+    int csp = 0;
+    int d = 0;
+
+    while (ifs->good())
+    {
+        // Skip Y4MPEG string
+        int c = ifs->get();
+        while (ifs->good() && (c != ' ') && (c != '\n'))
+            c = ifs->get();
+
+        while (c == ' ' && ifs->good())
+        {
+            // read parameter identifier
+            switch (ifs->get())
+            {
+            case 'W':
+                width = 0;
+                while (ifs->good())
+                {
+                    c = ifs->get();
+
+                    if (c == ' ' || c == '\n')
+                        break;
+                    else
+                        width = width * 10 + (c - '0');
+                }
+                break;
+
+            case 'H':
+                height = 0;
+                while (ifs->good())
+                {
+                    c = ifs->get();
+                    if (c == ' ' || c == '\n')
+                        break;
+                    else
+                        height = height * 10 + (c - '0');
+                }
+                break;
+
+            case 'F':
+                rateNum = 0;
+                rateDenom = 0;
+                while (ifs->good())
+                {
+                    c = ifs->get();
+                    if (c == '.')
+                    {
+                        rateDenom = 1;
+                        while (ifs->good())
+                        {
+                            c = ifs->get();
+                            if (c == ' ' || c == '\n')
+                                break;
+                            else
+                            {
+                                rateNum = rateNum * 10 + (c - '0');
+                                rateDenom = rateDenom * 10;
+                            }
+                        }
+                        break;
+                    }
+                    else if (c == ':')
+                    {
+                        while (ifs->good())
+                        {
+                            c = ifs->get();
+                            if (c == ' ' || c == '\n')
+                                break;
+                            else
+                                rateDenom = rateDenom * 10 + (c - '0');
+                        }
+                        break;
+                    }
+                    else
+                        rateNum = rateNum * 10 + (c - '0');
+                }
+                break;
+
+            case 'A':
+                sarWidth = 0;
+                sarHeight = 0;
+                while (ifs->good())
+                {
+                    c = ifs->get();
+                    if (c == ':')
+                    {
+                        while (ifs->good())
+                        {
+                            c = ifs->get();
+                            if (c == ' ' || c == '\n')
+                                break;
+                            else
+                                sarHeight = sarHeight * 10 + (c - '0');
+                        }
+                        break;
+                    }
+                    else
+                        sarWidth = sarWidth * 10 + (c - '0');
+                }
+                break;
+
+            case 'C':
+                csp = 0;
+                d = 0;
+                while (ifs->good())
+                {
+                    c = ifs->get();
+
+                    if (c <= '9' && c >= '0')
+                        csp = csp * 10 + (c - '0');
+                    else if (c == 'p')
+                    {
+                        // example: C420p16
+                        while (ifs->good())
+                        {
+                            c = ifs->get();
+
+                            if (c <= '9' && c >= '0')
+                                d = d * 10 + (c - '0');
+                            else
+                                break;
+                        }
+                        break;
+                    }
+                    else
+                        break;
+                }
+
+                if (d >= 8 && d <= 16)
+                    depth = d;
+                colorSpace = (csp == 444) ? X265_CSP_I444 : (csp == 422) ? X265_CSP_I422 : X265_CSP_I420;
+                break;
+
+            default:
+                while (ifs->good())
+                {
+                    // consume this unsupported configuration word
+                    c = ifs->get();
+                    if (c == ' ' || c == '\n')
+                        break;
+                }
+                break;
+            }
+        }
+
+        if (c == '\n')
+            break;
+    }
+
+    if (width < MIN_FRAME_WIDTH || width > MAX_FRAME_WIDTH ||
+        height < MIN_FRAME_HEIGHT || height > MAX_FRAME_HEIGHT ||
+        rateDenom == 0 || (rateNum / rateDenom) < 1 || (rateNum / rateDenom) > MAX_FRAME_RATE ||
+        colorSpace <= X265_CSP_I400 || colorSpace >= X265_CSP_COUNT)
+        return false;
+
+    return true;
+}
+
+void Y4MInput::startReader()
+{
+#if ENABLE_THREADING
+    if (threadActive)
+        start();
+#endif
+}
+
+void Y4MInput::threadMain()
+{
+    THREAD_NAME("Y4MRead", 0);
+    do
+    {
+        if (!populateFrameQueue())
+            break;
+    }
+    while (threadActive);
+
+    threadActive = false;
+    writeCount.poke();
+}
+
+bool Y4MInput::populateFrameQueue()
+{
+    if (!ifs || ifs->fail())
+        return false;
+
+    /* strip off the FRAME header */
+    char hbuf[sizeof(header)];
+
+    ifs->read(hbuf, strlen(header));
+    if (ifs->eof())
+        return false;
+
+    if (!ifs->good() || memcmp(hbuf, header, strlen(header)))
+    {
+        x265_log(NULL, X265_LOG_ERROR, "y4m: frame header missing\n");
+        return false;
+    }
+
+    /* consume bytes up to line feed */
+    int c = ifs->get();
+    while (c != '\n' && ifs->good())
+        c = ifs->get();
+
+    /* wait for room in the ring buffer */
+    int written = writeCount.get();
+    int read = readCount.get();
+    while (written - read > QUEUE_SIZE - 2)
+    {
+        read = readCount.waitForChange(read);
+        if (!threadActive)
+            return false;
+    }
+
+    ProfileScopeEvent(frameRead);
+    ifs->read(buf[written % QUEUE_SIZE], framesize);
+    if (ifs->good())
+    {
+        writeCount.incr();
+        return true;
+    }
+    else
+        return false;
+}
+
+bool Y4MInput::readPicture(x265_picture& pic)
+{
+    int read = readCount.get();
+    int written = writeCount.get();
+
+#if ENABLE_THREADING
+
+    /* only wait if the read thread is still active */
+    while (threadActive && read == written)
+        written = writeCount.waitForChange(written);
+
+#else
+
+    populateFrameQueue();
+
+#endif // if ENABLE_THREADING
+
+    if (read < written)
+    {
+        int pixelbytes = depth > 8 ? 2 : 1;
+        pic.bitDepth = depth;
+        pic.colorSpace = colorSpace;
+        pic.stride[0] = width * pixelbytes;
+        pic.stride[1] = pic.stride[0] >> x265_cli_csps[colorSpace].width[1];
+        pic.stride[2] = pic.stride[0] >> x265_cli_csps[colorSpace].width[2];
+        pic.planes[0] = buf[read % QUEUE_SIZE];
+        pic.planes[1] = (char*)pic.planes[0] + pic.stride[0] * height;
+        pic.planes[2] = (char*)pic.planes[1] + pic.stride[1] * (height >> x265_cli_csps[colorSpace].height[1]);
+        readCount.incr();
+        return true;
+    }
+    else
+        return false;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/input/y4m.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,94 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_Y4M_H
+#define X265_Y4M_H
+
+#include "input.h"
+#include "threading.h"
+#include <fstream>
+
+#define QUEUE_SIZE 5
+
+namespace X265_NS {
+// x265 private namespace
+
+class Y4MInput : public InputFile, public Thread
+{
+protected:
+
+    uint32_t rateNum;
+
+    uint32_t rateDenom;
+
+    uint32_t sarWidth;
+
+    uint32_t sarHeight;
+
+    size_t framesize;
+
+    int depth;
+
+    int width;
+
+    int height;
+
+    int colorSpace;
+
+    bool threadActive;
+
+    ThreadSafeInteger readCount;
+
+    ThreadSafeInteger writeCount;
+
+    char* buf[QUEUE_SIZE];
+
+    std::istream *ifs;
+
+    bool parseHeader();
+
+    void threadMain();
+
+    bool populateFrameQueue();
+
+public:
+
+    Y4MInput(InputFileInfo& info);
+
+    virtual ~Y4MInput();
+
+    void release();
+
+    bool isEof() const            { return ifs && ifs->eof();  }
+
+    bool isFail()                 { return !(ifs && !ifs->fail() && threadActive); }
+
+    void startReader();
+
+    bool readPicture(x265_picture&);
+
+    const char *getName() const   { return "y4m"; }
+};
+}
+
+#endif // ifndef X265_Y4M_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/input/yuv.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,239 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "yuv.h"
+#include "common.h"
+
+#include <iostream>
+
+#define ENABLE_THREADING 1
+
+#if _WIN32
+#include <io.h>
+#include <fcntl.h>
+#if defined(_MSC_VER)
+#pragma warning(disable: 4996) // POSIX setmode and fileno deprecated
+#endif
+#endif
+
+using namespace X265_NS;
+using namespace std;
+
+YUVInput::YUVInput(InputFileInfo& info)
+{
+    for (int i = 0; i < QUEUE_SIZE; i++)
+        buf[i] = NULL;
+
+    depth = info.depth;
+    width = info.width;
+    height = info.height;
+    colorSpace = info.csp;
+    threadActive = false;
+    ifs = NULL;
+
+    uint32_t pixelbytes = depth > 8 ? 2 : 1;
+    framesize = 0;
+    for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+    {
+        uint32_t w = width >> x265_cli_csps[colorSpace].width[i];
+        uint32_t h = height >> x265_cli_csps[colorSpace].height[i];
+        framesize += w * h * pixelbytes;
+    }
+
+    if (width == 0 || height == 0 || info.fpsNum == 0 || info.fpsDenom == 0)
+    {
+        x265_log(NULL, X265_LOG_ERROR, "yuv: width, height, and FPS must be specified\n");
+        return;
+    }
+
+    if (!strcmp(info.filename, "-"))
+    {
+        ifs = &cin;
+#if _WIN32
+        setmode(fileno(stdin), O_BINARY);
+#endif
+    }
+    else
+        ifs = new ifstream(info.filename, ios::binary | ios::in);
+
+    if (ifs && ifs->good())
+        threadActive = true;
+    else
+    {
+        if (ifs && ifs != &cin)
+            delete ifs;
+        ifs = NULL;
+        return;
+    }
+
+    for (uint32_t i = 0; i < QUEUE_SIZE; i++)
+    {
+        buf[i] = X265_MALLOC(char, framesize);
+        if (buf[i] == NULL)
+        {
+            x265_log(NULL, X265_LOG_ERROR, "yuv: buffer allocation failure, aborting\n");
+            threadActive = false;
+            return;
+        }
+    }
+
+    info.frameCount = -1;
+
+    /* try to estimate frame count, if this is not stdin */
+    if (ifs != &cin)
+    {
+        istream::pos_type cur = ifs->tellg();
+
+#if defined(_MSC_VER) && _MSC_VER < 1700
+        /* Older MSVC versions cannot handle 64bit file sizes properly, so go native */
+        HANDLE hFile = CreateFileA(info.filename, GENERIC_READ,
+                                   FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING,
+                                   FILE_ATTRIBUTE_NORMAL, NULL);
+        if (hFile != INVALID_HANDLE_VALUE)
+        {
+            LARGE_INTEGER size;
+            if (GetFileSizeEx(hFile, &size))
+                info.frameCount = (int)((size.QuadPart - (int64_t)cur) / framesize);
+            CloseHandle(hFile);
+        }
+#else // if defined(_MSC_VER) && _MSC_VER < 1700
+        if (cur >= 0)
+        {
+            ifs->seekg(0, ios::end);
+            istream::pos_type size = ifs->tellg();
+            ifs->seekg(cur, ios::beg);
+            if (size > 0)
+                info.frameCount = (int)((size - cur) / framesize);
+        }
+#endif // if defined(_MSC_VER) && _MSC_VER < 1700
+    }
+
+    if (info.skipFrames)
+    {
+#if X86_64
+        ifs->seekg((uint64_t)framesize * info.skipFrames, ios::cur);
+#else
+        for (int i = 0; i < info.skipFrames; i++)
+            ifs->ignore(framesize);
+#endif
+    }
+}
+
+YUVInput::~YUVInput()
+{
+    if (ifs && ifs != &cin)
+        delete ifs;
+    for (int i = 0; i < QUEUE_SIZE; i++)
+        X265_FREE(buf[i]);
+}
+
+void YUVInput::release()
+{
+    threadActive = false;
+    readCount.poke();
+    stop();
+    delete this;
+}
+
+void YUVInput::startReader()
+{
+#if ENABLE_THREADING
+    if (threadActive)
+        start();
+#endif
+}
+
+void YUVInput::threadMain()
+{
+    THREAD_NAME("YUVRead", 0);
+    while (threadActive)
+    {
+        if (!populateFrameQueue())
+            break;
+    }
+
+    threadActive = false;
+    writeCount.poke();
+}
+
+bool YUVInput::populateFrameQueue()
+{
+    if (!ifs || ifs->fail())
+        return false;
+
+    /* wait for room in the ring buffer */
+    int written = writeCount.get();
+    int read = readCount.get();
+    while (written - read > QUEUE_SIZE - 2)
+    {
+        read = readCount.waitForChange(read);
+        if (!threadActive)
+            // release() has been called
+            return false;
+    }
+
+    ProfileScopeEvent(frameRead);
+    ifs->read(buf[written % QUEUE_SIZE], framesize);
+    if (ifs->good())
+    {
+        writeCount.incr();
+        return true;
+    }
+    else
+        return false;
+}
+
+bool YUVInput::readPicture(x265_picture& pic)
+{
+    int read = readCount.get();
+    int written = writeCount.get();
+
+#if ENABLE_THREADING
+
+    /* only wait if the read thread is still active */
+    while (threadActive && read == written)
+        written = writeCount.waitForChange(written);
+
+#else
+
+    populateFrameQueue();
+
+#endif // if ENABLE_THREADING
+
+    if (read < written)
+    {
+        uint32_t pixelbytes = depth > 8 ? 2 : 1;
+        pic.colorSpace = colorSpace;
+        pic.bitDepth = depth;
+        pic.stride[0] = width * pixelbytes;
+        pic.stride[1] = pic.stride[0] >> x265_cli_csps[colorSpace].width[1];
+        pic.stride[2] = pic.stride[0] >> x265_cli_csps[colorSpace].width[2];
+        pic.planes[0] = buf[read % QUEUE_SIZE];
+        pic.planes[1] = (char*)pic.planes[0] + pic.stride[0] * height;
+        pic.planes[2] = (char*)pic.planes[1] + pic.stride[1] * (height >> x265_cli_csps[colorSpace].height[1]);
+        readCount.incr();
+        return true;
+    }
+    else
+        return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/input/yuv.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,86 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_YUV_H
+#define X265_YUV_H
+
+#include "input.h"
+#include "threading.h"
+#include <fstream>
+
+#define QUEUE_SIZE 5
+
+namespace X265_NS {
+// private x265 namespace
+
+class YUVInput : public InputFile, public Thread
+{
+protected:
+
+    int width;
+
+    int height;
+
+    int colorSpace; ///< source Color Space Parameter
+
+    uint32_t depth;
+
+    uint32_t framesize;
+
+    bool threadActive;
+
+    ThreadSafeInteger readCount;
+
+    ThreadSafeInteger writeCount;
+
+    char* buf[QUEUE_SIZE];
+
+    std::istream *ifs;
+
+    int guessFrameCount();
+
+    void threadMain();
+
+    bool populateFrameQueue();
+
+public:
+
+    YUVInput(InputFileInfo& info);
+
+    virtual ~YUVInput();
+
+    void release();
+
+    bool isEof() const                            { return ifs && ifs->eof();  }
+
+    bool isFail()                                 { return !(ifs && !ifs->fail() && threadActive); }
+
+    void startReader();
+
+    bool readPicture(x265_picture&);
+
+    const char *getName() const                   { return "yuv"; }
+};
+}
+
+#endif // ifndef X265_YUV_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/output/output.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,46 @@
+/*****************************************************************************
+ * Copyright (C) 2013-2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Xinyue Lu <i@7086.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "output.h"
+#include "yuv.h"
+#include "y4m.h"
+
+#include "raw.h"
+
+using namespace X265_NS;
+
+ReconFile* ReconFile::open(const char *fname, int width, int height, uint32_t bitdepth, uint32_t fpsNum, uint32_t fpsDenom, int csp)
+{
+    const char * s = strrchr(fname, '.');
+
+    if (s && !strcmp(s, ".y4m"))
+        return new Y4MOutput(fname, width, height, fpsNum, fpsDenom, csp);
+    else
+        return new YUVOutput(fname, width, height, bitdepth, csp);
+}
+
+OutputFile* OutputFile::open(const char *fname, InputFileInfo& inputInfo)
+{
+    return new RAWOutput(fname, inputInfo);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/output/output.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,86 @@
+/*****************************************************************************
+ * Copyright (C) 2013-2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Xinyue Lu <i@7086.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_OUTPUT_H
+#define X265_OUTPUT_H
+
+#include "x265.h"
+#include "input/input.h"
+
+namespace X265_NS {
+// private x265 namespace
+
+class ReconFile
+{
+protected:
+
+    virtual ~ReconFile()  {}
+
+public:
+
+    ReconFile()           {}
+
+    static ReconFile* open(const char *fname, int width, int height, uint32_t bitdepth,
+                           uint32_t fpsNum, uint32_t fpsDenom, int csp);
+
+    virtual bool isFail() const = 0;
+
+    virtual void release() = 0;
+
+    virtual bool writePicture(const x265_picture& pic) = 0;
+
+    virtual const char *getName() const = 0;
+};
+
+class OutputFile
+{
+protected:
+
+    virtual ~OutputFile() {}
+
+public:
+
+    OutputFile() {}
+
+    static OutputFile* open(const char* fname, InputFileInfo& inputInfo);
+
+    virtual bool isFail() const = 0;
+
+    virtual bool needPTS() const = 0;
+
+    virtual void release() = 0;
+
+    virtual const char* getName() const = 0;
+
+    virtual void setParam(x265_param* param) = 0;
+
+    virtual int writeHeaders(const x265_nal* nal, uint32_t nalcount) = 0;
+
+    virtual int writeFrame(const x265_nal* nal, uint32_t nalcount, x265_picture& pic) = 0;
+
+    virtual void closeFile(int64_t largest_pts, int64_t second_largest_pts) = 0;
+};
+}
+
+#endif // ifndef X265_OUTPUT_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/output/raw.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,80 @@
+/*****************************************************************************
+ * Copyright (C) 2013-2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Xinyue Lu <i@7086.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "raw.h"
+
+using namespace X265_NS;
+using namespace std;
+
+RAWOutput::RAWOutput(const char* fname, InputFileInfo&)
+{
+    b_fail = false;
+    if (!strcmp(fname, "-"))
+    {
+        ofs = &cout;
+        return;
+    }
+    ofs = new ofstream(fname, ios::binary | ios::out);
+    if (ofs->fail())
+        b_fail = true;
+}
+
+void RAWOutput::setParam(x265_param* param)
+{
+    param->bAnnexB = true;
+}
+
+int RAWOutput::writeHeaders(const x265_nal* nal, uint32_t nalcount)
+{
+    uint32_t bytes = 0;
+
+    for (uint32_t i = 0; i < nalcount; i++)
+    {
+        ofs->write((const char*)nal->payload, nal->sizeBytes);
+        bytes += nal->sizeBytes;
+        nal++;
+    }
+
+    return bytes;
+}
+
+int RAWOutput::writeFrame(const x265_nal* nal, uint32_t nalcount, x265_picture&)
+{
+    uint32_t bytes = 0;
+
+    for (uint32_t i = 0; i < nalcount; i++)
+    {
+        ofs->write((const char*)nal->payload, nal->sizeBytes);
+        bytes += nal->sizeBytes;
+        nal++;
+    }
+
+    return bytes;
+}
+
+void RAWOutput::closeFile(int64_t, int64_t)
+{
+    if (ofs != &cout)
+        delete ofs;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/output/raw.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,64 @@
+/*****************************************************************************
+ * Copyright (C) 2013-2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Xinyue Lu <i@7086.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_HEVC_RAW_H
+#define X265_HEVC_RAW_H
+
+#include "output.h"
+#include "common.h"
+#include <fstream>
+#include <iostream>
+
+namespace X265_NS {
+class RAWOutput : public OutputFile
+{
+protected:
+
+    std::ostream* ofs;
+
+    bool b_fail;
+
+public:
+
+    RAWOutput(const char* fname, InputFileInfo&);
+
+    bool isFail() const { return b_fail; }
+
+    bool needPTS() const { return false; }
+
+    void release() { delete this; }
+
+    const char* getName() const { return "raw"; }
+
+    void setParam(x265_param* param);
+
+    int writeHeaders(const x265_nal* nal, uint32_t nalcount);
+
+    int writeFrame(const x265_nal* nal, uint32_t nalcount, x265_picture&);
+
+    void closeFile(int64_t largest_pts, int64_t second_largest_pts);
+};
+}
+
+#endif // ifndef X265_HEVC_RAW_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/output/reconplay.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,197 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Peixuan Zhang <zhangpeixuancn@gmail.com>
+ *          Chunli Zhang <chunli@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "reconplay.h"
+
+#include <signal.h>
+
+using namespace X265_NS;
+
+#if _WIN32
+#define popen  _popen
+#define pclose _pclose
+#define pipemode "wb"
+#else
+#define pipemode "w"
+#endif
+
+bool ReconPlay::pipeValid;
+
+#ifndef _WIN32
+static void sigpipe_handler(int)
+{
+    if (ReconPlay::pipeValid)
+        general_log(NULL, "exec", X265_LOG_ERROR, "pipe closed\n");
+    ReconPlay::pipeValid = false;
+}
+#endif
+
+ReconPlay::ReconPlay(const char* commandLine, x265_param& param)
+{
+#ifndef _WIN32
+    if (signal(SIGPIPE, sigpipe_handler) == SIG_ERR)
+        general_log(&param, "exec", X265_LOG_ERROR, "Unable to register SIGPIPE handler: %s\n", strerror(errno));
+#endif
+
+    width = param.sourceWidth;
+    height = param.sourceHeight;
+    colorSpace = param.internalCsp;
+
+    frameSize = 0;
+    for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+        frameSize += (uint32_t)((width >> x265_cli_csps[colorSpace].width[i]) * (height >> x265_cli_csps[colorSpace].height[i]));
+
+    for (int i = 0; i < RECON_BUF_SIZE; i++)
+    {
+        poc[i] = -1;
+        CHECKED_MALLOC(frameData[i], pixel, frameSize);
+    }
+
+    outputPipe = popen(commandLine, pipemode);
+    if (outputPipe)
+    {
+        const char* csp = (colorSpace >= X265_CSP_I444) ? "444" : (colorSpace >= X265_CSP_I422) ? "422" : "420";
+        const char* depth = (param.internalBitDepth == 10) ? "p10" : "";
+
+        fprintf(outputPipe, "YUV4MPEG2 W%d H%d F%d:%d Ip C%s%s\n", width, height, param.fpsNum, param.fpsDenom, csp, depth);
+
+        pipeValid = true;
+        threadActive = true;
+        start();
+        return;
+    }
+    else
+        general_log(&param, "exec", X265_LOG_ERROR, "popen(%s) failed\n", commandLine);
+
+fail:
+    threadActive = false;
+}
+
+ReconPlay::~ReconPlay()
+{
+    if (threadActive)
+    {
+        threadActive = false;
+        writeCount.poke();
+        stop();
+    }
+
+    if (outputPipe) 
+        pclose(outputPipe);
+
+    for (int i = 0; i < RECON_BUF_SIZE; i++)
+        X265_FREE(frameData[i]);
+}
+
+bool ReconPlay::writePicture(const x265_picture& pic)
+{
+    if (!threadActive || !pipeValid)
+        return false;
+
+    int written = writeCount.get();
+    int read = readCount.get();
+    int currentCursor = pic.poc % RECON_BUF_SIZE;
+
+    /* TODO: it's probably better to drop recon pictures when the ring buffer is
+     * backed up on the display app */
+    while (written - read > RECON_BUF_SIZE - 2 || poc[currentCursor] != -1)
+    {
+        read = readCount.waitForChange(read);
+        if (!threadActive)
+            return false;
+    }
+
+    X265_CHECK(pic.colorSpace == colorSpace, "invalid color space\n");
+    X265_CHECK(pic.bitDepth == X265_DEPTH,   "invalid bit depth\n");
+
+    pixel* buf = frameData[currentCursor];
+    for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+    {
+        char* src = (char*)pic.planes[i];
+        int pwidth = width >> x265_cli_csps[colorSpace].width[i];
+
+        for (int h = 0; h < height >> x265_cli_csps[colorSpace].height[i]; h++)
+        {
+            memcpy(buf, src, pwidth * sizeof(pixel));
+            src += pic.stride[i];
+            buf += pwidth;
+        }
+    }
+
+    poc[currentCursor] = pic.poc;
+    writeCount.incr();
+
+    return true;
+}
+
+void ReconPlay::threadMain()
+{
+    THREAD_NAME("ReconPlayOutput", 0);
+
+    do
+    {
+        /* extract the next output picture in display order and write to pipe */
+        if (!outputFrame())
+            break;
+    }
+    while (threadActive);
+
+    threadActive = false;
+    readCount.poke();
+}
+
+bool ReconPlay::outputFrame()
+{
+    int written = writeCount.get();
+    int read = readCount.get();
+    int currentCursor = read % RECON_BUF_SIZE;
+
+    while (poc[currentCursor] != read)
+    {
+        written = writeCount.waitForChange(written);
+        if (!threadActive)
+            return false;
+    }
+
+    char* buf = (char*)frameData[currentCursor];
+    intptr_t remainSize = frameSize * sizeof(pixel);
+
+    fprintf(outputPipe, "FRAME\n");
+    while (remainSize > 0)
+    {
+        intptr_t retCount = (intptr_t)fwrite(buf, sizeof(char), remainSize, outputPipe);
+
+        if (retCount < 0 || !pipeValid)
+            /* pipe failure, stop writing and start dropping recon pictures */
+            return false;
+    
+        buf += retCount;
+        remainSize -= retCount;
+    }
+
+    poc[currentCursor] = -1;
+    readCount.incr();
+    return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/output/reconplay.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,74 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Peixuan Zhang <zhangpeixuancn@gmail.com>
+ *          Chunli Zhang <chunli@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_RECONPLAY_H
+#define X265_RECONPLAY_H
+
+#include "x265.h"
+#include "threading.h"
+#include <cstdio>
+
+namespace X265_NS {
+// private x265 namespace
+
+class ReconPlay : public Thread
+{
+public:
+
+    ReconPlay(const char* commandLine, x265_param& param);
+
+    virtual ~ReconPlay();
+
+    bool writePicture(const x265_picture& pic);
+
+    static bool pipeValid;
+
+protected:
+
+    enum { RECON_BUF_SIZE = 40 };
+
+    FILE*  outputPipe;     /* The output pipe for player */
+    size_t frameSize;      /* size of one frame in pixels */
+    bool   threadActive;   /* worker thread is active */
+    int    width;          /* width of frame */
+    int    height;         /* height of frame */
+    int    colorSpace;     /* color space of frame */
+
+    int    poc[RECON_BUF_SIZE];
+    pixel* frameData[RECON_BUF_SIZE];
+
+    /* Note that the class uses read and write counters to signal that reads and
+     * writes have occurred in the ring buffer, but writes into the buffer
+     * happen in decode order and the reader must check that the POC it next
+     * needs to send to the pipe is in fact present.  The counters are used to
+     * prevent the writer from getting too far ahead of the reader */
+    ThreadSafeInteger readCount;
+    ThreadSafeInteger writeCount;
+
+    void threadMain();
+    bool outputFrame();
+};
+}
+
+#endif // ifndef X265_RECONPLAY_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/output/y4m.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,109 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "output.h"
+#include "y4m.h"
+
+using namespace X265_NS;
+using namespace std;
+
+Y4MOutput::Y4MOutput(const char *filename, int w, int h, uint32_t fpsNum, uint32_t fpsDenom, int csp)
+    : width(w)
+    , height(h)
+    , colorSpace(csp)
+    , frameSize(0)
+{
+    ofs.open(filename, ios::binary | ios::out);
+    buf = new char[width];
+
+    const char *cf = (csp >= X265_CSP_I444) ? "444" : (csp >= X265_CSP_I422) ? "422" : "420";
+
+    if (ofs)
+    {
+        ofs << "YUV4MPEG2 W" << width << " H" << height << " F" << fpsNum << ":" << fpsDenom << " Ip" << " C" << cf << "\n";
+        header = ofs.tellp();
+    }
+
+    for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+        frameSize += (uint32_t)((width >> x265_cli_csps[colorSpace].width[i]) * (height >> x265_cli_csps[colorSpace].height[i]));
+}
+
+Y4MOutput::~Y4MOutput()
+{
+    ofs.close();
+    delete [] buf;
+}
+
+bool Y4MOutput::writePicture(const x265_picture& pic)
+{
+    std::ofstream::pos_type outPicPos = header;
+    outPicPos += (uint64_t)pic.poc * (6 + frameSize);
+    ofs.seekp(outPicPos);
+    ofs << "FRAME\n";
+
+#if HIGH_BIT_DEPTH
+    if (pic.bitDepth > 8 && pic.poc == 0)
+        x265_log(NULL, X265_LOG_WARNING, "y4m: down-shifting reconstructed pixels to 8 bits\n");
+#else
+    if (pic.bitDepth > 8 && pic.poc == 0)
+        x265_log(NULL, X265_LOG_WARNING, "y4m: forcing reconstructed pixels to 8 bits\n");
+#endif
+
+    X265_CHECK(pic.colorSpace == colorSpace, "invalid color space\n");
+
+#if HIGH_BIT_DEPTH
+
+    // encoder gave us short pixels, downshift, then write
+    X265_CHECK(pic.bitDepth > 8, "invalid bit depth\n");
+    int shift = pic.bitDepth - 8;
+    for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+    {
+        uint16_t *src = (uint16_t*)pic.planes[i];
+        for (int h = 0; h < height >> x265_cli_csps[colorSpace].height[i]; h++)
+        {
+            for (int w = 0; w < width >> x265_cli_csps[colorSpace].width[i]; w++)
+                buf[w] = (char)(src[w] >> shift);
+
+            ofs.write(buf, width >> x265_cli_csps[colorSpace].width[i]);
+            src += pic.stride[i] / sizeof(*src);
+        }
+    }
+
+#else // if HIGH_BIT_DEPTH
+
+    X265_CHECK(pic.bitDepth == 8, "invalid bit depth\n");
+    for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+    {
+        char *src = (char*)pic.planes[i];
+        for (int h = 0; h < height >> x265_cli_csps[colorSpace].height[i]; h++)
+        {
+            ofs.write(src, width >> x265_cli_csps[colorSpace].width[i]);
+            src += pic.stride[i] / sizeof(*src);
+        }
+    }
+
+#endif // if HIGH_BIT_DEPTH
+
+    return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/output/y4m.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,69 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_Y4M_H
+#define X265_Y4M_H
+
+#include "output.h"
+#include <fstream>
+
+namespace X265_NS {
+// private x265 namespace
+
+class Y4MOutput : public ReconFile
+{
+protected:
+
+    int width;
+
+    int height;
+
+    int colorSpace;
+
+    uint32_t frameSize;
+
+    std::ofstream ofs;
+
+    std::ofstream::pos_type header;
+
+    char *buf;
+
+    void writeHeader();
+
+public:
+
+    Y4MOutput(const char *filename, int width, int height, uint32_t fpsNum, uint32_t fpsDenom, int csp);
+
+    virtual ~Y4MOutput();
+
+    const char *getName() const                   { return "y4m"; }
+
+    bool isFail() const                           { return ofs.fail(); }
+
+    void release()                                { delete this; }
+
+    bool writePicture(const x265_picture& pic);
+};
+}
+
+#endif // ifndef X265_Y4M_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/output/yuv.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,105 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "output.h"
+#include "yuv.h"
+
+using namespace X265_NS;
+using namespace std;
+
+YUVOutput::YUVOutput(const char *filename, int w, int h, uint32_t d, int csp)
+    : width(w)
+    , height(h)
+    , depth(d)
+    , colorSpace(csp)
+    , frameSize(0)
+{
+    ofs.open(filename, ios::binary | ios::out);
+    buf = new char[width];
+
+    for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+        frameSize += (uint32_t)((width >> x265_cli_csps[colorSpace].width[i]) * (height >> x265_cli_csps[colorSpace].height[i]));
+}
+
+YUVOutput::~YUVOutput()
+{
+    ofs.close();
+    delete [] buf;
+}
+
+bool YUVOutput::writePicture(const x265_picture& pic)
+{
+    uint64_t fileOffset = pic.poc;
+    fileOffset *= frameSize;
+
+    X265_CHECK(pic.colorSpace == colorSpace, "invalid color space\n");
+    X265_CHECK(pic.bitDepth == (int)depth, "invalid bit depth\n");
+
+#if HIGH_BIT_DEPTH
+    if (depth == 8)
+    {
+        int shift = pic.bitDepth - 8;
+        ofs.seekp((std::streamoff)fileOffset);
+        for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+        {
+            uint16_t *src = (uint16_t*)pic.planes[i];
+            for (int h = 0; h < height >> x265_cli_csps[colorSpace].height[i]; h++)
+            {
+                for (int w = 0; w < width >> x265_cli_csps[colorSpace].width[i]; w++)
+                    buf[w] = (char)(src[w] >> shift);
+
+                ofs.write(buf, width >> x265_cli_csps[colorSpace].width[i]);
+                src += pic.stride[i] / sizeof(*src);
+            }
+        }
+    }
+    else
+    {
+        ofs.seekp((std::streamoff)(fileOffset * 2));
+        for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+        {
+            uint16_t *src = (uint16_t*)pic.planes[i];
+            for (int h = 0; h < height >> x265_cli_csps[colorSpace].height[i]; h++)
+            {
+                ofs.write((const char*)src, (width * 2) >> x265_cli_csps[colorSpace].width[i]);
+                src += pic.stride[i] / sizeof(*src);
+            }
+        }
+    }
+#else // if HIGH_BIT_DEPTH
+    ofs.seekp((std::streamoff)fileOffset);
+    for (int i = 0; i < x265_cli_csps[colorSpace].planes; i++)
+    {
+        char *src = (char*)pic.planes[i];
+        for (int h = 0; h < height >> x265_cli_csps[colorSpace].height[i]; h++)
+        {
+            ofs.write(src, width >> x265_cli_csps[colorSpace].width[i]);
+            src += pic.stride[i] / sizeof(*src);
+        }
+    }
+
+#endif // if HIGH_BIT_DEPTH
+
+    return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/output/yuv.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,69 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_YUV_H
+#define X265_YUV_H
+
+#include "output.h"
+#include "common.h"
+
+#include <fstream>
+
+namespace X265_NS {
+// private x265 namespace
+
+class YUVOutput : public ReconFile
+{
+protected:
+
+    int width;
+
+    int height;
+
+    uint32_t depth;
+
+    int colorSpace;
+
+    uint32_t frameSize;
+
+    char *buf;
+
+    std::ofstream ofs;
+
+public:
+
+    YUVOutput(const char *filename, int width, int height, uint32_t bitdepth, int csp);
+
+    virtual ~YUVOutput();
+
+    const char *getName() const                   { return "yuv"; }
+
+    bool isFail() const                           { return ofs.fail(); }
+
+    void release()                                { delete this; }
+
+    bool writePicture(const x265_picture& pic);
+};
+}
+
+#endif // ifndef X265_YUV_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/profile/CMakeLists.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,25 @@
+# vim: syntax=cmake
+
+option(ENABLE_PPA "Enable PPA profiling instrumentation" OFF)
+if(ENABLE_PPA)
+    add_definitions(-DENABLE_PPA)
+    add_subdirectory(PPA)
+    list(APPEND PLATFORM_LIBS PPA)
+    if(UNIX)
+        list(APPEND PLATFORM_LIBS dl)
+    endif(UNIX)
+endif(ENABLE_PPA)
+
+option(ENABLE_VTUNE "Enable Vtune profiling instrumentation" OFF)
+if(ENABLE_VTUNE)
+    add_definitions(-DENABLE_VTUNE)
+    add_subdirectory(vtune)
+    list(APPEND PLATFORM_LIBS vtune)
+    include_directories($ENV{VTUNE_AMPLIFIER_XE_2015_DIR}/include)
+    link_directories($ENV{VTUNE_AMPLIFIER_XE_2015_DIR}/lib64)
+    if(WIN32)
+        list(APPEND PLATFORM_LIBS libittnotify.lib)
+    else()
+        list(APPEND PLATFORM_LIBS libittnotify.a dl)
+    endif()
+endif(ENABLE_VTUNE)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/profile/PPA/CMakeLists.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1 @@
+add_library(PPA ppa.h ppaApi.h ppa.cpp ../cpuEvents.h)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/profile/PPA/ppa.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,147 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#if defined(ENABLE_PPA)
+
+#include "ppa.h"
+#include <stdlib.h>
+
+#define PPA_REGISTER_CPU_EVENT2GROUP(x, y) # x, # y,
+#define CPU_EVENT(x) PPA_REGISTER_CPU_EVENT2GROUP(x, NoGroup)
+const char *PPACpuAndGroup[] =
+{
+#include "../cpuEvents.h"
+    ""
+};
+#undef CPU_EVENT
+#undef PPA_REGISTER_CPU_EVENT2GROUP
+
+extern "C" {
+typedef ppa::Base *(FUNC_PPALibInit)(const char **, int);
+typedef void (FUNC_PPALibRelease)(ppa::Base* &);
+}
+
+using namespace ppa;
+
+static FUNC_PPALibRelease *_pfuncPpaRelease;
+ppa::Base *ppa::ppabase;
+
+static void _ppaReleaseAtExit()
+{
+    _pfuncPpaRelease(ppabase);
+}
+
+#ifdef _WIN32
+#include <windows.h>
+
+#if defined(_M_X64) || defined(__x86_64__) || defined(__amd64__)
+# ifdef UNICODE
+# define PPA_DLL_NAME L"ppa64.dll"
+# else
+# define PPA_DLL_NAME "ppa64.dll"
+# endif
+#else
+# ifdef UNICODE
+# define PPA_DLL_NAME L"ppa.dll"
+# else
+# define PPA_DLL_NAME "ppa.dll"
+# endif
+#endif // if defined(_M_X64) || defined(__x86_64__) || defined(__amd64__)
+
+void initializePPA(void)
+{
+    if (ppabase)
+        return;
+
+    HMODULE _ppaLibHandle = LoadLibrary(PPA_DLL_NAME);
+    if (!_ppaLibHandle)
+        return;
+
+    FUNC_PPALibInit *_pfuncPpaInit = (FUNC_PPALibInit*)GetProcAddress(_ppaLibHandle, "InitPpaUtil");
+    _pfuncPpaRelease  = (FUNC_PPALibRelease*)GetProcAddress(_ppaLibHandle, "DeletePpa");
+
+    if (!_pfuncPpaInit || !_pfuncPpaRelease)
+    {
+        FreeLibrary(_ppaLibHandle);
+        return;
+    }
+
+    ppabase = _pfuncPpaInit(PPACpuAndGroup, PPACpuGroupNums);
+    if (!ppabase)
+    {
+        FreeLibrary(_ppaLibHandle);
+        return;
+    }
+
+    atexit(_ppaReleaseAtExit);
+}
+
+#else /* linux & unix & cygwin */
+#include <dlfcn.h>
+#include <stdio.h>
+
+#if defined(_M_X64) || defined(__x86_64__) || defined(__amd64__)
+# define PPA_LIB_NAME "libppa64.so"
+#else
+# define PPA_LIB_NAME "libppa.so"
+#endif
+
+void initializePPA(void)
+{
+    if (ppabase)
+    {
+        printf("PPA: already initialized\n");
+        return;
+    }
+
+    void *_ppaDllHandle = dlopen(PPA_LIB_NAME, RTLD_LAZY);
+    if (!_ppaDllHandle)
+    {
+        printf("PPA: Unable to load %s\n", PPA_LIB_NAME);
+        return;
+    }
+
+    FUNC_PPALibInit *_pfuncPpaInit = (FUNC_PPALibInit*)dlsym(_ppaDllHandle, "InitPpaUtil");
+    _pfuncPpaRelease = (FUNC_PPALibRelease*)dlsym(_ppaDllHandle, "DeletePpa");
+
+    if (!_pfuncPpaInit || !_pfuncPpaRelease)
+    {
+        printf("PPA: Function bindings failed\n");
+        dlclose(_ppaDllHandle);
+        return;
+    }
+
+    ppabase = _pfuncPpaInit(PPACpuAndGroup, PPACpuGroupNums);
+    if (!ppabase)
+    {
+        printf("PPA: Init failed\n");
+        dlclose(_ppaDllHandle);
+        return;
+    }
+
+    atexit(_ppaReleaseAtExit);
+}
+
+#endif /* !_WIN32 */
+
+#endif /* defined(ENABLE_PPA) */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/profile/PPA/ppa.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,43 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef PPA_H
+#define PPA_H
+
+/* declare enum list of users CPU events */
+#define CPU_EVENT(x) x,
+enum PPACpuEventEnum
+{
+#include "../cpuEvents.h"
+    PPACpuGroupNums
+};
+#undef CPU_EVENT
+
+#include "ppaApi.h"
+
+void initializePPA();
+
+#define PPA_INIT()               initializePPA()
+#define PPAScopeEvent(e)         ppa::ProfileScope ppaScope_(e)
+
+#endif /* PPA_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/profile/PPA/ppaApi.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,70 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef _PPA_API_H_
+#define _PPA_API_H_
+
+namespace ppa {
+// PPA private namespace
+
+typedef unsigned short EventID;
+typedef unsigned char GroupID;
+
+class Base
+{
+public:
+
+    virtual ~Base() {}
+
+    virtual bool isEventFiltered(EventID eventId) const = 0;
+    virtual bool configEventById(EventID eventId, bool filtered) const = 0;
+    virtual int  configGroupById(GroupID groupId, bool filtered) const = 0;
+    virtual void configAllEvents(bool filtered) const = 0;
+    virtual EventID  registerEventByName(const char *pEventName) = 0;
+    virtual GroupID registerGroupByName(const char *pGroupName) = 0;
+    virtual EventID registerEventInGroup(const char *pEventName, GroupID groupId) = 0;
+    virtual void triggerStartEvent(EventID eventId) = 0;
+    virtual void triggerEndEvent(EventID eventId) = 0;
+    virtual void triggerTidEvent(EventID eventId, unsigned int data) = 0;
+    virtual void triggerDebugEvent(EventID eventId, unsigned int data0, unsigned int data1) = 0;
+
+    virtual EventID getEventId(int index) const = 0;
+
+protected:
+
+    virtual void init(const char **pNames, int eventCount) = 0;
+};
+
+extern ppa::Base *ppabase;
+
+struct ProfileScope
+{
+    ppa::EventID id;
+
+    ProfileScope(int e) { if (ppabase) { id = ppabase->getEventId(e); ppabase->triggerStartEvent(id); } else id = 0; }
+    ~ProfileScope()     { if (ppabase) ppabase->triggerEndEvent(id); }
+};
+
+}
+
+#endif //_PPA_API_H_
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/profile/cpuEvents.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,11 @@
+CPU_EVENT(frameRead)
+CPU_EVENT(bitstreamWrite)
+CPU_EVENT(frameThread)
+CPU_EVENT(encodeCTU)
+CPU_EVENT(filterCTURow)
+CPU_EVENT(slicetypeDecideEV)
+CPU_EVENT(prelookahead)
+CPU_EVENT(estCostSingle)
+CPU_EVENT(estCostCoop)
+CPU_EVENT(pmode)
+CPU_EVENT(pme)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/profile/vtune/CMakeLists.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2 @@
+include_directories($ENV{VTUNE_AMPLIFIER_XE_2015_DIR}/include)
+add_library(vtune vtune.h vtune.cpp ../cpuEvents.h)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/profile/vtune/vtune.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,58 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "vtune.h"
+#include <stdio.h>
+
+namespace {
+
+#define CPU_EVENT(x) #x,
+const char *stringNames[] =
+{
+#include "../cpuEvents.h"
+    ""
+};
+#undef CPU_EVENT
+
+}
+
+namespace X265_NS {
+
+__itt_domain* domain;
+__itt_string_handle* taskHandle[NUM_VTUNE_TASKS];
+
+void vtuneInit()
+{
+    domain = __itt_domain_create("x265");
+    for (size_t i = 0; i < sizeof(stringNames) / sizeof(const char *); i++)
+        taskHandle[i] = __itt_string_handle_create(stringNames[i]);
+}
+
+void vtuneSetThreadName(const char *name, int id)
+{
+    char threadname[128];
+    sprintf(threadname, "%s %d", name, id);
+    __itt_thread_set_name(threadname);
+}
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/profile/vtune/vtune.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,53 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef VTUNE_H
+#define VTUNE_H
+
+#include "ittnotify.h"
+
+namespace X265_NS {
+
+#define CPU_EVENT(x) x,
+enum VTuneTasksEnum
+{
+#include "../cpuEvents.h"
+    NUM_VTUNE_TASKS
+};
+#undef CPU_EVENT
+
+extern __itt_domain* domain;
+extern __itt_string_handle* taskHandle[NUM_VTUNE_TASKS];
+
+struct VTuneScopeEvent
+{
+    VTuneScopeEvent(int e) { __itt_task_begin(domain, __itt_null, __itt_null, taskHandle[e]); }
+    ~VTuneScopeEvent()     { __itt_task_end(domain); }
+};
+
+void vtuneInit();
+void vtuneSetThreadName(const char *name, int id);
+
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/CMakeLists.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,33 @@
+# vim: syntax=cmake
+enable_language(ASM_YASM)
+
+if(MSVC_IDE)
+    set(YASM_SRC checkasm-a.obj)
+    add_custom_command(
+        OUTPUT checkasm-a.obj
+        COMMAND ${YASM_EXECUTABLE}
+        ARGS ${YASM_FLAGS} ${CMAKE_CURRENT_SOURCE_DIR}/checkasm-a.asm -o checkasm-a.obj
+        DEPENDS checkasm-a.asm)
+else()
+    set(YASM_SRC checkasm-a.asm)
+endif()
+
+check_symbol_exists(__rdtsc "intrin.h" HAVE_RDTSC)
+if(HAVE_RDTSC)
+    add_definitions(-DHAVE_RDTSC=1)
+endif()
+
+add_executable(TestBench ${YASM_SRC}
+    testbench.cpp testharness.h
+    pixelharness.cpp pixelharness.h
+    mbdstharness.cpp mbdstharness.h
+    ipfilterharness.cpp ipfilterharness.h
+    intrapredharness.cpp intrapredharness.h)
+target_link_libraries(TestBench x265-static ${PLATFORM_LIBS})
+if(LINKER_OPTIONS)
+    if(EXTRA_LIB)
+        list(APPEND LINKER_OPTIONS "-L..")
+    endif(EXTRA_LIB)
+    string(REPLACE ";" " " LINKER_OPTION_STR "${LINKER_OPTIONS}")
+    set_target_properties(TestBench PROPERTIES LINK_FLAGS "${LINKER_OPTION_STR}")
+endif()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/checkasm-a.asm	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,225 @@
+;*****************************************************************************
+;* checkasm-a.asm: assembly check tool
+;*****************************************************************************
+;* Copyright (C) 2008-2014 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;*          Henrik Gramner <henrik@gramner.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************
+
+%include "../common/x86/x86inc.asm"
+
+SECTION_RODATA
+
+error_message: db "failed to preserve register", 0
+
+%if ARCH_X86_64
+; just random numbers to reduce the chance of incidental match
+ALIGN 16
+x6:  ddq 0x79445c159ce790641a1b2550a612b48c
+x7:  ddq 0x86b2536fcd8cf6362eed899d5a28ddcd
+x8:  ddq 0x3f2bf84fc0fcca4eb0856806085e7943
+x9:  ddq 0xd229e1f5b281303facbd382dcf5b8de2
+x10: ddq 0xab63e2e11fa38ed971aeaff20b095fd9
+x11: ddq 0x77d410d5c42c882d89b0c0765892729a
+x12: ddq 0x24b3c1d2a024048bc45ea11a955d8dd5
+x13: ddq 0xdd7b8919edd427862e8ec680de14b47c
+x14: ddq 0x11e53e2b2ac655ef135ce6888fa02cbf
+x15: ddq 0x6de8f4c914c334d5011ff554472a7a10
+n7:   dq 0x21f86d66c8ca00ce
+n8:   dq 0x75b6ba21077c48ad
+n9:   dq 0xed56bb2dcb3c7736
+n10:  dq 0x8bda43d3fd1a7e06
+n11:  dq 0xb64a9c9e5d318408
+n12:  dq 0xdf9a54b303f1d3a3
+n13:  dq 0x4a75479abd64e097
+n14:  dq 0x249214109d5d1c88
+%endif
+
+SECTION .text
+
+cextern_naked puts
+
+; max number of args used by any x265 asm function.
+; (max_args % 4) must equal 3 for stack alignment
+%define max_args 15
+
+%if ARCH_X86_64
+
+;-----------------------------------------------------------------------------
+; void x265_checkasm_stack_clobber( uint64_t clobber, ... )
+;-----------------------------------------------------------------------------
+cglobal checkasm_stack_clobber, 1,2
+    ; Clobber the stack with junk below the stack pointer
+    %define size (max_args+6)*8
+    SUB  rsp, size
+    mov   r1, size-8
+.loop:
+    mov [rsp+r1], r0
+    sub   r1, 8
+    jge .loop
+    ADD  rsp, size
+    RET
+
+%if WIN64
+    %assign free_regs 7
+%else
+    %assign free_regs 9
+%endif
+
+;-----------------------------------------------------------------------------
+; intptr_t x265_checkasm_call( intptr_t (*func)(), int *ok, ... )
+;-----------------------------------------------------------------------------
+cglobal checkasm_call_float
+INIT_XMM
+cglobal checkasm_call, 2,15,16,max_args*8+8
+    mov  r6, r0
+    mov  [rsp+max_args*8], r1
+
+    ; All arguments have been pushed on the stack instead of registers in order to
+    ; test for incorrect assumptions that 32-bit ints are zero-extended to 64-bit.
+    mov  r0, r6mp
+    mov  r1, r7mp
+    mov  r2, r8mp
+    mov  r3, r9mp
+%if UNIX64
+    mov  r4, r10mp
+    mov  r5, r11mp
+    %assign i 6
+    %rep max_args-6
+        mov  r9, [rsp+stack_offset+(i+1)*8]
+        mov  [rsp+(i-6)*8], r9
+        %assign i i+1
+    %endrep
+%else
+    %assign i 4
+    %rep max_args-4
+        mov  r9, [rsp+stack_offset+(i+7)*8]
+        mov  [rsp+i*8], r9
+        %assign i i+1
+    %endrep
+%endif
+
+%if WIN64
+    %assign i 6
+    %rep 16-6
+        mova m %+ i, [x %+ i]
+        %assign i i+1
+    %endrep
+%endif
+
+%assign i 14
+%rep 15-free_regs
+    mov  r %+ i, [n %+ i]
+    %assign i i-1
+%endrep
+    call r6
+%assign i 14
+%rep 15-free_regs
+    xor  r %+ i, [n %+ i]
+    or  r14, r %+ i
+    %assign i i-1
+%endrep
+
+%if WIN64
+    %assign i 6
+    %rep 16-6
+        pxor m %+ i, [x %+ i]
+        por  m6, m %+ i
+        %assign i i+1
+    %endrep
+    packsswb m6, m6
+    movq r5, m6
+    or  r14, r5
+%endif
+
+    jz .ok
+    mov  r9, rax
+    mov r10, rdx
+    lea  r0, [error_message]
+    call puts
+    mov  r1, [rsp+max_args*8]
+    mov  dword [r1], 0
+    mov  rdx, r10
+    mov  rax, r9
+.ok:
+    RET
+
+%else
+
+; just random numbers to reduce the chance of incidental match
+%define n3 dword 0x6549315c
+%define n4 dword 0xe02f3e23
+%define n5 dword 0xb78d0d1d
+%define n6 dword 0x33627ba7
+
+;-----------------------------------------------------------------------------
+; intptr_t x265_checkasm_call( intptr_t (*func)(), int *ok, ... )
+;-----------------------------------------------------------------------------
+cglobal checkasm_call_float
+cglobal checkasm_call, 1,7
+    mov  r3, n3
+    mov  r4, n4
+    mov  r5, n5
+    mov  r6, n6
+%rep max_args
+    push dword [esp+24+max_args*4]
+%endrep
+    call r0
+    add  esp, max_args*4
+    xor  r3, n3
+    xor  r4, n4
+    xor  r5, n5
+    xor  r6, n6
+    or   r3, r4
+    or   r5, r6
+    or   r3, r5
+    jz .ok
+    mov  r3, eax
+    mov  r4, edx
+    lea  r1, [error_message]
+    push r1
+    call puts
+    add  esp, 4
+    mov  r1, r1m
+    mov  dword [r1], 0
+    mov  edx, r4
+    mov  eax, r3
+.ok:
+    REP_RET
+
+%endif ; ARCH_X86_64
+
+;-----------------------------------------------------------------------------
+; int x265_stack_pagealign( int (*func)(), int align )
+;-----------------------------------------------------------------------------
+cglobal stack_pagealign, 2,2
+    movsxdifnidn r1, r1d
+    push rbp
+    mov  rbp, rsp
+%if WIN64
+    sub  rsp, 32 ; shadow space
+%endif
+    and  rsp, ~0xfff
+    sub  rsp, r1
+    call r0
+    leave
+    RET
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/intrapredharness.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,318 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Min Chen <chenm003@163.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "predict.h"
+#include "intrapredharness.h"
+
+using namespace X265_NS;
+
+IntraPredHarness::IntraPredHarness()
+{
+    for (int i = 0; i < INPUT_SIZE; i++)
+        pixel_buff[i] = rand() % PIXEL_MAX;
+
+    /* [0] --- Random values
+     * [1] --- Minimum
+     * [2] --- Maximum */
+    for (int i = 0; i < BUFFSIZE; i++)
+    {
+        pixel_test_buff[0][i]   = rand() % PIXEL_MAX;
+        pixel_test_buff[1][i]   = PIXEL_MIN;
+        pixel_test_buff[2][i]   = PIXEL_MAX;
+    }
+}
+
+bool IntraPredHarness::check_dc_primitive(intra_pred_t ref, intra_pred_t opt, int width)
+{
+    int j = Predict::ADI_BUF_STRIDE;
+    intptr_t stride = FENC_STRIDE;
+
+#if _DEBUG
+    memset(pixel_out_vec, 0xCD, OUTPUT_SIZE);
+    memset(pixel_out_c, 0xCD, OUTPUT_SIZE);
+#endif
+
+    for (int i = 0; i <= 100; i++)
+    {
+        int rand_filter = rand() & 1;
+        if (width > 16)
+            rand_filter = 0;
+
+        ref(pixel_out_c, stride, pixel_buff + j - Predict::ADI_BUF_STRIDE, 0, rand_filter);
+        checked(opt, pixel_out_vec, stride, pixel_buff + j - Predict::ADI_BUF_STRIDE, 0, rand_filter);
+
+        for (int k = 0; k < width; k++)
+        {
+            if (memcmp(pixel_out_vec + k * FENC_STRIDE, pixel_out_c + k * FENC_STRIDE, width * sizeof(pixel)))
+                return false;
+        }
+
+        reportfail();
+        j += FENC_STRIDE;
+    }
+
+    return true;
+}
+
+bool IntraPredHarness::check_planar_primitive(intra_pred_t ref, intra_pred_t opt, int width)
+{
+    int j = Predict::ADI_BUF_STRIDE;
+    intptr_t stride = FENC_STRIDE;
+
+#if _DEBUG
+    memset(pixel_out_vec, 0xCD, OUTPUT_SIZE);
+    memset(pixel_out_c, 0xCD, OUTPUT_SIZE);
+#endif
+
+    for (int i = 0; i <= 100; i++)
+    {
+        ref(pixel_out_c, stride, pixel_buff + j - Predict::ADI_BUF_STRIDE, 0, 0);
+        checked(opt, pixel_out_vec, stride, pixel_buff + j - Predict::ADI_BUF_STRIDE, 0, 0);
+
+        for (int k = 0; k < width; k++)
+        {
+            if (memcmp(pixel_out_vec + k * FENC_STRIDE, pixel_out_c + k * FENC_STRIDE, width * sizeof(pixel)))
+                return false;
+        }
+
+        reportfail();
+        j += FENC_STRIDE;
+    }
+
+    return true;
+}
+
+bool IntraPredHarness::check_angular_primitive(const intra_pred_t ref[], const intra_pred_t opt[], int sizeIdx)
+{
+    int j = Predict::ADI_BUF_STRIDE;
+    intptr_t stride = FENC_STRIDE;
+
+#if _DEBUG
+    memset(pixel_out_vec, 0xCD, OUTPUT_SIZE);
+    memset(pixel_out_c, 0xCD, OUTPUT_SIZE);
+#endif
+
+    int width = 1 << (sizeIdx + 2);
+    for (int i = 0; i <= 100; i++)
+    {
+        int bFilter = (width <= 16) && (rand() % 2);
+        for (int pmode = 2; pmode <= 34; pmode++)
+        {
+            if (!opt[pmode])
+                continue;
+
+            checked(opt[pmode], pixel_out_vec, stride, pixel_buff + j, pmode, bFilter);
+            ref[pmode](pixel_out_c, stride, pixel_buff + j, pmode, bFilter);
+
+            for (int k = 0; k < width; k++)
+            {
+                if (memcmp(pixel_out_vec + k * FENC_STRIDE, pixel_out_c + k * FENC_STRIDE, width * sizeof(pixel)))
+                {
+                    printf("ang_%dx%d, Mode = %d, Row = %d failed !!\n", width, width, pmode, k);
+                    ref[pmode](pixel_out_c, stride, pixel_buff + j, pmode, bFilter);
+                    opt[pmode](pixel_out_vec, stride, pixel_buff + j, pmode, bFilter);
+                    return false;
+                }
+            }
+
+            reportfail();
+        }
+
+        j += FENC_STRIDE;
+    }
+
+    return true;
+}
+
+bool IntraPredHarness::check_allangs_primitive(const intra_allangs_t ref, const intra_allangs_t opt, int sizeIdx)
+{
+    int j = Predict::ADI_BUF_STRIDE;
+    int isLuma;
+
+#if _DEBUG
+    memset(pixel_out_33_vec, 0xCD, OUTPUT_SIZE_33);
+    memset(pixel_out_33_c, 0xCD, OUTPUT_SIZE_33);
+#endif
+
+    const int width = 1 << (sizeIdx + 2);
+
+    for (int i = 0; i <= 100; i++)
+    {
+        isLuma = (width <= 16) ? true : false;  // bFilter is true for 4x4, 8x8, 16x16 and false for 32x32
+
+        pixel * refAbove0 = pixel_buff + j + 3 * FENC_STRIDE;   // keep this offset, since vector code may corrupt the input buffer range [-(width-1), 0];
+        pixel * refLeft0 = refAbove0 + 3 * width + FENC_STRIDE;
+
+        refLeft0[0] = refAbove0[0];
+
+        ref(pixel_out_33_c,   refAbove0, refLeft0, isLuma);
+        checked(opt, pixel_out_33_vec, refAbove0, refLeft0, isLuma);
+
+        for (int p = 2 - 2; p <= 34 - 2; p++)
+        {
+            for (int k = 0; k < width; k++)
+            {
+                if (memcmp(pixel_out_33_c + p * (width * width) + k * width, pixel_out_33_vec + p * (width * width) + k * width, width * sizeof(pixel)))
+                {
+                    printf("\nFailed: (%dx%d) Mode(%2d), Line[%2d], bfilter=%d\n", width, width, p + 2, k, isLuma);
+                    opt(pixel_out_33_vec, refAbove0, refLeft0, isLuma);
+                    return false;
+                }
+            }
+        }
+
+        reportfail();
+        j += FENC_STRIDE;
+    }
+
+    return true;
+}
+
+bool IntraPredHarness::check_intra_filter_primitive(const intra_filter_t ref, const intra_filter_t opt)
+{
+    memset(pixel_out_c, 0, 64 * 64 * sizeof(pixel));
+    memset(pixel_out_vec, 0, 64 * 64 * sizeof(pixel));
+    int j = 0;
+
+    for (int i = 0; i < 100; i++)
+    {
+        int index = rand() % TEST_CASES;
+
+        ref(pixel_test_buff[index] + j, pixel_out_c);
+        checked(opt, pixel_test_buff[index] + j, pixel_out_vec);
+
+        if (memcmp(pixel_out_c, pixel_out_vec, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += FENC_STRIDE;
+    }
+    return true;
+}
+bool IntraPredHarness::testCorrectness(const EncoderPrimitives& ref, const EncoderPrimitives& opt)
+{
+    for (int i = BLOCK_4x4; i <= BLOCK_32x32; i++)
+    {
+        const int size = (1 << (i + 2));
+        if (opt.cu[i].intra_pred[PLANAR_IDX])
+        {
+            if (!check_planar_primitive(ref.cu[i].intra_pred[PLANAR_IDX], opt.cu[i].intra_pred[PLANAR_IDX], size))
+            {
+                printf("intra_planar %dx%d failed\n", size, size);
+                return false;
+            }
+        }
+        if (opt.cu[i].intra_pred[DC_IDX])
+        {
+            if (!check_dc_primitive(ref.cu[i].intra_pred[DC_IDX], opt.cu[i].intra_pred[DC_IDX], size))
+            {
+                printf("intra_dc %dx%d failed\n", size, size);
+                return false;
+            }
+        }
+
+        if (!check_angular_primitive(ref.cu[i].intra_pred, opt.cu[i].intra_pred, i))
+        {
+            printf("intra_angular failed\n");
+            return false;
+        }
+
+        if (opt.cu[i].intra_pred_allangs)
+        {
+            if (!check_allangs_primitive(ref.cu[i].intra_pred_allangs, opt.cu[i].intra_pred_allangs, i))
+            {
+                printf("intra_allangs failed\n");
+                return false;
+            }
+        }
+        if (opt.cu[i].intra_filter)
+        {
+            if (!check_intra_filter_primitive(ref.cu[i].intra_filter, opt.cu[i].intra_filter))
+            {
+                printf("intra_filter_%dx%d failed\n", size, size);
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+void IntraPredHarness::measureSpeed(const EncoderPrimitives& ref, const EncoderPrimitives& opt)
+{
+    int width = 64;
+    uint16_t srcStride = 96;
+
+    for (int i = BLOCK_4x4; i <= BLOCK_32x32; i++)
+    {
+        const int size = (1 << (i + 2));
+        if (opt.cu[i].intra_pred[PLANAR_IDX])
+        {
+            printf("intra_planar_%dx%d", size, size);
+            REPORT_SPEEDUP(opt.cu[i].intra_pred[PLANAR_IDX], ref.cu[i].intra_pred[PLANAR_IDX],
+                           pixel_out_vec, FENC_STRIDE, pixel_buff + srcStride, 0, 0);
+        }
+        if (opt.cu[i].intra_pred[DC_IDX])
+        {
+            printf("intra_dc_%dx%d[f=0]", size, size);
+            REPORT_SPEEDUP(opt.cu[i].intra_pred[DC_IDX], ref.cu[i].intra_pred[DC_IDX],
+                pixel_out_vec, FENC_STRIDE, pixel_buff + srcStride, 0, 0);
+            if (size <= 16)
+            {
+                printf("intra_dc_%dx%d[f=1]", size, size);
+                REPORT_SPEEDUP(opt.cu[i].intra_pred[DC_IDX], ref.cu[i].intra_pred[DC_IDX],
+                    pixel_out_vec, FENC_STRIDE, pixel_buff + srcStride, 0, 1);
+            }
+        }
+        if (opt.cu[i].intra_pred_allangs)
+        {
+            bool bFilter = (size <= 16);
+            pixel * refAbove = pixel_buff + srcStride;
+            pixel * refLeft = refAbove + 3 * size;
+            refLeft[0] = refAbove[0];
+            printf("intra_allangs%dx%d", size, size);
+            REPORT_SPEEDUP(opt.cu[i].intra_pred_allangs, ref.cu[i].intra_pred_allangs,
+                           pixel_out_33_vec, refAbove, refLeft, bFilter);
+        }
+        for (int mode = 2; mode <= 34; mode += 1)
+        {
+            if (opt.cu[i].intra_pred[mode])
+            {
+                width = 1 << (i + 2);
+                bool bFilter = (width <= 16);
+                pixel * refAbove = pixel_buff + srcStride;
+                pixel * refLeft = refAbove + 3 * width;
+                refLeft[0] = refAbove[0];
+                printf("intra_ang_%dx%d[%2d]", width, width, mode);
+                REPORT_SPEEDUP(opt.cu[i].intra_pred[mode], ref.cu[i].intra_pred[mode],
+                               pixel_out_vec, FENC_STRIDE, pixel_buff + srcStride, mode, bFilter);
+            }
+        }
+        if (opt.cu[i].intra_filter)
+        {
+            printf("intra_filter_%dx%d", size, size);
+            REPORT_SPEEDUP(opt.cu[i].intra_filter, ref.cu[i].intra_filter, pixel_buff, pixel_out_c);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/intrapredharness.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,69 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Min Chen <chenm003@163.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef _INTRAPREDHARNESS_H_1
+#define _INTRAPREDHARNESS_H_1 1
+
+#include "testharness.h"
+#include "primitives.h"
+
+class IntraPredHarness : public TestHarness
+{
+protected:
+
+    enum { INPUT_SIZE = 4 * 65 * 65 * 100 };
+    enum { OUTPUT_SIZE = 64 * FENC_STRIDE };
+    enum { OUTPUT_SIZE_33 = 33 * OUTPUT_SIZE };
+    enum { TEST_CASES = 3 };
+    enum { INCR = 32 };
+    enum { STRIDE = 64 };
+    enum { ITERS = 100 };
+    enum { MAX_HEIGHT = 64 };
+    enum { PAD_ROWS = 64 };
+    enum { BUFFSIZE = STRIDE * (MAX_HEIGHT + PAD_ROWS) + INCR * ITERS };
+
+    pixel    pixel_test_buff[TEST_CASES][BUFFSIZE];
+    ALIGN_VAR_16(pixel, pixel_buff[INPUT_SIZE]);
+    pixel pixel_out_c[OUTPUT_SIZE];
+    pixel pixel_out_vec[OUTPUT_SIZE];
+    pixel pixel_out_33_c[OUTPUT_SIZE_33];
+    pixel pixel_out_33_vec[OUTPUT_SIZE_33];
+
+    bool check_dc_primitive(intra_pred_t ref, intra_pred_t opt, int width);
+    bool check_planar_primitive(intra_pred_t ref, intra_pred_t opt, int width);
+    bool check_angular_primitive(const intra_pred_t ref[], const intra_pred_t opt[], int size);
+    bool check_allangs_primitive(const intra_allangs_t ref, const intra_allangs_t opt, int size);
+    bool check_intra_filter_primitive(const intra_filter_t ref, const intra_filter_t opt);
+
+public:
+
+    IntraPredHarness();
+
+    const char *getName() const { return "intrapred"; }
+
+    bool testCorrectness(const EncoderPrimitives& ref, const EncoderPrimitives& opt);
+
+    void measureSpeed(const EncoderPrimitives& ref, const EncoderPrimitives& opt);
+};
+
+#endif // ifndef _INTRAPREDHARNESS_H_1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/ipfilterharness.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,776 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Deepthi Devaki <deepthidevaki@multicorewareinc.com>,
+ *          Rajesh Paulraj <rajesh@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *          Min Chen <chenm003@163.com> <min.chen@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "ipfilterharness.h"
+
+using namespace X265_NS;
+
+IPFilterHarness::IPFilterHarness() // populate the three fixed test sets and the free-form random buffers
+{
+    /* [0] --- Random values
+     * [1] --- Minimum
+     * [2] --- Maximum */
+    for (int i = 0; i < TEST_BUF_SIZE; i++)
+    {
+        pixel_test_buff[0][i] = rand() & PIXEL_MAX;
+        short_test_buff[0][i] = (rand() % (2 * SMAX)) - SMAX; // uniform-ish in (-SMAX, SMAX)
+
+        pixel_test_buff[1][i] = PIXEL_MIN;
+        short_test_buff[1][i] = SMIN;
+
+        pixel_test_buff[2][i] = PIXEL_MAX;
+        short_test_buff[2][i] = SMAX;
+    }
+
+    // Poison the output buffers with a marker byte so stale contents are
+    // distinguishable from bytes actually written by a primitive.
+    memset(IPF_C_output_p, 0xCD, TEST_BUF_SIZE * sizeof(pixel));
+    memset(IPF_vec_output_p, 0xCD, TEST_BUF_SIZE * sizeof(pixel));
+    memset(IPF_C_output_s, 0xCD, TEST_BUF_SIZE * sizeof(int16_t));
+    memset(IPF_vec_output_s, 0xCD, TEST_BUF_SIZE * sizeof(int16_t));
+
+    int pixelMax = (1 << X265_DEPTH) - 1; // depth-aware max (255 for 8-bit, 1023 for 10-bit)
+    int shortMax = (1 << 15) - 1;
+    for (int i = 0; i < TEST_BUF_SIZE; i++)
+    {
+        pixel_buff[i] = (pixel)(rand() & pixelMax);
+        int isPositive = (rand() & 1) ? 1 : -1; // random sign for the short sample
+        short_buff[i] = (int16_t)(isPositive * (rand() & shortMax));
+    }
+}
+
+bool IPFilterHarness::check_IPFilterChroma_primitive(filter_pp_t ref, filter_pp_t opt) // verify opt matches ref for all 8 coeffIdx values over random strides
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES; // cycle random/min/max input sets
+
+        for (int coeffIdx = 0; coeffIdx < 8; coeffIdx++)
+        {
+            rand_srcStride = rand() % 100 + 2;  // srcStride in [2, 101]
+            rand_dstStride = rand() % 100 + 64; // dstStride in [64, 163]
+
+            checked(opt, pixel_test_buff[index] + 3 * rand_srcStride, // start 3 rows into the buffer
+                    rand_srcStride,
+                    IPF_vec_output_p,
+                    rand_dstStride,
+                    coeffIdx);
+
+            ref(pixel_test_buff[index] + 3 * rand_srcStride,
+                rand_srcStride,
+                IPF_C_output_p,
+                rand_dstStride,
+                coeffIdx);
+
+            if (memcmp(IPF_vec_output_p, IPF_C_output_p, TEST_BUF_SIZE * sizeof(pixel)))
+                return false;
+
+            reportfail();
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterChroma_ps_primitive(filter_ps_t ref, filter_ps_t opt) // verify opt matches ref for all 8 coeffIdx values over random strides
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES; // cycle random/min/max input sets
+
+        for (int coeffIdx = 0; coeffIdx < 8; coeffIdx++)
+        {
+            rand_srcStride = rand() % 100;      // srcStride in [0, 99]
+            rand_dstStride = rand() % 100 + 64; // dstStride in [64, 163]
+
+            ref(pixel_test_buff[index] + 3 * rand_srcStride, // start 3 rows into the buffer
+                rand_srcStride,
+                IPF_C_output_s,
+                rand_dstStride,
+                coeffIdx);
+
+            checked(opt, pixel_test_buff[index] + 3 * rand_srcStride,
+                    rand_srcStride,
+                    IPF_vec_output_s,
+                    rand_dstStride,
+                    coeffIdx);
+
+            if (memcmp(IPF_vec_output_s, IPF_C_output_s, TEST_BUF_SIZE * sizeof(int16_t)))
+            {
+                // NOTE(review): a debug leftover was removed here -- it re-ran
+                // ref() into IPF_C_output_s immediately before returning, with
+                // the recomputed result never read. Test behavior is unchanged:
+                // the mismatch is still reported via the false return below,
+                // and no sibling check_* function performs such a re-run.
+                return false;
+            }
+
+            reportfail();
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterChroma_hps_primitive(filter_hps_t ref, filter_hps_t opt)
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+
+        for (int coeffIdx = 0; coeffIdx < 8; coeffIdx++)
+        {
+            // 0 : Interpolate W x H, 1 : Interpolate W x (H + 7)
+            for (int isRowExt = 0; isRowExt < 2; isRowExt++)
+            {
+                rand_srcStride = rand() % 100 + 2;
+                rand_dstStride = rand() % 100 + 64;
+
+                ref(pixel_test_buff[index] + 3 * rand_srcStride,
+                    rand_srcStride,
+                    IPF_C_output_s,
+                    rand_dstStride,
+                    coeffIdx,
+                    isRowExt);
+
+                checked(opt, pixel_test_buff[index] + 3 * rand_srcStride,
+                        rand_srcStride,
+                        IPF_vec_output_s,
+                        rand_dstStride,
+                        coeffIdx,
+                        isRowExt);
+
+                if (memcmp(IPF_vec_output_s, IPF_C_output_s, TEST_BUF_SIZE * sizeof(int16_t)))
+                    return false;
+
+                reportfail();
+            }
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterChroma_sp_primitive(filter_sp_t ref, filter_sp_t opt)
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+
+        for (int coeffIdx = 0; coeffIdx < 8; coeffIdx++)
+        {
+            rand_srcStride = rand() % 100;
+            rand_dstStride = rand() % 100 + 64;
+
+            ref(short_test_buff[index] + 3 * rand_srcStride,
+                rand_srcStride,
+                IPF_C_output_p,
+                rand_dstStride,
+                coeffIdx);
+
+            checked(opt, short_test_buff[index] + 3 * rand_srcStride,
+                    rand_srcStride,
+                    IPF_vec_output_p,
+                    rand_dstStride,
+                    coeffIdx);
+
+            if (memcmp(IPF_vec_output_p, IPF_C_output_p, TEST_BUF_SIZE * sizeof(pixel)))
+                return false;
+
+            reportfail();
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterChroma_ss_primitive(filter_ss_t ref, filter_ss_t opt)
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+
+        for (int coeffIdx = 0; coeffIdx < 8; coeffIdx++)
+        {
+            rand_srcStride = rand() % 100;
+            rand_dstStride = rand() % 100 + 64;
+
+            ref(short_test_buff[index] + 3 * rand_srcStride,
+                rand_srcStride,
+                IPF_C_output_s,
+                rand_dstStride,
+                coeffIdx);
+
+            checked(opt, short_test_buff[index] + 3 * rand_srcStride,
+                    rand_srcStride,
+                    IPF_vec_output_s,
+                    rand_dstStride,
+                    coeffIdx);
+
+            if (memcmp(IPF_C_output_s, IPF_vec_output_s, TEST_BUF_SIZE * sizeof(int16_t)))
+                return false;
+
+            reportfail();
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterLuma_primitive(filter_pp_t ref, filter_pp_t opt) // verify opt matches ref for all 4 luma coeffIdx values over random strides
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES; // cycle random/min/max input sets
+
+        for (int coeffIdx = 0; coeffIdx < 4; coeffIdx++)
+        {
+            rand_srcStride = rand() % 100;      // srcStride in [0, 99]
+            rand_dstStride = rand() % 100 + 64; // dstStride in [64, 163]
+
+            checked(opt, pixel_test_buff[index] + 3 * rand_srcStride + 6, // 3 rows + 6 columns in: room for the 8-tap luma filter
+                    rand_srcStride,
+                    IPF_vec_output_p,
+                    rand_dstStride,
+                    coeffIdx);
+
+            ref(pixel_test_buff[index] + 3 * rand_srcStride + 6,
+                rand_srcStride,
+                IPF_C_output_p,
+                rand_dstStride,
+                coeffIdx);
+
+            if (memcmp(IPF_vec_output_p, IPF_C_output_p, TEST_BUF_SIZE * sizeof(pixel))) // fix: byte count lacked sizeof(pixel); compared only half the buffer when pixel is 16-bit
+                return false;
+
+            reportfail();
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterLuma_ps_primitive(filter_ps_t ref, filter_ps_t opt)
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+
+        for (int coeffIdx = 0; coeffIdx < 4; coeffIdx++)
+        {
+            rand_srcStride = rand() % 100;
+            rand_dstStride = rand() % 100 + 64;
+
+            ref(pixel_test_buff[index] + 3 * rand_srcStride,
+                rand_srcStride,
+                IPF_C_output_s,
+                rand_dstStride,
+                coeffIdx);
+
+            checked(opt, pixel_test_buff[index] + 3 * rand_srcStride,
+                    rand_srcStride,
+                    IPF_vec_output_s,
+                    rand_dstStride,
+                    coeffIdx);
+
+            if (memcmp(IPF_vec_output_s, IPF_C_output_s, TEST_BUF_SIZE * sizeof(int16_t)))
+                return false;
+
+            reportfail();
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterLuma_hps_primitive(filter_hps_t ref, filter_hps_t opt)
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+
+        for (int coeffIdx = 0; coeffIdx < 4; coeffIdx++)
+        {
+            // 0 : Interpolate W x H, 1 : Interpolate W x (H + 7)
+            for (int isRowExt = 0; isRowExt < 2; isRowExt++)
+            {
+                rand_srcStride = rand() % 100;
+                rand_dstStride = rand() % 100 + 64;
+
+                ref(pixel_test_buff[index] + 3 * rand_srcStride + 6,
+                    rand_srcStride,
+                    IPF_C_output_s,
+                    rand_dstStride,
+                    coeffIdx,
+                    isRowExt);
+
+                checked(opt, pixel_test_buff[index] + 3 * rand_srcStride + 6,
+                        rand_srcStride,
+                        IPF_vec_output_s,
+                        rand_dstStride,
+                        coeffIdx,
+                        isRowExt);
+
+                if (memcmp(IPF_vec_output_s, IPF_C_output_s, TEST_BUF_SIZE * sizeof(int16_t)))
+                    return false;
+
+                reportfail();
+            }
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterLuma_sp_primitive(filter_sp_t ref, filter_sp_t opt)
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+
+        for (int coeffIdx = 0; coeffIdx < 4; coeffIdx++)
+        {
+            rand_srcStride = rand() % 100;
+            rand_dstStride = rand() % 100 + 64;
+
+            ref(short_test_buff[index] + 3 * rand_srcStride,
+                rand_srcStride,
+                IPF_C_output_p,
+                rand_dstStride,
+                coeffIdx);
+
+            checked(opt, short_test_buff[index] + 3 * rand_srcStride,
+                    rand_srcStride,
+                    IPF_vec_output_p,
+                    rand_dstStride,
+                    coeffIdx);
+
+            if (memcmp(IPF_vec_output_p, IPF_C_output_p, TEST_BUF_SIZE * sizeof(pixel)))
+                return false;
+
+            reportfail();
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterLuma_ss_primitive(filter_ss_t ref, filter_ss_t opt)
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+
+        for (int coeffIdx = 0; coeffIdx < 4; coeffIdx++)
+        {
+            rand_srcStride = rand() % 100;
+            rand_dstStride = rand() % 100 + 64;
+
+            ref(short_test_buff[index] + 3 * rand_srcStride,
+                rand_srcStride,
+                IPF_C_output_s,
+                rand_dstStride,
+                coeffIdx);
+
+            checked(opt, short_test_buff[index] + 3 * rand_srcStride,
+                    rand_srcStride,
+                    IPF_vec_output_s,
+                    rand_dstStride,
+                    coeffIdx);
+
+            if (memcmp(IPF_C_output_s, IPF_vec_output_s, TEST_BUF_SIZE * sizeof(int16_t)))
+                return false;
+
+            reportfail();
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterLumaHV_primitive(filter_hv_pp_t ref, filter_hv_pp_t opt)
+{
+    intptr_t rand_srcStride, rand_dstStride;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+
+        for (int coeffIdxX = 0; coeffIdxX < 4; coeffIdxX++)
+        {
+            for (int coeffIdxY = 0; coeffIdxY < 4; coeffIdxY++)
+            {
+                rand_srcStride = rand() % 100;
+                rand_dstStride = rand() % 100 + 64;
+
+                ref(pixel_test_buff[index] + 3 * rand_srcStride + 3,
+                    rand_srcStride,
+                    IPF_C_output_p,
+                    rand_dstStride,
+                    coeffIdxX,
+                    coeffIdxY);
+
+                checked(opt, pixel_test_buff[index] + 3 * rand_srcStride + 3,
+                        rand_srcStride,
+                        IPF_vec_output_p,
+                        rand_dstStride,
+                        coeffIdxX,
+                        coeffIdxY);
+
+                if (memcmp(IPF_vec_output_p, IPF_C_output_p, TEST_BUF_SIZE * sizeof(pixel)))
+                    return false;
+
+                reportfail();
+            }
+        }
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterLumaP2S_primitive(filter_p2s_t ref, filter_p2s_t opt) // verify pixel-to-short conversion: opt vs ref over random strides and offsets
+{
+    for (int i = 0; i < ITERS; i++)
+    {
+        intptr_t rand_srcStride = rand() % 100; // srcStride in [0, 99]
+        int index = i % TEST_CASES;             // cycle random/min/max input sets
+        intptr_t dstStride = rand() % 100 + 64; // dstStride in [64, 163]
+
+        ref(pixel_test_buff[index] + i, rand_srcStride, IPF_C_output_s, dstStride); // +i: vary the source alignment each iteration
+
+        checked(opt, pixel_test_buff[index] + i, rand_srcStride, IPF_vec_output_s, dstStride);
+
+        if (memcmp(IPF_vec_output_s, IPF_C_output_s, TEST_BUF_SIZE * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::check_IPFilterChromaP2S_primitive(filter_p2s_t ref, filter_p2s_t opt) // chroma twin of check_IPFilterLumaP2S_primitive; body is identical
+{
+    for (int i = 0; i < ITERS; i++)
+    {
+        intptr_t rand_srcStride = rand() % 100; // srcStride in [0, 99]
+        int index = i % TEST_CASES;             // cycle random/min/max input sets
+        intptr_t dstStride = rand() % 100 + 64; // dstStride in [64, 163]
+
+        ref(pixel_test_buff[index] + i, rand_srcStride, IPF_C_output_s, dstStride); // +i: vary the source alignment each iteration
+
+        checked(opt, pixel_test_buff[index] + i, rand_srcStride, IPF_vec_output_s, dstStride);
+
+        if (memcmp(IPF_vec_output_s, IPF_C_output_s, TEST_BUF_SIZE * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+    }
+
+    return true;
+}
+
+bool IPFilterHarness::testCorrectness(const EncoderPrimitives& ref, const EncoderPrimitives& opt)
+{
+
+    for (int value = 0; value < NUM_PU_SIZES; value++)
+    {
+        if (opt.pu[value].luma_hpp)
+        {
+            if (!check_IPFilterLuma_primitive(ref.pu[value].luma_hpp, opt.pu[value].luma_hpp))
+            {
+                printf("luma_hpp[%s]", lumaPartStr[value]);
+                return false;
+            }
+        }
+        if (opt.pu[value].luma_hps)
+        {
+            if (!check_IPFilterLuma_hps_primitive(ref.pu[value].luma_hps, opt.pu[value].luma_hps))
+            {
+                printf("luma_hps[%s]", lumaPartStr[value]);
+                return false;
+            }
+        }
+        if (opt.pu[value].luma_vpp)
+        {
+            if (!check_IPFilterLuma_primitive(ref.pu[value].luma_vpp, opt.pu[value].luma_vpp))
+            {
+                printf("luma_vpp[%s]", lumaPartStr[value]);
+                return false;
+            }
+        }
+        if (opt.pu[value].luma_vps)
+        {
+            if (!check_IPFilterLuma_ps_primitive(ref.pu[value].luma_vps, opt.pu[value].luma_vps))
+            {
+                printf("luma_vps[%s]", lumaPartStr[value]);
+                return false;
+            }
+        }
+        if (opt.pu[value].luma_vsp)
+        {
+            if (!check_IPFilterLuma_sp_primitive(ref.pu[value].luma_vsp, opt.pu[value].luma_vsp))
+            {
+                printf("luma_vsp[%s]", lumaPartStr[value]);
+                return false;
+            }
+        }
+        if (opt.pu[value].luma_vss)
+        {
+            if (!check_IPFilterLuma_ss_primitive(ref.pu[value].luma_vss, opt.pu[value].luma_vss))
+            {
+                printf("luma_vss[%s]", lumaPartStr[value]);
+                return false;
+            }
+        }
+        if (opt.pu[value].luma_hvpp)
+        {
+            if (!check_IPFilterLumaHV_primitive(ref.pu[value].luma_hvpp, opt.pu[value].luma_hvpp))
+            {
+                printf("luma_hvpp[%s]", lumaPartStr[value]);
+                return false;
+            }
+        }
+        if (opt.pu[value].convert_p2s)
+        {
+            if (!check_IPFilterLumaP2S_primitive(ref.pu[value].convert_p2s, opt.pu[value].convert_p2s))
+            {
+                printf("convert_p2s[%s]", lumaPartStr[value]);
+                return false;
+            }
+        }
+    }
+
+    for (int csp = X265_CSP_I420; csp < X265_CSP_COUNT; csp++)
+    {
+        for (int value = 0; value < NUM_PU_SIZES; value++)
+        {
+            if (opt.chroma[csp].pu[value].filter_hpp)
+            {
+                if (!check_IPFilterChroma_primitive(ref.chroma[csp].pu[value].filter_hpp, opt.chroma[csp].pu[value].filter_hpp))
+                {
+                    printf("chroma_hpp[%s]", chromaPartStr[csp][value]);
+                    return false;
+                }
+            }
+            if (opt.chroma[csp].pu[value].filter_hps)
+            {
+                if (!check_IPFilterChroma_hps_primitive(ref.chroma[csp].pu[value].filter_hps, opt.chroma[csp].pu[value].filter_hps))
+                {
+                    printf("chroma_hps[%s]", chromaPartStr[csp][value]);
+                    return false;
+                }
+            }
+            if (opt.chroma[csp].pu[value].filter_vpp)
+            {
+                if (!check_IPFilterChroma_primitive(ref.chroma[csp].pu[value].filter_vpp, opt.chroma[csp].pu[value].filter_vpp))
+                {
+                    printf("chroma_vpp[%s]", chromaPartStr[csp][value]);
+                    return false;
+                }
+            }
+            if (opt.chroma[csp].pu[value].filter_vps)
+            {
+                if (!check_IPFilterChroma_ps_primitive(ref.chroma[csp].pu[value].filter_vps, opt.chroma[csp].pu[value].filter_vps))
+                {
+                    printf("chroma_vps[%s]", chromaPartStr[csp][value]);
+                    return false;
+                }
+            }
+            if (opt.chroma[csp].pu[value].filter_vsp)
+            {
+                if (!check_IPFilterChroma_sp_primitive(ref.chroma[csp].pu[value].filter_vsp, opt.chroma[csp].pu[value].filter_vsp))
+                {
+                    printf("chroma_vsp[%s]", chromaPartStr[csp][value]);
+                    return false;
+                }
+            }
+            if (opt.chroma[csp].pu[value].filter_vss)
+            {
+                if (!check_IPFilterChroma_ss_primitive(ref.chroma[csp].pu[value].filter_vss, opt.chroma[csp].pu[value].filter_vss))
+                {
+                    printf("chroma_vss[%s]", chromaPartStr[csp][value]);
+                    return false;
+                }
+            }
+            if (opt.chroma[csp].pu[value].p2s)
+            {
+                if (!check_IPFilterChromaP2S_primitive(ref.chroma[csp].pu[value].p2s, opt.chroma[csp].pu[value].p2s))
+                {
+                    printf("chroma_p2s[%s]", chromaPartStr[csp][value]);
+                    return false;
+                }
+            }
+        }
+    }
+
+    return true;
+}
+
+void IPFilterHarness::measureSpeed(const EncoderPrimitives& ref, const EncoderPrimitives& opt)
+{
+    int16_t srcStride = 96;
+    int16_t dstStride = 96;
+    int maxVerticalfilterHalfDistance = 3;
+
+    for (int value = 0; value < NUM_PU_SIZES; value++)
+    {
+        if (opt.pu[value].luma_hpp)
+        {
+            printf("luma_hpp[%s]\t", lumaPartStr[value]);
+            REPORT_SPEEDUP(opt.pu[value].luma_hpp, ref.pu[value].luma_hpp,
+                           pixel_buff + srcStride, srcStride, IPF_vec_output_p, dstStride, 1);
+        }
+
+        if (opt.pu[value].luma_hps)
+        {
+            printf("luma_hps[%s]\t", lumaPartStr[value]);
+            REPORT_SPEEDUP(opt.pu[value].luma_hps, ref.pu[value].luma_hps,
+                           pixel_buff + maxVerticalfilterHalfDistance * srcStride, srcStride,
+                           IPF_vec_output_s, dstStride, 1, 1);
+        }
+
+        if (opt.pu[value].luma_vpp)
+        {
+            printf("luma_vpp[%s]\t", lumaPartStr[value]);
+            REPORT_SPEEDUP(opt.pu[value].luma_vpp, ref.pu[value].luma_vpp,
+                           pixel_buff + maxVerticalfilterHalfDistance * srcStride, srcStride,
+                           IPF_vec_output_p, dstStride, 1);
+        }
+
+        if (opt.pu[value].luma_vps)
+        {
+            printf("luma_vps[%s]\t", lumaPartStr[value]);
+            REPORT_SPEEDUP(opt.pu[value].luma_vps, ref.pu[value].luma_vps,
+                           pixel_buff + maxVerticalfilterHalfDistance * srcStride, srcStride,
+                           IPF_vec_output_s, dstStride, 1);
+        }
+
+        if (opt.pu[value].luma_vsp)
+        {
+            printf("luma_vsp[%s]\t", lumaPartStr[value]);
+            REPORT_SPEEDUP(opt.pu[value].luma_vsp, ref.pu[value].luma_vsp,
+                           short_buff + maxVerticalfilterHalfDistance * srcStride, srcStride,
+                           IPF_vec_output_p, dstStride, 1);
+        }
+
+        if (opt.pu[value].luma_vss)
+        {
+            printf("luma_vss[%s]\t", lumaPartStr[value]);
+            REPORT_SPEEDUP(opt.pu[value].luma_vss, ref.pu[value].luma_vss,
+                           short_buff + maxVerticalfilterHalfDistance * srcStride, srcStride,
+                           IPF_vec_output_s, dstStride, 1);
+        }
+
+        if (opt.pu[value].luma_hvpp)
+        {
+            printf("luma_hv [%s]\t", lumaPartStr[value]);
+            REPORT_SPEEDUP(opt.pu[value].luma_hvpp, ref.pu[value].luma_hvpp,
+                           pixel_buff + 3 * srcStride, srcStride, IPF_vec_output_p, srcStride, 1, 3);
+        }
+
+        if (opt.pu[value].convert_p2s)
+        {
+            printf("convert_p2s[%s]\t", lumaPartStr[value]);
+            REPORT_SPEEDUP(opt.pu[value].convert_p2s, ref.pu[value].convert_p2s,
+                               pixel_buff, srcStride,
+                               IPF_vec_output_s, dstStride);
+        }
+    }
+
+    for (int csp = X265_CSP_I420; csp < X265_CSP_COUNT; csp++)
+    {
+        printf("= Color Space %s =\n", x265_source_csp_names[csp]);
+        for (int value = 0; value < NUM_PU_SIZES; value++)
+        {
+            if (opt.chroma[csp].pu[value].filter_hpp)
+            {
+                printf("chroma_hpp[%s]", chromaPartStr[csp][value]);
+                REPORT_SPEEDUP(opt.chroma[csp].pu[value].filter_hpp, ref.chroma[csp].pu[value].filter_hpp,
+                               pixel_buff + srcStride, srcStride, IPF_vec_output_p, dstStride, 1);
+            }
+            if (opt.chroma[csp].pu[value].filter_hps)
+            {
+                printf("chroma_hps[%s]", chromaPartStr[csp][value]);
+                REPORT_SPEEDUP(opt.chroma[csp].pu[value].filter_hps, ref.chroma[csp].pu[value].filter_hps,
+                               pixel_buff + srcStride, srcStride, IPF_vec_output_s, dstStride, 1, 1);
+            }
+            if (opt.chroma[csp].pu[value].filter_vpp)
+            {
+                printf("chroma_vpp[%s]", chromaPartStr[csp][value]);
+                REPORT_SPEEDUP(opt.chroma[csp].pu[value].filter_vpp, ref.chroma[csp].pu[value].filter_vpp,
+                               pixel_buff + maxVerticalfilterHalfDistance * srcStride, srcStride,
+                               IPF_vec_output_p, dstStride, 1);
+            }
+            if (opt.chroma[csp].pu[value].filter_vps)
+            {
+                printf("chroma_vps[%s]", chromaPartStr[csp][value]);
+                REPORT_SPEEDUP(opt.chroma[csp].pu[value].filter_vps, ref.chroma[csp].pu[value].filter_vps,
+                               pixel_buff + maxVerticalfilterHalfDistance * srcStride, srcStride,
+                               IPF_vec_output_s, dstStride, 1);
+            }
+            if (opt.chroma[csp].pu[value].filter_vsp)
+            {
+                printf("chroma_vsp[%s]", chromaPartStr[csp][value]);
+                REPORT_SPEEDUP(opt.chroma[csp].pu[value].filter_vsp, ref.chroma[csp].pu[value].filter_vsp,
+                               short_buff + maxVerticalfilterHalfDistance * srcStride, srcStride,
+                               IPF_vec_output_p, dstStride, 1);
+            }
+            if (opt.chroma[csp].pu[value].filter_vss)
+            {
+                printf("chroma_vss[%s]", chromaPartStr[csp][value]);
+                REPORT_SPEEDUP(opt.chroma[csp].pu[value].filter_vss, ref.chroma[csp].pu[value].filter_vss,
+                               short_buff + maxVerticalfilterHalfDistance * srcStride, srcStride,
+                               IPF_vec_output_s, dstStride, 1);
+            }
+            if (opt.chroma[csp].pu[value].p2s)
+            {
+                printf("chroma_p2s[%s]\t", chromaPartStr[csp][value]);
+                REPORT_SPEEDUP(opt.chroma[csp].pu[value].p2s, ref.chroma[csp].pu[value].p2s,
+                               pixel_buff, srcStride, IPF_vec_output_s, dstStride);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/ipfilterharness.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,78 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Deepthi Devaki <deepthidevaki@multicorewareinc.com>,
+ *          Rajesh Paulraj <rajesh@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef _IPFILTERHARNESS_H_1
+#define _IPFILTERHARNESS_H_1 1
+
+#include "testharness.h"
+#include "primitives.h"
+
+class IPFilterHarness : public TestHarness // test harness for interpolation-filter primitives (luma/chroma, pp/ps/sp/ss/hps, p2s)
+{
+protected:
+
+    // Assuming max_height = max_width = max_srcStride = max_dstStride = 100
+    enum { TEST_BUF_SIZE = 200 * 200 };
+    enum { ITERS = 100 };
+    enum { TEST_CASES = 3 }; // input sets: [0] random, [1] minimum, [2] maximum (see ctor)
+    enum { SMAX = 1 << 12 };
+    enum { SMIN = -(1 << 12) }; // fix: was "-1 << 12"; left-shifting a negative value is undefined behavior (CERT INT34-C). Value unchanged (-4096).
+
+    ALIGN_VAR_32(pixel, pixel_buff[TEST_BUF_SIZE]);
+    int16_t short_buff[TEST_BUF_SIZE];
+    int16_t IPF_vec_output_s[TEST_BUF_SIZE]; // optimized-path short output
+    int16_t IPF_C_output_s[TEST_BUF_SIZE];   // reference (C) short output
+    pixel   IPF_vec_output_p[TEST_BUF_SIZE]; // optimized-path pixel output
+    pixel   IPF_C_output_p[TEST_BUF_SIZE];   // reference (C) pixel output
+
+    pixel   pixel_test_buff[TEST_CASES][TEST_BUF_SIZE];
+    int16_t short_test_buff[TEST_CASES][TEST_BUF_SIZE];
+
+    bool check_IPFilterChroma_primitive(filter_pp_t ref, filter_pp_t opt);
+    bool check_IPFilterChroma_ps_primitive(filter_ps_t ref, filter_ps_t opt);
+    bool check_IPFilterChroma_hps_primitive(filter_hps_t ref, filter_hps_t opt);
+    bool check_IPFilterChroma_sp_primitive(filter_sp_t ref, filter_sp_t opt);
+    bool check_IPFilterChroma_ss_primitive(filter_ss_t ref, filter_ss_t opt);
+    bool check_IPFilterLuma_primitive(filter_pp_t ref, filter_pp_t opt);
+    bool check_IPFilterLuma_ps_primitive(filter_ps_t ref, filter_ps_t opt);
+    bool check_IPFilterLuma_hps_primitive(filter_hps_t ref, filter_hps_t opt);
+    bool check_IPFilterLuma_sp_primitive(filter_sp_t ref, filter_sp_t opt);
+    bool check_IPFilterLuma_ss_primitive(filter_ss_t ref, filter_ss_t opt);
+    bool check_IPFilterLumaHV_primitive(filter_hv_pp_t ref, filter_hv_pp_t opt);
+    bool check_IPFilterLumaP2S_primitive(filter_p2s_t ref, filter_p2s_t opt);
+    bool check_IPFilterChromaP2S_primitive(filter_p2s_t ref, filter_p2s_t opt);
+
+public:
+
+    IPFilterHarness();
+
+    const char *getName() const { return "interp"; }
+
+    bool testCorrectness(const EncoderPrimitives& ref, const EncoderPrimitives& opt);
+
+    void measureSpeed(const EncoderPrimitives& ref, const EncoderPrimitives& opt);
+};
+
+#endif // ifndef _IPFILTERHARNESS_H_1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/mbdstharness.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,523 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Min Chen <min.chen@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *          Nabajit Deka <nabajit@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "mbdstharness.h"
+
+using namespace X265_NS;
+
+struct DctConf
+{
+    const char *name;
+    int width;
+};
+
+const DctConf dctInfo[] =
+{
+    { "dct4x4\t",    4 },
+    { "dct8x8\t",    8 },
+    { "dct16x16",   16 },
+    { "dct32x32",   32 },
+};
+
+const DctConf idctInfo[] =
+{
+    { "idct4x4\t",    4 },
+    { "idct8x8\t",    8 },
+    { "idct16x16",   16 },
+    { "idct32x32",   32 },
+};
+
+MBDstHarness::MBDstHarness()
+{
+    const int idct_max = (1 << (X265_DEPTH + 4)) - 1;
+
+    /* [0] --- Random values
+     * [1] --- Minimum
+     * [2] --- Maximum */
+    for (int i = 0; i < TEST_BUF_SIZE; i++)
+    {
+        short_test_buff[0][i]    = (rand() & PIXEL_MAX) - (rand() & PIXEL_MAX);
+        int_test_buff[0][i]      = rand() % PIXEL_MAX;
+        int_idct_test_buff[0][i] = (rand() % (SHORT_MAX - SHORT_MIN)) - SHORT_MAX;
+        short_denoise_test_buff1[0][i] = short_denoise_test_buff2[0][i] = (rand() & SHORT_MAX) - (rand() & SHORT_MAX);
+
+        short_test_buff[1][i]    = -PIXEL_MAX;
+        int_test_buff[1][i]      = -PIXEL_MAX;
+        int_idct_test_buff[1][i] = SHORT_MIN;
+        short_denoise_test_buff1[1][i] = short_denoise_test_buff2[1][i] = -SHORT_MAX;
+
+        short_test_buff[2][i]    = PIXEL_MAX;
+        int_test_buff[2][i]      = PIXEL_MAX;
+        int_idct_test_buff[2][i] = SHORT_MAX;
+        short_denoise_test_buff1[2][i] = short_denoise_test_buff2[2][i] = SHORT_MAX;
+
+        mbuf1[i] = rand() & PIXEL_MAX;
+        mbufdct[i] = (rand() & PIXEL_MAX) - (rand() & PIXEL_MAX);
+        mbufidct[i] = (rand() & idct_max);
+    }
+
+#if _DEBUG
+    memset(mshortbuf2, 0, MAX_TU_SIZE * sizeof(int16_t));
+    memset(mshortbuf3, 0, MAX_TU_SIZE * sizeof(int16_t));
+
+    memset(mintbuf1, 0, MAX_TU_SIZE * sizeof(int));
+    memset(mintbuf2, 0, MAX_TU_SIZE * sizeof(int));
+    memset(mintbuf3, 0, MAX_TU_SIZE * sizeof(int));
+    memset(mintbuf4, 0, MAX_TU_SIZE * sizeof(int));
+#endif // if _DEBUG
+}
+
+bool MBDstHarness::check_dct_primitive(dct_t ref, dct_t opt, intptr_t width)
+{
+    int j = 0;
+    intptr_t cmp_size = sizeof(short) * width * width;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = rand() % TEST_CASES;
+
+        ref(short_test_buff[index] + j, mshortbuf2, width);
+        checked(opt, short_test_buff[index] + j, mshortbuf3, width);
+
+        if (memcmp(mshortbuf2, mshortbuf3, cmp_size))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool MBDstHarness::check_idct_primitive(idct_t ref, idct_t opt, intptr_t width)
+{
+    int j = 0;
+    intptr_t cmp_size = sizeof(int16_t) * width * width;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = rand() % TEST_CASES;
+
+        ref(short_test_buff[index] + j, mshortbuf2, width);
+        checked(opt, short_test_buff[index] + j, mshortbuf3, width);
+
+        if (memcmp(mshortbuf2, mshortbuf3, cmp_size))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool MBDstHarness::check_dequant_primitive(dequant_normal_t ref, dequant_normal_t opt)
+{
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = rand() % TEST_CASES;
+        int log2TrSize = (rand() % 4) + 2;
+
+        int width = (1 << log2TrSize);
+        int height = width;
+        int qp = rand() % (QP_MAX_SPEC + QP_BD_OFFSET + 1);
+        int per = qp / 6;
+        int rem = qp % 6;
+        static const int invQuantScales[6] = { 40, 45, 51, 57, 64, 72 };
+        int scale = invQuantScales[rem] << per;
+        int transformShift = MAX_TR_DYNAMIC_RANGE - X265_DEPTH - log2TrSize;
+        int shift = QUANT_IQUANT_SHIFT - QUANT_SHIFT - transformShift;
+
+        ref(short_test_buff[index] + j, mshortbuf2, width * height, scale, shift);
+        checked(opt, short_test_buff[index] + j, mshortbuf3, width * height, scale, shift);
+
+        if (memcmp(mshortbuf2, mshortbuf3, sizeof(int16_t) * height * width))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool MBDstHarness::check_dequant_primitive(dequant_scaling_t ref, dequant_scaling_t opt)
+{
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+
+        memset(mshortbuf2, 0, MAX_TU_SIZE * sizeof(int16_t));
+        memset(mshortbuf3, 0, MAX_TU_SIZE * sizeof(int16_t));
+
+        int log2TrSize = (rand() % 4) + 2;
+
+        int width = (1 << log2TrSize);
+        int height = width;
+
+        int qp = rand() % (QP_MAX_SPEC + QP_BD_OFFSET + 1);
+        int per = qp / 6;
+        int transformShift = MAX_TR_DYNAMIC_RANGE - X265_DEPTH - log2TrSize;
+        int shift = QUANT_IQUANT_SHIFT - QUANT_SHIFT - transformShift;
+
+        int cmp_size = sizeof(int16_t) * height * width;
+        int index1 = rand() % TEST_CASES;
+
+        ref(short_test_buff[index1] + j, int_test_buff[index1] + j, mshortbuf2, width * height, per, shift);
+        checked(opt, short_test_buff[index1] + j, int_test_buff[index1] + j, mshortbuf3, width * height, per, shift);
+
+        if (memcmp(mshortbuf2, mshortbuf3, cmp_size))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool MBDstHarness::check_quant_primitive(quant_t ref, quant_t opt)
+{
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int width = 1 << (rand() % 4 + 2);
+        int height = width;
+
+        uint32_t optReturnValue = 0;
+        uint32_t refReturnValue = 0;
+
+        int sliceType = rand() % 2;
+        int log2TrSize = rand() % 4 + 2;
+        int qp = rand() % (QP_MAX_SPEC + QP_BD_OFFSET + 1);
+        int per = qp / 6;
+        int transformShift = MAX_TR_DYNAMIC_RANGE - X265_DEPTH - log2TrSize;
+
+        int bits = QUANT_SHIFT + per + transformShift;
+        int valueToAdd = (sliceType == 1 ? 171 : 85) << (bits - 9);
+        int cmp_size = sizeof(int) * height * width;
+        int cmp_size1 = sizeof(short) * height * width;
+        int numCoeff = height * width;
+
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+
+        refReturnValue = ref(short_test_buff[index1] + j, int_test_buff[index2] + j, mintbuf1, mshortbuf2, bits, valueToAdd, numCoeff);
+        optReturnValue = (uint32_t)checked(opt, short_test_buff[index1] + j, int_test_buff[index2] + j, mintbuf3, mshortbuf3, bits, valueToAdd, numCoeff);
+
+        if (memcmp(mintbuf1, mintbuf3, cmp_size))
+            return false;
+
+        if (memcmp(mshortbuf2, mshortbuf3, cmp_size1))
+            return false;
+
+        if (optReturnValue != refReturnValue)
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool MBDstHarness::check_nquant_primitive(nquant_t ref, nquant_t opt)
+{
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int width = (rand() % 4 + 1) * 4;
+        int height = width;
+
+        uint32_t optReturnValue = 0;
+        uint32_t refReturnValue = 0;
+
+        int bits = rand() % 32;
+        int valueToAdd = rand() % (1 << bits);
+        int cmp_size = sizeof(short) * height * width;
+        int numCoeff = height * width;
+
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+
+        refReturnValue = ref(short_test_buff[index1] + j, int_test_buff[index2] + j, mshortbuf2, bits, valueToAdd, numCoeff);
+        optReturnValue = (uint32_t)checked(opt, short_test_buff[index1] + j, int_test_buff[index2] + j, mshortbuf3, bits, valueToAdd, numCoeff);
+
+        if (memcmp(mshortbuf2, mshortbuf3, cmp_size))
+            return false;
+
+        if (optReturnValue != refReturnValue)
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+bool MBDstHarness::check_count_nonzero_primitive(count_nonzero_t ref, count_nonzero_t opt)
+{
+    int j = 0;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        int opt_cnt = (int)checked(opt, short_test_buff[index] + j);
+        int ref_cnt = ref(short_test_buff[index] + j);
+        if (ref_cnt != opt_cnt)
+            return false;
+        reportfail();
+        j += INCR;
+    }
+    return true;
+}
+
+bool MBDstHarness::check_denoise_dct_primitive(denoiseDct_t ref, denoiseDct_t opt)
+{
+    int j = 0;
+
+    for (int s = 0; s < 4; s++)
+    {
+        int log2TrSize = s + 2;
+        int num = 1 << (log2TrSize * 2);
+        int cmp_size = sizeof(int) * num;
+        int cmp_short = sizeof(short) * num;
+
+        for (int i = 0; i < ITERS; i++)
+        {
+            memset(mubuf1, 0, num * sizeof(uint32_t));
+            memset(mubuf2, 0, num * sizeof(uint32_t));
+            memset(mushortbuf1, 0,  num * sizeof(uint16_t));
+
+            for (int k = 0; k < num; k++)
+                mushortbuf1[k] = rand() % UNSIGNED_SHORT_MAX;
+
+            int index = rand() % TEST_CASES;
+
+            ref(short_denoise_test_buff1[index] + j, mubuf1, mushortbuf1, num);
+            checked(opt, short_denoise_test_buff2[index] + j, mubuf2, mushortbuf1, num);
+
+            if (memcmp(short_denoise_test_buff1[index] + j, short_denoise_test_buff2[index] + j, cmp_short))
+                return false;
+
+            if (memcmp(mubuf1, mubuf2, cmp_size))
+                return false;
+
+            reportfail();
+            j += INCR;
+        }
+        j = 0;
+    }
+
+    return true;
+}
+
+
+bool MBDstHarness::testCorrectness(const EncoderPrimitives& ref, const EncoderPrimitives& opt)
+{
+    for (int i = 0; i < NUM_TR_SIZE; i++)
+    {
+        if (opt.cu[i].dct)
+        {
+            if (!check_dct_primitive(ref.cu[i].dct, opt.cu[i].dct, dctInfo[i].width))
+            {
+                printf("\n%s failed\n", dctInfo[i].name);
+                return false;
+            }
+        }
+    }
+
+    for (int i = 0; i < NUM_TR_SIZE; i++)
+    {
+        if (opt.cu[i].idct)
+        {
+            if (!check_idct_primitive(ref.cu[i].idct, opt.cu[i].idct, idctInfo[i].width))
+            {
+                printf("%s failed\n", idctInfo[i].name);
+                return false;
+            }
+        }
+    }
+
+    if (opt.dst4x4)
+    {
+        if (!check_dct_primitive(ref.dst4x4, opt.dst4x4, 4))
+        {
+            printf("dst4x4: Failed\n");
+            return false;
+        }
+    }
+
+    if (opt.idst4x4)
+    {
+        if (!check_idct_primitive(ref.idst4x4, opt.idst4x4, 4))
+        {
+            printf("idst4x4: Failed\n");
+            return false;
+        }
+    }
+
+    if (opt.dequant_normal)
+    {
+        if (!check_dequant_primitive(ref.dequant_normal, opt.dequant_normal))
+        {
+            printf("dequant: Failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.dequant_scaling)
+    {
+        if (!check_dequant_primitive(ref.dequant_scaling, opt.dequant_scaling))
+        {
+            printf("dequant_scaling: Failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.quant)
+    {
+        if (!check_quant_primitive(ref.quant, opt.quant))
+        {
+            printf("quant: Failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.nquant)
+    {
+        if (!check_nquant_primitive(ref.nquant, opt.nquant))
+        {
+            printf("nquant: Failed!\n");
+            return false;
+        }
+    }
+    for (int i = 0; i < NUM_TR_SIZE; i++)
+    {
+        if (opt.cu[i].count_nonzero)
+        {
+            if (!check_count_nonzero_primitive(ref.cu[i].count_nonzero, opt.cu[i].count_nonzero))
+            {
+                printf("count_nonzero[%dx%d] Failed!\n", 4 << i, 4 << i);
+                return false;
+            }
+        }
+    }
+    if (opt.dequant_scaling)
+    {
+        if (!check_dequant_primitive(ref.dequant_scaling, opt.dequant_scaling))
+        {
+            printf("dequant_scaling: Failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.denoiseDct)
+    {
+        if (!check_denoise_dct_primitive(ref.denoiseDct, opt.denoiseDct))
+        {
+            printf("denoiseDct: Failed!\n");
+            return false;
+        }
+    }
+
+    return true;
+}
+
+void MBDstHarness::measureSpeed(const EncoderPrimitives& ref, const EncoderPrimitives& opt)
+{
+    if (opt.dst4x4)
+    {
+        printf("dst4x4\t");
+        REPORT_SPEEDUP(opt.dst4x4, ref.dst4x4, mbuf1, mshortbuf2, 4);
+    }
+
+    for (int value = 0; value < NUM_TR_SIZE; value++)
+    {
+        if (opt.cu[value].dct)
+        {
+            printf("%s\t", dctInfo[value].name);
+            REPORT_SPEEDUP(opt.cu[value].dct, ref.cu[value].dct, mbuf1, mshortbuf2, dctInfo[value].width);
+        }
+    }
+
+    if (opt.idst4x4)
+    {
+        printf("idst4x4\t");
+        REPORT_SPEEDUP(opt.idst4x4, ref.idst4x4, mbuf1, mshortbuf2, 4);
+    }
+
+    for (int value = 0; value < NUM_TR_SIZE; value++)
+    {
+        if (opt.cu[value].idct)
+        {
+            printf("%s\t", idctInfo[value].name);
+            REPORT_SPEEDUP(opt.cu[value].idct, ref.cu[value].idct, mshortbuf3, mshortbuf2, idctInfo[value].width);
+        }
+    }
+
+    if (opt.dequant_normal)
+    {
+        printf("dequant_normal\t");
+        REPORT_SPEEDUP(opt.dequant_normal, ref.dequant_normal, short_test_buff[0], mshortbuf2, 32 * 32, 70, 1);
+    }
+
+    if (opt.dequant_scaling)
+    {
+        printf("dequant_scaling\t");
+        REPORT_SPEEDUP(opt.dequant_scaling, ref.dequant_scaling, short_test_buff[0], mintbuf3, mshortbuf2, 32 * 32, 5, 1);
+    }
+
+    if (opt.quant)
+    {
+        printf("quant\t\t");
+        REPORT_SPEEDUP(opt.quant, ref.quant, short_test_buff[0], int_test_buff[1], mintbuf3, mshortbuf2, 23, 23785, 32 * 32);
+    }
+
+    if (opt.nquant)
+    {
+        printf("nquant\t\t");
+        REPORT_SPEEDUP(opt.nquant, ref.nquant, short_test_buff[0], int_test_buff[1], mshortbuf2, 23, 23785, 32 * 32);
+    }
+    for (int value = 0; value < NUM_TR_SIZE; value++)
+    {
+        if (opt.cu[value].count_nonzero)
+        {
+            printf("count_nonzero[%dx%d]", 4 << value, 4 << value);
+            REPORT_SPEEDUP(opt.cu[value].count_nonzero, ref.cu[value].count_nonzero, mbuf1);
+        }
+    }
+    if (opt.denoiseDct)
+    {
+        printf("denoiseDct\t");
+        REPORT_SPEEDUP(opt.denoiseDct, ref.denoiseDct, short_denoise_test_buff1[0], mubuf1, mushortbuf1, 32 * 32);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/mbdstharness.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,86 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Min Chen <min.chen@multicorewareinc.com>
+ *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
+ *          Nabajit Deka <nabajit@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef _MBDSTHARNESS_H_1
+#define _MBDSTHARNESS_H_1 1
+
+#include "testharness.h"
+#include "primitives.h"
+
+class MBDstHarness : public TestHarness
+{
+protected:
+
+    enum { ITERS = 128 };
+    enum { INCR = 16 };
+    enum { MAX_TU_SIZE = 32 * 32 };
+    enum { TEST_BUF_SIZE = MAX_TU_SIZE + ITERS * INCR };
+    enum { TEST_CASES = 3 };
+
+    ALIGN_VAR_32(int16_t, mbuf1[TEST_BUF_SIZE]);
+    int16_t mbufdct[TEST_BUF_SIZE];
+    int     mbufidct[TEST_BUF_SIZE];
+
+    int16_t mshortbuf2[MAX_TU_SIZE];
+    int16_t mshortbuf3[MAX_TU_SIZE];
+
+    int     mintbuf1[MAX_TU_SIZE];
+    int     mintbuf2[MAX_TU_SIZE];
+    int     mintbuf3[MAX_TU_SIZE];
+    int     mintbuf4[MAX_TU_SIZE];
+
+    int16_t short_test_buff[TEST_CASES][TEST_BUF_SIZE];
+    int     int_test_buff[TEST_CASES][TEST_BUF_SIZE];
+    int     int_idct_test_buff[TEST_CASES][TEST_BUF_SIZE];
+
+    uint32_t mubuf1[MAX_TU_SIZE];
+    uint32_t mubuf2[MAX_TU_SIZE];
+    uint16_t mushortbuf1[MAX_TU_SIZE];
+
+    int16_t short_denoise_test_buff1[TEST_CASES][TEST_BUF_SIZE];
+    int16_t short_denoise_test_buff2[TEST_CASES][TEST_BUF_SIZE];
+
+    bool check_dequant_primitive(dequant_scaling_t ref, dequant_scaling_t opt);
+    bool check_dequant_primitive(dequant_normal_t ref, dequant_normal_t opt);
+    bool check_quant_primitive(quant_t ref, quant_t opt);
+    bool check_nquant_primitive(nquant_t ref, nquant_t opt);
+    bool check_dct_primitive(dct_t ref, dct_t opt, intptr_t width);
+    bool check_idct_primitive(idct_t ref, idct_t opt, intptr_t width);
+    bool check_count_nonzero_primitive(count_nonzero_t ref, count_nonzero_t opt);
+    bool check_denoise_dct_primitive(denoiseDct_t ref, denoiseDct_t opt);
+
+public:
+
+    MBDstHarness();
+
+    const char *getName() const { return "transforms"; }
+
+    bool testCorrectness(const EncoderPrimitives& ref, const EncoderPrimitives& opt);
+
+    void measureSpeed(const EncoderPrimitives& ref, const EncoderPrimitives& opt);
+};
+
+#endif // ifndef _MBDSTHARNESS_H_1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/pixelharness.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,2970 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "pixelharness.h"
+#include "primitives.h"
+#include "entropy.h"
+
+using namespace X265_NS;
+
+PixelHarness::PixelHarness()
+{
+    /* [0] --- Random values
+     * [1] --- Minimum
+     * [2] --- Maximum */
+    for (int i = 0; i < BUFFSIZE; i++)
+    {
+        pixel_test_buff[0][i]   = rand() % PIXEL_MAX;
+        short_test_buff[0][i]   = (rand() % (2 * SMAX + 1)) - SMAX - 1; // max(SHORT_MIN, min(rand(), SMAX));
+        short_test_buff1[0][i]  = rand() & PIXEL_MAX;                   // For block copy only
+        short_test_buff2[0][i]  = rand() % 16383;                       // for addAvg
+        int_test_buff[0][i]     = rand() % SHORT_MAX;
+        ushort_test_buff[0][i]  = rand() % ((1 << 16) - 1);
+        uchar_test_buff[0][i]   = rand() % ((1 << 8) - 1);
+
+        pixel_test_buff[1][i]   = PIXEL_MIN;
+        short_test_buff[1][i]   = SMIN;
+        short_test_buff1[1][i]  = PIXEL_MIN;
+        short_test_buff2[1][i]  = -16384;
+        int_test_buff[1][i]     = SHORT_MIN;
+        ushort_test_buff[1][i]  = PIXEL_MIN;
+        uchar_test_buff[1][i]   = PIXEL_MIN;
+
+        pixel_test_buff[2][i]   = PIXEL_MAX;
+        short_test_buff[2][i]   = SMAX;
+        short_test_buff1[2][i]  = PIXEL_MAX;
+        short_test_buff2[2][i]  = 16383;
+        int_test_buff[2][i]     = SHORT_MAX;
+        ushort_test_buff[2][i]  = ((1 << 16) - 1);
+        uchar_test_buff[2][i]   = 255;
+
+        pbuf1[i] = rand() & PIXEL_MAX;
+        pbuf2[i] = rand() & PIXEL_MAX;
+        pbuf3[i] = rand() & PIXEL_MAX;
+        pbuf4[i] = rand() & PIXEL_MAX;
+
+        sbuf1[i] = (rand() % (2 * SMAX + 1)) - SMAX - 1; //max(SHORT_MIN, min(rand(), SMAX));
+        sbuf2[i] = (rand() % (2 * SMAX + 1)) - SMAX - 1; //max(SHORT_MIN, min(rand(), SMAX));
+        ibuf1[i] = (rand() % (2 * SMAX + 1)) - SMAX - 1;
+        psbuf1[i] = psbuf4[i] = (rand() % 65) - 32;                   // range is between -32 to 32
+        psbuf2[i] = psbuf5[i] = (rand() % 3) - 1;                     // possible values {-1,0,1}
+        psbuf3[i] = (rand() % 129) - 128;
+        sbuf3[i] = rand() % PIXEL_MAX; // for blockcopy only
+    }
+}
+
+bool PixelHarness::check_pixelcmp(pixelcmp_t ref, pixelcmp_t opt)
+{
+    int j = 0;
+    intptr_t stride = STRIDE;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+        int vres = (int)checked(opt, pixel_test_buff[index1], stride, pixel_test_buff[index2] + j, stride);
+        int cres = ref(pixel_test_buff[index1], stride, pixel_test_buff[index2] + j, stride);
+        if (vres != cres)
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool PixelHarness::check_pixel_sse(pixel_sse_t ref, pixel_sse_t opt)
+{
+    int j = 0;
+    intptr_t stride = STRIDE;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+        sse_ret_t vres = (sse_ret_t)checked(opt, pixel_test_buff[index1], stride, pixel_test_buff[index2] + j, stride);
+        sse_ret_t cres = ref(pixel_test_buff[index1], stride, pixel_test_buff[index2] + j, stride);
+        if (vres != cres)
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool PixelHarness::check_pixel_sse_ss(pixel_sse_ss_t ref, pixel_sse_ss_t opt)
+{
+    int j = 0;
+    intptr_t stride = STRIDE;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+        sse_ret_t vres = (sse_ret_t)checked(opt, short_test_buff[index1], stride, short_test_buff[index2] + j, stride);
+        sse_ret_t cres = ref(short_test_buff[index1], stride, short_test_buff[index2] + j, stride);
+        if (vres != cres)
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool PixelHarness::check_pixelcmp_x3(pixelcmp_x3_t ref, pixelcmp_x3_t opt)
+{
+    ALIGN_VAR_16(int, cres[16]);
+    ALIGN_VAR_16(int, vres[16]);
+    int j = 0;
+    intptr_t stride = FENC_STRIDE - 5;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+        checked(opt, pixel_test_buff[index1],
+                pixel_test_buff[index2] + j,
+                pixel_test_buff[index2] + j + 1,
+                pixel_test_buff[index2] + j + 2, stride, &vres[0]);
+        ref(pixel_test_buff[index1],
+            pixel_test_buff[index2] + j,
+            pixel_test_buff[index2] + j + 1,
+            pixel_test_buff[index2] + j + 2, stride, &cres[0]);
+        if ((vres[0] != cres[0]) || ((vres[1] != cres[1])) || ((vres[2] != cres[2])))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool PixelHarness::check_pixelcmp_x4(pixelcmp_x4_t ref, pixelcmp_x4_t opt)
+{
+    ALIGN_VAR_16(int, cres[16]);
+    ALIGN_VAR_16(int, vres[16]);
+    int j = 0;
+    intptr_t stride = FENC_STRIDE - 5;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+        checked(opt, pixel_test_buff[index1],
+                pixel_test_buff[index2] + j,
+                pixel_test_buff[index2] + j + 1,
+                pixel_test_buff[index2] + j + 2,
+                pixel_test_buff[index2] + j + 3, stride, &vres[0]);
+        ref(pixel_test_buff[index1],
+            pixel_test_buff[index2] + j,
+            pixel_test_buff[index2] + j + 1,
+            pixel_test_buff[index2] + j + 2,
+            pixel_test_buff[index2] + j + 3, stride, &cres[0]);
+
+        if ((vres[0] != cres[0]) || ((vres[1] != cres[1])) || ((vres[2] != cres[2])) || ((vres[3] != cres[3])))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool PixelHarness::check_calresidual(calcresidual_t ref, calcresidual_t opt)
+{
+    ALIGN_VAR_16(int16_t, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int16_t, opt_dest[64 * 64]);
+    memset(ref_dest, 0, 64 * 64 * sizeof(int16_t));
+    memset(opt_dest, 0, 64 * 64 * sizeof(int16_t));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, pbuf1 + j, pixel_test_buff[index] + j, opt_dest, stride);
+        ref(pbuf1 + j, pixel_test_buff[index] + j, ref_dest, stride);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool PixelHarness::check_ssd_s(pixel_ssd_s_t ref, pixel_ssd_s_t opt)
+{
+    int j = 0;
+    for (int i = 0; i < ITERS; i++)
+    {
+        // NOTE: stride must be multiple of 16, because minimum block is 4x4
+        int stride = (STRIDE + (rand() % STRIDE)) & ~15;
+        int cres = ref(sbuf1 + j, stride);
+        int vres = (int)checked(opt, sbuf1 + j, (intptr_t)stride);
+
+        if (cres != vres)
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool PixelHarness::check_weightp(weightp_sp_t ref, weightp_sp_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * (64 + 1)]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * (64 + 1)]);
+
+    memset(ref_dest, 0, 64 * 64 * sizeof(pixel));
+    memset(opt_dest, 0, 64 * 64 * sizeof(pixel));
+    int j = 0;
+    int width = 2 * (rand() % 32 + 1);
+    int height = 8;
+    int w0 = rand() % 128;
+    int shift = rand() % 8; // maximum is 7, see setFromWeightAndOffset()
+    int round = shift ? (1 << (shift - 1)) : 0;
+    int offset = (rand() % 256) - 128;
+    intptr_t stride = 64;
+    const int correction = (IF_INTERNAL_PREC - X265_DEPTH);
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, short_test_buff[index] + j, opt_dest, stride, stride + 1, width, height, w0, round << correction, shift + correction, offset);
+        ref(short_test_buff[index] + j, ref_dest, stride, stride + 1, width, height, w0, round << correction, shift + correction, offset);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+        {
+            opt(short_test_buff[index] + j, opt_dest, stride, stride + 1, width, height, w0, round << correction, shift + correction, offset);
+            return false;
+        }
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool PixelHarness::check_weightp(weightp_pp_t ref, weightp_pp_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    memset(ref_dest, 0, 64 * 64 * sizeof(pixel));
+    memset(opt_dest, 0, 64 * 64 * sizeof(pixel));
+    int j = 0;
+    int width = 16 * (rand() % 4 + 1);
+    int height = 8;
+    int w0 = rand() % 128;
+    int shift = rand() % 8; // maximum is 7, see setFromWeightAndOffset()
+    int round = shift ? (1 << (shift - 1)) : 0;
+    int offset = (rand() % 256) - 128;
+    intptr_t stride = 64;
+    const int correction = (IF_INTERNAL_PREC - X265_DEPTH);
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, pixel_test_buff[index] + j, opt_dest, stride, width, height, w0, round << correction, shift + correction, offset);
+        ref(pixel_test_buff[index] + j, ref_dest, stride, width, height, w0, round << correction, shift + correction, offset);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+        {
+            checked(opt, pixel_test_buff[index] + j, opt_dest, stride, width, height, w0, round << correction, shift + correction, offset);
+            return false;
+        }
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+bool PixelHarness::check_downscale_t(downscale_t ref, downscale_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_destf[32 * 32]);
+    ALIGN_VAR_16(pixel, opt_destf[32 * 32]);
+
+    ALIGN_VAR_16(pixel, ref_desth[32 * 32]);
+    ALIGN_VAR_16(pixel, opt_desth[32 * 32]);
+
+    ALIGN_VAR_16(pixel, ref_destv[32 * 32]);
+    ALIGN_VAR_16(pixel, opt_destv[32 * 32]);
+
+    ALIGN_VAR_16(pixel, ref_destc[32 * 32]);
+    ALIGN_VAR_16(pixel, opt_destc[32 * 32]);
+
+    intptr_t src_stride = 64;
+    intptr_t dst_stride = 32;
+    int bx = 32;
+    int by = 32;
+    int j = 0;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        ref(pixel_test_buff[index] + j, ref_destf, ref_desth, ref_destv,
+            ref_destc, src_stride, dst_stride, bx, by);
+        checked(opt, pixel_test_buff[index] + j, opt_destf, opt_desth, opt_destv,
+                opt_destc, src_stride, dst_stride, bx, by);
+
+        if (memcmp(ref_destf, opt_destf, 32 * 32 * sizeof(pixel)))
+            return false;
+        if (memcmp(ref_desth, opt_desth, 32 * 32 * sizeof(pixel)))
+            return false;
+        if (memcmp(ref_destv, opt_destv, 32 * 32 * sizeof(pixel)))
+            return false;
+        if (memcmp(ref_destc, opt_destc, 32 * 32 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify cpy2Dto1D_shl (copy strided 2D int16 block to a linear buffer with
+// left shift) against the C reference over ITERS random shift/input pairs.
+bool PixelHarness::check_cpy2Dto1D_shl_t(cpy2Dto1D_shl_t ref, cpy2Dto1D_shl_t opt)
+{
+    ALIGN_VAR_16(int16_t, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int16_t, opt_dest[64 * 64]);
+
+    // Poison the outputs with 0xCD so bytes outside the written region must
+    // also agree in the full-buffer memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int shift = (rand() % 7 + 1); // shift amount in [1, 7]
+
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, short_test_buff[index] + j, stride, shift);
+        ref(ref_dest, short_test_buff[index] + j, stride, shift);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify cpy2Dto1D_shr (copy strided 2D int16 block to a linear buffer with
+// right shift/round) against the C reference over ITERS random inputs.
+bool PixelHarness::check_cpy2Dto1D_shr_t(cpy2Dto1D_shr_t ref, cpy2Dto1D_shr_t opt)
+{
+    ALIGN_VAR_16(int16_t, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int16_t, opt_dest[64 * 64]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int shift = (rand() % 7 + 1); // shift amount in [1, 7]
+
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, short_test_buff[index] + j, stride, shift);
+        ref(ref_dest, short_test_buff[index] + j, stride, shift);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify copy_cnt (copy coefficients and return the non-zero count):
+// both the returned count and the full destination buffer must match.
+bool PixelHarness::check_copy_cnt_t(copy_cnt_t ref, copy_cnt_t opt)
+{
+    ALIGN_VAR_16(int16_t, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int16_t, opt_dest[64 * 64]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        int opt_cnt = (int)checked(opt, opt_dest, short_test_buff1[index] + j, stride);
+        int ref_cnt = ref(ref_dest, short_test_buff1[index] + j, stride);
+
+        if ((ref_cnt != opt_cnt) || memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify cpy1Dto2D_shl (copy linear int16 buffer into a strided 2D block
+// with left shift) against the C reference over ITERS random inputs.
+bool PixelHarness::check_cpy1Dto2D_shl_t(cpy1Dto2D_shl_t ref, cpy1Dto2D_shl_t opt)
+{
+    ALIGN_VAR_16(int16_t, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int16_t, opt_dest[64 * 64]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int shift = (rand() % 7 + 1); // shift amount in [1, 7]
+
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, short_test_buff[index] + j, stride, shift);
+        ref(ref_dest, short_test_buff[index] + j, stride, shift);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify cpy1Dto2D_shr (copy linear int16 buffer into a strided 2D block
+// with right shift/round) against the C reference over ITERS random inputs.
+bool PixelHarness::check_cpy1Dto2D_shr_t(cpy1Dto2D_shr_t ref, cpy1Dto2D_shr_t opt)
+{
+    ALIGN_VAR_16(int16_t, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int16_t, opt_dest[64 * 64]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int shift = (rand() % 7 + 1); // shift amount in [1, 7]
+
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, short_test_buff[index] + j, stride, shift);
+        ref(ref_dest, short_test_buff[index] + j, stride, shift);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify pixelavg_pp (average of two pixel blocks, weight 32) against the
+// C reference over ITERS random input pairs.
+// Fix: the checked() harness wrapper instruments the *optimized* candidate
+// in every other check_* routine of this file; the original wrapped the
+// reference (checked(ref, ...)) and called opt bare, so the wrapper's
+// validation was applied to the wrong function. Swapped so that ref runs
+// plain and opt runs under checked().
+bool PixelHarness::check_pixelavg_pp(pixelavg_pp_t ref, pixelavg_pp_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    int j = 0;
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+        ref(ref_dest, stride, pixel_test_buff[index1] + j,
+            stride, pixel_test_buff[index2] + j, stride, 32);
+        checked(opt, opt_dest, stride, pixel_test_buff[index1] + j,
+                stride, pixel_test_buff[index2] + j, stride, 32);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR; // advance input offset so each iteration reads fresh data
+    }
+
+    return true;
+}
+
+// Verify copy_pp (pixel-to-pixel block copy) against the C reference.
+bool PixelHarness::check_copy_pp(copy_pp_t ref, copy_pp_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    // we don't know the partition size so we are checking the entire output buffer so
+    // we must initialize the buffers
+    memset(ref_dest, 0, sizeof(ref_dest));
+    memset(opt_dest, 0, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, stride, pixel_test_buff[index] + j, stride);
+        ref(ref_dest, stride, pixel_test_buff[index] + j, stride);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify copy_sp (int16 residual to pixel block copy with clipping) against
+// the C reference. Destination stride is fixed at 64; source stride STRIDE.
+bool PixelHarness::check_copy_sp(copy_sp_t ref, copy_sp_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    // we don't know the partition size so we are checking the entire output buffer so
+    // we must initialize the buffers
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride1 = 64, stride2 = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, stride1, short_test_buff1[index] + j, stride2);
+        ref(ref_dest, stride1, short_test_buff1[index] + j, stride2);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify copy_ps (pixel block to int16 buffer copy) against the C reference.
+bool PixelHarness::check_copy_ps(copy_ps_t ref, copy_ps_t opt)
+{
+    ALIGN_VAR_16(int16_t, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int16_t, opt_dest[64 * 64]);
+
+    // we don't know the partition size so we are checking the entire output buffer so
+    // we must initialize the buffers
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, stride, pixel_test_buff[index] + j, stride);
+        ref(ref_dest, stride, pixel_test_buff[index] + j, stride);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify copy_ss (int16-to-int16 block copy) against the C reference.
+bool PixelHarness::check_copy_ss(copy_ss_t ref, copy_ss_t opt)
+{
+    ALIGN_VAR_16(int16_t, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int16_t, opt_dest[64 * 64]);
+
+    // we don't know the partition size so we are checking the entire output buffer so
+    // we must initialize the buffers
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, stride, short_test_buff1[index] + j, stride);
+        ref(ref_dest, stride, short_test_buff1[index] + j, stride);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify blockfill_s (fill an int16 block with a constant) against the C
+// reference using a fresh random non-zero value each iteration.
+bool PixelHarness::check_blockfill_s(blockfill_s_t ref, blockfill_s_t opt)
+{
+    ALIGN_VAR_16(int16_t, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int16_t, opt_dest[64 * 64]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    intptr_t stride = 64;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int16_t value = (rand() % SHORT_MAX) + 1; // fill value in [1, SHORT_MAX]
+
+        checked(opt, opt_dest, stride, value);
+        ref(ref_dest, stride, value);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+    }
+
+    return true;
+}
+
+// Verify pixel_sub_ps (pixel-wise subtraction producing an int16 residual
+// block) against the C reference.
+// Fix: the loop originally ran a single iteration (i < 1), which left the
+// input-offset advance (j += INCR) dead code and exercised only one random
+// input pair; run the full ITERS iterations like every sibling check_*.
+bool PixelHarness::check_pixel_sub_ps(pixel_sub_ps_t ref, pixel_sub_ps_t opt)
+{
+    ALIGN_VAR_16(int16_t, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int16_t, opt_dest[64 * 64]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride2 = 64, stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+        checked(opt, opt_dest, stride2, pixel_test_buff[index1] + j,
+                pixel_test_buff[index2] + j, stride, stride);
+        ref(ref_dest, stride2, pixel_test_buff[index1] + j,
+            pixel_test_buff[index2] + j, stride, stride);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(int16_t)))
+            return false;
+
+        reportfail();
+        j += INCR; // advance input offset so each iteration reads fresh data
+    }
+
+    return true;
+}
+
+// Verify scale1D (1-D pixel downscale) against the C reference.
+bool PixelHarness::check_scale1D_pp(scale1D_t ref, scale1D_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    memset(ref_dest, 0, sizeof(ref_dest));
+    memset(opt_dest, 0, sizeof(opt_dest));
+
+    int j = 0;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, pixel_test_buff[index] + j);
+        ref(ref_dest, pixel_test_buff[index] + j);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify scale2D (2-D pixel downscale) against the C reference.
+bool PixelHarness::check_scale2D_pp(scale2D_t ref, scale2D_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    memset(ref_dest, 0, sizeof(ref_dest));
+    memset(opt_dest, 0, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, pixel_test_buff[index] + j, stride);
+        ref(ref_dest, pixel_test_buff[index] + j, stride);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify the block-transpose primitive against the C reference.
+bool PixelHarness::check_transpose(transpose_t ref, transpose_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    memset(ref_dest, 0, sizeof(ref_dest));
+    memset(opt_dest, 0, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, pixel_test_buff[index] + j, stride);
+        ref(ref_dest, pixel_test_buff[index] + j, stride);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify pixel_add_ps (pixel block plus int16 residual, clipped to pixel
+// range) against the C reference over ITERS random input pairs.
+bool PixelHarness::check_pixel_add_ps(pixel_add_ps_t ref, pixel_add_ps_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+    intptr_t stride2 = 64, stride = STRIDE;
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+        checked(opt, opt_dest, stride2, pixel_test_buff[index1] + j, short_test_buff[index2] + j, stride, stride);
+        ref(ref_dest, stride2, pixel_test_buff[index1] + j, short_test_buff[index2] + j, stride, stride);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify the block-variance primitive: compare the packed uint64 result of
+// the optimized function against the C reference.
+bool PixelHarness::check_pixel_var(var_t ref, var_t opt)
+{
+    int j = 0;
+
+    intptr_t stride = STRIDE;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        // NOTE(review): j is advanced below but never added to the input
+        // pointer, so iterations that share an index reuse the same data —
+        // confirm whether "pixel_test_buff[index] + j" was intended.
+        uint64_t vres = checked(opt, pixel_test_buff[index], stride);
+        uint64_t cres = ref(pixel_test_buff[index], stride);
+        if (vres != cres)
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify ssim_4x4x2_core: both implementations accumulate SSIM partial sums
+// (two 4-element groups) which must match bit-exactly.
+bool PixelHarness::check_ssim_4x4x2_core(ssim_4x4x2_core_t ref, ssim_4x4x2_core_t opt)
+{
+    ALIGN_VAR_32(int, sum0[2][4]);
+    ALIGN_VAR_32(int, sum1[2][4]);
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        intptr_t stride = rand() % 64; // random stride in [0, 63]
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+        ref(pixel_test_buff[index1] + i, stride, pixel_test_buff[index2] + i, stride, sum0);
+        checked(opt, pixel_test_buff[index1] + i, stride, pixel_test_buff[index2] + i, stride, sum1);
+
+        if (memcmp(sum0, sum1, sizeof(sum0)))
+            return false;
+
+        reportfail();
+    }
+
+    return true;
+}
+
+// Verify ssim_end4: feeds randomized partial sums into both implementations
+// and accepts float results within an absolute tolerance of 1e-5.
+bool PixelHarness::check_ssim_end(ssim_end4_t ref, ssim_end4_t opt)
+{
+    ALIGN_VAR_32(int, sum0[5][4]);
+    ALIGN_VAR_32(int, sum1[5][4]);
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        // random 12-bit partial sums
+        for (int j = 0; j < 5; j++)
+        {
+            for (int k = 0; k < 4; k++)
+            {
+                sum0[j][k] = rand() % (1 << 12);
+                sum1[j][k] = rand() % (1 << 12);
+            }
+        }
+
+        int width = (rand() % 4) + 1; // range[1-4]
+        float cres = ref(sum0, sum1, width);
+        float vres = checked_float(opt, sum0, sum1, width);
+        // tolerance-based compare: float rounding may differ between paths
+        if (fabs(vres - cres) > 0.00001)
+            return false;
+
+        reportfail();
+    }
+
+    return true;
+}
+
+// Verify addAvg (bidirectional weighted average of two int16 inputs into a
+// pixel block) against the C reference.
+bool PixelHarness::check_addAvg(addAvg_t ref, addAvg_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    int j = 0;
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+    intptr_t stride = STRIDE;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index1 = rand() % TEST_CASES;
+        int index2 = rand() % TEST_CASES;
+        ref(short_test_buff2[index1] + j, short_test_buff2[index2] + j, ref_dest, stride, stride, stride);
+        checked(opt, short_test_buff2[index1] + j, short_test_buff2[index2] + j, opt_dest, stride, stride, stride);
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify the SAO sign-calculation primitive: produces an int8 sign row from
+// two pixel rows; widths are randomized in [1, 64].
+bool PixelHarness::check_calSign(sign_t ref, sign_t opt)
+{
+    ALIGN_VAR_16(int8_t, ref_dest[64 * 2]);
+    ALIGN_VAR_16(int8_t, opt_dest[64 * 2]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int width = (rand() % 64) + 1; // width in [1, 64]
+
+        ref(ref_dest, pbuf2 + j, pbuf3 + j, width);
+        checked(opt, opt_dest, pbuf2 + j, pbuf3 + j, width);
+
+        if (memcmp(ref_dest, opt_dest, sizeof(ref_dest)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify saoCuOrgE0 (SAO edge-offset class EO_0, horizontal) against the C
+// reference. Both destinations start from identical random pixel data since
+// the primitive updates the reconstruction in place.
+bool PixelHarness::check_saoCuOrgE0_t(saoCuOrgE0_t ref, saoCuOrgE0_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    for (int i = 0; i < 64 * 64; i++)
+        ref_dest[i] = opt_dest[i] = rand() % (PIXEL_MAX);
+
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int width = 16 * (rand() % 4 + 1); // 16/32/48/64
+        int stride = width + 1;
+
+        // psbuf2 and psbuf5 presumably hold identical data (the E1/E3 checks
+        // memcmp them after the call); here only the dest is compared.
+        ref(ref_dest, psbuf1 + j, width, psbuf2 + j, stride);
+        checked(opt, opt_dest, psbuf1 + j, width, psbuf5 + j, stride);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify saoCuOrgE1 (SAO edge-offset class EO_1, vertical) against the C
+// reference; also verifies the in-place sign buffer (psbuf2 vs psbuf5).
+bool PixelHarness::check_saoCuOrgE1_t(saoCuOrgE1_t ref, saoCuOrgE1_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    for (int i = 0; i < 64 * 64; i++)
+        ref_dest[i] = opt_dest[i] = rand() % (PIXEL_MAX);
+
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int width = 16 * (rand() % 4 + 1); // 16/32/48/64
+        int stride = width + 1;
+
+        ref(ref_dest, psbuf2 + j, psbuf1 + j, stride, width);
+        checked(opt, opt_dest, psbuf5 + j, psbuf1 + j, stride, width);
+
+        // compare dest pixels AND the updated sign buffers
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)) || memcmp(psbuf2, psbuf5, BUFFSIZE))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify the two saoCuOrgE2 variants (SAO edge-offset class EO_2, 135-degree
+// diagonal): ref[0]/opt[0] handle width <= 16, ref[1]/opt[1] wider blocks.
+// The in-place upBufft output (psbuf1 vs psbuf4) is also compared.
+bool PixelHarness::check_saoCuOrgE2_t(saoCuOrgE2_t ref[2], saoCuOrgE2_t opt[2])
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    for (int i = 0; i < 64 * 64; i++)
+        ref_dest[i] = opt_dest[i] = rand() % (PIXEL_MAX);
+
+    for (int id = 0; id < 2; id++)
+    {
+        int j = 0;
+        if (opt[id]) // skip variants with no optimized implementation
+        {
+            for (int i = 0; i < ITERS; i++)
+            {
+                // width randomized so that id selects the narrow/wide variant
+                int width = 16 * (1 << (id * (rand() % 2 + 1))) - (rand() % 2);
+                int stride = width + 1;
+
+                ref[width > 16](ref_dest, psbuf1 + j, psbuf2 + j, psbuf3 + j, width, stride);
+                checked(opt[width > 16], opt_dest, psbuf4 + j, psbuf2 + j, psbuf3 + j, width, stride);
+
+                if (memcmp(psbuf1 + j, psbuf4 + j, width * sizeof(int8_t)))
+                    return false;
+
+                if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+                    return false;
+
+                reportfail();
+                j += INCR;
+            }
+        }
+    }
+
+    return true;
+}
+
+// Verify saoCuOrgE3 (SAO edge-offset class EO_3, 45-degree diagonal) against
+// the C reference; also compares the in-place sign buffer (psbuf2 vs psbuf5).
+bool PixelHarness::check_saoCuOrgE3_t(saoCuOrgE3_t ref, saoCuOrgE3_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    for (int i = 0; i < 64 * 64; i++)
+        ref_dest[i] = opt_dest[i] = rand() % (PIXEL_MAX);
+
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int stride = 16 * (rand() % 4 + 1); // 16/32/48/64
+        int start = rand() % 2;
+        int end = 16 - rand() % 2;
+
+        ref(ref_dest, psbuf2 + j, psbuf1 + j, stride, start, end);
+        checked(opt, opt_dest, psbuf5 + j, psbuf1 + j, stride, start, end);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)) || memcmp(psbuf2, psbuf5, BUFFSIZE))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify saoCuStatsBO (SAO band-offset statistics gathering): both
+// implementations accumulate into identically pre-seeded stats/count arrays
+// which must match exactly afterwards.
+bool PixelHarness::check_saoCuStatsBO_t(saoCuStatsBO_t ref, saoCuStatsBO_t opt)
+{
+    enum { NUM_EDGETYPE = 33 }; // classIdx = 1 + (rec[x] >> 3);
+    int32_t stats_ref[NUM_EDGETYPE];
+    int32_t stats_vec[NUM_EDGETYPE];
+
+    int32_t count_ref[NUM_EDGETYPE];
+    int32_t count_vec[NUM_EDGETYPE];
+
+    int j = 0;
+    for (int i = 0; i < ITERS; i++)
+    {
+        // initialize input data to random, the dynamic range wrong but good to verify our asm code
+        for (int x = 0; x < NUM_EDGETYPE; x++)
+        {
+            stats_ref[x] = stats_vec[x] = rand();
+            count_ref[x] = count_vec[x] = rand();
+        }
+
+        intptr_t stride = 16 * (rand() % 4 + 1);
+        int endX = MAX_CU_SIZE - (rand() % 5);
+        int endY = MAX_CU_SIZE - (rand() % 4) - 1;
+
+        // NOTE(review): pbuf3 is offset by +1 but not by +j, unlike the E0
+        // variant below; both ref and opt see the same input so the compare
+        // is still valid — confirm whether "+ j" was intended.
+        ref(pbuf2 + j + 1, pbuf3 + 1, stride, endX, endY, stats_ref, count_ref);
+        checked(opt, pbuf2 + j + 1, pbuf3 + 1, stride, endX, endY, stats_vec, count_vec);
+
+        if (memcmp(stats_ref, stats_vec, sizeof(stats_ref)) || memcmp(count_ref, count_vec, sizeof(count_ref)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify saoCuStatsE0 (SAO EO_0 statistics gathering) against the C
+// reference using identically seeded stats/count accumulators.
+bool PixelHarness::check_saoCuStatsE0_t(saoCuStatsE0_t ref, saoCuStatsE0_t opt)
+{
+    enum { NUM_EDGETYPE = 5 };
+    int32_t stats_ref[NUM_EDGETYPE];
+    int32_t stats_vec[NUM_EDGETYPE];
+
+    int32_t count_ref[NUM_EDGETYPE];
+    int32_t count_vec[NUM_EDGETYPE];
+
+    int j = 0;
+    for (int i = 0; i < ITERS; i++)
+    {
+        // initialize input data to random, the dynamic range wrong but good to verify our asm code
+        for (int x = 0; x < NUM_EDGETYPE; x++)
+        {
+            stats_ref[x] = stats_vec[x] = rand();
+            count_ref[x] = count_vec[x] = rand();
+        }
+
+        intptr_t stride = 16 * (rand() % 4 + 1);
+        int endX = MAX_CU_SIZE - (rand() % 5) - 1;
+        int endY = MAX_CU_SIZE - (rand() % 4) - 1;
+
+        ref(pbuf2 + j + 1, pbuf3 + j + 1, stride, endX, endY, stats_ref, count_ref);
+        checked(opt, pbuf2 + j + 1, pbuf3 + j + 1, stride, endX, endY, stats_vec, count_vec);
+
+        if (memcmp(stats_ref, stats_vec, sizeof(stats_ref)) || memcmp(count_ref, count_vec, sizeof(count_ref)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify saoCuStatsE1 (SAO EO_1 statistics gathering): stats/count arrays
+// and the in-place upBuff1 sign buffer must all match the C reference.
+bool PixelHarness::check_saoCuStatsE1_t(saoCuStatsE1_t ref, saoCuStatsE1_t opt)
+{
+    enum { NUM_EDGETYPE = 5 };
+    int32_t stats_ref[NUM_EDGETYPE];
+    int32_t stats_vec[NUM_EDGETYPE];
+
+    int32_t count_ref[NUM_EDGETYPE];
+    int32_t count_vec[NUM_EDGETYPE];
+
+    // sign buffers have one guard element on each side; callees get +1
+    int8_t _upBuff1_ref[MAX_CU_SIZE + 2], *upBuff1_ref = _upBuff1_ref + 1;
+    int8_t _upBuff1_vec[MAX_CU_SIZE + 2], *upBuff1_vec = _upBuff1_vec + 1;
+
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        // initialize input data to random, the dynamic range wrong but good to verify our asm code
+        for (int x = 0; x < NUM_EDGETYPE; x++)
+        {
+            stats_ref[x] = stats_vec[x] = rand();
+            count_ref[x] = count_vec[x] = rand();
+        }
+
+        // initial sign
+        for (int x = 0; x < MAX_CU_SIZE + 2; x++)
+            _upBuff1_ref[x] = _upBuff1_vec[x] = (rand() % 3) - 1;
+
+        intptr_t stride = 16 * (rand() % 4 + 1);
+        int endX = MAX_CU_SIZE - (rand() % 5);
+        int endY = MAX_CU_SIZE - (rand() % 4) - 1;
+
+        // NOTE(review): j is advanced below but not applied to pbuf2/pbuf3 —
+        // confirm whether an input offset was intended here.
+        ref(pbuf2 + 1, pbuf3 + 1, stride, upBuff1_ref, endX, endY, stats_ref, count_ref);
+        checked(opt, pbuf2 + 1, pbuf3 + 1, stride, upBuff1_vec, endX, endY, stats_vec, count_vec);
+
+        if (   memcmp(_upBuff1_ref, _upBuff1_vec, sizeof(_upBuff1_ref))
+            || memcmp(stats_ref, stats_vec, sizeof(stats_ref))
+            || memcmp(count_ref, count_vec, sizeof(count_ref)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify saoCuStatsE2 (SAO EO_2 statistics gathering): stats/count arrays
+// plus both in-place sign buffers (upBuff1, upBufft) must match; iterates
+// 10x the usual count per the in-line note below.
+bool PixelHarness::check_saoCuStatsE2_t(saoCuStatsE2_t ref, saoCuStatsE2_t opt)
+{
+    enum { NUM_EDGETYPE = 5 };
+    int32_t stats_ref[NUM_EDGETYPE];
+    int32_t stats_vec[NUM_EDGETYPE];
+
+    int32_t count_ref[NUM_EDGETYPE];
+    int32_t count_vec[NUM_EDGETYPE];
+
+    // sign buffers have one guard element on each side; callees get +1
+    int8_t _upBuff1_ref[MAX_CU_SIZE + 2], *upBuff1_ref = _upBuff1_ref + 1;
+    int8_t _upBufft_ref[MAX_CU_SIZE + 2], *upBufft_ref = _upBufft_ref + 1;
+    int8_t _upBuff1_vec[MAX_CU_SIZE + 2], *upBuff1_vec = _upBuff1_vec + 1;
+    int8_t _upBufft_vec[MAX_CU_SIZE + 2], *upBufft_vec = _upBufft_vec + 1;
+
+    int j = 0;
+
+    // NOTE: verify more times since our asm is NOT exact match to C, the output of upBuff* will be DIFFERENT
+    for (int i = 0; i < ITERS * 10; i++)
+    {
+        // initialize input data to random, the dynamic range wrong but good to verify our asm code
+        for (int x = 0; x < NUM_EDGETYPE; x++)
+        {
+            stats_ref[x] = stats_vec[x] = rand();
+            count_ref[x] = count_vec[x] = rand();
+        }
+
+        // initial sign
+        for (int x = 0; x < MAX_CU_SIZE + 2; x++)
+        {
+            _upBuff1_ref[x] = _upBuff1_vec[x] = (rand() % 3) - 1;
+            _upBufft_ref[x] = _upBufft_vec[x] = (rand() % 3) - 1;
+        }
+
+        intptr_t stride = 16 * (rand() % 4 + 1);
+        int endX = MAX_CU_SIZE - (rand() % 5) - 1;
+        int endY = MAX_CU_SIZE - (rand() % 4) - 1;
+
+        ref(pbuf2 + 1, pbuf3 + 1, stride, upBuff1_ref, upBufft_ref, endX, endY, stats_ref, count_ref);
+        checked(opt, pbuf2 + 1, pbuf3 + 1, stride, upBuff1_vec, upBufft_vec, endX, endY, stats_vec, count_vec);
+
+        // TODO: don't check upBuff*, the latest output pixels different, and can move into stack temporary buffer in future
+        if (   memcmp(_upBuff1_ref, _upBuff1_vec, sizeof(_upBuff1_ref))
+            || memcmp(_upBufft_ref, _upBufft_vec, sizeof(_upBufft_ref))
+            || memcmp(stats_ref, stats_vec, sizeof(stats_ref))
+            || memcmp(count_ref, count_vec, sizeof(count_ref)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify saoCuStatsE3 (SAO EO_3 statistics gathering): stats/count arrays
+// and the in-place upBuff1 sign buffer must match the C reference.
+bool PixelHarness::check_saoCuStatsE3_t(saoCuStatsE3_t ref, saoCuStatsE3_t opt)
+{
+    enum { NUM_EDGETYPE = 5 };
+    int32_t stats_ref[NUM_EDGETYPE];
+    int32_t stats_vec[NUM_EDGETYPE];
+
+    int32_t count_ref[NUM_EDGETYPE];
+    int32_t count_vec[NUM_EDGETYPE];
+
+    // sign buffer has one guard element on each side; callees get +1
+    int8_t _upBuff1_ref[MAX_CU_SIZE + 2], *upBuff1_ref = _upBuff1_ref + 1;
+    int8_t _upBuff1_vec[MAX_CU_SIZE + 2], *upBuff1_vec = _upBuff1_vec + 1;
+
+    int j = 0;
+
+    // (const pixel *fenc, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count)
+    for (int i = 0; i < ITERS; i++)
+    {
+        // initialize input data to random, the dynamic range wrong but good to verify our asm code
+        for (int x = 0; x < NUM_EDGETYPE; x++)
+        {
+            stats_ref[x] = stats_vec[x] = rand();
+            count_ref[x] = count_vec[x] = rand();
+        }
+
+        // initial sign
+        for (int x = 0; x < (int)sizeof(_upBuff1_ref); x++)
+        {
+            _upBuff1_ref[x] = _upBuff1_vec[x] = (rand() % 3) - 1;
+        }
+
+        intptr_t stride = 16 * (rand() % 4 + 1);
+        int endX = MAX_CU_SIZE - (rand() % 5) - 1;
+        int endY = MAX_CU_SIZE - (rand() % 4) - 1;
+
+        ref(pbuf2, pbuf3, stride, upBuff1_ref, endX, endY, stats_ref, count_ref);
+        checked(opt, pbuf2, pbuf3, stride, upBuff1_vec, endX, endY, stats_vec, count_vec);
+
+        if (   memcmp(_upBuff1_ref, _upBuff1_vec, sizeof(_upBuff1_ref))
+            || memcmp(stats_ref, stats_vec, sizeof(stats_ref))
+            || memcmp(count_ref, count_vec, sizeof(count_ref)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify the 32-wide saoCuOrgE3 variant: same contract as check_saoCuOrgE3_t
+// but with stride/end randomized over 32-pixel multiples.
+bool PixelHarness::check_saoCuOrgE3_32_t(saoCuOrgE3_t ref, saoCuOrgE3_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    for (int i = 0; i < 64 * 64; i++)
+        ref_dest[i] = opt_dest[i] = rand() % (PIXEL_MAX);
+
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int stride = 32 * (rand() % 2 + 1); // 32 or 64
+        int start = rand() % 2;
+        int end = (32 * (rand() % 2 + 1)) - rand() % 2;
+
+        ref(ref_dest, psbuf2 + j, psbuf1 + j, stride, start, end);
+        checked(opt, opt_dest, psbuf5 + j, psbuf1 + j, stride, start, end);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)) || memcmp(psbuf2, psbuf5, BUFFSIZE))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify planecopy_sp (uint16 plane to pixel plane with right-shift 8 and
+// clamping to the pixel max) against the C reference over randomized sizes.
+bool PixelHarness::check_planecopy_sp(planecopy_sp_t ref, planecopy_sp_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+    int width = 32 + rand() % 32;  // [32, 63]
+    int height = 32 + rand() % 32; // [32, 63]
+    intptr_t srcStride = 64;
+    intptr_t dstStride = width;
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, ushort_test_buff[index] + j, srcStride, opt_dest, dstStride, width, height, (int)8, (uint16_t)((1 << X265_DEPTH) - 1));
+        ref(ushort_test_buff[index] + j, srcStride, ref_dest, dstStride, width, height, (int)8, (uint16_t)((1 << X265_DEPTH) - 1));
+
+        if (memcmp(ref_dest, opt_dest, width * height * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify planecopy_cp (uint8 plane to pixel plane with left-shift 2) against
+// the C reference; the full destination buffer is compared.
+bool PixelHarness::check_planecopy_cp(planecopy_cp_t ref, planecopy_cp_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64 * 2]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64 * 2]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    int width = 16 + rand() % 48;  // [16, 63]
+    int height = 16 + rand() % 48; // [16, 63]
+    intptr_t srcStride = 64;
+    intptr_t dstStride = width;
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, uchar_test_buff[index] + j, srcStride, opt_dest, dstStride, width, height, (int)2);
+        ref(uchar_test_buff[index] + j, srcStride, ref_dest, dstStride, width, height, (int)2);
+
+        if (memcmp(ref_dest, opt_dest, sizeof(ref_dest)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify the CU-tree propagate-cost primitive (lookahead) against the C
+// reference over a randomized row width.
+bool PixelHarness::check_cutree_propagate_cost(cutree_propagate_cost ref, cutree_propagate_cost opt)
+{
+    ALIGN_VAR_16(int, ref_dest[64 * 64]);
+    ALIGN_VAR_16(int, opt_dest[64 * 64]);
+
+    // 0xCD poison: untouched bytes must also match in the memcmp below.
+    memset(ref_dest, 0xCD, sizeof(ref_dest));
+    memset(opt_dest, 0xCD, sizeof(opt_dest));
+
+    double fps = 1.0;
+    int width = 16 + rand() % 64; // [16, 79]
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int index = i % TEST_CASES;
+        checked(opt, opt_dest, ushort_test_buff[index] + j, int_test_buff[index] + j, ushort_test_buff[index] + j, int_test_buff[index] + j, &fps, width);
+        ref(ref_dest, ushort_test_buff[index] + j, int_test_buff[index] + j, ushort_test_buff[index] + j, int_test_buff[index] + j, &fps, width);
+
+        // NOTE(review): the compare length uses sizeof(pixel) although the
+        // destination elements are int, so only a prefix of the written
+        // output is checked — likely intended width * sizeof(int); confirm.
+        if (memcmp(ref_dest, opt_dest, width * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify the psy-cost primitive on pixel inputs: scalar results from the
+// optimized and reference implementations must match exactly.
+bool PixelHarness::check_psyCost_pp(pixelcmp_t ref, pixelcmp_t opt)
+{
+    int j = 0, index1, index2, optres, refres;
+    intptr_t stride = STRIDE;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        index1 = rand() % TEST_CASES;
+        index2 = rand() % TEST_CASES;
+        // first input stays at offset 0; only the second advances with j
+        optres = (int)checked(opt, pixel_test_buff[index1], stride, pixel_test_buff[index2] + j, stride);
+        refres = ref(pixel_test_buff[index1], stride, pixel_test_buff[index2] + j, stride);
+
+        if (optres != refres)
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify the psy-cost primitive on int16 inputs: scalar results from the
+// optimized and reference implementations must match exactly.
+bool PixelHarness::check_psyCost_ss(pixelcmp_ss_t ref, pixelcmp_ss_t opt)
+{
+    int j = 0, index1, index2, optres, refres;
+    intptr_t stride = STRIDE;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        index1 = rand() % TEST_CASES;
+        index2 = rand() % TEST_CASES;
+        // first input stays at offset 0; only the second advances with j
+        optres = (int)checked(opt, short_test_buff[index1], stride, short_test_buff[index2] + j, stride);
+        refres = ref(short_test_buff[index1], stride, short_test_buff[index2] + j, stride);
+
+        if (optres != refres)
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Verify saoCuOrgB0 (SAO band-offset filtering) against the C reference;
+// both destinations start from identical random pixels since the primitive
+// updates the reconstruction in place.
+bool PixelHarness::check_saoCuOrgB0_t(saoCuOrgB0_t ref, saoCuOrgB0_t opt)
+{
+    ALIGN_VAR_16(pixel, ref_dest[64 * 64]);
+    ALIGN_VAR_16(pixel, opt_dest[64 * 64]);
+
+    for (int i = 0; i < 64 * 64; i++)
+        ref_dest[i] = opt_dest[i] = rand() % (PIXEL_MAX);
+
+    int j = 0;
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int width = 16 * (rand() % 4 + 1); // 16/32/48/64
+        int height = rand() % 63 + 2;      // [2, 64]
+        int stride = width;
+
+        ref(ref_dest, psbuf1 + j, width, height, stride);
+        checked(opt, opt_dest, psbuf1 + j, width, height, stride);
+
+        if (memcmp(ref_dest, opt_dest, 64 * 64 * sizeof(pixel)))
+            return false;
+
+        reportfail();
+        j += INCR;
+    }
+
+    return true;
+}
+
+// Validate the scanPosLast primitive: build a random sparse coefficient block,
+// run reference and optimized versions, then compare the returned scan position
+// and the per-coefficient-group coeffNum/coeffSign/coeffFlag output arrays.
+bool PixelHarness::check_scanPosLast(scanPosLast_t ref, scanPosLast_t opt)
+{
+    ALIGN_VAR_16(coeff_t, ref_src[32 * 32 + ITERS * 2]);
+    uint8_t ref_coeffNum[MLS_GRP_NUM], opt_coeffNum[MLS_GRP_NUM];      // value range[0, 16]
+    uint16_t ref_coeffSign[MLS_GRP_NUM], opt_coeffSign[MLS_GRP_NUM];    // bit mask map for non-zero coeff sign
+    uint16_t ref_coeffFlag[MLS_GRP_NUM], opt_coeffFlag[MLS_GRP_NUM];    // bit mask map for non-zero coeff
+
+    // NOTE(review): totalCoeffs is accumulated but never read afterwards
+    int totalCoeffs = 0;
+    for (int i = 0; i < 32 * 32; i++)
+    {
+        ref_src[i] = rand() & SHORT_MAX;
+
+        // bias towards zero coefficients (roughly two thirds of the buffer)
+        if (ref_src[i] < SHORT_MAX * 2 / 3)
+            ref_src[i] = 0;
+
+        // bias towards negative values (~80% of non-zero coeffs)
+        if ((rand() % 10) < 8)
+            ref_src[i] *= -1;
+        totalCoeffs += (ref_src[i] != 0);
+    }
+
+    // extra test area all of 0x1234
+    for (int i = 0; i < ITERS * 2; i++)
+    {
+        ref_src[32 * 32 + i] = 0x1234;
+    }
+
+
+    // poison the output arrays so untouched entries are detectable
+    memset(ref_coeffNum, 0xCD, sizeof(ref_coeffNum));
+    memset(ref_coeffSign, 0xCD, sizeof(ref_coeffSign));
+    memset(ref_coeffFlag, 0xCD, sizeof(ref_coeffFlag));
+
+    memset(opt_coeffNum, 0xCD, sizeof(opt_coeffNum));
+    memset(opt_coeffSign, 0xCD, sizeof(opt_coeffSign));
+    memset(opt_coeffFlag, 0xCD, sizeof(opt_coeffFlag));
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int rand_scan_type = rand() % NUM_SCAN_TYPE;
+        int rand_scan_size = rand() % NUM_SCAN_SIZE;
+        int rand_numCoeff = 0;
+
+        // count the non-zero coeffs inside the chosen transform block
+        for (int j = 0; j < 1 << (2 * (rand_scan_size + 2)); j++)
+            rand_numCoeff += (ref_src[i + j] != 0);
+
+        // at least one coeff in transform block
+        if (rand_numCoeff == 0)
+        {
+            ref_src[i + (1 << (2 * (rand_scan_size + 2))) - 1] = -1;
+            rand_numCoeff = 1;
+        }
+
+        const int trSize = (1 << (rand_scan_size + 2));
+        const uint16_t* const scanTbl = g_scanOrder[rand_scan_type][rand_scan_size];
+        const uint16_t* const scanTblCG4x4 = g_scan4x4[rand_scan_size <= (MDCS_LOG2_MAX_SIZE - 2) ? rand_scan_type : SCAN_DIAG];
+
+        int ref_scanPos = ref(scanTbl, ref_src + i, ref_coeffSign, ref_coeffFlag, ref_coeffNum, rand_numCoeff, scanTblCG4x4, trSize);
+        int opt_scanPos = (int)checked(opt, scanTbl, ref_src + i, opt_coeffSign, opt_coeffFlag, opt_coeffNum, rand_numCoeff, scanTblCG4x4, trSize);
+
+        if (ref_scanPos != opt_scanPos)
+            return false;
+
+        // walk the per-CG outputs until all rand_numCoeff coeffs are accounted for
+        for (int j = 0; rand_numCoeff; j++)
+        {
+            if (ref_coeffSign[j] != opt_coeffSign[j])
+                return false;
+
+            if (ref_coeffFlag[j] != opt_coeffFlag[j])
+                return false;
+
+            if (ref_coeffNum[j] != opt_coeffNum[j])
+                return false;
+
+            rand_numCoeff -= ref_coeffNum[j];
+        }
+
+        // the per-CG counts must sum exactly to the total coeff count
+        if (rand_numCoeff != 0)
+            return false;
+
+        reportfail();
+    }
+
+    return true;
+}
+
+// Validate the findPosFirstLast primitive over random 4-coefficient-row groups,
+// including the special case where an entire coefficient group is zero (there
+// only the low 16 bits of the result are defined).
+bool PixelHarness::check_findPosFirstLast(findPosFirstLast_t ref, findPosFirstLast_t opt)
+{
+    ALIGN_VAR_16(coeff_t, ref_src[4 * 32 + ITERS * 2]);
+    memset(ref_src, 0, sizeof(ref_src));
+
+    // minus ITERS for keep probability to generate all zeros block
+    for (int i = 0; i < 4 * 32 - ITERS; i++)
+    {
+        ref_src[i] = rand() & SHORT_MAX;
+    }
+
+    // extra test area all of Zeros
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int rand_scan_type = rand() % NUM_SCAN_TYPE;
+        int rand_scan_size = (rand() % NUM_SCAN_SIZE) + 2;
+        const int trSize = (1 << rand_scan_size);
+        coeff_t *rand_src = ref_src + i;
+
+        const uint16_t* const scanTbl = g_scan4x4[rand_scan_type];
+
+        // scan the 4x4 group manually to detect the all-zero case
+        int j;
+        for (j = 0; j < SCAN_SET_SIZE; j++)
+        {
+            const uint32_t idxY = j / MLS_CG_SIZE;
+            const uint32_t idxX = j % MLS_CG_SIZE;
+            if (rand_src[idxY * trSize + idxX]) break;
+        }
+
+        uint32_t ref_scanPos = ref(rand_src, trSize, scanTbl);
+        // NOTE(review): result is cast through (int) before being stored in a
+        // uint32_t — benign on two's-complement targets, but worth confirming
+        uint32_t opt_scanPos = (int)checked(opt, rand_src, trSize, scanTbl);
+
+        // specially case: all coeff group are zero
+        if (j >= SCAN_SET_SIZE)
+        {
+            // all zero block the high 16-bits undefined
+            if ((uint16_t)ref_scanPos != (uint16_t)opt_scanPos)
+                return false;
+        }
+        else if (ref_scanPos != opt_scanPos)
+            return false;
+
+        reportfail();
+    }
+
+    return true;
+}
+
+// Validate the costCoeffNxN primitive: generates a random coefficient block and
+// a random CABAC significance-context state, runs reference and optimized
+// versions, and compares the returned bit cost, the updated context tables and
+// the absCoeff output buffer (full-buffer compare to catch overwrites).
+bool PixelHarness::check_costCoeffNxN(costCoeffNxN_t ref, costCoeffNxN_t opt)
+{
+    ALIGN_VAR_16(coeff_t, ref_src[32 * 32 + ITERS * 3]);
+    ALIGN_VAR_32(uint16_t, ref_absCoeff[1 << MLS_CG_SIZE]);
+    ALIGN_VAR_32(uint16_t, opt_absCoeff[1 << MLS_CG_SIZE]);
+
+    // poison the output buffers so untouched entries are detectable
+    memset(ref_absCoeff, 0xCD, sizeof(ref_absCoeff));
+    memset(opt_absCoeff, 0xCD, sizeof(opt_absCoeff));
+
+    // NOTE(review): totalCoeffs is accumulated but never read afterwards
+    int totalCoeffs = 0;
+    for (int i = 0; i < 32 * 32; i++)
+    {
+        ref_src[i] = rand() & SHORT_MAX;
+
+        // bias towards zero coefficients (roughly two thirds of the buffer)
+        if (ref_src[i] < SHORT_MAX * 2 / 3)
+            ref_src[i] = 0;
+
+        // bias towards negative values (~80% of non-zero coeffs)
+        if ((rand() % 10) < 8)
+            ref_src[i] *= -1;
+        totalCoeffs += (ref_src[i] != 0);
+    }
+
+    // extra test area all of 0x1234
+    for (int i = 0; i < ITERS * 3; i++)
+    {
+        ref_src[32 * 32 + i] = 0x1234;
+    }
+
+    // generate CABAC context table; both copies start from identical state
+    uint8_t m_contextState_ref[OFF_SIG_FLAG_CTX + NUM_SIG_FLAG_CTX_LUMA];
+    uint8_t m_contextState_opt[OFF_SIG_FLAG_CTX + NUM_SIG_FLAG_CTX_LUMA];
+    for (int k = 0; k < (OFF_SIG_FLAG_CTX + NUM_SIG_FLAG_CTX_LUMA); k++)
+    {
+        m_contextState_ref[k] = (rand() % (125 - 2)) + 2;
+        m_contextState_opt[k] = m_contextState_ref[k];
+    }
+    uint8_t *const ref_baseCtx = m_contextState_ref;
+    uint8_t *const opt_baseCtx = m_contextState_opt;
+
+    for (int i = 0; i < ITERS * 2; i++)
+    {
+        int rand_scan_type = rand() % NUM_SCAN_TYPE;
+        // NOTE(review): rand() % 16 yields [0,15]; the original comment claimed
+        // [1,15] — confirm whether 0 is a valid scanPosSigOff for the primitive
+        int rand_scanPosSigOff = rand() % 16;
+        int rand_patternSigCtx = rand() % 4; //range [0,3]
+        int rand_scan_size = rand() % NUM_SCAN_SIZE;
+        int offset; // the value have a exact range, details in CoeffNxN()
+        if (rand_scan_size == 2)
+            offset = 0;
+        else if (rand_scan_size == 3)
+            offset = 9;
+        else
+            offset = 12;
+
+        const int trSize = (1 << (rand_scan_size + 2));
+        ALIGN_VAR_32(static const uint8_t, table_cnt[5][SCAN_SET_SIZE]) =
+        {
+            // patternSigCtx = 0
+            {
+                2, 1, 1, 0,
+                1, 1, 0, 0,
+                1, 0, 0, 0,
+                0, 0, 0, 0,
+            },
+            // patternSigCtx = 1
+            {
+                2, 2, 2, 2,
+                1, 1, 1, 1,
+                0, 0, 0, 0,
+                0, 0, 0, 0,
+            },
+            // patternSigCtx = 2
+            {
+                2, 1, 0, 0,
+                2, 1, 0, 0,
+                2, 1, 0, 0,
+                2, 1, 0, 0,
+            },
+            // patternSigCtx = 3
+            {
+                2, 2, 2, 2,
+                2, 2, 2, 2,
+                2, 2, 2, 2,
+                2, 2, 2, 2,
+            },
+            // 4x4
+            {
+                0, 1, 4, 5,
+                2, 3, 4, 5,
+                6, 6, 8, 8,
+                7, 7, 8, 8
+            }
+        };
+        const uint8_t *rand_tabSigCtx = table_cnt[(rand_scan_size == 2) ? 4 : (uint32_t)rand_patternSigCtx];
+        const uint16_t* const scanTbl = g_scanOrder[rand_scan_type][rand_scan_size];
+        const uint16_t* const scanTblCG4x4 = g_scan4x4[rand_scan_size <= (MDCS_LOG2_MAX_SIZE - 2) ? rand_scan_type : SCAN_DIAG];
+
+        int rand_scanPosCG = rand() % (trSize * trSize / MLS_CG_BLK_SIZE);
+        int subPosBase = rand_scanPosCG * MLS_CG_BLK_SIZE;
+        int rand_numCoeff = 0;
+        uint32_t scanFlagMask = 0;
+        const int numNonZero = (rand_scanPosSigOff < (MLS_CG_BLK_SIZE - 1)) ? 1 : 0;
+
+        // build the significance-flag mask for the chosen scan positions
+        for(int k = 0; k <= rand_scanPosSigOff; k++)
+        {
+            uint32_t pos = scanTbl[subPosBase + k];
+            coeff_t tmp_coeff = ref_src[i + pos];
+            if (tmp_coeff != 0)
+            {
+                rand_numCoeff++;
+            }
+            scanFlagMask = scanFlagMask * 2 + (tmp_coeff != 0);
+        }
+
+        // can't process all zeros block
+        if (rand_numCoeff == 0)
+            continue;
+
+        const uint32_t blkPosBase = scanTbl[subPosBase];
+        uint32_t ref_sum = ref(scanTblCG4x4, &ref_src[blkPosBase + i], trSize, ref_absCoeff + numNonZero, rand_tabSigCtx, scanFlagMask, (uint8_t*)ref_baseCtx, offset, rand_scanPosSigOff, subPosBase);
+        uint32_t opt_sum = (uint32_t)checked(opt, scanTblCG4x4, &ref_src[blkPosBase + i], trSize, opt_absCoeff + numNonZero, rand_tabSigCtx, scanFlagMask, (uint8_t*)opt_baseCtx, offset, rand_scanPosSigOff, subPosBase);
+
+        if (ref_sum != opt_sum)
+            return false;
+        if (memcmp(ref_baseCtx, opt_baseCtx, sizeof(m_contextState_ref)))
+            return false;
+
+        // NOTE: just first rand_numCoeff valid, but I check full buffer for confirm no overwrite bug
+        if (memcmp(ref_absCoeff, opt_absCoeff, sizeof(ref_absCoeff)))
+            return false;
+
+        reportfail();
+    }
+    return true;
+}
+
+// Validate the costCoeffRemain primitive: compares the Golomb-Rice remaining
+// level bit cost computed by reference and optimized implementations over
+// random absolute-coefficient buffers.
+bool PixelHarness::check_costCoeffRemain(costCoeffRemain_t ref, costCoeffRemain_t opt)
+{
+    ALIGN_VAR_32(uint16_t, absCoeff[(1 << MLS_CG_SIZE) + ITERS]);
+
+    for (int i = 0; i < (1 << MLS_CG_SIZE) + ITERS; i++)
+    {
+        absCoeff[i] = rand() & SHORT_MAX;
+        // more coeff with value one
+        if (absCoeff[i] < SHORT_MAX * 2 / 3)
+            absCoeff[i] = 1;
+    }
+    for (int i = 0; i < ITERS; i++)
+    {
+        uint32_t firstC2Idx = 0;
+        int k = 0;
+        // NOTE(review): rand() % 17 yields [0,16] but the original comment
+        // claimed [1,16] — confirm the primitive accepts numNonZero == 0
+        int numNonZero = rand() % 17;
+        for (k = 0; k < C1FLAG_NUMBER; k++)
+        {
+            if (absCoeff[i + k] >= 2)
+            {
+                break;
+            }
+        }
+        firstC2Idx = k; // it is index of exact first coeff that value more than 2
+        int ref_sum = ref(absCoeff + i, numNonZero, firstC2Idx);
+        int opt_sum = (int)checked(opt, absCoeff + i, numNonZero, firstC2Idx);
+        if (ref_sum != opt_sum)
+            return false;
+    }
+    return true;
+}
+
+// Validate the costC1C2Flag primitive: compares the greater-than-1/greater-than-2
+// flag bit cost produced by reference and optimized versions, starting both from
+// an identical random CABAC context state.
+// Fix: removed a leftover debug re-invocation of ref/opt in the mismatch branch —
+// its results were discarded and it mutated the context tables a second time.
+bool PixelHarness::check_costC1C2Flag(costC1C2Flag_t ref, costC1C2Flag_t opt)
+{
+    ALIGN_VAR_32(uint16_t, absCoeff[(1 << MLS_CG_SIZE)]);
+
+    // generate CABAC context table; both copies start from identical state
+    uint8_t ref_baseCtx[8];
+    uint8_t opt_baseCtx[8];
+    for (int k = 0; k < 8; k++)
+    {
+        ref_baseCtx[k] =
+        opt_baseCtx[k] = (rand() % (125 - 2)) + 2;
+    }
+
+    for (int i = 0; i < ITERS; i++)
+    {
+        int rand_offset = rand() % 4;
+        int numNonZero = 0;
+
+        // generate test data, all are Absolute value and Aligned
+        for (int k = 0; k < C1FLAG_NUMBER; k++)
+        {
+            int value = rand() & SHORT_MAX;
+            // more coeff with value [0,2]
+            if (value < SHORT_MAX * 1 / 3)
+                value = 0;
+            else if (value < SHORT_MAX * 2 / 3)
+                value = 1;
+            else if (value < SHORT_MAX * 3 / 4)
+                value = 2;
+
+            if (value)
+            {
+                absCoeff[numNonZero] = (uint16_t)value;
+                numNonZero++;
+            }
+        }
+
+        int ref_sum = ref(absCoeff, (intptr_t)numNonZero, ref_baseCtx, (intptr_t)rand_offset);
+        int opt_sum = (int)checked(opt, absCoeff, (intptr_t)numNonZero, opt_baseCtx, (intptr_t)rand_offset);
+        if (ref_sum != opt_sum)
+            return false;
+    }
+    return true;
+}
+
+// Validate the planeClipAndMax primitive over random plane geometries,
+// comparing only the returned maximum pixel value.
+bool PixelHarness::check_planeClipAndMax(planeClipAndMax_t ref, planeClipAndMax_t opt)
+{
+    for (int i = 0; i < ITERS; i++)
+    {
+        intptr_t rand_stride = rand() % STRIDE;
+        int rand_width = (rand() % (STRIDE * 2)) + 1;
+        const int rand_height = (rand() % MAX_HEIGHT) + 1;
+        const pixel rand_min = rand() % 32;
+        const pixel rand_max = PIXEL_MAX - (rand() % 32);
+        uint64_t ref_sum, opt_sum;
+
+        // video width must be more than or equal to 32
+        if (rand_width < 32)
+            rand_width = 32;
+
+        // stride must be more than or equal to width
+        if (rand_stride < rand_width)
+            rand_stride = rand_width;
+
+        pixel ref_max = ref(pbuf1, rand_stride, rand_width, rand_height, &ref_sum, rand_min, rand_max);
+        pixel opt_max = (pixel)checked(opt, pbuf1, rand_stride, rand_width, rand_height, &opt_sum, rand_min, rand_max);
+
+        // NOTE(review): ref_sum/opt_sum are produced but never compared —
+        // confirm whether the sum output should also be validated here
+        if (ref_max != opt_max)
+            return false;
+    }
+    return true;
+}
+
+// Run every per-partition (PU/CU) correctness check wired up for partition
+// index 'part': luma primitives first, then square-CU-only primitives, then
+// the chroma variants for every colorspace. Prints which primitive failed and
+// returns false on the first mismatch.
+bool PixelHarness::testPU(int part, const EncoderPrimitives& ref, const EncoderPrimitives& opt)
+{
+    if (opt.pu[part].satd)
+    {
+        if (!check_pixelcmp(ref.pu[part].satd, opt.pu[part].satd))
+        {
+            printf("satd[%s]: failed!\n", lumaPartStr[part]);
+            return false;
+        }
+    }
+
+    if (opt.pu[part].sad)
+    {
+        if (!check_pixelcmp(ref.pu[part].sad, opt.pu[part].sad))
+        {
+            printf("sad[%s]: failed!\n", lumaPartStr[part]);
+            return false;
+        }
+    }
+
+    if (opt.pu[part].sad_x3)
+    {
+        if (!check_pixelcmp_x3(ref.pu[part].sad_x3, opt.pu[part].sad_x3))
+        {
+            printf("sad_x3[%s]: failed!\n", lumaPartStr[part]);
+            return false;
+        }
+    }
+
+    if (opt.pu[part].sad_x4)
+    {
+        if (!check_pixelcmp_x4(ref.pu[part].sad_x4, opt.pu[part].sad_x4))
+        {
+            printf("sad_x4[%s]: failed!\n", lumaPartStr[part]);
+            return false;
+        }
+    }
+
+    if (opt.pu[part].pixelavg_pp)
+    {
+        if (!check_pixelavg_pp(ref.pu[part].pixelavg_pp, opt.pu[part].pixelavg_pp))
+        {
+            printf("pixelavg_pp[%s]: failed!\n", lumaPartStr[part]);
+            return false;
+        }
+    }
+
+    if (opt.pu[part].copy_pp)
+    {
+        if (!check_copy_pp(ref.pu[part].copy_pp, opt.pu[part].copy_pp))
+        {
+            printf("copy_pp[%s] failed\n", lumaPartStr[part]);
+            return false;
+        }
+    }
+
+    if (opt.pu[part].addAvg)
+    {
+        if (!check_addAvg(ref.pu[part].addAvg, opt.pu[part].addAvg))
+        {
+            printf("addAvg[%s] failed\n", lumaPartStr[part]);
+            return false;
+        }
+    }
+
+    // CU-only primitives exist only for square partition sizes
+    if (part < NUM_CU_SIZES)
+    {
+        if (opt.cu[part].sse_pp)
+        {
+            if (!check_pixel_sse(ref.cu[part].sse_pp, opt.cu[part].sse_pp))
+            {
+                printf("sse_pp[%s]: failed!\n", lumaPartStr[part]);
+                return false;
+            }
+        }
+
+        if (opt.cu[part].sse_ss)
+        {
+            if (!check_pixel_sse_ss(ref.cu[part].sse_ss, opt.cu[part].sse_ss))
+            {
+                printf("sse_ss[%s]: failed!\n", lumaPartStr[part]);
+                return false;
+            }
+        }
+
+        if (opt.cu[part].sub_ps)
+        {
+            if (!check_pixel_sub_ps(ref.cu[part].sub_ps, opt.cu[part].sub_ps))
+            {
+                printf("sub_ps[%s] failed\n", lumaPartStr[part]);
+                return false;
+            }
+        }
+
+        if (opt.cu[part].add_ps)
+        {
+            if (!check_pixel_add_ps(ref.cu[part].add_ps, opt.cu[part].add_ps))
+            {
+                printf("add_ps[%s] failed\n", lumaPartStr[part]);
+                return false;
+            }
+        }
+
+        if (opt.cu[part].copy_ss)
+        {
+            if (!check_copy_ss(ref.cu[part].copy_ss, opt.cu[part].copy_ss))
+            {
+                printf("copy_ss[%s] failed\n", lumaPartStr[part]);
+                return false;
+            }
+        }
+
+        if (opt.cu[part].copy_sp)
+        {
+            if (!check_copy_sp(ref.cu[part].copy_sp, opt.cu[part].copy_sp))
+            {
+                printf("copy_sp[%s] failed\n", lumaPartStr[part]);
+                return false;
+            }
+        }
+
+        if (opt.cu[part].copy_ps)
+        {
+            if (!check_copy_ps(ref.cu[part].copy_ps, opt.cu[part].copy_ps))
+            {
+                printf("copy_ps[%s] failed\n", lumaPartStr[part]);
+                return false;
+            }
+        }
+    }
+
+    // chroma variants, one set per supported colorspace
+    for (int i = 0; i < X265_CSP_COUNT; i++)
+    {
+        if (opt.chroma[i].pu[part].copy_pp)
+        {
+            if (!check_copy_pp(ref.chroma[i].pu[part].copy_pp, opt.chroma[i].pu[part].copy_pp))
+            {
+                printf("chroma_copy_pp[%s][%s] failed\n", x265_source_csp_names[i], chromaPartStr[i][part]);
+                return false;
+            }
+        }
+        if (opt.chroma[i].pu[part].addAvg)
+        {
+            if (!check_addAvg(ref.chroma[i].pu[part].addAvg, opt.chroma[i].pu[part].addAvg))
+            {
+                printf("chroma_addAvg[%s][%s] failed\n", x265_source_csp_names[i], chromaPartStr[i][part]);
+                return false;
+            }
+        }
+        if (opt.chroma[i].pu[part].satd)
+        {
+            if (!check_pixelcmp(ref.chroma[i].pu[part].satd, opt.chroma[i].pu[part].satd))
+            {
+                printf("chroma_satd[%s][%s] failed!\n", x265_source_csp_names[i], chromaPartStr[i][part]);
+                return false;
+            }
+        }
+        if (part < NUM_CU_SIZES)
+        {
+            if (opt.chroma[i].cu[part].sse_pp)
+            {
+                if (!check_pixel_sse(ref.chroma[i].cu[part].sse_pp, opt.chroma[i].cu[part].sse_pp))
+                {
+                    printf("chroma_sse_pp[%s][%s]: failed!\n", x265_source_csp_names[i], chromaPartStr[i][part]);
+                    return false;
+                }
+            }
+            if (opt.chroma[i].cu[part].sub_ps)
+            {
+                if (!check_pixel_sub_ps(ref.chroma[i].cu[part].sub_ps, opt.chroma[i].cu[part].sub_ps))
+                {
+                    printf("chroma_sub_ps[%s][%s] failed\n", x265_source_csp_names[i], chromaPartStr[i][part]);
+                    return false;
+                }
+            }
+            if (opt.chroma[i].cu[part].add_ps)
+            {
+                if (!check_pixel_add_ps(ref.chroma[i].cu[part].add_ps, opt.chroma[i].cu[part].add_ps))
+                {
+                    printf("chroma_add_ps[%s][%s] failed\n", x265_source_csp_names[i], chromaPartStr[i][part]);
+                    return false;
+                }
+            }
+            if (opt.chroma[i].cu[part].copy_sp)
+            {
+                if (!check_copy_sp(ref.chroma[i].cu[part].copy_sp, opt.chroma[i].cu[part].copy_sp))
+                {
+                    printf("chroma_copy_sp[%s][%s] failed\n", x265_source_csp_names[i], chromaPartStr[i][part]);
+                    return false;
+                }
+            }
+            if (opt.chroma[i].cu[part].copy_ps)
+            {
+                if (!check_copy_ps(ref.chroma[i].cu[part].copy_ps, opt.chroma[i].cu[part].copy_ps))
+                {
+                    printf("chroma_copy_ps[%s][%s] failed\n", x265_source_csp_names[i], chromaPartStr[i][part]);
+                    return false;
+                }
+            }
+            if (opt.chroma[i].cu[part].copy_ss)
+            {
+                if (!check_copy_ss(ref.chroma[i].cu[part].copy_ss, opt.chroma[i].cu[part].copy_ss))
+                {
+                    printf("chroma_copy_ss[%s][%s] failed\n", x265_source_csp_names[i], chromaPartStr[i][part]);
+                    return false;
+                }
+            }
+            if (opt.chroma[i].cu[part].sa8d)
+            {
+                if (!check_pixelcmp(ref.chroma[i].cu[part].sa8d, opt.chroma[i].cu[part].sa8d))
+                {
+                    printf("chroma_sa8d[%s][%s] failed\n", x265_source_csp_names[i], chromaPartStr[i][part]);
+                    return false;
+                }
+            }
+        }
+    }
+
+    return true;
+}
+
+// Top-level correctness pass: exercises every PU/CU partition shape via
+// testPU(), then every size-indexed CU primitive, then all the standalone
+// (non-partitioned) primitives. Returns false on the first failure.
+// Fix: the ssim_4x4x2_core failure branch printed "ssim_end_4 failed!"
+// (copy-paste from the branch below); it now names the right primitive.
+bool PixelHarness::testCorrectness(const EncoderPrimitives& ref, const EncoderPrimitives& opt)
+{
+    for (int size = 4; size <= 64; size *= 2)
+    {
+        int part = partitionFromSizes(size, size); // 2Nx2N
+        if (!testPU(part, ref, opt)) return false;
+
+        if (size > 4)
+        {
+            part = partitionFromSizes(size, size >> 1); // 2NxN
+            if (!testPU(part, ref, opt)) return false;
+            part = partitionFromSizes(size >> 1, size); // Nx2N
+            if (!testPU(part, ref, opt)) return false;
+        }
+        if (size > 8)
+        {
+            // 4 AMP modes
+            part = partitionFromSizes(size, size >> 2);
+            if (!testPU(part, ref, opt)) return false;
+            part = partitionFromSizes(size, 3 * (size >> 2));
+            if (!testPU(part, ref, opt)) return false;
+
+            part = partitionFromSizes(size >> 2, size);
+            if (!testPU(part, ref, opt)) return false;
+            part = partitionFromSizes(3 * (size >> 2), size);
+            if (!testPU(part, ref, opt)) return false;
+        }
+    }
+
+    // size-indexed CU primitives (4x4 .. 64x64)
+    for (int i = 0; i < NUM_CU_SIZES; i++)
+    {
+        if (opt.cu[i].sa8d)
+        {
+            if (!check_pixelcmp(ref.cu[i].sa8d, opt.cu[i].sa8d))
+            {
+                printf("sa8d[%dx%d]: failed!\n", 4 << i, 4 << i);
+                return false;
+            }
+        }
+
+        if (opt.cu[i].blockfill_s)
+        {
+            if (!check_blockfill_s(ref.cu[i].blockfill_s, opt.cu[i].blockfill_s))
+            {
+                printf("blockfill_s[%dx%d]: failed!\n", 4 << i, 4 << i);
+                return false;
+            }
+        }
+
+        if (opt.cu[i].var)
+        {
+            if (!check_pixel_var(ref.cu[i].var, opt.cu[i].var))
+            {
+                printf("var[%dx%d] failed\n", 4 << i, 4 << i);
+                return false;
+            }
+        }
+
+        if (opt.cu[i].psy_cost_pp)
+        {
+            if (!check_psyCost_pp(ref.cu[i].psy_cost_pp, opt.cu[i].psy_cost_pp))
+            {
+                printf("\npsy_cost_pp[%dx%d] failed!\n", 4 << i, 4 << i);
+                return false;
+            }
+        }
+
+        if (opt.cu[i].psy_cost_ss)
+        {
+            if (!check_psyCost_ss(ref.cu[i].psy_cost_ss, opt.cu[i].psy_cost_ss))
+            {
+                printf("\npsy_cost_ss[%dx%d] failed!\n", 4 << i, 4 << i);
+                return false;
+            }
+        }
+
+        if (i < BLOCK_64x64)
+        {
+            /* TU only primitives */
+
+            if (opt.cu[i].calcresidual)
+            {
+                if (!check_calresidual(ref.cu[i].calcresidual, opt.cu[i].calcresidual))
+                {
+                    printf("calcresidual width: %d failed!\n", 4 << i);
+                    return false;
+                }
+            }
+
+            if (opt.cu[i].transpose)
+            {
+                if (!check_transpose(ref.cu[i].transpose, opt.cu[i].transpose))
+                {
+                    printf("transpose[%dx%d] failed\n", 4 << i, 4 << i);
+                    return false;
+                }
+            }
+
+            if (opt.cu[i].ssd_s)
+            {
+                if (!check_ssd_s(ref.cu[i].ssd_s, opt.cu[i].ssd_s))
+                {
+                    printf("ssd_s[%dx%d]: failed!\n", 4 << i, 4 << i);
+                    return false;
+                }
+            }
+
+            if (opt.cu[i].copy_cnt)
+            {
+                if (!check_copy_cnt_t(ref.cu[i].copy_cnt, opt.cu[i].copy_cnt))
+                {
+                    printf("copy_cnt[%dx%d] failed!\n", 4 << i, 4 << i);
+                    return false;
+                }
+            }
+
+            if (opt.cu[i].cpy2Dto1D_shl)
+            {
+                if (!check_cpy2Dto1D_shl_t(ref.cu[i].cpy2Dto1D_shl, opt.cu[i].cpy2Dto1D_shl))
+                {
+                    printf("cpy2Dto1D_shl[%dx%d] failed!\n", 4 << i, 4 << i);
+                    return false;
+                }
+            }
+
+            if (opt.cu[i].cpy2Dto1D_shr)
+            {
+                if (!check_cpy2Dto1D_shr_t(ref.cu[i].cpy2Dto1D_shr, opt.cu[i].cpy2Dto1D_shr))
+                {
+                    printf("cpy2Dto1D_shr failed!\n");
+                    return false;
+                }
+            }
+
+            if (opt.cu[i].cpy1Dto2D_shl)
+            {
+                if (!check_cpy1Dto2D_shl_t(ref.cu[i].cpy1Dto2D_shl, opt.cu[i].cpy1Dto2D_shl))
+                {
+                    printf("cpy1Dto2D_shl[%dx%d] failed!\n", 4 << i, 4 << i);
+                    return false;
+                }
+            }
+
+            if (opt.cu[i].cpy1Dto2D_shr)
+            {
+                if (!check_cpy1Dto2D_shr_t(ref.cu[i].cpy1Dto2D_shr, opt.cu[i].cpy1Dto2D_shr))
+                {
+                    printf("cpy1Dto2D_shr[%dx%d] failed!\n", 4 << i, 4 << i);
+                    return false;
+                }
+            }
+        }
+    }
+
+    // standalone primitives, not indexed by partition or CU size
+    if (opt.weight_pp)
+    {
+        if (!check_weightp(ref.weight_pp, opt.weight_pp))
+        {
+            printf("Weighted Prediction (pixel) failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.weight_sp)
+    {
+        if (!check_weightp(ref.weight_sp, opt.weight_sp))
+        {
+            printf("Weighted Prediction (short) failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.frameInitLowres)
+    {
+        if (!check_downscale_t(ref.frameInitLowres, opt.frameInitLowres))
+        {
+            printf("downscale failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.scale1D_128to64)
+    {
+        if (!check_scale1D_pp(ref.scale1D_128to64, opt.scale1D_128to64))
+        {
+            printf("scale1D_128to64 failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.scale2D_64to32)
+    {
+        if (!check_scale2D_pp(ref.scale2D_64to32, opt.scale2D_64to32))
+        {
+            printf("scale2D_64to32 failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.ssim_4x4x2_core)
+    {
+        if (!check_ssim_4x4x2_core(ref.ssim_4x4x2_core, opt.ssim_4x4x2_core))
+        {
+            // was "ssim_end_4 failed!" — copy-paste from the branch below
+            printf("ssim_4x4x2_core failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.ssim_end_4)
+    {
+        if (!check_ssim_end(ref.ssim_end_4, opt.ssim_end_4))
+        {
+            printf("ssim_end_4 failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.sign)
+    {
+        if (!check_calSign(ref.sign, opt.sign))
+        {
+            printf("calSign failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuOrgE0)
+    {
+        if (!check_saoCuOrgE0_t(ref.saoCuOrgE0, opt.saoCuOrgE0))
+        {
+            printf("SAO_EO_0 failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuOrgE1)
+    {
+        if (!check_saoCuOrgE1_t(ref.saoCuOrgE1, opt.saoCuOrgE1))
+        {
+            printf("SAO_EO_1 failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuOrgE1_2Rows)
+    {
+        if (!check_saoCuOrgE1_t(ref.saoCuOrgE1_2Rows, opt.saoCuOrgE1_2Rows))
+        {
+            printf("SAO_EO_1_2Rows failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuOrgE2[0] || opt.saoCuOrgE2[1])
+    {
+        saoCuOrgE2_t ref1[] = { ref.saoCuOrgE2[0], ref.saoCuOrgE2[1] };
+        saoCuOrgE2_t opt1[] = { opt.saoCuOrgE2[0], opt.saoCuOrgE2[1] };
+
+        if (!check_saoCuOrgE2_t(ref1, opt1))
+        {
+            printf("SAO_EO_2[0] && SAO_EO_2[1] failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuOrgE3[0])
+    {
+        if (!check_saoCuOrgE3_t(ref.saoCuOrgE3[0], opt.saoCuOrgE3[0]))
+        {
+            printf("SAO_EO_3[0] failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuOrgE3[1])
+    {
+        if (!check_saoCuOrgE3_32_t(ref.saoCuOrgE3[1], opt.saoCuOrgE3[1]))
+        {
+            printf("SAO_EO_3[1] failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuOrgB0)
+    {
+        if (!check_saoCuOrgB0_t(ref.saoCuOrgB0, opt.saoCuOrgB0))
+        {
+            printf("SAO_BO_0 failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuStatsBO)
+    {
+        if (!check_saoCuStatsBO_t(ref.saoCuStatsBO, opt.saoCuStatsBO))
+        {
+            printf("saoCuStatsBO failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuStatsE0)
+    {
+        if (!check_saoCuStatsE0_t(ref.saoCuStatsE0, opt.saoCuStatsE0))
+        {
+            printf("saoCuStatsE0 failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuStatsE1)
+    {
+        if (!check_saoCuStatsE1_t(ref.saoCuStatsE1, opt.saoCuStatsE1))
+        {
+            printf("saoCuStatsE1 failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuStatsE2)
+    {
+        if (!check_saoCuStatsE2_t(ref.saoCuStatsE2, opt.saoCuStatsE2))
+        {
+            printf("saoCuStatsE2 failed\n");
+            return false;
+        }
+    }
+
+    if (opt.saoCuStatsE3)
+    {
+        if (!check_saoCuStatsE3_t(ref.saoCuStatsE3, opt.saoCuStatsE3))
+        {
+            printf("saoCuStatsE3 failed\n");
+            return false;
+        }
+    }
+
+    if (opt.planecopy_sp)
+    {
+        if (!check_planecopy_sp(ref.planecopy_sp, opt.planecopy_sp))
+        {
+            printf("planecopy_sp failed\n");
+            return false;
+        }
+    }
+
+    if (opt.planecopy_sp_shl)
+    {
+        if (!check_planecopy_sp(ref.planecopy_sp_shl, opt.planecopy_sp_shl))
+        {
+            printf("planecopy_sp_shl failed\n");
+            return false;
+        }
+    }
+
+    if (opt.planecopy_cp)
+    {
+        if (!check_planecopy_cp(ref.planecopy_cp, opt.planecopy_cp))
+        {
+            printf("planecopy_cp failed\n");
+            return false;
+        }
+    }
+
+    if (opt.propagateCost)
+    {
+        if (!check_cutree_propagate_cost(ref.propagateCost, opt.propagateCost))
+        {
+            printf("propagateCost failed\n");
+            return false;
+        }
+    }
+
+    if (opt.scanPosLast)
+    {
+        if (!check_scanPosLast(ref.scanPosLast, opt.scanPosLast))
+        {
+            printf("scanPosLast failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.findPosFirstLast)
+    {
+        if (!check_findPosFirstLast(ref.findPosFirstLast, opt.findPosFirstLast))
+        {
+            printf("findPosFirstLast failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.costCoeffNxN)
+    {
+        if (!check_costCoeffNxN(ref.costCoeffNxN, opt.costCoeffNxN))
+        {
+            printf("costCoeffNxN failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.costCoeffRemain)
+    {
+        if (!check_costCoeffRemain(ref.costCoeffRemain, opt.costCoeffRemain))
+        {
+            printf("costCoeffRemain failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.costC1C2Flag)
+    {
+        if (!check_costC1C2Flag(ref.costC1C2Flag, opt.costC1C2Flag))
+        {
+            printf("costC1C2Flag failed!\n");
+            return false;
+        }
+    }
+
+    if (opt.planeClipAndMax)
+    {
+        if (!check_planeClipAndMax(ref.planeClipAndMax, opt.planeClipAndMax))
+        {
+            printf("planeClipAndMax failed!\n");
+            return false;
+        }
+    }
+
+    return true;
+}
+
+void PixelHarness::measurePartition(int part, const EncoderPrimitives& ref, const EncoderPrimitives& opt)
+{
+    ALIGN_VAR_16(int, cres[16]);
+    pixel *fref = pbuf2 + 2 * INCR;
+    char header[128];
+#define HEADER(str, ...) sprintf(header, str, __VA_ARGS__); printf("%22s", header);
+
+    if (opt.pu[part].satd)
+    {
+        HEADER("satd[%s]", lumaPartStr[part]);
+        REPORT_SPEEDUP(opt.pu[part].satd, ref.pu[part].satd, pbuf1, STRIDE, fref, STRIDE);
+    }
+
+    if (opt.pu[part].pixelavg_pp)
+    {
+        HEADER("avg_pp[%s]", lumaPartStr[part]);
+        REPORT_SPEEDUP(opt.pu[part].pixelavg_pp, ref.pu[part].pixelavg_pp, pbuf1, STRIDE, pbuf2, STRIDE, pbuf3, STRIDE, 32);
+    }
+
+    if (opt.pu[part].sad)
+    {
+        HEADER("sad[%s]", lumaPartStr[part]);
+        REPORT_SPEEDUP(opt.pu[part].sad, ref.pu[part].sad, pbuf1, STRIDE, fref, STRIDE);
+    }
+
+    if (opt.pu[part].sad_x3)
+    {
+        HEADER("sad_x3[%s]", lumaPartStr[part]);
+        REPORT_SPEEDUP(opt.pu[part].sad_x3, ref.pu[part].sad_x3, pbuf1, fref, fref + 1, fref - 1, FENC_STRIDE + 5, &cres[0]);
+    }
+
+    if (opt.pu[part].sad_x4)
+    {
+        HEADER("sad_x4[%s]", lumaPartStr[part]);
+        REPORT_SPEEDUP(opt.pu[part].sad_x4, ref.pu[part].sad_x4, pbuf1, fref, fref + 1, fref - 1, fref - INCR, FENC_STRIDE + 5, &cres[0]);
+    }
+
+    if (opt.pu[part].copy_pp)
+    {
+        HEADER("copy_pp[%s]", lumaPartStr[part]);
+        REPORT_SPEEDUP(opt.pu[part].copy_pp, ref.pu[part].copy_pp, pbuf1, 64, pbuf2, 64);
+    }
+
+    if (opt.pu[part].addAvg)
+    {
+        HEADER("addAvg[%s]", lumaPartStr[part]);
+        REPORT_SPEEDUP(opt.pu[part].addAvg, ref.pu[part].addAvg, sbuf1, sbuf2, pbuf1, STRIDE, STRIDE, STRIDE);
+    }
+
+    if (part < NUM_CU_SIZES)
+    {
+        if (opt.cu[part].sse_pp)
+        {
+            HEADER("sse_pp[%s]", lumaPartStr[part]);
+            REPORT_SPEEDUP(opt.cu[part].sse_pp, ref.cu[part].sse_pp, pbuf1, STRIDE, fref, STRIDE);
+        }
+
+        if (opt.cu[part].sse_ss)
+        {
+            HEADER("sse_ss[%s]", lumaPartStr[part]);
+            REPORT_SPEEDUP(opt.cu[part].sse_ss, ref.cu[part].sse_ss, (int16_t*)pbuf1, STRIDE, (int16_t*)fref, STRIDE);
+        }
+        if (opt.cu[part].sub_ps)
+        {
+            HEADER("sub_ps[%s]", lumaPartStr[part]);
+            REPORT_SPEEDUP(opt.cu[part].sub_ps, ref.cu[part].sub_ps, (int16_t*)pbuf1, FENC_STRIDE, pbuf2, pbuf1, STRIDE, STRIDE);
+        }
+        if (opt.cu[part].add_ps)
+        {
+            HEADER("add_ps[%s]", lumaPartStr[part]);
+            REPORT_SPEEDUP(opt.cu[part].add_ps, ref.cu[part].add_ps, pbuf1, FENC_STRIDE, pbuf2, sbuf1, STRIDE, STRIDE);
+        }
+        if (opt.cu[part].copy_ss)
+        {
+            HEADER("copy_ss[%s]", lumaPartStr[part]);
+            REPORT_SPEEDUP(opt.cu[part].copy_ss, ref.cu[part].copy_ss, sbuf1, 128, sbuf2, 128);
+        }
+        if (opt.cu[part].copy_sp)
+        {
+            HEADER("copy_sp[%s]", lumaPartStr[part]);
+            REPORT_SPEEDUP(opt.cu[part].copy_sp, ref.cu[part].copy_sp, pbuf1, 64, sbuf3, 128);
+        }
+        if (opt.cu[part].copy_ps)
+        {
+            HEADER("copy_ps[%s]", lumaPartStr[part]);
+            REPORT_SPEEDUP(opt.cu[part].copy_ps, ref.cu[part].copy_ps, sbuf1, 128, pbuf1, 64);
+        }
+    }
+
+    for (int i = 0; i < X265_CSP_COUNT; i++)
+    {
+        if (opt.chroma[i].pu[part].copy_pp)
+        {
+            HEADER("[%s] copy_pp[%s]", x265_source_csp_names[i], chromaPartStr[i][part]);
+            REPORT_SPEEDUP(opt.chroma[i].pu[part].copy_pp, ref.chroma[i].pu[part].copy_pp, pbuf1, 64, pbuf2, 128);
+        }
+        if (opt.chroma[i].pu[part].addAvg)
+        {
+            HEADER("[%s]  addAvg[%s]", x265_source_csp_names[i], chromaPartStr[i][part]);
+            REPORT_SPEEDUP(opt.chroma[i].pu[part].addAvg, ref.chroma[i].pu[part].addAvg, sbuf1, sbuf2, pbuf1, STRIDE, STRIDE, STRIDE);
+        }
+        if (opt.chroma[i].pu[part].satd)
+        {
+            HEADER("[%s] satd[%s]", x265_source_csp_names[i], chromaPartStr[i][part]);
+            REPORT_SPEEDUP(opt.chroma[i].pu[part].satd, ref.chroma[i].pu[part].satd, pbuf1, STRIDE, fref, STRIDE);
+        }
+        if (part < NUM_CU_SIZES)
+        {
+            if (opt.chroma[i].cu[part].copy_ss)
+            {
+                HEADER("[%s] copy_ss[%s]", x265_source_csp_names[i], chromaPartStr[i][part]);
+                REPORT_SPEEDUP(opt.chroma[i].cu[part].copy_ss, ref.chroma[i].cu[part].copy_ss, sbuf1, 64, sbuf2, 128);
+            }
+            if (opt.chroma[i].cu[part].copy_ps)
+            {
+                HEADER("[%s] copy_ps[%s]", x265_source_csp_names[i], chromaPartStr[i][part]);
+                REPORT_SPEEDUP(opt.chroma[i].cu[part].copy_ps, ref.chroma[i].cu[part].copy_ps, sbuf1, 64, pbuf1, 128);
+            }
+            if (opt.chroma[i].cu[part].copy_sp)
+            {
+                HEADER("[%s] copy_sp[%s]", x265_source_csp_names[i], chromaPartStr[i][part]);
+                REPORT_SPEEDUP(opt.chroma[i].cu[part].copy_sp, ref.chroma[i].cu[part].copy_sp, pbuf1, 64, sbuf3, 128);
+            }
+            if (opt.chroma[i].cu[part].sse_pp)
+            {
+                HEADER("[%s] sse_pp[%s]", x265_source_csp_names[i], chromaPartStr[i][part]);
+                REPORT_SPEEDUP(opt.chroma[i].cu[part].sse_pp, ref.chroma[i].cu[part].sse_pp, pbuf1, STRIDE, fref, STRIDE);
+            }
+            if (opt.chroma[i].cu[part].sub_ps)
+            {
+                HEADER("[%s]  sub_ps[%s]", x265_source_csp_names[i], chromaPartStr[i][part]);
+                REPORT_SPEEDUP(opt.chroma[i].cu[part].sub_ps, ref.chroma[i].cu[part].sub_ps, (int16_t*)pbuf1, FENC_STRIDE, pbuf2, pbuf1, STRIDE, STRIDE);
+            }
+            if (opt.chroma[i].cu[part].add_ps)
+            {
+                HEADER("[%s]  add_ps[%s]", x265_source_csp_names[i], chromaPartStr[i][part]);
+                REPORT_SPEEDUP(opt.chroma[i].cu[part].add_ps, ref.chroma[i].cu[part].add_ps, pbuf1, FENC_STRIDE, pbuf2, sbuf1, STRIDE, STRIDE);
+            }
+            if (opt.chroma[i].cu[part].sa8d)
+            {
+                HEADER("[%s] sa8d[%s]", x265_source_csp_names[i], chromaPartStr[i][part]);
+                REPORT_SPEEDUP(opt.chroma[i].cu[part].sa8d, ref.chroma[i].cu[part].sa8d, pbuf1, STRIDE, pbuf2, STRIDE);
+            }
+        }
+    }
+
+#undef HEADER
+}
+
+void PixelHarness::measureSpeed(const EncoderPrimitives& ref, const EncoderPrimitives& opt)
+{
+    char header[128];
+
+#define HEADER(str, ...) sprintf(header, str, __VA_ARGS__); printf("%22s", header);
+#define HEADER0(str) printf("%22s", str);
+
+    for (int size = 4; size <= 64; size *= 2)
+    {
+        int part = partitionFromSizes(size, size); // 2Nx2N
+        measurePartition(part, ref, opt);
+
+        if (size > 4)
+        {
+            part = partitionFromSizes(size, size >> 1); // 2NxN
+            measurePartition(part, ref, opt);
+            part = partitionFromSizes(size >> 1, size); // Nx2N
+            measurePartition(part, ref, opt);
+        }
+        if (size > 8)
+        {
+            // 4 AMP modes
+            part = partitionFromSizes(size, size >> 2);
+            measurePartition(part, ref, opt);
+            part = partitionFromSizes(size, 3 * (size >> 2));
+            measurePartition(part, ref, opt);
+
+            part = partitionFromSizes(size >> 2, size);
+            measurePartition(part, ref, opt);
+            part = partitionFromSizes(3 * (size >> 2), size);
+            measurePartition(part, ref, opt);
+        }
+    }
+
+    for (int i = 0; i < NUM_CU_SIZES; i++)
+    {
+        if ((i <= BLOCK_32x32) && opt.cu[i].ssd_s)
+        {
+            HEADER("ssd_s[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].ssd_s, ref.cu[i].ssd_s, sbuf1, STRIDE);
+        }
+        if (opt.cu[i].sa8d)
+        {
+            HEADER("sa8d[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].sa8d, ref.cu[i].sa8d, pbuf1, STRIDE, pbuf2, STRIDE);
+        }
+        if (opt.cu[i].calcresidual)
+        {
+            HEADER("residual[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].calcresidual, ref.cu[i].calcresidual, pbuf1, pbuf2, sbuf1, 64);
+        }
+
+        if (opt.cu[i].blockfill_s)
+        {
+            HEADER("blkfill[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].blockfill_s, ref.cu[i].blockfill_s, sbuf1, 64, SHORT_MAX);
+        }
+
+        if (opt.cu[i].transpose)
+        {
+            HEADER("transpose[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].transpose, ref.cu[i].transpose, pbuf1, pbuf2, STRIDE);
+        }
+
+        if (opt.cu[i].var)
+        {
+            HEADER("var[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].var, ref.cu[i].var, pbuf1, STRIDE);
+        }
+
+        if ((i < BLOCK_64x64) && opt.cu[i].cpy2Dto1D_shl)
+        {
+            HEADER("cpy2Dto1D_shl[%dx%d]", 4 << i, 4 << i);
+            const int shift = MAX_TR_DYNAMIC_RANGE - X265_DEPTH - (i + 2);
+            REPORT_SPEEDUP(opt.cu[i].cpy2Dto1D_shl, ref.cu[i].cpy2Dto1D_shl, sbuf1, sbuf2, STRIDE, X265_MAX(0, shift));
+        }
+
+        if ((i < BLOCK_64x64) && opt.cu[i].cpy2Dto1D_shr)
+        {
+            HEADER("cpy2Dto1D_shr[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].cpy2Dto1D_shr, ref.cu[i].cpy2Dto1D_shr, sbuf1, sbuf2, STRIDE, 3);
+        }
+
+        if ((i < BLOCK_64x64) && opt.cu[i].cpy1Dto2D_shl)
+        {
+            HEADER("cpy1Dto2D_shl[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].cpy1Dto2D_shl, ref.cu[i].cpy1Dto2D_shl, sbuf1, sbuf2, STRIDE, 64);
+        }
+
+        if ((i < BLOCK_64x64) && opt.cu[i].cpy1Dto2D_shr)
+        {
+            HEADER("cpy1Dto2D_shr[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].cpy1Dto2D_shr, ref.cu[i].cpy1Dto2D_shr, sbuf1, sbuf2, STRIDE, 64);
+        }
+
+        if ((i < BLOCK_64x64) && opt.cu[i].copy_cnt)
+        {
+            HEADER("copy_cnt[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].copy_cnt, ref.cu[i].copy_cnt, sbuf1, sbuf2, STRIDE);
+        }
+
+        if (opt.cu[i].psy_cost_pp)
+        {
+            HEADER("psy_cost_pp[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].psy_cost_pp, ref.cu[i].psy_cost_pp, pbuf1, STRIDE, pbuf2, STRIDE);
+        }
+
+        if (opt.cu[i].psy_cost_ss)
+        {
+            HEADER("psy_cost_ss[%dx%d]", 4 << i, 4 << i);
+            REPORT_SPEEDUP(opt.cu[i].psy_cost_ss, ref.cu[i].psy_cost_ss, sbuf1, STRIDE, sbuf2, STRIDE);
+        }
+    }
+
+    if (opt.weight_pp)
+    {
+        HEADER0("weight_pp");
+        REPORT_SPEEDUP(opt.weight_pp, ref.weight_pp, pbuf1, pbuf2, 64, 32, 32, 128, 1 << 9, 10, 100);
+    }
+
+    if (opt.weight_sp)
+    {
+        HEADER0("weight_sp");
+        REPORT_SPEEDUP(opt.weight_sp, ref.weight_sp, (int16_t*)sbuf1, pbuf1, 64, 64, 32, 32, 128, 1 << 9, 10, 100);
+    }
+
+    if (opt.frameInitLowres)
+    {
+        HEADER0("downscale");
+        REPORT_SPEEDUP(opt.frameInitLowres, ref.frameInitLowres, pbuf2, pbuf1, pbuf2, pbuf3, pbuf4, 64, 64, 64, 64);
+    }
+
+    if (opt.scale1D_128to64)
+    {
+        HEADER0("scale1D_128to64");
+        REPORT_SPEEDUP(opt.scale1D_128to64, ref.scale1D_128to64, pbuf2, pbuf1);
+    }
+
+    if (opt.scale2D_64to32)
+    {
+        HEADER0("scale2D_64to32");
+        REPORT_SPEEDUP(opt.scale2D_64to32, ref.scale2D_64to32, pbuf2, pbuf1, 64);
+    }
+
+    if (opt.ssim_4x4x2_core)
+    {
+        HEADER0("ssim_4x4x2_core");
+        REPORT_SPEEDUP(opt.ssim_4x4x2_core, ref.ssim_4x4x2_core, pbuf1, 64, pbuf2, 64, (int(*)[4])sbuf1);
+    }
+
+    if (opt.ssim_end_4)
+    {
+        HEADER0("ssim_end_4");
+        REPORT_SPEEDUP(opt.ssim_end_4, ref.ssim_end_4, (int(*)[4])pbuf2, (int(*)[4])pbuf1, 4);
+    }
+
+    if (opt.sign)
+    {
+        HEADER0("calSign");
+        REPORT_SPEEDUP(opt.sign, ref.sign, psbuf1, pbuf1, pbuf2, 64);
+    }
+
+    if (opt.saoCuOrgE0)
+    {
+        HEADER0("SAO_EO_0");
+        REPORT_SPEEDUP(opt.saoCuOrgE0, ref.saoCuOrgE0, pbuf1, psbuf1, 64, psbuf2, 64);
+    }
+
+    if (opt.saoCuOrgE1)
+    {
+        HEADER0("SAO_EO_1");
+        REPORT_SPEEDUP(opt.saoCuOrgE1, ref.saoCuOrgE1, pbuf1, psbuf2, psbuf1, 64, 64);
+    }
+
+    if (opt.saoCuOrgE1_2Rows)
+    {
+        HEADER0("SAO_EO_1_2Rows");
+        REPORT_SPEEDUP(opt.saoCuOrgE1_2Rows, ref.saoCuOrgE1_2Rows, pbuf1, psbuf2, psbuf1, 64, 64);
+    }
+
+    if (opt.saoCuOrgE2[0])
+    {
+        HEADER0("SAO_EO_2[0]");
+        REPORT_SPEEDUP(opt.saoCuOrgE2[0], ref.saoCuOrgE2[0], pbuf1, psbuf1, psbuf2, psbuf3, 16, 64);
+    }
+
+    if (opt.saoCuOrgE2[1])
+    {
+        HEADER0("SAO_EO_2[1]");
+        REPORT_SPEEDUP(opt.saoCuOrgE2[1], ref.saoCuOrgE2[1], pbuf1, psbuf1, psbuf2, psbuf3, 64, 64);
+    }
+
+    if (opt.saoCuOrgE3[0])
+    {
+        HEADER0("SAO_EO_3[0]");
+        REPORT_SPEEDUP(opt.saoCuOrgE3[0], ref.saoCuOrgE3[0], pbuf1, psbuf2, psbuf1, 64, 0, 16);
+    }
+
+    if (opt.saoCuOrgE3[1])
+    {
+        HEADER0("SAO_EO_3[1]");
+        REPORT_SPEEDUP(opt.saoCuOrgE3[1], ref.saoCuOrgE3[1], pbuf1, psbuf2, psbuf1, 64, 0, 64);
+    }
+
+    if (opt.saoCuOrgB0)
+    {
+        HEADER0("SAO_BO_0");
+        REPORT_SPEEDUP(opt.saoCuOrgB0, ref.saoCuOrgB0, pbuf1, psbuf1, 64, 64, 64);
+    }
+
+    if (opt.saoCuStatsBO)
+    {
+        int32_t stats[33], count[33];
+        HEADER0("saoCuStatsBO");
+        REPORT_SPEEDUP(opt.saoCuStatsBO, ref.saoCuStatsBO, pbuf2, pbuf3, 64, 60, 61, stats, count);
+    }
+
+    if (opt.saoCuStatsE0)
+    {
+        int32_t stats[33], count[33];
+        HEADER0("saoCuStatsE0");
+        REPORT_SPEEDUP(opt.saoCuStatsE0, ref.saoCuStatsE0, pbuf2, pbuf3, 64, 60, 61, stats, count);
+    }
+
+    if (opt.saoCuStatsE1)
+    {
+        int32_t stats[5], count[5];
+        int8_t upBuff1[MAX_CU_SIZE + 2];
+        memset(upBuff1, 1, sizeof(upBuff1));
+        HEADER0("saoCuStatsE1");
+        REPORT_SPEEDUP(opt.saoCuStatsE1, ref.saoCuStatsE1, pbuf2, pbuf3, 64, upBuff1 + 1,60, 61, stats, count);
+    }
+
+    if (opt.saoCuStatsE2)
+    {
+        int32_t stats[5], count[5];
+        int8_t upBuff1[MAX_CU_SIZE + 2];
+        int8_t upBufft[MAX_CU_SIZE + 2];
+        memset(upBuff1, 1, sizeof(upBuff1));
+        memset(upBufft, -1, sizeof(upBufft));
+        HEADER0("saoCuStatsE2");
+        REPORT_SPEEDUP(opt.saoCuStatsE2, ref.saoCuStatsE2, pbuf2, pbuf3, 64, upBuff1 + 1, upBufft + 1, 60, 61, stats, count);
+    }
+
+    if (opt.saoCuStatsE3)
+    {
+        int8_t upBuff1[MAX_CU_SIZE + 2];
+        int32_t stats[5], count[5];
+        memset(upBuff1, 1, sizeof(upBuff1));
+        HEADER0("saoCuStatsE3");
+        REPORT_SPEEDUP(opt.saoCuStatsE3, ref.saoCuStatsE3, pbuf2, pbuf3, 64, upBuff1 + 1, 60, 61, stats, count);
+    }
+
+    if (opt.planecopy_sp)
+    {
+        HEADER0("planecopy_sp");
+        REPORT_SPEEDUP(opt.planecopy_sp, ref.planecopy_sp, ushort_test_buff[0], 64, pbuf1, 64, 64, 64, 8, 255);
+    }
+
+    if (opt.planecopy_cp)
+    {
+        HEADER0("planecopy_cp");
+        REPORT_SPEEDUP(opt.planecopy_cp, ref.planecopy_cp, uchar_test_buff[0], 64, pbuf1, 64, 64, 64, 2);
+    }
+
+    if (opt.propagateCost)
+    {
+        HEADER0("propagateCost");
+        REPORT_SPEEDUP(opt.propagateCost, ref.propagateCost, ibuf1, ushort_test_buff[0], int_test_buff[0], ushort_test_buff[0], int_test_buff[0], double_test_buff[0], 80);
+    }
+
+    if (opt.scanPosLast)
+    {
+        HEADER0("scanPosLast");
+        coeff_t coefBuf[32 * 32];
+        memset(coefBuf, 0, sizeof(coefBuf));
+        memset(coefBuf + 32 * 31, 1, 32 * sizeof(coeff_t));
+        REPORT_SPEEDUP(opt.scanPosLast, ref.scanPosLast, g_scanOrder[SCAN_DIAG][NUM_SCAN_SIZE - 1], coefBuf, (uint16_t*)sbuf1, (uint16_t*)sbuf2, (uint8_t*)psbuf1, 32, g_scan4x4[SCAN_DIAG], 32);
+    }
+
+    if (opt.findPosFirstLast)
+    {
+        HEADER0("findPosFirstLast");
+        coeff_t coefBuf[32 * MLS_CG_SIZE];
+        memset(coefBuf, 0, sizeof(coefBuf));
+        // every CG can't be all zeros!
+        coefBuf[3 + 0 * 32] = 0x0BAD;
+        coefBuf[3 + 1 * 32] = 0x0BAD;
+        coefBuf[3 + 2 * 32] = 0x0BAD;
+        coefBuf[3 + 3 * 32] = 0x0BAD;
+        REPORT_SPEEDUP(opt.findPosFirstLast, ref.findPosFirstLast, coefBuf, 32, g_scan4x4[SCAN_DIAG]);
+    }
+
+    if (opt.costCoeffNxN)
+    {
+        HEADER0("costCoeffNxN");
+        coeff_t coefBuf[32 * 32];
+        uint16_t tmpOut[16];
+        memset(coefBuf, 1, sizeof(coefBuf));
+        ALIGN_VAR_32(static uint8_t const, ctxSig[]) =
+        {
+            0, 1, 4, 5,
+            2, 3, 4, 5,
+            6, 6, 8, 8,
+            7, 7, 8, 8
+        };
+        uint8_t ctx[OFF_SIG_FLAG_CTX + NUM_SIG_FLAG_CTX_LUMA];
+        memset(ctx, 120, sizeof(ctx));
+
+        REPORT_SPEEDUP(opt.costCoeffNxN, ref.costCoeffNxN, g_scan4x4[SCAN_DIAG], coefBuf, 32, tmpOut, ctxSig, 0xFFFF, ctx, 1, 15, 32);
+    }
+
+    if (opt.costCoeffRemain)
+    {
+        HEADER0("costCoeffRemain");
+        uint16_t abscoefBuf[32 * 32];
+        memset(abscoefBuf, 0, sizeof(abscoefBuf));
+        memset(abscoefBuf + 32 * 31, 1, 32 * sizeof(uint16_t));
+        REPORT_SPEEDUP(opt.costCoeffRemain, ref.costCoeffRemain, abscoefBuf, 16, 3);
+    }
+
+    if (opt.costC1C2Flag)
+    {
+        HEADER0("costC1C2Flag");
+        ALIGN_VAR_32(uint16_t, abscoefBuf[C1FLAG_NUMBER]);
+        memset(abscoefBuf, 1, sizeof(abscoefBuf));
+        abscoefBuf[C1FLAG_NUMBER - 2] = 2;
+        abscoefBuf[C1FLAG_NUMBER - 1] = 3;
+        REPORT_SPEEDUP(opt.costC1C2Flag, ref.costC1C2Flag, abscoefBuf, C1FLAG_NUMBER, (uint8_t*)psbuf1, 1);
+    }
+
+    if (opt.planeClipAndMax)
+    {
+        HEADER0("planeClipAndMax");
+        uint64_t dummy;
+        REPORT_SPEEDUP(opt.planeClipAndMax, ref.planeClipAndMax, pbuf1, 128, 63, 62, &dummy, 1, PIXEL_MAX - 1);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/pixelharness.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,135 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef _PIXELHARNESS_H_1
+#define _PIXELHARNESS_H_1 1
+
+#include "testharness.h"
+#include "primitives.h"
+
+class PixelHarness : public TestHarness
+{
+protected:
+
+    enum { INCR = 32 };
+    enum { STRIDE = 64 };
+    enum { ITERS = 100 };
+    enum { MAX_HEIGHT = 64 };
+    enum { PAD_ROWS = 64 };
+    enum { BUFFSIZE = STRIDE * (MAX_HEIGHT + PAD_ROWS) + INCR * ITERS };
+    enum { TEST_CASES = 3 };
+    enum { SMAX = 1 << 12 };
+    enum { SMIN = -1 << 12 };
+
+    ALIGN_VAR_32(pixel, pbuf1[BUFFSIZE]);
+    pixel    pbuf2[BUFFSIZE];
+    pixel    pbuf3[BUFFSIZE];
+    pixel    pbuf4[BUFFSIZE];
+    int      ibuf1[BUFFSIZE];
+    int8_t   psbuf1[BUFFSIZE];
+    int8_t   psbuf2[BUFFSIZE];
+    int8_t   psbuf3[BUFFSIZE];
+    int8_t   psbuf4[BUFFSIZE];
+    int8_t   psbuf5[BUFFSIZE];
+
+    int16_t  sbuf1[BUFFSIZE];
+    int16_t  sbuf2[BUFFSIZE];
+    int16_t  sbuf3[BUFFSIZE];
+
+    pixel    pixel_test_buff[TEST_CASES][BUFFSIZE];
+    int16_t  short_test_buff[TEST_CASES][BUFFSIZE];
+    int16_t  short_test_buff1[TEST_CASES][BUFFSIZE];
+    int16_t  short_test_buff2[TEST_CASES][BUFFSIZE];
+    int      int_test_buff[TEST_CASES][BUFFSIZE];
+    uint16_t ushort_test_buff[TEST_CASES][BUFFSIZE];
+    uint8_t  uchar_test_buff[TEST_CASES][BUFFSIZE];
+    double   double_test_buff[TEST_CASES][BUFFSIZE];
+
+    bool check_pixelcmp(pixelcmp_t ref, pixelcmp_t opt);
+    bool check_pixel_sse(pixel_sse_t ref, pixel_sse_t opt);
+    bool check_pixel_sse_ss(pixel_sse_ss_t ref, pixel_sse_ss_t opt);
+    bool check_pixelcmp_x3(pixelcmp_x3_t ref, pixelcmp_x3_t opt);
+    bool check_pixelcmp_x4(pixelcmp_x4_t ref, pixelcmp_x4_t opt);
+    bool check_copy_pp(copy_pp_t ref, copy_pp_t opt);
+    bool check_copy_sp(copy_sp_t ref, copy_sp_t opt);
+    bool check_copy_ps(copy_ps_t ref, copy_ps_t opt);
+    bool check_copy_ss(copy_ss_t ref, copy_ss_t opt);
+    bool check_pixelavg_pp(pixelavg_pp_t ref, pixelavg_pp_t opt);
+    bool check_pixel_sub_ps(pixel_sub_ps_t ref, pixel_sub_ps_t opt);
+    bool check_pixel_add_ps(pixel_add_ps_t ref, pixel_add_ps_t opt);
+    bool check_scale1D_pp(scale1D_t ref, scale1D_t opt);
+    bool check_scale2D_pp(scale2D_t ref, scale2D_t opt);
+    bool check_ssd_s(pixel_ssd_s_t ref, pixel_ssd_s_t opt);
+    bool check_blockfill_s(blockfill_s_t ref, blockfill_s_t opt);
+    bool check_calresidual(calcresidual_t ref, calcresidual_t opt);
+    bool check_transpose(transpose_t ref, transpose_t opt);
+    bool check_weightp(weightp_pp_t ref, weightp_pp_t opt);
+    bool check_weightp(weightp_sp_t ref, weightp_sp_t opt);
+    bool check_downscale_t(downscale_t ref, downscale_t opt);
+    bool check_cpy2Dto1D_shl_t(cpy2Dto1D_shl_t ref, cpy2Dto1D_shl_t opt);
+    bool check_cpy2Dto1D_shr_t(cpy2Dto1D_shr_t ref, cpy2Dto1D_shr_t opt);
+    bool check_cpy1Dto2D_shl_t(cpy1Dto2D_shl_t ref, cpy1Dto2D_shl_t opt);
+    bool check_cpy1Dto2D_shr_t(cpy1Dto2D_shr_t ref, cpy1Dto2D_shr_t opt);
+    bool check_copy_cnt_t(copy_cnt_t ref, copy_cnt_t opt);
+    bool check_pixel_var(var_t ref, var_t opt);
+    bool check_ssim_4x4x2_core(ssim_4x4x2_core_t ref, ssim_4x4x2_core_t opt);
+    bool check_ssim_end(ssim_end4_t ref, ssim_end4_t opt);
+    bool check_addAvg(addAvg_t, addAvg_t);
+    bool check_saoCuOrgE0_t(saoCuOrgE0_t ref, saoCuOrgE0_t opt);
+    bool check_saoCuOrgE1_t(saoCuOrgE1_t ref, saoCuOrgE1_t opt);
+    bool check_saoCuOrgE2_t(saoCuOrgE2_t ref[], saoCuOrgE2_t opt[]);
+    bool check_saoCuOrgE3_t(saoCuOrgE3_t ref, saoCuOrgE3_t opt);
+    bool check_saoCuOrgE3_32_t(saoCuOrgE3_t ref, saoCuOrgE3_t opt);
+    bool check_saoCuOrgB0_t(saoCuOrgB0_t ref, saoCuOrgB0_t opt);
+    bool check_saoCuStatsBO_t(saoCuStatsBO_t ref, saoCuStatsBO_t opt);
+    bool check_saoCuStatsE0_t(saoCuStatsE0_t ref, saoCuStatsE0_t opt);
+    bool check_saoCuStatsE1_t(saoCuStatsE1_t ref, saoCuStatsE1_t opt);
+    bool check_saoCuStatsE2_t(saoCuStatsE2_t ref, saoCuStatsE2_t opt);
+    bool check_saoCuStatsE3_t(saoCuStatsE3_t ref, saoCuStatsE3_t opt);
+    bool check_planecopy_sp(planecopy_sp_t ref, planecopy_sp_t opt);
+    bool check_planecopy_cp(planecopy_cp_t ref, planecopy_cp_t opt);
+    bool check_cutree_propagate_cost(cutree_propagate_cost ref, cutree_propagate_cost opt);
+    bool check_psyCost_pp(pixelcmp_t ref, pixelcmp_t opt);
+    bool check_psyCost_ss(pixelcmp_ss_t ref, pixelcmp_ss_t opt);
+    bool check_calSign(sign_t ref, sign_t opt);
+    bool check_scanPosLast(scanPosLast_t ref, scanPosLast_t opt);
+    bool check_findPosFirstLast(findPosFirstLast_t ref, findPosFirstLast_t opt);
+    bool check_costCoeffNxN(costCoeffNxN_t ref, costCoeffNxN_t opt);
+    bool check_costCoeffRemain(costCoeffRemain_t ref, costCoeffRemain_t opt);
+    bool check_costC1C2Flag(costC1C2Flag_t ref, costC1C2Flag_t opt);
+    bool check_planeClipAndMax(planeClipAndMax_t ref, planeClipAndMax_t opt);
+
+public:
+
+    PixelHarness();
+
+    const char *getName() const { return "pixel"; }
+
+    bool testCorrectness(const EncoderPrimitives& ref, const EncoderPrimitives& opt);
+    bool testPU(int part, const EncoderPrimitives& ref, const EncoderPrimitives& opt);
+
+    void measureSpeed(const EncoderPrimitives& ref, const EncoderPrimitives& opt);
+    void measurePartition(int part, const EncoderPrimitives& ref, const EncoderPrimitives& opt);
+};
+
+#endif // ifndef _PIXELHARNESS_H_1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/rate-control-tests.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,36 @@
+# List of command lines to be run by rate control regression tests, see https://bitbucket.org/sborho/test-harness
+
+# These tests should yield deterministic results
+# This test is listed first since it currently reproduces bugs
+big_buck_bunny_360p24.y4m,--preset medium --bitrate 1000 --pass 1 -F4,--preset medium --bitrate 1000 --pass 2 -F4
+fire_1920x1080_30.yuv, --preset slow --bitrate 2000 --tune zero-latency 
+
+
+# VBV tests, non-deterministic so testing for correctness and bitrate
+# fluctuations - up to 1% bitrate fluctuation is allowed between runs
+night_cars_1920x1080_30.yuv,--preset medium --crf 25 --vbv-bufsize 5000 --vbv-maxrate 5000 -F6 --crf-max 34 --crf-min 22
+ducks_take_off_420_720p50.y4m,--preset slow --bitrate 1600 --vbv-bufsize 1600 --vbv-maxrate 1600 --strict-cbr --aq-mode 2 --aq-strength 0.5
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset veryslow --bitrate 4000 --vbv-bufsize 3000 --vbv-maxrate 4000 --tune grain
+fire_1920x1080_30.yuv,--preset medium --bitrate 1000 --vbv-maxrate 1500 --vbv-bufsize 1500 --aud --pmode --tune ssim
+112_1920x1080_25.yuv,--preset ultrafast --bitrate 10000 --vbv-maxrate 10000 --vbv-bufsize 15000 --hrd --strict-cbr
+Traffic_4096x2048_30.yuv,--preset superfast --bitrate 20000 --vbv-maxrate 20000 --vbv-bufsize 20000 --repeat-headers --strict-cbr
+Traffic_4096x2048_30.yuv,--preset faster --bitrate 8000 --vbv-maxrate 8000 --vbv-bufsize 6000 --aud --repeat-headers --no-open-gop --hrd --pmode --pme
+News-4k.y4m,--preset veryfast --bitrate 3000 --vbv-maxrate 5000 --vbv-bufsize 5000 --repeat-headers --temporal-layers
+NebutaFestival_2560x1600_60_10bit_crop.yuv,--preset medium --bitrate 18000 --vbv-bufsize 20000 --vbv-maxrate 18000 --strict-cbr
+NebutaFestival_2560x1600_60_10bit_crop.yuv,--preset medium --bitrate 8000 --vbv-bufsize 12000 --vbv-maxrate 10000  --tune grain
+big_buck_bunny_360p24.y4m,--preset medium --bitrate 400 --vbv-bufsize 600 --vbv-maxrate 600 --aud --hrd --tune fast-decode
+sita_1920x1080_30.yuv,--preset superfast --crf 25 --vbv-bufsize 3000 --vbv-maxrate 4000 --vbv-bufsize 5000 --hrd  --crf-max 30
+sita_1920x1080_30.yuv,--preset superfast --bitrate 3000 --vbv-bufsize 3000 --vbv-maxrate 3000 --aud --strict-cbr
+
+
+
+# multi-pass rate control tests
+big_buck_bunny_360p24.y4m,--preset slow --crf 40 --pass 1 -f 5000,--preset slow --bitrate 200 --pass 2 -f 5000
+big_buck_bunny_360p24.y4m,--preset medium --bitrate 700 --pass 1 -F4 --slow-firstpass -f 5000 ,--preset medium --bitrate 700 --vbv-bufsize 900 --vbv-maxrate 700 --pass 2 -F4 -f 5000
+112_1920x1080_25.yuv,--preset fast --bitrate 1000 --vbv-maxrate 1000 --vbv-bufsize 1000 --strict-cbr --pass 1 -F4,--preset fast --bitrate 1000 --vbv-maxrate 3000 --vbv-bufsize 3000 --pass 2 -F4
+pine_tree_1920x1080_30.yuv,--preset veryfast --crf 12 --pass 1 -F4,--preset faster --bitrate 4000 --pass 2 -F4
+SteamLocomotiveTrain_2560x1600_60_10bit_crop.yuv, --tune grain --preset ultrafast --bitrate 5000 --vbv-maxrate 5000 --vbv-bufsize 8000 --strict-cbr -F4 --pass 1, --tune grain --preset ultrafast --bitrate 8000 --vbv-maxrate 8000 --vbv-bufsize 8000 -F4 --pass 2
+RaceHorses_416x240_30_10bit.yuv,--preset medium --crf 40 --pass 1, --preset faster --bitrate 200 --pass 2 -F4
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset superfast --bitrate 2500 --pass 1 -F4 --slow-firstpass,--preset superfast --bitrate 2500 --pass 2 -F4
+RaceHorses_416x240_30_10bit.yuv,--preset medium --crf 26 --vbv-maxrate 1000 --vbv-bufsize 1000 --pass 1,--preset fast --bitrate 1000  --vbv-maxrate 1000 --vbv-bufsize 700 --pass 3 -F4,--preset slow --bitrate 500 --vbv-maxrate 500  --vbv-bufsize 700 --pass 2 -F4
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/regression-tests.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,134 @@
+# List of command lines to be run by regression tests, see https://bitbucket.org/sborho/test-harness
+
+# the vast majority of the commands are tested for results matching the
+# most recent commit which was known to change outputs. The output
+# bitstream must be bit-exact or the test fails. If no golden outputs
+# are available the bitstream is validated (decoded) and then saved as a
+# new golden output
+
+# Note: --nr-intra, --nr-inter, and --bitrate (ABR) give different
+# outputs for different frame encoder counts. In order for outputs to be
+# consistent across many machines, you must force a certain -FN so it is
+# not auto-detected.
+
+BasketballDrive_1920x1080_50.y4m,--preset faster --aq-strength 2 --merange 190
+BasketballDrive_1920x1080_50.y4m,--preset medium --ctu 16 --max-tu-size 8 --subme 7 --qg-size 16 --cu-lossless
+BasketballDrive_1920x1080_50.y4m,--preset medium --keyint -1 --nr-inter 100 -F4 --no-sao
+BasketballDrive_1920x1080_50.y4m,--preset slow --nr-intra 100 -F4 --aq-strength 3 --qg-size 16 --limit-refs 1
+BasketballDrive_1920x1080_50.y4m,--preset slower --lossless --chromaloc 3 --subme 0
+BasketballDrive_1920x1080_50.y4m,--preset superfast --psy-rd 1 --ctu 16 --no-wpp
+BasketballDrive_1920x1080_50.y4m,--preset ultrafast --signhide --colormatrix bt709
+BasketballDrive_1920x1080_50.y4m,--preset veryfast --tune zerolatency --no-temporal-mvp
+BasketballDrive_1920x1080_50.y4m,--preset veryslow --crf 4 --cu-lossless --pmode --limit-refs 1
+Coastguard-4k.y4m,--preset medium --rdoq-level 1 --tune ssim --no-signhide --me umh
+Coastguard-4k.y4m,--preset slow --tune psnr --cbqpoffs -1 --crqpoffs 1 --limit-refs 1
+Coastguard-4k.y4m,--preset superfast --tune grain --overscan=crop
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset fast --aq-mode 0 --sar 2 --range full
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset faster --max-tu-size 4 --min-cu-size 32
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset medium --no-wpp --no-cutree --no-strong-intra-smoothing --limit-refs 1
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset slow --no-wpp --tune ssim --transfer smpte240m
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset slower --tune ssim --tune fastdecode --limit-refs 2
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset superfast --weightp --no-wpp --sao
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset ultrafast --weightp --tune zerolatency --qg-size 16
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset veryfast --temporal-layers --tune grain
+CrowdRun_1920x1080_50_10bit_444.yuv,--preset medium --dither --keyint -1 --rdoq-level 1
+CrowdRun_1920x1080_50_10bit_444.yuv,--preset superfast --weightp --dither --no-psy-rd
+CrowdRun_1920x1080_50_10bit_444.yuv,--preset ultrafast --weightp --no-wpp --no-open-gop
+CrowdRun_1920x1080_50_10bit_444.yuv,--preset veryfast --temporal-layers --repeat-headers --limit-refs 2
+CrowdRun_1920x1080_50_10bit_444.yuv,--preset veryslow --tskip --tskip-fast --no-scenecut
+DucksAndLegs_1920x1080_60_10bit_422.yuv,--preset medium --tune psnr --bframes 16
+DucksAndLegs_1920x1080_60_10bit_422.yuv,--preset slow --temporal-layers --no-psy-rd --qg-size 32 --limit-refs 0 --cu-lossless
+DucksAndLegs_1920x1080_60_10bit_422.yuv,--preset superfast --weightp --qg-size 16
+DucksAndLegs_1920x1080_60_10bit_444.yuv,--preset medium --nr-inter 500 -F4 --no-psy-rdoq
+DucksAndLegs_1920x1080_60_10bit_444.yuv,--preset slower --no-weightp --rdoq-level 0 --limit-refs 3
+DucksAndLegs_1920x1080_60_10bit_444.yuv,--preset veryfast --weightp --nr-intra 1000 -F4
+FourPeople_1280x720_60.y4m,--preset medium --qp 38 --no-psy-rd
+FourPeople_1280x720_60.y4m,--preset superfast --no-wpp --lookahead-slices 2
+Keiba_832x480_30.y4m,--preset medium --pmode --tune grain
+Keiba_832x480_30.y4m,--preset slower --fast-intra --nr-inter 500 -F4 --limit-refs 0
+Keiba_832x480_30.y4m,--preset superfast --no-fast-intra --nr-intra 1000 -F4
+Kimono1_1920x1080_24_10bit_444.yuv,--preset medium --min-cu-size 32
+Kimono1_1920x1080_24_10bit_444.yuv,--preset superfast --weightb
+KristenAndSara_1280x720_60.y4m,--preset medium --no-cutree --max-tu-size 16
+KristenAndSara_1280x720_60.y4m,--preset slower --pmode --max-tu-size 8 --limit-refs 0
+KristenAndSara_1280x720_60.y4m,--preset superfast --min-cu-size 16 --qg-size 16 --limit-refs 1
+KristenAndSara_1280x720_60.y4m,--preset ultrafast --strong-intra-smoothing
+NebutaFestival_2560x1600_60_10bit_crop.yuv,--preset medium --tune grain --limit-refs 2
+NebutaFestival_2560x1600_60_10bit_crop.yuv,--preset superfast --tune psnr
+News-4k.y4m,--preset medium --tune ssim --no-sao --qg-size 16
+News-4k.y4m,--preset superfast --lookahead-slices 6 --aq-mode 0
+OldTownCross_1920x1080_50_10bit_422.yuv,--preset medium --no-weightp
+OldTownCross_1920x1080_50_10bit_422.yuv,--preset slower --tune fastdecode
+OldTownCross_1920x1080_50_10bit_422.yuv,--preset superfast --weightp
+ParkScene_1920x1080_24.y4m,--preset medium --qp 40 --rdpenalty 2 --tu-intra-depth 3
+ParkScene_1920x1080_24.y4m,--preset slower --no-weightp
+ParkScene_1920x1080_24_10bit_444.yuv,--preset superfast --weightp --lookahead-slices 4
+RaceHorses_416x240_30.y4m,--preset medium --tskip-fast --tskip
+RaceHorses_416x240_30.y4m,--preset slower --keyint -1 --rdoq-level 0
+RaceHorses_416x240_30.y4m,--preset superfast --no-cutree
+RaceHorses_416x240_30.y4m,--preset veryslow --tskip-fast --tskip --limit-refs 3
+RaceHorses_416x240_30_10bit.yuv,--preset fast --lookahead-slices 2 --b-intra --limit-refs 1
+RaceHorses_416x240_30_10bit.yuv,--preset faster --rdoq-level 0 --dither
+RaceHorses_416x240_30_10bit.yuv,--preset slow --tune grain
+RaceHorses_416x240_30_10bit.yuv,--preset ultrafast --tune psnr --limit-refs 1
+RaceHorses_416x240_30_10bit.yuv,--preset veryfast --weightb
+RaceHorses_416x240_30_10bit.yuv,--preset placebo --limit-refs 1
+SteamLocomotiveTrain_2560x1600_60_10bit_crop.yuv,--preset medium --dither
+big_buck_bunny_360p24.y4m,--preset faster --keyint 240 --min-keyint 60 --rc-lookahead 200
+big_buck_bunny_360p24.y4m,--preset medium --keyint 60 --min-keyint 48 --weightb --limit-refs 3
+big_buck_bunny_360p24.y4m,--preset slow --psy-rdoq 2.0 --rdoq-level 1 --no-b-intra
+big_buck_bunny_360p24.y4m,--preset superfast --psy-rdoq 2.0
+big_buck_bunny_360p24.y4m,--preset ultrafast --deblock=2
+big_buck_bunny_360p24.y4m,--preset veryfast --no-deblock
+city_4cif_60fps.y4m,--preset medium --crf 4 --cu-lossless --sao-non-deblock
+city_4cif_60fps.y4m,--preset superfast --rdpenalty 1 --tu-intra-depth 2
+city_4cif_60fps.y4m,--preset slower --scaling-list default
+city_4cif_60fps.y4m,--preset veryslow --rdpenalty 2 --sao-non-deblock --no-b-intra --limit-refs 0
+ducks_take_off_420_720p50.y4m,--preset fast --deblock 6 --bframes 16 --rc-lookahead 40
+ducks_take_off_420_720p50.y4m,--preset faster --qp 24 --deblock -6 --limit-refs 2
+ducks_take_off_420_720p50.y4m,--preset medium --tskip --tskip-fast --constrained-intra
+ducks_take_off_420_720p50.y4m,--preset slow --scaling-list default --qp 40
+ducks_take_off_420_720p50.y4m,--preset ultrafast --constrained-intra --rd 1
+ducks_take_off_420_720p50.y4m,--preset veryslow --constrained-intra --bframes 2
+ducks_take_off_444_720p50.y4m,--preset medium --qp 38 --no-scenecut
+ducks_take_off_444_720p50.y4m,--preset superfast --weightp --rd 0 --limit-refs 2
+ducks_take_off_444_720p50.y4m,--preset slower --psy-rd 1 --psy-rdoq 2.0 --rdoq-level 1 --limit-refs 1
+mobile_calendar_422_ntsc.y4m,--preset medium --bitrate 500 -F4
+mobile_calendar_422_ntsc.y4m,--preset slower --tskip --tskip-fast
+mobile_calendar_422_ntsc.y4m,--preset superfast --weightp --rd 0
+mobile_calendar_422_ntsc.y4m,--preset veryslow --tskip --limit-refs 2
+old_town_cross_444_720p50.y4m,--preset faster --rd 1 --tune zero-latency
+old_town_cross_444_720p50.y4m,--preset medium --keyint -1 --no-weightp --ref 6
+old_town_cross_444_720p50.y4m,--preset slow --rdoq-level 1 --early-skip --ref 7 --no-b-pyramid
+old_town_cross_444_720p50.y4m,--preset slower --crf 4 --cu-lossless
+old_town_cross_444_720p50.y4m,--preset superfast --weightp --min-cu 16
+old_town_cross_444_720p50.y4m,--preset ultrafast --weightp --min-cu 32
+old_town_cross_444_720p50.y4m,--preset veryfast --qp 1 --tune ssim
+parkrun_ter_720p50.y4m,--preset medium --no-open-gop --sao-non-deblock --crf 4 --cu-lossless
+parkrun_ter_720p50.y4m,--preset slower --fast-intra --no-rect --tune grain
+silent_cif_420.y4m,--preset medium --me full --rect --amp
+silent_cif_420.y4m,--preset superfast --weightp --rect
+silent_cif_420.y4m,--preset placebo --ctu 32 --no-sao --qg-size 16
+vtc1nw_422_ntsc.y4m,--preset medium --scaling-list default --ctu 16 --ref 5
+vtc1nw_422_ntsc.y4m,--preset slower --nr-inter 1000 -F4 --tune fast-decode --qg-size 16
+vtc1nw_422_ntsc.y4m,--preset superfast --weightp --nr-intra 100 -F4
+washdc_422_ntsc.y4m,--preset faster --rdoq-level 1 --max-merge 5
+washdc_422_ntsc.y4m,--preset medium --no-weightp --max-tu-size 4 --limit-refs 1
+washdc_422_ntsc.y4m,--preset slower --psy-rdoq 2.0 --rdoq-level 2 --qg-size 32 --limit-refs 1
+washdc_422_ntsc.y4m,--preset superfast --psy-rd 1 --tune zerolatency
+washdc_422_ntsc.y4m,--preset ultrafast --weightp --tu-intra-depth 4
+washdc_422_ntsc.y4m,--preset veryfast --tu-inter-depth 4
+washdc_422_ntsc.y4m,--preset veryslow --crf 4 --cu-lossless --limit-refs 3
+BasketballDrive_1920x1080_50.y4m,--preset medium --no-cutree --analysis-mode=save --bitrate 15000,--preset medium --no-cutree --analysis-mode=load --bitrate 13000,--preset medium --no-cutree --analysis-mode=load --bitrate 11000,--preset medium --no-cutree --analysis-mode=load --bitrate 9000,--preset medium --no-cutree --analysis-mode=load --bitrate 7000
+NebutaFestival_2560x1600_60_10bit_crop.yuv,--preset slow --no-cutree --analysis-mode=save --bitrate 15000,--preset slow --no-cutree --analysis-mode=load --bitrate 13000,--preset slow --no-cutree --analysis-mode=load --bitrate 11000,--preset slow --no-cutree --analysis-mode=load --bitrate 9000,--preset slow --no-cutree --analysis-mode=load --bitrate 7000
+old_town_cross_444_720p50.y4m,--preset veryslow --no-cutree --analysis-mode=save --bitrate 15000 --early-skip,--preset veryslow --no-cutree --analysis-mode=load --bitrate 13000 --early-skip,--preset veryslow --no-cutree --analysis-mode=load --bitrate 11000 --early-skip,--preset veryslow --no-cutree --analysis-mode=load --bitrate 9000 --early-skip,--preset veryslow --no-cutree --analysis-mode=load --bitrate 7000 --early-skip
+Johnny_1280x720_60.y4m,--preset medium --no-cutree --analysis-mode=save --bitrate 15000 --tskip-fast,--preset medium --no-cutree --analysis-mode=load --bitrate 13000  --tskip-fast,--preset medium --no-cutree --analysis-mode=load --bitrate 11000  --tskip-fast,--preset medium --no-cutree --analysis-mode=load --bitrate 9000  --tskip-fast,--preset medium --no-cutree --analysis-mode=load --bitrate 7000  --tskip-fast
+BasketballDrive_1920x1080_50.y4m,--preset medium --recon-y4m-exec "ffplay -i pipe:0 -autoexit"
+FourPeople_1280x720_60.y4m,--preset ultrafast --recon-y4m-exec "ffplay -i pipe:0 -autoexit"
+FourPeople_1280x720_60.y4m,--preset veryslow --recon-y4m-exec "ffplay -i pipe:0 -autoexit"
+
+# interlace test, even though input YUV is not field seperated
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset fast --interlace bff
+CrowdRun_1920x1080_50_10bit_422.yuv,--preset faster --interlace tff
+
+# vim: tw=200
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/smoke-tests.txt	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,21 @@
+# List of command lines to be run by smoke tests, see https://bitbucket.org/sborho/test-harness
+
+# consider VBV tests a failure if new bitrate is more than 5% different
+# from the old bitrate
+# vbv-tolerance = 0.05
+
+big_buck_bunny_360p24.y4m,--preset=superfast --bitrate 400 --vbv-bufsize 600 --vbv-maxrate 400 --hrd --aud --repeat-headers
+big_buck_bunny_360p24.y4m,--preset=medium --bitrate 1000 -F4 --cu-lossless --scaling-list default
+big_buck_bunny_360p24.y4m,--preset=slower --no-weightp --pme --qg-size 16
+washdc_422_ntsc.y4m,--preset=faster --no-strong-intra-smoothing --keyint 1 --qg-size 16
+washdc_422_ntsc.y4m,--preset=medium --qp 40 --nr-inter 400 -F4
+washdc_422_ntsc.y4m,--preset=veryslow --pmode --tskip --rdoq-level 0
+old_town_cross_444_720p50.y4m,--preset=ultrafast --weightp --keyint -1
+old_town_cross_444_720p50.y4m,--preset=fast --keyint 20 --min-cu-size 16
+old_town_cross_444_720p50.y4m,--preset=slow --sao-non-deblock --pmode --qg-size 32
+RaceHorses_416x240_30_10bit.yuv,--preset=veryfast --max-tu-size 8
+RaceHorses_416x240_30_10bit.yuv,--preset=slower --bitrate 500 -F4 --rdoq-level 1
+CrowdRun_1920x1080_50_10bit_444.yuv,--preset=ultrafast --constrained-intra --min-keyint 5 --keyint 10
+CrowdRun_1920x1080_50_10bit_444.yuv,--preset=medium --max-tu-size 16
+DucksAndLegs_1920x1080_60_10bit_422.yuv,--preset=veryfast --min-cu 16
+DucksAndLegs_1920x1080_60_10bit_422.yuv,--preset=fast --weightb --interlace bff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/testbench.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,246 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Gopu Govindaswamy <gopu@govindaswamy.org>
+ *          Mandar Gurav <mandar@multicorewareinc.com>
+ *          Mahesh Pittala <mahesh@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "pixelharness.h"
+#include "mbdstharness.h"
+#include "ipfilterharness.h"
+#include "intrapredharness.h"
+#include "param.h"
+#include "cpu.h"
+
+using namespace X265_NS;
+
+const char* lumaPartStr[NUM_PU_SIZES] =
+{
+    "  4x4", "  8x8", "16x16", "32x32", "64x64",
+    "  8x4", "  4x8",
+    " 16x8", " 8x16",
+    "32x16", "16x32",
+    "64x32", "32x64",
+    "16x12", "12x16", " 16x4", " 4x16",
+    "32x24", "24x32", " 32x8", " 8x32",
+    "64x48", "48x64", "64x16", "16x64",
+};
+
+const char* chromaPartStr420[NUM_PU_SIZES] =
+{
+    "  2x2", "  4x4", "  8x8", "16x16", "32x32",
+    "  4x2", "  2x4",
+    "  8x4", "  4x8",
+    " 16x8", " 8x16",
+    "32x16", "16x32",
+    "  8x6", "  6x8", "  8x2", "  2x8",
+    "16x12", "12x16", " 16x4", " 4x16",
+    "32x24", "24x32", " 32x8", " 8x32",
+};
+
+const char* chromaPartStr422[NUM_PU_SIZES] =
+{
+    "  2x4", "  4x8", " 8x16", "16x32", "32x64",
+    "  4x4", "  2x8",
+    "  8x8", " 4x16",
+    "16x16", " 8x32",
+    "32x32", "16x64",
+    " 8x12", " 6x16", "  8x4", " 2x16",
+    "16x24", "12x32", " 16x8", " 4x32",
+    "32x48", "24x64", "32x16", " 8x64",
+};
+
+const char* const* chromaPartStr[X265_CSP_COUNT] =
+{
+    lumaPartStr,
+    chromaPartStr420,
+    chromaPartStr422,
+    lumaPartStr
+};
+
+void do_help()
+{
+    printf("x265 optimized primitive testbench\n\n");
+    printf("usage: TestBench [--cpuid CPU] [--testbench BENCH] [--help]\n\n");
+    printf("       CPU is comma separated SIMD arch list, example: SSE4,AVX\n");
+    printf("       BENCH is one of (pixel,transforms,interp,intrapred)\n\n");
+    printf("By default, the test bench will test all benches on detected CPU architectures\n");
+    printf("Options and testbench name may be truncated.\n");
+}
+
+PixelHarness  HPixel;
+MBDstHarness  HMBDist;
+IPFilterHarness HIPFilter;
+IntraPredHarness HIPred;
+
+int main(int argc, char *argv[])
+{
+    int cpuid = X265_NS::cpu_detect();
+    const char *testname = 0;
+
+    if (!(argc & 1))
+    {
+        do_help();
+        return 0;
+    }
+    for (int i = 1; i < argc - 1; i += 2)
+    {
+        if (strncmp(argv[i], "--", 2))
+        {
+            printf("** invalid long argument: %s\n\n", argv[i]);
+            do_help();
+            return 1;
+        }
+        const char *name = argv[i] + 2;
+        const char *value = argv[i + 1];
+        if (!strncmp(name, "cpuid", strlen(name)))
+        {
+            bool bError = false;
+            cpuid = parseCpuName(value, bError);
+            if (bError)
+            {
+                printf("Invalid CPU name: %s\n", value);
+                return 1;
+            }
+        }
+        else if (!strncmp(name, "testbench", strlen(name)))
+        {
+            testname = value;
+            printf("Testing only harnesses that match name <%s>\n", testname);
+        }
+        else
+        {
+            printf("** invalid long argument: %s\n\n", name);
+            do_help();
+            return 1;
+        }
+    }
+
+    int seed = (int)time(NULL);
+    printf("Using random seed %X %dbit\n", seed, X265_DEPTH);
+    srand(seed);
+
+    // To disable classes of tests, simply comment them out in this list
+    TestHarness *harness[] =
+    {
+        &HPixel,
+        &HMBDist,
+        &HIPFilter,
+        &HIPred
+    };
+
+    EncoderPrimitives cprim;
+    memset(&cprim, 0, sizeof(EncoderPrimitives));
+    setupCPrimitives(cprim);
+    setupAliasPrimitives(cprim);
+
+    struct test_arch_t
+    {
+        char name[12];
+        int flag;
+    } test_arch[] =
+    {
+        { "SSE2", X265_CPU_SSE2 },
+        { "SSE3", X265_CPU_SSE3 },
+        { "SSSE3", X265_CPU_SSSE3 },
+        { "SSE4", X265_CPU_SSE4 },
+        { "AVX", X265_CPU_AVX },
+        { "XOP", X265_CPU_XOP },
+        { "AVX2", X265_CPU_AVX2 },
+        { "BMI2", X265_CPU_AVX2 | X265_CPU_BMI1 | X265_CPU_BMI2 },
+        { "", 0 },
+    };
+
+    for (int i = 0; test_arch[i].flag; i++)
+    {
+        if ((test_arch[i].flag & cpuid) == test_arch[i].flag)
+        {
+            printf("Testing primitives: %s\n", test_arch[i].name);
+            fflush(stdout);
+        }
+        else
+            continue;
+
+        EncoderPrimitives vecprim;
+        memset(&vecprim, 0, sizeof(vecprim));
+        setupInstrinsicPrimitives(vecprim, test_arch[i].flag);
+        setupAliasPrimitives(vecprim);
+        for (size_t h = 0; h < sizeof(harness) / sizeof(TestHarness*); h++)
+        {
+            if (testname && strncmp(testname, harness[h]->getName(), strlen(testname)))
+                continue;
+            if (!harness[h]->testCorrectness(cprim, vecprim))
+            {
+                fflush(stdout);
+                fprintf(stderr, "\nx265: intrinsic primitive has failed. Go and fix that Right Now!\n");
+                return -1;
+            }
+        }
+
+        EncoderPrimitives asmprim;
+        memset(&asmprim, 0, sizeof(asmprim));
+        setupAssemblyPrimitives(asmprim, test_arch[i].flag);
+        setupAliasPrimitives(asmprim);
+        memcpy(&primitives, &asmprim, sizeof(EncoderPrimitives));
+        for (size_t h = 0; h < sizeof(harness) / sizeof(TestHarness*); h++)
+        {
+            if (testname && strncmp(testname, harness[h]->getName(), strlen(testname)))
+                continue;
+            if (!harness[h]->testCorrectness(cprim, asmprim))
+            {
+                fflush(stdout);
+                fprintf(stderr, "\nx265: asm primitive has failed. Go and fix that Right Now!\n");
+                return -1;
+            }
+        }
+    }
+
+    /******************* Cycle count for all primitives **********************/
+
+    EncoderPrimitives optprim;
+    memset(&optprim, 0, sizeof(optprim));
+    setupInstrinsicPrimitives(optprim, cpuid);
+    setupAssemblyPrimitives(optprim, cpuid);
+
+    /* Note that we do not setup aliases for performance tests, that would be
+     * redundant. The testbench only verifies they are correctly aliased */
+
+    /* some hybrid primitives may rely on other primitives in the
+     * global primitive table, so set up those pointers. This is a
+     * bit ugly, but I don't see a better solution */
+    memcpy(&primitives, &optprim, sizeof(EncoderPrimitives));
+
+    printf("\nTest performance improvement with full optimizations\n");
+    fflush(stdout);
+
+    for (size_t h = 0; h < sizeof(harness) / sizeof(TestHarness*); h++)
+    {
+        if (testname && strncmp(testname, harness[h]->getName(), strlen(testname)))
+            continue;
+        printf("== %s primitives ==\n", harness[h]->getName());
+        harness[h]->measureSpeed(cprim, optprim);
+    }
+
+    printf("\n");
+    return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/test/testharness.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,168 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef _TESTHARNESS_H_
+#define _TESTHARNESS_H_ 1
+
+#include "common.h"
+#include "primitives.h"
+
+#if _MSC_VER
+#pragma warning(disable: 4324) // structure was padded due to __declspec(align())
+#endif
+
+#define PIXEL_MAX ((1 << X265_DEPTH) - 1)
+#define PIXEL_MIN 0
+#define SHORT_MAX  32767
+#define SHORT_MIN -32767
+#define UNSIGNED_SHORT_MAX 65535
+
+using namespace X265_NS;
+
+extern const char* lumaPartStr[NUM_PU_SIZES];
+extern const char* const* chromaPartStr[X265_CSP_COUNT];
+
+class TestHarness
+{
+public:
+
+    TestHarness() {}
+
+    virtual ~TestHarness() {}
+
+    virtual bool testCorrectness(const EncoderPrimitives& ref, const EncoderPrimitives& opt) = 0;
+
+    virtual void measureSpeed(const EncoderPrimitives& ref, const EncoderPrimitives& opt) = 0;
+
+    virtual const char *getName() const = 0;
+
+protected:
+
+    /* Temporary variables for stack checks */
+    int      m_ok;
+
+    uint64_t m_rand;
+};
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#elif HAVE_RDTSC
+#include <intrin.h>
+#elif defined(__GNUC__)
+/* fallback for older GCC/MinGW */
+static inline uint32_t __rdtsc(void)
+{
+    uint32_t a = 0;
+
+    asm volatile("rdtsc" : "=a" (a) ::"edx");
+    return a;
+}
+
+#endif // ifdef _MSC_VER
+
+#define BENCH_RUNS 1000
+
+// Adapted from checkasm.c, runs each optimized primitive four times, measures rdtsc
+// and discards invalid times.  Repeats 1000 times to get a good average.  Then measures
+// the C reference with fewer runs and reports X factor and average cycles.
+#define REPORT_SPEEDUP(RUNOPT, RUNREF, ...) \
+    { \
+        uint32_t cycles = 0; int runs = 0; \
+        RUNOPT(__VA_ARGS__); \
+        for (int ti = 0; ti < BENCH_RUNS; ti++) { \
+            uint32_t t0 = (uint32_t)__rdtsc(); \
+            RUNOPT(__VA_ARGS__); \
+            RUNOPT(__VA_ARGS__); \
+            RUNOPT(__VA_ARGS__); \
+            RUNOPT(__VA_ARGS__); \
+            uint32_t t1 = (uint32_t)__rdtsc() - t0; \
+            if (t1 * runs <= cycles * 4 && ti > 0) { cycles += t1; runs++; } \
+        } \
+        uint32_t refcycles = 0; int refruns = 0; \
+        RUNREF(__VA_ARGS__); \
+        for (int ti = 0; ti < BENCH_RUNS / 4; ti++) { \
+            uint32_t t0 = (uint32_t)__rdtsc(); \
+            RUNREF(__VA_ARGS__); \
+            RUNREF(__VA_ARGS__); \
+            RUNREF(__VA_ARGS__); \
+            RUNREF(__VA_ARGS__); \
+            uint32_t t1 = (uint32_t)__rdtsc() - t0; \
+            if (t1 * refruns <= refcycles * 4 && ti > 0) { refcycles += t1; refruns++; } \
+        } \
+        x265_emms(); \
+        float optperf = (10.0f * cycles / runs) / 4; \
+        float refperf = (10.0f * refcycles / refruns) / 4; \
+        printf("\t%3.2fx ", refperf / optperf); \
+        printf("\t %-8.2lf \t %-8.2lf\n", optperf, refperf); \
+    }
+
+extern "C" {
+#if X265_ARCH_X86
+int PFX(stack_pagealign)(int (*func)(), int align);
+
+/* detect when callee-saved regs aren't saved
+ * needs an explicit asm check because it only sometimes crashes in normal use. */
+intptr_t PFX(checkasm_call)(intptr_t (*func)(), int *ok, ...);
+float PFX(checkasm_call_float)(float (*func)(), int *ok, ...);
+#else
+#define PFX(stack_pagealign)(func, align) func()
+#endif
+
+#if X86_64
+
+/* Evil hack: detect incorrect assumptions that 32-bit ints are zero-extended to 64-bit.
+ * This is done by clobbering the stack with junk around the stack pointer and calling the
+ * assembly function through x265_checkasm_call with added dummy arguments which forces all
+ * real arguments to be passed on the stack and not in registers. For 32-bit argument the
+ * upper half of the 64-bit register location on the stack will now contain junk. Note that
+ * this is dependent on compiler behavior and that interrupts etc. at the wrong time may
+ * overwrite the junk written to the stack so there's no guarantee that it will always
+ * detect all functions that assumes zero-extension.
+ */
+void PFX(checkasm_stack_clobber)(uint64_t clobber, ...);
+#define checked(func, ...) ( \
+        m_ok = 1, m_rand = (rand() & 0xffff) * 0x0001000100010001ULL, \
+        PFX(checkasm_stack_clobber)(m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, \
+                                    m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, \
+                                    m_rand, m_rand, m_rand, m_rand, m_rand), /* max_args+6 */ \
+        PFX(checkasm_call)((intptr_t(*)())func, &m_ok, 0, 0, 0, 0, __VA_ARGS__))
+
+#define checked_float(func, ...) ( \
+        m_ok = 1, m_rand = (rand() & 0xffff) * 0x0001000100010001ULL, \
+        PFX(checkasm_stack_clobber)(m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, \
+                                    m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, m_rand, \
+                                    m_rand, m_rand, m_rand, m_rand, m_rand), /* max_args+6 */ \
+        PFX(checkasm_call_float)((float(*)())func, &m_ok, 0, 0, 0, 0, __VA_ARGS__))
+#define reportfail() if (!m_ok) { fflush(stdout); fprintf(stderr, "stack clobber check failed at %s:%d", __FILE__, __LINE__); abort(); }
+#elif ARCH_X86
+#define checked(func, ...) PFX(checkasm_call)((intptr_t(*)())func, &m_ok, __VA_ARGS__);
+#define checked_float(func, ...) PFX(checkasm_call_float)((float(*)())func, &m_ok, __VA_ARGS__);
+
+#else // if X86_64
+#define checked(func, ...) func(__VA_ARGS__)
+#define checked_float(func, ...) func(__VA_ARGS__)
+#define reportfail()
+#endif // if X86_64
+}
+
+#endif // ifndef _TESTHARNESS_H_
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/x265-extras.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,341 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *          Selvakumar Nithiyaruban <selvakumar@multicorewareinc.com>
+ *          Divya Manivannan <divya@multicorewareinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "x265.h"
+#include "x265-extras.h"
+
+#include "common.h"
+
+using namespace X265_NS;
+
/* Column header row for the summary CSV: written at the top of a brand-new
 * summary-only log (level == 0), and repeated before the "Summary" section
 * appended by x265_csvlog_encode() to a per-frame log. Column order must
 * stay in sync with the fprintf sequence in x265_csvlog_encode(). */
static const char* summaryCSVHeader =
    "Command, Date/Time, Elapsed Time, FPS, Bitrate, "
    "Y PSNR, U PSNR, V PSNR, Global PSNR, SSIM, SSIM (dB), "
    "I count, I ave-QP, I kbps, I-PSNR Y, I-PSNR U, I-PSNR V, I-SSIM (dB), "
    "P count, P ave-QP, P kbps, P-PSNR Y, P-PSNR U, P-PSNR V, P-SSIM (dB), "
    "B count, B ave-QP, B kbps, B-PSNR Y, B-PSNR U, B-PSNR V, B-SSIM (dB), "
    "MaxCLL, MaxFALL, Version\n";
+
/* Open (or re-open for append) the CSV log file 'fname'.
 * Returns NULL if the caller was built against a different layout of
 * x265_stats/x265_picture than the linked library (ABI skew) or the file
 * cannot be opened. If the file already exists it is opened for append and
 * no header is written. For a new file:
 *   level == 0 : summary-only log, writes summaryCSVHeader;
 *   level >= 1 : per-frame log whose column set depends on param (CRF rate
 *                factor, PSNR, SSIM, and per-CU-depth columns up to
 *                g_maxCUDepth); level >= 2 adds performance columns.
 * The returned handle must be fclose()d by the caller. */
FILE* x265_csvlog_open(const x265_api& api, const x265_param& param, const char* fname, int level)
{
    /* guard against ABI mismatch between the caller and the linked libx265 */
    if (sizeof(x265_stats) != api.sizeof_stats || sizeof(x265_picture) != api.sizeof_picture)
    {
        fprintf(stderr, "extras [error]: structure size skew, unable to create CSV logfile\n");
        return NULL;
    }

    /* probe for existence first; NOTE(review): this open-then-reopen pattern
     * has an inherent TOCTOU race, acceptable for a log file */
    FILE *csvfp = fopen(fname, "r");
    if (csvfp)
    {
        /* file already exists, re-open for append */
        fclose(csvfp);
        return fopen(fname, "ab");
    }
    else
    {
        /* new CSV file, write header */
        csvfp = fopen(fname, "wb");
        if (csvfp)
        {
            if (level)
            {
                /* per-frame header; the optional columns below must mirror
                 * the fprintf sequence in x265_csvlog_frame() */
                fprintf(csvfp, "Encode Order, Type, POC, QP, Bits, Scenecut, ");
                if (param.rc.rateControlMode == X265_RC_CRF)
                    fprintf(csvfp, "RateFactor, ");
                if (param.bEnablePsnr)
                    fprintf(csvfp, "Y PSNR, U PSNR, V PSNR, YUV PSNR, ");
                if (param.bEnableSsim)
                    fprintf(csvfp, "SSIM, SSIM(dB),");
                fprintf(csvfp, "List 0, List 1");
                /* one column group per CU depth, from maxCUSize down by halves;
                 * NOTE(review): 'size' is uint32_t printed with %d — benign for
                 * realistic CU sizes but %u would be the matching specifier */
                uint32_t size = param.maxCUSize;
                for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
                {
                    fprintf(csvfp, ", Intra %dx%d DC, Intra %dx%d Planar, Intra %dx%d Ang", size, size, size, size, size, size);
                    size /= 2;
                }
                fprintf(csvfp, ", 4x4");
                size = param.maxCUSize;
                if (param.bEnableRectInter)
                {
                    for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
                    {
                        fprintf(csvfp, ", Inter %dx%d, Inter %dx%d (Rect)", size, size, size, size);
                        if (param.bEnableAMP)
                            fprintf(csvfp, ", Inter %dx%d (Amp)", size, size);
                        size /= 2;
                    }
                }
                else
                {
                    for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
                    {
                        fprintf(csvfp, ", Inter %dx%d", size, size);
                        size /= 2;
                    }
                }
                size = param.maxCUSize;
                for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
                {
                    fprintf(csvfp, ", Skip %dx%d", size, size);
                    size /= 2;
                }
                size = param.maxCUSize;
                for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
                {
                    fprintf(csvfp, ", Merge %dx%d", size, size);
                    size /= 2;
                }
                fprintf(csvfp, ", Avg Luma Distortion, Avg Chroma Distortion, Avg psyEnergy, Avg Luma Level, Max Luma Level, Avg Residual Energy");

                /* detailed performance statistics */
                if (level >= 2)
                    fprintf(csvfp, ", DecideWait (ms), Row0Wait (ms), Wall time (ms), Ref Wait Wall (ms), Total CTU time (ms), Stall Time (ms), Avg WPP, Row Blocks");
                fprintf(csvfp, "\n");
            }
            else
                fputs(summaryCSVHeader, csvfp);
        }
        /* may be NULL if the "wb" open failed */
        return csvfp;
    }
}
+
+// per frame CSV logging
+void x265_csvlog_frame(FILE* csvfp, const x265_param& param, const x265_picture& pic, int level)
+{
+    if (!csvfp)
+        return;
+
+    const x265_frame_stats* frameStats = &pic.frameData;
+    fprintf(csvfp, "%d, %c-SLICE, %4d, %2.2lf, %10d, %d,", frameStats->encoderOrder, frameStats->sliceType, frameStats->poc, frameStats->qp, (int)frameStats->bits, frameStats->bScenecut);
+    if (param.rc.rateControlMode == X265_RC_CRF)
+        fprintf(csvfp, "%.3lf,", frameStats->rateFactor);
+    if (param.bEnablePsnr)
+        fprintf(csvfp, "%.3lf, %.3lf, %.3lf, %.3lf,", frameStats->psnrY, frameStats->psnrU, frameStats->psnrV, frameStats->psnr);
+    if (param.bEnableSsim)
+        fprintf(csvfp, " %.6f, %6.3f,", frameStats->ssim, x265_ssim2dB(frameStats->ssim));
+    if (frameStats->sliceType == 'I')
+        fputs(" -, -,", csvfp);
+    else
+    {
+        int i = 0;
+        while (frameStats->list0POC[i] != -1)
+            fprintf(csvfp, "%d ", frameStats->list0POC[i++]);
+        fprintf(csvfp, ",");
+        if (frameStats->sliceType != 'P')
+        {
+            i = 0;
+            while (frameStats->list1POC[i] != -1)
+                fprintf(csvfp, "%d ", frameStats->list1POC[i++]);
+            fprintf(csvfp, ",");
+        }
+        else
+            fputs(" -,", csvfp);
+    }
+    for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
+        fprintf(csvfp, "%5.2lf%%, %5.2lf%%, %5.2lf%%,", frameStats->cuStats.percentIntraDistribution[depth][0], frameStats->cuStats.percentIntraDistribution[depth][1], frameStats->cuStats.percentIntraDistribution[depth][2]);
+    fprintf(csvfp, "%5.2lf%%", frameStats->cuStats.percentIntraNxN);
+    if (param.bEnableRectInter)
+    {
+        for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
+        {
+            fprintf(csvfp, ", %5.2lf%%, %5.2lf%%", frameStats->cuStats.percentInterDistribution[depth][0], frameStats->cuStats.percentInterDistribution[depth][1]);
+            if (param.bEnableAMP)
+                fprintf(csvfp, ", %5.2lf%%", frameStats->cuStats.percentInterDistribution[depth][2]);
+        }
+    }
+    else
+    {
+        for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
+            fprintf(csvfp, ", %5.2lf%%", frameStats->cuStats.percentInterDistribution[depth][0]);
+    }
+    for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
+        fprintf(csvfp, ", %5.2lf%%", frameStats->cuStats.percentSkipCu[depth]);
+    for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
+        fprintf(csvfp, ", %5.2lf%%", frameStats->cuStats.percentMergeCu[depth]);
+    fprintf(csvfp, ", %.2lf, %.2lf, %.2lf, %.2lf, %d, %.2lf", frameStats->avgLumaDistortion, frameStats->avgChromaDistortion, frameStats->avgPsyEnergy, frameStats->avgLumaLevel, frameStats->maxLumaLevel, frameStats->avgResEnergy);
+
+    if (level >= 2)
+    {
+        fprintf(csvfp, ", %.1lf, %.1lf, %.1lf, %.1lf, %.1lf, %.1lf,", frameStats->decideWaitTime, frameStats->row0WaitTime, frameStats->wallTime, frameStats->refWaitWallTime, frameStats->totalCTUTime, frameStats->stallTime);
+        fprintf(csvfp, " %.3lf, %d", frameStats->avgWPP, frameStats->countRowBlocks);
+    }
+    fprintf(csvfp, "\n");
+    fflush(stderr);
+}
+
+void x265_csvlog_encode(FILE* csvfp, const x265_api& api, const x265_param& param, const x265_stats& stats, int level, int argc, char** argv)
+{
+    if (!csvfp)
+        return;
+
+    if (level)
+    {
+        // adding summary to a per-frame csv log file, so it needs a summary header
+        fprintf(csvfp, "\nSummary\n");
+        fputs(summaryCSVHeader, csvfp);
+    }
+
+    // CLI arguments or other
+    for (int i = 1; i < argc; i++)
+    {
+        if (i) fputc(' ', csvfp);
+        fputs(argv[i], csvfp);
+    }
+
+    // current date and time
+    time_t now;
+    struct tm* timeinfo;
+    time(&now);
+    timeinfo = localtime(&now);
+    char buffer[200];
+    strftime(buffer, 128, "%c", timeinfo);
+    fprintf(csvfp, ", %s, ", buffer);
+
+    // elapsed time, fps, bitrate
+    fprintf(csvfp, "%.2f, %.2f, %.2f,",
+        stats.elapsedEncodeTime, stats.encodedPictureCount / stats.elapsedEncodeTime, stats.bitrate);
+
+    if (param.bEnablePsnr)
+        fprintf(csvfp, " %.3lf, %.3lf, %.3lf, %.3lf,",
+        stats.globalPsnrY / stats.encodedPictureCount, stats.globalPsnrU / stats.encodedPictureCount,
+        stats.globalPsnrV / stats.encodedPictureCount, stats.globalPsnr);
+    else
+        fprintf(csvfp, " -, -, -, -,");
+    if (param.bEnableSsim)
+        fprintf(csvfp, " %.6f, %6.3f,", stats.globalSsim, x265_ssim2dB(stats.globalSsim));
+    else
+        fprintf(csvfp, " -, -,");
+
+    if (stats.statsI.numPics)
+    {
+        fprintf(csvfp, " %-6u, %2.2lf, %-8.2lf,", stats.statsI.numPics, stats.statsI.avgQp, stats.statsI.bitrate);
+        if (param.bEnablePsnr)
+            fprintf(csvfp, " %.3lf, %.3lf, %.3lf,", stats.statsI.psnrY, stats.statsI.psnrU, stats.statsI.psnrV);
+        else
+            fprintf(csvfp, " -, -, -,");
+        if (param.bEnableSsim)
+            fprintf(csvfp, " %.3lf,", stats.statsI.ssim);
+        else
+            fprintf(csvfp, " -,");
+    }
+    else
+        fprintf(csvfp, " -, -, -, -, -, -, -,");
+
+    if (stats.statsP.numPics)
+    {
+        fprintf(csvfp, " %-6u, %2.2lf, %-8.2lf,", stats.statsP.numPics, stats.statsP.avgQp, stats.statsP.bitrate);
+        if (param.bEnablePsnr)
+            fprintf(csvfp, " %.3lf, %.3lf, %.3lf,", stats.statsP.psnrY, stats.statsP.psnrU, stats.statsP.psnrV);
+        else
+            fprintf(csvfp, " -, -, -,");
+        if (param.bEnableSsim)
+            fprintf(csvfp, " %.3lf,", stats.statsP.ssim);
+        else
+            fprintf(csvfp, " -,");
+    }
+    else
+        fprintf(csvfp, " -, -, -, -, -, -, -,");
+
+    if (stats.statsB.numPics)
+    {
+        fprintf(csvfp, " %-6u, %2.2lf, %-8.2lf,", stats.statsB.numPics, stats.statsB.avgQp, stats.statsB.bitrate);
+        if (param.bEnablePsnr)
+            fprintf(csvfp, " %.3lf, %.3lf, %.3lf,", stats.statsB.psnrY, stats.statsB.psnrU, stats.statsB.psnrV);
+        else
+            fprintf(csvfp, " -, -, -,");
+        if (param.bEnableSsim)
+            fprintf(csvfp, " %.3lf,", stats.statsB.ssim);
+        else
+            fprintf(csvfp, " -,");
+    }
+    else
+        fprintf(csvfp, " -, -, -, -, -, -, -,");
+
+    fprintf(csvfp, " %-6u, %-6u, %s\n", stats.maxCLL, stats.maxFALL, api.version_str);
+}
+
+/* The dithering algorithm is based on Sierra-2-4A error diffusion. */
+static void ditherPlane(pixel *dst, int dstStride, uint16_t *src, int srcStride,
+                        int width, int height, int16_t *errors, int bitDepth)
+{
+    const int lShift = 16 - bitDepth;
+    const int rShift = 16 - bitDepth + 2;
+    const int half = (1 << (16 - bitDepth + 1));
+    const int pixelMax = (1 << bitDepth) - 1;
+
+    memset(errors, 0, (width + 1) * sizeof(int16_t));
+    int pitch = 1;
+    for (int y = 0; y < height; y++, src += srcStride, dst += dstStride)
+    {
+        int16_t err = 0;
+        for (int x = 0; x < width; x++)
+        {
+            err = err * 2 + errors[x] + errors[x + 1];
+            dst[x * pitch] = (pixel)x265_clip3(0, pixelMax, ((src[x * 1] << 2) + err + half) >> rShift);
+            errors[x] = err = src[x * pitch] - (dst[x * pitch] << lShift);
+        }
+    }
+}
+
/* Dither picIn in place down to 'bitDepth' output (see x265-extras.h).
 * picIn.bitDepth must be > 8; planes with a non-multiple-of-8 depth are first
 * upconverted to a full 16-bit range. 'errorBuf' is scratch for ditherPlane
 * and must hold at least picWidth+1 int16_t entries. Refuses to run on ABI
 * skew between the caller's x265_picture layout and the linked library. */
void x265_dither_image(const x265_api& api, x265_picture& picIn, int picWidth, int picHeight, int16_t *errorBuf, int bitDepth)
{
    if (sizeof(x265_picture) != api.sizeof_picture)
    {
        fprintf(stderr, "extras [error]: structure size skew, unable to dither\n");
        return;
    }

    if (picIn.bitDepth <= 8)
    {
        fprintf(stderr, "extras [error]: dither support enabled only for input bitdepth > 8\n");
        return;
    }

    /* This portion of code is from readFrame in x264. */
    for (int i = 0; i < x265_cli_csps[picIn.colorSpace].planes; i++)
    {
        /* NOTE(review): the '!= 16' test is redundant — (16 & 7) is already 0 */
        if ((picIn.bitDepth & 7) && (picIn.bitDepth != 16))
        {
            /* upconvert non 16bit high depth planes to 16bit */
            uint16_t *plane = (uint16_t*)picIn.planes[i];
            uint32_t pixelCount = x265_picturePlaneSize(picIn.colorSpace, picWidth, picHeight, i);
            int lShift = 16 - picIn.bitDepth;

            /* This loop assumes width is equal to stride which
             * happens to be true for file reader outputs */
            for (uint32_t j = 0; j < pixelCount; j++)
                plane[j] = plane[j] << lShift;
        }
    }

    for (int i = 0; i < x265_cli_csps[picIn.colorSpace].planes; i++)
    {
        /* chroma planes may be subsampled; shift dimensions per the csp table */
        int height = (int)(picHeight >> x265_cli_csps[picIn.colorSpace].height[i]);
        int width = (int)(picWidth >> x265_cli_csps[picIn.colorSpace].width[i]);

        /* source is read as 16-bit (stride/2), destination written as 'pixel';
         * NOTE(review): assumes stride[] is in bytes — confirm against the
         * x265_picture contract used by the caller */
        ditherPlane(((pixel*)picIn.planes[i]), picIn.stride[i] / sizeof(pixel), ((uint16_t*)picIn.planes[i]),
                    picIn.stride[i] / 2, width, height, errorBuf, bitDepth);
    }
}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/x265-extras.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,66 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_EXTRAS_H
+#define X265_EXTRAS_H 1
+
+#include "x265.h"
+
+#include <stdio.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if _WIN32
+#define LIBAPI __declspec(dllexport)
+#else
+#define LIBAPI
+#endif
+
+/* Open a CSV log file. On success it returns a file handle which must be passed
+ * to x265_csvlog_frame() and/or x265_csvlog_encode(). The file handle must be
+ * closed by the caller using fclose(). If level is 0, then no frame logging
+ * header is written to the file. This function will return NULL if it is unable
+ * to open the file for write or if it detects a structure size skew */
+LIBAPI FILE* x265_csvlog_open(const x265_api& api, const x265_param& param, const char* fname, int level);
+
+/* Log frame statistics to the CSV file handle. level should have been non-zero
+ * in the call to x265_csvlog_open() if this function is called. */
+LIBAPI void x265_csvlog_frame(FILE* csvfp, const x265_param& param, const x265_picture& pic, int level);
+
+/* Log final encode statistics to the CSV file handle. 'argc' and 'argv' are
+ * intended to be command line arguments passed to the encoder. Encode
+ * statistics should be queried from the encoder just prior to closing it. */
+LIBAPI void x265_csvlog_encode(FILE* csvfp, const x265_api& api, const x265_param& param, const x265_stats& stats, int level, int argc, char** argv);
+
+/* In-place downshift from a bit-depth greater than 8 to a bit-depth of 8, using
+ * the residual bits to dither each row. */
+LIBAPI void x265_dither_image(const x265_api& api, x265_picture&, int picWidth, int picHeight, int16_t *errorBuf, int bitDepth);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/x265.cpp	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,754 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#if _MSC_VER
+#pragma warning(disable: 4127) // conditional expression is constant, yes I know
+#endif
+
+#include "x265.h"
+#include "x265-extras.h"
+#include "x265cli.h"
+
+#include "common.h"
+#include "input/input.h"
+#include "output/output.h"
+#include "output/reconplay.h"
+
+#include "param.h"
+#include "cpu.h"
+
+#if HAVE_VLD
+/* Visual Leak Detector */
+#include <vld.h>
+#endif
+
+#include <signal.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <string>
+#include <ostream>
+#include <fstream>
+#include <queue>
+
+#define CONSOLE_TITLE_SIZE 200
+#ifdef _WIN32
+#include <windows.h>
+static char orgConsoleTitle[CONSOLE_TITLE_SIZE] = "";
+#else
+#define GetConsoleTitle(t, n)
+#define SetConsoleTitle(t)
+#define SetThreadExecutionState(es)
+#endif
+
+using namespace X265_NS;
+
/* Ctrl-C handler */
/* Flag raised by the SIGINT handler; sig_atomic_t is the only object type the
 * C standard guarantees is safe to store to from a signal handler.
 * Presumably polled by the encode loop in main() (below this view) so the
 * encoder can finish cleanly rather than dying mid-frame. */
static volatile sig_atomic_t b_ctrl_c /* = 0 */;
static void sigint_handler(int)
{
    /* only set the flag; no I/O or allocation is async-signal-safe here */
    b_ctrl_c = 1;
}
+
/* Aggregates everything the CLI front-end owns: parsed options, open file
 * handles, and progress-reporting state. destroy() must be called to release
 * the resources acquired by parse(). */
struct CLIOptions
{
    InputFile* input;           // raw/Y4M source reader (owned; release()d in destroy)
    ReconFile* recon;           // optional reconstructed-frame writer (owned)
    OutputFile* output;         // bitstream writer (owned)
    FILE*       qpfile;         // optional --qpfile handle (owned)
    FILE*       csvfpt;         // optional CSV log handle (owned)
    const char* csvfn;          // --csv filename (points into argv; not owned)
    const char* reconPlayCmd;   // --recon-y4m-exec command (not owned)
    const x265_api* api;        // API vtable for the selected output bit depth
    x265_param* param;          // encoder parameters (allocated via api->param_alloc)
    bool bProgress;             // print progress line (disabled by --no-progress)
    bool bForceY4m;             // treat input as Y4M regardless of extension
    bool bDither;               // --dither: downconvert with error diffusion
    int csvLogLevel;            // --csv-log-level: 0 summary, 1 per-frame, 2+ perf
    uint32_t seek;              // number of frames to skip from the beginning
    uint32_t framesToBeEncoded; // number of frames to encode
    uint64_t totalbytes;        // bitstream bytes written so far (for bitrate)
    int64_t startTime;          // x265_mdate() at construction, microseconds
    int64_t prevUpdateTime;     // last progress update, microseconds

    /* in microseconds */
    static const int UPDATE_INTERVAL = 250000;

    /* zero/NULL everything; no resources are acquired until parse() */
    CLIOptions()
    {
        input = NULL;
        recon = NULL;
        output = NULL;
        qpfile = NULL;
        csvfpt = NULL;
        csvfn = NULL;
        reconPlayCmd = NULL;
        api = NULL;
        param = NULL;
        framesToBeEncoded = seek = 0;
        totalbytes = 0;
        bProgress = true;
        bForceY4m = false;
        startTime = x265_mdate();
        prevUpdateTime = 0;
        bDither = false;
        csvLogLevel = 0;
    }

    void destroy();
    void printStatus(uint32_t frameNum);
    bool parse(int argc, char **argv);
    bool parseQPFile(x265_picture &pic_org);
};
+
+void CLIOptions::destroy()
+{
+    if (input)
+        input->release();
+    input = NULL;
+    if (recon)
+        recon->release();
+    recon = NULL;
+    if (qpfile)
+        fclose(qpfile);
+    qpfile = NULL;
+    if (csvfpt)
+        fclose(csvfpt);
+    csvfpt = NULL;
+    if (output)
+        output->release();
+    output = NULL;
+}
+
+void CLIOptions::printStatus(uint32_t frameNum)
+{
+    char buf[200];
+    int64_t time = x265_mdate();
+
+    if (!bProgress || !frameNum || (prevUpdateTime && time - prevUpdateTime < UPDATE_INTERVAL))
+        return;
+
+    int64_t elapsed = time - startTime;
+    double fps = elapsed > 0 ? frameNum * 1000000. / elapsed : 0;
+    float bitrate = 0.008f * totalbytes * (param->fpsNum / param->fpsDenom) / ((float)frameNum);
+    if (framesToBeEncoded)
+    {
+        int eta = (int)(elapsed * (framesToBeEncoded - frameNum) / ((int64_t)frameNum * 1000000));
+        sprintf(buf, "x265 [%.1f%%] %d/%d frames, %.2f fps, %.2f kb/s, eta %d:%02d:%02d",
+                100. * frameNum / framesToBeEncoded, frameNum, framesToBeEncoded, fps, bitrate,
+                eta / 3600, (eta / 60) % 60, eta % 60);
+    }
+    else
+        sprintf(buf, "x265 %d frames: %.2f fps, %.2f kb/s", frameNum, fps, bitrate);
+
+    fprintf(stderr, "%s  \r", buf + 5);
+    SetConsoleTitle(buf);
+    fflush(stderr); // needed in windows
+    prevUpdateTime = time;
+}
+
/* Parse the command line and open all files the encode needs.
 * Returns true on error or when only help/version was requested (caller
 * should destroy() and exit), false when encoding may proceed. On success
 * api, param, input and output are valid; recon/qpfile are set only when the
 * corresponding options were given. Option parsing happens in two getopt
 * passes: presets/profile/output-depth first (they select the API and seed
 * param defaults), then everything else. */
bool CLIOptions::parse(int argc, char **argv)
{
    bool bError = false;
    int bShowHelp = false;
    int inputBitDepth = 8;
    int outputBitDepth = 0;
    int reconFileBitDepth = 0;
    const char *inputfn = NULL;
    const char *reconfn = NULL;
    const char *outputfn = NULL;
    const char *preset = NULL;
    const char *tune = NULL;
    const char *profile = NULL;

    if (argc <= 1)
    {
        x265_log(NULL, X265_LOG_ERROR, "No input file. Run x265 --help for a list of options.\n");
        return true;
    }

    /* Presets are applied before all other options. */
    for (optind = 0;; )
    {
        int c = getopt_long(argc, argv, short_options, long_options, NULL);
        if (c == -1)
            break;
        else if (c == 'p')
            preset = optarg;
        else if (c == 't')
            tune = optarg;
        else if (c == 'D')
            outputBitDepth = atoi(optarg);
        else if (c == 'P')
            profile = optarg;
        else if (c == '?')
            bShowHelp = true;
    }

    if (!outputBitDepth && profile)
    {
        /* try to derive the output bit depth from the requested profile */
        if (strstr(profile, "10"))
            outputBitDepth = 10;
        else if (strstr(profile, "12"))
            outputBitDepth = 12;
        else
            outputBitDepth = 8;
    }

    /* select the libx265 build matching the requested bit depth */
    api = x265_api_get(outputBitDepth);
    if (!api)
    {
        x265_log(NULL, X265_LOG_WARNING, "falling back to default bit-depth\n");
        api = x265_api_get(0);
    }

    param = api->param_alloc();
    if (!param)
    {
        x265_log(NULL, X265_LOG_ERROR, "param alloc failed\n");
        return true;
    }

    if (api->param_default_preset(param, preset, tune) < 0)
    {
        x265_log(NULL, X265_LOG_ERROR, "preset or tune unrecognized\n");
        return true;
    }

    if (bShowHelp)
    {
        printVersion(param, api);
        showHelp(param);
    }

    /* second pass: handle every remaining option */
    for (optind = 0;; )
    {
        int long_options_index = -1;
        int c = getopt_long(argc, argv, short_options, long_options, &long_options_index);
        if (c == -1)
            break;

        switch (c)
        {
        case 'h':
            printVersion(param, api);
            showHelp(param);
            break;

        case 'V':
            printVersion(param, api);
            x265_report_simd(param);
            exit(0);

        default:
            /* map a short option back to its long_options entry so the name
             * can be fed to param_parse below */
            if (long_options_index < 0 && c > 0)
            {
                for (size_t i = 0; i < sizeof(long_options) / sizeof(long_options[0]); i++)
                {
                    if (long_options[i].val == c)
                    {
                        long_options_index = (int)i;
                        break;
                    }
                }

                if (long_options_index < 0)
                {
                    /* getopt_long might have already printed an error message */
                    if (c != 63)
                        x265_log(NULL, X265_LOG_WARNING, "internal error: short option '%c' has no long option\n", c);
                    return true;
                }
            }
            if (long_options_index < 0)
            {
                x265_log(NULL, X265_LOG_WARNING, "short option '%c' unrecognized\n", c);
                return true;
            }
/* NOTE(review): OPT2 is never #undef'd (only OPT is, below) — harmless here
 * but it leaks past this function */
#define OPT(longname) \
    else if (!strcmp(long_options[long_options_index].name, longname))
#define OPT2(name1, name2) \
    else if (!strcmp(long_options[long_options_index].name, name1) || \
             !strcmp(long_options[long_options_index].name, name2))

            if (0) ;
            OPT2("frame-skip", "seek") this->seek = (uint32_t)x265_atoi(optarg, bError);
            OPT("frames") this->framesToBeEncoded = (uint32_t)x265_atoi(optarg, bError);
            OPT("csv") this->csvfn = optarg;
            OPT("csv-log-level") this->csvLogLevel = x265_atoi(optarg, bError);
            OPT("no-progress") this->bProgress = false;
            OPT("output") outputfn = optarg;
            OPT("input") inputfn = optarg;
            OPT("recon") reconfn = optarg;
            OPT("input-depth") inputBitDepth = (uint32_t)x265_atoi(optarg, bError);
            OPT("dither") this->bDither = true;
            OPT("recon-depth") reconFileBitDepth = (uint32_t)x265_atoi(optarg, bError);
            OPT("y4m") this->bForceY4m = true;
            OPT("profile") /* handled above */;
            OPT("preset")  /* handled above */;
            OPT("tune")    /* handled above */;
            OPT("output-depth")   /* handled above */;
            OPT("recon-y4m-exec") reconPlayCmd = optarg;
            OPT("qpfile")
            {
                this->qpfile = fopen(optarg, "rb");
                if (!this->qpfile)
                {
                    /* NOTE(review): this returns false ('success') while every
                     * other failure path returns true — looks inconsistent;
                     * confirm whether a missing qpfile should abort */
                    x265_log(param, X265_LOG_ERROR, "%s qpfile not found or error in opening qp file\n", optarg);
                    return false;
                }
            }
            else
                /* anything not handled above is forwarded to libx265 */
                bError |= !!api->param_parse(param, long_options[long_options_index].name, optarg);

            if (bError)
            {
                const char *name = long_options_index > 0 ? long_options[long_options_index].name : argv[optind - 2];
                x265_log(NULL, X265_LOG_ERROR, "invalid argument: %s = %s\n", name, optarg);
                return true;
            }
#undef OPT
        }
    }

    /* positional arguments: input file then output file */
    if (optind < argc && !inputfn)
        inputfn = argv[optind++];
    if (optind < argc && !outputfn)
        outputfn = argv[optind++];
    if (optind < argc)
    {
        x265_log(param, X265_LOG_WARNING, "extra unused command arguments given <%s>\n", argv[optind]);
        return true;
    }

    if (argc <= 1)
    {
        api->param_default(param);
        printVersion(param, api);
        showHelp(param);
    }

    if (!inputfn || !outputfn)
    {
        x265_log(param, X265_LOG_ERROR, "input or output file not specified, try --help for help\n");
        return true;
    }

    if (param->internalBitDepth != api->bit_depth)
    {
        x265_log(param, X265_LOG_ERROR, "Only bit depths of %d are supported in this build\n", api->bit_depth);
        return true;
    }

    /* seed the input probe with whatever the user specified; the reader may
     * override from Y4M headers */
    InputFileInfo info;
    info.filename = inputfn;
    info.depth = inputBitDepth;
    info.csp = param->internalCsp;
    info.width = param->sourceWidth;
    info.height = param->sourceHeight;
    info.fpsNum = param->fpsNum;
    info.fpsDenom = param->fpsDenom;
    info.sarWidth = param->vui.sarWidth;
    info.sarHeight = param->vui.sarHeight;
    info.skipFrames = seek;
    info.frameCount = 0;
    getParamAspectRatio(param, info.sarWidth, info.sarHeight);

    this->input = InputFile::open(info, this->bForceY4m);
    if (!this->input || this->input->isFail())
    {
        x265_log(param, X265_LOG_ERROR, "unable to open input file <%s>\n", inputfn);
        return true;
    }

    if (info.depth < 8 || info.depth > 16)
    {
        x265_log(param, X265_LOG_ERROR, "Input bit depth (%d) must be between 8 and 16\n", inputBitDepth);
        return true;
    }

    /* Unconditionally accept height/width/csp from file info */
    param->sourceWidth = info.width;
    param->sourceHeight = info.height;
    param->internalCsp = info.csp;

    /* Accept fps and sar from file info if not specified by user */
    if (param->fpsDenom == 0 || param->fpsNum == 0)
    {
        param->fpsDenom = info.fpsDenom;
        param->fpsNum = info.fpsNum;
    }
    if (!param->vui.aspectRatioIdc && info.sarWidth && info.sarHeight)
        setParamAspectRatio(param, info.sarWidth, info.sarHeight);
    if (this->framesToBeEncoded == 0 && info.frameCount > (int)seek)
        this->framesToBeEncoded = info.frameCount - seek;
    param->totalFrames = this->framesToBeEncoded;

    /* Force CFR until we have support for VFR */
    info.timebaseNum = param->fpsDenom;
    info.timebaseDenom = param->fpsNum;

    if (api->param_apply_profile(param, profile))
        return true;

    if (param->logLevel >= X265_LOG_INFO)
    {
        /* build a one-line summary of the input for the log */
        char buf[128];
        int p = sprintf(buf, "%dx%d fps %d/%d %sp%d", param->sourceWidth, param->sourceHeight,
                        param->fpsNum, param->fpsDenom, x265_source_csp_names[param->internalCsp], info.depth);

        int width, height;
        getParamAspectRatio(param, width, height);
        if (width && height)
            p += sprintf(buf + p, " sar %d:%d", width, height);

        if (framesToBeEncoded <= 0 || info.frameCount <= 0)
            strcpy(buf + p, " unknown frame count");
        else
            sprintf(buf + p, " frames %u - %d of %d", this->seek, this->seek + this->framesToBeEncoded - 1, info.frameCount);

        general_log(param, input->getName(), X265_LOG_INFO, "%s\n", buf);
    }

    this->input->startReader();

    if (reconfn)
    {
        if (reconFileBitDepth == 0)
            reconFileBitDepth = param->internalBitDepth;
        this->recon = ReconFile::open(reconfn, param->sourceWidth, param->sourceHeight, reconFileBitDepth,
                                      param->fpsNum, param->fpsDenom, param->internalCsp);
        if (this->recon->isFail())
        {
            /* non-fatal: encode continues without recon output */
            x265_log(param, X265_LOG_WARNING, "unable to write reconstructed outputs file\n");
            this->recon->release();
            this->recon = 0;
        }
        else
            general_log(param, this->recon->getName(), X265_LOG_INFO,
                    "reconstructed images %dx%d fps %d/%d %s\n",
                    param->sourceWidth, param->sourceHeight, param->fpsNum, param->fpsDenom,
                    x265_source_csp_names[param->internalCsp]);
    }

    this->output = OutputFile::open(outputfn, info);
    if (this->output->isFail())
    {
        x265_log(param, X265_LOG_ERROR, "failed to open output file <%s> for writing\n", outputfn);
        return true;
    }
    general_log(param, this->output->getName(), X265_LOG_INFO, "output file: %s\n", outputfn);
    return false;
}
+
+bool CLIOptions::parseQPFile(x265_picture &pic_org)
+{
+    int32_t num = -1, qp, ret;
+    char type;
+    uint32_t filePos;
+    pic_org.forceqp = 0;
+    pic_org.sliceType = X265_TYPE_AUTO;
+    while (num < pic_org.poc)
+    {
+        filePos = ftell(qpfile);
+        qp = -1;
+        ret = fscanf(qpfile, "%d %c%*[ \t]%d\n", &num, &type, &qp);
+
+        if (num > pic_org.poc || ret == EOF)
+        {
+            fseek(qpfile, filePos, SEEK_SET);
+            break;
+        }
+        if (num < pic_org.poc && ret >= 2)
+            continue;
+        if (ret == 3 && qp >= 0)
+            pic_org.forceqp = qp + 1;
+        if (type == 'I') pic_org.sliceType = X265_TYPE_IDR;
+        else if (type == 'i') pic_org.sliceType = X265_TYPE_I;
+        else if (type == 'P') pic_org.sliceType = X265_TYPE_P;
+        else if (type == 'B') pic_org.sliceType = X265_TYPE_BREF;
+        else if (type == 'b') pic_org.sliceType = X265_TYPE_B;
+        else ret = 0;
+        if (ret < 2 || qp < -1 || qp > 51)
+            return 0;
+    }
+    return 1;
+}
+
+/* CLI return codes:
+ *
+ * 0 - encode successful
+ * 1 - unable to parse command line
+ * 2 - unable to open encoder
+ * 3 - unable to generate stream headers
+ * 4 - encoder abort
+ * 5 - unable to open csv file */
+
+int main(int argc, char **argv)
+{
+#if HAVE_VLD
+    // This uses Microsoft's proprietary WCHAR type, but this only builds on Windows to start with
+    VLDSetReportOptions(VLD_OPT_REPORT_TO_DEBUGGER | VLD_OPT_REPORT_TO_FILE, L"x265_leaks.txt");
+#endif
+    PROFILE_INIT();
+    THREAD_NAME("API", 0);
+
+    GetConsoleTitle(orgConsoleTitle, CONSOLE_TITLE_SIZE);
+    SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED | ES_AWAYMODE_REQUIRED);
+
+    ReconPlay* reconPlay = NULL;
+    CLIOptions cliopt;
+
+    if (cliopt.parse(argc, argv))
+    {
+        cliopt.destroy();
+        if (cliopt.api)
+            cliopt.api->param_free(cliopt.param);
+        exit(1);
+    }
+
+    x265_param* param = cliopt.param;
+    const x265_api* api = cliopt.api;
+
+    /* This allows muxers to modify bitstream format */
+    cliopt.output->setParam(param);
+
+    if (cliopt.reconPlayCmd)
+        reconPlay = new ReconPlay(cliopt.reconPlayCmd, *param);
+
+    /* note: we could try to acquire a different libx265 API here based on
+     * the profile found during option parsing, but it must be done before
+     * opening an encoder */
+
+    x265_encoder *encoder = api->encoder_open(param);
+    if (!encoder)
+    {
+        x265_log(param, X265_LOG_ERROR, "failed to open encoder\n");
+        cliopt.destroy();
+        api->param_free(param);
+        api->cleanup();
+        exit(2);
+    }
+
+    /* get the encoder parameters post-initialization */
+    api->encoder_parameters(encoder, param);
+
+    if (cliopt.csvfn)
+    {
+        cliopt.csvfpt = x265_csvlog_open(*api, *param, cliopt.csvfn, cliopt.csvLogLevel);
+        if (!cliopt.csvfpt)
+        {
+            x265_log(param, X265_LOG_ERROR, "Unable to open CSV log file <%s>, aborting\n", cliopt.csvfn);
+            cliopt.destroy();
+            if (cliopt.api)
+                cliopt.api->param_free(cliopt.param);
+            exit(5);
+        }
+    }
+
+    /* Control-C handler */
+    if (signal(SIGINT, sigint_handler) == SIG_ERR)
+        x265_log(param, X265_LOG_ERROR, "Unable to register CTRL+C handler: %s\n", strerror(errno));
+
+    x265_picture pic_orig, pic_out;
+    x265_picture *pic_in = &pic_orig;
+    /* Allocate recon picture if analysisMode is enabled */
+    std::priority_queue<int64_t>* pts_queue = cliopt.output->needPTS() ? new std::priority_queue<int64_t>() : NULL;
+    x265_picture *pic_recon = (cliopt.recon || !!param->analysisMode || pts_queue || reconPlay || cliopt.csvLogLevel) ? &pic_out : NULL;
+    uint32_t inFrameCount = 0;
+    uint32_t outFrameCount = 0;
+    x265_nal *p_nal;
+    x265_stats stats;
+    uint32_t nal;
+    int16_t *errorBuf = NULL;
+    int ret = 0;
+
+    if (!param->bRepeatHeaders)
+    {
+        if (api->encoder_headers(encoder, &p_nal, &nal) < 0)
+        {
+            x265_log(param, X265_LOG_ERROR, "Failure generating stream headers\n");
+            ret = 3;
+            goto fail;
+        }
+        else
+            cliopt.totalbytes += cliopt.output->writeHeaders(p_nal, nal);
+    }
+
+    api->picture_init(param, pic_in);
+
+    if (cliopt.bDither)
+    {
+        errorBuf = X265_MALLOC(int16_t, param->sourceWidth + 1);
+        if (errorBuf)
+            memset(errorBuf, 0, (param->sourceWidth + 1) * sizeof(int16_t));
+        else
+            cliopt.bDither = false;
+    }
+
+    // main encoder loop
+    while (pic_in && !b_ctrl_c)
+    {
+        pic_orig.poc = inFrameCount;
+        if (cliopt.qpfile)
+        {
+            if (!cliopt.parseQPFile(pic_orig))
+            {
+                x265_log(NULL, X265_LOG_ERROR, "can't parse qpfile for frame %d\n", pic_in->poc);
+                fclose(cliopt.qpfile);
+                cliopt.qpfile = NULL;
+            }
+        }
+
+        if (cliopt.framesToBeEncoded && inFrameCount >= cliopt.framesToBeEncoded)
+            pic_in = NULL;
+        else if (cliopt.input->readPicture(pic_orig))
+            inFrameCount++;
+        else
+            pic_in = NULL;
+
+        if (pic_in)
+        {
+            if (pic_in->bitDepth > param->internalBitDepth && cliopt.bDither)
+            {
+                x265_dither_image(*api, *pic_in, param->sourceWidth, param->sourceHeight, errorBuf, param->internalBitDepth);
+                pic_in->bitDepth = param->internalBitDepth;
+            }
+            /* Overwrite PTS */
+            pic_in->pts = pic_in->poc;
+        }
+
+        int numEncoded = api->encoder_encode(encoder, &p_nal, &nal, pic_in, pic_recon);
+        if (numEncoded < 0)
+        {
+            b_ctrl_c = 1;
+            ret = 4;
+            break;
+        }
+
+        if (reconPlay && numEncoded)
+            reconPlay->writePicture(*pic_recon);
+
+        outFrameCount += numEncoded;
+
+        if (numEncoded && pic_recon && cliopt.recon)
+            cliopt.recon->writePicture(pic_out);
+        if (nal)
+        {
+            cliopt.totalbytes += cliopt.output->writeFrame(p_nal, nal, pic_out);
+            if (pts_queue)
+            {
+                pts_queue->push(-pic_out.pts);
+                if (pts_queue->size() > 2)
+                    pts_queue->pop();
+            }
+        }
+
+        cliopt.printStatus(outFrameCount);
+        if (numEncoded && cliopt.csvLogLevel)
+            x265_csvlog_frame(cliopt.csvfpt, *param, *pic_recon, cliopt.csvLogLevel);
+    }
+
+    /* Flush the encoder */
+    while (!b_ctrl_c)
+    {
+        int numEncoded = api->encoder_encode(encoder, &p_nal, &nal, NULL, pic_recon);
+        if (numEncoded < 0)
+        {
+            ret = 4;
+            break;
+        }
+
+        if (reconPlay && numEncoded)
+            reconPlay->writePicture(*pic_recon);
+
+        outFrameCount += numEncoded;
+        if (numEncoded && pic_recon && cliopt.recon)
+            cliopt.recon->writePicture(pic_out);
+        if (nal)
+        {
+            cliopt.totalbytes += cliopt.output->writeFrame(p_nal, nal, pic_out);
+            if (pts_queue)
+            {
+                pts_queue->push(-pic_out.pts);
+                if (pts_queue->size() > 2)
+                    pts_queue->pop();
+            }
+        }
+
+        cliopt.printStatus(outFrameCount);
+        if (numEncoded && cliopt.csvLogLevel)
+            x265_csvlog_frame(cliopt.csvfpt, *param, *pic_recon, cliopt.csvLogLevel);
+
+        if (!numEncoded)
+            break;
+    }
+
+    /* clear progress report */
+    if (cliopt.bProgress)
+        fprintf(stderr, "%*s\r", 80, " ");
+
+fail:
+
+    delete reconPlay;
+
+    api->encoder_get_stats(encoder, &stats, sizeof(stats));
+    if (cliopt.csvfpt && !b_ctrl_c)
+        x265_csvlog_encode(cliopt.csvfpt, *api, *param, stats, cliopt.csvLogLevel, argc, argv);
+    api->encoder_close(encoder);
+
+    int64_t second_largest_pts = 0;
+    int64_t largest_pts = 0;
+    if (pts_queue && pts_queue->size() >= 2)
+    {
+        second_largest_pts = -pts_queue->top();
+        pts_queue->pop();
+        largest_pts = -pts_queue->top();
+        pts_queue->pop();
+        delete pts_queue;
+        pts_queue = NULL;
+    }
+    cliopt.output->closeFile(largest_pts, second_largest_pts);
+
+    if (b_ctrl_c)
+        general_log(param, NULL, X265_LOG_INFO, "aborted at input frame %d, output frame %d\n",
+                    cliopt.seek + inFrameCount, stats.encodedPictureCount);
+
+    api->cleanup(); /* Free library singletons */
+
+    cliopt.destroy();
+
+    api->param_free(param);
+
+    X265_FREE(errorBuf);
+
+    SetConsoleTitle(orgConsoleTitle);
+    SetThreadExecutionState(ES_CONTINUOUS);
+
+#if HAVE_VLD
+    assert(VLDReportLeaks() == 0);
+#endif
+
+    return ret;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/x265.def.in	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,24 @@
+EXPORTS
+x265_encoder_open_${X265_BUILD}
+x265_param_default
+x265_param_default_preset
+x265_param_parse
+x265_param_alloc
+x265_param_free
+x265_picture_init
+x265_picture_alloc
+x265_picture_free
+x265_param_apply_profile
+x265_max_bit_depth
+x265_version_str
+x265_build_info_str
+x265_encoder_headers
+x265_encoder_parameters
+x265_encoder_reconfig
+x265_encoder_encode
+x265_encoder_get_stats
+x265_encoder_log
+x265_encoder_close
+x265_cleanup
+x265_api_get_${X265_BUILD}
+x265_api_query
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/x265.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,1488 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_H
+#define X265_H
+
+#include <stdint.h>
+#include "x265_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* x265_encoder:
+ *      opaque handler for encoder */
+typedef struct x265_encoder x265_encoder;
+
+/* Application developers planning to link against a shared library version of
+ * libx265 from a Microsoft Visual Studio or similar development environment
+ * will need to define X265_API_IMPORTS before including this header.
+ * This clause does not apply to MinGW, similar development environments, or non
+ * Windows platforms. */
+#ifdef X265_API_IMPORTS
+#define X265_API __declspec(dllimport)
+#else
+#define X265_API
+#endif
+
+typedef enum
+{
+    NAL_UNIT_CODED_SLICE_TRAIL_N = 0,
+    NAL_UNIT_CODED_SLICE_TRAIL_R,
+    NAL_UNIT_CODED_SLICE_TSA_N,
+    NAL_UNIT_CODED_SLICE_TLA_R,
+    NAL_UNIT_CODED_SLICE_STSA_N,
+    NAL_UNIT_CODED_SLICE_STSA_R,
+    NAL_UNIT_CODED_SLICE_RADL_N,
+    NAL_UNIT_CODED_SLICE_RADL_R,
+    NAL_UNIT_CODED_SLICE_RASL_N,
+    NAL_UNIT_CODED_SLICE_RASL_R,
+    NAL_UNIT_CODED_SLICE_BLA_W_LP = 16,
+    NAL_UNIT_CODED_SLICE_BLA_W_RADL,
+    NAL_UNIT_CODED_SLICE_BLA_N_LP,
+    NAL_UNIT_CODED_SLICE_IDR_W_RADL,
+    NAL_UNIT_CODED_SLICE_IDR_N_LP,
+    NAL_UNIT_CODED_SLICE_CRA,
+    NAL_UNIT_VPS = 32,
+    NAL_UNIT_SPS,
+    NAL_UNIT_PPS,
+    NAL_UNIT_ACCESS_UNIT_DELIMITER,
+    NAL_UNIT_EOS,
+    NAL_UNIT_EOB,
+    NAL_UNIT_FILLER_DATA,
+    NAL_UNIT_PREFIX_SEI,
+    NAL_UNIT_SUFFIX_SEI,
+    NAL_UNIT_INVALID = 64,
+} NalUnitType;
+
+/* The data within the payload is already NAL-encapsulated; the type is merely
+ * in the struct for easy access by the calling application.  All data returned
+ * in an x265_nal, including the data in payload, is no longer valid after the
+ * next call to x265_encoder_encode.  Thus it must be used or copied before
+ * calling x265_encoder_encode again. */
+typedef struct x265_nal
+{
+    uint32_t type;        /* NalUnitType */
+    uint32_t sizeBytes;   /* size in bytes */
+    uint8_t* payload;
+} x265_nal;
+
+/* Stores all analysis data for a single frame */
+typedef struct x265_analysis_data
+{
+    void*            interData;
+    void*            intraData;
+    uint32_t         frameRecordSize;
+    uint32_t         poc;
+    uint32_t         sliceType;
+    uint32_t         numCUsInFrame;
+    uint32_t         numPartitions;
+} x265_analysis_data;
+
+/* cu statistics */
+typedef struct x265_cu_stats
+{
+    double      percentSkipCu[4];                // Percentage of skip cu in all depths
+    double      percentMergeCu[4];               // Percentage of merge cu in all depths
+    double      percentIntraDistribution[4][3];  // Percentage of DC, Planar, Angular intra modes in all depths
+    double      percentInterDistribution[4][3];  // Percentage of 2Nx2N inter, rect and amp in all depths
+    double      percentIntraNxN;                 // Percentage of 4x4 cu
+
+    /* All the above values will add up to 100%. */
+} x265_cu_stats;
+
+/* Frame level statistics */
+typedef struct x265_frame_stats
+{
+    double           qp;
+    double           rateFactor;
+    double           psnrY;
+    double           psnrU;
+    double           psnrV;
+    double           psnr;
+    double           ssim;
+    double           decideWaitTime;
+    double           row0WaitTime;
+    double           wallTime;
+    double           refWaitWallTime;
+    double           totalCTUTime;
+    double           stallTime;
+    double           avgWPP;
+    double           avgLumaDistortion;
+    double           avgChromaDistortion;
+    double           avgPsyEnergy;
+    double           avgResEnergy;
+    double           avgLumaLevel;
+    uint64_t         bits;
+    int              encoderOrder;
+    int              poc;
+    int              countRowBlocks;
+    int              list0POC[16];
+    int              list1POC[16];
+    uint16_t         maxLumaLevel;
+    char             sliceType;
+    int              bScenecut;
+    x265_cu_stats    cuStats;
+} x265_frame_stats;
+
+/* Used to pass pictures into the encoder, and to get picture data back out of
+ * the encoder.  The input and output semantics are different */
+typedef struct x265_picture
+{
+    /* presentation time stamp: user-specified, returned on output */
+    int64_t pts;
+
+    /* display time stamp: ignored on input, copied from reordered pts. Returned
+     * on output */
+    int64_t dts;
+
+    /* force quantizer for != X265_QP_AUTO */
+    /* The value provided on input is returned with the same picture (POC) on
+     * output */
+    void*   userData;
+
+    /* Must be specified on input pictures, the number of planes is determined
+     * by the colorSpace value */
+    void*   planes[3];
+
+    /* Stride is the number of bytes between row starts */
+    int     stride[3];
+
+    /* Must be specified on input pictures. x265_picture_init() will set it to
+     * the encoder's internal bit depth, but this field must describe the depth
+     * of the input pictures. Must be between 8 and 16. Values larger than 8
+     * imply 16bits per input sample. If input bit depth is larger than the
+     * internal bit depth, the encoder will down-shift pixels. Input samples
+     * larger than 8bits will be masked to internal bit depth. On output the
+     * bitDepth will be the internal encoder bit depth */
+    int     bitDepth;
+
+    /* Must be specified on input pictures: X265_TYPE_AUTO or other.
+     * x265_picture_init() sets this to auto, returned on output */
+    int     sliceType;
+
+    /* Ignored on input, set to picture count, returned on output */
+    int     poc;
+
+    /* Must be specified on input pictures: X265_CSP_I420 or other. It must
+     * match the internal color space of the encoder. x265_picture_init() will
+     * initialize this value to the internal color space */
+    int     colorSpace;
+
+    /* Force the slice base QP for this picture within the encoder. Set to 0
+     * to allow the encoder to determine base QP */
+    int     forceqp;
+
+    /* If param.analysisMode is X265_ANALYSIS_OFF this field is ignored on input
+     * and output. Else the user must call x265_alloc_analysis_data() to
+     * allocate analysis buffers for every picture passed to the encoder.
+     *
+     * On input when param.analysisMode is X265_ANALYSIS_LOAD and analysisData
+     * member pointers are valid, the encoder will use the data stored here to
+     * reduce encoder work.
+     *
+     * On output when param.analysisMode is X265_ANALYSIS_SAVE and analysisData
+     * member pointers are valid, the encoder will write output analysis into
+     * this data structure */
+    x265_analysis_data analysisData;
+
+    /* An array of quantizer offsets to be applied to this image during encoding.
+     * These are added on top of the decisions made by rateControl.
+     * Adaptive quantization must be enabled to use this feature. These quantizer 
+     * offsets should be given for each 16x16 block. Behavior if quant
+     * offsets differ between encoding passes is undefined. */
+    float            *quantOffsets;
+
+    /* Frame level statistics */
+    x265_frame_stats frameData;
+
+} x265_picture;
+
+typedef enum
+{
+    X265_DIA_SEARCH,
+    X265_HEX_SEARCH,
+    X265_UMH_SEARCH,
+    X265_STAR_SEARCH,
+    X265_FULL_SEARCH
+} X265_ME_METHODS;
+
+/* CPU flags */
+
+/* x86 */
+#define X265_CPU_CMOV            0x0000001
+#define X265_CPU_MMX             0x0000002
+#define X265_CPU_MMX2            0x0000004  /* MMX2 aka MMXEXT aka ISSE */
+#define X265_CPU_MMXEXT          X265_CPU_MMX2
+#define X265_CPU_SSE             0x0000008
+#define X265_CPU_SSE2            0x0000010
+#define X265_CPU_SSE3            0x0000020
+#define X265_CPU_SSSE3           0x0000040
+#define X265_CPU_SSE4            0x0000080  /* SSE4.1 */
+#define X265_CPU_SSE42           0x0000100  /* SSE4.2 */
+#define X265_CPU_LZCNT           0x0000200  /* Phenom support for "leading zero count" instruction. */
+#define X265_CPU_AVX             0x0000400  /* AVX support: requires OS support even if YMM registers aren't used. */
+#define X265_CPU_XOP             0x0000800  /* AMD XOP */
+#define X265_CPU_FMA4            0x0001000  /* AMD FMA4 */
+#define X265_CPU_AVX2            0x0002000  /* AVX2 */
+#define X265_CPU_FMA3            0x0004000  /* Intel FMA3 */
+#define X265_CPU_BMI1            0x0008000  /* BMI1 */
+#define X265_CPU_BMI2            0x0010000  /* BMI2 */
+/* x86 modifiers */
+#define X265_CPU_CACHELINE_32    0x0020000  /* avoid memory loads that span the border between two cachelines */
+#define X265_CPU_CACHELINE_64    0x0040000  /* 32/64 is the size of a cacheline in bytes */
+#define X265_CPU_SSE2_IS_SLOW    0x0080000  /* avoid most SSE2 functions on Athlon64 */
+#define X265_CPU_SSE2_IS_FAST    0x0100000  /* a few functions are only faster on Core2 and Phenom */
+#define X265_CPU_SLOW_SHUFFLE    0x0200000  /* The Conroe has a slow shuffle unit (relative to overall SSE performance) */
+#define X265_CPU_STACK_MOD4      0x0400000  /* if stack is only mod4 and not mod16 */
+#define X265_CPU_SLOW_CTZ        0x0800000  /* BSR/BSF x86 instructions are really slow on some CPUs */
+#define X265_CPU_SLOW_ATOM       0x1000000  /* The Atom is terrible: slow SSE unaligned loads, slow
+                                             * SIMD multiplies, slow SIMD variable shifts, slow pshufb,
+                                             * cacheline split penalties -- gather everything here that
+                                             * isn't shared by other CPUs to avoid making half a dozen
+                                             * new SLOW flags. */
+#define X265_CPU_SLOW_PSHUFB     0x2000000  /* such as on the Intel Atom */
+#define X265_CPU_SLOW_PALIGNR    0x4000000  /* such as on the AMD Bobcat */
+
+/* ARM */
+#define X265_CPU_ARMV6           0x0000001
+#define X265_CPU_NEON            0x0000002  /* ARM NEON */
+#define X265_CPU_FAST_NEON_MRC   0x0000004  /* Transfer from NEON to ARM register is fast (Cortex-A9) */
+
+#define X265_MAX_SUBPEL_LEVEL   7
+
+/* Log level */
+#define X265_LOG_NONE          (-1)
+#define X265_LOG_ERROR          0
+#define X265_LOG_WARNING        1
+#define X265_LOG_INFO           2
+#define X265_LOG_DEBUG          3
+#define X265_LOG_FULL           4
+
+#define X265_B_ADAPT_NONE       0
+#define X265_B_ADAPT_FAST       1
+#define X265_B_ADAPT_TRELLIS    2
+
+#define X265_REF_LIMIT_DEPTH    1
+#define X265_REF_LIMIT_CU       2
+
+#define X265_BFRAME_MAX         16
+#define X265_MAX_FRAME_THREADS  16
+
+#define X265_TYPE_AUTO          0x0000  /* Let x265 choose the right type */
+#define X265_TYPE_IDR           0x0001
+#define X265_TYPE_I             0x0002
+#define X265_TYPE_P             0x0003
+#define X265_TYPE_BREF          0x0004  /* Non-disposable B-frame */
+#define X265_TYPE_B             0x0005
+#define IS_X265_TYPE_I(x) ((x) == X265_TYPE_I || (x) == X265_TYPE_IDR)
+#define IS_X265_TYPE_B(x) ((x) == X265_TYPE_B || (x) == X265_TYPE_BREF)
+
+#define X265_QP_AUTO                 0
+
+#define X265_AQ_NONE                 0
+#define X265_AQ_VARIANCE             1
+#define X265_AQ_AUTO_VARIANCE        2
+#define X265_AQ_AUTO_VARIANCE_BIASED 3
+
+/* NOTE! For this release only X265_CSP_I420 and X265_CSP_I444 are supported */
+
+/* Supported internal color space types (according to semantics of chroma_format_idc) */
+#define X265_CSP_I400           0  /* yuv 4:0:0 planar */
+#define X265_CSP_I420           1  /* yuv 4:2:0 planar */
+#define X265_CSP_I422           2  /* yuv 4:2:2 planar */
+#define X265_CSP_I444           3  /* yuv 4:4:4 planar */
+#define X265_CSP_COUNT          4  /* Number of supported internal color spaces */
+
+/* These color spaces will eventually be supported as input pictures. The pictures will
+ * be converted to the appropriate planar color spaces at ingest */
+#define X265_CSP_NV12           4  /* yuv 4:2:0, with one y plane and one packed u+v */
+#define X265_CSP_NV16           5  /* yuv 4:2:2, with one y plane and one packed u+v */
+
+/* Interleaved color-spaces may eventually be supported as input pictures */
+#define X265_CSP_BGR            6  /* packed bgr 24bits   */
+#define X265_CSP_BGRA           7  /* packed bgr 32bits   */
+#define X265_CSP_RGB            8  /* packed rgb 24bits   */
+#define X265_CSP_MAX            9  /* end of list */
+
+#define X265_EXTENDED_SAR       255 /* aspect ratio explicitly specified as width:height */
+
+/* Analysis options */
+#define X265_ANALYSIS_OFF  0
+#define X265_ANALYSIS_SAVE 1
+#define X265_ANALYSIS_LOAD 2
+
+typedef struct x265_cli_csp
+{
+    int planes;
+    int width[3];
+    int height[3];
+} x265_cli_csp;
+
+static const x265_cli_csp x265_cli_csps[] =
+{
+    { 1, { 0, 0, 0 }, { 0, 0, 0 } }, /* i400 */
+    { 3, { 0, 1, 1 }, { 0, 1, 1 } }, /* i420 */
+    { 3, { 0, 1, 1 }, { 0, 0, 0 } }, /* i422 */
+    { 3, { 0, 0, 0 }, { 0, 0, 0 } }, /* i444 */
+    { 2, { 0, 0 },    { 0, 1 } },    /* nv12 */
+    { 2, { 0, 0 },    { 0, 0 } },    /* nv16 */
+};
+
+/* rate tolerance method */
+typedef enum
+{
+    X265_RC_ABR,
+    X265_RC_CQP,
+    X265_RC_CRF
+} X265_RC_METHODS;
+
+/* slice type statistics */
+typedef struct x265_sliceType_stats
+{
+    double        avgQp;
+    double        bitrate;
+    double        psnrY;
+    double        psnrU;
+    double        psnrV;
+    double        ssim;
+    uint32_t      numPics;
+} x265_sliceType_stats;
+
+/* Output statistics from encoder */
+typedef struct x265_stats
+{
+    double                globalPsnrY;
+    double                globalPsnrU;
+    double                globalPsnrV;
+    double                globalPsnr;
+    double                globalSsim;
+    double                elapsedEncodeTime;    /* wall time since encoder was opened */
+    double                elapsedVideoTime;     /* encoded picture count / frame rate */
+    double                bitrate;              /* accBits / elapsed video time */
+    uint64_t              accBits;              /* total bits output thus far */
+    uint32_t              encodedPictureCount;  /* number of output pictures thus far */
+    uint32_t              totalWPFrames;        /* number of uni-directional weighted frames used */
+    x265_sliceType_stats  statsI;               /* statistics of I slice */
+    x265_sliceType_stats  statsP;               /* statistics of P slice */
+    x265_sliceType_stats  statsB;               /* statistics of B slice */
+    uint16_t              maxCLL;               /* maximum content light level */
+    uint16_t              maxFALL;              /* maximum frame average light level */
+} x265_stats;
+
+/* String values accepted by x265_param_parse() (and CLI) for various parameters */
+static const char * const x265_motion_est_names[] = { "dia", "hex", "umh", "star", "full", 0 };
+static const char * const x265_source_csp_names[] = { "i400", "i420", "i422", "i444", "nv12", "nv16", 0 };
+static const char * const x265_video_format_names[] = { "component", "pal", "ntsc", "secam", "mac", "undef", 0 };
+static const char * const x265_fullrange_names[] = { "limited", "full", 0 };
+static const char * const x265_colorprim_names[] = { "", "bt709", "undef", "", "bt470m", "bt470bg", "smpte170m", "smpte240m", "film", "bt2020", 0 };
+static const char * const x265_transfer_names[] = { "", "bt709", "undef", "", "bt470m", "bt470bg", "smpte170m", "smpte240m", "linear", "log100",
+                                                    "log316", "iec61966-2-4", "bt1361e", "iec61966-2-1", "bt2020-10", "bt2020-12",
+                                                    "smpte-st-2084", "smpte-st-428", "arib-std-b67", 0 };
+static const char * const x265_colmatrix_names[] = { "GBR", "bt709", "undef", "", "fcc", "bt470bg", "smpte170m", "smpte240m",
+                                                     "YCgCo", "bt2020nc", "bt2020c", 0 };
+static const char * const x265_sar_names[] = { "undef", "1:1", "12:11", "10:11", "16:11", "40:33", "24:11", "20:11",
+                                               "32:11", "80:33", "18:11", "15:11", "64:33", "160:99", "4:3", "3:2", "2:1", 0 };
+static const char * const x265_interlace_names[] = { "prog", "tff", "bff", 0 };
+static const char * const x265_analysis_names[] = { "off", "save", "load", 0 };
+
+/* Zones: override ratecontrol for specific sections of the video.
+ * If zones overlap, whichever comes later in the list takes precedence. */
+typedef struct x265_zone
+{
+    int   startFrame, endFrame; /* range of frame numbers */
+    int   bForceQp;             /* whether to use qp vs bitrate factor */
+    int   qp;
+    float bitrateFactor;
+} x265_zone;
+    
+/* x265 input parameters
+ *
+ * For version safety you may use x265_param_alloc/free() to manage the
+ * allocation of x265_param instances, and x265_param_parse() to assign values
+ * by name.  By never dereferencing param fields in your own code you can treat
+ * x265_param as an opaque data structure */
+typedef struct x265_param
+{
+    /* x265_param_default() will auto-detect this cpu capability bitmap.  it is
+     * recommended to not change this value unless you know the cpu detection is
+     * somehow flawed on your target hardware. The asm function tables are
+     * process global, the first encoder configures them for all encoders */
+    int       cpuid;
+
+    /*== Parallelism Features ==*/
+
+    /* Number of concurrently encoded frames between 1 and X265_MAX_FRAME_THREADS
+     * or 0 for auto-detection. By default x265 will use a number of frame
+     * threads empirically determined to be optimal for your CPU core count,
+     * between 2 and 6.  Using more than one frame thread causes motion search
+     * in the down direction to be clamped but otherwise encode behavior is
+     * unaffected. With CQP rate control the output bitstream is deterministic
+     * for all values of frameNumThreads greater than 1. All other forms of
+     * rate-control can be negatively impacted by increases to the number of
+     * frame threads because the extra concurrency adds uncertainty to the
+     * bitrate estimations. Frame parallelism is generally
+     * limited by the number of CU rows
+     *
+     * When thread pools are used, each frame thread is assigned to a single
+     * pool and the frame thread itself is given the node affinity of its pool.
+     * But when no thread pools are used no node affinity is assigned. */
+    int       frameNumThreads;
+
+    /* Comma separated list of threads per NUMA node. If "none", then no worker
+     * pools are created and only frame parallelism is possible. If NULL or ""
+     * (default) x265 will use all available threads on each NUMA node.
+     *
+     * '+'  is a special value indicating all cores detected on the node
+     * '*'  is a special value indicating all cores detected on the node and all
+     *      remaining nodes.
+     * '-'  is a special value indicating no cores on the node, same as '0'
+     *
+     * example strings for a 4-node system:
+     *   ""        - default, unspecified, all numa nodes are used for thread pools
+     *   "*"       - same as default
+     *   "none"    - no thread pools are created, only frame parallelism possible
+     *   "-"       - same as "none"
+     *   "10"      - allocate one pool, using up to 10 cores on node 0
+     *   "-,+"     - allocate one pool, using all cores on node 1
+     *   "+,-,+"   - allocate two pools, using all cores on nodes 0 and 2
+     *   "+,-,+,-" - allocate two pools, using all cores on nodes 0 and 2
+     *   "-,*"     - allocate three pools, using all cores on nodes 1, 2 and 3
+     *   "8,8,8,8" - allocate four pools with up to 8 threads in each pool
+     *
+     * The total number of threads will be determined by the number of threads
+     * assigned to all nodes. The worker threads will each be given affinity for
+     * their node, they will not be allowed to migrate between nodes, but they
+     * will be allowed to move between CPU cores within their node.
+     *
+     * If the three pool features: bEnableWavefront, bDistributeModeAnalysis and
+     * bDistributeMotionEstimation are all disabled, then numaPools is ignored
+     * and no thread pools are created.
+     *
+     * If "none" is specified, then all three of the thread pool features are
+     * implicitly disabled.
+     *
+     * Multiple thread pools will be allocated for any NUMA node with more than
+     * 64 logical CPU cores. But any given thread pool will always use at most
+     * one NUMA node.
+     *
+     * Frame encoders are distributed between the available thread pools, and
+     * the encoder will never generate more thread pools than frameNumThreads */
+    const char* numaPools;
+
+    /* Enable wavefront parallel processing, greatly increases parallelism for
+     * less than 1% compression efficiency loss. Requires a thread pool, enabled
+     * by default */
+    int       bEnableWavefront;
+
+    /* Use multiple threads to measure CU mode costs. Recommended for many core
+     * CPUs. On RD levels less than 5, it may not offload enough work to warrant
+     * the overhead. It is useful with the slow preset since it has the
+     * rectangular predictions enabled. At RD level 5 and 6 (preset slower and
+     * below), this feature should be an unambiguous win if you have CPU
+     * cores available for work. Default disabled */
+    int       bDistributeModeAnalysis;
+
+    /* Use multiple threads to perform motion estimation to (ME to one reference
+     * per thread). Recommended for many core CPUs. The more references the more
+     * motion searches there will be to distribute. This option is often not a
+     * win, particularly in video sequences with low motion. Default disabled */
+    int       bDistributeMotionEstimation;
+
+    /*== Logging Features ==*/
+
+    /* Enable analysis and logging distribution of CUs. Now deprecated */
+    int       bLogCuStats;
+
+    /* Enable the measurement and reporting of PSNR. Default is enabled */
+    int       bEnablePsnr;
+
+    /* Enable the measurement and reporting of SSIM. Default is disabled */
+    int       bEnableSsim;
+
+    /* The level of logging detail emitted by the encoder. X265_LOG_NONE to
+     * X265_LOG_FULL, default is X265_LOG_INFO */
+    int       logLevel;
+
+    /* Filename of CSV log. Now deprecated */
+    const char* csvfn;
+
+    /*== Internal Picture Specification ==*/
+
+    /* Internal encoder bit depth. If x265 was compiled to use 8bit pixels
+     * (HIGH_BIT_DEPTH=0), this field must be 8, else this field must be 10.
+     * Future builds may support 12bit pixels. */
+    int       internalBitDepth;
+
+    /* Color space of internal pictures, must match color space of input
+     * pictures */
+    int       internalCsp;
+
+    /* Numerator and denominator of frame rate */
+    uint32_t  fpsNum;
+    uint32_t  fpsDenom;
+
+    /* Width (in pixels) of the source pictures. If this width is not an even
+     * multiple of 4, the encoder will pad the pictures internally to meet this
+     * minimum requirement. All valid HEVC widths are supported */
+    int       sourceWidth;
+
+    /* Height (in pixels) of the source pictures. If this height is not an even
+     * multiple of 4, the encoder will pad the pictures internally to meet this
+     * minimum requirement. All valid HEVC heights are supported */
+    int       sourceHeight;
+
+    /* Interlace type of source pictures. 0 - progressive pictures (default).
+     * 1 - top field first, 2 - bottom field first. HEVC encodes interlaced
+     * content as fields, they must be provided to the encoder in the correct
+     * temporal order */
+    int       interlaceMode;
+
+    /* Total Number of frames to be encoded, calculated from the user input
+     * (--frames) and (--seek). In case, the input is read from a pipe, this can
+     * remain as 0. It is later used in 2 pass RateControl, hence storing the
+     * value in param */
+    int       totalFrames;
+
+    /*== Profile / Tier / Level ==*/
+
+    /* Note: the profile is specified by x265_param_apply_profile() */
+
+    /* Minimum decoder requirement level. Defaults to 0, which implies auto-
+     * detection by the encoder. If specified, the encoder will attempt to bring
+     * the encode specifications within that specified level. If the encoder is
+     * unable to reach the level it issues a warning and emits the actual
+     * decoder requirement. If the requested requirement level is higher than
+     * the actual level, the actual requirement level is signaled. The value is
+     * specified as an integer with the level times 10, for example level
+     * "5.1" is specified as 51, and level "5.0" is specified as 50. */
+    int       levelIdc;
+
+    /* if levelIdc is specified (non-zero) this flag will differentiate between
+     * Main (0) and High (1) tier. Default is Main tier (0) */
+    int       bHighTier;
+
+    /* The maximum number of L0 references a P or B slice may use. This
+     * influences the size of the decoded picture buffer. The higher this
+     * number, the more reference frames there will be available for motion
+     * search, improving compression efficiency of most video at a cost of
+     * performance. Value must be between 1 and 16, default is 3 */
+    int       maxNumReferences;
+
+    /* Allow libx265 to emit HEVC bitstreams which do not meet strict level
+     * requirements. Defaults to false */
+    int       bAllowNonConformance;
+
+    /*== Bitstream Options ==*/
+
+    /* Flag indicating whether VPS, SPS and PPS headers should be output with
+     * each keyframe. Default false */
+    int       bRepeatHeaders;
+
+    /* Flag indicating whether the encoder should generate start codes (Annex B
+     * format) or length (file format) before NAL units. Default true, Annex B.
+     * Muxers should set this to the correct value */
+    int       bAnnexB;
+
+    /* Flag indicating whether the encoder should emit an Access Unit Delimiter
+     * NAL at the start of every access unit. Default false */
+    int       bEnableAccessUnitDelimiters;
+
+    /* Enables the buffering period SEI and picture timing SEI to signal the HRD
+     * parameters. Default is disabled */
+    int       bEmitHRDSEI;
+
+    /* Enables the emission of a user data SEI with the stream headers which
+     * describes the encoder version, build info, and parameters. This is
+     * very helpful for debugging, but may interfere with regression tests. 
+     * Default enabled */
+    int       bEmitInfoSEI;
+
+    /* Enable the generation of SEI messages for each encoded frame containing
+     * the hashes of the three reconstructed picture planes. Most decoders will
+     * validate those hashes against the reconstructed images it generates and
+     * report any mismatches. This is essentially a debugging feature.  Hash
+     * types are MD5(1), CRC(2), Checksum(3).  Default is 0, none */
+    int       decodedPictureHashSEI;
+
+    /* Enable Temporal Sub Layers while encoding, signals NAL units of coded
+     * slices with their temporalId. Output bitstreams can be extracted either
+     * at the base temporal layer (layer 0) with roughly half the frame rate or
+     * at a higher temporal layer (layer 1) that decodes all the frames in the
+     * sequence. */
+    int       bEnableTemporalSubLayers;
+
+    /*== GOP structure and slice type decisions (lookahead) ==*/
+
+    /* Enable open GOP - meaning I slices are not necessarily IDR and thus frames
+     * encoded after an I slice may reference frames encoded prior to the I
+     * frame which have remained in the decoded picture buffer.  Open GOP
+     * generally has better compression efficiency and negligible encoder
+     * performance impact, but the use case may preclude it.  Default true */
+    int       bOpenGOP;
+
+    /* Scene cuts closer together than this are coded as I, not IDR. */
+    int       keyframeMin;
+
+    /* Maximum keyframe distance or intra period in number of frames. If 0 or 1,
+     * all frames are I frames. A negative value is casted to MAX_INT internally
+     * which effectively makes frame 0 the only I frame. Default is 250 */
+    int       keyframeMax;
+
+    /* Maximum consecutive B frames that can be emitted by the lookahead. When
+     * b-adapt is 0 and keyframeMax is greater than bframes, the lookahead emits
+     * a fixed pattern of `bframes` B frames between each P.  With b-adapt 1 the
+     * lookahead ignores the value of bframes for the most part.  With b-adapt 2
+     * the value of bframes determines the search (POC) distance performed in
+     * both directions, quadratically increasing the compute load of the
+     * lookahead.  The higher the value, the more B frames the lookahead may
+     * possibly use consecutively, usually improving compression. Default is 3,
+     * maximum is 16 */
+    int       bframes;
+
+    /* Sets the operating mode of the lookahead.  With b-adapt 0, the GOP
+     * structure is fixed based on the values of keyframeMax and bframes.
+     * With b-adapt 1 a light lookahead is used to choose B frame placement.
+     * With b-adapt 2 (trellis) a viterbi B path selection is performed */
+    int       bFrameAdaptive;
+
+    /* When enabled, the encoder will use the B frame in the middle of each
+     * mini-GOP larger than 2 B frames as a motion reference for the surrounding
+     * B frames.  This improves compression efficiency for a small performance
+     * penalty.  Referenced B frames are treated somewhere between a B and a P
+     * frame by rate control.  Default is enabled. */
+    int       bBPyramid;
+
+    /* A value which is added to the cost estimate of B frames in the lookahead.
+     * It may be a positive value (making B frames appear more expensive, which
+     * causes the lookahead to choose more P frames) or negative, which makes the
+     * lookahead choose more B frames. Default is 0, there are no limits */
+    int       bFrameBias;
+
+    /* The number of frames that must be queued in the lookahead before it may
+     * make slice decisions. Increasing this value directly increases the encode
+     * latency. The longer the queue the more optimally the lookahead may make
+     * slice decisions, particularly with b-adapt 2. When cu-tree is enabled,
+     * the length of the queue linearly increases the effectiveness of the
+     * cu-tree analysis. Default is 40 frames, maximum is 250 */
+    int       lookaheadDepth;
+
+    /* Use multiple worker threads to measure the estimated cost of each frame
+     * within the lookahead. When bFrameAdaptive is 2, most frame cost estimates
+     * will be performed in batch mode, many cost estimates at the same time,
+     * and lookaheadSlices is ignored for batched estimates. The effect on
+     * performance can be quite small.  The higher this parameter, the less
+     * accurate the frame costs will be (since context is lost across slice
+     * boundaries) which will result in less accurate B-frame and scene-cut
+     * decisions. Default is 0 - disabled. 1 is the same as 0. Max 16 */
+    int       lookaheadSlices;
+
+    /* An arbitrary threshold which determines how aggressively the lookahead
+     * should detect scene cuts. The default (40) is recommended. */
+    int       scenecutThreshold;
+
+    /*== Coding Unit (CU) definitions ==*/
+
+    /* Maximum CU width and height in pixels.  The size must be 64, 32, or 16.
+     * The higher the size, the more efficiently x265 can encode areas of low
+     * complexity, greatly improving compression efficiency at large
+     * resolutions.  The smaller the size, the more effective wavefront and
+     * frame parallelism will become because of the increase in rows. default 64
+     * All encoders within the same process must use the same maxCUSize, until
+     * all encoders are closed and x265_cleanup() is called to reset the value. */
+    uint32_t  maxCUSize;
+
+    /* Minimum CU width and height in pixels.  The size must be 64, 32, 16, or
+     * 8. Default 8. All encoders within the same process must use the same
+     * minCUSize. */
+    uint32_t  minCUSize;
+
+    /* Enable rectangular motion prediction partitions (vertical and
+     * horizontal), available at all CU depths from 64x64 to 8x8. Default is
+     * disabled */
+    int       bEnableRectInter;
+
+    /* Enable asymmetrical motion predictions.  At CU depths 64, 32, and 16, it
+     * is possible to use 25%/75% split partitions in the up, down, right, left
+     * directions. For some material this can improve compression efficiency at
+     * the cost of extra analysis. bEnableRectInter must be enabled for this
+     * feature to be used. Default disabled */
+    int       bEnableAMP;
+
+    /*== Residual Quadtree Transform Unit (TU) definitions ==*/
+
+    /* Maximum TU width and height in pixels.  The size must be 32, 16, 8 or 4.
+     * The larger the size the more efficiently the residual can be compressed
+     * by the DCT transforms, at the expense of more computation */
+    uint32_t  maxTUSize;
+
+    /* The additional depth the residual quad-tree is allowed to recurse beyond
+     * the coding quad-tree, for inter coded blocks. This must be between 1 and
+     * 4. The higher the value the more efficiently the residual can be
+     * compressed by the DCT transforms, at the expense of much more compute */
+    uint32_t  tuQTMaxInterDepth;
+
+    /* The additional depth the residual quad-tree is allowed to recurse beyond
+     * the coding quad-tree, for intra coded blocks. This must be between 1 and
+     * 4. The higher the value the more efficiently the residual can be
+     * compressed by the DCT transforms, at the expense of much more compute */
+    uint32_t  tuQTMaxIntraDepth;
+
+    /* Set the amount of rate-distortion analysis to use within quant. 0 implies
+     * no rate-distortion optimization. At level 1 rate-distortion cost is used to
+     * find optimal rounding values for each level (and allows psy-rdoq to be
+     * enabled). At level 2 rate-distortion cost is used to make decimate decisions
+     * on each 4x4 coding group (including the cost of signaling the group within
+     * the group bitmap).  Psy-rdoq is less effective at preserving energy when
+     * RDOQ is at level 2 */
+    int       rdoqLevel;
+
+    /* Enable the implicit signaling of the sign bit of the last coefficient of
+     * each transform unit. This saves one bit per TU at the expense of figuring
+     * out which coefficient can be toggled with the least distortion.
+     * Default is enabled */
+    int       bEnableSignHiding;
+
+    /* Allow intra coded blocks to be encoded directly as residual without the
+     * DCT transform, when this improves efficiency. Checking whether the block
+     * will benefit from this option incurs a performance penalty. Default is
+     * disabled */
+    int       bEnableTransformSkip;
+
+    /* An integer value in range of 0 to 2000, which denotes strength of noise
+     * reduction in intra CUs. 0 means disabled */
+    int       noiseReductionIntra;
+
+    /* An integer value in range of 0 to 2000, which denotes strength of noise
+     * reduction in inter CUs. 0 means disabled */
+    int       noiseReductionInter;
+
+    /* Quantization scaling lists. HEVC supports 6 quantization scaling lists to
+     * be defined; one each for Y, Cb, Cr for intra prediction and one each for
+     * inter prediction.
+     *
+     * - NULL and "off" will disable quant scaling (default)
+     * - "default" will enable the HEVC default scaling lists, which
+     *   do not need to be signaled since they are specified
+     * - all other strings indicate a filename containing custom scaling lists
+     *   in the HM format. The encode will fail if the file is not parsed
+     *   correctly. Custom lists must be signaled in the SPS. */
+    const char *scalingLists;
+
+    /*== Intra Coding Tools ==*/
+
+    /* Enable constrained intra prediction. This causes intra prediction to
+     * input samples that were inter predicted. For some use cases this is
+     * believed to be more robust to stream errors, but it has a compression
+     * penalty on P and (particularly) B slices. Defaults to disabled */
+    int       bEnableConstrainedIntra;
+
+    /* Enable strong intra smoothing for 32x32 blocks where the reference
+     * samples are flat. It may or may not improve compression efficiency,
+     * depending on your source material. Defaults to disabled */
+    int       bEnableStrongIntraSmoothing;
+
+    /*== Inter Coding Tools ==*/
+
+    /* The maximum number of merge candidates that are considered during inter
+     * analysis.  This number (between 1 and 5) is signaled in the stream
+     * headers and determines the number of bits required to signal a merge so
+     * it can have significant trade-offs. The smaller this number the higher
+     * the performance but the less compression efficiency. Default is 3 */
+    uint32_t  maxNumMergeCand;
+
+    /* Limit the motion references used for each search based on the results of
+     * previous motion searches already performed for the same CU: If 0 all
+     * references are always searched. If X265_REF_LIMIT_CU all motion searches
+     * will restrict themselves to the references selected by the 2Nx2N search
+     * at the same depth. If X265_REF_LIMIT_DEPTH the 2Nx2N motion search will
+     * only use references that were selected by the best motion searches of the
+     * 4 split CUs at the next lower CU depth.  The two flags may be combined */
+    uint32_t  limitReferences;
+
+    /* ME search method (DIA, HEX, UMH, STAR, FULL). The search patterns
+     * (methods) are sorted in increasing complexity, with diamond being the
+     * simplest and fastest and full being the slowest.  DIA, HEX, and UMH were
+     * adapted from x264 directly. STAR is an adaption of the HEVC reference
+     * encoder's three step search, while full is a naive exhaustive search. The
+     * default is the star search, it has a good balance of performance and
+     * compression efficiency */
+    int       searchMethod;
+
+    /* A value between 0 and X265_MAX_SUBPEL_LEVEL which adjusts the amount of
+     * effort performed during sub-pel refine. Default is 5 */
+    int       subpelRefine;
+
+    /* The maximum distance from the motion prediction that the full pel motion
+     * search is allowed to progress before terminating. This value can have an
+     * effect on frame parallelism, as referenced frames must be at least this
+     * many rows of reconstructed pixels ahead of the referencee at all times.
+     * (When considering reference lag, the motion prediction must be ignored
+     * because it cannot be known ahead of time).  Default is 60, which is the
+     * default max CU size (64) minus the luma HPEL half-filter length (4). If a
+     * smaller CU size is used, the search range should be similarly reduced */
+    int       searchRange;
+
+    /* Enable availability of temporal motion vector for AMVP, default is enabled */
+    int       bEnableTemporalMvp;
+
+    /* Enable weighted prediction in P slices.  This enables weighting analysis
+     * in the lookahead, which influences slice decisions, and enables weighting
+     * analysis in the main encoder which allows P reference samples to have a
+     * weight function applied to them prior to using them for motion
+     * compensation.  In video which has lighting changes, it can give a large
+     * improvement in compression efficiency. Default is enabled */
+    int       bEnableWeightedPred;
+
+    /* Enable weighted prediction in B slices. Default is disabled */
+    int       bEnableWeightedBiPred;
+
+    /*== Loop Filters ==*/
+
+    /* Enable the deblocking loop filter, which improves visual quality by
+     * reducing blocking effects at block edges, particularly at lower bitrates
+     * or higher QP. When enabled it adds another CU row of reference lag,
+     * reducing frame parallelism effectiveness. Default is enabled */
+    int       bEnableLoopFilter;
+
+    /* deblocking filter tC offset [-6, 6] -6 light filter, 6 strong.
+     * This is the coded div2 value, actual offset is doubled at use */
+    int       deblockingFilterTCOffset;
+
+    /* deblocking filter Beta offset [-6, 6] -6 light filter, 6 strong
+     * This is the coded div2 value, actual offset is doubled at use */
+    int       deblockingFilterBetaOffset;
+
+    /* Enable the Sample Adaptive Offset loop filter, which reduces distortion
+     * effects by adjusting reconstructed sample values based on histogram
+     * analysis to better approximate the original samples. When enabled it adds
+     * a CU row of reference lag, reducing frame parallelism effectiveness.
+     * Default is enabled */
+    int       bEnableSAO;
+
+    /* Note: when deblocking and SAO are both enabled, the loop filter CU lag is
+     * only one row, as they operate in series on the same row. */
+
+    /* Select the method in which SAO deals with deblocking boundary pixels.  If
+     * disabled the right and bottom boundary areas are skipped. If enabled,
+     * non-deblocked pixels are used entirely. Default is disabled */
+    int       bSaoNonDeblocked;
+
+    /*== Analysis tools ==*/
+
+    /* A value between X265_NO_RDO_NO_RDOQ and X265_RDO_LEVEL which determines
+     * the level of rate distortion optimizations to perform during mode
+     * decisions and quantization. The more RDO the better the compression
+     * efficiency at a major cost of performance. Default is no RDO (0) */
+    int       rdLevel;
+
+    /* Enable early skip decisions to avoid intra and inter analysis in likely
+     * skip blocks. Default is disabled */
+    int       bEnableEarlySkip;
+
+    /* Use a faster search method to find the best intra mode. Default is 0 */
+    int       bEnableFastIntra;
+
+    /* Enable a faster determination of whether skipping the DCT transform will
+     * be beneficial. Slight performance gain for some compression loss. Default
+     * is enabled */
+    int       bEnableTSkipFast;
+
+    /* The CU Lossless flag, when enabled, compares the rate-distortion costs
+     * for normal and lossless encoding, and chooses the best mode for each CU.
+     * If lossless mode is chosen, the cu-transquant-bypass flag is set for that
+     * CU */
+    int       bCULossless;
+
+    /* Specify whether to attempt to encode intra modes in B frames. By default
+     * enabled, but only applicable for the presets which use rdLevel 5 or 6
+     * (veryslow and placebo). All other presets will not try intra in B frames
+     * regardless of this setting */
+    int       bIntraInBFrames;
+
+    /* Apply an optional penalty to the estimated cost of 32x32 intra blocks in
+     * non-intra slices. 0 is disabled, 1 enables a small penalty, and 2 enables
+     * a full penalty. This favors inter-coding and its low bitrate over
+     * potential increases in distortion, but usually improves performance.
+     * Default is 0 */
+    int       rdPenalty;
+
+    /* Psycho-visual rate-distortion strength. Only has an effect in presets
+     * which use RDO. It makes mode decision favor options which preserve the
+     * energy of the source, at the cost of lost compression. The value must
+     * be between 0 and 2.0, 1.0 is typical. Default 0.3 */
+    double    psyRd;
+
+    /* Strength of psycho-visual optimizations in quantization. Only has an
+     * effect in presets which use RDOQ (rd-levels 4 and 5).  The value must be
+     * between 0 and 50, 1.0 is typical. Default 1.0 */
+    double    psyRdoq;
+
+    /* If X265_ANALYSIS_SAVE, write per-frame analysis information into analysis
+     * buffers.  if X265_ANALYSIS_LOAD, read analysis information into analysis
+     * buffer and use this analysis information to reduce the amount of work
+     * the encoder must perform. Default X265_ANALYSIS_OFF */
+    int       analysisMode;
+
+    /* Filename for analysisMode save/load. Default name is "x265_analysis.dat" */
+    const char* analysisFileName;
+
+    /*== Rate Control ==*/
+
+    /* The lossless flag enables true lossless coding, bypassing scaling,
+     * transform, quantization and in-loop filter processes. This is used for
+     * ultra-high bitrates with zero loss of quality. It implies no rate control */
+    int       bLossless;
+
+    /* Generally a small signed integer which offsets the QP used to quantize
+     * the Cb chroma residual (delta from luma QP specified by rate-control).
+     * Default is 0, which is recommended */
+    int       cbQpOffset;
+
+    /* Generally a small signed integer which offsets the QP used to quantize
+     * the Cr chroma residual (delta from luma QP specified by rate-control).
+     * Default is 0, which is recommended */
+    int       crQpOffset;
+
+    struct
+    {
+        /* Explicit mode of rate-control, necessary for API users. It must
+         * be one of the X265_RC_METHODS enum values. */
+        int       rateControlMode;
+
+        /* Base QP to use for Constant QP rate control. Adaptive QP may alter
+         * the QP used for each block. If a QP is specified on the command line
+         * CQP rate control is implied. Default: 32 */
+        int       qp;
+
+        /* target bitrate for Average BitRate (ABR) rate control. If a non- zero
+         * bitrate is specified on the command line, ABR is implied. Default 0 */
+        int       bitrate;
+
+        /* qComp sets the quantizer curve compression factor. It weights the frame
+         * quantizer based on the complexity of residual (measured by lookahead).
+         * Default value is 0.6. Increasing it to 1 will effectively generate CQP */
+        double    qCompress;
+
+        /* QP offset between I/P and P/B frames. Default ipfactor: 1.4
+         * Default pbFactor: 1.3 */
+        double    ipFactor;
+        double    pbFactor;
+
+        /* Ratefactor constant: targets a certain constant "quality".
+         * Acceptable values between 0 and 51. Default value: 28 */
+        double    rfConstant;
+
+        /* Max QP difference between frames. Default: 4 */
+        int       qpStep;
+
+        /* Enable adaptive quantization. This mode distributes available bits between all
+         * CTUs of a frame, assigning more bits to low complexity areas. Turning
+         * this ON will usually affect PSNR negatively, however SSIM and visual quality
+         * generally improves. Default: X265_AQ_VARIANCE */
+        int       aqMode;
+
+        /* Sets the strength of AQ bias towards low detail CTUs. Valid only if
+         * AQ is enabled. Default value: 1.0. Acceptable values between 0.0 and 3.0 */
+        double    aqStrength;
+
+        /* Sets the maximum rate the VBV buffer should be assumed to refill at
+         * Default is zero */
+        int       vbvMaxBitrate;
+
+        /* Sets the size of the VBV buffer in kilobits. Default is zero */
+        int       vbvBufferSize;
+
+        /* Sets how full the VBV buffer must be before playback starts. If it is less than
+         * 1, then the initial fill is vbv-init * vbvBufferSize. Otherwise, it is
+         * interpreted as the initial fill in kbits. Default is 0.9 */
+        double    vbvBufferInit;
+
+        /* Enable CUTree rate-control. This keeps track of the CUs that propagate temporally
+         * across frames and assigns more bits to these CUs. Improves encode efficiency.
+         * Default: enabled */
+        int       cuTree;
+
+        /* In CRF mode, maximum CRF as caused by VBV. 0 implies no limit */
+        double    rfConstantMax;
+
+        /* In CRF mode, minimum CRF as caused by VBV */
+        double    rfConstantMin;
+
+        /* Multi-pass encoding */
+        /* Enable writing the stats in a multi-pass encode to the stat output file */
+        int       bStatWrite;
+
+        /* Enable loading data from the stat input file in a multi pass encode */
+        int       bStatRead;
+
+        /* Filename of the 2pass output/input stats file, if unspecified the
+         * encoder will default to using x265_2pass.log */
+        const char* statFileName;
+
+        /* temporally blur quants */
+        double    qblur;
+
+        /* temporally blur complexity */
+        double    complexityBlur;
+
+        /* Enable slow and a more detailed first pass encode in multi pass rate control */
+        int       bEnableSlowFirstPass;
+        
+        /* rate-control overrides */
+        int        zoneCount;
+        x265_zone* zones;
+
+        /* specify a text file which contains MAX_MAX_QP + 1 floating point
+         * values to be copied into x265_lambda_tab and a second set of
+         * MAX_MAX_QP + 1 floating point values for x265_lambda2_tab. All values
+         * are separated by comma, space or newline. Text after a hash (#) is
+         * ignored. The lambda tables are process-global, so these new lambda
+         * values will affect all encoders in the same process */
+        const char* lambdaFileName;
+
+        /* Enable stricter conditions to check bitrate deviations in CBR mode. May compromise 
+         * quality to maintain bitrate adherence */
+        int bStrictCbr;
+
+        /* Enable adaptive quantization at CU granularity. This parameter specifies 
+         * the minimum CU size at which QP can be adjusted, i.e. Quantization Group 
+         * (QG) size. Allowed values are 64, 32, 16 provided it falls within the 
+         * inclusive range [maxCUSize, minCUSize]. Experimental, default: maxCUSize*/
+        uint32_t qgSize;
+    } rc;
+
+    /*== Video Usability Information ==*/
+    struct
+    {
+        /* Aspect ratio idc to be added to the VUI.  The default is 0 indicating
+         * the aspect ratio is unspecified. If set to X265_EXTENDED_SAR then
+         * sarWidth and sarHeight must also be set */
+        int aspectRatioIdc;
+
+        /* Sample Aspect Ratio width in arbitrary units to be added to the VUI
+         * only if aspectRatioIdc is set to X265_EXTENDED_SAR.  This is the width
+         * of an individual pixel. If this is set then sarHeight must also be set */
+        int sarWidth;
+
+        /* Sample Aspect Ratio height in arbitrary units to be added to the VUI.
+         * only if aspectRatioIdc is set to X265_EXTENDED_SAR.  This is the height
+         * of an individual pixel. If this is set then sarWidth must also be set */
+        int sarHeight;
+
+        /* Enable overscan info present flag in the VUI.  If this is set then
+         * bEnabledOverscanAppropriateFlag will be added to the VUI. The default
+         * is false */
+        int bEnableOverscanInfoPresentFlag;
+
+        /* Enable overscan appropriate flag.  The status of this flag is added
+         * to the VUI only if bEnableOverscanInfoPresentFlag is set. If this
+         * flag is set then cropped decoded pictures may be output for display.
+         * The default is false */
+        int bEnableOverscanAppropriateFlag;
+
+        /* Video signal type present flag of the VUI.  If this is set then
+         * videoFormat, bEnableVideoFullRangeFlag and
+         * bEnableColorDescriptionPresentFlag will be added to the VUI. The
+         * default is false */
+        int bEnableVideoSignalTypePresentFlag;
+
+        /* Video format of the source video.  0 = component, 1 = PAL, 2 = NTSC,
+         * 3 = SECAM, 4 = MAC, 5 = unspecified video format is the default */
+        int videoFormat;
+
+        /* Video full range flag indicates the black level and range of the luma
+         * and chroma signals as derived from E′Y, E′PB, and E′PR or E′R, E′G,
+         * and E′B real-valued component signals. The default is false */
+        int bEnableVideoFullRangeFlag;
+
+        /* Color description present flag in the VUI. If this is set then
+         * color_primaries, transfer_characteristics and matrix_coeffs are to be
+         * added to the VUI. The default is false */
+        int bEnableColorDescriptionPresentFlag;
+
+        /* Color primaries holds the chromaticity coordinates of the source
+         * primaries. The default is 2 */
+        int colorPrimaries;
+
+        /* Transfer characteristics indicates the opto-electronic transfer
+         * characteristic of the source picture. The default is 2 */
+        int transferCharacteristics;
+
+        /* Matrix coefficients used to derive the luma and chroma signals from
+         * the red, blue and green primaries. The default is 2 */
+        int matrixCoeffs;
+
+        /* Chroma location info present flag adds chroma_sample_loc_type_top_field and
+         * chroma_sample_loc_type_bottom_field to the VUI. The default is false */
+        int bEnableChromaLocInfoPresentFlag;
+
+        /* Chroma sample location type top field holds the chroma location in
+         * the top field. The default is 0 */
+        int chromaSampleLocTypeTopField;
+
+        /* Chroma sample location type bottom field holds the chroma location in
+         * the bottom field. The default is 0 */
+        int chromaSampleLocTypeBottomField;
+
+        /* Default display window flag adds def_disp_win_left_offset,
+         * def_disp_win_right_offset, def_disp_win_top_offset and
+         * def_disp_win_bottom_offset to the VUI. The default is false */
+        int bEnableDefaultDisplayWindowFlag;
+
+        /* Default display window left offset holds the left offset with the
+         * conformance cropping window to further crop the displayed window */
+        int defDispWinLeftOffset;
+
+        /* Default display window right offset holds the right offset with the
+         * conformance cropping window to further crop the displayed window */
+        int defDispWinRightOffset;
+
+        /* Default display window top offset holds the top offset with the
+         * conformance cropping window to further crop the displayed window */
+        int defDispWinTopOffset;
+
+        /* Default display window bottom offset holds the bottom offset with the
+         * conformance cropping window to further crop the displayed window */
+        int defDispWinBottomOffset;
+    } vui;
+
+    /* SMPTE ST 2086 mastering display color volume SEI info, specified as a
+     * string which is parsed when the stream header SEI are emitted. The string
+     * format is "G(%hu,%hu)B(%hu,%hu)R(%hu,%hu)WP(%hu,%hu)L(%u,%u)" where %hu
+     * are unsigned 16bit integers and %u are unsigned 32bit integers. The SEI
+     * includes X,Y display primaries for RGB channels, white point X,Y and
+     * max,min luminance values. */
+    const char* masteringDisplayColorVolume;
+
+    /* Maximum Content light level(MaxCLL), specified as an integer that indicates the
+     * maximum pixel intensity level in units of 1 candela per square metre of the
+     * bitstream. x265 will also calculate MaxCLL programmatically from the input
+     * pixel values and set in the Content light level info SEI */
+    uint16_t maxCLL;
+
+    /* Maximum Frame Average Light Level(MaxFALL), specified as an integer that indicates
+     * the maximum frame average intensity level in units of 1 candela per square
+     * metre of the bitstream. x265 will also calculate MaxFALL programmatically
+     * from the input pixel values and set in the Content light level info SEI */
+    uint16_t maxFALL;
+
+    /* Minimum luma level of input source picture, specified as an integer which
+     * would automatically increase any luma values below the specified --min-luma
+     * value to that value. */
+    uint16_t minLuma;
+
+    /* Maximum luma level of input source picture, specified as an integer which
+     * would automatically decrease any luma values above the specified --max-luma
+     * value to that value. */
+    uint16_t maxLuma;
+
+} x265_param;
+
+/* x265_param_alloc:
+ *  Allocates an x265_param instance. The returned param structure is not
+ *  special in any way, but using this method together with x265_param_free()
+ *  and x265_param_parse() to set values by name allows the application to treat
+ *  x265_param as an opaque data struct for version safety */
+x265_param *x265_param_alloc(void);
+
+/* x265_param_free:
+ *  Use x265_param_free() to release storage for an x265_param instance
+ *  allocated by x265_param_alloc() */
+void x265_param_free(x265_param *);
+
+/* x265_param_default:
+ *  Initialize an x265_param structure to default values */
+void x265_param_default(x265_param *param);
+
+/* x265_param_parse:
+ *  set one parameter by name.
+ *  returns 0 on success, or returns one of the following errors.
+ *  note: BAD_VALUE occurs only if it can't even parse the value,
+ *  numerical range is not checked until x265_encoder_open().
+ *  value=NULL means "true" for boolean options, but is a BAD_VALUE for non-booleans. */
+#define X265_PARAM_BAD_NAME  (-1)
+#define X265_PARAM_BAD_VALUE (-2)
+int x265_param_parse(x265_param *p, const char *name, const char *value);
+
+static const char * const x265_profile_names[] = {
+    /* HEVC v1 */
+    "main", "main10", "mainstillpicture", /* alias */ "msp",
+
+    /* HEVC v2 (Range Extensions) */
+    "main-intra", "main10-intra",
+    "main444-8",  "main444-intra", "main444-stillpicture",
+
+    "main422-10", "main422-10-intra",
+    "main444-10", "main444-10-intra",
+
+    "main12",     "main12-intra",
+    "main422-12", "main422-12-intra",
+    "main444-12", "main444-12-intra",
+
+    "main444-16-intra", "main444-16-stillpicture", /* Not Supported! */
+    0
+};
+
+/* x265_param_apply_profile:
+ *      Applies the restrictions of the given profile. (one of x265_profile_names)
+ *      (can be NULL, in which case the function will do nothing)
+ *      Note: the detected profile can be lower than the one specified to this
+ *      function. This function will force the encoder parameters to fit within
+ *      the specified profile, or fail if that is impossible.
+ *      returns 0 on success, negative on failure (e.g. invalid profile name). */
+int x265_param_apply_profile(x265_param *, const char *profile);
+
+/* x265_param_default_preset:
+ *      The same as x265_param_default, but also use the passed preset and tune
+ *      to modify the default settings.
+ *      (either can be NULL, which implies no preset or no tune, respectively)
+ *
+ *      Currently available presets are, ordered from fastest to slowest: */
+static const char * const x265_preset_names[] = { "ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow", "placebo", 0 };
+
+/*      The presets can also be indexed numerically, as in:
+ *      x265_param_default_preset( &param, "3", ... )
+ *      with ultrafast mapping to "0" and placebo mapping to "9".  This mapping may
+ *      of course change if new presets are added in between, but will always be
+ *      ordered from fastest to slowest.
+ *
+ *      Warning: the speed of these presets scales dramatically.  Ultrafast is a full
+ *      100 times faster than placebo!
+ *
+ *      Currently available tunings are: */
+static const char * const x265_tune_names[] = { "psnr", "ssim", "grain", "zerolatency", "fastdecode", 0 };
+
+/*      returns 0 on success, negative on failure (e.g. invalid preset/tune name). */
+int x265_param_default_preset(x265_param *, const char *preset, const char *tune);
+
+/* x265_picture_alloc:
+ *  Allocates an x265_picture instance. The returned picture structure is not
+ *  special in any way, but using this method together with x265_picture_free()
+ *  and x265_picture_init() allows some version safety. New picture fields will
+ *  always be added to the end of x265_picture */
+x265_picture *x265_picture_alloc(void);
+
+/* x265_picture_free:
+ *  Use x265_picture_free() to release storage for an x265_picture instance
+ *  allocated by x265_picture_alloc() */
+void x265_picture_free(x265_picture *);
+
+/* x265_picture_init:
+ *       Initialize an x265_picture structure to default values. It sets the pixel
+ *       depth and color space to the encoder's internal values and sets the slice
+ *       type to auto - so the lookahead will determine slice type. */
+void x265_picture_init(x265_param *param, x265_picture *pic);
+
+/* x265_max_bit_depth:
+ *      Specifies the number of bits per pixel that x265 uses internally to
+ *      represent a pixel, and the bit depth of the output bitstream.
+ *      param->internalBitDepth must be set to this value. x265_max_bit_depth
+ *      will be 8 for default builds, 10 for HIGH_BIT_DEPTH builds. */
+X265_API extern const int x265_max_bit_depth;
+
+/* x265_version_str:
+ *      A static string containing the version of this compiled x265 library */
+X265_API extern const char *x265_version_str;
+
+/* x265_build_info:
+ *      A static string describing the compiler and target architecture */
+X265_API extern const char *x265_build_info_str;
+
+/* Force a link error in the case of linking against an incompatible API version.
+ * Glue #defines exist to force correct macro expansion; the final output of the macro
+ * is x265_encoder_open_##X265_BUILD (for purposes of dlopen). */
+#define x265_encoder_glue1(x, y) x ## y
+#define x265_encoder_glue2(x, y) x265_encoder_glue1(x, y)
+#define x265_encoder_open x265_encoder_glue2(x265_encoder_open_, X265_BUILD)
+
+/* x265_encoder_open:
+ *      create a new encoder handler, all parameters from x265_param are copied */
+x265_encoder* x265_encoder_open(x265_param *);
+
+/* x265_encoder_parameters:
+ *      copies the current internal set of parameters to the pointer provided
+ *      by the caller.  useful when the calling application needs to know
+ *      how x265_encoder_open has changed the parameters.
+ *      note that the data accessible through pointers in the returned param struct
+ *      (e.g. filenames) should not be modified by the calling application. */
+void x265_encoder_parameters(x265_encoder *, x265_param *);
+
+/* x265_encoder_headers:
+ *      return the SPS and PPS that will be used for the whole stream.
+ *      *pi_nal is the number of NAL units outputted in pp_nal.
+ *      returns negative on error, total byte size of payload data on success
+ *      the payloads of all output NALs are guaranteed to be sequential in memory. */
+int x265_encoder_headers(x265_encoder *, x265_nal **pp_nal, uint32_t *pi_nal);
+
+/* x265_encoder_encode:
+ *      encode one picture.
+ *      *pi_nal is the number of NAL units outputted in pp_nal.
+ *      returns negative on error, 1 if a picture and access unit were output,
+ *      or zero if the encoder pipeline is still filling or is empty after flushing.
+ *      the payloads of all output NALs are guaranteed to be sequential in memory.
+ *      To flush the encoder and retrieve delayed output pictures, pass pic_in as NULL.
+ *      Once flushing has begun, all subsequent calls must pass pic_in as NULL. */
+int x265_encoder_encode(x265_encoder *encoder, x265_nal **pp_nal, uint32_t *pi_nal, x265_picture *pic_in, x265_picture *pic_out);
+
+/* x265_encoder_reconfig:
+ *      various parameters from x265_param are copied.
+ *      this takes effect immediately, on whichever frame is encoded next;
+ *      returns 0 on success, negative on parameter validation error.
+ *
+ *      not all parameters can be changed; see the actual function for a
+ *      detailed breakdown.  since not all parameters can be changed, moving
+ *      from preset to preset may not always fully copy all relevant parameters,
+ *      but should still work usably in practice. however, more so than for
+ *      other presets, many of the speed shortcuts used in ultrafast cannot be
+ *      switched out of; using reconfig to switch between ultrafast and other
+ *      presets is not recommended without a more fine-grained breakdown of
+ *      parameters to take this into account. */
+int x265_encoder_reconfig(x265_encoder *, x265_param *);
+
+/* x265_encoder_get_stats:
+ *       returns encoder statistics */
+void x265_encoder_get_stats(x265_encoder *encoder, x265_stats *, uint32_t statsSizeBytes);
+
+/* x265_encoder_log:
+ *       This function is deprecated */
+void x265_encoder_log(x265_encoder *encoder, int argc, char **argv);
+
+/* x265_encoder_close:
+ *      close an encoder handler */
+void x265_encoder_close(x265_encoder *);
+
+/* x265_cleanup:
+ *       release library static allocations, reset configured CTU size */
+void x265_cleanup(void);
+
+#define X265_MAJOR_VERSION 1
+
+/* === Multi-lib API ===
+ * By using this method to gain access to the libx265 interfaces, you allow run-
+ * time selection between various available libx265 libraries based on the
+ * encoder parameters. The most likely use case is to choose between Main and
+ * Main10 builds of libx265. */
+
+typedef struct x265_api
+{
+    int           api_major_version;    /* X265_MAJOR_VERSION */
+    int           api_build_number;     /* X265_BUILD (soname) */
+    int           sizeof_param;         /* sizeof(x265_param) */
+    int           sizeof_picture;       /* sizeof(x265_picture) */
+    int           sizeof_analysis_data; /* sizeof(x265_analysis_data) */
+    int           sizeof_zone;          /* sizeof(x265_zone) */
+    int           sizeof_stats;         /* sizeof(x265_stats) */
+
+    int           bit_depth;
+    const char*   version_str;
+    const char*   build_info_str;
+
+    /* libx265 public API functions, documented above with x265_ prefixes */
+    x265_param*   (*param_alloc)(void);
+    void          (*param_free)(x265_param*);
+    void          (*param_default)(x265_param*);
+    int           (*param_parse)(x265_param*, const char*, const char*);
+    int           (*param_apply_profile)(x265_param*, const char*);
+    int           (*param_default_preset)(x265_param*, const char*, const char *);
+    x265_picture* (*picture_alloc)(void);
+    void          (*picture_free)(x265_picture*);
+    void          (*picture_init)(x265_param*, x265_picture*);
+    x265_encoder* (*encoder_open)(x265_param*);
+    void          (*encoder_parameters)(x265_encoder*, x265_param*);
+    int           (*encoder_reconfig)(x265_encoder*, x265_param*);
+    int           (*encoder_headers)(x265_encoder*, x265_nal**, uint32_t*);
+    int           (*encoder_encode)(x265_encoder*, x265_nal**, uint32_t*, x265_picture*, x265_picture*);
+    void          (*encoder_get_stats)(x265_encoder*, x265_stats*, uint32_t);
+    void          (*encoder_log)(x265_encoder*, int, char**);
+    void          (*encoder_close)(x265_encoder*);
+    void          (*cleanup)(void);
+
+    int           sizeof_frame_stats;   /* sizeof(x265_frame_stats) */
+    /* add new pointers to the end, or increment X265_MAJOR_VERSION */
+} x265_api;
+
+/* Force a link error in the case of linking against an incompatible API version.
+ * Glue #defines exist to force correct macro expansion; the final output of the macro
+ * is x265_api_get_##X265_BUILD (for purposes of dlopen). */
+#define x265_api_glue1(x, y) x ## y
+#define x265_api_glue2(x, y) x265_api_glue1(x, y)
+#define x265_api_get x265_api_glue2(x265_api_get_, X265_BUILD)
+
+/* x265_api_get:
+ *   Retrieve the programming interface for a linked x265 library.
+ *   May return NULL if no library is available that supports the
+ *   requested bit depth. If bitDepth is 0 the function is guaranteed
+ *   to return a non-NULL x265_api pointer, from the linked libx265.
+ *
+ *   If the requested bitDepth is not supported by the linked libx265,
+ *   it will attempt to dynamically bind x265_api_get() from a shared
+ *   library with an appropriate name:
+ *     8bit:  libx265_main.so
+ *     10bit: libx265_main10.so
+ *   Obviously the shared library file extension is platform specific */
+const x265_api* x265_api_get(int bitDepth);
+
+/* x265_api_query:
+ *   Retrieve the programming interface for a linked x265 library, like
+ *   x265_api_get(), except this function accepts X265_BUILD as the second
+ *   argument rather than using the build number as part of the function name.
+ *   Applications which dynamically link to libx265 can use this interface to
+ *   query the library API and achieve a relative amount of version skew
+ *   flexibility. The function may return NULL if the library determines that
+ *   the apiVersion that your application was compiled against is not compatible
+ *   with the library you have linked with.
+ *
+ *   api_major_version will be incremented any time non-backward compatible
+ *   changes are made to any public structures or functions. If
+ *   api_major_version does not match X265_MAJOR_VERSION from the x265.h your
+ *   application compiled against, your application must not use the returned
+ *   x265_api pointer.
+ *
+ *   Users of this API *must* also validate the sizes of any structures which
+ *   are not treated as opaque in application code. For instance, if your
+ *   application dereferences a x265_param pointer, then it must check that
+ *   api->sizeof_param matches the sizeof(x265_param) that your application
+ *   compiled with. */
+const x265_api* x265_api_query(int bitDepth, int apiVersion, int* err);
+
+#define X265_API_QUERY_ERR_NONE           0 /* returned API pointer is non-NULL */
+#define X265_API_QUERY_ERR_VER_REFUSED    1 /* incompatible version skew        */
+#define X265_API_QUERY_ERR_LIB_NOT_FOUND  2 /* libx265_main10 not found, for ex */
+#define X265_API_QUERY_ERR_FUNC_NOT_FOUND 3 /* unable to bind x265_api_query    */
+#define X265_API_QUERY_ERR_WRONG_BITDEPTH 4 /* libx265_main10 not 10bit, for ex */
+
+static const char * const x265_api_query_errnames[] = {
+    "api queried from libx265",
+    "libx265 version is not compatible with this application",
+    "unable to bind a libx265 with requested bit depth",
+    "unable to bind x265_api_query from libx265",
+    "libx265 has an invalid bitdepth"
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // X265_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/x265.pc.in	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,11 @@
+prefix=@CMAKE_INSTALL_PREFIX@
+exec_prefix=${prefix}
+libdir=${exec_prefix}/@LIB_INSTALL_DIR@
+includedir=${prefix}/include
+
+Name: @CMAKE_PROJECT_NAME@
+Description: H.265/HEVC video encoder
+Version: @X265_LATEST_TAG@
+Libs: -L${libdir} -lx265
+Libs.private: @PRIVATE_LIBS@
+Cflags: -I${includedir}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/x265.rc.in	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,32 @@
+#include <winresrc.h>
+
+VS_VERSION_INFO VERSIONINFO
+  FILEVERSION    @X265_VERSION_MAJOR@,@X265_VERSION_MINOR@,@X265_BRANCH_ID@,@X265_TAG_DISTANCE@
+  PRODUCTVERSION @X265_VERSION_MAJOR@,@X265_VERSION_MINOR@,@X265_BRANCH_ID@,@X265_TAG_DISTANCE@
+  FILEFLAGSMASK  VS_FFI_FILEFLAGSMASK
+  FILEOS         VOS_NT_WINDOWS32
+#ifdef OPENOBEX_EXPORTS
+  FILETYPE       VFT_DLL
+#else
+  FILETYPE       VFT_STATIC_LIB
+#endif
+  FILESUBTYPE    VFT2_UNKNOWN
+  BEGIN
+    BLOCK "StringFileInfo"
+    BEGIN
+      BLOCK "04090000"
+      BEGIN
+        VALUE "FileDescription",  "HEVC video encoder"
+        VALUE "FileVersion",      "@X265_VERSION@"
+        VALUE "InternalName",     "x265"
+        VALUE "LegalCopyright",   "Multicoreware: GPLv2 or commercial"
+        VALUE "OriginalFilename", "libx265.dll"
+        VALUE "ProductName",      "x265"
+        VALUE "ProductVersion",   "@X265_VERSION@"
+      END
+    END
+    BLOCK "VarFileInfo"
+    BEGIN
+      VALUE "Translation", 0x409, 1200
+    END
+END
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/x265_config.h.in	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,34 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265_CONFIG_H
+#define X265_CONFIG_H
+
+/* Defines generated at build time */
+
+/* Incremented each time public API is changed, X265_BUILD is used as
+ * the shared library SONAME on platforms which support it. It also
+ * prevents linking against a different version of the static lib */
+#define X265_BUILD ${X265_BUILD}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265/source/x265cli.h	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,441 @@
+/*****************************************************************************
+ * Copyright (C) 2013 x265 project
+ *
+ * Authors: Steve Borho <steve@borho.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef X265CLI_H
+#define X265CLI_H 1
+
+#include "common.h"
+#include "param.h"
+
+#include <getopt.h>
+
+#ifdef __cplusplus
+namespace X265_NS {
+#endif
+
+static const char short_options[] = "o:D:P:p:f:F:r:I:i:b:s:t:q:m:hwV?";
+static const struct option long_options[] =
+{
+    { "help",                 no_argument, NULL, 'h' },
+    { "version",              no_argument, NULL, 'V' },
+    { "asm",            required_argument, NULL, 0 },
+    { "no-asm",               no_argument, NULL, 0 },
+    { "pools",          required_argument, NULL, 0 },
+    { "numa-pools",     required_argument, NULL, 0 },
+    { "preset",         required_argument, NULL, 'p' },
+    { "tune",           required_argument, NULL, 't' },
+    { "frame-threads",  required_argument, NULL, 'F' },
+    { "no-pmode",             no_argument, NULL, 0 },
+    { "pmode",                no_argument, NULL, 0 },
+    { "no-pme",               no_argument, NULL, 0 },
+    { "pme",                  no_argument, NULL, 0 },
+    { "log-level",      required_argument, NULL, 0 },
+    { "profile",        required_argument, NULL, 'P' },
+    { "level-idc",      required_argument, NULL, 0 },
+    { "high-tier",            no_argument, NULL, 0 },
+    { "no-high-tier",         no_argument, NULL, 0 },
+    { "allow-non-conformance",no_argument, NULL, 0 },
+    { "no-allow-non-conformance",no_argument, NULL, 0 },
+    { "csv",            required_argument, NULL, 0 },
+    { "csv-log-level",  required_argument, NULL, 0 },
+    { "no-cu-stats",          no_argument, NULL, 0 },
+    { "cu-stats",             no_argument, NULL, 0 },
+    { "y4m",                  no_argument, NULL, 0 },
+    { "no-progress",          no_argument, NULL, 0 },
+    { "output",         required_argument, NULL, 'o' },
+    { "output-depth",   required_argument, NULL, 'D' },
+    { "input",          required_argument, NULL, 0 },
+    { "input-depth",    required_argument, NULL, 0 },
+    { "input-res",      required_argument, NULL, 0 },
+    { "input-csp",      required_argument, NULL, 0 },
+    { "interlace",      required_argument, NULL, 0 },
+    { "no-interlace",         no_argument, NULL, 0 },
+    { "fps",            required_argument, NULL, 0 },
+    { "seek",           required_argument, NULL, 0 },
+    { "frame-skip",     required_argument, NULL, 0 },
+    { "frames",         required_argument, NULL, 'f' },
+    { "recon",          required_argument, NULL, 'r' },
+    { "recon-depth",    required_argument, NULL, 0 },
+    { "no-wpp",               no_argument, NULL, 0 },
+    { "wpp",                  no_argument, NULL, 0 },
+    { "ctu",            required_argument, NULL, 's' },
+    { "min-cu-size",    required_argument, NULL, 0 },
+    { "max-tu-size",    required_argument, NULL, 0 },
+    { "tu-intra-depth", required_argument, NULL, 0 },
+    { "tu-inter-depth", required_argument, NULL, 0 },
+    { "me",             required_argument, NULL, 0 },
+    { "subme",          required_argument, NULL, 'm' },
+    { "merange",        required_argument, NULL, 0 },
+    { "max-merge",      required_argument, NULL, 0 },
+    { "no-temporal-mvp",      no_argument, NULL, 0 },
+    { "temporal-mvp",         no_argument, NULL, 0 },
+    { "rdpenalty",      required_argument, NULL, 0 },
+    { "no-rect",              no_argument, NULL, 0 },
+    { "rect",                 no_argument, NULL, 0 },
+    { "no-amp",               no_argument, NULL, 0 },
+    { "amp",                  no_argument, NULL, 0 },
+    { "no-early-skip",        no_argument, NULL, 0 },
+    { "early-skip",           no_argument, NULL, 0 },
+    { "no-fast-cbf",          no_argument, NULL, 0 },
+    { "fast-cbf",             no_argument, NULL, 0 },
+    { "no-tskip",             no_argument, NULL, 0 },
+    { "tskip",                no_argument, NULL, 0 },
+    { "no-tskip-fast",        no_argument, NULL, 0 },
+    { "tskip-fast",           no_argument, NULL, 0 },
+    { "cu-lossless",          no_argument, NULL, 0 },
+    { "no-cu-lossless",       no_argument, NULL, 0 },
+    { "no-constrained-intra", no_argument, NULL, 0 },
+    { "constrained-intra",    no_argument, NULL, 0 },
+    { "cip",                  no_argument, NULL, 0 },
+    { "no-cip",               no_argument, NULL, 0 },
+    { "fast-intra",           no_argument, NULL, 0 },
+    { "no-fast-intra",        no_argument, NULL, 0 },
+    { "no-open-gop",          no_argument, NULL, 0 },
+    { "open-gop",             no_argument, NULL, 0 },
+    { "keyint",         required_argument, NULL, 'I' },
+    { "min-keyint",     required_argument, NULL, 'i' },
+    { "scenecut",       required_argument, NULL, 0 },
+    { "no-scenecut",          no_argument, NULL, 0 },
+    { "rc-lookahead",   required_argument, NULL, 0 },
+    { "lookahead-slices", required_argument, NULL, 0 },
+    { "bframes",        required_argument, NULL, 'b' },
+    { "bframe-bias",    required_argument, NULL, 0 },
+    { "b-adapt",        required_argument, NULL, 0 },
+    { "no-b-adapt",           no_argument, NULL, 0 },
+    { "no-b-pyramid",         no_argument, NULL, 0 },
+    { "b-pyramid",            no_argument, NULL, 0 },
+    { "ref",            required_argument, NULL, 0 },
+    { "limit-refs",     required_argument, NULL, 0 },
+    { "no-weightp",           no_argument, NULL, 0 },
+    { "weightp",              no_argument, NULL, 'w' },
+    { "no-weightb",           no_argument, NULL, 0 },
+    { "weightb",              no_argument, NULL, 0 },
+    { "crf",            required_argument, NULL, 0 },
+    { "crf-max",        required_argument, NULL, 0 },
+    { "crf-min",        required_argument, NULL, 0 },
+    { "vbv-maxrate",    required_argument, NULL, 0 },
+    { "vbv-bufsize",    required_argument, NULL, 0 },
+    { "vbv-init",       required_argument, NULL, 0 },
+    { "bitrate",        required_argument, NULL, 0 },
+    { "qp",             required_argument, NULL, 'q' },
+    { "aq-mode",        required_argument, NULL, 0 },
+    { "aq-strength",    required_argument, NULL, 0 },
+    { "ipratio",        required_argument, NULL, 0 },
+    { "pbratio",        required_argument, NULL, 0 },
+    { "qcomp",          required_argument, NULL, 0 },
+    { "qpstep",         required_argument, NULL, 0 },
+    { "ratetol",        required_argument, NULL, 0 },
+    { "cplxblur",       required_argument, NULL, 0 },
+    { "qblur",          required_argument, NULL, 0 },
+    { "cbqpoffs",       required_argument, NULL, 0 },
+    { "crqpoffs",       required_argument, NULL, 0 },
+    { "rd",             required_argument, NULL, 0 },
+    { "rdoq-level",     required_argument, NULL, 0 },
+    { "no-rdoq-level",        no_argument, NULL, 0 },
+    { "psy-rd",         required_argument, NULL, 0 },
+    { "psy-rdoq",       required_argument, NULL, 0 },
+    { "no-psy-rd",            no_argument, NULL, 0 },
+    { "no-psy-rdoq",          no_argument, NULL, 0 },
+    { "scaling-list",   required_argument, NULL, 0 },
+    { "lossless",             no_argument, NULL, 0 },
+    { "no-lossless",          no_argument, NULL, 0 },
+    { "no-signhide",          no_argument, NULL, 0 },
+    { "signhide",             no_argument, NULL, 0 },
+    { "no-lft",               no_argument, NULL, 0 }, /* DEPRECATED */
+    { "lft",                  no_argument, NULL, 0 }, /* DEPRECATED */
+    { "no-deblock",           no_argument, NULL, 0 },
+    { "deblock",        required_argument, NULL, 0 },
+    { "no-sao",               no_argument, NULL, 0 },
+    { "sao",                  no_argument, NULL, 0 },
+    { "no-sao-non-deblock",   no_argument, NULL, 0 },
+    { "sao-non-deblock",      no_argument, NULL, 0 },
+    { "no-ssim",              no_argument, NULL, 0 },
+    { "ssim",                 no_argument, NULL, 0 },
+    { "no-psnr",              no_argument, NULL, 0 },
+    { "psnr",                 no_argument, NULL, 0 },
+    { "hash",           required_argument, NULL, 0 },
+    { "no-strong-intra-smoothing", no_argument, NULL, 0 },
+    { "strong-intra-smoothing",    no_argument, NULL, 0 },
+    { "no-cutree",                 no_argument, NULL, 0 },
+    { "cutree",                    no_argument, NULL, 0 },
+    { "no-hrd",               no_argument, NULL, 0 },
+    { "hrd",                  no_argument, NULL, 0 },
+    { "sar",            required_argument, NULL, 0 },
+    { "overscan",       required_argument, NULL, 0 },
+    { "videoformat",    required_argument, NULL, 0 },
+    { "range",          required_argument, NULL, 0 },
+    { "colorprim",      required_argument, NULL, 0 },
+    { "transfer",       required_argument, NULL, 0 },
+    { "colormatrix",    required_argument, NULL, 0 },
+    { "chromaloc",      required_argument, NULL, 0 },
+    { "display-window", required_argument, NULL, 0 },
+    { "crop-rect",      required_argument, NULL, 0 }, /* DEPRECATED */
+    { "master-display", required_argument, NULL, 0 },
+    { "max-cll",        required_argument, NULL, 0 },
+    { "min-luma",       required_argument, NULL, 0 },
+    { "max-luma",       required_argument, NULL, 0 },
+    { "no-dither",            no_argument, NULL, 0 },
+    { "dither",               no_argument, NULL, 0 },
+    { "no-repeat-headers",    no_argument, NULL, 0 },
+    { "repeat-headers",       no_argument, NULL, 0 },
+    { "aud",                  no_argument, NULL, 0 },
+    { "no-aud",               no_argument, NULL, 0 },
+    { "info",                 no_argument, NULL, 0 },
+    { "no-info",              no_argument, NULL, 0 },
+    { "zones",          required_argument, NULL, 0 },
+    { "qpfile",         required_argument, NULL, 0 },
+    { "lambda-file",    required_argument, NULL, 0 },
+    { "b-intra",              no_argument, NULL, 0 },
+    { "no-b-intra",           no_argument, NULL, 0 },
+    { "nr-intra",       required_argument, NULL, 0 },
+    { "nr-inter",       required_argument, NULL, 0 },
+    { "stats",          required_argument, NULL, 0 },
+    { "pass",           required_argument, NULL, 0 },
+    { "slow-firstpass",       no_argument, NULL, 0 },
+    { "no-slow-firstpass",    no_argument, NULL, 0 },
+    { "analysis-mode",  required_argument, NULL, 0 },
+    { "analysis-file",  required_argument, NULL, 0 },
+    { "strict-cbr",           no_argument, NULL, 0 },
+    { "temporal-layers",      no_argument, NULL, 0 },
+    { "no-temporal-layers",   no_argument, NULL, 0 },
+    { "qg-size",        required_argument, NULL, 0 },
+    { "recon-y4m-exec", required_argument, NULL, 0 },
+    { 0, 0, 0, 0 },
+    { 0, 0, 0, 0 },
+    { 0, 0, 0, 0 },
+    { 0, 0, 0, 0 },
+    { 0, 0, 0, 0 }
+};
+
+/* Log the x265 library version and build-configuration strings at INFO level. */
+static void printVersion(x265_param *param, const x265_api* api)
+{
+    x265_log(param, X265_LOG_INFO, "HEVC encoder version %s\n", api->version_str);
+    x265_log(param, X265_LOG_INFO, "build info %s\n", api->build_info_str);
+}
+
+/* Print the CLI usage text to stdout and exit(1).  Options marked with the
+ * H1 macro are only shown when the configured log level is at least
+ * X265_LOG_DEBUG (i.e. `--log-level full --help` shows the long listing). */
+static void showHelp(x265_param *param)
+{
+    int level = param->logLevel;
+
+/* OPT renders a boolean default; H0 always prints; H1 prints only in debug/full mode */
+#define OPT(value) (value ? "enabled" : "disabled")
+#define H0 printf
+#define H1 if (level >= X265_LOG_DEBUG) printf
+
+    H0("\nSyntax: x265 [options] infile [-o] outfile\n");
+    H0("    infile can be YUV or Y4M\n");
+    H0("    outfile is raw HEVC bitstream\n");
+    H0("\nExecutable Options:\n");
+    H0("-h/--help                        Show this help text and exit\n");
+    H0("-V/--version                     Show version info and exit\n");
+    H0("\nOutput Options:\n");
+    H0("-o/--output <filename>           Bitstream output file name\n");
+    H0("-D/--output-depth 8|10|12        Output bit depth (also internal bit depth). Default %d\n", param->internalBitDepth);
+    H0("   --log-level <string>          Logging level: none error warning info debug full. Default %s\n", X265_NS::logLevelNames[param->logLevel + 1]);
+    H0("   --no-progress                 Disable CLI progress reports\n");
+    H0("   --csv <filename>              Comma separated log file, if csv-log-level > 0 frame level statistics, else one line per run\n");
+    H0("   --csv-log-level               Level of csv logging, if csv-log-level > 0 frame level statistics, else one line per run: 0-2\n");
+    H0("\nInput Options:\n");
+    H0("   --input <filename>            Raw YUV or Y4M input file name. `-` for stdin\n");
+    H1("   --y4m                         Force parsing of input stream as YUV4MPEG2 regardless of file extension\n");
+    H0("   --fps <float|rational>        Source frame rate (float or num/denom), auto-detected if Y4M\n");
+    H0("   --input-res WxH               Source picture size [w x h], auto-detected if Y4M\n");
+    H1("   --input-depth <integer>       Bit-depth of input file. Default 8\n");
+    H1("   --input-csp <string>          Source color space: i420, i444 or i422, auto-detected if Y4M. Default: i420\n");
+    H0("-f/--frames <integer>            Maximum number of frames to encode. Default all\n");
+    H0("   --seek <integer>              First frame to encode\n");
+    H1("   --[no-]interlace <bff|tff>    Indicate input pictures are interlace fields in temporal order. Default progressive\n");
+    H1("   --dither                      Enable dither if downscaling to 8 bit pixels. Default disabled\n");
+    H0("\nQuality reporting metrics:\n");
+    H0("   --[no-]ssim                   Enable reporting SSIM metric scores. Default %s\n", OPT(param->bEnableSsim));
+    H0("   --[no-]psnr                   Enable reporting PSNR metric scores. Default %s\n", OPT(param->bEnablePsnr));
+    H0("\nProfile, Level, Tier:\n");
+    H0("-P/--profile <string>            Enforce an encode profile: main, main10, mainstillpicture\n");
+    H0("   --level-idc <integer|float>   Force a minimum required decoder level (as '5.0' or '50')\n");
+    H0("   --[no-]high-tier              If a decoder level is specified, this modifier selects High tier of that level\n");
+    H0("   --[no-]allow-non-conformance  Allow the encoder to generate profile NONE bitstreams. Default %s\n", OPT(param->bAllowNonConformance));
+    H0("\nThreading, performance:\n");
+    H0("   --pools <integer,...>         Comma separated thread count per thread pool (pool per NUMA node)\n");
+    H0("                                 '-' implies no threads on node, '+' implies one thread per core on node\n");
+    H0("-F/--frame-threads <integer>     Number of concurrently encoded frames. 0: auto-determined by core count\n");
+    H0("   --[no-]wpp                    Enable Wavefront Parallel Processing. Default %s\n", OPT(param->bEnableWavefront));
+    H0("   --[no-]pmode                  Parallel mode analysis. Default %s\n", OPT(param->bDistributeModeAnalysis));
+    H0("   --[no-]pme                    Parallel motion estimation. Default %s\n", OPT(param->bDistributeMotionEstimation));
+    H0("   --[no-]asm <bool|int|string>  Override CPU detection. Default: auto\n");
+    H0("\nPresets:\n");
+    H0("-p/--preset <string>             Trade off performance for compression efficiency. Default medium\n");
+    H0("                                 ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow, or placebo\n");
+    H0("-t/--tune <string>               Tune the settings for a particular type of source or situation:\n");
+    H0("                                 psnr, ssim, grain, zerolatency, fastdecode\n");
+    H0("\nQuad-Tree size and depth:\n");
+    H0("-s/--ctu <64|32|16>              Maximum CU size (WxH). Default %d\n", param->maxCUSize);
+    H0("   --min-cu-size <64|32|16|8>    Minimum CU size (WxH). Default %d\n", param->minCUSize);
+    H0("   --max-tu-size <32|16|8|4>     Maximum TU size (WxH). Default %d\n", param->maxTUSize);
+    H0("   --tu-intra-depth <integer>    Max TU recursive depth for intra CUs. Default %d\n", param->tuQTMaxIntraDepth);
+    H0("   --tu-inter-depth <integer>    Max TU recursive depth for inter CUs. Default %d\n", param->tuQTMaxInterDepth);
+    H0("\nAnalysis:\n");
+    H0("   --rd <0..6>                   Level of RDO in mode decision 0:least....6:full RDO. Default %d\n", param->rdLevel);
+    H0("   --[no-]psy-rd <0..2.0>        Strength of psycho-visual rate distortion optimization, 0 to disable. Default %.1f\n", param->psyRd);
+    H0("   --[no-]rdoq-level <0|1|2>     Level of RDO in quantization 0:none, 1:levels, 2:levels & coding groups. Default %d\n", param->rdoqLevel);
+    H0("   --[no-]psy-rdoq <0..50.0>     Strength of psycho-visual optimization in RDO quantization, 0 to disable. Default %.1f\n", param->psyRdoq);
+    H0("   --[no-]early-skip             Enable early SKIP detection. Default %s\n", OPT(param->bEnableEarlySkip));
+    H1("   --[no-]tskip-fast             Enable fast intra transform skipping. Default %s\n", OPT(param->bEnableTSkipFast));
+    H1("   --nr-intra <integer>          An integer value in range of 0 to 2000, which denotes strength of noise reduction in intra CUs. Default 0\n");
+    H1("   --nr-inter <integer>          An integer value in range of 0 to 2000, which denotes strength of noise reduction in inter CUs. Default 0\n");
+    H0("\nCoding tools:\n");
+    H0("-w/--[no-]weightp                Enable weighted prediction in P slices. Default %s\n", OPT(param->bEnableWeightedPred));
+    H0("   --[no-]weightb                Enable weighted prediction in B slices. Default %s\n", OPT(param->bEnableWeightedBiPred));
+    H0("   --[no-]cu-lossless            Consider lossless mode in CU RDO decisions. Default %s\n", OPT(param->bCULossless));
+    H0("   --[no-]signhide               Hide sign bit of one coeff per TU (rdo). Default %s\n", OPT(param->bEnableSignHiding));
+    H1("   --[no-]tskip                  Enable intra 4x4 transform skipping. Default %s\n", OPT(param->bEnableTransformSkip));
+    H0("\nTemporal / motion search options:\n");
+    H0("   --max-merge <1..5>            Maximum number of merge candidates. Default %d\n", param->maxNumMergeCand);
+    H0("   --ref <integer>               max number of L0 references to be allowed (1 .. 16) Default %d\n", param->maxNumReferences);
+    H0("   --limit-refs <0|1|2|3>        limit references per depth (1) or CU (2) or both (3). Default %d\n", param->limitReferences);
+    H0("   --me <string>                 Motion search method dia hex umh star full. Default %d\n", param->searchMethod);
+    H0("-m/--subme <integer>             Amount of subpel refinement to perform (0:least .. 7:most). Default %d \n", param->subpelRefine);
+    H0("   --merange <integer>           Motion search range. Default %d\n", param->searchRange);
+    H0("   --[no-]rect                   Enable rectangular motion partitions Nx2N and 2NxN. Default %s\n", OPT(param->bEnableRectInter));
+    H0("   --[no-]amp                    Enable asymmetric motion partitions, requires --rect. Default %s\n", OPT(param->bEnableAMP));
+    H1("   --[no-]temporal-mvp           Enable temporal MV predictors. Default %s\n", OPT(param->bEnableTemporalMvp));
+    H0("\nSpatial / intra options:\n");
+    H0("   --[no-]strong-intra-smoothing Enable strong intra smoothing for 32x32 blocks. Default %s\n", OPT(param->bEnableStrongIntraSmoothing));
+    H0("   --[no-]constrained-intra      Constrained intra prediction (use only intra coded reference pixels) Default %s\n", OPT(param->bEnableConstrainedIntra));
+    H0("   --[no-]b-intra                Enable intra in B frames in veryslow presets. Default %s\n", OPT(param->bIntraInBFrames));
+    H0("   --[no-]fast-intra             Enable faster search method for angular intra predictions. Default %s\n", OPT(param->bEnableFastIntra));
+    H0("   --rdpenalty <0..2>            penalty for 32x32 intra TU in non-I slices. 0:disabled 1:RD-penalty 2:maximum. Default %d\n", param->rdPenalty);
+    H0("\nSlice decision options:\n");
+    H0("   --[no-]open-gop               Enable open-GOP, allows I slices to be non-IDR. Default %s\n", OPT(param->bOpenGOP));
+    H0("-I/--keyint <integer>            Max IDR period in frames. -1 for infinite-gop. Default %d\n", param->keyframeMax);
+    H0("-i/--min-keyint <integer>        Scenecuts closer together than this are coded as I, not IDR. Default: auto\n");
+    H0("   --no-scenecut                 Disable adaptive I-frame decision\n");
+    H0("   --scenecut <integer>          How aggressively to insert extra I-frames. Default %d\n", param->scenecutThreshold);
+    H0("   --rc-lookahead <integer>      Number of frames for frame-type lookahead (determines encoder latency) Default %d\n", param->lookaheadDepth);
+    H1("   --lookahead-slices <0..16>    Number of slices to use per lookahead cost estimate. Default %d\n", param->lookaheadSlices);
+    H0("   --bframes <integer>           Maximum number of consecutive b-frames (now it only enables B GOP structure) Default %d\n", param->bframes);
+    H1("   --bframe-bias <integer>       Bias towards B frame decisions. Default %d\n", param->bFrameBias);
+    H0("   --b-adapt <0..2>              0 - none, 1 - fast, 2 - full (trellis) adaptive B frame scheduling. Default %d\n", param->bFrameAdaptive);
+    H0("   --[no-]b-pyramid              Use B-frames as references. Default %s\n", OPT(param->bBPyramid));
+    H1("   --qpfile <string>             Force frametypes and QPs for some or all frames\n");
+    H1("                                 Format of each line: framenumber frametype QP\n");
+    H1("                                 QP is optional (none lets x265 choose). Frametypes: I,i,P,B,b.\n");
+    H1("                                 QPs are restricted by qpmin/qpmax.\n");
+    H0("\nRate control, Adaptive Quantization:\n");
+    H0("   --bitrate <integer>           Target bitrate (kbps) for ABR (implied). Default %d\n", param->rc.bitrate);
+    H1("-q/--qp <integer>                QP for P slices in CQP mode (implied). --ipratio and --pbration determine other slice QPs\n");
+    H0("   --crf <float>                 Quality-based VBR (0-51). Default %.1f\n", param->rc.rfConstant);
+    H1("   --[no-]lossless               Enable lossless: bypass transform, quant and loop filters globally. Default %s\n", OPT(param->bLossless));
+    H1("   --crf-max <float>             With CRF+VBV, limit RF to this value. Default %f\n", param->rc.rfConstantMax);
+    H1("                                 May cause VBV underflows!\n");
+    H1("   --crf-min <float>             With CRF+VBV, limit RF to this value. Default %f\n", param->rc.rfConstantMin);
+    H1("                                 this specifies a minimum rate factor value for encode!\n");
+    H0("   --vbv-maxrate <integer>       Max local bitrate (kbit/s). Default %d\n", param->rc.vbvMaxBitrate);
+    H0("   --vbv-bufsize <integer>       Set size of the VBV buffer (kbit). Default %d\n", param->rc.vbvBufferSize);
+    H0("   --vbv-init <float>            Initial VBV buffer occupancy (fraction of bufsize or in kbits). Default %.2f\n", param->rc.vbvBufferInit);
+    H0("   --pass                        Multi pass rate control.\n"
+       "                                   - 1 : First pass, creates stats file\n"
+       "                                   - 2 : Last pass, does not overwrite stats file\n"
+       "                                   - 3 : Nth pass, overwrites stats file\n");
+    H0("   --stats                       Filename for stats file in multipass pass rate control. Default x265_2pass.log\n");
+    H0("   --[no-]slow-firstpass         Enable a slow first pass in a multipass rate control mode. Default %s\n", OPT(param->rc.bEnableSlowFirstPass));
+    H0("   --[no-]strict-cbr             Enable stricter conditions and tolerance for bitrate deviations in CBR mode. Default %s\n", OPT(param->rc.bStrictCbr));
+    H0("   --analysis-mode <string|int>  save - Dump analysis info into file, load - Load analysis buffers from the file. Default %d\n", param->analysisMode);
+    H0("   --analysis-file <filename>    Specify file name used for either dumping or reading analysis data.\n");
+    H0("   --aq-mode <integer>           Mode for Adaptive Quantization - 0:none 1:uniform AQ 2:auto variance 3:auto variance with bias to dark scenes. Default %d\n", param->rc.aqMode);
+    H0("   --aq-strength <float>         Reduces blocking and blurring in flat and textured areas (0 to 3.0). Default %.2f\n", param->rc.aqStrength);
+    H0("   --qg-size <int>               Specifies the size of the quantization group (64, 32, 16). Default %d\n", param->rc.qgSize);
+    H0("   --[no-]cutree                 Enable cutree for Adaptive Quantization. Default %s\n", OPT(param->rc.cuTree));
+    H1("   --ipratio <float>             QP factor between I and P. Default %.2f\n", param->rc.ipFactor);
+    H1("   --pbratio <float>             QP factor between P and B. Default %.2f\n", param->rc.pbFactor);
+    H1("   --qcomp <float>               Weight given to predicted complexity. Default %.2f\n", param->rc.qCompress);
+    H1("   --qpstep <integer>            The maximum single adjustment in QP allowed to rate control. Default %d\n", param->rc.qpStep);
+    H1("   --cbqpoffs <integer>          Chroma Cb QP Offset [-12..12]. Default %d\n", param->cbQpOffset);
+    H1("   --crqpoffs <integer>          Chroma Cr QP Offset [-12..12]. Default %d\n", param->crQpOffset);
+    H1("   --scaling-list <string>       Specify a file containing HM style quant scaling lists or 'default' or 'off'. Default: off\n");
+    H1("   --zones <zone0>/<zone1>/...   Tweak the bitrate of regions of the video\n");
+    H1("                                 Each zone is of the form\n");
+    H1("                                   <start frame>,<end frame>,<option>\n");
+    H1("                                   where <option> is either\n");
+    H1("                                       q=<integer> (force QP)\n");
+    H1("                                   or  b=<float> (bitrate multiplier)\n");
+    H1("   --lambda-file <string>        Specify a file containing replacement values for the lambda tables\n");
+    H1("                                 MAX_MAX_QP+1 floats for lambda table, then again for lambda2 table\n");
+    H1("                                 Blank lines and lines starting with hash(#) are ignored\n");
+    H1("                                 Comma is considered to be white-space\n");
+    H0("\nLoop filters (deblock and SAO):\n");
+    H0("   --[no-]deblock                Enable Deblocking Loop Filter, optionally specify tC:Beta offsets Default %s\n", OPT(param->bEnableLoopFilter));
+    H0("   --[no-]sao                    Enable Sample Adaptive Offset. Default %s\n", OPT(param->bEnableSAO));
+    H1("   --[no-]sao-non-deblock        Use non-deblocked pixels, else right/bottom boundary areas skipped. Default %s\n", OPT(param->bSaoNonDeblocked));
+    H0("\nVUI options:\n");
+    H0("   --sar <width:height|int>      Sample Aspect Ratio, the ratio of width to height of an individual pixel.\n");
+    H0("                                 Choose from 0=undef, 1=1:1(\"square\"), 2=12:11, 3=10:11, 4=16:11,\n");
+    H0("                                 5=40:33, 6=24:11, 7=20:11, 8=32:11, 9=80:33, 10=18:11, 11=15:11,\n");
+    H0("                                 12=64:33, 13=160:99, 14=4:3, 15=3:2, 16=2:1 or custom ratio of <int:int>. Default %d\n", param->vui.aspectRatioIdc);
+    H1("   --display-window <string>     Describe overscan cropping region as 'left,top,right,bottom' in pixels\n");
+    H1("   --overscan <string>           Specify whether it is appropriate for decoder to show cropped region: undef, show or crop. Default undef\n");
+    H0("   --videoformat <string>        Specify video format from undef, component, pal, ntsc, secam, mac. Default undef\n");
+    H0("   --range <string>              Specify black level and range of luma and chroma signals as full or limited Default limited\n");
+    H0("   --colorprim <string>          Specify color primaries from undef, bt709, bt470m, bt470bg, smpte170m,\n");
+    H0("                                 smpte240m, film, bt2020. Default undef\n");
+    H0("   --transfer <string>           Specify transfer characteristics from undef, bt709, bt470m, bt470bg, smpte170m,\n");
+    H0("                                 smpte240m, linear, log100, log316, iec61966-2-4, bt1361e, iec61966-2-1,\n");
+    H0("                                 bt2020-10, bt2020-12, smpte-st-2084, smpte-st-428, arib-std-b67. Default undef\n");
+    H1("   --colormatrix <string>        Specify color matrix setting from undef, bt709, fcc, bt470bg, smpte170m,\n");
+    H1("                                 smpte240m, GBR, YCgCo, bt2020nc, bt2020c. Default undef\n");
+    H1("   --chromaloc <integer>         Specify chroma sample location (0 to 5). Default of %d\n", param->vui.chromaSampleLocTypeTopField);
+    H0("   --master-display <string>     SMPTE ST 2086 master display color volume info SEI (HDR)\n");
+    H0("                                    format: G(x,y)B(x,y)R(x,y)WP(x,y)L(max,min)\n");
+    H0("   --max-cll <string>            Emit content light level info SEI as \"cll,fall\" (HDR)\n");
+    H0("   --min-luma <integer>          Minimum luma plane value of input source picture\n");
+    H0("   --max-luma <integer>          Maximum luma plane value of input source picture\n");
+    H0("\nBitstream options:\n");
+    H0("   --[no-]repeat-headers         Emit SPS and PPS headers at each keyframe. Default %s\n", OPT(param->bRepeatHeaders));
+    H0("   --[no-]info                   Emit SEI identifying encoder and parameters. Default %s\n", OPT(param->bEmitInfoSEI));
+    H0("   --[no-]hrd                    Enable HRD parameters signaling. Default %s\n", OPT(param->bEmitHRDSEI));
+    H0("   --[no-]temporal-layers        Enable a temporal sublayer for unreferenced B frames. Default %s\n", OPT(param->bEnableTemporalSubLayers));
+    H0("   --[no-]aud                    Emit access unit delimiters at the start of each access unit. Default %s\n", OPT(param->bEnableAccessUnitDelimiters));
+    H1("   --hash <integer>              Decoded Picture Hash SEI 0: disabled, 1: MD5, 2: CRC, 3: Checksum. Default %d\n", param->decodedPictureHashSEI);
+    H1("\nReconstructed video options (debugging):\n");
+    H1("-r/--recon <filename>            Reconstructed raw image YUV or Y4M output file name\n");
+    H1("   --recon-depth <integer>       Bit-depth of reconstructed raw image file. Defaults to input bit depth, or 8 if Y4M\n");
+    H1("   --recon-y4m-exec <string>     pipe reconstructed frames to Y4M viewer, ex:\"ffplay -i pipe:0 -autoexit\"\n");
+    H1("\nExecutable return codes:\n");
+    H1("    0 - encode successful\n");
+    H1("    1 - unable to parse command line\n");
+    H1("    2 - unable to open encoder\n");
+    H1("    3 - unable to generate stream headers\n");
+    H1("    4 - encoder abort\n");
+#undef OPT
+#undef H0
+#undef H1
+
+    if (level < X265_LOG_DEBUG)
+        printf("\nUse --log-level full --help for a full listing\n");
+    printf("\n\nComplete documentation may be found at http://x265.readthedocs.org/en/default/cli.html\n");
+    /* help is always terminal: exit with the "unable to parse command line" code */
+    exit(1);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/x265_glue.c	Wed Nov 16 11:16:33 2016 +0200
@@ -0,0 +1,210 @@
+/*
+ * x265 encoder front-end  
+ *
+ * Copyright (c) 2014 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <unistd.h>
+
+#include "bpgenc.h"
+
+#include "x265.h"
+
+/* State for one x265 encode session: the per-bit-depth API vtable, the
+ * encoder handle, a reusable input picture descriptor, and a growable
+ * buffer that accumulates the emitted HEVC bitstream. */
+struct HEVCEncoderContext {
+    const x265_api *api; /* x265 entry points selected by x265_api_get(bit_depth) */
+    x265_encoder *enc; /* encoder instance */
+    x265_picture *pic; /* reusable input picture descriptor */
+    uint8_t *buf; /* accumulated output bitstream (grown by add_nal) */
+    int buf_len, buf_size; /* used / allocated bytes in buf */
+};
+
+/*
+ * Allocate and configure an x265 encoder from BPG encode parameters.
+ * Returns a new context, or NULL on failure (unsupported bit depth,
+ * allocation failure, or encoder_open failure). No resources are
+ * leaked on the failure paths.
+ */
+static HEVCEncoderContext *x265_open(const HEVCEncodeParams *params)
+{
+    HEVCEncoderContext *s;
+    x265_param *p;
+    int preset_index;
+    const char *preset;
+
+    s = malloc(sizeof(HEVCEncoderContext));
+    if (!s)
+        return NULL;
+    memset(s, 0, sizeof(*s));
+
+    /* select the library build matching the requested bit depth */
+    s->api = x265_api_get(params->bit_depth);
+    if (!s->api) {
+        fprintf(stderr, "x265 supports bit depths of 8, 10 or 12.\n");
+        free(s);
+        return NULL;
+    }
+#if 0
+    /* Note: the x265 library included in libbpg supported gray encoding */
+    if (params->chroma_format == BPG_FORMAT_GRAY) {
+        fprintf(stderr, "x265 does not support monochrome (or alpha) data yet. Please use the jctvc encoder.\n");
+        return NULL;
+    }
+#endif
+
+    p = s->api->param_alloc();
+    if (!p) {
+        free(s);
+        return NULL;
+    }
+
+    preset_index = params->compress_level; /* 9 is placebo */
+
+    preset = x265_preset_names[preset_index];
+    if (params->verbose)
+        printf("Using x265 preset: %s\n", preset);
+
+    s->api->param_default_preset(p, preset, "ssim");
+
+    p->bRepeatHeaders = 1;
+    p->decodedPictureHashSEI = params->sei_decoded_picture_hash;
+    p->sourceWidth = params->width;
+    p->sourceHeight = params->height;
+    switch(params->chroma_format) {
+    case BPG_FORMAT_GRAY:
+        p->internalCsp = X265_CSP_I400;
+        break;
+    case BPG_FORMAT_420:
+        p->internalCsp = X265_CSP_I420;
+        break;
+    case BPG_FORMAT_422:
+        p->internalCsp = X265_CSP_I422;
+        break;
+    case BPG_FORMAT_444:
+        p->internalCsp = X265_CSP_I444;
+        break;
+    default:
+        abort();
+    }
+    if (params->intra_only) {
+        p->keyframeMax = 1; /* only I frames */
+        p->totalFrames = 1;
+    } else {
+        p->keyframeMax = 250;
+        p->totalFrames = 0;
+        p->maxNumReferences = 1;
+        p->bframes = 0;
+    }
+    p->bEnableRectInter = 1;
+    p->bEnableAMP = 1; /* cannot use 0 due to header restriction */
+    p->internalBitDepth = params->bit_depth;
+    p->bEmitInfoSEI = 0;
+    if (params->verbose)
+        p->logLevel = X265_LOG_INFO;
+    else
+        p->logLevel = X265_LOG_NONE;
+
+    /* dummy frame rate (a single still picture has no meaningful rate) */
+    p->fpsNum = 25;
+    p->fpsDenom = 1;
+
+    p->rc.rateControlMode = X265_RC_CQP;
+    p->rc.qp = params->qp;
+    p->bLossless = params->lossless;
+
+    s->enc = s->api->encoder_open(p);
+    if (!s->enc) {
+        fprintf(stderr, "Error: could not open the x265 encoder.\n");
+        s->api->param_free(p);
+        free(s);
+        return NULL;
+    }
+
+    s->pic = s->api->picture_alloc();
+    if (!s->pic) {
+        s->api->encoder_close(s->enc);
+        s->api->param_free(p);
+        free(s);
+        return NULL;
+    }
+    s->api->picture_init(p, s->pic);
+
+    s->pic->colorSpace = p->internalCsp;
+
+    /* the encoder keeps its own copy of the parameters */
+    s->api->param_free(p);
+
+    return s;
+}
+
+/*
+ * Append one NAL unit payload to the context's output buffer, growing the
+ * buffer geometrically (x1.5, at least to the required size) as needed.
+ * Aborts the process on out-of-memory since the caller cannot report it.
+ */
+static void add_nal(HEVCEncoderContext *s, const uint8_t *data, int data_len)
+{
+    int new_size, size;
+
+    size = s->buf_len + data_len;
+    if (size > s->buf_size) {
+        uint8_t *new_buf;
+
+        new_size = (s->buf_size * 3) / 2;
+        if (new_size < size)
+            new_size = size;
+        /* use a temporary so the original buffer is not lost (and then
+           dereferenced) if realloc() fails */
+        new_buf = realloc(s->buf, new_size);
+        if (!new_buf) {
+            fprintf(stderr, "Out of memory\n");
+            exit(1);
+        }
+        s->buf = new_buf;
+        s->buf_size = new_size;
+    }
+    memcpy(s->buf + s->buf_len, data, data_len);
+    s->buf_len += data_len;
+}
+
+/*
+ * Feed one image to the encoder and append any NAL units it emits to the
+ * context's output buffer. Returns 0 on success, -1 if the encoder
+ * reports an error.
+ */
+static int x265_encode(HEVCEncoderContext *s, Image *img)
+{
+    int c_count, i, ret;
+    x265_picture *pic;
+    uint32_t nal_count, j;
+    x265_nal *p_nal;
+
+    pic = s->pic;
+
+    /* grayscale input carries a single plane, otherwise Y + Cb + Cr */
+    if (img->format == BPG_FORMAT_GRAY)
+        c_count = 1;
+    else
+        c_count = 3;
+    for(i = 0; i < c_count; i++) {
+        pic->planes[i] = img->data[i];
+        pic->stride[i] = img->linesize[i];
+    }
+    pic->bitDepth = img->bit_depth;
+
+    ret = s->api->encoder_encode(s->enc, &p_nal, &nal_count, pic, NULL);
+    if (ret < 0)
+        return -1; /* propagate encoder errors instead of silently dropping them */
+    if (ret > 0) {
+        /* unsigned index matches nal_count's uint32_t type */
+        for(j = 0; j < nal_count; j++) {
+            add_nal(s, p_nal[j].payload, p_nal[j].sizeBytes);
+        }
+    }
+    return 0;
+}
+
+/*
+ * Flush the encoder, release all encoder resources and hand the
+ * accumulated bitstream to the caller. *pbuf receives the buffer
+ * (ownership transfers to the caller, who must free() it); the return
+ * value is its length in bytes.
+ */
+static int x265_close(HEVCEncoderContext *s, uint8_t **pbuf)
+{
+    int buf_len, ret;
+    uint32_t nal_count, i;
+    x265_nal *p_nal;
+
+    /* drain delayed frames until the encoder has nothing left */
+    for(;;) {
+        ret = s->api->encoder_encode(s->enc, &p_nal, &nal_count, NULL, NULL);
+        if (ret <= 0)
+            break;
+        for(i = 0; i < nal_count; i++) {
+            add_nal(s, p_nal[i].payload, p_nal[i].sizeBytes);
+        }
+    }
+
+    /* shrink the buffer to its exact size; if this (non-critical) realloc
+       fails, keep the original larger block instead of losing it. The
+       buf_len > 0 guard avoids realloc(ptr, 0), which may return NULL. */
+    if (s->buf_len > 0 && s->buf_len < s->buf_size) {
+        uint8_t *new_buf = realloc(s->buf, s->buf_len);
+        if (new_buf)
+            s->buf = new_buf;
+    }
+
+    *pbuf = s->buf;
+    buf_len = s->buf_len;
+
+    s->api->encoder_close(s->enc);
+    s->api->picture_free(s->pic);
+    free(s);
+    return buf_len;
+}
+
+/* Encoder backend descriptor exported to the bpgenc front-end
+ * (open/encode/close entry points, see bpgenc.h). */
+HEVCEncoder x265_hevc_encoder = {
+  .open = x265_open,
+  .encode = x265_encode,
+  .close = x265_close,
+};